diff --git a/.gitignore b/.gitignore index 4dbc17519..d41df3815 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,11 @@ -/.idea/ .DS_Store -/src/.sass-cache/ +.env* + +/.idea/ + +/site-content/site.yaml +/site-content/build/ + +/site-ui/build/ +/site-ui/node_modules/ +/site-ui/public/ \ No newline at end of file diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 1ffee23ef..000000000 --- a/Dockerfile +++ /dev/null @@ -1,57 +0,0 @@ -FROM debian:stretch - -# Set up non-root user, 'build', with default uid:gid -# This allows passing --build-arg to use local host user's uid:gid: -# $ docker-compose build \ -# --build-arg UID=$(id -u) \ -# --build-arg GID=$(id -g) \ -# cassandra-website -ARG UID=1000 -ARG GID=1000 -RUN echo "Setting up user 'build' with UID=${UID} GID=${GID}" -RUN groupadd --gid $GID --non-unique build -RUN useradd --create-home --shell /bin/bash \ - --uid $UID --gid $GID --non-unique build - -# Install tools -RUN apt-get update && \ - apt-get install -y \ - openjdk-8-jdk \ - procps \ - git \ - python2.7 \ - python-pip \ - ruby-full \ - make \ - ant \ - ant-optional \ - maven - -# Install Sphinx for generating Cassandra docs -RUN pip install --no-cache-dir \ - sphinx \ - sphinx_rtd_theme - -COPY ./src/Gemfile / -COPY ./src/Gemfile.lock / - -RUN gem install bundler && \ - bundle install && \ - rm /Gemfile /Gemfile.lock - -ENV BUILD_DIR="/home/build" - -# Setup directories for building the docs -# Give the build user rw access to everything in the build directory, -# neccessary for the ASF 'websites' jenkins agent (which can't chown) -RUN mkdir -p ${BUILD_DIR}/cassandra-site && \ - git clone https://gitbox.apache.org/repos/asf/cassandra.git ${BUILD_DIR}/cassandra && \ - chmod -R a+rw ${BUILD_DIR} - -EXPOSE 4000/tcp - -# Run as build user from here -USER build -COPY docker-entrypoint.sh /home/build/ -ENTRYPOINT ["/home/build/docker-entrypoint.sh"] -CMD [""] diff --git a/README.md b/README.md index 6f39a6c6e..fda4bb4d4 100644 --- a/README.md +++ b/README.md @@ -1,79 +1,311 @@ -Apache Cassandra website -======================== +# Apache Cassandra website -Development Cycle ------------------ +The website repository code is separated into two main parts. These parts are represented by the directories at the root level of the project. Specifically the structure of the repository is: -Making changes to the website is done with the following steps. +``` +ROOT + - site-content + - site-ui +``` -1. Test changes locally -2. Commit changes in `src/` to a fork and branch, and create a pull request -3. Get the pull request reviewed and merged to `master` -4. Preview the rendered site on https://cassandra.staged.apache.org/ (wait til [ci-cassandra.apache.org](https://ci-cassandra.apache.org/job/cassandra-website/) has deployed it) -5. Merge `asf-staging` to `asf-site` -6. View the rendered site on https://cassandra.apache.org/ +## Site UI +The 'site-ui' directory contains only the UI styling files that determines the look and feel of the site. A *ui-bundle.zip* file containing the styling information will be generated from the contents of this directory. Generation of the *ui-bundle.zip* will be done using `gulp` launched inside a Docker container. -To test changes before committing, it is a requirement that you build the website locally. Building the Apache Cassandra website takes a number of steps. To make things easier we have provided a Docker container which can build the full website in two simple commands and have it ready to commit via git. 
If you are interested in the process head over to the [README](./src/README) in _src_ directory. +## Site Content -Building Prerequisites ----------------------- +The 'site-content' directory contains all the raw page information e.g. where to download, developer guidelines, how to commit patches, etc. The live website HTML is generated from the contents of this directory. Generation of the HTML content is done by `antora` launched inside a Docker container. As part of the website HTML generation, the ui-bundle.zip file, and the Cassandra documentation location are passed to `antora`. It uses the ui-bundle.zip to style the website. The Cassandra documentation location will be used to gather and generate documentation for each Cassandra version. -To build and run the Docker container you will need `Docker` version 2.0.0.0 or greater. If you need a copy of the site code you will need `git` as well. +## Further Reading + +For further details about why the directories are separated as described above and why we use `antora` please see the [Details](#details) section at the bottom of this page. + +## Development Cycle + +Making changes to the website content can be done using the following steps. + +1. Preview changes locally. See below for further details about how to do this. +2. Commit changes in `site-content/source/html` to a fork and branch. +3. Create a pull request back to this repository. +3. Get the pull request reviewed and merged to `trunk`. +4. Preview the rendered site on https://cassandra.staged.apache.org/ (wait til [ci-cassandra.apache.org](https://ci-cassandra.apache.org/job/cassandra-website/) has deployed it). +5. Merge `asf-staging` to `asf-site`. +6. View the rendered site on https://cassandra.apache.org/. + +# Developer Quickstart + +To test changes before committing, it is a requirement that you build the website locally. Building the Apache Cassandra website takes a number of steps. To make things easier we have provided a suite of tools to build the full website in a few simple commands and have it ready to commit via git. Please see the [Details](#details) section at the bottom of this page for further details about the tooling. +## Building Prerequisites -Building the site ------------------ +To build and run the Docker container you will need `Docker` version 2.0.0.0 or greater. If you need a copy of the site code you will need `git` as well. + +## Building the Website If you need a copy of the site code run this command: ```bash $ git clone https://github.com/apache/cassandra-website.git $ cd ./cassandra-website +``` + +A `run.sh` wrapper script has been provided to simplify generating the docs and building the site. It provides a single commandline interface that generates the docker commands to run the website and UI docker containers. + +The script has the following usage format +```bash +$ ./run.sh [OPTIONS] ``` -To build the website run the following commands from within the `./cassandra-website` directory (assuming you used the above clone command): +A complete list of components, commands and options can be found by running the following command. ```bash -$ docker-compose build cassandra-website -$ docker-compose run cassandra-website +$ ./run.sh -h ``` -:warning: *Tip:* In order to prevent root-owned modified files in your repository, the container user, `build`, is set up with a default UID=1000:GID=1000, which is usually the first user configured on a linux machine. 
If your local user is different you should set up the container user with your local host user's UID:GID, replace the above with: +To build the website only, run the following command from within the `./cassandra-website` directory (assuming you used the above clone command). ```bash -$ docker-compose build --build-arg UID=$(id -u) --build-arg GID=$(id -g) cassandra-website -$ docker-compose run cassandra-website +$ ./run.sh website build ``` -Go make yourself a cup of coffee, this will take a while... +This will build the website content using your local copy of the cassandra-website, and the current checked-out branch. Use this command if you want to make a change to a top-level webpage without building the docs for any versions of cassandra. + +Once building has completed, the HTML content will be in the `./site-content/build/html/` directory ready to be reviewed and committed. -Once building has completed, the site content will be in the `./cassandra-website/content` directory ready to be tested. +:warning: *Tip:* In order to prevent root-owned modified files in your repository, the container executes operations as a non-root user. By default, the user is `build` and has the user and group permissions set to `UID=1000` and `GID=1000` respectfully. These permissions are usually the first user configured on a linux machine. +If your local user has different user and group permissions you can set up the container user with your local UID:GID. In addition, you can set the build user in the container your local username. These changes can be made when building the container using the following command: -Previewing the site -------------------- +```bash +$ ./run.sh website container -a BUILD_USER_ARG:$(whoami) -a UID_ARG:$(id -u) -a GID_ARG:$(id -g) +``` -The fastest way to preview the site is to run the following: +If you need to customise the container user as noted above, you must do this before you build the website or run any other website command. + +## Build the Website when Developing + +The website tooling is very flexible and allows for a wide range of development scenarios. + +### Build the website from a different branch + +You can tell the website builder to use a different branch to the one you are on. This can be done using the following command. ```bash -$ docker-compose up preview +$ ./run.sh website build -b cassandra-website:my_branch ``` -Then view the site on http://localhost:8000 +This will build the website content using your local copy of the cassandra-website, and the branch named `my_branch`. + +### Build the website using a local clone of the repository -If you want to preview the site as you are editing it run this command: +You can tell the website builder to use a different clone or fork of the repository. + +To build using another local copy of the cassandra-website run the following command. ```bash -$ docker-compose build cassandra-website -$ docker-compose up cassandra-website-serve +$ ./run.sh website build -u cassandra-website:/local/path/to/another/clone/of/cassandra-website ``` -For information about the site layout see the **Layout** section of [README](src/README#layout) in the _src_ directory. +This will build the website using the contents of the local repository located in */local/path/to/another/clone/of/cassandra-website* + +### Build the website using a remote clone of the repository -Merging `asf-staging` to `asf-site` ------------------------------------ +To build using a remote copy of the cassandra-website run the following command. 
+ +```bash +$ ./run.sh website build -u cassandra-website:https://github.com/my_orgranisation/cassandra-website.git +``` + +This will build the website using the contents of the remote repository located at *https://github.com/my_orgranisation/cassandra-website.git* + +:warning: *Tip:* The `HEAD` branch of the Cassandra Website repository is always used by default unless an alternative branch is specified. + +In both cases above the `HEAD` branch is used and translates to different branches. In first case where the repository is local, `HEAD` will translate to the currently checked out branch. In the second case where the repository is remote and needs to be cloned, `HEAD` will translate to default branch selected when the repository is cloned. You can specify a different branch using the `-b` option as per the example in [Build a different branch](#build-a-different-branch). + +## Previewing the Website + +An offline preview mode exists if you want to view the website as you make changes to the content. Preview mode can be launched using the following command. + +```bash +$ ./run.sh website preview +``` + +The site can be viewed on [http://localhost:5151](http://localhost:5151). + +The `preview` command operates the same as the `build` command. It will build the website content using your local copy of the cassandra-website, and the current checked-out branch. Additionally, it will then start a webserver to serve the HTML and a process that monitors the content files in your local copy of the repository. If a change is made to a content file, the website HTML will automatically be regenerated. + +Press `Ctrl+C` to stop the preview server and end the continuous build. + +:warning: *Tip:* You may need to refresh your browser when the auto rendering of the site is complete. + +All options that are available in the `build` command can be used by the `preview` command. Hence, the options used in the previous examples can be specified when using the `preview` command. + +# Developer Advanced Usage Guide + +The cassandra-website tooling can also be used to perform a number of other operations: + +* Generate the Apache Cassandra documentation. +* Update the website styling and behaviour. + +## Generating the Cassandra Documentation + +The website tooling provides the ability to generate the Cassandra AsciiDoc (.adoc) files. The content of the AsciiDoc files are sourced from nodetool and the cassandra.yaml configuration file. The AsciiDoc files need to exist in the Cassandra repository as either commits to the branch or changes ready to be committed before `antora` can generate the HTML formatted equivalent for publication to the Cassandra website. + +By default, the Docker container that generates the version documentation will clone the Apache Cassandra project within the container when generating the documentation. The document generation process commits the generated AsciiDoc to this repository. This is done so `antora` can generate website HTML as well as the HTML for the various versions of the documentation using the committed AsciiDoc in each branch. + +You can use your own local copy or fork of the Cassandra repository as the source for the versioned documentation. Doing this will also allow you to view and access the generated AsciiDoc files. To use your own copy or fork of the Cassandra repository, you can specify a local path to the `run.sh` script. 
In this case, when rendering multiple versions of the documentation, any changes to the generated AsciiDoc files will be committed to your local copy or fork. If multiple source branches are specified, commits will be made to each branch. + +### Generate documentation using a local clone of the repository + +To generate the latest version of the Cassandra docs using a local copy or fork of the Cassandra repository, run the following command. + +```bash +$ ./run.sh website docs -u cassandra:/local/path/to/cassandra/repository -b cassandra:trunk +``` + +The output of this command will be AsciiDoc (`.adoc`) files that `antora` can render into HTML. Note that `antora` is never executed when only the Cassandra versioned documentation is generated. + +If you are generating only the Cassandra version documentation, you should always specify a local copy of a Cassandra repository. This is because the generated AsciiDoc files will be placed in the Cassandra directory or committed to the branch used to generate them. You will then be able to view the contents of the AsciiDoc files. + +If you are generating only the Cassandra version documentation, and you specify a remote Cassandra repository location, the generated AsciiDoc files will be inaccessible. This is because the Cassandra repository will be cloned inside the container before the AsciiDoc files are generated. In this case a warning message will be displayed, and you will be asked whether to proceed with the operation. Specifying a remote Cassandra repository location is useful in cases where the HTML website is generated as well. See [Generating the website and versioned documentation HTML at the sametime](#generating-the-website-and-versioned-documentation-html-at-the-sametime). + +### Generate documentation for multiple Cassandra versions + +To generate multiple versions of the Cassandra documentation using a local copy or fork of the Cassandra repository, run the following command. + +```bash +$ ./run.sh website docs -u cassandra:/local/path/to/cassandra/repository -b cassandra:trunk,cassandra-3.11,my_branch +``` + +In the above command, multiple branches separated by a comma (`,`) can be specified in the `-b` option. + +## Generating the website and versioned documentation HTML at the sametime + +The website tooling can be used to run a complete end-to-end generation of the HTML website and versioned documentation. That is, it can first generate the AsciiDoc files for each Cassandra version, and then launch `antora` to generate the HTML website and versioned documentation for publication. + +### Generate the website and documentation using pre-generated Cassandra AsciiDoc in local repositories + +If you have already generated the Cassandra AsciiDoc (`.adoc`) files and committed them to your repository, you can skip the Cassandra AsciiDoc generation process. + +To build the website using a local clone of the Cassandra repository that contains the generated AsciiDoc files, and the Cassandra Website run the following command. + +```bash +$ ./run.sh \ + website \ + build \ + -i \ + -u cassandra:/local/path/to/cassandra/repository \ + -u cassandra-website:/local/path/to/cassandra-website/repository +``` + +In the above command, the `-i` option is used to tell the tooling to include the Cassandra repository when `antora` is generating the HTML. This ensures the versioned documentation HTML is generated along with the website HTML. 
In this case, exiting AsciiDoc files in the Cassandra repository are used to generate the versioned documentation HTML. That is, no additional operations are run to pre-generate the Cassandra AsciiDoc files. + +### Generate the website and documentation using pre-generated Cassandra AsciiDoc in remote repositories + +To build using a remote copy of the Cassandra repository that contains the generated AsciiDoc files, and the Cassandra Website run the following command. + +```bash +$ ./run.sh \ + website \ + build \ + -i \ + -u cassandra:https://github.com/my_orgranisation/cassandra.git \ + -u cassandra-website:https://github.com/my_orgranisation/cassandra-website.git +``` + +You can have a combination of local and remote repository paths. For example, you could have a local copy of the Cassandra Website repository and want to use the remote Cassandra repository to include the current documentation when rendering the website. To do this you can run the following command. + +```bash +$ ./run.sh \ + website \ + build \ + -i \ + -u cassandra:https://github.com/my_orgranisation/cassandra.git \ + -u cassandra-website:/local/path/to/cassandra-website/repository +``` + +### Generate the website using local copy of the ui-bundle + +You can use your own *ui-bundle.zip* file containing the information on how to style the website when building it. The *ui-bundle.zip* file can be generated using the `./run.sh` script. See the [Building the Site UI](#building-the-site-ui) section for furher details on how to build the *ui-bundle.zip*. + +To supply your own *ui-bundle.zip* file when building the website, run the following command. + +```bash +$ ./run.sh website build -z /local/path/to/ui-bundle.zip +``` + +The path to the *ui-bundle.zip* file can also be a remote URL. You can supply the URL using the following command. + +```bash +$ ./run.sh website build -z https://github.com/apache/cassandra-website/archive/refs/tags/ui-bundle-1.0.zip +``` + +The styling contained in the *ui-bundle.zip* will be applied to docs if they are being rendered as well. + +By default, the Docker container used to render the site will reference a GitHub release version of the *ui-bundle.zip*. + +## Building the Site UI + +To get a list of the tasks that can be executed, run the following commands. + +```bash +$ ./run.sh website-ui +``` + +A task can be executed using the following commands: + +```bash +$ ./run.sh website-ui +``` + +A full list of tasks can be found by running the following command + +```bash +$ ./run.sh website-ui tasks +``` + +### Building Bundle + +Antora needs a *ui-bundle.zip* when rendering the website and documentation content. It contains CSS, Java Script, fonts, images and other assets which define the look and feel of the website. + +To generate the *ui-bundle.zip* from the assets in the *site-ui* directory, run the following command. + +```bash +$ ./run.sh website-ui bundle +``` + +This will build the UI Bundle using your local copy of the styling assets located in *./site-ui/*. + +When packaged successfully, the *ui-bundle.zip* will be located in the *./site-ui/build/* directory of the repository. + +:warning: *Tip:* In order to prevent root-owned modified files in your repository, the container executes operations as a non-root user. By default, the user is `build` and has the user and group permissions set to `UID=1000` and `GID=1000` respectfully. These permissions are usually the first user configured on a linux machine. 
+ +If your local user has different user and group permissions you can set up the container user with your local UID:GID. In addition, you can set the build user in the container your local username. These changes can be made when building the container using the following command: + +```bash +$ ./run.sh website-ui container -a BUILD_USER_ARG:$(whoami) -a UID_ARG:$(id -u) -a GID_ARG:$(id -g) +``` + +If you need to customise the container user as noted above, you must do this before you build the UI Bundle or run any other website-ui command. + +### Preview UI + +An offline preview mode exists if you want to view example website content as you make changes to the UI. Preview mode can be launched using the following command. + +```bash +$ ./run website-ui preview +``` + +The example content can be viewed on [http://localhost:5252](http://localhost:5252). + +While preview mode is running, any changes you make to the source files will be instantly reflected in the browser. This works by monitoring the project for changes, running the build task if a change is detected, and sending the updates to the browser. + +The files in the *preview-src/* folder provide the sample content that allow you to see the UI in action. In this folder, you will primarily find pages written in AsciiDoc. These pages provide a representative sample and kitchen sink of content from the real site. + +Press `Ctrl+C` to stop the preview server and end the continuous build. + +# Merging `asf-staging` to `asf-site` Updating the main website, after verifying the staged website, involves copying the `asf-staging` branch to `asf-site`. A normal git merge is not used, because the `asf-staging` is forced updated after each ci-cassandra.apache.org build. Instead make live the staged website by copying the `asf-staging` to the `asf-site` branch. @@ -81,3 +313,22 @@ Updating the main website, after verifying the staged website, involves copying git switch asf-site git reset --hard origin/asf-staging git push -f origin asf-site + +# Details + +## Tooling components + +The tooling is made up of the following components + +* Run script: `./run.sh` - Provides a single commandline interface that generates the docker commands to run the website and UI docker containers. Using the containers, it can build the Cassandra website UI components, generate the Cassandra versioned documentation, and generate the website HTML. +* Website Docker container: `site-content/Dockerfile` and `site-content/docker-entrypoint.sh` - Contains the libraries necessary to generate the Cassandra versioned documentation, and generate the website HTML using Antora. +* Antora Site YAML script: `bin/site_yaml_generator.py` - Used by the Website Docker container to create the YAML file that defines the sources for the website content, optionally the cassandra versioned documentation, and the UI styling bundle to apply. +* Website UI Docker container: `site-ui/Dockerfile` and `site-ui/docker-entrypoint.sh` - Contains the libraries necessary to generate the UI bundle ZIP file the is applied by Antora to style the website and documentation. + +## Why is Antora being used + +Antora is being used for the website generation because it is designed to create websites that have version documentation. For example, when a new version of Cassandra is released, a new version of the documentation will be generated as well. Hence, if there is a change in the behaviour of Cassandra or a tool in the project, it will be captured in the latest version of the documentation. 
Users of the project will have the ability to select the version of the documentation they are interested in. + +## Why is the styling separated from the content + +Separating the layout/style, and the content means that stying changes can be made with little to no impact on content and vice-versa. In addition, changes to the styling can happen in parallel while website content is updated without conflict. \ No newline at end of file diff --git a/content/.keepdir b/content/.keepdir deleted file mode 100644 index 0488e103a..000000000 --- a/content/.keepdir +++ /dev/null @@ -1 +0,0 @@ -# dummy file to keep git directory diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 5d4f546c2..000000000 --- a/docker-compose.yml +++ /dev/null @@ -1,31 +0,0 @@ -version: '3.3' - -services: - cassandra-website: - build: . - image: cassandra-website:latest - user: build - volumes: - - ./src:/home/build/cassandra-site/src - - ./content:/home/build/cassandra-site/publish - - cassandra-website-serve: - build: . - image: cassandra-website:latest - user: build - entrypoint: /home/build/docker-entrypoint-jekyll-serve.sh - ports: - - 4000:4000 - volumes: - - ./src:/home/build/cassandra-site/src - - ./content:/home/build/cassandra-site/publish - - ./docker-entrypoint-jekyll-serve.sh:/home/build/docker-entrypoint-jekyll-serve.sh - - preview: - image: nginx - ports: - - "8000:80" - volumes: - - "./content:/usr/share/nginx/html" - command: [nginx-debug, '-g', 'daemon off;'] - diff --git a/docker-entrypoint-jekyll-serve.sh b/docker-entrypoint-jekyll-serve.sh deleted file mode 100755 index b34b92710..000000000 --- a/docker-entrypoint-jekyll-serve.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -set -e - -export CASSANDRA_SITE_DIR="/home/build/cassandra-site" - -GREEN='\033[1;32m' -YELLOW='\033[0;33m' -NC='\033[0m' # No Color - -# Run a process in the background to correct the resource paths for the landing pages of each -# version in the publish directory -while [ 1 ] -do - sed -i 's/\.\/\.\.\//\.\/\.\.\/\.\.\//g' ${CASSANDRA_SITE_DIR}/publish/doc/*/index.html - sleep 5 -done & - -cd ${CASSANDRA_SITE_DIR}/src - -JEKYLL_COMMAND="jekyll serve --host 0.0.0.0" - -echo -echo " Starting Jekyll: ${JEKYLL_COMMAND}" -echo "------------------------------------------------" -echo -e "${GREEN} Site Address: http://127.0.0.1:4000/${NC}" -echo "------------------------------------------------" - -${JEKYLL_COMMAND} diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh deleted file mode 100755 index 875f39c0a..000000000 --- a/docker-entrypoint.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -set -xe - -export CASSANDRA_SITE_DIR="${BUILD_DIR}/cassandra-site" -export CASSANDRA_DIR="${BUILD_DIR}/cassandra" - -jekyll --version - -# Make sure we have the latest commit of Cassandra trunk -cd ${CASSANDRA_DIR} -git checkout trunk -git pull --rebase --prune - -# Now make the docs for the latest version -cd ${CASSANDRA_SITE_DIR}/src -make add-latest-doc - - -# Make sure we have the latest commit of Cassandra 3.11 -pushd ${CASSANDRA_DIR} -ant realclean -git checkout cassandra-3.11 -git pull --rebase --prune -popd - -# Now make the docs for 3.11 -make .build-doc - -# Relink the 3.11 version -LATEST_VERSION=$(basename $(find ./doc -iname 3.11* -type d | sort | tail -n 1)) -rm -f doc/3.11 -ln -s -f ${LATEST_VERSION} doc/3.11 - -make build - - -# Generate the rest of the site -make - -# Fix the links in the resource paths for the landing pages of each version in the publish directory -cd 
${CASSANDRA_SITE_DIR} -sed -i 's/\.\/\.\.\//\.\/\.\.\/\.\.\//g' ./publish/doc/*/index.html diff --git a/run.sh b/run.sh new file mode 100755 index 000000000..fb8f635a1 --- /dev/null +++ b/run.sh @@ -0,0 +1,677 @@ +#!/bin/bash + +debug() { + >&2 echo "[DEBUG] $*" +} + +usage() { + cat < [OPTIONS] + +Components + The following values can be supplied for the COMPONENT argument. + * website This component is used to generate the website content and Apache Cassandra documentation. + * website-ui This component is used to generate the UI components that style the Apache Cassandra website. + +Commands + Each of the components contains their own set of commands. Below is a list of the components and its associated commands. + * website + container Builds the container used to carry out the tasks. + docs Generates the AsciiDoc (.adoc) documentation files only for the specified Cassandra versions. + No website HTML files are rendered. + build Renders the HTML files for the website using Antora. Cassandra documentation can be optionally + generated and rendered as part of this operation. See options below. + preview Launches a server that monitors the site content and renders the site when a change is detected. + + * website-ui + container Build the container used to carry out the tasks. +EOF + + local INDENT_SPACE=14 + + local parser_state="process_line" + local previous_word="" + local command_help_line="" + + for word_i in $(grep -B1 "desc" site-ui/gulpfile.js) + do + case ${word_i} in + name:) + parser_state="add_name" + continue + ;; + + desc:) + parser_state="add_desc" + continue + ;; + + --) + echo "${command_help_line} $(echo "${previous_word}" | tr -s ',' '.')" + command_help_line="" + previous_word="" + parser_state="process_line" + continue + ;; + + *) + case ${parser_state} in + add_name) + parser_state="process_line" + + command=$(echo "${word_i}" | tr -d "\'" | tr -d ',') + command_help_line=" ${command}" + + command_len=${#command} + padding=$((INDENT_SPACE - command_len)) + padding_count=0 + while [ ${padding_count} -lt ${padding} ] + do + command_help_line="${command_help_line} " + padding_count=$((padding_count + 1)) + done + + command_help_line="${command_help_line}" + continue + ;; + + add_desc) + command_help_line="${command_help_line} ${previous_word}" + previous_word=$(echo "${word_i}" | tr -d "\'") + ;; + esac + ;; + esac + done + + echo "${command_help_line} $(echo "${previous_word}" | tr -s ',' '.')" + + cat < /dev/null || { echo "ERROR: Failed to change directory to '${relative_path}'."; exit 1; } + location_source=$(pwd) + popd > /dev/null || { echo "ERROR: Failed to change back to working directory after changing to '${relative_path}'."; exit 1; } + fi + + if [ -n "${file_name}" ] + then + location_source="${location_source}/${file_name}" + fi +} + +exec_docker_run_command() { + local remove_container_option="--rm" + + if [ "${persist_container_after_run}" = "enabled" ] + then + remove_container_option="" + fi + + exec_docker_command "run -i -t ${remove_container_option} $*" +} + +exec_docker_build_command() { + exec_docker_command "build $*" +} + +exec_docker_command() { + echo + + if [ "${dry_run}" = "enabled" ] + then + echo "Dry run mode enabled. Docker command generated:" + echo "docker $*" + else + echo "Executing docker command:" + echo "docker $*" + eval "docker $*" + fi +} + +# This function requires the following two variables to be defined at a higher scope level +# +# url_source_name - Name for any of the following: file, directory, or repository. 
+# url_source_value - Value assigned to url_source_name. This value may change if it points to a local object. +# [cassandra_website|cassandra]_volume_mount_set +# - Flag indicating if a volume mount has been defined for the local cassandra-website or cassandra +# directory. +# cassandra_website_source_set +# - Flag indicating if a source for the cassandra-website content has been defined. +set_antora_url_source() { + local location_source="" + local location_type="" + get_source_location_information + + if [ "${location_type}" = "dir" ] || [ "${location_type}" = "file" ] + then + if [ "${url_source_name}" = "cassandra-website" ] || [ "${url_source_name}" = "cassandra" ] + then + eval "$(tr -s '-' '_' <<< "${url_source_name}")"_volume_mount_set=true + fi + url_source_value="${container_build_dir}/${url_source_name}" + vol_args+=("-v ${location_source}:${url_source_value}") + fi + + if [ "${url_source_name}" = "cassandra-website" ] + then + cassandra_website_source_set="true" + fi +} + +run_docker_website_command() { + local container_command=$1 + local container_build_dir="" + local port_map_option="" + + if [ -z "$(docker images -q "${container_tag}")" ] + then + build_container "content" + fi + + if [ "${container_command}" = "preview" ] + then + port_map_option="-p 5151:5151/tcp" + fi + + container_build_dir=$(get_container_build_dir) + + local env_file_arg + local env_args=() + local vol_args=() + + local repository_name + local repository_value + + local antora_content_source_env_name + + local cassandra_volume_mount_set="false" + local cassandra_website_volume_mount_set="false" + local cassandra_website_source_set="false" + + if [ -f "${env_file}" ] + then + env_file_arg="--env-file ${env_file}" + fi + + # The following are examples of the optional repository inputs we want to parse and convert into their equivalent + # ANTORA_CONTENT_SOURCES_* docker environment s. + # branch option - cassandra:trunk,my_test_branch_311,my_test_branch_40 -> ANTORA_CONTENT_SOURCES_CASSANDRA_BRANCHES=trunk,my_test_branch_311,my_test_branch_40 + # tag option - cassandra:cassandra-4.0,cassandra-3.11,cassandra-3.0 -> ANTORA_CONTENT_SOURCES_CASSANDRA_TAGS=cassandra-4.0,cassandra-3.11,cassandra-3.0 + # url option - cassandra:https://github.com/myfork/cassandra.git -> ANTORA_CONTENT_SOURCES_CASSANDRA_URL=https://github.com/myfork/cassandra.git + # + # We can do this by iterating through each of the repository source arrays and splitting the repository name from its + # supplied value. The eval command for the inner for loop resolves to "${repository_[*]}". This combined with + # the outer for loop means we will process all the values in arrays: repository_branches, repository_tags, repository_url. + for repository_source_type in branches tags url + do + for repository_source in $(eval echo \$\{"repository_${repository_source_type}"\[\*\]\}) + do + repository_name=$(sed '1,/:/s/:/=/' <<< "${repository_source}" | cut -d'=' -f1) + repository_value=$(sed '1,/:/s/:/=/' <<< "${repository_source}" | cut -d'=' -f2) + + # When we are asked to generate only the docs (i.e. command argument passed is 'docs'), we can ignore all + # repository information except for the cassandra repository. 
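+        # For example, a '-u cassandra-website:/some/path' option supplied for a docs-only run is skipped
+        # here, since only the 'cassandra' repository feeds the AsciiDoc generation step.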
+ if [ "${container_command}" = "generate-docs" ] && [ "${repository_name}" != "cassandra" ] + then + continue + fi + + case "${repository_source_type}" in + url) + local url_source_name="${repository_name}" + local url_source_value="${repository_value}" + set_antora_url_source + repository_value="${url_source_value}" + ;; + branches | tags) + # Check if we have a comma delimited list. If so, replace the commas with spaces and quote the string so the + # spaces and values are preserved when passed to Docker. + if [ "$(tr -dc ',' <<< "${repository_value}" | wc -c)" -gt 0 ] + then + repository_value="\"${repository_value//,/ }\"" + fi + ;; + esac + + antora_content_source_env_name="ANTORA_CONTENT_SOURCES_$( + tr '[:lower:]' '[:upper:]' <<< "${repository_name}" | + sed 's/\-/_/g' + )_$( + tr '[:lower:]' '[:upper:]' <<< "${repository_source_type}" + )" + + env_args+=("-e ${antora_content_source_env_name}=${repository_value}") + done + done + + if [ "${container_command}" = "generate-docs" ] + then + # Check if a local Cassandra repository path will be mounted inside the container when only generating the docs. If + # the caller is to access the generated AsciiDocs they will need to do this. If no local Cassandra repository is + # going to be mounted then display a warning message and prompt the caller to continue. + if [ "${cassandra_volume_mount_set}" = "false" ] + then + cat < 0: + for attr in attrs: + if attr[0] == "href": + regex_match = self._version_pattern.match(attr[1]) + + if regex_match: + self._current_version = Version(regex_match.group()) + + if self._current_version.release_number > self.latest: + self.latest = self._current_version.release_number + + def handle_endtag(self, tag): + self._previous_tag = tag + + def handle_data(self, data): + if self._previous_tag == "a" and self._current_version: + regex_match = self._date_pattern.match(data.strip()) + + if regex_match: + self._current_version.date = regex_match.group() + self.version_list.append(self._current_version) + self._current_version = None + + self._previous_tag = None + + +def build_arg_parser(): + parser = argparse.ArgumentParser(description="Generate the site.yml using the site.yml.template.") + + parser.add_argument( + "file_path", metavar="FILE_PATH", help="Path to site.template to use to generate site.yaml") + parser.add_argument( + "-s", + "--site-info", + metavar="JSON", + required=True, + dest="site_info", + help="Information about the site.") + parser.add_argument( + "-c" + "--content-source", + metavar="JSON", + required=True, + action="append", + dest="content_source_list", + help="JSON object containing the url, branches, tags, and start_path of a source for the website.") + parser.add_argument( + "-u", + "--ui-bundle-zip-url", + metavar="URL", + required=True, + dest="ui_bundle_url", + help="Local path or URL to UI bundle.zip.") + parser.add_argument( + "-r", + "--release-download-url", + metavar="URL", + required=True, + dest="release_download_url", + help="URL to the page listing all the available downloads.") + + return parser + +# --- main --- + + +args = build_arg_parser().parse_args() +site_yaml = SiteYAML() +site_yaml.set_site_setting(args.site_info) +site_yaml.set_content_source_setting(args.content_source_list) +site_yaml.set_ui_bundle_setting(args.ui_bundle_url) +site_yaml.set_asciidoc_attributes(args.release_download_url) +site_yaml.generate_file() diff --git a/site-content/docker-entrypoint.sh b/site-content/docker-entrypoint.sh new file mode 100755 index 000000000..43d4b7b31 --- /dev/null +++ 
b/site-content/docker-entrypoint.sh @@ -0,0 +1,299 @@ +#!/bin/bash + +# Abort script if a command fails +set -e + +export CASSANDRA_USE_JDK11=true +export CASSANDRA_WEBSITE_DIR="${BUILD_DIR}/cassandra-website" +export CASSANDRA_DIR="${BUILD_DIR}/cassandra" +export CASSANDRA_DOC="${CASSANDRA_DIR}/doc" +GIT_USER_SETUP="false" + +setup_git_user() { + if [ "${GIT_USER_SETUP}" = "false" ] + then + # Setup git so we can commit back to the Cassandra repository locally + git config --global user.email "${GIT_EMAIL_ADDRESS}" + git config --global user.name "${GIT_USER_NAME}" + GIT_USER_SETUP="true" + fi +} + +generate_cassandra_versioned_docs() { + if [ "$(find "${CASSANDRA_DIR}" -mindepth 1 -type f | wc -l)" -eq 0 ] + then + git clone "${ANTORA_CONTENT_SOURCES_CASSANDRA_URL}" "${BUILD_DIR}"/cassandra + + # Once the repository has been cloned set the Antora Cassandra source URL to be the local copy we have cloned in + # the container. This is so it will be used as the version documentation source by Antora if we are generating the + # website HTML. + ANTORA_CONTENT_SOURCES_CASSANDRA_URL="${BUILD_DIR}"/cassandra + fi + + # If we are generating the website HTML as well, make sure the versioned documentation is part of the output. + INCLUDE_VERSION_DOCS_WHEN_GENERATING_WEBSITE="enabled" + + mkdir -p "${CASSANDRA_DIR}"/cassandra/doc/build_gen + + local commit_changes_to_branch="" + if [ "$(wc -w <<< "${GENERATE_CASSANDRA_VERSIONS}")" -gt 1 ] || [ "${COMMIT_GENERATED_VERSION_DOCS_TO_REPOSITORY}" = "enabled" ] + then + commit_changes_to_branch="enabled" + else + commit_changes_to_branch="disabled" + fi + + for version in ${GENERATE_CASSANDRA_VERSIONS} + do + echo "Checking out '${version}'" + pushd "${CASSANDRA_DIR}" > /dev/null + git clean -xdff + git checkout "${version}" + git pull --rebase --prune + + echo "Building JAR files" + # Nodetool docs are autogenerated, but that needs nodetool to be built + ant jar + local doc_version="" + doc_version=$(ant echo-base-version | grep "\[echo\]" | tr -s ' ' | cut -d' ' -f3) + popd > /dev/null + + pushd "${CASSANDRA_DOC}" > /dev/null + # cassandra-3.11 is missing gen-nodetool-docs.py, ref: CASSANDRA-16093 + gen_nodetool_docs=$(find . -iname gen-nodetool-docs.py | head -n 1) + if [ ! -f "${gen_nodetool_docs}" ] + then + echo "Unable to find ${gen_nodetool_docs}, so I will download it from the Cassandra repository using commit a47be7e." + wget \ + -nc \ + -O ./gen-nodetool-docs.py \ + https://raw.githubusercontent.com/apache/cassandra/a47be7eddd5855fc7723d4080ca1a63c611efdab/doc/gen-nodetool-docs.py + fi + + echo "Generating asciidoc for version ${doc_version}" + # generate the nodetool docs + python3 "${gen_nodetool_docs}" + + # generate cassandra.yaml docs + local convert_yaml_to_adoc=$(find . -iname convert_yaml_to_adoc.py | head -n 1) + if [ -f "${gen_nodetool_docs}" ] + then + YAML_INPUT="${CASSANDRA_DIR}/conf/cassandra.yaml" + YAML_OUTPUT="${CASSANDRA_DOC}/modules/cassandra/pages/configuration/cass_yaml_file.adoc" + python3 "${convert_yaml_to_adoc}" "${YAML_INPUT}" "${YAML_OUTPUT}" + fi + + if [ "${commit_changes_to_branch}" = "enabled" ] + then + git add . + git commit -m "Generated nodetool and configuration documentation for ${doc_version}." || echo "No new changes to commit." 
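+            # 'git commit' exits with a non-zero status when there is nothing new to commit; the '|| echo'
+            # fallback above keeps 'set -e' from aborting the loop for branches without doc changes.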
+ fi + popd > /dev/null + done +} + +string_to_json() { + local key="${1}" + local value="${2}" + + echo -e "\"${key}\":\"${value}\"" +} + +list_to_json() { + local key="${1}" + local value="${2}" + + echo -e "\"${key}\":[$(echo \""${value}"\" | sed 's~\ ~\",\"~g')]" +} + +generate_json() { + local json_output + local count + + json_output="{" + count=1 + while true + do + local arg + local json_type + local key + local value + + arg="${!count}" + + if [ -z "${arg}" ] + then + break + fi + + json_type="$(cut -d':' -f1 <<< ${arg})" + key="$(cut -d':' -f2 <<< ${arg})" + value=${arg//${json_type}:${key}:/} + if [ -n "${value}" ] + then + json_obj=$("${json_type}_to_json" "${key}" "${value}") + + if [ "${json_output}" = "{" ] + then + json_output="${json_output}${json_obj}" + else + json_output="${json_output},${json_obj}" + fi + fi + count=$((count + 1)) + done + json_output="${json_output}}" + + echo -e "${json_output}" +} + +generate_site_yaml() { + pushd "${CASSANDRA_WEBSITE_DIR}/site-content" > /dev/null + + if [ "${INCLUDE_VERSION_DOCS_WHEN_GENERATING_WEBSITE}" = "enabled" ] + then + ANTORA_CONTENT_SOURCE_REPOSITORIES+=(CASSANDRA) + fi + + local repository_url="" + local start_path="" + local branches="" + local tags="" + local content_source_options=() + for repo in ${ANTORA_CONTENT_SOURCE_REPOSITORIES[*]} + do + repository_url=$(eval echo "$"ANTORA_CONTENT_SOURCES_${repo}_URL"") + start_path=$(eval echo "$"ANTORA_CONTENT_SOURCES_${repo}_START_PATH"") + branches=$(eval echo "$"ANTORA_CONTENT_SOURCES_${repo}_BRANCHES"") + tags=$(eval echo "$"ANTORA_CONTENT_SOURCES_${repo}_TAGS"") + + if [ -n "${repository_url}" ] && [ -n "${start_path}" ] && { [ -n "${branches}" ] || [ -n "${tags}" ]; } + then + content_source_options+=("-c") + content_source_options+=("$(generate_json \ + "string:url:${repository_url}" \ + "string:start_path:${start_path}" \ + "list:branches:${branches}" \ + "list:tags:${tags}")") + fi + done + + echo "Building site.yaml" + rm -f site.yaml + python3 ./bin/site_yaml_generator.py \ + -s "$(generate_json \ + "string:title:${ANTORA_SITE_TITLE}" \ + "string:url:${ANTORA_SITE_URL}" \ + "string:start_page:${ANTORA_SITE_START_PAGE}") "\ + "${content_source_options[@]}" \ + -u "${ANTORA_UI_BUNDLE_URL}" \ + -r "${CASSANDRA_DOWNLOADS_URL}" \ + site.template.yaml + popd > /dev/null +} + +render_site_content_to_html() { + pushd "${CASSANDRA_WEBSITE_DIR}/site-content" > /dev/null + echo "Building the site HTML content." + antora --generator antora-site-generator-lunr site.yaml + echo "Rendering complete!" + popd > /dev/null +} + +run_preview_mode() { + echo "Entering preview mode!" 
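+    # The build functions are exported so the 'entr' watcher at the end of this function can re-run them in a
+    # child bash shell whenever a watched source file changes, while 'live-server' serves the generated HTML
+    # from site-content/build/html in the background.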
+ + export -f render_site_content_to_html + + local on_change_functions="render_site_content_to_html" + local find_paths="${CASSANDRA_WEBSITE_DIR}/${ANTORA_CONTENT_SOURCES_CASSANDRA_WEBSITE_START_PATH}" + + if [ "${COMMAND_GENERATE_DOCS}" = "run" ] + then + on_change_functions="generate_cassandra_versioned_docs && ${on_change_functions}" + find_paths="${find_paths} ${CASSANDRA_DIR}/${ANTORA_CONTENT_SOURCES_CASSANDRA_START_PATH}" + + export -f generate_cassandra_versioned_docs + + # Ensure we only have one branch to generate docs for + GENERATE_CASSANDRA_VERSIONS=$(cut -d' ' -f1 <<< "${GENERATE_CASSANDRA_VERSIONS}") + fi + + if [ "${COMMAND_BUILD_SITE}" != "run" ] + then + generate_site_yaml + + export DOCSEARCH_ENABLED=true + export DOCSEARCH_ENGINE=lunr + export NODE_PATH="$(npm -g root)" + export DOCSEARCH_INDEX_VERSION=latest + + render_site_content_to_html + fi + + pushd "${CASSANDRA_WEBSITE_DIR}/site-content/build/html" > /dev/null + live-server --port=5151 --host=0.0.0.0 --no-browser --no-css-inject --wait=2000 & + popd > /dev/null + + find "${find_paths}" -type f | entr /bin/bash -c "${on_change_functions}" +} + + +# ============ MAIN ============ + +GENERATE_CASSANDRA_VERSIONS=$(sed 's/^[[:space:]]]*//' <<< "${ANTORA_CONTENT_SOURCES_CASSANDRA_BRANCHES} ${ANTORA_CONTENT_SOURCES_CASSANDRA_TAGS}") +export GENERATE_CASSANDRA_VERSIONS + +ANTORA_CONTENT_SOURCE_REPOSITORIES=( + CASSANDRA_WEBSITE +) + +# Initialise commands and assume none of them will run +COMMAND_GENERATE_DOCS="skip" +COMMAND_BUILD_SITE="skip" +COMMAND_PREVIEW="skip" + +# Work out which commands the caller has requested. We need to do this first as the commands should be run in a certain order +while [ "$1" != "" ] +do + case $1 in + "generate-docs") + COMMAND_GENERATE_DOCS="run" + ;; + "build-site") + COMMAND_BUILD_SITE="run" + ;; + "preview") + COMMAND_PREVIEW="run" + ;; + *) + echo "Skipping unrecognised command '$1'." + ;; + esac + + shift +done + +# Execute the commands as requested by the caller. +if [ "${COMMAND_GENERATE_DOCS}" = "run" ] +then + setup_git_user + generate_cassandra_versioned_docs +fi + +if [ "${COMMAND_BUILD_SITE}" = "run" ] +then + generate_site_yaml + + export DOCSEARCH_ENABLED=true + export DOCSEARCH_ENGINE=lunr + export NODE_PATH="$(npm -g root)" + export DOCSEARCH_INDEX_VERSION=latest + + render_site_content_to_html +fi + +if [ "${COMMAND_PREVIEW}" = "run" ] +then + run_preview_mode +fi diff --git a/site-content/lib/tabs-block.js b/site-content/lib/tabs-block.js new file mode 100644 index 000000000..6a84944ca --- /dev/null +++ b/site-content/lib/tabs-block.js @@ -0,0 +1,80 @@ +/* Copyright (c) 2018 OpenDevise, Inc. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Extends the AsciiDoc syntax to support a tabset. The tabset is created from + * a dlist enclosed in an example block that is marked with the tabs style. + * + * Usage: + * + * [tabs] + * ==== + * Tab A:: + * + + * -- + * Contents of tab A. + * -- + * Tab B:: + * + + * -- + * Contents of tab B. 
+ * -- + * ==== + * + * @author Dan Allen + */ +const IdSeparatorCh = '-' +const ExtraIdSeparatorsRx = /^-+|-+$|-(-)+/g +const InvalidIdCharsRx = /[^a-zA-Z0-9_]/g +const List = Opal.const_get_local(Opal.module(null, 'Asciidoctor'), 'List') +const ListItem = Opal.const_get_local(Opal.module(null, 'Asciidoctor'), 'ListItem') + +const generateId = (str, idx) => + `tabset${idx}_${str.toLowerCase().replace(InvalidIdCharsRx, IdSeparatorCh).replace(ExtraIdSeparatorsRx, '$1')}` + +function tabsBlock () { + this.onContext('example') + this.process((parent, reader, attrs) => { + const createHtmlFragment = (html) => this.createBlock(parent, 'pass', html) + const tabsetIdx = parent.getDocument().counter('idx-tabset') + const nodes = [] + nodes.push(createHtmlFragment('
<div class="tabset is-loading">'))
+    const container = this.parseContent(this.createBlock(parent, 'open'), reader)
+    const sourceTabs = container.getBlocks()[0]
+    if (!(sourceTabs && sourceTabs.getContext() === 'dlist' && sourceTabs.getItems().length)) return
+    const tabs = List.$new(parent, 'ulist')
+    tabs.addRole('tabs')
+    const panes = {}
+    sourceTabs.getItems().forEach(([[title], details]) => {
+      const tab = ListItem.$new(tabs)
+      tabs.$append(tab)
+      const id = generateId(title.getText(), tabsetIdx)
+      tab.text = `[[${id}]]${title.text}`
+      let blocks = details.getBlocks()
+      const numBlocks = blocks.length
+      if (numBlocks) {
+        if (blocks[0].context === 'open' && numBlocks === 1) blocks = blocks[0].getBlocks()
+        panes[id] = blocks.map((block) => (block.parent = parent) && block)
+      }
+    })
+    nodes.push(tabs)
+    nodes.push(createHtmlFragment('<div class="content">'))
+    Object.entries(panes).forEach(([id, blocks]) => {
+      nodes.push(createHtmlFragment(`<div class="tab-pane" aria-labelledby="${id}">`))
+      nodes.push(...blocks)
+      nodes.push(createHtmlFragment('</div>'))
+    })
+    nodes.push(createHtmlFragment('</div>'))
+    nodes.push(createHtmlFragment('</div>
')) + parent.blocks.push(...nodes) + }) +} + +function register (registry, context) { + registry.block('tabs', tabsBlock) +} + +module.exports.register = register diff --git a/site-content/site.template.yaml b/site-content/site.template.yaml new file mode 100644 index 000000000..93bbe2952 --- /dev/null +++ b/site-content/site.template.yaml @@ -0,0 +1,80 @@ +site: + title: {{ site.title }} + url: {{ site.url }} + start_page: {{ site.start_page }}::index.adoc + +content: + sources: + {%- for source in source_list %} + - url: {{ source.url }} + {%- if 'branches' in source %} + branches: + {%- for branch in source.branches %} + - '{{ branch }}' + {%- endfor %} + {%- endif %} + {%- if 'tags' in source %} + tags: + {%- for tag in source.tags %} + - '{{ tag }}' + {%- endfor %} + {%- endif %} + start_path: {{ source.start_path }} + {%- endfor %} + +ui: + bundle: + url: {{ ui_bundle.url }} + snapshot: true + output_dir: assets + +output: + clean: true + dir: ./build/html + +asciidoc: + attributes: + idprefix: '' + idseparator: '-' + experimental: '' + source-language: asciidoc + current-version: 4.0 + latest-version: 4.0 + previous-version: 3.11 + 40_version: '4.0' + 3x_version: '3.11' + example-caption: ~ + hide-uri-scheme: '' + linkattrs: '' + table-caption: ~ + tabs: tabs + page-pagination: true + latest-name: '{{ asciidoc_attributes.latest.name }}' + latest-date: '{{ asciidoc_attributes.latest.date }}' + 3_11-name: '{{ asciidoc_attributes.v311.name }}' + 3_11-date: '{{ asciidoc_attributes.v311.date }}' + 3_0-name: '{{ asciidoc_attributes.v30.name }}' + 3_0-date: '{{ asciidoc_attributes.v30.date }}' + 2_2-name: '{{ asciidoc_attributes.v22.name }}' + 2_2-date: '{{ asciidoc_attributes.v22.date }}' + 2_1-name: '{{ asciidoc_attributes.v21.name }}' + 2_1-date: '{{ asciidoc_attributes.v21.date }}' + url-downloads-cassandra: {{ asciidoc_attributes.url_downloads_cassandra }} + url-apache-closer: https://www.apache.org/dyn/closer.lua/cassandra + url-project: https://asciidoctor.org + url-org: https://github.com/asciidoctor + url-exten-lab: https://github.com/asciidoctor/asciidoctor-extensions-lab + url-rubygem: https://rubygems.org/gems/asciidoctor + url-tilt: https://github.com/rtomayko/tilt + url-foundation: https://foundation.zurb.com + url-jruby: https://jruby.org + url-highlightjs: https://highlightjs.org/ + url-highlightjs-lang: https://highlightjs.org/download/ + url-prettify: https://code.google.com/p/google-code-prettify + url-pygments: http://pygments.org + url-pygments-lang: http://pygments.org/languages/ + url-pygments-gem: https://rubygems.org/gems/pygments.rb + url-python: https://www.python.org + extensions: + - ./lib/tabs-block.js + - "@djencks/asciidoctor-openblock" diff --git a/site-content/source/antora.yml b/site-content/source/antora.yml new file mode 100644 index 000000000..97d5773c2 --- /dev/null +++ b/site-content/source/antora.yml @@ -0,0 +1,4 @@ +name: Website +version: master +nav: +- modules/ROOT/nav.adoc diff --git a/site-content/source/modules/ROOT/examples/TEXT/native_protocol_v3.spec b/site-content/source/modules/ROOT/examples/TEXT/native_protocol_v3.spec new file mode 100644 index 000000000..9b1084ba1 --- /dev/null +++ b/site-content/source/modules/ROOT/examples/TEXT/native_protocol_v3.spec @@ -0,0 +1,1040 @@ + + CQL BINARY PROTOCOL v3 + + +Table of Contents + + 1. Overview + 2. Frame header + 2.1. version + 2.2. flags + 2.3. stream + 2.4. opcode + 2.5. length + 3. Notations + 4. Messages + 4.1. Requests + 4.1.1. STARTUP + 4.1.2. AUTH_RESPONSE + 4.1.3. OPTIONS + 4.1.4. 
QUERY + 4.1.5. PREPARE + 4.1.6. EXECUTE + 4.1.7. BATCH + 4.1.8. REGISTER + 4.2. Responses + 4.2.1. ERROR + 4.2.2. READY + 4.2.3. AUTHENTICATE + 4.2.4. SUPPORTED + 4.2.5. RESULT + 4.2.5.1. Void + 4.2.5.2. Rows + 4.2.5.3. Set_keyspace + 4.2.5.4. Prepared + 4.2.5.5. Schema_change + 4.2.6. EVENT + 4.2.7. AUTH_CHALLENGE + 4.2.8. AUTH_SUCCESS + 5. Compression + 6. Data Type Serialization Formats + 7. User Defined Type Serialization + 8. Result paging + 9. Error codes + 10. Changes from v2 + + +1. Overview + + The CQL binary protocol is a frame based protocol. Frames are defined as: + + 0 8 16 24 32 40 + +---------+---------+---------+---------+---------+ + | version | flags | stream | opcode | + +---------+---------+---------+---------+---------+ + | length | + +---------+---------+---------+---------+ + | | + . ... body ... . + . . + . . + +---------------------------------------- + + The protocol is big-endian (network byte order). + + Each frame contains a fixed size header (9 bytes) followed by a variable size + body. The header is described in Section 2. The content of the body depends + on the header opcode value (the body can in particular be empty for some + opcode values). The list of allowed opcode is defined Section 2.4 and the + details of each corresponding message is described Section 4. + + The protocol distinguishes 2 types of frames: requests and responses. Requests + are those frame sent by the clients to the server, response are the ones sent + by the server. Note however that the protocol supports server pushes (events) + so responses does not necessarily come right after a client request. + + Note to client implementors: clients library should always assume that the + body of a given frame may contain more data than what is described in this + document. It will however always be safe to ignore the remaining of the frame + body in such cases. The reason is that this may allow to sometimes extend the + protocol with optional features without needing to change the protocol + version. + + + +2. Frame header + +2.1. version + + The version is a single byte that indicate both the direction of the message + (request or response) and the version of the protocol in use. The up-most bit + of version is used to define the direction of the message: 0 indicates a + request, 1 indicates a responses. This can be useful for protocol analyzers to + distinguish the nature of the packet from the direction which it is moving. + The rest of that byte is the protocol version (3 for the protocol defined in + this document). In other words, for this version of the protocol, version will + have one of: + 0x03 Request frame for this protocol version + 0x83 Response frame for this protocol version + + Please note that the while every message ship with the version, only one version + of messages is accepted on a given connection. In other words, the first message + exchanged (STARTUP) sets the version for the connection for the lifetime of this + connection. + + This document describe the version 3 of the protocol. For the changes made since + version 2, see Section 10. + + +2.2. flags + + Flags applying to this frame. The flags have the following meaning (described + by the mask that allow to select them): + 0x01: Compression flag. If set, the frame body is compressed. The actual + compression to use should have been set up beforehand through the + Startup message (which thus cannot be compressed; Section 4.1.1). + 0x02: Tracing flag. 
For a request frame, this indicate the client requires + tracing of the request. Note that not all requests support tracing. + Currently, only QUERY, PREPARE and EXECUTE queries support tracing. + Other requests will simply ignore the tracing flag if set. If a + request support tracing and the tracing flag was set, the response to + this request will have the tracing flag set and contain tracing + information. + If a response frame has the tracing flag set, its body contains + a tracing ID. The tracing ID is a [uuid] and is the first thing in + the frame body. The rest of the body will then be the usual body + corresponding to the response opcode. + + The rest of the flags is currently unused and ignored. + +2.3. stream + + A frame has a stream id (a [short] value). When sending request messages, this + stream id must be set by the client to a non-negative value (negative stream id + are reserved for streams initiated by the server; currently all EVENT messages + (section 4.2.6) have a streamId of -1). If a client sends a request message + with the stream id X, it is guaranteed that the stream id of the response to + that message will be X. + + This allow to deal with the asynchronous nature of the protocol. If a client + sends multiple messages simultaneously (without waiting for responses), there + is no guarantee on the order of the responses. For instance, if the client + writes REQ_1, REQ_2, REQ_3 on the wire (in that order), the server might + respond to REQ_3 (or REQ_2) first. Assigning different stream id to these 3 + requests allows the client to distinguish to which request an received answer + respond to. As there can only be 32768 different simultaneous streams, it is up + to the client to reuse stream id. + + Note that clients are free to use the protocol synchronously (i.e. wait for + the response to REQ_N before sending REQ_N+1). In that case, the stream id + can be safely set to 0. Clients should also feel free to use only a subset of + the 32768 maximum possible stream ids if it is simpler for those + implementation. + +2.4. opcode + + An integer byte that distinguish the actual message: + 0x00 ERROR + 0x01 STARTUP + 0x02 READY + 0x03 AUTHENTICATE + 0x05 OPTIONS + 0x06 SUPPORTED + 0x07 QUERY + 0x08 RESULT + 0x09 PREPARE + 0x0A EXECUTE + 0x0B REGISTER + 0x0C EVENT + 0x0D BATCH + 0x0E AUTH_CHALLENGE + 0x0F AUTH_RESPONSE + 0x10 AUTH_SUCCESS + + Messages are described in Section 4. + + (Note that there is no 0x04 message in this version of the protocol) + + +2.5. length + + A 4 byte integer representing the length of the body of the frame (note: + currently a frame is limited to 256MB in length). + + +3. Notations + + To describe the layout of the frame body for the messages in Section 4, we + define the following: + + [int] A 4 bytes signed integer + [long] A 8 bytes signed integer + [short] A 2 bytes unsigned integer + [string] A [short] n, followed by n bytes representing an UTF-8 + string. + [long string] An [int] n, followed by n bytes representing an UTF-8 string. + [uuid] A 16 bytes long uuid. + [string list] A [short] n, followed by n [string]. + [bytes] A [int] n, followed by n bytes if n >= 0. If n < 0, + no byte should follow and the value represented is `null`. + [short bytes] A [short] n, followed by n bytes if n >= 0. + + [option] A pair of where is a [short] representing + the option id and depends on that option (and can be + of size 0). The supported id (and the corresponding ) + will be described when this is used. 
+ [option list] A [short] n, followed by n [option]. + [inet] An address (ip and port) to a node. It consists of one + [byte] n, that represents the address size, followed by n + [byte] representing the IP address (in practice n can only be + either 4 (IPv4) or 16 (IPv6)), following by one [int] + representing the port. + [consistency] A consistency level specification. This is a [short] + representing a consistency level with the following + correspondance: + 0x0000 ANY + 0x0001 ONE + 0x0002 TWO + 0x0003 THREE + 0x0004 QUORUM + 0x0005 ALL + 0x0006 LOCAL_QUORUM + 0x0007 EACH_QUORUM + 0x0008 SERIAL + 0x0009 LOCAL_SERIAL + 0x000A LOCAL_ONE + + [string map] A [short] n, followed by n pair where and + are [string]. + [string multimap] A [short] n, followed by n pair where is a + [string] and is a [string list]. + + +4. Messages + +4.1. Requests + + Note that outside of their normal responses (described below), all requests + can get an ERROR message (Section 4.2.1) as response. + +4.1.1. STARTUP + + Initialize the connection. The server will respond by either a READY message + (in which case the connection is ready for queries) or an AUTHENTICATE message + (in which case credentials will need to be provided using AUTH_RESPONSE). + + This must be the first message of the connection, except for OPTIONS that can + be sent before to find out the options supported by the server. Once the + connection has been initialized, a client should not send any more STARTUP + message. + + The body is a [string map] of options. Possible options are: + - "CQL_VERSION": the version of CQL to use. This option is mandatory and + currenty, the only version supported is "3.0.0". Note that this is + different from the protocol version. + - "COMPRESSION": the compression algorithm to use for frames (See section 5). + This is optional, if not specified no compression will be used. + + +4.1.2. AUTH_RESPONSE + + Answers a server authentication challenge. + + Authentication in the protocol is SASL based. The server sends authentication + challenges (a bytes token) to which the client answer with this message. Those + exchanges continue until the server accepts the authentication by sending a + AUTH_SUCCESS message after a client AUTH_RESPONSE. It is however that client that + initiate the exchange by sending an initial AUTH_RESPONSE in response to a + server AUTHENTICATE request. + + The body of this message is a single [bytes] token. The details of what this + token contains (and when it can be null/empty, if ever) depends on the actual + authenticator used. + + The response to a AUTH_RESPONSE is either a follow-up AUTH_CHALLENGE message, + an AUTH_SUCCESS message or an ERROR message. + + +4.1.3. OPTIONS + + Asks the server to return what STARTUP options are supported. The body of an + OPTIONS message should be empty and the server will respond with a SUPPORTED + message. + + +4.1.4. QUERY + + Performs a CQL query. The body of the message must be: + + where is a [long string] representing the query and + must be + [[name_1]...[name_n]][][][][] + where: + - is the [consistency] level for the operation. + - is a [byte] whose bits define the options for this query and + in particular influence what the remainder of the message contains. + A flag is set if the bit corresponding to its `mask` is set. Supported + flags are, given there mask: + 0x01: Values. In that case, a [short] followed by [bytes] + values are provided. Those value are used for bound variables in + the query. 
Optionally, if the 0x40 flag is present, each value + will be preceded by a [string] name, representing the name of + the marker the value must be binded to. This is optional, and + if not present, values will be binded by position. + 0x02: Skip_metadata. If present, the Result Set returned as a response + to that query (if any) will have the NO_METADATA flag (see + Section 4.2.5.2). + 0x04: Page_size. In that case, is an [int] + controlling the desired page size of the result (in CQL3 rows). + See the section on paging (Section 8) for more details. + 0x08: With_paging_state. If present, should be present. + is a [bytes] value that should have been returned + in a result set (Section 4.2.5.2). If provided, the query will be + executed but starting from a given paging state. This also to + continue paging on a different node from the one it has been + started (See Section 8 for more details). + 0x10: With serial consistency. If present, should be + present. is the [consistency] level for the + serial phase of conditional updates. That consitency can only be + either SERIAL or LOCAL_SERIAL and if not present, it defaults to + SERIAL. This option will be ignored for anything else that a + conditional update/insert. + 0x20: With default timestamp. If present, should be present. + is a [long] representing the default timestamp for the query + in microseconds (negative values are discouraged but supported for + backward compatibility reasons except for the smallest negative + value (-2^63) that is forbidden). If provided, this will + replace the server side assigned timestamp as default timestamp. + Note that a timestamp in the query itself will still override + this timestamp. This is entirely optional. + 0x40: With names for values. This only makes sense if the 0x01 flag is set and + is ignored otherwise. If present, the values from the 0x01 flag will + be preceded by a name (see above). Note that this is only useful for + QUERY requests where named bind markers are used; for EXECUTE statements, + since the names for the expected values was returned during preparation, + a client can always provide values in the right order without any names + and using this flag, while supported, is almost surely inefficient. + + Note that the consistency is ignored by some queries (USE, CREATE, ALTER, + TRUNCATE, ...). + + The server will respond to a QUERY message with a RESULT message, the content + of which depends on the query. + + +4.1.5. PREPARE + + Prepare a query for later execution (through EXECUTE). The body consists of + the CQL query to prepare as a [long string]. + + The server will respond with a RESULT message with a `prepared` kind (0x0004, + see Section 4.2.5). + + +4.1.6. EXECUTE + + Executes a prepared query. The body of the message must be: + + where is the prepared query ID. It's the [short bytes] returned as a + response to a PREPARE message. As for , it has the exact + same definition than in QUERY (see Section 4.1.4). + + The response from the server will be a RESULT message. + + +4.1.7. BATCH + + Allows executing a list of queries (prepared or not) as a batch (note that + only DML statements are accepted in a batch). The body of the message must + be: + ...[][] + where: + - is a [byte] indicating the type of batch to use: + - If == 0, the batch will be "logged". This is equivalent to a + normal CQL3 batch statement. + - If == 1, the batch will be "unlogged". + - If == 2, the batch will be a "counter" batch (and non-counter + statements will be rejected). 
+ - is a [byte] whose bits define the options for this query and + in particular influence the remainder of the message contains. It is similar + to the from QUERY and EXECUTE methods, except that the 4 rightmost + bits must always be 0 as their corresponding option do not make sense for + Batch. A flag is set if the bit corresponding to its `mask` is set. Supported + flags are, given there mask: + 0x10: With serial consistency. If present, should be + present. is the [consistency] level for the + serial phase of conditional updates. That consitency can only be + either SERIAL or LOCAL_SERIAL and if not present, it defaults to + SERIAL. This option will be ignored for anything else that a + conditional update/insert. + 0x20: With default timestamp. If present, should be present. + is a [long] representing the default timestamp for the query + in microseconds. If provided, this will replace the server side assigned + timestamp as default timestamp. Note that a timestamp in the query itself + will still override this timestamp. This is entirely optional. + 0x40: With names for values. If set, then all values for all must be + preceded by a [string] that have the same meaning as in QUERY + requests [IMPORTANT NOTE: this feature does not work and should not be + used. It is specified in a way that makes it impossible for the server + to implement. This will be fixed in a future version of the native + protocol. See https://issues.apache.org/jira/browse/CASSANDRA-10246 for + more details]. + - is a [short] indicating the number of following queries. + - ... are the queries to execute. A must be of the + form: + []...[] + where: + - is a [byte] indicating whether the following query is a prepared + one or not. value must be either 0 or 1. + - depends on the value of . If == 0, it should be + a [long string] query string (as in QUERY, the query string might contain + bind markers). Otherwise (that is, if == 1), it should be a + [short bytes] representing a prepared query ID. + - is a [short] indicating the number (possibly 0) of following values. + - is the optional name of the following . It must be present + if and only if the 0x40 flag is provided for the batch. + - is the [bytes] to use for bound variable i (of bound variable + if the 0x40 flag is used). + - is the [consistency] level for the operation. + - is only present if the 0x10 flag is set. In that case, + is the [consistency] level for the serial phase of + conditional updates. That consitency can only be either SERIAL or + LOCAL_SERIAL and if not present will defaults to SERIAL. This option will + be ignored for anything else that a conditional update/insert. + + The server will respond with a RESULT message. + + +4.1.8. REGISTER + + Register this connection to receive some type of events. The body of the + message is a [string list] representing the event types to register to. See + section 4.2.6 for the list of valid event types. + + The response to a REGISTER message will be a READY message. + + Please note that if a client driver maintains multiple connections to a + Cassandra node and/or connections to multiple nodes, it is advised to + dedicate a handful of connections to receive events, but to *not* register + for events on all connections, as this would only result in receiving + multiple times the same event messages, wasting bandwidth. + + +4.2. Responses + + This section describes the content of the frame body for the different + responses. 
Please note that to make room for future evolution, clients should + support extra informations (that they should simply discard) to the one + described in this document at the end of the frame body. + +4.2.1. ERROR + + Indicates an error processing a request. The body of the message will be an + error code ([int]) followed by a [string] error message. Then, depending on + the exception, more content may follow. The error codes are defined in + Section 9, along with their additional content if any. + + +4.2.2. READY + + Indicates that the server is ready to process queries. This message will be + sent by the server either after a STARTUP message if no authentication is + required, or after a successful CREDENTIALS message. + + The body of a READY message is empty. + + +4.2.3. AUTHENTICATE + + Indicates that the server require authentication, and which authentication + mechanism to use. + + The authentication is SASL based and thus consists on a number of server + challenges (AUTH_CHALLENGE, Section 4.2.7) followed by client responses + (AUTH_RESPONSE, Section 4.1.2). The Initial exchange is however boostrapped + by an initial client response. The details of that exchange (including how + much challenge-response pair are required) are specific to the authenticator + in use. The exchange ends when the server sends an AUTH_SUCCESS message or + an ERROR message. + + This message will be sent following a STARTUP message if authentication is + required and must be answered by a AUTH_RESPONSE message from the client. + + The body consists of a single [string] indicating the full class name of the + IAuthenticator in use. + + +4.2.4. SUPPORTED + + Indicates which startup options are supported by the server. This message + comes as a response to an OPTIONS message. + + The body of a SUPPORTED message is a [string multimap]. This multimap gives + for each of the supported STARTUP options, the list of supported values. + + +4.2.5. RESULT + + The result to a query (QUERY, PREPARE, EXECUTE or BATCH messages). + + The first element of the body of a RESULT message is an [int] representing the + `kind` of result. The rest of the body depends on the kind. The kind can be + one of: + 0x0001 Void: for results carrying no information. + 0x0002 Rows: for results to select queries, returning a set of rows. + 0x0003 Set_keyspace: the result to a `use` query. + 0x0004 Prepared: result to a PREPARE message. + 0x0005 Schema_change: the result to a schema altering query. + + The body for each kind (after the [int] kind) is defined below. + + +4.2.5.1. Void + + The rest of the body for a Void result is empty. It indicates that a query was + successful without providing more information. + + +4.2.5.2. Rows + + Indicates a set of rows. The rest of body of a Rows result is: + + where: + - is composed of: + [][?...] + where: + - is an [int]. The bits of provides information on the + formatting of the remaining informations. A flag is set if the bit + corresponding to its `mask` is set. Supported flags are, given there + mask: + 0x0001 Global_tables_spec: if set, only one table spec (keyspace + and table name) is provided as . If not + set, is not present. + 0x0002 Has_more_pages: indicates whether this is not the last + page of results and more should be retrieve. If set, the + will be present. The is a + [bytes] value that should be used in QUERY/EXECUTE to + continue paging and retrieve the remained of the result for + this query (See Section 8 for more details). 
+ 0x0004 No_metadata: if set, the is only composed of + these , the and optionally the + (depending on the Has_more_pages flage) but + no other information (so no nor ). + This will only ever be the case if this was requested + during the query (see QUERY and RESULT messages). + - is an [int] representing the number of columns selected + by the query this result is of. It defines the number of + elements in and the number of element for each row in . + - is present if the Global_tables_spec is set in + . If present, it is composed of two [string] representing the + (unique) keyspace name and table name the columns return are of. + - specifies the columns returned in the query. There is + such column specifications that are composed of: + ()? + The initial and are two [string] are only present + if the Global_tables_spec flag is not set. The is a + [string] and is an [option] that correspond to the description + (what this description is depends a bit on the context: in results to + selects, this will be either the user chosen alias or the selection used + (often a colum name, but it can be a function call too). In results to + a PREPARE, this will be either the name of the bind variable corresponding + or the column name for the variable if it is "anonymous") and type of + the corresponding result. The option for is either a native + type (see below), in which case the option has no value, or a + 'custom' type, in which case the value is a [string] representing + the full qualified class name of the type represented. Valid option + ids are: + 0x0000 Custom: the value is a [string], see above. + 0x0001 Ascii + 0x0002 Bigint + 0x0003 Blob + 0x0004 Boolean + 0x0005 Counter + 0x0006 Decimal + 0x0007 Double + 0x0008 Float + 0x0009 Int + 0x000B Timestamp + 0x000C Uuid + 0x000D Varchar + 0x000E Varint + 0x000F Timeuuid + 0x0010 Inet + 0x0020 List: the value is an [option], representing the type + of the elements of the list. + 0x0021 Map: the value is two [option], representing the types of the + keys and values of the map + 0x0022 Set: the value is an [option], representing the type + of the elements of the set + 0x0030 UDT: the value is ... + where: + - is a [string] representing the keyspace name this + UDT is part of. + - is a [string] representing the UDT name. + - is a [short] reprensenting the number of fields of + the UDT, and thus the number of pair + following + - is a [string] representing the name of the + i_th field of the UDT. + - is an [option] representing the type of the + i_th field of the UDT. + 0x0031 Tuple: the value is ... where is a [short] + representing the number of value in the type, and + are [option] representing the type of the i_th component + of the tuple + + - is an [int] representing the number of rows present in this + result. Those rows are serialized in the part. + - is composed of ... where m is . + Each is composed of ... where n is + and where is a [bytes] representing the value + returned for the jth column of the ith row. In other words, + is composed of ( * ) [bytes]. + + +4.2.5.3. Set_keyspace + + The result to a `use` query. The body (after the kind [int]) is a single + [string] indicating the name of the keyspace that has been set. + + +4.2.5.4. Prepared + + The result to a PREPARE message. The rest of the body of a Prepared result is: + + where: + - is [short bytes] representing the prepared query ID. 
+ - is defined exactly as for a Rows RESULT (See section 4.2.5.2; you + can however assume that the Has_more_pages flag is always off) and + is the specification for the variable bound in this prepare statement. + - is defined exactly as but correspond to the + metadata for the resultSet that execute this query will yield. Note that + may be empty (have the No_metadata flag and 0 columns, See + section 4.2.5.2) and will be for any query that is not a Select. There is + in fact never a guarantee that this will non-empty so client should protect + themselves accordingly. The presence of this information is an + optimization that allows to later execute the statement that has been + prepared without requesting the metadata (Skip_metadata flag in EXECUTE). + Clients can safely discard this metadata if they do not want to take + advantage of that optimization. + + Note that prepared query ID return is global to the node on which the query + has been prepared. It can be used on any connection to that node and this + until the node is restarted (after which the query must be reprepared). + +4.2.5.5. Schema_change + + The result to a schema altering query (creation/update/drop of a + keyspace/table/index). The body (after the kind [int]) is the same + as the body for a "SCHEMA_CHANGE" event, so 3 strings: + + Please refer to the section 4.2.6 below for the meaning of those fields. + + Note that queries to create and drop an index are considered as change + updating the table the index is on. + + +4.2.6. EVENT + + And event pushed by the server. A client will only receive events for the + type it has REGISTER to. The body of an EVENT message will start by a + [string] representing the event type. The rest of the message depends on the + event type. The valid event types are: + - "TOPOLOGY_CHANGE": events related to change in the cluster topology. + Currently, events are sent when new nodes are added to the cluster, and + when nodes are removed. The body of the message (after the event type) + consists of a [string] and an [inet], corresponding respectively to the + type of change ("NEW_NODE", "REMOVED_NODE", or "MOVED_NODE") followed + by the address of the new/removed/moved node. + - "STATUS_CHANGE": events related to change of node status. Currently, + up/down events are sent. The body of the message (after the event type) + consists of a [string] and an [inet], corresponding respectively to the + type of status change ("UP" or "DOWN") followed by the address of the + concerned node. + - "SCHEMA_CHANGE": events related to schema change. After the event type, + the rest of the message will be where: + - is a [string] representing the type of changed involved. + It will be one of "CREATED", "UPDATED" or "DROPPED". + - is a [string] that can be one of "KEYSPACE", "TABLE" or "TYPE" + and describes what has been modified ("TYPE" stands for modifications + related to user types). + - depends on the preceding . If is + "KEYSPACE", then will be a single [string] representing the + keyspace changed. Otherwise, if is "TABLE" or "TYPE", then + will be 2 [string]: the first one will be the keyspace + containing the affected object, and the second one will be the name + of said affected object (so either the table name or the user type + name). + + All EVENT message have a streamId of -1 (Section 2.3). + + Please note that "NEW_NODE" and "UP" events are sent based on internal Gossip + communication and as such may be sent a short delay before the binary + protocol server on the newly up node is fully started. 
Clients are thus + advise to wait a short time before trying to connect to the node (1 seconds + should be enough), otherwise they may experience a connection refusal at + first. + + It is possible for the same event to be sent multiple times. Therefore, + a client library should ignore the same event if it has already been notified + of a change. + +4.2.7. AUTH_CHALLENGE + + A server authentication challenge (see AUTH_RESPONSE (Section 4.1.2) for more + details). + + The body of this message is a single [bytes] token. The details of what this + token contains (and when it can be null/empty, if ever) depends on the actual + authenticator used. + + Clients are expected to answer the server challenge by an AUTH_RESPONSE + message. + +4.2.7. AUTH_SUCCESS + + Indicate the success of the authentication phase. See Section 4.2.3 for more + details. + + The body of this message is a single [bytes] token holding final information + from the server that the client may require to finish the authentication + process. What that token contains and whether it can be null depends on the + actual authenticator used. + + +5. Compression + + Frame compression is supported by the protocol, but then only the frame body + is compressed (the frame header should never be compressed). + + Before being used, client and server must agree on a compression algorithm to + use, which is done in the STARTUP message. As a consequence, a STARTUP message + must never be compressed. However, once the STARTUP frame has been received + by the server can be compressed (including the response to the STARTUP + request). Frame do not have to be compressed however, even if compression has + been agreed upon (a server may only compress frame above a certain size at its + discretion). A frame body should be compressed if and only if the compressed + flag (see Section 2.2) is set. + + As of this version 2 of the protocol, the following compressions are available: + - lz4 (https://code.google.com/p/lz4/). In that, note that the 4 first bytes + of the body will be the uncompressed length (followed by the compressed + bytes). + - snappy (https://code.google.com/p/snappy/). This compression might not be + available as it depends on a native lib (server-side) that might not be + avaivable on some installation. + + +6. Data Type Serialization Formats + + This sections describes the serialization formats for all CQL data types + supported by Cassandra through the native protocol. These serialization + formats should be used by client drivers to encode values for EXECUTE + messages. Cassandra will use these formats when returning values in + RESULT messages. + + All values are represented as [bytes] in EXECUTE and RESULT messages. + The [bytes] format includes an int prefix denoting the length of the value. + For that reason, the serialization formats described here will not include + a length component. + + For legacy compatibility reasons, note that most non-string types support + "empty" values (i.e. a value with zero length). An empty value is distinct + from NULL, which is encoded with a negative length. + + As with the rest of the native protocol, all encodings are big-endian. + +6.1. ascii + + A sequence of bytes in the ASCII range [0, 127]. Bytes with values outside of + this range will result in a validation error. + +6.2 bigint + + An eight-byte two's complement integer. + +6.3 blob + + Any sequence of bytes. + +6.4 boolean + + A single byte. A value of 0 denotes "false"; any other value denotes "true". 
+ (However, it is recommended that a value of 1 be used to represent "true".) + +6.5 decimal + + The decimal format represents an arbitrary-precision number. It contains an + [int] "scale" component followed by a varint encoding (see section 6.17) + of the unscaled value. The encoded value represents "E<-scale>". + In other words, " * 10 ^ (-1 * )". + +6.6 double + + An eight-byte floating point number in the IEEE 754 binary64 format. + +6.7 float + + An four-byte floating point number in the IEEE 754 binary32 format. + +6.8 inet + + A 4 byte or 16 byte sequence denoting an IPv4 or IPv6 address, respectively. + +6.9 int + + A four-byte two's complement integer. + +6.10 list + + A [int] n indicating the number of elements in the list, followed by n + elements. Each element is [bytes] representing the serialized value. + +6.11 map + + A [int] n indicating the number of key/value pairs in the map, followed by + n entries. Each entry is composed of two [bytes] representing the key + and value. + +6.12 set + + A [int] n indicating the number of elements in the set, followed by n + elements. Each element is [bytes] representing the serialized value. + +6.13 text + + A sequence of bytes conforming to the UTF-8 specifications. + +6.14 timestamp + + An eight-byte two's complement integer representing a millisecond-precision + offset from the unix epoch (00:00:00, January 1st, 1970). Negative values + represent a negative offset from the epoch. + +6.15 uuid + + A 16 byte sequence representing any valid UUID as defined by RFC 4122. + +6.16 varchar + + An alias of the "text" type. + +6.17 varint + + A variable-length two's complement encoding of a signed integer. + + The following examples may help implementors of this spec: + + Value | Encoding + ------|--------- + 0 | 0x00 + 1 | 0x01 + 127 | 0x7F + 128 | 0x0080 + 129 | 0x0081 + -1 | 0xFF + -128 | 0x80 + -129 | 0xFF7F + + Note that positive numbers must use a most-significant byte with a value + less than 0x80, because a most-significant bit of 1 indicates a negative + value. Implementors should pad positive values that have a MSB >= 0x80 + with a leading 0x00 byte. + +6.18 timeuuid + + A 16 byte sequence representing a version 1 UUID as defined by RFC 4122. + +6.19 tuple + + A sequence of [bytes] values representing the items in a tuple. The encoding + of each element depends on the data type for that position in the tuple. + Null values may be represented by using length -1 for the [bytes] + representation of an element. + + Within a tuple, all data types should use the v3 protocol serialization format. + + +7. User Defined Types + + This section describes the serialization format for User defined types (UDT), + as described in section 4.2.5.2. + + A UDT value is composed of successive [bytes] values, one for each field of the UDT + value (in the order defined by the type). A UDT value will generally have one value + for each field of the type it represents, but it is allowed to have less values than + the type has fields. + + Within a user-defined type value, all data types should use the v3 protocol + serialization format. + + +8. Result paging + + The protocol allows for paging the result of queries. For that, the QUERY and + EXECUTE messages have a value that indicate the desired + page size in CQL3 rows. + + If a positive value is provided for , the result set of the + RESULT message returned for the query will contain at most the + first rows of the query result. 
If that first page of result + contains the full result set for the query, the RESULT message (of kind `Rows`) + will have the Has_more_pages flag *not* set. However, if some results are not + part of the first response, the Has_more_pages flag will be set and the result + will contain a value. In that case, the value + should be used in a QUERY or EXECUTE message (that has the *same* query than + the original one or the behavior is undefined) to retrieve the next page of + results. + + Only CQL3 queries that return a result set (RESULT message with a Rows `kind`) + support paging. For other type of queries, the value is + ignored. + + Note to client implementors: + - While can be as low as 1, it will likely be detrimental + to performance to pick a value too low. A value below 100 is probably too + low for most use cases. + - Clients should not rely on the actual size of the result set returned to + decide if there is more result to fetch or not. Instead, they should always + check the Has_more_pages flag (unless they did not enabled paging for the query + obviously). Clients should also not assert that no result will have more than + results. While the current implementation always respect + the exact value of , we reserve ourselves the right to return + slightly smaller or bigger pages in the future for performance reasons. + - The is specific to a protocol version and drivers should not + send a returned by a node using the protocol v3 to query a node + using the protocol v4 for instance. + + +9. Error codes + + The supported error codes are described below: + 0x0000 Server error: something unexpected happened. This indicates a + server-side bug. + 0x000A Protocol error: some client message triggered a protocol + violation (for instance a QUERY message is sent before a STARTUP + one has been sent) + 0x0100 Bad credentials: CREDENTIALS request failed because Cassandra + did not accept the provided credentials. + + 0x1000 Unavailable exception. The rest of the ERROR message body will be + + where: + is the [consistency] level of the query having triggered + the exception. + is an [int] representing the number of node that + should be alive to respect + is an [int] representing the number of replica that + were known to be alive when the request has been + processed (since an unavailable exception has been + triggered, there will be < ) + 0x1001 Overloaded: the request cannot be processed because the + coordinator node is overloaded + 0x1002 Is_bootstrapping: the request was a read request but the + coordinator node is bootstrapping + 0x1003 Truncate_error: error during a truncation error. + 0x1100 Write_timeout: Timeout exception during a write request. The rest + of the ERROR message body will be + + where: + is the [consistency] level of the query having triggered + the exception. + is an [int] representing the number of nodes having + acknowledged the request. + is an [int] representing the number of replica whose + acknowledgement is required to achieve . + is a [string] that describe the type of the write + that timeouted. The value of that string can be one + of: + - "SIMPLE": the write was a non-batched + non-counter write. + - "BATCH": the write was a (logged) batch write. + If this type is received, it means the batch log + has been successfully written (otherwise a + "BATCH_LOG" type would have been send instead). + - "UNLOGGED_BATCH": the write was an unlogged + batch. Not batch log write has been attempted. + - "COUNTER": the write was a counter write + (batched or not). 
+ - "BATCH_LOG": the timeout occured during the + write to the batch log when a (logged) batch + write was requested. + - "CAS": the timeout occured during the Compare And Set write/update. + 0x1200 Read_timeout: Timeout exception during a read request. The rest + of the ERROR message body will be + + where: + is the [consistency] level of the query having triggered + the exception. + is an [int] representing the number of nodes having + answered the request. + is an [int] representing the number of replica whose + response is required to achieve . Please note that + it is possible to have >= if + is false. And also in the (unlikely) + case were is achieved but the coordinator node + timeout while waiting for read-repair + acknowledgement. + is a single byte. If its value is 0, it means + the replica that was asked for data has not + responded. Otherwise, the value is != 0. + + 0x2000 Syntax_error: The submitted query has a syntax error. + 0x2100 Unauthorized: The logged user doesn't have the right to perform + the query. + 0x2200 Invalid: The query is syntactically correct but invalid. + 0x2300 Config_error: The query is invalid because of some configuration issue + 0x2400 Already_exists: The query attempted to create a keyspace or a + table that was already existing. The rest of the ERROR message + body will be where: + is a [string] representing either the keyspace that + already exists, or the keyspace in which the table that + already exists is. +
<table> is a [string] representing the name of the table that
+                      already exists. If the query was attempting to create a
+                      keyspace, <table>
will be present but will be the empty + string. + 0x2500 Unprepared: Can be thrown while a prepared statement tries to be + executed if the provide prepared statement ID is not known by + this host. The rest of the ERROR message body will be [short + bytes] representing the unknown ID. + +10. Changes from v2 + * stream id is now 2 bytes long (a [short] value), so the header is now 1 byte longer (9 bytes total). + * BATCH messages now have (like QUERY and EXECUTE) and a corresponding optional + parameters (see Section 4.1.7). + * User Defined Types and tuple types have to added to ResultSet metadata (see 4.2.5.2) and a + new section on the serialization format of UDT and tuple values has been added to the documentation + (Section 7). + * The serialization format for collection has changed (both the collection size and + the length of each argument is now 4 bytes long). See Section 6. + * QUERY, EXECUTE and BATCH messages can now optionally provide the default timestamp for the query. + As this feature is optionally enabled by clients, implementing it is at the discretion of the + client. + * QUERY and EXECUTE messages can now optionally provide the names for the values of the + query. As this feature is optionally enabled by clients, implementing it is at the discretion of the + client (Note that while the BATCH message has a flag for this, it actually doesn't work for BATCH, + see Section 4.1.7 for details). + * The format of "Schema_change" results (Section 4.2.5.5) and "SCHEMA_CHANGE" events (Section 4.2.6) + has been modified, and now includes changes related to user types. + diff --git a/site-content/source/modules/ROOT/examples/TEXT/native_protocol_v4.spec b/site-content/source/modules/ROOT/examples/TEXT/native_protocol_v4.spec new file mode 100644 index 000000000..567024165 --- /dev/null +++ b/site-content/source/modules/ROOT/examples/TEXT/native_protocol_v4.spec @@ -0,0 +1,1191 @@ + + CQL BINARY PROTOCOL v4 + + +Table of Contents + + 1. Overview + 2. Frame header + 2.1. version + 2.2. flags + 2.3. stream + 2.4. opcode + 2.5. length + 3. Notations + 4. Messages + 4.1. Requests + 4.1.1. STARTUP + 4.1.2. AUTH_RESPONSE + 4.1.3. OPTIONS + 4.1.4. QUERY + 4.1.5. PREPARE + 4.1.6. EXECUTE + 4.1.7. BATCH + 4.1.8. REGISTER + 4.2. Responses + 4.2.1. ERROR + 4.2.2. READY + 4.2.3. AUTHENTICATE + 4.2.4. SUPPORTED + 4.2.5. RESULT + 4.2.5.1. Void + 4.2.5.2. Rows + 4.2.5.3. Set_keyspace + 4.2.5.4. Prepared + 4.2.5.5. Schema_change + 4.2.6. EVENT + 4.2.7. AUTH_CHALLENGE + 4.2.8. AUTH_SUCCESS + 5. Compression + 6. Data Type Serialization Formats + 7. User Defined Type Serialization + 8. Result paging + 9. Error codes + 10. Changes from v3 + + +1. Overview + + The CQL binary protocol is a frame based protocol. Frames are defined as: + + 0 8 16 24 32 40 + +---------+---------+---------+---------+---------+ + | version | flags | stream | opcode | + +---------+---------+---------+---------+---------+ + | length | + +---------+---------+---------+---------+ + | | + . ... body ... . + . . + . . + +---------------------------------------- + + The protocol is big-endian (network byte order). + + Each frame contains a fixed size header (9 bytes) followed by a variable size + body. The header is described in Section 2. The content of the body depends + on the header opcode value (the body can in particular be empty for some + opcode values). The list of allowed opcodes is defined in Section 2.4 and the + details of each corresponding message are described Section 4. 
+ + The protocol distinguishes two types of frames: requests and responses. Requests + are those frames sent by the client to the server. Responses are those frames sent + by the server to the client. Note, however, that the protocol supports server pushes + (events) so a response does not necessarily come right after a client request. + + Note to client implementors: client libraries should always assume that the + body of a given frame may contain more data than what is described in this + document. It will however always be safe to ignore the remainder of the frame + body in such cases. The reason is that this may enable extending the protocol + with optional features without needing to change the protocol version. + + + +2. Frame header + +2.1. version + + The version is a single byte that indicates both the direction of the message + (request or response) and the version of the protocol in use. The most + significant bit of version is used to define the direction of the message: + 0 indicates a request, 1 indicates a response. This can be useful for protocol + analyzers to distinguish the nature of the packet from the direction in which + it is moving. The rest of that byte is the protocol version (4 for the protocol + defined in this document). In other words, for this version of the protocol, + version will be one of: + 0x04 Request frame for this protocol version + 0x84 Response frame for this protocol version + + Please note that while every message ships with the version, only one version + of messages is accepted on a given connection. In other words, the first message + exchanged (STARTUP) sets the version for the connection for the lifetime of this + connection. + + This document describes version 4 of the protocol. For the changes made since + version 3, see Section 10. + + +2.2. flags + + Flags applying to this frame. The flags have the following meaning (described + by the mask that allows selecting them): + 0x01: Compression flag. If set, the frame body is compressed. The actual + compression to use should have been set up beforehand through the + Startup message (which thus cannot be compressed; Section 4.1.1). + 0x02: Tracing flag. For a request frame, this indicates the client requires + tracing of the request. Note that only QUERY, PREPARE and EXECUTE queries + support tracing. Other requests will simply ignore the tracing flag if + set. If a request supports tracing and the tracing flag is set, the response + to this request will have the tracing flag set and contain tracing + information. + If a response frame has the tracing flag set, its body contains + a tracing ID. The tracing ID is a [uuid] and is the first thing in + the frame body. The rest of the body will then be the usual body + corresponding to the response opcode. + 0x04: Custom payload flag. For a request or response frame, this indicates + that a generic key-value custom payload for a custom QueryHandler + implementation is present in the frame. Such a custom payload is simply + ignored by the default QueryHandler implementation. + Currently, only QUERY, PREPARE, EXECUTE and BATCH requests support + payload. + Type of custom payload is [bytes map] (see below). + 0x08: Warning flag. The response contains warnings which were generated by the + server to go along with this response. + If a response frame has the warning flag set, its body will contain the + text of the warnings. 
The warnings are a [string list] and will be the + first value in the frame body if the tracing flag is not set, or directly + after the tracing ID if it is. + + The rest of flags is currently unused and ignored. + +2.3. stream + + A frame has a stream id (a [short] value). When sending request messages, this + stream id must be set by the client to a non-negative value (negative stream id + are reserved for streams initiated by the server; currently all EVENT messages + (section 4.2.6) have a streamId of -1). If a client sends a request message + with the stream id X, it is guaranteed that the stream id of the response to + that message will be X. + + This helps to enable the asynchronous nature of the protocol. If a client + sends multiple messages simultaneously (without waiting for responses), there + is no guarantee on the order of the responses. For instance, if the client + writes REQ_1, REQ_2, REQ_3 on the wire (in that order), the server might + respond to REQ_3 (or REQ_2) first. Assigning different stream ids to these 3 + requests allows the client to distinguish to which request a received answer + responds to. As there can only be 32768 different simultaneous streams, it is up + to the client to reuse stream id. + + Note that clients are free to use the protocol synchronously (i.e. wait for + the response to REQ_N before sending REQ_N+1). In that case, the stream id + can be safely set to 0. Clients should also feel free to use only a subset of + the 32768 maximum possible stream ids if it is simpler for its implementation. + +2.4. opcode + + An integer byte that distinguishes the actual message: + 0x00 ERROR + 0x01 STARTUP + 0x02 READY + 0x03 AUTHENTICATE + 0x05 OPTIONS + 0x06 SUPPORTED + 0x07 QUERY + 0x08 RESULT + 0x09 PREPARE + 0x0A EXECUTE + 0x0B REGISTER + 0x0C EVENT + 0x0D BATCH + 0x0E AUTH_CHALLENGE + 0x0F AUTH_RESPONSE + 0x10 AUTH_SUCCESS + + Messages are described in Section 4. + + (Note that there is no 0x04 message in this version of the protocol) + + +2.5. length + + A 4 byte integer representing the length of the body of the frame (note: + currently a frame is limited to 256MB in length). + + +3. Notations + + To describe the layout of the frame body for the messages in Section 4, we + define the following: + + [int] A 4 bytes integer + [long] A 8 bytes integer + [short] A 2 bytes unsigned integer + [string] A [short] n, followed by n bytes representing an UTF-8 + string. + [long string] An [int] n, followed by n bytes representing an UTF-8 string. + [uuid] A 16 bytes long uuid. + [string list] A [short] n, followed by n [string]. + [bytes] A [int] n, followed by n bytes if n >= 0. If n < 0, + no byte should follow and the value represented is `null`. + [value] A [int] n, followed by n bytes if n >= 0. + If n == -1 no byte should follow and the value represented is `null`. + If n == -2 no byte should follow and the value represented is + `not set` not resulting in any change to the existing value. + n < -2 is an invalid value and results in an error. + [short bytes] A [short] n, followed by n bytes if n >= 0. + + [option] A pair of where is a [short] representing + the option id and depends on that option (and can be + of size 0). The supported id (and the corresponding ) + will be described when this is used. + [option list] A [short] n, followed by n [option]. + [inet] An address (ip and port) to a node. 
It consists of one + [byte] n, that represents the address size, followed by n + [byte] representing the IP address (in practice n can only be + either 4 (IPv4) or 16 (IPv6)), following by one [int] + representing the port. + [consistency] A consistency level specification. This is a [short] + representing a consistency level with the following + correspondance: + 0x0000 ANY + 0x0001 ONE + 0x0002 TWO + 0x0003 THREE + 0x0004 QUORUM + 0x0005 ALL + 0x0006 LOCAL_QUORUM + 0x0007 EACH_QUORUM + 0x0008 SERIAL + 0x0009 LOCAL_SERIAL + 0x000A LOCAL_ONE + + [string map] A [short] n, followed by n pair where and + are [string]. + [string multimap] A [short] n, followed by n pair where is a + [string] and is a [string list]. + [bytes map] A [short] n, followed by n pair where is a + [string] and is a [bytes]. + + +4. Messages + +4.1. Requests + + Note that outside of their normal responses (described below), all requests + can get an ERROR message (Section 4.2.1) as response. + +4.1.1. STARTUP + + Initialize the connection. The server will respond by either a READY message + (in which case the connection is ready for queries) or an AUTHENTICATE message + (in which case credentials will need to be provided using AUTH_RESPONSE). + + This must be the first message of the connection, except for OPTIONS that can + be sent before to find out the options supported by the server. Once the + connection has been initialized, a client should not send any more STARTUP + messages. + + The body is a [string map] of options. Possible options are: + - "CQL_VERSION": the version of CQL to use. This option is mandatory and + currently the only version supported is "3.0.0". Note that this is + different from the protocol version. + - "COMPRESSION": the compression algorithm to use for frames (See section 5). + This is optional; if not specified no compression will be used. + - "NO_COMPACT": whether or not connection has to be established in compatibility + mode. This mode will make all Thrift and Compact Tables to be exposed as if + they were CQL Tables. This is optional; if not specified, the option will + not be used. + - "THROW_ON_OVERLOAD": In case of server overloaded with too many requests, by default the server puts + back pressure on the client connection. Instead, the server can send an OverloadedException error message back to + the client if this option is set to true. + + +4.1.2. AUTH_RESPONSE + + Answers a server authentication challenge. + + Authentication in the protocol is SASL based. The server sends authentication + challenges (a bytes token) to which the client answers with this message. Those + exchanges continue until the server accepts the authentication by sending a + AUTH_SUCCESS message after a client AUTH_RESPONSE. Note that the exchange + begins with the client sending an initial AUTH_RESPONSE in response to a + server AUTHENTICATE request. + + The body of this message is a single [bytes] token. The details of what this + token contains (and when it can be null/empty, if ever) depends on the actual + authenticator used. + + The response to a AUTH_RESPONSE is either a follow-up AUTH_CHALLENGE message, + an AUTH_SUCCESS message or an ERROR message. + + +4.1.3. OPTIONS + + Asks the server to return which STARTUP options are supported. The body of an + OPTIONS message should be empty and the server will respond with a SUPPORTED + message. + + +4.1.4. QUERY + + Performs a CQL query. 
The body of the message must be: + + where is a [long string] representing the query and + must be + [[name_1]...[name_n]][][][][] + where: + - is the [consistency] level for the operation. + - is a [byte] whose bits define the options for this query and + in particular influence what the remainder of the message contains. + A flag is set if the bit corresponding to its `mask` is set. Supported + flags are, given their mask: + 0x01: Values. If set, a [short] followed by [value] + values are provided. Those values are used for bound variables in + the query. Optionally, if the 0x40 flag is present, each value + will be preceded by a [string] name, representing the name of + the marker the value must be bound to. + 0x02: Skip_metadata. If set, the Result Set returned as a response + to the query (if any) will have the NO_METADATA flag (see + Section 4.2.5.2). + 0x04: Page_size. If set, is an [int] + controlling the desired page size of the result (in CQL3 rows). + See the section on paging (Section 8) for more details. + 0x08: With_paging_state. If set, should be present. + is a [bytes] value that should have been returned + in a result set (Section 4.2.5.2). The query will be + executed but starting from a given paging state. This is also to + continue paging on a different node than the one where it + started (See Section 8 for more details). + 0x10: With serial consistency. If set, should be + present. is the [consistency] level for the + serial phase of conditional updates. That consitency can only be + either SERIAL or LOCAL_SERIAL and if not present, it defaults to + SERIAL. This option will be ignored for anything else other than a + conditional update/insert. + 0x20: With default timestamp. If set, should be present. + is a [long] representing the default timestamp for the query + in microseconds (negative values are forbidden). This will + replace the server side assigned timestamp as default timestamp. + Note that a timestamp in the query itself will still override + this timestamp. This is entirely optional. + 0x40: With names for values. This only makes sense if the 0x01 flag is set and + is ignored otherwise. If present, the values from the 0x01 flag will + be preceded by a name (see above). Note that this is only useful for + QUERY requests where named bind markers are used; for EXECUTE statements, + since the names for the expected values was returned during preparation, + a client can always provide values in the right order without any names + and using this flag, while supported, is almost surely inefficient. + + Note that the consistency is ignored by some queries (USE, CREATE, ALTER, + TRUNCATE, ...). + + The server will respond to a QUERY message with a RESULT message, the content + of which depends on the query. + + +4.1.5. PREPARE + + Prepare a query for later execution (through EXECUTE). The body consists of + the CQL query to prepare as a [long string]. + + The server will respond with a RESULT message with a `prepared` kind (0x0004, + see Section 4.2.5). + + +4.1.6. EXECUTE + + Executes a prepared query. The body of the message must be: + + where is the prepared query ID. It's the [short bytes] returned as a + response to a PREPARE message. As for , it has the exact + same definition as in QUERY (see Section 4.1.4). + + The response from the server will be a RESULT message. + + +4.1.7. BATCH + + Allows executing a list of queries (prepared or not) as a batch (note that + only DML statements are accepted in a batch). 
The body of the message must + be: + ...[][] + where: + - is a [byte] indicating the type of batch to use: + - If == 0, the batch will be "logged". This is equivalent to a + normal CQL3 batch statement. + - If == 1, the batch will be "unlogged". + - If == 2, the batch will be a "counter" batch (and non-counter + statements will be rejected). + - is a [byte] whose bits define the options for this query and + in particular influence what the remainder of the message contains. It is similar + to the from QUERY and EXECUTE methods, except that the 4 rightmost + bits must always be 0 as their corresponding options do not make sense for + Batch. A flag is set if the bit corresponding to its `mask` is set. Supported + flags are, given their mask: + 0x10: With serial consistency. If set, should be + present. is the [consistency] level for the + serial phase of conditional updates. That consistency can only be + either SERIAL or LOCAL_SERIAL and if not present, it defaults to + SERIAL. This option will be ignored for anything else other than a + conditional update/insert. + 0x20: With default timestamp. If set, should be present. + is a [long] representing the default timestamp for the query + in microseconds. This will replace the server side assigned + timestamp as default timestamp. Note that a timestamp in the query itself + will still override this timestamp. This is entirely optional. + 0x40: With names for values. If set, then all values for all must be + preceded by a [string] that have the same meaning as in QUERY + requests [IMPORTANT NOTE: this feature does not work and should not be + used. It is specified in a way that makes it impossible for the server + to implement. This will be fixed in a future version of the native + protocol. See https://issues.apache.org/jira/browse/CASSANDRA-10246 for + more details]. + - is a [short] indicating the number of following queries. + - ... are the queries to execute. A must be of the + form: + []...[] + where: + - is a [byte] indicating whether the following query is a prepared + one or not. value must be either 0 or 1. + - depends on the value of . If == 0, it should be + a [long string] query string (as in QUERY, the query string might contain + bind markers). Otherwise (that is, if == 1), it should be a + [short bytes] representing a prepared query ID. + - is a [short] indicating the number (possibly 0) of following values. + - is the optional name of the following . It must be present + if and only if the 0x40 flag is provided for the batch. + - is the [value] to use for bound variable i (of bound variable + if the 0x40 flag is used). + - is the [consistency] level for the operation. + - is only present if the 0x10 flag is set. In that case, + is the [consistency] level for the serial phase of + conditional updates. That consitency can only be either SERIAL or + LOCAL_SERIAL and if not present will defaults to SERIAL. This option will + be ignored for anything else other than a conditional update/insert. + + The server will respond with a RESULT message. + + +4.1.8. REGISTER + + Register this connection to receive some types of events. The body of the + message is a [string list] representing the event types to register for. See + section 4.2.6 for the list of valid event types. + + The response to a REGISTER message will be a READY message. 
+ + Please note that if a client driver maintains multiple connections to a + Cassandra node and/or connections to multiple nodes, it is advised to + dedicate a handful of connections to receive events, but to *not* register + for events on all connections, as this would only result in receiving + multiple times the same event messages, wasting bandwidth. + + +4.2. Responses + + This section describes the content of the frame body for the different + responses. Please note that to make room for future evolution, clients should + support extra informations (that they should simply discard) to the one + described in this document at the end of the frame body. + +4.2.1. ERROR + + Indicates an error processing a request. The body of the message will be an + error code ([int]) followed by a [string] error message. Then, depending on + the exception, more content may follow. The error codes are defined in + Section 9, along with their additional content if any. + + +4.2.2. READY + + Indicates that the server is ready to process queries. This message will be + sent by the server either after a STARTUP message if no authentication is + required (if authentication is required, the server indicates readiness by + sending a AUTH_RESPONSE message). + + The body of a READY message is empty. + + +4.2.3. AUTHENTICATE + + Indicates that the server requires authentication, and which authentication + mechanism to use. + + The authentication is SASL based and thus consists of a number of server + challenges (AUTH_CHALLENGE, Section 4.2.7) followed by client responses + (AUTH_RESPONSE, Section 4.1.2). The initial exchange is however boostrapped + by an initial client response. The details of that exchange (including how + many challenge-response pairs are required) are specific to the authenticator + in use. The exchange ends when the server sends an AUTH_SUCCESS message or + an ERROR message. + + This message will be sent following a STARTUP message if authentication is + required and must be answered by a AUTH_RESPONSE message from the client. + + The body consists of a single [string] indicating the full class name of the + IAuthenticator in use. + + +4.2.4. SUPPORTED + + Indicates which startup options are supported by the server. This message + comes as a response to an OPTIONS message. + + The body of a SUPPORTED message is a [string multimap]. This multimap gives + for each of the supported STARTUP options, the list of supported values. + + +4.2.5. RESULT + + The result to a query (QUERY, PREPARE, EXECUTE or BATCH messages). + + The first element of the body of a RESULT message is an [int] representing the + `kind` of result. The rest of the body depends on the kind. The kind can be + one of: + 0x0001 Void: for results carrying no information. + 0x0002 Rows: for results to select queries, returning a set of rows. + 0x0003 Set_keyspace: the result to a `use` query. + 0x0004 Prepared: result to a PREPARE message. + 0x0005 Schema_change: the result to a schema altering query. + + The body for each kind (after the [int] kind) is defined below. + + +4.2.5.1. Void + + The rest of the body for a Void result is empty. It indicates that a query was + successful without providing more information. + + +4.2.5.2. Rows + + Indicates a set of rows. The rest of the body of a Rows result is: + + where: + - is composed of: + [][?...] + where: + - is an [int]. The bits of provides information on the + formatting of the remaining information. A flag is set if the bit + corresponding to its `mask` is set. 
Supported flags are, given their + mask: + 0x0001 Global_tables_spec: if set, only one table spec (keyspace + and table name) is provided as . If not + set, is not present. + 0x0002 Has_more_pages: indicates whether this is not the last + page of results and more should be retrieved. If set, the + will be present. The is a + [bytes] value that should be used in QUERY/EXECUTE to + continue paging and retrieve the remainder of the result for + this query (See Section 8 for more details). + 0x0004 No_metadata: if set, the is only composed of + these , the and optionally the + (depending on the Has_more_pages flag) but + no other information (so no nor ). + This will only ever be the case if this was requested + during the query (see QUERY and RESULT messages). + - is an [int] representing the number of columns selected + by the query that produced this result. It defines the number of + elements in and the number of elements for each row in . + - is present if the Global_tables_spec is set in + . It is composed of two [string] representing the + (unique) keyspace name and table name the columns belong to. + - specifies the columns returned in the query. There are + such column specifications that are composed of: + ()? + The initial and are two [string] and are only present + if the Global_tables_spec flag is not set. The is a + [string] and is an [option] that corresponds to the description + (what this description is depends a bit on the context: in results to + selects, this will be either the user chosen alias or the selection used + (often a colum name, but it can be a function call too). In results to + a PREPARE, this will be either the name of the corresponding bind variable + or the column name for the variable if it is "anonymous") and type of + the corresponding result. The option for is either a native + type (see below), in which case the option has no value, or a + 'custom' type, in which case the value is a [string] representing + the fully qualified class name of the type represented. Valid option + ids are: + 0x0000 Custom: the value is a [string], see above. + 0x0001 Ascii + 0x0002 Bigint + 0x0003 Blob + 0x0004 Boolean + 0x0005 Counter + 0x0006 Decimal + 0x0007 Double + 0x0008 Float + 0x0009 Int + 0x000B Timestamp + 0x000C Uuid + 0x000D Varchar + 0x000E Varint + 0x000F Timeuuid + 0x0010 Inet + 0x0011 Date + 0x0012 Time + 0x0013 Smallint + 0x0014 Tinyint + 0x0020 List: the value is an [option], representing the type + of the elements of the list. + 0x0021 Map: the value is two [option], representing the types of the + keys and values of the map + 0x0022 Set: the value is an [option], representing the type + of the elements of the set + 0x0030 UDT: the value is ... + where: + - is a [string] representing the keyspace name this + UDT is part of. + - is a [string] representing the UDT name. + - is a [short] representing the number of fields of + the UDT, and thus the number of pairs + following + - is a [string] representing the name of the + i_th field of the UDT. + - is an [option] representing the type of the + i_th field of the UDT. + 0x0031 Tuple: the value is ... where is a [short] + representing the number of values in the type, and + are [option] representing the type of the i_th component + of the tuple + + - is an [int] representing the number of rows present in this + result. Those rows are serialized in the part. + - is composed of ... where m is . + Each is composed of ... 
where n is + and where is a [bytes] representing the value + returned for the jth column of the ith row. In other words, + is composed of ( * ) [bytes]. + + +4.2.5.3. Set_keyspace + + The result to a `use` query. The body (after the kind [int]) is a single + [string] indicating the name of the keyspace that has been set. + + +4.2.5.4. Prepared + + The result to a PREPARE message. The body of a Prepared result is: + + where: + - is [short bytes] representing the prepared query ID. + - is composed of: + [...][?...] + where: + - is an [int]. The bits of provides information on the + formatting of the remaining information. A flag is set if the bit + corresponding to its `mask` is set. Supported masks and their flags + are: + 0x0001 Global_tables_spec: if set, only one table spec (keyspace + and table name) is provided as . If not + set, is not present. + - is an [int] representing the number of bind markers + in the prepared statement. It defines the number of + elements. + - is an [int] representing the number of + elements to follow. If this value is zero, at least one of the + partition key columns in the table that the statement acts on + did not have a corresponding bind marker (or the bind marker + was wrapped in a function call). + - is a short that represents the index of the bind marker + that corresponds to the partition key column in position i. + For example, a sequence of [2, 0, 1] indicates that the + table has three partition key columns; the full partition key + can be constructed by creating a composite of the values for + the bind markers at index 2, at index 0, and at index 1. + This allows implementations with token-aware routing to correctly + construct the partition key without needing to inspect table + metadata. + - is present if the Global_tables_spec is set in + . If present, it is composed of two [string]s. The first + [string] is the name of the keyspace that the statement acts on. + The second [string] is the name of the table that the columns + represented by the bind markers belong to. + - specifies the bind markers in the prepared statement. + There are such column specifications, each with the + following format: + ()? + The initial and are two [string] that are only + present if the Global_tables_spec flag is not set. The field + is a [string] that holds the name of the bind marker (if named), + or the name of the column, field, or expression that the bind marker + corresponds to (if the bind marker is "anonymous"). The + field is an [option] that represents the expected type of values for + the bind marker. See the Rows documentation (section 4.2.5.2) for + full details on the field. + + - is defined exactly the same as in the Rows + documentation (section 4.2.5.2). This describes the metadata for the + result set that will be returned when this prepared statement is executed. + Note that may be empty (have the No_metadata flag and + 0 columns, See section 4.2.5.2) and will be for any query that is not a + Select. In fact, there is never a guarantee that this will be non-empty, so + implementations should protect themselves accordingly. This result metadata + is an optimization that allows implementations to later execute the + prepared statement without requesting the metadata (see the Skip_metadata + flag in EXECUTE). Clients can safely discard this metadata if they do not + want to take advantage of that optimization. + + Note that the prepared query ID returned is global to the node on which the query + has been prepared. 
It can be used on any connection to that node + until the node is restarted (after which the query must be reprepared). + +4.2.5.5. Schema_change + + The result to a schema altering query (creation/update/drop of a + keyspace/table/index). The body (after the kind [int]) is the same + as the body for a "SCHEMA_CHANGE" event, so 3 strings: + + Please refer to section 4.2.6 below for the meaning of those fields. + + Note that a query to create or drop an index is considered to be a change + to the table the index is on. + + +4.2.6. EVENT + + An event pushed by the server. A client will only receive events for the + types it has REGISTERed to. The body of an EVENT message will start with a + [string] representing the event type. The rest of the message depends on the + event type. The valid event types are: + - "TOPOLOGY_CHANGE": events related to change in the cluster topology. + Currently, events are sent when new nodes are added to the cluster, and + when nodes are removed. The body of the message (after the event type) + consists of a [string] and an [inet], corresponding respectively to the + type of change ("NEW_NODE" or "REMOVED_NODE") followed by the address of + the new/removed node. + - "STATUS_CHANGE": events related to change of node status. Currently, + up/down events are sent. The body of the message (after the event type) + consists of a [string] and an [inet], corresponding respectively to the + type of status change ("UP" or "DOWN") followed by the address of the + concerned node. + - "SCHEMA_CHANGE": events related to schema change. After the event type, + the rest of the message will be where: + - is a [string] representing the type of changed involved. + It will be one of "CREATED", "UPDATED" or "DROPPED". + - is a [string] that can be one of "KEYSPACE", "TABLE", "TYPE", + "FUNCTION" or "AGGREGATE" and describes what has been modified + ("TYPE" stands for modifications related to user types, "FUNCTION" + for modifications related to user defined functions, "AGGREGATE" + for modifications related to user defined aggregates). + - depends on the preceding : + - If is "KEYSPACE", then will be a single [string] + representing the keyspace changed. + - If is "TABLE" or "TYPE", then + will be 2 [string]: the first one will be the keyspace + containing the affected object, and the second one will be the name + of said affected object (either the table, user type, function, or + aggregate name). + - If is "FUNCTION" or "AGGREGATE", multiple arguments follow: + - [string] keyspace containing the user defined function / aggregate + - [string] the function/aggregate name + - [string list] one string for each argument type (as CQL type) + + All EVENT messages have a streamId of -1 (Section 2.3). + + Please note that "NEW_NODE" and "UP" events are sent based on internal Gossip + communication and as such may be sent a short delay before the binary + protocol server on the newly up node is fully started. Clients are thus + advised to wait a short time before trying to connect to the node (1 second + should be enough), otherwise they may experience a connection refusal at + first. + +4.2.7. AUTH_CHALLENGE + + A server authentication challenge (see AUTH_RESPONSE (Section 4.1.2) for more + details). + + The body of this message is a single [bytes] token. The details of what this + token contains (and when it can be null/empty, if ever) depends on the actual + authenticator used. + + Clients are expected to answer the server challenge with an AUTH_RESPONSE + message. + +4.2.8. 
AUTH_SUCCESS + + Indicates the success of the authentication phase. See Section 4.2.3 for more + details. + + The body of this message is a single [bytes] token holding final information + from the server that the client may require to finish the authentication + process. What that token contains and whether it can be null depends on the + actual authenticator used. + + +5. Compression + + Frame compression is supported by the protocol, but then only the frame body + is compressed (the frame header should never be compressed). + + Before being used, client and server must agree on a compression algorithm to + use, which is done in the STARTUP message. As a consequence, a STARTUP message + must never be compressed. However, once the STARTUP frame has been received + by the server, messages can be compressed (including the response to the STARTUP + request). Frames do not have to be compressed, however, even if compression has + been agreed upon (a server may only compress frames above a certain size at its + discretion). A frame body should be compressed if and only if the compressed + flag (see Section 2.2) is set. + + As of version 2 of the protocol, the following compressions are available: + - lz4 (https://code.google.com/p/lz4/). In that, note that the first four bytes + of the body will be the uncompressed length (followed by the compressed + bytes). + - snappy (https://code.google.com/p/snappy/). This compression might not be + available as it depends on a native lib (server-side) that might not be + avaivable on some installations. + + +6. Data Type Serialization Formats + + This sections describes the serialization formats for all CQL data types + supported by Cassandra through the native protocol. These serialization + formats should be used by client drivers to encode values for EXECUTE + messages. Cassandra will use these formats when returning values in + RESULT messages. + + All values are represented as [bytes] in EXECUTE and RESULT messages. + The [bytes] format includes an int prefix denoting the length of the value. + For that reason, the serialization formats described here will not include + a length component. + + For legacy compatibility reasons, note that most non-string types support + "empty" values (i.e. a value with zero length). An empty value is distinct + from NULL, which is encoded with a negative length. + + As with the rest of the native protocol, all encodings are big-endian. + +6.1. ascii + + A sequence of bytes in the ASCII range [0, 127]. Bytes with values outside of + this range will result in a validation error. + +6.2 bigint + + An eight-byte two's complement integer. + +6.3 blob + + Any sequence of bytes. + +6.4 boolean + + A single byte. A value of 0 denotes "false"; any other value denotes "true". + (However, it is recommended that a value of 1 be used to represent "true".) + +6.5 date + + An unsigned integer representing days with epoch centered at 2^31. + (unix epoch January 1st, 1970). + A few examples: + 0: -5877641-06-23 + 2^31: 1970-1-1 + 2^32: 5881580-07-11 + +6.6 decimal + + The decimal format represents an arbitrary-precision number. It contains an + [int] "scale" component followed by a varint encoding (see section 6.17) + of the unscaled value. The encoded value represents "E<-scale>". + In other words, " * 10 ^ (-1 * )". + +6.7 double + + An 8 byte floating point number in the IEEE 754 binary64 format. + +6.8 float + + A 4 byte floating point number in the IEEE 754 binary32 format. 
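To make the fixed-width formats above concrete, here is a minimal sketch of encoding a few of these types in big-endian form, plus the [bytes] length prefix used when such values are carried in EXECUTE and RESULT messages. The function names are illustrative assumptions, not part of the spec or of any driver.

```python
import struct

def encode_bigint(v: int) -> bytes:
    # 6.2: an eight-byte two's complement integer.
    return struct.pack(">q", v)

def encode_boolean(v: bool) -> bytes:
    # 6.4: a single byte; 1 is the recommended encoding for "true".
    return b"\x01" if v else b"\x00"

def encode_double(v: float) -> bytes:
    # 6.7: IEEE 754 binary64, big-endian.
    return struct.pack(">d", v)

def encode_float(v: float) -> bytes:
    # 6.8: IEEE 754 binary32, big-endian.
    return struct.pack(">f", v)

def as_value(encoded: bytes) -> bytes:
    # Section 6 intro: values travel as [bytes], i.e. an [int] length
    # prefix followed by the raw serialized form.
    return struct.pack(">i", len(encoded)) + encoded
```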
+ +6.9 inet + + A 4 byte or 16 byte sequence denoting an IPv4 or IPv6 address, respectively. + +6.10 int + + A 4 byte two's complement integer. + +6.11 list + + A [int] n indicating the number of elements in the list, followed by n + elements. Each element is [bytes] representing the serialized value. + +6.12 map + + A [int] n indicating the number of key/value pairs in the map, followed by + n entries. Each entry is composed of two [bytes] representing the key + and value. + +6.13 set + + A [int] n indicating the number of elements in the set, followed by n + elements. Each element is [bytes] representing the serialized value. + +6.14 smallint + + A 2 byte two's complement integer. + +6.15 text + + A sequence of bytes conforming to the UTF-8 specifications. + +6.16 time + + An 8 byte two's complement long representing nanoseconds since midnight. + Valid values are in the range 0 to 86399999999999 + +6.17 timestamp + + An 8 byte two's complement integer representing a millisecond-precision + offset from the unix epoch (00:00:00, January 1st, 1970). Negative values + represent a negative offset from the epoch. + +6.18 timeuuid + + A 16 byte sequence representing a version 1 UUID as defined by RFC 4122. + +6.19 tinyint + + A 1 byte two's complement integer. + +6.20 tuple + + A sequence of [bytes] values representing the items in a tuple. The encoding + of each element depends on the data type for that position in the tuple. + Null values may be represented by using length -1 for the [bytes] + representation of an element. + +6.21 uuid + + A 16 byte sequence representing any valid UUID as defined by RFC 4122. + +6.22 varchar + + An alias of the "text" type. + +6.23 varint + + A variable-length two's complement encoding of a signed integer. + + The following examples may help implementors of this spec: + + Value | Encoding + ------|--------- + 0 | 0x00 + 1 | 0x01 + 127 | 0x7F + 128 | 0x0080 + 129 | 0x0081 + -1 | 0xFF + -128 | 0x80 + -129 | 0xFF7F + + Note that positive numbers must use a most-significant byte with a value + less than 0x80, because a most-significant bit of 1 indicates a negative + value. Implementors should pad positive values that have a MSB >= 0x80 + with a leading 0x00 byte. + + +7. User Defined Types + + This section describes the serialization format for User defined types (UDT), + as described in section 4.2.5.2. + + A UDT value is composed of successive [bytes] values, one for each field of the UDT + value (in the order defined by the type). A UDT value will generally have one value + for each field of the type it represents, but it is allowed to have less values than + the type has fields. + + +8. Result paging + + The protocol allows for paging the result of queries. For that, the QUERY and + EXECUTE messages have a value that indicate the desired + page size in CQL3 rows. + + If a positive value is provided for , the result set of the + RESULT message returned for the query will contain at most the + first rows of the query result. If that first page of results + contains the full result set for the query, the RESULT message (of kind `Rows`) + will have the Has_more_pages flag *not* set. However, if some results are not + part of the first response, the Has_more_pages flag will be set and the result + will contain a value. In that case, the value + should be used in a QUERY or EXECUTE message (that has the *same* query as + the original one or the behavior is undefined) to retrieve the next page of + results. 
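A hedged sketch of how a client might drive this paging loop is shown below. The execute_query callable and the shape of the result object it returns (rows, has_more_pages, paging_state) are assumptions made purely for illustration; the protocol only defines the paging_state value and the Has_more_pages flag.

```python
def fetch_all_rows(execute_query, query, page_size=5000):
    """Collect every page of a paged query result.

    execute_query is a caller-supplied callable that sends a QUERY/EXECUTE
    message and returns an object exposing .rows, .has_more_pages and
    .paging_state; that interface is a hypothetical stand-in for a driver.
    """
    rows = []
    paging_state = None
    while True:
        # Each request carries the *same* query; only the paging state changes.
        result = execute_query(query,
                               result_page_size=page_size,
                               paging_state=paging_state)
        rows.extend(result.rows)
        if not result.has_more_pages:
            return rows
        # Opaque [bytes] value from the RESULT message, fed back unchanged.
        paging_state = result.paging_state
```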
+ + Only CQL3 queries that return a result set (RESULT message with a Rows `kind`) + support paging. For other type of queries, the value is + ignored. + + Note to client implementors: + - While can be as low as 1, it will likely be detrimental + to performance to pick a value too low. A value below 100 is probably too + low for most use cases. + - Clients should not rely on the actual size of the result set returned to + decide if there are more results to fetch or not. Instead, they should always + check the Has_more_pages flag (unless they did not enable paging for the query + obviously). Clients should also not assert that no result will have more than + results. While the current implementation always respects + the exact value of , we reserve the right to return + slightly smaller or bigger pages in the future for performance reasons. + - The is specific to a protocol version and drivers should not + send a returned by a node using the protocol v3 to query a node + using the protocol v4 for instance. + + +9. Error codes + + Let us recall that an ERROR message is composed of [...] + (see 4.2.1 for details). The supported error codes, as well as any additional + information the message may contain after the are described below: + 0x0000 Server error: something unexpected happened. This indicates a + server-side bug. + 0x000A Protocol error: some client message triggered a protocol + violation (for instance a QUERY message is sent before a STARTUP + one has been sent) + 0x0100 Authentication error: authentication was required and failed. The + possible reason for failing depends on the authenticator in use, + which may or may not include more detail in the accompanying + error message. + 0x1000 Unavailable exception. The rest of the ERROR message body will be + + where: + is the [consistency] level of the query that triggered + the exception. + is an [int] representing the number of nodes that + should be alive to respect + is an [int] representing the number of replicas that + were known to be alive when the request had been + processed (since an unavailable exception has been + triggered, there will be < ) + 0x1001 Overloaded: the request cannot be processed because the + coordinator node is overloaded + 0x1002 Is_bootstrapping: the request was a read request but the + coordinator node is bootstrapping + 0x1003 Truncate_error: error during a truncation error. + 0x1100 Write_timeout: Timeout exception during a write request. The rest + of the ERROR message body will be + + where: + is the [consistency] level of the query having triggered + the exception. + is an [int] representing the number of nodes having + acknowledged the request. + is an [int] representing the number of replicas whose + acknowledgement is required to achieve . + is a [string] that describe the type of the write + that timed out. The value of that string can be one + of: + - "SIMPLE": the write was a non-batched + non-counter write. + - "BATCH": the write was a (logged) batch write. + If this type is received, it means the batch log + has been successfully written (otherwise a + "BATCH_LOG" type would have been sent instead). + - "UNLOGGED_BATCH": the write was an unlogged + batch. No batch log write has been attempted. + - "COUNTER": the write was a counter write + (batched or not). + - "BATCH_LOG": the timeout occurred during the + write to the batch log when a (logged) batch + write was requested. + - "CAS": the timeout occured during the Compare And Set write/update. 
+ - "VIEW": the timeout occured when a write involves + VIEW update and failure to acqiure local view(MV) + lock for key within timeout + - "CDC": the timeout occured when cdc_total_space_in_mb is + exceeded when doing a write to data tracked by cdc. + 0x1200 Read_timeout: Timeout exception during a read request. The rest + of the ERROR message body will be + + where: + is the [consistency] level of the query having triggered + the exception. + is an [int] representing the number of nodes having + answered the request. + is an [int] representing the number of replicas whose + response is required to achieve . Please note that + it is possible to have >= if + is false. Also in the (unlikely) + case where is achieved but the coordinator node + times out while waiting for read-repair acknowledgement. + is a single byte. If its value is 0, it means + the replica that was asked for data has not + responded. Otherwise, the value is != 0. + 0x1300 Read_failure: A non-timeout exception during a read request. The rest + of the ERROR message body will be + + where: + is the [consistency] level of the query having triggered + the exception. + is an [int] representing the number of nodes having + answered the request. + is an [int] representing the number of replicas whose + acknowledgement is required to achieve . + is an [int] representing the number of nodes that + experience a failure while executing the request. + is a single byte. If its value is 0, it means + the replica that was asked for data had not + responded. Otherwise, the value is != 0. + 0x1400 Function_failure: A (user defined) function failed during execution. + The rest of the ERROR message body will be + + where: + is the keyspace [string] of the failed function + is the name [string] of the failed function + [string list] one string for each argument type (as CQL type) of the failed function + 0x1500 Write_failure: A non-timeout exception during a write request. The rest + of the ERROR message body will be + + where: + is the [consistency] level of the query having triggered + the exception. + is an [int] representing the number of nodes having + answered the request. + is an [int] representing the number of replicas whose + acknowledgement is required to achieve . + is an [int] representing the number of nodes that + experience a failure while executing the request. + is a [string] that describes the type of the write + that failed. The value of that string can be one + of: + - "SIMPLE": the write was a non-batched + non-counter write. + - "BATCH": the write was a (logged) batch write. + If this type is received, it means the batch log + has been successfully written (otherwise a + "BATCH_LOG" type would have been sent instead). + - "UNLOGGED_BATCH": the write was an unlogged + batch. No batch log write has been attempted. + - "COUNTER": the write was a counter write + (batched or not). + - "BATCH_LOG": the failure occured during the + write to the batch log when a (logged) batch + write was requested. + - "CAS": the failure occured during the Compare And Set write/update. + - "VIEW": the failure occured when a write involves + VIEW update and failure to acqiure local view(MV) + lock for key within timeout + - "CDC": the failure occured when cdc_total_space_in_mb is + exceeded when doing a write to data tracked by cdc. + + 0x2000 Syntax_error: The submitted query has a syntax error. + 0x2100 Unauthorized: The logged user doesn't have the right to perform + the query. 
+    0x2200    Invalid: The query is syntactically correct but invalid.
+    0x2300    Config_error: The query is invalid because of some configuration issue
+    0x2400    Already_exists: The query attempted to create a keyspace or a
+              table that already exists. The rest of the ERROR message
+              body will be <ks><table> where:
+                <ks> is a [string] representing either the keyspace that
+                     already exists, or the keyspace in which the table that
+                     already exists is.
+                <table> is a [string] representing the name of the table that
+                        already exists. If the query was attempting to create a
+                        keyspace, <table>
will be present but will be the empty + string. + 0x2500 Unprepared: Can be thrown while a prepared statement tries to be + executed if the provided prepared statement ID is not known by + this host. The rest of the ERROR message body will be [short + bytes] representing the unknown ID. + +10. Changes from v3 + + * Prepared responses (Section 4.2.5.4) now include partition-key bind indexes + * The format of "SCHEMA_CHANGE" events (Section 4.2.6) (and implicitly + "Schema_change" results (Section 4.2.5.5)) has been modified, and now includes + changes related to user defined functions and user defined aggregates. + * Read_failure error code was added. + * Function_failure error code was added. + * Add custom payload to frames for custom QueryHandler implementations (ignored by + Cassandra's standard QueryHandler) + * Add warnings to frames for responses for which the server generated a warning + during processing, which the client needs to address. + * Add the date and time data types + * Add the tinyint and smallint data types + * The returned in the v4 protocol is not compatible with the v3 + protocol. In other words, a returned by a node using protocol v4 + should not be used to query a node using protocol v3 (and vice-versa). + * Added THROW_ON_OVERLOAD startup option (Section 4.1.1). diff --git a/site-content/source/modules/ROOT/examples/TEXT/native_protocol_v5.spec b/site-content/source/modules/ROOT/examples/TEXT/native_protocol_v5.spec new file mode 100644 index 000000000..d27945373 --- /dev/null +++ b/site-content/source/modules/ROOT/examples/TEXT/native_protocol_v5.spec @@ -0,0 +1,1281 @@ + + CQL BINARY PROTOCOL v5 + + +Table of Contents + + 1. Overview + 2. Frame header + 2.1. version + 2.2. flags + 2.3. stream + 2.4. opcode + 2.5. length + 3. Notations + 4. Messages + 4.1. Requests + 4.1.1. STARTUP + 4.1.2. AUTH_RESPONSE + 4.1.3. OPTIONS + 4.1.4. QUERY + 4.1.5. PREPARE + 4.1.6. EXECUTE + 4.1.7. BATCH + 4.1.8. REGISTER + 4.2. Responses + 4.2.1. ERROR + 4.2.2. READY + 4.2.3. AUTHENTICATE + 4.2.4. SUPPORTED + 4.2.5. RESULT + 4.2.5.1. Void + 4.2.5.2. Rows + 4.2.5.3. Set_keyspace + 4.2.5.4. Prepared + 4.2.5.5. Schema_change + 4.2.6. EVENT + 4.2.7. AUTH_CHALLENGE + 4.2.8. AUTH_SUCCESS + 5. Compression + 6. Data Type Serialization Formats + 7. User Defined Type Serialization + 8. Result paging + 9. Error codes + 10. Changes from v4 + + +1. Overview + + The CQL binary protocol is a frame based protocol. Frames are defined as: + + 0 8 16 24 32 40 + +---------+---------+---------+---------+---------+ + | version | flags | stream | opcode | + +---------+---------+---------+---------+---------+ + | length | + +---------+---------+---------+---------+ + | | + . ... body ... . + . . + . . + +---------------------------------------- + + The protocol is big-endian (network byte order). + + Each frame contains a fixed size header (9 bytes) followed by a variable size + body. The header is described in Section 2. The content of the body depends + on the header opcode value (the body can in particular be empty for some + opcode values). The list of allowed opcodes is defined in Section 2.4 and the + details of each corresponding message are described Section 4. + + The protocol distinguishes two types of frames: requests and responses. Requests + are those frames sent by the client to the server. Responses are those frames sent + by the server to the client. Note, however, that the protocol supports server pushes + (events) so a response does not necessarily come right after a client request. 
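As an illustration of the frame layout above, the following sketch unpacks the fixed 9-byte header (1-byte version, 1-byte flags, 2-byte stream id, 1-byte opcode, 4-byte body length, all big-endian) described in Section 2. The type and function names are hypothetical and not part of the spec.

```python
import struct
from typing import NamedTuple

class FrameHeader(NamedTuple):
    version: int   # direction bit + protocol version: 0x05 request, 0x85 response
    flags: int
    stream: int    # [short] stream id; negative ids are server-initiated streams
    opcode: int
    length: int    # length of the frame body in bytes

def parse_frame_header(data: bytes) -> FrameHeader:
    """Parse the fixed 9-byte header; the body (length bytes) follows it."""
    version, flags, stream, opcode, length = struct.unpack(">BBhBi", data[:9])
    return FrameHeader(version, flags, stream, opcode, length)
```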
+ + Note to client implementors: client libraries should always assume that the + body of a given frame may contain more data than what is described in this + document. It will however always be safe to ignore the remainder of the frame + body in such cases. The reason is that this may enable extending the protocol + with optional features without needing to change the protocol version. + + + +2. Frame header + +2.1. version + + The version is a single byte that indicates both the direction of the message + (request or response) and the version of the protocol in use. The most + significant bit of version is used to define the direction of the message: + 0 indicates a request, 1 indicates a response. This can be useful for protocol + analyzers to distinguish the nature of the packet from the direction in which + it is moving. The rest of that byte is the protocol version (5 for the protocol + defined in this document). In other words, for this version of the protocol, + version will be one of: + 0x05 Request frame for this protocol version + 0x85 Response frame for this protocol version + + Please note that while every message ships with the version, only one version + of messages is accepted on a given connection. In other words, the first message + exchanged (STARTUP) sets the version for the connection for the lifetime of this + connection. The single exception to this behavior is when a startup message + is sent with a version that is higher than the current server version. In this + case, the server will respond with its current version. + + This document describes version 5 of the protocol. For the changes made since + version 4, see Section 10. + + +2.2. flags + + Flags applying to this frame. The flags have the following meaning (described + by the mask that allows selecting them): + 0x01: Compression flag. If set, the frame body is compressed. The actual + compression to use should have been set up beforehand through the + Startup message (which thus cannot be compressed; Section 4.1.1). + 0x02: Tracing flag. For a request frame, this indicates the client requires + tracing of the request. Note that only QUERY, PREPARE and EXECUTE queries + support tracing. Other requests will simply ignore the tracing flag if + set. If a request supports tracing and the tracing flag is set, the response + to this request will have the tracing flag set and contain tracing + information. + If a response frame has the tracing flag set, its body contains + a tracing ID. The tracing ID is a [uuid] and is the first thing in + the frame body. The rest of the body will then be the usual body + corresponding to the response opcode. + 0x04: Custom payload flag. For a request or response frame, this indicates + that a generic key-value custom payload for a custom QueryHandler + implementation is present in the frame. Such a custom payload is simply + ignored by the default QueryHandler implementation. + Currently, only QUERY, PREPARE, EXECUTE and BATCH requests support + payload. + Type of custom payload is [bytes map] (see below). + 0x08: Warning flag. The response contains warnings which were generated by the + server to go along with this response. + If a response frame has the warning flag set, its body will contain the + text of the warnings. The warnings are a [string list] and will be the + first value in the frame body if the tracing flag is not set, or directly + after the tracing ID if it is. + 0x10: Use beta flag. 
Indicates that the client opts in to use protocol version + that is currently in beta. Server will respond with ERROR if protocol + version is marked as beta on server and client does not provide this flag. + + The rest of flags is currently unused and ignored. + +2.3. stream + + A frame has a stream id (a [short] value). When sending request messages, this + stream id must be set by the client to a non-negative value (negative stream id + are reserved for streams initiated by the server; currently all EVENT messages + (section 4.2.6) have a streamId of -1). If a client sends a request message + with the stream id X, it is guaranteed that the stream id of the response to + that message will be X. + + This helps to enable the asynchronous nature of the protocol. If a client + sends multiple messages simultaneously (without waiting for responses), there + is no guarantee on the order of the responses. For instance, if the client + writes REQ_1, REQ_2, REQ_3 on the wire (in that order), the server might + respond to REQ_3 (or REQ_2) first. Assigning different stream ids to these 3 + requests allows the client to distinguish to which request a received answer + responds to. As there can only be 32768 different simultaneous streams, it is up + to the client to reuse stream id. + + Note that clients are free to use the protocol synchronously (i.e. wait for + the response to REQ_N before sending REQ_N+1). In that case, the stream id + can be safely set to 0. Clients should also feel free to use only a subset of + the 32768 maximum possible stream ids if it is simpler for its implementation. + +2.4. opcode + + An integer byte that distinguishes the actual message: + 0x00 ERROR + 0x01 STARTUP + 0x02 READY + 0x03 AUTHENTICATE + 0x05 OPTIONS + 0x06 SUPPORTED + 0x07 QUERY + 0x08 RESULT + 0x09 PREPARE + 0x0A EXECUTE + 0x0B REGISTER + 0x0C EVENT + 0x0D BATCH + 0x0E AUTH_CHALLENGE + 0x0F AUTH_RESPONSE + 0x10 AUTH_SUCCESS + + Messages are described in Section 4. + + (Note that there is no 0x04 message in this version of the protocol) + + +2.5. length + + A 4 byte integer representing the length of the body of the frame (note: + currently a frame is limited to 256MB in length). + + +3. Notations + + To describe the layout of the frame body for the messages in Section 4, we + define the following: + + [int] A 4 bytes integer + [long] A 8 bytes integer + [byte] A 1 byte unsigned integer + [short] A 2 bytes unsigned integer + [string] A [short] n, followed by n bytes representing an UTF-8 + string. + [long string] An [int] n, followed by n bytes representing an UTF-8 string. + [uuid] A 16 bytes long uuid. + [string list] A [short] n, followed by n [string]. + [bytes] A [int] n, followed by n bytes if n >= 0. If n < 0, + no byte should follow and the value represented is `null`. + [value] A [int] n, followed by n bytes if n >= 0. + If n == -1 no byte should follow and the value represented is `null`. + If n == -2 no byte should follow and the value represented is + `not set` not resulting in any change to the existing value. + n < -2 is an invalid value and results in an error. + [short bytes] A [short] n, followed by n bytes if n >= 0. + + [unsigned vint] An unsigned variable length integer. A vint is encoded with the most significant byte (MSB) first. + The most significant byte will contains the information about how many extra bytes need to be read + as well as the most significant bits of the integer. + The number of extra bytes to read is encoded as 1 bits on the left side. 
+ For example, if we need to read 2 more bytes the first byte will start with 110 + (e.g. 256 000 will be encoded on 3 bytes as [110]00011 11101000 00000000) + If the encoded integer is 8 bytes long the vint will be encoded on 9 bytes and the first + byte will be: 11111111 + + [vint] A signed variable length integer. This is encoded using zig-zag encoding and then sent + like an [unsigned vint]. Zig-zag encoding converts numbers as follows: + 0 = 0, -1 = 1, 1 = 2, -2 = 3, 2 = 4, -3 = 5, 3 = 6 and so forth. + The purpose is to send small negative values as small unsigned values, so that we save bytes on the wire. + To encode a value n use "(n >> 31) ^ (n << 1)" for 32 bit values, and "(n >> 63) ^ (n << 1)" + for 64 bit values where "^" is the xor operation, "<<" is the left shift operation and ">>" is + the arithemtic right shift operation (highest-order bit is replicated). + Decode with "(n >> 1) ^ -(n & 1)". + + [option] A pair of where is a [short] representing + the option id and depends on that option (and can be + of size 0). The supported id (and the corresponding ) + will be described when this is used. + [option list] A [short] n, followed by n [option]. + [inet] An address (ip and port) to a node. It consists of one + [byte] n, that represents the address size, followed by n + [byte] representing the IP address (in practice n can only be + either 4 (IPv4) or 16 (IPv6)), following by one [int] + representing the port. + [inetaddr] An IP address (without a port) to a node. It consists of one + [byte] n, that represents the address size, followed by n + [byte] representing the IP address. + [consistency] A consistency level specification. This is a [short] + representing a consistency level with the following + correspondance: + 0x0000 ANY + 0x0001 ONE + 0x0002 TWO + 0x0003 THREE + 0x0004 QUORUM + 0x0005 ALL + 0x0006 LOCAL_QUORUM + 0x0007 EACH_QUORUM + 0x0008 SERIAL + 0x0009 LOCAL_SERIAL + 0x000A LOCAL_ONE + + [string map] A [short] n, followed by n pair where and + are [string]. + [string multimap] A [short] n, followed by n pair where is a + [string] and is a [string list]. + [bytes map] A [short] n, followed by n pair where is a + [string] and is a [bytes]. + + +4. Messages + +4.1. Requests + + Note that outside of their normal responses (described below), all requests + can get an ERROR message (Section 4.2.1) as response. + +4.1.1. STARTUP + + Initialize the connection. The server will respond by either a READY message + (in which case the connection is ready for queries) or an AUTHENTICATE message + (in which case credentials will need to be provided using AUTH_RESPONSE). + + This must be the first message of the connection, except for OPTIONS that can + be sent before to find out the options supported by the server. Once the + connection has been initialized, a client should not send any more STARTUP + messages. + + The body is a [string map] of options. Possible options are: + - "CQL_VERSION": the version of CQL to use. This option is mandatory and + currently the only version supported is "3.0.0". Note that this is + different from the protocol version. + - "COMPRESSION": the compression algorithm to use for frames (See section 5). + This is optional; if not specified no compression will be used. + + +4.1.2. AUTH_RESPONSE + + Answers a server authentication challenge. + + Authentication in the protocol is SASL based. The server sends authentication + challenges (a bytes token) to which the client answers with this message. 
Those + exchanges continue until the server accepts the authentication by sending a + AUTH_SUCCESS message after a client AUTH_RESPONSE. Note that the exchange + begins with the client sending an initial AUTH_RESPONSE in response to a + server AUTHENTICATE request. + + The body of this message is a single [bytes] token. The details of what this + token contains (and when it can be null/empty, if ever) depends on the actual + authenticator used. + + The response to a AUTH_RESPONSE is either a follow-up AUTH_CHALLENGE message, + an AUTH_SUCCESS message or an ERROR message. + + +4.1.3. OPTIONS + + Asks the server to return which STARTUP options are supported. The body of an + OPTIONS message should be empty and the server will respond with a SUPPORTED + message. + + +4.1.4. QUERY + + Performs a CQL query. The body of the message must be: + + where is a [long string] representing the query and + must be + [[name_1]...[name_n]][][][][][][] + where: + - is the [consistency] level for the operation. + - is a [int] whose bits define the options for this query and + in particular influence what the remainder of the message contains. + A flag is set if the bit corresponding to its `mask` is set. Supported + flags are, given their mask: + 0x0001: Values. If set, a [short] followed by [value] + values are provided. Those values are used for bound variables in + the query. Optionally, if the 0x40 flag is present, each value + will be preceded by a [string] name, representing the name of + the marker the value must be bound to. + 0x0002: Skip_metadata. If set, the Result Set returned as a response + to the query (if any) will have the NO_METADATA flag (see + Section 4.2.5.2). + 0x0004: Page_size. If set, is an [int] + controlling the desired page size of the result (in CQL3 rows). + See the section on paging (Section 8) for more details. + 0x0008: With_paging_state. If set, should be present. + is a [bytes] value that should have been returned + in a result set (Section 4.2.5.2). The query will be + executed but starting from a given paging state. This is also to + continue paging on a different node than the one where it + started (See Section 8 for more details). + 0x0010: With serial consistency. If set, should be + present. is the [consistency] level for the + serial phase of conditional updates. That consitency can only be + either SERIAL or LOCAL_SERIAL and if not present, it defaults to + SERIAL. This option will be ignored for anything else other than a + conditional update/insert. + 0x0020: With default timestamp. If set, must be present. + is a [long] representing the default timestamp for the query + in microseconds (negative values are forbidden). This will + replace the server side assigned timestamp as default timestamp. + Note that a timestamp in the query itself will still override + this timestamp. This is entirely optional. + 0x0040: With names for values. This only makes sense if the 0x01 flag is set and + is ignored otherwise. If present, the values from the 0x01 flag will + be preceded by a name (see above). Note that this is only useful for + QUERY requests where named bind markers are used; for EXECUTE statements, + since the names for the expected values was returned during preparation, + a client can always provide values in the right order without any names + and using this flag, while supported, is almost surely inefficient. + 0x0080: With keyspace. If set, must be present. is a + [string] indicating the keyspace that the query should be executed in. 
+ It supercedes the keyspace that the connection is bound to, if any. + 0x0100: With now in seconds. If set, must be present. + is an [int] representing the current time (now) for + the query. Affects TTL cell liveness in read queries and local deletion + time for tombstones and TTL cells in update requests. It's intended + for testing purposes and is optional. + + Note that the consistency is ignored by some queries (USE, CREATE, ALTER, + TRUNCATE, ...). + + The server will respond to a QUERY message with a RESULT message, the content + of which depends on the query. + + +4.1.5. PREPARE + + Prepare a query for later execution (through EXECUTE). The body of the message must be: + [] + where: + - is a [long string] representing the CQL query. + - is a [int] whose bits define the options for this statement and in particular + influence what the remainder of the message contains. + A flag is set if the bit corresponding to its `mask` is set. Supported + flags are, given their mask: + 0x01: With keyspace. If set, must be present. is a + [string] indicating the keyspace that the query should be executed in. + It supercedes the keyspace that the connection is bound to, if any. + + The server will respond with a RESULT message with a `prepared` kind (0x0004, + see Section 4.2.5). + + +4.1.6. EXECUTE + + Executes a prepared query. The body of the message must be: + + where + - is the prepared query ID. It's the [short bytes] returned as a + response to a PREPARE message. + - is the ID of the resultset metadata that was sent + along with response to PREPARE message. If a RESULT/Rows message reports + changed resultset metadata with the Metadata_changed flag, the reported new + resultset metadata must be used in subsequent executions. + - has the exact same definition as in QUERY (see Section 4.1.4). + + +4.1.7. BATCH + + Allows executing a list of queries (prepared or not) as a batch (note that + only DML statements are accepted in a batch). The body of the message must + be: + ...[][][][] + where: + - is a [byte] indicating the type of batch to use: + - If == 0, the batch will be "logged". This is equivalent to a + normal CQL3 batch statement. + - If == 1, the batch will be "unlogged". + - If == 2, the batch will be a "counter" batch (and non-counter + statements will be rejected). + - is a [int] whose bits define the options for this query and + in particular influence what the remainder of the message contains. It is similar + to the from QUERY and EXECUTE methods, except that the 4 rightmost + bits must always be 0 as their corresponding options do not make sense for + Batch. A flag is set if the bit corresponding to its `mask` is set. Supported + flags are, given their mask: + 0x0010: With serial consistency. If set, should be + present. is the [consistency] level for the + serial phase of conditional updates. That consistency can only be + either SERIAL or LOCAL_SERIAL and if not present, it defaults to + SERIAL. This option will be ignored for anything else other than a + conditional update/insert. + 0x0020: With default timestamp. If set, should be present. + is a [long] representing the default timestamp for the query + in microseconds. This will replace the server side assigned + timestamp as default timestamp. Note that a timestamp in the query itself + will still override this timestamp. This is entirely optional. + 0x0040: With names for values. 
If set, then all values for all must be + preceded by a [string] that have the same meaning as in QUERY + requests [IMPORTANT NOTE: this feature does not work and should not be + used. It is specified in a way that makes it impossible for the server + to implement. This will be fixed in a future version of the native + protocol. See https://issues.apache.org/jira/browse/CASSANDRA-10246 for + more details]. + 0x0080: With keyspace. If set, must be present. is a + [string] indicating the keyspace that the query should be executed in. + It supercedes the keyspace that the connection is bound to, if any. + 0x0100: With now in seconds. If set, must be present. + is an [int] representing the current time (now) for + the query. Affects TTL cell liveness in read queries and local deletion + time for tombstones and TTL cells in update requests. It's intended + for testing purposes and is optional. + - is a [short] indicating the number of following queries. + - ... are the queries to execute. A must be of the + form: + []...[] + where: + - is a [byte] indicating whether the following query is a prepared + one or not. value must be either 0 or 1. + - depends on the value of . If == 0, it should be + a [long string] query string (as in QUERY, the query string might contain + bind markers). Otherwise (that is, if == 1), it should be a + [short bytes] representing a prepared query ID. + - is a [short] indicating the number (possibly 0) of following values. + - is the optional name of the following . It must be present + if and only if the 0x40 flag is provided for the batch. + - is the [value] to use for bound variable i (of bound variable + if the 0x40 flag is used). + - is the [consistency] level for the operation. + - is only present if the 0x10 flag is set. In that case, + is the [consistency] level for the serial phase of + conditional updates. That consitency can only be either SERIAL or + LOCAL_SERIAL and if not present will defaults to SERIAL. This option will + be ignored for anything else other than a conditional update/insert. + + The server will respond with a RESULT message. + + +4.1.8. REGISTER + + Register this connection to receive some types of events. The body of the + message is a [string list] representing the event types to register for. See + section 4.2.6 for the list of valid event types. + + The response to a REGISTER message will be a READY message. + + Please note that if a client driver maintains multiple connections to a + Cassandra node and/or connections to multiple nodes, it is advised to + dedicate a handful of connections to receive events, but to *not* register + for events on all connections, as this would only result in receiving + multiple times the same event messages, wasting bandwidth. + + +4.2. Responses + + This section describes the content of the frame body for the different + responses. Please note that to make room for future evolution, clients should + support extra informations (that they should simply discard) to the one + described in this document at the end of the frame body. + +4.2.1. ERROR + + Indicates an error processing a request. The body of the message will be an + error code ([int]) followed by a [string] error message. Then, depending on + the exception, more content may follow. The error codes are defined in + Section 9, along with their additional content if any. + + +4.2.2. READY + + Indicates that the server is ready to process queries. 
This message will be + sent by the server either after a STARTUP message if no authentication is + required (if authentication is required, the server indicates readiness by + sending a AUTH_RESPONSE message). + + The body of a READY message is empty. + + +4.2.3. AUTHENTICATE + + Indicates that the server requires authentication, and which authentication + mechanism to use. + + The authentication is SASL based and thus consists of a number of server + challenges (AUTH_CHALLENGE, Section 4.2.7) followed by client responses + (AUTH_RESPONSE, Section 4.1.2). The initial exchange is however boostrapped + by an initial client response. The details of that exchange (including how + many challenge-response pairs are required) are specific to the authenticator + in use. The exchange ends when the server sends an AUTH_SUCCESS message or + an ERROR message. + + This message will be sent following a STARTUP message if authentication is + required and must be answered by a AUTH_RESPONSE message from the client. + + The body consists of a single [string] indicating the full class name of the + IAuthenticator in use. + + +4.2.4. SUPPORTED + + Indicates which startup options are supported by the server. This message + comes as a response to an OPTIONS message. + + The body of a SUPPORTED message is a [string multimap]. This multimap gives + for each of the supported STARTUP options, the list of supported values. It + also includes: + - "PROTOCOL_VERSIONS": the list of native protocol versions that are + supported, encoded as the version number followed by a slash and the + version description. For example: 3/v3, 4/v4, 5/v5-beta. If a version is + in beta, it will have the word "beta" in its description. + + +4.2.5. RESULT + + The result to a query (QUERY, PREPARE, EXECUTE or BATCH messages). + + The first element of the body of a RESULT message is an [int] representing the + `kind` of result. The rest of the body depends on the kind. The kind can be + one of: + 0x0001 Void: for results carrying no information. + 0x0002 Rows: for results to select queries, returning a set of rows. + 0x0003 Set_keyspace: the result to a `use` query. + 0x0004 Prepared: result to a PREPARE message. + 0x0005 Schema_change: the result to a schema altering query. + + The body for each kind (after the [int] kind) is defined below. + + +4.2.5.1. Void + + The rest of the body for a Void result is empty. It indicates that a query was + successful without providing more information. + + +4.2.5.2. Rows + + Indicates a set of rows. The rest of the body of a Rows result is: + + where: + - is composed of: + [][][?...] + where: + - is an [int]. The bits of provides information on the + formatting of the remaining information. A flag is set if the bit + corresponding to its `mask` is set. Supported flags are, given their + mask: + 0x0001 Global_tables_spec: if set, only one table spec (keyspace + and table name) is provided as . If not + set, is not present. + 0x0002 Has_more_pages: indicates whether this is not the last + page of results and more should be retrieved. If set, the + will be present. The is a + [bytes] value that should be used in QUERY/EXECUTE to + continue paging and retrieve the remainder of the result for + this query (See Section 8 for more details). + 0x0004 No_metadata: if set, the is only composed of + these , the and optionally the + (depending on the Has_more_pages flag) but + no other information (so no nor ). 
+ This will only ever be the case if this was requested + during the query (see QUERY and RESULT messages). + 0x0008 Metadata_changed: if set, the No_metadata flag has to be unset + and has to be supplied. This flag is to be + used to avoid a roundtrip in case of metadata changes for queries + that requested metadata to be skipped. + - is an [int] representing the number of columns selected + by the query that produced this result. It defines the number of + elements in and the number of elements for each row in . + - is [short bytes] representing the new, changed resultset + metadata. The new metadata ID must also be used in subsequent executions of + the corresponding prepared statement, if any. + - is present if the Global_tables_spec is set in + . It is composed of two [string] representing the + (unique) keyspace name and table name the columns belong to. + - specifies the columns returned in the query. There are + such column specifications that are composed of: + ()? + The initial and are two [string] and are only present + if the Global_tables_spec flag is not set. The is a + [string] and is an [option] that corresponds to the description + (what this description is depends a bit on the context: in results to + selects, this will be either the user chosen alias or the selection used + (often a colum name, but it can be a function call too). In results to + a PREPARE, this will be either the name of the corresponding bind variable + or the column name for the variable if it is "anonymous") and type of + the corresponding result. The option for is either a native + type (see below), in which case the option has no value, or a + 'custom' type, in which case the value is a [string] representing + the fully qualified class name of the type represented. Valid option + ids are: + 0x0000 Custom: the value is a [string], see above. + 0x0001 Ascii + 0x0002 Bigint + 0x0003 Blob + 0x0004 Boolean + 0x0005 Counter + 0x0006 Decimal + 0x0007 Double + 0x0008 Float + 0x0009 Int + 0x000B Timestamp + 0x000C Uuid + 0x000D Varchar + 0x000E Varint + 0x000F Timeuuid + 0x0010 Inet + 0x0011 Date + 0x0012 Time + 0x0013 Smallint + 0x0014 Tinyint + 0x0015 Duration + 0x0020 List: the value is an [option], representing the type + of the elements of the list. + 0x0021 Map: the value is two [option], representing the types of the + keys and values of the map + 0x0022 Set: the value is an [option], representing the type + of the elements of the set + 0x0030 UDT: the value is ... + where: + - is a [string] representing the keyspace name this + UDT is part of. + - is a [string] representing the UDT name. + - is a [short] representing the number of fields of + the UDT, and thus the number of pairs + following + - is a [string] representing the name of the + i_th field of the UDT. + - is an [option] representing the type of the + i_th field of the UDT. + 0x0031 Tuple: the value is ... where is a [short] + representing the number of values in the type, and + are [option] representing the type of the i_th component + of the tuple + + - is an [int] representing the number of rows present in this + result. Those rows are serialized in the part. + - is composed of ... where m is . + Each is composed of ... where n is + and where is a [bytes] representing the value + returned for the jth column of the ith row. In other words, + is composed of ( * ) [bytes]. + + +4.2.5.3. Set_keyspace + + The result to a `use` query. The body (after the kind [int]) is a single + [string] indicating the name of the keyspace that has been set. 
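For illustration, a minimal sketch of decoding a Set_keyspace RESULT body follows. It relies only on the [int] kind prefix and the [string] layout described above; the constant and helper names are assumptions, not part of any driver.

```python
import struct

RESULT_KIND_SET_KEYSPACE = 0x0003

def read_string(buf: bytes, offset: int):
    """[string]: a [short] byte length followed by that many UTF-8 bytes."""
    (n,) = struct.unpack_from(">H", buf, offset)
    start = offset + 2
    return buf[start:start + n].decode("utf-8"), start + n

def parse_set_keyspace_result(body: bytes) -> str:
    """Return the keyspace name carried by a Set_keyspace RESULT body."""
    (kind,) = struct.unpack_from(">i", body, 0)
    if kind != RESULT_KIND_SET_KEYSPACE:
        raise ValueError(f"not a Set_keyspace result: kind=0x{kind:04X}")
    keyspace, _ = read_string(body, 4)
    return keyspace
```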
+ + +4.2.5.4. Prepared + + The result to a PREPARE message. The body of a Prepared result is: + + where: + - is [short bytes] representing the prepared query ID. + - is [short bytes] representing the resultset metadata ID. + - is composed of: + [...][?...] + where: + - is an [int]. The bits of provides information on the + formatting of the remaining information. A flag is set if the bit + corresponding to its `mask` is set. Supported masks and their flags + are: + 0x0001 Global_tables_spec: if set, only one table spec (keyspace + and table name) is provided as . If not + set, is not present. + - is an [int] representing the number of bind markers + in the prepared statement. It defines the number of + elements. + - is an [int] representing the number of + elements to follow. If this value is zero, at least one of the + partition key columns in the table that the statement acts on + did not have a corresponding bind marker (or the bind marker + was wrapped in a function call). + - is a short that represents the index of the bind marker + that corresponds to the partition key column in position i. + For example, a sequence of [2, 0, 1] indicates that the + table has three partition key columns; the full partition key + can be constructed by creating a composite of the values for + the bind markers at index 2, at index 0, and at index 1. + This allows implementations with token-aware routing to correctly + construct the partition key without needing to inspect table + metadata. + - is present if the Global_tables_spec is set in + . If present, it is composed of two [string]s. The first + [string] is the name of the keyspace that the statement acts on. + The second [string] is the name of the table that the columns + represented by the bind markers belong to. + - specifies the bind markers in the prepared statement. + There are such column specifications, each with the + following format: + ()? + The initial and are two [string] that are only + present if the Global_tables_spec flag is not set. The field + is a [string] that holds the name of the bind marker (if named), + or the name of the column, field, or expression that the bind marker + corresponds to (if the bind marker is "anonymous"). The + field is an [option] that represents the expected type of values for + the bind marker. See the Rows documentation (section 4.2.5.2) for + full details on the field. + + - is defined exactly the same as in the Rows + documentation (section 4.2.5.2). This describes the metadata for the + result set that will be returned when this prepared statement is executed. + Note that may be empty (have the No_metadata flag and + 0 columns, See section 4.2.5.2) and will be for any query that is not a + Select. In fact, there is never a guarantee that this will be non-empty, so + implementations should protect themselves accordingly. This result metadata + is an optimization that allows implementations to later execute the + prepared statement without requesting the metadata (see the Skip_metadata + flag in EXECUTE). Clients can safely discard this metadata if they do not + want to take advantage of that optimization. + + Note that the prepared query ID returned is global to the node on which the query + has been prepared. It can be used on any connection to that node + until the node is restarted (after which the query must be reprepared). + +4.2.5.5. Schema_change + + The result to a schema altering query (creation/update/drop of a + keyspace/table/index). 
The body (after the kind [int]) is the same + as the body for a "SCHEMA_CHANGE" event, so 3 strings: + + Please refer to section 4.2.6 below for the meaning of those fields. + + Note that a query to create or drop an index is considered to be a change + to the table the index is on. + + +4.2.6. EVENT + + An event pushed by the server. A client will only receive events for the + types it has REGISTERed to. The body of an EVENT message will start with a + [string] representing the event type. The rest of the message depends on the + event type. The valid event types are: + - "TOPOLOGY_CHANGE": events related to change in the cluster topology. + Currently, events are sent when new nodes are added to the cluster, and + when nodes are removed. The body of the message (after the event type) + consists of a [string] and an [inet], corresponding respectively to the + type of change ("NEW_NODE" or "REMOVED_NODE") followed by the address of + the new/removed node. + - "STATUS_CHANGE": events related to change of node status. Currently, + up/down events are sent. The body of the message (after the event type) + consists of a [string] and an [inet], corresponding respectively to the + type of status change ("UP" or "DOWN") followed by the address of the + concerned node. + - "SCHEMA_CHANGE": events related to schema change. After the event type, + the rest of the message will be where: + - is a [string] representing the type of changed involved. + It will be one of "CREATED", "UPDATED" or "DROPPED". + - is a [string] that can be one of "KEYSPACE", "TABLE", "TYPE", + "FUNCTION" or "AGGREGATE" and describes what has been modified + ("TYPE" stands for modifications related to user types, "FUNCTION" + for modifications related to user defined functions, "AGGREGATE" + for modifications related to user defined aggregates). + - depends on the preceding : + - If is "KEYSPACE", then will be a single [string] + representing the keyspace changed. + - If is "TABLE" or "TYPE", then + will be 2 [string]: the first one will be the keyspace + containing the affected object, and the second one will be the name + of said affected object (either the table, user type, function, or + aggregate name). + - If is "FUNCTION" or "AGGREGATE", multiple arguments follow: + - [string] keyspace containing the user defined function / aggregate + - [string] the function/aggregate name + - [string list] one string for each argument type (as CQL type) + + All EVENT messages have a streamId of -1 (Section 2.3). + + Please note that "NEW_NODE" and "UP" events are sent based on internal Gossip + communication and as such may be sent a short delay before the binary + protocol server on the newly up node is fully started. Clients are thus + advised to wait a short time before trying to connect to the node (1 second + should be enough), otherwise they may experience a connection refusal at + first. + +4.2.7. AUTH_CHALLENGE + + A server authentication challenge (see AUTH_RESPONSE (Section 4.1.2) for more + details). + + The body of this message is a single [bytes] token. The details of what this + token contains (and when it can be null/empty, if ever) depends on the actual + authenticator used. + + Clients are expected to answer the server challenge with an AUTH_RESPONSE + message. + +4.2.8. AUTH_SUCCESS + + Indicates the success of the authentication phase. See Section 4.2.3 for more + details. 
+ + The body of this message is a single [bytes] token holding final information + from the server that the client may require to finish the authentication + process. What that token contains and whether it can be null depends on the + actual authenticator used. + + +5. Compression + + Frame compression is supported by the protocol, but then only the frame body + is compressed (the frame header should never be compressed). + + Before being used, client and server must agree on a compression algorithm to + use, which is done in the STARTUP message. As a consequence, a STARTUP message + must never be compressed. However, once the STARTUP frame has been received + by the server, messages can be compressed (including the response to the STARTUP + request). Frames do not have to be compressed, however, even if compression has + been agreed upon (a server may only compress frames above a certain size at its + discretion). A frame body should be compressed if and only if the compressed + flag (see Section 2.2) is set. + + As of version 2 of the protocol, the following compressions are available: + - lz4 (https://code.google.com/p/lz4/). In that, note that the first four bytes + of the body will be the uncompressed length (followed by the compressed + bytes). + - snappy (https://code.google.com/p/snappy/). This compression might not be + available as it depends on a native lib (server-side) that might not be + avaivable on some installations. + + +6. Data Type Serialization Formats + + This sections describes the serialization formats for all CQL data types + supported by Cassandra through the native protocol. These serialization + formats should be used by client drivers to encode values for EXECUTE + messages. Cassandra will use these formats when returning values in + RESULT messages. + + All values are represented as [bytes] in EXECUTE and RESULT messages. + The [bytes] format includes an int prefix denoting the length of the value. + For that reason, the serialization formats described here will not include + a length component. + + For legacy compatibility reasons, note that most non-string types support + "empty" values (i.e. a value with zero length). An empty value is distinct + from NULL, which is encoded with a negative length. + + As with the rest of the native protocol, all encodings are big-endian. + +6.1. ascii + + A sequence of bytes in the ASCII range [0, 127]. Bytes with values outside of + this range will result in a validation error. + +6.2 bigint + + An eight-byte two's complement integer. + +6.3 blob + + Any sequence of bytes. + +6.4 boolean + + A single byte. A value of 0 denotes "false"; any other value denotes "true". + (However, it is recommended that a value of 1 be used to represent "true".) + +6.5 date + + An unsigned integer representing days with epoch centered at 2^31. + (unix epoch January 1st, 1970). + A few examples: + 0: -5877641-06-23 + 2^31: 1970-1-1 + 2^32: 5881580-07-11 + +6.6 decimal + + The decimal format represents an arbitrary-precision number. It contains an + [int] "scale" component followed by a varint encoding (see section 6.17) + of the unscaled value. The encoded value represents "E<-scale>". + In other words, " * 10 ^ (-1 * )". + +6.7 double + + An 8 byte floating point number in the IEEE 754 binary64 format. + +6.8 duration + + A duration is composed of 3 signed variable length integers ([vint]s). 
+ The first [vint] represents a number of months, the second [vint] represents + a number of days, and the last [vint] represents a number of nanoseconds. + The number of months and days must be valid 32 bits integers whereas the + number of nanoseconds must be a valid 64 bits integer. + A duration can either be positive or negative. If a duration is positive + all the integers must be positive or zero. If a duration is + negative all the numbers must be negative or zero. + +6.9 float + + A 4 byte floating point number in the IEEE 754 binary32 format. + +6.10 inet + + A 4 byte or 16 byte sequence denoting an IPv4 or IPv6 address, respectively. + +6.11 int + + A 4 byte two's complement integer. + +6.12 list + + A [int] n indicating the number of elements in the list, followed by n + elements. Each element is [bytes] representing the serialized value. + +6.13 map + + A [int] n indicating the number of key/value pairs in the map, followed by + n entries. Each entry is composed of two [bytes] representing the key + and value. + +6.14 set + + A [int] n indicating the number of elements in the set, followed by n + elements. Each element is [bytes] representing the serialized value. + +6.15 smallint + + A 2 byte two's complement integer. + +6.16 text + + A sequence of bytes conforming to the UTF-8 specifications. + +6.17 time + + An 8 byte two's complement long representing nanoseconds since midnight. + Valid values are in the range 0 to 86399999999999 + +6.18 timestamp + + An 8 byte two's complement integer representing a millisecond-precision + offset from the unix epoch (00:00:00, January 1st, 1970). Negative values + represent a negative offset from the epoch. + +6.19 timeuuid + + A 16 byte sequence representing a version 1 UUID as defined by RFC 4122. + +6.20 tinyint + + A 1 byte two's complement integer. + +6.21 tuple + + A sequence of [bytes] values representing the items in a tuple. The encoding + of each element depends on the data type for that position in the tuple. + Null values may be represented by using length -1 for the [bytes] + representation of an element. + +6.22 uuid + + A 16 byte sequence representing any valid UUID as defined by RFC 4122. + +6.23 varchar + + An alias of the "text" type. + +6.24 varint + + A variable-length two's complement encoding of a signed integer. + + The following examples may help implementors of this spec: + + Value | Encoding + ------|--------- + 0 | 0x00 + 1 | 0x01 + 127 | 0x7F + 128 | 0x0080 + 129 | 0x0081 + -1 | 0xFF + -128 | 0x80 + -129 | 0xFF7F + + Note that positive numbers must use a most-significant byte with a value + less than 0x80, because a most-significant bit of 1 indicates a negative + value. Implementors should pad positive values that have a MSB >= 0x80 + with a leading 0x00 byte. + + +7. User Defined Types + + This section describes the serialization format for User defined types (UDT), + as described in section 4.2.5.2. + + A UDT value is composed of successive [bytes] values, one for each field of the UDT + value (in the order defined by the type). A UDT value will generally have one value + for each field of the type it represents, but it is allowed to have less values than + the type has fields. + + +8. Result paging + + The protocol allows for paging the result of queries. For that, the QUERY and + EXECUTE messages have a value that indicate the desired + page size in CQL3 rows. 
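+
+ As an editorial illustration of how the paging fields described in the rest of
+ this section are typically consumed, a driver-side fetch loop might look
+ roughly like the sketch below. Connection, QueryOptions and ResultSetPage are
+ invented stand-in types, not part of this protocol or of any particular driver:
+
+   import java.nio.ByteBuffer;
+
+   // Hypothetical paging loop (illustrative only): request pages of at most
+   // 5000 rows and resend the last paging state until Has_more_pages is unset.
+   final class PagingExample
+   {
+       static void fetchAllPages(Connection connection, String query)
+       {
+           ByteBuffer pagingState = null;
+           do
+           {
+               QueryOptions options = new QueryOptions()
+                       .withPageSize(5000)            // the desired page size
+                       .withPagingState(pagingState); // null on the first request
+               ResultSetPage page = connection.executeQuery(query, options);
+               processRows(page.rows());              // application-defined handling
+               pagingState = page.hasMorePages() ? page.pagingState() : null;
+           }
+           while (pagingState != null);
+       }
+   }
+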
+ + If a positive value is provided for , the result set of the + RESULT message returned for the query will contain at most the + first rows of the query result. If that first page of results + contains the full result set for the query, the RESULT message (of kind `Rows`) + will have the Has_more_pages flag *not* set. However, if some results are not + part of the first response, the Has_more_pages flag will be set and the result + will contain a value. In that case, the value + should be used in a QUERY or EXECUTE message (that has the *same* query as + the original one or the behavior is undefined) to retrieve the next page of + results. + + Only CQL3 queries that return a result set (RESULT message with a Rows `kind`) + support paging. For other type of queries, the value is + ignored. + + Note to client implementors: + - While can be as low as 1, it will likely be detrimental + to performance to pick a value too low. A value below 100 is probably too + low for most use cases. + - Clients should not rely on the actual size of the result set returned to + decide if there are more results to fetch or not. Instead, they should always + check the Has_more_pages flag (unless they did not enable paging for the query + obviously). Clients should also not assert that no result will have more than + results. While the current implementation always respects + the exact value of , we reserve the right to return + slightly smaller or bigger pages in the future for performance reasons. + - The is specific to a protocol version and drivers should not + send a returned by a node using the protocol v3 to query a node + using the protocol v4 for instance. + + +9. Error codes + + Let us recall that an ERROR message is composed of [...] + (see 4.2.1 for details). The supported error codes, as well as any additional + information the message may contain after the are described below: + 0x0000 Server error: something unexpected happened. This indicates a + server-side bug. + 0x000A Protocol error: some client message triggered a protocol + violation (for instance a QUERY message is sent before a STARTUP + one has been sent) + 0x0100 Authentication error: authentication was required and failed. The + possible reason for failing depends on the authenticator in use, + which may or may not include more detail in the accompanying + error message. + 0x1000 Unavailable exception. The rest of the ERROR message body will be + + where: + is the [consistency] level of the query that triggered + the exception. + is an [int] representing the number of nodes that + should be alive to respect + is an [int] representing the number of replicas that + were known to be alive when the request had been + processed (since an unavailable exception has been + triggered, there will be < ) + 0x1001 Overloaded: the request cannot be processed because the + coordinator node is overloaded + 0x1002 Is_bootstrapping: the request was a read request but the + coordinator node is bootstrapping + 0x1003 Truncate_error: error during a truncation error. + 0x1100 Write_timeout: Timeout exception during a write request. The rest + of the ERROR message body will be + + where: + is the [consistency] level of the query having triggered + the exception. + is an [int] representing the number of nodes having + acknowledged the request. + is an [int] representing the number of replicas whose + acknowledgement is required to achieve . + is a [string] that describe the type of the write + that timed out. 
The value of that string can be one
+                 of:
+                  - "SIMPLE": the write was a non-batched
+                    non-counter write.
+                  - "BATCH": the write was a (logged) batch write.
+                    If this type is received, it means the batch log
+                    has been successfully written (otherwise a
+                    "BATCH_LOG" type would have been sent instead).
+                  - "UNLOGGED_BATCH": the write was an unlogged
+                    batch. No batch log write has been attempted.
+                  - "COUNTER": the write was a counter write
+                    (batched or not).
+                  - "BATCH_LOG": the timeout occurred during the
+                    write to the batch log when a (logged) batch
+                    write was requested.
+                  - "CAS": the timeout occurred during the Compare And Set write/update.
+                  - "VIEW": the timeout occurred when a write involves a
+                    VIEW update and the local view (MV) lock for the key could not
+                    be acquired within the timeout.
+                  - "CDC": the timeout occurred when cdc_total_space_in_mb is
+                    exceeded while doing a write to data tracked by cdc.
+                <contentions> is a [short] that describes the number of contentions that
+                    occurred during the CAS operation. The <contentions> field is only
+                    present when the <writeType> is "CAS".
+    0x1200    Read_timeout: Timeout exception during a read request. The rest
+              of the ERROR message body will be
+                <cl><received><blockfor><data_present>
+              where:
+                <cl> is the [consistency] level of the query having triggered
+                     the exception.
+                <received> is an [int] representing the number of nodes having
+                           answered the request.
+                <blockfor> is an [int] representing the number of replicas whose
+                           response is required to achieve <cl>. Please note that
+                           it is possible to have <received> >= <blockfor> if
+                           <data_present> is false. Also in the (unlikely)
+                           case where <cl> is achieved but the coordinator node
+                           times out while waiting for read-repair acknowledgement.
+                <data_present> is a single byte. If its value is 0, it means
+                           the replica that was asked for data has not
+                           responded. Otherwise, the value is != 0.
+    0x1300    Read_failure: A non-timeout exception during a read request. The rest
+              of the ERROR message body will be
+                <cl><received><blockfor><reasonmap><data_present>
+              where:
+                <cl> is the [consistency] level of the query having triggered
+                     the exception.
+                <received> is an [int] representing the number of nodes having
+                           answered the request.
+                <blockfor> is an [int] representing the number of replicas whose
+                           acknowledgement is required to achieve <cl>.
+                <reasonmap> is a map of endpoint to failure reason codes. This maps
+                           the endpoints of the replica nodes that failed when
+                           executing the request to a code representing the reason
+                           for the failure. The map is encoded starting with an [int] n
+                           followed by n pairs of <endpoint><failurecode> where
+                           <endpoint> is an [inetaddr] and <failurecode> is a [short].
+                <data_present> is a single byte. If its value is 0, it means
+                           the replica that was asked for data had not
+                           responded. Otherwise, the value is != 0.
+    0x1400    Function_failure: A (user defined) function failed during execution.
+              The rest of the ERROR message body will be
+                <keyspace><function><arg_types>
+              where:
+                <keyspace> is the keyspace [string] of the failed function
+                <function> is the name [string] of the failed function
+                <arg_types> [string list] one string for each argument type (as CQL type) of the failed function
+    0x1500    Write_failure: A non-timeout exception during a write request. The rest
+              of the ERROR message body will be
+                <cl><received><blockfor><reasonmap><writeType>
+              where:
+                <cl> is the [consistency] level of the query having triggered
+                     the exception.
+                <received> is an [int] representing the number of nodes having
+                           answered the request.
+                <blockfor> is an [int] representing the number of replicas whose
+                           acknowledgement is required to achieve <cl>.
+                <reasonmap> is a map of endpoint to failure reason codes. This maps
+                           the endpoints of the replica nodes that failed when
+                           executing the request to a code representing the reason
+                           for the failure. The map is encoded starting with an [int] n
+                           followed by n pairs of <endpoint><failurecode> where
+                           <endpoint> is an [inetaddr] and <failurecode> is a [short].
+                <writeType> is a [string] that describes the type of the write
+                            that failed. The value of that string can be one
+                            of:
+                   - "SIMPLE": the write was a non-batched
+                     non-counter write.
+                   - "BATCH": the write was a (logged) batch write.
+                     If this type is received, it means the batch log
+                     has been successfully written (otherwise a
+                     "BATCH_LOG" type would have been sent instead).
+                   - "UNLOGGED_BATCH": the write was an unlogged
+                     batch. No batch log write has been attempted.
+                   - "COUNTER": the write was a counter write
+                     (batched or not).
+                   - "BATCH_LOG": the failure occurred during the
+                     write to the batch log when a (logged) batch
+                     write was requested.
+                   - "CAS": the failure occurred during the Compare And Set write/update.
+                   - "VIEW": the failure occurred when a write involves a
+                     VIEW update and the local view (MV) lock for the key could not
+                     be acquired within the timeout.
+                   - "CDC": the failure occurred when cdc_total_space_in_mb is
+                     exceeded while doing a write to data tracked by cdc.
+    0x1600    CDC_WRITE_FAILURE: // todo
+    0x1700    CAS_WRITE_UNKNOWN: An exception occurred due to a contended Compare And Set write/update.
+              The CAS operation was only partially completed and the operation may or may not get completed by
+              the contending CAS write or SERIAL/LOCAL_SERIAL read. The rest of the ERROR message body will be
+                <cl><received><blockfor>
+              where:
+                <cl> is the [consistency] level of the query having triggered
+                     the exception.
+                <received> is an [int] representing the number of nodes having
+                           acknowledged the request.
+                <blockfor> is an [int] representing the number of replicas whose
+                           acknowledgement is required to achieve <cl>.
+
+    0x2000    Syntax_error: The submitted query has a syntax error.
+    0x2100    Unauthorized: The logged user doesn't have the right to perform
+              the query.
+    0x2200    Invalid: The query is syntactically correct but invalid.
+    0x2300    Config_error: The query is invalid because of a configuration issue.
+    0x2400    Already_exists: The query attempted to create a keyspace or a
+              table that already exists. The rest of the ERROR message
+              body will be <ks><table>
+              where:
+                <ks> is a [string] representing either the keyspace that
+                     already exists, or the keyspace in which the table that
+                     already exists is.
+                <table> is a [string] representing the name of the table that
+                        already exists. If the query was attempting to create a
+                        keyspace, <table>
will be present but will be the empty + string. + 0x2500 Unprepared: Can be thrown while a prepared statement tries to be + executed if the provided prepared statement ID is not known by + this host. The rest of the ERROR message body will be [short + bytes] representing the unknown ID. + +10. Changes from v4 + + * Beta protocol flag for v5 native protocol is added (Section 2.2) + * in Read_failure and Write_failure error message bodies (Section 9) + has been replaced with . The maps node IP addresses to + a failure reason code which indicates why the request failed on that node. + * Enlarged flag's bitmaps for QUERY, EXECUTE and BATCH messages from [byte] to [int] + (Sections 4.1.4, 4.1.6 and 4.1.7). + * Add the duration data type + * Added keyspace field in QUERY, PREPARE, and BATCH messages (Sections 4.1.4, 4.1.5, and 4.1.7). + * Added now_in_seconds field in QUERY, EXECUTE, and BATCH messages (Sections 4.1.4, 4.1.6, and 4.1.7). + * Added [int] flags field in PREPARE message (Section 4.1.5). + * Removed NO_COMPACT startup option (Section 4.1.1.) diff --git a/src/doc/4.0-alpha1/_images/docs_commit.png b/site-content/source/modules/ROOT/images/docs_commit.png similarity index 100% rename from src/doc/4.0-alpha1/_images/docs_commit.png rename to site-content/source/modules/ROOT/images/docs_commit.png diff --git a/src/doc/4.0-alpha1/_images/docs_create_branch.png b/site-content/source/modules/ROOT/images/docs_create_branch.png similarity index 100% rename from src/doc/4.0-alpha1/_images/docs_create_branch.png rename to site-content/source/modules/ROOT/images/docs_create_branch.png diff --git a/src/doc/4.0-alpha1/_images/docs_create_file.png b/site-content/source/modules/ROOT/images/docs_create_file.png similarity index 100% rename from src/doc/4.0-alpha1/_images/docs_create_file.png rename to site-content/source/modules/ROOT/images/docs_create_file.png diff --git a/src/doc/4.0-alpha1/_images/docs_editor.png b/site-content/source/modules/ROOT/images/docs_editor.png similarity index 100% rename from src/doc/4.0-alpha1/_images/docs_editor.png rename to site-content/source/modules/ROOT/images/docs_editor.png diff --git a/src/doc/4.0-alpha1/_images/docs_fork.png b/site-content/source/modules/ROOT/images/docs_fork.png similarity index 100% rename from src/doc/4.0-alpha1/_images/docs_fork.png rename to site-content/source/modules/ROOT/images/docs_fork.png diff --git a/src/doc/4.0-alpha1/_images/docs_pr.png b/site-content/source/modules/ROOT/images/docs_pr.png similarity index 100% rename from src/doc/4.0-alpha1/_images/docs_pr.png rename to site-content/source/modules/ROOT/images/docs_pr.png diff --git a/src/doc/4.0-alpha1/_images/docs_preview.png b/site-content/source/modules/ROOT/images/docs_preview.png similarity index 100% rename from src/doc/4.0-alpha1/_images/docs_preview.png rename to site-content/source/modules/ROOT/images/docs_preview.png diff --git a/src/doc/3.11.3/_images/eclipse_debug0.png b/site-content/source/modules/ROOT/images/eclipse_debug0.png similarity index 100% rename from src/doc/3.11.3/_images/eclipse_debug0.png rename to site-content/source/modules/ROOT/images/eclipse_debug0.png diff --git a/src/doc/3.11.3/_images/eclipse_debug1.png b/site-content/source/modules/ROOT/images/eclipse_debug1.png similarity index 100% rename from src/doc/3.11.3/_images/eclipse_debug1.png rename to site-content/source/modules/ROOT/images/eclipse_debug1.png diff --git a/src/doc/3.11.3/_images/eclipse_debug2.png b/site-content/source/modules/ROOT/images/eclipse_debug2.png similarity index 100% 
rename from src/doc/3.11.3/_images/eclipse_debug2.png rename to site-content/source/modules/ROOT/images/eclipse_debug2.png diff --git a/src/doc/3.11.3/_images/eclipse_debug3.png b/site-content/source/modules/ROOT/images/eclipse_debug3.png similarity index 100% rename from src/doc/3.11.3/_images/eclipse_debug3.png rename to site-content/source/modules/ROOT/images/eclipse_debug3.png diff --git a/src/doc/3.11.3/_images/eclipse_debug4.png b/site-content/source/modules/ROOT/images/eclipse_debug4.png similarity index 100% rename from src/doc/3.11.3/_images/eclipse_debug4.png rename to site-content/source/modules/ROOT/images/eclipse_debug4.png diff --git a/src/doc/3.11.3/_images/eclipse_debug5.png b/site-content/source/modules/ROOT/images/eclipse_debug5.png similarity index 100% rename from src/doc/3.11.3/_images/eclipse_debug5.png rename to site-content/source/modules/ROOT/images/eclipse_debug5.png diff --git a/src/doc/3.11.3/_images/eclipse_debug6.png b/site-content/source/modules/ROOT/images/eclipse_debug6.png similarity index 100% rename from src/doc/3.11.3/_images/eclipse_debug6.png rename to site-content/source/modules/ROOT/images/eclipse_debug6.png diff --git a/site-content/source/modules/ROOT/nav.adoc b/site-content/source/modules/ROOT/nav.adoc new file mode 100644 index 000000000..4f763468e --- /dev/null +++ b/site-content/source/modules/ROOT/nav.adoc @@ -0,0 +1,17 @@ +* xref:glossary.adoc[Glossary] +* xref:bugs.adoc[How to report bugs] +* xref:contactus.adoc[Contact us] + +* xref:development/index.adoc[Contribute to Cassandra code] +** xref:development/gettingstarted.adoc[Getting started] +** xref:development/ide.adoc[IDE] +** xref:development/testing.adoc[Testing] +** xref:development/code_style.adoc[Code style] +** xref:development/how_to_commit.adoc[How to commit] +** xref:development/how_to_review.adoc[How to review] +** xref:development/patches.adoc[Patches] +** xref:development/ci.adoc[CI] +** xref:development/dependencies.adoc[Dependencies] +** xref:development/release_process.adoc[Release process] + +* xref:development/documentation.adoc[Contribute to the documentation] diff --git a/site-content/source/modules/ROOT/pages/apachecon_cfp.adoc b/site-content/source/modules/ROOT/pages/apachecon_cfp.adoc new file mode 100644 index 000000000..288f70a26 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/apachecon_cfp.adoc @@ -0,0 +1,83 @@ += Announcing the 2019 Apache Cassandra Summit + +== Announcing the 2019 Apache Cassandra Summit + +Join the Apache Cassandra community for the 2019 Apache Cassandra +Summit, hosted with ApacheCon in Las Vegas, NV (Sept 9 - 13). + +''''' + +=== Apache Cassandra at ApacheCon + +image:/img/apachecon-2019.jpg[ApacheCon 2019]\{:style=``float: right; +width: 400px''} + +For more information about other events at ApacheCon, see +https://apachecon.com/acna19/index.html[ApacheCon 2019]. + +  + +==== Day One: Next Generation Cassandra Conference (NGCC) + +NGCC (Next Generation Cassandra Conference), a one-day event for Apache +Cassandra contributors and large system operators to meet, discuss, and +plan future development in the project. NGCC will take place on +*Tuesday, September 10th*. + +NGCC is an advanced event targeted toward Apache Cassandra contributors +and large system / platform operators. Content will focus on Cassandra +internals and is geared toward those with detailed knowledge of the +codebase and architecture. All are welcome to attend. 
+ +  + +==== Day Two: Apache Cassandra Summit + +The Apache Cassandra Summit, a one-day event for Apache Cassandra users +to meet, network, and learn about what’s new and what’s coming in +Cassandra. The Apache Cassandra Summit will be held on *Wednesday, Sept +11*. + +The Apache Cassandra Summit is targeted toward a wider audience. Topics +should be interesting and accessible to those whose first introduction +to Cassandra is at this event, and those who have been active in the +community for many years. + +''''' + +=== Call for Presentations + +We’re excited to announce the Call for Presentations is now open for +both, closing Monday, May 13 at 2pm BST (UTC+1). + +  + +==== Apache Cassandra Summit CFP (40-minute presentation, 5-minute Q&A) – + +*Example proposals might include:* - Lessons learned operating Apache +Cassandra at scale. - Customizations and ways members of the community +have extended Apache Cassandra to make it a great fit for their use +case. - Stability improvements, performance enhancements, and new +features in an upcoming Cassandra release. - Something we haven’t +thought about that a general audience would be interested to hear. + +  + +==== Next Generation Cassandra Conference CFP (30-minute presentation, 15m breakout) – + +*Example proposals might include:* - Presentations from contributors and +large operators covering pain points and proposals to address them - +Planned or proposed improvements in specific areas of the Apache +Cassandra codebase - Planned or proposed improvements we can make to +strengthen and empower the Apache Cassandra community. - Something we +haven’t thought about that advanced operators / contributors would be +interested to hear. + +  + +https://asf.jamhosted.net/cfp.html[Click here] to submit a proposal for +the Apache Cassandra Summit or NGCC. + +When submitting, please ensure you select ``Cassandra'' as the category. +For NGCC submissions, please include ``NGCC Proposal'' in the ``Optional +Notes'' field. diff --git a/site-content/source/modules/ROOT/pages/bugs.adoc b/site-content/source/modules/ROOT/pages/bugs.adoc new file mode 100644 index 000000000..d2d06de6d --- /dev/null +++ b/site-content/source/modules/ROOT/pages/bugs.adoc @@ -0,0 +1,17 @@ += Reporting Bugs + +If you encounter a problem with Cassandra, the first places to ask for help are the +link:++https://lists.apache.org/list.html?user@cassandra.apache.org++[user mailing list] and +the https://s.apache.org/slack-invite[Cassandra slack room]. + +If, after having asked for help, you suspect that you have found a bug +in Cassandra, you should report it by opening a ticket through the +xref:https://issues.apache.org/jira/browse/CASSANDRA[Apache Cassandra JIRA]. +Please provide as much details as you can on your problem, and don't +forget to indicate which version of Cassandra you are running and on +which environment. + +Further details on how to contribute can be found at our +xref:cassandra:development/index.adoc[Contributing to Cassandra] section. Please note that the source of this +documentation is part of the Cassandra git repository and hence +contributions to the documentation should follow the same path. 
diff --git a/site-content/source/modules/ROOT/pages/community.adoc b/site-content/source/modules/ROOT/pages/community.adoc new file mode 100644 index 000000000..21a51e5b6 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/community.adoc @@ -0,0 +1,78 @@ += Community + +== Getting and keeping in touch + +== Mailing lists + +Discussion and questions on Cassandra’s usage and development happens +mainly on the following mailing lists: + +* http://www.mail-archive.com/user@cassandra.apache.org/[Users]: General +mailing list for user questions and discussions. This is also where new +releases are announced +(mailto:user-subscribe@cassandra.apache.org[subscribe] | +mailto:user-unsubscribe@cassandra.apache.org[unsubscribe] | +https://lists.apache.org/list.html?user@cassandra.apache.org[Archives]). +* http://www.mail-archive.com/dev@cassandra.apache.org/[Developers]: +Questions and discussions related to Cassandra development +(mailto:dev-subscribe@cassandra.apache.org[subscribe] | +mailto:dev-unsubscribe@cassandra.apache.org[unsubscribe] | +https://lists.apache.org/list.html?dev@cassandra.apache.org[Archives]). +* http://www.mail-archive.com/commits@cassandra.apache.org/[Commits]: +Notification on commits done to the source repository and on +https://issues.apache.org/jira/browse/CASSANDRA[JIRA] updates. This is a +fairly noisy mailing list mostly useful for Cassandra developers and +those who would like to keep close tabs on Cassandra’s development +(mailto:commits-subscribe@cassandra.apache.org[subscribe] | +mailto:commits-unsubscribe@cassandra.apache.org[unsubscribe] | +https://lists.apache.org/list.html?commits@cassandra.apache.org[Archives]). + +== Slack + +We have recently moved to the ASF Slack organization for all chat. +Please https://s.apache.org/slack-invite[sign up for an account] to +participate. + +* `#cassandra` - for user questions and general discussions +* `#cassandra-dev` - strictly for questions or discussions related to +Cassandra development +* `#cassandra-builds` - results of automated test builds +* `#cassandra-builds-patches` - results of patch test builds + +== Stack Overflow + +You can also check the +http://stackoverflow.com/questions/tagged/cassandra[Q&A about using +Cassandra] on Stack Overflow. + +== Books and publications + +* http://shop.oreilly.com/product/0636920299837.do[Cassandra: The +Definitive Guide, 3rd Edition], by Jeff Carpenter and Eben Hewitt. +Updated for Cassandra 4.0 +* https://www.amazon.com/Mastering-Apache-Cassandra-Nishant-Neeraj/dp/1784392618/[Mastering +Apache Cassandra, 2nd Edition], by Nishant Neeraj +* https://www.amazon.com/Learning-Apache-Cassandra-Tolerant-Real-Time/dp/1783989203/[Learning +Apache Cassandra - Manage Fault Tolerant and Scalable Real-Time Data], +by Mat Brown +* https://dl.acm.org/citation.cfm?id=1773922[Cassandra: a decentralized +structured storage system], by Avinash Lakshman and Prashant Malik + +== Third-party projects + +There are a number of third-party Cassandra projects that could be +useful. Check out this +https://cassandra.apache.org/third-party/[listing]. + +== Reporting bugs + +If you encounter a problem with Cassandra, the first places to ask for +help are the link:#mailing[user mailing list] and the `#cassandra` +https://s.apache.org/slack-invite[Slack channel]. + +If, after having asked for help, you suspect that you have found a bug +in Cassandra, you should report it by opening a ticket through the +https://issues.apache.org/jira/browse/CASSANDRA[Apache Cassandra JIRA +tracking system]. 
Please provide as much detail as you can on your
+problem. Don’t forget to indicate which version of Cassandra you are
+running and on which environment.
diff --git a/site-content/source/modules/ROOT/pages/contactus.adoc b/site-content/source/modules/ROOT/pages/contactus.adoc
new file mode 100644
index 000000000..7382f109f
--- /dev/null
+++ b/site-content/source/modules/ROOT/pages/contactus.adoc
@@ -0,0 +1,30 @@
+= Contact us
+
+You can get in touch with the Cassandra community either via the mailing
+lists or Slack rooms.
+
+== Mailing lists
+
+The following mailing lists are available:
+
+* http://www.mail-archive.com/user@cassandra.apache.org/[Users]:
+General discussion list for users - mailto:user-subscribe@cassandra.apache.org[Subscribe]
+* http://www.mail-archive.com/dev@cassandra.apache.org/[Developers]:
+Development related discussion - mailto:dev-subscribe@cassandra.apache.org[Subscribe]
+* http://www.mail-archive.com/commits@cassandra.apache.org/[Commits]:
+Notifications of commits to the source repository - mailto:commits-subscribe@cassandra.apache.org[Subscribe]
+* http://www.mail-archive.com/client-dev@cassandra.apache.org/[Client Libraries]:
+Discussion related to the development of idiomatic client APIs - mailto:client-dev-subscribe@cassandra.apache.org[Subscribe]
+
+Subscribe by clicking on the Subscribe links above. Follow the instructions
+in the welcome email to confirm your subscription. Make sure to keep the welcome email,
+as it contains instructions on how to unsubscribe.
+
+== Slack
+
+To chat with users or developers in real time, join our rooms on
+https://s.apache.org/slack-invite[ASF Slack]:
+
+* `cassandra` - for user questions and general discussions.
+* `cassandra-dev` - strictly for questions or discussions related to
+Cassandra development.
diff --git a/site-content/source/modules/ROOT/pages/development/ci.adoc b/site-content/source/modules/ROOT/pages/development/ci.adoc
new file mode 100644
index 000000000..9a9105f84
--- /dev/null
+++ b/site-content/source/modules/ROOT/pages/development/ci.adoc
@@ -0,0 +1,102 @@
+== CI Environments
+
+=== About CI testing and Apache Cassandra
+
+Cassandra can be automatically tested using various test suites that are
+implemented either with JUnit or as
+https://github.com/riptano/cassandra-dtest[dtest] scripts written in
+Python. As outlined in `testing`, each kind of test suite addresses a
+different way to test Cassandra. Eventually, all of the tests will be
+executed together on the CI platform at
+https://builds.apache.org[builds.apache.org], running
+http://jenkins-ci.org[Jenkins].
+
+=== Setting up your own Jenkins server
+
+Jenkins is an open source solution that can be installed on a large
+number of platforms. Setting up a custom Jenkins instance for Cassandra
+may be desirable for users who have hardware to spare, or organizations
+that want to run Cassandra tests for custom patches before contribution.
+
+Please refer to the Jenkins download and documentation pages for details
+on how to get Jenkins running, possibly also including agent build
+executor instances. The rest of the document will focus on how to set up
+Cassandra jobs in your Jenkins environment.
+
+==== Required plugins
+
+The following plugins need to be installed in addition to the standard
+plugins (git, ant, ...).
+
+You can install any missing plugins using the plugin manager.
+ +Go to `Manage Jenkins -> Manage Plugins -> Available` and install the +following plugins and respective dependencies: + +* Job DSL +* Javadoc Plugin +* description setter plugin +* Throttle Concurrent Builds Plug-in +* Test stability history +* Hudson Post build task + +==== Setup seed job + +. Config `New Item` + +* Name it `Cassandra-Job-DSL` +* Select `Freestyle project` + +. Under `Source Code Management` select Git using the repository: +`https://github.com/apache/cassandra-builds` + +. Under `Build`, confirm `Add build step` -> `Process Job DSLs` and enter +at `Look on Filesystem`: `jenkins-dsl/cassandra_job_dsl_seed.groovy` + +Generated jobs will be created based on the Groovy script's default +settings. You may want to override settings by checking +`This project is parameterized` and add `String Parameter` for on the +variables that can be found in the top of the script. This will allow +you to setup jobs for your own repository and branches (e.g. working +branches). + +[arabic, start=4] +. When done, confirm "Save". + +You should now find a new entry with the given name in your project +list. However, building the project will still fail and abort with an +error message "Processing DSL script +cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use". +Go to `Manage Jenkins` -> `In-process Script Approval` to fix this issue. +Afterwards you should be able to run the script and have it generate +numerous new jobs based on the found branches and configured templates. + +Jobs are triggered by either changes in Git or are scheduled to execute +periodically, e.g. on daily basis. +Jenkins will use any available executor with the label "cassandra", once the job +is to be run. +Please make sure to make any executors available by selecting +`Build Executor Status` -> `Configure` -> Add "`cassandra`" as label and +save. + +Executors need to have "JDK 1.8 (latest)" installed. This is done under +`Manage Jenkins -> Global Tool Configuration -> JDK Installations…`. +Executors also need to have the `virtualenv` package installed on their +system. + +=== CircleCI + +Cassandra ships with a default https://circleci.com[CircleCI] +configuration to enable running tests on your branches. +Go to the CircleCI website, click "Login" and log in with your github account. +Then give CircleCI permission to watch your repositories. + +Once you have done that, you can optionally configure CircleCI to run +tests in parallel if you wish: + +[arabic,start=1] +. Click `Projects` and select your github account, and then click the settings for your project. +. Set the parallelism setting. If you leave the default value of 1 +for Cassandra, only `ant eclipse-warnings` and `ant test` will be run. +If you change the value to 4, Circle CI also runs `ant long-test`, +`ant test-compression` and `ant stress-test`. diff --git a/site-content/source/modules/ROOT/pages/development/code_style.adoc b/site-content/source/modules/ROOT/pages/development/code_style.adoc new file mode 100644 index 000000000..cd9048b48 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/development/code_style.adoc @@ -0,0 +1,99 @@ +== Code Style + +=== General Code Conventions + +* The Cassandra project follows +http://java.sun.com/docs/codeconv/html/CodeConvTOC.doc.html[Sun's Java +coding conventions] with one important exception: `{` and `}` are always +placed on a new line. + +=== Exception handling + +* Never ever write `catch (...) {}` or `catch (...) 
{ logger.error() }` +merely to satisfy Java's compile-time exception checking. Always +propagate the exception up or throw `RuntimeException` (or, if it "can't +happen," `AssertionError`). This makes the exceptions visible to +automated tests. +* Avoid propagating up checked exceptions that no caller handles. +Rethrow as `RuntimeException` (or `IOError`, if that is more +applicable). +* Similarly, logger.warn() is often a cop-out: is this an error or not? +If it is don't hide it behind a warn; if it isn't, no need for the +warning. +* If you genuinely know an exception indicates an expected condition, +it's okay to ignore it BUT this must be explicitly explained in a +comment. + +=== Boilerplate + +* Avoid redundant `@Override` annotations when implementing abstract or +interface methods. +* Do not implement equals or hashcode methods unless they are actually +needed. +* Prefer public final fields to private fields with getters. (But prefer +encapsulating behavior in "real" methods to either.) +* Prefer requiring initialization in the constructor to setters. +* Avoid redundant `this` references to member fields or methods. +* Do not extract interfaces (or abstract classes) unless you actually +need multiple implementations of it. +* Always include braces for nested levels of conditionals and loops. +Only avoid braces for single level. + +=== Multiline statements + +* Try to keep lines under 120 characters, but use good judgement. +It is better to exceed 120 by a little, than split a line that has no natural +splitting points. +* When splitting inside a method call, use one line per parameter and +align the items called: + +[source,none] +---- +SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), + columnFamilies.size(), + StorageService.getPartitioner()); +---- + +* When splitting a ternary, use one line per clause, carry the operator, +and align by indenting with 4 white spaces: + +[source,none] +---- +var = bar == null + ? doFoo() + : doBar(); +---- + +=== Whitespace + +* Make sure to use 4 spaces instead of the tab character for all +your indentation. +* Many lines in the current files have a bunch of trailing whitespace. +If you encounter incorrect whitespace, clean up in a separate patch. +Current and future reviewers won't want to review whitespace diffs. + +=== Imports + +Observe the following order for your imports: + +[source,none] +---- +java +[blank line] +com.google.common +org.apache.commons +org.junit +org.slf4j +[blank line] +everything else alphabetically +---- + +=== Format files for IDEs + +* IntelliJ: +https://wiki.apache.org/cassandra/CodeStyle?action=AttachFile&do=view&target=intellij-codestyle.jar[intellij-codestyle.jar] +* IntelliJ 13: +https://gist.github.com/jdsumsion/9ab750a05c2a567c6afc[gist for IntelliJ +13] (this is a work in progress, still working on javadoc, ternary +style, line continuations, etc) +* Eclipse: (https://github.com/tjake/cassandra-style-eclipse) diff --git a/site-content/source/modules/ROOT/pages/development/dependencies.adoc b/site-content/source/modules/ROOT/pages/development/dependencies.adoc new file mode 100644 index 000000000..5bedae4bf --- /dev/null +++ b/site-content/source/modules/ROOT/pages/development/dependencies.adoc @@ -0,0 +1,51 @@ +== Dependency Management + +Managing libraries for Cassandra is a bit less straight forward compared +to other projects, as the build process is based on ant, maven and +manually managed jars. 
Make sure to follow the steps below carefully and +pay attention to any emerging issues in the `ci` and reported related +issues on Jira/ML, in case of any project dependency changes. + +As Cassandra is an Apache product, all included libraries must follow +Apache's https://www.apache.org/legal/resolved.html[software license +requirements]. + +=== Required steps to add or update libraries + +* Add or replace jar file in `lib` directory +* Add or update `lib/license` files +* Update dependencies in `build.xml` +** Add to `parent-pom` with correct version +** Add to `all-pom` if simple Cassandra dependency (see below) + +=== POM file types + +* *parent-pom* - contains all dependencies with the respective version. +All other poms will refer to the artifacts with specified versions +listed here. +* *build-deps-pom(-sources)* + *coverage-deps-pom* - used by `ant build` +compile target. Listed dependenices will be resolved and copied to +`build/lib/{jar,sources}` by executing the +`maven-ant-tasks-retrieve-build` target. This should contain libraries +that are required for build tools (grammar, docs, instrumentation), but +are not shipped as part of the Cassandra distribution. +* *test-deps-pom* - refered by `maven-ant-tasks-retrieve-test` to +retrieve and save dependencies to `build/test/lib`. Exclusively used +during JUnit test execution. +* *all-pom* - pom for +https://mvnrepository.com/artifact/org.apache.cassandra/cassandra-all[cassandra-all.jar] +that can be installed or deployed to public maven repos via +`ant publish` + +=== Troubleshooting and conflict resolution + +Here are some useful commands that may help you out resolving conflicts. + +* `ant realclean` - gets rid of the build directory, including build +artifacts. +* `mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j` +- shows transitive dependency tree for artifacts, e.g. org.slf4j. In +case the command above fails due to a missing parent pom file, try +running `ant mvn-install`. +* `rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/` - removes +cached local Cassandra maven artifacts diff --git a/site-content/source/modules/ROOT/pages/development/documentation.adoc b/site-content/source/modules/ROOT/pages/development/documentation.adoc new file mode 100644 index 000000000..a93f89796 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/development/documentation.adoc @@ -0,0 +1,135 @@ +== Working on Documentation + +=== How Cassandra is documented + +The official Cassandra documentation lives in the project's git +repository. +We use a static site generator, http://www.antora.org/[Antora], to create pages hosted at +https://cassandra.apache.org/doc/latest/[cassandra.apache.org]. + + + +Using a static site generator often requires the use of a markup language +instead of visual editors (which some people would call good news). +Antora processes http://www.asciidoc.org[Ascidoc], the markup language used to generate our documentation. +Markup languages allow you to format text using certain syntax elements. +Your document structure will also have to follow specific conventions. +Feel free to take a look at http://cassandra.apache.org/docs[existing documents] to get a better idea how we structure our documents. + +So how do you actually start making contributions? + +=== GitHub based work flow + +_Recommended for shorter documents and minor changes on existing content +(e.g. fixing typos or updating descriptions)_ + +Follow these steps to contribute using GitHub. 
It's assumed that you're +logged in with an existing account. + +[arabic] +. Fork the GitHub mirror of the +https://github.com/apache/cassandra[Cassandra repository] + +image::docs_fork.png[image] + +[arabic, start=2] +. Create a new branch that you can use to make your edits. It's +recommended to have a separate branch for each of your working projects. +It will also make it easier to create a pull request later to when you +decide you’re ready to contribute your work. + +image::docs_create_branch.png[image] + +[arabic, start=3] +. Navigate to document sources `doc/source/modules` to find the `.adoc` file to +edit. The URL of the document should correspond to the directory +structure within the modules, where first the `component` name, such as `cassandra` is listed, and then the actual pages inside the `pages` directory. New files can be created using the "Create new file" button: + +image::docs_create_file.png[image] + +[arabic, start=4] +. At this point you should be able to edit the file using the GitHub web +editor. Start by naming your file and add some content. Have a look at +other existing `.adoc` files to get a better idea what format elements to +use. + +image::docs_editor.png[image] + +Make sure to preview added content before committing any changes. + +image::docs_preview.png[image] + +[arabic, start=5] +. Commit your work when you're done. Make sure to add a short +description of all your edits since the last time you committed before. + +image::docs_commit.png[image] + +[arabic, start=6] +. Finally if you decide that you're done working on your branch, it's +time to create a pull request! + +image::docs_pr.png[image] + +Afterwards the GitHub Cassandra mirror will list your pull request and +you're done. Congratulations! Please give us some time to look at your +suggested changes before we get back to you. + +=== Jira based work flow + +_Recommended for major changes_ + +Significant changes to the documentation are best managed through our +Jira issue tracker. Please follow the same +https://cassandra.apache.org/doc/latest/development/patches.html[contribution +guides] as for regular code contributions. Creating high quality content +takes a lot of effort. It’s therefore always a good idea to create a +ticket before you start and explain what you’re planning to do. This will +create the opportunity for other contributors and committers to comment +on your ideas and work so far. Eventually your patch gets a formal +review before it is committed. + +=== Working on documents locally using Antora + +_Recommended for advanced editing_ + +Using the GitHub web interface should allow you to use most common +layout elements including images. More advanced formatting options and +navigation elements depend on Antora to render correctly. Therefore, it’s +a good idea to setup Antora locally for any serious editing. Please +follow the instructions in the Cassandra source directory at +`doc/README.md`. Setup is very easy (at least on OSX and Linux). + +=== Notes for committers + +Please feel free to get involved and merge pull requests created on the +GitHub mirror if you're a committer. As this is a read-only repository, +you won't be able to merge a PR directly on GitHub. You'll have to +commit the changes against the Apache repository with a comment that +will close the PR when the committ syncs with GitHub. + +You may use a git work flow like this: + +.... +git remote add github https://github.com/apache/cassandra.git +git fetch github pull//head: +git checkout +.... 
+
+Now either rebase or squash the commit, e.g. for squashing:
+
+....
+git reset --soft origin/trunk
+git commit --author
+....
+
+Make sure to add a proper commit message including a "Closes #" text
+to automatically close the PR.
+
+==== Publishing
+
+Details on how to build and publish the site at cassandra.apache.org
+can be found
+https://github.com/apache/cassandra-website/blob/master/README.md[here].
diff --git a/site-content/source/modules/ROOT/pages/development/gettingstarted.adoc b/site-content/source/modules/ROOT/pages/development/gettingstarted.adoc
new file mode 100644
index 000000000..801915dcd
--- /dev/null
+++ b/site-content/source/modules/ROOT/pages/development/gettingstarted.adoc
@@ -0,0 +1,66 @@
+== Getting Started
+
+=== Initial Contributions
+
+Writing a new feature is just one way to contribute to the Cassandra
+project.
+In fact, making sure that supporting tasks, such as quality testing,
+documentation, and helping users, are completed is just as important.
+Tracking the development of new features is an ongoing challenge for this project,
+like most open source projects.
+We suggest learning how this project gets things done before tackling a new feature.
+Here are some suggestions for ways to contribute:
+
+* Update the documentation
+* Answer questions on the user list
+* Review and test a submitted patch
+* Investigate and fix a reported bug
+* Create unit tests and dtests
+
+=== Updating documentation
+
+The Cassandra documentation is maintained in the Cassandra source
+repository along with the Cassandra code base.
+To submit changes to the documentation, follow the standard process for
+submitting a xref:patches.adoc[patch].
+
+=== Answering questions on the user list
+
+Subscribe to the user list, look for some questions you can answer and write a reply.
+Simple as that! See the http://cassandra.apache.org/community/[community] page
+for details on how to subscribe to the mailing list.
+
+=== Reviewing and testing a submitted patch
+
+Reviewing patches is not the sole domain of committers.
+If others review a patch, it can reduce the load on the committers.
+Less time spent reviewing patches means committers can work on more great features
+or review more complex patches.
+Follow the instructions in xref:how_to_review.adoc[How to review] or, alternatively,
+create a build with the patch and test it with your own workload.
+Add a comment to the JIRA ticket to let others know you've reviewed and tested,
+along with the results of your work.
+For example:
+
+====
+"I tested this performance enhancement on our application's standard production
+load test and found a 3% improvement."
+====
+
+=== Investigate and/or fix a reported bug
+
+Often, the hardest work in fixing a bug is reproducing it.
+Even if you don't have the knowledge to produce a fix, figuring out a way to
+reliably reproduce an issue can be a massive contribution.
+Document your method of reproduction in a JIRA comment or,
+better yet, produce an automated test that reproduces the issue and
+attach it to the ticket.
+If you go as far as producing a fix, follow the process for submitting a xref:patches.adoc[patch].
+
+=== Create unit tests and dtests
+
+As with most code bases, Cassandra will always benefit from more automated test
+coverage.
+Before starting work on a particular area of code, consider reviewing and
+enhancing the existing test coverage.
+You'll both improve your knowledge of the code before you start on an
+enhancement, and reduce the chance of introducing issues with your change.
+See xref:testing.adoc[testing] and xref:patches.adoc[patches] for more detail.
diff --git a/site-content/source/modules/ROOT/pages/development/how_to_commit.adoc b/site-content/source/modules/ROOT/pages/development/how_to_commit.adoc
new file mode 100644
index 000000000..a2ff02f87
--- /dev/null
+++ b/site-content/source/modules/ROOT/pages/development/how_to_commit.adoc
@@ -0,0 +1,68 @@
+== How-to Commit
+
+If you are a committer, feel free to pick any process that works for you
+- so long as you are planning to commit the work yourself.
+
+Here is how committing and merging typically look for tickets that
+follow the convention (if patch-based).
+A hypothetical CASSANDRA-12345 ticket used in the example is a cassandra-3.0
+based bug fix that requires different code for cassandra-3.3, and trunk.
+Contributor Jackie is supplying a patch for the root branch (12345-3.0.patch),
+and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).
+
+On cassandra-3.0:::
+  . `git am -3 12345-3.0.patch` (if we have a problem b/c of CHANGES.txt
+    not merging anymore, we modify it ourselves, in place)
+On cassandra-3.3:::
+  . `git merge cassandra-3.0 -s ours`
+  . `git apply -3 12345-3.3.patch` (likely to have an issue with
+    CHANGES.txt here: modify it ourselves, then `git add CHANGES.txt`)
+  . `git commit --amend`
+On trunk:::
+  . `git merge cassandra-3.3 -s ours`
+  . `git apply -3 12345-trunk.patch` (likely to have an issue with
+    CHANGES.txt here: modify it ourselves, then `git add CHANGES.txt`)
+  . `git commit --amend`
+On any branch:::
+  . `git push origin cassandra-3.0 cassandra-3.3 trunk --atomic`
+
+Same scenario, but a branch-based contribution:
+
+On cassandra-3.0:::
+  . `git cherry-pick ` (if we have a problem b/c of
+    CHANGES.txt not merging anymore, we modify it ourselves, in place)
+On cassandra-3.3:::
+  . `git merge cassandra-3.0 -s ours`
+  . `git format-patch -1 `
+  . `git apply -3 .patch` (likely to have an issue
+    with CHANGES.txt here: modify it ourselves, then `git add CHANGES.txt`)
+  . `git commit --amend`
+On trunk:::
+  . `git merge cassandra-3.3 -s ours`
+  . `git format-patch -1 `
+  . `git apply -3 .patch` (likely to have an issue
+    with CHANGES.txt here: modify it ourselves, then `git add CHANGES.txt`)
+  . `git commit --amend`
+On any branch:::
+  . `git push origin cassandra-3.0 cassandra-3.3 trunk --atomic`
+
+[TIP]
+.Notes on git flags
+====
+The `-3` flag used with `git am` or `git apply` will instruct git to perform a
+3-way merge.
+If a conflict is detected, you can either resolve it manually or invoke `git mergetool`.
+
+The `--atomic` flag to `git push` does the obvious thing: pushes all or
+nothing. Without the flag, the command is equivalent to running `git push`
+once per branch. This is nifty if a race condition occurs -
+you won’t push half the branches, blocking other committers’ progress
+while you are resolving the issue.
+==== + +[TIP] +.Tip +==== +The fastest way to get a patch from someone’s commit in a branch on github if you don’t have their repo in remote, is to append .patch to the commit url: +`curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch` +==== diff --git a/site-content/source/modules/ROOT/pages/development/how_to_review.adoc b/site-content/source/modules/ROOT/pages/development/how_to_review.adoc new file mode 100644 index 000000000..2fcc8c348 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/development/how_to_review.adoc @@ -0,0 +1,72 @@ +== Review Checklist + +When reviewing tickets in Apache JIRA, the following items should be +covered as part of the review process: + +=== General + +* Does it conform to the `code_style` guidelines? +* Is there any redundant or duplicate code? +* Is the code as modular as possible? +* Can any singletons be avoided? +* Can any of the code be replaced with library functions? +* Are units of measurement used in the code consistent, both internally +and with the rest of the ecosystem? + +=== Error-Handling + +* Are all data inputs and outputs checked (for the correct type, length, +format, and range) and encoded? +* Where third-party utilities are used, are returning errors being +caught? +* Are invalid parameter values handled? +* Are any Throwable/Exceptions passed to the JVMStabilityInspector? +* Are errors well-documented? Does the error message tell the user how +to proceed? +* Do exceptions propagate to the appropriate level in the code? + +=== Documentation + +* Do comments exist and describe the intent of the code (the "why", not +the "how")? +* Are javadocs added where appropriate? +* Is any unusual behavior or edge-case handling described? +* Are data structures and units of measurement explained? +* Is there any incomplete code? If so, should it be removed or flagged +with a suitable marker like ‘TODO’? +* Does the code self-document via clear naming, abstractions, and flow +control? +* Have NEWS.txt, the cql3 docs, and the native protocol spec been +updated if needed? +* Is the ticket tagged with "client-impacting" and "doc-impacting", +where appropriate? +* Has lib/licences been updated for third-party libs? Are they Apache +License compatible? +* Is the Component on the JIRA ticket set appropriately? + +=== Testing + +* Is the code testable? i.e. don’t add too many or hide dependencies, +unable to initialize objects, test frameworks can use methods etc. +* Do tests exist and are they comprehensive? +* Do unit tests actually test that the code is performing the intended +functionality? +* Could any test code use common functionality (e.g. ccm, dtest, or +CqlTester methods) or abstract it there for reuse? +* If the code may be affected by multi-node clusters, are there dtests? +* If the code may take a long time to test properly, are there CVH +tests? +* Is the test passing on CI for all affected branches (up to trunk, if +applicable)? Are there any regressions? +* If patch affects read/write path, did we test for performance +regressions w/multiple workloads? +* If adding a new feature, were tests added and performed confirming it +meets the expected SLA/use-case requirements for the feature? + +=== Logging + +* Are logging statements logged at the correct level? +* Are there logs in the critical path that could affect performance? +* Is there any log that could be added to communicate status or +troubleshoot potential problems in this feature? 
+* Can any unnecessary logging statement be removed? diff --git a/site-content/source/modules/ROOT/pages/development/ide.adoc b/site-content/source/modules/ROOT/pages/development/ide.adoc new file mode 100644 index 000000000..eae0c7d20 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/development/ide.adoc @@ -0,0 +1,226 @@ +== Building and IDE Integration + +=== Building From Source + +Building Cassandra from source is the first important step in contributing +to the Apache Cassandra project. +You'll need to install http://www.oracle.com/technetwork/java/javase/downloads/index.html[Java 8], https://git-scm.com/[Git], and http://ant.apache.org/[Ant] first. + +The source code for Cassandra is shared on the central Apache Git +repository and organized by branch, one branch for each major version. +You can access the code for the current development branch using: + +// 01.15.21 - polandll - should this step be a fork rather than a clone? + +[source, plaintext] +---- +git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk +---- + +Other branches will point to different versions of Cassandra. Switching +to a different branch requires checking out the branch. +For example, to checkout the latest version of Cassandra 3.0, use: + +[source, plaintext] +---- +git checkout cassandra-3.0 +---- + +You can get a list of available branches with `git branch`. + +Build Cassandra using ant: + +[source, plaintext] +---- +ant +---- + +This may take a significant amount of time depending on artifacts that have to +be downloaded or the number of classes that need to be compiled. + +[TIP] +.Hint +==== +You can setup multiple working trees for different Cassandra versions +from the same repository using +https://git-scm.com/docs/git-worktree[git-worktree]. +==== + +Now you can get started with Cassandra using IntelliJ IDEA or Eclipse. + +=== Setting up Cassandra in IntelliJ IDEA + +https://www.jetbrains.com/idea/[IntelliJ IDEA] by JetBrains is one of +the most popular IDEs for Cassandra and Java development in general. +The Community Edition can be freely downloaded with all features needed to get started developing Cassandra. + +Use the following procedure for Cassandra 2.1.5+. +If you wish to work with older Cassandra versions, see our https://cwiki.apache.org/confluence/display/CASSANDRA2/RunningCassandraInIDEA[wiki] for instructions. + +First, clone and build Cassandra. +Then execute the following steps to use IntelliJ IDEA. + +[arabic] +. Generate the IDEA files using ant: + +[source, plaintext] +---- +ant generate-idea-files +---- + +[arabic, start=2] +. Start IDEA. +. Open the IDEA project from the checked-out Cassandra directory using `File > Open` in IDEA's menu. + +The project generated by `ant generate-idea-files` contains +nearly everything you need to debug Cassandra and execute unit tests. +You should be able to: + +* Run/debug defaults for JUnit +* Run/debug configuration for Cassandra daemon +* Read/modify the license header for Java source files +* Study Cassandra code style +* Inspections + +=== Opening Cassandra in Apache NetBeans + +https://netbeans.apache.org/[Apache NetBeans] is an older open source Java IDE, +and can be used for Cassandra development. +There is no project setup or generation required to open Cassandra in NetBeans. +Use the following procedure for Cassandra 4.0+. + +First, clone and build Cassandra. +Then execute the following steps to use NetBeans. + +[arabic] +. Start Apache NetBeans +. 
Open the NetBeans project from the _ide/_ folder of the
+checked-out Cassandra directory using `File > Open Project` in NetBeans' menu.
+
+You should be able to:
+
+* Build code
+* Run code
+* Debug code
+* Profile code
+
+These capabilities use the _build.xml_ script.
+Build/Run/Debug Project are available via the Run/Debug menus, or the
+project context menu.
+Profile Project is available via the Profile menu. In the opened
+Profiler tab, click the green "Profile" button.
+Cassandra's code style is honored in _ide/nbproject/project.properties_.
+The `JAVA8_HOME` system environment variable must be set for NetBeans to execute the Run/Debug/Profile `ant` targets.
+
+=== Setting up Cassandra in Eclipse
+
+Eclipse is a popular open source IDE that can be used for Cassandra
+development. Various Eclipse environments are available from the
+https://www.eclipse.org/downloads/eclipse-packages/[download page]. The
+following guide was created with "Eclipse IDE for Java Developers".
+
+These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6)
+using Cassandra versions 2.1 through 3.x.
+
+First, clone and build Cassandra.
+Then execute the following steps to use Eclipse.
+
+[arabic]
+. Generate the Eclipse files using ant:
+
+[source, plaintext]
+----
+ant generate-eclipse-files
+----
+[arabic, start=2]
+. Start Eclipse.
+. Open the Eclipse project from the checked-out Cassandra directory using
+`File > Import > Existing Projects into Workspace`, then select the git directory.
+Select the correct branch, such as `cassandra-trunk`.
+. Confirm and select `Finish` to import your project.
+
+Find the project in `Package Explorer` or `Project Explorer`.
+You should not get errors if you build the project automatically using these
+instructions. Don't set up the project before generating the files with `ant`.
+
+You should be able to:
+
+* Run/debug defaults for JUnit
+* Run/debug Cassandra
+* Study Cassandra code style
+
+Unit tests can be run from Eclipse by simply right-clicking the class
+file or method and selecting `Run As > JUnit Test`.
+Tests can be debugged by defining breakpoints (double-click the line number) and
+selecting `Debug As > JUnit Test`.
+
+Alternatively, all unit tests can be run from the command line as
+described in xref::testing.adoc[testing].
+
+==== Debugging Cassandra Using Eclipse
+
+There are two ways to start a local Cassandra instance with Eclipse for debugging.
+You can either start Cassandra from the command line or from within Eclipse.
+
+===== Debugging Cassandra started at the command line
+
+[arabic]
+. Set an environment variable to define remote debugging options for the
+JVM: `export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`
+. Start Cassandra by executing `./bin/cassandra`
+
+Next, connect to the running Cassandra process by:
+
+[arabic, start=3]
+. In Eclipse, select `Run > Debug Configurations`.
+
+image::eclipse_debug0.png[image]
+
+[arabic, start=4]
+. Create a new remote application.
+
+image::eclipse_debug1.png[image]
+
+[arabic, start=5]
+. Configure https://docs.oracle.com/javase/8/docs/technotes/guides/troubleshoot/introclientissues005.html[connection settings] by specifying a name and port 1414.
+Confirm `Debug` and start debugging.
+
+image::eclipse_debug2.png[image]
+
+
+===== Debugging Cassandra started from Eclipse
+
+Cassandra can also be started directly from Eclipse if you don't want to
+use the command line.
+
+[arabic, start=1]
+. In Eclipse, select `Run > Run Configurations`.
+
+image::eclipse_debug3.png[image]
+
+[arabic, start=2]
+. Create a new application.
+
+image::eclipse_debug4.png[image]
+
+[arabic, start=3]
+. Specify the name, project, and main class `org.apache.cassandra.service.CassandraDaemon`.
+
+image::eclipse_debug5.png[image]
+
+[arabic, start=4]
+. Configure additional JVM-specific parameters that will start Cassandra
+with some of the settings created by the regular startup script. Change
+heap-related values as needed.
+
+[source, plaintext]
+----
+-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true
+----
+
+image::eclipse_debug6.png[image]
+
+[arabic, start=5]
+. Confirm `Debug` and you should see the output of Cassandra starting up in the Eclipse console.
+
+You can now set breakpoints and start debugging!
diff --git a/site-content/source/modules/ROOT/pages/development/index.adoc b/site-content/source/modules/ROOT/pages/development/index.adoc
new file mode 100644
index 000000000..6af334c97
--- /dev/null
+++ b/site-content/source/modules/ROOT/pages/development/index.adoc
@@ -0,0 +1,35 @@
+= Contributing to Cassandra
+
+* xref:development/gettingstarted.adoc[Getting started]
+* xref:development/ide.adoc[IDE]
+* xref:development/testing.adoc[Testing]
+* xref:development/code_style.adoc[Code style]
+* xref:development/how_to_commit.adoc[How to commit]
+* xref:development/how_to_review.adoc[How to review]
+* xref:development/patches.adoc[Patches]
+* xref:development/ci.adoc[CI]
+* xref:development/dependencies.adoc[Dependencies]
+* xref:development/documentation.adoc[Documentation]
+* xref:development/release_process.adoc[Release process]
+
+include::page$development/gettingstarted.adoc[]
+
+include::page$development/ide.adoc[]
+
+include::page$development/testing.adoc[]
+
+include::page$development/code_style.adoc[]
+
+include::page$development/how_to_commit.adoc[]
+
+include::page$development/how_to_review.adoc[]
+
+include::page$development/patches.adoc[]
+
+include::page$development/ci.adoc[]
+
+include::page$development/dependencies.adoc[]
+
+include::page$development/documentation.adoc[]
+
+include::page$development/release_process.adoc[]
diff --git a/site-content/source/modules/ROOT/pages/development/patches.adoc b/site-content/source/modules/ROOT/pages/development/patches.adoc
new file mode 100644
index 000000000..81a352a5b
--- /dev/null
+++ b/site-content/source/modules/ROOT/pages/development/patches.adoc
@@ -0,0 +1,208 @@
+== Contributing Code Changes
+
+=== Choosing What to Work on
+
+Submitted patches can include bug fixes, changes to the Java code base,
+improvements for tooling (both Java and Python), documentation, testing,
+or any other changes that require changing the code base. Although the
+process of contributing code is always the same, the amount of work and
+time it takes to get a patch accepted also depends on the kind of issue
+you're addressing.
+
+As a general rule of thumb:::
+ * Major new features and significant changes to the code base will
+ likely not be accepted without deeper discussion within the
+ http://cassandra.apache.org/community/[developer community].
+ * Bug fixes take higher priority compared to features.
+ * The extent to which tests are required depends on how likely your
+ changes are to affect the stability of Cassandra in production. Tooling
+ changes require fewer tests than storage engine changes.
+ * Less complex patches will be reviewed faster; consider breaking up + an issue into individual tasks and contributions that can be reviewed + separately. + +[TIP] +.Hint +==== +Not sure what to work? Just pick an issue marked as +https://issues.apache.org/jira/issues/?jql=project%20%3D%20CASSANDRA%20AND%20Complexity%20%3D%20%22Low%20Hanging%20Fruit%22%20and%20status%20!%3D%20resolved[Low +Hanging Fruit] Complexity in JIRA, which flags issues that often turn out to be good starter tasks for beginners. +==== + +=== Before You Start Coding + +Although contributions are highly appreciated, we do not guarantee that +every contribution will become a part of Cassandra. Therefore, it's +generally a good idea to first get some feedback on the thing you plan +to do, especially about any new features or major changes to the +code base. You can reach out to other developers on the mailing list or +`Slack`. + +You should also:: + * Avoid redundant work by searching for already reported issues in + https://issues.apache.org/jira/browse/CASSANDRA[JIRA] to work on. + * Create a new issue early in the process describing what you're + working on - before finishing your patch. + * Link related JIRA issues with your own ticket to provide a better + context. + * Update your ticket from time to time by giving feedback on your + progress and link a GitHub WIP branch with your current code. + * Ping people who you actively like to ask for advice on JIRA by + https://confluence.atlassian.com/doc/mentions-251725350.html[mentioning users]. + +There are also some fixed rules that you need to be aware:: + * Patches will only be applied to branches by following the release + model + * Code must be testable + * Code must follow the `code_style` convention + * Changes must not break compatibility between different Cassandra + versions + * Contributions must be covered by the Apache License + +==== Choosing the Right Branches to Work on + +There are currently multiple Cassandra versions maintained in individual +branches: + +[cols=",",options="header",] +|=== +|Version |Policy +|4.0 |Code freeze (see below) +|3.11 |Critical bug fixes only +|3.0 |Critical bug fixes only +|2.2 |Critical bug fixes only +|2.1 |Critical bug fixes only +|=== + +Corresponding branches in git are easy to recognize as they are named +`cassandra-` (e.g. `cassandra-3.0`). The `trunk` branch is an +exception, as it contains the most recent commits from all other +branches and is used for creating new branches for future tick-tock +releases. + +==== 4.0 Code Freeze + +Patches for new features are currently not accepted for 4.0 or any +earlier versions. +All efforts should focus on stabilizing the 4.0 branch before the first +official release. During that time, only the following patches will be +considered for acceptance: + +* Bug fixes +* Measurable performance improvements +* Changes not distributed as part of the release such as: +* Testing related improvements and fixes +* Build and infrastructure related changes +* Documentation + +==== Bug Fixes + +Creating patches for bug fixes is a bit more complicated and will +depend on how many different versions of Cassandra are affected. In each +case, the order for merging such changes will be `cassandra-2.1` -> +`cassandra-2.2` -> `cassandra-3.0` -> `cassandra-3.x` -> `trunk`. +But don't worry, merging from 2.1 would be the worst case for bugs that +affect all currently supported versions, an uncommon event. As a +contributor, you're also not expected to provide a single patch for each +version. 
What you need to do however is: + +* Be clear about which versions you could verify to be affected by the +bug +* For 2.x: ask if a bug qualifies to be fixed in this release line, as +this may be handled on case by case bases +* If possible, create a patch against the lowest version in the branches +listed above (e.g. if you found the bug in 3.9 you should try to fix it +already in 3.0) +* Test if the patch can be merged cleanly across branches in the +direction listed above +* Be clear which branches may need attention by the committer or even +create custom patches for those if you can + +=== Creating a Patch + +So you've finished coding and the great moment arrives: it's time to +submit your patch! + +[arabic] +. Create a branch for your changes if you haven't done already. Many +contributors name their branches based on ticket number and Cassandra +version, e.g. `git checkout -b 12345-3.0` +. Verify that you follow Cassandra's `code_style` +. Make sure all tests (including yours) pass using ant as described in +`testing`. If you suspect a test failure is unrelated to your change, it +may be useful to check the test's status by searching the issue tracker +or looking at https://builds.apache.org/[CI] results for the relevant +upstream version. Note that the full test suites take many hours to +complete, so it is common to only run specific relevant tests locally +before uploading a patch. Once a patch has been uploaded, the reviewer +or committer can help setup CI jobs to run the full test suites. +. Consider going through the `how_to_review` for your code. This will +help you to understand how others will consider your change for +inclusion. +. Don’t make the committer squash commits for you in the root branch +either. Multiple commits are fine - and often preferable - during review +stage, especially for incremental review, but once +1d, do either: + +[loweralpha] +. Attach a patch to JIRA with a single squashed commit in it (per +branch), or +. Squash the commits in-place in your branches into one + +[arabic, start=6] +. Include a CHANGES.txt entry (put it at the top of the list), and +format the commit message appropriately in your patch as below. Please +note that only user-impacting items +https://lists.apache.org/thread.html/rde1128131a621e43b0a9c88778398c053a234da0f4c654b82dcbbe0e%40%3Cdev.cassandra.apache.org%3E[should] +be listed in CHANGES.txt. If you fix a test that does not affect users +and does not require changes in runtime code, then no CHANGES.txt entry +is necessary. ++ +[source,none] +---- + + +patch by ; reviewed by for CASSANDRA-##### +---- +[arabic, start=7] +. When you're happy with the result, create a patch: ++ +[source,none] +---- +git add +git commit -m '' +git format-patch HEAD~1 +mv (e.g. 12345-trunk.txt, 12345-3.0.txt) +---- + +Alternatively, many contributors prefer to make their branch available +on GitHub. In this case, fork the Cassandra repository on GitHub and +push your branch: + +[source,none] +---- +git push --set-upstream origin 12345-3.0 +---- + +[arabic, start=8] +. To make life easier for your reviewer/committer, you may want to make +sure your patch applies cleanly to later branches and create additional +patches/branches for later Cassandra versions to which your original +patch does not apply cleanly. That said, this is not critical, and you +will receive feedback on your patch regardless. +. Attach the newly generated patch to the ticket/add a link to your +branch and click "Submit Patch" at the top of the ticket. 
This will move +the ticket into "Patch Available" status, indicating that your +submission is ready for review. +. Wait for other developers or committers to review it and hopefully +1 +the ticket (see `how_to_review`). If your change does not receive a +1, +do not be discouraged. If possible, the reviewer will give suggestions +to improve your patch or explain why it is not suitable. +. If the reviewer has given feedback to improve the patch, make the +necessary changes and move the ticket into "Patch Available" once again. + +Once the review process is complete, you will receive a +1. Wait for a +committer to commit it. Do not delete your branches immediately after +they’ve been committed - keep them on GitHub for a while. Alternatively, +attach a patch to JIRA for historical record. It’s not that uncommon for +a committer to mess up a merge. In case of that happening, access to the +original code is required, or else you’ll have to redo some of the work. diff --git a/site-content/source/modules/ROOT/pages/development/release_process.adoc b/site-content/source/modules/ROOT/pages/development/release_process.adoc new file mode 100644 index 000000000..1ac169108 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/development/release_process.adoc @@ -0,0 +1,245 @@ +== Release Process + +The steps for Release Managers to create, vote, and publish releases for +Apache Cassandra. + +While a committer can perform the initial steps of creating and calling +a vote on a proposed release, only a PMC member can complete the process +of publishing and announcing the release. + +=== Prerequisites + +Background docs:: + * http://www.apache.org/legal/release-policy.html[ASF Release Policy] + * http://www.apache.org/dev/release-distribution[ASF Release + Distribution Policy] + * http://www.eu.apache.org/dev/release-publishing.html[ASF Release + Best Practices] + +A debian based linux OS is required to run the release steps from. +Debian-based distros provide the required RPM, dpkg and repository +management tools. + +==== Create and publish your GPG key + +To create a GPG key, follow the +http://www.apache.org/dev/openpgp.html[guidelines]. The key must be 4096 +bit RSA. Include your public key in: + +[source,none] +---- +https://dist.apache.org/repos/dist/release/cassandra/KEYS +---- + +Publish your GPG key in a PGP key server, such as +http://pgp.mit.edu/[MIT Keyserver]. + +==== Bintray account with access to Apache organisation + +Publishing a successfully voted upon release requires bintray access to +the Apache organisation. Please verify that you have a bintray account +and the Apache organisation is listed +https://bintray.com/profile/edit/organizations[here]. + +=== Create Release Artifacts + +Any committer can perform the following steps to create and call a vote +on a proposed release. + +Check that there are no open urgent jira tickets currently being worked +on. Also check with the PMC that there's security vulnerabilities +currently being worked on in private.' Current project habit is to check +the timing for a new release on the dev mailing lists. 
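+
+As a quick reference, below is a minimal sketch of the GPG prerequisite
+steps described above, assuming GnuPG 2.x and Subversion are installed on
+the release machine; the key id, and the choice of keyserver, are
+placeholders to adapt to your own setup rather than fixed project values.
+
+[source,none]
+----
+# Generate a new key (pick "RSA and RSA" and 4096 bits when prompted)
+gpg --full-generate-key
+
+# Look up the id of the key you just created
+gpg --list-secret-keys --keyid-format LONG
+
+# Append the ASCII-armoured public key to the project KEYS file
+svn checkout https://dist.apache.org/repos/dist/release/cassandra/ cassandra-dist
+gpg --armor --export <your-key-id> >> cassandra-dist/KEYS
+# (commit the updated KEYS file, or ask a PMC member to do so)
+
+# Publish the public key to a key server
+gpg --keyserver pgp.mit.edu --send-keys <your-key-id>
+----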
+ +==== Perform the Release + +Run the following commands to generate and upload release artifacts, to +the ASF nexus staging repository and dev distribution location: + +[source,none] +---- +cd ~/git +git clone https://github.com/apache/cassandra-builds.git +git clone https://github.com/apache/cassandra.git + +# Edit the variables at the top of the `prepare_release.sh` file +edit cassandra-builds/cassandra-release/prepare_release.sh + +# Ensure your 4096 RSA key is the default secret key +edit ~/.gnupg/gpg.conf # update the `default-key` line +edit ~/.rpmmacros # update the `%gpg_name ` line + +# Ensure DEBFULLNAME and DEBEMAIL is defined and exported, in the debian scripts configuration +edit ~/.devscripts + +# The prepare_release.sh is run from the actual cassandra git checkout, +# on the branch/commit that we wish to tag for the tentative release along with version number to tag. +cd cassandra +git switch cassandra- + +# The following cuts the release artifacts (including deb and rpm packages) and deploy to staging environments +../cassandra-builds/cassandra-release/prepare_release.sh -v +---- + +Follow the prompts. + +If building the deb or rpm packages fail, those steps can be repeated +individually using the [.title-ref]#-d# and [.title-ref]#-r# flags, +respectively. + +=== Call for a Vote + +Fill out the following email template and send to the dev mailing list: + +[source,none] +---- +I propose the following artifacts for release as . + +sha1: + +Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/-tentative + +Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-/org/apache/cassandra/apache-cassandra// + +Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-/ + +The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/ + +The vote will be open for 72 hours (longer if needed). + +[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=-tentative +[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=-tentative +---- + +=== Post-vote operations + +Any PMC member can perform the following steps to formalize and publish +a successfully voted release. + +==== Publish Artifacts + +Run the following commands to publish the voted release artifacts: + +[source,none] +---- +cd ~/git +# edit the variables at the top of the `finish_release.sh` file +edit cassandra-builds/cassandra-release/finish_release.sh + +# After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout, +# on the tentative release tag that we wish to tag for the final release version number tag. +cd ~/git/cassandra/ +git checkout -tentative +../cassandra-builds/cassandra-release/finish_release.sh -v +---- + +If successful, take note of the email text output which can be used in +the next section "Send Release Announcement". The output will also list +the next steps that are required. + +==== Promote Nexus Repository + +* Login to https://repository.apache.org[Nexus repository] again. +* Click on "Staging" and then on the repository with id +"cassandra-staging". +* Find your closed staging repository, right click on it and choose +"Promote". +* Select the "Releases" repository and click "Promote". +* Next click on "Repositories", select the "Releases" repository and +validate that your artifacts exist as you expect them. 
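+
+Before moving on to publishing the distribution packages, it can be worth a
+quick sanity check that the uploaded artifacts, signatures, and checksums all
+line up. The following is only an illustrative sketch; the `<version>`
+placeholder and the exact dev distribution paths should be adjusted to the
+release at hand.
+
+[source,none]
+----
+# Fetch one of the staged artifacts along with its signature and checksum
+curl -LO https://dist.apache.org/repos/dist/dev/cassandra/<version>/apache-cassandra-<version>-bin.tar.gz
+curl -LO https://dist.apache.org/repos/dist/dev/cassandra/<version>/apache-cassandra-<version>-bin.tar.gz.asc
+curl -LO https://dist.apache.org/repos/dist/dev/cassandra/<version>/apache-cassandra-<version>-bin.tar.gz.sha256
+
+# Verify the signature against the project KEYS file
+curl -s https://dist.apache.org/repos/dist/release/cassandra/KEYS | gpg --import
+gpg --verify apache-cassandra-<version>-bin.tar.gz.asc apache-cassandra-<version>-bin.tar.gz
+
+# Compare the published checksum with a locally computed one
+sha256sum apache-cassandra-<version>-bin.tar.gz
+cat apache-cassandra-<version>-bin.tar.gz.sha256
+----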
+ +Publish the Bintray Uploaded Distribution Packages +-------------------------------------------------- + +Log into bintray and publish the uploaded artifacts. + +=== Update and Publish Website + +See https://svn.apache.org/repos/asf/cassandra/site/src/README[docs] for +building and publishing the website. + +Also update the CQL doc if appropriate. + +=== Release version in JIRA + +Release the JIRA version. + +* In JIRA go to the version that you want to release and release it. +* Create a new version, if it has not been done before. + +=== Update to Next Development Version + +Update the codebase to point to the next development version: + +[source,none] +---- +cd ~/git/cassandra/ +git checkout cassandra- +edit build.xml # update ` ` +edit debian/changelog # add entry for new version +edit CHANGES.txt # add entry for new version +git commit -m "Increment version to " build.xml debian/changelog CHANGES.txt + +# …and forward merge and push per normal procedure +---- + +=== Wait for Artifacts to Sync + +Wait for the artifacts to sync at +https://downloads.apache.org/cassandra/ + +=== Send Release Announcement + +Fill out the following email template and send to both user and dev +mailing lists: + +[source,none] +---- +The Cassandra team is pleased to announce the release of Apache Cassandra version . + +Apache Cassandra is a fully distributed database. It is the right choice +when you need scalability and high availability without compromising +performance. + + http://cassandra.apache.org/ + +Downloads of source and binary distributions are listed in our download +section: + + http://cassandra.apache.org/download/ + +This version is release[1] on the series. As always, +please pay attention to the release notes[2] and let us know[3] if you +were to encounter any problem. + +Enjoy! + +[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb= +[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb= +[3]: https://issues.apache.org/jira/browse/CASSANDRA +---- + +Update Slack Cassandra topic --------------------------- + +Update topic in `cassandra` `Slack room `:: + /topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, + 2.1.21 | ask, don't ask to ask + +=== Tweet from @Cassandra + +Tweet the new release, from the @Cassandra account + +=== Delete Old Releases + +As described in +http://www.apache.org/dev/release.html#when-to-archive[When to Archive]. + +An example of removing old releases: + +[source,none] +---- +svn co https://dist.apache.org/repos/dist/release/cassandra/ cassandra-dist +svn rm debian/pool/main/c/cassandra/* +svn st +# check and commit +---- diff --git a/site-content/source/modules/ROOT/pages/development/testing.adoc b/site-content/source/modules/ROOT/pages/development/testing.adoc new file mode 100644 index 000000000..71ddd5d74 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/development/testing.adoc @@ -0,0 +1,115 @@ +== Testing + +Creating tests is one of the most important and also most difficult +parts of developing Cassandra. There are different ways to test your +code depending on what you're working on. + +=== Unit Testing + +The simplest test to write for Cassandra code is a unit test. +Cassandra uses JUnit as a testing framework and test cases +can be found in the `test/unit` directory. +Ideally, you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). 
+Unfortunately, this is not always possible, because Cassandra doesn’t have a very mock-friendly code base.
+Often you’ll find yourself in a situation where you have to use an embedded Cassandra instance that your test interacts with.
+If you want to make use of CQL in your test, you can extend CQLTester and use
+some of the convenient helper methods, as shown here:
+
+[source,java]
+----
+@Test
+public void testBatchAndList() throws Throwable
+{
+    createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
+    execute("BEGIN BATCH " +
+            "UPDATE %1$s SET l = l + [ 1 ] WHERE k = 0; " +
+            "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
+            "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
+            "APPLY BATCH");
+
+    assertRows(execute("SELECT l FROM %s WHERE k = 0"),
+               row(list(1, 2, 3)));
+}
+----
+
+Unit tests can be run using the command `ant test`. Both test suites and
+individual tests can be executed.
+
+Test suite:
+[source, plaintext]
+----
+ant test -Dtest.name=<simple_classname>
+----
+For example, replace `<simple_classname>` with `SimpleQueryTest` to test all the methods in `org.apache.cassandra.cql3.SimpleQueryTest`.
+
+Individual test:
+[source, plaintext]
+----
+ant testsome -Dtest.name=<fully_qualified_classname> -Dtest.methods=<testmethod1>[,testmethod2]
+----
+
+For example, replace `<fully_qualified_classname>` with `org.apache.cassandra.cql3.SimpleQueryTest`
+and `<testmethod1>` with `testStaticCompactTables` to test just the one method.
+
+
+If you get the following error for a unit test, install the `ant-optional` package
+because you need the `JUnitTask` class:
+
+[source,none]
+----
+Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found:
+org/apache/tools/ant/taskdefs/optional/junit/JUnitTask using the classloader
+AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar]
+----
+
+Tests that consume a significant amount of time during execution can be found
+in the `test/long` directory.
+They can be executed as a regular JUnit test or standalone program.
+Except for the execution time, there’s nothing
+really special about them, but `ant` will only execute these tests with the
+`ant long-test` target.
+
+=== DTests
+
+One way of doing integration or system testing at larger scale is
+using https://github.com/apache/cassandra-dtest[dtest] (Cassandra distributed test).
+These dtests automatically set up Cassandra clusters with certain configurations and simulate use cases you want to test.
+DTests are Python scripts that use `ccmlib` from the https://github.com/pcmanus/ccm[ccm] project.
+The clusters set up with dtests run like ad-hoc clusters executed with `ccm` on your local machine.
+
+Once a cluster is initialized, the http://datastax.github.io/python-driver/installation.html[Python driver] is used to interact with the nodes, manipulate the file system, analyze logs, or change individual nodes.
+
+The https://builds.apache.org/[CI server] uses dtests against new patches to prevent regression bugs.
+Committers can set up build branches and use the CI environment to run tests for your submitted patches.
+Read more on the motivation behind http://www.datastax.com/dev/blog/cassandra-testing-improvements-for-developer-convenience-and-confidence[continuous integration].
+
+The best way to learn how to write dtests is probably by reading the
+introduction "http://www.datastax.com/dev/blog/how-to-write-a-dtest[How
+to Write a Dtest]".
+Looking at existing, recently updated tests in the project is another good activity.
+New tests must follow certain +https://github.com/apache/cassandra-dtest/blob/master/CONTRIBUTING.md[style +conventions] that are checked before contributions are accepted. +In contrast to Cassandra, dtest issues and pull requests are managed on +github, therefore you should make sure to link any created dtests in your +Cassandra ticket and also refer to the ticket number in your dtest PR. + +Creating a good dtest can be tough, but it should not prevent you from +submitting patches! +Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. +In most cases a reviewer or committer will able to support you, and in some cases they may offer to write a dtest for you. + +=== Performance Testing + +Performance tests for Cassandra are a special breed of tests that are +not part of the usual patch contribution process. In fact, many people +contribute a lot of patches to Cassandra without ever running performance +tests. However, they are important when working on performance +improvements; such improvements must be measurable. + +Several tools exist for running performance tests. Here are a few to investigate: + +* `cassandra-stress`: built-in Cassandra stress tool +* https://github.com/thelastpickle/tlp-stress[tlp-stress] +* https://github.com/nosqlbench/nosqlbench[NoSQLBench] +* https://github.com/datastax/cstar_perf[cstar_perf] diff --git a/site-content/source/modules/ROOT/pages/docdev/index.adoc b/site-content/source/modules/ROOT/pages/docdev/index.adoc new file mode 100644 index 000000000..f74d63415 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/docdev/index.adoc @@ -0,0 +1,3 @@ += Contributing to the Cassandra documentation + +* xref:development/documentation.adoc[Documentation] diff --git a/site-content/source/modules/ROOT/pages/download.adoc b/site-content/source/modules/ROOT/pages/download.adoc new file mode 100644 index 000000000..6e381db12 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/download.adoc @@ -0,0 +1,40 @@ += Download Cassandra +:url-apache-closer: https://www.apache.org/dyn/closer.lua/cassandra +:url-downloads-cassandra: https://downloads.apache.org/cassandra +:latest-name: 4.0-beta4 +:3_11-name: 3.11.9 +:3_0-name: 3.0.23 +:2_2-name: 2.2.19 +:2_1-name: 2.1.22 +:latest-date: 2020-12-30 +:3_11-date: 2020-11-04 +:3_0-date: 2020-11-04 +:2_2-date: 2020-11-04 +:2_1-date: 2020-08-31 + + +== Latest Beta Version + +Download the latest Apache Cassandra 4.0 beta release: +{url-apache-closer}/{latest-name}/apache-cassandra-{latest-name}-bin.tar.gz[{latest-name}] +({url-downloads-cassandra}/{latest-name}/apache-cassandra-{latest-name}-bin.tar.gz.asc[pgp], +{url-downloads-cassandra}/{latest-name}/apache-cassandra-{latest-name}-bin.tar.gz.sha256[sha256] and +{url-downloads-cassandra}/{latest-name}/apache-cassandra-{latest-name}-bin.tar.gz.sha512[sha512]), released on {latest-date}. + +== Latest Stable Version + +Download the latest Apache Cassandra 3.11 release: {url-apache-closer}/{3_11-name}/apache-cassandra-{3_11-name}-bin.tar.gz[{3_11-name}] ({url-downloads-cassandra}/{3_11-name}/apache-cassandra-{3_11-name}-bin.tar.gz.asc[pgp], {url-downloads-cassandra}/{3_11-name}/apache-cassandra-{3_11-name}-bin.tar.gz.sha256[sha256] and {url-downloads-cassandra}/{3_11-name}/apache-cassandra-{3_11-name}-bin.tar.gz.sha512[sha512]), released on {3_11-date}. + +== Older Supported Releases + +The following older Cassandra releases are still supported: + +* Apache Cassandra 3.0 is supported until *6 months after 4.0 release +(date TBD)*. 
The latest release is {url-apache-closer}/{3_0-name}/apache-cassandra-{3_0-name}-bin.tar.gz[{3_0-name}] ({url-downloads-cassandra}/{3_0-name}/apache-cassandra-{3_0-name}-bin.tar.gz.asc[pgp], {url-downloads-cassandra}/{3_0-name}/apache-cassandra-{3_0-name}-bin.tar.gz.sha256[sha256] and {url-downloads-cassandra}/{3_0-name}/apache-cassandra-{3_0-name}-bin.tar.gz.sha512[sha512]), released on {3_0-date}. +* Apache Cassandra 2.2 is supported until *4.0 release (date TBD)*. The +latest release is {url-apache-closer}/{2_2-name}/apache-cassandra-{2_2-name}-bin.tar.gz[{2_2-name}] ({url-downloads-cassandra}/{2_2-name}/apache-cassandra-{2_2-name}-bin.tar.gz.asc[pgp], {url-downloads-cassandra}/{2_2-name}/apache-cassandra-{2_2-name}-bin.tar.gz.sha256[sha256] and {url-downloads-cassandra}/{2_2-name}/apache-cassandra-{2_2-name}-bin.tar.gz.sha512[sha512]), released on {2_2-date}. +* Apache Cassandra 2.1 is supported until *4.0 release (date TBD)* with +*critical fixes only*. The latest release is {url-apache-closer}/{2_1-name}/apache-cassandra-{2_1-name}-bin.tar.gz[{2_1-name}] ({url-downloads-cassandra}/{2_1-name}/apache-cassandra-{2_1-name}-bin.tar.gz.asc[pgp], {url-downloads-cassandra}/{2_1-name}/apache-cassandra-{2_1-name}-bin.tar.gz.sha256[sha256] and {url-downloads-cassandra}/{2_1-name}/apache-cassandra-{2_1-name}-bin.tar.gz.sha512[sha512]), released on {2_1-date}. + +Older (unsupported) versions of Cassandra are +http://archive.apache.org/dist/cassandra/[archived here]. diff --git a/site-content/source/modules/ROOT/pages/glossary.adoc b/site-content/source/modules/ROOT/pages/glossary.adoc new file mode 100644 index 000000000..f7fa0b31d --- /dev/null +++ b/site-content/source/modules/ROOT/pages/glossary.adoc @@ -0,0 +1,38 @@ +Cassandra:: + Apache Cassandra is a distributed, high-available, eventually + consistent NoSQL open-source database. +cluster:: + Two or more database instances that exchange messages using the gossip + protocol. +commitlog:: + A file to which the database appends changed data for recovery in the + event of a hardware failure. +datacenter:: + A group of related nodes that are configured together within a cluster + for replication and workload segregation purposes. Not necessarily a + separate location or physical data center. Datacenter names are + case-sensitive and cannot be changed. +gossip:: + A peer-to-peer communication protocol for exchanging location and + state information between nodes. +hint:: + One of the three ways, in addition to read-repair and full/incremental + anti-entropy repair, that Cassandra implements the eventual + consistency guarantee that all updates are eventually received by all + replicas. +listen address:: + Address or interface to bind to and tell other Cassandra nodes to + connect to +seed node:: + A seed node is used to bootstrap the gossip process for new nodes + joining a cluster. To learn the topology of the ring, a joining node + contacts one of the nodes in the -seeds list in cassandra. yaml. The + first time you bring up a node in a new cluster, only one node is the + seed node. +snitch:: + The mapping from the IP addresses of nodes to physical and virtual + locations, such as racks and data centers. There are several types of + snitches. The type of snitch affects the request routing mechanism. +SSTable:: + An SSTable provides a persistent,ordered immutable map from keys to + values, where both keys and values are arbitrary byte strings. 
diff --git a/site-content/source/modules/ROOT/pages/index.adoc b/site-content/source/modules/ROOT/pages/index.adoc new file mode 100644 index 000000000..7e17cf432 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/index.adoc @@ -0,0 +1,51 @@ += Welcome to Apache Cassandra's documentation! + +:description: Starting page for Apache Cassandra documentation. +:keywords: Apache, Cassandra, NoSQL, database +:cass-url: http://cassandra.apache.org +:cass-contrib-url: https://wiki.apache.org/cassandra/HowToContribute + +This is the official documentation for {cass-url}[Apache Cassandra]. +If you would like to contribute to this documentation, you are welcome +to do so by submitting your contribution like any other patch following +{cass-contrib-url}[these instructions]. + +== Main documentation + +[cols="a,a"] +|=== + +| xref:Cassandra:cassandra:getting_started/index.adoc[Getting started] | Newbie starting point + +| xref:Cassandra:cassandra:new/index.adoc[New in 4.0] | What's new in Cassandra 4.0 + +| xref:Cassandra:cassandra:architecture/index.adoc[Architecture] | Cassandra's big picture + +| xref:Cassandra:cassandra:data_modeling/index.adoc[Data modeling] | Hint: it's not relational + +| xref:Cassandra:cassandra:cql/index.adoc[Cassandra Query Language (CQL)] | CQL reference documentation + +| xref:Cassandra:cassandra:configuration/index.adoc[Configuration] | Cassandra's handles and knobs + +| xref:Cassandra:cassandra:operating/index.adoc[Operation] | The operator's corner + +| xref:Cassandra:cassandra:tools/index.adoc[Tools] | cqlsh, nodetool, and others + +| xref:Cassandra:cassandra:troubleshooting/index.adoc[Troubleshooting] | What to look for when you have a problem + +| xref:development/index.adoc[Development] | Learn how to improve Cassandra and contribute patches + +| xref:Cassandra:cassandra:faq/index.adoc[FAQ] | Frequently asked questions + +| xref:plugins/index.adoc[Plug-ins] | Third-party plug-ins + +|=== + +== Meta information +* xref:bugs.adoc[Reporting bugs] +* xref:contactus.adoc[Contact us] + +== The rest of the information +* xref:community.adoc[Community] +* xref:download.adoc[Download] +* xref:native_protocol.adoc[Native Protocols] diff --git a/site-content/source/modules/ROOT/pages/native_protocol.adoc b/site-content/source/modules/ROOT/pages/native_protocol.adoc new file mode 100644 index 000000000..93efd4176 --- /dev/null +++ b/site-content/source/modules/ROOT/pages/native_protocol.adoc @@ -0,0 +1,22 @@ += Native protocols + +== Native Protocol Version 3 + +[source, plaintext] +---- +include::example$TEXT/native_protocol_v3.spec[Version 3] +---- + +== Native Protocol Version 4 + +[source, plaintext] +---- +include::example$TEXT/native_protocol_v4.spec[Version 4] +---- + +== Native Protocol Version 5 + +[source, plaintext] +---- +include::example$TEXT/native_protocol_v5.spec[Version 5] +---- diff --git a/site-content/source/modules/ROOT/pages/third-party.adoc b/site-content/source/modules/ROOT/pages/third-party.adoc new file mode 100644 index 000000000..ffde41a1a --- /dev/null +++ b/site-content/source/modules/ROOT/pages/third-party.adoc @@ -0,0 +1,173 @@ += Third-party projects + +== Third-party projects + +=== Cassandra as-a-Service cloud offerings + +* https://aiven.io/cassandra[Aiven for Apache Cassandra]: Aiven for +Apache Cassandra is a fully managed NoSQL database, deployable in the +cloud of your choice. Snap it into your existing workflows with the +click of a button, automate away the mundane tasks, and focus on +building your core apps. 
Now running Cassandra 3.11. +* https://aws.amazon.com/keyspaces/[Amazon Keyspaces (for Apache +Cassandra)]: Scalable, highly available, and managed Apache +Cassandra–compatible database service. +* https://astra.datastax.com[DataStax Astra]: Cloud-native database +as-a-service built on Apache Cassandra™ complete with a free-tier and +CQL, REST and GraphQL APIs for faster development. Deployable in AWS and +GCP. +* https://www.instaclustr.com/solutions/managed-apache-cassandra[Instaclustr +Hosted & Managed Apache Cassandra as a Service]: Instaclustr provides a +fully managed and SOC 2 certified hosted & managed service for Apache +Cassandra® on AWS, Azure, GCP and IBM Cloud. + +=== Cassandra installation tools + +* https://hub.docker.com/_/cassandra[Docker community Cassandra images]: +Docker images for Apache Cassandra maintained by the Docker community +* https://downloads.datastax.com/#desktop[DataStax Desktop]: +Cross-platform (Windows, MacOSX, Linux) application that allows +developers to quickly explore Apache Cassandra™ with a few clicks on +their laptop, complete with tutorials and walkthroughs. +* https://github.com/thelastpickle/tlp-cluster[The Last Pickle +tlp-cluster]: tlp-cluster, a tool for launching Cassandra clusters in +AWS (DataStax) + +=== Cassandra tools + +* https://cassandra.link[cassandra.link]: Curated site with tools, along +with https://cassandra.tools[cassandra.tools]. +* https://github.com/criteo/cassandra_exporter[Cassandra Prometheus +Exporter]: Standalone application which exports Cassandra metrics +through a prometheus friendly endpoint +* https://downloads.datastax.com/#bulk-loader[DataStax Bulk Loader]: +Easy-to-use command line utility for loading and unloading JSON or CSV +files to/from the database, counting rows in tables and identifying +large partitions. +* https://github.com/datastax/metric-collector-for-apache-cassandra[DataStax +Metrics Collector for Cassandra]: Based on Collectd, aggregates OS and +Cassandra metrics along with diagnostic events to facilitate problem +resolution and remediation +* https://hackolade.com/nosqldb.html#cassandra[Hackolade]: Visual data +modeling tool for Cassandra +* https://github.com/thelastpickle/cassandra-medusa[The Last Pickle +Medusa]: Apache Cassandra Backup and Restore Tool (DataStax) +* https://github.com/thelastpickle/cassandra-reaper[The Last Pickle +Reaper]: Automated repair tool for Apache Cassandra (DataStax) +* https://github.com/thelastpickle/tlp-stress[The Last Pickle Cassandra +stress tool, tlp-stress]: A workload-centric stress tool for Apache +Cassandra. Designed for simplicity, no math degree required. (DataStax) +* https://github.com/nosqlbench/nosqlbench[NoSQLBench]: Pluggable +benchmarking suite for Cassandra and other distributed systems + +=== Cassandra Kubernetes operators + +* https://github.com/mesosphere/kudo-cassandra-operator[D2iQ Cassandra +Kudo Operator]: The KUDO Cassandra Operator makes it easy to deploy and +manage Apache Cassandra on Kubernetes. +* https://github.com/datastax/cass-operator[DataStax cass-operator]: The +DataStax Kubernetes Operator for Apache Cassandra +* https://github.com/instaclustr/cassandra-operator[Instaclustr +cassandra-operator]: The Cassandra operator manages Cassandra clusters +deployed to Kubernetes and automates tasks related to operating a +Cassandra cluster. 
+* https://orange-opensource.github.io/casskop/[Orange CassKop]: The +Orange Cassandra operator is a Kubernetes operator to automate +provisioning, management, autoscaling and operations of Apache Cassandra +clusters deployed to K8s. +* https://github.com/sky-uk/cassandra-operator[Sky Cassandra Operator]: +The Sky Cassandra Operator is a Kubernetes operator that manages +Cassandra clusters inside Kubernetes. + +=== Cassandra management sidecars + +* https://github.com/apache/cassandra-sidecar[Apache Cassandra +cassandra-sidecar]: Sidecar for the highly scalable Apache Cassandra +database, built as part of the Apache Cassandra project. +* https://github.com/datastax/management-api-for-apache-cassandra[DataStax +Management API for Apache Cassandra]: RESTful / Secure Management +Sidecar for Apache Cassandra +* https://github.com/datastax/spring-boot[DataStax Spring Boot]: Spring +Boot extension +* https://github.com/instaclustr/cassandra-sidecar[Instaclustr +cassandra-sidecar]: This repository is home of a sidecar for Apache +Cassandra database. Sidecar is meant to be run alongside of Cassandra +instance and sidecar talks to Cassandra via JMX. + +=== Developer Frameworks + +* http://r4fek.github.io/django-cassandra-engine/[Django Cassandra +Engine]: Cassandra backend for Django Framework that allows you to use +Cqlengine directly in your project. +* https://express-cassandra.readthedocs.io/en/stable/[Express +Cassandra]: Express-Cassandra is a Cassandra ORM/ODM/OGM for NodeJS with +Elassandra & JanusGraph Support. +* https://quarkus.io/guides/cassandra[Quarkus extension for Apache +Cassandra]: An Apache Cassandra(R) extension for Quarkus. Quarkus is A +Kubernetes Native Java stack tailored for OpenJDK HotSpot and GraalVM, +crafted from the best of breed Java libraries and standards. +* https://spring.io/projects/spring-data-cassandra[Spring Data +Cassandra]: With the power to stay at a high level with annotated POJOs, +or at a low level with high performance data ingestion capabilities, the +Spring Data for Apache Cassandra templates are sure to meet every +application need +* https://www.testcontainers.org/modules/databases/cassandra/[TestContainers]: +Testcontainers is a Java library that supports JUnit tests, providing +lightweight, throwaway instances of common databases, Selenium web +browsers, or anything else that can run in a Docker container. + +=== Cassandra connectors + +==== Apache Kafka + +* https://www.confluent.io/hub/confluentinc/kafka-connect-cassandra[Confluent +Connect Cassandra]: The Confluent Cassandra Sink Connector is used to +move messages from Kafka into Apache Cassandra. +* https://downloads.datastax.com/#akc[DataStax Sink Connector]: The +DataStax Apache Kafka Connector automatically takes records from Kafka +topics and writes them to a DataStax Enterprise or Apache Cassandra™ +database. This sink connector is deployed in the Kafka Connect framework +and removes the need to build a custom solution to move data between +these two systems. +* https://github.com/debezium/debezium-incubator/tree/master/debezium-connector-cassandra[Debezium +Source Connector]: This connector is currently in incubating state, and +Cassandra is different from the other Debezium connectors since it is +not implemented on top of the Kafka Connect framework. +* https://docs.lenses.io/connectors/sink/cassandra.html[Lenses Sink +Connector]: The Cassandra Sink allows you to write events from Kafka to +Cassandra. 
The connector converts the value from the Kafka Connect +SinkRecords to JSON and uses Cassandra’s JSON insert functionality to +insert the rows. The task expects pre-created tables in Cassandra. +* https://docs.lenses.io/connectors/source/cassandra.html[Lenses Source +Connector]: Kafka Connect Cassandra is a Source Connector for reading +data from Cassandra and writing to Kafka. + +==== Apache Spark + +* https://github.com/datastax/spark-cassandra-connector[DataStax Spark +Cassandra Connector]: This library lets you expose Cassandra tables as +Spark RDDs and Datasets/DataFrames, write Spark RDDs and +Datasets/DataFrames to Cassandra tables, and execute arbitrary CQL +queries in your Spark applications. + +==== Apache Flink + +* https://ci.apache.org/projects/flink/flink-docs-stable/dev/connectors/cassandra.html[Flink +Sink Connector]: This connector provides sinks that writes data into a +Apache Cassandra database. + +==== Apache Pulsar + +* https://pulsar.apache.org/docs/en/io-quickstart/#connect-pulsar-to-cassandra[Pulsar +Sink Connector]: The Pulsar Cassandra Sink connector is used to write +messages to a Cassandra Cluster. + +==== Professional Support + +* https://luna.datastax.com/[DataStax Luna], +https://www.datastax.com/services/support/premium-support[DataStax +Premium Support], +https://www.datastax.com/services/professional-services[DataStax +Professional Services] +* https://www.instaclustr.com/services/[Instacluster] +* https://opencredo.com/about-us/[Open Credo] diff --git a/site-ui/.eslintrc b/site-ui/.eslintrc new file mode 100644 index 000000000..fc504896e --- /dev/null +++ b/site-ui/.eslintrc @@ -0,0 +1,14 @@ +{ + "extends": "standard", + "rules": { + "arrow-parens": ["error", "always"], + "comma-dangle": ["error", { + "arrays": "always-multiline", + "objects": "always-multiline", + "imports": "always-multiline", + "exports": "always-multiline" + }], + "max-len": [1, 120, 2], + "spaced-comment": "off" + } +} diff --git a/site-ui/.stylelintrc b/site-ui/.stylelintrc new file mode 100644 index 000000000..344318f3c --- /dev/null +++ b/site-ui/.stylelintrc @@ -0,0 +1,7 @@ +{ + "extends": "stylelint-config-standard", + "rules": { + "comment-empty-line-before": null, + "no-descending-specificity": null, + } +} diff --git a/site-ui/Dockerfile b/site-ui/Dockerfile new file mode 100644 index 000000000..bda86949a --- /dev/null +++ b/site-ui/Dockerfile @@ -0,0 +1,57 @@ +FROM ubuntu:18.04 +# Set up non-root user, 'build', with default uid:gid +# This allows passing --build-arg to use localhost username, and uid:gid: +# $ docker build \ +# -t cassandra-website-ui:latest \ +# --build-arg BUILD_USER_ARG=$(whoami) \ +# --build-arg UID_ARG=$(id -u) \ +# --build-arg GID_ARG=$(id -g) \ +# . +# +# Other container parameters can be overridden at build time as well: +# - NODE_VERSION_ARG: Version of node to use. 
+ARG BUILD_USER_ARG="build" +ARG UID_ARG=1000 +ARG GID_ARG=1000 +ARG NODE_VERSION_ARG="v12.16.2" + +ENV BUILD_USER=${BUILD_USER_ARG} + +RUN echo "Building with arguments:" \ + && echo " - BUILD_USER_ARG=${BUILD_USER_ARG}" \ + && echo " - UID_ARG=${UID_ARG}" \ + && echo " - GID_ARG=${GID_ARG}" \ + && echo " - NODE_VERSION_ARG=${NODE_VERSION_ARG}" + +RUN echo "Setting up user '${BUILD_USER}'" +RUN groupadd --gid ${GID_ARG} --non-unique ${BUILD_USER} +RUN useradd --create-home --shell /bin/bash \ + --uid ${UID_ARG} --gid ${GID_ARG} --non-unique ${BUILD_USER} + +RUN apt-get update && \ + apt-get install -y \ + wget \ + git \ + vim + +ENV NODE_PACKAGE="node-${NODE_VERSION_ARG}-linux-x64.tar.gz" +RUN wget https://nodejs.org/download/release/${NODE_VERSION_ARG}/${NODE_PACKAGE} && \ + tar -C /usr/local --strip-components 1 -xzf ${NODE_PACKAGE} && \ + rm ${NODE_PACKAGE} + +RUN npm install -g gulp-cli + +ENV BUILD_DIR="/home/${BUILD_USER}" +WORKDIR ${BUILD_DIR} +RUN mkdir -p ${BUILD_DIR}/site-ui && \ + chmod -R a+rw ${BUILD_DIR} && \ + chown -R ${BUILD_USER}:${BUILD_USER} ${BUILD_DIR} + +EXPOSE 5252/tcp + +# Run as build user from here +USER ${BUILD_USER} +WORKDIR ${BUILD_DIR}/site-ui +COPY docker-entrypoint.sh /usr/local/bin/ +ENTRYPOINT ["docker-entrypoint.sh"] +CMD ["--tasks", "--depth", "1"] \ No newline at end of file diff --git a/site-ui/LICENSE b/site-ui/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/site-ui/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +---------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/site-ui/docker-entrypoint.sh b/site-ui/docker-entrypoint.sh new file mode 100755 index 000000000..d2c6e1a4e --- /dev/null +++ b/site-ui/docker-entrypoint.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Abort script if a command fails +set -e + +GREEN='\033[1;32m' +YELLOW='\033[0;33m' +NC='\033[0m' # No Color + +if [ ! -d "node_modules" ] +then + echo + echo -e "${YELLOW}The node_modules directory is missing and we need it to render the UI.${NC}" + echo -e "${YELLOW}I'll run a one-off install of the required modules. This will take a few minutes to complete.${NC}" + echo + + # Install node modules + npm install + + echo -e -n "${GREEN}Install complete!${NC}" +fi + +exec gulp "$@" \ No newline at end of file diff --git a/site-ui/gulp.d/lib/create-task.js b/site-ui/gulp.d/lib/create-task.js new file mode 100644 index 000000000..8c9006291 --- /dev/null +++ b/site-ui/gulp.d/lib/create-task.js @@ -0,0 +1,24 @@ +'use strict' + +const metadata = require('undertaker/lib/helpers/metadata') +const { watch } = require('gulp') + +module.exports = ({ name, desc, opts, call: fn, loop }) => { + if (name) { + const displayName = fn.displayName + if (displayName === '<series>' || displayName === '<parallel>') { + metadata.get(fn).tree.label = `${displayName} ${name}` + } + fn.displayName = name + } + if (loop) { + const delegate = fn + name = delegate.displayName + delegate.displayName = `${name}:loop` + fn = () => watch(loop, { ignoreInitial: false }, delegate) + fn.displayName = name + } + if (desc) fn.description = desc + if (opts) fn.flags = opts + return fn +} diff --git a/site-ui/gulp.d/lib/export-tasks.js b/site-ui/gulp.d/lib/export-tasks.js new file mode 100644 index 000000000..7c9de482c --- /dev/null +++ b/site-ui/gulp.d/lib/export-tasks.js @@ -0,0 +1,14 @@ +'use strict' + +module.exports = (...tasks) => { + const seed = {} + if (tasks.length) { + if (tasks.lastIndexOf(tasks[0]) > 0) { + const task1 = tasks.shift() + seed.default = Object.assign(task1.bind(null), { description: `=> ${task1.displayName}`, displayName: 'default' }) + } + return tasks.reduce((acc, it) => (acc[it.displayName || it.name] = it) && acc, seed) + } else { + return seed + } +} diff --git a/site-ui/gulp.d/lib/gulp-prettier-eslint.js b/site-ui/gulp.d/lib/gulp-prettier-eslint.js new file mode 100644 index 000000000..fe674da0d --- /dev/null +++ b/site-ui/gulp.d/lib/gulp-prettier-eslint.js @@ -0,0 +1,44 @@ +'use strict' + +const log = require('fancy-log') +const PluginError = require('plugin-error') +const prettierEslint = require('prettier-eslint') +const { Transform } = require('stream') +const map = (transform) => new Transform({ objectMode: true, transform }) + +module.exports = () => { + const report = { changed: 0, unchanged: 0 } + return map(format).on('finish', () => { + if (report.changed > 0) { + const changed = 'formatted ' + .concat(report.changed) + .concat(' file') + .concat(report.changed === 1 ? 
'' : 's') + const unchanged = 'left ' + .concat(report.unchanged) + .concat(' file') + .concat(report.unchanged === 1 ? '' : 's') + .concat(' unchanged') + log(`prettier-eslint: ${changed}; ${unchanged}`) + } else { + log(`prettier-eslint: left ${report.unchanged} file${report.unchanged === 1 ? '' : 's'} unchanged`) + } + }) + + function format (file, enc, next) { + if (file.isNull()) return next() + if (file.isStream()) return next(new PluginError('gulp-prettier-eslint', 'Streaming not supported')) + + const input = file.contents.toString() + const output = prettierEslint({ text: input, filePath: file.path }) + + if (input === output) { + report.unchanged += 1 + } else { + report.changed += 1 + file.contents = Buffer.from(output) + } + + next(null, file) + } +} diff --git a/site-ui/gulp.d/tasks/build-preview-pages.js b/site-ui/gulp.d/tasks/build-preview-pages.js new file mode 100644 index 000000000..556e51087 --- /dev/null +++ b/site-ui/gulp.d/tasks/build-preview-pages.js @@ -0,0 +1,214 @@ +'use strict' + +const asciidoctor = require('asciidoctor.js')() +const fs = require('fs-extra') +const handlebars = require('handlebars') +const merge = require('merge-stream') +const ospath = require('path') +const path = ospath.posix +const requireFromString = require('require-from-string') +const { Transform } = require('stream') +const map = (transform = () => {}, flush = undefined) => new Transform({ objectMode: true, transform, flush }) +const vfs = require('vinyl-fs') +const yaml = require('js-yaml') + +const ASCIIDOC_ATTRIBUTES = { experimental: '', icons: 'font', sectanchors: '', 'source-highlighter': 'highlight.js' } + +module.exports = (src, previewSrc, previewDest, sink = () => map()) => (done) => + Promise.all([ + loadSampleUiModel(previewSrc), + toPromise( + merge(compileLayouts(src), registerPartials(src), registerHelpers(src), copyImages(previewSrc, previewDest)) + ), + ]) + .then(([baseUiModel, { layouts }]) => [{ ...baseUiModel, env: process.env }, layouts]) + .then(([baseUiModel, layouts]) => + vfs + .src('**/*.adoc', { base: previewSrc, cwd: previewSrc }) + .pipe( + map((file, enc, next) => { + const siteRootPath = path.relative(ospath.dirname(file.path), ospath.resolve(previewSrc)) + const uiModel = { ...baseUiModel } + uiModel.siteRootPath = siteRootPath + uiModel.siteRootUrl = path.join(siteRootPath, 'index.html') + uiModel.uiRootPath = path.join(siteRootPath, '_') + if (file.stem === '404') { + uiModel.page = { layout: '404', title: 'Page Not Found' } + } else { + const pageModel = (uiModel.page = { ...uiModel.page }) + const doc = asciidoctor.load(file.contents, { safe: 'safe', attributes: ASCIIDOC_ATTRIBUTES }) + const attributes = doc.getAttributes() + pageModel.layout = doc.getAttribute('page-layout', 'default') + pageModel.title = doc.getDocumentTitle() + pageModel.url = '/' + file.relative.slice(0, -5) + '.html' + if (file.stem === 'tutorials') pageModel.tutorials = true + const componentName = doc.getAttribute('page-component-name', pageModel.src.component) + const versionString = doc.getAttribute( + 'page-version', + doc.hasAttribute('page-component-name') ? undefined : pageModel.src.version + ) + let component + let componentVersion + if (componentName) { + component = pageModel.component = uiModel.site.components[componentName] + componentVersion = pageModel.componentVersion = versionString + ? 
component.versions.find(({ version }) => version === versionString) + : component.latest + } else { + component = pageModel.component = Object.values(uiModel.site.components)[0] + componentVersion = pageModel.componentVersion = component.latest + } + pageModel.module = 'ROOT' + pageModel.relativeSrcPath = file.relative + pageModel.version = componentVersion.version + pageModel.displayVersion = componentVersion.displayVersion + pageModel.editUrl = pageModel.origin.editUrlPattern.replace('%s', file.relative) + pageModel.navigation = componentVersion.navigation || [] + pageModel.breadcrumbs = findNavPath(pageModel.url, pageModel.navigation) + if (pageModel.component.versions.length > 1) { + pageModel.versions = pageModel.component.versions.map(({ version, displayVersion, url }, idx, arr) => { + const pageVersion = { version, displayVersion: displayVersion || version, url } + if (version === component.latest.version) pageVersion.latest = true + if (idx === arr.length - 1) { + delete pageVersion.url + pageVersion.missing = true + } + return pageVersion + }) + } + pageModel.attributes = Object.entries({ ...attributes, ...componentVersion.asciidoc.attributes }) + .filter(([name, val]) => name.startsWith('page-')) + .reduce((accum, [name, val]) => ({ ...accum, [name.substr(5)]: val }), {}) + pageModel.contents = Buffer.from(doc.convert()) + } + file.extname = '.html' + try { + file.contents = Buffer.from(layouts.get(uiModel.page.layout)(uiModel)) + next(null, file) + } catch (e) { + next(transformHandlebarsError(e, uiModel.page.layout)) + } + }) + ) + .pipe(vfs.dest(previewDest)) + .on('error', done) + .pipe(sink()) + ) + +function loadSampleUiModel (src) { + return fs.readFile(ospath.join(src, 'ui-model.yml'), 'utf8').then((contents) => { + const uiModel = yaml.safeLoad(contents) + uiModel.env = process.env + Object.entries(uiModel.site.components).forEach(([name, component]) => { + component.name = name + if (!component.versions) component.versions = [(component.latest = { url: '#' })] + component.versions.forEach((version) => { + Object.defineProperty(version, 'name', { value: component.name, enumerable: true }) + if (!('displayVersion' in version)) version.displayVersion = version.version + if (!('asciidoc' in version)) version.asciidoc = { attributes: {} } + }) + Object.defineProperties(component, { + asciidoc: { + get () { + return this.latest.asciidoc + }, + }, + title: { + get () { + return this.latest.title + }, + }, + url: { + get () { + return this.latest.url + }, + }, + }) + }) + return uiModel + }) +} + +function registerPartials (src) { + return vfs.src('partials/*.hbs', { base: src, cwd: src }).pipe( + map((file, enc, next) => { + handlebars.registerPartial(file.stem, file.contents.toString()) + next() + }) + ) +} + +function registerHelpers (src) { + handlebars.registerHelper('relativize', relativize) + handlebars.registerHelper('resolvePage', resolvePage) + handlebars.registerHelper('resolvePageURL', resolvePageURL) + return vfs.src('helpers/*.js', { base: src, cwd: src }).pipe( + map((file, enc, next) => { + handlebars.registerHelper(file.stem, requireFromString(file.contents.toString())) + next() + }) + ) +} + +function compileLayouts (src) { + const layouts = new Map() + return vfs.src('layouts/*.hbs', { base: src, cwd: src }).pipe( + map( + (file, enc, next) => { + const srcName = path.join(src, file.relative) + layouts.set(file.stem, handlebars.compile(file.contents.toString(), { preventIndent: true, srcName })) + next() + }, + function (done) { + this.push({ layouts }) + 
done() + } + ) + ) +} + +function copyImages (src, dest) { + return vfs.src('**/*.{png,svg}', { base: src, cwd: src }).pipe(vfs.dest(dest)) +} + +function findNavPath (currentUrl, node = [], current_path = [], root = true) { + for (const item of node) { + const { url, items } = item + if (url === currentUrl) { + return current_path.concat(item) + } else if (items) { + const activePath = findNavPath(currentUrl, items, current_path.concat(item), false) + if (activePath) return activePath + } + } + if (root) return [] +} + +function relativize (url) { + return url ? (url.charAt() === '#' ? url : url.slice(1)) : '#' +} + +function resolvePage (spec, context = {}) { + if (spec) return { pub: { url: resolvePageURL(spec) } } +} + +function resolvePageURL (spec, context = {}) { + if (spec) return '/' + (spec = spec.split(':').pop()).slice(0, spec.lastIndexOf('.')) + '.html' +} + +function transformHandlebarsError ({ message, stack }, layout) { + const m = stack.match(/^ *at Object\.ret \[as (.+?)\]/m) + const templatePath = `src/${m ? 'partials/' + m[1] : 'layouts/' + layout}.hbs` + const err = new Error(`${message}${~message.indexOf('\n') ? '\n^ ' : ' '}in UI template ${templatePath}`) + err.stack = [err.toString()].concat(stack.substr(message.length + 8)).join('\n') + return err +} + +function toPromise (stream) { + return new Promise((resolve, reject, data = {}) => + stream + .on('error', reject) + .on('data', (chunk) => chunk.constructor === Object && Object.assign(data, chunk)) + .on('finish', () => resolve(data)) + ) +} diff --git a/site-ui/gulp.d/tasks/build.js b/site-ui/gulp.d/tasks/build.js new file mode 100644 index 000000000..f25afefd2 --- /dev/null +++ b/site-ui/gulp.d/tasks/build.js @@ -0,0 +1,113 @@ +'use strict' + +const autoprefixer = require('autoprefixer') +const browserify = require('browserify') +const buffer = require('vinyl-buffer') +const concat = require('gulp-concat') +const cssnano = require('cssnano') +const fs = require('fs-extra') +const imagemin = require('gulp-imagemin') +const merge = require('merge-stream') +const ospath = require('path') +const path = ospath.posix +const postcss = require('gulp-postcss') +const postcssCalc = require('postcss-calc') +const postcssImport = require('postcss-import') +const postcssUrl = require('postcss-url') +const postcssVar = require('postcss-custom-properties') +const { Transform } = require('stream') +const map = (transform) => new Transform({ objectMode: true, transform }) +const uglify = require('gulp-uglify') +const vfs = require('vinyl-fs') + +module.exports = (src, dest, preview) => () => { + const opts = { base: src, cwd: src } + const sourcemaps = preview || process.env.SOURCEMAPS === 'true' + const postcssPlugins = [ + postcssImport, + (css, { messages, opts: { file } }) => + Promise.all( + messages + .reduce((accum, { file: depPath, type }) => (type === 'dependency' ? accum.concat(depPath) : accum), []) + .map((importedPath) => fs.stat(importedPath).then(({ mtime }) => mtime)) + ).then((mtimes) => { + const newestMtime = mtimes.reduce((max, curr) => (!max || curr > max ? 
curr : max)) + if (newestMtime > file.stat.mtime) file.stat.mtimeMs = +(file.stat.mtime = newestMtime) + }), + postcssUrl([ + { + filter: '**/~typeface-*/files/*', + url: (asset) => { + const relpath = asset.pathname.substr(1) + const abspath = require.resolve(relpath) + const basename = ospath.basename(abspath) + const destpath = ospath.join(dest, 'font', basename) + if (!fs.pathExistsSync(destpath)) fs.copySync(abspath, destpath) + return path.join('..', 'font', basename) + }, + }, + ]), + postcssVar({ preserve: preview }), + preview ? postcssCalc : () => {}, + autoprefixer, + preview ? () => {} : cssnano({ preset: 'default' }), + ] + + return merge( + vfs + .src('js/+([0-9])-*.js', { ...opts, sourcemaps }) + .pipe(uglify()) + // NOTE concat already uses stat from newest combined file + .pipe(concat('js/site.js')), + vfs + .src('js/vendor/*.js', { ...opts, read: false }) + .pipe( + // see https://gulpjs.org/recipes/browserify-multiple-destination.html + map((file, enc, next) => { + if (file.relative.endsWith('.bundle.js')) { + const mtimePromises = [] + const bundlePath = file.path + browserify(file.relative, { basedir: src, detectGlobals: false }) + .plugin('browser-pack-flat/plugin') + .on('file', (bundledPath) => { + if (bundledPath !== bundlePath) mtimePromises.push(fs.stat(bundledPath).then(({ mtime }) => mtime)) + }) + .bundle((bundleError, bundleBuffer) => + Promise.all(mtimePromises).then((mtimes) => { + const newestMtime = mtimes.reduce((max, curr) => (curr > max ? curr : max), file.stat.mtime) + if (newestMtime > file.stat.mtime) file.stat.mtimeMs = +(file.stat.mtime = newestMtime) + if (bundleBuffer !== undefined) file.contents = bundleBuffer + file.path = file.path.slice(0, file.path.length - 10) + '.js' + next(bundleError, file) + }) + ) + } else { + fs.readFile(file.path, 'UTF-8').then((contents) => { + file.contents = Buffer.from(contents) + next(null, file) + }) + } + }) + ) + .pipe(buffer()) + .pipe(uglify()), + vfs.src(require.resolve('jquery/dist/jquery.min.js'), opts).pipe(concat('js/vendor/jquery.js')), + vfs + .src(['css/site.css', 'css/vendor/docsearch.css'], { ...opts, sourcemaps }) + .pipe(postcss((file) => ({ plugins: postcssPlugins, options: { file } }))), + vfs.src('font/*.{ttf,woff*(2)}', opts), + vfs + .src('img/**/*.{gif,ico,jpg,png,svg}', opts) + .pipe( + imagemin([ + imagemin.gifsicle(), + imagemin.jpegtran(), + imagemin.optipng(), + imagemin.svgo({ plugins: [{ removeViewBox: false }] }), + ]) + ), + vfs.src('helpers/*.js', opts), + vfs.src('layouts/*.hbs', opts), + vfs.src('partials/*.hbs', opts) + ).pipe(vfs.dest(dest, { sourcemaps: sourcemaps && '.' 
})) +} diff --git a/site-ui/gulp.d/tasks/format.js b/site-ui/gulp.d/tasks/format.js new file mode 100644 index 000000000..2d5049617 --- /dev/null +++ b/site-ui/gulp.d/tasks/format.js @@ -0,0 +1,10 @@ +'use strict' + +const prettier = require('../lib/gulp-prettier-eslint') +const vfs = require('vinyl-fs') + +module.exports = (files) => () => + vfs + .src(files) + .pipe(prettier()) + .pipe(vfs.dest((file) => file.base)) diff --git a/site-ui/gulp.d/tasks/index.js b/site-ui/gulp.d/tasks/index.js new file mode 100644 index 000000000..a5795fcee --- /dev/null +++ b/site-ui/gulp.d/tasks/index.js @@ -0,0 +1,5 @@ +'use strict' + +const camelCase = (name) => name.replace(/[-]./g, (m) => m.substr(1).toUpperCase()) + +module.exports = require('require-directory')(module, __dirname, { recurse: false, rename: camelCase }) diff --git a/site-ui/gulp.d/tasks/lint-css.js b/site-ui/gulp.d/tasks/lint-css.js new file mode 100644 index 000000000..d68401431 --- /dev/null +++ b/site-ui/gulp.d/tasks/lint-css.js @@ -0,0 +1,10 @@ +'use strict' + +const stylelint = require('gulp-stylelint') +const vfs = require('vinyl-fs') + +module.exports = (files) => (done) => + vfs + .src(files) + .pipe(stylelint({ reporters: [{ formatter: 'string', console: true }], failAfterError: true })) + .on('error', done) diff --git a/site-ui/gulp.d/tasks/lint-js.js b/site-ui/gulp.d/tasks/lint-js.js new file mode 100644 index 000000000..ef4f3c90b --- /dev/null +++ b/site-ui/gulp.d/tasks/lint-js.js @@ -0,0 +1,12 @@ +'use strict' + +const eslint = require('gulp-eslint') +const vfs = require('vinyl-fs') + +module.exports = (files) => (done) => + vfs + .src(files) + .pipe(eslint()) + .pipe(eslint.format()) + .pipe(eslint.failAfterError()) + .on('error', done) diff --git a/site-ui/gulp.d/tasks/pack.js b/site-ui/gulp.d/tasks/pack.js new file mode 100644 index 000000000..a792e72b7 --- /dev/null +++ b/site-ui/gulp.d/tasks/pack.js @@ -0,0 +1,11 @@ +'use strict' + +const vfs = require('vinyl-fs') +const zip = require('gulp-vinyl-zip') +const path = require('path') + +module.exports = (src, dest, bundleName, onFinish) => () => + vfs + .src('**/*', { base: src, cwd: src }) + .pipe(zip.dest(path.join(dest, `${bundleName}-bundle.zip`))) + .on('finish', () => onFinish && onFinish(path.resolve(dest, `${bundleName}-bundle.zip`))) diff --git a/site-ui/gulp.d/tasks/release.js b/site-ui/gulp.d/tasks/release.js new file mode 100644 index 000000000..f6295123a --- /dev/null +++ b/site-ui/gulp.d/tasks/release.js @@ -0,0 +1,55 @@ +'use strict' + +const fs = require('fs-extra') +const Octokit = require('@octokit/rest') +const path = require('path') + +module.exports = (dest, bundleName, owner, repo, token, updateMaster) => async () => { + const octokit = new Octokit({ auth: `token ${token}` }) + const { + data: { tag_name: lastTagName }, + } = await octokit.repos.getLatestRelease({ owner, repo }).catch(() => ({ data: { tag_name: 'v0' } })) + const tagName = `v${Number(lastTagName.substr(1)) + 1}` + const ref = 'heads/master' + const message = `Release ${tagName}` + const bundleFileBasename = `${bundleName}-bundle.zip` + const bundleFile = path.join(dest, bundleFileBasename) + let commit = await octokit.gitdata.getRef({ owner, repo, ref }).then((result) => result.data.object.sha) + const readmeContent = await fs + .readFile('README.adoc', 'utf-8') + .then((contents) => contents.replace(/^(?:\/\/)?(:current-release: ).+$/m, `$1${tagName}`)) + const readmeBlob = await octokit.gitdata + .createBlob({ owner, repo, content: readmeContent, encoding: 'utf-8' }) + 
.then((result) => result.data.sha) + let tree = await octokit.gitdata.getCommit({ owner, repo, commit_sha: commit }).then((result) => result.data.tree.sha) + tree = await octokit.gitdata + .createTree({ + owner, + repo, + tree: [{ path: 'README.adoc', mode: '100644', type: 'blob', sha: readmeBlob }], + base_tree: tree, + }) + .then((result) => result.data.sha) + commit = await octokit.gitdata + .createCommit({ owner, repo, message, tree, parents: [commit] }) + .then((result) => result.data.sha) + if (updateMaster) await octokit.gitdata.updateRef({ owner, repo, ref, sha: commit }) + const uploadUrl = await octokit.repos + .createRelease({ + owner, + repo, + tag_name: tagName, + target_commitish: commit, + name: tagName, + }) + .then((result) => result.data.upload_url) + await octokit.repos.uploadReleaseAsset({ + url: uploadUrl, + file: fs.createReadStream(bundleFile), + name: bundleFileBasename, + headers: { + 'content-length': (await fs.stat(bundleFile)).size, + 'content-type': 'application/zip', + }, + }) +} diff --git a/site-ui/gulp.d/tasks/remove.js b/site-ui/gulp.d/tasks/remove.js new file mode 100644 index 000000000..71a8dac0c --- /dev/null +++ b/site-ui/gulp.d/tasks/remove.js @@ -0,0 +1,9 @@ +'use strict' + +const fs = require('fs-extra') +const { Transform } = require('stream') +const map = (transform) => new Transform({ objectMode: true, transform }) +const vfs = require('vinyl-fs') + +module.exports = (files) => () => + vfs.src(files, { allowEmpty: true }).pipe(map((file, enc, next) => fs.remove(file.path, next))) diff --git a/site-ui/gulp.d/tasks/serve.js b/site-ui/gulp.d/tasks/serve.js new file mode 100644 index 000000000..942c67308 --- /dev/null +++ b/site-ui/gulp.d/tasks/serve.js @@ -0,0 +1,36 @@ +'use strict' + +const connect = require('gulp-connect') +const os = require('os') + +const ANY_HOST = '0.0.0.0' +const URL_RX = /(https?):\/\/(?:[^/: ]+)(:\d+)?/ + +module.exports = (root, opts = {}, watch = undefined) => (done) => { + connect.server({ ...opts, middleware: opts.host === ANY_HOST ? decorateLog : undefined, root }, function () { + this.server.on('close', done) + if (watch) watch() + }) +} + +function decorateLog (_, app) { + const _log = app.log + app.log = (msg) => { + if (msg.startsWith('Server started ')) { + const localIp = getLocalIp() + const replacement = '$1://localhost$2' + (localIp ? ` and $1://${localIp}$2` : '') + msg = msg.replace(URL_RX, replacement) + } + _log(msg) + } + return [] +} + +function getLocalIp () { + for (const records of Object.values(os.networkInterfaces())) { + for (const record of records) { + if (!record.internal && record.family === 'IPv4') return record.address + } + } + return 'localhost' +} diff --git a/site-ui/gulpfile.js b/site-ui/gulpfile.js new file mode 100644 index 000000000..8efa8ce34 --- /dev/null +++ b/site-ui/gulpfile.js @@ -0,0 +1,133 @@ +'use strict' + +const pkg = require('./package.json') +const [owner, repo] = new URL(pkg.repository.url).pathname.slice(1).split('/') + +const { parallel, series, watch } = require('gulp') +const createTask = require('./gulp.d/lib/create-task') +const exportTasks = require('./gulp.d/lib/export-tasks') +const log = require('fancy-log') + +const bundleName = 'ui' +const buildDir = ['deploy-preview', 'branch-deploy'].includes(process.env.CONTEXT) ? 'public/dist' : 'build' +const previewSrcDir = 'preview-src' +const previewDestDir = 'public' +const srcDir = 'src' +const destDir = `${previewDestDir}/_` +const { reload: livereload } = process.env.LIVERELOAD === 'true' ? 
require('gulp-connect') : {} +const serverConfig = { host: '0.0.0.0', port: 5252, livereload } + +const task = require('./gulp.d/tasks') +const glob = { + all: [srcDir, previewSrcDir], + css: `${srcDir}/css/**/*.css`, + js: ['gulpfile.js', 'gulp.d/**/*.js', `${srcDir}/{helpers,js}/**/*.js`], +} + +const cleanTask = createTask({ + name: 'clean', + desc: 'Clean files and folders generated by build', + call: task.remove(['build', 'public']), +}) + +const lintCssTask = createTask({ + name: 'lint:css', + desc: 'Lint the CSS source files using stylelint (standard config)', + call: task.lintCss(glob.css), +}) + +const lintJsTask = createTask({ + name: 'lint:js', + desc: 'Lint the JavaScript source files using eslint (JavaScript Standard Style)', + call: task.lintJs(glob.js), +}) + +const lintTask = createTask({ + name: 'lint', + desc: 'Lint the CSS and JavaScript source files', + call: parallel(lintCssTask, lintJsTask), +}) + +const formatTask = createTask({ + name: 'format', + desc: 'Format the JavaScript source files using prettify (JavaScript Standard Style)', + call: task.format(glob.js), +}) + +const buildTask = createTask({ + name: 'build', + desc: 'Build and stage the UI assets for bundling', + call: task.build( + srcDir, + destDir, + process.argv.slice(2).some((name) => name.startsWith('preview')) + ), +}) + +const bundleBuildTask = createTask({ + name: 'bundle:build', + call: series(cleanTask, lintTask, buildTask), +}) + +const bundlePackTask = createTask({ + name: 'bundle:pack', + desc: 'Create a bundle of the staged UI assets for publishing', + call: task.pack( + destDir, + buildDir, + bundleName, + (bundlePath) => !process.env.CI && log(`Antora option: --ui-bundle-url=${bundlePath}`) + ), +}) + +const bundleTask = createTask({ + name: 'bundle', + desc: 'Clean, lint, build, and bundle the UI for publishing', + call: series(bundleBuildTask, bundlePackTask), +}) + +const releasePublishTask = createTask({ + name: 'release:publish', + call: task.release(buildDir, bundleName, owner, repo, process.env.GITHUB_API_TOKEN, true), +}) + +const releaseTask = createTask({ + name: 'release', + desc: 'Bundle the UI and publish it to GitHub by attaching it to a new tag', + call: series(bundleTask, releasePublishTask), +}) + +const buildPreviewPagesTask = createTask({ + name: 'preview:build-pages', + call: task.buildPreviewPages(srcDir, previewSrcDir, previewDestDir, livereload), +}) + +const previewBuildTask = createTask({ + name: 'preview:build', + desc: 'Process and stage the UI assets and generate pages for the preview', + call: parallel(buildTask, buildPreviewPagesTask), +}) + +const previewServeTask = createTask({ + name: 'preview:serve', + call: task.serve(previewDestDir, serverConfig, () => watch(glob.all, previewBuildTask)), +}) + +const previewTask = createTask({ + name: 'preview', + desc: 'Generate a preview site and launch a server to view it', + call: series(previewBuildTask, previewServeTask), +}) + +module.exports = exportTasks( + bundleTask, + cleanTask, + lintTask, + formatTask, + buildTask, + bundleTask, + bundlePackTask, + releaseTask, + previewTask, + previewBuildTask +) diff --git a/site-ui/package-lock.json b/site-ui/package-lock.json new file mode 100644 index 000000000..82934097a --- /dev/null +++ b/site-ui/package-lock.json @@ -0,0 +1,13218 @@ +{ + "name": "cassandra-website-ui", + "requires": true, + "lockfileVersion": 1, + "dependencies": { + "@babel/code-frame": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", + 
"integrity": "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==", + "dev": true, + "requires": { + "@babel/highlight": "^7.10.4" + } + }, + "@babel/core": { + "version": "7.12.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", + "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.10.4", + "@babel/generator": "^7.12.5", + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helpers": "^7.12.5", + "@babel/parser": "^7.12.7", + "@babel/template": "^7.12.7", + "@babel/traverse": "^7.12.9", + "@babel/types": "^7.12.7", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.1", + "json5": "^2.1.2", + "lodash": "^4.17.19", + "resolve": "^1.3.2", + "semver": "^5.4.1", + "source-map": "^0.5.0" + }, + "dependencies": { + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "json5": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.1.3.tgz", + "integrity": "sha512-KXPvOm8K9IJKFM0bmdn8QXh7udDh1g/giieX0NLCaMnb4hEiVFqnop2ImTXCc5e0/oHz3LTqmHGtExn5hfMkOA==", + "dev": true, + "requires": { + "minimist": "^1.2.5" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "@babel/generator": { + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.12.5.tgz", + "integrity": "sha512-m16TQQJ8hPt7E+OS/XVQg/7U184MLXtvuGbCdA7na61vha+ImkyyNM/9DDA0unYCVZn3ZOhng+qz48/KBOT96A==", + "dev": true, + "requires": { + "@babel/types": "^7.12.5", + "jsesc": "^2.5.1", + "source-map": "^0.5.0" + }, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "@babel/helper-function-name": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.10.4.tgz", + "integrity": "sha512-YdaSyz1n8gY44EmN7x44zBn9zQ1Ry2Y+3GTA+3vH6Mizke1Vw0aWDM66FOYEPw8//qKkmqOckrGgTYa+6sceqQ==", + "dev": true, + "requires": { + "@babel/helper-get-function-arity": "^7.10.4", + "@babel/template": "^7.10.4", + "@babel/types": "^7.10.4" + } + }, + "@babel/helper-get-function-arity": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.10.4.tgz", + "integrity": "sha512-EkN3YDB+SRDgiIUnNgcmiD361ti+AVbL3f3Henf6dqqUyr5dMsorno0lJWJuLhDhkI5sYEpgj6y9kB8AOU1I2A==", + "dev": true, + "requires": { + "@babel/types": "^7.10.4" + } + }, + "@babel/helper-member-expression-to-functions": { + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.12.7.tgz", + "integrity": 
"sha512-DCsuPyeWxeHgh1Dus7APn7iza42i/qXqiFPWyBDdOFtvS581JQePsc1F/nD+fHrcswhLlRc2UpYS1NwERxZhHw==", + "dev": true, + "requires": { + "@babel/types": "^7.12.7" + } + }, + "@babel/helper-module-imports": { + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.12.5.tgz", + "integrity": "sha512-SR713Ogqg6++uexFRORf/+nPXMmWIn80TALu0uaFb+iQIUoR7bOC7zBWyzBs5b3tBBJXuyD0cRu1F15GyzjOWA==", + "dev": true, + "requires": { + "@babel/types": "^7.12.5" + } + }, + "@babel/helper-module-transforms": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.12.1.tgz", + "integrity": "sha512-QQzehgFAZ2bbISiCpmVGfiGux8YVFXQ0abBic2Envhej22DVXV9nCFaS5hIQbkyo1AdGb+gNME2TSh3hYJVV/w==", + "dev": true, + "requires": { + "@babel/helper-module-imports": "^7.12.1", + "@babel/helper-replace-supers": "^7.12.1", + "@babel/helper-simple-access": "^7.12.1", + "@babel/helper-split-export-declaration": "^7.11.0", + "@babel/helper-validator-identifier": "^7.10.4", + "@babel/template": "^7.10.4", + "@babel/traverse": "^7.12.1", + "@babel/types": "^7.12.1", + "lodash": "^4.17.19" + } + }, + "@babel/helper-optimise-call-expression": { + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.7.tgz", + "integrity": "sha512-I5xc9oSJ2h59OwyUqjv95HRyzxj53DAubUERgQMrpcCEYQyToeHA+NEcUEsVWB4j53RDeskeBJ0SgRAYHDBckw==", + "dev": true, + "requires": { + "@babel/types": "^7.12.7" + } + }, + "@babel/helper-replace-supers": { + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.12.5.tgz", + "integrity": "sha512-5YILoed0ZyIpF4gKcpZitEnXEJ9UoDRki1Ey6xz46rxOzfNMAhVIJMoune1hmPVxh40LRv1+oafz7UsWX+vyWA==", + "dev": true, + "requires": { + "@babel/helper-member-expression-to-functions": "^7.12.1", + "@babel/helper-optimise-call-expression": "^7.10.4", + "@babel/traverse": "^7.12.5", + "@babel/types": "^7.12.5" + } + }, + "@babel/helper-simple-access": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.12.1.tgz", + "integrity": "sha512-OxBp7pMrjVewSSC8fXDFrHrBcJATOOFssZwv16F3/6Xtc138GHybBfPbm9kfiqQHKhYQrlamWILwlDCeyMFEaA==", + "dev": true, + "requires": { + "@babel/types": "^7.12.1" + } + }, + "@babel/helper-split-export-declaration": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.11.0.tgz", + "integrity": "sha512-74Vejvp6mHkGE+m+k5vHY93FX2cAtrw1zXrZXRlG4l410Nm9PxfEiVTn1PjDPV5SnmieiueY4AFg2xqhNFuuZg==", + "dev": true, + "requires": { + "@babel/types": "^7.11.0" + } + }, + "@babel/helper-validator-identifier": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz", + "integrity": "sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw==", + "dev": true + }, + "@babel/helpers": { + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.12.5.tgz", + "integrity": "sha512-lgKGMQlKqA8meJqKsW6rUnc4MdUk35Ln0ATDqdM1a/UpARODdI4j5Y5lVfUScnSNkJcdCRAaWkspykNoFg9sJA==", + "dev": true, + "requires": { + "@babel/template": "^7.10.4", + "@babel/traverse": "^7.12.5", + "@babel/types": "^7.12.5" + } + }, + "@babel/highlight": { + "version": "7.10.4", + "resolved": 
"https://registry.npmjs.org/@babel/highlight/-/highlight-7.10.4.tgz", + "integrity": "sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.10.4", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + } + }, + "@babel/parser": { + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.12.7.tgz", + "integrity": "sha512-oWR02Ubp4xTLCAqPRiNIuMVgNO5Aif/xpXtabhzW2HWUD47XJsAB4Zd/Rg30+XeQA3juXigV7hlquOTmwqLiwg==", + "dev": true + }, + "@babel/template": { + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.7.tgz", + "integrity": "sha512-GkDzmHS6GV7ZeXfJZ0tLRBhZcMcY0/Lnb+eEbXDBfCAcZCjrZKe6p3J4we/D24O9Y8enxWAg1cWwof59yLh2ow==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.10.4", + "@babel/parser": "^7.12.7", + "@babel/types": "^7.12.7" + } + }, + "@babel/traverse": { + "version": "7.12.9", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.12.9.tgz", + "integrity": "sha512-iX9ajqnLdoU1s1nHt36JDI9KG4k+vmI8WgjK5d+aDTwQbL2fUnzedNedssA645Ede3PM2ma1n8Q4h2ohwXgMXw==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.10.4", + "@babel/generator": "^7.12.5", + "@babel/helper-function-name": "^7.10.4", + "@babel/helper-split-export-declaration": "^7.11.0", + "@babel/parser": "^7.12.7", + "@babel/types": "^7.12.7", + "debug": "^4.1.0", + "globals": "^11.1.0", + "lodash": "^4.17.19" + }, + "dependencies": { + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + } + } + }, + "@babel/types": { + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.12.7.tgz", + "integrity": "sha512-MNyI92qZq6jrQkXvtIiykvl4WtoRrVV9MPn+ZfsoEENjiWcBQ3ZSHrkxnJWgWtLX3XXqX5hrSQ+X69wkmesXuQ==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.10.4", + "lodash": "^4.17.19", + "to-fast-properties": "^2.0.0" + } + }, + "@nodelib/fs.scandir": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.3.tgz", + "integrity": "sha512-eGmwYQn3gxo4r7jdQnkrrN6bY478C3P+a/y72IJukF8LjB6ZHeB3c+Ehacj3sYeSmUXGlnA67/PmbM9CVwL7Dw==", + "dev": true, + "requires": { + "@nodelib/fs.stat": "2.0.3", + "run-parallel": "^1.1.9" + } + }, + "@nodelib/fs.stat": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.3.tgz", + "integrity": "sha512-bQBFruR2TAwoevBEd/NWMoAAtNGzTRgdrqnYCc7dhzfoNvqPzLyqlEQnzZ3kVnNrSp25iyxE00/3h2fqGAGArA==", + "dev": true + }, + "@nodelib/fs.walk": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.4.tgz", + "integrity": "sha512-1V9XOY4rDW0rehzbrcqAmHnz8e7SKvX27gh8Gt2WgB0+pdzdiLV83p72kZPU+jvMbS1qU5mauP2iOvO8rhmurQ==", + "dev": true, + "requires": { 
+ "@nodelib/fs.scandir": "2.1.3", + "fastq": "^1.6.0" + } + }, + "@octokit/auth-token": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.4.3.tgz", + "integrity": "sha512-fdGoOQ3kQJh+hrilc0Plg50xSfaCKOeYN9t6dpJKXN9BxhhfquL0OzoQXg3spLYymL5rm29uPeI3KEXRaZQ9zg==", + "dev": true, + "requires": { + "@octokit/types": "^5.0.0" + } + }, + "@octokit/core": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-3.2.1.tgz", + "integrity": "sha512-XfFSDDwv6tclUenS0EmB6iA7u+4aOHBT1Lz4PtQNQQg3hBbNaR/+Uv5URU+egeIuuGAiMRiDyY92G4GBOWOqDA==", + "dev": true, + "requires": { + "@octokit/auth-token": "^2.4.0", + "@octokit/graphql": "^4.3.1", + "@octokit/request": "^5.4.0", + "@octokit/types": "^5.0.0", + "before-after-hook": "^2.1.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/endpoint": { + "version": "6.0.9", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-6.0.9.tgz", + "integrity": "sha512-3VPLbcCuqji4IFTclNUtGdp9v7g+nspWdiCUbK3+iPMjJCZ6LEhn1ts626bWLOn0GiDb6j+uqGvPpqLnY7pBgw==", + "dev": true, + "requires": { + "@octokit/types": "^5.0.0", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/graphql": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-4.5.7.tgz", + "integrity": "sha512-Gk0AR+DcwIK/lK/GX+OQ99UqtenQhcbrhHHfOYlrCQe17ADnX3EKAOKRsAZ9qZvpi5MuwWm/Nm+9aO2kTDSdyA==", + "dev": true, + "requires": { + "@octokit/request": "^5.3.0", + "@octokit/types": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/plugin-paginate-rest": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.6.0.tgz", + "integrity": "sha512-o+O8c1PqsC5++BHXfMZabRRsBIVb34tXPWyQLyp2IXq5MmkxdipS7TXM4Y9ldL1PzY9CTrCsn/lzFFJGM3oRRA==", + "dev": true, + "requires": { + "@octokit/types": "^5.5.0" + } + }, + "@octokit/plugin-request-log": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.2.tgz", + "integrity": "sha512-oTJSNAmBqyDR41uSMunLQKMX0jmEXbwD1fpz8FG27lScV3RhtGfBa1/BBLym+PxcC16IBlF7KH9vP1BUYxA+Eg==", + "dev": true + }, + "@octokit/plugin-rest-endpoint-methods": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-4.2.1.tgz", + "integrity": "sha512-QyFr4Bv807Pt1DXZOC5a7L5aFdrwz71UHTYoHVajYV5hsqffWm8FUl9+O7nxRu5PDMtB/IKrhFqTmdBTK5cx+A==", + "dev": true, + "requires": { + "@octokit/types": "^5.5.0", + "deprecation": "^2.3.1" + } + }, + "@octokit/request": { + "version": "5.4.10", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.4.10.tgz", + "integrity": "sha512-egA49HkqEORVGDZGav1mh+VD+7uLgOxtn5oODj6guJk0HCy+YBSYapFkSLFgeYj3Fr18ZULKGURkjyhkAChylw==", + "dev": true, + "requires": { + "@octokit/endpoint": "^6.0.1", + "@octokit/request-error": "^2.0.0", + "@octokit/types": "^5.0.0", + "deprecation": "^2.0.0", + "is-plain-object": "^5.0.0", + "node-fetch": "^2.6.1", + "once": "^1.4.0", + "universal-user-agent": "^6.0.0" + } + }, + "@octokit/request-error": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-2.0.3.tgz", + "integrity": "sha512-GgD5z8Btm301i2zfvJLk/mkhvGCdjQ7wT8xF9ov5noQY8WbKZDH9cOBqXzoeKd1mLr1xH2FwbtGso135zGBgTA==", + "dev": true, + "requires": { + "@octokit/types": "^5.0.1", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + 
"@octokit/rest": { + "version": "18.0.9", + "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-18.0.9.tgz", + "integrity": "sha512-CC5+cIx974Ygx9lQNfUn7/oXDQ9kqGiKUC6j1A9bAVZZ7aoTF8K6yxu0pQhQrLBwSl92J6Z3iVDhGhGFgISCZg==", + "dev": true, + "requires": { + "@octokit/core": "^3.0.0", + "@octokit/plugin-paginate-rest": "^2.2.0", + "@octokit/plugin-request-log": "^1.0.0", + "@octokit/plugin-rest-endpoint-methods": "4.2.1" + } + }, + "@octokit/types": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-5.5.0.tgz", + "integrity": "sha512-UZ1pErDue6bZNjYOotCNveTXArOMZQFG6hKJfOnGnulVCMcVVi7YIIuuR4WfBhjo7zgpmzn/BkPDnUXtNx+PcQ==", + "dev": true, + "requires": { + "@types/node": ">= 8" + } + }, + "@sindresorhus/is": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.7.0.tgz", + "integrity": "sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow==", + "dev": true, + "optional": true + }, + "@stylelint/postcss-css-in-js": { + "version": "0.37.2", + "resolved": "https://registry.npmjs.org/@stylelint/postcss-css-in-js/-/postcss-css-in-js-0.37.2.tgz", + "integrity": "sha512-nEhsFoJurt8oUmieT8qy4nk81WRHmJynmVwn/Vts08PL9fhgIsMhk1GId5yAN643OzqEEb5S/6At2TZW7pqPDA==", + "dev": true, + "requires": { + "@babel/core": ">=7.9.0" + } + }, + "@stylelint/postcss-markdown": { + "version": "0.36.2", + "resolved": "https://registry.npmjs.org/@stylelint/postcss-markdown/-/postcss-markdown-0.36.2.tgz", + "integrity": "sha512-2kGbqUVJUGE8dM+bMzXG/PYUWKkjLIkRLWNh39OaADkiabDRdw8ATFCgbMz5xdIcvwspPAluSL7uY+ZiTWdWmQ==", + "dev": true, + "requires": { + "remark": "^13.0.0", + "unist-util-find-all-after": "^3.0.2" + } + }, + "@types/eslint-visitor-keys": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@types/eslint-visitor-keys/-/eslint-visitor-keys-1.0.0.tgz", + "integrity": "sha512-OCutwjDZ4aFS6PB1UZ988C4YgwlBHJd6wCeQqaLdmadZ/7e+w79+hbMUFC1QXDNCmdyoRfAFdm0RypzwR+Qpag==", + "dev": true + }, + "@types/glob": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.3.tgz", + "integrity": "sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w==", + "dev": true, + "requires": { + "@types/minimatch": "*", + "@types/node": "*" + } + }, + "@types/json-schema": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.6.tgz", + "integrity": "sha512-3c+yGKvVP5Y9TYBEibGNR+kLtijnj7mYrXRg+WpFb2X9xm04g/DXYkfg4hmzJQosc9snFNUPkbYIhu+KAm6jJw==", + "dev": true + }, + "@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=", + "dev": true + }, + "@types/mdast": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.3.tgz", + "integrity": "sha512-SXPBMnFVQg1s00dlMCc/jCdvPqdE4mXaMMCeRlxLDmTAEoegHT53xKtkDnzDTOcmMHUfcjyf36/YYZ6SxRdnsw==", + "dev": true, + "requires": { + "@types/unist": "*" + } + }, + "@types/minimatch": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.3.tgz", + "integrity": "sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==", + "dev": true + }, + "@types/minimist": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.1.tgz", + "integrity": 
"sha512-fZQQafSREFyuZcdWFAExYjBiCL7AUCdgsk80iO0q4yihYYdcIiH28CcuPTGFgLOCC8RlW49GSQxdHwZP+I7CNg==", + "dev": true + }, + "@types/node": { + "version": "14.14.10", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.10.tgz", + "integrity": "sha512-J32dgx2hw8vXrSbu4ZlVhn1Nm3GbeCFNw2FWL8S5QKucHGY0cyNwjdQdO+KMBZ4wpmC7KhLCiNsdk1RFRIYUQQ==", + "dev": true + }, + "@types/normalize-package-data": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.0.tgz", + "integrity": "sha512-f5j5b/Gf71L+dbqxIpQ4Z2WlmI/mPJ0fOkGGmFgtb6sAu97EPczzbS3/tJKxmcYDj55OX6ssqwDAWOHIYDRDGA==", + "dev": true + }, + "@types/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", + "dev": true + }, + "@types/q": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.4.tgz", + "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==", + "dev": true + }, + "@types/unist": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.3.tgz", + "integrity": "sha512-FvUupuM3rlRsRtCN+fDudtmytGO6iHJuuRKS1Ss0pG5z8oX0diNEw94UEL7hgDbpN94rgaK5R7sWm6RrSkZuAQ==", + "dev": true + }, + "@typescript-eslint/experimental-utils": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-3.10.1.tgz", + "integrity": "sha512-DewqIgscDzmAfd5nOGe4zm6Bl7PKtMG2Ad0KG8CUZAHlXfAKTF9Ol5PXhiMh39yRL2ChRH1cuuUGOcVyyrhQIw==", + "dev": true, + "requires": { + "@types/json-schema": "^7.0.3", + "@typescript-eslint/types": "3.10.1", + "@typescript-eslint/typescript-estree": "3.10.1", + "eslint-scope": "^5.0.0", + "eslint-utils": "^2.0.0" + } + }, + "@typescript-eslint/parser": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-3.10.1.tgz", + "integrity": "sha512-Ug1RcWcrJP02hmtaXVS3axPPTTPnZjupqhgj+NnZ6BCkwSImWk/283347+x9wN+lqOdK9Eo3vsyiyDHgsmiEJw==", + "dev": true, + "requires": { + "@types/eslint-visitor-keys": "^1.0.0", + "@typescript-eslint/experimental-utils": "3.10.1", + "@typescript-eslint/types": "3.10.1", + "@typescript-eslint/typescript-estree": "3.10.1", + "eslint-visitor-keys": "^1.1.0" + } + }, + "@typescript-eslint/types": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-3.10.1.tgz", + "integrity": "sha512-+3+FCUJIahE9q0lDi1WleYzjCwJs5hIsbugIgnbB+dSCYUxl8L6PwmsyOPFZde2hc1DlTo/xnkOgiTLSyAbHiQ==", + "dev": true + }, + "@typescript-eslint/typescript-estree": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-3.10.1.tgz", + "integrity": "sha512-QbcXOuq6WYvnB3XPsZpIwztBoquEYLXh2MtwVU+kO8jgYCiv4G5xrSP/1wg4tkvrEE+esZVquIPX/dxPlePk1w==", + "dev": true, + "requires": { + "@typescript-eslint/types": "3.10.1", + "@typescript-eslint/visitor-keys": "3.10.1", + "debug": "^4.1.1", + "glob": "^7.1.6", + "is-glob": "^4.0.1", + "lodash": "^4.17.15", + "semver": "^7.3.2", + "tsutils": "^3.17.1" + }, + "dependencies": { + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": 
{ + "ms": "2.1.2" + } + }, + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "semver": { + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", + "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==", + "dev": true + } + } + }, + "@typescript-eslint/visitor-keys": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-3.10.1.tgz", + "integrity": "sha512-9JgC82AaQeglebjZMgYR5wgmfUdUc+EitGUUMW8u2nDckaeimzW+VsoLV6FoimPv2id3VQzfjwBxEMVz08ameQ==", + "dev": true, + "requires": { + "eslint-visitor-keys": "^1.1.0" + } + }, + "JSONStream": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", + "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==", + "dev": true, + "requires": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + } + }, + "abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "dev": true + }, + "accepts": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", + "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", + "dev": true, + "requires": { + "mime-types": "~2.1.24", + "negotiator": "0.6.2" + } + }, + "acorn": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", + "dev": true + }, + "acorn-jsx": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", + "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==", + "dev": true + }, + "acorn-node": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/acorn-node/-/acorn-node-1.8.2.tgz", + "integrity": "sha512-8mt+fslDufLYntIoPAaIMUe/lrbrehIiwmR3t2k9LljIzoigEPF27eLk2hy8zSGzmR/ogr7zbRKINMo1u0yh5A==", + "dev": true, + "requires": { + "acorn": "^7.0.0", + "acorn-walk": "^7.0.0", + "xtend": "^4.0.2" + } + }, + "acorn-walk": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz", + "integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==", + "dev": true + }, + "agentkeepalive": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-2.2.0.tgz", + "integrity": "sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8=", + "dev": true + }, + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": 
"sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "algoliasearch": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-3.35.1.tgz", + "integrity": "sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==", + "dev": true, + "requires": { + "agentkeepalive": "^2.2.0", + "debug": "^2.6.9", + "envify": "^4.0.0", + "es6-promise": "^4.1.0", + "events": "^1.1.0", + "foreach": "^2.0.5", + "global": "^4.3.2", + "inherits": "^2.0.1", + "isarray": "^2.0.1", + "load-script": "^1.0.0", + "object-keys": "^1.0.11", + "querystring-es3": "^0.2.1", + "reduce": "^1.0.1", + "semver": "^5.1.0", + "tunnel-agent": "^0.6.0" + }, + "dependencies": { + "events": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", + "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=", + "dev": true + }, + "isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + } + } + }, + "alphanum-sort": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz", + "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM=", + "dev": true + }, + "ansi-colors": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-1.1.0.tgz", + "integrity": "sha512-SFKX67auSNoVR38N3L+nvsPjOE0bybKTYbkf5tRvushrAPQ9V75huw0ZxBkKVeRU9kqH3d6HA4xTckbwZ4ixmA==", + "dev": true, + "requires": { + "ansi-wrap": "^0.1.0" + } + }, + "ansi-escapes": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.1.tgz", + "integrity": "sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA==", + "dev": true, + "requires": { + "type-fest": "^0.11.0" + }, + "dependencies": { + "type-fest": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz", + "integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==", + "dev": true + } + } + }, + "ansi-gray": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/ansi-gray/-/ansi-gray-0.1.1.tgz", + "integrity": "sha1-KWLPVOyXksSFEKPetSRDaGHvclE=", + "dev": true, + "requires": { + "ansi-wrap": "0.1.0" + } + }, + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "ansi-wrap": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/ansi-wrap/-/ansi-wrap-0.1.0.tgz", + "integrity": "sha1-qCJQ3bABXponyoLoLqYDu/pF768=", + "dev": true + }, + "anymatch": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", + "integrity": 
"sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", + "dev": true, + "requires": { + "micromatch": "^3.1.4", + "normalize-path": "^2.1.1" + }, + "dependencies": { + "normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "dev": true, + "requires": { + "remove-trailing-separator": "^1.0.1" + } + } + } + }, + "append-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/append-buffer/-/append-buffer-1.0.2.tgz", + "integrity": "sha1-2CIM9GYIFSXv6lBhTz3mUU36WPE=", + "dev": true, + "requires": { + "buffer-equal": "^1.0.0" + } + }, + "arch": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/arch/-/arch-2.2.0.tgz", + "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", + "dev": true, + "optional": true + }, + "archive-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/archive-type/-/archive-type-4.0.0.tgz", + "integrity": "sha1-+S5yIzBW38aWlHJ0nCZ72wRrHXA=", + "dev": true, + "optional": true, + "requires": { + "file-type": "^4.2.0" + }, + "dependencies": { + "file-type": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-4.4.0.tgz", + "integrity": "sha1-G2AOX8ofvcboDApwxxyNul95BsU=", + "dev": true, + "optional": true + } + } + }, + "archy": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz", + "integrity": "sha1-+cjBN1fMHde8N5rHeyxipcKGjEA=", + "dev": true + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "arr-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", + "dev": true + }, + "arr-filter": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/arr-filter/-/arr-filter-1.1.2.tgz", + "integrity": "sha1-Q/3d0JHo7xGqTEXZzcGOLf8XEe4=", + "dev": true, + "requires": { + "make-iterator": "^1.0.0" + } + }, + "arr-flatten": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", + "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", + "dev": true + }, + "arr-map": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/arr-map/-/arr-map-2.0.2.tgz", + "integrity": "sha1-Onc0X/wc814qkYJWAfnljy4kysQ=", + "dev": true, + "requires": { + "make-iterator": "^1.0.0" + } + }, + "arr-union": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", + "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", + "dev": true + }, + "array-each": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/array-each/-/array-each-1.0.1.tgz", + "integrity": "sha1-p5SvDAWrF1KEbudTofIRoFugxE8=", + "dev": true + }, + "array-find-index": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", + "integrity": "sha1-3wEKoSh+Fku9pvlyOwqWoexBh6E=", + "dev": true, + "optional": true + }, + "array-from": { + "version": "2.1.1", + "resolved": 
"https://registry.npmjs.org/array-from/-/array-from-2.1.1.tgz", + "integrity": "sha1-z+nYwmYoudxa7MYqn12PHzUsEZU=", + "dev": true + }, + "array-includes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.2.tgz", + "integrity": "sha512-w2GspexNQpx+PutG3QpT437/BenZBj0M/MZGn5mzv/MofYqo0xmRHzn4lFsoDlWJ+THYsGJmFlW68WlDFx7VRw==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "es-abstract": "^1.18.0-next.1", + "get-intrinsic": "^1.0.1", + "is-string": "^1.0.5" + } + }, + "array-initial": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/array-initial/-/array-initial-1.1.0.tgz", + "integrity": "sha1-L6dLJnOTccOUe9enrcc74zSz15U=", + "dev": true, + "requires": { + "array-slice": "^1.0.0", + "is-number": "^4.0.0" + }, + "dependencies": { + "is-number": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", + "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==", + "dev": true + } + } + }, + "array-last": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/array-last/-/array-last-1.3.0.tgz", + "integrity": "sha512-eOCut5rXlI6aCOS7Z7kCplKRKyiFQ6dHFBem4PwlwKeNFk2/XxTrhRh5T9PyaEWGy/NHTZWbY+nsZlNFJu9rYg==", + "dev": true, + "requires": { + "is-number": "^4.0.0" + }, + "dependencies": { + "is-number": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", + "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==", + "dev": true + } + } + }, + "array-slice": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/array-slice/-/array-slice-1.1.0.tgz", + "integrity": "sha512-B1qMD3RBP7O8o0H2KbrXDyB0IccejMF15+87Lvlor12ONPRHP6gTjXMNkt/d3ZuOGbAe66hFmaCfECI24Ufp6w==", + "dev": true + }, + "array-sort": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/array-sort/-/array-sort-1.0.0.tgz", + "integrity": "sha512-ihLeJkonmdiAsD7vpgN3CRcx2J2S0TiYW+IS/5zHBI7mKUq3ySvBdzzBfD236ubDBQFiiyG3SWCPc+msQ9KoYg==", + "dev": true, + "requires": { + "default-compare": "^1.0.0", + "get-value": "^2.0.6", + "kind-of": "^5.0.2" + }, + "dependencies": { + "kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "dev": true + } + } + }, + "array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true + }, + "array-unique": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", + "dev": true + }, + "array.prototype.flat": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.4.tgz", + "integrity": "sha512-4470Xi3GAPAjZqFcljX2xzckv1qeKPizoNkiS0+O4IoPR2ZNpcjE0pkhdihlDouK+x6QOast26B4Q/O9DJnwSg==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "es-abstract": "^1.18.0-next.1" + } + }, + "arrify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", + "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", + "dev": 
true + }, + "asciidoctor.js": { + "version": "1.5.9", + "resolved": "https://registry.npmjs.org/asciidoctor.js/-/asciidoctor.js-1.5.9.tgz", + "integrity": "sha512-k5JgwyV82TsiCpnYbDPReuHhzf/vRUt6NaZ+OGywkDDGeGG/CPfvN2Gd1MJ0iIZKDyuk4iJHOdY/2x1KBrWMzA==", + "dev": true, + "requires": { + "opal-runtime": "1.0.11" + } + }, + "asn1": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", + "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", + "dev": true, + "requires": { + "safer-buffer": "~2.1.0" + } + }, + "asn1.js": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz", + "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==", + "dev": true, + "requires": { + "bn.js": "^4.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0", + "safer-buffer": "^2.1.0" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", + "dev": true + } + } + }, + "assert": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz", + "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==", + "dev": true, + "requires": { + "object-assign": "^4.1.1", + "util": "0.10.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", + "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=", + "dev": true + }, + "util": { + "version": "0.10.3", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz", + "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=", + "dev": true, + "requires": { + "inherits": "2.0.1" + } + } + } + }, + "assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", + "dev": true + }, + "assign-symbols": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", + "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=", + "dev": true + }, + "astral-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", + "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", + "dev": true + }, + "async-done": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/async-done/-/async-done-1.3.2.tgz", + "integrity": "sha512-uYkTP8dw2og1tu1nmza1n1CMW0qb8gWWlwqMmLb7MhBVs4BXrFziT6HXUd+/RlRA/i4H9AkofYloUbs1fwMqlw==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.2", + "process-nextick-args": "^2.0.0", + "stream-exhaust": "^1.0.1" + } + }, + "async-each": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz", + "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==", + "dev": true + }, + "async-settle": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-settle/-/async-settle-1.0.0.tgz", + "integrity": "sha1-HQqRS7Aldb7IqPOnTlCA9yssDGs=", + "dev": true, + "requires": { + "async-done": "^1.2.2" + } + }, + "asynckit": { + "version": "0.4.0", + "resolved": 
"https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=", + "dev": true + }, + "at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "dev": true + }, + "atob": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", + "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", + "dev": true + }, + "autocomplete.js": { + "version": "0.36.0", + "resolved": "https://registry.npmjs.org/autocomplete.js/-/autocomplete.js-0.36.0.tgz", + "integrity": "sha512-jEwUXnVMeCHHutUt10i/8ZiRaCb0Wo+ZyKxeGsYwBDtw6EJHqEeDrq4UwZRD8YBSvp3g6klP678il2eeiVXN2Q==", + "dev": true, + "requires": { + "immediate": "^3.2.3" + } + }, + "autoprefixer": { + "version": "9.8.6", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.6.tgz", + "integrity": "sha512-XrvP4VVHdRBCdX1S3WXVD8+RyG9qeb1D5Sn1DeLiG2xfSpzellk5k54xbUERJ3M5DggQxes39UGOTP8CFrEGbg==", + "dev": true, + "requires": { + "browserslist": "^4.12.0", + "caniuse-lite": "^1.0.30001109", + "colorette": "^1.2.1", + "normalize-range": "^0.1.2", + "num2fraction": "^1.2.2", + "postcss": "^7.0.32", + "postcss-value-parser": "^4.1.0" + } + }, + "aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", + "dev": true + }, + "aws4": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", + "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==", + "dev": true + }, + "bach": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/bach/-/bach-1.2.0.tgz", + "integrity": "sha1-Szzpa/JxNPeaG0FKUcFONMO9mIA=", + "dev": true, + "requires": { + "arr-filter": "^1.1.1", + "arr-flatten": "^1.0.1", + "arr-map": "^2.0.0", + "array-each": "^1.0.0", + "array-initial": "^1.0.0", + "array-last": "^1.1.1", + "async-done": "^1.2.2", + "async-settle": "^1.0.0", + "now-and-later": "^2.0.0" + } + }, + "bail": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz", + "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==", + "dev": true + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "base": { + "version": "0.11.2", + "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", + "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "dev": true, + "requires": { + "cache-base": "^1.0.1", + "class-utils": "^0.3.5", + "component-emitter": "^1.2.1", + "define-property": "^1.0.0", + "isobject": "^3.0.1", + "mixin-deep": "^1.2.0", + "pascalcase": "^0.1.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dev": true, + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true + }, + "batch": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=", + "dev": true + }, + "bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", + "dev": true, + "requires": { + "tweetnacl": "^0.14.3" + } + }, + "before-after-hook": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.1.0.tgz", + "integrity": "sha512-IWIbu7pMqyw3EAJHzzHbWa85b6oud/yfKYg5rqB5hNE8CeMi3nX+2C2sj0HswfblST86hpVEOAb9x34NZd6P7A==", + "dev": true + }, + "bin-build": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bin-build/-/bin-build-3.0.0.tgz", + "integrity": "sha512-jcUOof71/TNAI2uM5uoUaDq2ePcVBQ3R/qhxAz1rX7UfvduAL/RXD3jXzvn8cVcDJdGVkiR1shal3OH0ImpuhA==", + "dev": true, + "optional": true, + "requires": { + "decompress": "^4.0.0", + "download": "^6.2.2", + "execa": "^0.7.0", + "p-map-series": "^1.0.0", + "tempfile": "^2.0.0" + } + }, + "bin-check": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bin-check/-/bin-check-4.1.0.tgz", + "integrity": "sha512-b6weQyEUKsDGFlACWSIOfveEnImkJyK/FGW6FAG42loyoquvjdtOIqO6yBFzHyqyVVhNgNkQxxx09SFLK28YnA==", + "dev": true, + "optional": true, + "requires": { + "execa": "^0.7.0", + "executable": "^4.1.0" + } + }, + "bin-version": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/bin-version/-/bin-version-3.1.0.tgz", + "integrity": "sha512-Mkfm4iE1VFt4xd4vH+gx+0/71esbfus2LsnCGe8Pi4mndSPyT+NGES/Eg99jx8/lUGWfu3z2yuB/bt5UB+iVbQ==", + "dev": true, + "optional": true, + "requires": { + "execa": "^1.0.0", + "find-versions": "^3.0.0" + }, + "dependencies": { + "cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "dev": true, + "optional": true, + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "execa": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", + "integrity": 
"sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", + "dev": true, + "optional": true, + "requires": { + "cross-spawn": "^6.0.0", + "get-stream": "^4.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + } + }, + "get-stream": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", + "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "dev": true, + "optional": true, + "requires": { + "pump": "^3.0.0" + } + }, + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "dev": true, + "optional": true + }, + "pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dev": true, + "optional": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dev": true, + "optional": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "dev": true, + "optional": true + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "optional": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "bin-version-check": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/bin-version-check/-/bin-version-check-4.0.0.tgz", + "integrity": "sha512-sR631OrhC+1f8Cvs8WyVWOA33Y8tgwjETNPyyD/myRBXLkfS/vl74FmH/lFcRl9KY3zwGh7jFhvyk9vV3/3ilQ==", + "dev": true, + "optional": true, + "requires": { + "bin-version": "^3.0.0", + "semver": "^5.6.0", + "semver-truncate": "^1.1.2" + } + }, + "bin-wrapper": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bin-wrapper/-/bin-wrapper-4.1.0.tgz", + "integrity": "sha512-hfRmo7hWIXPkbpi0ZltboCMVrU+0ClXR/JgbCKKjlDjQf6igXa7OwdqNcFWQZPZTgiY7ZpzE3+LjjkLiTN2T7Q==", + "dev": true, + "optional": true, + "requires": { + "bin-check": "^4.1.0", + "bin-version-check": "^4.0.0", + "download": "^7.1.0", + "import-lazy": "^3.1.0", + "os-filter-obj": "^2.0.0", + "pify": "^4.0.1" + }, + "dependencies": { + "download": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/download/-/download-7.1.0.tgz", + "integrity": "sha512-xqnBTVd/E+GxJVrX5/eUJiLYjCGPwMpdL+jGhGU57BvtcA7wwhtHVbXBeUk51kOpW3S7Jn3BQbN9Q1R1Km2qDQ==", + "dev": true, + "optional": true, + "requires": { + "archive-type": "^4.0.0", + "caw": "^2.0.1", + "content-disposition": "^0.5.2", + "decompress": "^4.2.0", + "ext-name": "^5.0.0", + "file-type": "^8.1.0", + "filenamify": "^2.0.0", + "get-stream": "^3.0.0", + "got": "^8.3.1", + "make-dir": "^1.2.0", + "p-event": "^2.1.0", + "pify": "^3.0.0" + }, + "dependencies": { + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": 
"sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true, + "optional": true + } + } + }, + "file-type": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-8.1.0.tgz", + "integrity": "sha512-qyQ0pzAy78gVoJsmYeNgl8uH8yKhr1lVhW7JbzJmnlRi0I4R2eEDEJZVKG8agpDnLpacwNbDhLNG/LMdxHD2YQ==", + "dev": true, + "optional": true + }, + "got": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/got/-/got-8.3.2.tgz", + "integrity": "sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw==", + "dev": true, + "optional": true, + "requires": { + "@sindresorhus/is": "^0.7.0", + "cacheable-request": "^2.1.1", + "decompress-response": "^3.3.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "into-stream": "^3.1.0", + "is-retry-allowed": "^1.1.0", + "isurl": "^1.0.0-alpha5", + "lowercase-keys": "^1.0.0", + "mimic-response": "^1.0.0", + "p-cancelable": "^0.4.0", + "p-timeout": "^2.0.1", + "pify": "^3.0.0", + "safe-buffer": "^5.1.1", + "timed-out": "^4.0.1", + "url-parse-lax": "^3.0.0", + "url-to-options": "^1.0.1" + }, + "dependencies": { + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true, + "optional": true + } + } + }, + "make-dir": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", + "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "dev": true, + "optional": true, + "requires": { + "pify": "^3.0.0" + }, + "dependencies": { + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true, + "optional": true + } + } + }, + "p-cancelable": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-0.4.1.tgz", + "integrity": "sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ==", + "dev": true, + "optional": true + }, + "p-event": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/p-event/-/p-event-2.3.1.tgz", + "integrity": "sha512-NQCqOFhbpVTMX4qMe8PF8lbGtzZ+LCiN7pcNrb/413Na7+TRoe1xkKUzuWa/YEJdGQ0FvKtj35EEbDoVPO2kbA==", + "dev": true, + "optional": true, + "requires": { + "p-timeout": "^2.0.1" + } + }, + "p-timeout": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-2.0.1.tgz", + "integrity": "sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA==", + "dev": true, + "optional": true, + "requires": { + "p-finally": "^1.0.0" + } + }, + "pify": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "dev": true, + "optional": true + }, + "prepend-http": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", + "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=", + "dev": true, + "optional": true + }, + "url-parse-lax": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", + "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", + "dev": true, + "optional": true, + "requires": { + "prepend-http": "^2.0.0" + } + } + } + }, + "binary-extensions": { + "version": "1.13.1", + "resolved": 
"https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz", + "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", + "dev": true + }, + "bindings": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", + "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "dev": true, + "optional": true, + "requires": { + "file-uri-to-path": "1.0.0" + } + }, + "bl": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.3.tgz", + "integrity": "sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==", + "dev": true, + "requires": { + "readable-stream": "^2.3.5", + "safe-buffer": "^5.1.1" + } + }, + "bn.js": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.3.tgz", + "integrity": "sha512-GkTiFpjFtUzU9CbMeJ5iazkCzGL3jrhzerzZIuqLABjbwRaFt33I9tUdSNryIptM+RxDet6OKm2WnLXzW51KsQ==", + "dev": true + }, + "body": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/body/-/body-5.1.0.tgz", + "integrity": "sha1-5LoM5BCkaTYyM2dgnstOZVMSUGk=", + "dev": true, + "requires": { + "continuable-cache": "^0.3.1", + "error": "^7.0.0", + "raw-body": "~1.1.0", + "safe-json-parse": "~1.0.1" + } + }, + "boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=", + "dev": true + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "braces": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", + "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "dev": true, + "requires": { + "arr-flatten": "^1.1.0", + "array-unique": "^0.3.2", + "extend-shallow": "^2.0.1", + "fill-range": "^4.0.0", + "isobject": "^3.0.1", + "repeat-element": "^1.1.2", + "snapdragon": "^0.8.1", + "snapdragon-node": "^2.0.1", + "split-string": "^3.0.2", + "to-regex": "^3.0.1" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "brorand": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", + "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=", + "dev": true + }, + "browser-pack": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/browser-pack/-/browser-pack-6.1.0.tgz", + "integrity": "sha512-erYug8XoqzU3IfcU8fUgyHqyOXqIE4tUTTQ+7mqUjQlvnXkOO6OlT9c/ZoJVHYoAaqGxr09CN53G7XIsO4KtWA==", + "dev": true, + "requires": { + "JSONStream": "^1.0.3", + "combine-source-map": "~0.8.0", + "defined": "^1.0.0", + "safe-buffer": "^5.1.1", + "through2": "^2.0.0", + "umd": "^3.0.0" + } + }, + "browser-pack-flat": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/browser-pack-flat/-/browser-pack-flat-3.4.2.tgz", + "integrity": 
"sha512-TrUo6n2fGSOCYFAKkt/EkgenytAuuCI88fmXFA60aNFVHvz3CZEBTXYSvvXVpU6xpjM8lj/6vkC6Exn8KPjtPw==", + "dev": true, + "requires": { + "JSONStream": "^1.3.2", + "combine-source-map": "^0.8.0", + "convert-source-map": "^1.5.1", + "count-lines": "^0.1.2", + "dedent": "^0.7.0", + "estree-is-member-expression": "^1.0.0", + "estree-is-require": "^1.0.0", + "esutils": "^2.0.2", + "path-parse": "^1.0.5", + "scope-analyzer": "^2.0.0", + "stream-combiner": "^0.2.2", + "through2": "^2.0.3", + "transform-ast": "^2.4.2", + "umd": "^3.0.3", + "wrap-comment": "^1.0.0" + } + }, + "browser-process-hrtime": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/browser-process-hrtime/-/browser-process-hrtime-0.1.3.tgz", + "integrity": "sha512-bRFnI4NnjO6cnyLmOV/7PVoDEMJChlcfN0z4s1YMBY989/SvlfMI1lgCnkFUs53e9gQF+w7qu7XdllSTiSl8Aw==", + "dev": true + }, + "browser-resolve": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/browser-resolve/-/browser-resolve-2.0.0.tgz", + "integrity": "sha512-7sWsQlYL2rGLy2IWm8WL8DCTJvYLc/qlOnsakDac87SOoCd16WLsaAMdCiAqsTNHIe+SXfaqyxyo6THoWqs8WQ==", + "dev": true, + "requires": { + "resolve": "^1.17.0" + } + }, + "browserify": { + "version": "16.5.2", + "resolved": "https://registry.npmjs.org/browserify/-/browserify-16.5.2.tgz", + "integrity": "sha512-TkOR1cQGdmXU9zW4YukWzWVSJwrxmNdADFbqbE3HFgQWe5wqZmOawqZ7J/8MPCwk/W8yY7Y0h+7mOtcZxLP23g==", + "dev": true, + "requires": { + "JSONStream": "^1.0.3", + "assert": "^1.4.0", + "browser-pack": "^6.0.1", + "browser-resolve": "^2.0.0", + "browserify-zlib": "~0.2.0", + "buffer": "~5.2.1", + "cached-path-relative": "^1.0.0", + "concat-stream": "^1.6.0", + "console-browserify": "^1.1.0", + "constants-browserify": "~1.0.0", + "crypto-browserify": "^3.0.0", + "defined": "^1.0.0", + "deps-sort": "^2.0.0", + "domain-browser": "^1.2.0", + "duplexer2": "~0.1.2", + "events": "^2.0.0", + "glob": "^7.1.0", + "has": "^1.0.0", + "htmlescape": "^1.1.0", + "https-browserify": "^1.0.0", + "inherits": "~2.0.1", + "insert-module-globals": "^7.0.0", + "labeled-stream-splicer": "^2.0.0", + "mkdirp-classic": "^0.5.2", + "module-deps": "^6.2.3", + "os-browserify": "~0.3.0", + "parents": "^1.0.1", + "path-browserify": "~0.0.0", + "process": "~0.11.0", + "punycode": "^1.3.2", + "querystring-es3": "~0.2.0", + "read-only-stream": "^2.0.0", + "readable-stream": "^2.0.2", + "resolve": "^1.1.4", + "shasum": "^1.0.0", + "shell-quote": "^1.6.1", + "stream-browserify": "^2.0.0", + "stream-http": "^3.0.0", + "string_decoder": "^1.1.1", + "subarg": "^1.0.0", + "syntax-error": "^1.1.1", + "through2": "^2.0.0", + "timers-browserify": "^1.0.1", + "tty-browserify": "0.0.1", + "url": "~0.11.0", + "util": "~0.10.1", + "vm-browserify": "^1.0.0", + "xtend": "^4.0.0" + }, + "dependencies": { + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + } + } + }, + "browserify-aes": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", + "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "dev": true, + "requires": { + "buffer-xor": "^1.0.3", + "cipher-base": "^1.0.0", + "create-hash": "^1.1.0", + 
"evp_bytestokey": "^1.0.3", + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "browserify-cipher": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", + "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "dev": true, + "requires": { + "browserify-aes": "^1.0.4", + "browserify-des": "^1.0.0", + "evp_bytestokey": "^1.0.0" + } + }, + "browserify-des": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", + "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", + "dev": true, + "requires": { + "cipher-base": "^1.0.1", + "des.js": "^1.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "browserify-rsa": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz", + "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==", + "dev": true, + "requires": { + "bn.js": "^5.0.0", + "randombytes": "^2.0.1" + } + }, + "browserify-sign": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz", + "integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==", + "dev": true, + "requires": { + "bn.js": "^5.1.1", + "browserify-rsa": "^4.0.1", + "create-hash": "^1.2.0", + "create-hmac": "^1.1.7", + "elliptic": "^6.5.3", + "inherits": "^2.0.4", + "parse-asn1": "^5.1.5", + "readable-stream": "^3.6.0", + "safe-buffer": "^5.2.0" + }, + "dependencies": { + "readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + }, + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true + } + } + }, + "browserify-zlib": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", + "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "dev": true, + "requires": { + "pako": "~1.0.5" + } + }, + "browserslist": { + "version": "4.14.7", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.14.7.tgz", + "integrity": "sha512-BSVRLCeG3Xt/j/1cCGj1019Wbty0H+Yvu2AOuZSuoaUWn3RatbL33Cxk+Q4jRMRAbOm0p7SLravLjpnT6s0vzQ==", + "dev": true, + "requires": { + "caniuse-lite": "^1.0.30001157", + "colorette": "^1.2.1", + "electron-to-chromium": "^1.3.591", + "escalade": "^3.1.1", + "node-releases": "^1.1.66" + } + }, + "buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.2.1.tgz", + "integrity": "sha512-c+Ko0loDaFfuPWiL02ls9Xd3GO3cPVmUobQ6t3rXNUk304u6hGq+8N/kFi+QEIKhzK3uwolVhLzszmfLmMLnqg==", + "dev": true, + "requires": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4" + } + }, + "buffer-alloc": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", + "integrity": 
"sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", + "dev": true, + "optional": true, + "requires": { + "buffer-alloc-unsafe": "^1.1.0", + "buffer-fill": "^1.0.0" + } + }, + "buffer-alloc-unsafe": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", + "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==", + "dev": true, + "optional": true + }, + "buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=", + "dev": true + }, + "buffer-equal": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/buffer-equal/-/buffer-equal-1.0.0.tgz", + "integrity": "sha1-WWFrSYME1Var1GaWayLu2j7KX74=", + "dev": true + }, + "buffer-fill": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", + "integrity": "sha1-+PeLdniYiO858gXNY39o5wISKyw=", + "dev": true, + "optional": true + }, + "buffer-from": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", + "dev": true + }, + "buffer-xor": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", + "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=", + "dev": true + }, + "builtin-status-codes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", + "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=", + "dev": true + }, + "bytes": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-1.0.0.tgz", + "integrity": "sha1-NWnt6Lo0MV+rmcPpLLBMciDeH6g=", + "dev": true + }, + "cache-base": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", + "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "dev": true, + "requires": { + "collection-visit": "^1.0.0", + "component-emitter": "^1.2.1", + "get-value": "^2.0.6", + "has-value": "^1.0.0", + "isobject": "^3.0.1", + "set-value": "^2.0.0", + "to-object-path": "^0.3.0", + "union-value": "^1.0.0", + "unset-value": "^1.0.0" + } + }, + "cacheable-request": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-2.1.4.tgz", + "integrity": "sha1-DYCIAbY0KtM8kd+dC0TcCbkeXD0=", + "dev": true, + "optional": true, + "requires": { + "clone-response": "1.0.2", + "get-stream": "3.0.0", + "http-cache-semantics": "3.8.1", + "keyv": "3.0.0", + "lowercase-keys": "1.0.0", + "normalize-url": "2.0.1", + "responselike": "1.0.2" + }, + "dependencies": { + "lowercase-keys": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz", + "integrity": "sha1-TjNms55/VFfjXxMkvfb4jQv8cwY=", + "dev": true, + "optional": true + }, + "normalize-url": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz", + "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", + "dev": true, + "optional": true, + "requires": { + "prepend-http": "^2.0.0", + "query-string": "^5.0.1", + "sort-keys": "^2.0.0" + } + }, + "prepend-http": 
{ + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", + "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=", + "dev": true, + "optional": true + }, + "sort-keys": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz", + "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=", + "dev": true, + "optional": true, + "requires": { + "is-plain-obj": "^1.0.0" + } + } + } + }, + "cached-path-relative": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/cached-path-relative/-/cached-path-relative-1.0.2.tgz", + "integrity": "sha512-5r2GqsoEb4qMTTN9J+WzXfjov+hjxT+j3u5K+kIVNIwAd99DLCJE9pBIMP1qVeybV6JiijL385Oz0DcYxfbOIg==", + "dev": true + }, + "call-bind": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.0.tgz", + "integrity": "sha512-AEXsYIyyDY3MCzbwdhzG3Jx1R0J2wetQyUynn6dYHAO+bg8l1k7jwZtRv4ryryFs7EP+NDlikJlVe59jr0cM2w==", + "dev": true, + "requires": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.0" + } + }, + "caller-callsite": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/caller-callsite/-/caller-callsite-2.0.0.tgz", + "integrity": "sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ=", + "dev": true, + "requires": { + "callsites": "^2.0.0" + } + }, + "caller-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-2.0.0.tgz", + "integrity": "sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ=", + "dev": true, + "requires": { + "caller-callsite": "^2.0.0" + } + }, + "callsites": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-2.0.0.tgz", + "integrity": "sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA=", + "dev": true + }, + "camelcase": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-3.0.0.tgz", + "integrity": "sha1-MvxLn82vhF/N9+c7uXysImHwqwo=", + "dev": true + }, + "camelcase-keys": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-2.1.0.tgz", + "integrity": "sha1-MIvur/3ygRkFHvodkyITyRuPkuc=", + "dev": true, + "optional": true, + "requires": { + "camelcase": "^2.0.0", + "map-obj": "^1.0.0" + }, + "dependencies": { + "camelcase": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", + "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=", + "dev": true, + "optional": true + } + } + }, + "caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "dev": true, + "requires": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + }, + "dependencies": { + "lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=", + "dev": true + } + } + }, + "caniuse-lite": { + "version": "1.0.30001161", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001161.tgz", + "integrity": "sha512-JharrCDxOqPLBULF9/SPa6yMcBRTjZARJ6sc3cuKrPfyIk64JN6kuMINWqA99Xc8uElMFcROliwtz0n9pYej+g==", + "dev": true + }, + "caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=", + "dev": true + }, + "caw": { + "version": "2.0.1", + 
"resolved": "https://registry.npmjs.org/caw/-/caw-2.0.1.tgz", + "integrity": "sha512-Cg8/ZSBEa8ZVY9HspcGUYaK63d/bN7rqS3CYCzEGUxuYv6UlmcjzDUz2fCFFHyTvUW5Pk0I+3hkA3iXlIj6guA==", + "dev": true, + "optional": true, + "requires": { + "get-proxy": "^2.0.0", + "isurl": "^1.0.0-alpha5", + "tunnel-agent": "^0.6.0", + "url-to-options": "^1.0.1" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "dependencies": { + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "dev": true + }, + "character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "dev": true + }, + "character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "dev": true + }, + "chardet": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", + "dev": true + }, + "chokidar": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz", + "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==", + "dev": true, + "requires": { + "anymatch": "^2.0.0", + "async-each": "^1.0.1", + "braces": "^2.3.2", + "fsevents": "^1.2.7", + "glob-parent": "^3.1.0", + "inherits": "^2.0.3", + "is-binary-path": "^1.0.0", + "is-glob": "^4.0.0", + "normalize-path": "^3.0.0", + "path-is-absolute": "^1.0.0", + "readdirp": "^2.2.1", + "upath": "^1.1.1" + }, + "dependencies": { + "glob-parent": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", + "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=", + "dev": true, + "requires": { + "is-glob": "^3.1.0", + "path-dirname": "^1.0.0" + }, + "dependencies": { + "is-glob": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", + "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", + "dev": true, + "requires": { + "is-extglob": "^2.1.0" + } + } + } + } + } + }, + "cipher-base": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", + "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + 
"safe-buffer": "^5.0.1" + } + }, + "class-utils": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", + "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "dev": true, + "requires": { + "arr-union": "^3.1.0", + "define-property": "^0.2.5", + "isobject": "^3.0.0", + "static-extend": "^0.1.1" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + } + } + }, + "cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "requires": { + "restore-cursor": "^3.1.0" + } + }, + "cli-width": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz", + "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==", + "dev": true + }, + "cliui": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", + "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=", + "dev": true, + "requires": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wrap-ansi": "^2.0.0" + }, + "dependencies": { + "is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "dev": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dev": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + } + } + }, + "clone": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", + "integrity": "sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18=", + "dev": true + }, + "clone-buffer": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/clone-buffer/-/clone-buffer-1.0.0.tgz", + "integrity": "sha1-4+JbIHrE5wGvch4staFnksrD3Fg=", + "dev": true + }, + "clone-regexp": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clone-regexp/-/clone-regexp-2.2.0.tgz", + "integrity": "sha512-beMpP7BOtTipFuW8hrJvREQ2DrRu3BE7by0ZpibtfBA+qfHYvMGTc2Yb1JMYPKg/JUw0CHYvpg796aNTSW9z7Q==", + "dev": true, + "requires": { + "is-regexp": "^2.0.0" + } + }, + "clone-response": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", + "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", + "dev": true, + "optional": true, + "requires": { + "mimic-response": "^1.0.0" + } + }, + "clone-stats": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/clone-stats/-/clone-stats-1.0.0.tgz", + "integrity": "sha1-s3gt/4u1R04Yuba/D9/ngvh3doA=", + "dev": true + }, + "cloneable-readable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/cloneable-readable/-/cloneable-readable-1.1.3.tgz", + "integrity": "sha512-2EF8zTQOxYq70Y4XKtorQupqF0m49MBz2/yf5Bj+MHjvpG3Hy7sImifnqD6UA+TKYxeSV+u6qqQPawN5UvnpKQ==", + "dev": true, + "requires": { + 
"inherits": "^2.0.1", + "process-nextick-args": "^2.0.0", + "readable-stream": "^2.3.5" + } + }, + "coa": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.2.tgz", + "integrity": "sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==", + "dev": true, + "requires": { + "@types/q": "^1.5.1", + "chalk": "^2.4.1", + "q": "^1.1.2" + } + }, + "code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", + "dev": true + }, + "collection-map": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/collection-map/-/collection-map-1.0.0.tgz", + "integrity": "sha1-rqDwb40mx4DCt1SUOFVEsiVa8Yw=", + "dev": true, + "requires": { + "arr-map": "^2.0.2", + "for-own": "^1.0.0", + "make-iterator": "^1.0.0" + } + }, + "collection-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", + "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", + "dev": true, + "requires": { + "map-visit": "^1.0.0", + "object-visit": "^1.0.0" + } + }, + "color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz", + "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==", + "dev": true, + "requires": { + "color-convert": "^1.9.1", + "color-string": "^1.5.4" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", + "dev": true + }, + "color-string": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.4.tgz", + "integrity": "sha512-57yF5yt8Xa3czSEW1jfQDE79Idk0+AkN/4KWad6tbdxUmAs3MvjxlWSWD4deYytcRfoZ9nhKyFl1kj5tBvidbw==", + "dev": true, + "requires": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, + "color-support": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", + "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "dev": true + }, + "colorette": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.1.tgz", + "integrity": "sha512-puCDz0CzydiSYOrnXpz/PKd69zRrribezjtE9yd4zvytoRc8+RY/KJPvtPFKZS3E3wP6neGyMe0vOTlHO5L3Pw==", + "dev": true + }, + "combine-source-map": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/combine-source-map/-/combine-source-map-0.8.0.tgz", + "integrity": "sha1-pY0N8ELBhvz4IqjoAV9UUNLXmos=", + "dev": true, + "requires": { + "convert-source-map": "~1.1.0", + "inline-source-map": "~0.6.0", + "lodash.memoize": "~3.0.3", + "source-map": "~0.5.3" + }, + "dependencies": { + "convert-source-map": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.1.3.tgz", + "integrity": "sha1-SCnId+n+SbMWHzvzZziI4gRpmGA=", + "dev": true + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": 
"sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dev": true, + "requires": { + "delayed-stream": "~1.0.0" + } + }, + "commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true, + "optional": true + }, + "common-tags": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/common-tags/-/common-tags-1.8.0.tgz", + "integrity": "sha512-6P6g0uetGpW/sdyUy/iQQCbFF0kWVMSIVSyYz7Zgjcgh8mgw8PQzDNZeyZ5DQ2gM7LBoZPHmnjz8rUthkBG5tw==", + "dev": true + }, + "component-emitter": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", + "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==", + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "concat-with-sourcemaps": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/concat-with-sourcemaps/-/concat-with-sourcemaps-1.1.0.tgz", + "integrity": "sha512-4gEjHJFT9e+2W/77h/DS5SGUgwDaOwprX8L/gl5+3ixnzkVJJsZWDSelmN3Oilw3LNDZjZV0yqH1hLG3k6nghg==", + "dev": true, + "requires": { + "source-map": "^0.6.1" + } + }, + "config-chain": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.12.tgz", + "integrity": "sha512-a1eOIcu8+7lUInge4Rpf/n4Krkf3Dd9lqhljRzII1/Zno/kRtUWnznPO3jOKBmTEktkt3fkxisUcivoj0ebzoA==", + "dev": true, + "optional": true, + "requires": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "connect": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/connect/-/connect-3.7.0.tgz", + "integrity": "sha512-ZqRXc+tZukToSNmh5C2iWMSoV3X1YUcPbqEM4DkEG5tNQXrQUZCNVGGv3IuicnkMtPfGf3Xtp8WCXs295iQ1pQ==", + "dev": true, + "requires": { + "debug": "2.6.9", + "finalhandler": "1.1.2", + "parseurl": "~1.3.3", + "utils-merge": "1.0.1" + } + }, + "connect-livereload": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/connect-livereload/-/connect-livereload-0.6.1.tgz", + "integrity": "sha512-3R0kMOdL7CjJpU66fzAkCe6HNtd3AavCS4m+uW4KtJjrdGPT0SQEZieAYd+cm+lJoBznNQ4lqipYWkhBMgk00g==", + "dev": true + }, + "console-browserify": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", + "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==", + "dev": true + }, + "console-stream": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/console-stream/-/console-stream-0.1.1.tgz", + "integrity": "sha1-oJX+B7IEZZVfL6/Si11yvM2UnUQ=", + "dev": true, + "optional": true 
+ }, + "constants-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", + "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=", + "dev": true + }, + "contains-path": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/contains-path/-/contains-path-0.1.0.tgz", + "integrity": "sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=", + "dev": true + }, + "content-disposition": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz", + "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==", + "dev": true, + "optional": true, + "requires": { + "safe-buffer": "5.1.2" + } + }, + "continuable-cache": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/continuable-cache/-/continuable-cache-0.3.1.tgz", + "integrity": "sha1-vXJ6f67XfnH/OYWskzUakSczrQ8=", + "dev": true + }, + "convert-source-map": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", + "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.1" + } + }, + "copy-descriptor": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", + "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=", + "dev": true + }, + "copy-props": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/copy-props/-/copy-props-2.0.4.tgz", + "integrity": "sha512-7cjuUME+p+S3HZlbllgsn2CDwS+5eCCX16qBgNC4jgSTf49qR1VKy/Zhl400m0IQXl/bPGEVqncgUUMjrr4s8A==", + "dev": true, + "requires": { + "each-props": "^1.3.0", + "is-plain-object": "^2.0.1" + }, + "dependencies": { + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + } + } + }, + "core-js": { + "version": "3.6.5", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.5.tgz", + "integrity": "sha512-vZVEEwZoIsI+vPEuoF9Iqf5H7/M3eeQqWlQnYa8FSKKePuYTf5MWnxb5SDAzCa60b3JBRS5g9b+Dq7b1y/RCrA==", + "dev": true + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "cosmiconfig": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz", + "integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==", + "dev": true, + "requires": { + "import-fresh": "^2.0.0", + "is-directory": "^0.3.1", + "js-yaml": "^3.13.1", + "parse-json": "^4.0.0" + } + }, + "count-lines": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/count-lines/-/count-lines-0.1.2.tgz", + "integrity": "sha1-4zST+2hgqC9xWdgjeEP7+u/uWWI=", + "dev": true + }, + "create-ecdh": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", + "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "elliptic": "^6.5.3" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + 
"resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", + "dev": true + } + } + }, + "create-hash": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", + "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "dev": true, + "requires": { + "cipher-base": "^1.0.1", + "inherits": "^2.0.1", + "md5.js": "^1.3.4", + "ripemd160": "^2.0.1", + "sha.js": "^2.4.0" + } + }, + "create-hmac": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", + "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "dev": true, + "requires": { + "cipher-base": "^1.0.3", + "create-hash": "^1.1.0", + "inherits": "^2.0.1", + "ripemd160": "^2.0.0", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "crypto-browserify": { + "version": "3.12.0", + "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", + "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", + "dev": true, + "requires": { + "browserify-cipher": "^1.0.0", + "browserify-sign": "^4.0.0", + "create-ecdh": "^4.0.0", + "create-hash": "^1.1.0", + "create-hmac": "^1.1.0", + "diffie-hellman": "^5.0.0", + "inherits": "^2.0.1", + "pbkdf2": "^3.0.3", + "public-encrypt": "^4.0.0", + "randombytes": "^2.0.0", + "randomfill": "^1.0.3" + } + }, + "css-color-names": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz", + "integrity": "sha1-gIrcLnnPhHOAabZGyyDsJ762KeA=", + "dev": true + }, + "css-declaration-sorter": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz", + "integrity": "sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA==", + "dev": true, + "requires": { + "postcss": "^7.0.1", + "timsort": "^0.3.0" + } + }, + "css-select": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", + "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", + "dev": true, + "requires": { + "boolbase": "^1.0.0", + "css-what": "^3.2.1", + "domutils": "^1.7.0", + "nth-check": "^1.0.2" + } + }, + "css-select-base-adapter": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz", + "integrity": "sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w==", + "dev": true + }, + "css-tree": { + "version": "1.0.0-alpha.37", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz", + "integrity": "sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==", + "dev": true, + "requires": { + "mdn-data": "2.0.4", + "source-map": "^0.6.1" + } + }, + "css-what": { + "version": 
"3.4.2", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", + "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==", + "dev": true + }, + "cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true + }, + "cssnano": { + "version": "4.1.10", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.1.10.tgz", + "integrity": "sha512-5wny+F6H4/8RgNlaqab4ktc3e0/blKutmq8yNlBFXA//nSFFAqAngjNVRzUvCgYROULmZZUoosL/KSoZo5aUaQ==", + "dev": true, + "requires": { + "cosmiconfig": "^5.0.0", + "cssnano-preset-default": "^4.0.7", + "is-resolvable": "^1.0.0", + "postcss": "^7.0.0" + } + }, + "cssnano-preset-default": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-4.0.7.tgz", + "integrity": "sha512-x0YHHx2h6p0fCl1zY9L9roD7rnlltugGu7zXSKQx6k2rYw0Hi3IqxcoAGF7u9Q5w1nt7vK0ulxV8Lo+EvllGsA==", + "dev": true, + "requires": { + "css-declaration-sorter": "^4.0.1", + "cssnano-util-raw-cache": "^4.0.1", + "postcss": "^7.0.0", + "postcss-calc": "^7.0.1", + "postcss-colormin": "^4.0.3", + "postcss-convert-values": "^4.0.1", + "postcss-discard-comments": "^4.0.2", + "postcss-discard-duplicates": "^4.0.2", + "postcss-discard-empty": "^4.0.1", + "postcss-discard-overridden": "^4.0.1", + "postcss-merge-longhand": "^4.0.11", + "postcss-merge-rules": "^4.0.3", + "postcss-minify-font-values": "^4.0.2", + "postcss-minify-gradients": "^4.0.2", + "postcss-minify-params": "^4.0.2", + "postcss-minify-selectors": "^4.0.2", + "postcss-normalize-charset": "^4.0.1", + "postcss-normalize-display-values": "^4.0.2", + "postcss-normalize-positions": "^4.0.2", + "postcss-normalize-repeat-style": "^4.0.2", + "postcss-normalize-string": "^4.0.2", + "postcss-normalize-timing-functions": "^4.0.2", + "postcss-normalize-unicode": "^4.0.1", + "postcss-normalize-url": "^4.0.1", + "postcss-normalize-whitespace": "^4.0.2", + "postcss-ordered-values": "^4.1.2", + "postcss-reduce-initial": "^4.0.3", + "postcss-reduce-transforms": "^4.0.2", + "postcss-svgo": "^4.0.2", + "postcss-unique-selectors": "^4.0.1" + } + }, + "cssnano-util-get-arguments": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz", + "integrity": "sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8=", + "dev": true + }, + "cssnano-util-get-match": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz", + "integrity": "sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0=", + "dev": true + }, + "cssnano-util-raw-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz", + "integrity": "sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA==", + "dev": true, + "requires": { + "postcss": "^7.0.0" + } + }, + "cssnano-util-same-parent": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz", + "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==", + "dev": true + }, + "csso": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", + "integrity": 
"sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "dev": true, + "requires": { + "css-tree": "^1.1.2" + }, + "dependencies": { + "css-tree": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.2.tgz", + "integrity": "sha512-wCoWush5Aeo48GLhfHPbmvZs59Z+M7k5+B1xDnXbdWNcEF423DoFdqSWE0PM5aNk5nI5cp1q7ms36zGApY/sKQ==", + "dev": true, + "requires": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + } + }, + "mdn-data": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==", + "dev": true + } + } + }, + "cuint": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/cuint/-/cuint-0.2.2.tgz", + "integrity": "sha1-QICG1AlVDCYxFVYZ6fp7ytw7mRs=", + "dev": true + }, + "currently-unhandled": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", + "integrity": "sha1-mI3zP+qxke95mmE2nddsF635V+o=", + "dev": true, + "optional": true, + "requires": { + "array-find-index": "^1.0.1" + } + }, + "d": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", + "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", + "dev": true, + "requires": { + "es5-ext": "^0.10.50", + "type": "^1.0.1" + } + }, + "dash-ast": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/dash-ast/-/dash-ast-1.0.0.tgz", + "integrity": "sha512-Vy4dx7gquTeMcQR/hDkYLGUnwVil6vk4FOOct+djUnHOUWt+zJPJAaRIXaAFkPXtJjvlY7o3rfRu0/3hpnwoUA==", + "dev": true + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "dev": true, + "requires": { + "assert-plus": "^1.0.0" + } + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true + }, + "decamelize-keys": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz", + "integrity": "sha1-0XGoeTMlKAfrPLYdwcFEXQeN8tk=", + "dev": true, + "requires": { + "decamelize": "^1.1.0", + "map-obj": "^1.0.0" + } + }, + "decode-uri-component": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", + "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", + "dev": true + }, + "decompress": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/decompress/-/decompress-4.2.1.tgz", + "integrity": "sha512-e48kc2IjU+2Zw8cTb6VZcJQ3lgVbS4uuB1TfCHbiZIP/haNXm+SVyhu+87jts5/3ROpd82GSVCoNs/z8l4ZOaQ==", + "dev": true, + "optional": true, + "requires": { + "decompress-tar": "^4.0.0", + "decompress-tarbz2": "^4.0.0", + "decompress-targz": "^4.0.0", + "decompress-unzip": "^4.0.1", + "graceful-fs": "^4.1.10", + "make-dir": "^1.0.0", + "pify": "^2.3.0", + "strip-dirs": "^2.0.0" + }, + "dependencies": { + "make-dir": { + "version": "1.3.0", + "resolved": 
"https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", + "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "dev": true, + "optional": true, + "requires": { + "pify": "^3.0.0" + }, + "dependencies": { + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true, + "optional": true + } + } + } + } + }, + "decompress-response": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", + "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", + "dev": true, + "optional": true, + "requires": { + "mimic-response": "^1.0.0" + } + }, + "decompress-tar": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/decompress-tar/-/decompress-tar-4.1.1.tgz", + "integrity": "sha512-JdJMaCrGpB5fESVyxwpCx4Jdj2AagLmv3y58Qy4GE6HMVjWz1FeVQk1Ct4Kye7PftcdOo/7U7UKzYBJgqnGeUQ==", + "dev": true, + "optional": true, + "requires": { + "file-type": "^5.2.0", + "is-stream": "^1.1.0", + "tar-stream": "^1.5.2" + }, + "dependencies": { + "file-type": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", + "integrity": "sha1-LdvqfHP/42No365J3DOMBYwritY=", + "dev": true, + "optional": true + } + } + }, + "decompress-tarbz2": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/decompress-tarbz2/-/decompress-tarbz2-4.1.1.tgz", + "integrity": "sha512-s88xLzf1r81ICXLAVQVzaN6ZmX4A6U4z2nMbOwobxkLoIIfjVMBg7TeguTUXkKeXni795B6y5rnvDw7rxhAq9A==", + "dev": true, + "optional": true, + "requires": { + "decompress-tar": "^4.1.0", + "file-type": "^6.1.0", + "is-stream": "^1.1.0", + "seek-bzip": "^1.0.5", + "unbzip2-stream": "^1.0.9" + }, + "dependencies": { + "file-type": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-6.2.0.tgz", + "integrity": "sha512-YPcTBDV+2Tm0VqjybVd32MHdlEGAtuxS3VAYsumFokDSMG+ROT5wawGlnHDoz7bfMcMDt9hxuXvXwoKUx2fkOg==", + "dev": true, + "optional": true + } + } + }, + "decompress-targz": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/decompress-targz/-/decompress-targz-4.1.1.tgz", + "integrity": "sha512-4z81Znfr6chWnRDNfFNqLwPvm4db3WuZkqV+UgXQzSngG3CEKdBkw5jrv3axjjL96glyiiKjsxJG3X6WBZwX3w==", + "dev": true, + "optional": true, + "requires": { + "decompress-tar": "^4.1.1", + "file-type": "^5.2.0", + "is-stream": "^1.1.0" + }, + "dependencies": { + "file-type": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", + "integrity": "sha1-LdvqfHP/42No365J3DOMBYwritY=", + "dev": true, + "optional": true + } + } + }, + "decompress-unzip": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/decompress-unzip/-/decompress-unzip-4.0.1.tgz", + "integrity": "sha1-3qrM39FK6vhVePczroIQ+bSEj2k=", + "dev": true, + "optional": true, + "requires": { + "file-type": "^3.8.0", + "get-stream": "^2.2.0", + "pify": "^2.3.0", + "yauzl": "^2.4.2" + }, + "dependencies": { + "file-type": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-3.9.0.tgz", + "integrity": "sha1-JXoHg4TR24CHvESdEH1SpSZyuek=", + "dev": true, + "optional": true + }, + "get-stream": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-2.3.1.tgz", + "integrity": "sha1-Xzj5PzRgCWZu4BUKBUFn+Rvdld4=", + "dev": true, + "optional": true, + "requires": { + "object-assign": "^4.0.1", + 
"pinkie-promise": "^2.0.0" + } + } + } + }, + "dedent": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-0.7.0.tgz", + "integrity": "sha1-JJXduvbrh0q7Dhvp3yLS5aVEMmw=", + "dev": true + }, + "deep-is": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", + "dev": true + }, + "default-compare": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/default-compare/-/default-compare-1.0.0.tgz", + "integrity": "sha512-QWfXlM0EkAbqOCbD/6HjdwT19j7WCkMyiRhWilc4H9/5h/RzTF9gv5LYh1+CmDV5d1rki6KAWLtQale0xt20eQ==", + "dev": true, + "requires": { + "kind-of": "^5.0.2" + }, + "dependencies": { + "kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "dev": true + } + } + }, + "default-resolution": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/default-resolution/-/default-resolution-2.0.0.tgz", + "integrity": "sha1-vLgrqnKtebQmp2cy8aga1t8m1oQ=", + "dev": true + }, + "define-properties": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", + "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", + "dev": true, + "requires": { + "object-keys": "^1.0.12" + } + }, + "define-property": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", + "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "dev": true, + "requires": { + "is-descriptor": "^1.0.2", + "isobject": "^3.0.1" + }, + "dependencies": { + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "defined": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/defined/-/defined-1.0.0.tgz", + "integrity": "sha1-yY2bzvdWdBiOEQlpFRGZ45sfppM=", + "dev": true + }, + "delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", + "dev": true + }, + "depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", + "dev": true + }, + "deprecation": { + "version": "2.3.1", + "resolved": 
"https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==", + "dev": true + }, + "deps-sort": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/deps-sort/-/deps-sort-2.0.1.tgz", + "integrity": "sha512-1orqXQr5po+3KI6kQb9A4jnXT1PBwggGl2d7Sq2xsnOeI9GPcE/tGcF9UiSZtZBM7MukY4cAh7MemS6tZYipfw==", + "dev": true, + "requires": { + "JSONStream": "^1.0.3", + "shasum-object": "^1.0.0", + "subarg": "^1.0.0", + "through2": "^2.0.0" + } + }, + "des.js": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz", + "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "destroy": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", + "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=", + "dev": true + }, + "detect-file": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/detect-file/-/detect-file-1.0.0.tgz", + "integrity": "sha1-8NZtA2cqglyxtzvbP+YjEMjlUrc=", + "dev": true + }, + "detective": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/detective/-/detective-5.2.0.tgz", + "integrity": "sha512-6SsIx+nUUbuK0EthKjv0zrdnajCCXVYGmbYYiYjFVpzcjwEs/JMDZ8tPRG29J/HhN56t3GJp2cGSWDRjjot8Pg==", + "dev": true, + "requires": { + "acorn-node": "^1.6.1", + "defined": "^1.0.0", + "minimist": "^1.1.1" + } + }, + "diffie-hellman": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", + "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "miller-rabin": "^4.0.0", + "randombytes": "^2.0.0" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", + "dev": true + } + } + }, + "dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "requires": { + "path-type": "^4.0.0" + }, + "dependencies": { + "path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true + } + } + }, + "dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true + }, + "docsearch.js": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/docsearch.js/-/docsearch.js-2.6.3.tgz", + "integrity": "sha512-GN+MBozuyz664ycpZY0ecdQE0ND/LSgJKhTLA0/v3arIS3S1Rpf2OJz6A35ReMsm91V5apcmzr5/kM84cvUg+A==", + "dev": true, + "requires": { + "algoliasearch": "^3.24.5", + "autocomplete.js": "0.36.0", + "hogan.js": "^3.0.2", + "request": "^2.87.0", + "stack-utils": "^1.0.1", + "to-factory": "^1.0.0", + "zepto": "^1.2.0" + } + }, + "doctrine": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "requires": { + "esutils": "^2.0.2" + } + }, + "dom-serializer": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", + "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", + "dev": true, + "requires": { + "domelementtype": "^2.0.1", + "entities": "^2.0.0" + }, + "dependencies": { + "domelementtype": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.0.2.tgz", + "integrity": "sha512-wFwTwCVebUrMgGeAwRL/NhZtHAUyT9n9yg4IMDwf10+6iCMxSkVq9MGCVEH+QZWo1nNidy8kNvwmv4zWHDTqvA==", + "dev": true + } + } + }, + "dom-walk": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz", + "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==", + "dev": true + }, + "domain-browser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", + "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", + "dev": true + }, + "domelementtype": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", + "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==", + "dev": true + }, + "domhandler": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", + "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", + "dev": true, + "requires": { + "domelementtype": "1" + } + }, + "domutils": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", + "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", + "dev": true, + "requires": { + "dom-serializer": "0", + "domelementtype": "1" + } + }, + "dot-prop": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", + "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", + "dev": true, + "requires": { + "is-obj": "^2.0.0" + } + }, + "download": { + "version": "6.2.5", + "resolved": "https://registry.npmjs.org/download/-/download-6.2.5.tgz", + "integrity": "sha512-DpO9K1sXAST8Cpzb7kmEhogJxymyVUd5qz/vCOSyvwtp2Klj2XcDt5YUuasgxka44SxF0q5RriKIwJmQHG2AuA==", + "dev": true, + "optional": true, + "requires": { + "caw": "^2.0.0", + "content-disposition": "^0.5.2", + "decompress": "^4.0.0", + "ext-name": "^5.0.0", + "file-type": "5.2.0", + "filenamify": "^2.0.0", + "get-stream": "^3.0.0", + "got": "^7.0.0", + "make-dir": "^1.0.0", + "p-event": "^1.0.0", + "pify": "^3.0.0" + }, + "dependencies": { + "file-type": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", + "integrity": "sha1-LdvqfHP/42No365J3DOMBYwritY=", + "dev": true, + "optional": true + }, + "make-dir": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", + "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "dev": true, + 
"optional": true, + "requires": { + "pify": "^3.0.0" + } + }, + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true, + "optional": true + } + } + }, + "duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==", + "dev": true + }, + "duplexer2": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", + "integrity": "sha1-ixLauHjA1p4+eJEFFmKjL8a93ME=", + "dev": true, + "requires": { + "readable-stream": "^2.0.2" + } + }, + "duplexer3": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", + "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=", + "dev": true, + "optional": true + }, + "duplexify": { + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", + "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", + "dev": true, + "requires": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "each-props": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/each-props/-/each-props-1.3.2.tgz", + "integrity": "sha512-vV0Hem3zAGkJAyU7JSjixeU66rwdynTAa1vofCrSA5fEln+m67Az9CcnkVD776/fsN/UjIWmBDoNRS6t6G9RfA==", + "dev": true, + "requires": { + "is-plain-object": "^2.0.1", + "object.defaults": "^1.1.0" + }, + "dependencies": { + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + } + } + }, + "ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", + "dev": true, + "requires": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=", + "dev": true + }, + "electron-to-chromium": { + "version": "1.3.609", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.609.tgz", + "integrity": "sha512-kcmRWmlHsUKKLfsSKSf7VqeTX4takM5ndjVTM3et3qpDCceITYI1DixvIfSDIngALoaTnpoMXD3SXSMpzHkYKA==", + "dev": true + }, + "elliptic": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz", + "integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==", + "dev": true, + "requires": { + "bn.js": "^4.4.0", + "brorand": "^1.0.1", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.0" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", + "dev": true + } + } + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", 
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", + "dev": true + }, + "end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dev": true, + "requires": { + "once": "^1.4.0" + } + }, + "entities": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", + "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==", + "dev": true + }, + "envify": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/envify/-/envify-4.1.0.tgz", + "integrity": "sha512-IKRVVoAYr4pIx4yIWNsz9mOsboxlNXiu7TNBnem/K/uTHdkyzXWDzHCK7UTolqBbgaBz0tQHsD3YNls0uIIjiw==", + "dev": true, + "requires": { + "esprima": "^4.0.0", + "through": "~2.3.4" + } + }, + "error": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/error/-/error-7.2.1.tgz", + "integrity": "sha512-fo9HBvWnx3NGUKMvMwB/CBCMMrfEJgbDTVDEkPygA3Bdd3lM1OyCd+rbQ8BwnpF6GdVeOLDNmyL4N5Bg80ZvdA==", + "dev": true, + "requires": { + "string-template": "~0.2.1" + } + }, + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "es-abstract": { + "version": "1.18.0-next.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0-next.1.tgz", + "integrity": "sha512-I4UGspA0wpZXWENrdA0uHbnhte683t3qT/1VFH9aX2dA5PPSf6QW5HHXf5HImaqPmjXaVeVk4RGWnaylmV7uAA==", + "dev": true, + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-negative-zero": "^2.0.0", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + } + }, + "es-to-primitive": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", + "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", + "dev": true, + "requires": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + } + }, + "es5-ext": { + "version": "0.10.53", + "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", + "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", + "dev": true, + "requires": { + "es6-iterator": "~2.0.3", + "es6-symbol": "~3.1.3", + "next-tick": "~1.0.0" + } + }, + "es6-iterator": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", + "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", + "dev": true, + "requires": { + "d": "1", + "es5-ext": "^0.10.35", + "es6-symbol": "^3.1.1" + } + }, + "es6-map": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/es6-map/-/es6-map-0.1.5.tgz", + "integrity": "sha1-kTbgUD3MBqMBaQ8LsU/042TpSfA=", + 
"dev": true, + "requires": { + "d": "1", + "es5-ext": "~0.10.14", + "es6-iterator": "~2.0.1", + "es6-set": "~0.1.5", + "es6-symbol": "~3.1.1", + "event-emitter": "~0.3.5" + } + }, + "es6-promise": { + "version": "4.2.8", + "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", + "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==", + "dev": true + }, + "es6-set": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/es6-set/-/es6-set-0.1.5.tgz", + "integrity": "sha1-0rPsXU2ADO2BjbU40ol02wpzzLE=", + "dev": true, + "requires": { + "d": "1", + "es5-ext": "~0.10.14", + "es6-iterator": "~2.0.1", + "es6-symbol": "3.1.1", + "event-emitter": "~0.3.5" + }, + "dependencies": { + "es6-symbol": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.1.tgz", + "integrity": "sha1-vwDvT9q2uhtG7Le2KbTH7VcVzHc=", + "dev": true, + "requires": { + "d": "1", + "es5-ext": "~0.10.14" + } + } + } + }, + "es6-symbol": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", + "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", + "dev": true, + "requires": { + "d": "^1.0.1", + "ext": "^1.1.2" + } + }, + "es6-weak-map": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es6-weak-map/-/es6-weak-map-2.0.3.tgz", + "integrity": "sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==", + "dev": true, + "requires": { + "d": "1", + "es5-ext": "^0.10.46", + "es6-iterator": "^2.0.3", + "es6-symbol": "^3.1.1" + } + }, + "escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true + }, + "escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "eslint": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.2.0.tgz", + "integrity": "sha512-B3BtEyaDKC5MlfDa2Ha8/D6DsS4fju95zs0hjS3HdGazw+LNayai38A25qMppK37wWGWNYSPOR6oYzlz5MHsRQ==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "ajv": "^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "eslint-scope": "^5.1.0", + "eslint-utils": "^2.0.0", + "eslint-visitor-keys": "^1.2.0", + "espree": "^7.1.0", + "esquery": "^1.2.0", + "esutils": "^2.0.2", + "file-entry-cache": "^5.0.1", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^5.0.0", + "globals": "^12.1.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "inquirer": "^7.0.0", + "is-glob": "^4.0.0", + "js-yaml": "^3.13.1", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash": "^4.17.14", + "minimatch": "^3.0.4", + "natural-compare": "^1.4.0", + "optionator": "^0.9.1", + "progress": "^2.0.0", + "regexpp": "^3.1.0", + "semver": "^7.2.1", + "strip-ansi": "^6.0.0", + "strip-json-comments": "^3.1.0", + "table": "^5.2.3", + "text-table": "^0.2.0", + 
"v8-compile-cache": "^2.0.3" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "import-fresh": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.2.tgz", + "integrity": "sha512-cTPNrlvJT6twpYy+YmKUKrTSjWFs3bjYjAhCwm+z4EOCubZxAuO+hHpRN64TqjEaYSHs7tJAE0w1CKMGmsG/lw==", + "dev": true, + "requires": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true + }, + "semver": { + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", + "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==", + "dev": true + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "eslint-config-standard": { + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-14.1.1.tgz", + "integrity": "sha512-Z9B+VR+JIXRxz21udPTL9HpFMyoMUEeX1G251EQ6e05WD9aPVtVBn09XUmZ259wCMlCDmYDSZG62Hhm+ZTJcUg==", + "dev": true + }, + "eslint-import-resolver-node": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.4.tgz", + "integrity": "sha512-ogtf+5AB/O+nM6DIeBUNr2fuT7ot9Qg/1harBfBtaP13ekEWFQEEMP94BCB7zaNW3gyY+8SHYF00rnqYwXKWOA==", + "dev": true, + "requires": { + "debug": "^2.6.9", + "resolve": "^1.13.1" + } + }, + "eslint-module-utils": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.6.0.tgz", + "integrity": "sha512-6j9xxegbqe8/kZY8cYpcp0xhbK0EgJlg3g9mib3/miLaExuuwc3n5UEfSnU6hWMbT0FAYVvDbL9RrRgpUeQIvA==", + "dev": true, + "requires": { + "debug": "^2.6.9", + "pkg-dir": "^2.0.0" + } + }, + "eslint-plugin-es": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", + "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", + "dev": true, + "requires": { + "eslint-utils": "^2.0.0", + "regexpp": "^3.0.0" + } + }, + "eslint-plugin-import": { + "version": "2.21.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.21.2.tgz", + "integrity": "sha512-FEmxeGI6yaz+SnEB6YgNHlQK1Bs2DKLM+YF+vuTk5H8J9CLbJLtlPvRFgZZ2+sXiKAlN5dpdlrWOjK8ZoZJpQA==", + "dev": true, + "requires": { + "array-includes": "^3.1.1", + "array.prototype.flat": "^1.2.3", + "contains-path": "^0.1.0", + "debug": "^2.6.9", + "doctrine": "1.5.0", + "eslint-import-resolver-node": "^0.3.3", + "eslint-module-utils": "^2.6.0", + "has": "^1.0.3", + "minimatch": "^3.0.4", + "object.values": "^1.1.1", + "read-pkg-up": "^2.0.0", + "resolve": "^1.17.0", + "tsconfig-paths": "^3.9.0" + }, + "dependencies": { + "doctrine": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-1.5.0.tgz", + "integrity": "sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=", + "dev": true, + "requires": { + "esutils": "^2.0.2", + "isarray": "^1.0.0" + } + } + } + }, + "eslint-plugin-node": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", + "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", + "dev": true, + "requires": { + "eslint-plugin-es": "^3.0.0", + "eslint-utils": "^2.0.0", + "ignore": "^5.1.1", + "minimatch": "^3.0.4", + "resolve": "^1.10.1", + "semver": "^6.1.0" + }, + "dependencies": { + "ignore": { + "version": "5.1.8", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", + "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", + "dev": true + }, + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "eslint-plugin-promise": { + "version": "4.2.1", + "resolved": 
"https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-4.2.1.tgz", + "integrity": "sha512-VoM09vT7bfA7D+upt+FjeBO5eHIJQBUWki1aPvB+vbNiHS3+oGIJGIeyBtKQTME6UPXXy3vV07OL1tHd3ANuDw==", + "dev": true + }, + "eslint-plugin-standard": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-standard/-/eslint-plugin-standard-4.0.2.tgz", + "integrity": "sha512-nKptN8l7jksXkwFk++PhJB3cCDTcXOEyhISIN86Ue2feJ1LFyY3PrY3/xT2keXlJSY5bpmbiTG0f885/YKAvTA==", + "dev": true + }, + "eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "requires": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + } + }, + "eslint-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz", + "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", + "dev": true, + "requires": { + "eslint-visitor-keys": "^1.1.0" + } + }, + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", + "dev": true + }, + "espree": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.0.tgz", + "integrity": "sha512-dksIWsvKCixn1yrEXO8UosNSxaDoSYpq9reEjZSbHLpT5hpaCAKTLBwq0RHtLrIr+c0ByiYzWT8KTMRzoRCNlw==", + "dev": true, + "requires": { + "acorn": "^7.4.0", + "acorn-jsx": "^5.2.0", + "eslint-visitor-keys": "^1.3.0" + } + }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true + }, + "esquery": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.3.1.tgz", + "integrity": "sha512-olpvt9QG0vniUBZspVRN6lwB7hOZoTRtT+jzR+tS4ffYx2mzbw+z0XCOk44aaLYKApNX5nMm+E+P6o25ip/DHQ==", + "dev": true, + "requires": { + "estraverse": "^5.1.0" + }, + "dependencies": { + "estraverse": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", + "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", + "dev": true + } + } + }, + "esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "requires": { + "estraverse": "^5.2.0" + }, + "dependencies": { + "estraverse": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", + "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", + "dev": true + } + } + }, + "estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true + }, + "estree-is-function": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/estree-is-function/-/estree-is-function-1.0.0.tgz", + "integrity": 
"sha512-nSCWn1jkSq2QAtkaVLJZY2ezwcFO161HVc174zL1KPW3RJ+O6C3eJb8Nx7OXzvhoEv+nLgSR1g71oWUHUDTrJA==", + "dev": true + }, + "estree-is-identifier": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/estree-is-identifier/-/estree-is-identifier-1.0.0.tgz", + "integrity": "sha512-2BDRGrkQJV/NhCAmmE33A35WAaxq3WQaGHgQuD//7orGWfpFqj8Srkwvx0TH+20yIdOF1yMQwi8anv5ISec2AQ==", + "dev": true + }, + "estree-is-member-expression": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/estree-is-member-expression/-/estree-is-member-expression-1.0.0.tgz", + "integrity": "sha512-Ec+X44CapIGExvSZN+pGkmr5p7HwUVQoPQSd458Lqwvaf4/61k/invHSh4BYK8OXnCkfEhWuIoG5hayKLQStIg==", + "dev": true + }, + "estree-is-require": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/estree-is-require/-/estree-is-require-1.0.0.tgz", + "integrity": "sha512-oWxQdSEmnUwNZsDQYiBNpVxKEhMmsJQSSxnDrwsr1MWtooCLfhgzsNGzmokdmfK0EzEIS5V4LPvqxv1Kmb1vvA==", + "dev": true, + "requires": { + "estree-is-identifier": "^1.0.0" + } + }, + "esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true + }, + "etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=", + "dev": true + }, + "event-emitter": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz", + "integrity": "sha1-34xp7vFkeSPHFXuc6DhAYQsCzDk=", + "dev": true, + "requires": { + "d": "1", + "es5-ext": "~0.10.14" + } + }, + "events": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/events/-/events-2.1.0.tgz", + "integrity": "sha512-3Zmiobend8P9DjmKAty0Era4jV8oJ0yGYe2nJJAxgymF9+N8F2m0hhZiMoWtcfepExzNKZumFU3ksdQbInGWCg==", + "dev": true + }, + "evp_bytestokey": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", + "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + "dev": true, + "requires": { + "md5.js": "^1.3.4", + "safe-buffer": "^5.1.1" + } + }, + "exec-buffer": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/exec-buffer/-/exec-buffer-3.2.0.tgz", + "integrity": "sha512-wsiD+2Tp6BWHoVv3B+5Dcx6E7u5zky+hUwOHjuH2hKSLR3dvRmX8fk8UD8uqQixHs4Wk6eDmiegVrMPjKj7wpA==", + "dev": true, + "optional": true, + "requires": { + "execa": "^0.7.0", + "p-finally": "^1.0.0", + "pify": "^3.0.0", + "rimraf": "^2.5.4", + "tempfile": "^2.0.0" + }, + "dependencies": { + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true, + "optional": true + } + } + }, + "execa": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", + "integrity": "sha1-lEvs00zEHuMqY6n68nrVpl/Fl3c=", + "dev": true, + "optional": true, + "requires": { + "cross-spawn": "^5.0.1", + "get-stream": "^3.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + }, + "dependencies": { + "cross-spawn": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", + "integrity": "sha1-6L0O/uWPz/b4+UUQoKVUu/ojVEk=", + "dev": true, + "optional": true, + "requires": { + "lru-cache": "^4.0.1", + 
"shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dev": true, + "optional": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "dev": true, + "optional": true + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "optional": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "execall": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/execall/-/execall-2.0.0.tgz", + "integrity": "sha512-0FU2hZ5Hh6iQnarpRtQurM/aAvp3RIbfvgLHrcqJYzhXyV2KFruhuChf9NC6waAhiUR7FFtlugkI4p7f2Fqlow==", + "dev": true, + "requires": { + "clone-regexp": "^2.1.0" + } + }, + "executable": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/executable/-/executable-4.1.1.tgz", + "integrity": "sha512-8iA79xD3uAch729dUG8xaaBBFGaEa0wdD2VkYLFHwlqosEj/jT66AzcreRDSgV7ehnNLBW2WR5jIXwGKjVdTLg==", + "dev": true, + "optional": true, + "requires": { + "pify": "^2.2.0" + } + }, + "expand-brackets": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", + "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", + "dev": true, + "requires": { + "debug": "^2.3.3", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "posix-character-classes": "^0.1.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "expand-tilde": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/expand-tilde/-/expand-tilde-2.0.2.tgz", + "integrity": "sha1-l+gBqgUt8CRU3kawK/YhZCzchQI=", + "dev": true, + "requires": { + "homedir-polyfill": "^1.0.1" + } + }, + "ext": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", + "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==", + "dev": true, + "requires": { + "type": "^2.0.0" + }, + "dependencies": { + "type": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/type/-/type-2.1.0.tgz", + "integrity": "sha512-G9absDWvhAWCV2gmF1zKud3OyC61nZDwWvBL2DApaVFogI07CprggiQAOOjvp2NRjYWFzPyu7vwtDrQFq8jeSA==", + "dev": true + } + } + }, + "ext-list": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/ext-list/-/ext-list-2.2.2.tgz", + "integrity": "sha512-u+SQgsubraE6zItfVA0tBuCBhfU9ogSRnsvygI7wht9TS510oLkBRXBsqopeUG/GBOIQyKZO9wjTqIu/sf5zFA==", + "dev": true, + "optional": true, + "requires": { + "mime-db": "^1.28.0" + } + }, + "ext-name": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/ext-name/-/ext-name-5.0.0.tgz", + "integrity": "sha512-yblEwXAbGv1VQDmow7s38W77hzAgJAO50ztBLMcUyUBfxv1HC+LGwtiEN+Co6LtlqT/5uwVOxsD4TNIilWhwdQ==", + "dev": true, + "optional": true, + "requires": { + "ext-list": "^2.0.0", + "sort-keys-length": "^1.0.0" + } + }, + "extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "dev": true + }, + "extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", + "dev": true, + "requires": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "dependencies": { + "is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dev": true, + "requires": { + "is-plain-object": "^2.0.4" + } + }, + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + } + } + }, + "external-editor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", + "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", + "dev": true, + "requires": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + } + }, + "extglob": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", + "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "dev": true, + "requires": { + "array-unique": "^0.3.2", + "define-property": "^1.0.0", + "expand-brackets": "^2.1.4", + "extend-shallow": "^2.0.1", + "fragment-cache": "^0.2.1", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dev": true, + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", + "dev": true + }, + "fancy-log": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/fancy-log/-/fancy-log-1.3.3.tgz", + "integrity": "sha512-k9oEhlyc0FrVh25qYuSELjr8oxsCoc4/LEZfg2iJJrfEk/tZL9bCoJE47gqAvI2m/AUjluCS4+3I0eTx8n3AEw==", + "dev": true, + "requires": { + "ansi-gray": "^0.1.1", + "color-support": "^1.1.3", + "parse-node-version": "^1.0.0", + "time-stamp": "^1.0.0" + } + }, + "fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "fast-glob": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.4.tgz", + "integrity": "sha512-kr/Oo6PX51265qeuCYsyGypiO5uJFgBS0jksyG7FUeCyQzNwYnzrNIMR1NXfkZXsMYXYLRAHgISHBz8gQcxKHQ==", + "dev": true, + "requires": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.0", + "merge2": "^1.3.0", + "micromatch": "^4.0.2", + "picomatch": "^2.2.1" + }, + "dependencies": { + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true + }, + "micromatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz", + "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==", + "dev": true, + "requires": { + "braces": "^3.0.1", + "picomatch": "^2.0.5" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + } + } + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": 
"sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true + }, + "fast-safe-stringify": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz", + "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==", + "dev": true + }, + "fastq": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.9.0.tgz", + "integrity": "sha512-i7FVWL8HhVY+CTkwFxkN2mk3h+787ixS5S63eb78diVRc1MCssarHq3W5cj0av7YDSwmaV928RNag+U1etRQ7w==", + "dev": true, + "requires": { + "reusify": "^1.0.4" + } + }, + "faye-websocket": { + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.10.0.tgz", + "integrity": "sha1-TkkvjQTftviQA1B/btvy1QHnxvQ=", + "dev": true, + "requires": { + "websocket-driver": ">=0.5.1" + } + }, + "fd-slicer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha1-JcfInLH5B3+IkbvmHY85Dq4lbx4=", + "dev": true, + "requires": { + "pend": "~1.2.0" + } + }, + "figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "requires": { + "escape-string-regexp": "^1.0.5" + } + }, + "file-entry-cache": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", + "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", + "dev": true, + "requires": { + "flat-cache": "^2.0.1" + } + }, + "file-type": { + "version": "12.4.2", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-12.4.2.tgz", + "integrity": "sha512-UssQP5ZgIOKelfsaB5CuGAL+Y+q7EmONuiwF3N5HAH0t27rvrttgi6Ra9k/+DVaY9UF6+ybxu5pOXLUdA8N7Vg==", + "dev": true + }, + "file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "dev": true, + "optional": true + }, + "filename-reserved-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/filename-reserved-regex/-/filename-reserved-regex-2.0.0.tgz", + "integrity": "sha1-q/c9+rc10EVECr/qLZHzieu/oik=", + "dev": true, + "optional": true + }, + "filenamify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/filenamify/-/filenamify-2.1.0.tgz", + "integrity": "sha512-ICw7NTT6RsDp2rnYKVd8Fu4cr6ITzGy3+u4vUujPkabyaz+03F24NWEX7fs5fp+kBonlaqPH8fAO2NM+SXt/JA==", + "dev": true, + "optional": true, + "requires": { + "filename-reserved-regex": "^2.0.0", + "strip-outer": "^1.0.0", + "trim-repeated": "^1.0.0" + } + }, + "fill-range": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", + "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", + "dev": true, + "requires": { + "extend-shallow": "^2.0.1", + "is-number": "^3.0.0", + "repeat-string": "^1.6.1", + "to-regex-range": "^2.1.0" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "finalhandler": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", + "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==", + "dev": true, + "requires": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "~2.3.0", + "parseurl": "~1.3.3", + "statuses": "~1.5.0", + "unpipe": "~1.0.0" + } + }, + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true, + "requires": { + "locate-path": "^2.0.0" + } + }, + "find-versions": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/find-versions/-/find-versions-3.2.0.tgz", + "integrity": "sha512-P8WRou2S+oe222TOCHitLy8zj+SIsVJh52VP4lvXkaFVnOFFdoWv1H1Jjvel1aI6NCFOAaeAVm8qrI0odiLcww==", + "dev": true, + "optional": true, + "requires": { + "semver-regex": "^2.0.0" + } + }, + "findup-sync": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-3.0.0.tgz", + "integrity": "sha512-YbffarhcicEhOrm4CtrwdKBdCuz576RLdhJDsIfvNtxUuhdRet1qZcsMjqbePtAseKdAnDyM/IyXbu7PRPRLYg==", + "dev": true, + "requires": { + "detect-file": "^1.0.0", + "is-glob": "^4.0.0", + "micromatch": "^3.0.4", + "resolve-dir": "^1.0.1" + } + }, + "fined": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/fined/-/fined-1.2.0.tgz", + "integrity": "sha512-ZYDqPLGxDkDhDZBjZBb+oD1+j0rA4E0pXY50eplAAOPg2N/gUBSSk5IM1/QhPfyVo19lJ+CvXpqfvk+b2p/8Ng==", + "dev": true, + "requires": { + "expand-tilde": "^2.0.2", + "is-plain-object": "^2.0.3", + "object.defaults": "^1.1.0", + "object.pick": "^1.2.0", + "parse-filepath": "^1.0.1" + }, + "dependencies": { + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + } + } + }, + "flagged-respawn": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/flagged-respawn/-/flagged-respawn-1.0.1.tgz", + "integrity": "sha512-lNaHNVymajmk0OJMBn8fVUAU1BtDeKIqKoVhk4xAALB57aALg6b4W0MfJ/cUE0g9YBXy5XhSlPIpYIJ7HaY/3Q==", + "dev": true + }, + "flat-cache": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", + "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", + "dev": true, + "requires": { + "flatted": "^2.0.0", + "rimraf": "2.6.3", + "write": "1.0.3" + } + }, + "flatted": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", + "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==", + "dev": true + }, + "flush-write-stream": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz", + "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "readable-stream": "^2.3.6" + } + }, + "for-in": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", + "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", + "dev": true + }, + "for-own": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/for-own/-/for-own-1.0.0.tgz", + "integrity": 
"sha1-xjMy9BXO3EsE2/5wz4NklMU8tEs=", + "dev": true, + "requires": { + "for-in": "^1.0.1" + } + }, + "foreach": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz", + "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k=", + "dev": true + }, + "forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", + "dev": true + }, + "form-data": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", + "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", + "dev": true, + "requires": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + } + }, + "fragment-cache": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", + "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", + "dev": true, + "requires": { + "map-cache": "^0.2.2" + } + }, + "fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=", + "dev": true + }, + "from2": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", + "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", + "dev": true, + "optional": true, + "requires": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", + "dev": true, + "optional": true + }, + "fs-extra": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.0.1.tgz", + "integrity": "sha512-h2iAoN838FqAFJY2/qVpzFXy+EBxfVE220PalAqQLDVsFOHLJrZvut5puAbCdNv6WJk+B8ihI+k0c7JK5erwqQ==", + "dev": true, + "requires": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^1.0.0" + } + }, + "fs-mkdirp-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-mkdirp-stream/-/fs-mkdirp-stream-1.0.0.tgz", + "integrity": "sha1-C3gV/DIBxqaeFNuYzgmMFpNSWes=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.11", + "through2": "^2.0.3" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "fsevents": { + "version": "1.2.13", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz", + "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==", + "dev": true, + "optional": true, + "requires": { + "bindings": "^1.5.0", + "nan": "^2.12.1" + } + }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "functional-red-black-tree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", + "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", + "dev": true + }, + "gensync": { + "version": "1.0.0-beta.2", + "resolved": 
"https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true + }, + "get-assigned-identifiers": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/get-assigned-identifiers/-/get-assigned-identifiers-1.2.0.tgz", + "integrity": "sha512-mBBwmeGTrxEMO4pMaaf/uUEFHnYtwr8FTe8Y/mer4rcV/bye0qGm6pw1bGZFGStxC5O76c5ZAVBGnqHmOaJpdQ==", + "dev": true + }, + "get-caller-file": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-1.0.3.tgz", + "integrity": "sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w==", + "dev": true + }, + "get-intrinsic": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.0.1.tgz", + "integrity": "sha512-ZnWP+AmS1VUaLgTRy47+zKtjTxz+0xMpx3I52i+aalBK1QP19ggLF3Db89KJX7kjfOfP2eoa01qc++GwPgufPg==", + "dev": true, + "requires": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1" + } + }, + "get-proxy": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/get-proxy/-/get-proxy-2.1.0.tgz", + "integrity": "sha512-zmZIaQTWnNQb4R4fJUEp/FC51eZsc6EkErspy3xtIYStaq8EB/hDIWipxsal+E8rz0qD7f2sL/NA9Xee4RInJw==", + "dev": true, + "optional": true, + "requires": { + "npm-conf": "^1.1.0" + } + }, + "get-stdin": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz", + "integrity": "sha1-uWjGsKBDhDJJAui/Gl3zJXmkUP4=", + "dev": true, + "optional": true + }, + "get-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", + "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=", + "dev": true, + "optional": true + }, + "get-value": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", + "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=", + "dev": true + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "dev": true, + "requires": { + "assert-plus": "^1.0.0" + } + }, + "gifsicle": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/gifsicle/-/gifsicle-4.0.1.tgz", + "integrity": "sha512-A/kiCLfDdV+ERV/UB+2O41mifd+RxH8jlRG8DMxZO84Bma/Fw0htqZ+hY2iaalLRNyUu7tYZQslqUBJxBggxbg==", + "dev": true, + "optional": true, + "requires": { + "bin-build": "^3.0.0", + "bin-wrapper": "^4.0.0", + "execa": "^1.0.0", + "logalot": "^2.0.0" + }, + "dependencies": { + "cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "dev": true, + "optional": true, + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "execa": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", + "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", + "dev": true, + "optional": true, + "requires": { + "cross-spawn": "^6.0.0", + "get-stream": "^4.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + } + }, + "get-stream": { + "version": 
"4.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", + "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "dev": true, + "optional": true, + "requires": { + "pump": "^3.0.0" + } + }, + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "dev": true, + "optional": true + }, + "pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dev": true, + "optional": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dev": true, + "optional": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "dev": true, + "optional": true + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "optional": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "glob": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/glob/-/glob-6.0.4.tgz", + "integrity": "sha1-DwiGD2oVUSey+t1PnOJLGqtuTSI=", + "dev": true, + "requires": { + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "2 || 3", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz", + "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==", + "dev": true, + "requires": { + "is-glob": "^4.0.1" + } + }, + "glob-stream": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/glob-stream/-/glob-stream-6.1.0.tgz", + "integrity": "sha1-cEXJlBOz65SIjYOrRtC0BMx73eQ=", + "dev": true, + "requires": { + "extend": "^3.0.0", + "glob": "^7.1.1", + "glob-parent": "^3.1.0", + "is-negated-glob": "^1.0.0", + "ordered-read-streams": "^1.0.0", + "pumpify": "^1.3.5", + "readable-stream": "^2.1.5", + "remove-trailing-separator": "^1.0.1", + "to-absolute-glob": "^2.0.0", + "unique-stream": "^2.0.2" + }, + "dependencies": { + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", + "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=", + "dev": true, + "requires": { + "is-glob": "^3.1.0", + "path-dirname": "^1.0.0" + } + }, + "is-glob": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", + "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", + 
"dev": true, + "requires": { + "is-extglob": "^2.1.0" + } + } + } + }, + "glob-watcher": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/glob-watcher/-/glob-watcher-5.0.5.tgz", + "integrity": "sha512-zOZgGGEHPklZNjZQaZ9f41i7F2YwE+tS5ZHrDhbBCk3stwahn5vQxnFmBJZHoYdusR6R1bLSXeGUy/BhctwKzw==", + "dev": true, + "requires": { + "anymatch": "^2.0.0", + "async-done": "^1.2.0", + "chokidar": "^2.0.0", + "is-negated-glob": "^1.0.0", + "just-debounce": "^1.0.0", + "normalize-path": "^3.0.0", + "object.defaults": "^1.1.0" + } + }, + "global": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/global/-/global-4.4.0.tgz", + "integrity": "sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==", + "dev": true, + "requires": { + "min-document": "^2.19.0", + "process": "^0.11.10" + } + }, + "global-modules": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-1.0.0.tgz", + "integrity": "sha512-sKzpEkf11GpOFuw0Zzjzmt4B4UZwjOcG757PPvrfhxcLFbq0wpsgpOqxpxtxFiCG4DtG93M6XRVbF2oGdev7bg==", + "dev": true, + "requires": { + "global-prefix": "^1.0.1", + "is-windows": "^1.0.1", + "resolve-dir": "^1.0.0" + } + }, + "global-prefix": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-1.0.2.tgz", + "integrity": "sha1-2/dDxsFJklk8ZVVoy2btMsASLr4=", + "dev": true, + "requires": { + "expand-tilde": "^2.0.2", + "homedir-polyfill": "^1.0.1", + "ini": "^1.3.4", + "is-windows": "^1.0.1", + "which": "^1.2.14" + }, + "dependencies": { + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "globals": { + "version": "12.4.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", + "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", + "dev": true, + "requires": { + "type-fest": "^0.8.1" + } + }, + "globby": { + "version": "10.0.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-10.0.2.tgz", + "integrity": "sha512-7dUi7RvCoT/xast/o/dLN53oqND4yk0nsHkhRgn9w65C4PofCLOoJ39iSOg+qVDdWQPIEj+eszMHQ+aLVwwQSg==", + "dev": true, + "requires": { + "@types/glob": "^7.1.1", + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.0.3", + "glob": "^7.1.3", + "ignore": "^5.1.1", + "merge2": "^1.2.3", + "slash": "^3.0.0" + }, + "dependencies": { + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "ignore": { + "version": "5.1.8", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", + "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", + "dev": true + } + } + }, + "globjoin": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/globjoin/-/globjoin-0.1.4.tgz", + "integrity": "sha1-L0SUrIkZ43Z8XLtpHp9GMyQoXUM=", + "dev": true + }, + "glogg": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/glogg/-/glogg-1.0.2.tgz", 
+ "integrity": "sha512-5mwUoSuBk44Y4EshyiqcH95ZntbDdTQqA3QYSrxmzj28Ai0vXBGMH1ApSANH14j2sIRtqCEyg6PfsuP7ElOEDA==", + "dev": true, + "requires": { + "sparkles": "^1.0.0" + } + }, + "gonzales-pe": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/gonzales-pe/-/gonzales-pe-4.3.0.tgz", + "integrity": "sha512-otgSPpUmdWJ43VXyiNgEYE4luzHCL2pz4wQ0OnDluC6Eg4Ko3Vexy/SrSynglw/eR+OhkzmqFCZa/OFa/RgAOQ==", + "dev": true, + "requires": { + "minimist": "^1.2.5" + } + }, + "got": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/got/-/got-7.1.0.tgz", + "integrity": "sha512-Y5WMo7xKKq1muPsxD+KmrR8DH5auG7fBdDVueZwETwV6VytKyU9OX/ddpq2/1hp1vIPvVb4T81dKQz3BivkNLw==", + "dev": true, + "optional": true, + "requires": { + "decompress-response": "^3.2.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "is-plain-obj": "^1.1.0", + "is-retry-allowed": "^1.0.0", + "is-stream": "^1.0.0", + "isurl": "^1.0.0-alpha5", + "lowercase-keys": "^1.0.0", + "p-cancelable": "^0.3.0", + "p-timeout": "^1.1.1", + "safe-buffer": "^5.0.1", + "timed-out": "^4.0.0", + "url-parse-lax": "^1.0.0", + "url-to-options": "^1.0.1" + } + }, + "graceful-fs": { + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", + "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==", + "dev": true + }, + "gulp": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/gulp/-/gulp-4.0.2.tgz", + "integrity": "sha512-dvEs27SCZt2ibF29xYgmnwwCYZxdxhQ/+LFWlbAW8y7jt68L/65402Lz3+CKy0Ov4rOs+NERmDq7YlZaDqUIfA==", + "dev": true, + "requires": { + "glob-watcher": "^5.0.3", + "gulp-cli": "^2.2.0", + "undertaker": "^1.2.1", + "vinyl-fs": "^3.0.0" + }, + "dependencies": { + "gulp-cli": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/gulp-cli/-/gulp-cli-2.3.0.tgz", + "integrity": "sha512-zzGBl5fHo0EKSXsHzjspp3y5CONegCm8ErO5Qh0UzFzk2y4tMvzLWhoDokADbarfZRL2pGpRp7yt6gfJX4ph7A==", + "dev": true, + "requires": { + "ansi-colors": "^1.0.1", + "archy": "^1.0.0", + "array-sort": "^1.0.0", + "color-support": "^1.1.3", + "concat-stream": "^1.6.0", + "copy-props": "^2.0.1", + "fancy-log": "^1.3.2", + "gulplog": "^1.0.0", + "interpret": "^1.4.0", + "isobject": "^3.0.1", + "liftoff": "^3.1.0", + "matchdep": "^2.0.0", + "mute-stdout": "^1.0.0", + "pretty-hrtime": "^1.0.0", + "replace-homedir": "^1.0.0", + "semver-greatest-satisfied-range": "^1.1.0", + "v8flags": "^3.2.0", + "yargs": "^7.1.0" + } + } + } + }, + "gulp-concat": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/gulp-concat/-/gulp-concat-2.6.1.tgz", + "integrity": "sha1-Yz0WyV2IUEYorQJmVmPO5aR5M1M=", + "dev": true, + "requires": { + "concat-with-sourcemaps": "^1.0.0", + "through2": "^2.0.0", + "vinyl": "^2.0.0" + } + }, + "gulp-connect": { + "version": "5.7.0", + "resolved": "https://registry.npmjs.org/gulp-connect/-/gulp-connect-5.7.0.tgz", + "integrity": "sha512-8tRcC6wgXMLakpPw9M7GRJIhxkYdgZsXwn7n56BA2bQYGLR9NOPhMzx7js+qYDy6vhNkbApGKURjAw1FjY4pNA==", + "dev": true, + "requires": { + "ansi-colors": "^2.0.5", + "connect": "^3.6.6", + "connect-livereload": "^0.6.0", + "fancy-log": "^1.3.2", + "map-stream": "^0.0.7", + "send": "^0.16.2", + "serve-index": "^1.9.1", + "serve-static": "^1.13.2", + "tiny-lr": "^1.1.1" + }, + "dependencies": { + "ansi-colors": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-2.0.5.tgz", + "integrity": 
"sha512-yAdfUZ+c2wetVNIFsNRn44THW+Lty6S5TwMpUfLA/UaGhiXbBv/F8E60/1hMLd0cnF/CDoWH8vzVaI5bAcHCjw==", + "dev": true + } + } + }, + "gulp-eslint": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gulp-eslint/-/gulp-eslint-6.0.0.tgz", + "integrity": "sha512-dCVPSh1sA+UVhn7JSQt7KEb4An2sQNbOdB3PA8UCfxsoPlAKjJHxYHGXdXC7eb+V1FAnilSFFqslPrq037l1ig==", + "dev": true, + "requires": { + "eslint": "^6.0.0", + "fancy-log": "^1.3.2", + "plugin-error": "^1.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true + }, + "cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "dev": true, + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + }, + "dependencies": { + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + } + } + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "eslint": { + "version": "6.8.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-6.8.0.tgz", + "integrity": "sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "ajv": "^6.10.0", + "chalk": "^2.1.0", + "cross-spawn": "^6.0.5", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "eslint-scope": "^5.0.0", + "eslint-utils": "^1.4.3", + "eslint-visitor-keys": "^1.1.0", + "espree": "^6.1.2", + "esquery": "^1.0.1", + "esutils": "^2.0.2", + "file-entry-cache": "^5.0.1", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^5.0.0", + "globals": "^12.1.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "inquirer": "^7.0.0", + "is-glob": "^4.0.0", + "js-yaml": "^3.13.1", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.3.0", + "lodash": "^4.17.14", + "minimatch": "^3.0.4", + "mkdirp": "^0.5.1", + "natural-compare": "^1.4.0", + "optionator": "^0.8.3", + "progress": "^2.0.0", + "regexpp": "^2.0.1", + "semver": "^6.1.2", + "strip-ansi": "^5.2.0", + "strip-json-comments": "^3.0.1", + "table": "^5.2.3", + "text-table": "^0.2.0", + "v8-compile-cache": "^2.0.3" + } + }, + "eslint-utils": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", + "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", + "dev": true, + "requires": { + "eslint-visitor-keys": "^1.1.0" + } + }, + "espree": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz", + "integrity": "sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==", + "dev": true, + "requires": { + "acorn": "^7.1.1", + "acorn-jsx": "^5.2.0", + "eslint-visitor-keys": 
"^1.1.0" + } + }, + "import-fresh": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.2.tgz", + "integrity": "sha512-cTPNrlvJT6twpYy+YmKUKrTSjWFs3bjYjAhCwm+z4EOCubZxAuO+hHpRN64TqjEaYSHs7tJAE0w1CKMGmsG/lw==", + "dev": true, + "requires": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + } + }, + "levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "dev": true, + "requires": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + } + }, + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "dev": true + }, + "prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "dev": true + }, + "regexpp": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", + "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", + "dev": true + }, + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true + }, + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dev": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "dev": true + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + } + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2" + } + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", 
+ "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "gulp-imagemin": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/gulp-imagemin/-/gulp-imagemin-6.2.0.tgz", + "integrity": "sha512-luHT+8kUz60KGzjJLUFzaPjl4b38UQLj8BJGkpJACRjiVEuzjohMOmLagkgXs+Rs4vYaUBr9tt1F/vLizaxgGg==", + "dev": true, + "requires": { + "chalk": "^2.4.1", + "fancy-log": "^1.3.2", + "imagemin": "^7.0.0", + "imagemin-gifsicle": "^6.0.1", + "imagemin-jpegtran": "^6.0.0", + "imagemin-optipng": "^7.0.0", + "imagemin-svgo": "^7.0.0", + "plugin-error": "^1.0.1", + "plur": "^3.0.1", + "pretty-bytes": "^5.3.0", + "through2-concurrent": "^2.0.0" + } + }, + "gulp-postcss": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/gulp-postcss/-/gulp-postcss-8.0.0.tgz", + "integrity": "sha512-Wtl6vH7a+8IS/fU5W9IbOpcaLqKxd5L1DUOzaPmlnCbX1CrG0aWdwVnC3Spn8th0m8D59YbysV5zPUe1n/GJYg==", + "dev": true, + "requires": { + "fancy-log": "^1.3.2", + "plugin-error": "^1.0.1", + "postcss": "^7.0.2", + "postcss-load-config": "^2.0.0", + "vinyl-sourcemaps-apply": "^0.2.1" + } + }, + "gulp-stylelint": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/gulp-stylelint/-/gulp-stylelint-13.0.0.tgz", + "integrity": "sha512-qFWBXnYDsGy6ttzqptctMZjJhhGc0FdFE+UNPlj/5fTyuUo5mfxcc7pzN4hIJnvB79BO1WikLtdtXuC/G2AhGA==", + "dev": true, + "requires": { + "chalk": "^3.0.0", + "fancy-log": "^1.3.3", + "plugin-error": "^1.0.1", + "source-map": "^0.7.3", + "strip-ansi": "^6.0.0", + "through2": "^3.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "source-map": { + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz", + "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==", + "dev": true + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "through2": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/through2/-/through2-3.0.2.tgz", + "integrity": "sha512-enaDQ4MUyP2W6ZyT6EsMzqBPZaM/avg8iuo+l2d3QCs0J+6RaqkHV/2/lOwDTueBHeJ/2LG9lrLW3d5rWPucuQ==", + "dev": true, + "requires": { + "inherits": "^2.0.4", + "readable-stream": "2 || 3" + } + } + } + }, + "gulp-uglify": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/gulp-uglify/-/gulp-uglify-3.0.2.tgz", + "integrity": "sha512-gk1dhB74AkV2kzqPMQBLA3jPoIAPd/nlNzP2XMDSG8XZrqnlCiDGAqC+rZOumzFvB5zOphlFh6yr3lgcAb/OOg==", + "dev": true, + "requires": { + "array-each": "^1.0.1", + "extend-shallow": "^3.0.2", + "gulplog": "^1.0.0", + "has-gulplog": "^0.1.0", + "isobject": "^3.0.1", + "make-error-cause": "^1.1.1", + "safe-buffer": "^5.1.2", + "through2": "^2.0.0", + "uglify-js": "^3.0.5", + "vinyl-sourcemaps-apply": "^0.2.0" + } + }, + "gulp-vinyl-zip": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/gulp-vinyl-zip/-/gulp-vinyl-zip-2.2.1.tgz", + "integrity": "sha512-9lwCZUkrENzP649hVQB2r+8GgeGtVrqA2fEeVDX6aYr6+yJjdczWu0r1C6WvbZdzhXcA61MtR5MEyjR9a3D7cw==", + "dev": true, + "requires": { + "queue": "^4.2.1", + "through": "^2.3.8", + "through2": "^2.0.3", + "vinyl": "^2.0.2", + "vinyl-fs": "^3.0.3", + "yauzl": "^2.2.1", + "yazl": "^2.2.1" + } + }, + "gulplog": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/gulplog/-/gulplog-1.0.0.tgz", + "integrity": "sha1-4oxNRdBey77YGDY86PnFkmIp/+U=", + "dev": true, + "requires": { + "glogg": "^1.0.0" + } + }, + "handlebars": { + "version": "4.7.6", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.6.tgz", + "integrity": "sha512-1f2BACcBfiwAfStCKZNrUCgqNZkGsAT7UM3kkYtXuLo0KnaVfjKOyf7PRzB6++aK9STyT1Pd2ZCPe3EGOXleXA==", + "dev": true, + "requires": { + "minimist": "^1.2.5", + "neo-async": "^2.6.0", + "source-map": "^0.6.1", + "uglify-js": "^3.1.4", + "wordwrap": "^1.0.0" + } + }, + "har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", + "dev": true + }, + "har-validator": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", + "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", + "dev": true, + "requires": { + "ajv": "^6.12.3", + "har-schema": "^2.0.0" + } + }, + "hard-rejection": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz", + "integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==", + "dev": true + }, + "has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dev": true, + "requires": { + "function-bind": "^1.1.1" + } + }, + "has-ansi": { 
+ "version": "2.0.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", + "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", + "dev": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "dev": true + }, + "has-gulplog": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/has-gulplog/-/has-gulplog-0.1.0.tgz", + "integrity": "sha1-ZBTIKRNpfaUVkDl9r7EvIpZ4Ec4=", + "dev": true, + "requires": { + "sparkles": "^1.0.0" + } + }, + "has-symbol-support-x": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz", + "integrity": "sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw==", + "dev": true, + "optional": true + }, + "has-symbols": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.1.tgz", + "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==", + "dev": true + }, + "has-to-string-tag-x": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz", + "integrity": "sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw==", + "dev": true, + "optional": true, + "requires": { + "has-symbol-support-x": "^1.4.1" + } + }, + "has-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", + "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", + "dev": true, + "requires": { + "get-value": "^2.0.6", + "has-values": "^1.0.0", + "isobject": "^3.0.0" + } + }, + "has-values": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", + "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", + "dev": true, + "requires": { + "is-number": "^3.0.0", + "kind-of": "^4.0.0" + }, + "dependencies": { + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "kind-of": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", + "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "hash-base": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", + "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", + "dev": true, + "requires": { + "inherits": "^2.0.4", + "readable-stream": "^3.6.0", + "safe-buffer": "^5.2.0" + }, + "dependencies": { + "readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + }, + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + 
"dev": true + } + } + }, + "hash.js": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", + "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "hex-color-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", + "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==", + "dev": true + }, + "highlight.js": { + "version": "9.18.5", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-9.18.5.tgz", + "integrity": "sha512-a5bFyofd/BHCX52/8i8uJkjr9DYwXIPnM/plwI6W7ezItLGqzt7X2G2nXuYSfsIJdkwwj/g9DG1LkcGJI/dDoA==", + "dev": true + }, + "hmac-drbg": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", + "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", + "dev": true, + "requires": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "hogan.js": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/hogan.js/-/hogan.js-3.0.2.tgz", + "integrity": "sha1-TNnhq9QpQUbnZ55B14mHMrAse/0=", + "dev": true, + "requires": { + "mkdirp": "0.3.0", + "nopt": "1.0.10" + }, + "dependencies": { + "mkdirp": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz", + "integrity": "sha1-G79asbqCevI1dRQ0kEJkVfSB/h4=", + "dev": true + } + } + }, + "homedir-polyfill": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz", + "integrity": "sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==", + "dev": true, + "requires": { + "parse-passwd": "^1.0.0" + } + }, + "hosted-git-info": { + "version": "2.8.8", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.8.tgz", + "integrity": "sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg==", + "dev": true + }, + "hsl-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz", + "integrity": "sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4=", + "dev": true + }, + "hsla-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz", + "integrity": "sha1-wc56MWjIxmFAM6S194d/OyJfnDg=", + "dev": true + }, + "html-comment-regex": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/html-comment-regex/-/html-comment-regex-1.1.2.tgz", + "integrity": "sha512-P+M65QY2JQ5Y0G9KKdlDpo0zK+/OHptU5AaBwUfAIDJZk1MYf32Frm84EcOytfJE0t5JvkAnKlmjsXDnWzCJmQ==", + "dev": true + }, + "html-tags": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz", + "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==", + "dev": true + }, + "htmlescape": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/htmlescape/-/htmlescape-1.1.1.tgz", + "integrity": "sha1-OgPtwiFLyjtmQko+eVk0lQnLA1E=", + "dev": true + }, + "htmlparser2": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz", + "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==", + "dev": true, + 
"requires": { + "domelementtype": "^1.3.1", + "domhandler": "^2.3.0", + "domutils": "^1.5.1", + "entities": "^1.1.1", + "inherits": "^2.0.1", + "readable-stream": "^3.1.1" + }, + "dependencies": { + "entities": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", + "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==", + "dev": true + }, + "readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + } + } + }, + "http-cache-semantics": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz", + "integrity": "sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w==", + "dev": true, + "optional": true + }, + "http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", + "dev": true, + "requires": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + "dependencies": { + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "dev": true + } + } + }, + "http-parser-js": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.2.tgz", + "integrity": "sha512-opCO9ASqg5Wy2FNo7A0sxy71yGbbkJJXLdgMK04Tcypw9jr2MgWbyubb0+WdmDmGnFflO7fRbqbaihh/ENDlRQ==", + "dev": true + }, + "http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "dev": true, + "requires": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + } + }, + "https-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", + "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=", + "dev": true + }, + "iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dev": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "dev": true + }, + "ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true + }, + "imagemin": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/imagemin/-/imagemin-7.0.1.tgz", + "integrity": "sha512-33AmZ+xjZhg2JMCe+vDf6a9mzWukE7l+wAtesjE7KyteqqKjzxv7aVQeWnul1Ve26mWvEQqyPwl0OctNBfSR9w==", + "dev": true, + "requires": { + "file-type": "^12.0.0", + "globby": "^10.0.0", + "graceful-fs": "^4.2.2", + "junk": "^3.1.0", + 
"make-dir": "^3.0.0", + "p-pipe": "^3.0.0", + "replace-ext": "^1.0.0" + } + }, + "imagemin-gifsicle": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/imagemin-gifsicle/-/imagemin-gifsicle-6.0.1.tgz", + "integrity": "sha512-kuu47c6iKDQ6R9J10xCwL0lgs0+sMz3LRHqRcJ2CRBWdcNmo3T5hUaM8hSZfksptZXJLGKk8heSAvwtSdB1Fng==", + "dev": true, + "optional": true, + "requires": { + "exec-buffer": "^3.0.0", + "gifsicle": "^4.0.0", + "is-gif": "^3.0.0" + } + }, + "imagemin-jpegtran": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/imagemin-jpegtran/-/imagemin-jpegtran-6.0.0.tgz", + "integrity": "sha512-Ih+NgThzqYfEWv9t58EItncaaXIHR0u9RuhKa8CtVBlMBvY0dCIxgQJQCfwImA4AV1PMfmUKlkyIHJjb7V4z1g==", + "dev": true, + "optional": true, + "requires": { + "exec-buffer": "^3.0.0", + "is-jpg": "^2.0.0", + "jpegtran-bin": "^4.0.0" + } + }, + "imagemin-optipng": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/imagemin-optipng/-/imagemin-optipng-7.1.0.tgz", + "integrity": "sha512-JNORTZ6j6untH7e5gF4aWdhDCxe3ODsSLKs/f7Grewy3ebZpl1ZsU+VUTPY4rzeHgaFA8GSWOoA8V2M3OixWZQ==", + "dev": true, + "optional": true, + "requires": { + "exec-buffer": "^3.0.0", + "is-png": "^2.0.0", + "optipng-bin": "^6.0.0" + } + }, + "imagemin-svgo": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/imagemin-svgo/-/imagemin-svgo-7.1.0.tgz", + "integrity": "sha512-0JlIZNWP0Luasn1HT82uB9nU9aa+vUj6kpT+MjPW11LbprXC+iC4HDwn1r4Q2/91qj4iy9tRZNsFySMlEpLdpg==", + "dev": true, + "optional": true, + "requires": { + "is-svg": "^4.2.1", + "svgo": "^1.3.2" + }, + "dependencies": { + "is-svg": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-4.2.1.tgz", + "integrity": "sha512-PHx3ANecKsKNl5y5+Jvt53Y4J7MfMpbNZkv384QNiswMKAWIbvcqbPz+sYbFKJI8Xv3be01GSFniPmoaP+Ai5A==", + "dev": true, + "optional": true, + "requires": { + "html-comment-regex": "^1.1.2" + } + } + } + }, + "immediate": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", + "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==", + "dev": true + }, + "import-cwd": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/import-cwd/-/import-cwd-2.1.0.tgz", + "integrity": "sha1-qmzzbnInYShcs3HsZRn1PiQ1sKk=", + "dev": true, + "requires": { + "import-from": "^2.1.0" + } + }, + "import-fresh": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz", + "integrity": "sha1-2BNVwVYS04bGH53dOSLUMEgipUY=", + "dev": true, + "requires": { + "caller-path": "^2.0.0", + "resolve-from": "^3.0.0" + } + }, + "import-from": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/import-from/-/import-from-2.1.0.tgz", + "integrity": "sha1-M1238qev/VOqpHHUuAId7ja387E=", + "dev": true, + "requires": { + "resolve-from": "^3.0.0" + } + }, + "import-lazy": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-3.1.0.tgz", + "integrity": "sha512-8/gvXvX2JMn0F+CDlSC4l6kOmVaLOO3XLkksI7CI3Ud95KDYJuYur2b9P/PUt/i/pDAMd/DulQsNbbbmRRsDIQ==", + "dev": true, + "optional": true + }, + "imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", + "dev": true + }, + "indent-string": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-2.1.0.tgz", + "integrity": 
"sha1-ji1INIdCEhtKghi3oTfppSBJ3IA=", + "dev": true, + "optional": true, + "requires": { + "repeating": "^2.0.0" + } + }, + "indexes-of": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz", + "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc=", + "dev": true + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "ini": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", + "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==", + "dev": true + }, + "inline-source-map": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/inline-source-map/-/inline-source-map-0.6.2.tgz", + "integrity": "sha1-+Tk0ccGKedFyT4Y/o4tYY3Ct4qU=", + "dev": true, + "requires": { + "source-map": "~0.5.3" + }, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "inquirer": { + "version": "7.3.3", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.3.3.tgz", + "integrity": "sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA==", + "dev": true, + "requires": { + "ansi-escapes": "^4.2.1", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-width": "^3.0.0", + "external-editor": "^3.0.3", + "figures": "^3.0.0", + "lodash": "^4.17.19", + "mute-stream": "0.0.8", + "run-async": "^2.4.0", + "rxjs": "^6.6.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0", + "through": "^2.3.6" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "has-flag": { + "version": 
"4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "insert-module-globals": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/insert-module-globals/-/insert-module-globals-7.2.1.tgz", + "integrity": "sha512-ufS5Qq9RZN+Bu899eA9QCAYThY+gGW7oRkmb0vC93Vlyu/CFGcH0OYPEjVkDXA5FEbTt1+VWzdoOD3Ny9N+8tg==", + "dev": true, + "requires": { + "JSONStream": "^1.0.3", + "acorn-node": "^1.5.2", + "combine-source-map": "^0.8.0", + "concat-stream": "^1.6.1", + "is-buffer": "^1.1.0", + "path-is-absolute": "^1.0.1", + "process": "~0.11.0", + "through2": "^2.0.0", + "undeclared-identifiers": "^1.1.2", + "xtend": "^4.0.0" + }, + "dependencies": { + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + } + } + }, + "interpret": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "dev": true + }, + "into-stream": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-3.1.0.tgz", + "integrity": "sha1-lvsKk2wSur1v8XUqF9BWFqvQlMY=", + "dev": true, + "optional": true, + "requires": { + "from2": "^2.1.1", + "p-is-promise": "^1.1.0" + } + }, + "invert-kv": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", + "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=", + "dev": true + }, + "ip-regex": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-4.2.0.tgz", + "integrity": "sha512-n5cDDeTWWRwK1EBoWwRti+8nP4NbytBBY0pldmnIkq6Z55KNFmWofh4rl9dPZpj+U/nVq7gweR3ylrvMt4YZ5A==", + "dev": true + }, + "irregular-plurals": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/irregular-plurals/-/irregular-plurals-2.0.0.tgz", + "integrity": "sha512-Y75zBYLkh0lJ9qxeHlMjQ7bSbyiSqNW/UOPWDmzC7cXskL1hekSITh1Oc6JV0XCWWZ9DE8VYSB71xocLk3gmGw==", + "dev": true + }, + "is-absolute": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz", + "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==", + "dev": true, + "requires": { + "is-relative": "^1.0.0", + "is-windows": "^1.0.1" + } + }, + "is-absolute-url": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz", + "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=", + "dev": true + }, + "is-accessor-descriptor": { + "version": "0.1.6", + "resolved": 
"https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", + "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "dev": true + }, + "is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "dev": true, + "requires": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + } + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", + "dev": true + }, + "is-binary-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", + "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", + "dev": true, + "requires": { + "binary-extensions": "^1.0.0" + } + }, + "is-buffer": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", + "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==", + "dev": true + }, + "is-callable": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.2.tgz", + "integrity": "sha512-dnMqspv5nU3LoewK2N/y7KLtxtakvTuaCsU9FU50/QDmdbHNy/4/JuRtMHqRU22o3q+W89YQndQEeCVwK+3qrA==", + "dev": true + }, + "is-color-stop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz", + "integrity": "sha1-z/9HGu5N1cnhWFmPvhKWe1za00U=", + "dev": true, + "requires": { + "css-color-names": "^0.0.4", + "hex-color-regex": "^1.1.0", + "hsl-regex": "^1.0.0", + "hsla-regex": "^1.0.0", + "rgb-regex": "^1.0.1", + "rgba-regex": "^1.0.0" + } + }, + "is-core-module": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.2.0.tgz", + "integrity": "sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ==", + "dev": true, + "requires": { + "has": "^1.0.3" + } + }, + "is-data-descriptor": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", + "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-date-object": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", + "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", + "dev": true + }, + "is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "dev": true + }, + "is-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", + "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^0.1.6", + "is-data-descriptor": "^0.1.4", + "kind-of": "^5.0.0" + }, + "dependencies": { + "kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "dev": true + } + } + }, + "is-directory": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz", + "integrity": "sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE=", + "dev": true + }, + "is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", + "dev": true + }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "dev": true + }, + "is-finite": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.1.0.tgz", + "integrity": "sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w==", + "dev": true, + "optional": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true + }, + "is-gif": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-gif/-/is-gif-3.0.0.tgz", + "integrity": "sha512-IqJ/jlbw5WJSNfwQ/lHEDXF8rxhRgF6ythk2oiEvhpG29F704eX9NO6TvPfMiq9DrbwgcEDnETYNcZDPewQoVw==", + "dev": true, + "optional": true, + "requires": { + "file-type": "^10.4.0" + }, + "dependencies": { + "file-type": { + "version": "10.11.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-10.11.0.tgz", + "integrity": "sha512-uzk64HRpUZyTGZtVuvrjP0FYxzQrBf4rojot6J65YMEbwBLB0CWm0CLojVpwpmFmxcE/lkvYICgfcGozbBq6rw==", + "dev": true, + "optional": true + } + } + }, + "is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "dev": true, + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": 
"sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "dev": true + }, + "is-jpg": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-jpg/-/is-jpg-2.0.0.tgz", + "integrity": "sha1-LhmX+m6RZuqsAkLarkQ0A+TvHZc=", + "dev": true, + "optional": true + }, + "is-natural-number": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-natural-number/-/is-natural-number-4.0.1.tgz", + "integrity": "sha1-q5124dtM7VHjXeDHLr7PCfc0zeg=", + "dev": true, + "optional": true + }, + "is-negated-glob": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-negated-glob/-/is-negated-glob-1.0.0.tgz", + "integrity": "sha1-aRC8pdqMleeEtXUbl2z1oQ/uNtI=", + "dev": true + }, + "is-negative-zero": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.0.tgz", + "integrity": "sha1-lVOxIbD6wohp2p7UWeIMdUN4hGE=", + "dev": true + }, + "is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "dev": true + }, + "is-object": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-object/-/is-object-1.0.1.tgz", + "integrity": "sha1-iVJojF7C/9awPsyF52ngKQMINHA=", + "dev": true, + "optional": true + }, + "is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", + "dev": true + }, + "is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "dev": true + }, + "is-png": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-png/-/is-png-2.0.0.tgz", + "integrity": "sha512-4KPGizaVGj2LK7xwJIz8o5B2ubu1D/vcQsgOGFEDlpcvgZHto4gBnyd0ig7Ws+67ixmwKoNmu0hYnpo6AaKb5g==", + "dev": true, + "optional": true + }, + "is-regex": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.1.tgz", + "integrity": "sha512-1+QkEcxiLlB7VEyFtyBg94e08OAsvq7FUBgApTq/w2ymCLyKJgDPsybBENVtA7XCQEgEXxKPonG+mvYRxh/LIg==", + "dev": true, + "requires": { + "has-symbols": "^1.0.1" + } + }, + "is-regexp": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-2.1.0.tgz", + "integrity": "sha512-OZ4IlER3zmRIoB9AqNhEggVxqIH4ofDns5nRrPS6yQxXE1TPCUpFznBfRQmQa8uC+pXqjMnukiJBxCisIxiLGA==", + "dev": true + }, + "is-relative": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz", + "integrity": 
"sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==", + "dev": true, + "requires": { + "is-unc-path": "^1.0.0" + } + }, + "is-resolvable": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", + "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==", + "dev": true + }, + "is-retry-allowed": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz", + "integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==", + "dev": true, + "optional": true + }, + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "dev": true, + "optional": true + }, + "is-string": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz", + "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==", + "dev": true + }, + "is-svg": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-3.0.0.tgz", + "integrity": "sha512-gi4iHK53LR2ujhLVVj+37Ykh9GLqYHX6JOVXbLAucaG/Cqw9xwdFOjDM2qeifLs1sF1npXXFvDu0r5HNgCMrzQ==", + "dev": true, + "requires": { + "html-comment-regex": "^1.1.0" + } + }, + "is-symbol": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", + "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", + "dev": true, + "requires": { + "has-symbols": "^1.0.1" + } + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", + "dev": true + }, + "is-unc-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz", + "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==", + "dev": true, + "requires": { + "unc-path-regex": "^0.1.2" + } + }, + "is-url-superb": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-url-superb/-/is-url-superb-3.0.0.tgz", + "integrity": "sha512-3faQP+wHCGDQT1qReM5zCPx2mxoal6DzbzquFlCYJLWyy4WPTved33ea2xFbX37z4NoriEwZGIYhFtx8RUB5wQ==", + "dev": true, + "requires": { + "url-regex": "^5.0.0" + } + }, + "is-utf8": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", + "integrity": "sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI=", + "dev": true + }, + "is-valid-glob": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-valid-glob/-/is-valid-glob-1.0.0.tgz", + "integrity": "sha1-Kb8+/3Ab4tTTFdusw5vDn+j2Aao=", + "dev": true + }, + "is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "dev": true + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + 
"isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "dev": true + }, + "isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=", + "dev": true + }, + "isurl": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isurl/-/isurl-1.0.0.tgz", + "integrity": "sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w==", + "dev": true, + "optional": true, + "requires": { + "has-to-string-tag-x": "^1.2.0", + "is-object": "^1.0.1" + } + }, + "jpegtran-bin": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jpegtran-bin/-/jpegtran-bin-4.0.0.tgz", + "integrity": "sha512-2cRl1ism+wJUoYAYFt6O/rLBfpXNWG2dUWbgcEkTt5WGMnqI46eEro8T4C5zGROxKRqyKpCBSdHPvt5UYCtxaQ==", + "dev": true, + "optional": true, + "requires": { + "bin-build": "^3.0.0", + "bin-wrapper": "^4.0.0", + "logalot": "^2.0.0" + } + }, + "jquery": { + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.5.1.tgz", + "integrity": "sha512-XwIBPqcMn57FxfT+Go5pzySnm4KWkT1Tv7gjrpT1srtf8Weynl6R273VJ5GjkRb51IzMp5nbaPjJXMWeju2MKg==", + "dev": true + }, + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "js-yaml": { + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz", + "integrity": "sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", + "dev": true + }, + "jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true + }, + "json-buffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", + "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=", + "dev": true, + "optional": true + }, + "json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", + "dev": true + }, + "json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "json-schema": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", + "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=", + "dev": true + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": 
true + }, + "json-stable-stringify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-0.0.1.tgz", + "integrity": "sha1-YRwj6BTbN1Un34URk9tZ3Sryf0U=", + "dev": true, + "requires": { + "jsonify": "~0.0.0" + } + }, + "json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", + "dev": true + }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=", + "dev": true + }, + "json5": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", + "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "dev": true, + "requires": { + "minimist": "^1.2.0" + } + }, + "jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.6", + "universalify": "^2.0.0" + }, + "dependencies": { + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "dev": true + } + } + }, + "jsonify": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.0.tgz", + "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=", + "dev": true + }, + "jsonparse": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", + "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=", + "dev": true + }, + "jsprim": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", + "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "junk": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/junk/-/junk-3.1.0.tgz", + "integrity": "sha512-pBxcB3LFc8QVgdggvZWyeys+hnrNWg4OcZIU/1X59k5jQdLBlCsYGRQaz234SqoRLTCgMH00fY0xRJH+F9METQ==", + "dev": true + }, + "just-debounce": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/just-debounce/-/just-debounce-1.0.0.tgz", + "integrity": "sha1-h/zPrv/AtozRnVX2cilD+SnqNeo=", + "dev": true + }, + "keyv": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz", + "integrity": "sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==", + "dev": true, + "optional": true, + "requires": { + "json-buffer": "3.0.0" + } + }, + "kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true + }, + "known-css-properties": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/known-css-properties/-/known-css-properties-0.19.0.tgz", + "integrity": "sha512-eYboRV94Vco725nKMlpkn3nV2+96p9c3gKXRsYqAJSswSENvBhN7n5L+uDhY58xQa0UukWsDMTGELzmD8Q+wTA==", + "dev": true 
+ }, + "labeled-stream-splicer": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/labeled-stream-splicer/-/labeled-stream-splicer-2.0.2.tgz", + "integrity": "sha512-Ca4LSXFFZUjPScRaqOcFxneA0VpKZr4MMYCljyQr4LIewTLb3Y0IUTIsnBBsVubIeEfxeSZpSjSsRM8APEQaAw==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "stream-splicer": "^2.0.0" + } + }, + "last-run": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/last-run/-/last-run-1.1.1.tgz", + "integrity": "sha1-RblpQsF7HHnHchmCWbqUO+v4yls=", + "dev": true, + "requires": { + "default-resolution": "^2.0.0", + "es6-weak-map": "^2.0.1" + } + }, + "lazystream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lazystream/-/lazystream-1.0.0.tgz", + "integrity": "sha1-9plf4PggOS9hOWvolGJAe7dxaOQ=", + "dev": true, + "requires": { + "readable-stream": "^2.0.5" + } + }, + "lcid": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz", + "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=", + "dev": true, + "requires": { + "invert-kv": "^1.0.0" + } + }, + "lead": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lead/-/lead-1.0.0.tgz", + "integrity": "sha1-bxT5mje+Op3XhPVJVpDlkDRm7kI=", + "dev": true, + "requires": { + "flush-write-stream": "^1.0.2" + } + }, + "leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true + }, + "levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "requires": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + } + }, + "liftoff": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/liftoff/-/liftoff-3.1.0.tgz", + "integrity": "sha512-DlIPlJUkCV0Ips2zf2pJP0unEoT1kwYhiiPUGF3s/jtxTCjziNLoiVVh+jqWOWeFi6mmwQ5fNxvAUyPad4Dfog==", + "dev": true, + "requires": { + "extend": "^3.0.0", + "findup-sync": "^3.0.0", + "fined": "^1.0.1", + "flagged-respawn": "^1.0.0", + "is-plain-object": "^2.0.4", + "object.map": "^1.0.0", + "rechoir": "^0.6.2", + "resolve": "^1.1.7" + }, + "dependencies": { + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + } + } + }, + "lines-and-columns": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", + "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", + "dev": true + }, + "livereload-js": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/livereload-js/-/livereload-js-2.4.0.tgz", + "integrity": "sha512-XPQH8Z2GDP/Hwz2PCDrh2mth4yFejwA1OZ/81Ti3LgKyhDcEjsSsqFWZojHG0va/duGd+WyosY7eXLDoOyqcPw==", + "dev": true + }, + "load-json-file": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", + "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "parse-json": "^2.2.0", + "pify": "^2.0.0", + "strip-bom": "^3.0.0" + }, + "dependencies": { + "parse-json": { + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "dev": true, + "requires": { + "error-ex": "^1.2.0" + } + } + } + }, + "load-script": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz", + "integrity": "sha1-BJGTngvuVkPuSUp+PaPSuscMbKQ=", + "dev": true + }, + "locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true, + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "lodash": { + "version": "4.17.20", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz", + "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==", + "dev": true + }, + "lodash.memoize": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-3.0.4.tgz", + "integrity": "sha1-LcvSwofLwKVcxCMovQxzYVDVPj8=", + "dev": true + }, + "lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=", + "dev": true + }, + "log-symbols": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", + "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", + "dev": true, + "requires": { + "chalk": "^4.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": 
"^4.0.0" + } + } + } + }, + "logalot": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/logalot/-/logalot-2.1.0.tgz", + "integrity": "sha1-X46MkNME7fElMJUaVVSruMXj9VI=", + "dev": true, + "optional": true, + "requires": { + "figures": "^1.3.5", + "squeak": "^1.0.0" + }, + "dependencies": { + "figures": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-1.7.0.tgz", + "integrity": "sha1-y+Hjr/zxzUS4DK3+0o3Hk6lwHS4=", + "dev": true, + "optional": true, + "requires": { + "escape-string-regexp": "^1.0.5", + "object-assign": "^4.1.0" + } + } + } + }, + "loglevel": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.1.tgz", + "integrity": "sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw==", + "dev": true + }, + "loglevel-colored-level-prefix": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/loglevel-colored-level-prefix/-/loglevel-colored-level-prefix-1.0.0.tgz", + "integrity": "sha1-akAhj9x64V/HbD0PPmdsRlOIYD4=", + "dev": true, + "requires": { + "chalk": "^1.1.3", + "loglevel": "^1.4.1" + }, + "dependencies": { + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", + "dev": true + }, + "chalk": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "dev": true, + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", + "dev": true + } + } + }, + "longest": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", + "integrity": "sha1-MKCy2jj3N3DoKUoNIuZiXtd9AJc=", + "dev": true, + "optional": true + }, + "longest-streak": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-2.0.4.tgz", + "integrity": "sha512-vM6rUVCVUJJt33bnmHiZEvr7wPT78ztX7rojL+LW51bHtLh6HTjx84LA5W4+oa6aKEJA7jJu5LR6vQRBpA5DVg==", + "dev": true + }, + "loud-rejection": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/loud-rejection/-/loud-rejection-1.6.0.tgz", + "integrity": "sha1-W0b4AUft7leIcPCG0Eghz5mOVR8=", + "dev": true, + "optional": true, + "requires": { + "currently-unhandled": "^0.4.1", + "signal-exit": "^3.0.0" + } + }, + "lowercase-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", + "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", + "dev": true, + "optional": true + }, + "lpad-align": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/lpad-align/-/lpad-align-1.1.2.tgz", + "integrity": "sha1-IfYArBwwlcPG5JfuZyce4ISB/p4=", + "dev": true, + "optional": true, + "requires": { + "get-stdin": "^4.0.1", + "indent-string": "^2.1.0", + "longest": "^1.0.0", + "meow": "^3.3.0" + } + }, + "lru-cache": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", + "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", + "dev": true, + "optional": true, + 
"requires": { + "pseudomap": "^1.0.2", + "yallist": "^2.1.2" + } + }, + "magic-string": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.23.2.tgz", + "integrity": "sha512-oIUZaAxbcxYIp4AyLafV6OVKoB3YouZs0UTCJ8mOKBHNyJgGDaMJ4TgA+VylJh6fx7EQCC52XkbURxxG9IoJXA==", + "dev": true, + "requires": { + "sourcemap-codec": "^1.4.1" + } + }, + "make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dev": true, + "requires": { + "semver": "^6.0.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true + }, + "make-error-cause": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/make-error-cause/-/make-error-cause-1.2.2.tgz", + "integrity": "sha1-3wOI/NCzeBbf8KX7gQiTl3fcvJ0=", + "dev": true, + "requires": { + "make-error": "^1.2.0" + } + }, + "make-iterator": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/make-iterator/-/make-iterator-1.0.1.tgz", + "integrity": "sha512-pxiuXh0iVEq7VM7KMIhs5gxsfxCux2URptUQaXo4iZZJxBAzTPOLE2BumO5dbfVYq/hBJFBR/a1mFDmOx5AGmw==", + "dev": true, + "requires": { + "kind-of": "^6.0.2" + } + }, + "map-cache": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", + "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=", + "dev": true + }, + "map-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", + "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=", + "dev": true + }, + "map-stream": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/map-stream/-/map-stream-0.0.7.tgz", + "integrity": "sha1-ih8HiW2CsQkmvTdEokIACfiJdKg=", + "dev": true + }, + "map-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", + "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", + "dev": true, + "requires": { + "object-visit": "^1.0.0" + } + }, + "mark.js": { + "version": "8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": "sha1-GA8fnr74sOY45BZq1S24eb6y/8U=", + "dev": true + }, + "matchdep": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/matchdep/-/matchdep-2.0.0.tgz", + "integrity": "sha1-xvNINKDY28OzfCfui7yyfHd1WC4=", + "dev": true, + "requires": { + "findup-sync": "^2.0.0", + "micromatch": "^3.0.4", + "resolve": "^1.4.0", + "stack-trace": "0.0.10" + }, + "dependencies": { + "findup-sync": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-2.0.0.tgz", + "integrity": "sha1-kyaxSIwi0aYIhlCoaQGy2akKLLw=", + "dev": true, + "requires": { + "detect-file": "^1.0.0", + "is-glob": "^3.1.0", + "micromatch": "^3.0.4", + "resolve-dir": "^1.0.1" + } + }, + "is-glob": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", + "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", + "dev": true, + "requires": { + "is-extglob": 
"^2.1.0" + } + } + } + }, + "mathml-tag-names": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/mathml-tag-names/-/mathml-tag-names-2.1.3.tgz", + "integrity": "sha512-APMBEanjybaPzUrfqU0IMU5I0AswKMH7k8OTLs0vvV4KZpExkTkY87nR/zpbuTPj+gARop7aGUbl11pnDfW6xg==", + "dev": true + }, + "md5.js": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", + "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", + "dev": true, + "requires": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "mdast-util-from-markdown": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-0.8.1.tgz", + "integrity": "sha512-qJXNcFcuCSPqUF0Tb0uYcFDIq67qwB3sxo9RPdf9vG8T90ViKnksFqdB/Coq2a7sTnxL/Ify2y7aIQXDkQFH0w==", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "mdast-util-to-string": "^1.0.0", + "micromark": "~2.10.0", + "parse-entities": "^2.0.0" + } + }, + "mdast-util-to-markdown": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-0.5.4.tgz", + "integrity": "sha512-0jQTkbWYx0HdEA/h++7faebJWr5JyBoBeiRf0u3F4F3QtnyyGaWIsOwo749kRb1ttKrLLr+wRtOkfou9yB0p6A==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "longest-streak": "^2.0.0", + "mdast-util-to-string": "^2.0.0", + "parse-entities": "^2.0.0", + "repeat-string": "^1.0.0", + "zwitch": "^1.0.0" + }, + "dependencies": { + "mdast-util-to-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", + "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==", + "dev": true + } + } + }, + "mdast-util-to-string": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-1.1.0.tgz", + "integrity": "sha512-jVU0Nr2B9X3MU4tSK7JP1CMkSvOj7X5l/GboG1tKRw52lLF1x2Ju92Ms9tNetCcbfX3hzlM73zYo2NKkWSfF/A==", + "dev": true + }, + "mdn-data": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz", + "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==", + "dev": true + }, + "meow": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-3.7.0.tgz", + "integrity": "sha1-cstmi0JSKCkKu/qFaJJYcwioAfs=", + "dev": true, + "optional": true, + "requires": { + "camelcase-keys": "^2.0.0", + "decamelize": "^1.1.2", + "loud-rejection": "^1.0.0", + "map-obj": "^1.0.1", + "minimist": "^1.1.3", + "normalize-package-data": "^2.3.4", + "object-assign": "^4.0.1", + "read-pkg-up": "^1.0.1", + "redent": "^1.0.0", + "trim-newlines": "^1.0.0" + }, + "dependencies": { + "find-up": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz", + "integrity": "sha1-ay6YIrGizgpgq2TWEOzK1TyyTQ8=", + "dev": true, + "optional": true, + "requires": { + "path-exists": "^2.0.0", + "pinkie-promise": "^2.0.0" + } + }, + "load-json-file": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-1.1.0.tgz", + "integrity": "sha1-lWkFcI1YtLq0wiYbBPWfMcmTdMA=", + "dev": true, + "optional": true, + "requires": { + "graceful-fs": "^4.1.2", + "parse-json": "^2.2.0", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0", + "strip-bom": "^2.0.0" + } + }, + "parse-json": { + 
"version": "2.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "dev": true, + "optional": true, + "requires": { + "error-ex": "^1.2.0" + } + }, + "path-exists": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-2.1.0.tgz", + "integrity": "sha1-D+tsZPD8UY2adU3V77YscCJ2H0s=", + "dev": true, + "optional": true, + "requires": { + "pinkie-promise": "^2.0.0" + } + }, + "path-type": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz", + "integrity": "sha1-WcRPfuSR2nBNpBXaWkBwuk+P5EE=", + "dev": true, + "optional": true, + "requires": { + "graceful-fs": "^4.1.2", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0" + } + }, + "read-pkg": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-1.1.0.tgz", + "integrity": "sha1-9f+qXs0pyzHAR0vKfXVra7KePyg=", + "dev": true, + "optional": true, + "requires": { + "load-json-file": "^1.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^1.0.0" + } + }, + "read-pkg-up": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz", + "integrity": "sha1-nWPBMnbAZZGNV/ACpX9AobZD+wI=", + "dev": true, + "optional": true, + "requires": { + "find-up": "^1.0.0", + "read-pkg": "^1.0.0" + } + }, + "strip-bom": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz", + "integrity": "sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4=", + "dev": true, + "optional": true, + "requires": { + "is-utf8": "^0.2.0" + } + } + } + }, + "merge-source-map": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.0.4.tgz", + "integrity": "sha1-pd5GU42uhNQRTMXqArR3KmNGcB8=", + "dev": true, + "requires": { + "source-map": "^0.5.6" + }, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true + }, + "micromark": { + "version": "2.10.1", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-2.10.1.tgz", + "integrity": "sha512-fUuVF8sC1X7wsCS29SYQ2ZfIZYbTymp0EYr6sab3idFjigFFjGa5UwoniPlV9tAgntjuapW1t9U+S0yDYeGKHQ==", + "dev": true, + "requires": { + "debug": "^4.0.0", + "parse-entities": "^2.0.0" + }, + "dependencies": { + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + } + } + }, + "micromatch": { + "version": "3.1.10", + "resolved": 
"https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", + "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "dev": true, + "requires": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "braces": "^2.3.1", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "extglob": "^2.0.4", + "fragment-cache": "^0.2.1", + "kind-of": "^6.0.2", + "nanomatch": "^1.2.9", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.2" + } + }, + "miller-rabin": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", + "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "dev": true, + "requires": { + "bn.js": "^4.0.0", + "brorand": "^1.0.1" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", + "dev": true + } + } + }, + "mime": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz", + "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==", + "dev": true + }, + "mime-db": { + "version": "1.44.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", + "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==", + "dev": true + }, + "mime-types": { + "version": "2.1.27", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", + "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", + "dev": true, + "requires": { + "mime-db": "1.44.0" + } + }, + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true + }, + "mimic-response": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", + "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", + "dev": true, + "optional": true + }, + "min-document": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz", + "integrity": "sha1-e9KC4/WELtKVu3SM3Z8f+iyCRoU=", + "dev": true, + "requires": { + "dom-walk": "^0.1.0" + } + }, + "min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true + }, + "minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", + "dev": true + }, + "minimalistic-crypto-utils": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", + "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=", + "dev": true + }, + "minimatch": { + "version": "3.0.4", + "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "dev": true + }, + "minimist-options": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz", + "integrity": "sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==", + "dev": true, + "requires": { + "arrify": "^1.0.1", + "is-plain-obj": "^1.1.0", + "kind-of": "^6.0.3" + } + }, + "mixin-deep": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", + "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", + "dev": true, + "requires": { + "for-in": "^1.0.2", + "is-extendable": "^1.0.1" + }, + "dependencies": { + "is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dev": true, + "requires": { + "is-plain-object": "^2.0.4" + } + }, + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + } + } + }, + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "dev": true, + "requires": { + "minimist": "^1.2.5" + } + }, + "mkdirp-classic": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", + "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "dev": true + }, + "module-deps": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/module-deps/-/module-deps-6.2.3.tgz", + "integrity": "sha512-fg7OZaQBcL4/L+AK5f4iVqf9OMbCclXfy/znXRxTVhJSeW5AIlS9AwheYwDaXM3lVW7OBeaeUEY3gbaC6cLlSA==", + "dev": true, + "requires": { + "JSONStream": "^1.0.3", + "browser-resolve": "^2.0.0", + "cached-path-relative": "^1.0.2", + "concat-stream": "~1.6.0", + "defined": "^1.0.0", + "detective": "^5.2.0", + "duplexer2": "^0.1.2", + "inherits": "^2.0.1", + "parents": "^1.0.0", + "readable-stream": "^2.0.2", + "resolve": "^1.4.0", + "stream-combiner2": "^1.1.1", + "subarg": "^1.0.0", + "through2": "^2.0.0", + "xtend": "^4.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "mute-stdout": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mute-stdout/-/mute-stdout-1.0.1.tgz", + "integrity": "sha512-kDcwXR4PS7caBpuRYYBUz9iVixUk3anO3f5OYFiIPwK/20vCzKCHyKoulbiDY1S53zD2bxUpxN/IJ+TnXjfvxg==", + "dev": true + }, + "mute-stream": { + "version": "0.0.8", + "resolved": 
"https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", + "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", + "dev": true + }, + "mutexify": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/mutexify/-/mutexify-1.3.1.tgz", + "integrity": "sha512-nU7mOEuaXiQIB/EgTIjYZJ7g8KqMm2D8l4qp+DqA4jxWOb/tnb1KEoqp+tlbdQIDIAiC1i7j7X/3yHDFXLxr9g==", + "dev": true + }, + "nan": { + "version": "2.14.2", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", + "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", + "dev": true, + "optional": true + }, + "nanobench": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nanobench/-/nanobench-2.1.1.tgz", + "integrity": "sha512-z+Vv7zElcjN+OpzAxAquUayFLGK3JI/ubCl0Oh64YQqsTGG09CGqieJVQw4ui8huDnnAgrvTv93qi5UaOoNj8A==", + "dev": true, + "requires": { + "browser-process-hrtime": "^0.1.2", + "chalk": "^1.1.3", + "mutexify": "^1.1.0", + "pretty-hrtime": "^1.0.2" + }, + "dependencies": { + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", + "dev": true + }, + "chalk": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "dev": true, + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", + "dev": true + } + } + }, + "nanomatch": { + "version": "1.2.13", + "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", + "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", + "dev": true, + "requires": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "fragment-cache": "^0.2.1", + "is-windows": "^1.0.2", + "kind-of": "^6.0.2", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + } + }, + "natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", + "dev": true + }, + "negotiator": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", + "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", + "dev": true + }, + "neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true + }, + "next-tick": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", + "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=", + "dev": true + }, + "nice-try": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", + "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", + "dev": true + }, + "node-fetch": { + 
"version": "2.6.1", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", + "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==", + "dev": true + }, + "node-releases": { + "version": "1.1.67", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.67.tgz", + "integrity": "sha512-V5QF9noGFl3EymEwUYzO+3NTDpGfQB4ve6Qfnzf3UNydMhjQRVPR1DZTuvWiLzaFJYw2fmDwAfnRNEVb64hSIg==", + "dev": true + }, + "nopt": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz", + "integrity": "sha1-bd0hvSoxQXuScn3Vhfim83YI6+4=", + "dev": true, + "requires": { + "abbrev": "1" + } + }, + "normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "requires": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true + }, + "normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=", + "dev": true + }, + "normalize-selector": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/normalize-selector/-/normalize-selector-0.2.0.tgz", + "integrity": "sha1-0LFF62kRicY6eNIB3E/bEpPvDAM=", + "dev": true + }, + "normalize-url": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz", + "integrity": "sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg==", + "dev": true + }, + "now-and-later": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/now-and-later/-/now-and-later-2.0.1.tgz", + "integrity": "sha512-KGvQ0cB70AQfg107Xvs/Fbu+dGmZoTRJp2TaPwcwQm3/7PteUyN2BCgk8KBMPGBUXZdVwyWS8fDCGFygBm19UQ==", + "dev": true, + "requires": { + "once": "^1.3.2" + } + }, + "npm-conf": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/npm-conf/-/npm-conf-1.1.3.tgz", + "integrity": "sha512-Yic4bZHJOt9RCFbRP3GgpqhScOY4HH3V2P8yBj6CeYq118Qr+BLXqT2JvpJ00mryLESpgOxf5XlFv4ZjXxLScw==", + "dev": true, + "optional": true, + "requires": { + "config-chain": "^1.1.11", + "pify": "^3.0.0" + }, + "dependencies": { + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true, + "optional": true + } + } + }, + "npm-run-path": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", + "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", + "dev": true, + "optional": true, + "requires": { + "path-key": "^2.0.0" + }, + "dependencies": { + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "dev": true, + "optional": true + } + } + }, + "nth-check": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", + "integrity": 
"sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", + "dev": true, + "requires": { + "boolbase": "~1.0.0" + } + }, + "num2fraction": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz", + "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=", + "dev": true + }, + "number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "dev": true + }, + "oauth-sign": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", + "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", + "dev": true + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true + }, + "object-copy": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", + "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", + "dev": true, + "requires": { + "copy-descriptor": "^0.1.0", + "define-property": "^0.2.5", + "kind-of": "^3.0.3" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "object-inspect": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.8.0.tgz", + "integrity": "sha512-jLdtEOB112fORuypAyl/50VRVIBIdVQOSUUGQHzJ4xBSbit81zRarz7GThkEFZy1RceYrWYcPcBFPQwHyAc1gA==", + "dev": true + }, + "object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true + }, + "object-visit": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", + "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", + "dev": true, + "requires": { + "isobject": "^3.0.0" + } + }, + "object.assign": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" + } + }, + "object.defaults": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/object.defaults/-/object.defaults-1.1.0.tgz", + "integrity": "sha1-On+GgzS0B96gbaFtiNXNKeQ1/s8=", + "dev": true, + "requires": { + "array-each": "^1.0.1", + "array-slice": "^1.0.0", + "for-own": "^1.0.0", + "isobject": "^3.0.0" + } + }, + 
"object.getownpropertydescriptors": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.0.tgz", + "integrity": "sha512-Z53Oah9A3TdLoblT7VKJaTDdXdT+lQO+cNpKVnya5JDe9uLvzu1YyY1yFDFrcxrlRgWrEFH0jJtD/IbuwjcEVg==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.0-next.1" + }, + "dependencies": { + "es-abstract": { + "version": "1.17.7", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.7.tgz", + "integrity": "sha512-VBl/gnfcJ7OercKA9MVaegWsBHFjV492syMudcnQZvt/Dw8ezpcOHYZXa/J96O8vx+g4x65YKhxOwDUh63aS5g==", + "dev": true, + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + } + } + } + }, + "object.map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/object.map/-/object.map-1.0.1.tgz", + "integrity": "sha1-z4Plncj8wK1fQlDh94s7gb2AHTc=", + "dev": true, + "requires": { + "for-own": "^1.0.0", + "make-iterator": "^1.0.0" + } + }, + "object.pick": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", + "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + }, + "object.reduce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/object.reduce/-/object.reduce-1.0.1.tgz", + "integrity": "sha1-b+NI8qx/oPlcpiEiZZkJaCW7A60=", + "dev": true, + "requires": { + "for-own": "^1.0.0", + "make-iterator": "^1.0.0" + } + }, + "object.values": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.2.tgz", + "integrity": "sha512-MYC0jvJopr8EK6dPBiO8Nb9mvjdypOachO5REGk6MXzujbBrAisKo3HmdEI6kZDL6fC31Mwee/5YbtMebixeag==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "es-abstract": "^1.18.0-next.1", + "has": "^1.0.3" + } + }, + "on-finished": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", + "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", + "dev": true, + "requires": { + "ee-first": "1.1.1" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1" + } + }, + "onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "opal-runtime": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/opal-runtime/-/opal-runtime-1.0.11.tgz", + "integrity": "sha512-L+6pnRvXPlDtbamBRnJAnB9mEMXmsIQ/b+0r/2xJ5/n/nxheEkLo+Pm5QNQ08LEbEN9TI6/kedhIspqRRu6tXA==", + "dev": true, + "requires": { + "glob": "6.0.4", + "xmlhttprequest": "1.8.0" + } + }, + "optionator": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", + "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", + "dev": true, + "requires": { + "deep-is": "^0.1.3", + "fast-levenshtein": 
"^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.3" + } + }, + "optipng-bin": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/optipng-bin/-/optipng-bin-6.0.0.tgz", + "integrity": "sha512-95bB4y8IaTsa/8x6QH4bLUuyvyOoGBCLDA7wOgDL8UFqJpSUh1Hob8JRJhit+wC1ZLN3tQ7mFt7KuBj0x8F2Wg==", + "dev": true, + "optional": true, + "requires": { + "bin-build": "^3.0.0", + "bin-wrapper": "^4.0.0", + "logalot": "^2.0.0" + } + }, + "ordered-read-streams": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/ordered-read-streams/-/ordered-read-streams-1.0.1.tgz", + "integrity": "sha1-d8DLN8QVJdZBZtmQ/61+xqDhNj4=", + "dev": true, + "requires": { + "readable-stream": "^2.0.1" + } + }, + "os-browserify": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", + "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=", + "dev": true + }, + "os-filter-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/os-filter-obj/-/os-filter-obj-2.0.0.tgz", + "integrity": "sha512-uksVLsqG3pVdzzPvmAHpBK0wKxYItuzZr7SziusRPoz67tGV8rL1szZ6IdeUrbqLjGDwApBtN29eEE3IqGHOjg==", + "dev": true, + "optional": true, + "requires": { + "arch": "^2.1.0" + } + }, + "os-locale": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz", + "integrity": "sha1-IPnxeuKe00XoveWDsT0gCYA8FNk=", + "dev": true, + "requires": { + "lcid": "^1.0.0" + } + }, + "os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", + "dev": true + }, + "p-cancelable": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-0.3.0.tgz", + "integrity": "sha512-RVbZPLso8+jFeq1MfNvgXtCRED2raz/dKpacfTNxsx6pLEpEomM7gah6VeHSYV3+vo0OAi4MkArtQcWWXuQoyw==", + "dev": true, + "optional": true + }, + "p-event": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-event/-/p-event-1.3.0.tgz", + "integrity": "sha1-jmtPT2XHK8W2/ii3XtqHT5akoIU=", + "dev": true, + "optional": true, + "requires": { + "p-timeout": "^1.1.1" + } + }, + "p-finally": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", + "dev": true, + "optional": true + }, + "p-is-promise": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz", + "integrity": "sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4=", + "dev": true, + "optional": true + }, + "p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true, + "requires": { + "p-limit": "^1.1.0" + } + }, + "p-map-series": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-map-series/-/p-map-series-1.0.0.tgz", + "integrity": "sha1-v5j+V1cFZYqeE1G++4WuTB8Hvco=", + "dev": true, + "optional": true, + "requires": { + "p-reduce": "^1.0.0" + } + }, + "p-pipe": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-pipe/-/p-pipe-3.1.0.tgz", + "integrity": 
"sha512-08pj8ATpzMR0Y80x50yJHn37NF6vjrqHutASaX5LiH5npS9XPvrUmscd9MF5R4fuYRHOxQR1FfMIlF7AzwoPqw==", + "dev": true + }, + "p-reduce": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-1.0.0.tgz", + "integrity": "sha1-GMKw3ZNqRpClKfgjH1ig/bakffo=", + "dev": true, + "optional": true + }, + "p-timeout": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-1.2.1.tgz", + "integrity": "sha1-XrOzU7f86Z8QGhA4iAuwVOu+o4Y=", + "dev": true, + "optional": true, + "requires": { + "p-finally": "^1.0.0" + } + }, + "p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "dev": true + }, + "pako": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", + "dev": true + }, + "parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "requires": { + "callsites": "^3.0.0" + }, + "dependencies": { + "callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true + } + } + }, + "parents": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parents/-/parents-1.0.1.tgz", + "integrity": "sha1-/t1NK/GTp3dF/nHjcdc8MwfZx1E=", + "dev": true, + "requires": { + "path-platform": "~0.11.15" + } + }, + "parse-asn1": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz", + "integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==", + "dev": true, + "requires": { + "asn1.js": "^5.2.0", + "browserify-aes": "^1.0.0", + "evp_bytestokey": "^1.0.0", + "pbkdf2": "^3.0.3", + "safe-buffer": "^5.1.1" + } + }, + "parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "dev": true, + "requires": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + } + }, + "parse-filepath": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/parse-filepath/-/parse-filepath-1.0.2.tgz", + "integrity": "sha1-pjISf1Oq89FYdvWHLz/6x2PWyJE=", + "dev": true, + "requires": { + "is-absolute": "^1.0.0", + "map-cache": "^0.2.0", + "path-root": "^0.1.1" + } + }, + "parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", + "dev": true, + "requires": { + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" + } + }, + "parse-node-version": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parse-node-version/-/parse-node-version-1.0.1.tgz", + "integrity": "sha512-3YHlOa/JgH6Mnpr05jP9eDG254US9ek25LyIxZlDItp2iJtwyaXQb57lBYLdT3MowkUFYEV2XXNAYIPlESvJlA==", + "dev": true + }, + "parse-passwd": { + 
"version": "1.0.0", + "resolved": "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz", + "integrity": "sha1-bVuTSkVpk7I9N/QKOC1vFmao5cY=", + "dev": true + }, + "parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "dev": true + }, + "pascalcase": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", + "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", + "dev": true + }, + "path-browserify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz", + "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==", + "dev": true + }, + "path-dirname": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", + "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=", + "dev": true + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true + }, + "path-parse": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", + "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==", + "dev": true + }, + "path-platform": { + "version": "0.11.15", + "resolved": "https://registry.npmjs.org/path-platform/-/path-platform-0.11.15.tgz", + "integrity": "sha1-6GQhf3TDaFDwhSt43Hv31KVyG/I=", + "dev": true + }, + "path-root": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/path-root/-/path-root-0.1.1.tgz", + "integrity": "sha1-mkpoFMrBwM1zNgqV8yCDyOpHRbc=", + "dev": true, + "requires": { + "path-root-regex": "^0.1.0" + } + }, + "path-root-regex": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/path-root-regex/-/path-root-regex-0.1.2.tgz", + "integrity": "sha1-v8zcjfWxLcUsi0PsONGNcsBLqW0=", + "dev": true + }, + "path-type": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", + "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", + "dev": true, + "requires": { + "pify": "^2.0.0" + } + }, + "pbkdf2": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.1.tgz", + "integrity": "sha512-4Ejy1OPxi9f2tt1rRV7Go7zmfDQ+ZectEQz3VGUQhgq62HtIRPDyG/JtnwIxs6x3uNMwo2V7q1fMvKjb+Tnpqg==", + "dev": true, + "requires": { + "create-hash": "^1.1.2", + "create-hmac": "^1.1.4", + "ripemd160": "^2.0.1", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", + "dev": true + }, + "performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": 
"sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=", + "dev": true + }, + "picomatch": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.2.tgz", + "integrity": "sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==", + "dev": true + }, + "pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", + "dev": true + }, + "pinkie": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", + "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", + "dev": true + }, + "pinkie-promise": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", + "dev": true, + "requires": { + "pinkie": "^2.0.0" + } + }, + "pkg-dir": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", + "integrity": "sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=", + "dev": true, + "requires": { + "find-up": "^2.1.0" + } + }, + "plugin-error": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/plugin-error/-/plugin-error-1.0.1.tgz", + "integrity": "sha512-L1zP0dk7vGweZME2i+EeakvUNqSrdiI3F91TwEoYiGrAfUXmVv6fJIq4g82PAXxNsWOp0J7ZqQy/3Szz0ajTxA==", + "dev": true, + "requires": { + "ansi-colors": "^1.0.1", + "arr-diff": "^4.0.0", + "arr-union": "^3.1.0", + "extend-shallow": "^3.0.2" + } + }, + "plur": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/plur/-/plur-3.1.1.tgz", + "integrity": "sha512-t1Ax8KUvV3FFII8ltczPn2tJdjqbd1sIzu6t4JL7nQ3EyeL/lTrj5PWKb06ic5/6XYDr65rQ4uzQEGN70/6X5w==", + "dev": true, + "requires": { + "irregular-plurals": "^2.0.0" + } + }, + "posix-character-classes": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", + "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=", + "dev": true + }, + "postcss": { + "version": "7.0.35", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", + "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", + "dev": true, + "requires": { + "chalk": "^2.4.2", + "source-map": "^0.6.1", + "supports-color": "^6.1.0" + } + }, + "postcss-calc": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz", + "integrity": "sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==", + "dev": true, + "requires": { + "postcss": "^7.0.27", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.0.2" + } + }, + "postcss-colormin": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-4.0.3.tgz", + "integrity": "sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw==", + "dev": true, + "requires": { + "browserslist": "^4.0.0", + "color": "^3.0.0", + "has": "^1.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-convert-values": { + "version": "4.0.1", + "resolved": 
"https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz", + "integrity": "sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ==", + "dev": true, + "requires": { + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-custom-properties": { + "version": "9.1.1", + "resolved": "https://registry.npmjs.org/postcss-custom-properties/-/postcss-custom-properties-9.1.1.tgz", + "integrity": "sha512-GVu+j7vwMTKUGhGXckYAFAAG5tTJUkSt8LuSyimtZdVVmdAEZYYqserkAgX8vwMhgGDPA4vJtWt7VgFxgiooDA==", + "dev": true, + "requires": { + "postcss": "^7.0.17", + "postcss-values-parser": "^3.0.5" + } + }, + "postcss-discard-comments": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz", + "integrity": "sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg==", + "dev": true, + "requires": { + "postcss": "^7.0.0" + } + }, + "postcss-discard-duplicates": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz", + "integrity": "sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ==", + "dev": true, + "requires": { + "postcss": "^7.0.0" + } + }, + "postcss-discard-empty": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz", + "integrity": "sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w==", + "dev": true, + "requires": { + "postcss": "^7.0.0" + } + }, + "postcss-discard-overridden": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz", + "integrity": "sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg==", + "dev": true, + "requires": { + "postcss": "^7.0.0" + } + }, + "postcss-html": { + "version": "0.36.0", + "resolved": "https://registry.npmjs.org/postcss-html/-/postcss-html-0.36.0.tgz", + "integrity": "sha512-HeiOxGcuwID0AFsNAL0ox3mW6MHH5cstWN1Z3Y+n6H+g12ih7LHdYxWwEA/QmrebctLjo79xz9ouK3MroHwOJw==", + "dev": true, + "requires": { + "htmlparser2": "^3.10.0" + } + }, + "postcss-import": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-12.0.1.tgz", + "integrity": "sha512-3Gti33dmCjyKBgimqGxL3vcV8w9+bsHwO5UrBawp796+jdardbcFl4RP5w/76BwNL7aGzpKstIfF9I+kdE8pTw==", + "dev": true, + "requires": { + "postcss": "^7.0.1", + "postcss-value-parser": "^3.2.3", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-less": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/postcss-less/-/postcss-less-3.1.4.tgz", + "integrity": 
"sha512-7TvleQWNM2QLcHqvudt3VYjULVB49uiW6XzEUFmvwHzvsOEF5MwBrIXZDJQvJNFGjJQTzSzZnDoCJ8h/ljyGXA==", + "dev": true, + "requires": { + "postcss": "^7.0.14" + } + }, + "postcss-load-config": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.2.tgz", + "integrity": "sha512-/rDeGV6vMUo3mwJZmeHfEDvwnTKKqQ0S7OHUi/kJvvtx3aWtyWG2/0ZWnzCt2keEclwN6Tf0DST2v9kITdOKYw==", + "dev": true, + "requires": { + "cosmiconfig": "^5.0.0", + "import-cwd": "^2.0.0" + } + }, + "postcss-media-query-parser": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/postcss-media-query-parser/-/postcss-media-query-parser-0.2.3.tgz", + "integrity": "sha1-J7Ocb02U+Bsac7j3Y1HGCeXO8kQ=", + "dev": true + }, + "postcss-merge-longhand": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz", + "integrity": "sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw==", + "dev": true, + "requires": { + "css-color-names": "0.0.4", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0", + "stylehacks": "^4.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-merge-rules": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz", + "integrity": "sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ==", + "dev": true, + "requires": { + "browserslist": "^4.0.0", + "caniuse-api": "^3.0.0", + "cssnano-util-same-parent": "^4.0.0", + "postcss": "^7.0.0", + "postcss-selector-parser": "^3.0.0", + "vendors": "^1.0.0" + }, + "dependencies": { + "postcss-selector-parser": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", + "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", + "dev": true, + "requires": { + "dot-prop": "^5.2.0", + "indexes-of": "^1.0.1", + "uniq": "^1.0.1" + } + } + } + }, + "postcss-minify-font-values": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz", + "integrity": "sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg==", + "dev": true, + "requires": { + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-minify-gradients": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz", + "integrity": "sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q==", + "dev": true, + "requires": { + "cssnano-util-get-arguments": "^4.0.0", + "is-color-stop": "^1.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + 
"resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-minify-params": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz", + "integrity": "sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg==", + "dev": true, + "requires": { + "alphanum-sort": "^1.0.0", + "browserslist": "^4.0.0", + "cssnano-util-get-arguments": "^4.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0", + "uniqs": "^2.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-minify-selectors": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz", + "integrity": "sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g==", + "dev": true, + "requires": { + "alphanum-sort": "^1.0.0", + "has": "^1.0.0", + "postcss": "^7.0.0", + "postcss-selector-parser": "^3.0.0" + }, + "dependencies": { + "postcss-selector-parser": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", + "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", + "dev": true, + "requires": { + "dot-prop": "^5.2.0", + "indexes-of": "^1.0.1", + "uniq": "^1.0.1" + } + } + } + }, + "postcss-normalize-charset": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz", + "integrity": "sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g==", + "dev": true, + "requires": { + "postcss": "^7.0.0" + } + }, + "postcss-normalize-display-values": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz", + "integrity": "sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ==", + "dev": true, + "requires": { + "cssnano-util-get-match": "^4.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-normalize-positions": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz", + "integrity": "sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA==", + "dev": true, + "requires": { + "cssnano-util-get-arguments": "^4.0.0", + "has": "^1.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": 
"sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-normalize-repeat-style": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz", + "integrity": "sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q==", + "dev": true, + "requires": { + "cssnano-util-get-arguments": "^4.0.0", + "cssnano-util-get-match": "^4.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-normalize-string": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz", + "integrity": "sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA==", + "dev": true, + "requires": { + "has": "^1.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-normalize-timing-functions": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz", + "integrity": "sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A==", + "dev": true, + "requires": { + "cssnano-util-get-match": "^4.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-normalize-unicode": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz", + "integrity": "sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg==", + "dev": true, + "requires": { + "browserslist": "^4.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-normalize-url": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz", + "integrity": "sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA==", + "dev": true, + "requires": { + "is-absolute-url": "^2.0.0", + "normalize-url": "^3.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": 
"https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-normalize-whitespace": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz", + "integrity": "sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA==", + "dev": true, + "requires": { + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-ordered-values": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz", + "integrity": "sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw==", + "dev": true, + "requires": { + "cssnano-util-get-arguments": "^4.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-reduce-initial": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz", + "integrity": "sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA==", + "dev": true, + "requires": { + "browserslist": "^4.0.0", + "caniuse-api": "^3.0.0", + "has": "^1.0.0", + "postcss": "^7.0.0" + } + }, + "postcss-reduce-transforms": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz", + "integrity": "sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg==", + "dev": true, + "requires": { + "cssnano-util-get-match": "^4.0.0", + "has": "^1.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-reporter": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-reporter/-/postcss-reporter-6.0.1.tgz", + "integrity": "sha512-LpmQjfRWyabc+fRygxZjpRxfhRf9u/fdlKf4VHG4TSPbV2XNsuISzYW1KL+1aQzx53CAppa1bKG4APIB/DOXXw==", + "dev": true, + "requires": { + "chalk": "^2.4.1", + "lodash": "^4.17.11", + "log-symbols": "^2.2.0", + "postcss": "^7.0.7" + }, + "dependencies": { + "log-symbols": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-2.2.0.tgz", + "integrity": "sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg==", + "dev": true, + "requires": { + "chalk": "^2.0.1" + } + } + } + }, + "postcss-resolve-nested-selector": { + "version": "0.1.1", + "resolved": 
"https://registry.npmjs.org/postcss-resolve-nested-selector/-/postcss-resolve-nested-selector-0.1.1.tgz", + "integrity": "sha1-Kcy8fDfe36wwTp//C/FZaz9qDk4=", + "dev": true + }, + "postcss-safe-parser": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-4.0.2.tgz", + "integrity": "sha512-Uw6ekxSWNLCPesSv/cmqf2bY/77z11O7jZGPax3ycZMFU/oi2DMH9i89AdHc1tRwFg/arFoEwX0IS3LCUxJh1g==", + "dev": true, + "requires": { + "postcss": "^7.0.26" + } + }, + "postcss-sass": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/postcss-sass/-/postcss-sass-0.4.4.tgz", + "integrity": "sha512-BYxnVYx4mQooOhr+zer0qWbSPYnarAy8ZT7hAQtbxtgVf8gy+LSLT/hHGe35h14/pZDTw1DsxdbrwxBN++H+fg==", + "dev": true, + "requires": { + "gonzales-pe": "^4.3.0", + "postcss": "^7.0.21" + } + }, + "postcss-scss": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/postcss-scss/-/postcss-scss-2.1.1.tgz", + "integrity": "sha512-jQmGnj0hSGLd9RscFw9LyuSVAa5Bl1/KBPqG1NQw9w8ND55nY4ZEsdlVuYJvLPpV+y0nwTV5v/4rHPzZRihQbA==", + "dev": true, + "requires": { + "postcss": "^7.0.6" + } + }, + "postcss-selector-parser": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz", + "integrity": "sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw==", + "dev": true, + "requires": { + "cssesc": "^3.0.0", + "indexes-of": "^1.0.1", + "uniq": "^1.0.1", + "util-deprecate": "^1.0.2" + } + }, + "postcss-svgo": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-4.0.2.tgz", + "integrity": "sha512-C6wyjo3VwFm0QgBy+Fu7gCYOkCmgmClghO+pjcxvrcBKtiKt0uCF+hvbMO1fyv5BMImRK90SMb+dwUnfbGd+jw==", + "dev": true, + "requires": { + "is-svg": "^3.0.0", + "postcss": "^7.0.0", + "postcss-value-parser": "^3.0.0", + "svgo": "^1.0.0" + }, + "dependencies": { + "postcss-value-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", + "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==", + "dev": true + } + } + }, + "postcss-syntax": { + "version": "0.36.2", + "resolved": "https://registry.npmjs.org/postcss-syntax/-/postcss-syntax-0.36.2.tgz", + "integrity": "sha512-nBRg/i7E3SOHWxF3PpF5WnJM/jQ1YpY9000OaVXlAQj6Zp/kIqJxEDWIZ67tAd7NLuk7zqN4yqe9nc0oNAOs1w==", + "dev": true + }, + "postcss-unique-selectors": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz", + "integrity": "sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg==", + "dev": true, + "requires": { + "alphanum-sort": "^1.0.0", + "postcss": "^7.0.0", + "uniqs": "^2.0.0" + } + }, + "postcss-url": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/postcss-url/-/postcss-url-8.0.0.tgz", + "integrity": "sha512-E2cbOQ5aii2zNHh8F6fk1cxls7QVFZjLPSrqvmiza8OuXLzIpErij8BDS5Y3STPfJgpIMNCPEr8JlKQWEoozUw==", + "dev": true, + "requires": { + "mime": "^2.3.1", + "minimatch": "^3.0.4", + "mkdirp": "^0.5.0", + "postcss": "^7.0.2", + "xxhashjs": "^0.2.1" + }, + "dependencies": { + "mime": { + "version": "2.4.6", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.4.6.tgz", + "integrity": "sha512-RZKhC3EmpBchfTGBVb8fb+RL2cWyw/32lshnsETttkBAyAUXSGHxbEJWWRXc751DrIxG1q04b8QwMbAwkRPpUA==", + "dev": true + } + } + }, + "postcss-value-parser": { + "version": 
"4.1.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz", + "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ==", + "dev": true + }, + "postcss-values-parser": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/postcss-values-parser/-/postcss-values-parser-3.2.1.tgz", + "integrity": "sha512-SQ7/88VE9LhJh9gc27/hqnSU/aZaREVJcRVccXBmajgP2RkjdJzNyH/a9GCVMI5nsRhT0jC5HpUMwfkz81DVVg==", + "dev": true, + "requires": { + "color-name": "^1.1.4", + "is-url-superb": "^3.0.0", + "postcss": "^7.0.5", + "url-regex": "^5.0.0" + }, + "dependencies": { + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + } + } + }, + "prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true + }, + "prepend-http": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-1.0.4.tgz", + "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=", + "dev": true, + "optional": true + }, + "prettier": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.2.0.tgz", + "integrity": "sha512-yYerpkvseM4iKD/BXLYUkQV5aKt4tQPqaGW6EsZjzyu0r7sVZZNPJW4Y8MyKmicp6t42XUPcBVA+H6sB3gqndw==", + "dev": true + }, + "prettier-eslint": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/prettier-eslint/-/prettier-eslint-11.0.0.tgz", + "integrity": "sha512-ACjL7T8m10HCO7DwYdXwhNWuZzQv86JkZAhVpzFV9brTMWi3i6LhqoELFaXf6RetDngujz89tnbDmGyvDl+rzA==", + "dev": true, + "requires": { + "@typescript-eslint/parser": "^3.0.0", + "common-tags": "^1.4.0", + "dlv": "^1.1.0", + "eslint": "^6.8.0", + "indent-string": "^4.0.0", + "lodash.merge": "^4.6.0", + "loglevel-colored-level-prefix": "^1.0.0", + "prettier": "^2.0.0", + "pretty-format": "^23.0.1", + "require-relative": "^0.8.7", + "typescript": "^3.9.3", + "vue-eslint-parser": "~7.1.0" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true + }, + "cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "dev": true, + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + }, + "dependencies": { + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + } + } + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "eslint": { + "version": "6.8.0", + "resolved": 
"https://registry.npmjs.org/eslint/-/eslint-6.8.0.tgz", + "integrity": "sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "ajv": "^6.10.0", + "chalk": "^2.1.0", + "cross-spawn": "^6.0.5", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "eslint-scope": "^5.0.0", + "eslint-utils": "^1.4.3", + "eslint-visitor-keys": "^1.1.0", + "espree": "^6.1.2", + "esquery": "^1.0.1", + "esutils": "^2.0.2", + "file-entry-cache": "^5.0.1", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^5.0.0", + "globals": "^12.1.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "inquirer": "^7.0.0", + "is-glob": "^4.0.0", + "js-yaml": "^3.13.1", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.3.0", + "lodash": "^4.17.14", + "minimatch": "^3.0.4", + "mkdirp": "^0.5.1", + "natural-compare": "^1.4.0", + "optionator": "^0.8.3", + "progress": "^2.0.0", + "regexpp": "^2.0.1", + "semver": "^6.1.2", + "strip-ansi": "^5.2.0", + "strip-json-comments": "^3.0.1", + "table": "^5.2.3", + "text-table": "^0.2.0", + "v8-compile-cache": "^2.0.3" + } + }, + "eslint-utils": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", + "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", + "dev": true, + "requires": { + "eslint-visitor-keys": "^1.1.0" + } + }, + "espree": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz", + "integrity": "sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==", + "dev": true, + "requires": { + "acorn": "^7.1.1", + "acorn-jsx": "^5.2.0", + "eslint-visitor-keys": "^1.1.0" + } + }, + "import-fresh": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.2.tgz", + "integrity": "sha512-cTPNrlvJT6twpYy+YmKUKrTSjWFs3bjYjAhCwm+z4EOCubZxAuO+hHpRN64TqjEaYSHs7tJAE0w1CKMGmsG/lw==", + "dev": true, + "requires": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + } + }, + "indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true + }, + "levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "dev": true, + "requires": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + } + }, + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "dev": true + }, + "prelude-ls": { + "version": 
"1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "dev": true + }, + "regexpp": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", + "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", + "dev": true + }, + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true + }, + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dev": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "dev": true + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + } + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2" + } + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "pretty-bytes": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.4.1.tgz", + "integrity": "sha512-s1Iam6Gwz3JI5Hweaz4GoCD1WUNUIyzePFy5+Js2hjwGVt2Z79wNN+ZKOZ2vB6C+Xs6njyB84Z1IthQg8d9LxA==", + "dev": true + }, + "pretty-format": { + "version": "23.6.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-23.6.0.tgz", + "integrity": "sha512-zf9NV1NSlDLDjycnwm6hpFATCGl/K1lt0R/GdkAK2O5LN/rwJoB+Mh93gGJjut4YbmecbfgLWVGSTCr0Ewvvbw==", + "dev": true, + "requires": { + "ansi-regex": "^3.0.0", + "ansi-styles": "^3.2.0" + }, + "dependencies": { + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true + } + } + }, + "pretty-hrtime": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz", + "integrity": "sha1-t+PqQkNaTJsnWdmeDyAesZWALuE=", + "dev": true + }, + "process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=", + "dev": true + }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": 
"sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true + }, + "progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true + }, + "proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha1-IS1b/hMYMGpCD2QCuOJv85ZHqEk=", + "dev": true, + "optional": true + }, + "pseudomap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", + "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=", + "dev": true, + "optional": true + }, + "psl": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", + "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==", + "dev": true + }, + "public-encrypt": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", + "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "parse-asn1": "^5.0.0", + "randombytes": "^2.0.1", + "safe-buffer": "^5.1.2" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", + "dev": true + } + } + }, + "pump": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", + "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "pumpify": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz", + "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", + "dev": true, + "requires": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + } + }, + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=", + "dev": true + }, + "q": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", + "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", + "dev": true + }, + "qs": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", + "dev": true + }, + "query-string": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz", + "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", + "dev": true, + "optional": true, + "requires": { + "decode-uri-component": "^0.2.0", + "object-assign": "^4.1.0", + "strict-uri-encode": "^1.0.0" + } + }, + "querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", + "dev": true + }, + "querystring-es3": { + "version": "0.2.1", + 
"resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", + "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=", + "dev": true + }, + "queue": { + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/queue/-/queue-4.5.1.tgz", + "integrity": "sha512-AMD7w5hRXcFSb8s9u38acBZ+309u6GsiibP4/0YacJeaurRshogB7v/ZcVPxP5gD5+zIw6ixRHdutiYUJfwKHw==", + "dev": true, + "requires": { + "inherits": "~2.0.0" + } + }, + "quick-lru": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz", + "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", + "dev": true + }, + "randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "requires": { + "safe-buffer": "^5.1.0" + } + }, + "randomfill": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", + "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "dev": true, + "requires": { + "randombytes": "^2.0.5", + "safe-buffer": "^5.1.0" + } + }, + "range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "dev": true + }, + "raw-body": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-1.1.7.tgz", + "integrity": "sha1-HQJ8K/oRasxmI7yo8AAWVyqH1CU=", + "dev": true, + "requires": { + "bytes": "1", + "string_decoder": "0.10" + }, + "dependencies": { + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true + } + } + }, + "read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha1-5mTvMRYRZsl1HNvo28+GtftY93Q=", + "dev": true, + "requires": { + "pify": "^2.3.0" + } + }, + "read-only-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/read-only-stream/-/read-only-stream-2.0.0.tgz", + "integrity": "sha1-JyT9aoET1zdkrCiNQ4YnDB2/F/A=", + "dev": true, + "requires": { + "readable-stream": "^2.0.2" + } + }, + "read-pkg": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", + "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", + "dev": true, + "requires": { + "load-json-file": "^2.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^2.0.0" + } + }, + "read-pkg-up": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", + "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", + "dev": true, + "requires": { + "find-up": "^2.0.0", + "read-pkg": "^2.0.0" + } + }, + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": 
"~1.0.1" + } + }, + "readdirp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", + "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.11", + "micromatch": "^3.1.10", + "readable-stream": "^2.0.2" + } + }, + "rechoir": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", + "integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=", + "dev": true, + "requires": { + "resolve": "^1.1.6" + } + }, + "redent": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-1.0.0.tgz", + "integrity": "sha1-z5Fqsf1fHxbfsggi3W7H9zDCr94=", + "dev": true, + "optional": true, + "requires": { + "indent-string": "^2.1.0", + "strip-indent": "^1.0.1" + } + }, + "reduce": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/reduce/-/reduce-1.0.2.tgz", + "integrity": "sha512-xX7Fxke/oHO5IfZSk77lvPa/7bjMh9BuCk4OOoX5XTXrM7s0Z+MkPfSDfz0q7r91BhhGSs8gii/VEN/7zhCPpQ==", + "dev": true, + "requires": { + "object-keys": "^1.1.0" + } + }, + "regex-not": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", + "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "dev": true, + "requires": { + "extend-shallow": "^3.0.2", + "safe-regex": "^1.1.0" + } + }, + "regexpp": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", + "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", + "dev": true + }, + "remark": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/remark/-/remark-13.0.0.tgz", + "integrity": "sha512-HDz1+IKGtOyWN+QgBiAT0kn+2s6ovOxHyPAFGKVE81VSzJ+mq7RwHFledEvB5F1p4iJvOah/LOKdFuzvRnNLCA==", + "dev": true, + "requires": { + "remark-parse": "^9.0.0", + "remark-stringify": "^9.0.0", + "unified": "^9.1.0" + } + }, + "remark-parse": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-9.0.0.tgz", + "integrity": "sha512-geKatMwSzEXKHuzBNU1z676sGcDcFoChMK38TgdHJNAYfFtsfHDQG7MoJAjs6sgYMqyLduCYWDIWZIxiPeafEw==", + "dev": true, + "requires": { + "mdast-util-from-markdown": "^0.8.0" + } + }, + "remark-stringify": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-9.0.0.tgz", + "integrity": "sha512-8x29DpTbVzEc6Dwb90qhxCtbZ6hmj3BxWWDpMhA+1WM4dOEGH5U5/GFe3Be5Hns5MvPSFAr1e2KSVtKZkK5nUw==", + "dev": true, + "requires": { + "mdast-util-to-markdown": "^0.5.0" + } + }, + "remove-bom-buffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/remove-bom-buffer/-/remove-bom-buffer-3.0.0.tgz", + "integrity": "sha512-8v2rWhaakv18qcvNeli2mZ/TMTL2nEyAKRvzo1WtnZBl15SHyEhrCu2/xKlJyUFKHiHgfXIyuY6g2dObJJycXQ==", + "dev": true, + "requires": { + "is-buffer": "^1.1.5", + "is-utf8": "^0.2.1" + }, + "dependencies": { + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + } + } + }, + "remove-bom-stream": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/remove-bom-stream/-/remove-bom-stream-1.2.0.tgz", + "integrity": "sha1-BfGlk/FuQuH7kOv1nejlaVJflSM=", + "dev": true, + "requires": { + 
"remove-bom-buffer": "^3.0.0", + "safe-buffer": "^5.1.0", + "through2": "^2.0.3" + } + }, + "remove-trailing-separator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=", + "dev": true + }, + "repeat-element": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz", + "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==", + "dev": true + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "dev": true + }, + "repeating": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz", + "integrity": "sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo=", + "dev": true, + "optional": true, + "requires": { + "is-finite": "^1.0.0" + } + }, + "replace-ext": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.1.tgz", + "integrity": "sha512-yD5BHCe7quCgBph4rMQ+0KkIRKwWCrHDOX1p1Gp6HwjPM5kVoCdKGNhN7ydqqsX6lJEnQDKZ/tFMiEdQ1dvPEw==", + "dev": true + }, + "replace-homedir": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/replace-homedir/-/replace-homedir-1.0.0.tgz", + "integrity": "sha1-6H9tUTuSjd6AgmDBK+f+xv9ueYw=", + "dev": true, + "requires": { + "homedir-polyfill": "^1.0.1", + "is-absolute": "^1.0.0", + "remove-trailing-separator": "^1.1.0" + } + }, + "request": { + "version": "2.88.2", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", + "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", + "dev": true, + "requires": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.3", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.5.0", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + } + }, + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "dev": true + }, + "require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true + }, + "require-main-filename": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-1.0.1.tgz", + "integrity": "sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE=", + "dev": true + }, + "require-relative": { + "version": "0.8.7", + "resolved": "https://registry.npmjs.org/require-relative/-/require-relative-0.8.7.tgz", + "integrity": "sha1-eZlTn8ngR6N5KPoZb44VY9q9Nt4=", + "dev": true + }, + "resolve": { + "version": "1.19.0", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.19.0.tgz", + "integrity": "sha512-rArEXAgsBG4UgRGcynxWIWKFvh/XZCcS8UJdHhwy91zwAvCZIbcs+vAbflgBnNjYMs/i/i+/Ux6IZhML1yPvxg==", + 
"dev": true, + "requires": { + "is-core-module": "^2.1.0", + "path-parse": "^1.0.6" + } + }, + "resolve-dir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/resolve-dir/-/resolve-dir-1.0.1.tgz", + "integrity": "sha1-eaQGRMNivoLybv/nOcm7U4IEb0M=", + "dev": true, + "requires": { + "expand-tilde": "^2.0.0", + "global-modules": "^1.0.0" + } + }, + "resolve-from": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz", + "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=", + "dev": true + }, + "resolve-options": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/resolve-options/-/resolve-options-1.1.0.tgz", + "integrity": "sha1-MrueOcBtZzONyTeMDW1gdFZq0TE=", + "dev": true, + "requires": { + "value-or-function": "^3.0.0" + } + }, + "resolve-url": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", + "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=", + "dev": true + }, + "responselike": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", + "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", + "dev": true, + "optional": true, + "requires": { + "lowercase-keys": "^1.0.0" + } + }, + "restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "requires": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + } + }, + "ret": { + "version": "0.1.15", + "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", + "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", + "dev": true + }, + "reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true + }, + "rgb-regex": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", + "integrity": "sha1-wODWiC3w4jviVKR16O3UGRX+rrE=", + "dev": true + }, + "rgba-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz", + "integrity": "sha1-QzdOLiyglosO8VI0YLfXMP8i7rM=", + "dev": true + }, + "rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "dev": true, + "requires": { + "glob": "^7.1.3" + }, + "dependencies": { + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + } + } + }, + "ripemd160": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", + "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "dev": true, + "requires": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1" + } + }, + "run-async": { + "version": "2.4.1", + "resolved": 
"https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", + "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", + "dev": true + }, + "run-parallel": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.1.10.tgz", + "integrity": "sha512-zb/1OuZ6flOlH6tQyMPUrE3x3Ulxjlo9WIVXR4yVYi4H9UXQaeIsPbLn2R3O3vQCnDKkAl2qHiuocKKX4Tz/Sw==", + "dev": true + }, + "rxjs": { + "version": "6.6.3", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.3.tgz", + "integrity": "sha512-trsQc+xYYXZ3urjOiJOuCOa5N3jAZ3eiSpQB5hIT8zGlL2QfnHLJ2r7GMkBGuIausdJN1OneaI6gQlsqNHHmZQ==", + "dev": true, + "requires": { + "tslib": "^1.9.0" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "safe-json-parse": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/safe-json-parse/-/safe-json-parse-1.0.1.tgz", + "integrity": "sha1-PnZyPjjf3aE8mx0poeB//uSzC1c=", + "dev": true + }, + "safe-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", + "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", + "dev": true, + "requires": { + "ret": "~0.1.10" + } + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, + "sax": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", + "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==", + "dev": true + }, + "scope-analyzer": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/scope-analyzer/-/scope-analyzer-2.1.1.tgz", + "integrity": "sha512-azEAihtQ9mEyZGhfgTJy3IbOWEzeOrYbg7NcYEshPKnKd+LZmC3TNd5dmDxbLBsTG/JVWmCp+vDJ03vJjeXMHg==", + "dev": true, + "requires": { + "array-from": "^2.1.1", + "dash-ast": "^1.0.0", + "es6-map": "^0.1.5", + "es6-set": "^0.1.5", + "es6-symbol": "^3.1.1", + "estree-is-function": "^1.0.0", + "get-assigned-identifiers": "^1.1.0" + } + }, + "seek-bzip": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/seek-bzip/-/seek-bzip-1.0.6.tgz", + "integrity": "sha512-e1QtP3YL5tWww8uKaOCQ18UxIT2laNBXHjV/S2WYCiK4udiv8lkG89KRIoCjUagnAmCBurjF4zEVX2ByBbnCjQ==", + "dev": true, + "optional": true, + "requires": { + "commander": "^2.8.1" + } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + }, + "semver-greatest-satisfied-range": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/semver-greatest-satisfied-range/-/semver-greatest-satisfied-range-1.1.0.tgz", + "integrity": "sha1-E+jCZYq5aRywzXEJMkAoDTb3els=", + "dev": true, + "requires": { + "sver-compat": "^1.5.0" + } + }, + "semver-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-2.0.0.tgz", + "integrity": "sha512-mUdIBBvdn0PLOeP3TEkMH7HHeUP3GjsXCwKarjv/kGmUFOYg1VqEemKhoQpWMu6X2I8kHeuVdGibLGkVK+/5Qw==", + "dev": true, + "optional": true + }, + "semver-truncate": { + "version": "1.1.2", 
+ "resolved": "https://registry.npmjs.org/semver-truncate/-/semver-truncate-1.1.2.tgz", + "integrity": "sha1-V/Qd5pcHpicJp+AQS6IRcQnqR+g=", + "dev": true, + "optional": true, + "requires": { + "semver": "^5.3.0" + } + }, + "send": { + "version": "0.16.2", + "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz", + "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==", + "dev": true, + "requires": { + "debug": "2.6.9", + "depd": "~1.1.2", + "destroy": "~1.0.4", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "~1.6.2", + "mime": "1.4.1", + "ms": "2.0.0", + "on-finished": "~2.3.0", + "range-parser": "~1.2.0", + "statuses": "~1.4.0" + }, + "dependencies": { + "statuses": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", + "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==", + "dev": true + } + } + }, + "serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=", + "dev": true, + "requires": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + } + }, + "serve-static": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", + "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==", + "dev": true, + "requires": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.17.1" + }, + "dependencies": { + "http-errors": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.3.tgz", + "integrity": "sha512-ZTTX0MWrsQ2ZAhA1cejAwDLycFsd7I7nVtnkT3Ol0aqodaKW+0CTZDQ1uBv5whptCnc8e8HeRRJxRs0kmm/Qfw==", + "dev": true, + "requires": { + "depd": "~1.1.2", + "inherits": "2.0.4", + "setprototypeof": "1.1.1", + "statuses": ">= 1.5.0 < 2", + "toidentifier": "1.0.0" + } + }, + "mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "dev": true + }, + "ms": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", + "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", + "dev": true + }, + "send": { + "version": "0.17.1", + "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz", + "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==", + "dev": true, + "requires": { + "debug": "2.6.9", + "depd": "~1.1.2", + "destroy": "~1.0.4", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "~1.7.2", + "mime": "1.6.0", + "ms": "2.1.1", + "on-finished": "~2.3.0", + "range-parser": "~1.2.1", + "statuses": "~1.5.0" + } + }, + "setprototypeof": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", + "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==", + "dev": true + } + } + }, + "set-blocking": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, + "set-value": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", + "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", + "dev": true, + "requires": { + "extend-shallow": "^2.0.1", + "is-extendable": "^0.1.1", + "is-plain-object": "^2.0.3", + "split-string": "^3.0.1" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + }, + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + } + } + }, + "setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==", + "dev": true + }, + "sha.js": { + "version": "2.4.11", + "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", + "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "shasum": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/shasum/-/shasum-1.0.2.tgz", + "integrity": "sha1-5wEjENj0F/TetXEhUOVni4euVl8=", + "dev": true, + "requires": { + "json-stable-stringify": "~0.0.0", + "sha.js": "~2.4.4" + } + }, + "shasum-object": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shasum-object/-/shasum-object-1.0.0.tgz", + "integrity": "sha512-Iqo5rp/3xVi6M4YheapzZhhGPVs0yZwHj7wvwQ1B9z8H6zk+FEnI7y3Teq7qwnekfEhu8WmG2z0z4iWZaxLWVg==", + "dev": true, + "requires": { + "fast-safe-stringify": "^2.0.7" + } + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true + }, + "shell-quote": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.2.tgz", + "integrity": "sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg==", + "dev": true + }, + "signal-exit": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", + "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", + "dev": true + }, + "simple-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", + "integrity": 
"sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", + "dev": true + }, + "simple-swizzle": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", + "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", + "dev": true, + "requires": { + "is-arrayish": "^0.3.1" + }, + "dependencies": { + "is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==", + "dev": true + } + } + }, + "slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true + }, + "slice-ansi": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", + "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.0", + "astral-regex": "^1.0.0", + "is-fullwidth-code-point": "^2.0.0" + }, + "dependencies": { + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + } + } + }, + "snapdragon": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", + "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "dev": true, + "requires": { + "base": "^0.11.1", + "debug": "^2.2.0", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "map-cache": "^0.2.2", + "source-map": "^0.5.6", + "source-map-resolve": "^0.5.0", + "use": "^3.1.0" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "snapdragon-node": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", + "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "dev": true, + "requires": { + "define-property": "^1.0.0", + "isobject": "^3.0.0", + "snapdragon-util": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dev": true, + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": 
"sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "snapdragon-util": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", + "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "dev": true, + "requires": { + "kind-of": "^3.2.0" + }, + "dependencies": { + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "sort-keys": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-1.1.2.tgz", + "integrity": "sha1-RBttTTRnmPG05J6JIK37oOVD+a0=", + "dev": true, + "optional": true, + "requires": { + "is-plain-obj": "^1.0.0" + } + }, + "sort-keys-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/sort-keys-length/-/sort-keys-length-1.0.1.tgz", + "integrity": "sha1-nLb09OnkgVWmqgZx7dM2/xR5oYg=", + "dev": true, + "optional": true, + "requires": { + "sort-keys": "^1.0.0" + } + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + }, + "source-map-resolve": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", + "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", + "dev": true, + "requires": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0", + "resolve-url": "^0.2.1", + "source-map-url": "^0.4.0", + "urix": "^0.1.0" + } + }, + "source-map-url": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz", + "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=", + "dev": true + }, + "sourcemap-codec": { + "version": "1.4.8", + "resolved": "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz", + "integrity": "sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==", + "dev": true + }, + "sparkles": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/sparkles/-/sparkles-1.0.1.tgz", + "integrity": 
"sha512-dSO0DDYUahUt/0/pD/Is3VIm5TGJjludZ0HVymmhYF6eNA53PVLhnUk0znSYbH8IYBuJdCE+1luR22jNLMaQdw==", + "dev": true + }, + "spdx-correct": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", + "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==", + "dev": true, + "requires": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-exceptions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", + "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", + "dev": true + }, + "spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "dev": true, + "requires": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-license-ids": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.6.tgz", + "integrity": "sha512-+orQK83kyMva3WyPf59k1+Y525csj5JejicWut55zeTWANuN17qSiSLUXWtzHeNWORSvT7GLDJ/E/XiIWoXBTw==", + "dev": true + }, + "specificity": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/specificity/-/specificity-0.4.1.tgz", + "integrity": "sha512-1klA3Gi5PD1Wv9Q0wUoOQN1IWAuPu0D1U03ThXTr0cJ20+/iq2tHSDnK7Kk/0LXJ1ztUB2/1Os0wKmfyNgUQfg==", + "dev": true + }, + "split-string": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", + "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "dev": true, + "requires": { + "extend-shallow": "^3.0.0" + } + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "squeak": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/squeak/-/squeak-1.3.0.tgz", + "integrity": "sha1-MwRQN7ZDiLVnZ0uEMiplIQc5FsM=", + "dev": true, + "optional": true, + "requires": { + "chalk": "^1.0.0", + "console-stream": "^0.1.1", + "lpad-align": "^1.0.1" + }, + "dependencies": { + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", + "dev": true, + "optional": true + }, + "chalk": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "dev": true, + "optional": true, + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", + "dev": true, + "optional": true + } + } + }, + "sshpk": { + "version": "1.16.1", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", + "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", + "dev": true, + "requires": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + 
"dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + } + }, + "stable": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", + "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", + "dev": true + }, + "stack-trace": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", + "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=", + "dev": true + }, + "stack-utils": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-1.0.4.tgz", + "integrity": "sha512-IPDJfugEGbfizBwBZRZ3xpccMdRyP5lqsBWXGQWimVjua/ccLCeMOAVjlc1R7LxFjo5sEDhyNIXd8mo/AiDS9w==", + "dev": true, + "requires": { + "escape-string-regexp": "^2.0.0" + }, + "dependencies": { + "escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true + } + } + }, + "static-extend": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", + "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", + "dev": true, + "requires": { + "define-property": "^0.2.5", + "object-copy": "^0.1.0" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + } + } + }, + "statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", + "dev": true + }, + "stream-browserify": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz", + "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==", + "dev": true, + "requires": { + "inherits": "~2.0.1", + "readable-stream": "^2.0.2" + } + }, + "stream-combiner": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/stream-combiner/-/stream-combiner-0.2.2.tgz", + "integrity": "sha1-rsjLrBd7Vrb0+kec7YwZEs7lKFg=", + "dev": true, + "requires": { + "duplexer": "~0.1.1", + "through": "~2.3.4" + } + }, + "stream-combiner2": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/stream-combiner2/-/stream-combiner2-1.1.1.tgz", + "integrity": "sha1-+02KFCDqNidk4hrUeAOXvry0HL4=", + "dev": true, + "requires": { + "duplexer2": "~0.1.0", + "readable-stream": "^2.0.2" + } + }, + "stream-exhaust": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/stream-exhaust/-/stream-exhaust-1.0.2.tgz", + "integrity": "sha512-b/qaq/GlBK5xaq1yrK9/zFcyRSTNxmcZwFLGSTG0mXgZl/4Z6GgiyYOXOvY7N3eEvFRAG1bkDRz5EPGSvPYQlw==", + "dev": true + }, + "stream-http": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-3.1.1.tgz", + "integrity": "sha512-S7OqaYu0EkFpgeGFb/NPOoPLxFko7TPqtEeFg5DXPB4v/KETHG0Ln6fRFrNezoelpaDKmycEmmZ81cC9DAwgYg==", + "dev": true, + "requires": { + "builtin-status-codes": "^3.0.0", + "inherits": "^2.0.4", + "readable-stream": "^3.6.0", + "xtend": "^4.0.2" + }, + "dependencies": { + "readable-stream": { + "version": "3.6.0", + 
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + } + } + }, + "stream-shift": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", + "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==", + "dev": true + }, + "stream-splicer": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/stream-splicer/-/stream-splicer-2.0.1.tgz", + "integrity": "sha512-Xizh4/NPuYSyAXyT7g8IvdJ9HJpxIGL9PjyhtywCZvvP0OPIdqyrr4dMikeuvY8xahpdKEBlBTySe583totajg==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.2" + } + }, + "strict-uri-encode": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", + "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=", + "dev": true, + "optional": true + }, + "string-template": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/string-template/-/string-template-0.2.1.tgz", + "integrity": "sha1-QpMuWYo1LQH8IuwzZ9nYTuxsmt0=", + "dev": true + }, + "string-width": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", + "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.0" + } + } + } + }, + "string.prototype.trimend": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.3.tgz", + "integrity": "sha512-ayH0pB+uf0U28CtjlLvL7NaohvR1amUvVZk+y3DYb0Ey2PUV5zPkkKy9+U1ndVEIXO8hNg18eIv9Jntbii+dKw==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3" + } + }, + "string.prototype.trimstart": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.3.tgz", + "integrity": "sha512-oBIBUy5lea5tt0ovtOFiEQaBkoBBkyJhZXzJYrSmDo5IUUqbOPvVezuRs/agBIdZ2p2Eo1FD6bD9USyBLfl3xg==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3" + } + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + 
"requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", + "dev": true + }, + "strip-dirs": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/strip-dirs/-/strip-dirs-2.1.0.tgz", + "integrity": "sha512-JOCxOeKLm2CAS73y/U4ZeZPTkE+gNVCzKt7Eox84Iej1LT/2pTWYpZKJuxwQpvX1LiZb1xokNR7RLfuBAa7T3g==", + "dev": true, + "optional": true, + "requires": { + "is-natural-number": "^4.0.1" + } + }, + "strip-eof": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", + "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", + "dev": true, + "optional": true + }, + "strip-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-1.0.1.tgz", + "integrity": "sha1-DHlipq3vp7vUrDZkYKY4VSrhoKI=", + "dev": true, + "optional": true, + "requires": { + "get-stdin": "^4.0.1" + } + }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true + }, + "strip-outer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/strip-outer/-/strip-outer-1.0.1.tgz", + "integrity": "sha512-k55yxKHwaXnpYGsOzg4Vl8+tDrWylxDEpknGjhTiZB8dFRU5rTo9CAzeycivxV3s+zlTKwrs6WxMxR95n26kwg==", + "dev": true, + "optional": true, + "requires": { + "escape-string-regexp": "^1.0.2" + } + }, + "style-search": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/style-search/-/style-search-0.1.0.tgz", + "integrity": "sha1-eVjHk+R+MuB9K1yv5cC/jhLneQI=", + "dev": true + }, + "stylehacks": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-4.0.3.tgz", + "integrity": "sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g==", + "dev": true, + "requires": { + "browserslist": "^4.0.0", + "postcss": "^7.0.0", + "postcss-selector-parser": "^3.0.0" + }, + "dependencies": { + "postcss-selector-parser": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", + "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", + "dev": true, + "requires": { + "dot-prop": "^5.2.0", + "indexes-of": "^1.0.1", + "uniq": "^1.0.1" + } + } + } + }, + "stylelint": { + "version": "13.6.1", + "resolved": "https://registry.npmjs.org/stylelint/-/stylelint-13.6.1.tgz", + "integrity": "sha512-XyvKyNE7eyrqkuZ85Citd/Uv3ljGiuYHC6UiztTR6sWS9rza8j3UeQv/eGcQS9NZz/imiC4GKdk1EVL3wst5vw==", + "dev": true, + "requires": { + "@stylelint/postcss-css-in-js": "^0.37.1", + "@stylelint/postcss-markdown": "^0.36.1", + "autoprefixer": "^9.8.0", + "balanced-match": "^1.0.0", + "chalk": "^4.1.0", + "cosmiconfig": "^6.0.0", + "debug": "^4.1.1", + "execall": "^2.0.0", + "file-entry-cache": "^5.0.1", + "get-stdin": "^8.0.0", + "global-modules": "^2.0.0", + "globby": "^11.0.1", + "globjoin": "^0.1.4", + "html-tags": "^3.1.0", + "ignore": "^5.1.8", + "import-lazy": "^4.0.0", + "imurmurhash": "^0.1.4", + "known-css-properties": "^0.19.0", + "leven": "^3.1.0", + "lodash": "^4.17.15", + "log-symbols": "^4.0.0", + "mathml-tag-names": "^2.1.3", + "meow": "^7.0.1", + "micromatch": "^4.0.2", + "normalize-selector": "^0.2.0", + 
"postcss": "^7.0.32", + "postcss-html": "^0.36.0", + "postcss-less": "^3.1.4", + "postcss-media-query-parser": "^0.2.3", + "postcss-reporter": "^6.0.1", + "postcss-resolve-nested-selector": "^0.1.1", + "postcss-safe-parser": "^4.0.2", + "postcss-sass": "^0.4.4", + "postcss-scss": "^2.1.1", + "postcss-selector-parser": "^6.0.2", + "postcss-syntax": "^0.36.2", + "postcss-value-parser": "^4.1.0", + "resolve-from": "^5.0.0", + "slash": "^3.0.0", + "specificity": "^0.4.1", + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "style-search": "^0.1.0", + "sugarss": "^2.0.0", + "svg-tags": "^1.0.0", + "table": "^5.4.6", + "v8-compile-cache": "^2.1.1", + "write-file-atomic": "^3.0.3" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "camelcase-keys": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz", + "integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==", + "dev": true, + "requires": { + "camelcase": "^5.3.1", + "map-obj": "^4.0.0", + "quick-lru": "^4.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "dev": true, + "requires": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" + } + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": 
"sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "get-stdin": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-8.0.0.tgz", + "integrity": "sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==", + "dev": true + }, + "global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dev": true, + "requires": { + "global-prefix": "^3.0.0" + } + }, + "global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dev": true, + "requires": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + } + }, + "globby": { + "version": "11.0.1", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz", + "integrity": "sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ==", + "dev": true, + "requires": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.1.1", + "ignore": "^5.1.4", + "merge2": "^1.3.0", + "slash": "^3.0.0" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "ignore": { + "version": "5.1.8", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", + "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", + "dev": true + }, + "import-fresh": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.2.tgz", + "integrity": "sha512-cTPNrlvJT6twpYy+YmKUKrTSjWFs3bjYjAhCwm+z4EOCubZxAuO+hHpRN64TqjEaYSHs7tJAE0w1CKMGmsG/lw==", + "dev": true, + "requires": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "dependencies": { + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true + } + } + }, + "import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "dev": true + }, + "indent-string": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "map-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.1.0.tgz", + "integrity": "sha512-glc9y00wgtwcDmp7GaE/0b0OnxpNJsVf3ael/An6Fe2Q51LLwN1er6sdomLRzz5h0+yMpiYLhWYF5R7HeqVd4g==", + "dev": true + }, + "meow": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/meow/-/meow-7.1.1.tgz", + "integrity": "sha512-GWHvA5QOcS412WCo8vwKDlTelGLsCGBVevQB5Kva961rmNfun0PCbv5+xta2kUMFJyR8/oWnn7ddeKdosbAPbA==", + "dev": true, + "requires": { + "@types/minimist": "^1.2.0", + "camelcase-keys": "^6.2.2", + "decamelize-keys": "^1.1.0", + "hard-rejection": "^2.1.0", + "minimist-options": "4.1.0", + "normalize-package-data": "^2.5.0", + "read-pkg-up": "^7.0.1", + "redent": "^3.0.0", + "trim-newlines": "^3.0.0", + "type-fest": "^0.13.1", + "yargs-parser": "^18.1.3" + } + }, + "micromatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz", + "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==", + "dev": true, + "requires": { + "braces": "^3.0.1", + "picomatch": "^2.0.5" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "parse-json": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.1.0.tgz", + "integrity": "sha512-+mi/lmVVNKFNVyLXV31ERiy2CY5E1/F6QtJFEzoChPRwwngMNXRDQ9GJ5WdE2Z2P4AujsOi0/+2qHID68KwfIQ==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + } + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": 
"sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, + "path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true + }, + "read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "requires": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "dependencies": { + "type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", + "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true + } + } + }, + "read-pkg-up": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", + "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "dev": true, + "requires": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + }, + "dependencies": { + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true + } + } + }, + "redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "requires": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + } + }, + "resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "requires": { + "min-indent": "^1.0.0" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + }, + "trim-newlines": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.0.tgz", + "integrity": "sha512-C4+gOpvmxaSMKuEf9Qc134F1ZuOHVXKRbtEflf4NTtuuJDEIJ9p5PXsalL8SkeRw+qit1Mo+yuvMPAKwWg/1hA==", + "dev": true + }, + "type-fest": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.13.1.tgz", + "integrity": "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==", + "dev": true + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "yargs-parser": { + "version": "18.1.3", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", + "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", + "dev": true, + "requires": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + } + } + }, + "stylelint-config-recommended": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-3.0.0.tgz", + "integrity": "sha512-F6yTRuc06xr1h5Qw/ykb2LuFynJ2IxkKfCMf+1xqPffkxh0S09Zc902XCffcsw/XMFq/OzQ1w54fLIDtmRNHnQ==", + "dev": true + }, + "stylelint-config-standard": { + "version": "20.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-20.0.0.tgz", + "integrity": "sha512-IB2iFdzOTA/zS4jSVav6z+wGtin08qfj+YyExHB3LF9lnouQht//YyB0KZq9gGz5HNPkddHOzcY8HsUey6ZUlA==", + "dev": true, + "requires": { + "stylelint-config-recommended": "^3.0.0" + } + }, + "subarg": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/subarg/-/subarg-1.0.0.tgz", + "integrity": "sha1-9izxdYHplrSPyWVpn1TAauJouNI=", + "dev": true, + "requires": { + "minimist": "^1.1.0" + } + }, + "sugarss": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/sugarss/-/sugarss-2.0.0.tgz", + "integrity": "sha512-WfxjozUk0UVA4jm+U1d736AUpzSrNsQcIbyOkoE364GrtWmIrFdk5lksEupgWMD4VaT/0kVx1dobpiDumSgmJQ==", + "dev": true, + "requires": { + "postcss": "^7.0.2" + } + }, + "supports-color": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", + "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "sver-compat": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/sver-compat/-/sver-compat-1.5.0.tgz", + "integrity": "sha1-PPh9/rTQe0o/FIJ7wYaz/QxkXNg=", + "dev": true, + "requires": { + "es6-iterator": "^2.0.1", + "es6-symbol": "^3.1.1" + } + }, + "svg-tags": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", + "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=", + "dev": true + }, + "svgo": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz", + "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==", + "dev": true, + "requires": { + "chalk": "^2.4.1", + "coa": "^2.0.2", + "css-select": "^2.0.0", + "css-select-base-adapter": "^0.1.1", + "css-tree": "1.0.0-alpha.37", + "csso": "^4.0.2", + "js-yaml": "^3.13.1", + "mkdirp": "~0.5.1", + "object.values": "^1.1.0", + "sax": "~1.2.4", + "stable": "^0.1.8", + "unquote": "~1.1.1", + 
"util.promisify": "~1.0.0" + } + }, + "syntax-error": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/syntax-error/-/syntax-error-1.4.0.tgz", + "integrity": "sha512-YPPlu67mdnHGTup2A8ff7BC2Pjq0e0Yp/IyTFN03zWO0RcK07uLcbi7C2KpGR2FvWbaB0+bfE27a+sBKebSo7w==", + "dev": true, + "requires": { + "acorn-node": "^1.2.0" + } + }, + "table": { + "version": "5.4.6", + "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", + "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", + "dev": true, + "requires": { + "ajv": "^6.10.2", + "lodash": "^4.17.14", + "slice-ansi": "^2.1.0", + "string-width": "^3.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true + }, + "emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + }, + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + } + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + } + } + } + }, + "tar-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.2.tgz", + "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", + "dev": true, + "optional": true, + "requires": { + "bl": "^1.0.0", + "buffer-alloc": "^1.2.0", + "end-of-stream": "^1.0.0", + "fs-constants": "^1.0.0", + "readable-stream": "^2.3.0", + "to-buffer": "^1.1.1", + "xtend": "^4.0.0" + } + }, + "temp-dir": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-1.0.0.tgz", + "integrity": "sha1-CnwOom06Oa+n4OvqnB/AvE2qAR0=", + "dev": true, + "optional": true + }, + "tempfile": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tempfile/-/tempfile-2.0.0.tgz", + "integrity": "sha1-awRGhWqbERTRhW/8vlCczLCXcmU=", + "dev": true, + "optional": true, + "requires": { + "temp-dir": "^1.0.0", + "uuid": "^3.0.1" + } + }, + "text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", + "dev": true + }, + "through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", + "dev": true + }, + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": 
"sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "through2-concurrent": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/through2-concurrent/-/through2-concurrent-2.0.0.tgz", + "integrity": "sha512-R5/jLkfMvdmDD+seLwN7vB+mhbqzWop5fAjx5IX8/yQq7VhBhzDmhXgaHAOnhnWkCpRMM7gToYHycB0CS/pd+A==", + "dev": true, + "requires": { + "through2": "^2.0.0" + } + }, + "through2-filter": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/through2-filter/-/through2-filter-3.0.0.tgz", + "integrity": "sha512-jaRjI2WxN3W1V8/FMZ9HKIBXixtiqs3SQSX4/YGIiP3gL6djW48VoZq9tDqeCWs3MT8YY5wb/zli8VW8snY1CA==", + "dev": true, + "requires": { + "through2": "~2.0.0", + "xtend": "~4.0.0" + } + }, + "time-stamp": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/time-stamp/-/time-stamp-1.1.0.tgz", + "integrity": "sha1-dkpaEa9QVhkhsTPztE5hhofg9cM=", + "dev": true + }, + "timed-out": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-4.0.1.tgz", + "integrity": "sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8=", + "dev": true, + "optional": true + }, + "timers-browserify": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-1.4.2.tgz", + "integrity": "sha1-ycWLV1voQHN1y14kYtrO50NZ9B0=", + "dev": true, + "requires": { + "process": "~0.11.0" + } + }, + "timsort": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", + "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=", + "dev": true + }, + "tiny-lr": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tiny-lr/-/tiny-lr-1.1.1.tgz", + "integrity": "sha512-44yhA3tsaRoMOjQQ+5v5mVdqef+kH6Qze9jTpqtVufgYjYt08zyZAwNwwVBj3i1rJMnR52IxOW0LK0vBzgAkuA==", + "dev": true, + "requires": { + "body": "^5.1.0", + "debug": "^3.1.0", + "faye-websocket": "~0.10.0", + "livereload-js": "^2.3.0", + "object-assign": "^4.1.0", + "qs": "^6.4.0" + }, + "dependencies": { + "debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + } + } + }, + "tlds": { + "version": "1.214.0", + "resolved": "https://registry.npmjs.org/tlds/-/tlds-1.214.0.tgz", + "integrity": "sha512-+i48KYsrCkkIZnsj31cTIj9cu5NtFxKo7xlNIB7jg8kXi//b4Ertl5qaHgqFF+y+g0nFwt/k+eph2uUNQJgfwg==", + "dev": true + }, + "tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dev": true, + "requires": { + "os-tmpdir": "~1.0.2" + } + }, + "to-absolute-glob": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/to-absolute-glob/-/to-absolute-glob-2.0.2.tgz", + "integrity": "sha1-GGX0PZ50sIItufFFt4z/fQ98hJs=", + "dev": true, + "requires": { + "is-absolute": "^1.0.0", + "is-negated-glob": "^1.0.0" + } + }, + "to-buffer": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", + "integrity": 
"sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==", + "dev": true, + "optional": true + }, + "to-factory": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/to-factory/-/to-factory-1.0.0.tgz", + "integrity": "sha1-hzivi9lxIK0dQEeXKtpVY7+UebE=", + "dev": true + }, + "to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", + "dev": true + }, + "to-object-path": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", + "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "to-regex": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", + "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "dev": true, + "requires": { + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "regex-not": "^1.0.2", + "safe-regex": "^1.1.0" + } + }, + "to-regex-range": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", + "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", + "dev": true, + "requires": { + "is-number": "^3.0.0", + "repeat-string": "^1.6.1" + } + }, + "to-through": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-through/-/to-through-2.0.0.tgz", + "integrity": "sha1-/JKtq6ByZHvAtn1rA2ZKoZUJOvY=", + "dev": true, + "requires": { + "through2": "^2.0.3" + } + }, + "toidentifier": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", + "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==", + "dev": true + }, + "tough-cookie": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", + "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", + "dev": true, + "requires": { + "psl": "^1.1.28", + "punycode": "^2.1.1" + }, + "dependencies": { + "punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "dev": true + } + } + }, + "transform-ast": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/transform-ast/-/transform-ast-2.4.4.tgz", + "integrity": "sha512-AxjeZAcIOUO2lev2GDe3/xZ1Q0cVGjIMk5IsriTy8zbWlsEnjeB025AhkhBJHoy997mXpLd4R+kRbvnnQVuQHQ==", + "dev": true, + "requires": { + "acorn-node": "^1.3.0", + "convert-source-map": "^1.5.1", + "dash-ast": "^1.0.0", + "is-buffer": "^2.0.0", + "magic-string": "^0.23.2", + "merge-source-map": "1.0.4", + "nanobench": "^2.1.1" + } + }, + "trim-newlines": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/trim-newlines/-/trim-newlines-1.0.0.tgz", + "integrity": "sha1-WIeWa7WCpFA6QetST301ARgVphM=", + "dev": true, + "optional": true + }, + "trim-repeated": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/trim-repeated/-/trim-repeated-1.0.0.tgz", + "integrity": "sha1-42RqLqTokTEr9+rObPsFOAvAHCE=", + "dev": true, + "optional": true, + "requires": { + "escape-string-regexp": "^1.0.2" + } + }, + "trough": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz", + "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==", + "dev": true + }, + "tsconfig-paths": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.9.0.tgz", + "integrity": "sha512-dRcuzokWhajtZWkQsDVKbWyY+jgcLC5sqJhg2PSgf4ZkH2aHPvaOY8YWGhmjb68b5qqTfasSsDO9k7RUiEmZAw==", + "dev": true, + "requires": { + "@types/json5": "^0.0.29", + "json5": "^1.0.1", + "minimist": "^1.2.0", + "strip-bom": "^3.0.0" + } + }, + "tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", + "dev": true + }, + "tsutils": { + "version": "3.17.1", + "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.17.1.tgz", + "integrity": "sha512-kzeQ5B8H3w60nFY2g8cJIuH7JDpsALXySGtwGJ0p2LSjLgay3NdIpqq5SoOBe46bKDW2iq25irHCr8wjomUS2g==", + "dev": true, + "requires": { + "tslib": "^1.8.1" + } + }, + "tty-browserify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.1.tgz", + "integrity": "sha512-C3TaO7K81YvjCgQH9Q1S3R3P3BtN3RIM8n+OvX4il1K1zgE8ZhI0op7kClgkxtutIE8hQrcrHBXvIheqKUUCxw==", + "dev": true + }, + "tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", + "dev": true, + "requires": { + "safe-buffer": "^5.0.1" + } + }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", + "dev": true + }, + "type": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", + "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==", + "dev": true + }, + "type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "requires": { + "prelude-ls": "^1.2.1" + } + }, + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", + "dev": true + }, + "typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dev": true, + "requires": { + "is-typedarray": 
"^1.0.0" + } + }, + "typeface-red-hat-display": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/typeface-red-hat-display/-/typeface-red-hat-display-1.1.13.tgz", + "integrity": "sha512-NTPuz3EmvmRSogRha9M1ZXMIjyn0QOn9AMI1vx6wqmQdilnMoK2+6B0DPxWccwz8W9GbuFuJHpdhFN6wHVUYcw==" + }, + "typeface-roboto-condensed": { + "version": "0.0.75", + "resolved": "https://registry.npmjs.org/typeface-roboto-condensed/-/typeface-roboto-condensed-0.0.75.tgz", + "integrity": "sha512-Cq0slVsJ0uR1BmV9XCtIV2fJ3lr3vKsGTi4NyRX94Fkvwf/J3nh++NGpZ6gN5P+AzKetZEUifdpM5EQ2HCvb5g==", + "dev": true + }, + "typeface-roboto-mono": { + "version": "0.0.75", + "resolved": "https://registry.npmjs.org/typeface-roboto-mono/-/typeface-roboto-mono-0.0.75.tgz", + "integrity": "sha512-dYfyXd6HrKyMC/PuBAAtay0tZKsBrzxIW/fBY325vLxFfi/IDKSuyTkWxkU4lyZV6KPHetFnJ661PNXzz2FS/w==", + "dev": true + }, + "typescript": { + "version": "3.9.7", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.9.7.tgz", + "integrity": "sha512-BLbiRkiBzAwsjut4x/dsibSTB6yWpwT5qWmC2OfuCg3GgVQCSgMs4vEctYPhsaGtd0AeuuHMkjZ2h2WG8MSzRw==", + "dev": true + }, + "uglify-js": { + "version": "3.12.0", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.12.0.tgz", + "integrity": "sha512-8lBMSkFZuAK7gGF8LswsXmir8eX8d2AAMOnxSDWjKBx/fBR6MypQjs78m6ML9zQVp1/hD4TBdfeMZMC7nW1TAA==", + "dev": true + }, + "umd": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/umd/-/umd-3.0.3.tgz", + "integrity": "sha512-4IcGSufhFshvLNcMCV80UnQVlZ5pMOC8mvNPForqwA4+lzYQuetTESLDQkeLmihq8bRcnpbQa48Wb8Lh16/xow==", + "dev": true + }, + "unbzip2-stream": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/unbzip2-stream/-/unbzip2-stream-1.4.3.tgz", + "integrity": "sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==", + "dev": true, + "optional": true, + "requires": { + "buffer": "^5.2.1", + "through": "^2.3.8" + } + }, + "unc-path-regex": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz", + "integrity": "sha1-5z3T17DXxe2G+6xrCufYxqadUPo=", + "dev": true + }, + "undeclared-identifiers": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/undeclared-identifiers/-/undeclared-identifiers-1.1.3.tgz", + "integrity": "sha512-pJOW4nxjlmfwKApE4zvxLScM/njmwj/DiUBv7EabwE4O8kRUy+HIwxQtZLBPll/jx1LJyBcqNfB3/cpv9EZwOw==", + "dev": true, + "requires": { + "acorn-node": "^1.3.0", + "dash-ast": "^1.0.0", + "get-assigned-identifiers": "^1.2.0", + "simple-concat": "^1.0.0", + "xtend": "^4.0.1" + } + }, + "undertaker": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/undertaker/-/undertaker-1.3.0.tgz", + "integrity": "sha512-/RXwi5m/Mu3H6IHQGww3GNt1PNXlbeCuclF2QYR14L/2CHPz3DFZkvB5hZ0N/QUkiXWCACML2jXViIQEQc2MLg==", + "dev": true, + "requires": { + "arr-flatten": "^1.0.1", + "arr-map": "^2.0.0", + "bach": "^1.0.0", + "collection-map": "^1.0.0", + "es6-weak-map": "^2.0.1", + "fast-levenshtein": "^1.0.0", + "last-run": "^1.1.0", + "object.defaults": "^1.0.0", + "object.reduce": "^1.0.0", + "undertaker-registry": "^1.0.0" + }, + "dependencies": { + "fast-levenshtein": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-1.1.4.tgz", + "integrity": "sha1-5qdUzI8V5YmHqpy9J69m/W9OWvk=", + "dev": true + } + } + }, + "undertaker-registry": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/undertaker-registry/-/undertaker-registry-1.0.1.tgz", + "integrity": 
"sha1-XkvaMI5KiirlhPm5pDWaSZglzFA=", + "dev": true + }, + "unified": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", + "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "dev": true, + "requires": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + }, + "dependencies": { + "is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true + } + } + }, + "union-value": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", + "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", + "dev": true, + "requires": { + "arr-union": "^3.1.0", + "get-value": "^2.0.6", + "is-extendable": "^0.1.1", + "set-value": "^2.0.1" + } + }, + "uniq": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", + "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=", + "dev": true + }, + "uniqs": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/uniqs/-/uniqs-2.0.0.tgz", + "integrity": "sha1-/+3ks2slKQaW5uFl1KWe25mOawI=", + "dev": true + }, + "unique-stream": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/unique-stream/-/unique-stream-2.3.1.tgz", + "integrity": "sha512-2nY4TnBE70yoxHkDli7DMazpWiP7xMdCYqU2nBRO0UB+ZpEkGsSija7MvmvnZFUeC+mrgiUfcHSr3LmRFIg4+A==", + "dev": true, + "requires": { + "json-stable-stringify-without-jsonify": "^1.0.1", + "through2-filter": "^3.0.0" + } + }, + "unist-util-find-all-after": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/unist-util-find-all-after/-/unist-util-find-all-after-3.0.2.tgz", + "integrity": "sha512-xaTC/AGZ0rIM2gM28YVRAFPIZpzbpDtU3dRmp7EXlNVA8ziQc4hY3H7BHXM1J49nEmiqc3svnqMReW+PGqbZKQ==", + "dev": true, + "requires": { + "unist-util-is": "^4.0.0" + } + }, + "unist-util-is": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.0.3.tgz", + "integrity": "sha512-bTofCFVx0iQM8Jqb1TBDVRIQW03YkD3p66JOd/aCWuqzlLyUtx1ZAGw/u+Zw+SttKvSVcvTiKYbfrtLoLefykw==", + "dev": true + }, + "unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "requires": { + "@types/unist": "^2.0.2" + } + }, + "universal-user-agent": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", + "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==", + "dev": true + }, + "universalify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-1.0.0.tgz", + "integrity": "sha512-rb6X1W158d7pRQBg5gkR8uPaSfiids68LTJQYOtEUhoJUWBdaQHsuT/EUduxXYxcrt4r5PJ4fuHW1MHT6p0qug==", + "dev": true + }, + "unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", + "dev": true + }, + "unquote": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", + "integrity": "sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ=", + "dev": true + }, + "unset-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", + "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", + "dev": true, + "requires": { + "has-value": "^0.3.1", + "isobject": "^3.0.0" + }, + "dependencies": { + "has-value": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", + "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", + "dev": true, + "requires": { + "get-value": "^2.0.3", + "has-values": "^0.1.4", + "isobject": "^2.0.0" + }, + "dependencies": { + "isobject": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "dev": true, + "requires": { + "isarray": "1.0.0" + } + } + } + }, + "has-values": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", + "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=", + "dev": true + } + } + }, + "upath": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz", + "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==", + "dev": true + }, + "uri-js": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.0.tgz", + "integrity": "sha512-B0yRTzYdUCCn9n+F4+Gh4yIDtMQcaJsmYBDsTSG8g/OejKBodLQ2IHfN3bM7jUsRXndopT7OIXWdYqc1fjmV6g==", + "dev": true, + "requires": { + "punycode": "^2.1.0" + }, + "dependencies": { + "punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "dev": true + } + } + }, + "urix": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", + "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=", + "dev": true + }, + "url": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", + "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", + "dev": true, + "requires": { + "punycode": "1.3.2", + "querystring": "0.2.0" + }, + "dependencies": { + "punycode": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=", + "dev": true + } + } + }, + "url-parse-lax": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-1.0.0.tgz", + "integrity": "sha1-evjzA2Rem9eaJy56FKxovAYJ2nM=", + "dev": true, + "optional": true, + "requires": { + "prepend-http": "^1.0.1" + } + }, + "url-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/url-regex/-/url-regex-5.0.0.tgz", + "integrity": "sha512-O08GjTiAFNsSlrUWfqF1jH0H1W3m35ZyadHrGv5krdnmPPoxP27oDTqux/579PtaroiSGm5yma6KT1mHFH6Y/g==", + "dev": true, + "requires": { + "ip-regex": "^4.1.0", + "tlds": "^1.203.0" + } + }, + "url-to-options": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/url-to-options/-/url-to-options-1.0.1.tgz", + "integrity": "sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k=", + "dev": true, + "optional": true + }, + "use": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", + "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", + 
"dev": true + }, + "util": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", + "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "dev": true, + "requires": { + "inherits": "2.0.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "dev": true + } + } + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "util.promisify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.1.tgz", + "integrity": "sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.2", + "has-symbols": "^1.0.1", + "object.getownpropertydescriptors": "^2.1.0" + }, + "dependencies": { + "es-abstract": { + "version": "1.17.7", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.7.tgz", + "integrity": "sha512-VBl/gnfcJ7OercKA9MVaegWsBHFjV492syMudcnQZvt/Dw8ezpcOHYZXa/J96O8vx+g4x65YKhxOwDUh63aS5g==", + "dev": true, + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + } + } + } + }, + "utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", + "dev": true + }, + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "dev": true + }, + "v8-compile-cache": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.2.0.tgz", + "integrity": "sha512-gTpR5XQNKFwOd4clxfnhaqvfqMpqEwr4tOtCyz4MtYZX2JYhfr1JvBFKdS+7K/9rfpZR3VLX+YWBbKoxCgS43Q==", + "dev": true + }, + "v8flags": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/v8flags/-/v8flags-3.2.0.tgz", + "integrity": "sha512-mH8etigqMfiGWdeXpaaqGfs6BndypxusHHcv2qSHyZkGEznCd/qAXCWWRzeowtL54147cktFOC4P5y+kl8d8Jg==", + "dev": true, + "requires": { + "homedir-polyfill": "^1.0.1" + } + }, + "validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, + "requires": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "value-or-function": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/value-or-function/-/value-or-function-3.0.0.tgz", + "integrity": "sha1-HCQ6ULWVwb5Up1S/7OhWO5/42BM=", + "dev": true + }, + "vendors": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.4.tgz", + "integrity": 
"sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==", + "dev": true + }, + "verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "dev": true, + "requires": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "vfile": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.0.tgz", + "integrity": "sha512-a/alcwCvtuc8OX92rqqo7PflxiCgXRFjdyoGVuYV+qbgCb0GgZJRvIgCD4+U/Kl1yhaRsaTwksF88xbPyGsgpw==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "replace-ext": "1.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + }, + "dependencies": { + "replace-ext": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.0.tgz", + "integrity": "sha1-3mMSg3P8v3w8z6TeWkgMRaZ5WOs=", + "dev": true + } + } + }, + "vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + } + }, + "vinyl": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/vinyl/-/vinyl-2.2.1.tgz", + "integrity": "sha512-LII3bXRFBZLlezoG5FfZVcXflZgWP/4dCwKtxd5ky9+LOtM4CS3bIRQsmR1KMnMW07jpE8fqR2lcxPZ+8sJIcw==", + "dev": true, + "requires": { + "clone": "^2.1.1", + "clone-buffer": "^1.0.0", + "clone-stats": "^1.0.0", + "cloneable-readable": "^1.0.0", + "remove-trailing-separator": "^1.0.1", + "replace-ext": "^1.0.0" + } + }, + "vinyl-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/vinyl-buffer/-/vinyl-buffer-1.0.1.tgz", + "integrity": "sha1-lsGjR5uMU5JULGEgKQE7Wyf4i78=", + "dev": true, + "requires": { + "bl": "^1.2.1", + "through2": "^2.0.3" + } + }, + "vinyl-fs": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/vinyl-fs/-/vinyl-fs-3.0.3.tgz", + "integrity": "sha512-vIu34EkyNyJxmP0jscNzWBSygh7VWhqun6RmqVfXePrOwi9lhvRs//dOaGOTRUQr4tx7/zd26Tk5WeSVZitgng==", + "dev": true, + "requires": { + "fs-mkdirp-stream": "^1.0.0", + "glob-stream": "^6.1.0", + "graceful-fs": "^4.0.0", + "is-valid-glob": "^1.0.0", + "lazystream": "^1.0.0", + "lead": "^1.0.0", + "object.assign": "^4.0.4", + "pumpify": "^1.3.5", + "readable-stream": "^2.3.3", + "remove-bom-buffer": "^3.0.0", + "remove-bom-stream": "^1.2.0", + "resolve-options": "^1.1.0", + "through2": "^2.0.0", + "to-through": "^2.0.0", + "value-or-function": "^3.0.0", + "vinyl": "^2.0.0", + "vinyl-sourcemap": "^1.1.0" + } + }, + "vinyl-sourcemap": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/vinyl-sourcemap/-/vinyl-sourcemap-1.1.0.tgz", + "integrity": "sha1-kqgAWTo4cDqM2xHYswCtS+Y7PhY=", + "dev": true, + "requires": { + "append-buffer": "^1.0.2", + "convert-source-map": "^1.5.0", + "graceful-fs": "^4.1.6", + "normalize-path": "^2.1.1", + "now-and-later": "^2.0.0", + "remove-bom-buffer": "^3.0.0", + "vinyl": "^2.0.0" + }, + "dependencies": { + "normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "dev": true, + "requires": { + "remove-trailing-separator": "^1.0.1" + } + } + } + }, + "vinyl-sourcemaps-apply": { + 
"version": "0.2.1", + "resolved": "https://registry.npmjs.org/vinyl-sourcemaps-apply/-/vinyl-sourcemaps-apply-0.2.1.tgz", + "integrity": "sha1-q2VJ1h0XLCsbh75cUI0jnI74dwU=", + "dev": true, + "requires": { + "source-map": "^0.5.1" + }, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + } + } + }, + "vm-browserify": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", + "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==", + "dev": true + }, + "vue-eslint-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-7.1.1.tgz", + "integrity": "sha512-8FdXi0gieEwh1IprIBafpiJWcApwrU+l2FEj8c1HtHFdNXMd0+2jUSjBVmcQYohf/E72irwAXEXLga6TQcB3FA==", + "dev": true, + "requires": { + "debug": "^4.1.1", + "eslint-scope": "^5.0.0", + "eslint-visitor-keys": "^1.1.0", + "espree": "^6.2.1", + "esquery": "^1.0.1", + "lodash": "^4.17.15" + }, + "dependencies": { + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "espree": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz", + "integrity": "sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==", + "dev": true, + "requires": { + "acorn": "^7.1.1", + "acorn-jsx": "^5.2.0", + "eslint-visitor-keys": "^1.1.0" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + } + } + }, + "websocket-driver": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "dev": true, + "requires": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + } + }, + "websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "dev": true + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "which-module": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-1.0.0.tgz", + "integrity": "sha1-u6Y8qGGUiZT/MHc2CJ47lgJsKk8=", + "dev": true + }, + "word-wrap": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", + "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "dev": true + }, + "wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": 
"sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=", + "dev": true + }, + "wrap-ansi": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", + "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", + "dev": true, + "requires": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1" + }, + "dependencies": { + "is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "dev": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dev": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + } + } + }, + "wrap-comment": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wrap-comment/-/wrap-comment-1.0.1.tgz", + "integrity": "sha512-APccrMwl/ont0RHFTXNAQfM647duYYEfs6cngrIyTByTI0xbWnDnPSptFZhS68L4WCjt2ZxuhCFwuY6Pe88KZQ==", + "dev": true + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "write": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", + "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", + "dev": true, + "requires": { + "mkdirp": "^0.5.1" + } + }, + "write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dev": true, + "requires": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "xmlhttprequest": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/xmlhttprequest/-/xmlhttprequest-1.8.0.tgz", + "integrity": "sha1-Z/4HXFwk/vOfnWX197f+dRcZaPw=", + "dev": true + }, + "xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true + }, + "xxhashjs": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/xxhashjs/-/xxhashjs-0.2.2.tgz", + "integrity": "sha512-AkTuIuVTET12tpsVIQo+ZU6f/qDmKuRUcjaqR+OIvm+aCBsZ95i7UVY5WJ9TMsSaZ0DA2WxoZ4acu0sPH+OKAw==", + "dev": true, + "requires": { + "cuint": "^0.2.2" + } + }, + "y18n": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz", + "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=", + "dev": true + }, + "yallist": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", + "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=", + "dev": true, + "optional": true + }, + "yaml": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.0.tgz", + "integrity": "sha512-yr2icI4glYaNG+KWONODapy2/jDdMSDnrONSjblABjD9B4Z5LgiircSt8m8sRZFNi08kG9Sm0uSHtEmP3zaEGg==", + "dev": true + }, + "yargs": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-7.1.1.tgz", + "integrity": 
"sha512-huO4Fr1f9PmiJJdll5kwoS2e4GqzGSsMT3PPMpOwoVkOK8ckqAewMTZyA6LXVQWflleb/Z8oPBEvNsMft0XE+g==", + "dev": true, + "requires": { + "camelcase": "^3.0.0", + "cliui": "^3.2.0", + "decamelize": "^1.1.1", + "get-caller-file": "^1.0.1", + "os-locale": "^1.4.0", + "read-pkg-up": "^1.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^1.0.1", + "set-blocking": "^2.0.0", + "string-width": "^1.0.2", + "which-module": "^1.0.0", + "y18n": "^3.2.1", + "yargs-parser": "5.0.0-security.0" + }, + "dependencies": { + "find-up": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz", + "integrity": "sha1-ay6YIrGizgpgq2TWEOzK1TyyTQ8=", + "dev": true, + "requires": { + "path-exists": "^2.0.0", + "pinkie-promise": "^2.0.0" + } + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "dev": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "load-json-file": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-1.1.0.tgz", + "integrity": "sha1-lWkFcI1YtLq0wiYbBPWfMcmTdMA=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "parse-json": "^2.2.0", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0", + "strip-bom": "^2.0.0" + } + }, + "parse-json": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "dev": true, + "requires": { + "error-ex": "^1.2.0" + } + }, + "path-exists": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-2.1.0.tgz", + "integrity": "sha1-D+tsZPD8UY2adU3V77YscCJ2H0s=", + "dev": true, + "requires": { + "pinkie-promise": "^2.0.0" + } + }, + "path-type": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz", + "integrity": "sha1-WcRPfuSR2nBNpBXaWkBwuk+P5EE=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0" + } + }, + "read-pkg": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-1.1.0.tgz", + "integrity": "sha1-9f+qXs0pyzHAR0vKfXVra7KePyg=", + "dev": true, + "requires": { + "load-json-file": "^1.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^1.0.0" + } + }, + "read-pkg-up": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz", + "integrity": "sha1-nWPBMnbAZZGNV/ACpX9AobZD+wI=", + "dev": true, + "requires": { + "find-up": "^1.0.0", + "read-pkg": "^1.0.0" + } + }, + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dev": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "strip-bom": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz", + "integrity": "sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4=", + "dev": true, + "requires": { + "is-utf8": "^0.2.0" + } + } + } + }, + "yargs-parser": { + "version": "5.0.0-security.0", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-5.0.0-security.0.tgz", + "integrity": "sha512-T69y4Ps64LNesYxeYGYPvfoMTt/7y1XtfpIslUeK4um+9Hu7hlGoRtaDLvdXb7+/tfq4opVa2HRY5xGip022rQ==", + "dev": true, + "requires": { 
+ "camelcase": "^3.0.0", + "object.assign": "^4.1.0" + } + }, + "yauzl": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha1-x+sXyT4RLLEIb6bY5R+wZnt5pfk=", + "dev": true, + "requires": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + }, + "yazl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/yazl/-/yazl-2.5.1.tgz", + "integrity": "sha512-phENi2PLiHnHb6QBVot+dJnaAZ0xosj7p3fWl+znIjBDlnMI2PsZCJZ306BPTFOaHf5qdDEI8x5qFrSOBN5vrw==", + "dev": true, + "requires": { + "buffer-crc32": "~0.2.3" + } + }, + "zepto": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/zepto/-/zepto-1.2.0.tgz", + "integrity": "sha1-4Se9nmb9hGvl6rSME5SIL3wOT5g=", + "dev": true + }, + "zwitch": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz", + "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==", + "dev": true + } + } +} diff --git a/site-ui/package.json b/site-ui/package.json new file mode 100644 index 000000000..ed7b2c19e --- /dev/null +++ b/site-ui/package.json @@ -0,0 +1,62 @@ +{ + "name": "cassandra-website-ui", + "description": "Produces the UI bundle for the Apache Cassandra website", + "homepage": "https://cassandra.apache.org", + "license": "MPL-2.0", + "repository": { + "type": "git", + "url": "https://github.com/apache/cassandra-website/site-ui" + }, + "engines": { + "node": ">= 10.0.0" + }, + "browserslist": [ + "last 2 versions" + ], + "devDependencies": { + "@octokit/rest": "~18.0", + "asciidoctor.js": "1.5.9", + "autoprefixer": "~9.8", + "browser-pack-flat": "~3.4", + "browserify": "~16.5", + "core-js": "~3.6", + "cssnano": "~4.1", + "docsearch.js": "~2.6", + "eslint": "~7.2", + "eslint-config-standard": "~14.1", + "eslint-plugin-import": "~2.21", + "eslint-plugin-node": "~11.1", + "eslint-plugin-promise": "~4.2", + "eslint-plugin-standard": "~4.0", + "fancy-log": "~1.3", + "fs-extra": "~9.0", + "gulp": "~4.0", + "gulp-concat": "~2.6", + "gulp-connect": "~5.7", + "gulp-eslint": "~6.0", + "gulp-imagemin": "~6.2", + "gulp-postcss": "~8.0", + "gulp-stylelint": "~13.0", + "gulp-uglify": "~3.0", + "gulp-vinyl-zip": "~2.2", + "handlebars": "~4.7", + "highlight.js": "~9.18", + "jquery": "~3.5", + "js-yaml": "~3.14", + "mark.js": "~8.11", + "merge-stream": "~2.0", + "postcss-calc": "~7.0", + "postcss-custom-properties": "~9.1", + "postcss-import": "~12.0", + "postcss-url": "~8.0", + "prettier-eslint": "~11.0", + "require-directory": "~2.1", + "require-from-string": "~2.0", + "stylelint": "~13.6", + "stylelint-config-standard": "~20.0", + "typeface-roboto": "0.0.75", + "typeface-roboto-mono": "0.0.75", + "vinyl-buffer": "~1.0", + "vinyl-fs": "~3.0" + } +} diff --git a/site-ui/preview-src/404.adoc b/site-ui/preview-src/404.adoc new file mode 100644 index 000000000..e69de29bb diff --git a/site-ui/preview-src/arrow-small-down.svg b/site-ui/preview-src/arrow-small-down.svg new file mode 100644 index 000000000..0372c98cd --- /dev/null +++ b/site-ui/preview-src/arrow-small-down.svg @@ -0,0 +1,54 @@ + + + + + + + +image/svg+xml + + + + + + + diff --git a/site-ui/preview-src/arrow-small-up.svg b/site-ui/preview-src/arrow-small-up.svg new file mode 100644 index 000000000..e1da21f72 --- /dev/null +++ b/site-ui/preview-src/arrow-small-up.svg @@ -0,0 +1,12 @@ + + + + +image/svg+xml + + + + + + + diff --git a/site-ui/preview-src/git-branch.svg b/site-ui/preview-src/git-branch.svg new file mode 100644 index 
000000000..f3399d876 --- /dev/null +++ b/site-ui/preview-src/git-branch.svg @@ -0,0 +1,12 @@ + + + + +image/svg+xml + + + + + + + diff --git a/site-ui/preview-src/icon-cloud-integration.svg b/site-ui/preview-src/icon-cloud-integration.svg new file mode 100644 index 000000000..92fe442d9 --- /dev/null +++ b/site-ui/preview-src/icon-cloud-integration.svg @@ -0,0 +1,31 @@ + + + Group + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/site-ui/preview-src/index.adoc b/site-ui/preview-src/index.adoc new file mode 100644 index 000000000..7e2ae242e --- /dev/null +++ b/site-ui/preview-src/index.adoc @@ -0,0 +1,106 @@ += Apache Cassandra Landing Page +:page-layout: tutorials +:page-role: tiles +:!sectids: + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Hybrid + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Fault Tolerant + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Scalable + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Performant + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== You're in control + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. 
+ +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Cloud Native + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} diff --git a/site-ui/preview-src/stats-summary.png b/site-ui/preview-src/stats-summary.png new file mode 100644 index 000000000..06c322c3b Binary files /dev/null and b/site-ui/preview-src/stats-summary.png differ diff --git a/site-ui/preview-src/tiles.adoc b/site-ui/preview-src/tiles.adoc new file mode 100644 index 000000000..4137aa185 --- /dev/null +++ b/site-ui/preview-src/tiles.adoc @@ -0,0 +1,18 @@ += Tiles +:page-role: tiles -toc + +== First Tile + +Content of tile. + +== Second Tile + +Content of tile. + +== Third Tile with Longer Title + +Content of tile. + +== Fourth Tile to Wrap It Up and Say G'night + +Content of tile. diff --git a/site-ui/preview-src/tutorials.adoc b/site-ui/preview-src/tutorials.adoc new file mode 100644 index 000000000..7e2ae242e --- /dev/null +++ b/site-ui/preview-src/tutorials.adoc @@ -0,0 +1,106 @@ += Apache Cassandra Landing Page +:page-layout: tutorials +:page-role: tiles +:!sectids: + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Hybrid + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Fault Tolerant + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Scalable + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Performant + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. 
Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== You're in control + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} + + +[.developer] +== {empty} + +image::icon-cloud-integration.svg[] + +[.title] +=== Cloud Native + +[.content] +==== {empty} + +[.summary] +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras accumsan mi a ligula imperdiet, et cursus lectus bibendum. Etiam metus magna, dapibus vitae ipsum vel, sollicitudin interdum metus. Phasellus quis rutrum enim. Vivamus euismod sem nec posuere accumsan. Praesent elementum lectus vitae magna pellentesque, a facilisis risus pulvinar. Donec scelerisque porttitor aliquet. + +===== {empty} diff --git a/site-ui/preview-src/ui-model.yml b/site-ui/preview-src/ui-model.yml new file mode 100644 index 000000000..90b6b0230 --- /dev/null +++ b/site-ui/preview-src/ui-model.yml @@ -0,0 +1,137 @@ +antoraVersion: '2.3.1' +site: + title: Apache Cassandra + url: http://localhost:5252 + homeUrl: &home_url /tutorials.html + components: + server: + versions: + - version: '4.0' + displayVersion: '4.0 Beta3' + url: /4.0/index.html + - &latest_server + version: '3.11' + title: Apache Cassandra + url: /index.html + navigation: + - content: Introduction + items: + - content: Why Cassandra? + url: '#' + urlType: fragment + - content: What's New? + url: '#' + urlType: fragment + - content: Getting Started + items: + - content: Start Here! + url: '#' + urlType: fragment + - content: Do a Quick Install + url: '#' + urlType: fragment + - content: Developers + items: + - content: Hello World! 
+ url: '#' + urlType: fragment + - content: Users and Security + url: '#' + urlType: fragment + - content: Managing Clusters + items: + - content: Monitoring + url: /server/3.11/monitoring.html + urlType: internal + items: + - content: Monitor Using JMX + url: '#' + urlType: fragment + - content: Monitor Using Prometheus + url: '#' + urlType: fragment + - content: Monitoring Statistics + url: /index.html + urlType: internal + - content: Monitoring Management + url: '#' + urlType: fragment + - content: Troubleshooting + url: '#' + urlType: fragment + items: + - content: General Tips + url: '#' + urlType: fragment + - content: Using Logs + url: '#' + urlType: fragment + - content: Common Errors + url: '#' + urlType: fragment + - content: Core File + url: '#' + urlType: fragment + - content: Installing & Upgrading + items: + - content: Cassandra Installation Home + url: '#' + urlType: fragment + items: + - content: Software, Hardware, and Network Requirements + url: '#' + urlType: fragment + items: + - content: Supported Platforms + url: '#' + urlType: fragment + - content: Swap Space and Kernel Swappiness + url: '#' + urlType: fragment + - content: Running Cassandra in Containers & Orchestration + url: '#' + urlType: fragment + - content: Understanding Cassandra + items: + - content: Overview + url: '#' + urlType: fragment + - content: Data + url: '#' + urlType: fragment + - content: Buckets, Memory, and Storage + url: '#' + urlType: fragment + - content: Services and Indexes + url: '#' + urlType: fragment + - content: content.that.does.not.want.to.wrap.on.its.own + url: '#' + urlType: fragment + - version: '2.2' + url: '#' + - version: '2.1' + url: '#' + latest: *latest_server + elasticsearch-connector: + url: /elasticsearch-connector/3.0/index.html + kafka-connector: + url: /kafka-connector/3.4/index.html + spark-connector: + url: /spark-connector/2.2/index.html + operator: + url: /operator/1.0/overview.html + home: + latest: &home_latest + version: master + title: Home + url: *home_url + versions: + - *home_latest +page: + src: + component: server + version: '3.11' + origin: + url: https://github.com/apache/cassandra-website/site-ui + editUrlPattern: https://github.com/apache/cassandra-website/site-ui/edit/archive/preview-site-src/%s diff --git a/site-ui/src/css/base.css b/site-ui/src/css/base.css new file mode 100644 index 000000000..b203fb147 --- /dev/null +++ b/site-ui/src/css/base.css @@ -0,0 +1,76 @@ +*, +*::before, +*::after { + box-sizing: inherit; +} + +html { + box-sizing: border-box; + text-size-adjust: 100%; +} + +body { + color: var(--color-text); + font-family: "Open Sans", sans-serif; + line-height: 1.5; + margin: 0; +} + +a { + color: var(--color-link); + outline: none; + text-decoration: none; +} + +a:focus, +a:hover { + text-decoration: underline; +} + +button, +input, +select { + font-family: inherit; + outline: none; +} + +button { + cursor: pointer; + font-size: inherit; + line-height: inherit; +} + +button::-moz-focus-inner { + border: 0; +} + +code, +kbd, +pre { + font-family: "Roboto Mono", monospace; +} + +code { + color: var(--color-brand-black); + font-size: 0.9375em; + word-spacing: -0.125em; +} + +html code { + hyphens: none; +} + +b, +strong { + font-weight: var(--weight-medium); +} + +small { + font-size: 0.8em; +} + +.container { + margin: 0 auto; + max-width: var(--width-container); + padding: 0 var(--width-container-gutter); +} diff --git a/site-ui/src/css/body.css b/site-ui/src/css/body.css new file mode 100644 index 000000000..880588292 --- /dev/null +++ 
b/site-ui/src/css/body.css @@ -0,0 +1,57 @@ +/* NOTE min-width of flex: 1 container = content-width to prevent wrapping by default; min-width: 0 lets content wrap */ +div.body { + display: flex; + margin-top: var(--height-to-body); + /* overflow-wrap: break-word; */ + word-wrap: break-word; /* IE only supports word-wrap, which is an alias of overflow-wrap */ +} + +/* A selector that could possibly be re-used */ +.navbar-margin { + margin-bottom: 68px; + margin-top: var(--height-navbar); +} + +main { + flex: 1; + min-width: 0; +} + +nav.nav { + /* NOTE reserve no space in layout by default */ + flex: 0 0 0%; + visibility: hidden; + /* NOTE width must be set in order for fixed child to inherit */ + width: var(--width-nav); +} + +aside.toc.sidebar { + display: none; + order: 1; + flex: none; + /* NOTE lock flex width to width of container */ + width: var(--width-toc); +} + +@media screen and (min-width: 769px) { + div.body { + min-height: var(--height-min-body); + } + + nav.nav { + visibility: visible; + /* NOTE reserve space for nav in flex layout */ + /* NOTE lock flex width to width of container */ + flex-basis: auto; + } +} + +@media screen and (min-width: 1200px) { + aside.toc.embedded { + display: none; + } + + aside.toc.sidebar { + display: block; + } +} diff --git a/site-ui/src/css/crumbs.css b/site-ui/src/css/crumbs.css new file mode 100644 index 000000000..62fa348f7 --- /dev/null +++ b/site-ui/src/css/crumbs.css @@ -0,0 +1,35 @@ +.crumbs { + color: var(--color-muted); + font-weight: var(--weight-light); + /* effectively a single-line height of 1.5 */ + line-height: 1.35; + padding: 0.075em 0; +} + +.crumbs ul { + flex-wrap: wrap; +} + +.crumbs li { + max-width: 100%; +} + +.crumbs li::after { + content: "/"; + padding: 0 0.25rem; + color: var(--color-muted); +} + +.crumbs li:last-of-type::after { + content: none; +} + +.crumbs a { + color: inherit; + text-decoration: none; +} + +.crumbs a:hover, +.crumbs a:focus { + color: var(--color-link); +} diff --git a/site-ui/src/css/doc.css b/site-ui/src/css/doc.css new file mode 100644 index 000000000..93a416939 --- /dev/null +++ b/site-ui/src/css/doc.css @@ -0,0 +1,785 @@ +.doc p { + margin: 0; +} + +.doc h1, +.doc h2, +.doc h3, +.doc h4, +.doc h5, +.doc h6 { + font-weight: var(--weight-medium); + letter-spacing: -0.025rem; + line-height: 1.2; + margin: 1.5rem 0 -0.25rem; +} + +.doc h1 { + font-size: 2.125rem; + font-weight: var(--weight-light); + letter-spacing: 0; + margin: 2.5rem 0 0; +} + +.doc h1.page { + font-size: 2.375rem; + margin-top: 0; +} + +.doc h2 { + font-size: 1.875rem; + margin: 2.25rem 0 2.5rem; + max-width: fit-content; /* NOTE used to restrict width of key line */ +} + +.doc h2::after { + content: ""; + display: block; + max-width: 5.75rem; + height: 0; + margin-top: 1.0625rem; +} + +.doc h3 { + font-size: 1.5rem; +} + +.doc h4 { + font-size: 1.125rem; +} + +.doc h5 { + font-size: 1rem; +} + +.doc h1 > a.anchor, +.doc h2 > a.anchor, +.doc h3 > a.anchor, +.doc h4 > a.anchor, +.doc h5 > a.anchor, +.doc h6 > a.anchor { + color: var(--color-brand-red); + background-image: -webkit-linear-gradient(-45deg, var(--color-brand-purple), var(--color-brand-red) 75%); + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + font-weight: var(--weight-normal); + position: absolute; + text-decoration: none; + width: 2ex; + margin-left: -1.75ex; + visibility: hidden; + transform: scale(0.9); + text-align: center; +} + +.doc h1 a.anchor::before, +.doc h2 a.anchor::before, +.doc h3 a.anchor::before, +.doc h4 
a.anchor::before, +.doc h5 a.anchor::before, +.doc h6 a.anchor::before { + content: "\00a7"; +} + +.doc h1:hover a.anchor, +.doc h2:hover a.anchor, +.doc h3:hover a.anchor, +.doc h4:hover a.anchor, +.doc h5:hover a.anchor, +.doc h6:hover a.anchor { + visibility: visible; +} + +.doc i.fa { + font-style: normal; +} + +.doc .underline { + text-decoration: underline; +} + +.doc code { + color: var(--color-text); + font-weight: var(--weight-medium); +} + +.doc pre code { + display: block; + color: inherit; + font-size: inherit; + word-spacing: inherit; +} + +.doc .paragraph, +.doc .olist, +.doc .ulist, +.doc .admonitionblock, +.doc .exampleblock, +.doc .imageblock, +.doc .listingblock, +.doc .literalblock, +.doc .sidebarblock, +.doc .swagger-container, +.doc .verseblock, +.doc .videoblock { + margin-top: 1rem; +} + +.doc .paragraph .title, +.doc .olist .title, +.doc .ulist .title, +.doc .exampleblock .title, +.doc .listingblock .title, +.doc .literalblock .title, +.doc .openblock .title, +.doc caption { + /* font-size: 1.125rem; */ + font-size: 1.0625rem; + font-weight: var(--weight-bold); + letter-spacing: -0.025em; + line-height: 1.2; + margin-bottom: 0.25rem; +} + +.doc hr { + margin: 1rem 0; + border: 0 solid var(--color-border); + border-top-width: 1px; +} + +.doc table.tableblock, +.doc th.tableblock, +.doc td.tableblock { + border: 0 solid var(--color-border-table); +} + +.doc table.tableblock { + border-width: 1px; + border-collapse: collapse; + margin: 1.5rem 0 2rem; +} + +/* TODO drop .spread after upgrading to Asciidoctor 1.5.7 */ +.doc table.spread, +.doc table.stretch { + width: 100%; +} + +.doc caption { + text-align: left; +} + +.doc table.tableblock > tbody { + font-weight: var(--weight-light); +} + +/* NOTE prevent wide tables from exceeding bounds */ +/* TODO could also target table.tableblock[style^="width:"] */ +/* TODO drop .spread after upgrading to Asciidoctor 1.5.7 */ +.doc table.spread > tbody > tr > *, +.doc table.stretch > tbody > tr > * { + /* NOTE setting max-width reactivates overflow-wrap behavior on the table cell */ + max-width: 0; +} + +/* NOTE alternate way to prevent wide tables from exceeding bounds, but can overly compress header cells */ +/* table-layout: fixed strictly enforces table and column widths */ +/* another alternative is to wrap table in div and set overflow-x: auto on wrapper */ +/* +.doc table.spread, +.doc table.stretch { + table-layout: fixed; +} +*/ + +.doc table.tableblock > tbody > tr:nth-of-type(even) { + background-color: var(--color-shade); +} + +.doc th.tableblock { + font-weight: var(--weight-medium); + letter-spacing: -0.025em; +} + +.doc th.tableblock, +.doc td.tableblock { + border-width: 0 1px; + padding: 0.75rem 1rem; +} + +.doc thead th.tableblock { + border-width: 1px; + font-size: 1.0625rem; + line-height: 1.2; +} + +.doc table.layout, +.doc table.layout th.tableblock, +.doc table.layout td.tableblock { + border: 0; +} + +.doc table.layout > tbody > tr:nth-of-type(even) { + background-color: transparent; +} + +.doc p.tableblock + p.tableblock { + margin-top: 1rem; +} + +/* NOTE in Asciidoctor 1.5.7, this div will have a class */ +.doc td.tableblock > div > :first-child { + margin-top: 0; +} + +.doc .halign-left { + text-align: left; +} + +.doc .halign-right { + text-align: right; +} + +.doc .halign-center { + text-align: center; +} + +.doc .valign-top { + vertical-align: top; +} + +.doc .valign-bottom { + vertical-align: bottom; +} + +.doc .valign-middle { + vertical-align: middle; +} + +.doc .admonitionblock > table, +.doc 
.admonitionblock > table > tbody, +.doc .admonitionblock > table > tbody > tr > td { + display: block; +} + +.doc .admonitionblock > table > tbody > tr { + display: flex; +} + +.doc .admonitionblock td.icon { + padding: 0.5rem 0.375rem 0 0.75rem; +} + +.doc .admonitionblock td.icon i::before { + background: no-repeat 0/cover; + content: ""; + display: block; + height: 1.875rem; + width: 1.875rem; +} + +.doc .admonitionblock td.content { + border-bottom: 1px solid var(--color-brand-silver); + border-right: 1px solid var(--color-brand-silver); + border-top: 1px solid var(--color-brand-silver); + flex: 1; + font-size: 0.9375rem; + hyphens: auto; + line-height: 1.6; + min-width: 0; + padding: 0.75rem; +} + +.doc .admonitionblock td.content > .title { + display: inline; + font-style: italic; +} + +.doc .admonitionblock td.content > .title::after { + content: ""; + display: table; +} + +.doc .admonitionblock td.content::before { + font-weight: var(--weight-medium); +} + +.doc .admonitionblock.caution > table { + background-color: transparent; +} + +.doc .admonitionblock.caution td.icon i::before { + background-image: url(../img/caution.svg); +} + +.doc .admonitionblock.caution td.content::before { + content: "Caution: "; + color: var(--color-brand-orange); +} + +.doc .admonitionblock.caution td.content { + border-left: 6px solid var(--color-brand-orange); +} + +.doc .admonitionblock.important > table { + background-color: transparent; +} + +.doc .admonitionblock.important td.icon i::before { + background-image: url(../img/important.svg); +} + +.doc .admonitionblock.important td.content::before { + content: "Important: "; + color: var(--color-brand-blue); +} + +.doc .admonitionblock.important td.content { + border-left: 6px solid var(--color-brand-blue); +} + +.doc .admonitionblock.note > table { + background-color: transparent; +} + +.doc .admonitionblock.note td.icon i::before { + background-image: url(../img/note.svg); +} + +.doc .admonitionblock.note td.content::before { + content: "Note: "; + color: var(--color-brand-light-blue); +} + +.doc .admonitionblock.note td.content { + border-left: 6px solid var(--color-brand-light-blue); +} + +.doc .admonitionblock.tip > table { + background-color: transparent; +} + +.doc .admonitionblock.tip td.icon i::before { + background-image: url(../img/tip.svg); +} + +.doc .admonitionblock.tip td.content::before { + content: "Tip: "; + color: var(--color-brand-purple); +} + +.doc .admonitionblock.tip td.content { + border-left: 6px solid var(--color-brand-purple); +} + +.doc .admonitionblock.warning > table { + background-color: transparent; +} + +.doc .admonitionblock.warning td.icon i::before { + background-image: url(../img/warning.svg); +} + +.doc .admonitionblock.warning td.content::before { + content: "Warning: "; + color: var(--color-brand-red); +} + +.doc .admonitionblock.warning td.content { + border-left: 6px solid var(--color-brand-red); +} + +.doc .admonitionblock td.content > :first-child { + margin-top: 0; +} + +.doc .imageblock { + display: flex; + flex-direction: column; +} + +.doc .imageblock img { + display: block; + margin-left: auto; + margin-right: auto; + width: 30%; +} + +.doc .imageblock .title { + font-style: italic; + margin-top: 0.5rem; +} + +.doc .imageblock img, +.doc span.image img { + height: auto; + max-width: 100%; +} + +.doc span.image.icon { + line-height: 1; + vertical-align: -0.2em; + display: inline-flex; + padding: 0 0.25ex; +} + +.doc span.image.icon img { + height: 1em; + width: auto; +} + +.doc .abstract blockquote { 
+ font-size: 0.9375rem; + margin: 1rem 0 1.5625rem 0; + font-weight: var(--weight-light); +} + +.doc .abstract blockquote * { + font-weight: inherit; +} + +.doc .abstract blockquote::before { + content: "Summary: "; + color: var(--color-muted); + font-weight: var(--weight-medium); +} + +.doc ul { + margin: 0; + padding: 0 0 0 1.75rem; +} + +.doc ol { + margin: 0; + padding: 0 0 0 2.625rem; +} + +.doc ul.checklist { + padding-left: 0.5rem; + list-style: none; +} + +.doc ul.checklist p > i.fa-check-square-o:first-child, +.doc ul.checklist p > i.fa-square-o:first-child { + display: inline-flex; + justify-content: center; + width: 1rem; + margin-right: 0.25rem; +} + +.doc ul.checklist i.fa-check-square-o::before { + content: "\2713"; +} + +.doc ul.checklist i.fa-square-o::before { + content: "\274f"; +} + +.doc .dlist .dlist, +.doc .dlist .olist, +.doc .dlist .ulist, +.doc .olist .dlist, +.doc .olist .olist, +.doc .olist .ulist, +.doc .ulist .dlist, +.doc .ulist .olist, +.doc .ulist .ulist { + margin-top: 0.5rem; +} + +.doc .olist li + li, +.doc .ulist li + li { + margin-top: 0.5rem; +} + +.doc .ulist .listingblock, +.doc .olist .listingblock, +.doc .admonitionblock .listingblock { + padding: 0; +} + +.doc .exampleblock > .content { + background-color: var(--color-shade); + box-shadow: inset 0 0 1px #bec0c1; + padding: 2rem 2.25rem; +} + +.doc .exampleblock > .content > :first-child { + margin-top: 0; +} + +.doc pre { + font-size: 0.875rem; + line-height: 1.25rem; + margin: 0; +} + +.doc pre:not(.highlight), +.doc pre.highlight code { + background-color: #151514; + color: #f8f8f2; + font-weight: var(--weight-normal); /* needed to override third-party styles */ + padding: 0.625rem; + white-space: pre-wrap; + /* NOTE enable these styles if side-to-side scrolling is preferred */ + /* + overflow-wrap: normal; + word-wrap: normal; + overflow-x: auto; + */ +} + +/* NOTE assume pre.highlight contains code[data-lang] */ +.doc pre.highlight { + position: relative; +} + +.doc .listingblock code[data-lang]::before { + content: attr(data-lang); + color: #f8f8f2; + display: none; + font-size: 0.75em; + font-weight: var(--weight-light); + letter-spacing: 1px; + line-height: 1; + text-transform: uppercase; + position: absolute; + top: 0.375rem; + right: 0.5rem; +} + +.doc .listingblock:hover code[data-lang]::before { + display: block; +} + +.doc .dlist { + margin: 1.5rem 0; +} + +.doc .dlist dl { + margin: 0; +} + +.doc .dlist dt { + font-weight: var(--weight-medium); +} + +.doc .dlist dd + dt { + margin-top: 1.5rem; +} + +.doc .dlist dd { + margin-left: 1.5rem; +} + +.doc .dlist dt + dd { + margin-top: 0.125rem; +} + +.doc .dlist dd > .openblock > .content > :first-child { + margin-top: 0; +} + +.doc .sidebarblock > .content { + border: 1px solid var(--color-border); + padding: 3rem; +} + +.doc .sidebarblock > .content > .title { + font-size: 1.5rem; + font-weight: var(--weight-bold); + line-height: 1.2; + margin-bottom: 2.5rem; +} + +.doc .sidebarblock > .content > .title::after { + content: ""; + display: block; + max-width: 5.75rem; + height: 0; + outline: 1px solid var(--color-brand-orange); + margin-top: 1.25rem; +} + +.doc .sidebarblock > .content > :not(.title):first-child { + margin-top: 0; +} + +.doc .conum[data-value] { + border: 1px solid currentColor; + border-radius: 100%; + display: inline-flex; + font-family: "Roboto", sans-serif; + font-size: 0.75rem; + font-style: normal; + width: 1rem; + height: 1rem; + justify-content: center; + align-items: center; + vertical-align: text-bottom; +} + 
+.doc .conum[data-value]::after { + content: attr(data-value); +} + +.doc .conum[data-value] + b { + display: none; +} + +.doc pre .conum[data-value] { + background-color: #f8f8f2; + border-color: #f8f8f2; + color: #151514; +} + +.doc .colist { + margin-top: 0.5rem; +} + +.doc .colist td:first-of-type { + line-height: 1.4; + padding: 0 0.5rem; + vertical-align: top; +} + +.doc .colist td:last-of-type { + padding: 0; +} + +.doc b.button { + white-space: nowrap; +} + +.doc b.button::before { + content: "["; + padding-right: 0.25em; +} + +.doc b.button::after { + content: "]"; + padding-left: 0.25em; +} + +.doc kbd { + display: inline-block; + font-size: 0.7rem; + background-color: #fafafa; + border: 1px solid var(--color-border); + border-radius: 0.25em; + box-shadow: 0 1px 0 var(--color-border), 0 0 0 0.1em #fff inset; + padding: 0.25em 0.5em; + vertical-align: text-bottom; + white-space: nowrap; +} + +.doc kbd, +.doc .keyseq { + line-height: 1; +} + +.doc .keyseq { + font-family: "Roboto Mono", monospace; + font-size: 0.9em; +} + +.doc .keyseq kbd { + margin: 0 0.125em; +} + +.doc .keyseq kbd:first-child { + margin-left: 0; +} + +.doc .keyseq kbd:last-child { + margin-right: 0; +} + +.doc .menuseq i.caret::before { + content: ">"; + font-weight: var(--weight-medium); +} + +.body.tiles .doc { + display: flex; + flex-wrap: wrap; + margin-right: -2.5rem; +} + +.body.tiles .doc > h1.page { + flex-basis: 100%; + padding-right: 2.5rem; +} + +.body.tiles .sect1 { + flex-basis: 50%; + padding-right: 2.5rem; + margin-bottom: 0.5rem; +} + +.body.tiles .sect1 > h2 { + font-size: 1.5rem; + font-weight: var(--weight-bold); +} + +.body.tiles .sect1 > h2::after { + margin-top: 1.25rem; +} + +.doc .tabs ul { + display: flex; + flex-wrap: wrap; + list-style: none; + margin: 0 -0.25rem 0 0; + padding: 0; +} + +.doc .tabs li { + align-items: center; + border: 1px solid var(--color-border); + border-bottom: 0; + cursor: pointer; + display: flex; + font-weight: var(--weight-bold); + height: 2.5rem; + line-height: 1; + margin-right: 0.25rem; + padding: 0 1.5rem; + position: relative; +} + +.doc .tabs li + li { + margin-top: 0; +} + +.doc .tabset.is-loading .tabs li:not(:first-child), +.doc .tabset:not(.is-loading) .tabs li:not(.is-active) { + background-color: var(--color-text); + color: var(--color-brand-white); +} + +.doc .tabset.is-loading .tabs li:first-child::after, +.doc .tabs li.is-active::after { + background-color: var(--color-brand-white); + content: ""; + display: block; + height: 3px; /* Chrome doesn't always paint the line accurately, so add a little extra */ + position: absolute; + bottom: -1.5px; + left: 0; + right: 0; +} + +.doc .tabset > .content { + border: 1px solid var(--color-border); + padding: 1.25rem; +} + +.doc .tabset.is-loading .tab-pane:not(:first-child), +.doc .tabset:not(.is-loading) .tab-pane:not(.is-active) { + display: none; +} + +.doc .tab-pane > :first-child { + margin-top: 0; +} diff --git a/site-ui/src/css/docsearch-overrides.css b/site-ui/src/css/docsearch-overrides.css new file mode 100644 index 000000000..89800f70b --- /dev/null +++ b/site-ui/src/css/docsearch-overrides.css @@ -0,0 +1,28 @@ +.algolia-autocomplete .ds-dropdown-menu { + border: 1px solid #d9d9d9; + border-radius: 0; + box-shadow: none; + margin-top: 0.75rem; + max-width: none; + width: 100%; +} + +.algolia-autocomplete .ds-dropdown-menu [class^=ds-dataset-] { + border: 0; + border-radius: 0; +} + +.algolia-autocomplete .ds-dropdown-menu .ds-suggestions { + overflow: auto; + max-height: calc(100vh - 
var(--height-navbar) - 2.5rem); + margin: 0 -0.5rem; + padding: 0.5rem; +} + +.algolia-autocomplete .ds-dropdown-menu .ds-suggestions::-webkit-scrollbar { + width: 0.25rem; +} + +.algolia-autocomplete .ds-dropdown-menu .ds-suggestions::-webkit-scrollbar-thumb { + background-color: var(--color-border); +} diff --git a/site-ui/src/css/feedback.css b/site-ui/src/css/feedback.css new file mode 100644 index 000000000..60245af76 --- /dev/null +++ b/site-ui/src/css/feedback.css @@ -0,0 +1,28 @@ +#atlwdg-trigger.atlwdg-trigger { + display: none; + background: linear-gradient(to right, var(--color-brand-pink) 0%, var(--color-brand-orange) 100%); + border: 0; + border-radius: 3px 3px 0 0; + box-shadow: -2px -4px 30px rgba(0, 0, 0, 0.25); + color: #fff !important; + font-family: inherit; + font-size: 1rem; + line-height: 1; + padding: 0.6875rem 1.5rem 0.5625rem; + right: var(--width-container-gutter); + z-index: var(--z-index-feedback); +} + +.atlwdg-trigger::after { + content: "?"; +} + +@media screen and (min-width: 769px) { + #atlwdg-trigger.atlwdg-trigger { + display: block; + } +} + +#atlwdg-container.atlwdg-popup { + width: 769px; +} diff --git a/site-ui/src/css/footer.css b/site-ui/src/css/footer.css new file mode 100644 index 000000000..0401cf995 --- /dev/null +++ b/site-ui/src/css/footer.css @@ -0,0 +1,274 @@ +.footer { + background-color: var(--color-brand-white); + color: var(--color-footer-text); + padding: 2.5rem 0; + position: relative; + z-index: var(--z-index-footer); +} + +.footer a { + color: inherit; + position: relative; + text-decoration: none; +} + +.footer a::after { + content: ""; + position: absolute; + bottom: -3px; + left: 0; + right: 0; + height: 1px; + background-color: var(--color-footer-text); + transform: scale3d(0, 0.9999, 0.9999); + transition: transform 0.1s; +} + +.footer a.icon::after, +.footer a.btn::after { + content: none; +} + +.footer a:focus::after, +.footer a:hover::after { + transform: scale3d(1, 0.9999, 0.9999); + transition-duration: 0.05s; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); +} + +.footer-links { + display: flex; + flex-wrap: wrap; + max-width: 73.75rem; + margin: 0 auto; + padding-bottom: 2rem; +} + +.footer-links .col { + flex-basis: 33.3333%; + padding-top: 0.625rem; +} + +.footer-links .col:nth-child(1) { + flex-basis: 100%; + padding: 0 0 1.25rem; +} + +.footer-links .col:nth-child(5) { + flex-basis: 100%; +} + +.footer-logo { + padding: 1rem 0; + width: 193px; +} + +.footer-logo img { + width: 100%; + vertical-align: top; +} + +.footer-links p.address { + font-weight: var(--weight-light); + margin: 0; + white-space: pre-wrap; +} + +.footer-links a.white-btn { + background-color: var(--color-footer-text); + color: var(--color-footer-bg); + margin: 1rem 0; + display: inline-block; + /* FIXME this should be max-height 52px */ + padding: 1rem 3.5rem; + border: 2px solid var(--color-footer-text); + font-weight: var(--weight-bold); + font-size: 1.125rem; + letter-spacing: -0.025em; + position: relative; + transition: background-color 0.3s, color 0.3s; + float: left; +} + +.footer-links a.white-btn:focus, +.footer-links a.white-btn:hover { + background-color: var(--color-footer-bg); + color: var(--color-footer-text); +} + +.footer-links a.btn ~ a { + display: inline-block; + font-weight: var(--weight-bold); + float: left; + clear: left; +} + +.footer-links ul { + font-size: 0.875rem; + line-height: 1; + list-style: none; + margin: 0; + padding: 0; + text-transform: uppercase; +} + +.footer-links li { + padding: 0 1rem 0.625rem 0; 
+} + +.footer-links li .heading { + color: var(--color-brand-red); + font-weight: var(--weight-bold); +} + +.footer-links li .heading, +.footer-links li a { + line-height: 1.7; +} + +.footer-links li a { + font-size: 0.8125rem; +} + +.footer-links .social-icons { + display: flex; + justify-content: center; + flex-wrap: wrap; + margin-top: -4px; +} + +.footer-links .social-icons li { + display: inline-block; + padding: 0; +} + +.footer-links .social-icons a { + display: flex; + align-items: center; + justify-content: center; + width: 45px; + height: 45px; + margin: 0 0.125rem; + border-radius: 50%; + transition: background-color 0.3s ease-in-out; +} + +.footer-links .social-icons svg { + fill: var(--color-footer-text); + width: 35px; + height: 29px; + transition: fill 0.3s ease-in-out; +} + +.footer-links .social-icons a:focus, +.footer-links .social-icons a:hover { + background-color: var(--color-footer-text); +} + +.footer-links .social-icons a:focus svg, +.footer-links .social-icons a:hover svg { + fill: var(--color-footer-bg); +} + +.footer-terms { + font-size: 0.875rem; + line-height: 1; + border-top: 1px solid var(--color-footer-text); + padding: 2.125rem 0 0.3125rem; + max-width: 71.25rem; + margin: 0 auto; + text-align: center; +} + +.footer-terms span { + display: block; +} + +.footer-terms a { + font-weight: var(--weight-light); + display: inline-block; + margin: 0.625rem 0.3125rem; +} + +@media screen and (min-width: 769px) { + .footer-links .col { + flex-basis: 22%; + } + + .footer-links .col:nth-child(1) { + flex-basis: 34%; + } + + .footer-links .col:nth-child(5) { + flex-basis: 100%; + } + + .footer-links li { + padding-bottom: 0.5rem; + } + + .footer-terms span { + display: inline-block; + margin-right: 0.5rem; + } + + .footer-terms a { + display: inline-block; + margin-right: 1rem; + } +} + +@media screen and (min-width: 1024px) { + footer.footer { + padding: 1rem 0 1rem; + } + + .footer-links { + padding-top: 2rem; + } + + .footer-links .col { + flex-basis: 17%; + } + + .footer-links .col:nth-child(1) { + flex-basis: 27%; + padding: 0 1.25rem; + } + + .footer-links .col:nth-child(2) { + flex-basis: 16%; + } + + .footer-links .col:nth-child(5) { + flex-basis: 23%; + } + + .footer-links li a { + font-size: inherit; + } + + .footer-links .social-icons { + justify-content: flex-end; + padding-right: 0.75rem; + } + + .footer-terms { + text-align: left; + } + + .footer-terms span { + margin-left: 0.125rem; + } +} + +@media screen and (min-width: 1200px) { + .footer-links .social-icons a { + width: 52px; + height: 52px; + } + + .footer-links .social-icons svg { + width: 50px; + } +} diff --git a/site-ui/src/css/header.css b/site-ui/src/css/header.css new file mode 100644 index 000000000..755a7bc11 --- /dev/null +++ b/site-ui/src/css/header.css @@ -0,0 +1,390 @@ +.navbar { + background-color: var(--color-navbar-bg); + height: var(--height-navbar); + position: fixed; + top: 0; + right: 0; + left: 0; + z-index: var(--z-index-navbar); + line-height: 1; +} + +.navbar .container { + display: flex; + align-items: center; + height: inherit; +} + +.navbar, +.navbar a { + color: var(--color-navbar-text); +} + +.navbar a:focus, +.navbar a:hover { + text-decoration: none; +} + +.navbar-brand { + display: flex; + align-items: center; + flex: 1; /* FIXME this works without this property in antora-ui-default */ +} + +.navbar-brand .navbar-item { + display: flex; + height: 42px; + transition: height 0.5s ease; +} + +.navbar-brand img { + height: 100%; +} + +.navbar-burger { + width: 30px; + 
height: 19px; + background: none; + border: 0; + position: relative; + margin-left: auto; + padding: 0; +} + +.navbar-burger span { + display: block; + width: inherit; + height: 4px; + background: var(--color-navbar-text); + margin-bottom: 3px; + transition: all 0.3s ease-in-out; + transform: translate3d(0, 0, 0); +} + +.navbar-burger.is-active span { + position: absolute; +} + +.navbar-burger.is-active span:nth-child(1) { + top: 8px; + transform: rotate(135deg); +} + +.navbar-burger.is-active span:nth-child(2) { + opacity: 0; +} + +.navbar-burger.is-active span:nth-child(3) { + top: 8px; + transform: rotate(-135deg); +} + +.navbar-menu { + display: none; + flex-grow: 1; +} + +.navbar-start { + flex-grow: 1; + display: flex; + align-items: center; + font-size: 1.0625rem; +} + +.navbar-start > a.navbar-item, +.navbar-start > .navbar-item > .navbar-link { + display: block; + padding: 0.75rem 1.25rem; + text-transform: uppercase; + letter-spacing: -0.025em; + position: relative; +} + +.navbar-item.has-dropdown-click label.navbar-dropper { + display: block; + padding: 0.75rem 1.25rem; + text-transform: uppercase; + letter-spacing: -0.025em; + position: relative; +} + +.navbar-item.has-dropdown-click label.navbar-dropper::after { + content: ""; + position: absolute; + background: url(../img/caret-down.svg) no-repeat 50% 50%; + width: 0.875rem; + height: 1rem; + margin-left: 0.25rem; +} + +.navbar-item.has-dropdown-click input.navbar-dropper { + display: none; +} + +.navbar-dropdown { + margin: 0 1.25rem; +} + +.navbar-dropdown a { + position: relative; +} + +.navbar-dropdown a::after { + content: ""; + position: absolute; + bottom: -3px; + left: 0; + right: 0; + height: 1px; + background-color: var(--color-navbar-text); + transform: scale3d(0, 0.9999, 0.9999); + transition: transform 0.1s; +} + +.navbar-dropdown a:focus::after, +.navbar-dropdown a:hover::after { + transform: scale3d(1, 0.9999, 0.9999); + transition-duration: 0.05s; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); +} + +.navbar-dropdown.explore .title { + display: none; +} + +.navbar-dropdown ul { + list-style: none; + padding: 0.5rem 0; + margin: 0; +} + +.navbar-dropdown ul.two-cols { + display: grid; + grid-template-columns: repeat(2, auto); + align-content: flex-start; +} + +.navbar-dropdown li { + padding: 0.75rem 1rem 0.75rem 1.25rem; +} + +.navbar-link .version { + text-transform: none; +} + +.navbar-dropdown li.heading { + font-weight: var(--weight-bold); +} + +.navbar-dropdown li.current { + font-weight: var(--weight-medium); +} + +.navbar-dropdown li.current a.navbar-item::before { + content: "\2023"; + position: absolute; + left: -0.75em; +} + +.navbar-dropdown ul.two-cols li.heading { + grid-column: 1 / 3; +} + +@media screen and (min-width: 1024px) { + .navbar-start > a.navbar-item:hover, + .navbar-start > .navbar-item:hover > .navbar-link { + color: var(--color-brand-red); + } + + .navbar-dropdown { + visibility: hidden; + position: absolute; + margin-left: 1.25rem; /* FIXME can we get rid of this? 
*/ + box-shadow: -5px 6px 10px rgba(25, 24, 24, 0.6); + opacity: 0; + } + + .navbar-dropdown.explore .title { + display: block; + font-weight: var(--weight-bold); + padding: 1rem 1.25rem 0.75rem; + background-color: #151514; + } + + .navbar-dropdown .cols { + display: flex; + background: #333 linear-gradient(#333 0%, #201a19 100%); + } + + .navbar-dropdown .cols ul + ul { + border-left: 3px solid #151514; + } + + .navbar-dropdown.versions li { + padding-right: 1.25rem; /* FIXME make this stretch to size of parent */ + } +} + +.navbar-start .navbar-item.search { + flex-grow: 1; + justify-content: flex-end; + display: flex; + padding-right: 1.25rem; +} + +.navbar-menu:not(.is-active) .reveal-search-input > .navbar-item:not(.search) { + display: none; +} + +.navbar-start input.query { + flex-grow: 1; + background-color: var(--color-navbar-bg); + color: inherit; + padding: 0.5rem 0.25rem; + font-size: 1rem; + font-weight: var(--weight-light); + border: 0; +} + +.navbar-start input.query::placeholder { + color: inherit; + padding-left: 0.125rem; +} + +.reveal-search-input .algolia-autocomplete { + flex-grow: 1; + display: flex !important; + align-items: center; + border-bottom: 1px solid var(--color-navbar-text); +} + +.reveal-search-input .algolia-autocomplete::before { + content: ""; + background: url(../img/search.svg) 50% 50% no-repeat; + background-size: cover; + border: 0; + height: 1rem; + width: 1rem; +} + +.navbar-start:not(.reveal-search-input) input.query { + display: none; +} + +.navbar-start button.search { + align-items: center; + background: none; + border: 0; + display: flex; + font-size: 1.125rem; + padding: 0 0.5rem; +} + +.navbar-start button.search::before { + content: ""; + background: url(../img/search.svg) 50% 50% no-repeat; + background-size: cover; + border: 0; + height: 1.25rem; + width: 1.25rem; +} + +.navbar-start button.search span { + color: var(--color-navbar-text); + padding-left: 0.5rem; +} + +.reveal-search-input button.search::before { + background: url(../img/search-close.svg) 50% 50% no-repeat; +} + +.navbar .red-btn { + border: 2px solid var(--color-brand-red); + background-color: var(--color-brand-red); + color: inherit; + font-weight: var(--weight-bold); + font-size: 1.125rem; + height: 3.25rem; + width: 9.75rem; /* equal to 1.875rem padding on sides */ + display: inline-flex; + align-items: center; + justify-content: center; + letter-spacing: -0.025em; + transition: background-color 0.3s; +} + +.navbar .red-btn:focus, +.navbar .red-btn:hover { + background-color: var(--color-navbar-bg); + color: var(--color-navbar-text); +} + +/* mobile menu */ + +html.is-clipped--navbar { + overflow-y: hidden; +} + +.navbar-menu.is-active { + display: block; + background-color: var(--color-navbar-bg); + position: absolute; + top: var(--height-navbar); + left: 0; + right: 0; + padding: 1.25rem 0; + height: calc(100vh - var(--height-navbar)); + overflow-y: auto; +} + +.navbar-menu.is-active .navbar-start { + display: block; +} + +.navbar-menu.is-active .navbar-item.search { /* FIXME quick hacks to get something working */ + justify-content: center; + padding-left: 1.25rem; + height: 2.5rem; +} + +.navbar-menu.is-active .navbar-end { + text-align: center; + margin: 1rem 0; +} + +@media screen and (min-width: 1024px) { + .navbar-brand { + flex: none; + margin-right: 1.25rem; + } + + .navbar-brand .navbar-item { + height: 47px; + width: 165px; + justify-content: center; + } + + .navbar-burger { + display: none; + } + + .navbar-menu { + display: flex; + } + + 
.navbar-item.has-dropdown-click input.navbar-dropper:checked + .navbar-dropdown { + visibility: visible; + opacity: 1; + } + + .navbar-item.has-dropdown-click input.navbar-dropper:checked + .navbar-dropdown::before { + content: ""; + display: block; + position: absolute; + top: -2.5rem; + right: 0; + left: 0; + height: 2.5rem; + z-index: var(--z-index-navbar-dropdown); + } +} diff --git a/site-ui/src/css/highlight.css b/site-ui/src/css/highlight.css new file mode 100644 index 000000000..451eb26d3 --- /dev/null +++ b/site-ui/src/css/highlight.css @@ -0,0 +1,61 @@ +/** +Steps to change the highlight.js theme: + +1. View available themes on https://highlightjs.org/static/demo/ +2. Find CSS file for the theme to use https://github.com/highlightjs/highlight.js/tree/master/src/styles +3. Copy the theme's CSS rules here (exclude rules for the `.hljs` selector otherwise it will conflict with other site styles) +4. Replace the `bold` keyword with `var(--weight-medium)` (because the monospace font we're using has multiple weights and we don't want to use the really strong one). + +Other CSS rules for styling code blocks: + +- In doc.css, the `.doc pre:not(.highlight), .doc pre.highlight code` selector sets the code block `color` and `background-color` +- In doc.css, the `.doc .listingblock code[data-lang]::before` selector sets the language label `color` +*/ +.hljs-comment, +.hljs-meta { + color: #708090; +} + +.hljs-keyword, +.hljs-selector-tag { + color: #66d9ef; + font-weight: var(--weight-medium); +} + +.hljs-subst { + color: #66d9ef; +} + +.hljs-number, +.hljs-literal, +.hljs-variable, +.hljs-tag .hljs-attr { + color: #ae81ff; +} + +.hljs-string, +.hljs-doctag { + color: #a6e22e; +} + +.hljs-tag, +.hljs-attribute { + color: #f92672; +} + +.hljs-built_in, +.hljs-function .hljs-title { + color: #e6db74; +} + +.hljs-regexp { + color: #fd971f; +} + +.hljs-emphasis { + font-style: italic; +} + +.hljs-strong { + font-weight: var(--weight-medium); +} diff --git a/site-ui/src/css/home.css b/site-ui/src/css/home.css new file mode 100644 index 000000000..da378b379 --- /dev/null +++ b/site-ui/src/css/home.css @@ -0,0 +1,474 @@ + +.paragraph { + margin-top: 1.25rem; +} + +.home .paragraph.hint { + font-size: 0.9375rem; +} + +.home .ulist li + li { + margin-top: 0; +} + +.home h1, +.home h2, +.home h3 { + font-weight: var(--weight-bold); + line-height: 1.2; + margin: 0; + max-width: none; + letter-spacing: 0; +} + +.home h1.page { + font-size: 2.375rem; + margin-top: 3.5rem; + text-align: center; +} + +.home #preamble { + margin: 1rem 0 3rem; + font-size: 1.125rem; + text-align: center; +} + +.home .card ul, +.home .tile ul { + line-height: 1.25; + list-style: none; + margin: 0; + padding: 0; +} + +.home .card li, +.home .tile li { + padding: 0.25em 0; +} + +.home .card a, +.home .tile a { + text-decoration: none; + position: relative; +} + +.home .card a::before, +.home .tile a::before { + content: ""; + position: absolute; + bottom: -3px; + left: 0; + right: 0; + height: 1px; + background-color: currentColor; + transform: scale3d(0, 0.9999, 0.9999); + transition: transform 0.1s; +} + +.home .card a:focus::before, +.home .card a:hover::before, +.home .tile a:focus::before, +.home .tile a:hover::before { + transform: scale3d(1, 0.9999, 0.9999); + transition-duration: 0.05s; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); +} + +.home .conceal-title > h2 { + display: none; +} + +.home .cards > .sectionbody { + display: flex; + flex-direction: column; + flex-wrap: wrap; + margin: -0.75rem 0 0
-0.75rem; +} + +.home .card { + color: #fff; + display: flex; + flex: 1 0 auto; + flex-direction: column; + margin: 0.75rem 0 0 0.75rem; + min-width: 0; + padding: 1.25rem; +} + +.home .card:nth-of-type(1) { + background-color: var(--color-brand-pink); +} + +.home .card:nth-of-type(2) { + background-color: var(--color-brand-green); +} + +.home .card:nth-of-type(3) { + background-color: var(--color-brand-light-blue); +} + +.home .card:nth-of-type(4) { + background-color: var(--color-brand-orange); +} + +.home .card h3 { + font-size: 1.5rem; + line-height: 2rem; + padding: 0 0 1rem; + position: relative; +} + +.home .card h3::after { + content: ""; + display: block; + position: absolute; + max-width: 5.75rem; + height: 0; + left: 0; + right: 0; + bottom: 0; + border: 1px solid currentColor; + width: 25%; +} + +.home .card .paragraph { + font-weight: var(--weight-light); + margin-top: 1rem; + flex: 1 0 auto; /* NOTE must specify flex-basis for IE 11 */ +} + +.home .card .ulist { + font-weight: var(--weight-bold); + margin: 1.5rem 0 0.5rem; + /* overflow-wrap: normal; */ + word-wrap: normal; /* NOTE not sure why this is required, but without it, list items get spaced apart */ +} + +.home .card a, +.home .tile .title a { + color: inherit; +} + +.home .card a::after { + content: "\00a0>"; + width: 0; + display: inline-block; +} + +.home .tiles { + font-size: 1.125rem; + margin-top: 2.5rem; +} + +.home .tiles > h2 { + font-size: 2rem; + hyphens: auto; + padding-bottom: 3.125rem; + text-align: center; +} + +.home .tiles > h2::after { + content: ""; + display: block; + background-color: currentColor; + width: 5.625rem; + height: 1px; + margin: 0 auto; + margin-bottom: -1px; + position: relative; + top: 1.375rem; + outline: none; +} + +.home .tiles > .sectionbody { + display: flex; + flex-wrap: wrap; +} + +.home .tile { + flex: 1; + margin-top: 0; + margin-bottom: 0.25rem; + min-width: 0; + flex-basis: auto; + width: 50%; +} + +.home .tile .title { + font-weight: var(--weight-bold); + font-size: inherit; + line-height: inherit; + letter-spacing: 0; + margin-bottom: 0.25rem; +} + +@media screen and (min-width: 769px) { + .home h1.page, + .home #preamble { + margin-left: 0.5rem; + margin-right: 0.5rem; + } + + .home .cards > .sectionbody { + flex-direction: row; + } + + .home .card { + flex-basis: 0; + } + + .home .cards-4 .card { + flex-basis: calc(50% - 0.75rem); + } + + .home .tiles { + padding: 0 10%; + } + + .home .tiles > .sectionbody { + margin: 0 -0.5rem; + } + + .home .tile { + flex-basis: 0; + margin-bottom: 0; + padding: 0 0.5rem; + } +} + +@media screen and (min-width: 1024px) { + .home .cards-4 .card { + flex-basis: 0; + } +} + +/** CSS rules for tiles on the tutorial list page **/ + +/* override existing */ +.tutorials.body.tiles .doc { + justify-content: flex-start; + margin-right: 0; +} + +.tutorials .container { + min-height: 100%; +} + +/* 1 column layout on small screens */ +@media screen and (max-width: 768px) { + .tutorials.body.tiles .sect1 { + flex-basis: 100%; + } +} + +/* 3 column layout on large screens */ +@media screen and (min-width: 769px) { + .tutorials.body.tiles .sect1 { + flex-basis: calc(33% - 40px); + } +} + +.tutorials.body.tiles .sectionbody { + /* add padding immediately inside the card */ + display: flex; + height: calc(100% - 55px); + flex-direction: column; + justify-content: space-between; + padding: 0 0.9375rem; + margin-left: auto; + margin-right: auto; + text-align: center; +} + +.tutorials.body.tiles > .sectionbody > img { + display: flex; +} + +/* target 
the tutorial title */ +.tutorials.body.tiles .sectionbody > div.paragraph { + font-size: 1.3rem; +} + +mark { + /* background: #f4f8fd; */ +} + +.tutorials.body.tiles .sect1 { + margin: 0 0 1.25rem 0; + /* override property defined in .body.tiles .sect1 */ + padding-right: 0; + margin-right: 1.25rem; +} + +.tutorials.body.tiles .sect1 > h2 { + color: white; + line-height: 2.2rem; + min-width: 100%; + text-align: center; +} + +.tutorials.body.tiles .sect2 { + display: flex; + flex-direction: column; +} + +.tutorials.body.tiles .sect2.title { + height: 100%; +} + +/* fix the height. some titles span over 2 lines */ +.tutorials.body.tiles .title { + /* height: 62px; */ +} + +/* fix the height. tutorial summaries vary in length */ +.tutorials.body.tiles .sect2 .summary p { + height: 100%; +} + +.tutorials.body.tiles .sect1.developer > h2 { + background-color: #fff; +} + +.tutorials.body.tiles .sect1.architect > h2 { + background-color: #fff; +} + +.tutorials.body.tiles .sect1.devops > h2 { + background-color: #fff; +} + +/* layout tutorial languages and components */ +.tutorials.body.tiles .sect3 { + display: flex; + flex-direction: column; + height: 100%; + margin-bottom: 0; +} + +.tutorials.body.tiles .sect2.metadata { + align-content: flex-end; + border-top: 1px solid var(--color-brand-silver); + display: flex; + justify-content: flex-end; +} + +.metadata { + padding: 0.625rem; +} + +.metadata h3 { + font-size: 1rem; + font-weight: normal; +} + +.grad::before { + content: ""; + position: absolute; + height: 100%; + width: 100%; + background-image: url(../img/noise.png); + background-repeat: repeat; + opacity: 0.11; + z-index: -1; +} + +.grad { + background: linear-gradient(50deg, var(--color-brand-light-green) 0%, var(--color-brand-light-blue) 100%); + position: relative; + width: 100%; + z-index: -2; + align-items: center; + justify-content: center; +} + +.tutorials.body.tiles .sect3 h4 { + font-size: 1.1rem; + font-weight: normal; + margin-top: 0.5rem; +} + +.tutorials.body.tiles .sect5 { + margin: 5px 0; + width: 50%; +} + +.tutorials.body.tiles .sect5 h6 { + font-size: 1rem; + margin: 0; +} + +.tutorials .banner { + color: white; + height: calc(440px - var(--height-navbar)); +} + +.tutorials .banner h1 { + font-size: 2.375rem; + margin: 0; + padding: 0; + text-align: center; +} + +.tutorials .banner h2 { + font-weight: 300; + font-size: 1.75rem; + margin: 0; + padding: 14px 0 17px 0; + text-align: center; +} + +.tutorials .banner-content { + display: flex; + align-items: center; +} + +.tutorials footer { + margin-top: 3rem; +} + +.searchbar { + background-color: var(--color-navbar-bg); + height: 4.2rem; + line-height: 1; +} + +.searchbar .container { + display: flex; + align-items: center; + height: inherit; +} + +.searchbar, +.searchbar a { + color: var(--color-navbar-text); +} + +.searchbar-menu { + display: flex; + flex-grow: 1; +} + +.searchbar-start { + flex-grow: 1; + display: flex; + align-items: center; + font-size: 1.0625rem; +} + +.searchbar-start .searchbar-item.search { + flex-grow: 1; + justify-content: flex-end; + display: flex; + padding-right: 1.25rem; +} + +.searchbar-start input.query { + flex-grow: 1; + background-color: var(--color-navbar-bg); + color: inherit; + padding: 0; + font-size: 1.3rem; + font-weight: var(--weight-light); + border: 0; +} diff --git a/site-ui/src/css/labels.css b/site-ui/src/css/labels.css new file mode 100644 index 000000000..9f82663da --- /dev/null +++ b/site-ui/src/css/labels.css @@ -0,0 +1,74 @@ +.doc .labels ul, +.doc .labels p { + 
display: flex; + list-style: none; + margin: 0; + padding: 0.125rem 0 0; +} + +.doc .labels li, +.doc .labels span { + display: block; + font-size: var(--labels-font-size); + font-weight: var(--weight-bold); + line-height: var(--labels-line-height); + position: relative; +} + +.doc span.edition, +.doc span.status { + max-width: fit-content; + font-size: var(--labels-font-size); + font-weight: var(--weight-bold); + line-height: var(--labels-line-height); +} + +.doc .labels li:first-child::before, +.doc .labels span:first-child::before { + content: ""; + display: block; + position: absolute; + left: calc(50% - 0.5rem); + border: 0.5rem solid transparent; + top: -1rem; +} + +.doc .labels li > *, +.doc span.edition, +.doc span.status { + color: #fff; + display: inline-block; + padding: 0.375em 1em 0.3em; + text-transform: uppercase; +} + +.doc .labels li a, +.doc span.edition a, +.doc span.status a { + text-decoration: none; +} + +.doc span.edition *, +.doc span.status * { + color: inherit; +} + +.doc .labels li.edition, +.doc span.edition { + background-color: var(--color-brand-light-blue); +} + +.doc .labels li.edition::before, +.doc .labels span.edition::before { + border-bottom-color: var(--color-brand-light-blue); +} + +.doc .labels li.status, +.doc span.status { + background-color: var(--color-brand-orange); +} + +.doc .labels li.status:first-child::before, +.doc .labels span.status:first-child::before { + border-bottom-color: var(--color-brand-orange); +} diff --git a/site-ui/src/css/main.css b/site-ui/src/css/main.css new file mode 100644 index 000000000..e0ee1bbf2 --- /dev/null +++ b/site-ui/src/css/main.css @@ -0,0 +1,119 @@ +main { + padding-bottom: 4rem; +} + +/* IMPORTANT for this to work, the element cannot be display: flex and cannot have padding top or border top */ +main [id]::before { + content: ""; + display: inherit; + height: var(--height-to-body); + margin-top: calc(-1 * var(--height-to-body)); + visibility: hidden; + width: 0; +} + +main table[id]::before { + display: block; +} + +main a[id]::before, +main code[id]::before, +main em[id]::before, +main span[id]::before, +main strong[id]::before { + display: inline-block; + position: relative; + top: -0.75em; +} + +@supports (-moz-appearance: none) { + main a[id], + main code[id], + main em[id], + main span[id], + main strong[id] { + border-top: var(--height-to-body) solid transparent; + pointer-events: none; /* NOTE don't allow border to cover preceding lines */ + } +} + +main blockquote, +main p { + hyphens: auto; /* NOTE in Chrome, hyphens: auto is only supported on macOS and Android */ +} + +.article-banner { + align-items: center; + background: linear-gradient(to right, var(--color-brand-purple) 0%, var(--color-brand-light-blue) 100%); + color: #fff; + display: flex; + line-height: 1.2; + margin: calc(-1 * var(--height-spacer)) calc(-1 * var(--width-container-gutter)) var(--height-spacer); + padding: 1rem 1.25rem; +} + +.article-banner p { + font-size: 1.125rem; + margin: 0 1rem 0 0; +} + +.article-banner a.btn { + color: #fff; + border: 1px solid #fff; + display: block; + font-weight: var(--weight-bold); + letter-spacing: -0.025em; + line-height: 1; + margin-left: auto; + padding: 0.875rem 1.5rem; + text-decoration: none; + transition: background-color 0.3s, color 0.3s; + white-space: nowrap; +} + +.article-banner a.btn:focus, +.article-banner a.btn:hover { + background-color: #fff; + color: var(--color-brand-purple); +} + +.article-header { + display: flex; + font-size: 0.875rem; + line-height: 1; + margin-bottom: 
0.5rem; + align-items: center; +} + +.article-header ul { + display: flex; + list-style: none; + margin: 0; + padding: 0; +} + +.article-header .crumbs { + flex: 1; + min-width: 0; +} + +@media screen and (min-width: 769px) { + main.article { + padding-right: var(--width-main-gutter); + padding-left: var(--width-main-gutter); + } + + main.home { + padding-right: var(--width-container-gutter); + padding-left: var(--width-container-gutter); + } + + .article-banner { + margin: 0 calc(-1 * var(--width-main-gutter)) 1rem; + } + + .article-header { + margin-bottom: 0; + align-items: flex-start; + } +} diff --git a/site-ui/src/css/nav.css b/site-ui/src/css/nav.css new file mode 100644 index 000000000..683d38ca6 --- /dev/null +++ b/site-ui/src/css/nav.css @@ -0,0 +1,151 @@ +.nav-menu { + background-color: var(--color-shade); + position: fixed; + width: inherit; + overflow-y: scroll; + font-size: 0.875rem; + line-height: 1.35; + scrollbar-width: thin; + scrollbar-color: var(--color-border) transparent; +} + +.nav-menu::-webkit-scrollbar { + width: 0.25rem; +} + +.nav-menu::-webkit-scrollbar-thumb { + background-color: var(--color-border); +} + +@media screen and (max-width: 768px) { + .nav-menu { + top: 0; + bottom: 0; + left: 0; + transform: translateX(-100%); + z-index: var(--z-index-nav-mobile); + } +} + +@media screen and (min-width: 769px) { + .nav-menu { + top: var(--height-to-body); + margin-bottom: 1.5rem; + height: var(--height-nav); + z-index: var(--z-index-nav); + } +} + +.nav-menu .nav-line, +.nav-menu .nav-link, +.nav-menu .nav-text { + display: block; +} + +.nav-menu a.nav-link { + color: var(--color-text); + text-decoration: none; +} + +.nav-menu a.nav-link:focus, +.nav-menu a.nav-link:hover { + color: var(--color-link); +} + +.nav-menu .nav-list { + list-style: none; + margin: 0; + padding: 0; +} + +.nav-menu > .nav-list { + margin: 1rem 0.5rem 1rem 0.75rem; +} + +.nav-menu .nav-item .nav-list { + /* + margin-top: -0.0625rem; + margin-bottom: 0.625rem; + */ + margin-bottom: 0.75rem; +} + +.nav-menu .nav-item:not(.is-active) > .nav-list { + display: none; +} + +.nav-menu .nav-item { + margin-left: 0.75em; +} + +/* NOTE navigation list without a category */ +.nav-item[data-depth="0"] > .nav-list:first-child { + margin-left: -0.75em; +} + +.nav-menu .nav-item[data-depth="0"] + .nav-item { + margin-top: 1.25em; +} + +.nav-menu .nav-item .nav-item { + margin-top: 0.5em; +} + +.nav-item[data-depth="0"] > .nav-line { + text-transform: uppercase; +} + +.nav-menu .nav-item.is-current-page > .nav-line > a.nav-link { + background: linear-gradient(to right, var(--color-brand-orange) 0%, var(--color-brand-orange) 100%) no-repeat bottom / 100% 1.5px; + color: var(--color-text); + font-weight: var(--weight-medium); +} + +.nav-menu .nav-toggle { + background: none; + border: 0; + padding: 0; + position: absolute; + margin-left: -1.25em; + width: 1.25em; + color: var(--color-border); + transform: scale(1.25); +} + +.nav-menu .nav-toggle::before { + content: "+"; +} + +.nav-menu .nav-item.is-active > .nav-line > .nav-toggle::before { + content: "-"; +} + +/* mobile menu */ +html.is-clipped--nav { + overflow-y: hidden; +} + +.nav.is-active { + visibility: visible; +} + +.nav.is-active .nav-menu { + transform: translateX(0); + transition: transform 0.2s; +} + +.nav-control { + border: 0; + padding: 0; + width: 1.5em; + height: 1.5em; + margin-right: 0.5rem; + background: url(../img/menu.svg) no-repeat center; + background-size: 75%; +} + +@media screen and (min-width: 769px) { + .nav-control { + 
display: none; + } +} diff --git a/site-ui/src/css/optanon.css b/site-ui/src/css/optanon.css new file mode 100644 index 000000000..0bd024f13 --- /dev/null +++ b/site-ui/src/css/optanon.css @@ -0,0 +1,6 @@ +.optanon-alert-box-wrapper, +#optanon-popup-wrapper { + position: fixed; + overflow: hidden; + width: 0; +} diff --git a/site-ui/src/css/site.css b/site-ui/src/css/site.css new file mode 100644 index 000000000..5e33e1708 --- /dev/null +++ b/site-ui/src/css/site.css @@ -0,0 +1,17 @@ +@import "vars.css"; +@import "base.css"; +@import "body.css"; +@import "nav.css"; +@import "toc.css"; +@import "main.css"; +@import "crumbs.css"; +@import "toolbar.css"; +@import "labels.css"; +@import "doc.css"; +@import "swagger-ui.css"; +@import "home.css"; +@import "header.css"; +@import "footer.css"; +@import "optanon.css"; +@import "highlight.css"; +@import "feedback.css"; diff --git a/site-ui/src/css/swagger-ui.css b/site-ui/src/css/swagger-ui.css new file mode 100644 index 000000000..cdd55f439 --- /dev/null +++ b/site-ui/src/css/swagger-ui.css @@ -0,0 +1,60 @@ +.doc .swagger-ui .topbar, +.doc .swagger-ui .wrapper.information-container { + display: none; +} + +.doc .swagger-ui .wrapper { + padding: 0; + max-width: none; +} + +.doc .swagger-ui .scheme-container { + box-shadow: none; + margin: 0; + padding: 0; +} + +.doc .swagger-ui .scheme-container .schemes > label { + margin: 0; +} + +.doc .swagger-ui .opblock .opblock-summary-method { + word-wrap: normal; +} + +/* NOTE long paths are unwieldy; time to get forceful */ +.doc .swagger-ui .opblock .opblock-summary-path { + word-break: break-all; +} + +.doc .swagger-ui .opblock-body select { + min-width: auto; +} + +.doc .swagger-ui .opblock-tag-section { + display: block; +} + +.doc .swagger-ui .models .model-container { + background: none !important; +} + +.doc .swagger-ui .models .model-box { + background-color: rgba(0, 0, 0, 0.05); + position: relative; + transition: background-color 0.5s; +} + +.doc .swagger-ui .models .model-box:hover { + background-color: rgba(0, 0, 0, 0.07); +} + +.doc .docs-ui a { + border: 0; +} + +.doc .docs-ui p, +.doc .docs-ui ul, +.doc .docs-ui pre { + margin-top: 1rem; /* NOTE quick hack to space out content in config UI */ +} diff --git a/site-ui/src/css/toc.css b/site-ui/src/css/toc.css new file mode 100644 index 000000000..1b629e800 --- /dev/null +++ b/site-ui/src/css/toc.css @@ -0,0 +1,76 @@ +.toc { + font-size: 0.875rem; +} + +.toc-menu { + border: 0 solid var(--color-brand-red); + border-width: 0 0 0 0.25rem; + border-image: linear-gradient(var(--color-brand-purple), var(--color-brand-pink)) 0 0 0 100%; + margin-bottom: 1.5rem; +} + +.toc.sidebar .toc-menu { + position: fixed; /* for IE */ + width: inherit; /* for position: fixed */ + position: sticky; /* stylelint-disable-line declaration-block-no-duplicate-properties */ + top: var(--height-to-body); + max-height: var(--height-nav); + overflow-y: auto; + -ms-overflow-style: none; + scrollbar-width: none; +} + +.toc-menu::-webkit-scrollbar { + width: 0; +} + +.toc.embedded .toc-menu { + margin-top: 1rem; +} + +.toc .toc-menu h3 { + font-size: inherit; + font-weight: var(--weight-medium); + margin: 0; + line-height: 1; +} + +.toc .toc-menu ul { + list-style: none; + margin: 0; + padding: 0; + line-height: 1.2; +} + +.toc .toc-menu h3, +.toc .toc-menu ul { + margin-left: 0.75rem; +} + +.toc .toc-menu li { + margin: 0.5rem 0 0; +} + +.toc a { + color: inherit; + display: block; + text-decoration: none; +} + +.toc a:hover { + color: var(--color-link); +} + +.toc 
a.is-active { + font-weight: var(--weight-medium); + letter-spacing: -0.008em; +} + +.toc a.is-active:focus, +.toc a.is-active:hover { + color: inherit; +} + +.toc code { + line-height: 1.125; +} diff --git a/site-ui/src/css/toolbar.css b/site-ui/src/css/toolbar.css new file mode 100644 index 000000000..9397b8ef9 --- /dev/null +++ b/site-ui/src/css/toolbar.css @@ -0,0 +1,17 @@ +.tools .edit { + width: 1.5em; + height: 1.5em; +} + +.tools .edit a { + display: inline-block; + width: 100%; + height: 100%; + text-indent: 100%; + white-space: nowrap; + overflow: hidden; + background: url(../img/edit.svg) no-repeat center; + background-size: 75%; + vertical-align: top; + color: transparent; +} diff --git a/site-ui/src/css/typeface-roboto-mono.css b/site-ui/src/css/typeface-roboto-mono.css new file mode 100644 index 000000000..c51a7fc84 --- /dev/null +++ b/site-ui/src/css/typeface-roboto-mono.css @@ -0,0 +1,21 @@ +@font-face { + font-family: "Roboto Mono"; + font-style: normal; + font-weight: 400; + src: + local("Roboto Mono Regular"), + local("RobotoMono-Regular"), + url(~typeface-roboto-mono/files/roboto-mono-latin-400.woff2) format("woff2"), + url(~typeface-roboto-mono/files/roboto-mono-latin-400.woff) format("woff"); +} + +@font-face { + font-family: "Roboto Mono"; + font-style: normal; + font-weight: 500; + src: + local("Roboto Mono Medium"), + local("RobotoMono-Medium"), + url(~typeface-roboto-mono/files/roboto-mono-latin-500.woff2) format("woff2"), + url(~typeface-roboto-mono/files/roboto-mono-latin-500.woff) format("woff"); +} diff --git a/site-ui/src/css/typeface-roboto.css b/site-ui/src/css/typeface-roboto.css new file mode 100644 index 000000000..78254868d --- /dev/null +++ b/site-ui/src/css/typeface-roboto.css @@ -0,0 +1,43 @@ +@font-face { + font-family: "Roboto"; + font-style: normal; + font-weight: 400; + src: + local("Roboto Regular"), + local("Roboto-Regular"), + url(~typeface-roboto/files/roboto-latin-400.woff2) format("woff2"), + url(~typeface-roboto/files/roboto-latin-400.woff) format("woff"); +} + +@font-face { + font-family: "Roboto"; + font-style: italic; + font-weight: 400; + src: + local("Roboto Italic"), + local("Roboto-Italic"), + url(~typeface-roboto/files/roboto-latin-400italic.woff2) format("woff2"), + url(~typeface-roboto/files/roboto-latin-400italic.woff) format("woff"); +} + +@font-face { + font-family: "Roboto"; + font-style: normal; + font-weight: 500; + src: + local("Roboto Medium"), + local("Roboto-Medium"), + url(~typeface-roboto/files/roboto-latin-500.woff2) format("woff2"), + url(~typeface-roboto/files/roboto-latin-500.woff) format("woff"); +} + +@font-face { + font-family: "Roboto"; + font-style: italic; + font-weight: 500; + src: + local("Roboto Medium Italic"), + local("Roboto-MediumItalic"), + url(~typeface-roboto/files/roboto-latin-500italic.woff2) format("woff2"), + url(~typeface-roboto/files/roboto-latin-500italic.woff) format("woff"); +} diff --git a/site-ui/src/css/vars.css b/site-ui/src/css/vars.css new file mode 100644 index 000000000..140601c20 --- /dev/null +++ b/site-ui/src/css/vars.css @@ -0,0 +1,61 @@ +:root { + /* NOTE tint colors are 15% transparent */ + /* primary */ + --color-brand-black: #000; + --color-brand-red: #ea2328; + --color-brand-red-tint: #fcdedf; + --color-brand-white: #fff; + /* secondary */ + --color-brand-light-blue: #1c81a0; + --color-brand-light-blue-tint: #d9f3fb; + --color-brand-pink: #eb4971; + --color-brand-green: #6eba91; + --color-brand-light-green: #92b25e; + --color-brand-purple: #b36cdb; + 
--color-brand-purple-tint: #f4e9fa; + /* tertiary */ + --color-brand-gray: #666; + /* --color-brand-silver: #ccc; */ + --color-brand-silver: #c1c1c1; + --color-brand-blue: #0074e0; + --color-brand-blue-tint: #d9eafb; + --color-brand-orange: #fc9c0c; + --color-brand-orange-tint: #fff0da; + --color-muted: var(--color-brand-gray); + --color-text: #fff; + --color-link: var(--color-brand-light-blue); + --color-code: var(--color-brand-pink); + --color-border: var(--color-brand-silver); + /* --color-border-table: #666; */ + --color-border-table: #dadada; + --color-shade: #f3f3f3; + --color-navbar-bg: var(--color-brand-black); + --color-navbar-text: var(--color-brand-white); + --color-footer-bg: var(--color-brand-black); + --color-footer-text: var(--color-brand-white); + --height-spacer: 1.5rem; + /* --height-navbar: 4rem; */ + --height-navbar: 4.875rem; + --height-to-body: calc(var(--height-navbar) + var(--height-spacer)); + --height-min-body: calc(100vh - var(--height-to-body)); + --height-nav: calc(var(--height-min-body) - var(--height-spacer)); + --labels-font-size: 0.75rem; + --labels-line-height: 1; + /* --width-main-gutter: 1.5rem; */ + --width-main-gutter: 2.5rem; + --width-container: 90rem; + --width-container-gutter: 1.25rem; + --width-nav: 16rem; + --width-toc: 12rem; + --weight-light: 300; + --weight-normal: 400; + --weight-medium: 500; + --weight-bold: 700; + --z-index-feedback: 7; + --z-index-nav-mobile: 6; + --z-index-navbar-dropdown-link: 5; + --z-index-navbar-dropdown: 4; + --z-index-navbar: 3; + --z-index-footer: 2; + --z-index-nav: 1; +} diff --git a/site-ui/src/css/vendor/docsearch.css b/site-ui/src/css/vendor/docsearch.css new file mode 100644 index 000000000..38c0daa80 --- /dev/null +++ b/site-ui/src/css/vendor/docsearch.css @@ -0,0 +1,3 @@ +@import "docsearch.js/dist/cdn/docsearch.css"; +@import "../vars.css"; +@import "../docsearch-overrides.css"; diff --git a/site-ui/src/helpers/add.js b/site-ui/src/helpers/add.js new file mode 100644 index 000000000..a731983c2 --- /dev/null +++ b/site-ui/src/helpers/add.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = (a, b) => (a || 0) + (b || 0) diff --git a/site-ui/src/helpers/and.js b/site-ui/src/helpers/and.js new file mode 100644 index 000000000..2ad2237d4 --- /dev/null +++ b/site-ui/src/helpers/and.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = (a, b) => a && b diff --git a/site-ui/src/helpers/canonical-url.js b/site-ui/src/helpers/canonical-url.js new file mode 100644 index 000000000..45528ca8b --- /dev/null +++ b/site-ui/src/helpers/canonical-url.js @@ -0,0 +1,40 @@ +'use strict' + +const VERSIONED_ROOT_RELATIVE_URL_RX = /^(\/[^/]+)\/[^/]+(?=\/)/ + +module.exports = ({ data: { root } }) => { + const { contentCatalog, env, page, site } = root + const siteUrl = site.url + if (!siteUrl || siteUrl.charAt() === '/') return + let { url, version, missing } = page.versions ? (page.latest || { url: page.url }) : page + const latestVersion = version + if (missing) { + const family = 'alias' + const baseAliasId = { component: page.component.name, module: page.module, family, relative: page.relativeSrcPath } + let latestReached + for (const it of page.versions) { + if (!(latestReached || (latestReached = it.latest))) continue + if (it.missing) { + const alias = contentCatalog.getById({ ...baseAliasId, version: it.version }) + if (alias) { + url = alias.rel.pub.url + version = it.version + break + } + } else { + ;({ url, version } = it) + break + } + } + } + const targetSiteUrl = url.charAt() === '/' ? 
siteUrl : page.componentVersion.asciidoc.attributes['primary-site-url'] + if (version === 'master' || version !== latestVersion) { + return targetSiteUrl + url + } else if (siteUrl === targetSiteUrl) { + if (env.SUPPORTS_CURRENT_URL === 'true') return siteUrl + url.replace(VERSIONED_ROOT_RELATIVE_URL_RX, '$1/current') + return siteUrl + url + } else if (env.PRIMARY_SITE_SUPPORTS_CURRENT_URL === 'true') { + return targetSiteUrl + url.substr(targetSiteUrl.length).replace(VERSIONED_ROOT_RELATIVE_URL_RX, '$1/current') + } + return targetSiteUrl + url +} diff --git a/site-ui/src/helpers/detag.js b/site-ui/src/helpers/detag.js new file mode 100644 index 000000000..ca283a2bc --- /dev/null +++ b/site-ui/src/helpers/detag.js @@ -0,0 +1,4 @@ +'use strict' + +const TAG_ALL_RX = /<[^>]+>/g +module.exports = (html) => html && html.replace(TAG_ALL_RX, '') diff --git a/site-ui/src/helpers/ends-with.js b/site-ui/src/helpers/ends-with.js new file mode 100644 index 000000000..472a34f4a --- /dev/null +++ b/site-ui/src/helpers/ends-with.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = (str, suffix) => str.endsWith(suffix) diff --git a/site-ui/src/helpers/eq.js b/site-ui/src/helpers/eq.js new file mode 100644 index 000000000..16dc28701 --- /dev/null +++ b/site-ui/src/helpers/eq.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = (a, b) => a === b diff --git a/site-ui/src/helpers/includes.js b/site-ui/src/helpers/includes.js new file mode 100644 index 000000000..4020b03bf --- /dev/null +++ b/site-ui/src/helpers/includes.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = (haystack, needle) => ~(haystack || '').indexOf(needle) diff --git a/site-ui/src/helpers/last.js b/site-ui/src/helpers/last.js new file mode 100644 index 000000000..3375b5e10 --- /dev/null +++ b/site-ui/src/helpers/last.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = (c) => c[c.length - 1] diff --git a/site-ui/src/helpers/latest-page-url.js b/site-ui/src/helpers/latest-page-url.js new file mode 100644 index 000000000..bdb9ac539 --- /dev/null +++ b/site-ui/src/helpers/latest-page-url.js @@ -0,0 +1,26 @@ +'use strict' + +const VERSIONED_ROOT_RELATIVE_URL_RX = /^(\/[^/]+)\/[^/]+(?=\/)/ + +module.exports = ({ data: { root } }) => { + const { contentCatalog, env, page } = root + let { url, version, missing } = page.latest || { url: page.url } + if (missing) { + const latestAlias = contentCatalog.getById({ + component: page.component.name, + version, + module: page.module, + family: 'alias', + relative: page.relativeSrcPath, + }) + if (!latestAlias) return + url = latestAlias.rel.pub.url + } + if (url.charAt() === '/') { + return env.SUPPORTS_CURRENT_URL === 'true' ? 
url.replace(VERSIONED_ROOT_RELATIVE_URL_RX, '$1/current') : url + } else if (env.PRIMARY_SITE_SUPPORTS_CURRENT_URL === 'true') { + const primarySiteUrl = page.componentVersion.asciidoc.attributes['primary-site-url'] + return primarySiteUrl + url.substr(primarySiteUrl.length).replace(VERSIONED_ROOT_RELATIVE_URL_RX, '$1/current') + } + return url +} diff --git a/site-ui/src/helpers/ne.js b/site-ui/src/helpers/ne.js new file mode 100644 index 000000000..245f03b44 --- /dev/null +++ b/site-ui/src/helpers/ne.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = (a, b) => a !== b diff --git a/site-ui/src/helpers/not.js b/site-ui/src/helpers/not.js new file mode 100644 index 000000000..8b3aa917b --- /dev/null +++ b/site-ui/src/helpers/not.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = (val) => !val diff --git a/site-ui/src/helpers/or.js b/site-ui/src/helpers/or.js new file mode 100644 index 000000000..354612b23 --- /dev/null +++ b/site-ui/src/helpers/or.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = (a, b) => a || b diff --git a/site-ui/src/helpers/related-sdk-pages.js b/site-ui/src/helpers/related-sdk-pages.js new file mode 100644 index 000000000..6e3ffade0 --- /dev/null +++ b/site-ui/src/helpers/related-sdk-pages.js @@ -0,0 +1,21 @@ +'use strict' + +module.exports = (langs, { data: { root } }) => { + const { contentCatalog, page, site } = root + const components = site.components + const thisComponentName = page.component.name + return langs + .split(',') + .map((lang) => lang + '-sdk') + .filter((componentName) => !(componentName === thisComponentName || (components[componentName] || {}).origin)) + .map((componentName) => { + const component = components[componentName] + if (component) { + const lookupContext = { component: componentName, version: component.latest.version, module: page.module } + const relatedPage = contentCatalog && contentCatalog.resolvePage(page.relativeSrcPath, lookupContext) + return { url: relatedPage ? relatedPage.pub.url : component.url, title: component.title } + } else { + return { title: componentName } + } + }) +} diff --git a/site-ui/src/helpers/year.js b/site-ui/src/helpers/year.js new file mode 100644 index 000000000..aa38992cc --- /dev/null +++ b/site-ui/src/helpers/year.js @@ -0,0 +1,3 @@ +'use strict' + +module.exports = () => new Date().getFullYear().toString() diff --git a/site-ui/src/img/back.svg b/site-ui/src/img/back.svg new file mode 100644 index 000000000..1a66e8df3 --- /dev/null +++ b/site-ui/src/img/back.svg @@ -0,0 +1,86 @@ + + + + +Left arrow + + + + +image/svg+xml + +Left arrow + + +Sarah White + + + + +OpenDevise Inc. 
+ + + + + + + + + + + + + + + + + + diff --git a/site-ui/src/img/caret-down.svg b/site-ui/src/img/caret-down.svg new file mode 100644 index 000000000..149ddb98e --- /dev/null +++ b/site-ui/src/img/caret-down.svg @@ -0,0 +1,3 @@ + + + diff --git a/site-ui/src/img/caution.svg b/site-ui/src/img/caution.svg new file mode 100644 index 000000000..8d6fcfa24 --- /dev/null +++ b/site-ui/src/img/caution.svg @@ -0,0 +1,18 @@ + + + + + + + diff --git a/site-ui/src/img/close.svg b/site-ui/src/img/close.svg new file mode 100644 index 000000000..bbe94c7ea --- /dev/null +++ b/site-ui/src/img/close.svg @@ -0,0 +1,166 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/site-ui/src/img/edit.svg b/site-ui/src/img/edit.svg new file mode 100644 index 000000000..45ea6b15e --- /dev/null +++ b/site-ui/src/img/edit.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/site-ui/src/img/favicon.ico b/site-ui/src/img/favicon.ico new file mode 100644 index 000000000..77de4c7df Binary files /dev/null and b/site-ui/src/img/favicon.ico differ diff --git a/site-ui/src/img/important.svg b/site-ui/src/img/important.svg new file mode 100644 index 000000000..90cc4f8f9 --- /dev/null +++ b/site-ui/src/img/important.svg @@ -0,0 +1,24 @@ + + + + + + + + + diff --git a/site-ui/src/img/logo.svg b/site-ui/src/img/logo.svg new file mode 100644 index 000000000..62c0b217e --- /dev/null +++ b/site-ui/src/img/logo.svg @@ -0,0 +1 @@ +Artboard 1 \ No newline at end of file diff --git a/site-ui/src/img/menu.svg b/site-ui/src/img/menu.svg new file mode 100644 index 000000000..6a51e24db --- /dev/null +++ b/site-ui/src/img/menu.svg @@ -0,0 +1,25 @@ + + + + +image/svg+xml + + + + + + + + + + + + + + + + + + + + diff --git a/site-ui/src/img/noise.png b/site-ui/src/img/noise.png new file mode 100644 index 000000000..c26c488e1 Binary files /dev/null and b/site-ui/src/img/noise.png differ diff --git a/site-ui/src/img/note.svg b/site-ui/src/img/note.svg new file mode 100644 index 000000000..2baa1900b --- /dev/null +++ b/site-ui/src/img/note.svg @@ -0,0 +1,33 @@ + + + + + + + + + + + + diff --git a/site-ui/src/img/search-close.svg b/site-ui/src/img/search-close.svg new file mode 100644 index 000000000..de2e8a9b0 --- /dev/null +++ b/site-ui/src/img/search-close.svg @@ -0,0 +1,166 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/site-ui/src/img/search.svg b/site-ui/src/img/search.svg new file mode 100644 index 000000000..392bc1094 --- /dev/null +++ b/site-ui/src/img/search.svg @@ -0,0 +1 @@ + diff --git a/site-ui/src/img/tip.svg b/site-ui/src/img/tip.svg new file mode 100644 index 000000000..4e3dc80d7 --- /dev/null +++ b/site-ui/src/img/tip.svg @@ -0,0 +1,144 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/site-ui/src/img/warning.svg b/site-ui/src/img/warning.svg new file mode 100644 index 000000000..ffa416060 --- /dev/null +++ b/site-ui/src/img/warning.svg @@ -0,0 +1,16 @@ + + + + + diff --git a/site-ui/src/js/01-nav.js b/site-ui/src/js/01-nav.js new file mode 100644 index 000000000..c91cc89e7 --- /dev/null +++ b/site-ui/src/js/01-nav.js @@ -0,0 +1,115 @@ +;(function () { + 'use strict' + + // closes open menus in navbar on loss of focus + window.addEventListener('click', function (e) { + find('input.navbar-dropper').forEach(function (input) { + if (input !== e.target) input.checked = false + }) + }) + + var nav = document.querySelector('nav.nav') + var 
navMenu = {} + if (!(navMenu.element = nav && nav.querySelector('.nav-menu'))) return + var navControl + var currentPageItem = navMenu.element.querySelector('.is-current-page') + + // NOTE prevent text from being selected by double click + navMenu.element.addEventListener('mousedown', function (e) { + if (e.detail > 1) e.preventDefault() + }) + + find('.nav-toggle', navMenu.element).forEach(function (toggleBtn) { + var navItem = findAncestorWithClass('nav-item', toggleBtn, navMenu.element) + toggleBtn.addEventListener('click', toggleActive.bind(navItem)) + var navItemSpan = findNextElement(toggleBtn) + if (navItemSpan.classList.contains('nav-text')) { + navItemSpan.style.cursor = 'pointer' + navItemSpan.addEventListener('click', toggleActive.bind(navItem)) + } + }) + + fitNavMenuInit({}) + window.addEventListener('load', fitNavMenuInit) + window.addEventListener('resize', fitNavMenuInit) + + if ((navControl = document.querySelector('main .nav-control'))) navControl.addEventListener('click', revealNav) + + function scrollItemToMiddle (el, parentEl) { + var adjustment = (el.getBoundingClientRect().height - parentEl.getBoundingClientRect().height) * 0.5 + el.offsetTop + if (adjustment > 0) parentEl.scrollTop = adjustment + } + + function fitNavMenuInit (e) { + window.removeEventListener('scroll', fitNavMenuOnScroll) + navMenu.element.style.height = '' + if ((navMenu.preferredHeight = navMenu.element.getBoundingClientRect().height) > 0) { + // QUESTION should we check if x value > 0 instead? + if (window.getComputedStyle(nav).visibility === 'visible') { + if (!navMenu.encroachingElement) navMenu.encroachingElement = document.querySelector('footer.footer') + fitNavMenu(navMenu.preferredHeight, (navMenu.viewHeight = window.innerHeight), navMenu.encroachingElement) + window.addEventListener('scroll', fitNavMenuOnScroll) + } + if (currentPageItem && e.type !== 'resize') { + scrollItemToMiddle(currentPageItem.querySelector('.nav-link'), navMenu.element) + } + } + } + + function fitNavMenuOnScroll () { + fitNavMenu(navMenu.preferredHeight, navMenu.viewHeight, navMenu.encroachingElement) + } + + function fitNavMenu (preferredHeight, availableHeight, encroachingElement) { + var reclaimedHeight = availableHeight - encroachingElement.getBoundingClientRect().top + navMenu.element.style.height = reclaimedHeight > 0 ? 
Math.max(0, preferredHeight - reclaimedHeight) + 'px' : '' + } + + function toggleActive (e) { + this.classList.toggle('is-active') + concealEvent(e) + } + + function revealNav (e) { + if (nav.classList.contains('is-active')) return hideNav(e) + document.documentElement.classList.add('is-clipped--nav') + nav.classList.add('is-active') + nav.addEventListener('click', concealEvent) + window.addEventListener('click', hideNav) + concealEvent(e) // NOTE don't let event get picked up by window click listener + } + + function hideNav (e) { + if (e.which === 3 || e.button === 2) return + document.documentElement.classList.remove('is-clipped--nav') + nav.classList.remove('is-active') + nav.removeEventListener('click', concealEvent) + window.removeEventListener('click', hideNav) + concealEvent(e) // NOTE don't let event get picked up by window click listener + } + + function find (selector, from) { + return [].slice.call((from || document).querySelectorAll(selector)) + } + + function findAncestorWithClass (className, from, scope) { + if ((from = from.parentNode) !== scope) { + if (from.classList.contains(className)) { + return from + } else { + return findAncestorWithClass(className, from, scope) + } + } + } + + function findNextElement (from, el) { + if ((el = from.nextElementSibling)) return el + el = from + while ((el = el.nextSibling) && el.nodeType !== 1); + return el + } + + function concealEvent (e) { + e.stopPropagation() + } +})() diff --git a/site-ui/src/js/02-on-this-page.js b/site-ui/src/js/02-on-this-page.js new file mode 100644 index 000000000..deb4f0a9f --- /dev/null +++ b/site-ui/src/js/02-on-this-page.js @@ -0,0 +1,94 @@ +;(function () { + 'use strict' + + var sidebar = document.querySelector('aside.toc.sidebar') + if (!sidebar) return + var doc + var headings + if ( + document.querySelector('.body.-toc') || + !(headings = find('h1[id].sect0, .sect1 > h2[id]', (doc = document.querySelector('article.doc')))).length + ) { + sidebar.parentNode.removeChild(sidebar) + return + } + var lastActiveFragment + var links = {} + var menu + + var list = headings.reduce(function (accum, heading) { + var link = toArray(heading.childNodes).reduce(function (target, child) { + if (child.nodeName !== 'A') target.appendChild(child.cloneNode(true)) + return target + }, document.createElement('a')) + links[(link.href = '#' + heading.id)] = link + var listItem = document.createElement('li') + listItem.appendChild(link) + accum.appendChild(listItem) + return accum + }, document.createElement('ul')) + + if (!(menu = sidebar && sidebar.querySelector('.toc-menu'))) { + menu = document.createElement('div') + menu.className = 'toc-menu' + } + + var title = document.createElement('h3') + title.textContent = 'On This Page' + menu.appendChild(title) + menu.appendChild(list) + + if (sidebar) { + window.addEventListener('load', function () { + onScroll() + window.addEventListener('scroll', onScroll) + }) + } + + var startOfContent = doc.querySelector('h1.page ~ :not(.labels)') + if (startOfContent) { + var embeddedToc = document.createElement('aside') + embeddedToc.className = 'toc embedded' + embeddedToc.appendChild(menu.cloneNode(true)) + doc.insertBefore(embeddedToc, startOfContent) + } + + function onScroll () { + // NOTE doc.parentNode.offsetTop ~= doc.parentNode.getBoundingClientRect().top + window.pageYOffset + //var targetPosition = doc.parentNode.offsetTop + // NOTE no need to compensate wheen using spacer above [id] elements + var targetPosition = 0 + var activeFragment + headings.some(function (heading) { 
+ if (Math.floor(heading.getBoundingClientRect().top) <= targetPosition) { + activeFragment = '#' + heading.id + } else { + return true + } + }) + if (activeFragment) { + if (activeFragment !== lastActiveFragment) { + if (lastActiveFragment) { + links[lastActiveFragment].classList.remove('is-active') + } + var activeLink = links[activeFragment] + activeLink.classList.add('is-active') + if (menu.scrollHeight > menu.offsetHeight) { + menu.scrollTop = Math.max(0, activeLink.offsetTop + activeLink.offsetHeight - menu.offsetHeight) + } + lastActiveFragment = activeFragment + } + } else if (lastActiveFragment) { + links[lastActiveFragment].classList.remove('is-active') + lastActiveFragment = undefined + } + } + + function find (selector, from) { + return toArray((from || document).querySelectorAll(selector)) + } + + function toArray (collection) { + return [].slice.call(collection) + } +})() diff --git a/site-ui/src/js/03-fragment-jumper.js b/site-ui/src/js/03-fragment-jumper.js new file mode 100644 index 000000000..076600a15 --- /dev/null +++ b/site-ui/src/js/03-fragment-jumper.js @@ -0,0 +1,14 @@ +;(function () { + 'use strict' + + if ('MozAppearance' in document.body.style) { + Array.prototype.slice.call(document.querySelectorAll('main [id]')).forEach(function (el) { + if (el.firstChild && ~window.getComputedStyle(el).display.indexOf('inline')) { + var anchor = document.createElement('a') + anchor.id = el.id + el.removeAttribute('id') + el.parentNode.insertBefore(anchor, el) + } + }) + } +})() diff --git a/site-ui/src/js/04-mobile-navbar.js b/site-ui/src/js/04-mobile-navbar.js new file mode 100644 index 000000000..494c37c2b --- /dev/null +++ b/site-ui/src/js/04-mobile-navbar.js @@ -0,0 +1,14 @@ +;(function () { + 'use strict' + + document.addEventListener('DOMContentLoaded', function () { + var navbarToggle = document.querySelector('.navbar-burger') + if (!navbarToggle) return + navbarToggle.addEventListener('click', function (e) { + e.stopPropagation() + navbarToggle.classList.toggle('is-active') + document.getElementById(navbarToggle.dataset.target).classList.toggle('is-active') + document.documentElement.classList.toggle('is-clipped--navbar') + }) + }) +})() diff --git a/site-ui/src/js/05-tabset.js b/site-ui/src/js/05-tabset.js new file mode 100644 index 000000000..4f27c2fc4 --- /dev/null +++ b/site-ui/src/js/05-tabset.js @@ -0,0 +1,50 @@ +;(function () { + 'use strict' + + var hash = window.location.hash + find('.doc .tabset').forEach(function (tabset) { + var active + var tabs = tabset.querySelector('.tabs') + if (tabs) { + var first + find('li', tabs).forEach(function (tab, idx) { + var id = (tab.querySelector('a[id]') || tab).id + if (!id) return + var pane = getPane(id, tabset) + if (!idx) first = { tab: tab, pane: pane } + if (!active && hash === '#' + id && (active = true)) { + tab.classList.add('is-active') + if (pane) pane.classList.add('is-active') + } else if (!idx) { + tab.classList.remove('is-active') + if (pane) pane.classList.remove('is-active') + } + tab.addEventListener('click', activateTab.bind({ tabset: tabset, tab: tab, pane: pane })) + }) + if (!active && first) { + first.tab.classList.add('is-active') + if (first.pane) first.pane.classList.add('is-active') + } + } + tabset.classList.remove('is-loading') + }) + + function activateTab (e) { + var tab = this.tab + var pane = this.pane + find('.tabs li, .tab-pane', this.tabset).forEach(function (it) { + it === tab || it === pane ? 
it.classList.add('is-active') : it.classList.remove('is-active') + }) + e.preventDefault() + } + + function find (selector, from) { + return Array.prototype.slice.call((from || document).querySelectorAll(selector)) + } + + function getPane (id, tabset) { + return find('.tab-pane', tabset).find(function (it) { + return it.getAttribute('aria-labelledby') === id + }) + } +})() diff --git a/site-ui/src/js/vendor/docsearch.bundle.js b/site-ui/src/js/vendor/docsearch.bundle.js new file mode 100644 index 000000000..9e06c9175 --- /dev/null +++ b/site-ui/src/js/vendor/docsearch.bundle.js @@ -0,0 +1,26 @@ +;(function () { + 'use strict' + + var docsearch = require('docsearch.js/dist/cdn/docsearch.js') + window.addEventListener('load', function () { + var config = document.getElementById('search-script').dataset + var link = document.createElement('link') + link.rel = 'stylesheet' + link.href = config.stylesheet + document.head.appendChild(link) + var ds = docsearch({ + appId: config.appId, + apiKey: config.apiKey, + indexName: config.indexName, + inputSelector: '#search-query', + algoliaOptions: { hitsPerPage: 25 }, + debug: false, + }) + document.querySelector('button.search').addEventListener('click', function (e) { + if (document.querySelector('.navbar-start').classList.toggle('reveal-search-input')) { + ds.autocomplete.autocomplete.setVal('') + ds.input.focus() + } + }) + }) +})() diff --git a/site-ui/src/js/vendor/feedback.js b/site-ui/src/js/vendor/feedback.js new file mode 100644 index 000000000..1a0cc9c25 --- /dev/null +++ b/site-ui/src/js/vendor/feedback.js @@ -0,0 +1,12 @@ +;(function () { + 'use strict' + + if (Math.max(window.screen.availHeight, window.screen.availWidth) < 769) return + + window.addEventListener('load', function () { + var config = document.getElementById('feedback-script').dataset + var script = document.createElement('script') + script.src = 'https://issues.apache.org/jira/projects/CASSANDRA/issues' + document.body.appendChild(script) + }) +})() diff --git a/site-ui/src/js/vendor/highlight.bundle.js b/site-ui/src/js/vendor/highlight.bundle.js new file mode 100644 index 000000000..2cd5d1047 --- /dev/null +++ b/site-ui/src/js/vendor/highlight.bundle.js @@ -0,0 +1,30 @@ +;(function () { + 'use strict' + + var hljs = require('highlight.js/lib/highlight') + hljs.registerLanguage('asciidoc', require('highlight.js/lib/languages/asciidoc')) + hljs.registerLanguage('bash', require('highlight.js/lib/languages/bash')) + hljs.registerLanguage('cpp', require('highlight.js/lib/languages/cpp')) + hljs.registerLanguage('cs', require('highlight.js/lib/languages/cs')) + hljs.registerLanguage('dockerfile', require('highlight.js/lib/languages/dockerfile')) + hljs.registerLanguage('go', require('highlight.js/lib/languages/go')) + hljs.registerLanguage('groovy', require('highlight.js/lib/languages/groovy')) + hljs.registerLanguage('java', require('highlight.js/lib/languages/java')) + hljs.registerLanguage('javascript', require('highlight.js/lib/languages/javascript')) + hljs.registerLanguage('json', require('highlight.js/lib/languages/json')) + hljs.registerLanguage('kotlin', require('highlight.js/lib/languages/kotlin')) + hljs.registerLanguage('n1ql', require('highlight.js/lib/languages/sql')) + hljs.registerLanguage('objectivec', require('highlight.js/lib/languages/objectivec')) + hljs.registerLanguage('perl', require('highlight.js/lib/languages/perl')) + hljs.registerLanguage('php', require('highlight.js/lib/languages/php')) + hljs.registerLanguage('powershell', 
require('highlight.js/lib/languages/powershell')) + hljs.registerLanguage('python', require('highlight.js/lib/languages/python')) + hljs.registerLanguage('ruby', require('highlight.js/lib/languages/ruby')) + hljs.registerLanguage('scala', require('highlight.js/lib/languages/scala')) + hljs.registerLanguage('shell', require('highlight.js/lib/languages/shell')) + hljs.registerLanguage('sql', require('highlight.js/lib/languages/sql')) + hljs.registerLanguage('swift', require('highlight.js/lib/languages/swift')) + hljs.registerLanguage('xml', require('highlight.js/lib/languages/xml')) + hljs.registerLanguage('yaml', require('highlight.js/lib/languages/yaml')) + hljs.initHighlighting() +})() diff --git a/site-ui/src/js/vendor/mark.bundle.js b/site-ui/src/js/vendor/mark.bundle.js new file mode 100644 index 000000000..09d0ba696 --- /dev/null +++ b/site-ui/src/js/vendor/mark.bundle.js @@ -0,0 +1,48 @@ +;(function () { + 'use strict' + + var Mark = require('mark.js') + window.addEventListener('load', function () { + // Create an instance of mark.js and pass an argument containing + // the DOM object of the context (where to search for matches) + var markInstance = new Mark(document.querySelectorAll('.sect1')) + + /* All tutorials */ + var tiles = document.querySelectorAll('.doc .sect1') + + /* Search every time the input text is modified */ + document.getElementById('search-tutorials').addEventListener('input', function () { + var doc = document.getElementsByClassName('doc')[0] + var name = document.getElementById('search-tutorials').value + var pattern = name.toLowerCase() + + markInstance.unmark({ + done: function () { + markInstance.mark(pattern) + + var resultDivs = [] + for (var j = 0; j < tiles.length; j++) { + /* display the card if it contains the pattern */ + if (tiles[j].querySelectorAll('mark').length > 0) { + resultDivs.push(tiles[j]) + } + } + + /* update list of tiles */ + doc.innerHTML = '' + for (var k = 0; k < resultDivs.length; k++) { + doc.appendChild(resultDivs[k]) + } + + /* if the search term is empty, return all tutorials */ + if (pattern === '') { + doc.innerHTML = '' + for (var i = 0; i < tiles.length; i++) { + doc.appendChild(tiles[i]) + } + } + }, + }) + }) + }) +})() diff --git a/site-ui/src/layouts/404.hbs b/site-ui/src/layouts/404.hbs new file mode 100644 index 000000000..b26f38700 --- /dev/null +++ b/site-ui/src/layouts/404.hbs @@ -0,0 +1,13 @@ + + + +{{> head-first}} + {{{detag (or page.title 'Page Not Found')}}}{{#with site.title}} | {{this}}{{/with}} +{{> head-last}} + + +{{> header}} +{{> body-404}} +{{> footer}} + + diff --git a/site-ui/src/layouts/default.hbs b/site-ui/src/layouts/default.hbs new file mode 100644 index 000000000..e6249e354 --- /dev/null +++ b/site-ui/src/layouts/default.hbs @@ -0,0 +1,16 @@ + + + +{{> head-first}} + {{{detag (or page.title 'Untitled')}}}{{#with site.title}} | {{this}}{{/with}} + {{#with (canonical-url)}} + + {{/with}} +{{> head-last}} + + +{{> header}} +{{> body}} +{{> footer}} + + diff --git a/site-ui/src/layouts/home.hbs b/site-ui/src/layouts/home.hbs new file mode 100644 index 000000000..ce78b9eaa --- /dev/null +++ b/site-ui/src/layouts/home.hbs @@ -0,0 +1,16 @@ + + + +{{> head-first}} + Apache Cassandra{{#with site.title}} | {{this}}{{/with}} + {{#with (canonical-url)}} + + + {{/with}} +{{> head-last}} + + +{{> header}} +{{> body-tutorials}} + + diff --git a/site-ui/src/layouts/tutorials.hbs b/site-ui/src/layouts/tutorials.hbs new file mode 100644 index 000000000..9366c4d4d --- /dev/null +++ 
b/site-ui/src/layouts/tutorials.hbs @@ -0,0 +1,16 @@ + + + +{{> head-first}} + Tutorials{{#with site.title}} | {{this}}{{/with}} + {{#with (canonical-url)}} + + + {{/with}} +{{> head-last}} + + +{{> header}} +{{> body-tutorials}} + + diff --git a/site-ui/src/partials/article.hbs b/site-ui/src/partials/article.hbs new file mode 100644 index 000000000..bdb488eeb --- /dev/null +++ b/site-ui/src/partials/article.hbs @@ -0,0 +1,7 @@ +
+{{#if page.title}} +

{{{page.title}}}

+{{> labels}} +{{/if}} +{{{page.contents}}} +
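The article.hbs partial above is driven by Antora's page UI model: `page.title` and `page.contents` arrive as pre-rendered HTML, which is why the partial uses the unescaped triple-stash form. A rough sketch of how such a partial evaluates, assuming the `handlebars` npm package and invented markup and data (the real partial's HTML is not reproduced here):

```js
const Handlebars = require('handlebars')

// Invented template in the spirit of article.hbs; the real markup differs.
const articleSrc = `
{{#if page.title}}
<h1 class="page">{{{page.title}}}</h1>
{{/if}}
{{{page.contents}}}
`

const article = Handlebars.compile(articleSrc)

// Triple-stash ({{{ }}}) emits values unescaped, so the HTML produced by the
// documentation build is not double-escaped when it lands in the page.
console.log(article({
  page: {
    title: 'Getting Started',
    contents: '<p>Install and configure Apache Cassandra.</p>',
  },
}))
```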
diff --git a/site-ui/src/partials/body-404.hbs b/site-ui/src/partials/body-404.hbs new file mode 100644 index 000000000..f5af560cd --- /dev/null +++ b/site-ui/src/partials/body-404.hbs @@ -0,0 +1,14 @@ +
+
+
+

{{{or page.title 'Page Not Found'}}}

+
+

The page you're looking for does not exist. It may have been moved.

+
+
+

If you arrived on this page by clicking on a link, please notify the owner of the site that the link is broken. +If you typed the URL of this page manually, please double-check that you entered the address correctly.

+
+
+
+
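body-404.hbs, like the default and home layouts earlier in this patch, leans on the tiny Handlebars helpers added under site-ui/src/helpers/ (`or`, `eq`, `detag`, and friends). A hedged sketch of how those helpers behave once registered, exercised directly with the `handlebars` package and made-up input:

```js
const Handlebars = require('handlebars')

// Same logic as the or.js, eq.js, and detag.js helpers above.
Handlebars.registerHelper('or', (a, b) => a || b)
Handlebars.registerHelper('eq', (a, b) => a === b)
Handlebars.registerHelper('detag', (html) => html && html.replace(/<[^>]+>/g, ''))

// Subexpressions such as (or page.title 'Page Not Found') run first and feed
// their result into the enclosing expression.
const title = Handlebars.compile("{{{detag (or page.title 'Page Not Found')}}}")

console.log(title({ page: {} }))                              // Page Not Found
console.log(title({ page: { title: '<em>CQL</em> docs' } }))  // CQL docs
```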
diff --git a/site-ui/src/partials/body-home.hbs b/site-ui/src/partials/body-home.hbs new file mode 100644 index 000000000..bb27f7e24 --- /dev/null +++ b/site-ui/src/partials/body-home.hbs @@ -0,0 +1,8 @@ +
+
+
+

{{{page.title}}}

+{{{page.contents}}} +
+
+
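The home and tutorials layouts that wrap partials like body-home.hbs call the `canonical-url` helper, and both canonical-url.js and latest-page-url.js (added earlier in this patch) rely on `VERSIONED_ROOT_RELATIVE_URL_RX` to point a versioned page URL at the "current" alias when the target site supports it. A quick check of what that replacement does, using invented sample paths:

```js
// Regex copied from the helpers above; the URLs below are examples only.
const VERSIONED_ROOT_RELATIVE_URL_RX = /^(\/[^/]+)\/[^/]+(?=\/)/

// Group 1 keeps the component segment; the version segment that follows is
// replaced, and the lookahead keeps the trailing slash in place.
console.log('/cassandra/4.0/getting-started/index.html'
  .replace(VERSIONED_ROOT_RELATIVE_URL_RX, '$1/current'))
// -> /cassandra/current/getting-started/index.html

// No path segment after the would-be version, so the lookahead fails and the
// URL is returned unchanged.
console.log('/cassandra/index.html'.replace(VERSIONED_ROOT_RELATIVE_URL_RX, '$1/current'))
// -> /cassandra/index.html
```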
diff --git a/site-ui/src/partials/body-tutorials.hbs b/site-ui/src/partials/body-tutorials.hbs new file mode 100644 index 000000000..3a4bbf2fd --- /dev/null +++ b/site-ui/src/partials/body-tutorials.hbs @@ -0,0 +1,18 @@ + + + +
+
+
+
+{{{page.contents}}} +
+
+
diff --git a/site-ui/src/partials/body.hbs b/site-ui/src/partials/body.hbs new file mode 100644 index 000000000..4ab4e12c1 --- /dev/null +++ b/site-ui/src/partials/body.hbs @@ -0,0 +1,5 @@ +
+{{> nav}} +{{> toc}} +{{> main}} +
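body.hbs composes the page from the `nav`, `toc`, and `main` partials. A `{{> name}}` reference only resolves once a partial has been registered under that name, which the Antora page composer is expected to do for every file under src/partials/. A stripped-down sketch of the mechanism, with placeholder partial bodies:

```js
const Handlebars = require('handlebars')

// Placeholder bodies; the real partials are the .hbs files added in this patch.
Handlebars.registerPartial('nav', '<nav class="nav">...</nav>')
Handlebars.registerPartial('toc', '<aside class="toc sidebar">...</aside>')
Handlebars.registerPartial('main', '<main class="article">{{{page.contents}}}</main>')

// Partials receive the caller's context, so page.contents is visible inside main.
const body = Handlebars.compile('{{> nav}}\n{{> toc}}\n{{> main}}')
console.log(body({ page: { contents: '<p>Hello, Cassandra.</p>' } }))
```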
diff --git a/site-ui/src/partials/crumbs.hbs b/site-ui/src/partials/crumbs.hbs new file mode 100644 index 000000000..dc1c1ed58 --- /dev/null +++ b/site-ui/src/partials/crumbs.hbs @@ -0,0 +1,13 @@ + diff --git a/site-ui/src/partials/footer-content.hbs b/site-ui/src/partials/footer-content.hbs new file mode 100644 index 000000000..7b55a4b04 --- /dev/null +++ b/site-ui/src/partials/footer-content.hbs @@ -0,0 +1,2 @@ +
+
diff --git a/site-ui/src/partials/footer-scripts.hbs b/site-ui/src/partials/footer-scripts.hbs new file mode 100644 index 000000000..db6732250 --- /dev/null +++ b/site-ui/src/partials/footer-scripts.hbs @@ -0,0 +1,8 @@ + +{{#with page.attributes.content-scripts}} +{{{this}}} +{{/with}} + +{{#if (eq page.layout 'tutorials')}} + +{{/if}} diff --git a/site-ui/src/partials/footer.hbs b/site-ui/src/partials/footer.hbs new file mode 100644 index 000000000..9d4901793 --- /dev/null +++ b/site-ui/src/partials/footer.hbs @@ -0,0 +1,2 @@ +{{> footer-content}} +{{> footer-scripts}} diff --git a/site-ui/src/partials/head-first.hbs b/site-ui/src/partials/head-first.hbs new file mode 100644 index 000000000..ac174a48e --- /dev/null +++ b/site-ui/src/partials/head-first.hbs @@ -0,0 +1,5 @@ + + {{#if (eq env.FORCE_HTTPS 'true')}} + + {{/if}} + diff --git a/site-ui/src/partials/head-last.hbs b/site-ui/src/partials/head-last.hbs new file mode 100644 index 000000000..fea335a1a --- /dev/null +++ b/site-ui/src/partials/head-last.hbs @@ -0,0 +1,12 @@ + + {{#with env.OPTANON_SCRIPT_URL}} + + + + {{/with}} + {{#with site.keys.googleAnalytics}} + + + {{/with}} +{{> head-meta}} +{{> head-scripts}} diff --git a/site-ui/src/partials/head-meta.hbs b/site-ui/src/partials/head-meta.hbs new file mode 100644 index 000000000..1ba7d2ded --- /dev/null +++ b/site-ui/src/partials/head-meta.hbs @@ -0,0 +1,13 @@ + {{#with page.description}} + + {{/with}} + {{#with page.keywords}} + + {{/with}} + {{#if page.component}} + + + + {{/if}} + + diff --git a/site-ui/src/partials/head-scripts.hbs b/site-ui/src/partials/head-scripts.hbs new file mode 100644 index 000000000..e69de29bb diff --git a/site-ui/src/partials/header-content.hbs b/site-ui/src/partials/header-content.hbs new file mode 100644 index 000000000..f45fd70f5 --- /dev/null +++ b/site-ui/src/partials/header-content.hbs @@ -0,0 +1,2 @@ +
+
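docsearch.bundle.js, added above, reads its Algolia settings from `data-*` attributes on an element with id `search-script`, presumably rendered by one of these header partials. The hyphenated attributes surface as camelCase `dataset` keys (`appId`, `apiKey`, `indexName`, `stylesheet`), which is the contract the bundle depends on. A small browser-console sketch with placeholder values:

```js
// Fake the element the bundle looks up; ids exist in the real UI, values here are placeholders.
const el = document.createElement('script')
el.id = 'search-script'
el.setAttribute('data-app-id', 'EXAMPLE_APP_ID')
el.setAttribute('data-api-key', 'EXAMPLE_SEARCH_ONLY_KEY')
el.setAttribute('data-index-name', 'cassandra')
el.setAttribute('data-stylesheet', '/css/vendor/docsearch.css')
document.head.appendChild(el)

// What docsearch.bundle.js does on window load (abridged): data-app-id shows up
// here as dataset.appId, data-index-name as dataset.indexName, and so on.
const config = document.getElementById('search-script').dataset
console.log(config.appId, config.indexName, config.stylesheet)
```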
diff --git a/site-ui/src/partials/header-scripts.hbs b/site-ui/src/partials/header-scripts.hbs new file mode 100644 index 000000000..54dab846d --- /dev/null +++ b/site-ui/src/partials/header-scripts.hbs @@ -0,0 +1,3 @@ +{{#with site.keys.googleAnalytics}} + +{{/with}} diff --git a/site-ui/src/partials/header.hbs b/site-ui/src/partials/header.hbs new file mode 100644 index 000000000..8a394222e --- /dev/null +++ b/site-ui/src/partials/header.hbs @@ -0,0 +1,2 @@ +{{> header-scripts}} +{{> header-content}} diff --git a/site-ui/src/partials/labels.hbs b/site-ui/src/partials/labels.hbs new file mode 100644 index 000000000..19a1aa3a4 --- /dev/null +++ b/site-ui/src/partials/labels.hbs @@ -0,0 +1,12 @@ +{{#if (or page.attributes.edition page.attributes.status)}} +
+
    + {{#with page.attributes.edition}} +
  • {{{this}}}
  • + {{/with}} + {{#with page.attributes.status}} +
  • {{{this}}}
  • + {{/with}} +
+
+{{/if}} diff --git a/site-ui/src/partials/main.hbs b/site-ui/src/partials/main.hbs new file mode 100644 index 000000000..2140cf354 --- /dev/null +++ b/site-ui/src/partials/main.hbs @@ -0,0 +1,24 @@ +
+ {{#unless (or (ne page.attributes.hide-view-latest undefined) (eq page.componentVersion page.component.latest))}} + {{#with (latest-page-url)}} +
+ {{#if (and @root.page.componentVersion.prerelease (not @root.page.latest.prerelease))}} +

You are viewing the documentation for a prerelease version.

+ {{else}} +

A newer version of this documentation is available.

+ {{/if}} + View Latest +
+ {{else if (and page.componentVersion.prerelease (not page.latest.prerelease))}} +
+

You are viewing the documentation for a prerelease version.

+
+ {{/with}} + {{/unless}} +
+{{> nav-control}} +{{> crumbs}} +{{> toolbar}} +
+{{> article}} +
diff --git a/site-ui/src/partials/nav-control.hbs b/site-ui/src/partials/nav-control.hbs new file mode 100644 index 000000000..9a8780670 --- /dev/null +++ b/site-ui/src/partials/nav-control.hbs @@ -0,0 +1 @@ + diff --git a/site-ui/src/partials/nav-menu.hbs b/site-ui/src/partials/nav-menu.hbs new file mode 100644 index 000000000..102eb31a7 --- /dev/null +++ b/site-ui/src/partials/nav-menu.hbs @@ -0,0 +1,3 @@ + diff --git a/site-ui/src/partials/nav-tree.hbs b/site-ui/src/partials/nav-tree.hbs new file mode 100644 index 000000000..90de41c08 --- /dev/null +++ b/site-ui/src/partials/nav-tree.hbs @@ -0,0 +1,23 @@ +{{#if navigation}} + +{{/if}} diff --git a/site-ui/src/partials/nav.hbs b/site-ui/src/partials/nav.hbs new file mode 100644 index 000000000..a6910dccf --- /dev/null +++ b/site-ui/src/partials/nav.hbs @@ -0,0 +1,5 @@ + diff --git a/site-ui/src/partials/toc.hbs b/site-ui/src/partials/toc.hbs new file mode 100644 index 000000000..142e791af --- /dev/null +++ b/site-ui/src/partials/toc.hbs @@ -0,0 +1,3 @@ + diff --git a/site-ui/src/partials/toolbar.hbs b/site-ui/src/partials/toolbar.hbs new file mode 100644 index 000000000..03eb19814 --- /dev/null +++ b/site-ui/src/partials/toolbar.hbs @@ -0,0 +1,7 @@ +{{#with (or (and (not env.CI) page.fileUri) (and (not page.origin.private) page.editUrl))}} + +{{/with}} diff --git a/src/.htaccess b/src/.htaccess deleted file mode 100644 index dd8c1829d..000000000 --- a/src/.htaccess +++ /dev/null @@ -1,4 +0,0 @@ -RewriteEngine On - -RewriteCond %{HTTPS} !=on -RewriteRule ^/?(.*) https://%{SERVER_NAME}/$1 [R,L] diff --git a/src/Gemfile b/src/Gemfile deleted file mode 100644 index dcac142b2..000000000 --- a/src/Gemfile +++ /dev/null @@ -1,4 +0,0 @@ -source "https://rubygems.org" -gem "jekyll", "3.4.3" -gem "jekyll-paginate-v2" -gem 'jekyll-feed' \ No newline at end of file diff --git a/src/Gemfile.lock b/src/Gemfile.lock deleted file mode 100644 index 479a0ba54..000000000 --- a/src/Gemfile.lock +++ /dev/null @@ -1,59 +0,0 @@ -GEM - remote: https://rubygems.org/ - specs: - addressable (2.5.2) - public_suffix (>= 2.0.2, < 4.0) - colorator (1.1.0) - ffi (1.9.25) - forwardable-extended (2.6.0) - jekyll (3.4.3) - addressable (~> 2.4) - colorator (~> 1.0) - jekyll-sass-converter (~> 1.0) - jekyll-watch (~> 1.1) - kramdown (~> 1.3) - liquid (~> 3.0) - mercenary (~> 0.3.3) - pathutil (~> 0.9) - rouge (~> 1.7) - safe_yaml (~> 1.0) - jekyll-feed (0.10.0) - jekyll (~> 3.3) - jekyll-paginate-v2 (1.9.4) - jekyll (~> 3.0) - jekyll-sass-converter (1.5.2) - sass (~> 3.4) - jekyll-watch (1.5.1) - listen (~> 3.0) - kramdown (1.17.0) - liquid (3.0.6) - listen (3.1.5) - rb-fsevent (~> 0.9, >= 0.9.4) - rb-inotify (~> 0.9, >= 0.9.7) - ruby_dep (~> 1.2) - mercenary (0.3.6) - pathutil (0.16.1) - forwardable-extended (~> 2.6) - public_suffix (3.0.3) - rb-fsevent (0.10.3) - rb-inotify (0.9.10) - ffi (>= 0.5.0, < 2) - rouge (1.11.1) - ruby_dep (1.5.0) - safe_yaml (1.0.4) - sass (3.5.7) - sass-listen (~> 4.0.0) - sass-listen (4.0.0) - rb-fsevent (~> 0.9, >= 0.9.4) - rb-inotify (~> 0.9, >= 0.9.7) - -PLATFORMS - ruby - -DEPENDENCIES - jekyll (= 3.4.3) - jekyll-feed - jekyll-paginate-v2 - -BUNDLED WITH - 1.16.3 diff --git a/src/Makefile b/src/Makefile deleted file mode 100644 index 6758bfa2e..000000000 --- a/src/Makefile +++ /dev/null @@ -1,49 +0,0 @@ -# -# This contains targets to build the website and add the documentation generated from the Cassandra source. 
-# -# The add-doc command build the document from the cassandra sources pointed by the CASSANDRA_DIR environment variable -# and copy it in doc//. The add-latest-doc does the same but also create/modify the doc/latest symlink so it -# points to the doc it just created (so to use to replace the "current" documentation). -# -# Note that the add-doc and add-latest-doc are not called automatically from other targets and needs to be called -# manually. That's because there is many cases where you want to just update the website, without reconstructing the -# documentation. Note however that the doc target do rebuild the website. -# - -CASSANDRA_DOC_DIR="$(CASSANDRA_DIR)/doc" - -.check-env: -ifndef CASSANDRA_DIR - $(error You should set the CASSANDRA_DIR environment variable to the git source dir, checkout on the proper branch) -endif - -.build-doc: .check-env -# Not declaring DOC_VERSION at top-level cause it calls ant and that's stupidly slow - $(eval DOC_VERSION=$(shell echo `cd $(CASSANDRA_DOC_DIR)/..; ant echo-base-version | grep '\[echo\]' | awk '{print $$2}'`)) - $(eval DOC_DIR="doc/$(DOC_VERSION)") - # Nodetool docs are autogenerated, but that needs nodetool to be built - @cd $(CASSANDRA_DIR); ant jar - @cd $(CASSANDRA_DOC_DIR); make website - @if [ -d $(DOC_DIR) ]; then rm -rf $(DOC_DIR); fi - @cp -r $(CASSANDRA_DOC_DIR)/build/html $(DOC_DIR) - -.latest-doc-link: - @ln -s -f -T $(DOC_VERSION) "doc/latest" - -.stable-doc-link: - @ln -s -f -T $(DOC_VERSION) "doc/stable" - -build: - @bundle exec jekyll build - -clean: - @bundle exec jekyll clean - -serve: - @bundle exec jekyll serve - -add-doc: .build-doc build - -add-latest-doc: .build-doc .latest-doc-link build - -add-stable-doc: .build-doc .stable-doc-link build diff --git a/src/README b/src/README deleted file mode 100644 index 80ff3af05..000000000 --- a/src/README +++ /dev/null @@ -1,137 +0,0 @@ -Apache Cassandra website sources -================================ - -Prerequisite ------------- - -The site depends on Jekyll (https://jekyllrb.com/) which should be installed. - -Please use Jekyll 3.4.x for compiling the html pages, so we get identical results for everyone. - -You may need to add the ruby executable that ends up being installed to your firewall on MacOS otherwise -the web server started by 'make serve' will be unreachable. - -Installing Jekyll independently from the version shipped with your OS can be done as follows: - -# Use rbenv to use a specific Ruby version with Jekyll: -# See: https://github.com/rbenv/rbenv/#installation -git clone -b v1.1.1 https://github.com/rbenv/rbenv.git ~/.rbenv -echo 'export PATH="$HOME/.rbenv/bin:$HOME/.rbenv/shims:$HOME/.rbenv/versions/2.4.2/bin:$PATH"' >> ~/.zshrc # change to bashrc if needed -# Install ruby-build plugin -mkdir ~/.rbenv/plugins -git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build - -# Install ruby -# See: https://github.com/rbenv/rbenv/#installing-ruby-versions -rbenv install 2.4.2 - -# Associate with Cassandra svn directory -cd ~/svn/cassandra-site/src # change as needed -rbenv local 2.4.2 -which rbenv # should point to shim -ruby --version - -# Install bundler with local rbenv version -gem install bundler - -# Install needed gems -bundle install - -# Confirm things are working -jekyll --version - - -Updating the site ------------------ - -Updates to the "core" website are done by simply: - -1. editing whatever local files needs to be edited (see layout below too) -2. testing the change locally using `make serve` -3. 
building for real using: `make` -4. committing. - -One exception is the documentation which is automatically built and imported from the Cassandra sources. If you need to -update the documentation, you need to: -1. checkout the proper branch (the one for which you want to build and import the doc) in your local checkout of the - cassandra git sources -2. set the CASSANDRA_DIR environment variable to that git source checkout (pro-tip: it's a good idea to export that - variable in your bashrc/zshrc/whatev). -3. use `make add-doc` (or `make add-latest-doc` or `add-stable-doc`) from this repository. - The `add-stable-doc` target should be used when you want the generated doc replace the "default" documentation, - the one you get when you click "documentation" on the website. Use `add-latest-doc` for trunk. - You should use `add-doc` when you want to rebuild the doc for an old version. - -Layout ------- - -Outside of the documentation, the website is mostly only a few pages that are at top-level. The `index.html` file is the -landing page, and other pages are markdown processed by Jekyll (the index page isn't markdown because it's layout is -slightly more fancy). - -Further, the `_data/` directory contains "data" used by the pages. Mainly, the `_data/releases.yaml` file contains the -versions and release dates of the currently released version. It's the file to edit when a new release is published. - -The documentation is in the doc/ directory. It mostly contains sub-directories for each version for which we have a doc, -and those sub-directories are automatically imported by `make add-doc`/`make add-lastest-doc` (so don't edit any file -withing those version sub-directories). The doc/ directory also contains a `lastest` symlink pointing to the last -version, the one the website documentation actually points to (that symlink is automatically updated when you use `make -add-latest-doc`), as well as a few "static" files used by the doc (as of this writing, only links to legacy CQL doc). - -The rest of the layout is standard to Jekyll: - -* `_layout/` contains the page (full) layouts. -* `_includes/` contains fragments used by the different layouts (that's where the header, navigation and footer are). -* `css/`, `js/` and `img/` are what ones would expect (they are included as-is by Jekyll). -* `_sass/` is to `css/` what `_includes` is to `_layout`; it contains sass fragments imported by the main css files - (currently only the pygments theme for syntax highligthing in the documentation). -* `_plugins/` contains a tiny plugin that make it easier to input download links in the `download.md` file. - -How this work -------------- - -Most of the site is a simple Jekyll installation and you should refer to the Jekyll documentation for detail. The site -however includes the in-tree Sphinx documentation and this section attemps to provide more detail on that "integration". - -That doc integration is somewhat simplistic, which has both pros and cons. The pros is that there isn't really any -complex mechanism involved, the cons is that it's a bit of a hack. The in-tree doc uses Sphinx, which normally generates -complete (standalone) html files (from the textile source), while Jekyll generates it's pages from Liquid templates -(https://jekyllrb.com/docs/templates/). 
The intregation between the 2 works by having a special Sphinx theme (that theme -is in doc/source/_theme/cassandra_theme in the Cassandra git repository) which, instead of creating standalone html -files, creates liquid template (that reuse the elements of the website). Note that this means in particular that this -special theme is only ever useful for the website. In other words, the processus of integration of the doc in the -website is: - 1) the in-tree doc is compiled by sphinx using the special theme. - 2) the generated files, which thanks to the theme are Liquid templates (mainly, if you look at the theme files, they - simply have a proper Jekyll yaml 'Front Matter'), are copied to the website doc. - 3) Jekyl is run normally. It then simply picks up the copied doc file and process them as any normal file. - -And there is just a bit of automation to make that process easier: - - the in-tree sphinx doc Makefile has a 'website' target that simply trigger the use of the special website Sphinx - theme. - - the website Makefile has the 'add-doc' and 'add-latest-doc' targets mentioned above to automate the generation and - copy of the doc files. - -And that's mostly what there is to it. I'll note however the following technical gotchas: - - once copied and processed by Jekyll, the doc files uses the website html header and navigation. Sphinx has a bunch of - custom css class that can be styled, as well as a few js files, and those are in the website css/ and js/ - directories. In particular, most doc specific styling is in the css/sphinx.css (some of it have been simply copied - for the css of the default sphinx theme), and the sphinx js files have been manually copied into js/. Those aren't - updated by the integration process described above, and in particular, the sphinx generation puts a bunch of css/js - files in a _static/ directory of the generated files (see src/doc/latest/_static for instance) and those are - completely ignored (they are not even copied by Jekyll due to my next point). - - sphinx uses underscores at the start of some of the directories that have special meaning, like the _images directory - where it puts images, but Jekyll have the same convention for its own special directories and won't (by default) copy - any such directory when generated its result. In practice, this means we have to force manually force the inclusion - of those sphinx directories we want to include so Jekyll doesn't skip them, which is done in the _config.yml Jekyll - configuration file (the only sphinx dir we care about so far is _images). - -Contributing a Blog Post ------------------------- -To contribute a new blog post, create a new Markdown file in /_posts, using -_templates/2018-06-22-blog_post_template.markdown as a template. Follow the YYYY-MM-DD-blog-post-slug.markdown filename -convention, and annotate your post using the yaml frontmatter of the Markdown file (this functionality is provided by -Jekyll). - -Follow the instructions above during development -- `make serve` for local development, and `make` to generate the final -product. diff --git a/src/_config.yml b/src/_config.yml deleted file mode 100644 index 4363b679d..000000000 --- a/src/_config.yml +++ /dev/null @@ -1,42 +0,0 @@ -# Apache Cassandra Website config - -# Site settings -title: Apache Cassandra Website -description: > # this means to ignore newlines until "baseurl:" - The Apache Cassandra database is the right choice when you need scalability and high availability without - compromising performance. 
Linear scalability and proven fault-tolerance on commodity hardware or cloud - infrastructure make it the perfect platform for mission-critical data. Cassandra's support for replicating across - multiple datacenters is best-in-class, providing lower latency for your users and the peace of mind of knowing that - you can survive regional outages. -baseurl: "" -url: "http://cassandra.apache.org" -twitter_username: cassandra - -destination: ../publish - -exclude: [Makefile, README, Gemfile, Gemfile.lock, vendor] - -include: - - _images - - .htaccess - -gems: - - jekyll-paginate-v2 - - jekyll-feed - -# Build settings -markdown: kramdown - -kramdown: - input: GFM - syntax_highlighter: rouge - hard_wrap: false - -# Pagination threshold for blog post listings -pagination: - enabled: true - per_page: 5 - title: ' - page :num' - limit: 0 - sort_field: 'date' - sort_reverse: true diff --git a/src/_data/releases.yaml b/src/_data/releases.yaml deleted file mode 100644 index 36f5d3bf1..000000000 --- a/src/_data/releases.yaml +++ /dev/null @@ -1,19 +0,0 @@ -latest: - name: "4.0-beta1" - date: 2020-07-20 - -"3.11": - name: "3.11.7" - date: 2020-07-25 - -"3.0": - name: "3.0.20" - date: 2020-02-14 - -"2.2": - name: "2.2.17" - date: 2020-07-24 - -"2.1": - name: "2.1.21" - date: 2019-02-11 diff --git a/src/_data/urls.yaml b/src/_data/urls.yaml deleted file mode 100644 index 0cf8df411..000000000 --- a/src/_data/urls.yaml +++ /dev/null @@ -1,4 +0,0 @@ -git_url: "https://gitbox.apache.org/repos/asf?p=cassandra.git" -apache_url: "https://downloads.apache.org" -changelog_url: "https://gitbox.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=refs/tags/cassandra" -download_url: "https://www.apache.org/dyn/closer.lua/cassandra" diff --git a/src/_includes/base.html b/src/_includes/base.html deleted file mode 100644 index 464a192e2..000000000 --- a/src/_includes/base.html +++ /dev/null @@ -1,13 +0,0 @@ -{% assign split_url = page.url | split:'/' %} -{% if split_url.size > 0 %} - {% assign splitted_last = split_url | last | split:'.' %} - {% if splitted_last.size > 1 %} - {% assign depth = split_url | size | minus: 2 %} - {% else %} - {% assign depth = split_url | size | minus: 1 %} - {% endif %} - {% capture base %}./{% for i in (1..depth) %}../{% endfor %}{% endcapture %} -{% else %} - {% capture base %}./{% endcapture %} -{% endif %} - diff --git a/src/_includes/footer.html b/src/_includes/footer.html deleted file mode 100644 index 26aa404bd..000000000 --- a/src/_includes/footer.html +++ /dev/null @@ -1,53 +0,0 @@ -
- -
-
- - -
-

© 2016 The Apache Software Foundation. - Apache, the Apache feather logo, and Apache Cassandra are trademarks of The Apache Software Foundation. -

-

-
-
- - - - - - -{% if page.is_sphinx_doc %} - - -{% if page.extra-footer %} -{{ page.extra-footer }} -{% endif %} -{% endif %} - - - diff --git a/src/_includes/head.html b/src/_includes/head.html deleted file mode 100644 index 46c234c91..000000000 --- a/src/_includes/head.html +++ /dev/null @@ -1,25 +0,0 @@ -{% include base.html %} - - - - - - - - - - {% if page.title %}{{ page.title | escape }}{% else %}{{ site.title | escape }}{% endif %} - - - - - - {% if page.is_sphinx_doc %} - - {% endif %} - - {% if page.doc-header-links %}{{ page.doc-header-links }}{% endif %} - - - {% feed_meta %} - diff --git a/src/_includes/nav.html b/src/_includes/nav.html deleted file mode 100644 index e3f34dc26..000000000 --- a/src/_includes/nav.html +++ /dev/null @@ -1,86 +0,0 @@ - -
- - - - -
diff --git a/src/_layouts/blog.html b/src/_layouts/blog.html deleted file mode 100644 index ac098607a..000000000 --- a/src/_layouts/blog.html +++ /dev/null @@ -1,33 +0,0 @@ ---- -layout: default ---- -
-
- {{ content }} -
    - {% for post in paginator.posts %} -
  • -

    {{ post.title }}

    -

    Posted on {{ post.date | date: '%B %d, %Y' }} by {{ post.author }}

    - {{ post.excerpt }} -
    Read more »
    -
  • - {% endfor %} -
- - {% if paginator.total_pages > 1 %} - - {% endif %} -
-
diff --git a/src/_layouts/default.html b/src/_layouts/default.html deleted file mode 100644 index efd42fcd3..000000000 --- a/src/_layouts/default.html +++ /dev/null @@ -1,9 +0,0 @@ - - - {% include head.html %} - - {% include nav.html %} - {{ content }} - {% include footer.html %} - - diff --git a/src/_layouts/doclandingpage.html b/src/_layouts/doclandingpage.html deleted file mode 100644 index ed975ccca..000000000 --- a/src/_layouts/doclandingpage.html +++ /dev/null @@ -1,15 +0,0 @@ ---- -layout: page ---- -{{ content }} - -

Documentation for older releases

- -

The Cassandra Query Language (CQL) documentation for older releases is: - -

-

diff --git a/src/_layouts/docpage.html b/src/_layouts/docpage.html deleted file mode 100644 index 73dae1778..000000000 --- a/src/_layouts/docpage.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -layout: default ---- -{{ content }} diff --git a/src/_layouts/page.html b/src/_layouts/page.html deleted file mode 100644 index 8af98d2b0..000000000 --- a/src/_layouts/page.html +++ /dev/null @@ -1,8 +0,0 @@ ---- -layout: default ---- -
-
- {{ content }} -
-
diff --git a/src/_layouts/post.html b/src/_layouts/post.html deleted file mode 100644 index c83fcb4de..000000000 --- a/src/_layouts/post.html +++ /dev/null @@ -1,12 +0,0 @@ ---- -layout: default ---- -
-
-

{{ page.title }}

-

Posted on {{ page.date | date: '%B %d, %Y' }} by {{ page.author }}

-
« Back to the Apache Cassandra Blog
-
- {{ content }} -
-
diff --git a/src/_plugins/font_awesome.rb b/src/_plugins/font_awesome.rb deleted file mode 100644 index b6657dcd0..000000000 --- a/src/_plugins/font_awesome.rb +++ /dev/null @@ -1,71 +0,0 @@ -## -# The MIT License (MIT) -# -# Copyright (c) 2014 Ryan Morrissey -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. -# -# -# Font Awesome Icons Liquid Tag -# Documentation can be found at http://fontawesome.io/ -# -# Example: -# {% icon fa-camera-retro %} -# {% icon fa-camera-retro fa-lg %} -# {% icon fa-spinner fa-spin %} -# {% icon fa-shield fa-rotate-90 %} - -module Jekyll - class FontAwesomeTag < Liquid::Tag - - def render(context) - if tag_contents = determine_arguments(@markup.strip) - icon_class, icon_extra = tag_contents[0], tag_contents[1] - icon_tag(icon_class, icon_extra) - else - raise ArgumentError.new <<-eos -Syntax error in tag 'icon' while parsing the following markup: - - #{@markup} - -Valid syntax: - for icons: {% icon fa-camera-retro %} - for icons with size/spin/rotate: {% icon fa-camera-retro fa-lg %} -eos - end - end - - private - - def determine_arguments(input) - matched = input.match(/\A(\S+) ?(\S+)?\Z/) - [matched[1].to_s.strip, matched[2].to_s.strip] if matched && matched.length >= 3 - end - - def icon_tag(icon_class, icon_extra = nil) - if icon_extra.empty? 
- "" - else - "" - end - end - end -end - -Liquid::Template.register_tag('icon', Jekyll::FontAwesomeTag) \ No newline at end of file diff --git a/src/_plugins/release_link.rb b/src/_plugins/release_link.rb deleted file mode 100644 index 2e05c52f8..000000000 --- a/src/_plugins/release_link.rb +++ /dev/null @@ -1,40 +0,0 @@ -module Jekyll - module FullReleaseLinkFilter - def full_release_link(input) - data = @context.registers[:site].data - apache_url = data['urls']['apache_url'] - download_url = data['urls']['download_url'] - name = data['releases'][input]['name'] - date = data['releases'][input]['date'] - dl_link = "#{download_url}/#{name}/apache-cassandra-#{name}-bin.tar.gz" - - verif = Proc.new { |h, e| "#{h}" } - - pgp = verif.call("pgp", "asc") - sha256 = verif.call("sha256", "sha256") - sha512 = verif.call("sha512", "sha512") - "#{name} (#{pgp}, #{sha256} and #{sha512}), released on #{date}" - end - end - - module ReleaseLinkFilter - def release_link(input) - data = @context.registers[:site].data - download_url = data['urls']['download_url'] - name = data['releases'][input]['name'] - "#{download_url}/#{name}/apache-cassandra-#{name}-bin.tar.gz" - end - end - - module ChangelogLinkFilter - def changelog_link(input) - data = @context.registers[:site].data - url = data['urls']['changelog_url'] - "#{url}-#{input}" - end - end -end - -Liquid::Template.register_filter(Jekyll::FullReleaseLinkFilter) -Liquid::Template.register_filter(Jekyll::ReleaseLinkFilter) -Liquid::Template.register_filter(Jekyll::ChangelogLinkFilter) diff --git a/src/_posts/2018-08-07-faster_streaming_in_cassandra.markdown b/src/_posts/2018-08-07-faster_streaming_in_cassandra.markdown deleted file mode 100644 index ee13432b2..000000000 --- a/src/_posts/2018-08-07-faster_streaming_in_cassandra.markdown +++ /dev/null @@ -1,77 +0,0 @@ ---- -layout: post -title: "Hardware-bound Zero Copy Streaming in Apache Cassandra 4.0" -date: 2018-08-07 12:00:00 -0700 -author: The Apache Cassandra Community -categories: blog ---- - -Streaming in Apache Cassandra powers host replacement, range movements, and cluster expansions. Streaming plays a crucial role in the cluster and as such its performance is key to not only the speed of the operations its used in but the cluster's health generally. In Apache Cassandra 4.0, we have introduced an improved streaming implementation that reduces GC pressure and increases throughput several folds and are now limited, in some cases, only by the disk / network IO (See: [CASSANDRA-14556](https://issues.apache.org/jira/browse/CASSANDRA-14556)). - -![Fig 1. 
Cassandra Streaming](data:image/png;base64,…){:style="float: right;margin-right: 7px;margin-top: 7px;"} To get an understanding of the impact of these changes, let's first look at the current streaming code path. The diagram (Fig 1) illustrates the stream session setup when a node attempts to stream data from a peer. Let's say we have a 3-node cluster (nodes A, B, C). Node C is being rebuilt and has to stream all data that it is responsible for from A and B. C sets up a streaming session with each of its peers (see [CASSANDRA-4650](https://issues.apache.org/jira/browse/CASSANDRA-4650) for how Cassandra applies [Ford-Fulkerson](https://en.wikipedia.org/wiki/Ford%E2%80%93Fulkerson_algorithm) to optimize streaming peers). It exchanges messages to request ranges and begins streaming data from the selected nodes. - -During the streaming phase, A collects all SSTables that have partitions in the requested ranges. It streams each SSTable by serializing individual partitions. Upon receiving a partition, node C reifies the data in memory and then writes it to disk. This is necessary to accurately transfer partitions from all possible SSTables for the requested ranges. This streaming path generates garbage and can be avoided in scenarios where all partitions within an SSTable need to be transmitted, which is common when you're using LeveledCompactionStrategy or have enabled partitioning SSTables by token range (see [CASSANDRA-6696](http://issues.apache.org/jira/browse/CASSANDRA-6696)). - -To solve this problem, [CASSANDRA-14556](http://issues.apache.org/jira/browse/CASSANDRA-14556) adds a Zero Copy streaming path. This significantly speeds up the transfer of SSTables and reduces garbage and unnecessary object creation.
It modifies the streaming path to add additional information to the streaming header and uses ZeroCopy APIs to transfer bytes to and from the network and disk. An SSTable is transferred with this strategy whenever Cassandra detects that the complete SSTable needs to be sent.

## How do I use this feature?

It just works. This feature is controlled by `stream_entire_sstables` in `cassandra.yaml` and is enabled by default. Even when it is enabled, streaming still respects the throttling limit defined by `stream_throughput_outbound_megabits_per_sec`.

## Impact

Cassandra can now stream SSTables bounded only by hardware limitations (network and disk I/O). With this optimization, we hope to make Cassandra more performant and reliable.

Microbenchmarking this feature shows a marked improvement (higher is better). Block Stream Writers are the ZeroCopy writers and Partial Stream Writers are the existing writers.
| Benchmark | Mode | Cnt | Score | Error | Units |
|---|---|---|---|---|---|
| ZeroCopyStreamingBenchmark.blockStreamReader | thrpt | 10 | 20.119 | ± 1.300 | ops/s |
| ZeroCopyStreamingBenchmark.blockStreamWriter | thrpt | 10 | 1339.672 | ± 352.242 | ops/s |
| ZeroCopyStreamingBenchmark.partialStreamReader | thrpt | 10 | 0.590 | ± 0.135 | ops/s |
| ZeroCopyStreamingBenchmark.partialStreamWriter | thrpt | 10 | 17.556 | ± 0.323 | ops/s |
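For reference, both of the settings mentioned above live in `cassandra.yaml`. The following is a minimal sketch of the relevant entries; the throughput value shown is only illustrative, so check your own configuration for the actual default.

```yaml
# Stream eligible SSTables in their entirety over the Zero Copy path (enabled by default).
stream_entire_sstables: true

# Zero Copy streaming still honours the outbound streaming throttle.
stream_throughput_outbound_megabits_per_sec: 200
```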
- -## Conclusion - -If you're a Cassandra user, we would love to hear back from you. Please send us feedback via user [Mailing List](http://cassandra.apache.org/community/), [Jira](https://issues.apache.org/jira/projects/CASSANDRA/summary), or [IRC](http://cassandra.apache.org/community/) (or any combination of the three). - diff --git a/src/_posts/2018-08-23-testing_apache_cassandra.markdown b/src/_posts/2018-08-23-testing_apache_cassandra.markdown deleted file mode 100644 index 30138333f..000000000 --- a/src/_posts/2018-08-23-testing_apache_cassandra.markdown +++ /dev/null @@ -1,56 +0,0 @@ ---- -layout: post -title: "Testing Apache Cassandra 4.0" -date: 2018-08-20 20:00:00 -0700 -author: the Apache Cassandra Community -categories: blog ---- - -With the goal of ensuring reliability and stability in Apache Cassandra 4.0, the project's committers have voted to freeze new features on September 1 to concentrate on testing and validation before cutting a stable beta. Towards that goal, the community is investing in methodologies that can be performed at scale to exercise edge cases in the largest Cassandra clusters. The result, we hope, is to make Apache Cassandra 4.0 the best-tested and most reliable major release right out of the gate. - -In the interests of communication (and hopefully more participation), here’s a look at some of the approaches being used to test Apache Cassandra 4.0: - ---- - -#### Replay Testing -##### Workload Recording, Log Replay, and Comparison - -Replay testing allows for side-by-side comparison of a workload using two versions of the same database. It is a black-box technique that answers the question, “did anything change that we didn’t expect?” - -Replay testing is simple in concept: record a workload, then re-issue it against two clusters – one running a stable release and the second running a candidate build. Replay testing a stateful distributed system is more challenging. For a subset of workloads, we can achieve determinism in testing by grouping writes by CQL partition and ordering them via client-supplied timestamps. This also allows us to achieve parallelism, as recorded workloads can be distributed by partition across an arbitrarily-large fleet of writers. Though linearizing updates within a partition and comparing differences does not allow for validation of all possible workloads (e.g., CAS queries), this subset is very useful. - -The suite of Full Query Logging (“FQL”) tools in Apache Cassandra enable workload recording. [CASSANDRA-14618](https://issues.apache.org/jira/browse/CASSANDRA-14618) and [CASSANDRA-14619](https://issues.apache.org/jira/browse/CASSANDRA-14619) will add fqltool replay and fqltool compare, enabling log replay and comparison. Standard tools in the Apache ecosystem such as [Apache Spark](https://spark.apache.org) and [Apache Mesos](https://mesos.apache.org) can also make parallelizing replay and comparison across large clusters of machines straightforward. - - ---- - -#### Fuzz Testing and Property-Based Testing -##### Dynamic Test Generation and Fuzzing - -Fuzz testing dynamically generates input to be passed through a function for validation. We can make fuzz testing smarter in stateful systems like Apache Cassandra to assert that persisted data conforms to the database’s contracts: acknowledged writes are not lost, deleted data is not resurrected, and consistency levels are respected. 
Fuzz testing of storage systems to validate these properties requires maintaining a record of responses received from the system; the development of a model representing valid legal states of data within the database; and a validation pass to assert that responses reflect valid states according to that model. - -Property-based testing combines fuzz testing and assertions to explore a state space using randomly-generated input. These tests provide dynamic input to the system and assert that its fundamental properties are not violated. These properties can range from generic (e.g., “I can write data and read it back”) to specific (“range tombstone bounds synthesized during short-read-protection reads are properly closed”); and from local to distributed (e.g., “replacing every single node in a cluster results in an identical database”). To simplify debugging, property-based testing libraries like [QuickTheories](https://github.com/ncredinburgh/QuickTheories) also provide a “shrinker,” which attempts to generate the simplest possible failing case after detecting input or a sequence of actions that triggers a failure. - -Unlike model checkers, property-based tests don’t exhaust the state space – but explore it until a threshold of examples is reached. This allows for the computation to be distributed across many machines to gain confidence in code and infrastructure that scales with the amount of computation applied to test it. - ---- - -#### Distributed Tests and Fault-Injection Testing -##### Validating Behavior Under Fault Scenarios - -All of the above techniques can be combined with fault injection testing to validate that the system maintains availability where expected in fault scenarios, that fundamental properties hold, and that reads and writes conform to the system’s contracts. By asserting series of invariants under fault scenarios using different techniques, we gain the ability to exercise edge cases in the system that may reveal unexpected failures in extreme scenarios. Injected faults can take many forms – network partitions, process pauses, disk failures, and more. - ---- - -#### Upgrade Testing -##### Ensuring a Safe Upgrade Path - -Finally, it's not enough to test one version of the database. Upgrade testing allows us to validate the upgrade path between major versions, ensuring that a rolling upgrade can be completed successfully, and that contents of the resulting upgraded database is identical to the original. To perform upgrade tests, we begin by snapshotting a cluster and cloning it twice, resulting in two identical clusters. One of the clusters is then upgraded. Finally, we perform a row-by-row scan and comparison of all data in each partition to assert that all rows read are identical, logging any deltas for investigation. Like fault injection tests, upgrade tests can also be thought of as an operational scenario all other types of tests can be parameterized against. - ---- - -#### Wrapping Up - -The Apache Cassandra developer community is working hard to deliver Cassandra 4.0 as the most stable major release to date, bringing a variety of methodologies to bear on the problem. We invite you to join us in the effort, deploying these techniques within your infrastructure and testing the release on your workloads. Learn more about how to get involved [here](http://cassandra.apache.org/community/). - -The more that join, the better the release we’ll ship together. 
diff --git a/src/_posts/2018-10-17-finding_bugs_with_property_based_testing.markdown b/src/_posts/2018-10-17-finding_bugs_with_property_based_testing.markdown deleted file mode 100644 index 2cf634670..000000000 --- a/src/_posts/2018-10-17-finding_bugs_with_property_based_testing.markdown +++ /dev/null @@ -1,116 +0,0 @@ ---- -layout: post -title: "Finding Bugs in Cassandra's Internals with Property-based Testing" -date: 2018-10-17 00:00:00 -0700 -author: the Apache Cassandra Community -categories: blog ---- - -As of September 1st, the Apache Cassandra community has shifted the focus of Cassandra 4.0 development from new feature work to testing, validation, and hardening, with the goal of releasing a stable 4.0 that every Cassandra user, from small deployments to large corporations, can deploy with confidence. There are several projects and methodologies that the community is undertaking to this end. One of these is the adoption of property-based testing, which was [previously introduced here](http://cassandra.apache.org/blog/2018/08/21/testing_apache_cassandra.html). This post will take a look at a specific use of this approach and how it found a bug in a new feature meant to ensure data integrity between the client and Cassandra. - -#### Detecting Corruption is a Property - -In this post, we demonstrate property-based testing in Cassandra through the integration of the [QuickTheories](https://github.com/ncredinburgh/QuickTheories) library introduced as part of the work done for [CASSANDRA-13304](https://issues.apache.org/jira/browse/CASSANDRA-13304). - -This ticket modifies the framing of Cassandra's native client protocol to include checksums in addition to the existing, optional compression. Clients can opt-in to this new feature to retain data integrity across the many hops between themselves and Cassandra. This is meant to address cases where hardware and protocol level checksums fail (due to underlying hardware issues) — a case that has been seen in production. A description of the protocol changes can be found in the ticket but for the purposes of this discussion the salient part is that two checksums are added: one that covers the length(s) of the data (if compressed there are two lengths), and one for the data itself. Before merging this feature, property-based testing using QuickTheories was used to uncover a bug in the calculation of the checksum over the lengths. This bug could have led to silent corruption at worst or unexpected errors during deserialization at best. - -The test used to find this bug is shown below. This example tests the property that when a frame is corrupted, that corruption should be caught by checksum comparison. The test is wrapped inside of a standard JUnit test case but, once called by JUnit, execution is handed over to QuickTheories to generate and execute hundreds of examples. These examples are dictated by the types of input that should be generated (the arguments to `forAll`). The execution of each individual example is done by `checkAssert` and its argument, the `roundTripWithCorruption` function. - -
-@Test
-public void corruptionCausesFailure()
-{
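-    // Run 500 generated examples (stopping early on a falsifying example); each example
-    // draws one value from each of the four generators passed to forAll below.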
-    qt().withExamples(500)
-        .forAll(inputWithCorruptablePosition(),
-                integers().between(0, Byte.MAX_VALUE).map(Integer::byteValue),
-                compressors(),
-                checksumTypes())
-        .checkAssert(this::roundTripWithCorruption);
-}
-
- - - -The `roundTripWithCorruption` function is a generalization of a unit test that worked similarly but for a single case. It is given an input to transform and a position in the transformed output to insert corruption, as well as what byte to write to the corrupted position. The additional arguments (the compressor and checksum type) are used to ensure coverage of Cassandra's various compression and checksumming implementations. - -
-private void roundTripWithCorruption(Pair<String, Integer> inputAndCorruptablePosition,
-                                     byte corruptionValue,
-                                     Compressor compressor,
-                                     ChecksumType checksum) {
-    String input = inputAndCorruptablePosition.left;
-    ByteBuf expectedBuf = Unpooled.wrappedBuffer(input.getBytes());
-    int byteToCorrupt = inputAndCorruptablePosition.right;
-    ChecksummingTransformer transformer = new ChecksummingTransformer(checksum, DEFAULT_BLOCK_SIZE, compressor);
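-    // Serialize the payload into a checksummed (and optionally compressed) outbound frame.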
-    ByteBuf outbound = transformer.transformOutbound(expectedBuf);
-
-    // make sure we're actually expecting to produce some corruption
-    if (outbound.getByte(byteToCorrupt) == corruptionValue)
-        return;
-
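-    // Skip examples whose corruption position falls beyond the bytes actually written.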
-    if (byteToCorrupt >= outbound.writerIndex())
-        return;
- 
-    try {
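-        // Overwrite a single byte at the chosen position, then restore the original writer index.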
-        int oldIndex = outbound.writerIndex();
-        outbound.writerIndex(byteToCorrupt);
-        outbound.writeByte(corruptionValue);
-        outbound.writerIndex(oldIndex);
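-        // Attempt to deserialize the corrupted frame; a correct implementation either rejects it
-        // with a ProtocolException or returns content identical to the original.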
-        ByteBuf inbound = transformer.transformInbound(outbound, FLAGS);
-
-        // verify that the content was actually corrupted
-        expectedBuf.readerIndex(0);
-        Assert.assertEquals(expectedBuf, inbound);
-    } catch(ProtocolException e) {
-       return;
-    }
-}
-
The remaining piece is how those arguments are generated: the arguments to `forAll` mentioned above. Each argument is a function that returns an input generator. For each example, an input is pulled from each generator and passed to `roundTripWithCorruption`. The `compressors()` and `checksumTypes()` generators aren't copied here. They can be found in the [source](https://github.com/apache/cassandra/blob/65fb17a88bd096b1e952ccca31ad709759644a1b/test/unit/org/apache/cassandra/transport/frame/checksum/ChecksummingTransformerTest.java#L209-L217) and are based on built-in generator methods, provided by QuickTheories, that select a value from a list of values. The second argument, `integers().between(0, Byte.MAX_VALUE).map(Integer::byteValue)`, generates non-negative numbers that fit into a single byte. These numbers will be passed as the `corruptionValue` argument.

The `inputWithCorruptablePosition` generator, copied below, generates strings to use as input to the transformation function and a position within the output byte stream to corrupt. Because compression prevents knowledge of the output size of the frame, the generator tries to choose a somewhat reasonable position to corrupt by limiting the choice to the size of the generated string (it's uncommon for compression to produce a larger string, and the implementation discards the compressed value if it does). It also avoids corrupting the first two bytes of the stream, which are not covered by a checksum and therefore can be corrupted without being caught. The function above ensures that corruption is actually introduced and that a position larger than the size of the output is never corrupted.
-private Gen<Pair<String, Integer>> inputWithCorruptablePosition()
-{
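-    // Start at position 2 to skip the two leading bytes that are not covered by a checksum;
-    // the upper bound tracks the generated string's length as a rough proxy for the frame size.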
-    return inputs().flatMap(s -> integers().between(2, s.length() + 2)
-                   .map(i -> Pair.create(s, i)));
-}
-
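For completeness, here is a minimal sketch of what the elided `compressors()` and `checksumTypes()` generators can look like using QuickTheories' built-in pick-from-a-list generator. This is an illustration rather than the project's actual code: the `ChecksumType` constants are inferred from the failing examples shown below, and `lz4Compressor`/`snappyCompressor` are hypothetical stand-ins for the concrete `Compressor` instances used by the real test.

```java
// These imports would sit at the top of the test class.
import java.util.Arrays;
import org.quicktheories.core.Gen;
import static org.quicktheories.generators.SourceDSL.arbitrary;

// Sketch only: choose uniformly from the supported checksum implementations.
private Gen<ChecksumType> checksumTypes()
{
    return arbitrary().pick(Arrays.asList(ChecksumType.Adler32, ChecksumType.CRC32));
}

// Sketch only: null stands in for "no compression", which is why null shows up in the
// falsifying examples below; the other entries are hypothetical stand-ins.
private Gen<Compressor> compressors()
{
    return arbitrary().pick(Arrays.asList(null, lz4Compressor, snappyCompressor));
}
```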
With all those pieces in place, if the test were run before the bug was fixed, it would fail with the following output.
-java.lang.AssertionError: Property falsified after 2 example(s) 
-Smallest found falsifying value(s) :-
-{(c,3), 0, null, Adler32}
-
-Cause was :-
-java.lang.IndexOutOfBoundsException: readerIndex(10) + length(16711681) exceeds writerIndex(15): UnpooledHeapByteBuf(ridx: 10, widx: 15, cap: 54/54)
-    at io.netty.buffer.AbstractByteBuf.checkReadableBytes0(AbstractByteBuf.java:1401)
-    at io.netty.buffer.AbstractByteBuf.checkReadableBytes(AbstractByteBuf.java:1388)
-    at io.netty.buffer.AbstractByteBuf.readBytes(AbstractByteBuf.java:870)
-    at org.apache.cassandra.transport.frame.checksum.ChecksummingTransformer.transformInbound(ChecksummingTransformer.java:289)
-    at org.apache.cassandra.transport.frame.checksum.ChecksummingTransformerTest.roundTripWithCorruption(ChecksummingTransformerTest.java:106)
-    ...
-Other found falsifying value(s) :- 
-{(c,3), 0, null, CRC32}
-{(c,3), 1, null, CRC32}
-{(c,3), 9, null, CRC32}
-{(c,3), 11, null, CRC32}
-{(c,3), 36, null, CRC32}
-{(c,3), 50, null, CRC32}
-{(c,3), 74, null, CRC32}
-{(c,3), 99, null, CRC32}
-
-Seed was 179207634899674
-
- -The output shows more than a single failing example. This is because QuickTheories, like most property-based testing libraries, comes with a shrinker, which performs the task of taking a failure and minimizing its inputs. This aids in debugging because there are multiple failing examples to look at often removing noise in the process. Additionally, a seed value is provided so the same series of tests and failures can be generated again — another useful feature when debugging. In this case, the library generated an example that contains a single byte of input, which will corrupt the fourth byte in the output stream by setting it to zero, using no compression, and using Adler32 for checksumming. It can be seen from the other failing examples that using CRC32 also fails. This is due to improper calculation of the checksum, regardless of the algorithm. In particular, the checksum was only calculated over the least significant byte of each length rather than all eight bytes. By corrupting the fourth byte of the output stream (the first length's second-most significant byte not covered by the calculation), an invalid length is read and later used. - -#### Where to Find More - -Property-based testing is a broad topic, much of which is not covered by this post. In addition to Cassandra, it has been used successfully in several places including [car](https://ieeexplore.ieee.org/document/7107466/) [operating -systems](https://arxiv.org/pdf/1703.06574.pdf) and [suppliers' products](https://youtu.be/hXnS_Xjwk2Y?t=1023), [GNOME Glib](https://dl.acm.org/citation.cfm?id=2034662), [distributed consensus](https://github.com/WesleyAC/raft/tree/master/src), and other [distributed](https://www.youtube.com/watch?v=x9mW54GJpG0) [databases](https://youtu.be/hXnS_Xjwk2Y?t=1382). It can also be combined with other approaches such as fault-injection and memory leak detection. Stateful models can also be built to generate a series of commands instead of running each example on one generated set of inputs. Our goal is to evangelize this approach within the Cassandra developer community and encourage more testing of this kind as part of our work to deliver the most stable major release of Cassandra yet. - diff --git a/src/_posts/2018-10-29-audit_logging_cassandra.markdown b/src/_posts/2018-10-29-audit_logging_cassandra.markdown deleted file mode 100644 index ada231601..000000000 --- a/src/_posts/2018-10-29-audit_logging_cassandra.markdown +++ /dev/null @@ -1,211 +0,0 @@ ---- -layout: post -title: "Audit Logging in Apache Cassandra 4.0" -date: 2018-10-29 00:00:00 -0700 -author: the Apache Cassandra Community -categories: blog ---- - -Database audit logging is an industry standard tool for enterprises to -capture critical data change events including what data changed and who -triggered the event. These captured records can then be reviewed later -to ensure compliance with regulatory, security and operational policies. - -Prior to Apache Cassandra 4.0, the open source community did not have a -good way of tracking such critical database activity. With this goal in -mind, Netflix implemented -[CASSANDRA-12151](https://issues.apache.org/jira/browse/CASSANDRA-12151) -so that users of Cassandra would have a simple yet powerful audit -logging tool built into their database out of the box. - -## Why are Audit Logs Important? - -Audit logging database activity is one of the key components for making -a database truly ready for the enterprise. Audit logging is generally -useful but enterprises frequently use it for: - -1. 
Regulatory compliance with laws such as [SOX](https://en.wikipedia.org/wiki/Sarbanes%E2%80%93Oxley_Act), [PCI](https://en.wikipedia.org/wiki/Payment_Card_Industry_Data_Security_Standard) and [GDPR](https://en.wikipedia.org/wiki/General_Data_Protection_Regulation). These types of compliance are crucial for companies that are traded on public stock exchanges, hold payment information such as credit cards, or retain private user information.
2. Security compliance. Companies often have strict rules for what data can be accessed by which employees, both to protect the privacy of users and to limit the probability of a data breach.
3. Debugging complex data corruption bugs such as those found in massively distributed microservice architectures like Netflix's.

## Why is Audit Logging Difficult?

Implementing a simple logger in the request (inbound/outbound) path sounds easy, but the devil is in the details. In particular, the "fast path" of a database, where audit logging must operate, strives to do as little as humanly possible so that users get the fastest and most scalable database system possible. While implementing Cassandra audit logging, we had to ensure that the audit log infrastructure does not take excessive CPU or IO resources away from the actual database execution itself. However, one cannot simply optimize only for performance, because that may compromise the guarantees of the audit logging.

For example, if producing an audit record would block a thread, it should be dropped to maintain maximum performance. However, most compliance requirements prohibit dropping records. Therefore, the key to implementing audit logging correctly lies in allowing users to achieve both performance *and* reliability, or, where both cannot be achieved, allowing users to make an explicit trade-off through configuration.

---

## Audit Logging Design Goals

The design goals of the Audit Log are broadly categorized into three different areas:

**Performance**: Considering that the Audit Log injection points live in the request path, performance is an important goal in every design decision.

**Accuracy**: Accuracy is required by compliance and is thus a critical goal. Audit Logging must be able to answer crucial auditor questions like "Is every write request to the database being audited?". As such, accuracy cannot be compromised.

**Usability & Extensibility**: The diverse Cassandra ecosystem demands that any frequently used feature be easily usable and pluggable (e.g., Compaction, Compression, SeedProvider, etc.), so the Audit Log interface was designed with this context in mind from the start.

## Implementation

With these three design goals in mind, the [OpenHFT](https://github.com/OpenHFT) libraries were an obvious choice due to their reliability and high performance. Earlier, in [CASSANDRA-13983](https://issues.apache.org/jira/browse/CASSANDRA-13983), the [Chronicle Queue library](https://github.com/OpenHFT/Chronicle-Queue) from OpenHFT was introduced as a BinLog utility to the Apache Cassandra code base. The performance of Full Query Logging (FQL) was excellent, but it only instrumented the mutation and read query paths. It was missing a lot of critical data, such as when queries failed, where they came from, and which user issued the query. The FQL was also single purpose: preferring to drop messages rather than delay the process (which makes sense for FQL but not for Audit Logging).
Lastly, the FQL didn't allow for pluggability, which would make it harder to adopt in the codebase for this feature.

As shown in the architecture figure below, we were able to unify the FQL feature with the AuditLog functionality through the AuditLogManager and IAuditLogger abstractions. Using this architecture, we can support any output format: logs, files, databases, etc. By default, the BinAuditLogger implementation comes out of the box to maintain performance. Users can choose a custom audit logger implementation by dropping the jar file on the Cassandra classpath and customizing it with the configuration options in the [cassandra.yaml](https://github.com/apache/cassandra/blob/trunk/conf/cassandra.yaml#L1216-L1234) file.

---

## Architecture

![Fig 1. AuditLog Architecture Figure.](data:image/png;base64,
2smdXnggLAGwNwKoyQQOAIQiCIAgADABOwU8g9e/fn/3mN79hs2fPVpoAyw8Aq+r/G2/4NYJeM6uAYKdRYFXNoFMZgDdv3swBavTo0SkHwEFmggYAQxAEQRAEAAYAK0uAZQTAdC1Q82eR/CqKCbDCbv7sF37dgK8fCA47Chzk94DDzAStAoDpbxgljaO/L8kOwGF+CgkADEEQBEFQ5AHYCfwmCwDnz58/U+ZkOLouWLCgkuRXYcCvHQh7jQJHoRl0gQIFcD0mkJ944gkAMARBEARBAOBEAWAv8OsUgOl4/u///i+cIKbz5RaAnTZ/dgu/RtvnBoJVR4HDBGA6DypeDETNdDyScb/ofAGAIQiCIAgCAAOA+QOTyky/cLCWAVh182en8OsE1OMRBbYDYJX9gOk8OPnGMRwNA4AhCIIgCAIAA4A1ADZqMgtH00EBsNPor5totcoocDz7AZsBMK7HxLpvAMAQBEEQBAGAAcCZmtTC0bYdAHvt/6safs0g2GkUOBEA2CjCDEfTAGAIgiAIggDAAGANgM36RsLRs1sAVtX82QhujeQFgoP8HFKQAKxfLixTdmb62xbmOml99DckXvvs1wBgCIIgCIIAwABgDYCNPv3ixStXrszkU6dO8bJly5bxz5/QcLNmzbRlzp49q2z9Zk5LS+PbYlZO27V27Vpl69i5cye/HkXddE2p2peoALCVVABwUP2AVQKw/lNIKpwrV65M3rJlC//bVbRoUf6ZoaNHj7I2bdpoy3Tu3DmQbZFN61uxYkWm6U2bNs20vTRNlNN2N2jQgGXPnp27RIkSbP/+/Zn2OejtjxIAm0EwABiCIAiCIABwSABs9n1bt6a66CFZ9okTJ3hZ6dKl2blz5/gwPfCKZeThoEzbMXLkSNPyiRMn8u+fpqenK1kHAcnq1asdrdvLMVYNwG6bPztRKgCw/ju/KlyxYkXD6fQt3+XLl/Phw4cPsxdffFEro3soiG2RTesT69dvL/3NNFqG/gbSttG9IKZdvHiRf5Jo27Zt2rSgjqXseAKw0ygwABiCIAiCIABwSAD8448/KrFVXRcuXGBff/01H6aHYvo9dOgQe/jhh/kv7TtN+/bbb/n4unXrtGnk69ev8+2lafRL06i+rVu3cou6hencrlq1iv9S9HnEiBGG20XrowfyxYsX84d8uUys02zcbB00H20nTR84cCCvlyLhqo5x2ADsJvqbSgD8ww8/KHeFChVMy/r27ct/6f5o3bo1/1tAv/Tyhn5feuklXr5x40ZWo0YNbmrZIJYfP348O3nyJOvatStf9u7du3warbNRo0bszJkz2rz0t7JFixa8bPr06bx+usaNtpfmNdreTp06scmTJ2eaTttJ0WAxHtSxlA0AhiAIgiAIAAwA1gD4+++/V2KruuhBmc4HDRMA06/88H7kyBEe5aL5mjdvzoYMGcKefPJJdvr0aW3eJk2a8PJr167xughce/bsyf3000/zY0XzLl26lOXNm5fXUa9ePQ4Cw4cPN9yuHTt28Lpp3bRddJxEGU0/ePCg4Tg1d5bXUaVKFW0dNB9tAzVdffbZZ9lTTz3F+vTpo+wYA4CjAcDfffedctP1bVaWM2dO/nvgwAHWqlUr9sUXX/BheolEv3TPb9++nZUtW5bduHGD3yPPPPMM2717t1a3GKdlBwwYwDp27MjvG2r9QPcT/d2iMhpesmQJL5s7dy6/T2ncaHtpPUbbS/cH/T01KqP7lVqE0HBQx1I2ABiCIAiCIAAwAFgDYIoEqTDVRQ/msilaRWXly5fn54yG6UFeLCMPL1iwgHXp0kUbp2hV5cqV+TDV1bhxY62sQ4cOvGmlGJ8xYwYbNGgQ7yNJ0SU6P6KsVq1abNiwYYbbTGViG/v378/rEWW0Turbqx+ndRB00PUkyurWrautg+ajiDIN06/Zur0eYwBwNACYXpqo9r/927/xe0I29fmlMhqmX+pD27JlS20ZMZ1M1zPdN2KcXvCIeekepBc4ooyu4U8//VQbp/tn9uzZvEUF3WvydlG9dC3rt5fqfPTRR2O2t3fv3rzsgQce4DBttJ+0HO0HDQd1LGUDgCEIgiAIAgADgDUApubDKkx17du3L8Z0PKmMHngpKkXD9JAslpGHKdEOzUcP7MKinIYXLVqkzfv444/z5DpivmrVqvFfihhTHfJ20XJDhw7NtL10fRAEiG2lh/9ChQpp5VQfTdePU+TKah3ytpqt288xjncSLDfwm8wA/NVXXyk3XVdmZXQv0O/evXv5NaafTs6TJw8rVaoUr0e4W7duWt3UPF/MS7Atz0deuHAhN7VskNdNTflputH2ynXKLlKkCDt+/Lg2TlFm+jtJwwTN9DebhoM6lrIBwBAEQRAEAYABwBoA02dVVNiqrnLlyvG+fzRMD+xiujxMfQanTZvG5xOm80xl1B+RHsDFvPRwTSAgz0vnhPoalyxZMmbds2bN4g/0+m2iafXr1+e/wgTAFOUV66R1iPnF+JUrVyzXIW+rgAmVxxhZoKMBwBTdVG26T8zK6F6h3z179vBrTD+dXLNmTR5ZNaubug+IcYJQ+rugn48SVNWpUydmWqVKlXgLDbs6ZU+aNInfX3K9VM+cOXNi9jOoYyk7Cz6DBEEQBEEQABgALACYmvSqsFVd9MB79epVPvzYY49p0wlkKTMsDVOfWcoqS9tG4xQxosQ7NEz9gufPn68tR801RZNnsSz1g6Th/Pnz80gtDdMxLliwYMy8ZFoHRcvoGpCn0/ooIRANU5NosZyoZ9euXYbrKFasmDavvK0UAaZm3SqPcVS+A6wHYSffAFb1HWCjfQ8bgKn5sGpTn3GzMrpv6JeuQbrG5OnU55z+JlFZ4cKF+YsaSrxG1zL1CxZ108sbsRxlJ3/uuef4fDQ/ZWqnF0n0t4LqGDt2LC+j65rulXnz5hlur1ynbKqH+t9TH/nNmzfzbaP7hPoT04sqMR8dS6pbNm2PyuMKAIYgCIIgCAAMANYAmL5Vq8L0IG5W1rBhQx4pomF60BbTKUJKkSj6XBCNU1NLgkt6sKaHZcpMS9O7d++uzUOm40t10oM6fVuUklDRuaIy6tNLsErrobJ+/frxbLfy9lAUjfrq6reTjjetl+qn64eGqQ6qi5poU3RYrIO2U6yjV69e2jrkbaU6aB55n/3YDoBV9QN2A8FWtoPfRAZgun9UmxJOmZXR9UW/ly5d4v3fxXTqI0z3DU2ncbq26VqlZtKUTV2um/6+yXVSVJe+y03zU+RYTKfrlrJOUxnNQ32JaT1G26uvUzb9HRLroH7ylCm9du3aMftJ2663vH8qDACGIAiCIAgADADWHgz10+Nt2ifqV+hkXjrudB6NypzWYWerelStw6mDAmAv3wN2C79BN38OG4D19xfszPS3kxJqhblOADAEQRAEQQBgALAGwPr64Og6TAA2g2A7EDZbxmv0N8j+v34AGNdjYt03AGAIgiAIggDACQLAXiHYKQCbrROOnmUA9toPWBUEu7FT+HUS/Y0KAOvvXTi6BgBDEARBEAQABgBrAKzfJji6dg
LAqqPAfiHYrM4oNH/2A8BG9zccTQOAIQiCIAgCAFsAsFMIThYA1m83HF2rAuCwINgv/Eax/68AYLMXY1H0yZMn+ferE2mbVRoADEEQBEEQABgArAGwfl+jbPo+aSJtr2rrAdhpM2g3UWArCHYKwlbLW603qObPQQCw2d+OKJqylE+ZMiWhtlml9QDsBX4BwBAEQRAEAYCTBID1xyTKzp49e0Jtr2o7BWC/UWA7CPZqN/Ab1ebPAoD1f1ui7J49e7LJkycn1DarNAAYgiAIgiAAMABYA2Cj4xZVEwDL47t372ZTp05lBw8ezDQvfUt0zpw57KWXXuLfMaVfmk6/8+fP5xbTEsV+ADjeEOwXfp1Gf4MG4F9++YWfB/3fnyi7R48ebNKkSXz4xo0brHfv3qxUqVKsZs2abPv27dp858+f59/spbKOHTvye2jevHm87MCBA6xevXq8rGvXrryeRNn/qAOwF/gFAEMQBEEQADghADjMTyE5BeCXX345Yfzoo49qw23btmVlypRh3bt3ZwUKFGDDhw/n0wlqy5Urx6pWrcq6dOnCH9jz58/PDh06xM6ePcuefPJJ1rp1a748DdO0RNl/IwAOMgqsAoTt6vYDv0E2f7YDYALARHG3bt3YhAkT+PDzzz/Pr/9Tp05xwM2ZMyfbt28fL3v66adZ3759ednMmTN5GS1LZTS8bNkyDsnUpHrw4MEJs/8AYAiCIAiCAMAAYA2Ar1+/njAmAKbfdevWsaeeekqbTg/ljz32GDt8+DCPCFevXl0ro2l/+MMfeASLor61atXSynbt2sUTBCXK/tP5IqALMwrsFYSd1Ge0LaoBWHXzZwHA9KIlUUwR23HjxrH09HR+D8llQ4cOZe3bt2fbtm3j95Rc1qZNG74sDT/44INs1apVCbXfwgBgCIIgCIJSBoCD/BRSogGwEQTT8bx27VrCmB7e6bdfv378wVwuq1+/Pps1axZvwkkP+3JZyZIl2f79+9mRI0d4HQTI9NC/cOHChNp/MwAOE4JVOQj4DROAr169mjCmlhBjx47lkV66F+SypUuX8ntH/Mpl1KqClqXhuXPnsmeffZY9/PDD7IknnmBr1qxJmP1XAcB28AsAhiAIgiAIABxnAHYSBabjefny5YQxwSv9UvNLasIsl1F/Ruqv2KFDB14ulxUqVIjt2bOHD1OT57Vr17Lx48ezfPny8WUSZf8FALuJAruB4LBAWAX8qoz+uun/KwD40qVLCePOnTuz0aNH86bNjzzyCG8xIcooQRaV0/2RO3fumLLatWvzMppG/ezFdLpnypYtmzD7HwYAm32CCQAMQRAEQVDKALDKRFhBAvDFixcTxvTwTr+U/CpHjhy8eTONb926lY9Tc2ZqHp0rVy527NgxXkaff/ntb3/Ll6E+j9TkU9TXqlUr1qdPn4TZfysAdhsFjhcEBwW/YUV/BQBfuHAhYdypUyc2atQoPty8eXMOths3buRRYbpv0tLSeFndunW5qasANX9+6KGH+LL00ojuKepHTN0GqPVEgwYNEmb/kxGA9f//6H8iABiCIAiCAMCBAnCYmaCDBGCK7iSKixYtqg3TZ13ooZwS99DvkiVLtLJhw4bxB3ua3qRJEz7Pzp072ZkzZ3jz58cff5zXVbhwYR7ZSpT9lwE4aAhWDcJW6zHbNj/w6zT5lVcAPnfuXMJ4wYIFvMmyGB85ciT/pnbLli15hnQx/fTp07yMplPWaMoETQmvqIxeINH0ihUr8un0gilR9h8ADEEQBEFQUgBwkImwogbAQSXCouNJUJjIFmArfPz4cd6cU56WN29e3gdYjBP06udJBPsBYK8Q7AeE7er1Ar9+Mj/7zf4sA3Ci3zdGptYQ8n1BfX7pU2KJvl/xAmD9/xEAMARBEARBAGgXZScAAIAASURBVOAIADBlhk0mU3POPHny8G+fzp49W2vymQz7pgfgMCHYCRS7Wd5sO8Js+uwl+isAmPrTJpunT5/OP3dUvHhxfg/RvZMM+xUVADb76gAAGIIgCIKgyAGwykRYUQNg6jebbKZstxTNateuHRs0aBBvrpkM+2UEwCoh2AsIe3GQ8BtE9NcIgE+cOJG03rRpE9u7d2/S7I8MwFH7BBIAGIIgCIKgSABwkJmg7QA4zG8B0/GkJsNwYpjOF4GcEwD2A8FBgbBX8A0Cfr1GfwUAHz16FE4QA4AhCIIgCAIAxxGAo5QIK2vWrOKhCU4A0/kyAmCvEOwEhP3CsJP6vcBvvKK/5GzZsuF6TCA/+OCDkQVgO/gFAEMQBEEQBABWnAn6448/zuRPPvkkxp9++qmhP/vssxh//vnnMf7iiy8y+csvv4zxV199lclff/11Jn/zzTcxvnv3boy//fZb7goVKrCpU6dq4999912Mv//++xj/8MMPhv7xxx9j/NNPP8X4559/NrQelsygygjAjKKUZkAXNgQ7gWK3ddhtV1Dw6yf6SzY67/rrg6y/hsyuNf01qb9mxbUsW3/9k/X3iNF9pL/X5HuR/r4a3bNk/b2tv/eN/j7o/46Qjf7e6P8m6f9mGf1d0//9I+v/Rhr9HVXR/zdMAHb6DWAAMARBEAQBgCPxLeBkAWAzCPYCwE4g2AsAk+lzL6VKleJ1OAVgMwi2A+AgINgNAKuGYK8g7NVhwa/q6K/ZObeDXzMAtoNfpwDsBH7jDcBGf2tUAbAT+E2WTyABgCEIgiAIAJwS3wIOqh+w2UNp1ADYDoLpXDzwwAPs7NmzMaBgBBTJHAX2C8FBgrDT9QcJv0FEf50AsMrob9gArL+vAcAAYAiCIAiCAMAAYAMA9toM2gsAN2zYkPXv398QFoICYDMINgKnRINglTDsdp1hwm8Q0d+oArBRdwMvAOwEfp0CsB38AoCdwS8AGIIgCIJSFIDxLWD3AJwM/YC3bt3K8ubNy9fnBYCdNoMOIgocVFNoKwj2AsJOoNhPnVbgawa/YTZ9jkf012vzZy/9f6MKwGZdO8Lq/5soGaABwBAEQRCUIgCcLJ9CSrVEWKr6AZNp+7Nnz87S0tJikmFFrRl0kFHgeECwSlttoyr4TfXob9gAHGQCLAAwABiCIAiCAMDIBJ0SibCMILhLly6sVatWmbJBB9UMOhEhOMog7AV8w4bfMJJfRRWAE6H/LwAYAAxBEARBAOAUBeBU6wd89OhR9vDDD/Nj4ReAg4gC+20KHSYEhwnCdtvhNeqrAn6jEv0N+vNHMgDT38Qs0ndz77vvPv43Kujmz8mQACsqn0ACAEMQBEEQANgSgIP8FFKqJcJS1Q/YbTNoqqNQoUJs1apVpt8EjncyrKCiwHYQ7BeEg4BhJ+v0E/V1A7/xjv5GsflztmzZNADu2rUrEmAlWAZoADAEQRAEAYCRCMsCgBO1HzDtP0WnqlSpwps916xZ0/CbwPGIAgeVECueEOwHit3WbbWtXuE3zKbPfqK/UQDg1q1bs3/5l3/hFtHfZO7/CwCGIAiCIAgAHDEARj/gzAC8YcMGdv/99/MHvd/97ne8+fPLL79s2Cw07GRYqqLAYUKwVxBWabvtCwt+3
TR9jmr01+vnj8jLly/n91Xt2rUj3f8XCbCM4RcADEEQBEEpBMDoBxwcAHvtBxxUM+hOnTqx3/zmN/xB709/+hM7cOCA4TeBEyEKHCYEOwHhsGFYBfgGCb+pFP2le5H+JlLriuvXr4fS/Dmo/r8AYAiCIAiCkg6AKeIHAE6OfsBuo8A5c+bkAFyxYkW+D0bfBHYLwGFlhFbRHzgMCA4Shp2u28m+xAt+g47+qk5+5RSAyRQFjsr3f6PY/xcADEEQBEEQANghAKMfsH8Apv2m/olTpkwx/ByS02bQiR4F9gvBbkHYDxC7XYeTbc+fP39MxmIYThTnyZMnsP6/AGAIgiAIAgAbAjA9gOBBDIbDdYECBZREfMlU3+LFi7mXLFnCli5dypYtW8ZNEcwVK1Zwr1y5kmcIJ69evZqtWbOGe+3atWzdunVs/fr13NSvfOPGjdybNm1imzdv5t6yZQv31q1b2bZt29j27du5d+zYwXbu3Mm9a9cu7t27d7M9e/Zw7927l3vfvn1s//793GlpadzUZP/gwYOaDx06xA4fPsx95MgRbvq0F/nYsWPcx48f13zixAnukydPcp86dSrG6enpmk+fPq35zJkzjn327FlX88vrkdev3zaxzWIf5P0S+yr2XRwLcWzoOMnHjY6jOKbiGNPxFsdenAs6L+Ic0fmic0cW55LOK51fca7FuafrQFwTdH2Ia4WuG7p+xLVE15W4xuh6E9ceXYd0PdK1SabrlK5XunYBwBAEQRAEhQbAZFp+0aJFmR6g9Q/R+gdp/cO0/oFa/1AtHqz1D9dmD9jiIVt+0DZ72BYP3EYP3eLBW374lh/AxUO40YO4sHjolB/K5Qdz/QO6/iFd/7AuWzwEm1n/0OzWdvUbbZPRtsv7pt9v+ZjIx0o+hvqHdfmBXX5oFw/u8sO7/gFefogXD/LiYV6+VsT1I64no4d6/YO9/HAvP+DrH/L1D/r6h33xwG/00C8e/P2Cr2jyLACY6jcDYPmeNYJfca/K96h8f+rhV9yXAqD8wq/+XrODX6fg6wR6CXD92g0Mm92f+vsvCAjW3z96CJbvGfl+MbtPxLVjBMHyfaG/HwDAEARBEATFFYBl+LUDYPFQIz9Mm8GvPqqkAn7Nok1O4dcIfM3g1wx89fDrFnyDAF0VYOwHhPXHxwiEjSDYDITdQLAMwKohWH7AN4Ng+Z6wA2D5wd8r9Or7+soAHET01+je1L+QMroXVUZ+nUZ9nYCvHl7PnTvn23ZA7BSEzaLBTiBYfz8Z3Uf6+8fo5ZH+flERBZbvCfl+8AvAVvALAIYgCIKgFANgpxCcxaD5pNvor3jwMYooGcGvWdNKowhT0PArP4Q7gV+n4KsKeuUmlF4cFAw7AWE7CLaLBgcBwfK1ZvRgbwfB8rXuJwpM951f8HUKwKqjv06aPhvdg2b3nhn8mjV59gK+TqH3/Pnzju0Fht2AcNAQ7LYptPyySFUUGAAMQRAEQZBnAPYaBc5i0HzSKorktOmzyuhvGPBr1OTZLOrrB3z9wq78AG1kv1CsAoTNosFemkSrgOB4NoU2iwK7BWCzJF8CgPX3bhDRXz9Nn/3Cr5uorxn4OoXdCxcu2NopENuBsJtosF8IdtIf2OqlURBRYAHAdvALAIYgCIIgKBAAdhP9jTf8mjW39AO/bqK+bsDXLfDaQa5XuwViLyDsNRrsBYLt+gOrhGA3TaGdRIGdArAV+BoBcNjRXzdNn1XDrxfwdQO7Fy9ezGQ3UOwHhFVAsJv+wFZNob1Egc3uC/394BSAvfT/BQBDEARBEADYFIBVR3/1ia/8Nn12k/QqKPh1GvW1A1+3wOsm+6xdEh4nQOwGhN1Gg1VCsN+kWE6bQgcVBbYDYCfgKz4BpQdgoz77YUV/7Zo+Bw2/TsDXCei6tRUM24GwKgjW31N+m0KrigIDgCEIgiAIiiQAu4n+Pp4Pn04K0vkzjq9f8HULxUGAsF00OGgINusPrI8CG0W3go4CGwGwE+iVwVcGYCfRXwHAbqK/ThNfyS+inMCvAGCrhFcy/OpfIllFfY3A1wnwXrp0ybXtYNgOhPX3pUoIVtUU2k0UWL439PeF0SeR/ACwHfwCgCEIgiAoBQHYCQRbAbBZ9JeW+X/f3oYDMh1fP59oyZMnN14kBOxHHnnEVxSY6nAKvGbg6xaA/UR/zVpk+Gn6rAp+vYKvEdBevnzZsZ0AsRMQNmsSrX/55AaCjV4oWTWFjlcUmK5dPfyqAmD6nwgAhiAIgqAkB2AvUWDx8Oym7y8H4G9uwgGZjq8R2Dr9JAs/P9/dgQM0HWM/UWA3AGwGvkYAbJf8St9VIR7RX7Omz0HBrxX4GoHtlStXbG0HxHYgbBUNNoJgu0iwm/7AcksKVVFgs24CdsmwjABYVfNnADAEQRAEAYBdAbBV318OWF+/BQdkOr5+vknKz8/dd+EAbQXATqLAdgBsB73Cf/3rX3ldQUZ/5b6/TqO/bpo+G/X79QK/VuBrBr1OYNcLEJvBsBkIu4Vgo+4GfppCq4oCu/0kEgAYgiAIgqC4AbCbzM+0zP98+TockM0A2On3Se9F6N+GA7QAYK9RYCMAdgq9AnyFVQCw18zPfps+m8GvPuGVV/h1A75Xr151bTsYDguCnTSFDiIK7LcZtFcA1v9fAwBDEARBEADYFID1EJzFpP+gVeZnDsBfvAIHZDq+TmHXDID/56s34ABNx9joQd9pFJiWdwO8RuCrB2Cj5s92ya/MsrPbZX72Ev2Vmz7nypkTfckDdM6M4+sEgr1GgfWZ0+2SYZk1g1YFwEbwCwCGIAiCoBQBYLdR4Cy66JGT7/5ywPrsBhyQZQC2+u6ome+9oHgVDtAyAHuJArsFYCPwNQPgIJNfeYn+GjV9zsL7qb8HB2Q6vlZNofUJseIVBdYDsB5+AcAQBEEQBIUGwFbf/aVl/vvTa3BApuNrBLZOv0nKAfjzl+EALQDYTRTYLQBbQa9KALZLfuX0u7920V+56TMH4G9vwQFZALCKKLDZJ8ScJMPyC8B++v8CgCEIgiAIAOwIgK2aP4uHGQ7AH1+GAzIdX6ewawbA//3pS3CA1gOw0cO+VTNoMwB2Cr16AFbR/Nlt9FfO/Owm+qsB8DfvwAGZjq9dQiy7vsAi4ZmXKLD+5ZBZM2gAMARBEARBoQCwDMFy9EgfQTJ6kNYA+MMLcEA2A2Cj744a+d4LiitwgKZjbJX1Vh/xMur76AV29f7ll19iADiezZ+dRn8paVMWZJIPPJO8m4RYbqPAqppBewFgO/gFAEMQBEFQCgKwmyhwFpMEOmbRX3qgoWX+7wdnA3f6tvmaXzu2jv3w1tGY8huHV9vWcfe1Q47mi5Lp+DqFXVMA/ugiHKCNANhNFNgvABP4CvsBYCfJr6w+faT/7q+T6K8AYGR8DzaTvNVnkawyQotzbJYMS2UzaBmAVUd/AcAQBEEQBAA2BWCnya9iAPi904H7j//+B9b+hee4G9WswHLneIQtmtCPl711YgP73W9/y768vs+yjvQt81jlssVD2V5VlgFY/31RJ+YA/MG5
yHjd7JGZfG7nYl52YM0M9uNbx/hwiwbVI7XdVhYAbJT11kkyLK8ALIOvGQDrX1wF0fzZT/RXA+DPX4EDMh1ft59FUhEFdpINGgAMQRAEQVCkANgq+hsDwHdOBu7cObLFjH92ZSf785/+yH54/QAfv/vyvkzLfHRhG/vHu8e08fQtc1jlMsUs1/PLW4f5ckZltI4vX9odyv4K0/E1Alujb44aOawXFG6Afly/9jHeu3wyLyuYLye7un85H6YXHIn0kkIGYH20yyoK7BaAjaBXD8BG/X/j2fzZKvpL367lAPzZdTggxwuAraLA+pdCXgDY6fd/AcAQBEEQBAA27QecxSCBjlXzZw2Abx0L3ByApfEfXt3HsmX9C/+Vy9M3zWS1K5XiJtjNnys7e+3QCq2MA7DJOhaN683np3mKPJGbvXV0lVbWq01DDmhlixdi3VvVy7Q9QZmOr1PYNQXgEIHdCdCbldFLDfHCQv/C47Ujq/gLDPHCI2r7JB7yraLAZs2g7QDYDnqNADiI7M/65s92ya/MvvsrR38FAP/3J1fhgEzHl461gGB9Rmi7ZFhemkF76QcsAFj1548AwBAEQRCUggDsNAqcxaD/oFXzZ3qgoWX+690jgfuP//571q5JTc0EqDsWjtLKCZjo99TGGRyMP724hY9vmTeCtahXWSurXKaoYf1nt85mZTLg9ufX9/PxK7sXZADvY3x4w+yhrF7VMuzvbx/U6gxrv40A+OrVq44d1gsKN0BvVkYvHt47vSHmhcY/3jmccf6q8Bca/do3vhcl3rMocvukf9B3kwxLBmA3sCv7559/5nYDwKKJqlX/3yCbPxP8agCMjO+BZpKXAVhlFFj0EZcBWN8M2i8Aq2j+DACGIAiCIACwYwC2iv5qAPzOwcBNUHtqwzTuw6smslE9WrBSRR5n39/Yxctz53iY/1L5C7XLa8vdObmGVS79lFYmhvUe1rUpWzq+d8y08iULs+t7F7J2jWvwdeq3J4z9puPrBniNADgMUHcD9GP7vBhjejFBZfRy4k76upgXGvSyoVOz57TlL+6Yy2pXLBW5fTIDYCdRYFreD/TKlgE4Cv1/9c2fCcJkAD5x4gQyyYeQSV4AsBwFtkuGpboZtF0/YAAwBEEQBEGeAPjGjRu+ANhp8qsYAH57f+DOnf3hTNNa1K3ItswZElN+av1k1u756to8d06uvAe9v5aJYb37tWvA1k7tFzOtdsUS7MrOOax1gyrs+NqJMWU5sj0Yyn7LAHzt2jXXDusFhRugXzttQIzpJQOV0bmhFxbyC41uLerwFxpje7fWLMqitE9ywh+3ybDcArAR+JoBsNH3f932/xVNXq2yP9s1fzaL/goADiOj+trZI2JMGeVF2T/upPNp9GtVB2Wf37l0YsJlkqdjbRUF9tIMWvX3gN0AsNv+vwBgCIIgCAIAG0JwFl3zSbvmzwKA//PNPYE7d/aHYsb/9tpOVqZYAbZ/6aiY8pPrJmYAcFVtvtvHl7PKzxTRysSw3ltmD2JNaj2rjX9yZg2H3J9ubGcLRnWJqZPqCWu/aT16qH3ppZccO6wXFG6A3qzsHgCvjHmh0at1PTaqR3P+8kL44raZkdsnIwB22gzaDoCtgNcJAHvp/2v3+SNVzZ81AH4vPXBT0jw5+VrtyqVZvWrP8rIvr+/lzevp16qO985tvdc8P4TtVWUnAGwVBRZ9vWUA1jeDVtEPmLYzqOgvABiCIAiCUhCAnUSBsxj0H7Rq/qwB8Os7A/fv//VfMuC1sOb8OR9hnZpU18o5AGf8nlwznrVrVEWbfvvoEj6/KLv//9wXUw/50LJRvLz5c+VZ7fLF2dBOz7OCebKzHXMH8el/e2Ubq162KJ+X5mlYrXQGHD8Qyn7T8XUDvEYAHAaouwF6szJ6OUEvLOQXGrsXDmcNq5fR5vng1Er+AiJq+2T0oO+0GbQegN0ArxEAq/r+bxjNn0+e/DUx2u0TgZuDq8G0j85v5cPi9x83j7LPLu9gd2/sZTsXj2VvHVujzf/emU2G9ch+7fBKvpyoT3b65tnswMrJ7Jc3D/F10LqC3m89AKtuBm3UD9jL94ABwBAEQRAERQKArZo/0wMNB5rXtgfu20cWxfibC2tjyj84vpT//vTSJvbF2VXa9L+9vIV9kr5cK9PXQ/7u8npt/us7ZrAdcwZq9cl+ZfcsXk7DHIBD2G8jAL5+/bpjh/WCwg3Qm5XRCwZ6YSG/0CDTC4liBfPwlw/0e2HzlMjtk/67p26aQdPyfqDXCwAbJcDy0v9XNJWVmz/LAGwX/RUA/F+3jgZu3q9cGv/w3CaW45Gs7PtX98aU3zm9ng9T4rWxfdvwzPBb5o+IKTNbR7dW9Vj1ck+zUb1a8UR9K6cO4NP//s4h3sf9hecqsmHdmvO6ixfKx+sLer8FAOujwE6yQQf1PWA/AOyl+TMAGIIgCIKSHIC99gN2CsByIh1a5j9e2ZLUfmXXDFa51JP898er69nycV1ZkxplQlk3HV8noCvOuyEAv7Yt4f3NhTXs9pGF7G8vb47ctgkAtooCBwnAP/30k2YzANa/uPKaAEt1/18NgG8eDty//7d/ZZVLF+UuU7wgzyy/Z8lYrZz3Lc/4vXNqHcuRLSv7+bV9fPzVA0v5/KJMzKf38XVTOfyK8W9e2snn/f7lPWzl5P6sdcNqWhnVQ9tDv0HvtxkAWzWDNusHHGQiLK8AbNS9BwAMQRAEQQBgRxCsf3i2a/6sAfDLG5PeR5cPZ3UrPZ0BwoVY16bV2LcXVoayXhmAxbl143svKDbDAdoJAFs1g/YCwDL0GgGwqgzQRgmwjL7/67b/L8FvDAC/cyBwU5/+OydXaz68agLLn+tRdvPYCl5+L7naAV52L1v8P5eVy8Sw3hP7t2ELx3SPmdaibiV2asNU1qlpLZa2fGxMGUE11Rf0fnsBYCf9gI2+B+w1EZYRAKts/gwAhiAIgiAAsG8AFg/5HLCur4MDsnxO7c6t2UPff7y8AQ7QegB20gzaCwCbQa8ZAHvJAC2SGflNgGXU/5fgS9//99SpU/cA+K19gZsnVtNNG9W9GZs5pENM+Z0TKzIAuIjhslRmVA956sC2bO6IzjHTXqhVjl3cOoP1al2XbZgxIKasYN4cvL6g95uOLx1vq37Abr8HrCoRlnwvBAnA4mUgHhEgCIIgKIkB2Esz6CwGzSetmj9rAPzSajggZ7H4trNjAMaLhMBfUogHfS/NoM0A2Anw2gGwqgzQXhJgiT6mVs2fBQD/55u7A/e9xGr/HP/kzGrep/zY6nEx5bePL/s1W3zmZalMX4/w9d2zeX3fXdvMx984sIDP+9ONbezkugmsZJH8WtmW2QPZ7377W15f0PstA7CTfsAqE2E5AWBxLzgBYK/NnwHAEARBEJTCAGwFwVkMmk86AeB/XF3hynfPLGTn1gx3vZzsa5vGsBNLB/Hhd/dN5XWKsh/OL9aGX98xgX11ar6vdcXTRgBs9NBnZn5+rq2GAzQdY6uHfSsAFg/+XmBX9o8//sjtBYBVZYD2kgBLA+AQkpURcBK
QkimLfJHHc7I5wzpkyiQvZ443KpPrERb1LBnTlWeQp+Xp98z6iVodVEbJ82j6wPYNYpK+BZ2kzQyAnXwP2EsiLC+ZoGUAVt38GQAMQRAEQQBgxwBs1fyZHmo4YF1Z5srzB7fgnzV6L22a62WFV49tx8Z0qc+HyxTJy2YPaMaHaZqYTm5b79kMUB7oeT3xtgBgK8i1eij08oKCvGlyF/bZsTmewf2jw7NY35bV+fBbuyez9BVD+PAvl5bxcTGfXJbILyn0AOymGbRXABbQK9srAKvKAO02AZYGwEmQqE2YErVRwjZ52neX17Evzq6MmUbgTNPDSNLmFICNEmF5BWB9JmgAMARBEARBgQOw22bQWUz6D9oC8KXFrlzsiRxsQOvqbEznuq6XfXfPRP67enRbbfm7p+awX84v5MM0Ta63bd2y7MTifpZ1fnRwKq/DbH2ibjfL0XQqd7t/etPxNQNdJ+bn5/JSV35582j+gmJKr+ddLyv87p5JrFKJJ/jw8pFtWPOapTJNJ68e046/pPC6nijYKwDLD/5+oNctAHv9BJKqDNByAiwBwMmeKO3jk0tY/pzZ2JLRndiJVaNYzxa1WLuGlUJL0iaOebwyQasAYD/NnwHAEARBEJTiAGwGwXYArG/+LAD47xcWOPbZFQNZ3fJF2KeHprJ8ObKyn8/O08ra1i3Dji/qYzievrQfn7/S04+zckXzsQGtqrHRnerEzEfT7v/jH7hpHqM6ZX+YNpnXV6ZwblY476OsxwuVtO15bcsoViDXw7weKh/ftR6vi8q+PDqdVStVgJUsmJN7RPvavB4qo+W7Pl+e10fL0XSaX2wLbSOV3dk7wdHxouOrf/Azi44YmQPwxUWu3DPjOMwf1Izlzzje8vS7J2ezr47N1MZ/OD2XfXZ4esx4+tIB7OVNI9m7uydkgO7jMfOJ8jJF8vDyX84tYKtHt+EvKay259q64Xw5Wl6eTsufWzGIl9P4e/smxZRfWj2El9N8Hx2Ywn9FGc17YlG/mGXE/tF65P20swBgr82gzQDYDnb9ALDKTyDZZYAWSZf0CbBiADgFkqV9fGIhmzGgJc8iv3xsJ/bXl9aFlqSNjrUXANZnghbXQBDfAhYAHET0FwAMQRAEQQBgtQB8fq5jt61Tmu2d2YUPN6v+tDYsyo4v7JVp/Oczs1nuRx5gF1cN4NPvHp+WAZGPsNEda2dajqaJ6UZ1ym5Y8Sm2amSrmHkXDW7Kh4s9nl1b7vtTMzOA9wleTuMd65dlk7vX15br2qhcBujm58PrxryozUeeP6AJG9CyqlZ/3XKFeX1Oj5cAYDfQqwfgv1+Y79jfp8/mLxrot3HV4uzAnO5a2apRrdnoTs9p48cX9c6A19J8+OausXw5Gm9WowT3vZcC/5zv6roh/KUAvaCgsg/TJvE6RR1G20IvGmqVKaS9VLi4ehAv+/TQlIxzlINvIy1PpmuEyn4+O5evn5bt07yy9rKCtpHK6WUGLUtl9Lttckdt/xpWKsqn0bDTY6YCgL3Art4//PCDIQCr/Aawl08gWWWA1gAYydQCTdJmBMBuM0HHE4CNWrcAgCEIgiAIMgRgN82gaXk3/X81AD47y5G/PDSR5XjofvbWluHs5vaRbNWIFhlA+KRW3rbOM+z4/B6Zxq+uHpABTPli6prfvzEb3aFWpuVomphuVKds2hZ5nNZTq0xB9uHesRyA5bJN417kddFw7kf+wr4/MU0ro/0R29ewYhE2oEVlvm/k6T3ra2W0/IFZXRwfLzIdX/3DoJwp1c78/Jyb49irRrbMAPYqfPjA7K78JYFcxl8u/Dp+fEHPe8ckY5heZvCXB7+Wze7b6N5LAd18N3eM1qaLOkWZ3tN7NWB9mlXSxi+uHJABso/x4R5NKvByUbZpfFt+Xmh43ZjWrHGVYlrZjfVD2O9++xu+7re2juDn9ufTs3jZl4cna8vRthDE3z021dUxo2NslPXWaT9gPwBM0CtbJQCbfQNY5SeQBAAjmVqwSdr0AKzPBO3nW8BOAFj/LWCz+8Br9Neu+TMAGIIgCIKSGICvX7/uOQrsFYD/dmaGI8/r34gVyPUQq1Q8n+YH77+P3dw2nJe3ea4UOza/mza/GD+7pBerVbpATF3LhjbNAN2amZajaWK6UZ2ysz3wf2LGr68dyKqVfJzd3jmSg5ZctnNyO14XDefKAKafTk3Tyt7cPJTvCw3XebYQG9y6Kls5ornmtJmdbLfFzOKh0Axw33jjDUPHAPDZmY5dpnAutmhQE3Z8fnduOj93do3iZasy9oWOrZiXytvWKcWH770UmKqVfXlowq/gHzvfze0jtOmiTlGmN70coWXlafTSguomiKW6xPSf06ffA9mM4a4Ny7KdU9pn2i+an9ZXrmge7TohU52irE/Tiq6OFzlMANYDbyIBsFECLA2AXSYeo8zu6yd08pW87OCCfmz12A58+OjiAVpyNqpbTtRGSdrk8URM0uYEgO0+hWQGwPpvAccDgO2ivwBgCIIgCAIAWwKw0+bP9FDDATh9qiMXyPkQu719eMy0yV1rs8GtKvPhLg3KsJXDmmpl5Z7KzY7N7cK+O0qR4z+xD3aN1MrqPFuQjW5fnQ+3qV2Sz0fD03rUYcPbVNXmk8v0Lv1kTnZhWS9tfHynmtqytK3X1/SLWR/VRcONKxeJ2U7ajkrF82r7Q/WIsk/2jmavrh9guy1mzqL7NqYZ8JqZn5/T0x35yirqZ/1gBqiX1FzuqQxYbF+Dl68c3kwbJh+e04XPQ8O03DeHJ2plH+wede+lQMbwsXldtflubh2mTRd1ijK9G1QozNchTyNYpfXQuXt1wyBtOk2jFxM03KNxObZ2VIuY5QrnycbXTdMbV36Kb5Ps745NzrR/Ti0DsJdm0FYAbAe8QQMwQY4XAHb6DWANgF0mHpvRtwlP1HZiyQDPycsoAduYzvX4cNfGFdniYa0yTSdTkjaalshJ2mQANvoWcDwAWP4WsBUAq2j+DACGIAiCoBQDYKfNoD0D8KlJtj42pxOrVfrxTNM/2DmM5cj6J/bT8QnsyopeLFe2P2cAcSXWoHwhli/7A3w5mm/lsCZ8fPiLVVidsgV4+eh21XhZm9oltPmur+7D6xjQvIJWViz/IxnQlUdzs6pFedmFpT0yQDcrn5fmK1kgO/vm4Bhedmp+F15PqxrFWbWS+fj6aB4qu7llUAZQPcwaVyrMt4WmU71URstTPR3qluLbSusW2yZvp1PT8bUC3DfffNPQMQCcPsWRuzQonXGcX4iZdnv7MH4cfjqRcQ7nduYvJUTZgOYV7x2TjGHa31m962ll/3wpMIUvJ+aj+orlf1Sbj9YnyvReNKARP/5iPG1Gh4xzkZ8P00sG2l55fbSdYj4C5G8OjdPGeRPorUP4+unlhiij341jWmjbcu+lyhRXVgHAbkHXLQDL928kAfjSIlcunO9RtnxEa9a8RsmY6Z8dmc5+ODNXG//q+Ex299Rsbfy9/ZPYtqmd2bt7JvAEbPcyxv9zPvqd0acx69uiKp+XyihJG81rti20vn2zu7OjC/
uwX84viCn76OAUvr7Xt47m89H26ZdLXzZA2zZ5WZq+a0bXmGXE/un308qqAFifCTpeAOyl+TMAGIIgCIKSHIC9RoE9A/CJCbb+Yu8IbqOy21sHsZ+OjuPDH+wYwraObcFubhrAPtk9jH13aIw2H03bOaEV//0mbZRWn34+Gj+7sIs2TPPLpvWJeamevZNfZKfmddK2QS6j6VTHskGN2IBm5bUympfWQfW9uqYPq1YiX0wZLUfbSsvK2yVvpxPrAdgMeM3Mz8/Jibb+5sBo/iLiu8NjM5UR5O+d8iIfblb1qQy4fIzVeuZx/gKgTe2n+fRPdg/n4E/HgTyte21WqVgeXnZsTkdtPjK9TKAXAx/sHMpWDm3M7vvDv3J4lX12YVf207HxGWBdktdL20AvHV5d25fXQWVUJ70UofJ5ferdA+Bf1zG6XVU+Xu6pXKxL/Wf4783NA3kZnUt68dGqRjH+S9tA0+mXlnNyvGQDgP0DsJtM8pTVnZKfUcZ16rNNGeXNMslTpvhVo17kwzundeGZ3Ye0qcGTnVEiNpFJXsxH81Dmd0qERhnbRZ2iDr1Ftvg+zauwjg2e5cuJ7aHM9ZSYjdZHSdnuJWy7l0n+5q5xfDnKPE+m6feSuC1g36fP4dvWqvYzfFma7+q6odq2UF20jbRup5nkkxmAnUR/AcAQBEEQBAC2BWAn/X8FAP/1+Nikc+/GZdikztXZ1/uHs3c29mWlC+VgJ+d24GUrhzRibWoVZ+9vH8g+3zuUdahTgs3tXSeQ7RAArAfbt956y5HvnZ9xSn1ry4CM/R7musyrP941JOMc9DMs+/bgKG0eAmSzcoLhr/eP0KbTMNUpT/NqAcBe+wGrAODvv/+eO2EB2EUmeUq4dmBONy3r+/ReDU2zvlO5yPROgHlz5+iYekTGeHk++tVnkpezxcumDOVyJvvZfZ/Xsr4TuN7YMFQro8RtIkN8q1ql2LJhLbSye0ncHuDDlEyO5hVltD88qduv20Lb7eZ4JTIAq2r+DACGIAiCoBQEYCfNoD0D8NFRSeev9w5hg5uXY5WK5mI1S+VjG0Y0jimf26s2q/Z0Hl4+rUt19uOhEYFsBx1fK+h9++23DR0DwMdGJ50PTX+RFc79ENszsQU7OrMNa1CuAJvUqRove3/bAF62dlgjXja8dQXWrErhwLYlHgAsgFfvLAnaB9hpVvRP08bzxGyU2Z1MmdYpsZ5Z1nfKCE/zUNZ5SoQm17VzSgctY7yYj4bpV59JXpTpTa0X5HFaDyVoo+28l5Ttn2W0XSKTPPWZv3t0ilZGWeXF/DTPvc+uPcPdqlaJmLK90zu5ziSfqH2AVcEv/U8EAEMQBEFQkgOwlyiwGwAWDzUcsA4PhwMyHV8nwGtmfn6OjEhKX1jQgXVvUJJ1eK44Wzu0YUzZO+t6ssHNn2VtahZls7rXYN/uHxLYdtAxlh/4rZpBewVgM+ANGoDDygLtNOHYtB71eBI1OVEb9ekWydJoXE6cNrhVFZ7c7PaOEaxkgRwxdW0c20pLeka/NJ9RsjeqU5TpTTD+08mp2jglZqP+55RUjWeZl+bdO62DlvCN+sHTNslJ40QStw71SrPxnWrFJGmjTPhiW2jcbZK2RMwCrTL6CwCGIAiCIACwKQC76f8rAPiXQ0PggCwA2Axw33nnHUPLAPzLoaFwgHYDwEYP/zIAOwXdRARgq+8AO004di+T/LCYaVvHt+KZ2UUyNErORsOUuI1AUyR2o2UpIZsooyRtIukZ/Yr5KClaj+fLavVTkjZ9cjhhSv42r1+DmERy97LAT2G1Sj/Blg1urCVbo/WJhG+0PjmJG61PJHGT94d8fU1ftnPSi9q2iH1wk6QtEb8DrCr5FQAYgiAIglIIgN1CsGcAPjAIDsh0fJ1Cr5H5+Tk4GA7QKgDYL/h+99133GEAsIBgMwCWm0E7BuCTk2ydNq0dT4amn/7TsQk8gRtllP9k9wieFI0StZFp/pVDm/D5Xl3bjyc9o4RoVDa+Y417meQzyuhXzPfFvlG8nJK10ThB54P3/3umRG1URuujpG80P/VBp3lpe6iMtofKaF6xPg7Av25zl/qleRktt3ZEU61O8uCWlfh0SiRHCeNo28W28EzyDo6XsBEA0znxAsB0DXgBYLk7gCoAdhP9BQBDEARBEABYLQCn9YcDsgzAMtjevHnTke+dnwFwgI4XAAvolW0EwPJ97AaA5X7AAoDlKLAbAJabQQsAFhBM2/zXE+OV+tbWgezHo2MNy97Z1N+0zKs/3j2Ufb53eMy093cMZt8eGq2NbxnbnHVvVFqb/+u0kTHbVLrQYzHLUznth99tMwNgOjdWAEznNl4ArLr5MwAYgiAIglIYgK2SYQkAdpoASwDwz/v6wAGZjq8V+L777ruGjgHg/X3hAC0DsJdEWE4A2Ah2/QCwuIdVALCAYDMA1vcDJhCTo8AcgI+NSTpP6lSd1SnzBEub0pptGdOUJ2Z7aUV3XnZoehs+TtOpvFqJvGzl4IaBbAcdX3HM3QIwnWM9ANO14AWA9feAGwD22/wZAAxBEARBKQTAbqLAngF4b084IMsAbAe9Rr53fnrBAVo1ADuFXa8ALEeB3QCwvh+wgCO3AKxvBp0lSTPJk9OmtGS9ny/NM8q/tLxrTNnlJZ3Z8FYVeDnNF9Q2CAA2ygAdFADr7wU3ABxE9BcADEEQBEFJDMAvvfRS+AC8uxsckOn4WkV8b926ZegYAN7dHQ7QKgDYD/QKf/vtt54BWECwGwDWN4MmcNJ/CskxACPje6CZ5J0CsDiHdt8AdgLAcncAPwCsIvoLAIYgCIKgFAdgMwj2AsB/uf9P4sECDsAPPfiAK/DVm5bHcQzW999/f1wAmIBXby8ALEeBBQDrvwXsBIC9fgs4CzLJB55J3uobwHoANkqAZQfA4prRA7DV58AE/LoFYC/RXwAwBEEQBCU5AHuNArsFYP3Ds3iANooiGT1IGz1MGz1QGzWtNIouGT1gy9lm09PT2enTp7nPnDnDffbsWXbu3Dnu8+fPc1+4cIFdvHiR+9KlS9yXL19mV65c4b569Sr3tWvXMh1v+diKBzZ6iBN9215//XXuN954g/vNN9/k1n/qSG72bAa+t2/ftrRRRFj0D9Z/LolM2yC2R2yf2F65b57+QVR+2BTHg46NOE7iuNExFMdTHF861uK4i/NA50ScH3G+6NyJ8yjOq/6h3iiyZdS80+gh3+hB3+hh3yjiZdTv0c23gJ0CsBHw2gGwnAnaLJGdHoD1maC9ArDTTNAcgA8MhAOyDMBeM0DrAZiuCT0Ay/eCGwCma1UAsNfoLwAYgiAIggDAnqLARgBslQE6agAsw6/+UysEUAKAZfjVAzABmR6ACdz0AEyApwdg/XEV8GsHwASeMgCb9fk1A987d+7E2AyE9Qmy9BAstsMOgMWDqf7BUw/AAoJlABYQLAOwgGAZgPUQLM6fDMFyYh+zh/soArC+CagegJ2ArhMA1n8KSb6X4/EpJLNEWByA0/rBAdkIgO36/zr9BrAegJ18A9gIgOmadQLATuEXA
AxBEARBAGBHUeCoArD8QG3UtzDRANgo+qsHYCfwqwdfKxC2gmAjANZDcCICsPyAb/eQH28A9gO9RgCs6lvAQWWClqPAWXgm+d5wQKbjG2YCLDsAlltABAXA+v999DcJAAxBEARBSQ7AXppBRwGA5YfpRAFgu+bPTgDYSdNnM/B97733YmwGwk6aQtsBsJdm0FEHYLOmnskEwKo+heQkE7TbRFgcgPf0hAOyGQAb9f9VkQDLyzeAjQBYVfIrADAEQRAEpTAA20Fw2AAsN6WMNwDHq/+vl+ivGfiagbDXKHC8+gFHAYDNHvijCMB3797V7BSAg8oE7aUf8MN//nckUwsykd6f77Ps/xtUAiw3GaCdALDX6C8AGIIgCIJSCIDdRoFTEYDjnQDLKQCbwe/7779vaCsIjjoAGyXCigIAm0W9wgZgGXj1prpWrFgRSCZofT9gOwB2+zkkOVGd2T1q1DrD7t406pqgvzfl+1O+R91YXt5JiwmjbgPy/WLWYsLuPtHfI26bPzvp/6siAZYVAKtMfiWOLwAYgiAIggDAmR4MgwRg+QE6XgAcpQzQTps/W0V/Bex+8MEHMZYh2EkU2G0z6Khkgk4FALaCXacA7DYTtJNEWKr7AZvdp1ZdFMzuT6f98426KOhB2A6G9fPqwdcN/Bp1GXDaUkJ/j5hlSvfT/1efAMtL/1+zDNB2AOw3+gsAhiAIgqAUBmCrKDAAWC0Aq+j/KwOwUfRXQO+HH37ILUOwURRYFQAbJcKKwqeQEhWA3YKuUwBWkQladTNos2zQTltqqExSJ9+nehA2gmEj65cxasarKmGc2+iv3eeP/DZ/VpUASwbgIKK/AGAIgiAISiEAdhMFTiYAjsI3gMMCYAG/MgQDgFMLgL/55htuGYDDSoQVdjNoL02hzSDYKQg7sRX4qoJfJ9Fflc2fvfT/1SfActL82QqA/Xz6CAAMQRAEQQBgyygwABgADACOPgAL2DWyGwC26wesbwZt1A/YTzNofTIst1Fgu6bQZhDsBoTNoNio3Aza9HBmBL92TZ/9Rn/dZn+2a/6suv+vAOCgor8AYAiCIAhKMQB2GgXOgibQaAINAI4UAFvBrhUAu02EFWYzaK99gf1kbDe7X40+B2cEYXbAawZrXuHXrulzFKK/bpo/6wFY9E0XAEzXqxEAq4r+AoAhCIIgCABsCMEAYCTBMgLgKGSBTvYkWG5B1wqAly9fHql+wE6jwCqaQruFYCsQtgJip4BmBGNe4FdV9DdMADb6/q9ZAiy/AGyV/AoADEEQBEFJDsD6Jn9uosBZ8BkkfAYJn0FKSAD++uuvufUArOJ7wG6aQcsQLJrWOkmG5SQKbNcU2ioplhEEm4GwFQxb2QzIzOBXbJ/VfeGk6bOX6K9V8iu/zZ+99P81AmCV0V8AMARBEAQBgA0f+lIRgO0yzEYFgK0g2MxOor9RBGC5iWeUAFh++I83AAvg1dsPAEc5CmzUFNoKgvX3rRsQNusiYmcrCDOL+rqFXydNn6MQ/Q0TgJ1EfwHAEARBEJTkAOw1Chw2AJs9PMcbgFV+ZkVFP2CzKLAZCOvLjaK/Kvr/WgGwm+y2Vg/68QJgOfIVTwA2A10nAGz3PWCVzaCtkmF56QtslxBLFQQb/a00Aiqn1tclr8foZZAX+DVr+uwl+muV/EoGYKsWEH6//0vXKV2zMgCrjv4CgCEIgiAIAGwIwVEAYLsIUrICsNsosBEIm4Gvl+iv2wRYfj7vEm8Alh/84wXAbkHXCoCXLVvmKgqs/xySXTZo+V62epHlJQps1hTaqvuCEwj2AsJ2fzet5rcCX1Xw66Tps+rob1DNn70CsJvzAgCGIAiCoCQHYC9R4KgCsNEDtF0UKcoA7KQZtFMINgJhfbkV/Hpp/hx1AJYf+I0e+gHA4TSD9hMFdtsf2AsEyyCsh2E7ILYCXhl6zcDXD/z6afpsFv2VAdiqC0BQzZ9lAA4i+kvnBAAMQRAEQQDgTBBsBMBGD8pm0aJ4A3AUvwXspB+wvhm0XVNoMxA2A18nTZ+tmj9HMQO0UcQrigAsZ8I1yoYbNACrbAZtlQzLSxTYSUIsFRDsFoSNYNiJ9XXo12EGvn7h1y76a3Yf2EV/rZJfyQAsEqc5zf6s//wRXateANhN9BcADEEQBEEpAsBuo8BuAZic87Ec4sECDsD58+XLBMF6ELYyLY/jGKyzZ89uCAJhAPBXX30VYwHAfqLAZi+2VEWB3TaF9gLBVtFgKxA2gmE3NoNeGXzNor5BwK/Tps/xjv7S9UrXblDRXwAwBEEQBAGADd+eewFgWub/fXcHDsh0fEWTZTcgLMpxfsI5R2EAsB52jawCgN00g/YTBfaaFdoMgs1ac5hFg2UQNoNhr7YD36Dg167ps+rob7wA2G30FwAMQRAEQSkEwG4g2DMA370FB2Q6vmZ9gp0Y5yecc6QCgJ0ArhMAXrp0qeNm0F6TYTmNAqtoCu0Xgp2AsB6G3UCx0XL6up2Ar1P49dsHPojor1XyK7vsz3YArCL6CwCGIAiCoCQGYDfJXMwA2OwB2RSAv3kHDsgyAMsQbAfDohznJ5xz5BeAVcCvHoCjGgW2awqtEoKNosF6EDaCYTMgNrPR8vp1yOsPCn7tmj6bJb5ymvk5qOivGwD2Ev0FAEMQBEFQigGwUwjWA7CTTNC0zP989SYckOn46hNjGYGwmXF+wjlHXjNA073mB4C//PLLGDsFYKtkWHIUWEC92yiw/F3goD5tZtYn2A8IWwGxnc3qcgu+KuHXTdNnFdFfN8mvnACwqugvABiCIAiCkhyAvUaBPQPwl68p8e0rB9m6hZM0n09bH1O+a/Uc9vXbZ5Wtz60/fz2dffzKcT5Mv/vWzw98nTIAG4GwnVWeH9j8HIUBwHrYNTLVtWTJkphm0DIEiyaofqPAZt8FdpIQy64ptFVSLDMIdhoNNmsabQXDbi3XKUOvGfiaRX3N+vwG+e3rsKO/AoDpenUCwF6jvwBgCIIgCEpBAHYCwZ4B+PNXlHjd/Ins+eeq8l9ymxfqsfo1K7G/fXCVl3dv15TdvpSmbH1eto9Mw5cPbmCj+3cJfJ10fPWfSHICw6Jc5fmBzc+RCgB2ArhuAdhNM2iVUWCvWaHdQLCTJtFOQVgPw07B2GwZfd36ddtFfZ3Cr5d+v04SX/mN/srXuT76a9T82QiAVUZ/AcAQBEEQlAIA7CUKTMsbPRxb9QPmgPXZdSVeN388Gz+oW8w0GhfTjm5ZxH66dZ4PH9w4n/Xu2JzNnzhYm0bet3YO69+lFS8/v281LyPT8KZFk3jZS0c2avN//cZJNnlYTz79jfTt2nSan6ZNH9WHL3/74j7WufXz3FT2+avH+K+Yn+qm7dm2bKqy40EWACzbDIaNrPL8wObnyCkA6/v/CgBWAb96ALZKhhVWFNiuKXSQEGwGwk5g2A6KrWDXDnqdgq8q+FXR9FlF9Ncs+ZW4Vu0A2E/0l/4fAoAhCIIgKAUB2A6CvQLwf39yVYnXzRvLxg3sEjPtx5tnWMH8uflwlWdLsvcv72OX0tawzq0a
sbfP7GCLJg/hw1S+bPpw9mKT57TpOR59mM9PzvrAn9m2pZPZtUPrWZGC+dhXrx1jf71znpUtUYTtWzOLT69Qujivm+an6a+f3MrLqP7PXj7Mhvdqx031p+9cyto3q8/X27tDMza6X0c+vXubJnzdqo4JHd8333yTWw/CVjAsylWeH9j8HHlNgKUCgL/44gvNVNfixYuVR4HdJsRykxVaJQTbRYPtQNgJFDuBXTfg6wR+xX5bNXt2Cr92TZ9VRn+dJL+i65SuVxmAVUd/AcAQBEEQlCIA7DYK7BmAP7qkxOvmjGbjBnTOND33Y4/w3yrPlmDvX9zDh7994zhL376Yu0iBfHxaqaKF2F9vndGWq1+9PJ+f/ELdqtr00X078OV2LZ8as75rBzPAumUD9vKR9Xxd9PvjO6dito9Mw7R8+2Z12X+8f15bP5nGv3r1iLJjQsf3jTfe0CDYCoaNrPL8wObnKAwAlkHXzHYArDIKrKIptFVSLLvEWG5B2A6G7YDYqfV16tfpBny9wq+bfr9Omj4LADa7zt1Ef2UAFtFfGYCdwK/b6C8AGIIgCIJSGICtIFgGYKf9gDlgfXheidfNGZkBpB1jpr19ajOrUvZpPky/71/YyY5smMNqVCjF5yVzQM4oL1W0YMyy7ZvW4fOTaVhMp2XSty/k61s1Y5g2neZ7rnIZPrxr2eQMGK7PypYozAZ3a6ltH5mGaXmq86tXDmrbF4QFAAvrQdjOKs8PbH6OvPb/1QOwE8h1CsBhRYHdNIVWBcFuo8F2IGwEw35sVL8d+Dpt8uwFfu36/fpJfKUi+kvXrBkA+4FfADAEQRAEpSAAu4kCewXg//v+GSVeO2sEG9e/gzb+5Y39rMIzT7ETm+bw8cpli7P3zm9j7V94jt04tIpP++jSTpY7xyN8+IU6ldmR9TP58A9vHmHZHnqAzy+WEfXSOtK3zmcX9yxhjWpV0KavnD6UTRrUmc9/df9ybXr+3Dm07Vs6ZRAfpuVFncWffJx9dnUPH37rZMZxmTda2THRA7CR9dArl6k8P069d8VkfqyMpp/dschRHb+8c5wtmtCf1av2LKteviQb1bstvx7C3hen58gvAPsFX+EHH3xQPOjDcEKZrl0/TZ+tor8AYAiCIAhKYgC+cuWK5yiwZwB+L12J184algGz2TjoVij1FPeRdTO0cg7A57ZmAPFsVuqpAhkAWpub5qPyz67s4sM0H4FT7cql+fxkmk/UM65fe5a+ZS4fHtW7DQesFvWr8WV+eOMQN03r3roBh+phPVrxeT+6uJ0VKZCHLZ08gC8v6jy7Y4G2PbT+t06sV3ZM6Pi+/vrrmu1gWG+V58ep6fj+7re/ZRd3L840nc6x3fJ0/Ok40vG/cXAlP56zRvbg18a76ZtC3x8n5ygqACz7888/9+TPPvvMkT/99FNX/uSTTyz98ccfO/JHH31k6A8//NDUH3zwgaXff/99Q7/33nua6TyJ4Tt37pj69u3bhr5161Ymv/vuu6Y2+7a3VdZ3s1wAsq1ajBj9DZH//gi/9tprhn711Vcz+ZVXXomx/lNHQSW+AgBDEARBUAoDsNMosABgN/2AOWDdPhG6f3gtjX10fmvMtC+v7Wa/vHlIG69XtSz77PIO27ru3tibqS4yTaM6nWwPrfe9M5vYP24eVbqfdHzpwdLoIdQMiuWyeJyfcX3bsn4dmrCC+XLyYytPXztziDb+7qkMsB3RjVs+TzRf91b1M9W7cc5wVr18CT5M85/dNo+9dnglmzSwA1s5dVDMuQ/TMgC77f9L95oqAPYKvG6gN4rgGwb8ygAcBvyaAbAb+HULwGHArxUAB5H4CgAMQRAEQSkCwF6jwF4B+L9uHY2ED6+ZwsqXKsIWjuvF+rV/nrVrUjMy2+bVAoD1tgJiPQCHvc1j+7Zha2cMZnNHdWcvPFcx03QavrhzHivyRG62dGJfPl/BfI+xN4+u4mU0fPPkGsO6s2X9C/vy6g52atNMVrxQPtaoRjleZ682Dfm5//s7h+JyjsIGYD+w6wV6gwDfoOHXDnzdwK8AYFXw6yX6GyT8GgGwG/g1AmDV8Osl8RUAGIIgCIJSGICdRIFpeTk5jpNm0Bywbh6KjD+9sIntWDiSnd0yM1Lb5dV0fOlh0urB08rxOD9j+7Rma6cP5MONajzLlk7onWl6+ZKF2cXtc7Rl9iwZw1rUq6Tts1ndlUsX5ef21MZpLHeOh9nf30rTymhdacvHx+UceW3+bAXAKiDXD/B6gd4ogG8Q8GsHwMkGv8kW/QUAQxAEQVAKALCXKLBnAH47DQ7IAoD1dgXAIW/z2N6t2Nqp/fnwlxc3sfy5HmXX98yPmZ4t659jlvn++g4+Hw3nyPYg+/TcBsO6C+bNwe6cXMVOrZ/C2j1fPaZs6fiefB3xOEd+AVg17IYNvarBNx5Nnq3glyDXDIABv69GPvpL/w8BwBAEQRCUggBsB8EyADttBs0B6619cECm4ys/RBo9bFo5HudnbK+WGaDbTxs/u2kqK14oLxvU8XltOoHslxc3aPO8un8BK1OsAB/u1uI5NnVg20z1Xtw6gy9Hw6fWTWa1K5TItN6Fo7vG5Rx5af6sGoC9wK5f6E0k8PUS9ZX7/BoBcDzgN0r9fqOe+ErALwAYgiAIglIEgN02hfYKwP/5xu64+Pqu2WzNlD4xPrNhsrL6f7q+jV3YPC1u+0fO8uu3MY0iKkZgrJ8ej/Mztmdzfi7kaRP7tmJ//Pffa9NHdmvKujWvxf726g5+nBtWK80WjOrCyz44uYJHgZeM7cbLadrJtRP4NPoV41TfoeVj+PgbaQt4+e1jy+Jyjrz2//UKwH5gVwX0hg2+8YBfPezqATjR4dcpAPuF36CaPruJ/gKAIQiCICiFANhNU2gjALZrBs0B6/UdcfHYns1YpybV2JrJvTSfWT9RWf23jy5mlZ8pHLf9IwsA1tsOiGMAOORtpvNwaNmomGl/e2Ura9eoijadxge2b8BKFs7HihXMzab2b53p2NP8ObI9wF27fHF2YfNkrfzkmnGsdf2KrG+buqx8iYK8Hv06wzxHXps/WwGwCsiNB/S6BV8/Ud+w4FcPwEHDL5o+q43+AoAhCIIgKMUB2CwKLADYTT9gDlivbouLx/ZoytZM6plp+hv75rBXds/Sxt85MJ9d3z6dD59ZN571fbEOmzmoDfvu0jo+jcpofqqP/M35NbyM5imYJzvbMXtA3PaRji89GFo1IbRyPM9PkD65eixr17ByJLaFjrFV9NcKgOleo+WDgF0VwBtP8A0y6usWfmUABvz6z/ocRtNnADAEQRAEpTAAO40C6wHYSTNoWuY/XtkcF4/p3oQN7diAnVg1SvOtQ/PZ63tnsVrli2nztWtYiZ3fMJ4dXT6CNaxair2yazrbPL0Pq1vpaa2e6mWKsMubJ7Hl47qwJjVKsx+vruPzFCuQiy8br30UAKy3GwCO17YHaTrXdF6jsC12AGzV/Fk1AKsCXjfQGzb4xgN+BQADfhMr8RUAGIIgCIIAwJYQ7AaABQRzwLqxIS4e060xK//
0E6xdg4qad8/pz8uqlynM3j8yn/14eTUr81R+Pq1hlZLs6LJh7NbBOdyVSxVin59awuvZv2CgVi/VSb9innjtH5mOr3gYNHpotHM8z0+Q/vrMMvb67umR2BY9ABtFf4MAYJWw6wV6owq+fpNduQXgqMGv26RXydj0GQAMQRAEQSkEwJcvX/YMwbT8ihUrXDWD5oB1fW1cPKZbI7Z6fGfDMpo+pU9Ttn5SV7ZgWBs+rXLJgmxoh7p8OeGvTy/mvydWDNOWpfno99aBmdpwvCwDsBfH8/ykip0AsFnzZzsADgJyw4ZeJ+CrorlzEFFfuc+vEQDHE34Tsd9v2ImvAMAQBEEQBABWAsByM2ha5h/XVsXFY7o2ZKvHdTQs++HCUlbmqXysboVi7Kv0BXxa35Y12K5ZvbV5zq0ZodVzYtkQbXqlkgX470dH5rDyxR+P2/6R6fgaPQS6AeB4bn8qWACw288fyQAcBuj6gd54gW9U4NcIgFMNfhMx8RUAGIIgCIJSBIC9QrARANs1g+aAdXVFXDymSwNWIPcjHFiFp/RuopX3bFaVta1fThv/7NgcPk/zms9kgHFRNqJjXa2eE0sHafNxAP51uNazRVjXJpXjto9mAOzUXs8PHbu3dk/2vN3vHZjOjzMNp68YwhYPf5EP3z2zkO2a1VObj8qWj2obt+Or6hx5bf4cFgB7AV6v0JsI4OsEfvXRXhmAg4Zf9PtV1/QZAAxBEARBKQ7AVgmx9ADspBk0B6zLSxPK7+2fyr46MSchtpWOr9lnqxwDsMt1nlgygD14/x9Zz6ZVPG/3u3smsUolnuDDB+f1YSM61Mk0nbx6TDvWtt6zCXcN6c+Rl+zPQQKwV+D1A71hg2+Y8CsA2Ax8kwF+k7XpM5n+JwKAIQiCICjJAdhLFFgAsJtm0BywLi6CAzIdX6vM3Xb2cn6a1yjJDs7txXI/8gD74fRcbfpbO8axa+uGa+MfHZjC0pcOiCmf0qMhWz6iNXt96+gM0H08Zj76ndGnMSuQKxtbPboNu3tyNv9tW7es6bZ8dWwmmz+oGa9XXjf53d0T+HQqp7q2TelsuBzNR/tD81DZL+cWsE0TO7IxneqyfbO6Z9q/lzeNzLQuu3Pktfkz3Wt+AdgP7IYFvU7ANwpRX6vPHJkBsBX4RhF+o9rvN8joLwAYgiAIglIEgN1CMC2/fPlyV82gaZm/X5gPB2QZgL3Y7fn5MG0SK5z3UT7c9fnybNWo1loZDY/u9Jw2fnxR7wx4Lc2HL64exPLlyMrmD3yBze7XmFV6+nFueb6bu8ayPs0rc7Cmer48Oo3XKerQ+9NDUzJg+WE2uUcDtmx4S1ayYE62c1pnXnZj4zBeD61r0ZDmrFqpAnycyqhe2ocR7Wvx5WqVKcS3jdZP5c1qlGA9XqjI1924anFtn2i8XNG8rG75wnyb3Zwjr82f3QCwCtBVAbzxBN94wS+BrhEARxl+0fT5n/ALAIYgCIIgALAhBMsA7DQbNAesc3PggJzF5NNWrgDYxfomd6/HTcMXVw5gZQrn1spWjWzJRnesrY0fX9CTta3zDB+uVuoJdmRed61s59SOGQCcP9N8N3eM1qaLOkWZ3iPa1WSz+zbSxu/sHpsBuX/hw61qlWSbxrfVys4u66uVzR/QhA1oWUUr+/LwZHbfH/6Nr5v2ibZVlP18ehZf7vuTM/i2yPvr5hx5bf4sAFgl3EYBeqMEvl6bPOv7++oBON7wmwj9fo0A2A38+k18BQCGIAiCoBQEYDdRYCsANosCc8A6OxMOyFksvu3sxG7PD8Fg14Zl2egONbnv+8O/shvrBvGyVSOa82li3uPzu2fAayltObme709MzQDdfJnmu7l9hDZd1CnK9K5VpgBfVr99n6aNYwVyPcTr0pfRL9V3YFbnmDJaJ81P66P5aFz4/j/+QSsb0rqqp3Mk4Neq+bNR9DcoAFYBu4kAvkFGffXJrmQABvwmTtNnADAEQRAEJTkAX7p0yXMUWACwVTNoPQTTMn87Pd2Xd05uyz7ZN8bTsn2aVuC/V1b1YxeW9+HDN7cO0+r7YPcodmphd9/bGC+7falhBMBO15U2sxOrVDwfWzm8meYejctxUzmND25VRZt/77QOrM1zJflw4TzZ+LEWZW9uGsLrouFj87pq89G5EdNFnaJM71Y1n+brEOM/nZzKcjx0P/+tVbqAdr7Jt3eMYLkywJaGaRvn9WsUUxeV0bq3TniRdckAfKP10baMbl/D0zlyGv3VAzDda34BWDXsBgW9YYKviibPegsABvwmVtNnADAEQRAEpSAAO40C6wHYSTNoDljpUzz79vZhLNsDf8wAj+qelq9UPC//XTsyA9iGvcCHqa5jczvzYfptU7uEr22Mp8U5NTuvdnZzfhpXLsLSZnSImfbNoXEZ0Pkn/vvq+v4cdL9IG8N+OjGJ1Xm2oHZs6Zh3qFuKT//u6ATWqkZx7dzI5+CTvaNYrmx/5vXROJ0zWu/NrUNiTHXsndqWlX4yJ18fzTu+U01erzjf5Z7KzeujcppO9VLZ9TV9Wb7sD/Dtpe2hbbv/j7/n9dK8NB/NI66/wa0qa9vi5TrUA7DT5FduATgo0PUKvUGBbxhRX7fwKwA4WeA36v1+VTZ9BgBDEARBUAoAsFcIpuWXLVvmqhk0B6yTEz17dLuqbOPoZqxY/kfYT8fGa9PTprXVhj/YOZRdWd6TD9M8i/o3YD0alWFnF3ZllYrl4dPf3NCfm+ZrUL4QG9yyIh8+NqdjBnw9bbjubw6MZrN61mF9mjzLTs3rbDj9+qreMdtCwzR97YgXeP20bTT9k93D2fiO1bVlRD1U7+GZ7bXtd2v5nHqx0/Pz3eGx/LjJ50CY9ov2gYYnd6nJzxUd93l96vFxcV5o36ms3FO52N4pL/JxKqN9F/OJ+grneZjd3jaYH0+qS29xvFYObczrLFkgewZgl+THVNRD14FYH10LHICl80T1UDnVQcM3Nw/kZTQvLSPWReNiGZrXyznykvxKBuAw4FYF8LqB3rDBV3WTZ31/XzsABvxGN/oLAIYgCIKgFABgL02hZQDWN4M2iwLTMn89Ps6TfzwyhpV44lH+279pObZnUiutrFKx3Nrw0Vnt2ai2Vfhwm1rF+fBLK3qw3o3LcpCi6SuHNOJ+Z2M/Ps+sHs/xYVqWxo3WT+tYO7wxr6tBuYLs0Iy2fHqdsk+wub3r8uld6pdixfI9wqfT9tV8Jj+fvmHUCxy4qP5vD45ipQvlYGlTX2QXFnfl9b6xrg9ff46sf+L79v72QZ6OER3fixcvZrIbAPZ6fqJsOrZkMf7K6l78HNDw53uH8XMkX2d0Hug8BbEtRgDsJPmVuM/CBuCoQW9Y4OsHfu0AOIrwi36//7T4W4hHBAiCIAhKMQC2iwILAHYTBeaAdWy0J++Z2IINblGOD7+yqgerU+ZxrYwD8K/DR2e2YaPaVGKf7xmcAaPZtOk/Hh6pja8c3JCbhmleWkYs26ZWsUzrPjO/A+vw3NPa+K3N/VjNUvn4r7wd3x4YzgrnfogP03QqF2Vd6pXk9e
8Y34x1b/gMe2dDH+65vZ9j07rW4MPyfnixGQA7tZ/zE2W/sbYXK5DzQTa8dQU2q0ctfh3Q9URlX+8byl+s0Pmhc1GtRF5+TQS1LXSMvUZ/6V4LGoC9AG9UwDfo5s5W4Kvv72sEwCrBF/AbTNNnADAEQRAEpRAAu4VgPQA7SYbFAevISE8moCxdKDurVDQX94N/+gO7tbEPL6NxMd/RGS+yUS9WZO+s68UaVygUU4eYb+Wg+tw0TPPSMmLZNjWLZlr3llFN+HzytBJPPMIuLOjIWlYrEjOdtlGs69v9Q7Xpw1uV5/XTegmeqT5hmk7ba7RuN6bje+HChUx2BcA+1h9lf75rINsw4nl+/OlYy2U/HhzO9kxozsvOzG0f6HboAdhp8qsgANgP7LoF3niCb9jwawTAqQi/Uej36yX6CwCGIAiCoBQCYDdNoc0A2CoZFgesw8Nc+9aGnhnQmDdm2sqBddngZmX5MMHo5zv78+Gl/etkQGUF9uOBIaxYvofZ13sG8OlvrO7K8j36F21ZMg3TvIemtuDDR6e3Ym1qPGW4/nKFH+N10vie8U1Zl7pPs2/3DWIFHntAW0fapGYs18P38+H+TUpr66D5CufOyuu/vKgDa/DsE1rd76zrzt7f3Jv/Gq3bjen4nj9/ntsIhO3s9fzA7s6Rl+RXfgDYL+jGA3oTFXyNADjZ4TeZmj4DgCEIgiAIAGwKwbT80qVLXUWBaZlfDg527VGty7ENw+rHTLu7pz+Hzx/2D+RlRfM+xFpWfZLVKZ2Pz0/zbB/9PJ/epkYR9nz5Aqx0wUf59BUD6nDT8NFpzTmc0rw0nO0v97GKT+XU3LtRST7f3B7V+Titg37f29iDT989rjFfvmrxXKx7/ad5GU3/bHsfPq1c4Rz8t2mlgrx+KhvWoiyfRnXR70dberG313Tl2+nl+AjLAOzFXs8P7O4ceY3+GgGwSrhVBbx+oDds8A0CfmUATib4jXLTZzfwCwCGIAiCoBQGYLMESU4g2AqAzaLAHLAODAzEn23rxd7b0C3T9K929mG31nVVsg6juuTxu7v73oPsX7eH5hdl3esXZ+fnto7ZXlXbJUzH99y5c4a2g1+aJ8jzA//zHHmN/goAjiLwhgG9TsA3nlFfPQADfhOv6TMAGIIgCIJSAICdQrAZAOsh2CoZFges/f2SyoNeKMWeL/c4W9K7BqtZIjdbM7A2n35+TktWIv/DbG63KmxY89K8LOhtsQJgJ07G8xM1CwB2Ev2Vk1+R6V5TCcB+YFcF9EYRfFXAL0GuGQBHFX6TKemVX/gFAEMQBEEQANgQgs0A2CoKTMv8vK930vnaglZs87Dn2GvL2sRM/3BDZ7Z9RF12cGIj9v3unoFvBx3fs2fPGtoOfmmeZD0/UbIVANtFf70CsArQjQf0qgZfP1FfN/BrBMBm4JvK8Bt2v1+nACwSAuIRAYIgCIKSGIC9QDAtv2TJEldRYA5Ye3rAAZmO75kzZ7jNQNjKOD/hnCOv0V8jAFYNt0EAbxDRXpXgqyrqK1sGYK/gm0jwm0zRXwAwBEEQBKUIALttCm0FwGYQnPX++8SDBRyAs/7lTxoAezEtj+MYrO+//37P0V8BwIkAu0FFe8MEX6/wKwOw6qgv4Dd4+AUAQxAEQRAA2BCCBQC7jQLLn0Uii4RAlBl3/fr1mjds2MA2btyoedOmTWzz5s3cW7Zs0bx161a2bds27u3bt2vesWMH27lzJ/euXbs07969m3vPnj3ce/fu5d63bx/3/v37udPS0jQfOHCA++DBg5oPHTrEffjwYc1HjhzRfPToUe5jx45pPn78eIxPnDih+eTJk5pPnTqVyenp6Zl8+vRpU/sBYbJV3UbbYrTN8j7J+6o/DvIxEsdNPpbyMRbHXT4X4vzI50ycR3FexXkW511cB/K1Ia4Xunbka0lcX3StydeeuB7p2pSvVbp25WvZ7rNHTqO/dK+pBGDVsOsFeoMC3zCivlZZnuk8hdnkOQz4TdR+v26aPgOAIQiCICjFANgNBNPyixcvdh0FFhBsBMAyBBNEyBBMkGEEwQQlRhBMEBMWBMsgbATBKkHYDIbtgNiPzdZntn1+wNcMfuVjHTT8ygAsw68MwDL8ygAsrlsZfs0A2Ap+VQJwUKAbBvSGDb5+or5yk2czAAb8RhN+AcAQBEEQlMIA7BSCZQBWGQUOA4LNADgICPYSDdaDsBUMWwGxU0B2urzVNui3V78/bqO+quHXCoDjAb9eor9mABwG4PoF3qCjvWGBrxP4NQLgoPr7Jjr8Rq3pMwAYgiAIggDAtgCsIgosIFgPwG6bQgsI1jeFDgKCvTaJtgNhq4iwExh2C8VeYNcMeu0ivm6jvnZNnoOEXwHAevh12/RZALB8rXuN/tK9RvddPIA3DOhNBvA1AmDAb3Siv07gFwAMQRAEQUkMwBcuXPAMwXoAtoNg8aAvfxYpjChwlCDYDwibwbBTIFZhs/XbRXudgm/U4Dfo6K98T9jBbzwA2CvwuoXeoMA3XvArA3BQya6iCr+J3vQZAAxBEARBKQzAdhD8wAMPIKsvDIed6Ttr1sjBrhfg9Qq9UQJfqyzPdK4Av4kJvwBgCIIgCEpyAPYDwUafR9Lb6MHF6AHH6oHIrO+Y0cOW1cOZ2Sc8zB4ArR4azR40zR5M7R5orR6GnTShNHsQJ1t9o9QpCJDtoMIJmHiFHrIXyPILdrLj1fQ4SqAbZehVCb5eor5yk2c7APYKvoDf4Pr9AoAhCIIgKIUA2E9TaDsABgQ7h2ArEFYJwTIAuwVhO8BwCsJ+YNgLeAUBxCrhOH/+/IhwJ1mkPpngN4ykV0b/E8L45JHe9P8QAAxBEARBKQzAgODEgmAnIGwEwKqjwWGAsF8YDhKI3ZrOSVS2xU+E12+kV3XEN4zmzn4TXYXd5DmK8BuVps/i/yEAGIIgCIJSAIATGYLNso5GFYLj3STaDIATGYRVAXG8oDjeAKziuIUFvarB109zZ8Bv8sAvABiCIAiCUhCA49EfOFkgOIr9gs1AOMuvGWrtIl7xAuEowXBYYBwWAKs+JirOk0roVQm+KqK+UWryDPh1Br8AYAiCIAhKYgA+f/68KwgOMwoMCA4uGpxF+kZpPEA4HjAcBBCrhGOVABzkfqo6F6qjvWGCbxhR30SE36gnvXLS9BkADEEQBEEAYEBwkkCwDMJ6AFYNwUGBsEoYDhqI3ZrOSZS2RyXseoHeeIFv0FFfP02eUwF+49nvFwAMQRAEQSkCwFGHYKsHqGSC4DATZBkBcLxBON4wHG8ojjcAB3EsowK9yRD1BfyG0/QZAAxBEARBKQLAgODkgGCnIGwFwEGBcJBR4aBgOEwwDhOAgz5WXs5fooNvPJo8A36DgV/6fwgAhiAIgqAUAOCg+gNH7fNIUYHgeDaJpnNvlyk6CiAcVRgOApBVAnDY++7lHLmB3mQH3yD6+0YVfqOa9EqGXwAwBEEQBAGAQ48CJxsERy0aLADYySeTnEBwGCDsFYbjBcRuTeckEbYz6tCrAnyjEPUNE36t/hYmCvyq6Pcr/y8EA
EMQBEFQigAwIDj5INgIhPUAHG8QDhOGowrEUQdgP8fb7bkNCnzDivomS3/fKMJvGNFfADAEQRAEpRgAA4LtQdjsITOeTaLdRIPNADgKIBw2DEcFiqMEwCqOZ5SgVyX4xivqmyrwG++mzwBgCIIgCEpRAE7GzyOphuCw+wWrjAbbAbCq/sFhR4VVAnHYYBwPAFZ5nLyeq2QB30Rq8gz4tYZfADAEQRAEJTEAnzt3LlAABgTHp0m0HQg7AeAogbAfGFYNxEHBcVAAHNS++zkffq6DRAVfwG/iwC8AGIIgCIJSFICDjAIDguMbDaZz7+bbwVECYb8wHCQQ+4FlpwAcj22PJ/CqhN54NHcOsskz4BcADEEQBEGQBwAGBCc3BBuBsABgN98OdgPCYcKwCiCOFxTLpnMS721QAbuqoDfK4BsF+LXKQxBV+I1Xxme38AsAhiAIgqAUAOB4QbBdts9kgeAgm0R7AWE9AEcBhKMEw/GA4ngBsMrjFSb0pir4eon6An7dwS/9PwQAQxAEQVAKADAgWA0EJ0I02AyAkwmEgwDiIOE4KAAOav9VAa9b6FX9HV/V4JtoTZ4Bv5nhFwAMQRAEQSkEwG6TYqU6BEetSbRTELYDYLcQnCgwHCQQ+wVmNwAcj+1XfR6CgN4gwDfqUd+wmjyHCb/xSHolwy8AGIIgCIIAwHHPDK0Sgs1AOEwIjnc0OGvWrOIBD46Q4wXnYQCvF+hNBvAF/EYHfq0AWP9/EAAMQRAEQUkKwGfPngUEJ2i/YJV9g51+PzioiLDbqHBQ0eF4RovjBcBBH8MgoTfe4Osn6pso/X0TEX79Rn8BwBAEQRCU5ACcSBBs9gmNqEOw3ybRqQTCUYXhoOE4SAAO+9hEDXrDBt94RX1TBX6DbPoMAIYgCIKgFAFgQHB8IVhFNFhVkiwnEOwVhMOC4XgAsV9gdgPAUds3r+fI7bWQ6OAbzybPgF/n8Ev/DwHAEARBEJSiAJxI3wiOEgQH2SQ6StHgRIHhKAKx3nQ/Rn0b/QJvWNAbBPiGEfUNqskz4Ncd/AKAIQiCIChFAFhVFBgQHP1ocKKDsF8YjiIURxWAVRxnL+c3WcA3qk2eAb/nAMAQBEEQBAAGBLuF4Hg3iU42EPYKw6qAOJ5QHG8AVnX8vAKvH+hNZPCNUpPnZIJfP9FfADAEQRAEJTEAnzlzJuEhOKzs0PFoEh1WNNgtCEcdhlUCcVhwHBYAB3Fc/JynoKE3CPANurlzFJs8m8Gv2UvIRIZfADAEQRAEJTkABwnBdp+qAAQnbjQ4TBD2C8NBAbFKUPYLwGHun99z4fdaiCr4Jiv8uon6eoXfML716xR+AcAQBEEQlKIAnOoQHLV+wckCwipgWAUQhw3Fdqb7MUrbowp2VQCvG+gF+AJ+/cIv/U8EAEMQBEFQkgOwmyhwIkJw1PoFRyEanCwgrAqG4w3FUQBglccx6tAbNvgCftXBr+qMzwBgCIL+f3t3Ah9Vfe99PGSHBLKHhCwECARCgEBAwqYgImoV60LVQqtWW616tdflaotVq6Vqa+VqL4pKLRXrUuujt7ijIu7U+tTWKm51ARV3pFDZOc/5DvnzHKaTZJYzM+fMfN6v1+9FMpnlzDkzw/nOfwOQpgGYEOyfLtGJDsJ+CsNuB+JEheNEBeB47Rs3j58fgm8iWn39MtmVH8NvV62/BGAAANIoALs1HpgQHH4ITkRrsFsTZSUiCLsdhuMZiN0MyrEG4EQ+P7ePTyJCr9eCr1dbfQm/BGAAANIuABOC/dUl2k9B2AthOFmhuLvS+9Fr2+SVwOu34JvMVl/Cb+zhlwAMAEAaBmBCcHJCsJeCcDy7RkcbhuMZiJMdjL0QgOO9bxMZer0YfN1o9U10l2cvLHUUTfiNdNIrAjAAAARg12aG9mMIjleXaDdag93sFu2VIOzVMJyIcKxjkMgAnOh95sXQ67XgG89W33iM90318EsABgAghQPwo48+mpAQ3NmSFYRgb7QGuxmEkxWGkxGI3QjJc+fOta6//vo9AXj58uXW+eef7/lg62bgjTX0JjP4JrvV189dnr0afgnAAACkeAAmBMevS3SyW4P9HoRjCcNeCcTd1e23325lZmYGTrjb2tqsrKws65ZbbvHFtscSeBMZer0YfJPd6pvu4ZcADABAmgdgP4Zgv4wLTkRrsNeDsBfCsJdDcUFBgTnhtmbOnJmSYdet0Ot2a69Xgy/hN3nhV/8fEoABAEiDABxNCHZrjWA/hGCvd4kOJwTHKwgnIwy7FYi9EIqnT58eOOHOzc1NmaDrZuCNR2svwZfw21n4JQADAJBGAZgQHHsIJgi/lJQw7GYgTnQ4VjdovR9vvPFG3wZdtwNvpKE3mcE3VVt90zX8EoABAEizAEwITm6X6FQJwskOw/EKxPEKyPPmzfNFwI1n4I1n6PVz8CX8Jjb8EoABAEjhAKwZZ2MJwYMaKveMXaQoiqIoL9SA/v2iDr8EYAAAUjwAdxaCuzphMCFY92G9/j2KoiiK8kzp/6Zowy8BGACANAjA0YZgAjBFURTl1QAcTfjV/4cEYAAA0iAARxOCCcAURVGUFwNwd1/gdhZ+CcAAAKRRAI40BBOAKYqiKL8F4K7CLwEYAIA0C8CRhGACMEVRFOWnANxd+CUAAwCQwgH4kUceiSkEE4ApiqIovwTgcMIvARgAgBQPwLGEYAIwRVEU5YcAHG74JQADAJAGAbizENzdGsEEYIqiKMrrATiS8Kv/DwnAAACkQQCOJgQTgCmKoigvB+BIwy8BGACANArAkYZgAjBFURTl1QAcTfglAAMAkGYBOJIQTACmKIqivBiAow2/BGAAANIwAIcbggnAVKS1+W8nWc/d+fVA6Wcvb+v2V0+21q6c82+Xr3/hhEDFct8P/foQa90zc/fcX2fXe+fx4zy7fz59/tuuHsONfzkxsF94n1DxCMDhhl8CMAAAaRqAwwnBBGAqkrrvhoOshpre1tEzBwZKP//h2hkx3Wd7a2XctlfhU9sYfLm2feo+/WK6b91+xdJDAyFS76M3Hzl2z307Q6+X32MnHDkk8Bzc3N+x7leKChWAIwm/BGAAAFI4AD/88MMxhWACMBVuvXzf0YEw6Qx3agHVZSb8RVOhAmo8A7C2uXVYmTW2pcJa/eA3Yg7AZt84LycA836h3AvAkYZfAjAAACkegGMJwQRgKtyaO2uwtXj+vv92ubpCmyA5ua1qr7+Z39VFWC2jjf37WEMHFltLrpga6JqscJqdlRn4V/ej6151fnvgerrs2K8NCnSr1eX/PW+ide5JIwMtxlXlvQK/L/rJFKtlcGnguvded2BYAfiKc/fZc1vdX6jtDfW77t9s/6nHNQf+ZsKjuZ7+zc/LsmqrCgKP0V0AVnDWbbSNCuRP3TZrr9Z2PZYeU/tej2n2kfa3rq/bKXDqsczjaX+Zfa3S83Q+H/1+wMSafwvA2hcq5/48Y+7wwDao/nLvUXuO909/MC5wbHT/ur7ZbgIw5XYAjib8EoABAEiDABxtCCYAU+GWQlB3Lb3BYdP8fsl/tAVCkwlo
ClYanxt8mzsWTLcOnVa/Z1yqwqpCmrkPBTfdTveh8HXy7KF7AmGoluRQAVjPQ92WdR/6m3MMbGfbr/vX45nxxI8u+VrgvWPCo/N24bYA63F1n6b12GyrWqhNy7q5HwXMwl45ex5PoVPboJ+1Tbof7R8Tav/nokl7HkOh14RXhfMLvte65zk7A7B+NvtTx1nX1RcVJozruOhnXb+8JH/PdpueAfqSgwBMuRmAow2/+v+QAAwAQBoE4GhCMAGYCrcUsrqb0KmzAKlAdtCUusAEScHdjp23+foBDXt1yVXYVWuvCcCmldO0SDuvG04AVpBTy6X5XYFPobu77VcQV3XWBTqaAKwAq5Za52VqgVUru4JncOu0rqvHU5gNbqlWq7kJwAqnup4pfdlgtl2h1hn4gwNw8P40re86DuY56jrmSwlTegyNBScAU/EOwOGEXwIwAABpFIAjDcEEYCrcUgAL1c3YOaNyZwFSAUqhTt14FZBUJogFh8fOArICnmmR7CywdReA1Yqp7sm6zJRzEq6uWrCDu38rSMcSgBUYg4OkHsd00TaBNvj5qjVYXxQ4/6Zuzeb6ainWz84KtZ3hBOBQ+0LXUSuy828XnjYmcGwIwFQ8A3C44ZcADABACgfghx56KKYQTACmIln2R+NOTaugCbZqjTRjQNVaa7o2mzG+pguxuh07x6I6b2MuV7BSa6b5XV1x1WXZjQCs7VHX4eDr6DLTnVfbYoK5Qr3GJ5uw6myt1T7QdTsLwM6u4p29x3QdPbbZXyq1kqtlWONsFczN35yPZ352Lr2kYG8CsO7T+fimu7ebAViTiDm3W6+LF+4+ggBMxS0ARxJ+CcAAAKRhAA43BBOAqUhKIUsBS62UCqoKQj84fsRegUpBUS2SaqU0oUldjxXo1Iqq1k2FWhPKNK5XLcMKTwpruk8FYXWb1vXMurKxBmDTuhp8HT2OuvCaAK7t0XZq+4v75O4V9NVlWs9NIU/PJ1QA1n0oyJq/6T2mbXWWaU12Pp72W3D3bD2Otjv48czkX2p51XbqtiYAa39pv+l5mWNk1it2KwDr/hW6zX4y2+0MwAry6o5tQrj2pXO2bIoKNwBHGn4JwAAApHgAjiUEE4CpaJZDUohVuDKzEjtbhNVNWsFIocf5d91Ot1F4c7YGq8VVtzEhTcFY43J1PWdXYv1srmNalZ2toMHbYu7bXK6xs87rO1tz1XrpHCesx9ZjOe9Tz00twfqbWpOdjx98PYVQM/GUczyuKWc3b7WEa3+ZoO8sbZf2hfZX8JhntfLeetX+gf2isOsM97pM26m/d7WPnM+hu/1pfjdjgHV9PYb2l3N/m+et/epcI1o/O8cfU1Q4ATia8Kv/EwnAAACkeACONgQTgCnKe6WgqJZVE5QVdjUJmWk1V6u7CZ4KraYLciK2LdQkWBQVjwAcbfglAAMAkCYBOJoQTACmKG+WWlzV7VrhVv86W2TVwqtuz/qbujg7u4bHu9TCG6orOUUlMgB3FX4JwAAApFEAjjQEE4ApiqIoPwXg7sIvARgAgBQOwA8++GBMIZgATFEURfklAIcTfgnAAACkeACOJQQTgCmKoig/BOBwwy8BGACANAjAnYXg7tYJJgBTFEVRXg/AkYRf/X9IAAYAIA0CcDQhmABMURRFeTUAd9aTqavwSwAGACCNAnCkIZgATFEURXkxAEcbfgnAAACkWQCOJAQTgCmKoii/BOBwwi8BGACANAzA4YZgAjBFURTlhwAcbvglAAMAkKYBOJwQTACmKIqivB6AIwm/BGAAAFI4AD/wwAMxheCy0mJzokBRFEVRnqjS0pKowy8BGACAFA/AsYbg7tYKDlXLly8PWY8++min9dhjj3VZjz/+eKe1YsWKTuuJJ54Iq1auXNlpPfnkk2HVU0891W09/fTTXdYzzzwTUT377LNh1XPPPRd2Pf/88zHXqlWrYqo//elPcasXXngh7Sqe+zOW4+zGay2S13a475dI34eq7t7b3X02hPs5o+rq8yrcz7yuPjc7+6zt7nNa1dlnfGf/L6i6+z8llgmvgkv/JxKAAQBI8QDspRDcVRAO5+QqnkG4q5NKN4NwdyfK0QRhL4dhNwJxvEOxX0NyIvdJrGHXzdCbSsE3kvDb3WdUvIJvvMJvd/+PxCP8EoABAEiTAJyMEBxNa3CyQ7BbrcGpFoTdDMNuBeJEh+J0K7eOkZuvm3iE3ngFXzdbfd0IvvEMv1317vFa+CUAAwCQRgHYLyE41iDc1UleoluD/RCEkx2G3QzEBOPkBd14BN5IQ2+qBd9kdneOtdU3lvDrZvANDr8EYAAAUjgA33///SFDcHdBONIQnKxxwcluDfZLEPZbGI5HIE73cBzP/RmP4+/H0JvI4Ov18OuV8b6hwi8BGACAFA/A0YbgdBgX7MVu0V4KwtGE4XgF4niHYj+H5ETul3gd22heZ5G+lgm+7gVfP4dfAjAAAGkQgP0egtOxNTjcIOzVVuF4h+FkhOJ0q3gfO6+GXjeDbzq1+nppvG9X4Vf/HxKAAQBIgwDspRDsxS7RBOH4h+FEBGKCsfeCbiyBN5rQS/Al/HYVfgnAAACkUQD2SwiOtUt0IlqDUzEIRxOG/RKI0z0cJ2M/x/K68GvoTVbw9WKrbyxdnuMZfgnAAACkWQBOZAj2a5doN1uD4xGEUzUMJysQ+z0ke2WfxXrsExl6Cb7eDb+d/T/jVvglAAMAkMIB+L777ktYCPZql2g/tgZ7LQgnKwx7KRBT7obdaANvrKHX68HXS92d/dTlOZLwSwAGACDFA3AqhGC/tAb7IQgnKwy7FYgJxf4Mu34JvfEY3+t28PVzq68Xwq/+TyQAAwCQ4gE4HiHYS+OCE9EanOwg7MVW4VjCsJuBmGDszbAbS+D1YuiNJvgmsrtzMlt9vTzeNzj8EoABAEiTABxNCE7WuOBkdolOxyDshTAcj0BMOE5MyHUj7LoReAm+/mv1TUb4JQADAJBGAZgu0d5sDfZSEHYrDLsRiOMdilM1ICdqn7lxfL0cer3Q1TkR3Z291uU53uGXAAwAQBoGYEKwN1uD4x2EkxmG3QrEiQzFyQ7OXnqebh07N19Pkb6Wk9na63bw9Xp353i1+roRfgnAAACkaQD2yrhggrB7QdhPYdjNQOzFYOzXcvuYuP2aiVfoTZXgS/jtPvwSgAEASOEAvGzZMkJwArtFxyMIe7FVOB5hOB6BmGCcmJAbz8Ab79BL8PV3l+dowi8BGACA1LWu4z95iqIoiqI6Kicn5wtOEQAAAID4ocUJAAAAAEAABgAAAACAAAwAAAAAAAEYAAAAAAACMAAAAAAABGAAAAAAAAjAAAAAAAAQgAEAAAAAIAADAAAAAAjAAAAAAAAQgAEAAAAAIAADAAAAAEAABgAAAACAAAwAAAAAAAEYAAAAAAACMAAAAAAABGAAAAAAAAjAAAAAAAACMAAAAAAABGAAAAAAAAjAAAAAAAAQgAEAAAAAIAADAAAAADxndUdQTNUCAAAAAIIYRVFUCtRq/vsAAADwH1rr2O88N4DXOQAAACdxYL/
z3ABe5wAAAJzEgf0e2roMusfSpZb3MAAAADiJA/sdvCY4XgAAAOAkjv0OXhPgeAEAAICTOPY7eE2A4wUAAABO4tjv4DUBjhcAAAA4iWO/g9cEOF4AAADgJI79Dl4T4HgBAABwEgf2O3hNgOMFAADASRzY7+A1wfECAAAAJ3Fgv4PXBMcLAAAAnMSB/Q5eExwvAAAAcBLHfgevCXC8AAAAwEkc+x28JsDxAgAAACdxPpEdtN+z2SXgvcjxAgAAACdxqegYu4Z27PfJdk1nl4D3IscLAAAAnMSlolq71nXsd/2bzy4B70VfoRcHAAAAJ92IwOsd+30ZuwK8F32HXhwAAACcdCMCP+vY74ezK8B70XfUi+PjjuOlf+nFAQAA4AP5QSfdheyShJ5Af8WJMwjAvvVGx/G6l10BAADgDzODTrpnsksS6hR2AQjAvvXjDHpxAAAA+MrFGbtbIHUSV2XXWeySbmmym2PKKvs+VlRc8ll+z15bO/ZfwkqPWVpe8eWQ5hHPN7eM+g6HhACMpFAvjn9l0IsDAADAN47pCL066V5gVzu7pEstubl5/2geOXr9/GsWW3988q/Wk39/3/rLmo0JLT3mPStetC7++ULLDsG7qmvr1zW2tA3i8Pgaswr7E704AAAAfEStvmY5nk/DPOkutmvs2H0mf3vStBnzx0+ednfb+MnPt46b8JfmkWPeG9TUvK6xqXlDeWXVZlN9q2u2O6uib/UW598HDm76WLcb1tL6ysi28S+OHjfhgdZx7dcNHTH67PyCAs2u2uiBfdWak5P7+WVX37g50YG3uzrvkiut0rLKrZOmTWviJe1bzCqcXA12TS0pKT+laWjzL0a1tt0zYtSY54a3jFw9pGnYB3Z9Utm3arOpquqabdX9arZXVffb3vHvVvO3isq+XzUNG76ucXDT2pZRo18Z3jLqafvfOwYNGnyZ/Rhz9Dgdn70AAABIgrc6Trr/Gir0VfWrPW9wU/OjtfUD1xQU9t6a37PXTjvgWnbwtWbNnmN998z/si647JfWpVcvshYsvsO66c4HrFvufdy6/9lXwq6b734kcLsrFi4J3M/ZF863vn3KWdbMWUdbYydMsext2JGZlbXLDs6f1g8cvKq2bsCvOk4iE9X1MN9+/DXnXHT5Bq+FX2cIrus/4DNezr7F2tCJ02LXKYWFvf9QVFzydnZ29vay8vLNY8dP2HrkN46zTjr1DOuHF/3UunrhTdaiJbdbd/7xYevu+x+znv3La2GXbqNauHhp4H7Om3dJ4H5nHTnbGjW67as+RSWbs3NytvTpU/RaTk7Ozfb2HJ/hjS/6AAAAUt71HSfdv+046T68oqr6qbz8nlsGDm7afMRxx1vzLr8mEFAfffGtpAW8VW9+at3x4DPWVYtutU487Wxr+KixmzUWtrKq319z8vO/HefAcPqAxiFrvRp+TTUNH2kdeNhRF/CS9q3VHe/FB9gVrlPr+i+ysrK+qKio/OjwI7/xkYLpfY89Y61e86m15vPNCa+/v/2Rde/DK635V12z7YADD17Tu0/Rx5mZmR9k7J6boZZDBgAAEB/HdJx0/6xXQcEXk6bO2KTxrc+s/sjyeuBTKF649B479B35Va/Cwo0dQdh1PXr0ePqKhUv+6fX9oTHBYyfs+yYvad/6SQazCrtNwzoW5OblfXDS98/4+6qX30pK2A23Vv7p5Q2HHTH7OTsIa23h8zl8AAAA7tNYtK0jW8d+oO7IXg95nZW6Uat7dm5uT9dPGu0AvEUTXnl9H2hirOra+q28pH2LtaHdt7RlROsTb37w5WYvB9/gWv70n9/Pys7WvAyXcQgBAIBn5Obm3Zfh/8lMyjMzMzcnYzZjt0tjj3Pzeq6Pwz7yxfPXMSwo7L2Td6avMauwy19evfTG2i1+Cr+mbr172YsZu8eDAwAAeIa6K672cQguzsrO/uPBh89+3+/h11RGfNZRTffn70v19fUzmppH/HpIc8tT9QMGvVleUfVFMtZtTqXKycndocno+g8YvLqxadgTw0a2LuxVVNTm5c/oseMn7Hrzw/W+C8Czv/mtz3g/AwAAz51czZx11DvZ2bnv2z/Py/DPxCWaEOayvLz8T04649xP/vz2eosATABOldA7bMSoR4tLy74aMqxlp2Yp18ziN9y+LNBFPBV6OsR7KMHRc0+y+lbXBCZVC95/Dzz3qnXX8lWBsfe6XLO119T131FWXrmhqXnkXWVVtft47TN6yNDhGw48+LBdT/75774Ivg8+scqavO/+W9snT91IAAYAAJ4LwGbspX3SuFaTSOXk5GpZoZsydi9r0WpXYZK3MbBub8burpVLc3PzPigqLvlMwfehVa+l3Ak8ATg9T5jrG5uHNw5tfr2u/4Bdp593keWHMdteKu2vfacfZNU1DLSi2X+aoV1hubSsYqda3DO80ysmMFneaWf/6MOS0tJtMw85zLpm0W8CMzB7KfS+uPpd6+fXXG+Nnzh5Z3W/mq/m/WzBP/XFJAEYAAB4MgAHT0R03sVXfja2fcrbZRWV72dmZm7VGrL2dZfb9ZuM3ZOaKIxqptepHeG0IYoTxtqO27V33M9Rdp1u1+Udj7PCftxPsrKzN1dW9Vs7adqB71z884X/8vNEVwRgAnAIhaPHTXigqLhkp9aPTqXeDIkozfauVtzi0jLLjf2n+1MQ7t2naHvLqLbFGbtnYfbEZ7SC8IU/W/BB++T9Pi3s3WfnhEn7Wqf94Fxr8a2/txI9O7Rao7XGsNYGHt02bldBYe8d02Yc8tGVC3/zpfMYEIABAIDnA3BnrSvXLrlrwwWX/vK9o7918uqJU2e8PKS55dW+1TWr7RP3d/Lye67Lzs4x473CKnVf1u1KyiverupX+9qwltZXps342ivf/M733/jR/AUfXve7//0qFVt4/RaAdTJ7xcIl1gGHfD3QumYHg4SP2ywoKNS4zU2Dh7U8V1hUOjtV3nzDRk/sX98w8PNZs+dYK156l0AbYWld7+aRY6x47D990aYW5QGNTW9kJLcXTKdLqF376zs/PPHUM99RILY/h3fY781dI1vt/XHkbOvMcy6wtE6wwvGdf3w4EFjVShtOuFWYXvH8S4HbKeSqZVdB+5BZR1jDR4y0evUq2FVeUblt8r77f3LqDy5478bblv2zsy8eCMAAAMCXAZhKzwCssZINg4ZYYydMsbS2sr4IScYYVD2meiZoneDGpuZtfYqKFUpq/fzGO+CgIyaXlFdsOfOHl/K6j3LJLI3zjef+U6g79oRTrKp+tV/UNDbWev0zWl8CLLn7kU8vuuKa9xSMDzj40HUKx3Yo/rJfbd0WheRwvnAqLS3bXte/YbNuN2m//T+ZeejXP/zuGee8e9nV171/6x8f3xjJZwABGAAAEIApXwRgjYksq+gbCL5e20fnXnzFzp69Cj73awieMG3GdC33pJZ1XvMbo3ptqidCovafulbb4XFrVUNDA5/RDGkAAAAEYCrFArC6V/arrbcuuOyXnt1P5/z48m15eT3/5rc3XMPw4VUVfau3XLXoVl7vUZRaOfXaTP
T+UwjuV9/wsX0I8/mMJgADAAACMJVCAVjBV92evb6vausb1mfsnozNL7KHtoxae+JpZ/Naj7JL8sSpM6xk7b+Zs462hraMfILPaAIwAAAgAFMpFIBHj5tg+aGF8sdX/mpbVk7ObX55s03Y74Dfj588jZmeoywtb5TM/acZogcNGbZr9D4Tz+czmgAMAADSIACr+2HwMkTqLqsJabqbUTWSiVN0fc3wGo/JlLy+jJIXAnBuXr4v1qHV6y4vL3+NH95omvFZ437j8bpOlxmfNe432ftP44/7FBVvyUhcV2gCMAAAQKJPrnTSqSVB1P1Qy+E0DR9pXbvkD3vCqpYh6er2p/7nj6yb7nwg7JMmXf/SqxeF/JtaJjX7azStQNoG3bd+1n2YMKznFiqEJ6MbsBcCsF9OuvWFRlZ29kY/vNHGTdj3iTknnU6YjbK077yy/6bNPNQa0z7pVwRgAjAAAEjRADxizLi9AqyCh8KwgnFwAFYw1WVqGQ4OwLpdqBYcrffrbCHuKgArrOrxIumiq8fU/TsDsHM7NKlOpAFYzy9Ua7Iuj2X9YgJw6p1gH3LEMUMKe/fZReuvv1t/na3AmhU6IzGtwARgAACARJ5cKTRq8peuuiubAKx1Y5tHjgn8rtB8892P7Am0Xzvy2EDrsYKzub5CqUKm/qZxp/Muv6bLAKzHUgDWv7qfUC27Kt2/rqMwrvvWbfTY+tlcz4Rb/asuv8Fht6sArPCt7dX96Xma7sILl96z5/lrn+lvBGBOsNun7L+Mia+ir++e+V+W1/af3t+tY9svIQATgAEAQIoFYAVRZ7g875IrA7+bVl1nAFYoNK2iCrfqKm0C7bdPOWvPfRx7wimBEKnr/u6+lXtajnX7rgKwToJN12tNhmMeq7MArHVC9Vjmcm1DcACOtAVYE+Hoeelf/a7tV7jWz41NzXtastUKrGBNAOYEu6pf7UbzOqcir4ZBQyyv7T99fg1obHqDAEwABgAAKRaAFSKdrS+33Pt4IHDqsjN/eOleAVitn87bKhjq78FjgNVSqssUIhVK1ZKq+1CA7CwAKyArqF7884WBv+k2Zrs6C8C67Ibbl4VsKY42AOv5B495VnBX4A2+fjq2ACuoHD33pMCxVLfVjueTyNLSSMvtOt4Lb7ABTSObysordxFkoyu9D8sq+npuu/TZlZffc0dG/LtBE4ABAAASeXKlcXcKM84xvSoFUIVLZwBWS41zcioFQJ0oKnQ6x+wqwJ594fxAgFagDQ6MoQKwbq/H1OWmtF16PBPIg4O3HsN5PwrzsQZg0w3bOeZX261/TYu3CezFpWVpE4D1fNXarsnFdFw1TjKSmb/dnBjrqhtu3WS/Nj4rLa941X4uVcl8g03Zf+b1Rxx3PGE2ytKa1F7df+qF0tjcMpcATAAGAAApFIBVGpurkz213KqFT622ZlywMwBrnU61/ml5GgVN/WwCrcKkxgQrrCooKljrfnVfGkOr2ypAdxaANeY3eNIp3VbBWMszKQyrdXbB4jsCwdMspaTL9Zh6bLXUhgrAzlmtnUFXl+u2pnT/ZhZYnZjreZpQbrp262Rd19W/oYJ1KgZghV996aD9kozQ29W6sZqsaNy4ieOS9QYb1tL6ir54ScYXAfoCSO9NjaF1Tszm7BXh9dLrKhn7L5zSFz2NTS13EoAJwAAAIMUCsJn5VIFToVYnpKalVyfaCp3mevOvWRwIggq35joKhJogS+FTS5mYdYP1d11P11dANUFS19fjOQOWmSArOKSalmUFc92PWpS1PSaI6XH1mHps/Wy6YjsDti5XWAoOEGassymNfzatvgrA2hfBwVn7RvdlJgRLhwCsoKUvFKJZmioRYzXLyiu3TZo2rSkZb7BBQ4Z+EMkSYG6UjoNee3ov6D2i94O+zDHviWi+mElW6XWV6P0XbukzZEhzy1MEYAIwAABIwQBMhTdbrWldU2uxc6bqVA3AZomaWJZ+infpi4vq2vp1yXiD2eH7X6GWy4r3uNng7vt6PeoYaV/oeOkLHfPllb6QMl8wqTeFXsfqweBsKdbPukxfgpnnoy+p1CtEl2kIgu5P96/rOSet0n2rJVp/i/RLEoX1RO+/SNYVr+s/8G0+ownAAACAAJy265WqVVjhQyEgmnVL/RaA1Q3UOdO2V2tI84hdg4YOPyHRb7CcnNydwePnE9ECrK7/ZtZ0Z4BUENakUgpv2i4FU31Ro9/NTOwKuwrF6tKu6+v26oqsy3Q9E67VAqrx77pcPSI0/luPp2Cs4Qy6f7VCazt0HwrAoXpydFWaST3R+y/c0vMur6j6wuuf0WZCQFM6bs59quPl/LuOpVtDGQjAAACAAEylVABWAPJqF1VnKYgNHtbyXLq8xxRwNBxBrbMav68yS3c5u0ArAJvjp7CrCd7MmHd1bTfLl6ll2FxuxuorSJlAq/DsnPVcY+NN6NWXQmoRjiZUefkzSi3qRcUlm7z++tExM19yqDREQ13kzRd0Oo76ksL8XV9qRTODPQEYAAAQgKmUD8BebqELbq0rLi37ItFvsMysrF1eGButgGNmXA8OwKaFWN2h1brrbA3UZRrPrssVdhWWnAHYOZbe2e3a3K+eux5XvQQUukKt791VZWVne3JsuUqT9xWXlH7uhwAcvHSbWurVwh/qOJo1zd3Y7wRgAABAAE5AN+Rouh4TgKMLwH55Dan1Mb9nr62JfoOVlJVvTPQYVhNYnV9MqBVWLcLB4cYZgNWiqS7Q5m/60kCllmATnrUfNYY43ACsMcLm/nVfziXEUmEMcHVN3et+DMDOL0KcLcAKxhoDbmbwJwADAIC0DMCdzRqrmWWdszUHl06iEjU5kkKvTsB1gq3ximZsYqz3291zJABnpPuXC12q6z9orWY4T0aXb7129X5Qy6veiybYaoyn6QLrDMDmdnrvaJkzdZtW12ezJJiuq/eWWe86liIGxgAAD0FJREFUnACs96BCtX7Xv8GzpndXuk0y9l84pS8UGhqHrEyFAKzWYNPqr9eKjj8twAAAgAAcInQ6J94xXR6dJ8TBrTf63YxFdJZOtEONEezs+sGlpY5MC5dZFkkTUXW1feGUtiv49+DLzHa60RWYAJxaAXjQ4KEvmqW6kjVONdLXpd4joV7jsfSs0HZEE6gUuJO5/7pb/quuYcBv/RiAdTw6+yLDrGmu1nsCMAAAIAAHrdFpZhFVq5Fp5dHvCqTqKqnLTTdI/azJcfSvWTtYt1FQVYuDWqVMC5HG1+m+Ql1f3fV0/eATNrVshdpOtSAFb5/Zft1O26pWD+eyL5o0RieJJsTr5F23V0uJ/mYmB1ILl/O+nfdBACYA1zcM/Inzixgq8iWsvLr/2tonb7EP8eF+C8Dmc8181gYHYH1hor87l7IiAAMAAAKwIwBrzJhpLdDJlTlxcrYAK8hqlllzgmVmGdWJmWm5VQuwuVwB19yPWoAVjs31NYtpqNYpdffUBD36VzOZmpZj3da0aGl7zDhEbZ8J1to2baN5DmYNX/MczIy65vG0Dbp/Xc90tdbvsc6emi4BWF8oXLFwSaCFr
65hYODLko7nnrDKys7emJOT+25WTs5tcQwyjWXlldsIs9FPNKXllby2XXqvd4wpL/R6ANZnm8Z967NMpa7tzi/q1MKuz0jzd32RF+lkZQRgAACQVgFYQdWMN1TrrAmbzgCskGNOsFRmIh3nMizO+9RjdnZ9TfLT3UmzxrKZoNuroGCv+zLPxzlm0QRlndgq3Jpul+Y5qKU41HI/wc9LsyITgLufpElfVGh/6YsFHS+31h2NdGIsfXlx4RXXbqmpa/giL6/n3+znU+v2zi+tqPzMjda0dC29Vry2//T5UFnV70WvfEbT6wIAACCBAdg5PlZLpahLcXAAVqtCqJDTWQA2YTTU9UPNCqvJe4Lv32y3GevW3farC7XuR5eb8YrmOSgUq8XS+VzN2qduLgOU6gFY3c/LKvruNV7bK3XuxVfs7FVQoKWShrq58/sParzRvCeoyEs9TLy2/6bsP3Nznz59TiMAE4ABAEAKBmAFFoVUZzkDpLobq+VXQVFj9nTCasbl6nd1T1bg0fhZtf6pu7QZw9tZAFYXPHVJ1vXV2mLGsHUWgHV9dUfWfau1SN2VzVIeOnnWNqmlUdtjulAHB2AFaIVmPZfgibzMTLgaT6ztVUBX8DVjFNWSqIAc6/IhqRyAtb+0fzsbq+2Vlr28vJ7r7edV5eL+ryooLNySqBnRU3Fps+LSMssr+0+fL/bx1GsknwBMAAYAACkYgM3yGM4yodOM59TPCoKaGdW0iKoFV7+bSac06YpCsQKzaa3VGFxnoHWOPevs+p11l9VYNxN8FbhNK67ZPl2uFmpzeahxbrqd80Rb1zGPp5CrcGzCvjM06b4VhsOZsToJJ4zbI5mBN14BWME3+EsHL9apZ/9oc3Z2znNuHoDq2vprDjt6zk4CbZTHxP7MCbWUTzKqdWz71l69ep/spc9oAjAAAEAanFylYsXphPGFG25btjbZAVjd4L26pM2/dfuv6/+O/dyOd/EYFBcVl2xK5JrSqVT6EkqTYSV7/+lLueKS0vft45nNZzQBGAAAEIApb54wnj98VNuqZAdgTRCmbuh+OA4/+eWil+3nttzNg9C3X7/vVNf23xZqjV2q+9IQA006l6z9p9dun6JiLX00mc9oAjAAACAAU949YSy2a921N//+lWQGYD+93lb+be179vaud/tA2AFuUdv4STsi6ZJO7T0hlnOSukQue1TXf8DWBE18RQAGAADg5IoAHKPpmZmZH19y1XWvE4CTeyxq6hpWaYI3QnB0pcn0Ern/FH73mbTfjuqa2lv4jCYAAwAAAjDlnxPG6T169Phs6PBRf79q0dINnc2qSwCO+7EorKlv+L9t7ZN3aYZjXveRB1Kt8a2W4HjvP3V7HtjYtL1vv9p7MhI37pcADAAAwMmVe5PpaBkTLZekGZ6/e9b5W2fNnvvPqTMPXTd6n4mfDmpqXlffMPCjPkXF68Op4pLSDQMah3w0rGXU5+1Tpn0y87CjNnzz5NO3aPZrzSitJZMiGfeagBNGdYc+3w7Cf7ZrR8fj/VsRgON/LJqaR/y6ql/tDibGin5maC2rFa/9d8Pty7T80vb+Awady2c0ARgAABCAPV86MdayR4d/41vbho1o3diroEDL22zp3bvPu32Kip7Pz+/5O3sfXGbX6Rm7Z/w93K6pdrXb1RBBTe643TEd93NOXs+e/11UUnp/aXnFXwt79/4sMzNzZ13/Af+aNvPQbTpxVzA2y0V58ISRAJygY9HWPvnMouKSrVrixy8ThHlt3WatU+7m/tPnhrpZ2+/dTbW1tQfzGU0ABgAABGBPlgKl1gmeOuOQHT179dpWXtH3g8rqfv/Hfp6ndATUqiTu63y7Wu06qrq2/sa+VTVvahs1tlAtxSYME4DT8uS9eJ+J+91VUla+Y85Jpwd6KBBuI+vVoS+VFISj3X8aT3zz3Y8EgnRpWeXWkWPHLeh4z/IZTQAGAAAEYO+VujPbJ8DbBzUNe7m6ru5U+7kV+mD/5/cuKT+ssWn4M0UlpdsUhAnA6XvyPmrC/jWTph74YL/a/l8pzB1x3PGBFk4Fs/uffYVJs7opLY+kWaIbBg2xutp/+rJJv6sXht5zmlCrsHefXf0HNq7fZ9J+v/XYZwcBGAAAgJOrvevE0862mkeOeW/0+PHNfj0Q+YWF++Xk5G4lAHPyLnNO+H77QYcffeuose1rm4aP2tC3umZ7VnZ2yPHaVGSl9agrq2q2NY8cvX7MPpPeOuTI427a/+CDB/EZTQAGAAApTifUmlHV760+6jo6pK2tPAUOSXteXv6ryd6IzKysXeG2NhKAgfjRRHV+/Yxe+bc1H2TEYS1tAACAqLWNn7Rm9LgJvg3BCmkzZx1tjZ807TGOpnvqBzZuvOXexwnABGAk37Ihw1q+CDVhntfL/r/lCXv7f8UhBAAAnnHyWWf1HTai9Z0RY8YFlvzwU/DVZFf2ieHOkWP2eS4jOetzpqz9ZhzymLqVE4AJwEi6qh49ejw/fFTbP/535Us7/PC++d19Kz+rrql7SeE9wx9zMQAAgHSjpViqa+vXDWgcsu308y6yvLouqSaz0SQ3lVX9ttY1DHpjcEvLkRw9902ZftDIopLSXZpdlwAMJJ1moz4nMzPz4+YRo9+85BfXfR7OezOR9eiLb1nnXHT5O3bwfd0O7G/Z23tSBl9MAgAAH2iv7T9gcZ+i4k+LS8u+soPQxvMuuTIQPBPdTVoneGqVPvOCn+xsn7L/l1rDt6S07L2+1TVanqSFQxVfzaPG3HPAIV/vdubheAZgve7U0k8ABvYEYa0zfk9mVtY/6xsGvnP4Md9686obbt300KrXEvre0JrLP71m8bqZs456o6Jv1T/scL7B3q6ldh1F8AUAAH7VaNecvJ49/6egsPer9gnXtl6FhRsbBg1Zt/9Bh6496YxzP7nw8ms+vXbJH/YsM6JWgHBOnnSyZpYhufqm27bqfr71vf9YN2najPfr+g9Yl5ffc1NWdvbmXgWFL2ZmZ/+y46SqlkOS2JPt8sqq1ybse8DOrr78iGcA1tJWWt+VAAyEDMMz7ZrXEYi/yM7O2VTRt3pN67gJb8yee+JbP5q/4MNfXL/0C33OKrBG8vl8z4oX7c/n+3f87NqbPz73oivXHP6Nua8Pbx37eklZ+Rr7sb6yA68muLrLrnMydq+xTugFAAApSSF0esbu7m2X2/Ubu5b16NHj6ays7LU6CcsIYxkS+3qf2AF3rf3zCp28ddzPZRm7Wzcm21XFrvaEwsLevR8vK6/c8J/z5m8K1TU+UQFYLdEX/3xh4HfneHVdbq531/JV1sKl9xCAka40E367XXM6gvFvOj5fV9ifuW9H8vlsf6a/1vH5rJB7k13n23WMXWPtKmZXAwAAIJWpBf7BzMzMf4U6YU5EAJ41e4515g8vDfQamDbz0MDfdPmxJ5wSWAdal+vnvtU1BGAAAAAAQFzEPQCrS+b4ydP2XK4lYTRzuf5tGj5yr9sQgAEAAAAAvg3AWpNYLcDOv42dMCUwplH/Oi9XMCYAAwAAAAB8GYA1CZdaes2M1Jqg
Z+LUGYGfdbmZ3EeXZ2VnE4ABAAAAAP4MwPr5qkW3Blp31RKs0Kuwa5ZK0u9asulrRx5LCzAAAAAAID4yMzN3drdWcLQBOLjUEqzls5yPp/HB+n3FS+/uaREmAAMAAAAAXFdaUfmOWmETEYBD1dkXzg+0/qq1WC3Abq4bTAAGAAAAAPz/AFxefulhR31zR7ICsBn7qwD8u/tWunq/BGAAAAAAgFNxQe/em7QOb7ICcLyKAAwAAAAA2EufkoqDi0pKt2miKgIwAAAAACCl1dTUHFpQ2HurxuMuWHxHYHIqAjAAAAAAIFUVt45rv6mu/4DPMrOydnUEyL2KAAwAAAAASAcEYAAAAAAAAZgADAAAAAAgABOAAQAAAAAEYAIwAAAAAIAAHFU9vXrdJnt7N3LIAAAAAAAR69Gjx44/v73eFwH4ioVLPrQ3+WWOGgAAAAAg8gCcmfnir+968Cs/BOAZhx6x2t7k6zlqAAAAAIBonD9xv+lveT38PrTqNSsnJ3eDvb1jOWQAAAAAgGgUZ2ZlfXLD7cs82wr8zOqPrP4DB2v87684XAAAAACAWEzPzeu5/heLlu7wYsvvqLHtW3r2Klhmb2c2hwoAAAAAEHMIzs7O+XLfAw7etGDxHYHgmazQu+rNT62b7nzAOvaEU6xeBQXbyyurLiH8AgAAAADcVJybmz+vpLzi7cysrF0Zu9fcTXjl5OTurK1v+LBlVJsmvKrisAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEJH/B4EKwogA/asqAAAAAElFTkSuQmCC) - - ------- - -## What does it log - -Each audit log implementation has access to the following attributes. For the default text-based logger, these fields are concatenated with `|` to yield the final message. - - - ``user``: User name(if available) - - ``host``: Host IP, where the command is being executed - - ``source ip address``: Source IP address from where the request initiated - - ``source port``: Source port number from where the request initiated - - ``timestamp``: unix time stamp - - ``type``: Type of the request (SELECT, INSERT, etc.,) - - ``category`` - Category of the request (DDL, DML, etc.,) - - ``keyspace`` - Keyspace(If applicable) on which request is targeted to be executed - - ``scope`` - Table/Aggregate name/ function name/ trigger name etc., as applicable - - ``operation`` - CQL command being executed - - -### Example of Audit log messages -
-Type: AuditLog
-LogMessage: user:anonymous|host:127.0.0.1:7000|source:/127.0.0.1|port:53418|timestamp:1539978679457|type:SELECT|category:QUERY|ks:k1|scope:t1|operation:SELECT * from k1.t1 ;
-
-Type: AuditLog
-LogMessage: user:anonymous|host:127.0.0.1:7000|source:/127.0.0.1|port:53418|timestamp:1539978692456|type:SELECT|category:QUERY|ks:system|scope:peers|operation:SELECT * from system.peers limit 1;
-
-Type: AuditLog
-LogMessage: user:anonymous|host:127.0.0.1:7000|source:/127.0.0.1|port:53418|timestamp:1539980764310|type:SELECT|category:QUERY|ks:system_virtual_schema|scope:columns|operation:SELECT * from system_virtual_schema.columns ;
-
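The messages above come from a node with audit logging enabled. As a rough, illustrative sketch of how the `cassandra.yaml` options described in the next section fit together (the values here are arbitrary examples, and the exact shape of the `audit_logging_options` block can vary between 4.0 builds):

```
# Illustrative sketch only - option names follow the list in the next section;
# keyspace and category values are arbitrary examples.
audit_logging_options:
    enabled: true
    logger: BinAuditLogger
    # audit_logs_dir: /var/log/cassandra/audit/
    included_keyspaces: k1
    excluded_categories: AUTH
```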
- ---- - -## How to configure -Auditlog can be configured using [cassandra.yaml](https://github.com/apache/cassandra/blob/trunk/conf/cassandra.yaml#L1216-L1234). If you want to try Auditlog on one node, it can also be enabled and configured using ``nodetool``. - -#### cassandra.yaml configurations for AuditLog -- ``enabled``: This option enables/disables audit log -- ``logger``: Class name of the logger/custom logger. -- ``audit_logs_dir``: Auditlogs directory location; if not set, defaults to `cassandra.logdir.audit` or `cassandra.logdir` + /audit/ -- ``included_keyspaces``: Comma separated list of keyspaces to be included in audit log, default - includes all keyspaces -- ``excluded_keyspaces``: Comma separated list of keyspaces to be excluded from audit log, default - excludes no keyspace -- ``included_categories``: Comma separated list of Audit Log Categories to be included in audit log, default - includes all categories -- ``excluded_categories``: Comma separated list of Audit Log Categories to be excluded from audit log, default - excludes no category -- ``included_users``: Comma separated list of users to be included in audit log, default - includes all users -- ``excluded_users``: Comma separated list of users to be excluded from audit log, default - excludes no user - -Note: BinAuditLogger configurations can be tuned using cassandra.yaml properties as well. - - -The list of available categories is: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE - -#### NodeTool command to enable AuditLog -``enableauditlog``: Enables AuditLog with yaml defaults. yaml configurations can be overridden using options via nodetool command. - - nodetool enableauditlog - -**Options:** - -``--excluded-categories`` - Comma separated list of Audit Log Categories to be excluded for - audit log. If not set the value from cassandra.yaml will be used - -``--excluded-keyspaces`` - Comma separated list of keyspaces to be excluded for audit log. If - not set the value from cassandra.yaml will be used - -``--excluded-users`` - Comma separated list of users to be excluded for audit log. If not - set the value from cassandra.yaml will be used - -``--included-categories`` - Comma separated list of Audit Log Categories to be included for - audit log. If not set the value from cassandra.yaml will be used - -``--included-keyspaces`` - Comma separated list of keyspaces to be included for audit log. If - not set the value from cassandra.yaml will be used - -``--included-users`` - Comma separated list of users to be included for audit log. If not - set the value from cassandra.yaml will be used - -``--logger`` - Logger name to be used for AuditLogging. Default BinAuditLogger. If - not set the value from cassandra.yaml will be used - - -#### NodeTool command to disable AuditLog - -``disableauditlog``: Disables AuditLog. - - - nodetool disableauditlog - - - - - - - -#### NodeTool command to reload AuditLog filters - -``enableauditlog``: The NodeTool enableauditlog command can be used to reload auditlog filters when called with the default or previous ``loggername`` and updated filters - - nodetool enableauditlog --loggername --included-keyspaces - ------ - -## Conclusion - -Now that Apache Cassandra ships with audit logging out of the box, users -can easily capture data change events to a persistent record indicating -what happened, when it happened, and where the event originated. This -type of information remains critical to modern enterprises operating in -a diverse regulatory environment.
While audit logging represents one of -many steps forward in the 4.0 release, we believe that it will uniquely -enable enterprises to use the database in ways they could not -previously. diff --git a/src/_posts/2018-12-03-introducing-transient-replication.markdown b/src/_posts/2018-12-03-introducing-transient-replication.markdown deleted file mode 100644 index ec2942f53..000000000 --- a/src/_posts/2018-12-03-introducing-transient-replication.markdown +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: post -title: "Introducing Transient Replication" -date: 2018-12-03 01:00:00 -0700 -author: The Apache Cassandra Community -categories: blog ---- - -Transient Replication is a new experimental feature soon to be available in 4.0. When enabled, it allows for the creation of keyspaces where replication factor can be specified as a number of copies (full replicas) and temporary copies (transient replicas). Transient replicas retain the data they replicate only long enough for it to be propagated to full replicas, via incremental repair, at which point the data is deleted. Writing to transient replicas can be avoided almost entirely if monotonic reads are not required because it is possible to achieve a quorum of acknowledged writes without them. - -This results in a savings in disk space, CPU, and IO. By deleting data as soon as it is no longer needed, transient replicas require only a fraction of the disk space of a full replica. By not having to store the data indefinitely, the CPU and IO required for compaction is reduced, and read queries are faster as they have less data to process. - - So what are the benefits of not actually keeping a full copy of the data? Well, for some installations and use cases, transient replicas can be almost free if [monotonic reads](https://en.wikipedia.org/wiki/Consistency_model#Monotonic_Read_Consistency) are disabled. In future releases where monotonic reads are supported with Transient Replication, enabling monotonic reads would reduce the savings in CPU and IO, but even then they should still be significant. - -Transient Replication is designed to be transparent to applications: - -* Consistency levels continue to produce the same results for queries. -* The number of replicas that can be lost before data loss occurs is unchanged. -* The number of replicas that can be unavailable before some queries start to timeout or return unavailable is unchanged (with the exception of ONE). - -With Transient Replication, you can go from 3 replicas to 5 replicas, two of which are transient, without adding any hardware. - -If you are running an active-passive 2 DC setup with 3 replicas in each DC, you can make one replica in each DC transient and still have four full copies of the data in total. - -## Feature support - -Transient Replication is not intended to fully replace Cassandra's existing approach to replication. There are features that currently don't work with transiently replicated keyspaces and features that are unlikely ever to work with them. - -You can have keyspaces with and without Transient Replication enabled in the same cluster, so it is possible to use Transient Replication for just the use cases that are a good fit for the currently available functionality. 
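Because replication is configured per keyspace, transiently replicated and conventional keyspaces can sit side by side in one cluster. A small, hypothetical illustration (keyspace and datacenter names are invented; the "3/1" notation is explained later in this post):

```
CREATE KEYSPACE metrics  WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'DC1' : '3/1'};
CREATE KEYSPACE accounts WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'DC1' : '3'};
```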
- -### Currently unsupported but coming: - -* Monotonic reads -* Batch log -* LWT -* Counters - -### Will never be supported: - -* Secondary indexes -* Materialized views - -## How Transient Replication works - -### Overview - -Transient replication extends Cassandra's existing consistent hashing algorithm to designate some replicas of a point or range on the consistent hash ring as transient and some as full. The following image depicts a consistent hash ring with three replicas **A**, **B**, and **C**. The replicas are located at tokens 5, 10, 15 respectively. A key ***k*** hashes to token 3 on the ring. - -![A consistent hash ring without Transient Replication](/img/blog-post-introducing-transient-replication/diagram-hash-ring.gif "A consistent hash ring without Transient Replication") - -Replicas are selected by walking the ring clockwise starting at the point on the ring the key hashes to. At RF=3, the replicas of key ***k*** are **A**, **B**, **C**. -With Transient Replication, the last N replicas (where N is the configured number of transient replicas) found while walking the ring are designated as transient. - -There are no nodes designated as transient replicas or full replicas. All nodes will fully replicate some ranges on the ring and transiently replicate others. - -The following image depicts a consistent hash ring at RF=3/1 (three replicas, one of which is transient). The replicas of ***k*** are still **A**, **B**, and **C**, but **C** is now transiently replicating ***k***. - -![A consistent hash ring with Transient Replication](/img/blog-post-introducing-transient-replication/diagram-hash-ring-with-transient-replica.gif "A consistent hash ring with Transient Replication") - -Normally all replicas of a range receive all writes for that range, as depicted in the following image. - -![Normal write behavior](/img/blog-post-introducing-transient-replication/diagram-regular-write.gif "Normal write behavior") - -Transient replicas do not receive writes in the normal write path. - -![Transient write behavior](/img/blog-post-introducing-transient-replication/diagram-transient-write.gif "Transient write behavior") - -If sufficient full replicas are unavailable, transient replicas will receive writes. - -![Transient write with unavailable node](/img/blog-post-introducing-transient-replication/diagram-transient-write-down-node.gif "Transient write with unavailable node") - -This optimization, which is possible with Transient Replication, is called Cheap Quorums. This minimizes the amount of work that transient replicas have to do at write time, and reduces the amount of background compaction they will have to do. - -**Cheap Quorums and monotonic reads:** Cheap Quorums may end up being incompatible with an initial implementation of monotonic reads, and operators will be able to make a conscious trade off between performance and monotonic reads. - -### Rapid write protection - -In keyspaces utilizing Transient Replication, writes are sent to every full replica and enough transient replicas to meet the requested consistency level (to make up for unavailable full replicas). In addition, enough transient replicas are selected to reach a quorum in every datacenter, though unless the consistency level requires it, the write will be acknowledged without ensuring all have been delivered. - -Because not all replicas are sent the write, it's possible that insufficient replicas will respond, causing timeouts.
To prevent this, we implement rapid write protection, similar to rapid read protection, that sends writes to additional replicas if sufficient acknowledgements to meet the consistency level are not received promptly. - -The following animation shows rapid write protection in action. - -![Animation of rapid write protection preventing a write timeout](/img/blog-post-introducing-transient-replication/diagram-rapid-write-protection.gif "Rapid write protection preventing a write timeout") - -Rapid write protection is configured similarly to rapid read protection using the table option `additional_write_policy`. The policy determines how long to wait for acknowledgements before sending additional mutations. The default is to wait for P99 of the observed latency. - -### Incremental repair - -Incremental repair is used to clean up transient data at transient replicas and propagate it to full replicas. - -When incremental repair occurs transient replicas stream out transient data, but don't receive any. Anti-compaction is used to separate transient and fully replicated data so that only fully replicated data is retained once incremental repair completes. - -The result of running an incremental repair is that all full replicas for a range are synchronized and can be used interchangeably to retrieve the repaired data set for a query. - -### Read path - -Reads must always include at least one full replica and can include as many replicas (transient or full) as necessary to achieve the desired consistency level. At least one full replica is required in order to provide the data not available at transient replicas, but it doesn't matter which full replica is picked because incremental repair synchronizes the repaired data set across full replicas. - - Reads at transient replicas are faster than reads at full replicas because reads at transient replicas are unlikely to return any results if monotonic reads are disabled, and they haven't been receiving writes. - -## Creating keyspaces with Transient Replication - -Transient Replication is supported by SimpleStrategy and NetworkTopologyStrategy. When specifying the replication factor, you can specify the number of transient replicas in addition to the total number of replicas (including transient replicas). The syntax for a replication factor of 3 replicas total with one of them being transient would be “3/1”. - -``` -ALTER KEYSPACE foo WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'DC1' : '3/1'}; -ALTER KEYSPACE foo WITH REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : '3/1'}; -``` - -Monotonic reads are not supported with Transient Replication in 4.0, so any existing tables in the keyspace must have monotonic reads disabled by setting `read_repair = 'NONE'` - -Once the keyspace has been altered, you will need to run incremental repair and then nodetool cleanup to ensure transient data is cleaned up. - -## Operational matters - -Transient replication requires rolling incremental repair to be run regularly in order to move data from transient replicas to full replicas. By default transient replicas will receive 1% of writes for transiently replicated ranges due to rapid write protection. If a node is down for an extended period of time, its transient replicas will receive additional write load and that data should be cleaned up using incremental repair. Running incremental repair regularly will ensure that the size of each repair is small. 
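Putting those steps together, a sketch of the sequence an operator might run when converting an existing keyspace (keyspace and table names are hypothetical; the `read_repair` setting and the repair/cleanup cycle are the requirements described above):

```
# Transient Replication is experimental and must first be enabled in cassandra.yaml.

# 1. Disable monotonic reads on the keyspace's existing tables.
cqlsh -e "ALTER TABLE foo.bar WITH read_repair = 'NONE';"

# 2. Move the keyspace to three replicas per DC, one of them transient.
cqlsh -e "ALTER KEYSPACE foo WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'DC1' : '3/1'};"

# 3. On each node, run incremental repair (the default repair mode in 4.0), then clean up transient data.
nodetool repair foo
nodetool cleanup foo
```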
- -It's also a good idea to run a small number of vnodes with transient replication so that when a node goes down the load is spread out over several other nodes that transiently replicate that range. Large numbers of vnodes are known to be problematic, so it's best to start with a cluster that is already close to or at its maximum size so that a small number of vnodes will be sufficient. If you intend to grow the cluster in the future, you will need to be cognizant of how this will interact with the number of vnodes you select. - -While the odds of any data loss should multiple nodes be permanently lost remain the same with transient replication, the magnitude of potential data loss does not. With 3/1 transient replication the permanent loss of two nodes could result in the loss of the entirety of the repaired data set. If you are running a multi-DC setup with a high level of replication such as 2 DCs, with 3/1 replicas in each, then you will have 4 full copies total and the added risk of transient replication is minimal. - -## Experimental features - -Experimental features are a relatively new idea for Apache Cassandra. Although we recently voted to make materialized views an experimental feature retroactively, Transient Replication is the first experimental feature to be introduced as such. - -The goal of introducing experimental features is to allow for incremental development across multiple releases. In the case of Transient Replication, we can avoid a giant code drop that heavily modifies the code base, and the associated risks with incorporating a new feature that way. - -What it means for a feature to be experimental doesn't have a set definition, but for Transient Replication it's intended to set expectations. As of 4.0, Transient Replication's intended audience is expert operators of Cassandra with the ability to write the book on how to safely deploy Transient Replication, debug any issues that result, and if necessary contribute code back to address problems as they are discovered. - -It's expected that the feature set for Transient Replication will not change in minor updates to 4.0, but eventually it should be ready for use by a wider audience. - -## Next steps for Transient Replication - -If increasing availability or saving on capacity sounds good to you, then you can help make transient replication production-ready by testing it out or even deploying it. Experience and feedback from the community is one of the things that will drive transient replication bug fixing and development. diff --git a/src/_posts/2019-04-09-benchmarking_streaming.markdown deleted file mode 100644 index 71b04a138..000000000 --- a/src/_posts/2019-04-09-benchmarking_streaming.markdown +++ /dev/null @@ -1,78 +0,0 @@ ---- -layout: post -title: "Even Higher Availability with 5x Faster Streaming in Cassandra 4.0" -date: 2019-04-09 01:00:00 -0700 -author: The Apache Cassandra Community -categories: blog ---- - -Streaming is a process where nodes of a cluster exchange data in the form of SSTables. Streaming can kick in during many situations such as bootstrap, repair, rebuild, range movement, cluster expansion, etc. In this post, we discuss the massive performance improvements made to the streaming process in Apache Cassandra 4.0. - -## High Availability -As we know Cassandra is a Highly Available, Eventually Consistent database.
The way it maintains its legendary availability is by storing redundant copies of data in nodes known as replicas, usually running on commodity hardware. During normal operations, these replicas may end up having hardware issues causing them to fail. As a result, we need to replace them with new nodes on fresh hardware. - -As part of this replacement operation, the new Cassandra node streams data from the neighboring nodes that hold copies of the data belonging to this new node's token range. Depending on the amount of data stored, this process can require substantial network bandwidth, taking some time to complete. The longer these types of operations take, the more we are exposing ourselves to loss of availability. Depending on your replication factor and consistency requirements, if another node fails during this replacement operation, availability will be impacted. - -## Increasing Availability -To minimize the failure window, we want to make these operations as fast as possible. The faster the new node completes streaming its data, the faster it can serve traffic, increasing the availability of the cluster. Towards this goal, Cassandra 4.0 saw the addition of [Zero Copy](https://en.wikipedia.org/wiki/Zero-copy) streaming. For more details on Cassandra's zero copy implementation, see this blog post and [CASSANDRA-14556](https://issues.apache.org/jira/browse/CASSANDRA-14556). - -## Talking Numbers -To quantify the results of these improvements, we, at Netflix, measured the performance impact of streaming in 4.0 vs 3.0, using our open source [NDBench](https://github.com/Netflix/ndbench) benchmarking tool with the CassJavaDriverGeneric plugin. Though we knew there would be improvements, we were still amazed by the overall results of a **five fold increase** in streaming performance. The test setup and operations are all detailed below. - -### Test Setup -In our test setup, we used the following configurations: -* 6-node clusters on i3.xl, i3.2xl, i3.4xl and i3.8xl EC2 instances, each on 3.0 and trunk (sha dd7ec5a2d6736b26d3c5f137388f2d0028df7a03). -* Table schema -
-CREATE TABLE testing.test (
-    key text,
-    column1 int,
-    value text,
-    PRIMARY KEY (key, column1)
-) WITH CLUSTERING ORDER BY (column1 ASC)
-    AND bloom_filter_fp_chance = 0.01
-    AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
-    AND comment = ''
-    AND compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy'}
-    AND compression = {'enabled': 'false'}
-    AND crc_check_chance = 1.0
-    AND dclocal_read_repair_chance = 0.1
-    AND default_time_to_live = 0
-    AND gc_grace_seconds = 864000
-    AND max_index_interval = 2048
-    AND memtable_flush_period_in_ms = 0
-    AND min_index_interval = 128
-    AND read_repair_chance = 0.0
-    AND speculative_retry = '99PERCENTILE';
-
- - -* Data size per node: 500GB -* No. of tokens per node: 1 (no vnodes) - -To trigger the streaming process, we used the following steps in each of the clusters: -* terminate a node -* add a new node as a replacement -* measure the time taken to complete streaming data by the new node replacing the terminated node - -For each cluster and version, we repeated this exercise multiple times to collect several samples. - -Below is the distribution of streaming times we found across the clusters: -![Benchmark results](/img/blog-post-benchmarking-streaming/cassandra_streaming.png "Benchmark results") - -### Interpreting the Results -There are many conclusions one can draw from the graph above. Some of them are: -* 3.0 streaming times are inconsistent and show a high degree of variability (fat distributions across multiple samples) -* 3.0 streaming is highly affected by the instance type and generally looks CPU bound -* Zero Copy streaming is approximately 5x faster -* Zero Copy streaming time shows little variability in its performance (thin distributions across multiple samples) -* Zero Copy streaming performance is not CPU bound and remains consistent across instance types - -It is clear from the performance test results that Zero Copy Streaming has a huge performance benefit over the current streaming infrastructure in Cassandra. But what does it mean in the real world? The following key points are the main takeaways. - -**MTTR (Mean Time to Recovery):** MTTR is a KPI (Key Performance Indicator) that is used to measure how quickly a system recovers from a failure. Zero Copy Streaming has a very direct impact here with a **five fold improvement** on performance. - -**Costs:** Zero Copy Streaming is ~5x faster. This translates directly into cost for some organizations primarily as a result of reducing the need to maintain spare server or cloud capacity. In other situations where you’re migrating data to larger instance types or moving AZs or DCs, this means that instances that are sending data can be turned off sooner, saving costs. An added cost benefit is that now you don’t have to over-provision the instance. You get a similar streaming performance whether you use an i3.xl or an i3.8xl provided the bandwidth is available to the instance. - -**Risk Reduction:** There is a great reduction in the risk due to Zero Copy Streaming as well. Since a cluster’s recovery mainly depends on the streaming speed, Cassandra clusters with failed nodes will be able to recover much more quickly (5x faster). This means the window of vulnerability is reduced significantly, in some situations down to a few minutes. - -Finally, a benefit that we generally don’t talk about is the environmental benefit of this change. Zero Copy Streaming enables us to move data very quickly through the cluster. It objectively reduces the number and sizes of instances that are used to build a Cassandra cluster. As a result not only does it reduce Cassandra’s TCO (Total Cost of Ownership), it also helps the environment by consuming fewer resources!
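For readers who want to exercise the same code path, the zero copy behaviour benchmarked above is controlled from `cassandra.yaml` in 4.0. A hedged sketch (setting names as they appear in the 4.0 configuration; the throughput value is purely illustrative):

```
# Illustrative values only.
stream_entire_sstables: true                        # zero copy (entire-SSTable) streaming; enabled by default in 4.0
stream_throughput_outbound_megabits_per_sec: 200    # per-node cap on outbound streaming throughput

# A replacement node, as used in the test above, is typically started with:
#   -Dcassandra.replace_address_first_boot=<address_of_terminated_node>
```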
diff --git a/src/_posts/2020-07-20-apache-cassandra-4-0-beta1.markdown b/src/_posts/2020-07-20-apache-cassandra-4-0-beta1.markdown deleted file mode 100644 index b62fb87e2..000000000 --- a/src/_posts/2020-07-20-apache-cassandra-4-0-beta1.markdown +++ /dev/null @@ -1,72 +0,0 @@ ---- -layout: post -title: "Introducing Apache Cassandra 4.0 Beta: Battle Tested From Day One" -date: 2020-07-20 23:00:00 +0200 -author: The Apache Cassandra Community -categories: blog ---- - - -This is the most **stable** Apache Cassandra in history; you should start using Apache Cassandra 4.0 Beta today in your test and QA environments, head to the [downloads site](https://cassandra.apache.org/download/) to get your hands on it. The Cassandra community is on a mission to deliver a 4.0 GA release that is ready to be deployed to production. You can guarantee this holds true by running your application workloads against the Beta release and contributing to the community's validation effort to get Cassandra 4.0 to GA. - -With over 1,000 bug fixes, improvements and new features and the project's wholehearted [focus on quality](https://cassandra.apache.org/blog/2018/08/21/testing_apache_cassandra.html) with [replay, fuzz, property-based, fault-injection](https://cassandra.apache.org/blog/2018/10/17/finding_bugs_with_property_based_testing.html), and performance tests on clusters as large as 1,000 nodes and with hundreds of real world use cases and schemas tested, Cassandra 4.0 redefines what users should expect from any open or closed source database. With software, hardware, and QA testing donations from the likes of Instaclustr, iland, Amazon, and Datastax, this release has seen an unprecedented cross-industry collaboration towards releasing a battle-tested database with enterprise security features and an understanding of what it takes to deliver scale in the cloud. - -There will be no new features or breaking API changes in future Beta or GA builds. You can expect the time you put into the beta to translate into transitioning your production workloads to 4.0 in the near future. - -Quality in distributed infrastructure software takes time and this release is no exception. Open source projects are only as strong as the community of people that build and use them, so your feedback is a critical part of making this the best release in project history; share your thoughts on the [user or dev mailing lists](https://cassandra.apache.org/community/) or in the [#cassandra ASF slack channel](https://cassandra.apache.org/community/). - - -#### **Redefining the elasticity you should expect from your distributed systems with Zero Copy Streaming** - -**5x faster scaling operations** - -Cassandra streams data between nodes during scaling operations such as adding a new node or datacenter during peak traffic times. Thanks to the new Zero Copy Streaming functionality in 4.0, this critical operation is now up to [5x faster](https://cassandra.apache.org/blog/2019/04/09/benchmarking_streaming.html) without vnodes compared to previous versions, which means a more elastic architecture particularly in cloud and Kubernetes environments. - -Globally distributed systems have unique consistency caveats and Cassandra keeps the data replicas in sync through a process called repair. 
Many of the fundamentals of the algorithm for incremental repair were rewritten to harden and [optimize incremental repair](https://thelastpickle.com/blog/2018/09/10/incremental-repair-improvements-in-cassandra-4.html) for a faster and less resource intensive operation to maintain consistency across data replicas. - - -#### **Giving you visibility and control over what's happening in your cluster with real time Audit Logging and Traffic Replay** - -**Enterprise-grade security & observability** - -To ensure regulatory and security compliance with SOX, PCI or GDPR, it's critical to understand who is accessing data and when they are accessing it. Cassandra 4.0 delivers a long awaited [audit logging feature](https://cassandra.apache.org/blog/2018/10/29/audit_logging_cassandra.html) for operators to track the DML, DDL, and DCL activity with minimal impact to normal workload performance. Built on the same underlying implementation, there is also a new [fqltool](https://cassandra.apache.org/doc/latest/new/fqllogging.html) that allows the capture and replay of production workloads for analysis. - -There are [new controls](https://thelastpickle.com/blog/2018/05/08/cassandra-4.0-datacentre-security-improvements.html) to enable use cases that require data access on a per data center basis. For example, if you have a data center in the United States and a data center in Europe, you can now configure a Cassandra role to only have access to a single data center using the new CassandraNetworkAuthorizer. - -For years, the primary way to observe Cassandra clusters has been through JMX and open source tools such as Instaclustr's [Cassandra Exporter](https://github.com/instaclustr/cassandra-exporter) and DataStax's [Metrics Collector](https://github.com/datastax/metric-collector-for-apache-cassandra). In this most recent version of Cassandra you can selectively expose system metrics or configuration settings via [Virtual Tables](https://thelastpickle.com/blog/2019/03/08/virtual-tables-in-cassandra-4_0.html) that are consumed like any other Cassandra table. This delivers flexibility for operators to ensure that they have the signals in place to keep their deployments healthy. - - -#### **Looking to the future with Java 11 support and ZGC** - -One of the most exciting features of Java 11 is the new [Z Garbage Collector (ZGC)](https://thelastpickle.com/blog/2018/08/16/java11.html) that aims to reduce GC pause times to a max of a few milliseconds with no latency degradation as heap sizes increase. This feature is still experimental and thorough testing should be performed before deploying to production. These improvements significantly improve the node availability profiles from garbage collection on a cluster which is why this feature has been included as experimental in the Cassandra 4.0 release. - - -#### **Part of a vibrant and healthy ecosystem** - -The third-party ecosystem has their eyes on this release and a number of utilities have already added support for Cassandra 4.0. These include the client driver libraries, Spring Boot and Spring Data, Quarkus, the DataStax Kafka Connector and Bulk Loader, The Last Pickle's Cassandra Reaper tool for managing repairs, Medusa for handling backup and restore, the Spark Cassandra Connector, The Definitive Guide for Apache Cassandra, and the list goes on. 
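As a quick illustration of the virtual tables mentioned above: they are read with ordinary CQL, and in 4.0 node-local data is exposed through the `system_views` and `system_virtual_schema` keyspaces (columns vary per table, so these examples simply select everything):

```
SELECT * FROM system_views.settings;   -- effective configuration of the local node
SELECT * FROM system_views.clients;    -- currently connected client sessions
```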
- -**Get started today** - -There's no doubt that open source drives innovation and the Cassandra 4.0 Beta exemplifies the value in a community of contributors that run Cassandra in some of the largest deployments in the world. - -To put it in perspective, if you use a website or a smartphone today, you're probably touching a Cassandra-backed system. - -To download the Beta, head to the [Apache Cassandra downloads site](https://cassandra.apache.org/download/). - -**Resources:** - -Apache Cassandra Blog: [Even Higher Availability with 5x Faster Streaming in Cassandra 4.0](https://cassandra.apache.org/blog/2019/04/09/benchmarking_streaming.html) - -The Last Pickle Blog: [Incremental Repair Improvements in Cassandra 4](https://thelastpickle.com/blog/2018/09/10/incremental-repair-improvements-in-cassandra-4.html) - -Apache Cassandra Blog: [Audit Logging in Apache Cassandra 4.0](https://cassandra.apache.org/blog/2018/10/29/audit_logging_cassandra.html) - -The Last Pickle Blog: [Cassandra 4.0 Data Center Security Enhancements](https://thelastpickle.com/blog/2018/05/08/cassandra-4.0-datacentre-security-improvements.html) - -The Last Pickle Blog: [Virtual tables are coming in Cassandra 4.0](https://thelastpickle.com/blog/2019/03/08/virtual-tables-in-cassandra-4_0.html) - -The Last Pickle Blog: [Java 11 Support in Apache Cassandra 4.0](https://thelastpickle.com/blog/2018/08/16/java11.html) - - - -![Apache Cassandra Infographic](/img/blog-post-apache-cassandra-4-0-beta1/apache-cassandra-infographic-final.jpg "Apache Cassandra Infographic") diff --git a/src/_sass/_code.scss b/src/_sass/_code.scss deleted file mode 100644 index 7b441eb10..000000000 --- a/src/_sass/_code.scss +++ /dev/null @@ -1,87 +0,0 @@ -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ - - code { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ - white-space: pre; - } -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} diff --git a/src/_sass/_colors.scss b/src/_sass/_colors.scss deleted file mode 100644 index fad887c75..000000000 --- a/src/_sass/_colors.scss +++ /dev/null @@ -1,12 +0,0 @@ -$green: #90bd50; -$dark-green: #557826; -$darkish-green: #80ac40; -$light-green: #a1cb65; - -$white: #f5f5f5; - -$gray: #404141; -$dark-gray: #2f3030; -$light-gray: #666; - -$yellow: #f5d35f; diff --git a/src/_sass/_pygments-light.sass 
b/src/_sass/_pygments-light.sass deleted file mode 100644 index 94ac46c8f..000000000 --- a/src/_sass/_pygments-light.sass +++ /dev/null @@ -1,146 +0,0 @@ -.hll - background-color: #ffffcc - margin: 0 -12px - padding: 0 12px - display: block -.c - color: #999988 - font-style: italic -.err - color: #a61717 - background-color: #e3d2d2 -.k - font-weight: bold -.o - font-weight: bold -.cm - color: #999988 - font-style: italic -.cp - color: #999999 - font-weight: bold -.c1 - color: #999988 - font-style: italic -.cs - color: #999999 - font-weight: bold - font-style: italic -.gd - color: #000000 - background-color: #ffdddd -.gd .x - color: #000000 - background-color: #ffaaaa -.ge - font-style: italic -.gr - color: #aa0000 -.gh - color: #999999 -.gi - color: #000000 - background-color: #ddffdd -.gi .x - color: #000000 - background-color: #aaffaa -.go - color: #888888 -.gp - color: #555555 -.gs - font-weight: bold -.gu - color: #800080 - font-weight: bold -.gt - color: #aa0000 -.kc - font-weight: bold -.kd - font-weight: bold -.kn - font-weight: bold -.kp - font-weight: bold -.kr - font-weight: bold -.kt - color: #445588 - font-weight: bold -.m - color: #009999 -.s - color: #dd1144 -.n - color: #333333 -.na - color: teal -.nb - color: #0086b3 -.nc - color: #445588 - font-weight: bold -.no - color: teal -.ni - color: purple -.ne - color: #990000 - font-weight: bold -.nf - color: #990000 - font-weight: bold -.nn - color: #555555 -.nt - color: navy -.nv - color: teal -.ow - font-weight: bold -.w - color: #bbbbbb -.mf - color: #009999 -.mh - color: #009999 -.mi - color: #009999 -.mo - color: #009999 -.sb - color: #dd1144 -.sc - color: #dd1144 -.sd - color: #dd1144 -.s2 - color: #dd1144 -.se - color: #dd1144 -.sh - color: #dd1144 -.si - color: #dd1144 -.sx - color: #dd1144 -.sr - color: #009926 -.s1 - color: #dd1144 -.ss - color: #990073 -.bp - color: #999999 -.vc - color: teal -.vg - color: teal -.vi - color: teal -.il - color: #009999 -.gc - color: #999 - background-color: #EAF2F5 diff --git a/src/_sass/_site-footer.scss b/src/_sass/_site-footer.scss deleted file mode 100644 index d27d12ac3..000000000 --- a/src/_sass/_site-footer.scss +++ /dev/null @@ -1,39 +0,0 @@ -footer { - margin-top: 38px; - padding-top: 10px; - padding-bottom: 20px; -} - -.social-blk { - float: right; -} - -.nopadding { - padding: 0 !important; - margin: 0 !important; -} - -.splash { - display:block; -} - -.author_contact { - display: inline-block; -} - -.author_gravatar { - display: inline; - padding: 0 20px 20px 5px; -} - -.share { - display: block; -} - -@media (max-width: 767px) { - .social-blk { - float: left; - width: 100%; - margin-bottom: 32px; - } -} diff --git a/src/_sass/_site-header.scss b/src/_sass/_site-header.scss deleted file mode 100644 index 2710884fb..000000000 --- a/src/_sass/_site-header.scss +++ /dev/null @@ -1,83 +0,0 @@ -/* Navigation bars. 
*/ -.topnav { - - background-color: $dark-gray; - - .breadcrumb { - background-color: $dark-gray; - padding: 8px 0px; - margin: auto; - color: $white; - - .asf-logo { - margin-right: 4px; - margin-top: -4px; - margin-bottom: -8px; - height: 30px; - } - } - - a { - color: $white; - &:hover, &:focus { - color: $yellow; - text-decoration: none; - } - } - - .navbar { - background-color: $green; - min-height:120px; - z-index: 0; - margin-bottom: 0px; - - .navbar-brand { - min-height:120px; - - img { float: left; } - } - - .nav { - padding: 30px 0 0 0; - color: #fff; - - li a { - text-align: center; - font-size: 24px; - padding: 5px 0 5px 5px; - margin: 0 10px; - color: $white; - - &:hover { - color: $yellow; - background: none; - } - } - } - - .navbar-header { - height:120px; - } - - .navbar-toggle { - position: relative; - top: 50%; - transform: translateY(-50%); - margin-top:0; - - .icon-bar { background-color:#FFF; } - &:focus .icon-bar { background-color:#000; } - } - - .navbar-collapse { - border-color: rgba(0, 0, 0, 0); - box-shadow: none; - } - } -} - -@media (min-width: 768px) and (max-width: 1024px) { - .navbar .nav li a { - padding: 8px 0 15px 8px; - } -} diff --git a/src/_sass/_site-landing.scss b/src/_sass/_site-landing.scss deleted file mode 100644 index 4d0d82c55..000000000 --- a/src/_sass/_site-landing.scss +++ /dev/null @@ -1,115 +0,0 @@ -/* Jumbotron part. */ -.jumbotron { - background: none; - max-width: 748px; - margin: 30px auto 40px auto; - padding: 0px; - - h1 { - font-size: 40px; - font-family: HelveticaNeue-Light,Helvetica,Arial,sans-serif; - line-height: 1.2; - color: $white; - text-align: center; - padding: 0; - margin-bottom: 28px; - } - - .lead { - line-height: 1.25; - color: white; - font-size: 15px; - font-weight: 100; - letter-spacing: 0.8px; - margin-bottom: 48px; - } - - p { - text-align: center; - margin-bottom: 0px; - } - - .btn.download { - border-radius: 8px; - font-size: 16px; - padding: 11px 28px; - margin-bottom: 4px; - color: $dark-green; - - &:hover, &:focus { color: $yellow; } - } - - a.changelog { - font-size: 15px; - display: inline-block; - color: $dark-green; - - &:hover, &:focus { - color: $yellow; - text-decoration: none; - } - } -} - -/* Features list. */ -.feature-list-group { - - padding-top: 20px; - padding-bottom: 61px; - - .header { - text-align: center; - - h3 { - font-size: 30px; - font-weight: 200; - text-align: center; - margin-bottom: 26px; - } - - .lead { - color: #777777; - font-size: 14px; - font-weight: normal; - line-height: 1.42857143; - margin-bottom: 55px; - } - } - - .feature-list { - margin-bottom: 25px; - } - - .feature-item h4 { - color: $green; - font-family: HelveticaNeue-Medium,Helvetica,Arial,sans-serif; - font-size: 16px; - text-transform: uppercase; - margin-bottom: 16px; - } - - .feature-item p { - font-family: HelveticaNeue-Light,Helvetica,Arial,sans-serif; - font-size: 15px; - } -} - - -@media (max-width: 767px) { - - .jumbotron > .lead { - font-weight: 200; - } - - .feature-list-group { - padding: 36px 9px 6px 9px; - } - - .feature-list { - margin-bottom: 0px; - } - - .feature-item { - margin-bottom: 48px; - } -} diff --git a/src/_sass/_sphinx-basic.scss b/src/_sass/_sphinx-basic.scss deleted file mode 100644 index 3f29bd7c2..000000000 --- a/src/_sass/_sphinx-basic.scss +++ /dev/null @@ -1,604 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox input[type="text"] { - width: 170px; -} - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable dl, table.indextable dd { - margin-top: 0; - margin-bottom: 0; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.field-list ul { - 
padding-left: 1em; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.field-list td, table.field-list th { - border: 0 !important; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, .highlighted { - background-color: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target 
{ - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} diff --git a/src/_templates/2018-06-22-blog_post_template.markdown b/src/_templates/2018-06-22-blog_post_template.markdown deleted file mode 100644 index f5f847303..000000000 --- a/src/_templates/2018-06-22-blog_post_template.markdown +++ /dev/null @@ -1,33 +0,0 @@ ---- -layout: post -title: "Lorem ipsum dolor sit amet: consectetur adipiscing elit" -date: 2018-06-05 20:00:00 -0700 -author: the Apache Cassandra Community -categories: blog ---- - -Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla vel hendrerit nisi, pellentesque placerat magna. Sed et placerat erat. Duis nec ex a dui ullamcorper hendrerit eget id dui. Pellentesque eget varius tortor. Nullam non nisi interdum, consectetur neque nec, interdum augue. Fusce vitae facilisis urna. Donec iaculis sapien sed gravida laoreet. Donec convallis enim non lacus blandit commodo. Nullam viverra risus et lorem aliquam, ac tincidunt justo finibus. Morbi a felis ac justo fringilla porttitor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Mauris faucibus est ante, eu luctus tortor egestas in. Aliquam erat volutpat. 
- -Ut imperdiet euismod sollicitudin: - -#### Nunc at semper tellus -##### Vel pharetra lectus - -Vestibulum ut neque accumsan, placerat felis fermentum, tempor lacus. Suspendisse hendrerit lacus a risus commodo, quis pulvinar ipsum dictum. Integer auctor, tortor non sollicitudin iaculis, justo lacus dignissim tellus, in posuere massa metus vitae turpis. Maecenas vel turpis quis nulla maximus pretium ornare ultricies enim. Nam tristique velit sed lorem malesuada, ut rutrum libero maximus. Etiam interdum erat id nisl facilisis imperdiet ac at orci. Quisque quis porttitor neque. Pellentesque elit neque, viverra vel congue in, ullamcorper in felis. Nunc dignissim tellus et felis porta pellentesque. - - ---- - -#### Aliquam orci orci, sodales eget justo sit amet -##### Nunc rhoncus massa - -Suspendisse blandit nunc vitae commodo pretium. Interdum et malesuada fames ac ante ipsum primis in faucibus. Pellentesque pharetra, nisi convallis blandit molestie, quam enim sodales nisl, ac suscipit diam lectus at justo. Curabitur at dignissim enim, eu pellentesque nisi. Nulla facilisi. Cras eget blandit felis, non tincidunt risus. Ut eleifend nisi in felis lacinia, quis volutpat odio sagittis. - -Vivamus tincidunt efficitur quam ac tempor. Proin varius vel ex tincidunt placerat. - ---- - -#### Mauris dapibus mauris eget tortor interdum -##### Et congue lacus rutrum -Suspendisse ac dignissim diam. Donec rutrum nisi nec vestibulum congue. Ut placerat sodales turpis, at tincidunt tortor scelerisque consequat. Nullam ac neque rutrum, malesuada ex vel, pharetra turpis. Donec nisl justo, viverra at blandit a, dignissim in est. Quisque et varius lacus. Suspendisse placerat laoreet felis id elementum. Nunc ut magna laoreet odio faucibus faucibus sit amet ac dolor. Nam ac tortor tellus. Mauris scelerisque magna urna, nec aliquam est pharetra eu. Curabitur vitae mauris dictum, tempor nisl et, vestibulum velit. - diff --git a/src/apachecon_cfp.md b/src/apachecon_cfp.md deleted file mode 100644 index 086854735..000000000 --- a/src/apachecon_cfp.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -layout: page -permalink: /events/2019-apache-cassandra-summit/ -title: Announcing the 2019 Apache Cassandra Summit -is_homepage: false -is_sphinx_doc: false ---- - -Announcing the 2019 Apache Cassandra Summit ---------- - -Join the Apache Cassandra community for the 2019 Apache Cassandra Summit, hosted with ApacheCon in Las Vegas, NV (Sept 9 - 13). - ---- - - -### Apache Cassandra at ApacheCon - -![ApacheCon 2019](/img/apachecon-2019.jpg){:style="float: right; width: 400px"} - -For more information about other events at ApacheCon, see [ApacheCon 2019](https://apachecon.com/acna19/index.html). - -  - -#### Day One: Next Generation Cassandra Conference (NGCC) -NGCC (Next Generation Cassandra Conference), a one-day event for Apache Cassandra contributors and large system operators to meet, discuss, and plan future development in the project. NGCC will take place on **Tuesday, September 10th**. - -NGCC is an advanced event targeted toward Apache Cassandra contributors and large system / platform operators. Content will focus on Cassandra internals and is geared toward those with detailed knowledge of the codebase and architecture. All are welcome to attend. - -  - -#### Day Two: Apache Cassandra Summit -The Apache Cassandra Summit, a one-day event for Apache Cassandra users to meet, network, and learn about what's new and what's coming in Cassandra. The Apache Cassandra Summit will be held on **Wednesday, Sept 11**. 
- -The Apache Cassandra Summit is targeted toward a wider audience. Topics should be interesting and accessible to those whose first introduction to Cassandra is at this event, and those who have been active in the community for many years. - ---- - -### Call for Presentations - -We're excited to announce the Call for Presentations is now open for both, closing Monday, May 13 at 2pm BST (UTC+1). - -  - -#### Apache Cassandra Summit CFP (40-minute presentation, 5-minute Q&A) – - -**Example proposals might include:** -- Lessons learned operating Apache Cassandra at scale. -- Customizations and ways members of the community have extended Apache Cassandra to make it a great fit for their use case. -- Stability improvements, performance enhancements, and new features in an upcoming Cassandra release. -- Something we haven't thought about that a general audience would be interested to hear. - -  - -#### Next Generation Cassandra Conference CFP (30-minute presentation, 15m breakout) – - -**Example proposals might include:** -- Presentations from contributors and large operators covering pain points and proposals to address them -- Planned or proposed improvements in specific areas of the Apache Cassandra codebase -- Planned or proposed improvements we can make to strengthen and empower the Apache Cassandra community. -- Something we haven't thought about that advanced operators / contributors would be interested to hear. - -  - -[Click here](https://asf.jamhosted.net/cfp.html) to submit a proposal for the Apache Cassandra Summit or NGCC. - -When submitting, please ensure you select "Cassandra" as the category. For NGCC submissions, please include "NGCC Proposal" in the "Optional Notes" field. - diff --git a/src/blog/index.html b/src/blog/index.html deleted file mode 100644 index 5cfd801d2..000000000 --- a/src/blog/index.html +++ /dev/null @@ -1,11 +0,0 @@ ---- -layout: blog -title: "Blog" -is_homepage: false -pagination: - enabled: true ---- - -

Apache Cassandra Blog

-

Have something to share with the community? Let us know on the mailing list!

- diff --git a/src/community.md b/src/community.md deleted file mode 100644 index 3c98c75b2..000000000 --- a/src/community.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -layout: page -permalink: /community/ -title: Community -is_homepage: false -is_sphinx_doc: false ---- - -Community ---------- - -### Getting and keeping in touch - -#### Mailing lists - -Discussion and questions on Cassandra's usage and development happens mainly on the following mailing lists: - -* [Users](http://www.mail-archive.com/user@cassandra.apache.org/): General mailing list for user questions and discussions. This is also where new releases are announced - ([subscribe](mailto:user-subscribe@cassandra.apache.org) | [unsubscribe](mailto:user-unsubscribe@cassandra.apache.org) | [Archives](https://lists.apache.org/list.html?user@cassandra.apache.org)). -* [Developers](http://www.mail-archive.com/dev@cassandra.apache.org/): Questions and discussions related to Cassandra development - ([subscribe](mailto:dev-subscribe@cassandra.apache.org) | [unsubscribe](mailto:dev-unsubscribe@cassandra.apache.org) | [Archives](https://lists.apache.org/list.html?dev@cassandra.apache.org)). -* [Commits](http://www.mail-archive.com/commits@cassandra.apache.org/): Notification on commits done to the source - repository and on [JIRA](https://issues.apache.org/jira/browse/CASSANDRA) updates. This is a fairly noisy mailing list - mostly useful for Cassandra developers and those who would like to keep close tabs on Cassandra's development - ([subscribe](mailto:commits-subscribe@cassandra.apache.org) | [unsubscribe](mailto:commits-unsubscribe@cassandra.apache.org) | [Archives](https://lists.apache.org/list.html?commits@cassandra.apache.org)). - -#### Slack - -We have recently moved to the ASF Slack organization for all chat. Please [sign up for an account](https://s.apache.org/slack-invite) to participate. - -* `#cassandra` - for user questions and general discussions -* `#cassandra-dev` - strictly for questions or discussions related to Cassandra development -* `#cassandra-builds` - results of automated test builds -* `#cassandra-builds-patches` - results of patch test builds - -### Stack Overflow - -You can also check the [Q&A about using Cassandra](http://stackoverflow.com/questions/tagged/cassandra) on Stack -Overflow. - -### Books and publications - -* [Cassandra: The Definitive Guide, 3rd Edition](http://shop.oreilly.com/product/0636920299837.do), by Jeff Carpenter and Eben Hewitt. Updated for Cassandra 4.0 -* [Mastering Apache Cassandra, 2nd Edition](https://www.amazon.com/Mastering-Apache-Cassandra-Nishant-Neeraj/dp/1784392618/), by Nishant Neeraj -* [Learning Apache Cassandra - Manage Fault Tolerant and Scalable Real-Time Data](https://www.amazon.com/Learning-Apache-Cassandra-Tolerant-Real-Time/dp/1783989203/), by Mat Brown -* [Cassandra: a decentralized structured storage system](https://dl.acm.org/citation.cfm?id=1773922), by Avinash Lakshman and Prashant Malik - -### Third-party projects - -There are a number of third-party Cassandra projects that could be useful. Check out this [listing](https://cassandra.apache.org/third-party/). - -### Reporting bugs - -If you encounter a problem with Cassandra, the first places to ask for help are the [user mailing list](#mailing) -and the `#cassandra` [Slack channel](https://s.apache.org/slack-invite). 
- -If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the [Apache Cassandra JIRA tracking system](https://issues.apache.org/jira/browse/CASSANDRA). Please provide as much -detail as you can on your problem. Don't forget to indicate which version of Cassandra you are running and on which environment. diff --git a/src/css/sphinx.scss b/src/css/sphinx.scss deleted file mode 100644 index 85648548a..000000000 --- a/src/css/sphinx.scss +++ /dev/null @@ -1,254 +0,0 @@ ---- ---- - -/* - * Additional styling for sphinc doc pages - */ - -@import 'colors'; -@import 'pygments-light'; -@import 'sphinx-basic'; - -$menu-main-background: $green; -$menu-main-color: $white; -$menu-main-hover-color: $yellow; - -$menu-selected-background: #e2e2e2; -$menu-selected-color: $gray; -$menu-selected-hover-color: $dark-green; - -/* Documentation menu navigation */ - -.doc-navigation { - - button.navbar-toggle { - border-color: #000; - } - - .icon-bar { - background-color: #000; - } -} - -.doc-navigation.fixed-navigation { - margin-top: 10px; -} - -@media (max-width: 991px) { - .doc-navigation { - .collapse { - display: none!important; - } - - .navbar-toggle { - display: block; - } - - .navbar-header { - float: none; - } - - .collapsing { - overflow: hidden!important; - } - - .navbar-collapse.collapse.in { - display: block!important; - } - } -} - -@media (min-width: 991px) { - - .doc-navigation { - - margin-top: 40px; - background-color: $menu-main-background; - max-width: 350px; - z-index: 200; - overflow-x: hidden; - overflow-y: hidden; - padding-top: 10px; - padding-bottom: 10px; - border-radius: 8px; - - .nav { - padding-top: 10px; - padding-bottom: 10px; - } - - .navbar-collapse { - padding: 0; - max-height: none; - } - - #doc-search-form { - text-align: center; - margin-bottom: 20px; - - input { - border-radius: 30px; - width: 100%; - } - } - - ul { - padding-left: 0; - float: none; - } - - ul:not { - display: block; - } - - li { - list-style-type: none; - } - - li.toctree-l1 { - padding-top: 2px; - padding-bottom: 2px; - - a { - color: $menu-main-color; - padding-top: 2px; - padding-bottom: 2px; - margin: 0; - display: block; - padding-left: 10px; - padding-right: 10px; - - &:hover { - color: $menu-main-hover-color; - text-decoration: none; - } - } - } - - li.toctree-l1.current { - background-color: $menu-selected-background; - border-top: 1px solid $darkish-green; - border-bottom: 1px solid $darkish-green; - - > a { - color: $menu-selected-color; - - &:hover { - color: $menu-selected-hover-color; - text-decoration: none; - } - } - } - - li.toctree-l2 { - a { - color: $menu-selected-color; - margin-left: 10px; - font-size: 96%; - //border-left: 1px solid #99c65d; - &:hover { - color: $menu-selected-hover-color; - } - } - } - - li.toctree-l3 { - margin: 0; - - a { - padding-left: 30px; - color: $menu-selected-color; - font-size: 95%; - } - } - } -} - -ul.search li { - background-image: none; -} - -.doc-prev-next-links { - margin-top: 40px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - - border: 1px solid transparent; - border-radius: 4px; - box-shadow: 0 1px 1px rgba(0,0,0,.05); - -webkit-box-shadow: 0 1px 1px rgba(0,0,0,.05); - - margin: 20px 10px; - padding: 0; - - dt { - font-weight: bold; - } - - dl { - margin-bottom: 0; - } - - p { - padding: 15px; - } - - p.admonition-title { - padding: 10px 15px; - font-weight: bold; - border-bottom: 1px solid transparent; - } 
-} - -.admonition.admonition-todo { - border-color: #337ab7; - - .admonition-title { - color: #fff; - border-color: #337ab7; - background-color: #337ab7; - } -} - -.admonition.warning { - border-color: #ebccd1; - - .admonition-title { - color: #a94442; - border-color: #ebccd1; - background-color: #f2d2d2; - } -} - -.admonition.note, .admonition.hint { - border-color: #d6e9c6; - - .admonition-title { - color: #3c763d; - border-color: #d6e9c6; - background-color: #dff0d8; - } -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - margin: 20px 0px 30px 20px; - - td, th { - border:0; - } - - thead { - background-color: #ddd; - border-top: 1px solid #aaa; - border-bottom: 1px solid #aaa; - } - - tbody { - border-bottom: 1px solid #aaa; - } -} diff --git a/src/css/style.scss b/src/css/style.scss deleted file mode 100644 index a8ad56cba..000000000 --- a/src/css/style.scss +++ /dev/null @@ -1,85 +0,0 @@ ---- ---- - -/* - * Main website stylesheet - */ - -@import 'colors'; -@import 'site-header'; -@import 'site-landing'; -@import 'site-footer'; -@import 'code'; - - -img { - max-width:100%; -} - -body { - background-color: #fff; -} - -.doc-content { - margin-top: 40px; -} - -#wipwarning { - font-size: 14px; - border: 1px solid #ebccd1; - border-radius: 4px; - color: #a94442; - background-color: #f2dede; - padding: 10px 30px; - margin: 30px 20px; -} - - -// For the documentation landing page - -.biglink { - font-size: 120%; -} - -.linkdescr { - font-size: 90%; -} - -.doc-landing-table { - margin: 10px 30px 20px 30px; - width: 90%; - - td { - padding-top: 10px; - } - - .right-column { - padding-left: 100px; - } -} - -.doc-landing-metainfos { - margin: 10px 30px 0px 30px; - - p { - margin-bottom: 10px; - } -} - -ul.blog-post-listing { - list-style: none; - padding-left: 0; -} - -li.blog-post { - padding-bottom: 20px; -} - - -//RSS - -.subscribe-rss { - font-size: 2em; - line-height: 2em; - vertical-align: .1em; -} \ No newline at end of file diff --git a/src/doc/.htaccess b/src/doc/.htaccess deleted file mode 100644 index c744671d9..000000000 --- a/src/doc/.htaccess +++ /dev/null @@ -1,2 +0,0 @@ -Options +Indexes -IndexOptions +VersionSort diff --git a/src/doc/3.11 b/src/doc/3.11 deleted file mode 120000 index 82920a35c..000000000 --- a/src/doc/3.11 +++ /dev/null @@ -1 +0,0 @@ -3.11.7 \ No newline at end of file diff --git a/src/doc/3.11.3/.buildinfo b/src/doc/3.11.3/.buildinfo deleted file mode 100644 index 674cf1361..000000000 --- a/src/doc/3.11.3/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: e47c73ee84c65416f43d72ffa766e896 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/src/doc/3.11.3/_sources/architecture/dynamo.rst.txt b/src/doc/3.11.3/_sources/architecture/dynamo.rst.txt deleted file mode 100644 index a7dbb8750..000000000 --- a/src/doc/3.11.3/_sources/architecture/dynamo.rst.txt +++ /dev/null @@ -1,139 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. 
-.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dynamo ------- - -.. _gossip: - -Gossip -^^^^^^ - -.. todo:: todo - -Failure Detection -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -Token Ring/Ranges -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -.. _replication-strategy: - -Replication -^^^^^^^^^^^ - -The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are :ref:`simple-strategy` and :ref:`network-topology-strategy`. - -.. _simple-strategy: - -SimpleStrategy -~~~~~~~~~~~~~~ - -SimpleStrategy allows a single integer ``replication_factor`` to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if ``replication_factor`` is 3, then three different nodes should store -a copy of each row. - -SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until ``replication_factor`` distinct nodes have been added to the set of replicas. - -.. _network-topology-strategy: - -NetworkTopologyStrategy -~~~~~~~~~~~~~~~~~~~~~~~ - -NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your -cluster only uses a single datacenter, NetworkTopologyStrategy should be prefered over SimpleStrategy to make it easier -to add new physical or virtual datacenters to the cluster later. - -In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose -replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication -factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one -replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially `surprising -implications `_. For example, if there are not an even number of -nodes in each rack, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to -configure all nodes on a single "rack". - -Tunable Consistency -^^^^^^^^^^^^^^^^^^^ - -Cassandra supports a per-operation tradeoff between consistency and availability through *Consistency Levels*. -Essentially, an operation's consistency level specifies how many of the replicas need to respond to the coordinator in -order to consider the operation a success. - -The following consistency levels are available: - -``ONE`` - Only a single replica must respond. - -``TWO`` - Two replicas must respond. - -``THREE`` - Three replicas must respond. - -``QUORUM`` - A majority (n/2 + 1) of the replicas must respond. - -``ALL`` - All of the replicas must respond. - -``LOCAL_QUORUM`` - A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond. 
- -``EACH_QUORUM`` - A majority of the replicas in each datacenter must respond. - -``LOCAL_ONE`` - Only a single replica must respond. In a multi-datacenter cluster, this also gaurantees that read requests are not - sent to replicas in a remote datacenter. - -``ANY`` - A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later - attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for - write operations. - -Write operations are always sent to all replicas, regardless of consistency level. The consistency level simply -controls how many responses the coordinator waits for before responding to the client. - -For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level. There are a couple of exceptions to this: - -- Speculative retry may issue a redundant read request to an extra replica if the other replicas have not responded - within a specified time window. -- Based on ``read_repair_chance`` and ``dclocal_read_repair_chance`` (part of a table's schema), read requests may be - randomly sent to all replicas in order to repair potentially inconsistent data. - -Picking Consistency Levels -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is common to pick read and write consistency levels that are high enough to overlap, resulting in "strong" -consistency. This is typically expressed as ``W + R > RF``, where ``W`` is the write consistency level, ``R`` is the -read consistency level, and ``RF`` is the replication factor. For example, if ``RF = 3``, a ``QUORUM`` request will -require responses from at least two of the three replicas. If ``QUORUM`` is used for both writes and reads, at least -one of the replicas is guaranteed to participate in *both* the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, ``LOCAL_QUORUM`` can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter. - -If this type of strong consistency isn't required, lower consistency levels like ``ONE`` may be used to improve -throughput, latency, and availability. diff --git a/src/doc/3.11.3/_sources/architecture/guarantees.rst.txt b/src/doc/3.11.3/_sources/architecture/guarantees.rst.txt deleted file mode 100644 index c0b58d880..000000000 --- a/src/doc/3.11.3/_sources/architecture/guarantees.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Guarantees ----------- - -.. 
todo:: todo diff --git a/src/doc/3.11.3/_sources/architecture/index.rst.txt b/src/doc/3.11.3/_sources/architecture/index.rst.txt deleted file mode 100644 index 58eda1377..000000000 --- a/src/doc/3.11.3/_sources/architecture/index.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Architecture -============ - -This section describes the general architecture of Apache Cassandra. - -.. toctree:: - :maxdepth: 2 - - overview - dynamo - storage_engine - guarantees - diff --git a/src/doc/3.11.3/_sources/architecture/overview.rst.txt b/src/doc/3.11.3/_sources/architecture/overview.rst.txt deleted file mode 100644 index 005b15b94..000000000 --- a/src/doc/3.11.3/_sources/architecture/overview.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Overview --------- - -.. todo:: todo diff --git a/src/doc/3.11.3/_sources/architecture/storage_engine.rst.txt b/src/doc/3.11.3/_sources/architecture/storage_engine.rst.txt deleted file mode 100644 index e4114e5af..000000000 --- a/src/doc/3.11.3/_sources/architecture/storage_engine.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Storage Engine --------------- - -.. _commit-log: - -CommitLog -^^^^^^^^^ - -.. 
todo:: todo - -.. _memtables: - -Memtables -^^^^^^^^^ - -Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable `SSTables`_. This can be triggered in several -ways: - -- The memory usage of the memtables exceeds the configured threshold (see ``memtable_cleanup_threshold``) -- The :ref:`commit-log` approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to - be freed - -Memtables may be stored entirely on-heap or partially off-heap, depending on ``memtable_allocation_type``. - -SSTables -^^^^^^^^ - -SSTables are the immutable data files that Cassandra uses for persisting data on disk. - -As SSTables are flushed to disk from :ref:`memtables` or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed. - -Each SSTable is comprised of multiple components stored in separate files: - -``Data.db`` - The actual data, i.e. the contents of rows. - -``Index.db`` - An index from partition keys to positions in the ``Data.db`` file. For wide partitions, this may also include an - index to rows within a partition. - -``Summary.db`` - A sampling of (by default) every 128th entry in the ``Index.db`` file. - -``Filter.db`` - A Bloom Filter of the partition keys in the SSTable. - -``CompressionInfo.db`` - Metadata about the offsets and lengths of compression chunks in the ``Data.db`` file. - -``Statistics.db`` - Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, - repair, compression, TTLs, and more. - -``Digest.crc32`` - A CRC-32 digest of the ``Data.db`` file. - -``TOC.txt`` - A plain text list of the component files for the SSTable. - -Within the ``Data.db`` file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, ``Murmur3Partition``, is used). Within a partition, rows are -stored in the order of their clustering keys. - -SSTables can be optionally compressed using block-based compression. diff --git a/src/doc/3.11.3/_sources/bugs.rst.txt b/src/doc/3.11.3/_sources/bugs.rst.txt deleted file mode 100644 index 240cfd495..000000000 --- a/src/doc/3.11.3/_sources/bugs.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Reporting Bugs and Contributing -=============================== - -If you encounter a problem with Cassandra, the first places to ask for help are the :ref:`user mailing list -` and the ``#cassandra`` :ref:`IRC channel `. 
- -If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the `Apache Cassandra JIRA `__. Please provide as much -details as you can on your problem, and don't forget to indicate which version of Cassandra you are running and on which -environment. - -Further details on how to contribute can be found at our :doc:`development/index` section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path. diff --git a/src/doc/3.11.3/_sources/configuration/cassandra_config_file.rst.txt b/src/doc/3.11.3/_sources/configuration/cassandra_config_file.rst.txt deleted file mode 100644 index e685300f1..000000000 --- a/src/doc/3.11.3/_sources/configuration/cassandra_config_file.rst.txt +++ /dev/null @@ -1,1880 +0,0 @@ -.. _cassandra-yaml: - -Cassandra Configuration File -============================ - -``cluster_name`` ----------------- -The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another. - -*Default Value:* 'Test Cluster' - -``num_tokens`` --------------- - -This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability. - -If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below. - -Specifying initial_token will override this setting on the node's initial start, -on subsequent starts, this setting will apply even if initial token is set. - -If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations - -*Default Value:* 256 - -``allocate_tokens_for_keyspace`` --------------------------------- -*This option is commented out by default.* - -Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace. - -The load assigned to each node will be close to proportional to its number of -vnodes. - -Only supported with the Murmur3Partitioner. - -*Default Value:* KEYSPACE - -``initial_token`` ------------------ -*This option is commented out by default.* - -initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) -- in which case you should provide a -comma-separated list -- it's primarily used when adding nodes to legacy clusters -that do not have vnodes enabled. - -``hinted_handoff_enabled`` --------------------------- - -See http://wiki.apache.org/cassandra/HintedHandoff -May either be "true" or "false" to enable globally - -*Default Value:* true - -``hinted_handoff_disabled_datacenters`` ---------------------------------------- -*This option is commented out by default.* - -When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff - -*Default Value (complex option)*:: - - # - DC1 - # - DC2 - -``max_hint_window_in_ms`` -------------------------- -this defines the maximum amount of time a dead host will have hints -generated. 
After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again. - -*Default Value:* 10800000 # 3 hours - -``hinted_handoff_throttle_in_kb`` ---------------------------------- - -Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.) - -*Default Value:* 1024 - -``max_hints_delivery_threads`` ------------------------------- - -Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower - -*Default Value:* 2 - -``hints_directory`` -------------------- -*This option is commented out by default.* - -Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints. - -*Default Value:* /var/lib/cassandra/hints - -``hints_flush_period_in_ms`` ----------------------------- - -How often hints should be flushed from the internal buffers to disk. -Will *not* trigger fsync. - -*Default Value:* 10000 - -``max_hints_file_size_in_mb`` ------------------------------ - -Maximum size for a single hints file, in megabytes. - -*Default Value:* 128 - -``hints_compression`` ---------------------- -*This option is commented out by default.* - -Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``batchlog_replay_throttle_in_kb`` ----------------------------------- -Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster. - -*Default Value:* 1024 - -``authenticator`` ------------------ - -Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}. - -- AllowAllAuthenticator performs no checks - set it to disable authentication. -- PasswordAuthenticator relies on username/password pairs to authenticate - users. It keeps usernames and hashed passwords in system_auth.roles table. - Please increase system_auth keyspace replication factor if you use this authenticator. - If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) - -*Default Value:* AllowAllAuthenticator - -``authorizer`` --------------- - -Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}. - -- AllowAllAuthorizer allows any action to any user - set it to disable authorization. -- CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllAuthorizer - -``role_manager`` ----------------- - -Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. 
Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable. - -- CassandraRoleManager stores role data in the system_auth keyspace. Please - increase system_auth keyspace replication factor if you use this role manager. - -*Default Value:* CassandraRoleManager - -``roles_validity_in_ms`` ------------------------- - -Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator. - -*Default Value:* 2000 - -``roles_update_interval_in_ms`` -------------------------------- -*This option is commented out by default.* - -Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms. - -*Default Value:* 2000 - -``permissions_validity_in_ms`` ------------------------------- - -Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer. - -*Default Value:* 2000 - -``permissions_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms. - -*Default Value:* 2000 - -``credentials_validity_in_ms`` ------------------------------- - -Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching. - -*Default Value:* 2000 - -``credentials_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms. - -*Default Value:* 2000 - -``partitioner`` ---------------- - -The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. 
You should leave this -alone for new clusters. The partitioner can NOT be changed without -reloading all data, so when upgrading you should set this to the -same partitioner you were already using. - -Besides Murmur3Partitioner, partitioners included for backwards -compatibility include RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner. - - -*Default Value:* org.apache.cassandra.dht.Murmur3Partitioner - -``data_file_directories`` -------------------------- -*This option is commented out by default.* - -Directories where Cassandra should store data on disk. Cassandra -will spread data evenly across them, subject to the granularity of -the configured compaction strategy. -If not set, the default directory is $CASSANDRA_HOME/data/data. - -*Default Value (complex option)*:: - - # - /var/lib/cassandra/data - -``commitlog_directory`` ------------------------ -*This option is commented out by default.* -commit log. when running on magnetic HDD, this should be a -separate spindle than the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -``cdc_enabled`` ---------------- - -Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory). - -*Default Value:* false - -``cdc_raw_directory`` ---------------------- -*This option is commented out by default.* - -CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw. - -*Default Value:* /var/lib/cassandra/cdc_raw - -``disk_failure_policy`` ------------------------ - -Policy for data disk failures: - -die - shut down gossip and client transports and kill the JVM for any fs errors or - single-sstable errors, so the node can be replaced. - -stop_paranoid - shut down gossip and client transports even for single-sstable errors, - kill the JVM for errors during startup. - -stop - shut down gossip and client transports, leaving the node effectively dead, but - can still be inspected via JMX, kill the JVM for errors during startup. - -best_effort - stop using the failed disk and respond to requests based on - remaining available sstables. This means you WILL see obsolete - data at CL.ONE! - -ignore - ignore fatal errors and let requests fail, as in pre-1.2 Cassandra - -*Default Value:* stop - -``commit_failure_policy`` -------------------------- - -Policy for commit disk failures: - -die - shut down gossip and Thrift and kill the JVM, so the node can be replaced. - -stop - shut down gossip and Thrift, leaving the node effectively dead, but - can still be inspected via JMX. - -stop_commit - shutdown the commit log, letting writes collect but - continuing to service reads, as in pre-2.0.5 Cassandra - -ignore - ignore fatal errors and let the batches fail - -*Default Value:* stop - -``prepared_statements_cache_size_mb`` -------------------------------------- - -Maximum size of the native protocol prepared statement cache - -Valid values are either "auto" (omitting the value) or a value greater 0. - -Note that specifying a too large value will result in long running GCs and possbily -out-of-memory errors. Keep the value at a small fraction of the heap. 
- -If you constantly see "prepared statements discarded in the last minute because -cache limit reached" messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts. - -Only change the default value if you really have more prepared statements than -fit in the cache. In most cases it is not necessary to change this value. -Constantly re-preparing statements is a performance penalty. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``thrift_prepared_statements_cache_size_mb`` --------------------------------------------- - -Maximum size of the Thrift prepared statement cache - -If you do not use Thrift at all, it is safe to leave this value at "auto". - -See description of 'prepared_statements_cache_size_mb' above for more information. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``key_cache_size_in_mb`` ------------------------- - -Maximum size of the key cache in memory. - -Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it's worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It's best to only use the -row cache if you have hot rows or static rows. - -NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. - -``key_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and are relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 14400 or 4 hours. - -*Default Value:* 14400 - -``key_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``row_cache_class_name`` ------------------------- -*This option is commented out by default.* - -Row cache implementation class name. Available implementations: - -org.apache.cassandra.cache.OHCProvider - Fully off-heap row cache implementation (default). - -org.apache.cassandra.cache.SerializingCacheProvider - This is the row cache implementation available - in previous releases of Cassandra. - -*Default Value:* org.apache.cassandra.cache.OHCProvider - -``row_cache_size_in_mb`` ------------------------- - -Maximum size of the row cache in memory. -Please note that the OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory than the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Never allow your system to swap. - -Default value is 0, to disable row caching.
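For a concrete sense of the "auto" cache sizing rules quoted above, the following Python sketch applies the two formulas: 1/256th of the heap or 10 MB (whichever is greater) for the prepared-statement caches, and min(5% of heap, 100 MB) for the key cache. The helper names and the 8 GB heap figure are illustrative assumptions, not part of Cassandra.

```python
def prepared_statements_cache_mb(heap_mb: float) -> float:
    # "auto": 1/256th of the heap or 10 MB, whichever is greater
    return max(heap_mb / 256, 10)

def key_cache_mb(heap_mb: float) -> float:
    # "auto": min(5% of the heap (in MB), 100 MB)
    return min(heap_mb * 0.05, 100)

print(prepared_statements_cache_mb(8192))  # 32.0 MB on an 8 GB heap
print(key_cache_mb(8192))                  # 100 MB (5% of 8192 MB is 409.6, capped at 100)
```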
- -*Default Value:* 0 - -``row_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 0 to disable saving the row cache. - -*Default Value:* 0 - -``row_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved - -*Default Value:* 100 - -``counter_cache_size_in_mb`` ----------------------------- - -Maximum size of the counter cache in memory. - -Counter cache helps to reduce counter locks' contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it's relatively cheap. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. - -``counter_cache_save_period`` ------------------------------ - -Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file. - -Default is 7200 or 2 hours. - -*Default Value:* 7200 - -``counter_cache_keys_to_save`` ------------------------------- -*This option is commented out by default.* - -Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``saved_caches_directory`` --------------------------- -*This option is commented out by default.* - -saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. - -*Default Value:* /var/lib/cassandra/saved_caches - -``commitlog_sync`` ------------------- -*This option is commented out by default.* - -commitlog_sync may be either "periodic" or "batch." - -When in batch mode, Cassandra won't ack writes until the commit log -has been fsynced to disk. It will wait -commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -This window should be kept short because the writer threads will -be unable to do extra work while waiting. (You may need to increase -concurrent_writes for the same reason.) - - -*Default Value:* batch - -``commitlog_sync_batch_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -*Default Value:* 2 - -``commitlog_sync`` ------------------- - -the other option is "periodic" where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds. - -*Default Value:* periodic - -``commitlog_sync_period_in_ms`` -------------------------------- - -*Default Value:* 10000 - -``commitlog_segment_size_in_mb`` --------------------------------- - -The size of the individual commitlog file segments. 
A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables. - -The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048. - -NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024 - - -*Default Value:* 32 - -``commitlog_compression`` -------------------------- -*This option is commented out by default.* - -Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``seed_provider`` ------------------ -any class that implements the SeedProvider interface and has a -constructor that takes a Map of parameters will do. - -*Default Value (complex option)*:: - - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: ",," - - seeds: "127.0.0.1" - -``concurrent_reads`` --------------------- -For workloads with more data than can fit in memory, Cassandra's -bottleneck will be reads that need to fetch data from -disk. "concurrent_reads" should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -"concurrent_counter_writes", since counter writes read the current -values before incrementing and writing them back. - -On the other hand, since writes are almost never IO bound, the ideal -number of "concurrent_writes" is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb. - -*Default Value:* 32 - -``concurrent_writes`` ---------------------- - -*Default Value:* 32 - -``concurrent_counter_writes`` ------------------------------ - -*Default Value:* 32 - -``concurrent_materialized_view_writes`` ---------------------------------------- - -For materialized view writes, as there is a read involved, so this should -be limited by the less of concurrent reads or concurrent writes. - -*Default Value:* 32 - -``file_cache_size_in_mb`` -------------------------- -*This option is commented out by default.* - -Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as an -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed. 
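The file_cache_size_in_mb default and the per-chunk on-heap overhead described just above reduce to simple arithmetic. A minimal Python sketch, with hypothetical helper names and an 8 GB heap assumed purely for illustration:

```python
def default_file_cache_mb(heap_mb: float) -> float:
    # Defaults to the smaller of 1/4 of the heap or 512 MB
    return min(heap_mb / 4, 512)

def chunk_cache_overhead_mb(file_cache_mb: float, chunk_size_kb: int = 64) -> float:
    # Roughly 128 bytes of on-heap overhead per cached chunk
    chunks = file_cache_mb * 1024 // chunk_size_kb
    return chunks * 128 / (1024 * 1024)

print(default_file_cache_mb(8192))   # 512.0 MB for an 8 GB heap
print(chunk_cache_overhead_mb(512))  # 1.0 MB on-heap overhead, i.e. roughly 0.2% of 512 MB
```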
- -*Default Value:* 512 - -``buffer_pool_use_heap_if_exhausted`` -------------------------------------- -*This option is commented out by default.* - -Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. - - -*Default Value:* true - -``disk_optimization_strategy`` ------------------------------- -*This option is commented out by default.* - -The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks) - -*Default Value:* ssd - -``memtable_heap_space_in_mb`` ------------------------------ -*This option is commented out by default.* - -Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap. - -*Default Value:* 2048 - -``memtable_offheap_space_in_mb`` --------------------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``memtable_cleanup_threshold`` ------------------------------- -*This option is commented out by default.* - -memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information. - -Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load. - -memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) - -*Default Value:* 0.11 - -``memtable_allocation_type`` ----------------------------- - -Specify the way Cassandra allocates and manages memtable memory. -Options are: - -heap_buffers - on heap nio buffers - -offheap_buffers - off heap (direct) nio buffers - -offheap_objects - off heap objects - -*Default Value:* heap_buffers - -``commitlog_total_space_in_mb`` -------------------------------- -*This option is commented out by default.* - -Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume. - - -*Default Value:* 8192 - -``memtable_flush_writers`` --------------------------- -*This option is commented out by default.* - -This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound. - -Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time. 
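To make the memtable and commit-log space defaults described above concrete, here is a small Python sketch; the helper names and the example heap and volume sizes are assumptions for illustration, not Cassandra code.

```python
def default_memtable_space_mb(heap_mb: float) -> float:
    # If memtable_heap_space_in_mb / memtable_offheap_space_in_mb are omitted,
    # each is set to 1/4 of the heap
    return heap_mb / 4

def default_commitlog_total_space_mb(commitlog_volume_mb: float) -> float:
    # The smaller of 8192 MB and 1/4 of the commitlog volume
    return min(8192, commitlog_volume_mb / 4)

def flush_trigger_mb(memtable_space_mb: float, memtable_cleanup_threshold: float) -> float:
    # A flush of the largest memtable is triggered once occupied, non-flushing
    # memtable space reaches this fraction of the permitted total
    return memtable_space_mb * memtable_cleanup_threshold

print(default_memtable_space_mb(8192))          # 2048.0 MB for an 8 GB heap
print(default_commitlog_total_space_mb(20480))  # 5120.0 MB on a 20 GB commitlog volume
print(flush_trigger_mb(2048, 0.11))             # ~225 MB with the 0.11 default threshold
```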
- -You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory. - -memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers. - -Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead. - -There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory. - - -*Default Value:* 2 - -``cdc_total_space_in_mb`` -------------------------- -*This option is commented out by default.* - -Total space to use for change-data-capture logs on disk. - -If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed. - -The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides. - -*Default Value:* 4096 - -``cdc_free_space_check_interval_ms`` ------------------------------------- -*This option is commented out by default.* - -When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms - -*Default Value:* 250 - -``index_summary_capacity_in_mb`` --------------------------------- - -A fixed memory pool size in MB for for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory. - -``index_summary_resize_interval_in_minutes`` --------------------------------------------- - -How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level. - -*Default Value:* 60 - -``trickle_fsync`` ------------------ - -Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters. - -*Default Value:* false - -``trickle_fsync_interval_in_kb`` --------------------------------- - -*Default Value:* 10240 - -``storage_port`` ----------------- - -TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7000 - -``ssl_storage_port`` --------------------- - -SSL port, for encrypted communication. 
Unused unless enabled in -encryption_options -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7001 - -``listen_address`` ------------------- - -Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate! - -Set listen_address OR listen_interface, not both. - -Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be). - -Setting listen_address to 0.0.0.0 is always wrong. - - -*Default Value:* localhost - -``listen_interface`` --------------------- -*This option is commented out by default.* - -Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth0 - -``listen_interface_prefer_ipv6`` --------------------------------- -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_address`` ---------------------- -*This option is commented out by default.* - -Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address - -*Default Value:* 1.2.3.4 - -``listen_on_broadcast_address`` -------------------------------- -*This option is commented out by default.* - -When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2. - -*Default Value:* false - -``internode_authenticator`` ---------------------------- -*This option is commented out by default.* - -Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes. - -*Default Value:* org.apache.cassandra.auth.AllowAllInternodeAuthenticator - -``start_native_transport`` --------------------------- - -Whether to start the native transport server. -Please note that the address on which the native transport is bound is the -same as the rpc_address. The port however is different and specified below. - -*Default Value:* true - -``native_transport_port`` -------------------------- -port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 9042 - -``native_transport_port_ssl`` ------------------------------ -*This option is commented out by default.* -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. 
Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted. - -*Default Value:* 9142 - -``native_transport_max_threads`` --------------------------------- -*This option is commented out by default.* -The maximum threads for handling requests when the native transport is used. -This is similar to rpc_max_threads though the default differs slightly (and -there is no native_transport_min_threads, idle threads will always be stopped -after 30 seconds). - -*Default Value:* 128 - -``native_transport_max_frame_size_in_mb`` ------------------------------------------ -*This option is commented out by default.* - -The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you're changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048. - -*Default Value:* 256 - -``native_transport_max_concurrent_connections`` ------------------------------------------------ -*This option is commented out by default.* - -The maximum number of concurrent client connections. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_max_concurrent_connections_per_ip`` ------------------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``start_rpc`` -------------- - -Whether to start the thrift rpc server. - -*Default Value:* false - -``rpc_address`` ---------------- - -The address or interface to bind the Thrift RPC service and native transport -server to. - -Set rpc_address OR rpc_interface, not both. - -Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node). - -Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0. - -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* localhost - -``rpc_interface`` ------------------ -*This option is commented out by default.* - -Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth1 - -``rpc_interface_prefer_ipv6`` ------------------------------ -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``rpc_port`` ------------- - -port for Thrift to listen for clients on - -*Default Value:* 9160 - -``broadcast_rpc_address`` -------------------------- -*This option is commented out by default.* - -RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set. 
- -*Default Value:* 1.2.3.4 - -``rpc_keepalive`` ------------------ - -enable or disable keepalive on rpc/native connections - -*Default Value:* true - -``rpc_server_type`` -------------------- - -Cassandra provides two out-of-the-box options for the RPC Server: - -sync - One thread per thrift connection. For a very large number of clients, memory - will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size - per thread, and that will correspond to your use of virtual memory (but physical memory - may be limited depending on use of stack space). - -hsha - Stands for "half synchronous, half asynchronous." All thrift clients are handled - asynchronously using a small number of threads that does not vary with the amount - of thrift clients (and thus scales well to many clients). The rpc requests are still - synchronous (one thread per active request). If hsha is selected then it is essential - that rpc_max_threads is changed from the default value of unlimited. - -The default is sync because on Windows hsha is about 30% slower. On Linux, -sync/hsha performance is about the same, with hsha of course using less memory. - -Alternatively, can provide your own RPC server by providing the fully-qualified class name -of an o.a.c.t.TServerFactory that can create an instance of it. - -*Default Value:* sync - -``rpc_min_threads`` -------------------- -*This option is commented out by default.* - -Uncomment rpc_min|max_thread to set request pool size limits. - -Regardless of your choice of RPC server (see above), the number of maximum requests in the -RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -RPC server, it also dictates the number of clients that can be connected at all). - -The default is unlimited and thus provides no protection against clients overwhelming the server. You are -encouraged to set a maximum that makes sense for you in production, but do keep in mind that -rpc_max_threads represents the maximum number of client requests this server may execute concurrently. - - -*Default Value:* 16 - -``rpc_max_threads`` -------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``rpc_send_buff_size_in_bytes`` -------------------------------- -*This option is commented out by default.* - -uncomment to set socket buffer sizes on rpc connections - -``rpc_recv_buff_size_in_bytes`` -------------------------------- -*This option is commented out by default.* - -``internode_send_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and 'man tcp' - -``internode_recv_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem - -``thrift_framed_transport_size_in_mb`` --------------------------------------- - -Frame size for thrift (maximum message length). 
- -*Default Value:* 15 - -``incremental_backups`` ------------------------ - -Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator's -responsibility. - -*Default Value:* false - -``snapshot_before_compaction`` ------------------------------- - -Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won't clean up the -snapshots for you. Mostly useful if you're paranoid when there -is a data format change. - -*Default Value:* false - -``auto_snapshot`` ------------------ - -Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop. - -*Default Value:* true - -``column_index_size_in_kb`` ---------------------------- - -Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these: - -- a smaller granularity means more index entries are generated - and looking up rows withing the partition by collation column - is faster -- but, Cassandra will keep the collation index in memory for hot - rows (as part of the key cache), so a larger granularity means - you can cache more hot rows - -*Default Value:* 64 - -``column_index_cache_size_in_kb`` ---------------------------------- - -Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk. - -Note that this size refers to the size of the -serialized index information and not the size of the partition. - -*Default Value:* 2 - -``concurrent_compactors`` -------------------------- -*This option is commented out by default.* - -Number of simultaneous compactions to allow, NOT including -validation "compactions" for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long running compactions. The default is usually -fine and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first. - -concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8. - -If your data directories are backed by SSD, you should increase this -to the number of cores. - -*Default Value:* 1 - -``compaction_throughput_mb_per_sec`` ------------------------------------- - -Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this account for all types -of compaction, including validation compaction. 
- -*Default Value:* 16 - -``sstable_preemptive_open_interval_in_mb`` ------------------------------------------- - -When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot - -*Default Value:* 50 - -``stream_throughput_outbound_megabits_per_sec`` ------------------------------------------------ -*This option is commented out by default.* - -Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s. - -*Default Value:* 200 - -``inter_dc_stream_throughput_outbound_megabits_per_sec`` --------------------------------------------------------- -*This option is commented out by default.* - -Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s - -*Default Value:* 200 - -``read_request_timeout_in_ms`` ------------------------------- - -How long the coordinator should wait for read operations to complete - -*Default Value:* 5000 - -``range_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for seq or index scans to complete - -*Default Value:* 10000 - -``write_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for writes to complete - -*Default Value:* 2000 - -``counter_write_request_timeout_in_ms`` ---------------------------------------- -How long the coordinator should wait for counter writes to complete - -*Default Value:* 5000 - -``cas_contention_timeout_in_ms`` --------------------------------- -How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row - -*Default Value:* 1000 - -``truncate_request_timeout_in_ms`` ----------------------------------- -How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) - -*Default Value:* 60000 - -``request_timeout_in_ms`` -------------------------- -The default timeout for other, miscellaneous operations - -*Default Value:* 10000 - -``slow_query_log_timeout_in_ms`` --------------------------------- - -How long before a node logs slow queries. Select queries that take longer than -this timeout to execute, will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging. - -*Default Value:* 500 - -``cross_node_timeout`` ----------------------- - -Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests. 
- -Warning: before enabling this property make sure to ntp is installed -and the times are synchronized between the nodes. - -*Default Value:* false - -``streaming_keep_alive_period_in_secs`` ---------------------------------------- -*This option is commented out by default.* - -Set keep-alive period for streaming -This node will send a keep-alive message periodically with this period. -If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles the stream session times out and fail -Default value is 300s (5 minutes), which means stalled stream -times out in 10 minutes by default - -*Default Value:* 300 - -``phi_convict_threshold`` -------------------------- -*This option is commented out by default.* - -phi value that must be reached for a host to be marked down. -most users should never need to adjust this. - -*Default Value:* 8 - -``endpoint_snitch`` -------------------- - -endpoint_snitch -- Set this to a class that implements -IEndpointSnitch. The snitch has two functions: - -- it teaches Cassandra enough about your network topology to route - requests efficiently -- it allows Cassandra to spread replicas around your cluster to avoid - correlated failures. It does this by grouping machines into - "datacenters" and "racks." Cassandra will do its best not to have - more than one replica on the same "rack" (which may not actually - be a physical location) - -CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on "rack1" in "datacenter1", your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new "datacenter") and -decommissioning the old ones. - -Out of the box, Cassandra provides: - -SimpleSnitch: - Treats Strategy order as proximity. This can improve cache - locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack - and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via - gossip. If cassandra-topology.properties exists, it is used as a - fallback, allowing migration from the PropertyFileSnitch. - -PropertyFileSnitch: - Proximity is determined by rack and data center, which are - explicitly configured in cassandra-topology.properties. - -Ec2Snitch: - Appropriate for EC2 deployments in a single Region. Loads Region - and Availability Zone information from the EC2 API. The Region is - treated as the datacenter, and the Availability Zone as the rack. - Only private IPs are used, so this will not work across multiple - Regions. - -Ec2MultiRegionSnitch: - Uses public IPs as broadcast_address to allow cross-region - connectivity. (Thus, you should set seed addresses to the public - IP as well.) You will need to open the storage_port or - ssl_storage_port on the public IP firewall. (For intra-Region - traffic, Cassandra will switch to the private IP after - establishing a connection.) - -RackInferringSnitch: - Proximity is determined by rack and data center, which are - assumed to correspond to the 3rd and 2nd octet of each node's IP - address, respectively. 
Unless this happens to match your - deployment conventions, this is best used as an example of - writing a custom Snitch class and is provided in that spirit. - -You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath. - -*Default Value:* SimpleSnitch - -``dynamic_snitch_update_interval_in_ms`` ----------------------------------------- - -controls how often to perform the more expensive part of host score -calculation - -*Default Value:* 100 - -``dynamic_snitch_reset_interval_in_ms`` ---------------------------------------- -controls how often to reset all host scores, allowing a bad host to -possibly recover - -*Default Value:* 600000 - -``dynamic_snitch_badness_threshold`` ------------------------------------- -if set greater than zero and read_repair_chance is < 1.0, this will allow -'pinning' of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest. - -*Default Value:* 0.1 - -``request_scheduler`` ---------------------- - -request_scheduler -- Set this to a class that implements -RequestScheduler, which will schedule incoming client requests -according to the specific policy. This is useful for multi-tenancy -with a single Cassandra cluster. -NOTE: This is specifically for requests from the client and does -not affect inter node communication. -org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -client requests to a node with a separate queue for each -request_scheduler_id. The scheduler is further customized by -request_scheduler_options as described below. - -*Default Value:* org.apache.cassandra.scheduler.NoScheduler - -``request_scheduler_options`` ------------------------------ -*This option is commented out by default.* - -Scheduler Options vary based on the type of scheduler - -NoScheduler - Has no options - -RoundRobin - throttle_limit - The throttle_limit is the number of in-flight - requests per client. Requests beyond - that limit are queued up until - running requests can complete. - The value of 80 here is twice the number of - concurrent_reads + concurrent_writes. - default_weight - default_weight is optional and allows for - overriding the default which is 1. - weights - Weights are optional and will default to 1 or the - overridden default_weight. The weight translates into how - many requests are handled during each turn of the - RoundRobin, based on the scheduler id. - - -*Default Value (complex option)*:: - - # throttle_limit: 80 - # default_weight: 5 - # weights: - # Keyspace1: 1 - # Keyspace2: 5 - -``request_scheduler_id`` ------------------------- -*This option is commented out by default.* -request_scheduler_id -- An identifier based on which to perform -the request scheduling. Currently the only valid option is keyspace. - -*Default Value:* keyspace - -``server_encryption_options`` ------------------------------ - -Enable or disable inter-node encryption -JVM defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. 
This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html -*NOTE* No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack - -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks - -The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore - - -*Default Value (complex option)*:: - - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -``client_encryption_options`` ------------------------------ -enable or disable client/server encryption. - -*Default Value (complex option)*:: - - enabled: false - # If enabled and optional is set to true encrypted and unencrypted connections are handled. - optional: false - keystore: conf/.keystore - keystore_password: cassandra - # require_client_auth: false - # Set trustore and truststore_password if require_client_auth is true - # truststore: conf/.truststore - # truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - -``internode_compression`` -------------------------- -internode_compression controls whether traffic between nodes is -compressed. -Can be: - -all - all traffic is compressed - -dc - traffic between different datacenters is compressed - -none - nothing is compressed. - -*Default Value:* dc - -``inter_dc_tcp_nodelay`` ------------------------- - -Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses. - -*Default Value:* false - -``tracetype_query_ttl`` ------------------------ - -TTL for different trace types used during logging of the repair process. 
- -*Default Value:* 86400 - -``tracetype_repair_ttl`` ------------------------- - -*Default Value:* 604800 - -``gc_log_threshold_in_ms`` --------------------------- -*This option is commented out by default.* - -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level -This threshold can be adjusted to minimize logging if necessary - -*Default Value:* 200 - -``enable_user_defined_functions`` ---------------------------------- - -If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. - -*Default Value:* false - -``enable_scripted_user_defined_functions`` ------------------------------------------- - -Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false. - -*Default Value:* false - -``enable_materialized_views`` ------------------------------ - -Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use. - -*Default Value:* true - -``windows_timer_interval`` --------------------------- - -The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals 'clockres' tool can confirm your system's default -setting. - -*Default Value:* 1 - -``transparent_data_encryption_options`` ---------------------------------------- - - -Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from -a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by -the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys -can still (and should!) be in the keystore and will be used on decrypt operations -(to handle the case of key rotation). - -It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) - -Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints - -*Default Value (complex option)*:: - - enabled: false - chunk_length_kb: 64 - cipher: AES/CBC/PKCS5Padding - key_alias: testing:1 - # CBC IV length for AES needs to be 16 bytes (which is also the default size) - # iv_length: 16 - key_provider: - - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: - - keystore: conf/.keystore - keystore_password: cassandra - store_type: JCEKS - key_password: cassandra - -``tombstone_warn_threshold`` ----------------------------- - -#################### -SAFETY THRESHOLDS # -#################### - -When executing a scan, within or across a partition, we need to keep the -tombstones seen in memory so we can return them to the coordinator, which -will use them to make sure other replicas also know about the deleted rows. 
-
-With workloads that generate a lot of tombstones, this can cause performance
-problems and even exhaust the server heap.
-(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
-Adjust the thresholds here if you understand the dangers and want to
-scan more tombstones anyway. These thresholds may also be adjusted at runtime
-using the StorageService mbean.
-
-*Default Value:* 1000
-
-``tombstone_failure_threshold``
--------------------------------
-
-*Default Value:* 100000
-
-``batch_size_warn_threshold_in_kb``
------------------------------------
-
-Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default.
-Caution should be taken on increasing the size of this threshold as it can lead to node instability.
-
-*Default Value:* 5
-
-``batch_size_fail_threshold_in_kb``
------------------------------------
-
-Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.
-
-*Default Value:* 50
-
-``unlogged_batch_across_partitions_warn_threshold``
----------------------------------------------------
-
-Log WARN on any batches not of type LOGGED that span across more partitions than this limit.
-
-*Default Value:* 10
-
-``compaction_large_partition_warning_threshold_mb``
----------------------------------------------------
-
-Log a warning when compacting partitions larger than this value.
-
-*Default Value:* 100
-
-``gc_warn_threshold_in_ms``
----------------------------
-
-GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level.
-Adjust the threshold based on your application throughput requirement.
-By default, Cassandra logs GC Pauses greater than 200 ms at INFO level.
-
-*Default Value:* 1000
-
-``max_value_size_in_mb``
-------------------------
-*This option is commented out by default.*
-
-Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
-early. Any value size larger than this threshold will result in marking an SSTable
-as corrupted. This should be positive and less than 2048.
-
-*Default Value:* 256
-
-``back_pressure_enabled``
--------------------------
-
-Back-pressure settings.
-If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation
-sent to replicas, with the aim of reducing pressure on overloaded replicas.
-
-*Default Value:* false
-
-``back_pressure_strategy``
---------------------------
-The back-pressure strategy applied.
-The default implementation, RateBasedBackPressure, takes three arguments:
-high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests.
-If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor;
-if above high ratio, the rate limiting is increased by the given factor;
-this factor is usually best configured between 1 and 10: use larger values for a faster recovery
-at the expense of potentially more dropped mutations.
-The rate limiting is applied according to the flow type: if FAST, it is rate limited at the speed of the fastest replica;
-if SLOW, at the speed of the slowest one.
-New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and
-provide a public constructor accepting a Map.
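As a rough illustration only (not a tuning recommendation), the safety thresholds and back-pressure options described above might appear together in ``cassandra.yaml`` as follows. The values shown are simply the documented defaults with back-pressure switched on, and the nested parameter keys mirror the three RateBasedBackPressure arguments named above; treat the exact keys as illustrative::

    tombstone_warn_threshold: 1000
    tombstone_failure_threshold: 100000
    batch_size_warn_threshold_in_kb: 5
    batch_size_fail_threshold_in_kb: 50
    unlogged_batch_across_partitions_warn_threshold: 10
    compaction_large_partition_warning_threshold_mb: 100
    gc_warn_threshold_in_ms: 1000
    back_pressure_enabled: true
    back_pressure_strategy:
        - class_name: org.apache.cassandra.net.RateBasedBackPressure
          parameters:
            - high_ratio: 0.90
              factor: 5
              flow: FAST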
- -``otc_coalescing_strategy`` ---------------------------- -*This option is commented out by default.* - -Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal -doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details. - -Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name. - -*Default Value:* DISABLED - -``otc_coalescing_window_us`` ----------------------------- -*This option is commented out by default.* - -How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled. - -*Default Value:* 200 - -``otc_coalescing_enough_coalesced_messages`` --------------------------------------------- -*This option is commented out by default.* - -Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128. - -*Default Value:* 8 - -``otc_backlog_expiration_interval_ms`` --------------------------------------- -*This option is commented out by default.* - -How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions. - - -*Default Value:* 200 diff --git a/src/doc/3.11.3/_sources/configuration/index.rst.txt b/src/doc/3.11.3/_sources/configuration/index.rst.txt deleted file mode 100644 index f774fdad6..000000000 --- a/src/doc/3.11.3/_sources/configuration/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. 
The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra -===================== - -This section describes how to configure Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cassandra_config_file diff --git a/src/doc/3.11.3/_sources/contactus.rst.txt b/src/doc/3.11.3/_sources/contactus.rst.txt deleted file mode 100644 index 8d0f5dd04..000000000 --- a/src/doc/3.11.3/_sources/contactus.rst.txt +++ /dev/null @@ -1,53 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contact us -========== - -You can get in touch with the Cassandra community either via the mailing lists or the freenode IRC channels. - -.. _mailing-lists: - -Mailing lists -------------- - -The following mailing lists are available: - -- `Users `__ – General discussion list for users - `Subscribe - `__ -- `Developers `__ – Development related discussion - `Subscribe - `__ -- `Commits `__ – Commit notification source repository - - `Subscribe `__ -- `Client Libraries `__ – Discussion related to the - development of idiomatic client APIs - `Subscribe `__ - -Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe. - -.. _irc-channels: - -IRC ---- - -To chat with developers or users in real-time, join our channels on `IRC freenode `__. The -following channels are available: - -- ``#cassandra`` - for user questions and general discussions. -- ``#cassandra-dev`` - strictly for questions or discussions related to Cassandra development. -- ``#cassandra-builds`` - results of automated test builds. - diff --git a/src/doc/3.11.3/_sources/cql/appendices.rst.txt b/src/doc/3.11.3/_sources/cql/appendices.rst.txt deleted file mode 100644 index 480b78ea2..000000000 --- a/src/doc/3.11.3/_sources/cql/appendices.rst.txt +++ /dev/null @@ -1,330 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Appendices ----------- - -.. _appendix-A: - -Appendix A: CQL Keywords -~~~~~~~~~~~~~~~~~~~~~~~~ - -CQL distinguishes between *reserved* and *non-reserved* keywords. -Reserved keywords cannot be used as identifier, they are truly reserved -for the language (but one can enclose a reserved keyword by -double-quotes to use it as an identifier). Non-reserved keywords however -only have a specific meaning in certain context but can used as -identifier otherwise. The only *raison d’être* of these non-reserved -keywords is convenience: some keyword are non-reserved when it was -always easy for the parser to decide whether they were used as keywords -or not. - -+--------------------+-------------+ -| Keyword | Reserved? | -+====================+=============+ -| ``ADD`` | yes | -+--------------------+-------------+ -| ``AGGREGATE`` | no | -+--------------------+-------------+ -| ``ALL`` | no | -+--------------------+-------------+ -| ``ALLOW`` | yes | -+--------------------+-------------+ -| ``ALTER`` | yes | -+--------------------+-------------+ -| ``AND`` | yes | -+--------------------+-------------+ -| ``APPLY`` | yes | -+--------------------+-------------+ -| ``AS`` | no | -+--------------------+-------------+ -| ``ASC`` | yes | -+--------------------+-------------+ -| ``ASCII`` | no | -+--------------------+-------------+ -| ``AUTHORIZE`` | yes | -+--------------------+-------------+ -| ``BATCH`` | yes | -+--------------------+-------------+ -| ``BEGIN`` | yes | -+--------------------+-------------+ -| ``BIGINT`` | no | -+--------------------+-------------+ -| ``BLOB`` | no | -+--------------------+-------------+ -| ``BOOLEAN`` | no | -+--------------------+-------------+ -| ``BY`` | yes | -+--------------------+-------------+ -| ``CALLED`` | no | -+--------------------+-------------+ -| ``CLUSTERING`` | no | -+--------------------+-------------+ -| ``COLUMNFAMILY`` | yes | -+--------------------+-------------+ -| ``COMPACT`` | no | -+--------------------+-------------+ -| ``CONTAINS`` | no | -+--------------------+-------------+ -| ``COUNT`` | no | -+--------------------+-------------+ -| ``COUNTER`` | no | -+--------------------+-------------+ -| ``CREATE`` | yes | -+--------------------+-------------+ -| ``CUSTOM`` | no | -+--------------------+-------------+ -| ``DATE`` | no | -+--------------------+-------------+ -| ``DECIMAL`` | no | -+--------------------+-------------+ -| ``DELETE`` | yes | -+--------------------+-------------+ -| ``DESC`` | yes | -+--------------------+-------------+ -| ``DESCRIBE`` | yes | -+--------------------+-------------+ -| ``DISTINCT`` | no | -+--------------------+-------------+ -| ``DOUBLE`` | no | -+--------------------+-------------+ -| ``DROP`` | yes | -+--------------------+-------------+ -| ``ENTRIES`` | yes | -+--------------------+-------------+ -| ``EXECUTE`` | yes | -+--------------------+-------------+ -| ``EXISTS`` | no | -+--------------------+-------------+ -| ``FILTERING`` | no | -+--------------------+-------------+ -| 
``FINALFUNC`` | no | -+--------------------+-------------+ -| ``FLOAT`` | no | -+--------------------+-------------+ -| ``FROM`` | yes | -+--------------------+-------------+ -| ``FROZEN`` | no | -+--------------------+-------------+ -| ``FULL`` | yes | -+--------------------+-------------+ -| ``FUNCTION`` | no | -+--------------------+-------------+ -| ``FUNCTIONS`` | no | -+--------------------+-------------+ -| ``GRANT`` | yes | -+--------------------+-------------+ -| ``IF`` | yes | -+--------------------+-------------+ -| ``IN`` | yes | -+--------------------+-------------+ -| ``INDEX`` | yes | -+--------------------+-------------+ -| ``INET`` | no | -+--------------------+-------------+ -| ``INFINITY`` | yes | -+--------------------+-------------+ -| ``INITCOND`` | no | -+--------------------+-------------+ -| ``INPUT`` | no | -+--------------------+-------------+ -| ``INSERT`` | yes | -+--------------------+-------------+ -| ``INT`` | no | -+--------------------+-------------+ -| ``INTO`` | yes | -+--------------------+-------------+ -| ``JSON`` | no | -+--------------------+-------------+ -| ``KEY`` | no | -+--------------------+-------------+ -| ``KEYS`` | no | -+--------------------+-------------+ -| ``KEYSPACE`` | yes | -+--------------------+-------------+ -| ``KEYSPACES`` | no | -+--------------------+-------------+ -| ``LANGUAGE`` | no | -+--------------------+-------------+ -| ``LIMIT`` | yes | -+--------------------+-------------+ -| ``LIST`` | no | -+--------------------+-------------+ -| ``LOGIN`` | no | -+--------------------+-------------+ -| ``MAP`` | no | -+--------------------+-------------+ -| ``MODIFY`` | yes | -+--------------------+-------------+ -| ``NAN`` | yes | -+--------------------+-------------+ -| ``NOLOGIN`` | no | -+--------------------+-------------+ -| ``NORECURSIVE`` | yes | -+--------------------+-------------+ -| ``NOSUPERUSER`` | no | -+--------------------+-------------+ -| ``NOT`` | yes | -+--------------------+-------------+ -| ``NULL`` | yes | -+--------------------+-------------+ -| ``OF`` | yes | -+--------------------+-------------+ -| ``ON`` | yes | -+--------------------+-------------+ -| ``OPTIONS`` | no | -+--------------------+-------------+ -| ``OR`` | yes | -+--------------------+-------------+ -| ``ORDER`` | yes | -+--------------------+-------------+ -| ``PASSWORD`` | no | -+--------------------+-------------+ -| ``PERMISSION`` | no | -+--------------------+-------------+ -| ``PERMISSIONS`` | no | -+--------------------+-------------+ -| ``PRIMARY`` | yes | -+--------------------+-------------+ -| ``RENAME`` | yes | -+--------------------+-------------+ -| ``REPLACE`` | yes | -+--------------------+-------------+ -| ``RETURNS`` | no | -+--------------------+-------------+ -| ``REVOKE`` | yes | -+--------------------+-------------+ -| ``ROLE`` | no | -+--------------------+-------------+ -| ``ROLES`` | no | -+--------------------+-------------+ -| ``SCHEMA`` | yes | -+--------------------+-------------+ -| ``SELECT`` | yes | -+--------------------+-------------+ -| ``SET`` | yes | -+--------------------+-------------+ -| ``SFUNC`` | no | -+--------------------+-------------+ -| ``SMALLINT`` | no | -+--------------------+-------------+ -| ``STATIC`` | no | -+--------------------+-------------+ -| ``STORAGE`` | no | -+--------------------+-------------+ -| ``STYPE`` | no | -+--------------------+-------------+ -| ``SUPERUSER`` | no | -+--------------------+-------------+ -| ``TABLE`` | yes | -+--------------------+-------------+ -| 
``TEXT`` | no | -+--------------------+-------------+ -| ``TIME`` | no | -+--------------------+-------------+ -| ``TIMESTAMP`` | no | -+--------------------+-------------+ -| ``TIMEUUID`` | no | -+--------------------+-------------+ -| ``TINYINT`` | no | -+--------------------+-------------+ -| ``TO`` | yes | -+--------------------+-------------+ -| ``TOKEN`` | yes | -+--------------------+-------------+ -| ``TRIGGER`` | no | -+--------------------+-------------+ -| ``TRUNCATE`` | yes | -+--------------------+-------------+ -| ``TTL`` | no | -+--------------------+-------------+ -| ``TUPLE`` | no | -+--------------------+-------------+ -| ``TYPE`` | no | -+--------------------+-------------+ -| ``UNLOGGED`` | yes | -+--------------------+-------------+ -| ``UPDATE`` | yes | -+--------------------+-------------+ -| ``USE`` | yes | -+--------------------+-------------+ -| ``USER`` | no | -+--------------------+-------------+ -| ``USERS`` | no | -+--------------------+-------------+ -| ``USING`` | yes | -+--------------------+-------------+ -| ``UUID`` | no | -+--------------------+-------------+ -| ``VALUES`` | no | -+--------------------+-------------+ -| ``VARCHAR`` | no | -+--------------------+-------------+ -| ``VARINT`` | no | -+--------------------+-------------+ -| ``WHERE`` | yes | -+--------------------+-------------+ -| ``WITH`` | yes | -+--------------------+-------------+ -| ``WRITETIME`` | no | -+--------------------+-------------+ - -Appendix B: CQL Reserved Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name. - -+-----------------+ -| type | -+=================+ -| ``bitstring`` | -+-----------------+ -| ``byte`` | -+-----------------+ -| ``complex`` | -+-----------------+ -| ``enum`` | -+-----------------+ -| ``interval`` | -+-----------------+ -| ``macaddr`` | -+-----------------+ - - -Appendix C: Dropping Compact Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting version 4.0, Thrift and COMPACT STORAGE is no longer supported. - -'ALTER ... DROP COMPACT STORAGE' statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables: - -- CQL-created Compact Tables that have no clustering columns, will expose an - additional clustering column ``column1`` with ``UTF8Type``. -- CQL-created Compact Tables that had no regular columns, will expose a - regular column ``value`` with ``BytesType``. -- For CQL-Created Compact Tables, all columns originally defined as - ``regular`` will be come ``static`` -- CQL-created Compact Tables that have clustering but have no regular - columns will have an empty value column (of ``EmptyType``) -- SuperColumn Tables (can only be created through Thrift) will expose - a compact value map with an empty name. -- Thrift-created Compact Tables will have types corresponding to their - Thrift definition. diff --git a/src/doc/3.11.3/_sources/cql/changes.rst.txt b/src/doc/3.11.3/_sources/cql/changes.rst.txt deleted file mode 100644 index 1eee5369a..000000000 --- a/src/doc/3.11.3/_sources/cql/changes.rst.txt +++ /dev/null @@ -1,204 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Changes -------- - -The following describes the changes in each version of CQL. - -3.4.4 -^^^^^ - -- ``ALTER TABLE`` ``ALTER`` has been removed; a column's type may not be changed after creation (:jira:`12443`). -- ``ALTER TYPE`` ``ALTER`` has been removed; a field's type may not be changed after creation (:jira:`12443`). - -3.4.3 -^^^^^ - -- Adds a new ``duration `` :ref:`data types ` (:jira:`11873`). -- Support for ``GROUP BY`` (:jira:`10707`). -- Adds a ``DEFAULT UNSET`` option for ``INSERT JSON`` to ignore omitted columns (:jira:`11424`). -- Allows ``null`` as a legal value for TTL on insert and update. It will be treated as equivalent to -inserting a 0 (:jira:`12216`). - -3.4.2 -^^^^^ - -- If a table has a non zero ``default_time_to_live``, then explicitly specifying a TTL of 0 in an ``INSERT`` or - ``UPDATE`` statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels - the ``default_time_to_live``). This wasn't the case before and the ``default_time_to_live`` was applied even though a - TTL had been explicitly set. -- ``ALTER TABLE`` ``ADD`` and ``DROP`` now allow multiple columns to be added/removed. -- New ``PER PARTITION LIMIT`` option for ``SELECT`` statements (see `CASSANDRA-7017 - `__. -- :ref:`User-defined functions ` can now instantiate ``UDTValue`` and ``TupleValue`` instances via the - new ``UDFContext`` interface (see `CASSANDRA-10818 `__. -- :ref:`User-defined types ` may now be stored in a non-frozen form, allowing individual fields to be updated and - deleted in ``UPDATE`` statements and ``DELETE`` statements, respectively. (`CASSANDRA-7423 - `__). - -3.4.1 -^^^^^ - -- Adds ``CAST`` functions. - -3.4.0 -^^^^^ - -- Support for :ref:`materialized views `. -- ``DELETE`` support for inequality expressions and ``IN`` restrictions on any primary key columns. -- ``UPDATE`` support for ``IN`` restrictions on any primary key columns. - -3.3.1 -^^^^^ - -- The syntax ``TRUNCATE TABLE X`` is now accepted as an alias for ``TRUNCATE X``. - -3.3.0 -^^^^^ - -- :ref:`User-defined functions and aggregates ` are now supported. -- Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings. -- Introduces Roles to supersede user based authentication and access control -- New ``date``, ``time``, ``tinyint`` and ``smallint`` :ref:`data types ` have been added. -- :ref:`JSON support ` has been added -- Adds new time conversion functions and deprecate ``dateOf`` and ``unixTimestampOf``. - -3.2.0 -^^^^^ - -- :ref:`User-defined types ` supported. -- ``CREATE INDEX`` now supports indexing collection columns, including indexing the keys of map collections through the - ``keys()`` function -- Indexes on collections may be queried using the new ``CONTAINS`` and ``CONTAINS KEY`` operators -- :ref:`Tuple types ` were added to hold fixed-length sets of typed positional fields. -- ``DROP INDEX`` now supports optionally specifying a keyspace. 
-
-3.1.7
-^^^^^
-
-- ``SELECT`` statements now support selecting multiple rows in a single partition using an ``IN`` clause on combinations
-  of clustering columns.
-- ``IF NOT EXISTS`` and ``IF EXISTS`` syntax is now supported by ``CREATE USER`` and ``DROP USER`` statements,
-  respectively.
-
-3.1.6
-^^^^^
-
-- A new ``uuid()`` method has been added.
-- Support for ``DELETE ... IF EXISTS`` syntax.
-
-3.1.5
-^^^^^
-
-- It is now possible to group clustering columns in a relation, see :ref:`WHERE ` clauses.
-- Added support for :ref:`static columns `.
-
-3.1.4
-^^^^^
-
-- ``CREATE INDEX`` now allows specifying options when creating CUSTOM indexes.
-
-3.1.3
-^^^^^
-
-- Millisecond precision formats have been added to the :ref:`timestamp ` parser.
-
-3.1.2
-^^^^^
-
-- ``NaN`` and ``Infinity`` have been added as valid float constants. They are now reserved keywords. In the unlikely case
-  you were using them as a column identifier (or keyspace/table one), you will now need to double quote them.
-
-3.1.1
-^^^^^
-
-- ``SELECT`` statement now allows listing the partition keys (using the ``DISTINCT`` modifier). See `CASSANDRA-4536
-  `__.
-- The syntax ``c IN ?`` is now supported in ``WHERE`` clauses. In that case, the value expected for the bind variable
-  will be a list of whatever type ``c`` is.
-- It is now possible to use named bind variables (using ``:name`` instead of ``?``).
-
-3.1.0
-^^^^^
-
-- ``ALTER TABLE`` ``DROP`` option added.
-- ``SELECT`` statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
-- ``CREATE`` statements for ``KEYSPACE``, ``TABLE`` and ``INDEX`` now support an ``IF NOT EXISTS`` condition.
-  Similarly, ``DROP`` statements support an ``IF EXISTS`` condition.
-- ``INSERT`` statements optionally support an ``IF NOT EXISTS`` condition and ``UPDATE`` supports ``IF`` conditions.
-
-3.0.5
-^^^^^
-
-- ``SELECT``, ``UPDATE``, and ``DELETE`` statements now allow empty ``IN`` relations (see `CASSANDRA-5626
-  `__.
-
-3.0.4
-^^^^^
-
-- Updated the syntax for custom :ref:`secondary indexes `.
-- Non-equal conditions on the partition key are now never supported, even for the ordering partitioner, as this was not
-  correct (the order was **not** the one of the type of the partition key). Instead, the ``token`` method should always
-  be used for range queries on the partition key (see :ref:`WHERE clauses `).
-
-3.0.3
-^^^^^
-
-- Support for custom :ref:`secondary indexes ` has been added.
-
-3.0.2
-^^^^^
-
-- Type validation for the :ref:`constants ` has been fixed. For instance, the implementation used to allow
-  ``'2'`` as a valid value for an ``int`` column (interpreting it as the equivalent of ``2``), or ``42`` as a valid
-  ``blob`` value (in which case ``42`` was interpreted as a hexadecimal representation of the blob). This is no longer
-  the case; type validation of constants is now stricter. See the :ref:`data types ` section for details
-  on which constant is allowed for which type.
-- The type validation fix of the previous point has led to the introduction of blob constants to allow the input of
-  blobs. Do note that while the input of blobs as string constants is still supported by this version (to allow a smoother
-  transition to blob constants), it is now deprecated and will be removed by a future version. If you were using strings
-  as blobs, you should thus update your client code ASAP to switch to blob constants.
-- A number of functions to convert native types to blobs have also been introduced.
Furthermore the token function is - now also allowed in select clauses. See the :ref:`section on functions ` for details. - -3.0.1 -^^^^^ - -- Date strings (and timestamps) are no longer accepted as valid ``timeuuid`` values. Doing so was a bug in the sense - that date string are not valid ``timeuuid``, and it was thus resulting in `confusing behaviors - `__. However, the following new methods have been added to help - working with ``timeuuid``: ``now``, ``minTimeuuid``, ``maxTimeuuid`` , - ``dateOf`` and ``unixTimestampOf``. -- Float constants now support the exponent notation. In other words, ``4.2E10`` is now a valid floating point value. - -Versioning -^^^^^^^^^^ - -Versioning of the CQL language adheres to the `Semantic Versioning `__ guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version. - -========= ============================================================================================================= - version description -========= ============================================================================================================= - Major The major version *must* be bumped when backward incompatible changes are introduced. This should rarely - occur. - Minor Minor version increments occur when new, but backward compatible, functionality is introduced. - Patch The patch version is incremented when bugs are fixed. -========= ============================================================================================================= diff --git a/src/doc/3.11.3/_sources/cql/ddl.rst.txt b/src/doc/3.11.3/_sources/cql/ddl.rst.txt deleted file mode 100644 index 302777544..000000000 --- a/src/doc/3.11.3/_sources/cql/ddl.rst.txt +++ /dev/null @@ -1,649 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-definition: - -Data Definition ---------------- - -CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in -*keyspaces*. A keyspace defines a number of options that applies to all the tables it contains, most prominently of -which is the :ref:`replication strategy ` used by the keyspace. It is generally encouraged to use -one keyspace by *application*, and thus many cluster may define only one keyspace. - -This section describes the statements used to create, modify, and remove those keyspace and tables. - -Common definitions -^^^^^^^^^^^^^^^^^^ - -The names of the keyspaces and tables are defined by the following grammar: - -.. productionlist:: - keyspace_name: `name` - table_name: [ `keyspace_name` '.' 
] `name` - name: `unquoted_name` | `quoted_name` - unquoted_name: re('[a-zA-Z_0-9]{1, 48}') - quoted_name: '"' `unquoted_name` '"' - -Both keyspace and table name should be comprised of only alphanumeric characters, cannot be empty and are limited in -size to 48 characters (that limit exists mostly to avoid filenames (which may include the keyspace and table name) to go -over the limits of certain file systems). By default, keyspace and table names are case insensitive (``myTable`` is -equivalent to ``mytable``) but case sensitivity can be forced by using double-quotes (``"myTable"`` is different from -``mytable``). - -Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is -part of. If is is not fully-qualified, the table is assumed to be in the *current* keyspace (see :ref:`USE statement -`). - -Further, the valid names for columns is simply defined as: - -.. productionlist:: - column_name: `identifier` - -We also define the notion of statement options for use in the following section: - -.. productionlist:: - options: `option` ( AND `option` )* - option: `identifier` '=' ( `identifier` | `constant` | `map_literal` ) - -.. _create-keyspace-statement: - -CREATE KEYSPACE -^^^^^^^^^^^^^^^ - -A keyspace is created using a ``CREATE KEYSPACE`` statement: - -.. productionlist:: - create_keyspace_statement: CREATE KEYSPACE [ IF NOT EXISTS ] `keyspace_name` WITH `options` - -For instance:: - - CREATE KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - - CREATE KEYSPACE Excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3} - AND durable_writes = false; - - -The supported ``options`` are: - -=================== ========== =========== ========= =================================================================== -name kind mandatory default description -=================== ========== =========== ========= =================================================================== -``replication`` *map* yes The replication strategy and options to use for the keyspace (see - details below). -``durable_writes`` *simple* no true Whether to use the commit log for updates on this keyspace - (disable this option at your own risk!). -=================== ========== =========== ========= =================================================================== - -The ``replication`` property is mandatory and must at least contains the ``'class'`` sub-option which defines the -:ref:`replication strategy ` class to use. The rest of the sub-options depends on what replication -strategy is used. By default, Cassandra support the following ``'class'``: - -- ``'SimpleStrategy'``: A simple strategy that defines a replication factor for the whole cluster. The only sub-options - supported is ``'replication_factor'`` to define that replication factor and is mandatory. -- ``'NetworkTopologyStrategy'``: A replication strategy that allows to set the replication factor independently for - each data-center. The rest of the sub-options are key-value pairs where a key is a data-center name and its value is - the associated replication factor. - -Attempting to create a keyspace that already exists will return an error unless the ``IF NOT EXISTS`` option is used. If -it is used, the statement will be a no-op if the keyspace already exists. - -.. _use-statement: - -USE -^^^ - -The ``USE`` statement allows to change the *current* keyspace (for the *connection* on which it is executed). 
A number -of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the -default keyspace used when those objects are referred without a fully-qualified name (that is, without being prefixed a -keyspace name). A ``USE`` statement simply takes the keyspace to use as current as argument: - -.. productionlist:: - use_statement: USE `keyspace_name` - -.. _alter-keyspace-statement: - -ALTER KEYSPACE -^^^^^^^^^^^^^^ - -An ``ALTER KEYSPACE`` statement allows to modify the options of a keyspace: - -.. productionlist:: - alter_keyspace_statement: ALTER KEYSPACE `keyspace_name` WITH `options` - -For instance:: - - ALTER KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4}; - -The supported options are the same than for :ref:`creating a keyspace `. - -.. _drop-keyspace-statement: - -DROP KEYSPACE -^^^^^^^^^^^^^ - -Dropping a keyspace can be done using the ``DROP KEYSPACE`` statement: - -.. productionlist:: - drop_keyspace_statement: DROP KEYSPACE [ IF EXISTS ] `keyspace_name` - -For instance:: - - DROP KEYSPACE Excelsior; - -Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UTD and -functions in it, and all the data contained in those tables. - -If the keyspace does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _create-table-statement: - -CREATE TABLE -^^^^^^^^^^^^ - -Creating a new table uses the ``CREATE TABLE`` statement: - -.. productionlist:: - create_table_statement: CREATE TABLE [ IF NOT EXISTS ] `table_name` - : '(' - : `column_definition` - : ( ',' `column_definition` )* - : [ ',' PRIMARY KEY '(' `primary_key` ')' ] - : ')' [ WITH `table_options` ] - column_definition: `column_name` `cql_type` [ STATIC ] [ PRIMARY KEY] - primary_key: `partition_key` [ ',' `clustering_columns` ] - partition_key: `column_name` - : | '(' `column_name` ( ',' `column_name` )* ')' - clustering_columns: `column_name` ( ',' `column_name` )* - table_options: COMPACT STORAGE [ AND `table_options` ] - : | CLUSTERING ORDER BY '(' `clustering_order` ')' [ AND `table_options` ] - : | `options` - clustering_order: `column_name` (ASC | DESC) ( ',' `column_name` (ASC | DESC) )* - -For instance:: - - CREATE TABLE monkeySpecies ( - species text PRIMARY KEY, - common_name text, - population varint, - average_size int - ) WITH comment='Important biological records' - AND read_repair_chance = 1.0; - - CREATE TABLE timeline ( - userid uuid, - posted_month int, - posted_time uuid, - body text, - posted_by text, - PRIMARY KEY (userid, posted_month, posted_time) - ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }; - - CREATE TABLE loads ( - machine inet, - cpu int, - mtime timeuuid, - load float, - PRIMARY KEY ((machine, cpu), mtime) - ) WITH CLUSTERING ORDER BY (mtime DESC); - -A CQL table has a name and is composed of a set of *rows*. Creating a table amounts to defining which :ref:`columns -` the rows will be composed, which of those columns compose the :ref:`primary key `, as -well as optional :ref:`options ` for the table. - -Attempting to create an already existing table will return an error unless the ``IF NOT EXISTS`` directive is used. If -it is used, the statement will be a no-op if the table already exists. - - -.. 
_column-definition: - -Column definitions -~~~~~~~~~~~~~~~~~~ - -Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later -using an :ref:`alter statement`). - -A :token:`column_definition` is primarily comprised of the name of the column defined and its :ref:`type `, -which restricts which values are accepted for that column. Additionally, a column definition can have the following -modifiers: - -``STATIC`` - it declares the column as being a :ref:`static column `. - -``PRIMARY KEY`` - it declares the column as being the sole component of the :ref:`primary key ` of the table. - -.. _static-columns: - -Static columns -`````````````` -Some columns can be declared as ``STATIC`` in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same :ref:`partition key `). For instance:: - - CREATE TABLE t ( - pk int, - t int, - v text, - s text static, - PRIMARY KEY (pk, t) - ); - - INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0'); - INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1'); - - SELECT * FROM t; - pk | t | v | s - ----+---+--------+----------- - 0 | 0 | 'val0' | 'static1' - 0 | 1 | 'val1' | 'static1' - -As can be seen, the ``s`` value is the same (``static1``) for both of the rows in the partition (the partition key in -that example being ``pk``, both rows are in that same partition): the 2nd insertion has overridden the value for ``s``. - -The use of static columns has the following restrictions: - -- tables with the ``COMPACT STORAGE`` option (see below) cannot use them. -- a table without clustering columns cannot have static columns (in a table without clustering columns, every partition - has only one row, and so every column is inherently static). -- only non ``PRIMARY KEY`` columns can be static. - -.. _primary-key: - -The Primary key -~~~~~~~~~~~~~~~ - -Within a table, a row is uniquely identified by its ``PRIMARY KEY``, and hence all tables **must** define a PRIMARY KEY -(and only one). A ``PRIMARY KEY`` definition is composed of one or more of the columns defined in the table. -Syntactically, the primary key is defined by the keywords ``PRIMARY KEY`` followed by a comma-separated list of the column -names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that -column definition by the ``PRIMARY KEY`` keywords. The order of the columns in the primary key definition matters. - -A CQL primary key is composed of 2 parts: - -- the :ref:`partition key ` part. It is the first component of the primary key definition. It can be a - single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key; - the smallest possible table definition is:: - - CREATE TABLE t (k text PRIMARY KEY); - -- the :ref:`clustering columns `. Those are the columns after the first component of the primary key - definition, and the order of those columns defines the *clustering order*. - -Some examples of primary key definitions are (complete table definitions illustrating each pattern are sketched below): - -- ``PRIMARY KEY (a)``: ``a`` is the partition key and there are no clustering columns. -- ``PRIMARY KEY (a, b, c)`` : ``a`` is the partition key and ``b`` and ``c`` are the clustering columns. -- ``PRIMARY KEY ((a, b), c)`` : ``a`` and ``b`` compose the partition key (this is often called a *composite* partition - key) and ``c`` is the clustering column. - -
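As a purely illustrative sketch (the table names ``t1``, ``t2`` and ``t3`` and their columns are hypothetical and not used elsewhere in this documentation), the three patterns above could correspond to the following complete definitions::

    -- Single-column partition key, no clustering columns
    CREATE TABLE t1 (a int, v text, PRIMARY KEY (a));

    -- Partition key a, clustering columns b and c
    CREATE TABLE t2 (a int, b int, c int, v text, PRIMARY KEY (a, b, c));

    -- Composite partition key (a, b), clustering column c
    CREATE TABLE t3 (a int, b int, c int, v text, PRIMARY KEY ((a, b), c));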
.. _partition-key: - -The partition key -````````````````` - -Within a table, CQL defines the notion of a *partition*. A partition is simply the set of rows that share the same value -for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same -partition only if they have the same values for all those partition key columns. So for instance, given the following table -definition and content:: - - CREATE TABLE t ( - a int, - b int, - c int, - d int, - PRIMARY KEY ((a, b), c, d) - ); - - SELECT * FROM t; - a | b | c | d - ---+---+---+--- - 0 | 0 | 0 | 0 // row 1 - 0 | 0 | 1 | 1 // row 2 - 0 | 1 | 2 | 2 // row 3 - 0 | 1 | 3 | 3 // row 4 - 1 | 1 | 4 | 4 // row 5 - -``row 1`` and ``row 2`` are in the same partition, ``row 3`` and ``row 4`` are also in the same partition (but a -different one) and ``row 5`` is in yet another partition. - -Note that a table always has a partition key, and that if the table has no :ref:`clustering columns -`, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there are no clustering columns). - -The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored -on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be -located together in the cluster, and it is thus important to choose your partition key wisely so that rows that need -to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of -nodes). - -Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to -be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot. - -Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done *atomically* and in *isolation*, which is not the case across partitions. - -The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects -of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficient they are. - - -.. _clustering-columns: - -The clustering columns -`````````````````````` - -The clustering columns of a table define the clustering order for the partitions of that table. For a given -:ref:`partition `, all the rows are physically ordered inside Cassandra by that clustering order. For -instance, given:: - - CREATE TABLE t ( - a int, - b int, - c int, - PRIMARY KEY (a, b) - ); - - SELECT * FROM t; - a | b | c - ---+---+--- - 0 | 0 | 4 // row 1 - 0 | 1 | 9 // row 2 - 0 | 2 | 2 // row 3 - 0 | 3 | 3 // row 4 - -then the rows (which all belong to the same partition) are all stored internally in the order of the values of their -``b`` column (the order they are displayed above). So where the partition key of the table allows grouping rows on the -same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the -retrieval of a range of rows within a partition (for instance, in the example above, ``SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3``) to be very efficient. - -
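As an additional illustrative sketch (the ``sensor_readings`` table below is hypothetical and not used elsewhere in this documentation), a typical use of clustering columns is a time series: all readings of a given sensor form one partition and are stored in ``reading_time`` order, so a contiguous slice of time can be retrieved efficiently::

    CREATE TABLE sensor_readings (
        sensor_id uuid,
        reading_time timestamp,
        value double,
        PRIMARY KEY (sensor_id, reading_time)
    );

    -- Reads a contiguous range of rows within a single partition
    SELECT reading_time, value FROM sensor_readings
    WHERE sensor_id = 123e4567-e89b-12d3-a456-426655440000
      AND reading_time >= '2018-01-01' AND reading_time < '2018-02-01';

.. 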
_create-table-options: - -Table options -~~~~~~~~~~~~~ - -A CQL table has a number of options that can be set at creation (and, for most of them, :ref:`altered -` later). These options are specified after the ``WITH`` keyword. - -Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the ``COMPACT STORAGE`` option and the ``CLUSTERING ORDER`` option. Those, as well as the other -options of a table are described in the following sections. - -.. _compact-tables: - -Compact tables -`````````````` - -.. warning:: Since Cassandra 3.0, compact tables have the exact same layout internally than non compact ones (for the - same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition - and usage that are necessary to ensure backward compatibility with the deprecated Thrift API. And as ``COMPACT - STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new table with the - ``COMPACT STORAGE`` option. - -A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is mainly targeted towards backward -compatibility for definitions created before CQL version 3 (see `www.datastax.com/dev/blog/thrift-to-cql3 -`__ for more details) and shouldn't be used for new tables. Declaring a -table with this option creates limitations for the table which are largely arbitrary but necessary for backward -compatibility with the (deprecated) Thrift API. Amongst those limitation: - -- a compact table cannot use collections nor static columns. -- if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary - key ones. This imply you cannot add or remove columns after creation in particular. -- a compact table is limited in the indexes it can create, and no materialized view can be created on it. - -.. _clustering-order: - -Reversing the clustering order -`````````````````````````````` - -The clustering order of a table is defined by the :ref:`clustering columns ` of that table. By -default, that ordering is based on natural order of those clustering order, but the ``CLUSTERING ORDER`` allows to -change that clustering order to use the *reverse* natural order for some (potentially all) of the columns. - -The ``CLUSTERING ORDER`` option takes the comma-separated list of the clustering column, each with a ``ASC`` (for -*ascendant*, e.g. the natural order) or ``DESC`` (for *descendant*, e.g. the reverse natural order). Note in particular -that the default (if the ``CLUSTERING ORDER`` option is not used) is strictly equivalent to using the option with all -clustering columns using the ``ASC`` modifier. - -Note that this option is basically a hint for the storage engine to change the order in which it stores the row but it -has 3 visible consequences: - -# it limits which ``ORDER BY`` clause are allowed for :ref:`selects ` on that table. You can only - order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering column - ``a`` and ``b`` and you defined ``WITH CLUSTERING ORDER (a DESC, b ASC)``, then in queries you will be allowed to use - ``ORDER BY (a DESC, b ASC)`` and (reverse clustering order) ``ORDER BY (a ASC, b DESC)`` but **not** ``ORDER BY (a - ASC, b ASC)`` (nor ``ORDER BY (a DESC, b DESC)``). -# it also change the default order of results when queried (if no ``ORDER BY`` is provided). 
Results are always returned - in clustering order (within a partition). -# it has a small performance impact on some queries as queries in reverse clustering order are slower than the one in - forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of - your columns (which is common with time series for instance where you often want data from the newest to the oldest), - it is an optimization to declare a descending clustering order. - -.. _create-table-general-options: - -Other table options -``````````````````` - -.. todo:: review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance) - -A table supports the following options: - -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| option | kind | default | description | -+================================+==========+=============+===========================================================+ -| ``comment`` | *simple* | none | A free-form, human-readable comment. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``read_repair_chance`` | *simple* | 0.1 | The probability with which to query extra nodes (e.g. | -| | | | more nodes than required by the consistency level) for | -| | | | the purpose of read repairs. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``dclocal_read_repair_chance`` | *simple* | 0 | The probability with which to query extra nodes (e.g. | -| | | | more nodes than required by the consistency level) | -| | | | belonging to the same data center than the read | -| | | | coordinator for the purpose of read repairs. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``gc_grace_seconds`` | *simple* | 864000 | Time to wait before garbage collecting tombstones | -| | | | (deletion markers). | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``bloom_filter_fp_chance`` | *simple* | 0.00075 | The target probability of false positive of the sstable | -| | | | bloom filters. Said bloom filters will be sized to provide| -| | | | the provided probability (thus lowering this value impact | -| | | | the size of bloom filters in-memory and on-disk) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``default_time_to_live`` | *simple* | 0 | The default expiration time (“TTL”) in seconds for a | -| | | | table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compaction`` | *map* | *see below* | :ref:`Compaction options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compression`` | *map* | *see below* | :ref:`Compression options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``caching`` | *map* | *see below* | :ref:`Caching options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ - -.. 
_cql-compaction-options: - -Compaction options -################## - -The ``compaction`` options must at least define the ``'class'`` sub-option, that defines the compaction strategy class -to use. The default supported class are ``'SizeTieredCompactionStrategy'`` (:ref:`STCS `), -``'LeveledCompactionStrategy'`` (:ref:`LCS `) and ``'TimeWindowCompactionStrategy'`` (:ref:`TWCS `) (the -``'DateTieredCompactionStrategy'`` is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be -preferred instead). Custom strategy can be provided by specifying the full class name as a :ref:`string constant -`. - -All default strategies support a number of :ref:`common options `, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS `, :ref:`LCS -` and :ref:`TWCS `). - -.. _cql-compression-options: - -Compression options -################### - -The ``compression`` options define if and how the sstables of the table are compressed. The following sub-options are -available: - -========================= =============== ============================================================================= - Option Default Description -========================= =============== ============================================================================= - ``class`` LZ4Compressor The compression algorithm to use. Default compressor are: LZ4Compressor, - SnappyCompressor and DeflateCompressor. Use ``'enabled' : false`` to disable - compression. Custom compressor can be provided by specifying the full class - name as a “string constant”:#constants. - ``enabled`` true Enable/disable sstable compression. - ``chunk_length_in_kb`` 64 On disk SSTables are compressed by block (to allow random reads). This - defines the size (in KB) of said block. Bigger values may improve the - compression rate, but increases the minimum size of data to be read from disk - for a read - ``crc_check_chance`` 1.0 When compression is enabled, each compressed block includes a checksum of - that block for the purpose of detecting disk bitrot and avoiding the - propagation of corruption to other replica. This option defines the - probability with which those checksums are checked during read. By default - they are always checked. Set to 0 to disable checksum checking and to 0.5 for - instance to check them every other read | -========================= =============== ============================================================================= - - -For instance, to create a table with LZ4Compressor and a chunk_lenth_in_kb of 4KB:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4}; - - -.. _cql-caching-options: - -Caching options -############### - -The ``caching`` options allows to configure both the *key cache* and the *row cache* for the table. The following -sub-options are available: - -======================== ========= ==================================================================================== - Option Default Description -======================== ========= ==================================================================================== - ``keys`` ALL Whether to cache keys (“key cache”) for this table. Valid values are: ``ALL`` and - ``NONE``. - ``rows_per_partition`` NONE The amount of rows to cache per partition (“row cache”). If an integer ``n`` is - specified, the first ``n`` queried rows of a partition will be cached. 
Other - possible options are ``ALL``, to cache all rows of a queried partition, or ``NONE`` - to disable row caching. -======================== ========= ==================================================================================== - - -For instance, to create a table with both a key cache and 10 rows per partition:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10}; - - -Other considerations: -##################### - -- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. There is thus no need to try to - anticipate future usage when creating a table. - -.. _alter-table-statement: - -ALTER TABLE -^^^^^^^^^^^ - -Altering an existing table uses the ``ALTER TABLE`` statement: - -.. productionlist:: - alter_table_statement: ALTER TABLE `table_name` `alter_table_instruction` - alter_table_instruction: ADD `column_name` `cql_type` ( ',' `column_name` `cql_type` )* - : | DROP `column_name` ( `column_name` )* - : | WITH `options` - -For instance:: - - ALTER TABLE addamsFamily ADD gravesite varchar; - - ALTER TABLE addamsFamily - WITH comment = 'A most excellent and useful table' - AND read_repair_chance = 0.2; - -The ``ALTER TABLE`` statement can: - -- Add new column(s) to the table (through the ``ADD`` instruction). Note that the primary key of a table cannot be - changed and thus newly added column will, by extension, never be part of the primary key. Also note that :ref:`compact - tables ` have restrictions regarding column addition. Note that this is constant (in the amount of - data the cluster contains) time operation. -- Remove column(s) from the table. This drops both the column and all its content, but note that while the column - becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings - below. Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the - cluster) time operation. -- Change some of the table options (through the ``WITH`` instruction). The :ref:`supported options - ` are the same that when creating a table (outside of ``COMPACT STORAGE`` and ``CLUSTERING - ORDER`` that cannot be changed after creation). Note that setting any ``compaction`` sub-options has the effect of - erasing all previous ``compaction`` options, so you need to re-specify all the sub-options if you want to keep them. - The same note applies to the set of ``compression`` sub-options. - -.. warning:: Dropping a column assumes that the timestamps used for the value of this column are "real" timestamp in - microseconds. Using "real" timestamps in microseconds is the default is and is **strongly** recommended but as - Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another - convention. Please be aware that if you do so, dropping a column will not work correctly. - -.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name than the dropped one - **unless** the type of the dropped column was a (non-frozen) column (due to an internal technical limitation). - - -.. _drop-table-statement: - -DROP TABLE -^^^^^^^^^^ - -Dropping a table uses the ``DROP TABLE`` statement: - -.. productionlist:: - drop_table_statement: DROP TABLE [ IF EXISTS ] `table_name` - -Dropping a table results in the immediate, irreversible removal of the table, including all data it contains. 
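For instance (an illustrative sketch reusing the ``monkeySpecies`` table created in the earlier examples)::

    DROP TABLE monkeySpecies;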
- -If the table does not exist, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _truncate-statement: - -TRUNCATE -^^^^^^^^ - -A table can be truncated using the ``TRUNCATE`` statement: - -.. productionlist:: - truncate_statement: TRUNCATE [ TABLE ] `table_name` - -Note that ``TRUNCATE TABLE foo`` is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the ``TABLE`` keyword can be omitted. - -Truncating a table permanently removes all existing data from the table, but without removing the table itself. diff --git a/src/doc/3.11.3/_sources/cql/definitions.rst.txt b/src/doc/3.11.3/_sources/cql/definitions.rst.txt deleted file mode 100644 index d4a5b59b9..000000000 --- a/src/doc/3.11.3/_sources/cql/definitions.rst.txt +++ /dev/null @@ -1,232 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. highlight:: cql - -Definitions ------------ - -.. _conventions: - -Conventions -^^^^^^^^^^^ - -To aid in specifying the CQL syntax, we will use the following conventions in this document: - -- Language rules will be given in an informal `BNF variant - `_ notation. In particular, we'll use square brakets - (``[ item ]``) for optional items, ``*`` and ``+`` for repeated items (where ``+`` imply at least one). -- The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to - their definition) while terminal keywords will be provided "all caps". Note however that keywords are - :ref:`identifiers` and are thus case insensitive in practice. We will also define some early construction using - regexp, which we'll indicate with ``re()``. -- The grammar is provided for documentation purposes and leave some minor details out. For instance, the comma on the - last column definition in a ``CREATE TABLE`` statement is optional but supported if present even though the grammar in - this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL. -- References to keywords or pieces of CQL code in running text will be shown in a ``fixed-width font``. - - -.. _identifiers: - -Identifiers and keywords -^^^^^^^^^^^^^^^^^^^^^^^^ - -The CQL language uses *identifiers* (or *names*) to identify tables, columns and other objects. An identifier is a token -matching the regular expression ``[a-zA-Z][a-zA-Z0-9_]*``. - -A number of such identifiers, like ``SELECT`` or ``WITH``, are *keywords*. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in :ref:`appendix-A`. - -Identifiers and (unquoted) keywords are case insensitive. 
Thus ``SELECT`` is the same than ``select`` or ``sElEcT``, and -``myId`` is the same than ``myid`` or ``MYID``. A convention often used (in particular by the samples of this -documentation) is to use upper case for keywords and lower case for other identifiers. - -There is a second kind of identifiers called *quoted identifiers* defined by enclosing an arbitrary sequence of -characters (non empty) in double-quotes(``"``). Quoted identifiers are never keywords. Thus ``"select"`` is not a -reserved keyword and can be used to refer to a column (note that using this is particularly advised), while ``select`` -would raise a parsing error. Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case -sensitive (``"My Quoted Id"`` is *different* from ``"my quoted id"``). A fully lowercase quoted identifier that matches -``[a-zA-Z][a-zA-Z0-9_]*`` is however *equivalent* to the unquoted identifier obtained by removing the double-quote (so -``"myid"`` is equivalent to ``myid`` and to ``myId`` but different from ``"myId"``). Inside a quoted identifier, the -double-quote character can be repeated to escape it, so ``"foo "" bar"`` is a valid identifier. - -.. note:: *quoted identifiers* allows to declare columns with arbitrary names, and those can sometime clash with - specific names used by the server. For instance, when using conditional update, the server will respond with a - result-set containing a special result named ``"[applied]"``. If you’ve declared a column with such a name, this - could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but - if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like - ``"[applied]"``) and any name that looks like a function call (like ``"f(x)"``). - -More formally, we have: - -.. productionlist:: - identifier: `unquoted_identifier` | `quoted_identifier` - unquoted_identifier: re('[a-zA-Z][a-zA-Z0-9_]*') - quoted_identifier: '"' (any character where " can appear if doubled)+ '"' - -.. _constants: - -Constants -^^^^^^^^^ - -CQL defines the following kind of *constants*: - -.. productionlist:: - constant: `string` | `integer` | `float` | `boolean` | `uuid` | `blob` | NULL - string: '\'' (any character where ' can appear if doubled)+ '\'' - : '$$' (any character other than '$$') '$$' - integer: re('-?[0-9]+') - float: re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY - boolean: TRUE | FALSE - uuid: `hex`{8}-`hex`{4}-`hex`{4}-`hex`{4}-`hex`{12} - hex: re("[0-9a-fA-F]") - blob: '0' ('x' | 'X') `hex`+ - -In other words: - -- A string constant is an arbitrary sequence of characters enclosed by single-quote(``'``). A single-quote - can be included by repeating it, e.g. ``'It''s raining today'``. Those are not to be confused with quoted - :ref:`identifiers` that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence - of characters by two dollar characters, in which case single-quote can be used without escaping (``$$It's raining - today$$``). That latter form is often used when defining :ref:`user-defined functions ` to avoid having to - escape single-quote characters in function body (as they are more likely to occur than ``$$``). -- Integer, float and boolean constant are defined as expected. Note however than float allows the special ``NaN`` and - ``Infinity`` constants. -- CQL supports UUID_ constants. -- Blobs content are provided in hexadecimal and prefixed by ``0x``. 
-- The special ``NULL`` constant denotes the absence of value. - -For how these constants are typed, see the :ref:`data-types` section. - -Terms -^^^^^ - -CQL has the notion of a *term*, which denotes the kind of values that CQL support. Terms are defined by: - -.. productionlist:: - term: `constant` | `literal` | `function_call` | `type_hint` | `bind_marker` - literal: `collection_literal` | `udt_literal` | `tuple_literal` - function_call: `identifier` '(' [ `term` (',' `term`)* ] ')' - type_hint: '(' `cql_type` `)` term - bind_marker: '?' | ':' `identifier` - -A term is thus one of: - -- A :ref:`constant `. -- A literal for either :ref:`a collection `, :ref:`a user-defined type ` or :ref:`a tuple ` - (see the linked sections for details). -- A function call: see :ref:`the section on functions ` for details on which :ref:`native function - ` exists and how to define your own :ref:`user-defined ones `. -- A *type hint*: see the :ref:`related section ` for details. -- A bind marker, which denotes a variable to be bound at execution time. See the section on :ref:`prepared-statements` - for details. A bind marker can be either anonymous (``?``) or named (``:some_name``). The latter form provides a more - convenient way to refer to the variable for binding it and should generally be preferred. - - -Comments -^^^^^^^^ - -A comment in CQL is a line beginning by either double dashes (``--``) or double slash (``//``). - -Multi-line comments are also supported through enclosure within ``/*`` and ``*/`` (but nesting is not supported). - -:: - - -- This is a comment - // This is a comment too - /* This is - a multi-line comment */ - -Statements -^^^^^^^^^^ - -CQL consists of statements that can be divided in the following categories: - -- :ref:`data-definition` statements, to define and change how the data is stored (keyspaces and tables). -- :ref:`data-manipulation` statements, for selecting, inserting and deleting data. -- :ref:`secondary-indexes` statements. -- :ref:`materialized-views` statements. -- :ref:`cql-roles` statements. -- :ref:`cql-permissions` statements. -- :ref:`User-Defined Functions ` statements. -- :ref:`udts` statements. -- :ref:`cql-triggers` statements. - -All the statements are listed below and are described in the rest of this documentation (see links above): - -.. 
productionlist:: - cql_statement: `statement` [ ';' ] - statement: `ddl_statement` - : | `dml_statement` - : | `secondary_index_statement` - : | `materialized_view_statement` - : | `role_or_permission_statement` - : | `udf_statement` - : | `udt_statement` - : | `trigger_statement` - ddl_statement: `use_statement` - : | `create_keyspace_statement` - : | `alter_keyspace_statement` - : | `drop_keyspace_statement` - : | `create_table_statement` - : | `alter_table_statement` - : | `drop_table_statement` - : | `truncate_statement` - dml_statement: `select_statement` - : | `insert_statement` - : | `update_statement` - : | `delete_statement` - : | `batch_statement` - secondary_index_statement: `create_index_statement` - : | `drop_index_statement` - materialized_view_statement: `create_materialized_view_statement` - : | `drop_materialized_view_statement` - role_or_permission_statement: `create_role_statement` - : | `alter_role_statement` - : | `drop_role_statement` - : | `grant_role_statement` - : | `revoke_role_statement` - : | `list_roles_statement` - : | `grant_permission_statement` - : | `revoke_permission_statement` - : | `list_permissions_statement` - : | `create_user_statement` - : | `alter_user_statement` - : | `drop_user_statement` - : | `list_users_statement` - udf_statement: `create_function_statement` - : | `drop_function_statement` - : | `create_aggregate_statement` - : | `drop_aggregate_statement` - udt_statement: `create_type_statement` - : | `alter_type_statement` - : | `drop_type_statement` - trigger_statement: `create_trigger_statement` - : | `drop_trigger_statement` - -.. _prepared-statements: - -Prepared Statements -^^^^^^^^^^^^^^^^^^^ - -CQL supports *prepared statements*. Prepared statements are an optimization that allows to parse a query only once but -execute it multiple times with different concrete values. - -Any statement that uses at least one bind marker (see :token:`bind_marker`) will need to be *prepared*. After which the statement -can be *executed* by provided concrete values for each of its marker. The exact details of how a statement is prepared -and then executed depends on the CQL driver used and you should refer to your driver documentation. diff --git a/src/doc/3.11.3/_sources/cql/dml.rst.txt b/src/doc/3.11.3/_sources/cql/dml.rst.txt deleted file mode 100644 index 1308de57e..000000000 --- a/src/doc/3.11.3/_sources/cql/dml.rst.txt +++ /dev/null @@ -1,522 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-manipulation: - -Data Manipulation ------------------ - -This section describes the statements supported by CQL to insert, update, delete and query data. - -.. _select-statement: - -SELECT -^^^^^^ - -Querying data from data is done using a ``SELECT`` statement: - -.. 
productionlist:: - select_statement: SELECT [ JSON | DISTINCT ] ( `select_clause` | '*' ) - : FROM `table_name` - : [ WHERE `where_clause` ] - : [ GROUP BY `group_by_clause` ] - : [ ORDER BY `ordering_clause` ] - : [ PER PARTITION LIMIT (`integer` | `bind_marker`) ] - : [ LIMIT (`integer` | `bind_marker`) ] - : [ ALLOW FILTERING ] - select_clause: `selector` [ AS `identifier` ] ( ',' `selector` [ AS `identifier` ] ) - selector: `column_name` - : | `term` - : | CAST '(' `selector` AS `cql_type` ')' - : | `function_name` '(' [ `selector` ( ',' `selector` )* ] ')' - : | COUNT '(' '*' ')' - where_clause: `relation` ( AND `relation` )* - relation: `column_name` `operator` `term` - : '(' `column_name` ( ',' `column_name` )* ')' `operator` `tuple_literal` - : TOKEN '(' `column_name` ( ',' `column_name` )* ')' `operator` `term` - operator: '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY - group_by_clause: `column_name` ( ',' `column_name` )* - ordering_clause: `column_name` [ ASC | DESC ] ( ',' `column_name` [ ASC | DESC ] )* - -For instance:: - - SELECT name, occupation FROM users WHERE userid IN (199, 200, 207); - SELECT JSON name, occupation FROM users WHERE userid = 199; - SELECT name AS user_name, occupation AS user_occupation FROM users; - - SELECT time, value - FROM events - WHERE event_type = 'myEvent' - AND time > '2011-02-03' - AND time <= '2012-01-01' - - SELECT COUNT (*) AS user_count FROM users; - -The ``SELECT`` statements reads one or more columns for one or more rows in a table. It returns a result-set of the rows -matching the request, where each row contains the values for the selection corresponding to the query. Additionally, -:ref:`functions ` including :ref:`aggregation ` ones can be applied to the result. - -A ``SELECT`` statement contains at least a :ref:`selection clause ` and the name of the table on which -the selection is on (note that CQL does **not** joins or sub-queries and thus a select statement only apply to a single -table). In most case, a select will also have a :ref:`where clause ` and it can optionally have additional -clauses to :ref:`order ` or :ref:`limit ` the results. Lastly, :ref:`queries that require -filtering ` can be allowed if the ``ALLOW FILTERING`` flag is provided. - -.. _selection-clause: - -Selection clause -~~~~~~~~~~~~~~~~ - -The :token:`select_clause` determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. It consists of a comma-separated list of *selectors* or, -alternatively, of the wildcard character (``*``) to select all the columns defined in the table. - -Selectors -````````` - -A :token:`selector` can be one of: - -- A column name of the table selected, to retrieve the values for that column. -- A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the - corresponding column of the result-set will simply have the value of this term for every row returned). -- A casting, which allows to convert a nested selector to a (compatible) type. -- A function call, where the arguments are selector themselves. See the section on :ref:`functions ` for - more details. -- The special call ``COUNT(*)`` to the :ref:`COUNT function `, which counts all non-null results. - -Aliases -``````` - -Every *top-level* selector can also be aliased (using `AS`). If so, the name of the corresponding column in the result -set will be that of the alias. 
For instance:: - - // Without alias - SELECT intAsBlob(4) FROM t; - - // intAsBlob(4) - // -------------- - // 0x00000004 - - // With alias - SELECT intAsBlob(4) AS four FROM t; - - // four - // ------------ - // 0x00000004 - -.. note:: Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the ``WHERE`` - clause, not in the ``ORDER BY`` clause, ...). You must use the orignal column name instead. - - -``WRITETIME`` and ``TTL`` function -``````````````````````````````````` - -Selection supports two special functions (that aren't allowed anywhere else): ``WRITETIME`` and ``TTL``. Both function -take only one argument and that argument *must* be a column name (so for instance ``TTL(3)`` is invalid). - -Those functions allow to retrieve meta-information that are stored internally for each column, namely: - -- the timestamp of the value of the column for ``WRITETIME``. -- the remaining time to live (in seconds) for the value of the column if it set to expire (and ``null`` otherwise). - -.. _where-clause: - -The ``WHERE`` clause -~~~~~~~~~~~~~~~~~~~~ - -The ``WHERE`` clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the ``PRIMARY KEY`` and/or have a `secondary index <#createIndexStmt>`__ defined on them. - -Not all relations are allowed in a query. For instance, non-equal relations (where ``IN`` is considered as an equal -relation) on a partition key are not supported (but see the use of the ``TOKEN`` method below to do non-equal queries on -the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations -on them is restricted to the relations that allow to select a **contiguous** (for the ordering) set of rows. For -instance, given:: - - CREATE TABLE posts ( - userid text, - blog_title text, - posted_at timestamp, - entry_title text, - content text, - category int, - PRIMARY KEY (userid, blog_title, posted_at) - ) - -The following query is allowed:: - - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND blog_title='John''s Blog' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):: - - // Needs a blog_title to be set to select ranges of posted_at - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -When specifying relations, the ``TOKEN`` function can be used on the ``PARTITION KEY`` column to query. In that case, -rows will be selected based on the token of their ``PARTITION_KEY`` rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -``token(-1) > token(0)`` in particular). Example:: - - SELECT * FROM posts - WHERE token(userid) > token('tom') AND token(userid) < token('bob') - -Moreover, the ``IN`` relation is only allowed on the last column of the partition key and on the last column of the full -primary key. - -It is also possible to “group” ``CLUSTERING COLUMNS`` together in a relation using the tuple notation. 
For instance:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01') - -will request all rows that sorts after the one having “John's Blog” as ``blog_tile`` and '2012-01-01' for ``posted_at`` -in the clustering order. In particular, rows having a ``post_at <= '2012-01-01'`` will be returned as long as their -``blog_title > 'John''s Blog'``, which would not be the case for:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND blog_title > 'John''s Blog' - AND posted_at > '2012-01-01' - -The tuple notation may also be used for ``IN`` clauses on clustering columns:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01')) - -The ``CONTAINS`` operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -``CONTAINS`` applies to the map values. The ``CONTAINS KEY`` operator may only be used on map columns and applies to the -map keys. - -.. _group-by-clause: - -Grouping results -~~~~~~~~~~~~~~~~ - -The ``GROUP BY`` option allows to condense into a single row all selected rows that share the same values for a set -of columns. - -Using the ``GROUP BY`` option, it is only possible to group rows at the partition key level or at a clustering column -level. By consequence, the ``GROUP BY`` option only accept as arguments primary key column names in the primary key -order. If a primary key column is restricted by an equality restriction it is not required to be present in the -``GROUP BY`` clause. - -Aggregate functions will produce a separate value for each group. If no ``GROUP BY`` clause is specified, -aggregates functions will produce a single value for all the rows. - -If a column is selected without an aggregate function, in a statement with a ``GROUP BY``, the first value encounter -in each group will be returned. - -.. _ordering-clause: - -Ordering results -~~~~~~~~~~~~~~~~ - -The ``ORDER BY`` clause allows to select the order of the returned results. It takes as argument a list of column names -along with the order for the column (``ASC`` for ascendant and ``DESC`` for descendant, omitting the order being -equivalent to ``ASC``). Currently the possible orderings are limited by the :ref:`clustering order ` -defined on the table: - -- if the table has been defined without any specific ``CLUSTERING ORDER``, then then allowed orderings are the order - induced by the clustering columns and the reverse of that one. -- otherwise, the orderings allowed are the order of the ``CLUSTERING ORDER`` option and the reversed one. - -.. _limit-clause: - -Limiting results -~~~~~~~~~~~~~~~~ - -The ``LIMIT`` option to a ``SELECT`` statement limits the number of rows returned by a query, while the ``PER PARTITION -LIMIT`` option limits the number of rows returned for a given partition by the query. Note that both type of limit can -used in the same statement. - -.. _allow-filtering: - -Allowing filtering -~~~~~~~~~~~~~~~~~~ - -By default, CQL only allows select queries that don't involve “filtering” server side, i.e. queries where we know that -all (live) record read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” -queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of -data **returned** by the query (which can be controlled through ``LIMIT``). 
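For instance, the amount of data returned can be capped with the limiting options described above (a small illustrative sketch reusing the ``posts`` table from the earlier examples; the limit values are arbitrary)::

    SELECT * FROM posts WHERE userid = 'john doe' LIMIT 100;
    SELECT * FROM posts PER PARTITION LIMIT 2 LIMIT 100;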
- -The ``ALLOW FILTERING`` option allows to explicitly allow (some) queries that require filtering. Please note that a -query using ``ALLOW FILTERING`` may thus have unpredictable performance (for the definition above), i.e. even a query -that selects a handful of records **may** exhibit performance that depends on the total amount of data stored in the -cluster. - -For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:: - - CREATE TABLE users ( - username text PRIMARY KEY, - firstname text, - lastname text, - birth_year int, - country text - ) - - CREATE INDEX ON users(birth_year); - -Then the following queries are valid:: - - SELECT * FROM users; - SELECT * FROM users WHERE birth_year = 1981; - -because in both case, Cassandra guarantees that these queries performance will be proportional to the amount of data -returned. In particular, if no users are born in 1981, then the second query performance will not depend of the number -of user profile stored in the database (not directly at least: due to secondary index implementation consideration, this -query may still depend on the number of node in the cluster, which indirectly depends on the amount of data stored. -Nevertheless, the number of nodes will always be multiple number of magnitude lower than the number of user profile -stored). Of course, both query may return very large result set in practice, but the amount of data returned can always -be controlled by adding a ``LIMIT``. - -However, the following query will be rejected:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR'; - -because Cassandra cannot guarantee that it won't have to scan large amount of data even if the result to those query is -small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from -France. However, if you “know what you are doing”, you can force the execution of this query by using ``ALLOW -FILTERING`` and so the following query is valid:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING; - -.. _insert-statement: - -INSERT -^^^^^^ - -Inserting data for a row is done using an ``INSERT`` statement: - -.. productionlist:: - insert_statement: INSERT INTO `table_name` ( `names_values` | `json_clause` ) - : [ IF NOT EXISTS ] - : [ USING `update_parameter` ( AND `update_parameter` )* ] - names_values: `names` VALUES `tuple_literal` - json_clause: JSON `string` [ DEFAULT ( NULL | UNSET ) ] - names: '(' `column_name` ( ',' `column_name` )* ')' - -For instance:: - - INSERT INTO NerdMovies (movie, director, main_actor, year) - VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005) - USING TTL 86400; - - INSERT INTO NerdMovies JSON '{"movie": "Serenity", - "director": "Joss Whedon", - "year": 2005}'; - -The ``INSERT`` statement writes one or more columns for a given row in a table. Note that since a row is identified by -its ``PRIMARY KEY``, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the ``VALUES`` syntax. When using the ``JSON`` syntax, they are optional. See the -section on :ref:`JSON support ` for more detail. - -Note that unlike in SQL, ``INSERT`` does not check the prior existence of the row by default: the row is created if none -existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened. 
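For instance (a small illustrative sketch assuming, as in the examples above, that ``movie`` is the primary key of the ``NerdMovies`` table), issuing the same ``INSERT`` twice simply overwrites the previously written values and leaves a single row::

    INSERT INTO NerdMovies (movie, director, year) VALUES ('Serenity', 'Joss Whedon', 2004);
    -- Same primary key: the existing row is updated rather than rejected
    INSERT INTO NerdMovies (movie, director, year) VALUES ('Serenity', 'Joss Whedon', 2005);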
- -It is however possible to use the ``IF NOT EXISTS`` condition to only insert if the row does not exist prior to the -insertion. But please note that using ``IF NOT EXISTS`` will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly. - -All updates for an ``INSERT`` are applied atomically and in isolation. - -Please refer to the :ref:`UPDATE ` section for informations on the :token:`update_parameter`. - -Also note that ``INSERT`` does not support counters, while ``UPDATE`` does. - -.. _update-statement: - -UPDATE -^^^^^^ - -Updating a row is done using an ``UPDATE`` statement: - -.. productionlist:: - update_statement: UPDATE `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : SET `assignment` ( ',' `assignment` )* - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - update_parameter: ( TIMESTAMP | TTL ) ( `integer` | `bind_marker` ) - assignment: `simple_selection` '=' `term` - :| `column_name` '=' `column_name` ( '+' | '-' ) `term` - :| `column_name` '=' `list_literal` '+' `column_name` - simple_selection: `column_name` - :| `column_name` '[' `term` ']' - :| `column_name` '.' `field_name - condition: `simple_selection` `operator` `term` - -For instance:: - - UPDATE NerdMovies USING TTL 400 - SET director = 'Joss Whedon', - main_actor = 'Nathan Fillion', - year = 2005 - WHERE movie = 'Serenity'; - - UPDATE UserActions - SET total = total + 2 - WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 - AND action = 'click'; - -The ``UPDATE`` statement writes one or more columns for a given row in a table. The :token:`where_clause` is used to -select the row to update and must include all columns composing the ``PRIMARY KEY``. Non primary key columns are then -set using the ``SET`` keyword. - -Note that unlike in SQL, ``UPDATE`` does not check the prior existence of the row by default (except through ``IF``, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred. - -It is however possible to use the conditions on some columns through ``IF``, in which case the row will not be updated -unless the conditions are met. But, please note that using ``IF`` conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly. - -In an ``UPDATE`` statement, all updates within the same partition key are applied atomically and in isolation. - -Regarding the :token:`assignment`: - -- ``c = c + 3`` is used to increment/decrement counters. The column name after the '=' sign **must** be the same than - the one before the '=' sign. Note that increment/decrement is only allowed on counters, and are the *only* update - operations allowed on counters. See the section on :ref:`counters ` for details. -- ``id = id + `` and ``id[value1] = value2`` are for collections, see the :ref:`relevant section - ` for details. -- ``id.field = 3`` is for setting the value of a field on a non-frozen user-defined types. see the :ref:`relevant section - ` for details. - -.. _update-parameters: - -Update parameters -~~~~~~~~~~~~~~~~~ - -The ``UPDATE``, ``INSERT`` (and ``DELETE`` and ``BATCH`` for the ``TIMESTAMP``) statements support the following -parameters: - -- ``TIMESTAMP``: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in - microseconds) at the start of statement execution as the timestamp. 
This is usually a suitable default. -- ``TTL``: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are - automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not - the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL - is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a - default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of ``null`` is equivalent - to inserting with a TTL of 0. - -.. _delete_statement: - -DELETE -^^^^^^ - -Deleting rows or parts of rows uses the ``DELETE`` statement: - -.. productionlist:: - delete_statement: DELETE [ `simple_selection` ( ',' `simple_selection` ) ] - : FROM `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - -For instance:: - - DELETE FROM NerdMovies USING TIMESTAMP 1240003134 - WHERE movie = 'Serenity'; - - DELETE phone FROM Users - WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); - -The ``DELETE`` statement deletes columns and rows. If column names are provided directly after the ``DELETE`` keyword, -only those columns are deleted from the row indicated by the ``WHERE`` clause. Otherwise, whole rows are removed. - -The ``WHERE`` clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -``IN`` operator. A range of rows may be deleted using an inequality operator (such as ``>=``). - -``DELETE`` supports the ``TIMESTAMP`` option with the same semantics as in :ref:`updates `. - -In a ``DELETE`` statement, all deletions within the same partition key are applied atomically and in isolation. - -A ``DELETE`` operation can be conditional through the use of an ``IF`` clause, similar to ``UPDATE`` and ``INSERT`` -statements. However, as with ``INSERT`` and ``UPDATE`` statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly. - -.. _batch_statement: - -BATCH -^^^^^ - -Multiple ``INSERT``, ``UPDATE`` and ``DELETE`` can be executed in a single statement by grouping them through a -``BATCH`` statement: - -.. productionlist:: - batch_statement: BEGIN [ UNLOGGED | COUNTER ] BATCH - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : `modification_statement` ( ';' `modification_statement` )* - : APPLY BATCH - modification_statement: `insert_statement` | `update_statement` | `delete_statement` - -For instance:: - - BEGIN BATCH - INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user'); - UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3'; - INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c'); - DELETE name FROM users WHERE userid = 'user1'; - APPLY BATCH; - -The ``BATCH`` statement group multiple modification statements (insertions/updates and deletions) into a single -statement. It serves several purposes: - -- It saves network round-trips between the client and the server (and sometimes between the server coordinator and the - replicas) when batching multiple updates. -- All updates in a ``BATCH`` belonging to a given partition key are performed in isolation. 
-- By default, all operations in the batch are performed as *logged*, to ensure all mutations eventually complete (or - none will). See the notes on :ref:`UNLOGGED batches ` for more details. - -Note that: - -- ``BATCH`` statements may only contain ``UPDATE``, ``INSERT`` and ``DELETE`` statements (not other batches for instance). -- Batches are *not* a full analogue for SQL transactions. -- If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp - (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra's conflict - resolution procedure in the case of `timestamp ties `__, operations may - be applied in an order that is different from the order they are listed in the ``BATCH`` statement. To force a - particular operation ordering, you must specify per-operation timestamps. -- A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization. - -.. _unlogged-batches: - -``UNLOGGED`` batches -~~~~~~~~~~~~~~~~~~~~ - -By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition). - -There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur -this penalty, you can tell Cassandra to skip the batchlog with the ``UNLOGGED`` option. If the ``UNLOGGED`` option is -used, a failed batch might leave the patch only partly applied. - -``COUNTER`` batches -~~~~~~~~~~~~~~~~~~~ - -Use the ``COUNTER`` option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent. diff --git a/src/doc/3.11.3/_sources/cql/functions.rst.txt b/src/doc/3.11.3/_sources/cql/functions.rst.txt deleted file mode 100644 index 47026cd94..000000000 --- a/src/doc/3.11.3/_sources/cql/functions.rst.txt +++ /dev/null @@ -1,558 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-functions: - -.. Need some intro for UDF and native functions in general and point those to it. -.. _udfs: -.. _native-functions: - -Functions ---------- - -CQL supports 2 main categories of functions: - -- the :ref:`scalar functions `, which simply take a number of values and produce an output with it. -- the :ref:`aggregate functions `, which are used to aggregate multiple rows results from a - ``SELECT`` statement. - -In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined -functions. - -.. 
note:: By default, the use of user-defined functions is disabled by default for security concerns (even when - enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do - evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions`` - in ``cassandra.yaml`` to enable them. - -A function is identifier by its name: - -.. productionlist:: - function_name: [ `keyspace_name` '.' ] `name` - -.. _scalar-functions: - -Scalar functions -^^^^^^^^^^^^^^^^ - -.. _scalar-native-functions: - -Native functions -~~~~~~~~~~~~~~~~ - -Cast -```` - -The ``cast`` function can be used to converts one native datatype to another. - -The following table describes the conversions supported by the ``cast`` function. Cassandra will silently ignore any -cast converting a datatype into its own datatype. - -=============== ======================================================================================================= - From To -=============== ======================================================================================================= - ``ascii`` ``text``, ``varchar`` - ``bigint`` ``tinyint``, ``smallint``, ``int``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``boolean`` ``text``, ``varchar`` - ``counter`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``date`` ``timestamp`` - ``decimal`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``varint``, ``text``, - ``varchar`` - ``double`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``float`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``inet`` ``text``, ``varchar`` - ``int`` ``tinyint``, ``smallint``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``smallint`` ``tinyint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``time`` ``text``, ``varchar`` - ``timestamp`` ``date``, ``text``, ``varchar`` - ``timeuuid`` ``timestamp``, ``date``, ``text``, ``varchar`` - ``tinyint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``uuid`` ``text``, ``varchar`` - ``varint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``text``, - ``varchar`` -=============== ======================================================================================================= - -The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value -'1.0'. For instance:: - - SELECT avg(cast(count as double)) FROM myTable - -Token -````` - -The ``token`` function allows to compute the token for a given partition key. The exact signature of the token function -depends on the table concerned and of the partitioner used by the cluster. - -The type of the arguments of the ``token`` depend on the type of the partition key columns. The return type depend on -the partitioner in use: - -- For Murmur3Partitioner, the return type is ``bigint``. -- For RandomPartitioner, the return type is ``varint``. -- For ByteOrderedPartitioner, the return type is ``blob``. 
- -For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:: - - CREATE TABLE users ( - userid text PRIMARY KEY, - username text, - ) - -then the ``token`` function will take a single argument of type ``text`` (in that case, the partition key is ``userid`` -(there is no clustering columns so the partition key is the same than the primary key)), and the return type will be -``bigint``. - -Uuid -```` -The ``uuid`` function takes no parameters and generates a random type 4 uuid suitable for use in ``INSERT`` or -``UPDATE`` statements. - -.. _timeuuid-functions: - -Timeuuid functions -`````````````````` - -``now`` -####### - -The ``now`` function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time where -the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in -``WHERE`` clauses. For instance, a query of the form:: - - SELECT * FROM myTable WHERE t = now() - -will never return any result by design, since the value returned by ``now()`` is guaranteed to be unique. - -``minTimeuuid`` and ``maxTimeuuid`` -################################### - -The ``minTimeuuid`` (resp. ``maxTimeuuid``) function takes a ``timestamp`` value ``t`` (which can be `either a timestamp -or a date string `) and return a *fake* ``timeuuid`` corresponding to the *smallest* (resp. *biggest*) -possible ``timeuuid`` having for timestamp ``t``. So for instance:: - - SELECT * FROM myTable - WHERE t > maxTimeuuid('2013-01-01 00:05+0000') - AND t < minTimeuuid('2013-02-02 10:00+0000') - -will select all rows where the ``timeuuid`` column ``t`` is strictly older than ``'2013-01-01 00:05+0000'`` but strictly -younger than ``'2013-02-02 10:00+0000'``. Please note that ``t >= maxTimeuuid('2013-01-01 00:05+0000')`` would still -*not* select a ``timeuuid`` generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to ``t > -maxTimeuuid('2013-01-01 00:05+0000')``. - -.. note:: We called the values generated by ``minTimeuuid`` and ``maxTimeuuid`` *fake* UUID because they do no respect - the Time-Based UUID generation process specified by the `RFC 4122 `__. In - particular, the value returned by these 2 methods will not be unique. This means you should only use those methods - for querying (as in the example above). Inserting the result of those methods is almost certainly *a bad idea*. - -Time conversion functions -````````````````````````` - -A number of functions are provided to “convert” a ``timeuuid``, a ``timestamp`` or a ``date`` into another ``native`` -type. 
- -===================== =============== =================================================================== - Function name Input type Description -===================== =============== =================================================================== - ``toDate`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``date`` type - ``toDate`` ``timestamp`` Converts the ``timestamp`` argument into a ``date`` type - ``toTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``timestamp`` type - ``toTimestamp`` ``date`` Converts the ``date`` argument into a ``timestamp`` type - ``toUnixTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``timestamp`` Converts the ``timestamp`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``date`` Converts the ``date`` argument into a ``bigInt`` raw value - ``dateOf`` ``timeuuid`` Similar to ``toTimestamp(timeuuid)`` (DEPRECATED) - ``unixTimestampOf`` ``timeuuid`` Similar to ``toUnixTimestamp(timeuuid)`` (DEPRECATED) -===================== =============== =================================================================== - -Blob conversion functions -````````````````````````` -A number of functions are provided to “convert” the native types into binary data (``blob``). For every -```` ``type`` supported by CQL (a notable exceptions is ``blob``, for obvious reasons), the function -``typeAsBlob`` takes a argument of type ``type`` and return it as a ``blob``. Conversely, the function ``blobAsType`` -takes a 64-bit ``blob`` argument and convert it to a ``bigint`` value. And so for instance, ``bigintAsBlob(3)`` is -``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``. - -.. _user-defined-scalar-functions: - -User-defined functions -~~~~~~~~~~~~~~~~~~~~~~ - -User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in *Java* and *JavaScript*. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath. - -UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster. - -UDFs can be *overloaded* - i.e. multiple UDFs with different argument types but the same function name. Example:: - - CREATE FUNCTION sample ( arg int ) ...; - CREATE FUNCTION sample ( arg text ) ...; - -User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing. - -It is valid to use *complex* types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types. - -Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too. - -Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:: - - CREATE FUNCTION some_function ( arg int ) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE java - AS $$ return arg; $$; - - SELECT some_function(column) FROM atable ...; - UPDATE atable SET col = some_function(?) 
...; - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct_using_udt ( udtarg frozen ) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ return udtarg.getString("txt"); $$; - -User-defined functions can be used in ``SELECT``, ``INSERT`` and ``UPDATE`` statements. - -The implicitly available ``udfContext`` field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:: - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct\_using\_udt ( somearg int ) - RETURNS NULL ON NULL INPUT - RETURNS custom_type - LANGUAGE java - AS $$ - UDTValue udt = udfContext.newReturnUDTValue(); - udt.setString("txt", "some string"); - udt.setInt("i", 42); - return udt; - $$; - -The definition of the ``UDFContext`` interface can be found in the Apache Cassandra source code for -``org.apache.cassandra.cql3.functions.UDFContext``. - -.. code-block:: java - - public interface UDFContext - { - UDTValue newArgUDTValue(String argName); - UDTValue newArgUDTValue(int argNum); - UDTValue newReturnUDTValue(); - UDTValue newUDTValue(String udtName); - TupleValue newArgTupleValue(String argName); - TupleValue newArgTupleValue(int argNum); - TupleValue newReturnTupleValue(); - TupleValue newTupleValue(String cqlDefinition); - } - -Java UDFs already have some imports for common interfaces and classes defined. These imports are: - -.. code-block:: java - - import java.nio.ByteBuffer; - import java.util.List; - import java.util.Map; - import java.util.Set; - import org.apache.cassandra.cql3.functions.UDFContext; - import com.datastax.driver.core.TypeCodec; - import com.datastax.driver.core.TupleValue; - import com.datastax.driver.core.UDTValue; - -Please note, that these convenience imports are not available for script UDFs. - -.. _create-function-statement: - -CREATE FUNCTION -``````````````` - -Creating a new user-defined function uses the ``CREATE FUNCTION`` statement: - -.. productionlist:: - create_function_statement: CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS] - : `function_name` '(' `arguments_declaration` ')' - : [ CALLED | RETURNS NULL ] ON NULL INPUT - : RETURNS `cql_type` - : LANGUAGE `identifier` - : AS `string` - arguments_declaration: `identifier` `cql_type` ( ',' `identifier` `cql_type` )* - -For instance:: - - CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen, listarg list) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - - CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int) - CALLED ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - -``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords either creates a function or replaces an existing one with -the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE`` fails if a function with the same signature already -exists. - -If the optional ``IF NOT EXISTS`` keywords are used, the function will -only be created if another function with the same signature does not -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -Behavior on invocation with ``null`` values must be defined for each -function. There are two options: - -#. ``RETURNS NULL ON NULL INPUT`` declares that the function will always - return ``null`` if any of the input arguments is ``null``. -#. ``CALLED ON NULL INPUT`` declares that the function will always be - executed. 
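A minimal sketch contrasting the two declarations (the function names and bodies here are hypothetical, not taken from
the examples above)::

    -- Never invoked with a null argument: the result is simply null in that case
    CREATE FUNCTION double_strict ( arg int )
        RETURNS NULL ON NULL INPUT
        RETURNS int
        LANGUAGE java
        AS $$ return arg * 2; $$;

    -- Always invoked, so the body itself must handle a null argument
    -- (here it substitutes a default value)
    CREATE FUNCTION double_lenient ( arg int )
        CALLED ON NULL INPUT
        RETURNS int
        LANGUAGE java
        AS $$
            if (arg == null) return 0;
            return arg * 2;
        $$;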
- -Function Signature -################## - -Signatures are used to distinguish individual functions. The signature consists of: - -#. The fully qualified function name - i.e *keyspace* plus *function-name* -#. The concatenated list of all argument types - -Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules. - -Functions belong to a keyspace. If no keyspace is specified in ````, the current keyspace is used (i.e. -the keyspace specified using the ``USE`` statement). It is not possible to create a user-defined function in one of the -system keyspaces. - -.. _drop-function-statement: - -DROP FUNCTION -````````````` - -Dropping a function uses the ``DROP FUNCTION`` statement: - -.. productionlist:: - drop_function_statement: DROP FUNCTION [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - arguments_signature: `cql_type` ( ',' `cql_type` )* - -For instance:: - - DROP FUNCTION myfunction; - DROP FUNCTION mykeyspace.afunction; - DROP FUNCTION afunction ( int ); - DROP FUNCTION afunction ( text ); - -You must specify the argument types (:token:`arguments_signature`) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions). - -``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if -it doesn't - -.. _aggregate-functions: - -Aggregate functions -^^^^^^^^^^^^^^^^^^^ - -Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set. - -If ``normal`` columns, ``scalar functions``, ``UDT`` fields, ``writetime`` or ``ttl`` are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query. - -Native aggregates -~~~~~~~~~~~~~~~~~ - -.. _count-function: - -Count -````` - -The ``count`` function can be used to count the rows returned by a query. Example:: - - SELECT COUNT (*) FROM plays; - SELECT COUNT (1) FROM plays; - -It also can be used to count the non null value of a given column:: - - SELECT COUNT (scores) FROM plays; - -Max and Min -``````````` - -The ``max`` and ``min`` functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:: - - SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake'; - -Sum -``` - -The ``sum`` function can be used to sum up all the values returned by a query for a given column. For instance:: - - SELECT SUM (players) FROM plays; - -Avg -``` - -The ``avg`` function can be used to compute the average of all the values returned by a query for a given column. For -instance:: - - SELECT AVG (players) FROM plays; - -.. _user-defined-aggregates-functions: - -User-Defined Aggregates -~~~~~~~~~~~~~~~~~~~~~~~ - -User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -*count*, *min*, and *max*. - -Each aggregate requires an *initial state* (``INITCOND``, which defaults to ``null``) of type ``STYPE``. The first -argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional ``FINALFUNC`` is executed with last -state value as its argument. 
- -``STYPE`` is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate). - -User-defined aggregates can be used in ``SELECT`` statement. - -A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` -statement):: - - CREATE OR REPLACE FUNCTION averageState(state tuple, val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS $$ - if (val != null) { - state.setInt(0, state.getInt(0)+1); - state.setLong(1, state.getLong(1)+val.intValue()); - } - return state; - $$; - - CREATE OR REPLACE FUNCTION averageFinal (state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS $$ - double r = 0; - if (state.getInt(0) == 0) return null; - r = state.getLong(1); - r /= state.getInt(0); - return Double.valueOf(r); - $$; - - CREATE OR REPLACE AGGREGATE average(int) - SFUNC averageState - STYPE tuple - FINALFUNC averageFinal - INITCOND (0, 0); - - CREATE TABLE atable ( - pk int PRIMARY KEY, - val int - ); - - INSERT INTO atable (pk, val) VALUES (1,1); - INSERT INTO atable (pk, val) VALUES (2,2); - INSERT INTO atable (pk, val) VALUES (3,3); - INSERT INTO atable (pk, val) VALUES (4,4); - - SELECT average(val) FROM atable; - -.. _create-aggregate-statement: - -CREATE AGGREGATE -```````````````` - -Creating (or replacing) a user-defined aggregate function uses the ``CREATE AGGREGATE`` statement: - -.. productionlist:: - create_aggregate_statement: CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ] - : `function_name` '(' `arguments_signature` ')' - : SFUNC `function_name` - : STYPE `cql_type` - : [ FINALFUNC `function_name` ] - : [ INITCOND `term` ] - -See above for a complete example. - -``CREATE AGGREGATE`` with the optional ``OR REPLACE`` keywords either creates an aggregate or replaces an existing one -with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature -already exists. - -``CREATE AGGREGATE`` with the optional ``IF NOT EXISTS`` keywords either creates an aggregate if it does not already -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -``STYPE`` defines the type of the state value and must be specified. - -The optional ``INITCOND`` defines the initial state value for the aggregate. It defaults to ``null``. A non-\ ``null`` -``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``. - -``SFUNC`` references an existing function to be used as the state modifying function. The type of first argument of the -state function must match ``STYPE``. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called -with ``null``. - -The optional ``FINALFUNC`` is called just before the aggregate result is returned. It must take only one argument with -type ``STYPE``. The return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS -NULL ON NULL INPUT`` means that the aggregate's return value will be ``null``, if the last state is ``null``. - -If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is -defined, it is the return type of that function. - -.. 
_drop-aggregate-statement: - -DROP AGGREGATE -`````````````` - -Dropping an user-defined aggregate function uses the ``DROP AGGREGATE`` statement: - -.. productionlist:: - drop_aggregate_statement: DROP AGGREGATE [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - -For instance:: - - DROP AGGREGATE myAggregate; - DROP AGGREGATE myKeyspace.anAggregate; - DROP AGGREGATE someAggregate ( int ); - DROP AGGREGATE someAggregate ( text ); - -The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates). - -``DROP AGGREGATE`` with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist. diff --git a/src/doc/3.11.3/_sources/cql/index.rst.txt b/src/doc/3.11.3/_sources/cql/index.rst.txt deleted file mode 100644 index 00d90e41e..000000000 --- a/src/doc/3.11.3/_sources/cql/index.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _cql: - -The Cassandra Query Language (CQL) -================================== - -This document describes the Cassandra Query Language (CQL) [#]_. Note that this document describes the last version of -the languages. However, the `changes <#changes>`_ section provides the diff between the different versions of CQL. - -CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For -that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have -in SQL. But please note that as such, they do **not** refer to the concept of rows and columns found in the deprecated -thrift API (and earlier version 1 and 2 of CQL). - -.. toctree:: - :maxdepth: 2 - - definitions - types - ddl - dml - indexes - mvs - security - functions - json - triggers - appendices - changes - -.. [#] Technically, this document CQL version 3, which is not backward compatible with CQL version 1 and 2 (which have - been deprecated and remove) and differs from it in numerous ways. diff --git a/src/doc/3.11.3/_sources/cql/indexes.rst.txt b/src/doc/3.11.3/_sources/cql/indexes.rst.txt deleted file mode 100644 index 81fe429d0..000000000 --- a/src/doc/3.11.3/_sources/cql/indexes.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _secondary-indexes: - -Secondary Indexes ------------------ - -CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by: - -.. productionlist:: - index_name: re('[a-zA-Z_0-9]+') - - - -.. _create-index-statement: - -CREATE INDEX -^^^^^^^^^^^^ - -Creating a secondary index on a table uses the ``CREATE INDEX`` statement: - -.. productionlist:: - create_index_statement: CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ `index_name` ] - : ON `table_name` '(' `index_identifier` ')' - : [ USING `string` [ WITH OPTIONS = `map_literal` ] ] - index_identifier: `column_name` - :| ( KEYS | VALUES | ENTRIES | FULL ) '(' `column_name` ')' - -For instance:: - - CREATE INDEX userIndex ON NerdMovies (user); - CREATE INDEX ON Mutants (abilityId); - CREATE INDEX ON users (keys(favs)); - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass'; - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'}; - -The ``CREATE INDEX`` statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ``ON`` keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time. - -Attempting to create an already existing index will return an error unless the ``IF NOT EXISTS`` option is used. If it -is used, the statement will be a no-op if the index already exists. - -Indexes on Map Keys -~~~~~~~~~~~~~~~~~~~ - -When creating an index on a :ref:`maps `, you may index either the keys or the values. If the column identifier is -placed within the ``keys()`` function, the index will be on the map keys, allowing you to use ``CONTAINS KEY`` in -``WHERE`` clauses. Otherwise, the index will be on the map values. - -.. _drop-index-statement: - -DROP INDEX -^^^^^^^^^^ - -Dropping a secondary index uses the ``DROP INDEX`` statement: - -.. productionlist:: - drop_index_statement: DROP INDEX [ IF EXISTS ] `index_name` - -The ``DROP INDEX`` statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index. - -If the index does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. diff --git a/src/doc/3.11.3/_sources/cql/json.rst.txt b/src/doc/3.11.3/_sources/cql/json.rst.txt deleted file mode 100644 index 539180aed..000000000 --- a/src/doc/3.11.3/_sources/cql/json.rst.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. 
to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-json: - -JSON Support ------------- - -Cassandra 2.2 introduces JSON support to :ref:`SELECT ` and :ref:`INSERT ` -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents. - -SELECT JSON -^^^^^^^^^^^ - -With ``SELECT`` statements, the ``JSON`` keyword can be used to return each row as a single ``JSON`` encoded map. The -remainder of the ``SELECT`` statement behavior is the same. - -The result map keys are the same as the column names in a normal result set. For example, a statement like ``SELECT JSON -a, ttl(b) FROM ...`` would result in a map with keys ``"a"`` and ``"ttl(b)"``. However, this is one notable exception: -for symmetry with ``INSERT JSON`` behavior, case-sensitive column names with upper-case letters will be surrounded with -double quotes. For example, ``SELECT JSON myColumn FROM ...`` would result in a map key ``"\"myColumn\""`` (note the -escaped quotes). - -The map values will ``JSON``-encoded representations (as described below) of the result set values. - -INSERT JSON -^^^^^^^^^^^ - -With ``INSERT`` statements, the new ``JSON`` keyword can be used to enable inserting a ``JSON`` encoded map as a single -row. The format of the ``JSON`` map should generally match that returned by a ``SELECT JSON`` statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named "myKey" and "value", you would do the following:: - - INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}' - -By default (or if ``DEFAULT NULL`` is explicitly used), a column omitted from the ``JSON`` map will be set to ``NULL``, -meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). -Alternatively, if the ``DEFAULT UNSET`` directive is used after the value, omitted column values will be left unset, -meaning that pre-existing values for those column will be preserved. - - -JSON Encoding of Cassandra Data Types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Where possible, Cassandra will represent and accept data types in their native ``JSON`` representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native ``JSON`` collections (maps and lists) or a JSON-encoded string -representation of the collection. 
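As a minimal sketch of the two accepted representations for a compound value, assume the ``mytable`` example above
also had a (hypothetical) ``scores`` column of type ``list<int>``; the collection could then be supplied either as a
native JSON list or as a JSON-encoded string::

    INSERT INTO mytable JSON '{ "\"myKey\"": 0, "scores": [1, 2, 3] }';
    INSERT INTO mytable JSON '{ "\"myKey\"": 0, "scores": "[1, 2, 3]" }';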
- -The following table describes the encodings that Cassandra will accept in ``INSERT JSON`` values (and ``fromJson()`` -arguments) as well as the format Cassandra will use when returning data for ``SELECT JSON`` statements (and -``fromJson()``): - -=============== ======================== =============== ============================================================== - Type Formats accepted Return format Notes -=============== ======================== =============== ============================================================== - ``ascii`` string string Uses JSON's ``\u`` character escape - ``bigint`` integer, string integer String must be valid 64 bit integer - ``blob`` string string String should be 0x followed by an even number of hex digits - ``boolean`` boolean, string boolean String must be "true" or "false" - ``date`` string string Date in format ``YYYY-MM-DD``, timezone UTC - ``decimal`` integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in - client-side decoder - ``double`` integer, float, string float String must be valid integer or float - ``float`` integer, float, string float String must be valid integer or float - ``inet`` string string IPv4 or IPv6 address - ``int`` integer, string integer String must be valid 32 bit integer - ``list`` list, string list Uses JSON's native list representation - ``map`` map, string map Uses JSON's native map representation - ``smallint`` integer, string integer String must be valid 16 bit integer - ``set`` list, string list Uses JSON's native list representation - ``text`` string string Uses JSON's ``\u`` character escape - ``time`` string string Time of day in format ``HH-MM-SS[.fffffffff]`` - ``timestamp`` integer, string string A timestamp. Strings constant allows to input :ref:`timestamps - as dates `. Datestamps with format ``YYYY-MM-DD - HH:MM:SS.SSS`` are returned. - ``timeuuid`` string string Type 1 UUID. See :token:`constant` for the UUID format - ``tinyint`` integer, string integer String must be valid 8 bit integer - ``tuple`` list, string list Uses JSON's native list representation - ``UDT`` map, string map Uses JSON's native map representation with field names as keys - ``uuid`` string string See :token:`constant` for the UUID format - ``varchar`` string string Uses JSON's ``\u`` character escape - ``varint`` integer, string integer Variable length; may overflow 32 or 64 bit integers in - client-side decoder -=============== ======================== =============== ============================================================== - -The fromJson() Function -^^^^^^^^^^^^^^^^^^^^^^^ - -The ``fromJson()`` function may be used similarly to ``INSERT JSON``, but for a single column value. It may only be used -in the ``VALUES`` clause of an ``INSERT`` statement or as one of the column values in an ``UPDATE``, ``DELETE``, or -``SELECT`` statement. For example, it cannot be used in the selection clause of a ``SELECT`` statement. - -The toJson() Function -^^^^^^^^^^^^^^^^^^^^^ - -The ``toJson()`` function may be used similarly to ``SELECT JSON``, but for a single column value. It may only be used -in the selection clause of a ``SELECT`` statement. diff --git a/src/doc/3.11.3/_sources/cql/mvs.rst.txt b/src/doc/3.11.3/_sources/cql/mvs.rst.txt deleted file mode 100644 index aabea10d8..000000000 --- a/src/doc/3.11.3/_sources/cql/mvs.rst.txt +++ /dev/null @@ -1,166 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _materialized-views: - -Materialized Views ------------------- - -Materialized views names are defined by: - -.. productionlist:: - view_name: re('[a-zA-Z_0-9]+') - - -.. _create-materialized-view-statement: - -CREATE MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^^ - -You can create a materialized view on a table using a ``CREATE MATERIALIZED VIEW`` statement: - -.. productionlist:: - create_materialized_view_statement: CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] `view_name` AS - : `select_statement` - : PRIMARY KEY '(' `primary_key` ')' - : WITH `table_options` - -For instance:: - - CREATE MATERIALIZED VIEW monkeySpecies_by_population AS - SELECT * FROM monkeySpecies - WHERE population IS NOT NULL AND species IS NOT NULL - PRIMARY KEY (population, species) - WITH comment='Allow query by population instead of species'; - -The ``CREATE MATERIALIZED VIEW`` statement creates a new materialized view. Each such view is a set of *rows* which -corresponds to rows which are present in the underlying, or base, table specified in the ``SELECT`` statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view. - -Creating a materialized view has 3 main parts: - -- The :ref:`select statement ` that restrict the data included in the view. -- The :ref:`primary key ` definition for the view. -- The :ref:`options ` for the view. - -Attempting to create an already existing materialized view will return an error unless the ``IF NOT EXISTS`` option is -used. If it is used, the statement will be a no-op if the materialized view already exists. - -.. _mv-select: - -MV select statement -``````````````````` - -The select statement of a materialized view creation defines which of the base table is included in the view. That -statement is limited in a number of ways: - -- the :ref:`selection ` is limited to those that only select columns of the base table. In other - words, you can't use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can - however use `*` as a shortcut of selecting all columns. Further, :ref:`static columns ` cannot be - included in a materialized view (which means ``SELECT *`` isn't allowed if the base table has static columns). -- the ``WHERE`` clause have the following restrictions: - - - it cannot include any :token:`bind_marker`. - - the columns that are not part of the *base table* primary key can only be restricted by an ``IS NOT NULL`` - restriction. No other restriction is allowed. - - as the columns that are part of the *view* primary key cannot be null, they must always be at least restricted by a - ``IS NOT NULL`` restriction (or any other restriction, but they must have one). - -- it cannot have neither an :ref:`ordering clause `, nor a :ref:`limit `, nor :ref:`ALLOW - FILTERING `. - -.. 
_mv-primary-key: - -MV primary key -`````````````` - -A view must have a primary key and that primary key must conform to the following restrictions: - -- it must contain all the primary key columns of the base table. This ensures that every row of the view correspond to - exactly one row of the base table. -- it can only contain a single column that is not a primary key column in the base table. - -So for instance, give the following base table definition:: - - CREATE TABLE t ( - k int, - c1 int, - c2 int, - v1 int, - v2 int, - PRIMARY KEY (k, c1, c2) - ) - -then the following view definitions are allowed:: - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, k, c2) - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (v1, k, c1, c2) - -but the following ones are **not** allowed:: - - // Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL - PRIMARY KEY (v1, v2, k, c1, c2) - - // Error: must include k in the primary as it's a base table primary key column - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, c2) - - -.. _mv-options: - -MV options -`````````` - -A materialized view is internally implemented by a table and as such, creating a MV allows the :ref:`same options than -creating a table `. - - -.. _alter-materialized-view-statement: - -ALTER MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^ - -After creation, you can alter the options of a materialized view using the ``ALTER MATERIALIZED VIEW`` statement: - -.. productionlist:: - alter_materialized_view_statement: ALTER MATERIALIZED VIEW `view_name` WITH `table_options` - -The options that can be updated are the same than at creation time and thus the :ref:`same than for tables -`. - -.. _drop-materialized-view-statement: - -DROP MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^ - -Dropping a materialized view users the ``DROP MATERIALIZED VIEW`` statement: - -.. productionlist:: - drop_materialized_view_statement: DROP MATERIALIZED VIEW [ IF EXISTS ] `view_name`; - -If the materialized view does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case -the operation is a no-op. diff --git a/src/doc/3.11.3/_sources/cql/security.rst.txt b/src/doc/3.11.3/_sources/cql/security.rst.txt deleted file mode 100644 index 9efe27f2d..000000000 --- a/src/doc/3.11.3/_sources/cql/security.rst.txt +++ /dev/null @@ -1,502 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. 
_cql-security: - -Security --------- - -.. _cql-roles: - -Database Roles -^^^^^^^^^^^^^^ - -CQL uses database roles to represent users and group of users. Syntactically, a role is defined by: - -.. productionlist:: - role_name: `identifier` | `string` - -.. _create-role-statement: - -CREATE ROLE -~~~~~~~~~~~ - -Creating a role uses the ``CREATE ROLE`` statement: - -.. productionlist:: - create_role_statement: CREATE ROLE [ IF NOT EXISTS ] `role_name` - : [ WITH `role_options` ] - role_options: `role_option` ( AND `role_option` )* - role_option: PASSWORD '=' `string` - :| LOGIN '=' `boolean` - :| SUPERUSER '=' `boolean` - :| OPTIONS '=' `map_literal` - -For instance:: - - CREATE ROLE new_role; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true; - CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 }; - -By default roles do not possess ``LOGIN`` privileges or ``SUPERUSER`` status. - -:ref:`Permissions ` on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and ``SUPERUSER`` status are inherited, but the ``LOGIN`` privilege is -not. - -If a role has the ``LOGIN`` privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role. - -Only a client with with the ``CREATE`` permission on the database roles resource may issue ``CREATE ROLE`` requests (see -the :ref:`relevant section ` below), unless the client is a ``SUPERUSER``. Role management in Cassandra -is pluggable and custom implementations may support only a subset of the listed options. - -Role names should be quoted if they contain non-alphanumeric characters. - -.. _setting-credentials-for-internal-authentication: - -Setting credentials for internal authentication -``````````````````````````````````````````````` - -Use the ``WITH PASSWORD`` clause to set a password for internal authentication, enclosing the password in single -quotation marks. - -If internal authentication has not been set up or the role does not have ``LOGIN`` privileges, the ``WITH PASSWORD`` -clause is not necessary. - -Creating a role conditionally -````````````````````````````` - -Attempting to create an existing role results in an invalid query condition unless the ``IF NOT EXISTS`` option is used. -If the option is used and the role exists, the statement is a no-op:: - - CREATE ROLE other_role; - CREATE ROLE IF NOT EXISTS other_role; - - -.. _alter-role-statement: - -ALTER ROLE -~~~~~~~~~~ - -Altering a role options uses the ``ALTER ROLE`` statement: - -.. productionlist:: - alter_role_statement: ALTER ROLE `role_name` WITH `role_options` - -For instance:: - - ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false; - -Conditions on executing ``ALTER ROLE`` statements: - -- A client must have ``SUPERUSER`` status to alter the ``SUPERUSER`` status of another role -- A client cannot alter the ``SUPERUSER`` status of any role it currently holds -- A client can only modify certain properties of the role with which it identified at login (e.g. ``PASSWORD``) -- To modify properties of a role, the client must be granted ``ALTER`` :ref:`permission ` on that role - -.. 
_drop-role-statement: - -DROP ROLE -~~~~~~~~~ - -Dropping a role uses the ``DROP ROLE`` statement: - -.. productionlist:: - drop_role_statement: DROP ROLE [ IF EXISTS ] `role_name` - -``DROP ROLE`` requires the client to have ``DROP`` :ref:`permission ` on the role in question. In -addition, client may not ``DROP`` the role with which it identified at login. Finally, only a client with ``SUPERUSER`` -status may ``DROP`` another ``SUPERUSER`` role. - -Attempting to drop a role which does not exist results in an invalid query condition unless the ``IF EXISTS`` option is -used. If the option is used and the role does not exist the statement is a no-op. - -.. _grant-role-statement: - -GRANT ROLE -~~~~~~~~~~ - -Granting a role to another uses the ``GRANT ROLE`` statement: - -.. productionlist:: - grant_role_statement: GRANT `role_name` TO `role_name` - -For instance:: - - GRANT report_writer TO alice; - -This statement grants the ``report_writer`` role to ``alice``. Any permissions granted to ``report_writer`` are also -acquired by ``alice``. - -Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:: - - GRANT role_a TO role_b; - GRANT role_b TO role_a; - - GRANT role_a TO role_b; - GRANT role_b TO role_c; - GRANT role_c TO role_a; - -.. _revoke-role-statement: - -REVOKE ROLE -~~~~~~~~~~~ - -Revoking a role uses the ``REVOKE ROLE`` statement: - -.. productionlist:: - revoke_role_statement: REVOKE `role_name` FROM `role_name` - -For instance:: - - REVOKE report_writer FROM alice; - -This statement revokes the ``report_writer`` role from ``alice``. Any permissions that ``alice`` has acquired via the -``report_writer`` role are also revoked. - -.. _list-roles-statement: - -LIST ROLES -~~~~~~~~~~ - -All the known roles (in the system or granted to specific role) can be listed using the ``LIST ROLES`` statement: - -.. productionlist:: - list_roles_statement: LIST ROLES [ OF `role_name` ] [ NORECURSIVE ] - -For instance:: - - LIST ROLES; - -returns all known roles in the system, this requires ``DESCRIBE`` permission on the database roles resource. And:: - - LIST ROLES OF alice; - -enumerates all roles granted to ``alice``, including those transitively acquired. But:: - - LIST ROLES OF bob NORECURSIVE - -lists all roles directly granted to ``bob`` without including any of the transitively acquired ones. - -Users -^^^^^ - -Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -``USER``. For backward compatibility, the legacy syntax has been preserved with ``USER`` centric statements becoming -synonyms for the ``ROLE`` based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role. - -.. _create-user-statement: - -CREATE USER -~~~~~~~~~~~ - -Creating a user uses the ``CREATE USER`` statement: - -.. productionlist:: - create_user_statement: CREATE USER [ IF NOT EXISTS ] `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - user_option: SUPERUSER | NOSUPERUSER - -For instance:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER; - -``CREATE USER`` is equivalent to ``CREATE ROLE`` where the ``LOGIN`` option is ``true``. 
So, the following pairs of -statements are equivalent:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER IF EXISTS alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE IF EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' WITH LOGIN = true; - - CREATE USER alice WITH PASSWORD 'password_a'; - CREATE ROLE alice WITH PASSWORD = 'password_a' WITH LOGIN = true; - -.. _alter-user-statement: - -ALTER USER -~~~~~~~~~~ - -Altering the options of a user uses the ``ALTER USER`` statement: - -.. productionlist:: - alter_user_statement: ALTER USER `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - -For instance:: - - ALTER USER alice WITH PASSWORD 'PASSWORD_A'; - ALTER USER bob SUPERUSER; - -.. _drop-user-statement: - -DROP USER -~~~~~~~~~ - -Dropping a user uses the ``DROP USER`` statement: - -.. productionlist:: - drop_user_statement: DROP USER [ IF EXISTS ] `role_name` - -.. _list-users-statement: - -LIST USERS -~~~~~~~~~~ - -Existing users can be listed using the ``LIST USERS`` statement: - -.. productionlist:: - list_users_statement: LIST USERS - -Note that this statement is equivalent to:: - - LIST ROLES; - -but only roles with the ``LOGIN`` privilege are included in the output. - -Data Control -^^^^^^^^^^^^ - -.. _cql-permissions: - -Permissions -~~~~~~~~~~~ - -Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically: - -- The hierarchy of Data resources, Keyspaces and Tables has the structure ``ALL KEYSPACES`` -> ``KEYSPACE`` -> - ``TABLE``. -- Function resources have the structure ``ALL FUNCTIONS`` -> ``KEYSPACE`` -> ``FUNCTION`` -- Resources representing roles have the structure ``ALL ROLES`` -> ``ROLE`` -- Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ``ALL MBEANS`` -> - ``MBEAN`` - -Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting ``SELECT`` on a ``KEYSPACE`` automatically grants it on all ``TABLES`` in that ``KEYSPACE``. Likewise, granting -a permission on ``ALL FUNCTIONS`` grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace. - -Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes. - -The full set of available permissions is: - -- ``CREATE`` -- ``ALTER`` -- ``DROP`` -- ``SELECT`` -- ``MODIFY`` -- ``AUTHORIZE`` -- ``DESCRIBE`` -- ``EXECUTE`` - -Not all permissions are applicable to every type of resource. For instance, ``EXECUTE`` is only relevant in the context -of functions or mbeans; granting ``EXECUTE`` on a resource representing a table is nonsensical. Attempting to ``GRANT`` -a permission on resource to which it cannot be applied results in an error response. 
The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission. - -=============== =============================== ======================================================================= - Permission Resource Operations -=============== =============================== ======================================================================= - ``CREATE`` ``ALL KEYSPACES`` ``CREATE KEYSPACE`` and ``CREATE TABLE`` in any keyspace - ``CREATE`` ``KEYSPACE`` ``CREATE TABLE`` in specified keyspace - ``CREATE`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` in any keyspace and ``CREATE AGGREGATE`` in any - keyspace - ``CREATE`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE`` in specified keyspace - ``CREATE`` ``ALL ROLES`` ``CREATE ROLE`` - ``ALTER`` ``ALL KEYSPACES`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in any keyspace - ``ALTER`` ``KEYSPACE`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in specified keyspace - ``ALTER`` ``TABLE`` ``ALTER TABLE`` - ``ALTER`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing any existing - ``ALTER`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing in - specified keyspace - ``ALTER`` ``FUNCTION`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing - ``ALTER`` ``ALL ROLES`` ``ALTER ROLE`` on any role - ``ALTER`` ``ROLE`` ``ALTER ROLE`` - ``DROP`` ``ALL KEYSPACES`` ``DROP KEYSPACE`` and ``DROP TABLE`` in any keyspace - ``DROP`` ``KEYSPACE`` ``DROP TABLE`` in specified keyspace - ``DROP`` ``TABLE`` ``DROP TABLE`` - ``DROP`` ``ALL FUNCTIONS`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in any keyspace - ``DROP`` ``ALL FUNCTIONS IN KEYSPACE`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in specified keyspace - ``DROP`` ``FUNCTION`` ``DROP FUNCTION`` - ``DROP`` ``ALL ROLES`` ``DROP ROLE`` on any role - ``DROP`` ``ROLE`` ``DROP ROLE`` - ``SELECT`` ``ALL KEYSPACES`` ``SELECT`` on any table - ``SELECT`` ``KEYSPACE`` ``SELECT`` on any table in specified keyspace - ``SELECT`` ``TABLE`` ``SELECT`` on specified table - ``SELECT`` ``ALL MBEANS`` Call getter methods on any mbean - ``SELECT`` ``MBEANS`` Call getter methods on any mbean matching a wildcard pattern - ``SELECT`` ``MBEAN`` Call getter methods on named mbean - ``MODIFY`` ``ALL KEYSPACES`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table - ``MODIFY`` ``KEYSPACE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table in - specified keyspace - ``MODIFY`` ``TABLE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on specified table - ``MODIFY`` ``ALL MBEANS`` Call setter methods on any mbean - ``MODIFY`` ``MBEANS`` Call setter methods on any mbean matching a wildcard pattern - ``MODIFY`` ``MBEAN`` Call setter methods on named mbean - ``AUTHORIZE`` ``ALL KEYSPACES`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table - ``AUTHORIZE`` ``KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table in - specified keyspace - ``AUTHORIZE`` ``TABLE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified table - ``AUTHORIZE`` ``ALL FUNCTIONS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any function - ``AUTHORIZE`` ``ALL FUNCTIONS IN KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` in specified keyspace - ``AUTHORIZE`` ``FUNCTION`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified function - ``AUTHORIZE`` ``ALL MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean - 
``AUTHORIZE`` ``MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean matching - a wildcard pattern - ``AUTHORIZE`` ``MBEAN`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on named mbean - ``AUTHORIZE`` ``ALL ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on any role - ``AUTHORIZE`` ``ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on specified roles - ``DESCRIBE`` ``ALL ROLES`` ``LIST ROLES`` on all roles or only roles granted to another, - specified role - ``DESCRIBE`` ``ALL MBEANS`` Retrieve metadata about any mbean from the platform's MBeanServer - ``DESCRIBE`` ``MBEANS`` Retrieve metadata about any mbean matching a wildcard patter from the - platform's MBeanServer - ``DESCRIBE`` ``MBEAN`` Retrieve metadata about a named mbean from the platform's MBeanServer - ``EXECUTE`` ``ALL FUNCTIONS`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function, and use of - any function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL FUNCTIONS IN KEYSPACE`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function in specified - keyspace and use of any function in keyspace in ``CREATE AGGREGATE`` - ``EXECUTE`` ``FUNCTION`` ``SELECT``, ``INSERT`` and ``UPDATE`` using specified function and use - of the function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL MBEANS`` Execute operations on any mbean - ``EXECUTE`` ``MBEANS`` Execute operations on any mbean matching a wildcard pattern - ``EXECUTE`` ``MBEAN`` Execute operations on named mbean -=============== =============================== ======================================================================= - -.. _grant-permission-statement: - -GRANT PERMISSION -~~~~~~~~~~~~~~~~ - -Granting a permission uses the ``GRANT PERMISSION`` statement: - -.. productionlist:: - grant_permission_statement: GRANT `permissions` ON `resource` TO `role_name` - permissions: ALL [ PERMISSIONS ] | `permission` [ PERMISSION ] - permission: CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE - resource: ALL KEYSPACES - :| KEYSPACE `keyspace_name` - :| [ TABLE ] `table_name` - :| ALL ROLES - :| ROLE `role_name` - :| ALL FUNCTIONS [ IN KEYSPACE `keyspace_name` ] - :| FUNCTION `function_name` '(' [ `cql_type` ( ',' `cql_type` )* ] ')' - :| ALL MBEANS - :| ( MBEAN | MBEANS ) `string` - -For instance:: - - GRANT SELECT ON ALL KEYSPACES TO data_reader; - -This gives any user with the role ``data_reader`` permission to execute ``SELECT`` statements on any table across all -keyspaces:: - - GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer; - -This give any user with the role ``data_writer`` permission to perform ``UPDATE``, ``INSERT``, ``UPDATE``, ``DELETE`` -and ``TRUNCATE`` queries on all tables in the ``keyspace1`` keyspace:: - - GRANT DROP ON keyspace1.table1 TO schema_owner; - -This gives any user with the ``schema_owner`` role permissions to ``DROP`` ``keyspace1.table1``:: - - GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer; - -This grants any user with the ``report_writer`` role permission to execute ``SELECT``, ``INSERT`` and ``UPDATE`` queries -which use the function ``keyspace1.user_function( int )``:: - - GRANT DESCRIBE ON ALL ROLES TO role_admin; - -This grants any user with the ``role_admin`` role permission to view any and all roles in the system with a ``LIST -ROLES`` statement - -.. _grant-all: - -GRANT ALL -````````` - -When the ``GRANT ALL`` form is used, the appropriate set of permissions is determined automatically based on the target -resource. 
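As a sketch of ``GRANT ALL`` in practice, granting ``ALL PERMISSIONS`` on a table resource resolves to only the permissions that the table above lists as applicable to tables (``ALTER``, ``DROP``, ``SELECT``, ``MODIFY`` and ``AUTHORIZE``)::

    GRANT ALL PERMISSIONS ON TABLE keyspace1.table1 TO schema_owner;
    -- equivalent to granting each table-applicable permission individually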
- -Automatic Granting -`````````````````` - -When a resource is created, via a ``CREATE KEYSPACE``, ``CREATE TABLE``, ``CREATE FUNCTION``, ``CREATE AGGREGATE`` or -``CREATE ROLE`` statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource. - -.. _revoke-permission-statement: - -REVOKE PERMISSION -~~~~~~~~~~~~~~~~~ - -Revoking a permission from a role uses the ``REVOKE PERMISSION`` statement: - -.. productionlist:: - revoke_permission_statement: REVOKE `permissions` ON `resource` FROM `role_name` - -For instance:: - - REVOKE SELECT ON ALL KEYSPACES FROM data_reader; - REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer; - REVOKE DROP ON keyspace1.table1 FROM schema_owner; - REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer; - REVOKE DESCRIBE ON ALL ROLES FROM role_admin; - -.. _list-permissions-statement: - -LIST PERMISSIONS -~~~~~~~~~~~~~~~~ - -Listing granted permissions uses the ``LIST PERMISSIONS`` statement: - -.. productionlist:: - list_permissions_statement: LIST `permissions` [ ON `resource` ] [ OF `role_name` [ NORECURSIVE ] ] - -For instance:: - - LIST ALL PERMISSIONS OF alice; - -Show all permissions granted to ``alice``, including those acquired transitively from any other roles:: - - LIST ALL PERMISSIONS ON keyspace1.table1 OF bob; - -Show all permissions on ``keyspace1.table1`` granted to ``bob``, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to ``keyspace1.table1``. -For example, should ``bob`` have ``ALTER`` permission on ``keyspace1``, that would be included in the results of this -query. Adding the ``NORECURSIVE`` switch restricts the results to only those permissions which were directly granted to -``bob`` or one of ``bob``'s roles:: - - LIST SELECT PERMISSIONS OF carlos; - -Show any permissions granted to ``carlos`` or any of ``carlos``'s roles, limited to ``SELECT`` permissions on any -resource. diff --git a/src/doc/3.11.3/_sources/cql/triggers.rst.txt b/src/doc/3.11.3/_sources/cql/triggers.rst.txt deleted file mode 100644 index db3f53e38..000000000 --- a/src/doc/3.11.3/_sources/cql/triggers.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-triggers: - -Triggers --------- - -Triggers are identified by a name defined by: - -.. productionlist:: - trigger_name: `identifier` - - -.. _create-trigger-statement: - -CREATE TRIGGER -^^^^^^^^^^^^^^ - -Creating a new trigger uses the ``CREATE TRIGGER`` statement: - -.. 
productionlist:: - create_trigger_statement: CREATE TRIGGER [ IF NOT EXISTS ] `trigger_name` - : ON `table_name` - : USING `string` - -For instance:: - - CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex'; - -The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a ``lib/triggers`` subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction. - -.. _drop-trigger-statement: - -DROP TRIGGER -^^^^^^^^^^^^ - -Dropping a trigger uses the ``DROP TRIGGER`` statement: - -.. productionlist:: - drop_trigger_statement: DROP TRIGGER [ IF EXISTS ] `trigger_name` ON `table_name` - -For instance:: - - DROP TRIGGER myTrigger ON myTable; diff --git a/src/doc/3.11.3/_sources/cql/types.rst.txt b/src/doc/3.11.3/_sources/cql/types.rst.txt deleted file mode 100644 index 509a7565e..000000000 --- a/src/doc/3.11.3/_sources/cql/types.rst.txt +++ /dev/null @@ -1,559 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. _data-types: - -Data Types ----------- - -CQL is a typed language and supports a rich set of data types, including :ref:`native types `, -:ref:`collection types `, :ref:`user-defined types `, :ref:`tuple types ` and :ref:`custom -types `: - -.. productionlist:: - cql_type: `native_type` | `collection_type` | `user_defined_type` | `tuple_type` | `custom_type` - - -.. _native-types: - -Native Types -^^^^^^^^^^^^ - -The native types supported by CQL are: - -.. 
productionlist:: - native_type: ASCII - : | BIGINT - : | BLOB - : | BOOLEAN - : | COUNTER - : | DATE - : | DECIMAL - : | DOUBLE - : | DURATION - : | FLOAT - : | INET - : | INT - : | SMALLINT - : | TEXT - : | TIME - : | TIMESTAMP - : | TIMEUUID - : | TINYINT - : | UUID - : | VARCHAR - : | VARINT - -The following table gives additional informations on the native data types, and on which kind of :ref:`constants -` each type supports: - -=============== ===================== ================================================================================== - type constants supported description -=============== ===================== ================================================================================== - ``ascii`` :token:`string` ASCII character string - ``bigint`` :token:`integer` 64-bit signed long - ``blob`` :token:`blob` Arbitrary bytes (no validation) - ``boolean`` :token:`boolean` Either ``true`` or ``false`` - ``counter`` :token:`integer` Counter column (64-bit signed value). See :ref:`counters` for details - ``date`` :token:`integer`, A date (with no corresponding time value). See :ref:`dates` below for details - :token:`string` - ``decimal`` :token:`integer`, Variable-precision decimal - :token:`float` - ``double`` :token:`integer` 64-bit IEEE-754 floating point - :token:`float` - ``duration`` :token:`duration`, A duration with nanosecond precision. See :ref:`durations` below for details - ``float`` :token:`integer`, 32-bit IEEE-754 floating point - :token:`float` - ``inet`` :token:`string` An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that - there is no ``inet`` constant, IP address should be input as strings - ``int`` :token:`integer` 32-bit signed int - ``smallint`` :token:`integer` 16-bit signed int - ``text`` :token:`string` UTF8 encoded string - ``time`` :token:`integer`, A time (with no corresponding date value) with nanosecond precision. See - :token:`string` :ref:`times` below for details - ``timestamp`` :token:`integer`, A timestamp (date and time) with millisecond precision. See :ref:`timestamps` - :token:`string` below for details - ``timeuuid`` :token:`uuid` Version 1 UUID_, generally used as a “conflict-free” timestamp. Also see - :ref:`timeuuid-functions` - ``tinyint`` :token:`integer` 8-bit signed int - ``uuid`` :token:`uuid` A UUID_ (of any version) - ``varchar`` :token:`string` UTF8 encoded string - ``varint`` :token:`integer` Arbitrary-precision integer -=============== ===================== ================================================================================== - -.. _counters: - -Counters -~~~~~~~~ - -The ``counter`` type is used to define *counter columns*. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the :ref:`UPDATE statement -` for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0. - -.. _counter-limitations: - -Counters have a number of important limitations: - -- They cannot be used for columns part of the ``PRIMARY KEY`` of a table. -- A table that contains a counter can only contain counters. In other words, either all the columns of a table outside - the ``PRIMARY KEY`` have the ``counter`` type, or none of them have it. -- Counters do not support :ref:`expiration `. 
-- The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other - words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed). -- Counter updates are, by nature, not `idemptotent `__. An important - consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), - the client has no way to know if the update has been applied or not. In particular, replaying the update may or may - not lead to an over count. - -.. _timestamps: - -Working with timestamps -^^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``timestamp`` type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as `the epoch `__: January 1 1970 at 00:00:00 GMT. - -Timestamps can be input in CQL either using their value as an :token:`integer`, or using a :token:`string` that -represents an `ISO 8601 `__ date. For instance, all of the values below are -valid ``timestamp`` values for Mar 2, 2011, at 04:05:00 AM, GMT: - -- ``1299038700000`` -- ``'2011-02-03 04:05+0000'`` -- ``'2011-02-03 04:05:00+0000'`` -- ``'2011-02-03 04:05:00.000+0000'`` -- ``'2011-02-03T04:05+0000'`` -- ``'2011-02-03T04:05:00+0000'`` -- ``'2011-02-03T04:05:00.000+0000'`` - -The ``+0000`` above is an RFC 822 4-digit time zone specification; ``+0000`` refers to GMT. US Pacific Standard Time is -``-0800``. The time zone may be omitted if desired (``'2011-02-03 04:05:00'``), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible. - -The time of day may also be omitted (``'2011-02-03'`` or ``'2011-02-03+0000'``), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the :ref:`date ` type. - -.. _dates: - -Working with dates -^^^^^^^^^^^^^^^^^^ - -Values of the ``date`` type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at -the center of the range (2^31). Epoch is January 1st, 1970 - -As for :ref:`timestamp `, a date can be input either as an :token:`integer` or using a date -:token:`string`. In the later case, the format should be ``yyyy-mm-dd`` (so ``'2011-02-03'`` for instance). - -.. _times: - -Working with times -^^^^^^^^^^^^^^^^^^ - -Values of the ``time`` type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight. - -As for :ref:`timestamp `, a time can be input either as an :token:`integer` or using a :token:`string` -representing the time. In the later case, the format should be ``hh:mm:ss[.fffffffff]`` (where the sub-second precision -is optional and if provided, can be less than the nanosecond). So for instance, the following are valid inputs for a -time: - -- ``'08:12:54'`` -- ``'08:12:54.123'`` -- ``'08:12:54.123456'`` -- ``'08:12:54.123456789'`` - -.. _durations: - -Working with durations -^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``duration`` type are encoded as 3 signed integer of variable lengths. The first integer represents the -number of months, the second the number of days and the third the number of nanoseconds. 
This is due to the fact that -the number of days in a month can change, and a day can have 23 or 25 hours depending on the daylight saving. -Internally, the number of months and days are decoded as 32 bits integers whereas the number of nanoseconds is decoded -as a 64 bits integer. - -A duration can be input as: - - #. ``(quantity unit)+`` like ``12h30m`` where the unit can be: - - * ``y``: years (12 months) - * ``mo``: months (1 month) - * ``w``: weeks (7 days) - * ``d``: days (1 day) - * ``h``: hours (3,600,000,000,000 nanoseconds) - * ``m``: minutes (60,000,000,000 nanoseconds) - * ``s``: seconds (1,000,000,000 nanoseconds) - * ``ms``: milliseconds (1,000,000 nanoseconds) - * ``us`` or ``µs`` : microseconds (1000 nanoseconds) - * ``ns``: nanoseconds (1 nanosecond) - #. ISO 8601 format: ``P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W`` - #. ISO 8601 alternative format: ``P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]`` - -For example:: - - INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s); - INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S); - INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09); - -.. _duration-limitation: - -Duration columns cannot be used in a table's ``PRIMARY KEY``. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if ``1mo`` is greater than ``29d`` without a date -context. - -A ``1d`` duration is not equals to a ``24h`` one as the duration type has been created to be able to support daylight -saving. - -.. _collections: - -Collections -^^^^^^^^^^^ - -CQL supports 3 kind of collections: :ref:`maps`, :ref:`sets` and :ref:`lists`. The types of those collections is defined -by: - -.. productionlist:: - collection_type: MAP '<' `cql_type` ',' `cql_type` '>' - : | SET '<' `cql_type` '>' - : | LIST '<' `cql_type` '>' - -and their values can be inputd using collection literals: - -.. productionlist:: - collection_literal: `map_literal` | `set_literal` | `list_literal` - map_literal: '{' [ `term` ':' `term` (',' `term` : `term`)* ] '}' - set_literal: '{' [ `term` (',' `term`)* ] '}' - list_literal: '[' [ `term` (',' `term`)* ] ']' - -Note however that neither :token:`bind_marker` nor ``NULL`` are supported inside collection literals. - -Noteworthy characteristics -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Collections are meant for storing/denormalizing relatively small amount of data. They work well for things like “the -phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all -messages sent by a user”, “events registered by a sensor”...), then collections are not appropriate and a specific table -(with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy -characteristics and limitations: - -- Individual collections are not indexed internally. Which means that even to access a single element of a collection, - the while collection has to be read (and reading one is not paged internally). -- While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. - Further, some lists operations are not idempotent by nature (see the section on :ref:`lists ` below for - details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when - possible. 
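As a minimal sketch of the dedicated-table alternative mentioned above, unbounded data such as per-user messages is better modelled with clustering columns than with a collection (the ``user_messages`` schema here is hypothetical)::

    CREATE TABLE user_messages (
        user_id text,
        sent_at timeuuid,
        body text,
        PRIMARY KEY (user_id, sent_at)   -- one row per message, clustered by time
    ) WITH CLUSTERING ORDER BY (sent_at DESC);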
- -Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an -anti-pattern to use a (single) collection to store large amounts of data. - -.. _maps: - -Maps -~~~~ - -A ``map`` is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:: - - CREATE TABLE users ( - id text PRIMARY KEY, - name text, - favs map // A map of text keys, and text values - ); - - INSERT INTO users (id, name, favs) - VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' }); - - // Replace the existing map entirely. - UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith'; - -Further, maps support: - -- Updating or inserting one or more elements:: - - UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith'; - UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith'; - -- Removing one or more elements (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - DELETE favs['author'] FROM users WHERE id = 'jsmith'; - UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith'; - - Note that for removing multiple elements in a ``map``, you remove from it a ``set`` of keys. - -Lastly, TTLs are allowed for both ``INSERT`` and ``UPDATE``, but in both cases the TTL set only applies to the newly -inserted/updated elements. In other words:: - - UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith'; - -will only apply the TTL to the ``{ 'color' : 'green' }`` record, the rest of the map remaining unaffected. - - -.. _sets: - -Sets -~~~~ - -A ``set`` is a (sorted) collection of unique values. You can define and insert a set with:: - - CREATE TABLE images ( - name text PRIMARY KEY, - owner text, - tags set // A set of text values - ); - - INSERT INTO images (name, owner, tags) - VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' }); - - // Replace the existing set entirely - UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg'; - -Further, sets support: - -- Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):: - - UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg'; - -- Removing one or multiple elements (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg'; - -Lastly, as for :ref:`maps `, TTLs, if used, only apply to the newly inserted values. - -.. _lists: - -Lists -~~~~~ - -.. note:: As mentioned above and further discussed at the end of this section, lists have limitations and specific - performance considerations that you should take into account before using them. In general, if you can use a - :ref:`set ` instead of a list, always prefer a set. - -A ``list`` is a (sorted) collection of non-unique values where elements are ordered by their position in the list. 
You -can define and insert a list with:: - - CREATE TABLE plays ( - id text PRIMARY KEY, - game text, - players int, - scores list // A list of integers - ) - - INSERT INTO plays (id, game, players, scores) - VALUES ('123-afde', 'quake', 3, [17, 4, 2]); - - // Replace the existing list entirely - UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde'; - -Further, lists support: - -- Appending and prepending values to a list:: - - UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde'; - UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde'; - -- Setting the value at a particular position in the list. This imply that the list has a pre-existing element for that - position or an error will be thrown that the list is too small:: - - UPDATE plays SET scores[1] = 7 WHERE id = '123-afde'; - -- Removing an element by its position in the list. This imply that the list has a pre-existing element for that position - or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the - list size will be diminished by 1, shifting the position of all the elements following the one deleted:: - - DELETE scores[1] FROM plays WHERE id = '123-afde'; - -- Deleting *all* the occurrences of particular values in the list (if a particular element doesn't occur at all in the - list, it is simply ignored and no error is thrown):: - - UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; - -.. warning:: The append and prepend operations are not idempotent by nature. So in particular, if one of these operation - timeout, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value - twice. - -.. warning:: Setting and removing an element by position and removing occurences of particular values incur an internal - *read-before-write*. They will thus run more slowly and take more ressources than usual updates (with the exclusion - of conditional write that have their own cost). - -Lastly, as for :ref:`maps `, TTLs when used only apply to the newly inserted values. - -.. _udts: - -User-Defined Types -^^^^^^^^^^^^^^^^^^ - -CQL support the definition of user-defined types (UDT for short). Such a type can be created, modified and removed using -the :token:`create_type_statement`, :token:`alter_type_statement` and :token:`drop_type_statement` described below. But -once created, a UDT is simply referred to by its name: - -.. productionlist:: - user_defined_type: `udt_name` - udt_name: [ `keyspace_name` '.' ] `identifier` - - -Creating a UDT -~~~~~~~~~~~~~~ - -Creating a new user-defined type is done using a ``CREATE TYPE`` statement defined by: - -.. productionlist:: - create_type_statement: CREATE TYPE [ IF NOT EXISTS ] `udt_name` - : '(' `field_definition` ( ',' `field_definition` )* ')' - field_definition: `identifier` `cql_type` - -A UDT has a name (used to declared columns of that type) and is a set of named and typed fields. Fields name can be any -type, including collections or other UDT. For instance:: - - CREATE TYPE phone ( - country_code int, - number text, - ) - - CREATE TYPE address ( - street text, - city text, - zip text, - phones map - ) - - CREATE TABLE user ( - name text PRIMARY KEY, - addresses map> - ) - -Note that: - -- Attempting to create an already existing type will result in an error unless the ``IF NOT EXISTS`` option is used. If - it is used, the statement will be a no-op if the type already exists. 
-- A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At - creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in - the current keyspace. -- As of Cassandra |version|, UDTs have to be frozen in most cases, hence the ``frozen
`` in the table definition - above. Please see the section on :ref:`frozen ` for more details. - -UDT literals -~~~~~~~~~~~~ - -Once a used-defined type has been created, value can be input using a UDT literal: - -.. productionlist:: - udt_literal: '{' `identifier` ':' `term` ( ',' `identifier` ':' `term` )* '}' - -In other words, a UDT literal is like a :ref:`map ` literal but its keys are the names of the fields of the type. -For instance, one could insert into the table define in the previous section using:: - - INSERT INTO user (name, addresses) - VALUES ('z3 Pr3z1den7', { - 'home' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'cell' : { country_code: 1, number: '202 456-1111' }, - 'landline' : { country_code: 1, number: '...' } } - }, - 'work' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'fax' : { country_code: 1, number: '...' } } - } - }) - -To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some field -(in which case those will be ``null``). - -Altering a UDT -~~~~~~~~~~~~~~ - -An existing user-defined type can be modified using an ``ALTER TYPE`` statement: - -.. productionlist:: - alter_type_statement: ALTER TYPE `udt_name` `alter_type_modification` - alter_type_modification: ADD `field_definition` - : | RENAME `identifier` TO `identifier` ( `identifier` TO `identifier` )* - -You can: - -- add a new field to the type (``ALTER TYPE address ADD country text``). That new field will be ``null`` for any values - of the type created before the addition. -- rename the fields of the type (``ALTER TYPE address RENAME zip TO zipcode``). - -Dropping a UDT -~~~~~~~~~~~~~~ - -You can drop an existing user-defined type using a ``DROP TYPE`` statement: - -.. productionlist:: - drop_type_statement: DROP TYPE [ IF EXISTS ] `udt_name` - -Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error. - -If the type dropped does not exist, an error will be returned unless ``IF EXISTS`` is used, in which case the operation -is a no-op. - -.. _tuples: - -Tuples -^^^^^^ - -CQL also support tuples and tuple types (where the elements can be of different types). Functionally, tuples can be -though as anonymous UDT with anonymous fields. Tuple types and tuple literals are defined by: - -.. productionlist:: - tuple_type: TUPLE '<' `cql_type` ( ',' `cql_type` )* '>' - tuple_literal: '(' `term` ( ',' `term` )* ')' - -and can be used thusly:: - - CREATE TABLE durations ( - event text, - duration tuple, - ) - - INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours')); - -Unlike other "composed" types (collections and UDT), a tuple is always :ref:`frozen ` (without the need of the -`frozen` keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). -Also, a tuple literal should always have the same number of value than declared in the type it is a tuple of (some of -those values can be null but they need to be explicitly declared as so). - -.. _custom-types: - -Custom Types -^^^^^^^^^^^^ - -.. note:: Custom types exists mostly for backward compatiliby purposes and their usage is discouraged. Their usage is - complex, not user friendly and the other provided types, particularly :ref:`user-defined types `, should almost - always be enough. - -A custom type is defined by: - -.. 
productionlist:: - custom_type: `string` - -A custom type is a :token:`string` that contains the name of Java class that extends the server side ``AbstractType`` -class and that can be loaded by Cassandra (it should thus be in the ``CLASSPATH`` of every node running Cassandra). That -class will define what values are valid for the type and how the time sorts when used for a clustering column. For any -other purpose, a value of a custom type is the same than that of a ``blob``, and can in particular be input using the -:token:`blob` literal syntax. diff --git a/src/doc/3.11.3/_sources/data_modeling/index.rst.txt b/src/doc/3.11.3/_sources/data_modeling/index.rst.txt deleted file mode 100644 index dde031a19..000000000 --- a/src/doc/3.11.3/_sources/data_modeling/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Data Modeling -============= - -.. todo:: TODO diff --git a/src/doc/3.11.3/_sources/development/code_style.rst.txt b/src/doc/3.11.3/_sources/development/code_style.rst.txt deleted file mode 100644 index 5a486a4a3..000000000 --- a/src/doc/3.11.3/_sources/development/code_style.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Code Style -========== - -General Code Conventions ------------------------- - - - The Cassandra project follows `Sun's Java coding conventions `_ with an important exception: ``{`` and ``}`` are always placed on a new line - -Exception handling ------------------- - - - Never ever write ``catch (...) {}`` or ``catch (...) { logger.error() }`` merely to satisfy Java's compile-time exception checking. Always propagate the exception up or throw ``RuntimeException`` (or, if it "can't happen," ``AssertionError``). This makes the exceptions visible to automated tests. - - Avoid propagating up checked exceptions that no caller handles. Rethrow as ``RuntimeException`` (or ``IOError``, if that is more applicable). 
- - Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don't hide it behind a warn; if it isn't, no need for the warning. - - If you genuinely know an exception indicates an expected condition, it's okay to ignore it BUT this must be explicitly explained in a comment. - -Boilerplate ------------ - - - Avoid redundant ``@Override`` annotations when implementing abstract or interface methods. - - Do not implement equals or hashcode methods unless they are actually needed. - - Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in "real" methods to either.) - - Prefer requiring initialization in the constructor to setters. - - Avoid redundant ``this`` references to member fields or methods. - - Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it. - - Always include braces for nested levels of conditionals and loops. Only avoid braces for single level. - -Multiline statements --------------------- - - - Try to keep lines under 120 characters, but use good judgement -- it's better to exceed 120 by a little, than split a line that has no natural splitting points. - - When splitting inside a method call, use one line per parameter and align them, like this: - - :: - - SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), - columnFamilies.size(), - StorageService.getPartitioner()); - - - When splitting a ternary, use one line per clause, carry the operator, and align like this: - - :: - - var = bar == null - ? doFoo() - : doBar(); - -Whitespace ----------- - - - Please make sure to use 4 spaces instead of the tab character for all your indentation. - - Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn't have to pay attention to whitespace diffs. - -Imports -------- - -Please observe the following order for your imports:: - - java - [blank line] - com.google.common - org.apache.commons - org.junit - org.slf4j - [blank line] - everything else alphabetically - -Format files for IDEs ---------------------- - - - IntelliJ: `intellij-codestyle.jar `_ - - IntelliJ 13: `gist for IntelliJ 13 `_ (this is a work in progress, still working on javadoc, ternary style, line continuations, etc) - - Eclipse (https://github.com/tjake/cassandra-style-eclipse) - - - diff --git a/src/doc/3.11.3/_sources/development/how_to_commit.rst.txt b/src/doc/3.11.3/_sources/development/how_to_commit.rst.txt deleted file mode 100644 index d956c72d8..000000000 --- a/src/doc/3.11.3/_sources/development/how_to_commit.rst.txt +++ /dev/null @@ -1,75 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. 
highlight:: none - -How-to Commit -============= - -If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself. - -Here is how committing and merging will usually look for merging and pushing for tickets that follow the convention (if patch-based): - -Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch). - -On cassandra-3.0: - #. ``git am -3 12345-3.0.patch`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git apply -3 12345-3.3.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git apply -3 12345-trunk.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk —atomic`` - -Same scenario, but a branch-based contribution: - -On cassandra-3.0: - #. ``git cherry-pick `` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk —atomic`` - -.. tip:: - - Notes on git flags: - ``-3`` flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply. - - ``—atomic`` flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per each branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue. - -.. tip:: - - The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. - curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch - diff --git a/src/doc/3.11.3/_sources/development/how_to_review.rst.txt b/src/doc/3.11.3/_sources/development/how_to_review.rst.txt deleted file mode 100644 index dc9774362..000000000 --- a/src/doc/3.11.3/_sources/development/how_to_review.rst.txt +++ /dev/null @@ -1,71 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. 
-.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Review Checklist -**************** - -When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process: - -**General** - - * Does it conform to the :doc:`code_style` guidelines? - * Is there any redundant or duplicate code? - * Is the code as modular as possible? - * Can any singletons be avoided? - * Can any of the code be replaced with library functions? - * Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem? - -**Error-Handling** - - * Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded? - * Where third-party utilities are used, are returning errors being caught? - * Are invalid parameter values handled? - * Are any Throwable/Exceptions passed to the JVMStabilityInspector? - * Are errors well-documented? Does the error message tell the user how to proceed? - * Do exceptions propagate to the appropriate level in the code? - -**Documentation** - - * Do comments exist and describe the intent of the code (the "why", not the "how")? - * Are javadocs added where appropriate? - * Is any unusual behavior or edge-case handling described? - * Are data structures and units of measurement explained? - * Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’? - * Does the code self-document via clear naming, abstractions, and flow control? - * Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed? - * Is the ticket tagged with "client-impacting" and "doc-impacting", where appropriate? - * Has lib/licences been updated for third-party libs? Are they Apache License compatible? - * Is the Component on the JIRA ticket set appropriately? - -**Testing** - - * Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc. - * Do tests exist and are they comprehensive? - * Do unit tests actually test that the code is performing the intended functionality? - * Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse? - * If the code may be affected by multi-node clusters, are there dtests? - * If the code may take a long time to test properly, are there CVH tests? - * Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions? - * If patch affects read/write path, did we test for performance regressions w/multiple workloads? - * If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature? - -**Logging** - - * Are logging statements logged at the correct level? - * Are there logs in the critical path that could affect performance? - * Is there any log that could be added to communicate status or troubleshoot potential problems in this feature? - * Can any unnecessary logging statement be removed? 
- diff --git a/src/doc/3.11.3/_sources/development/ide.rst.txt b/src/doc/3.11.3/_sources/development/ide.rst.txt deleted file mode 100644 index 298649576..000000000 --- a/src/doc/3.11.3/_sources/development/ide.rst.txt +++ /dev/null @@ -1,161 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Building and IDE Integration -**************************** - -Building From Source -==================== - -Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using `Java 8 `_, `Git `_ and `Ant `_. - -The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:: - - git clone http://git-wip-us.apache.org/repos/asf/cassandra.git cassandra-trunk - -Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:: - - git checkout cassandra-3.0 - -You can get a list of available branches with ``git branch``. - -Finally build Cassandra using ant:: - - ant - -This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled. - -.. hint:: - - You can setup multiple working trees for different Cassandra versions from the same repository using `git-worktree `_. - -.. note:: - - `Bleeding edge development snapshots `_ of Cassandra are available from Jenkins continuous integration. - -Setting up Cassandra in IntelliJ IDEA -===================================== - -`IntelliJ IDEA `_ by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra. - -Setup Cassandra as a Project (C* 2.1 and newer) ------------------------------------------------ - -Since 2.1.5, there is a new ant target: ``generate-idea-files``. Please see our `wiki `_ for instructions for older Cassandra versions. - -Please clone and build Cassandra as described above and execute the following steps: - -1. Once Cassandra is built, generate the IDEA files using ant: - -:: - - ant generate-idea-files - -2. Start IDEA - -3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA's File menu - -The project generated by the ant task ``generate-idea-files`` contains nearly everything you need to debug Cassandra and execute unit tests. 
- - * Run/debug defaults for JUnit - * Run/debug configuration for Cassandra daemon - * License header for Java source files - * Cassandra code style - * Inspections - -Setting up Cassandra in Eclipse -=============================== - -Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the `download page `_. The following guide was created with "Eclipse IDE for Java Developers". - -These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x. - -Project Settings ----------------- - -**It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.** - - * Clone and build Cassandra as described above. - * Run ``ant generate-eclipse-files`` to create the Eclipse settings. - * Start Eclipse. - * Select ``File->Import->Existing Projects into Workspace->Select git directory``. - * Make sure "cassandra-trunk" is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above). - * Confirm "Finish" to have your project imported. - -You should now be able to find the project as part of the "Package Explorer" or "Project Explorer" without having Eclipse complain about any errors after building the project automatically. - -Unit Tests ----------- - -Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting ``Run As->JUnit Test``. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting ``Debug As->JUnit Test``. - -Alternatively all unit tests can be run from the command line as described in :doc:`testing` - -Debugging Cassandra Using Eclipse ---------------------------------- - -There are two ways how to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ``./bin/cassandra`` script and connect to the JVM through `remotely `_ from Eclipse or start Cassandra from Eclipse right away. - -Starting Cassandra From Command Line -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * Set environment variable to define remote debugging options for the JVM: - ``export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`` - * Start Cassandra by executing the ``./bin/cassandra`` - -Afterwards you should be able to connect to the running Cassandra process through the following steps: - -From the menu, select ``Run->Debug Configurations..`` - -.. image:: images/eclipse_debug0.png - -Create new remote application - -.. image:: images/eclipse_debug1.png - -Configure connection settings by specifying a name and port 1414 - -.. image:: images/eclipse_debug2.png - -Afterwards confirm "Debug" to connect to the JVM and start debugging Cassandra! - -Starting Cassandra From Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cassandra can also be started directly from Eclipse if you don't want to use the command line. - -From the menu, select ``Run->Run Configurations..`` - -.. image:: images/eclipse_debug3.png - -Create new application - -.. image:: images/eclipse_debug4.png - -Specify name, project and main class ``org.apache.cassandra.service.CassandraDaemon`` - -.. image:: images/eclipse_debug5.png - -Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed. 
- -:: - - -Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true - -.. image:: images/eclipse_debug6.png - -Now just confirm "Debug" and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging! - diff --git a/src/doc/3.11.3/_sources/development/index.rst.txt b/src/doc/3.11.3/_sources/development/index.rst.txt deleted file mode 100644 index aefc5999c..000000000 --- a/src/doc/3.11.3/_sources/development/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Development -********************* - -.. toctree:: - :maxdepth: 2 - - ide - testing - patches - code_style - how_to_review - how_to_commit diff --git a/src/doc/3.11.3/_sources/development/patches.rst.txt b/src/doc/3.11.3/_sources/development/patches.rst.txt deleted file mode 100644 index e3d968fab..000000000 --- a/src/doc/3.11.3/_sources/development/patches.rst.txt +++ /dev/null @@ -1,125 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Contributing Code Changes -************************* - -Choosing What to Work on -======================== - -Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that requires changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing. 
- -As a general rule of thumb: - * Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the `developer community `_ - * Bug fixes take higher priority compared to features - * The extent to which tests are required depends on how likely your changes will affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes. - * Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately - -.. hint:: - - Not sure what to work on? Just pick an issue tagged with the `low hanging fruit label `_ in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners. - -Before You Start Coding -======================= - -Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or IRC channel listed on our `community page `_. - -You should also: - * Avoid redundant work by searching for already reported issues in `JIRA `_ - * Create a new issue early in the process describing what you're working on - not just after finishing your patch - * Link related JIRA issues with your own ticket to provide better context - * Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code - * Ping people you would like to ask for advice on JIRA by `mentioning users `_ - -There are also some fixed rules that you need to be aware of: - * Patches will only be applied to branches by following the release model - * Code must be testable - * Code must follow the :doc:`code_style` convention - * Changes must not break compatibility between different Cassandra versions - * Contributions must be covered by the Apache License - -Choosing the Right Branches to Work on -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are currently multiple Cassandra versions maintained in individual branches: - -======= ====== -Version Policy -======= ====== -3.x Tick-tock (see below) -3.0 Bug fixes only -2.2 Bug fixes only -2.1 Critical bug fixes only -======= ====== - -Corresponding branches in git are easy to recognize as they are named ``cassandra-`` (e.g. ``cassandra-3.0``). The ``trunk`` branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases. - -Tick-Tock Releases -"""""""""""""""""" - -New releases created as part of the `tick-tock release process `_ will either focus on stability (odd version numbers) or introduce new features (even version numbers). Any code for new Cassandra features should be based on the latest, unreleased 3.x branch with an even version number, or on trunk. - -Bug Fixes -""""""""" - -Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be ``cassandra-2.1`` -> ``cassandra-2.2`` -> ``cassandra-3.0`` -> ``cassandra-3.x`` -> ``trunk``. But don't worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn't very common.
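As an illustration of that merge order, the following is a minimal sketch of carrying a fix forward through the branches; the ticket number 12345 and the branch name ``12345-3.0`` are hypothetical, and your reviewer or committer may prefer a different exact procedure:

::

    # develop the fix on the oldest affected release branch (ticket 12345 is hypothetical)
    git checkout cassandra-3.0
    git checkout -b 12345-3.0
    # ...commit the fix...

    # test that it merges cleanly into the next branch in the chain,
    # then repeat for each later branch up to trunk
    git checkout cassandra-3.x
    git merge --no-commit 12345-3.0
    git merge --abort                   # undo the test merge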
As a contributor, you're also not expected to provide a single patch for each version. What you need to do however is: - - * Be clear about which versions you could verify to be affected by the bug - * For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on case by case bases - * If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0) - * Test if the patch can be merged cleanly across branches in the direction listed above - * Be clear which branches may need attention by the committer or even create custom patches for those if you can - -Creating a Patch -================ - -So you've finished coding and the great moment arrives: it's time to submit your patch! - - 1. Create a branch for your changes if you haven't done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. ``git checkout -b 12345-3.0`` - 2. Verify that you follow Cassandra's :doc:`code_style` - 3. Make sure all tests (including yours) pass using ant as described in :doc:`testing`. If you suspect a test failure is unrelated to your change, it may be useful to check the test's status by searching the issue tracker or looking at `CI `_ results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites. - 4. Consider going through the :doc:`how_to_review` for your code. This will help you to understand how others will consider your change for inclusion. - 5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either: - - a. Attach a patch to JIRA with a single squashed commit in it (per branch), or - b. Squash the commits in-place in your branches into one - - 6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch ending with the following statement on the last line: ``patch by X; reviewed by Y for CASSANDRA-ZZZZZ`` - 7. When you're happy with the result, create a patch: - - :: - - git add - git commit -m '' - git format-patch HEAD~1 - mv (e.g. 12345-trunk.txt, 12345-3.0.txt) - - Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch: - - :: - - git push --set-upstream origin 12345-3.0 - - 8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless. - 9. Attach the newly generated patch to the ticket/add a link to your branch and click "Submit Patch" at the top of the ticket. This will move the ticket into "Patch Available" status, indicating that your submission is ready for review. - 10. Wait for other developers or committers to review it and hopefully +1 the ticket (see :doc:`how_to_review`). If your change does not receive a +1, do not be discouraged. 
If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable. - 11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into "Patch Available" once again. - -Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work. - - diff --git a/src/doc/3.11.3/_sources/development/testing.rst.txt b/src/doc/3.11.3/_sources/development/testing.rst.txt deleted file mode 100644 index b8eea6b28..000000000 --- a/src/doc/3.11.3/_sources/development/testing.rst.txt +++ /dev/null @@ -1,89 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Testing -******* - -Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you're working on. - - -Unit Testing -============ - -The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the ``test/unit`` directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example. - -.. code-block:: java - - @Test - public void testBatchAndList() throws Throwable - { - createTable("CREATE TABLE %s (k int PRIMARY KEY, l list)"); - execute("BEGIN BATCH " + - "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " + - "APPLY BATCH"); - - assertRows(execute("SELECT l FROM %s WHERE k = 0"), - row(list(1, 2, 3))); - } - -Unit tests can be run from the command line using the ``ant test`` command, ``ant test -Dtest.name=`` to execute a test suite or ``ant testsome -Dtest.name= -Dtest.methods=[,testmethod2]`` for individual tests. 
For example, to run all test methods in the ``org.apache.cassandra.cql3.SimpleQueryTest`` class, you would run:: - - ant test -Dtest.name=SimpleQueryTest - -To run only the ``testStaticCompactTables()`` test method from that class, you would run:: - - ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables - -Long running tests ------------------ - -Tests that consume a significant amount of time during execution can be found in the ``test/long`` directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under ``test/long`` only when using the ``ant long-test`` target. - -DTests ====== - -One way of doing integration or system testing at larger scale is by using `dtest `_, which stands for “Cassandra Distributed Tests”. The idea is to automatically set up Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ``ccmlib`` from the `ccm `_ project. Dtests will set up clusters using this library just as you do running ad-hoc ``ccm`` commands on your local machine. Afterwards dtests will use the `Python driver `_ to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes. - -Using dtests helps us to prevent regression bugs by continually executing tests on the `CI server `_ against new patches. For frequent contributors, this Jenkins is set up to build branches from their GitHub repositories. It is likely that your reviewer will use this Jenkins instance to run tests for your patch. Read more on the motivation behind the CI server `here `_. - -The best way to learn how to write dtests is probably by reading the introduction "`How to Write a Dtest `_" and by looking at existing, recently updated tests in the project. New tests must follow certain `style conventions `_ that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR. - -Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you. - -Performance Testing =================== - -Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable. - -Cassandra Stress Tool --------------------- - -TODO: `CASSANDRA-12365 `_ - -cstar_perf ---------- - -Another tool available on github is `cstar_perf `_ that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it. - -CircleCI -------- -Cassandra ships with a default `CircleCI `_ configuration. To enable running tests on your branches, you need to go to the CircleCI website, click "Login" and log in with your github account. Then you need to give CircleCI permission to watch your repositories.
Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ``ant eclipse-warnings`` and ``ant test`` will be run. If you up the parallelism to 4, it also runs ``ant long-test``, ``ant test-compression`` and ``ant stress-test`` - - diff --git a/src/doc/3.11.3/_sources/faq/index.rst.txt b/src/doc/3.11.3/_sources/faq/index.rst.txt deleted file mode 100644 index d985e3716..000000000 --- a/src/doc/3.11.3/_sources/faq/index.rst.txt +++ /dev/null @@ -1,298 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Frequently Asked Questions -========================== - -- :ref:`why-cant-list-all` -- :ref:`what-ports` -- :ref:`what-happens-on-joins` -- :ref:`asynch-deletes` -- :ref:`one-entry-ring` -- :ref:`can-large-blob` -- :ref:`nodetool-connection-refused` -- :ref:`to-batch-or-not-to-batch` -- :ref:`selinux` -- :ref:`how-to-unsubscribe` -- :ref:`cassandra-eats-all-my-memory` -- :ref:`what-are-seeds` -- :ref:`are-seeds-SPOF` -- :ref:`why-message-dropped` -- :ref:`oom-map-failed` -- :ref:`what-on-same-timestamp-update` -- :ref:`why-bootstrapping-stream-error` - -.. _why-cant-list-all: - -Why can't I set ``listen_address`` to listen on 0.0.0.0 (all my addresses)? ---------------------------------------------------------------------------- - -Cassandra is a gossip-based distributed system and ``listen_address`` is the address a node tells other nodes to reach -it at. Telling other nodes "contact me on any of my addresses" is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen. - -If you don't want to manually specify an IP to ``listen_address`` for each node in your cluster (understandable!), leave -it blank and Cassandra will use ``InetAddress.getLocalHost()`` to pick an address. Then it's up to you or your ops team -to make things resolve correctly (``/etc/hosts/``, dns, etc). - -One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769). - -See :jira:`256` and :jira:`43` for more gory details. - -.. _what-ports: - -What ports does Cassandra use? ------------------------------- - -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX (and 9160 for the deprecated Thrift interface). The internode communication and native protocol ports -are configurable in the :ref:`cassandra-yaml`. The JMX port is configurable in ``cassandra-env.sh`` (through JVM -options). All ports are TCP. - -.. _what-happens-on-joins: - -What happens to existing data in my cluster when I add new nodes? 
------------------------------------------------------------------ - -When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data -to itself. See :ref:`topology-changes`. - -.. _asynch-deletes: - -I delete data from Cassandra, but disk usage stays the same. What gives? ------------------------------------------------------------------------- - -Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can't actually be removed -when you perform a delete; instead, a marker (also called a "tombstone") is written to indicate the value's new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See :ref:`compaction` for more detail. - -.. _one-entry-ring: - -Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring? ------------------------------------------------------------------------------------------------------------------- - -This happens when you have the same token assigned to each node. Don't do that. - -Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes. - -The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart. - -.. _change-replication-factor: - -Can I change the replication factor (of a keyspace) on a live cluster? ---------------------------------------------------------------------- - -Yes, but it will require running repair (or cleanup) to change the replica count of existing data: - -- :ref:`Alter ` the replication factor for the desired keyspace (using cqlsh for instance). -- If you're reducing the replication factor, run ``nodetool cleanup`` on the cluster to remove surplus replicated data. - Cleanup runs on a per-node basis. -- If you're increasing the replication factor, run ``nodetool repair`` to ensure data is replicated according to the new - configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster - performance. It's highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will - most likely swamp it. - -.. _can-large-blob: - -Can I Store (large) BLOBs in Cassandra? ---------------------------------------- - -Cassandra isn't optimized for large file or BLOB storage and a single ``blob`` value is always read and sent to the -client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to -manually split large blobs into smaller chunks. - -Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the -``max_mutation_size_in_kb`` configuration of the :ref:`cassandra-yaml` file (which defaults to half of -``commitlog_segment_size_in_mb``, which itself defaults to 32MB). - -.. _nodetool-connection-refused: - -Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives? --------------------------------------------------------------------------------------- - -Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange.
Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions. - -If you are not using DNS, then make sure that your ``/etc/hosts`` files are accurate on both ends. If that fails, try -setting the ``-Djava.rmi.server.hostname=`` JVM option near the bottom of ``cassandra-env.sh`` to an -interface that you can reach from the remote machine. - -.. _to-batch-or-not-to-batch: - -Will batching my operations speed up my bulk load? --------------------------------------------------- - -No. Using batches to load data will generally just add "spikes" of latency. Use asynchronous INSERTs instead, or use -true :ref:`bulk-loading`. - -An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch -stay reasonable). But never ever blindly batch everything! - -.. _selinux: - -On RHEL nodes are unable to join the ring ------------------------------------------ - -Check if `SELinux `__ is on; if it is, turn it off. - -.. _how-to-unsubscribe: - -How do I unsubscribe from the email list? ------------------------------------------ - -Send an email to ``user-unsubscribe@cassandra.apache.org``. - -.. _cassandra-eats-all-my-memory: - -Why does top report that Cassandra is using a lot more memory than the Java heap max? -------------------------------------------------------------------------------------- - -Cassandra uses `Memory Mapped Files `__ (mmap) internally. That is, we -use the operating system's virtual memory system to map a number of on-disk files into the Cassandra process' address -space. This will "use" virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that. - -What matters from the perspective of "memory use" in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap'd /dev/zero, which represent real memory used. The key issue is that for a mmap'd file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write. - -The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don't -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail `here `__. - -.. _what-are-seeds: - -What are seeds? ---------------- - -Seeds are used during startup to discover the cluster. - -If you configure your nodes to refer some node as seed, nodes in your ring tend to send Gossip message to seeds more -often (also see the :ref:`section on gossip `) than to non-seeds. In other words, seeds are worked as hubs of -Gossip network. With seeds, each node can detect status changes of other nodes quickly. - -Seeds are also referred by new nodes on bootstrap to learn other nodes in ring. When you add a new node to ring, you -need to specify at least one live seed to contact. 
Once a node joins the ring, it learns about the other nodes, so it -doesn't need the seeds on subsequent boots. - -You can make a node a seed at any time. There is nothing special about seed nodes. If you list a node in the seed list, it -is a seed. - -Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). -If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you -do not have to worry about bootstrap at all. - -Recommended usage of seeds: - -- pick two (or more) nodes per data center as seed nodes. -- sync the seed list to all your nodes - -.. _are-seeds-SPOF: - -Does single seed mean single point of failure? ----------------------------------------------- - -The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is -recommended to configure multiple seeds in production systems. - -.. _cant-call-jmx-method: - -Why can't I call jmx method X on jconsole? ------------------------------------------- - -Some JMX operations use array arguments and, as jconsole doesn't support array arguments, those operations can't be -called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations or use an -array-capable JMX monitoring tool. - -.. _why-message-dropped: - -Why do I see "... messages dropped ..." in the logs? ----------------------------------------------------- - -This is a symptom of load shedding -- Cassandra defending itself against more requests than it can handle. - -Internode messages which are received by a node, but are not processed within their proper timeout (see -``read_request_timeout``, ``write_request_timeout``, ... in the :ref:`cassandra-yaml`), are dropped rather than -processed (since the coordinator node will no longer be waiting for a response). - -For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be -repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result. - -For reads, this means a read request may not have completed. - -Load shedding is part of the Cassandra architecture; if this is a persistent issue it is generally a sign of an -overloaded node or cluster. - -.. _oom-map-failed: - -Cassandra dies with ``java.lang.OutOfMemoryError: Map failed`` --------------------------------------------------------------- - -If Cassandra is dying **specifically** with the "Map failed" message, it means the OS is denying java the ability to -lock more memory. In linux, this typically means memlock is limited. Check ``/proc//limits`` to verify -this and raise it (e.g., via ulimit in bash). You may also need to increase ``vm.max_map_count``. Note that the Debian -package handles this for you automatically. - - -.. _what-on-same-timestamp-update: - -What happens if two updates are made with the same timestamp? -------------------------------------------------------------- - -Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates.
Second, if there are two updates, the one with the lexically larger -value is selected. - -.. _why-bootstrapping-stream-error: - -Why bootstrapping a new node fails with a "Stream failed" error? ----------------------------------------------------------------- - -Two main possibilities: - -#. the GC may be creating long pauses disrupting the streaming process -#. compactions happening in the background hold streaming long enough that the TCP connection fails - -In the first case, regular GC tuning advices apply. In the second case, you need to set TCP keepalive to a lower value -(default is very high on Linux). Try to just run the following:: - - $ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5 - -To make those settings permanent, add them to your ``/etc/sysctl.conf`` file. - -Note: `GCE `__'s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment. - - - - - - - - - - - diff --git a/src/doc/3.11.3/_sources/getting_started/configuring.rst.txt b/src/doc/3.11.3/_sources/getting_started/configuring.rst.txt deleted file mode 100644 index 27fac7872..000000000 --- a/src/doc/3.11.3/_sources/getting_started/configuring.rst.txt +++ /dev/null @@ -1,67 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra ---------------------- - -For running Cassandra on a single node, the steps above are enough, you don't really need to change any configuration. -However, when you deploy a cluster of nodes, or use clients that are not on the same host, then there are some -parameters that must be changed. - -The Cassandra configuration files can be found in the ``conf`` directory of tarballs. For packages, the configuration -files will be located in ``/etc/cassandra``. - -Main runtime properties -^^^^^^^^^^^^^^^^^^^^^^^ - -Most of configuration in Cassandra is done via yaml properties that can be set in ``cassandra.yaml``. At a minimum you -should consider setting the following properties: - -- ``cluster_name``: the name of your cluster. -- ``seeds``: a comma separated list of the IP addresses of your cluster seeds. -- ``storage_port``: you don't necessarily need to change this but make sure that there are no firewalls blocking this - port. -- ``listen_address``: the IP address of your node, this is what allows other nodes to communicate with this node so it - is important that you change it. Alternatively, you can set ``listen_interface`` to tell Cassandra which interface to - use, and consecutively which address to use. Set only one, not both. 
-- ``native_transport_port``: as for storage\_port, make sure this port is not blocked by firewalls as clients will - communicate with Cassandra on this port. - -Changing the location of directories -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following yaml properties control the location of directories: - -- ``data_file_directories``: one or more directories where data files are located. -- ``commitlog_directory``: the directory where commitlog files are located. -- ``saved_caches_directory``: the directory where saved caches are located. -- ``hints_directory``: the directory where hints are located. - -For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks. - -Environment variables -^^^^^^^^^^^^^^^^^^^^^ - -JVM-level settings such as heap size can be set in ``cassandra-env.sh``. You can add any additional JVM command line -argument to the ``JVM_OPTS`` environment variable; when Cassandra starts these arguments will be passed to the JVM. - -Logging -^^^^^^^ - -The logger in use is logback. You can change logging properties by editing ``logback.xml``. By default it will log at -INFO level into a file called ``system.log`` and at debug level into a file called ``debug.log``. When running in the -foreground, it will also log at INFO level to the console. - diff --git a/src/doc/3.11.3/_sources/getting_started/drivers.rst.txt b/src/doc/3.11.3/_sources/getting_started/drivers.rst.txt deleted file mode 100644 index baec82378..000000000 --- a/src/doc/3.11.3/_sources/getting_started/drivers.rst.txt +++ /dev/null @@ -1,107 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _client-drivers: - -Client drivers --------------- - -Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver. 
- -Java -^^^^ - -- `Achilles `__ -- `Astyanax `__ -- `Casser `__ -- `Datastax Java driver `__ -- `Kundera `__ -- `PlayORM `__ - -Python -^^^^^^ - -- `Datastax Python driver `__ - -Ruby -^^^^ - -- `Datastax Ruby driver `__ - -C# / .NET -^^^^^^^^^ - -- `Cassandra Sharp `__ -- `Datastax C# driver `__ -- `Fluent Cassandra `__ - -Nodejs -^^^^^^ - -- `Datastax Nodejs driver `__ -- `Node-Cassandra-CQL `__ - -PHP -^^^ - -- `CQL \| PHP `__ -- `Datastax PHP driver `__ -- `PHP-Cassandra `__ -- `PHP Library for Cassandra `__ - -C++ -^^^ - -- `Datastax C++ driver `__ -- `libQTCassandra `__ - -Scala -^^^^^ - -- `Datastax Spark connector `__ -- `Phantom `__ -- `Quill `__ - -Clojure -^^^^^^^ - -- `Alia `__ -- `Cassaforte `__ -- `Hayt `__ - -Erlang -^^^^^^ - -- `CQerl `__ -- `Erlcass `__ - -Go -^^ - -- `CQLc `__ -- `Gocassa `__ -- `GoCQL `__ - -Haskell -^^^^^^^ - -- `Cassy `__ - -Rust -^^^^ - -- `Rust CQL `__ diff --git a/src/doc/3.11.3/_sources/getting_started/index.rst.txt b/src/doc/3.11.3/_sources/getting_started/index.rst.txt deleted file mode 100644 index 4ca9c4d40..000000000 --- a/src/doc/3.11.3/_sources/getting_started/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Getting Started -=============== - -This section covers how to get started using Apache Cassandra and should be the first thing to read if you are new to -Cassandra. - -.. toctree:: - :maxdepth: 2 - - installing - configuring - querying - drivers - - diff --git a/src/doc/3.11.3/_sources/getting_started/installing.rst.txt b/src/doc/3.11.3/_sources/getting_started/installing.rst.txt deleted file mode 100644 index 1a7b8ad3b..000000000 --- a/src/doc/3.11.3/_sources/getting_started/installing.rst.txt +++ /dev/null @@ -1,106 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Installing Cassandra --------------------- - -Prerequisites -^^^^^^^^^^^^^ - -- The latest version of Java 8, either the `Oracle Java Standard Edition 8 - `__ or `OpenJDK 8 `__. 
To - verify that you have the correct version of java installed, type ``java -version``. - -- For using cqlsh, the latest version of `Python 2.7 `__. To verify that you have - the correct version of Python installed, type ``python --version``. - -Installation from binary tarball files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Download the latest stable release from the `Apache Cassandra downloads website `__. - -- Untar the file somewhere, for example: - -:: - - tar -xvf apache-cassandra-3.6-bin.tar.gz cassandra - -The files will be extracted into ``apache-cassandra-3.6``, you need to substitute 3.6 with the release number that you -have downloaded. - -- Optionally add ``apache-cassandra-3.6\bin`` to your path. -- Start Cassandra in the foreground by invoking ``bin/cassandra -f`` from the command line. Press "Control-C" to stop - Cassandra. Start Cassandra in the background by invoking ``bin/cassandra`` from the command line. Invoke ``kill pid`` - or ``pkill -f CassandraDaemon`` to stop Cassandra, where pid is the Cassandra process id, which you can find for - example by invoking ``pgrep -f CassandraDaemon``. -- Verify that Cassandra is running by invoking ``bin/nodetool status`` from the command line. -- Configuration files are located in the ``conf`` sub-directory. -- Since Cassandra 2.1, log and data directories are located in the ``logs`` and ``data`` sub-directories respectively. - Older versions defaulted to ``/var/log/cassandra`` and ``/var/lib/cassandra``. Due to this, it is necessary to either - start Cassandra with root privileges or change ``conf/cassandra.yaml`` to use directories owned by the current user, - as explained below in the section on changing the location of directories. - -Installation from Debian packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Add the Apache repository of Cassandra to ``/etc/apt/sources.list.d/cassandra.sources.list``, for example for version - 3.6: - -:: - - echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list - -- Add the Apache Cassandra repository keys: - -:: - - curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add - - -- Update the repositories: - -:: - - sudo apt-get update - -- If you encounter this error: - -:: - - GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA - -Then add the public key A278B781FE4B2BDA as follows: - -:: - - sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA - -and repeat ``sudo apt-get update``. The actual key may be different, you get it from the error message itself. For a -full list of Apache contributors public keys, you can refer to `this link `__. - -- Install Cassandra: - -:: - - sudo apt-get install cassandra - -- You can start Cassandra with ``sudo service cassandra start`` and stop it with ``sudo service cassandra stop``. - However, normally the service will start automatically. For this reason be sure to stop it if you need to make any - configuration changes. -- Verify that Cassandra is running by invoking ``nodetool status`` from the command line. -- The default location of configuration files is ``/etc/cassandra``. -- The default location of log and data directories is ``/var/log/cassandra/`` and ``/var/lib/cassandra``. 
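As a quick sanity check after installing, the commands mentioned above can be combined into a short smoke test; this is only an illustrative sequence and assumes the Debian package layout:

::

    sudo service cassandra start    # normally started automatically by the package
    nodetool status                 # the local node should be reported as Up/Normal (UN)
    cqlsh                           # connects to localhost on the native protocol port by default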
diff --git a/src/doc/3.11.3/_sources/getting_started/querying.rst.txt b/src/doc/3.11.3/_sources/getting_started/querying.rst.txt deleted file mode 100644 index 55b162bb4..000000000 --- a/src/doc/3.11.3/_sources/getting_started/querying.rst.txt +++ /dev/null @@ -1,52 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Inserting and querying ----------------------- - -The API to Cassandra is :ref:`CQL `, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done: - -- either using cqlsh, -- or through a client driver for Cassandra. - -CQLSH -^^^^^ - -cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:: - - $ bin/cqlsh localhost - Connected to Test Cluster at localhost:9042. - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - Use HELP for help. - cqlsh> SELECT cluster_name, listen_address FROM system.local; - - cluster_name | listen_address - --------------+---------------- - Test Cluster | 127.0.0.1 - - (1 rows) - cqlsh> - -See the :ref:`cqlsh section ` for full documentation. - -Client drivers -^^^^^^^^^^^^^^ - -A lot of client drivers are provided by the Community and a list of known drivers is provided in :ref:`the next section -`. You should refer to the documentation of each drivers for more information on how to use them. diff --git a/src/doc/3.11.3/_sources/index.rst.txt b/src/doc/3.11.3/_sources/index.rst.txt deleted file mode 100644 index 562603d19..000000000 --- a/src/doc/3.11.3/_sources/index.rst.txt +++ /dev/null @@ -1,41 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Welcome to Apache Cassandra's documentation! -============================================ - -This is the official documentation for `Apache Cassandra `__ |version|. 
If you would like -to contribute to this documentation, you are welcome to do so by submitting your contribution like any other patch -following `these instructions `__. - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting_started/index - architecture/index - data_modeling/index - cql/index - configuration/index - operating/index - tools/index - troubleshooting/index - development/index - faq/index - - bugs - contactus diff --git a/src/doc/3.11.3/_sources/operating/backups.rst.txt b/src/doc/3.11.3/_sources/operating/backups.rst.txt deleted file mode 100644 index c071e83b5..000000000 --- a/src/doc/3.11.3/_sources/operating/backups.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Backups -======= - -.. todo:: TODO diff --git a/src/doc/3.11.3/_sources/operating/bloom_filters.rst.txt b/src/doc/3.11.3/_sources/operating/bloom_filters.rst.txt deleted file mode 100644 index 0b37c18da..000000000 --- a/src/doc/3.11.3/_sources/operating/bloom_filters.rst.txt +++ /dev/null @@ -1,65 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Bloom Filters -------------- - -In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter. - -Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file. - -While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the -the ``bloom_filter_fp_chance`` to a float between 0 and 1. 
- -The default value for ``bloom_filter_fp_chance`` is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases. - -Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the ``bloom_filter_fp_chance`` gets closer to 0), memory usage -increases non-linearly - the bloom filter for ``bloom_filter_fp_chance = 0.01`` will require about three times as much -memory as the same table with ``bloom_filter_fp_chance = 0.1``. - -Typical values for ``bloom_filter_fp_chance`` are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case: - -- Users with more RAM and slower disks may benefit from setting the ``bloom_filter_fp_chance`` to a numerically lower - number (such as 0.01) to avoid excess IO operations -- Users with less RAM, more dense nodes, or very fast disks may tolerate a higher ``bloom_filter_fp_chance`` in order to - save RAM at the expense of excess IO operations -- In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics - workloads), setting the ``bloom_filter_fp_chance`` to a much higher number is acceptable. - -Changing -^^^^^^^^ - -The bloom filter false positive chance is visible in the ``DESCRIBE TABLE`` output as the field -``bloom_filter_fp_chance``. Operators can change the value with an ``ALTER TABLE`` statement: -:: - - ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01 - -Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ``ALTER TABLE`` statement, new -files on disk will be written with the new ``bloom_filter_fp_chance``, but existing sstables will not be modified until -they are compacted - if an operator needs a change to ``bloom_filter_fp_chance`` to take effect, they can trigger an -SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the progress. diff --git a/src/doc/3.11.3/_sources/operating/bulk_loading.rst.txt b/src/doc/3.11.3/_sources/operating/bulk_loading.rst.txt deleted file mode 100644 index c8224d5cb..000000000 --- a/src/doc/3.11.3/_sources/operating/bulk_loading.rst.txt +++ /dev/null @@ -1,24 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _bulk-loading: - -Bulk Loading ------------- - -.. 
todo:: TODO diff --git a/src/doc/3.11.3/_sources/operating/cdc.rst.txt b/src/doc/3.11.3/_sources/operating/cdc.rst.txt deleted file mode 100644 index 192f62a09..000000000 --- a/src/doc/3.11.3/_sources/operating/cdc.rst.txt +++ /dev/null @@ -1,89 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Change Data Capture -------------------- - -Overview -^^^^^^^^ - -Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the combined flushed and unflushed CDC-log is reached. An operator can -enable CDC on a table by setting the table property ``cdc=true`` (either when :ref:`creating the table -` or :ref:`altering it `), after which any CommitLogSegments containing -data for a CDC-enabled table are moved to the directory specified in ``cassandra.yaml`` on segment discard. A threshold -of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will not allow CDC -data until a consumer parses and removes data from the destination archival directory. - -Configuration -^^^^^^^^^^^^^ - -Enabling or disable CDC on a table -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -CDC is enable or disable through the `cdc` table property, for instance:: - - CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true; - - ALTER TABLE foo WITH cdc=true; - - ALTER TABLE foo WITH cdc=false; - -cassandra.yaml parameters -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following `cassandra.yaml` are available for CDC: - -``cdc_enabled`` (default: false) - Enable or disable CDC operations node-wide. -``cdc_raw_directory`` (default: ``$CASSANDRA_HOME/data/cdc_raw``) - Destination for CommitLogSegments to be moved after all corresponding memtables are flushed. -``cdc_free_space_in_mb``: (default: min of 4096 and 1/8th volume space) - Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in - ``cdc_raw_directory``. -``cdc_free_space_check_interval_ms`` (default: 250) - When at capacity, we limit the frequency with which we re-calculate the space taken up by ``cdc_raw_directory`` to - prevent burning CPU cycles unnecessarily. Default is to check 4 times per second. - -.. _reading-commitlogsegments: - -Reading CommitLogSegments -^^^^^^^^^^^^^^^^^^^^^^^^^ -This implementation included a refactor of CommitLogReplayer into `CommitLogReader.java -`__. -Usage is `fairly straightforward -`__ -with a `variety of signatures -`__ -available for use. In order to handle mutations read from disk, implement `CommitLogReadHandler -`__. 
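No reference consumer ships with this implementation, so the sketch below is purely illustrative: a real consumer would parse each segment with ``CommitLogReader`` and a ``CommitLogReadHandler`` implementation as described above, but even a trivial process that drains the archival directory is needed to keep the ``cdc_free_space_in_mb`` limit from being reached. The path and the ``process_segment`` helper are assumptions, not part of Cassandra:

::

    #!/bin/bash
    # Illustrative only: drain cdc_raw_directory so that cdc_free_space_in_mb is never
    # reached and writes to CDC-enabled tables are not rejected.
    CDC_RAW=/var/lib/cassandra/cdc_raw        # adjust to your cdc_raw_directory
    while true; do
        for segment in "$CDC_RAW"/*; do
            [ -e "$segment" ] || continue
            process_segment "$segment"        # hypothetical parser/archiver
            rm -f "$segment"
        done
        sleep 10
    done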
- -Warnings -^^^^^^^^ - -**Do not enable CDC without some kind of consumption process in-place.** - -The initial implementation of Change Data Capture does not include a parser (see :ref:`reading-commitlogsegments` above) -so, if CDC is enabled on a node and then on a table, the ``cdc_free_space_in_mb`` will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place. - -Further Reading -^^^^^^^^^^^^^^^ - -- `Design doc `__ -- `JIRA ticket `__ diff --git a/src/doc/3.11.3/_sources/operating/compaction.rst.txt b/src/doc/3.11.3/_sources/operating/compaction.rst.txt deleted file mode 100644 index 0f3900042..000000000 --- a/src/doc/3.11.3/_sources/operating/compaction.rst.txt +++ /dev/null @@ -1,442 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _compaction: - -Compaction ----------- - -Types of compaction -^^^^^^^^^^^^^^^^^^^ - -The concept of compaction is used for different kinds of operations in Cassandra, the common thing about these -operations is that it takes one or more sstables and output new sstables. The types of compactions are; - -Minor compaction - triggered automatically in Cassandra. -Major compaction - a user executes a compaction over all sstables on the node. -User defined compaction - a user triggers a compaction on a given set of sstables. -Scrub - try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you - will need to run a full repair on the node. -Upgradesstables - upgrade sstables to the latest version. Run this after upgrading to a new major version. -Cleanup - remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been - bootstrapped since that node will take ownership of some ranges from those nodes. -Secondary index rebuild - rebuild the secondary indexes on the node. -Anticompaction - after repair the ranges that were actually repaired are split out of the sstables that existed when repair started. -Sub range compaction - It is possible to only compact a given sub range - this could be useful if you know a token that has been - misbehaving - either gathering many updates or many deletes. (``nodetool compact -st x -et y``) will pick - all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will - most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS - the resulting sstable will end up in L0. - -When is a minor compaction triggered? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -# When an sstable is added to the node through flushing/streaming etc. 
-# When autocompaction is enabled after being disabled (``nodetool enableautocompaction``) -# When compaction adds new sstables. -# A check for new minor compactions every 5 minutes. - -Merging sstables -^^^^^^^^^^^^^^^^ - -Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently. - -Tombstones and Garbage Collection (GC) Grace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Why Tombstones -~~~~~~~~~~~~~~ - -When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra. - -Deletes without tombstones -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Imagine a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If one of the nodes fails and and our delete operation only removes existing values we can end up with a cluster that -looks like:: - - [], [], [A] - -Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:: - - [A], [A], [A] - -This would cause our data to be resurrected even though it had been -deleted. - -Deletes with Tombstones -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting again with a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If instead of removing data we add a tombstone record, our single node failure situation will look like this.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A] - -Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]] - -Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as ``gc_grace_seconds`` for every table in Cassandra. - -The gc_grace_seconds parameter and Tombstone Removal -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The table level ``gc_grace_seconds`` parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After ``gc_grace_seconds`` has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstable for a tombstone to be removed. More precisely, to -be able to drop an actual tombstone the following needs to be true; - -- The tombstone must be older than ``gc_grace_seconds`` -- If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older - than the tombstone containing X must be included in the same compaction. 
We don't need to care if the partition is in - an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older - than the data it cannot shadow that data. -- If the option ``only_purge_repaired_tombstones`` is enabled, tombstones are only removed if the data has also been - repaired. - -If a node remains down or disconnected for longer than ``gc_grace_seconds`` it's deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the "Deletes without Tombstones" section. -Note that tombstones will not be removed until a compaction event even if ``gc_grace_seconds`` has elapsed. - -The default value for ``gc_grace_seconds`` is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using ``WITH gc_grace_seconds``. - -TTL -^^^ - -Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least ``gc_grace_seconds``. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once. - -Fully expired sstables -^^^^^^^^^^^^^^^^^^^^^^ - -If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called ``sstableexpiredblockers`` that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -``TimeWindowCompactionStrategy`` (and the deprecated ``DateTieredCompactionStrategy``). - -Repaired/unrepaired data -^^^^^^^^^^^^^^^^^^^^^^^^ - -With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables. - -Data directories -^^^^^^^^^^^^^^^^ - -Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. 
This has a few more benefits than just avoiding -data getting undeleted: - -- It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings - and each one can run compactions independently from the others. -- Users can backup and restore a single data directory. -- Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk - backing two data directories, the big one will be limited the by the small one. One work around to this is to create - more data directories backed by the big disk. - -Single sstable tombstone compaction -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When an sstable is written a histogram with the tombstone expiry times is created and this is used to try to find -sstables with very many tombstones and run single sstable compaction on that sstable in hope of being able to drop -tombstones in that sstable. Before starting this it is also checked how likely it is that any tombstones will actually -will be able to be dropped how much this sstable overlaps with other sstables. To avoid most of these checks the -compaction option ``unchecked_tombstone_compaction`` can be enabled. - -.. _compaction-options: - -Common options -^^^^^^^^^^^^^^ - -There is a number of common options for all the compaction strategies; - -``enabled`` (default: true) - Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do - 'nodetool enableautocompaction' to start running compactions. -``tombstone_threshold`` (default: 0.2) - How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable. -``tombstone_compaction_interval`` (default: 86400s (1 day)) - Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure - that one sstable is not constantly getting recompacted - this option states how often we should try for a given - sstable. -``log_all`` (default: false) - New detailed compaction logging, see :ref:`below `. -``unchecked_tombstone_compaction`` (default: false) - The single sstable compaction has quite strict checks for whether it should be started, this option disables those - checks and for some usecases this might be needed. Note that this does not change anything for the actual - compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able - to drop any tombstones. -``only_purge_repaired_tombstone`` (default: false) - Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired. -``min_threshold`` (default: 4) - Lower limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. -``max_threshold`` (default: 32) - Upper limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. - -Further, see the section on each strategy for specific additional options. - -Compaction nodetool commands -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :ref:`nodetool ` utility provides a number of commands related to compaction: - -``enableautocompaction`` - Enable compaction. -``disableautocompaction`` - Disable compaction. -``setcompactionthroughput`` - How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this - throughput. -``compactionstats`` - Statistics about current and pending compactions. 
-``compactionhistory`` - List details about the last compactions. -``setcompactionthreshold`` - Set the min/max sstable count for when to trigger compaction, defaults to 4/32. - -Switching the compaction strategy and options using JMX -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:: - - org.apache.cassandra.db:type=ColumnFamilies,keyspace=,columnfamily= - -and the attribute to change is ``CompactionParameters`` or ``CompactionParametersJson`` if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an :ref:`ALTER TABLE ` statement - -for example:: - - { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10} - -The setting is kept until someone executes an :ref:`ALTER TABLE ` that touches the compaction -settings or restarts the node. - -.. _detailed-compaction-logging: - -More detailed compaction logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Enable with the compaction option ``log_all`` and a more detailed compaction log file will be produced in your log -directory. - -.. _STCS: - -Size Tiered Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The basic idea of ``SizeTieredCompactionStrategy`` (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within ``bucket_low`` and ``bucket_high`` of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket's sstables takes the most reads. - -Major compaction -~~~~~~~~~~~~~~~~ - -When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size. - -.. _stcs-options: - -STCS options -~~~~~~~~~~~~ - -``min_sstable_size`` (default: 50MB) - Sstables smaller than this are put in the same bucket. -``bucket_low`` (default: 0.5) - How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``bucket_low * avg_bucket_size < sstable_size`` (and the ``bucket_high`` condition holds, see below), then - the sstable is added to the bucket. -``bucket_high`` (default: 1.5) - How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``sstable_size < bucket_high * avg_bucket_size`` (and the ``bucket_low`` condition holds, see above), then - the sstable is added to the bucket. - -Defragmentation -~~~~~~~~~~~~~~~ - -Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster. - -.. _LCS: - -Leveled Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The idea of ``LeveledCompactionStrategy`` (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. 
By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here. - -When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory. - -When deciding which level to compact LCS checks the higher levels first (with LCS, a "higher" level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level. - -Major compaction -~~~~~~~~~~~~~~~~ - -It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817. - -Bootstrapping -~~~~~~~~~~~~~ - -During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done. - -STCS in L0 -~~~~~~~~~~ - -If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better. - -Starved sstables -~~~~~~~~~~~~~~~~ - -If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable\_size\_in\_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved. - -.. _lcs-options: - -LCS options -~~~~~~~~~~~ - -``sstable_size_in_mb`` (default: 160MB) - The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very - large partitions on the node. 
- -``fanout_size`` (default: 10) - The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning - this option. - -LCS also support the ``cassandra.disable_stcs_in_l0`` startup option (``-Dcassandra.disable_stcs_in_l0=true``) to avoid -doing STCS in L0. - -.. _TWCS: - -Time Window CompactionStrategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``TimeWindowCompactionStrategy`` (TWCS) is designed specifically for workloads where it's beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -``SizeTieredCompactionStrategy`` or ``LeveledCompactionStrategy``. The basic concept is that -``TimeWindowCompactionStrategy`` will create 1 sstable per file for a given window, where a window is simply calculated -as the combination of two primary options: - -``compaction_window_unit`` (default: DAYS) - A Java TimeUnit (MINUTES, HOURS, or DAYS). -``compaction_window_size`` (default: 1) - The number of units that make up a window. - -Taken together, the operator can specify windows of virtually any size, and `TimeWindowCompactionStrategy` will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using `SizeTieredCompactionStrategy`. - -Ideally, operators should select a ``compaction_window_unit`` and ``compaction_window_size`` pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -(``'compaction_window_unit':'DAYS','compaction_window_size':3``). - -TimeWindowCompactionStrategy Operational Concerns -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways: - -- If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables - and flushed into the same SSTable, where it will remain comingled. -- If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data - will be comingled and flushed into the same SSTable. - -While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL ``USING TIMESTAMP``. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled), and disable background read -repair by setting the table's ``read_repair_chance`` and ``dclocal_read_repair_chance`` to 0. - -Changing TimeWindowCompactionStrategy Options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Operators wishing to enable ``TimeWindowCompactionStrategy`` on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected. 
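For example, enabling TWCS with the three-day window suggested above on an existing table could look like the following (``keyspace.table`` is a placeholder name)::

    ALTER TABLE keyspace.table WITH compaction = {
        'class': 'TimeWindowCompactionStrategy',
        'compaction_window_unit': 'DAYS',
        'compaction_window_size': 3
    };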
- -Operators wishing to change ``compaction_window_unit`` or ``compaction_window_size`` can do so, but may trigger -additional compactions as adjacent windows are joined together. If the window size is decrease d (for example, from 24 -hours to 12 hours), then the existing SSTables will not be modified - TWCS can not split existing SSTables into multiple -windows. diff --git a/src/doc/3.11.3/_sources/operating/compression.rst.txt b/src/doc/3.11.3/_sources/operating/compression.rst.txt deleted file mode 100644 index 01da34b6d..000000000 --- a/src/doc/3.11.3/_sources/operating/compression.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Compression ------------ - -Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of -data on disk by compressing the SSTable in user-configurable compression ``chunk_length_in_kb``. Because Cassandra -SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates -to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when -UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full -chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so -on). - -Configuring Compression -^^^^^^^^^^^^^^^^^^^^^^^ - -Compression is configured on a per-table basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. By -default, three options are relevant: - -- ``class`` specifies the compression class - Cassandra provides three classes (``LZ4Compressor``, - ``SnappyCompressor``, and ``DeflateCompressor`` ). The default is ``LZ4Compressor``. -- ``chunk_length_in_kb`` specifies the number of kilobytes of data per compression chunk. The default is 64KB. -- ``crc_check_chance`` determines how likely Cassandra is to verify the checksum on each compression chunk during - reads. The default is 1.0. - -Users can set compression using the following syntax: - -:: - - CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'}; - -Or - -:: - - ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5}; - -Once enabled, compression can be disabled with ``ALTER TABLE`` setting ``enabled`` to ``false``: - -:: - - ALTER TABLE keyspace.table WITH compression = {'enabled':'false'}; - -Operators should be aware, however, that changing compression is not immediate. 
The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ``ALTER TABLE``, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the SSTables on disk, -re-compressing the data in the process. - -Benefits and Uses -^^^^^^^^^^^^^^^^^ - -Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk. - -Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well. - -Operational Impact -^^^^^^^^^^^^^^^^^^ - -- Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per - terabyte of data on disk, though the exact usage varies with ``chunk_length_in_kb`` and compression ratios. - -- Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as - non-vnode bootstrap), the CPU overhead of compression can be a limiting factor. - -- The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a - way to ensure correctness of data on disk, compressed tables allow the user to set ``crc_check_chance`` (a float from - 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt. - -Advanced Use -^^^^^^^^^^^^ - -Advanced users can provide their own compression class by implementing the interface at -``org.apache.cassandra.io.compress.ICompressor``. diff --git a/src/doc/3.11.3/_sources/operating/hardware.rst.txt b/src/doc/3.11.3/_sources/operating/hardware.rst.txt deleted file mode 100644 index ad3aa8d21..000000000 --- a/src/doc/3.11.3/_sources/operating/hardware.rst.txt +++ /dev/null @@ -1,87 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Hardware Choices ----------------- - -Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. 
Typical production servers have 8 or more cores and at least -32GB of RAM. - -CPU -^^^ -Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes. - -Memory -^^^^^^ -Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java's Xmx system parameter). In addition to -the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and -counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system's page -cache, storing recently accessed portions files in RAM for rapid re-use. - -For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest: - -- ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption -- The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM -- Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection -- Heaps larger than 12GB should consider G1GC - -Disks -^^^^^ -Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables. - -Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files. - -Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra's sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it's important that the commitlog -(``commitlog_directory``) be on one physical disk (not simply a partition, but a physical disk), and the data files -(``data_file_directories``) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk. - -In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. 
Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it's typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5. - -Common Cloud Choices -^^^^^^^^^^^^^^^^^^^^ - -Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include: - -- m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate - workloads -- i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs -- m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) - storage - -Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives. diff --git a/src/doc/3.11.3/_sources/operating/hints.rst.txt b/src/doc/3.11.3/_sources/operating/hints.rst.txt deleted file mode 100644 index f79f18ab7..000000000 --- a/src/doc/3.11.3/_sources/operating/hints.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Hints ------ - -.. todo:: todo diff --git a/src/doc/3.11.3/_sources/operating/index.rst.txt b/src/doc/3.11.3/_sources/operating/index.rst.txt deleted file mode 100644 index e2cead255..000000000 --- a/src/doc/3.11.3/_sources/operating/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Operating Cassandra -=================== - -.. 
toctree:: - :maxdepth: 2 - - snitch - topo_changes - repair - read_repair - hints - compaction - bloom_filters - compression - cdc - backups - bulk_loading - metrics - security - hardware - diff --git a/src/doc/3.11.3/_sources/operating/metrics.rst.txt b/src/doc/3.11.3/_sources/operating/metrics.rst.txt deleted file mode 100644 index 04abb48e9..000000000 --- a/src/doc/3.11.3/_sources/operating/metrics.rst.txt +++ /dev/null @@ -1,706 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Monitoring ----------- - -Metrics in Cassandra are managed using the `Dropwizard Metrics `__ library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of `built in -`__ and `third party -`__ reporter plugins. - -Metrics are collected for a single node. It's up to the operator to use an external monitoring system to aggregate them. - -Metric Types -^^^^^^^^^^^^ -All metrics reported by cassandra fit into one of the following types. - -``Gauge`` - An instantaneous measurement of a value. - -``Counter`` - A gauge for an ``AtomicLong`` instance. Typically this is consumed by monitoring the change since the last call to - see if there is a large increase compared to the norm. - -``Histogram`` - Measures the statistical distribution of values in a stream of data. - - In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th - percentiles. - -``Timer`` - Measures both the rate that a particular piece of code is called and the histogram of its duration. - -``Latency`` - Special type that tracks latency (in microseconds) with a ``Timer`` plus a ``Counter`` that tracks the total latency - accrued since starting. The former is useful if you track the change in total latency since the last check. Each - metric name of this type will have 'Latency' and 'TotalLatency' appended to it. - -``Meter`` - A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving - average throughputs. - -Table Metrics -^^^^^^^^^^^^^ - -Each table in Cassandra has metrics responsible for tracking its state and performance. - -The metric names are all appended with the specific ``Keyspace`` and ``Table`` name. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Table...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Table keyspace= scope=
name=`` - -.. NOTE:: - There is a special table called '``all``' without a keyspace. This represents the aggregation of metrics across - **all** tables and keyspaces on the node. - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -MemtableOnHeapSize Gauge Total amount of data stored in the memtable that resides **on**-heap, including column related overhead and partitions overwritten. -MemtableOffHeapSize Gauge Total amount of data stored in the memtable that resides **off**-heap, including column related overhead and partitions overwritten. -MemtableLiveDataSize Gauge Total amount of live data stored in the memtable, excluding any data structure overhead. -AllMemtablesOnHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **on**-heap. -AllMemtablesOffHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **off**-heap. -AllMemtablesLiveDataSize Gauge Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead. -MemtableColumnsCount Gauge Total number of columns present in the memtable. -MemtableSwitchCount Counter Number of times flush has resulted in the memtable being switched out. -CompressionRatio Gauge Current compression ratio for all SSTables. -EstimatedPartitionSizeHistogram Gauge Histogram of estimated partition size (in bytes). -EstimatedPartitionCount Gauge Approximate number of keys in table. -EstimatedColumnCountHistogram Gauge Histogram of estimated number of columns. -SSTablesPerReadHistogram Histogram Histogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into acoount. -ReadLatency Latency Local read latency for this table. -RangeLatency Latency Local range scan latency for this table. -WriteLatency Latency Local write latency for this table. -CoordinatorReadLatency Timer Coordinator read latency for this table. -CoordinatorScanLatency Timer Coordinator range scan latency for this table. -PendingFlushes Counter Estimated number of flush tasks pending for this table. -BytesFlushed Counter Total number of bytes flushed since server [re]start. -CompactionBytesWritten Counter Total number of bytes written by compaction since server [re]start. -PendingCompactions Gauge Estimate of number of pending compactions for this table. -LiveSSTableCount Gauge Number of SSTables on disk for this table. -LiveDiskSpaceUsed Counter Disk space used by SSTables belonging to this table (in bytes). -TotalDiskSpaceUsed Counter Total disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC'd. -MinPartitionSize Gauge Size of the smallest compacted partition (in bytes). -MaxPartitionSize Gauge Size of the largest compacted partition (in bytes). -MeanPartitionSize Gauge Size of the average compacted partition (in bytes). -BloomFilterFalsePositives Gauge Number of false positives on table's bloom filter. -BloomFilterFalseRatio Gauge False positive ratio of table's bloom filter. -BloomFilterDiskSpaceUsed Gauge Disk space used by bloom filter (in bytes). -BloomFilterOffHeapMemoryUsed Gauge Off-heap memory used by bloom filter. -IndexSummaryOffHeapMemoryUsed Gauge Off-heap memory used by index summary. 
-CompressionMetadataOffHeapMemoryUsed Gauge Off-heap memory used by compression meta data. -KeyCacheHitRate Gauge Key cache hit rate for this table. -TombstoneScannedHistogram Histogram Histogram of tombstones scanned in queries on this table. -LiveScannedHistogram Histogram Histogram of live cells scanned in queries on this table. -ColUpdateTimeDeltaHistogram Histogram Histogram of column update time delta on this table. -ViewLockAcquireTime Timer Time taken acquiring a partition lock for materialized view updates on this table. -ViewReadTime Timer Time taken during the local read of a materialized view update. -TrueSnapshotsSize Gauge Disk space used by snapshots of this table including all SSTable components. -RowCacheHitOutOfRange Counter Number of table row cache hits that do not satisfy the query filter, thus went to disk. -RowCacheHit Counter Number of table row cache hits. -RowCacheMiss Counter Number of table row cache misses. -CasPrepare Latency Latency of paxos prepare round. -CasPropose Latency Latency of paxos propose round. -CasCommit Latency Latency of paxos commit round. -PercentRepaired Gauge Percent of table data that is repaired on disk. -SpeculativeRetries Counter Number of times speculative retries were sent for this table. -WaitingOnFreeMemtableSpace Histogram Histogram of time spent waiting for free memtable space, either on- or off-heap. -DroppedMutations Counter Number of dropped mutations on this table. -======================================= ============== =========== - -Keyspace Metrics -^^^^^^^^^^^^^^^^ -Each keyspace in Cassandra has metrics responsible for tracking its state and performance. - -These metrics are the same as the ``Table Metrics`` above, only they are aggregated at the Keyspace level. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.keyspace..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Keyspace scope= name=`` - -ThreadPool Metrics -^^^^^^^^^^^^^^^^^^ - -Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It's important to monitor the state of these thread pools since they can tell you how saturated a -node is. - -The metric names are all appended with the specific ``ThreadPool`` name. The thread pools are also categorized under a -specific type. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ThreadPools...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ThreadPools scope= type= name=`` - -===================== ============== =========== -Name Type Description -===================== ============== =========== -ActiveTasks Gauge Number of tasks being actively worked on by this pool. -PendingTasks Gauge Number of queued tasks queued up on this pool. -CompletedTasks Counter Number of tasks completed. -TotalBlockedTasks Counter Number of tasks that were blocked due to queue saturation. -CurrentlyBlockedTask Counter Number of tasks that are currently blocked due to queue saturation but on retry will become unblocked. -MaxPoolSize Gauge The maximum number of threads in this pool. -===================== ============== =========== - -The following thread pools can be monitored. 
- -============================ ============== =========== -Name Type Description -============================ ============== =========== -Native-Transport-Requests transport Handles client CQL requests -CounterMutationStage request Responsible for counter writes -ViewMutationStage request Responsible for materialized view writes -MutationStage request Responsible for all other writes -ReadRepairStage request ReadRepair happens on this thread pool -ReadStage request Local reads run on this thread pool -RequestResponseStage request Coordinator requests to the cluster run on this thread pool -AntiEntropyStage internal Builds merkle tree for repairs -CacheCleanupExecutor internal Cache maintenance performed on this thread pool -CompactionExecutor internal Compactions are run on these threads -GossipStage internal Handles gossip requests -HintsDispatcher internal Performs hinted handoff -InternalResponseStage internal Responsible for intra-cluster callbacks -MemtableFlushWriter internal Writes memtables to disk -MemtablePostFlush internal Cleans up commit log after memtable is written to disk -MemtableReclaimMemory internal Memtable recycling -MigrationStage internal Runs schema migrations -MiscStage internal Misceleneous tasks run here -PendingRangeCalculator internal Calculates token range -PerDiskMemtableFlushWriter_0 internal Responsible for writing a spec (there is one of these per disk 0-N) -Sampler internal Responsible for re-sampling the index summaries of SStables -SecondaryIndexManagement internal Performs updates to secondary indexes -ValidationExecutor internal Performs validation compaction or scrubbing -============================ ============== =========== - -.. |nbsp| unicode:: 0xA0 .. nonbreaking space - -Client Request Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Client requests have their own set of metrics that encapsulate the work happening at coordinator level. - -Different types of client requests are broken down by ``RequestType``. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ClientRequest..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ClientRequest scope= name=`` - - -:RequestType: CASRead -:Description: Metrics related to transactional read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction read latency. - Unavailables Counter Number of unavailable exceptions encountered. - UnfinishedCommit Counter Number of transactions that were committed on read. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended reads were encountered - ===================== ============== ============================================================= - -:RequestType: CASWrite -:Description: Metrics related to transactional write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction write latency. 
- UnfinishedCommit Counter Number of transactions that were committed on write. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended writes were encountered - ===================== ============== ============================================================= - - -:RequestType: Read -:Description: Metrics related to standard read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of read failures encountered. - |nbsp| Latency Read latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: RangeSlice -:Description: Metrics related to token range read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of range query failures encountered. - |nbsp| Latency Range query latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: Write -:Description: Metrics related to regular write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of write failures encountered. - |nbsp| Latency Write latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - - -:RequestType: ViewWrite -:Description: Metrics related to materialized view write wrtes. -:Metrics: - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - Unavailables Counter Number of unavailable exceptions encountered. - ViewReplicasAttempted Counter Total number of attempted view replica writes. - ViewReplicasSuccess Counter Total number of succeded view replica writes. - ViewPendingMutations Gauge ViewReplicasAttempted - ViewReplicasSuccess. - ViewWriteLatency Timer Time between when mutation is applied to base table and when CL.ONE is achieved on view. - ===================== ============== ============================================================= - -Cache Metrics -^^^^^^^^^^^^^ - -Cassandra caches have metrics to track the effectivness of the caches. Though the ``Table Metrics`` might be more useful. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Cache..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Cache scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Cache capacity in bytes. -Entries Gauge Total number of cache entries. -FifteenMinuteCacheHitRate Gauge 15m cache hit rate. -FiveMinuteCacheHitRate Gauge 5m cache hit rate. -OneMinuteCacheHitRate Gauge 1m cache hit rate. -HitRate Gauge All time cache hit rate. -Hits Meter Total number of cache hits. -Misses Meter Total number of cache misses. -MissLatency Timer Latency of misses. -Requests Gauge Total number of cache requests. -Size Gauge Total size of occupied cache, in bytes. -========================== ============== =========== - -The following caches are covered: - -============================ =========== -Name Description -============================ =========== -CounterCache Keeps hot counters in memory for performance. -ChunkCache In process uncompressed page cache. -KeyCache Cache for partition to sstable offsets. -RowCache Cache for rows kept in memory. -============================ =========== - -.. NOTE:: - Misses and MissLatency are only defined for the ChunkCache - -CQL Metrics -^^^^^^^^^^^ - -Metrics specific to CQL prepared statement caching. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CQL.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CQL name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -PreparedStatementsCount Gauge Number of cached prepared statements. -PreparedStatementsEvicted Counter Number of prepared statements evicted from the prepared statement cache -PreparedStatementsExecuted Counter Number of prepared statements executed. -RegularStatementsExecuted Counter Number of **non** prepared statements executed. -PreparedStatementsRatio Gauge Percentage of statements that are prepared vs unprepared. -========================== ============== =========== - - -DroppedMessage Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by ``Hinted Handoff`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.DroppedMessages..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=DroppedMetrics scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CrossNodeDroppedLatency Timer The dropped latency across nodes. -InternalDroppedLatency Timer The dropped latency within node. -Dropped Meter Number of dropped messages. -========================== ============== =========== - -The different types of messages tracked are: - -============================ =========== -Name Description -============================ =========== -BATCH_STORE Batchlog write -BATCH_REMOVE Batchlog cleanup (after succesfully applied) -COUNTER_MUTATION Counter writes -HINT Hint replay -MUTATION Regular writes -READ Regular reads -READ_REPAIR Read repair -PAGED_SLICE Paged read -RANGE_SLICE Token range read -REQUEST_RESPONSE RPC Callbacks -_TRACE Tracing writes -============================ =========== - -Streaming Metrics -^^^^^^^^^^^^^^^^^ - -Metrics reported during ``Streaming`` operations, such as repair, bootstrap, rebuild. 
- -These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Streaming..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Streaming scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -IncomingBytes Counter Number of bytes streamed to this node from the peer. -OutgoingBytes Counter Number of bytes streamed to the peer endpoint from this node. -========================== ============== =========== - - -Compaction Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to ``Compaction`` work. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Compaction.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Compaction name=`` - -========================== ======================================== =============================================== -Name Type Description -========================== ======================================== =============================================== -BytesCompacted Counter Total number of bytes compacted since server [re]start. -PendingTasks Gauge Estimated number of compactions remaining to perform. -CompletedTasks Gauge Number of completed compactions since server [re]start. -TotalCompactionsCompleted Meter Throughput of completed compactions since server [re]start. -PendingTasksByTableName Gauge>> Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in ``Table Metrics``. -========================== ======================================== =============================================== - -CommitLog Metrics -^^^^^^^^^^^^^^^^^ - -Metrics specific to the ``CommitLog`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CommitLog.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CommitLog name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CompletedTasks Gauge Total number of commit log messages written since [re]start. -PendingTasks Gauge Number of commit log messages written but yet to be fsync'd. -TotalCommitLogSize Gauge Current size, in bytes, used by all the commit log segments. -WaitingOnSegmentAllocation Timer Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero. -WaitingOnCommit Timer The time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval. -========================== ============== =========== - -Storage Metrics -^^^^^^^^^^^^^^^ - -Metrics specific to the storage engine. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Storage.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Storage name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Exceptions Counter Number of internal exceptions caught. Under normal exceptions this should be zero. -Load Counter Size, in bytes, of the on disk data size this node manages. -TotalHints Counter Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint. -TotalHintsInProgress Counter Number of hints attemping to be sent currently. 
-========================== ============== =========== - -HintedHandoff Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in ``Storage Metrics`` - -These metrics include the peer endpoint **in the metric name** - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintedHandOffManager.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintedHandOffManager name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Hints_created- Counter Number of hints on disk for this peer. -Hints_not_stored- Counter Number of hints not stored for this peer, due to being down past the configured hint window. -=========================== ============== =========== - -SSTable Index Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the SSTable index metadata. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Index..RowIndexEntry`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -IndexedEntrySize Histogram Histogram of the on-heap size, in bytes, of the index across all SSTables. -IndexInfoCount Histogram Histogram of the number of on-heap index entries managed across all SSTables. -IndexInfoGets Histogram Histogram of the number index seeks performed per SSTable. -=========================== ============== =========== - -BufferPool Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.BufferPool.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=BufferPool name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Size Gauge Size, in bytes, of the managed buffer pool -Misses Meter The rate of misses in the pool. The higher this is the more allocations incurred. -=========================== ============== =========== - - -Client Metrics -^^^^^^^^^^^^^^ - -Metrics specifc to client managment. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Client.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Client name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -connectedNativeClients Counter Number of clients connected to this nodes native protocol server -connectedThriftClients Counter Number of clients connected to this nodes thrift protocol server -=========================== ============== =========== - -JVM Metrics -^^^^^^^^^^^ - -JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using `Metric Reporters`_. 
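If a full JMX client is not at hand, several of the JVM figures described below can be spot-checked with `nodetool`, which reads the same MBeans. A quick sketch against a locally running node:

```bash
# Heap usage, uptime, and cache statistics for the local node
nodetool info

# Garbage collection statistics (totals since the last invocation of this command)
nodetool gcstats
```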
- -BufferPool -++++++++++ - -**Metric Name** - ``jvm.buffers..`` - -**JMX MBean** - ``java.nio:type=BufferPool name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Estimated total capacity of the buffers in this pool -Count Gauge Estimated number of buffers in the pool -Used Gauge Estimated memory that the Java virtual machine is using for this buffer pool -========================== ============== =========== - -FileDescriptorRatio -+++++++++++++++++++ - -**Metric Name** - ``jvm.fd.`` - -**JMX MBean** - ``java.lang:type=OperatingSystem name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Usage Ratio Ratio of used to total file descriptors -========================== ============== =========== - -GarbageCollector -++++++++++++++++ - -**Metric Name** - ``jvm.gc..`` - -**JMX MBean** - ``java.lang:type=GarbageCollector name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Count Gauge Total number of collections that have occurred -Time Gauge Approximate accumulated collection elapsed time in milliseconds -========================== ============== =========== - -Memory -++++++ - -**Metric Name** - ``jvm.memory..`` - -**JMX MBean** - ``java.lang:type=Memory`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -MemoryPool -++++++++++ - -**Metric Name** - ``jvm.memory.pools..`` - -**JMX MBean** - ``java.lang:type=MemoryPool name=`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -JMX -^^^ - -Any JMX based client can access metrics from cassandra. - -If you wish to access JMX metrics over http it's possible to download `Mx4jTool `__ and -place ``mx4j-tools.jar`` into the classpath. On startup you will see in the log:: - - HttpAdaptor version 3.0.2 started on port 8081 - -To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -``conf/cassandra-env.sh`` and uncomment:: - - #MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0" - - #MX4J_PORT="-Dmx4jport=8081" - - -Metric Reporters -^^^^^^^^^^^^^^^^ - -As mentioned at the top of this section on monitoring the Cassandra metrics can be exported to a number of monitoring -system a number of `built in `__ and `third party -`__ reporter plugins. - -The configuration of these plugins is managed by the `metrics reporter config project -`__. There is a sample configuration file located at -``conf/metrics-reporter-config-sample.yaml``. 
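As an illustration only, a Graphite reporter entry in such a file might look roughly like the following; the exact schema is defined by the bundled sample file, and the host, port, and prefix values here are placeholders:

```bash
# Illustrative sketch only; consult conf/metrics-reporter-config-sample.yaml for
# the authoritative syntax. Host, port, and prefix values are placeholders.
cat > conf/metrics-reporter-config.yaml <<'EOF'
graphite:
  -
    period: 60
    timeunit: 'SECONDS'
    prefix: 'cassandra-node1'
    hosts:
      - host: 'graphite.example.com'
        port: 2003
    predicate:
      color: 'white'
      useQualifiedName: true
      patterns:
        - '^org.apache.cassandra.metrics.+'
EOF
```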
- -Once configured, you simply start cassandra with the flag -``-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml``. The specified .yaml file plus any 3rd party -reporter jars must all be in Cassandra's classpath. diff --git a/src/doc/3.11.3/_sources/operating/read_repair.rst.txt b/src/doc/3.11.3/_sources/operating/read_repair.rst.txt deleted file mode 100644 index 0e52bf523..000000000 --- a/src/doc/3.11.3/_sources/operating/read_repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Read repair ------------ - -.. todo:: todo diff --git a/src/doc/3.11.3/_sources/operating/repair.rst.txt b/src/doc/3.11.3/_sources/operating/repair.rst.txt deleted file mode 100644 index 97d8ce8ba..000000000 --- a/src/doc/3.11.3/_sources/operating/repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Repair ------- - -.. todo:: todo diff --git a/src/doc/3.11.3/_sources/operating/security.rst.txt b/src/doc/3.11.3/_sources/operating/security.rst.txt deleted file mode 100644 index dfcd9e6c5..000000000 --- a/src/doc/3.11.3/_sources/operating/security.rst.txt +++ /dev/null @@ -1,410 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -.. highlight:: none - -Security --------- - -There are three main components to the security features provided by Cassandra: - -- TLS/SSL encryption for client and inter-node communication -- Client authentication -- Authorization - -TLS/SSL Encryption -^^^^^^^^^^^^^^^^^^ -Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently. - -In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can -be overidden using the settings in ``cassandra.yaml``, but this is not recommended unless there are policies in place -which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be -updated. - -FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See `the java document on FIPS `__ -for more details. - -For information on generating the keystore and truststore files used in SSL communications, see the -`java documentation on creating keystores `__ - -Inter-node Encryption -~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing inter-node encryption are found in ``cassandra.yaml`` in the ``server_encryption_options`` -section. To enable inter-node encryption, change the ``internode_encryption`` setting from its default value of ``none`` -to one value from: ``rack``, ``dc`` or ``all``. - -Client to Node Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing client to node encryption are found in ``cassandra.yaml`` in the ``client_encryption_options`` -section. There are two primary toggles here for enabling encryption, ``enabled`` and ``optional``. - -- If neither is set to ``true``, client connections are entirely unencrypted. -- If ``enabled`` is set to ``true`` and ``optional`` is set to ``false``, all client connections must be secured. -- If both options are set to ``true``, both encrypted and unencrypted connections are supported using the same port. - Client connections using encryption with this configuration will be automatically detected and handled by the server. - -As an alternative to the ``optional`` setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set ``optional`` to false and use the ``native_transport_port_ssl`` -setting in ``cassandra.yaml`` to specify the port to be used for secure client communication. - -.. _operation-roles: - -Roles -^^^^^ - -Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -``role_manager`` setting in ``cassandra.yaml``. The default setting uses ``CassandraRoleManager``, an implementation -which stores role information in the tables of the ``system_auth`` keyspace. - -See also the :ref:`CQL documentation on roles `. - -Authentication -^^^^^^^^^^^^^^ - -Authentication is pluggable in Cassandra and is configured using the ``authenticator`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. 
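A quick way to confirm which of these options a node is currently running with is to inspect its configuration; the path below assumes a standard package installation:

```bash
# Show the configured authenticator and authorizer on this node
grep -E '^(authenticator|authorizer):' /etc/cassandra/cassandra.yaml
```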
- -By default, Cassandra is configured with ``AllowAllAuthenticator`` which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions. - -The default distribution also includes ``PasswordAuthenticator``, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication. - -.. _password-authentication: - -Enabling Password Authentication -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster. - -Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps: - -1. Open a ``cqlsh`` session and change the replication factor of the ``system_auth`` keyspace. By default, this keyspace - uses ``SimpleReplicationStrategy`` and a ``replication_factor`` of 1. It is recommended to change this for any - non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to - configure a replication factor of 3 to 5 per-DC. - -:: - - ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3}; - -2. Edit ``cassandra.yaml`` to change the ``authenticator`` option like so: - -:: - - authenticator: PasswordAuthenticator - -3. Restart the node. - -4. Open a new ``cqlsh`` session using the credentials of the default superuser: - -:: - - cqlsh -u cassandra -p cassandra - -5. During login, the credentials for the default superuser are read with a consistency level of ``QUORUM``, whereas - those for all other users (including superusers) are read at ``LOCAL_ONE``. In the interests of performance and - availability, as well as security, operators should create another superuser and disable the default one. This step - is optional, but highly recommended. While logged in as the default superuser, create another superuser role which - can be used to bootstrap further configuration. - -:: - - # create a new superuser - CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super'; - -6. Start a new cqlsh session, this time logging in as the new_superuser and disable the default superuser. - -:: - - ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false; - -7. Finally, set up the roles and credentials for your application users with :ref:`CREATE ROLE ` - statements. - -At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. 
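Repeating steps 2 and 3 is typically scripted; a rough sketch for a single node is shown here, with the configuration path and service name being assumptions that depend on how Cassandra was installed:

```bash
# Switch the authenticator on this node, then restart Cassandra.
# /etc/cassandra/cassandra.yaml and the 'cassandra' service name are assumptions.
sudo sed -i 's/^authenticator:.*/authenticator: PasswordAuthenticator/' /etc/cassandra/cassandra.yaml
sudo systemctl restart cassandra
```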
Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster. - -Note that using ``PasswordAuthenticator`` also requires the use of :ref:`CassandraRoleManager `. - -See also: :ref:`setting-credentials-for-internal-authentication`, :ref:`CREATE ROLE `, -:ref:`ALTER ROLE `, :ref:`ALTER KEYSPACE ` and :ref:`GRANT PERMISSION -`, - -Authorization -^^^^^^^^^^^^^ - -Authorization is pluggable in Cassandra and is configured using the ``authorizer`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthorizer`` which performs no checking and so effectively grants all -permissions to all roles. This must be used if ``AllowAllAuthenticator`` is the configured authenticator. - -The default distribution also includes ``CassandraAuthorizer``, which does implement full permissions management -functionality and stores its data in Cassandra system tables. - -Enabling Internal Authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests. - -The following assumes that authentication has already been enabled via the process outlined in -:ref:`password-authentication`. Perform these steps to enable internal authorization across the cluster: - -1. On the selected node, edit ``cassandra.yaml`` to change the ``authorizer`` option like so: - -:: - - authorizer: CassandraAuthorizer - -2. Restart the node. - -3. Open a new ``cqlsh`` session using the credentials of a role with superuser credentials: - -:: - - cqlsh -u dba -p super - -4. Configure the appropriate access privileges for your clients using `GRANT PERMISSION `_ - statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so - disruption to clients is avoided. - -:: - - GRANT SELECT ON ks.t1 TO db_user; - -5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node - restarts and clients reconnect, the enforcement of the granted permissions will begin. - -See also: :ref:`GRANT PERMISSION `, `GRANT ALL ` and :ref:`REVOKE PERMISSION -` - -Caching -^^^^^^^ - -Enabling authentication and authorization places additional load on the cluster by frequently reading from the -``system_auth`` tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from ``cassandra.yaml`` -or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from ``cassandra.yaml`` when the node is restarted. - -Each cache has 3 options which can be set: - -Validity Period - Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache. -Refresh Rate - Controls the rate at which background reads are performed to pick up any changes to the underlying data. 
While these - async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a - shorter time than the validity period. -Max Entries - Controls the upper bound on cache size. - -The naming for these options in ``cassandra.yaml`` follows the convention: - -* ``_validity_in_ms`` -* ``_update_interval_in_ms`` -* ``_cache_max_entries`` - -Where ```` is one of ``credentials``, ``permissions``, or ``roles``. - -As mentioned, these are also exposed via JMX in the mbeans under the ``org.apache.cassandra.auth`` domain. - -JMX access -^^^^^^^^^^ - -Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra's own auth subsystem. - -The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to change the ``LOCAL_JMX`` setting to ``yes``. Under the -standard configuration, when remote JMX connections are enabled, :ref:`standard JMX authentication ` -is also switched on. - -Note that by default, local-only connections are not subject to authentication, but this can be enabled. - -If enabling remote connections, it is recommended to also use :ref:`SSL ` connections. - -Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as :ref:`nodetool `, are -correctly configured and working as expected. - -.. _standard-jmx-auth: - -Standard JMX Auth -~~~~~~~~~~~~~~~~~ - -Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -``cassandra-env.sh`` by the line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -Edit the password file to add username/password pairs: - -:: - - jmx_user jmx_password - -Secure the credentials file so that only the user running the Cassandra process can read it : - -:: - - $ chown cassandra:cassandra /etc/cassandra/jmxremote.password - $ chmod 400 /etc/cassandra/jmxremote.password - -Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in ``cassandra-env.sh``: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -Then edit the access file to grant your JMX user readwrite permission: - -:: - - jmx_user readwrite - -Cassandra must be restarted to pick up the new settings. - -See also : `Using File-Based Password Authentication In JMX -`__ - - -Cassandra Integrated Auth -~~~~~~~~~~~~~~~~~~~~~~~~~ - -An alternative to the out-of-the-box JMX auth is to useeCassandra's own authentication and/or authorization providers -for JMX clients. This is potentially more flexible and secure but it come with one major caveat. Namely that it is not -available until `after` a node has joined the ring, because the auth subsystem is not fully configured until that point -However, it is often critical for monitoring purposes to have JMX access particularly during bootstrap. 
So it is -recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, -to switch to integrated auth once the node has joined the ring and initial setup is complete. - -With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates -can be managed centrally using just ``cqlsh``. Furthermore, fine grained control over exactly which operations are -permitted on particular MBeans can be acheived via :ref:`GRANT PERMISSION `. - -To enable integrated authentication, edit ``cassandra-env.sh`` to uncomment these lines: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" - #JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" - -And disable the JMX standard auth by commenting this line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -To enable integrated authorization, uncomment this line: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" - -Check standard access control is off by ensuring this line is commented out: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as: - -:: - - CREATE ROLE jmx WITH LOGIN = false; - GRANT SELECT ON ALL MBEANS TO jmx; - GRANT DESCRIBE ON ALL MBEANS TO jmx; - GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx; - GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx; - - # Grant the jmx role to one with login permissions so that it can access the JMX tooling - CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false; - GRANT jmx TO ks_user; - -Fine grained access control to individual MBeans is also supported: - -:: - - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner; - -This permits the ``ks_user`` role to invoke methods on the MBean representing a single table in ``test_keyspace``, while -granting the same permission for all table level MBeans in that keyspace to the ``ks_owner`` role. - -Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered. - -See also: :ref:`Permissions `. - -.. _jmx-with-ssl: - -JMX With SSL -~~~~~~~~~~~~ - -JMX SSL configuration is controlled by a number of system properties, some of which are optional. 
To turn on SSL, edit -the relevant lines in ``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to uncomment and set the values of these -properties as required: - -``com.sun.management.jmxremote.ssl`` - set to true to enable SSL -``com.sun.management.jmxremote.ssl.need.client.auth`` - set to true to enable validation of client certificates -``com.sun.management.jmxremote.registry.ssl`` - enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub -``com.sun.management.jmxremote.ssl.enabled.protocols`` - by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is - not usually necessary and using the defaults is the preferred option. -``com.sun.management.jmxremote.ssl.enabled.cipher.suites`` - by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that - this is not usually necessary and using the defaults is the preferred option. -``javax.net.ssl.keyStore`` - set the path on the local filesystem of the keystore containing server private keys and public certificates -``javax.net.ssl.keyStorePassword`` - set the password of the keystore file -``javax.net.ssl.trustStore`` - if validation of client certificates is required, use this property to specify the path of the truststore containing - the public certificates of trusted clients -``javax.net.ssl.trustStorePassword`` - set the password of the truststore file - -See also: `Oracle Java7 Docs `__, -`Monitor Java with JMX `__ diff --git a/src/doc/3.11.3/_sources/operating/snitch.rst.txt b/src/doc/3.11.3/_sources/operating/snitch.rst.txt deleted file mode 100644 index faea0b3e1..000000000 --- a/src/doc/3.11.3/_sources/operating/snitch.rst.txt +++ /dev/null @@ -1,78 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Snitch ------- - -In cassandra, the snitch has two functions: - -- it teaches Cassandra enough about your network topology to route requests efficiently. -- it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping - machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same - "rack" (which may not actually be a physical location). - -Dynamic snitching -^^^^^^^^^^^^^^^^^ - -The dynamic snitch monitor read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is -configured with the following properties on ``cassandra.yaml``: - -- ``dynamic_snitch``: whether the dynamic snitch should be enabled or disabled. -- ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the more expensive part of host score - calculation. 
-- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero and read_repair_chance is < 1.0, this will allow - 'pinning' of replicas to hosts in order to increase cache capacity. -- ``dynamic_snitch_badness_threshold:``: The badness threshold will control how much worse the pinned host has to be - before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a - percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned - host was 20% worse than the fastest. - -Snitch classes -^^^^^^^^^^^^^^ - -The ``endpoint_snitch`` parameter in ``cassandra.yaml`` should be set to the class the class that implements -``IEndPointSnitch`` which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the snitch implementations: - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via gossip. If ``cassandra-topology.properties`` exists, - it is used as a fallback, allowing migration from the PropertyFileSnitch. - -SimpleSnitch - Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -PropertyFileSnitch - Proximity is determined by rack and data center, which are explicitly configured in - ``cassandra-topology.properties``. - -Ec2Snitch - Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. - The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this - will not work across multiple regions. - -Ec2MultiRegionSnitch - Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the - public IP as well). You will need to open the ``storage_port`` or ``ssl_storage_port`` on the public IP firewall - (For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection). - -RackInferringSnitch - Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each - node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an - example of writing a custom Snitch class and is provided in that spirit. diff --git a/src/doc/3.11.3/_sources/operating/topo_changes.rst.txt b/src/doc/3.11.3/_sources/operating/topo_changes.rst.txt deleted file mode 100644 index c42708e02..000000000 --- a/src/doc/3.11.3/_sources/operating/topo_changes.rst.txt +++ /dev/null @@ -1,124 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _topology-changes: - -Adding, replacing, moving and removing nodes --------------------------------------------- - -Bootstrap -^^^^^^^^^ - -Adding new nodes is called "bootstrapping". The ``num_tokens`` parameter will define the amount of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for. - -Token allocation -~~~~~~~~~~~~~~~~ - -With the default token allocation algorithm the new node will pick ``num_tokens`` random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher amount of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead. - -On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option ``-Dcassandra.allocate_tokens_for_keyspace=``, where -```` is the keyspace from which the algorithm can find the load information to optimize token assignment for. - -Manual token assignment -""""""""""""""""""""""" - -You may specify a comma-separated list of tokens manually with the ``initial_token`` ``cassandra.yaml`` parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens. - -Range streaming -~~~~~~~~~~~~~~~~ - -After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state. - -In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag ``-Dcassandra.consistent.rangemovement=false``. - -Resuming failed/hanged bootstrap -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On 2.2+, if the bootstrap process fails, it's possible to resume bootstrap from the previous saved state by calling -``nodetool bootstrap resume``. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag -``-Dcassandra.reset_bootstrap_progress=true``. - -On lower versions, when the bootstrap proces fails it is recommended to wipe the node (remove all the data), and restart -the bootstrap process again. - -Manual bootstrapping -~~~~~~~~~~~~~~~~~~~~ - -It's possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -``auto_bootstrap: false``. This may be useful when restoring a node from a backup or creating a new data-center. - -Removing nodes -^^^^^^^^^^^^^^ - -You can take a node out of the cluster with ``nodetool decommission`` to a live node, or ``nodetool removenode`` (to any -other machine) to remove a dead one. 
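For example, with the dead node's Host ID taken from ``nodetool status`` (the ID below is a placeholder):

```bash
# On a node that is being retired gracefully (run locally on that node):
nodetool decommission

# To remove a node that is already dead, run from any live node,
# using the Host ID reported by `nodetool status`:
nodetool status
nodetool removenode 192a3b44-55c6-47d8-9e0f-1a2b3c4d5e6f
```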
This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas. - -No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually. - -Moving nodes -^^^^^^^^^^^^ - -When ``num_tokens: 1`` it's possible to move the node position in the ring with ``nodetool move``. Moving is both a -convenience over and more efficient than decommission + bootstrap. After moving a node, ``nodetool cleanup`` should be -run to remove any unnecessary data. - -Replacing a dead node -^^^^^^^^^^^^^^^^^^^^^ - -In order to replace a dead node, start cassandra with the JVM startup flag -``-Dcassandra.replace_address_first_boot=``. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node to be down. - -The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. The main difference -between normal bootstrapping of a new node is that this new node will not accept any writes during this phase. - -Once the bootstrapping is complete the node will be marked "UP", we rely on the hinted handoff's for making this node -consistent (since we don't accept writes since the start of the bootstrap). - -.. Note:: If the replacement process takes longer than ``max_hint_window_in_ms`` you **MUST** run repair to make the - replaced node consistent again, since it missed ongoing writes during bootstrapping. - -Monitoring progress -^^^^^^^^^^^^^^^^^^^ - -Bootstrap, replace, move and remove progress can be monitored using ``nodetool netstats`` which will show the progress -of the streaming operations. - -Cleanup data after range movements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As a safety measure, Cassandra does not automatically remove data from nodes that "lose" part of their token range due -to a range movement operation (bootstrap, move, replace). Run ``nodetool cleanup`` on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node. diff --git a/src/doc/3.11.3/_sources/tools/cqlsh.rst.txt b/src/doc/3.11.3/_sources/tools/cqlsh.rst.txt deleted file mode 100644 index 45e2db8fc..000000000 --- a/src/doc/3.11.3/_sources/tools/cqlsh.rst.txt +++ /dev/null @@ -1,455 +0,0 @@ -.. highlight:: none - -.. _cqlsh: - -cqlsh: the CQL shell --------------------- - -cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line. - - -Compatibility -^^^^^^^^^^^^^ - -cqlsh is compatible with Python 2.7. - -In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh make work with older or newer versions of Cassandra, but this is not officially supported. - - -Optional Dependencies -^^^^^^^^^^^^^^^^^^^^^ - -cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh. 
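Both optional dependencies described below can usually be added with ``pip`` for the Python 2.7 interpreter that cqlsh runs under (a sketch; installation details vary by platform and packaging):

```bash
# Optional cqlsh dependencies: timezone-aware timestamp display and faster COPY
pip install pytz cython
```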
- -pytz -~~~~ - -By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the `pytz `__ library must be installed. See the ``timezone`` option in cqlshrc_ for -specifying a timezone to use. - -cython -~~~~~~ - -The performance of cqlsh's ``COPY`` operations can be improved by installing `cython `__. This will -compile the python modules that are central to the performance of ``COPY``. - -cqlshrc -^^^^^^^ - -The ``cqlshrc`` file holds configuration options for cqlsh. By default this is in the user's home directory at -``~/.cassandra/cqlsh``, but a custom location can be specified with the ``--cqlshrc`` option. - -Example config values and documentation can be found in the ``conf/cqlshrc.sample`` file of a tarball installation. You -can also view the latest version of `cqlshrc online `__. - - -Command Line Options -^^^^^^^^^^^^^^^^^^^^ - -Usage: - -``cqlsh [options] [host [port]]`` - -Options: - -``-C`` ``--color`` - Force color output - -``--no-color`` - Disable color output - -``--browser`` - Specify the browser to use for displaying cqlsh help. This can be one of the `supported browser names - `__ (e.g. ``firefox``) or a browser path followed by ``%s`` (e.g. - ``/usr/bin/google-chrome-stable %s``). - -``--ssl`` - Use SSL when connecting to Cassandra - -``-u`` ``--user`` - Username to authenticate against Cassandra with - -``-p`` ``--password`` - Password to authenticate against Cassandra with, should - be used in conjunction with ``--user`` - -``-k`` ``--keyspace`` - Keyspace to authenticate to, should be used in conjunction - with ``--user`` - -``-f`` ``--file`` - Execute commands from the given file, then exit - -``--debug`` - Print additional debugging information - -``--encoding`` - Specify a non-default encoding for output (defaults to UTF-8) - -``--cqlshrc`` - Specify a non-default location for the ``cqlshrc`` file - -``-e`` ``--execute`` - Execute the given statement, then exit - -``--connect-timeout`` - Specify the connection timeout in seconds (defaults to 2s) - -``--request-timeout`` - Specify the request timeout in seconds (defaults to 10s) - -``-t`` ``--tty`` - Force tty mode (command prompt) - - -Special Commands -^^^^^^^^^^^^^^^^ - -In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below. - -``CONSISTENCY`` -~~~~~~~~~~~~~~~ - -`Usage`: ``CONSISTENCY `` - -Sets the consistency level for operations to follow. Valid arguments include: - -- ``ANY`` -- ``ONE`` -- ``TWO`` -- ``THREE`` -- ``QUORUM`` -- ``ALL`` -- ``LOCAL_QUORUM`` -- ``LOCAL_ONE`` -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -``SERIAL CONSISTENCY`` -~~~~~~~~~~~~~~~~~~~~~~ - -`Usage`: ``SERIAL CONSISTENCY `` - -Sets the serial consistency level for operations to follow. Valid arguments include: - -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of ``QUORUM`` (and -is successful), then a ``QUORUM`` read is guaranteed to see that write. 
But if the regular consistency level of that -write is ``ANY``, then only a read with a consistency level of ``SERIAL`` is guaranteed to see it (even a read with -consistency ``ALL`` is not guaranteed to be enough). - -``SHOW VERSION`` -~~~~~~~~~~~~~~~~ -Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:: - - cqlsh> SHOW VERSION - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - -``SHOW HOST`` -~~~~~~~~~~~~~ - -Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:: - - cqlsh> SHOW HOST - Connected to Prod_Cluster at 192.0.0.1:9042. - -``SHOW SESSION`` -~~~~~~~~~~~~~~~~ - -Pretty prints a specific tracing session. - -`Usage`: ``SHOW SESSION `` - -Example usage:: - - cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8 - - Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8 - - activity | timestamp | source | source_elapsed | client - -----------------------------------------------------------+----------------------------+-----------+----------------+----------- - Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 | 0 | 127.0.0.1 - Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 | 3843 | 127.0.0.1 - ... - - -``SOURCE`` -~~~~~~~~~~ - -Reads the contents of a file and executes each line as a CQL statement or special cqlsh command. - -`Usage`: ``SOURCE `` - -Example usage:: - - cqlsh> SOURCE '/home/thobbs/commands.cql' - -``CAPTURE`` -~~~~~~~~~~~ - -Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured. - -`Usage`:: - - CAPTURE ''; - CAPTURE OFF; - CAPTURE; - -That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation (``'~/mydir'``) is supported for referring to ``$HOME``. - -Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session. - -To stop capturing output and show it in the cqlsh session again, use ``CAPTURE OFF``. - -To inspect the current capture configuration, use ``CAPTURE`` with no arguments. - -``HELP`` -~~~~~~~~ - -Gives information about cqlsh commands. To see available topics, enter ``HELP`` without any arguments. To see help on a -topic, use ``HELP ``. Also see the ``--browser`` argument for controlling what browser is used to display help. - -``TRACING`` -~~~~~~~~~~~ - -Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed. - -`Usage`:: - - TRACING ON - TRACING OFF - -``PAGING`` -~~~~~~~~~~ - -Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it's a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once. - -`Usage`:: - - PAGING ON - PAGING OFF - PAGING - -``EXPAND`` -~~~~~~~~~~ - -Enables or disables vertical printing of rows. Enabling ``EXPAND`` is useful when many columns are fetched, or the -contents of a single column are large. - -`Usage`:: - - EXPAND ON - EXPAND OFF - -``LOGIN`` -~~~~~~~~~ - -Authenticate as a specified Cassandra user for the current session. 
- -`Usage`:: - - LOGIN [] - -``EXIT`` -~~~~~~~~~ - -Ends the current session and terminates the cqlsh process. - -`Usage`:: - - EXIT - QUIT - -``CLEAR`` -~~~~~~~~~ - -Clears the console. - -`Usage`:: - - CLEAR - CLS - -``DESCRIBE`` -~~~~~~~~~~~~ - -Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema. - -`Usage`:: - - DESCRIBE CLUSTER - DESCRIBE SCHEMA - DESCRIBE KEYSPACES - DESCRIBE KEYSPACE - DESCRIBE TABLES - DESCRIBE TABLE
- DESCRIBE INDEX - DESCRIBE MATERIALIZED VIEW - DESCRIBE TYPES - DESCRIBE TYPE - DESCRIBE FUNCTIONS - DESCRIBE FUNCTION - DESCRIBE AGGREGATES - DESCRIBE AGGREGATE - -In any of the commands, ``DESC`` may be used in place of ``DESCRIBE``. - -The ``DESCRIBE CLUSTER`` command prints the cluster name and partitioner:: - - cqlsh> DESCRIBE CLUSTER - - Cluster: Test Cluster - Partitioner: Murmur3Partitioner - -The ``DESCRIBE SCHEMA`` command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup. - -``COPY TO`` -~~~~~~~~~~~ - -Copies data from a table to a CSV file. - -`Usage`:: - - COPY
[(, ...)] TO WITH [AND ...] - -If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name. - - -The ```` should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value ``STDOUT`` (without single quotes) to print the CSV to stdout. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``MAXREQUESTS`` - The maximum number of token ranges to fetch simultaneously. Defaults to 6. - -``PAGESIZE`` - The number of rows to fetch in a single page. Defaults to 1000. - -``PAGETIMEOUT`` - By default the page timeout is 10 seconds per 1000 entries - in the page size or 10 seconds if pagesize is smaller. - -``BEGINTOKEN``, ``ENDTOKEN`` - Token range to export. Defaults to exporting the full ring. - -``MAXOUTPUTSIZE`` - The maximum size of the output file measured in number of lines; - beyond this maximum the output file will be split into segments. - -1 means unlimited, and is the default. - -``ENCODING`` - The encoding used for characters. Defaults to ``utf8``. - -``COPY FROM`` -~~~~~~~~~~~~~ -Copies data from a CSV file to a table. - -`Usage`:: - - COPY
[(, ...)] FROM WITH [AND ...] - -If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name. - -The ```` should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value ``STDIN`` (without single quotes) to read the -CSV data from stdin. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY FROM`` -``````````````````````` - -``INGESTRATE`` - The maximum number of rows to process per second. Defaults to 100000. - -``MAXROWS`` - The maximum number of rows to import. -1 means unlimited, and is the default. - -``SKIPROWS`` - A number of initial rows to skip. Defaults to 0. - -``SKIPCOLS`` - A comma-separated list of column names to ignore. By default, no columns are skipped. - -``MAXPARSEERRORS`` - The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default. - -``MAXINSERTERRORS`` - The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000. - -``ERRFILE`` = - A file to store all rows that could not be imported, by default this is ``import__
.err`` where ```` is - your keyspace and ``
`` is your table name. - -``MAXBATCHSIZE`` - The max number of rows inserted in a single batch. Defaults to 20. - -``MINBATCHSIZE`` - The min number of rows inserted in a single batch. Defaults to 2. - -``CHUNKSIZE`` - The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000. - -.. _shared-copy-options: - -Shared COPY Options -``````````````````` - -Options that are common to both ``COPY TO`` and ``COPY FROM``. - -``NULLVAL`` - The string placeholder for null values. Defaults to ``null``. - -``HEADER`` - For ``COPY TO``, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, - specifies whether the first line in the CSV input file contains column names. Defaults to ``false``. - -``DECIMALSEP`` - The character that is used as the decimal point separator. Defaults to ``.``. - -``THOUSANDSSEP`` - The character that is used to separate thousands. Defaults to the empty string. - -``BOOLSTYlE`` - The string literal format for boolean values. Defaults to ``True,False``. - -``NUMPROCESSES`` - The number of child worker processes to create for ``COPY`` tasks. Defaults to a max of 4 for ``COPY FROM`` and 16 - for ``COPY TO``. However, at most (num_cores - 1) processes will be created. - -``MAXATTEMPTS`` - The maximum number of failed attempts to fetch a range of data (when using ``COPY TO``) or insert a chunk of data - (when using ``COPY FROM``) before giving up. Defaults to 5. - -``REPORTFREQUENCY`` - How often status updates are refreshed, in seconds. Defaults to 0.25. - -``RATEFILE`` - An optional file to output rate statistics to. By default, statistics are not output to a file. diff --git a/src/doc/3.11.3/_sources/tools/index.rst.txt b/src/doc/3.11.3/_sources/tools/index.rst.txt deleted file mode 100644 index 5a5e4d5ae..000000000 --- a/src/doc/3.11.3/_sources/tools/index.rst.txt +++ /dev/null @@ -1,26 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Tools -=============== - -This section describes the command line tools provided with Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cqlsh - nodetool diff --git a/src/doc/3.11.3/_sources/tools/nodetool.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool.rst.txt deleted file mode 100644 index e37303110..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _nodetool: - -Nodetool --------- - -.. todo:: Try to autogenerate this from Nodetool’s help. diff --git a/src/doc/3.11.3/_sources/tools/nodetool/assassinate.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/assassinate.rst.txt deleted file mode 100644 index 7051170b3..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/assassinate.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_assassinate: - -assassinate -------- - -Usage ---------- - -.. include:: assassinate.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/bootstrap.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/bootstrap.rst.txt deleted file mode 100644 index 1b2a0efc6..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/bootstrap.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_bootstrap: - -bootstrap -------- - -Usage ---------- - -.. include:: bootstrap.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/cleanup.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/cleanup.rst.txt deleted file mode 100644 index 70ba8f9f7..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/cleanup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_cleanup: - -cleanup -------- - -Usage ---------- - -.. include:: cleanup.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/clearsnapshot.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/clearsnapshot.rst.txt deleted file mode 100644 index 6d195782f..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/clearsnapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clearsnapshot: - -clearsnapshot -------- - -Usage ---------- - -.. include:: clearsnapshot.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/clientstats.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/clientstats.rst.txt deleted file mode 100644 index 98eecb8cd..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/clientstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clientstats: - -clientstats -------- - -Usage ---------- - -.. include:: clientstats.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/compact.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/compact.rst.txt deleted file mode 100644 index a26347c57..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/compact.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compact: - -compact -------- - -Usage ---------- - -.. include:: compact.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/compactionhistory.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/compactionhistory.rst.txt deleted file mode 100644 index 87bac5f68..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/compactionhistory.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionhistory: - -compactionhistory -------- - -Usage ---------- - -.. 
include:: compactionhistory.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/compactionstats.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/compactionstats.rst.txt deleted file mode 100644 index c70ff6ba7..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/compactionstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionstats: - -compactionstats -------- - -Usage ---------- - -.. include:: compactionstats.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/decommission.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/decommission.rst.txt deleted file mode 100644 index 390da83a4..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/decommission.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_decommission: - -decommission -------- - -Usage ---------- - -.. include:: decommission.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/describecluster.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/describecluster.rst.txt deleted file mode 100644 index 0effb66ff..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/describecluster.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describecluster: - -describecluster -------- - -Usage ---------- - -.. include:: describecluster.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/describering.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/describering.rst.txt deleted file mode 100644 index ff66d9d43..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/describering.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describering: - -describering -------- - -Usage ---------- - -.. include:: describering.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/disableauditlog.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/disableauditlog.rst.txt deleted file mode 100644 index 76c9882e2..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/disableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableauditlog: - -disableauditlog -------- - -Usage ---------- - -.. include:: disableauditlog.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/disableautocompaction.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/disableautocompaction.rst.txt deleted file mode 100644 index d13e61017..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/disableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableautocompaction: - -disableautocompaction -------- - -Usage ---------- - -.. include:: disableautocompaction.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/disablebackup.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/disablebackup.rst.txt deleted file mode 100644 index bc47423eb..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/disablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebackup: - -disablebackup -------- - -Usage ---------- - -.. include:: disablebackup.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/disablebinary.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/disablebinary.rst.txt deleted file mode 100644 index fb842cca5..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/disablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebinary: - -disablebinary -------- - -Usage ---------- - -.. 
include:: disablebinary.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/disablefullquerylog.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/disablefullquerylog.rst.txt deleted file mode 100644 index 572f133d6..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/disablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablefullquerylog: - -disablefullquerylog -------- - -Usage ---------- - -.. include:: disablefullquerylog.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/disablegossip.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/disablegossip.rst.txt deleted file mode 100644 index 0c6e78d6b..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/disablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablegossip: - -disablegossip -------- - -Usage ---------- - -.. include:: disablegossip.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/disablehandoff.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/disablehandoff.rst.txt deleted file mode 100644 index 51e6934b6..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/disablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehandoff: - -disablehandoff -------- - -Usage ---------- - -.. include:: disablehandoff.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/disablehintsfordc.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/disablehintsfordc.rst.txt deleted file mode 100644 index 96e00c296..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/disablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehintsfordc: - -disablehintsfordc -------- - -Usage ---------- - -.. include:: disablehintsfordc.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/disableoldprotocolversions.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/disableoldprotocolversions.rst.txt deleted file mode 100644 index f9c511d9d..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/disableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableoldprotocolversions: - -disableoldprotocolversions -------- - -Usage ---------- - -.. include:: disableoldprotocolversions.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/drain.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/drain.rst.txt deleted file mode 100644 index fc1c6ee5a..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/drain.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_drain: - -drain -------- - -Usage ---------- - -.. include:: drain.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/enableauditlog.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/enableauditlog.rst.txt deleted file mode 100644 index a4fa50f54..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/enableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableauditlog: - -enableauditlog -------- - -Usage ---------- - -.. include:: enableauditlog.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/enableautocompaction.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/enableautocompaction.rst.txt deleted file mode 100644 index 444486ea0..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/enableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableautocompaction: - -enableautocompaction -------- - -Usage ---------- - -.. 
include:: enableautocompaction.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/enablebackup.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/enablebackup.rst.txt deleted file mode 100644 index afdf82804..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/enablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebackup: - -enablebackup -------- - -Usage ---------- - -.. include:: enablebackup.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/enablebinary.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/enablebinary.rst.txt deleted file mode 100644 index b5731196c..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/enablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebinary: - -enablebinary -------- - -Usage ---------- - -.. include:: enablebinary.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/enablefullquerylog.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/enablefullquerylog.rst.txt deleted file mode 100644 index 0f93bae4e..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/enablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablefullquerylog: - -enablefullquerylog -------- - -Usage ---------- - -.. include:: enablefullquerylog.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/enablegossip.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/enablegossip.rst.txt deleted file mode 100644 index b4d23e17e..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/enablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablegossip: - -enablegossip -------- - -Usage ---------- - -.. include:: enablegossip.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/enablehandoff.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/enablehandoff.rst.txt deleted file mode 100644 index 1dd58dffa..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/enablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehandoff: - -enablehandoff -------- - -Usage ---------- - -.. include:: enablehandoff.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/enablehintsfordc.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/enablehintsfordc.rst.txt deleted file mode 100644 index 0c6836e71..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/enablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehintsfordc: - -enablehintsfordc -------- - -Usage ---------- - -.. include:: enablehintsfordc.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/enableoldprotocolversions.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/enableoldprotocolversions.rst.txt deleted file mode 100644 index 83c4c9062..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/enableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableoldprotocolversions: - -enableoldprotocolversions -------- - -Usage ---------- - -.. include:: enableoldprotocolversions.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/failuredetector.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/failuredetector.rst.txt deleted file mode 100644 index 18753213c..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/failuredetector.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_failuredetector: - -failuredetector -------- - -Usage ---------- - -.. 
include:: failuredetector.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/flush.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/flush.rst.txt deleted file mode 100644 index 5713828e4..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/flush.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_flush: - -flush -------- - -Usage ---------- - -.. include:: flush.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/garbagecollect.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/garbagecollect.rst.txt deleted file mode 100644 index 35bd5a3ec..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/garbagecollect.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_garbagecollect: - -garbagecollect -------- - -Usage ---------- - -.. include:: garbagecollect.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/gcstats.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/gcstats.rst.txt deleted file mode 100644 index 3e4b45930..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/gcstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gcstats: - -gcstats -------- - -Usage ---------- - -.. include:: gcstats.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 0e585b48f..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getbatchlogreplaythrottle: - -getbatchlogreplaythrottle -------- - -Usage ---------- - -.. include:: getbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getcompactionthreshold.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getcompactionthreshold.rst.txt deleted file mode 100644 index 49b819784..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthreshold: - -getcompactionthreshold -------- - -Usage ---------- - -.. include:: getcompactionthreshold.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getcompactionthroughput.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getcompactionthroughput.rst.txt deleted file mode 100644 index 0f474a49a..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthroughput: - -getcompactionthroughput -------- - -Usage ---------- - -.. include:: getcompactionthroughput.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getconcurrentcompactors.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getconcurrentcompactors.rst.txt deleted file mode 100644 index fb7b9cdf2..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentcompactors: - -getconcurrentcompactors -------- - -Usage ---------- - -.. include:: getconcurrentcompactors.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt deleted file mode 100644 index cc4ca6d50..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentviewbuilders: - -getconcurrentviewbuilders -------- - -Usage ---------- - -.. 
include:: getconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getendpoints.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getendpoints.rst.txt deleted file mode 100644 index 4eef62cce..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getendpoints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getendpoints: - -getendpoints -------- - -Usage ---------- - -.. include:: getendpoints.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt deleted file mode 100644 index 8c325785b..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getinterdcstreamthroughput: - -getinterdcstreamthroughput -------- - -Usage ---------- - -.. include:: getinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getlogginglevels.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getlogginglevels.rst.txt deleted file mode 100644 index 331b861e4..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getlogginglevels.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getlogginglevels: - -getlogginglevels -------- - -Usage ---------- - -.. include:: getlogginglevels.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getmaxhintwindow.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getmaxhintwindow.rst.txt deleted file mode 100644 index f5e1c63bf..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getmaxhintwindow: - -getmaxhintwindow -------- - -Usage ---------- - -.. include:: getmaxhintwindow.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getreplicas.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getreplicas.rst.txt deleted file mode 100644 index dceb3882a..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getreplicas.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getreplicas: - -getreplicas -------- - -Usage ---------- - -.. include:: getreplicas.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getseeds.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getseeds.rst.txt deleted file mode 100644 index 059e857f2..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getseeds: - -getseeds -------- - -Usage ---------- - -.. include:: getseeds.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getsstables.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getsstables.rst.txt deleted file mode 100644 index a153e04a7..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getsstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getsstables: - -getsstables -------- - -Usage ---------- - -.. include:: getsstables.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/getstreamthroughput.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/getstreamthroughput.rst.txt deleted file mode 100644 index 0f65919d3..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/getstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getstreamthroughput: - -getstreamthroughput -------- - -Usage ---------- - -.. 
include:: getstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/gettimeout.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/gettimeout.rst.txt deleted file mode 100644 index c723d5d0d..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/gettimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettimeout: - -gettimeout -------- - -Usage ---------- - -.. include:: gettimeout.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/gettraceprobability.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/gettraceprobability.rst.txt deleted file mode 100644 index 5141fb657..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/gettraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettraceprobability: - -gettraceprobability -------- - -Usage ---------- - -.. include:: gettraceprobability.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/gossipinfo.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/gossipinfo.rst.txt deleted file mode 100644 index 89abe956a..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/gossipinfo.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gossipinfo: - -gossipinfo -------- - -Usage ---------- - -.. include:: gossipinfo.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/handoffwindow.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/handoffwindow.rst.txt deleted file mode 100644 index 0512f6b13..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/handoffwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_handoffwindow: - -handoffwindow -------- - -Usage ---------- - -.. include:: handoffwindow.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/help.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/help.rst.txt deleted file mode 100644 index ae491e1ce..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/help.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_help: - -help -------- - -Usage ---------- - -.. include:: help.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/import.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/import.rst.txt deleted file mode 100644 index 4c7414f00..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/import.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_import: - -import -------- - -Usage ---------- - -.. include:: import.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/info.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/info.rst.txt deleted file mode 100644 index 8a5057714..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/info.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_info: - -info -------- - -Usage ---------- - -.. include:: info.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/invalidatecountercache.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/invalidatecountercache.rst.txt deleted file mode 100644 index 42a5a9847..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/invalidatecountercache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatecountercache: - -invalidatecountercache -------- - -Usage ---------- - -.. include:: invalidatecountercache.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/invalidatekeycache.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/invalidatekeycache.rst.txt deleted file mode 100644 index d244d3944..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/invalidatekeycache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. 
_nodetool_invalidatekeycache: - -invalidatekeycache -------- - -Usage ---------- - -.. include:: invalidatekeycache.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/invalidaterowcache.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/invalidaterowcache.rst.txt deleted file mode 100644 index 79e0e0570..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/invalidaterowcache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidaterowcache: - -invalidaterowcache -------- - -Usage ---------- - -.. include:: invalidaterowcache.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/join.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/join.rst.txt deleted file mode 100644 index 09c0c5f0b..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/join.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_join: - -join -------- - -Usage ---------- - -.. include:: join.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/listsnapshots.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/listsnapshots.rst.txt deleted file mode 100644 index c6f084315..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/listsnapshots.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_listsnapshots: - -listsnapshots -------- - -Usage ---------- - -.. include:: listsnapshots.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/move.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/move.rst.txt deleted file mode 100644 index 952c20170..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/move.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_move: - -move -------- - -Usage ---------- - -.. include:: move.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/netstats.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/netstats.rst.txt deleted file mode 100644 index 16e46ec7f..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/netstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_netstats: - -netstats -------- - -Usage ---------- - -.. include:: netstats.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/nodetool.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/nodetool.rst.txt deleted file mode 100644 index 15c84ccce..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/nodetool.rst.txt +++ /dev/null @@ -1,250 +0,0 @@ -.. _nodetool - -Nodetool -------- - -Usage ---------- - -usage: nodetool [(-pp | --print-port)] - [(-pwf | --password-file )] - [(-p | --port )] [(-u | --username )] - [(-h | --host )] [(-pw | --password )] - [] - -The most commonly used nodetool commands are: - - :doc:`assassinate` - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode - - :doc:`bootstrap` - Monitor/manage node's bootstrap process - - :doc:`cleanup` - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces - - :doc:`clearsnapshot` - Remove the snapshot with the given name from the given keyspaces. 
If no snapshotName is specified we will remove all snapshots - - :doc:`clientstats` - Print information about connected clients - - :doc:`compact` - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables - - :doc:`compactionhistory` - Print history of compaction - - :doc:`compactionstats` - Print statistics on compactions - - :doc:`decommission` - Decommission the *node I am connecting to* - - :doc:`describecluster` - Print the name, snitch, partitioner and schema version of a cluster - - :doc:`describering` - Shows the token ranges info of a given keyspace - - :doc:`disableauditlog` - Disable the audit log - - :doc:`disableautocompaction` - Disable autocompaction for the given keyspace and table - - :doc:`disablebackup` - Disable incremental backup - - :doc:`disablebinary` - Disable native transport (binary protocol) - - :doc:`disablefullquerylog` - Disable the full query log - - :doc:`disablegossip` - Disable gossip (effectively marking the node down) - - :doc:`disablehandoff` - Disable storing hinted handoffs - - :doc:`disablehintsfordc` - Disable hints for a data center - - :doc:`disableoldprotocolversions` - Disable old protocol versions - - :doc:`drain` - Drain the node (stop accepting writes and flush all tables) - - :doc:`enableauditlog` - Enable the audit log - - :doc:`enableautocompaction` - Enable autocompaction for the given keyspace and table - - :doc:`enablebackup` - Enable incremental backup - - :doc:`enablebinary` - Reenable native transport (binary protocol) - - :doc:`enablefullquerylog` - Enable full query logging, defaults for the options are configured in cassandra.yaml - - :doc:`enablegossip` - Reenable gossip - - :doc:`enablehandoff` - Reenable future hints storing on the current node - - :doc:`enablehintsfordc` - Enable hints for a data center that was previsouly disabled - - :doc:`enableoldprotocolversions` - Enable old protocol versions - - :doc:`failuredetector` - Shows the failure detector information for the cluster - - :doc:`flush` - Flush one or more tables - - :doc:`garbagecollect` - Remove deleted data from one or more tables - - :doc:`gcstats` - Print GC Statistics - - :doc:`getbatchlogreplaythrottle` - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster. - - :doc:`getcompactionthreshold` - Print min and max compaction thresholds for a given table - - :doc:`getcompactionthroughput` - Print the MB/s throughput cap for compaction in the system - - :doc:`getconcurrentcompactors` - Get the number of concurrent compactors in the system. 
- - :doc:`getconcurrentviewbuilders` - Get the number of concurrent view builders in the system - - :doc:`getendpoints` - Print the end points that owns the key - - :doc:`getinterdcstreamthroughput` - Print the Mb/s throughput cap for inter-datacenter streaming in the system - - :doc:`getlogginglevels` - Get the runtime logging levels - - :doc:`getmaxhintwindow` - Print the max hint window in ms - - :doc:`getreplicas` - Print replicas for a given key - - :doc:`getseeds` - Get the currently in use seed node IP list excluding the node IP - - :doc:`getsstables` - Print the sstable filenames that own the key - - :doc:`getstreamthroughput` - Print the Mb/s throughput cap for streaming in the system - - :doc:`gettimeout` - Print the timeout of the given type in ms - - :doc:`gettraceprobability` - Print the current trace probability value - - :doc:`gossipinfo` - Shows the gossip information for the cluster - - :doc:`handoffwindow` - Print current hinted handoff window - - :doc:`help` - Display help information - - :doc:`import` - Import new SSTables to the system - - :doc:`info` - Print node information (uptime, load, ...) - - :doc:`invalidatecountercache` - Invalidate the counter cache - - :doc:`invalidatekeycache` - Invalidate the key cache - - :doc:`invalidaterowcache` - Invalidate the row cache - - :doc:`join` - Join the ring - - :doc:`listsnapshots` - Lists all the snapshots along with the size on disk and true size. - - :doc:`move` - Move node on the token ring to a new token - - :doc:`netstats` - Print network information on provided host (connecting node by default) - - :doc:`pausehandoff` - Pause hints delivery process - - :doc:`profileload` - Low footprint profiling of activity for a period of time - - :doc:`proxyhistograms` - Print statistic histograms for network operations - - :doc:`rangekeysample` - Shows the sampled keys held across all keyspaces - - :doc:`rebuild` - Rebuild data by streaming from other nodes (similarly to bootstrap) - - :doc:`rebuild_index` - A full rebuild of native secondary indexes for a given table - - :doc:`refresh` - Load newly placed SSTables to the system without restart - - :doc:`refreshsizeestimates` - Refresh system.size_estimates - - :doc:`reloadlocalschema` - Reload local node schema from system tables - - :doc:`reloadseeds` - Reload the seed node list from the seed node provider - - :doc:`reloadssl` - Signals Cassandra to reload SSL certificates - - :doc:`reloadtriggers` - Reload trigger classes - - :doc:`relocatesstables` - Relocates sstables to the correct disk - - :doc:`removenode` - Show status of current node removal, force completion of pending removal or remove provided ID - - :doc:`repair` - Repair one or more tables - - :doc:`repair_admin` - - :doc:`list` - and fail incremental repair sessions - - :doc:`replaybatchlog` - Kick off batchlog replay and wait for finish - - :doc:`resetfullquerylog` - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX - - :doc:`resetlocalschema` - Reset node's local schema and resync - - :doc:`resumehandoff` - Resume hints delivery process - - :doc:`ring` - Print information about the token ring - - :doc:`scrub` - Scrub (rebuild sstables for) one or more tables - - :doc:`setbatchlogreplaythrottle` - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster. 
- - :doc:`setcachecapacity` - Set global key, row, and counter cache capacities (in MB units) - - :doc:`setcachekeystosave` - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable - - :doc:`setcompactionthreshold` - Set min and max compaction thresholds for a given table - - :doc:`setcompactionthroughput` - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling - - :doc:`setconcurrentcompactors` - Set number of concurrent compactors in the system. - - :doc:`setconcurrentviewbuilders` - Set the number of concurrent view builders in the system - - :doc:`sethintedhandoffthrottlekb` - Set hinted handoff throttle in kb per second, per delivery thread. - - :doc:`setinterdcstreamthroughput` - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling - - :doc:`setlogginglevel` - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters. - - :doc:`setmaxhintwindow` - Set the specified max hint window in ms - - :doc:`setstreamthroughput` - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling - - :doc:`settimeout` - Set the specified timeout in ms, or 0 to disable timeout - - :doc:`settraceprobability` - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default - - :doc:`snapshot` - Take a snapshot of specified keyspaces or a snapshot of the specified table - - :doc:`status` - Print cluster information (state, load, IDs, ...) - - :doc:`statusautocompaction` - - :doc:`status` - of autocompaction of the given keyspace and table - - :doc:`statusbackup` - Status of incremental backup - - :doc:`statusbinary` - Status of native transport (binary protocol) - - :doc:`statusgossip` - Status of gossip - - :doc:`statushandoff` - Status of storing future hints on the current node - - :doc:`stop` - Stop compaction - - :doc:`stopdaemon` - Stop cassandra daemon - - :doc:`tablehistograms` - Print statistic histograms for a given table - - :doc:`tablestats` - Print statistics on tables - - :doc:`toppartitions` - Sample and print the most active partitions - - :doc:`tpstats` - Print usage statistics of thread pools - - :doc:`truncatehints` - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified. - - :doc:`upgradesstables` - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version) - - :doc:`verify` - Verify (check data checksum for) one or more tables - - :doc:`version` - Print cassandra version - - :doc:`viewbuildstatus` - Show progress of a materialized view build - -See 'nodetool help ' for more information on a specific command. - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/pausehandoff.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/pausehandoff.rst.txt deleted file mode 100644 index 61de9ed15..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/pausehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_pausehandoff: - -pausehandoff -------- - -Usage ---------- - -.. include:: pausehandoff.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/profileload.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/profileload.rst.txt deleted file mode 100644 index 8572d8bbd..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/profileload.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. 
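The deleted `nodetool.rst` page above lists the commonly used `nodetool` commands together with the shared connection flags (`-h`/`--host`, `-p`/`--port`, `-u`/`--username`, `-pw`/`--password`). A minimal sketch of that invocation pattern, assuming a node reachable over JMX on the default port 7199; the host is a placeholder:

```bash
# Print cluster state for each node, then the current compaction throughput cap.
nodetool -h 127.0.0.1 -p 7199 status
nodetool -h 127.0.0.1 -p 7199 getcompactionthroughput
```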
_nodetool_profileload: - -profileload -------- - -Usage ---------- - -.. include:: profileload.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/proxyhistograms.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/proxyhistograms.rst.txt deleted file mode 100644 index f501acf54..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/proxyhistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_proxyhistograms: - -proxyhistograms -------- - -Usage ---------- - -.. include:: proxyhistograms.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/rangekeysample.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/rangekeysample.rst.txt deleted file mode 100644 index 6a34cd264..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/rangekeysample.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rangekeysample: - -rangekeysample -------- - -Usage ---------- - -.. include:: rangekeysample.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/rebuild.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/rebuild.rst.txt deleted file mode 100644 index 7a94ce4ed..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/rebuild.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild: - -rebuild -------- - -Usage ---------- - -.. include:: rebuild.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/rebuild_index.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/rebuild_index.rst.txt deleted file mode 100644 index c3be5ba6f..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/rebuild_index.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild_index: - -rebuild_index -------- - -Usage ---------- - -.. include:: rebuild_index.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/refresh.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/refresh.rst.txt deleted file mode 100644 index f68f040cd..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/refresh.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refresh: - -refresh -------- - -Usage ---------- - -.. include:: refresh.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/refreshsizeestimates.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/refreshsizeestimates.rst.txt deleted file mode 100644 index 263194300..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/refreshsizeestimates.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refreshsizeestimates: - -refreshsizeestimates -------- - -Usage ---------- - -.. include:: refreshsizeestimates.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/reloadlocalschema.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/reloadlocalschema.rst.txt deleted file mode 100644 index a73a33999..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/reloadlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadlocalschema: - -reloadlocalschema -------- - -Usage ---------- - -.. include:: reloadlocalschema.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/reloadseeds.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/reloadseeds.rst.txt deleted file mode 100644 index 47708b1bb..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/reloadseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadseeds: - -reloadseeds -------- - -Usage ---------- - -.. 
include:: reloadseeds.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/reloadssl.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/reloadssl.rst.txt deleted file mode 100644 index c8e526ec7..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/reloadssl.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadssl: - -reloadssl -------- - -Usage ---------- - -.. include:: reloadssl.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/reloadtriggers.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/reloadtriggers.rst.txt deleted file mode 100644 index 747168902..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/reloadtriggers.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadtriggers: - -reloadtriggers -------- - -Usage ---------- - -.. include:: reloadtriggers.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/relocatesstables.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/relocatesstables.rst.txt deleted file mode 100644 index c02c7009b..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/relocatesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_relocatesstables: - -relocatesstables -------- - -Usage ---------- - -.. include:: relocatesstables.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/removenode.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/removenode.rst.txt deleted file mode 100644 index 9e6a90cd6..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/removenode.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_removenode: - -removenode -------- - -Usage ---------- - -.. include:: removenode.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/repair.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/repair.rst.txt deleted file mode 100644 index 8750bb7ae..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/repair.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair: - -repair -------- - -Usage ---------- - -.. include:: repair.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/repair_admin.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/repair_admin.rst.txt deleted file mode 100644 index 56d1fcff4..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/repair_admin.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair_admin: - -repair_admin -------- - -Usage ---------- - -.. include:: repair_admin.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/replaybatchlog.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/replaybatchlog.rst.txt deleted file mode 100644 index 953272582..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/replaybatchlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_replaybatchlog: - -replaybatchlog -------- - -Usage ---------- - -.. include:: replaybatchlog.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/resetfullquerylog.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/resetfullquerylog.rst.txt deleted file mode 100644 index fa972b44a..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/resetfullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetfullquerylog: - -resetfullquerylog -------- - -Usage ---------- - -.. 
include:: resetfullquerylog.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/resetlocalschema.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/resetlocalschema.rst.txt deleted file mode 100644 index 433a92ccb..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/resetlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetlocalschema: - -resetlocalschema -------- - -Usage ---------- - -.. include:: resetlocalschema.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/resumehandoff.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/resumehandoff.rst.txt deleted file mode 100644 index cb3b64caf..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/resumehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resumehandoff: - -resumehandoff -------- - -Usage ---------- - -.. include:: resumehandoff.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/ring.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/ring.rst.txt deleted file mode 100644 index 15d6519fb..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/ring.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_ring: - -ring -------- - -Usage ---------- - -.. include:: ring.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/scrub.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/scrub.rst.txt deleted file mode 100644 index 1c65d8018..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/scrub.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_scrub: - -scrub -------- - -Usage ---------- - -.. include:: scrub.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 4b8650469..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setbatchlogreplaythrottle: - -setbatchlogreplaythrottle -------- - -Usage ---------- - -.. include:: setbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setcachecapacity.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setcachecapacity.rst.txt deleted file mode 100644 index 3cb6f1746..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setcachecapacity.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachecapacity: - -setcachecapacity -------- - -Usage ---------- - -.. include:: setcachecapacity.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setcachekeystosave.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setcachekeystosave.rst.txt deleted file mode 100644 index 48ffec63d..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setcachekeystosave.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachekeystosave: - -setcachekeystosave -------- - -Usage ---------- - -.. include:: setcachekeystosave.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setcompactionthreshold.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setcompactionthreshold.rst.txt deleted file mode 100644 index 861bad69f..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthreshold: - -setcompactionthreshold -------- - -Usage ---------- - -.. 
include:: setcompactionthreshold.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setcompactionthroughput.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setcompactionthroughput.rst.txt deleted file mode 100644 index 811cfe183..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthroughput: - -setcompactionthroughput -------- - -Usage ---------- - -.. include:: setcompactionthroughput.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setconcurrentcompactors.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setconcurrentcompactors.rst.txt deleted file mode 100644 index bb1a0f5d3..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentcompactors: - -setconcurrentcompactors -------- - -Usage ---------- - -.. include:: setconcurrentcompactors.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt deleted file mode 100644 index fb926b2da..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentviewbuilders: - -setconcurrentviewbuilders -------- - -Usage ---------- - -.. include:: setconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt deleted file mode 100644 index 0d921857e..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sethintedhandoffthrottlekb: - -sethintedhandoffthrottlekb -------- - -Usage ---------- - -.. include:: sethintedhandoffthrottlekb.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt deleted file mode 100644 index 4caf6a120..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setinterdcstreamthroughput: - -setinterdcstreamthroughput -------- - -Usage ---------- - -.. include:: setinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setlogginglevel.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setlogginglevel.rst.txt deleted file mode 100644 index c029d11ee..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setlogginglevel.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setlogginglevel: - -setlogginglevel -------- - -Usage ---------- - -.. include:: setlogginglevel.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setmaxhintwindow.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setmaxhintwindow.rst.txt deleted file mode 100644 index c9fb0bce1..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setmaxhintwindow: - -setmaxhintwindow -------- - -Usage ---------- - -.. 
include:: setmaxhintwindow.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/setstreamthroughput.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/setstreamthroughput.rst.txt deleted file mode 100644 index 7394458f1..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/setstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setstreamthroughput: - -setstreamthroughput -------- - -Usage ---------- - -.. include:: setstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/settimeout.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/settimeout.rst.txt deleted file mode 100644 index e1b6d7eb9..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/settimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settimeout: - -settimeout -------- - -Usage ---------- - -.. include:: settimeout.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/settraceprobability.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/settraceprobability.rst.txt deleted file mode 100644 index ff89f48c2..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/settraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settraceprobability: - -settraceprobability -------- - -Usage ---------- - -.. include:: settraceprobability.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/snapshot.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/snapshot.rst.txt deleted file mode 100644 index 936489908..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/snapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_snapshot: - -snapshot -------- - -Usage ---------- - -.. include:: snapshot.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/status.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/status.rst.txt deleted file mode 100644 index dd517851a..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/status.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_status: - -status -------- - -Usage ---------- - -.. include:: status.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/statusautocompaction.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/statusautocompaction.rst.txt deleted file mode 100644 index 5a720340e..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/statusautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusautocompaction: - -statusautocompaction -------- - -Usage ---------- - -.. include:: statusautocompaction.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/statusbackup.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/statusbackup.rst.txt deleted file mode 100644 index a720fc8b8..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/statusbackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbackup: - -statusbackup -------- - -Usage ---------- - -.. include:: statusbackup.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/statusbinary.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/statusbinary.rst.txt deleted file mode 100644 index 9902499b4..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/statusbinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbinary: - -statusbinary -------- - -Usage ---------- - -.. 
include:: statusbinary.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/statusgossip.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/statusgossip.rst.txt deleted file mode 100644 index 095dee0cd..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/statusgossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusgossip: - -statusgossip -------- - -Usage ---------- - -.. include:: statusgossip.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/statushandoff.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/statushandoff.rst.txt deleted file mode 100644 index 64394f715..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/statushandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statushandoff: - -statushandoff -------- - -Usage ---------- - -.. include:: statushandoff.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/stop.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/stop.rst.txt deleted file mode 100644 index 181e41f1d..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/stop.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stop: - -stop -------- - -Usage ---------- - -.. include:: stop.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/stopdaemon.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/stopdaemon.rst.txt deleted file mode 100644 index 301aacd50..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/stopdaemon.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stopdaemon: - -stopdaemon -------- - -Usage ---------- - -.. include:: stopdaemon.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/tablehistograms.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/tablehistograms.rst.txt deleted file mode 100644 index 4a0332dd3..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/tablehistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablehistograms: - -tablehistograms -------- - -Usage ---------- - -.. include:: tablehistograms.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/tablestats.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/tablestats.rst.txt deleted file mode 100644 index 68159b950..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/tablestats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablestats: - -tablestats -------- - -Usage ---------- - -.. include:: tablestats.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/toppartitions.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/toppartitions.rst.txt deleted file mode 100644 index bf7e4d985..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/toppartitions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_toppartitions: - -toppartitions -------- - -Usage ---------- - -.. include:: toppartitions.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/tpstats.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/tpstats.rst.txt deleted file mode 100644 index c6b662012..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/tpstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tpstats: - -tpstats -------- - -Usage ---------- - -.. include:: tpstats.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/truncatehints.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/truncatehints.rst.txt deleted file mode 100644 index 12728a7c3..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/truncatehints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. 
_nodetool_truncatehints: - -truncatehints -------- - -Usage ---------- - -.. include:: truncatehints.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/upgradesstables.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/upgradesstables.rst.txt deleted file mode 100644 index 87ca6ce79..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/upgradesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_upgradesstables: - -upgradesstables -------- - -Usage ---------- - -.. include:: upgradesstables.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/verify.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/verify.rst.txt deleted file mode 100644 index 968f1624a..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/verify.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_verify: - -verify -------- - -Usage ---------- - -.. include:: verify.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/version.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/version.rst.txt deleted file mode 100644 index fca4e3f44..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/version.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_version: - -version -------- - -Usage ---------- - -.. include:: version.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/tools/nodetool/viewbuildstatus.rst.txt b/src/doc/3.11.3/_sources/tools/nodetool/viewbuildstatus.rst.txt deleted file mode 100644 index 2ce1c552f..000000000 --- a/src/doc/3.11.3/_sources/tools/nodetool/viewbuildstatus.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_viewbuildstatus: - -viewbuildstatus -------- - -Usage ---------- - -.. include:: viewbuildstatus.txt - :literal: - diff --git a/src/doc/3.11.3/_sources/troubleshooting/index.rst.txt b/src/doc/3.11.3/_sources/troubleshooting/index.rst.txt deleted file mode 100644 index 2e5cf106d..000000000 --- a/src/doc/3.11.3/_sources/troubleshooting/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Troubleshooting -=============== - -.. TODO: todo diff --git a/src/doc/3.11.3/_static/ajax-loader.gif b/src/doc/3.11.3/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab..000000000 Binary files a/src/doc/3.11.3/_static/ajax-loader.gif and /dev/null differ diff --git a/src/doc/3.11.3/_static/basic.css b/src/doc/3.11.3/_static/basic.css deleted file mode 100644 index dc88b5a2d..000000000 --- a/src/doc/3.11.3/_static/basic.css +++ /dev/null @@ -1,632 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox input[type="text"] { - width: 170px; -} - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, 
-dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, .highlighted { - background-color: 
#fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/src/doc/3.11.3/_static/comment-bright.png b/src/doc/3.11.3/_static/comment-bright.png deleted file mode 100644 index 15e27edb1..000000000 Binary files a/src/doc/3.11.3/_static/comment-bright.png and /dev/null differ diff --git a/src/doc/3.11.3/_static/comment-close.png b/src/doc/3.11.3/_static/comment-close.png deleted file mode 100644 index 4d91bcf57..000000000 Binary files a/src/doc/3.11.3/_static/comment-close.png and /dev/null differ diff --git a/src/doc/3.11.3/_static/comment.png b/src/doc/3.11.3/_static/comment.png deleted file mode 100644 index dfbc0cbd5..000000000 Binary files a/src/doc/3.11.3/_static/comment.png and /dev/null 
differ diff --git a/src/doc/3.11.3/_static/doctools.js b/src/doc/3.11.3/_static/doctools.js deleted file mode 100644 index 565497723..000000000 --- a/src/doc/3.11.3/_static/doctools.js +++ /dev/null @@ -1,287 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s == 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node) { - if (node.nodeType == 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { - var span = document.createElement("span"); - span.className = className; - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this); - }); - } - } - return this.each(function() { - highlight(this); - }); -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. 
- */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated == 'undefined') - return string; - return (typeof translated == 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated == 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) == 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this == '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); \ No newline at end of file diff --git a/src/doc/3.11.3/_static/down-pressed.png b/src/doc/3.11.3/_static/down-pressed.png deleted file mode 100644 index 5756c8cad..000000000 Binary files a/src/doc/3.11.3/_static/down-pressed.png and /dev/null differ diff --git a/src/doc/3.11.3/_static/down.png b/src/doc/3.11.3/_static/down.png deleted file mode 100644 index 1b3bdad2c..000000000 Binary files a/src/doc/3.11.3/_static/down.png and /dev/null differ diff --git a/src/doc/3.11.3/_static/extra.css b/src/doc/3.11.3/_static/extra.css deleted file mode 100644 index 715e2a850..000000000 --- a/src/doc/3.11.3/_static/extra.css +++ /dev/null @@ -1,59 +0,0 @@ -div:not(.highlight) > pre { - background: #fff; - border: 1px solid #e1e4e5; - color: #404040; - margin: 1px 0 24px 0; - overflow-x: auto; - padding: 12px 12px; - font-size: 12px; -} - -a.reference.internal code.literal { - border: none; - font-size: 12px; - color: #2980B9; - padding: 0; - background: none; -} - -a.reference.internal:visited code.literal { - color: #9B59B6; - padding: 0; - background: none; -} - - -/* override table width restrictions */ -.wy-table-responsive table td, .wy-table-responsive table th { - white-space: normal; -} - -.wy-table-responsive { - margin-bottom: 24px; - 
max-width: 100%; - overflow: visible; -} - -table.contentstable { - margin: 0; -} - -td.rightcolumn { - padding-left: 30px; -} - -div#wipwarning { - font-size: 14px; - border: 1px solid #ecc; - color: #f66; - background: #ffe8e8; - padding: 10px 30px; - margin-bottom: 30px; -} -.content-container{ - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; - width:100%; -} diff --git a/src/doc/3.11.3/_static/file.png b/src/doc/3.11.3/_static/file.png deleted file mode 100644 index a858a410e..000000000 Binary files a/src/doc/3.11.3/_static/file.png and /dev/null differ diff --git a/src/doc/3.11.3/_static/jquery.js b/src/doc/3.11.3/_static/jquery.js deleted file mode 100644 index 25a72c959..000000000 --- a/src/doc/3.11.3/_static/jquery.js +++ /dev/null @@ -1,10219 +0,0 @@ -/*! - * jQuery JavaScript Library v3.1.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2016-12-11T15:18Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. 
- - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.1.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = jQuery.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && jQuery.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isArray: Array.isArray, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 
only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - nodeName: function( elem, name ) { - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. 
- if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! - * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors 
needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
-function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. 
- if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - disabledAncestor( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. - } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( el ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push("~="); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push(".#.+[+~]"); - } - }); - - assert(function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement("input"); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - 
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - if ( support.matchesSelector && documentIsHTML && - !compilerCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) {} - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? 
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -var risSimple = /^.[^:#\[\.,]*$/; - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Simple selector that can be filtered directly, removing non-Elements - if ( risSimple.test( qualifier ) ) { - return jQuery.filter( qualifier, elements, not ); - } - - // Complex selector, compare the two sets, removing non-Elements - qualifier = jQuery.filter( qualifier, elements ); - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; - } ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
- jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - return elem.contentDocument || jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( jQuery.isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - resolve.call( undefined, value ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.call( undefined, value ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( jQuery.isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Hold (or release) the ready event - holdReady: function( hold ) { - if ( hold ) { - jQuery.readyWait++; - } else { - jQuery.ready( true ); - } - }, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. 
-// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? - value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? 
- this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( jQuery.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. 
Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. - _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || jQuery.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. 
- // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. - } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "
", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && jQuery.nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web 
Apps (WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && jQuery.nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return jQuery.nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -function manipulationTarget( elem, content ) { - if ( jQuery.nodeName( elem, "table" ) && - jQuery.nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return elem.getElementsByTagName( "tbody" )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( isFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl ) { - jQuery._evalUrl( node.src ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = jQuery.contains( elem.ownerDocument, elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rmargin = ( /^margin/ ); - -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - div.style.cssText = - "box-sizing:border-box;" + - "position:relative;display:block;" + - "margin:auto;border:1px;padding:1px;" + - "top:1%;width:50%"; - div.innerHTML = ""; - documentElement.appendChild( container ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = divStyle.marginLeft === "2px"; - boxSizingReliableVal = divStyle.width === "4px"; - - // Support: Android 4.0 - 4.3 only - // Some styles come back with percentage values, even though they shouldn't - div.style.marginRight = "50%"; - pixelMarginRightVal = divStyle.marginRight === "4px"; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + - "padding:0;margin-top:1px;position:absolute"; - container.appendChild( div ); - - jQuery.extend( support, { - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelMarginRight: function() { - computeStyleTests(); - return pixelMarginRightVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - style = elem.style; - - computed = computed || getStyles( elem ); - - // Support: IE <=9 only - // getPropertyValue is only needed for .css('filter') (#12537) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android 
Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }, - - cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style; - -// Return a css property mapped to a potentially vendor prefixed property -function vendorPropName( name ) { - - // Shortcut for names that are not vendor prefixed - if ( name in emptyStyle ) { - return name; - } - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i, - val = 0; - - // If we already have the right measurement, avoid augmentation - if ( extra === ( isBorderBox ? "border" : "content" ) ) { - i = 4; - - // Otherwise initialize for horizontal or vertical properties - } else { - i = name === "width" ? 
1 : 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // At this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - - // At this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // At this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with offset property, which is equivalent to the border-box value - var val, - valueIsBorderBox = true, - styles = getStyles( elem ), - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - if ( elem.getClientRects().length ) { - val = elem.getBoundingClientRect()[ name ]; - } - - // Some non-html elements return undefined for offsetWidth, so check for null/undefined - // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285 - // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668 - if ( val <= 0 || val == null ) { - - // Fall back to computed then uncomputed css if necessary - val = curCSS( elem, name, styles ); - if ( val < 0 || val == null ) { - val = elem.style[ name ]; - } - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test( val ) ) { - return val; - } - - // Check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && - ( support.boxSizingReliable() || val === elem.style[ name ] ); - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - } - - // Use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? 
"1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - "float": "cssFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - style = elem.style; - - name = jQuery.cssProps[ origName ] || - ( jQuery.cssProps[ origName ] = vendorPropName( origName ) || origName ); - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - if ( type === "number" ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - style[ name ] = value; - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = jQuery.camelCase( name ); - - // Make sure that we're working with the right name - name = jQuery.cssProps[ origName ] || - ( jQuery.cssProps[ origName ] = vendorPropName( origName ) || origName ); - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? 
num || 0 : val; - } - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - } ) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = extra && getStyles( elem ), - subtract = extra && augmentWidthOrHeight( - elem, - name, - extra, - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ); - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ name ] = value; - value = jQuery.css( elem, name ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( jQuery.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && - ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || - jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, timerId, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function raf() { - if ( timerId ) { - window.requestAnimationFrame( raf ); - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = jQuery.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 13 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( 
restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( jQuery.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - if ( percent < 1 && length ) { - return remaining; - } else { - deferred.resolveWith( elem, [ animation ] ); - return false; - } - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( jQuery.isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - jQuery.proxy( result.stop, result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - // attach callbacks from options - return animation.progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - // Go to the end state if fx are off or if document is hidden - if ( jQuery.fx.off || document.hidden ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Checks the timer has not already been removed - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - if ( timer() ) { - jQuery.fx.start(); - } else { - jQuery.timers.pop(); - } -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( !timerId ) { - timerId = window.requestAnimationFrame ? - window.requestAnimationFrame( raf ) : - window.setInterval( jQuery.fx.tick, jQuery.fx.interval ); - } -}; - -jQuery.fx.stop = function() { - if ( window.cancelAnimationFrame ) { - window.cancelAnimationFrame( timerId ); - } else { - window.clearInterval( timerId ); - } - - timerId = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - jQuery.nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( type === "string" ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = value.match( rnothtmlwhite ) || []; - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, isFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( jQuery.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( jQuery.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 
2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; -} ); - -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - - - - -support.focusin = "onfocusin" in window; - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = jQuery.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( jQuery.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. - if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( jQuery.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport 
); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - 
// If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - 
contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -' - ---- -
-
- -
-
-
- -
-

Dynamo

-
-

Gossip

-
-

Todo

-

todo

-
-
-
-

Failure Detection

-
-

Todo

-

todo

-
-
-
-

Token Ring/Ranges

-
-

Todo

-

todo

-
-
-
-

Replication

-

The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are SimpleStrategy and NetworkTopologyStrategy.

-
-

SimpleStrategy

-

SimpleStrategy allows a single integer replication_factor to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if replication_factor is 3, then three different nodes should store -a copy of each row.

-

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until replication_factor distinct nodes have been added to the set of replicas.
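As an illustrative sketch (the keyspace name is made up), a keyspace using SimpleStrategy with a replication factor of 3 can be declared in CQL like this:

```cql
-- Hypothetical keyspace: three copies of every row, with all nodes treated
-- identically regardless of datacenter or rack.
CREATE KEYSPACE example_simple
    WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
```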

-
-
-

NetworkTopologyStrategy

-

NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your -cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier -to add new physical or virtual datacenters to the cluster later.

-

In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose -replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication -factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one -replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially surprising -implications. For example, if racks do not hold an equal number of -nodes, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to -configure all nodes on a single “rack”.
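A hedged example of a per-datacenter replication factor; the keyspace and datacenter names below are placeholders and must match the datacenter names reported by the cluster's snitch:

```cql
-- Hypothetical keyspace: three replicas in 'dc1' and two in 'dc2', with
-- replicas spread across racks within each datacenter where possible.
CREATE KEYSPACE example_nts
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 2};
```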

-
-
-
-

Tunable Consistency

-

Cassandra supports a per-operation tradeoff between consistency and availability through Consistency Levels. -Essentially, an operation’s consistency level specifies how many of the replicas need to respond to the coordinator in -order to consider the operation a success.

-

The following consistency levels are available:

-
-
ONE
-
Only a single replica must respond.
-
TWO
-
Two replicas must respond.
-
THREE
-
Three replicas must respond.
-
QUORUM
-
A majority (n/2 + 1) of the replicas must respond.
-
ALL
-
All of the replicas must respond.
-
LOCAL_QUORUM
-
A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
EACH_QUORUM
-
A majority of the replicas in each datacenter must respond.
-
LOCAL_ONE
-
Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not -sent to replicas in a remote datacenter.
-
ANY
-
A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later -attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for -write operations.
-
-

Write operations are always sent to all replicas, regardless of consistency level. The consistency level simply -controls how many responses the coordinator waits for before responding to the client.
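For example, in cqlsh the consistency level is a session setting applied to subsequent statements; the keyspace, table, and values below are made up for illustration:

```cql
-- cqlsh sketch: wait for a quorum of replicas before acknowledging.
CONSISTENCY QUORUM;

-- The write is still sent to every replica; QUORUM only controls how many
-- acknowledgements the coordinator waits for before replying to the client.
INSERT INTO example_keyspace.users (id, name) VALUES (42, 'alice');
```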

-

For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level. There are a couple of exceptions to this:

-
    -
  • Speculative retry may issue a redundant read request to an extra replica if the other replicas have not responded -within a specified time window.
  • -
  • Based on read_repair_chance and dclocal_read_repair_chance (part of a table’s schema), read requests may be -randomly sent to all replicas in order to repair potentially inconsistent data.
  • -
-
-

Picking Consistency Levels

-

It is common to pick read and write consistency levels that are high enough to overlap, resulting in “strong” -consistency. This is typically expressed as W + R > RF, where W is the write consistency level, R is the -read consistency level, and RF is the replication factor. For example, if RF = 3, a QUORUM request will -require responses from at least two of the three replicas. If QUORUM is used for both writes and reads, at least -one of the replicas is guaranteed to participate in both the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, LOCAL_QUORUM can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter.

-

If this type of strong consistency isn’t required, lower consistency levels like ONE may be used to improve -throughput, latency, and availability.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/architecture/guarantees.html b/src/doc/3.11.3/architecture/guarantees.html deleted file mode 100644 index c15420b25..000000000 --- a/src/doc/3.11.3/architecture/guarantees.html +++ /dev/null @@ -1,113 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Guarantees" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Guarantees

-
-

Todo

-

todo

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/architecture/index.html b/src/doc/3.11.3/architecture/index.html deleted file mode 100644 index b62a1bad3..000000000 --- a/src/doc/3.11.3/architecture/index.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Architecture" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Architecture

-

This section describes the general architecture of Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/architecture/overview.html b/src/doc/3.11.3/architecture/overview.html deleted file mode 100644 index b14cdf4e6..000000000 --- a/src/doc/3.11.3/architecture/overview.html +++ /dev/null @@ -1,113 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Overview" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
- -
\ No newline at end of file diff --git a/src/doc/3.11.3/architecture/storage_engine.html b/src/doc/3.11.3/architecture/storage_engine.html deleted file mode 100644 index b440ede77..000000000 --- a/src/doc/3.11.3/architecture/storage_engine.html +++ /dev/null @@ -1,164 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Storage Engine" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Storage Engine

-
-

CommitLog

-
-

Todo

-

todo

-
-
-
-

Memtables

-

Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable SSTables. This can be triggered in several -ways:

-
    -
  • The memory usage of the memtables exceeds the configured threshold (see memtable_cleanup_threshold)
  • -
  • The CommitLog approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to -be freed
  • -
-

Memtables may be stored entirely on-heap or partially off-heap, depending on memtable_allocation_type.

-
-
-

SSTables

-

SSTables are the immutable data files that Cassandra uses for persisting data on disk.

-

As SSTables are flushed to disk from Memtables or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.
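The compaction strategy is a per-table schema option; as a hedged sketch (the table name and strategy choice are illustrative only):

```cql
-- Select a compaction strategy for one table; SizeTieredCompactionStrategy
-- merges SSTables of similar size into larger ones.
ALTER TABLE example_keyspace.events
    WITH compaction = {'class': 'SizeTieredCompactionStrategy'};
```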

-

Each SSTable is composed of multiple components stored in separate files:

-
-
Data.db
-
The actual data, i.e. the contents of rows.
-
Index.db
-
An index from partition keys to positions in the Data.db file. For wide partitions, this may also include an -index to rows within a partition.
-
Summary.db
-
A sampling of (by default) every 128th entry in the Index.db file.
-
Filter.db
-
A Bloom Filter of the partition keys in the SSTable.
-
CompressionInfo.db
-
Metadata about the offsets and lengths of compression chunks in the Data.db file.
-
Statistics.db
-
Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, -repair, compression, TTLs, and more.
-
Digest.crc32
-
A CRC-32 digest of the Data.db file.
-
TOC.txt
-
A plain text list of the component files for the SSTable.
-
-

Within the Data.db file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, Murmur3Partitioner, is used). Within a partition, rows are -stored in the order of their clustering keys.
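A minimal sketch of how this layout follows from the schema (names are hypothetical): sensor_id is the partition key, so all rows sharing a sensor_id live in one partition placed by the token of that key, and within the partition rows are stored sorted by the clustering key reading_time:

```cql
-- Hypothetical table: 'sensor_id' is the partition key, 'reading_time' the
-- clustering key; rows within a partition are stored in clustering order.
CREATE TABLE example_keyspace.sensor_readings (
    sensor_id    uuid,
    reading_time timestamp,
    value        double,
    PRIMARY KEY (sensor_id, reading_time)
);
```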

-

SSTables can be optionally compressed using block-based compression.
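Compression is likewise configured per table; a hedged example using the hypothetical table above (the compressor class and chunk length are just one possible choice):

```cql
-- Compress the Data.db file in independently readable chunks using LZ4.
ALTER TABLE example_keyspace.sensor_readings
    WITH compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 64};
```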

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/bugs.html b/src/doc/3.11.3/bugs.html deleted file mode 100644 index be6712b91..000000000 --- a/src/doc/3.11.3/bugs.html +++ /dev/null @@ -1,108 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Reporting Bugs and Contributing" -doc-header-links: ' - - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Reporting Bugs and Contributing

-

If you encounter a problem with Cassandra, the first places to ask for help are the user mailing list and the #cassandra IRC channel.

-

If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the Apache Cassandra JIRA. Please provide as many -details as you can about your problem, and don’t forget to indicate which version of Cassandra you are running and in which -environment.

-

Further details on how to contribute can be found at our Cassandra Development section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/configuration/cassandra_config_file.html b/src/doc/3.11.3/configuration/cassandra_config_file.html deleted file mode 100644 index 6250894b9..000000000 --- a/src/doc/3.11.3/configuration/cassandra_config_file.html +++ /dev/null @@ -1,1802 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Configuring Cassandra" - -doc-title: "Cassandra Configuration File" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Cassandra Configuration File

-
-

cluster_name

-

The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another.

-

Default Value: ‘Test Cluster’

-
-
-

num_tokens

-

This defines the number of tokens randomly assigned to this node on the ring. -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability.

-

If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below.

-

Specifying initial_token will override this setting on the node’s initial start; -on subsequent starts, this setting will apply even if initial_token is set.

-

If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations

-

Default Value: 256

-
-
-

allocate_tokens_for_keyspace

-

This option is commented out by default.

-

Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace.

-

The load assigned to each node will be close to proportional to its number of -vnodes.

-

Only supported with the Murmur3Partitioner.

-

Default Value: KEYSPACE

-
-
-

initial_token

-

This option is commented out by default.

-

initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) – in which case you should provide a -comma-separated list – it’s primarily used when adding nodes to legacy clusters -that do not have vnodes enabled.

-
-
-

hinted_handoff_enabled

-

See http://wiki.apache.org/cassandra/HintedHandoff -May either be “true” or “false” to enable globally

-

Default Value: true

-
-
-

hinted_handoff_disabled_datacenters

-

This option is commented out by default.

-

When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff

-

Default Value (complex option):

-
#    - DC1
-#    - DC2
-
-
-
-
-

max_hint_window_in_ms

-

this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again.

-

Default Value: 10800000 # 3 hours

-
-
-

hinted_handoff_throttle_in_kb

-

Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.)

-

Default Value: 1024

-
-
-

max_hints_delivery_threads

-

Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower

-

Default Value: 2

-
-
-

hints_directory

-

This option is commented out by default.

-

Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints.

-

Default Value: /var/lib/cassandra/hints

-
-
-

hints_flush_period_in_ms

-

How often hints should be flushed from the internal buffers to disk. -Will not trigger fsync.

-

Default Value: 10000

-
-
-

max_hints_file_size_in_mb

-

Maximum size for a single hints file, in megabytes.

-

Default Value: 128

-
-
-

hints_compression

-

This option is commented out by default.

-

Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

batchlog_replay_throttle_in_kb

-

Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster.

-

Default Value: 1024

-
-
-

authenticator

-

Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}.

-
    -
  • AllowAllAuthenticator performs no checks - set it to disable authentication.
  • -
  • PasswordAuthenticator relies on username/password pairs to authenticate -users. It keeps usernames and hashed passwords in system_auth.roles table. -Please increase system_auth keyspace replication factor if you use this authenticator. -If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
  • -
-

Default Value: AllowAllAuthenticator
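As the note above suggests, when PasswordAuthenticator is enabled the system_auth keyspace should be replicated more widely; a hedged sketch follows (the datacenter name and factor are placeholders, and a repair of system_auth should follow the change):

```cql
-- Keep several replicas of roles and credentials so authentication still
-- works when individual nodes are down.
ALTER KEYSPACE system_auth
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};
```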

-
-
-

authorizer

-

Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}.

-
    -
  • AllowAllAuthorizer allows any action to any user - set it to disable authorization.
  • -
  • CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllAuthorizer
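When CassandraAuthorizer is in use, permissions are granted and revoked through CQL; a minimal illustrative sketch with made-up role and keyspace names:

```cql
-- Create a login role and give it read-only access to one keyspace.
CREATE ROLE analyst WITH PASSWORD = 'example-password' AND LOGIN = true;
GRANT SELECT ON KEYSPACE example_keyspace TO analyst;
```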

-
-
-

role_manager

-

Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable.

-
    -
  • CassandraRoleManager stores role data in the system_auth keyspace. Please -increase system_auth keyspace replication factor if you use this role manager.
  • -
-

Default Value: CassandraRoleManager

-
-
-

roles_validity_in_ms

-

Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator.

-

Default Value: 2000

-
-
-

roles_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms.

-

Default Value: 2000

-
-
-

permissions_validity_in_ms

-

Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer.

-

Default Value: 2000

-
-
-

permissions_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms.

-

Default Value: 2000

-
-
-

credentials_validity_in_ms

-

Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching.

-

Default Value: 2000

-
-
-

credentials_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms.

-

Default Value: 2000

-
-
-

partitioner

-

The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. You should leave this -alone for new clusters. The partitioner can NOT be changed without -reloading all data, so when upgrading you should set this to the -same partitioner you were already using.

-

Besides Murmur3Partitioner, partitioners included for backwards -compatibility include RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner.

-

Default Value: org.apache.cassandra.dht.Murmur3Partitioner

-
-
-

data_file_directories

-

This option is commented out by default.

-

Directories where Cassandra should store data on disk. Cassandra -will spread data evenly across them, subject to the granularity of -the configured compaction strategy. -If not set, the default directory is $CASSANDRA_HOME/data/data.

-

Default Value (complex option):

-
#     - /var/lib/cassandra/data
-
-
-
-
-

commitlog_directory

-

This option is commented out by default. -Directory where Cassandra should store the commit log. When running on magnetic HDD, this should be a -separate spindle from the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog.

-

Default Value: /var/lib/cassandra/commitlog

-
-
-

cdc_enabled

-

Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory).

-

Default Value: false
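CDC is also controlled per table in the schema; a hedged sketch (the table name is hypothetical):

```cql
-- Mark a table as CDC-enabled so flushed commit log segments containing its
-- mutations are retained in cdc_raw_directory for an external consumer.
ALTER TABLE example_keyspace.sensor_readings WITH cdc = true;
```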

-
-
-

cdc_raw_directory

-

This option is commented out by default.

-

CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw.

-

Default Value: /var/lib/cassandra/cdc_raw

-
-
-

disk_failure_policy

-

Policy for data disk failures:

-
-
die
-
shut down gossip and client transports and kill the JVM for any fs errors or -single-sstable errors, so the node can be replaced.
-
stop_paranoid
-
shut down gossip and client transports even for single-sstable errors, -kill the JVM for errors during startup.
-
stop
-
shut down gossip and client transports, leaving the node effectively dead, but -can still be inspected via JMX, kill the JVM for errors during startup.
-
best_effort
-
stop using the failed disk and respond to requests based on -remaining available sstables. This means you WILL see obsolete -data at CL.ONE!
-
ignore
-
ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-
-

Default Value: stop

-
-
-

commit_failure_policy

-

Policy for commit disk failures:

-
-
die
-
shut down gossip and Thrift and kill the JVM, so the node can be replaced.
-
stop
-
shut down gossip and Thrift, leaving the node effectively dead, but -can still be inspected via JMX.
-
stop_commit
-
shutdown the commit log, letting writes collect but -continuing to service reads, as in pre-2.0.5 Cassandra
-
ignore
-
ignore fatal errors and let the batches fail
-
-

Default Value: stop

-
-
-

prepared_statements_cache_size_mb

-

Maximum size of the native protocol prepared statement cache

-

Valid values are either “auto” (omitting the value) or a value greater than 0.

-

Note that specifying too large a value will result in long-running GCs and possibly -out-of-memory errors. Keep the value at a small fraction of the heap.

-

If you constantly see “prepared statements discarded in the last minute because -cache limit reached” messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts.
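“Bind markers for variable parts” means the changing values are replaced with ? placeholders, so drivers prepare one statement and reuse it. The statements below are illustrative (the table and columns are made up), and the ? form is what a driver prepares rather than something run directly in cqlsh:

```cql
-- Without bind markers: every distinct id produces a new statement text,
-- each occupying its own slot in the prepared statement cache.
SELECT name FROM example_keyspace.users WHERE id = 42;

-- With a bind marker: one statement is prepared once and executed many
-- times with different bound values.
SELECT name FROM example_keyspace.users WHERE id = ?;
```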

-

Only change the default value if you really have more prepared statements than -fit in the cache. In most cases it is not necessary to change this value. -Constantly re-preparing statements is a performance penalty.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

thrift_prepared_statements_cache_size_mb

-

Maximum size of the Thrift prepared statement cache

-

If you do not use Thrift at all, it is safe to leave this value at “auto”.

-

See description of ‘prepared_statements_cache_size_mb’ above for more information.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

key_cache_size_in_mb

-

Maximum size of the key cache in memory.

-

Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it’s worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It’s best to only use the -row cache if you have hot rows or static rows.
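Whether a given table uses the key and row caches is itself a per-table schema option; a hedged sketch (the values are illustrative):

```cql
-- Cache all partition keys for this table and keep up to 100 rows per
-- partition in the row cache.
ALTER TABLE example_keyspace.users
    WITH caching = {'keys': 'ALL', 'rows_per_partition': '100'};
```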

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.

-
-
-

key_cache_save_period

-

Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 14400 or 4 hours.

-

Default Value: 14400

-
-
-

key_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

row_cache_class_name

-

This option is commented out by default.

-

Row cache implementation class name. Available implementations:

-
-
org.apache.cassandra.cache.OHCProvider
-
Fully off-heap row cache implementation (default).
-
org.apache.cassandra.cache.SerializingCacheProvider
-
This is the row cache implementation available -in previous releases of Cassandra.
-
-

Default Value: org.apache.cassandra.cache.OHCProvider

-
-
-

row_cache_size_in_mb

-

Maximum size of the row cache in memory. -Please note that the OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory than the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Never allow your system to swap.

-

Default value is 0, to disable row caching.

-

Default Value: 0

-
-
-

row_cache_save_period

-

Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 0 to disable saving the row cache.

-

Default Value: 0

-
-
-

row_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved

-

Default Value: 100

-
-
-

counter_cache_size_in_mb

-

Maximum size of the counter cache in memory.

-

Counter cache helps to reduce counter locks’ contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it’s relatively cheap.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.

-
-
-

counter_cache_save_period

-

Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Default is 7200 or 2 hours.

-

Default Value: 7200

-
-
-

counter_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

saved_caches_directory

-

This option is commented out by default.

-

Directory where Cassandra should store saved caches. -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.

-

Default Value: /var/lib/cassandra/saved_caches

-
-
-

commitlog_sync

-

This option is commented out by default.

-

commitlog_sync may be either “periodic” or “batch.”

-

When in batch mode, Cassandra won’t ack writes until the commit log -has been fsynced to disk. It will wait -commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -This window should be kept short because the writer threads will -be unable to do extra work while waiting. (You may need to increase -concurrent_writes for the same reason.)

-

Default Value: batch

-
-
-

commitlog_sync_batch_window_in_ms

-

This option is commented out by default.

-

Default Value: 2

-
-
-

commitlog_sync

-

the other option is “periodic” where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds.

-

Default Value: periodic

-
-
-

commitlog_sync_period_in_ms

-

Default Value: 10000

-
-
-

commitlog_segment_size_in_mb

-

The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables.

-

The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size of commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048.

-

NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024

-

Default Value: 32

-
-
-

commitlog_compression

-

This option is commented out by default.

-

Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

seed_provider

-

any class that implements the SeedProvider interface and has a -constructor that takes a Map<String, String> of parameters will do.

-

Default Value (complex option):

-
# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring.  You must change this if you are running
-# multiple nodes!
-- class_name: org.apache.cassandra.locator.SimpleSeedProvider
-  parameters:
-      # seeds is actually a comma-delimited list of addresses.
-      # Ex: "<ip1>,<ip2>,<ip3>"
-      - seeds: "127.0.0.1"
-
-
-
-
-
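A minimal multi-node sketch of the seed provider described above, assuming two hypothetical seed addresses (10.0.0.1 and 10.0.0.2); replace them with the addresses of your own seed nodes:

seed_provider:
  - class_name: org.apache.cassandra.locator.SimpleSeedProvider
    parameters:
      # hypothetical addresses, for illustration only
      - seeds: "10.0.0.1,10.0.0.2"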

concurrent_reads

-

For workloads with more data than can fit in memory, Cassandra’s -bottleneck will be reads that need to fetch data from -disk. “concurrent_reads” should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -“concurrent_counter_writes”, since counter writes read the current -values before incrementing and writing them back.

-

On the other hand, since writes are almost never IO bound, the ideal -number of “concurrent_writes” is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb.

-

Default Value: 32

-
-
-

concurrent_writes

-

Default Value: 32

-
-
-

concurrent_counter_writes

-

Default Value: 32

-
-
-

concurrent_materialized_view_writes

-

For materialized view writes, as there is a read involved, this should be limited by the lesser of concurrent reads or concurrent writes.

-

Default Value: 32

-
-
-
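Applying the rules of thumb above to a hypothetical node with 2 data drives and 8 cores (an illustrative sketch, not a tuning recommendation) gives:

concurrent_reads: 32                        # 16 * 2 drives
concurrent_writes: 64                       # 8 * 8 cores
concurrent_counter_writes: 32               # counter writes read before writing, so sized like reads
concurrent_materialized_view_writes: 32     # limited by the lesser of concurrent reads and writes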

file_cache_size_in_mb

-

This option is commented out by default.

-

Maximum memory to use for sstable chunk cache and buffer pooling. 32MB of this is reserved for pooling buffers; the rest is used as a cache that holds uncompressed sstable chunks. Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, so it is in addition to the memory allocated for heap. The cache also has on-heap overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size if the default 64k chunk size is used). Memory is only allocated when needed.

-

Default Value: 512

-
-
-

buffer_pool_use_heap_if_exhausted

-

This option is commented out by default.

-

Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

-

Default Value: true

-
-
-

disk_optimization_strategy

-

This option is commented out by default.

-

The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks)

-

Default Value: ssd

-
-
-

memtable_heap_space_in_mb

-

This option is commented out by default.

-

Total permitted memory to use for memtables. Cassandra will stop accepting writes when the limit is exceeded until a flush completes, and will trigger a flush based on memtable_cleanup_threshold. If omitted, Cassandra will set both to 1/4 the size of the heap.

-

Default Value: 2048

-
-
-

memtable_offheap_space_in_mb

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

memtable_cleanup_threshold

-

This option is commented out by default.

-

memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information.

-

Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load.

-

memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)

-

Default Value: 0.11

-
-
-

memtable_allocation_type

-

Specify the way Cassandra allocates and manages memtable memory. -Options are:

-
-
heap_buffers
-
on heap nio buffers
-
offheap_buffers
-
off heap (direct) nio buffers
-
offheap_objects
-
off heap objects
-
-

Default Value: heap_buffers

-
-
-

commitlog_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for commit logs on disk.

-

If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume.

-

Default Value: 8192

-
-
-

memtable_flush_writers

-

This option is commented out by default.

-

This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound.

-

Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time.

-

You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory.

-

memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers.

-

Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead.

-

There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory.

-

Default Value: 2

-
-
-

cdc_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for change-data-capture logs on disk.

-

If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed.

-

The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides.

-

Default Value: 4096

-
-
-

cdc_free_space_check_interval_ms

-

This option is commented out by default.

-

When we hit our cdc_raw limit and the CDCCompactor is either running behind or experiencing backpressure, we check at the following interval to see if any new space for cdc-tracked tables has been made available. Defaults to 250ms.

-

Default Value: 250

-
-
-

index_summary_capacity_in_mb

-

A fixed memory pool size in MB for SSTable index summaries. If left empty, this will default to 5% of the heap size. If the memory usage of all index summaries exceeds this limit, SSTables with low read rates will shrink their index summaries in order to meet this limit. However, this is a best-effort process. In extreme conditions Cassandra may need to use more than this amount of memory.

-
-
-

index_summary_resize_interval_in_minutes

-

How frequently index summaries should be resampled. This is done periodically to redistribute memory from the fixed-size pool to sstables proportional to their recent read rates. Setting to -1 will disable this process, leaving existing index summaries at their current sampling level.

-

Default Value: 60

-
-
-

trickle_fsync

-

Whether to fsync() at intervals when doing sequential writing, in order to force the operating system to flush the dirty buffers. Enable this to avoid sudden dirty buffer flushing from impacting read latencies. Almost always a good idea on SSDs; not necessarily on platters.

-

Default Value: false

-
-
-

trickle_fsync_interval_in_kb

-

Default Value: 10240

-
-
-

storage_port

-

TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7000

-
-
-

ssl_storage_port

-

SSL port, for encrypted communication. Unused unless enabled in -encryption_options -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7001

-
-
-

listen_address

-

Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate!

-

Set listen_address OR listen_interface, not both.

-

Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be).

-

Setting listen_address to 0.0.0.0 is always wrong.

-

Default Value: localhost

-
-
-
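A minimal sketch of the binding settings described above, assuming a hypothetical private address of 192.168.1.10 for this node; set listen_address or listen_interface, never both:

listen_address: 192.168.1.10   # hypothetical address; never use 0.0.0.0
# listen_interface: eth0       # alternative to listen_address; set only one of the two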

listen_interface

-

This option is commented out by default.

-

Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth0

-
-
-

listen_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_address

-

This option is commented out by default.

-

Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address

-

Default Value: 1.2.3.4

-
-
-

listen_on_broadcast_address

-

This option is commented out by default.

-

When using multiple physical network interfaces, set this to true to listen on broadcast_address in addition to the listen_address, allowing nodes to communicate on both interfaces. Ignore this property if the network configuration automatically routes between the public and private networks, such as on EC2.

-

Default Value: false

-
-
-

internode_authenticator

-

This option is commented out by default.

-

Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes.

-

Default Value: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

-
-
-

start_native_transport

-

Whether to start the native transport server. -Please note that the address on which the native transport is bound is the -same as the rpc_address. The port however is different and specified below.

-

Default Value: true

-
-
-

native_transport_port

-

port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 9042

-
-
-

native_transport_port_ssl

-

This option is commented out by default. -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted.

-

Default Value: 9142

-
-
-
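For example, the dedicated-SSL-port arrangement described above (encrypted traffic on native_transport_port_ssl, unencrypted traffic on native_transport_port) would be sketched as:

native_transport_port: 9042        # remains unencrypted
native_transport_port_ssl: 9142    # encrypted; requires client_encryption_options to be enabled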

native_transport_max_threads

-

This option is commented out by default. -The maximum threads for handling requests when the native transport is used. -This is similar to rpc_max_threads though the default differs slightly (and -there is no native_transport_min_threads, idle threads will always be stopped -after 30 seconds).

-

Default Value: 128

-
-
-

native_transport_max_frame_size_in_mb

-

This option is commented out by default.

-

The maximum allowed frame size. Frames (requests) larger than this will be rejected as invalid. The default is 256MB. If you’re changing this parameter, you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

native_transport_max_concurrent_connections

-

This option is commented out by default.

-

The maximum number of concurrent client connections. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_max_concurrent_connections_per_ip

-

This option is commented out by default.

-

The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

start_rpc

-

Whether to start the thrift rpc server.

-

Default Value: false

-
-
-

rpc_address

-

The address or interface to bind the Thrift RPC service and native transport -server to.

-

Set rpc_address OR rpc_interface, not both.

-

Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node).

-

Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0.

-

For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: localhost

-
-
-

rpc_interface

-

This option is commented out by default.

-

Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth1

-
-
-

rpc_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

rpc_port

-

port for Thrift to listen for clients on

-

Default Value: 9160

-
-
-

broadcast_rpc_address

-

This option is commented out by default.

-

RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set.

-

Default Value: 1.2.3.4

-
-
-
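A sketch of the wildcard-binding case described above, using a hypothetical routable address of 10.0.0.5; broadcast_rpc_address is mandatory whenever rpc_address is 0.0.0.0:

rpc_address: 0.0.0.0
broadcast_rpc_address: 10.0.0.5   # hypothetical address; must not be 0.0.0.0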

rpc_keepalive

-

enable or disable keepalive on rpc/native connections

-

Default Value: true

-
-
-

rpc_server_type

-

Cassandra provides two out-of-the-box options for the RPC Server:

-
-
sync
-
One thread per thrift connection. For a very large number of clients, memory -will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size -per thread, and that will correspond to your use of virtual memory (but physical memory -may be limited depending on use of stack space).
-
hsha
-
Stands for “half synchronous, half asynchronous.” All thrift clients are handled -asynchronously using a small number of threads that does not vary with the amount -of thrift clients (and thus scales well to many clients). The rpc requests are still -synchronous (one thread per active request). If hsha is selected then it is essential -that rpc_max_threads is changed from the default value of unlimited.
-
-

The default is sync because on Windows hsha is about 30% slower. On Linux, -sync/hsha performance is about the same, with hsha of course using less memory.

-

Alternatively, you can provide your own RPC server by specifying the fully-qualified class name of an o.a.c.t.TServerFactory that can create an instance of it.

-

Default Value: sync

-
-
-

rpc_min_threads

-

This option is commented out by default.

-

Uncomment rpc_min|max_thread to set request pool size limits.

-

Regardless of your choice of RPC server (see above), the number of maximum requests in the -RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -RPC server, it also dictates the number of clients that can be connected at all).

-

The default is unlimited and thus provides no protection against clients overwhelming the server. You are -encouraged to set a maximum that makes sense for you in production, but do keep in mind that -rpc_max_threads represents the maximum number of client requests this server may execute concurrently.

-

Default Value: 16

-
-
-

rpc_max_threads

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

rpc_send_buff_size_in_bytes

-

This option is commented out by default.

-

uncomment to set socket buffer sizes on rpc connections

-
-
-

rpc_recv_buff_size_in_bytes

-

This option is commented out by default.

-
-
-

internode_send_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication. Note that when setting this, the buffer size is limited by net.core.wmem_max, and when not setting it, it is defined by net.ipv4.tcp_wmem. See also: /proc/sys/net/core/wmem_max, /proc/sys/net/core/rmem_max, /proc/sys/net/ipv4/tcp_wmem, /proc/sys/net/ipv4/tcp_rmem, and ‘man tcp’.

-
-
-

internode_recv_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem

-
-
-

thrift_framed_transport_size_in_mb

-

Frame size for thrift (maximum message length).

-

Default Value: 15

-
-
-

incremental_backups

-

Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator’s -responsibility.

-

Default Value: false

-
-
-

snapshot_before_compaction

-

Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won’t clean up the -snapshots for you. Mostly useful if you’re paranoid when there -is a data format change.

-

Default Value: false

-
-
-

auto_snapshot

-

Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop.

-

Default Value: true

-
-
-

column_index_size_in_kb

-

Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these:

-
    -
  • a smaller granularity means more index entries are generated and looking up rows within the partition by collation column is faster
  • -
  • but, Cassandra will keep the collation index in memory for hot -rows (as part of the key cache), so a larger granularity means -you can cache more hot rows
  • -
-

Default Value: 64

-
-
-

column_index_cache_size_in_kb

-

Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk.

-

Note that this size refers to the size of the -serialized index information and not the size of the partition.

-

Default Value: 2

-
-
-

concurrent_compactors

-

This option is commented out by default.

-

Number of simultaneous compactions to allow, NOT including validation “compactions” for anti-entropy repair. Simultaneous compactions can help preserve read performance in a mixed read/write workload, by mitigating the tendency of small sstables to accumulate during a single long running compaction. The default is usually fine and if you experience problems with compaction running too slowly or too fast, you should look at compaction_throughput_mb_per_sec first.

-

concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8.

-

If your data directories are backed by SSD, you should increase this -to the number of cores.

-

Default Value: 1

-
-
-

compaction_throughput_mb_per_sec

-

Throttles compaction to the given total throughput across the entire system. The faster you insert data, the faster you need to compact in order to keep the sstable count down, but in general, setting this to 16 to 32 times the rate you are inserting data is more than sufficient. Setting this to 0 disables throttling. Note that this accounts for all types of compaction, including validation compaction.

-

Default Value: 16

-
-
-
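For a hypothetical SSD-backed node with 8 cores, the guidance above might translate to something like the following; the values are illustrative only, and compaction_throughput_mb_per_sec should track your actual insert rate:

concurrent_compactors: 8               # SSD-backed: increase to the number of cores
compaction_throughput_mb_per_sec: 64   # hypothetical value; 0 disables throttling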

sstable_preemptive_open_interval_in_mb

-

When compacting, the replacement sstable(s) can be opened before they are completely written, and used in place of the prior sstables for any range that has been written. This helps to smoothly transfer reads between the sstables, reducing page cache churn and keeping hot rows hot.

-

Default Value: 50

-
-
-

stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

inter_dc_stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s

-

Default Value: 200

-
-
-

read_request_timeout_in_ms

-

How long the coordinator should wait for read operations to complete

-

Default Value: 5000

-
-
-

range_request_timeout_in_ms

-

How long the coordinator should wait for seq or index scans to complete

-

Default Value: 10000

-
-
-

write_request_timeout_in_ms

-

How long the coordinator should wait for writes to complete

-

Default Value: 2000

-
-
-

counter_write_request_timeout_in_ms

-

How long the coordinator should wait for counter writes to complete

-

Default Value: 5000

-
-
-

cas_contention_timeout_in_ms

-

How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row

-

Default Value: 1000

-
-
-

truncate_request_timeout_in_ms

-

How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.)

-

Default Value: 60000

-
-
-

request_timeout_in_ms

-

The default timeout for other, miscellaneous operations

-

Default Value: 10000

-
-
-

slow_query_log_timeout_in_ms

-

How long before a node logs slow queries. Select queries that take longer than this timeout to execute will generate an aggregated log message, so that slow queries can be identified. Set this value to zero to disable slow query logging.

-

Default Value: 500

-
-
-
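For reference, the coordinator timeouts documented above collected into one sketch; the values shown are the documented defaults:

read_request_timeout_in_ms: 5000
range_request_timeout_in_ms: 10000
write_request_timeout_in_ms: 2000
counter_write_request_timeout_in_ms: 5000
cas_contention_timeout_in_ms: 1000
truncate_request_timeout_in_ms: 60000
request_timeout_in_ms: 10000
slow_query_log_timeout_in_ms: 500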

cross_node_timeout

-

Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests.

-

Warning: before enabling this property make sure NTP is installed and the times are synchronized between the nodes.

-

Default Value: false

-
-
-

streaming_keep_alive_period_in_secs

-

This option is commented out by default.

-

Set the keep-alive period for streaming. This node will send a keep-alive message periodically with this period. If the node does not receive a keep-alive message from the peer for 2 keep-alive cycles, the stream session times out and fails. Default value is 300s (5 minutes), which means a stalled stream times out in 10 minutes by default.

-

Default Value: 300

-
-
-

phi_convict_threshold

-

This option is commented out by default.

-

phi value that must be reached for a host to be marked down. -most users should never need to adjust this.

-

Default Value: 8

-
-
-

endpoint_snitch

-

endpoint_snitch – Set this to a class that implements -IEndpointSnitch. The snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route -requests efficiently
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid -correlated failures. It does this by grouping machines into -“datacenters” and “racks.” Cassandra will do its best not to have -more than one replica on the same “rack” (which may not actually -be a physical location)
  • -
-

CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on “rack1” in “datacenter1”, your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new “datacenter”) and -decommissioning the old ones.

-

Out of the box, Cassandra provides:

-
-
SimpleSnitch:
-
Treats Strategy order as proximity. This can improve cache -locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack -and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via -gossip. If cassandra-topology.properties exists, it is used as a -fallback, allowing migration from the PropertyFileSnitch.
-
PropertyFileSnitch:
-
Proximity is determined by rack and data center, which are -explicitly configured in cassandra-topology.properties.
-
Ec2Snitch:
-
Appropriate for EC2 deployments in a single Region. Loads Region -and Availability Zone information from the EC2 API. The Region is -treated as the datacenter, and the Availability Zone as the rack. -Only private IPs are used, so this will not work across multiple -Regions.
-
Ec2MultiRegionSnitch:
-
Uses public IPs as broadcast_address to allow cross-region -connectivity. (Thus, you should set seed addresses to the public -IP as well.) You will need to open the storage_port or -ssl_storage_port on the public IP firewall. (For intra-Region -traffic, Cassandra will switch to the private IP after -establishing a connection.)
-
RackInferringSnitch:
-
Proximity is determined by rack and data center, which are -assumed to correspond to the 3rd and 2nd octet of each node’s IP -address, respectively. Unless this happens to match your -deployment conventions, this is best used as an example of -writing a custom Snitch class and is provided in that spirit.
-
-

You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath.

-

Default Value: SimpleSnitch

-
-
-
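A minimal sketch of the recommended production setup described above, assuming a hypothetical datacenter named DC1 and rack named RACK1; the dc/rack property keys in cassandra-rackdc.properties are an assumption and should be checked against your installation:

# cassandra.yaml
endpoint_snitch: GossipingPropertyFileSnitch

# cassandra-rackdc.properties (hypothetical names, for illustration only)
# dc=DC1
# rack=RACK1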

dynamic_snitch_update_interval_in_ms

-

controls how often to perform the more expensive part of host score -calculation

-

Default Value: 100

-
-
-

dynamic_snitch_reset_interval_in_ms

-

controls how often to reset all host scores, allowing a bad host to -possibly recover

-

Default Value: 600000

-
-
-

dynamic_snitch_badness_threshold

-

if set greater than zero and read_repair_chance is < 1.0, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest.

-

Default Value: 0.1

-
-
-

request_scheduler

-

request_scheduler – Set this to a class that implements -RequestScheduler, which will schedule incoming client requests -according to the specific policy. This is useful for multi-tenancy -with a single Cassandra cluster. -NOTE: This is specifically for requests from the client and does -not affect inter node communication. -org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -client requests to a node with a separate queue for each -request_scheduler_id. The scheduler is further customized by -request_scheduler_options as described below.

-

Default Value: org.apache.cassandra.scheduler.NoScheduler

-
-
-

request_scheduler_options

-

This option is commented out by default.

-

Scheduler Options vary based on the type of scheduler

-
-
NoScheduler
-
Has no options
-
RoundRobin
-
-
throttle_limit
-
The throttle_limit is the number of in-flight -requests per client. Requests beyond -that limit are queued up until -running requests can complete. -The value of 80 here is twice the number of -concurrent_reads + concurrent_writes.
-
default_weight
-
default_weight is optional and allows for -overriding the default which is 1.
-
weights
-
Weights are optional and will default to 1 or the -overridden default_weight. The weight translates into how -many requests are handled during each turn of the -RoundRobin, based on the scheduler id.
-
-
-
-

Default Value (complex option):

-
#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-
-
-
-

request_scheduler_id

-

This option is commented out by default. -request_scheduler_id – An identifier based on which to perform -the request scheduling. Currently the only valid option is keyspace.

-

Default Value: keyspace

-
-
-

server_encryption_options

-

Enable or disable inter-node encryption -JVM defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html -NOTE No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack

-

If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks

-

The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore

-

Default Value (complex option):

-
internode_encryption: none
-keystore: conf/.keystore
-keystore_password: cassandra
-truststore: conf/.truststore
-truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# algorithm: SunX509
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-# require_client_auth: false
-# require_endpoint_verification: false
-
-
-
-
-
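To enable the dc-level internode encryption mentioned above, the default block would be changed roughly as follows; the keystore and truststore paths and passwords must match the files you generated:

server_encryption_options:
    internode_encryption: dc
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra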

client_encryption_options

-

enable or disable client/server encryption.

-

Default Value (complex option):

-
enabled: false
-# If enabled and optional is set to true encrypted and unencrypted connections are handled.
-optional: false
-keystore: conf/.keystore
-keystore_password: cassandra
-# require_client_auth: false
-# Set trustore and truststore_password if require_client_auth is true
-# truststore: conf/.truststore
-# truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# algorithm: SunX509
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-
-
-
-

internode_compression

-

internode_compression controls whether traffic between nodes is -compressed. -Can be:

-
-
all
-
all traffic is compressed
-
dc
-
traffic between different datacenters is compressed
-
none
-
nothing is compressed.
-
-

Default Value: dc

-
-
-

inter_dc_tcp_nodelay

-

Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses.

-

Default Value: false

-
-
-

tracetype_query_ttl

-

TTL for different trace types used during logging of the repair process.

-

Default Value: 86400

-
-
-

tracetype_repair_ttl

-

Default Value: 604800

-
-
-

gc_log_threshold_in_ms

-

This option is commented out by default.

-

By default, Cassandra logs GC Pauses greater than 200 ms at INFO level -This threshold can be adjusted to minimize logging if necessary

-

Default Value: 200

-
-
-

enable_user_defined_functions

-

If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at INFO level. UDFs (user defined functions) are disabled by default. As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.

-

Default Value: false

-
-
-

enable_scripted_user_defined_functions

-

Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with “language javascript” or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false.

-

Default Value: false

-
-
-

enable_materialized_views

-

Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use.

-

Default Value: true

-
-
-

windows_timer_interval

-

The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals ‘clockres’ tool can confirm your system’s default -setting.

-

Default Value: 1

-
-
-

transparent_data_encryption_options

-

Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by the “key_alias” is the only key that will be used for encrypt operations; previously used keys can still (and should!) be in the keystore and will be used on decrypt operations (to handle the case of key rotation).

-

It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)

-

Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints

-

Default Value (complex option):

-
enabled: false
-chunk_length_kb: 64
-cipher: AES/CBC/PKCS5Padding
-key_alias: testing:1
-# CBC IV length for AES needs to be 16 bytes (which is also the default size)
-# iv_length: 16
-key_provider:
-  - class_name: org.apache.cassandra.security.JKSKeyProvider
-    parameters:
-      - keystore: conf/.keystore
-        keystore_password: cassandra
-        store_type: JCEKS
-        key_password: cassandra
-
-
-
-
-

tombstone_warn_threshold

-
-

SAFETY THRESHOLDS #

-

When executing a scan, within or across a partition, we need to keep the tombstones seen in memory so we can return them to the coordinator, which will use them to make sure other replicas also know about the deleted rows. With workloads that generate a lot of tombstones, this can cause performance problems and even exhaust the server heap. (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) Adjust the thresholds here if you understand the dangers and want to scan more tombstones anyway. These thresholds may also be adjusted at runtime using the StorageService mbean.

-

Default Value: 1000

-
-
-
-

tombstone_failure_threshold

-

Default Value: 100000

-
-
-

batch_size_warn_threshold_in_kb

-

Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability.

-

Default Value: 5

-
-
-

batch_size_fail_threshold_in_kb

-

Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.

-

Default Value: 50

-
-
-

unlogged_batch_across_partitions_warn_threshold

-

Log WARN on any batches not of type LOGGED that span across more partitions than this limit.

-

Default Value: 10

-
-
-

compaction_large_partition_warning_threshold_mb

-

Log a warning when compacting partitions larger than this value

-

Default Value: 100

-
-
-

gc_warn_threshold_in_ms

-

GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level

-

Default Value: 1000

-
-
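The safety thresholds above, collected into one sketch using the documented defaults:

tombstone_warn_threshold: 1000
tombstone_failure_threshold: 100000
batch_size_warn_threshold_in_kb: 5
batch_size_fail_threshold_in_kb: 50
unlogged_batch_across_partitions_warn_threshold: 10
compaction_large_partition_warning_threshold_mb: 100
gc_warn_threshold_in_ms: 1000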
-

max_value_size_in_mb

-

This option is commented out by default.

-

Maximum size of any value in SSTables. Safety measure to detect SSTable corruption early. Any value size larger than this threshold will result in marking an SSTable as corrupted. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

back_pressure_enabled

-

Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas.

-

Default Value: false

-
-
-

back_pressure_strategy

-

The back-pressure strategy applied. The default implementation, RateBasedBackPressure, takes three arguments: high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; if above high ratio, the rate limiting is increased by the given factor. Such factor is usually best configured between 1 and 10; use larger values for a faster recovery at the expense of potentially more dropped mutations. The rate limiting is applied according to the flow type: if FAST, it’s rate limited at the speed of the fastest replica; if SLOW, at the speed of the slowest one. New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and provide a public constructor accepting a Map<String, Object>.

-
-
-
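A hedged sketch of enabling the default strategy described above; the exact parameter keys (high_ratio, factor, flow) are an assumption based on this description and should be checked against the commented block in your cassandra.yaml:

back_pressure_enabled: true
back_pressure_strategy:
    - class_name: org.apache.cassandra.net.RateBasedBackPressure
      parameters:
        # assumed parameter names: high ratio, factor and flow type as described above
        - high_ratio: 0.90
          factor: 5
          flow: FAST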

otc_coalescing_strategy

-

This option is commented out by default.

-

Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won’t notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It’s not that bare metal -doesn’t benefit from coalescing messages, it’s that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details.

-

Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.

-

Default Value: DISABLED

-
-
-

otc_coalescing_window_us

-

This option is commented out by default.

-

How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled.

-

Default Value: 200

-
-
-

otc_coalescing_enough_coalesced_messages

-

This option is commented out by default.

-

Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.

-

Default Value: 8

-
-
-

otc_backlog_expiration_interval_ms

-

This option is commented out by default.

-

How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.

-

Default Value: 200

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/configuration/index.html b/src/doc/3.11.3/configuration/index.html deleted file mode 100644 index 172d2b0be..000000000 --- a/src/doc/3.11.3/configuration/index.html +++ /dev/null @@ -1,109 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

This section describes how to configure Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/contactus.html b/src/doc/3.11.3/contactus.html deleted file mode 100644 index 0f0af65c2..000000000 --- a/src/doc/3.11.3/contactus.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contact us" -doc-header-links: ' - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contact us

-

You can get in touch with the Cassandra community either via the mailing lists or the freenode IRC channels.

-
-

Mailing lists

-

The following mailing lists are available:

- -

Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe.

-
-
-

IRC

-

To chat with developers or users in real-time, join our channels on IRC freenode. The -following channels are available:

-
    -
  • #cassandra - for user questions and general discussions.
  • -
  • #cassandra-dev - strictly for questions or discussions related to Cassandra development.
  • -
  • #cassandra-builds - results of automated test builds.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/appendices.html b/src/doc/3.11.3/cql/appendices.html deleted file mode 100644 index 5635b1b88..000000000 --- a/src/doc/3.11.3/cql/appendices.html +++ /dev/null @@ -1,565 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Appendices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Appendices

-
-

Appendix A: CQL Keywords

-

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (but one can enclose a reserved keyword in double-quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d’être of these non-reserved keywords is convenience: a keyword is non-reserved when it was always easy for the parser to decide whether it was used as a keyword or not.

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
KeywordReserved?
ADDyes
AGGREGATEno
ALLno
ALLOWyes
ALTERyes
ANDyes
APPLYyes
ASno
ASCyes
ASCIIno
AUTHORIZEyes
BATCHyes
BEGINyes
BIGINTno
BLOBno
BOOLEANno
BYyes
CALLEDno
CLUSTERINGno
COLUMNFAMILYyes
COMPACTno
CONTAINSno
COUNTno
COUNTERno
CREATEyes
CUSTOMno
DATEno
DECIMALno
DELETEyes
DESCyes
DESCRIBEyes
DISTINCTno
DOUBLEno
DROPyes
ENTRIESyes
EXECUTEyes
EXISTSno
FILTERINGno
FINALFUNCno
FLOATno
FROMyes
FROZENno
FULLyes
FUNCTIONno
FUNCTIONSno
GRANTyes
IFyes
INyes
INDEXyes
INETno
INFINITYyes
INITCONDno
INPUTno
INSERTyes
INTno
INTOyes
JSONno
KEYno
KEYSno
KEYSPACEyes
KEYSPACESno
LANGUAGEno
LIMITyes
LISTno
LOGINno
MAPno
MODIFYyes
NANyes
NOLOGINno
NORECURSIVEyes
NOSUPERUSERno
NOTyes
NULLyes
OFyes
ONyes
OPTIONSno
ORyes
ORDERyes
PASSWORDno
PERMISSIONno
PERMISSIONSno
PRIMARYyes
RENAMEyes
REPLACEyes
RETURNSno
REVOKEyes
ROLEno
ROLESno
SCHEMAyes
SELECTyes
SETyes
SFUNCno
SMALLINTno
STATICno
STORAGEno
STYPEno
SUPERUSERno
TABLEyes
TEXTno
TIMEno
TIMESTAMPno
TIMEUUIDno
TINYINTno
TOyes
TOKENyes
TRIGGERno
TRUNCATEyes
TTLno
TUPLEno
TYPEno
UNLOGGEDyes
UPDATEyes
USEyes
USERno
USERSno
USINGyes
UUIDno
VALUESno
VARCHARno
VARINTno
WHEREyes
WITHyes
WRITETIMEno
-
-
-

Appendix B: CQL Reserved Types

-

The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name.

- --- - - - - - - - - - - - - - - - - - - -
type
bitstring
byte
complex
enum
interval
macaddr
-
-
-

Appendix C: Dropping Compact Storage

-

Starting with version 4.0, Thrift and COMPACT STORAGE are no longer supported.

-

‘ALTER ... DROP COMPACT STORAGE’ statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables:

-
    -
  • CQL-created Compact Tables that have no clustering columns, will expose an -additional clustering column column1 with UTF8Type.
  • -
  • CQL-created Compact Tables that had no regular columns, will expose a -regular column value with BytesType.
  • -
  • For CQL-Created Compact Tables, all columns originally defined as regular will become static
  • -
  • CQL-created Compact Tables that have clustering but have no regular -columns will have an empty value column (of EmptyType)
  • -
  • SuperColumn Tables (can only be created through Thrift) will expose -a compact value map with an empty name.
  • -
  • Thrift-created Compact Tables will have types corresponding to their -Thrift definition.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/changes.html b/src/doc/3.11.3/cql/changes.html deleted file mode 100644 index 0755ee52b..000000000 --- a/src/doc/3.11.3/cql/changes.html +++ /dev/null @@ -1,353 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Changes

-

The following describes the changes in each version of CQL.

-
-

3.4.4

-
    -
  • ALTER TABLE ALTER has been removed; a column’s type may not be changed after creation (CASSANDRA-12443).
  • -
  • ALTER TYPE ALTER has been removed; a field’s type may not be changed after creation (CASSANDRA-12443).
  • -
-
-
-

3.4.3

-
    -
  • Adds a new duration data type (CASSANDRA-11873).
  • -
  • Support for GROUP BY (CASSANDRA-10707).
  • -
  • Adds a DEFAULT UNSET option for INSERT JSON to ignore omitted columns (CASSANDRA-11424).
  • -
  • Allows null as a legal value for TTL on insert and update. It will be treated as equivalent to inserting a 0 (CASSANDRA-12216).
  • -
-

-
-
-

3.4.2

-
    -
  • If a table has a non zero default_time_to_live, then explicitly specifying a TTL of 0 in an INSERT or -UPDATE statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels -the default_time_to_live). This wasn’t the case before and the default_time_to_live was applied even though a -TTL had been explicitly set.
  • -
  • ALTER TABLE ADD and DROP now allow multiple columns to be added/removed.
  • -
  • New PER PARTITION LIMIT option for SELECT statements (see CASSANDRA-7017.
  • -
  • User-defined functions can now instantiate UDTValue and TupleValue instances via the -new UDFContext interface (see CASSANDRA-10818.
  • -
  • User-defined types may now be stored in a non-frozen form, allowing individual fields to be updated and -deleted in UPDATE statements and DELETE statements, respectively. (CASSANDRA-7423).
  • -
-
-
-

3.4.1

-
    -
  • Adds CAST functions.
  • -
-
-
-

3.4.0

-
    -
  • Support for materialized views.
  • -
  • DELETE support for inequality expressions and IN restrictions on any primary key columns.
  • -
  • UPDATE support for IN restrictions on any primary key columns.
  • -
-
-
-

3.3.1

-
    -
  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X.
  • -
-
-
-

3.3.0

-
    -
  • User-defined functions and aggregates are now supported.
  • -
  • Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings.
  • -
  • Introduces Roles to supersede user based authentication and access control
  • -
  • New date, time, tinyint and smallint data types have been added.
  • -
  • JSON support has been added
  • -
  • Adds new time conversion functions and deprecate dateOf and unixTimestampOf.
  • -
-
-
-

3.2.0

-
    -
  • User-defined types supported.
  • -
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the -keys() function
  • -
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • -
  • Tuple types were added to hold fixed-length sets of typed positional fields.
  • -
  • DROP INDEX now supports optionally specifying a keyspace.
  • -
-
-
-

3.1.7

-
    -
  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations -of clustering columns.
  • -
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, -respectively.
  • -
-
-
-

3.1.6

-
    -
  • A new uuid() method has been added.
  • -
  • Support for DELETE ... IF EXISTS syntax.
  • -
-
-
-

3.1.5

-
    -
  • It is now possible to group clustering columns in a relation, see WHERE clauses.
  • -
  • Added support for static columns.
  • -
-
-
-

3.1.4

-
    -
  • CREATE INDEX now allows specifying options when creating CUSTOM indexes.
  • -
-
-
-

3.1.3

-
    -
  • Millisecond precision formats have been added to the timestamp parser.
  • -
-
-
-

3.1.2

-
    -
  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or keyspace/table one), you will now need to double quote them.
  • -
-
-
-

3.1.1

-
    -
  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • -
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable -will be a list of whatever type c is.
  • -
  • It is now possible to use named bind variables (using :name instead of ?).
  • -
-
-
-

3.1.0

-
    -
  • ALTER TABLE DROP option added.
  • -
  • SELECT statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
  • -
  • CREATE statements for KEYSPACE, TABLE and INDEX now supports an IF NOT EXISTS condition. -Similarly, DROP statements support a IF EXISTS condition.
  • -
  • INSERT statements optionally supports a IF NOT EXISTS condition and UPDATE supports IF conditions.
  • -
-
-
-

3.0.5

-
    -
  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626.
  • -
-
-
-

3.0.4

-
    -
  • Updated the syntax for custom secondary indexes.
  • -
  • Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not -correct (the order was not the one of the type of the partition key). Instead, the token method should always -be used for range queries on the partition key (see WHERE clauses).
  • -
-
-
-

3.0.3

- -
-
-

3.0.2

-
    -
  • Type validation for the constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now stricter. See the data types section for details on which constant is allowed for which type.
  • -
  • The type validation fix of the previous point has led to the introduction of blob constants to allow the input of blobs. Do note that while the input of blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated and will be removed in a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • -
  • A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is -now also allowed in select clauses. See the section on functions for details.
  • -
-
-
-

3.0.1

-
    -
  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense that date strings are not valid timeuuid values, and it was thus resulting in confusing behaviors. However, the following new methods have been added to help working with timeuuid: now, minTimeuuid, maxTimeuuid, dateOf and unixTimestampOf.
  • -
  • Float constants now support the exponent notation. In other words, 4.2E10 is now a valid floating point value.
  • -
-
-
-

Versioning

-

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version.

version | description
--------|------------
Major   | The major version must be bumped when backward-incompatible changes are introduced. This should rarely occur.
Minor   | Minor version increments occur when new, but backward-compatible, functionality is introduced.
Patch   | The patch version is incremented when bugs are fixed.
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/ddl.html b/src/doc/3.11.3/cql/ddl.html deleted file mode 100644 index c39b82903..000000000 --- a/src/doc/3.11.3/cql/ddl.html +++ /dev/null @@ -1,765 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Definition" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Definition

-

CQL stores data in tables, whose schema defines the layout of the data in the table, and those tables are grouped in keyspaces. A keyspace defines a number of options that apply to all the tables it contains, the most prominent of which is the replication strategy used by the keyspace. It is generally encouraged to use one keyspace per application, and thus many clusters may define only one keyspace.

-

This section describes the statements used to create, modify, and remove those keyspaces and tables.

-
-

Common definitions

-

The names of the keyspaces and tables are defined by the following grammar:

-
-keyspace_name ::=  name
-table_name    ::=  [ keyspace_name '.' ] name
-name          ::=  unquoted_name | quoted_name
-unquoted_name ::=  re('[a-zA-Z_0-9]{1, 48}')
-quoted_name   ::=  '"' unquoted_name '"'
-
-

Both keyspace and table names should be comprised only of alphanumeric characters, cannot be empty, and are limited in size to 48 characters (that limit exists mostly to keep filenames, which may include the keyspace and table name, under the limits of certain file systems). By default, keyspace and table names are case insensitive (myTable is equivalent to mytable) but case sensitivity can be forced by using double-quotes ("myTable" is different from mytable).

-

Further, a table is always part of a keyspace, and a table name can be provided fully-qualified by the keyspace it is part of. If it is not fully-qualified, the table is assumed to be in the current keyspace (see USE statement).

-

Further, the valid names for columns are simply defined as:

-
-column_name ::=  identifier
-
-

We also define the notion of statement options for use in the following section:

-
-options ::=  option ( AND option )*
-option  ::=  identifier '=' ( identifier | constant | map_literal )
-
-
-
-

CREATE KEYSPACE

-

A keyspace is created using a CREATE KEYSPACE statement:

-
-create_keyspace_statement ::=  CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options
-
-

For instance:

-
CREATE KEYSPACE Excelsior
-           WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE Excalibur
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-            AND durable_writes = false;
-
-
-

The supported options are:

name           | kind   | mandatory | default | description
---------------|--------|-----------|---------|------------
replication    | map    | yes       |         | The replication strategy and options to use for the keyspace (see details below).
durable_writes | simple | no        | true    | Whether to use the commit log for updates on this keyspace (disable this option at your own risk!).
-

The replication property is mandatory and must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on which replication strategy is used. By default, Cassandra supports the following 'class' values:

-
    -
  • 'SimpleStrategy': A simple strategy that defines a replication factor for the whole cluster. The only sub-option supported is 'replication_factor', which defines that replication factor and is mandatory.
  • -
  • 'NetworkTopologyStrategy': A replication strategy that allows setting the replication factor independently for each data center. The rest of the sub-options are key-value pairs, where each key is a data-center name and its value is the associated replication factor.
  • -
-

Attempting to create a keyspace that already exists will return an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the keyspace already exists.

-
-
-

USE

-

The USE statement allows changing the current keyspace (for the connection on which it is executed). A number of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed with a keyspace name). A USE statement simply takes the keyspace to use as its argument:

-
-use_statement ::=  USE keyspace_name
-
-
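For instance (reusing the keyspace created in the earlier CREATE KEYSPACE example):

USE Excelsior;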
-
-

ALTER KEYSPACE

-

An ALTER KEYSPACE statement allows modifying the options of a keyspace:

-
-alter_keyspace_statement ::=  ALTER KEYSPACE keyspace_name WITH options
-
-

For instance:

-
ALTER KEYSPACE Excelsior
-          WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-
-

The supported options are the same as for creating a keyspace.

-
-
-

DROP KEYSPACE

-

Dropping a keyspace can be done using the DROP KEYSPACE statement:

-
-drop_keyspace_statement ::=  DROP KEYSPACE [ IF EXISTS ] keyspace_name
-
-

For instance:

-
DROP KEYSPACE Excelsior;
-
-
-

Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UDTs and functions in it, and all the data contained in those tables.

-

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-

CREATE TABLE

-

Creating a new table uses the CREATE TABLE statement:

-
-create_table_statement ::=  CREATE TABLE [ IF NOT EXISTS ] table_name
-                            '('
-                                column_definition
-                                ( ',' column_definition )*
-                                [ ',' PRIMARY KEY '(' primary_key ')' ]
-                            ')' [ WITH table_options ]
-column_definition      ::=  column_name cql_type [ STATIC ] [ PRIMARY KEY]
-primary_key            ::=  partition_key [ ',' clustering_columns ]
-partition_key          ::=  column_name
-                            | '(' column_name ( ',' column_name )* ')'
-clustering_columns     ::=  column_name ( ',' column_name )*
-table_options          ::=  COMPACT STORAGE [ AND table_options ]
-                            | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ]
-                            | options
-clustering_order       ::=  column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )*
-
-

For instance:

-
CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records'
-   AND read_repair_chance = 1.0;
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-CREATE TABLE loads (
-    machine inet,
-    cpu int,
-    mtime timeuuid,
-    load float,
-    PRIMARY KEY ((machine, cpu), mtime)
-) WITH CLUSTERING ORDER BY (mtime DESC);
-
-
-

A CQL table has a name and is composed of a set of rows. Creating a table amounts to defining which columns the rows will be composed of, which of those columns make up the primary key, as well as optional options for the table.

-

Attempting to create an already existing table will return an error unless the IF NOT EXISTS directive is used. If -it is used, the statement will be a no-op if the table already exists.

-
-

Column definitions

-

Every row in a CQL table has a set of predefined columns defined at the time of table creation (or added later using an alter statement).

-

A column_definition is primarily comprised of the name of the column and its type, which restricts which values are accepted for that column. Additionally, a column definition can have the following modifiers:

-
-
STATIC
-
it declares the column as being a static column.
-
PRIMARY KEY
-
it declares the column as being the sole component of the primary key of the table.
-
-
-

Static columns

-

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same partition key). For instance:

-
CREATE TABLE t (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-
-INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-SELECT * FROM t;
-   pk | t | v      | s
-  ----+---+--------+-----------
-   0  | 0 | 'val0' | 'static1'
-   0  | 1 | 'val1' | 'static1'
-
-
-

As can be seen, the s value is the same (static1) for both of the rows in the partition (the partition key in that example being pk, and both rows being in that same partition): the second insertion has overridden the value of s.

-

The use of static columns has the following restrictions:

-
    -
  • tables with the COMPACT STORAGE option (see below) cannot use them.
  • -
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition -has only one row, and so every column is inherently static).
  • -
  • only non PRIMARY KEY columns can be static.
  • -
-
-
-
-

The Primary key

-

Within a table, a row is uniquely identified by its PRIMARY KEY, and hence all tables must define a PRIMARY KEY (and only one). A PRIMARY KEY definition is composed of one or more of the columns defined in the table. Syntactically, the primary key is defined by the keywords PRIMARY KEY followed by a comma-separated list of the column names composing it within parentheses; if the primary key has only one column, one can alternatively follow that column definition with the PRIMARY KEY keywords. The order of the columns in the primary key definition matters.

-

A CQL primary key is composed of 2 parts:

-
    -
  • the partition key part. It is the first component of the primary key definition. It can be a single column or, using additional parentheses, multiple columns. A table always has at least a partition key; the smallest possible table definition is:

    -
    CREATE TABLE t (k text PRIMARY KEY);
    -
    -
    -
  • -
  • the clustering columns. Those are the columns after the first component of the primary key definition, and the order of those columns defines the clustering order.

    -
  • -
-

Some examples of primary key definitions are:

-
    -
  • PRIMARY KEY (a): a is the partition key and there is no clustering columns.
  • -
  • PRIMARY KEY (a, b, c) : a is the partition key and b and c are the clustering columns.
  • -
  • PRIMARY KEY ((a, b), c) : a and b compose the partition key (this is often called a composite partition -key) and c is the clustering column.
  • -
-
-

The partition key

-

Within a table, CQL defines the notion of a partition. A partition is simply the set of rows that share the same value for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same partition only if they have the same values for all those partition key columns. So for instance, given the following table definition and content:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    d int,
-    PRIMARY KEY ((a, b), c, d)
-);
-
-SELECT * FROM t;
-   a | b | c | d
-  ---+---+---+---
-   0 | 0 | 0 | 0    // row 1
-   0 | 0 | 1 | 1    // row 2
-   0 | 1 | 2 | 2    // row 3
-   0 | 1 | 3 | 3    // row 4
-   1 | 1 | 4 | 4    // row 5
-
-
-

row 1 and row 2 are in the same partition, row 3 and row 4 are also in the same partition (but a -different one) and row 5 is in yet another partition.

-

Note that a table always has a partition key, and that if the table has no clustering columns, then every partition of that table is only comprised of a single row (since the primary key uniquely identifies rows and the primary key is equal to the partition key if there are no clustering columns).

-

The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored on the same set of replica nodes. In other words, the partition key of a table defines which rows will be co-located in the cluster, and it is thus important to choose your partition key wisely so that rows that need to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of nodes).

-

Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot.

-

Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done atomically and in isolation, which is not the case across partitions.

-

The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they are executed.

-
-
-

The clustering columns

-

The clustering columns of a table define the clustering order for the partitions of that table. For a given partition, all the rows are physically ordered inside Cassandra by that clustering order. For instance, given:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    PRIMARY KEY (a, b, c)
-);
-
-SELECT * FROM t;
-   a | b | c
-  ---+---+---
-   0 | 0 | 4     // row 1
-   0 | 1 | 9     // row 2
-   0 | 2 | 2     // row 3
-   0 | 3 | 3     // row 4
-
-
-

then the rows (which all belong to the same partition) are all stored internally in the order of the values of their b column (the order they are displayed above). So where the partition key of the table allows grouping rows on the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the efficient retrieval of a range of rows within a partition (for instance, in the example above, SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3).

-
-
-
-

Table options

-

A CQL table has a number of options that can be set at creation (and, for most of them, altered later). These options are specified after the WITH keyword.

-

Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the COMPACT STORAGE option and the CLUSTERING ORDER option. Those, as well as the other -options of a table are described in the following sections.

-
-

Compact tables

-
-

Warning

-

Since Cassandra 3.0, compact tables have the exact same internal layout as non-compact ones (for the same schema, obviously), and declaring a table compact only creates artificial limitations on the table definition and usage that are necessary to ensure backward compatibility with the deprecated Thrift API. And as COMPACT STORAGE cannot, as of Cassandra 3.11.3, be removed, it is strongly discouraged to create new tables with the COMPACT STORAGE option.

-
-

A compact table is one defined with the COMPACT STORAGE option. This option is mainly targeted towards backward compatibility for definitions created before CQL version 3 (see www.datastax.com/dev/blog/thrift-to-cql3 for more details) and shouldn’t be used for new tables. Declaring a table with this option creates limitations for the table which are largely arbitrary but necessary for backward compatibility with the (deprecated) Thrift API. Amongst those limitations:

-
    -
  • a compact table cannot use collections nor static columns.
  • -
  • if a compact table has at least one clustering column, then it must have exactly one column outside of the primary key ones. This implies, in particular, that you cannot add or remove columns after creation.
  • -
  • a compact table is limited in the indexes it can create, and no materialized view can be created on it.
  • -
-
-
-

Reversing the clustering order

-

The clustering order of a table is defined by the clustering columns of that table. By default, that ordering is based on the natural order of those clustering columns, but the CLUSTERING ORDER option allows changing that clustering order to use the reverse natural order for some (potentially all) of the columns.

-

The CLUSTERING ORDER option takes a comma-separated list of the clustering columns, each with an ASC (for ascending, i.e. the natural order) or DESC (for descending, i.e. the reverse natural order) modifier. Note in particular that the default (if the CLUSTERING ORDER option is not used) is strictly equivalent to using the option with all clustering columns using the ASC modifier.

-

Note that this option is basically a hint for the storage engine to change the order in which it stores the rows, but it has three visible consequences (a sketch follows this list):

-
-
1. It limits which ORDER BY clauses are allowed for selects on that table. You can only order results by the clustering order or the reverse clustering order. Meaning that if a table has two clustering columns a and b and you defined WITH CLUSTERING ORDER (a DESC, b ASC), then in queries you will be allowed to use ORDER BY (a DESC, b ASC) and (reverse clustering order) ORDER BY (a ASC, b DESC), but not ORDER BY (a ASC, b ASC) (nor ORDER BY (a DESC, b DESC)).
2. It also changes the default order of results when queried (if no ORDER BY is provided). Results are always returned in clustering order (within a partition).
3. It has a small performance impact on some queries, as queries in reverse clustering order are slower than ones in forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of your columns (which is common with time series, for instance, where you often want data from the newest to the oldest), it is an optimization to declare a descending clustering order.
-
-
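An illustrative sketch of the first consequence; the events_by_day table is hypothetical:

CREATE TABLE events_by_day (
    day date,
    ts timestamp,
    payload text,
    PRIMARY KEY (day, ts)
) WITH CLUSTERING ORDER BY (ts DESC);

-- Allowed: the declared clustering order, or its exact reverse
SELECT * FROM events_by_day WHERE day = '2011-02-03' ORDER BY ts DESC;
SELECT * FROM events_by_day WHERE day = '2011-02-03' ORDER BY ts ASC;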
-
-

Other table options

-
-

Todo

-

review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance)

-
-

A table supports the following options:

option                     | kind   | default   | description
---------------------------|--------|-----------|------------
comment                    | simple | none      | A free-form, human-readable comment.
read_repair_chance         | simple | 0.1       | The probability with which to query extra nodes (i.e. more nodes than required by the consistency level) for the purpose of read repairs.
dclocal_read_repair_chance | simple | 0         | The probability with which to query extra nodes (i.e. more nodes than required by the consistency level) belonging to the same data center as the read coordinator, for the purpose of read repairs.
gc_grace_seconds           | simple | 864000    | Time to wait before garbage collecting tombstones (deletion markers).
bloom_filter_fp_chance     | simple | 0.00075   | The target probability of false positives for the sstable bloom filters. Said bloom filters will be sized to provide the requested probability (thus lowering this value impacts the size of bloom filters in memory and on disk).
default_time_to_live       | simple | 0         | The default expiration time (“TTL”) in seconds for a table.
compaction                 | map    | see below | Compaction options.
compression                | map    | see below | Compression options.
caching                    | map    | see below | Caching options.
-
-
Compaction options
-

The compaction options must at least define the 'class' sub-option, which defines the compaction strategy class to use. The default supported classes are 'SizeTieredCompactionStrategy' (STCS), 'LeveledCompactionStrategy' (LCS) and 'TimeWindowCompactionStrategy' (TWCS) ('DateTieredCompactionStrategy' is also supported but is deprecated, and 'TimeWindowCompactionStrategy' should be preferred instead). A custom strategy can be provided by specifying the full class name as a string constant.

-

All default strategies support a number of common options, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: STCS, LCS and TWCS).

-
-
-
Compression options
-

The compression options define if and how the sstables of the table are compressed. The following sub-options are -available:

Option             | Default       | Description
-------------------|---------------|------------
class              | LZ4Compressor | The compression algorithm to use. Default compressors are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
enabled            | true          | Enable/disable sstable compression.
chunk_length_in_kb | 64            | On disk, SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increase the minimum size of data to be read from disk for a read.
crc_check_chance   | 1.0           | When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replicas. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking, or to 0.5, for instance, to check them on every other read.
-

For instance, to create a table with LZ4Compressor and a chunk_length_in_kb of 4KB:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
-
-
-
-
-
Caching options
-

The caching options allow configuring both the key cache and the row cache for the table. The following sub-options are available:

Option             | Default | Description
-------------------|---------|------------
keys               | ALL     | Whether to cache keys (“key cache”) for this table. Valid values are: ALL and NONE.
rows_per_partition | NONE    | The number of rows to cache per partition (“row cache”). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.
-

For instance, to create a table with both a key cache and 10 rows cached per partition:

-
CREATE TABLE simple (
-    id int,
-    key text,
-    value text,
-    PRIMARY KEY (key, value)
-) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10};
-
-
-
-
-
Other considerations:
-
    -
  • Adding new columns (see ALTER TABLE below) is a constant time operation. There is thus no need to try to -anticipate future usage when creating a table.
  • -
-
-
-
-
-
-

ALTER TABLE

-

Altering an existing table uses the ALTER TABLE statement:

-
-alter_table_statement   ::=  ALTER TABLE table_name alter_table_instruction
-alter_table_instruction ::=  ADD column_name cql_type ( ',' column_name cql_type )*
-                             | DROP column_name ( column_name )*
-                             | WITH options
-
-

For instance:

-
ALTER TABLE addamsFamily ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table'
-       AND read_repair_chance = 0.2;
-
-
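A column can likewise be dropped; this is illustrative and assumes the gravesite column added above is no longer needed:

ALTER TABLE addamsFamily DROP gravesite;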
-

The ALTER TABLE statement can:

-
    -
  • Add new column(s) to the table (through the ADD instruction). Note that the primary key of a table cannot be changed and thus newly added columns will, by extension, never be part of the primary key. Also note that compact tables have restrictions regarding column addition. Note that this is a constant-time operation (in the amount of data the cluster contains).
  • -
  • Remove column(s) from the table. This drops both the column and all its content, but note that while the column becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings below. Due to lazy removal, the alteration itself is a constant-time operation (in the amount of data removed or contained in the cluster).
  • -
  • Change some of the table options (through the WITH instruction). The supported options are the same as when creating a table (apart from COMPACT STORAGE and CLUSTERING ORDER, which cannot be changed after creation). Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.
  • -
-
-

Warning

-

Dropping a column assumes that the timestamps used for the values of this column are “real” timestamps in microseconds. Using “real” timestamps in microseconds is the default and is strongly recommended, but as Cassandra allows the client to provide any timestamp on any table, it is theoretically possible to use another convention. Please be aware that if you do so, dropping a column will not work correctly.

-
-
-

Warning

-

Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one, unless the type of the dropped column was a (non-frozen) collection (due to an internal technical limitation).

-
-
-
-

DROP TABLE

-

Dropping a table uses the DROP TABLE statement:

-
-drop_table_statement ::=  DROP TABLE [ IF EXISTS ] table_name
-
-
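For instance (dropping the table created in the CREATE TABLE example above):

DROP TABLE IF EXISTS monkeySpecies;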

Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.

-

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the -operation is a no-op.

-
-
-

TRUNCATE

-

A table can be truncated using the TRUNCATE statement:

-
-truncate_statement ::=  TRUNCATE [ TABLE ] table_name
-
-
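For instance, the following two statements are equivalent:

TRUNCATE monkeySpecies;
TRUNCATE TABLE monkeySpecies;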

Note that TRUNCATE TABLE foo is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the TABLE keyword can be omitted.

-

Truncating a table permanently removes all existing data from the table, but without removing the table itself.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/definitions.html b/src/doc/3.11.3/cql/definitions.html deleted file mode 100644 index ce6a596fa..000000000 --- a/src/doc/3.11.3/cql/definitions.html +++ /dev/null @@ -1,312 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Definitions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Definitions

-
-

Conventions

-

To aid in specifying the CQL syntax, we will use the following conventions in this document:

-
    -
  • Language rules will be given in an informal BNF variant notation. In particular, we’ll use square brackets ([ item ]) for optional items, and * and + for repeated items (where + implies at least one).
  • -
  • The grammar will also use the following convention for convenience: non-terminal terms will be lowercase (and link to their definition) while terminal keywords will be provided in “all caps”. Note however that keywords are identifiers (see Identifiers and keywords) and are thus case insensitive in practice. We will also define some early constructions using regexp, which we’ll indicate with re(<some regular expression>).
  • -
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the comma on the last column definition in a CREATE TABLE statement is optional but supported if present, even though the grammar in this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
  • -
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.
  • -
-
-
-

Identifiers and keywords

-

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token -matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

-

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in Appendix A: CQL Keywords.

-

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

-

There is a second kind of identifier, called quoted identifiers, defined by enclosing an arbitrary sequence of characters (non-empty) in double-quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column (note that doing so is not particularly advised), while select would raise a parsing error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is however equivalent to the unquoted identifier obtained by removing the double-quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.

-
-

Note

-

Quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional update, the server will respond with a result-set containing a special result named "[applied]". If you’ve declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred, but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by square brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

-
-

More formally, we have:

-
-identifier          ::=  unquoted_identifier | quoted_identifier
-unquoted_identifier ::=  re('[a-zA-Z][a-zA-Z0-9_]*')
-quoted_identifier   ::=  '"' (any character where " can appear if doubled)+ '"'
-
-
-
-

Constants

-

CQL defines the following kinds of constants:

-
-constant ::=  string | integer | float | boolean | uuid | blob | NULL
-string   ::=  '\'' (any character where ' can appear if doubled)+ '\''
-              '$$' (any character other than '$$') '$$'
-integer  ::=  re('-?[0-9]+')
-float    ::=  re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY
-boolean  ::=  TRUE | FALSE
-uuid     ::=  hex{8}-hex{4}-hex{4}-hex{4}-hex{12}
-hex      ::=  re("[0-9a-fA-F]")
-blob     ::=  '0' ('x' | 'X') hex+
-
-

In other words:

-
    -
  • A string constant is an arbitrary sequence of characters enclosed by single-quotes ('). A single-quote can be included by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted identifiers, which use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence of characters by two dollar characters, in which case single-quotes can be used without escaping ($$It's raining today$$). That latter form is often used when defining user-defined functions, to avoid having to escape single-quote characters in the function body (as they are more likely to occur than $$).
  • -
  • Integer, float and boolean constants are defined as expected. Note however that float allows the special NaN and Infinity constants.
  • -
  • CQL supports UUID constants.
  • -
  • Blob content is provided in hexadecimal and prefixed by 0x.
  • -
  • The special NULL constant denotes the absence of value.
  • -
-
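A few hedged examples of these constant forms in context; the samples table and its columns are hypothetical:

INSERT INTO samples (id, note, quote, ratio, ok, raw)
VALUES (123e4567-e89b-12d3-a456-426655440000,  -- uuid constant
        'It''s raining today',                 -- string with an escaped single-quote
        $$It's raining today$$,                -- dollar-quoted string, no escaping needed
        4.2E10,                                -- float constant in exponent notation
        true,                                  -- boolean constant
        0xCAFE);                               -- blob constant in hexadecimal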

For how these constants are typed, see the Data Types section.

-
-
-

Terms

-

CQL has the notion of a term, which denotes the kind of values that CQL supports. Terms are defined by:

-
-term          ::=  constant | literal | function_call | type_hint | bind_marker
-literal       ::=  collection_literal | udt_literal | tuple_literal
-function_call ::=  identifier '(' [ term (',' term)* ] ')'
-type_hint     ::=  '(' cql_type ')' term
-bind_marker   ::=  '?' | ':' identifier
-
-

A term is thus one of:

-
    -
  • A constant.
  • -
  • A literal for either a collection, a user-defined type or a tuple -(see the linked sections for details).
  • -
  • A function call: see the section on functions for details on which native functions exist and how to define your own user-defined ones.
  • -
  • A type hint: see the related section for details.
  • -
  • A bind marker, which denotes a variable to be bound at execution time. See the section on Prepared Statements -for details. A bind marker can be either anonymous (?) or named (:some_name). The latter form provides a more -convenient way to refer to the variable for binding it and should generally be preferred.
  • -
-
-
-

Comments

-

A comment in CQL is a line beginning with either a double dash (--) or a double slash (//).

-

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-
-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-
-
-
-
-

Statements

-

CQL consists of statements that can be divided into the following categories:

  • data definition statements
  • data manipulation statements
  • secondary index statements
  • materialized view statements
  • role and permission statements
  • user-defined function statements
  • user-defined type statements
  • trigger statements

All the statements are listed below and are described in the rest of this documentation (see links above):

-
-cql_statement                ::=  statement [ ';' ]
-statement                    ::=  ddl_statement
-                                  | dml_statement
-                                  | secondary_index_statement
-                                  | materialized_view_statement
-                                  | role_or_permission_statement
-                                  | udf_statement
-                                  | udt_statement
-                                  | trigger_statement
-ddl_statement                ::=  use_statement
-                                  | create_keyspace_statement
-                                  | alter_keyspace_statement
-                                  | drop_keyspace_statement
-                                  | create_table_statement
-                                  | alter_table_statement
-                                  | drop_table_statement
-                                  | truncate_statement
-dml_statement                ::=  select_statement
-                                  | insert_statement
-                                  | update_statement
-                                  | delete_statement
-                                  | batch_statement
-secondary_index_statement    ::=  create_index_statement
-                                  | drop_index_statement
-materialized_view_statement  ::=  create_materialized_view_statement
-                                  | drop_materialized_view_statement
-role_or_permission_statement ::=  create_role_statement
-                                  | alter_role_statement
-                                  | drop_role_statement
-                                  | grant_role_statement
-                                  | revoke_role_statement
-                                  | list_roles_statement
-                                  | grant_permission_statement
-                                  | revoke_permission_statement
-                                  | list_permissions_statement
-                                  | create_user_statement
-                                  | alter_user_statement
-                                  | drop_user_statement
-                                  | list_users_statement
-udf_statement                ::=  create_function_statement
-                                  | drop_function_statement
-                                  | create_aggregate_statement
-                                  | drop_aggregate_statement
-udt_statement                ::=  create_type_statement
-                                  | alter_type_statement
-                                  | drop_type_statement
-trigger_statement            ::=  create_trigger_statement
-                                  | drop_trigger_statement
-
-
-
-

Prepared Statements

-

CQL supports prepared statements. Prepared statements are an optimization that allows parsing a query only once but executing it multiple times with different concrete values.

-

Any statement that uses at least one bind marker (see bind_marker) will need to be prepared, after which the statement can be executed by providing concrete values for each of its markers. The exact details of how a statement is prepared and then executed depend on the CQL driver used, and you should refer to your driver’s documentation.

-
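As a hedged illustration, the statement text below (against a hypothetical posts table) would be prepared once, then executed repeatedly with different values bound to the named markers :uid and :since:

SELECT entry_title, content
  FROM posts
 WHERE userid = :uid AND posted_at >= :since;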
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/dml.html b/src/doc/3.11.3/cql/dml.html deleted file mode 100644 index dd751b518..000000000 --- a/src/doc/3.11.3/cql/dml.html +++ /dev/null @@ -1,558 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Manipulation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Manipulation

-

This section describes the statements supported by CQL to insert, update, delete and query data.

-
-

SELECT

-

Querying data from tables is done using a SELECT statement:

-
-select_statement ::=  SELECT [ JSON | DISTINCT ] ( select_clause | '*' )
-                      FROM table_name
-                      [ WHERE where_clause ]
-                      [ GROUP BY group_by_clause ]
-                      [ ORDER BY ordering_clause ]
-                      [ PER PARTITION LIMIT (integer | bind_marker) ]
-                      [ LIMIT (integer | bind_marker) ]
-                      [ ALLOW FILTERING ]
-select_clause    ::=  selector [ AS identifier ] ( ',' selector [ AS identifier ] )
-selector         ::=  column_name
-                      | term
-                      | CAST '(' selector AS cql_type ')'
-                      | function_name '(' [ selector ( ',' selector )* ] ')'
-                      | COUNT '(' '*' ')'
-where_clause     ::=  relation ( AND relation )*
-relation         ::=  column_name operator term
-                      '(' column_name ( ',' column_name )* ')' operator tuple_literal
-                      TOKEN '(' column_name ( ',' column_name )* ')' operator term
-operator         ::=  '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY
-group_by_clause  ::=  column_name ( ',' column_name )*
-ordering_clause  ::=  column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )*
-
-

For instance:

-
SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT (*) AS user_count FROM users;
-
-
-

The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of the rows matching the request, where each row contains the values for the selection corresponding to the query. Additionally, functions, including aggregation ones, can be applied to the result.

-

A SELECT statement contains at least a selection clause and the name of the table on which the selection is made (note that CQL does not support joins or sub-queries, and thus a select statement only applies to a single table). In most cases, a select will also have a where clause, and it can optionally have additional clauses to order or limit the results. Lastly, queries that require filtering can be allowed if the ALLOW FILTERING flag is provided.

-
-

Selection clause

-

The select_clause determines which columns need to be queried and returned in the result-set, as well as any transformation to apply to this result before returning. It consists of a comma-separated list of selectors or, alternatively, of the wildcard character (*) to select all the columns defined in the table.

-
-

Selectors

-

A selector can be one of:

-
    -
  • A column name of the table selected, to retrieve the values for that column.
  • -
  • A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the -corresponding column of the result-set will simply have the value of this term for every row returned).
  • -
  • A casting, which allows converting a nested selector to a (compatible) type.
  • -
  • A function call, where the arguments are selectors themselves. See the section on functions for more details.
  • -
  • The special call COUNT(*) to the COUNT function, which counts all non-null results.
  • -
-
-
-

Aliases

-

Every top-level selector can also be aliased (using AS). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:

-
// Without alias
-SELECT intAsBlob(4) FROM t;
-
-//  intAsBlob(4)
-// --------------
-//  0x00000004
-
-// With alias
-SELECT intAsBlob(4) AS four FROM t;
-
-//  four
-// ------------
-//  0x00000004
-
-
-
-

Note

-

Currently, aliases aren’t recognized anywhere else in the statement where they are used (not in the WHERE clause, not in the ORDER BY clause, ...). You must use the original column name instead.

-
-
-
-

WRITETIME and TTL function

-

Selection supports two special functions (that aren’t allowed anywhere else): WRITETIME and TTL. Both functions take only one argument, and that argument must be a column name (so for instance TTL(3) is invalid).

-

Those functions allow retrieving meta-information that is stored internally for each column, namely:

-
    -
  • the timestamp of the value of the column for WRITETIME.
  • -
  • the remaining time to live (in seconds) for the value of the column if it is set to expire (and null otherwise).
  • -
-
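For instance (hedged; this reuses the users table from the earlier SELECT examples):

-- Retrieve the write timestamp and remaining TTL of the occupation column
SELECT WRITETIME(occupation), TTL(occupation) FROM users WHERE userid = 199;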
-
-
-

The WHERE clause

-

The WHERE clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the PRIMARY KEY and/or have a secondary index defined on them.

-

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered as an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations on them are restricted to those that allow selecting a contiguous (for the ordering) set of rows. For instance, given:

-
CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-
-
-

The following query is allowed:

-
SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND blog_title='John''s Blog'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):

-
// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, -rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -token(-1) > token(0) in particular). Example:

-
SELECT * FROM posts
- WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-
-
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full -primary key.

-

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-
-
-

will request all rows that sort after the one having “John’s Blog” as blog_title and ‘2012-01-01’ for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which would not be the case for:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND blog_title > 'John''s Blog'
-   AND posted_at > '2012-01-01'
-
-
-

The tuple notation may also be used for IN clauses on clustering columns:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-
-
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the -map keys.

-
-
-

Grouping results

-

The GROUP BY option allows condensing into a single row all selected rows that share the same values for a set of columns.

-

Using the GROUP BY option, it is only possible to group rows at the partition key level or at a clustering column level. Consequently, the GROUP BY option only accepts as arguments primary key column names in the primary key order. If a primary key column is restricted by an equality restriction, it is not required to be present in the GROUP BY clause.

-

Aggregate functions will produce a separate value for each group. If no GROUP BY clause is specified, aggregate functions will produce a single value for all the rows.

-

If a column is selected without an aggregate function, in a statement with a GROUP BY, the first value encountered in each group will be returned.

-
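An illustrative sketch, assuming the posts table defined earlier in this section:

-- One row per partition (userid), with the number of selected rows in each
SELECT userid, count(*) FROM posts GROUP BY userid;

-- userid is restricted by equality, so grouping can be done at the blog_title level
SELECT blog_title, max(posted_at) FROM posts WHERE userid = 'john doe' GROUP BY blog_title;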
-
-

Ordering results

-

The ORDER BY clause allows selecting the order of the returned results. It takes as argument a list of column names along with the order for each column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited by the clustering order defined on the table:

-
    -
  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • -
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
  • -
-
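For instance, with the posts table above (whose clustering columns blog_title and posted_at use the default ascending order):

-- Allowed: the clustering order...
SELECT * FROM posts WHERE userid = 'john doe' ORDER BY blog_title ASC, posted_at ASC;
-- ...or its exact reverse
SELECT * FROM posts WHERE userid = 'john doe' ORDER BY blog_title DESC, posted_at DESC;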
-
-

Limiting results

-

The LIMIT option to a SELECT statement limits the number of rows returned by a query, while the PER PARTITION LIMIT option limits the number of rows returned for a given partition by the query. Note that both types of limits can be used in the same statement, as in the example below.

-
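For instance (illustrative, reusing the posts table):

-- At most 3 rows from each partition, and at most 10 rows overall
SELECT * FROM posts PER PARTITION LIMIT 3 LIMIT 10;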
-
-

Allowing filtering

-

By default, CQL only allows select queries that don’t involve “filtering” server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non-filtering” queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

-

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (in the sense of the definition above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

-

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:

-
CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-
-
-

Then the following queries are valid:

-
SELECT * FROM users;
-SELECT * FROM users WHERE birth_year = 1981;
-
-
-

because in both cases, Cassandra guarantees that the performance of these queries will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the performance of the second query will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored; nevertheless, the number of nodes will always be multiple orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

-

However, the following query will be rejected:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-
-

because Cassandra cannot guarantee that it won’t have to scan a large amount of data even if the result of that query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you “know what you are doing”, you can force the execution of this query by using ALLOW FILTERING, and so the following query is valid:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-
-
-
-
-

INSERT

-

Inserting data for a row is done using an INSERT statement:

-
-insert_statement ::=  INSERT INTO table_name ( names_values | json_clause )
-                      [ IF NOT EXISTS ]
-                      [ USING update_parameter ( AND update_parameter )* ]
-names_values     ::=  names VALUES tuple_literal
-json_clause      ::=  JSON string [ DEFAULT ( NULL | UNSET ) ]
-names            ::=  '(' column_name ( ',' column_name )* ')'
-
-

For instance:

-
INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-      USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                              "director": "Joss Whedon",
-                              "year": 2005}';
-
-
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert must be supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the section on JSON support for more detail.

-

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means of knowing whether a creation or an update happened.

-

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the insertion. But please note that using IF NOT EXISTS will incur a non-negligible performance cost (internally, Paxos will be used), so this should be used sparingly.

-

All updates for an INSERT are applied atomically and in isolation.

-

Please refer to the UPDATE section for information on the update_parameter.

-

Also note that INSERT does not support counters, while UPDATE does.

-
-
-

UPDATE

-

Updating a row is done using an UPDATE statement:

-
-update_statement ::=  UPDATE table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      SET assignment ( ',' assignment )*
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-update_parameter ::=  ( TIMESTAMP | TTL ) ( integer | bind_marker )
-assignment       ::=  simple_selection '=' term
-                     | column_name '=' column_name ( '+' | '-' ) term
-                     | column_name '=' list_literal '+' column_name
-simple_selection ::=  column_name
-                     | column_name '[' term ']'
-                     | column_name '.' `field_name
-condition        ::=  simple_selection operator term
-
-

For instance:

-
UPDATE NerdMovies USING TTL 400
-   SET director   = 'Joss Whedon',
-       main_actor = 'Nathan Fillion',
-       year       = 2005
- WHERE movie = 'Serenity';
-
-UPDATE UserActions
-   SET total = total + 2
-   WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14
-     AND action = 'click';
-
-
-

The UPDATE statement writes one or more columns for a given row in a table. The where_clause is used to -select the row to update and must include all columns composing the PRIMARY KEY. Non primary key columns are then -set using the SET keyword.

-

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through IF, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred.

-

It is however possible to use the conditions on some columns through IF, in which case the row will not be updated -unless the conditions are met. But, please note that using IF conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly.

-

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

-

Regarding the assignment:

-
    -
  • c = c + 3 is used to increment/decrement counters. The column name after the ‘=’ sign must be the same as the one before the ‘=’ sign. Note that increment/decrement is only allowed on counters, and these are the only update operations allowed on counters. See the section on counters for details.
  • -
  • id = id + <some-collection> and id[value1] = value2 are for collections, see the relevant section for details.
  • -
  • id.field = 3 is for setting the value of a field on a non-frozen user-defined type. See the relevant section for details.
  • -
-
-

Update parameters

-

The UPDATE, INSERT (and DELETE and BATCH for the TIMESTAMP) statements support the following -parameters:

-
    -
  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in -microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • -
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are -automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not -the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL -is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a -default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of null is equivalent -to inserting with a TTL of 0.
  • -
-
-
-
-

DELETE

-

Deleting rows or parts of rows uses the DELETE statement:

-
-delete_statement ::=  DELETE [ simple_selection ( ',' simple_selection ) ]
-                      FROM table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-
-

For instance:

-
DELETE FROM NerdMovies USING TIMESTAMP 1240003134
- WHERE movie = 'Serenity';
-
-DELETE phone FROM Users
- WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-
-
-

The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, -only those columns are deleted from the row indicated by the WHERE clause. Otherwise, whole rows are removed.

-

The WHERE clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -IN operator. A range of rows may be deleted using an inequality operator (such as >=).

-

DELETE supports the TIMESTAMP option with the same semantics as in updates.

-

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

-

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT -statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly.
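A minimal sketch of a conditional delete, reusing the Users table from the example above:

```cql
-- Only removes the row if it currently exists; the result row reports
-- [applied] = true/false.
DELETE FROM Users
 WHERE userid = C73DE1D3-AF08-40F3-B124-3FF3E5109F22
 IF EXISTS;
```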

-
-
-

BATCH

-

Multiple INSERT, UPDATE and DELETE statements can be executed in a single statement by grouping them through a BATCH statement:

-
-batch_statement        ::=  BEGIN [ UNLOGGED | COUNTER ] BATCH
-                            [ USING update_parameter ( AND update_parameter )* ]
-                            modification_statement ( ';' modification_statement )*
-                            APPLY BATCH
-modification_statement ::=  insert_statement | update_statement | delete_statement
-
-

For instance:

-
BEGIN BATCH
-   INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-   DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-
-
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

-
    -
  • It saves network round-trips between the client and the server (and sometimes between the server coordinator and the -replicas) when batching multiple updates.
  • -
  • All updates in a BATCH belonging to a given partition key are performed in isolation.
  • -
  • By default, all operations in the batch are performed as logged, to ensure all mutations eventually complete (or -none will). See the notes on UNLOGGED batches for more details.
  • -
-

Note that:

-
    -
  • BATCH statements may only contain UPDATE, INSERT and DELETE statements (not other batches for instance).
  • -
  • Batches are not a full analogue for SQL transactions.
  • -
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra’s conflict resolution procedure in the case of timestamp ties, operations may be applied in an order that is different from the order they are listed in the BATCH statement. To force a particular operation ordering, you must specify per-operation timestamps (see the sketch after this list).
  • -
  • A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
  • -
-
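As a sketch of per-operation timestamps forcing an ordering (reusing the users table from the batch example above; the timestamp values are arbitrary):

```cql
BEGIN BATCH
   INSERT INTO users (userid, password) VALUES ('user5', 'first')  USING TIMESTAMP 1442880000000000;
   INSERT INTO users (userid, password) VALUES ('user5', 'second') USING TIMESTAMP 1442880000000001;
APPLY BATCH;
-- The second INSERT wins for the password column because it carries the
-- higher timestamp, regardless of the order the statements are listed in.
```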
-

UNLOGGED batches

-

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition).

-

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.
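A sketch of skipping the batchlog for a multi-partition batch, reusing the users table from above (atomicity across the two partitions is not guaranteed here):

```cql
BEGIN UNLOGGED BATCH
   UPDATE users SET password = 'p@ss2' WHERE userid = 'user2';
   UPDATE users SET password = 'p@ss3' WHERE userid = 'user3';
APPLY BATCH;
```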

-
-
-

COUNTER batches

-

Use the COUNTER option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent.
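A minimal sketch of a counter batch (the page_views table with a counter column named hits is hypothetical):

```cql
BEGIN COUNTER BATCH
   UPDATE page_views SET hits = hits + 1 WHERE page = '/home';
   UPDATE page_views SET hits = hits + 1 WHERE page = '/download';
APPLY BATCH;
```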

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/functions.html b/src/doc/3.11.3/cql/functions.html deleted file mode 100644 index afc596460..000000000 --- a/src/doc/3.11.3/cql/functions.html +++ /dev/null @@ -1,666 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Functions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Functions

-

CQL supports 2 main categories of functions:

-
    -
  • the scalar functions, which simply take a number of values and produce an output from them.
  • -
  • the aggregate functions, which are used to aggregate multiple rows results from a -SELECT statement.
  • -
-

In both cases, CQL provides a number of native “hard-coded” functions as well as the ability to create new user-defined -functions.

-
-

Note

-

The use of user-defined functions is disabled by default for security reasons (even when enabled, the execution of user-defined functions is sandboxed and a “rogue” function should not be allowed to do evil, but no sandbox is perfect, so using user-defined functions is opt-in). See the enable_user_defined_functions option in cassandra.yaml to enable them.

-
-

A function is identified by its name:

-
-function_name ::=  [ keyspace_name '.' ] name
-
-
-

Scalar functions

-
-

Native functions

-
-

Cast

-

The cast function can be used to convert one native datatype to another.

-

The following table describes the conversions supported by the cast function. Cassandra will silently ignore any -cast converting a datatype into its own datatype.

| From | To |
| --- | --- |
| ascii | text, varchar |
| bigint | tinyint, smallint, int, float, double, decimal, varint, text, varchar |
| boolean | text, varchar |
| counter | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar |
| date | timestamp |
| decimal | tinyint, smallint, int, bigint, float, double, varint, text, varchar |
| double | tinyint, smallint, int, bigint, float, decimal, varint, text, varchar |
| float | tinyint, smallint, int, bigint, double, decimal, varint, text, varchar |
| inet | text, varchar |
| int | tinyint, smallint, bigint, float, double, decimal, varint, text, varchar |
| smallint | tinyint, int, bigint, float, double, decimal, varint, text, varchar |
| time | text, varchar |
| timestamp | date, text, varchar |
| timeuuid | timestamp, date, text, varchar |
| tinyint | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar |
| uuid | text, varchar |
| varint | tinyint, smallint, int, bigint, float, double, decimal, text, varchar |

The conversions rely strictly on Java’s semantics. For example, the double value 1 will be converted to the text value -‘1.0’. For instance:

-
SELECT avg(cast(count as double)) FROM myTable
-
-
-
-
-

Token

-

The token function allows computing the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

-

The type of the arguments of the token function depends on the type of the partition key columns. The return type depends on the partitioner in use:

-
    -
  • For Murmur3Partitioner, the return type is bigint.
  • -
  • For RandomPartitioner, the return type is varint.
  • -
  • For ByteOrderedPartitioner, the return type is blob.
  • -
-

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:

-
CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-)
-
-
-

then the token function will take a single argument of type text (in that case, the partition key is userid; there are no clustering columns, so the partition key is the same as the primary key), and the return type will be bigint.
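As a sketch against the users table defined above (the 'bob' value is hypothetical):

```cql
-- Inspect the token of a given partition key.
SELECT token(userid) FROM users WHERE userid = 'bob';

-- Scan partitions whose token is greater than that of 'bob'.
SELECT * FROM users WHERE token(userid) > token('bob');
```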

-
-
-

Uuid

-

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or -UPDATE statements.
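For illustration (the audit_log table is hypothetical):

```cql
INSERT INTO audit_log (id, message) VALUES (uuid(), 'user created');
```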

-
-
-

Timeuuid functions

-
-
now
-

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time the statement using it is executed). Note that this method is useful for insertion but is largely nonsensical in WHERE clauses. For instance, a query of the form:

-
SELECT * FROM myTable WHERE t = now()
-
-
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

-
-
-
minTimeuuid and maxTimeuuid
-

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having timestamp t. So for instance:

-
SELECT * FROM myTable
- WHERE t > maxTimeuuid('2013-01-01 00:05+0000')
-   AND t < minTimeuuid('2013-02-02 10:00+0000')
-
-
-

will select all rows where the timeuuid column t is strictly older than '2013-01-01 00:05+0000' but strictly -younger than '2013-02-02 10:00+0000'. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still -not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > -maxTimeuuid('2013-01-01 00:05+0000').

-
-

Note

-

We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the time-based UUID generation process specified by RFC 4122. In particular, the values returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

-
-
-
-
-

Time conversion functions

-

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native -type.

| Function name | Input type | Description |
| --- | --- | --- |
| toDate | timeuuid | Converts the timeuuid argument into a date type |
| toDate | timestamp | Converts the timestamp argument into a date type |
| toTimestamp | timeuuid | Converts the timeuuid argument into a timestamp type |
| toTimestamp | date | Converts the date argument into a timestamp type |
| toUnixTimestamp | timeuuid | Converts the timeuuid argument into a bigInt raw value |
| toUnixTimestamp | timestamp | Converts the timestamp argument into a bigInt raw value |
| toUnixTimestamp | date | Converts the date argument into a bigInt raw value |
| dateOf | timeuuid | Similar to toTimestamp(timeuuid) (DEPRECATED) |
| unixTimestampOf | timeuuid | Similar to toUnixTimestamp(timeuuid) (DEPRECATED) |
-
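A short sketch of these conversions in a query (the readings table with a timeuuid column named created_at is hypothetical):

```cql
SELECT toDate(created_at), toTimestamp(created_at), toUnixTimestamp(created_at) FROM readings;
```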
-
-

Blob conversion functions

-

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL (a notable exception is blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a 64-bit blob argument and converts it to a bigint value. So, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.

-
-
-
-

User-defined functions

-

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath.

-

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

-

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

-
CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-
-
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing.

-

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types.

-

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

-

Note that you can use the double-dollar ($$) string syntax to enclose the UDF source code. For example:

-
CREATE FUNCTION some_function ( arg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS int
-    LANGUAGE java
-    AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-
-CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$ return udtarg.getString("txt"); $$;
-
-
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

-

The implicitly available udfContext field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:

-
CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( somearg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS custom_type
-    LANGUAGE java
-    AS $$
-        UDTValue udt = udfContext.newReturnUDTValue();
-        udt.setString("txt", "some string");
-        udt.setInt("i", 42);
-        return udt;
-    $$;
-
-
-

The definition of the UDFContext interface can be found in the Apache Cassandra source code for -org.apache.cassandra.cql3.functions.UDFContext.

-
public interface UDFContext
-{
-    UDTValue newArgUDTValue(String argName);
-    UDTValue newArgUDTValue(int argNum);
-    UDTValue newReturnUDTValue();
-    UDTValue newUDTValue(String udtName);
-    TupleValue newArgTupleValue(String argName);
-    TupleValue newArgTupleValue(int argNum);
-    TupleValue newReturnTupleValue();
-    TupleValue newTupleValue(String cqlDefinition);
-}
-
-
-

Java UDFs already have some imports for common interfaces and classes defined. These imports are:

-
import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.cassandra.cql3.functions.UDFContext;
-import com.datastax.driver.core.TypeCodec;
-import com.datastax.driver.core.TupleValue;
-import com.datastax.driver.core.UDTValue;
-
-
-

Please note that these convenience imports are not available for script UDFs.

-
-

CREATE FUNCTION

-

Creating a new user-defined function uses the CREATE FUNCTION statement:

-
-create_function_statement ::=  CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS]
-                                   function_name '(' arguments_declaration ')'
-                                   [ CALLED | RETURNS NULL ] ON NULL INPUT
-                                   RETURNS cql_type
-                                   LANGUAGE identifier
-                                   AS string
-arguments_declaration     ::=  identifier cql_type ( ',' identifier cql_type )*
-
-

For instance:

-
CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list)
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int)
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-
-

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with -the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already -exists.

-

If the optional IF NOT EXISTS keywords are used, the function will -only be created if another function with the same signature does not -exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

Behavior on invocation with null values must be defined for each -function. There are two options:

-
    -
  1. RETURNS NULL ON NULL INPUT declares that the function will always -return null if any of the input arguments is null.
  2. -
  3. CALLED ON NULL INPUT declares that the function will always be -executed.
  4. -
-
-
Function Signature
-

Signatures are used to distinguish individual functions. The signature consists of:

-
    -
  1. The fully qualified function name - i.e keyspace plus function-name
  2. -
  3. The concatenated list of all argument types
  4. -
-

Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules.

-

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. -the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the -system keyspaces.

-
-
-
-

DROP FUNCTION

-

Dropping a function uses the DROP FUNCTION statement:

-
-drop_function_statement ::=  DROP FUNCTION [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-arguments_signature     ::=  cql_type ( ',' cql_type )*
-
-

For instance:

-
DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-
-
-

You must specify the argument types (arguments_signature) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions).

-

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists, but does not throw an error if it doesn’t.

-
-
-
-
-

Aggregate functions

-

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.

-

If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query.

-
-

Native aggregates

-
-

Count

-

The count function can be used to count the rows returned by a query. Example:

-
SELECT COUNT (*) FROM plays;
-SELECT COUNT (1) FROM plays;
-
-
-

It can also be used to count the non-null values of a given column:

-
SELECT COUNT (scores) FROM plays;
-
-
-
-
-

Max and Min

-

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:

-
SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake';
-
-
-
-
-

Sum

-

The sum function can be used to sum up all the values returned by a query for a given column. For instance:

-
SELECT SUM (players) FROM plays;
-
-
-
-
-

Avg

-

The avg function can be used to compute the average of all the values returned by a query for a given column. For -instance:

-
SELECT AVG (players) FROM plays;
-
-
-
-
-
-

User-Defined Aggregates

-

User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -count, min, and max.

-

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first -argument of the state function must have type STYPE. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with last -state value as its argument.

-

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate).

-

User-defined aggregates can be used in SELECT statements.

-

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE -statement):

-
CREATE OR REPLACE FUNCTION averageState(state tuple<int,bigint>, val int)
-    CALLED ON NULL INPUT
-    RETURNS tuple<int,bigint>
-    LANGUAGE java
-    AS $$
-        if (val != null) {
-            state.setInt(0, state.getInt(0)+1);
-            state.setLong(1, state.getLong(1)+val.intValue());
-        }
-        return state;
-    $$;
-
-CREATE OR REPLACE FUNCTION averageFinal (state tuple<int,bigint>)
-    CALLED ON NULL INPUT
-    RETURNS double
-    LANGUAGE java
-    AS $$
-        double r = 0;
-        if (state.getInt(0) == 0) return null;
-        r = state.getLong(1);
-        r /= state.getInt(0);
-        return Double.valueOf(r);
-    $$;
-
-CREATE OR REPLACE AGGREGATE average(int)
-    SFUNC averageState
-    STYPE tuple<int,bigint>
-    FINALFUNC averageFinal
-    INITCOND (0, 0);
-
-CREATE TABLE atable (
-    pk int PRIMARY KEY,
-    val int
-);
-
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-
-SELECT average(val) FROM atable;
-
-
-
-

CREATE AGGREGATE

-

Creating (or replacing) a user-defined aggregate function uses the CREATE AGGREGATE statement:

-
-create_aggregate_statement ::=  CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ]
-                                    function_name '(' arguments_signature ')'
-                                    SFUNC function_name
-                                    STYPE cql_type
-                                    [ FINALFUNC function_name ]
-                                    [ INITCOND term ]
-
-

See above for a complete example.

-

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one -with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature -already exists.

-

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

STYPE defines the type of the state value and must be specified.

-

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null -INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

-

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the -state function must match STYPE. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called -with null.

-

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with -type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS -NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

-

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is -defined, it is the return type of that function.

-
-
-

DROP AGGREGATE

-

Dropping a user-defined aggregate function uses the DROP AGGREGATE statement:

-
-drop_aggregate_statement ::=  DROP AGGREGATE [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-
-

For instance:

-
DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-
-
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates).

-

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist.

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/index.html b/src/doc/3.11.3/cql/index.html deleted file mode 100644 index 36d2ee8d2..000000000 --- a/src/doc/3.11.3/cql/index.html +++ /dev/null @@ -1,239 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "The Cassandra Query Language (CQL)" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

The Cassandra Query Language (CQL)

-

This document describes the Cassandra Query Language (CQL) [1]. Note that this document describes the latest version of the language. However, the changes section provides the diff between the different versions of CQL.

-

CQL offers a model close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL. But please note that, as such, they do not refer to the concept of rows and columns found in the deprecated Thrift API (and earlier versions 1 and 2 of CQL).

[1] Technically, this document describes CQL version 3, which is not backward compatible with CQL versions 1 and 2 (which have been deprecated and removed) and differs from them in numerous ways.
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/indexes.html b/src/doc/3.11.3/cql/indexes.html deleted file mode 100644 index 8fb629c64..000000000 --- a/src/doc/3.11.3/cql/indexes.html +++ /dev/null @@ -1,168 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Secondary Indexes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Secondary Indexes

-

CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by:

-
-index_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE INDEX

-

Creating a secondary index on a table uses the CREATE INDEX statement:

-
-create_index_statement ::=  CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ index_name ]
-                                ON table_name '(' index_identifier ')'
-                                [ USING string [ WITH OPTIONS = map_literal ] ]
-index_identifier       ::=  column_name
-                           | ( KEYS | VALUES | ENTRIES | FULL ) '(' column_name ')'
-
-

For instance:

-
CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-
-
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time.

-

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it -is used, the statement will be a no-op if the index already exists.

-
-

Indexes on Map Keys

-

When creating an index on a map, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.
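A sketch building on the users / favs index examples above:

```cql
-- With CREATE INDEX ON users (keys(favs)), the map keys are indexed and
-- CONTAINS KEY becomes usable in WHERE clauses.
SELECT * FROM users WHERE favs CONTAINS KEY 'movies';

-- With CREATE INDEX ON users (favs), the map values are indexed instead
-- and CONTAINS can be used.
SELECT * FROM users WHERE favs CONTAINS 'Serenity';
```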

-
-
-
-

DROP INDEX

-

Dropping a secondary index uses the DROP INDEX statement:

-
-drop_index_statement ::=  DROP INDEX [ IF EXISTS ] index_name
-
-

The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index.

-

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/json.html b/src/doc/3.11.3/cql/json.html deleted file mode 100644 index 0e543a91c..000000000 --- a/src/doc/3.11.3/cql/json.html +++ /dev/null @@ -1,315 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "JSON Support" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

JSON Support

-

Cassandra 2.2 introduces JSON support to SELECT and INSERT -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents.

-
-

SELECT JSON

-

With SELECT statements, the JSON keyword can be used to return each row as a single JSON encoded map. The -remainder of the SELECT statement behavior is the same.

-

The result map keys are the same as the column names in a normal result set. For example, a statement like SELECT JSON a, ttl(b) FROM ... would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, SELECT JSON myColumn FROM ... would result in a map key "\"myColumn\"" (note the escaped quotes).

-

The map values will be JSON-encoded representations (as described below) of the result set values.
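A sketch against a hypothetical users table (the comment shows the single [json] column cqlsh would display):

```cql
SELECT JSON userid, age FROM users WHERE userid = 'bob';
-- [json]
-- {"userid": "bob", "age": 42}
```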

-
-
-

INSERT JSON

-

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single -row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named “myKey” and “value”, you would do the following:

-
INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-
-

By default (or if DEFAULT NULL is explicitly used), a column omitted from the JSON map will be set to NULL, meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). Alternatively, if the DEFAULT UNSET directive is used after the value, omitted column values will be left unset, meaning that pre-existing values for those columns will be preserved.
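Continuing the mytable example above, a sketch of the two behaviours:

```cql
-- "value" is omitted: with the default (DEFAULT NULL) it is overwritten with null...
INSERT INTO mytable JSON '{ "\"myKey\"": 0 }';

-- ...whereas with DEFAULT UNSET any pre-existing "value" is left untouched.
INSERT INTO mytable JSON '{ "\"myKey\"": 0 }' DEFAULT UNSET;
```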

-
-
-

JSON Encoding of Cassandra Data Types

-

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string -representation of the collection.

-

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() -arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and -fromJson()):

| Type | Formats accepted | Return format | Notes |
| --- | --- | --- | --- |
| ascii | string | string | Uses JSON’s \u character escape |
| bigint | integer, string | integer | String must be valid 64 bit integer |
| blob | string | string | String should be 0x followed by an even number of hex digits |
| boolean | boolean, string | boolean | String must be “true” or “false” |
| date | string | string | Date in format YYYY-MM-DD, timezone UTC |
| decimal | integer, float, string | float | May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder |
| double | integer, float, string | float | String must be valid integer or float |
| float | integer, float, string | float | String must be valid integer or float |
| inet | string | string | IPv4 or IPv6 address |
| int | integer, string | integer | String must be valid 32 bit integer |
| list | list, string | list | Uses JSON’s native list representation |
| map | map, string | map | Uses JSON’s native map representation |
| smallint | integer, string | integer | String must be valid 16 bit integer |
| set | list, string | list | Uses JSON’s native list representation |
| text | string | string | Uses JSON’s \u character escape |
| time | string | string | Time of day in format HH-MM-SS[.fffffffff] |
| timestamp | integer, string | string | A timestamp. String constants allow timestamps to be input as dates. Timestamps are returned in the format YYYY-MM-DD HH:MM:SS.SSS |
| timeuuid | string | string | Type 1 UUID. See constant for the UUID format |
| tinyint | integer, string | integer | String must be valid 8 bit integer |
| tuple | list, string | list | Uses JSON’s native list representation |
| UDT | map, string | map | Uses JSON’s native map representation with field names as keys |
| uuid | string | string | See constant for the UUID format |
| varchar | string | string | Uses JSON’s \u character escape |
| varint | integer, string | integer | Variable length; may overflow 32 or 64 bit integers in client-side decoder |
-
-
-

The fromJson() Function

-

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used -in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or -SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.

-
-
-

The toJson() Function

-

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used -in the selection clause of a SELECT statement.
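A sketch of both functions against a hypothetical users table whose favs column is a map<text, text>:

```cql
-- fromJson() supplies a single column value in an INSERT.
INSERT INTO users (userid, favs)
VALUES ('bob', fromJson('{"band": "Beatles", "movie": "Serenity"}'));

-- toJson() renders a single selected column as JSON text.
SELECT toJson(favs) FROM users WHERE userid = 'bob';
```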

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/mvs.html b/src/doc/3.11.3/cql/mvs.html deleted file mode 100644 index 38f091f12..000000000 --- a/src/doc/3.11.3/cql/mvs.html +++ /dev/null @@ -1,241 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Materialized Views" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Materialized Views

-

Materialized view names are defined by:

-
-view_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE MATERIALIZED VIEW

-

You can create a materialized view on a table using a CREATE MATERIALIZED VIEW statement:

-
-create_materialized_view_statement ::=  CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] view_name AS
-                                            select_statement
-                                            PRIMARY KEY '(' primary_key ')'
-                                            WITH table_options
-
-

For instance:

-
CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT * FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-
-
-

The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which -corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view.

-

Creating a materialized view has 3 main parts: the select statement that restricts the data included in the view, the primary key definition for the view, and the options of the view. Each part is detailed in the subsections below.

- -

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is -used. If it is used, the statement will be a no-op if the materialized view already exists.

-
-

MV select statement

-

The select statement of a materialized view creation defines which rows of the base table are included in the view. That statement is limited in a number of ways:

-
    -
  • the selection is limited to selecting only columns of the base table. In other words, you can’t use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can however use * as a shortcut for selecting all columns. Further, static columns cannot be included in a materialized view (which means SELECT * isn’t allowed if the base table has static columns).
  • -
  • the WHERE clause has the following restrictions:
      -
    • it cannot include any bind_marker.
    • -
    • the columns that are not part of the base table primary key can only be restricted by an IS NOT NULL -restriction. No other restriction is allowed.
    • -
    • as the columns that are part of the view primary key cannot be null, they must always be at least restricted by a -IS NOT NULL restriction (or any other restriction, but they must have one).
    • -
    -
  • -
  • it cannot have an ordering clause, a limit, or ALLOW FILTERING.
  • -
-
-
-

MV primary key

-

A view must have a primary key and that primary key must conform to the following restrictions:

-
    -
  • it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to exactly one row of the base table.
  • -
  • it can only contain a single column that is not a primary key column in the base table.
  • -
-

So for instance, given the following base table definition:

-
CREATE TABLE t (
-    k int,
-    c1 int,
-    c2 int,
-    v1 int,
-    v2 int,
-    PRIMARY KEY (k, c1, c2)
-)
-
-
-

then the following view definitions are allowed:

-
CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, k, c2)
-
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (v1, k, c1, c2)
-
-
-

but the following ones are not allowed:

-
// Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-    PRIMARY KEY (v1, v2, k, c1, c2)
-
-// Error: must include k in the primary as it's a base table primary key column
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, c2)
-
-
-
-
-

MV options

-

A materialized view is internally implemented by a table and as such, creating a MV allows the same options as creating a table.

-
-
-
-

ALTER MATERIALIZED VIEW

-

After creation, you can alter the options of a materialized view using the ALTER MATERIALIZED VIEW statement:

-
-alter_materialized_view_statement ::=  ALTER MATERIALIZED VIEW view_name WITH table_options
-
-

The options that can be updated are the same as at creation time and thus the same as for tables.

-
-
-

DROP MATERIALIZED VIEW

-

Dropping a materialized view uses the DROP MATERIALIZED VIEW statement:

-
-drop_materialized_view_statement ::=  DROP MATERIALIZED VIEW [ IF EXISTS ] view_name;
-
-

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/security.html b/src/doc/3.11.3/cql/security.html deleted file mode 100644 index 15bbb6d70..000000000 --- a/src/doc/3.11.3/cql/security.html +++ /dev/null @@ -1,704 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-
-

Database Roles

-

CQL uses database roles to represent users and groups of users. Syntactically, a role is defined by:

-
-role_name ::=  identifier | string
-
-
-

CREATE ROLE

-

Creating a role uses the CREATE ROLE statement:

-
-create_role_statement ::=  CREATE ROLE [ IF NOT EXISTS ] role_name
-                               [ WITH role_options ]
-role_options          ::=  role_option ( AND role_option )*
-role_option           ::=  PASSWORD '=' string
-                          | LOGIN '=' boolean
-                          | SUPERUSER '=' boolean
-                          | OPTIONS '=' map_literal
-
-

For instance:

-
CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-
-
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

-

Permissions on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is -not.

-

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role.

-

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

-

Role names should be quoted if they contain non-alphanumeric characters.

-
-

Setting credentials for internal authentication

-

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single -quotation marks.

-

If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD -clause is not necessary.

-
-
-

Creating a role conditionally

-

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. -If the option is used and the role exists, the statement is a no-op:

-
CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-
-
-
-
-
-

ALTER ROLE

-

Altering the options of a role uses the ALTER ROLE statement:

-
-alter_role_statement ::=  ALTER ROLE role_name WITH role_options
-
-

For instance:

-
ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-
-
-

Conditions on executing ALTER ROLE statements:

-
    -
  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • -
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • -
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • -
  • To modify properties of a role, the client must be granted ALTER permission on that role
  • -
-
-
-

DROP ROLE

-

Dropping a role uses the DROP ROLE statement:

-
-drop_role_statement ::=  DROP ROLE [ IF EXISTS ] role_name
-
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.

-

Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is -used. If the option is used and the role does not exist the statement is a no-op.

-
-
-

GRANT ROLE

-

Granting a role to another uses the GRANT ROLE statement:

-
-grant_role_statement ::=  GRANT role_name TO role_name
-
-

For instance:

-
GRANT report_writer TO alice;
-
-
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also -acquired by alice.

-

Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:

-
GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
-GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-
-
-
-
-

REVOKE ROLE

-

Revoking a role uses the REVOKE ROLE statement:

-
-revoke_role_statement ::=  REVOKE role_name FROM role_name
-
-

For instance:

-
REVOKE report_writer FROM alice;
-
-
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the -report_writer role are also revoked.

-
-
-

LIST ROLES

-

All the known roles (in the system or granted to a specific role) can be listed using the LIST ROLES statement:

-
-list_roles_statement ::=  LIST ROLES [ OF role_name ] [ NORECURSIVE ]
-
-

For instance:

-
LIST ROLES;
-
-
-

returns all known roles in the system; this requires DESCRIBE permission on the database roles resource. And:

-
LIST ROLES OF alice;
-
-
-

enumerates all roles granted to alice, including those transitively acquired. But:

-
LIST ROLES OF bob NORECURSIVE
-
-
-

lists all roles directly granted to bob without including any of the transitively acquired ones.

-
-
-
-

Users

-

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -USER. For backward compatibility, the legacy syntax has been preserved with USER centric statements becoming -synonyms for the ROLE based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role.

-
-

CREATE USER

-

Creating a user uses the CREATE USER statement:

-
-create_user_statement ::=  CREATE USER [ IF NOT EXISTS ] role_name [ WITH PASSWORD string ] [ user_option ]
-user_option           ::=  SUPERUSER | NOSUPERUSER
-
-

For instance:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-
-
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of -statements are equivalent:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' WITH LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' WITH LOGIN = true;
-
-
-
-
-

ALTER USER

-

Altering the options of a user uses the ALTER USER statement:

-
-alter_user_statement ::=  ALTER USER role_name [ WITH PASSWORD string ] [ user_option ]
-
-

For instance:

-
ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-
-
-
-
-

DROP USER

-

Dropping a user uses the DROP USER statement:

-
-drop_user_statement ::=  DROP USER [ IF EXISTS ] role_name
-
-
-
-

LIST USERS

-

Existing users can be listed using the LIST USERS statement:

-
-list_users_statement ::=  LIST USERS
-
-

Note that this statement is equivalent to:

-
LIST ROLES;
-
-
-

but only roles with the LOGIN privilege are included in the output.

-
-
-
-

Data Control

-
-

Permissions

-

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically:

-
    -
  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> -TABLE.
  • -
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • -
  • Resources representing roles have the structure ALL ROLES -> ROLE
  • -
  • Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ALL MBEANS -> -MBEAN
  • -
-

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting -a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace.

-

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes.

-

The full set of available permissions is:

-
    -
  • CREATE
  • -
  • ALTER
  • -
  • DROP
  • -
  • SELECT
  • -
  • MODIFY
  • -
  • AUTHORIZE
  • -
  • DESCRIBE
  • -
  • EXECUTE
  • -
-

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context -of functions or mbeans; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT -a permission on resource to which it cannot be applied results in an error response. The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission.

| Permission | Resource | Operations |
| --- | --- | --- |
| CREATE | ALL KEYSPACES | CREATE KEYSPACE and CREATE TABLE in any keyspace |
| CREATE | KEYSPACE | CREATE TABLE in specified keyspace |
| CREATE | ALL FUNCTIONS | CREATE FUNCTION in any keyspace and CREATE AGGREGATE in any keyspace |
| CREATE | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE in specified keyspace |
| CREATE | ALL ROLES | CREATE ROLE |
| ALTER | ALL KEYSPACES | ALTER KEYSPACE and ALTER TABLE in any keyspace |
| ALTER | KEYSPACE | ALTER KEYSPACE and ALTER TABLE in specified keyspace |
| ALTER | TABLE | ALTER TABLE |
| ALTER | ALL FUNCTIONS | CREATE FUNCTION and CREATE AGGREGATE: replacing any existing |
| ALTER | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE: replacing existing in specified keyspace |
| ALTER | FUNCTION | CREATE FUNCTION and CREATE AGGREGATE: replacing existing |
| ALTER | ALL ROLES | ALTER ROLE on any role |
| ALTER | ROLE | ALTER ROLE |
| DROP | ALL KEYSPACES | DROP KEYSPACE and DROP TABLE in any keyspace |
| DROP | KEYSPACE | DROP TABLE in specified keyspace |
| DROP | TABLE | DROP TABLE |
| DROP | ALL FUNCTIONS | DROP FUNCTION and DROP AGGREGATE in any keyspace |
| DROP | ALL FUNCTIONS IN KEYSPACE | DROP FUNCTION and DROP AGGREGATE in specified keyspace |
| DROP | FUNCTION | DROP FUNCTION |
| DROP | ALL ROLES | DROP ROLE on any role |
| DROP | ROLE | DROP ROLE |
| SELECT | ALL KEYSPACES | SELECT on any table |
| SELECT | KEYSPACE | SELECT on any table in specified keyspace |
| SELECT | TABLE | SELECT on specified table |
| SELECT | ALL MBEANS | Call getter methods on any mbean |
| SELECT | MBEANS | Call getter methods on any mbean matching a wildcard pattern |
| SELECT | MBEAN | Call getter methods on named mbean |
| MODIFY | ALL KEYSPACES | INSERT, UPDATE, DELETE and TRUNCATE on any table |
| MODIFY | KEYSPACE | INSERT, UPDATE, DELETE and TRUNCATE on any table in specified keyspace |
| MODIFY | TABLE | INSERT, UPDATE, DELETE and TRUNCATE on specified table |
| MODIFY | ALL MBEANS | Call setter methods on any mbean |
| MODIFY | MBEANS | Call setter methods on any mbean matching a wildcard pattern |
| MODIFY | MBEAN | Call setter methods on named mbean |
| AUTHORIZE | ALL KEYSPACES | GRANT PERMISSION and REVOKE PERMISSION on any table |
| AUTHORIZE | KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION on any table in specified keyspace |
| AUTHORIZE | TABLE | GRANT PERMISSION and REVOKE PERMISSION on specified table |
| AUTHORIZE | ALL FUNCTIONS | GRANT PERMISSION and REVOKE PERMISSION on any function |
| AUTHORIZE | ALL FUNCTIONS IN KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION in specified keyspace |
| AUTHORIZE | FUNCTION | GRANT PERMISSION and REVOKE PERMISSION on specified function |
| AUTHORIZE | ALL MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean |
| AUTHORIZE | MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean matching a wildcard pattern |
| AUTHORIZE | MBEAN | GRANT PERMISSION and REVOKE PERMISSION on named mbean |
| AUTHORIZE | ALL ROLES | GRANT ROLE and REVOKE ROLE on any role |
| AUTHORIZE | ROLES | GRANT ROLE and REVOKE ROLE on specified roles |
| DESCRIBE | ALL ROLES | LIST ROLES on all roles or only roles granted to another, specified role |
| DESCRIBE | ALL MBEANS | Retrieve metadata about any mbean from the platform’s MBeanServer |
| DESCRIBE | MBEANS | Retrieve metadata about any mbean matching a wildcard pattern from the platform’s MBeanServer |
| DESCRIBE | MBEAN | Retrieve metadata about a named mbean from the platform’s MBeanServer |
| EXECUTE | ALL FUNCTIONS | SELECT, INSERT and UPDATE using any function, and use of any function in CREATE AGGREGATE |
| EXECUTE | ALL FUNCTIONS IN KEYSPACE | SELECT, INSERT and UPDATE using any function in specified keyspace and use of any function in keyspace in CREATE AGGREGATE |
| EXECUTE | FUNCTION | SELECT, INSERT and UPDATE using specified function and use of the function in CREATE AGGREGATE |
| EXECUTE | ALL MBEANS | Execute operations on any mbean |
| EXECUTE | MBEANS | Execute operations on any mbean matching a wildcard pattern |
| EXECUTE | MBEAN | Execute operations on named mbean |
-
-
-

GRANT PERMISSION

-

Granting a permission uses the GRANT PERMISSION statement:

-
-grant_permission_statement ::=  GRANT permissions ON resource TO role_name
-permissions                ::=  ALL [ PERMISSIONS ] | permission [ PERMISSION ]
-permission                 ::=  CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-resource                   ::=  ALL KEYSPACES
-                               | KEYSPACE keyspace_name
-                               | [ TABLE ] table_name
-                               | ALL ROLES
-                               | ROLE role_name
-                               | ALL FUNCTIONS [ IN KEYSPACE keyspace_name ]
-                               | FUNCTION function_name '(' [ cql_type ( ',' cql_type )* ] ')'
-                               | ALL MBEANS
-                               | ( MBEAN | MBEANS ) string
-
-

For instance:

-
GRANT SELECT ON ALL KEYSPACES TO data_reader;
-
-
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all -keyspaces:

-
GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-
-
-

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace:

-
GRANT DROP ON keyspace1.table1 TO schema_owner;
-
-
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1:

-
GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-
-
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries -which use the function keyspace1.user_function( int ):

-
GRANT DESCRIBE ON ALL ROLES TO role_admin;
-
-
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST ROLES statement.

-
-

GRANT ALL

-

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target -resource.

-
-
-

Automatic Granting

-

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or CREATE ROLE statement, the creator (the role the database user who issues the statement is identified as) is automatically granted all applicable permissions on the new resource.

-
-
-
-

REVOKE PERMISSION

-

Revoking a permission from a role uses the REVOKE PERMISSION statement:

-
-revoke_permission_statement ::=  REVOKE permissions ON resource FROM role_name
-
-

For instance:

-
REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-
-
-
-
-

LIST PERMISSIONS

-

Listing granted permissions uses the LIST PERMISSIONS statement:

-
-list_permissions_statement ::=  LIST permissions [ ON resource ] [ OF role_name [ NORECURSIVE ] ]
-
-

For instance:

-
LIST ALL PERMISSIONS OF alice;
-
-
-

Show all permissions granted to alice, including those acquired transitively from any other roles:

-
LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-
-
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. -For example, should bob have ALTER permission on keyspace1, that would be included in the results of this -query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to -bob or one of bob‘s roles:

-
LIST SELECT PERMISSIONS OF carlos;
-
-
-

Show any permissions granted to carlos or any of carlos's roles, limited to SELECT permissions on any resource.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/triggers.html b/src/doc/3.11.3/cql/triggers.html deleted file mode 100644 index 3972c3503..000000000 --- a/src/doc/3.11.3/cql/triggers.html +++ /dev/null @@ -1,153 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Triggers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Triggers

-

Triggers are identified by a name defined by:

-
-trigger_name ::=  identifier
-
-
-

CREATE TRIGGER

-

Creating a new trigger uses the CREATE TRIGGER statement:

-
-create_trigger_statement ::=  CREATE TRIGGER [ IF NOT EXISTS ] trigger_name
-                                  ON table_name
-                                  USING string
-
-

For instance:

-
CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-
-
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. You place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it loads during cluster startup and exists on every node that participates in a cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

-
-
-

DROP TRIGGER

-

Dropping a trigger uses the DROP TRIGGER statement:

-
-drop_trigger_statement ::=  DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name
-
-

For instance:

-
DROP TRIGGER myTrigger ON myTable;
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/cql/types.html b/src/doc/3.11.3/cql/types.html deleted file mode 100644 index 696834f0f..000000000 --- a/src/doc/3.11.3/cql/types.html +++ /dev/null @@ -1,697 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Types" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Types

-

CQL is a typed language and supports a rich set of data types, including native types, collection types, user-defined types, tuple types and custom types:

-
-cql_type ::=  native_type | collection_type | user_defined_type | tuple_type | custom_type
-
-
-

Native Types

-

The native types supported by CQL are:

-
-native_type ::=  ASCII
-                 | BIGINT
-                 | BLOB
-                 | BOOLEAN
-                 | COUNTER
-                 | DATE
-                 | DECIMAL
-                 | DOUBLE
-                 | DURATION
-                 | FLOAT
-                 | INET
-                 | INT
-                 | SMALLINT
-                 | TEXT
-                 | TIME
-                 | TIMESTAMP
-                 | TIMEUUID
-                 | TINYINT
-                 | UUID
-                 | VARCHAR
-                 | VARINT
-
-

The following table gives additional information on the native data types, and on which kind of constants each type supports:

type      | constants supported | description
ascii     | string              | ASCII character string
bigint    | integer             | 64-bit signed long
blob      | blob                | Arbitrary bytes (no validation)
boolean   | boolean             | Either true or false
counter   | integer             | Counter column (64-bit signed value). See Counters for details
date      | integer, string     | A date (with no corresponding time value). See Working with dates below for details
decimal   | integer, float      | Variable-precision decimal
double    | integer, float      | 64-bit IEEE-754 floating point
duration  | duration            | A duration with nanosecond precision. See Working with durations below for details
float     | integer, float      | 32-bit IEEE-754 floating point
inet      | string              | An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that there is no inet constant; IP addresses should be input as strings
int       | integer             | 32-bit signed int
smallint  | integer             | 16-bit signed int
text      | string              | UTF8 encoded string
time      | integer, string     | A time (with no corresponding date value) with nanosecond precision. See Working with times below for details
timestamp | integer, string     | A timestamp (date and time) with millisecond precision. See Working with timestamps below for details
timeuuid  | uuid                | Version 1 UUID, generally used as a “conflict-free” timestamp. Also see Timeuuid functions
tinyint   | integer             | 8-bit signed int
uuid      | uuid                | A UUID (of any version)
varchar   | string              | UTF8 encoded string
varint    | integer             | Arbitrary-precision integer
-
-

Counters

-

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed integer and on which two operations are supported: incrementing and decrementing (see the UPDATE statement for syntax). Note that the value of a counter cannot be set: a counter does not exist until first incremented/decremented, and that first increment/decrement is made as if the prior value was 0.
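
As a brief sketch (the page_views table is hypothetical), a counter column is declared like any other column and then modified exclusively through increments and decrements:

CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter    // the only non-PRIMARY KEY column, and it is a counter
);

// Increment: the counter is implicitly created with a prior value of 0
UPDATE page_views SET views = views + 1 WHERE page = '/home';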

-

Counters have a number of important limitations:

-
    -
  • They cannot be used for columns that are part of the PRIMARY KEY of a table.
  • -
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside the PRIMARY KEY have the counter type, or none of them have it.
  • -
  • Counters do not support expiration.
  • -
  • The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
  • -
  • Counter updates are, by nature, not idempotent. An important consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), the client has no way to know if the update has been applied or not. In particular, replaying the update may or may not lead to an over-count.
  • -
-
-
-
-

Working with timestamps

-

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the standard base time known as the epoch: January 1 1970 at 00:00:00 GMT.

-

Timestamps can be input in CQL either using their value as an integer, or using a string that represents an ISO 8601 date. For instance, all of the values below are valid timestamp values for Mar 2, 2011, at 04:05:00 AM, GMT:

-
    -
  • 1299038700000
  • -
  • '2011-02-03 04:05+0000'
  • -
  • '2011-02-03 04:05:00+0000'
  • -
  • '2011-02-03 04:05:00.000+0000'
  • -
  • '2011-02-03T04:05+0000'
  • -
  • '2011-02-03T04:05:00+0000'
  • -
  • '2011-02-03T04:05:00.000+0000'
  • -
-

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is -0800. The time zone may be omitted if desired ('2011-02-03 04:05:00'), and if so, the date will be interpreted as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be specified for timestamps when feasible.

-

The time of day may also be omitted ('2011-02-03' or '2011-02-03+0000'), in which case the time of day will default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using the date type.
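
As a small sketch (the events table is hypothetical), a timestamp column accepts either an integer or an ISO 8601 string:

CREATE TABLE events (
    id int PRIMARY KEY,
    created_at timestamp
);

INSERT INTO events (id, created_at) VALUES (1, 1299038700000);                    // integer: milliseconds since the epoch
INSERT INTO events (id, created_at) VALUES (2, '2011-02-03 04:05:00.000+0000');   // ISO 8601 string with explicit time zone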

-
-
-

Working with dates

-

Values of the date type are encoded as 32-bit unsigned integers representing a number of days, with “the epoch” at the center of the range (2^31). The epoch is January 1st, 1970.

-

As for timestamp, a date can be input either as an integer or using a date string. In the latter case, the format should be yyyy-mm-dd (so '2011-02-03' for instance).
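
For illustration, a minimal sketch (the birthdays table is hypothetical) using the string form:

CREATE TABLE birthdays (
    name text PRIMARY KEY,
    birthday date
);

INSERT INTO birthdays (name, birthday) VALUES ('alice', '2011-02-03');   // yyyy-mm-dd date string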

-
-
-

Working with times

-

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

-

As for timestamp, a time can be input either as an integer or using a string representing the time. In the latter case, the format should be hh:mm:ss[.fffffffff] (where the sub-second precision is optional and, if provided, can have less than nanosecond precision). So for instance, the following are valid inputs for a time:

-
    -
  • '08:12:54'
  • -
  • '08:12:54.123'
  • -
  • '08:12:54.123456'
  • -
  • '08:12:54.123456789'
  • -
-
-
-
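
For instance, a short sketch (the alarms table is hypothetical) inserting one of the string forms above:

CREATE TABLE alarms (
    name text PRIMARY KEY,
    ring_at time
);

INSERT INTO alarms (name, ring_at) VALUES ('wake up', '08:12:54.123456789');   // time string with nanosecond precision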

Working with durations

-

Values of the duration type are encoded as 3 signed integers of variable length. The first integer represents the number of months, the second the number of days and the third the number of nanoseconds. This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving time. Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded as a 64-bit integer.

-

A duration can be input as:

-
-
    -
  1. (quantity unit)+ like 12h30m where the unit can be:

    -
    -
      -
    • y: years (12 months)
    • -
    • mo: months (1 month)
    • -
    • w: weeks (7 days)
    • -
    • d: days (1 day)
    • -
    • h: hours (3,600,000,000,000 nanoseconds)
    • -
    • m: minutes (60,000,000,000 nanoseconds)
    • -
    • s: seconds (1,000,000,000 nanoseconds)
    • -
    • ms: milliseconds (1,000,000 nanoseconds)
    • -
    • us or µs : microseconds (1000 nanoseconds)
    • -
    • ns: nanoseconds (1 nanosecond)
    • -
    -
    -
  2. -
  3. ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W

    -
  4. -
  5. ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]

    -
  6. -
-
-

For example:

-
INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-
-

Duration columns cannot be used in a table’s PRIMARY KEY. This limitation is due to the fact that durations cannot be ordered. It is effectively not possible to know if 1mo is greater than 29d without a date context.

-

A 1d duration is not equal to a 24h one, as the duration type was created to be able to support daylight saving time.

-
-
-

Collections

-

CQL supports three kinds of collections: Maps, Sets and Lists. The types of those collections are defined by:

-
-collection_type ::=  MAP '<' cql_type ',' cql_type '>'
-                     | SET '<' cql_type '>'
-                     | LIST '<' cql_type '>'
-
-

and their values can be input using collection literals:

-
-collection_literal ::=  map_literal | set_literal | list_literal
-map_literal        ::=  '{' [ term ':' term (',' term ':' term)* ] '}'
-set_literal        ::=  '{' [ term (',' term)* ] '}'
-list_literal       ::=  '[' [ term (',' term)* ] ']'
-
-

Note however that neither bind_marker nor NULL are supported inside collection literals.
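
As a combined sketch (the inventory table and its column names are hypothetical), the three literal forms look like:

CREATE TABLE inventory (
    id int PRIMARY KEY,
    attributes map<text, text>,
    tags set<text>,
    notes list<text>
);

INSERT INTO inventory (id, attributes, tags, notes)
VALUES (1,
        { 'color' : 'red', 'size' : 'L' },   // map_literal
        { 'new', 'sale' },                   // set_literal
        [ 'first note', 'second note' ]);    // list_literal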

-
-

Noteworthy characteristics

-

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all messages sent by a user”, “events registered by a sensor”...), then collections are not appropriate and a specific table (with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy characteristics and limitations:

-
    -
  • Individual collections are not indexed internally, which means that even to access a single element of a collection, the whole collection has to be read (and reading one is not paged internally).
  • -
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. Further, some list operations are not idempotent by nature (see the section on lists below for details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when possible.
  • -
-

Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an anti-pattern to use a (single) collection to store large amounts of data.

-
-
-

Maps

-

A map is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define and insert a map with:

-
CREATE TABLE users (
-    id text PRIMARY KEY,
-    name text,
-    favs map<text, text> // A map of text keys, and text values
-);
-
-INSERT INTO users (id, name, favs)
-           VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-// Replace the existing map entirely.
-UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-
-

Further, maps support:

-
    -
  • Updating or inserting one or more elements:

    -
    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    -UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
    -
    -
    -
  • -
  • Removing one or more element (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    -UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
    -
    -
    -

    Note that for removing multiple elements in a map, you remove from it a set of keys.

    -
  • -
-

Lastly, TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated elements. In other words:

-
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

-
-
-

Sets

-

A set is a (sorted) collection of unique values. You can define and insert a set with:

-
CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    tags set<text> // A set of text values
-);
-
-INSERT INTO images (name, owner, tags)
-            VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-// Replace the existing set entirely
-UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-
-

Further, sets support:

-
    -
  • Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):

    -
    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
  • Removing one or multiple elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
-

Lastly, as for maps, TTLs if used only apply to the newly inserted values.
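
For instance, a short sketch reusing the images table above: only the newly added element receives the TTL, the pre-existing tags are unaffected:

UPDATE images USING TTL 86400 SET tags = tags + { 'temporary' } WHERE name = 'cat.jpg';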

-
-
-

Lists

-
-

Note

-

As mentioned above and further discussed at the end of this section, lists have limitations and specific performance considerations that you should take into account before using them. In general, if you can use a set instead of a list, always prefer a set.

-
-

A list is a (sorted) collection of non-unique values where elements are ordered by their position in the list. You can define and insert a list with:

-
CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int> // A list of integers
-);
-
-INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-// Replace the existing list entirely
-UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-
-

Further, lists support:

-
    -
  • Appending and prepending values to a list:

    -
    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    -UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
    -
    -
    -
  • -
  • Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small:

    -
    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
    -
    -
    -
  • -
  • Removing an element by its position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted:

    -
    DELETE scores[1] FROM plays WHERE id = '123-afde';
    -
    -
    -
  • -
  • Deleting all the occurrences of particular values in the list (if a particular element doesn’t occur at all in the list, it is simply ignored and no error is thrown):

    -
    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
    -
    -
    -
  • -
-
-

Warning

-

The append and prepend operations are not idempotent by nature. So in particular, if one of these operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value twice.

-
-
-

Warning

-

Setting and removing an element by position and removing occurrences of particular values incur an internal read-before-write. They will thus run more slowly and take more resources than usual updates (with the exclusion of conditional writes, which have their own cost).

-
-

Lastly, as for maps, TTLs when used only apply to the newly inserted values.

-
-
-
-

User-Defined Types

-

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using the create_type_statement, alter_type_statement and drop_type_statement described below. But once created, a UDT is simply referred to by its name:

-
-user_defined_type ::=  udt_name
-udt_name          ::=  [ keyspace_name '.' ] identifier
-
-
-

Creating a UDT

-

Creating a new user-defined type is done using a CREATE TYPE statement defined by:

-
-create_type_statement ::=  CREATE TYPE [ IF NOT EXISTS ] udt_name
-                               '(' field_definition ( ',' field_definition )* ')'
-field_definition      ::=  identifier cql_type
-
-

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any type, including collections or other UDTs. For instance:

-
CREATE TYPE phone (
-    country_code int,
-    number text
-);
-
-CREATE TYPE address (
-    street text,
-    city text,
-    zip text,
-    phones map<text, phone>
-);
-
-CREATE TABLE user (
-    name text PRIMARY KEY,
-    addresses map<text, frozen<address>>
-);
-
-
-

Note that:

-
    -
  • Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the type already exists.
  • -
  • A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in the current keyspace.
  • -
  • As of Cassandra 3.11.3, UDTs have to be frozen in most cases, hence the frozen<address> in the table definition above. Please see the section on frozen for more details.
  • -
-
-
-

UDT literals

-

Once a user-defined type has been created, values can be input using a UDT literal:

-
-udt_literal ::=  '{' identifier ':' term ( ',' identifier ':' term )* '}'
-
-

In other words, a UDT literal is like a map literal but its keys are the names of the fields of the type. For instance, one could insert into the table defined in the previous section using:

-
INSERT INTO user (name, addresses)
-          VALUES ('z3 Pr3z1den7', {
-              'home' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                            'landline' : { country_code: 1, number: '...' } }
-              },
-              'work' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'fax' : { country_code: 1, number: '...' } }
-              }
-          });
-
-
-

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields (in which case those will be null).

-
-
-

Altering a UDT

-

An existing user-defined type can be modified using an ALTER TYPE statement:

-
-alter_type_statement    ::=  ALTER TYPE udt_name alter_type_modification
-alter_type_modification ::=  ADD field_definition
-                             | RENAME identifier TO identifier ( identifier TO identifier )*
-
-

You can:

-
    -
  • add a new field to the type (ALTER TYPE address ADD country text). That new field will be null for any values of the type created before the addition.
  • -
  • rename the fields of the type (ALTER TYPE address RENAME zip TO zipcode).
  • -
-
-
-

Dropping a UDT

-

You can drop an existing user-defined type using a DROP TYPE statement:

-
-drop_type_statement ::=  DROP TYPE [ IF EXISTS ] udt_name
-
-

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is still in use by another type, table or function will result in an error.

-

If the type dropped does not exist, an error will be returned unless IF EXISTS is used, in which case the operation is a no-op.
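
For instance, a sketch using the address type from the earlier example (note that it would be rejected while the user table above still uses address):

DROP TYPE IF EXISTS address;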

-
-
-
-

Tuples

-

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

-
-tuple_type    ::=  TUPLE '<' cql_type ( ',' cql_type )* '>'
-tuple_literal ::=  '(' term ( ',' term )* ')'
-
-

and can be used thusly:

-
CREATE TABLE durations (
-    event text,
-    duration tuple<int, text>
-);
-
-INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-
-

Unlike other “composed” types (collections and UDTs), a tuple is always frozen (without the need of the frozen keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of those values can be null, but they need to be explicitly declared as such).

-
-
-

Custom Types

-
-

Note

-

Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is complex, not user-friendly and the other provided types, particularly user-defined types, should almost always be enough.

-
-

A custom type is defined by:

-
-custom_type ::=  string
-
-

A custom type is a string that contains the name of a Java class that extends the server side AbstractType class and that can be loaded by Cassandra (it should thus be in the CLASSPATH of every node running Cassandra). That class defines what values are valid for the type and how values of the type sort when used for a clustering column. For any other purpose, a value of a custom type is the same as that of a blob, and can in particular be input using the blob literal syntax.
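
As a minimal sketch (the table name is hypothetical; org.apache.cassandra.db.marshal.BytesType is one of the server-side AbstractType implementations shipped with Cassandra), a custom type is referenced by quoting the class name where a CQL type is expected:

CREATE TABLE custom_example (
    id int PRIMARY KEY,
    payload 'org.apache.cassandra.db.marshal.BytesType'   // behaves like a blob column
);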

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/data_modeling/index.html b/src/doc/3.11.3/data_modeling/index.html deleted file mode 100644 index 16339a88e..000000000 --- a/src/doc/3.11.3/data_modeling/index.html +++ /dev/null @@ -1,104 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Data Modeling" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Modeling

-
-

Todo

-

TODO

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/development/code_style.html b/src/doc/3.11.3/development/code_style.html deleted file mode 100644 index a9ce6212c..000000000 --- a/src/doc/3.11.3/development/code_style.html +++ /dev/null @@ -1,208 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Code Style" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Code Style

-
-

General Code Conventions

-
-
-
-
-
-

Exception handling

-
-
    -
  • Never ever write catch (...) {} or catch (...) { logger.error() } merely to satisfy Java’s compile-time exception checking. Always propagate the exception up or throw RuntimeException (or, if it “can’t happen,” AssertionError). This makes the exceptions visible to automated tests.
  • -
  • Avoid propagating up checked exceptions that no caller handles. Rethrow as RuntimeException (or IOError, if that is more applicable).
  • -
  • Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don’t hide it behind a warn; if it isn’t, no need for the warning.
  • -
  • If you genuinely know an exception indicates an expected condition, it’s okay to ignore it BUT this must be explicitly explained in a comment.
  • -
-
-
-
-

Boilerplate

-
-
    -
  • Avoid redundant @Override annotations when implementing abstract or interface methods.
  • -
  • Do not implement equals or hashcode methods unless they are actually needed.
  • -
  • Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in “real” methods to either.)
  • -
  • Prefer requiring initialization in the constructor to setters.
  • -
  • Avoid redundant this references to member fields or methods.
  • -
  • Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it.
  • -
  • Always include braces for nested levels of conditionals and loops. Only avoid braces for single level.
  • -
-
-
-
-

Multiline statements

-
-
    -
  • Try to keep lines under 120 characters, but use good judgement – it’s better to exceed 120 by a little, than split a line that has no natural splitting points.
  • -
  • When splitting inside a method call, use one line per parameter and align them, like this:
  • -
-
SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(),
-                                         columnFamilies.size(),
-                                         StorageService.getPartitioner());
-
-
-
    -
  • When splitting a ternary, use one line per clause, carry the operator, and align like this:
  • -
-
var = bar == null
-    ? doFoo()
-    : doBar();
-
-
-
-
-
-

Whitespace

-
-
    -
  • Please make sure to use 4 spaces instead of the tab character for all your indentation.
  • -
  • Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn’t have to pay attention to whitespace diffs.
  • -
-
-
-
-

Imports

-

Please observe the following order for your imports:

-
java
-[blank line]
-com.google.common
-org.apache.commons
-org.junit
-org.slf4j
-[blank line]
-everything else alphabetically
-
-
-
-
-

Format files for IDEs

-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/development/how_to_commit.html b/src/doc/3.11.3/development/how_to_commit.html deleted file mode 100644 index 63d5fb668..000000000 --- a/src/doc/3.11.3/development/how_to_commit.html +++ /dev/null @@ -1,180 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "How-to Commit" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

How-to Commit

-

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself.

-

Here is how committing, merging and pushing will usually look for tickets that follow the convention (if patch-based):

-

A hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3 and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).

-
-
On cassandra-3.0:
-
    -
  1. git am -3 12345-3.0.patch (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git apply -3 12345-3.3.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git apply -3 12345-trunk.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-

Same scenario, but a branch-based contribution:

-
-
On cassandra-3.0:
-
    -
  1. git cherry-pick <sha-of-3.0-commit> (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git format-patch -1 <sha-of-3.3-commit>
  4. -
  5. git apply -3 <sha-of-3.3-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git format-patch -1 <sha-of-trunk-commit>
  4. -
  5. git apply -3 <sha-of-trunk-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-
-

Tip

-

Notes on git flags: the -3 flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply.

-

The --atomic flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue.

-
-
-

Tip

-

The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/development/how_to_review.html b/src/doc/3.11.3/development/how_to_review.html deleted file mode 100644 index 5dab12d7e..000000000 --- a/src/doc/3.11.3/development/how_to_review.html +++ /dev/null @@ -1,172 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Review Checklist" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Review Checklist

-

When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process:

-

General

-
-
    -
  • Does it conform to the Code Style guidelines?
  • -
  • Is there any redundant or duplicate code?
  • -
  • Is the code as modular as possible?
  • -
  • Can any singletons be avoided?
  • -
  • Can any of the code be replaced with library functions?
  • -
  • Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem?
  • -
-
-

Error-Handling

-
-
    -
  • Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded?
  • -
  • Where third-party utilities are used, are returning errors being caught?
  • -
  • Are invalid parameter values handled?
  • -
  • Are any Throwable/Exceptions passed to the JVMStabilityInspector?
  • -
  • Are errors well-documented? Does the error message tell the user how to proceed?
  • -
  • Do exceptions propagate to the appropriate level in the code?
  • -
-
-

Documentation

-
-
    -
  • Do comments exist and describe the intent of the code (the “why”, not the “how”)?
  • -
  • Are javadocs added where appropriate?
  • -
  • Is any unusual behavior or edge-case handling described?
  • -
  • Are data structures and units of measurement explained?
  • -
  • Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’?
  • -
  • Does the code self-document via clear naming, abstractions, and flow control?
  • -
  • Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed?
  • -
  • Is the ticket tagged with “client-impacting” and “doc-impacting”, where appropriate?
  • -
  • Has lib/licences been updated for third-party libs? Are they Apache License compatible?
  • -
  • Is the Component on the JIRA ticket set appropriately?
  • -
-
-

Testing

-
-
    -
  • Is the code testable? i.e. does it avoid adding or hiding too many dependencies, can objects be initialized, can test frameworks invoke the relevant methods, etc.?
  • -
  • Do tests exist and are they comprehensive?
  • -
  • Do unit tests actually test that the code is performing the intended functionality?
  • -
  • Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse?
  • -
  • If the code may be affected by multi-node clusters, are there dtests?
  • -
  • If the code may take a long time to test properly, are there CVH tests?
  • -
  • Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions?
  • -
  • If patch affects read/write path, did we test for performance regressions w/multiple workloads?
  • -
  • If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature?
  • -
-
-

Logging

-
-
    -
  • Are logging statements logged at the correct level?
  • -
  • Are there logs in the critical path that could affect performance?
  • -
  • Is there any log that could be added to communicate status or troubleshoot potential problems in this feature?
  • -
  • Can any unnecessary logging statement be removed?
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/development/ide.html b/src/doc/3.11.3/development/ide.html deleted file mode 100644 index 917170aa5..000000000 --- a/src/doc/3.11.3/development/ide.html +++ /dev/null @@ -1,234 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Building and IDE Integration" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Building and IDE Integration

-
-

Building From Source

-

Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using Java 8, Git and Ant.

-

The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:

-
git clone http://git-wip-us.apache.org/repos/asf/cassandra.git cassandra-trunk
-
-
-

Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:

-
git checkout cassandra-3.0
-
-
-

You can get a list of available branches with git branch.

-

Finally build Cassandra using ant:

-
ant
-
-
-

This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled.

-
-

Hint

-

You can setup multiple working trees for different Cassandra versions from the same repository using git-worktree.

-
-
-

Note

-

Bleeding edge development snapshots of Cassandra are available from Jenkins continuous integration.

-
-
-
-

Setting up Cassandra in IntelliJ IDEA

-

IntelliJ IDEA by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra.

-
-

Setup Cassandra as a Project (C* 2.1 and newer)

-

Since 2.1.5, there is a new ant target: generate-idea-files. Please see our wiki for instructions for older Cassandra versions.

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Once Cassandra is built, generate the IDEA files using ant:
  2. -
-
ant generate-idea-files
-
-
-
    -
  1. Start IDEA
  2. -
  3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA’s File menu
  4. -
-

The project generated by the ant task generate-idea-files contains nearly everything you need to debug Cassandra and execute unit tests.

-
-
    -
  • Run/debug defaults for JUnit
  • -
  • Run/debug configuration for Cassandra daemon
  • -
  • License header for Java source files
  • -
  • Cassandra code style
  • -
  • Inspections
  • -
-
-
-
-
-

Setting up Cassandra in Eclipse

-

Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the download page. The following guide was created with “Eclipse IDE for Java Developers”.

-

These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x.

-
-

Project Settings

-

It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.

-
-
    -
  • Clone and build Cassandra as described above.
  • -
  • Run ant generate-eclipse-files to create the Eclipse settings.
  • -
  • Start Eclipse.
  • -
  • Select File->Import->Existing Projects into Workspace->Select git directory.
  • -
  • Make sure “cassandra-trunk” is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above).
  • -
  • Confirm “Finish” to have your project imported.
  • -
-
-

You should now be able to find the project as part of the “Package Explorer” or “Project Explorer” without having Eclipse complain about any errors after building the project automatically.

-
-
-

Unit Tests

-

Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting Run As->JUnit Test. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting Debug As->JUnit Test.

-

Alternatively all unit tests can be run from the command line as described in Testing

-
-
-

Debugging Cassandra Using Eclipse

-

There are two ways to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ./bin/cassandra script and connect to the JVM remotely from Eclipse, or start Cassandra from Eclipse right away.

-
-

Starting Cassandra From Command Line

-
-
    -
  • Set an environment variable to define remote debugging options for the JVM: export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"
  • -
  • Start Cassandra by executing the ./bin/cassandra
  • -
-
-

Afterwards you should be able to connect to the running Cassandra process through the following steps:

-

From the menu, select Run->Debug Configurations..

-../_images/eclipse_debug0.png -

Create new remote application

-../_images/eclipse_debug1.png -

Configure connection settings by specifying a name and port 1414

-../_images/eclipse_debug2.png -

Afterwards confirm “Debug” to connect to the JVM and start debugging Cassandra!

-
-
-

Starting Cassandra From Eclipse

-

Cassandra can also be started directly from Eclipse if you don’t want to use the command line.

-

From the menu, select Run->Run Configurations..

-../_images/eclipse_debug3.png -

Create new application

-../_images/eclipse_debug4.png -

Specify name, project and main class org.apache.cassandra.service.CassandraDaemon

-../_images/eclipse_debug5.png -

Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed.

-
-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true
-
-
-../_images/eclipse_debug6.png -

Now just confirm “Debug” and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging!

-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/development/index.html b/src/doc/3.11.3/development/index.html deleted file mode 100644 index 9b9bdedcd..000000000 --- a/src/doc/3.11.3/development/index.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Development" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.3/development/patches.html b/src/doc/3.11.3/development/patches.html deleted file mode 100644 index 161be73f6..000000000 --- a/src/doc/3.11.3/development/patches.html +++ /dev/null @@ -1,247 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Contributing Code Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contributing Code Changes

-
-

Choosing What to Work on

-

Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you’re addressing.

-
-
As a general rule of thumb:
-
    -
  • Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the developer community
  • -
  • Bug fixes take higher priority compared to features
  • -
  • The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
  • -
  • Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately
  • -
-
-
-
-

Hint

-

Not sure what to work on? Just pick an issue tagged with the low hanging fruit label in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

-
-
-
-

Before You Start Coding

-

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it’s generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or IRC channel listed on our community page.

-
-
You should also
-
    -
  • Avoid redundant work by searching for already reported issues in JIRA
  • -
  • Create a new issue early in the process describing what you’re working on - not just after finishing your patch
  • -
  • Link related JIRA issues with your own ticket to provide a better context
  • -
  • Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
  • -
  • Ping people whom you would like to ask for advice on JIRA by mentioning users
  • -
-
-
There are also some fixed rules that you need to be aware of:
-
    -
  • Patches will only be applied to branches by following the release model
  • -
  • Code must be testable
  • -
  • Code must follow the Code Style convention
  • -
  • Changes must not break compatibility between different Cassandra versions
  • -
  • Contributions must be covered by the Apache License
  • -
-
-
-
-

Choosing the Right Branches to Work on

-

There are currently multiple Cassandra versions maintained in individual branches:

Version | Policy
3.x     | Tick-tock (see below)
3.0     | Bug fixes only
2.2     | Bug fixes only
2.1     | Critical bug fixes only
-

Corresponding branches in git are easy to recognize as they are named cassandra-<release> (e.g. cassandra-3.0). The trunk branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

-
-

Tick-Tock Releases

-

New releases created as part of the tick-tock release process will either focus on stability (odd version numbers) or introduce new features (even version numbers). Any code for new Cassandra features should be based on the latest, unreleased 3.x branch with an even version number, or on trunk.

-
-
-

Bug Fixes

-

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be cassandra-2.1 -> cassandra-2.2 -> cassandra-3.0 -> cassandra-3.x -> trunk. But don’t worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn’t very common. As a contributor, you’re also not expected to provide a single patch for each version. What you need to do however is:

-
-
    -
  • Be clear about which versions you could verify to be affected by the bug
  • -
  • For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on a case-by-case basis
  • -
  • If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0)
  • -
  • Test if the patch can be merged cleanly across branches in the direction listed above
  • -
  • Be clear which branches may need attention by the committer or even create custom patches for those if you can
  • -
-
-
-
-
-
-

Creating a Patch

-

So you’ve finished coding and the great moment arrives: it’s time to submit your patch!

-
-
    -
  1. Create a branch for your changes if you haven’t done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. git checkout -b 12345-3.0
  2. -
  3. Verify that you follow Cassandra’s Code Style
  4. -
  5. Make sure all tests (including yours) pass using ant as described in Testing. If you suspect a test failure is unrelated to your change, it may be useful to check the test’s status by searching the issue tracker or looking at CI results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites.
  6. -
  7. Consider going through the Review Checklist for your code. This will help you to understand how others will consider your change for inclusion.
  8. -
  9. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either:
  10. -
-
-
    -
  1. Attach a patch to JIRA with a single squashed commit in it (per branch), or
  2. -
  3. Squash the commits in-place in your branches into one
  4. -
-
-
    -
  1. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch ending with the following statement on the last line: patch by X; reviewed by Y for CASSANDRA-ZZZZZ
  2. -
  3. When you’re happy with the result, create a patch:
  4. -
-
-
git add <any new or modified file>
-git commit -m '<message>'
-git format-patch HEAD~1
-mv <patch-file> <ticket-branchname.txt> (e.g. 12345-trunk.txt, 12345-3.0.txt)
-
-
-

Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch:

-
git push --set-upstream origin 12345-3.0
-
-
-
-
    -
  1. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless.
  2. -
  3. Attach the newly generated patch to the ticket/add a link to your branch and click “Submit Patch” at the top of the ticket. This will move the ticket into “Patch Available” status, indicating that your submission is ready for review.
  4. -
  5. Wait for other developers or committers to review it and hopefully +1 the ticket (see Review Checklist). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable.
  6. -
  7. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into “Patch Available” once again.
  8. -
-
-

Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/development/testing.html b/src/doc/3.11.3/development/testing.html deleted file mode 100644 index 523da97dc..000000000 --- a/src/doc/3.11.3/development/testing.html +++ /dev/null @@ -1,171 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Testing" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Testing

-

Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you’re working on.

-
-

Unit Testing

-

The simplest way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the test/unit directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible, as Cassandra doesn’t have a very mock-friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods, as in the following example.

-
@Test
-public void testBatchAndList() throws Throwable
-{
-   createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
-   execute("BEGIN BATCH " +
-           "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
-           "APPLY BATCH");
-
-   assertRows(execute("SELECT l FROM %s WHERE k = 0"),
-              row(list(1, 2, 3)));
-}
-
-
-

Unit tests can be run from the command line using the ant test command, ant test -Dtest.name=<simple_classname> to execute a test suite or ant testsome -Dtest.name=<FQCN> -Dtest.methods=<testmethod1>[,testmethod2] for individual tests. For example, to run all test methods in the org.apache.cassandra.cql3.SimpleQueryTest class, you would run:

-
ant test -Dtest.name=SimpleQueryTest
-
-
-

To run only the testStaticCompactTables() test method from that class, you would run:

-
ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables
-
-
-
-

Long running tests

-

Tests that consume a significant amount of time during execution can be found in the test/long directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under test/long only when using the ant long-test target.

-
-
-
-

DTests

-

One way of doing integration or system testing at larger scale is by using dtest, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ccmlib from the ccm project. Dtests will setup clusters using this library just as you do running ad-hoc ccm commands on your local machine. Afterwards dtests will use the Python driver to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes.

-

Using dtests helps us to prevent regression bugs by continually executing tests on the CI server against new patches. For frequent contributors, this Jenkins is set up to build branches from their GitHub repositories. It is likely that your reviewer will use this Jenkins instance to run tests for your patch. Read more on the motivation behind the CI server here.

-

The best way to learn how to write dtests is probably by reading the introduction “How to Write a Dtest” and by looking at existing, recently updated tests in the project. New tests must follow certain style conventions that are checked before accepting contributions. In contrast to Cassandra, dtest issues and pull requests are managed on GitHub, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR.

-

Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you.

-
-
-

Performance Testing

-

Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable.

-
-

Cassandra Stress Tool

-

TODO: CASSANDRA-12365

-
-
-

cstar_perf

-

Another tool available on github is cstar_perf that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it.

-
-
-

CircleCI

-

Cassandra ships with a default CircleCI configuration to enable running tests on your branches. You need to go to the CircleCI website, click “Login” and log in with your GitHub account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click “Projects”, then your GitHub account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ant eclipse-warnings and ant test will be run. If you up the parallelism to 4, it also runs ant long-test, ant test-compression and ant stress-test.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/faq/index.html b/src/doc/3.11.3/faq/index.html deleted file mode 100644 index d77fc4dc8..000000000 --- a/src/doc/3.11.3/faq/index.html +++ /dev/null @@ -1,315 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Frequently Asked Questions" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Frequently Asked Questions

- -
-

Why can’t I set listen_address to listen on 0.0.0.0 (all my addresses)?

-

Cassandra is a gossip-based distributed system and listen_address is the address a node tells other nodes to reach -it at. Telling other nodes “contact me on any of my addresses” is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen.

-

If you don’t want to manually specify an IP to listen_address for each node in your cluster (understandable!), leave it blank and Cassandra will use InetAddress.getLocalHost() to pick an address. Then it’s up to you or your ops team to make things resolve correctly (/etc/hosts, DNS, etc.).

-

One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769).

-

See CASSANDRA-256 and CASSANDRA-43 for more gory details.

-
-
-

What ports does Cassandra use?

-

By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX (and 9160 for the deprecated Thrift interface). The internode communication and native protocol ports -are configurable in the Cassandra Configuration File. The JMX port is configurable in cassandra-env.sh (through JVM -options). All ports are TCP.

-
-
-

What happens to existing data in my cluster when I add new nodes?

-

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See Adding, replacing, moving and removing nodes.

-
-
-

I delete data from Cassandra, but disk usage stays the same. What gives?

-

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can’t actually be removed when you perform a delete; instead, a marker (also called a “tombstone”) is written to indicate the value’s new status. Never fear though: on the first compaction that occurs between the data and the tombstone, the data will be expunged completely and the corresponding disk space recovered. See Compaction for more detail.

-
-
-

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?

-

This happens when you have the same token assigned to each node. Don’t do that.

-

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

-

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart.

-
-
-

Can I change the replication factor (of a keyspace) on a live cluster?

-

Yes, but it will require running a repair (or cleanup) to change the replica count of existing data; a sketch of the commands follows the list below:

-
    -
  • Alter the replication factor for desired keyspace (using cqlsh for instance).
  • -
  • If you’re reducing the replication factor, run nodetool cleanup on the cluster to remove surplus replicated data. -Cleanup runs on a per-node basis.
  • -
  • If you’re increasing the replication factor, run nodetool repair to ensure data is replicated according to the new -configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster -performance. It’s highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will -most likely swamp it.
  • -
-
-
-
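A minimal sketch of that sequence (keyspace name, datacenter name and replication factor are illustrative):

```bash
# 1. Alter the replication settings for the keyspace
cqlsh -e "ALTER KEYSPACE my_ks WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};"

# 2a. If the replication factor was reduced, remove surplus replicas (run on every node)
nodetool cleanup my_ks

# 2b. If the replication factor was increased, repair so the new replicas receive the data (node by node)
nodetool repair -pr my_ks
```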

Can I Store (large) BLOBs in Cassandra?

-

Cassandra isn’t optimized for large file or BLOB storage, and a single blob value is always read and sent to the client in its entirety. As such, storing small blobs (less than single-digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

-

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the max_mutation_size_in_kb setting in the Cassandra Configuration File (which defaults to half of commitlog_segment_size_in_mb, which itself defaults to 32MB).

-
-
-

Nodetool says “Connection refused to host: 127.0.1.1” for any remote host. What gives?

-

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

-

If you are not using DNS, then make sure that your /etc/hosts files are accurate on both ends. If that fails, try -setting the -Djava.rmi.server.hostname=<public name> JVM option near the bottom of cassandra-env.sh to an -interface that you can reach from the remote machine.
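For example, near the bottom of conf/cassandra-env.sh (the address is illustrative; use one the remote machine can actually reach):

```bash
# Make RMI advertise a reachable address so remote nodetool/JMX connections work
JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=203.0.113.10"
```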

-
-
-

Will batching my operations speed up my bulk load?

-

No. Using batches to load data will generally just add “spikes” of latency. Use asynchronous INSERTs instead, or use -true Bulk Loading.

-

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch stays reasonable). But never ever blindly batch everything!

-
-
-

On RHEL nodes are unable to join the ring

-

Check if SELinux is on; if it is, turn it off.

-
-
-

How do I unsubscribe from the email list?

-

Send an email to user-unsubscribe@cassandra.apache.org.

-
-
-

Why does top report that Cassandra is using a lot more memory than the Java heap max?

-

Cassandra uses Memory Mapped Files (mmap) internally. That is, we -use the operating system’s virtual memory system to map a number of on-disk files into the Cassandra process’ address -space. This will “use” virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that.

-

What matters from the perspective of “memory use” in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap’d /dev/zero, which represent real memory used. The key issue is that for a mmap’d file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write.

-

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don’t -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail here.

-
-
-

What are seeds?

-

Seeds are used during startup to discover the cluster.

-

If you configure your nodes to refer to some node as a seed, nodes in your ring tend to send gossip messages to seeds more often (also see the section on gossip) than to non-seeds. In other words, seeds work as hubs of the gossip network. With seeds, each node can detect status changes of other nodes quickly.

-

Seeds are also used by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node joins the ring, it learns about the other nodes, so it doesn’t need a seed on subsequent boots.

-

You can make a node a seed at any time. There is nothing special about seed nodes. If you list a node in the seed list, it is a seed.

-

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to the seed list later. If you have no data (a new install) you do not have to worry about bootstrapping at all.

-

Recommended usage of seeds:

-
    -
  • pick two (or more) nodes per data center as seed nodes.
  • -
  • sync the seed list to all your nodes
  • -
-
-
-

Does single seed mean single point of failure?

-

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

-
-
-

Why can’t I call jmx method X on jconsole?

-

Some JMX operations take array arguments, and as jconsole doesn’t support array arguments, those operations can’t be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.

-
-
-

Why do I see ”... messages dropped ...” in the logs?

-

This is a symptom of load shedding – Cassandra defending itself against more requests than it can handle.

-

Internode messages which are received by a node, but do not get processed within their proper timeout (see read_request_timeout, write_request_timeout, ... in the Cassandra Configuration File), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

-

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints, or a manual repair. The write operation may also have timed out as a result.

-

For reads, this means a read request may not have completed.

-

Load shedding is part of the Cassandra architecture; if this is a persistent issue it is generally a sign of an overloaded node or cluster.

-
-
-

Cassandra dies with java.lang.OutOfMemoryError: Map failed

-

If Cassandra is dying specifically with the “Map failed” message, it means the OS is denying Java the ability to lock more memory. On Linux, this typically means memlock is limited. Check /proc/<pid of cassandra>/limits to verify this and raise it (e.g., via ulimit in bash). You may also need to increase vm.max_map_count. Note that the Debian package handles this for you automatically.
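A hedged sketch of those checks and adjustments (the pgrep pattern and the max_map_count value are illustrative):

```bash
# Inspect the limits of the running Cassandra process
cat /proc/$(pgrep -f CassandraDaemon)/limits

# Raise the locked-memory limit for the current shell (or set it permanently in limits.conf)
ulimit -l unlimited

# Raise the kernel-wide mmap limit
sudo sysctl -w vm.max_map_count=1048575
```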

-
-
-

What happens if two updates are made with the same timestamp?

-

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected.

-
-
-

Why does bootstrapping a new node fail with a “Stream failed” error?

-

Two main possibilities:

-
    -
  1. the GC may be creating long pauses disrupting the streaming process
  2. -
  3. compactions happening in the background hold streaming long enough that the TCP connection fails
  4. -
-

In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (the default is very high on Linux). Try just running the following:

-
$ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-
-

To make those settings permanent, add them to your /etc/sysctl.conf file.
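For example, appending the same values shown above (using tee is just one way to do it):

```bash
cat <<'EOF' | sudo tee -a /etc/sysctl.conf
net.ipv4.tcp_keepalive_time=60
net.ipv4.tcp_keepalive_intvl=60
net.ipv4.tcp_keepalive_probes=5
EOF
sudo sysctl -p   # reload so the settings apply immediately
```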

-

Note: GCE‘s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/genindex.html b/src/doc/3.11.3/genindex.html deleted file mode 100644 index 963568cd1..000000000 --- a/src/doc/3.11.3/genindex.html +++ /dev/null @@ -1,93 +0,0 @@ - ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Index" -doc-header-links: ' - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.3/getting_started/configuring.html b/src/doc/3.11.3/getting_started/configuring.html deleted file mode 100644 index a825a41bc..000000000 --- a/src/doc/3.11.3/getting_started/configuring.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

For running Cassandra on a single node, the steps above are enough; you don’t really need to change any configuration. However, when you deploy a cluster of nodes, or use clients that are not on the same host, there are some parameters that must be changed.

-

The Cassandra configuration files can be found in the conf directory of tarballs. For packages, the configuration -files will be located in /etc/cassandra.

-
-

Main runtime properties

-

Most of the configuration in Cassandra is done via yaml properties that can be set in cassandra.yaml. At a minimum you should consider setting the following properties (a short editing sketch follows the list):

-
    -
  • cluster_name: the name of your cluster.
  • -
  • seeds: a comma separated list of the IP addresses of your cluster seeds.
  • -
  • storage_port: you don’t necessarily need to change this but make sure that there are no firewalls blocking this -port.
  • -
  • listen_address: the IP address of your node. This is what allows other nodes to communicate with this node, so it is important that you change it. Alternatively, you can set listen_interface to tell Cassandra which interface to use, and consequently which address to use. Set only one, not both.
  • -
  • native_transport_port: as for storage_port, make sure this port is not blocked by firewalls as clients will -communicate with Cassandra on this port.
  • -
-
-
-
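A minimal sketch of editing those values non-interactively (the cluster name and addresses are illustrative; a plain text editor works just as well):

```bash
# Adjust the per-node basics in conf/cassandra.yaml before the first start
sed -i "s/^cluster_name:.*/cluster_name: 'My Cluster'/" conf/cassandra.yaml
sed -i "s/^listen_address:.*/listen_address: 10.0.0.3/"  conf/cassandra.yaml
sed -i 's/- seeds:.*/- seeds: "10.0.0.1,10.0.0.2"/'      conf/cassandra.yaml
```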

Changing the location of directories

-

The following yaml properties control the location of directories:

-
    -
  • data_file_directories: one or more directories where data files are located.
  • -
  • commitlog_directory: the directory where commitlog files are located.
  • -
  • saved_caches_directory: the directory where saved caches are located.
  • -
  • hints_directory: the directory where hints are located.
  • -
-

For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks.

-
-
-

Environment variables

-

JVM-level settings such as heap size can be set in cassandra-env.sh. You can add any additional JVM command line -argument to the JVM_OPTS environment variable; when Cassandra starts these arguments will be passed to the JVM.
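For instance, a sketch of what might be added to conf/cassandra-env.sh (the heap sizes are illustrative and workload dependent; MAX_HEAP_SIZE and HEAP_NEWSIZE should be set together):

```bash
# Pin the heap instead of letting cassandra-env.sh auto-size it
MAX_HEAP_SIZE="4G"
HEAP_NEWSIZE="800M"

# Any extra JVM flag can be appended to JVM_OPTS in the same file
JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps"
```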

-
-
-

Logging

-

The logger in use is logback. You can change logging properties by editing logback.xml. By default it will log at -INFO level into a file called system.log and at debug level into a file called debug.log. When running in the -foreground, it will also log at INFO level to the console.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/getting_started/drivers.html b/src/doc/3.11.3/getting_started/drivers.html deleted file mode 100644 index 69963a801..000000000 --- a/src/doc/3.11.3/getting_started/drivers.html +++ /dev/null @@ -1,223 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Client drivers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Client drivers

-

Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver.

- -
-

Python

- -
-

Clojure

- -
-
-

Erlang

- -
-
-

Go

- -
-
-

Haskell

- -
-
-

Rust

- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/getting_started/index.html b/src/doc/3.11.3/getting_started/index.html deleted file mode 100644 index 887972e85..000000000 --- a/src/doc/3.11.3/getting_started/index.html +++ /dev/null @@ -1,146 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Getting Started" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.3/getting_started/installing.html b/src/doc/3.11.3/getting_started/installing.html deleted file mode 100644 index 0dec0b118..000000000 --- a/src/doc/3.11.3/getting_started/installing.html +++ /dev/null @@ -1,196 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Installing Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Installing Cassandra

-
-

Prerequisites

-
    -
  • The latest version of Java 8, either the Oracle Java Standard Edition 8 or OpenJDK 8. To -verify that you have the correct version of java installed, type java -version.
  • -
  • For using cqlsh, the latest version of Python 2.7. To verify that you have -the correct version of Python installed, type python --version.
  • -
-
-
-

Installation from binary tarball files

- -
tar -xvf apache-cassandra-3.6-bin.tar.gz cassandra
-
-
-

The files will be extracted into apache-cassandra-3.6; substitute 3.6 with the release number that you have downloaded. A consolidated example of starting and verifying the node follows the list below.

-
    -
  • Optionally add apache-cassandra-3.6/bin to your path.
  • -
  • Start Cassandra in the foreground by invoking bin/cassandra -f from the command line. Press “Control-C” to stop -Cassandra. Start Cassandra in the background by invoking bin/cassandra from the command line. Invoke kill pid -or pkill -f CassandraDaemon to stop Cassandra, where pid is the Cassandra process id, which you can find for -example by invoking pgrep -f CassandraDaemon.
  • -
  • Verify that Cassandra is running by invoking bin/nodetool status from the command line.
  • -
  • Configuration files are located in the conf sub-directory.
  • -
  • Since Cassandra 2.1, log and data directories are located in the logs and data sub-directories respectively. -Older versions defaulted to /var/log/cassandra and /var/lib/cassandra. Due to this, it is necessary to either -start Cassandra with root privileges or change conf/cassandra.yaml to use directories owned by the current user, -as explained below in the section on changing the location of directories.
  • -
-
-
-
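Putting the list above together, a typical first run looks roughly like this (substitute the version you actually downloaded):

```bash
cd apache-cassandra-3.6
bin/cassandra               # start in the background; add -f to stay in the foreground
bin/nodetool status         # the local node should show as UN (Up/Normal)
pkill -f CassandraDaemon    # stop it again
```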

Installation from Debian packages

-
    -
  • Add the Apache repository of Cassandra to /etc/apt/sources.list.d/cassandra.sources.list, for example for version -3.6:
  • -
-
echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-
-
-
    -
  • Add the Apache Cassandra repository keys:
  • -
-
curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
-
-
-
    -
  • Update the repositories:
  • -
-
sudo apt-get update
-
-
-
    -
  • If you encounter this error:
  • -
-
GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA
-
-
-

Then add the public key A278B781FE4B2BDA as follows:

-
sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA
-
-
-

and repeat sudo apt-get update. The actual key may be different; you get it from the error message itself. For a full list of Apache contributors’ public keys, you can refer to this link.

-
    -
  • Install Cassandra:
  • -
-
sudo apt-get install cassandra
-
-
-
    -
  • You can start Cassandra with sudo service cassandra start and stop it with sudo service cassandra stop. -However, normally the service will start automatically. For this reason be sure to stop it if you need to make any -configuration changes.
  • -
  • Verify that Cassandra is running by invoking nodetool status from the command line.
  • -
  • The default location of configuration files is /etc/cassandra.
  • -
  • The default location of log and data directories is /var/log/cassandra/ and /var/lib/cassandra.
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/getting_started/querying.html b/src/doc/3.11.3/getting_started/querying.html deleted file mode 100644 index 1e4078484..000000000 --- a/src/doc/3.11.3/getting_started/querying.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Inserting and querying" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Inserting and querying

-

The API to Cassandra is CQL, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done:

-
    -
  • either using cqlsh,
  • -
  • or through a client driver for Cassandra.
  • -
-
-

CQLSH

-

cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:

-
$ bin/cqlsh localhost
-Connected to Test Cluster at localhost:9042.
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-Use HELP for help.
-cqlsh> SELECT cluster_name, listen_address FROM system.local;
-
- cluster_name | listen_address
---------------+----------------
- Test Cluster |      127.0.0.1
-
-(1 rows)
-cqlsh>
-
-
-

See the cqlsh section for full documentation.

-
-
-

Client drivers

-

A lot of client drivers are provided by the community, and a list of known drivers is provided in the next section. You should refer to the documentation of each driver for more information on how to use it.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/index.html b/src/doc/3.11.3/index.html deleted file mode 100644 index 7ecd598e5..000000000 --- a/src/doc/3.11.3/index.html +++ /dev/null @@ -1,75 +0,0 @@ ---- -layout: doclandingpage -title: "Documentation" -is_homepage: false -is_sphinx_doc: false ---- - -

Apache Cassandra Documentation v3.11.3

- -
This documentation is currently a work-in-progress and contains a number of TODO sections. - Contributions are welcome.
- -

Main documentation


Meta information

- - - - diff --git a/src/doc/3.11.3/objects.inv b/src/doc/3.11.3/objects.inv deleted file mode 100644 index 69eee5cf8..000000000 Binary files a/src/doc/3.11.3/objects.inv and /dev/null differ diff --git a/src/doc/3.11.3/operating/backups.html b/src/doc/3.11.3/operating/backups.html deleted file mode 100644 index 30ba42fec..000000000 --- a/src/doc/3.11.3/operating/backups.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Backups" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.3/operating/bloom_filters.html b/src/doc/3.11.3/operating/bloom_filters.html deleted file mode 100644 index bd729a4d3..000000000 --- a/src/doc/3.11.3/operating/bloom_filters.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bloom Filters" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bloom Filters

-

In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter.

-

Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: the data definitely does not exist in the given file, or the data probably exists in the given file.

-

While bloom filters cannot guarantee that the data exists in a given SSTable, bloom filters can be made more accurate by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting bloom_filter_fp_chance to a float between 0 and 1.

-

The default value for bloom_filter_fp_chance is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases.

-

Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the bloom_filter_fp_chance gets closer to 0), memory usage -increases non-linearly - the bloom filter for bloom_filter_fp_chance = 0.01 will require about three times as much -memory as the same table with bloom_filter_fp_chance = 0.1.

-

Typical values for bloom_filter_fp_chance are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case:

-
    -
  • Users with more RAM and slower disks may benefit from setting the bloom_filter_fp_chance to a numerically lower -number (such as 0.01) to avoid excess IO operations
  • -
  • Users with less RAM, more dense nodes, or very fast disks may tolerate a higher bloom_filter_fp_chance in order to -save RAM at the expense of excess IO operations
  • -
  • In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics -workloads), setting the bloom_filter_fp_chance to a much higher number is acceptable.
  • -
-
-

Changing

-

The bloom filter false positive chance is visible in the DESCRIBE TABLE output as the field -bloom_filter_fp_chance. Operators can change the value with an ALTER TABLE statement:

-
ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01
-
-
-

Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ALTER TABLE statement, new files on disk will be written with the new bloom_filter_fp_chance, but existing sstables will not be modified until they are compacted - if an operator needs a change to bloom_filter_fp_chance to take effect, they can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the sstables on disk, regenerating the bloom filters in the process.
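For example (keyspace and table names are illustrative):

```bash
# Rewrite the existing SSTables so the new bloom_filter_fp_chance is applied to them
nodetool upgradesstables -a my_ks my_table
```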

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/operating/bulk_loading.html b/src/doc/3.11.3/operating/bulk_loading.html deleted file mode 100644 index 911f24bbf..000000000 --- a/src/doc/3.11.3/operating/bulk_loading.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bulk Loading" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.3/operating/cdc.html b/src/doc/3.11.3/operating/cdc.html deleted file mode 100644 index 1962ee6be..000000000 --- a/src/doc/3.11.3/operating/cdc.html +++ /dev/null @@ -1,186 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Change Data Capture" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Change Data Capture

-
-

Overview

-

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the combined flushed and unflushed CDC-log is reached. An operator can -enable CDC on a table by setting the table property cdc=true (either when creating the table or altering it), after which any CommitLogSegments containing -data for a CDC-enabled table are moved to the directory specified in cassandra.yaml on segment discard. A threshold -of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will not allow CDC -data until a consumer parses and removes data from the destination archival directory.

-
-
-

Configuration

-
-

Enabling or disabling CDC on a table

-

CDC is enabled or disabled through the cdc table property, for instance:

-
CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=false;
-
-
-
-
-

cassandra.yaml parameters

-

The following cassandra.yaml options are available for CDC:

-
-
cdc_enabled (default: false)
-
Enable or disable CDC operations node-wide.
-
cdc_raw_directory (default: $CASSANDRA_HOME/data/cdc_raw)
-
Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
-
cdc_free_space_in_mb: (default: min of 4096 and 1/8th volume space)
-
Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in -cdc_raw_directory.
-
cdc_free_space_check_interval_ms (default: 250)
-
When at capacity, we limit the frequency with which we re-calculate the space taken up by cdc_raw_directory to -prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-
-
-
-

Reading CommitLogSegments

-

This implementation included a refactor of CommitLogReplayer into CommitLogReader.java. Usage is fairly straightforward, with a variety of signatures available for use. In order to handle mutations read from disk, implement CommitLogReadHandler.

-
-
-

Warnings

-

Do not enable CDC without some kind of consumption process in-place.

-

The initial implementation of Change Data Capture does not include a parser (see Reading CommitLogSegments above) -so, if CDC is enabled on a node and then on a table, the cdc_free_space_in_mb will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place.

-
-
-

Further Reading

- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/operating/compaction.html b/src/doc/3.11.3/operating/compaction.html deleted file mode 100644 index a042bc752..000000000 --- a/src/doc/3.11.3/operating/compaction.html +++ /dev/null @@ -1,514 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compaction" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compaction

-
-

Types of compaction

-

The concept of compaction is used for different kinds of operations in Cassandra; the common thing about these operations is that they take one or more sstables and output new sstables. The types of compaction are:

-
-
Minor compaction
-
triggered automatically in Cassandra.
-
Major compaction
-
a user executes a compaction over all sstables on the node.
-
User defined compaction
-
a user triggers a compaction on a given set of sstables.
-
Scrub
-
try to fix any broken sstables. This can actually remove valid data if that data is corrupted; if that happens you will need to run a full repair on the node.
-
Upgradesstables
-
upgrade sstables to the latest version. Run this after upgrading to a new major version.
-
Cleanup
-
remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been -bootstrapped since that node will take ownership of some ranges from those nodes.
-
Secondary index rebuild
-
rebuild the secondary indexes on the node.
-
Anticompaction
-
after repair the ranges that were actually repaired are split out of the sstables that existed when repair started.
-
Sub range compaction
-
It is possible to only compact a given sub range - this could be useful if you know a token that has been -misbehaving - either gathering many updates or many deletes. (nodetool compact -st x -et y) will pick -all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will -most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS -the resulting sstable will end up in L0.
-
-
-
-

When is a minor compaction triggered?

-

  • When an sstable is added to the node through flushing/streaming etc.
  • When autocompaction is enabled after being disabled (nodetool enableautocompaction).
  • When compaction adds new sstables.
  • A check for new minor compactions runs every 5 minutes.

-
-
-

Merging sstables

-

Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently.

-
-
-

Tombstones and Garbage Collection (GC) Grace

-
-

Why Tombstones

-

When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra.

-
-
-

Deletes without tombstones

-

Imagine a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If one of the nodes fails and our delete operation only removes existing values, we can end up with a cluster that looks like:

-
[], [], [A]
-
-
-

Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:

-
[A], [A], [A]
-
-
-

This would cause our data to be resurrected even though it had been -deleted.

-
-
-

Deletes with Tombstones

-

Starting again with a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If instead of removing data we add a tombstone record, our single node failure situation will look like this.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A]
-
-
-

Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]]
-
-
-

Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as gc_grace_seconds for every table in Cassandra.

-
-
-

The gc_grace_seconds parameter and Tombstone Removal

-

The table level gc_grace_seconds parameter controls how long Cassandra will retain tombstones through compaction events before finally removing them. This duration should directly reflect the amount of time a user expects to allow before recovering a failed node. After gc_grace_seconds has expired the tombstone may be removed (meaning there will no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to be able to drop an actual tombstone the following needs to be true:

-
    -
  • The tombstone must be older than gc_grace_seconds
  • -
  • If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older -than the tombstone containing X must be included in the same compaction. We don’t need to care if the partition is in -an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older -than the data it cannot shadow that data.
  • -
  • If the option only_purge_repaired_tombstones is enabled, tombstones are only removed if the data has also been -repaired.
  • -
-

If a node remains down or disconnected for longer than gc_grace_seconds, its deleted data will be repaired back to the other nodes and re-appear in the cluster. This is basically the same as in the “Deletes without Tombstones” section. Note that tombstones will not be removed until a compaction event even if gc_grace_seconds has elapsed.

-

The default value for gc_grace_seconds is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using WITH gc_grace_seconds.
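For example (keyspace, table and the 7-day value are illustrative):

```bash
cqlsh -e "ALTER TABLE my_ks.my_table WITH gc_grace_seconds = 604800;"
```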

-
-
-
-

TTL

-

Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least gc_grace_seconds. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once.

-
-
-

Fully expired sstables

-

If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable, compaction can drop that sstable. If you see sstables with only tombstones (note that TTL’ed data is considered tombstones once the time to live has expired) but they are not being dropped by compaction, it is likely that other sstables contain older data. There is a tool called sstableexpiredblockers that will list which sstables are droppable and which are blocking them from being dropped. This is especially useful for time series compaction with TimeWindowCompactionStrategy (and the deprecated DateTieredCompactionStrategy).
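The tool takes a keyspace and a table name (names below are illustrative) and is run on the node holding the stubborn sstables:

```bash
# Lists fully expired sstables and the sstables that block them from being dropped
sstableexpiredblockers my_ks my_table
```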

-
-
-

Repaired/unrepaired data

-

With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables.

-
-
-

Data directories

-

Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding -data getting undeleted:

-
    -
  • It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings -and each one can run compactions independently from the others.
  • -
  • Users can backup and restore a single data directory.
  • -
  • Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk backing two data directories, the big one will be limited by the small one. One workaround for this is to create more data directories backed by the big disk.
  • -
-
-
-

Single sstable tombstone compaction

-

When an sstable is written, a histogram of the tombstone expiry times is created and used to try to find sstables with very many tombstones, so that single sstable compaction can be run on such an sstable in the hope of being able to drop its tombstones. Before starting this, it is also checked how likely it is that any tombstones can actually be dropped and how much this sstable overlaps with other sstables. To avoid most of these checks the compaction option unchecked_tombstone_compaction can be enabled.

-
-
-

Common options

-

There are a number of common options for all the compaction strategies:

-
-
enabled (default: true)
-
Whether minor compactions should run. Note that you can have ‘enabled’: true as a compaction option and then do -‘nodetool enableautocompaction’ to start running compactions.
-
tombstone_threshold (default: 0.2)
-
How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-
tombstone_compaction_interval (default: 86400s (1 day))
-
Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure -that one sstable is not constantly getting recompacted - this option states how often we should try for a given -sstable.
-
log_all (default: false)
-
New detailed compaction logging, see below.
-
unchecked_tombstone_compaction (default: false)
-
The single sstable compaction has quite strict checks for whether it should be started, this option disables those -checks and for some usecases this might be needed. Note that this does not change anything for the actual -compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able -to drop any tombstones.
-
only_purge_repaired_tombstone (default: false)
-
Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-
min_threshold (default: 4)
-
Lower limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
max_threshold (default: 32)
-
Upper limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
-

Further, see the section on each strategy for specific additional options.

-
-
-

Compaction nodetool commands

-

The nodetool utility provides a number of commands related to compaction:

-
-
enableautocompaction
-
Enable compaction.
-
disableautocompaction
-
Disable compaction.
-
setcompactionthroughput
-
How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this -throughput.
-
compactionstats
-
Statistics about current and pending compactions.
-
compactionhistory
-
List details about the last compactions.
-
setcompactionthreshold
-
Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
-
-
-
-

Switching the compaction strategy and options using JMX

-

It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:

-
org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>
-
-
-

and the attribute to change is CompactionParameters or CompactionParametersJson if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an ALTER TABLE statement - -for example:

-
{ 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}
-
-
-

The setting is kept until someone executes an ALTER TABLE that touches the compaction -settings or restarts the node.

-
-
-

More detailed compaction logging

-

Enable with the compaction option log_all and a more detailed compaction log file will be produced in your log -directory.

-
-
-

Size Tiered Compaction Strategy

-

The basic idea of SizeTieredCompactionStrategy (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within bucket_low and bucket_high of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket’s sstables takes the most reads.

-
-

Major compaction

-

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size.
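For example (names illustrative):

```bash
# Major compaction of a single table; -s splits the output into several sstables as described above
nodetool compact -s my_ks my_table
```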

-
-
-

STCS options

-
-
min_sstable_size (default: 50MB)
-
Sstables smaller than this are put in the same bucket.
-
bucket_low (default: 0.5)
-
How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if bucket_low * avg_bucket_size < sstable_size (and the bucket_high condition holds, see below), then -the sstable is added to the bucket.
-
bucket_high (default: 1.5)
-
How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if sstable_size < bucket_high * avg_bucket_size (and the bucket_low condition holds, see above), then -the sstable is added to the bucket.
-
-
-
-

Defragmentation

-

Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster.

-
-
-
-

Leveled Compaction Strategy

-

The idea of LeveledCompactionStrategy (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here.

-

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that we won’t create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables cover the full range. We also can’t compact all L0 sstables with all L1 sstables in a single compaction since that can use too much memory.

-

When deciding which level to compact LCS checks the higher levels first (with LCS, a “higher” level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level.

-
-

Major compaction

-

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817.

-
-
-

Bootstrapping

-

During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

-
-
-

STCS in L0

-

If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better.

-
-
-

Starved sstables

-

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable_size_in_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved.

-
-
-

LCS options

-
-
sstable_size_in_mb (default: 160MB)
-
The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very -large partitions on the node.
-
fanout_size (default: 10)
-
The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning -this option.
-
-

LCS also support the cassandra.disable_stcs_in_l0 startup option (-Dcassandra.disable_stcs_in_l0=true) to avoid -doing STCS in L0.

-
-
-
-

Time Window CompactionStrategy

-

TimeWindowCompactionStrategy (TWCS) is designed specifically for workloads where it’s beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -SizeTieredCompactionStrategy or LeveledCompactionStrategy. The basic concept is that -TimeWindowCompactionStrategy will create 1 sstable per file for a given window, where a window is simply calculated -as the combination of two primary options:

-
-
compaction_window_unit (default: DAYS)
-
A Java TimeUnit (MINUTES, HOURS, or DAYS).
-
compaction_window_size (default: 1)
-
The number of units that make up a window.
-
-

Taken together, the operator can specify windows of virtually any size, and TimeWindowCompactionStrategy will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using SizeTieredCompactionStrategy.

-

Ideally, operators should select a compaction_window_unit and compaction_window_size pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -('compaction_window_unit':'DAYS','compaction_window_size':3).

-
-

TimeWindowCompactionStrategy Operational Concerns

-

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

-
    -
  • If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables -and flushed into the same SSTable, where it will remain comingled.
  • -
  • If the user’s read requests for old data cause read repairs that pull old data into the current memtable, that data -will be comingled and flushed into the same SSTable.
  • -
-

While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL USING TIMESTAMP. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled), and disable background read -repair by setting the table’s read_repair_chance and dclocal_read_repair_chance to 0.

-
-
-

Changing TimeWindowCompactionStrategy Options

-

Operators wishing to enable TimeWindowCompactionStrategy on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected.

-

Operators wishing to change compaction_window_unit or compaction_window_size can do so, but may trigger additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple windows.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/operating/compression.html b/src/doc/3.11.3/operating/compression.html deleted file mode 100644 index ccf479664..000000000 --- a/src/doc/3.11.3/operating/compression.html +++ /dev/null @@ -1,187 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compression" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compression

-

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of -data on disk by compressing the SSTable in user-configurable compression chunk_length_in_kb. Because Cassandra -SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates -to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when -UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full -chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so -on).

-
-

Configuring Compression

-

Compression is configured on a per-table basis as an optional argument to CREATE TABLE or ALTER TABLE. By -default, three options are relevant:

-
    -
  • class specifies the compression class - Cassandra provides three classes (LZ4Compressor, -SnappyCompressor, and DeflateCompressor ). The default is LZ4Compressor.
  • -
  • chunk_length_in_kb specifies the number of kilobytes of data per compression chunk. The default is 64KB.
  • -
  • crc_check_chance determines how likely Cassandra is to verify the checksum on each compression chunk during -reads. The default is 1.0.
  • -
-

Users can set compression using the following syntax:

-
CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-
-

Or

-
ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};
-
-
-

Once enabled, compression can be disabled with ALTER TABLE setting enabled to false:

-
ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-
-

Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ALTER TABLE, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the SSTables on disk, -re-compressing the data in the process.

-
-
-

Benefits and Uses

-

Compression’s primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk.

-

Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well.

-
-
-

Operational Impact

-
    -
  • Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per -terabyte of data on disk, though the exact usage varies with chunk_length_in_kb and compression ratios.
  • -
  • Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as -non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.
  • -
  • The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a -way to ensure correctness of data on disk, compressed tables allow the user to set crc_check_chance (a float from -0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.
  • -
-
-
-

Advanced Use

-

Advanced users can provide their own compression class by implementing the interface at -org.apache.cassandra.io.compress.ICompressor.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/operating/hardware.html b/src/doc/3.11.3/operating/hardware.html deleted file mode 100644 index cc770f574..000000000 --- a/src/doc/3.11.3/operating/hardware.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hardware Choices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hardware Choices

-

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM.

-
-

CPU

-

Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes.

-
-
-

Memory

-

Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (Java's Xmx system parameter). In addition to the heap, Cassandra will use significant amounts of RAM off-heap for compression metadata, bloom filters, row, key, and counter caches, and an in-process page cache. Finally, Cassandra will take advantage of the operating system's page cache, storing recently accessed portions of files in RAM for rapid re-use.

-

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest:

-
    -
  • ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
  • -
  • The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM
  • -
  • Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
  • -
  • Heaps larger than 12GB should consider G1GC
  • -
-
-
-

Disks

-

Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables.

-

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files.

-

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra’s sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it’s important that the commitlog -(commitlog_directory) be on one physical disk (not simply a partition, but a physical disk), and the data files -(data_file_directories) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk.
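A minimal cassandra.yaml sketch of that layout, with illustrative mount points standing in for two separate physical disks:

commitlog_directory: /mnt/disk1/cassandra/commitlog
data_file_directories:
    - /mnt/disk2/cassandra/data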

-

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it’s typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5.

-
-
-

Common Cloud Choices

-

Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include:

-
    -
  • m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate -workloads
  • -
  • i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs
  • -
  • m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) -storage
  • -
-

Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/operating/hints.html b/src/doc/3.11.3/operating/hints.html deleted file mode 100644 index e8a60ee11..000000000 --- a/src/doc/3.11.3/operating/hints.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hints" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.3/operating/index.html b/src/doc/3.11.3/operating/index.html deleted file mode 100644 index 865d9c7c0..000000000 --- a/src/doc/3.11.3/operating/index.html +++ /dev/null @@ -1,215 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Operating Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Operating Cassandra

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/operating/metrics.html b/src/doc/3.11.3/operating/metrics.html deleted file mode 100644 index b68acc4ae..000000000 --- a/src/doc/3.11.3/operating/metrics.html +++ /dev/null @@ -1,1601 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Monitoring" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Monitoring

-

Metrics in Cassandra are managed using the Dropwizard Metrics library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of built in and third party reporter plugins.

-

Metrics are collected for a single node. It’s up to the operator to use an external monitoring system to aggregate them.

-
-

Metric Types

-

All metrics reported by cassandra fit into one of the following types.

-
-
Gauge
-
An instantaneous measurement of a value.
-
Counter
-
A gauge for an AtomicLong instance. Typically this is consumed by monitoring the change since the last call to -see if there is a large increase compared to the norm.
-
Histogram
-

Measures the statistical distribution of values in a stream of data.

-

In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th -percentiles.

-
-
Timer
-
Measures both the rate that a particular piece of code is called and the histogram of its duration.
-
Latency
-
Special type that tracks latency (in microseconds) with a Timer plus a Counter that tracks the total latency -accrued since starting. The former is useful if you track the change in total latency since the last check. Each -metric name of this type will have ‘Latency’ and ‘TotalLatency’ appended to it.
-
Meter
-
A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving -average throughputs.
-
-
-
-

Table Metrics

-

Each table in Cassandra has metrics responsible for tracking its state and performance.

-

The metric names are all appended with the specific Keyspace and Table name.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table> name=<MetricName>
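For instance, the local read latency of a hypothetical table ks.users would be exposed as the MBean (in standard JMX ObjectName syntax):

org.apache.cassandra.metrics:type=Table,keyspace=ks,scope=users,name=ReadLatency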
-
-
-

Note

-

There is a special table called ‘all‘ without a keyspace. This represents the aggregation of metrics across -all tables and keyspaces on the node.

-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
MemtableOnHeapSizeGauge<Long>Total amount of data stored in the memtable that resides on-heap, including column related overhead and partitions overwritten.
MemtableOffHeapSizeGauge<Long>Total amount of data stored in the memtable that resides off-heap, including column related overhead and partitions overwritten.
MemtableLiveDataSizeGauge<Long>Total amount of live data stored in the memtable, excluding any data structure overhead.
AllMemtablesOnHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides on-heap.
AllMemtablesOffHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides off-heap.
AllMemtablesLiveDataSizeGauge<Long>Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead.
MemtableColumnsCountGauge<Long>Total number of columns present in the memtable.
MemtableSwitchCountCounterNumber of times flush has resulted in the memtable being switched out.
CompressionRatioGauge<Double>Current compression ratio for all SSTables.
EstimatedPartitionSizeHistogramGauge<long[]>Histogram of estimated partition size (in bytes).
EstimatedPartitionCountGauge<Long>Approximate number of keys in table.
EstimatedColumnCountHistogramGauge<long[]>Histogram of estimated number of columns.
SSTablesPerReadHistogramHistogramHistogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into account.
ReadLatencyLatencyLocal read latency for this table.
RangeLatencyLatencyLocal range scan latency for this table.
WriteLatencyLatencyLocal write latency for this table.
CoordinatorReadLatencyTimerCoordinator read latency for this table.
CoordinatorScanLatencyTimerCoordinator range scan latency for this table.
PendingFlushesCounterEstimated number of flush tasks pending for this table.
BytesFlushedCounterTotal number of bytes flushed since server [re]start.
CompactionBytesWrittenCounterTotal number of bytes written by compaction since server [re]start.
PendingCompactionsGauge<Integer>Estimate of number of pending compactions for this table.
LiveSSTableCountGauge<Integer>Number of SSTables on disk for this table.
LiveDiskSpaceUsedCounterDisk space used by SSTables belonging to this table (in bytes).
TotalDiskSpaceUsedCounterTotal disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC’d.
MinPartitionSizeGauge<Long>Size of the smallest compacted partition (in bytes).
MaxPartitionSizeGauge<Long>Size of the largest compacted partition (in bytes).
MeanPartitionSizeGauge<Long>Size of the average compacted partition (in bytes).
BloomFilterFalsePositivesGauge<Long>Number of false positives on table’s bloom filter.
BloomFilterFalseRatioGauge<Double>False positive ratio of table’s bloom filter.
BloomFilterDiskSpaceUsedGauge<Long>Disk space used by bloom filter (in bytes).
BloomFilterOffHeapMemoryUsedGauge<Long>Off-heap memory used by bloom filter.
IndexSummaryOffHeapMemoryUsedGauge<Long>Off-heap memory used by index summary.
CompressionMetadataOffHeapMemoryUsedGauge<Long>Off-heap memory used by compression meta data.
KeyCacheHitRateGauge<Double>Key cache hit rate for this table.
TombstoneScannedHistogramHistogramHistogram of tombstones scanned in queries on this table.
LiveScannedHistogramHistogramHistogram of live cells scanned in queries on this table.
ColUpdateTimeDeltaHistogramHistogramHistogram of column update time delta on this table.
ViewLockAcquireTimeTimerTime taken acquiring a partition lock for materialized view updates on this table.
ViewReadTimeTimerTime taken during the local read of a materialized view update.
TrueSnapshotsSizeGauge<Long>Disk space used by snapshots of this table including all SSTable components.
RowCacheHitOutOfRangeCounterNumber of table row cache hits that do not satisfy the query filter, thus went to disk.
RowCacheHitCounterNumber of table row cache hits.
RowCacheMissCounterNumber of table row cache misses.
CasPrepareLatencyLatency of paxos prepare round.
CasProposeLatencyLatency of paxos propose round.
CasCommitLatencyLatency of paxos commit round.
PercentRepairedGauge<Double>Percent of table data that is repaired on disk.
SpeculativeRetriesCounterNumber of times speculative retries were sent for this table.
WaitingOnFreeMemtableSpaceHistogramHistogram of time spent waiting for free memtable space, either on- or off-heap.
DroppedMutationsCounterNumber of dropped mutations on this table.
-
-
-

Keyspace Metrics

-

Each keyspace in Cassandra has metrics responsible for tracking its state and performance.

-

These metrics are the same as the Table Metrics above, only they are aggregated at the Keyspace level.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.keyspace.<MetricName>.<Keyspace>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Keyspace scope=<Keyspace> name=<MetricName>
-
-
-
-

ThreadPool Metrics

-

Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It’s important to monitor the state of these thread pools since they can tell you how saturated a -node is.

-

The metric names are all appended with the specific ThreadPool name. The thread pools are also categorized under a -specific type.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ThreadPools.<MetricName>.<Path>.<ThreadPoolName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ThreadPools scope=<ThreadPoolName> type=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ActiveTasksGauge<Integer>Number of tasks being actively worked on by this pool.
PendingTasksGauge<Integer>Number of queued tasks queued up on this pool.
CompletedTasksCounterNumber of tasks completed.
TotalBlockedTasksCounterNumber of tasks that were blocked due to queue saturation.
CurrentlyBlockedTaskCounterNumber of tasks that are currently blocked due to queue saturation but on retry will become unblocked.
MaxPoolSizeGauge<Integer>The maximum number of threads in this pool.
-

The following thread pools can be monitored.

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Native-Transport-RequeststransportHandles client CQL requests
CounterMutationStagerequestResponsible for counter writes
ViewMutationStagerequestResponsible for materialized view writes
MutationStagerequestResponsible for all other writes
ReadRepairStagerequestReadRepair happens on this thread pool
ReadStagerequestLocal reads run on this thread pool
RequestResponseStagerequestCoordinator requests to the cluster run on this thread pool
AntiEntropyStageinternalBuilds merkle tree for repairs
CacheCleanupExecutorinternalCache maintenance performed on this thread pool
CompactionExecutorinternalCompactions are run on these threads
GossipStageinternalHandles gossip requests
HintsDispatcherinternalPerforms hinted handoff
InternalResponseStageinternalResponsible for intra-cluster callbacks
MemtableFlushWriterinternalWrites memtables to disk
MemtablePostFlushinternalCleans up commit log after memtable is written to disk
MemtableReclaimMemoryinternalMemtable recycling
MigrationStageinternalRuns schema migrations
MiscStageinternalMiscellaneous tasks run here
PendingRangeCalculatorinternalCalculates token range
PerDiskMemtableFlushWriter_0internalResponsible for writing a spec (there is one of these per disk 0-N)
SamplerinternalResponsible for re-sampling the index summaries of SStables
SecondaryIndexManagementinternalPerforms updates to secondary indexes
ValidationExecutorinternalPerforms validation compaction or scrubbing
-
-
-

Client Request Metrics

-

Client requests have their own set of metrics that encapsulate the work happening at coordinator level.

-

Different types of client requests are broken down by RequestType.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ClientRequest.<MetricName>.<RequestType>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ClientRequest scope=<RequestType> name=<MetricName>
-
- --- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
RequestType:

CASRead

-
Description:

Metrics related to transactional read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction read latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
UnfinishedCommitCounterNumber of transactions that were committed on read.
ConditionNotMetCounterNumber of transaction preconditions did not match current values.
ContentionHistogramHistogramHow many contended reads were encountered
-
RequestType:

CASWrite

-
Description:

Metrics related to transactional write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction write latency.
UnfinishedCommitCounterNumber of transactions that were committed on write.
ConditionNotMetCounterNumber of transaction preconditions did not match current values.
ContentionHistogramHistogramHow many contended writes were encountered
-
RequestType:

Read

-
Description:

Metrics related to standard read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of read failures encountered.
 LatencyRead latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

RangeSlice

-
Description:

Metrics related to token range read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of range query failures encountered.
 LatencyRange query latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

Write

-
Description:

Metrics related to regular write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of write failures encountered.
 LatencyWrite latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

ViewWrite

-
Description:

Metrics related to materialized view writes.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
UnavailablesCounterNumber of unavailable exceptions encountered.
ViewReplicasAttemptedCounterTotal number of attempted view replica writes.
ViewReplicasSuccessCounterTotal number of succeeded view replica writes.
ViewPendingMutationsGauge<Long>ViewReplicasAttempted - ViewReplicasSuccess.
ViewWriteLatencyTimerTime between when mutation is applied to base table and when CL.ONE is achieved on view.
-
-
-
-

Cache Metrics

-

Cassandra caches have metrics to track the effectiveness of the caches, though the Table Metrics might be more useful.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Cache capacity in bytes.
EntriesGauge<Integer>Total number of cache entries.
FifteenMinuteCacheHitRateGauge<Double>15m cache hit rate.
FiveMinuteCacheHitRateGauge<Double>5m cache hit rate.
OneMinuteCacheHitRateGauge<Double>1m cache hit rate.
HitRateGauge<Double>All time cache hit rate.
HitsMeterTotal number of cache hits.
MissesMeterTotal number of cache misses.
MissLatencyTimerLatency of misses.
RequestsGauge<Long>Total number of cache requests.
SizeGauge<Long>Total size of occupied cache, in bytes.
-

The following caches are covered:

- ---- - - - - - - - - - - - - - - - - - - - -
NameDescription
CounterCacheKeeps hot counters in memory for performance.
ChunkCacheIn process uncompressed page cache.
KeyCacheCache for partition to sstable offsets.
RowCacheCache for rows kept in memory.
-
-

Note

-

Misses and MissLatency are only defined for the ChunkCache

-
-
-
-

CQL Metrics

-

Metrics specific to CQL prepared statement caching.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CQL.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CQL name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PreparedStatementsCountGauge<Integer>Number of cached prepared statements.
PreparedStatementsEvictedCounterNumber of prepared statements evicted from the prepared statement cache
PreparedStatementsExecutedCounterNumber of prepared statements executed.
RegularStatementsExecutedCounterNumber of non prepared statements executed.
PreparedStatementsRatioGauge<Double>Percentage of statements that are prepared vs unprepared.
-
-
-

DroppedMessage Metrics

-

Metrics specific to tracking dropped messages for different types of requests. Dropped writes are stored and retried by Hinted Handoff.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.DroppedMessages.<MetricName>.<Type>
-
JMX MBean
-
org.apache.cassandra.metrics:type=DroppedMetrics scope=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CrossNodeDroppedLatencyTimerThe dropped latency across nodes.
InternalDroppedLatencyTimerThe dropped latency within node.
DroppedMeterNumber of dropped messages.
-

The different types of messages tracked are:

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameDescription
BATCH_STOREBatchlog write
BATCH_REMOVEBatchlog cleanup (after successfully applied)
COUNTER_MUTATIONCounter writes
HINTHint replay
MUTATIONRegular writes
READRegular reads
READ_REPAIRRead repair
PAGED_SLICEPaged read
RANGE_SLICEToken range read
REQUEST_RESPONSERPC Callbacks
_TRACETracing writes
-
-
-

Streaming Metrics

-

Metrics reported during Streaming operations, such as repair, bootstrap, rebuild.

-

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
IncomingBytesCounterNumber of bytes streamed to this node from the peer.
OutgoingBytesCounterNumber of bytes streamed to the peer endpoint from this node.
-
-
-

Compaction Metrics

-

Metrics specific to Compaction work.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Compaction.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Compaction name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
BytesCompactedCounterTotal number of bytes compacted since server [re]start.
PendingTasksGauge<Integer>Estimated number of compactions remaining to perform.
CompletedTasksGauge<Long>Number of completed compactions since server [re]start.
TotalCompactionsCompletedMeterThroughput of completed compactions since server [re]start.
PendingTasksByTableNameGauge<Map<String, Map<String, Integer>>>Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in Table Metrics.
-
-
-

CommitLog Metrics

-

Metrics specific to the CommitLog

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CommitLog.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CommitLog name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CompletedTasksGauge<Long>Total number of commit log messages written since [re]start.
PendingTasksGauge<Long>Number of commit log messages written but yet to be fsync’d.
TotalCommitLogSizeGauge<Long>Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocationTimerTime spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommitTimerThe time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval.
-
-
-

Storage Metrics

-

Metrics specific to the storage engine.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Storage.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Storage name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ExceptionsCounterNumber of internal exceptions caught. Under normal conditions this should be zero.
LoadCounterSize, in bytes, of the on disk data size this node manages.
TotalHintsCounterNumber of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgressCounterNumber of hints attempting to be sent currently.
-
-
-

HintedHandoff Metrics

-

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
Hints_created-<PeerIP>CounterNumber of hints on disk for this peer.
Hints_not_stored-<PeerIP>CounterNumber of hints not stored for this peer, due to being down past the configured hint window.
-
-
-

SSTable Index Metrics

-

Metrics specific to the SSTable index metadata.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry
-
JMX MBean
-
org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
IndexedEntrySizeHistogramHistogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCountHistogramHistogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGetsHistogramHistogram of the number index seeks performed per SSTable.
-
-
-

BufferPool Metrics

-

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.BufferPool.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=BufferPool name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
SizeGauge<Long>Size, in bytes, of the managed buffer pool
MissesMeterThe rate of misses in the pool. The higher this is the more allocations incurred.
-
-
-

Client Metrics

-

Metrics specific to client management.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Client.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Client name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
connectedNativeClientsCounterNumber of clients connected to this node's native protocol server
connectedThriftClientsCounterNumber of clients connected to this node's thrift protocol server
-
-
-

JVM Metrics

-

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using Metric Reporters.

-
-

BufferPool

-
-
Metric Name
-
jvm.buffers.<direct|mapped>.<MetricName>
-
JMX MBean
-
java.nio:type=BufferPool name=<direct|mapped>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Estimated total capacity of the buffers in this pool
CountGauge<Long>Estimated number of buffers in the pool
UsedGauge<Long>Estimated memory that the Java virtual machine is using for this buffer pool
-
-
-

FileDescriptorRatio

-
-
Metric Name
-
jvm.fd.<MetricName>
-
JMX MBean
-
java.lang:type=OperatingSystem name=<OpenFileDescriptorCount|MaxFileDescriptorCount>
-
- ----- - - - - - - - - - - - - -
NameTypeDescription
UsageRatioRatio of used to total file descriptors
-
-
-

GarbageCollector

-
-
Metric Name
-
jvm.gc.<gc_type>.<MetricName>
-
JMX MBean
-
java.lang:type=GarbageCollector name=<gc_type>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
CountGauge<Long>Total number of collections that have occurred
TimeGauge<Long>Approximate accumulated collection elapsed time in milliseconds
-
-
-

Memory

-
-
Metric Name
-
jvm.memory.<heap/non-heap/total>.<MetricName>
-
JMX MBean
-
java.lang:type=Memory
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-

MemoryPool

-
-
Metric Name
-
jvm.memory.pools.<memory_pool>.<MetricName>
-
JMX MBean
-
java.lang:type=MemoryPool name=<memory_pool>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-
-

JMX

-

Any JMX based client can access metrics from cassandra.

-

If you wish to access JMX metrics over http it’s possible to download Mx4jTool and -place mx4j-tools.jar into the classpath. On startup you will see in the log:

-
HttpAdaptor version 3.0.2 started on port 8081
-
-
-

To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -conf/cassandra-env.sh and uncomment:

-
#MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0"
-
-#MX4J_PORT="-Dmx4jport=8081"
-
-
-
-
-

Metric Reporters

-

As mentioned at the top of this section on monitoring, the Cassandra metrics can be exported to a number of monitoring systems using a number of built-in and third party reporter plugins.

-

The configuration of these plugins is managed by the metrics reporter config project. There is a sample configuration file located at -conf/metrics-reporter-config-sample.yaml.

-

Once configured, you simply start cassandra with the flag -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml. The specified .yaml file plus any 3rd party reporter jars must all be in Cassandra's classpath.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/operating/read_repair.html b/src/doc/3.11.3/operating/read_repair.html deleted file mode 100644 index 8f3246a5a..000000000 --- a/src/doc/3.11.3/operating/read_repair.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Read repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.3/operating/repair.html b/src/doc/3.11.3/operating/repair.html deleted file mode 100644 index 19d241537..000000000 --- a/src/doc/3.11.3/operating/repair.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.3/operating/security.html b/src/doc/3.11.3/operating/security.html deleted file mode 100644 index 12369ef1c..000000000 --- a/src/doc/3.11.3/operating/security.html +++ /dev/null @@ -1,446 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-

There are three main components to the security features provided by Cassandra:

-
    -
  • TLS/SSL encryption for client and inter-node communication
  • -
  • Client authentication
  • -
  • Authorization
  • -
-
-

TLS/SSL Encryption

-

Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently.

-

In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can be overridden using the settings in cassandra.yaml, but this is not recommended unless there are policies in place which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be updated.

-

FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See the java document on FIPS -for more details.

-

For information on generating the keystore and truststore files used in SSL communications, see the -java documentation on creating keystores

-
-

Inter-node Encryption

-

The settings for managing inter-node encryption are found in cassandra.yaml in the server_encryption_options -section. To enable inter-node encryption, change the internode_encryption setting from its default value of none -to one value from: rack, dc or all.
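A sketch of the corresponding cassandra.yaml section, with illustrative keystore paths and passwords:

server_encryption_options:
    internode_encryption: all
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra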

-
-
-

Client to Node Encryption

-

The settings for managing client to node encryption are found in cassandra.yaml in the client_encryption_options -section. There are two primary toggles here for enabling encryption, enabled and optional.

-
    -
  • If neither is set to true, client connections are entirely unencrypted.
  • -
  • If enabled is set to true and optional is set to false, all client connections must be secured.
  • -
  • If both options are set to true, both encrypted and unencrypted connections are supported using the same port. -Client connections using encryption with this configuration will be automatically detected and handled by the server.
  • -
-

As an alternative to the optional setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set optional to false and use the native_transport_port_ssl -setting in cassandra.yaml to specify the port to be used for secure client communication.
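As an illustration, a client_encryption_options block requiring encryption on all client connections might look like this (keystore details are placeholders):

client_encryption_options:
    enabled: true
    optional: false
    keystore: conf/.keystore
    keystore_password: cassandra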

-
-
-
-

Roles

-

Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -role_manager setting in cassandra.yaml. The default setting uses CassandraRoleManager, an implementation -which stores role information in the tables of the system_auth keyspace.

-

See also the CQL documentation on roles.

-
-
-

Authentication

-

Authentication is pluggable in Cassandra and is configured using the authenticator setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthenticator which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra’s permissions subsystem, so if authentication is disabled, effectively so are permissions.

-

The default distribution also includes PasswordAuthenticator, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication.

-
-

Enabling Password Authentication

-

Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster.

-

Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps:

-
    -
  1. Open a cqlsh session and change the replication factor of the system_auth keyspace. By default, this keyspace -uses SimpleReplicationStrategy and a replication_factor of 1. It is recommended to change this for any -non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to -configure a replication factor of 3 to 5 per-DC.
  2. -
-
ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3};
-
-
-
    -
  1. Edit cassandra.yaml to change the authenticator option like so:
  2. -
-
authenticator: PasswordAuthenticator
-
-
-
    -
  1. Restart the node.
  2. -
  3. Open a new cqlsh session using the credentials of the default superuser:
  4. -
-
cqlsh -u cassandra -p cassandra
-
-
-
    -
  1. During login, the credentials for the default superuser are read with a consistency level of QUORUM, whereas -those for all other users (including superusers) are read at LOCAL_ONE. In the interests of performance and -availability, as well as security, operators should create another superuser and disable the default one. This step -is optional, but highly recommended. While logged in as the default superuser, create another superuser role which -can be used to bootstrap further configuration.
  2. -
-
# create a new superuser
-CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super';
-
-
-
    -
  1. Start a new cqlsh session, this time logging in as the new superuser, and disable the default superuser.
  2. -
-
ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false;
-
-
-
    -
  1. Finally, set up the roles and credentials for your application users with CREATE ROLE -statements.
  2. -
-

At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster.

-

Note that using PasswordAuthenticator also requires the use of CassandraRoleManager.

-

See also: Setting credentials for internal authentication, CREATE ROLE, -ALTER ROLE, ALTER KEYSPACE and GRANT PERMISSION,

-
-
-
-

Authorization

-

Authorization is pluggable in Cassandra and is configured using the authorizer setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthorizer which performs no checking and so effectively grants all -permissions to all roles. This must be used if AllowAllAuthenticator is the configured authenticator.

-

The default distribution also includes CassandraAuthorizer, which does implement full permissions management -functionality and stores its data in Cassandra system tables.

-
-

Enabling Internal Authorization

-

Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests.

-

The following assumes that authentication has already been enabled via the process outlined in -Enabling Password Authentication. Perform these steps to enable internal authorization across the cluster:

-
    -
  1. On the selected node, edit cassandra.yaml to change the authorizer option like so:
  2. -
-
authorizer: CassandraAuthorizer
-
-
-
    -
  1. Restart the node.
  2. -
  3. Open a new cqlsh session using the credentials of a role with superuser credentials:
  4. -
-
cqlsh -u dba -p super
-
-
-
    -
  1. Configure the appropriate access privileges for your clients using GRANT PERMISSION -statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so -disruption to clients is avoided.
  2. -
-
GRANT SELECT ON ks.t1 TO db_user;
-
-
-
    -
  1. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node -restarts and clients reconnect, the enforcement of the granted permissions will begin.
  2. -
-

See also: GRANT PERMISSION, GRANT ALL <grant-all> and REVOKE PERMISSION

-
-
-
-

Caching

-

Enabling authentication and authorization places additional load on the cluster by frequently reading from the system_auth tables. Furthermore, these reads are in the critical paths of many client operations, and so have the potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role details are cached for a configurable period. The caching can be configured (and even disabled) from cassandra.yaml or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX are not persistent and will be re-read from cassandra.yaml when the node is restarted.

-

Each cache has 3 options which can be set:

-
-
Validity Period
-
Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache.
-
Refresh Rate
-
Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these -async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a -shorter time than the validity period.
-
Max Entries
-
Controls the upper bound on cache size.
-
-

The naming for these options in cassandra.yaml follows the convention:

-
    -
  • <type>_validity_in_ms
  • -
  • <type>_update_interval_in_ms
  • -
  • <type>_cache_max_entries
  • -
-

Where <type> is one of credentials, permissions, or roles.
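For example, the permissions cache could be tuned in cassandra.yaml along these lines (the values shown are illustrative, not recommendations):

permissions_validity_in_ms: 2000
permissions_update_interval_in_ms: 1000
permissions_cache_max_entries: 1000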

-

As mentioned, these are also exposed via JMX in the mbeans under the org.apache.cassandra.auth domain.

-
-
-

JMX access

-

Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra’s own auth subsystem.

-

The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -cassandra-env.sh (or cassandra-env.ps1 on Windows) to change the LOCAL_JMX setting to yes. Under the -standard configuration, when remote JMX connections are enabled, standard JMX authentication -is also switched on.

-

Note that by default, local-only connections are not subject to authentication, but this can be enabled.

-

If enabling remote connections, it is recommended to also use SSL connections.

-

Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as nodetool, are -correctly configured and working as expected.

-
-

Standard JMX Auth

-

Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -cassandra-env.sh by the line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

Edit the password file to add username/password pairs:

-
jmx_user jmx_password
-
-
-

Secure the credentials file so that only the user running the Cassandra process can read it :

-
$ chown cassandra:cassandra /etc/cassandra/jmxremote.password
-$ chmod 400 /etc/cassandra/jmxremote.password
-
-
-

Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in cassandra-env.sh:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

Then edit the access file to grant your JMX user readwrite permission:

-
jmx_user readwrite
-
-
-

Cassandra must be restarted to pick up the new settings.

-

See also : Using File-Based Password Authentication In JMX

-
-
-

Cassandra Integrated Auth

-

An alternative to the out-of-the-box JMX auth is to use Cassandra's own authentication and/or authorization providers for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not available until after a node has joined the ring, because the auth subsystem is not fully configured until that point. However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, to switch to integrated auth once the node has joined the ring and initial setup is complete.

-

With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates can be managed centrally using just cqlsh. Furthermore, fine grained control over exactly which operations are permitted on particular MBeans can be achieved via GRANT PERMISSION.

-

To enable integrated authentication, edit cassandra-env.sh to uncomment these lines:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
-#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
-
-
-

And disable the JMX standard auth by commenting this line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

To enable integrated authorization, uncomment this line:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
-
-
-

Check standard access control is off by ensuring this line is commented out:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as:

-
CREATE ROLE jmx WITH LOGIN = false;
-GRANT SELECT ON ALL MBEANS TO jmx;
-GRANT DESCRIBE ON ALL MBEANS TO jmx;
-GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx;
-GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx;
-
-# Grant the jmx role to one with login permissions so that it can access the JMX tooling
-CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false;
-GRANT jmx TO ks_user;
-
-
-

Fine grained access control to individual MBeans is also supported:

-
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner;
-
-
-

This permits the ks_user role to invoke methods on the MBean representing a single table in test_keyspace, while -granting the same permission for all table level MBeans in that keyspace to the ks_owner role.

-

Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered.

-

See also: Permissions.

-
-
-

JMX With SSL

-

JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit -the relevant lines in cassandra-env.sh (or cassandra-env.ps1 on Windows) to uncomment and set the values of these -properties as required:

-
-
com.sun.management.jmxremote.ssl
-
set to true to enable SSL
-
com.sun.management.jmxremote.ssl.need.client.auth
-
set to true to enable validation of client certificates
-
com.sun.management.jmxremote.registry.ssl
-
enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub
-
com.sun.management.jmxremote.ssl.enabled.protocols
-
by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is -not usually necessary and using the defaults is the preferred option.
-
com.sun.management.jmxremote.ssl.enabled.cipher.suites
-
by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that -this is not usually necessary and using the defaults is the preferred option.
-
javax.net.ssl.keyStore
-
set the path on the local filesystem of the keystore containing server private keys and public certificates
-
javax.net.ssl.keyStorePassword
-
set the password of the keystore file
-
javax.net.ssl.trustStore
-
if validation of client certificates is required, use this property to specify the path of the truststore containing -the public certificates of trusted clients
-
javax.net.ssl.trustStorePassword
-
set the password of the truststore file
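Putting a few of these together, a minimal cassandra-env.sh sketch (the keystore path and password are placeholders) might look like:

JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=true"
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.registry.ssl=true"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/path/to/keystore"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=keystore-password"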
-
-

See also: Oracle Java7 Docs, -Monitor Java with JMX

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/operating/snitch.html b/src/doc/3.11.3/operating/snitch.html deleted file mode 100644 index 3188510cd..000000000 --- a/src/doc/3.11.3/operating/snitch.html +++ /dev/null @@ -1,176 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Snitch" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Snitch

-

In cassandra, the snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route requests efficiently.
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping -machines into “datacenters” and “racks.” Cassandra will do its best not to have more than one replica on the same -“rack” (which may not actually be a physical location).
  • -
-
-

Dynamic snitching

-

The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is configured with the following properties in cassandra.yaml:

-
    -
  • dynamic_snitch: whether the dynamic snitch should be enabled or disabled.
  • -
  • dynamic_snitch_update_interval_in_ms: controls how often to perform the more expensive part of host score -calculation.
  • -
  • dynamic_snitch_reset_interval_in_ms: if set greater than zero and read_repair_chance is < 1.0, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity.
  • -
  • dynamic_snitch_badness_threshold: The badness threshold will control how much worse the pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned host was 20% worse than the fastest.
  • -
-
-
-

Snitch classes

-

The endpoint_snitch parameter in cassandra.yaml should be set to the class that implements IEndPointSnitch, which will be wrapped by the dynamic snitch and decides if two endpoints are in the same data center or on the same rack. Out of the box, Cassandra provides the snitch implementations:

-
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via gossip. If cassandra-topology.properties exists, -it is used as a fallback, allowing migration from the PropertyFileSnitch.
-
SimpleSnitch
-
Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
PropertyFileSnitch
-
Proximity is determined by rack and data center, which are explicitly configured in -cassandra-topology.properties.
-
Ec2Snitch
-
Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this will not work across multiple regions.
-
Ec2MultiRegionSnitch
-
Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the public IP as well). You will need to open the storage_port or ssl_storage_port on the public IP firewall (for intra-Region traffic, Cassandra will switch to the private IP after establishing a connection).
-
RackInferringSnitch
-
Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each node’s IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an example of writing a custom Snitch class and is provided in that spirit.
-
-
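A minimal configuration sketch for the recommended GossipingPropertyFileSnitch (the datacenter and rack names are placeholders):

```bash
# Sketch only: select the snitch in cassandra.yaml ...
grep '^endpoint_snitch' conf/cassandra.yaml
# endpoint_snitch: GossipingPropertyFileSnitch

# ... and define the local node's datacenter and rack in cassandra-rackdc.properties.
cat conf/cassandra-rackdc.properties
# dc=dc1
# rack=rack1
```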
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/operating/topo_changes.html b/src/doc/3.11.3/operating/topo_changes.html deleted file mode 100644 index 252ab4a5e..000000000 --- a/src/doc/3.11.3/operating/topo_changes.html +++ /dev/null @@ -1,214 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Adding, replacing, moving and removing nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Adding, replacing, moving and removing nodes

-
-

Bootstrap

-

Adding new nodes is called “bootstrapping”. The num_tokens parameter defines the number of virtual nodes (tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) the node will become responsible for.

-
-

Token allocation

-

With the default token allocation algorithm the new node will pick num_tokens random tokens to become responsible for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but it also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with acceptable overhead.

-

On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, the new node must be started with the JVM option -Dcassandra.allocate_tokens_for_keyspace=<keyspace>, where <keyspace> is the keyspace from which the algorithm can find the load information to optimize token assignment for.

-
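For example, a new node might be started with the load-aware allocator as follows (a sketch only; the keyspace name is a placeholder, and the flag is shown here as an addition to the JVM options in cassandra-env.sh):

```bash
# Sketch only: keep the default virtual-node count ...
grep '^num_tokens' conf/cassandra.yaml
# num_tokens: 256

# ... and, in conf/cassandra-env.sh, ask the allocator to optimize token
# assignment for a specific keyspace (placeholder name) when the node bootstraps.
JVM_OPTS="$JVM_OPTS -Dcassandra.allocate_tokens_for_keyspace=my_keyspace"
```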
-

Manual token assignment

-

You may specify a comma-separated list of tokens manually with the initial_token cassandra.yaml parameter, and if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment with an external tool or when restoring a node with its previous tokens.

-
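A sketch of what manual assignment might look like (the token values below are placeholders only):

```bash
# Sketch only: pin the node to explicit tokens instead of letting Cassandra
# allocate them. Token values are placeholders.
grep '^initial_token' conf/cassandra.yaml
# initial_token: 1024,204800,981723948
```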
-
-
-

Range streaming

-

After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become responsible for to stream data from. By default it will stream from the primary replica of each token range in order to guarantee data in the new node will be consistent with the current state.

-

In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and potentially miss data from an unavailable replica, set the JVM flag -Dcassandra.consistent.rangemovement=false.

-
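If that trade-off is acceptable, the override might be applied like this (a sketch only, e.g. as an addition to the JVM options in cassandra-env.sh):

```bash
# Sketch only: allow bootstrap to proceed even if a replica for some range is
# down, accepting that data from that replica may be missed.
JVM_OPTS="$JVM_OPTS -Dcassandra.consistent.rangemovement=false"
```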
-
-

Resuming failed/hung bootstrap

-

On 2.2+, if the bootstrap process fails, it’s possible to resume bootstrap from the previously saved state by calling nodetool bootstrap resume. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply restarting the node. In order to clean up bootstrap state and start fresh, you may set the JVM startup flag -Dcassandra.reset_bootstrap_progress=true.

-
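For example (a sketch only; run on the joining node):

```bash
# Resume a failed or stalled bootstrap from its saved state (2.2+).
nodetool bootstrap resume

# Alternatively, to discard the saved state and start over, restart the node
# with the reset flag (e.g. added to the JVM options in cassandra-env.sh).
JVM_OPTS="$JVM_OPTS -Dcassandra.reset_bootstrap_progress=true"
```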

On earlier versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart the bootstrap process.

-
-
-

Manual bootstrapping

-

It’s possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter auto_bootstrap: false. This may be useful when restoring a node from a backup or creating a new data-center.

-
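A sketch of the relevant setting (as noted above, auto_bootstrap is a hidden parameter: it defaults to true and is not present in cassandra.yaml unless you add it):

```bash
# Sketch only: skip streaming entirely when the node joins the ring, e.g. when
# restoring from a backup or standing up a new datacenter.
grep '^auto_bootstrap' conf/cassandra.yaml
# auto_bootstrap: false
```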
-
-
-

Removing nodes

-

You can take a live node out of the cluster with nodetool decommission (run on the node itself), or remove a dead one with nodetool removenode (run from any other machine). This will assign the ranges the old node was responsible for to other nodes, and replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If removenode is used, the data will stream from the remaining replicas.

-
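For example (a sketch only; the host ID is a placeholder taken from nodetool status):

```bash
# Take a live node out of the cluster: run on the node being removed.
nodetool decommission

# Remove a dead node: run from any live node, passing the dead node's host ID
# (placeholder below) as reported by 'nodetool status'.
nodetool removenode 2a921697-fbbb-4bc2-a0f5-3e6b8cd26a0d
```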

No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at a different token on the ring, it should be removed manually.

-
-
-

Moving nodes

-

When num_tokens: 1 it’s possible to move the node position in the ring with nodetool move. Moving is both a convenience over and more efficient than decommission + bootstrap. After moving a node, nodetool cleanup should be run to remove any unnecessary data.

-
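For example (a sketch only; the target token is a placeholder):

```bash
# Move a single-token node (num_tokens: 1) to a new ring position ...
nodetool move 3074457345618258602

# ... then, once the move has completed, drop the data the node no longer owns.
nodetool cleanup
```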
-
-

Replacing a dead node

-

In order to replace a dead node, start Cassandra with the JVM startup flag -Dcassandra.replace_address_first_boot=<dead_node_ip>. Once this property is enabled the node starts in a hibernate state, during which all the other nodes will see this node as down.

-
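For example, the flag might be added to the JVM options before starting the replacement node (a sketch only; the IP address is a placeholder for the dead node being replaced):

```bash
# Sketch only, e.g. in conf/cassandra-env.sh on the replacement node.
JVM_OPTS="$JVM_OPTS -Dcassandra.replace_address_first_boot=10.0.1.42"
```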

The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. The main difference from normal bootstrapping of a new node is that this new node will not accept any writes during this phase.

-

Once the bootstrapping is complete the node will be marked “UP”; we rely on hinted handoff to make this node consistent (since it does not accept writes from the start of the bootstrap).

-
-

Note

-

If the replacement process takes longer than max_hint_window_in_ms you MUST run repair to make the replaced node consistent again, since it missed ongoing writes during bootstrapping.

-
-
-
-

Monitoring progress

-

Bootstrap, replace, move and remove progress can be monitored using nodetool netstats, which will show the progress of the streaming operations.

-
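For example (run on the node of interest and repeat, or wrap in watch, while the operation is in flight):

```bash
# Show active streaming sessions and their progress.
nodetool netstats
```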
-
-

Cleanup data after range movements

-

As a safety measure, Cassandra does not automatically remove data from nodes that “lose” part of their token range due to a range movement operation (bootstrap, move, replace). Run nodetool cleanup on the nodes that lost ranges to the joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be counted against the load on that node.

-
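For example (run on each node that lost ranges once the new node is confirmed healthy; without arguments nodetool cleanup covers every keyspace on the node):

```bash
# Reclaim space held by data this node no longer owns after a range movement.
nodetool cleanup
```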
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/search.html b/src/doc/3.11.3/search.html deleted file mode 100644 index 84a0280e9..000000000 --- a/src/doc/3.11.3/search.html +++ /dev/null @@ -1,103 +0,0 @@ ---- -layout: docpage - -title: "Search" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "" -doc-header-links: ' - -' -doc-search-path: "#" - -extra-footer: ' - - - - -' - ---- -
-
- -
-
-
- - - - -
- -
- - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/searchindex.js b/src/doc/3.11.3/searchindex.js deleted file mode 100644 index 05a7b40ad..000000000 --- a/src/doc/3.11.3/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["architecture/dynamo","architecture/guarantees","architecture/index","architecture/overview","architecture/storage_engine","bugs","configuration/cassandra_config_file","configuration/index","contactus","cql/appendices","cql/changes","cql/ddl","cql/definitions","cql/dml","cql/functions","cql/index","cql/indexes","cql/json","cql/mvs","cql/security","cql/triggers","cql/types","data_modeling/index","development/code_style","development/how_to_commit","development/how_to_review","development/ide","development/index","development/patches","development/testing","faq/index","getting_started/configuring","getting_started/drivers","getting_started/index","getting_started/installing","getting_started/querying","index","operating/backups","operating/bloom_filters","operating/bulk_loading","operating/cdc","operating/compaction","operating/compression","operating/hardware","operating/hints","operating/index","operating/metrics","operating/read_repair","operating/repair","operating/security","operating/snitch","operating/topo_changes","tools/cqlsh","tools/index","tools/nodetool","tools/nodetool/assassinate","tools/nodetool/bootstrap","tools/nodetool/cleanup","tools/nodetool/clearsnapshot","tools/nodetool/clientstats","tools/nodetool/compact","tools/nodetool/compactionhistory","tools/nodetool/compactionstats","tools/nodetool/decommission","tools/nodetool/describecluster","tools/nodetool/describering","tools/nodetool/disableauditlog","tools/nodetool/disableautocompaction","tools/nodetool/disablebackup","tools/nodetool/disablebinary","tools/nodetool/disablefullquerylog","tools/nodetool/disablegossip","tools/nodetool/disablehandoff","tools/nodetool/disablehintsfordc","tools/nodetool/disableoldprotocolversions","tools/nodetool/drain","tools/nodetool/enableauditlog","tools/nodetool/enableautocompaction","tools/nodetool/enablebackup","tools/nodetool/enablebinary","tools/nodetool/enablefullquerylog","tools/nodetool/enablegossip","tools/nodetool/enablehandoff","tools/nodetool/enablehintsfordc","tools/nodetool/enableoldprotocolversions","tools/nodetool/failuredetector","tools/nodetool/flush","tools/nodetool/garbagecollect","tools/nodetool/gcstats","tools/nodetool/getbatchlogreplaythrottle","tools/nodetool/getcompactionthreshold","tools/nodetool/getcompactionthroughput","tools/nodetool/getconcurrentcompactors","tools/nodetool/getconcurrentviewbuilders","tools/nodetool/getendpoints","tools/nodetool/getinterdcstreamthroughput","tools/nodetool/getlogginglevels","tools/nodetool/getmaxhintwindow","tools/nodetool/getreplicas","tools/nodetool/getseeds","tools/nodetool/getsstables","tools/nodetool/getstreamthroughput","tools/nodetool/gettimeout","tools/nodetool/gettraceprobability","tools/nodetool/gossipinfo","tools/nodetool/handoffwindow","tools/nodetool/help","tools/nodetool/import","tools/nodetool/info","tools/nodetool/invalidatecountercache","tools/nodetool/invalidatekeycache","tools/nodetool/invalidaterowcache","tools/nodetool/join","tools/nodetool/listsnapshots","tools/nodetool/move","tools/nodetool/netstats","tools/nodetool/nodetool","tools/nodetool/pausehandoff","tools/nodetool/profileload","tools/nodetool/proxyhistograms","tools/nodetool/rangekeysample","tools/nodetool/rebuild","tools/nodetool/rebuild_index","tools/nodetool/refresh","tools/nodetool/refreshsizeestimates","t
ools/nodetool/reloadlocalschema","tools/nodetool/reloadseeds","tools/nodetool/reloadssl","tools/nodetool/reloadtriggers","tools/nodetool/relocatesstables","tools/nodetool/removenode","tools/nodetool/repair","tools/nodetool/repair_admin","tools/nodetool/replaybatchlog","tools/nodetool/resetfullquerylog","tools/nodetool/resetlocalschema","tools/nodetool/resumehandoff","tools/nodetool/ring","tools/nodetool/scrub","tools/nodetool/setbatchlogreplaythrottle","tools/nodetool/setcachecapacity","tools/nodetool/setcachekeystosave","tools/nodetool/setcompactionthreshold","tools/nodetool/setcompactionthroughput","tools/nodetool/setconcurrentcompactors","tools/nodetool/setconcurrentviewbuilders","tools/nodetool/sethintedhandoffthrottlekb","tools/nodetool/setinterdcstreamthroughput","tools/nodetool/setlogginglevel","tools/nodetool/setmaxhintwindow","tools/nodetool/setstreamthroughput","tools/nodetool/settimeout","tools/nodetool/settraceprobability","tools/nodetool/snapshot","tools/nodetool/status","tools/nodetool/statusautocompaction","tools/nodetool/statusbackup","tools/nodetool/statusbinary","tools/nodetool/statusgossip","tools/nodetool/statushandoff","tools/nodetool/stop","tools/nodetool/stopdaemon","tools/nodetool/tablehistograms","tools/nodetool/tablestats","tools/nodetool/toppartitions","tools/nodetool/tpstats","tools/nodetool/truncatehints","tools/nodetool/upgradesstables","tools/nodetool/verify","tools/nodetool/version","tools/nodetool/viewbuildstatus","troubleshooting/index"],envversion:51,filenames:["architecture/dynamo.rst","architecture/guarantees.rst","architecture/index.rst","architecture/overview.rst","architecture/storage_engine.rst","bugs.rst","configuration/cassandra_config_file.rst","configuration/index.rst","contactus.rst","cql/appendices.rst","cql/changes.rst","cql/ddl.rst","cql/definitions.rst","cql/dml.rst","cql/functions.rst","cql/index.rst","cql/indexes.rst","cql/json.rst","cql/mvs.rst","cql/security.rst","cql/triggers.rst","cql/types.rst","data_modeling/index.rst","development/code_style.rst","development/how_to_commit.rst","development/how_to_review.rst","development/ide.rst","development/index.rst","development/patches.rst","development/testing.rst","faq/index.rst","getting_started/configuring.rst","getting_started/drivers.rst","getting_started/index.rst","getting_started/installing.rst","getting_started/querying.rst","index.rst","operating/backups.rst","operating/bloom_filters.rst","operating/bulk_loading.rst","operating/cdc.rst","operating/compaction.rst","operating/compression.rst","operating/hardware.rst","operating/hints.rst","operating/index.rst","operating/metrics.rst","operating/read_repair.rst","operating/repair.rst","operating/security.rst","operating/snitch.rst","operating/topo_changes.rst","tools/cqlsh.rst","tools/index.rst","tools/nodetool.rst","tools/nodetool/assassinate.rst","tools/nodetool/bootstrap.rst","tools/nodetool/cleanup.rst","tools/nodetool/clearsnapshot.rst","tools/nodetool/clientstats.rst","tools/nodetool/compact.rst","tools/nodetool/compactionhistory.rst","tools/nodetool/compactionstats.rst","tools/nodetool/decommission.rst","tools/nodetool/describecluster.rst","tools/nodetool/describering.rst","tools/nodetool/disableauditlog.rst","tools/nodetool/disableautocompaction.rst","tools/nodetool/disablebackup.rst","tools/nodetool/disablebinary.rst","tools/nodetool/disablefullquerylog.rst","tools/nodetool/disablegossip.rst","tools/nodetool/disablehandoff.rst","tools/nodetool/disablehintsfordc.rst","tools/nodetool/disableoldprotocolversions.rst","tools/nodeto
ol/drain.rst","tools/nodetool/enableauditlog.rst","tools/nodetool/enableautocompaction.rst","tools/nodetool/enablebackup.rst","tools/nodetool/enablebinary.rst","tools/nodetool/enablefullquerylog.rst","tools/nodetool/enablegossip.rst","tools/nodetool/enablehandoff.rst","tools/nodetool/enablehintsfordc.rst","tools/nodetool/enableoldprotocolversions.rst","tools/nodetool/failuredetector.rst","tools/nodetool/flush.rst","tools/nodetool/garbagecollect.rst","tools/nodetool/gcstats.rst","tools/nodetool/getbatchlogreplaythrottle.rst","tools/nodetool/getcompactionthreshold.rst","tools/nodetool/getcompactionthroughput.rst","tools/nodetool/getconcurrentcompactors.rst","tools/nodetool/getconcurrentviewbuilders.rst","tools/nodetool/getendpoints.rst","tools/nodetool/getinterdcstreamthroughput.rst","tools/nodetool/getlogginglevels.rst","tools/nodetool/getmaxhintwindow.rst","tools/nodetool/getreplicas.rst","tools/nodetool/getseeds.rst","tools/nodetool/getsstables.rst","tools/nodetool/getstreamthroughput.rst","tools/nodetool/gettimeout.rst","tools/nodetool/gettraceprobability.rst","tools/nodetool/gossipinfo.rst","tools/nodetool/handoffwindow.rst","tools/nodetool/help.rst","tools/nodetool/import.rst","tools/nodetool/info.rst","tools/nodetool/invalidatecountercache.rst","tools/nodetool/invalidatekeycache.rst","tools/nodetool/invalidaterowcache.rst","tools/nodetool/join.rst","tools/nodetool/listsnapshots.rst","tools/nodetool/move.rst","tools/nodetool/netstats.rst","tools/nodetool/nodetool.rst","tools/nodetool/pausehandoff.rst","tools/nodetool/profileload.rst","tools/nodetool/proxyhistograms.rst","tools/nodetool/rangekeysample.rst","tools/nodetool/rebuild.rst","tools/nodetool/rebuild_index.rst","tools/nodetool/refresh.rst","tools/nodetool/refreshsizeestimates.rst","tools/nodetool/reloadlocalschema.rst","tools/nodetool/reloadseeds.rst","tools/nodetool/reloadssl.rst","tools/nodetool/reloadtriggers.rst","tools/nodetool/relocatesstables.rst","tools/nodetool/removenode.rst","tools/nodetool/repair.rst","tools/nodetool/repair_admin.rst","tools/nodetool/replaybatchlog.rst","tools/nodetool/resetfullquerylog.rst","tools/nodetool/resetlocalschema.rst","tools/nodetool/resumehandoff.rst","tools/nodetool/ring.rst","tools/nodetool/scrub.rst","tools/nodetool/setbatchlogreplaythrottle.rst","tools/nodetool/setcachecapacity.rst","tools/nodetool/setcachekeystosave.rst","tools/nodetool/setcompactionthreshold.rst","tools/nodetool/setcompactionthroughput.rst","tools/nodetool/setconcurrentcompactors.rst","tools/nodetool/setconcurrentviewbuilders.rst","tools/nodetool/sethintedhandoffthrottlekb.rst","tools/nodetool/setinterdcstreamthroughput.rst","tools/nodetool/setlogginglevel.rst","tools/nodetool/setmaxhintwindow.rst","tools/nodetool/setstreamthroughput.rst","tools/nodetool/settimeout.rst","tools/nodetool/settraceprobability.rst","tools/nodetool/snapshot.rst","tools/nodetool/status.rst","tools/nodetool/statusautocompaction.rst","tools/nodetool/statusbackup.rst","tools/nodetool/statusbinary.rst","tools/nodetool/statusgossip.rst","tools/nodetool/statushandoff.rst","tools/nodetool/stop.rst","tools/nodetool/stopdaemon.rst","tools/nodetool/tablehistograms.rst","tools/nodetool/tablestats.rst","tools/nodetool/toppartitions.rst","tools/nodetool/tpstats.rst","tools/nodetool/truncatehints.rst","tools/nodetool/upgradesstables.rst","tools/nodetool/verify.rst","tools/nodetool/version.rst","tools/nodetool/viewbuildstatus.rst","troubleshooting/index.rst"],objects:{},objnames:{},objtypes:{},terms:{"00t89":21,"03t04":21,"0x0000000000000003":14,"0x000000
04":13,"100mb":6,"10mb":6,"10s":52,"10x":[6,41],"11e6":52,"128th":4,"12gb":43,"12h30m":21,"15m":46,"160mb":41,"16mb":[30,41],"180kb":6,"19t03":138,"1mo":21,"1st":21,"24h":21,"250m":6,"256mb":6,"256th":6,"29d":21,"2e10":10,"2gb":43,"2nd":[6,11,50],"2xlarg":43,"300s":6,"327e":52,"32gb":43,"32mb":[6,30],"36x":34,"3ff3e5109f22":13,"3gb":42,"3rd":[6,46,50],"40f3":13,"4ae3":13,"4kb":11,"4xlarg":43,"50kb":6,"50mb":[6,41],"512mb":6,"5573e5b09f14":13,"5kb":6,"5mb":41,"64k":6,"64kb":42,"6ms":6,"6tb":43,"7374e9b5ab08c1f1e612bf72293ea14c959b0c3c":24,"75th":46,"86400s":41,"89h4m48":21,"8gb":43,"8th":[6,40],"90th":46,"95ac6470":52,"95th":46,"98th":46,"99th":46,"9th":46,"\u00eatre":9,"abstract":[23,25],"boolean":[9,12,14,17,19,21,52],"break":[28,41],"byte":[6,9,13,21,46,62,80,115,163],"case":[6,10,11,12,13,14,16,17,18,21,24,25,28,29,30,38,43,49,51,52],"catch":23,"class":[6,11,14,21,23,26,29,41,42,45,49,116,128,148],"default":[4,6,10,11,13,14,17,19,21,26,29,30,31,34,38,40,41,42,46,49,51,52,57,76,80,87,115,116,118,121,131,132,138,152,153,164],"enum":9,"export":[26,46,52],"final":[14,19,23,26,41,43,49,132],"float":[9,10,11,12,14,17,21,38,42],"function":[6,9,10,11,12,15,16,18,19,21,25,32,36,49,50,52],"import":[11,14,21,26,27,29,31,41,43,46,52,116],"int":[9,10,11,13,14,17,18,19,21,29,40,42],"long":[6,13,21,24,25,30,41,46],"new":[0,4,6,10,11,14,16,17,18,19,20,21,23,25,26,28,29,33,36,38,41,43,49,51,107,114,116],"null":[9,10,12,13,14,17,18,21,23,52],"public":[6,14,23,29,30,34,49,50],"return":[6,9,11,13,14,16,17,18,19,21,25,131],"short":[6,21],"static":[6,9,10,18,50],"super":49,"switch":[6,10,19,26,30,45,46,49,50],"throw":[6,14,23,29],"true":[6,11,12,17,19,21,26,30,40,41,49,51,52,113,116],"try":[6,11,23,26,28,30,41,54,131],"var":[6,23,34],"void":29,"while":[6,10,11,12,13,21,24,28,38,41,42,43,49,52],AES:6,AND:[9,11,13,14,18,19,49,52],AWS:43,Added:10,Adding:[6,11,19,21,30,36,45,49],And:[11,14,19,49],Are:25,Ave:21,BUT:23,But:[13,15,19,21,23,28,30,52],CAS:6,CFs:[131,138],CLS:52,DCs:6,DNS:30,Doing:10,EBS:43,For:[0,4,6,9,10,11,12,13,14,15,16,17,18,19,20,21,28,29,30,31,34,35,41,43,49,50,52],GCs:6,Has:[6,25],IDE:[27,36],IDEs:[26,27],IDs:[116,154],INTO:[6,9,11,13,14,17,21],IPs:[6,50,137,154],Ids:160,JKS:6,KBs:6,LCS:11,NFS:43,NOT:[6,9,10,11,13,14,16,18,19,20,21],Not:[13,19,28,41,42],ONE:[0,6,46,52],One:[6,29,30,41],PFS:6,Pis:43,Such:21,THE:6,TLS:[6,45],That:[11,12,18,21,28,30,41,52],The:[0,4,6,8,9,10,12,14,16,18,19,20,21,23,24,26,28,29,30,31,34,35,36,38,40,42,43,46,49,50,51,52,57,60,65,67,73,77,83,86,87,90,94,98,100,102,107,114,116,118,122,123,129,131,138,141,142,148,153,154,155,162,164,167,168,170],Their:21,Then:[13,29,30,34,41,49],There:[0,6,10,11,12,13,14,21,26,28,29,30,41,46,49],These:[4,6,11,14,26,46,49,52],USE:[9,14,15],USING:[9,13,16,20,21,41],Use:[11,13,19,30,35,45,52,55,60,116,121,131,160,167],Used:46,Uses:[6,17,45,50],Using:[11,13,29,30,49],WILL:6,WITH:[9,11,12,16,18,19,38,40,41,42,49,52],Will:[6,36,80,116,148],With:[6,13,17,30,41,51,56],Yes:30,_cache_max_entri:49,_if_:6,_must_:6,_trace:46,_udt:14,_update_interval_in_m:49,_use:14,_validity_in_m:49,a278b781fe4b2bda:34,abil:[14,30,42],abilityid:16,abl:[6,14,21,26,29,30,41],about:[4,6,19,26,28,29,30,38,41,50,52,59,116,137],abov:[6,8,11,12,13,14,21,26,28,30,31,40,41,46],absenc:12,abstracttyp:21,accept:[0,6,10,11,12,13,17,28,29,38,51,75,116],access:[6,10,21,26,28,43,45,46],accompani:6,accord:[6,30],accordingli:[6,14,30],account:[6,21,29],accru:[41,46],accumul:[6,41,46],accur:[6,30,38,137],accuraci:[38,118,164],acheiv:49,achiev:[41,46],achil:32,ack:6,acoount:46,acquir:[
19,46],across:[6,11,19,28,46,49,50,116,120],action:[6,13],activ:[4,6,28,40,46,52,116,118,164],activetask:46,actual:[4,6,13,20,23,25,30,34,41,50,131],acycl:19,add:[0,6,9,10,11,21,24,25,28,31,34,36,41,49],addamsfamili:11,added:[0,6,10,11,14,25,41],adding:[6,13,14,25,43,52],addit:[0,6,9,11,13,19,21,26,28,31,41,43,46,49,52],addition:[11,13,41],address:[6,8,17,21,26,28,31,36,46,50,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],adher:10,adjac:41,adjust:[6,38],adv:34,advanc:[6,45,49],advantag:43,advers:30,advic:[28,30],advis:[6,12,21,30],af08:13,afd:21,affect:[6,25,28,30,41,138],afford:6,after:[5,6,10,11,12,13,14,16,17,18,26,28,30,40,41,43,45,46,49,50,52],afterward:[26,29],afunct:14,again:[6,28,41,51,52],against:[6,11,14,28,29,30,43,51,52,131],agent:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],agentlib:26,aggreg:[6,9,10,13,15,18,19,46,52],aid:12,aim:6,akeyspac:14,algorithm:[6,11,51],alia:[10,13,32],alias:[6,10,18],alic:19,align:23,aliv:6,all:[0,6,9,11,12,13,14,17,18,21,23,24,25,26,28,29,36,38,40,41,46,49,51,52,57,58,59,75,87,107,108,113,116,118,120,129,132,138,152,153,155,164,166,167,168],allmemtableslivedatas:46,allmemtablesoffheaps:46,allmemtablesonheaps:46,alloc:[6,30,40,43,46],allocate_tokens_for_keyspac:51,allow:[0,4,6,9,10,11,12,14,16,17,18,21,31,38,40,41,42,43,50],allowallauthent:[6,49],allowallauthor:[6,49],allowallinternodeauthent:6,almost:[6,14,21,41],alon:[6,23],along:[6,13,113,116],alongsid:[35,52],alphabet:23,alphanumer:[11,19],alreadi:[6,11,14,16,18,21,28,41,49,167],also:[0,4,6,10,11,12,13,14,17,18,19,21,26,28,29,30,31,41,43,46,49,51,52,87,168],alter:[9,10,15,17,30,38,40,41,42,49],alter_keyspace_stat:12,alter_role_stat:12,alter_table_instruct:11,alter_table_stat:12,alter_type_modif:21,alter_type_stat:[12,21],alter_user_stat:12,altern:[6,10,11,12,13,17,21,26,28,31,43,49],although:[6,28],alwai:[0,6,9,10,11,13,14,18,21,23,28,29,30,41,43],amend:24,amongst:11,amount:[6,11,13,21,26,28,29,30,41,42,43,46,51,52,131],amplif:[41,43],anaggreg:14,analogu:13,analyt:38,analyz:29,ani:[0,6,10,11,12,13,14,17,18,19,20,21,24,25,26,28,29,31,34,36,40,41,43,46,49,51,52,55,107,116,121,138,152],annot:23,anonym:[12,21],anoth:[6,11,14,19,21,29,41,49,52],anotherarg:14,ant:[26,28,29],anti:[6,21],anticip:11,anticompact:41,antientropystag:46,antipattern:43,anymor:[24,41],anyon:23,anyth:41,anywai:6,anywher:13,apach:[2,5,6,7,14,20,23,24,25,26,28,29,30,33,34,41,42,46,49,53],api:[6,8,11,15,17,35,50],appear:[12,14,41,52],append:[21,24,43,46,52],appendic:[15,36],appendix:[12,15],appl:21,appli:[6,9,10,11,12,13,19,21,24,28,29,30,46,52],applic:[6,11,19,23,25,26,49],appreci:28,approach:[4,41,51],appropri:[6,11,19,21,25,28,49,50,51],approxim:[41,46],apt:34,arbitrari:[11,12,21],architectur:[30,36],archiv:[6,40,80],archive_command:80,archive_retri:80,aren:13,arg:[14,116],argnam:14,argnum:14,argument:[6,11
,13,14,16,17,30,31,42,52,55,56,57,58,60,65,67,73,77,83,86,87,90,94,98,100,102,106,107,114,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],arguments_declar:14,arguments_signatur:14,around:[6,19,41,43,50],arrai:[6,30],arriv:[6,28,30],artifact:26,artifici:11,asap:10,asc:[9,11,13],ascend:[11,13],ascii:[9,14,17,21],asf:26,ask:[5,28,29,36,49],aspect:11,assassin:116,assertionerror:23,assertrow:29,assign:[6,13,30],associ:[6,11],assum:[6,11,14,26,49,50],assumpt:49,astyanax:32,async:[6,49],asynchron:[6,16,30,43],asynchroni:46,atabl:14,atom:[11,13,20,24],atomiclong:46,attach:28,attemp:46,attempt:[0,6,11,16,18,19,21,30,41,46,49,52,132],attent:[23,28],attribut:41,audit:[66,76,116],auditlog:76,auth:6,authent:[10,45,52],authenticatedus:6,author:[9,19,21,45],authorizationproxi:49,auto:[6,30,155],auto_bootstrap:51,autocompact:[41,67,77,116,155],autogener:54,autom:[8,23],automat:[6,13,14,16,26,29,30,34,41,49,51],avail:[0,6,8,11,14,19,26,28,29,34,40,49,50,52,57,87,129,138,148,167],availabil:6,averag:[6,14,41,46],average_live_cells_per_slice_last_five_minut:163,average_s:11,average_tombstones_per_slice_last_five_minut:163,averagefin:14,averagest:14,avg_bucket_s:41,avoid:[6,11,12,23,25,28,38,41,43,49,50,52,168],awai:[26,51,52],awar:[0,11,28,38,42,137],azur:43,b124:13,b70de1d0:13,back:[6,41,46,51],backend:6,background:[30,34,41,49],backlog:6,backpressur:6,backpressurestrategi:6,backup:[6,36,41,45,51,52,68,78,116,156],backward:[6,10,11,15,19,21],bad:[6,14,30,50],balanc:51,banana:21,band:21,bar:[12,23],bardet:21,bare:6,base:[0,4,6,10,11,13,14,18,19,21,24,28,29,30,41,43,46,49,51],bash:30,basi:[6,30,42],basic:[11,41,43],batch:[6,9,15,29,36,52],batch_remov:46,batch_stat:12,batch_stor:46,batchlog:[13,46,89,116,133,139],be34:13,beatl:21,beca:52,becaus:[6,13,14,34,41,42,49],becom:[4,6,11,14,19,28,41,46,49,51],been:[0,4,6,10,13,14,15,19,21,25,28,41,43,49,138],befor:[0,6,10,11,13,14,16,20,21,26,27,29,32,41,49,50,52,80,153],begin:[9,12,13,29,49,52],beginn:28,begintoken:52,behavior:[0,6,10,14,17,21,23,25,38,41,51,132],behind:[6,23,29,30,41],being:[6,11,13,17,21,25,29,30,38,41,46,51],belong:[11,13,14,46,57,116],below:[6,11,12,13,17,19,21,28,34,41,52,63],benchmark:43,benefici:41,benefit:[6,38,41,43,45],besid:6,best:[6,29,41,49,50],best_effort:6,better:[6,23,28,41,43],between:[0,6,9,10,13,15,28,30,38,41,46,49,51,131,152],beyond:[6,52,168],big:[41,60],bigger:[11,41],biggest:14,bigint:[9,14,17,21],bigintasblob:14,bin:[26,34,35,52],binari:[14,33,69,79,116,157],binauditlogg:76,bind:[6,10,12,14,30],bind_mark:[12,13,18,21],biolog:11,birth:13,birth_year:13,bit:[6,14,17,21,28,30,42,43],bite:30,bitrot:11,bitstr:9,black:6,blank:[6,23,30],bleed:26,blindli:30,blob:[9,10,12,17,21,36,42],blobasbigint:14,blobastyp:14,block:[4,6,11,24,31,41,43,46,49,80],blockedonalloc:6,blog:[6,11,13],blog_til:13,blog_titl:13,bloom:[4,11,36,43,45,46],bloom_filter_false_posit:163,bloom_filter_false_ratio:163,bloom_filter_fp_ch:[11,38],bloom_filter_off_heap_memory_us:163,bloom_filter_space_us:163,bloomfilterdiskspaceus:46,bloomfilterfalseposit:46,bloomfilterfalseratio:46,bloomfilteroffheapmemoryus:46,blunt:49,bnf:12,bob:[13,19],bodi:[11,12],boilerpl:27,boolstyl:52,boost:6,boot:30,bootstrap:[0,6,36,42,45,46,49,116,121,148],born:13,both:[0,6,11,13,14,18,21,24,25,28,30,31,38,41,42,43,46,49,51,52],bottleneck:6,bottom:30,bound:[6,11,12,21,43,49],box:[6,49,50],brace:23,bracket:12,braket:12,branch:[24,25,26,29],branchnam:28,breakpoint:26,breed:29,bring:6,brk:30,broadcast:6,broadca
st_address:50,broken:[41,46],browser:52,bucket:41,bucket_high:41,bucket_low:41,buffer:[4,6,46],bufferpool:45,bug:[10,24,29,30,36],build:[8,27,29,36,46,116,170],builder:[93,116,145],built:[26,46],bulk:[36,45],bump:10,bunch:23,burn:40,button:30,bytebuff:14,byteorderedpartition:[6,14],bytescompact:46,bytesflush:46,bytestyp:9,c73de1d3:13,cach:[6,30,31,43,45,50,107,109,110,111,116,140,141],cachecleanupexecutor:46,cachenam:46,calcul:[6,38,40,41,46,50],call:[9,11,12,13,14,19,23,31,36,41,43,46,51,116,148],callback:46,caller:23,can:[0,4,5,6,8,9,10,11,12,13,14,16,17,18,19,20,21,23,24,25,26,28,29,31,34,35,36,38,40,41,42,43,46,49,50,51,52,55,57,58,60,65,67,73,77,80,83,86,87,90,94,98,100,102,106,107,114,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],cancel:[10,132],candid:41,cannot:[6,9,11,13,14,17,18,19,21,41,49,55,116],cap:[12,91,95,101,116,143,147,150],capabl:[6,30,52],capac:[6,40,46,50,116,118,140,164],captur:[6,36,45],care:[6,41,131],carlo:19,carri:[23,131],cascommit:46,cascontent:[102,151],casprepar:46,caspropos:46,casread:46,cassablanca:21,cassafort:32,cassandra:[0,2,4,5,8,10,11,13,14,19,20,21,23,24,28,32,33,35,38,41,42,43,46,50,51,52,76,80,116,127,131,134,138,161,169],cassandra_hom:[6,40,49],cassandraauthor:[6,49],cassandradaemon:[26,34],cassandralogin:49,cassandrarolemanag:[6,49],casser:32,cassi:32,cast:[10,13,18],caswrit:46,cat:21,categor:46,categori:[11,12,13,14,76],caught:[25,46],caus:[6,18,30,41,49],caution:6,caveat:49,cbc:6,ccm:[25,29],ccmlib:29,cdc:[6,11],cdc_enabl:40,cdc_free_space_check_interval_m:40,cdc_free_space_in_mb:40,cdc_raw:[6,40],cdc_raw_directori:40,cdccompactor:6,cell:[6,21,46,87,168],center:[6,11,21,30,50,51,73,83,116,131],central:[26,49,52],centric:19,certain:[6,9,11,19,29,41,49],certainli:14,certif:[49,116,127],cfname:[100,118,164],cfs:23,chain:19,chanc:38,chang:[6,11,12,15,19,21,24,26,27,33,34,36,42,45,46,49,148],channel:[5,8,28],charact:[11,12,13,17,19,21,23,52],chat:8,cheap:6,check:[0,6,11,13,23,25,26,28,29,30,38,40,41,46,49,107,116,131,168],checklist:[27,28,36],checkout:[26,28],checksum:[11,42,116,168],cherri:24,chess:13,child:52,chmod:49,choic:[6,11,36,41,45],choos:[0,6,11,27,32,43,46],chosen:[0,6,11,14],chown:49,christoph:21,chrome:52,chunk:[4,6,30,42,52],chunk_length_in_kb:[11,42],chunk_length_kb:6,chunk_lenth_in_kb:11,chunkcach:46,chunksiz:52,churn:6,cipher:[6,49],cipher_suit:6,circular:19,citi:21,clash:12,class_nam:6,classpath:[6,14,21,46],claus:[10,11,14,16,17,18,19,23],clean:[6,23,46,57,116,134],cleanli:28,cleanup:[30,41,45,46,87,116,160],clear:[25,28,59,107],clearsnapshot:116,click:[13,26,28,29],client:[0,6,8,10,11,13,17,19,21,25,30,31,33,36,43,45,52,59,116],client_encryption_opt:49,clientrequest:46,clientstat:116,clock:6,clockr:6,clojur:33,clone:[26,30,52],close:[6,15,49],closer:38,cloud:45,cluster:[0,4,6,9,10,13,14,20,21,25,29,31,35,36,41,43,46,49,50,51,52,64,85,89,104,116,139,154],cluster_nam:[31,35],clustering_column:11,clustering_ord:11,cmsparallelremarken:26,coalesc:6,coalescingstrategi:6,code:[6,10,12,14,20,24,25,26,27,29,36,42,46],codestyl:23,col:14,cold:6,collat:6,collect:[6,10,11,12,13,14,15,17,43,45,46,87],collection_liter:12,collection_typ:21,color:[21,52],column1:9,column:[6,9,10,12,13,14,15,16,17,18,21,42,46,52,100,118,138,153,164],column_definit:11,column_nam:[11,13,16],columnfamili:[6,9,23,41],colupdatetimedeltahistogram:46,com:[6,11,14,23,24,49],combin:[4,6,10,40,41],come:[6,9,49],comingl:41,comma:[6,11,12,13,31,49,51,52,76,118,121,164],command:[0,6,24,29,
30,31,34,35,42,45,53,55,56,57,58,60,65,67,73,77,80,83,86,87,90,94,98,100,102,106,107,114,116,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],comment:[6,11,15,18,23,25,49],commit:[6,8,11,27,28,36,46],commitlog:[2,6,30,31,43,45],commitlog_archiv:6,commitlog_directori:[31,43],commitlog_segment_size_in_mb:30,commitlogread:40,commitlogreadhandl:40,commitlogreplay:40,commitlogseg:[6,45,46],committ:[24,28,29],common:[0,14,15,23,25,28,45,52],common_nam:11,commonli:116,commun:[6,8,25,26,28,30,31,35,49],commut:30,compact:[4,6,15,30,36,38,42,43,45,57,61,62,87,90,91,116,129,138,142,143,148,155,160,167],compacted_partition_maximum_byt:163,compacted_partition_mean_byt:163,compacted_partition_minimum_byt:163,compaction_:160,compaction_window_s:41,compaction_window_unit:41,compactionbyteswritten:46,compactionexecutor:46,compactionhistori:[41,116],compactionid:160,compactionparamet:41,compactionparametersjson:41,compactionstat:[41,116],compactionstrategi:45,compactor:[92,116,144],compar:[6,28,41,46],compat:[6,9,10,11,13,15,19,25,28],compatilibi:21,compet:6,compil:[23,26,52],complain:26,complet:[6,13,14,28,30,41,46,49,51,52,116,130,132],completedtask:46,complex:[6,9,14,21,28],complexarg:14,compliant:[6,14,49],complic:28,compon:[4,11,25,38,46,49,116,148],compos:[11,13,21],composit:11,compound:17,comprehens:25,compress:[4,6,29,36,41,43,45,46],compression_metadata_off_heap_memory_us:163,compressioninfo:4,compressionmetadataoffheapmemoryus:46,compressionratio:46,compressor:[6,11],compris:[4,11,42],compromis:49,comput:[6,14],concaten:14,concept:[15,19,41],concern:[13,14],concret:[12,21],concurr:[6,43,92,93,116,131,144,145],concurrentmarksweep:43,condens:13,condit:[6,10,12,13,19,21,23,24,41,46,49,52],conditionnotmet:46,conf:[6,30,31,34,46,49,52],config:[46,49,52],configur:[0,4,11,21,26,29,30,33,34,36,45,46,49,50,52,63,80,116,134,148],confirm:[6,8,25,26],conflict:[13,21,24],conform:[18,25],confus:[10,12,30],conjunct:52,connect:[6,11,19,21,26,35,36,46,49,50,52,59,63,115,116],connectednativecli:46,connectedthriftcli:46,connector:[30,32,49],consecut:31,consequ:[11,13,21,43],conserv:6,consid:[0,6,13,21,28,31,38,41,43],consider:[13,21],consist:[2,11,12,13,14,25,49,51],consol:[26,31,52],constant:[10,11,15,17,21],constantli:[6,41],construct:12,constructor:[6,23],consum:[29,38,40,46],consumpt:40,contact:[6,11,30,36],contain:[0,6,8,9,10,11,12,13,15,16,18,19,21,26,28,40,41,42,49,52,153],contend:[6,46],content:[4,6,11,12,13,36,41,52,80],contentionhistogram:46,context:[6,9,19,21,28,30,49],contigu:13,continu:[0,6,23,26,29,41,49,50],contrarili:12,contrast:[29,49],contribut:[24,27,29,36],contributor:[24,28,29,34],control:[0,6,10,11,13,15,25,31,34,41,49,50,52],conveni:[9,12,14,17,29,51],convent:[6,11,14,15,24,27,28,29,49,50],convers:10,convert:[10,13,14,41],coordin:[0,6,11,13,14,21,30,46,132],coordinatorreadlat:46,coordinatorscanlat:46,cop:23,copi:[0,30,41],core:[6,14,43],correct:[10,25,34,41,42,116,129],correctli:[6,11,30,41,49],correl:[6,10,50],correspond:[6,9,11,13,14,18,21,28,29,30,40,50],corrupt:[6,11,41,42,43,138,168],cost:[6,13,21,42],could:[6,12,21,25,28,41,52],couldn:34,count:[6,9,13,21,30,41,46,51],counter:[6,9,14,43,46,109,116,138,140,141],counter_mut:46,countercach:46,countermutationstag:46,counterwrit:[102,151],countri:[13,21],country_cod:21,coupl:[0,6],cours:[6,13],cover:[25,28,29,30,33,41,46],cpu:[6,11,40,42,45],cqerl:32,cql3:[11,14,25,29,52],cql:[6,10,11,12,13,14,16,17,19,21,29,32,35,36,41,45,49,53,148],cql_type:[11,12,13,14,19
,21],cqlc:32,cqldefinit:14,cqlsh:[30,33,34,36,49,53],cqltester:[25,29],crash:43,crc32:4,crc:4,crc_check_chanc:[11,42],creat:[6,9,10,12,13,15,17,26,27,29,30,40,41,42,49,51,52,60],create_aggregate_stat:12,create_function_stat:12,create_index_stat:12,create_keyspace_stat:12,create_materialized_view_stat:12,create_role_stat:12,create_table_stat:12,create_trigger_stat:12,create_type_stat:[12,21],create_user_stat:12,createkeystor:6,createt:29,creation:[6,10,11,13,14,18,21],creator:19,credenti:[6,49],critic:[25,28,49],cross:[6,30,50],crossnodedroppedlat:46,cryptographi:6,csv:52,cuddli:21,curl:[24,34],current:[6,9,11,13,14,19,21,26,28,34,41,46,51,52,82,99,103,105,107,116,130,159,167],currentlyblockedtask:46,custom:[6,9,10,11,14,15,16,19,28,50,52],custom_option1:19,custom_option2:19,custom_typ:[14,21],cute:21,cvh:25,cycl:[6,40,80],daemon:[26,116,161],dai:[17,21,41],daili:80,danger:6,dash:12,data:[0,4,6,10,12,14,15,16,18,25,31,34,36,38,42,43,45,46,49,50,52,55,60,73,80,83,87,107,116,121,131,153,168],data_file_directori:[31,43],data_read:19,data_writ:19,databas:[12,13,15,20,41,43,49],datacent:[0,6,50,73,83,95,116,131,147],datacenter1:6,dataset:6,datastax:[6,11,14,32],datatyp:14,date:[9,10,14,15,17,138],dateof:[10,14],datestamp:17,datetieredcompactionstrategi:[11,41],daylight:21,db_user:49,dba:49,dc1:[6,11,49],dc2:[6,11,49],dcassandra:[41,46,49,51],dclocal_read_repair_ch:[0,11,41],dcom:49,dcpar:131,ddl:[11,52],ddl_statement:12,dead:[6,45,55,116],dead_node_ip:51,deb:34,debian:[30,33],debug:[31,52],decid:[9,41,50],decim:[9,14,17,21,52],decimalsep:52,declar:[11,12,14,21],decod:[17,21],decommiss:[6,51,116],decompress:42,decreas:[6,41],decrement:[13,21],decrypt:6,dedic:6,deem:6,deeper:28,default_time_to_l:[10,11,13],default_weight:6,defend:30,defin:[0,6,9,10,11,12,13,15,16,17,18,19,20,26,41,46,49,50,51,52,60,116],definit:[9,13,14,15,18,21,36,38],deflat:6,deflatecompressor:[11,42],degrad:6,delet:[6,9,10,11,12,15,17,19,21,28,36,52,80,87,116,166],delete_stat:[12,13],delimit:6,deliv:[0,6],deliveri:[6,116,117,136,146],delta:46,demand:49,deni:30,denorm:21,denot:12,dens:38,depend:[4,6,11,12,13,14,21,25,26,28,29,41],deploi:[30,31],deploy:[6,49,50],deprec:[6,10,11,14,15,30,41],desc:[9,11,13,52],descend:[11,13],describ:[2,6,7,9,10,11,12,13,14,15,17,19,21,25,26,28,38,49,53,116],describeclust:116,descript:[6,10,11,14,21,46,52],descriptor:46,design:[14,40,41,43],desir:[16,21,30],destin:[40,52],detail:[5,6,10,11,12,13,14,21,30,45,49,52],detect:[2,6,11,24,30,49],detector:[85,116],determin:[0,6,13,19,38,42,50,131],determinist:30,dev:[6,8,11,30],develop:[5,8,26,28,29,36,43],dfb660d92ad8:52,dfp:168,dht:6,dictat:[6,49],did:[25,46],die:6,dies:36,diff:[15,23],differ:[0,6,11,12,13,14,15,19,21,24,26,28,29,30,31,34,41,42,43,46,51],difficult:[6,29],difficulti:21,digest:4,digit:[17,21,30],diminish:21,direct:[6,11,17,19,28,46],directli:[13,18,19,26,41],director:13,directori:[6,20,26,29,30,33,34,35,40,43,45,52,107,116,134],dirti:6,disabl:[6,11,14,41,42,49,50,52,66,67,68,69,70,71,72,73,74,83,116,139,141,143,147,150,151,152],disable_stcs_in_l0:41,disableauditlog:116,disableautocompact:[41,116],disablebackup:116,disablebinari:116,disablefullquerylog:116,disablegossip:116,disablehandoff:116,disablehintsfordc:116,disableoldprotocolvers:116,disablesnapshot:138,disallow:6,disambigu:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,1
34,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],discard:[6,40],disconnect:41,discourag:[11,21,28],discov:30,discuss:[8,21,28],disk:[4,6,11,31,36,38,40,41,42,45,46,80,113,116,129,168],displai:[11,52,56,62,106,108,115,116,163],disrupt:[30,49],dist:34,distinct:[0,9,10,13],distinguish:[9,14],distribut:[6,29,30,41,46,49,51],divid:12,djava:[26,30,49],dml:20,dml_statement:12,dmx4jaddress:46,dmx4jport:46,dns:30,dobar:23,doc:[6,25,40,49],document:[5,12,14,15,17,25,28,35,49,52],doe:[6,11,13,14,16,17,18,19,21,24,25,28,36,38,40,41,42,49,50,51],doesn:[6,14,21,23,29,30],dofoo:23,doing:[6,13,29,30,41,51],dollar:[10,12],domain:[49,137,154],don:[5,13,23,24,25,26,28,30,31,41,51,107,131],done:[6,11,13,21,28,29,31,35,41],doubl:[6,9,10,11,12,14,17,21,26,46,50],down:[6,19,41,46,50,51,71,116,131],download:[6,26,34,46],downward:19,drain:116,drive:[6,41,43],driver:[6,12,14,29,33,36,52],drop:[6,10,15,36,41,46,80],drop_aggregate_stat:12,drop_function_stat:12,drop_index_stat:12,drop_keyspace_stat:12,drop_materialized_view_stat:12,drop_role_stat:12,drop_table_stat:12,drop_trigger_stat:12,drop_type_stat:[12,21],drop_user_stat:12,droppabl:[6,41],dropped_mut:163,droppedmessag:45,droppedmetr:46,droppedmut:46,dropwizard:46,dt_socket:26,dtest:[25,27],due:[11,13,21,30,34,46,51],dump:52,duplic:25,durable_writ:11,durat:[6,10,15,19,41,46,118,164],dure:[6,11,14,20,28,29,30,41,42,46,49,51,52,138],dying:30,dynam:[6,45,49],dynamic_snitch:50,dynamic_snitch_badness_threshold:50,dynamic_snitch_reset_interval_in_m:50,dynamic_snitch_update_interval_in_m:50,dynamo:[2,36],each:[0,4,6,10,11,12,13,14,17,18,19,21,24,28,35,36,41,42,43,46,49,50,51,52,116,141,155,168],each_quorum:0,earli:[6,12,28],earlier:15,easi:[9,28],easier:[0,28],easiest:30,ec2:[6,43,50],ec2multiregionsnitch:[6,50],ec2snitch:[6,50],ecc:43,echo:34,eclips:[23,27,29],ecosystem:25,edg:[25,26],edit:[26,31,34,46,49],effect:[6,11,21,28,30,38,42,49,71,116],effectiv:46,effici:[6,11,41,50,51],effort:6,either:[6,8,12,13,14,16,21,23,24,26,28,30,34,35,40,41,46,49,166],elaps:[41,46],element:[21,52],elig:6,els:[11,13,23,28],email:[8,16,21,36],embed:29,emploi:38,empti:[6,9,10,11,12,52],emptytyp:9,enabl:[6,11,14,17,19,29,30,41,42,50,51,52,76,77,78,80,83,84,116,152],enable_user_defined_funct:14,enableauditlog:116,enableautocompact:[41,116],enablebackup:116,enablebinari:116,enablefullquerylog:116,enablegossip:116,enablehandoff:116,enablehintsfordc:116,enableoldprotocolvers:116,encapsul:[23,46],enclos:[9,10,12,14,19],enclosur:12,encod:[15,21,25,52],encount:[5,13,34,46],encourag:[6,11],encrypt:[6,45],encryption_opt:6,end:[21,28,30,41,49,52,60,94,116,131],end_token:[60,131],end_token_1:121,end_token_2:121,end_token_n:121,endpoint:[46,50,55,94,116,131,166],endpoint_snitch:50,endtoken:52,enforc:[17,49],engin:[2,11,28,36,46],enhanc:43,enough:[0,6,21,30,31,41,50,52],enqueu:6,ensur:[11,13,18,20,30,42,49],entail:30,enter:[30,52],entir:[0,4,6,14,21,30,38,41,49,51,52],entri:[4,6,9,13,16,28,36,46,49,52],entropi:6,entry_titl:13,enumer:19,env:[30,31,46,49],environ:[0,5,6,26,30,33,43],ephemer:43,epoch:21,equal:[0,6,10,11,13,21,23,41],equival:[10,11,12,13,14,19,24,41],eras:11,erlang:33,erlcass:32,err:52,errfil:52,error:[6,11,12,14,16,18,19,21,23,25,26,34,36,52,132],escap:[12,17],especi:[28,30,41,52],essenti:[0,6,14,30,52],establish:[6,19,50],estim:46,estimatedcolumncounthistogram:46,estimatedpartitioncount:46,estimatedpartitionsizehistogram:46,etc:[6,18,21,23,25,30,31,34,41,46,49],eth0:6,eth1:6,ev1:21,even:[0,6
,10,12,13,14,17,21,28,36,41,49,52,63,138,167],evenli:6,event:[13,21,41,52,131],event_typ:13,eventu:[4,13],ever:[23,29,30,43],everi:[4,6,11,13,14,18,19,20,21,35,38,41,43,52],everyth:[12,23,26,30],evict:46,evil:[6,14],exact:[11,12,14,42],exactli:[11,14,18,49],exampl:[0,6,11,13,14,17,19,21,29,34,35,41,49,50,52],exaust:6,excalibur:11,exce:[4,6,17,23],exceed:[6,43],excel:11,excelsior:11,except:[0,13,14,17,25,27,28,29,30,46],excess:38,exchang:[6,30],exclud:[46,76,99,116],excluded_categori:76,excluded_keyspac:76,excluded_us:76,exclus:[21,29],execut:[6,9,11,12,13,14,19,26,29,35,41,46,49,52],exhaust:6,exhibit:13,exist:[6,9,10,11,12,13,14,16,17,18,19,20,21,25,26,29,36,38,41,42,50,51],expect:[6,10,12,21,23,25,28,41,49],expens:[6,38,50],experi:[6,41],experienc:6,experiment:[6,131],expir:[6,10,11,13,21,45,49,138],expiri:41,explain:[23,25,28,34],explicit:10,explicitli:[6,10,13,17,21,23,41,50],explor:26,expon:10,exponenti:46,expos:[6,9,49],express:[0,6,10,12,50],expung:30,extend:[21,28,29,107,168],extens:[6,11,49],extern:[46,51],extra:[0,6,11,41],extract:[23,34],extrem:[6,13],fact:[21,29,30],factor:[0,6,11,36,42,49],fail:[6,13,14,21,36,41,52,116,132],failur:[2,6,28,36,41,43,46,50,85,116,168],failuredetector:116,fairli:[6,40,49],fake:14,fall:6,fallback:[6,50],fals:[6,11,12,17,19,21,38,40,41,42,46,49,51,52,138],famili:[6,43,100,118,153,164],fanout_s:41,fast:[6,38,41],faster:[6,28,42,43,116,141],fastest:[6,24,50],fatal:6,fault:30,fav:[16,21],fax:21,fct:14,fct_using_udt:14,fear:30,feasibl:21,featur:[25,26,28,49],fed:6,feedback:28,feel:24,fetch:[6,11,52],few:[41,43],fewer:[6,28],fffffffff:[17,21],field:[10,13,14,17,21,23,38],field_definit:21,field_nam:13,fifteen:46,fifteenminutecachehitr:46,figur:41,file:[4,7,11,26,27,28,29,30,31,33,36,38,41,43,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],filenam:[11,52,100,116],filesystem:49,fill:[40,41],fillion:13,filter:[4,9,11,18,36,43,45,46,131],finalfunc:[9,14],find:[6,26,29,34,38,41,51,94,98],fine:[6,28,49],finer:6,finish:[26,28,116,133],fip:[6,49],fire:20,firefox:52,firewal:[6,30,31,50],first:[5,6,11,13,14,21,28,30,33,41,43,49,52,131,138],firstnam:13,fit:[6,41,46],five:46,fiveminutecachehitr:46,fix:[6,10,12,24,30,41,43],flag:[6,13,24,25,28,40,46,51],flexibl:49,flight:[6,49],flip:11,floor:6,flow:[6,19,25],fluent:32,flush:[4,6,40,41,43,46,75,116,153],fname:14,focu:28,folder:[26,160],follow:[0,5,6,8,9,10,11,12,13,14,17,18,19,21,23,24,25,26,28,29,30,31,34,36,40,41,42,46,49,50,52,57,60,67,77,86,87,122,131,138,151,155,167,168],font:12,foo:[11,12,40],footprint:[116,118],forc:[4,6,11,13,52,60,63,116,130,131,132],forcefulli:[55,116],foreground:[31,34],forev:41,forget:5,fork:28,form:[6,10,11,12,14,19,62,115,163],formal:12,format:[6,10,17,21,24,25,27,28,46,52,61,80,100,121,163,165],former:[6,46],forward:[6,11],found:[5,12,14,15,28,29,31,35,49,52,160,168],four:13,fqcn:29,fraction:6,frame:6,framework:[25,29],franc:[13,21],free:[6,11,21,24,26,46],freed:4,freenod:8,frequenc:[6,40],frequent:[6,29,36,41,49],fresh:51,friendli:[6,21,29],from:[0,4,6,9,11,12,13,14,15,17,18,19,21,24,27,28,29,33,35,36,38,40,41,42,43,46,49,50,51,54,55,57,58,60,65,67,73,76,77,83,86,87,90,94,98,100,102,106,107,114,116,118,121,122,123,125,
126,129,130,131,132,134,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],fromjson:15,froom:21,frozen:[9,10,11,13,14,21],fruit:[21,28],fsync:[6,46],full:[6,9,11,13,16,19,28,34,35,41,42,49,52,70,80,116,122,131,134],full_nam:163,fulli:[6,11,12,14,45,49],function_cal:12,function_nam:[13,14,19],fundament:17,further:[5,6,11,18,21,41,45,49],furthermor:[10,13,49],futur:[6,9,10,11,21,28,82,116,159],g1gc:43,game:[14,21],garbag:[11,43,45,46,87],garbagecollect:116,gather:41,gaug:46,gaurante:0,gc_grace_second:11,gc_type:46,gce:[30,43],gcg:6,gcstat:116,gener:[0,2,4,6,8,11,12,13,14,17,21,25,26,27,28,30,43,49,52,102,138,151],genuin:23,get:[6,8,24,26,28,30,34,36,38,41,92,93,96,99,116],getbatchlogreplaythrottl:116,getcompactionthreshold:116,getcompactionthroughput:116,getconcurrentcompactor:116,getconcurrentviewbuild:116,getendpoint:116,getint:14,getinterdcstreamthroughput:116,getlocalhost:[6,30],getlogginglevel:116,getlong:14,getmaxhintwindow:116,getpartition:23,getreplica:116,getse:116,getsstabl:116,getstr:14,getstreamthroughput:116,gettempsstablepath:23,getter:[19,23],gettimeout:116,gettraceprob:116,gib:[62,115,163],gist:23,git:[5,24,26,28],github:[23,24,28,29],give:[18,19,21,28,29,36,52],given:[0,6,11,12,13,14,16,21,28,38,41,49,51,52,58,60,65,67,77,90,98,102,116,122,142,148,152,155,162],global:[6,52,116,140],gmt:21,goal:[6,41],gocassa:32,gocql:32,going:[6,28,41],gone:6,good:[6,23,28,29,30,52],googl:[23,52],gori:30,gossip:[2,6,30,46,50,71,81,104,116,158],gossipinfo:116,gossipingpropertyfilesnitch:[6,50],gossipstag:46,got:6,gp2:43,gpg:34,grace:45,grai:21,grain:49,grammar:[11,12],grant:[6,9,49],grant_permission_stat:12,grant_role_stat:12,granular:[6,87],graph:19,gravesit:11,great:[28,41],greater:[0,6,21,30,50,144,145],greatli:6,green:21,group:[6,10,11,19,41,46,49,50],group_by_claus:13,grow:21,guarante:[0,2,11,13,14,21,28,36,38,41,51,52],guid:[6,26],guidelin:[10,25,43],had:[9,10,41],half:[6,24,30],hand:[6,13,43],handl:[6,14,25,27,28,30,40,43,46,49,80],handoff:[6,46,51,72,105,116,146],handoffwindow:116,hang:28,happen:[6,13,23,24,28,36,41,46,50],happi:28,happili:43,hard:[6,14,41,43],harder:6,hardwar:[6,36,45],has:[0,4,6,10,11,12,13,14,18,19,21,23,28,30,41,43,46,49,50,52],hash:[4,6,41],hashcod:23,haskel:33,hasn:80,have:[0,5,6,9,10,11,12,13,14,15,18,19,21,23,24,25,26,28,29,30,31,34,38,41,42,43,46,49,50,80,138],haven:28,hayt:32,hdd:[6,43],head:28,header:[26,52],headroom:6,heap:[4,6,26,31,36,38,42,43,46],heap_buff:6,heavi:6,heavili:43,held:[6,43,116,120],help:[5,6,10,28,29,35,54,56,116],helper:29,henc:[5,6,11,21],here:[6,24,29,30,32,41,46,49],hex:[12,17,100],hexadecim:[10,12,100],hibern:51,hidden:51,hide:[23,25],hierarch:19,hierarchi:19,high:[0,6,30,41,43],higher:[0,19,28,38,41,46,51,118,164],highest:41,highli:[28,30,43,49],hint:[0,6,11,12,30,31,36,45,46,51,72,73,82,83,97,105,116,117,136,146,149,159,166],hintedhandoff:[6,45],hintedhandoffmanag:46,hints_creat:46,hints_directori:31,hints_not_stor:46,hintsdispatch:46,histogram:[41,46,116,119,162],histor:28,histori:[23,59,61,116],hit:[6,41,46],hitrat:46,hoc:29,hold:[0,6,10,13,19,30,41,52],home:[21,52],hope:41,hopefulli:28,host:[6,31,36,46,50,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160
,161,162,163,164,165,166,167,168,169,170],hostnam:[6,30,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],hot:[6,46],hotspot:11,hotspotdiagnost:49,hottest:6,hour:[6,21,28,41],hourli:80,how:[0,5,6,7,8,11,12,21,25,26,27,28,29,33,35,36,41,42,46,50,52,80],howev:[6,9,10,11,12,13,15,17,18,21,28,29,30,31,34,38,42,43,49,52],hsha:6,html:6,http:[6,23,24,26,34,46],httpadaptor:46,hub:30,human:[11,62,115,163],hypothet:24,iauthent:6,iauthor:6,icompressor:42,idea:[6,14,27,28,29,30,41,52],ideal:[6,29,41,49],idempot:[13,21],idemptot:21,ident:0,identifi:[6,9,10,11,13,14,15,16,19,20,21],idiomat:8,idl:6,ieee:[17,21],iendpointsnitch:[6,50],ignor:[0,6,10,14,21,23,52,163],iinternodeauthent:6,illeg:14,illustr:19,imag:21,imagin:41,immedi:[6,11,21,28,38,42,57,116],immut:[4,30,42,43],impact:[6,11,25,41,45,49],implement:[6,10,13,14,18,19,23,29,30,40,42,49,50],implementor:6,impli:[11,12,21],implic:[0,49],implicitli:14,import_:52,imposs:41,improv:[0,6,11,21,28,29,38,41,43,50,51,52],inact:30,includ:[4,6,10,11,12,13,18,19,21,23,28,40,41,43,46,49,52,76,132,167],included_categori:76,included_keyspac:76,included_us:76,inclus:28,incom:6,incomingbyt:46,incompat:[6,10],incomplet:25,inconsist:[0,30],incorrect:30,increas:[6,11,30,38,41,42,43,46,50,51,131],increment:[6,10,13,21,28,41,68,78,116,132,138,156],incur:[13,21,46],indent:23,independ:[11,41,43,49],index:[4,6,9,10,11,12,13,15,21,36,41,45,52,116,122],index_build:160,index_identifi:16,index_nam:16,index_summary_off_heap_memory_us:163,indexclass:16,indexedentrys:46,indexinfocount:46,indexinfoget:46,indexnam:122,indexsummaryoffheapmemoryus:46,indic:[5,6,12,13,23,28,30,131],indirectli:13,individu:[6,10,14,21,28,29,43,49],induc:13,inequ:[10,13],inet:[9,11,14,17,21],inetaddress:[6,30],inexpens:43,infin:[9,10,12],influenc:11,info:[6,31,46,65,116],inform:[4,6,12,13,21,35,49,50,51,52,56,59,85,104,106,107,108,115,116,137,154],ingest:6,ingestr:52,inher:[11,21],inherit:19,init:46,initcond:[9,14],initi:[6,14,23,25,40,46,49,52,116,148],initial_token:51,input:[9,10,14,17,21,25,52],inputd:21,inreleas:34,insensit:[11,12],insert:[6,9,10,11,12,14,15,16,19,21,30,33,36,43,52],insert_stat:[12,13],insid:[6,11,12,13,21,23,52],inspect:[6,26,52],instabl:6,instal:[6,20,30,33,36,52],instanc:[6,10,11,12,13,14,16,18,19,20,21,26,29,30,40,41,43,46],instantan:46,instanti:10,instantli:6,instead:[10,11,13,18,21,23,30,41,137,154],instruct:[6,8,11,24,26,36],instrument:49,intasblob:13,integ:[0,10,11,12,13,17,21,46],integr:[27,29,36],intellij:[23,27],intend:[25,49],intens:[6,29,30],intent:25,inter:[6,95,116,147],interact:[29,35,52],interest:[0,41,49],interfac:[6,10,14,23,30,31,42,49],intern:[6,9,11,13,18,21,25,30,43,46],internaldroppedlat:46,internalresponsestag:46,internet:6,internod:[6,30],internode_encrypt:[6,49],internodeconnect:[102,151],internodeus:[102,151],interpret:[10,21,52],interrupt:30,interv:[6,9,46],intra:[6,46,50],intrins:21,introduc:[6,10,17,28,51],introduct:[10,19,29],intvalu:14,invalid:[6,13,19,25,49,107,109,110,111,116],invalidatecountercach:116,invalidatekeycach:116,invalidaterowcach:116,invertedindex:20,investig:6,invoc:14,invok:[24,34,49,168],involv:[6,13,41,42,49],ioerror:23,ip1:6,ip2:6,ip3:6,ip_address:55,ipv4:[6,17,21,30],ipv6:[6,17,
21],irc:[5,28,36],irolemanag:6,irrevers:[11,21],isn:[0,18,23,28,30],iso:21,isol:[6,11,13],issu:[0,19,24,28,29,30,38,41,42,131],item:[12,21,25,26],iter:[0,6],its:[4,6,11,12,13,14,21,26,30,41,46,49,50,51],itself:[6,11,16,30,34],iv_length:6,jaa:49,jacki:24,jamm:26,januari:21,jar:[14,23,26,46],java7:49,java:[6,14,20,21,23,26,28,33,34,36,40,41,43,46,49],javaag:26,javadoc:[23,25],javas:6,javascript:[6,14],javax:49,jbod:43,jce8:6,jce:6,jcek:6,jconsol:[36,41,49],jdk:6,jdwp:26,jenkin:[26,29],jetbrain:26,jira:[5,25,28,29,40],jkskeyprovid:6,jmc:[41,49],jmx:[6,19,36,45,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],jmx_password:49,jmx_user:49,jmxremot:49,job:[28,57,87,129,131,138,167],job_thread:131,john:[13,21],join:[6,8,13,36,41,49,51,116],joss:13,jpg:21,jsmith:21,json:[9,10,13,15,36,41,42,61,163,165],json_claus:13,jsr:[6,14],jsse:6,jsserefguid:6,judgement:23,junit:[23,26,29],jurisdict:6,just:[6,14,19,26,28,29,30,41,49],jvm:[6,20,26,30,31,45,49,51],jvm_extra_opt:26,jvm_opt:[31,49],jvmstabilityinspector:25,keep:[6,8,11,23,28,30,41,46,107],keepal:[6,30],kei:[4,6,9,10,13,14,17,21,29,30,34,40,41,42,43,46,49,57,94,98,100,110,116,120,140,141,163],kept:[6,41,46],kernel:[6,30],key_alia:6,key_password:6,key_provid:6,keycach:46,keycachehitr:46,keyserv:34,keyspac:[0,6,9,10,12,14,15,16,19,21,36,38,41,42,45,49,51,52,57,58,60,65,67,76,77,86,87,90,94,98,100,107,116,118,120,121,122,123,129,131,137,138,142,153,154,155,162,163,164,167,168,170],keyspace1:[6,19],keyspace2:6,keyspace_nam:[11,14,19,21,41],keystor:[6,49],keystore_password:6,keystorepassword:49,keyword:[10,11,13,14,15,16,17,21],kib:[62,115,163],kick:[116,133],kill:[6,34],kilobyt:42,kind:[11,12,21,28,40,41],kitten:21,know:[6,13,21,23,41],known:[19,21,32,35,38,41],ks_owner:49,ks_user:49,ktlist:153,kundera:32,label:[21,28],lag:46,land:42,landlin:21,lang:[36,46,49],languag:[6,9,10,12,14,20,21,32,35,36,52],larg:[6,11,13,14,21,29,36,41,43,46,52],larger:[6,29,30,41,42,43],largest:[6,46],last:[6,12,13,14,15,28,41,46,55,116],lastli:[13,21],lastnam:13,latenc:[0,6,30,46,50],later:[0,11,21,23,28,30],latest:[0,28,34,41,52,168],latter:12,layer:43,layout:11,lazi:11,lazili:11,lead:[6,10,21,41],learn:[6,29,30,52],least:[0,6,11,12,13,18,30,41,43],leav:[6,12,13,23,29,30,52],left:[6,17,41],legaci:[6,19],legal:10,length:[4,6,10,17,21,25,41],less:[6,21,28,30,38,43],let:[6,41],letter:17,level:[6,10,11,13,19,23,25,31,43,45,46,49,52,96,107,116,148],leveledcompactionstrategi:[11,38,41],lexic:30,lib:[6,20,25,26,34],libqtcassandra:32,librari:[8,25,29,32,46,52],licenc:25,licens:[25,26,28],life:28,lifespan:43,like:[0,6,12,13,14,17,21,23,24,25,28,29,30,36,41,42,43,49],likewis:19,limit:[6,9,10,11,18,19,21,30,40,41,42,49],line:[12,23,28,29,31,34,35,49,53,55,57,58,60,65,67,73,77,83,86,87,90,94,98,100,102,106,107,114,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],linear:43,linearli:38,link:[6,8,11,12,28,29,34],linux:[6,30],list:[4,5,6,9,10,11,12,13,14,17,26,28,29,31,34,35,36,41,49,51,52,55,57,58,59,60,65,67,73,76,77,83,86,87,90,94,98,99,100,102,106,107,113,114,116,118,121,122,123,126,129,130,131,132,137,138,139,140,141,142,143,144,145,146,14
7,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],list_liter:[13,21],list_permissions_stat:12,list_roles_stat:12,list_users_stat:12,listarg:14,listen:[6,36,46],listen_address:[31,35,36],listen_interfac:31,listsnapshot:116,liter:[10,12,14,17,52],littl:23,live:[13,36,41,46,51],livediskspaceus:46,livescannedhistogram:46,livesstablecount:46,load:[0,6,11,20,21,36,45,46,49,50,51,108,116,123,131,154],local:[0,6,11,26,28,29,35,43,46,49,50,52,116,125,131,135,166],local_jmx:49,local_on:[0,49,52],local_quorum:[0,52],local_read_count:163,local_read_latency_m:163,local_seri:52,local_write_latency_m:163,localhost:[6,35,49],locat:[6,33,34,42,46,49,50,52,160],lock:[6,30,46],log:[6,11,13,25,29,33,34,36,40,45,46,49,66,70,76,80,96,116,131,134,148,160],log_al:41,logback:31,logger:[23,31,76],logic:[6,20],login:[6,9,19,29,49],lol:21,longer:[6,9,10,30,41,51,57,116],look:[6,12,24,28,29,41,43],lookup:46,loop:23,lose:[6,41,51],loss:[6,21],lost:[41,51],lot:[6,35,36],low:[6,28,116,118],lower:[0,6,11,12,13,19,30,38,41,46,51],lowercas:12,lowest:[28,41],lz4:6,lz4compressor:[6,11,42],macaddr:9,machin:[6,11,29,30,46,49,50,51],made:[6,21,36,38,43,49],magnet:6,magnitud:13,mai:[0,4,6,9,10,11,13,14,16,17,19,21,25,26,28,29,30,34,38,41,49,50,51,52,138],mail:[5,28,36],main:[0,14,18,26,30,33,34,49,51,52],main_actor:13,mainli:[6,11],maintain:[6,28],mainten:46,major:[0,10,28,49,60,116],make:[0,6,8,9,20,21,23,26,28,29,30,31,34,41,49,51,52],man:6,manag:[6,19,26,29,46,49,51,56,116],mandatori:[11,14],mani:[0,6,11,23,25,28,41,42,43,46,49,52,57,60,67,77,80,86,87,131,138,155,167,168],manipul:[12,15,29,36],manual:[6,24,30],map:[6,9,10,11,13,14,17,19,36,46],map_liter:[11,16,19,21],mar:21,mark:[6,19,41,51,71,116],marker:[6,11,12,25,30],match:[6,12,13,14,17,19,46,50],materi:[6,10,11,12,15,36,46,52,116,170],materialized_view_stat:12,matter:[11,30],max:[6,36,41,46,49,52,80,90,97,116,131,142,149],max_hint_window_in_m:51,max_log_s:80,max_map_count:30,max_mutation_size_in_kb:[6,30],max_queue_weight:80,max_thread:6,max_threshold:41,maxattempt:52,maxbatchs:52,maxfiledescriptorcount:46,maxhintwindow:149,maxim:43,maximum:[4,6,14,38,46,52,80,138],maximum_live_cells_per_slice_last_five_minut:163,maximum_tombstones_per_slice_last_five_minut:163,maxinserterror:52,maxoutputs:52,maxparseerror:52,maxpartitions:46,maxpools:46,maxrequest:52,maxrow:52,maxthreshold:142,maxtimeuuid:10,mayb:13,mbean:[6,19,41,46,49],mbeanserv:19,mbp:6,mct:6,mean:[6,9,11,12,13,14,17,18,21,36,41,46,50,52,131],meaning:13,meanpartitions:46,meant:[21,30,46],measur:[6,25,29,46,51,52],mechan:40,median:46,meet:[6,25],megabyt:6,member:23,membership:6,memlock:30,memori:[4,6,11,36,38,41,45],memory_pool:46,memtabl:[2,6,38,40,41,42,43,46,153],memtable_allocation_typ:4,memtable_cell_count:163,memtable_cleanup_threshold:4,memtable_data_s:163,memtable_off_heap_memory_us:163,memtable_switch_count:163,memtablecolumnscount:46,memtableflushwrit:46,memtablelivedatas:46,memtableoffheaps:46,memtableonheaps:46,memtablepool:6,memtablepostflush:46,memtablereclaimmemori:46,memtableswitchcount:46,mention:[6,21,28,46,49],menu:26,mere:23,merg:[24,28,38,42,43,45],mergetool:24,merkl:46,mess:[28,29],messag:[6,21,25,28,34,36,46],met:13,meta:[13,46],metadata:[4,19,42,43,46],metal:6,meter:46,method:[10,13,14,19,23,25,26,29,36,49],metric:[6,45],metricnam:46,metricsreporterconfigfil:46,mib:[62,115,163],microsecond:[6,11,13,21,46],midnight:21,might:[6,13,41,46,55,57,58,60,65,67,73,77,80,83,86,87,90,94,98,100,102,106,107,114,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,1
50,151,152,153,154,155,160,162,163,164,166,167,168,170],migrat:[6,46,50],migrationstag:46,millisecond:[6,10,21,46,118,138,164],min:[6,30,40,41,46,52,90,116,142],min_sstable_s:41,min_threshold:41,minbatchs:52,mind:6,minim:[6,41,43],minimum:[6,11,14,31,46],minor:[10,12,45],minpartitions:46,minthreshold:142,mintimeuuid:10,minut:[6,21,41,46,80],misbehav:41,misc:[102,151],miscelen:46,miscellan:6,miscstag:46,miss:[11,41,46,51],misslat:46,mistaken:[55,57,58,60,65,67,73,77,83,86,87,90,94,98,100,102,106,107,114,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],mitig:[6,49],mix:[6,41],mmap:30,mnt:16,mock:29,mode:[6,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],model:[11,15,19,28,36,49],moder:43,modern:43,modif:[13,19],modifi:[6,9,10,11,14,19,21,28,38,41,42],modification_stat:13,modul:52,modular:25,moment:[6,28],monitor:[30,36,45,49,50,56,116],monkeyspeci:[11,18],monkeyspecies_by_popul:18,month:21,more:[0,4,6,10,11,12,13,21,23,28,29,31,35,36,38,43,45,46,49,50,51,60,86,87,116,118,131,138,164,168],moreov:13,most:[6,11,12,13,21,26,28,29,30,31,41,42,43,49,52,59,116,164],mostli:[6,11,21],motiv:[29,41],mount:6,move:[6,28,30,36,40,45,46,116],movement:45,movi:[13,21],movingaverag:6,mtime:11,much:[0,5,6,11,38,41,50],multi:[0,6,12,25],multilin:27,multipl:[4,6,10,11,12,13,14,21,23,25,26,28,30,31,41,43,50,121],multipli:41,murmur3partit:4,murmur3partition:[6,14,52],must:[0,6,10,11,13,14,17,18,19,23,28,29,30,31,41,46,49,51,52,153],mutant:16,mutat:[0,6,13,30,40,46,168],mutationstag:46,mv1:18,mx4j:46,mx4j_address:46,mx4j_port:46,mx4jtool:46,mxbean:19,myaggreg:14,mycolumn:17,mydir:52,myevent:13,myfunct:14,myid:12,mykei:17,mykeyspac:14,mytabl:[11,14,17,20],mytrigg:20,nairo:21,name:[6,9,10,11,12,13,14,16,17,18,19,20,21,25,26,28,29,30,31,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],names_valu:13,nan:[9,10,12],nanosecond:21,nathan:13,nativ:[6,10,12,15,17,25,30,35,46,52,69,79,116,122,157],native_transport_min_thread:6,native_transport_port:31,native_transport_port_ssl:49,native_typ:21,natur:[11,21,23,41,42],nearli:26,neccessari:6,necessari:[6,11,14,19,28,34,42,49],necessarili:[6,12,31],need:[0,6,10,11,12,13,19,21,23,25,26,28,29,30,31,34,35,38,41,42,43,49,50,52,94,98],neg:6,neglig:13,neighbour:41,neither:[18,21,49],neon:26,nerdmovi:[13,16],nest:[12,13,23],net:[6,26,30,33,34,49],netstat:[51,116],network:[6,13,30,43,49,50,115,116,119],networktopologystrategi:[11,49],never:[6,10,11,12,13,14,21,23,30,41],nevertheless:13,new_rol:19,new_superus:49,newargtuplevalu:14,newargudtvalu:14,newer:[41,43,52,87],newest:[11,41],newli:[11,21,28,40,116,123],newreturntuplevalu:14,newreturnudtvalu:14,newtuplevalu:14,newudtvalu:14,next:[6,30,35,41,52],ngem3b:13,ngem3c:13,nifti:24,nio:[6,14,46],no_pubkei:34,node:[0,4,6,
11,13,14,20,21,25,29,31,32,35,36,38,40,41,43,45,46,50,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],nodej:33,nodetool:[34,36,38,42,45,49,51,53,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],nologin:9,non:[6,9,10,11,12,13,14,19,21,30,38,42,46,49,52],none:[6,11,13,21,49],nonsens:19,nor:[11,18,21],norecurs:[9,19],norm:46,normal:[14,17,26,30,34,46,51,52],noschedul:6,nosuperus:[9,19],notabl:[14,17],notat:[10,12,13,52],note:[0,5,6,10,11,12,13,14,15,17,19,21,24,28,30,41,49],noth:[6,11,14,24,29,30],notic:6,notif:8,notion:[11,12],now:[10,23,26,41,51],ntp:6,nullval:52,num_cor:52,num_token:51,number:[0,6,10,11,12,13,14,17,18,21,26,28,29,30,34,38,41,42,46,49,51,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],number_of_cor:6,number_of_dr:6,number_of_partitions_estim:163,numer:[15,38],numprocess:52,object:[6,11,12,25],objectnam:19,observ:23,obsolet:[6,43,46],obtain:[12,49],obviou:[14,24],obvious:11,occup:13,occupi:[6,46],occur:[10,12,13,20,21,30,41,43,46],occurr:21,octet:[6,50],odd:28,off:[4,6,30,42,46,49,52,116,133],off_heap_memory_used_tot:163,offer:[15,29,42],offheap:[38,43],offheap_buff:6,offheap_object:6,offici:[36,52],offset:[4,46],often:[6,11,12,23,28,29,30,41,42,43,49,50,52,80],ohc:6,ohcprovid:6,okai:23,old:[4,6,41,51,74,84,116],older:[6,14,26,34,41,43,52],oldest:[6,11],omit:[6,10,11,13,17,21,148],onc:[4,6,11,12,14,21,24,26,28,29,30,40,41,42,43,46,49,51,52],one:[0,4,6,9,10,11,12,13,14,17,18,19,21,23,26,28,29,31,36,38,41,43,46,49,50,51,52,57,60,67,77,86,87,102,116,131,138,151,153,155,167,168],oneminutecachehitr:46,ones:[6,11,12,13,14,18,19,46],ongo:[41,51],onli:[0,6,9,11,12,13,14,17,18,19,21,23,28,29,31,36,38,41,42,43,46,49,50,52,131,153,163],onlin:52,only_purge_repaired_tombston:41,onto:[4,41],open:[5,6,26,49,50],openfiledescriptorcount:46,openjdk:34,oper:[0,6,10,11,13,16,18,19,21,23,36,38,40,43,46,49,51,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],operatingsystem:46,opertaion:6,opportun:38,ops:30,opt:14,optim:[6,11,12,13,30,41,43,51],optimis:131,option1_valu:19,option:[4,6,9,10,12,13,14,16,19,21,26,29,30,34,42,43,45,49,51,55,56,57,58,59,
60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],oracl:[6,34,49],order:[0,4,6,9,10,14,18,21,23,28,30,38,40,41,50,51,52],ordering_claus:13,orderpreservingpartition:6,org:[6,14,20,23,26,29,30,34,41,42,46,49],organ:[4,26,32],origin:[9,24,28,138],orign:13,other:[0,4,6,10,12,13,14,18,19,21,24,26,28,31,36,38,41,43,46,49,50,51,116,121,132],other_rol:19,otherwis:[0,9,12,13,16,21],our:[5,6,8,24,26,28,41],ourselv:24,out:[6,12,23,26,28,41,46,49,50,51,131],outbound:6,outboundtcpconnect:6,outgo:6,outgoingbyt:46,outlin:49,outofmemoryerror:36,output:[14,19,25,26,38,41,52,60,61,163,165],outsid:[11,20,21],over:[0,6,11,21,30,41,46,49,50,51],overal:14,overflow:[17,138],overhead:[6,30,42,46,51],overidden:49,overlap:[0,41],overload:[6,14,30],overrid:[6,23,49,51,138],overridden:[6,11],overview:[2,36,45],overwhelm:6,overwrit:[42,43],overwritten:[46,87],own:[0,6,11,12,14,21,28,30,34,41,42,46,49,94,100,107,116,168],owner:21,ownership:[41,137],p0000:21,pacif:21,packag:[26,30,31,33,35,52],packet:6,page:[6,21,26,28,29,30,43,46],paged_slic:46,pages:52,pagetimeout:52,pai:23,pair:[6,11,19,21,41,49],parallel:[29,41,131],paramet:[6,14,23,25,26,31,38,43,50,51,116,148],paranoid:6,parenthesi:[11,52],parnew:43,pars:[6,12,40,52],parser:[9,10,40],part:[0,5,6,11,13,14,18,21,25,26,28,29,30,50,51,52],parti:[25,46],partial:4,particip:[0,20],particular:[11,12,13,14,17,19,21,30,43,46,49],particularli:[12,21,49],partit:[4,6,10,13,14,30,38,41,43,46,87,94,98,116,138,164],partition:[4,10,13,14,52,64,116,131],partition_kei:[11,13],partli:13,pass:[25,28,31,52],password:[6,9,13,19,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],password_a:19,password_b:19,passwordauthent:[6,49],passwordfilepath:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],past:46,patch:[10,13,23,24,25,27,29,36],path:[5,6,16,25,34,38,41,42,43,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],patter:19,pattern:[6,19,21],paus:[6,30,116,117],pausehandoff:116,paxo:[13,46,52],peer:[6,46],peerip:46,penalti:[6,13],pend:[41,46,116,130],pending_flush:163,pendingcompact:46,pendingflush:46,pendingrangecalcul:46,pendingtask:46,pendingtasksbytable
nam:46,pennsylvania:21,peopl:[28,30],per:[0,4,6,10,11,13,23,24,28,30,38,40,41,42,46,49,52,116,139,146],percent:46,percent_repair:163,percentag:[6,46,50],percentil:46,percentrepair:46,perdiskmemtableflushwriter_0:46,perfect:14,perform:[6,11,13,19,21,24,25,27,30,31,38,41,43,46,49,50,52,131],period:[6,43,46,49,116,118],perman:[11,30,41,43],permiss:[6,9,12,29,49],permit:[6,19,40,49],persist:[4,30,38,43,49],perspect:30,pet:21,pgrep:34,phantom:32,phase:[51,52],phi:6,phone:[13,21],php:33,physic:[0,6,11,30,43,50],pick:[24,28,30,41,49,51,121],pid:[30,34],piec:[12,41,46],pile:6,pin:[6,50],ping:28,pkcs5pad:6,pkill:34,place:[5,6,16,20,23,24,28,40,41,46,49,52,116,123],placehold:[14,52],plai:[14,21],plain:4,plan:[11,24,28],platform:19,platter:[6,43],player:[14,21],playorm:32,pleas:[5,6,11,13,14,15,21,23,26,29,30],plu:[14,41,46],plug:6,pluggabl:[19,49],plugin:46,poe:21,point:[6,10,17,21,23,26,36,49,52,94,116],pointer:14,polici:[6,28,49,168],pool:[6,34,46,116,165],popul:[11,18],popular:[26,43],port:[6,26,31,36,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],portion:[43,52],posit:[4,6,10,11,21,38,46,51],possbili:6,possess:19,possibl:[6,10,11,13,14,17,19,21,25,28,29,30,38,41,43,46,49,51],post:[13,116,141],post_at:13,posted_at:13,posted_bi:11,posted_month:11,posted_tim:11,potenti:[0,6,9,11,12,14,25,41,43,49,51,138],power:6,pr3z1den7:21,practic:[11,12,13,49],pre:[6,17,21,43,49],preced:30,precis:[10,17,21,41],precondit:46,predefin:11,predict:13,prefer:[0,6,11,12,21,23,28,49,50],preferipv4stack:26,prefix:[11,12,21],prepar:[6,14,15,46],preparedstatementscount:46,preparedstatementsevict:46,preparedstatementsexecut:46,preparedstatementsratio:46,prepend:21,prerequisit:33,present:[12,13,18,46],preserv:[6,17,19],press:34,pressur:[6,46],pretti:52,prevent:[6,29,40],preview:131,previou:[6,10,11,21,41,51],previous:6,previsouli:[83,116],primari:[9,10,13,14,21,29,40,41,42,49,51],primarili:[6,11],primary_kei:[11,18],print:[52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],prior:[6,13,19,21],prioriti:28,privat:[6,23,49,50],privileg:[19,34,49],probabilist:[38,42],probabl:[6,11,29,38,41,103,116,152],problem:[5,6,14,24,25,30,49],problemat:21,proc:[6,30],proce:[25,42,51],procedur:[13,49],process:[0,6,14,24,25,26,28,29,30,34,40,42,43,46,49,51,52,56,116,117,136],prod_clust:52,produc:[13,14,41,80],product:[6,28,30,43,50],profil:[13,116,118],profileload:116,program:[14,29],progress:[23,24,28,38,45,116,170],project:[23,29,46],promin:11,prompt:52,propag:[6,11,14,23,25,50],proper:[11,21,30,49],properli:[6,25],properti:[6,11,19,33,40,41,49,50,51],propertyfilesnitch:[6,50],proport:[6,13],proportion:[6,89,116,139],propos:[6,46],protect:[6,43],protocol:[6,25,30,35,46,49,52,59,69,74,79,84,116,157],provid:[0,5,6,11,12,13,14,15,17,21,26,28,35,40,41,42,43,46,49,50,51,53,115,116,126,130],proxim:[6,50],proxyhistogr
am:116,prv:131,ps1:49,ps22dhd:13,pt89h8m53:21,pull:[29,41,46,131],purg:43,purpos:[11,12,13,21,43,49],push:[24,28,46],put:[15,28,31,41,51,107,131],pwf:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],python:[14,28,29,33,34,52],quak:[14,21],qualifi:[6,11,14,28],qualiti:49,quantiti:21,queri:[6,10,11,12,13,14,16,18,19,33,36,41,46,52,70,80,116,134],question:[8,19,36],queu:[6,46],queue:[6,46,80],quick:[107,168],quickli:[30,41],quill:32,quintana:21,quit:[41,52],quorum:[0,49,52],quot:[9,10,11,12,14,17,19,52],quotat:19,quoted_identifi:12,quoted_nam:11,race:[21,24],rack1:6,rack:[0,6,49,50],rackdc:[6,50],rackinferringsnitch:[6,50],raid0:43,raid1:43,raid5:43,rain:12,rais:[12,30],raison:9,ram:[38,42,43],random:[11,14,30,51],randomli:[0,6,51],randompartition:[6,13,14],rang:[2,6,10,11,13,21,25,41,45,46,52,60,65,102,116,121,131,151],range_slic:46,rangekeysampl:116,rangelat:46,rangemov:51,rangeslic:46,rapid:43,rare:[10,38],raspberri:43,rate:[6,11,46,49,52],ratebasedbackpressur:6,ratefil:52,rather:[13,30,41,43],ratio:[6,42,43,46],raw:[6,14],reach:[6,28,30,40,41],read:[0,6,11,13,21,23,25,29,30,33,36,38,41,42,43,45,46,49,50,52,102,151,163,168],read_lat:163,read_repair:46,read_repair_ch:[0,6,11,41,50],read_request_timeout:30,readabl:[11,62,115,163],readi:[28,49],readlat:46,readrepair:46,readrepairstag:46,readstag:46,readwrit:49,real:[8,11,23,30],realiz:41,realli:[6,29,31],reason:[0,6,13,14,15,30,31,34,41,43,49,51],rebuild:[38,41,42,46,116,122,138],rebuild_index:116,receiv:[6,14,28,30,41,43],recent:[6,28,29,43,59],reclaim:41,recogn:[13,26,28],recommend:[6,11,21,30,43,49,51],recompact:41,recompress:42,reconnect:49,record:[11,13,21,28,41],recov:[6,30,41],recoveri:6,recreat:52,recurs:80,recv:34,recycl:[6,46],redistribut:6,redo:28,reduc:[6,30,41,42,63,89,116,131,139],reduct:6,redund:[0,23,25,28,43],reenabl:[79,81,82,116],refactor:40,refer:[6,11,12,13,14,15,21,23,29,30,34,35,52],referenc:6,reflect:41,refresh:[6,49,52,116,124],refreshsizeestim:116,refus:36,regard:[11,13],regardless:[0,6,19,28],regener:38,regexp:12,region:[6,50],regist:21,registri:49,regress:[25,29],regular:[9,12,26,29,30,46,52],regularstatementsexecut:46,reinsert:138,reject:[6,13,30,40,49],rel:[6,21,52],relat:[8,10,12,13,26,28,41,46],releas:[6,10,34,52],relev:[13,19,21,28,42,49],reli:[6,14,21,30,51],reliabl:41,reload:[6,116,125,126,127,128],reloadlocalschema:116,reloadse:116,reloadssl:116,reloadtrigg:116,reloc:[116,129],relocatesst:116,remain:[6,13,14,21,24,41,46,51,163],remaind:[17,42],remedi:41,remot:[0,24,26,36,41,49,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],remov:[4,6,10,11,12,13,14,15,17,21,25,30,36,40,45,49,55,58,87,116,130],removenod:[51,55,116],renam:[9,21],reorder:6,repair:[0,4,6,11,30,36,42,45,46,50,51,107,116,132,148,168],repair_admin:116,repeat:[12,34,42,49],replac:[6,9,14,19,21,25,30,36,41,45,80],replace_address_fi
rst_boot:51,replai:[0,21,43,46,89,116,133,139],replaybatchlog:116,replic:[2,6,11,36,41,43,49,51,55,116],replica:[0,6,11,13,30,41,46,50,51,63,98,116],replication_factor:[0,11,49],repo:[24,26],report:[28,36,45],report_writ:19,reportfrequ:52,repositori:[5,8,26,28,29,34],repres:[6,10,17,19,21,30,41,46,49,50,52],represent:[10,17],request:[0,6,13,19,20,29,30,38,41,43,45,49,50,52,116,152,167],request_respons:46,requestresponsestag:46,requestschedul:6,requesttyp:46,requir:[0,6,11,13,14,19,23,24,25,26,28,30,38,42,43,49],require_client_auth:6,require_endpoint_verif:6,resampl:6,reserv:[6,10,12,15],reset:[6,13,116,135,148],reset_bootstrap_progress:51,resetfullquerylog:116,resetlocalschema:116,resid:[6,13,30,46],resolut:[6,13,30],resolv:[24,30,137,154],resort:[55,116],resourc:[19,49],resp:14,respect:[6,10,14,34,50,80],respond:[0,6,12],respons:[0,6,19,30,46,51],ressourc:21,rest:[6,11,12,21,25,51],restart:[30,41,49,51,116,123,141],restor:[41,51,52],restrict:[10,11,13,18,19],result:[0,6,8,10,11,12,14,17,19,21,28,30,41,46,52],resum:[56,116,136],resumehandoff:116,resurrect:41,resync:[116,135],retain:[30,41],rethrow:23,retri:[0,6,21,46,80],retriev:[11,13,19],reus:25,revers:13,review:[11,23,27,28,29,36],revok:[9,49],revoke_permission_stat:12,revoke_role_stat:12,rewrit:[38,41,42,116,138,167],rewritten:[43,138],rfc:[14,21],rhel:36,rich:21,rider:21,riderresult:21,right:[6,26,30,52],ring:[2,6,36,49,51,52,112,114,116,148],risk:11,rmem_max:6,rmi:[30,49],robin:6,rogu:14,role:[6,9,10,12,15,45],role_a:19,role_admin:19,role_b:19,role_c:19,role_manag:49,role_nam:19,role_opt:19,role_or_permission_stat:12,role_permiss:6,roll:[30,49,80],roll_cycl:80,romain:21,root:[6,24,28,34],rotat:6,roughli:6,round:[6,13,41,46],roundrobin:6,roundrobinschedul:6,rout:[6,50],row:[0,4,6,10,11,13,14,15,17,18,29,35,38,42,43,46,52,87,107,111,116,138,140,141],rowcach:46,rowcachehit:46,rowcachehitoutofrang:46,rowcachemiss:46,rowindexentri:46,rows_per_partit:11,rpc:[6,46],rpc_min:6,rpc_timeout_in_m:[102,151],rsc:168,rubi:[14,33],rule:[6,12,14,28,30],run:[5,6,12,21,24,26,28,30,31,34,41,43,46,49,51,107,131],runtim:[6,33,96,116],runtimeexcept:23,rust:33,safe:[6,14,21,41,49],safeguard:43,safeti:[41,51],sai:36,said:[11,28,30,116,167],same:[0,5,6,11,12,13,14,15,17,18,19,21,24,26,28,31,36,38,41,46,49,50,131],sampl:[4,6,12,14,46,52,80,116,118,120,164],sampler:[46,118,164],san:43,sandbox:[6,14],satisfi:[0,23,43,46,51],satur:[6,46],save:[6,13,21,30,31,38,42,43,51,116,141],saved_cach:6,saved_caches_directori:31,sbin:30,scala:[14,33],scalar:15,scale:[6,29,42],scan:[6,13,38,46],scenario:24,scene:30,schedul:6,schema:[0,9,11,14,17,46,52,64,116,125,135],schema_own:19,scope:[19,46,49],score:[6,14,21,50],script:[6,14,26,29,80],scrub:[38,41,42,46,116,160],search:28,second:[6,11,12,13,21,30,40,43,49,52,116,139,146],secondari:[10,12,13,15,36,41,46,116,122],secondary_index_stat:12,secondaryindexmanag:46,section:[2,5,7,10,11,12,13,15,19,21,30,33,34,35,41,46,49,51,53],secur:[6,14,15,36,45],see:[0,4,6,10,11,12,13,14,17,19,21,26,28,35,36,40,41,46,49,51,52,87,116,131],seed:[6,31,36,50,99,116,126],seedprovid:6,seek:[6,43,46],seen:[6,11],segment:[4,6,40,46,52,80],select:[6,9,10,11,12,14,15,19,26,29,30,35,38,41,49,52,121],select_claus:13,select_stat:[12,18],self:25,selinux:30,semant:[10,13,14],semi:30,send:[6,8,30],sens:[6,10,13,15,30],sensic:14,sensit:[11,12,14,17],sensor:21,sent:[0,6,21,30,46],separ:[4,6,11,13,23,28,31,41,43,49,51,52,55,57,58,60,65,67,73,76,77,83,86,87,90,94,98,100,102,106,107,114,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149
,150,151,152,153,154,155,160,162,163,164,166,167,168,170],seq:[6,131],sequenc:12,sequenti:[6,43,131],seren:13,seri:[11,41,52],serial:6,serializingcacheprovid:6,serv:[13,43,49],server:[6,12,13,21,26,29,30,43,46,49],server_encryption_opt:49,servic:[6,26,34,49,51],session:[6,19,49,116,132],set:[0,6,9,10,11,12,13,14,17,18,25,27,28,29,31,36,38,40,41,42,43,46,49,50,51,52,57,76,87,116,129,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,167],set_liter:21,setbatchlogreplaythrottl:116,setcachecapac:116,setcachekeystosav:116,setcompactionthreshold:[41,116],setcompactionthroughput:[41,116],setconcurrentcompactor:116,setconcurrentviewbuild:116,sethintedhandoffthrottlekb:116,setint:14,setinterdcstreamthroughput:116,setlogginglevel:116,setlong:14,setmaxhintwindow:116,setstr:14,setstreamthroughput:116,setter:[19,23],settimeout:116,settraceprob:116,setup:[28,29,49],sever:[4,13,19,41,49],sfunc:[9,14],sha:24,shadow:41,share:[11,13,26],sharedpool:52,sharp:32,shed:30,shell:[35,36,53],shift:21,ship:[29,35,49,52],shortcut:18,shorter:49,shorthand:52,should:[0,5,6,10,11,12,13,14,17,19,21,25,26,28,29,30,31,32,33,35,38,41,42,43,46,49,50,51,52,121,131,151],shouldn:11,show:[19,36,51,65,85,104,116,120,130,137,154,155,163,170],shown:[12,52,163],shrink:6,shut:6,shutdown:[6,43],side:[11,13,17,21,49],sign:[13,21,30],signal:[116,127],signatur:[34,40],signific:[6,26,28,29,43],significantli:6,silent:14,similar:[6,13,14,42,43],similarli:[0,10,17,23,43,116,121],simpl:[6,11,26,29,49],simple_classnam:29,simple_select:13,simplequerytest:29,simplereplicationstrategi:49,simpleseedprovid:6,simplesnitch:[6,50],simplestrategi:11,simpli:[0,6,11,13,14,17,21,26,29,41,43,46,51,168],simul:29,simultan:[6,43,52,57,87,129,138,167],sinc:[6,11,13,14,21,26,30,34,41,46,51],singl:[0,6,10,11,12,13,14,17,18,19,21,23,28,31,35,36,45,46,49,50,52,60],singleton:25,situat:[6,29,41],size:[4,6,11,21,23,30,31,38,40,42,43,45,46,49,52,80,113,116],size_estim:[116,124],sizetieredcompactionstrategi:[11,41],skip:[6,13,46,51,52,138,153],skipcol:52,skiprow:52,sks:34,sla:25,slash:12,slf4j:23,slightli:6,slow:[6,50],slower:[6,11,38],slowest:6,slowli:[6,21],small:[6,11,13,21,30,41,43],smaller:[6,30,41,43,52],smallest:[0,11,14,46],smallint:[9,10,14,17,21],smith:21,smoother:10,smoothli:6,snappi:6,snappycompressor:[11,42],snapshot:[6,26,46,58,113,116,138],snapshot_nam:58,snapshotnam:[58,116],snitch:[6,36,45,64,116],socket:[6,49,151],sole:11,solid:[6,43],some:[0,6,9,11,12,13,14,21,26,28,29,30,31,40,41,42,46,49,51,52],some_funct:14,some_nam:12,someaggreg:14,somearg:14,somefunct:14,someon:[24,41],sometim:[6,12,13],someudt:14,somewher:34,soon:49,sooner:6,sort:[4,11,13,21,41,43,163],sort_kei:163,sourc:[5,6,8,14,27,34,46,121],source_elaps:52,space:[6,23,30,40,41,43,46],space_used_by_snapshots_tot:163,space_used_l:163,space_used_tot:163,span:[6,13,41],sparingli:13,spark:32,spec:[25,35,46,52],speci:[11,18],special:[12,13,29,30,41,46],specif:[6,9,11,12,13,19,21,26,28,30,32,40,41,46,49,52,116,121,131],specifc:46,specifi:[0,6,10,11,12,13,14,16,18,19,21,26,30,35,40,41,42,46,49,51,52,58,60,100,116,121,131,137,149,151,153,160,163,166],specific_dc:131,specific_host:131,specific_keyspac:121,specific_sourc:121,specific_token:121,specul:[0,46],speculativeretri:46,speed:[6,36],spent:46,spike:30,spin:[6,43],spindl:6,spirit:[6,50],split:[23,30,41,46,52,60],spread:[6,50],sql:[13,15],squar:12,squash:28,src:121,ssd:[6,16,43],ssl:[6,30,45,52,116,127],ssl_storage_port:50,sss:17,sstabl:[2,6,11,30,38,42,43,45,57,60,87,100,107,116,123,129,138,167,168],sstable_compression_ratio:163,sstable_co
unt:163,sstable_s:41,sstable_size_in_mb:41,sstableexpiredblock:41,sstablesperreadhistogram:46,sstablewrit:23,stabil:28,stabl:[34,52],stack:6,stage:28,stai:[36,41],stale:49,stall:[6,51],stand:[6,29],standalon:29,standard:[6,21,30,34,46],start:[0,6,9,13,27,30,31,34,36,41,43,46,49,51,60,131,160],start_token:[60,131],start_token_1:121,start_token_2:121,start_token_n:121,starter:28,startup:[6,20,26,30,41,46,51],starvat:6,state:[6,14,38,41,43,46,51,116,154],statement:[6,9,10,11,13,14,15,16,17,19,20,21,25,27,28,38,41,46,49,52],static0:11,static1:11,statist:[4,41,46,52,62,88,116,119,162,163,165],statu:[19,25,28,30,34,52,116,130,155,156,157,158,159,168],statusautocompact:116,statusbackup:116,statusbinari:116,statusgossip:116,statushandoff:116,stc:11,stdin:52,stdout:52,step:[6,26,31,49],still:[0,6,10,13,14,17,21,23,49,51,52],stop:[6,34,52,75,116,134,161],stop_commit:6,stop_paranoid:6,stopdaemon:116,storag:[2,11,15,16,28,30,36,42,43,45],storage_port:[31,50],storageservic:[6,23],store:[0,4,6,10,11,12,13,21,36,38,41,42,43,46,49,52,72,80,82,116,159],store_typ:6,straight:51,straightforward:40,strategi:[0,6,11,45,50],stream:[4,6,36,41,42,45,56,95,101,116,121,131,147,148,150,151],street:21,strength:6,strict:[10,41],strictli:[8,11,14],string:[6,10,11,12,13,14,16,17,19,20,21,46,52,100],strong:0,strongli:[6,11,12,49],structur:[4,6,9,19,25,38,46],stub:49,style:[6,25,26,27,28,29,36],stype:[9,14],sub:[11,13,21,34,41],subclass:6,subdirectori:[6,20],subject:[6,14,49],submiss:[6,28],submit:[28,29,36,60],subscrib:8,subscript:8,subsequ:[6,13,30,41,42],subset:[19,41,52],substitut:34,subsystem:49,subvert:41,succed:46,succesfulli:46,success:[0,52],sudden:6,sudo:[30,34],suffici:[6,43],suggest:[12,28,43],suit:[6,28,29,49],suitabl:[13,14,25,28],sum:40,summari:[4,6,46],sun:[23,49],sunx509:6,supercolumn:9,supersed:[10,138],superus:[9,19,49],suppli:[13,24],support:[0,6,9,10,11,12,13,14,15,16,18,19,21,28,29,30,32,36,41,49,52,138,160],suppos:13,sure:[6,8,23,26,28,29,30,31,34,41],surplu:30,surpris:0,surprisingli:6,surround:[17,52],suscept:14,suspect:[5,28],suspend:26,swamp:30,swap:6,symmetri:17,symptom:30,sync:[6,30,46,131],synchron:6,synonym:19,synopsi:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],syntact:[11,19],syntax:[10,12,13,14,19,21,41,42],sys:6,sysctl:30,sysintern:6,system:[6,11,14,19,29,30,31,35,41,43,46,49,52,91,92,93,95,101,107,116,123,124,125,143,144,145,147,150],system_auth:[6,49],system_trac:131,tab:23,tabl:[0,4,6,9,10,12,13,14,15,16,17,18,19,20,21,29,38,41,42,45,49,52,57,60,67,75,77,86,87,90,94,98,107,116,122,123,125,129,131,138,142,153,155,160,162,163,167,168],table1:19,table_nam:[11,13,16,19,20,41,163],table_opt:[11,18],tablehistogram:116,tablestat:116,tag:[21,25,28,153],take:[6,10,11,13,14,21,25,26,28,30,38,41,42,43,51,116,153],taken:[6,40,41,46],tar:34,tarbal:[31,33,52],target:[11,19,26,29,41],task:[6,26,28,46,52],tcp:[6,30],tcp_keepalive_intvl:30,tcp_keepalive_prob:30,tcp_keepalive_tim:30,tcp_nodelai:6,tcp_wmem:6,teach:[6,50],team:30,technetwork:6,technic:[11,15],technot:6,tee:34,tell:[6,13,25,30,31,46],temporari:49,temporarili:6,tenanc:6,tend:[6,30,43],tendenc:6,terabyt:42,term:[6,13,14,15,18,21],termin:[12,52],ternari:23,test:[6,8,23,25,27,28,3
5,36,43,52],test_keyspac:49,testabl:[25,28],testbatchandlist:29,testmethod1:29,testmethod2:29,testsom:29,teststaticcompactt:29,text:[4,9,11,12,13,14,17,21,40,42,49],than:[0,6,11,12,13,14,15,18,21,23,28,36,41,42,43,49,50,51,132,144,145],thei:[6,9,10,11,12,13,14,15,18,19,21,23,25,28,29,36,38,41,42,43,46,49],them:[6,10,11,13,14,21,23,28,29,30,35,38,41,46,49,116,167],themselv:[13,19],theoret:11,therefor:[28,29,49],thi:[0,2,4,5,6,7,10,11,12,13,14,15,17,18,19,21,23,24,25,26,28,29,30,31,33,34,36,38,40,41,42,43,46,49,50,51,52,53,54,55,57,58,60,63,65,67,73,77,83,86,87,89,90,94,98,100,102,106,107,114,116,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],thing:[6,21,24,28,30,33,41],think:6,third:[21,25,46],thobb:52,those:[11,12,13,14,16,17,18,19,21,28,30,40,41,49,52,167],though:[6,10,12,21,36,41,42,46],thousand:52,thousandssep:52,thread:[6,43,46,49,57,87,116,129,131,138,146,165,167],threadpool:45,threadpoolnam:46,threadprioritypolici:26,three:[0,6,38,41,42,49,52],threshold:[4,40,43,50,90,116,142,148],thrift:[6,9,11,15,30,46],throttl:[6,89,116,139,143,146,147,150],throttle_limit:6,through:[0,5,9,10,11,12,13,26,28,30,35,40,41,52],throughout:49,throughput:[0,6,41,42,43,46,91,95,101,116,143,147,150],throwabl:[25,29],thrown:21,thu:[6,10,11,12,13,18,21,30,46,50,51,116,167],thumb:[6,28],thusli:21,tib:[62,115,163],ticket:[5,24,25,28,29,40],tie:30,tier:45,ties:13,tighter:6,tightli:6,tild:52,time:[0,6,8,9,10,11,12,13,15,16,17,18,23,25,26,28,29,30,38,40,42,45,46,49,52,116,118],timehorizon:6,timelin:11,timeout:[6,21,30,46,52,102,116,151],timeout_in_m:151,timeout_typ:[102,151],timer:[6,46],timestamp:[4,9,10,11,13,14,15,17,36,41,52,138],timeunit:41,timeuuid:[9,10,11,17,21],timewindowcompactionstrategi:11,timezon:[17,52],tini:[6,41],tinyint:[9,10,14,17,21],tjake:23,tls_dhe_rsa_with_aes_128_cbc_sha:6,tls_dhe_rsa_with_aes_256_cbc_sha:6,tls_ecdhe_rsa_with_aes_128_cbc_sha:6,tls_ecdhe_rsa_with_aes_256_cbc_sha:6,tls_rsa_with_aes_128_cbc_sha:6,tls_rsa_with_aes_256_cbc_sha:6,toc:4,todai:12,todat:14,todo:[25,29],togeth:[6,11,13,14,41],toggl:49,tojson:15,token:[2,4,6,9,10,12,13,30,41,46,52,60,65,107,108,114,116,121,131,137,168],toler:38,tom:13,tombston:[4,6,11,17,30,45,46,87,138],tombstone_compaction_interv:41,tombstone_threshold:41,tombstonescannedhistogram:46,ton:29,too:[6,11,12,14,21,25,41],tool:[6,12,28,30,36,41,46,49,51],top:[13,21,28,36,46,118,163,164],topcount:[118,164],topic:52,topolog:[6,50,137],toppartit:116,total:[6,13,40,41,46],totalblockedtask:46,totalcommitlogs:46,totalcompactionscomplet:46,totaldiskspaceus:46,totalhint:46,totalhintsinprogress:46,totallat:46,totimestamp:14,touch:[8,30,41],tough:29,tounixtimestamp:14,tour:21,toward:11,tpstat:116,trace:[6,46,103,116,131,152],track:[6,41,46],tracker:28,tradeoff:[0,6],tradit:[41,42],traffic:[6,50],trail:23,transact:[13,20,46,160],transfer:[6,30,49],transform:13,transit:[10,19],translat:6,transpar:[6,30],transport:[6,26,46,69,79,116,157],treat:[0,6,10,30,50],tree:[26,46],tri:41,trigger:[4,6,9,12,15,36,38,42,45,57,116,128],trigger_nam:20,trigger_stat:12,trip:[6,13],trivial:49,troubleshoot:[25,36],truesnapshotss:46,truli:9,truncat:[6,9,10,15,19,102,116,151,166],truncate_stat:12,truncatehint:116,trunk:[24,25,26,28],trust:49,trustor:6,truststor:[6,49],truststore_password:6,truststorepassword:49,tserverfactori:6,ttl:[4,6,9,10,11,14,17,21,45,138],tty:52,tunabl:2,tune:[30,38,41,43],tupl:[6,9,10,12,13,14,15,17],tuple_liter:[12,13],tuple_typ:21,tuplevalu:[10,14],turn:[0,6,28,30,49],twc:[11,
41],twice:[6,21],two:[0,6,11,12,13,14,17,26,36,38,41,43,49,50,52],txt:[4,14,24,25,28],type:[0,6,10,11,12,13,14,15,19,25,34,36,43,45,49,52,102,116,151,160],type_hint:12,typeasblob:14,typecodec:14,typic:[0,6,13,30,38,41,43,46,49,52],ubuntu:26,udf:[6,14],udf_stat:12,udfcontext:[10,14],udt:[14,17],udt_liter:12,udt_nam:21,udt_stat:12,udtarg:14,udtnam:14,udtvalu:[10,14],ulimit:30,unabl:[6,25,36],unaffect:21,unavail:[6,11,46,49,51],unblock:46,unbound:21,unchecked_tombstone_compact:41,uncom:[6,46,49],uncommon:28,uncompress:[6,42,46],undelet:41,under:[6,21,23,29,46,49],underli:[6,18,41,49],understand:[6,28,30],unencrypt:[6,49],unexpectedli:21,unfinishedcommit:46,unflush:[40,153],unfortun:29,uniqu:[11,14,21],unit:[21,25,27,41,116,140],unixtimestampof:[10,14],unless:[6,11,13,16,18,19,21,23,40,49,50],unlik:[6,10,13,21],unlimit:[6,30,52],unlog:9,unnecessari:[25,51],unnecessarili:40,unpredict:13,unprepar:46,unquot:12,unquoted_identifi:12,unquoted_nam:11,unrel:28,unreleas:28,unrepair:45,unsecur:49,unset:[6,10,13,17],unsign:21,unspecifi:6,unsubscrib:[8,36],untar:34,until:[0,6,21,38,40,41,42,49,50],unus:6,unusu:25,updat:[6,9,10,11,12,14,15,17,18,19,21,25,28,29,34,36,41,42,46,49,52],update_paramet:13,update_stat:[12,13],upgrad:[6,41,116,167],upgradesst:[38,41,42,116],upload:28,upon:[6,21,38,42],upper:[12,17,41,49],ups:43,upstream:28,uptim:[108,116],url:24,usag:[4,6,11,21,36,38,40,42,46,52],use:[6,9,10,11,12,13,14,16,17,18,19,21,23,25,26,28,29,31,34,35,36,38,40,41,43,46,49,50,51,52,57,87,99,116,118,129,138,164,167],use_stat:12,usecas:41,useconcmarksweepgc:26,usecondcardmark:26,used:[0,4,6,9,10,11,12,13,14,15,16,17,18,19,21,25,26,28,29,30,41,43,46,49,50,51,52,55,57,58,60,65,67,73,76,77,83,86,87,90,94,98,100,102,106,107,114,116,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],useecassandra:49,useful:[0,6,11,14,28,41,42,46,51,52,55,57,58,60,65,67,73,77,83,86,87,90,94,98,100,102,106,107,114,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],useparnewgc:26,user1:13,user2:13,user3:13,user4:13,user:[5,6,8,9,10,11,12,13,15,16,17,18,25,28,30,34,38,41,42,43,49,52,60,76,116],user_count:13,user_defined_typ:21,user_funct:19,user_nam:13,user_occup:13,user_opt:19,useract:13,userid:[11,13,14],userindex:16,usernam:[6,13,14,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],uses:[0,4,6,11,12,13,14,16,19,20,29,30,49],usethreadprior:26,using:[4,6,10,11,12,13,14,18,19,21,26,28,29,33,34,35,36,38,42,43,45,46,49,51,52,60,121,138,153],usr:52,usual:[6,13,21,24,29,38,49,131],utc:[17,52],utd:11,utf8:[21,52],utf8typ:9,utf:52,util:[14,25,41,52],uuid:[9,10,11,12,17,21],val0:11,val1:11,val:14,valid:[6,10,11,12,13,14,17,21,30,41,42,46,49,52,131,138,160],validationexecutor:46,valu:[6,9,10,11,12,13,14,16,17,21,25,26,30,38,41,46,49,50,52,76,103,107,116,139,143,144,145,146,147,149,150,151,152],value1:13,value2:13,value_in_kb_per_sec:[139,146],value_in_m:149,value_in_mb:[143,147,150],valueof:14,varchar:[9,11,14,17,21],vari:[6,42],variabl:[6,10,12,17,21,26,33],variant:12,varieti:40,varint:[9,11,14,17,21],variou:[
26,29,43,49],veri:[6,11,13,28,29,30,38,41,42,43],verifi:[28,30,32,34,42,107,116,160],version:[5,6,9,11,14,15,21,26,28,32,34,41,46,51,59,64,74,84,116,167,168],vertic:52,via:[6,8,10,19,25,30,31,41,42,43,46,49,50],view:[6,10,11,12,15,19,36,46,52,93,116,145,170],view_nam:18,viewbuildstatu:116,viewlockacquiretim:46,viewmutationstag:46,viewpendingmut:46,viewreadtim:46,viewreplicasattempt:46,viewreplicassuccess:46,viewwrit:46,viewwritelat:46,virtual:[0,6,30,41,46,51],visibl:[11,19,23,38],vnode:[6,42],volum:[6,40,42],vulner:[6,49],wai:[4,6,12,15,17,18,21,24,26,29,30,41,42,131],wait:[0,6,11,28,30,46,116,133],waitingoncommit:46,waitingonfreememtablespac:46,waitingonsegmentalloc:46,want:[6,11,13,26,28,29,30,49,51],warmup:[116,141],warn:[6,11,23,29,45,131],washington:21,wasn:10,wast:6,watch:29,weaker:0,websit:[29,34],week:21,weight:[6,46,80],welcom:8,well:[6,11,13,14,17,21,25,26,40,42,43,49,50,116,134],went:46,were:[6,9,10,19,25,26,41,46],what:[11,13,21,27,29,31,36,41,43,49,52],whatev:[10,13,30],whedon:13,when:[4,6,9,10,11,12,13,14,15,16,17,19,21,23,25,28,29,31,36,38,40,42,43,45,46,49,50,51,52,55,57,58,60,63,65,67,73,77,83,86,87,90,94,98,100,102,106,107,114,118,121,122,123,129,130,131,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,160,162,163,164,166,167,168,170],where:[0,4,6,9,10,11,12,14,16,17,18,19,21,25,29,31,34,38,41,42,49,51,52,80,131],where_claus:13,wherea:[21,49],whether:[0,6,9,11,13,26,41,50,52,80],which:[0,4,5,6,10,11,12,13,14,15,16,18,19,20,21,28,29,30,31,34,35,38,40,41,42,43,46,49,50,51,60,94,98,107,121,131],whichev:[0,6],whitelist:49,whitespac:27,who:[19,28,30],whole:[6,11,13,14,21,41],whose:[11,21,160],why:[25,28,36],wide:[4,40],width:12,wiki:[6,26],wildcard:[13,19],window:[0,6,45,46,49,97,105,116,149],winner:30,wip:[26,28],wipe:[30,51],wire:30,wise:11,wish:[6,41,46],within:[0,4,6,11,12,13,16,28,30,41,43,46,49],withing:6,without:[6,11,12,13,14,19,21,24,26,28,29,30,40,43,46,49,52,55,107,116,123],wmem_max:6,won:[6,13,24],wont:41,word:[10,11,12,18,19,21,30],work:[6,10,11,14,15,17,23,24,26,27,29,30,41,43,46,49,50,51,52],worker:52,workload:[6,25,38,41,43],workspac:26,worktre:26,worri:[28,30],wors:[6,50],worst:[6,28],worthwhil:6,would:[6,12,13,14,17,19,26,28,29,36,41,42,43,49,50],wrap:50,write:[0,4,6,10,11,13,21,23,25,29,30,40,41,42,43,46,49,50,51,52,75,102,116,151,163],write_lat:163,write_request_timeout:30,writelat:46,writer:[6,23],writetim:[9,14],writetimeoutexcept:6,written:[4,6,20,30,38,41,42,46],wrong:6,wrte:46,www:[6,11,34],xlarg:43,xml:31,xmn220m:26,xms1024m:26,xmx1024m:26,xmx:43,xss256k:26,xvf:34,yaml:[6,14,31,34,46,49,50,51,61,76,80,116,134,163,165],year:[13,21],yes:[9,11,49],yet:[11,46],yield:[13,51],you:[5,6,8,10,11,12,13,14,16,17,18,20,21,23,24,26,27,29,30,31,32,33,34,35,36,41,46,49,50,51,52,55,116,153],younger:14,your:[0,5,6,8,10,11,12,23,26,28,29,30,31,34,36,41,43,49,50,52],yourself:[24,29],yyyi:[17,21],z_0:[11,16,18],zero:[6,10,30,46,50],zip:21,zipcod:21,zone:[6,21,50],zzzzz:28},titles:["Dynamo","Guarantees","Architecture","Overview","Storage Engine","Reporting Bugs and Contributing","Cassandra Configuration File","Configuring Cassandra","Contact us","Appendices","Changes","Data Definition","Definitions","Data Manipulation","Functions","The Cassandra Query Language (CQL)","Secondary Indexes","JSON Support","Materialized Views","Security","Triggers","Data Types","Data Modeling","Code Style","How-to Commit","Review Checklist","Building and IDE Integration","Cassandra Development","Contributing Code Changes","Testing","Frequently Asked 
Questions","Configuring Cassandra","Client drivers","Getting Started","Installing Cassandra","Inserting and querying","Welcome to Apache Cassandra’s documentation!","Backups","Bloom Filters","Bulk Loading","Change Data Capture","Compaction","Compression","Hardware Choices","Hints","Operating Cassandra","Monitoring","Read repair","Repair","Security","Snitch","Adding, replacing, moving and removing nodes","cqlsh: the CQL shell","Cassandra Tools","Nodetool","assassinate","bootstrap","cleanup","clearsnapshot","clientstats","compact","compactionhistory","compactionstats","decommission","describecluster","describering","disableauditlog","disableautocompaction","disablebackup","disablebinary","disablefullquerylog","disablegossip","disablehandoff","disablehintsfordc","disableoldprotocolversions","drain","enableauditlog","enableautocompaction","enablebackup","enablebinary","enablefullquerylog","enablegossip","enablehandoff","enablehintsfordc","enableoldprotocolversions","failuredetector","flush","garbagecollect","gcstats","getbatchlogreplaythrottle","getcompactionthreshold","getcompactionthroughput","getconcurrentcompactors","getconcurrentviewbuilders","getendpoints","getinterdcstreamthroughput","getlogginglevels","getmaxhintwindow","getreplicas","getseeds","getsstables","getstreamthroughput","gettimeout","gettraceprobability","gossipinfo","handoffwindow","help","import","info","invalidatecountercache","invalidatekeycache","invalidaterowcache","join","listsnapshots","move","netstats","Nodetool","pausehandoff","profileload","proxyhistograms","rangekeysample","rebuild","rebuild_index","refresh","refreshsizeestimates","reloadlocalschema","reloadseeds","reloadssl","reloadtriggers","relocatesstables","removenode","repair","repair_admin","replaybatchlog","resetfullquerylog","resetlocalschema","resumehandoff","ring","scrub","setbatchlogreplaythrottle","setcachecapacity","setcachekeystosave","setcompactionthreshold","setcompactionthroughput","setconcurrentcompactors","setconcurrentviewbuilders","sethintedhandoffthrottlekb","setinterdcstreamthroughput","setlogginglevel","setmaxhintwindow","setstreamthroughput","settimeout","settraceprobability","snapshot","status","statusautocompaction","statusbackup","statusbinary","statusgossip","statushandoff","stop","stopdaemon","tablehistograms","tablestats","toppartitions","tpstats","truncatehints","upgradesstables","verify","version","viewbuildstatus","Troubleshooting"],titleterms:{"class":50,"function":[13,14,17],"import":[23,107],"long":29,"new":30,"static":11,"switch":41,Adding:51,IDE:26,IDEs:23,LCS:41,TLS:49,The:[11,13,15,17,41],USE:11,Use:42,Uses:42,Using:26,Will:30,With:49,access:49,add:30,address:30,advanc:42,after:51,aggreg:14,alias:13,all:[19,30],alloc:51,allocate_tokens_for_keyspac:6,allow:13,alter:[11,18,19,21],ani:30,apach:36,appendic:9,appendix:9,architectur:2,ask:30,assassin:55,assign:51,auth:49,authent:[6,19,49],author:[6,49],auto_snapshot:6,automat:19,avg:14,back_pressure_en:6,back_pressure_strategi:6,backup:37,batch:[13,30],batch_size_fail_threshold_in_kb:6,batch_size_warn_threshold_in_kb:6,batchlog_replay_throttle_in_kb:6,befor:28,benefit:42,binari:34,blob:[14,30],bloom:38,boilerpl:23,bootstrap:[30,41,51,56],branch:28,broadcast_address:6,broadcast_rpc_address:6,buffer_pool_use_heap_if_exhaust:6,bufferpool:46,bug:[5,28],build:26,bulk:[30,39],cach:[11,46,49],call:30,can:30,captur:[40,52],cas_contention_timeout_in_m:6,cassandra:[6,7,15,17,26,27,29,30,31,34,36,40,45,49,53],cast:14,cdc:40,cdc_enabl:6,cdc_free_space_check_interval_m:6,cdc_raw_directori:6,c
dc_total_space_in_mb:6,chang:[10,28,30,31,38,40,41],characterist:21,checklist:25,choic:43,choos:28,circleci:29,claus:13,cleanup:[51,57],clear:52,clearsnapshot:58,client:[32,35,46,49],client_encryption_opt:6,clientstat:59,clojur:32,cloud:43,cluster:[11,30],cluster_nam:6,code:[23,28],collect:[21,41],column:11,column_index_cache_size_in_kb:6,column_index_size_in_kb:6,command:[26,41,52],comment:12,commit:24,commit_failure_polici:6,commitlog:[4,46],commitlog_compress:6,commitlog_directori:6,commitlog_segment_size_in_mb:6,commitlog_sync:6,commitlog_sync_batch_window_in_m:6,commitlog_sync_period_in_m:6,commitlog_total_space_in_mb:6,commitlogseg:40,common:[11,41,43],compact:[9,11,41,46,60],compaction_large_partition_warning_threshold_mb:6,compaction_throughput_mb_per_sec:6,compactionhistori:61,compactionstat:62,compactionstrategi:41,compat:52,compress:[11,42],concern:41,concurrent_compactor:6,concurrent_counter_writ:6,concurrent_materialized_view_writ:6,concurrent_read:6,concurrent_writ:6,condition:19,configur:[6,7,31,40,42],connect:30,consider:11,consist:[0,52],constant:12,contact:8,contribut:[5,28],control:19,convent:[12,23],convers:14,copi:52,count:14,counter:[13,21],counter_cache_keys_to_sav:6,counter_cache_save_period:6,counter_cache_size_in_mb:6,counter_write_request_timeout_in_m:6,cpu:43,cql:[9,15,46,52],cqlsh:[35,52],cqlshrc:52,creat:[11,14,16,18,19,20,21,28],credenti:19,credentials_update_interval_in_m:6,credentials_validity_in_m:6,cross_node_timeout:6,cstar_perf:29,custom:21,cython:52,data:[11,13,17,19,21,22,30,40,41,51],data_file_directori:6,databas:19,date:21,dead:51,debian:34,debug:26,decommiss:63,defin:[14,21],definit:[11,12],defragment:41,delet:[13,30,41],depend:52,describ:[52,65],describeclust:64,detail:41,detect:0,develop:27,dies:30,directori:[31,41],disabl:40,disableauditlog:66,disableautocompact:67,disablebackup:68,disablebinari:69,disablefullquerylog:70,disablegossip:71,disablehandoff:72,disablehintsfordc:73,disableoldprotocolvers:74,disk:[30,43],disk_failure_polici:6,disk_optimization_strategi:6,document:36,doe:30,drain:75,driver:[32,35],drop:[9,11,14,16,18,19,20,21,30],droppedmessag:46,dtest:29,durat:21,dynam:50,dynamic_snitch_badness_threshold:6,dynamic_snitch_reset_interval_in_m:6,dynamic_snitch_update_interval_in_m:6,dynamo:0,each:30,eclips:26,email:30,enabl:[40,49],enable_materialized_view:6,enable_scripted_user_defined_funct:6,enable_user_defined_funct:6,enableauditlog:76,enableautocompact:77,enablebackup:78,enablebinari:79,enablefullquerylog:80,enablegossip:81,enablehandoff:82,enablehintsfordc:83,enableoldprotocolvers:84,encod:17,encrypt:49,endpoint_snitch:6,engin:4,entri:30,environ:31,erlang:32,error:30,even:30,except:23,exist:30,exit:52,expand:52,expir:41,factor:30,fail:[30,51],failur:[0,30],failuredetector:85,file:[6,23,34],file_cache_size_in_mb:6,filedescriptorratio:46,filter:[13,38],fix:28,flush:86,format:23,frequent:30,from:[26,30,34,52],fromjson:17,fulli:41,further:40,garbag:41,garbagecollect:87,garbagecollector:46,gc_grace_second:41,gc_log_threshold_in_m:6,gc_warn_threshold_in_m:6,gcstat:88,gener:23,get:33,getbatchlogreplaythrottl:89,getcompactionthreshold:90,getcompactionthroughput:91,getconcurrentcompactor:92,getconcurrentviewbuild:93,getendpoint:94,getinterdcstreamthroughput:95,getlogginglevel:96,getmaxhintwindow:97,getreplica:98,getse:99,getsstabl:100,getstreamthroughput:101,gettimeout:102,gettraceprob:103,give:30,gossip:0,gossipinfo:104,grace:41,grant:19,group:13,guarante:1,handl:23,handoffwindow:105,hang:51,happen:30,hardwar:43,haskel:32,heap:30,help:[52,10
6],hint:44,hinted_handoff_disabled_datacent:6,hinted_handoff_en:6,hinted_handoff_throttle_in_kb:6,hintedhandoff:46,hints_compress:6,hints_directori:6,hints_flush_period_in_m:6,host:[30,52],how:[24,30],idea:26,identifi:12,impact:42,incremental_backup:6,index:[16,46],index_summary_capacity_in_mb:6,index_summary_resize_interval_in_minut:6,info:108,initial_token:6,insert:[13,17,35],instal:34,integr:[26,49],intellij:26,inter:49,inter_dc_stream_throughput_outbound_megabits_per_sec:6,inter_dc_tcp_nodelai:6,intern:[19,49],internode_authent:6,internode_compress:6,internode_recv_buff_size_in_byt:6,internode_send_buff_size_in_byt:6,invalidatecountercach:109,invalidatekeycach:110,invalidaterowcach:111,irc:8,java:[30,32],jconsol:30,jmx:[30,41,46,49],join:[30,112],json:17,jvm:46,kei:[11,16,18],key_cache_keys_to_sav:6,key_cache_save_period:6,key_cache_size_in_mb:6,keyspac:[11,30,46],keyword:[9,12],lang:30,languag:15,larg:30,level:[0,41],limit:13,line:[26,52],list:[8,19,21,30],listen:30,listen_address:[6,30],listen_interfac:6,listen_interface_prefer_ipv6:6,listen_on_broadcast_address:6,listsnapshot:113,liter:21,live:30,load:[30,39],locat:31,log:[30,31,41],login:52,lot:30,made:30,mail:8,main:31,major:41,manipul:13,manual:51,map:[16,21,30],materi:18,max:[14,30],max_hint_window_in_m:6,max_hints_delivery_thread:6,max_hints_file_size_in_mb:6,max_value_size_in_mb:6,maxtimeuuid:14,mean:30,memori:[30,43,46],memorypool:46,memtabl:4,memtable_allocation_typ:6,memtable_cleanup_threshold:6,memtable_flush_writ:6,memtable_heap_space_in_mb:6,memtable_offheap_space_in_mb:6,merg:41,messag:30,method:30,metric:46,min:14,minor:41,mintimeuuid:14,model:22,monitor:[46,51],more:[30,41],move:[51,114],movement:51,multilin:23,nativ:[14,21],native_transport_max_concurrent_connect:6,native_transport_max_concurrent_connections_per_ip:6,native_transport_max_frame_size_in_mb:6,native_transport_max_thread:6,native_transport_port:6,native_transport_port_ssl:6,net:32,netstat:115,networktopologystrategi:0,newer:26,node:[30,49,51],nodej:32,nodetool:[30,41,54,116],noteworthi:21,now:14,num_token:6,one:30,onli:30,oper:[30,41,42,45],option:[11,18,41,52],order:[11,13],otc_backlog_expiration_interval_m:6,otc_coalescing_enough_coalesced_messag:6,otc_coalescing_strategi:6,otc_coalescing_window_u:6,other:[11,30],outofmemoryerror:30,overview:[3,40],packag:34,page:52,paramet:[13,40,41],partit:11,partition:6,password:49,patch:28,pausehandoff:117,perform:29,permiss:19,permissions_update_interval_in_m:6,permissions_validity_in_m:6,phi_convict_threshold:6,php:32,pick:0,point:30,port:30,prepar:12,prepared_statements_cache_size_mb:6,prerequisit:34,primari:[11,18],profileload:118,progress:51,project:26,properti:31,proxyhistogram:119,python:32,pytz:52,queri:[15,35],question:30,rang:[0,51],range_request_timeout_in_m:6,rangekeysampl:120,read:[40,47],read_request_timeout_in_m:6,rebuild:121,rebuild_index:122,refresh:123,refreshsizeestim:124,refus:30,releas:28,reloadlocalschema:125,reloadse:126,reloadssl:127,reloadtrigg:128,relocatesst:129,remot:30,remov:[41,51],removenod:130,repair:[41,47,48,131],repair_admin:132,replac:51,replaybatchlog:133,replic:[0,30],report:[5,30,46],request:46,request_schedul:6,request_scheduler_id:6,request_scheduler_opt:6,request_timeout_in_m:6,reserv:9,resetfullquerylog:134,resetlocalschema:135,result:13,resum:51,resumehandoff:136,revers:11,review:25,revok:19,rhel:30,right:28,ring:[0,30,137],role:[19,49],role_manag:6,roles_update_interval_in_m:6,roles_validity_in_m:6,row_cache_class_nam:6,row_cache_keys_to_sav:6,row_cache_save_period:6,row_
cache_size_in_mb:6,rpc_address:6,rpc_interfac:6,rpc_interface_prefer_ipv6:6,rpc_keepal:6,rpc_max_thread:6,rpc_min_thread:6,rpc_port:6,rpc_recv_buff_size_in_byt:6,rpc_send_buff_size_in_byt:6,rpc_server_typ:6,rubi:32,run:29,runtim:31,rust:32,safeti:6,sai:30,same:30,saved_caches_directori:6,scala:32,scalar:14,scrub:138,secondari:16,secur:[19,49],see:30,seed:30,seed_provid:6,select:[13,17,18],selector:13,serial:52,server_encryption_opt:6,session:52,set:[19,21,26,30],setbatchlogreplaythrottl:139,setcachecapac:140,setcachekeystosav:141,setcompactionthreshold:142,setcompactionthroughput:143,setconcurrentcompactor:144,setconcurrentviewbuild:145,sethintedhandoffthrottlekb:146,setinterdcstreamthroughput:147,setlogginglevel:148,setmaxhintwindow:149,setstreamthroughput:150,settimeout:151,settraceprob:152,setup:26,share:52,shell:52,show:[30,52],signatur:14,simplestrategi:0,singl:[30,41],size:41,slow_query_log_timeout_in_m:6,snapshot:153,snapshot_before_compact:6,snitch:50,sourc:[26,52],special:52,speed:30,ssl:49,ssl_storage_port:6,sstabl:[4,41,46],sstable_preemptive_open_interval_in_mb:6,stai:30,standard:49,start:[26,28,33],start_native_transport:6,start_rpc:6,starv:41,statement:[12,18,23],statu:154,statusautocompact:155,statusbackup:156,statusbinari:157,statusgossip:158,statushandoff:159,stc:41,stop:160,stopdaemon:161,storag:[4,9,46],storage_port:6,store:30,strategi:41,stream:[30,46,51],stream_throughput_outbound_megabits_per_sec:6,streaming_keep_alive_period_in_sec:6,stress:29,style:23,sum:14,support:17,tabl:[11,40,46],tablehistogram:162,tablestat:163,tarbal:34,term:12,test:[26,29],than:30,thei:30,though:30,threadpool:46,threshold:6,thrift_framed_transport_size_in_mb:6,thrift_prepared_statements_cache_size_mb:6,tick:28,tier:41,time:[14,21,41],timestamp:[21,30],timeuuid:14,timewindowcompactionstrategi:41,tock:28,todo:[0,1,3,4,11,22,37,39,44,47,48,54],tojson:17,token:[0,14,51],tombston:41,tombstone_failure_threshold:6,tombstone_warn_threshold:6,tool:[29,53],top:30,toppartit:164,tpstat:165,trace:52,tracetype_query_ttl:6,tracetype_repair_ttl:6,transparent_data_encryption_opt:6,trickle_fsync:6,trickle_fsync_interval_in_kb:6,trigger:[20,41],troubleshoot:171,truncat:11,truncate_request_timeout_in_m:6,truncatehint:166,ttl:[13,41],tunabl:0,tupl:21,two:30,type:[9,17,21,41,46],udt:21,unabl:30,unit:[26,29],unlog:13,unlogged_batch_across_partitions_warn_threshold:6,unrepair:41,unsubscrib:30,updat:[13,30],upgradesst:167,usag:[30,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170],use:30,user:[14,19,21],using:[30,41],uuid:14,variabl:31,verifi:168,version:[10,52,169],view:18,viewbuildstatu:170,warn:40,welcom:36,what:[28,30],when:[30,41],where:13,whitespac:23,why:[30,41],window:41,windows_timer_interv:6,without:41,work:[21,28],write_request_timeout_in_m:6,writetim:13,yaml:40,you:28}}) \ No newline at end of file diff --git a/src/doc/3.11.3/tools/cqlsh.html b/src/doc/3.11.3/tools/cqlsh.html deleted file mode 100644 index 47738a2e8..000000000 --- a/src/doc/3.11.3/tools/cqlsh.html +++ /dev/null @@ -1,481 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "cqlsh: the 
CQL shell" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cqlsh: the CQL shell

-

cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line.

-
-

Compatibility

-

cqlsh is compatible with Python 2.7.

-

In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.

-
-
-

Optional Dependencies

-

cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.

-
-

pytz

-

By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the pytz library must be installed. See the timezone option in cqlshrc for -specifying a timezone to use.

-
-
-

cython

-

The performance of cqlsh’s COPY operations can be improved by installing cython. This will -compile the python modules that are central to the performance of COPY.
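As a rough sketch (assuming pip is available for the Python 2.7 interpreter that cqlsh runs under), both optional dependencies mentioned above can be installed in one step:

$ pip install pytz cython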

-
-
-
-

cqlshrc

-

The cqlshrc file holds configuration options for cqlsh. By default this is in the user’s home directory at -~/.cassandra/cqlshrc, but a custom location can be specified with the --cqlshrc option.

-

Example config values and documentation can be found in the conf/cqlshrc.sample file of a tarball installation. You -can also view the latest version of cqlshrc online.
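A minimal cqlshrc might look like the following sketch; the hostname, port, and credentials are placeholders, and the section names follow the layout of conf/cqlshrc.sample:

$ cat ~/.cassandra/cqlshrc
[authentication]
username = myuser
password = mypass

[connection]
hostname = 10.0.0.1
port = 9042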

-
-
-

Command Line Options

-

Usage:

-

cqlsh [options] [host [port]]

-

Options:

-
-
-C --color
-
Force color output
-
--no-color
-
Disable color output
-
--browser
-
Specify the browser to use for displaying cqlsh help. This can be one of the supported browser names (e.g. firefox) or a browser path followed by %s (e.g. -/usr/bin/google-chrome-stable %s).
-
--ssl
-
Use SSL when connecting to Cassandra
-
-u --user
-
Username to authenticate against Cassandra with
-
-p --password
-
Password to authenticate against Cassandra with, should -be used in conjunction with --user
-
-k --keyspace
-
Keyspace to authenticate to, should be used in conjunction -with --user
-
-f --file
-
Execute commands from the given file, then exit
-
--debug
-
Print additional debugging information
-
--encoding
-
Specify a non-default encoding for output (defaults to UTF-8)
-
--cqlshrc
-
Specify a non-default location for the cqlshrc file
-
-e --execute
-
Execute the given statement, then exit
-
--connect-timeout
-
Specify the connection timeout in seconds (defaults to 2s)
-
--request-timeout
-
Specify the request timeout in seconds (defaults to 10s)
-
-t --tty
-
Force tty mode (command prompt)
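For illustration, the options above can be combined on a single invocation; the host, keyspace, and file names below are placeholders:

$ cqlsh --ssl -u myuser -p mypass 10.0.0.1 9042
$ cqlsh -k mykeyspace -f commands.cql --request-timeout 20
$ cqlsh -e "DESCRIBE KEYSPACES" --no-color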
-
-
-
-

Special Commands

-

In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below.

-
-

CONSISTENCY

-

Usage: CONSISTENCY <consistency level>

-

Sets the consistency level for operations to follow. Valid arguments include:

-
    -
  • ANY
  • -
  • ONE
  • -
  • TWO
  • -
  • THREE
  • -
  • QUORUM
  • -
  • ALL
  • -
  • LOCAL_QUORUM
  • -
  • LOCAL_ONE
  • -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-
-
-

SERIAL CONSISTENCY

-

Usage: SERIAL CONSISTENCY <consistency level>

-

Sets the serial consistency level for operations to follow. Valid arguments include:

-
    -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-

The serial consistency level is only used by conditional updates (INSERT, UPDATE and DELETE with an IF -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of QUORUM (and -is successful), then a QUORUM read is guaranteed to see that write. But if the regular consistency level of that -write is ANY, then only a read with a consistency level of SERIAL is guaranteed to see it (even a read with -consistency ALL is not guaranteed to be enough).
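As a brief sketch of how the two levels interact (the users table and values are hypothetical): set the regular level for the "learn" phase and the serial level for the paxos phase, then issue a conditional update.

cqlsh> CONSISTENCY QUORUM
cqlsh> SERIAL CONSISTENCY LOCAL_SERIAL
cqlsh> UPDATE users SET email = 'a@example.com' WHERE id = 1 IF EXISTS;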

-
-
-

SHOW VERSION

-

Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:

-
cqlsh> SHOW VERSION
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-
-
-
-
-

SHOW HOST

-

Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:

-
cqlsh> SHOW HOST
-Connected to Prod_Cluster at 192.0.0.1:9042.
-
-
-
-
-

SHOW SESSION

-

Pretty prints a specific tracing session.

-

Usage: SHOW SESSION <session id>

-

Example usage:

-
cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8
-
-Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8
-
- activity                                                  | timestamp                  | source    | source_elapsed | client
------------------------------------------------------------+----------------------------+-----------+----------------+-----------
-                                        Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 |              0 | 127.0.0.1
- Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 |           3843 | 127.0.0.1
-...
-
-
-
-
-

SOURCE

-

Reads the contents of a file and executes each line as a CQL statement or special cqlsh command.

-

Usage: SOURCE <string filename>

-

Example usage:

-
cqlsh> SOURCE '/home/thobbs/commands.cql'
-
-
-
-
-

CAPTURE

-

Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured.

-

Usage:

-
CAPTURE '<file>';
-CAPTURE OFF;
-CAPTURE;
-
-
-

That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME.

-

Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session.

-

To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF.

-

To inspect the current capture configuration, use CAPTURE with no arguments.
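A short example session (the output path is hypothetical):

cqlsh> CAPTURE '~/queries.out';
cqlsh> SELECT * FROM system.local;
cqlsh> CAPTURE OFF;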

-
-
-

HELP

-

Gives information about cqlsh commands. To see available topics, enter HELP without any arguments. To see help on a -topic, use HELP <topic>. Also see the --browser argument for controlling what browser is used to display help.

-
-
-

TRACING

-

Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed.

-

Usage:

-
TRACING ON
-TRACING OFF
-
-
-
-
-

PAGING

-

Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it’s a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once.

-

Usage:

-
PAGING ON
-PAGING OFF
-PAGING <page size in rows>
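For example, to fetch 100 rows per page (the value is arbitrary) and then turn paging off again:

cqlsh> PAGING 100
cqlsh> PAGING OFF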
-
-
-
-
-

EXPAND

-

Enables or disables vertical printing of rows. Enabling EXPAND is useful when many columns are fetched, or the -contents of a single column are large.

-

Usage:

-
EXPAND ON
-EXPAND OFF
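For example, when inspecting a single wide row (the keyspace and table names are placeholders):

cqlsh> EXPAND ON
cqlsh> SELECT * FROM my_keyspace.users WHERE id = 1;
cqlsh> EXPAND OFF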
-
-
-
-
-

LOGIN

-

Authenticate as a specified Cassandra user for the current session.

-

Usage:

-
LOGIN <username> [<password>]
-
-
-
-
-

EXIT

-

Ends the current session and terminates the cqlsh process.

-

Usage:

-
EXIT
-QUIT
-
-
-
-
-

CLEAR

-

Clears the console.

-

Usage:

-
CLEAR
-CLS
-
-
-
-
-

DESCRIBE

-

Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema.

-

Usage:

-
DESCRIBE CLUSTER
-DESCRIBE SCHEMA
-DESCRIBE KEYSPACES
-DESCRIBE KEYSPACE <keyspace name>
-DESCRIBE TABLES
-DESCRIBE TABLE <table name>
-DESCRIBE INDEX <index name>
-DESCRIBE MATERIALIZED VIEW <view name>
-DESCRIBE TYPES
-DESCRIBE TYPE <type name>
-DESCRIBE FUNCTIONS
-DESCRIBE FUNCTION <function name>
-DESCRIBE AGGREGATES
-DESCRIBE AGGREGATE <aggregate function name>
-
-
-

In any of the commands, DESC may be used in place of DESCRIBE.

-

The DESCRIBE CLUSTER command prints the cluster name and partitioner:

-
cqlsh> DESCRIBE CLUSTER
-
-Cluster: Test Cluster
-Partitioner: Murmur3Partitioner
-
-
-

The DESCRIBE SCHEMA command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup.
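For example (keyspace and table names are placeholders), either form prints the corresponding CREATE statements:

cqlsh> DESCRIBE KEYSPACE my_keyspace
cqlsh> DESC TABLE my_keyspace.users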

-
-
-

COPY TO

-

Copies data from a table to a CSV file.

-

Usage:

-
COPY <table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parenthesis after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value STDOUT (without single quotes) to print the CSV to stdout.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY TO

-
-
MAXREQUESTS
-
The maximum number of token ranges to fetch simultaneously. Defaults to 6.
-
PAGESIZE
-
The number of rows to fetch in a single page. Defaults to 1000.
-
PAGETIMEOUT
-
By default the page timeout is 10 seconds per 1000 entries -in the page size, or 10 seconds if the page size is smaller.
-
BEGINTOKEN, ENDTOKEN
-
Token range to export. Defaults to exporting the full ring.
-
MAXOUTPUTSIZE
-
The maximum size of the output file measured in number of lines; -beyond this maximum the output file will be split into segments. --1 means unlimited, and is the default.
-
ENCODING
-
The encoding used for characters. Defaults to utf8.
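Putting a few of these options together, a hypothetical export (keyspace, table, column and file names are placeholders) might look like:

cqlsh> COPY my_keyspace.users (id, name, email) TO 'users.csv' WITH HEADER = true AND PAGESIZE = 500;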
-
-
-
-
-

COPY FROM

-

Copies data from a CSV file to a table.

-

Usage:

-
COPY <table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parenthesis after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value STDIN (without single quotes) to read the -CSV data from stdin.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY FROM

-
-
INGESTRATE
-
The maximum number of rows to process per second. Defaults to 100000.
-
MAXROWS
-
The maximum number of rows to import. -1 means unlimited, and is the default.
-
SKIPROWS
-
A number of initial rows to skip. Defaults to 0.
-
SKIPCOLS
-
A comma-separated list of column names to ignore. By default, no columns are skipped.
-
MAXPARSEERRORS
-
The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default.
-
MAXINSERTERRORS
-
The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000.
-
ERRFILE =
-
A file to store all rows that could not be imported, by default this is import_<ks>_<table>.err where <ks> is -your keyspace and <table> is your table name.
-
MAXBATCHSIZE
-
The max number of rows inserted in a single batch. Defaults to 20.
-
MINBATCHSIZE
-
The min number of rows inserted in a single batch. Defaults to 2.
-
CHUNKSIZE
-
The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000.
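A hypothetical import using some of these options (the keyspace, table, column and file names are placeholders):

cqlsh> COPY my_keyspace.users (id, name, email) FROM 'users.csv' WITH HEADER = true AND CHUNKSIZE = 500 AND MAXINSERTERRORS = 10;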
-
-
-
-

Shared COPY Options

-

Options that are common to both COPY TO and COPY FROM.

-
-
NULLVAL
-
The string placeholder for null values. Defaults to null.
-
HEADER
-
For COPY TO, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, -specifies whether the first line in the CSV input file contains column names. Defaults to false.
-
DECIMALSEP
-
The character that is used as the decimal point separator. Defaults to ..
-
THOUSANDSSEP
-
The character that is used to separate thousands. Defaults to the empty string.
-
BOOLSTYLE
-
The string literal format for boolean values. Defaults to True,False.
-
NUMPROCESSES
-
The number of child worker processes to create for COPY tasks. Defaults to a max of 4 for COPY FROM and 16 -for COPY TO. However, at most (num_cores - 1) processes will be created.
-
MAXATTEMPTS
-
The maximum number of failed attempts to fetch a range of data (when using COPY TO) or insert a chunk of data -(when using COPY FROM) before giving up. Defaults to 5.
-
REPORTFREQUENCY
-
How often status updates are refreshed, in seconds. Defaults to 0.25.
-
RATEFILE
-
An optional file to output rate statistics to. By default, statistics are not output to a file.
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/index.html b/src/doc/3.11.3/tools/index.html deleted file mode 100644 index ca52864a1..000000000 --- a/src/doc/3.11.3/tools/index.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Tools" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Tools

-

This section describes the command line tools provided with Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool.html b/src/doc/3.11.3/tools/nodetool.html deleted file mode 100644 index 9c85eecf6..000000000 --- a/src/doc/3.11.3/tools/nodetool.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-

Todo

-

Try to autogenerate this from Nodetool’s help.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/assassinate.html b/src/doc/3.11.3/tools/nodetool/assassinate.html deleted file mode 100644 index 07c805138..000000000 --- a/src/doc/3.11.3/tools/nodetool/assassinate.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "assassinate" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

assassinate

-
-
-

Usage

-
NAME
-        nodetool assassinate - Forcefully remove a dead node without
-        re-replicating any data. Use as a last resort if you cannot removenode
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] assassinate [--] <ip_address>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <ip_address>
-            IP address of the endpoint to assassinate
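For example (both addresses are placeholders), to forcefully remove a dead node at 10.0.0.12 via the JMX agent on 10.0.0.5:

$ nodetool -h 10.0.0.5 assassinate 10.0.0.12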
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/bootstrap.html b/src/doc/3.11.3/tools/nodetool/bootstrap.html deleted file mode 100644 index 34ce590eb..000000000 --- a/src/doc/3.11.3/tools/nodetool/bootstrap.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "bootstrap" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

bootstrap

-
-
-

Usage

-
NAME
-        nodetool bootstrap - Monitor/manage node's bootstrap process
-
-SYNOPSIS
-        nodetool bootstrap
-        nodetool [(-pp | --print-port)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-p <port> | --port <port>)] [(-u <username> | --username <username>)]
-                [(-h <host> | --host <host>)] [(-pw <password> | --password <password>)]
-                bootstrap resume
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-COMMANDS
-        With no arguments, Display help information
-
-        resume
-            Resume bootstrap streaming
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/cleanup.html b/src/doc/3.11.3/tools/nodetool/cleanup.html deleted file mode 100644 index 6d2967268..000000000 --- a/src/doc/3.11.3/tools/nodetool/cleanup.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "cleanup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cleanup

-
-
-

Usage

-
NAME
-        nodetool cleanup - Triggers the immediate cleanup of keys no longer
-        belonging to a node. By default, clean all keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] cleanup
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to cleanup simultanously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
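For example (keyspace and table names are placeholders), to clean a single table using two concurrent compaction threads:

$ nodetool cleanup -j 2 my_keyspace my_table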
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/clearsnapshot.html b/src/doc/3.11.3/tools/nodetool/clearsnapshot.html deleted file mode 100644 index e370e7445..000000000 --- a/src/doc/3.11.3/tools/nodetool/clearsnapshot.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clearsnapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clearsnapshot

-
-
-

Usage

-
NAME
-        nodetool clearsnapshot - Remove the snapshot with the given name from
-        the given keyspaces. If no snapshotName is specified we will remove all
-        snapshots
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clearsnapshot [--all]
-                [-t <snapshot_name>] [--] [<keyspaces>...]
-
-OPTIONS
-        --all
-            Removes all snapshots
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -t <snapshot_name>
-            Remove the snapshot with a given name
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces>...]
-            Remove snapshots from the given keyspaces
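For example (the snapshot and keyspace names are placeholders), to remove one named snapshot from a single keyspace:

$ nodetool clearsnapshot -t backup_2020_01_01 -- my_keyspace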
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/clientstats.html b/src/doc/3.11.3/tools/nodetool/clientstats.html deleted file mode 100644 index bffd68029..000000000 --- a/src/doc/3.11.3/tools/nodetool/clientstats.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clientstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clientstats

-
-
-

Usage

-
NAME
-        nodetool clientstats - Print information about connected clients
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clientstats [--all]
-                [--by-protocol] [--clear-history]
-
-OPTIONS
-        --all
-            Lists all connections
-
-        --by-protocol
-            Lists most recent client connections by protocol version
-
-        --clear-history
-            Clear the history of connected clients
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/compact.html b/src/doc/3.11.3/tools/nodetool/compact.html deleted file mode 100644 index cb087eb71..000000000 --- a/src/doc/3.11.3/tools/nodetool/compact.html +++ /dev/null @@ -1,149 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compact" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compact

-
-
-

Usage

-
NAME
-        nodetool compact - Force a (major) compaction on one or more tables or
-        user-defined compaction on given SSTables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compact
-                [(-et <end_token> | --end-token <end_token>)] [(-s | --split-output)]
-                [(-st <start_token> | --start-token <start_token>)] [--user-defined]
-                [--] [<keyspace> <tables>...] or <SSTable file>...
-
-OPTIONS
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which compaction range ends
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s, --split-output
-            Use -s to not create a single big file
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the compaction range starts
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --user-defined
-            Use --user-defined to submit listed files for user-defined
-            compaction
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...] or <SSTable file>...
-            The keyspace followed by one or many tables or list of SSTable data
-            files when using --user-defined
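For example (keyspace and table names are placeholders), to force a major compaction on one table while splitting the output into several SSTables:

$ nodetool compact -s my_keyspace my_table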
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/compactionhistory.html b/src/doc/3.11.3/tools/nodetool/compactionhistory.html deleted file mode 100644 index dbd03dbb1..000000000 --- a/src/doc/3.11.3/tools/nodetool/compactionhistory.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionhistory" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionhistory

-
-
-

Usage

-
NAME
-        nodetool compactionhistory - Print history of compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionhistory
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/compactionstats.html b/src/doc/3.11.3/tools/nodetool/compactionstats.html deleted file mode 100644 index 1e341da64..000000000 --- a/src/doc/3.11.3/tools/nodetool/compactionstats.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionstats

-
-
-

Usage

-
NAME
-        nodetool compactionstats - Print statistics on compactions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/decommission.html b/src/doc/3.11.3/tools/nodetool/decommission.html deleted file mode 100644 index 3aa389d78..000000000 --- a/src/doc/3.11.3/tools/nodetool/decommission.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "decommission" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

decommission

-
-
-

Usage

-
NAME
-        nodetool decommission - Decommission the *node I am connecting to*
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] decommission [(-f | --force)]
-
-OPTIONS
-        -f, --force
-            Force decommission of this node even when it reduces the number of
-            replicas to below configured RF
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/describecluster.html b/src/doc/3.11.3/tools/nodetool/describecluster.html deleted file mode 100644 index 78a743e1c..000000000 --- a/src/doc/3.11.3/tools/nodetool/describecluster.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describecluster" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describecluster

-
-
-

Usage

-
NAME
-        nodetool describecluster - Print the name, snitch, partitioner and
-        schema version of a cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describecluster
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/describering.html b/src/doc/3.11.3/tools/nodetool/describering.html deleted file mode 100644 index 6927a7322..000000000 --- a/src/doc/3.11.3/tools/nodetool/describering.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describering" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describering

-
-
-

Usage

-
NAME
-        nodetool describering - Shows the token ranges info of a given keyspace
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describering [--] <keyspace>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            The keyspace name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/disableauditlog.html b/src/doc/3.11.3/tools/nodetool/disableauditlog.html deleted file mode 100644 index 0cb9d3199..000000000 --- a/src/doc/3.11.3/tools/nodetool/disableauditlog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableauditlog

-
-
-

Usage

-
NAME
-        nodetool disableauditlog - Disable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableauditlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/disableautocompaction.html b/src/doc/3.11.3/tools/nodetool/disableautocompaction.html deleted file mode 100644 index d5799fab5..000000000 --- a/src/doc/3.11.3/tools/nodetool/disableautocompaction.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableautocompaction

-
-
-

Usage

-
NAME
-        nodetool disableautocompaction - Disable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/disablebackup.html b/src/doc/3.11.3/tools/nodetool/disablebackup.html deleted file mode 100644 index 9e97308bd..000000000 --- a/src/doc/3.11.3/tools/nodetool/disablebackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebackup

-
-
-

Usage

-
NAME
-        nodetool disablebackup - Disable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/disablebinary.html b/src/doc/3.11.3/tools/nodetool/disablebinary.html deleted file mode 100644 index 7be9b5f03..000000000 --- a/src/doc/3.11.3/tools/nodetool/disablebinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebinary

-
-
-

Usage

-
NAME
-        nodetool disablebinary - Disable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/disablefullquerylog.html b/src/doc/3.11.3/tools/nodetool/disablefullquerylog.html deleted file mode 100644 index ae3cbba81..000000000 --- a/src/doc/3.11.3/tools/nodetool/disablefullquerylog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool disablefullquerylog - Disable the full query log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablefullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/disablegossip.html b/src/doc/3.11.3/tools/nodetool/disablegossip.html deleted file mode 100644 index 5ffdae7e9..000000000 --- a/src/doc/3.11.3/tools/nodetool/disablegossip.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablegossip

-
-
-

Usage

-
NAME
-        nodetool disablegossip - Disable gossip (effectively marking the node
-        down)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/disablehandoff.html b/src/doc/3.11.3/tools/nodetool/disablehandoff.html deleted file mode 100644 index 54c5e4fce..000000000 --- a/src/doc/3.11.3/tools/nodetool/disablehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehandoff

-
-
-

Usage

-
NAME
-        nodetool disablehandoff - Disable storing hinted handoffs
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/disablehintsfordc.html b/src/doc/3.11.3/tools/nodetool/disablehintsfordc.html deleted file mode 100644 index 582e204ca..000000000 --- a/src/doc/3.11.3/tools/nodetool/disablehintsfordc.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool disablehintsfordc - Disable hints for a data center
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <datacenter>
-            The data center to disable
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/disableoldprotocolversions.html b/src/doc/3.11.3/tools/nodetool/disableoldprotocolversions.html deleted file mode 100644 index ced002c6b..000000000 --- a/src/doc/3.11.3/tools/nodetool/disableoldprotocolversions.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool disableoldprotocolversions - Disable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/drain.html b/src/doc/3.11.3/tools/nodetool/drain.html deleted file mode 100644 index 008c49874..000000000 --- a/src/doc/3.11.3/tools/nodetool/drain.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "drain" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

drain

-
-
-

Usage

-
NAME
-        nodetool drain - Drain the node (stop accepting writes and flush all
-        tables)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] drain
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/enableauditlog.html b/src/doc/3.11.3/tools/nodetool/enableauditlog.html deleted file mode 100644 index 6b78e9e32..000000000 --- a/src/doc/3.11.3/tools/nodetool/enableauditlog.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableauditlog

-
-
-

Usage

-
NAME
-        nodetool enableauditlog - Enable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableauditlog
-                [--excluded-categories <excluded_categories>]
-                [--excluded-keyspaces <excluded_keyspaces>]
-                [--excluded-users <excluded_users>]
-                [--included-categories <included_categories>]
-                [--included-keyspaces <included_keyspaces>]
-                [--included-users <included_users>] [--logger <logger>]
-
-OPTIONS
-        --excluded-categories <excluded_categories>
-            Comma separated list of Audit Log Categories to be excluded for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --excluded-keyspaces <excluded_keyspaces>
-            Comma separated list of keyspaces to be excluded for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --excluded-users <excluded_users>
-            Comma separated list of users to be excluded for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --included-categories <included_categories>
-            Comma separated list of Audit Log Categories to be included for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --included-keyspaces <included_keyspaces>
-            Comma separated list of keyspaces to be included for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --included-users <included_users>
-            Comma separated list of users to be included for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        --logger <logger>
-            Logger name to be used for AuditLogging. Default BinAuditLogger. If
-            not set the value from cassandra.yaml will be used
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
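For example (the keyspace and user names are placeholders), to audit only one keyspace while excluding a service account, using the options listed above:

$ nodetool enableauditlog --included-keyspaces my_keyspace --excluded-users svc_reporting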
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/enableautocompaction.html b/src/doc/3.11.3/tools/nodetool/enableautocompaction.html deleted file mode 100644 index bb18a8be9..000000000 --- a/src/doc/3.11.3/tools/nodetool/enableautocompaction.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableautocompaction

-
-
-

Usage

-
NAME
-        nodetool enableautocompaction - Enable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/enablebackup.html b/src/doc/3.11.3/tools/nodetool/enablebackup.html deleted file mode 100644 index 1a9b60e64..000000000 --- a/src/doc/3.11.3/tools/nodetool/enablebackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebackup

-
-
-

Usage

-
NAME
-        nodetool enablebackup - Enable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/enablebinary.html b/src/doc/3.11.3/tools/nodetool/enablebinary.html deleted file mode 100644 index 8185f92b5..000000000 --- a/src/doc/3.11.3/tools/nodetool/enablebinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebinary

-
-
-

Usage

-
NAME
-        nodetool enablebinary - Reenable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/enablefullquerylog.html b/src/doc/3.11.3/tools/nodetool/enablefullquerylog.html deleted file mode 100644 index 113f64be0..000000000 --- a/src/doc/3.11.3/tools/nodetool/enablefullquerylog.html +++ /dev/null @@ -1,154 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool enablefullquerylog - Enable full query logging, defaults for
-        the options are configured in cassandra.yaml
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablefullquerylog
-                [--archive-command <archive_command>] [--blocking]
-                [--max-archive-retries <archive_retries>]
-                [--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-                [--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-        --archive-command <archive_command>
-            Command that will handle archiving rolled full query log files.
-            Format is "/path/to/script.sh %path" where %path will be replaced
-            with the file to archive
-
-        --blocking
-            Whether to block producers or drop samples when the queue is full.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --max-archive-retries <archive_retries>
-            Max number of archive retries.
-
-        --max-log-size <max_log_size>
-            How many bytes of log data to store before dropping segments. Might
-            not be respected if a log file hasn't rolled so it can be deleted.
-
-        --max-queue-weight <max_queue_weight>
-            Maximum number of bytes of query data to queue to disk before
-            blocking or dropping samples.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        --path <path>
-            Path to store the full query log at. Will have its contents
-            recursively deleted.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        --roll-cycle <roll_cycle>
-            How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-        -u <username>, --username <username>
-            Remote jmx agent username
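For illustration, a sketch of enabling the full query log with a few of the options above; the path, roll cycle, and size cap are placeholder values, and any option not supplied falls back to cassandra.yaml:

```bash
# Hypothetical example: write the full query log under a dedicated directory,
# roll the log hourly, and cap it at roughly 16 GiB on disk.
$ nodetool enablefullquerylog \
    --path /var/lib/cassandra/fql \
    --roll-cycle HOURLY \
    --max-log-size 17179869184
```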
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/enablegossip.html b/src/doc/3.11.3/tools/nodetool/enablegossip.html deleted file mode 100644 index a11b2378b..000000000 --- a/src/doc/3.11.3/tools/nodetool/enablegossip.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablegossip

-
-
-

Usage

-
NAME
-        nodetool enablegossip - Reenable gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/enablehandoff.html b/src/doc/3.11.3/tools/nodetool/enablehandoff.html deleted file mode 100644 index 07e96bf53..000000000 --- a/src/doc/3.11.3/tools/nodetool/enablehandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehandoff

-
-
-

Usage

-
NAME
-        nodetool enablehandoff - Reenable future hints storing on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/enablehintsfordc.html b/src/doc/3.11.3/tools/nodetool/enablehintsfordc.html deleted file mode 100644 index a8d32ae94..000000000 --- a/src/doc/3.11.3/tools/nodetool/enablehintsfordc.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool enablehintsfordc - Enable hints for a data center that was
-        previously disabled
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to enable
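For illustration, assuming a datacenter named `dc2` had previously had hints disabled:

```bash
# Hypothetical example: resume storing hints for the 'dc2' datacenter.
$ nodetool enablehintsfordc -- dc2
```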
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/enableoldprotocolversions.html b/src/doc/3.11.3/tools/nodetool/enableoldprotocolversions.html deleted file mode 100644 index 3e5e416f9..000000000 --- a/src/doc/3.11.3/tools/nodetool/enableoldprotocolversions.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool enableoldprotocolversions - Enable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/failuredetector.html b/src/doc/3.11.3/tools/nodetool/failuredetector.html deleted file mode 100644 index 351f36b49..000000000 --- a/src/doc/3.11.3/tools/nodetool/failuredetector.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "failuredetector" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

failuredetector

-
-
-

Usage

-
NAME
-        nodetool failuredetector - Shows the failure detector information for
-        the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] failuredetector
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/flush.html b/src/doc/3.11.3/tools/nodetool/flush.html deleted file mode 100644 index d8a7c035e..000000000 --- a/src/doc/3.11.3/tools/nodetool/flush.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "flush" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

flush

-
-
-

Usage

-
NAME
-        nodetool flush - Flush one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] flush [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
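For illustration, with placeholder keyspace and table names:

```bash
# Hypothetical example: flush the memtables of tables 'events' and 'users' in keyspace 'app'.
# Running 'nodetool flush' with no arguments flushes all keyspaces.
$ nodetool flush -- app events users
```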
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/garbagecollect.html b/src/doc/3.11.3/tools/nodetool/garbagecollect.html deleted file mode 100644 index 103939f6b..000000000 --- a/src/doc/3.11.3/tools/nodetool/garbagecollect.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "garbagecollect" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

garbagecollect

-
-
-

Usage

-
NAME
-        nodetool garbagecollect - Remove deleted data from one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] garbagecollect
-                [(-g <granularity> | --granularity <granularity>)]
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -g <granularity>, --granularity <granularity>
-            Granularity of garbage removal. ROW (default) removes deleted
-            partitions and rows, CELL also removes overwritten or deleted cells.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to clean up simultaneously, set to 0 to use all
-            available compaction threads. Defaults to 1 so that collections of
-            newer tables can see the data is deleted and also remove tombstones.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
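For illustration, with placeholder keyspace and table names:

```bash
# Hypothetical example: remove deleted data, including overwritten cells, from
# table 'events' in keyspace 'app', running two cleanup jobs in parallel.
$ nodetool garbagecollect -g CELL -j 2 -- app events
```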
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/gcstats.html b/src/doc/3.11.3/tools/nodetool/gcstats.html deleted file mode 100644 index 071f9f153..000000000 --- a/src/doc/3.11.3/tools/nodetool/gcstats.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gcstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gcstats

-
-
-

Usage

-
NAME
-        nodetool gcstats - Print GC Statistics
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gcstats
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getbatchlogreplaythrottle.html b/src/doc/3.11.3/tools/nodetool/getbatchlogreplaythrottle.html deleted file mode 100644 index aa7d17cd8..000000000 --- a/src/doc/3.11.3/tools/nodetool/getbatchlogreplaythrottle.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool getbatchlogreplaythrottle - Print batchlog replay throttle in
-        KB/s. This is reduced proportionally to the number of nodes in the
-        cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getbatchlogreplaythrottle
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getcompactionthreshold.html b/src/doc/3.11.3/tools/nodetool/getcompactionthreshold.html deleted file mode 100644 index 337399eee..000000000 --- a/src/doc/3.11.3/tools/nodetool/getcompactionthreshold.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool getcompactionthreshold - Print min and max compaction
-        thresholds for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthreshold [--]
-                <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace with a table
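For illustration, with placeholder keyspace and table names:

```bash
# Hypothetical example: show the min and max compaction thresholds for
# table 'events' in keyspace 'app'.
$ nodetool getcompactionthreshold -- app events
```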
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getcompactionthroughput.html b/src/doc/3.11.3/tools/nodetool/getcompactionthroughput.html deleted file mode 100644 index 25297ff14..000000000 --- a/src/doc/3.11.3/tools/nodetool/getcompactionthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool getcompactionthroughput - Print the MB/s throughput cap for
-        compaction in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getconcurrentcompactors.html b/src/doc/3.11.3/tools/nodetool/getconcurrentcompactors.html deleted file mode 100644 index 5205c367b..000000000 --- a/src/doc/3.11.3/tools/nodetool/getconcurrentcompactors.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool getconcurrentcompactors - Get the number of concurrent
-        compactors in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentcompactors
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getconcurrentviewbuilders.html b/src/doc/3.11.3/tools/nodetool/getconcurrentviewbuilders.html deleted file mode 100644 index ab9333c23..000000000 --- a/src/doc/3.11.3/tools/nodetool/getconcurrentviewbuilders.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool getconcurrentviewbuilders - Get the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentviewbuilders
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getendpoints.html b/src/doc/3.11.3/tools/nodetool/getendpoints.html deleted file mode 100644 index 83394c94e..000000000 --- a/src/doc/3.11.3/tools/nodetool/getendpoints.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getendpoints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getendpoints

-
-
-

Usage

-
NAME
-        nodetool getendpoints - Print the end points that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getendpoints [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find the endpoint
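For illustration, with placeholder keyspace, table, and key values:

```bash
# Hypothetical example: list the nodes that own partition key 'user-42'
# of table 'events' in keyspace 'app'.
$ nodetool getendpoints -- app events user-42
```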
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getinterdcstreamthroughput.html b/src/doc/3.11.3/tools/nodetool/getinterdcstreamthroughput.html deleted file mode 100644 index 4bfca9f6e..000000000 --- a/src/doc/3.11.3/tools/nodetool/getinterdcstreamthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getinterdcstreamthroughput - Print the Mb/s throughput cap for
-        inter-datacenter streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getinterdcstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getlogginglevels.html b/src/doc/3.11.3/tools/nodetool/getlogginglevels.html deleted file mode 100644 index f91322107..000000000 --- a/src/doc/3.11.3/tools/nodetool/getlogginglevels.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getlogginglevels" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getlogginglevels

-
-
-

Usage

-
NAME
-        nodetool getlogginglevels - Get the runtime logging levels
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getlogginglevels
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getmaxhintwindow.html b/src/doc/3.11.3/tools/nodetool/getmaxhintwindow.html deleted file mode 100644 index 6c07b1d40..000000000 --- a/src/doc/3.11.3/tools/nodetool/getmaxhintwindow.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool getmaxhintwindow - Print the max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getmaxhintwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getreplicas.html b/src/doc/3.11.3/tools/nodetool/getreplicas.html deleted file mode 100644 index 0ef91a6cc..000000000 --- a/src/doc/3.11.3/tools/nodetool/getreplicas.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getreplicas" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getreplicas

-
-
-

Usage

-
NAME
-        nodetool getreplicas - Print replicas for a given key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getreplicas [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find replicas
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getseeds.html b/src/doc/3.11.3/tools/nodetool/getseeds.html deleted file mode 100644 index e9794cdea..000000000 --- a/src/doc/3.11.3/tools/nodetool/getseeds.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getseeds

-
-
-

Usage

-
NAME
-        nodetool getseeds - Get the list of seed node IPs currently in use,
-        excluding this node's IP
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getsstables.html b/src/doc/3.11.3/tools/nodetool/getsstables.html deleted file mode 100644 index f60c5b4fe..000000000 --- a/src/doc/3.11.3/tools/nodetool/getsstables.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getsstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getsstables

-
-
-

Usage

-
NAME
-        nodetool getsstables - Print the sstable filenames that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getsstables
-                [(-hf | --hex-format)] [--] <keyspace> <cfname> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hf, --hex-format
-            Specify the key in hexadecimal string format
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <key>
-            The keyspace, the column family, and the key
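For illustration, with placeholder keyspace, table, and key values; the second invocation supplies the same key as a hexadecimal string via `--hex-format`:

```bash
# Hypothetical example: show which SSTable files contain partition key 'user-42'
# of table 'events' in keyspace 'app'.
$ nodetool getsstables -- app events user-42

# The same lookup with the key encoded as a hex string ('user-42' in ASCII).
$ nodetool getsstables --hex-format -- app events 757365722d3432
```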
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/getstreamthroughput.html b/src/doc/3.11.3/tools/nodetool/getstreamthroughput.html deleted file mode 100644 index 38bdc9fdc..000000000 --- a/src/doc/3.11.3/tools/nodetool/getstreamthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getstreamthroughput - Print the Mb/s throughput cap for
-        streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/gettimeout.html b/src/doc/3.11.3/tools/nodetool/gettimeout.html deleted file mode 100644 index bce901580..000000000 --- a/src/doc/3.11.3/tools/nodetool/gettimeout.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettimeout

-
-
-

Usage

-
NAME
-        nodetool gettimeout - Print the timeout of the given type in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettimeout [--] <timeout_type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type>
-            The timeout type, one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
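For illustration, querying one of the timeout types listed above:

```bash
# Hypothetical example: print the configured read timeout in milliseconds.
$ nodetool gettimeout -- read
```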
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/gettraceprobability.html b/src/doc/3.11.3/tools/nodetool/gettraceprobability.html deleted file mode 100644 index b177ed9c8..000000000 --- a/src/doc/3.11.3/tools/nodetool/gettraceprobability.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettraceprobability

-
-
-

Usage

-
NAME
-        nodetool gettraceprobability - Print the current trace probability value
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettraceprobability
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/gossipinfo.html b/src/doc/3.11.3/tools/nodetool/gossipinfo.html deleted file mode 100644 index 2d589d428..000000000 --- a/src/doc/3.11.3/tools/nodetool/gossipinfo.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gossipinfo" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gossipinfo

-
-
-

Usage

-
NAME
-        nodetool gossipinfo - Shows the gossip information for the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gossipinfo
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/handoffwindow.html b/src/doc/3.11.3/tools/nodetool/handoffwindow.html deleted file mode 100644 index a86004e28..000000000 --- a/src/doc/3.11.3/tools/nodetool/handoffwindow.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "handoffwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

handoffwindow

-
-
-

Usage

-
NAME
-        nodetool handoffwindow - Print current hinted handoff window
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] handoffwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/help.html b/src/doc/3.11.3/tools/nodetool/help.html deleted file mode 100644 index d6c7f3693..000000000 --- a/src/doc/3.11.3/tools/nodetool/help.html +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "help" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

help

-
-
-

Usage

-
NAME
-        nodetool help - Display help information
-
-SYNOPSIS
-        nodetool help [--] [<command>...]
-
-OPTIONS
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <command>
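For illustration, requesting the detailed help page for a single command (the command name here is just an example):

```bash
# Hypothetical example: show the help text for the 'repair' command.
$ nodetool help repair
```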
-
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/import.html b/src/doc/3.11.3/tools/nodetool/import.html deleted file mode 100644 index e248f33b9..000000000 --- a/src/doc/3.11.3/tools/nodetool/import.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "import" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

import

-
-
-

Usage

-
NAME
-        nodetool import - Import new SSTables to the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] import
-                [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-                [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-                [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-                <directory> ...
-
-OPTIONS
-        -c, --no-invalidate-caches
-            Don't invalidate the row cache when importing
-
-        -e, --extended-verify
-            Run an extended verify, verifying all values in the new sstables
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --keep-level
-            Keep the level on the new sstables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick import without verifying sstables, clearing row cache or
-            checking in which data directory to put the file
-
-        -r, --keep-repaired
-            Keep any repaired information from the sstables
-
-        -t, --no-tokens
-            Don't verify that all tokens in the new sstable are owned by the
-            current node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -v, --no-verify
-            Don't verify new sstables
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <directory> ...
-            The keyspace, table name and directories to import sstables from
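For illustration, with placeholder keyspace, table, and directory values:

```bash
# Hypothetical example: import SSTables from a staging directory into table 'events'
# of keyspace 'app', keeping their levels and running the extended verification pass.
$ nodetool import --keep-level --extended-verify -- app events /var/lib/cassandra/staging/events
```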
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/info.html b/src/doc/3.11.3/tools/nodetool/info.html deleted file mode 100644 index 0d6358685..000000000 --- a/src/doc/3.11.3/tools/nodetool/info.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "info" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

info

-
-
-

Usage

-
NAME
-        nodetool info - Print node information (uptime, load, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] info [(-T | --tokens)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -T, --tokens
-            Display all tokens
-
-        -u <username>, --username <username>
-            Remote jmx agent username
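For illustration, assuming a local node on the default JMX port:

```bash
# Hypothetical example: print node information, including the full token list.
$ nodetool -h 127.0.0.1 -p 7199 info --tokens
```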
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/invalidatecountercache.html b/src/doc/3.11.3/tools/nodetool/invalidatecountercache.html deleted file mode 100644 index dfe060cca..000000000 --- a/src/doc/3.11.3/tools/nodetool/invalidatecountercache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatecountercache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatecountercache

-
-
-

Usage

-
NAME
-        nodetool invalidatecountercache - Invalidate the counter cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatecountercache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/invalidatekeycache.html b/src/doc/3.11.3/tools/nodetool/invalidatekeycache.html deleted file mode 100644 index b496043eb..000000000 --- a/src/doc/3.11.3/tools/nodetool/invalidatekeycache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatekeycache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatekeycache

-
-
-

Usage

-
NAME
-        nodetool invalidatekeycache - Invalidate the key cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatekeycache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/invalidaterowcache.html b/src/doc/3.11.3/tools/nodetool/invalidaterowcache.html deleted file mode 100644 index a384e9ce9..000000000 --- a/src/doc/3.11.3/tools/nodetool/invalidaterowcache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidaterowcache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidaterowcache

-
-
-

Usage

-
NAME
-        nodetool invalidaterowcache - Invalidate the row cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidaterowcache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/join.html b/src/doc/3.11.3/tools/nodetool/join.html deleted file mode 100644 index 016c1556d..000000000 --- a/src/doc/3.11.3/tools/nodetool/join.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "join" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

join

-
-
-

Usage

-
NAME
-        nodetool join - Join the ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] join
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/listsnapshots.html b/src/doc/3.11.3/tools/nodetool/listsnapshots.html deleted file mode 100644 index 871e7ad77..000000000 --- a/src/doc/3.11.3/tools/nodetool/listsnapshots.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "listsnapshots" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

listsnapshots

-
-
-

Usage

-
NAME
-        nodetool listsnapshots - Lists all the snapshots along with the size on
-        disk and true size.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] listsnapshots
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/move.html b/src/doc/3.11.3/tools/nodetool/move.html deleted file mode 100644 index d330513dd..000000000 --- a/src/doc/3.11.3/tools/nodetool/move.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "move" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

move

-
-
-

Usage

-
NAME
-        nodetool move - Move node on the token ring to a new token
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] move [--] <new token>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <new token>
-            The new token.
-
-
-
-
-
- - - - -
-
-
-
-
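For illustration only (not part of the deleted page), a sketch of how `move` might be invoked, assembled from the synopsis above; the host, port and token value are placeholders. The documented `--` separator keeps a negative token from being parsed as an option:

```bash
# Placeholder host, port and token; adjust for your cluster
$ nodetool -h 127.0.0.1 -p 7199 move -- -9223372036854775808
```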
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/netstats.html b/src/doc/3.11.3/tools/nodetool/netstats.html deleted file mode 100644 index 881dd0484..000000000 --- a/src/doc/3.11.3/tools/nodetool/netstats.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "netstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

netstats

-
-
-

Usage

-
NAME
-        nodetool netstats - Print network information on provided host
-        (connecting node by default)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] netstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
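An illustrative call based on the synopsis above; the host is a placeholder and `-H` is the documented human-readable flag:

```bash
# Placeholder host; -H formats sizes as KiB/MiB/GiB/TiB
$ nodetool -h 127.0.0.1 netstats -H
```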
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/nodetool.html b/src/doc/3.11.3/tools/nodetool/nodetool.html deleted file mode 100644 index 755a096d4..000000000 --- a/src/doc/3.11.3/tools/nodetool/nodetool.html +++ /dev/null @@ -1,220 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Nodetool" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-
-

Usage

-
-
usage: nodetool [(-pp | --print-port)]
-[(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-[(-p <port> | --port <port>)] [(-u <username> | --username <username>)]
-[(-h <host> | --host <host>)] [(-pw <password> | --password <password>)]
-<command> [<args>]
-
-

The most commonly used nodetool commands are:

-
-

assassinate - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode

-

bootstrap - Monitor/manage node’s bootstrap process

-

cleanup - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces

-

clearsnapshot - Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots

-

clientstats - Print information about connected clients

-

compact - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables

-

compactionhistory - Print history of compaction

-

compactionstats - Print statistics on compactions

-

decommission - Decommission the node I am connecting to

-

describecluster - Print the name, snitch, partitioner and schema version of a cluster

-

describering - Shows the token ranges info of a given keyspace

-

disableauditlog - Disable the audit log

-

disableautocompaction - Disable autocompaction for the given keyspace and table

-

disablebackup - Disable incremental backup

-

disablebinary - Disable native transport (binary protocol)

-

disablefullquerylog - Disable the full query log

-

disablegossip - Disable gossip (effectively marking the node down)

-

disablehandoff - Disable storing hinted handoffs

-

disablehintsfordc - Disable hints for a data center

-

disableoldprotocolversions - Disable old protocol versions

-

drain - Drain the node (stop accepting writes and flush all tables)

-

enableauditlog - Enable the audit log

-

enableautocompaction - Enable autocompaction for the given keyspace and table

-

enablebackup - Enable incremental backup

-

enablebinary - Reenable native transport (binary protocol)

-

enablefullquerylog - Enable full query logging, defaults for the options are configured in cassandra.yaml

-

enablegossip - Reenable gossip

-

enablehandoff - Reenable future hints storing on the current node

-

enablehintsfordc - Enable hints for a data center that was previously disabled

-

enableoldprotocolversions - Enable old protocol versions

-

failuredetector - Shows the failure detector information for the cluster

-

flush - Flush one or more tables

-

garbagecollect - Remove deleted data from one or more tables

-

gcstats - Print GC Statistics

-

getbatchlogreplaythrottle - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster.

-

getcompactionthreshold - Print min and max compaction thresholds for a given table

-

getcompactionthroughput - Print the MB/s throughput cap for compaction in the system

-

getconcurrentcompactors - Get the number of concurrent compactors in the system.

-

getconcurrentviewbuilders - Get the number of concurrent view builders in the system

-

getendpoints - Print the end points that owns the key

-

getinterdcstreamthroughput - Print the Mb/s throughput cap for inter-datacenter streaming in the system

-

getlogginglevels - Get the runtime logging levels

-

getmaxhintwindow - Print the max hint window in ms

-

getreplicas - Print replicas for a given key

-

getseeds - Get the currently in use seed node IP list excluding the node IP

-

getsstables - Print the sstable filenames that own the key

-

getstreamthroughput - Print the Mb/s throughput cap for streaming in the system

-

gettimeout - Print the timeout of the given type in ms

-

gettraceprobability - Print the current trace probability value

-

gossipinfo - Shows the gossip information for the cluster

-

handoffwindow - Print current hinted handoff window

-

help - Display help information

-

import - Import new SSTables to the system

-

info - Print node information (uptime, load, ...)

-

invalidatecountercache - Invalidate the counter cache

-

invalidatekeycache - Invalidate the key cache

-

invalidaterowcache - Invalidate the row cache

-

join - Join the ring

-

listsnapshots - Lists all the snapshots along with the size on disk and true size.

-

move - Move node on the token ring to a new token

-

netstats - Print network information on provided host (connecting node by default)

-

pausehandoff - Pause hints delivery process

-

profileload - Low footprint profiling of activity for a period of time

-

proxyhistograms - Print statistic histograms for network operations

-

rangekeysample - Shows the sampled keys held across all keyspaces

-

rebuild - Rebuild data by streaming from other nodes (similarly to bootstrap)

-

rebuild_index - A full rebuild of native secondary indexes for a given table

-

refresh - Load newly placed SSTables to the system without restart

-

refreshsizeestimates - Refresh system.size_estimates

-

reloadlocalschema - Reload local node schema from system tables

-

reloadseeds - Reload the seed node list from the seed node provider

-

reloadssl - Signals Cassandra to reload SSL certificates

-

reloadtriggers - Reload trigger classes

-

relocatesstables - Relocates sstables to the correct disk

-

removenode - Show status of current node removal, force completion of pending removal or remove provided ID

-

repair - Repair one or more tables

-

repair_admin - list and fail incremental repair sessions

-

replaybatchlog - Kick off batchlog replay and wait for finish

-

resetfullquerylog - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX

-

resetlocalschema - Reset node’s local schema and resync

-

resumehandoff - Resume hints delivery process

-

ring - Print information about the token ring

-

scrub - Scrub (rebuild sstables for) one or more tables

-

setbatchlogreplaythrottle - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster.

-

setcachecapacity - Set global key, row, and counter cache capacities (in MB units)

-

setcachekeystosave - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable

-

setcompactionthreshold - Set min and max compaction thresholds for a given table

-

setcompactionthroughput - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling

-

setconcurrentcompactors - Set number of concurrent compactors in the system.

-

setconcurrentviewbuilders - Set the number of concurrent view builders in the system

-

sethintedhandoffthrottlekb - Set hinted handoff throttle in kb per second, per delivery thread.

-

setinterdcstreamthroughput - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling

-

setlogginglevel - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters.

-

setmaxhintwindow - Set the specified max hint window in ms

-

setstreamthroughput - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling

-

settimeout - Set the specified timeout in ms, or 0 to disable timeout

-

settraceprobability - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default

-

snapshot - Take a snapshot of specified keyspaces or a snapshot of the specified table

-

status - Print cluster information (state, load, IDs, ...)

-

statusautocompaction - status of autocompaction of the given keyspace and table

-

statusbackup - Status of incremental backup

-

statusbinary - Status of native transport (binary protocol)

-

statusgossip - Status of gossip

-

statushandoff - Status of storing future hints on the current node

-

stop - Stop compaction

-

stopdaemon - Stop cassandra daemon

-

tablehistograms - Print statistic histograms for a given table

-

tablestats - Print statistics on tables

-

toppartitions - Sample and print the most active partitions

-

tpstats - Print usage statistics of thread pools

-

truncatehints - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified.

-

upgradesstables - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version)

-

verify - Verify (check data checksum for) one or more tables

-

version - Print cassandra version

-

viewbuildstatus - Show progress of a materialized view build

-
-

See ‘nodetool help <command>’ for more information on a specific command.

-
- - - - -
-
-
-
-
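For illustration, the general invocation pattern sketched from the usage summary above; the host, port and credentials are placeholders, and `status` and `help` are taken from the command list:

```bash
# Placeholder connection details; any command from the list above can follow them
$ nodetool -h 127.0.0.1 -p 7199 -u cassandra -pw cassandra status
$ nodetool help repair
```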
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/pausehandoff.html b/src/doc/3.11.3/tools/nodetool/pausehandoff.html deleted file mode 100644 index 6797c4897..000000000 --- a/src/doc/3.11.3/tools/nodetool/pausehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "pausehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

pausehandoff

-
-
-

Usage

-
NAME
-        nodetool pausehandoff - Pause hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] pausehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/profileload.html b/src/doc/3.11.3/tools/nodetool/profileload.html deleted file mode 100644 index 1e82375a4..000000000 --- a/src/doc/3.11.3/tools/nodetool/profileload.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "profileload" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

profileload

-
-
-

Usage

-
NAME
-        nodetool profileload - Low footprint profiling of activity for a period
-        of time
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] profileload [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
-
-
-
-
-
- - - - -
-
-
-
-
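An illustrative invocation assembled from the synopsis above; the keyspace, table and duration are placeholders:

```bash
# Sample the most active partitions of a placeholder table for 10 seconds (10000 ms)
$ nodetool profileload -k 10 -s 256 -- my_keyspace my_table 10000
```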
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/proxyhistograms.html b/src/doc/3.11.3/tools/nodetool/proxyhistograms.html deleted file mode 100644 index dd0b1689f..000000000 --- a/src/doc/3.11.3/tools/nodetool/proxyhistograms.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "proxyhistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

proxyhistograms

-
-
-

Usage

-
NAME
-        nodetool proxyhistograms - Print statistic histograms for network
-        operations
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] proxyhistograms
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/rangekeysample.html b/src/doc/3.11.3/tools/nodetool/rangekeysample.html deleted file mode 100644 index 92b3d21c4..000000000 --- a/src/doc/3.11.3/tools/nodetool/rangekeysample.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rangekeysample" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rangekeysample

-
-
-

Usage

-
NAME
-        nodetool rangekeysample - Shows the sampled keys held across all
-        keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rangekeysample
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/rebuild.html b/src/doc/3.11.3/tools/nodetool/rebuild.html deleted file mode 100644 index a657b1285..000000000 --- a/src/doc/3.11.3/tools/nodetool/rebuild.html +++ /dev/null @@ -1,148 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild

-
-
-

Usage

-
NAME
-        nodetool rebuild - Rebuild data by streaming from other nodes (similarly
-        to bootstrap)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild
-                [(-ks <specific_keyspace> | --keyspace <specific_keyspace>)]
-                [(-s <specific_sources> | --sources <specific_sources>)]
-                [(-ts <specific_tokens> | --tokens <specific_tokens>)] [--]
-                <src-dc-name>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -ks <specific_keyspace>, --keyspace <specific_keyspace>
-            Use -ks to rebuild specific keyspace.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <specific_sources>, --sources <specific_sources>
-            Use -s to specify hosts that this node should stream from when -ts
-            is used. Multiple hosts should be separated using commas (e.g.
-            127.0.0.1,127.0.0.2,...)
-
-        -ts <specific_tokens>, --tokens <specific_tokens>
-            Use -ts to rebuild specific token ranges, in the format of "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]".
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <src-dc-name>
-            Name of DC from which to select sources for streaming. By default,
-            pick any DC
-
-
-
-
-
- - - - -
-
-
-
-
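For illustration, a sketch based on the synopsis above; the keyspace name and source data center name are placeholders:

```bash
# Stream a placeholder keyspace from nodes in the data center named "DC1"
$ nodetool rebuild -ks my_keyspace -- DC1
```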
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/rebuild_index.html b/src/doc/3.11.3/tools/nodetool/rebuild_index.html deleted file mode 100644 index 5e5b79f1f..000000000 --- a/src/doc/3.11.3/tools/nodetool/rebuild_index.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild_index" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild_index

-
-
-

Usage

-
NAME
-        nodetool rebuild_index - A full rebuild of native secondary indexes for
-        a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild_index [--] <keyspace>
-                <table> <indexName...>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <indexName...>
-            The keyspace and table name followed by a list of index names
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/refresh.html b/src/doc/3.11.3/tools/nodetool/refresh.html deleted file mode 100644 index d0b6adeff..000000000 --- a/src/doc/3.11.3/tools/nodetool/refresh.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refresh" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refresh

-
-
-

Usage

-
NAME
-        nodetool refresh - Load newly placed SSTables to the system without
-        restart
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refresh [--] <keyspace>
-                <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/refreshsizeestimates.html b/src/doc/3.11.3/tools/nodetool/refreshsizeestimates.html deleted file mode 100644 index 0fd68f997..000000000 --- a/src/doc/3.11.3/tools/nodetool/refreshsizeestimates.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refreshsizeestimates" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refreshsizeestimates

-
-
-

Usage

-
NAME
-        nodetool refreshsizeestimates - Refresh system.size_estimates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refreshsizeestimates
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/reloadlocalschema.html b/src/doc/3.11.3/tools/nodetool/reloadlocalschema.html deleted file mode 100644 index 166951d5a..000000000 --- a/src/doc/3.11.3/tools/nodetool/reloadlocalschema.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadlocalschema

-
-
-

Usage

-
NAME
-        nodetool reloadlocalschema - Reload local node schema from system tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/reloadseeds.html b/src/doc/3.11.3/tools/nodetool/reloadseeds.html deleted file mode 100644 index 4f0cfb115..000000000 --- a/src/doc/3.11.3/tools/nodetool/reloadseeds.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadseeds

-
-
-

Usage

-
NAME
-        nodetool reloadseeds - Reload the seed node list from the seed node
-        provider
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/reloadssl.html b/src/doc/3.11.3/tools/nodetool/reloadssl.html deleted file mode 100644 index bea3d159b..000000000 --- a/src/doc/3.11.3/tools/nodetool/reloadssl.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadssl" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadssl

-
-
-

Usage

-
NAME
-        nodetool reloadssl - Signals Cassandra to reload SSL certificates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadssl
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/reloadtriggers.html b/src/doc/3.11.3/tools/nodetool/reloadtriggers.html deleted file mode 100644 index c44ff3aa9..000000000 --- a/src/doc/3.11.3/tools/nodetool/reloadtriggers.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadtriggers" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadtriggers

-
-
-

Usage

-
NAME
-        nodetool reloadtriggers - Reload trigger classes
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadtriggers
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/relocatesstables.html b/src/doc/3.11.3/tools/nodetool/relocatesstables.html deleted file mode 100644 index 41029ed3b..000000000 --- a/src/doc/3.11.3/tools/nodetool/relocatesstables.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "relocatesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

relocatesstables

-
-
-

Usage

-
NAME
-        nodetool relocatesstables - Relocates sstables to the correct disk
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] relocatesstables
-                [(-j <jobs> | --jobs <jobs>)] [--] <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to relocate simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
-
-
-
-
-
- - - - -
-
-
-
-
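An illustrative call based on the synopsis above; the keyspace and table are placeholders:

```bash
# Relocate sstables of a placeholder table, two at a time
$ nodetool relocatesstables -j 2 -- my_keyspace my_table
```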
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/removenode.html b/src/doc/3.11.3/tools/nodetool/removenode.html deleted file mode 100644 index e71ba5dd7..000000000 --- a/src/doc/3.11.3/tools/nodetool/removenode.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "removenode" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

removenode

-
-
-

Usage

-
NAME
-        nodetool removenode - Show status of current node removal, force
-        completion of pending removal or remove provided ID
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] removenode [--]
-                <status>|<force>|<ID>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <status>|<force>|<ID>
-            Show status of current node removal, force completion of pending
-            removal, or remove provided ID
-
-
-
-
-
- - - - -
-
-
-
-
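For illustration, two invocation sketches taken from the synopsis above; the host ID is a placeholder value:

```bash
# Check progress of an in-flight removal
$ nodetool removenode status
# Remove a node by its (placeholder) host ID
$ nodetool removenode 192d1f19-8f71-4a83-b863-7e1c2c7b3f35
```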
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/repair.html b/src/doc/3.11.3/tools/nodetool/repair.html deleted file mode 100644 index e9c3632e6..000000000 --- a/src/doc/3.11.3/tools/nodetool/repair.html +++ /dev/null @@ -1,196 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair

-
-
-

Usage

-
NAME
-        nodetool repair - Repair one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair
-                [(-dc <specific_dc> | --in-dc <specific_dc>)...]
-                [(-dcpar | --dc-parallel)] [(-et <end_token> | --end-token <end_token>)]
-                [(-force | --force)] [(-full | --full)]
-                [(-hosts <specific_host> | --in-hosts <specific_host>)...]
-                [(-j <job_threads> | --job-threads <job_threads>)]
-                [(-local | --in-local-dc)] [(-os | --optimise-streams)] [(-pl | --pull)]
-                [(-pr | --partitioner-range)] [(-prv | --preview)]
-                [(-seq | --sequential)]
-                [(-st <start_token> | --start-token <start_token>)] [(-tr | --trace)]
-                [(-vd | --validate)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -dc <specific_dc>, --in-dc <specific_dc>
-            Use -dc to repair specific datacenters
-
-        -dcpar, --dc-parallel
-            Use -dcpar to repair data centers in parallel.
-
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which repair range ends
-
-        -force, --force
-            Use -force to filter out down endpoints
-
-        -full, --full
-            Use -full to issue a full repair.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hosts <specific_host>, --in-hosts <specific_host>
-            Use -hosts to repair specific hosts
-
-        -j <job_threads>, --job-threads <job_threads>
-            Number of threads to run repair jobs. Usually this means number of
-            CFs to repair concurrently. WARNING: increasing this puts more load
-            on repairing nodes, so be careful. (default: 1, max: 4)
-
-        -local, --in-local-dc
-            Use -local to only repair against nodes in the same datacenter
-
-        -os, --optimise-streams
-            Use --optimise-streams to try to reduce the number of streams we do
-            (EXPERIMENTAL, see CASSANDRA-3200).
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pl, --pull
-            Use --pull to perform a one way repair where data is only streamed
-            from a remote node to this node.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pr, --partitioner-range
-            Use -pr to repair only the first range returned by the partitioner
-
-        -prv, --preview
-            Determine ranges and amount of data to be streamed, but don't
-            actually perform repair
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -seq, --sequential
-            Use -seq to carry out a sequential repair
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the repair range starts
-
-        -tr, --trace
-            Use -tr to trace the repair. Traces are logged to
-            system_traces.events.
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -vd, --validate
-            Checks that repaired data is in sync between nodes. Out of sync
-            repaired data indicates a full repair should be run.
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
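For illustration, invocation sketches based on the synopsis above; the keyspace and table names are placeholders:

```bash
# Full repair of a placeholder keyspace
$ nodetool repair -full my_keyspace
# Primary-range-only repair limited to the local data center
$ nodetool repair -pr -local my_keyspace my_table
```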
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/repair_admin.html b/src/doc/3.11.3/tools/nodetool/repair_admin.html deleted file mode 100644 index 35fca380f..000000000 --- a/src/doc/3.11.3/tools/nodetool/repair_admin.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair_admin" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair_admin

-
-
-

Usage

-
NAME
-        nodetool repair_admin - list and fail incremental repair sessions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair_admin [(-a | --all)]
-                [(-f | --force)] [(-l | --list)] [(-x <cancel> | --cancel <cancel>)]
-
-OPTIONS
-        -a, --all
-            include completed and failed sessions
-
-        -f, --force
-            cancel repair session from a node other than the repair coordinator.
-            Attempting to cancel FINALIZED or FAILED sessions is an error.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --list
-            list repair sessions (default behavior)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -x <cancel>, --cancel <cancel>
-            cancel an incremental repair session
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/replaybatchlog.html b/src/doc/3.11.3/tools/nodetool/replaybatchlog.html deleted file mode 100644 index 552705e2c..000000000 --- a/src/doc/3.11.3/tools/nodetool/replaybatchlog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "replaybatchlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

replaybatchlog

-
-
-

Usage

-
NAME
-        nodetool replaybatchlog - Kick off batchlog replay and wait for finish
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] replaybatchlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/resetfullquerylog.html b/src/doc/3.11.3/tools/nodetool/resetfullquerylog.html deleted file mode 100644 index bc3031928..000000000 --- a/src/doc/3.11.3/tools/nodetool/resetfullquerylog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetfullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetfullquerylog

-
-
-

Usage

-
NAME
-        nodetool resetfullquerylog - Stop the full query log and clean files in
-        the configured full query log directory from cassandra.yaml as well as
-        JMX
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetfullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/resetlocalschema.html b/src/doc/3.11.3/tools/nodetool/resetlocalschema.html deleted file mode 100644 index 25035a9a4..000000000 --- a/src/doc/3.11.3/tools/nodetool/resetlocalschema.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetlocalschema

-
-
-

Usage

-
NAME
-        nodetool resetlocalschema - Reset node's local schema and resync
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/resumehandoff.html b/src/doc/3.11.3/tools/nodetool/resumehandoff.html deleted file mode 100644 index 1cf218ba8..000000000 --- a/src/doc/3.11.3/tools/nodetool/resumehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resumehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resumehandoff

-
-
-

Usage

-
NAME
-        nodetool resumehandoff - Resume hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resumehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/ring.html b/src/doc/3.11.3/tools/nodetool/ring.html deleted file mode 100644 index d8d599acc..000000000 --- a/src/doc/3.11.3/tools/nodetool/ring.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "ring" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

ring

-
-
-

Usage

-
NAME
-        nodetool ring - Print information about the token ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] ring [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace>
-            Specify a keyspace for accurate ownership information (topology
-            awareness)
-
-
-
-
-
- - - - -
-
-
-
-
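An illustrative call from the synopsis above; the keyspace is a placeholder and `-r` is the documented flag for showing host names instead of IPs:

```bash
# Placeholder keyspace, supplied for accurate ownership figures
$ nodetool ring -r my_keyspace
```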
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/scrub.html b/src/doc/3.11.3/tools/nodetool/scrub.html deleted file mode 100644 index 6a4a948f8..000000000 --- a/src/doc/3.11.3/tools/nodetool/scrub.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "scrub" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

scrub

-
-
-

Usage

-
NAME
-        nodetool scrub - Scrub (rebuild sstables for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] scrub
-                [(-j <jobs> | --jobs <jobs>)] [(-n | --no-validate)]
-                [(-ns | --no-snapshot)] [(-r | --reinsert-overflowed-ttl)]
-                [(-s | --skip-corrupted)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to scrub simultanously, set to 0 to use all
-            available compaction threads
-
-        -n, --no-validate
-            Do not validate columns using column validator
-
-        -ns, --no-snapshot
-            Scrubbed CFs will be snapshotted first, if disableSnapshot is false.
-            (default false)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --reinsert-overflowed-ttl
-            Rewrites rows with overflowed expiration date affected by
-            CASSANDRA-14092 with the maximum supported expiration date of
-            2038-01-19T03:14:06+00:00. The rows are rewritten with the original
-            timestamp incremented by one millisecond to override/supersede any
-            potential tombstone that may have been generated during compaction
-            of the affected rows.
-
-        -s, --skip-corrupted
-            Skip corrupted partitions even when scrubbing counter tables.
-            (default false)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
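A hedged example of how these options can be combined (keyspace and table names are made up): scrub a single table with two parallel jobs and skip the pre-scrub snapshot.

```bash
# Hypothetical example: rebuild sstables for demo_ks.users with 2 jobs, no snapshot first
$ nodetool scrub --jobs 2 --no-snapshot -- demo_ks users
```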
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setbatchlogreplaythrottle.html b/src/doc/3.11.3/tools/nodetool/setbatchlogreplaythrottle.html deleted file mode 100644 index 35ab43b71..000000000 --- a/src/doc/3.11.3/tools/nodetool/setbatchlogreplaythrottle.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool setbatchlogreplaythrottle - Set batchlog replay throttle in KB
-        per second, or 0 to disable throttling. This will be reduced
-        proportionally to the number of nodes in the cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setbatchlogreplaythrottle [--]
-                <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_kb_per_sec>
-            Value in KB per second, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setcachecapacity.html b/src/doc/3.11.3/tools/nodetool/setcachecapacity.html deleted file mode 100644 index 31668f931..000000000 --- a/src/doc/3.11.3/tools/nodetool/setcachecapacity.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachecapacity" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachecapacity

-
-
-

Usage

-
NAME
-        nodetool setcachecapacity - Set global key, row, and counter cache
-        capacities (in MB units)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachecapacity [--]
-                <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-            Key cache, row cache, and counter cache (in MB)
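For illustration (the capacities below are arbitrary values, not recommendations), the three positional arguments are given in key, row, counter order:

```bash
# Hypothetical example: 100 MB key cache, 0 MB row cache, 50 MB counter cache
$ nodetool setcachecapacity -- 100 0 50
```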
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setcachekeystosave.html b/src/doc/3.11.3/tools/nodetool/setcachekeystosave.html deleted file mode 100644 index 63ed39b75..000000000 --- a/src/doc/3.11.3/tools/nodetool/setcachekeystosave.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachekeystosave" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachekeystosave

-
-
-

Usage

-
NAME
-        nodetool setcachekeystosave - Set number of keys saved by each cache for
-        faster post-restart warmup. 0 to disable
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachekeystosave [--]
-                <key-cache-keys-to-save> <row-cache-keys-to-save>
-                <counter-cache-keys-to-save>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <key-cache-keys-to-save> <row-cache-keys-to-save>
-        <counter-cache-keys-to-save>
-            The number of keys saved by each cache. 0 to disable
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setcompactionthreshold.html b/src/doc/3.11.3/tools/nodetool/setcompactionthreshold.html deleted file mode 100644 index 8be56313a..000000000 --- a/src/doc/3.11.3/tools/nodetool/setcompactionthreshold.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool setcompactionthreshold - Set min and max compaction thresholds
-        for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthreshold [--]
-                <keyspace> <table> <minthreshold> <maxthreshold>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <minthreshold> <maxthreshold>
-            The keyspace, the table, min and max threshold
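An illustrative invocation (keyspace, table, and thresholds are assumptions, not tuning advice):

```bash
# Hypothetical example: min 4 and max 32 sstables before compaction for demo_ks.events
$ nodetool setcompactionthreshold -- demo_ks events 4 32
```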
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setcompactionthroughput.html b/src/doc/3.11.3/tools/nodetool/setcompactionthroughput.html deleted file mode 100644 index 0300df2ee..000000000 --- a/src/doc/3.11.3/tools/nodetool/setcompactionthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool setcompactionthroughput - Set the MB/s throughput cap for
-        compaction in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in MB, 0 to disable throttling
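For example (the 64 MB/s figure is purely illustrative), capping compaction throughput and later removing the cap could look like:

```bash
# Hypothetical example: cap compaction throughput at 64 MB/s
$ nodetool setcompactionthroughput 64
# Remove the cap again
$ nodetool setcompactionthroughput 0
```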
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setconcurrentcompactors.html b/src/doc/3.11.3/tools/nodetool/setconcurrentcompactors.html deleted file mode 100644 index a6a99c1b8..000000000 --- a/src/doc/3.11.3/tools/nodetool/setconcurrentcompactors.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool setconcurrentcompactors - Set number of concurrent compactors
-        in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentcompactors [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Number of concurrent compactors, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setconcurrentviewbuilders.html b/src/doc/3.11.3/tools/nodetool/setconcurrentviewbuilders.html deleted file mode 100644 index 8a9f6edc7..000000000 --- a/src/doc/3.11.3/tools/nodetool/setconcurrentviewbuilders.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool setconcurrentviewbuilders - Set the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentviewbuilders [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Number of concurrent view builders, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/sethintedhandoffthrottlekb.html b/src/doc/3.11.3/tools/nodetool/sethintedhandoffthrottlekb.html deleted file mode 100644 index 89107e7c5..000000000 --- a/src/doc/3.11.3/tools/nodetool/sethintedhandoffthrottlekb.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sethintedhandoffthrottlekb" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sethintedhandoffthrottlekb

-
-
-

Usage

-
NAME
-        nodetool sethintedhandoffthrottlekb - Set hinted handoff throttle in kb
-        per second, per delivery thread.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sethintedhandoffthrottlekb
-                [--] <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_kb_per_sec>
-            Value in KB per second
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setinterdcstreamthroughput.html b/src/doc/3.11.3/tools/nodetool/setinterdcstreamthroughput.html deleted file mode 100644 index 564b5388e..000000000 --- a/src/doc/3.11.3/tools/nodetool/setinterdcstreamthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setinterdcstreamthroughput - Set the Mb/s throughput cap for
-        inter-datacenter streaming in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setinterdcstreamthroughput
-                [--] <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setlogginglevel.html b/src/doc/3.11.3/tools/nodetool/setlogginglevel.html deleted file mode 100644 index 9344c0728..000000000 --- a/src/doc/3.11.3/tools/nodetool/setlogginglevel.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setlogginglevel" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setlogginglevel

-
-
-

Usage

-
NAME
-        nodetool setlogginglevel - Set the log level threshold for a given
-        component or class. Will reset to the initial configuration if called
-        with no parameters.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setlogginglevel [--]
-                <component|class> <level>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <component|class> <level>
-            The component or class to change the level for and the log level
-            threshold to set. Will reset to initial level if omitted. Available
-            components: bootstrap, compaction, repair, streaming, cql, ring
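A sketch of typical usage based on the description above (the chosen component and level are illustrative):

```bash
# Hypothetical example: raise compaction logging to DEBUG
$ nodetool setlogginglevel -- compaction DEBUG
# Reset all loggers back to their configured levels
$ nodetool setlogginglevel
```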
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setmaxhintwindow.html b/src/doc/3.11.3/tools/nodetool/setmaxhintwindow.html deleted file mode 100644 index 05930c8aa..000000000 --- a/src/doc/3.11.3/tools/nodetool/setmaxhintwindow.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool setmaxhintwindow - Set the specified max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setmaxhintwindow [--]
-                <value_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_ms>
-            Value of maxhintwindow in ms
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/setstreamthroughput.html b/src/doc/3.11.3/tools/nodetool/setstreamthroughput.html deleted file mode 100644 index e837bf9ce..000000000 --- a/src/doc/3.11.3/tools/nodetool/setstreamthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setstreamthroughput - Set the Mb/s throughput cap for streaming
-        in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setstreamthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/settimeout.html b/src/doc/3.11.3/tools/nodetool/settimeout.html deleted file mode 100644 index 795a1806a..000000000 --- a/src/doc/3.11.3/tools/nodetool/settimeout.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settimeout

-
-
-

Usage

-
NAME
-        nodetool settimeout - Set the specified timeout in ms, or 0 to disable
-        timeout
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settimeout [--] <timeout_type>
-                <timeout_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <timeout_type> <timeout_in_ms>
-            Timeout type followed by value in ms (0 disables socket streaming
-            timeout). Type should be one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
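Illustrative only (10000 ms is an arbitrary figure); the timeout type must be one of the values listed above:

```bash
# Hypothetical example: set the read timeout to 10 seconds
$ nodetool settimeout -- read 10000
```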
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/settraceprobability.html b/src/doc/3.11.3/tools/nodetool/settraceprobability.html deleted file mode 100644 index 37eaccefe..000000000 --- a/src/doc/3.11.3/tools/nodetool/settraceprobability.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settraceprobability

-
-
-

Usage

-
NAME
-        nodetool settraceprobability - Sets the probability for tracing any
-        given request to value. 0 disables, 1 enables for all requests, 0 is the
-        default
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settraceprobability [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Trace probability between 0 and 1 (ex: 0.2)
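For instance (0.01 is an arbitrary sampling rate), tracing roughly 1% of requests and then disabling tracing again might look like:

```bash
# Hypothetical example: trace about 1% of requests
$ nodetool settraceprobability 0.01
# Disable tracing again
$ nodetool settraceprobability 0
```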
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/snapshot.html b/src/doc/3.11.3/tools/nodetool/snapshot.html deleted file mode 100644 index b0a523045..000000000 --- a/src/doc/3.11.3/tools/nodetool/snapshot.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "snapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

snapshot

-
-
-

Usage

-
NAME
-        nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-        of the specified table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] snapshot
-                [(-cf <table> | --column-family <table> | --table <table>)]
-                [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-                [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-        -cf <table>, --column-family <table>, --table <table>
-            The table name (you must specify one and only one keyspace for using
-            this option)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-            The list of Keyspace.table to take snapshot.(you must not specify
-            only keyspace)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -sf, --skip-flush
-            Do not flush memtables before snapshotting (snapshot will not
-            contain unflushed data)
-
-        -t <tag>, --tag <tag>
-            The name of the snapshot
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces...>]
-            List of keyspaces. By default, all keyspaces
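A hedged example combining the tag and table selectors described above (snapshot tag, keyspace, and table names are hypothetical):

```bash
# Hypothetical example: snapshot every keyspace under the tag "pre-upgrade"
$ nodetool snapshot -t pre-upgrade
# Hypothetical example: snapshot only demo_ks.users, skipping the memtable flush
$ nodetool snapshot --skip-flush -kt demo_ks.users
```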
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/status.html b/src/doc/3.11.3/tools/nodetool/status.html deleted file mode 100644 index ece158da3..000000000 --- a/src/doc/3.11.3/tools/nodetool/status.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "status" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

status

-
-
-

Usage

-
NAME
-        nodetool status - Print cluster information (state, load, IDs, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] status [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace>]
-            The keyspace name
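For illustration (the keyspace is hypothetical; ownership figures are only meaningful when a keyspace is supplied):

```bash
# Cluster overview with hostnames instead of IPs
$ nodetool status -r
# Ownership scoped to a single (hypothetical) keyspace
$ nodetool status -- demo_ks
```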
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/statusautocompaction.html b/src/doc/3.11.3/tools/nodetool/statusautocompaction.html deleted file mode 100644 index f12fc7426..000000000 --- a/src/doc/3.11.3/tools/nodetool/statusautocompaction.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusautocompaction

-
-
-

Usage

-
NAME
-        nodetool statusautocompaction - status of autocompaction of the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusautocompaction
-                [(-a | --all)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --all
-            Show auto compaction status for each keyspace/table
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/statusbackup.html b/src/doc/3.11.3/tools/nodetool/statusbackup.html deleted file mode 100644 index 063aff055..000000000 --- a/src/doc/3.11.3/tools/nodetool/statusbackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbackup

-
-
-

Usage

-
NAME
-        nodetool statusbackup - Status of incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/statusbinary.html b/src/doc/3.11.3/tools/nodetool/statusbinary.html deleted file mode 100644 index 31dfaba47..000000000 --- a/src/doc/3.11.3/tools/nodetool/statusbinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbinary

-
-
-

Usage

-
NAME
-        nodetool statusbinary - Status of native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/statusgossip.html b/src/doc/3.11.3/tools/nodetool/statusgossip.html deleted file mode 100644 index 5bdf0702d..000000000 --- a/src/doc/3.11.3/tools/nodetool/statusgossip.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusgossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusgossip

-
-
-

Usage

-
NAME
-        nodetool statusgossip - Status of gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusgossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/statushandoff.html b/src/doc/3.11.3/tools/nodetool/statushandoff.html deleted file mode 100644 index dfa15991f..000000000 --- a/src/doc/3.11.3/tools/nodetool/statushandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statushandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statushandoff

-
-
-

Usage

-
NAME
-        nodetool statushandoff - Status of storing future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statushandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/stop.html b/src/doc/3.11.3/tools/nodetool/stop.html deleted file mode 100644 index 53e950952..000000000 --- a/src/doc/3.11.3/tools/nodetool/stop.html +++ /dev/null @@ -1,139 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stop" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stop

-
-
-

Usage

-
NAME
-        nodetool stop - Stop compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stop
-                [(-id <compactionId> | --compaction-id <compactionId>)] [--] <compaction
-                type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -id <compactionId>, --compaction-id <compactionId>
-            Use -id to stop a compaction by the specified id. Ids can be found
-            in the transaction log files whose name starts with compaction_,
-            located in the table transactions folder.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <compaction type>
-            Supported types are COMPACTION, VALIDATION, CLEANUP, SCRUB, VERIFY,
-            INDEX_BUILD
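A minimal sketch of the positional form shown above (the chosen type is just one of the supported values):

```bash
# Hypothetical example: stop all running validation compactions
$ nodetool stop -- VALIDATION
# A single compaction can instead be targeted with -id <compactionId>, as described above
```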
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/stopdaemon.html b/src/doc/3.11.3/tools/nodetool/stopdaemon.html deleted file mode 100644 index a6ed497c5..000000000 --- a/src/doc/3.11.3/tools/nodetool/stopdaemon.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stopdaemon" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stopdaemon

-
-
-

Usage

-
NAME
-        nodetool stopdaemon - Stop cassandra daemon
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stopdaemon
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/tablehistograms.html b/src/doc/3.11.3/tools/nodetool/tablehistograms.html deleted file mode 100644 index f9bfb4efd..000000000 --- a/src/doc/3.11.3/tools/nodetool/tablehistograms.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablehistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablehistograms

-
-
-

Usage

-
NAME
-        nodetool tablehistograms - Print statistic histograms for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablehistograms [--]
-                [<keyspace> <table> | <keyspace.table>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <table> | <keyspace.table>]
-            The keyspace and table name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/tablestats.html b/src/doc/3.11.3/tools/nodetool/tablestats.html deleted file mode 100644 index a7cca3468..000000000 --- a/src/doc/3.11.3/tools/nodetool/tablestats.html +++ /dev/null @@ -1,167 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablestats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablestats

-
-
-

Usage

-
NAME
-        nodetool tablestats - Print statistics on tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablestats
-                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]
-                [(-s <sort_key> | --sort <sort_key>)] [(-t <top> | --top <top>)] [--]
-                [<keyspace.table>...]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -i
-            Ignore the list of tables and display the remaining tables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <sort_key>, --sort <sort_key>
-            Sort tables by specified sort key
-            (average_live_cells_per_slice_last_five_minutes,
-            average_tombstones_per_slice_last_five_minutes,
-            bloom_filter_false_positives, bloom_filter_false_ratio,
-            bloom_filter_off_heap_memory_used, bloom_filter_space_used,
-            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,
-            compacted_partition_minimum_bytes,
-            compression_metadata_off_heap_memory_used, dropped_mutations,
-            full_name, index_summary_off_heap_memory_used, local_read_count,
-            local_read_latency_ms, local_write_latency_ms,
-            maximum_live_cells_per_slice_last_five_minutes,
-            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,
-            memtable_data_size, memtable_off_heap_memory_used,
-            memtable_switch_count, number_of_partitions_estimate,
-            off_heap_memory_used_total, pending_flushes, percent_repaired,
-            read_latency, reads, space_used_by_snapshots_total, space_used_live,
-            space_used_total, sstable_compression_ratio, sstable_count,
-            table_name, write_latency, writes)
-
-        -t <top>, --top <top>
-            Show only the top K tables for the sort key (specify the number K of
-            tables to be shown
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace.table>...]
-            List of tables (or keyspace) names
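Putting a few of the options above together as an illustration (the keyspace/table name is made up; the sort key comes from the list above):

```bash
# Human-readable stats for one (hypothetical) table
$ nodetool tablestats -H -- demo_ks.users
# Top 5 tables by live space used
$ nodetool tablestats -s space_used_live -t 5
```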
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/toppartitions.html b/src/doc/3.11.3/tools/nodetool/toppartitions.html deleted file mode 100644 index 581dc9e46..000000000 --- a/src/doc/3.11.3/tools/nodetool/toppartitions.html +++ /dev/null @@ -1,141 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "toppartitions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

toppartitions

-
-
-

Usage

-
NAME
-        nodetool toppartitions - Sample and print the most active partitions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] toppartitions [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
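For example (keyspace, table, and duration are hypothetical), sampling the hottest partitions for ten seconds and listing the top 20:

```bash
# Hypothetical example: sample demo_ks.users for 10000 ms
$ nodetool toppartitions -k 20 -- demo_ks users 10000
```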
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/tpstats.html b/src/doc/3.11.3/tools/nodetool/tpstats.html deleted file mode 100644 index a7b210f05..000000000 --- a/src/doc/3.11.3/tools/nodetool/tpstats.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tpstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tpstats

-
-
-

Usage

-
NAME
-        nodetool tpstats - Print usage statistics of thread pools
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tpstats
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/truncatehints.html b/src/doc/3.11.3/tools/nodetool/truncatehints.html deleted file mode 100644 index 640356fd2..000000000 --- a/src/doc/3.11.3/tools/nodetool/truncatehints.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "truncatehints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-truncatehints
-
-Usage
-
NAME
-        nodetool truncatehints - Truncate all hints on the local node, or
-        truncate hints for the endpoint(s) specified.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] truncatehints [--] [endpoint
-                ... ]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [endpoint ... ]
-            Endpoint address(es) to delete hints for, either ip address
-            ("127.0.0.1") or hostname
-
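A hedged example of the endpoint form described above; the address is a placeholder for whichever dead or decommissioned node's hints should be discarded.

```bash
# Remove all stored hints destined for the node at 127.0.0.1 (placeholder address)
nodetool truncatehints 127.0.0.1
```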
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/upgradesstables.html b/src/doc/3.11.3/tools/nodetool/upgradesstables.html deleted file mode 100644 index 7f6f21324..000000000 --- a/src/doc/3.11.3/tools/nodetool/upgradesstables.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "upgradesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-upgradesstables
-
-Usage
-
NAME
-        nodetool upgradesstables - Rewrite sstables (for the requested tables)
-        that are not on the current version (thus upgrading them to said current
-        version)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] upgradesstables
-                [(-a | --include-all-sstables)] [(-j <jobs> | --jobs <jobs>)] [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --include-all-sstables
-            Use -a to include all sstables, even those already on the current
-            version
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to upgrade simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
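As a sketch of how the options above combine, the example below limits the rewrite to two concurrent jobs for a single table; the keyspace and table names are placeholders.

```bash
# Rewrite old-format sstables for my_keyspace.my_table (placeholder names),
# upgrading at most two sstables at a time.
nodetool upgradesstables -j 2 my_keyspace my_table
```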
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/verify.html b/src/doc/3.11.3/tools/nodetool/verify.html deleted file mode 100644 index 44d5a33d9..000000000 --- a/src/doc/3.11.3/tools/nodetool/verify.html +++ /dev/null @@ -1,152 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "verify" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-verify
-
-Usage
-
NAME
-        nodetool verify - Verify (check data checksum for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] verify
-                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]
-                [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)] [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -c, --check-version
-            Also check that all sstables are the latest version
-
-        -d, --dfp
-            Invoke the disk failure policy if a corrupt sstable is found
-
-        -e, --extended-verify
-            Verify the data in each cell, beyond simply checking sstable checksums
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick check - avoid reading all data to verify checksums
-
-        -r, --rsc
-            Mutate the repair status on corrupt sstables
-
-        -t, --check-tokens
-            Verify that all tokens in sstables are owned by this node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
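A hypothetical invocation combining a couple of the flags above; the keyspace and table names are placeholders.

```bash
# Verify sstable checksums for one table, including per-cell verification (-e)
nodetool verify -e my_keyspace my_table
```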
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/version.html b/src/doc/3.11.3/tools/nodetool/version.html deleted file mode 100644 index b9a71f063..000000000 --- a/src/doc/3.11.3/tools/nodetool/version.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "version" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-version
-
-Usage
-
NAME
-        nodetool version - Print cassandra version
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] version
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
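This subcommand takes no arguments beyond the shared JMX options, so a bare invocation against the local node is enough:

```bash
# Print the Cassandra version of the locally running node
nodetool version
```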
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/tools/nodetool/viewbuildstatus.html b/src/doc/3.11.3/tools/nodetool/viewbuildstatus.html deleted file mode 100644 index 3362ca7d4..000000000 --- a/src/doc/3.11.3/tools/nodetool/viewbuildstatus.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "viewbuildstatus" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-viewbuildstatus
-
-Usage
-
NAME
-        nodetool viewbuildstatus - Show progress of a materialized view build
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] viewbuildstatus [--]
-                <keyspace> <view> | <keyspace.view>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <view> | <keyspace.view>
-            The keyspace and view name
-
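A hedged sketch of checking build progress for one materialized view; the keyspace and view names are placeholders, and the dotted `keyspace.view` form from the synopsis works equally well.

```bash
# Show per-node build progress for the materialized view my_keyspace.my_view
nodetool viewbuildstatus my_keyspace my_view
```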
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.3/troubleshooting/index.html b/src/doc/3.11.3/troubleshooting/index.html deleted file mode 100644 index 6cedfe67d..000000000 --- a/src/doc/3.11.3/troubleshooting/index.html +++ /dev/null @@ -1,100 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Troubleshooting" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-Troubleshooting
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/.buildinfo b/src/doc/3.11.5/.buildinfo deleted file mode 100644 index 39b8e8a60..000000000 --- a/src/doc/3.11.5/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: d3263039dc2ab455a849453b031ecbbe -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/src/doc/3.11.5/_images/eclipse_debug0.png b/src/doc/3.11.5/_images/eclipse_debug0.png deleted file mode 100644 index 79fc5fd5b..000000000 Binary files a/src/doc/3.11.5/_images/eclipse_debug0.png and /dev/null differ diff --git a/src/doc/3.11.5/_images/eclipse_debug1.png b/src/doc/3.11.5/_images/eclipse_debug1.png deleted file mode 100644 index 87b8756a3..000000000 Binary files a/src/doc/3.11.5/_images/eclipse_debug1.png and /dev/null differ diff --git a/src/doc/3.11.5/_images/eclipse_debug2.png b/src/doc/3.11.5/_images/eclipse_debug2.png deleted file mode 100644 index df4eddbd7..000000000 Binary files a/src/doc/3.11.5/_images/eclipse_debug2.png and /dev/null differ diff --git a/src/doc/3.11.5/_images/eclipse_debug3.png b/src/doc/3.11.5/_images/eclipse_debug3.png deleted file mode 100644 index 23178142c..000000000 Binary files a/src/doc/3.11.5/_images/eclipse_debug3.png and /dev/null differ diff --git a/src/doc/3.11.5/_images/eclipse_debug4.png b/src/doc/3.11.5/_images/eclipse_debug4.png deleted file mode 100644 index 5063d4891..000000000 Binary files a/src/doc/3.11.5/_images/eclipse_debug4.png and /dev/null differ diff --git a/src/doc/3.11.5/_images/eclipse_debug5.png b/src/doc/3.11.5/_images/eclipse_debug5.png deleted file mode 100644 index ab68e68a3..000000000 Binary files a/src/doc/3.11.5/_images/eclipse_debug5.png and /dev/null differ diff --git a/src/doc/3.11.5/_images/eclipse_debug6.png b/src/doc/3.11.5/_images/eclipse_debug6.png deleted file mode 100644 index 61ef30bfe..000000000 Binary files a/src/doc/3.11.5/_images/eclipse_debug6.png and /dev/null differ diff --git a/src/doc/3.11.5/_sources/architecture/dynamo.rst.txt b/src/doc/3.11.5/_sources/architecture/dynamo.rst.txt deleted file mode 100644 index a7dbb8750..000000000 --- a/src/doc/3.11.5/_sources/architecture/dynamo.rst.txt +++ /dev/null @@ -1,139 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dynamo ------- - -.. _gossip: - -Gossip -^^^^^^ - -.. todo:: todo - -Failure Detection -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -Token Ring/Ranges -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -.. _replication-strategy: - -Replication -^^^^^^^^^^^ - -The replication strategy of a keyspace determines which nodes are replicas for a given token range. 
The two main -replication strategies are :ref:`simple-strategy` and :ref:`network-topology-strategy`. - -.. _simple-strategy: - -SimpleStrategy -~~~~~~~~~~~~~~ - -SimpleStrategy allows a single integer ``replication_factor`` to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if ``replication_factor`` is 3, then three different nodes should store -a copy of each row. - -SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until ``replication_factor`` distinct nodes have been added to the set of replicas. - -.. _network-topology-strategy: - -NetworkTopologyStrategy -~~~~~~~~~~~~~~~~~~~~~~~ - -NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your -cluster only uses a single datacenter, NetworkTopologyStrategy should be prefered over SimpleStrategy to make it easier -to add new physical or virtual datacenters to the cluster later. - -In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose -replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication -factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one -replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially `surprising -implications `_. For example, if there are not an even number of -nodes in each rack, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to -configure all nodes on a single "rack". - -Tunable Consistency -^^^^^^^^^^^^^^^^^^^ - -Cassandra supports a per-operation tradeoff between consistency and availability through *Consistency Levels*. -Essentially, an operation's consistency level specifies how many of the replicas need to respond to the coordinator in -order to consider the operation a success. - -The following consistency levels are available: - -``ONE`` - Only a single replica must respond. - -``TWO`` - Two replicas must respond. - -``THREE`` - Three replicas must respond. - -``QUORUM`` - A majority (n/2 + 1) of the replicas must respond. - -``ALL`` - All of the replicas must respond. - -``LOCAL_QUORUM`` - A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond. - -``EACH_QUORUM`` - A majority of the replicas in each datacenter must respond. - -``LOCAL_ONE`` - Only a single replica must respond. In a multi-datacenter cluster, this also gaurantees that read requests are not - sent to replicas in a remote datacenter. - -``ANY`` - A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later - attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for - write operations. - -Write operations are always sent to all replicas, regardless of consistency level. 
The consistency level simply -controls how many responses the coordinator waits for before responding to the client. - -For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level. There are a couple of exceptions to this: - -- Speculative retry may issue a redundant read request to an extra replica if the other replicas have not responded - within a specified time window. -- Based on ``read_repair_chance`` and ``dclocal_read_repair_chance`` (part of a table's schema), read requests may be - randomly sent to all replicas in order to repair potentially inconsistent data. - -Picking Consistency Levels -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is common to pick read and write consistency levels that are high enough to overlap, resulting in "strong" -consistency. This is typically expressed as ``W + R > RF``, where ``W`` is the write consistency level, ``R`` is the -read consistency level, and ``RF`` is the replication factor. For example, if ``RF = 3``, a ``QUORUM`` request will -require responses from at least two of the three replicas. If ``QUORUM`` is used for both writes and reads, at least -one of the replicas is guaranteed to participate in *both* the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, ``LOCAL_QUORUM`` can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter. - -If this type of strong consistency isn't required, lower consistency levels like ``ONE`` may be used to improve -throughput, latency, and availability. diff --git a/src/doc/3.11.5/_sources/architecture/guarantees.rst.txt b/src/doc/3.11.5/_sources/architecture/guarantees.rst.txt deleted file mode 100644 index c0b58d880..000000000 --- a/src/doc/3.11.5/_sources/architecture/guarantees.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Guarantees ----------- - -.. todo:: todo diff --git a/src/doc/3.11.5/_sources/architecture/index.rst.txt b/src/doc/3.11.5/_sources/architecture/index.rst.txt deleted file mode 100644 index 58eda1377..000000000 --- a/src/doc/3.11.5/_sources/architecture/index.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Architecture -============ - -This section describes the general architecture of Apache Cassandra. - -.. toctree:: - :maxdepth: 2 - - overview - dynamo - storage_engine - guarantees - diff --git a/src/doc/3.11.5/_sources/architecture/overview.rst.txt b/src/doc/3.11.5/_sources/architecture/overview.rst.txt deleted file mode 100644 index 005b15b94..000000000 --- a/src/doc/3.11.5/_sources/architecture/overview.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Overview --------- - -.. todo:: todo diff --git a/src/doc/3.11.5/_sources/architecture/storage_engine.rst.txt b/src/doc/3.11.5/_sources/architecture/storage_engine.rst.txt deleted file mode 100644 index e4114e5af..000000000 --- a/src/doc/3.11.5/_sources/architecture/storage_engine.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Storage Engine --------------- - -.. _commit-log: - -CommitLog -^^^^^^^^^ - -.. todo:: todo - -.. _memtables: - -Memtables -^^^^^^^^^ - -Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable `SSTables`_. This can be triggered in several -ways: - -- The memory usage of the memtables exceeds the configured threshold (see ``memtable_cleanup_threshold``) -- The :ref:`commit-log` approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to - be freed - -Memtables may be stored entirely on-heap or partially off-heap, depending on ``memtable_allocation_type``. - -SSTables -^^^^^^^^ - -SSTables are the immutable data files that Cassandra uses for persisting data on disk. 
- -As SSTables are flushed to disk from :ref:`memtables` or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed. - -Each SSTable is comprised of multiple components stored in separate files: - -``Data.db`` - The actual data, i.e. the contents of rows. - -``Index.db`` - An index from partition keys to positions in the ``Data.db`` file. For wide partitions, this may also include an - index to rows within a partition. - -``Summary.db`` - A sampling of (by default) every 128th entry in the ``Index.db`` file. - -``Filter.db`` - A Bloom Filter of the partition keys in the SSTable. - -``CompressionInfo.db`` - Metadata about the offsets and lengths of compression chunks in the ``Data.db`` file. - -``Statistics.db`` - Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, - repair, compression, TTLs, and more. - -``Digest.crc32`` - A CRC-32 digest of the ``Data.db`` file. - -``TOC.txt`` - A plain text list of the component files for the SSTable. - -Within the ``Data.db`` file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, ``Murmur3Partition``, is used). Within a partition, rows are -stored in the order of their clustering keys. - -SSTables can be optionally compressed using block-based compression. diff --git a/src/doc/3.11.5/_sources/bugs.rst.txt b/src/doc/3.11.5/_sources/bugs.rst.txt deleted file mode 100644 index 240cfd495..000000000 --- a/src/doc/3.11.5/_sources/bugs.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Reporting Bugs and Contributing -=============================== - -If you encounter a problem with Cassandra, the first places to ask for help are the :ref:`user mailing list -` and the ``#cassandra`` :ref:`IRC channel `. - -If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the `Apache Cassandra JIRA `__. Please provide as much -details as you can on your problem, and don't forget to indicate which version of Cassandra you are running and on which -environment. - -Further details on how to contribute can be found at our :doc:`development/index` section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path. 
diff --git a/src/doc/3.11.5/_sources/configuration/cassandra_config_file.rst.txt b/src/doc/3.11.5/_sources/configuration/cassandra_config_file.rst.txt deleted file mode 100644 index f205f7d30..000000000 --- a/src/doc/3.11.5/_sources/configuration/cassandra_config_file.rst.txt +++ /dev/null @@ -1,1911 +0,0 @@ -.. _cassandra-yaml: - -Cassandra Configuration File -============================ - -``cluster_name`` ----------------- -The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another. - -*Default Value:* 'Test Cluster' - -``num_tokens`` --------------- - -This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability. - -If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below. - -Specifying initial_token will override this setting on the node's initial start, -on subsequent starts, this setting will apply even if initial token is set. - -If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations - -*Default Value:* 256 - -``allocate_tokens_for_keyspace`` --------------------------------- -*This option is commented out by default.* - -Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace. - -The load assigned to each node will be close to proportional to its number of -vnodes. - -Only supported with the Murmur3Partitioner. - -*Default Value:* KEYSPACE - -``initial_token`` ------------------ -*This option is commented out by default.* - -initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) -- in which case you should provide a -comma-separated list -- it's primarily used when adding nodes to legacy clusters -that do not have vnodes enabled. - -``hinted_handoff_enabled`` --------------------------- - -See http://wiki.apache.org/cassandra/HintedHandoff -May either be "true" or "false" to enable globally - -*Default Value:* true - -``hinted_handoff_disabled_datacenters`` ---------------------------------------- -*This option is commented out by default.* - -When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff - -*Default Value (complex option)*:: - - # - DC1 - # - DC2 - -``max_hint_window_in_ms`` -------------------------- -this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again. - -*Default Value:* 10800000 # 3 hours - -``hinted_handoff_throttle_in_kb`` ---------------------------------- - -Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.) 
- -*Default Value:* 1024 - -``max_hints_delivery_threads`` ------------------------------- - -Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower - -*Default Value:* 2 - -``hints_directory`` -------------------- -*This option is commented out by default.* - -Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints. - -*Default Value:* /var/lib/cassandra/hints - -``hints_flush_period_in_ms`` ----------------------------- - -How often hints should be flushed from the internal buffers to disk. -Will *not* trigger fsync. - -*Default Value:* 10000 - -``max_hints_file_size_in_mb`` ------------------------------ - -Maximum size for a single hints file, in megabytes. - -*Default Value:* 128 - -``hints_compression`` ---------------------- -*This option is commented out by default.* - -Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``batchlog_replay_throttle_in_kb`` ----------------------------------- -Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster. - -*Default Value:* 1024 - -``authenticator`` ------------------ - -Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}. - -- AllowAllAuthenticator performs no checks - set it to disable authentication. -- PasswordAuthenticator relies on username/password pairs to authenticate - users. It keeps usernames and hashed passwords in system_auth.roles table. - Please increase system_auth keyspace replication factor if you use this authenticator. - If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) - -*Default Value:* AllowAllAuthenticator - -``authorizer`` --------------- - -Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}. - -- AllowAllAuthorizer allows any action to any user - set it to disable authorization. -- CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllAuthorizer - -``role_manager`` ----------------- - -Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable. - -- CassandraRoleManager stores role data in the system_auth keyspace. Please - increase system_auth keyspace replication factor if you use this role manager. 
- -*Default Value:* CassandraRoleManager - -``roles_validity_in_ms`` ------------------------- - -Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator. - -*Default Value:* 2000 - -``roles_update_interval_in_ms`` -------------------------------- -*This option is commented out by default.* - -Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms. - -*Default Value:* 2000 - -``permissions_validity_in_ms`` ------------------------------- - -Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer. - -*Default Value:* 2000 - -``permissions_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms. - -*Default Value:* 2000 - -``credentials_validity_in_ms`` ------------------------------- - -Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching. - -*Default Value:* 2000 - -``credentials_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms. - -*Default Value:* 2000 - -``partitioner`` ---------------- - -The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. You should leave this -alone for new clusters. The partitioner can NOT be changed without -reloading all data, so when upgrading you should set this to the -same partitioner you were already using. - -Besides Murmur3Partitioner, partitioners included for backwards -compatibility include RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner. 
- - -*Default Value:* org.apache.cassandra.dht.Murmur3Partitioner - -``data_file_directories`` -------------------------- -*This option is commented out by default.* - -Directories where Cassandra should store data on disk. Cassandra -will spread data evenly across them, subject to the granularity of -the configured compaction strategy. -If not set, the default directory is $CASSANDRA_HOME/data/data. - -*Default Value (complex option)*:: - - # - /var/lib/cassandra/data - -``commitlog_directory`` ------------------------ -*This option is commented out by default.* -commit log. when running on magnetic HDD, this should be a -separate spindle than the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -``cdc_enabled`` ---------------- - -Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory). - -*Default Value:* false - -``cdc_raw_directory`` ---------------------- -*This option is commented out by default.* - -CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw. - -*Default Value:* /var/lib/cassandra/cdc_raw - -``disk_failure_policy`` ------------------------ - -Policy for data disk failures: - -die - shut down gossip and client transports and kill the JVM for any fs errors or - single-sstable errors, so the node can be replaced. - -stop_paranoid - shut down gossip and client transports even for single-sstable errors, - kill the JVM for errors during startup. - -stop - shut down gossip and client transports, leaving the node effectively dead, but - can still be inspected via JMX, kill the JVM for errors during startup. - -best_effort - stop using the failed disk and respond to requests based on - remaining available sstables. This means you WILL see obsolete - data at CL.ONE! - -ignore - ignore fatal errors and let requests fail, as in pre-1.2 Cassandra - -*Default Value:* stop - -``commit_failure_policy`` -------------------------- - -Policy for commit disk failures: - -die - shut down gossip and Thrift and kill the JVM, so the node can be replaced. - -stop - shut down gossip and Thrift, leaving the node effectively dead, but - can still be inspected via JMX. - -stop_commit - shutdown the commit log, letting writes collect but - continuing to service reads, as in pre-2.0.5 Cassandra - -ignore - ignore fatal errors and let the batches fail - -*Default Value:* stop - -``prepared_statements_cache_size_mb`` -------------------------------------- - -Maximum size of the native protocol prepared statement cache - -Valid values are either "auto" (omitting the value) or a value greater 0. - -Note that specifying a too large value will result in long running GCs and possbily -out-of-memory errors. Keep the value at a small fraction of the heap. - -If you constantly see "prepared statements discarded in the last minute because -cache limit reached" messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts. 
- -Do only change the default value, if you really have more prepared statements than -fit in the cache. In most cases it is not neccessary to change this value. -Constantly re-preparing statements is a performance penalty. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``thrift_prepared_statements_cache_size_mb`` --------------------------------------------- - -Maximum size of the Thrift prepared statement cache - -If you do not use Thrift at all, it is safe to leave this value at "auto". - -See description of 'prepared_statements_cache_size_mb' above for more information. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``key_cache_size_in_mb`` ------------------------- - -Maximum size of the key cache in memory. - -Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it's worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It's best to only use the -row cache if you have hot rows or static rows. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. - -``key_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 14400 or 4 hours. - -*Default Value:* 14400 - -``key_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``row_cache_class_name`` ------------------------- -*This option is commented out by default.* - -Row cache implementation class name. Available implementations: - -org.apache.cassandra.cache.OHCProvider - Fully off-heap row cache implementation (default). - -org.apache.cassandra.cache.SerializingCacheProvider - This is the row cache implementation availabile - in previous releases of Cassandra. - -*Default Value:* org.apache.cassandra.cache.OHCProvider - -``row_cache_size_in_mb`` ------------------------- - -Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory that the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Do never allow your system to swap. - -Default value is 0, to disable row caching. - -*Default Value:* 0 - -``row_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. 
- -Default is 0 to disable saving the row cache. - -*Default Value:* 0 - -``row_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved - -*Default Value:* 100 - -``counter_cache_size_in_mb`` ----------------------------- - -Maximum size of the counter cache in memory. - -Counter cache helps to reduce counter locks' contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it's relatively cheap. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. - -``counter_cache_save_period`` ------------------------------ - -Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file. - -Default is 7200 or 2 hours. - -*Default Value:* 7200 - -``counter_cache_keys_to_save`` ------------------------------- -*This option is commented out by default.* - -Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``saved_caches_directory`` --------------------------- -*This option is commented out by default.* - -saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. - -*Default Value:* /var/lib/cassandra/saved_caches - -``commitlog_sync`` ------------------- -*This option is commented out by default.* - -commitlog_sync may be either "periodic" or "batch." - -When in batch mode, Cassandra won't ack writes until the commit log -has been fsynced to disk. It will wait -commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -This window should be kept short because the writer threads will -be unable to do extra work while waiting. (You may need to increase -concurrent_writes for the same reason.) - - -*Default Value:* batch - -``commitlog_sync_batch_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -*Default Value:* 2 - -``commitlog_sync`` ------------------- - -the other option is "periodic" where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds. - -*Default Value:* periodic - -``commitlog_sync_period_in_ms`` -------------------------------- - -*Default Value:* 10000 - -``commitlog_segment_size_in_mb`` --------------------------------- - -The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables. - -The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. 
-Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048. - -NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024 - - -*Default Value:* 32 - -``commitlog_compression`` -------------------------- -*This option is commented out by default.* - -Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``seed_provider`` ------------------ -any class that implements the SeedProvider interface and has a -constructor that takes a Map of parameters will do. - -*Default Value (complex option)*:: - - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: ",," - - seeds: "127.0.0.1" - -``concurrent_reads`` --------------------- -For workloads with more data than can fit in memory, Cassandra's -bottleneck will be reads that need to fetch data from -disk. "concurrent_reads" should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -"concurrent_counter_writes", since counter writes read the current -values before incrementing and writing them back. - -On the other hand, since writes are almost never IO bound, the ideal -number of "concurrent_writes" is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb. - -*Default Value:* 32 - -``concurrent_writes`` ---------------------- - -*Default Value:* 32 - -``concurrent_counter_writes`` ------------------------------ - -*Default Value:* 32 - -``concurrent_materialized_view_writes`` ---------------------------------------- - -For materialized view writes, as there is a read involved, so this should -be limited by the less of concurrent reads or concurrent writes. - -*Default Value:* 32 - -``file_cache_size_in_mb`` -------------------------- -*This option is commented out by default.* - -Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as an -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed. - -*Default Value:* 512 - -``buffer_pool_use_heap_if_exhausted`` -------------------------------------- -*This option is commented out by default.* - -Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. 
- - -*Default Value:* true - -``disk_optimization_strategy`` ------------------------------- -*This option is commented out by default.* - -The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks) - -*Default Value:* ssd - -``memtable_heap_space_in_mb`` ------------------------------ -*This option is commented out by default.* - -Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap. - -*Default Value:* 2048 - -``memtable_offheap_space_in_mb`` --------------------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``memtable_cleanup_threshold`` ------------------------------- -*This option is commented out by default.* - -memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information. - -Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load. - -memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) - -*Default Value:* 0.11 - -``memtable_allocation_type`` ----------------------------- - -Specify the way Cassandra allocates and manages memtable memory. -Options are: - -heap_buffers - on heap nio buffers - -offheap_buffers - off heap (direct) nio buffers - -offheap_objects - off heap objects - -*Default Value:* heap_buffers - -``repair_session_max_tree_depth`` ---------------------------------- -*This option is commented out by default.* - -Limits the maximum Merkle tree depth to avoid consuming too much -memory during repairs. - -The default setting of 18 generates trees of maximum size around -50 MiB / tree. If you are running out of memory during repairs consider -lowering this to 15 (~6 MiB / tree) or lower, but try not to lower it -too much past that or you will lose too much resolution and stream -too much redundant data during repair. Cannot be set lower than 10. - -For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096. - - -*Default Value:* 18 - -``commitlog_total_space_in_mb`` -------------------------------- -*This option is commented out by default.* - -Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume. - - -*Default Value:* 8192 - -``memtable_flush_writers`` --------------------------- -*This option is commented out by default.* - -This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound. - -Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. 
-At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time. - -You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory. - -memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers. - -Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead. - -There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory. - - -*Default Value:* 2 - -``cdc_total_space_in_mb`` -------------------------- -*This option is commented out by default.* - -Total space to use for change-data-capture logs on disk. - -If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed. - -The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides. - -*Default Value:* 4096 - -``cdc_free_space_check_interval_ms`` ------------------------------------- -*This option is commented out by default.* - -When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms - -*Default Value:* 250 - -``index_summary_capacity_in_mb`` --------------------------------- - -A fixed memory pool size in MB for for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory. - -``index_summary_resize_interval_in_minutes`` --------------------------------------------- - -How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level. - -*Default Value:* 60 - -``trickle_fsync`` ------------------ - -Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters. - -*Default Value:* false - -``trickle_fsync_interval_in_kb`` --------------------------------- - -*Default Value:* 10240 - -``storage_port`` ----------------- - -TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed. 
- -*Default Value:* 7000 - -``ssl_storage_port`` --------------------- - -SSL port, for encrypted communication. Unused unless enabled in -encryption_options -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7001 - -``listen_address`` ------------------- - -Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate! - -Set listen_address OR listen_interface, not both. - -Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be). - -Setting listen_address to 0.0.0.0 is always wrong. - - -*Default Value:* localhost - -``listen_interface`` --------------------- -*This option is commented out by default.* - -Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth0 - -``listen_interface_prefer_ipv6`` --------------------------------- -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_address`` ---------------------- -*This option is commented out by default.* - -Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address - -*Default Value:* 1.2.3.4 - -``listen_on_broadcast_address`` -------------------------------- -*This option is commented out by default.* - -When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2. - -*Default Value:* false - -``internode_authenticator`` ---------------------------- -*This option is commented out by default.* - -Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes. - -*Default Value:* org.apache.cassandra.auth.AllowAllInternodeAuthenticator - -``start_native_transport`` --------------------------- - -Whether to start the native transport server. -Please note that the address on which the native transport is bound is the -same as the rpc_address. The port however is different and specified below. - -*Default Value:* true - -``native_transport_port`` -------------------------- -port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 9042 - -``native_transport_port_ssl`` ------------------------------ -*This option is commented out by default.* -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. 
-Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted. - -*Default Value:* 9142 - -``native_transport_max_threads`` --------------------------------- -*This option is commented out by default.* -The maximum threads for handling requests when the native transport is used. -This is similar to rpc_max_threads though the default differs slightly (and -there is no native_transport_min_threads, idle threads will always be stopped -after 30 seconds). - -*Default Value:* 128 - -``native_transport_max_frame_size_in_mb`` ------------------------------------------ -*This option is commented out by default.* - -The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you're changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048. - -*Default Value:* 256 - -``native_transport_max_concurrent_connections`` ------------------------------------------------ -*This option is commented out by default.* - -The maximum number of concurrent client connections. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_max_concurrent_connections_per_ip`` ------------------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``start_rpc`` -------------- - -Whether to start the thrift rpc server. - -*Default Value:* false - -``rpc_address`` ---------------- - -The address or interface to bind the Thrift RPC service and native transport -server to. - -Set rpc_address OR rpc_interface, not both. - -Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node). - -Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0. - -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* localhost - -``rpc_interface`` ------------------ -*This option is commented out by default.* - -Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth1 - -``rpc_interface_prefer_ipv6`` ------------------------------ -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``rpc_port`` ------------- - -port for Thrift to listen for clients on - -*Default Value:* 9160 - -``broadcast_rpc_address`` -------------------------- -*This option is commented out by default.* - -RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set. 
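As a minimal illustration of the pairing described above (the address ``10.0.0.5`` is a placeholder, not a value taken from this document), a node that binds ``rpc_address`` to all interfaces must also advertise one concrete, routable address::

    # Illustrative sketch only: 10.0.0.5 stands in for the node's real routable address.
    rpc_address: 0.0.0.0
    broadcast_rpc_address: 10.0.0.5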
- -*Default Value:* 1.2.3.4 - -``rpc_keepalive`` ------------------ - -enable or disable keepalive on rpc/native connections - -*Default Value:* true - -``rpc_server_type`` -------------------- - -Cassandra provides two out-of-the-box options for the RPC Server: - -sync - One thread per thrift connection. For a very large number of clients, memory - will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size - per thread, and that will correspond to your use of virtual memory (but physical memory - may be limited depending on use of stack space). - -hsha - Stands for "half synchronous, half asynchronous." All thrift clients are handled - asynchronously using a small number of threads that does not vary with the amount - of thrift clients (and thus scales well to many clients). The rpc requests are still - synchronous (one thread per active request). If hsha is selected then it is essential - that rpc_max_threads is changed from the default value of unlimited. - -The default is sync because on Windows hsha is about 30% slower. On Linux, -sync/hsha performance is about the same, with hsha of course using less memory. - -Alternatively, can provide your own RPC server by providing the fully-qualified class name -of an o.a.c.t.TServerFactory that can create an instance of it. - -*Default Value:* sync - -``rpc_min_threads`` -------------------- -*This option is commented out by default.* - -Uncomment rpc_min|max_thread to set request pool size limits. - -Regardless of your choice of RPC server (see above), the number of maximum requests in the -RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -RPC server, it also dictates the number of clients that can be connected at all). - -The default is unlimited and thus provides no protection against clients overwhelming the server. You are -encouraged to set a maximum that makes sense for you in production, but do keep in mind that -rpc_max_threads represents the maximum number of client requests this server may execute concurrently. - - -*Default Value:* 16 - -``rpc_max_threads`` -------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``rpc_send_buff_size_in_bytes`` -------------------------------- -*This option is commented out by default.* - -uncomment to set socket buffer sizes on rpc connections - -``rpc_recv_buff_size_in_bytes`` -------------------------------- -*This option is commented out by default.* - -``internode_send_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and 'man tcp' - -``internode_recv_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem - -``thrift_framed_transport_size_in_mb`` --------------------------------------- - -Frame size for thrift (maximum message length). 
- -*Default Value:* 15 - -``incremental_backups`` ------------------------ - -Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator's -responsibility. - -*Default Value:* false - -``snapshot_before_compaction`` ------------------------------- - -Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won't clean up the -snapshots for you. Mostly useful if you're paranoid when there -is a data format change. - -*Default Value:* false - -``auto_snapshot`` ------------------ - -Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop. - -*Default Value:* true - -``column_index_size_in_kb`` ---------------------------- - -Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these: - -- a smaller granularity means more index entries are generated - and looking up rows withing the partition by collation column - is faster -- but, Cassandra will keep the collation index in memory for hot - rows (as part of the key cache), so a larger granularity means - you can cache more hot rows - -*Default Value:* 64 - -``column_index_cache_size_in_kb`` ---------------------------------- - -Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk. - -Note that this size refers to the size of the -serialized index information and not the size of the partition. - -*Default Value:* 2 - -``concurrent_compactors`` -------------------------- -*This option is commented out by default.* - -Number of simultaneous compactions to allow, NOT including -validation "compactions" for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long running compactions. The default is usually -fine and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first. - -concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8. - -If your data directories are backed by SSD, you should increase this -to the number of cores. - -*Default Value:* 1 - -``compaction_throughput_mb_per_sec`` ------------------------------------- - -Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this account for all types -of compaction, including validation compaction. 
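As a worked example of the 16 to 32 times rule of thumb above (the ingest rate is assumed, not taken from this document), a node sustaining roughly 3 MB/s of writes could be throttled as follows::

    # ~3 MB/s ingest * 16-32 => a limit somewhere in the 48-96 MB/s range.
    compaction_throughput_mb_per_sec: 64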
- -*Default Value:* 16 - -``sstable_preemptive_open_interval_in_mb`` ------------------------------------------- - -When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot - -*Default Value:* 50 - -``stream_throughput_outbound_megabits_per_sec`` ------------------------------------------------ -*This option is commented out by default.* - -Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s. - -*Default Value:* 200 - -``inter_dc_stream_throughput_outbound_megabits_per_sec`` --------------------------------------------------------- -*This option is commented out by default.* - -Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s - -*Default Value:* 200 - -``read_request_timeout_in_ms`` ------------------------------- - -How long the coordinator should wait for read operations to complete - -*Default Value:* 5000 - -``range_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for seq or index scans to complete - -*Default Value:* 10000 - -``write_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for writes to complete - -*Default Value:* 2000 - -``counter_write_request_timeout_in_ms`` ---------------------------------------- -How long the coordinator should wait for counter writes to complete - -*Default Value:* 5000 - -``cas_contention_timeout_in_ms`` --------------------------------- -How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row - -*Default Value:* 1000 - -``truncate_request_timeout_in_ms`` ----------------------------------- -How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) - -*Default Value:* 60000 - -``request_timeout_in_ms`` -------------------------- -The default timeout for other, miscellaneous operations - -*Default Value:* 10000 - -``slow_query_log_timeout_in_ms`` --------------------------------- - -How long before a node logs slow queries. Select queries that take longer than -this timeout to execute, will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging. - -*Default Value:* 500 - -``cross_node_timeout`` ----------------------- - -Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests. 
- -Warning: before enabling this property make sure to ntp is installed -and the times are synchronized between the nodes. - -*Default Value:* false - -``streaming_keep_alive_period_in_secs`` ---------------------------------------- -*This option is commented out by default.* - -Set keep-alive period for streaming -This node will send a keep-alive message periodically with this period. -If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles the stream session times out and fail -Default value is 300s (5 minutes), which means stalled stream -times out in 10 minutes by default - -*Default Value:* 300 - -``phi_convict_threshold`` -------------------------- -*This option is commented out by default.* - -phi value that must be reached for a host to be marked down. -most users should never need to adjust this. - -*Default Value:* 8 - -``endpoint_snitch`` -------------------- - -endpoint_snitch -- Set this to a class that implements -IEndpointSnitch. The snitch has two functions: - -- it teaches Cassandra enough about your network topology to route - requests efficiently -- it allows Cassandra to spread replicas around your cluster to avoid - correlated failures. It does this by grouping machines into - "datacenters" and "racks." Cassandra will do its best not to have - more than one replica on the same "rack" (which may not actually - be a physical location) - -CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on "rack1" in "datacenter1", your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new "datacenter") and -decommissioning the old ones. - -Out of the box, Cassandra provides: - -SimpleSnitch: - Treats Strategy order as proximity. This can improve cache - locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack - and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via - gossip. If cassandra-topology.properties exists, it is used as a - fallback, allowing migration from the PropertyFileSnitch. - -PropertyFileSnitch: - Proximity is determined by rack and data center, which are - explicitly configured in cassandra-topology.properties. - -Ec2Snitch: - Appropriate for EC2 deployments in a single Region. Loads Region - and Availability Zone information from the EC2 API. The Region is - treated as the datacenter, and the Availability Zone as the rack. - Only private IPs are used, so this will not work across multiple - Regions. - -Ec2MultiRegionSnitch: - Uses public IPs as broadcast_address to allow cross-region - connectivity. (Thus, you should set seed addresses to the public - IP as well.) You will need to open the storage_port or - ssl_storage_port on the public IP firewall. (For intra-Region - traffic, Cassandra will switch to the private IP after - establishing a connection.) - -RackInferringSnitch: - Proximity is determined by rack and data center, which are - assumed to correspond to the 3rd and 2nd octet of each node's IP - address, respectively. 
Unless this happens to match your - deployment conventions, this is best used as an example of - writing a custom Snitch class and is provided in that spirit. - -You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath. - -*Default Value:* SimpleSnitch - -``dynamic_snitch_update_interval_in_ms`` ----------------------------------------- - -controls how often to perform the more expensive part of host score -calculation - -*Default Value:* 100 - -``dynamic_snitch_reset_interval_in_ms`` ---------------------------------------- -controls how often to reset all host scores, allowing a bad host to -possibly recover - -*Default Value:* 600000 - -``dynamic_snitch_badness_threshold`` ------------------------------------- -if set greater than zero and read_repair_chance is < 1.0, this will allow -'pinning' of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest. - -*Default Value:* 0.1 - -``request_scheduler`` ---------------------- - -request_scheduler -- Set this to a class that implements -RequestScheduler, which will schedule incoming client requests -according to the specific policy. This is useful for multi-tenancy -with a single Cassandra cluster. -NOTE: This is specifically for requests from the client and does -not affect inter node communication. -org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -client requests to a node with a separate queue for each -request_scheduler_id. The scheduler is further customized by -request_scheduler_options as described below. - -*Default Value:* org.apache.cassandra.scheduler.NoScheduler - -``request_scheduler_options`` ------------------------------ -*This option is commented out by default.* - -Scheduler Options vary based on the type of scheduler - -NoScheduler - Has no options - -RoundRobin - throttle_limit - The throttle_limit is the number of in-flight - requests per client. Requests beyond - that limit are queued up until - running requests can complete. - The value of 80 here is twice the number of - concurrent_reads + concurrent_writes. - default_weight - default_weight is optional and allows for - overriding the default which is 1. - weights - Weights are optional and will default to 1 or the - overridden default_weight. The weight translates into how - many requests are handled during each turn of the - RoundRobin, based on the scheduler id. - - -*Default Value (complex option)*:: - - # throttle_limit: 80 - # default_weight: 5 - # weights: - # Keyspace1: 1 - # Keyspace2: 5 - -``request_scheduler_id`` ------------------------- -*This option is commented out by default.* -request_scheduler_id -- An identifier based on which to perform -the request scheduling. Currently the only valid option is keyspace. - -*Default Value:* keyspace - -``server_encryption_options`` ------------------------------ - -Enable or disable inter-node encryption -JVM defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. 
This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html -*NOTE* No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack - -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks - -The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore - - -*Default Value (complex option)*:: - - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -``client_encryption_options`` ------------------------------ -enable or disable client/server encryption. - -*Default Value (complex option)*:: - - enabled: false - # If enabled and optional is set to true encrypted and unencrypted connections are handled. - optional: false - keystore: conf/.keystore - keystore_password: cassandra - # require_client_auth: false - # Set trustore and truststore_password if require_client_auth is true - # truststore: conf/.truststore - # truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - -``internode_compression`` -------------------------- -internode_compression controls whether traffic between nodes is -compressed. -Can be: - -all - all traffic is compressed - -dc - traffic between different datacenters is compressed - -none - nothing is compressed. - -*Default Value:* dc - -``inter_dc_tcp_nodelay`` ------------------------- - -Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses. - -*Default Value:* false - -``tracetype_query_ttl`` ------------------------ - -TTL for different trace types used during logging of the repair process. 
- -*Default Value:* 86400 - -``tracetype_repair_ttl`` ------------------------- - -*Default Value:* 604800 - -``gc_log_threshold_in_ms`` --------------------------- -*This option is commented out by default.* - -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level -This threshold can be adjusted to minimize logging if necessary - -*Default Value:* 200 - -``enable_user_defined_functions`` ---------------------------------- - -If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. - -*Default Value:* false - -``enable_scripted_user_defined_functions`` ------------------------------------------- - -Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false. - -*Default Value:* false - -``windows_timer_interval`` --------------------------- - -The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals 'clockres' tool can confirm your system's default -setting. - -*Default Value:* 1 - -``transparent_data_encryption_options`` ---------------------------------------- - - -Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from -a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by -the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys -can still (and should!) be in the keystore and will be used on decrypt operations -(to handle the case of key rotation). - -It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) - -Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints - -*Default Value (complex option)*:: - - enabled: false - chunk_length_kb: 64 - cipher: AES/CBC/PKCS5Padding - key_alias: testing:1 - # CBC IV length for AES needs to be 16 bytes (which is also the default size) - # iv_length: 16 - key_provider: - - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: - - keystore: conf/.keystore - keystore_password: cassandra - store_type: JCEKS - key_password: cassandra - -``tombstone_warn_threshold`` ----------------------------- - -#################### -SAFETY THRESHOLDS # -#################### - -When executing a scan, within or across a partition, we need to keep the -tombstones seen in memory so we can return them to the coordinator, which -will use them to make sure other replicas also know about the deleted rows. -With workloads that generate a lot of tombstones, this can cause performance -problems and even exaust the server heap. 
-(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -Adjust the thresholds here if you understand the dangers and want to -scan more tombstones anyway. These thresholds may also be adjusted at runtime -using the StorageService mbean. - -*Default Value:* 1000 - -``tombstone_failure_threshold`` -------------------------------- - -*Default Value:* 100000 - -``batch_size_warn_threshold_in_kb`` ------------------------------------ - -Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability. - -*Default Value:* 5 - -``batch_size_fail_threshold_in_kb`` ------------------------------------ - -Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default. - -*Default Value:* 50 - -``unlogged_batch_across_partitions_warn_threshold`` ---------------------------------------------------- - -Log WARN on any batches not of type LOGGED than span across more partitions than this limit - -*Default Value:* 10 - -``compaction_large_partition_warning_threshold_mb`` ---------------------------------------------------- - -Log a warning when compacting partitions larger than this value - -*Default Value:* 100 - -``gc_warn_threshold_in_ms`` ---------------------------- - -GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level - -*Default Value:* 1000 - -``max_value_size_in_mb`` ------------------------- -*This option is commented out by default.* - -Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048. - -*Default Value:* 256 - -``back_pressure_enabled`` -------------------------- - -Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas. - -*Default Value:* false - -``back_pressure_strategy`` --------------------------- -The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map. - -``otc_coalescing_strategy`` ---------------------------- -*This option is commented out by default.* - -Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). 
-On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal -doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details. - -Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name. - -*Default Value:* DISABLED - -``otc_coalescing_window_us`` ----------------------------- -*This option is commented out by default.* - -How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled. - -*Default Value:* 200 - -``otc_coalescing_enough_coalesced_messages`` --------------------------------------------- -*This option is commented out by default.* - -Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128. - -*Default Value:* 8 - -``otc_backlog_expiration_interval_ms`` --------------------------------------- -*This option is commented out by default.* - -How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions. - - -*Default Value:* 200 - -``enable_materialized_views`` ------------------------------ - - -######################## -EXPERIMENTAL FEATURES # -######################## - -Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use. - -*Default Value:* true - -``enable_sasi_indexes`` ------------------------ - -Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use. 
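Because both flags above are experimental, a conservative production configuration might disable them explicitly (an illustrative sketch; the shipped defaults appear in the *Default Value* entries)::

    # Experimental features kept off on production nodes (illustrative only).
    enable_materialized_views: false
    enable_sasi_indexes: false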
- -*Default Value:* true diff --git a/src/doc/3.11.5/_sources/configuration/index.rst.txt b/src/doc/3.11.5/_sources/configuration/index.rst.txt deleted file mode 100644 index f774fdad6..000000000 --- a/src/doc/3.11.5/_sources/configuration/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra -===================== - -This section describes how to configure Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cassandra_config_file diff --git a/src/doc/3.11.5/_sources/contactus.rst.txt b/src/doc/3.11.5/_sources/contactus.rst.txt deleted file mode 100644 index 8d0f5dd04..000000000 --- a/src/doc/3.11.5/_sources/contactus.rst.txt +++ /dev/null @@ -1,53 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contact us -========== - -You can get in touch with the Cassandra community either via the mailing lists or the freenode IRC channels. - -.. _mailing-lists: - -Mailing lists -------------- - -The following mailing lists are available: - -- `Users `__ – General discussion list for users - `Subscribe - `__ -- `Developers `__ – Development related discussion - `Subscribe - `__ -- `Commits `__ – Commit notification source repository - - `Subscribe `__ -- `Client Libraries `__ – Discussion related to the - development of idiomatic client APIs - `Subscribe `__ - -Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe. - -.. _irc-channels: - -IRC ---- - -To chat with developers or users in real-time, join our channels on `IRC freenode `__. The -following channels are available: - -- ``#cassandra`` - for user questions and general discussions. -- ``#cassandra-dev`` - strictly for questions or discussions related to Cassandra development. -- ``#cassandra-builds`` - results of automated test builds. 
- diff --git a/src/doc/3.11.5/_sources/cql/appendices.rst.txt b/src/doc/3.11.5/_sources/cql/appendices.rst.txt deleted file mode 100644 index 480b78ea2..000000000 --- a/src/doc/3.11.5/_sources/cql/appendices.rst.txt +++ /dev/null @@ -1,330 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Appendices ----------- - -.. _appendix-A: - -Appendix A: CQL Keywords -~~~~~~~~~~~~~~~~~~~~~~~~ - -CQL distinguishes between *reserved* and *non-reserved* keywords. -Reserved keywords cannot be used as identifier, they are truly reserved -for the language (but one can enclose a reserved keyword by -double-quotes to use it as an identifier). Non-reserved keywords however -only have a specific meaning in certain context but can used as -identifier otherwise. The only *raison d’être* of these non-reserved -keywords is convenience: some keyword are non-reserved when it was -always easy for the parser to decide whether they were used as keywords -or not. - -+--------------------+-------------+ -| Keyword | Reserved? 
| -+====================+=============+ -| ``ADD`` | yes | -+--------------------+-------------+ -| ``AGGREGATE`` | no | -+--------------------+-------------+ -| ``ALL`` | no | -+--------------------+-------------+ -| ``ALLOW`` | yes | -+--------------------+-------------+ -| ``ALTER`` | yes | -+--------------------+-------------+ -| ``AND`` | yes | -+--------------------+-------------+ -| ``APPLY`` | yes | -+--------------------+-------------+ -| ``AS`` | no | -+--------------------+-------------+ -| ``ASC`` | yes | -+--------------------+-------------+ -| ``ASCII`` | no | -+--------------------+-------------+ -| ``AUTHORIZE`` | yes | -+--------------------+-------------+ -| ``BATCH`` | yes | -+--------------------+-------------+ -| ``BEGIN`` | yes | -+--------------------+-------------+ -| ``BIGINT`` | no | -+--------------------+-------------+ -| ``BLOB`` | no | -+--------------------+-------------+ -| ``BOOLEAN`` | no | -+--------------------+-------------+ -| ``BY`` | yes | -+--------------------+-------------+ -| ``CALLED`` | no | -+--------------------+-------------+ -| ``CLUSTERING`` | no | -+--------------------+-------------+ -| ``COLUMNFAMILY`` | yes | -+--------------------+-------------+ -| ``COMPACT`` | no | -+--------------------+-------------+ -| ``CONTAINS`` | no | -+--------------------+-------------+ -| ``COUNT`` | no | -+--------------------+-------------+ -| ``COUNTER`` | no | -+--------------------+-------------+ -| ``CREATE`` | yes | -+--------------------+-------------+ -| ``CUSTOM`` | no | -+--------------------+-------------+ -| ``DATE`` | no | -+--------------------+-------------+ -| ``DECIMAL`` | no | -+--------------------+-------------+ -| ``DELETE`` | yes | -+--------------------+-------------+ -| ``DESC`` | yes | -+--------------------+-------------+ -| ``DESCRIBE`` | yes | -+--------------------+-------------+ -| ``DISTINCT`` | no | -+--------------------+-------------+ -| ``DOUBLE`` | no | -+--------------------+-------------+ -| ``DROP`` | yes | -+--------------------+-------------+ -| ``ENTRIES`` | yes | -+--------------------+-------------+ -| ``EXECUTE`` | yes | -+--------------------+-------------+ -| ``EXISTS`` | no | -+--------------------+-------------+ -| ``FILTERING`` | no | -+--------------------+-------------+ -| ``FINALFUNC`` | no | -+--------------------+-------------+ -| ``FLOAT`` | no | -+--------------------+-------------+ -| ``FROM`` | yes | -+--------------------+-------------+ -| ``FROZEN`` | no | -+--------------------+-------------+ -| ``FULL`` | yes | -+--------------------+-------------+ -| ``FUNCTION`` | no | -+--------------------+-------------+ -| ``FUNCTIONS`` | no | -+--------------------+-------------+ -| ``GRANT`` | yes | -+--------------------+-------------+ -| ``IF`` | yes | -+--------------------+-------------+ -| ``IN`` | yes | -+--------------------+-------------+ -| ``INDEX`` | yes | -+--------------------+-------------+ -| ``INET`` | no | -+--------------------+-------------+ -| ``INFINITY`` | yes | -+--------------------+-------------+ -| ``INITCOND`` | no | -+--------------------+-------------+ -| ``INPUT`` | no | -+--------------------+-------------+ -| ``INSERT`` | yes | -+--------------------+-------------+ -| ``INT`` | no | -+--------------------+-------------+ -| ``INTO`` | yes | -+--------------------+-------------+ -| ``JSON`` | no | -+--------------------+-------------+ -| ``KEY`` | no | -+--------------------+-------------+ -| ``KEYS`` | no | -+--------------------+-------------+ -| ``KEYSPACE`` | yes | 
-+--------------------+-------------+ -| ``KEYSPACES`` | no | -+--------------------+-------------+ -| ``LANGUAGE`` | no | -+--------------------+-------------+ -| ``LIMIT`` | yes | -+--------------------+-------------+ -| ``LIST`` | no | -+--------------------+-------------+ -| ``LOGIN`` | no | -+--------------------+-------------+ -| ``MAP`` | no | -+--------------------+-------------+ -| ``MODIFY`` | yes | -+--------------------+-------------+ -| ``NAN`` | yes | -+--------------------+-------------+ -| ``NOLOGIN`` | no | -+--------------------+-------------+ -| ``NORECURSIVE`` | yes | -+--------------------+-------------+ -| ``NOSUPERUSER`` | no | -+--------------------+-------------+ -| ``NOT`` | yes | -+--------------------+-------------+ -| ``NULL`` | yes | -+--------------------+-------------+ -| ``OF`` | yes | -+--------------------+-------------+ -| ``ON`` | yes | -+--------------------+-------------+ -| ``OPTIONS`` | no | -+--------------------+-------------+ -| ``OR`` | yes | -+--------------------+-------------+ -| ``ORDER`` | yes | -+--------------------+-------------+ -| ``PASSWORD`` | no | -+--------------------+-------------+ -| ``PERMISSION`` | no | -+--------------------+-------------+ -| ``PERMISSIONS`` | no | -+--------------------+-------------+ -| ``PRIMARY`` | yes | -+--------------------+-------------+ -| ``RENAME`` | yes | -+--------------------+-------------+ -| ``REPLACE`` | yes | -+--------------------+-------------+ -| ``RETURNS`` | no | -+--------------------+-------------+ -| ``REVOKE`` | yes | -+--------------------+-------------+ -| ``ROLE`` | no | -+--------------------+-------------+ -| ``ROLES`` | no | -+--------------------+-------------+ -| ``SCHEMA`` | yes | -+--------------------+-------------+ -| ``SELECT`` | yes | -+--------------------+-------------+ -| ``SET`` | yes | -+--------------------+-------------+ -| ``SFUNC`` | no | -+--------------------+-------------+ -| ``SMALLINT`` | no | -+--------------------+-------------+ -| ``STATIC`` | no | -+--------------------+-------------+ -| ``STORAGE`` | no | -+--------------------+-------------+ -| ``STYPE`` | no | -+--------------------+-------------+ -| ``SUPERUSER`` | no | -+--------------------+-------------+ -| ``TABLE`` | yes | -+--------------------+-------------+ -| ``TEXT`` | no | -+--------------------+-------------+ -| ``TIME`` | no | -+--------------------+-------------+ -| ``TIMESTAMP`` | no | -+--------------------+-------------+ -| ``TIMEUUID`` | no | -+--------------------+-------------+ -| ``TINYINT`` | no | -+--------------------+-------------+ -| ``TO`` | yes | -+--------------------+-------------+ -| ``TOKEN`` | yes | -+--------------------+-------------+ -| ``TRIGGER`` | no | -+--------------------+-------------+ -| ``TRUNCATE`` | yes | -+--------------------+-------------+ -| ``TTL`` | no | -+--------------------+-------------+ -| ``TUPLE`` | no | -+--------------------+-------------+ -| ``TYPE`` | no | -+--------------------+-------------+ -| ``UNLOGGED`` | yes | -+--------------------+-------------+ -| ``UPDATE`` | yes | -+--------------------+-------------+ -| ``USE`` | yes | -+--------------------+-------------+ -| ``USER`` | no | -+--------------------+-------------+ -| ``USERS`` | no | -+--------------------+-------------+ -| ``USING`` | yes | -+--------------------+-------------+ -| ``UUID`` | no | -+--------------------+-------------+ -| ``VALUES`` | no | -+--------------------+-------------+ -| ``VARCHAR`` | no | -+--------------------+-------------+ -| ``VARINT`` | no | 
-+--------------------+-------------+ -| ``WHERE`` | yes | -+--------------------+-------------+ -| ``WITH`` | yes | -+--------------------+-------------+ -| ``WRITETIME`` | no | -+--------------------+-------------+ - -Appendix B: CQL Reserved Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name. - -+-----------------+ -| type | -+=================+ -| ``bitstring`` | -+-----------------+ -| ``byte`` | -+-----------------+ -| ``complex`` | -+-----------------+ -| ``enum`` | -+-----------------+ -| ``interval`` | -+-----------------+ -| ``macaddr`` | -+-----------------+ - - -Appendix C: Dropping Compact Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting version 4.0, Thrift and COMPACT STORAGE is no longer supported. - -'ALTER ... DROP COMPACT STORAGE' statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables: - -- CQL-created Compact Tables that have no clustering columns, will expose an - additional clustering column ``column1`` with ``UTF8Type``. -- CQL-created Compact Tables that had no regular columns, will expose a - regular column ``value`` with ``BytesType``. -- For CQL-Created Compact Tables, all columns originally defined as - ``regular`` will be come ``static`` -- CQL-created Compact Tables that have clustering but have no regular - columns will have an empty value column (of ``EmptyType``) -- SuperColumn Tables (can only be created through Thrift) will expose - a compact value map with an empty name. -- Thrift-created Compact Tables will have types corresponding to their - Thrift definition. diff --git a/src/doc/3.11.5/_sources/cql/changes.rst.txt b/src/doc/3.11.5/_sources/cql/changes.rst.txt deleted file mode 100644 index 1eee5369a..000000000 --- a/src/doc/3.11.5/_sources/cql/changes.rst.txt +++ /dev/null @@ -1,204 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Changes -------- - -The following describes the changes in each version of CQL. - -3.4.4 -^^^^^ - -- ``ALTER TABLE`` ``ALTER`` has been removed; a column's type may not be changed after creation (:jira:`12443`). -- ``ALTER TYPE`` ``ALTER`` has been removed; a field's type may not be changed after creation (:jira:`12443`). - -3.4.3 -^^^^^ - -- Adds a new ``duration `` :ref:`data types ` (:jira:`11873`). -- Support for ``GROUP BY`` (:jira:`10707`). -- Adds a ``DEFAULT UNSET`` option for ``INSERT JSON`` to ignore omitted columns (:jira:`11424`). -- Allows ``null`` as a legal value for TTL on insert and update. It will be treated as equivalent to -inserting a 0 (:jira:`12216`). 
- -3.4.2 -^^^^^ - -- If a table has a non zero ``default_time_to_live``, then explicitly specifying a TTL of 0 in an ``INSERT`` or - ``UPDATE`` statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels - the ``default_time_to_live``). This wasn't the case before and the ``default_time_to_live`` was applied even though a - TTL had been explicitly set. -- ``ALTER TABLE`` ``ADD`` and ``DROP`` now allow multiple columns to be added/removed. -- New ``PER PARTITION LIMIT`` option for ``SELECT`` statements (see `CASSANDRA-7017 - `__. -- :ref:`User-defined functions ` can now instantiate ``UDTValue`` and ``TupleValue`` instances via the - new ``UDFContext`` interface (see `CASSANDRA-10818 `__. -- :ref:`User-defined types ` may now be stored in a non-frozen form, allowing individual fields to be updated and - deleted in ``UPDATE`` statements and ``DELETE`` statements, respectively. (`CASSANDRA-7423 - `__). - -3.4.1 -^^^^^ - -- Adds ``CAST`` functions. - -3.4.0 -^^^^^ - -- Support for :ref:`materialized views `. -- ``DELETE`` support for inequality expressions and ``IN`` restrictions on any primary key columns. -- ``UPDATE`` support for ``IN`` restrictions on any primary key columns. - -3.3.1 -^^^^^ - -- The syntax ``TRUNCATE TABLE X`` is now accepted as an alias for ``TRUNCATE X``. - -3.3.0 -^^^^^ - -- :ref:`User-defined functions and aggregates ` are now supported. -- Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings. -- Introduces Roles to supersede user based authentication and access control -- New ``date``, ``time``, ``tinyint`` and ``smallint`` :ref:`data types ` have been added. -- :ref:`JSON support ` has been added -- Adds new time conversion functions and deprecate ``dateOf`` and ``unixTimestampOf``. - -3.2.0 -^^^^^ - -- :ref:`User-defined types ` supported. -- ``CREATE INDEX`` now supports indexing collection columns, including indexing the keys of map collections through the - ``keys()`` function -- Indexes on collections may be queried using the new ``CONTAINS`` and ``CONTAINS KEY`` operators -- :ref:`Tuple types ` were added to hold fixed-length sets of typed positional fields. -- ``DROP INDEX`` now supports optionally specifying a keyspace. - -3.1.7 -^^^^^ - -- ``SELECT`` statements now support selecting multiple rows in a single partition using an ``IN`` clause on combinations - of clustering columns. -- ``IF NOT EXISTS`` and ``IF EXISTS`` syntax is now supported by ``CREATE USER`` and ``DROP USER`` statements, - respectively. - -3.1.6 -^^^^^ - -- A new ``uuid()`` method has been added. -- Support for ``DELETE ... IF EXISTS`` syntax. - -3.1.5 -^^^^^ - -- It is now possible to group clustering columns in a relation, see :ref:`WHERE ` clauses. -- Added support for :ref:`static columns `. - -3.1.4 -^^^^^ - -- ``CREATE INDEX`` now allows specifying options when creating CUSTOM indexes. - -3.1.3 -^^^^^ - -- Millisecond precision formats have been added to the :ref:`timestamp ` parser. - -3.1.2 -^^^^^ - -- ``NaN`` and ``Infinity`` has been added as valid float constants. They are now reserved keywords. In the unlikely case - you we using them as a column identifier (or keyspace/table one), you will now need to double quote them. - -3.1.1 -^^^^^ - -- ``SELECT`` statement now allows listing the partition keys (using the ``DISTINCT`` modifier). See `CASSANDRA-4536 - `__. -- The syntax ``c IN ?`` is now supported in ``WHERE`` clauses. 
In that case, the value expected for the bind variable - will be a list of whatever type ``c`` is. -- It is now possible to use named bind variables (using ``:name`` instead of ``?``). - -3.1.0 -^^^^^ - -- ``ALTER TABLE`` ``DROP`` option added. -- ``SELECT`` statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported. -- ``CREATE`` statements for ``KEYSPACE``, ``TABLE`` and ``INDEX`` now supports an ``IF NOT EXISTS`` condition. - Similarly, ``DROP`` statements support a ``IF EXISTS`` condition. -- ``INSERT`` statements optionally supports a ``IF NOT EXISTS`` condition and ``UPDATE`` supports ``IF`` conditions. - -3.0.5 -^^^^^ - -- ``SELECT``, ``UPDATE``, and ``DELETE`` statements now allow empty ``IN`` relations (see `CASSANDRA-5626 - `__. - -3.0.4 -^^^^^ - -- Updated the syntax for custom :ref:`secondary indexes `. -- Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not - correct (the order was **not** the one of the type of the partition key). Instead, the ``token`` method should always - be used for range queries on the partition key (see :ref:`WHERE clauses `). - -3.0.3 -^^^^^ - -- Support for custom :ref:`secondary indexes ` has been added. - -3.0.2 -^^^^^ - -- Type validation for the :ref:`constants ` has been fixed. For instance, the implementation used to allow - ``'2'`` as a valid value for an ``int`` column (interpreting it has the equivalent of ``2``), or ``42`` as a valid - ``blob`` value (in which case ``42`` was interpreted as an hexadecimal representation of the blob). This is no longer - the case, type validation of constants is now more strict. See the :ref:`data types ` section for details - on which constant is allowed for which type. -- The type validation fixed of the previous point has lead to the introduction of blobs constants to allow the input of - blobs. Do note that while the input of blobs as strings constant is still supported by this version (to allow smoother - transition to blob constant), it is now deprecated and will be removed by a future version. If you were using strings - as blobs, you should thus update your client code ASAP to switch blob constants. -- A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is - now also allowed in select clauses. See the :ref:`section on functions ` for details. - -3.0.1 -^^^^^ - -- Date strings (and timestamps) are no longer accepted as valid ``timeuuid`` values. Doing so was a bug in the sense - that date string are not valid ``timeuuid``, and it was thus resulting in `confusing behaviors - `__. However, the following new methods have been added to help - working with ``timeuuid``: ``now``, ``minTimeuuid``, ``maxTimeuuid`` , - ``dateOf`` and ``unixTimestampOf``. -- Float constants now support the exponent notation. In other words, ``4.2E10`` is now a valid floating point value. - -Versioning -^^^^^^^^^^ - -Versioning of the CQL language adheres to the `Semantic Versioning `__ guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version. 
- -========= ============================================================================================================= - version description -========= ============================================================================================================= - Major The major version *must* be bumped when backward incompatible changes are introduced. This should rarely - occur. - Minor Minor version increments occur when new, but backward compatible, functionality is introduced. - Patch The patch version is incremented when bugs are fixed. -========= ============================================================================================================= diff --git a/src/doc/3.11.5/_sources/cql/ddl.rst.txt b/src/doc/3.11.5/_sources/cql/ddl.rst.txt deleted file mode 100644 index 302777544..000000000 --- a/src/doc/3.11.5/_sources/cql/ddl.rst.txt +++ /dev/null @@ -1,649 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-definition: - -Data Definition ---------------- - -CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in -*keyspaces*. A keyspace defines a number of options that applies to all the tables it contains, most prominently of -which is the :ref:`replication strategy ` used by the keyspace. It is generally encouraged to use -one keyspace by *application*, and thus many cluster may define only one keyspace. - -This section describes the statements used to create, modify, and remove those keyspace and tables. - -Common definitions -^^^^^^^^^^^^^^^^^^ - -The names of the keyspaces and tables are defined by the following grammar: - -.. productionlist:: - keyspace_name: `name` - table_name: [ `keyspace_name` '.' ] `name` - name: `unquoted_name` | `quoted_name` - unquoted_name: re('[a-zA-Z_0-9]{1, 48}') - quoted_name: '"' `unquoted_name` '"' - -Both keyspace and table name should be comprised of only alphanumeric characters, cannot be empty and are limited in -size to 48 characters (that limit exists mostly to avoid filenames (which may include the keyspace and table name) to go -over the limits of certain file systems). By default, keyspace and table names are case insensitive (``myTable`` is -equivalent to ``mytable``) but case sensitivity can be forced by using double-quotes (``"myTable"`` is different from -``mytable``). - -Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is -part of. If is is not fully-qualified, the table is assumed to be in the *current* keyspace (see :ref:`USE statement -`). - -Further, the valid names for columns is simply defined as: - -.. 
productionlist:: - column_name: `identifier` - -We also define the notion of statement options for use in the following section: - -.. productionlist:: - options: `option` ( AND `option` )* - option: `identifier` '=' ( `identifier` | `constant` | `map_literal` ) - -.. _create-keyspace-statement: - -CREATE KEYSPACE -^^^^^^^^^^^^^^^ - -A keyspace is created using a ``CREATE KEYSPACE`` statement: - -.. productionlist:: - create_keyspace_statement: CREATE KEYSPACE [ IF NOT EXISTS ] `keyspace_name` WITH `options` - -For instance:: - - CREATE KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - - CREATE KEYSPACE Excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3} - AND durable_writes = false; - - -The supported ``options`` are: - -=================== ========== =========== ========= =================================================================== -name kind mandatory default description -=================== ========== =========== ========= =================================================================== -``replication`` *map* yes The replication strategy and options to use for the keyspace (see - details below). -``durable_writes`` *simple* no true Whether to use the commit log for updates on this keyspace - (disable this option at your own risk!). -=================== ========== =========== ========= =================================================================== - -The ``replication`` property is mandatory and must at least contains the ``'class'`` sub-option which defines the -:ref:`replication strategy ` class to use. The rest of the sub-options depends on what replication -strategy is used. By default, Cassandra support the following ``'class'``: - -- ``'SimpleStrategy'``: A simple strategy that defines a replication factor for the whole cluster. The only sub-options - supported is ``'replication_factor'`` to define that replication factor and is mandatory. -- ``'NetworkTopologyStrategy'``: A replication strategy that allows to set the replication factor independently for - each data-center. The rest of the sub-options are key-value pairs where a key is a data-center name and its value is - the associated replication factor. - -Attempting to create a keyspace that already exists will return an error unless the ``IF NOT EXISTS`` option is used. If -it is used, the statement will be a no-op if the keyspace already exists. - -.. _use-statement: - -USE -^^^ - -The ``USE`` statement allows to change the *current* keyspace (for the *connection* on which it is executed). A number -of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the -default keyspace used when those objects are referred without a fully-qualified name (that is, without being prefixed a -keyspace name). A ``USE`` statement simply takes the keyspace to use as current as argument: - -.. productionlist:: - use_statement: USE `keyspace_name` - -.. _alter-keyspace-statement: - -ALTER KEYSPACE -^^^^^^^^^^^^^^ - -An ``ALTER KEYSPACE`` statement allows to modify the options of a keyspace: - -.. productionlist:: - alter_keyspace_statement: ALTER KEYSPACE `keyspace_name` WITH `options` - -For instance:: - - ALTER KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4}; - -The supported options are the same than for :ref:`creating a keyspace `. - -.. 
_drop-keyspace-statement: - -DROP KEYSPACE -^^^^^^^^^^^^^ - -Dropping a keyspace can be done using the ``DROP KEYSPACE`` statement: - -.. productionlist:: - drop_keyspace_statement: DROP KEYSPACE [ IF EXISTS ] `keyspace_name` - -For instance:: - - DROP KEYSPACE Excelsior; - -Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UTD and -functions in it, and all the data contained in those tables. - -If the keyspace does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _create-table-statement: - -CREATE TABLE -^^^^^^^^^^^^ - -Creating a new table uses the ``CREATE TABLE`` statement: - -.. productionlist:: - create_table_statement: CREATE TABLE [ IF NOT EXISTS ] `table_name` - : '(' - : `column_definition` - : ( ',' `column_definition` )* - : [ ',' PRIMARY KEY '(' `primary_key` ')' ] - : ')' [ WITH `table_options` ] - column_definition: `column_name` `cql_type` [ STATIC ] [ PRIMARY KEY] - primary_key: `partition_key` [ ',' `clustering_columns` ] - partition_key: `column_name` - : | '(' `column_name` ( ',' `column_name` )* ')' - clustering_columns: `column_name` ( ',' `column_name` )* - table_options: COMPACT STORAGE [ AND `table_options` ] - : | CLUSTERING ORDER BY '(' `clustering_order` ')' [ AND `table_options` ] - : | `options` - clustering_order: `column_name` (ASC | DESC) ( ',' `column_name` (ASC | DESC) )* - -For instance:: - - CREATE TABLE monkeySpecies ( - species text PRIMARY KEY, - common_name text, - population varint, - average_size int - ) WITH comment='Important biological records' - AND read_repair_chance = 1.0; - - CREATE TABLE timeline ( - userid uuid, - posted_month int, - posted_time uuid, - body text, - posted_by text, - PRIMARY KEY (userid, posted_month, posted_time) - ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }; - - CREATE TABLE loads ( - machine inet, - cpu int, - mtime timeuuid, - load float, - PRIMARY KEY ((machine, cpu), mtime) - ) WITH CLUSTERING ORDER BY (mtime DESC); - -A CQL table has a name and is composed of a set of *rows*. Creating a table amounts to defining which :ref:`columns -` the rows will be composed, which of those columns compose the :ref:`primary key `, as -well as optional :ref:`options ` for the table. - -Attempting to create an already existing table will return an error unless the ``IF NOT EXISTS`` directive is used. If -it is used, the statement will be a no-op if the table already exists. - - -.. _column-definition: - -Column definitions -~~~~~~~~~~~~~~~~~~ - -Every rows in a CQL table has a set of predefined columns defined at the time of the table creation (or added later -using an :ref:`alter statement`). - -A :token:`column_definition` is primarily comprised of the name of the column defined and it's :ref:`type `, -which restrict which values are accepted for that column. Additionally, a column definition can have the following -modifiers: - -``STATIC`` - it declares the column as being a :ref:`static column `. - -``PRIMARY KEY`` - it declares the column as being the sole component of the :ref:`primary key ` of the table. - -.. _static-columns: - -Static columns -`````````````` -Some columns can be declared as ``STATIC`` in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same :ref:`partition key `). 
For instance:: - - CREATE TABLE t ( - pk int, - t int, - v text, - s text static, - PRIMARY KEY (pk, t) - ); - - INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0'); - INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1'); - - SELECT * FROM t; - pk | t | v | s - ----+---+--------+----------- - 0 | 0 | 'val0' | 'static1' - 0 | 1 | 'val1' | 'static1' - -As can be seen, the ``s`` value is the same (``static1``) for both of the row in the partition (the partition key in -that example being ``pk``, both rows are in that same partition): the 2nd insertion has overridden the value for ``s``. - -The use of static columns as the following restrictions: - -- tables with the ``COMPACT STORAGE`` option (see below) cannot use them. -- a table without clustering columns cannot have static columns (in a table without clustering columns, every partition - has only one row, and so every column is inherently static). -- only non ``PRIMARY KEY`` columns can be static. - -.. _primary-key: - -The Primary key -~~~~~~~~~~~~~~~ - -Within a table, a row is uniquely identified by its ``PRIMARY KEY``, and hence all table **must** define a PRIMARY KEY -(and only one). A ``PRIMARY KEY`` definition is composed of one or more of the columns defined in the table. -Syntactically, the primary key is defined the keywords ``PRIMARY KEY`` followed by comma-separated list of the column -names composing it within parenthesis, but if the primary key has only one column, one can alternatively follow that -column definition by the ``PRIMARY KEY`` keywords. The order of the columns in the primary key definition matter. - -A CQL primary key is composed of 2 parts: - -- the :ref:`partition key ` part. It is the first component of the primary key definition. It can be a - single column or, using additional parenthesis, can be multiple columns. A table always have at least a partition key, - the smallest possible table definition is:: - - CREATE TABLE t (k text PRIMARY KEY); - -- the :ref:`clustering columns `. Those are the columns after the first component of the primary key - definition, and the order of those columns define the *clustering order*. - -Some example of primary key definition are: - -- ``PRIMARY KEY (a)``: ``a`` is the partition key and there is no clustering columns. -- ``PRIMARY KEY (a, b, c)`` : ``a`` is the partition key and ``b`` and ``c`` are the clustering columns. -- ``PRIMARY KEY ((a, b), c)`` : ``a`` and ``b`` compose the partition key (this is often called a *composite* partition - key) and ``c`` is the clustering column. - - -.. _partition-key: - -The partition key -````````````````` - -Within a table, CQL defines the notion of a *partition*. A partition is simply the set of rows that share the same value -for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same -partition only they have the same values for all those partition key column. So for instance, given the following table -definition and content:: - - CREATE TABLE t ( - a int, - b int, - c int, - d int, - PRIMARY KEY ((a, b), c, d) - ); - - SELECT * FROM t; - a | b | c | d - ---+---+---+--- - 0 | 0 | 0 | 0 // row 1 - 0 | 0 | 1 | 1 // row 2 - 0 | 1 | 2 | 2 // row 3 - 0 | 1 | 3 | 3 // row 4 - 1 | 1 | 4 | 4 // row 5 - -``row 1`` and ``row 2`` are in the same partition, ``row 3`` and ``row 4`` are also in the same partition (but a -different one) and ``row 5`` is in yet another partition. 
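As an illustration (a minimal sketch reusing the table ``t`` and the rows shown just above), restricting a query on every column of the composite partition key returns exactly the rows of one partition::

    SELECT * FROM t WHERE a = 0 AND b = 0;
       a | b | c | d
      ---+---+---+---
       0 | 0 | 0 | 0    // row 1
       0 | 0 | 1 | 1    // row 2

Note that all columns of a composite partition key (here both ``a`` and ``b``) have to be restricted by equality to address a single partition in this way.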
- -Note that a table always has a partition key, and that if the table has no :ref:`clustering columns -`, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns). - -The most important property of partition is that all the rows belonging to the same partition are guarantee to be stored -on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be -localized together in the Cluster, and it is thus important to choose your partition key wisely so that rows that needs -to be fetch together are in the same partition (so that querying those rows together require contacting a minimum of -nodes). - -Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to -be stored on the same set of replica node, a partition key that groups too much data can create a hotspot. - -Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done *atomically* and in *isolation*, which is not the case across partitions. - -The proper choice of the partition key and clustering columns for a table is probably one of the most important aspect -of data modeling in Cassandra, and it largely impact which queries can be performed, and how efficiently they are. - - -.. _clustering-columns: - -The clustering columns -`````````````````````` - -The clustering columns of a table defines the clustering order for the partition of that table. For a given -:ref:`partition `, all the rows are physically ordered inside Cassandra by that clustering order. For -instance, given:: - - CREATE TABLE t ( - a int, - b int, - c int, - PRIMARY KEY (a, c, d) - ); - - SELECT * FROM t; - a | b | c - ---+---+--- - 0 | 0 | 4 // row 1 - 0 | 1 | 9 // row 2 - 0 | 2 | 2 // row 3 - 0 | 3 | 3 // row 4 - -then the rows (which all belong to the same partition) are all stored internally in the order of the values of their -``b`` column (the order they are displayed above). So where the partition key of the table allows to group rows on the -same replica set, the clustering columns controls how those rows are stored on the replica. That sorting allows the -retrieval of a range of rows within a partition (for instance, in the example above, ``SELECT * FROM t WHERE a = 0 AND b -> 1 and b <= 3``) to be very efficient. - - -.. _create-table-options: - -Table options -~~~~~~~~~~~~~ - -A CQL table has a number of options that can be set at creation (and, for most of them, :ref:`altered -` later). These options are specified after the ``WITH`` keyword. - -Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the ``COMPACT STORAGE`` option and the ``CLUSTERING ORDER`` option. Those, as well as the other -options of a table are described in the following sections. - -.. _compact-tables: - -Compact tables -`````````````` - -.. warning:: Since Cassandra 3.0, compact tables have the exact same layout internally than non compact ones (for the - same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition - and usage that are necessary to ensure backward compatibility with the deprecated Thrift API. 
And as ``COMPACT - STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new table with the - ``COMPACT STORAGE`` option. - -A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is mainly targeted towards backward -compatibility for definitions created before CQL version 3 (see `www.datastax.com/dev/blog/thrift-to-cql3 -`__ for more details) and shouldn't be used for new tables. Declaring a -table with this option creates limitations for the table which are largely arbitrary but necessary for backward -compatibility with the (deprecated) Thrift API. Amongst those limitation: - -- a compact table cannot use collections nor static columns. -- if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary - key ones. This imply you cannot add or remove columns after creation in particular. -- a compact table is limited in the indexes it can create, and no materialized view can be created on it. - -.. _clustering-order: - -Reversing the clustering order -`````````````````````````````` - -The clustering order of a table is defined by the :ref:`clustering columns ` of that table. By -default, that ordering is based on natural order of those clustering order, but the ``CLUSTERING ORDER`` allows to -change that clustering order to use the *reverse* natural order for some (potentially all) of the columns. - -The ``CLUSTERING ORDER`` option takes the comma-separated list of the clustering column, each with a ``ASC`` (for -*ascendant*, e.g. the natural order) or ``DESC`` (for *descendant*, e.g. the reverse natural order). Note in particular -that the default (if the ``CLUSTERING ORDER`` option is not used) is strictly equivalent to using the option with all -clustering columns using the ``ASC`` modifier. - -Note that this option is basically a hint for the storage engine to change the order in which it stores the row but it -has 3 visible consequences: - -# it limits which ``ORDER BY`` clause are allowed for :ref:`selects ` on that table. You can only - order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering column - ``a`` and ``b`` and you defined ``WITH CLUSTERING ORDER (a DESC, b ASC)``, then in queries you will be allowed to use - ``ORDER BY (a DESC, b ASC)`` and (reverse clustering order) ``ORDER BY (a ASC, b DESC)`` but **not** ``ORDER BY (a - ASC, b ASC)`` (nor ``ORDER BY (a DESC, b DESC)``). -# it also change the default order of results when queried (if no ``ORDER BY`` is provided). Results are always returned - in clustering order (within a partition). -# it has a small performance impact on some queries as queries in reverse clustering order are slower than the one in - forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of - your columns (which is common with time series for instance where you often want data from the newest to the oldest), - it is an optimization to declare a descending clustering order. - -.. _create-table-general-options: - -Other table options -``````````````````` - -.. 
todo:: review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance) - -A table supports the following options: - -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| option | kind | default | description | -+================================+==========+=============+===========================================================+ -| ``comment`` | *simple* | none | A free-form, human-readable comment. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``read_repair_chance`` | *simple* | 0.1 | The probability with which to query extra nodes (e.g. | -| | | | more nodes than required by the consistency level) for | -| | | | the purpose of read repairs. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``dclocal_read_repair_chance`` | *simple* | 0 | The probability with which to query extra nodes (e.g. | -| | | | more nodes than required by the consistency level) | -| | | | belonging to the same data center than the read | -| | | | coordinator for the purpose of read repairs. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``gc_grace_seconds`` | *simple* | 864000 | Time to wait before garbage collecting tombstones | -| | | | (deletion markers). | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``bloom_filter_fp_chance`` | *simple* | 0.00075 | The target probability of false positive of the sstable | -| | | | bloom filters. Said bloom filters will be sized to provide| -| | | | the provided probability (thus lowering this value impact | -| | | | the size of bloom filters in-memory and on-disk) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``default_time_to_live`` | *simple* | 0 | The default expiration time (“TTL”) in seconds for a | -| | | | table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compaction`` | *map* | *see below* | :ref:`Compaction options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compression`` | *map* | *see below* | :ref:`Compression options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``caching`` | *map* | *see below* | :ref:`Caching options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ - -.. _cql-compaction-options: - -Compaction options -################## - -The ``compaction`` options must at least define the ``'class'`` sub-option, that defines the compaction strategy class -to use. The default supported class are ``'SizeTieredCompactionStrategy'`` (:ref:`STCS `), -``'LeveledCompactionStrategy'`` (:ref:`LCS `) and ``'TimeWindowCompactionStrategy'`` (:ref:`TWCS `) (the -``'DateTieredCompactionStrategy'`` is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be -preferred instead). Custom strategy can be provided by specifying the full class name as a :ref:`string constant -`. 
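For instance (a minimal sketch using a hypothetical table named ``events``), the strategy is selected through the ``'class'`` sub-option::

    CREATE TABLE events (
        id timeuuid PRIMARY KEY,
        payload text
    ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };

A custom strategy would instead reference its full class name as a string constant, for example ``'class' : 'com.example.MyCompactionStrategy'`` (that class name is purely illustrative).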
- -All default strategies support a number of :ref:`common options `, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS `, :ref:`LCS -` and :ref:`TWCS `). - -.. _cql-compression-options: - -Compression options -################### - -The ``compression`` options define if and how the sstables of the table are compressed. The following sub-options are -available: - -========================= =============== ============================================================================= - Option Default Description -========================= =============== ============================================================================= - ``class`` LZ4Compressor The compression algorithm to use. Default compressor are: LZ4Compressor, - SnappyCompressor and DeflateCompressor. Use ``'enabled' : false`` to disable - compression. Custom compressor can be provided by specifying the full class - name as a “string constant”:#constants. - ``enabled`` true Enable/disable sstable compression. - ``chunk_length_in_kb`` 64 On disk SSTables are compressed by block (to allow random reads). This - defines the size (in KB) of said block. Bigger values may improve the - compression rate, but increases the minimum size of data to be read from disk - for a read - ``crc_check_chance`` 1.0 When compression is enabled, each compressed block includes a checksum of - that block for the purpose of detecting disk bitrot and avoiding the - propagation of corruption to other replica. This option defines the - probability with which those checksums are checked during read. By default - they are always checked. Set to 0 to disable checksum checking and to 0.5 for - instance to check them every other read | -========================= =============== ============================================================================= - - -For instance, to create a table with LZ4Compressor and a chunk_lenth_in_kb of 4KB:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4}; - - -.. _cql-caching-options: - -Caching options -############### - -The ``caching`` options allows to configure both the *key cache* and the *row cache* for the table. The following -sub-options are available: - -======================== ========= ==================================================================================== - Option Default Description -======================== ========= ==================================================================================== - ``keys`` ALL Whether to cache keys (“key cache”) for this table. Valid values are: ``ALL`` and - ``NONE``. - ``rows_per_partition`` NONE The amount of rows to cache per partition (“row cache”). If an integer ``n`` is - specified, the first ``n`` queried rows of a partition will be cached. Other - possible options are ``ALL``, to cache all rows of a queried partition, or ``NONE`` - to disable row caching. -======================== ========= ==================================================================================== - - -For instance, to create a table with both a key cache and 10 rows per partition:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10}; - - -Other considerations: -##################### - -- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. 
There is thus no need to try to - anticipate future usage when creating a table. - -.. _alter-table-statement: - -ALTER TABLE -^^^^^^^^^^^ - -Altering an existing table uses the ``ALTER TABLE`` statement: - -.. productionlist:: - alter_table_statement: ALTER TABLE `table_name` `alter_table_instruction` - alter_table_instruction: ADD `column_name` `cql_type` ( ',' `column_name` `cql_type` )* - : | DROP `column_name` ( `column_name` )* - : | WITH `options` - -For instance:: - - ALTER TABLE addamsFamily ADD gravesite varchar; - - ALTER TABLE addamsFamily - WITH comment = 'A most excellent and useful table' - AND read_repair_chance = 0.2; - -The ``ALTER TABLE`` statement can: - -- Add new column(s) to the table (through the ``ADD`` instruction). Note that the primary key of a table cannot be - changed and thus newly added column will, by extension, never be part of the primary key. Also note that :ref:`compact - tables ` have restrictions regarding column addition. Note that this is constant (in the amount of - data the cluster contains) time operation. -- Remove column(s) from the table. This drops both the column and all its content, but note that while the column - becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings - below. Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the - cluster) time operation. -- Change some of the table options (through the ``WITH`` instruction). The :ref:`supported options - ` are the same that when creating a table (outside of ``COMPACT STORAGE`` and ``CLUSTERING - ORDER`` that cannot be changed after creation). Note that setting any ``compaction`` sub-options has the effect of - erasing all previous ``compaction`` options, so you need to re-specify all the sub-options if you want to keep them. - The same note applies to the set of ``compression`` sub-options. - -.. warning:: Dropping a column assumes that the timestamps used for the value of this column are "real" timestamp in - microseconds. Using "real" timestamps in microseconds is the default is and is **strongly** recommended but as - Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another - convention. Please be aware that if you do so, dropping a column will not work correctly. - -.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name than the dropped one - **unless** the type of the dropped column was a (non-frozen) column (due to an internal technical limitation). - - -.. _drop-table-statement: - -DROP TABLE -^^^^^^^^^^ - -Dropping a table uses the ``DROP TABLE`` statement: - -.. productionlist:: - drop_table_statement: DROP TABLE [ IF EXISTS ] `table_name` - -Dropping a table results in the immediate, irreversible removal of the table, including all data it contains. - -If the table does not exist, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _truncate-statement: - -TRUNCATE -^^^^^^^^ - -A table can be truncated using the ``TRUNCATE`` statement: - -.. productionlist:: - truncate_statement: TRUNCATE [ TABLE ] `table_name` - -Note that ``TRUNCATE TABLE foo`` is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the ``TABLE`` keyword can be omitted. - -Truncating a table permanently removes all existing data from the table, but without removing the table itself. 
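For instance (a minimal sketch, reusing the ``monkeySpecies`` table created earlier in this section)::

    TRUNCATE monkeySpecies;

    -- equivalent form using the optional TABLE keyword
    TRUNCATE TABLE monkeySpecies;

Both forms remove every row from the table while leaving the table definition itself in place.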
diff --git a/src/doc/3.11.5/_sources/cql/definitions.rst.txt b/src/doc/3.11.5/_sources/cql/definitions.rst.txt deleted file mode 100644 index d4a5b59b9..000000000 --- a/src/doc/3.11.5/_sources/cql/definitions.rst.txt +++ /dev/null @@ -1,232 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. highlight:: cql - -Definitions ------------ - -.. _conventions: - -Conventions -^^^^^^^^^^^ - -To aid in specifying the CQL syntax, we will use the following conventions in this document: - -- Language rules will be given in an informal `BNF variant - `_ notation. In particular, we'll use square brakets - (``[ item ]``) for optional items, ``*`` and ``+`` for repeated items (where ``+`` imply at least one). -- The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to - their definition) while terminal keywords will be provided "all caps". Note however that keywords are - :ref:`identifiers` and are thus case insensitive in practice. We will also define some early construction using - regexp, which we'll indicate with ``re()``. -- The grammar is provided for documentation purposes and leave some minor details out. For instance, the comma on the - last column definition in a ``CREATE TABLE`` statement is optional but supported if present even though the grammar in - this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL. -- References to keywords or pieces of CQL code in running text will be shown in a ``fixed-width font``. - - -.. _identifiers: - -Identifiers and keywords -^^^^^^^^^^^^^^^^^^^^^^^^ - -The CQL language uses *identifiers* (or *names*) to identify tables, columns and other objects. An identifier is a token -matching the regular expression ``[a-zA-Z][a-zA-Z0-9_]*``. - -A number of such identifiers, like ``SELECT`` or ``WITH``, are *keywords*. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in :ref:`appendix-A`. - -Identifiers and (unquoted) keywords are case insensitive. Thus ``SELECT`` is the same than ``select`` or ``sElEcT``, and -``myId`` is the same than ``myid`` or ``MYID``. A convention often used (in particular by the samples of this -documentation) is to use upper case for keywords and lower case for other identifiers. - -There is a second kind of identifiers called *quoted identifiers* defined by enclosing an arbitrary sequence of -characters (non empty) in double-quotes(``"``). Quoted identifiers are never keywords. Thus ``"select"`` is not a -reserved keyword and can be used to refer to a column (note that using this is particularly advised), while ``select`` -would raise a parsing error. 
Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case -sensitive (``"My Quoted Id"`` is *different* from ``"my quoted id"``). A fully lowercase quoted identifier that matches -``[a-zA-Z][a-zA-Z0-9_]*`` is however *equivalent* to the unquoted identifier obtained by removing the double-quote (so -``"myid"`` is equivalent to ``myid`` and to ``myId`` but different from ``"myId"``). Inside a quoted identifier, the -double-quote character can be repeated to escape it, so ``"foo "" bar"`` is a valid identifier. - -.. note:: *quoted identifiers* allows to declare columns with arbitrary names, and those can sometime clash with - specific names used by the server. For instance, when using conditional update, the server will respond with a - result-set containing a special result named ``"[applied]"``. If you’ve declared a column with such a name, this - could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but - if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like - ``"[applied]"``) and any name that looks like a function call (like ``"f(x)"``). - -More formally, we have: - -.. productionlist:: - identifier: `unquoted_identifier` | `quoted_identifier` - unquoted_identifier: re('[a-zA-Z][a-zA-Z0-9_]*') - quoted_identifier: '"' (any character where " can appear if doubled)+ '"' - -.. _constants: - -Constants -^^^^^^^^^ - -CQL defines the following kind of *constants*: - -.. productionlist:: - constant: `string` | `integer` | `float` | `boolean` | `uuid` | `blob` | NULL - string: '\'' (any character where ' can appear if doubled)+ '\'' - : '$$' (any character other than '$$') '$$' - integer: re('-?[0-9]+') - float: re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY - boolean: TRUE | FALSE - uuid: `hex`{8}-`hex`{4}-`hex`{4}-`hex`{4}-`hex`{12} - hex: re("[0-9a-fA-F]") - blob: '0' ('x' | 'X') `hex`+ - -In other words: - -- A string constant is an arbitrary sequence of characters enclosed by single-quote(``'``). A single-quote - can be included by repeating it, e.g. ``'It''s raining today'``. Those are not to be confused with quoted - :ref:`identifiers` that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence - of characters by two dollar characters, in which case single-quote can be used without escaping (``$$It's raining - today$$``). That latter form is often used when defining :ref:`user-defined functions ` to avoid having to - escape single-quote characters in function body (as they are more likely to occur than ``$$``). -- Integer, float and boolean constant are defined as expected. Note however than float allows the special ``NaN`` and - ``Infinity`` constants. -- CQL supports UUID_ constants. -- Blobs content are provided in hexadecimal and prefixed by ``0x``. -- The special ``NULL`` constant denotes the absence of value. - -For how these constants are typed, see the :ref:`data-types` section. - -Terms -^^^^^ - -CQL has the notion of a *term*, which denotes the kind of values that CQL support. Terms are defined by: - -.. productionlist:: - term: `constant` | `literal` | `function_call` | `type_hint` | `bind_marker` - literal: `collection_literal` | `udt_literal` | `tuple_literal` - function_call: `identifier` '(' [ `term` (',' `term`)* ] ')' - type_hint: '(' `cql_type` `)` term - bind_marker: '?' | ':' `identifier` - -A term is thus one of: - -- A :ref:`constant `. 
-- A literal for either :ref:`a collection `, :ref:`a user-defined type ` or :ref:`a tuple ` - (see the linked sections for details). -- A function call: see :ref:`the section on functions ` for details on which :ref:`native function - ` exists and how to define your own :ref:`user-defined ones `. -- A *type hint*: see the :ref:`related section ` for details. -- A bind marker, which denotes a variable to be bound at execution time. See the section on :ref:`prepared-statements` - for details. A bind marker can be either anonymous (``?``) or named (``:some_name``). The latter form provides a more - convenient way to refer to the variable for binding it and should generally be preferred. - - -Comments -^^^^^^^^ - -A comment in CQL is a line beginning by either double dashes (``--``) or double slash (``//``). - -Multi-line comments are also supported through enclosure within ``/*`` and ``*/`` (but nesting is not supported). - -:: - - -- This is a comment - // This is a comment too - /* This is - a multi-line comment */ - -Statements -^^^^^^^^^^ - -CQL consists of statements that can be divided in the following categories: - -- :ref:`data-definition` statements, to define and change how the data is stored (keyspaces and tables). -- :ref:`data-manipulation` statements, for selecting, inserting and deleting data. -- :ref:`secondary-indexes` statements. -- :ref:`materialized-views` statements. -- :ref:`cql-roles` statements. -- :ref:`cql-permissions` statements. -- :ref:`User-Defined Functions ` statements. -- :ref:`udts` statements. -- :ref:`cql-triggers` statements. - -All the statements are listed below and are described in the rest of this documentation (see links above): - -.. productionlist:: - cql_statement: `statement` [ ';' ] - statement: `ddl_statement` - : | `dml_statement` - : | `secondary_index_statement` - : | `materialized_view_statement` - : | `role_or_permission_statement` - : | `udf_statement` - : | `udt_statement` - : | `trigger_statement` - ddl_statement: `use_statement` - : | `create_keyspace_statement` - : | `alter_keyspace_statement` - : | `drop_keyspace_statement` - : | `create_table_statement` - : | `alter_table_statement` - : | `drop_table_statement` - : | `truncate_statement` - dml_statement: `select_statement` - : | `insert_statement` - : | `update_statement` - : | `delete_statement` - : | `batch_statement` - secondary_index_statement: `create_index_statement` - : | `drop_index_statement` - materialized_view_statement: `create_materialized_view_statement` - : | `drop_materialized_view_statement` - role_or_permission_statement: `create_role_statement` - : | `alter_role_statement` - : | `drop_role_statement` - : | `grant_role_statement` - : | `revoke_role_statement` - : | `list_roles_statement` - : | `grant_permission_statement` - : | `revoke_permission_statement` - : | `list_permissions_statement` - : | `create_user_statement` - : | `alter_user_statement` - : | `drop_user_statement` - : | `list_users_statement` - udf_statement: `create_function_statement` - : | `drop_function_statement` - : | `create_aggregate_statement` - : | `drop_aggregate_statement` - udt_statement: `create_type_statement` - : | `alter_type_statement` - : | `drop_type_statement` - trigger_statement: `create_trigger_statement` - : | `drop_trigger_statement` - -.. _prepared-statements: - -Prepared Statements -^^^^^^^^^^^^^^^^^^^ - -CQL supports *prepared statements*. 
Prepared statements are an optimization that allows to parse a query only once but -execute it multiple times with different concrete values. - -Any statement that uses at least one bind marker (see :token:`bind_marker`) will need to be *prepared*. After which the statement -can be *executed* by provided concrete values for each of its marker. The exact details of how a statement is prepared -and then executed depends on the CQL driver used and you should refer to your driver documentation. diff --git a/src/doc/3.11.5/_sources/cql/dml.rst.txt b/src/doc/3.11.5/_sources/cql/dml.rst.txt deleted file mode 100644 index 1308de57e..000000000 --- a/src/doc/3.11.5/_sources/cql/dml.rst.txt +++ /dev/null @@ -1,522 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-manipulation: - -Data Manipulation ------------------ - -This section describes the statements supported by CQL to insert, update, delete and query data. - -.. _select-statement: - -SELECT -^^^^^^ - -Querying data from data is done using a ``SELECT`` statement: - -.. productionlist:: - select_statement: SELECT [ JSON | DISTINCT ] ( `select_clause` | '*' ) - : FROM `table_name` - : [ WHERE `where_clause` ] - : [ GROUP BY `group_by_clause` ] - : [ ORDER BY `ordering_clause` ] - : [ PER PARTITION LIMIT (`integer` | `bind_marker`) ] - : [ LIMIT (`integer` | `bind_marker`) ] - : [ ALLOW FILTERING ] - select_clause: `selector` [ AS `identifier` ] ( ',' `selector` [ AS `identifier` ] ) - selector: `column_name` - : | `term` - : | CAST '(' `selector` AS `cql_type` ')' - : | `function_name` '(' [ `selector` ( ',' `selector` )* ] ')' - : | COUNT '(' '*' ')' - where_clause: `relation` ( AND `relation` )* - relation: `column_name` `operator` `term` - : '(' `column_name` ( ',' `column_name` )* ')' `operator` `tuple_literal` - : TOKEN '(' `column_name` ( ',' `column_name` )* ')' `operator` `term` - operator: '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY - group_by_clause: `column_name` ( ',' `column_name` )* - ordering_clause: `column_name` [ ASC | DESC ] ( ',' `column_name` [ ASC | DESC ] )* - -For instance:: - - SELECT name, occupation FROM users WHERE userid IN (199, 200, 207); - SELECT JSON name, occupation FROM users WHERE userid = 199; - SELECT name AS user_name, occupation AS user_occupation FROM users; - - SELECT time, value - FROM events - WHERE event_type = 'myEvent' - AND time > '2011-02-03' - AND time <= '2012-01-01' - - SELECT COUNT (*) AS user_count FROM users; - -The ``SELECT`` statements reads one or more columns for one or more rows in a table. It returns a result-set of the rows -matching the request, where each row contains the values for the selection corresponding to the query. 
Additionally, -:ref:`functions ` including :ref:`aggregation ` ones can be applied to the result. - -A ``SELECT`` statement contains at least a :ref:`selection clause ` and the name of the table on which -the selection is on (note that CQL does **not** joins or sub-queries and thus a select statement only apply to a single -table). In most case, a select will also have a :ref:`where clause ` and it can optionally have additional -clauses to :ref:`order ` or :ref:`limit ` the results. Lastly, :ref:`queries that require -filtering ` can be allowed if the ``ALLOW FILTERING`` flag is provided. - -.. _selection-clause: - -Selection clause -~~~~~~~~~~~~~~~~ - -The :token:`select_clause` determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. It consists of a comma-separated list of *selectors* or, -alternatively, of the wildcard character (``*``) to select all the columns defined in the table. - -Selectors -````````` - -A :token:`selector` can be one of: - -- A column name of the table selected, to retrieve the values for that column. -- A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the - corresponding column of the result-set will simply have the value of this term for every row returned). -- A casting, which allows to convert a nested selector to a (compatible) type. -- A function call, where the arguments are selector themselves. See the section on :ref:`functions ` for - more details. -- The special call ``COUNT(*)`` to the :ref:`COUNT function `, which counts all non-null results. - -Aliases -``````` - -Every *top-level* selector can also be aliased (using `AS`). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:: - - // Without alias - SELECT intAsBlob(4) FROM t; - - // intAsBlob(4) - // -------------- - // 0x00000004 - - // With alias - SELECT intAsBlob(4) AS four FROM t; - - // four - // ------------ - // 0x00000004 - -.. note:: Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the ``WHERE`` - clause, not in the ``ORDER BY`` clause, ...). You must use the orignal column name instead. - - -``WRITETIME`` and ``TTL`` function -``````````````````````````````````` - -Selection supports two special functions (that aren't allowed anywhere else): ``WRITETIME`` and ``TTL``. Both function -take only one argument and that argument *must* be a column name (so for instance ``TTL(3)`` is invalid). - -Those functions allow to retrieve meta-information that are stored internally for each column, namely: - -- the timestamp of the value of the column for ``WRITETIME``. -- the remaining time to live (in seconds) for the value of the column if it set to expire (and ``null`` otherwise). - -.. _where-clause: - -The ``WHERE`` clause -~~~~~~~~~~~~~~~~~~~~ - -The ``WHERE`` clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the ``PRIMARY KEY`` and/or have a `secondary index <#createIndexStmt>`__ defined on them. - -Not all relations are allowed in a query. For instance, non-equal relations (where ``IN`` is considered as an equal -relation) on a partition key are not supported (but see the use of the ``TOKEN`` method below to do non-equal queries on -the partition key). 
Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations -on them is restricted to the relations that allow to select a **contiguous** (for the ordering) set of rows. For -instance, given:: - - CREATE TABLE posts ( - userid text, - blog_title text, - posted_at timestamp, - entry_title text, - content text, - category int, - PRIMARY KEY (userid, blog_title, posted_at) - ) - -The following query is allowed:: - - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND blog_title='John''s Blog' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):: - - // Needs a blog_title to be set to select ranges of posted_at - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -When specifying relations, the ``TOKEN`` function can be used on the ``PARTITION KEY`` column to query. In that case, -rows will be selected based on the token of their ``PARTITION_KEY`` rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -``token(-1) > token(0)`` in particular). Example:: - - SELECT * FROM posts - WHERE token(userid) > token('tom') AND token(userid) < token('bob') - -Moreover, the ``IN`` relation is only allowed on the last column of the partition key and on the last column of the full -primary key. - -It is also possible to “group” ``CLUSTERING COLUMNS`` together in a relation using the tuple notation. For instance:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01') - -will request all rows that sorts after the one having “John's Blog” as ``blog_tile`` and '2012-01-01' for ``posted_at`` -in the clustering order. In particular, rows having a ``post_at <= '2012-01-01'`` will be returned as long as their -``blog_title > 'John''s Blog'``, which would not be the case for:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND blog_title > 'John''s Blog' - AND posted_at > '2012-01-01' - -The tuple notation may also be used for ``IN`` clauses on clustering columns:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01')) - -The ``CONTAINS`` operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -``CONTAINS`` applies to the map values. The ``CONTAINS KEY`` operator may only be used on map columns and applies to the -map keys. - -.. _group-by-clause: - -Grouping results -~~~~~~~~~~~~~~~~ - -The ``GROUP BY`` option allows to condense into a single row all selected rows that share the same values for a set -of columns. - -Using the ``GROUP BY`` option, it is only possible to group rows at the partition key level or at a clustering column -level. By consequence, the ``GROUP BY`` option only accept as arguments primary key column names in the primary key -order. If a primary key column is restricted by an equality restriction it is not required to be present in the -``GROUP BY`` clause. - -Aggregate functions will produce a separate value for each group. 
If no ``GROUP BY`` clause is specified, -aggregates functions will produce a single value for all the rows. - -If a column is selected without an aggregate function, in a statement with a ``GROUP BY``, the first value encounter -in each group will be returned. - -.. _ordering-clause: - -Ordering results -~~~~~~~~~~~~~~~~ - -The ``ORDER BY`` clause allows to select the order of the returned results. It takes as argument a list of column names -along with the order for the column (``ASC`` for ascendant and ``DESC`` for descendant, omitting the order being -equivalent to ``ASC``). Currently the possible orderings are limited by the :ref:`clustering order ` -defined on the table: - -- if the table has been defined without any specific ``CLUSTERING ORDER``, then then allowed orderings are the order - induced by the clustering columns and the reverse of that one. -- otherwise, the orderings allowed are the order of the ``CLUSTERING ORDER`` option and the reversed one. - -.. _limit-clause: - -Limiting results -~~~~~~~~~~~~~~~~ - -The ``LIMIT`` option to a ``SELECT`` statement limits the number of rows returned by a query, while the ``PER PARTITION -LIMIT`` option limits the number of rows returned for a given partition by the query. Note that both type of limit can -used in the same statement. - -.. _allow-filtering: - -Allowing filtering -~~~~~~~~~~~~~~~~~~ - -By default, CQL only allows select queries that don't involve “filtering” server side, i.e. queries where we know that -all (live) record read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” -queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of -data **returned** by the query (which can be controlled through ``LIMIT``). - -The ``ALLOW FILTERING`` option allows to explicitly allow (some) queries that require filtering. Please note that a -query using ``ALLOW FILTERING`` may thus have unpredictable performance (for the definition above), i.e. even a query -that selects a handful of records **may** exhibit performance that depends on the total amount of data stored in the -cluster. - -For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:: - - CREATE TABLE users ( - username text PRIMARY KEY, - firstname text, - lastname text, - birth_year int, - country text - ) - - CREATE INDEX ON users(birth_year); - -Then the following queries are valid:: - - SELECT * FROM users; - SELECT * FROM users WHERE birth_year = 1981; - -because in both case, Cassandra guarantees that these queries performance will be proportional to the amount of data -returned. In particular, if no users are born in 1981, then the second query performance will not depend of the number -of user profile stored in the database (not directly at least: due to secondary index implementation consideration, this -query may still depend on the number of node in the cluster, which indirectly depends on the amount of data stored. -Nevertheless, the number of nodes will always be multiple number of magnitude lower than the number of user profile -stored). Of course, both query may return very large result set in practice, but the amount of data returned can always -be controlled by adding a ``LIMIT``. 
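For instance (a sketch based on the indexed ``users`` table above), the size of the result set can be bounded explicitly::

    SELECT * FROM users WHERE birth_year = 1981 LIMIT 100;

At most 100 matching rows will be returned, regardless of how many users were born in 1981.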
- -However, the following query will be rejected:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR'; - -because Cassandra cannot guarantee that it won't have to scan large amount of data even if the result to those query is -small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from -France. However, if you “know what you are doing”, you can force the execution of this query by using ``ALLOW -FILTERING`` and so the following query is valid:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING; - -.. _insert-statement: - -INSERT -^^^^^^ - -Inserting data for a row is done using an ``INSERT`` statement: - -.. productionlist:: - insert_statement: INSERT INTO `table_name` ( `names_values` | `json_clause` ) - : [ IF NOT EXISTS ] - : [ USING `update_parameter` ( AND `update_parameter` )* ] - names_values: `names` VALUES `tuple_literal` - json_clause: JSON `string` [ DEFAULT ( NULL | UNSET ) ] - names: '(' `column_name` ( ',' `column_name` )* ')' - -For instance:: - - INSERT INTO NerdMovies (movie, director, main_actor, year) - VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005) - USING TTL 86400; - - INSERT INTO NerdMovies JSON '{"movie": "Serenity", - "director": "Joss Whedon", - "year": 2005}'; - -The ``INSERT`` statement writes one or more columns for a given row in a table. Note that since a row is identified by -its ``PRIMARY KEY``, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the ``VALUES`` syntax. When using the ``JSON`` syntax, they are optional. See the -section on :ref:`JSON support ` for more detail. - -Note that unlike in SQL, ``INSERT`` does not check the prior existence of the row by default: the row is created if none -existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened. - -It is however possible to use the ``IF NOT EXISTS`` condition to only insert if the row does not exist prior to the -insertion. But please note that using ``IF NOT EXISTS`` will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly. - -All updates for an ``INSERT`` are applied atomically and in isolation. - -Please refer to the :ref:`UPDATE ` section for informations on the :token:`update_parameter`. - -Also note that ``INSERT`` does not support counters, while ``UPDATE`` does. - -.. _update-statement: - -UPDATE -^^^^^^ - -Updating a row is done using an ``UPDATE`` statement: - -.. productionlist:: - update_statement: UPDATE `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : SET `assignment` ( ',' `assignment` )* - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - update_parameter: ( TIMESTAMP | TTL ) ( `integer` | `bind_marker` ) - assignment: `simple_selection` '=' `term` - :| `column_name` '=' `column_name` ( '+' | '-' ) `term` - :| `column_name` '=' `list_literal` '+' `column_name` - simple_selection: `column_name` - :| `column_name` '[' `term` ']' - :| `column_name` '.' 
`field_name - condition: `simple_selection` `operator` `term` - -For instance:: - - UPDATE NerdMovies USING TTL 400 - SET director = 'Joss Whedon', - main_actor = 'Nathan Fillion', - year = 2005 - WHERE movie = 'Serenity'; - - UPDATE UserActions - SET total = total + 2 - WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 - AND action = 'click'; - -The ``UPDATE`` statement writes one or more columns for a given row in a table. The :token:`where_clause` is used to -select the row to update and must include all columns composing the ``PRIMARY KEY``. Non primary key columns are then -set using the ``SET`` keyword. - -Note that unlike in SQL, ``UPDATE`` does not check the prior existence of the row by default (except through ``IF``, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred. - -It is however possible to use the conditions on some columns through ``IF``, in which case the row will not be updated -unless the conditions are met. But, please note that using ``IF`` conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly. - -In an ``UPDATE`` statement, all updates within the same partition key are applied atomically and in isolation. - -Regarding the :token:`assignment`: - -- ``c = c + 3`` is used to increment/decrement counters. The column name after the '=' sign **must** be the same than - the one before the '=' sign. Note that increment/decrement is only allowed on counters, and are the *only* update - operations allowed on counters. See the section on :ref:`counters ` for details. -- ``id = id + `` and ``id[value1] = value2`` are for collections, see the :ref:`relevant section - ` for details. -- ``id.field = 3`` is for setting the value of a field on a non-frozen user-defined types. see the :ref:`relevant section - ` for details. - -.. _update-parameters: - -Update parameters -~~~~~~~~~~~~~~~~~ - -The ``UPDATE``, ``INSERT`` (and ``DELETE`` and ``BATCH`` for the ``TIMESTAMP``) statements support the following -parameters: - -- ``TIMESTAMP``: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in - microseconds) at the start of statement execution as the timestamp. This is usually a suitable default. -- ``TTL``: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are - automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not - the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL - is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a - default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of ``null`` is equivalent - to inserting with a TTL of 0. - -.. _delete_statement: - -DELETE -^^^^^^ - -Deleting rows or parts of rows uses the ``DELETE`` statement: - -.. 
productionlist:: - delete_statement: DELETE [ `simple_selection` ( ',' `simple_selection` ) ] - : FROM `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - -For instance:: - - DELETE FROM NerdMovies USING TIMESTAMP 1240003134 - WHERE movie = 'Serenity'; - - DELETE phone FROM Users - WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); - -The ``DELETE`` statement deletes columns and rows. If column names are provided directly after the ``DELETE`` keyword, -only those columns are deleted from the row indicated by the ``WHERE`` clause. Otherwise, whole rows are removed. - -The ``WHERE`` clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -``IN`` operator. A range of rows may be deleted using an inequality operator (such as ``>=``). - -``DELETE`` supports the ``TIMESTAMP`` option with the same semantics as in :ref:`updates `. - -In a ``DELETE`` statement, all deletions within the same partition key are applied atomically and in isolation. - -A ``DELETE`` operation can be conditional through the use of an ``IF`` clause, similar to ``UPDATE`` and ``INSERT`` -statements. However, as with ``INSERT`` and ``UPDATE`` statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly. - -.. _batch_statement: - -BATCH -^^^^^ - -Multiple ``INSERT``, ``UPDATE`` and ``DELETE`` can be executed in a single statement by grouping them through a -``BATCH`` statement: - -.. productionlist:: - batch_statement: BEGIN [ UNLOGGED | COUNTER ] BATCH - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : `modification_statement` ( ';' `modification_statement` )* - : APPLY BATCH - modification_statement: `insert_statement` | `update_statement` | `delete_statement` - -For instance:: - - BEGIN BATCH - INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user'); - UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3'; - INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c'); - DELETE name FROM users WHERE userid = 'user1'; - APPLY BATCH; - -The ``BATCH`` statement group multiple modification statements (insertions/updates and deletions) into a single -statement. It serves several purposes: - -- It saves network round-trips between the client and the server (and sometimes between the server coordinator and the - replicas) when batching multiple updates. -- All updates in a ``BATCH`` belonging to a given partition key are performed in isolation. -- By default, all operations in the batch are performed as *logged*, to ensure all mutations eventually complete (or - none will). See the notes on :ref:`UNLOGGED batches ` for more details. - -Note that: - -- ``BATCH`` statements may only contain ``UPDATE``, ``INSERT`` and ``DELETE`` statements (not other batches for instance). -- Batches are *not* a full analogue for SQL transactions. -- If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp - (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra's conflict - resolution procedure in the case of `timestamp ties `__, operations may - be applied in an order that is different from the order they are listed in the ``BATCH`` statement. To force a - particular operation ordering, you must specify per-operation timestamps. 
-- A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
-
-.. _unlogged-batches:
-
-``UNLOGGED`` batches
-~~~~~~~~~~~~~~~~~~~~
-
-By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note
-however that operations are only isolated within a single partition).
-
-There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur
-this penalty, you can tell Cassandra to skip the batchlog with the ``UNLOGGED`` option. If the ``UNLOGGED`` option is
-used, a failed batch might leave the batch only partly applied.
-
-``COUNTER`` batches
-~~~~~~~~~~~~~~~~~~~
-
-Use the ``COUNTER`` option for batched counter updates. Unlike other
-updates in Cassandra, counter updates are not idempotent.
diff --git a/src/doc/3.11.5/_sources/cql/functions.rst.txt b/src/doc/3.11.5/_sources/cql/functions.rst.txt
deleted file mode 100644
index 47026cd94..000000000
--- a/src/doc/3.11.5/_sources/cql/functions.rst.txt
+++ /dev/null
@@ -1,558 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-.. highlight:: cql
-
-.. _cql-functions:
-
-.. Need some intro for UDF and native functions in general and point those to it.
-.. _udfs:
-.. _native-functions:
-
-Functions
----------
-
-CQL supports two main categories of functions:
-
-- the :ref:`scalar functions `, which simply take a number of values and produce an output from them.
-- the :ref:`aggregate functions `, which are used to aggregate the results of multiple rows from a
-  ``SELECT`` statement.
-
-In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined
-functions.
-
-.. note:: The use of user-defined functions is disabled by default for security reasons (even when
-   enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do
-   evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions``
-   in ``cassandra.yaml`` to enable them.
-
-A function is identified by its name:
-
-.. productionlist::
-   function_name: [ `keyspace_name` '.' ] `name`
-
-.. _scalar-functions:
-
-Scalar functions
-^^^^^^^^^^^^^^^^
-
-.. _scalar-native-functions:
-
-Native functions
-~~~~~~~~~~~~~~~~
-
-Cast
-````
-
-The ``cast`` function can be used to convert one native datatype to another.
-
-The following table describes the conversions supported by the ``cast`` function. Cassandra will silently ignore any
-cast converting a datatype into its own datatype.
- -=============== ======================================================================================================= - From To -=============== ======================================================================================================= - ``ascii`` ``text``, ``varchar`` - ``bigint`` ``tinyint``, ``smallint``, ``int``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``boolean`` ``text``, ``varchar`` - ``counter`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``date`` ``timestamp`` - ``decimal`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``varint``, ``text``, - ``varchar`` - ``double`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``float`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``inet`` ``text``, ``varchar`` - ``int`` ``tinyint``, ``smallint``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``smallint`` ``tinyint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``time`` ``text``, ``varchar`` - ``timestamp`` ``date``, ``text``, ``varchar`` - ``timeuuid`` ``timestamp``, ``date``, ``text``, ``varchar`` - ``tinyint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``uuid`` ``text``, ``varchar`` - ``varint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``text``, - ``varchar`` -=============== ======================================================================================================= - -The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value -'1.0'. For instance:: - - SELECT avg(cast(count as double)) FROM myTable - -Token -````` - -The ``token`` function allows to compute the token for a given partition key. The exact signature of the token function -depends on the table concerned and of the partitioner used by the cluster. - -The type of the arguments of the ``token`` depend on the type of the partition key columns. The return type depend on -the partitioner in use: - -- For Murmur3Partitioner, the return type is ``bigint``. -- For RandomPartitioner, the return type is ``varint``. -- For ByteOrderedPartitioner, the return type is ``blob``. - -For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:: - - CREATE TABLE users ( - userid text PRIMARY KEY, - username text, - ) - -then the ``token`` function will take a single argument of type ``text`` (in that case, the partition key is ``userid`` -(there is no clustering columns so the partition key is the same than the primary key)), and the return type will be -``bigint``. - -Uuid -```` -The ``uuid`` function takes no parameters and generates a random type 4 uuid suitable for use in ``INSERT`` or -``UPDATE`` statements. - -.. _timeuuid-functions: - -Timeuuid functions -`````````````````` - -``now`` -####### - -The ``now`` function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time where -the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in -``WHERE`` clauses. 
For instance, a query of the form:: - - SELECT * FROM myTable WHERE t = now() - -will never return any result by design, since the value returned by ``now()`` is guaranteed to be unique. - -``minTimeuuid`` and ``maxTimeuuid`` -################################### - -The ``minTimeuuid`` (resp. ``maxTimeuuid``) function takes a ``timestamp`` value ``t`` (which can be `either a timestamp -or a date string `) and return a *fake* ``timeuuid`` corresponding to the *smallest* (resp. *biggest*) -possible ``timeuuid`` having for timestamp ``t``. So for instance:: - - SELECT * FROM myTable - WHERE t > maxTimeuuid('2013-01-01 00:05+0000') - AND t < minTimeuuid('2013-02-02 10:00+0000') - -will select all rows where the ``timeuuid`` column ``t`` is strictly older than ``'2013-01-01 00:05+0000'`` but strictly -younger than ``'2013-02-02 10:00+0000'``. Please note that ``t >= maxTimeuuid('2013-01-01 00:05+0000')`` would still -*not* select a ``timeuuid`` generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to ``t > -maxTimeuuid('2013-01-01 00:05+0000')``. - -.. note:: We called the values generated by ``minTimeuuid`` and ``maxTimeuuid`` *fake* UUID because they do no respect - the Time-Based UUID generation process specified by the `RFC 4122 `__. In - particular, the value returned by these 2 methods will not be unique. This means you should only use those methods - for querying (as in the example above). Inserting the result of those methods is almost certainly *a bad idea*. - -Time conversion functions -````````````````````````` - -A number of functions are provided to “convert” a ``timeuuid``, a ``timestamp`` or a ``date`` into another ``native`` -type. - -===================== =============== =================================================================== - Function name Input type Description -===================== =============== =================================================================== - ``toDate`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``date`` type - ``toDate`` ``timestamp`` Converts the ``timestamp`` argument into a ``date`` type - ``toTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``timestamp`` type - ``toTimestamp`` ``date`` Converts the ``date`` argument into a ``timestamp`` type - ``toUnixTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``timestamp`` Converts the ``timestamp`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``date`` Converts the ``date`` argument into a ``bigInt`` raw value - ``dateOf`` ``timeuuid`` Similar to ``toTimestamp(timeuuid)`` (DEPRECATED) - ``unixTimestampOf`` ``timeuuid`` Similar to ``toUnixTimestamp(timeuuid)`` (DEPRECATED) -===================== =============== =================================================================== - -Blob conversion functions -````````````````````````` -A number of functions are provided to “convert” the native types into binary data (``blob``). For every -```` ``type`` supported by CQL (a notable exceptions is ``blob``, for obvious reasons), the function -``typeAsBlob`` takes a argument of type ``type`` and return it as a ``blob``. Conversely, the function ``blobAsType`` -takes a 64-bit ``blob`` argument and convert it to a ``bigint`` value. And so for instance, ``bigintAsBlob(3)`` is -``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``. - -.. 
_user-defined-scalar-functions: - -User-defined functions -~~~~~~~~~~~~~~~~~~~~~~ - -User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in *Java* and *JavaScript*. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath. - -UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster. - -UDFs can be *overloaded* - i.e. multiple UDFs with different argument types but the same function name. Example:: - - CREATE FUNCTION sample ( arg int ) ...; - CREATE FUNCTION sample ( arg text ) ...; - -User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing. - -It is valid to use *complex* types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types. - -Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too. - -Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:: - - CREATE FUNCTION some_function ( arg int ) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE java - AS $$ return arg; $$; - - SELECT some_function(column) FROM atable ...; - UPDATE atable SET col = some_function(?) ...; - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct_using_udt ( udtarg frozen ) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ return udtarg.getString("txt"); $$; - -User-defined functions can be used in ``SELECT``, ``INSERT`` and ``UPDATE`` statements. - -The implicitly available ``udfContext`` field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:: - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct\_using\_udt ( somearg int ) - RETURNS NULL ON NULL INPUT - RETURNS custom_type - LANGUAGE java - AS $$ - UDTValue udt = udfContext.newReturnUDTValue(); - udt.setString("txt", "some string"); - udt.setInt("i", 42); - return udt; - $$; - -The definition of the ``UDFContext`` interface can be found in the Apache Cassandra source code for -``org.apache.cassandra.cql3.functions.UDFContext``. - -.. code-block:: java - - public interface UDFContext - { - UDTValue newArgUDTValue(String argName); - UDTValue newArgUDTValue(int argNum); - UDTValue newReturnUDTValue(); - UDTValue newUDTValue(String udtName); - TupleValue newArgTupleValue(String argName); - TupleValue newArgTupleValue(int argNum); - TupleValue newReturnTupleValue(); - TupleValue newTupleValue(String cqlDefinition); - } - -Java UDFs already have some imports for common interfaces and classes defined. These imports are: - -.. 
code-block:: java - - import java.nio.ByteBuffer; - import java.util.List; - import java.util.Map; - import java.util.Set; - import org.apache.cassandra.cql3.functions.UDFContext; - import com.datastax.driver.core.TypeCodec; - import com.datastax.driver.core.TupleValue; - import com.datastax.driver.core.UDTValue; - -Please note, that these convenience imports are not available for script UDFs. - -.. _create-function-statement: - -CREATE FUNCTION -``````````````` - -Creating a new user-defined function uses the ``CREATE FUNCTION`` statement: - -.. productionlist:: - create_function_statement: CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS] - : `function_name` '(' `arguments_declaration` ')' - : [ CALLED | RETURNS NULL ] ON NULL INPUT - : RETURNS `cql_type` - : LANGUAGE `identifier` - : AS `string` - arguments_declaration: `identifier` `cql_type` ( ',' `identifier` `cql_type` )* - -For instance:: - - CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen, listarg list) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - - CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int) - CALLED ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - -``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords either creates a function or replaces an existing one with -the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE`` fails if a function with the same signature already -exists. - -If the optional ``IF NOT EXISTS`` keywords are used, the function will -only be created if another function with the same signature does not -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -Behavior on invocation with ``null`` values must be defined for each -function. There are two options: - -#. ``RETURNS NULL ON NULL INPUT`` declares that the function will always - return ``null`` if any of the input arguments is ``null``. -#. ``CALLED ON NULL INPUT`` declares that the function will always be - executed. - -Function Signature -################## - -Signatures are used to distinguish individual functions. The signature consists of: - -#. The fully qualified function name - i.e *keyspace* plus *function-name* -#. The concatenated list of all argument types - -Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules. - -Functions belong to a keyspace. If no keyspace is specified in ````, the current keyspace is used (i.e. -the keyspace specified using the ``USE`` statement). It is not possible to create a user-defined function in one of the -system keyspaces. - -.. _drop-function-statement: - -DROP FUNCTION -````````````` - -Dropping a function uses the ``DROP FUNCTION`` statement: - -.. productionlist:: - drop_function_statement: DROP FUNCTION [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - arguments_signature: `cql_type` ( ',' `cql_type` )* - -For instance:: - - DROP FUNCTION myfunction; - DROP FUNCTION mykeyspace.afunction; - DROP FUNCTION afunction ( int ); - DROP FUNCTION afunction ( text ); - -You must specify the argument types (:token:`arguments_signature`) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions). - -``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if -it doesn't - -.. 
_aggregate-functions: - -Aggregate functions -^^^^^^^^^^^^^^^^^^^ - -Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set. - -If ``normal`` columns, ``scalar functions``, ``UDT`` fields, ``writetime`` or ``ttl`` are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query. - -Native aggregates -~~~~~~~~~~~~~~~~~ - -.. _count-function: - -Count -````` - -The ``count`` function can be used to count the rows returned by a query. Example:: - - SELECT COUNT (*) FROM plays; - SELECT COUNT (1) FROM plays; - -It also can be used to count the non null value of a given column:: - - SELECT COUNT (scores) FROM plays; - -Max and Min -``````````` - -The ``max`` and ``min`` functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:: - - SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake'; - -Sum -``` - -The ``sum`` function can be used to sum up all the values returned by a query for a given column. For instance:: - - SELECT SUM (players) FROM plays; - -Avg -``` - -The ``avg`` function can be used to compute the average of all the values returned by a query for a given column. For -instance:: - - SELECT AVG (players) FROM plays; - -.. _user-defined-aggregates-functions: - -User-Defined Aggregates -~~~~~~~~~~~~~~~~~~~~~~~ - -User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -*count*, *min*, and *max*. - -Each aggregate requires an *initial state* (``INITCOND``, which defaults to ``null``) of type ``STYPE``. The first -argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional ``FINALFUNC`` is executed with last -state value as its argument. - -``STYPE`` is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate). - -User-defined aggregates can be used in ``SELECT`` statement. - -A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` -statement):: - - CREATE OR REPLACE FUNCTION averageState(state tuple, val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS $$ - if (val != null) { - state.setInt(0, state.getInt(0)+1); - state.setLong(1, state.getLong(1)+val.intValue()); - } - return state; - $$; - - CREATE OR REPLACE FUNCTION averageFinal (state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS $$ - double r = 0; - if (state.getInt(0) == 0) return null; - r = state.getLong(1); - r /= state.getInt(0); - return Double.valueOf(r); - $$; - - CREATE OR REPLACE AGGREGATE average(int) - SFUNC averageState - STYPE tuple - FINALFUNC averageFinal - INITCOND (0, 0); - - CREATE TABLE atable ( - pk int PRIMARY KEY, - val int - ); - - INSERT INTO atable (pk, val) VALUES (1,1); - INSERT INTO atable (pk, val) VALUES (2,2); - INSERT INTO atable (pk, val) VALUES (3,3); - INSERT INTO atable (pk, val) VALUES (4,4); - - SELECT average(val) FROM atable; - -.. 
_create-aggregate-statement: - -CREATE AGGREGATE -```````````````` - -Creating (or replacing) a user-defined aggregate function uses the ``CREATE AGGREGATE`` statement: - -.. productionlist:: - create_aggregate_statement: CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ] - : `function_name` '(' `arguments_signature` ')' - : SFUNC `function_name` - : STYPE `cql_type` - : [ FINALFUNC `function_name` ] - : [ INITCOND `term` ] - -See above for a complete example. - -``CREATE AGGREGATE`` with the optional ``OR REPLACE`` keywords either creates an aggregate or replaces an existing one -with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature -already exists. - -``CREATE AGGREGATE`` with the optional ``IF NOT EXISTS`` keywords either creates an aggregate if it does not already -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -``STYPE`` defines the type of the state value and must be specified. - -The optional ``INITCOND`` defines the initial state value for the aggregate. It defaults to ``null``. A non-\ ``null`` -``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``. - -``SFUNC`` references an existing function to be used as the state modifying function. The type of first argument of the -state function must match ``STYPE``. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called -with ``null``. - -The optional ``FINALFUNC`` is called just before the aggregate result is returned. It must take only one argument with -type ``STYPE``. The return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS -NULL ON NULL INPUT`` means that the aggregate's return value will be ``null``, if the last state is ``null``. - -If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is -defined, it is the return type of that function. - -.. _drop-aggregate-statement: - -DROP AGGREGATE -`````````````` - -Dropping an user-defined aggregate function uses the ``DROP AGGREGATE`` statement: - -.. productionlist:: - drop_aggregate_statement: DROP AGGREGATE [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - -For instance:: - - DROP AGGREGATE myAggregate; - DROP AGGREGATE myKeyspace.anAggregate; - DROP AGGREGATE someAggregate ( int ); - DROP AGGREGATE someAggregate ( text ); - -The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates). - -``DROP AGGREGATE`` with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist. diff --git a/src/doc/3.11.5/_sources/cql/index.rst.txt b/src/doc/3.11.5/_sources/cql/index.rst.txt deleted file mode 100644 index 00d90e41e..000000000 --- a/src/doc/3.11.5/_sources/cql/index.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _cql: - -The Cassandra Query Language (CQL) -================================== - -This document describes the Cassandra Query Language (CQL) [#]_. Note that this document describes the last version of -the languages. However, the `changes <#changes>`_ section provides the diff between the different versions of CQL. - -CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For -that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have -in SQL. But please note that as such, they do **not** refer to the concept of rows and columns found in the deprecated -thrift API (and earlier version 1 and 2 of CQL). - -.. toctree:: - :maxdepth: 2 - - definitions - types - ddl - dml - indexes - mvs - security - functions - json - triggers - appendices - changes - -.. [#] Technically, this document CQL version 3, which is not backward compatible with CQL version 1 and 2 (which have - been deprecated and remove) and differs from it in numerous ways. diff --git a/src/doc/3.11.5/_sources/cql/indexes.rst.txt b/src/doc/3.11.5/_sources/cql/indexes.rst.txt deleted file mode 100644 index 81fe429d0..000000000 --- a/src/doc/3.11.5/_sources/cql/indexes.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _secondary-indexes: - -Secondary Indexes ------------------ - -CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by: - -.. productionlist:: - index_name: re('[a-zA-Z_0-9]+') - - - -.. _create-index-statement: - -CREATE INDEX -^^^^^^^^^^^^ - -Creating a secondary index on a table uses the ``CREATE INDEX`` statement: - -.. 
productionlist:: - create_index_statement: CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ `index_name` ] - : ON `table_name` '(' `index_identifier` ')' - : [ USING `string` [ WITH OPTIONS = `map_literal` ] ] - index_identifier: `column_name` - :| ( KEYS | VALUES | ENTRIES | FULL ) '(' `column_name` ')' - -For instance:: - - CREATE INDEX userIndex ON NerdMovies (user); - CREATE INDEX ON Mutants (abilityId); - CREATE INDEX ON users (keys(favs)); - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass'; - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'}; - -The ``CREATE INDEX`` statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ``ON`` keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time. - -Attempting to create an already existing index will return an error unless the ``IF NOT EXISTS`` option is used. If it -is used, the statement will be a no-op if the index already exists. - -Indexes on Map Keys -~~~~~~~~~~~~~~~~~~~ - -When creating an index on a :ref:`maps `, you may index either the keys or the values. If the column identifier is -placed within the ``keys()`` function, the index will be on the map keys, allowing you to use ``CONTAINS KEY`` in -``WHERE`` clauses. Otherwise, the index will be on the map values. - -.. _drop-index-statement: - -DROP INDEX -^^^^^^^^^^ - -Dropping a secondary index uses the ``DROP INDEX`` statement: - -.. productionlist:: - drop_index_statement: DROP INDEX [ IF EXISTS ] `index_name` - -The ``DROP INDEX`` statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index. - -If the index does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. diff --git a/src/doc/3.11.5/_sources/cql/json.rst.txt b/src/doc/3.11.5/_sources/cql/json.rst.txt deleted file mode 100644 index 539180aed..000000000 --- a/src/doc/3.11.5/_sources/cql/json.rst.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-json: - -JSON Support ------------- - -Cassandra 2.2 introduces JSON support to :ref:`SELECT ` and :ref:`INSERT ` -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents. 
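-
-As a quick, purely illustrative example (the table and values below are hypothetical), a row can be written and read
-back as JSON like this::
-
-    INSERT INTO users JSON '{"username": "jdoe", "country": "FR"}';
-    SELECT JSON * FROM users WHERE username = 'jdoe';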
-
-SELECT JSON
-^^^^^^^^^^^
-
-With ``SELECT`` statements, the ``JSON`` keyword can be used to return each row as a single ``JSON`` encoded map. The
-remainder of the ``SELECT`` statement behavior is the same.
-
-The result map keys are the same as the column names in a normal result set. For example, a statement like ``SELECT JSON
-a, ttl(b) FROM ...`` would result in a map with keys ``"a"`` and ``"ttl(b)"``. However, there is one notable exception:
-for symmetry with ``INSERT JSON`` behavior, case-sensitive column names with upper-case letters will be surrounded with
-double quotes. For example, ``SELECT JSON myColumn FROM ...`` would result in a map key ``"\"myColumn\""`` (note the
-escaped quotes).
-
-The map values will be ``JSON``-encoded representations (as described below) of the result set values.
-
-INSERT JSON
-^^^^^^^^^^^
-
-With ``INSERT`` statements, the new ``JSON`` keyword can be used to insert a ``JSON`` encoded map as a single
-row. The format of the ``JSON`` map should generally match that returned by a ``SELECT JSON`` statement on the same
-table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a
-table with two columns named "myKey" and "value", you would do the following::
-
-    INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-By default (or if ``DEFAULT NULL`` is explicitly used), a column omitted from the ``JSON`` map will be set to ``NULL``,
-meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created).
-Alternatively, if the ``DEFAULT UNSET`` directive is used after the value, omitted column values will be left unset,
-meaning that pre-existing values for those columns will be preserved.
-
-
-JSON Encoding of Cassandra Data Types
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Where possible, Cassandra will represent and accept data types in their native ``JSON`` representation. Cassandra will
-also accept string representations matching the CQL literal format for all single-field types. For example, floats,
-ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples,
-and user-defined types must be represented by native ``JSON`` collections (maps and lists) or a JSON-encoded string
-representation of the collection.
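-
-For example, a column of type ``list<int>`` (say ``scores`` in a hypothetical ``plays`` table) can be written either as
-a native JSON list or as its JSON-encoded string form::
-
-    INSERT INTO plays JSON '{"id": 1, "scores": [17, 4, 2]}';
-    INSERT INTO plays JSON '{"id": 1, "scores": "[17, 4, 2]"}';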
- -The following table describes the encodings that Cassandra will accept in ``INSERT JSON`` values (and ``fromJson()`` -arguments) as well as the format Cassandra will use when returning data for ``SELECT JSON`` statements (and -``fromJson()``): - -=============== ======================== =============== ============================================================== - Type Formats accepted Return format Notes -=============== ======================== =============== ============================================================== - ``ascii`` string string Uses JSON's ``\u`` character escape - ``bigint`` integer, string integer String must be valid 64 bit integer - ``blob`` string string String should be 0x followed by an even number of hex digits - ``boolean`` boolean, string boolean String must be "true" or "false" - ``date`` string string Date in format ``YYYY-MM-DD``, timezone UTC - ``decimal`` integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in - client-side decoder - ``double`` integer, float, string float String must be valid integer or float - ``float`` integer, float, string float String must be valid integer or float - ``inet`` string string IPv4 or IPv6 address - ``int`` integer, string integer String must be valid 32 bit integer - ``list`` list, string list Uses JSON's native list representation - ``map`` map, string map Uses JSON's native map representation - ``smallint`` integer, string integer String must be valid 16 bit integer - ``set`` list, string list Uses JSON's native list representation - ``text`` string string Uses JSON's ``\u`` character escape - ``time`` string string Time of day in format ``HH-MM-SS[.fffffffff]`` - ``timestamp`` integer, string string A timestamp. Strings constant allows to input :ref:`timestamps - as dates `. Datestamps with format ``YYYY-MM-DD - HH:MM:SS.SSS`` are returned. - ``timeuuid`` string string Type 1 UUID. See :token:`constant` for the UUID format - ``tinyint`` integer, string integer String must be valid 8 bit integer - ``tuple`` list, string list Uses JSON's native list representation - ``UDT`` map, string map Uses JSON's native map representation with field names as keys - ``uuid`` string string See :token:`constant` for the UUID format - ``varchar`` string string Uses JSON's ``\u`` character escape - ``varint`` integer, string integer Variable length; may overflow 32 or 64 bit integers in - client-side decoder -=============== ======================== =============== ============================================================== - -The fromJson() Function -^^^^^^^^^^^^^^^^^^^^^^^ - -The ``fromJson()`` function may be used similarly to ``INSERT JSON``, but for a single column value. It may only be used -in the ``VALUES`` clause of an ``INSERT`` statement or as one of the column values in an ``UPDATE``, ``DELETE``, or -``SELECT`` statement. For example, it cannot be used in the selection clause of a ``SELECT`` statement. - -The toJson() Function -^^^^^^^^^^^^^^^^^^^^^ - -The ``toJson()`` function may be used similarly to ``SELECT JSON``, but for a single column value. It may only be used -in the selection clause of a ``SELECT`` statement. diff --git a/src/doc/3.11.5/_sources/cql/mvs.rst.txt b/src/doc/3.11.5/_sources/cql/mvs.rst.txt deleted file mode 100644 index aabea10d8..000000000 --- a/src/doc/3.11.5/_sources/cql/mvs.rst.txt +++ /dev/null @@ -1,166 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _materialized-views: - -Materialized Views ------------------- - -Materialized views names are defined by: - -.. productionlist:: - view_name: re('[a-zA-Z_0-9]+') - - -.. _create-materialized-view-statement: - -CREATE MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^^ - -You can create a materialized view on a table using a ``CREATE MATERIALIZED VIEW`` statement: - -.. productionlist:: - create_materialized_view_statement: CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] `view_name` AS - : `select_statement` - : PRIMARY KEY '(' `primary_key` ')' - : WITH `table_options` - -For instance:: - - CREATE MATERIALIZED VIEW monkeySpecies_by_population AS - SELECT * FROM monkeySpecies - WHERE population IS NOT NULL AND species IS NOT NULL - PRIMARY KEY (population, species) - WITH comment='Allow query by population instead of species'; - -The ``CREATE MATERIALIZED VIEW`` statement creates a new materialized view. Each such view is a set of *rows* which -corresponds to rows which are present in the underlying, or base, table specified in the ``SELECT`` statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view. - -Creating a materialized view has 3 main parts: - -- The :ref:`select statement ` that restrict the data included in the view. -- The :ref:`primary key ` definition for the view. -- The :ref:`options ` for the view. - -Attempting to create an already existing materialized view will return an error unless the ``IF NOT EXISTS`` option is -used. If it is used, the statement will be a no-op if the materialized view already exists. - -.. _mv-select: - -MV select statement -``````````````````` - -The select statement of a materialized view creation defines which of the base table is included in the view. That -statement is limited in a number of ways: - -- the :ref:`selection ` is limited to those that only select columns of the base table. In other - words, you can't use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can - however use `*` as a shortcut of selecting all columns. Further, :ref:`static columns ` cannot be - included in a materialized view (which means ``SELECT *`` isn't allowed if the base table has static columns). -- the ``WHERE`` clause have the following restrictions: - - - it cannot include any :token:`bind_marker`. - - the columns that are not part of the *base table* primary key can only be restricted by an ``IS NOT NULL`` - restriction. No other restriction is allowed. - - as the columns that are part of the *view* primary key cannot be null, they must always be at least restricted by a - ``IS NOT NULL`` restriction (or any other restriction, but they must have one). - -- it cannot have neither an :ref:`ordering clause `, nor a :ref:`limit `, nor :ref:`ALLOW - FILTERING `. - -.. 
_mv-primary-key: - -MV primary key -`````````````` - -A view must have a primary key and that primary key must conform to the following restrictions: - -- it must contain all the primary key columns of the base table. This ensures that every row of the view correspond to - exactly one row of the base table. -- it can only contain a single column that is not a primary key column in the base table. - -So for instance, give the following base table definition:: - - CREATE TABLE t ( - k int, - c1 int, - c2 int, - v1 int, - v2 int, - PRIMARY KEY (k, c1, c2) - ) - -then the following view definitions are allowed:: - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, k, c2) - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (v1, k, c1, c2) - -but the following ones are **not** allowed:: - - // Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL - PRIMARY KEY (v1, v2, k, c1, c2) - - // Error: must include k in the primary as it's a base table primary key column - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, c2) - - -.. _mv-options: - -MV options -`````````` - -A materialized view is internally implemented by a table and as such, creating a MV allows the :ref:`same options than -creating a table `. - - -.. _alter-materialized-view-statement: - -ALTER MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^ - -After creation, you can alter the options of a materialized view using the ``ALTER MATERIALIZED VIEW`` statement: - -.. productionlist:: - alter_materialized_view_statement: ALTER MATERIALIZED VIEW `view_name` WITH `table_options` - -The options that can be updated are the same than at creation time and thus the :ref:`same than for tables -`. - -.. _drop-materialized-view-statement: - -DROP MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^ - -Dropping a materialized view users the ``DROP MATERIALIZED VIEW`` statement: - -.. productionlist:: - drop_materialized_view_statement: DROP MATERIALIZED VIEW [ IF EXISTS ] `view_name`; - -If the materialized view does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case -the operation is a no-op. diff --git a/src/doc/3.11.5/_sources/cql/security.rst.txt b/src/doc/3.11.5/_sources/cql/security.rst.txt deleted file mode 100644 index 099fcc48e..000000000 --- a/src/doc/3.11.5/_sources/cql/security.rst.txt +++ /dev/null @@ -1,502 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. 
_cql-security: - -Security --------- - -.. _cql-roles: - -Database Roles -^^^^^^^^^^^^^^ - -CQL uses database roles to represent users and group of users. Syntactically, a role is defined by: - -.. productionlist:: - role_name: `identifier` | `string` - -.. _create-role-statement: - -CREATE ROLE -~~~~~~~~~~~ - -Creating a role uses the ``CREATE ROLE`` statement: - -.. productionlist:: - create_role_statement: CREATE ROLE [ IF NOT EXISTS ] `role_name` - : [ WITH `role_options` ] - role_options: `role_option` ( AND `role_option` )* - role_option: PASSWORD '=' `string` - :| LOGIN '=' `boolean` - :| SUPERUSER '=' `boolean` - :| OPTIONS '=' `map_literal` - -For instance:: - - CREATE ROLE new_role; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true; - CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 }; - -By default roles do not possess ``LOGIN`` privileges or ``SUPERUSER`` status. - -:ref:`Permissions ` on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and ``SUPERUSER`` status are inherited, but the ``LOGIN`` privilege is -not. - -If a role has the ``LOGIN`` privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role. - -Only a client with with the ``CREATE`` permission on the database roles resource may issue ``CREATE ROLE`` requests (see -the :ref:`relevant section ` below), unless the client is a ``SUPERUSER``. Role management in Cassandra -is pluggable and custom implementations may support only a subset of the listed options. - -Role names should be quoted if they contain non-alphanumeric characters. - -.. _setting-credentials-for-internal-authentication: - -Setting credentials for internal authentication -``````````````````````````````````````````````` - -Use the ``WITH PASSWORD`` clause to set a password for internal authentication, enclosing the password in single -quotation marks. - -If internal authentication has not been set up or the role does not have ``LOGIN`` privileges, the ``WITH PASSWORD`` -clause is not necessary. - -Creating a role conditionally -````````````````````````````` - -Attempting to create an existing role results in an invalid query condition unless the ``IF NOT EXISTS`` option is used. -If the option is used and the role exists, the statement is a no-op:: - - CREATE ROLE other_role; - CREATE ROLE IF NOT EXISTS other_role; - - -.. _alter-role-statement: - -ALTER ROLE -~~~~~~~~~~ - -Altering a role options uses the ``ALTER ROLE`` statement: - -.. productionlist:: - alter_role_statement: ALTER ROLE `role_name` WITH `role_options` - -For instance:: - - ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false; - -Conditions on executing ``ALTER ROLE`` statements: - -- A client must have ``SUPERUSER`` status to alter the ``SUPERUSER`` status of another role -- A client cannot alter the ``SUPERUSER`` status of any role it currently holds -- A client can only modify certain properties of the role with which it identified at login (e.g. ``PASSWORD``) -- To modify properties of a role, the client must be granted ``ALTER`` :ref:`permission ` on that role - -.. 
_drop-role-statement: - -DROP ROLE -~~~~~~~~~ - -Dropping a role uses the ``DROP ROLE`` statement: - -.. productionlist:: - drop_role_statement: DROP ROLE [ IF EXISTS ] `role_name` - -``DROP ROLE`` requires the client to have ``DROP`` :ref:`permission ` on the role in question. In -addition, client may not ``DROP`` the role with which it identified at login. Finally, only a client with ``SUPERUSER`` -status may ``DROP`` another ``SUPERUSER`` role. - -Attempting to drop a role which does not exist results in an invalid query condition unless the ``IF EXISTS`` option is -used. If the option is used and the role does not exist the statement is a no-op. - -.. _grant-role-statement: - -GRANT ROLE -~~~~~~~~~~ - -Granting a role to another uses the ``GRANT ROLE`` statement: - -.. productionlist:: - grant_role_statement: GRANT `role_name` TO `role_name` - -For instance:: - - GRANT report_writer TO alice; - -This statement grants the ``report_writer`` role to ``alice``. Any permissions granted to ``report_writer`` are also -acquired by ``alice``. - -Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:: - - GRANT role_a TO role_b; - GRANT role_b TO role_a; - - GRANT role_a TO role_b; - GRANT role_b TO role_c; - GRANT role_c TO role_a; - -.. _revoke-role-statement: - -REVOKE ROLE -~~~~~~~~~~~ - -Revoking a role uses the ``REVOKE ROLE`` statement: - -.. productionlist:: - revoke_role_statement: REVOKE `role_name` FROM `role_name` - -For instance:: - - REVOKE report_writer FROM alice; - -This statement revokes the ``report_writer`` role from ``alice``. Any permissions that ``alice`` has acquired via the -``report_writer`` role are also revoked. - -.. _list-roles-statement: - -LIST ROLES -~~~~~~~~~~ - -All the known roles (in the system or granted to specific role) can be listed using the ``LIST ROLES`` statement: - -.. productionlist:: - list_roles_statement: LIST ROLES [ OF `role_name` ] [ NORECURSIVE ] - -For instance:: - - LIST ROLES; - -returns all known roles in the system, this requires ``DESCRIBE`` permission on the database roles resource. And:: - - LIST ROLES OF alice; - -enumerates all roles granted to ``alice``, including those transitively acquired. But:: - - LIST ROLES OF bob NORECURSIVE - -lists all roles directly granted to ``bob`` without including any of the transitively acquired ones. - -Users -^^^^^ - -Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -``USER``. For backward compatibility, the legacy syntax has been preserved with ``USER`` centric statements becoming -synonyms for the ``ROLE`` based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role. - -.. _create-user-statement: - -CREATE USER -~~~~~~~~~~~ - -Creating a user uses the ``CREATE USER`` statement: - -.. productionlist:: - create_user_statement: CREATE USER [ IF NOT EXISTS ] `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - user_option: SUPERUSER | NOSUPERUSER - -For instance:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER; - -``CREATE USER`` is equivalent to ``CREATE ROLE`` where the ``LOGIN`` option is ``true``. 
So, the following pairs of -statements are equivalent:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - - CREATE USER alice WITH PASSWORD 'password_a'; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - -.. _alter-user-statement: - -ALTER USER -~~~~~~~~~~ - -Altering the options of a user uses the ``ALTER USER`` statement: - -.. productionlist:: - alter_user_statement: ALTER USER `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - -For instance:: - - ALTER USER alice WITH PASSWORD 'PASSWORD_A'; - ALTER USER bob SUPERUSER; - -.. _drop-user-statement: - -DROP USER -~~~~~~~~~ - -Dropping a user uses the ``DROP USER`` statement: - -.. productionlist:: - drop_user_statement: DROP USER [ IF EXISTS ] `role_name` - -.. _list-users-statement: - -LIST USERS -~~~~~~~~~~ - -Existing users can be listed using the ``LIST USERS`` statement: - -.. productionlist:: - list_users_statement: LIST USERS - -Note that this statement is equivalent to:: - - LIST ROLES; - -but only roles with the ``LOGIN`` privilege are included in the output. - -Data Control -^^^^^^^^^^^^ - -.. _cql-permissions: - -Permissions -~~~~~~~~~~~ - -Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically: - -- The hierarchy of Data resources, Keyspaces and Tables has the structure ``ALL KEYSPACES`` -> ``KEYSPACE`` -> - ``TABLE``. -- Function resources have the structure ``ALL FUNCTIONS`` -> ``KEYSPACE`` -> ``FUNCTION`` -- Resources representing roles have the structure ``ALL ROLES`` -> ``ROLE`` -- Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ``ALL MBEANS`` -> - ``MBEAN`` - -Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting ``SELECT`` on a ``KEYSPACE`` automatically grants it on all ``TABLES`` in that ``KEYSPACE``. Likewise, granting -a permission on ``ALL FUNCTIONS`` grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace. - -Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes. - -The full set of available permissions is: - -- ``CREATE`` -- ``ALTER`` -- ``DROP`` -- ``SELECT`` -- ``MODIFY`` -- ``AUTHORIZE`` -- ``DESCRIBE`` -- ``EXECUTE`` - -Not all permissions are applicable to every type of resource. For instance, ``EXECUTE`` is only relevant in the context -of functions or mbeans; granting ``EXECUTE`` on a resource representing a table is nonsensical. Attempting to ``GRANT`` -a permission on resource to which it cannot be applied results in an error response. 
The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission. - -=============== =============================== ======================================================================= - Permission Resource Operations -=============== =============================== ======================================================================= - ``CREATE`` ``ALL KEYSPACES`` ``CREATE KEYSPACE`` and ``CREATE TABLE`` in any keyspace - ``CREATE`` ``KEYSPACE`` ``CREATE TABLE`` in specified keyspace - ``CREATE`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` in any keyspace and ``CREATE AGGREGATE`` in any - keyspace - ``CREATE`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE`` in specified keyspace - ``CREATE`` ``ALL ROLES`` ``CREATE ROLE`` - ``ALTER`` ``ALL KEYSPACES`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in any keyspace - ``ALTER`` ``KEYSPACE`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in specified keyspace - ``ALTER`` ``TABLE`` ``ALTER TABLE`` - ``ALTER`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing any existing - ``ALTER`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing in - specified keyspace - ``ALTER`` ``FUNCTION`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing - ``ALTER`` ``ALL ROLES`` ``ALTER ROLE`` on any role - ``ALTER`` ``ROLE`` ``ALTER ROLE`` - ``DROP`` ``ALL KEYSPACES`` ``DROP KEYSPACE`` and ``DROP TABLE`` in any keyspace - ``DROP`` ``KEYSPACE`` ``DROP TABLE`` in specified keyspace - ``DROP`` ``TABLE`` ``DROP TABLE`` - ``DROP`` ``ALL FUNCTIONS`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in any keyspace - ``DROP`` ``ALL FUNCTIONS IN KEYSPACE`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in specified keyspace - ``DROP`` ``FUNCTION`` ``DROP FUNCTION`` - ``DROP`` ``ALL ROLES`` ``DROP ROLE`` on any role - ``DROP`` ``ROLE`` ``DROP ROLE`` - ``SELECT`` ``ALL KEYSPACES`` ``SELECT`` on any table - ``SELECT`` ``KEYSPACE`` ``SELECT`` on any table in specified keyspace - ``SELECT`` ``TABLE`` ``SELECT`` on specified table - ``SELECT`` ``ALL MBEANS`` Call getter methods on any mbean - ``SELECT`` ``MBEANS`` Call getter methods on any mbean matching a wildcard pattern - ``SELECT`` ``MBEAN`` Call getter methods on named mbean - ``MODIFY`` ``ALL KEYSPACES`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table - ``MODIFY`` ``KEYSPACE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table in - specified keyspace - ``MODIFY`` ``TABLE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on specified table - ``MODIFY`` ``ALL MBEANS`` Call setter methods on any mbean - ``MODIFY`` ``MBEANS`` Call setter methods on any mbean matching a wildcard pattern - ``MODIFY`` ``MBEAN`` Call setter methods on named mbean - ``AUTHORIZE`` ``ALL KEYSPACES`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table - ``AUTHORIZE`` ``KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table in - specified keyspace - ``AUTHORIZE`` ``TABLE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified table - ``AUTHORIZE`` ``ALL FUNCTIONS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any function - ``AUTHORIZE`` ``ALL FUNCTIONS IN KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` in specified keyspace - ``AUTHORIZE`` ``FUNCTION`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified function - ``AUTHORIZE`` ``ALL MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean - 
``AUTHORIZE`` ``MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean matching - a wildcard pattern - ``AUTHORIZE`` ``MBEAN`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on named mbean - ``AUTHORIZE`` ``ALL ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on any role - ``AUTHORIZE`` ``ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on specified roles - ``DESCRIBE`` ``ALL ROLES`` ``LIST ROLES`` on all roles or only roles granted to another, - specified role - ``DESCRIBE`` ``ALL MBEANS`` Retrieve metadata about any mbean from the platform's MBeanServer - ``DESCRIBE`` ``MBEANS`` Retrieve metadata about any mbean matching a wildcard patter from the - platform's MBeanServer - ``DESCRIBE`` ``MBEAN`` Retrieve metadata about a named mbean from the platform's MBeanServer - ``EXECUTE`` ``ALL FUNCTIONS`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function, and use of - any function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL FUNCTIONS IN KEYSPACE`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function in specified - keyspace and use of any function in keyspace in ``CREATE AGGREGATE`` - ``EXECUTE`` ``FUNCTION`` ``SELECT``, ``INSERT`` and ``UPDATE`` using specified function and use - of the function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL MBEANS`` Execute operations on any mbean - ``EXECUTE`` ``MBEANS`` Execute operations on any mbean matching a wildcard pattern - ``EXECUTE`` ``MBEAN`` Execute operations on named mbean -=============== =============================== ======================================================================= - -.. _grant-permission-statement: - -GRANT PERMISSION -~~~~~~~~~~~~~~~~ - -Granting a permission uses the ``GRANT PERMISSION`` statement: - -.. productionlist:: - grant_permission_statement: GRANT `permissions` ON `resource` TO `role_name` - permissions: ALL [ PERMISSIONS ] | `permission` [ PERMISSION ] - permission: CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE - resource: ALL KEYSPACES - :| KEYSPACE `keyspace_name` - :| [ TABLE ] `table_name` - :| ALL ROLES - :| ROLE `role_name` - :| ALL FUNCTIONS [ IN KEYSPACE `keyspace_name` ] - :| FUNCTION `function_name` '(' [ `cql_type` ( ',' `cql_type` )* ] ')' - :| ALL MBEANS - :| ( MBEAN | MBEANS ) `string` - -For instance:: - - GRANT SELECT ON ALL KEYSPACES TO data_reader; - -This gives any user with the role ``data_reader`` permission to execute ``SELECT`` statements on any table across all -keyspaces:: - - GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer; - -This give any user with the role ``data_writer`` permission to perform ``UPDATE``, ``INSERT``, ``UPDATE``, ``DELETE`` -and ``TRUNCATE`` queries on all tables in the ``keyspace1`` keyspace:: - - GRANT DROP ON keyspace1.table1 TO schema_owner; - -This gives any user with the ``schema_owner`` role permissions to ``DROP`` ``keyspace1.table1``:: - - GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer; - -This grants any user with the ``report_writer`` role permission to execute ``SELECT``, ``INSERT`` and ``UPDATE`` queries -which use the function ``keyspace1.user_function( int )``:: - - GRANT DESCRIBE ON ALL ROLES TO role_admin; - -This grants any user with the ``role_admin`` role permission to view any and all roles in the system with a ``LIST -ROLES`` statement - -.. _grant-all: - -GRANT ALL -````````` - -When the ``GRANT ALL`` form is used, the appropriate set of permissions is determined automatically based on the target -resource. 
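As a minimal illustration of this behaviour (the ``ks_admin`` role name below is purely illustrative and not part of the original examples)::

    -- Grants every permission that applies to a keyspace resource
    GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO ks_admin;

Going by the table above, this would be equivalent to granting ``CREATE``, ``ALTER``, ``DROP``, ``SELECT``, ``MODIFY`` and ``AUTHORIZE`` on ``keyspace1`` individually.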
- -Automatic Granting -`````````````````` - -When a resource is created, via a ``CREATE KEYSPACE``, ``CREATE TABLE``, ``CREATE FUNCTION``, ``CREATE AGGREGATE`` or -``CREATE ROLE`` statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource. - -.. _revoke-permission-statement: - -REVOKE PERMISSION -~~~~~~~~~~~~~~~~~ - -Revoking a permission from a role uses the ``REVOKE PERMISSION`` statement: - -.. productionlist:: - revoke_permission_statement: REVOKE `permissions` ON `resource` FROM `role_name` - -For instance:: - - REVOKE SELECT ON ALL KEYSPACES FROM data_reader; - REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer; - REVOKE DROP ON keyspace1.table1 FROM schema_owner; - REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer; - REVOKE DESCRIBE ON ALL ROLES FROM role_admin; - -.. _list-permissions-statement: - -LIST PERMISSIONS -~~~~~~~~~~~~~~~~ - -Listing granted permissions uses the ``LIST PERMISSIONS`` statement: - -.. productionlist:: - list_permissions_statement: LIST `permissions` [ ON `resource` ] [ OF `role_name` [ NORECURSIVE ] ] - -For instance:: - - LIST ALL PERMISSIONS OF alice; - -Show all permissions granted to ``alice``, including those acquired transitively from any other roles:: - - LIST ALL PERMISSIONS ON keyspace1.table1 OF bob; - -Show all permissions on ``keyspace1.table1`` granted to ``bob``, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to ``keyspace1.table1``. -For example, should ``bob`` have ``ALTER`` permission on ``keyspace1``, that would be included in the results of this -query. Adding the ``NORECURSIVE`` switch restricts the results to only those permissions which were directly granted to -``bob`` or one of ``bob``'s roles:: - - LIST SELECT PERMISSIONS OF carlos; - -Show any permissions granted to ``carlos`` or any of ``carlos``'s roles, limited to ``SELECT`` permissions on any -resource. diff --git a/src/doc/3.11.5/_sources/cql/triggers.rst.txt b/src/doc/3.11.5/_sources/cql/triggers.rst.txt deleted file mode 100644 index db3f53e38..000000000 --- a/src/doc/3.11.5/_sources/cql/triggers.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-triggers: - -Triggers --------- - -Triggers are identified by a name defined by: - -.. productionlist:: - trigger_name: `identifier` - - -.. _create-trigger-statement: - -CREATE TRIGGER -^^^^^^^^^^^^^^ - -Creating a new trigger uses the ``CREATE TRIGGER`` statement: - -.. 
productionlist:: - create_trigger_statement: CREATE TRIGGER [ IF NOT EXISTS ] `trigger_name` - : ON `table_name` - : USING `string` - -For instance:: - - CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex'; - -The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a ``lib/triggers`` subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction. - -.. _drop-trigger-statement: - -DROP TRIGGER -^^^^^^^^^^^^ - -Dropping a trigger uses the ``DROP TRIGGER`` statement: - -.. productionlist:: - drop_trigger_statement: DROP TRIGGER [ IF EXISTS ] `trigger_name` ON `table_name` - -For instance:: - - DROP TRIGGER myTrigger ON myTable; diff --git a/src/doc/3.11.5/_sources/cql/types.rst.txt b/src/doc/3.11.5/_sources/cql/types.rst.txt deleted file mode 100644 index 509a7565e..000000000 --- a/src/doc/3.11.5/_sources/cql/types.rst.txt +++ /dev/null @@ -1,559 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. _data-types: - -Data Types ----------- - -CQL is a typed language and supports a rich set of data types, including :ref:`native types `, -:ref:`collection types `, :ref:`user-defined types `, :ref:`tuple types ` and :ref:`custom -types `: - -.. productionlist:: - cql_type: `native_type` | `collection_type` | `user_defined_type` | `tuple_type` | `custom_type` - - -.. _native-types: - -Native Types -^^^^^^^^^^^^ - -The native types supported by CQL are: - -.. 
productionlist:: - native_type: ASCII - : | BIGINT - : | BLOB - : | BOOLEAN - : | COUNTER - : | DATE - : | DECIMAL - : | DOUBLE - : | DURATION - : | FLOAT - : | INET - : | INT - : | SMALLINT - : | TEXT - : | TIME - : | TIMESTAMP - : | TIMEUUID - : | TINYINT - : | UUID - : | VARCHAR - : | VARINT - -The following table gives additional informations on the native data types, and on which kind of :ref:`constants -` each type supports: - -=============== ===================== ================================================================================== - type constants supported description -=============== ===================== ================================================================================== - ``ascii`` :token:`string` ASCII character string - ``bigint`` :token:`integer` 64-bit signed long - ``blob`` :token:`blob` Arbitrary bytes (no validation) - ``boolean`` :token:`boolean` Either ``true`` or ``false`` - ``counter`` :token:`integer` Counter column (64-bit signed value). See :ref:`counters` for details - ``date`` :token:`integer`, A date (with no corresponding time value). See :ref:`dates` below for details - :token:`string` - ``decimal`` :token:`integer`, Variable-precision decimal - :token:`float` - ``double`` :token:`integer` 64-bit IEEE-754 floating point - :token:`float` - ``duration`` :token:`duration`, A duration with nanosecond precision. See :ref:`durations` below for details - ``float`` :token:`integer`, 32-bit IEEE-754 floating point - :token:`float` - ``inet`` :token:`string` An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that - there is no ``inet`` constant, IP address should be input as strings - ``int`` :token:`integer` 32-bit signed int - ``smallint`` :token:`integer` 16-bit signed int - ``text`` :token:`string` UTF8 encoded string - ``time`` :token:`integer`, A time (with no corresponding date value) with nanosecond precision. See - :token:`string` :ref:`times` below for details - ``timestamp`` :token:`integer`, A timestamp (date and time) with millisecond precision. See :ref:`timestamps` - :token:`string` below for details - ``timeuuid`` :token:`uuid` Version 1 UUID_, generally used as a “conflict-free” timestamp. Also see - :ref:`timeuuid-functions` - ``tinyint`` :token:`integer` 8-bit signed int - ``uuid`` :token:`uuid` A UUID_ (of any version) - ``varchar`` :token:`string` UTF8 encoded string - ``varint`` :token:`integer` Arbitrary-precision integer -=============== ===================== ================================================================================== - -.. _counters: - -Counters -~~~~~~~~ - -The ``counter`` type is used to define *counter columns*. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the :ref:`UPDATE statement -` for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0. - -.. _counter-limitations: - -Counters have a number of important limitations: - -- They cannot be used for columns part of the ``PRIMARY KEY`` of a table. -- A table that contains a counter can only contain counters. In other words, either all the columns of a table outside - the ``PRIMARY KEY`` have the ``counter`` type, or none of them have it. -- Counters do not support :ref:`expiration `. 
-- The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other - words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed). -- Counter updates are, by nature, not `idemptotent `__. An important - consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), - the client has no way to know if the update has been applied or not. In particular, replaying the update may or may - not lead to an over count. - -.. _timestamps: - -Working with timestamps -^^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``timestamp`` type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as `the epoch `__: January 1 1970 at 00:00:00 GMT. - -Timestamps can be input in CQL either using their value as an :token:`integer`, or using a :token:`string` that -represents an `ISO 8601 `__ date. For instance, all of the values below are -valid ``timestamp`` values for Mar 2, 2011, at 04:05:00 AM, GMT: - -- ``1299038700000`` -- ``'2011-02-03 04:05+0000'`` -- ``'2011-02-03 04:05:00+0000'`` -- ``'2011-02-03 04:05:00.000+0000'`` -- ``'2011-02-03T04:05+0000'`` -- ``'2011-02-03T04:05:00+0000'`` -- ``'2011-02-03T04:05:00.000+0000'`` - -The ``+0000`` above is an RFC 822 4-digit time zone specification; ``+0000`` refers to GMT. US Pacific Standard Time is -``-0800``. The time zone may be omitted if desired (``'2011-02-03 04:05:00'``), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible. - -The time of day may also be omitted (``'2011-02-03'`` or ``'2011-02-03+0000'``), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the :ref:`date ` type. - -.. _dates: - -Working with dates -^^^^^^^^^^^^^^^^^^ - -Values of the ``date`` type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at -the center of the range (2^31). Epoch is January 1st, 1970 - -As for :ref:`timestamp `, a date can be input either as an :token:`integer` or using a date -:token:`string`. In the later case, the format should be ``yyyy-mm-dd`` (so ``'2011-02-03'`` for instance). - -.. _times: - -Working with times -^^^^^^^^^^^^^^^^^^ - -Values of the ``time`` type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight. - -As for :ref:`timestamp `, a time can be input either as an :token:`integer` or using a :token:`string` -representing the time. In the later case, the format should be ``hh:mm:ss[.fffffffff]`` (where the sub-second precision -is optional and if provided, can be less than the nanosecond). So for instance, the following are valid inputs for a -time: - -- ``'08:12:54'`` -- ``'08:12:54.123'`` -- ``'08:12:54.123456'`` -- ``'08:12:54.123456789'`` - -.. _durations: - -Working with durations -^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``duration`` type are encoded as 3 signed integer of variable lengths. The first integer represents the -number of months, the second the number of days and the third the number of nanoseconds. 
This is due to the fact that -the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving. -Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded -as a 64-bit integer. - -A duration can be input as: - - #. ``(quantity unit)+`` like ``12h30m`` where the unit can be: - - * ``y``: years (12 months) - * ``mo``: months (1 month) - * ``w``: weeks (7 days) - * ``d``: days (1 day) - * ``h``: hours (3,600,000,000,000 nanoseconds) - * ``m``: minutes (60,000,000,000 nanoseconds) - * ``s``: seconds (1,000,000,000 nanoseconds) - * ``ms``: milliseconds (1,000,000 nanoseconds) - * ``us`` or ``µs`` : microseconds (1000 nanoseconds) - * ``ns``: nanoseconds (1 nanosecond) - #. ISO 8601 format: ``P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W`` - #. ISO 8601 alternative format: ``P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]`` - -For example:: - - INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s); - INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S); - INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09); - -.. _duration-limitation: - -Duration columns cannot be used in a table's ``PRIMARY KEY``. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if ``1mo`` is greater than ``29d`` without a date -context. - -A ``1d`` duration is not equal to a ``24h`` one as the duration type has been created to be able to support daylight -saving. - -.. _collections: - -Collections -^^^^^^^^^^^ - -CQL supports 3 kinds of collections: :ref:`maps`, :ref:`sets` and :ref:`lists`. The types of those collections are defined -by: - -.. productionlist:: - collection_type: MAP '<' `cql_type` ',' `cql_type` '>' - : | SET '<' `cql_type` '>' - : | LIST '<' `cql_type` '>' - -and their values can be input using collection literals: - -.. productionlist:: - collection_literal: `map_literal` | `set_literal` | `list_literal` - map_literal: '{' [ `term` ':' `term` (',' `term` : `term`)* ] '}' - set_literal: '{' [ `term` (',' `term`)* ] '}' - list_literal: '[' [ `term` (',' `term`)* ] ']' - -Note however that neither :token:`bind_marker` nor ``NULL`` are supported inside collection literals. - -Noteworthy characteristics -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the -phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all -messages sent by a user”, “events registered by a sensor”...), then collections are not appropriate and a specific table -(with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy -characteristics and limitations: - -- Individual collections are not indexed internally, which means that even to access a single element of a collection, - the whole collection has to be read (and reading one is not paged internally). -- While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. - Further, some list operations are not idempotent by nature (see the section on :ref:`lists ` below for - details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when - possible. 
- -Please note that while some of those limitations may or may not be removed/improved upon in the future, it is a -anti-pattern to use a (single) collection to store large amounts of data. - -.. _maps: - -Maps -~~~~ - -A ``map`` is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:: - - CREATE TABLE users ( - id text PRIMARY KEY, - name text, - favs map // A map of text keys, and text values - ); - - INSERT INTO users (id, name, favs) - VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' }); - - // Replace the existing map entirely. - UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith'; - -Further, maps support: - -- Updating or inserting one or more elements:: - - UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith'; - UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith'; - -- Removing one or more element (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - DELETE favs['author'] FROM users WHERE id = 'jsmith'; - UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith'; - - Note that for removing multiple elements in a ``map``, you remove from it a ``set`` of keys. - -Lastly, TTLs are allowed for both ``INSERT`` and ``UPDATE``, but in both case the TTL set only apply to the newly -inserted/updated elements. In other words:: - - UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith'; - -will only apply the TTL to the ``{ 'color' : 'green' }`` record, the rest of the map remaining unaffected. - - -.. _sets: - -Sets -~~~~ - -A ``set`` is a (sorted) collection of unique values. You can define and insert a map with:: - - CREATE TABLE images ( - name text PRIMARY KEY, - owner text, - tags set // A set of text values - ); - - INSERT INTO images (name, owner, tags) - VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' }); - - // Replace the existing set entirely - UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg'; - -Further, sets support: - -- Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):: - - UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg'; - -- Removing one or multiple elements (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg'; - -Lastly, as for :ref:`maps `, TTLs if used only apply to the newly inserted values. - -.. _lists: - -Lists -~~~~~ - -.. note:: As mentioned above and further discussed at the end of this section, lists have limitations and specific - performance considerations that you should take into account before using them. In general, if you can use a - :ref:`set ` instead of list, always prefer a set. - -A ``list`` is a (sorted) collection of non-unique values where elements are ordered by there position in the list. 
You -can define and insert a list with:: - - CREATE TABLE plays ( - id text PRIMARY KEY, - game text, - players int, - scores list // A list of integers - ) - - INSERT INTO plays (id, game, players, scores) - VALUES ('123-afde', 'quake', 3, [17, 4, 2]); - - // Replace the existing list entirely - UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde'; - -Further, lists support: - -- Appending and prepending values to a list:: - - UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde'; - UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde'; - -- Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that - position or an error will be thrown that the list is too small:: - - UPDATE plays SET scores[1] = 7 WHERE id = '123-afde'; - -- Removing an element by its position in the list. This implies that the list has a pre-existing element for that position - or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the - list size will be diminished by 1, shifting the position of all the elements following the one deleted:: - - DELETE scores[1] FROM plays WHERE id = '123-afde'; - -- Deleting *all* the occurrences of particular values in the list (if a particular element doesn't occur at all in the - list, it is simply ignored and no error is thrown):: - - UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; - -.. warning:: The append and prepend operations are not idempotent by nature. So in particular, if one of these operations - times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value - twice. - -.. warning:: Setting and removing an element by position and removing occurrences of particular values incur an internal - *read-before-write*. They will thus run more slowly and take more resources than usual updates (with the exception - of conditional writes that have their own cost). - -Lastly, as for :ref:`maps `, TTLs when used only apply to the newly inserted values. - -.. _udts: - -User-Defined Types -^^^^^^^^^^^^^^^^^^ - -CQL supports the definition of user-defined types (UDT for short). Such a type can be created, modified and removed using -the :token:`create_type_statement`, :token:`alter_type_statement` and :token:`drop_type_statement` described below. But -once created, a UDT is simply referred to by its name: - -.. productionlist:: - user_defined_type: `udt_name` - udt_name: [ `keyspace_name` '.' ] `identifier` - - -Creating a UDT -~~~~~~~~~~~~~~ - -Creating a new user-defined type is done using a ``CREATE TYPE`` statement defined by: - -.. productionlist:: - create_type_statement: CREATE TYPE [ IF NOT EXISTS ] `udt_name` - : '(' `field_definition` ( ',' `field_definition` )* ')' - field_definition: `identifier` `cql_type` - -A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any -type, including collections or other UDTs. For instance:: - - CREATE TYPE phone ( - country_code int, - number text, - ) - - CREATE TYPE address ( - street text, - city text, - zip text, - phones map - ) - - CREATE TABLE user ( - name text PRIMARY KEY, - addresses map> - ) - -Note that: - -- Attempting to create an already existing type will result in an error unless the ``IF NOT EXISTS`` option is used. If - it is used, the statement will be a no-op if the type already exists. 
-- A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At - creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in - the current keyspace. -- As of Cassandra |version|, UDTs have to be frozen in most cases, hence the ``frozen
`` in the table definition - above. Please see the section on :ref:`frozen ` for more details. - -UDT literals -~~~~~~~~~~~~ - -Once a user-defined type has been created, values can be input using a UDT literal: - -.. productionlist:: - udt_literal: '{' `identifier` ':' `term` ( ',' `identifier` ':' `term` )* '}' - -In other words, a UDT literal is like a :ref:`map ` literal but its keys are the names of the fields of the type. -For instance, one could insert into the table defined in the previous section using:: - - INSERT INTO user (name, addresses) - VALUES ('z3 Pr3z1den7', { - 'home' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'cell' : { country_code: 1, number: '202 456-1111' }, - 'landline' : { country_code: 1, number: '...' } } - }, - 'work' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'fax' : { country_code: 1, number: '...' } } - } - }) - -To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields -(in which case those will be ``null``). - -Altering a UDT -~~~~~~~~~~~~~~ - -An existing user-defined type can be modified using an ``ALTER TYPE`` statement: - -.. productionlist:: - alter_type_statement: ALTER TYPE `udt_name` `alter_type_modification` - alter_type_modification: ADD `field_definition` - : | RENAME `identifier` TO `identifier` ( `identifier` TO `identifier` )* - -You can: - -- add a new field to the type (``ALTER TYPE address ADD country text``). That new field will be ``null`` for any values - of the type created before the addition. -- rename the fields of the type (``ALTER TYPE address RENAME zip TO zipcode``). - -Dropping a UDT -~~~~~~~~~~~~~~ - -You can drop an existing user-defined type using a ``DROP TYPE`` statement: - -.. productionlist:: - drop_type_statement: DROP TYPE [ IF EXISTS ] `udt_name` - -Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error. - -If the type dropped does not exist, an error will be returned unless ``IF EXISTS`` is used, in which case the operation -is a no-op. - -.. _tuples: - -Tuples -^^^^^^ - -CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be -thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by: - -.. productionlist:: - tuple_type: TUPLE '<' `cql_type` ( ',' `cql_type` )* '>' - tuple_literal: '(' `term` ( ',' `term` )* ')' - -and can be used thusly:: - - CREATE TABLE durations ( - event text, - duration tuple, - ) - - INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours')); - -Unlike other "composed" types (collections and UDT), a tuple is always :ref:`frozen ` (without the need for the -`frozen` keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). -Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of -those values can be null but they need to be explicitly declared as such). - -.. _custom-types: - -Custom Types -^^^^^^^^^^^^ - -.. note:: Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is - complex, not user friendly and the other provided types, particularly :ref:`user-defined types `, should almost - always be enough. - -A custom type is defined by: - -.. 
productionlist:: - custom_type: `string` - -A custom type is a :token:`string` that contains the name of Java class that extends the server side ``AbstractType`` -class and that can be loaded by Cassandra (it should thus be in the ``CLASSPATH`` of every node running Cassandra). That -class will define what values are valid for the type and how the time sorts when used for a clustering column. For any -other purpose, a value of a custom type is the same than that of a ``blob``, and can in particular be input using the -:token:`blob` literal syntax. diff --git a/src/doc/3.11.5/_sources/data_modeling/index.rst.txt b/src/doc/3.11.5/_sources/data_modeling/index.rst.txt deleted file mode 100644 index dde031a19..000000000 --- a/src/doc/3.11.5/_sources/data_modeling/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Data Modeling -============= - -.. todo:: TODO diff --git a/src/doc/3.11.5/_sources/development/code_style.rst.txt b/src/doc/3.11.5/_sources/development/code_style.rst.txt deleted file mode 100644 index 5a486a4a3..000000000 --- a/src/doc/3.11.5/_sources/development/code_style.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Code Style -========== - -General Code Conventions ------------------------- - - - The Cassandra project follows `Sun's Java coding conventions `_ with an important exception: ``{`` and ``}`` are always placed on a new line - -Exception handling ------------------- - - - Never ever write ``catch (...) {}`` or ``catch (...) { logger.error() }`` merely to satisfy Java's compile-time exception checking. Always propagate the exception up or throw ``RuntimeException`` (or, if it "can't happen," ``AssertionError``). This makes the exceptions visible to automated tests. - - Avoid propagating up checked exceptions that no caller handles. Rethrow as ``RuntimeException`` (or ``IOError``, if that is more applicable). 
- - Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don't hide it behind a warn; if it isn't, no need for the warning. - - If you genuinely know an exception indicates an expected condition, it's okay to ignore it BUT this must be explicitly explained in a comment. - -Boilerplate ------------ - - - Avoid redundant ``@Override`` annotations when implementing abstract or interface methods. - - Do not implement equals or hashcode methods unless they are actually needed. - - Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in "real" methods to either.) - - Prefer requiring initialization in the constructor to setters. - - Avoid redundant ``this`` references to member fields or methods. - - Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it. - - Always include braces for nested levels of conditionals and loops. Only avoid braces for single level. - -Multiline statements --------------------- - - - Try to keep lines under 120 characters, but use good judgement -- it's better to exceed 120 by a little, than split a line that has no natural splitting points. - - When splitting inside a method call, use one line per parameter and align them, like this: - - :: - - SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), - columnFamilies.size(), - StorageService.getPartitioner()); - - - When splitting a ternary, use one line per clause, carry the operator, and align like this: - - :: - - var = bar == null - ? doFoo() - : doBar(); - -Whitespace ----------- - - - Please make sure to use 4 spaces instead of the tab character for all your indentation. - - Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn't have to pay attention to whitespace diffs. - -Imports -------- - -Please observe the following order for your imports:: - - java - [blank line] - com.google.common - org.apache.commons - org.junit - org.slf4j - [blank line] - everything else alphabetically - -Format files for IDEs ---------------------- - - - IntelliJ: `intellij-codestyle.jar `_ - - IntelliJ 13: `gist for IntelliJ 13 `_ (this is a work in progress, still working on javadoc, ternary style, line continuations, etc) - - Eclipse (https://github.com/tjake/cassandra-style-eclipse) - - - diff --git a/src/doc/3.11.5/_sources/development/how_to_commit.rst.txt b/src/doc/3.11.5/_sources/development/how_to_commit.rst.txt deleted file mode 100644 index d956c72d8..000000000 --- a/src/doc/3.11.5/_sources/development/how_to_commit.rst.txt +++ /dev/null @@ -1,75 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. 
highlight:: none - -How-to Commit -============= - -If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself. - -Here is how committing and merging will usually look for merging and pushing for tickets that follow the convention (if patch-based): - -Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch). - -On cassandra-3.0: - #. ``git am -3 12345-3.0.patch`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git apply -3 12345-3.3.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git apply -3 12345-trunk.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk —atomic`` - -Same scenario, but a branch-based contribution: - -On cassandra-3.0: - #. ``git cherry-pick `` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk —atomic`` - -.. tip:: - - Notes on git flags: - ``-3`` flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply. - - ``—atomic`` flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per each branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue. - -.. tip:: - - The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. - curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch - diff --git a/src/doc/3.11.5/_sources/development/how_to_review.rst.txt b/src/doc/3.11.5/_sources/development/how_to_review.rst.txt deleted file mode 100644 index dc9774362..000000000 --- a/src/doc/3.11.5/_sources/development/how_to_review.rst.txt +++ /dev/null @@ -1,71 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. 
-.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Review Checklist -**************** - -When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process: - -**General** - - * Does it conform to the :doc:`code_style` guidelines? - * Is there any redundant or duplicate code? - * Is the code as modular as possible? - * Can any singletons be avoided? - * Can any of the code be replaced with library functions? - * Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem? - -**Error-Handling** - - * Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded? - * Where third-party utilities are used, are returning errors being caught? - * Are invalid parameter values handled? - * Are any Throwable/Exceptions passed to the JVMStabilityInspector? - * Are errors well-documented? Does the error message tell the user how to proceed? - * Do exceptions propagate to the appropriate level in the code? - -**Documentation** - - * Do comments exist and describe the intent of the code (the "why", not the "how")? - * Are javadocs added where appropriate? - * Is any unusual behavior or edge-case handling described? - * Are data structures and units of measurement explained? - * Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’? - * Does the code self-document via clear naming, abstractions, and flow control? - * Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed? - * Is the ticket tagged with "client-impacting" and "doc-impacting", where appropriate? - * Has lib/licences been updated for third-party libs? Are they Apache License compatible? - * Is the Component on the JIRA ticket set appropriately? - -**Testing** - - * Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc. - * Do tests exist and are they comprehensive? - * Do unit tests actually test that the code is performing the intended functionality? - * Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse? - * If the code may be affected by multi-node clusters, are there dtests? - * If the code may take a long time to test properly, are there CVH tests? - * Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions? - * If patch affects read/write path, did we test for performance regressions w/multiple workloads? - * If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature? - -**Logging** - - * Are logging statements logged at the correct level? - * Are there logs in the critical path that could affect performance? - * Is there any log that could be added to communicate status or troubleshoot potential problems in this feature? - * Can any unnecessary logging statement be removed? 
- diff --git a/src/doc/3.11.5/_sources/development/ide.rst.txt b/src/doc/3.11.5/_sources/development/ide.rst.txt deleted file mode 100644 index 298649576..000000000 --- a/src/doc/3.11.5/_sources/development/ide.rst.txt +++ /dev/null @@ -1,161 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Building and IDE Integration -**************************** - -Building From Source -==================== - -Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using `Java 8 `_, `Git `_ and `Ant `_. - -The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:: - - git clone http://git-wip-us.apache.org/repos/asf/cassandra.git cassandra-trunk - -Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:: - - git checkout cassandra-3.0 - -You can get a list of available branches with ``git branch``. - -Finally build Cassandra using ant:: - - ant - -This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled. - -.. hint:: - - You can setup multiple working trees for different Cassandra versions from the same repository using `git-worktree `_. - -.. note:: - - `Bleeding edge development snapshots `_ of Cassandra are available from Jenkins continuous integration. - -Setting up Cassandra in IntelliJ IDEA -===================================== - -`IntelliJ IDEA `_ by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra. - -Setup Cassandra as a Project (C* 2.1 and newer) ------------------------------------------------ - -Since 2.1.5, there is a new ant target: ``generate-idea-files``. Please see our `wiki `_ for instructions for older Cassandra versions. - -Please clone and build Cassandra as described above and execute the following steps: - -1. Once Cassandra is built, generate the IDEA files using ant: - -:: - - ant generate-idea-files - -2. Start IDEA - -3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA's File menu - -The project generated by the ant task ``generate-idea-files`` contains nearly everything you need to debug Cassandra and execute unit tests. 
- - * Run/debug defaults for JUnit - * Run/debug configuration for Cassandra daemon - * License header for Java source files - * Cassandra code style - * Inspections - -Setting up Cassandra in Eclipse -=============================== - -Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the `download page `_. The following guide was created with "Eclipse IDE for Java Developers". - -These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x. - -Project Settings ----------------- - -**It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.** - - * Clone and build Cassandra as described above. - * Run ``ant generate-eclipse-files`` to create the Eclipse settings. - * Start Eclipse. - * Select ``File->Import->Existing Projects into Workspace->Select git directory``. - * Make sure "cassandra-trunk" is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above). - * Confirm "Finish" to have your project imported. - -You should now be able to find the project as part of the "Package Explorer" or "Project Explorer" without having Eclipse complain about any errors after building the project automatically. - -Unit Tests ----------- - -Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting ``Run As->JUnit Test``. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting ``Debug As->JUnit Test``. - -Alternatively all unit tests can be run from the command line as described in :doc:`testing` - -Debugging Cassandra Using Eclipse ---------------------------------- - -There are two ways how to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ``./bin/cassandra`` script and connect to the JVM through `remotely `_ from Eclipse or start Cassandra from Eclipse right away. - -Starting Cassandra From Command Line -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * Set environment variable to define remote debugging options for the JVM: - ``export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`` - * Start Cassandra by executing the ``./bin/cassandra`` - -Afterwards you should be able to connect to the running Cassandra process through the following steps: - -From the menu, select ``Run->Debug Configurations..`` - -.. image:: images/eclipse_debug0.png - -Create new remote application - -.. image:: images/eclipse_debug1.png - -Configure connection settings by specifying a name and port 1414 - -.. image:: images/eclipse_debug2.png - -Afterwards confirm "Debug" to connect to the JVM and start debugging Cassandra! - -Starting Cassandra From Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cassandra can also be started directly from Eclipse if you don't want to use the command line. - -From the menu, select ``Run->Run Configurations..`` - -.. image:: images/eclipse_debug3.png - -Create new application - -.. image:: images/eclipse_debug4.png - -Specify name, project and main class ``org.apache.cassandra.service.CassandraDaemon`` - -.. image:: images/eclipse_debug5.png - -Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed. 
- -:: - - -Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true - -.. image:: images/eclipse_debug6.png - -Now just confirm "Debug" and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging! - diff --git a/src/doc/3.11.5/_sources/development/index.rst.txt b/src/doc/3.11.5/_sources/development/index.rst.txt deleted file mode 100644 index aefc5999c..000000000 --- a/src/doc/3.11.5/_sources/development/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Development -********************* - -.. toctree:: - :maxdepth: 2 - - ide - testing - patches - code_style - how_to_review - how_to_commit diff --git a/src/doc/3.11.5/_sources/development/patches.rst.txt b/src/doc/3.11.5/_sources/development/patches.rst.txt deleted file mode 100644 index e3d968fab..000000000 --- a/src/doc/3.11.5/_sources/development/patches.rst.txt +++ /dev/null @@ -1,125 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Contributing Code Changes -************************* - -Choosing What to Work on -======================== - -Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that requires changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing. 
As a general rule of thumb:
 * Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the `developer community `_
 * Bug fixes take higher priority compared to features
 * The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
 * Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately

.. hint::

   Not sure what to work on? Just pick an issue tagged with the `low hanging fruit label `_ in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

Before You Start Coding
=======================

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or IRC channel listed on our `community page `_.

You should also
 * Avoid redundant work by searching for already reported issues in `JIRA `_
 * Create a new issue early in the process describing what you're working on - not just after finishing your patch
 * Link related JIRA issues with your own ticket to provide better context
 * Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
 * Ping people whom you would like to ask for advice on JIRA by `mentioning users `_

There are also some fixed rules that you need to be aware of:
 * Patches will only be applied to branches by following the release model
 * Code must be testable
 * Code must follow the :doc:`code_style` convention
 * Changes must not break compatibility between different Cassandra versions
 * Contributions must be covered by the Apache License

Choosing the Right Branches to Work on
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

There are currently multiple Cassandra versions maintained in individual branches:

======= ======
Version Policy
======= ======
3.x     Tick-tock (see below)
3.0     Bug fixes only
2.2     Bug fixes only
2.1     Critical bug fixes only
======= ======

Corresponding branches in git are easy to recognize as they are named ``cassandra-`` (e.g. ``cassandra-3.0``). The ``trunk`` branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

Tick-Tock Releases
""""""""""""""""""

New releases created as part of the `tick-tock release process `_ will either focus on stability (odd version numbers) or introduce new features (even version numbers). Any code for new Cassandra features should be based on the latest, unreleased 3.x branch with an even version number, or on trunk.

Bug Fixes
"""""""""

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be ``cassandra-2.1`` -> ``cassandra-2.2`` -> ``cassandra-3.0`` -> ``cassandra-3.x`` -> ``trunk``. But don't worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn't very common.
As a contributor, you're also not expected to provide a single patch for each version. What you need to do however is: - - * Be clear about which versions you could verify to be affected by the bug - * For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on case by case bases - * If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0) - * Test if the patch can be merged cleanly across branches in the direction listed above - * Be clear which branches may need attention by the committer or even create custom patches for those if you can - -Creating a Patch -================ - -So you've finished coding and the great moment arrives: it's time to submit your patch! - - 1. Create a branch for your changes if you haven't done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. ``git checkout -b 12345-3.0`` - 2. Verify that you follow Cassandra's :doc:`code_style` - 3. Make sure all tests (including yours) pass using ant as described in :doc:`testing`. If you suspect a test failure is unrelated to your change, it may be useful to check the test's status by searching the issue tracker or looking at `CI `_ results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites. - 4. Consider going through the :doc:`how_to_review` for your code. This will help you to understand how others will consider your change for inclusion. - 5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either: - - a. Attach a patch to JIRA with a single squashed commit in it (per branch), or - b. Squash the commits in-place in your branches into one - - 6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch ending with the following statement on the last line: ``patch by X; reviewed by Y for CASSANDRA-ZZZZZ`` - 7. When you're happy with the result, create a patch: - - :: - - git add - git commit -m '' - git format-patch HEAD~1 - mv (e.g. 12345-trunk.txt, 12345-3.0.txt) - - Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch: - - :: - - git push --set-upstream origin 12345-3.0 - - 8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless. - 9. Attach the newly generated patch to the ticket/add a link to your branch and click "Submit Patch" at the top of the ticket. This will move the ticket into "Patch Available" status, indicating that your submission is ready for review. - 10. Wait for other developers or committers to review it and hopefully +1 the ticket (see :doc:`how_to_review`). If your change does not receive a +1, do not be discouraged. 
If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable. - 11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into "Patch Available" once again. - -Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work. - - diff --git a/src/doc/3.11.5/_sources/development/testing.rst.txt b/src/doc/3.11.5/_sources/development/testing.rst.txt deleted file mode 100644 index b8eea6b28..000000000 --- a/src/doc/3.11.5/_sources/development/testing.rst.txt +++ /dev/null @@ -1,89 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Testing -******* - -Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you're working on. - - -Unit Testing -============ - -The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the ``test/unit`` directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example. - -.. code-block:: java - - @Test - public void testBatchAndList() throws Throwable - { - createTable("CREATE TABLE %s (k int PRIMARY KEY, l list)"); - execute("BEGIN BATCH " + - "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " + - "APPLY BATCH"); - - assertRows(execute("SELECT l FROM %s WHERE k = 0"), - row(list(1, 2, 3))); - } - -Unit tests can be run from the command line using the ``ant test`` command, ``ant test -Dtest.name=`` to execute a test suite or ``ant testsome -Dtest.name= -Dtest.methods=[,testmethod2]`` for individual tests. 
For example, to run all test methods in the ``org.apache.cassandra.cql3.SimpleQueryTest`` class, you would run:: - - ant test -Dtest.name=SimpleQueryTest - -To run only the ``testStaticCompactTables()`` test method from that class, you would run:: - - ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables - -Long running tests ------------------- - -Test that consume a significant amount of time during execution can be found in the ``test/long`` directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under ``test/long`` only when using the ``ant long-test`` target. - -DTests -====== - -One way of doing integration or system testing at larger scale is by using `dtest `_, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ``ccmlib`` from the `ccm `_ project. Dtests will setup clusters using this library just as you do running ad-hoc ``ccm`` commands on your local machine. Afterwards dtests will use the `Python driver `_ to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes. - -Using dtests helps us to prevent regression bugs by continually executing tests on the `CI server `_ against new patches. For frequent contributors, this Jenkins is set up to build branches from their GitHub repositories. It is likely that your reviewer will use this Jenkins instance to run tests for your patch. Read more on the motivation behind the CI server `here `_. - -The best way to learn how to write dtests is probably by reading the introduction "`How to Write a Dtest `_" and by looking at existing, recently updated tests in the project. New tests must follow certain `style conventions `_ that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefor you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR. - -Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will able to support you, and in some cases they may offer to write a dtest for you. - -Performance Testing -=================== - -Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable. - -Cassandra Stress Tool ---------------------- - -TODO: `CASSANDRA-12365 `_ - -cstar_perf ----------- - -Another tool available on github is `cstar_perf `_ that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it. - -CircleCI --------- -Cassandra ships with a default `CircleCI `_ configuration, to enable running tests on your branches, you need to go the CircleCI website, click "Login" and log in with your github account. Then you need to give CircleCI permission to watch your repositories. 
Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ``ant eclipse-warnings`` and ``ant test`` will be run. If you up the parallelism to 4, it also runs ``ant long-test``, ``ant test-compression`` and ``ant stress-test`` - - diff --git a/src/doc/3.11.5/_sources/faq/index.rst.txt b/src/doc/3.11.5/_sources/faq/index.rst.txt deleted file mode 100644 index d985e3716..000000000 --- a/src/doc/3.11.5/_sources/faq/index.rst.txt +++ /dev/null @@ -1,298 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Frequently Asked Questions -========================== - -- :ref:`why-cant-list-all` -- :ref:`what-ports` -- :ref:`what-happens-on-joins` -- :ref:`asynch-deletes` -- :ref:`one-entry-ring` -- :ref:`can-large-blob` -- :ref:`nodetool-connection-refused` -- :ref:`to-batch-or-not-to-batch` -- :ref:`selinux` -- :ref:`how-to-unsubscribe` -- :ref:`cassandra-eats-all-my-memory` -- :ref:`what-are-seeds` -- :ref:`are-seeds-SPOF` -- :ref:`why-message-dropped` -- :ref:`oom-map-failed` -- :ref:`what-on-same-timestamp-update` -- :ref:`why-bootstrapping-stream-error` - -.. _why-cant-list-all: - -Why can't I set ``listen_address`` to listen on 0.0.0.0 (all my addresses)? ---------------------------------------------------------------------------- - -Cassandra is a gossip-based distributed system and ``listen_address`` is the address a node tells other nodes to reach -it at. Telling other nodes "contact me on any of my addresses" is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen. - -If you don't want to manually specify an IP to ``listen_address`` for each node in your cluster (understandable!), leave -it blank and Cassandra will use ``InetAddress.getLocalHost()`` to pick an address. Then it's up to you or your ops team -to make things resolve correctly (``/etc/hosts/``, dns, etc). - -One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769). - -See :jira:`256` and :jira:`43` for more gory details. - -.. _what-ports: - -What ports does Cassandra use? ------------------------------- - -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX (and 9160 for the deprecated Thrift interface). The internode communication and native protocol ports -are configurable in the :ref:`cassandra-yaml`. The JMX port is configurable in ``cassandra-env.sh`` (through JVM -options). All ports are TCP. - -.. _what-happens-on-joins: - -What happens to existing data in my cluster when I add new nodes? 
------------------------------------------------------------------

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See :ref:`topology-changes`.

.. _asynch-deletes:

I delete data from Cassandra, but disk usage stays the same. What gives?
-------------------------------------------------------------------------

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can't actually be removed when you perform a delete; instead, a marker (also called a "tombstone") is written to indicate the value's new status. Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged completely and the corresponding disk space recovered. See :ref:`compaction` for more detail.

.. _one-entry-ring:

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?
--------------------------------------------------------------------------------------------------------------------

This happens when you have the same token assigned to each node. Don't do that.

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random token on the next restart.

.. _change-replication-factor:

Can I change the replication factor (a property of the keyspace) on a live cluster?
-------------------------------------------------------------------------------------

Yes, but it will require running repair (or cleanup) to change the replica count of existing data:

- :ref:`Alter ` the replication factor for the desired keyspace (using cqlsh for instance).
- If you're reducing the replication factor, run ``nodetool cleanup`` on the cluster to remove surplus replicated data. Cleanup runs on a per-node basis.
- If you're increasing the replication factor, run ``nodetool repair`` to ensure data is replicated according to the new configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster performance. It's highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will most likely swamp it.

.. _can-large-blob:

Can I Store (large) BLOBs in Cassandra?
----------------------------------------

Cassandra isn't optimized for large file or BLOB storage and a single ``blob`` value is always read and sent to the client in its entirety. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the ``max_mutation_size_in_kb`` configuration of the :ref:`cassandra-yaml` file (which defaults to half of ``commitlog_segment_size_in_mb``, which itself defaults to 32MB).

.. _nodetool-connection-refused:

Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives?
----------------------------------------------------------------------------------------

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on each end of the exchange.
Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions. - -If you are not using DNS, then make sure that your ``/etc/hosts`` files are accurate on both ends. If that fails, try -setting the ``-Djava.rmi.server.hostname=`` JVM option near the bottom of ``cassandra-env.sh`` to an -interface that you can reach from the remote machine. - -.. _to-batch-or-not-to-batch: - -Will batching my operations speed up my bulk load? --------------------------------------------------- - -No. Using batches to load data will generally just add "spikes" of latency. Use asynchronous INSERTs instead, or use -true :ref:`bulk-loading`. - -An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch -stay reasonable). But never ever blindly batch everything! - -.. _selinux: - -On RHEL nodes are unable to join the ring ------------------------------------------ - -Check if `SELinux `__ is on; if it is, turn it off. - -.. _how-to-unsubscribe: - -How do I unsubscribe from the email list? ------------------------------------------ - -Send an email to ``user-unsubscribe@cassandra.apache.org``. - -.. _cassandra-eats-all-my-memory: - -Why does top report that Cassandra is using a lot more memory than the Java heap max? -------------------------------------------------------------------------------------- - -Cassandra uses `Memory Mapped Files `__ (mmap) internally. That is, we -use the operating system's virtual memory system to map a number of on-disk files into the Cassandra process' address -space. This will "use" virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that. - -What matters from the perspective of "memory use" in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap'd /dev/zero, which represent real memory used. The key issue is that for a mmap'd file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write. - -The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don't -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail `here `__. - -.. _what-are-seeds: - -What are seeds? ---------------- - -Seeds are used during startup to discover the cluster. - -If you configure your nodes to refer some node as seed, nodes in your ring tend to send Gossip message to seeds more -often (also see the :ref:`section on gossip `) than to non-seeds. In other words, seeds are worked as hubs of -Gossip network. With seeds, each node can detect status changes of other nodes quickly. - -Seeds are also referred by new nodes on bootstrap to learn other nodes in ring. When you add a new node to ring, you -need to specify at least one live seed to contact. 
Once a node joins the ring, it learns about the other nodes, so it doesn't need a seed on subsequent boots.

You can make a node a seed at any time. There is nothing special about seed nodes. If you list the node in the seed list, it is a seed.

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to the seeds later. If you have no data (new install) you do not have to worry about bootstrap at all.

Recommended usage of seeds:

- pick two (or more) nodes per data center as seed nodes.
- sync the seed list to all your nodes

.. _are-seeds-SPOF:

Does single seed mean single point of failure?
-----------------------------------------------

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

.. _cant-call-jmx-method:

Why can't I call jmx method X on jconsole?
-------------------------------------------

Some JMX operations use array arguments, and as jconsole doesn't support array arguments, those operations can't be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.

.. _why-message-dropped:

Why do I see "... messages dropped ..." in the logs?
-----------------------------------------------------

This is a symptom of load shedding -- Cassandra defending itself against more requests than it can handle.

Internode messages which are received by a node, but do not get processed within their proper timeout (see ``read_request_timeout``, ``write_request_timeout``, ... in the :ref:`cassandra-yaml`), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

For reads, this means a read request may not have completed.

Load shedding is part of the Cassandra architecture; if this is a persistent issue it is generally a sign of an overloaded node or cluster.

.. _oom-map-failed:

Cassandra dies with ``java.lang.OutOfMemoryError: Map failed``
---------------------------------------------------------------

If Cassandra is dying **specifically** with the "Map failed" message, it means the OS is denying Java the ability to lock more memory. In Linux, this typically means memlock is limited. Check ``/proc//limits`` to verify this and raise it (eg, via ulimit in bash). You may also need to increase ``vm.max_map_count``. Note that the Debian package handles this for you automatically.

.. _what-on-same-timestamp-update:

What happens if two updates are made with the same timestamp?
--------------------------------------------------------------

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: first, deletes take precedence over inserts/updates.
Second, if there are two updates, the one with the lexically larger -value is selected. - -.. _why-bootstrapping-stream-error: - -Why bootstrapping a new node fails with a "Stream failed" error? ----------------------------------------------------------------- - -Two main possibilities: - -#. the GC may be creating long pauses disrupting the streaming process -#. compactions happening in the background hold streaming long enough that the TCP connection fails - -In the first case, regular GC tuning advices apply. In the second case, you need to set TCP keepalive to a lower value -(default is very high on Linux). Try to just run the following:: - - $ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5 - -To make those settings permanent, add them to your ``/etc/sysctl.conf`` file. - -Note: `GCE `__'s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment. - - - - - - - - - - - diff --git a/src/doc/3.11.5/_sources/getting_started/configuring.rst.txt b/src/doc/3.11.5/_sources/getting_started/configuring.rst.txt deleted file mode 100644 index 27fac7872..000000000 --- a/src/doc/3.11.5/_sources/getting_started/configuring.rst.txt +++ /dev/null @@ -1,67 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra ---------------------- - -For running Cassandra on a single node, the steps above are enough, you don't really need to change any configuration. -However, when you deploy a cluster of nodes, or use clients that are not on the same host, then there are some -parameters that must be changed. - -The Cassandra configuration files can be found in the ``conf`` directory of tarballs. For packages, the configuration -files will be located in ``/etc/cassandra``. - -Main runtime properties -^^^^^^^^^^^^^^^^^^^^^^^ - -Most of configuration in Cassandra is done via yaml properties that can be set in ``cassandra.yaml``. At a minimum you -should consider setting the following properties: - -- ``cluster_name``: the name of your cluster. -- ``seeds``: a comma separated list of the IP addresses of your cluster seeds. -- ``storage_port``: you don't necessarily need to change this but make sure that there are no firewalls blocking this - port. -- ``listen_address``: the IP address of your node, this is what allows other nodes to communicate with this node so it - is important that you change it. Alternatively, you can set ``listen_interface`` to tell Cassandra which interface to - use, and consecutively which address to use. Set only one, not both. 
-- ``native_transport_port``: as for storage\_port, make sure this port is not blocked by firewalls as clients will - communicate with Cassandra on this port. - -Changing the location of directories -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following yaml properties control the location of directories: - -- ``data_file_directories``: one or more directories where data files are located. -- ``commitlog_directory``: the directory where commitlog files are located. -- ``saved_caches_directory``: the directory where saved caches are located. -- ``hints_directory``: the directory where hints are located. - -For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks. - -Environment variables -^^^^^^^^^^^^^^^^^^^^^ - -JVM-level settings such as heap size can be set in ``cassandra-env.sh``. You can add any additional JVM command line -argument to the ``JVM_OPTS`` environment variable; when Cassandra starts these arguments will be passed to the JVM. - -Logging -^^^^^^^ - -The logger in use is logback. You can change logging properties by editing ``logback.xml``. By default it will log at -INFO level into a file called ``system.log`` and at debug level into a file called ``debug.log``. When running in the -foreground, it will also log at INFO level to the console. - diff --git a/src/doc/3.11.5/_sources/getting_started/drivers.rst.txt b/src/doc/3.11.5/_sources/getting_started/drivers.rst.txt deleted file mode 100644 index baec82378..000000000 --- a/src/doc/3.11.5/_sources/getting_started/drivers.rst.txt +++ /dev/null @@ -1,107 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _client-drivers: - -Client drivers --------------- - -Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver. 
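To give a sense of what driver usage looks like, the following is a minimal, illustrative sketch using the DataStax Java driver (3.x series). The contact point, port and query are assumptions for a single node running locally and are not specific to any particular deployment.

.. code-block:: java

    import com.datastax.driver.core.Cluster;
    import com.datastax.driver.core.ResultSet;
    import com.datastax.driver.core.Row;
    import com.datastax.driver.core.Session;

    public class QuickConnect
    {
        public static void main(String[] args)
        {
            // Contact point and default port (9042) assume a single local node
            try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
                 Session session = cluster.connect())
            {
                // Query a system table that exists on every node
                ResultSet rs = session.execute("SELECT cluster_name, listen_address FROM system.local");
                for (Row row : rs)
                    System.out.println(row.getString("cluster_name") + " @ " + row.getInet("listen_address"));
            }
        }
    }

Each driver has its own connection and execution API; consult the documentation of the driver you choose for the equivalent calls.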
- -Java -^^^^ - -- `Achilles `__ -- `Astyanax `__ -- `Casser `__ -- `Datastax Java driver `__ -- `Kundera `__ -- `PlayORM `__ - -Python -^^^^^^ - -- `Datastax Python driver `__ - -Ruby -^^^^ - -- `Datastax Ruby driver `__ - -C# / .NET -^^^^^^^^^ - -- `Cassandra Sharp `__ -- `Datastax C# driver `__ -- `Fluent Cassandra `__ - -Nodejs -^^^^^^ - -- `Datastax Nodejs driver `__ -- `Node-Cassandra-CQL `__ - -PHP -^^^ - -- `CQL \| PHP `__ -- `Datastax PHP driver `__ -- `PHP-Cassandra `__ -- `PHP Library for Cassandra `__ - -C++ -^^^ - -- `Datastax C++ driver `__ -- `libQTCassandra `__ - -Scala -^^^^^ - -- `Datastax Spark connector `__ -- `Phantom `__ -- `Quill `__ - -Clojure -^^^^^^^ - -- `Alia `__ -- `Cassaforte `__ -- `Hayt `__ - -Erlang -^^^^^^ - -- `CQerl `__ -- `Erlcass `__ - -Go -^^ - -- `CQLc `__ -- `Gocassa `__ -- `GoCQL `__ - -Haskell -^^^^^^^ - -- `Cassy `__ - -Rust -^^^^ - -- `Rust CQL `__ diff --git a/src/doc/3.11.5/_sources/getting_started/index.rst.txt b/src/doc/3.11.5/_sources/getting_started/index.rst.txt deleted file mode 100644 index 4ca9c4d40..000000000 --- a/src/doc/3.11.5/_sources/getting_started/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Getting Started -=============== - -This section covers how to get started using Apache Cassandra and should be the first thing to read if you are new to -Cassandra. - -.. toctree:: - :maxdepth: 2 - - installing - configuring - querying - drivers - - diff --git a/src/doc/3.11.5/_sources/getting_started/installing.rst.txt b/src/doc/3.11.5/_sources/getting_started/installing.rst.txt deleted file mode 100644 index 1a7b8ad3b..000000000 --- a/src/doc/3.11.5/_sources/getting_started/installing.rst.txt +++ /dev/null @@ -1,106 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Installing Cassandra --------------------- - -Prerequisites -^^^^^^^^^^^^^ - -- The latest version of Java 8, either the `Oracle Java Standard Edition 8 - `__ or `OpenJDK 8 `__. 
To - verify that you have the correct version of java installed, type ``java -version``. - -- For using cqlsh, the latest version of `Python 2.7 `__. To verify that you have - the correct version of Python installed, type ``python --version``. - -Installation from binary tarball files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Download the latest stable release from the `Apache Cassandra downloads website `__. - -- Untar the file somewhere, for example: - -:: - - tar -xvf apache-cassandra-3.6-bin.tar.gz cassandra - -The files will be extracted into ``apache-cassandra-3.6``, you need to substitute 3.6 with the release number that you -have downloaded. - -- Optionally add ``apache-cassandra-3.6\bin`` to your path. -- Start Cassandra in the foreground by invoking ``bin/cassandra -f`` from the command line. Press "Control-C" to stop - Cassandra. Start Cassandra in the background by invoking ``bin/cassandra`` from the command line. Invoke ``kill pid`` - or ``pkill -f CassandraDaemon`` to stop Cassandra, where pid is the Cassandra process id, which you can find for - example by invoking ``pgrep -f CassandraDaemon``. -- Verify that Cassandra is running by invoking ``bin/nodetool status`` from the command line. -- Configuration files are located in the ``conf`` sub-directory. -- Since Cassandra 2.1, log and data directories are located in the ``logs`` and ``data`` sub-directories respectively. - Older versions defaulted to ``/var/log/cassandra`` and ``/var/lib/cassandra``. Due to this, it is necessary to either - start Cassandra with root privileges or change ``conf/cassandra.yaml`` to use directories owned by the current user, - as explained below in the section on changing the location of directories. - -Installation from Debian packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Add the Apache repository of Cassandra to ``/etc/apt/sources.list.d/cassandra.sources.list``, for example for version - 3.6: - -:: - - echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list - -- Add the Apache Cassandra repository keys: - -:: - - curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add - - -- Update the repositories: - -:: - - sudo apt-get update - -- If you encounter this error: - -:: - - GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA - -Then add the public key A278B781FE4B2BDA as follows: - -:: - - sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA - -and repeat ``sudo apt-get update``. The actual key may be different, you get it from the error message itself. For a -full list of Apache contributors public keys, you can refer to `this link `__. - -- Install Cassandra: - -:: - - sudo apt-get install cassandra - -- You can start Cassandra with ``sudo service cassandra start`` and stop it with ``sudo service cassandra stop``. - However, normally the service will start automatically. For this reason be sure to stop it if you need to make any - configuration changes. -- Verify that Cassandra is running by invoking ``nodetool status`` from the command line. -- The default location of configuration files is ``/etc/cassandra``. -- The default location of log and data directories is ``/var/log/cassandra/`` and ``/var/lib/cassandra``. 
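Putting the Debian-package steps above together, an illustrative end-to-end session (using the 36x series from the example above; substitute the series matching the release you want) might look like::

    echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
    curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
    sudo apt-get update
    sudo apt-get install cassandra
    nodetool status

All of these commands are taken from the steps above; the final ``nodetool status`` simply verifies that the node came up.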
diff --git a/src/doc/3.11.5/_sources/getting_started/querying.rst.txt b/src/doc/3.11.5/_sources/getting_started/querying.rst.txt deleted file mode 100644 index 55b162bb4..000000000 --- a/src/doc/3.11.5/_sources/getting_started/querying.rst.txt +++ /dev/null @@ -1,52 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Inserting and querying ----------------------- - -The API to Cassandra is :ref:`CQL `, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done: - -- either using cqlsh, -- or through a client driver for Cassandra. - -CQLSH -^^^^^ - -cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:: - - $ bin/cqlsh localhost - Connected to Test Cluster at localhost:9042. - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - Use HELP for help. - cqlsh> SELECT cluster_name, listen_address FROM system.local; - - cluster_name | listen_address - --------------+---------------- - Test Cluster | 127.0.0.1 - - (1 rows) - cqlsh> - -See the :ref:`cqlsh section ` for full documentation. - -Client drivers -^^^^^^^^^^^^^^ - -A lot of client drivers are provided by the Community and a list of known drivers is provided in :ref:`the next section -`. You should refer to the documentation of each drivers for more information on how to use them. diff --git a/src/doc/3.11.5/_sources/index.rst.txt b/src/doc/3.11.5/_sources/index.rst.txt deleted file mode 100644 index 562603d19..000000000 --- a/src/doc/3.11.5/_sources/index.rst.txt +++ /dev/null @@ -1,41 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Welcome to Apache Cassandra's documentation! -============================================ - -This is the official documentation for `Apache Cassandra `__ |version|. 
If you would like -to contribute to this documentation, you are welcome to do so by submitting your contribution like any other patch -following `these instructions `__. - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting_started/index - architecture/index - data_modeling/index - cql/index - configuration/index - operating/index - tools/index - troubleshooting/index - development/index - faq/index - - bugs - contactus diff --git a/src/doc/3.11.5/_sources/operating/backups.rst.txt b/src/doc/3.11.5/_sources/operating/backups.rst.txt deleted file mode 100644 index c071e83b5..000000000 --- a/src/doc/3.11.5/_sources/operating/backups.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Backups -======= - -.. todo:: TODO diff --git a/src/doc/3.11.5/_sources/operating/bloom_filters.rst.txt b/src/doc/3.11.5/_sources/operating/bloom_filters.rst.txt deleted file mode 100644 index 0b37c18da..000000000 --- a/src/doc/3.11.5/_sources/operating/bloom_filters.rst.txt +++ /dev/null @@ -1,65 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Bloom Filters -------------- - -In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter. - -Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file. - -While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the -the ``bloom_filter_fp_chance`` to a float between 0 and 1. 
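For illustration, the property can also be set when a table is created; the keyspace, table and column names below are hypothetical::

    CREATE TABLE ks.t (
        id int PRIMARY KEY,
        value text
    ) WITH bloom_filter_fp_chance = 0.01;

The "Changing" section below shows how to adjust the same property on an existing table with ``ALTER TABLE``.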
- -The default value for ``bloom_filter_fp_chance`` is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases. - -Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the ``bloom_filter_fp_chance`` gets closer to 0), memory usage -increases non-linearly - the bloom filter for ``bloom_filter_fp_chance = 0.01`` will require about three times as much -memory as the same table with ``bloom_filter_fp_chance = 0.1``. - -Typical values for ``bloom_filter_fp_chance`` are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case: - -- Users with more RAM and slower disks may benefit from setting the ``bloom_filter_fp_chance`` to a numerically lower - number (such as 0.01) to avoid excess IO operations -- Users with less RAM, more dense nodes, or very fast disks may tolerate a higher ``bloom_filter_fp_chance`` in order to - save RAM at the expense of excess IO operations -- In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics - workloads), setting the ``bloom_filter_fp_chance`` to a much higher number is acceptable. - -Changing -^^^^^^^^ - -The bloom filter false positive chance is visible in the ``DESCRIBE TABLE`` output as the field -``bloom_filter_fp_chance``. Operators can change the value with an ``ALTER TABLE`` statement: -:: - - ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01 - -Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ``ALTER TABLE`` statement, new -files on disk will be written with the new ``bloom_filter_fp_chance``, but existing sstables will not be modified until -they are compacted - if an operator needs a change to ``bloom_filter_fp_chance`` to take effect, they can trigger an -SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the progress. diff --git a/src/doc/3.11.5/_sources/operating/bulk_loading.rst.txt b/src/doc/3.11.5/_sources/operating/bulk_loading.rst.txt deleted file mode 100644 index c8224d5cb..000000000 --- a/src/doc/3.11.5/_sources/operating/bulk_loading.rst.txt +++ /dev/null @@ -1,24 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _bulk-loading: - -Bulk Loading ------------- - -.. 
todo:: TODO diff --git a/src/doc/3.11.5/_sources/operating/cdc.rst.txt b/src/doc/3.11.5/_sources/operating/cdc.rst.txt deleted file mode 100644 index 192f62a09..000000000 --- a/src/doc/3.11.5/_sources/operating/cdc.rst.txt +++ /dev/null @@ -1,89 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Change Data Capture -------------------- - -Overview -^^^^^^^^ - -Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the combined flushed and unflushed CDC-log is reached. An operator can -enable CDC on a table by setting the table property ``cdc=true`` (either when :ref:`creating the table -` or :ref:`altering it `), after which any CommitLogSegments containing -data for a CDC-enabled table are moved to the directory specified in ``cassandra.yaml`` on segment discard. A threshold -of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will not allow CDC -data until a consumer parses and removes data from the destination archival directory. - -Configuration -^^^^^^^^^^^^^ - -Enabling or disable CDC on a table -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -CDC is enable or disable through the `cdc` table property, for instance:: - - CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true; - - ALTER TABLE foo WITH cdc=true; - - ALTER TABLE foo WITH cdc=false; - -cassandra.yaml parameters -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following `cassandra.yaml` are available for CDC: - -``cdc_enabled`` (default: false) - Enable or disable CDC operations node-wide. -``cdc_raw_directory`` (default: ``$CASSANDRA_HOME/data/cdc_raw``) - Destination for CommitLogSegments to be moved after all corresponding memtables are flushed. -``cdc_free_space_in_mb``: (default: min of 4096 and 1/8th volume space) - Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in - ``cdc_raw_directory``. -``cdc_free_space_check_interval_ms`` (default: 250) - When at capacity, we limit the frequency with which we re-calculate the space taken up by ``cdc_raw_directory`` to - prevent burning CPU cycles unnecessarily. Default is to check 4 times per second. - -.. _reading-commitlogsegments: - -Reading CommitLogSegments -^^^^^^^^^^^^^^^^^^^^^^^^^ -This implementation included a refactor of CommitLogReplayer into `CommitLogReader.java -`__. -Usage is `fairly straightforward -`__ -with a `variety of signatures -`__ -available for use. In order to handle mutations read from disk, implement `CommitLogReadHandler -`__. 
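As a rough sketch of what a consumer might look like, the skeleton below implements the three callbacks of ``CommitLogReadHandler``. The method signatures are reproduced from memory of the 3.x source tree, so treat them as approximate and defer to the linked interface for the authoritative definitions.

.. code-block:: java

    import org.apache.cassandra.db.Mutation;
    import org.apache.cassandra.db.commitlog.CommitLogDescriptor;
    import org.apache.cassandra.db.commitlog.CommitLogReadHandler;

    public class LoggingCdcConsumer implements CommitLogReadHandler
    {
        public boolean shouldSkipSegmentOnError(CommitLogReadException exception)
        {
            // Skip unreadable segments instead of aborting the whole read
            return true;
        }

        public void handleUnrecoverableError(CommitLogReadException exception)
        {
            throw new RuntimeException(exception);
        }

        public void handleMutation(Mutation mutation, int size, int entryLocation, CommitLogDescriptor descriptor)
        {
            // A real consumer would transform and ship the mutation to durable storage
            System.out.println("Read mutation for keyspace " + mutation.getKeyspaceName());
        }
    }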
- -Warnings -^^^^^^^^ - -**Do not enable CDC without some kind of consumption process in-place.** - -The initial implementation of Change Data Capture does not include a parser (see :ref:`reading-commitlogsegments` above) -so, if CDC is enabled on a node and then on a table, the ``cdc_free_space_in_mb`` will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place. - -Further Reading -^^^^^^^^^^^^^^^ - -- `Design doc `__ -- `JIRA ticket `__ diff --git a/src/doc/3.11.5/_sources/operating/compaction.rst.txt b/src/doc/3.11.5/_sources/operating/compaction.rst.txt deleted file mode 100644 index 0f3900042..000000000 --- a/src/doc/3.11.5/_sources/operating/compaction.rst.txt +++ /dev/null @@ -1,442 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _compaction: - -Compaction ----------- - -Types of compaction -^^^^^^^^^^^^^^^^^^^ - -The concept of compaction is used for different kinds of operations in Cassandra, the common thing about these -operations is that it takes one or more sstables and output new sstables. The types of compactions are; - -Minor compaction - triggered automatically in Cassandra. -Major compaction - a user executes a compaction over all sstables on the node. -User defined compaction - a user triggers a compaction on a given set of sstables. -Scrub - try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you - will need to run a full repair on the node. -Upgradesstables - upgrade sstables to the latest version. Run this after upgrading to a new major version. -Cleanup - remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been - bootstrapped since that node will take ownership of some ranges from those nodes. -Secondary index rebuild - rebuild the secondary indexes on the node. -Anticompaction - after repair the ranges that were actually repaired are split out of the sstables that existed when repair started. -Sub range compaction - It is possible to only compact a given sub range - this could be useful if you know a token that has been - misbehaving - either gathering many updates or many deletes. (``nodetool compact -st x -et y``) will pick - all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will - most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS - the resulting sstable will end up in L0. - -When is a minor compaction triggered? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -# When an sstable is added to the node through flushing/streaming etc. 
-# When autocompaction is enabled after being disabled (``nodetool enableautocompaction``) -# When compaction adds new sstables. -# A check for new minor compactions every 5 minutes. - -Merging sstables -^^^^^^^^^^^^^^^^ - -Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently. - -Tombstones and Garbage Collection (GC) Grace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Why Tombstones -~~~~~~~~~~~~~~ - -When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra. - -Deletes without tombstones -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Imagine a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If one of the nodes fails and and our delete operation only removes existing values we can end up with a cluster that -looks like:: - - [], [], [A] - -Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:: - - [A], [A], [A] - -This would cause our data to be resurrected even though it had been -deleted. - -Deletes with Tombstones -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting again with a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If instead of removing data we add a tombstone record, our single node failure situation will look like this.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A] - -Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]] - -Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as ``gc_grace_seconds`` for every table in Cassandra. - -The gc_grace_seconds parameter and Tombstone Removal -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The table level ``gc_grace_seconds`` parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After ``gc_grace_seconds`` has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstable for a tombstone to be removed. More precisely, to -be able to drop an actual tombstone the following needs to be true; - -- The tombstone must be older than ``gc_grace_seconds`` -- If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older - than the tombstone containing X must be included in the same compaction. 
We don't need to care if the partition is in - an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older - than the data it cannot shadow that data. -- If the option ``only_purge_repaired_tombstones`` is enabled, tombstones are only removed if the data has also been - repaired. - -If a node remains down or disconnected for longer than ``gc_grace_seconds`` it's deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the "Deletes without Tombstones" section. -Note that tombstones will not be removed until a compaction event even if ``gc_grace_seconds`` has elapsed. - -The default value for ``gc_grace_seconds`` is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using ``WITH gc_grace_seconds``. - -TTL -^^^ - -Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least ``gc_grace_seconds``. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once. - -Fully expired sstables -^^^^^^^^^^^^^^^^^^^^^^ - -If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called ``sstableexpiredblockers`` that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -``TimeWindowCompactionStrategy`` (and the deprecated ``DateTieredCompactionStrategy``). - -Repaired/unrepaired data -^^^^^^^^^^^^^^^^^^^^^^^^ - -With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables. - -Data directories -^^^^^^^^^^^^^^^^ - -Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. 
This has a few more benefits than just avoiding -data getting undeleted: - -- It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings - and each one can run compactions independently from the others. -- Users can backup and restore a single data directory. -- Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk - backing two data directories, the big one will be limited by the small one. One workaround is to create - more data directories backed by the big disk. - -Single sstable tombstone compaction -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When an sstable is written, a histogram with the tombstone expiry times is created and this is used to try to find -sstables with very many tombstones and run single sstable compaction on that sstable in the hope of being able to drop -tombstones in that sstable. Before starting this, it is also checked how likely it is that any tombstones will actually -be able to be dropped and how much this sstable overlaps with other sstables. To avoid most of these checks the -compaction option ``unchecked_tombstone_compaction`` can be enabled. - -.. _compaction-options: - -Common options -^^^^^^^^^^^^^^ - -There are a number of common options for all the compaction strategies: - -``enabled`` (default: true) - Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do - 'nodetool enableautocompaction' to start running compactions. -``tombstone_threshold`` (default: 0.2) - How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable. -``tombstone_compaction_interval`` (default: 86400s (1 day)) - Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure - that one sstable is not constantly getting recompacted - this option states how often we should try for a given - sstable. -``log_all`` (default: false) - New detailed compaction logging, see :ref:`below `. -``unchecked_tombstone_compaction`` (default: false) - The single sstable compaction has quite strict checks for whether it should be started, this option disables those - checks and for some use cases this might be needed. Note that this does not change anything for the actual - compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able - to drop any tombstones. -``only_purge_repaired_tombstone`` (default: false) - Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired. -``min_threshold`` (default: 4) - Lower limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. -``max_threshold`` (default: 32) - Upper limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. - -Further, see the section on each strategy for specific additional options. - -Compaction nodetool commands -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :ref:`nodetool ` utility provides a number of commands related to compaction: - -``enableautocompaction`` - Enable compaction. -``disableautocompaction`` - Disable compaction. -``setcompactionthroughput`` - How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this - throughput. -``compactionstats`` - Statistics about current and pending compactions. 
-``compactionhistory`` - List details about the last compactions. -``setcompactionthreshold`` - Set the min/max sstable count for when to trigger compaction, defaults to 4/32. - -Switching the compaction strategy and options using JMX -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:: - - org.apache.cassandra.db:type=ColumnFamilies,keyspace=,columnfamily= - -and the attribute to change is ``CompactionParameters`` or ``CompactionParametersJson`` if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an :ref:`ALTER TABLE ` statement - -for example:: - - { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10} - -The setting is kept until someone executes an :ref:`ALTER TABLE ` that touches the compaction -settings or restarts the node. - -.. _detailed-compaction-logging: - -More detailed compaction logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Enable with the compaction option ``log_all`` and a more detailed compaction log file will be produced in your log -directory. - -.. _STCS: - -Size Tiered Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The basic idea of ``SizeTieredCompactionStrategy`` (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within ``bucket_low`` and ``bucket_high`` of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket's sstables takes the most reads. - -Major compaction -~~~~~~~~~~~~~~~~ - -When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size. - -.. _stcs-options: - -STCS options -~~~~~~~~~~~~ - -``min_sstable_size`` (default: 50MB) - Sstables smaller than this are put in the same bucket. -``bucket_low`` (default: 0.5) - How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``bucket_low * avg_bucket_size < sstable_size`` (and the ``bucket_high`` condition holds, see below), then - the sstable is added to the bucket. -``bucket_high`` (default: 1.5) - How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``sstable_size < bucket_high * avg_bucket_size`` (and the ``bucket_low`` condition holds, see above), then - the sstable is added to the bucket. - -Defragmentation -~~~~~~~~~~~~~~~ - -Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster. - -.. _LCS: - -Leveled Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The idea of ``LeveledCompactionStrategy`` (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. 
By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here. - -When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory. - -When deciding which level to compact LCS checks the higher levels first (with LCS, a "higher" level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level. - -Major compaction -~~~~~~~~~~~~~~~~ - -It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817. - -Bootstrapping -~~~~~~~~~~~~~ - -During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done. - -STCS in L0 -~~~~~~~~~~ - -If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better. - -Starved sstables -~~~~~~~~~~~~~~~~ - -If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable\_size\_in\_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved. - -.. _lcs-options: - -LCS options -~~~~~~~~~~~ - -``sstable_size_in_mb`` (default: 160MB) - The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very - large partitions on the node. 
- -``fanout_size`` (default: 10) - The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning - this option. - -LCS also support the ``cassandra.disable_stcs_in_l0`` startup option (``-Dcassandra.disable_stcs_in_l0=true``) to avoid -doing STCS in L0. - -.. _TWCS: - -Time Window CompactionStrategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``TimeWindowCompactionStrategy`` (TWCS) is designed specifically for workloads where it's beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -``SizeTieredCompactionStrategy`` or ``LeveledCompactionStrategy``. The basic concept is that -``TimeWindowCompactionStrategy`` will create 1 sstable per file for a given window, where a window is simply calculated -as the combination of two primary options: - -``compaction_window_unit`` (default: DAYS) - A Java TimeUnit (MINUTES, HOURS, or DAYS). -``compaction_window_size`` (default: 1) - The number of units that make up a window. - -Taken together, the operator can specify windows of virtually any size, and `TimeWindowCompactionStrategy` will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using `SizeTieredCompactionStrategy`. - -Ideally, operators should select a ``compaction_window_unit`` and ``compaction_window_size`` pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -(``'compaction_window_unit':'DAYS','compaction_window_size':3``). - -TimeWindowCompactionStrategy Operational Concerns -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways: - -- If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables - and flushed into the same SSTable, where it will remain comingled. -- If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data - will be comingled and flushed into the same SSTable. - -While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL ``USING TIMESTAMP``. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled), and disable background read -repair by setting the table's ``read_repair_chance`` and ``dclocal_read_repair_chance`` to 0. - -Changing TimeWindowCompactionStrategy Options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Operators wishing to enable ``TimeWindowCompactionStrategy`` on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected. 
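As a concrete sketch (using a placeholder ``keyspace.table`` name, in the same style as the other examples in these docs), switching an existing table over to TWCS with the 3 day window discussed earlier might look like::

    ALTER TABLE keyspace.table WITH compaction = {
        'class': 'TimeWindowCompactionStrategy',
        'compaction_window_unit': 'DAYS',
        'compaction_window_size': 3
    };

Running the major compaction recommended above then places the pre-existing data into a single (old) window, after which newer writes create the expected per-window SSTables.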
- -Operators wishing to change ``compaction_window_unit`` or ``compaction_window_size`` can do so, but may trigger -additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 -hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple -windows. diff --git a/src/doc/3.11.5/_sources/operating/compression.rst.txt b/src/doc/3.11.5/_sources/operating/compression.rst.txt deleted file mode 100644 index 01da34b6d..000000000 --- a/src/doc/3.11.5/_sources/operating/compression.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Compression ------------ - -Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of -data on disk by compressing the SSTable in user-configurable chunks (``chunk_length_in_kb``). Because Cassandra -SSTables are immutable, the CPU cost of compression is only incurred when the SSTable is written - subsequent updates -to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when -UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full -chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so -on). - -Configuring Compression -^^^^^^^^^^^^^^^^^^^^^^^ - -Compression is configured on a per-table basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. By -default, three options are relevant: - -- ``class`` specifies the compression class - Cassandra provides three classes (``LZ4Compressor``, - ``SnappyCompressor``, and ``DeflateCompressor``). The default is ``LZ4Compressor``. -- ``chunk_length_in_kb`` specifies the number of kilobytes of data per compression chunk. The default is 64KB. -- ``crc_check_chance`` determines how likely Cassandra is to verify the checksum on each compression chunk during - reads. The default is 1.0. - -Users can set compression using the following syntax: - -:: - - CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'}; - -Or - -:: - - ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5}; - -Once enabled, compression can be disabled with ``ALTER TABLE`` setting ``enabled`` to ``false``: - -:: - - ALTER TABLE keyspace.table WITH compression = {'enabled':'false'}; - -Operators should be aware, however, that changing compression is not immediate. 
The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ``ALTER TABLE``, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the SSTables on disk, -re-compressing the data in the process. - -Benefits and Uses -^^^^^^^^^^^^^^^^^ - -Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk. - -Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well. - -Operational Impact -^^^^^^^^^^^^^^^^^^ - -- Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per - terabyte of data on disk, though the exact usage varies with ``chunk_length_in_kb`` and compression ratios. - -- Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as - non-vnode bootstrap), the CPU overhead of compression can be a limiting factor. - -- The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a - way to ensure correctness of data on disk, compressed tables allow the user to set ``crc_check_chance`` (a float from - 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt. - -Advanced Use -^^^^^^^^^^^^ - -Advanced users can provide their own compression class by implementing the interface at -``org.apache.cassandra.io.compress.ICompressor``. diff --git a/src/doc/3.11.5/_sources/operating/hardware.rst.txt b/src/doc/3.11.5/_sources/operating/hardware.rst.txt deleted file mode 100644 index ad3aa8d21..000000000 --- a/src/doc/3.11.5/_sources/operating/hardware.rst.txt +++ /dev/null @@ -1,87 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Hardware Choices ----------------- - -Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. 
Typical production servers have 8 or more cores and at least -32GB of RAM. - -CPU -^^^ -Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes. - -Memory -^^^^^^ -Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java's Xmx system parameter). In addition to -the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and -counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system's page -cache, storing recently accessed portions files in RAM for rapid re-use. - -For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest: - -- ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption -- The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM -- Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection -- Heaps larger than 12GB should consider G1GC - -Disks -^^^^^ -Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables. - -Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files. - -Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra's sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it's important that the commitlog -(``commitlog_directory``) be on one physical disk (not simply a partition, but a physical disk), and the data files -(``data_file_directories``) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk. - -In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. 
Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it's typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5. - -Common Cloud Choices -^^^^^^^^^^^^^^^^^^^^ - -Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include: - -- m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate - workloads -- i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs -- m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) - storage - -Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives. diff --git a/src/doc/3.11.5/_sources/operating/hints.rst.txt b/src/doc/3.11.5/_sources/operating/hints.rst.txt deleted file mode 100644 index f79f18ab7..000000000 --- a/src/doc/3.11.5/_sources/operating/hints.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Hints ------ - -.. todo:: todo diff --git a/src/doc/3.11.5/_sources/operating/index.rst.txt b/src/doc/3.11.5/_sources/operating/index.rst.txt deleted file mode 100644 index e2cead255..000000000 --- a/src/doc/3.11.5/_sources/operating/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Operating Cassandra -=================== - -.. 
toctree:: - :maxdepth: 2 - - snitch - topo_changes - repair - read_repair - hints - compaction - bloom_filters - compression - cdc - backups - bulk_loading - metrics - security - hardware - diff --git a/src/doc/3.11.5/_sources/operating/metrics.rst.txt b/src/doc/3.11.5/_sources/operating/metrics.rst.txt deleted file mode 100644 index 04abb48e9..000000000 --- a/src/doc/3.11.5/_sources/operating/metrics.rst.txt +++ /dev/null @@ -1,706 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Monitoring ----------- - -Metrics in Cassandra are managed using the `Dropwizard Metrics `__ library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of `built in -`__ and `third party -`__ reporter plugins. - -Metrics are collected for a single node. It's up to the operator to use an external monitoring system to aggregate them. - -Metric Types -^^^^^^^^^^^^ -All metrics reported by cassandra fit into one of the following types. - -``Gauge`` - An instantaneous measurement of a value. - -``Counter`` - A gauge for an ``AtomicLong`` instance. Typically this is consumed by monitoring the change since the last call to - see if there is a large increase compared to the norm. - -``Histogram`` - Measures the statistical distribution of values in a stream of data. - - In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th - percentiles. - -``Timer`` - Measures both the rate that a particular piece of code is called and the histogram of its duration. - -``Latency`` - Special type that tracks latency (in microseconds) with a ``Timer`` plus a ``Counter`` that tracks the total latency - accrued since starting. The former is useful if you track the change in total latency since the last check. Each - metric name of this type will have 'Latency' and 'TotalLatency' appended to it. - -``Meter`` - A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving - average throughputs. - -Table Metrics -^^^^^^^^^^^^^ - -Each table in Cassandra has metrics responsible for tracking its state and performance. - -The metric names are all appended with the specific ``Keyspace`` and ``Table`` name. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Table...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Table keyspace= scope=
name=`` - -.. NOTE:: - There is a special table called '``all``' without a keyspace. This represents the aggregation of metrics across - **all** tables and keyspaces on the node. - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -MemtableOnHeapSize Gauge Total amount of data stored in the memtable that resides **on**-heap, including column related overhead and partitions overwritten. -MemtableOffHeapSize Gauge Total amount of data stored in the memtable that resides **off**-heap, including column related overhead and partitions overwritten. -MemtableLiveDataSize Gauge Total amount of live data stored in the memtable, excluding any data structure overhead. -AllMemtablesOnHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **on**-heap. -AllMemtablesOffHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **off**-heap. -AllMemtablesLiveDataSize Gauge Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead. -MemtableColumnsCount Gauge Total number of columns present in the memtable. -MemtableSwitchCount Counter Number of times flush has resulted in the memtable being switched out. -CompressionRatio Gauge Current compression ratio for all SSTables. -EstimatedPartitionSizeHistogram Gauge Histogram of estimated partition size (in bytes). -EstimatedPartitionCount Gauge Approximate number of keys in table. -EstimatedColumnCountHistogram Gauge Histogram of estimated number of columns. -SSTablesPerReadHistogram Histogram Histogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into acoount. -ReadLatency Latency Local read latency for this table. -RangeLatency Latency Local range scan latency for this table. -WriteLatency Latency Local write latency for this table. -CoordinatorReadLatency Timer Coordinator read latency for this table. -CoordinatorScanLatency Timer Coordinator range scan latency for this table. -PendingFlushes Counter Estimated number of flush tasks pending for this table. -BytesFlushed Counter Total number of bytes flushed since server [re]start. -CompactionBytesWritten Counter Total number of bytes written by compaction since server [re]start. -PendingCompactions Gauge Estimate of number of pending compactions for this table. -LiveSSTableCount Gauge Number of SSTables on disk for this table. -LiveDiskSpaceUsed Counter Disk space used by SSTables belonging to this table (in bytes). -TotalDiskSpaceUsed Counter Total disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC'd. -MinPartitionSize Gauge Size of the smallest compacted partition (in bytes). -MaxPartitionSize Gauge Size of the largest compacted partition (in bytes). -MeanPartitionSize Gauge Size of the average compacted partition (in bytes). -BloomFilterFalsePositives Gauge Number of false positives on table's bloom filter. -BloomFilterFalseRatio Gauge False positive ratio of table's bloom filter. -BloomFilterDiskSpaceUsed Gauge Disk space used by bloom filter (in bytes). -BloomFilterOffHeapMemoryUsed Gauge Off-heap memory used by bloom filter. -IndexSummaryOffHeapMemoryUsed Gauge Off-heap memory used by index summary. 
-CompressionMetadataOffHeapMemoryUsed Gauge Off-heap memory used by compression meta data. -KeyCacheHitRate Gauge Key cache hit rate for this table. -TombstoneScannedHistogram Histogram Histogram of tombstones scanned in queries on this table. -LiveScannedHistogram Histogram Histogram of live cells scanned in queries on this table. -ColUpdateTimeDeltaHistogram Histogram Histogram of column update time delta on this table. -ViewLockAcquireTime Timer Time taken acquiring a partition lock for materialized view updates on this table. -ViewReadTime Timer Time taken during the local read of a materialized view update. -TrueSnapshotsSize Gauge Disk space used by snapshots of this table including all SSTable components. -RowCacheHitOutOfRange Counter Number of table row cache hits that do not satisfy the query filter, thus went to disk. -RowCacheHit Counter Number of table row cache hits. -RowCacheMiss Counter Number of table row cache misses. -CasPrepare Latency Latency of paxos prepare round. -CasPropose Latency Latency of paxos propose round. -CasCommit Latency Latency of paxos commit round. -PercentRepaired Gauge Percent of table data that is repaired on disk. -SpeculativeRetries Counter Number of times speculative retries were sent for this table. -WaitingOnFreeMemtableSpace Histogram Histogram of time spent waiting for free memtable space, either on- or off-heap. -DroppedMutations Counter Number of dropped mutations on this table. -======================================= ============== =========== - -Keyspace Metrics -^^^^^^^^^^^^^^^^ -Each keyspace in Cassandra has metrics responsible for tracking its state and performance. - -These metrics are the same as the ``Table Metrics`` above, only they are aggregated at the Keyspace level. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.keyspace..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Keyspace scope= name=`` - -ThreadPool Metrics -^^^^^^^^^^^^^^^^^^ - -Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It's important to monitor the state of these thread pools since they can tell you how saturated a -node is. - -The metric names are all appended with the specific ``ThreadPool`` name. The thread pools are also categorized under a -specific type. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ThreadPools...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ThreadPools scope= type= name=`` - -===================== ============== =========== -Name Type Description -===================== ============== =========== -ActiveTasks Gauge Number of tasks being actively worked on by this pool. -PendingTasks Gauge Number of queued tasks queued up on this pool. -CompletedTasks Counter Number of tasks completed. -TotalBlockedTasks Counter Number of tasks that were blocked due to queue saturation. -CurrentlyBlockedTask Counter Number of tasks that are currently blocked due to queue saturation but on retry will become unblocked. -MaxPoolSize Gauge The maximum number of threads in this pool. -===================== ============== =========== - -The following thread pools can be monitored. 
- -============================ ============== =========== -Name Type Description -============================ ============== =========== -Native-Transport-Requests transport Handles client CQL requests -CounterMutationStage request Responsible for counter writes -ViewMutationStage request Responsible for materialized view writes -MutationStage request Responsible for all other writes -ReadRepairStage request ReadRepair happens on this thread pool -ReadStage request Local reads run on this thread pool -RequestResponseStage request Coordinator requests to the cluster run on this thread pool -AntiEntropyStage internal Builds merkle tree for repairs -CacheCleanupExecutor internal Cache maintenance performed on this thread pool -CompactionExecutor internal Compactions are run on these threads -GossipStage internal Handles gossip requests -HintsDispatcher internal Performs hinted handoff -InternalResponseStage internal Responsible for intra-cluster callbacks -MemtableFlushWriter internal Writes memtables to disk -MemtablePostFlush internal Cleans up commit log after memtable is written to disk -MemtableReclaimMemory internal Memtable recycling -MigrationStage internal Runs schema migrations -MiscStage internal Misceleneous tasks run here -PendingRangeCalculator internal Calculates token range -PerDiskMemtableFlushWriter_0 internal Responsible for writing a spec (there is one of these per disk 0-N) -Sampler internal Responsible for re-sampling the index summaries of SStables -SecondaryIndexManagement internal Performs updates to secondary indexes -ValidationExecutor internal Performs validation compaction or scrubbing -============================ ============== =========== - -.. |nbsp| unicode:: 0xA0 .. nonbreaking space - -Client Request Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Client requests have their own set of metrics that encapsulate the work happening at coordinator level. - -Different types of client requests are broken down by ``RequestType``. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ClientRequest..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ClientRequest scope= name=`` - - -:RequestType: CASRead -:Description: Metrics related to transactional read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction read latency. - Unavailables Counter Number of unavailable exceptions encountered. - UnfinishedCommit Counter Number of transactions that were committed on read. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended reads were encountered - ===================== ============== ============================================================= - -:RequestType: CASWrite -:Description: Metrics related to transactional write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction write latency. 
- UnfinishedCommit Counter Number of transactions that were committed on write. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended writes were encountered - ===================== ============== ============================================================= - - -:RequestType: Read -:Description: Metrics related to standard read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of read failures encountered. - |nbsp| Latency Read latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: RangeSlice -:Description: Metrics related to token range read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of range query failures encountered. - |nbsp| Latency Range query latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: Write -:Description: Metrics related to regular write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of write failures encountered. - |nbsp| Latency Write latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - - -:RequestType: ViewWrite -:Description: Metrics related to materialized view write wrtes. -:Metrics: - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - Unavailables Counter Number of unavailable exceptions encountered. - ViewReplicasAttempted Counter Total number of attempted view replica writes. - ViewReplicasSuccess Counter Total number of succeded view replica writes. - ViewPendingMutations Gauge ViewReplicasAttempted - ViewReplicasSuccess. - ViewWriteLatency Timer Time between when mutation is applied to base table and when CL.ONE is achieved on view. - ===================== ============== ============================================================= - -Cache Metrics -^^^^^^^^^^^^^ - -Cassandra caches have metrics to track the effectivness of the caches. Though the ``Table Metrics`` might be more useful. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Cache..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Cache scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Cache capacity in bytes. -Entries Gauge Total number of cache entries. -FifteenMinuteCacheHitRate Gauge 15m cache hit rate. -FiveMinuteCacheHitRate Gauge 5m cache hit rate. -OneMinuteCacheHitRate Gauge 1m cache hit rate. -HitRate Gauge All time cache hit rate. -Hits Meter Total number of cache hits. -Misses Meter Total number of cache misses. -MissLatency Timer Latency of misses. -Requests Gauge Total number of cache requests. -Size Gauge Total size of occupied cache, in bytes. -========================== ============== =========== - -The following caches are covered: - -============================ =========== -Name Description -============================ =========== -CounterCache Keeps hot counters in memory for performance. -ChunkCache In process uncompressed page cache. -KeyCache Cache for partition to sstable offsets. -RowCache Cache for rows kept in memory. -============================ =========== - -.. NOTE:: - Misses and MissLatency are only defined for the ChunkCache - -CQL Metrics -^^^^^^^^^^^ - -Metrics specific to CQL prepared statement caching. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CQL.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CQL name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -PreparedStatementsCount Gauge Number of cached prepared statements. -PreparedStatementsEvicted Counter Number of prepared statements evicted from the prepared statement cache -PreparedStatementsExecuted Counter Number of prepared statements executed. -RegularStatementsExecuted Counter Number of **non** prepared statements executed. -PreparedStatementsRatio Gauge Percentage of statements that are prepared vs unprepared. -========================== ============== =========== - - -DroppedMessage Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by ``Hinted Handoff`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.DroppedMessages..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=DroppedMetrics scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CrossNodeDroppedLatency Timer The dropped latency across nodes. -InternalDroppedLatency Timer The dropped latency within node. -Dropped Meter Number of dropped messages. -========================== ============== =========== - -The different types of messages tracked are: - -============================ =========== -Name Description -============================ =========== -BATCH_STORE Batchlog write -BATCH_REMOVE Batchlog cleanup (after succesfully applied) -COUNTER_MUTATION Counter writes -HINT Hint replay -MUTATION Regular writes -READ Regular reads -READ_REPAIR Read repair -PAGED_SLICE Paged read -RANGE_SLICE Token range read -REQUEST_RESPONSE RPC Callbacks -_TRACE Tracing writes -============================ =========== - -Streaming Metrics -^^^^^^^^^^^^^^^^^ - -Metrics reported during ``Streaming`` operations, such as repair, bootstrap, rebuild. 
- -These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Streaming..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Streaming scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -IncomingBytes Counter Number of bytes streamed to this node from the peer. -OutgoingBytes Counter Number of bytes streamed to the peer endpoint from this node. -========================== ============== =========== - - -Compaction Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to ``Compaction`` work. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Compaction.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Compaction name=`` - -========================== ======================================== =============================================== -Name Type Description -========================== ======================================== =============================================== -BytesCompacted Counter Total number of bytes compacted since server [re]start. -PendingTasks Gauge Estimated number of compactions remaining to perform. -CompletedTasks Gauge Number of completed compactions since server [re]start. -TotalCompactionsCompleted Meter Throughput of completed compactions since server [re]start. -PendingTasksByTableName Gauge>> Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in ``Table Metrics``. -========================== ======================================== =============================================== - -CommitLog Metrics -^^^^^^^^^^^^^^^^^ - -Metrics specific to the ``CommitLog`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CommitLog.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CommitLog name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CompletedTasks Gauge Total number of commit log messages written since [re]start. -PendingTasks Gauge Number of commit log messages written but yet to be fsync'd. -TotalCommitLogSize Gauge Current size, in bytes, used by all the commit log segments. -WaitingOnSegmentAllocation Timer Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero. -WaitingOnCommit Timer The time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval. -========================== ============== =========== - -Storage Metrics -^^^^^^^^^^^^^^^ - -Metrics specific to the storage engine. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Storage.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Storage name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Exceptions Counter Number of internal exceptions caught. Under normal exceptions this should be zero. -Load Counter Size, in bytes, of the on disk data size this node manages. -TotalHints Counter Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint. -TotalHintsInProgress Counter Number of hints attemping to be sent currently. 
-========================== ============== =========== - -HintedHandoff Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in ``Storage Metrics`` - -These metrics include the peer endpoint **in the metric name** - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintedHandOffManager.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintedHandOffManager name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Hints_created- Counter Number of hints on disk for this peer. -Hints_not_stored- Counter Number of hints not stored for this peer, due to being down past the configured hint window. -=========================== ============== =========== - -SSTable Index Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the SSTable index metadata. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Index..RowIndexEntry`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -IndexedEntrySize Histogram Histogram of the on-heap size, in bytes, of the index across all SSTables. -IndexInfoCount Histogram Histogram of the number of on-heap index entries managed across all SSTables. -IndexInfoGets Histogram Histogram of the number index seeks performed per SSTable. -=========================== ============== =========== - -BufferPool Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.BufferPool.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=BufferPool name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Size Gauge Size, in bytes, of the managed buffer pool -Misses Meter The rate of misses in the pool. The higher this is the more allocations incurred. -=========================== ============== =========== - - -Client Metrics -^^^^^^^^^^^^^^ - -Metrics specifc to client managment. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Client.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Client name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -connectedNativeClients Counter Number of clients connected to this nodes native protocol server -connectedThriftClients Counter Number of clients connected to this nodes thrift protocol server -=========================== ============== =========== - -JVM Metrics -^^^^^^^^^^^ - -JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using `Metric Reporters`_. 
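For example, the standard platform MBeans can be read over the same JMX connection used for Cassandra's own metrics; a brief sketch, reusing the JMX imports from the cache-metrics example above and assuming the default JMX port (note that the raw platform attribute names, such as ``CollectionCount``, differ slightly from the reporter-style names listed below)::

    JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
        MBeanServerConnection mbs = connector.getMBeanServerConnection();
        // Heap usage is published as CompositeData (keys: init, used, committed, max).
        javax.management.openmbean.CompositeData heap =
                (javax.management.openmbean.CompositeData) mbs.getAttribute(
                        new ObjectName("java.lang:type=Memory"), "HeapMemoryUsage");
        System.out.println("Heap used (bytes): " + heap.get("used"));
        // One GarbageCollector MBean per collector instance.
        for (ObjectName gc : mbs.queryNames(
                new ObjectName("java.lang:type=GarbageCollector,*"), null)) {
            System.out.println(gc.getKeyProperty("name") + " collections: "
                    + mbs.getAttribute(gc, "CollectionCount"));
        }
    }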
- -BufferPool -++++++++++ - -**Metric Name** - ``jvm.buffers..`` - -**JMX MBean** - ``java.nio:type=BufferPool name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Estimated total capacity of the buffers in this pool -Count Gauge Estimated number of buffers in the pool -Used Gauge Estimated memory that the Java virtual machine is using for this buffer pool -========================== ============== =========== - -FileDescriptorRatio -+++++++++++++++++++ - -**Metric Name** - ``jvm.fd.`` - -**JMX MBean** - ``java.lang:type=OperatingSystem name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Usage Ratio Ratio of used to total file descriptors -========================== ============== =========== - -GarbageCollector -++++++++++++++++ - -**Metric Name** - ``jvm.gc..`` - -**JMX MBean** - ``java.lang:type=GarbageCollector name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Count Gauge Total number of collections that have occurred -Time Gauge Approximate accumulated collection elapsed time in milliseconds -========================== ============== =========== - -Memory -++++++ - -**Metric Name** - ``jvm.memory..`` - -**JMX MBean** - ``java.lang:type=Memory`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -MemoryPool -++++++++++ - -**Metric Name** - ``jvm.memory.pools..`` - -**JMX MBean** - ``java.lang:type=MemoryPool name=`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -JMX -^^^ - -Any JMX based client can access metrics from cassandra. - -If you wish to access JMX metrics over http it's possible to download `Mx4jTool `__ and -place ``mx4j-tools.jar`` into the classpath. On startup you will see in the log:: - - HttpAdaptor version 3.0.2 started on port 8081 - -To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -``conf/cassandra-env.sh`` and uncomment:: - - #MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0" - - #MX4J_PORT="-Dmx4jport=8081" - - -Metric Reporters -^^^^^^^^^^^^^^^^ - -As mentioned at the top of this section on monitoring the Cassandra metrics can be exported to a number of monitoring -system a number of `built in `__ and `third party -`__ reporter plugins. - -The configuration of these plugins is managed by the `metrics reporter config project -`__. There is a sample configuration file located at -``conf/metrics-reporter-config-sample.yaml``. 
- -Once configured, you simply start cassandra with the flag -``-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml``. The specified .yaml file plus any 3rd party -reporter jars must all be in Cassandra's classpath. diff --git a/src/doc/3.11.5/_sources/operating/read_repair.rst.txt b/src/doc/3.11.5/_sources/operating/read_repair.rst.txt deleted file mode 100644 index 0e52bf523..000000000 --- a/src/doc/3.11.5/_sources/operating/read_repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Read repair ------------ - -.. todo:: todo diff --git a/src/doc/3.11.5/_sources/operating/repair.rst.txt b/src/doc/3.11.5/_sources/operating/repair.rst.txt deleted file mode 100644 index 97d8ce8ba..000000000 --- a/src/doc/3.11.5/_sources/operating/repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Repair ------- - -.. todo:: todo diff --git a/src/doc/3.11.5/_sources/operating/security.rst.txt b/src/doc/3.11.5/_sources/operating/security.rst.txt deleted file mode 100644 index dfcd9e6c5..000000000 --- a/src/doc/3.11.5/_sources/operating/security.rst.txt +++ /dev/null @@ -1,410 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -.. highlight:: none - -Security --------- - -There are three main components to the security features provided by Cassandra: - -- TLS/SSL encryption for client and inter-node communication -- Client authentication -- Authorization - -TLS/SSL Encryption -^^^^^^^^^^^^^^^^^^ -Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently. - -In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can -be overidden using the settings in ``cassandra.yaml``, but this is not recommended unless there are policies in place -which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be -updated. - -FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See `the java document on FIPS `__ -for more details. - -For information on generating the keystore and truststore files used in SSL communications, see the -`java documentation on creating keystores `__ - -Inter-node Encryption -~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing inter-node encryption are found in ``cassandra.yaml`` in the ``server_encryption_options`` -section. To enable inter-node encryption, change the ``internode_encryption`` setting from its default value of ``none`` -to one value from: ``rack``, ``dc`` or ``all``. - -Client to Node Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing client to node encryption are found in ``cassandra.yaml`` in the ``client_encryption_options`` -section. There are two primary toggles here for enabling encryption, ``enabled`` and ``optional``. - -- If neither is set to ``true``, client connections are entirely unencrypted. -- If ``enabled`` is set to ``true`` and ``optional`` is set to ``false``, all client connections must be secured. -- If both options are set to ``true``, both encrypted and unencrypted connections are supported using the same port. - Client connections using encryption with this configuration will be automatically detected and handled by the server. - -As an alternative to the ``optional`` setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set ``optional`` to false and use the ``native_transport_port_ssl`` -setting in ``cassandra.yaml`` to specify the port to be used for secure client communication. - -.. _operation-roles: - -Roles -^^^^^ - -Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -``role_manager`` setting in ``cassandra.yaml``. The default setting uses ``CassandraRoleManager``, an implementation -which stores role information in the tables of the ``system_auth`` keyspace. - -See also the :ref:`CQL documentation on roles `. - -Authentication -^^^^^^^^^^^^^^ - -Authentication is pluggable in Cassandra and is configured using the ``authenticator`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. 
- -By default, Cassandra is configured with ``AllowAllAuthenticator`` which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions. - -The default distribution also includes ``PasswordAuthenticator``, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication. - -.. _password-authentication: - -Enabling Password Authentication -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster. - -Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps: - -1. Open a ``cqlsh`` session and change the replication factor of the ``system_auth`` keyspace. By default, this keyspace - uses ``SimpleReplicationStrategy`` and a ``replication_factor`` of 1. It is recommended to change this for any - non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to - configure a replication factor of 3 to 5 per-DC. - -:: - - ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3}; - -2. Edit ``cassandra.yaml`` to change the ``authenticator`` option like so: - -:: - - authenticator: PasswordAuthenticator - -3. Restart the node. - -4. Open a new ``cqlsh`` session using the credentials of the default superuser: - -:: - - cqlsh -u cassandra -p cassandra - -5. During login, the credentials for the default superuser are read with a consistency level of ``QUORUM``, whereas - those for all other users (including superusers) are read at ``LOCAL_ONE``. In the interests of performance and - availability, as well as security, operators should create another superuser and disable the default one. This step - is optional, but highly recommended. While logged in as the default superuser, create another superuser role which - can be used to bootstrap further configuration. - -:: - - # create a new superuser - CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super'; - -6. Start a new cqlsh session, this time logging in as the new_superuser and disable the default superuser. - -:: - - ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false; - -7. Finally, set up the roles and credentials for your application users with :ref:`CREATE ROLE ` - statements. - -At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. 
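Client applications must then supply these credentials when they connect. As an illustration only, assuming the separately distributed DataStax Java driver 4.x (any driver with authentication support works the same way)::

    import com.datastax.oss.driver.api.core.CqlSession;
    import java.net.InetSocketAddress;

    // Connect with a role created above; applications should normally use their own, non-superuser role.
    try (CqlSession session = CqlSession.builder()
            .addContactPoint(new InetSocketAddress("10.0.0.1", 9042))
            .withLocalDatacenter("DC1")
            .withAuthCredentials("dba", "super")
            .build()) {
        System.out.println(session.execute(
                "SELECT release_version FROM system.local").one().getString("release_version"));
    }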
Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster. - -Note that using ``PasswordAuthenticator`` also requires the use of :ref:`CassandraRoleManager `. - -See also: :ref:`setting-credentials-for-internal-authentication`, :ref:`CREATE ROLE `, -:ref:`ALTER ROLE `, :ref:`ALTER KEYSPACE ` and :ref:`GRANT PERMISSION -`, - -Authorization -^^^^^^^^^^^^^ - -Authorization is pluggable in Cassandra and is configured using the ``authorizer`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthorizer`` which performs no checking and so effectively grants all -permissions to all roles. This must be used if ``AllowAllAuthenticator`` is the configured authenticator. - -The default distribution also includes ``CassandraAuthorizer``, which does implement full permissions management -functionality and stores its data in Cassandra system tables. - -Enabling Internal Authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests. - -The following assumes that authentication has already been enabled via the process outlined in -:ref:`password-authentication`. Perform these steps to enable internal authorization across the cluster: - -1. On the selected node, edit ``cassandra.yaml`` to change the ``authorizer`` option like so: - -:: - - authorizer: CassandraAuthorizer - -2. Restart the node. - -3. Open a new ``cqlsh`` session using the credentials of a role with superuser credentials: - -:: - - cqlsh -u dba -p super - -4. Configure the appropriate access privileges for your clients using `GRANT PERMISSION `_ - statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so - disruption to clients is avoided. - -:: - - GRANT SELECT ON ks.t1 TO db_user; - -5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node - restarts and clients reconnect, the enforcement of the granted permissions will begin. - -See also: :ref:`GRANT PERMISSION `, `GRANT ALL ` and :ref:`REVOKE PERMISSION -` - -Caching -^^^^^^^ - -Enabling authentication and authorization places additional load on the cluster by frequently reading from the -``system_auth`` tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from ``cassandra.yaml`` -or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from ``cassandra.yaml`` when the node is restarted. - -Each cache has 3 options which can be set: - -Validity Period - Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache. -Refresh Rate - Controls the rate at which background reads are performed to pick up any changes to the underlying data. 
While these - async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a - shorter time than the validity period. -Max Entries - Controls the upper bound on cache size. - -The naming for these options in ``cassandra.yaml`` follows the convention: - -* ``_validity_in_ms`` -* ``_update_interval_in_ms`` -* ``_cache_max_entries`` - -Where ```` is one of ``credentials``, ``permissions``, or ``roles``. - -As mentioned, these are also exposed via JMX in the mbeans under the ``org.apache.cassandra.auth`` domain. - -JMX access -^^^^^^^^^^ - -Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra's own auth subsystem. - -The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to change the ``LOCAL_JMX`` setting to ``yes``. Under the -standard configuration, when remote JMX connections are enabled, :ref:`standard JMX authentication ` -is also switched on. - -Note that by default, local-only connections are not subject to authentication, but this can be enabled. - -If enabling remote connections, it is recommended to also use :ref:`SSL ` connections. - -Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as :ref:`nodetool `, are -correctly configured and working as expected. - -.. _standard-jmx-auth: - -Standard JMX Auth -~~~~~~~~~~~~~~~~~ - -Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -``cassandra-env.sh`` by the line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -Edit the password file to add username/password pairs: - -:: - - jmx_user jmx_password - -Secure the credentials file so that only the user running the Cassandra process can read it : - -:: - - $ chown cassandra:cassandra /etc/cassandra/jmxremote.password - $ chmod 400 /etc/cassandra/jmxremote.password - -Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in ``cassandra-env.sh``: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -Then edit the access file to grant your JMX user readwrite permission: - -:: - - jmx_user readwrite - -Cassandra must be restarted to pick up the new settings. - -See also : `Using File-Based Password Authentication In JMX -`__ - - -Cassandra Integrated Auth -~~~~~~~~~~~~~~~~~~~~~~~~~ - -An alternative to the out-of-the-box JMX auth is to useeCassandra's own authentication and/or authorization providers -for JMX clients. This is potentially more flexible and secure but it come with one major caveat. Namely that it is not -available until `after` a node has joined the ring, because the auth subsystem is not fully configured until that point -However, it is often critical for monitoring purposes to have JMX access particularly during bootstrap. 
So it is -recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, -to switch to integrated auth once the node has joined the ring and initial setup is complete. - -With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates -can be managed centrally using just ``cqlsh``. Furthermore, fine grained control over exactly which operations are -permitted on particular MBeans can be acheived via :ref:`GRANT PERMISSION `. - -To enable integrated authentication, edit ``cassandra-env.sh`` to uncomment these lines: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" - #JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" - -And disable the JMX standard auth by commenting this line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -To enable integrated authorization, uncomment this line: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" - -Check standard access control is off by ensuring this line is commented out: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as: - -:: - - CREATE ROLE jmx WITH LOGIN = false; - GRANT SELECT ON ALL MBEANS TO jmx; - GRANT DESCRIBE ON ALL MBEANS TO jmx; - GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx; - GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx; - - # Grant the jmx role to one with login permissions so that it can access the JMX tooling - CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false; - GRANT jmx TO ks_user; - -Fine grained access control to individual MBeans is also supported: - -:: - - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner; - -This permits the ``ks_user`` role to invoke methods on the MBean representing a single table in ``test_keyspace``, while -granting the same permission for all table level MBeans in that keyspace to the ``ks_owner`` role. - -Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered. - -See also: :ref:`Permissions `. - -.. _jmx-with-ssl: - -JMX With SSL -~~~~~~~~~~~~ - -JMX SSL configuration is controlled by a number of system properties, some of which are optional. 
To turn on SSL, edit -the relevant lines in ``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to uncomment and set the values of these -properties as required: - -``com.sun.management.jmxremote.ssl`` - set to true to enable SSL -``com.sun.management.jmxremote.ssl.need.client.auth`` - set to true to enable validation of client certificates -``com.sun.management.jmxremote.registry.ssl`` - enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub -``com.sun.management.jmxremote.ssl.enabled.protocols`` - by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is - not usually necessary and using the defaults is the preferred option. -``com.sun.management.jmxremote.ssl.enabled.cipher.suites`` - by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that - this is not usually necessary and using the defaults is the preferred option. -``javax.net.ssl.keyStore`` - set the path on the local filesystem of the keystore containing server private keys and public certificates -``javax.net.ssl.keyStorePassword`` - set the password of the keystore file -``javax.net.ssl.trustStore`` - if validation of client certificates is required, use this property to specify the path of the truststore containing - the public certificates of trusted clients -``javax.net.ssl.trustStorePassword`` - set the password of the truststore file - -See also: `Oracle Java7 Docs `__, -`Monitor Java with JMX `__ diff --git a/src/doc/3.11.5/_sources/operating/snitch.rst.txt b/src/doc/3.11.5/_sources/operating/snitch.rst.txt deleted file mode 100644 index faea0b3e1..000000000 --- a/src/doc/3.11.5/_sources/operating/snitch.rst.txt +++ /dev/null @@ -1,78 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Snitch ------- - -In cassandra, the snitch has two functions: - -- it teaches Cassandra enough about your network topology to route requests efficiently. -- it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping - machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same - "rack" (which may not actually be a physical location). - -Dynamic snitching -^^^^^^^^^^^^^^^^^ - -The dynamic snitch monitor read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is -configured with the following properties on ``cassandra.yaml``: - -- ``dynamic_snitch``: whether the dynamic snitch should be enabled or disabled. -- ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the more expensive part of host score - calculation. 
-- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero and read_repair_chance is < 1.0, this will allow - 'pinning' of replicas to hosts in order to increase cache capacity. -- ``dynamic_snitch_badness_threshold:``: The badness threshold will control how much worse the pinned host has to be - before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a - percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned - host was 20% worse than the fastest. - -Snitch classes -^^^^^^^^^^^^^^ - -The ``endpoint_snitch`` parameter in ``cassandra.yaml`` should be set to the class the class that implements -``IEndPointSnitch`` which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the snitch implementations: - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via gossip. If ``cassandra-topology.properties`` exists, - it is used as a fallback, allowing migration from the PropertyFileSnitch. - -SimpleSnitch - Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -PropertyFileSnitch - Proximity is determined by rack and data center, which are explicitly configured in - ``cassandra-topology.properties``. - -Ec2Snitch - Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. - The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this - will not work across multiple regions. - -Ec2MultiRegionSnitch - Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the - public IP as well). You will need to open the ``storage_port`` or ``ssl_storage_port`` on the public IP firewall - (For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection). - -RackInferringSnitch - Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each - node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an - example of writing a custom Snitch class and is provided in that spirit. diff --git a/src/doc/3.11.5/_sources/operating/topo_changes.rst.txt b/src/doc/3.11.5/_sources/operating/topo_changes.rst.txt deleted file mode 100644 index c42708e02..000000000 --- a/src/doc/3.11.5/_sources/operating/topo_changes.rst.txt +++ /dev/null @@ -1,124 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _topology-changes: - -Adding, replacing, moving and removing nodes --------------------------------------------- - -Bootstrap -^^^^^^^^^ - -Adding new nodes is called "bootstrapping". The ``num_tokens`` parameter will define the amount of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for. - -Token allocation -~~~~~~~~~~~~~~~~ - -With the default token allocation algorithm the new node will pick ``num_tokens`` random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher amount of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead. - -On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option ``-Dcassandra.allocate_tokens_for_keyspace=``, where -```` is the keyspace from which the algorithm can find the load information to optimize token assignment for. - -Manual token assignment -""""""""""""""""""""""" - -You may specify a comma-separated list of tokens manually with the ``initial_token`` ``cassandra.yaml`` parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens. - -Range streaming -~~~~~~~~~~~~~~~~ - -After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state. - -In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag ``-Dcassandra.consistent.rangemovement=false``. - -Resuming failed/hanged bootstrap -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On 2.2+, if the bootstrap process fails, it's possible to resume bootstrap from the previous saved state by calling -``nodetool bootstrap resume``. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag -``-Dcassandra.reset_bootstrap_progress=true``. - -On lower versions, when the bootstrap proces fails it is recommended to wipe the node (remove all the data), and restart -the bootstrap process again. - -Manual bootstrapping -~~~~~~~~~~~~~~~~~~~~ - -It's possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -``auto_bootstrap: false``. This may be useful when restoring a node from a backup or creating a new data-center. - -Removing nodes -^^^^^^^^^^^^^^ - -You can take a node out of the cluster with ``nodetool decommission`` to a live node, or ``nodetool removenode`` (to any -other machine) to remove a dead one. 
This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas. - -No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually. - -Moving nodes -^^^^^^^^^^^^ - -When ``num_tokens: 1`` it's possible to move the node position in the ring with ``nodetool move``. Moving is both a -convenience over and more efficient than decommission + bootstrap. After moving a node, ``nodetool cleanup`` should be -run to remove any unnecessary data. - -Replacing a dead node -^^^^^^^^^^^^^^^^^^^^^ - -In order to replace a dead node, start cassandra with the JVM startup flag -``-Dcassandra.replace_address_first_boot=``. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node to be down. - -The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. The main difference -between normal bootstrapping of a new node is that this new node will not accept any writes during this phase. - -Once the bootstrapping is complete the node will be marked "UP", we rely on the hinted handoff's for making this node -consistent (since we don't accept writes since the start of the bootstrap). - -.. Note:: If the replacement process takes longer than ``max_hint_window_in_ms`` you **MUST** run repair to make the - replaced node consistent again, since it missed ongoing writes during bootstrapping. - -Monitoring progress -^^^^^^^^^^^^^^^^^^^ - -Bootstrap, replace, move and remove progress can be monitored using ``nodetool netstats`` which will show the progress -of the streaming operations. - -Cleanup data after range movements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As a safety measure, Cassandra does not automatically remove data from nodes that "lose" part of their token range due -to a range movement operation (bootstrap, move, replace). Run ``nodetool cleanup`` on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node. diff --git a/src/doc/3.11.5/_sources/tools/cqlsh.rst.txt b/src/doc/3.11.5/_sources/tools/cqlsh.rst.txt deleted file mode 100644 index 45e2db8fc..000000000 --- a/src/doc/3.11.5/_sources/tools/cqlsh.rst.txt +++ /dev/null @@ -1,455 +0,0 @@ -.. highlight:: none - -.. _cqlsh: - -cqlsh: the CQL shell --------------------- - -cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line. - - -Compatibility -^^^^^^^^^^^^^ - -cqlsh is compatible with Python 2.7. - -In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh make work with older or newer versions of Cassandra, but this is not officially supported. - - -Optional Dependencies -^^^^^^^^^^^^^^^^^^^^^ - -cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh. 
- -pytz -~~~~ - -By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the `pytz `__ library must be installed. See the ``timezone`` option in cqlshrc_ for -specifying a timezone to use. - -cython -~~~~~~ - -The performance of cqlsh's ``COPY`` operations can be improved by installing `cython `__. This will -compile the python modules that are central to the performance of ``COPY``. - -cqlshrc -^^^^^^^ - -The ``cqlshrc`` file holds configuration options for cqlsh. By default this is in the user's home directory at -``~/.cassandra/cqlsh``, but a custom location can be specified with the ``--cqlshrc`` option. - -Example config values and documentation can be found in the ``conf/cqlshrc.sample`` file of a tarball installation. You -can also view the latest version of `cqlshrc online `__. - - -Command Line Options -^^^^^^^^^^^^^^^^^^^^ - -Usage: - -``cqlsh [options] [host [port]]`` - -Options: - -``-C`` ``--color`` - Force color output - -``--no-color`` - Disable color output - -``--browser`` - Specify the browser to use for displaying cqlsh help. This can be one of the `supported browser names - `__ (e.g. ``firefox``) or a browser path followed by ``%s`` (e.g. - ``/usr/bin/google-chrome-stable %s``). - -``--ssl`` - Use SSL when connecting to Cassandra - -``-u`` ``--user`` - Username to authenticate against Cassandra with - -``-p`` ``--password`` - Password to authenticate against Cassandra with, should - be used in conjunction with ``--user`` - -``-k`` ``--keyspace`` - Keyspace to authenticate to, should be used in conjunction - with ``--user`` - -``-f`` ``--file`` - Execute commands from the given file, then exit - -``--debug`` - Print additional debugging information - -``--encoding`` - Specify a non-default encoding for output (defaults to UTF-8) - -``--cqlshrc`` - Specify a non-default location for the ``cqlshrc`` file - -``-e`` ``--execute`` - Execute the given statement, then exit - -``--connect-timeout`` - Specify the connection timeout in seconds (defaults to 2s) - -``--request-timeout`` - Specify the request timeout in seconds (defaults to 10s) - -``-t`` ``--tty`` - Force tty mode (command prompt) - - -Special Commands -^^^^^^^^^^^^^^^^ - -In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below. - -``CONSISTENCY`` -~~~~~~~~~~~~~~~ - -`Usage`: ``CONSISTENCY `` - -Sets the consistency level for operations to follow. Valid arguments include: - -- ``ANY`` -- ``ONE`` -- ``TWO`` -- ``THREE`` -- ``QUORUM`` -- ``ALL`` -- ``LOCAL_QUORUM`` -- ``LOCAL_ONE`` -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -``SERIAL CONSISTENCY`` -~~~~~~~~~~~~~~~~~~~~~~ - -`Usage`: ``SERIAL CONSISTENCY `` - -Sets the serial consistency level for operations to follow. Valid arguments include: - -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of ``QUORUM`` (and -is successful), then a ``QUORUM`` read is guaranteed to see that write. 
But if the regular consistency level of that -write is ``ANY``, then only a read with a consistency level of ``SERIAL`` is guaranteed to see it (even a read with -consistency ``ALL`` is not guaranteed to be enough). - -``SHOW VERSION`` -~~~~~~~~~~~~~~~~ -Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:: - - cqlsh> SHOW VERSION - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - -``SHOW HOST`` -~~~~~~~~~~~~~ - -Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:: - - cqlsh> SHOW HOST - Connected to Prod_Cluster at 192.0.0.1:9042. - -``SHOW SESSION`` -~~~~~~~~~~~~~~~~ - -Pretty prints a specific tracing session. - -`Usage`: ``SHOW SESSION `` - -Example usage:: - - cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8 - - Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8 - - activity | timestamp | source | source_elapsed | client - -----------------------------------------------------------+----------------------------+-----------+----------------+----------- - Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 | 0 | 127.0.0.1 - Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 | 3843 | 127.0.0.1 - ... - - -``SOURCE`` -~~~~~~~~~~ - -Reads the contents of a file and executes each line as a CQL statement or special cqlsh command. - -`Usage`: ``SOURCE `` - -Example usage:: - - cqlsh> SOURCE '/home/thobbs/commands.cql' - -``CAPTURE`` -~~~~~~~~~~~ - -Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured. - -`Usage`:: - - CAPTURE ''; - CAPTURE OFF; - CAPTURE; - -That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation (``'~/mydir'``) is supported for referring to ``$HOME``. - -Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session. - -To stop capturing output and show it in the cqlsh session again, use ``CAPTURE OFF``. - -To inspect the current capture configuration, use ``CAPTURE`` with no arguments. - -``HELP`` -~~~~~~~~ - -Gives information about cqlsh commands. To see available topics, enter ``HELP`` without any arguments. To see help on a -topic, use ``HELP ``. Also see the ``--browser`` argument for controlling what browser is used to display help. - -``TRACING`` -~~~~~~~~~~~ - -Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed. - -`Usage`:: - - TRACING ON - TRACING OFF - -``PAGING`` -~~~~~~~~~~ - -Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it's a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once. - -`Usage`:: - - PAGING ON - PAGING OFF - PAGING - -``EXPAND`` -~~~~~~~~~~ - -Enables or disables vertical printing of rows. Enabling ``EXPAND`` is useful when many columns are fetched, or the -contents of a single column are large. - -`Usage`:: - - EXPAND ON - EXPAND OFF - -``LOGIN`` -~~~~~~~~~ - -Authenticate as a specified Cassandra user for the current session. 
- -`Usage`:: - - LOGIN [] - -``EXIT`` -~~~~~~~~~ - -Ends the current session and terminates the cqlsh process. - -`Usage`:: - - EXIT - QUIT - -``CLEAR`` -~~~~~~~~~ - -Clears the console. - -`Usage`:: - - CLEAR - CLS - -``DESCRIBE`` -~~~~~~~~~~~~ - -Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema. - -`Usage`:: - - DESCRIBE CLUSTER - DESCRIBE SCHEMA - DESCRIBE KEYSPACES - DESCRIBE KEYSPACE - DESCRIBE TABLES - DESCRIBE TABLE
- DESCRIBE INDEX - DESCRIBE MATERIALIZED VIEW - DESCRIBE TYPES - DESCRIBE TYPE - DESCRIBE FUNCTIONS - DESCRIBE FUNCTION - DESCRIBE AGGREGATES - DESCRIBE AGGREGATE - -In any of the commands, ``DESC`` may be used in place of ``DESCRIBE``. - -The ``DESCRIBE CLUSTER`` command prints the cluster name and partitioner:: - - cqlsh> DESCRIBE CLUSTER - - Cluster: Test Cluster - Partitioner: Murmur3Partitioner - -The ``DESCRIBE SCHEMA`` command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup. - -``COPY TO`` -~~~~~~~~~~~ - -Copies data from a table to a CSV file. - -`Usage`:: - - COPY
[(, ...)] TO WITH [AND ...] - -If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parenthesis after the table name. - - -The ```` should be a string literal (with single quotes) representing a path to the destination file. This -can also the special value ``STDOUT`` (without single quotes) to print the CSV to stdout. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``MAXREQUESTS`` - The maximum number token ranges to fetch simultaneously. Defaults to 6. - -``PAGESIZE`` - The number of rows to fetch in a single page. Defaults to 1000. - -``PAGETIMEOUT`` - By default the page timeout is 10 seconds per 1000 entries - in the page size or 10 seconds if pagesize is smaller. - -``BEGINTOKEN``, ``ENDTOKEN`` - Token range to export. Defaults to exporting the full ring. - -``MAXOUTPUTSIZE`` - The maximum size of the output file measured in number of lines; - beyond this maximum the output file will be split into segments. - -1 means unlimited, and is the default. - -``ENCODING`` - The encoding used for characters. Defaults to ``utf8``. - -``COPY FROM`` -~~~~~~~~~~~~~ -Copies data from a CSV file to table. - -`Usage`:: - - COPY
[(, ...)] FROM WITH [AND ...] - -If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parenthesis after the table name. - -The ```` should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value ``STDIN`` (without single quotes) to read the -CSV data from stdin. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY FROM`` -````````````````````````` - -``INGESTRATE`` - The maximum number of rows to process per second. Defaults to 100000. - -``MAXROWS`` - The maximum number of rows to import. -1 means unlimited, and is the default. - -``SKIPROWS`` - A number of initial rows to skip. Defaults to 0. - -``SKIPCOLS`` - A comma-separated list of column names to ignore. By default, no columns are skipped. - -``MAXPARSEERRORS`` - The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default. - -``MAXINSERTERRORS`` - The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000. - -``ERRFILE`` = - A file to store all rows that could not be imported, by default this is ``import__
.err`` where ```` is - your keyspace and ``
`` is your table name. - -``MAXBATCHSIZE`` - The max number of rows inserted in a single batch. Defaults to 20. - -``MINBATCHSIZE`` - The min number of rows inserted in a single batch. Defaults to 2. - -``CHUNKSIZE`` - The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000. - -.. _shared-copy-options: - -Shared COPY Options -``````````````````` - -Options that are common to both ``COPY TO`` and ``COPY FROM``. - -``NULLVAL`` - The string placeholder for null values. Defaults to ``null``. - -``HEADER`` - For ``COPY TO``, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, - specifies whether the first line in the CSV input file contains column names. Defaults to ``false``. - -``DECIMALSEP`` - The character that is used as the decimal point separator. Defaults to ``.``. - -``THOUSANDSSEP`` - The character that is used to separate thousands. Defaults to the empty string. - -``BOOLSTYlE`` - The string literal format for boolean values. Defaults to ``True,False``. - -``NUMPROCESSES`` - The number of child worker processes to create for ``COPY`` tasks. Defaults to a max of 4 for ``COPY FROM`` and 16 - for ``COPY TO``. However, at most (num_cores - 1) processes will be created. - -``MAXATTEMPTS`` - The maximum number of failed attempts to fetch a range of data (when using ``COPY TO``) or insert a chunk of data - (when using ``COPY FROM``) before giving up. Defaults to 5. - -``REPORTFREQUENCY`` - How often status updates are refreshed, in seconds. Defaults to 0.25. - -``RATEFILE`` - An optional file to output rate statistics to. By default, statistics are not output to a file. diff --git a/src/doc/3.11.5/_sources/tools/index.rst.txt b/src/doc/3.11.5/_sources/tools/index.rst.txt deleted file mode 100644 index 5a5e4d5ae..000000000 --- a/src/doc/3.11.5/_sources/tools/index.rst.txt +++ /dev/null @@ -1,26 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Tools -=============== - -This section describes the command line tools provided with Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cqlsh - nodetool diff --git a/src/doc/3.11.5/_sources/tools/nodetool.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool.rst.txt deleted file mode 100644 index e37303110..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _nodetool: - -Nodetool --------- - -.. todo:: Try to autogenerate this from Nodetool’s help. diff --git a/src/doc/3.11.5/_sources/tools/nodetool/assassinate.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/assassinate.rst.txt deleted file mode 100644 index 0ec5dc8f4..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/assassinate.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_assassinate: - -assassinate ------------ - -Usage ---------- - -.. include:: assassinate.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/bootstrap.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/bootstrap.rst.txt deleted file mode 100644 index e280fee01..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/bootstrap.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_bootstrap: - -bootstrap ---------- - -Usage ---------- - -.. include:: bootstrap.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/cleanup.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/cleanup.rst.txt deleted file mode 100644 index 70ba8f9f7..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/cleanup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_cleanup: - -cleanup -------- - -Usage ---------- - -.. include:: cleanup.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/clearsnapshot.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/clearsnapshot.rst.txt deleted file mode 100644 index 878147511..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/clearsnapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clearsnapshot: - -clearsnapshot -------------- - -Usage ---------- - -.. include:: clearsnapshot.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/clientstats.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/clientstats.rst.txt deleted file mode 100644 index 7f5e47ab7..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/clientstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clientstats: - -clientstats ------------ - -Usage ---------- - -.. include:: clientstats.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/compact.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/compact.rst.txt deleted file mode 100644 index a26347c57..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/compact.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compact: - -compact -------- - -Usage ---------- - -.. include:: compact.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/compactionhistory.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/compactionhistory.rst.txt deleted file mode 100644 index 896433e86..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/compactionhistory.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionhistory: - -compactionhistory ------------------ - -Usage ---------- - -.. 
include:: compactionhistory.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/compactionstats.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/compactionstats.rst.txt deleted file mode 100644 index 612822535..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/compactionstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionstats: - -compactionstats ---------------- - -Usage ---------- - -.. include:: compactionstats.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/decommission.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/decommission.rst.txt deleted file mode 100644 index 8b00ff40c..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/decommission.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_decommission: - -decommission ------------- - -Usage ---------- - -.. include:: decommission.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/describecluster.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/describecluster.rst.txt deleted file mode 100644 index 55df135de..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/describecluster.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describecluster: - -describecluster ---------------- - -Usage ---------- - -.. include:: describecluster.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/describering.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/describering.rst.txt deleted file mode 100644 index 3a964e8ee..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/describering.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describering: - -describering ------------- - -Usage ---------- - -.. include:: describering.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/disableauditlog.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/disableauditlog.rst.txt deleted file mode 100644 index 4b20b0a9b..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/disableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableauditlog: - -disableauditlog ---------------- - -Usage ---------- - -.. include:: disableauditlog.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/disableautocompaction.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/disableautocompaction.rst.txt deleted file mode 100644 index 16549f202..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/disableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableautocompaction: - -disableautocompaction ---------------------- - -Usage ---------- - -.. include:: disableautocompaction.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/disablebackup.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/disablebackup.rst.txt deleted file mode 100644 index c9537e011..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/disablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebackup: - -disablebackup -------------- - -Usage ---------- - -.. include:: disablebackup.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/disablebinary.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/disablebinary.rst.txt deleted file mode 100644 index 86e981f6e..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/disablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebinary: - -disablebinary -------------- - -Usage ---------- - -.. 
include:: disablebinary.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/disablefullquerylog.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/disablefullquerylog.rst.txt deleted file mode 100644 index d68cd492e..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/disablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablefullquerylog: - -disablefullquerylog -------------------- - -Usage ---------- - -.. include:: disablefullquerylog.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/disablegossip.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/disablegossip.rst.txt deleted file mode 100644 index 76f3d064b..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/disablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablegossip: - -disablegossip -------------- - -Usage ---------- - -.. include:: disablegossip.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/disablehandoff.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/disablehandoff.rst.txt deleted file mode 100644 index 35f11334b..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/disablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehandoff: - -disablehandoff --------------- - -Usage ---------- - -.. include:: disablehandoff.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/disablehintsfordc.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/disablehintsfordc.rst.txt deleted file mode 100644 index 892c31ba5..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/disablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehintsfordc: - -disablehintsfordc ------------------ - -Usage ---------- - -.. include:: disablehintsfordc.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/disableoldprotocolversions.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/disableoldprotocolversions.rst.txt deleted file mode 100644 index 09d8a150b..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/disableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableoldprotocolversions: - -disableoldprotocolversions --------------------------- - -Usage ---------- - -.. include:: disableoldprotocolversions.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/drain.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/drain.rst.txt deleted file mode 100644 index 03093ac7c..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/drain.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_drain: - -drain ------ - -Usage ---------- - -.. include:: drain.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/enableauditlog.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/enableauditlog.rst.txt deleted file mode 100644 index 7936c7a86..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/enableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableauditlog: - -enableauditlog --------------- - -Usage ---------- - -.. include:: enableauditlog.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/enableautocompaction.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/enableautocompaction.rst.txt deleted file mode 100644 index 2ddf60f8e..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/enableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableautocompaction: - -enableautocompaction --------------------- - -Usage ---------- - -.. 
include:: enableautocompaction.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/enablebackup.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/enablebackup.rst.txt deleted file mode 100644 index 9a06c6669..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/enablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebackup: - -enablebackup ------------- - -Usage ---------- - -.. include:: enablebackup.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/enablebinary.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/enablebinary.rst.txt deleted file mode 100644 index 5b1c6da72..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/enablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebinary: - -enablebinary ------------- - -Usage ---------- - -.. include:: enablebinary.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/enablefullquerylog.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/enablefullquerylog.rst.txt deleted file mode 100644 index ec871c283..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/enablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablefullquerylog: - -enablefullquerylog ------------------- - -Usage ---------- - -.. include:: enablefullquerylog.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/enablegossip.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/enablegossip.rst.txt deleted file mode 100644 index ae66186ca..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/enablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablegossip: - -enablegossip ------------- - -Usage ---------- - -.. include:: enablegossip.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/enablehandoff.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/enablehandoff.rst.txt deleted file mode 100644 index d764c3a9a..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/enablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehandoff: - -enablehandoff -------------- - -Usage ---------- - -.. include:: enablehandoff.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/enablehintsfordc.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/enablehintsfordc.rst.txt deleted file mode 100644 index 6c42087c3..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/enablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehintsfordc: - -enablehintsfordc ----------------- - -Usage ---------- - -.. include:: enablehintsfordc.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/enableoldprotocolversions.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/enableoldprotocolversions.rst.txt deleted file mode 100644 index e44dc377a..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/enableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableoldprotocolversions: - -enableoldprotocolversions -------------------------- - -Usage ---------- - -.. include:: enableoldprotocolversions.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/failuredetector.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/failuredetector.rst.txt deleted file mode 100644 index 25d02b04a..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/failuredetector.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_failuredetector: - -failuredetector ---------------- - -Usage ---------- - -.. 
include:: failuredetector.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/flush.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/flush.rst.txt deleted file mode 100644 index 45e9b2c5e..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/flush.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_flush: - -flush ------ - -Usage ---------- - -.. include:: flush.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/garbagecollect.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/garbagecollect.rst.txt deleted file mode 100644 index 3af605aad..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/garbagecollect.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_garbagecollect: - -garbagecollect --------------- - -Usage ---------- - -.. include:: garbagecollect.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/gcstats.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/gcstats.rst.txt deleted file mode 100644 index 3e4b45930..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/gcstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gcstats: - -gcstats -------- - -Usage ---------- - -.. include:: gcstats.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 6f56f7d6d..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getbatchlogreplaythrottle: - -getbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: getbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getcompactionthreshold.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getcompactionthreshold.rst.txt deleted file mode 100644 index e17f4b2e4..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthreshold: - -getcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: getcompactionthreshold.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getcompactionthroughput.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getcompactionthroughput.rst.txt deleted file mode 100644 index ef41115ee..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthroughput: - -getcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: getcompactionthroughput.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getconcurrency.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getconcurrency.rst.txt deleted file mode 100644 index 9b0373480..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrency: - -getconcurrency --------------- - -Usage ---------- - -.. include:: getconcurrency.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getconcurrentcompactors.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getconcurrentcompactors.rst.txt deleted file mode 100644 index 8e137e0ed..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentcompactors: - -getconcurrentcompactors ------------------------ - -Usage ---------- - -.. 
include:: getconcurrentcompactors.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt deleted file mode 100644 index e113d74c5..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentviewbuilders: - -getconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: getconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getendpoints.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getendpoints.rst.txt deleted file mode 100644 index ac0d43c7a..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getendpoints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getendpoints: - -getendpoints ------------- - -Usage ---------- - -.. include:: getendpoints.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt deleted file mode 100644 index abdd7e7f0..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getinterdcstreamthroughput: - -getinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: getinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getlogginglevels.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getlogginglevels.rst.txt deleted file mode 100644 index 9936e8d45..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getlogginglevels.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getlogginglevels: - -getlogginglevels ----------------- - -Usage ---------- - -.. include:: getlogginglevels.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getmaxhintwindow.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getmaxhintwindow.rst.txt deleted file mode 100644 index 1a0fe017b..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getmaxhintwindow: - -getmaxhintwindow ----------------- - -Usage ---------- - -.. include:: getmaxhintwindow.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getreplicas.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getreplicas.rst.txt deleted file mode 100644 index 342e72b57..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getreplicas.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getreplicas: - -getreplicas ------------ - -Usage ---------- - -.. include:: getreplicas.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getseeds.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getseeds.rst.txt deleted file mode 100644 index e3156300e..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getseeds: - -getseeds --------- - -Usage ---------- - -.. include:: getseeds.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getsstables.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getsstables.rst.txt deleted file mode 100644 index 1a866ccec..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getsstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getsstables: - -getsstables ------------ - -Usage ---------- - -.. 
include:: getsstables.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/getstreamthroughput.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/getstreamthroughput.rst.txt deleted file mode 100644 index 6d7dbc422..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/getstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getstreamthroughput: - -getstreamthroughput -------------------- - -Usage ---------- - -.. include:: getstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/gettimeout.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/gettimeout.rst.txt deleted file mode 100644 index 9d2e9edd8..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/gettimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettimeout: - -gettimeout ----------- - -Usage ---------- - -.. include:: gettimeout.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/gettraceprobability.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/gettraceprobability.rst.txt deleted file mode 100644 index 3f5783fd0..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/gettraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettraceprobability: - -gettraceprobability -------------------- - -Usage ---------- - -.. include:: gettraceprobability.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/gossipinfo.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/gossipinfo.rst.txt deleted file mode 100644 index cd3513a89..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/gossipinfo.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gossipinfo: - -gossipinfo ----------- - -Usage ---------- - -.. include:: gossipinfo.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/handoffwindow.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/handoffwindow.rst.txt deleted file mode 100644 index 87fe61dc2..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/handoffwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_handoffwindow: - -handoffwindow -------------- - -Usage ---------- - -.. include:: handoffwindow.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/help.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/help.rst.txt deleted file mode 100644 index 8cab88f74..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/help.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_help: - -help ----- - -Usage ---------- - -.. include:: help.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/import.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/import.rst.txt deleted file mode 100644 index 532ba2dcd..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/import.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_import: - -import ------- - -Usage ---------- - -.. include:: import.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/info.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/info.rst.txt deleted file mode 100644 index 74012e93f..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/info.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_info: - -info ----- - -Usage ---------- - -.. include:: info.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/invalidatecountercache.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/invalidatecountercache.rst.txt deleted file mode 100644 index ce1a94d1e..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/invalidatecountercache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. 
_nodetool_invalidatecountercache: - -invalidatecountercache ----------------------- - -Usage ---------- - -.. include:: invalidatecountercache.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/invalidatekeycache.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/invalidatekeycache.rst.txt deleted file mode 100644 index 3e80511a6..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/invalidatekeycache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatekeycache: - -invalidatekeycache ------------------- - -Usage ---------- - -.. include:: invalidatekeycache.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/invalidaterowcache.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/invalidaterowcache.rst.txt deleted file mode 100644 index fd84f1d5a..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/invalidaterowcache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidaterowcache: - -invalidaterowcache ------------------- - -Usage ---------- - -.. include:: invalidaterowcache.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/join.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/join.rst.txt deleted file mode 100644 index a2819eb6a..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/join.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_join: - -join ----- - -Usage ---------- - -.. include:: join.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/listsnapshots.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/listsnapshots.rst.txt deleted file mode 100644 index d897cfa2b..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/listsnapshots.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_listsnapshots: - -listsnapshots -------------- - -Usage ---------- - -.. include:: listsnapshots.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/move.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/move.rst.txt deleted file mode 100644 index 04b3bdba1..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/move.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_move: - -move ----- - -Usage ---------- - -.. include:: move.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/netstats.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/netstats.rst.txt deleted file mode 100644 index b94a09e7d..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/netstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_netstats: - -netstats --------- - -Usage ---------- - -.. include:: netstats.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/nodetool.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/nodetool.rst.txt deleted file mode 100644 index c20d0ac21..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/nodetool.rst.txt +++ /dev/null @@ -1,256 +0,0 @@ -.. _nodetool - -Nodetool --------- - -Usage ---------- - -usage: nodetool [(-u | --username )] - [(-h | --host )] [(-p | --port )] - [(-pw | --password )] - [(-pwf | --password-file )] - [(-pp | --print-port)] [] - -The most commonly used nodetool commands are: - - :doc:`assassinate` - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode - - :doc:`bootstrap` - Monitor/manage node's bootstrap process - - :doc:`cleanup` - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces - - :doc:`clearsnapshot` - Remove the snapshot with the given name from the given keyspaces. 
If no snapshotName is specified we will remove all snapshots - - :doc:`clientstats` - Print information about connected clients - - :doc:`compact` - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables - - :doc:`compactionhistory` - Print history of compaction - - :doc:`compactionstats` - Print statistics on compactions - - :doc:`decommission` - Decommission the *node I am connecting to* - - :doc:`describecluster` - Print the name, snitch, partitioner and schema version of a cluster - - :doc:`describering` - Shows the token ranges info of a given keyspace - - :doc:`disableauditlog` - Disable the audit log - - :doc:`disableautocompaction` - Disable autocompaction for the given keyspace and table - - :doc:`disablebackup` - Disable incremental backup - - :doc:`disablebinary` - Disable native transport (binary protocol) - - :doc:`disablefullquerylog` - Disable the full query log - - :doc:`disablegossip` - Disable gossip (effectively marking the node down) - - :doc:`disablehandoff` - Disable storing hinted handoffs - - :doc:`disablehintsfordc` - Disable hints for a data center - - :doc:`disableoldprotocolversions` - Disable old protocol versions - - :doc:`drain` - Drain the node (stop accepting writes and flush all tables) - - :doc:`enableauditlog` - Enable the audit log - - :doc:`enableautocompaction` - Enable autocompaction for the given keyspace and table - - :doc:`enablebackup` - Enable incremental backup - - :doc:`enablebinary` - Reenable native transport (binary protocol) - - :doc:`enablefullquerylog` - Enable full query logging, defaults for the options are configured in cassandra.yaml - - :doc:`enablegossip` - Reenable gossip - - :doc:`enablehandoff` - Reenable future hints storing on the current node - - :doc:`enablehintsfordc` - Enable hints for a data center that was previsouly disabled - - :doc:`enableoldprotocolversions` - Enable old protocol versions - - :doc:`failuredetector` - Shows the failure detector information for the cluster - - :doc:`flush` - Flush one or more tables - - :doc:`garbagecollect` - Remove deleted data from one or more tables - - :doc:`gcstats` - Print GC Statistics - - :doc:`getbatchlogreplaythrottle` - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster. - - :doc:`getcompactionthreshold` - Print min and max compaction thresholds for a given table - - :doc:`getcompactionthroughput` - Print the MB/s throughput cap for compaction in the system - - :doc:`getconcurrency` - Get maximum concurrency for processing stages - - :doc:`getconcurrentcompactors` - Get the number of concurrent compactors in the system. 
- - :doc:`getconcurrentviewbuilders` - Get the number of concurrent view builders in the system - - :doc:`getendpoints` - Print the end points that owns the key - - :doc:`getinterdcstreamthroughput` - Print the Mb/s throughput cap for inter-datacenter streaming in the system - - :doc:`getlogginglevels` - Get the runtime logging levels - - :doc:`getmaxhintwindow` - Print the max hint window in ms - - :doc:`getreplicas` - Print replicas for a given key - - :doc:`getseeds` - Get the currently in use seed node IP list excluding the node IP - - :doc:`getsstables` - Print the sstable filenames that own the key - - :doc:`getstreamthroughput` - Print the Mb/s throughput cap for streaming in the system - - :doc:`gettimeout` - Print the timeout of the given type in ms - - :doc:`gettraceprobability` - Print the current trace probability value - - :doc:`gossipinfo` - Shows the gossip information for the cluster - - :doc:`handoffwindow` - Print current hinted handoff window - - :doc:`help` - Display help information - - :doc:`import` - Import new SSTables to the system - - :doc:`info` - Print node information (uptime, load, ...) - - :doc:`invalidatecountercache` - Invalidate the counter cache - - :doc:`invalidatekeycache` - Invalidate the key cache - - :doc:`invalidaterowcache` - Invalidate the row cache - - :doc:`join` - Join the ring - - :doc:`listsnapshots` - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is total size of the snapshot on disk. Total TrueDiskSpaceUsed does not make any SSTable deduplication. - - :doc:`move` - Move node on the token ring to a new token - - :doc:`netstats` - Print network information on provided host (connecting node by default) - - :doc:`pausehandoff` - Pause hints delivery process - - :doc:`profileload` - Low footprint profiling of activity for a period of time - - :doc:`proxyhistograms` - Print statistic histograms for network operations - - :doc:`rangekeysample` - Shows the sampled keys held across all keyspaces - - :doc:`rebuild` - Rebuild data by streaming from other nodes (similarly to bootstrap) - - :doc:`rebuild_index` - A full rebuild of native secondary indexes for a given table - - :doc:`refresh` - Load newly placed SSTables to the system without restart - - :doc:`refreshsizeestimates` - Refresh system.size_estimates - - :doc:`reloadlocalschema` - Reload local node schema from system tables - - :doc:`reloadseeds` - Reload the seed node list from the seed node provider - - :doc:`reloadssl` - Signals Cassandra to reload SSL certificates - - :doc:`reloadtriggers` - Reload trigger classes - - :doc:`relocatesstables` - Relocates sstables to the correct disk - - :doc:`removenode` - Show status of current node removal, force completion of pending removal or remove provided ID - - :doc:`repair` - Repair one or more tables - - :doc:`repair_admin` - - :doc:`list` - and fail incremental repair sessions - - :doc:`replaybatchlog` - Kick off batchlog replay and wait for finish - - :doc:`resetfullquerylog` - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX - - :doc:`resetlocalschema` - Reset node's local schema and resync - - :doc:`resumehandoff` - Resume hints delivery process - - :doc:`ring` - Print information about the token ring - - :doc:`scrub` - Scrub (rebuild sstables for) one or more tables - - :doc:`setbatchlogreplaythrottle` - Set batchlog replay throttle in KB per second, or 0 to 
disable throttling. This will be reduced proportionally to the number of nodes in the cluster. - - :doc:`setcachecapacity` - Set global key, row, and counter cache capacities (in MB units) - - :doc:`setcachekeystosave` - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable - - :doc:`setcompactionthreshold` - Set min and max compaction thresholds for a given table - - :doc:`setcompactionthroughput` - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling - - :doc:`setconcurrency` - Set maximum concurrency for processing stage - - :doc:`setconcurrentcompactors` - Set number of concurrent compactors in the system. - - :doc:`setconcurrentviewbuilders` - Set the number of concurrent view builders in the system - - :doc:`sethintedhandoffthrottlekb` - Set hinted handoff throttle in kb per second, per delivery thread. - - :doc:`setinterdcstreamthroughput` - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling - - :doc:`setlogginglevel` - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters. - - :doc:`setmaxhintwindow` - Set the specified max hint window in ms - - :doc:`setstreamthroughput` - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling - - :doc:`settimeout` - Set the specified timeout in ms, or 0 to disable timeout - - :doc:`settraceprobability` - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default - - :doc:`sjk` - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk --help' for more information. - - :doc:`snapshot` - Take a snapshot of specified keyspaces or a snapshot of the specified table - - :doc:`status` - Print cluster information (state, load, IDs, ...) - - :doc:`statusautocompaction` - - :doc:`status` - of autocompaction of the given keyspace and table - - :doc:`statusbackup` - Status of incremental backup - - :doc:`statusbinary` - Status of native transport (binary protocol) - - :doc:`statusgossip` - Status of gossip - - :doc:`statushandoff` - Status of storing future hints on the current node - - :doc:`stop` - Stop compaction - - :doc:`stopdaemon` - Stop cassandra daemon - - :doc:`tablehistograms` - Print statistic histograms for a given table - - :doc:`tablestats` - Print statistics on tables - - :doc:`toppartitions` - Sample and print the most active partitions - - :doc:`tpstats` - Print usage statistics of thread pools - - :doc:`truncatehints` - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified. - - :doc:`upgradesstables` - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version) - - :doc:`verify` - Verify (check data checksum for) one or more tables - - :doc:`version` - Print cassandra version - - :doc:`viewbuildstatus` - Show progress of a materialized view build - -See 'nodetool help ' for more information on a specific command. - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/pausehandoff.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/pausehandoff.rst.txt deleted file mode 100644 index 85ea996f9..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/pausehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_pausehandoff: - -pausehandoff ------------- - -Usage ---------- - -.. 
include:: pausehandoff.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/profileload.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/profileload.rst.txt deleted file mode 100644 index aff289f9f..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/profileload.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_profileload: - -profileload ------------ - -Usage ---------- - -.. include:: profileload.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/proxyhistograms.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/proxyhistograms.rst.txt deleted file mode 100644 index c4f333fb7..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/proxyhistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_proxyhistograms: - -proxyhistograms ---------------- - -Usage ---------- - -.. include:: proxyhistograms.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/rangekeysample.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/rangekeysample.rst.txt deleted file mode 100644 index 983ce93d0..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/rangekeysample.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rangekeysample: - -rangekeysample --------------- - -Usage ---------- - -.. include:: rangekeysample.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/rebuild.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/rebuild.rst.txt deleted file mode 100644 index 7a94ce4ed..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/rebuild.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild: - -rebuild -------- - -Usage ---------- - -.. include:: rebuild.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/rebuild_index.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/rebuild_index.rst.txt deleted file mode 100644 index a1ea4f5a2..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/rebuild_index.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild_index: - -rebuild_index -------------- - -Usage ---------- - -.. include:: rebuild_index.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/refresh.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/refresh.rst.txt deleted file mode 100644 index f68f040cd..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/refresh.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refresh: - -refresh -------- - -Usage ---------- - -.. include:: refresh.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/refreshsizeestimates.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/refreshsizeestimates.rst.txt deleted file mode 100644 index 2f3610afe..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/refreshsizeestimates.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refreshsizeestimates: - -refreshsizeestimates --------------------- - -Usage ---------- - -.. include:: refreshsizeestimates.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/reloadlocalschema.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/reloadlocalschema.rst.txt deleted file mode 100644 index 7ccc0c5e3..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/reloadlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadlocalschema: - -reloadlocalschema ------------------ - -Usage ---------- - -.. 
include:: reloadlocalschema.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/reloadseeds.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/reloadseeds.rst.txt deleted file mode 100644 index 5c6751d77..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/reloadseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadseeds: - -reloadseeds ------------ - -Usage ---------- - -.. include:: reloadseeds.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/reloadssl.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/reloadssl.rst.txt deleted file mode 100644 index 9781b295b..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/reloadssl.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadssl: - -reloadssl ---------- - -Usage ---------- - -.. include:: reloadssl.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/reloadtriggers.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/reloadtriggers.rst.txt deleted file mode 100644 index 2f7959d8c..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/reloadtriggers.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadtriggers: - -reloadtriggers --------------- - -Usage ---------- - -.. include:: reloadtriggers.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/relocatesstables.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/relocatesstables.rst.txt deleted file mode 100644 index 9951d3398..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/relocatesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_relocatesstables: - -relocatesstables ----------------- - -Usage ---------- - -.. include:: relocatesstables.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/removenode.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/removenode.rst.txt deleted file mode 100644 index fe0a041d1..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/removenode.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_removenode: - -removenode ----------- - -Usage ---------- - -.. include:: removenode.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/repair.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/repair.rst.txt deleted file mode 100644 index b43baba71..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/repair.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair: - -repair ------- - -Usage ---------- - -.. include:: repair.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/repair_admin.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/repair_admin.rst.txt deleted file mode 100644 index 1212c399d..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/repair_admin.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair_admin: - -repair_admin ------------- - -Usage ---------- - -.. include:: repair_admin.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/replaybatchlog.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/replaybatchlog.rst.txt deleted file mode 100644 index 073f091db..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/replaybatchlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_replaybatchlog: - -replaybatchlog --------------- - -Usage ---------- - -.. 
include:: replaybatchlog.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/resetfullquerylog.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/resetfullquerylog.rst.txt deleted file mode 100644 index a7661ec98..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/resetfullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetfullquerylog: - -resetfullquerylog ------------------ - -Usage ---------- - -.. include:: resetfullquerylog.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/resetlocalschema.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/resetlocalschema.rst.txt deleted file mode 100644 index cd1b75d33..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/resetlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetlocalschema: - -resetlocalschema ----------------- - -Usage ---------- - -.. include:: resetlocalschema.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/resumehandoff.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/resumehandoff.rst.txt deleted file mode 100644 index 48a0451a3..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/resumehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resumehandoff: - -resumehandoff -------------- - -Usage ---------- - -.. include:: resumehandoff.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/ring.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/ring.rst.txt deleted file mode 100644 index 7b3c195bd..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/ring.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_ring: - -ring ----- - -Usage ---------- - -.. include:: ring.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/scrub.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/scrub.rst.txt deleted file mode 100644 index fc926eb14..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/scrub.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_scrub: - -scrub ------ - -Usage ---------- - -.. include:: scrub.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 2ae628a35..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setbatchlogreplaythrottle: - -setbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: setbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setcachecapacity.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setcachecapacity.rst.txt deleted file mode 100644 index 92c7d6389..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setcachecapacity.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachecapacity: - -setcachecapacity ----------------- - -Usage ---------- - -.. include:: setcachecapacity.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setcachekeystosave.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setcachekeystosave.rst.txt deleted file mode 100644 index 639179f99..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setcachekeystosave.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachekeystosave: - -setcachekeystosave ------------------- - -Usage ---------- - -.. 
include:: setcachekeystosave.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setcompactionthreshold.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setcompactionthreshold.rst.txt deleted file mode 100644 index 3a3e88b08..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthreshold: - -setcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: setcompactionthreshold.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setcompactionthroughput.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setcompactionthroughput.rst.txt deleted file mode 100644 index 27185da30..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthroughput: - -setcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: setcompactionthroughput.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setconcurrency.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setconcurrency.rst.txt deleted file mode 100644 index 75b09531f..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrency: - -setconcurrency --------------- - -Usage ---------- - -.. include:: setconcurrency.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setconcurrentcompactors.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setconcurrentcompactors.rst.txt deleted file mode 100644 index 75969de4e..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentcompactors: - -setconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: setconcurrentcompactors.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt deleted file mode 100644 index 26f53a171..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentviewbuilders: - -setconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: setconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt deleted file mode 100644 index 9986ca29a..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sethintedhandoffthrottlekb: - -sethintedhandoffthrottlekb --------------------------- - -Usage ---------- - -.. include:: sethintedhandoffthrottlekb.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt deleted file mode 100644 index ed406a7bc..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setinterdcstreamthroughput: - -setinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. 
include:: setinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setlogginglevel.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setlogginglevel.rst.txt deleted file mode 100644 index eaa4030c3..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setlogginglevel.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setlogginglevel: - -setlogginglevel ---------------- - -Usage ---------- - -.. include:: setlogginglevel.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setmaxhintwindow.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setmaxhintwindow.rst.txt deleted file mode 100644 index 0c62c3289..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setmaxhintwindow: - -setmaxhintwindow ----------------- - -Usage ---------- - -.. include:: setmaxhintwindow.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/setstreamthroughput.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/setstreamthroughput.rst.txt deleted file mode 100644 index 76447f112..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/setstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setstreamthroughput: - -setstreamthroughput -------------------- - -Usage ---------- - -.. include:: setstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/settimeout.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/settimeout.rst.txt deleted file mode 100644 index 4ec9a6e4d..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/settimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settimeout: - -settimeout ----------- - -Usage ---------- - -.. include:: settimeout.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/settraceprobability.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/settraceprobability.rst.txt deleted file mode 100644 index a95b48560..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/settraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settraceprobability: - -settraceprobability -------------------- - -Usage ---------- - -.. include:: settraceprobability.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/sjk.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/sjk.rst.txt deleted file mode 100644 index 19bf1d605..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/sjk.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sjk: - -sjk ---- - -Usage ---------- - -.. include:: sjk.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/snapshot.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/snapshot.rst.txt deleted file mode 100644 index 097a655b2..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/snapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_snapshot: - -snapshot --------- - -Usage ---------- - -.. include:: snapshot.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/status.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/status.rst.txt deleted file mode 100644 index 4d3050ea1..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/status.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_status: - -status ------- - -Usage ---------- - -.. 
include:: status.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/statusautocompaction.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/statusautocompaction.rst.txt deleted file mode 100644 index 3748e0e4e..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/statusautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusautocompaction: - -statusautocompaction --------------------- - -Usage ---------- - -.. include:: statusautocompaction.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/statusbackup.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/statusbackup.rst.txt deleted file mode 100644 index 6546ec07f..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/statusbackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbackup: - -statusbackup ------------- - -Usage ---------- - -.. include:: statusbackup.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/statusbinary.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/statusbinary.rst.txt deleted file mode 100644 index 0bb5011c3..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/statusbinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbinary: - -statusbinary ------------- - -Usage ---------- - -.. include:: statusbinary.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/statusgossip.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/statusgossip.rst.txt deleted file mode 100644 index 7dc57eda7..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/statusgossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusgossip: - -statusgossip ------------- - -Usage ---------- - -.. include:: statusgossip.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/statushandoff.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/statushandoff.rst.txt deleted file mode 100644 index aa1c4eb6b..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/statushandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statushandoff: - -statushandoff -------------- - -Usage ---------- - -.. include:: statushandoff.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/stop.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/stop.rst.txt deleted file mode 100644 index 1e44dbe79..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/stop.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stop: - -stop ----- - -Usage ---------- - -.. include:: stop.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/stopdaemon.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/stopdaemon.rst.txt deleted file mode 100644 index 4ae951098..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/stopdaemon.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stopdaemon: - -stopdaemon ----------- - -Usage ---------- - -.. include:: stopdaemon.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/tablehistograms.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/tablehistograms.rst.txt deleted file mode 100644 index 79d2b4ccb..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/tablehistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablehistograms: - -tablehistograms ---------------- - -Usage ---------- - -.. 
include:: tablehistograms.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/tablestats.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/tablestats.rst.txt deleted file mode 100644 index 5b2c02d98..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/tablestats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablestats: - -tablestats ----------- - -Usage ---------- - -.. include:: tablestats.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/toppartitions.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/toppartitions.rst.txt deleted file mode 100644 index 711816313..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/toppartitions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_toppartitions: - -toppartitions -------------- - -Usage ---------- - -.. include:: toppartitions.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/tpstats.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/tpstats.rst.txt deleted file mode 100644 index c6b662012..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/tpstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tpstats: - -tpstats -------- - -Usage ---------- - -.. include:: tpstats.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/truncatehints.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/truncatehints.rst.txt deleted file mode 100644 index 4b75391a6..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/truncatehints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_truncatehints: - -truncatehints -------------- - -Usage ---------- - -.. include:: truncatehints.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/upgradesstables.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/upgradesstables.rst.txt deleted file mode 100644 index 505cc148a..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/upgradesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_upgradesstables: - -upgradesstables ---------------- - -Usage ---------- - -.. include:: upgradesstables.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/verify.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/verify.rst.txt deleted file mode 100644 index dbd152cfb..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/verify.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_verify: - -verify ------- - -Usage ---------- - -.. include:: verify.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/version.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/version.rst.txt deleted file mode 100644 index fca4e3f44..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/version.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_version: - -version -------- - -Usage ---------- - -.. include:: version.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/tools/nodetool/viewbuildstatus.rst.txt b/src/doc/3.11.5/_sources/tools/nodetool/viewbuildstatus.rst.txt deleted file mode 100644 index 758fe502b..000000000 --- a/src/doc/3.11.5/_sources/tools/nodetool/viewbuildstatus.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_viewbuildstatus: - -viewbuildstatus ---------------- - -Usage ---------- - -.. include:: viewbuildstatus.txt - :literal: - diff --git a/src/doc/3.11.5/_sources/troubleshooting/index.rst.txt b/src/doc/3.11.5/_sources/troubleshooting/index.rst.txt deleted file mode 100644 index 2e5cf106d..000000000 --- a/src/doc/3.11.5/_sources/troubleshooting/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. 
Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Troubleshooting -=============== - -.. TODO: todo diff --git a/src/doc/3.11.5/_static/ajax-loader.gif b/src/doc/3.11.5/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab..000000000 Binary files a/src/doc/3.11.5/_static/ajax-loader.gif and /dev/null differ diff --git a/src/doc/3.11.5/_static/basic.css b/src/doc/3.11.5/_static/basic.css deleted file mode 100644 index 0807176ec..000000000 --- a/src/doc/3.11.5/_static/basic.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page 
------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { 
- text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - 
-div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/src/doc/3.11.5/_static/comment-bright.png b/src/doc/3.11.5/_static/comment-bright.png deleted file mode 100644 index 15e27edb1..000000000 Binary files a/src/doc/3.11.5/_static/comment-bright.png and /dev/null differ diff --git a/src/doc/3.11.5/_static/comment-close.png b/src/doc/3.11.5/_static/comment-close.png deleted file mode 100644 index 4d91bcf57..000000000 Binary files a/src/doc/3.11.5/_static/comment-close.png and /dev/null differ diff --git a/src/doc/3.11.5/_static/comment.png b/src/doc/3.11.5/_static/comment.png deleted file mode 100644 index dfbc0cbd5..000000000 Binary files a/src/doc/3.11.5/_static/comment.png and /dev/null differ diff --git a/src/doc/3.11.5/_static/doctools.js b/src/doc/3.11.5/_static/doctools.js deleted file mode 100644 index 344db17dd..000000000 --- a/src/doc/3.11.5/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. 
Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var bbox = span.getBBox(); - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - var parentOfText = node.parentNode.parentNode; - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 
0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && 
activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git a/src/doc/3.11.5/_static/documentation_options.js b/src/doc/3.11.5/_static/documentation_options.js deleted file mode 100644 index d28647eb8..000000000 --- a/src/doc/3.11.5/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, -}; \ No newline at end of file diff --git a/src/doc/3.11.5/_static/down-pressed.png b/src/doc/3.11.5/_static/down-pressed.png deleted file mode 100644 index 5756c8cad..000000000 Binary files a/src/doc/3.11.5/_static/down-pressed.png and /dev/null differ diff --git a/src/doc/3.11.5/_static/down.png b/src/doc/3.11.5/_static/down.png deleted file mode 100644 index 1b3bdad2c..000000000 Binary files a/src/doc/3.11.5/_static/down.png and /dev/null differ diff --git a/src/doc/3.11.5/_static/extra.css b/src/doc/3.11.5/_static/extra.css deleted file mode 100644 index 715e2a850..000000000 --- a/src/doc/3.11.5/_static/extra.css +++ /dev/null @@ -1,59 +0,0 @@ -div:not(.highlight) > pre { - background: #fff; - border: 1px solid #e1e4e5; - color: #404040; - margin: 1px 0 24px 0; - overflow-x: auto; - padding: 12px 12px; - font-size: 12px; -} - -a.reference.internal code.literal { - border: none; - font-size: 12px; - color: #2980B9; - padding: 0; - background: none; -} - -a.reference.internal:visited code.literal { - color: #9B59B6; - padding: 0; - background: none; -} - - -/* override table width restrictions */ -.wy-table-responsive table td, .wy-table-responsive table th { - white-space: normal; -} - -.wy-table-responsive { - margin-bottom: 24px; - max-width: 100%; - overflow: visible; -} - -table.contentstable { - margin: 0; -} - -td.rightcolumn { - padding-left: 30px; -} - -div#wipwarning { - font-size: 14px; - border: 1px solid #ecc; - color: #f66; - background: #ffe8e8; - padding: 10px 30px; - margin-bottom: 30px; -} -.content-container{ - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; - width:100%; -} diff --git a/src/doc/3.11.5/_static/file.png b/src/doc/3.11.5/_static/file.png deleted file mode 100644 index a858a410e..000000000 Binary files a/src/doc/3.11.5/_static/file.png and /dev/null differ diff --git a/src/doc/3.11.5/_static/jquery-3.2.1.js b/src/doc/3.11.5/_static/jquery-3.2.1.js deleted file mode 100644 index d2d8ca479..000000000 --- a/src/doc/3.11.5/_static/jquery-3.2.1.js +++ /dev/null @@ -1,10253 +0,0 @@ -/*! 
- * jQuery JavaScript Library v3.2.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2017-03-20T18:59Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.2.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from 
the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && Array.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? 
- [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
[Deleted vendored asset omitted: this hunk of the diff removes the website's bundled jQuery source, including the embedded Sizzle selector engine (support detection, setDocument, the Expr selectors and pseudos, tokenize/compile/select) and jQuery's traversal helpers, Callbacks, and Deferred modules. The removed lines are standard upstream library code with no project-specific changes.]
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( jQuery.isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? 
- [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. 
- // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. - } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "
", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps 
(WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( ">tbody", elem )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( isFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl ) { - jQuery._evalUrl( node.src ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = jQuery.contains( elem.ownerDocument, elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rmargin = ( /^margin/ ); - -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - div.style.cssText = - "box-sizing:border-box;" + - "position:relative;display:block;" + - "margin:auto;border:1px;padding:1px;" + - "top:1%;width:50%"; - div.innerHTML = ""; - documentElement.appendChild( container ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = divStyle.marginLeft === "2px"; - boxSizingReliableVal = divStyle.width === "4px"; - - // Support: Android 4.0 - 4.3 only - // Some styles come back with percentage values, even though they shouldn't - div.style.marginRight = "50%"; - pixelMarginRightVal = divStyle.marginRight === "4px"; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + - "padding:0;margin-top:1px;position:absolute"; - container.appendChild( div ); - - jQuery.extend( support, { - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelMarginRight: function() { - computeStyleTests(); - return pixelMarginRightVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }, - - cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style; - -// Return a css property mapped to a potentially vendor prefixed property -function vendorPropName( name ) { - - // Shortcut for names that are not vendor prefixed - if ( name in emptyStyle ) { - return name; - } - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a property mapped along what jQuery.cssProps suggests or to -// a vendor prefixed property. -function finalPropName( name ) { - var ret = jQuery.cssProps[ name ]; - if ( !ret ) { - ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; - } - return ret; -} - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i, - val = 0; - - // If we already have the right measurement, avoid augmentation - if ( extra === ( isBorderBox ? "border" : "content" ) ) { - i = 4; - - // Otherwise initialize for horizontal or vertical properties - } else { - i = name === "width" ? 
1 : 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // At this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - - // At this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // At this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with computed style - var valueIsBorderBox, - styles = getStyles( elem ), - val = curCSS( elem, name, styles ), - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test( val ) ) { - return val; - } - - // Check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && - ( support.boxSizingReliable() || val === elem.style[ name ] ); - - // Fall back to offsetWidth/Height when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - if ( val === "auto" ) { - val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; - } - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - - // Use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - "float": "cssFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - if ( type === "number" ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
- swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - } ) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = extra && getStyles( elem ), - subtract = extra && augmentWidthOrHeight( - elem, - name, - extra, - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ); - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ name ] = value; - value = jQuery.css( elem, name ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && - ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || - jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = jQuery.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 13 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( 
restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( jQuery.isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - jQuery.proxy( result.stop, result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( type === "string" ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = value.match( rnothtmlwhite ) || []; - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, isFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 
2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; -} ); - -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - - - - -support.focusin = "onfocusin" in window; - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = jQuery.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport 
); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - 
// If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - 
contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -' - ---- -
-
- -
-
-
- -
-

Dynamo

-
-

Gossip

-
-

Todo

-

todo

-
-
-
-

Failure Detection

-
-

Todo

-

todo

-
-
-
-

Token Ring/Ranges

-
-

Todo

-

todo

-
-
-
-

Replication

-

The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are SimpleStrategy and NetworkTopologyStrategy.
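For a quick way to see this mapping on a running cluster, `nodetool getendpoints` reports which nodes currently hold replicas for a specific partition key. A minimal sketch, assuming a reachable node and a hypothetical keyspace `ks` with a table `users` keyed by an integer `id` (the addresses shown are placeholders):

```bash
# Ask the local node which endpoints own the partition with key '42'.
$ nodetool getendpoints ks users 42
10.0.0.1
10.0.0.3
10.0.0.5
```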

-
-

SimpleStrategy

-

SimpleStrategy allows a single integer replication_factor to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if replication_factor is 3, then three different nodes should store -a copy of each row.

-

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until replication_factor distinct nodes have been added to the set of replicas.

-
-
-

NetworkTopologyStrategy

-

NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your -cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier -to add new physical or virtual datacenters to the cluster later.

-

In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose -replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication -factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one -replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially surprising -implications. For example, if the racks do not contain an equal number of -nodes, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to -configure all nodes on a single “rack”.
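As an illustration of how these strategies are selected, a keyspace's replication is declared when the keyspace is created (or altered). The sketch below uses cqlsh; the keyspace and datacenter names (`ks_simple`, `ks_nts`, `dc1`, `dc2`) are placeholders, not values taken from this documentation:

```bash
# SimpleStrategy: one cluster-wide replication factor, no rack/DC awareness.
$ cqlsh -e "CREATE KEYSPACE ks_simple WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};"

# NetworkTopologyStrategy: an explicit replication factor per datacenter.
$ cqlsh -e "CREATE KEYSPACE ks_nts WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3};"
```

The datacenter names passed to NetworkTopologyStrategy must match the names reported by the cluster's snitch.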

-
-
-
-

Tunable Consistency

-

Cassandra supports a per-operation tradeoff between consistency and availability through Consistency Levels. -Essentially, an operation’s consistency level specifies how many of the replicas need to respond to the coordinator in -order to consider the operation a success.

-

The following consistency levels are available:

-
-
ONE
-
Only a single replica must respond.
-
TWO
-
Two replicas must respond.
-
THREE
-
Three replicas must respond.
-
QUORUM
-
A majority (n/2 + 1) of the replicas must respond.
-
ALL
-
All of the replicas must respond.
-
LOCAL_QUORUM
-
A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
EACH_QUORUM
-
A majority of the replicas in each datacenter must respond.
-
LOCAL_ONE
-
Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not -sent to replicas in a remote datacenter.
-
ANY
-
A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later -attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for -write operations.
-
-

Write operations are always sent to all replicas, regardless of consistency level. The consistency level simply -controls how many responses the coordinator waits for before responding to the client.

-

For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level. There are a couple of exceptions to this:

-
    -
  • Speculative retry may issue a redundant read request to an extra replica if the other replicas have not responded -within a specified time window.
  • -
  • Based on read_repair_chance and dclocal_read_repair_chance (part of a table’s schema), read requests may be -randomly sent to all replicas in order to repair potentially inconsistent data.
  • -
-
-

Picking Consistency Levels

-

It is common to pick read and write consistency levels that are high enough to overlap, resulting in “strong” -consistency. This is typically expressed as W + R > RF, where W is the write consistency level, R is the -read consistency level, and RF is the replication factor. For example, if RF = 3, a QUORUM request will -require responses from at least two of the three replicas. If QUORUM is used for both writes and reads, at least -one of the replicas is guaranteed to participate in both the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, LOCAL_QUORUM can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter.
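As a worked example of the overlap rule, assume RF = 3, so QUORUM is floor(3/2) + 1 = 2. Writing and reading at QUORUM then gives W + R = 2 + 2 = 4 > 3, so at least one replica participates in both operations. A hedged cqlsh sketch (the keyspace/table `ks.users` and the row values are illustrative):

```bash
$ cqlsh
cqlsh> CONSISTENCY QUORUM;                                   -- applies to this cqlsh session
cqlsh> INSERT INTO ks.users (id, name) VALUES (42, 'alice'); -- acknowledged by 2 of 3 replicas
cqlsh> SELECT name FROM ks.users WHERE id = 42;              -- answered by 2 of 3 replicas
```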

-

If this type of strong consistency isn’t required, lower consistency levels like ONE may be used to improve -throughput, latency, and availability.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/architecture/guarantees.html b/src/doc/3.11.5/architecture/guarantees.html deleted file mode 100644 index d51f64406..000000000 --- a/src/doc/3.11.5/architecture/guarantees.html +++ /dev/null @@ -1,113 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Guarantees" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Guarantees

-
-

Todo

-

todo

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/architecture/index.html b/src/doc/3.11.5/architecture/index.html deleted file mode 100644 index 919b14f61..000000000 --- a/src/doc/3.11.5/architecture/index.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Architecture" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Architecture

-

This section describes the general architecture of Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/architecture/overview.html b/src/doc/3.11.5/architecture/overview.html deleted file mode 100644 index 3eedbf92c..000000000 --- a/src/doc/3.11.5/architecture/overview.html +++ /dev/null @@ -1,113 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Overview" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
- -
\ No newline at end of file diff --git a/src/doc/3.11.5/architecture/storage_engine.html b/src/doc/3.11.5/architecture/storage_engine.html deleted file mode 100644 index 26dca95cc..000000000 --- a/src/doc/3.11.5/architecture/storage_engine.html +++ /dev/null @@ -1,164 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Storage Engine" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Storage Engine

-
-

CommitLog

-
-

Todo

-

todo

-
-
-
-

Memtables

-

Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable SSTables. This can be triggered in several -ways:

-
    -
  • The memory usage of the memtables exceeds the configured threshold (see memtable_cleanup_threshold)
  • -
  • The CommitLog approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to -be freed
  • -
-

Memtables may be stored entirely on-heap or partially off-heap, depending on memtable_allocation_type.
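A flush can also be requested manually with nodetool, which is a convenient way to observe the memtable-to-SSTable transition. A sketch, assuming a hypothetical keyspace `ks` and table `users`; the cassandra.yaml settings in the comments are the two options referenced above, with commonly seen default values (verify against your own configuration):

```bash
# Force the active memtables of ks.users to be written out as SSTables.
$ nodetool flush ks users

# Related cassandra.yaml knobs:
#   memtable_allocation_type: heap_buffers
#   memtable_cleanup_threshold: 0.11
```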

-
-
-

SSTables

-

SSTables are the immutable data files that Cassandra uses for persisting data on disk.

-

As SSTables are flushed to disk from Memtables or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.
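Compaction activity can be observed from the command line; a minimal sketch against a running node:

```bash
# Compactions currently in progress (pending tasks, bytes compacted, etc.).
$ nodetool compactionstats

# A record of recently completed compactions.
$ nodetool compactionhistory
```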

-

Each SSTable is composed of multiple components stored in separate files:

-
-
Data.db
-
The actual data, i.e. the contents of rows.
-
Index.db
-
An index from partition keys to positions in the Data.db file. For wide partitions, this may also include an -index to rows within a partition.
-
Summary.db
-
A sampling of (by default) every 128th entry in the Index.db file.
-
Filter.db
-
A Bloom Filter of the partition keys in the SSTable.
-
CompressionInfo.db
-
Metadata about the offsets and lengths of compression chunks in the Data.db file.
-
Statistics.db
-
Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, -repair, compression, TTLs, and more.
-
Digest.crc32
-
A CRC-32 digest of the Data.db file.
-
TOC.txt
-
A plain text list of the component files for the SSTable.
-
-

Within the Data.db file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, Murmur3Partitioner, is used). Within a partition, rows are -stored in the order of their clustering keys.

-

SSTables can be optionally compressed using block-based compression.
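On disk, the components listed above appear as a group of files sharing a common prefix inside the table's data directory. The listing below is only an illustration: the data path, keyspace/table names, table ID, generation number (`1`) and SSTable format version (`mc`) all vary between installations and Cassandra versions:

```bash
$ ls /var/lib/cassandra/data/ks/users-<table_id>/
mc-1-big-CompressionInfo.db  mc-1-big-Digest.crc32  mc-1-big-Index.db       mc-1-big-Summary.db
mc-1-big-Data.db             mc-1-big-Filter.db     mc-1-big-Statistics.db  mc-1-big-TOC.txt
```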

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/bugs.html b/src/doc/3.11.5/bugs.html deleted file mode 100644 index 84733defb..000000000 --- a/src/doc/3.11.5/bugs.html +++ /dev/null @@ -1,108 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Reporting Bugs and Contributing" -doc-header-links: ' - - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Reporting Bugs and Contributing

-

If you encounter a problem with Cassandra, the first places to ask for help are the user mailing list and the #cassandra IRC channel.

-

If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the Apache Cassandra JIRA. Please provide as much -detail as you can about your problem, and don’t forget to indicate which version of Cassandra you are running and in which -environment.

-

Further details on how to contribute can be found at our Cassandra Development section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/configuration/cassandra_config_file.html b/src/doc/3.11.5/configuration/cassandra_config_file.html deleted file mode 100644 index 959abcf4d..000000000 --- a/src/doc/3.11.5/configuration/cassandra_config_file.html +++ /dev/null @@ -1,1826 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Configuring Cassandra" - -doc-title: "Cassandra Configuration File" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Cassandra Configuration File

-
-

cluster_name

-

The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another.

-

Default Value: ‘Test Cluster’

-
-
-

num_tokens

-

This defines the number of tokens randomly assigned to this node on the ring. -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens, assuming they have equal hardware capability.

-

If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below.

-

Specifying initial_token will override this setting on the node’s initial start; -on subsequent starts, this setting will apply even if initial_token is set.

-

If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations

-

Default Value: 256
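Both of these options are plain top-level keys in cassandra.yaml. A quick, hedged way to check what a node is actually configured with (the path differs between package and tarball installs; the values shown are the defaults described here):

```bash
$ grep -E '^(cluster_name|num_tokens):' /etc/cassandra/cassandra.yaml
cluster_name: 'Test Cluster'
num_tokens: 256
```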

-
-
-

allocate_tokens_for_keyspace

-

This option is commented out by default.

-

Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace.

-

The load assigned to each node will be close to proportional to its number of -vnodes.

-

Only supported with the Murmur3Partitioner.

-

Default Value: KEYSPACE

-
-
-

initial_token

-

This option is commented out by default.

-

initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) – in which case you should provide a -comma-separated list – it’s primarily used when adding nodes to legacy clusters -that do not have vnodes enabled.

-
-
-

hinted_handoff_enabled

-

See http://wiki.apache.org/cassandra/HintedHandoff -May either be “true” or “false” to enable globally

-

Default Value: true

-
-
-

hinted_handoff_disabled_datacenters

-

This option is commented out by default.

-

When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff

-

Default Value (complex option):

-
#    - DC1
-#    - DC2
-
-
-
-
-

max_hint_window_in_ms

-

This defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again.

-

Default Value: 10800000 # 3 hours
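For orientation, the hint-related options sit together in cassandra.yaml, and hinted handoff can also be inspected or toggled at runtime with nodetool. A sketch (the yaml values in the comments are the defaults documented in this file):

```bash
# cassandra.yaml fragment:
#   hinted_handoff_enabled: true
#   max_hint_window_in_ms: 10800000    # 3 hours
#   hinted_handoff_throttle_in_kb: 1024
#   max_hints_delivery_threads: 2

# Runtime control on a single node:
$ nodetool statushandoff
$ nodetool disablehandoff
$ nodetool enablehandoff
```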

-
-
-

hinted_handoff_throttle_in_kb

-

Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.)

-

Default Value: 1024

-
-
-

max_hints_delivery_threads

-

Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower

-

Default Value: 2

-
-
-

hints_directory

-

This option is commented out by default.

-

Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints.

-

Default Value: /var/lib/cassandra/hints

-
-
-

hints_flush_period_in_ms

-

How often hints should be flushed from the internal buffers to disk. -Will not trigger fsync.

-

Default Value: 10000

-
-
-

max_hints_file_size_in_mb

-

Maximum size for a single hints file, in megabytes.

-

Default Value: 128

-
-
-

hints_compression

-

This option is commented out by default.

-

Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

batchlog_replay_throttle_in_kb

-

Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster.

-

Default Value: 1024

-
-
-

authenticator

-

Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}.

-
    -
  • AllowAllAuthenticator performs no checks - set it to disable authentication.
  • -
  • PasswordAuthenticator relies on username/password pairs to authenticate -users. It keeps usernames and hashed passwords in system_auth.roles table. -Please increase system_auth keyspace replication factor if you use this authenticator. -If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
  • -
-

Default Value: AllowAllAuthenticator

-
-
-

authorizer

-

Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}.

-
    -
  • AllowAllAuthorizer allows any action to any user - set it to disable authorization.
  • -
  • CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllAuthorizer

-
-
-

role_manager

-

Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable.

-
    -
  • CassandraRoleManager stores role data in the system_auth keyspace. Please -increase system_auth keyspace replication factor if you use this role manager.
  • -
-

Default Value: CassandraRoleManager
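As a sketch of the non-default setup described above, a node switched from the permissive defaults to Cassandra's built-in authentication and authorization would carry the following settings; remember to increase the system_auth keyspace replication factor as noted for each backend.

authenticator: PasswordAuthenticator
authorizer: CassandraAuthorizer
role_manager: CassandraRoleManager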

-
-
-

roles_validity_in_ms

-

Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator.

-

Default Value: 2000

-
-
-

roles_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms.

-

Default Value: 2000

-
-
-

permissions_validity_in_ms

-

Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer.

-

Default Value: 2000

-
-
-

permissions_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms.

-

Default Value: 2000

-
-
-

credentials_validity_in_ms

-

Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching.

-

Default Value: 2000

-
-
-

credentials_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms.

-

Default Value: 2000
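The auth cache settings above are usually tuned together. A sketch using the documented defaults (the update intervals simply mirror the validity periods when left commented out):

roles_validity_in_ms: 2000
# roles_update_interval_in_ms: 2000
permissions_validity_in_ms: 2000
# permissions_update_interval_in_ms: 2000
credentials_validity_in_ms: 2000
# credentials_update_interval_in_ms: 2000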

-
-
-

partitioner

-

The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. You should leave this -alone for new clusters. The partitioner can NOT be changed without -reloading all data, so when upgrading you should set this to the -same partitioner you were already using.

-

Besides Murmur3Partitioner, partitioners included for backwards -compatibility include RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner.

-

Default Value: org.apache.cassandra.dht.Murmur3Partitioner

-
-
-

data_file_directories

-

This option is commented out by default.

-

Directories where Cassandra should store data on disk. Cassandra -will spread data evenly across them, subject to the granularity of -the configured compaction strategy. -If not set, the default directory is $CASSANDRA_HOME/data/data.

-

Default Value (complex option):

-
#     - /var/lib/cassandra/data
-
-
-
-
-

commitlog_directory

-

This option is commented out by default. Directory where Cassandra should store the commit log. When running on a magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.

-

Default Value: /var/lib/cassandra/commitlog

-
-
-

cdc_enabled

-

Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory).

-

Default Value: false

-
-
-

cdc_raw_directory

-

This option is commented out by default.

-

CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw.

-

Default Value: /var/lib/cassandra/cdc_raw
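For reference, the storage-related directories described in this file map to a fragment like the following; the paths are the documented defaults and should be adapted to the local disk layout.

data_file_directories:
    - /var/lib/cassandra/data
commitlog_directory: /var/lib/cassandra/commitlog
hints_directory: /var/lib/cassandra/hints
saved_caches_directory: /var/lib/cassandra/saved_caches
cdc_enabled: false
# cdc_raw_directory: /var/lib/cassandra/cdc_raw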

-
-
-

disk_failure_policy

-

Policy for data disk failures:

-
-
die
-
shut down gossip and client transports and kill the JVM for any fs errors or -single-sstable errors, so the node can be replaced.
-
stop_paranoid
-
shut down gossip and client transports even for single-sstable errors, -kill the JVM for errors during startup.
-
stop
-
shut down gossip and client transports, leaving the node effectively dead, but -can still be inspected via JMX, kill the JVM for errors during startup.
-
best_effort
-
stop using the failed disk and respond to requests based on -remaining available sstables. This means you WILL see obsolete -data at CL.ONE!
-
ignore
-
ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-
-

Default Value: stop

-
-
-

commit_failure_policy

-

Policy for commit disk failures:

-
-
die
-
shut down gossip and Thrift and kill the JVM, so the node can be replaced.
-
stop
-
shut down gossip and Thrift, leaving the node effectively dead, but -can still be inspected via JMX.
-
stop_commit
-
shutdown the commit log, letting writes collect but -continuing to service reads, as in pre-2.0.5 Cassandra
-
ignore
-
ignore fatal errors and let the batches fail
-
-

Default Value: stop

-
-
-

prepared_statements_cache_size_mb

-

Maximum size of the native protocol prepared statement cache

-

Valid values are either “auto” (omitting the value) or a value greater than 0.

-

Note that specifying too large a value will result in long-running GCs and possibly out-of-memory errors. Keep the value at a small fraction of the heap.

-

If you constantly see “prepared statements discarded in the last minute because -cache limit reached” messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts.

-

Only change the default value if you really have more prepared statements than fit in the cache. In most cases it is not necessary to change this value. Constantly re-preparing statements is a performance penalty.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

thrift_prepared_statements_cache_size_mb

-

Maximum size of the Thrift prepared statement cache

-

If you do not use Thrift at all, it is safe to leave this value at “auto”.

-

See description of ‘prepared_statements_cache_size_mb’ above for more information.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

key_cache_size_in_mb

-

Maximum size of the key cache in memory.

-

Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it’s worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It’s best to only use the -row cache if you have hot rows or static rows.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.

-
-
-

key_cache_save_period

-

Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in terms of I/O for the key cache. Row cache saving is much more expensive and has limited use.

-

Default is 14400 or 4 hours.

-

Default Value: 14400

-
-
-

key_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

row_cache_class_name

-

This option is commented out by default.

-

Row cache implementation class name. Available implementations:

-
-
org.apache.cassandra.cache.OHCProvider
-
Fully off-heap row cache implementation (default).
-
org.apache.cassandra.cache.SerializingCacheProvider
-
This is the row cache implementation available in previous releases of Cassandra.
-
-

Default Value: org.apache.cassandra.cache.OHCProvider

-
-
-

row_cache_size_in_mb

-

Maximum size of the row cache in memory. Please note that the OHC cache implementation requires some additional off-heap memory to manage the map structures, plus some in-flight memory during operations, before/after cache entries can be accounted against the cache capacity. This overhead is usually small compared to the whole capacity. Do not specify more memory than the system can afford in the worst usual situation, and leave some headroom for the OS block-level cache. Never allow your system to swap.

-

Default value is 0, to disable row caching.

-

Default Value: 0

-
-
-

row_cache_save_period

-

Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in terms of I/O for the key cache. Row cache saving is much more expensive and has limited use.

-

Default is 0 to disable saving the row cache.

-

Default Value: 0
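A sketch of the cache sizing options discussed above, using the documented defaults (leaving key_cache_size_in_mb empty keeps the automatic sizing):

key_cache_size_in_mb:            # empty = auto: min(5% of heap in MB, 100MB)
key_cache_save_period: 14400     # 4 hours
row_cache_size_in_mb: 0          # 0 disables the row cache
row_cache_save_period: 0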

-
-
-

row_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved

-

Default Value: 100

-
-
-

counter_cache_size_in_mb

-

Maximum size of the counter cache in memory.

-

Counter cache helps to reduce counter locks’ contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it’s relatively cheap.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.

-
-
-

counter_cache_save_period

-

Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Default is 7200 or 2 hours.

-

Default Value: 7200

-
-
-

counter_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

saved_caches_directory

-

This option is commented out by default.

-

Directory where Cassandra should store the saved caches. If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.

-

Default Value: /var/lib/cassandra/saved_caches

-
-
-

commitlog_sync

-

This option is commented out by default.

-

commitlog_sync may be either “periodic” or “batch.”

-

When in batch mode, Cassandra won’t ack writes until the commit log -has been fsynced to disk. It will wait -commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -This window should be kept short because the writer threads will -be unable to do extra work while waiting. (You may need to increase -concurrent_writes for the same reason.)

-

Default Value: batch

-
-
-

commitlog_sync_batch_window_in_ms

-

This option is commented out by default.

-

Default Value: 2

-
-
-

commitlog_sync

-

the other option is “periodic” where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds.

-

Default Value: periodic

-
-
-

commitlog_sync_period_in_ms

-

Default Value: 10000

-
-
-

commitlog_segment_size_in_mb

-

The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables.

-

The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via the max_mutation_size_in_kb setting in cassandra.yaml. The default is half the segment size, i.e. commitlog_segment_size_in_mb * 1024 / 2. This should be positive and less than 2048.

-

NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024

-

Default Value: 32
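The relationship between segment size and mutation size can be illustrated with a hedged example; max_mutation_size_in_kb is normally left unset, and the value below is simply the derived default for a 32 MB segment.

commitlog_segment_size_in_mb: 32
# max_mutation_size_in_kb: 16384   # default: commitlog_segment_size_in_mb * 1024 / 2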

-
-
-

commitlog_compression

-

This option is commented out by default.

-

Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

seed_provider

-

any class that implements the SeedProvider interface and has a -constructor that takes a Map<String, String> of parameters will do.

-

Default Value (complex option):

-
# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring.  You must change this if you are running
-# multiple nodes!
-- class_name: org.apache.cassandra.locator.SimpleSeedProvider
-  parameters:
-      # seeds is actually a comma-delimited list of addresses.
-      # Ex: "<ip1>,<ip2>,<ip3>"
-      - seeds: "127.0.0.1"
-
-
-
-
-

concurrent_reads

-

For workloads with more data than can fit in memory, Cassandra’s -bottleneck will be reads that need to fetch data from -disk. “concurrent_reads” should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -“concurrent_counter_writes”, since counter writes read the current -values before incrementing and writing them back.

-

On the other hand, since writes are almost never IO bound, the ideal -number of “concurrent_writes” is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb.

-

Default Value: 32

-
-
-

concurrent_writes

-

Default Value: 32

-
-
-

concurrent_counter_writes

-

Default Value: 32

-
-
-

concurrent_materialized_view_writes

-

For materialized view writes, as there is a read involved, this should be limited by the lesser of concurrent reads or concurrent writes.

-

Default Value: 32
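Applying the rules of thumb above to a hypothetical node with 2 data drives and 8 cores (illustrative numbers only) would give something like:

concurrent_reads: 32                     # 16 * number_of_drives
concurrent_writes: 64                    # 8 * number_of_cores
concurrent_counter_writes: 32            # same guidance as concurrent_reads
concurrent_materialized_view_writes: 32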

-
-
-

file_cache_size_in_mb

-

This option is commented out by default.

-

Maximum memory to use for sstable chunk cache and buffer pooling. 32MB of this is reserved for pooling buffers; the rest is used as a cache that holds uncompressed sstable chunks. Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, so it is in addition to the memory allocated for heap. The cache also has on-heap overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size if the default 64k chunk size is used). Memory is only allocated when needed.

-

Default Value: 512

-
-
-

buffer_pool_use_heap_if_exhausted

-

This option is commented out by default.

-

Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

-

Default Value: true

-
-
-

disk_optimization_strategy

-

This option is commented out by default.

-

The strategy for optimizing disk reads. Possible values are: ssd (for solid state disks, the default) and spinning (for spinning disks).

-

Default Value: ssd

-
-
-

memtable_heap_space_in_mb

-

This option is commented out by default.

-

Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap.

-

Default Value: 2048

-
-
-

memtable_offheap_space_in_mb

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

memtable_cleanup_threshold

-

This option is commented out by default.

-

memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information.

-

Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load.

-

memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)

-

Default Value: 0.11

-
-
-

memtable_allocation_type

-

Specify the way Cassandra allocates and manages memtable memory. -Options are:

-
-
heap_buffers
-
on heap nio buffers
-
offheap_buffers
-
off heap (direct) nio buffers
-
offheap_objects
-
off heap objects
-
-

Default Value: heap_buffers
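A minimal sketch of the memtable options discussed above, using the documented (commented-out) defaults:

# memtable_heap_space_in_mb: 2048
# memtable_offheap_space_in_mb: 2048
memtable_allocation_type: heap_buffers
# memtable_flush_writers: 2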

-
-
-

repair_session_max_tree_depth

-

This option is commented out by default.

-

Limits the maximum Merkle tree depth to avoid consuming too much -memory during repairs.

-

The default setting of 18 generates trees of maximum size around -50 MiB / tree. If you are running out of memory during repairs consider -lowering this to 15 (~6 MiB / tree) or lower, but try not to lower it -too much past that or you will lose too much resolution and stream -too much redundant data during repair. Cannot be set lower than 10.

-

For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.

-

Default Value: 18

-
-
-

commitlog_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for commit logs on disk.

-

If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume.

-

Default Value: 8192

-
-
-

memtable_flush_writers

-

This option is commented out by default.

-

This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound.

-

Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time.

-

You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory.

-

memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers.

-

Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead.

-

There is a direct tradeoff between the number of memtables that can be flushed concurrently and flush size and frequency. More is not better; you just need enough flush writers to never stall waiting for flushing to free memory.

-

Default Value: 2

-
-
-

cdc_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for change-data-capture logs on disk.

-

If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed.

-

The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides.

-

Default Value: 4096

-
-
-

cdc_free_space_check_interval_ms

-

This option is commented out by default.

-

When we hit our cdc_raw limit and the CDCCompactor is either running behind or experiencing backpressure, we check at the following interval to see if any new space for cdc-tracked tables has been made available. Defaults to 250ms.

-

Default Value: 250

-
-
-

index_summary_capacity_in_mb

-

A fixed memory pool size in MB for SSTable index summaries. If left empty, this will default to 5% of the heap size. If the memory usage of all index summaries exceeds this limit, SSTables with low read rates will shrink their index summaries in order to meet this limit. However, this is a best-effort process. In extreme conditions Cassandra may need to use more than this amount of memory.

-
-
-

index_summary_resize_interval_in_minutes

-

How frequently index summaries should be resampled. This is done periodically to redistribute memory from the fixed-size pool to sstables proportional to their recent read rates. Setting to -1 will disable this process, leaving existing index summaries at their current sampling level.

-

Default Value: 60

-
-
-

trickle_fsync

-

Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters.

-

Default Value: false

-
-
-

trickle_fsync_interval_in_kb

-

Default Value: 10240

-
-
-

storage_port

-

TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7000

-
-
-

ssl_storage_port

-

SSL port, for encrypted communication. Unused unless enabled in -encryption_options -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7001

-
-
-

listen_address

-

Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate!

-

Set listen_address OR listen_interface, not both.

-

Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be).

-

Setting listen_address to 0.0.0.0 is always wrong.

-

Default Value: localhost

-
-
-

listen_interface

-

This option is commented out by default.

-

Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth0

-
-
-

listen_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_address

-

This option is commented out by default.

-

Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address

-

Default Value: 1.2.3.4

-
-
-

listen_on_broadcast_address

-

This option is commented out by default.

-

When using multiple physical network interfaces, set this to true to listen on broadcast_address in addition to the listen_address, allowing nodes to communicate on both interfaces. Ignore this property if the network configuration automatically routes between the public and private networks, such as on EC2.

-

Default Value: false

-
-
-

internode_authenticator

-

This option is commented out by default.

-

Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes.

-

Default Value: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

-
-
-

start_native_transport

-

Whether to start the native transport server. -Please note that the address on which the native transport is bound is the -same as the rpc_address. The port however is different and specified below.

-

Default Value: true

-
-
-

native_transport_port

-

port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 9042

-
-
-

native_transport_port_ssl

-

This option is commented out by default. -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted.

-

Default Value: 9142
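For example, to serve TLS on a dedicated port while keeping the standard port unencrypted, the two port settings are combined roughly as in the sketch below; this assumes client_encryption_options has been enabled as described later in this file.

native_transport_port: 9042        # remains unencrypted
native_transport_port_ssl: 9142    # encrypted; requires client_encryption_options to be enabled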

-
-
-

native_transport_max_threads

-

This option is commented out by default. -The maximum threads for handling requests when the native transport is used. -This is similar to rpc_max_threads though the default differs slightly (and -there is no native_transport_min_threads, idle threads will always be stopped -after 30 seconds).

-

Default Value: 128

-
-
-

native_transport_max_frame_size_in_mb

-

This option is commented out by default.

-

The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you’re changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

native_transport_max_concurrent_connections

-

This option is commented out by default.

-

The maximum number of concurrent client connections. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_max_concurrent_connections_per_ip

-

This option is commented out by default.

-

The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

start_rpc

-

Whether to start the thrift rpc server.

-

Default Value: false

-
-
-

rpc_address

-

The address or interface to bind the Thrift RPC service and native transport -server to.

-

Set rpc_address OR rpc_interface, not both.

-

Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node).

-

Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0.

-

For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: localhost

-
-
-

rpc_interface

-

This option is commented out by default.

-

Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth1

-
-
-

rpc_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

rpc_port

-

port for Thrift to listen for clients on

-

Default Value: 9160

-
-
-

broadcast_rpc_address

-

This option is commented out by default.

-

RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set.

-

Default Value: 1.2.3.4

-
-
-

rpc_keepalive

-

enable or disable keepalive on rpc/native connections

-

Default Value: true

-
-
-

rpc_server_type

-

Cassandra provides two out-of-the-box options for the RPC Server:

-
-
sync
-
One thread per thrift connection. For a very large number of clients, memory -will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size -per thread, and that will correspond to your use of virtual memory (but physical memory -may be limited depending on use of stack space).
-
hsha
-
Stands for “half synchronous, half asynchronous.” All thrift clients are handled -asynchronously using a small number of threads that does not vary with the amount -of thrift clients (and thus scales well to many clients). The rpc requests are still -synchronous (one thread per active request). If hsha is selected then it is essential -that rpc_max_threads is changed from the default value of unlimited.
-
-

The default is sync because on Windows hsha is about 30% slower. On Linux, -sync/hsha performance is about the same, with hsha of course using less memory.

-

Alternatively, you can provide your own RPC server by supplying the fully-qualified class name of an o.a.c.t.TServerFactory that can create an instance of it.

-

Default Value: sync

-
-
-

rpc_min_threads

-

This option is commented out by default.

-

Uncomment rpc_min|max_thread to set request pool size limits.

-

Regardless of your choice of RPC server (see above), the number of maximum requests in the -RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -RPC server, it also dictates the number of clients that can be connected at all).

-

The default is unlimited and thus provides no protection against clients overwhelming the server. You are -encouraged to set a maximum that makes sense for you in production, but do keep in mind that -rpc_max_threads represents the maximum number of client requests this server may execute concurrently.

-

Default Value: 16

-
-
-

rpc_max_threads

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

rpc_send_buff_size_in_bytes

-

This option is commented out by default.

-

uncomment to set socket buffer sizes on rpc connections

-
-
-

rpc_recv_buff_size_in_bytes

-

This option is commented out by default.

-
-
-

internode_send_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set the socket buffer size for internode communication. Note that when setting this, the buffer size is limited by net.core.wmem_max, and when not setting it, it is defined by net.ipv4.tcp_wmem. See also: /proc/sys/net/core/wmem_max, /proc/sys/net/core/rmem_max, /proc/sys/net/ipv4/tcp_wmem, /proc/sys/net/ipv4/tcp_rmem, and ‘man tcp’.

-
-
-

internode_recv_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem

-
-
-

thrift_framed_transport_size_in_mb

-

Frame size for thrift (maximum message length).

-

Default Value: 15

-
-
-

incremental_backups

-

Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator’s -responsibility.

-

Default Value: false

-
-
-

snapshot_before_compaction

-

Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won’t clean up the -snapshots for you. Mostly useful if you’re paranoid when there -is a data format change.

-

Default Value: false

-
-
-

auto_snapshot

-

Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop.

-

Default Value: true

-
-
-

column_index_size_in_kb

-

Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these:

-
    -
  • a smaller granularity means more index entries are generated and looking up rows within the partition by collation column is faster
  • -
  • but, Cassandra will keep the collation index in memory for hot -rows (as part of the key cache), so a larger granularity means -you can cache more hot rows
  • -
-

Default Value: 64

-
-
-

column_index_cache_size_in_kb

-

Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk.

-

Note that this size refers to the size of the -serialized index information and not the size of the partition.

-

Default Value: 2

-
-
-

concurrent_compactors

-

This option is commented out by default.

-

Number of simultaneous compactions to allow, NOT including validation “compactions” for anti-entropy repair. Simultaneous compactions can help preserve read performance in a mixed read/write workload, by mitigating the tendency of small sstables to accumulate during a single long-running compaction. The default is usually fine and if you experience problems with compaction running too slowly or too fast, you should look at compaction_throughput_mb_per_sec first.

-

concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8.

-

If your data directories are backed by SSD, you should increase this -to the number of cores.

-

Default Value: 1

-
-
-

compaction_throughput_mb_per_sec

-

Throttles compaction to the given total throughput across the entire system. The faster you insert data, the faster you need to compact in order to keep the sstable count down, but in general, setting this to 16 to 32 times the rate you are inserting data is more than sufficient. Setting this to 0 disables throttling. Note that this accounts for all types of compaction, including validation compaction.

-

Default Value: 16
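A hedged sketch of the compaction throttling settings discussed above; the documented defaults are shown, and the throughput should only be set to 0 if unthrottled compaction is deliberately wanted.

compaction_throughput_mb_per_sec: 16
# concurrent_compactors: 1          # commented out by default; see the sizing guidance above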

-
-
-

sstable_preemptive_open_interval_in_mb

-

When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot

-

Default Value: 50

-
-
-

stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

inter_dc_stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all streaming file transfers between the datacenters; this setting allows users to throttle inter-dc stream throughput in addition to throttling all network stream traffic as configured with stream_throughput_outbound_megabits_per_sec. When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

read_request_timeout_in_ms

-

How long the coordinator should wait for read operations to complete

-

Default Value: 5000

-
-
-

range_request_timeout_in_ms

-

How long the coordinator should wait for seq or index scans to complete

-

Default Value: 10000

-
-
-

write_request_timeout_in_ms

-

How long the coordinator should wait for writes to complete

-

Default Value: 2000

-
-
-

counter_write_request_timeout_in_ms

-

How long the coordinator should wait for counter writes to complete

-

Default Value: 5000

-
-
-

cas_contention_timeout_in_ms

-

How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row

-

Default Value: 1000

-
-
-

truncate_request_timeout_in_ms

-

How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.)

-

Default Value: 60000

-
-
-

request_timeout_in_ms

-

The default timeout for other, miscellaneous operations

-

Default Value: 10000

-
-
-

slow_query_log_timeout_in_ms

-

How long before a node logs slow queries. Select queries that take longer than -this timeout to execute, will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging.

-

Default Value: 500

-
-
-

cross_node_timeout

-

Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests.

-

Warning: before enabling this property make sure ntp is installed and the times are synchronized between the nodes.

-

Default Value: false

-
-
-

streaming_keep_alive_period_in_secs

-

This option is commented out by default.

-

Set the keep-alive period for streaming. This node will send a keep-alive message periodically with this period. If the node does not receive a keep-alive message from the peer for 2 keep-alive cycles, the stream session times out and fails. The default value is 300s (5 minutes), which means a stalled stream times out in 10 minutes by default.

-

Default Value: 300

-
-
-

phi_convict_threshold

-

This option is commented out by default.

-

phi value that must be reached for a host to be marked down. -most users should never need to adjust this.

-

Default Value: 8

-
-
-

endpoint_snitch

-

endpoint_snitch – Set this to a class that implements -IEndpointSnitch. The snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route -requests efficiently
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid -correlated failures. It does this by grouping machines into -“datacenters” and “racks.” Cassandra will do its best not to have -more than one replica on the same “rack” (which may not actually -be a physical location)
  • -
-

CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on “rack1” in “datacenter1”, your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new “datacenter”) and -decommissioning the old ones.

-

Out of the box, Cassandra provides:

-
-
SimpleSnitch:
-
Treats Strategy order as proximity. This can improve cache -locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack -and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via -gossip. If cassandra-topology.properties exists, it is used as a -fallback, allowing migration from the PropertyFileSnitch.
-
PropertyFileSnitch:
-
Proximity is determined by rack and data center, which are -explicitly configured in cassandra-topology.properties.
-
Ec2Snitch:
-
Appropriate for EC2 deployments in a single Region. Loads Region -and Availability Zone information from the EC2 API. The Region is -treated as the datacenter, and the Availability Zone as the rack. -Only private IPs are used, so this will not work across multiple -Regions.
-
Ec2MultiRegionSnitch:
-
Uses public IPs as broadcast_address to allow cross-region -connectivity. (Thus, you should set seed addresses to the public -IP as well.) You will need to open the storage_port or -ssl_storage_port on the public IP firewall. (For intra-Region -traffic, Cassandra will switch to the private IP after -establishing a connection.)
-
RackInferringSnitch:
-
Proximity is determined by rack and data center, which are -assumed to correspond to the 3rd and 2nd octet of each node’s IP -address, respectively. Unless this happens to match your -deployment conventions, this is best used as an example of -writing a custom Snitch class and is provided in that spirit.
-
-

You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath.

-

Default Value: SimpleSnitch
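As the text above recommends GossipingPropertyFileSnitch for production, a typical configuration sets the snitch here and places the node's location in cassandra-rackdc.properties; the dc and rack names below are placeholders for illustration only.

endpoint_snitch: GossipingPropertyFileSnitch

# cassandra-rackdc.properties (per node, illustrative values):
# dc=DC1
# rack=RACK1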

-
-
-

dynamic_snitch_update_interval_in_ms

-

controls how often to perform the more expensive part of host score -calculation

-

Default Value: 100

-
-
-

dynamic_snitch_reset_interval_in_ms

-

controls how often to reset all host scores, allowing a bad host to -possibly recover

-

Default Value: 600000

-
-
-

dynamic_snitch_badness_threshold

-

if set greater than zero and read_repair_chance is < 1.0, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest.

-

Default Value: 0.1

-
-
-

request_scheduler

-

request_scheduler – Set this to a class that implements -RequestScheduler, which will schedule incoming client requests -according to the specific policy. This is useful for multi-tenancy -with a single Cassandra cluster. -NOTE: This is specifically for requests from the client and does -not affect inter node communication. -org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -client requests to a node with a separate queue for each -request_scheduler_id. The scheduler is further customized by -request_scheduler_options as described below.

-

Default Value: org.apache.cassandra.scheduler.NoScheduler

-
-
-

request_scheduler_options

-

This option is commented out by default.

-

Scheduler Options vary based on the type of scheduler

-
-
NoScheduler
-
Has no options
-
RoundRobin
-
-
throttle_limit
-
The throttle_limit is the number of in-flight -requests per client. Requests beyond -that limit are queued up until -running requests can complete. -The value of 80 here is twice the number of -concurrent_reads + concurrent_writes.
-
default_weight
-
default_weight is optional and allows for -overriding the default which is 1.
-
weights
-
Weights are optional and will default to 1 or the -overridden default_weight. The weight translates into how -many requests are handled during each turn of the -RoundRobin, based on the scheduler id.
-
-
-
-

Default Value (complex option):

-
#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-
-
-
-

request_scheduler_id

-

This option is commented out by default. -request_scheduler_id – An identifier based on which to perform -the request scheduling. Currently the only valid option is keyspace.

-

Default Value: keyspace

-
-
-

server_encryption_options

-

Enable or disable inter-node encryption -JVM defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html -NOTE No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack

-

If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks

-

The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore

-

Default Value (complex option):

-
internode_encryption: none
-keystore: conf/.keystore
-keystore_password: cassandra
-truststore: conf/.truststore
-truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# algorithm: SunX509
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-# require_client_auth: false
-# require_endpoint_verification: false
-
-
-
-
-

client_encryption_options

-

enable or disable client/server encryption.

-

Default Value (complex option):

-
enabled: false
-# If enabled and optional is set to true encrypted and unencrypted connections are handled.
-optional: false
-keystore: conf/.keystore
-keystore_password: cassandra
-# require_client_auth: false
-# Set trustore and truststore_password if require_client_auth is true
-# truststore: conf/.truststore
-# truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# algorithm: SunX509
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-
-
-
-

internode_compression

-

internode_compression controls whether traffic between nodes is -compressed. -Can be:

-
-
all
-
all traffic is compressed
-
dc
-
traffic between different datacenters is compressed
-
none
-
nothing is compressed.
-
-

Default Value: dc

-
-
-

inter_dc_tcp_nodelay

-

Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses.

-

Default Value: false

-
-
-

tracetype_query_ttl

-

TTL for different trace types used during logging of the repair process.

-

Default Value: 86400

-
-
-

tracetype_repair_ttl

-

Default Value: 604800

-
-
-

gc_log_threshold_in_ms

-

This option is commented out by default.

-

By default, Cassandra logs GC Pauses greater than 200 ms at INFO level -This threshold can be adjusted to minimize logging if necessary

-

Default Value: 200

-
-
-

enable_user_defined_functions

-

If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.

-

Default Value: false

-
-
-

enable_scripted_user_defined_functions

-

Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with “language javascript” or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false.

-

Default Value: false

-
-
-

windows_timer_interval

-

The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals ‘clockres’ tool can confirm your system’s default -setting.

-

Default Value: 1

-
-
-

transparent_data_encryption_options

-

Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by the “key_alias” is the only key that will be used for encrypt operations; previously used keys can still (and should!) be in the keystore and will be used on decrypt operations (to handle the case of key rotation).

-

It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)

-

Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints

-

Default Value (complex option):

-
enabled: false
-chunk_length_kb: 64
-cipher: AES/CBC/PKCS5Padding
-key_alias: testing:1
-# CBC IV length for AES needs to be 16 bytes (which is also the default size)
-# iv_length: 16
-key_provider:
-  - class_name: org.apache.cassandra.security.JKSKeyProvider
-    parameters:
-      - keystore: conf/.keystore
-        keystore_password: cassandra
-        store_type: JCEKS
-        key_password: cassandra
-
-
-
-
-

tombstone_warn_threshold

-
-

SAFETY THRESHOLDS #

-

When executing a scan, within or across a partition, we need to keep the tombstones seen in memory so we can return them to the coordinator, which will use them to make sure other replicas also know about the deleted rows. With workloads that generate a lot of tombstones, this can cause performance problems and even exhaust the server heap. (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) Adjust the thresholds here if you understand the dangers and want to scan more tombstones anyway. These thresholds may also be adjusted at runtime using the StorageService mbean.

-

Default Value: 1000

-
-
-
-

tombstone_failure_threshold

-

Default Value: 100000

-
-
-

batch_size_warn_threshold_in_kb

-

Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability.

-

Default Value: 5

-
-
-

batch_size_fail_threshold_in_kb

-

Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.

-

Default Value: 50

-
-
-

unlogged_batch_across_partitions_warn_threshold

-

Log WARN on any batches not of type LOGGED that span more partitions than this limit.

-

Default Value: 10

-
-
-

compaction_large_partition_warning_threshold_mb

-

Log a warning when compacting partitions larger than this value

-

Default Value: 100

-
-
-

gc_warn_threshold_in_ms

-

GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level

-

Default Value: 1000

-
-
-

max_value_size_in_mb

-

This option is commented out by default.

-

Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

back_pressure_enabled

-

Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas.

-

Default Value: false

-
-
-

back_pressure_strategy

-

The back-pressure strategy applied. The default implementation, RateBasedBackPressure, takes three arguments: high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. If below the high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; if above the high ratio, the rate limiting is increased by the given factor. The factor is usually best configured between 1 and 10; use larger values for a faster recovery at the expense of potentially more dropped mutations. The rate limiting is applied according to the flow type: if FAST, it is rate limited at the speed of the fastest replica; if SLOW, at the speed of the slowest one. New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and provide a public constructor accepting a Map<String, Object>.
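A sketch of a RateBasedBackPressure configuration follows; the parameter names and values mirror the stock cassandra.yaml example and should be treated as illustrative rather than prescriptive.

back_pressure_enabled: false
back_pressure_strategy:
    - class_name: org.apache.cassandra.net.RateBasedBackPressure
      parameters:
        - high_ratio: 0.90
          factor: 5
          flow: FAST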

-
-
-

otc_coalescing_strategy

-

This option is commented out by default.

-

Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won’t notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It’s not that bare metal -doesn’t benefit from coalescing messages, it’s that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details.

-

Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.

-

Default Value: DISABLED

-
-
-

otc_coalescing_window_us

-

This option is commented out by default.

-

How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled.

-

Default Value: 200

-
-
-

otc_coalescing_enough_coalesced_messages

-

This option is commented out by default.

-

Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.

-

Default Value: 8

-
-
-

otc_backlog_expiration_interval_ms

-

This option is commented out by default.

-

How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.

-

Default Value: 200

-
-
-

enable_materialized_views

-
-

EXPERIMENTAL FEATURES #

-

Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use.

-

Default Value: true

-
-
-
-

enable_sasi_indexes

-

Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use.

-

Default Value: true
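As an illustrative cassandra.yaml sketch, both experimental features described above could be disabled explicitly with:

enable_materialized_views: false
enable_sasi_indexes: false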

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/configuration/index.html b/src/doc/3.11.5/configuration/index.html deleted file mode 100644 index 07b230a0e..000000000 --- a/src/doc/3.11.5/configuration/index.html +++ /dev/null @@ -1,109 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

This section describes how to configure Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/contactus.html b/src/doc/3.11.5/contactus.html deleted file mode 100644 index d53c5150c..000000000 --- a/src/doc/3.11.5/contactus.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contact us" -doc-header-links: ' - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contact us

-

You can get in touch with the Cassandra community either via the mailing lists or the freenode IRC channels.

-
-

Mailing lists

-

The following mailing lists are available:

- -

Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe.

-
-
-

IRC

-

To chat with developers or users in real-time, join our channels on IRC freenode. The -following channels are available:

-
    -
  • #cassandra - for user questions and general discussions.
  • -
  • #cassandra-dev - strictly for questions or discussions related to Cassandra development.
  • -
  • #cassandra-builds - results of automated test builds.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/appendices.html b/src/doc/3.11.5/cql/appendices.html deleted file mode 100644 index 18ce02b06..000000000 --- a/src/doc/3.11.5/cql/appendices.html +++ /dev/null @@ -1,565 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Appendices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Appendices

-
-

Appendix A: CQL Keywords

-

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (but one can enclose a reserved keyword in double-quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d’être of these non-reserved keywords is convenience: some keywords are non-reserved when it was always easy for the parser to decide whether they were used as keywords or not.

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Keyword | Reserved?
ADD | yes
AGGREGATE | no
ALL | no
ALLOW | yes
ALTER | yes
AND | yes
APPLY | yes
AS | no
ASC | yes
ASCII | no
AUTHORIZE | yes
BATCH | yes
BEGIN | yes
BIGINT | no
BLOB | no
BOOLEAN | no
BY | yes
CALLED | no
CLUSTERING | no
COLUMNFAMILY | yes
COMPACT | no
CONTAINS | no
COUNT | no
COUNTER | no
CREATE | yes
CUSTOM | no
DATE | no
DECIMAL | no
DELETE | yes
DESC | yes
DESCRIBE | yes
DISTINCT | no
DOUBLE | no
DROP | yes
ENTRIES | yes
EXECUTE | yes
EXISTS | no
FILTERING | no
FINALFUNC | no
FLOAT | no
FROM | yes
FROZEN | no
FULL | yes
FUNCTION | no
FUNCTIONS | no
GRANT | yes
IF | yes
IN | yes
INDEX | yes
INET | no
INFINITY | yes
INITCOND | no
INPUT | no
INSERT | yes
INT | no
INTO | yes
JSON | no
KEY | no
KEYS | no
KEYSPACE | yes
KEYSPACES | no
LANGUAGE | no
LIMIT | yes
LIST | no
LOGIN | no
MAP | no
MODIFY | yes
NAN | yes
NOLOGIN | no
NORECURSIVE | yes
NOSUPERUSER | no
NOT | yes
NULL | yes
OF | yes
ON | yes
OPTIONS | no
OR | yes
ORDER | yes
PASSWORD | no
PERMISSION | no
PERMISSIONS | no
PRIMARY | yes
RENAME | yes
REPLACE | yes
RETURNS | no
REVOKE | yes
ROLE | no
ROLES | no
SCHEMA | yes
SELECT | yes
SET | yes
SFUNC | no
SMALLINT | no
STATIC | no
STORAGE | no
STYPE | no
SUPERUSER | no
TABLE | yes
TEXT | no
TIME | no
TIMESTAMP | no
TIMEUUID | no
TINYINT | no
TO | yes
TOKEN | yes
TRIGGER | no
TRUNCATE | yes
TTL | no
TUPLE | no
TYPE | no
UNLOGGED | yes
UPDATE | yes
USE | yes
USER | no
USERS | no
USING | yes
UUID | no
VALUES | no
VARCHAR | no
VARINT | no
WHERE | yes
WITH | yes
WRITETIME | no
-
-
-

Appendix B: CQL Reserved Types

-

The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name.

- --- - - - - - - - - - - - - - - - - - - -
type
bitstring
byte
complex
enum
interval
macaddr
-
-
-

Appendix C: Dropping Compact Storage

-

Starting with version 4.0, Thrift and COMPACT STORAGE are no longer supported.

-

The ‘ALTER … DROP COMPACT STORAGE’ statement makes Compact Tables CQL-compatible, exposing the internal structure of Thrift/Compact Tables (see the example following the list below):

-
    -
  • CQL-created Compact Tables that have no clustering columns, will expose an -additional clustering column column1 with UTF8Type.
  • -
  • CQL-created Compact Tables that had no regular columns, will expose a -regular column value with BytesType.
  • -
  • For CQL-Created Compact Tables, all columns originally defined as regular will become static
  • -
  • CQL-created Compact Tables that have clustering but have no regular -columns will have an empty value column (of EmptyType)
  • -
  • SuperColumn Tables (can only be created through Thrift) will expose -a compact value map with an empty name.
  • -
  • Thrift-created Compact Tables will have types corresponding to their -Thrift definition.
  • -
-
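For example (an illustrative statement; the keyspace and table names are placeholders):

ALTER TABLE my_keyspace.my_compact_table DROP COMPACT STORAGE;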
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/changes.html b/src/doc/3.11.5/cql/changes.html deleted file mode 100644 index b001a44b7..000000000 --- a/src/doc/3.11.5/cql/changes.html +++ /dev/null @@ -1,353 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Changes

-

The following describes the changes in each version of CQL.

-
-

3.4.4

-
    -
  • ALTER TABLE ALTER has been removed; a column’s type may not be changed after creation (CASSANDRA-12443).
  • -
  • ALTER TYPE ALTER has been removed; a field’s type may not be changed after creation (CASSANDRA-12443).
  • -
-
-
-

3.4.3

-
    -
  • Adds a new duration data type (CASSANDRA-11873).
  • -
  • Support for GROUP BY (CASSANDRA-10707).
  • -
  • Adds a DEFAULT UNSET option for INSERT JSON to ignore omitted columns (CASSANDRA-11424).
  • -
  • Allows null as a legal value for TTL on insert and update. It will be treated as equivalent to inserting a 0 (CASSANDRA-12216).
  • -
-

-
-
-

3.4.2

-
    -
  • If a table has a non zero default_time_to_live, then explicitly specifying a TTL of 0 in an INSERT or -UPDATE statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels -the default_time_to_live). This wasn’t the case before and the default_time_to_live was applied even though a -TTL had been explicitly set.
  • -
  • ALTER TABLE ADD and DROP now allow multiple columns to be added/removed.
  • -
  • New PER PARTITION LIMIT option for SELECT statements (see CASSANDRA-7017).
  • -
  • User-defined functions can now instantiate UDTValue and TupleValue instances via the new UDFContext interface (see CASSANDRA-10818).
  • -
  • User-defined types may now be stored in a non-frozen form, allowing individual fields to be updated and -deleted in UPDATE statements and DELETE statements, respectively. (CASSANDRA-7423).
  • -
-
-
-

3.4.1

-
    -
  • Adds CAST functions.
  • -
-
-
-

3.4.0

-
    -
  • Support for materialized views.
  • -
  • DELETE support for inequality expressions and IN restrictions on any primary key columns.
  • -
  • UPDATE support for IN restrictions on any primary key columns.
  • -
-
-
-

3.3.1

-
    -
  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X.
  • -
-
-
-

3.3.0

-
    -
  • User-defined functions and aggregates are now supported.
  • -
  • Allows double-dollar enclosed string literals as an alternative to single-quote enclosed strings.
  • -
  • Introduces Roles to supersede user based authentication and access control
  • -
  • New date, time, tinyint and smallint data types have been added.
  • -
  • JSON support has been added
  • -
  • Adds new time conversion functions and deprecate dateOf and unixTimestampOf.
  • -
-
-
-

3.2.0

-
    -
  • User-defined types supported.
  • -
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the -keys() function
  • -
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • -
  • Tuple types were added to hold fixed-length sets of typed positional fields.
  • -
  • DROP INDEX now supports optionally specifying a keyspace.
  • -
-
-
-

3.1.7

-
    -
  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations -of clustering columns.
  • -
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, -respectively.
  • -
-
-
-

3.1.6

-
    -
  • A new uuid() method has been added.
  • -
  • Support for DELETE ... IF EXISTS syntax.
  • -
-
-
-

3.1.5

-
    -
  • It is now possible to group clustering columns in a relation, see WHERE clauses.
  • -
  • Added support for static columns.
  • -
-
-
-

3.1.4

-
    -
  • CREATE INDEX now allows specifying options when creating CUSTOM indexes.
  • -
-
-
-

3.1.3

-
    -
  • Millisecond precision formats have been added to the timestamp parser.
  • -
-
-
-

3.1.2

-
    -
  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or keyspace/table one), you will now need to double quote them.
  • -
-
-
-

3.1.1

-
    -
  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • -
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable -will be a list of whatever type c is.
  • -
  • It is now possible to use named bind variables (using :name instead of ?).
  • -
-
-
-

3.1.0

-
    -
  • ALTER TABLE DROP option added.
  • -
  • SELECT statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
  • -
  • CREATE statements for KEYSPACE, TABLE and INDEX now supports an IF NOT EXISTS condition. -Similarly, DROP statements support a IF EXISTS condition.
  • -
  • INSERT statements optionally supports a IF NOT EXISTS condition and UPDATE supports IF conditions.
  • -
-
-
-

3.0.5

-
    -
  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626).
  • -
-
-
-

3.0.4

-
    -
  • Updated the syntax for custom secondary indexes.
  • -
  • Non-equal conditions on the partition key are now never supported, even for ordering partitioners, as this was not correct (the order was not that of the partition key type). Instead, the token method should always be used for range queries on the partition key (see WHERE clauses).
  • -
-
-
-

3.0.3

- -
-
-

3.0.2

-
    -
  • Type validation for the constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now more strict. See the data types section for details on which constant is allowed for which type.
  • -
  • The type validation fix of the previous point has led to the introduction of blob constants to allow the input of blobs. Do note that while the input of blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated and will be removed by a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • -
  • A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is -now also allowed in select clauses. See the section on functions for details.
  • -
-
-
-

3.0.1

-
    -
  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense that date strings are not valid timeuuids, and it was thus resulting in confusing behaviors. However, the following new methods have been added to help working with timeuuid: now, minTimeuuid, maxTimeuuid, dateOf and unixTimestampOf.
  • -
  • Float constants now support the exponent notation. In other words, 4.2E10 is now a valid floating point value.
  • -
-
-
-

Versioning

-

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version.

- ---- - - - - - - - - - - - - - - - - -
version | description
Major | The major version must be bumped when backward incompatible changes are introduced. This should rarely occur.
Minor | Minor version increments occur when new, but backward compatible, functionality is introduced.
Patch | The patch version is incremented when bugs are fixed.
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/ddl.html b/src/doc/3.11.5/cql/ddl.html deleted file mode 100644 index 7d8c08d41..000000000 --- a/src/doc/3.11.5/cql/ddl.html +++ /dev/null @@ -1,765 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Definition" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Definition

-

CQL stores data in tables, whose schema defines the layout of said data in the table, and those tables are grouped in keyspaces. A keyspace defines a number of options that apply to all the tables it contains, most prominent of which is the replication strategy used by the keyspace. It is generally encouraged to use one keyspace per application, and thus many clusters may define only one keyspace.

-

This section describes the statements used to create, modify, and remove those keyspaces and tables.

-
-

Common definitions

-

The names of the keyspaces and tables are defined by the following grammar:

-
-keyspace_name ::=  name
-table_name    ::=  [ keyspace_name '.' ] name
-name          ::=  unquoted_name | quoted_name
-unquoted_name ::=  re('[a-zA-Z_0-9]{1, 48}')
-quoted_name   ::=  '"' unquoted_name '"'
-
-

Both keyspace and table names should be comprised of only alphanumeric characters, cannot be empty and are limited in size to 48 characters (that limit exists mostly to avoid filenames, which may include the keyspace and table name, going over the limits of certain file systems). By default, keyspace and table names are case insensitive (myTable is equivalent to mytable) but case sensitivity can be forced by using double-quotes ("myTable" is different from mytable).

-

Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is part of. If it is not fully-qualified, the table is assumed to be in the current keyspace (see USE statement).

-

Further, the valid names for columns are simply defined as:

-
-column_name ::=  identifier
-
-

We also define the notion of statement options for use in the following section:

-
-options ::=  option ( AND option )*
-option  ::=  identifier '=' ( identifier | constant | map_literal )
-
-
-
-

CREATE KEYSPACE

-

A keyspace is created using a CREATE KEYSPACE statement:

-
-create_keyspace_statement ::=  CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options
-
-

For instance:

-
CREATE KEYSPACE Excelsior
-           WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE Excalibur
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-            AND durable_writes = false;
-
-
-

The supported options are:

- ------- - - - - - - - - - - - - - - - - - - - - - - -
name | kind | mandatory | default | description
replication | map | yes | | The replication strategy and options to use for the keyspace (see details below).
durable_writes | simple | no | true | Whether to use the commit log for updates on this keyspace (disable this option at your own risk!).
-

The replication property is mandatory and must at least contain the 'class' sub-option which defines the replication strategy class to use. The rest of the sub-options depend on what replication strategy is used. By default, Cassandra supports the following 'class':

-
    -
  • 'SimpleStrategy': A simple strategy that defines a replication factor for the whole cluster. The only sub-option supported is 'replication_factor', which defines that replication factor and is mandatory.
  • -
  • 'NetworkTopologyStrategy': A replication strategy that allows setting the replication factor independently for each data-center. The rest of the sub-options are key-value pairs where a key is a data-center name and its value is the associated replication factor.
  • -
-

Attempting to create a keyspace that already exists will return an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the keyspace already exists.

-
-
-

USE

-

The USE statement changes the current keyspace (for the connection on which it is executed). A number of objects in CQL are bound to a keyspace (tables, user-defined types, functions, …) and the current keyspace is the default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a keyspace name). A USE statement simply takes the keyspace to use as current as its argument:

-
-use_statement ::=  USE keyspace_name
-
-
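For instance, to make the keyspace created in the earlier example the current one:

USE Excelsior;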
-
-

ALTER KEYSPACE

-

An ALTER KEYSPACE statement allows modifying the options of a keyspace:

-
-alter_keyspace_statement ::=  ALTER KEYSPACE keyspace_name WITH options
-
-

For instance:

-
ALTER KEYSPACE Excelsior
-          WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-
-

The supported options are the same as for creating a keyspace.

-
-
-

DROP KEYSPACE

-

Dropping a keyspace can be done using the DROP KEYSPACE statement:

-
-drop_keyspace_statement ::=  DROP KEYSPACE [ IF EXISTS ] keyspace_name
-
-

For instance:

-
DROP KEYSPACE Excelsior;
-
-
-

Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UDTs and functions in it, and all the data contained in those tables.

-

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-

CREATE TABLE

-

Creating a new table uses the CREATE TABLE statement:

-
-create_table_statement ::=  CREATE TABLE [ IF NOT EXISTS ] table_name
-                            '('
-                                column_definition
-                                ( ',' column_definition )*
-                                [ ',' PRIMARY KEY '(' primary_key ')' ]
-                            ')' [ WITH table_options ]
-column_definition      ::=  column_name cql_type [ STATIC ] [ PRIMARY KEY]
-primary_key            ::=  partition_key [ ',' clustering_columns ]
-partition_key          ::=  column_name
-                            | '(' column_name ( ',' column_name )* ')'
-clustering_columns     ::=  column_name ( ',' column_name )*
-table_options          ::=  COMPACT STORAGE [ AND table_options ]
-                            | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ]
-                            | options
-clustering_order       ::=  column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )*
-
-

For instance:

-
CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records'
-   AND read_repair_chance = 1.0;
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-CREATE TABLE loads (
-    machine inet,
-    cpu int,
-    mtime timeuuid,
-    load float,
-    PRIMARY KEY ((machine, cpu), mtime)
-) WITH CLUSTERING ORDER BY (mtime DESC);
-
-
-

A CQL table has a name and is composed of a set of rows. Creating a table amounts to defining which columns the rows will be composed of, which of those columns compose the primary key, as well as optional options for the table.

-

Attempting to create an already existing table will return an error unless the IF NOT EXISTS directive is used. If -it is used, the statement will be a no-op if the table already exists.

-
-

Column definitions

-

Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later using an alter statement).

-

A column_definition is primarily comprised of the name of the column defined and its type, which restricts which values are accepted for that column. Additionally, a column definition can have the following modifiers:

-
-
STATIC
-
it declares the column as being a static column.
-
PRIMARY KEY
-
it declares the column as being the sole component of the primary key of the table.
-
-
-

Static columns

-

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same partition key). For instance:

-
CREATE TABLE t (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-
-INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-SELECT * FROM t;
-   pk | t | v      | s
-  ----+---+--------+-----------
-   0  | 0 | 'val0' | 'static1'
-   0  | 1 | 'val1' | 'static1'
-
-
-

As can be seen, the s value is the same (static1) for both of the rows in the partition (the partition key in that example being pk, both rows are in that same partition): the 2nd insertion has overridden the value for s.

-

The use of static columns has the following restrictions:

-
    -
  • tables with the COMPACT STORAGE option (see below) cannot use them.
  • -
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition -has only one row, and so every column is inherently static).
  • -
  • only non PRIMARY KEY columns can be static.
  • -
-
-
-
-

The Primary key

-

Within a table, a row is uniquely identified by its PRIMARY KEY, and hence all tables must define a PRIMARY KEY (and only one). A PRIMARY KEY definition is composed of one or more of the columns defined in the table. Syntactically, the primary key is defined by the keywords PRIMARY KEY followed by a comma-separated list of the column names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that column definition by the PRIMARY KEY keywords. The order of the columns in the primary key definition matters.

-

A CQL primary key is composed of 2 parts:

-
    -
  • the partition key part. It is the first component of the primary key definition. It can be a single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key; the smallest possible table definition is:

    -
    CREATE TABLE t (k text PRIMARY KEY);
    -
    -
    -
  • -
  • the clustering columns. Those are the columns after the first component of the primary key -definition, and the order of those columns define the clustering order.

    -
  • -
-

Some example of primary key definition are:

-
    -
  • PRIMARY KEY (a): a is the partition key and there is no clustering columns.
  • -
  • PRIMARY KEY (a, b, c) : a is the partition key and b and c are the clustering columns.
  • -
  • PRIMARY KEY ((a, b), c) : a and b compose the partition key (this is often called a composite partition -key) and c is the clustering column.
  • -
-
-

The partition key

-

Within a table, CQL defines the notion of a partition. A partition is simply the set of rows that share the same value for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same partition only if they have the same values for all those partition key columns. So for instance, given the following table definition and content:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    d int,
-    PRIMARY KEY ((a, b), c, d)
-);
-
-SELECT * FROM t;
-   a | b | c | d
-  ---+---+---+---
-   0 | 0 | 0 | 0    // row 1
-   0 | 0 | 1 | 1    // row 2
-   0 | 1 | 2 | 2    // row 3
-   0 | 1 | 3 | 3    // row 4
-   1 | 1 | 4 | 4    // row 5
-
-
-

row 1 and row 2 are in the same partition, row 3 and row 4 are also in the same partition (but a -different one) and row 5 is in yet another partition.

-

Note that a table always has a partition key, and that if the table has no clustering columns, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns).

-

The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be localized together in the cluster, and it is thus important to choose your partition key wisely so that rows that need to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of nodes).

-

Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to -be stored on the same set of replica node, a partition key that groups too much data can create a hotspot.

-

Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done atomically and in isolation, which is not the case across partitions.

-

The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they are.

-
-
-

The clustering columns

-

The clustering columns of a table define the clustering order for the partitions of that table. For a given partition, all the rows are physically ordered inside Cassandra by that clustering order. For instance, given:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    PRIMARY KEY (a, b, c)
-);
-
-SELECT * FROM t;
-   a | b | c
-  ---+---+---
-   0 | 0 | 4     // row 1
-   0 | 1 | 9     // row 2
-   0 | 2 | 2     // row 3
-   0 | 3 | 3     // row 4
-
-
-

then the rows (which all belong to the same partition) are all stored internally in the order of the values of their b column (the order they are displayed above). So where the partition key of the table allows grouping rows on the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the retrieval of a range of rows within a partition (for instance, in the example above, SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3) to be very efficient.

-
-
-
-

Table options

-

A CQL table has a number of options that can be set at creation (and, for most of them, altered later). These options are specified after the WITH keyword.

-

Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the COMPACT STORAGE option and the CLUSTERING ORDER option. Those, as well as the other -options of a table are described in the following sections.

-
-

Compact tables

-
-

Warning

-

Since Cassandra 3.0, compact tables have the exact same layout internally as non-compact ones (for the same schema obviously), and declaring a table compact only creates artificial limitations on the table definition and usage that are necessary to ensure backward compatibility with the deprecated Thrift API. And as COMPACT STORAGE cannot, as of Cassandra 3.11.5, be removed, it is strongly discouraged to create new tables with the COMPACT STORAGE option.

-
-

A compact table is one defined with the COMPACT STORAGE option. This option is mainly targeted towards backward -compatibility for definitions created before CQL version 3 (see www.datastax.com/dev/blog/thrift-to-cql3 for more details) and shouldn’t be used for new tables. Declaring a -table with this option creates limitations for the table which are largely arbitrary but necessary for backward -compatibility with the (deprecated) Thrift API. Amongst those limitation:

-
    -
  • a compact table cannot use collections nor static columns.
  • -
  • if a compact table has at least one clustering column, then it must have exactly one column outside of the primary -key ones. This imply you cannot add or remove columns after creation in particular.
  • -
  • a compact table is limited in the indexes it can create, and no materialized view can be created on it.
  • -
-
-
-

Reversing the clustering order

-

The clustering order of a table is defined by the clustering columns of that table. By default, that ordering is based on the natural order of those clustering columns, but the CLUSTERING ORDER option allows changing that clustering order to use the reverse natural order for some (potentially all) of the columns.

-

The CLUSTERING ORDER option takes a comma-separated list of the clustering columns, each with an ASC (for ascending, i.e. the natural order) or DESC (for descending, i.e. the reverse natural order) modifier. Note in particular that the default (if the CLUSTERING ORDER option is not used) is strictly equivalent to using the option with all clustering columns using the ASC modifier.

-

Note that this option is basically a hint for the storage engine to change the order in which it stores the row but it -has 3 visible consequences:

-
-
1. It limits which ORDER BY clauses are allowed for selects on that table. You can only order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering columns a and b and you defined WITH CLUSTERING ORDER (a DESC, b ASC), then in queries you will be allowed to use ORDER BY (a DESC, b ASC) and (reverse clustering order) ORDER BY (a ASC, b DESC) but not ORDER BY (a ASC, b ASC) (nor ORDER BY (a DESC, b DESC)). A sketch of such a table is shown after this list.
2. It also changes the default order of results when queried (if no ORDER BY is provided). Results are always returned in clustering order (within a partition).
3. It has a small performance impact on some queries, as queries in reverse clustering order are slower than the ones in forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of your columns (which is common with time series, for instance, where you often want data from the newest to the oldest), it is an optimization to declare a descending clustering order.
-
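As a sketch of the case discussed in the first point above, a hypothetical table with two clustering columns a and b, the first one in reverse order, could be declared as:

CREATE TABLE t2 (
    k int,
    a int,
    b int,
    v text,
    PRIMARY KEY (k, a, b)
) WITH CLUSTERING ORDER BY (a DESC, b ASC);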
-
-
-

Other table options

-
-

Todo

-

review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance)

-
-

A table supports the following options:

- ------ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
option | kind | default | description
comment | simple | none | A free-form, human-readable comment.
read_repair_chance | simple | 0.1 | The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpose of read repairs.
dclocal_read_repair_chance | simple | 0 | The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center as the read coordinator for the purpose of read repairs.
gc_grace_seconds | simple | 864000 | Time to wait before garbage collecting tombstones (deletion markers).
bloom_filter_fp_chance | simple | 0.00075 | The target probability of false positive of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impacts the size of bloom filters in-memory and on-disk).
default_time_to_live | simple | 0 | The default expiration time (“TTL”) in seconds for a table.
compaction | map | see below | Compaction options.
compression | map | see below | Compression options.
caching | map | see below | Caching options.
-
-
Compaction options
-

The compaction options must at least define the 'class' sub-option, which defines the compaction strategy class to use. The default supported classes are 'SizeTieredCompactionStrategy' (STCS), 'LeveledCompactionStrategy' (LCS) and 'TimeWindowCompactionStrategy' (TWCS) (the 'DateTieredCompactionStrategy' is also supported but is deprecated and 'TimeWindowCompactionStrategy' should be preferred instead). A custom strategy can be provided by specifying the full class name as a string constant.

-

All default strategies support a number of common options, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: STCS, LCS and TWCS).
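For instance, to switch an existing table to size-tiered compaction with a non-default minimum threshold (illustrative values, reusing a table from an earlier example; min_threshold is one of the common STCS sub-options):

ALTER TABLE monkeySpecies
    WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'min_threshold': 6};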

-
-
-
Compression options
-

The compression options define if and how the sstables of the table are compressed. The following sub-options are -available:

- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
Option | Default | Description
class | LZ4Compressor | The compression algorithm to use. Default compressors are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
enabled | true | Enable/disable sstable compression.
chunk_length_in_kb | 64 | On disk SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increase the minimum size of data to be read from disk for a read.
crc_check_chance | 1.0 | When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replicas. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking and to 0.5, for instance, to check them every other read.
-

For instance, to create a table with LZ4Compressor and a chunk_length_in_kb of 4KB:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
-
-
-
-
-
Caching options
-

The caching options allow configuring both the key cache and the row cache for the table. The following sub-options are available:

- ----- - - - - - - - - - - - - - - - - -
Option | Default | Description
keys | ALL | Whether to cache keys (“key cache”) for this table. Valid values are: ALL and NONE.
rows_per_partition | NONE | The amount of rows to cache per partition (“row cache”). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.
-

For instance, to create a table with both a key cache and 10 rows per partition:

-
CREATE TABLE simple (
-    id int,
-    key text,
-    value text,
-    PRIMARY KEY (key, value)
-) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10};
-
-
-
-
-
Other considerations:
-
    -
  • Adding new columns (see ALTER TABLE below) is a constant time operation. There is thus no need to try to -anticipate future usage when creating a table.
  • -
-
-
-
-
-
-

ALTER TABLE

-

Altering an existing table uses the ALTER TABLE statement:

-
-alter_table_statement   ::=  ALTER TABLE table_name alter_table_instruction
-alter_table_instruction ::=  ADD column_name cql_type ( ',' column_name cql_type )*
-                             | DROP column_name ( column_name )*
-                             | WITH options
-
-

For instance:

-
ALTER TABLE addamsFamily ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table'
-       AND read_repair_chance = 0.2;
-
-
-
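A column can likewise be dropped, for instance (reusing the table from the example above):

ALTER TABLE addamsFamily DROP gravesite;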

The ALTER TABLE statement can:

-
    -
  • Add new column(s) to the table (through the ADD instruction). Note that the primary key of a table cannot be -changed and thus newly added column will, by extension, never be part of the primary key. Also note that compact -tables have restrictions regarding column addition. Note that this is constant (in the amount of -data the cluster contains) time operation.
  • -
  • Remove column(s) from the table. This drops both the column and all its content, but note that while the column -becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings -below. Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the -cluster) time operation.
  • -
  • Change some of the table options (through the WITH instruction). The supported options are the same as when creating a table (outside of COMPACT STORAGE and CLUSTERING ORDER, which cannot be changed after creation). Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.
  • -
-
-

Warning

-

Dropping a column assumes that the timestamps used for the value of this column are “real” timestamps in microseconds. Using “real” timestamps in microseconds is the default and is strongly recommended, but as Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another convention. Please be aware that if you do so, dropping a column will not work correctly.

-
-
-

Warning

-

Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one unless the type of the dropped column was a (non-frozen) column (due to an internal technical limitation).

-
-
-
-

DROP TABLE

-

Dropping a table uses the DROP TABLE statement:

-
-drop_table_statement ::=  DROP TABLE [ IF EXISTS ] table_name
-
-
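For instance, reusing the table created in an earlier example:

DROP TABLE IF EXISTS monkeySpecies;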

Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.

-

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the -operation is a no-op.

-
-
-

TRUNCATE

-

A table can be truncated using the TRUNCATE statement:

-
-truncate_statement ::=  TRUNCATE [ TABLE ] table_name
-
-

Note that TRUNCATE TABLE foo is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the TABLE keyword can be omitted.

-
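For instance, both of the following forms are equivalent (using the table created in an earlier example):

TRUNCATE monkeySpecies;
TRUNCATE TABLE monkeySpecies;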

Truncating a table permanently removes all existing data from the table, but without removing the table itself.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/definitions.html b/src/doc/3.11.5/cql/definitions.html deleted file mode 100644 index a4a7e6355..000000000 --- a/src/doc/3.11.5/cql/definitions.html +++ /dev/null @@ -1,312 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Definitions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Definitions

-
-

Conventions

-

To aid in specifying the CQL syntax, we will use the following conventions in this document:

-
    -
  • Language rules will be given in an informal BNF variant notation. In particular, we’ll use square brackets ([ item ]) for optional items, * and + for repeated items (where + implies at least one).
  • -
  • The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to -their definition) while terminal keywords will be provided “all caps”. Note however that keywords are -Identifiers and keywords and are thus case insensitive in practice. We will also define some early construction using -regexp, which we’ll indicate with re(<some regular expression>).
  • -
  • The grammar is provided for documentation purposes and leave some minor details out. For instance, the comma on the -last column definition in a CREATE TABLE statement is optional but supported if present even though the grammar in -this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
  • -
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.
  • -
-
-
-

Identifiers and keywords

-

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token -matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

-

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in Appendix A: CQL Keywords.

-

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

-

There is a second kind of identifiers called quoted identifiers defined by enclosing an arbitrary sequence of -characters (non empty) in double-quotes("). Quoted identifiers are never keywords. Thus "select" is not a -reserved keyword and can be used to refer to a column (note that using this is particularly advised), while select -would raise a parsing error. Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case -sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches -[a-zA-Z][a-zA-Z0-9_]* is however equivalent to the unquoted identifier obtained by removing the double-quote (so -"myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the -double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.

-
-

Note

-

Quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional update, the server will respond with a result-set containing a special result named "[applied]". If you’ve declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by square brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

-
-

More formally, we have:

-
-identifier          ::=  unquoted_identifier | quoted_identifier
-unquoted_identifier ::=  re('[a-zA-Z][a-zA-Z0-9_]*')
-quoted_identifier   ::=  '"' (any character where " can appear if doubled)+ '"'
-
-
-
-

Constants

-

CQL defines the following kind of constants:

-
-constant ::=  string | integer | float | boolean | uuid | blob | NULL
-string   ::=  '\'' (any character where ' can appear if doubled)+ '\''
-              '$$' (any character other than '$$') '$$'
-integer  ::=  re('-?[0-9]+')
-float    ::=  re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY
-boolean  ::=  TRUE | FALSE
-uuid     ::=  hex{8}-hex{4}-hex{4}-hex{4}-hex{12}
-hex      ::=  re("[0-9a-fA-F]")
-blob     ::=  '0' ('x' | 'X') hex+
-
-

In other words:

-
    -
  • A string constant is an arbitrary sequence of characters enclosed by single-quote('). A single-quote -can be included by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted -Identifiers and keywords that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence -of characters by two dollar characters, in which case single-quote can be used without escaping ($$It's raining -today$$). That latter form is often used when defining user-defined functions to avoid having to -escape single-quote characters in function body (as they are more likely to occur than $$).
  • -
  • Integer, float and boolean constants are defined as expected. Note however that float allows the special NaN and Infinity constants.
  • -
  • CQL supports UUID constants.
  • -
  • Blobs content are provided in hexadecimal and prefixed by 0x.
  • -
  • The special NULL constant denotes the absence of value.
  • -
-

For how these constants are typed, see the Data Types section.

-
-
-

Terms

-

CQL has the notion of a term, which denotes the kind of values that CQL support. Terms are defined by:

-
-term          ::=  constant | literal | function_call | type_hint | bind_marker
-literal       ::=  collection_literal | udt_literal | tuple_literal
-function_call ::=  identifier '(' [ term (',' term)* ] ')'
-type_hint     ::=  '(' cql_type `)` term
-bind_marker   ::=  '?' | ':' identifier
-
-

A term is thus one of:

-
    -
  • A constant.
  • -
  • A literal for either a collection, a user-defined type or a tuple -(see the linked sections for details).
  • -
  • A function call: see the section on functions for details on which native function exists and how to define your own user-defined ones.
  • -
  • A type hint: see the related section for details.
  • -
  • A bind marker, which denotes a variable to be bound at execution time. See the section on Prepared Statements -for details. A bind marker can be either anonymous (?) or named (:some_name). The latter form provides a more -convenient way to refer to the variable for binding it and should generally be preferred.
  • -
-
-
-

Comments

-

A comment in CQL is a line beginning by either double dashes (--) or double slash (//).

-

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-
-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-
-
-
-
-

Statements

-

CQL consists of statements that can be divided in the following categories:

- -

All the statements are listed below and are described in the rest of this documentation (see links above):

-
-cql_statement                ::=  statement [ ';' ]
-statement                    ::=  ddl_statement
-                                  | dml_statement
-                                  | secondary_index_statement
-                                  | materialized_view_statement
-                                  | role_or_permission_statement
-                                  | udf_statement
-                                  | udt_statement
-                                  | trigger_statement
-ddl_statement                ::=  use_statement
-                                  | create_keyspace_statement
-                                  | alter_keyspace_statement
-                                  | drop_keyspace_statement
-                                  | create_table_statement
-                                  | alter_table_statement
-                                  | drop_table_statement
-                                  | truncate_statement
-dml_statement                ::=  select_statement
-                                  | insert_statement
-                                  | update_statement
-                                  | delete_statement
-                                  | batch_statement
-secondary_index_statement    ::=  create_index_statement
-                                  | drop_index_statement
-materialized_view_statement  ::=  create_materialized_view_statement
-                                  | drop_materialized_view_statement
-role_or_permission_statement ::=  create_role_statement
-                                  | alter_role_statement
-                                  | drop_role_statement
-                                  | grant_role_statement
-                                  | revoke_role_statement
-                                  | list_roles_statement
-                                  | grant_permission_statement
-                                  | revoke_permission_statement
-                                  | list_permissions_statement
-                                  | create_user_statement
-                                  | alter_user_statement
-                                  | drop_user_statement
-                                  | list_users_statement
-udf_statement                ::=  create_function_statement
-                                  | drop_function_statement
-                                  | create_aggregate_statement
-                                  | drop_aggregate_statement
-udt_statement                ::=  create_type_statement
-                                  | alter_type_statement
-                                  | drop_type_statement
-trigger_statement            ::=  create_trigger_statement
-                                  | drop_trigger_statement
-
-
-
-

Prepared Statements

-

CQL supports prepared statements. Prepared statements are an optimization that allows parsing a query only once while executing it multiple times with different concrete values.

-

Any statement that uses at least one bind marker (see bind_marker) will need to be prepared. After that, the statement can be executed by providing concrete values for each of its markers. The exact details of how a statement is prepared and then executed depend on the CQL driver used and you should refer to your driver documentation.

-
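For instance, the following statements use an anonymous and a named bind marker respectively; both would be prepared once and then executed with concrete userid values supplied by the driver (the users table is purely illustrative):

SELECT name, occupation FROM users WHERE userid = ?;
SELECT name, occupation FROM users WHERE userid = :id;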
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/dml.html b/src/doc/3.11.5/cql/dml.html deleted file mode 100644 index 80c5202c7..000000000 --- a/src/doc/3.11.5/cql/dml.html +++ /dev/null @@ -1,558 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Manipulation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Manipulation

-

This section describes the statements supported by CQL to insert, update, delete and query data.

-
-

SELECT

-

Querying data from tables is done using a SELECT statement:

-
-select_statement ::=  SELECT [ JSON | DISTINCT ] ( select_clause | '*' )
-                      FROM table_name
-                      [ WHERE where_clause ]
-                      [ GROUP BY group_by_clause ]
-                      [ ORDER BY ordering_clause ]
-                      [ PER PARTITION LIMIT (integer | bind_marker) ]
-                      [ LIMIT (integer | bind_marker) ]
-                      [ ALLOW FILTERING ]
-select_clause    ::=  selector [ AS identifier ] ( ',' selector [ AS identifier ] )
-selector         ::=  column_name
-                      | term
-                      | CAST '(' selector AS cql_type ')'
-                      | function_name '(' [ selector ( ',' selector )* ] ')'
-                      | COUNT '(' '*' ')'
-where_clause     ::=  relation ( AND relation )*
-relation         ::=  column_name operator term
-                      '(' column_name ( ',' column_name )* ')' operator tuple_literal
-                      TOKEN '(' column_name ( ',' column_name )* ')' operator term
-operator         ::=  '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY
-group_by_clause  ::=  column_name ( ',' column_name )*
-ordering_clause  ::=  column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )*
-
-

For instance:

-
SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT (*) AS user_count FROM users;
-
-
-

The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of the rows matching the request, where each row contains the values for the selection corresponding to the query. Additionally, functions, including aggregation ones, can be applied to the result.

-

A SELECT statement contains at least a selection clause and the name of the table on which the selection is made (note that CQL does not support joins or sub-queries, and thus a select statement only applies to a single table). In most cases, a select will also have a where clause and it can optionally have additional clauses to order or limit the results. Lastly, queries that require filtering can be allowed if the ALLOW FILTERING flag is provided.

-
-

Selection clause

-

The select_clause determines which columns need to be queried and returned in the result-set, as well as any transformations to apply to this result before returning. It consists of a comma-separated list of selectors or, alternatively, of the wildcard character (*) to select all the columns defined in the table.

-
-

Selectors

-

A selector can be one of:

-
    -
  • A column name of the table selected, to retrieve the values for that column.
  • -
  • A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the -corresponding column of the result-set will simply have the value of this term for every row returned).
  • -
  • A casting, which converts a nested selector to a (compatible) type.
  • -
  • A function call, where the arguments are selectors themselves. See the section on functions for more details.
  • -
  • The special call COUNT(*) to the COUNT function, which counts all non-null results.
  • -
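As a brief illustration, a selection clause can combine these selector forms; the example below reuses the users table from the earlier examples, and the cast is only one example of a compatible conversion:

SELECT occupation, CAST(userid AS text) FROM users WHERE userid = 199;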
-
-
-

Aliases

-

Every top-level selector can also be aliased (using AS). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:

-
// Without alias
-SELECT intAsBlob(4) FROM t;
-
-//  intAsBlob(4)
-// --------------
-//  0x00000004
-
-// With alias
-SELECT intAsBlob(4) AS four FROM t;
-
-//  four
-// ------------
-//  0x00000004
-
-
-
-

Note

-

Currently, aliases aren’t recognized anywhere else in the statement in which they are used (not in the WHERE clause, not in the ORDER BY clause, …). You must use the original column name instead.

-
-
-
-

WRITETIME and TTL function

-

Selection supports two special functions (that aren’t allowed anywhere else): WRITETIME and TTL. Both functions take only one argument, and that argument must be a column name (so, for instance, TTL(3) is invalid).

-

Those functions retrieve meta-information that is stored internally for each column, namely:

-
    -
  • the timestamp of the value of the column for WRITETIME.
  • -
  • the remaining time to live (in seconds) for the value of the column if it is set to expire (and null otherwise).
  • -
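For instance, with the users table of the earlier examples (occupation being a regular, non-primary-key column):

SELECT WRITETIME (occupation), TTL (occupation) FROM users WHERE userid = 199;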
-
-
-
-

The WHERE clause

-

The WHERE clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the PRIMARY KEY and/or have a secondary index defined on them.

-

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations on them are restricted to those that select a contiguous (for the ordering) set of rows. For instance, given:

-
CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-
-
-

The following query is allowed:

-
SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND blog_title='John''s Blog'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):

-
// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, -rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -token(-1) > token(0) in particular). Example:

-
SELECT * FROM posts
- WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-
-
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full -primary key.

-

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-
-
-

will request all rows that sort after the one having “John’s Blog” as blog_title and ‘2012-01-01’ for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which would not be the case for:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND blog_title > 'John''s Blog'
-   AND posted_at > '2012-01-01'
-
-
-

The tuple notation may also be used for IN clauses on clustering columns:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-
-
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the -map keys.
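For instance, assuming a hypothetical contacts table with a set<text> column named emails that has a secondary index on it (both names are only illustrative):

SELECT * FROM contacts WHERE emails CONTAINS 'alice@example.com';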

-
-
-

Grouping results

-

The GROUP BY option allows condensing into a single row all selected rows that share the same values for a set of columns.

-

Using the GROUP BY option, it is only possible to group rows at the partition key level or at a clustering column level. Consequently, the GROUP BY option only accepts as arguments primary key column names in the primary key order. If a primary key column is restricted by an equality restriction, it is not required to be present in the GROUP BY clause.

-

Aggregate functions will produce a separate value for each group. If no GROUP BY clause is specified, aggregate functions will produce a single value for all the rows.

-

If a column is selected without an aggregate function, in a statement with a GROUP BY, the first value encountered in each group will be returned.
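For instance, with the posts table defined above, the number of posts per user can be obtained with:

SELECT userid, COUNT(*) FROM posts GROUP BY userid;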

-
-
-

Ordering results

-

The ORDER BY clause allows selecting the order of the returned results. It takes as argument a list of column names along with the order for each column (ASC for ascending and DESC for descending; omitting the order is equivalent to ASC). Currently the possible orderings are limited by the clustering order defined on the table:

-
    -
  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • -
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
  • -
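For instance, with the posts table defined above (whose clustering columns are blog_title and posted_at in their default ascending order), the reverse ordering can be requested with:

SELECT entry_title, posted_at FROM posts
 WHERE userid = 'john doe'
 ORDER BY blog_title DESC, posted_at DESC;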
-
-
-

Limiting results

-

The LIMIT option to a SELECT statement limits the number of rows returned by a query, while the PER PARTITION LIMIT option limits the number of rows returned for a given partition by the query. Note that both types of limit can be used in the same statement.
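For instance (the limit values below are only illustrative):

SELECT * FROM posts PER PARTITION LIMIT 2 LIMIT 10;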

-
-
-

Allowing filtering

-

By default, CQL only allows select queries that don’t involve “filtering” server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

-

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (in the sense defined above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

-

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:

-
CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-
-
-

Then the following queries are valid:

-
SELECT * FROM users;
-SELECT * FROM users WHERE birth_year = 1981;
-
-
-

because in both cases, Cassandra guarantees that the performance of these queries will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the performance of the second query will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored; nevertheless, the number of nodes will always be many orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

-

However, the following query will be rejected:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-
-

because Cassandra cannot guarantee that it won’t have to scan a large amount of data even if the result of the query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you “know what you are doing”, you can force the execution of this query by using ALLOW FILTERING, and so the following query is valid:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-
-
-
-
-

INSERT

-

Inserting data for a row is done using an INSERT statement:

-
-insert_statement ::=  INSERT INTO table_name ( names_values | json_clause )
-                      [ IF NOT EXISTS ]
-                      [ USING update_parameter ( AND update_parameter )* ]
-names_values     ::=  names VALUES tuple_literal
-json_clause      ::=  JSON string [ DEFAULT ( NULL | UNSET ) ]
-names            ::=  '(' column_name ( ',' column_name )* ')'
-
-

For instance:

-
INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-      USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                              "director": "Joss Whedon",
-                              "year": 2005}';
-
-
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by -its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the -section on JSON support for more detail.

-

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means to know whether a creation or an update happened.

-

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the -insertion. But please note that using IF NOT EXISTS will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly.

-

All updates for an INSERT are applied atomically and in isolation.

-

Please refer to the UPDATE section for information on the update_parameter.

-

Also note that INSERT does not support counters, while UPDATE does.

-
-
-

UPDATE

-

Updating a row is done using an UPDATE statement:

-
-update_statement ::=  UPDATE table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      SET assignment ( ',' assignment )*
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-update_parameter ::=  ( TIMESTAMP | TTL ) ( integer | bind_marker )
-assignment       ::=  simple_selection '=' term
-                     | column_name '=' column_name ( '+' | '-' ) term
-                     | column_name '=' list_literal '+' column_name
-simple_selection ::=  column_name
-                     | column_name '[' term ']'
-                     | column_name '.' field_name
-condition        ::=  simple_selection operator term
-
-

For instance:

-
UPDATE NerdMovies USING TTL 400
-   SET director   = 'Joss Whedon',
-       main_actor = 'Nathan Fillion',
-       year       = 2005
- WHERE movie = 'Serenity';
-
-UPDATE UserActions
-   SET total = total + 2
-   WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14
-     AND action = 'click';
-
-
-

The UPDATE statement writes one or more columns for a given row in a table. The where_clause is used to -select the row to update and must include all columns composing the PRIMARY KEY. Non primary key columns are then -set using the SET keyword.

-

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through IF, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred.

-

It is however possible to use the conditions on some columns through IF, in which case the row will not be updated -unless the conditions are met. But, please note that using IF conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly.

-

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

-

Regarding the assignment:

-
    -
  • c = c + 3 is used to increment/decrement counters. The column name after the ‘=’ sign must be the same as the one before the ‘=’ sign. Note that increment/decrement is only allowed on counters, and these are the only update operations allowed on counters. See the section on counters for details.
  • -
  • id = id + <some-collection> and id[value1] = value2 are for collections, see the relevant section for details.
  • -
  • id.field = 3 is for setting the value of a field on a non-frozen user-defined types. see the relevant section for details.
  • -
-
-

Update parameters

-

The UPDATE, INSERT (and DELETE and BATCH for the TIMESTAMP) statements support the following -parameters:

-
    -
  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in -microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • -
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are -automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not -the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL -is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a -default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of null is equivalent -to inserting with a TTL of 0.
  • -
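For instance, both parameters can be combined in a single statement (the timestamp value below is only illustrative):

UPDATE NerdMovies USING TIMESTAMP 1442880000000000 AND TTL 86400
   SET main_actor = 'Nathan Fillion'
 WHERE movie = 'Serenity';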
-
-
-
-

DELETE

-

Deleting rows or parts of rows uses the DELETE statement:

-
-delete_statement ::=  DELETE [ simple_selection ( ',' simple_selection ) ]
-                      FROM table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-
-

For instance:

-
DELETE FROM NerdMovies USING TIMESTAMP 1240003134
- WHERE movie = 'Serenity';
-
-DELETE phone FROM Users
- WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-
-
-

The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, -only those columns are deleted from the row indicated by the WHERE clause. Otherwise, whole rows are removed.

-

The WHERE clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -IN operator. A range of rows may be deleted using an inequality operator (such as >=).

-

DELETE supports the TIMESTAMP option with the same semantics as in updates.

-

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

-

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT -statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly.
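For instance, with a users table keyed by a text userid (as in the BATCH example below):

DELETE FROM users WHERE userid = 'user2' IF EXISTS;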

-
-
-

BATCH

-

Multiple INSERT, UPDATE and DELETE can be executed in a single statement by grouping them through a -BATCH statement:

-
-batch_statement        ::=  BEGIN [ UNLOGGED | COUNTER ] BATCH
-                            [ USING update_parameter ( AND update_parameter )* ]
-                            modification_statement ( ';' modification_statement )*
-                            APPLY BATCH
-modification_statement ::=  insert_statement | update_statement | delete_statement
-
-

For instance:

-
BEGIN BATCH
-   INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-   DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-
-
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

-
    -
  • It saves network round-trips between the client and the server (and sometimes between the server coordinator and the -replicas) when batching multiple updates.
  • -
  • All updates in a BATCH belonging to a given partition key are performed in isolation.
  • -
  • By default, all operations in the batch are performed as logged, to ensure all mutations eventually complete (or -none will). See the notes on UNLOGGED batches for more details.
  • -
-

Note that:

-
    -
  • BATCH statements may only contain UPDATE, INSERT and DELETE statements (not other batches for instance).
  • -
  • Batches are not a full analogue for SQL transactions.
  • -
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp -(either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra’s conflict -resolution procedure in the case of timestamp ties, operations may -be applied in an order that is different from the order they are listed in the BATCH statement. To force a -particular operation ordering, you must specify per-operation timestamps.
  • -
  • A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
  • -
-
-

UNLOGGED batches

-

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition).

-

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.
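For instance (reusing the users table from the example above; the values are only illustrative):

BEGIN UNLOGGED BATCH
   INSERT INTO users (userid, password) VALUES ('user5', 'ch@ngem3d');
   INSERT INTO users (userid, password) VALUES ('user6', 'ch@ngem3e');
APPLY BATCH;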

-
-
-

COUNTER batches

-

Use the COUNTER option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent.
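For instance, with the UserActions counter table from the UPDATE examples:

BEGIN COUNTER BATCH
   UPDATE UserActions SET total = total + 2 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';
   UPDATE UserActions SET total = total + 1 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'scroll';
APPLY BATCH;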

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/functions.html b/src/doc/3.11.5/cql/functions.html deleted file mode 100644 index 929050ec1..000000000 --- a/src/doc/3.11.5/cql/functions.html +++ /dev/null @@ -1,666 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Functions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Functions

-

CQL supports 2 main categories of functions:

-
    -
  • the scalar functions, which simply take a number of values and produce an output with it.
  • -
  • the aggregate functions, which are used to aggregate multiple rows results from a -SELECT statement.
  • -
-

In both cases, CQL provides a number of native “hard-coded” functions as well as the ability to create new user-defined -functions.

-
-

Note

-

The use of user-defined functions is disabled by default for security concerns (even when enabled, the execution of user-defined functions is sandboxed and a “rogue” function should not be allowed to do evil, but no sandbox is perfect, so using user-defined functions is opt-in). See the enable_user_defined_functions setting in cassandra.yaml to enable them.

-
-

A function is identified by its name:

-
-function_name ::=  [ keyspace_name '.' ] name
-
-
-

Scalar functions

-
-

Native functions

-
-

Cast

-

The cast function can be used to convert one native datatype to another.

-

The following table describes the conversions supported by the cast function. Cassandra will silently ignore any -cast converting a datatype into its own datatype.

From      | To
ascii     | text, varchar
bigint    | tinyint, smallint, int, float, double, decimal, varint, text, varchar
boolean   | text, varchar
counter   | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
date      | timestamp
decimal   | tinyint, smallint, int, bigint, float, double, varint, text, varchar
double    | tinyint, smallint, int, bigint, float, decimal, varint, text, varchar
float     | tinyint, smallint, int, bigint, double, decimal, varint, text, varchar
inet      | text, varchar
int       | tinyint, smallint, bigint, float, double, decimal, varint, text, varchar
smallint  | tinyint, int, bigint, float, double, decimal, varint, text, varchar
time      | text, varchar
timestamp | date, text, varchar
timeuuid  | timestamp, date, text, varchar
tinyint   | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
uuid      | text, varchar
varint    | tinyint, smallint, int, bigint, float, double, decimal, text, varchar
-

The conversions rely strictly on Java’s semantics. For example, the double value 1 will be converted to the text value -‘1.0’. For instance:

-
SELECT avg(cast(count as double)) FROM myTable
-
-
-
-
-

Token

-

The token function computes the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

-

The types of the arguments of the token function depend on the types of the partition key columns. The return type depends on the partitioner in use:

-
    -
  • For Murmur3Partitioner, the return type is bigint.
  • -
  • For RandomPartitioner, the return type is varint.
  • -
  • For ByteOrderedPartitioner, the return type is blob.
  • -
-

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:

-
CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-)
-
-
-

then the token function will take a single argument of type text (in that case, the partition key is userid; there are no clustering columns, so the partition key is the same as the primary key), and the return type will be bigint.
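For instance, with the users table just defined:

SELECT token(userid), username FROM users;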

-
-
-

Uuid

-

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or -UPDATE statements.
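For instance, assuming a hypothetical sessions table whose primary key session_id is of type uuid (both names are only illustrative):

INSERT INTO sessions (session_id, username) VALUES (uuid(), 'alice');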

-
-
-

Timeuuid functions

-
-
now
-

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time where -the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in -WHERE clauses. For instance, a query of the form:

-
SELECT * FROM myTable WHERE t = now()
-
-
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

-
-
-
minTimeuuid and maxTimeuuid
-

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having timestamp t. So for instance:

-
SELECT * FROM myTable
- WHERE t > maxTimeuuid('2013-01-01 00:05+0000')
-   AND t < minTimeuuid('2013-02-02 10:00+0000')
-
-
-

will select all rows where the timeuuid column t is strictly older than '2013-01-01 00:05+0000' but strictly -younger than '2013-02-02 10:00+0000'. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still -not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > -maxTimeuuid('2013-01-01 00:05+0000').

-
-

Note

-

We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the time-based UUID generation process specified by RFC 4122. In particular, the values returned by these two methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

-
-
-
-
-

Time conversion functions

-

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native -type.

Function name   | Input type | Description
toDate          | timeuuid   | Converts the timeuuid argument into a date type
toDate          | timestamp  | Converts the timestamp argument into a date type
toTimestamp     | timeuuid   | Converts the timeuuid argument into a timestamp type
toTimestamp     | date       | Converts the date argument into a timestamp type
toUnixTimestamp | timeuuid   | Converts the timeuuid argument into a bigInt raw value
toUnixTimestamp | timestamp  | Converts the timestamp argument into a bigInt raw value
toUnixTimestamp | date       | Converts the date argument into a bigInt raw value
dateOf          | timeuuid   | Similar to toTimestamp(timeuuid) (DEPRECATED)
unixTimestampOf | timeuuid   | Similar to toUnixTimestamp(timeuuid) (DEPRECATED)
-
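For instance, with the events table from the SELECT examples (assuming its time column is a timestamp and event_type is its partition key):

SELECT toDate(time), toUnixTimestamp(time) FROM events WHERE event_type = 'myEvent';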
-
-

Blob conversion functions

-

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL (a notable exception is blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a blob argument and converts it back to a value of type type. So, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.

-
-
-
-

User-defined functions

-

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath.

-

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

-

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

-
CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-
-
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing.

-

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types.

-

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

-

Note that you can use the “double dollar-sign” syntax ($$ ... $$) to enclose the UDF source code. For example:

-
CREATE FUNCTION some_function ( arg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS int
-    LANGUAGE java
-    AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-
-CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen<custom_type> )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$ return udtarg.getString("txt"); $$;
-
-
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

-

The implicitly available udfContext field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:

-
CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( somearg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS custom_type
-    LANGUAGE java
-    AS $$
-        UDTValue udt = udfContext.newReturnUDTValue();
-        udt.setString("txt", "some string");
-        udt.setInt("i", 42);
-        return udt;
-    $$;
-
-
-

The definition of the UDFContext interface can be found in the Apache Cassandra source code for -org.apache.cassandra.cql3.functions.UDFContext.

-
public interface UDFContext
-{
-    UDTValue newArgUDTValue(String argName);
-    UDTValue newArgUDTValue(int argNum);
-    UDTValue newReturnUDTValue();
-    UDTValue newUDTValue(String udtName);
-    TupleValue newArgTupleValue(String argName);
-    TupleValue newArgTupleValue(int argNum);
-    TupleValue newReturnTupleValue();
-    TupleValue newTupleValue(String cqlDefinition);
-}
-
-
-

Java UDFs already have some imports for common interfaces and classes defined. These imports are:

-
import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.cassandra.cql3.functions.UDFContext;
-import com.datastax.driver.core.TypeCodec;
-import com.datastax.driver.core.TupleValue;
-import com.datastax.driver.core.UDTValue;
-
-
-

Please note, that these convenience imports are not available for script UDFs.

-
-

CREATE FUNCTION

-

Creating a new user-defined function uses the CREATE FUNCTION statement:

-
-create_function_statement ::=  CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS]
-                                   function_name '(' arguments_declaration ')'
-                                   [ CALLED | RETURNS NULL ] ON NULL INPUT
-                                   RETURNS cql_type
-                                   LANGUAGE identifier
-                                   AS string
-arguments_declaration     ::=  identifier cql_type ( ',' identifier cql_type )*
-
-

For instance:

-
CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list)
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int)
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-
-

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with -the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already -exists.

-

If the optional IF NOT EXISTS keywords are used, the function will -only be created if another function with the same signature does not -exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

Behavior on invocation with null values must be defined for each -function. There are two options:

-
    -
  1. RETURNS NULL ON NULL INPUT declares that the function will always -return null if any of the input arguments is null.
  2. -
  3. CALLED ON NULL INPUT declares that the function will always be -executed.
  4. -
-
-
Function Signature
-

Signatures are used to distinguish individual functions. The signature consists of:

-
    -
  1. The fully qualified function name - i.e keyspace plus function-name
  2. -
  3. The concatenated list of all argument types
  4. -
-

Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules.

-

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. -the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the -system keyspaces.

-
-
-
-

DROP FUNCTION

-

Dropping a function uses the DROP FUNCTION statement:

-
-drop_function_statement ::=  DROP FUNCTION [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-arguments_signature     ::=  cql_type ( ',' cql_type )*
-
-

For instance:

-
DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-
-
-

You must specify the argument types (arguments_signature) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions).

-

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists, but does not throw an error if it doesn’t.

-
-
-
-
-

Aggregate functions

-

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.

-

If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query.

-
-

Native aggregates

-
-

Count

-

The count function can be used to count the rows returned by a query. Example:

-
SELECT COUNT (*) FROM plays;
-SELECT COUNT (1) FROM plays;
-
-
-

It can also be used to count the non-null values of a given column:

-
SELECT COUNT (scores) FROM plays;
-
-
-
-
-

Max and Min

-

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:

-
SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake';
-
-
-
-
-

Sum

-

The sum function can be used to sum up all the values returned by a query for a given column. For instance:

-
SELECT SUM (players) FROM plays;
-
-
-
-
-

Avg

-

The avg function can be used to compute the average of all the values returned by a query for a given column. For -instance:

-
SELECT AVG (players) FROM plays;
-
-
-
-
-
-

User-Defined Aggregates

-

User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -count, min, and max.

-

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first -argument of the state function must have type STYPE. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with last -state value as its argument.

-

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate).

-

User-defined aggregates can be used in SELECT statements.

-

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE -statement):

-
CREATE OR REPLACE FUNCTION averageState(state tuple<int,bigint>, val int)
-    CALLED ON NULL INPUT
-    RETURNS tuple<int,bigint>
-    LANGUAGE java
-    AS $$
-        if (val != null) {
-            state.setInt(0, state.getInt(0)+1);
-            state.setLong(1, state.getLong(1)+val.intValue());
-        }
-        return state;
-    $$;
-
-CREATE OR REPLACE FUNCTION averageFinal (state tuple<int,bigint>)
-    CALLED ON NULL INPUT
-    RETURNS double
-    LANGUAGE java
-    AS $$
-        double r = 0;
-        if (state.getInt(0) == 0) return null;
-        r = state.getLong(1);
-        r /= state.getInt(0);
-        return Double.valueOf(r);
-    $$;
-
-CREATE OR REPLACE AGGREGATE average(int)
-    SFUNC averageState
-    STYPE tuple<int,bigint>
-    FINALFUNC averageFinal
-    INITCOND (0, 0);
-
-CREATE TABLE atable (
-    pk int PRIMARY KEY,
-    val int
-);
-
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-
-SELECT average(val) FROM atable;
-
-
-
-

CREATE AGGREGATE

-

Creating (or replacing) a user-defined aggregate function uses the CREATE AGGREGATE statement:

-
-create_aggregate_statement ::=  CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ]
-                                    function_name '(' arguments_signature ')'
-                                    SFUNC function_name
-                                    STYPE cql_type
-                                    [ FINALFUNC function_name ]
-                                    [ INITCOND term ]
-
-

See above for a complete example.

-

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one -with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature -already exists.

-

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

STYPE defines the type of the state value and must be specified.

-

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null -INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

-

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the -state function must match STYPE. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called -with null.

-

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with -type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS -NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

-

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is -defined, it is the return type of that function.

-
-
-

DROP AGGREGATE

-

Dropping a user-defined aggregate function uses the DROP AGGREGATE statement:

-
-drop_aggregate_statement ::=  DROP AGGREGATE [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-
-

For instance:

-
DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-
-
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates).

-

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist.

-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/index.html b/src/doc/3.11.5/cql/index.html deleted file mode 100644 index d31b92129..000000000 --- a/src/doc/3.11.5/cql/index.html +++ /dev/null @@ -1,239 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "The Cassandra Query Language (CQL)" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

The Cassandra Query Language (CQL)

-

This document describes the Cassandra Query Language (CQL) [1]. Note that this document describes the latest version of the language. However, the changes section provides the differences between the versions of CQL.

-

CQL offers a model close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL. But please note that, as such, they do not refer to the concept of rows and columns found in the deprecated Thrift API (and earlier versions 1 and 2 of CQL).

- - - - - - -
[1] Technically, this document describes CQL version 3, which is not backward compatible with CQL versions 1 and 2 (which have been deprecated and removed) and differs from them in numerous ways.
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/indexes.html b/src/doc/3.11.5/cql/indexes.html deleted file mode 100644 index 3e9cc811d..000000000 --- a/src/doc/3.11.5/cql/indexes.html +++ /dev/null @@ -1,168 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Secondary Indexes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Secondary Indexes

-

CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by:

-
-index_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE INDEX

-

Creating a secondary index on a table uses the CREATE INDEX statement:

-
-create_index_statement ::=  CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ index_name ]
-                                ON table_name '(' index_identifier ')'
-                                [ USING string [ WITH OPTIONS = map_literal ] ]
-index_identifier       ::=  column_name
-                           | ( KEYS | VALUES | ENTRIES | FULL ) '(' column_name ')'
-
-

For instance:

-
CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-
-
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time.

-

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it -is used, the statement will be a no-op if the index already exists.

-
-

Indexes on Map Keys

-

When creating an index on a map, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.
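For instance, with the index on keys(favs) created in the examples above (the key value below is only illustrative), the following query becomes possible:

SELECT * FROM users WHERE favs CONTAINS KEY 'movies';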

-
-
-
-

DROP INDEX

-

Dropping a secondary index uses the DROP INDEX statement:

-
-drop_index_statement ::=  DROP INDEX [ IF EXISTS ] index_name
-
-

The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index.

-

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/json.html b/src/doc/3.11.5/cql/json.html deleted file mode 100644 index e919f6a5a..000000000 --- a/src/doc/3.11.5/cql/json.html +++ /dev/null @@ -1,315 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "JSON Support" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

JSON Support

-

Cassandra 2.2 introduces JSON support to SELECT and INSERT -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents.

-
-

SELECT JSON

-

With SELECT statements, the JSON keyword can be used to return each row as a single JSON encoded map. The -remainder of the SELECT statement behavior is the same.

-

The result map keys are the same as the column names in a normal result set. For example, a statement like SELECT JSON a, ttl(b) FROM ... would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, SELECT JSON myColumn FROM ... would result in a map key "\"myColumn\"" (note the escaped quotes).

-

The map values will be JSON-encoded representations (as described below) of the result set values.

-
-
-

INSERT JSON

-

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single -row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named “myKey” and “value”, you would do the following:

-
INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-
-

By default (or if DEFAULT NULL is explicitly used), a column omitted from the JSON map will be set to NULL, meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). Alternatively, if the DEFAULT UNSET directive is used after the value, omitted column values will be left unset, meaning that pre-existing values for those columns will be preserved.
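For instance, with the table used in the example above:

INSERT INTO mytable JSON '{ "\"myKey\"": 0 }' DEFAULT UNSET;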

-
-
-

JSON Encoding of Cassandra Data Types

-

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string -representation of the collection.

-

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() -arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and -fromJson()):

Type      | Formats accepted        | Return format | Notes
ascii     | string                  | string        | Uses JSON’s \u character escape
bigint    | integer, string         | integer       | String must be valid 64 bit integer
blob      | string                  | string        | String should be 0x followed by an even number of hex digits
boolean   | boolean, string         | boolean       | String must be “true” or “false”
date      | string                  | string        | Date in format YYYY-MM-DD, timezone UTC
decimal   | integer, float, string  | float         | May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder
double    | integer, float, string  | float         | String must be valid integer or float
float     | integer, float, string  | float         | String must be valid integer or float
inet      | string                  | string        | IPv4 or IPv6 address
int       | integer, string         | integer       | String must be valid 32 bit integer
list      | list, string            | list          | Uses JSON’s native list representation
map       | map, string             | map           | Uses JSON’s native map representation
smallint  | integer, string         | integer       | String must be valid 16 bit integer
set       | list, string            | list          | Uses JSON’s native list representation
text      | string                  | string        | Uses JSON’s \u character escape
time      | string                  | string        | Time of day in format HH-MM-SS[.fffffffff]
timestamp | integer, string         | string        | A timestamp. Strings constant allows to input timestamps as dates. Datestamps with format YYYY-MM-DD HH:MM:SS.SSS are returned.
timeuuid  | string                  | string        | Type 1 UUID. See constant for the UUID format
tinyint   | integer, string         | integer       | String must be valid 8 bit integer
tuple     | list, string            | list          | Uses JSON’s native list representation
UDT       | map, string             | map           | Uses JSON’s native map representation with field names as keys
uuid      | string                  | string        | See constant for the UUID format
varchar   | string                  | string        | Uses JSON’s \u character escape
varint    | integer, string         | integer       | Variable length; may overflow 32 or 64 bit integers in client-side decoder
-
-
-

The fromJson() Function

-

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used -in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or -SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.
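For instance, with the mytable table used above (whose columns are the case-sensitive "myKey" and value):

INSERT INTO mytable ("myKey", value) VALUES (0, fromJson('1'));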

-
-
-

The toJson() Function

-

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used -in the selection clause of a SELECT statement.
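For instance, with the mytable table used above:

SELECT toJson("myKey"), toJson(value) FROM mytable;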

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/mvs.html b/src/doc/3.11.5/cql/mvs.html deleted file mode 100644 index 875ba76d6..000000000 --- a/src/doc/3.11.5/cql/mvs.html +++ /dev/null @@ -1,241 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Materialized Views" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Materialized Views

-

Materialized view names are defined by:

-
-view_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE MATERIALIZED VIEW

-

You can create a materialized view on a table using a CREATE MATERIALIZED VIEW statement:

-
-create_materialized_view_statement ::=  CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] view_name AS
-                                            select_statement
-                                            PRIMARY KEY '(' primary_key ')'
-                                            WITH table_options
-
-

For instance:

-
CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT * FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-
-
-

The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which -corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view.

-

Creating a materialized view has 3 main parts: the select statement that restricts the data included in the view, the primary key definition for the view, and the options of the view.

- -

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is -used. If it is used, the statement will be a no-op if the materialized view already exists.

-
-

MV select statement

-

The select statement of a materialized view creation defines which rows of the base table are included in the view. That statement is limited in a number of ways:

-
    -
  • the selection is limited to columns of the base table. In other words, you can’t use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can however use * as a shortcut for selecting all columns. Further, static columns cannot be included in a materialized view (which means SELECT * isn’t allowed if the base table has static columns).
  • -
  • the WHERE clause has the following restrictions:
      -
    • it cannot include any bind_marker.
    • -
    • the columns that are not part of the base table primary key can only be restricted by an IS NOT NULL -restriction. No other restriction is allowed.
    • -
    • as the columns that are part of the view primary key cannot be null, they must always be at least restricted by a -IS NOT NULL restriction (or any other restriction, but they must have one).
    • -
    -
  • -
  • it cannot have an ordering clause, a limit, or ALLOW FILTERING.
  • -
-
-
-

MV primary key

-

A view must have a primary key and that primary key must conform to the following restrictions:

-
    -
  • it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to exactly one row of the base table.
  • -
  • it can only contain a single column that is not a primary key column in the base table.
  • -
-

So for instance, given the following base table definition:

-
CREATE TABLE t (
-    k int,
-    c1 int,
-    c2 int,
-    v1 int,
-    v2 int,
-    PRIMARY KEY (k, c1, c2)
-)
-
-
-

then the following view definitions are allowed:

-
CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, k, c2)
-
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (v1, k, c1, c2)
-
-
-

but the following ones are not allowed:

-
// Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-    PRIMARY KEY (v1, v2, k, c1, c2)
-
-// Error: must include k in the primary as it's a base table primary key column
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, c2)
-
-
-
-
-

MV options

-

A materialized view is internally implemented by a table and, as such, creating a MV allows the same options as creating a table.

-
-
-
-

ALTER MATERIALIZED VIEW

-

After creation, you can alter the options of a materialized view using the ALTER MATERIALIZED VIEW statement:

-
-alter_materialized_view_statement ::=  ALTER MATERIALIZED VIEW view_name WITH table_options
-
-

The options that can be updated are the same as at creation time and thus the same as for tables.

-
-
-

DROP MATERIALIZED VIEW

-

Dropping a materialized view uses the DROP MATERIALIZED VIEW statement:

-
-drop_materialized_view_statement ::=  DROP MATERIALIZED VIEW [ IF EXISTS ] view_name;
-
-

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/security.html b/src/doc/3.11.5/cql/security.html deleted file mode 100644 index f87b6ccb3..000000000 --- a/src/doc/3.11.5/cql/security.html +++ /dev/null @@ -1,704 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-
-

Database Roles

-

CQL uses database roles to represent users and groups of users. Syntactically, a role is defined by:

-
-role_name ::=  identifier | string
-
-
-

CREATE ROLE

-

Creating a role uses the CREATE ROLE statement:

-
-create_role_statement ::=  CREATE ROLE [ IF NOT EXISTS ] role_name
-                               [ WITH role_options ]
-role_options          ::=  role_option ( AND role_option )*
-role_option           ::=  PASSWORD '=' string
-                          | LOGIN '=' boolean
-                          | SUPERUSER '=' boolean
-                          | OPTIONS '=' map_literal
-
-

For instance:

-
CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-
-
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

-

Permissions on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is -not.

-

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role.

-

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

-

Role names should be quoted if they contain non-alphanumeric characters.
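For instance (the role name below is made up for illustration):

CREATE ROLE 'read-only@analytics' WITH LOGIN = true;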

-
-

Setting credentials for internal authentication

-

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single -quotation marks.

-

If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD -clause is not necessary.

-
-
-

Creating a role conditionally

-

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. -If the option is used and the role exists, the statement is a no-op:

-
CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-
-
-
-
-
-

ALTER ROLE

-

Altering the options of a role uses the ALTER ROLE statement:

-
-alter_role_statement ::=  ALTER ROLE role_name WITH role_options
-
-

For instance:

-
ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-
-
-

Conditions on executing ALTER ROLE statements:

-
    -
  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • -
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • -
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • -
  • To modify properties of a role, the client must be granted ALTER permission on that role
  • -
-
-
-

DROP ROLE

-

Dropping a role uses the DROP ROLE statement:

-
-drop_role_statement ::=  DROP ROLE [ IF EXISTS ] role_name
-
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.

-

Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is -used. If the option is used and the role does not exist the statement is a no-op.
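For instance, reusing the roles created above (the second statement is a no-op once the role is gone):

DROP ROLE new_role;
DROP ROLE IF EXISTS new_role;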

-
-
-

GRANT ROLE

-

Granting a role to another uses the GRANT ROLE statement:

-
-grant_role_statement ::=  GRANT role_name TO role_name
-
-

For instance:

-
GRANT report_writer TO alice;
-
-
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also -acquired by alice.

-

Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:

-
GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
-GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-
-
-
-
-

REVOKE ROLE

-

Revoking a role uses the REVOKE ROLE statement:

-
-revoke_role_statement ::=  REVOKE role_name FROM role_name
-
-

For instance:

-
REVOKE report_writer FROM alice;
-
-
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the -report_writer role are also revoked.

-
-
-

LIST ROLES

-

All the known roles (in the system or granted to a specific role) can be listed using the LIST ROLES statement:

-
-list_roles_statement ::=  LIST ROLES [ OF role_name ] [ NORECURSIVE ]
-
-

For instance:

-
LIST ROLES;
-
-
-

returns all known roles in the system; this requires DESCRIBE permission on the database roles resource. And:

-
LIST ROLES OF alice;
-
-
-

enumerates all roles granted to alice, including those transitively acquired. But:

-
LIST ROLES OF bob NORECURSIVE
-
-
-

lists all roles directly granted to bob without including any of the transitively acquired ones.

-
-
-
-

Users

-

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -USER. For backward compatibility, the legacy syntax has been preserved with USER centric statements becoming -synonyms for the ROLE based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role.

-
-

CREATE USER

-

Creating a user uses the CREATE USER statement:

-
-create_user_statement ::=  CREATE USER [ IF NOT EXISTS ] role_name [ WITH PASSWORD string ] [ user_option ]
-user_option           ::=  SUPERUSER | NOSUPERUSER
-
-

For instance:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-
-
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of -statements are equivalent:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-
-
-
-

ALTER USER

-

Altering the options of a user uses the ALTER USER statement:

-
-alter_user_statement ::=  ALTER USER role_name [ WITH PASSWORD string ] [ user_option ]
-
-

For instance:

-
ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-
-
-
-
-

DROP USER

-

Dropping a user uses the DROP USER statement:

-
-drop_user_statement ::=  DROP USER [ IF EXISTS ] role_name
-
-
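For instance, reusing the users created above (the second statement is a no-op once the user is gone):

DROP USER bob;
DROP USER IF EXISTS bob;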
-
-

LIST USERS

-

Existing users can be listed using the LIST USERS statement:

-
-list_users_statement ::=  LIST USERS
-
-

Note that this statement is equivalent to:

-
LIST ROLES;
-
-
-

but only roles with the LOGIN privilege are included in the output.

-
-
-
-

Data Control

-
-

Permissions

-

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically:

-
    -
  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> -TABLE.
  • -
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • -
  • Resources representing roles have the structure ALL ROLES -> ROLE
  • -
  • Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ALL MBEANS -> -MBEAN
  • -
-

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting -a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace.

-

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes.

-

The full set of available permissions is:

-
    -
  • CREATE
  • -
  • ALTER
  • -
  • DROP
  • -
  • SELECT
  • -
  • MODIFY
  • -
  • AUTHORIZE
  • -
  • DESCRIBE
  • -
  • EXECUTE
  • -
-

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context of functions or mbeans; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT a permission on a resource to which it cannot be applied results in an error response. The following illustrates which permissions can be granted on which types of resource, and which statements are enabled by that permission.

Permission | Resource | Operations
CREATE | ALL KEYSPACES | CREATE KEYSPACE and CREATE TABLE in any keyspace
CREATE | KEYSPACE | CREATE TABLE in specified keyspace
CREATE | ALL FUNCTIONS | CREATE FUNCTION in any keyspace and CREATE AGGREGATE in any keyspace
CREATE | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE in specified keyspace
CREATE | ALL ROLES | CREATE ROLE
ALTER | ALL KEYSPACES | ALTER KEYSPACE and ALTER TABLE in any keyspace
ALTER | KEYSPACE | ALTER KEYSPACE and ALTER TABLE in specified keyspace
ALTER | TABLE | ALTER TABLE
ALTER | ALL FUNCTIONS | CREATE FUNCTION and CREATE AGGREGATE: replacing any existing
ALTER | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE: replacing existing in specified keyspace
ALTER | FUNCTION | CREATE FUNCTION and CREATE AGGREGATE: replacing existing
ALTER | ALL ROLES | ALTER ROLE on any role
ALTER | ROLE | ALTER ROLE
DROP | ALL KEYSPACES | DROP KEYSPACE and DROP TABLE in any keyspace
DROP | KEYSPACE | DROP TABLE in specified keyspace
DROP | TABLE | DROP TABLE
DROP | ALL FUNCTIONS | DROP FUNCTION and DROP AGGREGATE in any keyspace
DROP | ALL FUNCTIONS IN KEYSPACE | DROP FUNCTION and DROP AGGREGATE in specified keyspace
DROP | FUNCTION | DROP FUNCTION
DROP | ALL ROLES | DROP ROLE on any role
DROP | ROLE | DROP ROLE
SELECT | ALL KEYSPACES | SELECT on any table
SELECT | KEYSPACE | SELECT on any table in specified keyspace
SELECT | TABLE | SELECT on specified table
SELECT | ALL MBEANS | Call getter methods on any mbean
SELECT | MBEANS | Call getter methods on any mbean matching a wildcard pattern
SELECT | MBEAN | Call getter methods on named mbean
MODIFY | ALL KEYSPACES | INSERT, UPDATE, DELETE and TRUNCATE on any table
MODIFY | KEYSPACE | INSERT, UPDATE, DELETE and TRUNCATE on any table in specified keyspace
MODIFY | TABLE | INSERT, UPDATE, DELETE and TRUNCATE on specified table
MODIFY | ALL MBEANS | Call setter methods on any mbean
MODIFY | MBEANS | Call setter methods on any mbean matching a wildcard pattern
MODIFY | MBEAN | Call setter methods on named mbean
AUTHORIZE | ALL KEYSPACES | GRANT PERMISSION and REVOKE PERMISSION on any table
AUTHORIZE | KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION on any table in specified keyspace
AUTHORIZE | TABLE | GRANT PERMISSION and REVOKE PERMISSION on specified table
AUTHORIZE | ALL FUNCTIONS | GRANT PERMISSION and REVOKE PERMISSION on any function
AUTHORIZE | ALL FUNCTIONS IN KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION in specified keyspace
AUTHORIZE | FUNCTION | GRANT PERMISSION and REVOKE PERMISSION on specified function
AUTHORIZE | ALL MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean
AUTHORIZE | MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean matching a wildcard pattern
AUTHORIZE | MBEAN | GRANT PERMISSION and REVOKE PERMISSION on named mbean
AUTHORIZE | ALL ROLES | GRANT ROLE and REVOKE ROLE on any role
AUTHORIZE | ROLES | GRANT ROLE and REVOKE ROLE on specified roles
DESCRIBE | ALL ROLES | LIST ROLES on all roles or only roles granted to another, specified role
DESCRIBE | ALL MBEANS | Retrieve metadata about any mbean from the platform’s MBeanServer
DESCRIBE | MBEANS | Retrieve metadata about any mbean matching a wildcard pattern from the platform’s MBeanServer
DESCRIBE | MBEAN | Retrieve metadata about a named mbean from the platform’s MBeanServer
EXECUTE | ALL FUNCTIONS | SELECT, INSERT and UPDATE using any function, and use of any function in CREATE AGGREGATE
EXECUTE | ALL FUNCTIONS IN KEYSPACE | SELECT, INSERT and UPDATE using any function in specified keyspace and use of any function in keyspace in CREATE AGGREGATE
EXECUTE | FUNCTION | SELECT, INSERT and UPDATE using specified function and use of the function in CREATE AGGREGATE
EXECUTE | ALL MBEANS | Execute operations on any mbean
EXECUTE | MBEANS | Execute operations on any mbean matching a wildcard pattern
EXECUTE | MBEAN | Execute operations on named mbean
-
-
-

GRANT PERMISSION

-

Granting a permission uses the GRANT PERMISSION statement:

-
-grant_permission_statement ::=  GRANT permissions ON resource TO role_name
-permissions                ::=  ALL [ PERMISSIONS ] | permission [ PERMISSION ]
-permission                 ::=  CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-resource                   ::=  ALL KEYSPACES
-                               | KEYSPACE keyspace_name
-                               | [ TABLE ] table_name
-                               | ALL ROLES
-                               | ROLE role_name
-                               | ALL FUNCTIONS [ IN KEYSPACE keyspace_name ]
-                               | FUNCTION function_name '(' [ cql_type ( ',' cql_type )* ] ')'
-                               | ALL MBEANS
-                               | ( MBEAN | MBEANS ) string
-
-

For instance:

-
GRANT SELECT ON ALL KEYSPACES TO data_reader;
-
-
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all -keyspaces:

-
GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-
-
-

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace:

-
GRANT DROP ON keyspace1.table1 TO schema_owner;
-
-
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1:

-
GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-
-
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries -which use the function keyspace1.user_function( int ):

-
GRANT DESCRIBE ON ALL ROLES TO role_admin;
-
-
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST ROLES statement.

-
-

GRANT ALL

-

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target -resource.
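For instance, an illustrative sketch (manager is a made-up role name):

GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO manager;

Per the table above, this grants manager every permission applicable to a keyspace (CREATE, ALTER, DROP, SELECT, MODIFY and AUTHORIZE) on keyspace1.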

-
-
-

Automatic Granting

-

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or CREATE ROLE statement, the creator (the role with which the database user who issues the statement is identified) is automatically granted all applicable permissions on the new resource.

-
-
-
-

REVOKE PERMISSION

-

Revoking a permission from a role uses the REVOKE PERMISSION statement:

-
-revoke_permission_statement ::=  REVOKE permissions ON resource FROM role_name
-
-

For instance:

-
REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-
-
-
-
-

LIST PERMISSIONS

-

Listing granted permissions uses the LIST PERMISSIONS statement:

-
-list_permissions_statement ::=  LIST permissions [ ON resource ] [ OF role_name [ NORECURSIVE ] ]
-
-

For instance:

-
LIST ALL PERMISSIONS OF alice;
-
-
-

Show all permissions granted to alice, including those acquired transitively from any other roles:

-
LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-
-
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. -For example, should bob have ALTER permission on keyspace1, that would be included in the results of this -query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to -bob or one of bob’s roles:

-
LIST SELECT PERMISSIONS OF carlos;
-
-
-

Show any permissions granted to carlos or any of carlos’s roles, limited to SELECT permissions on any -resource.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/triggers.html b/src/doc/3.11.5/cql/triggers.html deleted file mode 100644 index 21fd46496..000000000 --- a/src/doc/3.11.5/cql/triggers.html +++ /dev/null @@ -1,153 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Triggers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Triggers

-

Triggers are identified by a name defined by:

-
-trigger_name ::=  identifier
-
-
-

CREATE TRIGGER

-

Creating a new trigger uses the CREATE TRIGGER statement:

-
-create_trigger_statement ::=  CREATE TRIGGER [ IF NOT EXISTS ] trigger_name
-                                  ON table_name
-                                  USING string
-
-

For instance:

-
CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-
-
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. You place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it is loaded during cluster startup and exists on every node that participates in the cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

-
-
-

DROP TRIGGER

-

Dropping a trigger uses the DROP TRIGGER statement:

-
-drop_trigger_statement ::=  DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name
-
-

For instance:

-
DROP TRIGGER myTrigger ON myTable;
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/cql/types.html b/src/doc/3.11.5/cql/types.html deleted file mode 100644 index 8f13d6d31..000000000 --- a/src/doc/3.11.5/cql/types.html +++ /dev/null @@ -1,697 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Types" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Types

-

CQL is a typed language and supports a rich set of data types, including native types, -collection types, user-defined types, tuple types and custom -types:

-
-cql_type ::=  native_type | collection_type | user_defined_type | tuple_type | custom_type
-
-
-

Native Types

-

The native types supported by CQL are:

-
-native_type ::=  ASCII
-                 | BIGINT
-                 | BLOB
-                 | BOOLEAN
-                 | COUNTER
-                 | DATE
-                 | DECIMAL
-                 | DOUBLE
-                 | DURATION
-                 | FLOAT
-                 | INET
-                 | INT
-                 | SMALLINT
-                 | TEXT
-                 | TIME
-                 | TIMESTAMP
-                 | TIMEUUID
-                 | TINYINT
-                 | UUID
-                 | VARCHAR
-                 | VARINT
-
-

The following table gives additional information on the native data types, and on which kind of constants each type supports:

type | constants supported | description
ascii | string | ASCII character string
bigint | integer | 64-bit signed long
blob | blob | Arbitrary bytes (no validation)
boolean | boolean | Either true or false
counter | integer | Counter column (64-bit signed value). See Counters for details
date | integer, string | A date (with no corresponding time value). See Working with dates below for details
decimal | integer, float | Variable-precision decimal
double | integer, float | 64-bit IEEE-754 floating point
duration | duration | A duration with nanosecond precision. See Working with durations below for details
float | integer, float | 32-bit IEEE-754 floating point
inet | string | An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that there is no inet constant, IP address should be input as strings
int | integer | 32-bit signed int
smallint | integer | 16-bit signed int
text | string | UTF8 encoded string
time | integer, string | A time (with no corresponding date value) with nanosecond precision. See Working with times below for details
timestamp | integer, string | A timestamp (date and time) with millisecond precision. See Working with timestamps below for details
timeuuid | uuid | Version 1 UUID, generally used as a “conflict-free” timestamp. Also see Timeuuid functions
tinyint | integer | 8-bit signed int
uuid | uuid | A UUID (of any version)
varchar | string | UTF8 encoded string
varint | integer | Arbitrary-precision integer
-
-

Counters

-

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the UPDATE statement for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0.

-
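For instance, a minimal sketch (page_views is a made-up table name used only for illustration):

CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter
);

// Increment and decrement the counter; its value cannot be set directly
UPDATE page_views SET views = views + 1 WHERE page = '/home';
UPDATE page_views SET views = views - 1 WHERE page = '/home';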

Counters have a number of important limitations:

-
    -
  • They cannot be used for columns part of the PRIMARY KEY of a table.
  • -
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside -the PRIMARY KEY have the counter type, or none of them have it.
  • -
  • Counters do not support expiration.
  • -
  • The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other -words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
  • -
  • Counter updates are, by nature, not idempotent. An important consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), the client has no way to know if the update has been applied or not. In particular, replaying the update may or may not lead to an over count.
  • -
-
-
-
-

Working with timestamps

-

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as the epoch: January 1 1970 at 00:00:00 GMT.

-

Timestamps can be input in CQL either using their value as an integer, or using a string that -represents an ISO 8601 date. For instance, all of the values below are -valid timestamp values for Mar 2, 2011, at 04:05:00 AM, GMT:

-
    -
  • 1299038700000
  • -
  • '2011-02-03 04:05+0000'
  • -
  • '2011-02-03 04:05:00+0000'
  • -
  • '2011-02-03 04:05:00.000+0000'
  • -
  • '2011-02-03T04:05+0000'
  • -
  • '2011-02-03T04:05:00+0000'
  • -
  • '2011-02-03T04:05:00.000+0000'
  • -
-

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is --0800. The time zone may be omitted if desired ('2011-02-03 04:05:00'), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible.

-

The time of day may also be omitted ('2011-02-03' or '2011-02-03+0000'), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the date type.

-
-
-

Working with dates

-

Values of the date type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at the center of the range (2^31). The epoch is January 1st, 1970.

-

As for timestamp, a date can be input either as an integer or using a date string. In the latter case, the format should be yyyy-mm-dd (so '2011-02-03' for instance).
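For instance, an illustrative insert (events is a made-up table with a date column):

INSERT INTO events (event_id, event_date) VALUES (1, '2011-02-03');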

-
-
-

Working with times

-

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

-

As for timestamp, a time can be input either as an integer or using a string representing the time. In the latter case, the format should be hh:mm:ss[.fffffffff] (where the sub-second precision is optional and, if provided, can be less than nanosecond precision). So for instance, the following are valid inputs for a time:

-
    -
  • '08:12:54'
  • -
  • '08:12:54.123'
  • -
  • '08:12:54.123456'
  • -
  • '08:12:54.123456789'
  • -
-
-
-

Working with durations

-

Values of the duration type are encoded as three signed integers of variable length. The first integer represents the number of months, the second the number of days and the third the number of nanoseconds. This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving. Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded as a 64-bit integer.

-

A duration can be input as:

-
-
    -
  1. (quantity unit)+ like 12h30m where the unit can be:
     • y: years (12 months)
     • mo: months (1 month)
     • w: weeks (7 days)
     • d: days (1 day)
     • h: hours (3,600,000,000,000 nanoseconds)
     • m: minutes (60,000,000,000 nanoseconds)
     • s: seconds (1,000,000,000 nanoseconds)
     • ms: milliseconds (1,000,000 nanoseconds)
     • us or µs: microseconds (1,000 nanoseconds)
     • ns: nanoseconds (1 nanosecond)
  2. ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W
  3. ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]
-
-

For example:

-
INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-
-

Duration columns cannot be used in a table’s PRIMARY KEY. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if 1mo is greater than 29d without a date -context.

-

A 1d duration is not equal to a 24h one as the duration type has been created to be able to support daylight saving.

-
-
-

Collections

-

CQL supports three kinds of collections: Maps, Sets and Lists. The types of those collections are defined by:

-
-collection_type ::=  MAP '<' cql_type ',' cql_type '>'
-                     | SET '<' cql_type '>'
-                     | LIST '<' cql_type '>'
-
-

and their values can be input using collection literals:

-
-collection_literal ::=  map_literal | set_literal | list_literal
-map_literal        ::=  '{' [ term ':' term (',' term : term)* ] '}'
-set_literal        ::=  '{' [ term (',' term)* ] '}'
-list_literal       ::=  '[' [ term (',' term)* ] ']'
-
-

Note however that neither bind_marker nor NULL are supported inside collection literals.

-
-

Noteworthy characteristics

-

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all messages sent by a user”, “events registered by a sensor”…), then collections are not appropriate and a specific table (with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy characteristics and limitations:

-
    -
  • Individual collections are not indexed internally. This means that even to access a single element of a collection, the whole collection has to be read (and reading one is not paged internally).
  • -
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. Further, some list operations are not idempotent by nature (see the section on lists below for details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when possible.
  • -
-

Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an anti-pattern to use a (single) collection to store large amounts of data.

-
-
-

Maps

-

A map is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:

-
CREATE TABLE users (
-    id text PRIMARY KEY,
-    name text,
-    favs map<text, text> // A map of text keys, and text values
-);
-
-INSERT INTO users (id, name, favs)
-           VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-// Replace the existing map entirely.
-UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-
-

Further, maps support:

-
    -
  • Updating or inserting one or more elements:

    -
    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    -UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
    -
    -
    -
  • -
  • Removing one or more elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    -UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
    -
    -
    -

    Note that for removing multiple elements in a map, you remove from it a set of keys.

    -
  • -
-

Lastly, TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated elements. In other words:

-
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

-
-
-

Sets

-

A set is a (sorted) collection of unique values. You can define and insert a set with:

-
CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    tags set<text> // A set of text values
-);
-
-INSERT INTO images (name, owner, tags)
-            VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-// Replace the existing set entirely
-UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-
-

Further, sets support:

-
    -
  • Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):

    -
    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
  • Removing one or multiple elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
-

Lastly, as for maps, TTLs if used only apply to the newly inserted values.

-
-
-

Lists

-
-

Note

-

As mentioned above and further discussed at the end of this section, lists have limitations and specific performance considerations that you should take into account before using them. In general, if you can use a set instead of a list, always prefer a set.

-
-

A list is a (sorted) collection of non-unique values where elements are ordered by their position in the list. You can define and insert a list with:

-
CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int> // A list of integers
-)
-
-INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-// Replace the existing list entirely
-UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-
-

Further, lists support:

-
    -
  • Appending and prepending values to a list:

    -
    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    -UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
    -
    -
    -
  • -
  • Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small:

    -
    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
    -
    -
    -
  • -
  • Removing an element by its position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted:

    -
    DELETE scores[1] FROM plays WHERE id = '123-afde';
    -
    -
    -
  • -
  • Deleting all the occurrences of particular values in the list (if a particular element doesn’t occur at all in the -list, it is simply ignored and no error is thrown):

    -
    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
    -
    -
    -
  • -
-
-

Warning

-

The append and prepend operations are not idempotent by nature. So in particular, if one of these operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value twice.

-
-
-

Warning

-

Setting and removing an element by position and removing occurrences of particular values incur an internal read-before-write. They will thus run more slowly and take more resources than usual updates (with the exclusion of conditional writes, which have their own cost).

-
-

Lastly, as for maps, TTLs when used only apply to the newly inserted values.

-
-
-
-

User-Defined Types

-

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using the create_type_statement, alter_type_statement and drop_type_statement described below. But once created, a UDT is simply referred to by its name:

-
-user_defined_type ::=  udt_name
-udt_name          ::=  [ keyspace_name '.' ] identifier
-
-
-

Creating a UDT

-

Creating a new user-defined type is done using a CREATE TYPE statement defined by:

-
-create_type_statement ::=  CREATE TYPE [ IF NOT EXISTS ] udt_name
-                               '(' field_definition ( ',' field_definition )* ')'
-field_definition      ::=  identifier cql_type
-
-

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any type, including collections or other UDTs. For instance:

-
CREATE TYPE phone (
-    country_code int,
-    number text,
-)
-
-CREATE TYPE address (
-    street text,
-    city text,
-    zip text,
-    phones map<text, phone>
-)
-
-CREATE TABLE user (
-    name text PRIMARY KEY,
-    addresses map<text, frozen<address>>
-)
-
-
-

Note that:

-
    -
  • Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the type already exists.
  • -
  • A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At -creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in -the current keyspace.
  • -
  • As of Cassandra 3.11.5, UDTs have to be frozen in most cases, hence the frozen<address> in the table definition above. Please see the section on frozen for more details.
  • -
-
-
-

UDT literals

-

Once a user-defined type has been created, values can be input using a UDT literal:

-
-udt_literal ::=  '{' identifier ':' term ( ',' identifier ':' term )* '}'
-
-

In other words, a UDT literal is like a map literal but its keys are the names of the fields of the type. For instance, one could insert into the table defined in the previous section using:

-
INSERT INTO user (name, addresses)
-          VALUES ('z3 Pr3z1den7', {
-              'home' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                            'landline' : { country_code: 1, number: '...' } }
-              },
-              'work' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'fax' : { country_code: 1, number: '...' } }
-              }
-          })
-
-
-

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields (in which case those will be null).

-
-
-

Altering a UDT

-

An existing user-defined type can be modified using an ALTER TYPE statement:

-
-alter_type_statement    ::=  ALTER TYPE udt_name alter_type_modification
-alter_type_modification ::=  ADD field_definition
-                             | RENAME identifier TO identifier ( identifier TO identifier )*
-
-

You can:

-
    -
  • add a new field to the type (ALTER TYPE address ADD country text). That new field will be null for any values -of the type created before the addition.
  • -
  • rename the fields of the type (ALTER TYPE address RENAME zip TO zipcode).
  • -
-
-
-

Dropping a UDT

-

You can drop an existing user-defined type using a DROP TYPE statement:

-
-drop_type_statement ::=  DROP TYPE [ IF EXISTS ] udt_name
-
-

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error.

-

If the type dropped does not exist, an error will be returned unless IF EXISTS is used, in which case the operation -is a no-op.
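For instance (obsolete_type is a made-up name for a type that is not referenced by any table, type or function):

DROP TYPE IF EXISTS obsolete_type;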

-
-
-
-

Tuples

-

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

-
-tuple_type    ::=  TUPLE '<' cql_type ( ',' cql_type )* '>'
-tuple_literal ::=  '(' term ( ',' term )* ')'
-
-

and can be used thusly:

-
CREATE TABLE durations (
-    event text,
-    duration tuple<int, text>,
-)
-
-INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-
-

Unlike other “composed” types (collections and UDTs), a tuple is always frozen (without the need of the frozen keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of those values can be null but they need to be explicitly declared as such).

-
-
-

Custom Types

-
-

Note

-

Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is complex, not user friendly and the other provided types, particularly user-defined types, should almost always be enough.

-
-

A custom type is defined by:

-
-custom_type ::=  string
-
-

A custom type is a string that contains the name of a Java class that extends the server side AbstractType class and that can be loaded by Cassandra (it should thus be in the CLASSPATH of every node running Cassandra). That class will define what values are valid for the type and how they sort when used for a clustering column. For any other purpose, a value of a custom type is the same as that of a blob, and can in particular be input using the blob literal syntax.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/data_modeling/index.html b/src/doc/3.11.5/data_modeling/index.html deleted file mode 100644 index 8cbaacdc9..000000000 --- a/src/doc/3.11.5/data_modeling/index.html +++ /dev/null @@ -1,104 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Data Modeling" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Modeling

-
-

Todo

-

TODO

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/development/code_style.html b/src/doc/3.11.5/development/code_style.html deleted file mode 100644 index dc3a698e3..000000000 --- a/src/doc/3.11.5/development/code_style.html +++ /dev/null @@ -1,208 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Code Style" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Code Style

-
-

General Code Conventions

-
-
-
-
-
-

Exception handling

-
-
    -
  • Never ever write catch (...) {} or catch (...) { logger.error() } merely to satisfy Java’s compile-time exception checking. Always propagate the exception up or throw RuntimeException (or, if it “can’t happen,” AssertionError). This makes the exceptions visible to automated tests.
  • -
  • Avoid propagating up checked exceptions that no caller handles. Rethrow as RuntimeException (or IOError, if that is more applicable).
  • -
  • Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don’t hide it behind a warn; if it isn’t, no need for the warning.
  • -
  • If you genuinely know an exception indicates an expected condition, it’s okay to ignore it BUT this must be explicitly explained in a comment.
  • -
-
-
-
-

Boilerplate

-
-
    -
  • Avoid redundant @Override annotations when implementing abstract or interface methods.
  • -
  • Do not implement equals or hashcode methods unless they are actually needed.
  • -
  • Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in “real” methods to either.)
  • -
  • Prefer requiring initialization in the constructor to setters.
  • -
  • Avoid redundant this references to member fields or methods.
  • -
  • Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it.
  • -
  • Always include braces for nested levels of conditionals and loops. Only avoid braces for single level.
  • -
-
-
-
-

Multiline statements

-
-
    -
  • Try to keep lines under 120 characters, but use good judgement – it’s better to exceed 120 by a little, than split a line that has no natural splitting points.
  • -
  • When splitting inside a method call, use one line per parameter and align them, like this:
  • -
-
SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(),
-                                         columnFamilies.size(),
-                                         StorageService.getPartitioner());
-
-
-
    -
  • When splitting a ternary, use one line per clause, carry the operator, and align like this:
  • -
-
var = bar == null
-    ? doFoo()
-    : doBar();
-
-
-
-
-
-

Whitespace

-
-
    -
  • Please make sure to use 4 spaces instead of the tab character for all your indentation.
  • -
  • Many lines in many files have a bunch of trailing whitespace… Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn’t have to pay attention to whitespace diffs.
  • -
-
-
-
-

Imports

-

Please observe the following order for your imports:

-
java
-[blank line]
-com.google.common
-org.apache.commons
-org.junit
-org.slf4j
-[blank line]
-everything else alphabetically
-
-
-
-
-

Format files for IDEs

-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/development/how_to_commit.html b/src/doc/3.11.5/development/how_to_commit.html deleted file mode 100644 index 8465d341a..000000000 --- a/src/doc/3.11.5/development/how_to_commit.html +++ /dev/null @@ -1,180 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "How-to Commit" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

How-to Commit

-

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself.

-

Here is how committing, merging and pushing will usually look for tickets that follow the convention (if patch-based):

-

Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).

-
-
On cassandra-3.0:
-
    -
  1. git am -3 12345-3.0.patch (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. git apply -3 12345-3.3.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  3. git commit --amend
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. git apply -3 12345-trunk.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  3. git commit --amend
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
-
-
-

Same scenario, but a branch-based contribution:

-
-
On cassandra-3.0:
-
    -
  1. git cherry-pick <sha-of-3.0-commit> (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. git format-patch -1 <sha-of-3.3-commit>
  3. git apply -3 <sha-of-3.3-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. git commit --amend
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. git format-patch -1 <sha-of-trunk-commit>
  3. git apply -3 <sha-of-trunk-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. git commit --amend
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
-
-
-
-

Tip

-

Notes on git flags: the -3 flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply.

-

The --atomic flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per each branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue.

-
-
-

Tip

-

The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. -curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/development/how_to_review.html b/src/doc/3.11.5/development/how_to_review.html deleted file mode 100644 index 3205c4db4..000000000 --- a/src/doc/3.11.5/development/how_to_review.html +++ /dev/null @@ -1,172 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Review Checklist" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Review Checklist

-

When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process:

-

General

-
-
    -
  • Does it conform to the Code Style guidelines?
  • -
  • Is there any redundant or duplicate code?
  • -
  • Is the code as modular as possible?
  • -
  • Can any singletons be avoided?
  • -
  • Can any of the code be replaced with library functions?
  • -
  • Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem?
  • -
-
-

Error-Handling

-
-
    -
  • Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded?
  • -
  • Where third-party utilities are used, are returning errors being caught?
  • -
  • Are invalid parameter values handled?
  • -
  • Are any Throwable/Exceptions passed to the JVMStabilityInspector?
  • -
  • Are errors well-documented? Does the error message tell the user how to proceed?
  • -
  • Do exceptions propagate to the appropriate level in the code?
  • -
-
-

Documentation

-
-
    -
  • Do comments exist and describe the intent of the code (the “why”, not the “how”)?
  • -
  • Are javadocs added where appropriate?
  • -
  • Is any unusual behavior or edge-case handling described?
  • -
  • Are data structures and units of measurement explained?
  • -
  • Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’?
  • -
  • Does the code self-document via clear naming, abstractions, and flow control?
  • -
  • Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed?
  • -
  • Is the ticket tagged with “client-impacting” and “doc-impacting”, where appropriate?
  • -
  • Has lib/licences been updated for third-party libs? Are they Apache License compatible?
  • -
  • Is the Component on the JIRA ticket set appropriately?
  • -
-
-

Testing

-
-
    -
  • Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc.
  • -
  • Do tests exist and are they comprehensive?
  • -
  • Do unit tests actually test that the code is performing the intended functionality?
  • -
  • Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse?
  • -
  • If the code may be affected by multi-node clusters, are there dtests?
  • -
  • If the code may take a long time to test properly, are there CVH tests?
  • -
  • Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions?
  • -
  • If patch affects read/write path, did we test for performance regressions w/multiple workloads?
  • -
  • If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature?
  • -
-
-

Logging

-
-
    -
  • Are logging statements logged at the correct level?
  • -
  • Are there logs in the critical path that could affect performance?
  • -
  • Is there any log that could be added to communicate status or troubleshoot potential problems in this feature?
  • -
  • Can any unnecessary logging statement be removed?
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/development/ide.html b/src/doc/3.11.5/development/ide.html deleted file mode 100644 index 541eecf80..000000000 --- a/src/doc/3.11.5/development/ide.html +++ /dev/null @@ -1,234 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Building and IDE Integration" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Building and IDE Integration

-
-

Building From Source

-

Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using Java 8, Git and Ant.

-

The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:

-
git clone http://git-wip-us.apache.org/repos/asf/cassandra.git cassandra-trunk
-
-
-

Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:

-
git checkout cassandra-3.0
-
-
-

You can get a list of available branches with git branch.

-

Finally build Cassandra using ant:

-
ant
-
-
-

This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled.

-
-

Hint

-

You can setup multiple working trees for different Cassandra versions from the same repository using git-worktree.

-
-
-

Note

-

Bleeding edge development snapshots of Cassandra are available from Jenkins continuous integration.

-
-
-
-

Setting up Cassandra in IntelliJ IDEA

-

IntelliJ IDEA by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra.

-
-

Setup Cassandra as a Project (C* 2.1 and newer)

-

Since 2.1.5, there is a new ant target: generate-idea-files. Please see our wiki for instructions for older Cassandra versions.

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Once Cassandra is built, generate the IDEA files using ant:
  2. -
-
ant generate-idea-files
-
-
-
    -
  1. Start IDEA
  2. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA’s File menu
-

The project generated by the ant task generate-idea-files contains nearly everything you need to debug Cassandra and execute unit tests.

-
-
    -
  • Run/debug defaults for JUnit
  • -
  • Run/debug configuration for Cassandra daemon
  • -
  • License header for Java source files
  • -
  • Cassandra code style
  • -
  • Inspections
  • -
-
-
-
-
-

Setting up Cassandra in Eclipse

-

Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the download page. The following guide was created with “Eclipse IDE for Java Developers”.

-

These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x.

-
-

Project Settings

-

It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.

-
-
    -
  • Clone and build Cassandra as described above.
  • -
  • Run ant generate-eclipse-files to create the Eclipse settings.
  • -
  • Start Eclipse.
  • -
  • Select File->Import->Existing Projects into Workspace->Select git directory.
  • -
  • Make sure “cassandra-trunk” is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above).
  • -
  • Confirm “Finish” to have your project imported.
  • -
-
-

You should now be able to find the project as part of the “Package Explorer” or “Project Explorer” without having Eclipse complain about any errors after building the project automatically.

-
-
-

Unit Tests

-

Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting Run As->JUnit Test. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting Debug As->JUnit Test.

-

Alternatively all unit tests can be run from the command line as described in Testing

-
-
-

Debugging Cassandra Using Eclipse

-

There are two ways to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ./bin/cassandra script and connect to the JVM remotely from Eclipse, or start Cassandra from Eclipse right away.

-
-

Starting Cassandra From Command Line

-
-
    -
  • Set environment variable to define remote debugging options for the JVM: -export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"
  • -
  • Start Cassandra by executing the ./bin/cassandra
  • -
-
-

Afterwards you should be able to connect to the running Cassandra process through the following steps:

-

From the menu, select Run->Debug Configurations..

-../_images/eclipse_debug0.png -

Create new remote application

-../_images/eclipse_debug1.png -

Configure connection settings by specifying a name and port 1414

-../_images/eclipse_debug2.png -

Afterwards confirm “Debug” to connect to the JVM and start debugging Cassandra!

-
-
-

Starting Cassandra From Eclipse

-

Cassandra can also be started directly from Eclipse if you don’t want to use the command line.

-

From the menu, select Run->Run Configurations...

[Image: ../_images/eclipse_debug3.png]

Create new application

[Image: ../_images/eclipse_debug4.png]

Specify name, project and main class org.apache.cassandra.service.CassandraDaemon

[Image: ../_images/eclipse_debug5.png]

Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed.

-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true

[Image: ../_images/eclipse_debug6.png]

Now just confirm “Debug” and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging!

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/development/index.html b/src/doc/3.11.5/development/index.html deleted file mode 100644 index e5cd929db..000000000 --- a/src/doc/3.11.5/development/index.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Development" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.5/development/patches.html b/src/doc/3.11.5/development/patches.html deleted file mode 100644 index 2f7dcc74d..000000000 --- a/src/doc/3.11.5/development/patches.html +++ /dev/null @@ -1,247 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Contributing Code Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contributing Code Changes

-
-

Choosing What to Work on

-

Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (either Java or Python), documentation, testing or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you’re addressing.

-
-
As a general rule of thumb:

  • Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the developer community
  • Bug fixes take higher priority compared to features
  • The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
  • Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately
-
-
-

Hint

-

Not sure what to work on? Just pick an issue tagged with the low hanging fruit label in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

-
-
-
-

Before You Start Coding

-

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it’s generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or IRC channel listed on our community page.

-
-
You should also:

  • Avoid redundant work by searching for already reported issues in JIRA
  • Create a new issue early in the process describing what you’re working on - not just after finishing your patch
  • Link related JIRA issues with your own ticket to provide better context
  • Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
  • Ping people from whom you would like advice on JIRA by mentioning them
-
There are also some fixed rules that you need to be aware of:

  • Patches will only be applied to branches by following the release model
  • Code must be testable
  • Code must follow the Code Style convention
  • Changes must not break compatibility between different Cassandra versions
  • Contributions must be covered by the Apache License
-
-
-

Choosing the Right Branches to Work on

-

There are currently multiple Cassandra versions maintained in individual branches:

Version   Policy
3.x       Tick-tock (see below)
3.0       Bug fixes only
2.2       Bug fixes only
2.1       Critical bug fixes only

Corresponding branches in git are easy to recognize as they are named cassandra-<release> (e.g. cassandra-3.0). The trunk branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

-
-

Tick-Tock Releases

-

New releases created as part of the tick-tock release process will either focus on stability (odd version numbers) or introduce new features (even version numbers). Any code for new Cassandra features should be based on the latest, unreleased 3.x branch with an even version number, or on trunk.

-
-
-

Bug Fixes

-

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be cassandra-2.1 -> cassandra-2.2 -> cassandra-3.0 -> cassandra-3.x -> trunk. But don’t worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn’t very common. As a contributor, you’re also not expected to provide a single patch for each version. What you need to do however is:

-
-
  • Be clear about which versions you could verify to be affected by the bug
  • For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on a case-by-case basis
  • If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0)
  • Test if the patch can be merged cleanly across branches in the direction listed above (see the sketch below)
  • Be clear which branches may need attention by the committer or even create custom patches for those if you can
-
-
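As a minimal sketch of that forward-merge order (assuming a hypothetical fix that was first committed on cassandra-2.1):

git checkout cassandra-2.2 && git merge cassandra-2.1
git checkout cassandra-3.0 && git merge cassandra-2.2
git checkout cassandra-3.x && git merge cassandra-3.0
git checkout trunk && git merge cassandra-3.x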
-
-
-

Creating a Patch

-

So you’ve finished coding and the great moment arrives: it’s time to submit your patch!

-
-
  1. Create a branch for your changes if you haven’t done so already. Many contributors name their branches based on ticket number and Cassandra version, e.g. git checkout -b 12345-3.0
  2. Verify that you follow Cassandra’s Code Style
  3. Make sure all tests (including yours) pass using ant as described in Testing. If you suspect a test failure is unrelated to your change, it may be useful to check the test’s status by searching the issue tracker or looking at CI results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help set up CI jobs to run the full test suites.
  4. Consider going through the Review Checklist for your code. This will help you to understand how others will consider your change for inclusion.
  5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during the review stage, especially for incremental review, but once +1d, do either:
       • Attach a patch to JIRA with a single squashed commit in it (per branch), or
       • Squash the commits in-place in your branches into one
  6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch, ending with the following statement on the last line: patch by X; reviewed by Y for CASSANDRA-ZZZZZ
  7. When you’re happy with the result, create a patch:

git add <any new or modified file>
git commit -m '<message>'
git format-patch HEAD~1
mv <patch-file> <ticket-branchname.txt> (e.g. 12345-trunk.txt, 12345-3.0.txt)
-
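As an illustrative example of the commit message convention from step 6 (the summary line, names and ticket number are placeholders):

git commit -am "Fix example regression in SSTable iteration

patch by Jane Doe; reviewed by John Smith for CASSANDRA-12345"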
-
-

Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch:

-
git push --set-upstream origin 12345-3.0
-
-
-
-
  8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless.
  9. Attach the newly generated patch to the ticket/add a link to your branch and click “Submit Patch” at the top of the ticket. This will move the ticket into “Patch Available” status, indicating that your submission is ready for review.
  10. Wait for other developers or committers to review it and hopefully +1 the ticket (see Review Checklist). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable.
  11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into “Patch Available” once again.
-

Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/development/testing.html b/src/doc/3.11.5/development/testing.html deleted file mode 100644 index fb67403d6..000000000 --- a/src/doc/3.11.5/development/testing.html +++ /dev/null @@ -1,171 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Testing" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Testing

-

Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you’re working on.

-
-

Unit Testing

-

The simplest way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the test/unit directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock-friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example.

-
@Test
public void testBatchAndList() throws Throwable
{
   // CQLTester replaces %s (and %1$s) with the generated name of the table under test
   createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
   execute("BEGIN BATCH " +
           "UPDATE %1$s SET l = l + [ 1 ] WHERE k = 0; " +
           "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
           "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
           "APPLY BATCH");

   // The three batched appends should leave the list [1, 2, 3] in the single row
   assertRows(execute("SELECT l FROM %s WHERE k = 0"),
              row(list(1, 2, 3)));
}
-
-
-

Unit tests can be run from the command line using the ant test command, ant test -Dtest.name=<simple_classname> to execute a test suite or ant testsome -Dtest.name=<FQCN> -Dtest.methods=<testmethod1>[,testmethod2] for individual tests. For example, to run all test methods in the org.apache.cassandra.cql3.SimpleQueryTest class, you would run:

-
ant test -Dtest.name=SimpleQueryTest
-
-
-

To run only the testStaticCompactTables() test method from that class, you would run:

-
ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables
-
-
-
-

Long running tests

-

Tests that consume a significant amount of time during execution can be found in the test/long directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under test/long only when using the ant long-test target.

-
-
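For example, the long-running suite can be started from the source tree with:

ant long-test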
-
-

DTests

-

One way of doing integration or system testing at larger scale is by using dtest, which stands for “Cassandra Distributed Tests”. The idea is to automatically set up Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ccmlib from the ccm project. Dtests will set up clusters using this library just as you do running ad-hoc ccm commands on your local machine. Afterwards dtests will use the Python driver to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes.

-

Using dtests helps us to prevent regression bugs by continually executing tests on the CI server against new patches. For frequent contributors, this Jenkins is set up to build branches from their GitHub repositories. It is likely that your reviewer will use this Jenkins instance to run tests for your patch. Read more on the motivation behind the CI server here.

-

The best way to learn how to write dtests is probably by reading the introduction “How to Write a Dtest” and by looking at existing, recently updated tests in the project. New tests must follow certain style conventions that are checked before accepting contributions. In contrast to Cassandra, dtest issues and pull requests are managed on GitHub, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR.

-

Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you.

-
-
-

Performance Testing

-

Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable.

-
-

Cassandra Stress Tool

-

TODO: CASSANDRA-12365

-
-
-

cstar_perf

-

Another tool available on GitHub is cstar_perf, which can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it.

-
-
-

CircleCI

-

Cassandra ships with a default CircleCI configuration. To enable running tests on your branches, go to the CircleCI website, click “Login” and log in with your GitHub account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click “Projects”, then your GitHub account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ant eclipse-warnings and ant test will be run. If you up the parallelism to 4, it also runs ant long-test, ant test-compression and ant stress-test.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/faq/index.html b/src/doc/3.11.5/faq/index.html deleted file mode 100644 index 688aabde7..000000000 --- a/src/doc/3.11.5/faq/index.html +++ /dev/null @@ -1,315 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Frequently Asked Questions" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Frequently Asked Questions

- -
-

Why can’t I set listen_address to listen on 0.0.0.0 (all my addresses)?

-

Cassandra is a gossip-based distributed system and listen_address is the address a node tells other nodes to reach -it at. Telling other nodes “contact me on any of my addresses” is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen.

-

If you don’t want to manually specify an IP to listen_address for each node in your cluster (understandable!), leave -it blank and Cassandra will use InetAddress.getLocalHost() to pick an address. Then it’s up to you or your ops team -to make things resolve correctly (/etc/hosts/, dns, etc).

-

One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769).

-

See CASSANDRA-256 and CASSANDRA-43 for more gory details.

-
-
-

What ports does Cassandra use?

-

By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX (and 9160 for the deprecated Thrift interface). The internode communication and native protocol ports -are configurable in the Cassandra Configuration File. The JMX port is configurable in cassandra-env.sh (through JVM -options). All ports are TCP.

-
-
-

What happens to existing data in my cluster when I add new nodes?

-

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See Adding, replacing, moving and removing nodes.

-
-
-

I delete data from Cassandra, but disk usage stays the same. What gives?

-

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can’t actually be removed -when you perform a delete, instead, a marker (also called a “tombstone”) is written to indicate the value’s new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See Compaction for more detail.

-
-
-

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?

-

This happens when you have the same token assigned to each node. Don’t do that.

-

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

-

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart.

-
-
-

Can I change the replication factor (of a keyspace) on a live cluster?

-

Yes, but it will require running repair (or cleanup) to change the replica count of existing data:

-
    -
  • Alter the replication factor for desired keyspace (using cqlsh for instance).
  • -
  • If you’re reducing the replication factor, run nodetool cleanup on the cluster to remove surplus replicated data. -Cleanup runs on a per-node basis.
  • -
  • If you’re increasing the replication factor, run nodetool repair to ensure data is replicated according to the new -configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster -performance. It’s highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will -most likely swamp it.
  • -
-
-
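A hedged sketch of the increasing-replication-factor case (keyspace and datacenter names are placeholders):

cqlsh -e "ALTER KEYSPACE my_keyspace WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3};"
nodetool repair my_keyspace   # repeat on each node, ideally as a rolling repair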
-

Can I Store (large) BLOBs in Cassandra?

-

Cassandra isn’t optimized for large file or BLOB storage and a single blob value is always read and sent to the client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

-

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the max_mutation_size_in_kb setting in the Cassandra Configuration File (which defaults to half of commitlog_segment_size_in_mb, which itself defaults to 32MB).

-
-
-

Nodetool says “Connection refused to host: 127.0.1.1” for any remote host. What gives?

-

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

-

If you are not using DNS, then make sure that your /etc/hosts files are accurate on both ends. If that fails, try -setting the -Djava.rmi.server.hostname=<public name> JVM option near the bottom of cassandra-env.sh to an -interface that you can reach from the remote machine.

-
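For example, a line like the following could be added near the bottom of cassandra-env.sh (the address is a placeholder):

JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=203.0.113.10"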
-
-

Will batching my operations speed up my bulk load?

-

No. Using batches to load data will generally just add “spikes” of latency. Use asynchronous INSERTs instead, or use -true Bulk Loading.

-

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch stays reasonable). But never ever blindly batch everything!

-
-
-

On RHEL nodes are unable to join the ring

-

Check if SELinux is on; if it is, turn it off.

-
-
-

How do I unsubscribe from the email list?

-

Send an email to user-unsubscribe@cassandra.apache.org.

-
-
-

Why does top report that Cassandra is using a lot more memory than the Java heap max?

-

Cassandra uses Memory Mapped Files (mmap) internally. That is, we -use the operating system’s virtual memory system to map a number of on-disk files into the Cassandra process’ address -space. This will “use” virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that.

-

What matters from the perspective of “memory use” in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap’d /dev/zero, which represent real memory used. The key issue is that for a mmap’d file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write.

-

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don’t -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail here.

-
-
-

What are seeds?

-

Seeds are used during startup to discover the cluster.

-

If you configure your nodes to refer to some node as a seed, nodes in your ring tend to send Gossip messages to seeds more often (also see the section on gossip) than to non-seeds. In other words, seeds work as hubs of the Gossip network. With seeds, each node can detect status changes of other nodes quickly.

-

Seeds are also referred to by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node joins the ring, it learns about the other nodes, so it doesn’t need a seed on subsequent boots.

-

You can make a node a seed at any time. There is nothing special about seed nodes. If you list the node in the seed list, it is a seed.

-

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you do not have to worry about bootstrap at all.

-

Recommended usage of seeds:

-
    -
  • pick two (or more) nodes per data center as seed nodes.
  • -
  • sync the seed list to all your nodes
  • -
-
-
-

Does single seed mean single point of failure?

-

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

-
-
-

Why can’t I call jmx method X on jconsole?

-

Some JMX operations use array arguments and, as jconsole doesn’t support array arguments, those operations can’t be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations or use an array-capable JMX monitoring tool.

-
-
-

Why do I see “… messages dropped …” in the logs?

-

This is a symptom of load shedding – Cassandra defending itself against more requests than it can handle.

-

Internode messages which are received by a node, but do not get processed within their proper timeout (see read_request_timeout, write_request_timeout, … in the Cassandra Configuration File), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

-

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

-

For reads, this means a read request may not have completed.

-

Load shedding is part of the Cassandra architecture, if this is a persistent issue it is generally a sign of an -overloaded node or cluster.

-
-
-

Cassandra dies with java.lang.OutOfMemoryError: Map failed

-

If Cassandra is dying specifically with the “Map failed” message, it means the OS is denying java the ability to lock more memory. In Linux, this typically means memlock is limited. Check /proc/<pid of cassandra>/limits to verify this and raise it (e.g., via ulimit in bash). You may also need to increase vm.max_map_count. Note that the Debian package handles this for you automatically.

-
-
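A hedged sketch of the checks and fixes described above (the sysctl value is a commonly suggested setting, not an official requirement):

grep "Max locked memory" /proc/$(pgrep -f CassandraDaemon)/limits
ulimit -l unlimited                       # raise memlock before starting Cassandra (run as root)
sudo sysctl -w vm.max_map_count=1048575   # raise the kernel mmap count limit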
-

What happens if two updates are made with the same timestamp?

-

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected.

-
-
-

Why bootstrapping a new node fails with a “Stream failed” error?

-

Two main possibilities:

-
    -
  1. the GC may be creating long pauses disrupting the streaming process
  2. -
  3. compactions happening in the background hold streaming long enough that the TCP connection fails
  4. -
-

In the first case, regular GC tuning advices apply. In the second case, you need to set TCP keepalive to a lower value -(default is very high on Linux). Try to just run the following:

-
$ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-
-

To make those settings permanent, add them to your /etc/sysctl.conf file.

-

Note: GCE’s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/genindex.html b/src/doc/3.11.5/genindex.html deleted file mode 100644 index 1ba5fb481..000000000 --- a/src/doc/3.11.5/genindex.html +++ /dev/null @@ -1,93 +0,0 @@ - ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Index" -doc-header-links: ' - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.5/getting_started/configuring.html b/src/doc/3.11.5/getting_started/configuring.html deleted file mode 100644 index c8283376a..000000000 --- a/src/doc/3.11.5/getting_started/configuring.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

For running Cassandra on a single node, the steps above are enough; you don’t really need to change any configuration. However, when you deploy a cluster of nodes, or use clients that are not on the same host, there are some parameters that must be changed.

-

The Cassandra configuration files can be found in the conf directory of tarballs. For packages, the configuration -files will be located in /etc/cassandra.

-
-

Main runtime properties

-

Most of the configuration in Cassandra is done via yaml properties that can be set in cassandra.yaml. At a minimum you should consider setting the following properties:

  • cluster_name: the name of your cluster.
  • seeds: a comma separated list of the IP addresses of your cluster seeds.
  • storage_port: you don’t necessarily need to change this but make sure that there are no firewalls blocking this port.
  • listen_address: the IP address of your node, this is what allows other nodes to communicate with this node so it is important that you change it. Alternatively, you can set listen_interface to tell Cassandra which interface to use, and consequently which address to use. Set only one, not both.
  • native_transport_port: as for storage_port, make sure this port is not blocked by firewalls as clients will communicate with Cassandra on this port.
-
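A quick, hedged way to check what these properties are currently set to in a tarball install:

grep -E '^(cluster_name|listen_address|storage_port|native_transport_port)|- seeds' conf/cassandra.yaml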
-

Changing the location of directories

-

The following yaml properties control the location of directories:

-
    -
  • data_file_directories: one or more directories where data files are located.
  • -
  • commitlog_directory: the directory where commitlog files are located.
  • -
  • saved_caches_directory: the directory where saved caches are located.
  • -
  • hints_directory: the directory where hints are located.
  • -
-

For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks.

-
-
-

Environment variables

-

JVM-level settings such as heap size can be set in cassandra-env.sh. You can add any additional JVM command line -argument to the JVM_OPTS environment variable; when Cassandra starts these arguments will be passed to the JVM.

-
-
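For example, a hedged sketch of passing extra heap settings (the sizes are placeholders):

export JVM_OPTS="$JVM_OPTS -Xms4G -Xmx4G"   # or append the same flags in cassandra-env.sh
bin/cassandra -f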
-

Logging

-

The logger in use is logback. You can change logging properties by editing logback.xml. By default it will log at INFO level into a file called system.log and at DEBUG level into a file called debug.log. When running in the foreground, it will also log at INFO level to the console.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/getting_started/drivers.html b/src/doc/3.11.5/getting_started/drivers.html deleted file mode 100644 index 2ae69320c..000000000 --- a/src/doc/3.11.5/getting_started/drivers.html +++ /dev/null @@ -1,223 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Client drivers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Client drivers

-

Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver.

- -
-

Python

- -
-

Clojure

- -
-
-

Erlang

- -
-
-

Go

- -
-
-

Haskell

- -
-
-

Rust

- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/getting_started/index.html b/src/doc/3.11.5/getting_started/index.html deleted file mode 100644 index bda343a3c..000000000 --- a/src/doc/3.11.5/getting_started/index.html +++ /dev/null @@ -1,146 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Getting Started" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.5/getting_started/installing.html b/src/doc/3.11.5/getting_started/installing.html deleted file mode 100644 index 2a42eb241..000000000 --- a/src/doc/3.11.5/getting_started/installing.html +++ /dev/null @@ -1,196 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Installing Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Installing Cassandra

-
-

Prerequisites

-
    -
  • The latest version of Java 8, either the Oracle Java Standard Edition 8 or OpenJDK 8. To -verify that you have the correct version of java installed, type java -version.
  • -
  • For using cqlsh, the latest version of Python 2.7. To verify that you have -the correct version of Python installed, type python --version.
  • -
-
-
-

Installation from binary tarball files

- -
tar -xvf apache-cassandra-3.6-bin.tar.gz cassandra
-
-
-

The files will be extracted into apache-cassandra-3.6; you need to substitute 3.6 with the release number that you have downloaded.

-
    -
  • Optionally add apache-cassandra-3.6/bin to your path.
  • -
  • Start Cassandra in the foreground by invoking bin/cassandra -f from the command line. Press “Control-C” to stop -Cassandra. Start Cassandra in the background by invoking bin/cassandra from the command line. Invoke kill pid -or pkill -f CassandraDaemon to stop Cassandra, where pid is the Cassandra process id, which you can find for -example by invoking pgrep -f CassandraDaemon.
  • -
  • Verify that Cassandra is running by invoking bin/nodetool status from the command line.
  • -
  • Configuration files are located in the conf sub-directory.
  • -
  • Since Cassandra 2.1, log and data directories are located in the logs and data sub-directories respectively. -Older versions defaulted to /var/log/cassandra and /var/lib/cassandra. Due to this, it is necessary to either -start Cassandra with root privileges or change conf/cassandra.yaml to use directories owned by the current user, -as explained below in the section on changing the location of directories.
  • -
-
-
-

Installation from Debian packages

-
    -
  • Add the Apache repository of Cassandra to /etc/apt/sources.list.d/cassandra.sources.list, for example for version -3.6:
  • -
-
echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-
-
-
    -
  • Add the Apache Cassandra repository keys:
  • -
-
curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
-
-
-
    -
  • Update the repositories:
  • -
-
sudo apt-get update
-
-
-
    -
  • If you encounter this error:
  • -
-
GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA
-
-
-

Then add the public key A278B781FE4B2BDA as follows:

-
sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA
-
-
-

and repeat sudo apt-get update. The actual key may be different; you get it from the error message itself. For a full list of Apache contributors public keys, you can refer to this link.

-
    -
  • Install Cassandra:
  • -
-
sudo apt-get install cassandra
-
-
-
    -
  • You can start Cassandra with sudo service cassandra start and stop it with sudo service cassandra stop. -However, normally the service will start automatically. For this reason be sure to stop it if you need to make any -configuration changes.
  • -
  • Verify that Cassandra is running by invoking nodetool status from the command line.
  • -
  • The default location of configuration files is /etc/cassandra.
  • -
  • The default location of log and data directories is /var/log/cassandra/ and /var/lib/cassandra.
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/getting_started/querying.html b/src/doc/3.11.5/getting_started/querying.html deleted file mode 100644 index fed81f798..000000000 --- a/src/doc/3.11.5/getting_started/querying.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Inserting and querying" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Inserting and querying

-

The API to Cassandra is CQL, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done:

-
    -
  • either using cqlsh,
  • -
  • or through a client driver for Cassandra.
  • -
-
-

CQLSH

-

cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:

-
$ bin/cqlsh localhost
Connected to Test Cluster at localhost:9042.
[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
Use HELP for help.
cqlsh> SELECT cluster_name, listen_address FROM system.local;

 cluster_name | listen_address
--------------+----------------
 Test Cluster |      127.0.0.1

(1 rows)
cqlsh>
-
-

See the cqlsh section for full documentation.

-
-
-

Client drivers

-

A lot of client drivers are provided by the community and a list of known drivers is provided in the next section. You should refer to the documentation of each driver for more information on how to use them.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/index.html b/src/doc/3.11.5/index.html deleted file mode 100644 index 27a5d1106..000000000 --- a/src/doc/3.11.5/index.html +++ /dev/null @@ -1,75 +0,0 @@ ---- -layout: doclandingpage -title: "Documentation" -is_homepage: false -is_sphinx_doc: false ---- - -

Apache Cassandra Documentation v3.11.5

This documentation is currently a work-in-progress and contains a number of TODO sections. Contributions are welcome.
- -

Main documentation


Meta informations

- - - - diff --git a/src/doc/3.11.5/objects.inv b/src/doc/3.11.5/objects.inv deleted file mode 100644 index b9093454f..000000000 Binary files a/src/doc/3.11.5/objects.inv and /dev/null differ diff --git a/src/doc/3.11.5/operating/backups.html b/src/doc/3.11.5/operating/backups.html deleted file mode 100644 index 9e9335792..000000000 --- a/src/doc/3.11.5/operating/backups.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Backups" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.5/operating/bloom_filters.html b/src/doc/3.11.5/operating/bloom_filters.html deleted file mode 100644 index 577446214..000000000 --- a/src/doc/3.11.5/operating/bloom_filters.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bloom Filters" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bloom Filters

-

In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter.

-

Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file.

-

While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the bloom_filter_fp_chance to a float between 0 and 1.

-

The default value for bloom_filter_fp_chance is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases.

-

Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the bloom_filter_fp_chance gets closer to 0), memory usage -increases non-linearly - the bloom filter for bloom_filter_fp_chance = 0.01 will require about three times as much -memory as the same table with bloom_filter_fp_chance = 0.1.

-

Typical values for bloom_filter_fp_chance are usually between 0.01 (1%) and 0.1 (10%) false-positive chance, where Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned by use case:

-
    -
  • Users with more RAM and slower disks may benefit from setting the bloom_filter_fp_chance to a numerically lower -number (such as 0.01) to avoid excess IO operations
  • -
  • Users with less RAM, more dense nodes, or very fast disks may tolerate a higher bloom_filter_fp_chance in order to -save RAM at the expense of excess IO operations
  • -
  • In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics -workloads), setting the bloom_filter_fp_chance to a much higher number is acceptable.
  • -
-
-

Changing

-

The bloom filter false positive chance is visible in the DESCRIBE TABLE output as the field -bloom_filter_fp_chance. Operators can change the value with an ALTER TABLE statement:

-
ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01
-
-
-

Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ALTER TABLE statement, new files on disk will be written with the new bloom_filter_fp_chance, but existing sstables will not be modified until they are compacted - if an operator needs a change to bloom_filter_fp_chance to take effect, they can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the sstables on disk, regenerating the bloom filters in the process.

-
-
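A hedged end-to-end sketch, using placeholder keyspace and table names:

cqlsh -e "ALTER TABLE my_keyspace.my_table WITH bloom_filter_fp_chance = 0.01;"
nodetool upgradesstables -a my_keyspace my_table   # rewrite existing sstables so the new value takes effect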
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/operating/bulk_loading.html b/src/doc/3.11.5/operating/bulk_loading.html deleted file mode 100644 index 52f183604..000000000 --- a/src/doc/3.11.5/operating/bulk_loading.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bulk Loading" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.5/operating/cdc.html b/src/doc/3.11.5/operating/cdc.html deleted file mode 100644 index ff0102cb8..000000000 --- a/src/doc/3.11.5/operating/cdc.html +++ /dev/null @@ -1,186 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Change Data Capture" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Change Data Capture

-
-

Overview

-

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the combined flushed and unflushed CDC-log is reached. An operator can -enable CDC on a table by setting the table property cdc=true (either when creating the table or altering it), after which any CommitLogSegments containing -data for a CDC-enabled table are moved to the directory specified in cassandra.yaml on segment discard. A threshold -of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will not allow CDC -data until a consumer parses and removes data from the destination archival directory.

-
-
-

Configuration

-
-

Enabling or disable CDC on a table

-

CDC is enabled or disabled through the cdc table property, for instance:

-
CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;

ALTER TABLE foo WITH cdc=true;

ALTER TABLE foo WITH cdc=false;
-
-
-
-

cassandra.yaml parameters

-

The following cassandra.yaml options are available for CDC:

-
-
cdc_enabled (default: false)
    Enable or disable CDC operations node-wide.

cdc_raw_directory (default: $CASSANDRA_HOME/data/cdc_raw)
    Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.

cdc_free_space_in_mb (default: min of 4096 and 1/8th volume space)
    Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in cdc_raw_directory.

cdc_free_space_check_interval_ms (default: 250)
    When at capacity, we limit the frequency with which we re-calculate the space taken up by cdc_raw_directory to prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-
-
-

Reading CommitLogSegments

-

This implementation included a refactor of CommitLogReplayer into CommitLogReader.java. -Usage is fairly straightforward -with a variety of signatures -available for use. In order to handle mutations read from disk, implement CommitLogReadHandler.

-
-
-

Warnings

-

Do not enable CDC without some kind of consumption process in-place.

-

The initial implementation of Change Data Capture does not include a parser (see Reading CommitLogSegments above) -so, if CDC is enabled on a node and then on a table, the cdc_free_space_in_mb will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place.

-
-
-

Further Reading

- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/operating/compaction.html b/src/doc/3.11.5/operating/compaction.html deleted file mode 100644 index 1e8325ade..000000000 --- a/src/doc/3.11.5/operating/compaction.html +++ /dev/null @@ -1,514 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compaction" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compaction

-
-

Types of compaction

-

The concept of compaction is used for different kinds of operations in Cassandra; the common thing about these operations is that they take one or more sstables and output new sstables. The types of compaction are:

-
Minor compaction
    triggered automatically in Cassandra.

Major compaction
    a user executes a compaction over all sstables on the node.

User defined compaction
    a user triggers a compaction on a given set of sstables.

Scrub
    try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you will need to run a full repair on the node.

Upgradesstables
    upgrade sstables to the latest version. Run this after upgrading to a new major version.

Cleanup
    remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been bootstrapped since that node will take ownership of some ranges from those nodes.

Secondary index rebuild
    rebuild the secondary indexes on the node.

Anticompaction
    after repair the ranges that were actually repaired are split out of the sstables that existed when repair started.

Sub range compaction
    It is possible to only compact a given sub range - this could be useful if you know a token that has been misbehaving - either gathering many updates or many deletes. (nodetool compact -st x -et y) will pick all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS the resulting sstable will end up in L0.
-
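For example, the sub range compaction described above might be invoked as follows (the token values and keyspace/table names are placeholders):

nodetool compact -st -9223372036854775808 -et 0 my_keyspace my_table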
-

When is a minor compaction triggered?

-

  • When an sstable is added to the node through flushing/streaming etc.
  • When autocompaction is enabled after being disabled (nodetool enableautocompaction)
  • When compaction adds new sstables.
  • A check for new minor compactions every 5 minutes.

-
-
-

Merging sstables

-

Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently.

-
-
-

Tombstones and Garbage Collection (GC) Grace

-
-

Why Tombstones

-

When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra.

-
-
-

Deletes without tombstones

-

Imagine a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If one of the nodes fails and our delete operation only removes existing values, we can end up with a cluster that looks like:

-
[], [], [A]
-
-
-

Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:

-
[A], [A], [A]
-
-
-

This would cause our data to be resurrected even though it had been -deleted.

-
-
-

Deletes with Tombstones

-

Starting again with a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If instead of removing data we add a tombstone record, our single node failure situation will look like this.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A]
-
-
-

Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]]
-
-
-

Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as gc_grace_seconds for every table in Cassandra.

-
-
-

The gc_grace_seconds parameter and Tombstone Removal

-

The table level gc_grace_seconds parameter controls how long Cassandra will retain tombstones through compaction events before finally removing them. This duration should directly reflect the amount of time a user expects to allow before recovering a failed node. After gc_grace_seconds has expired the tombstone may be removed (meaning there will no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to be able to drop an actual tombstone the following needs to be true:

-
    -
  • The tombstone must be older than gc_grace_seconds
  • -
  • If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older -than the tombstone containing X must be included in the same compaction. We don’t need to care if the partition is in -an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older -than the data it cannot shadow that data.
  • -
  • If the option only_purge_repaired_tombstones is enabled, tombstones are only removed if the data has also been -repaired.
  • -
-

If a node remains down or disconnected for longer than gc_grace_seconds its deleted data will be repaired back to the other nodes and re-appear in the cluster. This is basically the same as in the “Deletes without Tombstones” section. Note that tombstones will not be removed until a compaction event even if gc_grace_seconds has elapsed.

-

The default value for gc_grace_seconds is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using WITH gc_grace_seconds.

-
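For example, a hedged sketch lowering the grace period to five days (432000 seconds) on a placeholder table:

cqlsh -e "ALTER TABLE my_keyspace.my_table WITH gc_grace_seconds = 432000;"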
-
-
-

TTL

-

Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least gc_grace_seconds. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once.

-
-
-

Fully expired sstables

-

If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable, compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered tombstones once the time to live has expired) but they are not being dropped by compaction, it is likely that other sstables contain older data. There is a tool called sstableexpiredblockers that will list which sstables are droppable and which are blocking them from being dropped. This is especially useful for time series compaction with TimeWindowCompactionStrategy (and the deprecated DateTieredCompactionStrategy).

-
-
-

Repaired/unrepaired data

-

With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables.

-
-
-

Data directories

-

Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding -data getting undeleted:

-
    -
  • It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings -and each one can run compactions independently from the others.
  • -
  • Users can backup and restore a single data directory.
  • -
  • Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk backing two data directories, the big one will be limited by the small one. One workaround for this is to create more data directories backed by the big disk.
  • -
-
-
-

Single sstable tombstone compaction

-

When an sstable is written a histogram with the tombstone expiry times is created, and this is used to try to find sstables with very many tombstones and run single sstable compaction on that sstable in the hope of being able to drop tombstones in it. Before starting this, it is also checked how likely it is that any tombstones will actually be able to be dropped, based on how much this sstable overlaps with other sstables. To avoid most of these checks the compaction option unchecked_tombstone_compaction can be enabled.

-
-
-

Common options

-

There are a number of common options for all the compaction strategies:

-
-
enabled (default: true)
-
Whether minor compactions should run. Note that you can have ‘enabled’: true as a compaction option and then do ‘nodetool enableautocompaction’ to start running compactions.
-
tombstone_threshold (default: 0.2)
-
How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-
tombstone_compaction_interval (default: 86400s (1 day))
-
Since it might not be possible to drop any tombstones when doing a single sstable compaction, we need to make sure that one sstable is not constantly getting recompacted - this option states how often we should try for a given sstable.
-
log_all (default: false)
-
New detailed compaction logging, see below.
-
unchecked_tombstone_compaction (default: false)
-
The single sstable compaction has quite strict checks for whether it should be started; this option disables those checks, and for some use cases this might be needed. Note that this does not change anything for the actual compaction: tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able to drop any tombstones.
-
only_purge_repaired_tombstone (default: false)
-
Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-
min_threshold (default: 4)
-
Lower limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
max_threshold (default: 32)
-
Upper limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
-

Further, see the section on each strategy for specific additional options.
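Several of these common options can be combined in a single ALTER TABLE statement; the keyspace/table names and option values below are illustrative only:

ALTER TABLE keyspace.table WITH compaction = {
    'class': 'SizeTieredCompactionStrategy',
    'tombstone_threshold': 0.2,
    'tombstone_compaction_interval': 86400,
    'unchecked_tombstone_compaction': 'false',
    'min_threshold': 4,
    'max_threshold': 32
};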

-
-
-

Compaction nodetool commands

-

The nodetool utility provides a number of commands related to compaction:

-
-
enableautocompaction
-
Enable compaction.
-
disableautocompaction
-
Disable compaction.
-
setcompactionthroughput
-
How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this throughput.
-
compactionstats
-
Statistics about current and pending compactions.
-
compactionhistory
-
List details about the last compactions.
-
setcompactionthreshold
-
Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
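A few of these commands in their typical form; the keyspace/table names and throughput value are placeholders:

nodetool disableautocompaction keyspace table
nodetool setcompactionthroughput 64
nodetool compactionstats
nodetool setcompactionthreshold keyspace table 4 32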
-
-
-
-

Switching the compaction strategy and options using JMX

-

It is possible to switch compaction strategies and their options on just a single node using JMX; this is a great way to experiment with settings without affecting the whole cluster. The mbean is:

-
org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>
-
-
-

and the attribute to change is CompactionParameters or CompactionParametersJson if you use jconsole or jmc. The syntax for the json version is the same as you would use in an ALTER TABLE statement - for example:

-
{ 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}
-
-
-

The setting is kept until someone executes an ALTER TABLE that touches the compaction settings or restarts the node.

-
-
-

More detailed compaction logging

-

Enable with the compaction option log_all and a more detailed compaction log file will be produced in your log directory.

-
-
-

Size Tiered Compaction Strategy

-

The basic idea of SizeTieredCompactionStrategy (STCS) is to merge sstables of approximately the same size. All sstables are put in different buckets depending on their size. An sstable is added to the bucket if the size of the sstable is within bucket_low and bucket_high of the current average size of the sstables already in the bucket. This will create several buckets, and the most interesting of those buckets will be compacted. The most interesting one is decided by figuring out which bucket’s sstables take the most reads.

-
-

Major compaction

-

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%… of the total size.
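A split-output major compaction can be triggered from nodetool; the keyspace and table names are placeholders:

nodetool compact -s keyspace table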

-
-
-

STCS options

-
-
min_sstable_size (default: 50MB)
-
Sstables smaller than this are put in the same bucket.
-
bucket_low (default: 0.5)
-
How much smaller than the average size of a bucket an sstable should be before not being included in the bucket. That is, if bucket_low * avg_bucket_size < sstable_size (and the bucket_high condition holds, see below), then the sstable is added to the bucket.
-
bucket_high (default: 1.5)
-
How much bigger than the average size of a bucket an sstable should be before not being included in the bucket. That is, if sstable_size < bucket_high * avg_bucket_size (and the bucket_low condition holds, see above), then the sstable is added to the bucket.
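These ratios can be tuned per table; the example below uses placeholder names and purely illustrative values:

ALTER TABLE keyspace.table WITH compaction = {
    'class': 'SizeTieredCompactionStrategy',
    'bucket_low': 0.5,
    'bucket_high': 1.5
};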
-
-
-
-

Defragmentation

-

Defragmentation is done when many sstables are touched during a read. The result of the read is put into the memtable so that the next read will not have to touch as many sstables. This can cause writes on a read-only cluster.

-
-
-
-

Leveled Compaction Strategy

-

The idea of LeveledCompactionStrategy (LCS) is that all sstables are put into different levels where we guarantee that no overlapping sstables are in the same level. By overlapping we mean that the first/last tokens of a single sstable never overlap with those of other sstables. This means that for a SELECT we will only have to look for the partition key in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 is where sstables are streamed/flushed - no overlap guarantees are given here.

-

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. This is done by always including all overlapping sstables in the next level. For example, if we select an sstable in L3, we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that we won’t create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables cover the full range. We also can’t compact all L0 sstables with all L1 sstables in a single compaction since that can use too much memory.

-

When deciding which level to compact, LCS checks the higher levels first (with LCS, a “higher” level is one with a higher number, L0 being the lowest one) and if the level is behind, a compaction will be started in that level.

-
-

Major compaction

-

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then, once L1 is full, it continues with L2 etc. This is suboptimal and will change to create all the sstables in a high level instead, see CASSANDRA-11817.

-
-
-

Bootstrapping

-

During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data from a remote node - these writes are flushed to L0 like all other writes, and to avoid those sstables blocking the remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

-
-
-

STCS in L0

-

If LCS gets very many L0 sstables, reads are going to hit all (or most) of the L0 sstables since they are likely to be overlapping. To more quickly remedy this, LCS does STCS compactions in L0 if there are more than 32 sstables there. This should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better.

-
-
-

Starved sstables

-

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted, they might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is only enough data to actually get an L4 on the node, the leftover sstables in L6 will get starved and not compacted. This can happen if a user changes sstable_size_in_mb from 5MB to 160MB, for example. To avoid this, LCS tries to include those starved high level sstables in other compactions if there have been 25 compaction rounds where the highest level has not been involved.

-
-
-

LCS options

-
-
sstable_size_in_mb (default: 160MB)
-
The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very large partitions on the node.
-
fanout_size (default: 10)
-
The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning this option.
-
-

LCS also supports the cassandra.disable_stcs_in_l0 startup option (-Dcassandra.disable_stcs_in_l0=true) to avoid doing STCS in L0.
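Both of the table-level options listed above can be set with ALTER TABLE; placeholder names, illustrative values:

ALTER TABLE keyspace.table WITH compaction = {
    'class': 'LeveledCompactionStrategy',
    'sstable_size_in_mb': 160,
    'fanout_size': 10
};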

-
-
-
-

Time Window Compaction Strategy

-

TimeWindowCompactionStrategy (TWCS) is designed specifically for workloads where it’s beneficial to have data on disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using SizeTieredCompactionStrategy or LeveledCompactionStrategy. The basic concept is that TimeWindowCompactionStrategy will create one sstable per time window, where a window is simply calculated as the combination of two primary options:

-
-
compaction_window_unit (default: DAYS)
-
A Java TimeUnit (MINUTES, HOURS, or DAYS).
-
compaction_window_size (default: 1)
-
The number of units that make up a window.
-
-

Taken together, the operator can specify windows of virtually any size, and TimeWindowCompactionStrategy will work to create a single sstable for writes within that window. For efficiency during writing, the newest window will be compacted using SizeTieredCompactionStrategy.

-

Ideally, operators should select a compaction_window_unit and compaction_window_size pair that produces approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 day window would be a reasonable choice ('compaction_window_unit':'DAYS','compaction_window_size':3).
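In full, those options would be applied per table with a statement along these lines (placeholder keyspace/table names):

ALTER TABLE keyspace.table WITH compaction = {
    'class': 'TimeWindowCompactionStrategy',
    'compaction_window_unit': 'DAYS',
    'compaction_window_size': 3
};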

-
-

TimeWindowCompactionStrategy Operational Concerns

-

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

-
    -
  • If the user mixes old data and new data in the traditional write path, the data will be commingled in the memtables and flushed into the same SSTable, where it will remain commingled.
  • -
  • If the user’s read requests for old data cause read repairs that pull old data into the current memtable, that data will be commingled and flushed into the same SSTable.
  • -
-

While TWCS tries to minimize the impact of commingled data, users should attempt to avoid this behavior. Specifically, users should avoid queries that explicitly set the timestamp via CQL USING TIMESTAMP. Additionally, users should run frequent repairs (which stream data in such a way that it does not become commingled), and disable background read repair by setting the table’s read_repair_chance and dclocal_read_repair_chance to 0.
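On versions where these table options are still available, that last step could look like the following (placeholder names):

ALTER TABLE keyspace.table WITH read_repair_chance = 0.0 AND dclocal_read_repair_chance = 0.0;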

-
-
-

Changing TimeWindowCompactionStrategy Options

-

Operators wishing to enable TimeWindowCompactionStrategy on existing data should consider running a major compaction first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables as expected.

-

Operators wishing to change compaction_window_unit or compaction_window_size can do so, but may trigger additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 hours to 12 hours), the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple windows.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/operating/compression.html b/src/doc/3.11.5/operating/compression.html deleted file mode 100644 index 3f0446c0b..000000000 --- a/src/doc/3.11.5/operating/compression.html +++ /dev/null @@ -1,187 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compression" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compression

-

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of data on disk by compressing the SSTable in user-configurable chunks (chunk_length_in_kb). Because Cassandra SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so on).

-
-

Configuring Compression

-

Compression is configured on a per-table basis as an optional argument to CREATE TABLE or ALTER TABLE. By default, three options are relevant:

-
    -
  • class specifies the compression class - Cassandra provides three classes (LZ4Compressor, SnappyCompressor, and DeflateCompressor). The default is LZ4Compressor.
  • -
  • chunk_length_in_kb specifies the number of kilobytes of data per compression chunk. The default is 64KB.
  • -
  • crc_check_chance determines how likely Cassandra is to verify the checksum on each compression chunk during reads. The default is 1.0.
  • -
-

Users can set compression using the following syntax:

-
CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-
-

Or

-
ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};
-
-
-

Once enabled, compression can be disabled with ALTER TABLE setting enabled to false:

-
ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-
-

Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon issuing a change to the compression options via ALTER TABLE, the existing SSTables will not be modified until they are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the SSTables on disk, re-compressing the data in the process.
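For example, an immediate rewrite of a single table's SSTables could be triggered as follows (placeholder names):

nodetool upgradesstables -a keyspace table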

-
-
-

Benefits and Uses

-

Compression’s primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save in storage requirements, it often increases read and write throughput, as the CPU time spent compressing data is less than the time it would take to read or write the larger volume of uncompressed data from disk.

-

Compression is most useful in tables composed of many rows, where the rows are similar in nature. Tables containing similar text columns (such as repeated JSON blobs) often compress very well.

-
-
-

Operational Impact

-
    -
  • Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per -terabyte of data on disk, though the exact usage varies with chunk_length_in_kb and compression ratios.
  • -
  • Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as -non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.
  • -
  • The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a -way to ensure correctness of data on disk, compressed tables allow the user to set crc_check_chance (a float from -0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.
  • -
-
-
-

Advanced Use

-

Advanced users can provide their own compression class by implementing the interface at org.apache.cassandra.io.compress.ICompressor.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/operating/hardware.html b/src/doc/3.11.5/operating/hardware.html deleted file mode 100644 index 553004d78..000000000 --- a/src/doc/3.11.5/operating/hardware.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hardware Choices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hardware Choices

-

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM.

-
-

CPU

-

Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes.

-
-
-

Memory

-

Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (Java’s Xmx system parameter). In addition to the heap, Cassandra will use significant amounts of RAM off-heap for compression metadata, bloom filters, row, key, and counter caches, and an in-process page cache. Finally, Cassandra will take advantage of the operating system’s page cache, storing recently accessed portions of files in RAM for rapid re-use.

-

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest:

-
    -
  • ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
  • -
  • The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM
  • -
  • Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
  • -
  • Heaps larger than 12GB should consider G1GC
  • -
-
-
-

Disks

-

Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables.

-

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files.

-

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra’s sorted immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of SSDs by avoiding write amplification. However, when using spinning disks, it’s important that the commitlog (commitlog_directory) be on one physical disk (not simply a partition, but a physical disk), and the data files (data_file_directories) be set to a separate physical disk. By separating the commitlog from the data directory, writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request data from various SSTables on disk.
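In cassandra.yaml this separation might look like the following; the mount points are purely illustrative:

commitlog_directory: /mnt/disk1/cassandra/commitlog
data_file_directories:
    - /mnt/disk2/cassandra/data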

-

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it’s typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5.

-
-
-

Common Cloud Choices

-

Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include:

-
    -
  • m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate -workloads
  • -
  • i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs
  • -
  • m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) -storage
  • -
-

Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/operating/hints.html b/src/doc/3.11.5/operating/hints.html deleted file mode 100644 index c27aec9d4..000000000 --- a/src/doc/3.11.5/operating/hints.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hints" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.5/operating/index.html b/src/doc/3.11.5/operating/index.html deleted file mode 100644 index d05a413bd..000000000 --- a/src/doc/3.11.5/operating/index.html +++ /dev/null @@ -1,215 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Operating Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Operating Cassandra

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/operating/metrics.html b/src/doc/3.11.5/operating/metrics.html deleted file mode 100644 index c5e686dbb..000000000 --- a/src/doc/3.11.5/operating/metrics.html +++ /dev/null @@ -1,1601 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Monitoring" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Monitoring

-

Metrics in Cassandra are managed using the Dropwizard Metrics library. These metrics can be queried via JMX or pushed to external monitoring systems using a number of built-in and third-party reporter plugins.

-

Metrics are collected for a single node. It’s up to the operator to use an external monitoring system to aggregate them.

-
-

Metric Types

-

All metrics reported by Cassandra fit into one of the following types.

-
-
Gauge
-
An instantaneous measurement of a value.
-
Counter
-
A gauge for an AtomicLong instance. Typically this is consumed by monitoring the change since the last call to -see if there is a large increase compared to the norm.
-
Histogram
-

Measures the statistical distribution of values in a stream of data.

-

In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th -percentiles.

-
-
Timer
-
Measures both the rate that a particular piece of code is called and the histogram of its duration.
-
Latency
-
Special type that tracks latency (in microseconds) with a Timer plus a Counter that tracks the total latency -accrued since starting. The former is useful if you track the change in total latency since the last check. Each -metric name of this type will have ‘Latency’ and ‘TotalLatency’ appended to it.
-
Meter
-
A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving -average throughputs.
-
-
-
-

Table Metrics

-

Each table in Cassandra has metrics responsible for tracking its state and performance.

-

The metric names are all appended with the specific Keyspace and Table name.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table> name=<MetricName>
-
-
-

Note

-

There is a special table called ‘all’ without a keyspace. This represents the aggregation of metrics across -all tables and keyspaces on the node.

-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
MemtableOnHeapSizeGauge<Long>Total amount of data stored in the memtable that resides on-heap, including column related overhead and partitions overwritten.
MemtableOffHeapSizeGauge<Long>Total amount of data stored in the memtable that resides off-heap, including column related overhead and partitions overwritten.
MemtableLiveDataSizeGauge<Long>Total amount of live data stored in the memtable, excluding any data structure overhead.
AllMemtablesOnHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides on-heap.
AllMemtablesOffHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides off-heap.
AllMemtablesLiveDataSizeGauge<Long>Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead.
MemtableColumnsCountGauge<Long>Total number of columns present in the memtable.
MemtableSwitchCountCounterNumber of times flush has resulted in the memtable being switched out.
CompressionRatioGauge<Double>Current compression ratio for all SSTables.
EstimatedPartitionSizeHistogramGauge<long[]>Histogram of estimated partition size (in bytes).
EstimatedPartitionCountGauge<Long>Approximate number of keys in table.
EstimatedColumnCountHistogramGauge<long[]>Histogram of estimated number of columns.
SSTablesPerReadHistogramHistogramHistogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into account.
ReadLatencyLatencyLocal read latency for this table.
RangeLatencyLatencyLocal range scan latency for this table.
WriteLatencyLatencyLocal write latency for this table.
CoordinatorReadLatencyTimerCoordinator read latency for this table.
CoordinatorScanLatencyTimerCoordinator range scan latency for this table.
PendingFlushesCounterEstimated number of flush tasks pending for this table.
BytesFlushedCounterTotal number of bytes flushed since server [re]start.
CompactionBytesWrittenCounterTotal number of bytes written by compaction since server [re]start.
PendingCompactionsGauge<Integer>Estimate of number of pending compactions for this table.
LiveSSTableCountGauge<Integer>Number of SSTables on disk for this table.
LiveDiskSpaceUsedCounterDisk space used by SSTables belonging to this table (in bytes).
TotalDiskSpaceUsedCounterTotal disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC’d.
MinPartitionSizeGauge<Long>Size of the smallest compacted partition (in bytes).
MaxPartitionSizeGauge<Long>Size of the largest compacted partition (in bytes).
MeanPartitionSizeGauge<Long>Size of the average compacted partition (in bytes).
BloomFilterFalsePositivesGauge<Long>Number of false positives on table’s bloom filter.
BloomFilterFalseRatioGauge<Double>False positive ratio of table’s bloom filter.
BloomFilterDiskSpaceUsedGauge<Long>Disk space used by bloom filter (in bytes).
BloomFilterOffHeapMemoryUsedGauge<Long>Off-heap memory used by bloom filter.
IndexSummaryOffHeapMemoryUsedGauge<Long>Off-heap memory used by index summary.
CompressionMetadataOffHeapMemoryUsedGauge<Long>Off-heap memory used by compression meta data.
KeyCacheHitRateGauge<Double>Key cache hit rate for this table.
TombstoneScannedHistogramHistogramHistogram of tombstones scanned in queries on this table.
LiveScannedHistogramHistogramHistogram of live cells scanned in queries on this table.
ColUpdateTimeDeltaHistogramHistogramHistogram of column update time delta on this table.
ViewLockAcquireTimeTimerTime taken acquiring a partition lock for materialized view updates on this table.
ViewReadTimeTimerTime taken during the local read of a materialized view update.
TrueSnapshotsSizeGauge<Long>Disk space used by snapshots of this table including all SSTable components.
RowCacheHitOutOfRangeCounterNumber of table row cache hits that do not satisfy the query filter, thus went to disk.
RowCacheHitCounterNumber of table row cache hits.
RowCacheMissCounterNumber of table row cache misses.
CasPrepareLatencyLatency of paxos prepare round.
CasProposeLatencyLatency of paxos propose round.
CasCommitLatencyLatency of paxos commit round.
PercentRepairedGauge<Double>Percent of table data that is repaired on disk.
SpeculativeRetriesCounterNumber of times speculative retries were sent for this table.
WaitingOnFreeMemtableSpaceHistogramHistogram of time spent waiting for free memtable space, either on- or off-heap.
DroppedMutationsCounterNumber of dropped mutations on this table.
-
-
-

Keyspace Metrics

-

Each keyspace in Cassandra has metrics responsible for tracking its state and performance.

-

These metrics are the same as the Table Metrics above, only they are aggregated at the Keyspace level.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.keyspace.<MetricName>.<Keyspace>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Keyspace scope=<Keyspace> name=<MetricName>
-
-
-
-

ThreadPool Metrics

-

Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It’s important to monitor the state of these thread pools since they can tell you how saturated a -node is.

-

The metric names are all appended with the specific ThreadPool name. The thread pools are also categorized under a -specific type.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ThreadPools.<MetricName>.<Path>.<ThreadPoolName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ThreadPools scope=<ThreadPoolName> type=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ActiveTasksGauge<Integer>Number of tasks being actively worked on by this pool.
PendingTasksGauge<Integer>Number of queued tasks queued up on this pool.
CompletedTasksCounterNumber of tasks completed.
TotalBlockedTasksCounterNumber of tasks that were blocked due to queue saturation.
CurrentlyBlockedTaskCounterNumber of tasks that are currently blocked due to queue saturation but on retry will become unblocked.
MaxPoolSizeGauge<Integer>The maximum number of threads in this pool.
-

The following thread pools can be monitored.

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Native-Transport-RequeststransportHandles client CQL requests
CounterMutationStagerequestResponsible for counter writes
ViewMutationStagerequestResponsible for materialized view writes
MutationStagerequestResponsible for all other writes
ReadRepairStagerequestReadRepair happens on this thread pool
ReadStagerequestLocal reads run on this thread pool
RequestResponseStagerequestCoordinator requests to the cluster run on this thread pool
AntiEntropyStageinternalBuilds merkle tree for repairs
CacheCleanupExecutorinternalCache maintenance performed on this thread pool
CompactionExecutorinternalCompactions are run on these threads
GossipStageinternalHandles gossip requests
HintsDispatcherinternalPerforms hinted handoff
InternalResponseStageinternalResponsible for intra-cluster callbacks
MemtableFlushWriterinternalWrites memtables to disk
MemtablePostFlushinternalCleans up commit log after memtable is written to disk
MemtableReclaimMemoryinternalMemtable recycling
MigrationStageinternalRuns schema migrations
MiscStageinternalMiscellaneous tasks run here
PendingRangeCalculatorinternalCalculates token range
PerDiskMemtableFlushWriter_0internalResponsible for writing a spec (there is one of these per disk 0-N)
SamplerinternalResponsible for re-sampling the index summaries of SStables
SecondaryIndexManagementinternalPerforms updates to secondary indexes
ValidationExecutorinternalPerforms validation compaction or scrubbing
-
-
-

Client Request Metrics

-

Client requests have their own set of metrics that encapsulate the work happening at coordinator level.

-

Different types of client requests are broken down by RequestType.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ClientRequest.<MetricName>.<RequestType>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ClientRequest scope=<RequestType> name=<MetricName>
-
- --- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
RequestType:

CASRead

-
Description:

Metrics related to transactional read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction read latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
UnfinishedCommitCounterNumber of transactions that were committed on read.
ConditionNotMetCounterNumber of transaction preconditions did not match current values.
ContentionHistogramHistogramHow many contended reads were encountered
-
RequestType:

CASWrite

-
Description:

Metrics related to transactional write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction write latency.
UnfinishedCommitCounterNumber of transactions that were committed on write.
ConditionNotMetCounterNumber of transaction preconditions did not match current values.
ContentionHistogramHistogramHow many contended writes were encountered
-
RequestType:

Read

-
Description:

Metrics related to standard read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of read failures encountered.
 LatencyRead latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

RangeSlice

-
Description:

Metrics related to token range read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of range query failures encountered.
 LatencyRange query latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

Write

-
Description:

Metrics related to regular write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of write failures encountered.
 LatencyWrite latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

ViewWrite

-
Description:

Metrics related to materialized view writes.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
UnavailablesCounterNumber of unavailable exceptions encountered.
ViewReplicasAttemptedCounterTotal number of attempted view replica writes.
ViewReplicasSuccessCounterTotal number of succeeded view replica writes.
ViewPendingMutationsGauge<Long>ViewReplicasAttempted - ViewReplicasSuccess.
ViewWriteLatencyTimerTime between when mutation is applied to base table and when CL.ONE is achieved on view.
-
-
-
-

Cache Metrics

-

Cassandra caches have metrics to track the effectiveness of the caches, though the Table Metrics might be more useful.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Cache capacity in bytes.
EntriesGauge<Integer>Total number of cache entries.
FifteenMinuteCacheHitRateGauge<Double>15m cache hit rate.
FiveMinuteCacheHitRateGauge<Double>5m cache hit rate.
OneMinuteCacheHitRateGauge<Double>1m cache hit rate.
HitRateGauge<Double>All time cache hit rate.
HitsMeterTotal number of cache hits.
MissesMeterTotal number of cache misses.
MissLatencyTimerLatency of misses.
RequestsGauge<Long>Total number of cache requests.
SizeGauge<Long>Total size of occupied cache, in bytes.
-

The following caches are covered:

- ---- - - - - - - - - - - - - - - - - - - - -
NameDescription
CounterCacheKeeps hot counters in memory for performance.
ChunkCacheIn process uncompressed page cache.
KeyCacheCache for partition to sstable offsets.
RowCacheCache for rows kept in memory.
-
-

Note

-

Misses and MissLatency are only defined for the ChunkCache

-
-
-
-

CQL Metrics

-

Metrics specific to CQL prepared statement caching.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CQL.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CQL name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PreparedStatementsCountGauge<Integer>Number of cached prepared statements.
PreparedStatementsEvictedCounterNumber of prepared statements evicted from the prepared statement cache
PreparedStatementsExecutedCounterNumber of prepared statements executed.
RegularStatementsExecutedCounterNumber of non prepared statements executed.
PreparedStatementsRatioGauge<Double>Percentage of statements that are prepared vs unprepared.
-
-
-

DroppedMessage Metrics

-

Metrics specific to tracking dropped messages for different types of requests. Dropped writes are stored and retried by Hinted Handoff.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.DroppedMessages.<MetricName>.<Type>
-
JMX MBean
-
org.apache.cassandra.metrics:type=DroppedMetrics scope=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CrossNodeDroppedLatencyTimerThe dropped latency across nodes.
InternalDroppedLatencyTimerThe dropped latency within node.
DroppedMeterNumber of dropped messages.
-

The different types of messages tracked are:

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameDescription
BATCH_STOREBatchlog write
BATCH_REMOVEBatchlog cleanup (after successfully applied)
COUNTER_MUTATIONCounter writes
HINTHint replay
MUTATIONRegular writes
READRegular reads
READ_REPAIRRead repair
PAGED_SLICEPaged read
RANGE_SLICEToken range read
REQUEST_RESPONSERPC Callbacks
_TRACETracing writes
-
-
-

Streaming Metrics

-

Metrics reported during Streaming operations, such as repair, bootstrap, rebuild.

-

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
IncomingBytesCounterNumber of bytes streamed to this node from the peer.
OutgoingBytesCounterNumber of bytes streamed to the peer endpoint from this node.
-
-
-

Compaction Metrics

-

Metrics specific to Compaction work.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Compaction.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Compaction name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
BytesCompactedCounterTotal number of bytes compacted since server [re]start.
PendingTasksGauge<Integer>Estimated number of compactions remaining to perform.
CompletedTasksGauge<Long>Number of completed compactions since server [re]start.
TotalCompactionsCompletedMeterThroughput of completed compactions since server [re]start.
PendingTasksByTableNameGauge<Map<String, Map<String, Integer>>>Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in Table Metrics.
-
-
-

CommitLog Metrics

-

Metrics specific to the CommitLog

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CommitLog.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CommitLog name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CompletedTasksGauge<Long>Total number of commit log messages written since [re]start.
PendingTasksGauge<Long>Number of commit log messages written but yet to be fsync’d.
TotalCommitLogSizeGauge<Long>Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocationTimerTime spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommitTimerThe time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval.
-
-
-

Storage Metrics

-

Metrics specific to the storage engine.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Storage.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Storage name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ExceptionsCounterNumber of internal exceptions caught. Under normal conditions this should be zero.
LoadCounterSize, in bytes, of the on disk data size this node manages.
TotalHintsCounterNumber of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgressCounterNumber of hints attempting to be sent currently.
-
-
-

HintedHandoff Metrics

-

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
Hints_created-<PeerIP>CounterNumber of hints on disk for this peer.
Hints_not_stored-<PeerIP>CounterNumber of hints not stored for this peer, due to being down past the configured hint window.
-
-
-

SSTable Index Metrics

-

Metrics specific to the SSTable index metadata.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry
-
JMX MBean
-
org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
IndexedEntrySizeHistogramHistogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCountHistogramHistogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGetsHistogramHistogram of the number index seeks performed per SSTable.
-
-
-

BufferPool Metrics

-

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.BufferPool.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=BufferPool name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
SizeGauge<Long>Size, in bytes, of the managed buffer pool
MissesMeterThe rate of misses in the pool. The higher this is the more allocations incurred.
-
-
-

Client Metrics

-

Metrics specific to client management.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Client.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Client name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
connectedNativeClientsCounterNumber of clients connected to this node's native protocol server
connectedThriftClientsCounterNumber of clients connected to this node's thrift protocol server
-
-
-

JVM Metrics

-

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using Metric Reporters.

-
-

BufferPool

-
-
Metric Name
-
jvm.buffers.<direct|mapped>.<MetricName>
-
JMX MBean
-
java.nio:type=BufferPool name=<direct|mapped>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Estimated total capacity of the buffers in this pool
CountGauge<Long>Estimated number of buffers in the pool
UsedGauge<Long>Estimated memory that the Java virtual machine is using for this buffer pool
-
-
-

FileDescriptorRatio

-
-
Metric Name
-
jvm.fd.<MetricName>
-
JMX MBean
-
java.lang:type=OperatingSystem name=<OpenFileDescriptorCount|MaxFileDescriptorCount>
-
- ----- - - - - - - - - - - - - -
NameTypeDescription
UsageRatioRatio of used to total file descriptors
-
-
-

GarbageCollector

-
-
Metric Name
-
jvm.gc.<gc_type>.<MetricName>
-
JMX MBean
-
java.lang:type=GarbageCollector name=<gc_type>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
CountGauge<Long>Total number of collections that have occurred
TimeGauge<Long>Approximate accumulated collection elapsed time in milliseconds
-
-
-

Memory

-
-
Metric Name
-
jvm.memory.<heap/non-heap/total>.<MetricName>
-
JMX MBean
-
java.lang:type=Memory
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-

MemoryPool

-
-
Metric Name
-
jvm.memory.pools.<memory_pool>.<MetricName>
-
JMX MBean
-
java.lang:type=MemoryPool name=<memory_pool>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-
-

JMX

-

Any JMX based client can access metrics from cassandra.

-

If you wish to access JMX metrics over http it’s possible to download Mx4jTool and -place mx4j-tools.jar into the classpath. On startup you will see in the log:

-
HttpAdaptor version 3.0.2 started on port 8081
-
-
-

To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -conf/cassandra-env.sh and uncomment:

-
#MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0"
-
-#MX4J_PORT="-Dmx4jport=8081"
-
-
-
-
-

Metric Reporters

-

As mentioned at the top of this section on monitoring, the Cassandra metrics can be exported to a number of monitoring systems via a number of built-in and third-party reporter plugins.

-

The configuration of these plugins is managed by the metrics reporter config project. There is a sample configuration file located at -conf/metrics-reporter-config-sample.yaml.

-

Once configured, you simply start Cassandra with the flag -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml. The specified .yaml file plus any third-party reporter jars must all be in Cassandra’s classpath.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/operating/read_repair.html b/src/doc/3.11.5/operating/read_repair.html deleted file mode 100644 index 10d84a013..000000000 --- a/src/doc/3.11.5/operating/read_repair.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Read repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.5/operating/repair.html b/src/doc/3.11.5/operating/repair.html deleted file mode 100644 index 295689716..000000000 --- a/src/doc/3.11.5/operating/repair.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.5/operating/security.html b/src/doc/3.11.5/operating/security.html deleted file mode 100644 index e49e69b3f..000000000 --- a/src/doc/3.11.5/operating/security.html +++ /dev/null @@ -1,446 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-

There are three main components to the security features provided by Cassandra:

-
    -
  • TLS/SSL encryption for client and inter-node communication
  • -
  • Client authentication
  • -
  • Authorization
  • -
-
-

TLS/SSL Encryption

-

Cassandra provides secure communication between a client machine and a database cluster and between nodes within a cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for client-to-node and node-to-node encryption are managed separately and may be configured independently.

-

In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can be overridden using the settings in cassandra.yaml, but this is not recommended unless there are policies in place which dictate certain settings, or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be updated.

-

FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in cassandra.yaml. See the Java documentation on FIPS for more details.

-

For information on generating the keystore and truststore files used in SSL communications, see the Java documentation on creating keystores.

-
-

Inter-node Encryption

-

The settings for managing inter-node encryption are found in cassandra.yaml in the server_encryption_options section. To enable inter-node encryption, change the internode_encryption setting from its default value of none to one of: rack, dc or all.
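A minimal sketch of what this could look like in cassandra.yaml; the keystore/truststore paths and passwords are placeholders:

server_encryption_options:
    internode_encryption: dc
    keystore: /path/to/keystore.jks        # placeholder path
    keystore_password: mykeypass           # placeholder
    truststore: /path/to/truststore.jks    # placeholder path
    truststore_password: mytrustpass       # placeholder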

-
-
-

Client to Node Encryption

-

The settings for managing client to node encryption are found in cassandra.yaml in the client_encryption_options -section. There are two primary toggles here for enabling encryption, enabled and optional.

-
    -
  • If neither is set to true, client connections are entirely unencrypted.
  • -
  • If enabled is set to true and optional is set to false, all client connections must be secured.
  • -
  • If both options are set to true, both encrypted and unencrypted connections are supported using the same port. -Client connections using encryption with this configuration will be automatically detected and handled by the server.
  • -
-

As an alternative to the optional setting, separate ports can also be configured for secure and insecure connections where operational requirements demand it. To do so, set optional to false and use the native_transport_port_ssl setting in cassandra.yaml to specify the port to be used for secure client communication.
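A sketch of the corresponding cassandra.yaml settings for requiring encrypted client connections, again with placeholder keystore values:

client_encryption_options:
    enabled: true
    optional: false
    keystore: /path/to/keystore.jks    # placeholder path
    keystore_password: mykeypass       # placeholder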

-
-
-
-

Roles

-

Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -role_manager setting in cassandra.yaml. The default setting uses CassandraRoleManager, an implementation -which stores role information in the tables of the system_auth keyspace.

-

See also the CQL documentation on roles.

-
-
-

Authentication

-

Authentication is pluggable in Cassandra and is configured using the authenticator setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthenticator which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra’s permissions subsystem, so if authentication is disabled, effectively so are permissions.

-

The default distribution also includes PasswordAuthenticator, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication.

-
-

Enabling Password Authentication

-

Before enabling client authentication on the cluster, client applications should be pre-configured with their intended credentials. When a connection is initiated, the server will only ask for credentials once authentication is enabled, so setting up the client-side config in advance is safe. In contrast, as soon as a server has authentication enabled, any connection attempt without proper credentials will be rejected, which may cause availability problems for client applications. Once clients are set up and ready for authentication to be enabled, follow this procedure to enable it on the cluster.

-

Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps:

-
  1. Open a cqlsh session and change the replication factor of the system_auth keyspace. By default, this keyspace uses SimpleStrategy and a replication_factor of 1. It is recommended to change this for any non-trivial deployment to ensure that, should nodes become unavailable, login is still possible. Best practice is to configure a replication factor of 3 to 5 per-DC.
ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3};
-
-
-
  2. Edit cassandra.yaml to change the authenticator option like so:
-
authenticator: PasswordAuthenticator
-
-
-
  3. Restart the node.
  4. Open a new cqlsh session using the credentials of the default superuser:
-
cqlsh -u cassandra -p cassandra
-
-
-
  5. During login, the credentials for the default superuser are read with a consistency level of QUORUM, whereas those for all other users (including superusers) are read at LOCAL_ONE. In the interests of performance and availability, as well as security, operators should create another superuser and disable the default one. This step is optional, but highly recommended. While logged in as the default superuser, create another superuser role which can be used to bootstrap further configuration.
-
# create a new superuser
CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super';
-
-
-
  6. Start a new cqlsh session, this time logging in as the new superuser (dba in this example), and disable the default superuser.
-
ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false;
-
-
-
  7. Finally, set up the roles and credentials for your application users with CREATE ROLE statements.
-

At the end of these steps, the one node is configured to use password authentication. To roll that out across the cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be fully enabled throughout the cluster.

-

Note that using PasswordAuthenticator also requires the use of CassandraRoleManager.

-

See also: Setting credentials for internal authentication, CREATE ROLE, ALTER ROLE, ALTER KEYSPACE and GRANT PERMISSION.

-
-
-
-

Authorization

-

Authorization is pluggable in Cassandra and is configured using the authorizer setting in cassandra.yaml. Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthorizer which performs no checking and so effectively grants all permissions to all roles. This must be used if AllowAllAuthenticator is the configured authenticator.

-

The default distribution also includes CassandraAuthorizer, which does implement full permissions management functionality and stores its data in Cassandra system tables.

-
-

Enabling Internal Authorization

-

Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on a node which is not processing client requests.

-

The following assumes that authentication has already been enabled via the process outlined in Enabling Password Authentication. Perform these steps to enable internal authorization across the cluster:

-
  1. On the selected node, edit cassandra.yaml to change the authorizer option like so:
-
authorizer: CassandraAuthorizer
-
-
-
  2. Restart the node.
  3. Open a new cqlsh session using the credentials of a role with superuser privileges:
-
cqlsh -u dba -p super
-
-
-
  4. Configure the appropriate access privileges for your clients using GRANT PERMISSION statements. On the other nodes, until the configuration is updated and the node restarted, this will have no effect, so disruption to clients is avoided.
-
GRANT SELECT ON ks.t1 TO db_user;
-
-
-
  5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node restarts and clients reconnect, the enforcement of the granted permissions will begin.
-

See also: GRANT PERMISSION, GRANT ALL and REVOKE PERMISSION.

-
-
-
-

Caching

-

Enabling authentication and authorization places additional load on the cluster by frequently reading from the system_auth tables. Furthermore, these reads are in the critical paths of many client operations, and so have the potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role details are cached for a configurable period. The caching can be configured (and even disabled) from cassandra.yaml or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX are not persistent and the settings will be re-read from cassandra.yaml when the node is restarted.

-

Each cache has 3 options which can be set:

-
-
Validity Period
    Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache.
Refresh Rate
    Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a shorter time than the validity period.
Max Entries
    Controls the upper bound on cache size.
-
-

The naming for these options in cassandra.yaml follows the convention:

-
  • <type>_validity_in_ms
  • <type>_update_interval_in_ms
  • <type>_cache_max_entries

Where <type> is one of credentials, permissions, or roles.
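For example, a sketch of the roles cache settings in cassandra.yaml, following the convention above (the values are illustrative only):

roles_validity_in_ms: 2000               # cache entries expire after 2 seconds
roles_update_interval_in_ms: 1000        # background refresh every second
roles_cache_max_entries: 1000            # upper bound on the cache size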

-

As mentioned, these are also exposed via JMX in the mbeans under the org.apache.cassandra.auth domain.

-
-
-

JMX access

-

Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two providers are available: the first is based on standard JMX security and the second integrates more closely with Cassandra’s own auth subsystem.

-

The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit cassandra-env.sh (or cassandra-env.ps1 on Windows) to change the LOCAL_JMX setting to no. Under the standard configuration, when remote JMX connections are enabled, standard JMX authentication is also switched on.

-

Note that by default, local-only connections are not subject to authentication, but this can be enabled.

-

If enabling remote connections, it is recommended to also use SSL connections.

-

Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as nodetool, are correctly configured and working as expected.

-
-

Standard JMX Auth

-

Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in cassandra-env.sh by the line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

Edit the password file to add username/password pairs:

-
jmx_user jmx_password
-
-
-

Secure the credentials file so that only the user running the Cassandra process can read it:

-
$ chown cassandra:cassandra /etc/cassandra/jmxremote.password
$ chmod 400 /etc/cassandra/jmxremote.password
-
-
-

Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a simple access file, uncomment this line in cassandra-env.sh:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

Then edit the access file to grant your JMX user readwrite permission:

-
jmx_user readwrite
-
-
-

Cassandra must be restarted to pick up the new settings.

-

See also: Using File-Based Password Authentication In JMX

-
-
-

Cassandra Integrated Auth

-

An alternative to the out-of-the-box JMX auth is to use Cassandra’s own authentication and/or authorization providers for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not available until after a node has joined the ring, because the auth subsystem is not fully configured until that point. However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is recommended, where possible, to use local-only JMX auth during bootstrap and then, if remote connectivity is required, to switch to integrated auth once the node has joined the ring and initial setup is complete.

-

With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates can be managed centrally using just cqlsh. Furthermore, fine-grained control over exactly which operations are permitted on particular MBeans can be achieved via GRANT PERMISSION.

-

To enable integrated authentication, edit cassandra-env.sh to uncomment these lines:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
-
-
-

And disable the JMX standard auth by commenting this line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

To enable integrated authorization, uncomment this line:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
-
-
-

Check standard access control is off by ensuring this line is commented out:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

With integrated authentication and authorization enabled, operators can define specific roles and grant them access to the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as jconsole or jmc in read-only mode would be defined as:

-
CREATE ROLE jmx WITH LOGIN = false;
GRANT SELECT ON ALL MBEANS TO jmx;
GRANT DESCRIBE ON ALL MBEANS TO jmx;
GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx;
GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx;

# Grant the jmx role to one with login permissions so that it can access the JMX tooling
CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false;
GRANT jmx TO ks_user;
-
-
-

Fine-grained access control to individual MBeans is also supported:

-
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user;
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner;
-
-
-

This permits the ks_user role to invoke methods on the MBean representing a single table in test_keyspace, while granting the same permission for all table level MBeans in that keyspace to the ks_owner role.

-

Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so no further restarts are required if permissions are altered.

-

See also: Permissions.

-
-
-

JMX With SSL

-

JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit the relevant lines in cassandra-env.sh (or cassandra-env.ps1 on Windows) to uncomment and set the values of these properties as required:

-
-
com.sun.management.jmxremote.ssl
    set to true to enable SSL
com.sun.management.jmxremote.ssl.need.client.auth
    set to true to enable validation of client certificates
com.sun.management.jmxremote.registry.ssl
    enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub
com.sun.management.jmxremote.ssl.enabled.protocols
    by default, the protocols supported by the JVM will be used; override with a comma-separated list. Note that this is not usually necessary and using the defaults is the preferred option.
com.sun.management.jmxremote.ssl.enabled.cipher.suites
    by default, the cipher suites supported by the JVM will be used; override with a comma-separated list. Note that this is not usually necessary and using the defaults is the preferred option.
javax.net.ssl.keyStore
    set the path on the local filesystem of the keystore containing server private keys and public certificates
javax.net.ssl.keyStorePassword
    set the password of the keystore file
javax.net.ssl.trustStore
    if validation of client certificates is required, use this property to specify the path of the truststore containing the public certificates of trusted clients
javax.net.ssl.trustStorePassword
    set the password of the truststore file
-
-

See also: Oracle Java7 Docs, Monitor Java with JMX

-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/operating/snitch.html b/src/doc/3.11.5/operating/snitch.html deleted file mode 100644 index bb92451ac..000000000 --- a/src/doc/3.11.5/operating/snitch.html +++ /dev/null @@ -1,176 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Snitch" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
Snitch

-

In Cassandra, the snitch has two functions:

-
  • it teaches Cassandra enough about your network topology to route requests efficiently.
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping machines into “datacenters” and “racks.” Cassandra will do its best not to have more than one replica on the same “rack” (which may not actually be a physical location).
-
-

Dynamic snitching

-

The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is configured with the following properties in cassandra.yaml (a configuration sketch follows this list):

-
    -
  • dynamic_snitch: whether the dynamic snitch should be enabled or disabled.
  • dynamic_snitch_update_interval_in_ms: controls how often to perform the more expensive part of host score calculation.
  • dynamic_snitch_reset_interval_in_ms: controls how often to reset all host scores, allowing a bad host to possibly recover.
  • dynamic_snitch_badness_threshold: The badness threshold will control how much worse the pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned host was 20% worse than the fastest.
-
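A sketch of these settings as they might appear in cassandra.yaml (the values are illustrative; check your own cassandra.yaml for the shipped defaults):

dynamic_snitch: true                         # enable the dynamic snitch
dynamic_snitch_update_interval_in_ms: 100    # how often to recalculate host scores
dynamic_snitch_reset_interval_in_ms: 600000  # how often to reset all host scores
dynamic_snitch_badness_threshold: 0.1        # how much worse a pinned host may get before other replicas are preferred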
-
-

Snitch classes

-

The endpoint_snitch parameter in cassandra.yaml should be set to the class that implements IEndpointSnitch, which will be wrapped by the dynamic snitch and decide whether two endpoints are in the same data center or on the same rack. Out of the box, Cassandra provides the following snitch implementations (a configuration sketch follows this list):

-
-
GossipingPropertyFileSnitch
    This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in cassandra-rackdc.properties and propagated to other nodes via gossip. If cassandra-topology.properties exists, it is used as a fallback, allowing migration from the PropertyFileSnitch.
SimpleSnitch
    Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for single-datacenter deployments.
PropertyFileSnitch
    Proximity is determined by rack and data center, which are explicitly configured in cassandra-topology.properties.
Ec2Snitch
    Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this will not work across multiple regions.
Ec2MultiRegionSnitch
    Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the public IP as well). You will need to open the storage_port or ssl_storage_port on the public IP firewall (for intra-Region traffic, Cassandra will switch to the private IP after establishing a connection).
RackInferringSnitch
    Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each node’s IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an example of writing a custom Snitch class and is provided in that spirit.
-
-
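For illustration, a minimal cassandra.yaml sketch selecting the production-recommended snitch; the dc and rack values shown in the comment are examples only:

endpoint_snitch: GossipingPropertyFileSnitch
# each node's cassandra-rackdc.properties then defines its location, for example:
#   dc=DC1
#   rack=RAC1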
-
\ No newline at end of file diff --git a/src/doc/3.11.5/operating/topo_changes.html b/src/doc/3.11.5/operating/topo_changes.html deleted file mode 100644 index 55b511a65..000000000 --- a/src/doc/3.11.5/operating/topo_changes.html +++ /dev/null @@ -1,214 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Adding, replacing, moving and removing nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -

Adding, replacing, moving and removing nodes

-
-

Bootstrap

-

Adding new nodes is called “bootstrapping”. The num_tokens parameter defines the number of virtual nodes (tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) the node will become responsible for.

-
-

Token allocation

-

With the default token allocation algorithm the new node will pick num_tokens random tokens to become responsible for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but it also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with acceptable overhead.

-

On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, the new node must be started with the JVM option -Dcassandra.allocate_tokens_for_keyspace=<keyspace>, where <keyspace> is the keyspace from which the algorithm can find the load information to optimize token assignment for.

-
-

Manual token assignment

-

You may specify a comma-separated list of tokens manually with the initial_token cassandra.yaml parameter, and if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment with an external tool or when restoring a node with its previous tokens.

-
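As a sketch, the relevant cassandra.yaml parameters look as follows; the values are illustrative and only one of the two approaches should be used:

num_tokens: 256                     # number of virtual nodes assigned during bootstrap
# initial_token: <token1>,<token2>  # alternatively, list tokens explicitly and skip token allocation
# (on 3.0+, start the node with -Dcassandra.allocate_tokens_for_keyspace=<keyspace> for load-aware allocation)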
-
-
-

Range streaming

-

After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become responsible for to stream data from. By default it will stream from the primary replica of each token range in order to guarantee data in the new node will be consistent with the current state.

-

In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and potentially miss data from an unavailable replica, set the JVM flag -Dcassandra.consistent.rangemovement=false.

-
-
-

Resuming failed/hung bootstrap

-

On 2.2+, if the bootstrap process fails, it’s possible to resume bootstrap from the previously saved state by calling nodetool bootstrap resume. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply restarting the node. In order to clean up bootstrap state and start fresh, you may set the JVM startup flag -Dcassandra.reset_bootstrap_progress=true.

-

On lower versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart the bootstrap process again.

-
-
-

Manual bootstrapping

-

It’s possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter auto_bootstrap: false. This may be useful when restoring a node from a backup or creating a new data-center.

-
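A sketch of the setting; as a hidden parameter, auto_bootstrap is not present in the default cassandra.yaml and must be added explicitly:

auto_bootstrap: false              # skip bootstrapping; the node joins the ring immediately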
-
-
-

Removing nodes

-

You can take a node out of the cluster with nodetool decommission (issued to the live node being removed), or remove a dead one with nodetool removenode (issued to any other machine). This will assign the ranges the old node was responsible for to other nodes, and replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If removenode is used, the data will stream from the remaining replicas.

-

No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at a different token on the ring, it should be removed manually.

-
-
-

Moving nodes

-

When num_tokens: 1 it’s possible to move the node’s position in the ring with nodetool move. Moving is both a convenience over, and more efficient than, decommission + bootstrap. After moving a node, nodetool cleanup should be run to remove any unnecessary data.

-
-
-

Replacing a dead node

-

In order to replace a dead node, start Cassandra with the JVM startup flag -Dcassandra.replace_address_first_boot=<dead_node_ip>. Once this property is enabled the node starts in a hibernate state, during which all the other nodes will see this node as down.

-

The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. The main difference from normal bootstrapping of a new node is that this new node will not accept any writes during this phase.

-

Once the bootstrapping is complete, the node will be marked “UP”; we rely on hinted handoff to make this node consistent (since it does not accept writes from the start of the bootstrap).

-
-

Note

-

If the replacement process takes longer than max_hint_window_in_ms you MUST run repair to make the replaced node consistent again, since it missed ongoing writes during bootstrapping.

-
-
-
-

Monitoring progress

-

Bootstrap, replace, move and remove progress can be monitored using nodetool netstats which will show the progress of the streaming operations.

-
-
-

Cleanup data after range movements

-

As a safety measure, Cassandra does not automatically remove data from nodes that “lose” part of their token range due to a range movement operation (bootstrap, move, replace). Run nodetool cleanup on the nodes that lost ranges to the joining node when you are satisfied the new node is up and working. If you do not do this, the old data will still be counted against the load on that node.

-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/search.html b/src/doc/3.11.5/search.html deleted file mode 100644 index 7e87d673c..000000000 --- a/src/doc/3.11.5/search.html +++ /dev/null @@ -1,103 +0,0 @@ ---- -layout: docpage - -title: "Search" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "" -doc-header-links: ' - -' -doc-search-path: "#" - -extra-footer: ' - - - - -' - ---- -
\ No newline at end of file diff --git a/src/doc/3.11.5/searchindex.js deleted file mode 100644 index c7b0903f3..000000000 --- a/src/doc/3.11.5/searchindex.js +++ /dev/null @@ -1 +0,0 @@
nd:[31,34],forev:41,forget:5,fork:28,form:[6,10,11,12,14,19,62,116,166],formal:12,format:[6,10,17,21,24,25,27,28,46,52,61,80,101,122,166,168],former:[6,46],forward:[6,11],found:[5,12,14,15,28,29,31,35,49,52,163,171],four:13,fqcn:29,fraction:6,frame:6,framework:[25,29],franc:[13,21],free:[6,11,21,24,26,46],freed:4,freenod:8,frequenc:[6,40],frequent:[6,29,36,41,49],fresh:51,friendli:[6,21,29],from:[0,4,6,9,11,12,13,14,15,17,18,19,21,24,27,28,29,33,35,36,38,40,41,42,43,46,49,50,51,54,55,57,58,60,65,67,73,76,77,83,86,87,90,92,95,99,101,103,107,108,115,117,119,122,123,124,126,127,130,131,132,133,135,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],fromjson:15,froom:21,frozen:[9,10,11,13,14,21],fruit:[21,28],fsync:[6,46],full:[6,9,11,13,16,19,28,34,35,41,42,49,52,70,80,117,123,132,135],full_nam:166,fulli:[6,11,12,14,45,49],function_cal:12,function_nam:[13,14,19],fundament:17,further:[5,6,11,18,21,41,45,49],furthermor:[10,13,49],futur:[6,9,10,11,21,28,82,117,162],g1gc:43,game:[14,21],garbag:[11,43,45,46,87],garbage_collect:163,garbagecollect:117,gather:41,gaug:46,gaurante:0,gc_grace_second:11,gc_type:46,gce:[30,43],gcg:6,gcstat:117,gener:[0,2,4,6,8,11,12,13,14,17,21,25,26,27,28,30,43,49,52,103,139,153],genuin:23,get:[6,8,24,26,28,30,34,36,38,41,92,93,94,97,100,117],getbatchlogreplaythrottl:117,getcompactionthreshold:117,getcompactionthroughput:117,getconcurr:117,getconcurrentcompactor:117,getconcurrentviewbuild:117,getendpoint:117,getint:14,getinterdcstreamthroughput:117,getlocalhost:[6,30],getlogginglevel:117,getlong:14,getmaxhintwindow:117,getpartition:23,getreplica:117,getse:117,getsstabl:117,getstr:14,getstreamthroughput:117,gettempsstablepath:23,getter:[19,23],gettimeout:117,gettraceprob:117,gib:[62,116,166],gist:23,git:[5,24,26,28],github:[23,24,28,29],give:[18,19,21,28,29,36,52],given:[0,6,11,12,13,14,16,21,28,38,41,49,51,52,58,60,65,67,77,90,99,103,117,123,143,150,154,158,165],global:[6,52,117,141],gmt:21,goal:[6,41],gocassa:32,gocql:32,going:[6,28,41],gone:6,good:[6,23,28,29,30,52],googl:[23,52],gori:30,gossip:[2,6,30,46,50,71,81,105,117,161],gossipinfo:117,gossipingpropertyfilesnitch:[6,50],gossipstag:46,got:6,gp2:43,gpg:34,grace:45,grai:21,grain:49,grammar:[11,12],grant:[6,9,49],grant_permission_stat:12,grant_role_stat:12,granular:[6,87],graph:19,gravesit:11,great:[28,41],greater:[0,6,21,30,50,146,147],greatli:6,green:21,group:[6,10,11,19,41,46,49,50],group_by_claus:13,grow:21,guarante:[0,2,11,13,14,21,28,36,38,41,51,52],guid:[6,26],guidelin:[10,25,43],had:[9,10,41],half:[6,24,30],hand:[6,13,43],handl:[6,14,25,27,28,30,40,43,46,49,80],handoff:[6,46,51,72,106,117,148],handoffwindow:117,hang:28,happen:[6,13,23,24,28,36,41,46,50],happi:28,happili:43,hard:[6,14,41,43],harder:6,hardwar:[6,36,45],has:[0,4,6,10,11,12,13,14,18,19,21,23,28,30,41,43,46,49,50,52],hash:[4,6,41],hashcod:23,haskel:33,hasn:80,have:[0,5,6,9,10,11,12,13,14,15,18,19,21,23,24,25,26,28,29,30,31,34,38,41,42,43,46,49,50,80,139],haven:28,hayt:32,hdd:[6,43],head:28,header:[26,52],headroom:6,heap:[4,6,26,31,36,38,42,43,46],heap_buff:6,heavi:6,heavili:43,held:[6,43,117,121],help:[5,6,10,28,29,35,54,56,117,155],helper:29,henc:[5,6,11,21],here:[6,24,29,30,32,41,46,49],hex:[12,17,101],hexadecim:[10,12,101],hibern:51,hidden:51,hide:[23,25],hierarch:19,hierarchi:19,high:[0,6,30,41,43],higher:[0,19,28,38,41,46,51,119,167],highest:41,highli:[28,30,43,49],hint:[0,6,11,12,30,31,36,45,46,51,72,73,82,83,98,106,117,118,137,148,151,162,169],hintedhandoff:[6,45],hintedhandoff
manag:46,hints_creat:46,hints_directori:31,hints_not_stor:46,hintsdispatch:46,histogram:[41,46,117,120,165],histor:28,histori:[23,59,61,117],hit:[6,41,46],hitrat:46,hoc:29,hold:[0,6,10,13,19,30,41,52],home:[21,52],hope:41,hopefulli:28,host:[6,31,36,46,50,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],hostnam:[6,30,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],hot:[6,46],hotspot:11,hotspotdiagnost:49,hottest:6,hour:[6,21,28,41],hourli:80,how:[0,5,6,7,8,11,12,21,25,26,27,28,29,33,35,36,41,42,46,50,52,80],howev:[6,9,10,11,12,13,15,17,18,21,28,29,30,31,34,38,42,43,49,52],hsha:6,html:6,http:[6,23,24,26,34,46],httpadaptor:46,hub:30,human:[11,62,116,166],hypothet:24,iauthent:6,iauthor:6,icompressor:42,idea:[6,14,27,28,29,30,41,52],ideal:[6,29,41,49],idempot:[13,21],idemptot:21,ident:0,identifi:[6,9,10,11,13,14,15,16,19,20,21],idiomat:8,idl:6,ieee:[17,21],iendpointsnitch:[6,50],ignor:[0,6,10,14,21,23,52,166],iinternodeauthent:6,illeg:14,illustr:19,imag:21,imagin:41,immedi:[6,11,21,28,38,42,57,117],immut:[4,30,42,43],impact:[6,11,25,41,45,49],implement:[6,10,13,14,18,19,23,29,30,40,42,49,50],implementor:6,impli:[11,12,21],implic:[0,49],implicitli:14,import_:52,imposs:41,improv:[0,6,11,21,28,29,38,41,43,50,51,52],inact:30,includ:[4,6,10,11,12,13,18,19,21,23,28,40,41,43,46,49,52,76,133,170],included_categori:76,included_keyspac:76,included_us:76,inclus:[28,132],incom:6,incomingbyt:46,incompat:[6,10],incomplet:25,inconsist:[0,30],incorrect:30,increas:[6,11,30,38,41,42,43,46,50,51,132],increment:[6,10,13,21,28,41,68,78,117,133,139,159],incur:[13,21,46],indent:23,independ:[11,41,43,49],index:[4,6,9,10,11,12,13,15,21,36,41,45,52,117,123],index_build:163,index_identifi:16,index_nam:16,index_summari:163,index_summary_off_heap_memory_us:166,indexclass:16,indexedentrys:46,indexinfocount:46,indexinfoget:46,indexnam:123,indexsummaryoffheapmemoryus:46,indic:[5,6,12,13,23,28,30,132],indirectli:13,individu:[6,10,14,21,28,29,43,49],induc:13,inequ:[10,13],inet:[9,11,14,17,21],inetaddress:[6,30],inexpens:43,infin:[9,10,12],influenc:11,info:[6,31,46,65,117],inform:[4,6,12,13,21,35,49,50,51,52,56,59,85,105,107,108,109,116,117,138,155,157],ingest:6,ingestr:52,inher:[11,21],inherit:19,init:46,initcond:[9,14],initi:[6,14,23,25,40,46,49,52,117,150],initial_token:51,input:[9,10,14,17,21,25,52],inputd:21,inreleas:34,insensit:[11,12],insert:[6,9,10,11,12,14,15,16,19,21,30,33,36,43,52],insert_stat:[12,13],insid:[6,11,12,13,21,23,52],inspect:[6,26,52],instabl:6,instal:[6,20,30,33,36,52],instanc:[6,10,11,12,13,14,16,18,19,20,21,26,29,30,40,41,43,46],instantan:46,instanti:10,instantli:6,instead:[10,11,13,18,21,23,30,41,138,157],instruct:[6,8,11,24,26,36],instrument:49,intasblob:13,integ:[0,10,11,12,13,17,21,46],integr:[27,29,36],intellij:[23,27],intend:[25,49],intens:[6,29,30],intent:2
5,inter:[6,96,117,149],interact:[29,35,52],interest:[0,41,49],interfac:[6,10,14,23,30,31,42,49],intern:[6,9,11,13,18,21,25,30,43,46],internaldroppedlat:46,internalresponsestag:46,internet:6,internod:[6,30],internode_encrypt:[6,49],internodeconnect:[103,153],internodeus:[103,153],interpret:[10,21,52],interrupt:30,interv:[6,9,46],intra:[6,46,50],intrins:21,introduc:[6,10,17,28,51],introduct:[10,19,29],intvalu:14,invalid:[6,13,19,25,49,108,110,111,112,117],invalidatecountercach:117,invalidatekeycach:117,invalidaterowcach:117,invertedindex:20,investig:6,invoc:14,invok:[24,34,49,171],involv:[6,13,41,42,49],ioerror:23,ip1:6,ip2:6,ip3:6,ip_address:55,ipv4:[6,17,21,30],ipv6:[6,17,21],irc:[5,28,36],irolemanag:6,irrevers:[11,21],isn:[0,18,23,28,30],iso:21,isol:[6,11,13],issu:[0,6,19,24,28,29,30,38,41,42,132],item:[12,21,25,26],iter:[0,6],its:[4,6,11,12,13,14,21,26,30,41,46,49,50,51],itself:[6,11,16,30,34],iv_length:6,jaa:49,jacki:24,jamm:26,januari:21,jar:[14,23,26,46],java7:49,java:[6,14,20,21,23,26,28,33,34,36,40,41,43,46,49,117,155],javaag:26,javadoc:[23,25],javas:6,javascript:[6,14],javax:49,jbod:43,jce8:6,jce:6,jcek:6,jconsol:[36,41,49],jdk:6,jdwp:26,jenkin:[26,29],jetbrain:26,jira:[5,6,25,28,29,40],jkskeyprovid:6,jmc:[41,49],jmx:[6,19,36,45,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],jmx_password:49,jmx_user:49,jmxremot:49,job:[28,57,87,130,132,139,170],job_thread:132,john:[13,21],join:[6,8,13,36,41,49,51,117],joss:13,jpg:21,jsmith:21,json:[9,10,13,15,36,41,42,61,166,168],json_claus:13,jsr:[6,14],jsse:6,jsserefguid:6,judgement:23,junit:[23,26,29],jurisdict:6,just:[6,14,19,26,28,29,30,41,49],jvm:[6,20,26,30,31,45,49,51],jvm_extra_opt:26,jvm_opt:[31,49],jvmstabilityinspector:25,keep:[6,8,11,23,28,30,41,46,108],keepal:[6,30],kei:[4,6,9,10,13,14,17,21,29,30,34,40,41,42,43,46,49,57,95,99,101,111,117,121,141,142,166],kept:[6,41,46],kernel:[6,30],key_alia:6,key_password:6,key_provid:6,keycach:46,keycachehitr:46,keyserv:34,keyspac:[0,6,9,10,12,14,15,16,19,21,36,38,41,42,45,49,51,52,57,58,60,65,67,76,77,86,87,90,95,99,101,108,117,119,121,122,123,124,130,132,138,139,143,156,157,158,165,166,167,170,171,173],keyspace1:[6,19],keyspace2:6,keyspace_nam:[11,14,19,21,41],keystor:[6,49],keystore_password:6,keystorepassword:49,keyword:[10,11,13,14,15,16,17,21],kib:[62,116,166],kick:[117,134],kill:[6,34],kilobyt:42,kind:[11,12,21,28,40,41],kitten:21,knife:[117,155],know:[6,13,21,23,41],known:[19,21,32,35,38,41],ks_owner:49,ks_user:49,ktlist:156,kundera:32,label:[21,28],lag:46,land:42,landlin:21,lang:[36,46,49],languag:[6,9,10,12,14,20,21,32,35,36,52],larg:[6,11,13,14,21,29,36,41,43,46,52],larger:[6,29,30,41,42,43],largest:[6,46],last:[6,12,13,14,15,28,41,46,55,117],lastli:[13,21],lastnam:13,latenc:[0,6,30,46,50],later:[0,11,21,23,28,30],latest:[0,28,34,41,52,171],latter:12,layer:43,layout:11,lazi:11,lazili:11,lead:[6,10,21,41],learn:[6,29,30,52],least:[0,6,11,12,13,18,30,41,43],leav:[6,12,13,23,29,30,52],left:[6,17,41],legaci:[6,19],legal:10,length:[4,6,10,17,21,25,41],less:[6,21,28,30,38,43],let:[6,41],letter:17,level:[6,10,11,13,19,23,25,31,43,45,46,49,52,97,108,117,150],leveledcompactionstrategi:[11,38,41],lexic:30,lib:[6,20,25,26,34]
,libqtcassandra:32,librari:[8,25,29,32,46,52],licenc:25,licens:[25,26,28],life:28,lifespan:43,like:[0,6,12,13,14,17,21,23,24,25,28,29,30,36,41,42,43,49],likewis:19,limit:[6,9,10,11,18,19,21,30,40,41,42,49],line:[12,23,28,29,31,34,35,49,53,55,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],linear:43,linearli:38,link:[6,8,11,12,28,29,34],linux:[6,30],list:[4,5,6,9,10,11,12,13,14,17,26,28,29,31,34,35,36,41,49,51,52,55,57,58,59,60,65,67,73,76,77,83,86,87,90,92,95,99,100,101,103,107,108,114,115,117,119,122,123,124,127,130,131,132,133,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],list_liter:[13,21],list_permissions_stat:12,list_roles_stat:12,list_users_stat:12,listarg:14,listen:[6,36,46],listen_address:[31,35,36],listen_interfac:31,listsnapshot:117,liter:[10,12,14,17,52],littl:23,live:[13,36,41,46,51],livediskspaceus:46,livescannedhistogram:46,livesstablecount:46,load:[0,6,11,20,21,36,45,46,49,50,51,109,117,124,132,157],local:[0,6,11,26,28,29,35,43,46,49,50,52,117,126,132,136,169],local_jmx:49,local_on:[0,49,52],local_quorum:[0,52],local_read_count:166,local_read_latency_m:166,local_seri:52,local_write_latency_m:166,localhost:[6,35,49],locat:[6,33,34,42,46,49,50,52,163],lock:[6,30,46],log:[6,11,13,25,29,33,34,36,40,45,46,49,66,70,76,80,97,117,132,135,150,163],log_al:41,logback:31,logger:[23,31,76],logic:[6,20],login:[6,9,19,29,49],lol:21,longer:[6,9,10,30,41,51,57,117],look:[6,12,24,28,29,41,43],lookup:46,loop:23,lose:[6,41,51],loss:[6,21],lost:[41,51],lot:[6,35,36],low:[6,28,117,119],lower:[0,6,11,12,13,19,30,38,41,46,51],lowercas:12,lowest:[28,41],lz4:6,lz4compressor:[6,11,42],macaddr:9,machin:[6,11,29,30,46,49,50,51],made:[6,21,36,38,43,49],magnet:6,magnitud:13,mai:[0,4,6,9,10,11,13,14,16,17,19,21,25,26,28,29,30,34,38,41,49,50,51,52,139],mail:[5,28,36],main:[0,14,18,26,30,33,34,49,51,52],main_actor:13,mainli:[6,11],maintain:[6,28],mainten:46,major:[0,10,28,49,60,117],make:[0,6,8,9,20,21,23,26,28,29,30,31,34,41,49,51,52,114,117],man:6,manag:[6,19,26,29,46,49,51,56,117],mandatori:[11,14],mani:[0,6,11,23,25,28,41,42,43,46,49,52,57,60,67,77,80,86,87,132,139,158,170,171],manipul:[12,15,29,36],manual:[6,24,30],map:[6,9,10,11,13,14,17,19,36,46],map_liter:[11,16,19,21],mar:21,mark:[6,19,41,51,71,117],marker:[6,11,12,25,30],match:[6,12,13,14,17,19,46,50],materi:[6,10,11,12,15,36,46,52,117,173],materialized_view_stat:12,matter:[11,30],max:[6,36,41,46,49,52,80,90,98,117,132,143,151],max_hint_window_in_m:51,max_log_s:80,max_map_count:30,max_mutation_size_in_kb:[6,30],max_queue_weight:80,max_thread:6,max_threshold:41,maxattempt:52,maxbatchs:52,maxfiledescriptorcount:46,maxhintwindow:151,maxim:43,maximum:[4,6,14,38,46,52,80,92,117,139,145],maximum_live_cells_per_slice_last_five_minut:166,maximum_tombstones_per_slice_last_five_minut:166,maxinserterror:52,maxoutputs:52,maxparseerror:52,maxpartitions:46,maxpools:46,maxrequest:52,maxrow:52,maxthreshold:143,maxtimeuuid:10,mayb:13,mbean:[6,19,41,46,49],mbeanserv:19,mbp:6,mct:6,mean:[6,9,11,12,13,14,17,18,21,36,41,46,50,52,132],meaning:13,meanpartitions:46,meant:[21,30,46],measur:[6,25,29,46,51,52],mechan:40,median:46,meet:[6,25],megabyt:6,member:23,membership:6,memlock:30,memori:[4,6,11,36,38,41,45],memory_pool:46,memtabl:[2,6,38,40,41,42,43,46,156],memtable_allocation_typ:4,memtable_cell_count:166,memtable_cleanup_threshold:4,memtable_d
ata_s:166,memtable_off_heap_memory_us:166,memtable_switch_count:166,memtablecolumnscount:46,memtableflushwrit:46,memtablelivedatas:46,memtableoffheaps:46,memtableonheaps:46,memtablepool:6,memtablepostflush:46,memtablereclaimmemori:46,memtableswitchcount:46,mention:[6,21,28,46,49],menu:26,mere:23,merg:[24,28,38,42,43,45],mergetool:24,merkl:[6,46],mess:[28,29],messag:[6,21,25,28,34,36,46],met:13,meta:[13,46],metadata:[4,19,42,43,46],metal:6,meter:46,method:[10,13,14,19,23,25,26,29,36,49],metric:[6,45],metricnam:46,metricsreporterconfigfil:46,mib:[6,62,116,166],microsecond:[6,11,13,21,46],midnight:21,might:[6,13,41,46,55,57,58,60,65,67,73,77,80,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],migrat:[6,46,50],migrationstag:46,millisecond:[6,10,21,46,119,139,167],min:[6,30,40,41,46,52,90,117,143],min_sstable_s:41,min_threshold:41,minbatchs:52,mind:6,minim:[6,41,43],minimum:[6,11,14,31,46],minor:[10,12,45],minpartitions:46,minthreshold:143,mintimeuuid:10,minut:[6,21,41,46,80],misbehav:41,misc:[103,153],miscelen:46,miscellan:6,miscstag:46,miss:[11,41,46,51],misslat:46,mistaken:[55,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],mitig:[6,49],mix:[6,41],mmap:30,mnt:16,mock:29,mode:[6,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],model:[11,15,19,28,36,49],moder:43,modern:43,modif:[13,19],modifi:[6,9,10,11,14,19,21,28,38,41,42],modification_stat:13,modul:52,modular:25,moment:[6,28],monitor:[30,36,45,49,50,56,117],monkeyspeci:[11,18],monkeyspecies_by_popul:18,month:21,more:[0,4,6,10,11,12,13,21,23,28,29,31,35,36,38,43,45,46,49,50,51,60,86,87,117,119,132,139,155,167,171],moreov:13,most:[6,11,12,13,21,26,28,29,30,31,41,42,43,49,52,59,117,167],mostli:[6,11,21],motiv:[29,41],mount:6,move:[6,28,30,36,40,45,46,117],movement:45,movi:[13,21],movingaverag:6,mtime:11,much:[0,5,6,11,38,41,50],multi:[0,6,12,25],multilin:27,multipl:[4,6,10,11,12,13,14,21,23,25,26,28,30,31,41,43,50,122],multipli:41,murmur3partit:4,murmur3partition:[6,14,52],must:[0,6,10,11,13,14,17,18,19,23,28,29,30,31,41,46,49,51,52,156],mutant:16,mutat:[0,6,13,30,40,46,171],mutationstag:46,mv1:18,mx4j:46,mx4j_address:46,mx4j_port:46,mx4jtool:46,mxbean:19,myaggreg:14,mycolumn:17,mydir:52,myevent:13,myfunct:14,myid:12,mykei:17,mykeyspac:14,mytabl:[11,14,17,20],mytrigg:20,nairo:21,name:[6,9,10,11,12,13,14,16,17,18,19,20,21,25,26,28,29,30,31,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],names_valu:13,nan:[9,10,12],nanosecond:21,nathan:13,nativ:[6,10,12,15,17,25,30,35,46,52,69,79,117,123,160],nati
ve_transport_min_thread:6,native_transport_port:31,native_transport_port_ssl:49,native_typ:21,natur:[11,21,23,41,42],nearli:26,neccessari:6,necessari:[6,11,14,19,28,34,42,49],necessarili:[6,12,31],need:[0,6,10,11,12,13,19,21,23,25,26,28,29,30,31,34,35,38,41,42,43,49,50,52,95,99],neg:6,neglig:13,neighbour:41,neither:[18,21,49],neon:26,nerdmovi:[13,16],nest:[12,13,23],net:[6,26,30,33,34,49],netstat:[51,117],network:[6,13,30,43,49,50,116,117,120],networktopologystrategi:[11,49],never:[6,10,11,12,13,14,21,23,30,41],nevertheless:13,new_rol:19,new_superus:49,newargtuplevalu:14,newargudtvalu:14,newer:[41,43,52,87],newest:[11,41],newli:[11,21,28,40,117,124],newreturntuplevalu:14,newreturnudtvalu:14,newtuplevalu:14,newudtvalu:14,next:[6,30,35,41,52],ngem3b:13,ngem3c:13,nifti:24,nio:[6,14,46],no_pubkei:34,node:[0,4,6,11,13,14,20,21,25,29,31,32,35,36,38,40,41,43,45,46,50,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],nodej:33,nodetool:[34,36,38,42,45,49,51,53,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],nologin:9,non:[6,9,10,11,12,13,14,19,21,30,38,42,46,49,52],none:[6,11,13,21,49],nonsens:19,nor:[11,18,21],norecurs:[9,19],norm:46,normal:[14,17,26,30,34,46,51,52],noschedul:6,nosuperus:[9,19],notabl:[14,17],notat:[10,12,13,52],note:[0,5,6,10,11,12,13,14,15,17,19,21,24,28,30,41,49],noth:[6,11,14,24,29,30],notic:6,notif:8,notion:[11,12],now:[10,23,26,41,51],ntp:6,nullval:52,num_cor:52,num_token:51,number:[0,6,10,11,12,13,14,17,18,21,26,28,29,30,34,38,41,42,46,49,51,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],number_of_cor:6,number_of_dr:6,number_of_partitions_estim:166,numer:[15,38],numprocess:52,object:[6,11,12,25],objectnam:19,observ:23,obsolet:[6,43,46],obtain:[12,49],obviou:[14,24],obvious:11,occup:13,occupi:[6,46],occur:[10,12,13,20,21,30,41,43,46],occurr:21,octet:[6,50],odd:28,off:[4,6,30,42,46,49,52,117,134],off_heap_memory_used_tot:166,offer:[15,29,42],offheap:[38,43],offheap_buff:6,offheap_object:6,offici:[36,52],offset:[4,46],often:[6,11,12,23,28,29,30,41,42,43,49,50,52,80],ohc:6,ohcprovid:6,okai:23,old:[4,6,41,51,74,84,117],older:[6,14,26,34,41,43,52],oldest:[6,11],omit:[6,10,11,13,17,21,150],onc:[4,6,11,12,14,21,24,26,28,29,30,40,41,42,43,46,49,51,52],one:[0,4,6,9,10,11,12,13,14,17,18,19,21,23,26,28,29,31,36,38,41,43,46,49,50,51,52,57,60,67,77,86,87,103,117,132,139,153,156,158,170,171],oneminutecachehitr:46,ones:[6,11,12,13,14,18,19,46],ongo:[41,51],onli:[0,6,9
,11,12,13,14,17,18,19,21,23,28,29,31,36,38,41,42,43,46,49,50,52,132,156,166],onlin:52,only_purge_repaired_tombston:41,onto:[4,41],open:[5,6,26,49,50],openfiledescriptorcount:46,openjdk:34,oper:[0,6,10,11,13,16,18,19,21,23,36,38,40,43,46,49,51,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],operatingsystem:46,opertaion:6,opportun:38,ops:30,opt:14,optim:[6,11,12,13,30,41,43,51],optimis:132,option1_valu:19,option:[4,6,9,10,12,13,14,16,19,21,26,29,30,34,42,43,45,49,51,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],oracl:[6,34,49],order:[0,4,6,9,10,14,18,21,23,28,30,38,40,41,50,51,52],ordering_claus:13,orderpreservingpartition:6,org:[6,14,20,23,26,29,30,34,41,42,46,49],organ:[4,26,32],origin:[9,24,28,139],orign:13,other:[0,4,6,10,12,13,14,18,19,21,24,26,28,31,36,38,41,43,46,49,50,51,117,122,133],other_rol:19,otherwis:[0,9,12,13,16,21,92],our:[5,6,8,24,26,28,41],ourselv:24,out:[6,12,23,26,28,41,46,49,50,51,132],outbound:6,outboundtcpconnect:6,outgo:6,outgoingbyt:46,outlin:49,outofmemoryerror:36,output:[14,19,25,26,38,41,52,60,61,166,168],outsid:[11,20,21],over:[0,6,11,21,30,41,46,49,50,51],overal:14,overflow:[17,139],overhead:[6,30,42,46,51],overidden:49,overlap:[0,41],overload:[6,14,30],overrid:[6,23,49,51,139],overridden:[6,11],overview:[2,36,45],overwhelm:6,overwrit:[42,43],overwritten:[46,87],own:[0,6,11,12,14,21,28,30,34,41,42,46,49,95,101,108,117,171],owner:21,ownership:[41,138],p0000:21,pacif:21,packag:[26,30,31,33,35,52],packet:6,page:[6,21,26,28,29,30,43,46],paged_slic:46,pages:52,pagetimeout:52,pai:23,pair:[6,11,19,21,41,49],parallel:[29,41,132],paramet:[6,14,23,25,26,31,38,43,50,51,117,150],paranoid:6,parenthesi:[11,52],parnew:43,pars:[6,12,40,52],parser:[9,10,40],part:[0,5,6,11,13,14,18,21,25,26,28,29,30,50,51,52],parti:[25,46],partial:4,particip:[0,20],particular:[11,12,13,14,17,19,21,30,43,46,49],particularli:[12,21,49],partit:[4,6,10,13,14,30,38,41,43,46,87,95,99,117,139,167],partition:[4,10,13,14,52,64,117,132],partition_kei:[11,13],partli:13,pass:[25,28,31,52,155],password:[6,9,13,19,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],password_a:19,password_b:19,passwordauthent:[6,49],passwordfilepath:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,13
1,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],past:[6,46],patch:[10,13,23,24,25,27,29,36],path:[5,6,16,25,34,38,41,42,43,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],patter:19,pattern:[6,19,21],paus:[6,30,117,118],pausehandoff:117,paxo:[13,46,52],peer:[6,46],peerip:46,penalti:[6,13],pend:[41,46,117,131],pending_flush:166,pendingcompact:46,pendingflush:46,pendingrangecalcul:46,pendingtask:46,pendingtasksbytablenam:46,pennsylvania:21,peopl:[28,30],per:[0,4,6,10,11,13,23,24,28,30,38,40,41,42,46,49,52,117,140,148],percent:46,percent_repair:166,percentag:[6,46,50],percentil:46,percentrepair:46,perdiskmemtableflushwriter_0:46,perfect:14,perform:[6,11,13,19,21,24,25,27,30,31,38,41,43,46,49,50,52,132],period:[6,43,46,49,117,119],perman:[11,30,41,43],permiss:[6,9,12,29,49],permit:[6,19,40,49],persist:[4,30,38,43,49],perspect:30,pet:21,pgrep:34,phantom:32,phase:[51,52],phi:6,phone:[13,21],php:33,physic:[0,6,11,30,43,50],pick:[24,28,30,41,49,51,122],pid:[30,34],piec:[12,41,46],pile:6,pin:[6,50],ping:28,pkcs5pad:6,pkill:34,place:[5,6,16,20,23,24,28,40,41,46,49,52,117,124],placehold:[14,52],plai:[14,21],plain:4,plan:[11,24,28],platform:19,platter:[6,43],player:[14,21],playorm:32,pleas:[5,6,11,13,14,15,21,23,26,29,30],plu:[14,41,46],plug:6,pluggabl:[19,49],plugin:46,poe:21,point:[6,10,17,21,23,26,36,49,52,95,117],pointer:14,polici:[6,28,49,171],pool:[6,34,46,117,145,168],popul:[11,18],popular:[26,43],port:[6,26,31,36,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],portion:[43,52],posit:[4,6,10,11,21,38,46,51],possbili:6,possess:19,possibl:[6,10,11,13,14,17,19,21,25,28,29,30,38,41,43,46,49,51],post:[13,117,142],post_at:13,posted_at:13,posted_bi:11,posted_month:11,posted_tim:11,potenti:[0,6,9,11,12,14,25,41,43,49,51,139],power:6,pr3z1den7:21,practic:[11,12,13,49],pre:[6,17,21,43,49],preced:30,precis:[10,17,21,41],precondit:46,predefin:11,predict:13,prefer:[0,6,11,12,21,23,28,49,50],preferipv4stack:26,prefix:[11,12,21],prepar:[6,14,15,46],preparedstatementscount:46,preparedstatementsevict:46,preparedstatementsexecut:46,preparedstatementsratio:46,prepend:21,prerequisit:33,present:[12,13,18,46],preserv:[6,17,19],press:34,pressur:[6,46],pretti:52,prevent:[6,29,40],preview:132,previou:[6,10,11,21,41,51],previous:6,previsouli:[83,117],primari:[9,10,13,14,21,29,40,41,42,49,51],primarili:[6,11],primary_kei:[11,18],print:[52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,
145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],prior:[6,13,19,21],prioriti:28,privat:[6,23,49,50],privileg:[19,34,49],probabilist:[38,42],probabl:[6,11,29,38,41,104,117,154],problem:[5,6,14,24,25,30,49],problemat:21,proc:[6,30],proce:[25,42,51],procedur:[13,49],process:[0,6,14,24,25,26,28,29,30,34,40,42,43,46,49,51,52,56,92,117,118,137,145],prod_clust:52,produc:[13,14,41,80],product:[6,28,30,43,50],profil:[13,117,119],profileload:117,program:[14,29],progress:[23,24,28,38,45,117,173],project:[23,29,46],promin:11,prompt:52,propag:[6,11,14,23,25,50],proper:[11,21,30,49],properli:[6,25],properti:[6,11,19,33,40,41,49,50,51],propertyfilesnitch:[6,50],proport:[6,13],proportion:[6,89,117,140],propos:[6,46],protect:[6,43],protocol:[6,25,30,35,46,49,52,59,69,74,79,84,117,160],provid:[0,5,6,11,12,13,14,15,17,21,26,28,35,40,41,42,43,46,49,50,51,53,116,117,127,131],proxim:[6,50],proxyhistogram:117,prv:132,ps1:49,ps22dhd:13,pt89h8m53:21,pull:[29,41,46,132],purg:43,purpos:[11,12,13,21,43,49],push:[24,28,46],put:[15,28,31,41,51,108,132],pwf:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],python:[14,28,29,33,34,52],quak:[14,21],qualifi:[6,11,14,28],qualiti:49,quantiti:21,queri:[6,10,11,12,13,14,16,18,19,33,36,41,46,52,70,80,117,135],question:[8,19,36],queu:[6,46],queue:[6,46,80],quick:[108,171],quickli:[30,41],quill:32,quintana:21,quit:[41,52],quorum:[0,49,52],quot:[9,10,11,12,14,17,19,52],quotat:19,quoted_identifi:12,quoted_nam:11,race:[21,24],rack1:6,rack:[0,6,49,50],rackdc:[6,50],rackinferringsnitch:[6,50],raid0:43,raid1:43,raid5:43,rain:12,rais:[12,30],raison:9,ram:[38,42,43],random:[11,14,30,51],randomli:[0,6,51],randompartition:[6,13,14],rang:[2,6,10,11,13,21,25,41,45,46,52,60,65,103,117,122,132,153],range_slic:46,rangekeysampl:117,rangelat:46,rangemov:51,rangeslic:46,rapid:43,rare:[10,38],raspberri:43,rate:[6,11,46,49,52],ratebasedbackpressur:6,ratefil:52,rather:[13,30,41,43],ratio:[6,42,43,46],raw:[6,14],reach:[6,28,30,40,41],read:[0,6,11,13,21,23,25,29,30,33,36,38,41,42,43,45,46,49,50,52,103,153,166,171],read_lat:166,read_repair:46,read_repair_ch:[0,6,11,41,50],read_request_timeout:30,readabl:[11,62,116,166],readi:[28,49],readlat:46,readrepair:46,readrepairstag:46,readstag:46,readwrit:49,real:[8,11,23,30],realiz:41,realli:[6,29,31],reason:[0,6,13,14,15,30,31,34,41,43,49,51],rebuild:[38,41,42,46,117,123,139],rebuild_index:117,receiv:[6,14,28,30,41,43],recent:[6,28,29,43,59],reclaim:41,recogn:[13,26,28],recommend:[6,11,21,30,43,49,51],recompact:41,recompress:42,reconnect:49,record:[11,13,21,28,41],recov:[6,30,41],recoveri:6,recreat:52,recurs:80,recv:34,recycl:[6,46],redistribut:6,redo:28,reduc:[6,30,41,42,63,89,117,132,140],reduct:6,redund:[0,6,23,25,28,43],reenabl:[79,81,82,117],refactor:40,refer:[6,11,12,13,14,15,21,23,29,30,34,35,52],referenc:6,reflect:41,refresh:[6,49,52,117,125],refreshsizeestim:117,refus:36,regard:[11,13],regardless:[0,6,19,28],regener:38,regexp:12,region:[6,50],regist:21,registri:49,regress:[25,29],regular:[9,12,26,29,30,46,52],regularstatementsexecut:46,reinsert:139,reject:[6,13,30,40,49],rel:[6,21,52],relat:[8,10,12,13,26,28,41,46],
releas:[6,10,34,52],relev:[13,19,21,28,42,49],reli:[6,14,21,30,51],reliabl:41,reload:[6,117,126,127,128,129],reloadlocalschema:117,reloadse:117,reloadssl:117,reloadtrigg:117,reloc:[117,130,163],relocatesst:117,remain:[6,13,14,21,24,41,46,51,166],remaind:[17,42],remedi:41,remot:[0,24,26,36,41,49,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],remov:[4,6,10,11,12,13,14,15,17,21,25,30,36,40,45,49,55,58,87,117,131],removenod:[51,55,117],renam:[9,21],reorder:6,repair:[0,4,6,11,30,36,42,45,46,50,51,108,117,133,150,171],repair_admin:117,repeat:[12,34,42,49],replac:[6,9,14,19,21,25,30,36,41,45,80],replace_address_first_boot:51,replai:[0,21,43,46,89,117,134,140],replaybatchlog:117,replic:[2,6,11,36,41,43,49,51,55,117],replica:[0,6,11,13,30,41,46,50,51,63,99,117],replication_factor:[0,11,49],repo:[24,26],report:[28,36,45],report_writ:19,reportfrequ:52,repositori:[5,8,26,28,29,34],repres:[6,10,17,19,21,30,41,46,49,50,52],represent:[10,17],request:[0,6,13,19,20,29,30,38,41,43,45,49,50,52,117,154,170],request_respons:46,requestresponsestag:46,requestschedul:6,requesttyp:46,requir:[0,6,11,13,14,19,23,24,25,26,28,30,38,42,43,49],require_client_auth:6,require_endpoint_verif:6,resampl:6,reserv:[6,10,12,15],reset:[6,13,117,136,150],reset_bootstrap_progress:51,resetfullquerylog:117,resetlocalschema:117,resid:[6,13,30,46],resolut:[6,13,30],resolv:[24,30,138,157],resort:[55,117],resourc:[19,49],resp:14,respect:[6,10,14,34,50,80],respond:[0,6,12],respons:[0,6,19,30,46,51],ressourc:21,rest:[6,11,12,21,25,51],restart:[30,41,49,51,117,124,142],restor:[41,51,52],restrict:[10,11,13,18,19],result:[0,6,8,10,11,12,14,17,19,21,28,30,41,46,52],resum:[56,117,137],resumehandoff:117,resurrect:41,resync:[117,136],retain:[30,41],rethrow:23,retri:[0,6,21,46,80],retriev:[11,13,19],reus:25,revers:13,review:[11,23,27,28,29,36],revok:[9,49],revoke_permission_stat:12,revoke_role_stat:12,rewrit:[38,41,42,117,139,170],rewritten:[43,139],rfc:[14,21],rhel:36,rich:21,rider:21,riderresult:21,right:[6,26,30,52],ring:[2,6,36,49,51,52,113,115,117,150],risk:11,rmem_max:6,rmi:[30,49],robin:6,rogu:14,role:[6,9,10,12,15,45],role_a:19,role_admin:19,role_b:19,role_c:19,role_manag:49,role_nam:19,role_opt:19,role_or_permission_stat:12,role_permiss:6,roll:[30,49,80],roll_cycl:80,romain:21,root:[6,24,28,34],rotat:6,roughli:6,round:[6,13,41,46],roundrobin:6,roundrobinschedul:6,rout:[6,50],row:[0,4,6,10,11,13,14,15,17,18,29,35,38,42,43,46,52,87,108,112,117,139,141,142],rowcach:46,rowcachehit:46,rowcachehitoutofrang:46,rowcachemiss:46,rowindexentri:46,rows_per_partit:11,rpc:[6,46],rpc_min:6,rpc_timeout_in_m:[103,153],rsc:171,rubi:[14,33],rule:[6,12,14,28,30],run:[5,6,12,21,24,26,28,30,31,34,41,43,46,49,51,108,117,132,155],runtim:[6,33,97,117],runtimeexcept:23,rust:33,safe:[6,14,21,41,49],safeguard:43,safeti:[41,51],sai:36,said:[11,28,30,117,170],same:[0,5,6,11,12,13,14,15,17,18,19,21,24,26,28,31,36,38,41,46,49,50,132],sampl:[4,6,12,14,46,52,80,117,119,121,167],sampler:[46,119,167],san:43,sandbox:[6,14],sasi:6,satisfi:[0,23,43,46,51],satur:[6,46],save:[6,13,21,30,31,38,42,43,51,117,142],saved_cach:6,saved_caches_directori:31,sbin:30,scala:[14,33],scalar:15,scale:[6,29,42],scan:[6,13,38,
46],scenario:24,scene:30,schedul:6,schema:[0,9,11,14,17,46,52,64,117,126,136],schema_own:19,scope:[19,46,49],score:[6,14,21,50],script:[6,14,26,29,80],scrub:[38,41,42,46,117,163],search:28,second:[6,11,12,13,21,30,40,43,49,52,117,140,148],secondari:[10,12,13,15,36,41,46,117,123],secondary_index_stat:12,secondaryindexmanag:46,section:[2,5,7,10,11,12,13,15,19,21,30,33,34,35,41,46,49,51,53],secur:[6,14,15,36,45],see:[0,4,6,10,11,12,13,14,17,19,21,26,28,35,36,40,41,46,49,51,52,87,117,132],seed:[6,31,36,50,100,117,127],seedprovid:6,seek:[6,43,46],seen:[6,11],segment:[4,6,40,46,52,80],select:[6,9,10,11,12,14,15,19,26,29,30,35,38,41,49,52,122],select_claus:13,select_stat:[12,18],self:25,selinux:30,semant:[10,13,14],semi:30,send:[6,8,30],sens:[6,10,13,15,30],sensic:14,sensit:[11,12,14,17],sensor:21,sent:[0,6,21,30,46],separ:[4,6,11,13,23,28,31,41,43,49,51,52,55,57,58,60,65,67,73,76,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],seq:[6,132],sequenc:12,sequenti:[6,43,132],seren:13,seri:[11,41,52],serial:6,serializingcacheprovid:6,serv:[13,43,49],server:[6,12,13,21,26,29,30,43,46,49],server_encryption_opt:49,servic:[6,26,34,49,51],session:[6,19,49,117,133],set:[0,6,9,10,11,12,13,14,17,18,25,27,28,29,31,36,38,40,41,42,43,46,49,50,51,52,57,76,87,117,130,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,170],set_liter:21,setbatchlogreplaythrottl:117,setcachecapac:117,setcachekeystosav:117,setcompactionthreshold:[41,117],setcompactionthroughput:[41,117],setconcurr:117,setconcurrentcompactor:117,setconcurrentviewbuild:117,sethintedhandoffthrottlekb:117,setint:14,setinterdcstreamthroughput:117,setlogginglevel:117,setlong:14,setmaxhintwindow:117,setstr:14,setstreamthroughput:117,setter:[19,23],settimeout:117,settraceprob:117,setup:[28,29,49],sever:[4,13,19,41,49],sfunc:[9,14],sha:24,shadow:41,share:[11,13,26],sharedpool:52,sharp:32,shed:30,shell:[35,36,53],shift:21,ship:[29,35,49,52],shortcut:18,shorter:49,shorthand:52,should:[0,5,6,10,11,12,13,14,17,19,21,25,26,28,29,30,31,32,33,35,38,41,42,43,46,49,50,51,52,122,132,153],shouldn:11,show:[19,36,51,65,85,105,117,121,131,138,157,158,166,173],shown:[12,52,166],shrink:6,shut:6,shutdown:[6,43],side:[11,13,17,21,49],sign:[13,21,30],signal:[117,128],signatur:[34,40],signific:[6,26,28,29,43],significantli:6,silent:14,similar:[6,13,14,42,43],similarli:[0,10,17,23,43,117,122],simpl:[6,11,26,29,49],simple_classnam:29,simple_select:13,simplequerytest:29,simplereplicationstrategi:49,simpleseedprovid:6,simplesnitch:[6,50],simplestrategi:11,simpli:[0,6,11,13,14,17,21,26,29,41,43,46,51,171],simul:29,simultan:[6,43,52,57,87,130,139,170],sinc:[6,11,13,14,21,26,30,34,41,46,51],singl:[0,6,10,11,12,13,14,17,18,19,21,23,28,31,35,36,45,46,49,50,52,60],singleton:25,situat:[6,29,41],size:[4,6,11,21,23,30,31,38,40,42,43,45,46,49,52,80,114,117],size_estim:[117,125],sizetieredcompactionstrategi:[11,41],sjk:117,skip:[6,13,46,51,52,139,156],skipcol:52,skiprow:52,sks:34,sla:25,slash:12,slf4j:23,slightli:6,slow:[6,50],slower:[6,11,38],slowest:6,slowli:[6,21],small:[6,11,13,21,30,41,43],smaller:[6,30,41,43,52],smallest:[0,11,14,46],smallint:[9,10,14,17,21],smith:21,smoother:10,smoothli:6,snappi:6,snappycompressor:[11,42],snapshot:[6,26,46,58,114,117,139],snapshot_nam:58,snapshotnam:[58,117],snitch:[6,36,45,64,117],socket:[6,49,153],sole:11,solid:[6,43],some:[0,6,9,11,12,13,14,21,26,28,29,30,31,40,41,42,46,49,51,52],some_funct:14,some_nam
:12,someaggreg:14,somearg:14,somefunct:14,someon:[24,41],sometim:[6,12,13],someudt:14,somewher:34,soon:49,sooner:6,sort:[4,11,13,21,41,43,166],sort_kei:166,sourc:[5,6,8,14,27,34,46,122],source_elaps:52,space:[6,23,30,40,41,43,46],space_used_by_snapshots_tot:166,space_used_l:166,space_used_tot:166,span:[6,13,41],sparingli:13,spark:32,spec:[25,35,46,52],speci:[11,18],special:[12,13,29,30,41,46],specif:[6,9,11,12,13,19,21,26,28,30,32,40,41,46,49,52,117,122,132],specifc:46,specifi:[0,6,10,11,12,13,14,16,18,19,21,26,30,35,40,41,42,46,49,51,52,58,60,101,117,122,132,138,151,153,156,163,166,169],specific_dc:132,specific_host:132,specific_keyspac:122,specific_sourc:122,specific_token:122,specul:[0,46],speculativeretri:46,speed:[6,36],spent:46,spike:30,spin:[6,43],spindl:6,spirit:[6,50],split:[23,30,41,46,52,60],spread:[6,50],sql:[13,15],squar:12,squash:28,src:122,ssd:[6,16,43],ssl:[6,30,45,52,117,128],ssl_storage_port:50,sss:17,sstabl:[2,6,11,30,38,42,43,45,57,60,87,101,108,114,117,124,130,139,170,171],sstable_compression_ratio:166,sstable_count:166,sstable_s:41,sstable_size_in_mb:41,sstableexpiredblock:41,sstablesperreadhistogram:46,sstablewrit:23,stabil:28,stabl:[34,52],stack:6,stage:[28,92,117,145],stai:[36,41],stale:49,stall:[6,51],stand:[6,29],standalon:29,standard:[6,21,30,34,46],start:[0,6,9,13,27,30,31,34,36,41,43,46,49,51,60,132,163],start_token:[60,132],start_token_1:122,start_token_2:122,start_token_n:122,starter:28,startup:[6,20,26,30,41,46,51],starvat:6,state:[6,14,38,41,43,46,51,117,157],statement:[6,9,10,11,13,14,15,16,17,19,20,21,25,27,28,38,41,46,49,52],static0:11,static1:11,statist:[4,41,46,52,62,88,117,120,165,166,168],statu:[19,25,28,30,34,52,117,131,158,159,160,161,162,171],statusautocompact:117,statusbackup:117,statusbinari:117,statusgossip:117,statushandoff:117,stc:11,stdin:52,stdout:52,step:[6,26,31,49],still:[0,6,10,13,14,17,21,23,49,51,52],stop:[6,34,52,75,117,135,164],stop_commit:6,stop_paranoid:6,stopdaemon:117,storag:[2,11,15,16,28,30,36,42,43,45],storage_port:[31,50],storageservic:[6,23],store:[0,4,6,10,11,12,13,21,36,38,41,42,43,46,49,52,72,80,82,117,162],store_typ:6,straight:51,straightforward:40,strategi:[0,6,11,45,50],stream:[4,6,36,41,42,45,56,96,102,117,122,132,149,150,152,153],street:21,strength:6,strict:[10,41],strictli:[8,11,14],string:[6,10,11,12,13,14,16,17,19,20,21,46,52,101],strong:0,strongli:[6,11,12,49],structur:[4,6,9,19,25,38,46],stub:49,style:[6,25,26,27,28,29,36],stype:[9,14],sub:[11,13,21,34,41],subclass:6,subdirectori:[6,20],subject:[6,14,49],submiss:[6,28],submit:[28,29,36,60],subscrib:8,subscript:8,subsequ:[6,13,30,41,42],subset:[19,41,52],substitut:34,subsystem:49,subvert:41,succed:46,succesfulli:46,success:[0,52],sudden:6,sudo:[30,34],suffici:[6,43],suggest:[12,28,43],suit:[6,28,29,49],suitabl:[13,14,25,28],sum:40,summari:[4,6,46],sun:[23,49],sunx509:6,supercolumn:9,supersed:[10,139],superus:[9,19,49],suppli:[13,24],support:[0,6,9,10,11,12,13,14,15,16,18,19,21,28,29,30,32,36,41,49,52,139,163],suppos:13,sure:[6,8,23,26,28,29,30,31,34,41],surplu:30,surpris:0,surprisingli:6,surround:[17,52],suscept:14,suspect:[5,28],suspend:26,swamp:30,swap:6,swiss:[117,155],symmetri:17,symptom:30,sync:[6,30,46,132],synchron:6,synonym:19,synopsi:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,1
48,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],syntact:[11,19],syntax:[10,12,13,14,19,21,41,42],sys:6,sysctl:30,sysintern:6,system:[6,11,14,19,29,30,31,35,41,43,46,49,52,91,93,94,96,102,108,117,124,125,126,144,146,147,149,152],system_auth:[6,49],system_trac:132,tab:23,tabl:[0,4,6,9,10,12,13,14,15,16,17,18,19,20,21,29,38,41,42,45,49,52,57,60,67,75,77,86,87,90,95,99,108,117,123,124,126,130,132,139,143,156,158,163,165,166,170,171],table1:19,table_nam:[11,13,16,19,20,41,166],table_opt:[11,18],tablehistogram:117,tablestat:117,tag:[21,25,28,156],take:[6,10,11,13,14,21,25,26,28,30,38,41,42,43,51,117,156],taken:[6,40,41,46],tar:34,tarbal:[31,33,52],target:[11,19,26,29,41],task:[6,26,28,46,52],tcp:[6,30],tcp_keepalive_intvl:30,tcp_keepalive_prob:30,tcp_keepalive_tim:30,tcp_nodelai:6,tcp_wmem:6,teach:[6,50],team:30,technetwork:6,technic:[11,15],technot:6,tee:34,tell:[6,13,25,30,31,46],temporari:49,temporarili:6,tenanc:6,tend:[6,30,43],tendenc:6,terabyt:42,term:[6,13,14,15,18,21],termin:[12,52],ternari:23,test:[6,8,23,25,27,28,35,36,43,52],test_keyspac:49,testabl:[25,28],testbatchandlist:29,testmethod1:29,testmethod2:29,testsom:29,teststaticcompactt:29,text:[4,9,11,12,13,14,17,21,40,42,49],than:[0,6,11,12,13,14,15,18,21,23,28,36,41,42,43,49,50,51,133,146,147],thei:[6,9,10,11,12,13,14,15,18,19,21,23,25,28,29,36,38,41,42,43,46,49],them:[6,10,11,13,14,21,23,28,29,30,35,38,41,46,49,117,170],themselv:[13,19],theoret:11,therefor:[28,29,49],thi:[0,2,4,5,6,7,10,11,12,13,14,15,17,18,19,21,23,24,25,26,28,29,30,31,33,34,36,38,40,41,42,43,46,49,50,51,52,53,54,55,57,58,60,63,65,67,73,77,83,86,87,89,90,92,95,99,101,103,107,108,115,117,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],thing:[6,21,24,28,30,33,41],think:6,third:[21,25,46],thobb:52,those:[11,12,13,14,16,17,18,19,21,28,30,40,41,49,52,170],though:[6,10,12,21,36,41,42,46],thousand:52,thousandssep:52,thread:[6,43,46,49,57,87,117,130,132,139,148,168,170],threadpool:45,threadpoolnam:46,threadprioritypolici:26,three:[0,6,38,41,42,49,52],threshold:[4,40,43,50,90,117,143,150],thrift:[6,9,11,15,30,46],throttl:[6,89,117,140,144,148,149,152],throttle_limit:6,through:[0,5,9,10,11,12,13,26,28,30,35,40,41,52],throughout:49,throughput:[0,6,41,42,43,46,91,96,102,117,144,149,152],throwabl:[25,29],thrown:21,thu:[6,10,11,12,13,18,21,30,46,50,51,117,170],thumb:[6,28],thusli:21,tib:[62,116,166],ticket:[5,24,25,28,29,40],tie:30,tier:45,ties:13,tighter:6,tightli:6,tild:52,time:[0,6,8,9,10,11,12,13,15,16,17,18,23,25,26,28,29,30,38,40,42,45,46,49,52,117,119],timehorizon:6,timelin:11,timeout:[6,21,30,46,52,103,117,153],timeout_in_m:153,timeout_typ:[103,153],timer:[6,46],timestamp:[4,9,10,11,13,14,15,17,36,41,52,139],timeunit:41,timeuuid:[9,10,11,17,21],timewindowcompactionstrategi:11,timezon:[17,52],tini:[6,41],tinyint:[9,10,14,17,21],tjake:23,tls_dhe_rsa_with_aes_128_cbc_sha:6,tls_dhe_rsa_with_aes_256_cbc_sha:6,tls_ecdhe_rsa_with_aes_128_cbc_sha:6,tls_ecdhe_rsa_with_aes_256_cbc_sha:6,tls_rsa_with_aes_128_cbc_sha:6,tls_rsa_with_aes_256_cbc_sha:6,toc:4,todai:12,todat:14,todo:[25,29],togeth:[6,11,13,14,41],toggl:49,tojson:15,token:[2,4,6,9,10,12,13,30,41,46,52,60,65,108,109,115,117,122,132,138,171],toler:38,tom:13,tombston:[4,6,11,17,30,45,46,87,139],tombstone_compact:163,tombstone_compaction_interv:41,tombstone_threshold:41,tombstonescannedhistogram:46,ton:29,too:[6,11,12,14,21,25,41],tool:[6,12,28,30,36,41,46,49,51],top:[13,21,28
,36,46,119,166,167],topcount:[119,167],topic:52,topolog:[6,50,138],toppartit:117,total:[6,13,40,41,46,114,117],totalblockedtask:46,totalcommitlogs:46,totalcompactionscomplet:46,totaldiskspaceus:46,totalhint:46,totalhintsinprogress:46,totallat:46,totimestamp:14,touch:[8,30,41],tough:29,tounixtimestamp:14,tour:21,toward:11,tpstat:117,trace:[6,46,104,117,132,154],track:[6,41,46],tracker:28,tradeoff:[0,6],tradit:[41,42],traffic:[6,50],trail:23,transact:[13,20,46,163],transfer:[6,30,49],transform:13,transit:[10,19],translat:6,transpar:[6,30],transport:[6,26,46,69,79,117,160],treat:[0,6,10,30,50],tree:[6,26,46],tri:41,trigger:[4,6,9,12,15,36,38,42,45,57,117,129],trigger_nam:20,trigger_stat:12,trip:[6,13],trivial:49,troubleshoot:[25,36],truediskspaceus:[114,117],truesnapshotss:46,truli:9,truncat:[6,9,10,15,19,103,117,153,169],truncate_stat:12,truncatehint:117,trunk:[24,25,26,28],trust:49,trustor:6,truststor:[6,49],truststore_password:6,truststorepassword:49,tserverfactori:6,ttl:[4,6,9,10,11,14,17,21,45,139],tty:52,tunabl:2,tune:[30,38,41,43],tupl:[6,9,10,12,13,14,15,17],tuple_liter:[12,13],tuple_typ:21,tuplevalu:[10,14],turn:[0,6,28,30,49],twc:[11,41],twice:[6,21],two:[0,6,11,12,13,14,17,26,36,38,41,43,49,50,52],txt:[4,14,24,25,28],type:[0,6,10,11,12,13,14,15,19,25,34,36,43,45,49,52,103,117,153,163],type_hint:12,typeasblob:14,typecodec:14,typic:[0,6,13,30,38,41,43,46,49,52],ubuntu:26,udf:[6,14],udf_stat:12,udfcontext:[10,14],udt:[14,17],udt_liter:12,udt_nam:21,udt_stat:12,udtarg:14,udtnam:14,udtvalu:[10,14],ulimit:30,unabl:[6,25,36],unaffect:21,unavail:[6,11,46,49,51],unblock:46,unbound:21,unchecked_tombstone_compact:41,uncom:[6,46,49],uncommon:28,uncompress:[6,42,46],undelet:41,under:[6,21,23,29,46,49],underli:[6,18,41,49],understand:[6,28,30],unencrypt:[6,49],unexpectedli:21,unfinishedcommit:46,unflush:[40,156],unfortun:29,uniqu:[11,14,21],unit:[21,25,27,41,117,141],unixtimestampof:[10,14],unless:[6,11,13,16,18,19,21,23,40,49,50],unlik:[6,10,13,21],unlimit:[6,30,52],unlog:9,unnecessari:[25,51],unnecessarili:40,unpredict:13,unprepar:46,unquot:12,unquoted_identifi:12,unquoted_nam:11,unrel:28,unreleas:28,unrepair:45,unsecur:49,unset:[6,10,13,17],unsign:21,unspecifi:6,unsubscrib:[8,36],untar:34,until:[0,6,21,38,40,41,42,49,50],unus:6,unusu:25,updat:[6,9,10,11,12,14,15,17,18,19,21,25,28,29,34,36,41,42,46,49,52],update_paramet:13,update_stat:[12,13],upgrad:[6,41,117,170],upgrade_sst:163,upgradesst:[38,41,42,117],upload:28,upon:[6,21,38,42],upper:[12,17,41,49],ups:43,upstream:28,uptim:[109,117],url:24,usag:[4,6,11,21,36,38,40,42,46,52],use:[6,9,10,11,12,13,14,16,17,18,19,21,23,25,26,28,29,31,34,35,36,38,40,41,43,46,49,50,51,52,57,87,100,117,119,130,139,167,170],use_stat:12,usecas:41,useconcmarksweepgc:26,usecondcardmark:26,used:[0,4,6,9,10,11,12,13,14,15,16,17,18,19,21,25,26,28,29,30,41,43,46,49,50,51,52,55,57,58,60,65,67,73,76,77,83,86,87,90,92,95,99,101,103,107,108,115,117,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],useecassandra:49,useful:[0,6,11,14,28,41,42,46,51,52,55,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],useparnewgc:26,user1:13,user2:13,user3:13,user4:13,user:[5,6,8,9,10,11,12,13,15,16,17,18,25,28,30,34,38,41,42,43,49,52,60,76,117],user_count:13,user_defined_typ:21,user_funct:19,user_nam:13,user_occup:13,user_opt:19,useract:13,user
id:[11,13,14],userindex:16,usernam:[6,13,14,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],uses:[0,4,6,11,12,13,14,16,19,20,29,30,49],usethreadprior:26,using:[4,6,10,11,12,13,14,18,19,21,26,28,29,33,34,35,36,38,42,43,45,46,49,51,52,60,122,139,156],usr:52,usual:[6,13,21,24,29,38,49,132],utc:[17,52],utd:11,utf8:[21,52],utf8typ:9,utf:52,util:[14,25,41,52],uuid:[9,10,11,12,17,21],val0:11,val1:11,val:14,valid:[6,10,11,12,13,14,17,21,30,41,42,46,49,52,132,139,163],validationexecutor:46,valu:[6,9,10,11,12,13,14,16,17,21,25,26,30,38,41,46,49,50,52,76,104,108,117,140,144,146,147,148,149,151,152,153,154],value1:13,value2:13,value_in_kb_per_sec:[140,148],value_in_m:151,value_in_mb:[144,149,152],valueof:14,varchar:[9,11,14,17,21],vari:[6,42],variabl:[6,10,12,17,21,26,33],variant:12,varieti:40,varint:[9,11,14,17,21],variou:[26,29,43,49],veri:[6,11,13,28,29,30,38,41,42,43],verifi:[28,30,32,34,42,108,117,163],version:[5,6,9,11,14,15,21,26,28,32,34,41,46,51,59,64,74,84,117,170,171],vertic:52,via:[6,8,10,19,25,30,31,41,42,43,46,49,50],view:[6,10,11,12,15,19,36,46,52,94,117,147,173],view_build:163,view_nam:18,viewbuildstatu:117,viewlockacquiretim:46,viewmutationstag:46,viewpendingmut:46,viewreadtim:46,viewreplicasattempt:46,viewreplicassuccess:46,viewwrit:46,viewwritelat:46,virtual:[0,6,30,41,46,51],visibl:[11,19,23,38],vnode:[6,42],volum:[6,40,42],vulner:[6,49],wai:[4,6,12,15,17,18,21,24,26,29,30,41,42,132],wait:[0,6,11,28,30,46,117,134],waitingoncommit:46,waitingonfreememtablespac:46,waitingonsegmentalloc:46,want:[6,11,13,26,28,29,30,49,51],warmup:[117,142],warn:[6,11,23,29,45,132],washington:21,wasn:10,wast:6,watch:29,weaker:0,websit:[29,34],week:21,weight:[6,46,80],welcom:8,well:[6,11,13,14,17,21,25,26,40,42,43,49,50,117,135],went:46,were:[6,9,10,19,25,26,41,46],what:[11,13,21,27,29,31,36,41,43,49,52],whatev:[10,13,30],whedon:13,when:[4,6,9,10,11,12,13,14,15,16,17,19,21,23,25,28,29,31,36,38,40,42,43,45,46,49,50,51,52,55,57,58,60,63,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],where:[0,4,6,9,10,11,12,14,16,17,18,19,21,25,29,31,34,38,41,42,49,51,52,80,132],where_claus:13,wherea:[21,49],whether:[0,6,9,11,13,26,41,50,52,80],which:[0,4,5,6,10,11,12,13,14,15,16,18,19,20,21,28,29,30,31,34,35,38,40,41,42,43,46,49,50,51,60,95,99,108,114,117,122,132],whichev:[0,6],whitelist:49,whitespac:27,who:[19,28,30],whole:[6,11,13,14,21,41],whose:[11,21,163],why:[25,28,36],wide:[4,40],width:12,wiki:[6,26],wildcard:[13,19],window:[0,6,45,46,49,98,106,117,151],winner:30,wip:[26,28],wipe:[30,51],wire:30,wise:11,wish:[6,41,46],within:[0,4,6,11,12,13,16,28,30,41,43,46,49],withing:6,without:[6,11,12,13,14,19,21,24,26,28,29,30,40,43,46,49,52,55,108,117,124],wmem_max:6,won:[6,13,24],wont:41,word:[10,11,12,18,19,21,30],work:[6,10,11,14,15,17,23,24,26,27,29,30,41,43,46,49,50,51,52],worker:52,workload:[6,25,38,41,43],workspac:26,worktre:26,worri:[28,30],wors:[6,50],worst:[6,28],worthwhil:6,would:[6,12,13,14,17,19,26,28,29,36,41,42,43,49,50],wrap:50,write:[0,4,6,10,11,13,21,23,25,29,30,40,41,42,43,46,49,50,5
1,52,75,103,117,153,166],write_lat:166,write_request_timeout:30,writelat:46,writer:[6,23],writetim:[9,14],writetimeoutexcept:6,written:[4,6,20,30,38,41,42,46],wrong:6,wrte:46,www:[6,11,34],xlarg:43,xml:31,xmn220m:26,xms1024m:26,xmx1024m:26,xmx:43,xss256k:26,xvf:34,yaml:[6,14,31,34,46,49,50,51,61,76,80,117,135,166,168],year:[13,21],yes:[9,11,49],yet:[11,46],yield:[13,51],you:[5,6,8,10,11,12,13,14,16,17,18,20,21,23,24,26,27,29,30,31,32,33,34,35,36,41,46,49,50,51,52,55,117,156],younger:14,your:[0,5,6,8,10,11,12,23,26,28,29,30,31,34,36,41,43,49,50,52],yourself:[24,29],yyyi:[17,21],z_0:[11,16,18],zero:[6,10,30,46,50],zip:21,zipcod:21,zone:[6,21,50],zzzzz:28},titles:["Dynamo","Guarantees","Architecture","Overview","Storage Engine","Reporting Bugs and Contributing","Cassandra Configuration File","Configuring Cassandra","Contact us","Appendices","Changes","Data Definition","Definitions","Data Manipulation","Functions","The Cassandra Query Language (CQL)","Secondary Indexes","JSON Support","Materialized Views","Security","Triggers","Data Types","Data Modeling","Code Style","How-to Commit","Review Checklist","Building and IDE Integration","Cassandra Development","Contributing Code Changes","Testing","Frequently Asked Questions","Configuring Cassandra","Client drivers","Getting Started","Installing Cassandra","Inserting and querying","Welcome to Apache Cassandra\u2019s documentation!","Backups","Bloom Filters","Bulk Loading","Change Data Capture","Compaction","Compression","Hardware Choices","Hints","Operating Cassandra","Monitoring","Read repair","Repair","Security","Snitch","Adding, replacing, moving and removing nodes","cqlsh: the CQL shell","Cassandra Tools","Nodetool","assassinate","bootstrap","cleanup","clearsnapshot","clientstats","compact","compactionhistory","compactionstats","decommission","describecluster","describering","disableauditlog","disableautocompaction","disablebackup","disablebinary","disablefullquerylog","disablegossip","disablehandoff","disablehintsfordc","disableoldprotocolversions","drain","enableauditlog","enableautocompaction","enablebackup","enablebinary","enablefullquerylog","enablegossip","enablehandoff","enablehintsfordc","enableoldprotocolversions","failuredetector","flush","garbagecollect","gcstats","getbatchlogreplaythrottle","getcompactionthreshold","getcompactionthroughput","getconcurrency","getconcurrentcompactors","getconcurrentviewbuilders","getendpoints","getinterdcstreamthroughput","getlogginglevels","getmaxhintwindow","getreplicas","getseeds","getsstables","getstreamthroughput","gettimeout","gettraceprobability","gossipinfo","handoffwindow","help","import","info","invalidatecountercache","invalidatekeycache","invalidaterowcache","join","listsnapshots","move","netstats","Nodetool","pausehandoff","profileload","proxyhistograms","rangekeysample","rebuild","rebuild_index","refresh","refreshsizeestimates","reloadlocalschema","reloadseeds","reloadssl","reloadtriggers","relocatesstables","removenode","repair","repair_admin","replaybatchlog","resetfullquerylog","resetlocalschema","resumehandoff","ring","scrub","setbatchlogreplaythrottle","setcachecapacity","setcachekeystosave","setcompactionthreshold","setcompactionthroughput","setconcurrency","setconcurrentcompactors","setconcurrentviewbuilders","sethintedhandoffthrottlekb","setinterdcstreamthroughput","setlogginglevel","setmaxhintwindow","setstreamthroughput","settimeout","settraceprobability","sjk","snapshot","status","statusautocompaction","statusbackup","statusbinary","statusgossip","statushandoff","stop","stopda
emon","tablehistograms","tablestats","toppartitions","tpstats","truncatehints","upgradesstables","verify","version","viewbuildstatus","Troubleshooting"],titleterms:{"class":50,"function":[13,14,17],"import":[23,108],"long":29,"new":30,"static":11,"switch":41,Adding:51,IDE:26,IDEs:23,LCS:41,TLS:49,The:[11,13,15,17,41],USE:11,Use:42,Uses:42,Using:26,Will:30,With:49,access:49,add:30,address:30,advanc:42,after:51,aggreg:14,alias:13,all:[19,30],alloc:51,allocate_tokens_for_keyspac:6,allow:13,alter:[11,18,19,21],ani:30,apach:36,appendic:9,appendix:9,architectur:2,ask:30,assassin:55,assign:51,auth:49,authent:[6,19,49],author:[6,49],auto_snapshot:6,automat:19,avg:14,back_pressure_en:6,back_pressure_strategi:6,backup:37,batch:[13,30],batch_size_fail_threshold_in_kb:6,batch_size_warn_threshold_in_kb:6,batchlog_replay_throttle_in_kb:6,befor:28,benefit:42,binari:34,blob:[14,30],bloom:38,boilerpl:23,bootstrap:[30,41,51,56],branch:28,broadcast_address:6,broadcast_rpc_address:6,buffer_pool_use_heap_if_exhaust:6,bufferpool:46,bug:[5,28],build:26,bulk:[30,39],cach:[11,46,49],call:30,can:30,captur:[40,52],cas_contention_timeout_in_m:6,cassandra:[6,7,15,17,26,27,29,30,31,34,36,40,45,49,53],cast:14,cdc:40,cdc_enabl:6,cdc_free_space_check_interval_m:6,cdc_raw_directori:6,cdc_total_space_in_mb:6,chang:[10,28,30,31,38,40,41],characterist:21,checklist:25,choic:43,choos:28,circleci:29,claus:13,cleanup:[51,57],clear:52,clearsnapshot:58,client:[32,35,46,49],client_encryption_opt:6,clientstat:59,clojur:32,cloud:43,cluster:[11,30],cluster_nam:6,code:[23,28],collect:[21,41],column:11,column_index_cache_size_in_kb:6,column_index_size_in_kb:6,command:[26,41,52],comment:12,commit:24,commit_failure_polici:6,commitlog:[4,46],commitlog_compress:6,commitlog_directori:6,commitlog_segment_size_in_mb:6,commitlog_sync:6,commitlog_sync_batch_window_in_m:6,commitlog_sync_period_in_m:6,commitlog_total_space_in_mb:6,commitlogseg:40,common:[11,41,43],compact:[9,11,41,46,60],compaction_large_partition_warning_threshold_mb:6,compaction_throughput_mb_per_sec:6,compactionhistori:61,compactionstat:62,compactionstrategi:41,compat:52,compress:[11,42],concern:41,concurrent_compactor:6,concurrent_counter_writ:6,concurrent_materialized_view_writ:6,concurrent_read:6,concurrent_writ:6,condition:19,configur:[6,7,31,40,42],connect:30,consider:11,consist:[0,52],constant:12,contact:8,contribut:[5,28],control:19,convent:[12,23],convers:14,copi:52,count:14,counter:[13,21],counter_cache_keys_to_sav:6,counter_cache_save_period:6,counter_cache_size_in_mb:6,counter_write_request_timeout_in_m:6,cpu:43,cql:[9,15,46,52],cqlsh:[35,52],cqlshrc:52,creat:[11,14,16,18,19,20,21,28],credenti:19,credentials_update_interval_in_m:6,credentials_validity_in_m:6,cross_node_timeout:6,cstar_perf:29,custom:21,cython:52,data:[11,13,17,19,21,22,30,40,41,51],data_file_directori:6,databas:19,date:21,dead:51,debian:34,debug:26,decommiss:63,defin:[14,21],definit:[11,12],defragment:41,delet:[13,30,41],depend:52,describ:[52,65],describeclust:64,detail:41,detect:0,develop:27,dies:30,directori:[31,41],disabl:40,disableauditlog:66,disableautocompact:67,disablebackup:68,disablebinari:69,disablefullquerylog:70,disablegossip:71,disablehandoff:72,disablehintsfordc:73,disableoldprotocolvers:74,disk:[30,43],disk_failure_polici:6,disk_optimization_strategi:6,document:36,doe:30,drain:75,driver:[32,35],drop:[9,11,14,16,18,19,20,21,30],droppedmessag:46,dtest:29,durat:21,dynam:50,dynamic_snitch_badness_threshold:6,dynamic_snitch_reset_interval_in_m:6,dynamic_snitch_update_interval_in_m:6,dynamo:0,
each:30,eclips:26,email:30,enabl:[40,49],enable_materialized_view:6,enable_sasi_index:6,enable_scripted_user_defined_funct:6,enable_user_defined_funct:6,enableauditlog:76,enableautocompact:77,enablebackup:78,enablebinari:79,enablefullquerylog:80,enablegossip:81,enablehandoff:82,enablehintsfordc:83,enableoldprotocolvers:84,encod:17,encrypt:49,endpoint_snitch:6,engin:4,entri:30,environ:31,erlang:32,error:30,even:30,except:23,exist:30,exit:52,expand:52,experiment:6,expir:41,factor:30,fail:[30,51],failur:[0,30],failuredetector:85,featur:6,file:[6,23,34],file_cache_size_in_mb:6,filedescriptorratio:46,filter:[13,38],fix:28,flush:86,format:23,frequent:30,from:[26,30,34,52],fromjson:17,fulli:41,further:40,garbag:41,garbagecollect:87,garbagecollector:46,gc_grace_second:41,gc_log_threshold_in_m:6,gc_warn_threshold_in_m:6,gcstat:88,gener:23,get:33,getbatchlogreplaythrottl:89,getcompactionthreshold:90,getcompactionthroughput:91,getconcurr:92,getconcurrentcompactor:93,getconcurrentviewbuild:94,getendpoint:95,getinterdcstreamthroughput:96,getlogginglevel:97,getmaxhintwindow:98,getreplica:99,getse:100,getsstabl:101,getstreamthroughput:102,gettimeout:103,gettraceprob:104,give:30,gossip:0,gossipinfo:105,grace:41,grant:19,group:13,guarante:1,handl:23,handoffwindow:106,hang:51,happen:30,hardwar:43,haskel:32,heap:30,help:[52,107],hint:44,hinted_handoff_disabled_datacent:6,hinted_handoff_en:6,hinted_handoff_throttle_in_kb:6,hintedhandoff:46,hints_compress:6,hints_directori:6,hints_flush_period_in_m:6,host:[30,52],how:[24,30],idea:26,identifi:12,impact:42,incremental_backup:6,index:[16,46],index_summary_capacity_in_mb:6,index_summary_resize_interval_in_minut:6,info:109,initial_token:6,insert:[13,17,35],instal:34,integr:[26,49],intellij:26,inter:49,inter_dc_stream_throughput_outbound_megabits_per_sec:6,inter_dc_tcp_nodelai:6,intern:[19,49],internode_authent:6,internode_compress:6,internode_recv_buff_size_in_byt:6,internode_send_buff_size_in_byt:6,invalidatecountercach:110,invalidatekeycach:111,invalidaterowcach:112,irc:8,java:[30,32],jconsol:30,jmx:[30,41,46,49],join:[30,113],json:17,jvm:46,kei:[11,16,18],key_cache_keys_to_sav:6,key_cache_save_period:6,key_cache_size_in_mb:6,keyspac:[11,30,46],keyword:[9,12],lang:30,languag:15,larg:30,level:[0,41],limit:13,line:[26,52],list:[8,19,21,30],listen:30,listen_address:[6,30],listen_interfac:6,listen_interface_prefer_ipv6:6,listen_on_broadcast_address:6,listsnapshot:114,liter:21,live:30,load:[30,39],locat:31,log:[30,31,41],login:52,lot:30,made:30,mail:8,main:31,major:41,manipul:13,manual:51,map:[16,21,30],materi:18,max:[14,30],max_hint_window_in_m:6,max_hints_delivery_thread:6,max_hints_file_size_in_mb:6,max_value_size_in_mb:6,maxtimeuuid:14,mean:30,memori:[30,43,46],memorypool:46,memtabl:4,memtable_allocation_typ:6,memtable_cleanup_threshold:6,memtable_flush_writ:6,memtable_heap_space_in_mb:6,memtable_offheap_space_in_mb:6,merg:41,messag:30,method:30,metric:46,min:14,minor:41,mintimeuuid:14,model:22,monitor:[46,51],more:[30,41],move:[51,115],movement:51,multilin:23,nativ:[14,21],native_transport_max_concurrent_connect:6,native_transport_max_concurrent_connections_per_ip:6,native_transport_max_frame_size_in_mb:6,native_transport_max_thread:6,native_transport_port:6,native_transport_port_ssl:6,net:32,netstat:116,networktopologystrategi:0,newer:26,node:[30,49,51],nodej:32,nodetool:[30,41,54,117],noteworthi:21,now:14,num_token:6,one:30,onli:30,oper:[30,41,42,45],option:[11,18,41,52],order:[11,13],otc_backlog_expiration_interval_m:6,otc_coalescing_enough_coalesced_messag:6,o
tc_coalescing_strategi:6,otc_coalescing_window_u:6,other:[11,30],outofmemoryerror:30,overview:[3,40],packag:34,page:52,paramet:[13,40,41],partit:11,partition:6,password:49,patch:28,pausehandoff:118,perform:29,permiss:19,permissions_update_interval_in_m:6,permissions_validity_in_m:6,phi_convict_threshold:6,php:32,pick:0,point:30,port:30,prepar:12,prepared_statements_cache_size_mb:6,prerequisit:34,primari:[11,18],profileload:119,progress:51,project:26,properti:31,proxyhistogram:120,python:32,pytz:52,queri:[15,35],question:30,rang:[0,51],range_request_timeout_in_m:6,rangekeysampl:121,read:[40,47],read_request_timeout_in_m:6,rebuild:122,rebuild_index:123,refresh:124,refreshsizeestim:125,refus:30,releas:28,reloadlocalschema:126,reloadse:127,reloadssl:128,reloadtrigg:129,relocatesst:130,remot:30,remov:[41,51],removenod:131,repair:[41,47,48,132],repair_admin:133,repair_session_max_tree_depth:6,replac:51,replaybatchlog:134,replic:[0,30],report:[5,30,46],request:46,request_schedul:6,request_scheduler_id:6,request_scheduler_opt:6,request_timeout_in_m:6,reserv:9,resetfullquerylog:135,resetlocalschema:136,result:13,resum:51,resumehandoff:137,revers:11,review:25,revok:19,rhel:30,right:28,ring:[0,30,138],role:[19,49],role_manag:6,roles_update_interval_in_m:6,roles_validity_in_m:6,row_cache_class_nam:6,row_cache_keys_to_sav:6,row_cache_save_period:6,row_cache_size_in_mb:6,rpc_address:6,rpc_interfac:6,rpc_interface_prefer_ipv6:6,rpc_keepal:6,rpc_max_thread:6,rpc_min_thread:6,rpc_port:6,rpc_recv_buff_size_in_byt:6,rpc_send_buff_size_in_byt:6,rpc_server_typ:6,rubi:32,run:29,runtim:31,rust:32,safeti:6,sai:30,same:30,saved_caches_directori:6,scala:32,scalar:14,scrub:139,secondari:16,secur:[19,49],see:30,seed:30,seed_provid:6,select:[13,17,18],selector:13,serial:52,server_encryption_opt:6,session:52,set:[19,21,26,30],setbatchlogreplaythrottl:140,setcachecapac:141,setcachekeystosav:142,setcompactionthreshold:143,setcompactionthroughput:144,setconcurr:145,setconcurrentcompactor:146,setconcurrentviewbuild:147,sethintedhandoffthrottlekb:148,setinterdcstreamthroughput:149,setlogginglevel:150,setmaxhintwindow:151,setstreamthroughput:152,settimeout:153,settraceprob:154,setup:26,share:52,shell:52,show:[30,52],signatur:14,simplestrategi:0,singl:[30,41],size:41,sjk:155,slow_query_log_timeout_in_m:6,snapshot:156,snapshot_before_compact:6,snitch:50,sourc:[26,52],special:52,speed:30,ssl:49,ssl_storage_port:6,sstabl:[4,41,46],sstable_preemptive_open_interval_in_mb:6,stai:30,standard:49,start:[26,28,33],start_native_transport:6,start_rpc:6,starv:41,statement:[12,18,23],statu:157,statusautocompact:158,statusbackup:159,statusbinari:160,statusgossip:161,statushandoff:162,stc:41,stop:163,stopdaemon:164,storag:[4,9,46],storage_port:6,store:30,strategi:41,stream:[30,46,51],stream_throughput_outbound_megabits_per_sec:6,streaming_keep_alive_period_in_sec:6,stress:29,style:23,sum:14,support:17,tabl:[11,40,46],tablehistogram:165,tablestat:166,tarbal:34,term:12,test:[26,29],than:30,thei:30,though:30,threadpool:46,threshold:6,thrift_framed_transport_size_in_mb:6,thrift_prepared_statements_cache_size_mb:6,tick:28,tier:41,time:[14,21,41],timestamp:[21,30],timeuuid:14,timewindowcompactionstrategi:41,tock:28,todo:[0,1,3,4,11,22,37,39,44,47,48,54],tojson:17,token:[0,14,51],tombston:41,tombstone_failure_threshold:6,tombstone_warn_threshold:6,tool:[29,53],top:30,toppartit:167,tpstat:168,trace:52,tracetype_query_ttl:6,tracetype_repair_ttl:6,transparent_data_encryption_opt:6,trickle_fsync:6,trickle_fsync_interval_in_kb:6,trigger:[20,41],troublesh
oot:174,truncat:11,truncate_request_timeout_in_m:6,truncatehint:169,ttl:[13,41],tunabl:0,tupl:21,two:30,type:[9,17,21,41,46],udt:21,unabl:30,unit:[26,29],unlog:13,unlogged_batch_across_partitions_warn_threshold:6,unrepair:41,unsubscrib:30,updat:[13,30],upgradesst:170,usag:[30,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],use:30,user:[14,19,21],using:[30,41],uuid:14,variabl:31,verifi:171,version:[10,52,172],view:18,viewbuildstatu:173,warn:40,welcom:36,what:[28,30],when:[30,41],where:13,whitespac:23,why:[30,41],window:41,windows_timer_interv:6,without:41,work:[21,28],write_request_timeout_in_m:6,writetim:13,yaml:40,you:28}}) \ No newline at end of file diff --git a/src/doc/3.11.5/tools/cqlsh.html b/src/doc/3.11.5/tools/cqlsh.html deleted file mode 100644 index 8cf7c4e78..000000000 --- a/src/doc/3.11.5/tools/cqlsh.html +++ /dev/null @@ -1,481 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "cqlsh: the CQL shell" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cqlsh: the CQL shell

-

cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line.

-
-

Compatibility

-

cqlsh is compatible with Python 2.7.

-

In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.

-
-
-

Optional Dependencies

-

cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.

-
-

pytz

-

By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the pytz library must be installed. See the timezone option in cqlshrc for -specifying a timezone to use.

-
-
-

cython

-

The performance of cqlsh’s COPY operations can be improved by installing cython. This will -compile the python modules that are central to the performance of COPY.
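As an illustrative sketch (assuming pip for the Python 2.7 interpreter that runs cqlsh is available on the PATH), both optional dependencies can be installed with:

$ pip install pytz cython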

-
-
-
-

cqlshrc

-

The cqlshrc file holds configuration options for cqlsh. By default this is in the user’s home directory at -~/.cassandra/cqlshrc, but a custom location can be specified with the --cqlshrc option.

-

Example config values and documentation can be found in the conf/cqlshrc.sample file of a tarball installation. You -can also view the latest version of cqlshrc online.
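For illustration only, a minimal cqlshrc might contain an authentication and a connection section (the values shown are placeholders; see conf/cqlshrc.sample for the full set of options):

[authentication]
username = cassandra
password = cassandra

[connection]
hostname = 127.0.0.1
port = 9042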

-
-
-

Command Line Options

-

Usage:

-

cqlsh [options] [host [port]]

-

Options:

-
-
-C --color
-
Force color output
-
--no-color
-
Disable color output
-
--browser
-
Specify the browser to use for displaying cqlsh help. This can be one of the supported browser names (e.g. firefox) or a browser path followed by %s (e.g. -/usr/bin/google-chrome-stable %s).
-
--ssl
-
Use SSL when connecting to Cassandra
-
-u --user
-
Username to authenticate against Cassandra with
-
-p --password
-
Password to authenticate against Cassandra with, should -be used in conjunction with --user
-
-k --keyspace
-
Keyspace to authenticate to, should be used in conjunction -with --user
-
-f --file
-
Execute commands from the given file, then exit
-
--debug
-
Print additional debugging information
-
--encoding
-
Specify a non-default encoding for output (defaults to UTF-8)
-
--cqlshrc
-
Specify a non-default location for the cqlshrc file
-
-e --execute
-
Execute the given statement, then exit
-
--connect-timeout
-
Specify the connection timeout in seconds (defaults to 2s)
-
--request-timeout
-
Specify the request timeout in seconds (defaults to 10s)
-
-t --tty
-
Force tty mode (command prompt)
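For example (host, port, and credentials below are placeholders), cqlsh can be pointed at a specific node with authentication and a longer request timeout:

$ cqlsh -u cassandra -p cassandra --request-timeout=60 192.0.2.10 9042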
-
-
-
-

Special Commands

-

In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below.

-
-

CONSISTENCY

-

Usage: CONSISTENCY <consistency level>

-

Sets the consistency level for operations to follow. Valid arguments include:

-
    -
  • ANY
  • -
  • ONE
  • -
  • TWO
  • -
  • THREE
  • -
  • QUORUM
  • -
  • ALL
  • -
  • LOCAL_QUORUM
  • -
  • LOCAL_ONE
  • -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-
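For example, to require a quorum of replicas for the operations that follow:

cqlsh> CONSISTENCY QUORUM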
-
-

SERIAL CONSISTENCY

-

Usage: SERIAL CONSISTENCY <consistency level>

-

Sets the serial consistency level for operations to follow. Valid arguments include:

-
    -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-

The serial consistency level is only used by conditional updates (INSERT, UPDATE and DELETE with an IF -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of QUORUM (and -is successful), then a QUORUM read is guaranteed to see that write. But if the regular consistency level of that -write is ANY, then only a read with a consistency level of SERIAL is guaranteed to see it (even a read with -consistency ALL is not guaranteed to be enough).
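As a sketch of the behaviour described above (keyspace, table, and values are hypothetical):

cqlsh> CONSISTENCY ANY
cqlsh> SERIAL CONSISTENCY SERIAL
cqlsh> UPDATE test.users SET name = 'alice' WHERE id = 1 IF name = 'bob';

With these settings the paxos phase of the conditional update runs at SERIAL, while the learn phase uses the regular ANY level, so only a read at SERIAL consistency is guaranteed to observe the update immediately.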

-
-
-

SHOW VERSION

-

Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:

-
cqlsh> SHOW VERSION
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-
-
-
-
-

SHOW HOST

-

Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:

-
cqlsh> SHOW HOST
-Connected to Prod_Cluster at 192.0.0.1:9042.
-
-
-
-
-

SHOW SESSION

-

Pretty prints a specific tracing session.

-

Usage: SHOW SESSION <session id>

-

Example usage:

-
cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8
-
-Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8
-
- activity                                                  | timestamp                  | source    | source_elapsed | client
------------------------------------------------------------+----------------------------+-----------+----------------+-----------
-                                        Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 |              0 | 127.0.0.1
- Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 |           3843 | 127.0.0.1
-...
-
-
-
-
-

SOURCE

-

Reads the contents of a file and executes each line as a CQL statement or special cqlsh command.

-

Usage: SOURCE <string filename>

-

Example usage:

-
cqlsh> SOURCE '/home/thobbs/commands.cql'
-
-
-
-
-

CAPTURE

-

Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured.

-

Usage:

-
CAPTURE '<file>';
-CAPTURE OFF;
-CAPTURE;
-
-
-

That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME.

-

Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session.

-

To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF.

-

To inspect the current capture configuration, use CAPTURE with no arguments.
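For example (the output path is a placeholder):

cqlsh> CAPTURE '~/query-output.txt';
cqlsh> SELECT * FROM system.local;
cqlsh> CAPTURE OFF;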

-
-
-

HELP

-

Gives information about cqlsh commands. To see available topics, enter HELP without any arguments. To see help on a -topic, use HELP <topic>. Also see the --browser argument for controlling what browser is used to display help.

-
-
-

TRACING

-

Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed.

-

Usage:

-
TRACING ON
-TRACING OFF
-
-
-
-
-

PAGING

-

Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it’s a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once.

-

Usage:

-
PAGING ON
-PAGING OFF
-PAGING <page size in rows>
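For example, to fetch results 100 rows at a time:

cqlsh> PAGING 100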
-
-
-
-
-

EXPAND

-

Enables or disables vertical printing of rows. Enabling EXPAND is useful when many columns are fetched, or the -contents of a single column are large.

-

Usage:

-
EXPAND ON
-EXPAND OFF
-
-
-
-
-

LOGIN

-

Authenticate as a specified Cassandra user for the current session.

-

Usage:

-
LOGIN <username> [<password>]
-
-
-
-
-

EXIT

-

Ends the current session and terminates the cqlsh process.

-

Usage:

-
EXIT
-QUIT
-
-
-
-
-

CLEAR

-

Clears the console.

-

Usage:

-
CLEAR
-CLS
-
-
-
-
-

DESCRIBE

-

Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema.

-

Usage:

-
DESCRIBE CLUSTER
-DESCRIBE SCHEMA
-DESCRIBE KEYSPACES
-DESCRIBE KEYSPACE <keyspace name>
-DESCRIBE TABLES
-DESCRIBE TABLE <table name>
-DESCRIBE INDEX <index name>
-DESCRIBE MATERIALIZED VIEW <view name>
-DESCRIBE TYPES
-DESCRIBE TYPE <type name>
-DESCRIBE FUNCTIONS
-DESCRIBE FUNCTION <function name>
-DESCRIBE AGGREGATES
-DESCRIBE AGGREGATE <aggregate function name>
-
-
-

In any of the commands, DESC may be used in place of DESCRIBE.

-

The DESCRIBE CLUSTER command prints the cluster name and partitioner:

-
cqlsh> DESCRIBE CLUSTER
-
-Cluster: Test Cluster
-Partitioner: Murmur3Partitioner
-
-
-

The DESCRIBE SCHEMA command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup.
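For example (the keyspace name is a placeholder), the DDL for a single keyspace and everything it contains can be printed with:

cqlsh> DESCRIBE KEYSPACE my_keyspace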

-
-
-

COPY TO

-

Copies data from a table to a CSV file.

-

Usage:

-
COPY <table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value STDOUT (without single quotes) to print the CSV to stdout.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.
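A sketch of a typical export (keyspace, table, column, and file names are placeholders):

cqlsh> COPY my_keyspace.users (id, name, email) TO 'users.csv' WITH HEADER = true AND PAGESIZE = 500;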

-
-

Options for COPY TO

-
-
MAXREQUESTS
-
The maximum number of token ranges to fetch simultaneously. Defaults to 6.
-
PAGESIZE
-
The number of rows to fetch in a single page. Defaults to 1000.
-
PAGETIMEOUT
-
By default, the page timeout is 10 seconds per 1000 entries -in the page size, or 10 seconds if the page size is smaller.
-
BEGINTOKEN, ENDTOKEN
-
Token range to export. Defaults to exporting the full ring.
-
MAXOUTPUTSIZE
-
The maximum size of the output file measured in number of lines; -beyond this maximum the output file will be split into segments. --1 means unlimited, and is the default.
-
ENCODING
-
The encoding used for characters. Defaults to utf8.
-
-
-
-
-

COPY FROM

-

Copies data from a CSV file to table.

-

Usage:

-
COPY <table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value STDIN (without single quotes) to read the -CSV data from stdin.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.
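A sketch of a matching import (names are again placeholders):

cqlsh> COPY my_keyspace.users (id, name, email) FROM 'users.csv' WITH HEADER = true AND MAXBATCHSIZE = 10;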

-
-

Options for COPY FROM

-
-
INGESTRATE
-
The maximum number of rows to process per second. Defaults to 100000.
-
MAXROWS
-
The maximum number of rows to import. -1 means unlimited, and is the default.
-
SKIPROWS
-
A number of initial rows to skip. Defaults to 0.
-
SKIPCOLS
-
A comma-separated list of column names to ignore. By default, no columns are skipped.
-
MAXPARSEERRORS
-
The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default.
-
MAXINSERTERRORS
-
The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000.
-
ERRFILE
-
A file to store all rows that could not be imported. By default this is import_<ks>_<table>.err, where <ks> is -your keyspace and <table> is your table name.
-
MAXBATCHSIZE
-
The max number of rows inserted in a single batch. Defaults to 20.
-
MINBATCHSIZE
-
The min number of rows inserted in a single batch. Defaults to 2.
-
CHUNKSIZE
-
The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000.
-
-
-
-

Shared COPY Options

-

Options that are common to both COPY TO and COPY FROM.

-
-
NULLVAL
-
The string placeholder for null values. Defaults to null.
-
HEADER
-
For COPY TO, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, -specifies whether the first line in the CSV input file contains column names. Defaults to false.
-
DECIMALSEP
-
The character that is used as the decimal point separator. Defaults to ..
-
THOUSANDSSEP
-
The character that is used to separate thousands. Defaults to the empty string.
-
BOOLSTYLE
-
The string literal format for boolean values. Defaults to True,False.
-
NUMPROCESSES
-
The number of child worker processes to create for COPY tasks. Defaults to a max of 4 for COPY FROM and 16 -for COPY TO. However, at most (num_cores - 1) processes will be created.
-
MAXATTEMPTS
-
The maximum number of failed attempts to fetch a range of data (when using COPY TO) or insert a chunk of data -(when using COPY FROM) before giving up. Defaults to 5.
-
REPORTFREQUENCY
-
How often status updates are refreshed, in seconds. Defaults to 0.25.
-
RATEFILE
-
An optional file to output rate statistics to. By default, statistics are not output to a file.
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/index.html b/src/doc/3.11.5/tools/index.html deleted file mode 100644 index 1cddfdbc3..000000000 --- a/src/doc/3.11.5/tools/index.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Tools" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Tools

-

This section describes the command line tools provided with Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool.html b/src/doc/3.11.5/tools/nodetool.html deleted file mode 100644 index 680bab2d7..000000000 --- a/src/doc/3.11.5/tools/nodetool.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-

Todo

-

Try to autogenerate this from Nodetool’s help.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/assassinate.html b/src/doc/3.11.5/tools/nodetool/assassinate.html deleted file mode 100644 index 6410bb10b..000000000 --- a/src/doc/3.11.5/tools/nodetool/assassinate.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "assassinate" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

assassinate

-
-
-

Usage

-
NAME
-        nodetool assassinate - Forcefully remove a dead node without
-        re-replicating any data. Use as a last resort if you cannot removenode
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] assassinate [--] <ip_address>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <ip_address>
-            IP address of the endpoint to assassinate
-
-
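For example (the JMX host and the target IP are placeholders; 7199 is the default JMX port), assassinating a dead node from another node in the cluster might look like:

$ nodetool -h 192.0.2.11 -p 7199 assassinate 192.0.2.42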
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/bootstrap.html b/src/doc/3.11.5/tools/nodetool/bootstrap.html deleted file mode 100644 index 8a22c24dc..000000000 --- a/src/doc/3.11.5/tools/nodetool/bootstrap.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "bootstrap" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

bootstrap

-
-
-

Usage

-
NAME
-        nodetool bootstrap - Monitor/manage node's bootstrap process
-
-SYNOPSIS
-        nodetool bootstrap
-        nodetool [(-u <username> | --username <username>)]
-                [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-pp | --print-port)] bootstrap resume
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-COMMANDS
-        With no arguments, Display help information
-
-        resume
-            Resume bootstrap streaming
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/cleanup.html b/src/doc/3.11.5/tools/nodetool/cleanup.html deleted file mode 100644 index 76cc6fc48..000000000 --- a/src/doc/3.11.5/tools/nodetool/cleanup.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "cleanup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cleanup

-
-
-

Usage

-
NAME
-        nodetool cleanup - Triggers the immediate cleanup of keys no longer
-        belonging to a node. By default, clean all keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] cleanup
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to cleanup simultanously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
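For example (keyspace and table names are placeholders), cleaning up a single table with two parallel jobs:

$ nodetool cleanup -j 2 my_keyspace my_table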
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/clearsnapshot.html b/src/doc/3.11.5/tools/nodetool/clearsnapshot.html deleted file mode 100644 index 5b77d1299..000000000 --- a/src/doc/3.11.5/tools/nodetool/clearsnapshot.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clearsnapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clearsnapshot

-
-
-

Usage

-
NAME
-        nodetool clearsnapshot - Remove the snapshot with the given name from
-        the given keyspaces. If no snapshotName is specified we will remove all
-        snapshots
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clearsnapshot [--all]
-                [-t <snapshot_name>] [--] [<keyspaces>...]
-
-OPTIONS
-        --all
-            Removes all snapshots
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -t <snapshot_name>
-            Remove the snapshot with a given name
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces>...]
-            Remove snapshots from the given keyspaces
-
-
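For example (snapshot and keyspace names are placeholders), removing one named snapshot from a single keyspace:

$ nodetool clearsnapshot -t before_upgrade -- my_keyspace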
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/clientstats.html b/src/doc/3.11.5/tools/nodetool/clientstats.html deleted file mode 100644 index 9b2686abb..000000000 --- a/src/doc/3.11.5/tools/nodetool/clientstats.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clientstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clientstats

-
-
-

Usage

-
NAME
-        nodetool clientstats - Print information about connected clients
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clientstats [--all]
-                [--by-protocol] [--clear-history]
-
-OPTIONS
-        --all
-            Lists all connections
-
-        --by-protocol
-            Lists most recent client connections by protocol version
-
-        --clear-history
-            Clear the history of connected clients
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/compact.html b/src/doc/3.11.5/tools/nodetool/compact.html deleted file mode 100644 index 724b1869b..000000000 --- a/src/doc/3.11.5/tools/nodetool/compact.html +++ /dev/null @@ -1,149 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compact" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compact

-
-
-

Usage

-
NAME
-        nodetool compact - Force a (major) compaction on one or more tables or
-        user-defined compaction on given SSTables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compact
-                [(-et <end_token> | --end-token <end_token>)] [(-s | --split-output)]
-                [(-st <start_token> | --start-token <start_token>)] [--user-defined]
-                [--] [<keyspace> <tables>...] or <SSTable file>...
-
-OPTIONS
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which compaction range ends
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s, --split-output
-            Use -s to not create a single big file
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the compaction range starts
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --user-defined
-            Use --user-defined to submit listed files for user-defined
-            compaction
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...] or <SSTable file>...
-            The keyspace followed by one or many tables or list of SSTable data
-            files when using --user-defined
-
-
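For example (keyspace, table, and file path are placeholders), forcing a major compaction of one table, or a user-defined compaction of a single SSTable:

$ nodetool compact my_keyspace my_table
$ nodetool compact --user-defined /path/to/md-1-big-Data.db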
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/compactionhistory.html b/src/doc/3.11.5/tools/nodetool/compactionhistory.html deleted file mode 100644 index 00cc28a9d..000000000 --- a/src/doc/3.11.5/tools/nodetool/compactionhistory.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionhistory" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionhistory

-
-
-

Usage

-
NAME
-        nodetool compactionhistory - Print history of compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionhistory
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/compactionstats.html b/src/doc/3.11.5/tools/nodetool/compactionstats.html deleted file mode 100644 index fe3d961e1..000000000 --- a/src/doc/3.11.5/tools/nodetool/compactionstats.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionstats

-
-
-

Usage

-
NAME
-        nodetool compactionstats - Print statistics on compactions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/decommission.html b/src/doc/3.11.5/tools/nodetool/decommission.html deleted file mode 100644 index 7437554df..000000000 --- a/src/doc/3.11.5/tools/nodetool/decommission.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "decommission" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

decommission

-
-
-

Usage

-
NAME
-        nodetool decommission - Decommission the *node I am connecting to*
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] decommission [(-f | --force)]
-
-OPTIONS
-        -f, --force
-            Force decommission of this node even when it reduces the number of
-            replicas to below configured RF
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/describecluster.html b/src/doc/3.11.5/tools/nodetool/describecluster.html deleted file mode 100644 index 71830f8f3..000000000 --- a/src/doc/3.11.5/tools/nodetool/describecluster.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describecluster" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describecluster

-
-
-

Usage

-
NAME
-        nodetool describecluster - Print the name, snitch, partitioner and
-        schema version of a cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describecluster
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/describering.html b/src/doc/3.11.5/tools/nodetool/describering.html deleted file mode 100644 index 478691631..000000000 --- a/src/doc/3.11.5/tools/nodetool/describering.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describering" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describering

-
-
-

Usage

-
NAME
-        nodetool describering - Shows the token ranges info of a given keyspace
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describering [--] <keyspace>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            The keyspace name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/disableauditlog.html b/src/doc/3.11.5/tools/nodetool/disableauditlog.html deleted file mode 100644 index 4113782da..000000000 --- a/src/doc/3.11.5/tools/nodetool/disableauditlog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableauditlog

-
-
-

Usage

-
NAME
-        nodetool disableauditlog - Disable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableauditlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/disableautocompaction.html b/src/doc/3.11.5/tools/nodetool/disableautocompaction.html deleted file mode 100644 index b29c5024e..000000000 --- a/src/doc/3.11.5/tools/nodetool/disableautocompaction.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableautocompaction

-
-
-

Usage

-
NAME
-        nodetool disableautocompaction - Disable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/disablebackup.html b/src/doc/3.11.5/tools/nodetool/disablebackup.html deleted file mode 100644 index 587990cbe..000000000 --- a/src/doc/3.11.5/tools/nodetool/disablebackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebackup

-
-
-

Usage

-
NAME
-        nodetool disablebackup - Disable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/disablebinary.html b/src/doc/3.11.5/tools/nodetool/disablebinary.html deleted file mode 100644 index acea2fe4f..000000000 --- a/src/doc/3.11.5/tools/nodetool/disablebinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebinary

-
-
-

Usage

-
NAME
-        nodetool disablebinary - Disable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/disablefullquerylog.html b/src/doc/3.11.5/tools/nodetool/disablefullquerylog.html deleted file mode 100644 index 3d276dc55..000000000 --- a/src/doc/3.11.5/tools/nodetool/disablefullquerylog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool disablefullquerylog - Disable the full query log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablefullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/disablegossip.html b/src/doc/3.11.5/tools/nodetool/disablegossip.html deleted file mode 100644 index c282f286e..000000000 --- a/src/doc/3.11.5/tools/nodetool/disablegossip.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablegossip

-
-
-

Usage

-
NAME
-        nodetool disablegossip - Disable gossip (effectively marking the node
-        down)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/disablehandoff.html b/src/doc/3.11.5/tools/nodetool/disablehandoff.html deleted file mode 100644 index ee5b00a13..000000000 --- a/src/doc/3.11.5/tools/nodetool/disablehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehandoff

-
-
-

Usage

-
NAME
-        nodetool disablehandoff - Disable storing hinted handoffs
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/disablehintsfordc.html b/src/doc/3.11.5/tools/nodetool/disablehintsfordc.html deleted file mode 100644 index d30151d88..000000000 --- a/src/doc/3.11.5/tools/nodetool/disablehintsfordc.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool disablehintsfordc - Disable hints for a data center
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to disable
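As an illustration of the synopsis above, with a placeholder data center name:

```bash
# Hypothetical example: stop storing hints destined for the "DC2" data center
$ nodetool disablehintsfordc -- DC2
```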
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/disableoldprotocolversions.html b/src/doc/3.11.5/tools/nodetool/disableoldprotocolversions.html deleted file mode 100644 index d2554c468..000000000 --- a/src/doc/3.11.5/tools/nodetool/disableoldprotocolversions.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool disableoldprotocolversions - Disable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/drain.html b/src/doc/3.11.5/tools/nodetool/drain.html deleted file mode 100644 index 5639ec9d0..000000000 --- a/src/doc/3.11.5/tools/nodetool/drain.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "drain" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

drain

-
-
-

Usage

-
NAME
-        nodetool drain - Drain the node (stop accepting writes and flush all
-        tables)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] drain
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
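A sketch of running drain against a JMX-secured node; the username and password-file path are placeholders:

```bash
# Hypothetical example: drain the local node before a planned restart,
# authenticating to the remote JMX agent with a password file
$ nodetool -u cassandra -pwf /etc/cassandra/jmxremote.password drain
```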
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/enableauditlog.html b/src/doc/3.11.5/tools/nodetool/enableauditlog.html deleted file mode 100644 index 6fea074ba..000000000 --- a/src/doc/3.11.5/tools/nodetool/enableauditlog.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableauditlog

-
-
-

Usage

-
NAME
-        nodetool enableauditlog - Enable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableauditlog
-                [--excluded-categories <excluded_categories>]
-                [--excluded-keyspaces <excluded_keyspaces>]
-                [--excluded-users <excluded_users>]
-                [--included-categories <included_categories>]
-                [--included-keyspaces <included_keyspaces>]
-                [--included-users <included_users>] [--logger <logger>]
-
-OPTIONS
-        --excluded-categories <excluded_categories>
-            Comma separated list of Audit Log Categories to be excluded for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --excluded-keyspaces <excluded_keyspaces>
-            Comma separated list of keyspaces to be excluded for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --excluded-users <excluded_users>
-            Comma separated list of users to be excluded for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --included-categories <included_categories>
-            Comma separated list of Audit Log Categories to be included for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --included-keyspaces <included_keyspaces>
-            Comma separated list of keyspaces to be included for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --included-users <included_users>
-            Comma separated list of users to be included for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        --logger <logger>
-            Logger name to be used for AuditLogging. Default BinAuditLogger. If
-            not set the value from cassandra.yaml will be used
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
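An illustrative combination of the filtering options above; the category, keyspace and user names are placeholders, and any option left unset falls back to cassandra.yaml:

```bash
# Hypothetical example: audit only DML and DDL statements against keyspace "my_ks",
# excluding activity from the "cassandra" superuser
$ nodetool enableauditlog \
    --included-categories DML,DDL \
    --included-keyspaces my_ks \
    --excluded-users cassandra
```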
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/enableautocompaction.html b/src/doc/3.11.5/tools/nodetool/enableautocompaction.html deleted file mode 100644 index ff52b1ac9..000000000 --- a/src/doc/3.11.5/tools/nodetool/enableautocompaction.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableautocompaction

-
-
-

Usage

-
NAME
-        nodetool enableautocompaction - Enable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/enablebackup.html b/src/doc/3.11.5/tools/nodetool/enablebackup.html deleted file mode 100644 index 30a9a3766..000000000 --- a/src/doc/3.11.5/tools/nodetool/enablebackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebackup

-
-
-

Usage

-
NAME
-        nodetool enablebackup - Enable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/enablebinary.html b/src/doc/3.11.5/tools/nodetool/enablebinary.html deleted file mode 100644 index fa95ae920..000000000 --- a/src/doc/3.11.5/tools/nodetool/enablebinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebinary

-
-
-

Usage

-
NAME
-        nodetool enablebinary - Reenable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/enablefullquerylog.html b/src/doc/3.11.5/tools/nodetool/enablefullquerylog.html deleted file mode 100644 index 55ea454ab..000000000 --- a/src/doc/3.11.5/tools/nodetool/enablefullquerylog.html +++ /dev/null @@ -1,154 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool enablefullquerylog - Enable full query logging, defaults for
-        the options are configured in cassandra.yaml
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablefullquerylog
-                [--archive-command <archive_command>] [--blocking]
-                [--max-archive-retries <archive_retries>]
-                [--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-                [--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-        --archive-command <archive_command>
-            Command that will handle archiving rolled full query log files.
-            Format is "/path/to/script.sh %path" where %path will be replaced
-            with the file to archive
-
-        --blocking
-            Whether to block producers or drop samples when the queue is full.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --max-archive-retries <archive_retries>
-            Max number of archive retries.
-
-        --max-log-size <max_log_size>
-            How many bytes of log data to store before dropping segments. Might
-            not be respected if a log file hasn't rolled so it can be deleted.
-
-        --max-queue-weight <max_queue_weight>
-            Maximum number of bytes of query data to queue to disk before
-            blocking or dropping samples.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        --path <path>
-            Path to store the full query log at. Will have its contents
-            recursively deleted.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        --roll-cycle <roll_cycle>
-            How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-        -u <username>, --username <username>
-            Remote jmx agent username
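A sketch of turning on full query logging with an explicit log directory; the path and size values are placeholders, and unset options default to cassandra.yaml:

```bash
# Hypothetical example: log full queries under /var/lib/cassandra/fql,
# rolling the log hourly and capping retained log data at ~1 GiB
$ nodetool enablefullquerylog \
    --path /var/lib/cassandra/fql \
    --roll-cycle HOURLY \
    --max-log-size 1073741824
```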
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/enablegossip.html b/src/doc/3.11.5/tools/nodetool/enablegossip.html deleted file mode 100644 index 363acdd43..000000000 --- a/src/doc/3.11.5/tools/nodetool/enablegossip.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablegossip

-
-
-

Usage

-
NAME
-        nodetool enablegossip - Reenable gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/enablehandoff.html b/src/doc/3.11.5/tools/nodetool/enablehandoff.html deleted file mode 100644 index 48d72e243..000000000 --- a/src/doc/3.11.5/tools/nodetool/enablehandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehandoff

-
-
-

Usage

-
NAME
-        nodetool enablehandoff - Reenable the storing of future hints on the
-        current node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/enablehintsfordc.html b/src/doc/3.11.5/tools/nodetool/enablehintsfordc.html deleted file mode 100644 index 6a4125163..000000000 --- a/src/doc/3.11.5/tools/nodetool/enablehintsfordc.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool enablehintsfordc - Enable hints for a data center that was
-        previously disabled
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to enable
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/enableoldprotocolversions.html b/src/doc/3.11.5/tools/nodetool/enableoldprotocolversions.html deleted file mode 100644 index b235ee1e0..000000000 --- a/src/doc/3.11.5/tools/nodetool/enableoldprotocolversions.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool enableoldprotocolversions - Enable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/failuredetector.html b/src/doc/3.11.5/tools/nodetool/failuredetector.html deleted file mode 100644 index 2a4c0408f..000000000 --- a/src/doc/3.11.5/tools/nodetool/failuredetector.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "failuredetector" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

failuredetector

-
-
-

Usage

-
NAME
-        nodetool failuredetector - Shows the failure detector information for
-        the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] failuredetector
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/flush.html b/src/doc/3.11.5/tools/nodetool/flush.html deleted file mode 100644 index 4f8e38241..000000000 --- a/src/doc/3.11.5/tools/nodetool/flush.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "flush" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

flush

-
-
-

Usage

-
NAME
-        nodetool flush - Flush one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] flush [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
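For illustration, flushing either a whole keyspace or a single table (placeholder names):

```bash
# Hypothetical examples: flush every table in keyspace "my_ks", then just "my_ks.events"
$ nodetool flush my_ks
$ nodetool flush -- my_ks events
```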
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/garbagecollect.html b/src/doc/3.11.5/tools/nodetool/garbagecollect.html deleted file mode 100644 index bcdb231ba..000000000 --- a/src/doc/3.11.5/tools/nodetool/garbagecollect.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "garbagecollect" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

garbagecollect

-
-
-

Usage

-
NAME
-        nodetool garbagecollect - Remove deleted data from one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] garbagecollect
-                [(-g <granularity> | --granularity <granularity>)]
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -g <granularity>, --granularity <granularity>
-            Granularity of garbage removal. ROW (default) removes deleted
-            partitions and rows, CELL also removes overwritten or deleted cells.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to clean up simultaneously; set to 0 to use all
-            available compaction threads. Defaults to 1 so that collections of
-            newer tables can see the data is deleted and also remove tombstones.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
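An illustrative run that also removes overwritten cells and uses two concurrent jobs; the keyspace and table names are placeholders:

```bash
# Hypothetical example: garbage-collect "my_ks.events" at CELL granularity with 2 jobs
$ nodetool garbagecollect -g CELL -j 2 -- my_ks events
```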
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/gcstats.html b/src/doc/3.11.5/tools/nodetool/gcstats.html deleted file mode 100644 index c9e554479..000000000 --- a/src/doc/3.11.5/tools/nodetool/gcstats.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gcstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gcstats

-
-
-

Usage

-
NAME
-        nodetool gcstats - Print GC Statistics
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gcstats
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getbatchlogreplaythrottle.html b/src/doc/3.11.5/tools/nodetool/getbatchlogreplaythrottle.html deleted file mode 100644 index 88c3718fb..000000000 --- a/src/doc/3.11.5/tools/nodetool/getbatchlogreplaythrottle.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool getbatchlogreplaythrottle - Print batchlog replay throttle in
-        KB/s. This is reduced proportionally to the number of nodes in the
-        cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getbatchlogreplaythrottle
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getcompactionthreshold.html b/src/doc/3.11.5/tools/nodetool/getcompactionthreshold.html deleted file mode 100644 index d69d990a5..000000000 --- a/src/doc/3.11.5/tools/nodetool/getcompactionthreshold.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool getcompactionthreshold - Print min and max compaction
-        thresholds for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthreshold [--]
-                <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace with a table
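For illustration, querying the thresholds of a single (placeholder) table:

```bash
# Hypothetical example: print min/max compaction thresholds for "my_ks.events"
$ nodetool getcompactionthreshold -- my_ks events
```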
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getcompactionthroughput.html b/src/doc/3.11.5/tools/nodetool/getcompactionthroughput.html deleted file mode 100644 index f79a1cfd9..000000000 --- a/src/doc/3.11.5/tools/nodetool/getcompactionthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool getcompactionthroughput - Print the MB/s throughput cap for
-        compaction in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getconcurrency.html b/src/doc/3.11.5/tools/nodetool/getconcurrency.html deleted file mode 100644 index e41aa7d60..000000000 --- a/src/doc/3.11.5/tools/nodetool/getconcurrency.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrency

-
-
-

Usage

-
NAME
-        nodetool getconcurrency - Get maximum concurrency for processing stages
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrency [--]
-                [stage-names]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [stage-names]
-            Optional list of stage names; otherwise display all stages
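A sketch of querying either all stages or specific ones; the stage names shown are placeholders:

```bash
# Hypothetical examples: show concurrency for every stage, then only selected stages
$ nodetool getconcurrency
$ nodetool getconcurrency -- ReadStage MutationStage
```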
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getconcurrentcompactors.html b/src/doc/3.11.5/tools/nodetool/getconcurrentcompactors.html deleted file mode 100644 index 35b3964c8..000000000 --- a/src/doc/3.11.5/tools/nodetool/getconcurrentcompactors.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool getconcurrentcompactors - Get the number of concurrent
-        compactors in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentcompactors
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getconcurrentviewbuilders.html b/src/doc/3.11.5/tools/nodetool/getconcurrentviewbuilders.html deleted file mode 100644 index fcbeaa6f5..000000000 --- a/src/doc/3.11.5/tools/nodetool/getconcurrentviewbuilders.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool getconcurrentviewbuilders - Get the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentviewbuilders
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getendpoints.html b/src/doc/3.11.5/tools/nodetool/getendpoints.html deleted file mode 100644 index aee201736..000000000 --- a/src/doc/3.11.5/tools/nodetool/getendpoints.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getendpoints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getendpoints

-
-
-

Usage

-
NAME
-        nodetool getendpoints - Print the endpoints that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getendpoints [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find the endpoint
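For illustration, locating the endpoints for one (placeholder) partition key:

```bash
# Hypothetical example: list the endpoints that own key "user42" in "my_ks.events"
$ nodetool getendpoints -- my_ks events user42
```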
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getinterdcstreamthroughput.html b/src/doc/3.11.5/tools/nodetool/getinterdcstreamthroughput.html deleted file mode 100644 index 4f715b6a4..000000000 --- a/src/doc/3.11.5/tools/nodetool/getinterdcstreamthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getinterdcstreamthroughput - Print the Mb/s throughput cap for
-        inter-datacenter streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getinterdcstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getlogginglevels.html b/src/doc/3.11.5/tools/nodetool/getlogginglevels.html deleted file mode 100644 index fff0ada6b..000000000 --- a/src/doc/3.11.5/tools/nodetool/getlogginglevels.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getlogginglevels" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getlogginglevels

-
-
-

Usage

-
NAME
-        nodetool getlogginglevels - Get the runtime logging levels
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getlogginglevels
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getmaxhintwindow.html b/src/doc/3.11.5/tools/nodetool/getmaxhintwindow.html deleted file mode 100644 index 43ea4600d..000000000 --- a/src/doc/3.11.5/tools/nodetool/getmaxhintwindow.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool getmaxhintwindow - Print the max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getmaxhintwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getreplicas.html b/src/doc/3.11.5/tools/nodetool/getreplicas.html deleted file mode 100644 index 6034c6ce7..000000000 --- a/src/doc/3.11.5/tools/nodetool/getreplicas.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getreplicas" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getreplicas

-
-
-

Usage

-
NAME
-        nodetool getreplicas - Print replicas for a given key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getreplicas [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find replicas
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getseeds.html b/src/doc/3.11.5/tools/nodetool/getseeds.html deleted file mode 100644 index 76b427501..000000000 --- a/src/doc/3.11.5/tools/nodetool/getseeds.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getseeds

-
-
-

Usage

-
NAME
-        nodetool getseeds - Get the list of seed node IPs currently in use,
-        excluding this node's IP
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getsstables.html b/src/doc/3.11.5/tools/nodetool/getsstables.html deleted file mode 100644 index d24190405..000000000 --- a/src/doc/3.11.5/tools/nodetool/getsstables.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getsstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getsstables

-
-
-

Usage

-
NAME
-        nodetool getsstables - Print the sstable filenames that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getsstables
-                [(-hf | --hex-format)] [--] <keyspace> <cfname> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hf, --hex-format
-            Specify the key in hexadecimal string format
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <key>
-            The keyspace, the column family, and the key
-
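A minimal usage sketch, assuming a hypothetical keyspace and table; the second call shows the documented -hf flag with the same key given as a hex string.

```bash
# Hypothetical example: print the SSTables that contain key 'user42'
# in keyspace 'shop', table 'users' (names are placeholders).
nodetool getsstables -- shop users user42

# Same lookup with the key supplied in hexadecimal (-hf / --hex-format).
nodetool getsstables -hf -- shop users 757365723432
```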
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/getstreamthroughput.html b/src/doc/3.11.5/tools/nodetool/getstreamthroughput.html deleted file mode 100644 index 8ebc88b59..000000000 --- a/src/doc/3.11.5/tools/nodetool/getstreamthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getstreamthroughput - Print the Mb/s throughput cap for
-        streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/gettimeout.html b/src/doc/3.11.5/tools/nodetool/gettimeout.html deleted file mode 100644 index ac7a8bf72..000000000 --- a/src/doc/3.11.5/tools/nodetool/gettimeout.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettimeout

-
-
-

Usage

-
NAME
-        nodetool gettimeout - Print the timeout of the given type in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettimeout [--] <timeout_type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type>
-            The timeout type, one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
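For illustration, the timeout type is one of the values listed above; a couple of hedged example calls:

```bash
# Print the read and write timeouts in milliseconds.
nodetool gettimeout read
nodetool gettimeout write
```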
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/gettraceprobability.html b/src/doc/3.11.5/tools/nodetool/gettraceprobability.html deleted file mode 100644 index 2e886a4ee..000000000 --- a/src/doc/3.11.5/tools/nodetool/gettraceprobability.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettraceprobability

-
-
-

Usage

-
NAME
-        nodetool gettraceprobability - Print the current trace probability value
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettraceprobability
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/gossipinfo.html b/src/doc/3.11.5/tools/nodetool/gossipinfo.html deleted file mode 100644 index 9028186c0..000000000 --- a/src/doc/3.11.5/tools/nodetool/gossipinfo.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gossipinfo" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gossipinfo

-
-
-

Usage

-
NAME
-        nodetool gossipinfo - Shows the gossip information for the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gossipinfo
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/handoffwindow.html b/src/doc/3.11.5/tools/nodetool/handoffwindow.html deleted file mode 100644 index e9c30c696..000000000 --- a/src/doc/3.11.5/tools/nodetool/handoffwindow.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "handoffwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

handoffwindow

-
-
-

Usage

-
NAME
-        nodetool handoffwindow - Print current hinted handoff window
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] handoffwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/help.html b/src/doc/3.11.5/tools/nodetool/help.html deleted file mode 100644 index 95b2af745..000000000 --- a/src/doc/3.11.5/tools/nodetool/help.html +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "help" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

help

-
-
-

Usage

-
NAME
-        nodetool help - Display help information
-
-SYNOPSIS
-        nodetool help [--] [<command>...]
-
-OPTIONS
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <command>
-
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/import.html b/src/doc/3.11.5/tools/nodetool/import.html deleted file mode 100644 index da2fa89ad..000000000 --- a/src/doc/3.11.5/tools/nodetool/import.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "import" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

import

-
-
-

Usage

-
NAME
-        nodetool import - Import new SSTables to the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] import
-                [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-                [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-                [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-                <directory> ...
-
-OPTIONS
-        -c, --no-invalidate-caches
-            Don't invalidate the row cache when importing
-
-        -e, --extended-verify
-            Run an extended verify, verifying all values in the new sstables
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --keep-level
-            Keep the level on the new sstables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick import without verifying sstables, clearing row cache or
-            checking in which data directory to put the file
-
-        -r, --keep-repaired
-            Keep any repaired information from the sstables
-
-        -t, --no-tokens
-            Don't verify that all tokens in the new sstable are owned by the
-            current node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -v, --no-verify
-            Don't verify new sstables
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <directory> ...
-            The keyspace, table name and directories to import sstables from
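A hedged example of how the flags above might be combined; the keyspace, table, and staging directory are placeholders.

```bash
# Hypothetical example: quick import (-q skips verification and cache work)
# of SSTables staged under /tmp/staged-sstables into shop.users.
nodetool import -q -- shop users /tmp/staged-sstables
```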
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/info.html b/src/doc/3.11.5/tools/nodetool/info.html deleted file mode 100644 index 340055d5b..000000000 --- a/src/doc/3.11.5/tools/nodetool/info.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "info" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

info

-
-
-

Usage

-
NAME
-        nodetool info - Print node information (uptime, load, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] info [(-T | --tokens)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -T, --tokens
-            Display all tokens
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/invalidatecountercache.html b/src/doc/3.11.5/tools/nodetool/invalidatecountercache.html deleted file mode 100644 index 6425a7a6e..000000000 --- a/src/doc/3.11.5/tools/nodetool/invalidatecountercache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatecountercache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatecountercache

-
-
-

Usage

-
NAME
-        nodetool invalidatecountercache - Invalidate the counter cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatecountercache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/invalidatekeycache.html b/src/doc/3.11.5/tools/nodetool/invalidatekeycache.html deleted file mode 100644 index c951ae795..000000000 --- a/src/doc/3.11.5/tools/nodetool/invalidatekeycache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatekeycache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatekeycache

-
-
-

Usage

-
NAME
-        nodetool invalidatekeycache - Invalidate the key cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatekeycache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/invalidaterowcache.html b/src/doc/3.11.5/tools/nodetool/invalidaterowcache.html deleted file mode 100644 index 09cdada7e..000000000 --- a/src/doc/3.11.5/tools/nodetool/invalidaterowcache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidaterowcache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidaterowcache

-
-
-

Usage

-
NAME
-        nodetool invalidaterowcache - Invalidate the row cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidaterowcache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/join.html b/src/doc/3.11.5/tools/nodetool/join.html deleted file mode 100644 index c89611f03..000000000 --- a/src/doc/3.11.5/tools/nodetool/join.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "join" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

join

-
-
-

Usage

-
NAME
-        nodetool join - Join the ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] join
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/listsnapshots.html b/src/doc/3.11.5/tools/nodetool/listsnapshots.html deleted file mode 100644 index a21734bdb..000000000 --- a/src/doc/3.11.5/tools/nodetool/listsnapshots.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "listsnapshots" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

listsnapshots

-
-
-

Usage

-
NAME
-        nodetool listsnapshots - Lists all the snapshots along with the size on
-        disk and true size. True size is the total size of all SSTables which
-        are not backed up to disk. Size on disk is total size of the snapshot on
-        disk. Total TrueDiskSpaceUsed does not perform any SSTable deduplication.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] listsnapshots
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/move.html b/src/doc/3.11.5/tools/nodetool/move.html deleted file mode 100644 index 47a47f7ac..000000000 --- a/src/doc/3.11.5/tools/nodetool/move.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "move" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

move

-
-
-

Usage

-
NAME
-        nodetool move - Move node on the token ring to a new token
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] move [--] <new token>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <new token>
-            The new token.
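As a sketch, the trailing -- documented above is useful here because a new token can begin with a minus sign; the token value is a placeholder.

```bash
# Hypothetical example: move this node to a new token on the ring.
nodetool move -- -9223372036854775808
```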
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/netstats.html b/src/doc/3.11.5/tools/nodetool/netstats.html deleted file mode 100644 index e60575898..000000000 --- a/src/doc/3.11.5/tools/nodetool/netstats.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "netstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

netstats

-
-
-

Usage

-
NAME
-        nodetool netstats - Print network information on provided host
-        (connecting node by default)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] netstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/nodetool.html b/src/doc/3.11.5/tools/nodetool/nodetool.html deleted file mode 100644 index a725ce96f..000000000 --- a/src/doc/3.11.5/tools/nodetool/nodetool.html +++ /dev/null @@ -1,223 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Nodetool" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-
-

Usage

-
-
usage: nodetool [(-u <username> | --username <username>)]
        [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
        [(-pw <password> | --password <password>)]
        [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
        [(-pp | --print-port)] <command> [<args>]
-
-

The most commonly used nodetool commands are:

-
-

assassinate - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode

-

bootstrap - Monitor/manage node’s bootstrap process

-

cleanup - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces

-

clearsnapshot - Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots

-

clientstats - Print information about connected clients

-

compact - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables

-

compactionhistory - Print history of compaction

-

compactionstats - Print statistics on compactions

-

decommission - Decommission the node I am connecting to

-

describecluster - Print the name, snitch, partitioner and schema version of a cluster

-

describering - Shows the token ranges info of a given keyspace

-

disableauditlog - Disable the audit log

-

disableautocompaction - Disable autocompaction for the given keyspace and table

-

disablebackup - Disable incremental backup

-

disablebinary - Disable native transport (binary protocol)

-

disablefullquerylog - Disable the full query log

-

disablegossip - Disable gossip (effectively marking the node down)

-

disablehandoff - Disable storing hinted handoffs

-

disablehintsfordc - Disable hints for a data center

-

disableoldprotocolversions - Disable old protocol versions

-

drain - Drain the node (stop accepting writes and flush all tables)

-

enableauditlog - Enable the audit log

-

enableautocompaction - Enable autocompaction for the given keyspace and table

-

enablebackup - Enable incremental backup

-

enablebinary - Reenable native transport (binary protocol)

-

enablefullquerylog - Enable full query logging, defaults for the options are configured in cassandra.yaml

-

enablegossip - Reenable gossip

-

enablehandoff - Reenable future hints storing on the current node

-

enablehintsfordc - Enable hints for a data center that was previously disabled

-

enableoldprotocolversions - Enable old protocol versions

-

failuredetector - Shows the failure detector information for the cluster

-

flush - Flush one or more tables

-

garbagecollect - Remove deleted data from one or more tables

-

gcstats - Print GC Statistics

-

getbatchlogreplaythrottle - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster.

-

getcompactionthreshold - Print min and max compaction thresholds for a given table

-

getcompactionthroughput - Print the MB/s throughput cap for compaction in the system

-

getconcurrency - Get maximum concurrency for processing stages

-

getconcurrentcompactors - Get the number of concurrent compactors in the system.

-

getconcurrentviewbuilders - Get the number of concurrent view builders in the system

-

getendpoints - Print the end points that own the key

-

getinterdcstreamthroughput - Print the Mb/s throughput cap for inter-datacenter streaming in the system

-

getlogginglevels - Get the runtime logging levels

-

getmaxhintwindow - Print the max hint window in ms

-

getreplicas - Print replicas for a given key

-

getseeds - Get the currently in use seed node IP list excluding the node IP

-

getsstables - Print the sstable filenames that own the key

-

getstreamthroughput - Print the Mb/s throughput cap for streaming in the system

-

gettimeout - Print the timeout of the given type in ms

-

gettraceprobability - Print the current trace probability value

-

gossipinfo - Shows the gossip information for the cluster

-

handoffwindow - Print current hinted handoff window

-

help - Display help information

-

import - Import new SSTables to the system

-

info - Print node information (uptime, load, …)

-

invalidatecountercache - Invalidate the counter cache

-

invalidatekeycache - Invalidate the key cache

-

invalidaterowcache - Invalidate the row cache

-

join - Join the ring

-

listsnapshots - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is total size of the snapshot on disk. Total TrueDiskSpaceUsed does not perform any SSTable deduplication.

-

move - Move node on the token ring to a new token

-

netstats - Print network information on provided host (connecting node by default)

-

pausehandoff - Pause hints delivery process

-

profileload - Low footprint profiling of activity for a period of time

-

proxyhistograms - Print statistic histograms for network operations

-

rangekeysample - Shows the sampled keys held across all keyspaces

-

rebuild - Rebuild data by streaming from other nodes (similarly to bootstrap)

-

rebuild_index - A full rebuild of native secondary indexes for a given table

-

refresh - Load newly placed SSTables to the system without restart

-

refreshsizeestimates - Refresh system.size_estimates

-

reloadlocalschema - Reload local node schema from system tables

-

reloadseeds - Reload the seed node list from the seed node provider

-

reloadssl - Signals Cassandra to reload SSL certificates

-

reloadtriggers - Reload trigger classes

-

relocatesstables - Relocates sstables to the correct disk

-

removenode - Show status of current node removal, force completion of pending removal or remove provided ID

-

repair - Repair one or more tables

-

repair_admin - list and fail incremental repair sessions

-

replaybatchlog - Kick off batchlog replay and wait for finish

-

resetfullquerylog - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX

-

resetlocalschema - Reset node’s local schema and resync

-

resumehandoff - Resume hints delivery process

-

ring - Print information about the token ring

-

scrub - Scrub (rebuild sstables for) one or more tables

-

setbatchlogreplaythrottle - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster.

-

setcachecapacity - Set global key, row, and counter cache capacities (in MB units)

-

setcachekeystosave - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable

-

setcompactionthreshold - Set min and max compaction thresholds for a given table

-

setcompactionthroughput - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling

-

setconcurrency - Set maximum concurrency for processing stage

-

setconcurrentcompactors - Set number of concurrent compactors in the system.

-

setconcurrentviewbuilders - Set the number of concurrent view builders in the system

-

sethintedhandoffthrottlekb - Set hinted handoff throttle in kb per second, per delivery thread.

-

setinterdcstreamthroughput - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling

-

setlogginglevel - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters.

-

setmaxhintwindow - Set the specified max hint window in ms

-

setstreamthroughput - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling

-

settimeout - Set the specified timeout in ms, or 0 to disable timeout

-

settraceprobability - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default

-

sjk - Run commands of ‘Swiss Java Knife’. Run ‘nodetool sjk --help’ for more information.

-

snapshot - Take a snapshot of specified keyspaces or a snapshot of the specified table

-

status - Print cluster information (state, load, IDs, …)

-

statusautocompaction - status of autocompaction of the given keyspace and table

-

statusbackup - Status of incremental backup

-

statusbinary - Status of native transport (binary protocol)

-

statusgossip - Status of gossip

-

statushandoff - Status of storing future hints on the current node

-

stop - Stop compaction

-

stopdaemon - Stop cassandra daemon

-

tablehistograms - Print statistic histograms for a given table

-

tablestats - Print statistics on tables

-

toppartitions - Sample and print the most active partitions

-

tpstats - Print usage statistics of thread pools

-

truncatehints - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified.

-

upgradesstables - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version)

-

verify - Verify (check data checksum for) one or more tables

-

version - Print cassandra version

-

viewbuildstatus - Show progress of a materialized view build

-
-

See ‘nodetool help <command>’ for more information on a specific command.
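To tie the global options and subcommands together, a hedged example of a remote invocation; the host, port, user, and password-file path are placeholders.

```bash
# Hypothetical example: run 'status' against a remote node over JMX.
nodetool -h 10.0.0.5 -p 7199 -u cassandra \
    -pwf /etc/cassandra/jmxremote.password status
```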

-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/pausehandoff.html b/src/doc/3.11.5/tools/nodetool/pausehandoff.html deleted file mode 100644 index 643ad210f..000000000 --- a/src/doc/3.11.5/tools/nodetool/pausehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "pausehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

pausehandoff

-
-
-

Usage

-
NAME
-        nodetool pausehandoff - Pause hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] pausehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/profileload.html b/src/doc/3.11.5/tools/nodetool/profileload.html deleted file mode 100644 index 29aab9ee7..000000000 --- a/src/doc/3.11.5/tools/nodetool/profileload.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "profileload" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

profileload

-
-
-

Usage

-
NAME
-        nodetool profileload - Low footprint profiling of activity for a period
-        of time
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] profileload [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
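A hedged example combining the sampler options above; the keyspace, table, and duration are placeholders.

```bash
# Hypothetical example: profile shop.users for 10 seconds (10000 ms),
# reporting the top 20 samples (-k 20).
nodetool profileload -k 20 -- shop users 10000
```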
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/proxyhistograms.html b/src/doc/3.11.5/tools/nodetool/proxyhistograms.html deleted file mode 100644 index d9a349bad..000000000 --- a/src/doc/3.11.5/tools/nodetool/proxyhistograms.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "proxyhistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

proxyhistograms

-
-
-

Usage

-
NAME
-        nodetool proxyhistograms - Print statistic histograms for network
-        operations
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] proxyhistograms
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/rangekeysample.html b/src/doc/3.11.5/tools/nodetool/rangekeysample.html deleted file mode 100644 index cfee0cbf4..000000000 --- a/src/doc/3.11.5/tools/nodetool/rangekeysample.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rangekeysample" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rangekeysample

-
-
-

Usage

-
NAME
-        nodetool rangekeysample - Shows the sampled keys held across all
-        keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rangekeysample
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/rebuild.html b/src/doc/3.11.5/tools/nodetool/rebuild.html deleted file mode 100644 index 34f6800cc..000000000 --- a/src/doc/3.11.5/tools/nodetool/rebuild.html +++ /dev/null @@ -1,148 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild

-
-
-

Usage

-
NAME
-        nodetool rebuild - Rebuild data by streaming from other nodes (similarly
-        to bootstrap)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild
-                [(-ks <specific_keyspace> | --keyspace <specific_keyspace>)]
-                [(-s <specific_sources> | --sources <specific_sources>)]
-                [(-ts <specific_tokens> | --tokens <specific_tokens>)] [--]
-                <src-dc-name>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -ks <specific_keyspace>, --keyspace <specific_keyspace>
-            Use -ks to rebuild specific keyspace.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <specific_sources>, --sources <specific_sources>
-            Use -s to specify hosts that this node should stream from when -ts
-            is used. Multiple hosts should be separated using commas (e.g.
-            127.0.0.1,127.0.0.2,...)
-
-        -ts <specific_tokens>, --tokens <specific_tokens>
-            Use -ts to rebuild specific token ranges, in the format of "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]".
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <src-dc-name>
-            Name of DC from which to select sources for streaming. By default,
-            pick any DC
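As an illustrative sketch only, restricting the rebuild to one keyspace and one source data center; both names are placeholders.

```bash
# Hypothetical example: rebuild keyspace 'shop', streaming only from 'dc1'.
nodetool rebuild -ks shop -- dc1
```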
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/rebuild_index.html b/src/doc/3.11.5/tools/nodetool/rebuild_index.html deleted file mode 100644 index 7a1cc237f..000000000 --- a/src/doc/3.11.5/tools/nodetool/rebuild_index.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild_index" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild_index

-
-
-

Usage

-
NAME
-        nodetool rebuild_index - A full rebuild of native secondary indexes for
-        a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild_index [--] <keyspace>
-                <table> <indexName...>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <indexName...>
-            The keyspace and table name followed by a list of index names
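A hedged usage sketch; the keyspace, table, and index names are placeholders.

```bash
# Hypothetical example: rebuild two secondary indexes on shop.users.
nodetool rebuild_index -- shop users users_email_idx users_age_idx
```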
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/refresh.html b/src/doc/3.11.5/tools/nodetool/refresh.html deleted file mode 100644 index 45adf9021..000000000 --- a/src/doc/3.11.5/tools/nodetool/refresh.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refresh" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refresh

-
-
-

Usage

-
NAME
-        nodetool refresh - Load newly placed SSTables to the system without
-        restart
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refresh [--] <keyspace>
-                <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
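For illustration, a hedged example with placeholder keyspace and table names:

```bash
# Hypothetical example: pick up SSTables copied into the shop.users
# data directory without restarting the node.
nodetool refresh -- shop users
```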
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/refreshsizeestimates.html b/src/doc/3.11.5/tools/nodetool/refreshsizeestimates.html deleted file mode 100644 index c7a27a4bd..000000000 --- a/src/doc/3.11.5/tools/nodetool/refreshsizeestimates.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refreshsizeestimates" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refreshsizeestimates

-
-
-

Usage

-
NAME
-        nodetool refreshsizeestimates - Refresh system.size_estimates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refreshsizeestimates
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/reloadlocalschema.html b/src/doc/3.11.5/tools/nodetool/reloadlocalschema.html deleted file mode 100644 index 06d8264b3..000000000 --- a/src/doc/3.11.5/tools/nodetool/reloadlocalschema.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadlocalschema

-
-
-

Usage

-
NAME
-        nodetool reloadlocalschema - Reload local node schema from system tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/reloadseeds.html b/src/doc/3.11.5/tools/nodetool/reloadseeds.html deleted file mode 100644 index 2e1c5f600..000000000 --- a/src/doc/3.11.5/tools/nodetool/reloadseeds.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadseeds

-
-
-

Usage

-
NAME
-        nodetool reloadseeds - Reload the seed node list from the seed node
-        provider
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/reloadssl.html b/src/doc/3.11.5/tools/nodetool/reloadssl.html deleted file mode 100644 index 8943ab643..000000000 --- a/src/doc/3.11.5/tools/nodetool/reloadssl.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadssl" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadssl

-
-
-

Usage

-
NAME
-        nodetool reloadssl - Signals Cassandra to reload SSL certificates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadssl
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/reloadtriggers.html b/src/doc/3.11.5/tools/nodetool/reloadtriggers.html deleted file mode 100644 index 1f65365c6..000000000 --- a/src/doc/3.11.5/tools/nodetool/reloadtriggers.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadtriggers" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadtriggers

-
-
-

Usage

-
NAME
-        nodetool reloadtriggers - Reload trigger classes
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadtriggers
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/relocatesstables.html b/src/doc/3.11.5/tools/nodetool/relocatesstables.html deleted file mode 100644 index e198f62cd..000000000 --- a/src/doc/3.11.5/tools/nodetool/relocatesstables.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "relocatesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

relocatesstables

-
-
-

Usage

-
NAME
-        nodetool relocatesstables - Relocates sstables to the correct disk
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] relocatesstables
-                [(-j <jobs> | --jobs <jobs>)] [--] <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to relocate simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/removenode.html b/src/doc/3.11.5/tools/nodetool/removenode.html deleted file mode 100644 index 84bc0bd25..000000000 --- a/src/doc/3.11.5/tools/nodetool/removenode.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "removenode" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

removenode

-
-
-

Usage

-
NAME
-        nodetool removenode - Show status of current node removal, force
-        completion of pending removal or remove provided ID
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] removenode [--]
-                <status>|<force>|<ID>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <status>|<force>|<ID>
-            Show status of current node removal, force completion of pending
-            removal, or remove provided ID
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/repair.html b/src/doc/3.11.5/tools/nodetool/repair.html deleted file mode 100644 index 52e79cda3..000000000 --- a/src/doc/3.11.5/tools/nodetool/repair.html +++ /dev/null @@ -1,197 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair

-
-
-

Usage

-
NAME
-        nodetool repair - Repair one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair
-                [(-dc <specific_dc> | --in-dc <specific_dc>)...]
-                [(-dcpar | --dc-parallel)] [(-et <end_token> | --end-token <end_token>)]
-                [(-force | --force)] [(-full | --full)]
-                [(-hosts <specific_host> | --in-hosts <specific_host>)...]
-                [(-j <job_threads> | --job-threads <job_threads>)]
-                [(-local | --in-local-dc)] [(-os | --optimise-streams)] [(-pl | --pull)]
-                [(-pr | --partitioner-range)] [(-prv | --preview)]
-                [(-seq | --sequential)]
-                [(-st <start_token> | --start-token <start_token>)] [(-tr | --trace)]
-                [(-vd | --validate)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -dc <specific_dc>, --in-dc <specific_dc>
-            Use -dc to repair specific datacenters
-
-        -dcpar, --dc-parallel
-            Use -dcpar to repair data centers in parallel.
-
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which repair range ends (inclusive)
-
-        -force, --force
-            Use -force to filter out down endpoints
-
-        -full, --full
-            Use -full to issue a full repair.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hosts <specific_host>, --in-hosts <specific_host>
-            Use -hosts to repair specific hosts
-
-        -j <job_threads>, --job-threads <job_threads>
-            Number of threads to run repair jobs. Usually this means number of
-            CFs to repair concurrently. WARNING: increasing this puts more load
-            on repairing nodes, so be careful. (default: 1, max: 4)
-
-        -local, --in-local-dc
-            Use -local to only repair against nodes in the same datacenter
-
-        -os, --optimise-streams
-            Use --optimise-streams to try to reduce the number of streams we do
-            (EXPERIMENTAL, see CASSANDRA-3200).
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pl, --pull
-            Use --pull to perform a one way repair where data is only streamed
-            from a remote node to this node.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pr, --partitioner-range
-            Use -pr to repair only the first range returned by the partitioner
-
-        -prv, --preview
-            Determine ranges and amount of data to be streamed, but don't
-            actually perform repair
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -seq, --sequential
-            Use -seq to carry out a sequential repair
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the repair range starts
-            (exclusive)
-
-        -tr, --trace
-            Use -tr to trace the repair. Traces are logged to
-            system_traces.events.
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -vd, --validate
-            Checks that repaired data is in sync between nodes. Out of sync
-            repaired data indicates a full repair should be run.
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/repair_admin.html b/src/doc/3.11.5/tools/nodetool/repair_admin.html deleted file mode 100644 index 80c7966ff..000000000 --- a/src/doc/3.11.5/tools/nodetool/repair_admin.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair_admin" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair_admin

-
-
-

Usage

-
NAME
-        nodetool repair_admin - list and fail incremental repair sessions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair_admin [(-a | --all)]
-                [(-f | --force)] [(-l | --list)] [(-x <cancel> | --cancel <cancel>)]
-
-OPTIONS
-        -a, --all
-            include completed and failed sessions
-
-        -f, --force
-            cancel a repair session from a node other than the repair coordinator.
-            Attempting to cancel FINALIZED or FAILED sessions is an error.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --list
-            list repair sessions (default behavior)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -x <cancel>, --cancel <cancel>
-            cancel an incremental repair session
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/replaybatchlog.html b/src/doc/3.11.5/tools/nodetool/replaybatchlog.html deleted file mode 100644 index 430fc2715..000000000 --- a/src/doc/3.11.5/tools/nodetool/replaybatchlog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "replaybatchlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

replaybatchlog

-
-
-

Usage

-
NAME
-        nodetool replaybatchlog - Kick off batchlog replay and wait for finish
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] replaybatchlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/resetfullquerylog.html b/src/doc/3.11.5/tools/nodetool/resetfullquerylog.html deleted file mode 100644 index f9953f297..000000000 --- a/src/doc/3.11.5/tools/nodetool/resetfullquerylog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetfullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetfullquerylog

-
-
-

Usage

-
NAME
-        nodetool resetfullquerylog - Stop the full query log and clean files in
-        the configured full query log directory from cassandra.yaml as well as
-        JMX
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetfullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/resetlocalschema.html b/src/doc/3.11.5/tools/nodetool/resetlocalschema.html deleted file mode 100644 index b81fa4f21..000000000 --- a/src/doc/3.11.5/tools/nodetool/resetlocalschema.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetlocalschema

-
-
-

Usage

-
NAME
-        nodetool resetlocalschema - Reset node's local schema and resync
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/resumehandoff.html b/src/doc/3.11.5/tools/nodetool/resumehandoff.html deleted file mode 100644 index 95ce71708..000000000 --- a/src/doc/3.11.5/tools/nodetool/resumehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resumehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resumehandoff

-
-
-

Usage

-
NAME
-        nodetool resumehandoff - Resume hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resumehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/ring.html b/src/doc/3.11.5/tools/nodetool/ring.html deleted file mode 100644 index c68a2355a..000000000 --- a/src/doc/3.11.5/tools/nodetool/ring.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "ring" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

ring

-
-
-

Usage

-
NAME
-        nodetool ring - Print information about the token ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] ring [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace>
-            Specify a keyspace for accurate ownership information (topology
-            awareness)
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/scrub.html b/src/doc/3.11.5/tools/nodetool/scrub.html deleted file mode 100644 index 98d982a4d..000000000 --- a/src/doc/3.11.5/tools/nodetool/scrub.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "scrub" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

scrub

-
-
-

Usage

-
NAME
-        nodetool scrub - Scrub (rebuild sstables for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] scrub
-                [(-j <jobs> | --jobs <jobs>)] [(-n | --no-validate)]
-                [(-ns | --no-snapshot)] [(-r | --reinsert-overflowed-ttl)]
-                [(-s | --skip-corrupted)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to scrub simultaneously, set to 0 to use all
-            available compaction threads
-
-        -n, --no-validate
-            Do not validate columns using column validator
-
-        -ns, --no-snapshot
-            Scrubbed CFs will be snapshotted first, if disableSnapshot is false.
-            (default false)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --reinsert-overflowed-ttl
-            Rewrites rows with overflowed expiration date affected by
-            CASSANDRA-14092 with the maximum supported expiration date of
-            2038-01-19T03:14:06+00:00. The rows are rewritten with the original
-            timestamp incremented by one millisecond to override/supersede any
-            potential tombstone that may have been generated during compaction
-            of the affected rows.
-
-        -s, --skip-corrupted
-            Skip corrupted partitions even when scrubbing counter tables.
-            (default false)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setbatchlogreplaythrottle.html b/src/doc/3.11.5/tools/nodetool/setbatchlogreplaythrottle.html deleted file mode 100644 index a2291b07c..000000000 --- a/src/doc/3.11.5/tools/nodetool/setbatchlogreplaythrottle.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool setbatchlogreplaythrottle - Set batchlog replay throttle in KB
-        per second, or 0 to disable throttling. This will be reduced
-        proportionally to the number of nodes in the cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setbatchlogreplaythrottle [--]
-                <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setcachecapacity.html b/src/doc/3.11.5/tools/nodetool/setcachecapacity.html deleted file mode 100644 index 3cec5e861..000000000 --- a/src/doc/3.11.5/tools/nodetool/setcachecapacity.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachecapacity" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachecapacity

-
-
-

Usage

-
NAME
-        nodetool setcachecapacity - Set global key, row, and counter cache
-        capacities (in MB units)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachecapacity [--]
-                <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-            Key cache, row cache, and counter cache (in MB)
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setcachekeystosave.html b/src/doc/3.11.5/tools/nodetool/setcachekeystosave.html deleted file mode 100644 index d9b309d5e..000000000 --- a/src/doc/3.11.5/tools/nodetool/setcachekeystosave.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachekeystosave" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachekeystosave

-
-
-

Usage

-
NAME
-        nodetool setcachekeystosave - Set number of keys saved by each cache for
-        faster post-restart warmup. 0 to disable
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachekeystosave [--]
-                <key-cache-keys-to-save> <row-cache-keys-to-save>
-                <counter-cache-keys-to-save>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-keys-to-save> <row-cache-keys-to-save>
-        <counter-cache-keys-to-save>
-            The number of keys saved by each cache. 0 to disable
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setcompactionthreshold.html b/src/doc/3.11.5/tools/nodetool/setcompactionthreshold.html deleted file mode 100644 index 0a7238d62..000000000 --- a/src/doc/3.11.5/tools/nodetool/setcompactionthreshold.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool setcompactionthreshold - Set min and max compaction thresholds
-        for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthreshold [--]
-                <keyspace> <table> <minthreshold> <maxthreshold>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <minthreshold> <maxthreshold>
-            The keyspace, the table, min and max threshold
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setcompactionthroughput.html b/src/doc/3.11.5/tools/nodetool/setcompactionthroughput.html deleted file mode 100644 index f49dc40a1..000000000 --- a/src/doc/3.11.5/tools/nodetool/setcompactionthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool setcompactionthroughput - Set the MB/s throughput cap for
-        compaction in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in MB, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setconcurrency.html b/src/doc/3.11.5/tools/nodetool/setconcurrency.html deleted file mode 100644 index 5f5744610..000000000 --- a/src/doc/3.11.5/tools/nodetool/setconcurrency.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrency

-
-
-

Usage

-
NAME
-        nodetool setconcurrency - Set maximum concurrency for processing stage
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrency [--]
-                <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-                <maximum-concurrency>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-        <maximum-concurrency>
-            Set concurrency for processing stage
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setconcurrentcompactors.html b/src/doc/3.11.5/tools/nodetool/setconcurrentcompactors.html deleted file mode 100644 index d591494db..000000000 --- a/src/doc/3.11.5/tools/nodetool/setconcurrentcompactors.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool setconcurrentcompactors - Set number of concurrent compactors
-        in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentcompactors [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent compactors, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setconcurrentviewbuilders.html b/src/doc/3.11.5/tools/nodetool/setconcurrentviewbuilders.html deleted file mode 100644 index 152368ff3..000000000 --- a/src/doc/3.11.5/tools/nodetool/setconcurrentviewbuilders.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool setconcurrentviewbuilders - Set the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentviewbuilders [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent view builders, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/sethintedhandoffthrottlekb.html b/src/doc/3.11.5/tools/nodetool/sethintedhandoffthrottlekb.html deleted file mode 100644 index d846a6478..000000000 --- a/src/doc/3.11.5/tools/nodetool/sethintedhandoffthrottlekb.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sethintedhandoffthrottlekb" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sethintedhandoffthrottlekb

-
-
-

Usage

-
NAME
-        nodetool sethintedhandoffthrottlekb - Set hinted handoff throttle in kb
-        per second, per delivery thread.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sethintedhandoffthrottlekb
-                [--] <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setinterdcstreamthroughput.html b/src/doc/3.11.5/tools/nodetool/setinterdcstreamthroughput.html deleted file mode 100644 index dbded373b..000000000 --- a/src/doc/3.11.5/tools/nodetool/setinterdcstreamthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setinterdcstreamthroughput - Set the Mb/s throughput cap for
-        inter-datacenter streaming in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setinterdcstreamthroughput
-                [--] <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setlogginglevel.html b/src/doc/3.11.5/tools/nodetool/setlogginglevel.html deleted file mode 100644 index 2ec39f784..000000000 --- a/src/doc/3.11.5/tools/nodetool/setlogginglevel.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setlogginglevel" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setlogginglevel

-
-
-

Usage

-
NAME
-        nodetool setlogginglevel - Set the log level threshold for a given
-        component or class. Will reset to the initial configuration if called
-        with no parameters.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setlogginglevel [--]
-                <component|class> <level>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <component|class> <level>
-            The component or class to change the level for and the log level
-            threshold to set. Will reset to initial level if omitted. Available
-            components: bootstrap, compaction, repair, streaming, cql, ring
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setmaxhintwindow.html b/src/doc/3.11.5/tools/nodetool/setmaxhintwindow.html deleted file mode 100644 index 6dcb90fd8..000000000 --- a/src/doc/3.11.5/tools/nodetool/setmaxhintwindow.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool setmaxhintwindow - Set the specified max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setmaxhintwindow [--]
-                <value_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_ms>
-            Value of maxhintwindow in ms
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/setstreamthroughput.html b/src/doc/3.11.5/tools/nodetool/setstreamthroughput.html deleted file mode 100644 index 7e30792c5..000000000 --- a/src/doc/3.11.5/tools/nodetool/setstreamthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setstreamthroughput - Set the Mb/s throughput cap for streaming
-        in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setstreamthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/settimeout.html b/src/doc/3.11.5/tools/nodetool/settimeout.html deleted file mode 100644 index b661f4a0e..000000000 --- a/src/doc/3.11.5/tools/nodetool/settimeout.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settimeout

-
-
-

Usage

-
NAME
-        nodetool settimeout - Set the specified timeout in ms, or 0 to disable
-        timeout
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settimeout [--] <timeout_type>
-                <timeout_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type> <timeout_in_ms>
-            Timeout type followed by value in ms (0 disables socket streaming
-            timeout). Type should be one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/settraceprobability.html b/src/doc/3.11.5/tools/nodetool/settraceprobability.html deleted file mode 100644 index 819879ca9..000000000 --- a/src/doc/3.11.5/tools/nodetool/settraceprobability.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settraceprobability

-
-
-

Usage

-
NAME
-        nodetool settraceprobability - Sets the probability for tracing any
-        given request to value. 0 disables, 1 enables for all requests, 0 is the
-        default
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settraceprobability [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Trace probability between 0 and 1 (ex: 0.2)
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/sjk.html b/src/doc/3.11.5/tools/nodetool/sjk.html deleted file mode 100644 index d38b60a72..000000000 --- a/src/doc/3.11.5/tools/nodetool/sjk.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sjk" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sjk

-
-
-

Usage

-
NAME
-        nodetool sjk - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk
-        --help' for more information.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sjk [--] [<args>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <args>
-            Arguments passed as is to 'Swiss Java Knife'.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/snapshot.html b/src/doc/3.11.5/tools/nodetool/snapshot.html deleted file mode 100644 index 546aa0cbb..000000000 --- a/src/doc/3.11.5/tools/nodetool/snapshot.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "snapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

snapshot

-
-
-

Usage

-
NAME
-        nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-        of the specified table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] snapshot
-                [(-cf <table> | --column-family <table> | --table <table>)]
-                [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-                [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-        -cf <table>, --column-family <table>, --table <table>
-            The table name (you must specify one and only one keyspace for using
-            this option)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-            The list of Keyspace.table to take a snapshot of (you must not
-            specify only a keyspace)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -sf, --skip-flush
-            Do not flush memtables before snapshotting (snapshot will not
-            contain unflushed data)
-
-        -t <tag>, --tag <tag>
-            The name of the snapshot
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspaces...>]
-            List of keyspaces. By default, all keyspaces
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/status.html b/src/doc/3.11.5/tools/nodetool/status.html deleted file mode 100644 index 1e02018a4..000000000 --- a/src/doc/3.11.5/tools/nodetool/status.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "status" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

status

-
-
-

Usage

-
NAME
-        nodetool status - Print cluster information (state, load, IDs, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] status [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace>]
-            The keyspace name
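Typical invocations might look like the following (the keyspace name is illustrative):

```bash
# Cluster overview: state, load, tokens and host IDs of every node
nodetool status

# Per-keyspace ownership, with hostnames resolved instead of IPs
nodetool status -r my_keyspace
```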
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/statusautocompaction.html b/src/doc/3.11.5/tools/nodetool/statusautocompaction.html deleted file mode 100644 index fe3b8b10d..000000000 --- a/src/doc/3.11.5/tools/nodetool/statusautocompaction.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusautocompaction

-
-
-

Usage

-
NAME
-        nodetool statusautocompaction - status of autocompaction of the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusautocompaction
-                [(-a | --all)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --all
-            Show auto compaction status for each keyspace/table
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
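For example (keyspace and table names are placeholders):

```bash
# Auto-compaction status for one table
nodetool statusautocompaction my_keyspace my_table

# Auto-compaction status for every keyspace and table
nodetool statusautocompaction --all
```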
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/statusbackup.html b/src/doc/3.11.5/tools/nodetool/statusbackup.html deleted file mode 100644 index 8be437023..000000000 --- a/src/doc/3.11.5/tools/nodetool/statusbackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbackup

-
-
-

Usage

-
NAME
-        nodetool statusbackup - Status of incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/statusbinary.html b/src/doc/3.11.5/tools/nodetool/statusbinary.html deleted file mode 100644 index 95ddc2aaa..000000000 --- a/src/doc/3.11.5/tools/nodetool/statusbinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbinary

-
-
-

Usage

-
NAME
-        nodetool statusbinary - Status of native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/statusgossip.html b/src/doc/3.11.5/tools/nodetool/statusgossip.html deleted file mode 100644 index c2d75abf2..000000000 --- a/src/doc/3.11.5/tools/nodetool/statusgossip.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusgossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusgossip

-
-
-

Usage

-
NAME
-        nodetool statusgossip - Status of gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusgossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/statushandoff.html b/src/doc/3.11.5/tools/nodetool/statushandoff.html deleted file mode 100644 index d04369b62..000000000 --- a/src/doc/3.11.5/tools/nodetool/statushandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statushandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statushandoff

-
-
-

Usage

-
NAME
-        nodetool statushandoff - Status of storing future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statushandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/stop.html b/src/doc/3.11.5/tools/nodetool/stop.html deleted file mode 100644 index 5b9e6d834..000000000 --- a/src/doc/3.11.5/tools/nodetool/stop.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stop" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stop

-
-
-

Usage

-
NAME
-        nodetool stop - Stop compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stop
-                [(-id <compactionId> | --compaction-id <compactionId>)] [--] <compaction
-                type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -id <compactionId>, --compaction-id <compactionId>
-            Use -id to stop a compaction by the specified id. Ids can be found
-            in the transaction log files whose name starts with compaction_,
-            located in the table transactions folder.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <compaction type>
-            Supported types are COMPACTION, VALIDATION, CLEANUP, SCRUB,
-            UPGRADE_SSTABLES, INDEX_BUILD, TOMBSTONE_COMPACTION, ANTICOMPACTION,
-            VERIFY, VIEW_BUILD, INDEX_SUMMARY, RELOCATE, GARBAGE_COLLECT
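A sketch of typical usage; a single compaction can also be targeted with `-id <compactionId>`:

```bash
# Stop all currently running regular compactions on this node
nodetool stop COMPACTION

# Stop a running anti-entropy validation instead
nodetool stop VALIDATION
```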
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/stopdaemon.html b/src/doc/3.11.5/tools/nodetool/stopdaemon.html deleted file mode 100644 index b56509d74..000000000 --- a/src/doc/3.11.5/tools/nodetool/stopdaemon.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stopdaemon" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stopdaemon

-
-
-

Usage

-
NAME
-        nodetool stopdaemon - Stop cassandra daemon
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stopdaemon
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/tablehistograms.html b/src/doc/3.11.5/tools/nodetool/tablehistograms.html deleted file mode 100644 index 524c13399..000000000 --- a/src/doc/3.11.5/tools/nodetool/tablehistograms.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablehistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablehistograms

-
-
-

Usage

-
NAME
-        nodetool tablehistograms - Print statistic histograms for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablehistograms [--]
-                [<keyspace> <table> | <keyspace.table>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <table> | <keyspace.table>]
-            The keyspace and table name
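For example, with a made-up keyspace and table (both argument forms are equivalent):

```bash
# Read/write latency and partition size percentiles for one table
nodetool tablehistograms my_keyspace my_table
nodetool tablehistograms my_keyspace.my_table
```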
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/tablestats.html b/src/doc/3.11.5/tools/nodetool/tablestats.html deleted file mode 100644 index a08e5684d..000000000 --- a/src/doc/3.11.5/tools/nodetool/tablestats.html +++ /dev/null @@ -1,167 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablestats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablestats

-
-
-

Usage

-
NAME
-        nodetool tablestats - Print statistics on tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablestats
-                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]
-                [(-s <sort_key> | --sort <sort_key>)] [(-t <top> | --top <top>)] [--]
-                [<keyspace.table>...]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -i
-            Ignore the list of tables and display the remaining tables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <sort_key>, --sort <sort_key>
-            Sort tables by specified sort key
-            (average_live_cells_per_slice_last_five_minutes,
-            average_tombstones_per_slice_last_five_minutes,
-            bloom_filter_false_positives, bloom_filter_false_ratio,
-            bloom_filter_off_heap_memory_used, bloom_filter_space_used,
-            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,
-            compacted_partition_minimum_bytes,
-            compression_metadata_off_heap_memory_used, dropped_mutations,
-            full_name, index_summary_off_heap_memory_used, local_read_count,
-            local_read_latency_ms, local_write_latency_ms,
-            maximum_live_cells_per_slice_last_five_minutes,
-            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,
-            memtable_data_size, memtable_off_heap_memory_used,
-            memtable_switch_count, number_of_partitions_estimate,
-            off_heap_memory_used_total, pending_flushes, percent_repaired,
-            read_latency, reads, space_used_by_snapshots_total, space_used_live,
-            space_used_total, sstable_compression_ratio, sstable_count,
-            table_name, write_latency, writes)
-
-        -t <top>, --top <top>
-            Show only the top K tables for the sort key (specify the number K of
-            tables to be shown)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace.table>...]
-            List of tables (or keyspace) names
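Illustrative invocations (the table name and sort key are chosen only as examples):

```bash
# Human-readable statistics for a single table
nodetool tablestats -H my_keyspace.my_table

# All tables as JSON, sorted by read latency
nodetool tablestats -F json -s read_latency
```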
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/toppartitions.html b/src/doc/3.11.5/tools/nodetool/toppartitions.html deleted file mode 100644 index 9d3047fb5..000000000 --- a/src/doc/3.11.5/tools/nodetool/toppartitions.html +++ /dev/null @@ -1,141 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "toppartitions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

toppartitions

-
-
-

Usage

-
NAME
-        nodetool toppartitions - Sample and print the most active partitions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] toppartitions [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
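For example (names are placeholders; the last argument is the sampling window in milliseconds):

```bash
# Sample the hottest partitions of one table for 10 seconds
nodetool toppartitions my_keyspace my_table 10000

# Same window, but report the top 20 partitions
nodetool toppartitions -k 20 my_keyspace my_table 10000
```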
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/tpstats.html b/src/doc/3.11.5/tools/nodetool/tpstats.html deleted file mode 100644 index b5987c827..000000000 --- a/src/doc/3.11.5/tools/nodetool/tpstats.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tpstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tpstats

-
-
-

Usage

-
NAME
-        nodetool tpstats - Print usage statistics of thread pools
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tpstats
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
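For example:

```bash
# Thread pool statistics in the default tabular form
nodetool tpstats

# The same data as JSON, which is easier to consume from scripts
nodetool tpstats -F json
```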
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/truncatehints.html b/src/doc/3.11.5/tools/nodetool/truncatehints.html deleted file mode 100644 index 222e1cadc..000000000 --- a/src/doc/3.11.5/tools/nodetool/truncatehints.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "truncatehints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

truncatehints

-
-
-

Usage

-
NAME
-        nodetool truncatehints - Truncate all hints on the local node, or
-        truncate hints for the endpoint(s) specified.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] truncatehints [--] [endpoint
-                ... ]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [endpoint ... ]
-            Endpoint address(es) to delete hints for, either ip address
-            ("127.0.0.1") or hostname
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/upgradesstables.html b/src/doc/3.11.5/tools/nodetool/upgradesstables.html deleted file mode 100644 index 56254630d..000000000 --- a/src/doc/3.11.5/tools/nodetool/upgradesstables.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "upgradesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

upgradesstables

-
-
-

Usage

-
NAME
-        nodetool upgradesstables - Rewrite sstables (for the requested tables)
-        that are not on the current version (thus upgrading them to said current
-        version)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] upgradesstables
-                [(-a | --include-all-sstables)] [(-j <jobs> | --jobs <jobs>)] [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --include-all-sstables
-            Use -a to include all sstables, even those already on the current
-            version
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to upgrade simultaneously; set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
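Illustrative invocations (keyspace and table names are placeholders):

```bash
# Rewrite any old-format sstables across all keyspaces, two at a time
nodetool upgradesstables -j 2

# Rewrite every sstable of one table, even those already on the current version
nodetool upgradesstables -a my_keyspace my_table
```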
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/verify.html b/src/doc/3.11.5/tools/nodetool/verify.html deleted file mode 100644 index 868e5773b..000000000 --- a/src/doc/3.11.5/tools/nodetool/verify.html +++ /dev/null @@ -1,152 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "verify" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

verify

-
-
-

Usage

-
NAME
-        nodetool verify - Verify (check data checksum for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] verify
-                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]
-                [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)] [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -c, --check-version
-            Also check that all sstables are the latest version
-
-        -d, --dfp
-            Invoke the disk failure policy if a corrupt sstable is found
-
-        -e, --extended-verify
-            Verify each cell data, beyond simply checking sstable checksums
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick check - avoid reading all data to verify checksums
-
-        -r, --rsc
-            Mutate the repair status on corrupt sstables
-
-        -t, --check-tokens
-            Verify that all tokens in sstables are owned by this node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
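For example (keyspace and table names are placeholders):

```bash
# Verify sstable checksums for one table
nodetool verify my_keyspace my_table

# Extended verification that also checks each cell, not just sstable checksums
nodetool verify -e my_keyspace my_table
```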
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/version.html b/src/doc/3.11.5/tools/nodetool/version.html deleted file mode 100644 index 2970d1fca..000000000 --- a/src/doc/3.11.5/tools/nodetool/version.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "version" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

version

-
-
-

Usage

-
NAME
-        nodetool version - Print cassandra version
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] version
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/tools/nodetool/viewbuildstatus.html b/src/doc/3.11.5/tools/nodetool/viewbuildstatus.html deleted file mode 100644 index c0c640b2c..000000000 --- a/src/doc/3.11.5/tools/nodetool/viewbuildstatus.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "viewbuildstatus" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

viewbuildstatus

-
-
-

Usage

-
NAME
-        nodetool viewbuildstatus - Show progress of a materialized view build
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] viewbuildstatus [--]
-                <keyspace> <view> | <keyspace.view>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <view> | <keyspace.view>
-            The keyspace and view name
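For example, with placeholder names (both argument forms are accepted):

```bash
# Per-node build progress of a materialized view
nodetool viewbuildstatus my_keyspace my_view
nodetool viewbuildstatus my_keyspace.my_view
```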
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.5/troubleshooting/index.html b/src/doc/3.11.5/troubleshooting/index.html deleted file mode 100644 index 71eed5665..000000000 --- a/src/doc/3.11.5/troubleshooting/index.html +++ /dev/null @@ -1,100 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Troubleshooting" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Troubleshooting

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/.buildinfo b/src/doc/3.11.6/.buildinfo deleted file mode 100644 index acda372ba..000000000 --- a/src/doc/3.11.6/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 90094b50d4537672b7882be6977dad58 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/src/doc/3.11.6/_images/eclipse_debug0.png b/src/doc/3.11.6/_images/eclipse_debug0.png deleted file mode 100644 index 79fc5fd5b..000000000 Binary files a/src/doc/3.11.6/_images/eclipse_debug0.png and /dev/null differ diff --git a/src/doc/3.11.6/_images/eclipse_debug1.png b/src/doc/3.11.6/_images/eclipse_debug1.png deleted file mode 100644 index 87b8756a3..000000000 Binary files a/src/doc/3.11.6/_images/eclipse_debug1.png and /dev/null differ diff --git a/src/doc/3.11.6/_images/eclipse_debug2.png b/src/doc/3.11.6/_images/eclipse_debug2.png deleted file mode 100644 index df4eddbd7..000000000 Binary files a/src/doc/3.11.6/_images/eclipse_debug2.png and /dev/null differ diff --git a/src/doc/3.11.6/_images/eclipse_debug3.png b/src/doc/3.11.6/_images/eclipse_debug3.png deleted file mode 100644 index 23178142c..000000000 Binary files a/src/doc/3.11.6/_images/eclipse_debug3.png and /dev/null differ diff --git a/src/doc/3.11.6/_images/eclipse_debug4.png b/src/doc/3.11.6/_images/eclipse_debug4.png deleted file mode 100644 index 5063d4891..000000000 Binary files a/src/doc/3.11.6/_images/eclipse_debug4.png and /dev/null differ diff --git a/src/doc/3.11.6/_images/eclipse_debug5.png b/src/doc/3.11.6/_images/eclipse_debug5.png deleted file mode 100644 index ab68e68a3..000000000 Binary files a/src/doc/3.11.6/_images/eclipse_debug5.png and /dev/null differ diff --git a/src/doc/3.11.6/_images/eclipse_debug6.png b/src/doc/3.11.6/_images/eclipse_debug6.png deleted file mode 100644 index 61ef30bfe..000000000 Binary files a/src/doc/3.11.6/_images/eclipse_debug6.png and /dev/null differ diff --git a/src/doc/3.11.6/_sources/architecture/dynamo.rst.txt b/src/doc/3.11.6/_sources/architecture/dynamo.rst.txt deleted file mode 100644 index a7dbb8750..000000000 --- a/src/doc/3.11.6/_sources/architecture/dynamo.rst.txt +++ /dev/null @@ -1,139 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dynamo ------- - -.. _gossip: - -Gossip -^^^^^^ - -.. todo:: todo - -Failure Detection -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -Token Ring/Ranges -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -.. _replication-strategy: - -Replication -^^^^^^^^^^^ - -The replication strategy of a keyspace determines which nodes are replicas for a given token range. 
The two main
-replication strategies are :ref:`simple-strategy` and :ref:`network-topology-strategy`.
-
-.. _simple-strategy:
-
-SimpleStrategy
-~~~~~~~~~~~~~~
-
-SimpleStrategy allows a single integer ``replication_factor`` to be defined. This determines the number of nodes that
-should contain a copy of each row. For example, if ``replication_factor`` is 3, then three different nodes should store
-a copy of each row.
-
-SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas
-for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For
-each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to
-the set. This process continues until ``replication_factor`` distinct nodes have been added to the set of replicas.
-
-.. _network-topology-strategy:
-
-NetworkTopologyStrategy
-~~~~~~~~~~~~~~~~~~~~~~~
-
-NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your
-cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier
-to add new physical or virtual datacenters to the cluster later.
-
-In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose
-replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication
-factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one
-replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially `surprising
-implications `_. For example, if there are not an even number of
-nodes in each rack, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped
-into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to
-configure all nodes on a single "rack".
-
-Tunable Consistency
-^^^^^^^^^^^^^^^^^^^
-
-Cassandra supports a per-operation tradeoff between consistency and availability through *Consistency Levels*.
-Essentially, an operation's consistency level specifies how many of the replicas need to respond to the coordinator in
-order to consider the operation a success.
-
-The following consistency levels are available:
-
-``ONE``
-    Only a single replica must respond.
-
-``TWO``
-    Two replicas must respond.
-
-``THREE``
-    Three replicas must respond.
-
-``QUORUM``
-    A majority (n/2 + 1) of the replicas must respond.
-
-``ALL``
-    All of the replicas must respond.
-
-``LOCAL_QUORUM``
-    A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
-``EACH_QUORUM``
-    A majority of the replicas in each datacenter must respond.
-
-``LOCAL_ONE``
-    Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not
-    sent to replicas in a remote datacenter.
-
-``ANY``
-    A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later
-    attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for
-    write operations.
-
-Write operations are always sent to all replicas, regardless of consistency level. 
The consistency level simply -controls how many responses the coordinator waits for before responding to the client. - -For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level. There are a couple of exceptions to this: - -- Speculative retry may issue a redundant read request to an extra replica if the other replicas have not responded - within a specified time window. -- Based on ``read_repair_chance`` and ``dclocal_read_repair_chance`` (part of a table's schema), read requests may be - randomly sent to all replicas in order to repair potentially inconsistent data. - -Picking Consistency Levels -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is common to pick read and write consistency levels that are high enough to overlap, resulting in "strong" -consistency. This is typically expressed as ``W + R > RF``, where ``W`` is the write consistency level, ``R`` is the -read consistency level, and ``RF`` is the replication factor. For example, if ``RF = 3``, a ``QUORUM`` request will -require responses from at least two of the three replicas. If ``QUORUM`` is used for both writes and reads, at least -one of the replicas is guaranteed to participate in *both* the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, ``LOCAL_QUORUM`` can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter. - -If this type of strong consistency isn't required, lower consistency levels like ``ONE`` may be used to improve -throughput, latency, and availability. diff --git a/src/doc/3.11.6/_sources/architecture/guarantees.rst.txt b/src/doc/3.11.6/_sources/architecture/guarantees.rst.txt deleted file mode 100644 index c0b58d880..000000000 --- a/src/doc/3.11.6/_sources/architecture/guarantees.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Guarantees ----------- - -.. todo:: todo diff --git a/src/doc/3.11.6/_sources/architecture/index.rst.txt b/src/doc/3.11.6/_sources/architecture/index.rst.txt deleted file mode 100644 index 58eda1377..000000000 --- a/src/doc/3.11.6/_sources/architecture/index.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Architecture -============ - -This section describes the general architecture of Apache Cassandra. - -.. toctree:: - :maxdepth: 2 - - overview - dynamo - storage_engine - guarantees - diff --git a/src/doc/3.11.6/_sources/architecture/overview.rst.txt b/src/doc/3.11.6/_sources/architecture/overview.rst.txt deleted file mode 100644 index 005b15b94..000000000 --- a/src/doc/3.11.6/_sources/architecture/overview.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Overview --------- - -.. todo:: todo diff --git a/src/doc/3.11.6/_sources/architecture/storage_engine.rst.txt b/src/doc/3.11.6/_sources/architecture/storage_engine.rst.txt deleted file mode 100644 index e4114e5af..000000000 --- a/src/doc/3.11.6/_sources/architecture/storage_engine.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Storage Engine --------------- - -.. _commit-log: - -CommitLog -^^^^^^^^^ - -.. todo:: todo - -.. _memtables: - -Memtables -^^^^^^^^^ - -Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable `SSTables`_. This can be triggered in several -ways: - -- The memory usage of the memtables exceeds the configured threshold (see ``memtable_cleanup_threshold``) -- The :ref:`commit-log` approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to - be freed - -Memtables may be stored entirely on-heap or partially off-heap, depending on ``memtable_allocation_type``. - -SSTables -^^^^^^^^ - -SSTables are the immutable data files that Cassandra uses for persisting data on disk. 
- -As SSTables are flushed to disk from :ref:`memtables` or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed. - -Each SSTable is comprised of multiple components stored in separate files: - -``Data.db`` - The actual data, i.e. the contents of rows. - -``Index.db`` - An index from partition keys to positions in the ``Data.db`` file. For wide partitions, this may also include an - index to rows within a partition. - -``Summary.db`` - A sampling of (by default) every 128th entry in the ``Index.db`` file. - -``Filter.db`` - A Bloom Filter of the partition keys in the SSTable. - -``CompressionInfo.db`` - Metadata about the offsets and lengths of compression chunks in the ``Data.db`` file. - -``Statistics.db`` - Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, - repair, compression, TTLs, and more. - -``Digest.crc32`` - A CRC-32 digest of the ``Data.db`` file. - -``TOC.txt`` - A plain text list of the component files for the SSTable. - -Within the ``Data.db`` file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, ``Murmur3Partition``, is used). Within a partition, rows are -stored in the order of their clustering keys. - -SSTables can be optionally compressed using block-based compression. diff --git a/src/doc/3.11.6/_sources/bugs.rst.txt b/src/doc/3.11.6/_sources/bugs.rst.txt deleted file mode 100644 index 240cfd495..000000000 --- a/src/doc/3.11.6/_sources/bugs.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Reporting Bugs and Contributing -=============================== - -If you encounter a problem with Cassandra, the first places to ask for help are the :ref:`user mailing list -` and the ``#cassandra`` :ref:`IRC channel `. - -If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the `Apache Cassandra JIRA `__. Please provide as much -details as you can on your problem, and don't forget to indicate which version of Cassandra you are running and on which -environment. - -Further details on how to contribute can be found at our :doc:`development/index` section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path. 
diff --git a/src/doc/3.11.6/_sources/configuration/cassandra_config_file.rst.txt b/src/doc/3.11.6/_sources/configuration/cassandra_config_file.rst.txt deleted file mode 100644 index f205f7d30..000000000 --- a/src/doc/3.11.6/_sources/configuration/cassandra_config_file.rst.txt +++ /dev/null @@ -1,1911 +0,0 @@ -.. _cassandra-yaml: - -Cassandra Configuration File -============================ - -``cluster_name`` ----------------- -The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another. - -*Default Value:* 'Test Cluster' - -``num_tokens`` --------------- - -This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability. - -If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below. - -Specifying initial_token will override this setting on the node's initial start, -on subsequent starts, this setting will apply even if initial token is set. - -If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations - -*Default Value:* 256 - -``allocate_tokens_for_keyspace`` --------------------------------- -*This option is commented out by default.* - -Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace. - -The load assigned to each node will be close to proportional to its number of -vnodes. - -Only supported with the Murmur3Partitioner. - -*Default Value:* KEYSPACE - -``initial_token`` ------------------ -*This option is commented out by default.* - -initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) -- in which case you should provide a -comma-separated list -- it's primarily used when adding nodes to legacy clusters -that do not have vnodes enabled. - -``hinted_handoff_enabled`` --------------------------- - -See http://wiki.apache.org/cassandra/HintedHandoff -May either be "true" or "false" to enable globally - -*Default Value:* true - -``hinted_handoff_disabled_datacenters`` ---------------------------------------- -*This option is commented out by default.* - -When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff - -*Default Value (complex option)*:: - - # - DC1 - # - DC2 - -``max_hint_window_in_ms`` -------------------------- -this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again. - -*Default Value:* 10800000 # 3 hours - -``hinted_handoff_throttle_in_kb`` ---------------------------------- - -Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.) 
- -*Default Value:* 1024 - -``max_hints_delivery_threads`` ------------------------------- - -Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower - -*Default Value:* 2 - -``hints_directory`` -------------------- -*This option is commented out by default.* - -Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints. - -*Default Value:* /var/lib/cassandra/hints - -``hints_flush_period_in_ms`` ----------------------------- - -How often hints should be flushed from the internal buffers to disk. -Will *not* trigger fsync. - -*Default Value:* 10000 - -``max_hints_file_size_in_mb`` ------------------------------ - -Maximum size for a single hints file, in megabytes. - -*Default Value:* 128 - -``hints_compression`` ---------------------- -*This option is commented out by default.* - -Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``batchlog_replay_throttle_in_kb`` ----------------------------------- -Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster. - -*Default Value:* 1024 - -``authenticator`` ------------------ - -Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}. - -- AllowAllAuthenticator performs no checks - set it to disable authentication. -- PasswordAuthenticator relies on username/password pairs to authenticate - users. It keeps usernames and hashed passwords in system_auth.roles table. - Please increase system_auth keyspace replication factor if you use this authenticator. - If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) - -*Default Value:* AllowAllAuthenticator - -``authorizer`` --------------- - -Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}. - -- AllowAllAuthorizer allows any action to any user - set it to disable authorization. -- CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllAuthorizer - -``role_manager`` ----------------- - -Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable. - -- CassandraRoleManager stores role data in the system_auth keyspace. Please - increase system_auth keyspace replication factor if you use this role manager. 
- -*Default Value:* CassandraRoleManager - -``roles_validity_in_ms`` ------------------------- - -Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator. - -*Default Value:* 2000 - -``roles_update_interval_in_ms`` -------------------------------- -*This option is commented out by default.* - -Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms. - -*Default Value:* 2000 - -``permissions_validity_in_ms`` ------------------------------- - -Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer. - -*Default Value:* 2000 - -``permissions_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms. - -*Default Value:* 2000 - -``credentials_validity_in_ms`` ------------------------------- - -Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching. - -*Default Value:* 2000 - -``credentials_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms. - -*Default Value:* 2000 - -``partitioner`` ---------------- - -The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. You should leave this -alone for new clusters. The partitioner can NOT be changed without -reloading all data, so when upgrading you should set this to the -same partitioner you were already using. - -Besides Murmur3Partitioner, partitioners included for backwards -compatibility include RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner. 
- - -*Default Value:* org.apache.cassandra.dht.Murmur3Partitioner - -``data_file_directories`` -------------------------- -*This option is commented out by default.* - -Directories where Cassandra should store data on disk. Cassandra -will spread data evenly across them, subject to the granularity of -the configured compaction strategy. -If not set, the default directory is $CASSANDRA_HOME/data/data. - -*Default Value (complex option)*:: - - # - /var/lib/cassandra/data - -``commitlog_directory`` ------------------------ -*This option is commented out by default.* -commit log. when running on magnetic HDD, this should be a -separate spindle than the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -``cdc_enabled`` ---------------- - -Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory). - -*Default Value:* false - -``cdc_raw_directory`` ---------------------- -*This option is commented out by default.* - -CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw. - -*Default Value:* /var/lib/cassandra/cdc_raw - -``disk_failure_policy`` ------------------------ - -Policy for data disk failures: - -die - shut down gossip and client transports and kill the JVM for any fs errors or - single-sstable errors, so the node can be replaced. - -stop_paranoid - shut down gossip and client transports even for single-sstable errors, - kill the JVM for errors during startup. - -stop - shut down gossip and client transports, leaving the node effectively dead, but - can still be inspected via JMX, kill the JVM for errors during startup. - -best_effort - stop using the failed disk and respond to requests based on - remaining available sstables. This means you WILL see obsolete - data at CL.ONE! - -ignore - ignore fatal errors and let requests fail, as in pre-1.2 Cassandra - -*Default Value:* stop - -``commit_failure_policy`` -------------------------- - -Policy for commit disk failures: - -die - shut down gossip and Thrift and kill the JVM, so the node can be replaced. - -stop - shut down gossip and Thrift, leaving the node effectively dead, but - can still be inspected via JMX. - -stop_commit - shutdown the commit log, letting writes collect but - continuing to service reads, as in pre-2.0.5 Cassandra - -ignore - ignore fatal errors and let the batches fail - -*Default Value:* stop - -``prepared_statements_cache_size_mb`` -------------------------------------- - -Maximum size of the native protocol prepared statement cache - -Valid values are either "auto" (omitting the value) or a value greater 0. - -Note that specifying a too large value will result in long running GCs and possbily -out-of-memory errors. Keep the value at a small fraction of the heap. - -If you constantly see "prepared statements discarded in the last minute because -cache limit reached" messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts. 
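To tie the directory and failure-policy options together, here is an illustrative layout that follows the documented /var/lib/cassandra defaults; adjust the paths for your own disks.

```yaml
# Illustrative layout following the documented /var/lib/cassandra defaults.
data_file_directories:
    - /var/lib/cassandra/data
commitlog_directory: /var/lib/cassandra/commitlog
cdc_enabled: false
disk_failure_policy: stop        # stop gossip and client transports on data-disk errors
commit_failure_policy: stop      # same policy for commit-log disk errors
```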
- -Do only change the default value, if you really have more prepared statements than -fit in the cache. In most cases it is not neccessary to change this value. -Constantly re-preparing statements is a performance penalty. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``thrift_prepared_statements_cache_size_mb`` --------------------------------------------- - -Maximum size of the Thrift prepared statement cache - -If you do not use Thrift at all, it is safe to leave this value at "auto". - -See description of 'prepared_statements_cache_size_mb' above for more information. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``key_cache_size_in_mb`` ------------------------- - -Maximum size of the key cache in memory. - -Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it's worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It's best to only use the -row cache if you have hot rows or static rows. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. - -``key_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 14400 or 4 hours. - -*Default Value:* 14400 - -``key_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``row_cache_class_name`` ------------------------- -*This option is commented out by default.* - -Row cache implementation class name. Available implementations: - -org.apache.cassandra.cache.OHCProvider - Fully off-heap row cache implementation (default). - -org.apache.cassandra.cache.SerializingCacheProvider - This is the row cache implementation availabile - in previous releases of Cassandra. - -*Default Value:* org.apache.cassandra.cache.OHCProvider - -``row_cache_size_in_mb`` ------------------------- - -Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory that the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Do never allow your system to swap. - -Default value is 0, to disable row caching. - -*Default Value:* 0 - -``row_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. 
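For the cache options above, a minimal sketch using the documented behaviour: leaving key_cache_size_in_mb empty selects the automatic sizing, while a row_cache_size_in_mb of 0 keeps row caching off.

```yaml
# Illustrative cache settings mirroring the documented behaviour.
key_cache_size_in_mb:            # empty = auto: min(5% of heap, 100MB)
key_cache_save_period: 14400     # persist the key cache every 4 hours
row_cache_size_in_mb: 0          # 0 disables the row cache entirely
```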
- -Default is 0 to disable saving the row cache. - -*Default Value:* 0 - -``row_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved - -*Default Value:* 100 - -``counter_cache_size_in_mb`` ----------------------------- - -Maximum size of the counter cache in memory. - -Counter cache helps to reduce counter locks' contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it's relatively cheap. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. - -``counter_cache_save_period`` ------------------------------ - -Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file. - -Default is 7200 or 2 hours. - -*Default Value:* 7200 - -``counter_cache_keys_to_save`` ------------------------------- -*This option is commented out by default.* - -Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``saved_caches_directory`` --------------------------- -*This option is commented out by default.* - -saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. - -*Default Value:* /var/lib/cassandra/saved_caches - -``commitlog_sync`` ------------------- -*This option is commented out by default.* - -commitlog_sync may be either "periodic" or "batch." - -When in batch mode, Cassandra won't ack writes until the commit log -has been fsynced to disk. It will wait -commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -This window should be kept short because the writer threads will -be unable to do extra work while waiting. (You may need to increase -concurrent_writes for the same reason.) - - -*Default Value:* batch - -``commitlog_sync_batch_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -*Default Value:* 2 - -``commitlog_sync`` ------------------- - -the other option is "periodic" where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds. - -*Default Value:* periodic - -``commitlog_sync_period_in_ms`` -------------------------------- - -*Default Value:* 10000 - -``commitlog_segment_size_in_mb`` --------------------------------- - -The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables. - -The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. 
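A short illustration of the periodic commit-log mode discussed above, using the documented defaults; batch mode would instead pair commitlog_sync: batch with commitlog_sync_batch_window_in_ms.

```yaml
# Illustrative: periodic commit-log sync with the documented defaults.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000
commitlog_segment_size_in_mb: 32   # 8 or 16 may suit commitlog archiving better
```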
-Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048. - -NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024 - - -*Default Value:* 32 - -``commitlog_compression`` -------------------------- -*This option is commented out by default.* - -Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``seed_provider`` ------------------ -any class that implements the SeedProvider interface and has a -constructor that takes a Map of parameters will do. - -*Default Value (complex option)*:: - - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: ",," - - seeds: "127.0.0.1" - -``concurrent_reads`` --------------------- -For workloads with more data than can fit in memory, Cassandra's -bottleneck will be reads that need to fetch data from -disk. "concurrent_reads" should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -"concurrent_counter_writes", since counter writes read the current -values before incrementing and writing them back. - -On the other hand, since writes are almost never IO bound, the ideal -number of "concurrent_writes" is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb. - -*Default Value:* 32 - -``concurrent_writes`` ---------------------- - -*Default Value:* 32 - -``concurrent_counter_writes`` ------------------------------ - -*Default Value:* 32 - -``concurrent_materialized_view_writes`` ---------------------------------------- - -For materialized view writes, as there is a read involved, so this should -be limited by the less of concurrent reads or concurrent writes. - -*Default Value:* 32 - -``file_cache_size_in_mb`` -------------------------- -*This option is commented out by default.* - -Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as an -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed. - -*Default Value:* 512 - -``buffer_pool_use_heap_if_exhausted`` -------------------------------------- -*This option is commented out by default.* - -Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. 
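The seed list is the one complex option most installations must change. Below is an illustrative sketch assuming a hypothetical node with two data drives, eight cores, and three seed hosts (the addresses are placeholders); the concurrency values follow the 16-per-drive and 8-per-core rules of thumb given above.

```yaml
# Illustrative sketch: placeholder seed addresses, two data drives, eight cores.
seed_provider:
    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
      parameters:
          - seeds: "10.0.0.1,10.0.0.2,10.0.0.3"   # comma-delimited list of seed hosts
concurrent_reads: 32              # 16 * number_of_drives
concurrent_writes: 64             # 8 * number_of_cores
concurrent_counter_writes: 32     # counter writes read before writing, so size like reads
```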
- - -*Default Value:* true - -``disk_optimization_strategy`` ------------------------------- -*This option is commented out by default.* - -The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks) - -*Default Value:* ssd - -``memtable_heap_space_in_mb`` ------------------------------ -*This option is commented out by default.* - -Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap. - -*Default Value:* 2048 - -``memtable_offheap_space_in_mb`` --------------------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``memtable_cleanup_threshold`` ------------------------------- -*This option is commented out by default.* - -memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information. - -Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load. - -memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) - -*Default Value:* 0.11 - -``memtable_allocation_type`` ----------------------------- - -Specify the way Cassandra allocates and manages memtable memory. -Options are: - -heap_buffers - on heap nio buffers - -offheap_buffers - off heap (direct) nio buffers - -offheap_objects - off heap objects - -*Default Value:* heap_buffers - -``repair_session_max_tree_depth`` ---------------------------------- -*This option is commented out by default.* - -Limits the maximum Merkle tree depth to avoid consuming too much -memory during repairs. - -The default setting of 18 generates trees of maximum size around -50 MiB / tree. If you are running out of memory during repairs consider -lowering this to 15 (~6 MiB / tree) or lower, but try not to lower it -too much past that or you will lose too much resolution and stream -too much redundant data during repair. Cannot be set lower than 10. - -For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096. - - -*Default Value:* 18 - -``commitlog_total_space_in_mb`` -------------------------------- -*This option is commented out by default.* - -Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume. - - -*Default Value:* 8192 - -``memtable_flush_writers`` --------------------------- -*This option is commented out by default.* - -This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound. - -Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. 
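An illustrative excerpt for the memtable options above, keeping the commented-out defaults of on-heap buffers and an explicit 2 GB budget; leaving the space settings unset instead lets Cassandra size them at a quarter of the heap.

```yaml
# Illustrative: on-heap memtable buffers with an explicit budget.
# Leaving the *_space_in_mb settings unset sizes them at 1/4 of the heap instead.
memtable_allocation_type: heap_buffers
memtable_heap_space_in_mb: 2048
memtable_offheap_space_in_mb: 2048
```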
-At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time. - -You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory. - -memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers. - -Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead. - -There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory. - - -*Default Value:* 2 - -``cdc_total_space_in_mb`` -------------------------- -*This option is commented out by default.* - -Total space to use for change-data-capture logs on disk. - -If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed. - -The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides. - -*Default Value:* 4096 - -``cdc_free_space_check_interval_ms`` ------------------------------------- -*This option is commented out by default.* - -When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms - -*Default Value:* 250 - -``index_summary_capacity_in_mb`` --------------------------------- - -A fixed memory pool size in MB for for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory. - -``index_summary_resize_interval_in_minutes`` --------------------------------------------- - -How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level. - -*Default Value:* 60 - -``trickle_fsync`` ------------------ - -Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters. - -*Default Value:* false - -``trickle_fsync_interval_in_kb`` --------------------------------- - -*Default Value:* 10240 - -``storage_port`` ----------------- - -TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed. 
- -*Default Value:* 7000 - -``ssl_storage_port`` --------------------- - -SSL port, for encrypted communication. Unused unless enabled in -encryption_options -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7001 - -``listen_address`` ------------------- - -Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate! - -Set listen_address OR listen_interface, not both. - -Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be). - -Setting listen_address to 0.0.0.0 is always wrong. - - -*Default Value:* localhost - -``listen_interface`` --------------------- -*This option is commented out by default.* - -Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth0 - -``listen_interface_prefer_ipv6`` --------------------------------- -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_address`` ---------------------- -*This option is commented out by default.* - -Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address - -*Default Value:* 1.2.3.4 - -``listen_on_broadcast_address`` -------------------------------- -*This option is commented out by default.* - -When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2. - -*Default Value:* false - -``internode_authenticator`` ---------------------------- -*This option is commented out by default.* - -Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes. - -*Default Value:* org.apache.cassandra.auth.AllowAllInternodeAuthenticator - -``start_native_transport`` --------------------------- - -Whether to start the native transport server. -Please note that the address on which the native transport is bound is the -same as the rpc_address. The port however is different and specified below. - -*Default Value:* true - -``native_transport_port`` -------------------------- -port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 9042 - -``native_transport_port_ssl`` ------------------------------ -*This option is commented out by default.* -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. 
-Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted. - -*Default Value:* 9142 - -``native_transport_max_threads`` --------------------------------- -*This option is commented out by default.* -The maximum threads for handling requests when the native transport is used. -This is similar to rpc_max_threads though the default differs slightly (and -there is no native_transport_min_threads, idle threads will always be stopped -after 30 seconds). - -*Default Value:* 128 - -``native_transport_max_frame_size_in_mb`` ------------------------------------------ -*This option is commented out by default.* - -The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you're changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048. - -*Default Value:* 256 - -``native_transport_max_concurrent_connections`` ------------------------------------------------ -*This option is commented out by default.* - -The maximum number of concurrent client connections. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_max_concurrent_connections_per_ip`` ------------------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``start_rpc`` -------------- - -Whether to start the thrift rpc server. - -*Default Value:* false - -``rpc_address`` ---------------- - -The address or interface to bind the Thrift RPC service and native transport -server to. - -Set rpc_address OR rpc_interface, not both. - -Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node). - -Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0. - -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* localhost - -``rpc_interface`` ------------------ -*This option is commented out by default.* - -Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth1 - -``rpc_interface_prefer_ipv6`` ------------------------------ -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``rpc_port`` ------------- - -port for Thrift to listen for clients on - -*Default Value:* 9160 - -``broadcast_rpc_address`` -------------------------- -*This option is commented out by default.* - -RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set. 
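Pulling the addressing options together, a hedged example for a node whose private IP is 10.0.0.1 (a placeholder): set listen_address or listen_interface, never both, and never 0.0.0.0 for listen_address.

```yaml
# Illustrative addressing for a node whose private IP is 10.0.0.1 (a placeholder).
listen_address: 10.0.0.1
rpc_address: 10.0.0.1                  # native transport / Thrift bind address
native_transport_port: 9042
# broadcast_rpc_address: 203.0.113.1   # required only if rpc_address were 0.0.0.0
```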
- -*Default Value:* 1.2.3.4 - -``rpc_keepalive`` ------------------ - -enable or disable keepalive on rpc/native connections - -*Default Value:* true - -``rpc_server_type`` -------------------- - -Cassandra provides two out-of-the-box options for the RPC Server: - -sync - One thread per thrift connection. For a very large number of clients, memory - will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size - per thread, and that will correspond to your use of virtual memory (but physical memory - may be limited depending on use of stack space). - -hsha - Stands for "half synchronous, half asynchronous." All thrift clients are handled - asynchronously using a small number of threads that does not vary with the amount - of thrift clients (and thus scales well to many clients). The rpc requests are still - synchronous (one thread per active request). If hsha is selected then it is essential - that rpc_max_threads is changed from the default value of unlimited. - -The default is sync because on Windows hsha is about 30% slower. On Linux, -sync/hsha performance is about the same, with hsha of course using less memory. - -Alternatively, can provide your own RPC server by providing the fully-qualified class name -of an o.a.c.t.TServerFactory that can create an instance of it. - -*Default Value:* sync - -``rpc_min_threads`` -------------------- -*This option is commented out by default.* - -Uncomment rpc_min|max_thread to set request pool size limits. - -Regardless of your choice of RPC server (see above), the number of maximum requests in the -RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -RPC server, it also dictates the number of clients that can be connected at all). - -The default is unlimited and thus provides no protection against clients overwhelming the server. You are -encouraged to set a maximum that makes sense for you in production, but do keep in mind that -rpc_max_threads represents the maximum number of client requests this server may execute concurrently. - - -*Default Value:* 16 - -``rpc_max_threads`` -------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``rpc_send_buff_size_in_bytes`` -------------------------------- -*This option is commented out by default.* - -uncomment to set socket buffer sizes on rpc connections - -``rpc_recv_buff_size_in_bytes`` -------------------------------- -*This option is commented out by default.* - -``internode_send_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and 'man tcp' - -``internode_recv_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem - -``thrift_framed_transport_size_in_mb`` --------------------------------------- - -Frame size for thrift (maximum message length). 
- -*Default Value:* 15 - -``incremental_backups`` ------------------------ - -Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator's -responsibility. - -*Default Value:* false - -``snapshot_before_compaction`` ------------------------------- - -Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won't clean up the -snapshots for you. Mostly useful if you're paranoid when there -is a data format change. - -*Default Value:* false - -``auto_snapshot`` ------------------ - -Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop. - -*Default Value:* true - -``column_index_size_in_kb`` ---------------------------- - -Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these: - -- a smaller granularity means more index entries are generated - and looking up rows withing the partition by collation column - is faster -- but, Cassandra will keep the collation index in memory for hot - rows (as part of the key cache), so a larger granularity means - you can cache more hot rows - -*Default Value:* 64 - -``column_index_cache_size_in_kb`` ---------------------------------- - -Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk. - -Note that this size refers to the size of the -serialized index information and not the size of the partition. - -*Default Value:* 2 - -``concurrent_compactors`` -------------------------- -*This option is commented out by default.* - -Number of simultaneous compactions to allow, NOT including -validation "compactions" for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long running compactions. The default is usually -fine and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first. - -concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8. - -If your data directories are backed by SSD, you should increase this -to the number of cores. - -*Default Value:* 1 - -``compaction_throughput_mb_per_sec`` ------------------------------------- - -Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this account for all types -of compaction, including validation compaction. 
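For the snapshot and compaction options above, an illustrative excerpt that keeps the documented defaults; auto_snapshot in particular should stay true unless you accept losing data on truncate or drop.

```yaml
# Illustrative: documented defaults for snapshots and compaction throttling.
incremental_backups: false
auto_snapshot: true                       # protects data on TRUNCATE and DROP
snapshot_before_compaction: false
compaction_throughput_mb_per_sec: 16      # 0 disables throttling
# concurrent_compactors: 2               # defaults to min(disks, cores), clamped to 2..8
```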
- -*Default Value:* 16 - -``sstable_preemptive_open_interval_in_mb`` ------------------------------------------- - -When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot - -*Default Value:* 50 - -``stream_throughput_outbound_megabits_per_sec`` ------------------------------------------------ -*This option is commented out by default.* - -Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s. - -*Default Value:* 200 - -``inter_dc_stream_throughput_outbound_megabits_per_sec`` --------------------------------------------------------- -*This option is commented out by default.* - -Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s - -*Default Value:* 200 - -``read_request_timeout_in_ms`` ------------------------------- - -How long the coordinator should wait for read operations to complete - -*Default Value:* 5000 - -``range_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for seq or index scans to complete - -*Default Value:* 10000 - -``write_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for writes to complete - -*Default Value:* 2000 - -``counter_write_request_timeout_in_ms`` ---------------------------------------- -How long the coordinator should wait for counter writes to complete - -*Default Value:* 5000 - -``cas_contention_timeout_in_ms`` --------------------------------- -How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row - -*Default Value:* 1000 - -``truncate_request_timeout_in_ms`` ----------------------------------- -How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) - -*Default Value:* 60000 - -``request_timeout_in_ms`` -------------------------- -The default timeout for other, miscellaneous operations - -*Default Value:* 10000 - -``slow_query_log_timeout_in_ms`` --------------------------------- - -How long before a node logs slow queries. Select queries that take longer than -this timeout to execute, will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging. - -*Default Value:* 500 - -``cross_node_timeout`` ----------------------- - -Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests. 
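The coordinator timeouts quoted above, collected into one illustrative excerpt (all values in milliseconds and equal to the documented defaults):

```yaml
# Coordinator timeouts, in milliseconds (documented defaults).
read_request_timeout_in_ms: 5000
range_request_timeout_in_ms: 10000
write_request_timeout_in_ms: 2000
counter_write_request_timeout_in_ms: 5000
cas_contention_timeout_in_ms: 1000
truncate_request_timeout_in_ms: 60000
request_timeout_in_ms: 10000
slow_query_log_timeout_in_ms: 500     # 0 disables slow-query logging
```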
- -Warning: before enabling this property make sure to ntp is installed -and the times are synchronized between the nodes. - -*Default Value:* false - -``streaming_keep_alive_period_in_secs`` ---------------------------------------- -*This option is commented out by default.* - -Set keep-alive period for streaming -This node will send a keep-alive message periodically with this period. -If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles the stream session times out and fail -Default value is 300s (5 minutes), which means stalled stream -times out in 10 minutes by default - -*Default Value:* 300 - -``phi_convict_threshold`` -------------------------- -*This option is commented out by default.* - -phi value that must be reached for a host to be marked down. -most users should never need to adjust this. - -*Default Value:* 8 - -``endpoint_snitch`` -------------------- - -endpoint_snitch -- Set this to a class that implements -IEndpointSnitch. The snitch has two functions: - -- it teaches Cassandra enough about your network topology to route - requests efficiently -- it allows Cassandra to spread replicas around your cluster to avoid - correlated failures. It does this by grouping machines into - "datacenters" and "racks." Cassandra will do its best not to have - more than one replica on the same "rack" (which may not actually - be a physical location) - -CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on "rack1" in "datacenter1", your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new "datacenter") and -decommissioning the old ones. - -Out of the box, Cassandra provides: - -SimpleSnitch: - Treats Strategy order as proximity. This can improve cache - locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack - and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via - gossip. If cassandra-topology.properties exists, it is used as a - fallback, allowing migration from the PropertyFileSnitch. - -PropertyFileSnitch: - Proximity is determined by rack and data center, which are - explicitly configured in cassandra-topology.properties. - -Ec2Snitch: - Appropriate for EC2 deployments in a single Region. Loads Region - and Availability Zone information from the EC2 API. The Region is - treated as the datacenter, and the Availability Zone as the rack. - Only private IPs are used, so this will not work across multiple - Regions. - -Ec2MultiRegionSnitch: - Uses public IPs as broadcast_address to allow cross-region - connectivity. (Thus, you should set seed addresses to the public - IP as well.) You will need to open the storage_port or - ssl_storage_port on the public IP firewall. (For intra-Region - traffic, Cassandra will switch to the private IP after - establishing a connection.) - -RackInferringSnitch: - Proximity is determined by rack and data center, which are - assumed to correspond to the 3rd and 2nd octet of each node's IP - address, respectively. 
Unless this happens to match your - deployment conventions, this is best used as an example of - writing a custom Snitch class and is provided in that spirit. - -You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath. - -*Default Value:* SimpleSnitch - -``dynamic_snitch_update_interval_in_ms`` ----------------------------------------- - -controls how often to perform the more expensive part of host score -calculation - -*Default Value:* 100 - -``dynamic_snitch_reset_interval_in_ms`` ---------------------------------------- -controls how often to reset all host scores, allowing a bad host to -possibly recover - -*Default Value:* 600000 - -``dynamic_snitch_badness_threshold`` ------------------------------------- -if set greater than zero and read_repair_chance is < 1.0, this will allow -'pinning' of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest. - -*Default Value:* 0.1 - -``request_scheduler`` ---------------------- - -request_scheduler -- Set this to a class that implements -RequestScheduler, which will schedule incoming client requests -according to the specific policy. This is useful for multi-tenancy -with a single Cassandra cluster. -NOTE: This is specifically for requests from the client and does -not affect inter node communication. -org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -client requests to a node with a separate queue for each -request_scheduler_id. The scheduler is further customized by -request_scheduler_options as described below. - -*Default Value:* org.apache.cassandra.scheduler.NoScheduler - -``request_scheduler_options`` ------------------------------ -*This option is commented out by default.* - -Scheduler Options vary based on the type of scheduler - -NoScheduler - Has no options - -RoundRobin - throttle_limit - The throttle_limit is the number of in-flight - requests per client. Requests beyond - that limit are queued up until - running requests can complete. - The value of 80 here is twice the number of - concurrent_reads + concurrent_writes. - default_weight - default_weight is optional and allows for - overriding the default which is 1. - weights - Weights are optional and will default to 1 or the - overridden default_weight. The weight translates into how - many requests are handled during each turn of the - RoundRobin, based on the scheduler id. - - -*Default Value (complex option)*:: - - # throttle_limit: 80 - # default_weight: 5 - # weights: - # Keyspace1: 1 - # Keyspace2: 5 - -``request_scheduler_id`` ------------------------- -*This option is commented out by default.* -request_scheduler_id -- An identifier based on which to perform -the request scheduling. Currently the only valid option is keyspace. - -*Default Value:* keyspace - -``server_encryption_options`` ------------------------------ - -Enable or disable inter-node encryption -JVM defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. 
This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html -*NOTE* No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack - -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks - -The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore - - -*Default Value (complex option)*:: - - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -``client_encryption_options`` ------------------------------ -enable or disable client/server encryption. - -*Default Value (complex option)*:: - - enabled: false - # If enabled and optional is set to true encrypted and unencrypted connections are handled. - optional: false - keystore: conf/.keystore - keystore_password: cassandra - # require_client_auth: false - # Set trustore and truststore_password if require_client_auth is true - # truststore: conf/.truststore - # truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - -``internode_compression`` -------------------------- -internode_compression controls whether traffic between nodes is -compressed. -Can be: - -all - all traffic is compressed - -dc - traffic between different datacenters is compressed - -none - nothing is compressed. - -*Default Value:* dc - -``inter_dc_tcp_nodelay`` ------------------------- - -Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses. - -*Default Value:* false - -``tracetype_query_ttl`` ------------------------ - -TTL for different trace types used during logging of the repair process. 
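A hedged sketch of the encryption options described above, enabling datacenter-level internode encryption and client encryption; the keystore and truststore paths and passwords are placeholders that must match the files you generate.

```yaml
# Illustrative: DC-level internode encryption plus client encryption.
# Keystore/truststore paths and passwords are placeholders for files you generate.
server_encryption_options:
    internode_encryption: dc
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra
client_encryption_options:
    enabled: true
    optional: false
    keystore: conf/.keystore
    keystore_password: cassandra
```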
- -*Default Value:* 86400 - -``tracetype_repair_ttl`` ------------------------- - -*Default Value:* 604800 - -``gc_log_threshold_in_ms`` --------------------------- -*This option is commented out by default.* - -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level -This threshold can be adjusted to minimize logging if necessary - -*Default Value:* 200 - -``enable_user_defined_functions`` ---------------------------------- - -If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. - -*Default Value:* false - -``enable_scripted_user_defined_functions`` ------------------------------------------- - -Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false. - -*Default Value:* false - -``windows_timer_interval`` --------------------------- - -The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals 'clockres' tool can confirm your system's default -setting. - -*Default Value:* 1 - -``transparent_data_encryption_options`` ---------------------------------------- - - -Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from -a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by -the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys -can still (and should!) be in the keystore and will be used on decrypt operations -(to handle the case of key rotation). - -It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) - -Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints - -*Default Value (complex option)*:: - - enabled: false - chunk_length_kb: 64 - cipher: AES/CBC/PKCS5Padding - key_alias: testing:1 - # CBC IV length for AES needs to be 16 bytes (which is also the default size) - # iv_length: 16 - key_provider: - - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: - - keystore: conf/.keystore - keystore_password: cassandra - store_type: JCEKS - key_password: cassandra - -``tombstone_warn_threshold`` ----------------------------- - -#################### -SAFETY THRESHOLDS # -#################### - -When executing a scan, within or across a partition, we need to keep the -tombstones seen in memory so we can return them to the coordinator, which -will use them to make sure other replicas also know about the deleted rows. -With workloads that generate a lot of tombstones, this can cause performance -problems and even exaust the server heap. 
-(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -Adjust the thresholds here if you understand the dangers and want to -scan more tombstones anyway. These thresholds may also be adjusted at runtime -using the StorageService mbean. - -*Default Value:* 1000 - -``tombstone_failure_threshold`` -------------------------------- - -*Default Value:* 100000 - -``batch_size_warn_threshold_in_kb`` ------------------------------------ - -Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability. - -*Default Value:* 5 - -``batch_size_fail_threshold_in_kb`` ------------------------------------ - -Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default. - -*Default Value:* 50 - -``unlogged_batch_across_partitions_warn_threshold`` ---------------------------------------------------- - -Log WARN on any batches not of type LOGGED than span across more partitions than this limit - -*Default Value:* 10 - -``compaction_large_partition_warning_threshold_mb`` ---------------------------------------------------- - -Log a warning when compacting partitions larger than this value - -*Default Value:* 100 - -``gc_warn_threshold_in_ms`` ---------------------------- - -GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level - -*Default Value:* 1000 - -``max_value_size_in_mb`` ------------------------- -*This option is commented out by default.* - -Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048. - -*Default Value:* 256 - -``back_pressure_enabled`` -------------------------- - -Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas. - -*Default Value:* false - -``back_pressure_strategy`` --------------------------- -The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map. - -``otc_coalescing_strategy`` ---------------------------- -*This option is commented out by default.* - -Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). 
-On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal -doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details. - -Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name. - -*Default Value:* DISABLED - -``otc_coalescing_window_us`` ----------------------------- -*This option is commented out by default.* - -How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled. - -*Default Value:* 200 - -``otc_coalescing_enough_coalesced_messages`` --------------------------------------------- -*This option is commented out by default.* - -Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128. - -*Default Value:* 8 - -``otc_backlog_expiration_interval_ms`` --------------------------------------- -*This option is commented out by default.* - -How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions. - - -*Default Value:* 200 - -``enable_materialized_views`` ------------------------------ - - -######################## -EXPERIMENTAL FEATURES # -######################## - -Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use. - -*Default Value:* true - -``enable_sasi_indexes`` ------------------------ - -Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use. 
- -*Default Value:* true diff --git a/src/doc/3.11.6/_sources/configuration/index.rst.txt b/src/doc/3.11.6/_sources/configuration/index.rst.txt deleted file mode 100644 index f774fdad6..000000000 --- a/src/doc/3.11.6/_sources/configuration/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra -===================== - -This section describes how to configure Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cassandra_config_file diff --git a/src/doc/3.11.6/_sources/contactus.rst.txt b/src/doc/3.11.6/_sources/contactus.rst.txt deleted file mode 100644 index 8d0f5dd04..000000000 --- a/src/doc/3.11.6/_sources/contactus.rst.txt +++ /dev/null @@ -1,53 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contact us -========== - -You can get in touch with the Cassandra community either via the mailing lists or the freenode IRC channels. - -.. _mailing-lists: - -Mailing lists -------------- - -The following mailing lists are available: - -- `Users `__ – General discussion list for users - `Subscribe - `__ -- `Developers `__ – Development related discussion - `Subscribe - `__ -- `Commits `__ – Commit notification source repository - - `Subscribe `__ -- `Client Libraries `__ – Discussion related to the - development of idiomatic client APIs - `Subscribe `__ - -Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe. - -.. _irc-channels: - -IRC ---- - -To chat with developers or users in real-time, join our channels on `IRC freenode `__. The -following channels are available: - -- ``#cassandra`` - for user questions and general discussions. -- ``#cassandra-dev`` - strictly for questions or discussions related to Cassandra development. -- ``#cassandra-builds`` - results of automated test builds. 
- diff --git a/src/doc/3.11.6/_sources/cql/appendices.rst.txt b/src/doc/3.11.6/_sources/cql/appendices.rst.txt deleted file mode 100644 index 480b78ea2..000000000 --- a/src/doc/3.11.6/_sources/cql/appendices.rst.txt +++ /dev/null @@ -1,330 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Appendices ----------- - -.. _appendix-A: - -Appendix A: CQL Keywords -~~~~~~~~~~~~~~~~~~~~~~~~ - -CQL distinguishes between *reserved* and *non-reserved* keywords. -Reserved keywords cannot be used as identifier, they are truly reserved -for the language (but one can enclose a reserved keyword by -double-quotes to use it as an identifier). Non-reserved keywords however -only have a specific meaning in certain context but can used as -identifier otherwise. The only *raison d’être* of these non-reserved -keywords is convenience: some keyword are non-reserved when it was -always easy for the parser to decide whether they were used as keywords -or not. - -+--------------------+-------------+ -| Keyword | Reserved? 
| -+====================+=============+ -| ``ADD`` | yes | -+--------------------+-------------+ -| ``AGGREGATE`` | no | -+--------------------+-------------+ -| ``ALL`` | no | -+--------------------+-------------+ -| ``ALLOW`` | yes | -+--------------------+-------------+ -| ``ALTER`` | yes | -+--------------------+-------------+ -| ``AND`` | yes | -+--------------------+-------------+ -| ``APPLY`` | yes | -+--------------------+-------------+ -| ``AS`` | no | -+--------------------+-------------+ -| ``ASC`` | yes | -+--------------------+-------------+ -| ``ASCII`` | no | -+--------------------+-------------+ -| ``AUTHORIZE`` | yes | -+--------------------+-------------+ -| ``BATCH`` | yes | -+--------------------+-------------+ -| ``BEGIN`` | yes | -+--------------------+-------------+ -| ``BIGINT`` | no | -+--------------------+-------------+ -| ``BLOB`` | no | -+--------------------+-------------+ -| ``BOOLEAN`` | no | -+--------------------+-------------+ -| ``BY`` | yes | -+--------------------+-------------+ -| ``CALLED`` | no | -+--------------------+-------------+ -| ``CLUSTERING`` | no | -+--------------------+-------------+ -| ``COLUMNFAMILY`` | yes | -+--------------------+-------------+ -| ``COMPACT`` | no | -+--------------------+-------------+ -| ``CONTAINS`` | no | -+--------------------+-------------+ -| ``COUNT`` | no | -+--------------------+-------------+ -| ``COUNTER`` | no | -+--------------------+-------------+ -| ``CREATE`` | yes | -+--------------------+-------------+ -| ``CUSTOM`` | no | -+--------------------+-------------+ -| ``DATE`` | no | -+--------------------+-------------+ -| ``DECIMAL`` | no | -+--------------------+-------------+ -| ``DELETE`` | yes | -+--------------------+-------------+ -| ``DESC`` | yes | -+--------------------+-------------+ -| ``DESCRIBE`` | yes | -+--------------------+-------------+ -| ``DISTINCT`` | no | -+--------------------+-------------+ -| ``DOUBLE`` | no | -+--------------------+-------------+ -| ``DROP`` | yes | -+--------------------+-------------+ -| ``ENTRIES`` | yes | -+--------------------+-------------+ -| ``EXECUTE`` | yes | -+--------------------+-------------+ -| ``EXISTS`` | no | -+--------------------+-------------+ -| ``FILTERING`` | no | -+--------------------+-------------+ -| ``FINALFUNC`` | no | -+--------------------+-------------+ -| ``FLOAT`` | no | -+--------------------+-------------+ -| ``FROM`` | yes | -+--------------------+-------------+ -| ``FROZEN`` | no | -+--------------------+-------------+ -| ``FULL`` | yes | -+--------------------+-------------+ -| ``FUNCTION`` | no | -+--------------------+-------------+ -| ``FUNCTIONS`` | no | -+--------------------+-------------+ -| ``GRANT`` | yes | -+--------------------+-------------+ -| ``IF`` | yes | -+--------------------+-------------+ -| ``IN`` | yes | -+--------------------+-------------+ -| ``INDEX`` | yes | -+--------------------+-------------+ -| ``INET`` | no | -+--------------------+-------------+ -| ``INFINITY`` | yes | -+--------------------+-------------+ -| ``INITCOND`` | no | -+--------------------+-------------+ -| ``INPUT`` | no | -+--------------------+-------------+ -| ``INSERT`` | yes | -+--------------------+-------------+ -| ``INT`` | no | -+--------------------+-------------+ -| ``INTO`` | yes | -+--------------------+-------------+ -| ``JSON`` | no | -+--------------------+-------------+ -| ``KEY`` | no | -+--------------------+-------------+ -| ``KEYS`` | no | -+--------------------+-------------+ -| ``KEYSPACE`` | yes | 
-+--------------------+-------------+ -| ``KEYSPACES`` | no | -+--------------------+-------------+ -| ``LANGUAGE`` | no | -+--------------------+-------------+ -| ``LIMIT`` | yes | -+--------------------+-------------+ -| ``LIST`` | no | -+--------------------+-------------+ -| ``LOGIN`` | no | -+--------------------+-------------+ -| ``MAP`` | no | -+--------------------+-------------+ -| ``MODIFY`` | yes | -+--------------------+-------------+ -| ``NAN`` | yes | -+--------------------+-------------+ -| ``NOLOGIN`` | no | -+--------------------+-------------+ -| ``NORECURSIVE`` | yes | -+--------------------+-------------+ -| ``NOSUPERUSER`` | no | -+--------------------+-------------+ -| ``NOT`` | yes | -+--------------------+-------------+ -| ``NULL`` | yes | -+--------------------+-------------+ -| ``OF`` | yes | -+--------------------+-------------+ -| ``ON`` | yes | -+--------------------+-------------+ -| ``OPTIONS`` | no | -+--------------------+-------------+ -| ``OR`` | yes | -+--------------------+-------------+ -| ``ORDER`` | yes | -+--------------------+-------------+ -| ``PASSWORD`` | no | -+--------------------+-------------+ -| ``PERMISSION`` | no | -+--------------------+-------------+ -| ``PERMISSIONS`` | no | -+--------------------+-------------+ -| ``PRIMARY`` | yes | -+--------------------+-------------+ -| ``RENAME`` | yes | -+--------------------+-------------+ -| ``REPLACE`` | yes | -+--------------------+-------------+ -| ``RETURNS`` | no | -+--------------------+-------------+ -| ``REVOKE`` | yes | -+--------------------+-------------+ -| ``ROLE`` | no | -+--------------------+-------------+ -| ``ROLES`` | no | -+--------------------+-------------+ -| ``SCHEMA`` | yes | -+--------------------+-------------+ -| ``SELECT`` | yes | -+--------------------+-------------+ -| ``SET`` | yes | -+--------------------+-------------+ -| ``SFUNC`` | no | -+--------------------+-------------+ -| ``SMALLINT`` | no | -+--------------------+-------------+ -| ``STATIC`` | no | -+--------------------+-------------+ -| ``STORAGE`` | no | -+--------------------+-------------+ -| ``STYPE`` | no | -+--------------------+-------------+ -| ``SUPERUSER`` | no | -+--------------------+-------------+ -| ``TABLE`` | yes | -+--------------------+-------------+ -| ``TEXT`` | no | -+--------------------+-------------+ -| ``TIME`` | no | -+--------------------+-------------+ -| ``TIMESTAMP`` | no | -+--------------------+-------------+ -| ``TIMEUUID`` | no | -+--------------------+-------------+ -| ``TINYINT`` | no | -+--------------------+-------------+ -| ``TO`` | yes | -+--------------------+-------------+ -| ``TOKEN`` | yes | -+--------------------+-------------+ -| ``TRIGGER`` | no | -+--------------------+-------------+ -| ``TRUNCATE`` | yes | -+--------------------+-------------+ -| ``TTL`` | no | -+--------------------+-------------+ -| ``TUPLE`` | no | -+--------------------+-------------+ -| ``TYPE`` | no | -+--------------------+-------------+ -| ``UNLOGGED`` | yes | -+--------------------+-------------+ -| ``UPDATE`` | yes | -+--------------------+-------------+ -| ``USE`` | yes | -+--------------------+-------------+ -| ``USER`` | no | -+--------------------+-------------+ -| ``USERS`` | no | -+--------------------+-------------+ -| ``USING`` | yes | -+--------------------+-------------+ -| ``UUID`` | no | -+--------------------+-------------+ -| ``VALUES`` | no | -+--------------------+-------------+ -| ``VARCHAR`` | no | -+--------------------+-------------+ -| ``VARINT`` | no | 
-+--------------------+-------------+ -| ``WHERE`` | yes | -+--------------------+-------------+ -| ``WITH`` | yes | -+--------------------+-------------+ -| ``WRITETIME`` | no | -+--------------------+-------------+ - -Appendix B: CQL Reserved Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name. - -+-----------------+ -| type | -+=================+ -| ``bitstring`` | -+-----------------+ -| ``byte`` | -+-----------------+ -| ``complex`` | -+-----------------+ -| ``enum`` | -+-----------------+ -| ``interval`` | -+-----------------+ -| ``macaddr`` | -+-----------------+ - - -Appendix C: Dropping Compact Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting version 4.0, Thrift and COMPACT STORAGE is no longer supported. - -'ALTER ... DROP COMPACT STORAGE' statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables: - -- CQL-created Compact Tables that have no clustering columns, will expose an - additional clustering column ``column1`` with ``UTF8Type``. -- CQL-created Compact Tables that had no regular columns, will expose a - regular column ``value`` with ``BytesType``. -- For CQL-Created Compact Tables, all columns originally defined as - ``regular`` will be come ``static`` -- CQL-created Compact Tables that have clustering but have no regular - columns will have an empty value column (of ``EmptyType``) -- SuperColumn Tables (can only be created through Thrift) will expose - a compact value map with an empty name. -- Thrift-created Compact Tables will have types corresponding to their - Thrift definition. diff --git a/src/doc/3.11.6/_sources/cql/changes.rst.txt b/src/doc/3.11.6/_sources/cql/changes.rst.txt deleted file mode 100644 index 1eee5369a..000000000 --- a/src/doc/3.11.6/_sources/cql/changes.rst.txt +++ /dev/null @@ -1,204 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Changes -------- - -The following describes the changes in each version of CQL. - -3.4.4 -^^^^^ - -- ``ALTER TABLE`` ``ALTER`` has been removed; a column's type may not be changed after creation (:jira:`12443`). -- ``ALTER TYPE`` ``ALTER`` has been removed; a field's type may not be changed after creation (:jira:`12443`). - -3.4.3 -^^^^^ - -- Adds a new ``duration `` :ref:`data types ` (:jira:`11873`). -- Support for ``GROUP BY`` (:jira:`10707`). -- Adds a ``DEFAULT UNSET`` option for ``INSERT JSON`` to ignore omitted columns (:jira:`11424`). -- Allows ``null`` as a legal value for TTL on insert and update. It will be treated as equivalent to -inserting a 0 (:jira:`12216`). 
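
As a quick, non-authoritative sketch of two of the 3.4.3 additions listed above (``GROUP BY`` and the ``DEFAULT UNSET`` option for ``INSERT JSON``), using a hypothetical ``plays`` table whose partition key is ``game``::

    -- GROUP BY on primary key columns (CASSANDRA-10707)
    SELECT game, max(score) FROM plays GROUP BY game;

    -- DEFAULT UNSET: columns omitted from the JSON payload are left untouched
    -- rather than being overwritten with null
    INSERT INTO plays JSON '{"game": "chess", "player": "alice", "score": 12}' DEFAULT UNSET;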
- -3.4.2 -^^^^^ - -- If a table has a non zero ``default_time_to_live``, then explicitly specifying a TTL of 0 in an ``INSERT`` or - ``UPDATE`` statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels - the ``default_time_to_live``). This wasn't the case before and the ``default_time_to_live`` was applied even though a - TTL had been explicitly set. -- ``ALTER TABLE`` ``ADD`` and ``DROP`` now allow multiple columns to be added/removed. -- New ``PER PARTITION LIMIT`` option for ``SELECT`` statements (see `CASSANDRA-7017 - `__. -- :ref:`User-defined functions ` can now instantiate ``UDTValue`` and ``TupleValue`` instances via the - new ``UDFContext`` interface (see `CASSANDRA-10818 `__. -- :ref:`User-defined types ` may now be stored in a non-frozen form, allowing individual fields to be updated and - deleted in ``UPDATE`` statements and ``DELETE`` statements, respectively. (`CASSANDRA-7423 - `__). - -3.4.1 -^^^^^ - -- Adds ``CAST`` functions. - -3.4.0 -^^^^^ - -- Support for :ref:`materialized views `. -- ``DELETE`` support for inequality expressions and ``IN`` restrictions on any primary key columns. -- ``UPDATE`` support for ``IN`` restrictions on any primary key columns. - -3.3.1 -^^^^^ - -- The syntax ``TRUNCATE TABLE X`` is now accepted as an alias for ``TRUNCATE X``. - -3.3.0 -^^^^^ - -- :ref:`User-defined functions and aggregates ` are now supported. -- Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings. -- Introduces Roles to supersede user based authentication and access control -- New ``date``, ``time``, ``tinyint`` and ``smallint`` :ref:`data types ` have been added. -- :ref:`JSON support ` has been added -- Adds new time conversion functions and deprecate ``dateOf`` and ``unixTimestampOf``. - -3.2.0 -^^^^^ - -- :ref:`User-defined types ` supported. -- ``CREATE INDEX`` now supports indexing collection columns, including indexing the keys of map collections through the - ``keys()`` function -- Indexes on collections may be queried using the new ``CONTAINS`` and ``CONTAINS KEY`` operators -- :ref:`Tuple types ` were added to hold fixed-length sets of typed positional fields. -- ``DROP INDEX`` now supports optionally specifying a keyspace. - -3.1.7 -^^^^^ - -- ``SELECT`` statements now support selecting multiple rows in a single partition using an ``IN`` clause on combinations - of clustering columns. -- ``IF NOT EXISTS`` and ``IF EXISTS`` syntax is now supported by ``CREATE USER`` and ``DROP USER`` statements, - respectively. - -3.1.6 -^^^^^ - -- A new ``uuid()`` method has been added. -- Support for ``DELETE ... IF EXISTS`` syntax. - -3.1.5 -^^^^^ - -- It is now possible to group clustering columns in a relation, see :ref:`WHERE ` clauses. -- Added support for :ref:`static columns `. - -3.1.4 -^^^^^ - -- ``CREATE INDEX`` now allows specifying options when creating CUSTOM indexes. - -3.1.3 -^^^^^ - -- Millisecond precision formats have been added to the :ref:`timestamp ` parser. - -3.1.2 -^^^^^ - -- ``NaN`` and ``Infinity`` has been added as valid float constants. They are now reserved keywords. In the unlikely case - you we using them as a column identifier (or keyspace/table one), you will now need to double quote them. - -3.1.1 -^^^^^ - -- ``SELECT`` statement now allows listing the partition keys (using the ``DISTINCT`` modifier). See `CASSANDRA-4536 - `__. -- The syntax ``c IN ?`` is now supported in ``WHERE`` clauses. 
In that case, the value expected for the bind variable - will be a list of whatever type ``c`` is. -- It is now possible to use named bind variables (using ``:name`` instead of ``?``). - -3.1.0 -^^^^^ - -- ``ALTER TABLE`` ``DROP`` option added. -- ``SELECT`` statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported. -- ``CREATE`` statements for ``KEYSPACE``, ``TABLE`` and ``INDEX`` now supports an ``IF NOT EXISTS`` condition. - Similarly, ``DROP`` statements support a ``IF EXISTS`` condition. -- ``INSERT`` statements optionally supports a ``IF NOT EXISTS`` condition and ``UPDATE`` supports ``IF`` conditions. - -3.0.5 -^^^^^ - -- ``SELECT``, ``UPDATE``, and ``DELETE`` statements now allow empty ``IN`` relations (see `CASSANDRA-5626 - `__. - -3.0.4 -^^^^^ - -- Updated the syntax for custom :ref:`secondary indexes `. -- Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not - correct (the order was **not** the one of the type of the partition key). Instead, the ``token`` method should always - be used for range queries on the partition key (see :ref:`WHERE clauses `). - -3.0.3 -^^^^^ - -- Support for custom :ref:`secondary indexes ` has been added. - -3.0.2 -^^^^^ - -- Type validation for the :ref:`constants ` has been fixed. For instance, the implementation used to allow - ``'2'`` as a valid value for an ``int`` column (interpreting it has the equivalent of ``2``), or ``42`` as a valid - ``blob`` value (in which case ``42`` was interpreted as an hexadecimal representation of the blob). This is no longer - the case, type validation of constants is now more strict. See the :ref:`data types ` section for details - on which constant is allowed for which type. -- The type validation fixed of the previous point has lead to the introduction of blobs constants to allow the input of - blobs. Do note that while the input of blobs as strings constant is still supported by this version (to allow smoother - transition to blob constant), it is now deprecated and will be removed by a future version. If you were using strings - as blobs, you should thus update your client code ASAP to switch blob constants. -- A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is - now also allowed in select clauses. See the :ref:`section on functions ` for details. - -3.0.1 -^^^^^ - -- Date strings (and timestamps) are no longer accepted as valid ``timeuuid`` values. Doing so was a bug in the sense - that date string are not valid ``timeuuid``, and it was thus resulting in `confusing behaviors - `__. However, the following new methods have been added to help - working with ``timeuuid``: ``now``, ``minTimeuuid``, ``maxTimeuuid`` , - ``dateOf`` and ``unixTimestampOf``. -- Float constants now support the exponent notation. In other words, ``4.2E10`` is now a valid floating point value. - -Versioning -^^^^^^^^^^ - -Versioning of the CQL language adheres to the `Semantic Versioning `__ guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version. 
- -========= ============================================================================================================= - version description -========= ============================================================================================================= - Major The major version *must* be bumped when backward incompatible changes are introduced. This should rarely - occur. - Minor Minor version increments occur when new, but backward compatible, functionality is introduced. - Patch The patch version is incremented when bugs are fixed. -========= ============================================================================================================= diff --git a/src/doc/3.11.6/_sources/cql/ddl.rst.txt b/src/doc/3.11.6/_sources/cql/ddl.rst.txt deleted file mode 100644 index 302777544..000000000 --- a/src/doc/3.11.6/_sources/cql/ddl.rst.txt +++ /dev/null @@ -1,649 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-definition: - -Data Definition ---------------- - -CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in -*keyspaces*. A keyspace defines a number of options that applies to all the tables it contains, most prominently of -which is the :ref:`replication strategy ` used by the keyspace. It is generally encouraged to use -one keyspace by *application*, and thus many cluster may define only one keyspace. - -This section describes the statements used to create, modify, and remove those keyspace and tables. - -Common definitions -^^^^^^^^^^^^^^^^^^ - -The names of the keyspaces and tables are defined by the following grammar: - -.. productionlist:: - keyspace_name: `name` - table_name: [ `keyspace_name` '.' ] `name` - name: `unquoted_name` | `quoted_name` - unquoted_name: re('[a-zA-Z_0-9]{1, 48}') - quoted_name: '"' `unquoted_name` '"' - -Both keyspace and table name should be comprised of only alphanumeric characters, cannot be empty and are limited in -size to 48 characters (that limit exists mostly to avoid filenames (which may include the keyspace and table name) to go -over the limits of certain file systems). By default, keyspace and table names are case insensitive (``myTable`` is -equivalent to ``mytable``) but case sensitivity can be forced by using double-quotes (``"myTable"`` is different from -``mytable``). - -Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is -part of. If is is not fully-qualified, the table is assumed to be in the *current* keyspace (see :ref:`USE statement -`). - -Further, the valid names for columns is simply defined as: - -.. 
productionlist:: - column_name: `identifier` - -We also define the notion of statement options for use in the following section: - -.. productionlist:: - options: `option` ( AND `option` )* - option: `identifier` '=' ( `identifier` | `constant` | `map_literal` ) - -.. _create-keyspace-statement: - -CREATE KEYSPACE -^^^^^^^^^^^^^^^ - -A keyspace is created using a ``CREATE KEYSPACE`` statement: - -.. productionlist:: - create_keyspace_statement: CREATE KEYSPACE [ IF NOT EXISTS ] `keyspace_name` WITH `options` - -For instance:: - - CREATE KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - - CREATE KEYSPACE Excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3} - AND durable_writes = false; - - -The supported ``options`` are: - -=================== ========== =========== ========= =================================================================== -name kind mandatory default description -=================== ========== =========== ========= =================================================================== -``replication`` *map* yes The replication strategy and options to use for the keyspace (see - details below). -``durable_writes`` *simple* no true Whether to use the commit log for updates on this keyspace - (disable this option at your own risk!). -=================== ========== =========== ========= =================================================================== - -The ``replication`` property is mandatory and must at least contains the ``'class'`` sub-option which defines the -:ref:`replication strategy ` class to use. The rest of the sub-options depends on what replication -strategy is used. By default, Cassandra support the following ``'class'``: - -- ``'SimpleStrategy'``: A simple strategy that defines a replication factor for the whole cluster. The only sub-options - supported is ``'replication_factor'`` to define that replication factor and is mandatory. -- ``'NetworkTopologyStrategy'``: A replication strategy that allows to set the replication factor independently for - each data-center. The rest of the sub-options are key-value pairs where a key is a data-center name and its value is - the associated replication factor. - -Attempting to create a keyspace that already exists will return an error unless the ``IF NOT EXISTS`` option is used. If -it is used, the statement will be a no-op if the keyspace already exists. - -.. _use-statement: - -USE -^^^ - -The ``USE`` statement allows to change the *current* keyspace (for the *connection* on which it is executed). A number -of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the -default keyspace used when those objects are referred without a fully-qualified name (that is, without being prefixed a -keyspace name). A ``USE`` statement simply takes the keyspace to use as current as argument: - -.. productionlist:: - use_statement: USE `keyspace_name` - -.. _alter-keyspace-statement: - -ALTER KEYSPACE -^^^^^^^^^^^^^^ - -An ``ALTER KEYSPACE`` statement allows to modify the options of a keyspace: - -.. productionlist:: - alter_keyspace_statement: ALTER KEYSPACE `keyspace_name` WITH `options` - -For instance:: - - ALTER KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4}; - -The supported options are the same than for :ref:`creating a keyspace `. - -.. 
_drop-keyspace-statement: - -DROP KEYSPACE -^^^^^^^^^^^^^ - -Dropping a keyspace can be done using the ``DROP KEYSPACE`` statement: - -.. productionlist:: - drop_keyspace_statement: DROP KEYSPACE [ IF EXISTS ] `keyspace_name` - -For instance:: - - DROP KEYSPACE Excelsior; - -Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UTD and -functions in it, and all the data contained in those tables. - -If the keyspace does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _create-table-statement: - -CREATE TABLE -^^^^^^^^^^^^ - -Creating a new table uses the ``CREATE TABLE`` statement: - -.. productionlist:: - create_table_statement: CREATE TABLE [ IF NOT EXISTS ] `table_name` - : '(' - : `column_definition` - : ( ',' `column_definition` )* - : [ ',' PRIMARY KEY '(' `primary_key` ')' ] - : ')' [ WITH `table_options` ] - column_definition: `column_name` `cql_type` [ STATIC ] [ PRIMARY KEY] - primary_key: `partition_key` [ ',' `clustering_columns` ] - partition_key: `column_name` - : | '(' `column_name` ( ',' `column_name` )* ')' - clustering_columns: `column_name` ( ',' `column_name` )* - table_options: COMPACT STORAGE [ AND `table_options` ] - : | CLUSTERING ORDER BY '(' `clustering_order` ')' [ AND `table_options` ] - : | `options` - clustering_order: `column_name` (ASC | DESC) ( ',' `column_name` (ASC | DESC) )* - -For instance:: - - CREATE TABLE monkeySpecies ( - species text PRIMARY KEY, - common_name text, - population varint, - average_size int - ) WITH comment='Important biological records' - AND read_repair_chance = 1.0; - - CREATE TABLE timeline ( - userid uuid, - posted_month int, - posted_time uuid, - body text, - posted_by text, - PRIMARY KEY (userid, posted_month, posted_time) - ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }; - - CREATE TABLE loads ( - machine inet, - cpu int, - mtime timeuuid, - load float, - PRIMARY KEY ((machine, cpu), mtime) - ) WITH CLUSTERING ORDER BY (mtime DESC); - -A CQL table has a name and is composed of a set of *rows*. Creating a table amounts to defining which :ref:`columns -` the rows will be composed, which of those columns compose the :ref:`primary key `, as -well as optional :ref:`options ` for the table. - -Attempting to create an already existing table will return an error unless the ``IF NOT EXISTS`` directive is used. If -it is used, the statement will be a no-op if the table already exists. - - -.. _column-definition: - -Column definitions -~~~~~~~~~~~~~~~~~~ - -Every rows in a CQL table has a set of predefined columns defined at the time of the table creation (or added later -using an :ref:`alter statement`). - -A :token:`column_definition` is primarily comprised of the name of the column defined and it's :ref:`type `, -which restrict which values are accepted for that column. Additionally, a column definition can have the following -modifiers: - -``STATIC`` - it declares the column as being a :ref:`static column `. - -``PRIMARY KEY`` - it declares the column as being the sole component of the :ref:`primary key ` of the table. - -.. _static-columns: - -Static columns -`````````````` -Some columns can be declared as ``STATIC`` in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same :ref:`partition key `). 
For instance:: - - CREATE TABLE t ( - pk int, - t int, - v text, - s text static, - PRIMARY KEY (pk, t) - ); - - INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0'); - INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1'); - - SELECT * FROM t; - pk | t | v | s - ----+---+--------+----------- - 0 | 0 | 'val0' | 'static1' - 0 | 1 | 'val1' | 'static1' - -As can be seen, the ``s`` value is the same (``static1``) for both of the row in the partition (the partition key in -that example being ``pk``, both rows are in that same partition): the 2nd insertion has overridden the value for ``s``. - -The use of static columns as the following restrictions: - -- tables with the ``COMPACT STORAGE`` option (see below) cannot use them. -- a table without clustering columns cannot have static columns (in a table without clustering columns, every partition - has only one row, and so every column is inherently static). -- only non ``PRIMARY KEY`` columns can be static. - -.. _primary-key: - -The Primary key -~~~~~~~~~~~~~~~ - -Within a table, a row is uniquely identified by its ``PRIMARY KEY``, and hence all table **must** define a PRIMARY KEY -(and only one). A ``PRIMARY KEY`` definition is composed of one or more of the columns defined in the table. -Syntactically, the primary key is defined the keywords ``PRIMARY KEY`` followed by comma-separated list of the column -names composing it within parenthesis, but if the primary key has only one column, one can alternatively follow that -column definition by the ``PRIMARY KEY`` keywords. The order of the columns in the primary key definition matter. - -A CQL primary key is composed of 2 parts: - -- the :ref:`partition key ` part. It is the first component of the primary key definition. It can be a - single column or, using additional parenthesis, can be multiple columns. A table always have at least a partition key, - the smallest possible table definition is:: - - CREATE TABLE t (k text PRIMARY KEY); - -- the :ref:`clustering columns `. Those are the columns after the first component of the primary key - definition, and the order of those columns define the *clustering order*. - -Some example of primary key definition are: - -- ``PRIMARY KEY (a)``: ``a`` is the partition key and there is no clustering columns. -- ``PRIMARY KEY (a, b, c)`` : ``a`` is the partition key and ``b`` and ``c`` are the clustering columns. -- ``PRIMARY KEY ((a, b), c)`` : ``a`` and ``b`` compose the partition key (this is often called a *composite* partition - key) and ``c`` is the clustering column. - - -.. _partition-key: - -The partition key -````````````````` - -Within a table, CQL defines the notion of a *partition*. A partition is simply the set of rows that share the same value -for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same -partition only they have the same values for all those partition key column. So for instance, given the following table -definition and content:: - - CREATE TABLE t ( - a int, - b int, - c int, - d int, - PRIMARY KEY ((a, b), c, d) - ); - - SELECT * FROM t; - a | b | c | d - ---+---+---+--- - 0 | 0 | 0 | 0 // row 1 - 0 | 0 | 1 | 1 // row 2 - 0 | 1 | 2 | 2 // row 3 - 0 | 1 | 3 | 3 // row 4 - 1 | 1 | 4 | 4 // row 5 - -``row 1`` and ``row 2`` are in the same partition, ``row 3`` and ``row 4`` are also in the same partition (but a -different one) and ``row 5`` is in yet another partition. 
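
For illustration only: with the composite partition key of the table above, a single-partition read must restrict both ``a`` and ``b``::

    SELECT * FROM t WHERE a = 0 AND b = 1;   -- returns row 3 and row 4, which share a partition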
- -Note that a table always has a partition key, and that if the table has no :ref:`clustering columns -`, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns). - -The most important property of partition is that all the rows belonging to the same partition are guarantee to be stored -on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be -localized together in the Cluster, and it is thus important to choose your partition key wisely so that rows that needs -to be fetch together are in the same partition (so that querying those rows together require contacting a minimum of -nodes). - -Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to -be stored on the same set of replica node, a partition key that groups too much data can create a hotspot. - -Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done *atomically* and in *isolation*, which is not the case across partitions. - -The proper choice of the partition key and clustering columns for a table is probably one of the most important aspect -of data modeling in Cassandra, and it largely impact which queries can be performed, and how efficiently they are. - - -.. _clustering-columns: - -The clustering columns -`````````````````````` - -The clustering columns of a table defines the clustering order for the partition of that table. For a given -:ref:`partition `, all the rows are physically ordered inside Cassandra by that clustering order. For -instance, given:: - - CREATE TABLE t ( - a int, - b int, - c int, - PRIMARY KEY (a, c, d) - ); - - SELECT * FROM t; - a | b | c - ---+---+--- - 0 | 0 | 4 // row 1 - 0 | 1 | 9 // row 2 - 0 | 2 | 2 // row 3 - 0 | 3 | 3 // row 4 - -then the rows (which all belong to the same partition) are all stored internally in the order of the values of their -``b`` column (the order they are displayed above). So where the partition key of the table allows to group rows on the -same replica set, the clustering columns controls how those rows are stored on the replica. That sorting allows the -retrieval of a range of rows within a partition (for instance, in the example above, ``SELECT * FROM t WHERE a = 0 AND b -> 1 and b <= 3``) to be very efficient. - - -.. _create-table-options: - -Table options -~~~~~~~~~~~~~ - -A CQL table has a number of options that can be set at creation (and, for most of them, :ref:`altered -` later). These options are specified after the ``WITH`` keyword. - -Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the ``COMPACT STORAGE`` option and the ``CLUSTERING ORDER`` option. Those, as well as the other -options of a table are described in the following sections. - -.. _compact-tables: - -Compact tables -`````````````` - -.. warning:: Since Cassandra 3.0, compact tables have the exact same layout internally than non compact ones (for the - same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition - and usage that are necessary to ensure backward compatibility with the deprecated Thrift API. 
And as ``COMPACT - STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new table with the - ``COMPACT STORAGE`` option. - -A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is mainly targeted towards backward -compatibility for definitions created before CQL version 3 (see `www.datastax.com/dev/blog/thrift-to-cql3 -`__ for more details) and shouldn't be used for new tables. Declaring a -table with this option creates limitations for the table which are largely arbitrary but necessary for backward -compatibility with the (deprecated) Thrift API. Amongst those limitation: - -- a compact table cannot use collections nor static columns. -- if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary - key ones. This imply you cannot add or remove columns after creation in particular. -- a compact table is limited in the indexes it can create, and no materialized view can be created on it. - -.. _clustering-order: - -Reversing the clustering order -`````````````````````````````` - -The clustering order of a table is defined by the :ref:`clustering columns ` of that table. By -default, that ordering is based on natural order of those clustering order, but the ``CLUSTERING ORDER`` allows to -change that clustering order to use the *reverse* natural order for some (potentially all) of the columns. - -The ``CLUSTERING ORDER`` option takes the comma-separated list of the clustering column, each with a ``ASC`` (for -*ascendant*, e.g. the natural order) or ``DESC`` (for *descendant*, e.g. the reverse natural order). Note in particular -that the default (if the ``CLUSTERING ORDER`` option is not used) is strictly equivalent to using the option with all -clustering columns using the ``ASC`` modifier. - -Note that this option is basically a hint for the storage engine to change the order in which it stores the row but it -has 3 visible consequences: - -# it limits which ``ORDER BY`` clause are allowed for :ref:`selects ` on that table. You can only - order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering column - ``a`` and ``b`` and you defined ``WITH CLUSTERING ORDER (a DESC, b ASC)``, then in queries you will be allowed to use - ``ORDER BY (a DESC, b ASC)`` and (reverse clustering order) ``ORDER BY (a ASC, b DESC)`` but **not** ``ORDER BY (a - ASC, b ASC)`` (nor ``ORDER BY (a DESC, b DESC)``). -# it also change the default order of results when queried (if no ``ORDER BY`` is provided). Results are always returned - in clustering order (within a partition). -# it has a small performance impact on some queries as queries in reverse clustering order are slower than the one in - forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of - your columns (which is common with time series for instance where you often want data from the newest to the oldest), - it is an optimization to declare a descending clustering order. - -.. _create-table-general-options: - -Other table options -``````````````````` - -.. 
todo:: review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance) - -A table supports the following options: - -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| option | kind | default | description | -+================================+==========+=============+===========================================================+ -| ``comment`` | *simple* | none | A free-form, human-readable comment. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``read_repair_chance`` | *simple* | 0.1 | The probability with which to query extra nodes (e.g. | -| | | | more nodes than required by the consistency level) for | -| | | | the purpose of read repairs. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``dclocal_read_repair_chance`` | *simple* | 0 | The probability with which to query extra nodes (e.g. | -| | | | more nodes than required by the consistency level) | -| | | | belonging to the same data center than the read | -| | | | coordinator for the purpose of read repairs. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``gc_grace_seconds`` | *simple* | 864000 | Time to wait before garbage collecting tombstones | -| | | | (deletion markers). | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``bloom_filter_fp_chance`` | *simple* | 0.00075 | The target probability of false positive of the sstable | -| | | | bloom filters. Said bloom filters will be sized to provide| -| | | | the provided probability (thus lowering this value impact | -| | | | the size of bloom filters in-memory and on-disk) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``default_time_to_live`` | *simple* | 0 | The default expiration time (“TTL”) in seconds for a | -| | | | table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compaction`` | *map* | *see below* | :ref:`Compaction options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compression`` | *map* | *see below* | :ref:`Compression options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``caching`` | *map* | *see below* | :ref:`Caching options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ - -.. _cql-compaction-options: - -Compaction options -################## - -The ``compaction`` options must at least define the ``'class'`` sub-option, that defines the compaction strategy class -to use. The default supported class are ``'SizeTieredCompactionStrategy'`` (:ref:`STCS `), -``'LeveledCompactionStrategy'`` (:ref:`LCS `) and ``'TimeWindowCompactionStrategy'`` (:ref:`TWCS `) (the -``'DateTieredCompactionStrategy'`` is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be -preferred instead). Custom strategy can be provided by specifying the full class name as a :ref:`string constant -`. 
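
As an illustrative sketch only (the valid sub-options depend on the strategy chosen, and ``events`` is a hypothetical table), a table could be switched to ``TimeWindowCompactionStrategy`` with one-day windows like this::

    ALTER TABLE events
        WITH compaction = { 'class' : 'TimeWindowCompactionStrategy',
                            'compaction_window_unit' : 'DAYS',
                            'compaction_window_size' : 1 };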
- -All default strategies support a number of :ref:`common options `, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS `, :ref:`LCS -` and :ref:`TWCS `). - -.. _cql-compression-options: - -Compression options -################### - -The ``compression`` options define if and how the sstables of the table are compressed. The following sub-options are -available: - -========================= =============== ============================================================================= - Option Default Description -========================= =============== ============================================================================= - ``class`` LZ4Compressor The compression algorithm to use. Default compressor are: LZ4Compressor, - SnappyCompressor and DeflateCompressor. Use ``'enabled' : false`` to disable - compression. Custom compressor can be provided by specifying the full class - name as a “string constant”:#constants. - ``enabled`` true Enable/disable sstable compression. - ``chunk_length_in_kb`` 64 On disk SSTables are compressed by block (to allow random reads). This - defines the size (in KB) of said block. Bigger values may improve the - compression rate, but increases the minimum size of data to be read from disk - for a read - ``crc_check_chance`` 1.0 When compression is enabled, each compressed block includes a checksum of - that block for the purpose of detecting disk bitrot and avoiding the - propagation of corruption to other replica. This option defines the - probability with which those checksums are checked during read. By default - they are always checked. Set to 0 to disable checksum checking and to 0.5 for - instance to check them every other read | -========================= =============== ============================================================================= - - -For instance, to create a table with LZ4Compressor and a chunk_lenth_in_kb of 4KB:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4}; - - -.. _cql-caching-options: - -Caching options -############### - -The ``caching`` options allows to configure both the *key cache* and the *row cache* for the table. The following -sub-options are available: - -======================== ========= ==================================================================================== - Option Default Description -======================== ========= ==================================================================================== - ``keys`` ALL Whether to cache keys (“key cache”) for this table. Valid values are: ``ALL`` and - ``NONE``. - ``rows_per_partition`` NONE The amount of rows to cache per partition (“row cache”). If an integer ``n`` is - specified, the first ``n`` queried rows of a partition will be cached. Other - possible options are ``ALL``, to cache all rows of a queried partition, or ``NONE`` - to disable row caching. -======================== ========= ==================================================================================== - - -For instance, to create a table with both a key cache and 10 rows per partition:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10}; - - -Other considerations: -##################### - -- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. 
There is thus no need to try to - anticipate future usage when creating a table. - -.. _alter-table-statement: - -ALTER TABLE -^^^^^^^^^^^ - -Altering an existing table uses the ``ALTER TABLE`` statement: - -.. productionlist:: - alter_table_statement: ALTER TABLE `table_name` `alter_table_instruction` - alter_table_instruction: ADD `column_name` `cql_type` ( ',' `column_name` `cql_type` )* - : | DROP `column_name` ( `column_name` )* - : | WITH `options` - -For instance:: - - ALTER TABLE addamsFamily ADD gravesite varchar; - - ALTER TABLE addamsFamily - WITH comment = 'A most excellent and useful table' - AND read_repair_chance = 0.2; - -The ``ALTER TABLE`` statement can: - -- Add new column(s) to the table (through the ``ADD`` instruction). Note that the primary key of a table cannot be - changed and thus newly added column will, by extension, never be part of the primary key. Also note that :ref:`compact - tables ` have restrictions regarding column addition. Note that this is constant (in the amount of - data the cluster contains) time operation. -- Remove column(s) from the table. This drops both the column and all its content, but note that while the column - becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings - below. Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the - cluster) time operation. -- Change some of the table options (through the ``WITH`` instruction). The :ref:`supported options - ` are the same that when creating a table (outside of ``COMPACT STORAGE`` and ``CLUSTERING - ORDER`` that cannot be changed after creation). Note that setting any ``compaction`` sub-options has the effect of - erasing all previous ``compaction`` options, so you need to re-specify all the sub-options if you want to keep them. - The same note applies to the set of ``compression`` sub-options. - -.. warning:: Dropping a column assumes that the timestamps used for the value of this column are "real" timestamp in - microseconds. Using "real" timestamps in microseconds is the default is and is **strongly** recommended but as - Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another - convention. Please be aware that if you do so, dropping a column will not work correctly. - -.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name than the dropped one - **unless** the type of the dropped column was a (non-frozen) column (due to an internal technical limitation). - - -.. _drop-table-statement: - -DROP TABLE -^^^^^^^^^^ - -Dropping a table uses the ``DROP TABLE`` statement: - -.. productionlist:: - drop_table_statement: DROP TABLE [ IF EXISTS ] `table_name` - -Dropping a table results in the immediate, irreversible removal of the table, including all data it contains. - -If the table does not exist, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _truncate-statement: - -TRUNCATE -^^^^^^^^ - -A table can be truncated using the ``TRUNCATE`` statement: - -.. productionlist:: - truncate_statement: TRUNCATE [ TABLE ] `table_name` - -Note that ``TRUNCATE TABLE foo`` is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the ``TABLE`` keyword can be omitted. - -Truncating a table permanently removes all existing data from the table, but without removing the table itself. 
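
To make the distinction between truncating and dropping concrete, here is a short illustrative sequence reusing the ``monkeySpecies`` table from the earlier ``CREATE TABLE`` example (the table name is used purely for illustration)::

    TRUNCATE TABLE monkeySpecies;        -- removes all rows but keeps the table definition
    DROP TABLE IF EXISTS monkeySpecies;  -- removes the table itself, including any remaining data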
diff --git a/src/doc/3.11.6/_sources/cql/definitions.rst.txt b/src/doc/3.11.6/_sources/cql/definitions.rst.txt deleted file mode 100644 index d4a5b59b9..000000000 --- a/src/doc/3.11.6/_sources/cql/definitions.rst.txt +++ /dev/null @@ -1,232 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. highlight:: cql - -Definitions ------------ - -.. _conventions: - -Conventions -^^^^^^^^^^^ - -To aid in specifying the CQL syntax, we will use the following conventions in this document: - -- Language rules will be given in an informal `BNF variant - `_ notation. In particular, we'll use square brakets - (``[ item ]``) for optional items, ``*`` and ``+`` for repeated items (where ``+`` imply at least one). -- The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to - their definition) while terminal keywords will be provided "all caps". Note however that keywords are - :ref:`identifiers` and are thus case insensitive in practice. We will also define some early construction using - regexp, which we'll indicate with ``re()``. -- The grammar is provided for documentation purposes and leave some minor details out. For instance, the comma on the - last column definition in a ``CREATE TABLE`` statement is optional but supported if present even though the grammar in - this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL. -- References to keywords or pieces of CQL code in running text will be shown in a ``fixed-width font``. - - -.. _identifiers: - -Identifiers and keywords -^^^^^^^^^^^^^^^^^^^^^^^^ - -The CQL language uses *identifiers* (or *names*) to identify tables, columns and other objects. An identifier is a token -matching the regular expression ``[a-zA-Z][a-zA-Z0-9_]*``. - -A number of such identifiers, like ``SELECT`` or ``WITH``, are *keywords*. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in :ref:`appendix-A`. - -Identifiers and (unquoted) keywords are case insensitive. Thus ``SELECT`` is the same than ``select`` or ``sElEcT``, and -``myId`` is the same than ``myid`` or ``MYID``. A convention often used (in particular by the samples of this -documentation) is to use upper case for keywords and lower case for other identifiers. - -There is a second kind of identifiers called *quoted identifiers* defined by enclosing an arbitrary sequence of -characters (non empty) in double-quotes(``"``). Quoted identifiers are never keywords. Thus ``"select"`` is not a -reserved keyword and can be used to refer to a column (note that using this is particularly advised), while ``select`` -would raise a parsing error. 
Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case -sensitive (``"My Quoted Id"`` is *different* from ``"my quoted id"``). A fully lowercase quoted identifier that matches -``[a-zA-Z][a-zA-Z0-9_]*`` is however *equivalent* to the unquoted identifier obtained by removing the double-quote (so -``"myid"`` is equivalent to ``myid`` and to ``myId`` but different from ``"myId"``). Inside a quoted identifier, the -double-quote character can be repeated to escape it, so ``"foo "" bar"`` is a valid identifier. - -.. note:: *quoted identifiers* allows to declare columns with arbitrary names, and those can sometime clash with - specific names used by the server. For instance, when using conditional update, the server will respond with a - result-set containing a special result named ``"[applied]"``. If you’ve declared a column with such a name, this - could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but - if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like - ``"[applied]"``) and any name that looks like a function call (like ``"f(x)"``). - -More formally, we have: - -.. productionlist:: - identifier: `unquoted_identifier` | `quoted_identifier` - unquoted_identifier: re('[a-zA-Z][a-zA-Z0-9_]*') - quoted_identifier: '"' (any character where " can appear if doubled)+ '"' - -.. _constants: - -Constants -^^^^^^^^^ - -CQL defines the following kind of *constants*: - -.. productionlist:: - constant: `string` | `integer` | `float` | `boolean` | `uuid` | `blob` | NULL - string: '\'' (any character where ' can appear if doubled)+ '\'' - : '$$' (any character other than '$$') '$$' - integer: re('-?[0-9]+') - float: re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY - boolean: TRUE | FALSE - uuid: `hex`{8}-`hex`{4}-`hex`{4}-`hex`{4}-`hex`{12} - hex: re("[0-9a-fA-F]") - blob: '0' ('x' | 'X') `hex`+ - -In other words: - -- A string constant is an arbitrary sequence of characters enclosed by single-quote(``'``). A single-quote - can be included by repeating it, e.g. ``'It''s raining today'``. Those are not to be confused with quoted - :ref:`identifiers` that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence - of characters by two dollar characters, in which case single-quote can be used without escaping (``$$It's raining - today$$``). That latter form is often used when defining :ref:`user-defined functions ` to avoid having to - escape single-quote characters in function body (as they are more likely to occur than ``$$``). -- Integer, float and boolean constant are defined as expected. Note however than float allows the special ``NaN`` and - ``Infinity`` constants. -- CQL supports UUID_ constants. -- Blobs content are provided in hexadecimal and prefixed by ``0x``. -- The special ``NULL`` constant denotes the absence of value. - -For how these constants are typed, see the :ref:`data-types` section. - -Terms -^^^^^ - -CQL has the notion of a *term*, which denotes the kind of values that CQL support. Terms are defined by: - -.. productionlist:: - term: `constant` | `literal` | `function_call` | `type_hint` | `bind_marker` - literal: `collection_literal` | `udt_literal` | `tuple_literal` - function_call: `identifier` '(' [ `term` (',' `term`)* ] ')' - type_hint: '(' `cql_type` `)` term - bind_marker: '?' | ':' `identifier` - -A term is thus one of: - -- A :ref:`constant `. 
-- A literal for either :ref:`a collection `, :ref:`a user-defined type ` or :ref:`a tuple ` - (see the linked sections for details). -- A function call: see :ref:`the section on functions ` for details on which :ref:`native function - ` exists and how to define your own :ref:`user-defined ones `. -- A *type hint*: see the :ref:`related section ` for details. -- A bind marker, which denotes a variable to be bound at execution time. See the section on :ref:`prepared-statements` - for details. A bind marker can be either anonymous (``?``) or named (``:some_name``). The latter form provides a more - convenient way to refer to the variable for binding it and should generally be preferred. - - -Comments -^^^^^^^^ - -A comment in CQL is a line beginning by either double dashes (``--``) or double slash (``//``). - -Multi-line comments are also supported through enclosure within ``/*`` and ``*/`` (but nesting is not supported). - -:: - - -- This is a comment - // This is a comment too - /* This is - a multi-line comment */ - -Statements -^^^^^^^^^^ - -CQL consists of statements that can be divided in the following categories: - -- :ref:`data-definition` statements, to define and change how the data is stored (keyspaces and tables). -- :ref:`data-manipulation` statements, for selecting, inserting and deleting data. -- :ref:`secondary-indexes` statements. -- :ref:`materialized-views` statements. -- :ref:`cql-roles` statements. -- :ref:`cql-permissions` statements. -- :ref:`User-Defined Functions ` statements. -- :ref:`udts` statements. -- :ref:`cql-triggers` statements. - -All the statements are listed below and are described in the rest of this documentation (see links above): - -.. productionlist:: - cql_statement: `statement` [ ';' ] - statement: `ddl_statement` - : | `dml_statement` - : | `secondary_index_statement` - : | `materialized_view_statement` - : | `role_or_permission_statement` - : | `udf_statement` - : | `udt_statement` - : | `trigger_statement` - ddl_statement: `use_statement` - : | `create_keyspace_statement` - : | `alter_keyspace_statement` - : | `drop_keyspace_statement` - : | `create_table_statement` - : | `alter_table_statement` - : | `drop_table_statement` - : | `truncate_statement` - dml_statement: `select_statement` - : | `insert_statement` - : | `update_statement` - : | `delete_statement` - : | `batch_statement` - secondary_index_statement: `create_index_statement` - : | `drop_index_statement` - materialized_view_statement: `create_materialized_view_statement` - : | `drop_materialized_view_statement` - role_or_permission_statement: `create_role_statement` - : | `alter_role_statement` - : | `drop_role_statement` - : | `grant_role_statement` - : | `revoke_role_statement` - : | `list_roles_statement` - : | `grant_permission_statement` - : | `revoke_permission_statement` - : | `list_permissions_statement` - : | `create_user_statement` - : | `alter_user_statement` - : | `drop_user_statement` - : | `list_users_statement` - udf_statement: `create_function_statement` - : | `drop_function_statement` - : | `create_aggregate_statement` - : | `drop_aggregate_statement` - udt_statement: `create_type_statement` - : | `alter_type_statement` - : | `drop_type_statement` - trigger_statement: `create_trigger_statement` - : | `drop_trigger_statement` - -.. _prepared-statements: - -Prepared Statements -^^^^^^^^^^^^^^^^^^^ - -CQL supports *prepared statements*. 
Prepared statements are an optimization that allows to parse a query only once but -execute it multiple times with different concrete values. - -Any statement that uses at least one bind marker (see :token:`bind_marker`) will need to be *prepared*. After which the statement -can be *executed* by provided concrete values for each of its marker. The exact details of how a statement is prepared -and then executed depends on the CQL driver used and you should refer to your driver documentation. diff --git a/src/doc/3.11.6/_sources/cql/dml.rst.txt b/src/doc/3.11.6/_sources/cql/dml.rst.txt deleted file mode 100644 index 1308de57e..000000000 --- a/src/doc/3.11.6/_sources/cql/dml.rst.txt +++ /dev/null @@ -1,522 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-manipulation: - -Data Manipulation ------------------ - -This section describes the statements supported by CQL to insert, update, delete and query data. - -.. _select-statement: - -SELECT -^^^^^^ - -Querying data from data is done using a ``SELECT`` statement: - -.. productionlist:: - select_statement: SELECT [ JSON | DISTINCT ] ( `select_clause` | '*' ) - : FROM `table_name` - : [ WHERE `where_clause` ] - : [ GROUP BY `group_by_clause` ] - : [ ORDER BY `ordering_clause` ] - : [ PER PARTITION LIMIT (`integer` | `bind_marker`) ] - : [ LIMIT (`integer` | `bind_marker`) ] - : [ ALLOW FILTERING ] - select_clause: `selector` [ AS `identifier` ] ( ',' `selector` [ AS `identifier` ] ) - selector: `column_name` - : | `term` - : | CAST '(' `selector` AS `cql_type` ')' - : | `function_name` '(' [ `selector` ( ',' `selector` )* ] ')' - : | COUNT '(' '*' ')' - where_clause: `relation` ( AND `relation` )* - relation: `column_name` `operator` `term` - : '(' `column_name` ( ',' `column_name` )* ')' `operator` `tuple_literal` - : TOKEN '(' `column_name` ( ',' `column_name` )* ')' `operator` `term` - operator: '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY - group_by_clause: `column_name` ( ',' `column_name` )* - ordering_clause: `column_name` [ ASC | DESC ] ( ',' `column_name` [ ASC | DESC ] )* - -For instance:: - - SELECT name, occupation FROM users WHERE userid IN (199, 200, 207); - SELECT JSON name, occupation FROM users WHERE userid = 199; - SELECT name AS user_name, occupation AS user_occupation FROM users; - - SELECT time, value - FROM events - WHERE event_type = 'myEvent' - AND time > '2011-02-03' - AND time <= '2012-01-01' - - SELECT COUNT (*) AS user_count FROM users; - -The ``SELECT`` statements reads one or more columns for one or more rows in a table. It returns a result-set of the rows -matching the request, where each row contains the values for the selection corresponding to the query. 
Additionally, -:ref:`functions ` including :ref:`aggregation ` ones can be applied to the result. - -A ``SELECT`` statement contains at least a :ref:`selection clause ` and the name of the table on which -the selection is on (note that CQL does **not** joins or sub-queries and thus a select statement only apply to a single -table). In most case, a select will also have a :ref:`where clause ` and it can optionally have additional -clauses to :ref:`order ` or :ref:`limit ` the results. Lastly, :ref:`queries that require -filtering ` can be allowed if the ``ALLOW FILTERING`` flag is provided. - -.. _selection-clause: - -Selection clause -~~~~~~~~~~~~~~~~ - -The :token:`select_clause` determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. It consists of a comma-separated list of *selectors* or, -alternatively, of the wildcard character (``*``) to select all the columns defined in the table. - -Selectors -````````` - -A :token:`selector` can be one of: - -- A column name of the table selected, to retrieve the values for that column. -- A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the - corresponding column of the result-set will simply have the value of this term for every row returned). -- A casting, which allows to convert a nested selector to a (compatible) type. -- A function call, where the arguments are selector themselves. See the section on :ref:`functions ` for - more details. -- The special call ``COUNT(*)`` to the :ref:`COUNT function `, which counts all non-null results. - -Aliases -``````` - -Every *top-level* selector can also be aliased (using `AS`). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:: - - // Without alias - SELECT intAsBlob(4) FROM t; - - // intAsBlob(4) - // -------------- - // 0x00000004 - - // With alias - SELECT intAsBlob(4) AS four FROM t; - - // four - // ------------ - // 0x00000004 - -.. note:: Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the ``WHERE`` - clause, not in the ``ORDER BY`` clause, ...). You must use the orignal column name instead. - - -``WRITETIME`` and ``TTL`` function -``````````````````````````````````` - -Selection supports two special functions (that aren't allowed anywhere else): ``WRITETIME`` and ``TTL``. Both function -take only one argument and that argument *must* be a column name (so for instance ``TTL(3)`` is invalid). - -Those functions allow to retrieve meta-information that are stored internally for each column, namely: - -- the timestamp of the value of the column for ``WRITETIME``. -- the remaining time to live (in seconds) for the value of the column if it set to expire (and ``null`` otherwise). - -.. _where-clause: - -The ``WHERE`` clause -~~~~~~~~~~~~~~~~~~~~ - -The ``WHERE`` clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the ``PRIMARY KEY`` and/or have a `secondary index <#createIndexStmt>`__ defined on them. - -Not all relations are allowed in a query. For instance, non-equal relations (where ``IN`` is considered as an equal -relation) on a partition key are not supported (but see the use of the ``TOKEN`` method below to do non-equal queries on -the partition key). 
Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations -on them is restricted to the relations that allow to select a **contiguous** (for the ordering) set of rows. For -instance, given:: - - CREATE TABLE posts ( - userid text, - blog_title text, - posted_at timestamp, - entry_title text, - content text, - category int, - PRIMARY KEY (userid, blog_title, posted_at) - ) - -The following query is allowed:: - - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND blog_title='John''s Blog' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):: - - // Needs a blog_title to be set to select ranges of posted_at - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -When specifying relations, the ``TOKEN`` function can be used on the ``PARTITION KEY`` column to query. In that case, -rows will be selected based on the token of their ``PARTITION_KEY`` rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -``token(-1) > token(0)`` in particular). Example:: - - SELECT * FROM posts - WHERE token(userid) > token('tom') AND token(userid) < token('bob') - -Moreover, the ``IN`` relation is only allowed on the last column of the partition key and on the last column of the full -primary key. - -It is also possible to “group” ``CLUSTERING COLUMNS`` together in a relation using the tuple notation. For instance:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01') - -will request all rows that sorts after the one having “John's Blog” as ``blog_tile`` and '2012-01-01' for ``posted_at`` -in the clustering order. In particular, rows having a ``post_at <= '2012-01-01'`` will be returned as long as their -``blog_title > 'John''s Blog'``, which would not be the case for:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND blog_title > 'John''s Blog' - AND posted_at > '2012-01-01' - -The tuple notation may also be used for ``IN`` clauses on clustering columns:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01')) - -The ``CONTAINS`` operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -``CONTAINS`` applies to the map values. The ``CONTAINS KEY`` operator may only be used on map columns and applies to the -map keys. - -.. _group-by-clause: - -Grouping results -~~~~~~~~~~~~~~~~ - -The ``GROUP BY`` option allows to condense into a single row all selected rows that share the same values for a set -of columns. - -Using the ``GROUP BY`` option, it is only possible to group rows at the partition key level or at a clustering column -level. By consequence, the ``GROUP BY`` option only accept as arguments primary key column names in the primary key -order. If a primary key column is restricted by an equality restriction it is not required to be present in the -``GROUP BY`` clause. - -Aggregate functions will produce a separate value for each group. 
If no ``GROUP BY`` clause is specified,
-aggregate functions will produce a single value for all the rows.
-
-If a column is selected without an aggregate function, in a statement with a ``GROUP BY``, the first value encountered
-in each group will be returned.
-
-.. _ordering-clause:
-
-Ordering results
-~~~~~~~~~~~~~~~~
-
-The ``ORDER BY`` clause allows selecting the order of the returned results. It takes as argument a list of column names
-along with the order for each column (``ASC`` for ascending and ``DESC`` for descending, omitting the order being
-equivalent to ``ASC``). Currently the possible orderings are limited by the :ref:`clustering order `
-defined on the table:
-
-- if the table has been defined without any specific ``CLUSTERING ORDER``, then the allowed orderings are the order
-  induced by the clustering columns and the reverse of that one.
-- otherwise, the orderings allowed are the order of the ``CLUSTERING ORDER`` option and the reversed one.
-
-.. _limit-clause:
-
-Limiting results
-~~~~~~~~~~~~~~~~
-
-The ``LIMIT`` option to a ``SELECT`` statement limits the number of rows returned by a query, while the ``PER PARTITION
-LIMIT`` option limits the number of rows returned for a given partition by the query. Note that both types of limit can
-be used in the same statement.
-
-.. _allow-filtering:
-
-Allowing filtering
-~~~~~~~~~~~~~~~~~~
-
-By default, CQL only allows select queries that don't involve “filtering” server side, i.e. queries where we know that
-all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering”
-queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of
-data **returned** by the query (which can be controlled through ``LIMIT``).
-
-The ``ALLOW FILTERING`` option explicitly allows (some) queries that require filtering. Please note that a query using
-``ALLOW FILTERING`` may thus have unpredictable performance (by the definition above), i.e. even a query that selects a
-handful of records **may** exhibit performance that depends on the total amount of data stored in the cluster.
-
-For instance, consider the following table holding user profiles with their year of birth (with a secondary index on
-it) and country of residence::
-
-    CREATE TABLE users (
-        username text PRIMARY KEY,
-        firstname text,
-        lastname text,
-        birth_year int,
-        country text
-    )
-
-    CREATE INDEX ON users(birth_year);
-
-Then the following queries are valid::
-
-    SELECT * FROM users;
-    SELECT * FROM users WHERE birth_year = 1981;
-
-because in both cases, Cassandra guarantees that the performance of these queries will be proportional to the amount of
-data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the
-number of user profiles stored in the database (not directly at least: due to secondary index implementation
-considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the
-amount of data stored; nevertheless, the number of nodes will always be many orders of magnitude lower than the number
-of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data
-returned can always be controlled by adding a ``LIMIT``.
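For example, a minimal sketch using the ``users`` table defined just above (the limit value is arbitrary)::

    SELECT * FROM users LIMIT 100;
    SELECT * FROM users WHERE birth_year = 1981 LIMIT 100;

Both queries remain "non filtering"; the ``LIMIT`` simply caps the amount of data returned.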
- -However, the following query will be rejected:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR'; - -because Cassandra cannot guarantee that it won't have to scan large amount of data even if the result to those query is -small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from -France. However, if you “know what you are doing”, you can force the execution of this query by using ``ALLOW -FILTERING`` and so the following query is valid:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING; - -.. _insert-statement: - -INSERT -^^^^^^ - -Inserting data for a row is done using an ``INSERT`` statement: - -.. productionlist:: - insert_statement: INSERT INTO `table_name` ( `names_values` | `json_clause` ) - : [ IF NOT EXISTS ] - : [ USING `update_parameter` ( AND `update_parameter` )* ] - names_values: `names` VALUES `tuple_literal` - json_clause: JSON `string` [ DEFAULT ( NULL | UNSET ) ] - names: '(' `column_name` ( ',' `column_name` )* ')' - -For instance:: - - INSERT INTO NerdMovies (movie, director, main_actor, year) - VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005) - USING TTL 86400; - - INSERT INTO NerdMovies JSON '{"movie": "Serenity", - "director": "Joss Whedon", - "year": 2005}'; - -The ``INSERT`` statement writes one or more columns for a given row in a table. Note that since a row is identified by -its ``PRIMARY KEY``, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the ``VALUES`` syntax. When using the ``JSON`` syntax, they are optional. See the -section on :ref:`JSON support ` for more detail. - -Note that unlike in SQL, ``INSERT`` does not check the prior existence of the row by default: the row is created if none -existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened. - -It is however possible to use the ``IF NOT EXISTS`` condition to only insert if the row does not exist prior to the -insertion. But please note that using ``IF NOT EXISTS`` will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly. - -All updates for an ``INSERT`` are applied atomically and in isolation. - -Please refer to the :ref:`UPDATE ` section for informations on the :token:`update_parameter`. - -Also note that ``INSERT`` does not support counters, while ``UPDATE`` does. - -.. _update-statement: - -UPDATE -^^^^^^ - -Updating a row is done using an ``UPDATE`` statement: - -.. productionlist:: - update_statement: UPDATE `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : SET `assignment` ( ',' `assignment` )* - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - update_parameter: ( TIMESTAMP | TTL ) ( `integer` | `bind_marker` ) - assignment: `simple_selection` '=' `term` - :| `column_name` '=' `column_name` ( '+' | '-' ) `term` - :| `column_name` '=' `list_literal` '+' `column_name` - simple_selection: `column_name` - :| `column_name` '[' `term` ']' - :| `column_name` '.' 
`field_name - condition: `simple_selection` `operator` `term` - -For instance:: - - UPDATE NerdMovies USING TTL 400 - SET director = 'Joss Whedon', - main_actor = 'Nathan Fillion', - year = 2005 - WHERE movie = 'Serenity'; - - UPDATE UserActions - SET total = total + 2 - WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 - AND action = 'click'; - -The ``UPDATE`` statement writes one or more columns for a given row in a table. The :token:`where_clause` is used to -select the row to update and must include all columns composing the ``PRIMARY KEY``. Non primary key columns are then -set using the ``SET`` keyword. - -Note that unlike in SQL, ``UPDATE`` does not check the prior existence of the row by default (except through ``IF``, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred. - -It is however possible to use the conditions on some columns through ``IF``, in which case the row will not be updated -unless the conditions are met. But, please note that using ``IF`` conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly. - -In an ``UPDATE`` statement, all updates within the same partition key are applied atomically and in isolation. - -Regarding the :token:`assignment`: - -- ``c = c + 3`` is used to increment/decrement counters. The column name after the '=' sign **must** be the same than - the one before the '=' sign. Note that increment/decrement is only allowed on counters, and are the *only* update - operations allowed on counters. See the section on :ref:`counters ` for details. -- ``id = id + `` and ``id[value1] = value2`` are for collections, see the :ref:`relevant section - ` for details. -- ``id.field = 3`` is for setting the value of a field on a non-frozen user-defined types. see the :ref:`relevant section - ` for details. - -.. _update-parameters: - -Update parameters -~~~~~~~~~~~~~~~~~ - -The ``UPDATE``, ``INSERT`` (and ``DELETE`` and ``BATCH`` for the ``TIMESTAMP``) statements support the following -parameters: - -- ``TIMESTAMP``: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in - microseconds) at the start of statement execution as the timestamp. This is usually a suitable default. -- ``TTL``: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are - automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not - the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL - is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a - default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of ``null`` is equivalent - to inserting with a TTL of 0. - -.. _delete_statement: - -DELETE -^^^^^^ - -Deleting rows or parts of rows uses the ``DELETE`` statement: - -.. 
productionlist:: - delete_statement: DELETE [ `simple_selection` ( ',' `simple_selection` ) ] - : FROM `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - -For instance:: - - DELETE FROM NerdMovies USING TIMESTAMP 1240003134 - WHERE movie = 'Serenity'; - - DELETE phone FROM Users - WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); - -The ``DELETE`` statement deletes columns and rows. If column names are provided directly after the ``DELETE`` keyword, -only those columns are deleted from the row indicated by the ``WHERE`` clause. Otherwise, whole rows are removed. - -The ``WHERE`` clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -``IN`` operator. A range of rows may be deleted using an inequality operator (such as ``>=``). - -``DELETE`` supports the ``TIMESTAMP`` option with the same semantics as in :ref:`updates `. - -In a ``DELETE`` statement, all deletions within the same partition key are applied atomically and in isolation. - -A ``DELETE`` operation can be conditional through the use of an ``IF`` clause, similar to ``UPDATE`` and ``INSERT`` -statements. However, as with ``INSERT`` and ``UPDATE`` statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly. - -.. _batch_statement: - -BATCH -^^^^^ - -Multiple ``INSERT``, ``UPDATE`` and ``DELETE`` can be executed in a single statement by grouping them through a -``BATCH`` statement: - -.. productionlist:: - batch_statement: BEGIN [ UNLOGGED | COUNTER ] BATCH - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : `modification_statement` ( ';' `modification_statement` )* - : APPLY BATCH - modification_statement: `insert_statement` | `update_statement` | `delete_statement` - -For instance:: - - BEGIN BATCH - INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user'); - UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3'; - INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c'); - DELETE name FROM users WHERE userid = 'user1'; - APPLY BATCH; - -The ``BATCH`` statement group multiple modification statements (insertions/updates and deletions) into a single -statement. It serves several purposes: - -- It saves network round-trips between the client and the server (and sometimes between the server coordinator and the - replicas) when batching multiple updates. -- All updates in a ``BATCH`` belonging to a given partition key are performed in isolation. -- By default, all operations in the batch are performed as *logged*, to ensure all mutations eventually complete (or - none will). See the notes on :ref:`UNLOGGED batches ` for more details. - -Note that: - -- ``BATCH`` statements may only contain ``UPDATE``, ``INSERT`` and ``DELETE`` statements (not other batches for instance). -- Batches are *not* a full analogue for SQL transactions. -- If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp - (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra's conflict - resolution procedure in the case of `timestamp ties `__, operations may - be applied in an order that is different from the order they are listed in the ``BATCH`` statement. To force a - particular operation ordering, you must specify per-operation timestamps. 
-- A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization. - -.. _unlogged-batches: - -``UNLOGGED`` batches -~~~~~~~~~~~~~~~~~~~~ - -By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition). - -There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur -this penalty, you can tell Cassandra to skip the batchlog with the ``UNLOGGED`` option. If the ``UNLOGGED`` option is -used, a failed batch might leave the patch only partly applied. - -``COUNTER`` batches -~~~~~~~~~~~~~~~~~~~ - -Use the ``COUNTER`` option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent. diff --git a/src/doc/3.11.6/_sources/cql/functions.rst.txt b/src/doc/3.11.6/_sources/cql/functions.rst.txt deleted file mode 100644 index 47026cd94..000000000 --- a/src/doc/3.11.6/_sources/cql/functions.rst.txt +++ /dev/null @@ -1,558 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-functions: - -.. Need some intro for UDF and native functions in general and point those to it. -.. _udfs: -.. _native-functions: - -Functions ---------- - -CQL supports 2 main categories of functions: - -- the :ref:`scalar functions `, which simply take a number of values and produce an output with it. -- the :ref:`aggregate functions `, which are used to aggregate multiple rows results from a - ``SELECT`` statement. - -In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined -functions. - -.. note:: By default, the use of user-defined functions is disabled by default for security concerns (even when - enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do - evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions`` - in ``cassandra.yaml`` to enable them. - -A function is identifier by its name: - -.. productionlist:: - function_name: [ `keyspace_name` '.' ] `name` - -.. _scalar-functions: - -Scalar functions -^^^^^^^^^^^^^^^^ - -.. _scalar-native-functions: - -Native functions -~~~~~~~~~~~~~~~~ - -Cast -```` - -The ``cast`` function can be used to converts one native datatype to another. - -The following table describes the conversions supported by the ``cast`` function. Cassandra will silently ignore any -cast converting a datatype into its own datatype. 
- -=============== ======================================================================================================= - From To -=============== ======================================================================================================= - ``ascii`` ``text``, ``varchar`` - ``bigint`` ``tinyint``, ``smallint``, ``int``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``boolean`` ``text``, ``varchar`` - ``counter`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``date`` ``timestamp`` - ``decimal`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``varint``, ``text``, - ``varchar`` - ``double`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``float`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``inet`` ``text``, ``varchar`` - ``int`` ``tinyint``, ``smallint``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``smallint`` ``tinyint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``time`` ``text``, ``varchar`` - ``timestamp`` ``date``, ``text``, ``varchar`` - ``timeuuid`` ``timestamp``, ``date``, ``text``, ``varchar`` - ``tinyint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``uuid`` ``text``, ``varchar`` - ``varint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``text``, - ``varchar`` -=============== ======================================================================================================= - -The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value -'1.0'. For instance:: - - SELECT avg(cast(count as double)) FROM myTable - -Token -````` - -The ``token`` function allows to compute the token for a given partition key. The exact signature of the token function -depends on the table concerned and of the partitioner used by the cluster. - -The type of the arguments of the ``token`` depend on the type of the partition key columns. The return type depend on -the partitioner in use: - -- For Murmur3Partitioner, the return type is ``bigint``. -- For RandomPartitioner, the return type is ``varint``. -- For ByteOrderedPartitioner, the return type is ``blob``. - -For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:: - - CREATE TABLE users ( - userid text PRIMARY KEY, - username text, - ) - -then the ``token`` function will take a single argument of type ``text`` (in that case, the partition key is ``userid`` -(there is no clustering columns so the partition key is the same than the primary key)), and the return type will be -``bigint``. - -Uuid -```` -The ``uuid`` function takes no parameters and generates a random type 4 uuid suitable for use in ``INSERT`` or -``UPDATE`` statements. - -.. _timeuuid-functions: - -Timeuuid functions -`````````````````` - -``now`` -####### - -The ``now`` function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time where -the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in -``WHERE`` clauses. 
For instance, a query of the form:: - - SELECT * FROM myTable WHERE t = now() - -will never return any result by design, since the value returned by ``now()`` is guaranteed to be unique. - -``minTimeuuid`` and ``maxTimeuuid`` -################################### - -The ``minTimeuuid`` (resp. ``maxTimeuuid``) function takes a ``timestamp`` value ``t`` (which can be `either a timestamp -or a date string `) and return a *fake* ``timeuuid`` corresponding to the *smallest* (resp. *biggest*) -possible ``timeuuid`` having for timestamp ``t``. So for instance:: - - SELECT * FROM myTable - WHERE t > maxTimeuuid('2013-01-01 00:05+0000') - AND t < minTimeuuid('2013-02-02 10:00+0000') - -will select all rows where the ``timeuuid`` column ``t`` is strictly older than ``'2013-01-01 00:05+0000'`` but strictly -younger than ``'2013-02-02 10:00+0000'``. Please note that ``t >= maxTimeuuid('2013-01-01 00:05+0000')`` would still -*not* select a ``timeuuid`` generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to ``t > -maxTimeuuid('2013-01-01 00:05+0000')``. - -.. note:: We called the values generated by ``minTimeuuid`` and ``maxTimeuuid`` *fake* UUID because they do no respect - the Time-Based UUID generation process specified by the `RFC 4122 `__. In - particular, the value returned by these 2 methods will not be unique. This means you should only use those methods - for querying (as in the example above). Inserting the result of those methods is almost certainly *a bad idea*. - -Time conversion functions -````````````````````````` - -A number of functions are provided to “convert” a ``timeuuid``, a ``timestamp`` or a ``date`` into another ``native`` -type. - -===================== =============== =================================================================== - Function name Input type Description -===================== =============== =================================================================== - ``toDate`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``date`` type - ``toDate`` ``timestamp`` Converts the ``timestamp`` argument into a ``date`` type - ``toTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``timestamp`` type - ``toTimestamp`` ``date`` Converts the ``date`` argument into a ``timestamp`` type - ``toUnixTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``timestamp`` Converts the ``timestamp`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``date`` Converts the ``date`` argument into a ``bigInt`` raw value - ``dateOf`` ``timeuuid`` Similar to ``toTimestamp(timeuuid)`` (DEPRECATED) - ``unixTimestampOf`` ``timeuuid`` Similar to ``toUnixTimestamp(timeuuid)`` (DEPRECATED) -===================== =============== =================================================================== - -Blob conversion functions -````````````````````````` -A number of functions are provided to “convert” the native types into binary data (``blob``). For every -```` ``type`` supported by CQL (a notable exceptions is ``blob``, for obvious reasons), the function -``typeAsBlob`` takes a argument of type ``type`` and return it as a ``blob``. Conversely, the function ``blobAsType`` -takes a 64-bit ``blob`` argument and convert it to a ``bigint`` value. And so for instance, ``bigintAsBlob(3)`` is -``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``. - -.. 
_user-defined-scalar-functions: - -User-defined functions -~~~~~~~~~~~~~~~~~~~~~~ - -User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in *Java* and *JavaScript*. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath. - -UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster. - -UDFs can be *overloaded* - i.e. multiple UDFs with different argument types but the same function name. Example:: - - CREATE FUNCTION sample ( arg int ) ...; - CREATE FUNCTION sample ( arg text ) ...; - -User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing. - -It is valid to use *complex* types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types. - -Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too. - -Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:: - - CREATE FUNCTION some_function ( arg int ) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE java - AS $$ return arg; $$; - - SELECT some_function(column) FROM atable ...; - UPDATE atable SET col = some_function(?) ...; - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct_using_udt ( udtarg frozen ) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ return udtarg.getString("txt"); $$; - -User-defined functions can be used in ``SELECT``, ``INSERT`` and ``UPDATE`` statements. - -The implicitly available ``udfContext`` field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:: - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct\_using\_udt ( somearg int ) - RETURNS NULL ON NULL INPUT - RETURNS custom_type - LANGUAGE java - AS $$ - UDTValue udt = udfContext.newReturnUDTValue(); - udt.setString("txt", "some string"); - udt.setInt("i", 42); - return udt; - $$; - -The definition of the ``UDFContext`` interface can be found in the Apache Cassandra source code for -``org.apache.cassandra.cql3.functions.UDFContext``. - -.. code-block:: java - - public interface UDFContext - { - UDTValue newArgUDTValue(String argName); - UDTValue newArgUDTValue(int argNum); - UDTValue newReturnUDTValue(); - UDTValue newUDTValue(String udtName); - TupleValue newArgTupleValue(String argName); - TupleValue newArgTupleValue(int argNum); - TupleValue newReturnTupleValue(); - TupleValue newTupleValue(String cqlDefinition); - } - -Java UDFs already have some imports for common interfaces and classes defined. These imports are: - -.. 
code-block:: java - - import java.nio.ByteBuffer; - import java.util.List; - import java.util.Map; - import java.util.Set; - import org.apache.cassandra.cql3.functions.UDFContext; - import com.datastax.driver.core.TypeCodec; - import com.datastax.driver.core.TupleValue; - import com.datastax.driver.core.UDTValue; - -Please note, that these convenience imports are not available for script UDFs. - -.. _create-function-statement: - -CREATE FUNCTION -``````````````` - -Creating a new user-defined function uses the ``CREATE FUNCTION`` statement: - -.. productionlist:: - create_function_statement: CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS] - : `function_name` '(' `arguments_declaration` ')' - : [ CALLED | RETURNS NULL ] ON NULL INPUT - : RETURNS `cql_type` - : LANGUAGE `identifier` - : AS `string` - arguments_declaration: `identifier` `cql_type` ( ',' `identifier` `cql_type` )* - -For instance:: - - CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen, listarg list) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - - CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int) - CALLED ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - -``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords either creates a function or replaces an existing one with -the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE`` fails if a function with the same signature already -exists. - -If the optional ``IF NOT EXISTS`` keywords are used, the function will -only be created if another function with the same signature does not -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -Behavior on invocation with ``null`` values must be defined for each -function. There are two options: - -#. ``RETURNS NULL ON NULL INPUT`` declares that the function will always - return ``null`` if any of the input arguments is ``null``. -#. ``CALLED ON NULL INPUT`` declares that the function will always be - executed. - -Function Signature -################## - -Signatures are used to distinguish individual functions. The signature consists of: - -#. The fully qualified function name - i.e *keyspace* plus *function-name* -#. The concatenated list of all argument types - -Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules. - -Functions belong to a keyspace. If no keyspace is specified in ````, the current keyspace is used (i.e. -the keyspace specified using the ``USE`` statement). It is not possible to create a user-defined function in one of the -system keyspaces. - -.. _drop-function-statement: - -DROP FUNCTION -````````````` - -Dropping a function uses the ``DROP FUNCTION`` statement: - -.. productionlist:: - drop_function_statement: DROP FUNCTION [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - arguments_signature: `cql_type` ( ',' `cql_type` )* - -For instance:: - - DROP FUNCTION myfunction; - DROP FUNCTION mykeyspace.afunction; - DROP FUNCTION afunction ( int ); - DROP FUNCTION afunction ( text ); - -You must specify the argument types (:token:`arguments_signature`) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions). - -``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if -it doesn't - -.. 
_aggregate-functions: - -Aggregate functions -^^^^^^^^^^^^^^^^^^^ - -Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set. - -If ``normal`` columns, ``scalar functions``, ``UDT`` fields, ``writetime`` or ``ttl`` are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query. - -Native aggregates -~~~~~~~~~~~~~~~~~ - -.. _count-function: - -Count -````` - -The ``count`` function can be used to count the rows returned by a query. Example:: - - SELECT COUNT (*) FROM plays; - SELECT COUNT (1) FROM plays; - -It also can be used to count the non null value of a given column:: - - SELECT COUNT (scores) FROM plays; - -Max and Min -``````````` - -The ``max`` and ``min`` functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:: - - SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake'; - -Sum -``` - -The ``sum`` function can be used to sum up all the values returned by a query for a given column. For instance:: - - SELECT SUM (players) FROM plays; - -Avg -``` - -The ``avg`` function can be used to compute the average of all the values returned by a query for a given column. For -instance:: - - SELECT AVG (players) FROM plays; - -.. _user-defined-aggregates-functions: - -User-Defined Aggregates -~~~~~~~~~~~~~~~~~~~~~~~ - -User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -*count*, *min*, and *max*. - -Each aggregate requires an *initial state* (``INITCOND``, which defaults to ``null``) of type ``STYPE``. The first -argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional ``FINALFUNC`` is executed with last -state value as its argument. - -``STYPE`` is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate). - -User-defined aggregates can be used in ``SELECT`` statement. - -A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` -statement):: - - CREATE OR REPLACE FUNCTION averageState(state tuple, val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS $$ - if (val != null) { - state.setInt(0, state.getInt(0)+1); - state.setLong(1, state.getLong(1)+val.intValue()); - } - return state; - $$; - - CREATE OR REPLACE FUNCTION averageFinal (state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS $$ - double r = 0; - if (state.getInt(0) == 0) return null; - r = state.getLong(1); - r /= state.getInt(0); - return Double.valueOf(r); - $$; - - CREATE OR REPLACE AGGREGATE average(int) - SFUNC averageState - STYPE tuple - FINALFUNC averageFinal - INITCOND (0, 0); - - CREATE TABLE atable ( - pk int PRIMARY KEY, - val int - ); - - INSERT INTO atable (pk, val) VALUES (1,1); - INSERT INTO atable (pk, val) VALUES (2,2); - INSERT INTO atable (pk, val) VALUES (3,3); - INSERT INTO atable (pk, val) VALUES (4,4); - - SELECT average(val) FROM atable; - -.. 
_create-aggregate-statement: - -CREATE AGGREGATE -```````````````` - -Creating (or replacing) a user-defined aggregate function uses the ``CREATE AGGREGATE`` statement: - -.. productionlist:: - create_aggregate_statement: CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ] - : `function_name` '(' `arguments_signature` ')' - : SFUNC `function_name` - : STYPE `cql_type` - : [ FINALFUNC `function_name` ] - : [ INITCOND `term` ] - -See above for a complete example. - -``CREATE AGGREGATE`` with the optional ``OR REPLACE`` keywords either creates an aggregate or replaces an existing one -with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature -already exists. - -``CREATE AGGREGATE`` with the optional ``IF NOT EXISTS`` keywords either creates an aggregate if it does not already -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -``STYPE`` defines the type of the state value and must be specified. - -The optional ``INITCOND`` defines the initial state value for the aggregate. It defaults to ``null``. A non-\ ``null`` -``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``. - -``SFUNC`` references an existing function to be used as the state modifying function. The type of first argument of the -state function must match ``STYPE``. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called -with ``null``. - -The optional ``FINALFUNC`` is called just before the aggregate result is returned. It must take only one argument with -type ``STYPE``. The return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS -NULL ON NULL INPUT`` means that the aggregate's return value will be ``null``, if the last state is ``null``. - -If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is -defined, it is the return type of that function. - -.. _drop-aggregate-statement: - -DROP AGGREGATE -`````````````` - -Dropping an user-defined aggregate function uses the ``DROP AGGREGATE`` statement: - -.. productionlist:: - drop_aggregate_statement: DROP AGGREGATE [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - -For instance:: - - DROP AGGREGATE myAggregate; - DROP AGGREGATE myKeyspace.anAggregate; - DROP AGGREGATE someAggregate ( int ); - DROP AGGREGATE someAggregate ( text ); - -The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates). - -``DROP AGGREGATE`` with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist. diff --git a/src/doc/3.11.6/_sources/cql/index.rst.txt b/src/doc/3.11.6/_sources/cql/index.rst.txt deleted file mode 100644 index 00d90e41e..000000000 --- a/src/doc/3.11.6/_sources/cql/index.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _cql: - -The Cassandra Query Language (CQL) -================================== - -This document describes the Cassandra Query Language (CQL) [#]_. Note that this document describes the last version of -the languages. However, the `changes <#changes>`_ section provides the diff between the different versions of CQL. - -CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For -that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have -in SQL. But please note that as such, they do **not** refer to the concept of rows and columns found in the deprecated -thrift API (and earlier version 1 and 2 of CQL). - -.. toctree:: - :maxdepth: 2 - - definitions - types - ddl - dml - indexes - mvs - security - functions - json - triggers - appendices - changes - -.. [#] Technically, this document CQL version 3, which is not backward compatible with CQL version 1 and 2 (which have - been deprecated and remove) and differs from it in numerous ways. diff --git a/src/doc/3.11.6/_sources/cql/indexes.rst.txt b/src/doc/3.11.6/_sources/cql/indexes.rst.txt deleted file mode 100644 index 81fe429d0..000000000 --- a/src/doc/3.11.6/_sources/cql/indexes.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _secondary-indexes: - -Secondary Indexes ------------------ - -CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by: - -.. productionlist:: - index_name: re('[a-zA-Z_0-9]+') - - - -.. _create-index-statement: - -CREATE INDEX -^^^^^^^^^^^^ - -Creating a secondary index on a table uses the ``CREATE INDEX`` statement: - -.. 
productionlist:: - create_index_statement: CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ `index_name` ] - : ON `table_name` '(' `index_identifier` ')' - : [ USING `string` [ WITH OPTIONS = `map_literal` ] ] - index_identifier: `column_name` - :| ( KEYS | VALUES | ENTRIES | FULL ) '(' `column_name` ')' - -For instance:: - - CREATE INDEX userIndex ON NerdMovies (user); - CREATE INDEX ON Mutants (abilityId); - CREATE INDEX ON users (keys(favs)); - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass'; - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'}; - -The ``CREATE INDEX`` statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ``ON`` keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time. - -Attempting to create an already existing index will return an error unless the ``IF NOT EXISTS`` option is used. If it -is used, the statement will be a no-op if the index already exists. - -Indexes on Map Keys -~~~~~~~~~~~~~~~~~~~ - -When creating an index on a :ref:`maps `, you may index either the keys or the values. If the column identifier is -placed within the ``keys()`` function, the index will be on the map keys, allowing you to use ``CONTAINS KEY`` in -``WHERE`` clauses. Otherwise, the index will be on the map values. - -.. _drop-index-statement: - -DROP INDEX -^^^^^^^^^^ - -Dropping a secondary index uses the ``DROP INDEX`` statement: - -.. productionlist:: - drop_index_statement: DROP INDEX [ IF EXISTS ] `index_name` - -The ``DROP INDEX`` statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index. - -If the index does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. diff --git a/src/doc/3.11.6/_sources/cql/json.rst.txt b/src/doc/3.11.6/_sources/cql/json.rst.txt deleted file mode 100644 index 539180aed..000000000 --- a/src/doc/3.11.6/_sources/cql/json.rst.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-json: - -JSON Support ------------- - -Cassandra 2.2 introduces JSON support to :ref:`SELECT ` and :ref:`INSERT ` -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents. 
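For orientation, here is a minimal round trip using a hypothetical ``users`` table (the exact formatting rules for keys and values are detailed in the subsections below)::

    CREATE TABLE users (
        id text PRIMARY KEY,
        age int,
        state text
    );

    INSERT INTO users JSON '{"id": "user123", "age": 42, "state": "TX"}';

    // Returns each row as a single JSON-encoded map, e.g. {"id": "user123", "age": 42, "state": "TX"}
    SELECT JSON * FROM users;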
- -SELECT JSON -^^^^^^^^^^^ - -With ``SELECT`` statements, the ``JSON`` keyword can be used to return each row as a single ``JSON`` encoded map. The -remainder of the ``SELECT`` statement behavior is the same. - -The result map keys are the same as the column names in a normal result set. For example, a statement like ``SELECT JSON -a, ttl(b) FROM ...`` would result in a map with keys ``"a"`` and ``"ttl(b)"``. However, this is one notable exception: -for symmetry with ``INSERT JSON`` behavior, case-sensitive column names with upper-case letters will be surrounded with -double quotes. For example, ``SELECT JSON myColumn FROM ...`` would result in a map key ``"\"myColumn\""`` (note the -escaped quotes). - -The map values will ``JSON``-encoded representations (as described below) of the result set values. - -INSERT JSON -^^^^^^^^^^^ - -With ``INSERT`` statements, the new ``JSON`` keyword can be used to enable inserting a ``JSON`` encoded map as a single -row. The format of the ``JSON`` map should generally match that returned by a ``SELECT JSON`` statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named "myKey" and "value", you would do the following:: - - INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}' - -By default (or if ``DEFAULT NULL`` is explicitly used), a column omitted from the ``JSON`` map will be set to ``NULL``, -meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). -Alternatively, if the ``DEFAULT UNSET`` directive is used after the value, omitted column values will be left unset, -meaning that pre-existing values for those column will be preserved. - - -JSON Encoding of Cassandra Data Types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Where possible, Cassandra will represent and accept data types in their native ``JSON`` representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native ``JSON`` collections (maps and lists) or a JSON-encoded string -representation of the collection. 
- -The following table describes the encodings that Cassandra will accept in ``INSERT JSON`` values (and ``fromJson()`` -arguments) as well as the format Cassandra will use when returning data for ``SELECT JSON`` statements (and -``fromJson()``): - -=============== ======================== =============== ============================================================== - Type Formats accepted Return format Notes -=============== ======================== =============== ============================================================== - ``ascii`` string string Uses JSON's ``\u`` character escape - ``bigint`` integer, string integer String must be valid 64 bit integer - ``blob`` string string String should be 0x followed by an even number of hex digits - ``boolean`` boolean, string boolean String must be "true" or "false" - ``date`` string string Date in format ``YYYY-MM-DD``, timezone UTC - ``decimal`` integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in - client-side decoder - ``double`` integer, float, string float String must be valid integer or float - ``float`` integer, float, string float String must be valid integer or float - ``inet`` string string IPv4 or IPv6 address - ``int`` integer, string integer String must be valid 32 bit integer - ``list`` list, string list Uses JSON's native list representation - ``map`` map, string map Uses JSON's native map representation - ``smallint`` integer, string integer String must be valid 16 bit integer - ``set`` list, string list Uses JSON's native list representation - ``text`` string string Uses JSON's ``\u`` character escape - ``time`` string string Time of day in format ``HH-MM-SS[.fffffffff]`` - ``timestamp`` integer, string string A timestamp. Strings constant allows to input :ref:`timestamps - as dates `. Datestamps with format ``YYYY-MM-DD - HH:MM:SS.SSS`` are returned. - ``timeuuid`` string string Type 1 UUID. See :token:`constant` for the UUID format - ``tinyint`` integer, string integer String must be valid 8 bit integer - ``tuple`` list, string list Uses JSON's native list representation - ``UDT`` map, string map Uses JSON's native map representation with field names as keys - ``uuid`` string string See :token:`constant` for the UUID format - ``varchar`` string string Uses JSON's ``\u`` character escape - ``varint`` integer, string integer Variable length; may overflow 32 or 64 bit integers in - client-side decoder -=============== ======================== =============== ============================================================== - -The fromJson() Function -^^^^^^^^^^^^^^^^^^^^^^^ - -The ``fromJson()`` function may be used similarly to ``INSERT JSON``, but for a single column value. It may only be used -in the ``VALUES`` clause of an ``INSERT`` statement or as one of the column values in an ``UPDATE``, ``DELETE``, or -``SELECT`` statement. For example, it cannot be used in the selection clause of a ``SELECT`` statement. - -The toJson() Function -^^^^^^^^^^^^^^^^^^^^^ - -The ``toJson()`` function may be used similarly to ``SELECT JSON``, but for a single column value. It may only be used -in the selection clause of a ``SELECT`` statement. diff --git a/src/doc/3.11.6/_sources/cql/mvs.rst.txt b/src/doc/3.11.6/_sources/cql/mvs.rst.txt deleted file mode 100644 index aabea10d8..000000000 --- a/src/doc/3.11.6/_sources/cql/mvs.rst.txt +++ /dev/null @@ -1,166 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _materialized-views: - -Materialized Views ------------------- - -Materialized views names are defined by: - -.. productionlist:: - view_name: re('[a-zA-Z_0-9]+') - - -.. _create-materialized-view-statement: - -CREATE MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^^ - -You can create a materialized view on a table using a ``CREATE MATERIALIZED VIEW`` statement: - -.. productionlist:: - create_materialized_view_statement: CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] `view_name` AS - : `select_statement` - : PRIMARY KEY '(' `primary_key` ')' - : WITH `table_options` - -For instance:: - - CREATE MATERIALIZED VIEW monkeySpecies_by_population AS - SELECT * FROM monkeySpecies - WHERE population IS NOT NULL AND species IS NOT NULL - PRIMARY KEY (population, species) - WITH comment='Allow query by population instead of species'; - -The ``CREATE MATERIALIZED VIEW`` statement creates a new materialized view. Each such view is a set of *rows* which -corresponds to rows which are present in the underlying, or base, table specified in the ``SELECT`` statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view. - -Creating a materialized view has 3 main parts: - -- The :ref:`select statement ` that restrict the data included in the view. -- The :ref:`primary key ` definition for the view. -- The :ref:`options ` for the view. - -Attempting to create an already existing materialized view will return an error unless the ``IF NOT EXISTS`` option is -used. If it is used, the statement will be a no-op if the materialized view already exists. - -.. _mv-select: - -MV select statement -``````````````````` - -The select statement of a materialized view creation defines which of the base table is included in the view. That -statement is limited in a number of ways: - -- the :ref:`selection ` is limited to those that only select columns of the base table. In other - words, you can't use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can - however use `*` as a shortcut of selecting all columns. Further, :ref:`static columns ` cannot be - included in a materialized view (which means ``SELECT *`` isn't allowed if the base table has static columns). -- the ``WHERE`` clause have the following restrictions: - - - it cannot include any :token:`bind_marker`. - - the columns that are not part of the *base table* primary key can only be restricted by an ``IS NOT NULL`` - restriction. No other restriction is allowed. - - as the columns that are part of the *view* primary key cannot be null, they must always be at least restricted by a - ``IS NOT NULL`` restriction (or any other restriction, but they must have one). - -- it cannot have neither an :ref:`ordering clause `, nor a :ref:`limit `, nor :ref:`ALLOW - FILTERING `. - -.. 
_mv-primary-key: - -MV primary key -`````````````` - -A view must have a primary key and that primary key must conform to the following restrictions: - -- it must contain all the primary key columns of the base table. This ensures that every row of the view correspond to - exactly one row of the base table. -- it can only contain a single column that is not a primary key column in the base table. - -So for instance, give the following base table definition:: - - CREATE TABLE t ( - k int, - c1 int, - c2 int, - v1 int, - v2 int, - PRIMARY KEY (k, c1, c2) - ) - -then the following view definitions are allowed:: - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, k, c2) - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (v1, k, c1, c2) - -but the following ones are **not** allowed:: - - // Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL - PRIMARY KEY (v1, v2, k, c1, c2) - - // Error: must include k in the primary as it's a base table primary key column - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, c2) - - -.. _mv-options: - -MV options -`````````` - -A materialized view is internally implemented by a table and as such, creating a MV allows the :ref:`same options than -creating a table `. - - -.. _alter-materialized-view-statement: - -ALTER MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^ - -After creation, you can alter the options of a materialized view using the ``ALTER MATERIALIZED VIEW`` statement: - -.. productionlist:: - alter_materialized_view_statement: ALTER MATERIALIZED VIEW `view_name` WITH `table_options` - -The options that can be updated are the same than at creation time and thus the :ref:`same than for tables -`. - -.. _drop-materialized-view-statement: - -DROP MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^ - -Dropping a materialized view users the ``DROP MATERIALIZED VIEW`` statement: - -.. productionlist:: - drop_materialized_view_statement: DROP MATERIALIZED VIEW [ IF EXISTS ] `view_name`; - -If the materialized view does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case -the operation is a no-op. diff --git a/src/doc/3.11.6/_sources/cql/security.rst.txt b/src/doc/3.11.6/_sources/cql/security.rst.txt deleted file mode 100644 index 099fcc48e..000000000 --- a/src/doc/3.11.6/_sources/cql/security.rst.txt +++ /dev/null @@ -1,502 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. 
_cql-security: - -Security --------- - -.. _cql-roles: - -Database Roles -^^^^^^^^^^^^^^ - -CQL uses database roles to represent users and group of users. Syntactically, a role is defined by: - -.. productionlist:: - role_name: `identifier` | `string` - -.. _create-role-statement: - -CREATE ROLE -~~~~~~~~~~~ - -Creating a role uses the ``CREATE ROLE`` statement: - -.. productionlist:: - create_role_statement: CREATE ROLE [ IF NOT EXISTS ] `role_name` - : [ WITH `role_options` ] - role_options: `role_option` ( AND `role_option` )* - role_option: PASSWORD '=' `string` - :| LOGIN '=' `boolean` - :| SUPERUSER '=' `boolean` - :| OPTIONS '=' `map_literal` - -For instance:: - - CREATE ROLE new_role; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true; - CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 }; - -By default roles do not possess ``LOGIN`` privileges or ``SUPERUSER`` status. - -:ref:`Permissions ` on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and ``SUPERUSER`` status are inherited, but the ``LOGIN`` privilege is -not. - -If a role has the ``LOGIN`` privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role. - -Only a client with with the ``CREATE`` permission on the database roles resource may issue ``CREATE ROLE`` requests (see -the :ref:`relevant section ` below), unless the client is a ``SUPERUSER``. Role management in Cassandra -is pluggable and custom implementations may support only a subset of the listed options. - -Role names should be quoted if they contain non-alphanumeric characters. - -.. _setting-credentials-for-internal-authentication: - -Setting credentials for internal authentication -``````````````````````````````````````````````` - -Use the ``WITH PASSWORD`` clause to set a password for internal authentication, enclosing the password in single -quotation marks. - -If internal authentication has not been set up or the role does not have ``LOGIN`` privileges, the ``WITH PASSWORD`` -clause is not necessary. - -Creating a role conditionally -````````````````````````````` - -Attempting to create an existing role results in an invalid query condition unless the ``IF NOT EXISTS`` option is used. -If the option is used and the role exists, the statement is a no-op:: - - CREATE ROLE other_role; - CREATE ROLE IF NOT EXISTS other_role; - - -.. _alter-role-statement: - -ALTER ROLE -~~~~~~~~~~ - -Altering a role options uses the ``ALTER ROLE`` statement: - -.. productionlist:: - alter_role_statement: ALTER ROLE `role_name` WITH `role_options` - -For instance:: - - ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false; - -Conditions on executing ``ALTER ROLE`` statements: - -- A client must have ``SUPERUSER`` status to alter the ``SUPERUSER`` status of another role -- A client cannot alter the ``SUPERUSER`` status of any role it currently holds -- A client can only modify certain properties of the role with which it identified at login (e.g. ``PASSWORD``) -- To modify properties of a role, the client must be granted ``ALTER`` :ref:`permission ` on that role - -.. 
_drop-role-statement: - -DROP ROLE -~~~~~~~~~ - -Dropping a role uses the ``DROP ROLE`` statement: - -.. productionlist:: - drop_role_statement: DROP ROLE [ IF EXISTS ] `role_name` - -``DROP ROLE`` requires the client to have ``DROP`` :ref:`permission ` on the role in question. In -addition, client may not ``DROP`` the role with which it identified at login. Finally, only a client with ``SUPERUSER`` -status may ``DROP`` another ``SUPERUSER`` role. - -Attempting to drop a role which does not exist results in an invalid query condition unless the ``IF EXISTS`` option is -used. If the option is used and the role does not exist the statement is a no-op. - -.. _grant-role-statement: - -GRANT ROLE -~~~~~~~~~~ - -Granting a role to another uses the ``GRANT ROLE`` statement: - -.. productionlist:: - grant_role_statement: GRANT `role_name` TO `role_name` - -For instance:: - - GRANT report_writer TO alice; - -This statement grants the ``report_writer`` role to ``alice``. Any permissions granted to ``report_writer`` are also -acquired by ``alice``. - -Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:: - - GRANT role_a TO role_b; - GRANT role_b TO role_a; - - GRANT role_a TO role_b; - GRANT role_b TO role_c; - GRANT role_c TO role_a; - -.. _revoke-role-statement: - -REVOKE ROLE -~~~~~~~~~~~ - -Revoking a role uses the ``REVOKE ROLE`` statement: - -.. productionlist:: - revoke_role_statement: REVOKE `role_name` FROM `role_name` - -For instance:: - - REVOKE report_writer FROM alice; - -This statement revokes the ``report_writer`` role from ``alice``. Any permissions that ``alice`` has acquired via the -``report_writer`` role are also revoked. - -.. _list-roles-statement: - -LIST ROLES -~~~~~~~~~~ - -All the known roles (in the system or granted to specific role) can be listed using the ``LIST ROLES`` statement: - -.. productionlist:: - list_roles_statement: LIST ROLES [ OF `role_name` ] [ NORECURSIVE ] - -For instance:: - - LIST ROLES; - -returns all known roles in the system, this requires ``DESCRIBE`` permission on the database roles resource. And:: - - LIST ROLES OF alice; - -enumerates all roles granted to ``alice``, including those transitively acquired. But:: - - LIST ROLES OF bob NORECURSIVE - -lists all roles directly granted to ``bob`` without including any of the transitively acquired ones. - -Users -^^^^^ - -Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -``USER``. For backward compatibility, the legacy syntax has been preserved with ``USER`` centric statements becoming -synonyms for the ``ROLE`` based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role. - -.. _create-user-statement: - -CREATE USER -~~~~~~~~~~~ - -Creating a user uses the ``CREATE USER`` statement: - -.. productionlist:: - create_user_statement: CREATE USER [ IF NOT EXISTS ] `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - user_option: SUPERUSER | NOSUPERUSER - -For instance:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER; - -``CREATE USER`` is equivalent to ``CREATE ROLE`` where the ``LOGIN`` option is ``true``. 
So, the following pairs of -statements are equivalent:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - - CREATE USER alice WITH PASSWORD 'password_a'; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - -.. _alter-user-statement: - -ALTER USER -~~~~~~~~~~ - -Altering the options of a user uses the ``ALTER USER`` statement: - -.. productionlist:: - alter_user_statement: ALTER USER `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - -For instance:: - - ALTER USER alice WITH PASSWORD 'PASSWORD_A'; - ALTER USER bob SUPERUSER; - -.. _drop-user-statement: - -DROP USER -~~~~~~~~~ - -Dropping a user uses the ``DROP USER`` statement: - -.. productionlist:: - drop_user_statement: DROP USER [ IF EXISTS ] `role_name` - -.. _list-users-statement: - -LIST USERS -~~~~~~~~~~ - -Existing users can be listed using the ``LIST USERS`` statement: - -.. productionlist:: - list_users_statement: LIST USERS - -Note that this statement is equivalent to:: - - LIST ROLES; - -but only roles with the ``LOGIN`` privilege are included in the output. - -Data Control -^^^^^^^^^^^^ - -.. _cql-permissions: - -Permissions -~~~~~~~~~~~ - -Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically: - -- The hierarchy of Data resources, Keyspaces and Tables has the structure ``ALL KEYSPACES`` -> ``KEYSPACE`` -> - ``TABLE``. -- Function resources have the structure ``ALL FUNCTIONS`` -> ``KEYSPACE`` -> ``FUNCTION`` -- Resources representing roles have the structure ``ALL ROLES`` -> ``ROLE`` -- Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ``ALL MBEANS`` -> - ``MBEAN`` - -Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting ``SELECT`` on a ``KEYSPACE`` automatically grants it on all ``TABLES`` in that ``KEYSPACE``. Likewise, granting -a permission on ``ALL FUNCTIONS`` grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace. - -Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes. - -The full set of available permissions is: - -- ``CREATE`` -- ``ALTER`` -- ``DROP`` -- ``SELECT`` -- ``MODIFY`` -- ``AUTHORIZE`` -- ``DESCRIBE`` -- ``EXECUTE`` - -Not all permissions are applicable to every type of resource. For instance, ``EXECUTE`` is only relevant in the context -of functions or mbeans; granting ``EXECUTE`` on a resource representing a table is nonsensical. Attempting to ``GRANT`` -a permission on resource to which it cannot be applied results in an error response. 
The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission. - -=============== =============================== ======================================================================= - Permission Resource Operations -=============== =============================== ======================================================================= - ``CREATE`` ``ALL KEYSPACES`` ``CREATE KEYSPACE`` and ``CREATE TABLE`` in any keyspace - ``CREATE`` ``KEYSPACE`` ``CREATE TABLE`` in specified keyspace - ``CREATE`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` in any keyspace and ``CREATE AGGREGATE`` in any - keyspace - ``CREATE`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE`` in specified keyspace - ``CREATE`` ``ALL ROLES`` ``CREATE ROLE`` - ``ALTER`` ``ALL KEYSPACES`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in any keyspace - ``ALTER`` ``KEYSPACE`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in specified keyspace - ``ALTER`` ``TABLE`` ``ALTER TABLE`` - ``ALTER`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing any existing - ``ALTER`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing in - specified keyspace - ``ALTER`` ``FUNCTION`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing - ``ALTER`` ``ALL ROLES`` ``ALTER ROLE`` on any role - ``ALTER`` ``ROLE`` ``ALTER ROLE`` - ``DROP`` ``ALL KEYSPACES`` ``DROP KEYSPACE`` and ``DROP TABLE`` in any keyspace - ``DROP`` ``KEYSPACE`` ``DROP TABLE`` in specified keyspace - ``DROP`` ``TABLE`` ``DROP TABLE`` - ``DROP`` ``ALL FUNCTIONS`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in any keyspace - ``DROP`` ``ALL FUNCTIONS IN KEYSPACE`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in specified keyspace - ``DROP`` ``FUNCTION`` ``DROP FUNCTION`` - ``DROP`` ``ALL ROLES`` ``DROP ROLE`` on any role - ``DROP`` ``ROLE`` ``DROP ROLE`` - ``SELECT`` ``ALL KEYSPACES`` ``SELECT`` on any table - ``SELECT`` ``KEYSPACE`` ``SELECT`` on any table in specified keyspace - ``SELECT`` ``TABLE`` ``SELECT`` on specified table - ``SELECT`` ``ALL MBEANS`` Call getter methods on any mbean - ``SELECT`` ``MBEANS`` Call getter methods on any mbean matching a wildcard pattern - ``SELECT`` ``MBEAN`` Call getter methods on named mbean - ``MODIFY`` ``ALL KEYSPACES`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table - ``MODIFY`` ``KEYSPACE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table in - specified keyspace - ``MODIFY`` ``TABLE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on specified table - ``MODIFY`` ``ALL MBEANS`` Call setter methods on any mbean - ``MODIFY`` ``MBEANS`` Call setter methods on any mbean matching a wildcard pattern - ``MODIFY`` ``MBEAN`` Call setter methods on named mbean - ``AUTHORIZE`` ``ALL KEYSPACES`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table - ``AUTHORIZE`` ``KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table in - specified keyspace - ``AUTHORIZE`` ``TABLE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified table - ``AUTHORIZE`` ``ALL FUNCTIONS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any function - ``AUTHORIZE`` ``ALL FUNCTIONS IN KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` in specified keyspace - ``AUTHORIZE`` ``FUNCTION`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified function - ``AUTHORIZE`` ``ALL MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean - 
``AUTHORIZE`` ``MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean matching - a wildcard pattern - ``AUTHORIZE`` ``MBEAN`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on named mbean - ``AUTHORIZE`` ``ALL ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on any role - ``AUTHORIZE`` ``ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on specified roles - ``DESCRIBE`` ``ALL ROLES`` ``LIST ROLES`` on all roles or only roles granted to another, - specified role - ``DESCRIBE`` ``ALL MBEANS`` Retrieve metadata about any mbean from the platform's MBeanServer - ``DESCRIBE`` ``MBEANS`` Retrieve metadata about any mbean matching a wildcard patter from the - platform's MBeanServer - ``DESCRIBE`` ``MBEAN`` Retrieve metadata about a named mbean from the platform's MBeanServer - ``EXECUTE`` ``ALL FUNCTIONS`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function, and use of - any function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL FUNCTIONS IN KEYSPACE`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function in specified - keyspace and use of any function in keyspace in ``CREATE AGGREGATE`` - ``EXECUTE`` ``FUNCTION`` ``SELECT``, ``INSERT`` and ``UPDATE`` using specified function and use - of the function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL MBEANS`` Execute operations on any mbean - ``EXECUTE`` ``MBEANS`` Execute operations on any mbean matching a wildcard pattern - ``EXECUTE`` ``MBEAN`` Execute operations on named mbean -=============== =============================== ======================================================================= - -.. _grant-permission-statement: - -GRANT PERMISSION -~~~~~~~~~~~~~~~~ - -Granting a permission uses the ``GRANT PERMISSION`` statement: - -.. productionlist:: - grant_permission_statement: GRANT `permissions` ON `resource` TO `role_name` - permissions: ALL [ PERMISSIONS ] | `permission` [ PERMISSION ] - permission: CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE - resource: ALL KEYSPACES - :| KEYSPACE `keyspace_name` - :| [ TABLE ] `table_name` - :| ALL ROLES - :| ROLE `role_name` - :| ALL FUNCTIONS [ IN KEYSPACE `keyspace_name` ] - :| FUNCTION `function_name` '(' [ `cql_type` ( ',' `cql_type` )* ] ')' - :| ALL MBEANS - :| ( MBEAN | MBEANS ) `string` - -For instance:: - - GRANT SELECT ON ALL KEYSPACES TO data_reader; - -This gives any user with the role ``data_reader`` permission to execute ``SELECT`` statements on any table across all -keyspaces:: - - GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer; - -This give any user with the role ``data_writer`` permission to perform ``UPDATE``, ``INSERT``, ``UPDATE``, ``DELETE`` -and ``TRUNCATE`` queries on all tables in the ``keyspace1`` keyspace:: - - GRANT DROP ON keyspace1.table1 TO schema_owner; - -This gives any user with the ``schema_owner`` role permissions to ``DROP`` ``keyspace1.table1``:: - - GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer; - -This grants any user with the ``report_writer`` role permission to execute ``SELECT``, ``INSERT`` and ``UPDATE`` queries -which use the function ``keyspace1.user_function( int )``:: - - GRANT DESCRIBE ON ALL ROLES TO role_admin; - -This grants any user with the ``role_admin`` role permission to view any and all roles in the system with a ``LIST -ROLES`` statement - -.. _grant-all: - -GRANT ALL -````````` - -When the ``GRANT ALL`` form is used, the appropriate set of permissions is determined automatically based on the target -resource. 
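For instance, assuming a hypothetical ``ops_admin`` role, both of the following are accepted by the grammar above and grant every permission applicable to the named resource::

    GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO ops_admin;
    GRANT ALL ON keyspace1.table1 TO ops_admin;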
- -Automatic Granting -`````````````````` - -When a resource is created, via a ``CREATE KEYSPACE``, ``CREATE TABLE``, ``CREATE FUNCTION``, ``CREATE AGGREGATE`` or -``CREATE ROLE`` statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource. - -.. _revoke-permission-statement: - -REVOKE PERMISSION -~~~~~~~~~~~~~~~~~ - -Revoking a permission from a role uses the ``REVOKE PERMISSION`` statement: - -.. productionlist:: - revoke_permission_statement: REVOKE `permissions` ON `resource` FROM `role_name` - -For instance:: - - REVOKE SELECT ON ALL KEYSPACES FROM data_reader; - REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer; - REVOKE DROP ON keyspace1.table1 FROM schema_owner; - REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer; - REVOKE DESCRIBE ON ALL ROLES FROM role_admin; - -.. _list-permissions-statement: - -LIST PERMISSIONS -~~~~~~~~~~~~~~~~ - -Listing granted permissions uses the ``LIST PERMISSIONS`` statement: - -.. productionlist:: - list_permissions_statement: LIST `permissions` [ ON `resource` ] [ OF `role_name` [ NORECURSIVE ] ] - -For instance:: - - LIST ALL PERMISSIONS OF alice; - -Show all permissions granted to ``alice``, including those acquired transitively from any other roles:: - - LIST ALL PERMISSIONS ON keyspace1.table1 OF bob; - -Show all permissions on ``keyspace1.table1`` granted to ``bob``, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to ``keyspace1.table1``. -For example, should ``bob`` have ``ALTER`` permission on ``keyspace1``, that would be included in the results of this -query. Adding the ``NORECURSIVE`` switch restricts the results to only those permissions which were directly granted to -``bob`` or one of ``bob``'s roles:: - - LIST SELECT PERMISSIONS OF carlos; - -Show any permissions granted to ``carlos`` or any of ``carlos``'s roles, limited to ``SELECT`` permissions on any -resource. diff --git a/src/doc/3.11.6/_sources/cql/triggers.rst.txt b/src/doc/3.11.6/_sources/cql/triggers.rst.txt deleted file mode 100644 index db3f53e38..000000000 --- a/src/doc/3.11.6/_sources/cql/triggers.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-triggers: - -Triggers --------- - -Triggers are identified by a name defined by: - -.. productionlist:: - trigger_name: `identifier` - - -.. _create-trigger-statement: - -CREATE TRIGGER -^^^^^^^^^^^^^^ - -Creating a new trigger uses the ``CREATE TRIGGER`` statement: - -.. 
productionlist:: - create_trigger_statement: CREATE TRIGGER [ IF NOT EXISTS ] `trigger_name` - : ON `table_name` - : USING `string` - -For instance:: - - CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex'; - -The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a ``lib/triggers`` subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction. - -.. _drop-trigger-statement: - -DROP TRIGGER -^^^^^^^^^^^^ - -Dropping a trigger uses the ``DROP TRIGGER`` statement: - -.. productionlist:: - drop_trigger_statement: DROP TRIGGER [ IF EXISTS ] `trigger_name` ON `table_name` - -For instance:: - - DROP TRIGGER myTrigger ON myTable; diff --git a/src/doc/3.11.6/_sources/cql/types.rst.txt b/src/doc/3.11.6/_sources/cql/types.rst.txt deleted file mode 100644 index 509a7565e..000000000 --- a/src/doc/3.11.6/_sources/cql/types.rst.txt +++ /dev/null @@ -1,559 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. _data-types: - -Data Types ----------- - -CQL is a typed language and supports a rich set of data types, including :ref:`native types `, -:ref:`collection types `, :ref:`user-defined types `, :ref:`tuple types ` and :ref:`custom -types `: - -.. productionlist:: - cql_type: `native_type` | `collection_type` | `user_defined_type` | `tuple_type` | `custom_type` - - -.. _native-types: - -Native Types -^^^^^^^^^^^^ - -The native types supported by CQL are: - -.. 
productionlist:: - native_type: ASCII - : | BIGINT - : | BLOB - : | BOOLEAN - : | COUNTER - : | DATE - : | DECIMAL - : | DOUBLE - : | DURATION - : | FLOAT - : | INET - : | INT - : | SMALLINT - : | TEXT - : | TIME - : | TIMESTAMP - : | TIMEUUID - : | TINYINT - : | UUID - : | VARCHAR - : | VARINT - -The following table gives additional informations on the native data types, and on which kind of :ref:`constants -` each type supports: - -=============== ===================== ================================================================================== - type constants supported description -=============== ===================== ================================================================================== - ``ascii`` :token:`string` ASCII character string - ``bigint`` :token:`integer` 64-bit signed long - ``blob`` :token:`blob` Arbitrary bytes (no validation) - ``boolean`` :token:`boolean` Either ``true`` or ``false`` - ``counter`` :token:`integer` Counter column (64-bit signed value). See :ref:`counters` for details - ``date`` :token:`integer`, A date (with no corresponding time value). See :ref:`dates` below for details - :token:`string` - ``decimal`` :token:`integer`, Variable-precision decimal - :token:`float` - ``double`` :token:`integer` 64-bit IEEE-754 floating point - :token:`float` - ``duration`` :token:`duration`, A duration with nanosecond precision. See :ref:`durations` below for details - ``float`` :token:`integer`, 32-bit IEEE-754 floating point - :token:`float` - ``inet`` :token:`string` An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that - there is no ``inet`` constant, IP address should be input as strings - ``int`` :token:`integer` 32-bit signed int - ``smallint`` :token:`integer` 16-bit signed int - ``text`` :token:`string` UTF8 encoded string - ``time`` :token:`integer`, A time (with no corresponding date value) with nanosecond precision. See - :token:`string` :ref:`times` below for details - ``timestamp`` :token:`integer`, A timestamp (date and time) with millisecond precision. See :ref:`timestamps` - :token:`string` below for details - ``timeuuid`` :token:`uuid` Version 1 UUID_, generally used as a “conflict-free” timestamp. Also see - :ref:`timeuuid-functions` - ``tinyint`` :token:`integer` 8-bit signed int - ``uuid`` :token:`uuid` A UUID_ (of any version) - ``varchar`` :token:`string` UTF8 encoded string - ``varint`` :token:`integer` Arbitrary-precision integer -=============== ===================== ================================================================================== - -.. _counters: - -Counters -~~~~~~~~ - -The ``counter`` type is used to define *counter columns*. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the :ref:`UPDATE statement -` for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0. - -.. _counter-limitations: - -Counters have a number of important limitations: - -- They cannot be used for columns part of the ``PRIMARY KEY`` of a table. -- A table that contains a counter can only contain counters. In other words, either all the columns of a table outside - the ``PRIMARY KEY`` have the ``counter`` type, or none of them have it. -- Counters do not support :ref:`expiration `. 
-- The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other - words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed). -- Counter updates are, by nature, not `idemptotent `__. An important - consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), - the client has no way to know if the update has been applied or not. In particular, replaying the update may or may - not lead to an over count. - -.. _timestamps: - -Working with timestamps -^^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``timestamp`` type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as `the epoch `__: January 1 1970 at 00:00:00 GMT. - -Timestamps can be input in CQL either using their value as an :token:`integer`, or using a :token:`string` that -represents an `ISO 8601 `__ date. For instance, all of the values below are -valid ``timestamp`` values for Mar 2, 2011, at 04:05:00 AM, GMT: - -- ``1299038700000`` -- ``'2011-02-03 04:05+0000'`` -- ``'2011-02-03 04:05:00+0000'`` -- ``'2011-02-03 04:05:00.000+0000'`` -- ``'2011-02-03T04:05+0000'`` -- ``'2011-02-03T04:05:00+0000'`` -- ``'2011-02-03T04:05:00.000+0000'`` - -The ``+0000`` above is an RFC 822 4-digit time zone specification; ``+0000`` refers to GMT. US Pacific Standard Time is -``-0800``. The time zone may be omitted if desired (``'2011-02-03 04:05:00'``), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible. - -The time of day may also be omitted (``'2011-02-03'`` or ``'2011-02-03+0000'``), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the :ref:`date ` type. - -.. _dates: - -Working with dates -^^^^^^^^^^^^^^^^^^ - -Values of the ``date`` type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at -the center of the range (2^31). Epoch is January 1st, 1970 - -As for :ref:`timestamp `, a date can be input either as an :token:`integer` or using a date -:token:`string`. In the later case, the format should be ``yyyy-mm-dd`` (so ``'2011-02-03'`` for instance). - -.. _times: - -Working with times -^^^^^^^^^^^^^^^^^^ - -Values of the ``time`` type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight. - -As for :ref:`timestamp `, a time can be input either as an :token:`integer` or using a :token:`string` -representing the time. In the later case, the format should be ``hh:mm:ss[.fffffffff]`` (where the sub-second precision -is optional and if provided, can be less than the nanosecond). So for instance, the following are valid inputs for a -time: - -- ``'08:12:54'`` -- ``'08:12:54.123'`` -- ``'08:12:54.123456'`` -- ``'08:12:54.123456789'`` - -.. _durations: - -Working with durations -^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``duration`` type are encoded as 3 signed integer of variable lengths. The first integer represents the -number of months, the second the number of days and the third the number of nanoseconds. 
This is due to the fact that -the number of days in a month can change, and a day can have 23 or 25 hours depending on the daylight saving. -Internally, the number of months and days are decoded as 32 bits integers whereas the number of nanoseconds is decoded -as a 64 bits integer. - -A duration can be input as: - - #. ``(quantity unit)+`` like ``12h30m`` where the unit can be: - - * ``y``: years (12 months) - * ``mo``: months (1 month) - * ``w``: weeks (7 days) - * ``d``: days (1 day) - * ``h``: hours (3,600,000,000,000 nanoseconds) - * ``m``: minutes (60,000,000,000 nanoseconds) - * ``s``: seconds (1,000,000,000 nanoseconds) - * ``ms``: milliseconds (1,000,000 nanoseconds) - * ``us`` or ``µs`` : microseconds (1000 nanoseconds) - * ``ns``: nanoseconds (1 nanosecond) - #. ISO 8601 format: ``P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W`` - #. ISO 8601 alternative format: ``P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]`` - -For example:: - - INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s); - INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S); - INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09); - -.. _duration-limitation: - -Duration columns cannot be used in a table's ``PRIMARY KEY``. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if ``1mo`` is greater than ``29d`` without a date -context. - -A ``1d`` duration is not equals to a ``24h`` one as the duration type has been created to be able to support daylight -saving. - -.. _collections: - -Collections -^^^^^^^^^^^ - -CQL supports 3 kind of collections: :ref:`maps`, :ref:`sets` and :ref:`lists`. The types of those collections is defined -by: - -.. productionlist:: - collection_type: MAP '<' `cql_type` ',' `cql_type` '>' - : | SET '<' `cql_type` '>' - : | LIST '<' `cql_type` '>' - -and their values can be inputd using collection literals: - -.. productionlist:: - collection_literal: `map_literal` | `set_literal` | `list_literal` - map_literal: '{' [ `term` ':' `term` (',' `term` : `term`)* ] '}' - set_literal: '{' [ `term` (',' `term`)* ] '}' - list_literal: '[' [ `term` (',' `term`)* ] ']' - -Note however that neither :token:`bind_marker` nor ``NULL`` are supported inside collection literals. - -Noteworthy characteristics -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Collections are meant for storing/denormalizing relatively small amount of data. They work well for things like “the -phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all -messages sent by a user”, “events registered by a sensor”...), then collections are not appropriate and a specific table -(with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy -characteristics and limitations: - -- Individual collections are not indexed internally. Which means that even to access a single element of a collection, - the while collection has to be read (and reading one is not paged internally). -- While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. - Further, some lists operations are not idempotent by nature (see the section on :ref:`lists ` below for - details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when - possible. 
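As a sketch of the dedicated-table alternative mentioned above for unbounded data ("all messages sent by a user" and similar), one row per item with a clustering column replaces an ever-growing collection (table and column names are hypothetical)::

    CREATE TABLE user_messages (
        user text,
        sent_at timeuuid,
        body text,
        PRIMARY KEY (user, sent_at)   // one partition per user, messages clustered by time
    );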
- -Please note that while some of those limitations may or may not be removed/improved upon in the future, it is a -anti-pattern to use a (single) collection to store large amounts of data. - -.. _maps: - -Maps -~~~~ - -A ``map`` is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:: - - CREATE TABLE users ( - id text PRIMARY KEY, - name text, - favs map // A map of text keys, and text values - ); - - INSERT INTO users (id, name, favs) - VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' }); - - // Replace the existing map entirely. - UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith'; - -Further, maps support: - -- Updating or inserting one or more elements:: - - UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith'; - UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith'; - -- Removing one or more element (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - DELETE favs['author'] FROM users WHERE id = 'jsmith'; - UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith'; - - Note that for removing multiple elements in a ``map``, you remove from it a ``set`` of keys. - -Lastly, TTLs are allowed for both ``INSERT`` and ``UPDATE``, but in both case the TTL set only apply to the newly -inserted/updated elements. In other words:: - - UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith'; - -will only apply the TTL to the ``{ 'color' : 'green' }`` record, the rest of the map remaining unaffected. - - -.. _sets: - -Sets -~~~~ - -A ``set`` is a (sorted) collection of unique values. You can define and insert a map with:: - - CREATE TABLE images ( - name text PRIMARY KEY, - owner text, - tags set // A set of text values - ); - - INSERT INTO images (name, owner, tags) - VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' }); - - // Replace the existing set entirely - UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg'; - -Further, sets support: - -- Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):: - - UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg'; - -- Removing one or multiple elements (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg'; - -Lastly, as for :ref:`maps `, TTLs if used only apply to the newly inserted values. - -.. _lists: - -Lists -~~~~~ - -.. note:: As mentioned above and further discussed at the end of this section, lists have limitations and specific - performance considerations that you should take into account before using them. In general, if you can use a - :ref:`set ` instead of list, always prefer a set. - -A ``list`` is a (sorted) collection of non-unique values where elements are ordered by there position in the list. 
You -can define and insert a list with:: - - CREATE TABLE plays ( - id text PRIMARY KEY, - game text, - players int, - scores list // A list of integers - ) - - INSERT INTO plays (id, game, players, scores) - VALUES ('123-afde', 'quake', 3, [17, 4, 2]); - - // Replace the existing list entirely - UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde'; - -Further, lists support: - -- Appending and prepending values to a list:: - - UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde'; - UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde'; - -- Setting the value at a particular position in the list. This imply that the list has a pre-existing element for that - position or an error will be thrown that the list is too small:: - - UPDATE plays SET scores[1] = 7 WHERE id = '123-afde'; - -- Removing an element by its position in the list. This imply that the list has a pre-existing element for that position - or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the - list size will be diminished by 1, shifting the position of all the elements following the one deleted:: - - DELETE scores[1] FROM plays WHERE id = '123-afde'; - -- Deleting *all* the occurrences of particular values in the list (if a particular element doesn't occur at all in the - list, it is simply ignored and no error is thrown):: - - UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; - -.. warning:: The append and prepend operations are not idempotent by nature. So in particular, if one of these operation - timeout, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value - twice. - -.. warning:: Setting and removing an element by position and removing occurences of particular values incur an internal - *read-before-write*. They will thus run more slowly and take more ressources than usual updates (with the exclusion - of conditional write that have their own cost). - -Lastly, as for :ref:`maps `, TTLs when used only apply to the newly inserted values. - -.. _udts: - -User-Defined Types -^^^^^^^^^^^^^^^^^^ - -CQL support the definition of user-defined types (UDT for short). Such a type can be created, modified and removed using -the :token:`create_type_statement`, :token:`alter_type_statement` and :token:`drop_type_statement` described below. But -once created, a UDT is simply referred to by its name: - -.. productionlist:: - user_defined_type: `udt_name` - udt_name: [ `keyspace_name` '.' ] `identifier` - - -Creating a UDT -~~~~~~~~~~~~~~ - -Creating a new user-defined type is done using a ``CREATE TYPE`` statement defined by: - -.. productionlist:: - create_type_statement: CREATE TYPE [ IF NOT EXISTS ] `udt_name` - : '(' `field_definition` ( ',' `field_definition` )* ')' - field_definition: `identifier` `cql_type` - -A UDT has a name (used to declared columns of that type) and is a set of named and typed fields. Fields name can be any -type, including collections or other UDT. For instance:: - - CREATE TYPE phone ( - country_code int, - number text, - ) - - CREATE TYPE address ( - street text, - city text, - zip text, - phones map - ) - - CREATE TABLE user ( - name text PRIMARY KEY, - addresses map> - ) - -Note that: - -- Attempting to create an already existing type will result in an error unless the ``IF NOT EXISTS`` option is used. If - it is used, the statement will be a no-op if the type already exists. 
-- A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At
-  creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in
-  the current keyspace.
-- As of Cassandra |version|, UDTs have to be frozen in most cases, hence the ``frozen<address>`` in the table definition
-  above. Please see the section on :ref:`frozen <frozen>` for more details.
-
-UDT literals
-~~~~~~~~~~~~
-
-Once a user-defined type has been created, values can be input using a UDT literal:
-
-.. productionlist::
-   udt_literal: '{' `identifier` ':' `term` ( ',' `identifier` ':' `term` )* '}'
-
-In other words, a UDT literal is like a :ref:`map <maps>` literal but its keys are the names of the fields of the type.
-For instance, one could insert into the table defined in the previous section using::
-
-    INSERT INTO user (name, addresses)
-           VALUES ('z3 Pr3z1den7', {
-               'home' : {
-                   street: '1600 Pennsylvania Ave NW',
-                   city: 'Washington',
-                   zip: '20500',
-                   phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                             'landline' : { country_code: 1, number: '...' } }
-               },
-               'work' : {
-                   street: '1600 Pennsylvania Ave NW',
-                   city: 'Washington',
-                   zip: '20500',
-                   phones: { 'fax' : { country_code: 1, number: '...' } }
-               }
-           })
-
-To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields
-(in which case those will be ``null``).
-
-Altering a UDT
-~~~~~~~~~~~~~~
-
-An existing user-defined type can be modified using an ``ALTER TYPE`` statement:
-
-.. productionlist::
-   alter_type_statement: ALTER TYPE `udt_name` `alter_type_modification`
-   alter_type_modification: ADD `field_definition`
-                          : | RENAME `identifier` TO `identifier` ( `identifier` TO `identifier` )*
-
-You can:
-
-- add a new field to the type (``ALTER TYPE address ADD country text``). That new field will be ``null`` for any values
-  of the type created before the addition.
-- rename the fields of the type (``ALTER TYPE address RENAME zip TO zipcode``).
-
-Dropping a UDT
-~~~~~~~~~~~~~~
-
-You can drop an existing user-defined type using a ``DROP TYPE`` statement:
-
-.. productionlist::
-   drop_type_statement: DROP TYPE [ IF EXISTS ] `udt_name`
-
-Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is
-still in use by another type, table or function will result in an error.
-
-If the type dropped does not exist, an error will be returned unless ``IF EXISTS`` is used, in which case the operation
-is a no-op.
-
-.. _tuples:
-
-Tuples
-^^^^^^
-
-CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be
-thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:
-
-.. productionlist::
-   tuple_type: TUPLE '<' `cql_type` ( ',' `cql_type` )* '>'
-   tuple_literal: '(' `term` ( ',' `term` )* ')'
-
-and can be used thusly::
-
-    CREATE TABLE durations (
-        event text,
-        duration tuple<int, text>,
-    )
-
-    INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-Unlike other "composed" types (collections and UDTs), a tuple is always :ref:`frozen <frozen>` (without the need of the
-``frozen`` keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple).
-Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of
-those values can be null, but they need to be explicitly declared as such).
-
-.. _custom-types:
-
-Custom Types
-^^^^^^^^^^^^
-
-.. note:: Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is
-   complex, not user friendly, and the other provided types, particularly :ref:`user-defined types <udts>`, should almost
-   always be enough.
-
-A custom type is defined by:
-
-..
productionlist:: - custom_type: `string` - -A custom type is a :token:`string` that contains the name of Java class that extends the server side ``AbstractType`` -class and that can be loaded by Cassandra (it should thus be in the ``CLASSPATH`` of every node running Cassandra). That -class will define what values are valid for the type and how the time sorts when used for a clustering column. For any -other purpose, a value of a custom type is the same than that of a ``blob``, and can in particular be input using the -:token:`blob` literal syntax. diff --git a/src/doc/3.11.6/_sources/data_modeling/index.rst.txt b/src/doc/3.11.6/_sources/data_modeling/index.rst.txt deleted file mode 100644 index dde031a19..000000000 --- a/src/doc/3.11.6/_sources/data_modeling/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Data Modeling -============= - -.. todo:: TODO diff --git a/src/doc/3.11.6/_sources/development/code_style.rst.txt b/src/doc/3.11.6/_sources/development/code_style.rst.txt deleted file mode 100644 index 5a486a4a3..000000000 --- a/src/doc/3.11.6/_sources/development/code_style.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Code Style -========== - -General Code Conventions ------------------------- - - - The Cassandra project follows `Sun's Java coding conventions `_ with an important exception: ``{`` and ``}`` are always placed on a new line - -Exception handling ------------------- - - - Never ever write ``catch (...) {}`` or ``catch (...) { logger.error() }`` merely to satisfy Java's compile-time exception checking. Always propagate the exception up or throw ``RuntimeException`` (or, if it "can't happen," ``AssertionError``). This makes the exceptions visible to automated tests. - - Avoid propagating up checked exceptions that no caller handles. Rethrow as ``RuntimeException`` (or ``IOError``, if that is more applicable). 
- - Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don't hide it behind a warn; if it isn't, no need for the warning. - - If you genuinely know an exception indicates an expected condition, it's okay to ignore it BUT this must be explicitly explained in a comment. - -Boilerplate ------------ - - - Avoid redundant ``@Override`` annotations when implementing abstract or interface methods. - - Do not implement equals or hashcode methods unless they are actually needed. - - Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in "real" methods to either.) - - Prefer requiring initialization in the constructor to setters. - - Avoid redundant ``this`` references to member fields or methods. - - Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it. - - Always include braces for nested levels of conditionals and loops. Only avoid braces for single level. - -Multiline statements --------------------- - - - Try to keep lines under 120 characters, but use good judgement -- it's better to exceed 120 by a little, than split a line that has no natural splitting points. - - When splitting inside a method call, use one line per parameter and align them, like this: - - :: - - SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), - columnFamilies.size(), - StorageService.getPartitioner()); - - - When splitting a ternary, use one line per clause, carry the operator, and align like this: - - :: - - var = bar == null - ? doFoo() - : doBar(); - -Whitespace ----------- - - - Please make sure to use 4 spaces instead of the tab character for all your indentation. - - Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn't have to pay attention to whitespace diffs. - -Imports -------- - -Please observe the following order for your imports:: - - java - [blank line] - com.google.common - org.apache.commons - org.junit - org.slf4j - [blank line] - everything else alphabetically - -Format files for IDEs ---------------------- - - - IntelliJ: `intellij-codestyle.jar `_ - - IntelliJ 13: `gist for IntelliJ 13 `_ (this is a work in progress, still working on javadoc, ternary style, line continuations, etc) - - Eclipse (https://github.com/tjake/cassandra-style-eclipse) - - - diff --git a/src/doc/3.11.6/_sources/development/how_to_commit.rst.txt b/src/doc/3.11.6/_sources/development/how_to_commit.rst.txt deleted file mode 100644 index d956c72d8..000000000 --- a/src/doc/3.11.6/_sources/development/how_to_commit.rst.txt +++ /dev/null @@ -1,75 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. 
highlight:: none - -How-to Commit -============= - -If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself. - -Here is how committing and merging will usually look for merging and pushing for tickets that follow the convention (if patch-based): - -Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch). - -On cassandra-3.0: - #. ``git am -3 12345-3.0.patch`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git apply -3 12345-3.3.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git apply -3 12345-trunk.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk —atomic`` - -Same scenario, but a branch-based contribution: - -On cassandra-3.0: - #. ``git cherry-pick `` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk —atomic`` - -.. tip:: - - Notes on git flags: - ``-3`` flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply. - - ``—atomic`` flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per each branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue. - -.. tip:: - - The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. - curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch - diff --git a/src/doc/3.11.6/_sources/development/how_to_review.rst.txt b/src/doc/3.11.6/_sources/development/how_to_review.rst.txt deleted file mode 100644 index dc9774362..000000000 --- a/src/doc/3.11.6/_sources/development/how_to_review.rst.txt +++ /dev/null @@ -1,71 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. 
-.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Review Checklist -**************** - -When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process: - -**General** - - * Does it conform to the :doc:`code_style` guidelines? - * Is there any redundant or duplicate code? - * Is the code as modular as possible? - * Can any singletons be avoided? - * Can any of the code be replaced with library functions? - * Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem? - -**Error-Handling** - - * Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded? - * Where third-party utilities are used, are returning errors being caught? - * Are invalid parameter values handled? - * Are any Throwable/Exceptions passed to the JVMStabilityInspector? - * Are errors well-documented? Does the error message tell the user how to proceed? - * Do exceptions propagate to the appropriate level in the code? - -**Documentation** - - * Do comments exist and describe the intent of the code (the "why", not the "how")? - * Are javadocs added where appropriate? - * Is any unusual behavior or edge-case handling described? - * Are data structures and units of measurement explained? - * Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’? - * Does the code self-document via clear naming, abstractions, and flow control? - * Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed? - * Is the ticket tagged with "client-impacting" and "doc-impacting", where appropriate? - * Has lib/licences been updated for third-party libs? Are they Apache License compatible? - * Is the Component on the JIRA ticket set appropriately? - -**Testing** - - * Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc. - * Do tests exist and are they comprehensive? - * Do unit tests actually test that the code is performing the intended functionality? - * Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse? - * If the code may be affected by multi-node clusters, are there dtests? - * If the code may take a long time to test properly, are there CVH tests? - * Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions? - * If patch affects read/write path, did we test for performance regressions w/multiple workloads? - * If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature? - -**Logging** - - * Are logging statements logged at the correct level? - * Are there logs in the critical path that could affect performance? - * Is there any log that could be added to communicate status or troubleshoot potential problems in this feature? - * Can any unnecessary logging statement be removed? 
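To make the error-handling and logging items in the checklist above concrete, here is a minimal, hypothetical Java sketch; it is not Cassandra code, and the ``ConfigReader`` class and file path are invented for illustration (it assumes SLF4J on the classpath, which Cassandra already uses for logging). It shows the pattern a reviewer is looking for: a checked exception is propagated as a ``RuntimeException`` instead of being silently swallowed, and log statements stay at the level that matches their purpose.

.. code-block:: java

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.List;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ConfigReader
    {
        private static final Logger logger = LoggerFactory.getLogger(ConfigReader.class);

        // Bad: catch (IOException e) {} or a lone logger.error() call; both hide the
        // failure from callers and from automated tests.
        // Good: rethrow as RuntimeException so the error stays visible and propagates.
        public static List<String> readLines(Path path)
        {
            try
            {
                return Files.readAllLines(path);
            }
            catch (IOException e)
            {
                throw new RuntimeException("Unable to read " + path, e);
            }
        }

        public static void main(String[] args)
        {
            Path path = Paths.get(args.length > 0 ? args[0] : "example.conf");
            // INFO for expected progress, DEBUG for detail; avoid WARN as an "is this an error?" cop-out.
            logger.info("Reading configuration from {}", path);
            logger.debug("{} lines read", readLines(path).size());
        }
    }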
- diff --git a/src/doc/3.11.6/_sources/development/ide.rst.txt b/src/doc/3.11.6/_sources/development/ide.rst.txt deleted file mode 100644 index 298649576..000000000 --- a/src/doc/3.11.6/_sources/development/ide.rst.txt +++ /dev/null @@ -1,161 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Building and IDE Integration -**************************** - -Building From Source -==================== - -Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using `Java 8 `_, `Git `_ and `Ant `_. - -The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:: - - git clone http://git-wip-us.apache.org/repos/asf/cassandra.git cassandra-trunk - -Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:: - - git checkout cassandra-3.0 - -You can get a list of available branches with ``git branch``. - -Finally build Cassandra using ant:: - - ant - -This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled. - -.. hint:: - - You can setup multiple working trees for different Cassandra versions from the same repository using `git-worktree `_. - -.. note:: - - `Bleeding edge development snapshots `_ of Cassandra are available from Jenkins continuous integration. - -Setting up Cassandra in IntelliJ IDEA -===================================== - -`IntelliJ IDEA `_ by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra. - -Setup Cassandra as a Project (C* 2.1 and newer) ------------------------------------------------ - -Since 2.1.5, there is a new ant target: ``generate-idea-files``. Please see our `wiki `_ for instructions for older Cassandra versions. - -Please clone and build Cassandra as described above and execute the following steps: - -1. Once Cassandra is built, generate the IDEA files using ant: - -:: - - ant generate-idea-files - -2. Start IDEA - -3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA's File menu - -The project generated by the ant task ``generate-idea-files`` contains nearly everything you need to debug Cassandra and execute unit tests. 
- - * Run/debug defaults for JUnit - * Run/debug configuration for Cassandra daemon - * License header for Java source files - * Cassandra code style - * Inspections - -Setting up Cassandra in Eclipse -=============================== - -Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the `download page `_. The following guide was created with "Eclipse IDE for Java Developers". - -These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x. - -Project Settings ----------------- - -**It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.** - - * Clone and build Cassandra as described above. - * Run ``ant generate-eclipse-files`` to create the Eclipse settings. - * Start Eclipse. - * Select ``File->Import->Existing Projects into Workspace->Select git directory``. - * Make sure "cassandra-trunk" is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above). - * Confirm "Finish" to have your project imported. - -You should now be able to find the project as part of the "Package Explorer" or "Project Explorer" without having Eclipse complain about any errors after building the project automatically. - -Unit Tests ----------- - -Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting ``Run As->JUnit Test``. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting ``Debug As->JUnit Test``. - -Alternatively all unit tests can be run from the command line as described in :doc:`testing` - -Debugging Cassandra Using Eclipse ---------------------------------- - -There are two ways how to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ``./bin/cassandra`` script and connect to the JVM through `remotely `_ from Eclipse or start Cassandra from Eclipse right away. - -Starting Cassandra From Command Line -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * Set environment variable to define remote debugging options for the JVM: - ``export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`` - * Start Cassandra by executing the ``./bin/cassandra`` - -Afterwards you should be able to connect to the running Cassandra process through the following steps: - -From the menu, select ``Run->Debug Configurations..`` - -.. image:: images/eclipse_debug0.png - -Create new remote application - -.. image:: images/eclipse_debug1.png - -Configure connection settings by specifying a name and port 1414 - -.. image:: images/eclipse_debug2.png - -Afterwards confirm "Debug" to connect to the JVM and start debugging Cassandra! - -Starting Cassandra From Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cassandra can also be started directly from Eclipse if you don't want to use the command line. - -From the menu, select ``Run->Run Configurations..`` - -.. image:: images/eclipse_debug3.png - -Create new application - -.. image:: images/eclipse_debug4.png - -Specify name, project and main class ``org.apache.cassandra.service.CassandraDaemon`` - -.. image:: images/eclipse_debug5.png - -Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed. 
- -:: - - -Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true - -.. image:: images/eclipse_debug6.png - -Now just confirm "Debug" and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging! - diff --git a/src/doc/3.11.6/_sources/development/index.rst.txt b/src/doc/3.11.6/_sources/development/index.rst.txt deleted file mode 100644 index aefc5999c..000000000 --- a/src/doc/3.11.6/_sources/development/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Development -********************* - -.. toctree:: - :maxdepth: 2 - - ide - testing - patches - code_style - how_to_review - how_to_commit diff --git a/src/doc/3.11.6/_sources/development/patches.rst.txt b/src/doc/3.11.6/_sources/development/patches.rst.txt deleted file mode 100644 index e3d968fab..000000000 --- a/src/doc/3.11.6/_sources/development/patches.rst.txt +++ /dev/null @@ -1,125 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Contributing Code Changes -************************* - -Choosing What to Work on -======================== - -Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that requires changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing. 
- -As a general rule of thumb: - * Major new features and significant changes to the code based will likely not going to be accepted without deeper discussion within the `developer community `_ - * Bug fixes take higher priority compared to features - * The extend to which tests are required depend on how likely your changes will effect the stability of Cassandra in production. Tooling changes requires fewer tests than storage engine changes. - * Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately - -.. hint:: - - Not sure what to work? Just pick an issue tagged with the `low hanging fruit label `_ in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners. - -Before You Start Coding -======================= - -Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefor it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or IRC channel listed on our `community page `_. - -You should also - * Avoid redundant work by searching for already reported issues in `JIRA `_ - * Create a new issue early in the process describing what you're working on - not just after finishing your patch - * Link related JIRA issues with your own ticket to provide a better context - * Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code - * Ping people who you actively like to ask for advice on JIRA by `mentioning users `_ - -There are also some fixed rules that you need to be aware: - * Patches will only be applied to branches by following the release model - * Code must be testable - * Code must follow the :doc:`code_style` convention - * Changes must not break compatibility between different Cassandra versions - * Contributions must be covered by the Apache License - -Choosing the Right Branches to Work on -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are currently multiple Cassandra versions maintained in individual branches: - -======= ====== -Version Policy -======= ====== -3.x Tick-tock (see below) -3.0 Bug fixes only -2.2 Bug fixes only -2.1 Critical bug fixes only -======= ====== - -Corresponding branches in git are easy to recognize as they are named ``cassandra-`` (e.g. ``cassandra-3.0``). The ``trunk`` branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases. - -Tick-Tock Releases -"""""""""""""""""" - -New releases created as part of the `tick-tock release process `_ will either focus on stability (odd version numbers) or introduce new features (even version numbers). Any code for new Cassandra features you should be based on the latest, unreleased 3.x branch with even version number or based on trunk. - -Bug Fixes -""""""""" - -Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be ``cassandra-2.1`` -> ``cassandra-2.2`` -> ``cassandra-3.0`` -> ``cassandra-3.x`` -> ``trunk``. But don't worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn't very common. 
As a contributor, you're also not expected to provide a single patch for each version. What you need to do however is: - - * Be clear about which versions you could verify to be affected by the bug - * For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on case by case bases - * If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0) - * Test if the patch can be merged cleanly across branches in the direction listed above - * Be clear which branches may need attention by the committer or even create custom patches for those if you can - -Creating a Patch -================ - -So you've finished coding and the great moment arrives: it's time to submit your patch! - - 1. Create a branch for your changes if you haven't done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. ``git checkout -b 12345-3.0`` - 2. Verify that you follow Cassandra's :doc:`code_style` - 3. Make sure all tests (including yours) pass using ant as described in :doc:`testing`. If you suspect a test failure is unrelated to your change, it may be useful to check the test's status by searching the issue tracker or looking at `CI `_ results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites. - 4. Consider going through the :doc:`how_to_review` for your code. This will help you to understand how others will consider your change for inclusion. - 5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either: - - a. Attach a patch to JIRA with a single squashed commit in it (per branch), or - b. Squash the commits in-place in your branches into one - - 6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch ending with the following statement on the last line: ``patch by X; reviewed by Y for CASSANDRA-ZZZZZ`` - 7. When you're happy with the result, create a patch: - - :: - - git add - git commit -m '' - git format-patch HEAD~1 - mv (e.g. 12345-trunk.txt, 12345-3.0.txt) - - Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch: - - :: - - git push --set-upstream origin 12345-3.0 - - 8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless. - 9. Attach the newly generated patch to the ticket/add a link to your branch and click "Submit Patch" at the top of the ticket. This will move the ticket into "Patch Available" status, indicating that your submission is ready for review. - 10. Wait for other developers or committers to review it and hopefully +1 the ticket (see :doc:`how_to_review`). If your change does not receive a +1, do not be discouraged. 
If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable. - 11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into "Patch Available" once again. - -Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work. - - diff --git a/src/doc/3.11.6/_sources/development/testing.rst.txt b/src/doc/3.11.6/_sources/development/testing.rst.txt deleted file mode 100644 index b8eea6b28..000000000 --- a/src/doc/3.11.6/_sources/development/testing.rst.txt +++ /dev/null @@ -1,89 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Testing -******* - -Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you're working on. - - -Unit Testing -============ - -The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the ``test/unit`` directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example. - -.. code-block:: java - - @Test - public void testBatchAndList() throws Throwable - { - createTable("CREATE TABLE %s (k int PRIMARY KEY, l list)"); - execute("BEGIN BATCH " + - "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " + - "APPLY BATCH"); - - assertRows(execute("SELECT l FROM %s WHERE k = 0"), - row(list(1, 2, 3))); - } - -Unit tests can be run from the command line using the ``ant test`` command, ``ant test -Dtest.name=`` to execute a test suite or ``ant testsome -Dtest.name= -Dtest.methods=[,testmethod2]`` for individual tests. 
For example, to run all test methods in the ``org.apache.cassandra.cql3.SimpleQueryTest`` class, you would run:: - - ant test -Dtest.name=SimpleQueryTest - -To run only the ``testStaticCompactTables()`` test method from that class, you would run:: - - ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables - -Long running tests ------------------- - -Test that consume a significant amount of time during execution can be found in the ``test/long`` directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under ``test/long`` only when using the ``ant long-test`` target. - -DTests -====== - -One way of doing integration or system testing at larger scale is by using `dtest `_, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ``ccmlib`` from the `ccm `_ project. Dtests will setup clusters using this library just as you do running ad-hoc ``ccm`` commands on your local machine. Afterwards dtests will use the `Python driver `_ to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes. - -Using dtests helps us to prevent regression bugs by continually executing tests on the `CI server `_ against new patches. For frequent contributors, this Jenkins is set up to build branches from their GitHub repositories. It is likely that your reviewer will use this Jenkins instance to run tests for your patch. Read more on the motivation behind the CI server `here `_. - -The best way to learn how to write dtests is probably by reading the introduction "`How to Write a Dtest `_" and by looking at existing, recently updated tests in the project. New tests must follow certain `style conventions `_ that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefor you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR. - -Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will able to support you, and in some cases they may offer to write a dtest for you. - -Performance Testing -=================== - -Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable. - -Cassandra Stress Tool ---------------------- - -TODO: `CASSANDRA-12365 `_ - -cstar_perf ----------- - -Another tool available on github is `cstar_perf `_ that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it. - -CircleCI --------- -Cassandra ships with a default `CircleCI `_ configuration, to enable running tests on your branches, you need to go the CircleCI website, click "Login" and log in with your github account. Then you need to give CircleCI permission to watch your repositories. 
Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ``ant eclipse-warnings`` and ``ant test`` will be run. If you up the parallelism to 4, it also runs ``ant long-test``, ``ant test-compression`` and ``ant stress-test`` - - diff --git a/src/doc/3.11.6/_sources/faq/index.rst.txt b/src/doc/3.11.6/_sources/faq/index.rst.txt deleted file mode 100644 index d985e3716..000000000 --- a/src/doc/3.11.6/_sources/faq/index.rst.txt +++ /dev/null @@ -1,298 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Frequently Asked Questions -========================== - -- :ref:`why-cant-list-all` -- :ref:`what-ports` -- :ref:`what-happens-on-joins` -- :ref:`asynch-deletes` -- :ref:`one-entry-ring` -- :ref:`can-large-blob` -- :ref:`nodetool-connection-refused` -- :ref:`to-batch-or-not-to-batch` -- :ref:`selinux` -- :ref:`how-to-unsubscribe` -- :ref:`cassandra-eats-all-my-memory` -- :ref:`what-are-seeds` -- :ref:`are-seeds-SPOF` -- :ref:`why-message-dropped` -- :ref:`oom-map-failed` -- :ref:`what-on-same-timestamp-update` -- :ref:`why-bootstrapping-stream-error` - -.. _why-cant-list-all: - -Why can't I set ``listen_address`` to listen on 0.0.0.0 (all my addresses)? ---------------------------------------------------------------------------- - -Cassandra is a gossip-based distributed system and ``listen_address`` is the address a node tells other nodes to reach -it at. Telling other nodes "contact me on any of my addresses" is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen. - -If you don't want to manually specify an IP to ``listen_address`` for each node in your cluster (understandable!), leave -it blank and Cassandra will use ``InetAddress.getLocalHost()`` to pick an address. Then it's up to you or your ops team -to make things resolve correctly (``/etc/hosts/``, dns, etc). - -One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769). - -See :jira:`256` and :jira:`43` for more gory details. - -.. _what-ports: - -What ports does Cassandra use? ------------------------------- - -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX (and 9160 for the deprecated Thrift interface). The internode communication and native protocol ports -are configurable in the :ref:`cassandra-yaml`. The JMX port is configurable in ``cassandra-env.sh`` (through JVM -options). All ports are TCP. - -.. _what-happens-on-joins: - -What happens to existing data in my cluster when I add new nodes? 
------------------------------------------------------------------ - -When a new nodes joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data -to itself. See :ref:`topology-changes`. - -.. _asynch-deletes: - -I delete data from Cassandra, but disk usage stays the same. What gives? ------------------------------------------------------------------------- - -Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can't actually be removed -when you perform a delete, instead, a marker (also called a "tombstone") is written to indicate the value's new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See :ref:`compaction` for more detail. - -.. _one-entry-ring: - -Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring? ------------------------------------------------------------------------------------------------------------------- - -This happens when you have the same token assigned to each node. Don't do that. - -Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes. - -The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart. - -.. _change-replication-factor: - -Can I change the replication factor (a a keyspace) on a live cluster? ---------------------------------------------------------------------- - -Yes, but it will require running repair (or cleanup) to change the replica count of existing data: - -- :ref:`Alter ` the replication factor for desired keyspace (using cqlsh for instance). -- If you're reducing the replication factor, run ``nodetool cleanup`` on the cluster to remove surplus replicated data. - Cleanup runs on a per-node basis. -- If you're increasing the replication factor, run ``nodetool repair`` to ensure data is replicated according to the new - configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster - performance. It's highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will - most likely swamp it. - -.. _can-large-blob: - -Can I Store (large) BLOBs in Cassandra? ---------------------------------------- - -Cassandra isn't optimized for large file or BLOB storage and a single ``blob`` value is always read and send to the -client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to -manually split large blobs into smaller chunks. - -Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due the -``max_mutation_size_in_kb`` configuration of the :ref:`cassandra-yaml` file (which default to half of -``commitlog_segment_size_in_mb``, which itself default to 32MB). - -.. _nodetool-connection-refused: - -Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives? --------------------------------------------------------------------------------------- - -Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. 
Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions. - -If you are not using DNS, then make sure that your ``/etc/hosts`` files are accurate on both ends. If that fails, try -setting the ``-Djava.rmi.server.hostname=`` JVM option near the bottom of ``cassandra-env.sh`` to an -interface that you can reach from the remote machine. - -.. _to-batch-or-not-to-batch: - -Will batching my operations speed up my bulk load? --------------------------------------------------- - -No. Using batches to load data will generally just add "spikes" of latency. Use asynchronous INSERTs instead, or use -true :ref:`bulk-loading`. - -An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch -stay reasonable). But never ever blindly batch everything! - -.. _selinux: - -On RHEL nodes are unable to join the ring ------------------------------------------ - -Check if `SELinux `__ is on; if it is, turn it off. - -.. _how-to-unsubscribe: - -How do I unsubscribe from the email list? ------------------------------------------ - -Send an email to ``user-unsubscribe@cassandra.apache.org``. - -.. _cassandra-eats-all-my-memory: - -Why does top report that Cassandra is using a lot more memory than the Java heap max? -------------------------------------------------------------------------------------- - -Cassandra uses `Memory Mapped Files `__ (mmap) internally. That is, we -use the operating system's virtual memory system to map a number of on-disk files into the Cassandra process' address -space. This will "use" virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that. - -What matters from the perspective of "memory use" in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap'd /dev/zero, which represent real memory used. The key issue is that for a mmap'd file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write. - -The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don't -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail `here `__. - -.. _what-are-seeds: - -What are seeds? ---------------- - -Seeds are used during startup to discover the cluster. - -If you configure your nodes to refer some node as seed, nodes in your ring tend to send Gossip message to seeds more -often (also see the :ref:`section on gossip `) than to non-seeds. In other words, seeds are worked as hubs of -Gossip network. With seeds, each node can detect status changes of other nodes quickly. - -Seeds are also referred by new nodes on bootstrap to learn other nodes in ring. When you add a new node to ring, you -need to specify at least one live seed to contact. 
Once a node join the ring, it learns about the other nodes, so it -doesn't need seed on subsequent boot. - -You can make a seed a node at any time. There is nothing special about seed nodes. If you list the node in seed list it -is a seed - -Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself) -If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you -do not have to worry about bootstrap at all. - -Recommended usage of seeds: - -- pick two (or more) nodes per data center as seed nodes. -- sync the seed list to all your nodes - -.. _are-seeds-SPOF: - -Does single seed mean single point of failure? ----------------------------------------------- - -The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is -recommended to configure multiple seeds in production system. - -.. _cant-call-jmx-method: - -Why can't I call jmx method X on jconsole? ------------------------------------------- - -Some of JMX operations use array argument and as jconsole doesn't support array argument, those operations can't be -called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations or need -array-capable JMX monitoring tool. - -.. _why-message-dropped: - -Why do I see "... messages dropped ..." in the logs? ----------------------------------------------------- - -This is a symptom of load shedding -- Cassandra defending itself against more requests than it can handle. - -Internode messages which are received by a node, but do not get not to be processed within their proper timeout (see -``read_request_timeout``, ``write_request_timeout``, ... in the :ref:`cassandra-yaml`), are dropped rather than -processed (since the as the coordinator node will no longer be waiting for a response). - -For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be -repaired by read repair, hints or a manual repair. The write operation may also have timeouted as a result. - -For reads, this means a read request may not have completed. - -Load shedding is part of the Cassandra architecture, if this is a persistent issue it is generally a sign of an -overloaded node or cluster. - -.. _oom-map-failed: - -Cassandra dies with ``java.lang.OutOfMemoryError: Map failed`` --------------------------------------------------------------- - -If Cassandra is dying **specifically** with the "Map failed" message, it means the OS is denying java the ability to -lock more memory. In linux, this typically means memlock is limited. Check ``/proc//limits`` to verify -this and raise it (eg, via ulimit in bash). You may also need to increase ``vm.max_map_count.`` Note that the debian -package handles this for you automatically. - - -.. _what-on-same-timestamp-update: - -What happens if two updates are made with the same timestamp? -------------------------------------------------------------- - -Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. 
Second, if there are two updates, the one with the lexically larger -value is selected. - -.. _why-bootstrapping-stream-error: - -Why bootstrapping a new node fails with a "Stream failed" error? ----------------------------------------------------------------- - -Two main possibilities: - -#. the GC may be creating long pauses disrupting the streaming process -#. compactions happening in the background hold streaming long enough that the TCP connection fails - -In the first case, regular GC tuning advices apply. In the second case, you need to set TCP keepalive to a lower value -(default is very high on Linux). Try to just run the following:: - - $ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5 - -To make those settings permanent, add them to your ``/etc/sysctl.conf`` file. - -Note: `GCE `__'s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment. - - - - - - - - - - - diff --git a/src/doc/3.11.6/_sources/getting_started/configuring.rst.txt b/src/doc/3.11.6/_sources/getting_started/configuring.rst.txt deleted file mode 100644 index 27fac7872..000000000 --- a/src/doc/3.11.6/_sources/getting_started/configuring.rst.txt +++ /dev/null @@ -1,67 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra ---------------------- - -For running Cassandra on a single node, the steps above are enough, you don't really need to change any configuration. -However, when you deploy a cluster of nodes, or use clients that are not on the same host, then there are some -parameters that must be changed. - -The Cassandra configuration files can be found in the ``conf`` directory of tarballs. For packages, the configuration -files will be located in ``/etc/cassandra``. - -Main runtime properties -^^^^^^^^^^^^^^^^^^^^^^^ - -Most of configuration in Cassandra is done via yaml properties that can be set in ``cassandra.yaml``. At a minimum you -should consider setting the following properties: - -- ``cluster_name``: the name of your cluster. -- ``seeds``: a comma separated list of the IP addresses of your cluster seeds. -- ``storage_port``: you don't necessarily need to change this but make sure that there are no firewalls blocking this - port. -- ``listen_address``: the IP address of your node, this is what allows other nodes to communicate with this node so it - is important that you change it. Alternatively, you can set ``listen_interface`` to tell Cassandra which interface to - use, and consecutively which address to use. Set only one, not both. 
-- ``native_transport_port``: as for storage\_port, make sure this port is not blocked by firewalls as clients will - communicate with Cassandra on this port. - -Changing the location of directories -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following yaml properties control the location of directories: - -- ``data_file_directories``: one or more directories where data files are located. -- ``commitlog_directory``: the directory where commitlog files are located. -- ``saved_caches_directory``: the directory where saved caches are located. -- ``hints_directory``: the directory where hints are located. - -For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks. - -Environment variables -^^^^^^^^^^^^^^^^^^^^^ - -JVM-level settings such as heap size can be set in ``cassandra-env.sh``. You can add any additional JVM command line -argument to the ``JVM_OPTS`` environment variable; when Cassandra starts these arguments will be passed to the JVM. - -Logging -^^^^^^^ - -The logger in use is logback. You can change logging properties by editing ``logback.xml``. By default it will log at -INFO level into a file called ``system.log`` and at debug level into a file called ``debug.log``. When running in the -foreground, it will also log at INFO level to the console. - diff --git a/src/doc/3.11.6/_sources/getting_started/drivers.rst.txt b/src/doc/3.11.6/_sources/getting_started/drivers.rst.txt deleted file mode 100644 index baec82378..000000000 --- a/src/doc/3.11.6/_sources/getting_started/drivers.rst.txt +++ /dev/null @@ -1,107 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _client-drivers: - -Client drivers --------------- - -Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver. 
- -Java -^^^^ - -- `Achilles `__ -- `Astyanax `__ -- `Casser `__ -- `Datastax Java driver `__ -- `Kundera `__ -- `PlayORM `__ - -Python -^^^^^^ - -- `Datastax Python driver `__ - -Ruby -^^^^ - -- `Datastax Ruby driver `__ - -C# / .NET -^^^^^^^^^ - -- `Cassandra Sharp `__ -- `Datastax C# driver `__ -- `Fluent Cassandra `__ - -Nodejs -^^^^^^ - -- `Datastax Nodejs driver `__ -- `Node-Cassandra-CQL `__ - -PHP -^^^ - -- `CQL \| PHP `__ -- `Datastax PHP driver `__ -- `PHP-Cassandra `__ -- `PHP Library for Cassandra `__ - -C++ -^^^ - -- `Datastax C++ driver `__ -- `libQTCassandra `__ - -Scala -^^^^^ - -- `Datastax Spark connector `__ -- `Phantom `__ -- `Quill `__ - -Clojure -^^^^^^^ - -- `Alia `__ -- `Cassaforte `__ -- `Hayt `__ - -Erlang -^^^^^^ - -- `CQerl `__ -- `Erlcass `__ - -Go -^^ - -- `CQLc `__ -- `Gocassa `__ -- `GoCQL `__ - -Haskell -^^^^^^^ - -- `Cassy `__ - -Rust -^^^^ - -- `Rust CQL `__ diff --git a/src/doc/3.11.6/_sources/getting_started/index.rst.txt b/src/doc/3.11.6/_sources/getting_started/index.rst.txt deleted file mode 100644 index 4ca9c4d40..000000000 --- a/src/doc/3.11.6/_sources/getting_started/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Getting Started -=============== - -This section covers how to get started using Apache Cassandra and should be the first thing to read if you are new to -Cassandra. - -.. toctree:: - :maxdepth: 2 - - installing - configuring - querying - drivers - - diff --git a/src/doc/3.11.6/_sources/getting_started/installing.rst.txt b/src/doc/3.11.6/_sources/getting_started/installing.rst.txt deleted file mode 100644 index 1a7b8ad3b..000000000 --- a/src/doc/3.11.6/_sources/getting_started/installing.rst.txt +++ /dev/null @@ -1,106 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Installing Cassandra --------------------- - -Prerequisites -^^^^^^^^^^^^^ - -- The latest version of Java 8, either the `Oracle Java Standard Edition 8 - `__ or `OpenJDK 8 `__. 
To - verify that you have the correct version of java installed, type ``java -version``. - -- For using cqlsh, the latest version of `Python 2.7 `__. To verify that you have - the correct version of Python installed, type ``python --version``. - -Installation from binary tarball files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Download the latest stable release from the `Apache Cassandra downloads website `__. - -- Untar the file somewhere, for example: - -:: - - tar -xvf apache-cassandra-3.6-bin.tar.gz cassandra - -The files will be extracted into ``apache-cassandra-3.6``, you need to substitute 3.6 with the release number that you -have downloaded. - -- Optionally add ``apache-cassandra-3.6\bin`` to your path. -- Start Cassandra in the foreground by invoking ``bin/cassandra -f`` from the command line. Press "Control-C" to stop - Cassandra. Start Cassandra in the background by invoking ``bin/cassandra`` from the command line. Invoke ``kill pid`` - or ``pkill -f CassandraDaemon`` to stop Cassandra, where pid is the Cassandra process id, which you can find for - example by invoking ``pgrep -f CassandraDaemon``. -- Verify that Cassandra is running by invoking ``bin/nodetool status`` from the command line. -- Configuration files are located in the ``conf`` sub-directory. -- Since Cassandra 2.1, log and data directories are located in the ``logs`` and ``data`` sub-directories respectively. - Older versions defaulted to ``/var/log/cassandra`` and ``/var/lib/cassandra``. Due to this, it is necessary to either - start Cassandra with root privileges or change ``conf/cassandra.yaml`` to use directories owned by the current user, - as explained below in the section on changing the location of directories. - -Installation from Debian packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Add the Apache repository of Cassandra to ``/etc/apt/sources.list.d/cassandra.sources.list``, for example for version - 3.6: - -:: - - echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list - -- Add the Apache Cassandra repository keys: - -:: - - curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add - - -- Update the repositories: - -:: - - sudo apt-get update - -- If you encounter this error: - -:: - - GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA - -Then add the public key A278B781FE4B2BDA as follows: - -:: - - sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA - -and repeat ``sudo apt-get update``. The actual key may be different, you get it from the error message itself. For a -full list of Apache contributors public keys, you can refer to `this link `__. - -- Install Cassandra: - -:: - - sudo apt-get install cassandra - -- You can start Cassandra with ``sudo service cassandra start`` and stop it with ``sudo service cassandra stop``. - However, normally the service will start automatically. For this reason be sure to stop it if you need to make any - configuration changes. -- Verify that Cassandra is running by invoking ``nodetool status`` from the command line. -- The default location of configuration files is ``/etc/cassandra``. -- The default location of log and data directories is ``/var/log/cassandra/`` and ``/var/lib/cassandra``. 
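Taken together, the Debian package steps above amount to a short shell session. The following is a minimal sketch assuming
the 36x series used in the examples; substitute the series that matches the release you are installing::

    echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
    curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
    sudo apt-get update
    sudo apt-get install cassandra
    nodetool status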
diff --git a/src/doc/3.11.6/_sources/getting_started/querying.rst.txt b/src/doc/3.11.6/_sources/getting_started/querying.rst.txt deleted file mode 100644 index 55b162bb4..000000000 --- a/src/doc/3.11.6/_sources/getting_started/querying.rst.txt +++ /dev/null @@ -1,52 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Inserting and querying ----------------------- - -The API to Cassandra is :ref:`CQL `, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done: - -- either using cqlsh, -- or through a client driver for Cassandra. - -CQLSH -^^^^^ - -cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:: - - $ bin/cqlsh localhost - Connected to Test Cluster at localhost:9042. - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - Use HELP for help. - cqlsh> SELECT cluster_name, listen_address FROM system.local; - - cluster_name | listen_address - --------------+---------------- - Test Cluster | 127.0.0.1 - - (1 rows) - cqlsh> - -See the :ref:`cqlsh section ` for full documentation. - -Client drivers -^^^^^^^^^^^^^^ - -A lot of client drivers are provided by the Community and a list of known drivers is provided in :ref:`the next section -`. You should refer to the documentation of each drivers for more information on how to use them. diff --git a/src/doc/3.11.6/_sources/index.rst.txt b/src/doc/3.11.6/_sources/index.rst.txt deleted file mode 100644 index 562603d19..000000000 --- a/src/doc/3.11.6/_sources/index.rst.txt +++ /dev/null @@ -1,41 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Welcome to Apache Cassandra's documentation! -============================================ - -This is the official documentation for `Apache Cassandra `__ |version|. 
If you would like -to contribute to this documentation, you are welcome to do so by submitting your contribution like any other patch -following `these instructions `__. - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting_started/index - architecture/index - data_modeling/index - cql/index - configuration/index - operating/index - tools/index - troubleshooting/index - development/index - faq/index - - bugs - contactus diff --git a/src/doc/3.11.6/_sources/operating/backups.rst.txt b/src/doc/3.11.6/_sources/operating/backups.rst.txt deleted file mode 100644 index c071e83b5..000000000 --- a/src/doc/3.11.6/_sources/operating/backups.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Backups -======= - -.. todo:: TODO diff --git a/src/doc/3.11.6/_sources/operating/bloom_filters.rst.txt b/src/doc/3.11.6/_sources/operating/bloom_filters.rst.txt deleted file mode 100644 index 0b37c18da..000000000 --- a/src/doc/3.11.6/_sources/operating/bloom_filters.rst.txt +++ /dev/null @@ -1,65 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Bloom Filters -------------- - -In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter. - -Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file. - -While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the -the ``bloom_filter_fp_chance`` to a float between 0 and 1. 
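As a minimal sketch of such tuning, the value can also be set when a table is created (the keyspace, table and columns
below are hypothetical)::

    CREATE TABLE keyspace.table (id int PRIMARY KEY, payload text)
        WITH bloom_filter_fp_chance = 0.01;

Viewing and changing the value on an existing table with ``DESCRIBE TABLE`` and ``ALTER TABLE`` is covered below.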
- -The default value for ``bloom_filter_fp_chance`` is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases. - -Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the ``bloom_filter_fp_chance`` gets closer to 0), memory usage -increases non-linearly - the bloom filter for ``bloom_filter_fp_chance = 0.01`` will require about three times as much -memory as the same table with ``bloom_filter_fp_chance = 0.1``. - -Typical values for ``bloom_filter_fp_chance`` are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case: - -- Users with more RAM and slower disks may benefit from setting the ``bloom_filter_fp_chance`` to a numerically lower - number (such as 0.01) to avoid excess IO operations -- Users with less RAM, more dense nodes, or very fast disks may tolerate a higher ``bloom_filter_fp_chance`` in order to - save RAM at the expense of excess IO operations -- In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics - workloads), setting the ``bloom_filter_fp_chance`` to a much higher number is acceptable. - -Changing -^^^^^^^^ - -The bloom filter false positive chance is visible in the ``DESCRIBE TABLE`` output as the field -``bloom_filter_fp_chance``. Operators can change the value with an ``ALTER TABLE`` statement: -:: - - ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01 - -Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ``ALTER TABLE`` statement, new -files on disk will be written with the new ``bloom_filter_fp_chance``, but existing sstables will not be modified until -they are compacted - if an operator needs a change to ``bloom_filter_fp_chance`` to take effect, they can trigger an -SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the progress. diff --git a/src/doc/3.11.6/_sources/operating/bulk_loading.rst.txt b/src/doc/3.11.6/_sources/operating/bulk_loading.rst.txt deleted file mode 100644 index c8224d5cb..000000000 --- a/src/doc/3.11.6/_sources/operating/bulk_loading.rst.txt +++ /dev/null @@ -1,24 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _bulk-loading: - -Bulk Loading ------------- - -.. 
todo:: TODO
diff --git a/src/doc/3.11.6/_sources/operating/cdc.rst.txt b/src/doc/3.11.6/_sources/operating/cdc.rst.txt
deleted file mode 100644
index 192f62a09..000000000
--- a/src/doc/3.11.6/_sources/operating/cdc.rst.txt
+++ /dev/null
@@ -1,89 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-.. highlight:: none
-
-Change Data Capture
--------------------
-
-Overview
-^^^^^^^^
-
-Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those
-tables once a configurable size-on-disk for the combined flushed and unflushed CDC-log is reached. An operator can
-enable CDC on a table by setting the table property ``cdc=true`` (either when :ref:`creating the table
-` or :ref:`altering it `), after which any CommitLogSegments containing
-data for a CDC-enabled table are moved to the directory specified in ``cassandra.yaml`` on segment discard. A threshold
-of total disk space allowed is specified in the yaml, at which point newly allocated CommitLogSegments will not allow CDC
-data until a consumer parses and removes data from the destination archival directory.
-
-Configuration
-^^^^^^^^^^^^^
-
-Enabling or disabling CDC on a table
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-CDC is enabled or disabled through the `cdc` table property, for instance::
-
-    CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;
-
-    ALTER TABLE foo WITH cdc=true;
-
-    ALTER TABLE foo WITH cdc=false;
-
-cassandra.yaml parameters
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following `cassandra.yaml` options are available for CDC:
-
-``cdc_enabled`` (default: false)
-   Enable or disable CDC operations node-wide.
-``cdc_raw_directory`` (default: ``$CASSANDRA_HOME/data/cdc_raw``)
-   Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
-``cdc_free_space_in_mb`` (default: min of 4096 and 1/8th volume space)
-   Calculated as the sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in
-   ``cdc_raw_directory``.
-``cdc_free_space_check_interval_ms`` (default: 250)
-   When at capacity, we limit the frequency with which we re-calculate the space taken up by ``cdc_raw_directory`` to
-   prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-.. _reading-commitlogsegments:
-
-Reading CommitLogSegments
-^^^^^^^^^^^^^^^^^^^^^^^^^
-This implementation included a refactor of CommitLogReplayer into `CommitLogReader.java `__.
-Usage is `fairly straightforward `__ with a `variety of signatures `__
-available for use. In order to handle mutations read from disk, implement `CommitLogReadHandler `__.
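Putting the node-level settings above together, a ``cassandra.yaml`` fragment for a CDC-enabled node might look like the
following sketch (the directory and sizing values are purely illustrative, not recommendations)::

    cdc_enabled: true
    cdc_raw_directory: /var/lib/cassandra/cdc_raw
    cdc_free_space_in_mb: 4096
    cdc_free_space_check_interval_ms: 250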
- -Warnings -^^^^^^^^ - -**Do not enable CDC without some kind of consumption process in-place.** - -The initial implementation of Change Data Capture does not include a parser (see :ref:`reading-commitlogsegments` above) -so, if CDC is enabled on a node and then on a table, the ``cdc_free_space_in_mb`` will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place. - -Further Reading -^^^^^^^^^^^^^^^ - -- `Design doc `__ -- `JIRA ticket `__ diff --git a/src/doc/3.11.6/_sources/operating/compaction.rst.txt b/src/doc/3.11.6/_sources/operating/compaction.rst.txt deleted file mode 100644 index 0f3900042..000000000 --- a/src/doc/3.11.6/_sources/operating/compaction.rst.txt +++ /dev/null @@ -1,442 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _compaction: - -Compaction ----------- - -Types of compaction -^^^^^^^^^^^^^^^^^^^ - -The concept of compaction is used for different kinds of operations in Cassandra, the common thing about these -operations is that it takes one or more sstables and output new sstables. The types of compactions are; - -Minor compaction - triggered automatically in Cassandra. -Major compaction - a user executes a compaction over all sstables on the node. -User defined compaction - a user triggers a compaction on a given set of sstables. -Scrub - try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you - will need to run a full repair on the node. -Upgradesstables - upgrade sstables to the latest version. Run this after upgrading to a new major version. -Cleanup - remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been - bootstrapped since that node will take ownership of some ranges from those nodes. -Secondary index rebuild - rebuild the secondary indexes on the node. -Anticompaction - after repair the ranges that were actually repaired are split out of the sstables that existed when repair started. -Sub range compaction - It is possible to only compact a given sub range - this could be useful if you know a token that has been - misbehaving - either gathering many updates or many deletes. (``nodetool compact -st x -et y``) will pick - all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will - most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS - the resulting sstable will end up in L0. - -When is a minor compaction triggered? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -# When an sstable is added to the node through flushing/streaming etc. 
-# When autocompaction is enabled after being disabled (``nodetool enableautocompaction``) -# When compaction adds new sstables. -# A check for new minor compactions every 5 minutes. - -Merging sstables -^^^^^^^^^^^^^^^^ - -Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently. - -Tombstones and Garbage Collection (GC) Grace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Why Tombstones -~~~~~~~~~~~~~~ - -When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra. - -Deletes without tombstones -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Imagine a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If one of the nodes fails and and our delete operation only removes existing values we can end up with a cluster that -looks like:: - - [], [], [A] - -Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:: - - [A], [A], [A] - -This would cause our data to be resurrected even though it had been -deleted. - -Deletes with Tombstones -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting again with a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If instead of removing data we add a tombstone record, our single node failure situation will look like this.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A] - -Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]] - -Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as ``gc_grace_seconds`` for every table in Cassandra. - -The gc_grace_seconds parameter and Tombstone Removal -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The table level ``gc_grace_seconds`` parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After ``gc_grace_seconds`` has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstable for a tombstone to be removed. More precisely, to -be able to drop an actual tombstone the following needs to be true; - -- The tombstone must be older than ``gc_grace_seconds`` -- If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older - than the tombstone containing X must be included in the same compaction. 
We don't need to care if the partition is in - an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older - than the data it cannot shadow that data. -- If the option ``only_purge_repaired_tombstones`` is enabled, tombstones are only removed if the data has also been - repaired. - -If a node remains down or disconnected for longer than ``gc_grace_seconds`` it's deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the "Deletes without Tombstones" section. -Note that tombstones will not be removed until a compaction event even if ``gc_grace_seconds`` has elapsed. - -The default value for ``gc_grace_seconds`` is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using ``WITH gc_grace_seconds``. - -TTL -^^^ - -Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least ``gc_grace_seconds``. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once. - -Fully expired sstables -^^^^^^^^^^^^^^^^^^^^^^ - -If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called ``sstableexpiredblockers`` that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -``TimeWindowCompactionStrategy`` (and the deprecated ``DateTieredCompactionStrategy``). - -Repaired/unrepaired data -^^^^^^^^^^^^^^^^^^^^^^^^ - -With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables. - -Data directories -^^^^^^^^^^^^^^^^ - -Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. 
This has a few more benefits than just avoiding
-data getting undeleted:
-
-- It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings
-  and each one can run compactions independently from the others.
-- Users can backup and restore a single data directory.
-- Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk
-  backing two data directories, the big one will be limited by the small one. One workaround for this is to create
-  more data directories backed by the big disk.
-
-Single sstable tombstone compaction
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-When an sstable is written, a histogram of the tombstone expiry times is created and this is used to try to find
-sstables with very many tombstones and run single sstable compaction on that sstable in the hope of being able to drop
-tombstones in that sstable. Before starting this, it is also checked how likely it is that any tombstones will actually
-be able to be dropped, and how much this sstable overlaps with other sstables. To avoid most of these checks the
-compaction option ``unchecked_tombstone_compaction`` can be enabled.
-
-.. _compaction-options:
-
-Common options
-^^^^^^^^^^^^^^
-
-There are a number of common options for all the compaction strategies:
-
-``enabled`` (default: true)
-    Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do
-    'nodetool enableautocompaction' to start running compactions.
-``tombstone_threshold`` (default: 0.2)
-    How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-``tombstone_compaction_interval`` (default: 86400s (1 day))
-    Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure
-    that one sstable is not constantly getting recompacted - this option states how often we should try for a given
-    sstable.
-``log_all`` (default: false)
-    New detailed compaction logging, see :ref:`below `.
-``unchecked_tombstone_compaction`` (default: false)
-    The single sstable compaction has quite strict checks for whether it should be started; this option disables those
-    checks, and for some use cases this might be needed. Note that this does not change anything for the actual
-    compaction: tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able
-    to drop any tombstones.
-``only_purge_repaired_tombstone`` (default: false)
-    Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-``min_threshold`` (default: 4)
-    Lower limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``.
-``max_threshold`` (default: 32)
-    Upper limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``.
-
-Further, see the section on each strategy for specific additional options.
-
-Compaction nodetool commands
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The :ref:`nodetool ` utility provides a number of commands related to compaction:
-
-``enableautocompaction``
-    Enable compaction.
-``disableautocompaction``
-    Disable compaction.
-``setcompactionthroughput``
-    How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this
-    throughput.
-``compactionstats``
-    Statistics about current and pending compactions.
-``compactionhistory`` - List details about the last compactions. -``setcompactionthreshold`` - Set the min/max sstable count for when to trigger compaction, defaults to 4/32. - -Switching the compaction strategy and options using JMX -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:: - - org.apache.cassandra.db:type=ColumnFamilies,keyspace=,columnfamily= - -and the attribute to change is ``CompactionParameters`` or ``CompactionParametersJson`` if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an :ref:`ALTER TABLE ` statement - -for example:: - - { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10} - -The setting is kept until someone executes an :ref:`ALTER TABLE ` that touches the compaction -settings or restarts the node. - -.. _detailed-compaction-logging: - -More detailed compaction logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Enable with the compaction option ``log_all`` and a more detailed compaction log file will be produced in your log -directory. - -.. _STCS: - -Size Tiered Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The basic idea of ``SizeTieredCompactionStrategy`` (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within ``bucket_low`` and ``bucket_high`` of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket's sstables takes the most reads. - -Major compaction -~~~~~~~~~~~~~~~~ - -When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size. - -.. _stcs-options: - -STCS options -~~~~~~~~~~~~ - -``min_sstable_size`` (default: 50MB) - Sstables smaller than this are put in the same bucket. -``bucket_low`` (default: 0.5) - How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``bucket_low * avg_bucket_size < sstable_size`` (and the ``bucket_high`` condition holds, see below), then - the sstable is added to the bucket. -``bucket_high`` (default: 1.5) - How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``sstable_size < bucket_high * avg_bucket_size`` (and the ``bucket_low`` condition holds, see above), then - the sstable is added to the bucket. - -Defragmentation -~~~~~~~~~~~~~~~ - -Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster. - -.. _LCS: - -Leveled Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The idea of ``LeveledCompactionStrategy`` (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. 
By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here. - -When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory. - -When deciding which level to compact LCS checks the higher levels first (with LCS, a "higher" level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level. - -Major compaction -~~~~~~~~~~~~~~~~ - -It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817. - -Bootstrapping -~~~~~~~~~~~~~ - -During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done. - -STCS in L0 -~~~~~~~~~~ - -If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better. - -Starved sstables -~~~~~~~~~~~~~~~~ - -If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable\_size\_in\_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved. - -.. _lcs-options: - -LCS options -~~~~~~~~~~~ - -``sstable_size_in_mb`` (default: 160MB) - The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very - large partitions on the node. 
-
-``fanout_size`` (default: 10)
-    The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning
-    this option.
-
-LCS also supports the ``cassandra.disable_stcs_in_l0`` startup option (``-Dcassandra.disable_stcs_in_l0=true``) to avoid
-doing STCS in L0.
-
-.. _TWCS:
-
-Time Window CompactionStrategy
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-``TimeWindowCompactionStrategy`` (TWCS) is designed specifically for workloads where it's beneficial to have data on
-disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is
-written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the
-same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using
-``SizeTieredCompactionStrategy`` or ``LeveledCompactionStrategy``. The basic concept is that
-``TimeWindowCompactionStrategy`` will create roughly one sstable per time window, where a window is simply calculated
-as the combination of two primary options:
-
-``compaction_window_unit`` (default: DAYS)
-    A Java TimeUnit (MINUTES, HOURS, or DAYS).
-``compaction_window_size`` (default: 1)
-    The number of units that make up a window.
-
-Taken together, the operator can specify windows of virtually any size, and `TimeWindowCompactionStrategy` will work to
-create a single sstable for writes within that window. For efficiency during writing, the newest window will be
-compacted using `SizeTieredCompactionStrategy`.
-
-Ideally, operators should select a ``compaction_window_unit`` and ``compaction_window_size`` pair that produces
-approximately 20-30 windows - if writing with a 90-day TTL, for example, a 3-day window would be a reasonable choice
-(``'compaction_window_unit':'DAYS','compaction_window_size':3``).
-
-TimeWindowCompactionStrategy Operational Concerns
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop
-more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of
-order, with new data and old data in the same SSTable. Out of order data can appear in two ways:
-
-- If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables
-  and flushed into the same SSTable, where it will remain comingled.
-- If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data
-  will be comingled and flushed into the same SSTable.
-
-While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically,
-users should avoid queries that explicitly set the timestamp via CQL ``USING TIMESTAMP``. Additionally, users should run
-frequent repairs (which stream data in such a way that it does not become comingled), and disable background read
-repair by setting the table's ``read_repair_chance`` and ``dclocal_read_repair_chance`` to 0.
-
-Changing TimeWindowCompactionStrategy Options
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Operators wishing to enable ``TimeWindowCompactionStrategy`` on existing data should consider running a major compaction
-first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables
-as expected.
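As a concrete sketch (the keyspace and table names are hypothetical), switching an existing table to TWCS with 3-day
windows could look like::

    ALTER TABLE keyspace.table WITH compaction = {
        'class': 'TimeWindowCompactionStrategy',
        'compaction_window_unit': 'DAYS',
        'compaction_window_size': 3
    };

A major compaction of that table (for example with ``nodetool compact``) then places the pre-existing data into a single
old window, as described above.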
-
-Operators wishing to change ``compaction_window_unit`` or ``compaction_window_size`` can do so, but may trigger
-additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24
-hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple
-windows.
diff --git a/src/doc/3.11.6/_sources/operating/compression.rst.txt b/src/doc/3.11.6/_sources/operating/compression.rst.txt
deleted file mode 100644
index 01da34b6d..000000000
--- a/src/doc/3.11.6/_sources/operating/compression.rst.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-.. highlight:: none
-
-Compression
------------
-
-Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of
-data on disk by compressing SSTables in user-configurable chunks of ``chunk_length_in_kb``. Because Cassandra
-SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates
-to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when
-UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full
-chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so
-on).
-
-Configuring Compression
-^^^^^^^^^^^^^^^^^^^^^^^
-
-Compression is configured on a per-table basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. By
-default, three options are relevant:
-
-- ``class`` specifies the compression class - Cassandra provides three classes (``LZ4Compressor``,
-  ``SnappyCompressor``, and ``DeflateCompressor``). The default is ``LZ4Compressor``.
-- ``chunk_length_in_kb`` specifies the number of kilobytes of data per compression chunk. The default is 64KB.
-- ``crc_check_chance`` determines how likely Cassandra is to verify the checksum on each compression chunk during
-  reads. The default is 1.0.
-
-Users can set compression using the following syntax:
-
-::
-
-    CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-Or
-
-::
-
-    ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};
-
-Once enabled, compression can be disabled with ``ALTER TABLE`` setting ``enabled`` to ``false``:
-
-::
-
-    ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-Operators should be aware, however, that changing compression is not immediate.
The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ``ALTER TABLE``, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the SSTables on disk, -re-compressing the data in the process. - -Benefits and Uses -^^^^^^^^^^^^^^^^^ - -Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk. - -Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well. - -Operational Impact -^^^^^^^^^^^^^^^^^^ - -- Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per - terabyte of data on disk, though the exact usage varies with ``chunk_length_in_kb`` and compression ratios. - -- Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as - non-vnode bootstrap), the CPU overhead of compression can be a limiting factor. - -- The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a - way to ensure correctness of data on disk, compressed tables allow the user to set ``crc_check_chance`` (a float from - 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt. - -Advanced Use -^^^^^^^^^^^^ - -Advanced users can provide their own compression class by implementing the interface at -``org.apache.cassandra.io.compress.ICompressor``. diff --git a/src/doc/3.11.6/_sources/operating/hardware.rst.txt b/src/doc/3.11.6/_sources/operating/hardware.rst.txt deleted file mode 100644 index ad3aa8d21..000000000 --- a/src/doc/3.11.6/_sources/operating/hardware.rst.txt +++ /dev/null @@ -1,87 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Hardware Choices ----------------- - -Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. 
Typical production servers have 8 or more cores and at least -32GB of RAM. - -CPU -^^^ -Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes. - -Memory -^^^^^^ -Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java's Xmx system parameter). In addition to -the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and -counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system's page -cache, storing recently accessed portions files in RAM for rapid re-use. - -For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest: - -- ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption -- The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM -- Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection -- Heaps larger than 12GB should consider G1GC - -Disks -^^^^^ -Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables. - -Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files. - -Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra's sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it's important that the commitlog -(``commitlog_directory``) be on one physical disk (not simply a partition, but a physical disk), and the data files -(``data_file_directories``) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk. - -In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. 
Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it's typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5. - -Common Cloud Choices -^^^^^^^^^^^^^^^^^^^^ - -Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include: - -- m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate - workloads -- i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs -- m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) - storage - -Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives. diff --git a/src/doc/3.11.6/_sources/operating/hints.rst.txt b/src/doc/3.11.6/_sources/operating/hints.rst.txt deleted file mode 100644 index f79f18ab7..000000000 --- a/src/doc/3.11.6/_sources/operating/hints.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Hints ------ - -.. todo:: todo diff --git a/src/doc/3.11.6/_sources/operating/index.rst.txt b/src/doc/3.11.6/_sources/operating/index.rst.txt deleted file mode 100644 index e2cead255..000000000 --- a/src/doc/3.11.6/_sources/operating/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Operating Cassandra -=================== - -.. 
toctree:: - :maxdepth: 2 - - snitch - topo_changes - repair - read_repair - hints - compaction - bloom_filters - compression - cdc - backups - bulk_loading - metrics - security - hardware - diff --git a/src/doc/3.11.6/_sources/operating/metrics.rst.txt b/src/doc/3.11.6/_sources/operating/metrics.rst.txt deleted file mode 100644 index 04abb48e9..000000000 --- a/src/doc/3.11.6/_sources/operating/metrics.rst.txt +++ /dev/null @@ -1,706 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Monitoring ----------- - -Metrics in Cassandra are managed using the `Dropwizard Metrics `__ library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of `built in -`__ and `third party -`__ reporter plugins. - -Metrics are collected for a single node. It's up to the operator to use an external monitoring system to aggregate them. - -Metric Types -^^^^^^^^^^^^ -All metrics reported by cassandra fit into one of the following types. - -``Gauge`` - An instantaneous measurement of a value. - -``Counter`` - A gauge for an ``AtomicLong`` instance. Typically this is consumed by monitoring the change since the last call to - see if there is a large increase compared to the norm. - -``Histogram`` - Measures the statistical distribution of values in a stream of data. - - In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th - percentiles. - -``Timer`` - Measures both the rate that a particular piece of code is called and the histogram of its duration. - -``Latency`` - Special type that tracks latency (in microseconds) with a ``Timer`` plus a ``Counter`` that tracks the total latency - accrued since starting. The former is useful if you track the change in total latency since the last check. Each - metric name of this type will have 'Latency' and 'TotalLatency' appended to it. - -``Meter`` - A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving - average throughputs. - -Table Metrics -^^^^^^^^^^^^^ - -Each table in Cassandra has metrics responsible for tracking its state and performance. - -The metric names are all appended with the specific ``Keyspace`` and ``Table`` name. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Table...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Table keyspace= scope=
name=`` - -.. NOTE:: - There is a special table called '``all``' without a keyspace. This represents the aggregation of metrics across - **all** tables and keyspaces on the node. - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -MemtableOnHeapSize Gauge Total amount of data stored in the memtable that resides **on**-heap, including column related overhead and partitions overwritten. -MemtableOffHeapSize Gauge Total amount of data stored in the memtable that resides **off**-heap, including column related overhead and partitions overwritten. -MemtableLiveDataSize Gauge Total amount of live data stored in the memtable, excluding any data structure overhead. -AllMemtablesOnHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **on**-heap. -AllMemtablesOffHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **off**-heap. -AllMemtablesLiveDataSize Gauge Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead. -MemtableColumnsCount Gauge Total number of columns present in the memtable. -MemtableSwitchCount Counter Number of times flush has resulted in the memtable being switched out. -CompressionRatio Gauge Current compression ratio for all SSTables. -EstimatedPartitionSizeHistogram Gauge Histogram of estimated partition size (in bytes). -EstimatedPartitionCount Gauge Approximate number of keys in table. -EstimatedColumnCountHistogram Gauge Histogram of estimated number of columns. -SSTablesPerReadHistogram Histogram Histogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into acoount. -ReadLatency Latency Local read latency for this table. -RangeLatency Latency Local range scan latency for this table. -WriteLatency Latency Local write latency for this table. -CoordinatorReadLatency Timer Coordinator read latency for this table. -CoordinatorScanLatency Timer Coordinator range scan latency for this table. -PendingFlushes Counter Estimated number of flush tasks pending for this table. -BytesFlushed Counter Total number of bytes flushed since server [re]start. -CompactionBytesWritten Counter Total number of bytes written by compaction since server [re]start. -PendingCompactions Gauge Estimate of number of pending compactions for this table. -LiveSSTableCount Gauge Number of SSTables on disk for this table. -LiveDiskSpaceUsed Counter Disk space used by SSTables belonging to this table (in bytes). -TotalDiskSpaceUsed Counter Total disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC'd. -MinPartitionSize Gauge Size of the smallest compacted partition (in bytes). -MaxPartitionSize Gauge Size of the largest compacted partition (in bytes). -MeanPartitionSize Gauge Size of the average compacted partition (in bytes). -BloomFilterFalsePositives Gauge Number of false positives on table's bloom filter. -BloomFilterFalseRatio Gauge False positive ratio of table's bloom filter. -BloomFilterDiskSpaceUsed Gauge Disk space used by bloom filter (in bytes). -BloomFilterOffHeapMemoryUsed Gauge Off-heap memory used by bloom filter. -IndexSummaryOffHeapMemoryUsed Gauge Off-heap memory used by index summary. 
-CompressionMetadataOffHeapMemoryUsed Gauge Off-heap memory used by compression meta data. -KeyCacheHitRate Gauge Key cache hit rate for this table. -TombstoneScannedHistogram Histogram Histogram of tombstones scanned in queries on this table. -LiveScannedHistogram Histogram Histogram of live cells scanned in queries on this table. -ColUpdateTimeDeltaHistogram Histogram Histogram of column update time delta on this table. -ViewLockAcquireTime Timer Time taken acquiring a partition lock for materialized view updates on this table. -ViewReadTime Timer Time taken during the local read of a materialized view update. -TrueSnapshotsSize Gauge Disk space used by snapshots of this table including all SSTable components. -RowCacheHitOutOfRange Counter Number of table row cache hits that do not satisfy the query filter, thus went to disk. -RowCacheHit Counter Number of table row cache hits. -RowCacheMiss Counter Number of table row cache misses. -CasPrepare Latency Latency of paxos prepare round. -CasPropose Latency Latency of paxos propose round. -CasCommit Latency Latency of paxos commit round. -PercentRepaired Gauge Percent of table data that is repaired on disk. -SpeculativeRetries Counter Number of times speculative retries were sent for this table. -WaitingOnFreeMemtableSpace Histogram Histogram of time spent waiting for free memtable space, either on- or off-heap. -DroppedMutations Counter Number of dropped mutations on this table. -======================================= ============== =========== - -Keyspace Metrics -^^^^^^^^^^^^^^^^ -Each keyspace in Cassandra has metrics responsible for tracking its state and performance. - -These metrics are the same as the ``Table Metrics`` above, only they are aggregated at the Keyspace level. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.keyspace..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Keyspace scope= name=`` - -ThreadPool Metrics -^^^^^^^^^^^^^^^^^^ - -Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It's important to monitor the state of these thread pools since they can tell you how saturated a -node is. - -The metric names are all appended with the specific ``ThreadPool`` name. The thread pools are also categorized under a -specific type. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ThreadPools...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ThreadPools scope= type= name=`` - -===================== ============== =========== -Name Type Description -===================== ============== =========== -ActiveTasks Gauge Number of tasks being actively worked on by this pool. -PendingTasks Gauge Number of queued tasks queued up on this pool. -CompletedTasks Counter Number of tasks completed. -TotalBlockedTasks Counter Number of tasks that were blocked due to queue saturation. -CurrentlyBlockedTask Counter Number of tasks that are currently blocked due to queue saturation but on retry will become unblocked. -MaxPoolSize Gauge The maximum number of threads in this pool. -===================== ============== =========== - -The following thread pools can be monitored. 
- -============================ ============== =========== -Name Type Description -============================ ============== =========== -Native-Transport-Requests transport Handles client CQL requests -CounterMutationStage request Responsible for counter writes -ViewMutationStage request Responsible for materialized view writes -MutationStage request Responsible for all other writes -ReadRepairStage request ReadRepair happens on this thread pool -ReadStage request Local reads run on this thread pool -RequestResponseStage request Coordinator requests to the cluster run on this thread pool -AntiEntropyStage internal Builds merkle tree for repairs -CacheCleanupExecutor internal Cache maintenance performed on this thread pool -CompactionExecutor internal Compactions are run on these threads -GossipStage internal Handles gossip requests -HintsDispatcher internal Performs hinted handoff -InternalResponseStage internal Responsible for intra-cluster callbacks -MemtableFlushWriter internal Writes memtables to disk -MemtablePostFlush internal Cleans up commit log after memtable is written to disk -MemtableReclaimMemory internal Memtable recycling -MigrationStage internal Runs schema migrations -MiscStage internal Misceleneous tasks run here -PendingRangeCalculator internal Calculates token range -PerDiskMemtableFlushWriter_0 internal Responsible for writing a spec (there is one of these per disk 0-N) -Sampler internal Responsible for re-sampling the index summaries of SStables -SecondaryIndexManagement internal Performs updates to secondary indexes -ValidationExecutor internal Performs validation compaction or scrubbing -============================ ============== =========== - -.. |nbsp| unicode:: 0xA0 .. nonbreaking space - -Client Request Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Client requests have their own set of metrics that encapsulate the work happening at coordinator level. - -Different types of client requests are broken down by ``RequestType``. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ClientRequest..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ClientRequest scope= name=`` - - -:RequestType: CASRead -:Description: Metrics related to transactional read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction read latency. - Unavailables Counter Number of unavailable exceptions encountered. - UnfinishedCommit Counter Number of transactions that were committed on read. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended reads were encountered - ===================== ============== ============================================================= - -:RequestType: CASWrite -:Description: Metrics related to transactional write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction write latency. 
- UnfinishedCommit Counter Number of transactions that were committed on write. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended writes were encountered - ===================== ============== ============================================================= - - -:RequestType: Read -:Description: Metrics related to standard read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of read failures encountered. - |nbsp| Latency Read latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: RangeSlice -:Description: Metrics related to token range read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of range query failures encountered. - |nbsp| Latency Range query latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: Write -:Description: Metrics related to regular write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of write failures encountered. - |nbsp| Latency Write latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - - -:RequestType: ViewWrite -:Description: Metrics related to materialized view write wrtes. -:Metrics: - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - Unavailables Counter Number of unavailable exceptions encountered. - ViewReplicasAttempted Counter Total number of attempted view replica writes. - ViewReplicasSuccess Counter Total number of succeded view replica writes. - ViewPendingMutations Gauge ViewReplicasAttempted - ViewReplicasSuccess. - ViewWriteLatency Timer Time between when mutation is applied to base table and when CL.ONE is achieved on view. - ===================== ============== ============================================================= - -Cache Metrics -^^^^^^^^^^^^^ - -Cassandra caches have metrics to track the effectivness of the caches. Though the ``Table Metrics`` might be more useful. 
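For a quick node-local spot check of cache effectiveness, the hit rates are also summarised by ``nodetool`` (a convenience only; the metrics described below remain the authoritative source, and the exact output format varies between versions)::

    # print the key/row/counter/chunk cache summary lines, including recent hit rates
    $ nodetool info | grep -i cache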
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Cache..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Cache scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Cache capacity in bytes. -Entries Gauge Total number of cache entries. -FifteenMinuteCacheHitRate Gauge 15m cache hit rate. -FiveMinuteCacheHitRate Gauge 5m cache hit rate. -OneMinuteCacheHitRate Gauge 1m cache hit rate. -HitRate Gauge All time cache hit rate. -Hits Meter Total number of cache hits. -Misses Meter Total number of cache misses. -MissLatency Timer Latency of misses. -Requests Gauge Total number of cache requests. -Size Gauge Total size of occupied cache, in bytes. -========================== ============== =========== - -The following caches are covered: - -============================ =========== -Name Description -============================ =========== -CounterCache Keeps hot counters in memory for performance. -ChunkCache In process uncompressed page cache. -KeyCache Cache for partition to sstable offsets. -RowCache Cache for rows kept in memory. -============================ =========== - -.. NOTE:: - Misses and MissLatency are only defined for the ChunkCache - -CQL Metrics -^^^^^^^^^^^ - -Metrics specific to CQL prepared statement caching. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CQL.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CQL name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -PreparedStatementsCount Gauge Number of cached prepared statements. -PreparedStatementsEvicted Counter Number of prepared statements evicted from the prepared statement cache -PreparedStatementsExecuted Counter Number of prepared statements executed. -RegularStatementsExecuted Counter Number of **non** prepared statements executed. -PreparedStatementsRatio Gauge Percentage of statements that are prepared vs unprepared. -========================== ============== =========== - - -DroppedMessage Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by ``Hinted Handoff`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.DroppedMessages..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=DroppedMetrics scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CrossNodeDroppedLatency Timer The dropped latency across nodes. -InternalDroppedLatency Timer The dropped latency within node. -Dropped Meter Number of dropped messages. -========================== ============== =========== - -The different types of messages tracked are: - -============================ =========== -Name Description -============================ =========== -BATCH_STORE Batchlog write -BATCH_REMOVE Batchlog cleanup (after succesfully applied) -COUNTER_MUTATION Counter writes -HINT Hint replay -MUTATION Regular writes -READ Regular reads -READ_REPAIR Read repair -PAGED_SLICE Paged read -RANGE_SLICE Token range read -REQUEST_RESPONSE RPC Callbacks -_TRACE Tracing writes -============================ =========== - -Streaming Metrics -^^^^^^^^^^^^^^^^^ - -Metrics reported during ``Streaming`` operations, such as repair, bootstrap, rebuild. 
- -These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Streaming..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Streaming scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -IncomingBytes Counter Number of bytes streamed to this node from the peer. -OutgoingBytes Counter Number of bytes streamed to the peer endpoint from this node. -========================== ============== =========== - - -Compaction Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to ``Compaction`` work. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Compaction.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Compaction name=`` - -========================== ======================================== =============================================== -Name Type Description -========================== ======================================== =============================================== -BytesCompacted Counter Total number of bytes compacted since server [re]start. -PendingTasks Gauge Estimated number of compactions remaining to perform. -CompletedTasks Gauge Number of completed compactions since server [re]start. -TotalCompactionsCompleted Meter Throughput of completed compactions since server [re]start. -PendingTasksByTableName Gauge>> Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in ``Table Metrics``. -========================== ======================================== =============================================== - -CommitLog Metrics -^^^^^^^^^^^^^^^^^ - -Metrics specific to the ``CommitLog`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CommitLog.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CommitLog name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CompletedTasks Gauge Total number of commit log messages written since [re]start. -PendingTasks Gauge Number of commit log messages written but yet to be fsync'd. -TotalCommitLogSize Gauge Current size, in bytes, used by all the commit log segments. -WaitingOnSegmentAllocation Timer Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero. -WaitingOnCommit Timer The time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval. -========================== ============== =========== - -Storage Metrics -^^^^^^^^^^^^^^^ - -Metrics specific to the storage engine. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Storage.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Storage name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Exceptions Counter Number of internal exceptions caught. Under normal exceptions this should be zero. -Load Counter Size, in bytes, of the on disk data size this node manages. -TotalHints Counter Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint. -TotalHintsInProgress Counter Number of hints attemping to be sent currently. 
-========================== ============== =========== - -HintedHandoff Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in ``Storage Metrics`` - -These metrics include the peer endpoint **in the metric name** - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintedHandOffManager.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintedHandOffManager name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Hints_created- Counter Number of hints on disk for this peer. -Hints_not_stored- Counter Number of hints not stored for this peer, due to being down past the configured hint window. -=========================== ============== =========== - -SSTable Index Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the SSTable index metadata. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Index..RowIndexEntry`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -IndexedEntrySize Histogram Histogram of the on-heap size, in bytes, of the index across all SSTables. -IndexInfoCount Histogram Histogram of the number of on-heap index entries managed across all SSTables. -IndexInfoGets Histogram Histogram of the number index seeks performed per SSTable. -=========================== ============== =========== - -BufferPool Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.BufferPool.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=BufferPool name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Size Gauge Size, in bytes, of the managed buffer pool -Misses Meter The rate of misses in the pool. The higher this is the more allocations incurred. -=========================== ============== =========== - - -Client Metrics -^^^^^^^^^^^^^^ - -Metrics specifc to client managment. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Client.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Client name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -connectedNativeClients Counter Number of clients connected to this nodes native protocol server -connectedThriftClients Counter Number of clients connected to this nodes thrift protocol server -=========================== ============== =========== - -JVM Metrics -^^^^^^^^^^^ - -JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using `Metric Reporters`_. 
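For ad-hoc checks, a subset of these JVM figures can also be sampled from the command line with ``nodetool`` instead of a JMX client (a convenience only, not a replacement for exporting the metrics below to a monitoring system)::

    # garbage collection pause statistics accumulated since the previous invocation
    $ nodetool gcstats

    # current and maximum heap usage, plus off-heap usage
    $ nodetool info | grep -i heap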
- -BufferPool -++++++++++ - -**Metric Name** - ``jvm.buffers..`` - -**JMX MBean** - ``java.nio:type=BufferPool name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Estimated total capacity of the buffers in this pool -Count Gauge Estimated number of buffers in the pool -Used Gauge Estimated memory that the Java virtual machine is using for this buffer pool -========================== ============== =========== - -FileDescriptorRatio -+++++++++++++++++++ - -**Metric Name** - ``jvm.fd.`` - -**JMX MBean** - ``java.lang:type=OperatingSystem name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Usage Ratio Ratio of used to total file descriptors -========================== ============== =========== - -GarbageCollector -++++++++++++++++ - -**Metric Name** - ``jvm.gc..`` - -**JMX MBean** - ``java.lang:type=GarbageCollector name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Count Gauge Total number of collections that have occurred -Time Gauge Approximate accumulated collection elapsed time in milliseconds -========================== ============== =========== - -Memory -++++++ - -**Metric Name** - ``jvm.memory..`` - -**JMX MBean** - ``java.lang:type=Memory`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -MemoryPool -++++++++++ - -**Metric Name** - ``jvm.memory.pools..`` - -**JMX MBean** - ``java.lang:type=MemoryPool name=`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -JMX -^^^ - -Any JMX based client can access metrics from cassandra. - -If you wish to access JMX metrics over http it's possible to download `Mx4jTool `__ and -place ``mx4j-tools.jar`` into the classpath. On startup you will see in the log:: - - HttpAdaptor version 3.0.2 started on port 8081 - -To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -``conf/cassandra-env.sh`` and uncomment:: - - #MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0" - - #MX4J_PORT="-Dmx4jport=8081" - - -Metric Reporters -^^^^^^^^^^^^^^^^ - -As mentioned at the top of this section on monitoring the Cassandra metrics can be exported to a number of monitoring -system a number of `built in `__ and `third party -`__ reporter plugins. - -The configuration of these plugins is managed by the `metrics reporter config project -`__. There is a sample configuration file located at -``conf/metrics-reporter-config-sample.yaml``. 
- -Once configured, you simply start cassandra with the flag -``-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml``. The specified .yaml file plus any 3rd party -reporter jars must all be in Cassandra's classpath. diff --git a/src/doc/3.11.6/_sources/operating/read_repair.rst.txt b/src/doc/3.11.6/_sources/operating/read_repair.rst.txt deleted file mode 100644 index 0e52bf523..000000000 --- a/src/doc/3.11.6/_sources/operating/read_repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Read repair ------------ - -.. todo:: todo diff --git a/src/doc/3.11.6/_sources/operating/repair.rst.txt b/src/doc/3.11.6/_sources/operating/repair.rst.txt deleted file mode 100644 index 97d8ce8ba..000000000 --- a/src/doc/3.11.6/_sources/operating/repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Repair ------- - -.. todo:: todo diff --git a/src/doc/3.11.6/_sources/operating/security.rst.txt b/src/doc/3.11.6/_sources/operating/security.rst.txt deleted file mode 100644 index dfcd9e6c5..000000000 --- a/src/doc/3.11.6/_sources/operating/security.rst.txt +++ /dev/null @@ -1,410 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -.. highlight:: none - -Security --------- - -There are three main components to the security features provided by Cassandra: - -- TLS/SSL encryption for client and inter-node communication -- Client authentication -- Authorization - -TLS/SSL Encryption -^^^^^^^^^^^^^^^^^^ -Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently. - -In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can -be overidden using the settings in ``cassandra.yaml``, but this is not recommended unless there are policies in place -which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be -updated. - -FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See `the java document on FIPS `__ -for more details. - -For information on generating the keystore and truststore files used in SSL communications, see the -`java documentation on creating keystores `__ - -Inter-node Encryption -~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing inter-node encryption are found in ``cassandra.yaml`` in the ``server_encryption_options`` -section. To enable inter-node encryption, change the ``internode_encryption`` setting from its default value of ``none`` -to one value from: ``rack``, ``dc`` or ``all``. - -Client to Node Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing client to node encryption are found in ``cassandra.yaml`` in the ``client_encryption_options`` -section. There are two primary toggles here for enabling encryption, ``enabled`` and ``optional``. - -- If neither is set to ``true``, client connections are entirely unencrypted. -- If ``enabled`` is set to ``true`` and ``optional`` is set to ``false``, all client connections must be secured. -- If both options are set to ``true``, both encrypted and unencrypted connections are supported using the same port. - Client connections using encryption with this configuration will be automatically detected and handled by the server. - -As an alternative to the ``optional`` setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set ``optional`` to false and use the ``native_transport_port_ssl`` -setting in ``cassandra.yaml`` to specify the port to be used for secure client communication. - -.. _operation-roles: - -Roles -^^^^^ - -Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -``role_manager`` setting in ``cassandra.yaml``. The default setting uses ``CassandraRoleManager``, an implementation -which stores role information in the tables of the ``system_auth`` keyspace. - -See also the :ref:`CQL documentation on roles `. - -Authentication -^^^^^^^^^^^^^^ - -Authentication is pluggable in Cassandra and is configured using the ``authenticator`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. 
- -By default, Cassandra is configured with ``AllowAllAuthenticator`` which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions. - -The default distribution also includes ``PasswordAuthenticator``, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication. - -.. _password-authentication: - -Enabling Password Authentication -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster. - -Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps: - -1. Open a ``cqlsh`` session and change the replication factor of the ``system_auth`` keyspace. By default, this keyspace - uses ``SimpleReplicationStrategy`` and a ``replication_factor`` of 1. It is recommended to change this for any - non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to - configure a replication factor of 3 to 5 per-DC. - -:: - - ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3}; - -2. Edit ``cassandra.yaml`` to change the ``authenticator`` option like so: - -:: - - authenticator: PasswordAuthenticator - -3. Restart the node. - -4. Open a new ``cqlsh`` session using the credentials of the default superuser: - -:: - - cqlsh -u cassandra -p cassandra - -5. During login, the credentials for the default superuser are read with a consistency level of ``QUORUM``, whereas - those for all other users (including superusers) are read at ``LOCAL_ONE``. In the interests of performance and - availability, as well as security, operators should create another superuser and disable the default one. This step - is optional, but highly recommended. While logged in as the default superuser, create another superuser role which - can be used to bootstrap further configuration. - -:: - - # create a new superuser - CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super'; - -6. Start a new cqlsh session, this time logging in as the new_superuser and disable the default superuser. - -:: - - ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false; - -7. Finally, set up the roles and credentials for your application users with :ref:`CREATE ROLE ` - statements. - -At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. 
Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster. - -Note that using ``PasswordAuthenticator`` also requires the use of :ref:`CassandraRoleManager `. - -See also: :ref:`setting-credentials-for-internal-authentication`, :ref:`CREATE ROLE `, -:ref:`ALTER ROLE `, :ref:`ALTER KEYSPACE ` and :ref:`GRANT PERMISSION -`, - -Authorization -^^^^^^^^^^^^^ - -Authorization is pluggable in Cassandra and is configured using the ``authorizer`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthorizer`` which performs no checking and so effectively grants all -permissions to all roles. This must be used if ``AllowAllAuthenticator`` is the configured authenticator. - -The default distribution also includes ``CassandraAuthorizer``, which does implement full permissions management -functionality and stores its data in Cassandra system tables. - -Enabling Internal Authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests. - -The following assumes that authentication has already been enabled via the process outlined in -:ref:`password-authentication`. Perform these steps to enable internal authorization across the cluster: - -1. On the selected node, edit ``cassandra.yaml`` to change the ``authorizer`` option like so: - -:: - - authorizer: CassandraAuthorizer - -2. Restart the node. - -3. Open a new ``cqlsh`` session using the credentials of a role with superuser credentials: - -:: - - cqlsh -u dba -p super - -4. Configure the appropriate access privileges for your clients using `GRANT PERMISSION `_ - statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so - disruption to clients is avoided. - -:: - - GRANT SELECT ON ks.t1 TO db_user; - -5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node - restarts and clients reconnect, the enforcement of the granted permissions will begin. - -See also: :ref:`GRANT PERMISSION `, `GRANT ALL ` and :ref:`REVOKE PERMISSION -` - -Caching -^^^^^^^ - -Enabling authentication and authorization places additional load on the cluster by frequently reading from the -``system_auth`` tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from ``cassandra.yaml`` -or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from ``cassandra.yaml`` when the node is restarted. - -Each cache has 3 options which can be set: - -Validity Period - Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache. -Refresh Rate - Controls the rate at which background reads are performed to pick up any changes to the underlying data. 
While these - async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a - shorter time than the validity period. -Max Entries - Controls the upper bound on cache size. - -The naming for these options in ``cassandra.yaml`` follows the convention: - -* ``_validity_in_ms`` -* ``_update_interval_in_ms`` -* ``_cache_max_entries`` - -Where ```` is one of ``credentials``, ``permissions``, or ``roles``. - -As mentioned, these are also exposed via JMX in the mbeans under the ``org.apache.cassandra.auth`` domain. - -JMX access -^^^^^^^^^^ - -Access control for JMX clients is configured separately from that for CQL. For both authentication and authorization, two -providers are available; the first is based on standard JMX security and the second integrates more closely with -Cassandra's own auth subsystem. - -The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to change the ``LOCAL_JMX`` setting to ``yes``. Under the -standard configuration, when remote JMX connections are enabled, :ref:`standard JMX authentication ` -is also switched on. - -Note that by default, local-only connections are not subject to authentication, but this can be enabled. - -If enabling remote connections, it is recommended to also use :ref:`SSL ` connections. - -Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as :ref:`nodetool `, are -correctly configured and working as expected. - -.. _standard-jmx-auth: - -Standard JMX Auth -~~~~~~~~~~~~~~~~~ - -Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -``cassandra-env.sh`` by the line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -Edit the password file to add username/password pairs: - -:: - - jmx_user jmx_password - -Secure the credentials file so that only the user running the Cassandra process can read it: - -:: - - $ chown cassandra:cassandra /etc/cassandra/jmxremote.password - $ chmod 400 /etc/cassandra/jmxremote.password - -Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in ``cassandra-env.sh``: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -Then edit the access file to grant your JMX user readwrite permission: - -:: - - jmx_user readwrite - -Cassandra must be restarted to pick up the new settings. - -See also: `Using File-Based Password Authentication In JMX -`__ - - -Cassandra Integrated Auth -~~~~~~~~~~~~~~~~~~~~~~~~~ - -An alternative to the out-of-the-box JMX auth is to use Cassandra's own authentication and/or authorization providers -for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not -available until `after` a node has joined the ring, because the auth subsystem is not fully configured until that point. -However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap.
So it is -recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, -to switch to integrated auth once the node has joined the ring and initial setup is complete. - -With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates -can be managed centrally using just ``cqlsh``. Furthermore, fine grained control over exactly which operations are -permitted on particular MBeans can be acheived via :ref:`GRANT PERMISSION `. - -To enable integrated authentication, edit ``cassandra-env.sh`` to uncomment these lines: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" - #JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" - -And disable the JMX standard auth by commenting this line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -To enable integrated authorization, uncomment this line: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" - -Check standard access control is off by ensuring this line is commented out: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as: - -:: - - CREATE ROLE jmx WITH LOGIN = false; - GRANT SELECT ON ALL MBEANS TO jmx; - GRANT DESCRIBE ON ALL MBEANS TO jmx; - GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx; - GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx; - - # Grant the jmx role to one with login permissions so that it can access the JMX tooling - CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false; - GRANT jmx TO ks_user; - -Fine grained access control to individual MBeans is also supported: - -:: - - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner; - -This permits the ``ks_user`` role to invoke methods on the MBean representing a single table in ``test_keyspace``, while -granting the same permission for all table level MBeans in that keyspace to the ``ks_owner`` role. - -Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered. - -See also: :ref:`Permissions `. - -.. _jmx-with-ssl: - -JMX With SSL -~~~~~~~~~~~~ - -JMX SSL configuration is controlled by a number of system properties, some of which are optional. 
To turn on SSL, edit -the relevant lines in ``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to uncomment and set the values of these -properties as required: - -``com.sun.management.jmxremote.ssl`` - set to true to enable SSL -``com.sun.management.jmxremote.ssl.need.client.auth`` - set to true to enable validation of client certificates -``com.sun.management.jmxremote.registry.ssl`` - enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub -``com.sun.management.jmxremote.ssl.enabled.protocols`` - by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is - not usually necessary and using the defaults is the preferred option. -``com.sun.management.jmxremote.ssl.enabled.cipher.suites`` - by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that - this is not usually necessary and using the defaults is the preferred option. -``javax.net.ssl.keyStore`` - set the path on the local filesystem of the keystore containing server private keys and public certificates -``javax.net.ssl.keyStorePassword`` - set the password of the keystore file -``javax.net.ssl.trustStore`` - if validation of client certificates is required, use this property to specify the path of the truststore containing - the public certificates of trusted clients -``javax.net.ssl.trustStorePassword`` - set the password of the truststore file - -See also: `Oracle Java7 Docs `__, -`Monitor Java with JMX `__ diff --git a/src/doc/3.11.6/_sources/operating/snitch.rst.txt b/src/doc/3.11.6/_sources/operating/snitch.rst.txt deleted file mode 100644 index faea0b3e1..000000000 --- a/src/doc/3.11.6/_sources/operating/snitch.rst.txt +++ /dev/null @@ -1,78 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Snitch ------- - -In cassandra, the snitch has two functions: - -- it teaches Cassandra enough about your network topology to route requests efficiently. -- it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping - machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same - "rack" (which may not actually be a physical location). - -Dynamic snitching -^^^^^^^^^^^^^^^^^ - -The dynamic snitch monitor read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is -configured with the following properties on ``cassandra.yaml``: - -- ``dynamic_snitch``: whether the dynamic snitch should be enabled or disabled. -- ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the more expensive part of host score - calculation. 
-- ``dynamic_snitch_reset_interval_in_ms``: controls how often to reset all host scores, allowing a bad host to - possibly recover. -- ``dynamic_snitch_badness_threshold``: if set greater than zero and read_repair_chance is < 1.0, this will allow - 'pinning' of replicas to hosts in order to increase cache capacity. The badness threshold controls how much worse the - pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which - represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned - host was 20% worse than the fastest. - -Snitch classes -^^^^^^^^^^^^^^ - -The ``endpoint_snitch`` parameter in ``cassandra.yaml`` should be set to the class that implements -``IEndpointSnitch``, which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the following snitch implementations: - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via gossip. If ``cassandra-topology.properties`` exists, - it is used as a fallback, allowing migration from the PropertyFileSnitch. - -SimpleSnitch - Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -PropertyFileSnitch - Proximity is determined by rack and data center, which are explicitly configured in - ``cassandra-topology.properties``. - -Ec2Snitch - Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. - The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this - will not work across multiple regions. - -Ec2MultiRegionSnitch - Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the - public IP as well). You will need to open the ``storage_port`` or ``ssl_storage_port`` on the public IP firewall - (for intra-Region traffic, Cassandra will switch to the private IP after establishing a connection). - -RackInferringSnitch - Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each - node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an - example of writing a custom Snitch class and is provided in that spirit. diff --git a/src/doc/3.11.6/_sources/operating/topo_changes.rst.txt b/src/doc/3.11.6/_sources/operating/topo_changes.rst.txt deleted file mode 100644 index c42708e02..000000000 --- a/src/doc/3.11.6/_sources/operating/topo_changes.rst.txt +++ /dev/null @@ -1,124 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _topology-changes: - -Adding, replacing, moving and removing nodes -------------------------------------------- - -Bootstrap -^^^^^^^^^ - -Adding new nodes is called "bootstrapping". The ``num_tokens`` parameter will define the number of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for. - -Token allocation -~~~~~~~~~~~~~~~~ - -With the default token allocation algorithm the new node will pick ``num_tokens`` random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead. - -On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option ``-Dcassandra.allocate_tokens_for_keyspace=<keyspace>``, where -``<keyspace>`` is the keyspace from which the algorithm can find the load information to optimize token assignment for. - -Manual token assignment -""""""""""""""""""""""" - -You may specify a comma-separated list of tokens manually with the ``initial_token`` ``cassandra.yaml`` parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens. - -Range streaming -~~~~~~~~~~~~~~~~ - -After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state. - -In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag ``-Dcassandra.consistent.rangemovement=false``. - -Resuming failed/hung bootstrap -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On 2.2+, if the bootstrap process fails, it's possible to resume bootstrap from the previous saved state by calling -``nodetool bootstrap resume``. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to clean up bootstrap state and start fresh, you may set the JVM startup flag -``-Dcassandra.reset_bootstrap_progress=true``. - -On lower versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data), and restart -the bootstrap process. - -Manual bootstrapping -~~~~~~~~~~~~~~~~~~~~ - -It's possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -``auto_bootstrap: false``. This may be useful when restoring a node from a backup or creating a new data-center. - -Removing nodes -^^^^^^^^^^^^^^ - -You can take a node out of the cluster with ``nodetool decommission`` (run on the live node being removed), or with -``nodetool removenode`` (run from any other machine) to remove a dead one.
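A hedged sketch of the two forms (the host ID shown is illustrative; it can be looked up with ``nodetool status``)::

    # run on the live node that is leaving the cluster
    $ nodetool decommission

    # run from any other node, passing the host ID of the dead node
    $ nodetool removenode 55ab3ed8-02cf-4e4b-a809-3c7a4d0e7496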
This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas. - -No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually. - -Moving nodes -^^^^^^^^^^^^ - -When ``num_tokens: 1`` it's possible to move the node position in the ring with ``nodetool move``. Moving is both a -convenience over and more efficient than decommission + bootstrap. After moving a node, ``nodetool cleanup`` should be -run to remove any unnecessary data. - -Replacing a dead node -^^^^^^^^^^^^^^^^^^^^^ - -In order to replace a dead node, start Cassandra with the JVM startup flag -``-Dcassandra.replace_address_first_boot=<dead_node_ip>``. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node as down. - -The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. The main difference -from normal bootstrapping of a new node is that this new node will not accept any writes during this phase. - -Once the bootstrapping is complete the node will be marked "UP"; we rely on hinted handoff to make this node -consistent (since it does not accept writes from the start of the bootstrap). - -.. Note:: If the replacement process takes longer than ``max_hint_window_in_ms`` you **MUST** run repair to make the - replaced node consistent again, since it missed ongoing writes during bootstrapping. - -Monitoring progress -^^^^^^^^^^^^^^^^^^^ - -Bootstrap, replace, move and remove progress can be monitored using ``nodetool netstats`` which will show the progress -of the streaming operations. - -Cleanup data after range movements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As a safety measure, Cassandra does not automatically remove data from nodes that "lose" part of their token range due -to a range movement operation (bootstrap, move, replace). Run ``nodetool cleanup`` on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node. diff --git a/src/doc/3.11.6/_sources/tools/cqlsh.rst.txt b/src/doc/3.11.6/_sources/tools/cqlsh.rst.txt deleted file mode 100644 index 45e2db8fc..000000000 --- a/src/doc/3.11.6/_sources/tools/cqlsh.rst.txt +++ /dev/null @@ -1,455 +0,0 @@ -.. highlight:: none - -.. _cqlsh: - -cqlsh: the CQL shell -------------------- - -cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line. - - -Compatibility -^^^^^^^^^^^^^ - -cqlsh is compatible with Python 2.7. - -In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported. - - -Optional Dependencies -^^^^^^^^^^^^^^^^^^^^^ - -cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.
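Both of the optional dependencies described below can usually be installed with ``pip`` (a hedged sketch, assuming a
Python 2.7 environment where ``pip`` is available)::

    $ pip install pytz cython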
- -pytz -~~~~ - -By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the `pytz `__ library must be installed. See the ``timezone`` option in cqlshrc_ for -specifying a timezone to use. - -cython -~~~~~~ - -The performance of cqlsh's ``COPY`` operations can be improved by installing `cython `__. This will -compile the python modules that are central to the performance of ``COPY``. - -cqlshrc -^^^^^^^ - -The ``cqlshrc`` file holds configuration options for cqlsh. By default this is in the user's home directory at -``~/.cassandra/cqlsh``, but a custom location can be specified with the ``--cqlshrc`` option. - -Example config values and documentation can be found in the ``conf/cqlshrc.sample`` file of a tarball installation. You -can also view the latest version of `cqlshrc online `__. - - -Command Line Options -^^^^^^^^^^^^^^^^^^^^ - -Usage: - -``cqlsh [options] [host [port]]`` - -Options: - -``-C`` ``--color`` - Force color output - -``--no-color`` - Disable color output - -``--browser`` - Specify the browser to use for displaying cqlsh help. This can be one of the `supported browser names - `__ (e.g. ``firefox``) or a browser path followed by ``%s`` (e.g. - ``/usr/bin/google-chrome-stable %s``). - -``--ssl`` - Use SSL when connecting to Cassandra - -``-u`` ``--user`` - Username to authenticate against Cassandra with - -``-p`` ``--password`` - Password to authenticate against Cassandra with, should - be used in conjunction with ``--user`` - -``-k`` ``--keyspace`` - Keyspace to authenticate to, should be used in conjunction - with ``--user`` - -``-f`` ``--file`` - Execute commands from the given file, then exit - -``--debug`` - Print additional debugging information - -``--encoding`` - Specify a non-default encoding for output (defaults to UTF-8) - -``--cqlshrc`` - Specify a non-default location for the ``cqlshrc`` file - -``-e`` ``--execute`` - Execute the given statement, then exit - -``--connect-timeout`` - Specify the connection timeout in seconds (defaults to 2s) - -``--request-timeout`` - Specify the request timeout in seconds (defaults to 10s) - -``-t`` ``--tty`` - Force tty mode (command prompt) - - -Special Commands -^^^^^^^^^^^^^^^^ - -In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below. - -``CONSISTENCY`` -~~~~~~~~~~~~~~~ - -`Usage`: ``CONSISTENCY `` - -Sets the consistency level for operations to follow. Valid arguments include: - -- ``ANY`` -- ``ONE`` -- ``TWO`` -- ``THREE`` -- ``QUORUM`` -- ``ALL`` -- ``LOCAL_QUORUM`` -- ``LOCAL_ONE`` -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -``SERIAL CONSISTENCY`` -~~~~~~~~~~~~~~~~~~~~~~ - -`Usage`: ``SERIAL CONSISTENCY `` - -Sets the serial consistency level for operations to follow. Valid arguments include: - -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of ``QUORUM`` (and -is successful), then a ``QUORUM`` read is guaranteed to see that write. 
But if the regular consistency level of that -write is ``ANY``, then only a read with a consistency level of ``SERIAL`` is guaranteed to see it (even a read with -consistency ``ALL`` is not guaranteed to be enough). - -``SHOW VERSION`` -~~~~~~~~~~~~~~~~ -Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:: - - cqlsh> SHOW VERSION - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - -``SHOW HOST`` -~~~~~~~~~~~~~ - -Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:: - - cqlsh> SHOW HOST - Connected to Prod_Cluster at 192.0.0.1:9042. - -``SHOW SESSION`` -~~~~~~~~~~~~~~~~ - -Pretty prints a specific tracing session. - -`Usage`: ``SHOW SESSION `` - -Example usage:: - - cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8 - - Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8 - - activity | timestamp | source | source_elapsed | client - -----------------------------------------------------------+----------------------------+-----------+----------------+----------- - Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 | 0 | 127.0.0.1 - Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 | 3843 | 127.0.0.1 - ... - - -``SOURCE`` -~~~~~~~~~~ - -Reads the contents of a file and executes each line as a CQL statement or special cqlsh command. - -`Usage`: ``SOURCE `` - -Example usage:: - - cqlsh> SOURCE '/home/thobbs/commands.cql' - -``CAPTURE`` -~~~~~~~~~~~ - -Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured. - -`Usage`:: - - CAPTURE ''; - CAPTURE OFF; - CAPTURE; - -That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation (``'~/mydir'``) is supported for referring to ``$HOME``. - -Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session. - -To stop capturing output and show it in the cqlsh session again, use ``CAPTURE OFF``. - -To inspect the current capture configuration, use ``CAPTURE`` with no arguments. - -``HELP`` -~~~~~~~~ - -Gives information about cqlsh commands. To see available topics, enter ``HELP`` without any arguments. To see help on a -topic, use ``HELP ``. Also see the ``--browser`` argument for controlling what browser is used to display help. - -``TRACING`` -~~~~~~~~~~~ - -Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed. - -`Usage`:: - - TRACING ON - TRACING OFF - -``PAGING`` -~~~~~~~~~~ - -Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it's a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once. - -`Usage`:: - - PAGING ON - PAGING OFF - PAGING - -``EXPAND`` -~~~~~~~~~~ - -Enables or disables vertical printing of rows. Enabling ``EXPAND`` is useful when many columns are fetched, or the -contents of a single column are large. - -`Usage`:: - - EXPAND ON - EXPAND OFF - -``LOGIN`` -~~~~~~~~~ - -Authenticate as a specified Cassandra user for the current session. 
- -`Usage`:: - - LOGIN [] - -``EXIT`` -~~~~~~~~~ - -Ends the current session and terminates the cqlsh process. - -`Usage`:: - - EXIT - QUIT - -``CLEAR`` -~~~~~~~~~ - -Clears the console. - -`Usage`:: - - CLEAR - CLS - -``DESCRIBE`` -~~~~~~~~~~~~ - -Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema. - -`Usage`:: - - DESCRIBE CLUSTER - DESCRIBE SCHEMA - DESCRIBE KEYSPACES - DESCRIBE KEYSPACE - DESCRIBE TABLES - DESCRIBE TABLE
- DESCRIBE INDEX - DESCRIBE MATERIALIZED VIEW - DESCRIBE TYPES - DESCRIBE TYPE - DESCRIBE FUNCTIONS - DESCRIBE FUNCTION - DESCRIBE AGGREGATES - DESCRIBE AGGREGATE - -In any of the commands, ``DESC`` may be used in place of ``DESCRIBE``. - -The ``DESCRIBE CLUSTER`` command prints the cluster name and partitioner:: - - cqlsh> DESCRIBE CLUSTER - - Cluster: Test Cluster - Partitioner: Murmur3Partitioner - -The ``DESCRIBE SCHEMA`` command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup. - -``COPY TO`` -~~~~~~~~~~~ - -Copies data from a table to a CSV file. - -`Usage`:: - - COPY
<table name> [(<column>, ...)] TO <file name> WITH <option> [AND <option> ...] - -If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name. - - -The ``<file name>`` should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value ``STDOUT`` (without single quotes) to print the CSV to stdout. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``MAXREQUESTS`` - The maximum number of token ranges to fetch simultaneously. Defaults to 6. - -``PAGESIZE`` - The number of rows to fetch in a single page. Defaults to 1000. - -``PAGETIMEOUT`` - By default the page timeout is 10 seconds per 1000 entries - in the page size or 10 seconds if pagesize is smaller. - -``BEGINTOKEN``, ``ENDTOKEN`` - Token range to export. Defaults to exporting the full ring. - -``MAXOUTPUTSIZE`` - The maximum size of the output file measured in number of lines; - beyond this maximum the output file will be split into segments. - -1 means unlimited, and is the default. - -``ENCODING`` - The encoding used for characters. Defaults to ``utf8``. - -``COPY FROM`` -~~~~~~~~~~~~~ -Copies data from a CSV file to a table. - -`Usage`::
COPY <table name> [(<column>, ...)] FROM <file name> WITH <option> [AND <option> ...] - -If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name. - -The ``<file name>`` should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value ``STDIN`` (without single quotes) to read the -CSV data from stdin. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY FROM`` -````````````````````````` - -``INGESTRATE`` - The maximum number of rows to process per second. Defaults to 100000. - -``MAXROWS`` - The maximum number of rows to import. -1 means unlimited, and is the default. - -``SKIPROWS`` - A number of initial rows to skip. Defaults to 0. - -``SKIPCOLS`` - A comma-separated list of column names to ignore. By default, no columns are skipped. - -``MAXPARSEERRORS`` - The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default. - -``MAXINSERTERRORS`` - The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000. - -``ERRFILE`` - A file to store all rows that could not be imported, by default this is ``import_<ks>_<table>.err`` where ``<ks>`` is - your keyspace and ``<table>
`` is your table name. - -``MAXBATCHSIZE`` - The max number of rows inserted in a single batch. Defaults to 20. - -``MINBATCHSIZE`` - The min number of rows inserted in a single batch. Defaults to 2. - -``CHUNKSIZE`` - The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000. - -.. _shared-copy-options: - -Shared COPY Options -``````````````````` - -Options that are common to both ``COPY TO`` and ``COPY FROM``. - -``NULLVAL`` - The string placeholder for null values. Defaults to ``null``. - -``HEADER`` - For ``COPY TO``, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, - specifies whether the first line in the CSV input file contains column names. Defaults to ``false``. - -``DECIMALSEP`` - The character that is used as the decimal point separator. Defaults to ``.``. - -``THOUSANDSSEP`` - The character that is used to separate thousands. Defaults to the empty string. - -``BOOLSTYlE`` - The string literal format for boolean values. Defaults to ``True,False``. - -``NUMPROCESSES`` - The number of child worker processes to create for ``COPY`` tasks. Defaults to a max of 4 for ``COPY FROM`` and 16 - for ``COPY TO``. However, at most (num_cores - 1) processes will be created. - -``MAXATTEMPTS`` - The maximum number of failed attempts to fetch a range of data (when using ``COPY TO``) or insert a chunk of data - (when using ``COPY FROM``) before giving up. Defaults to 5. - -``REPORTFREQUENCY`` - How often status updates are refreshed, in seconds. Defaults to 0.25. - -``RATEFILE`` - An optional file to output rate statistics to. By default, statistics are not output to a file. diff --git a/src/doc/3.11.6/_sources/tools/index.rst.txt b/src/doc/3.11.6/_sources/tools/index.rst.txt deleted file mode 100644 index 5a5e4d5ae..000000000 --- a/src/doc/3.11.6/_sources/tools/index.rst.txt +++ /dev/null @@ -1,26 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Tools -=============== - -This section describes the command line tools provided with Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cqlsh - nodetool diff --git a/src/doc/3.11.6/_sources/tools/nodetool.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool.rst.txt deleted file mode 100644 index e37303110..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _nodetool: - -Nodetool --------- - -.. todo:: Try to autogenerate this from Nodetool’s help. diff --git a/src/doc/3.11.6/_sources/tools/nodetool/assassinate.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/assassinate.rst.txt deleted file mode 100644 index 0ec5dc8f4..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/assassinate.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_assassinate: - -assassinate ------------ - -Usage ---------- - -.. include:: assassinate.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/bootstrap.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/bootstrap.rst.txt deleted file mode 100644 index e280fee01..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/bootstrap.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_bootstrap: - -bootstrap ---------- - -Usage ---------- - -.. include:: bootstrap.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/cleanup.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/cleanup.rst.txt deleted file mode 100644 index 70ba8f9f7..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/cleanup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_cleanup: - -cleanup -------- - -Usage ---------- - -.. include:: cleanup.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/clearsnapshot.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/clearsnapshot.rst.txt deleted file mode 100644 index 878147511..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/clearsnapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clearsnapshot: - -clearsnapshot -------------- - -Usage ---------- - -.. include:: clearsnapshot.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/clientstats.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/clientstats.rst.txt deleted file mode 100644 index 7f5e47ab7..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/clientstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clientstats: - -clientstats ------------ - -Usage ---------- - -.. include:: clientstats.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/compact.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/compact.rst.txt deleted file mode 100644 index a26347c57..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/compact.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compact: - -compact -------- - -Usage ---------- - -.. include:: compact.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/compactionhistory.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/compactionhistory.rst.txt deleted file mode 100644 index 896433e86..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/compactionhistory.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionhistory: - -compactionhistory ------------------ - -Usage ---------- - -.. 
include:: compactionhistory.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/compactionstats.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/compactionstats.rst.txt deleted file mode 100644 index 612822535..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/compactionstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionstats: - -compactionstats ---------------- - -Usage ---------- - -.. include:: compactionstats.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/decommission.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/decommission.rst.txt deleted file mode 100644 index 8b00ff40c..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/decommission.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_decommission: - -decommission ------------- - -Usage ---------- - -.. include:: decommission.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/describecluster.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/describecluster.rst.txt deleted file mode 100644 index 55df135de..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/describecluster.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describecluster: - -describecluster ---------------- - -Usage ---------- - -.. include:: describecluster.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/describering.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/describering.rst.txt deleted file mode 100644 index 3a964e8ee..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/describering.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describering: - -describering ------------- - -Usage ---------- - -.. include:: describering.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/disableauditlog.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/disableauditlog.rst.txt deleted file mode 100644 index 4b20b0a9b..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/disableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableauditlog: - -disableauditlog ---------------- - -Usage ---------- - -.. include:: disableauditlog.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/disableautocompaction.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/disableautocompaction.rst.txt deleted file mode 100644 index 16549f202..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/disableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableautocompaction: - -disableautocompaction ---------------------- - -Usage ---------- - -.. include:: disableautocompaction.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/disablebackup.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/disablebackup.rst.txt deleted file mode 100644 index c9537e011..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/disablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebackup: - -disablebackup -------------- - -Usage ---------- - -.. include:: disablebackup.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/disablebinary.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/disablebinary.rst.txt deleted file mode 100644 index 86e981f6e..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/disablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebinary: - -disablebinary -------------- - -Usage ---------- - -.. 
include:: disablebinary.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/disablefullquerylog.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/disablefullquerylog.rst.txt deleted file mode 100644 index d68cd492e..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/disablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablefullquerylog: - -disablefullquerylog -------------------- - -Usage ---------- - -.. include:: disablefullquerylog.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/disablegossip.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/disablegossip.rst.txt deleted file mode 100644 index 76f3d064b..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/disablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablegossip: - -disablegossip -------------- - -Usage ---------- - -.. include:: disablegossip.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/disablehandoff.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/disablehandoff.rst.txt deleted file mode 100644 index 35f11334b..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/disablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehandoff: - -disablehandoff --------------- - -Usage ---------- - -.. include:: disablehandoff.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/disablehintsfordc.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/disablehintsfordc.rst.txt deleted file mode 100644 index 892c31ba5..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/disablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehintsfordc: - -disablehintsfordc ------------------ - -Usage ---------- - -.. include:: disablehintsfordc.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/disableoldprotocolversions.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/disableoldprotocolversions.rst.txt deleted file mode 100644 index 09d8a150b..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/disableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableoldprotocolversions: - -disableoldprotocolversions --------------------------- - -Usage ---------- - -.. include:: disableoldprotocolversions.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/drain.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/drain.rst.txt deleted file mode 100644 index 03093ac7c..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/drain.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_drain: - -drain ------ - -Usage ---------- - -.. include:: drain.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/enableauditlog.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/enableauditlog.rst.txt deleted file mode 100644 index 7936c7a86..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/enableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableauditlog: - -enableauditlog --------------- - -Usage ---------- - -.. include:: enableauditlog.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/enableautocompaction.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/enableautocompaction.rst.txt deleted file mode 100644 index 2ddf60f8e..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/enableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableautocompaction: - -enableautocompaction --------------------- - -Usage ---------- - -.. 
include:: enableautocompaction.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/enablebackup.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/enablebackup.rst.txt deleted file mode 100644 index 9a06c6669..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/enablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebackup: - -enablebackup ------------- - -Usage ---------- - -.. include:: enablebackup.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/enablebinary.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/enablebinary.rst.txt deleted file mode 100644 index 5b1c6da72..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/enablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebinary: - -enablebinary ------------- - -Usage ---------- - -.. include:: enablebinary.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/enablefullquerylog.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/enablefullquerylog.rst.txt deleted file mode 100644 index ec871c283..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/enablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablefullquerylog: - -enablefullquerylog ------------------- - -Usage ---------- - -.. include:: enablefullquerylog.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/enablegossip.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/enablegossip.rst.txt deleted file mode 100644 index ae66186ca..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/enablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablegossip: - -enablegossip ------------- - -Usage ---------- - -.. include:: enablegossip.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/enablehandoff.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/enablehandoff.rst.txt deleted file mode 100644 index d764c3a9a..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/enablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehandoff: - -enablehandoff -------------- - -Usage ---------- - -.. include:: enablehandoff.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/enablehintsfordc.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/enablehintsfordc.rst.txt deleted file mode 100644 index 6c42087c3..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/enablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehintsfordc: - -enablehintsfordc ----------------- - -Usage ---------- - -.. include:: enablehintsfordc.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/enableoldprotocolversions.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/enableoldprotocolversions.rst.txt deleted file mode 100644 index e44dc377a..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/enableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableoldprotocolversions: - -enableoldprotocolversions -------------------------- - -Usage ---------- - -.. include:: enableoldprotocolversions.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/failuredetector.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/failuredetector.rst.txt deleted file mode 100644 index 25d02b04a..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/failuredetector.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_failuredetector: - -failuredetector ---------------- - -Usage ---------- - -.. 
include:: failuredetector.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/flush.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/flush.rst.txt deleted file mode 100644 index 45e9b2c5e..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/flush.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_flush: - -flush ------ - -Usage ---------- - -.. include:: flush.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/garbagecollect.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/garbagecollect.rst.txt deleted file mode 100644 index 3af605aad..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/garbagecollect.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_garbagecollect: - -garbagecollect --------------- - -Usage ---------- - -.. include:: garbagecollect.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/gcstats.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/gcstats.rst.txt deleted file mode 100644 index 3e4b45930..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/gcstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gcstats: - -gcstats -------- - -Usage ---------- - -.. include:: gcstats.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 6f56f7d6d..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getbatchlogreplaythrottle: - -getbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: getbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getcompactionthreshold.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getcompactionthreshold.rst.txt deleted file mode 100644 index e17f4b2e4..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthreshold: - -getcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: getcompactionthreshold.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getcompactionthroughput.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getcompactionthroughput.rst.txt deleted file mode 100644 index ef41115ee..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthroughput: - -getcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: getcompactionthroughput.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getconcurrency.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getconcurrency.rst.txt deleted file mode 100644 index 9b0373480..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrency: - -getconcurrency --------------- - -Usage ---------- - -.. include:: getconcurrency.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getconcurrentcompactors.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getconcurrentcompactors.rst.txt deleted file mode 100644 index 8e137e0ed..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentcompactors: - -getconcurrentcompactors ------------------------ - -Usage ---------- - -.. 
include:: getconcurrentcompactors.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt deleted file mode 100644 index e113d74c5..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentviewbuilders: - -getconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: getconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getendpoints.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getendpoints.rst.txt deleted file mode 100644 index ac0d43c7a..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getendpoints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getendpoints: - -getendpoints ------------- - -Usage ---------- - -.. include:: getendpoints.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt deleted file mode 100644 index abdd7e7f0..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getinterdcstreamthroughput: - -getinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: getinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getlogginglevels.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getlogginglevels.rst.txt deleted file mode 100644 index 9936e8d45..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getlogginglevels.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getlogginglevels: - -getlogginglevels ----------------- - -Usage ---------- - -.. include:: getlogginglevels.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getmaxhintwindow.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getmaxhintwindow.rst.txt deleted file mode 100644 index 1a0fe017b..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getmaxhintwindow: - -getmaxhintwindow ----------------- - -Usage ---------- - -.. include:: getmaxhintwindow.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getreplicas.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getreplicas.rst.txt deleted file mode 100644 index 342e72b57..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getreplicas.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getreplicas: - -getreplicas ------------ - -Usage ---------- - -.. include:: getreplicas.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getseeds.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getseeds.rst.txt deleted file mode 100644 index e3156300e..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getseeds: - -getseeds --------- - -Usage ---------- - -.. include:: getseeds.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getsstables.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getsstables.rst.txt deleted file mode 100644 index 1a866ccec..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getsstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getsstables: - -getsstables ------------ - -Usage ---------- - -.. 
include:: getsstables.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/getstreamthroughput.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/getstreamthroughput.rst.txt deleted file mode 100644 index 6d7dbc422..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/getstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getstreamthroughput: - -getstreamthroughput -------------------- - -Usage ---------- - -.. include:: getstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/gettimeout.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/gettimeout.rst.txt deleted file mode 100644 index 9d2e9edd8..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/gettimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettimeout: - -gettimeout ----------- - -Usage ---------- - -.. include:: gettimeout.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/gettraceprobability.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/gettraceprobability.rst.txt deleted file mode 100644 index 3f5783fd0..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/gettraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettraceprobability: - -gettraceprobability -------------------- - -Usage ---------- - -.. include:: gettraceprobability.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/gossipinfo.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/gossipinfo.rst.txt deleted file mode 100644 index cd3513a89..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/gossipinfo.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gossipinfo: - -gossipinfo ----------- - -Usage ---------- - -.. include:: gossipinfo.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/handoffwindow.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/handoffwindow.rst.txt deleted file mode 100644 index 87fe61dc2..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/handoffwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_handoffwindow: - -handoffwindow -------------- - -Usage ---------- - -.. include:: handoffwindow.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/help.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/help.rst.txt deleted file mode 100644 index 8cab88f74..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/help.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_help: - -help ----- - -Usage ---------- - -.. include:: help.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/import.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/import.rst.txt deleted file mode 100644 index 532ba2dcd..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/import.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_import: - -import ------- - -Usage ---------- - -.. include:: import.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/info.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/info.rst.txt deleted file mode 100644 index 74012e93f..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/info.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_info: - -info ----- - -Usage ---------- - -.. include:: info.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/invalidatecountercache.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/invalidatecountercache.rst.txt deleted file mode 100644 index ce1a94d1e..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/invalidatecountercache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. 
_nodetool_invalidatecountercache: - -invalidatecountercache ----------------------- - -Usage ---------- - -.. include:: invalidatecountercache.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/invalidatekeycache.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/invalidatekeycache.rst.txt deleted file mode 100644 index 3e80511a6..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/invalidatekeycache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatekeycache: - -invalidatekeycache ------------------- - -Usage ---------- - -.. include:: invalidatekeycache.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/invalidaterowcache.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/invalidaterowcache.rst.txt deleted file mode 100644 index fd84f1d5a..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/invalidaterowcache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidaterowcache: - -invalidaterowcache ------------------- - -Usage ---------- - -.. include:: invalidaterowcache.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/join.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/join.rst.txt deleted file mode 100644 index a2819eb6a..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/join.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_join: - -join ----- - -Usage ---------- - -.. include:: join.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/listsnapshots.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/listsnapshots.rst.txt deleted file mode 100644 index d897cfa2b..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/listsnapshots.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_listsnapshots: - -listsnapshots -------------- - -Usage ---------- - -.. include:: listsnapshots.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/move.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/move.rst.txt deleted file mode 100644 index 04b3bdba1..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/move.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_move: - -move ----- - -Usage ---------- - -.. include:: move.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/netstats.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/netstats.rst.txt deleted file mode 100644 index b94a09e7d..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/netstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_netstats: - -netstats --------- - -Usage ---------- - -.. include:: netstats.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/nodetool.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/nodetool.rst.txt deleted file mode 100644 index c20d0ac21..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/nodetool.rst.txt +++ /dev/null @@ -1,256 +0,0 @@ -.. _nodetool - -Nodetool --------- - -Usage ---------- - -usage: nodetool [(-u | --username )] - [(-h | --host )] [(-p | --port )] - [(-pw | --password )] - [(-pwf | --password-file )] - [(-pp | --print-port)] [] - -The most commonly used nodetool commands are: - - :doc:`assassinate` - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode - - :doc:`bootstrap` - Monitor/manage node's bootstrap process - - :doc:`cleanup` - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces - - :doc:`clearsnapshot` - Remove the snapshot with the given name from the given keyspaces. 
If no snapshotName is specified we will remove all snapshots - - :doc:`clientstats` - Print information about connected clients - - :doc:`compact` - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables - - :doc:`compactionhistory` - Print history of compaction - - :doc:`compactionstats` - Print statistics on compactions - - :doc:`decommission` - Decommission the *node I am connecting to* - - :doc:`describecluster` - Print the name, snitch, partitioner and schema version of a cluster - - :doc:`describering` - Shows the token ranges info of a given keyspace - - :doc:`disableauditlog` - Disable the audit log - - :doc:`disableautocompaction` - Disable autocompaction for the given keyspace and table - - :doc:`disablebackup` - Disable incremental backup - - :doc:`disablebinary` - Disable native transport (binary protocol) - - :doc:`disablefullquerylog` - Disable the full query log - - :doc:`disablegossip` - Disable gossip (effectively marking the node down) - - :doc:`disablehandoff` - Disable storing hinted handoffs - - :doc:`disablehintsfordc` - Disable hints for a data center - - :doc:`disableoldprotocolversions` - Disable old protocol versions - - :doc:`drain` - Drain the node (stop accepting writes and flush all tables) - - :doc:`enableauditlog` - Enable the audit log - - :doc:`enableautocompaction` - Enable autocompaction for the given keyspace and table - - :doc:`enablebackup` - Enable incremental backup - - :doc:`enablebinary` - Reenable native transport (binary protocol) - - :doc:`enablefullquerylog` - Enable full query logging, defaults for the options are configured in cassandra.yaml - - :doc:`enablegossip` - Reenable gossip - - :doc:`enablehandoff` - Reenable future hints storing on the current node - - :doc:`enablehintsfordc` - Enable hints for a data center that was previsouly disabled - - :doc:`enableoldprotocolversions` - Enable old protocol versions - - :doc:`failuredetector` - Shows the failure detector information for the cluster - - :doc:`flush` - Flush one or more tables - - :doc:`garbagecollect` - Remove deleted data from one or more tables - - :doc:`gcstats` - Print GC Statistics - - :doc:`getbatchlogreplaythrottle` - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster. - - :doc:`getcompactionthreshold` - Print min and max compaction thresholds for a given table - - :doc:`getcompactionthroughput` - Print the MB/s throughput cap for compaction in the system - - :doc:`getconcurrency` - Get maximum concurrency for processing stages - - :doc:`getconcurrentcompactors` - Get the number of concurrent compactors in the system. 
- - :doc:`getconcurrentviewbuilders` - Get the number of concurrent view builders in the system - - :doc:`getendpoints` - Print the end points that owns the key - - :doc:`getinterdcstreamthroughput` - Print the Mb/s throughput cap for inter-datacenter streaming in the system - - :doc:`getlogginglevels` - Get the runtime logging levels - - :doc:`getmaxhintwindow` - Print the max hint window in ms - - :doc:`getreplicas` - Print replicas for a given key - - :doc:`getseeds` - Get the currently in use seed node IP list excluding the node IP - - :doc:`getsstables` - Print the sstable filenames that own the key - - :doc:`getstreamthroughput` - Print the Mb/s throughput cap for streaming in the system - - :doc:`gettimeout` - Print the timeout of the given type in ms - - :doc:`gettraceprobability` - Print the current trace probability value - - :doc:`gossipinfo` - Shows the gossip information for the cluster - - :doc:`handoffwindow` - Print current hinted handoff window - - :doc:`help` - Display help information - - :doc:`import` - Import new SSTables to the system - - :doc:`info` - Print node information (uptime, load, ...) - - :doc:`invalidatecountercache` - Invalidate the counter cache - - :doc:`invalidatekeycache` - Invalidate the key cache - - :doc:`invalidaterowcache` - Invalidate the row cache - - :doc:`join` - Join the ring - - :doc:`listsnapshots` - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is total size of the snapshot on disk. Total TrueDiskSpaceUsed does not make any SSTable deduplication. - - :doc:`move` - Move node on the token ring to a new token - - :doc:`netstats` - Print network information on provided host (connecting node by default) - - :doc:`pausehandoff` - Pause hints delivery process - - :doc:`profileload` - Low footprint profiling of activity for a period of time - - :doc:`proxyhistograms` - Print statistic histograms for network operations - - :doc:`rangekeysample` - Shows the sampled keys held across all keyspaces - - :doc:`rebuild` - Rebuild data by streaming from other nodes (similarly to bootstrap) - - :doc:`rebuild_index` - A full rebuild of native secondary indexes for a given table - - :doc:`refresh` - Load newly placed SSTables to the system without restart - - :doc:`refreshsizeestimates` - Refresh system.size_estimates - - :doc:`reloadlocalschema` - Reload local node schema from system tables - - :doc:`reloadseeds` - Reload the seed node list from the seed node provider - - :doc:`reloadssl` - Signals Cassandra to reload SSL certificates - - :doc:`reloadtriggers` - Reload trigger classes - - :doc:`relocatesstables` - Relocates sstables to the correct disk - - :doc:`removenode` - Show status of current node removal, force completion of pending removal or remove provided ID - - :doc:`repair` - Repair one or more tables - - :doc:`repair_admin` - - :doc:`list` - and fail incremental repair sessions - - :doc:`replaybatchlog` - Kick off batchlog replay and wait for finish - - :doc:`resetfullquerylog` - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX - - :doc:`resetlocalschema` - Reset node's local schema and resync - - :doc:`resumehandoff` - Resume hints delivery process - - :doc:`ring` - Print information about the token ring - - :doc:`scrub` - Scrub (rebuild sstables for) one or more tables - - :doc:`setbatchlogreplaythrottle` - Set batchlog replay throttle in KB per second, or 0 to 
disable throttling. This will be reduced proportionally to the number of nodes in the cluster. - - :doc:`setcachecapacity` - Set global key, row, and counter cache capacities (in MB units) - - :doc:`setcachekeystosave` - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable - - :doc:`setcompactionthreshold` - Set min and max compaction thresholds for a given table - - :doc:`setcompactionthroughput` - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling - - :doc:`setconcurrency` - Set maximum concurrency for processing stage - - :doc:`setconcurrentcompactors` - Set number of concurrent compactors in the system. - - :doc:`setconcurrentviewbuilders` - Set the number of concurrent view builders in the system - - :doc:`sethintedhandoffthrottlekb` - Set hinted handoff throttle in kb per second, per delivery thread. - - :doc:`setinterdcstreamthroughput` - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling - - :doc:`setlogginglevel` - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters. - - :doc:`setmaxhintwindow` - Set the specified max hint window in ms - - :doc:`setstreamthroughput` - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling - - :doc:`settimeout` - Set the specified timeout in ms, or 0 to disable timeout - - :doc:`settraceprobability` - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default - - :doc:`sjk` - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk --help' for more information. - - :doc:`snapshot` - Take a snapshot of specified keyspaces or a snapshot of the specified table - - :doc:`status` - Print cluster information (state, load, IDs, ...) - - :doc:`statusautocompaction` - - :doc:`status` - of autocompaction of the given keyspace and table - - :doc:`statusbackup` - Status of incremental backup - - :doc:`statusbinary` - Status of native transport (binary protocol) - - :doc:`statusgossip` - Status of gossip - - :doc:`statushandoff` - Status of storing future hints on the current node - - :doc:`stop` - Stop compaction - - :doc:`stopdaemon` - Stop cassandra daemon - - :doc:`tablehistograms` - Print statistic histograms for a given table - - :doc:`tablestats` - Print statistics on tables - - :doc:`toppartitions` - Sample and print the most active partitions - - :doc:`tpstats` - Print usage statistics of thread pools - - :doc:`truncatehints` - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified. - - :doc:`upgradesstables` - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version) - - :doc:`verify` - Verify (check data checksum for) one or more tables - - :doc:`version` - Print cassandra version - - :doc:`viewbuildstatus` - Show progress of a materialized view build - -See 'nodetool help ' for more information on a specific command. - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/pausehandoff.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/pausehandoff.rst.txt deleted file mode 100644 index 85ea996f9..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/pausehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_pausehandoff: - -pausehandoff ------------- - -Usage ---------- - -.. 
include:: pausehandoff.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/profileload.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/profileload.rst.txt deleted file mode 100644 index aff289f9f..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/profileload.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_profileload: - -profileload ------------ - -Usage ---------- - -.. include:: profileload.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/proxyhistograms.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/proxyhistograms.rst.txt deleted file mode 100644 index c4f333fb7..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/proxyhistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_proxyhistograms: - -proxyhistograms ---------------- - -Usage ---------- - -.. include:: proxyhistograms.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/rangekeysample.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/rangekeysample.rst.txt deleted file mode 100644 index 983ce93d0..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/rangekeysample.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rangekeysample: - -rangekeysample --------------- - -Usage ---------- - -.. include:: rangekeysample.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/rebuild.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/rebuild.rst.txt deleted file mode 100644 index 7a94ce4ed..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/rebuild.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild: - -rebuild -------- - -Usage ---------- - -.. include:: rebuild.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/rebuild_index.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/rebuild_index.rst.txt deleted file mode 100644 index a1ea4f5a2..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/rebuild_index.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild_index: - -rebuild_index -------------- - -Usage ---------- - -.. include:: rebuild_index.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/refresh.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/refresh.rst.txt deleted file mode 100644 index f68f040cd..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/refresh.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refresh: - -refresh -------- - -Usage ---------- - -.. include:: refresh.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/refreshsizeestimates.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/refreshsizeestimates.rst.txt deleted file mode 100644 index 2f3610afe..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/refreshsizeestimates.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refreshsizeestimates: - -refreshsizeestimates --------------------- - -Usage ---------- - -.. include:: refreshsizeestimates.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/reloadlocalschema.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/reloadlocalschema.rst.txt deleted file mode 100644 index 7ccc0c5e3..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/reloadlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadlocalschema: - -reloadlocalschema ------------------ - -Usage ---------- - -.. 
include:: reloadlocalschema.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/reloadseeds.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/reloadseeds.rst.txt deleted file mode 100644 index 5c6751d77..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/reloadseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadseeds: - -reloadseeds ------------ - -Usage ---------- - -.. include:: reloadseeds.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/reloadssl.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/reloadssl.rst.txt deleted file mode 100644 index 9781b295b..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/reloadssl.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadssl: - -reloadssl ---------- - -Usage ---------- - -.. include:: reloadssl.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/reloadtriggers.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/reloadtriggers.rst.txt deleted file mode 100644 index 2f7959d8c..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/reloadtriggers.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadtriggers: - -reloadtriggers --------------- - -Usage ---------- - -.. include:: reloadtriggers.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/relocatesstables.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/relocatesstables.rst.txt deleted file mode 100644 index 9951d3398..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/relocatesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_relocatesstables: - -relocatesstables ----------------- - -Usage ---------- - -.. include:: relocatesstables.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/removenode.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/removenode.rst.txt deleted file mode 100644 index fe0a041d1..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/removenode.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_removenode: - -removenode ----------- - -Usage ---------- - -.. include:: removenode.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/repair.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/repair.rst.txt deleted file mode 100644 index b43baba71..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/repair.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair: - -repair ------- - -Usage ---------- - -.. include:: repair.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/repair_admin.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/repair_admin.rst.txt deleted file mode 100644 index 1212c399d..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/repair_admin.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair_admin: - -repair_admin ------------- - -Usage ---------- - -.. include:: repair_admin.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/replaybatchlog.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/replaybatchlog.rst.txt deleted file mode 100644 index 073f091db..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/replaybatchlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_replaybatchlog: - -replaybatchlog --------------- - -Usage ---------- - -.. 
include:: replaybatchlog.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/resetfullquerylog.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/resetfullquerylog.rst.txt deleted file mode 100644 index a7661ec98..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/resetfullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetfullquerylog: - -resetfullquerylog ------------------ - -Usage ---------- - -.. include:: resetfullquerylog.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/resetlocalschema.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/resetlocalschema.rst.txt deleted file mode 100644 index cd1b75d33..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/resetlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetlocalschema: - -resetlocalschema ----------------- - -Usage ---------- - -.. include:: resetlocalschema.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/resumehandoff.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/resumehandoff.rst.txt deleted file mode 100644 index 48a0451a3..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/resumehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resumehandoff: - -resumehandoff -------------- - -Usage ---------- - -.. include:: resumehandoff.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/ring.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/ring.rst.txt deleted file mode 100644 index 7b3c195bd..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/ring.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_ring: - -ring ----- - -Usage ---------- - -.. include:: ring.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/scrub.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/scrub.rst.txt deleted file mode 100644 index fc926eb14..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/scrub.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_scrub: - -scrub ------ - -Usage ---------- - -.. include:: scrub.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 2ae628a35..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setbatchlogreplaythrottle: - -setbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: setbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setcachecapacity.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setcachecapacity.rst.txt deleted file mode 100644 index 92c7d6389..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setcachecapacity.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachecapacity: - -setcachecapacity ----------------- - -Usage ---------- - -.. include:: setcachecapacity.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setcachekeystosave.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setcachekeystosave.rst.txt deleted file mode 100644 index 639179f99..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setcachekeystosave.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachekeystosave: - -setcachekeystosave ------------------- - -Usage ---------- - -.. 
include:: setcachekeystosave.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setcompactionthreshold.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setcompactionthreshold.rst.txt deleted file mode 100644 index 3a3e88b08..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthreshold: - -setcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: setcompactionthreshold.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setcompactionthroughput.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setcompactionthroughput.rst.txt deleted file mode 100644 index 27185da30..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthroughput: - -setcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: setcompactionthroughput.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setconcurrency.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setconcurrency.rst.txt deleted file mode 100644 index 75b09531f..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrency: - -setconcurrency --------------- - -Usage ---------- - -.. include:: setconcurrency.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setconcurrentcompactors.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setconcurrentcompactors.rst.txt deleted file mode 100644 index 75969de4e..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentcompactors: - -setconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: setconcurrentcompactors.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt deleted file mode 100644 index 26f53a171..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentviewbuilders: - -setconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: setconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt deleted file mode 100644 index 9986ca29a..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sethintedhandoffthrottlekb: - -sethintedhandoffthrottlekb --------------------------- - -Usage ---------- - -.. include:: sethintedhandoffthrottlekb.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt deleted file mode 100644 index ed406a7bc..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setinterdcstreamthroughput: - -setinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. 
include:: setinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setlogginglevel.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setlogginglevel.rst.txt deleted file mode 100644 index eaa4030c3..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setlogginglevel.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setlogginglevel: - -setlogginglevel ---------------- - -Usage ---------- - -.. include:: setlogginglevel.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setmaxhintwindow.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setmaxhintwindow.rst.txt deleted file mode 100644 index 0c62c3289..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setmaxhintwindow: - -setmaxhintwindow ----------------- - -Usage ---------- - -.. include:: setmaxhintwindow.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/setstreamthroughput.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/setstreamthroughput.rst.txt deleted file mode 100644 index 76447f112..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/setstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setstreamthroughput: - -setstreamthroughput -------------------- - -Usage ---------- - -.. include:: setstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/settimeout.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/settimeout.rst.txt deleted file mode 100644 index 4ec9a6e4d..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/settimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settimeout: - -settimeout ----------- - -Usage ---------- - -.. include:: settimeout.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/settraceprobability.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/settraceprobability.rst.txt deleted file mode 100644 index a95b48560..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/settraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settraceprobability: - -settraceprobability -------------------- - -Usage ---------- - -.. include:: settraceprobability.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/sjk.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/sjk.rst.txt deleted file mode 100644 index 19bf1d605..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/sjk.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sjk: - -sjk ---- - -Usage ---------- - -.. include:: sjk.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/snapshot.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/snapshot.rst.txt deleted file mode 100644 index 097a655b2..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/snapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_snapshot: - -snapshot --------- - -Usage ---------- - -.. include:: snapshot.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/status.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/status.rst.txt deleted file mode 100644 index 4d3050ea1..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/status.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_status: - -status ------- - -Usage ---------- - -.. 
include:: status.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/statusautocompaction.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/statusautocompaction.rst.txt deleted file mode 100644 index 3748e0e4e..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/statusautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusautocompaction: - -statusautocompaction --------------------- - -Usage ---------- - -.. include:: statusautocompaction.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/statusbackup.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/statusbackup.rst.txt deleted file mode 100644 index 6546ec07f..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/statusbackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbackup: - -statusbackup ------------- - -Usage ---------- - -.. include:: statusbackup.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/statusbinary.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/statusbinary.rst.txt deleted file mode 100644 index 0bb5011c3..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/statusbinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbinary: - -statusbinary ------------- - -Usage ---------- - -.. include:: statusbinary.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/statusgossip.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/statusgossip.rst.txt deleted file mode 100644 index 7dc57eda7..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/statusgossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusgossip: - -statusgossip ------------- - -Usage ---------- - -.. include:: statusgossip.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/statushandoff.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/statushandoff.rst.txt deleted file mode 100644 index aa1c4eb6b..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/statushandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statushandoff: - -statushandoff -------------- - -Usage ---------- - -.. include:: statushandoff.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/stop.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/stop.rst.txt deleted file mode 100644 index 1e44dbe79..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/stop.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stop: - -stop ----- - -Usage ---------- - -.. include:: stop.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/stopdaemon.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/stopdaemon.rst.txt deleted file mode 100644 index 4ae951098..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/stopdaemon.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stopdaemon: - -stopdaemon ----------- - -Usage ---------- - -.. include:: stopdaemon.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/tablehistograms.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/tablehistograms.rst.txt deleted file mode 100644 index 79d2b4ccb..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/tablehistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablehistograms: - -tablehistograms ---------------- - -Usage ---------- - -.. 
include:: tablehistograms.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/tablestats.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/tablestats.rst.txt deleted file mode 100644 index 5b2c02d98..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/tablestats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablestats: - -tablestats ----------- - -Usage ---------- - -.. include:: tablestats.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/toppartitions.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/toppartitions.rst.txt deleted file mode 100644 index 711816313..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/toppartitions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_toppartitions: - -toppartitions -------------- - -Usage ---------- - -.. include:: toppartitions.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/tpstats.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/tpstats.rst.txt deleted file mode 100644 index c6b662012..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/tpstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tpstats: - -tpstats -------- - -Usage ---------- - -.. include:: tpstats.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/truncatehints.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/truncatehints.rst.txt deleted file mode 100644 index 4b75391a6..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/truncatehints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_truncatehints: - -truncatehints -------------- - -Usage ---------- - -.. include:: truncatehints.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/upgradesstables.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/upgradesstables.rst.txt deleted file mode 100644 index 505cc148a..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/upgradesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_upgradesstables: - -upgradesstables ---------------- - -Usage ---------- - -.. include:: upgradesstables.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/verify.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/verify.rst.txt deleted file mode 100644 index dbd152cfb..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/verify.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_verify: - -verify ------- - -Usage ---------- - -.. include:: verify.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/version.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/version.rst.txt deleted file mode 100644 index fca4e3f44..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/version.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_version: - -version -------- - -Usage ---------- - -.. include:: version.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/tools/nodetool/viewbuildstatus.rst.txt b/src/doc/3.11.6/_sources/tools/nodetool/viewbuildstatus.rst.txt deleted file mode 100644 index 758fe502b..000000000 --- a/src/doc/3.11.6/_sources/tools/nodetool/viewbuildstatus.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_viewbuildstatus: - -viewbuildstatus ---------------- - -Usage ---------- - -.. include:: viewbuildstatus.txt - :literal: - diff --git a/src/doc/3.11.6/_sources/troubleshooting/index.rst.txt b/src/doc/3.11.6/_sources/troubleshooting/index.rst.txt deleted file mode 100644 index 2e5cf106d..000000000 --- a/src/doc/3.11.6/_sources/troubleshooting/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. 
Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Troubleshooting -=============== - -.. TODO: todo diff --git a/src/doc/3.11.6/_static/ajax-loader.gif b/src/doc/3.11.6/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab..000000000 Binary files a/src/doc/3.11.6/_static/ajax-loader.gif and /dev/null differ diff --git a/src/doc/3.11.6/_static/basic.css b/src/doc/3.11.6/_static/basic.css deleted file mode 100644 index 0807176ec..000000000 --- a/src/doc/3.11.6/_static/basic.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page 
------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { 
- text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - 
-div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/src/doc/3.11.6/_static/comment-bright.png b/src/doc/3.11.6/_static/comment-bright.png deleted file mode 100644 index 15e27edb1..000000000 Binary files a/src/doc/3.11.6/_static/comment-bright.png and /dev/null differ diff --git a/src/doc/3.11.6/_static/comment-close.png b/src/doc/3.11.6/_static/comment-close.png deleted file mode 100644 index 4d91bcf57..000000000 Binary files a/src/doc/3.11.6/_static/comment-close.png and /dev/null differ diff --git a/src/doc/3.11.6/_static/comment.png b/src/doc/3.11.6/_static/comment.png deleted file mode 100644 index dfbc0cbd5..000000000 Binary files a/src/doc/3.11.6/_static/comment.png and /dev/null differ diff --git a/src/doc/3.11.6/_static/doctools.js b/src/doc/3.11.6/_static/doctools.js deleted file mode 100644 index 344db17dd..000000000 --- a/src/doc/3.11.6/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. 
Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var bbox = span.getBBox(); - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - var parentOfText = node.parentNode.parentNode; - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 
0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && 
activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git a/src/doc/3.11.6/_static/documentation_options.js b/src/doc/3.11.6/_static/documentation_options.js deleted file mode 100644 index d28647eb8..000000000 --- a/src/doc/3.11.6/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, -}; \ No newline at end of file diff --git a/src/doc/3.11.6/_static/down-pressed.png b/src/doc/3.11.6/_static/down-pressed.png deleted file mode 100644 index 5756c8cad..000000000 Binary files a/src/doc/3.11.6/_static/down-pressed.png and /dev/null differ diff --git a/src/doc/3.11.6/_static/down.png b/src/doc/3.11.6/_static/down.png deleted file mode 100644 index 1b3bdad2c..000000000 Binary files a/src/doc/3.11.6/_static/down.png and /dev/null differ diff --git a/src/doc/3.11.6/_static/extra.css b/src/doc/3.11.6/_static/extra.css deleted file mode 100644 index 715e2a850..000000000 --- a/src/doc/3.11.6/_static/extra.css +++ /dev/null @@ -1,59 +0,0 @@ -div:not(.highlight) > pre { - background: #fff; - border: 1px solid #e1e4e5; - color: #404040; - margin: 1px 0 24px 0; - overflow-x: auto; - padding: 12px 12px; - font-size: 12px; -} - -a.reference.internal code.literal { - border: none; - font-size: 12px; - color: #2980B9; - padding: 0; - background: none; -} - -a.reference.internal:visited code.literal { - color: #9B59B6; - padding: 0; - background: none; -} - - -/* override table width restrictions */ -.wy-table-responsive table td, .wy-table-responsive table th { - white-space: normal; -} - -.wy-table-responsive { - margin-bottom: 24px; - max-width: 100%; - overflow: visible; -} - -table.contentstable { - margin: 0; -} - -td.rightcolumn { - padding-left: 30px; -} - -div#wipwarning { - font-size: 14px; - border: 1px solid #ecc; - color: #f66; - background: #ffe8e8; - padding: 10px 30px; - margin-bottom: 30px; -} -.content-container{ - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; - width:100%; -} diff --git a/src/doc/3.11.6/_static/file.png b/src/doc/3.11.6/_static/file.png deleted file mode 100644 index a858a410e..000000000 Binary files a/src/doc/3.11.6/_static/file.png and /dev/null differ diff --git a/src/doc/3.11.6/_static/jquery-3.2.1.js b/src/doc/3.11.6/_static/jquery-3.2.1.js deleted file mode 100644 index d2d8ca479..000000000 --- a/src/doc/3.11.6/_static/jquery-3.2.1.js +++ /dev/null @@ -1,10253 +0,0 @@ -/*! 
- * jQuery JavaScript Library v3.2.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2017-03-20T18:59Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.2.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from 
the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && Array.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? 
- [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
-function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. 
- if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - disabledAncestor( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. - } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( el ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push("~="); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push(".#.+[+~]"); - } - }); - - assert(function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement("input"); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - 
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - if ( support.matchesSelector && documentIsHTML && - !compilerCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) {} - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? 
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -var risSimple = /^.[^:#\[\.,]*$/; - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Simple selector that can be filtered directly, removing non-Elements - if ( risSimple.test( qualifier ) ) { - return jQuery.filter( qualifier, elements, not ); - } - - // Complex selector, compare the two sets, removing non-Elements - qualifier = jQuery.filter( qualifier, elements ); - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; - } ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? - jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( nodeName( elem, "iframe" ) ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( jQuery.isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( jQuery.isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? 
- [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. 
- // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. - } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "
", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps 
(WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( ">tbody", elem )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( isFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl ) { - jQuery._evalUrl( node.src ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = jQuery.contains( elem.ownerDocument, elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rmargin = ( /^margin/ ); - -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - div.style.cssText = - "box-sizing:border-box;" + - "position:relative;display:block;" + - "margin:auto;border:1px;padding:1px;" + - "top:1%;width:50%"; - div.innerHTML = ""; - documentElement.appendChild( container ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = divStyle.marginLeft === "2px"; - boxSizingReliableVal = divStyle.width === "4px"; - - // Support: Android 4.0 - 4.3 only - // Some styles come back with percentage values, even though they shouldn't - div.style.marginRight = "50%"; - pixelMarginRightVal = divStyle.marginRight === "4px"; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + - "padding:0;margin-top:1px;position:absolute"; - container.appendChild( div ); - - jQuery.extend( support, { - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelMarginRight: function() { - computeStyleTests(); - return pixelMarginRightVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }, - - cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style; - -// Return a css property mapped to a potentially vendor prefixed property -function vendorPropName( name ) { - - // Shortcut for names that are not vendor prefixed - if ( name in emptyStyle ) { - return name; - } - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a property mapped along what jQuery.cssProps suggests or to -// a vendor prefixed property. -function finalPropName( name ) { - var ret = jQuery.cssProps[ name ]; - if ( !ret ) { - ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; - } - return ret; -} - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i, - val = 0; - - // If we already have the right measurement, avoid augmentation - if ( extra === ( isBorderBox ? "border" : "content" ) ) { - i = 4; - - // Otherwise initialize for horizontal or vertical properties - } else { - i = name === "width" ? 
1 : 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // At this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - - // At this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // At this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with computed style - var valueIsBorderBox, - styles = getStyles( elem ), - val = curCSS( elem, name, styles ), - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test( val ) ) { - return val; - } - - // Check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && - ( support.boxSizingReliable() || val === elem.style[ name ] ); - - // Fall back to offsetWidth/Height when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - if ( val === "auto" ) { - val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; - } - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - - // Use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - "float": "cssFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - if ( type === "number" ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
- swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - } ) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = extra && getStyles( elem ), - subtract = extra && augmentWidthOrHeight( - elem, - name, - extra, - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ); - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ name ] = value; - value = jQuery.css( elem, name ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && - ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || - jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = jQuery.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 13 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( 
restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( jQuery.isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - jQuery.proxy( result.stop, result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( type === "string" ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = value.match( rnothtmlwhite ) || []; - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, isFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 
2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; -} ); - -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - - - - -support.focusin = "onfocusin" in window; - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = jQuery.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport 
); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - 
// If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - 
contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -' - ---- -
-
- -
-
-
- -
-

Dynamo

-
-

Gossip

-
-

Todo

-

todo

-
-
-
-

Failure Detection

-
-

Todo

-

todo

-
-
-
-

Token Ring/Ranges

-
-

Todo

-

todo

-
-
-
-

Replication

-

The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are SimpleStrategy and NetworkTopologyStrategy.

-
-

SimpleStrategy

-

SimpleStrategy allows a single integer replication_factor to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if replication_factor is 3, then three different nodes should store -a copy of each row.

-

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until replication_factor distinct nodes have been added to the set of replicas.

-
-
-

NetworkTopologyStrategy

-

NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your -cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier -to add new physical or virtual datacenters to the cluster later.

-

In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose -replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication -factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one -replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially surprising -implications. For example, if the racks do not hold an equal number of -nodes, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to -configure all nodes on a single “rack”.

-
-
-
-

Tunable Consistency

-

Cassandra supports a per-operation tradeoff between consistency and availability through Consistency Levels. -Essentially, an operation’s consistency level specifies how many of the replicas need to respond to the coordinator in -order to consider the operation a success.

-

The following consistency levels are available:

-
-
ONE
-
Only a single replica must respond.
-
TWO
-
Two replicas must respond.
-
THREE
-
Three replicas must respond.
-
QUORUM
-
A majority (n/2 + 1) of the replicas must respond.
-
ALL
-
All of the replicas must respond.
-
LOCAL_QUORUM
-
A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
EACH_QUORUM
-
A majority of the replicas in each datacenter must respond.
-
LOCAL_ONE
-
Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not -sent to replicas in a remote datacenter.
-
ANY
-
A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later -attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for -write operations.
-
-

Write operations are always sent to all replicas, regardless of consistency level. The consistency level simply -controls how many responses the coordinator waits for before responding to the client.

-

For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level. There are a couple of exceptions to this:

-
    -
  • Speculative retry may issue a redundant read request to an extra replica if the other replicas have not responded -within a specified time window.
  • -
  • Based on read_repair_chance and dclocal_read_repair_chance (part of a table’s schema), read requests may be -randomly sent to all replicas in order to repair potentially inconsistent data.
  • -
-
-

Picking Consistency Levels

-

It is common to pick read and write consistency levels that are high enough to overlap, resulting in “strong” -consistency. This is typically expressed as W + R > RF, where W is the write consistency level, R is the -read consistency level, and RF is the replication factor. For example, if RF = 3, a QUORUM request will -require responses from at least two of the three replicas. If QUORUM is used for both writes and reads, at least -one of the replicas is guaranteed to participate in both the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, LOCAL_QUORUM can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter.

-

If this type of strong consistency isn’t required, lower consistency levels like ONE may be used to improve -throughput, latency, and availability.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/architecture/guarantees.html b/src/doc/3.11.6/architecture/guarantees.html deleted file mode 100644 index 4dd5f4737..000000000 --- a/src/doc/3.11.6/architecture/guarantees.html +++ /dev/null @@ -1,113 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Guarantees" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Guarantees

-
-

Todo

-

todo

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/architecture/index.html b/src/doc/3.11.6/architecture/index.html deleted file mode 100644 index ef6c8fe45..000000000 --- a/src/doc/3.11.6/architecture/index.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Architecture" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Architecture

-

This section describes the general architecture of Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/architecture/overview.html b/src/doc/3.11.6/architecture/overview.html deleted file mode 100644 index 01e37ccd3..000000000 --- a/src/doc/3.11.6/architecture/overview.html +++ /dev/null @@ -1,113 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Overview" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
- -
\ No newline at end of file diff --git a/src/doc/3.11.6/architecture/storage_engine.html b/src/doc/3.11.6/architecture/storage_engine.html deleted file mode 100644 index 85137c81e..000000000 --- a/src/doc/3.11.6/architecture/storage_engine.html +++ /dev/null @@ -1,164 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Storage Engine" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Storage Engine

-
-

CommitLog

-
-

Todo

-

todo

-
-
-
-

Memtables

-

Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable SSTables. This can be triggered in several -ways:

-
    -
  • The memory usage of the memtables exceeds the configured threshold (see memtable_cleanup_threshold)
  • -
  • The CommitLog approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to -be freed
  • -
-

Memtables may be stored entirely on-heap or partially off-heap, depending on memtable_allocation_type.

-
-
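The memtable knobs mentioned above live in cassandra.yaml; the following is a minimal illustrative sketch (values are examples, not tuning advice):

    memtable_allocation_type: heap_buffers   # or offheap_buffers / offheap_objects for partially off-heap memtables
    memtable_heap_space_in_mb: 2048          # flushes are triggered as usage approaches these limits
    memtable_offheap_space_in_mb: 2048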
-

SSTables

-

SSTables are the immutable data files that Cassandra uses for persisting data on disk.

-

As SSTables are flushed to disk from Memtables or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.

-

Each SSTable comprises multiple components stored in separate files:

-
-
Data.db
-
The actual data, i.e. the contents of rows.
-
Index.db
-
An index from partition keys to positions in the Data.db file. For wide partitions, this may also include an -index to rows within a partition.
-
Summary.db
-
A sampling of (by default) every 128th entry in the Index.db file.
-
Filter.db
-
A Bloom Filter of the partition keys in the SSTable.
-
CompressionInfo.db
-
Metadata about the offsets and lengths of compression chunks in the Data.db file.
-
Statistics.db
-
Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, -repair, compression, TTLs, and more.
-
Digest.crc32
-
A CRC-32 digest of the Data.db file.
-
TOC.txt
-
A plain text list of the component files for the SSTable.
-
-

Within the Data.db file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, Murmur3Partitioner, is used). Within a partition, rows are -stored in the order of their clustering keys.

-

SSTables can be optionally compressed using block-based compression.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/bugs.html b/src/doc/3.11.6/bugs.html deleted file mode 100644 index 671f2c3fb..000000000 --- a/src/doc/3.11.6/bugs.html +++ /dev/null @@ -1,108 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Reporting Bugs and Contributing" -doc-header-links: ' - - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Reporting Bugs and Contributing

-

If you encounter a problem with Cassandra, the first places to ask for help are the user mailing list and the #cassandra IRC channel.

-

If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the Apache Cassandra JIRA. Please provide as many -details as you can on your problem, and don’t forget to indicate which version of Cassandra you are running and in which -environment.

-

Further details on how to contribute can be found at our Cassandra Development section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/configuration/cassandra_config_file.html b/src/doc/3.11.6/configuration/cassandra_config_file.html deleted file mode 100644 index a5130be64..000000000 --- a/src/doc/3.11.6/configuration/cassandra_config_file.html +++ /dev/null @@ -1,1826 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Configuring Cassandra" - -doc-title: "Cassandra Configuration File" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Cassandra Configuration File

-
-

cluster_name

-

The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another.

-

Default Value: ‘Test Cluster’

-
-
-

num_tokens

-

This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability.

-

If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below.

-

Specifying initial_token will override this setting on the node’s initial start; -on subsequent starts, this setting will apply even if initial_token is set.

-

If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations

-

Default Value: 256

-
-
-

allocate_tokens_for_keyspace

-

This option is commented out by default.

-

Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace.

-

The load assigned to each node will be close to proportional to its number of -vnodes.

-

Only supported with the Murmur3Partitioner.

-

Default Value: KEYSPACE

-
-
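An illustrative cassandra.yaml sketch of the two options above working together (the keyspace name is a hypothetical placeholder, and the allocation only works with the Murmur3Partitioner, as noted):

    num_tokens: 256
    allocate_tokens_for_keyspace: my_keyspace   # hypothetical keyspace whose replication strategy drives token placement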
-

initial_token

-

This option is commented out by default.

-

initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) – in which case you should provide a -comma-separated list – it’s primarily used when adding nodes to legacy clusters -that do not have vnodes enabled.

-
-
-

hinted_handoff_enabled

-

See http://wiki.apache.org/cassandra/HintedHandoff -May either be “true” or “false” to enable globally

-

Default Value: true

-
-
-

hinted_handoff_disabled_datacenters

-

This option is commented out by default.

-

When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff

-

Default Value (complex option):

-
#    - DC1
-#    - DC2
-
-
-
-
-

max_hint_window_in_ms

-

this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again.

-

Default Value: 10800000 # 3 hours

-
-
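As an illustrative sketch (not a recommendation), the hinted handoff options above combine like this; the datacenter name is a hypothetical placeholder:

    hinted_handoff_enabled: true
    max_hint_window_in_ms: 10800000      # stop generating hints for a host that has been dead longer than 3 hours
    hinted_handoff_disabled_datacenters:
        - DC_ANALYTICS                   # hypothetical datacenter that should never receive hints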
-

hinted_handoff_throttle_in_kb

-

Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.)

-

Default Value: 1024

-
-
-

max_hints_delivery_threads

-

Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower

-

Default Value: 2

-
-
-

hints_directory

-

This option is commented out by default.

-

Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints.

-

Default Value: /var/lib/cassandra/hints

-
-
-

hints_flush_period_in_ms

-

How often hints should be flushed from the internal buffers to disk. -Will not trigger fsync.

-

Default Value: 10000

-
-
-

max_hints_file_size_in_mb

-

Maximum size for a single hints file, in megabytes.

-

Default Value: 128

-
-
-

hints_compression

-

This option is commented out by default.

-

Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

batchlog_replay_throttle_in_kb

-

Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster.

-

Default Value: 1024

-
-
-

authenticator

-

Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}.

-
    -
  • AllowAllAuthenticator performs no checks - set it to disable authentication.
  • -
  • PasswordAuthenticator relies on username/password pairs to authenticate -users. It keeps usernames and hashed passwords in system_auth.roles table. -Please increase system_auth keyspace replication factor if you use this authenticator. -If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
  • -
-

Default Value: AllowAllAuthenticator

-
-
-

authorizer

-

Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}.

-
    -
  • AllowAllAuthorizer allows any action to any user - set it to disable authorization.
  • -
  • CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllAuthorizer

-
-
-

role_manager

-

Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable.

-
    -
  • CassandraRoleManager stores role data in the system_auth keyspace. Please -increase system_auth keyspace replication factor if you use this role manager.
  • -
-

Default Value: CassandraRoleManager

-
-
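A hedged sketch of how the three settings above are typically combined when enabling password-based authentication (class names are the ones listed in the descriptions; remember to raise the system_auth replication factor as noted):

    authenticator: PasswordAuthenticator
    authorizer: CassandraAuthorizer
    role_manager: CassandraRoleManager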
-

roles_validity_in_ms

-

Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator.

-

Default Value: 2000

-
-
-

roles_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -non-zero as well. -Defaults to the same value as roles_validity_in_ms.

-

Default Value: 2000

-
-
-

permissions_validity_in_ms

-

Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer.

-

Default Value: 2000

-
-
-

permissions_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -non-zero as well. -Defaults to the same value as permissions_validity_in_ms.

-

Default Value: 2000

-
-
-

credentials_validity_in_ms

-

Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching.

-

Default Value: 2000

-
-
-

credentials_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -non-zero as well. -Defaults to the same value as credentials_validity_in_ms.

-

Default Value: 2000

-
-
-

partitioner

-

The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. You should leave this -alone for new clusters. The partitioner can NOT be changed without -reloading all data, so when upgrading you should set this to the -same partitioner you were already using.

-

Besides Murmur3Partitioner, partitioners included for backwards -compatibility include RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner.

-

Default Value: org.apache.cassandra.dht.Murmur3Partitioner

-
-
-

data_file_directories

-

This option is commented out by default.

-

Directories where Cassandra should store data on disk. Cassandra -will spread data evenly across them, subject to the granularity of -the configured compaction strategy. -If not set, the default directory is $CASSANDRA_HOME/data/data.

-

Default Value (complex option):

-
#     - /var/lib/cassandra/data
-
-
-
-
-

commitlog_directory

-

This option is commented out by default. -Directory where Cassandra should store the commit log. When running on magnetic HDDs, this should be a -separate spindle from the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog.

-

Default Value: /var/lib/cassandra/commitlog

-
-
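A sketch of the directory layout implied above, keeping the commit log on its own spindle (paths are illustrative):

    data_file_directories:
        - /var/lib/cassandra/data                          # one entry per data disk
    commitlog_directory: /var/lib/cassandra/commitlog      # ideally a separate disk when on magnetic storage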
-

cdc_enabled

-

Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory).

-

Default Value: false

-
-
-

cdc_raw_directory

-

This option is commented out by default.

-

CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle from the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw.

-

Default Value: /var/lib/cassandra/cdc_raw

-
-
-

disk_failure_policy

-

Policy for data disk failures:

-
-
die
-
shut down gossip and client transports and kill the JVM for any fs errors or -single-sstable errors, so the node can be replaced.
-
stop_paranoid
-
shut down gossip and client transports even for single-sstable errors, -kill the JVM for errors during startup.
-
stop
-
shut down gossip and client transports, leaving the node effectively dead, but -can still be inspected via JMX, kill the JVM for errors during startup.
-
best_effort
-
stop using the failed disk and respond to requests based on -remaining available sstables. This means you WILL see obsolete -data at CL.ONE!
-
ignore
-
ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-
-

Default Value: stop

-
-
-

commit_failure_policy

-

Policy for commit disk failures:

-
-
die
-
shut down gossip and Thrift and kill the JVM, so the node can be replaced.
-
stop
-
shut down gossip and Thrift, leaving the node effectively dead, but -can still be inspected via JMX.
-
stop_commit
-
shutdown the commit log, letting writes collect but -continuing to service reads, as in pre-2.0.5 Cassandra
-
ignore
-
ignore fatal errors and let the batches fail
-
-

Default Value: stop

-
-
-

prepared_statements_cache_size_mb

-

Maximum size of the native protocol prepared statement cache

-

Valid values are either “auto” (omitting the value) or a value greater than 0.

-

Note that specifying too large a value will result in long-running GCs and possibly -out-of-memory errors. Keep the value at a small fraction of the heap.

-

If you constantly see “prepared statements discarded in the last minute because -cache limit reached” messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts.

-

Only change the default value if you really have more prepared statements than -fit in the cache. In most cases it is not necessary to change this value. -Constantly re-preparing statements is a performance penalty.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

thrift_prepared_statements_cache_size_mb

-

Maximum size of the Thrift prepared statement cache

-

If you do not use Thrift at all, it is safe to leave this value at “auto”.

-

See description of ‘prepared_statements_cache_size_mb’ above for more information.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

key_cache_size_in_mb

-

Maximum size of the key cache in memory.

-

Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it’s worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It’s best to only use the -row cache if you have hot rows or static rows.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.

-
-
-

key_cache_save_period

-

Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 14400 or 4 hours.

-

Default Value: 14400

-
-
-

key_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

row_cache_class_name

-

This option is commented out by default.

-

Row cache implementation class name. Available implementations:

-
-
org.apache.cassandra.cache.OHCProvider
-
Fully off-heap row cache implementation (default).
-
org.apache.cassandra.cache.SerializingCacheProvider
-
This is the row cache implementation available -in previous releases of Cassandra.
-
-

Default Value: org.apache.cassandra.cache.OHCProvider

-
-
-

row_cache_size_in_mb

-

Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory than the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Never allow your system to swap.

-

Default value is 0, to disable row caching.

-

Default Value: 0

-
-
-

row_cache_save_period

-

Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 0 to disable saving the row cache.

-

Default Value: 0

-
-
-

row_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved

-

Default Value: 100

-
-
-

counter_cache_size_in_mb

-

Maximum size of the counter cache in memory.

-

Counter cache helps to reduce counter locks’ contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it’s relatively cheap.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.

-
-
-

counter_cache_save_period

-

Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Default is 7200 or 2 hours.

-

Default Value: 7200

-
-
-

counter_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

saved_caches_directory

-

This option is commented out by default.

-

Directory where Cassandra should store saved caches. -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.

-

Default Value: /var/lib/cassandra/saved_caches

-
-
-

commitlog_sync

-

This option is commented out by default.

-

commitlog_sync may be either “periodic” or “batch.”

-

When in batch mode, Cassandra won’t ack writes until the commit log -has been fsynced to disk. It will wait -commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -This window should be kept short because the writer threads will -be unable to do extra work while waiting. (You may need to increase -concurrent_writes for the same reason.)

-

Default Value: batch

-
-
-

commitlog_sync_batch_window_in_ms

-

This option is commented out by default.

-

Default Value: 2

-
-
-

commitlog_sync

-

the other option is “periodic” where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds.

-

Default Value: periodic

-
-
-

commitlog_sync_period_in_ms

-

Default Value: 10000

-
-
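The two sync modes described above are mutually exclusive; a sketch of each, with illustrative values only:

    # periodic mode: ack writes immediately, fsync every commitlog_sync_period_in_ms
    commitlog_sync: periodic
    commitlog_sync_period_in_ms: 10000

    # batch mode: ack only after fsync, waiting up to the batch window between fsyncs
    # commitlog_sync: batch
    # commitlog_sync_batch_window_in_ms: 2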
-

commitlog_segment_size_in_mb

-

The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables.

-

The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048.

-

NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024

-

Default Value: 32

-
-
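Worked example of the constraint above, with illustrative numbers: allowing 16 MB mutations requires segments of at least 2 * 16384 / 1024 = 32 MB:

    max_mutation_size_in_kb: 16384       # hypothetical explicit setting (16 MB mutations)
    commitlog_segment_size_in_mb: 32     # must be >= 2 * max_mutation_size_in_kb / 1024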
-

commitlog_compression

-

This option is commented out by default.

-

Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

seed_provider

-

any class that implements the SeedProvider interface and has a -constructor that takes a Map<String, String> of parameters will do.

-

Default Value (complex option):

-
# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring.  You must change this if you are running
-# multiple nodes!
-- class_name: org.apache.cassandra.locator.SimpleSeedProvider
-  parameters:
-      # seeds is actually a comma-delimited list of addresses.
-      # Ex: "<ip1>,<ip2>,<ip3>"
-      - seeds: "127.0.0.1"
-
-
-
-
-

concurrent_reads

-

For workloads with more data than can fit in memory, Cassandra’s -bottleneck will be reads that need to fetch data from -disk. “concurrent_reads” should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -“concurrent_counter_writes”, since counter writes read the current -values before incrementing and writing them back.

-

On the other hand, since writes are almost never IO bound, the ideal -number of “concurrent_writes” is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb.

-

Default Value: 32

-
-
-

concurrent_writes

-

Default Value: 32

-
-
-

concurrent_counter_writes

-

Default Value: 32

-
-
-

concurrent_materialized_view_writes

-

Materialized view writes involve a read, so this should -be limited by the lesser of concurrent reads and concurrent writes.

-

Default Value: 32

-
-
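Applying the sizing rules above to a hypothetical host with 2 data drives and 8 cores gives, as an illustrative sketch:

    concurrent_reads: 32                       # 16 * 2 drives
    concurrent_counter_writes: 32              # same sizing rule as reads
    concurrent_writes: 64                      # 8 * 8 cores
    concurrent_materialized_view_writes: 32    # limited by the lesser of reads (32) and writes (64)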
-

file_cache_size_in_mb

-

This option is commented out by default.

-

Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as a -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed.

-

Default Value: 512

-
-
-

buffer_pool_use_heap_if_exhausted

-

This option is commented out by default.

-

Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

-

Default Value: true

-
-
-

disk_optimization_strategy

-

This option is commented out by default.

-

The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks)

-

Default Value: ssd

-
-
-

memtable_heap_space_in_mb

-

This option is commented out by default.

-

Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap.

-

Default Value: 2048

-
-
-

memtable_offheap_space_in_mb

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

memtable_cleanup_threshold

-

This option is commented out by default.

-

memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information.

-

Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load.

-

memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)

-

Default Value: 0.11

-
-
-

memtable_allocation_type

-

Specify the way Cassandra allocates and manages memtable memory. -Options are:

-
-
heap_buffers
-
on heap nio buffers
-
offheap_buffers
-
off heap (direct) nio buffers
-
offheap_objects
-
off heap objects
-
-

Default Value: heap_buffers

-
-
-

repair_session_max_tree_depth

-

This option is commented out by default.

-

Limits the maximum Merkle tree depth to avoid consuming too much -memory during repairs.

-

The default setting of 18 generates trees of maximum size around -50 MiB / tree. If you are running out of memory during repairs consider -lowering this to 15 (~6 MiB / tree) or lower, but try not to lower it -too much past that or you will lose too much resolution and stream -too much redundant data during repair. Cannot be set lower than 10.

-

For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.

-

Default Value: 18

-
-
-

commitlog_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for commit logs on disk.

-

If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume.

-

Default Value: 8192

-
-
-

memtable_flush_writers

-

This option is commented out by default.

-

This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound.

-

Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time.

-

You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory.

-

memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers.

-

Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead.

-

There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better; you just need enough flush writers -to never stall waiting for flushing to free memory.

-

Default Value: 2

-
-
-

cdc_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for change-data-capture logs on disk.

-

If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed.

-

The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides.

-

Default Value: 4096

-
-
-

cdc_free_space_check_interval_ms

-

This option is commented out by default.

-

When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Defaults to 250ms.

-

Default Value: 250

-
-
-

index_summary_capacity_in_mb

-

A fixed memory pool size in MB for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory.

-
-
-

index_summary_resize_interval_in_minutes

-

How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional to their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level.

-

Default Value: 60

-
-
-

trickle_fsync

-

Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters.

-

Default Value: false

-
-
-

trickle_fsync_interval_in_kb

-

Default Value: 10240

-
-
-

storage_port

-

TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7000

-
-
-

ssl_storage_port

-

SSL port, for encrypted communication. Unused unless enabled in -encryption_options -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7001

-
-
-

listen_address

-

Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate!

-

Set listen_address OR listen_interface, not both.

-

Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be).

-

Setting listen_address to 0.0.0.0 is always wrong.

-

Default Value: localhost

-
-
-

listen_interface

-

This option is commented out by default.

-

Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth0

-
-
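Because only one of the two may be set, a typical configuration keeps the other commented out; this is an illustrative sketch and the address is a hypothetical example:

    listen_address: 10.0.0.5    # hypothetical private address that other nodes can reach
    # listen_interface: eth0    # alternative: bind by interface instead, never both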
-

listen_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_address

-

This option is commented out by default.

-

Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address

-

Default Value: 1.2.3.4

-
-
-

listen_on_broadcast_address

-

This option is commented out by default.

-

When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2.

-

Default Value: false

-
-
-

internode_authenticator

-

This option is commented out by default.

-

Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes.

-

Default Value: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

-
-
-

start_native_transport

-

Whether to start the native transport server. -Please note that the address on which the native transport is bound is the -same as the rpc_address. The port however is different and specified below.

-

Default Value: true

-
-
-

native_transport_port

-

port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 9042

-
-
-

native_transport_port_ssl

-

This option is commented out by default. -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted.

-

Default Value: 9142

-
-
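Illustrative sketch of the dual-port setup described above (it also requires client encryption to be enabled in client_encryption_options):

    native_transport_port: 9042        # remains unencrypted
    native_transport_port_ssl: 9142    # encrypted; differs from native_transport_port, so both ports are served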
-

native_transport_max_threads

-

This option is commented out by default. -The maximum threads for handling requests when the native transport is used. -This is similar to rpc_max_threads though the default differs slightly (and -there is no native_transport_min_threads, idle threads will always be stopped -after 30 seconds).

-

Default Value: 128

-
-
-

native_transport_max_frame_size_in_mb

-

This option is commented out by default.

-

The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you’re changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

native_transport_max_concurrent_connections

-

This option is commented out by default.

-

The maximum number of concurrent client connections. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_max_concurrent_connections_per_ip

-

This option is commented out by default.

-

The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

start_rpc

-

Whether to start the thrift rpc server.

-

Default Value: false

-
-
-

rpc_address

-

The address or interface to bind the Thrift RPC service and native transport -server to.

-

Set rpc_address OR rpc_interface, not both.

-

Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node).

-

Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0.

-

For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: localhost

-
-
-

rpc_interface

-

This option is commented out by default.

-

Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth1

-
-
-

rpc_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

rpc_port

-

port for Thrift to listen for clients on

-

Default Value: 9160

-
-
-

broadcast_rpc_address

-

This option is commented out by default.

-

RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set.

-

Default Value: 1.2.3.4

-
-
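Sketch of the wildcard case described above, with hypothetical addresses: binding the client port on all interfaces requires an explicit broadcast address:

    rpc_address: 0.0.0.0                    # listen on all interfaces for client connections
    broadcast_rpc_address: 203.0.113.10     # hypothetical address advertised to drivers; mandatory when rpc_address is 0.0.0.0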
-

rpc_keepalive

-

enable or disable keepalive on rpc/native connections

-

Default Value: true

-
-
-

rpc_server_type

-

Cassandra provides two out-of-the-box options for the RPC Server:

-
-
sync
-
One thread per thrift connection. For a very large number of clients, memory -will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size -per thread, and that will correspond to your use of virtual memory (but physical memory -may be limited depending on use of stack space).
-
hsha
-
Stands for “half synchronous, half asynchronous.” All thrift clients are handled -asynchronously using a small number of threads that does not vary with the number -of thrift clients (and thus scales well to many clients). The rpc requests are still -synchronous (one thread per active request). If hsha is selected then it is essential -that rpc_max_threads is changed from the default value of unlimited.
-
-

The default is sync because on Windows hsha is about 30% slower. On Linux, -sync/hsha performance is about the same, with hsha of course using less memory.

-

Alternatively, you can provide your own RPC server by supplying the fully-qualified class name -of an o.a.c.t.TServerFactory that can create an instance of it.

-

Default Value: sync

-
-
-

rpc_min_threads

-

This option is commented out by default.

-

Uncomment rpc_min|max_thread to set request pool size limits.

-

Regardless of your choice of RPC server (see above), the number of maximum requests in the -RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -RPC server, it also dictates the number of clients that can be connected at all).

-

The default is unlimited and thus provides no protection against clients overwhelming the server. You are -encouraged to set a maximum that makes sense for you in production, but do keep in mind that -rpc_max_threads represents the maximum number of client requests this server may execute concurrently.

-

Default Value: 16

-
-
-

rpc_max_threads

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

rpc_send_buff_size_in_bytes

-

This option is commented out by default.

-

uncomment to set socket buffer sizes on rpc connections

-
-
-

rpc_recv_buff_size_in_bytes

-

This option is commented out by default.

-
-
-

internode_send_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when it is not set it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and ‘man tcp’

-
-
-

internode_recv_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem

-
-
-

thrift_framed_transport_size_in_mb

-

Frame size for thrift (maximum message length).

-

Default Value: 15

-
-
-

incremental_backups

-

Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator’s -responsibility.

-

Default Value: false

-
-
-

snapshot_before_compaction

-

Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won’t clean up the -snapshots for you. Mostly useful if you’re paranoid when there -is a data format change.

-

Default Value: false

-
-
-

auto_snapshot

-

Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop.

-

Default Value: true

-
-
-

column_index_size_in_kb

-

Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these:

-
    -
  • a smaller granularity means more index entries are generated and looking up rows within the partition by collation column is faster
  • -
  • but, Cassandra will keep the collation index in memory for hot -rows (as part of the key cache), so a larger granularity means -you can cache more hot rows
  • -
-

Default Value: 64

-
-
-

column_index_cache_size_in_kb

-

Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk.

-

Note that this size refers to the size of the -serialized index information and not the size of the partition.

-

Default Value: 2

-
-
-

concurrent_compactors

-

This option is commented out by default.

-

Number of simultaneous compactions to allow, NOT including validation “compactions” for anti-entropy repair. Simultaneous compactions can help preserve read performance in a mixed read/write workload, by mitigating the tendency of small sstables to accumulate during a single long-running compaction. The default is usually fine and if you experience problems with compaction running too slowly or too fast, you should look at compaction_throughput_mb_per_sec first.

-

concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8.

-

If your data directories are backed by SSD, you should increase this -to the number of cores.

-

Default Value: 1

-
-
-

compaction_throughput_mb_per_sec

-

Throttles compaction to the given total throughput across the entire system. The faster you insert data, the faster you need to compact in order to keep the sstable count down, but in general, setting this to 16 to 32 times the rate you are inserting data is more than sufficient. Setting this to 0 disables throttling. Note that this accounts for all types of compaction, including validation compaction.

-

Default Value: 16
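For instance, an SSD-backed node might uncomment and raise both of these settings in cassandra.yaml; the values below are purely illustrative, not recommendations:

```yaml
# Illustrative only: tune to your disks, cores and write rate
concurrent_compactors: 8
compaction_throughput_mb_per_sec: 64
```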

-
-
-

sstable_preemptive_open_interval_in_mb

-

When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot

-

Default Value: 50

-
-
-

stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

inter_dc_stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all streaming file transfer between the datacenters. This setting allows users to throttle inter-dc stream throughput in addition to throttling all network stream traffic as configured with stream_throughput_outbound_megabits_per_sec. When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

read_request_timeout_in_ms

-

How long the coordinator should wait for read operations to complete

-

Default Value: 5000

-
-
-

range_request_timeout_in_ms

-

How long the coordinator should wait for seq or index scans to complete

-

Default Value: 10000

-
-
-

write_request_timeout_in_ms

-

How long the coordinator should wait for writes to complete

-

Default Value: 2000

-
-
-

counter_write_request_timeout_in_ms

-

How long the coordinator should wait for counter writes to complete

-

Default Value: 5000

-
-
-

cas_contention_timeout_in_ms

-

How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row

-

Default Value: 1000

-
-
-

truncate_request_timeout_in_ms

-

How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.)

-

Default Value: 60000

-
-
-

request_timeout_in_ms

-

The default timeout for other, miscellaneous operations

-

Default Value: 10000
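Taken together, the coordinator timeouts described above correspond to the following cassandra.yaml keys (shown here with the documented defaults):

```yaml
read_request_timeout_in_ms: 5000
range_request_timeout_in_ms: 10000
write_request_timeout_in_ms: 2000
counter_write_request_timeout_in_ms: 5000
cas_contention_timeout_in_ms: 1000
truncate_request_timeout_in_ms: 60000
request_timeout_in_ms: 10000
```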

-
-
-

slow_query_log_timeout_in_ms

-

How long before a node logs slow queries. Select queries that take longer than this timeout to execute will generate an aggregated log message, so that slow queries can be identified. Set this value to zero to disable slow query logging.

-

Default Value: 500

-
-
-

cross_node_timeout

-

Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests.

-

Warning: before enabling this property make sure ntp is installed and the times are synchronized between the nodes.

-

Default Value: false

-
-
-

streaming_keep_alive_period_in_secs

-

This option is commented out by default.

-

Set the keep-alive period for streaming. This node will send a keep-alive message periodically with this period. If the node does not receive a keep-alive message from the peer for 2 keep-alive cycles, the stream session times out and fails. The default value is 300s (5 minutes), which means a stalled stream times out in 10 minutes by default.

-

Default Value: 300

-
-
-

phi_convict_threshold

-

This option is commented out by default.

-

phi value that must be reached for a host to be marked down. -most users should never need to adjust this.

-

Default Value: 8

-
-
-

endpoint_snitch

-

endpoint_snitch – Set this to a class that implements -IEndpointSnitch. The snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route -requests efficiently
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid -correlated failures. It does this by grouping machines into -“datacenters” and “racks.” Cassandra will do its best not to have -more than one replica on the same “rack” (which may not actually -be a physical location)
  • -
-

CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on “rack1” in “datacenter1”, your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new “datacenter”) and -decommissioning the old ones.

-

Out of the box, Cassandra provides:

-
-
SimpleSnitch:
-
Treats Strategy order as proximity. This can improve cache -locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack -and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via -gossip. If cassandra-topology.properties exists, it is used as a -fallback, allowing migration from the PropertyFileSnitch.
-
PropertyFileSnitch:
-
Proximity is determined by rack and data center, which are -explicitly configured in cassandra-topology.properties.
-
Ec2Snitch:
-
Appropriate for EC2 deployments in a single Region. Loads Region -and Availability Zone information from the EC2 API. The Region is -treated as the datacenter, and the Availability Zone as the rack. -Only private IPs are used, so this will not work across multiple -Regions.
-
Ec2MultiRegionSnitch:
-
Uses public IPs as broadcast_address to allow cross-region -connectivity. (Thus, you should set seed addresses to the public -IP as well.) You will need to open the storage_port or -ssl_storage_port on the public IP firewall. (For intra-Region -traffic, Cassandra will switch to the private IP after -establishing a connection.)
-
RackInferringSnitch:
-
Proximity is determined by rack and data center, which are -assumed to correspond to the 3rd and 2nd octet of each node’s IP -address, respectively. Unless this happens to match your -deployment conventions, this is best used as an example of -writing a custom Snitch class and is provided in that spirit.
-
-

You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath.

-

Default Value: SimpleSnitch
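For instance, a production node using GossipingPropertyFileSnitch only needs the snitch set in cassandra.yaml:

```yaml
endpoint_snitch: GossipingPropertyFileSnitch
```

with the node’s own dc= and rack= entries defined in cassandra-rackdc.properties (the names are deployment-specific).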

-
-
-

dynamic_snitch_update_interval_in_ms

-

controls how often to perform the more expensive part of host score -calculation

-

Default Value: 100

-
-
-

dynamic_snitch_reset_interval_in_ms

-

controls how often to reset all host scores, allowing a bad host to -possibly recover

-

Default Value: 600000

-
-
-

dynamic_snitch_badness_threshold

-

if set greater than zero and read_repair_chance is < 1.0, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest.

-

Default Value: 0.1

-
-
-

request_scheduler

-

request_scheduler – Set this to a class that implements RequestScheduler, which will schedule incoming client requests according to the specific policy. This is useful for multi-tenancy with a single Cassandra cluster. NOTE: This is specifically for requests from the client and does not affect inter-node communication. The provided implementations are: org.apache.cassandra.scheduler.NoScheduler (no scheduling takes place) and org.apache.cassandra.scheduler.RoundRobinScheduler (round robin of client requests to a node, with a separate queue for each request_scheduler_id; the scheduler is further customized by request_scheduler_options as described below).

-

Default Value: org.apache.cassandra.scheduler.NoScheduler

-
-
-

request_scheduler_options

-

This option is commented out by default.

-

Scheduler Options vary based on the type of scheduler

-
-
NoScheduler
-
Has no options
-
RoundRobin
-
-
throttle_limit
-
The throttle_limit is the number of in-flight -requests per client. Requests beyond -that limit are queued up until -running requests can complete. -The value of 80 here is twice the number of -concurrent_reads + concurrent_writes.
-
default_weight
-
default_weight is optional and allows for -overriding the default which is 1.
-
weights
-
Weights are optional and will default to 1 or the -overridden default_weight. The weight translates into how -many requests are handled during each turn of the -RoundRobin, based on the scheduler id.
-
-
-
-

Default Value (complex option):

-
#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-
-
-
-

request_scheduler_id

-

This option is commented out by default. -request_scheduler_id – An identifier based on which to perform -the request scheduling. Currently the only valid option is keyspace.

-

Default Value: keyspace

-
-
-

server_encryption_options

-

Enable or disable inter-node encryption -JVM defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html -NOTE No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack

-

If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks

-

The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore

-

Default Value (complex option):

-
internode_encryption: none
-keystore: conf/.keystore
-keystore_password: cassandra
-truststore: conf/.truststore
-truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# algorithm: SunX509
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-# require_client_auth: false
-# require_endpoint_verification: false
-
-
-
-
-

client_encryption_options

-

enable or disable client/server encryption.

-

Default Value (complex option):

-
enabled: false
-# If enabled and optional is set to true encrypted and unencrypted connections are handled.
-optional: false
-keystore: conf/.keystore
-keystore_password: cassandra
-# require_client_auth: false
-# Set trustore and truststore_password if require_client_auth is true
-# truststore: conf/.truststore
-# truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# algorithm: SunX509
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-
-
-
-

internode_compression

-

internode_compression controls whether traffic between nodes is -compressed. -Can be:

-
-
all
-
all traffic is compressed
-
dc
-
traffic between different datacenters is compressed
-
none
-
nothing is compressed.
-
-

Default Value: dc

-
-
-

inter_dc_tcp_nodelay

-

Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses.

-

Default Value: false

-
-
-

tracetype_query_ttl

-

TTL for different trace types used during logging of the repair process.

-

Default Value: 86400

-
-
-

tracetype_repair_ttl

-

Default Value: 604800

-
-
-

gc_log_threshold_in_ms

-

This option is commented out by default.

-

By default, Cassandra logs GC Pauses greater than 200 ms at INFO level -This threshold can be adjusted to minimize logging if necessary

-

Default Value: 200

-
-
-

enable_user_defined_functions

-

If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.

-

Default Value: false
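If you do enable UDFs, a minimal Java UDF created through CQL looks roughly like the sketch below; the function name and body are illustrative:

```
CREATE FUNCTION IF NOT EXISTS max_of (x int, y int)
    RETURNS NULL ON NULL INPUT
    RETURNS int
    LANGUAGE java
    AS 'return Math.max(x, y);';
```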

-
-
-

enable_scripted_user_defined_functions

-

Enables scripted UDFs (JavaScript UDFs). Java UDFs are always enabled if enable_user_defined_functions is true. Enable this option to be able to use UDFs with “language javascript” or any custom JSR-223 provider. This option has no effect if enable_user_defined_functions is false.

-

Default Value: false

-
-
-

windows_timer_interval

-

The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals ‘clockres’ tool can confirm your system’s default -setting.

-

Default Value: 1

-
-
-

transparent_data_encryption_options

-

Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by the “key_alias” is the only key that will be used for encrypt operations; previously used keys can still (and should!) be in the keystore and will be used on decrypt operations (to handle the case of key rotation).

-

It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)

-

Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints

-

Default Value (complex option):

-
enabled: false
-chunk_length_kb: 64
-cipher: AES/CBC/PKCS5Padding
-key_alias: testing:1
-# CBC IV length for AES needs to be 16 bytes (which is also the default size)
-# iv_length: 16
-key_provider:
-  - class_name: org.apache.cassandra.security.JKSKeyProvider
-    parameters:
-      - keystore: conf/.keystore
-        keystore_password: cassandra
-        store_type: JCEKS
-        key_password: cassandra
-
-
-
-
-

tombstone_warn_threshold

-
-

SAFETY THRESHOLDS #

-

When executing a scan, within or across a partition, we need to keep the tombstones seen in memory so we can return them to the coordinator, which will use them to make sure other replicas also know about the deleted rows. With workloads that generate a lot of tombstones, this can cause performance problems and even exhaust the server heap. (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) Adjust the thresholds here if you understand the dangers and want to scan more tombstones anyway. These thresholds may also be adjusted at runtime using the StorageService mbean.

-

Default Value: 1000
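For reference, the two thresholds sit side by side in cassandra.yaml (defaults shown):

```yaml
tombstone_warn_threshold: 1000
tombstone_failure_threshold: 100000
```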

-
-
-
-

tombstone_failure_threshold

-

Default Value: 100000

-
-
-

batch_size_warn_threshold_in_kb

-

Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability.

-

Default Value: 5

-
-
-

batch_size_fail_threshold_in_kb

-

Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.

-

Default Value: 50

-
-
-

unlogged_batch_across_partitions_warn_threshold

-

Log WARN on any batches not of type LOGGED that span more partitions than this limit.

-

Default Value: 10

-
-
-

compaction_large_partition_warning_threshold_mb

-

Log a warning when compacting partitions larger than this value

-

Default Value: 100

-
-
-

gc_warn_threshold_in_ms

-

GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level

-

Default Value: 1000

-
-
-

max_value_size_in_mb

-

This option is commented out by default.

-

Maximum size of any value in SSTables. Safety measure to detect SSTable corruption early. Any value size larger than this threshold will result in marking an SSTable as corrupted. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

back_pressure_enabled

-

Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas.

-

Default Value: false

-
-
-

back_pressure_strategy

-

The back-pressure strategy applied. The default implementation, RateBasedBackPressure, takes three arguments: high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; if above high ratio, the rate limiting is increased by the given factor. The factor is usually best configured between 1 and 10: use larger values for a faster recovery at the expense of potentially more dropped mutations. The rate limiting is applied according to the flow type: if FAST, it’s rate limited at the speed of the fastest replica; if SLOW, at the speed of the slowest one. New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and provide a public constructor accepting a Map<String, Object>.
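As a sketch of how this looks in cassandra.yaml, the rate-based strategy takes its three parameters as shown below; the values mirror the commented example commonly shipped with 3.11, but verify them against your own cassandra.yaml:

```yaml
back_pressure_strategy:
    - class_name: org.apache.cassandra.net.RateBasedBackPressure
      parameters:
        - high_ratio: 0.90
          factor: 5
          flow: FAST
```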

-
-
-

otc_coalescing_strategy

-

This option is commented out by default.

-

Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won’t notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It’s not that bare metal -doesn’t benefit from coalescing messages, it’s that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details.

-

Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.

-

Default Value: DISABLED

-
-
-

otc_coalescing_window_us

-

This option is commented out by default.

-

How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled.

-

Default Value: 200

-
-
-

otc_coalescing_enough_coalesced_messages

-

This option is commented out by default.

-

Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.

-

Default Value: 8

-
-
-

otc_backlog_expiration_interval_ms

-

This option is commented out by default.

-

How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.

-

Default Value: 200

-
-
-

enable_materialized_views

-
-

EXPERIMENTAL FEATURES #

-

Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use.

-

Default Value: true

-
-
-
-

enable_sasi_indexes

-

Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use.

-

Default Value: true
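To rule out accidental use of these experimental features in production, both switches can be turned off explicitly in cassandra.yaml:

```yaml
enable_materialized_views: false
enable_sasi_indexes: false
```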

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/configuration/index.html b/src/doc/3.11.6/configuration/index.html deleted file mode 100644 index a1e021057..000000000 --- a/src/doc/3.11.6/configuration/index.html +++ /dev/null @@ -1,109 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

This section describes how to configure Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/contactus.html b/src/doc/3.11.6/contactus.html deleted file mode 100644 index d3e752489..000000000 --- a/src/doc/3.11.6/contactus.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contact us" -doc-header-links: ' - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contact us

-

You can get in touch with the Cassandra community either via the mailing lists or the freenode IRC channels.

-
-

Mailing lists

-

The following mailing lists are available:

- -

Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe.

-
-
-

IRC

-

To chat with developers or users in real-time, join our channels on IRC freenode. The -following channels are available:

-
    -
  • #cassandra - for user questions and general discussions.
  • -
  • #cassandra-dev - strictly for questions or discussions related to Cassandra development.
  • -
  • #cassandra-builds - results of automated test builds.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/appendices.html b/src/doc/3.11.6/cql/appendices.html deleted file mode 100644 index fdf205ec8..000000000 --- a/src/doc/3.11.6/cql/appendices.html +++ /dev/null @@ -1,565 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Appendices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Appendices

-
-

Appendix A: CQL Keywords

-

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (but one can enclose a reserved keyword in double-quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d’être of these non-reserved keywords is convenience: some keywords are non-reserved when it was always easy for the parser to decide whether they were used as keywords or not.
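For example, the reserved keyword ORDER can still be used as a column name by double-quoting it (the table and column names below are illustrative):

```
CREATE TABLE shop (id int PRIMARY KEY, "order" int);
```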

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
KeywordReserved?
ADDyes
AGGREGATEno
ALLno
ALLOWyes
ALTERyes
ANDyes
APPLYyes
ASno
ASCyes
ASCIIno
AUTHORIZEyes
BATCHyes
BEGINyes
BIGINTno
BLOBno
BOOLEANno
BYyes
CALLEDno
CLUSTERINGno
COLUMNFAMILYyes
COMPACTno
CONTAINSno
COUNTno
COUNTERno
CREATEyes
CUSTOMno
DATEno
DECIMALno
DELETEyes
DESCyes
DESCRIBEyes
DISTINCTno
DOUBLEno
DROPyes
ENTRIESyes
EXECUTEyes
EXISTSno
FILTERINGno
FINALFUNCno
FLOATno
FROMyes
FROZENno
FULLyes
FUNCTIONno
FUNCTIONSno
GRANTyes
IFyes
INyes
INDEXyes
INETno
INFINITYyes
INITCONDno
INPUTno
INSERTyes
INTno
INTOyes
JSONno
KEYno
KEYSno
KEYSPACEyes
KEYSPACESno
LANGUAGEno
LIMITyes
LISTno
LOGINno
MAPno
MODIFYyes
NANyes
NOLOGINno
NORECURSIVEyes
NOSUPERUSERno
NOTyes
NULLyes
OFyes
ONyes
OPTIONSno
ORyes
ORDERyes
PASSWORDno
PERMISSIONno
PERMISSIONSno
PRIMARYyes
RENAMEyes
REPLACEyes
RETURNSno
REVOKEyes
ROLEno
ROLESno
SCHEMAyes
SELECTyes
SETyes
SFUNCno
SMALLINTno
STATICno
STORAGEno
STYPEno
SUPERUSERno
TABLEyes
TEXTno
TIMEno
TIMESTAMPno
TIMEUUIDno
TINYINTno
TOyes
TOKENyes
TRIGGERno
TRUNCATEyes
TTLno
TUPLEno
TYPEno
UNLOGGEDyes
UPDATEyes
USEyes
USERno
USERSno
USINGyes
UUIDno
VALUESno
VARCHARno
VARINTno
WHEREyes
WITHyes
WRITETIMEno
-
-
-

Appendix B: CQL Reserved Types

-

The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name.

- --- - - - - - - - - - - - - - - - - - - -
type
bitstring
byte
complex
enum
interval
macaddr
-
-
-

Appendix C: Dropping Compact Storage

-

Starting with version 4.0, Thrift and COMPACT STORAGE are no longer supported.

-

The ‘ALTER … DROP COMPACT STORAGE’ statement makes Compact Tables CQL-compatible, exposing the internal structure of Thrift/Compact Tables (see the example statement after the list below):

-
    -
  • CQL-created Compact Tables that have no clustering columns, will expose an -additional clustering column column1 with UTF8Type.
  • -
  • CQL-created Compact Tables that had no regular columns, will expose a -regular column value with BytesType.
  • -
  • For CQL-created Compact Tables, all columns originally defined as regular will become static
  • -
  • CQL-created Compact Tables that have clustering but have no regular -columns will have an empty value column (of EmptyType)
  • -
  • SuperColumn Tables (can only be created through Thrift) will expose -a compact value map with an empty name.
  • -
  • Thrift-created Compact Tables will have types corresponding to their -Thrift definition.
  • -
-
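The statement itself is a one-liner; the keyspace and table names below are placeholders:

```
ALTER TABLE my_keyspace.my_compact_table DROP COMPACT STORAGE;
```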
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/changes.html b/src/doc/3.11.6/cql/changes.html deleted file mode 100644 index 3d4f4b0a2..000000000 --- a/src/doc/3.11.6/cql/changes.html +++ /dev/null @@ -1,353 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Changes

-

The following describes the changes in each version of CQL.

-
-

3.4.4

-
    -
  • ALTER TABLE ALTER has been removed; a column’s type may not be changed after creation (CASSANDRA-12443).
  • -
  • ALTER TYPE ALTER has been removed; a field’s type may not be changed after creation (CASSANDRA-12443).
  • -
-
-
-

3.4.3

-
    -
  • Adds a new ``duration`` data type (CASSANDRA-11873).
  • -
  • Support for GROUP BY (CASSANDRA-10707).
  • -
  • Adds a DEFAULT UNSET option for INSERT JSON to ignore omitted columns (CASSANDRA-11424).
  • -
  • Allows null as a legal value for TTL on insert and update. It will be treated as equivalent to inserting a 0 (CASSANDRA-12216).

-
-
-

3.4.2

-
    -
  • If a table has a non zero default_time_to_live, then explicitly specifying a TTL of 0 in an INSERT or -UPDATE statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels -the default_time_to_live). This wasn’t the case before and the default_time_to_live was applied even though a -TTL had been explicitly set.
  • -
  • ALTER TABLE ADD and DROP now allow multiple columns to be added/removed.
  • -
  • New PER PARTITION LIMIT option for SELECT statements (see CASSANDRA-7017).
  • -
  • User-defined functions can now instantiate UDTValue and TupleValue instances via the new UDFContext interface (see CASSANDRA-10818).
  • -
  • User-defined types may now be stored in a non-frozen form, allowing individual fields to be updated and -deleted in UPDATE statements and DELETE statements, respectively. (CASSANDRA-7423).
  • -
-
-
-

3.4.1

-
    -
  • Adds CAST functions.
  • -
-
-
-

3.4.0

-
    -
  • Support for materialized views.
  • -
  • DELETE support for inequality expressions and IN restrictions on any primary key columns.
  • -
  • UPDATE support for IN restrictions on any primary key columns.
  • -
-
-
-

3.3.1

-
    -
  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X.
  • -
-
-
-

3.3.0

-
    -
  • User-defined functions and aggregates are now supported.
  • -
  • Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings.
  • -
  • Introduces Roles to supersede user based authentication and access control
  • -
  • New date, time, tinyint and smallint data types have been added.
  • -
  • JSON support has been added
  • -
  • Adds new time conversion functions and deprecate dateOf and unixTimestampOf.
  • -
-
-
-

3.2.0

-
    -
  • User-defined types supported.
  • -
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the -keys() function
  • -
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • -
  • Tuple types were added to hold fixed-length sets of typed positional fields.
  • -
  • DROP INDEX now supports optionally specifying a keyspace.
  • -
-
-
-

3.1.7

-
    -
  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations -of clustering columns.
  • -
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, -respectively.
  • -
-
-
-

3.1.6

-
    -
  • A new uuid() method has been added.
  • -
  • Support for DELETE ... IF EXISTS syntax.
  • -
-
-
-

3.1.5

-
    -
  • It is now possible to group clustering columns in a relation, see WHERE clauses.
  • -
  • Added support for static columns.
  • -
-
-
-

3.1.4

-
    -
  • CREATE INDEX now allows specifying options when creating CUSTOM indexes.
  • -
-
-
-

3.1.3

-
    -
  • Millisecond precision formats have been added to the timestamp parser.
  • -
-
-
-

3.1.2

-
    -
  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or keyspace/table one), you will now need to double quote them.
  • -
-
-
-

3.1.1

-
    -
  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • -
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable -will be a list of whatever type c is.
  • -
  • It is now possible to use named bind variables (using :name instead of ?).
  • -
-
-
-

3.1.0

-
    -
  • ALTER TABLE DROP option added.
  • -
  • SELECT statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
  • -
  • CREATE statements for KEYSPACE, TABLE and INDEX now supports an IF NOT EXISTS condition. -Similarly, DROP statements support a IF EXISTS condition.
  • -
  • INSERT statements optionally supports a IF NOT EXISTS condition and UPDATE supports IF conditions.
  • -
-
-
-

3.0.5

-
    -
  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626).
  • -
-
-
-

3.0.4

-
    -
  • Updated the syntax for custom secondary indexes.
  • -
  • Non-equal conditions on the partition key are now never supported, even for ordering partitioners, as this was not correct (the order was not the one of the type of the partition key). Instead, the token method should always be used for range queries on the partition key (see WHERE clauses).
  • -
-
-
-

3.0.3

- -
-
-

3.0.2

-
    -
  • Type validation for the constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now more strict. See the data types section for details on which constant is allowed for which type.
  • -
  • The type validation fixed of the previous point has lead to the introduction of blobs constants to allow the input of -blobs. Do note that while the input of blobs as strings constant is still supported by this version (to allow smoother -transition to blob constant), it is now deprecated and will be removed by a future version. If you were using strings -as blobs, you should thus update your client code ASAP to switch blob constants.
  • -
  • A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is -now also allowed in select clauses. See the section on functions for details.
  • -
-
-
-

3.0.1

-
    -
  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense -that date string are not valid timeuuid, and it was thus resulting in confusing behaviors. However, the following new methods have been added to help -working with timeuuid: now, minTimeuuid, maxTimeuuid , -dateOf and unixTimestampOf.
  • -
  • Float constants now support the exponent notation. In other words, 4.2E10 is now a valid floating point value.
  • -
-
-
-

Versioning

-

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version.

- ---- - - - - - - - - - - - - - - - - -
versiondescription
MajorThe major version must be bumped when backward incompatible changes are introduced. This should rarely -occur.
MinorMinor version increments occur when new, but backward compatible, functionality is introduced.
PatchThe patch version is incremented when bugs are fixed.
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/ddl.html b/src/doc/3.11.6/cql/ddl.html deleted file mode 100644 index 150dae6ef..000000000 --- a/src/doc/3.11.6/cql/ddl.html +++ /dev/null @@ -1,765 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Definition" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Definition

-

CQL stores data in tables, whose schema defines the layout of said data in the table, and those tables are grouped in keyspaces. A keyspace defines a number of options that apply to all the tables it contains, most prominent of which is the replication strategy used by the keyspace. It is generally encouraged to use one keyspace per application, and thus many clusters may define only one keyspace.

-

This section describes the statements used to create, modify, and remove those keyspace and tables.

-
-

Common definitions

-

The names of the keyspaces and tables are defined by the following grammar:

-
-keyspace_name ::=  name
-table_name    ::=  [ keyspace_name '.' ] name
-name          ::=  unquoted_name | quoted_name
-unquoted_name ::=  re('[a-zA-Z_0-9]{1, 48}')
-quoted_name   ::=  '"' unquoted_name '"'
-
-

Both keyspace and table names should be comprised of only alphanumeric characters, cannot be empty and are limited in size to 48 characters (that limit exists mostly to prevent filenames, which may include the keyspace and table name, from going over the limits of certain file systems). By default, keyspace and table names are case insensitive (myTable is equivalent to mytable) but case sensitivity can be forced by using double-quotes ("myTable" is different from mytable).

-

Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is part of. If it is not fully-qualified, the table is assumed to be in the current keyspace (see USE statement).

-

Further, the valid names for columns are simply defined as:

-
-column_name ::=  identifier
-
-

We also define the notion of statement options for use in the following section:

-
-options ::=  option ( AND option )*
-option  ::=  identifier '=' ( identifier | constant | map_literal )
-
-
-
-

CREATE KEYSPACE

-

A keyspace is created using a CREATE KEYSPACE statement:

-
-create_keyspace_statement ::=  CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options
-
-

For instance:

-
CREATE KEYSPACE Excelsior
-           WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE Excalibur
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-            AND durable_writes = false;
-
-
-

The supported options are:

- ------- - - - - - - - - - - - - - - - - - - - - - - -
namekindmandatorydefaultdescription
replicationmapyes The replication strategy and options to use for the keyspace (see -details below).
durable_writessimplenotrueWhether to use the commit log for updates on this keyspace -(disable this option at your own risk!).
-

The replication property is mandatory and must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on what replication strategy is used. By default, Cassandra supports the following 'class':

-
    -
  • 'SimpleStrategy': A simple strategy that defines a replication factor for the whole cluster. The only sub-option supported is 'replication_factor', which defines that replication factor and is mandatory.
  • -
  • 'NetworkTopologyStrategy': A replication strategy that allows to set the replication factor independently for -each data-center. The rest of the sub-options are key-value pairs where a key is a data-center name and its value is -the associated replication factor.
  • -
-

Attempting to create a keyspace that already exists will return an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the keyspace already exists.

-
-
-

USE

-

The USE statement changes the current keyspace (for the connection on which it is executed). A number of objects in CQL are bound to a keyspace (tables, user-defined types, functions, …) and the current keyspace is the default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a keyspace name). A USE statement simply takes the keyspace to use as current as its argument:

-
-use_statement ::=  USE keyspace_name
-
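For instance, to make the Excelsior keyspace created above the current keyspace:

```
USE Excelsior;
```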
-
-
-

ALTER KEYSPACE

-

An ALTER KEYSPACE statement allows to modify the options of a keyspace:

-
-alter_keyspace_statement ::=  ALTER KEYSPACE keyspace_name WITH options
-
-

For instance:

-
ALTER KEYSPACE Excelsior
-          WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-
-

The supported options are the same as for creating a keyspace.

-
-
-

DROP KEYSPACE

-

Dropping a keyspace can be done using the DROP KEYSPACE statement:

-
-drop_keyspace_statement ::=  DROP KEYSPACE [ IF EXISTS ] keyspace_name
-
-

For instance:

-
DROP KEYSPACE Excelsior;
-
-
-

Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UDTs and functions in it, and all the data contained in those tables.

-

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used in which case the operation is a no-op.

-
-
-

CREATE TABLE

-

Creating a new table uses the CREATE TABLE statement:

-
-create_table_statement ::=  CREATE TABLE [ IF NOT EXISTS ] table_name
-                            '('
-                                column_definition
-                                ( ',' column_definition )*
-                                [ ',' PRIMARY KEY '(' primary_key ')' ]
-                            ')' [ WITH table_options ]
-column_definition      ::=  column_name cql_type [ STATIC ] [ PRIMARY KEY]
-primary_key            ::=  partition_key [ ',' clustering_columns ]
-partition_key          ::=  column_name
-                            | '(' column_name ( ',' column_name )* ')'
-clustering_columns     ::=  column_name ( ',' column_name )*
-table_options          ::=  COMPACT STORAGE [ AND table_options ]
-                            | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ]
-                            | options
-clustering_order       ::=  column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )*
-
-

For instance:

-
CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records'
-   AND read_repair_chance = 1.0;
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-CREATE TABLE loads (
-    machine inet,
-    cpu int,
-    mtime timeuuid,
-    load float,
-    PRIMARY KEY ((machine, cpu), mtime)
-) WITH CLUSTERING ORDER BY (mtime DESC);
-
-
-

A CQL table has a name and is composed of a set of rows. Creating a table amounts to defining which columns the rows will be composed of, which of those columns make up the primary key, as well as optional options for the table.

-

Attempting to create an already existing table will return an error unless the IF NOT EXISTS directive is used. If -it is used, the statement will be a no-op if the table already exists.

-
-

Column definitions

-

Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later using an alter statement).

-

A column_definition is primarily comprised of the name of the column defined and its type, which restricts which values are accepted for that column. Additionally, a column definition can have the following modifiers:

-
-
STATIC
-
it declares the column as being a static column.
-
PRIMARY KEY
-
it declares the column as being the sole component of the primary key of the table.
-
-
-

Static columns

-

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same partition key). For instance:

-
CREATE TABLE t (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-
-INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-SELECT * FROM t;
-   pk | t | v      | s
-  ----+---+--------+-----------
-   0  | 0 | 'val0' | 'static1'
-   0  | 1 | 'val1' | 'static1'
-
-
-

As can be seen, the s value is the same (static1) for both of the rows in the partition (the partition key in that example being pk, both rows are in that same partition): the 2nd insertion has overridden the value for s.

-

The use of static columns has the following restrictions:

-
    -
  • tables with the COMPACT STORAGE option (see below) cannot use them.
  • -
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition -has only one row, and so every column is inherently static).
  • -
  • only non PRIMARY KEY columns can be static.
  • -
-
-
-
-

The Primary key

-

Within a table, a row is uniquely identified by its PRIMARY KEY, and hence all tables must define a PRIMARY KEY (and only one). A PRIMARY KEY definition is composed of one or more of the columns defined in the table. Syntactically, the primary key is defined by the keywords PRIMARY KEY followed by a comma-separated list of the column names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that column definition by the PRIMARY KEY keywords. The order of the columns in the primary key definition matters.

-

A CQL primary key is composed of 2 parts:

-
    -
  • the partition key part. It is the first component of the primary key definition. It can be a single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key; the smallest possible table definition is:

    -
    CREATE TABLE t (k text PRIMARY KEY);
    -
    -
    -
  • -
  • the clustering columns. Those are the columns after the first component of the primary key -definition, and the order of those columns define the clustering order.

    -
  • -
-

Some example of primary key definition are:

-
    -
  • PRIMARY KEY (a): a is the partition key and there is no clustering columns.
  • -
  • PRIMARY KEY (a, b, c) : a is the partition key and b and c are the clustering columns.
  • -
  • PRIMARY KEY ((a, b), c) : a and b compose the partition key (this is often called a composite partition -key) and c is the clustering column.
  • -
-
-

The partition key

-

Within a table, CQL defines the notion of a partition. A partition is simply the set of rows that share the same value for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same partition only if they have the same values for all those partition key columns. So for instance, given the following table definition and content:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    d int,
-    PRIMARY KEY ((a, b), c, d)
-);
-
-SELECT * FROM t;
-   a | b | c | d
-  ---+---+---+---
-   0 | 0 | 0 | 0    // row 1
-   0 | 0 | 1 | 1    // row 2
-   0 | 1 | 2 | 2    // row 3
-   0 | 1 | 3 | 3    // row 4
-   1 | 1 | 4 | 4    // row 5
-
-
-

row 1 and row 2 are in the same partition, row 3 and row 4 are also in the same partition (but a -different one) and row 5 is in yet another partition.

-

Note that a table always has a partition key, and that if the table has no clustering columns, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns).

-

The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored on the same set of replica nodes. In other words, the partition key of a table defines which rows will be localized together in the cluster, and it is thus important to choose your partition key wisely so that rows that need to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of nodes).

-

Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot.

-

Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done atomically and in isolation, which is not the case across partitions.

-

The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they are.

-
-
-

The clustering columns

-

The clustering columns of a table define the clustering order for the partitions of that table. For a given partition, all the rows are physically ordered inside Cassandra by that clustering order. For instance, given:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    PRIMARY KEY (a, b, c)
-);
-
-SELECT * FROM t;
-   a | b | c
-  ---+---+---
-   0 | 0 | 4     // row 1
-   0 | 1 | 9     // row 2
-   0 | 2 | 2     // row 3
-   0 | 3 | 3     // row 4
-
-
-

then the rows (which all belong to the same partition) are all stored internally in the order of the values of their b column (the order they are displayed above). So where the partition key of the table allows grouping rows on the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the retrieval of a range of rows within a partition (for instance, in the example above, SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3) to be very efficient.

-
-
-
-

Table options

-

A CQL table has a number of options that can be set at creation (and, for most of them, altered later). These options are specified after the WITH keyword.

-

Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the COMPACT STORAGE option and the CLUSTERING ORDER option. Those, as well as the other -options of a table are described in the following sections.

-
-

Compact tables

-
-

Warning

-

Since Cassandra 3.0, compact tables have the exact same layout internally as non-compact ones (for the same schema obviously), and declaring a table compact only creates artificial limitations on the table definition and usage that are necessary to ensure backward compatibility with the deprecated Thrift API. And as COMPACT STORAGE cannot, as of Cassandra 3.11.6, be removed, it is strongly discouraged to create new tables with the COMPACT STORAGE option.

-
-

A compact table is one defined with the COMPACT STORAGE option. This option is mainly targeted towards backward compatibility for definitions created before CQL version 3 (see www.datastax.com/dev/blog/thrift-to-cql3 for more details) and shouldn’t be used for new tables. Declaring a table with this option creates limitations for the table which are largely arbitrary but necessary for backward compatibility with the (deprecated) Thrift API. Amongst those limitations:

-
    -
  • a compact table cannot use collections nor static columns.
  • -
  • if a compact table has at least one clustering column, then it must have exactly one column outside of the primary key ones. This implies, in particular, that you cannot add or remove columns after creation.
  • -
  • a compact table is limited in the indexes it can create, and no materialized view can be created on it.
  • -
-
-
-

Reversing the clustering order

-

The clustering order of a table is defined by the clustering columns of that table. By default, that ordering is based on the natural order of those clustering columns, but the CLUSTERING ORDER option allows changing that clustering order to use the reverse natural order for some (potentially all) of the columns.

-

The CLUSTERING ORDER option takes a comma-separated list of the clustering columns, each with an ASC (for ascending, i.e. the natural order) or DESC (for descending, i.e. the reverse natural order) modifier. Note in particular that the default (if the CLUSTERING ORDER option is not used) is strictly equivalent to using the option with all clustering columns using the ASC modifier.

-

Note that this option is basically a hint for the storage engine to change the order in which it stores the rows, but it has 3 visible consequences:

-
-
1. it limits which ORDER BY clauses are allowed for selects on that table. You can only order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering columns a and b and you defined WITH CLUSTERING ORDER (a DESC, b ASC), then in queries you will be allowed to use ORDER BY (a DESC, b ASC) and (reverse clustering order) ORDER BY (a ASC, b DESC) but not ORDER BY (a ASC, b ASC) (nor ORDER BY (a DESC, b DESC)).
2. it also changes the default order of results when queried (if no ORDER BY is provided). Results are always returned in clustering order (within a partition).
3. it has a small performance impact on some queries, as queries in reverse clustering order are slower than the ones in forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of your columns (which is common with time series, for instance, where you often want data from the newest to the oldest), it is an optimization to declare a descending clustering order.
-
-
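As a sketch, a time-series style table that is mostly read newest-first could declare a descending clustering order (the table and column names are illustrative):

```
CREATE TABLE events (
    sensor_id uuid,
    ts timestamp,
    value double,
    PRIMARY KEY (sensor_id, ts)
) WITH CLUSTERING ORDER BY (ts DESC);
```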
-
-

Other table options

-
-

Todo

-

review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance)

-
-

A table supports the following options:

option                     | kind   | default   | description
comment                    | simple | none      | A free-form, human-readable comment.
read_repair_chance         | simple | 0.1       | The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpose of read repairs.
dclocal_read_repair_chance | simple | 0         | The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center as the read coordinator for the purpose of read repairs.
gc_grace_seconds           | simple | 864000    | Time to wait before garbage collecting tombstones (deletion markers).
bloom_filter_fp_chance     | simple | 0.00075   | The target probability of false positives for the sstable bloom filters. The bloom filters will be sized to provide the given probability (so lowering this value impacts the size of bloom filters in memory and on disk).
default_time_to_live       | simple | 0         | The default expiration time ("TTL") in seconds for a table.
compaction                 | map    | see below | Compaction options.
compression                | map    | see below | Compression options.
caching                    | map    | see below | Caching options.
-
-
Compaction options
-

The compaction options must at least define the 'class' sub-option, which defines the compaction strategy class to use. The default supported classes are 'SizeTieredCompactionStrategy' (STCS), 'LeveledCompactionStrategy' (LCS) and 'TimeWindowCompactionStrategy' (TWCS) ('DateTieredCompactionStrategy' is also supported but is deprecated and 'TimeWindowCompactionStrategy' should be preferred instead). A custom strategy can be provided by specifying the full class name as a string constant.

-

All default strategies support a number of common options, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: STCS, LCS and TWCS).
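For instance, a minimal sketch (the events table is hypothetical, and sstable_size_in_mb is an LCS-specific sub-option; see the compaction section for the options your chosen strategy supports):

CREATE TABLE events (
    id uuid PRIMARY KEY,
    payload text
) WITH compaction = {'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 160};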

-
-
-
Compression options
-

The compression options define if and how the sstables of the table are compressed. The following sub-options are -available:

Option             | Default       | Description
class              | LZ4Compressor | The compression algorithm to use. Default compressors are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
enabled            | true          | Enable/disable sstable compression.
chunk_length_in_kb | 64            | On disk, SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increase the minimum size of data to be read from disk for a read.
crc_check_chance   | 1.0           | When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replicas. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking, or to 0.5, for instance, to check them every other read.
-

For instance, to create a table with LZ4Compressor and a chunk_length_in_kb of 4 KB:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
-
-
-
-
-
Caching options
-

The caching options allow configuring both the key cache and the row cache for the table. The following sub-options are available:

Option             | Default | Description
keys               | ALL     | Whether to cache keys ("key cache") for this table. Valid values are: ALL and NONE.
rows_per_partition | NONE    | The number of rows to cache per partition ("row cache"). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.
-

For instance, to create a table with both a key cache and 10 rows per partition:

-
CREATE TABLE simple (
-    id int,
-    key text,
-    value text,
-    PRIMARY KEY (key, value)
-) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10};
-
-
-
-
-
Other considerations:
-
    -
  • Adding new columns (see ALTER TABLE below) is a constant time operation. There is thus no need to try to -anticipate future usage when creating a table.
  • -
-
-
-
-
-
-

ALTER TABLE

-

Altering an existing table uses the ALTER TABLE statement:

-
-alter_table_statement   ::=  ALTER TABLE table_name alter_table_instruction
-alter_table_instruction ::=  ADD column_name cql_type ( ',' column_name cql_type )*
-                             | DROP column_name ( column_name )*
-                             | WITH options
-
-

For instance:

-
ALTER TABLE addamsFamily ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table'
-       AND read_repair_chance = 0.2;
-
-
-

The ALTER TABLE statement can:

-
    -
  • Add new column(s) to the table (through the ADD instruction). Note that the primary key of a table cannot be changed and thus newly added columns will, by extension, never be part of the primary key. Also note that compact tables have restrictions regarding column addition. Note that this is a constant-time operation (in the amount of data the cluster contains).
  • Remove column(s) from the table. This drops both the column and all its content, but note that while the column becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings below. Due to lazy removal, the altering itself is a constant-time operation (in the amount of data removed or contained in the cluster).
  • Change some of the table options (through the WITH instruction). The supported options are the same as when creating a table (except for COMPACT STORAGE and CLUSTERING ORDER, which cannot be changed after creation). Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them; see the example after this list. The same note applies to the set of compression sub-options.
-
-
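For instance, a sketch reusing the hypothetical events table from the compaction example above. The statement below replaces the whole compaction map, so 'class' (and any other sub-option you want to keep) must be given again even if only min_threshold is meant to change:

ALTER TABLE events
    WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'min_threshold': 6};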

Warning

-

Dropping a column assumes that the timestamps used for the values of this column are "real" timestamps in microseconds. Using "real" timestamps in microseconds is the default and is strongly recommended, but as Cassandra allows the client to provide any timestamp on any table, it is theoretically possible to use another convention. Please be aware that if you do so, dropping a column will not work correctly.

-
-
-

Warning

-

Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one, unless the type of the dropped column was a (non-frozen) collection (due to an internal technical limitation).

-
-
-
-

DROP TABLE

-

Dropping a table uses the DROP TABLE statement:

-
-drop_table_statement ::=  DROP TABLE [ IF EXISTS ] table_name
-
-

Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.

-

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the -operation is a no-op.

-
-
-

TRUNCATE

-

A table can be truncated using the TRUNCATE statement:

-
-truncate_statement ::=  TRUNCATE [ TABLE ] table_name
-
-

Note that TRUNCATE TABLE foo is allowed for consistency with other DDL statements, but tables are currently the only object that can be truncated, and so the TABLE keyword can be omitted.

-

Truncating a table permanently removes all existing data from the table, but without removing the table itself.
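For instance, assuming a hypothetical users table, both of the following forms are equivalent:

TRUNCATE TABLE users;
TRUNCATE users;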

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/definitions.html b/src/doc/3.11.6/cql/definitions.html deleted file mode 100644 index b02ca4eac..000000000 --- a/src/doc/3.11.6/cql/definitions.html +++ /dev/null @@ -1,312 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Definitions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Definitions

-
-

Conventions

-

To aid in specifying the CQL syntax, we will use the following conventions in this document:

-
    -
  • Language rules will be given in an informal BNF variant notation. In particular, we'll use square brackets ([ item ]) for optional items, and * and + for repeated items (where + implies at least one).
  • The grammar will also use the following convention for convenience: non-terminal terms will be lowercase (and link to their definition) while terminal keywords will be provided in "all caps". Note however that keywords are case insensitive in practice (see Identifiers and keywords). We will also define some early constructions using regexp, which we'll indicate with re(<some regular expression>).
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the comma on the last column definition in a CREATE TABLE statement is optional but supported if present, even though the grammar in this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
  • -
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.
  • -
-
-
-

Identifiers and keywords

-

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token -matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

-

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in Appendix A: CQL Keywords.

-

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

-

There is a second kind of identifiers called quoted identifiers, defined by enclosing an arbitrary sequence of characters (non empty) in double-quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column (note that doing so is not advised), while select would raise a parsing error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is however equivalent to the unquoted identifier obtained by removing the double-quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.
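For instance, a minimal sketch (the test table is hypothetical) using a quoted keyword and a case-sensitive quoted identifier as column names:

CREATE TABLE test ("select" int PRIMARY KEY, "myId" text);
SELECT "select", "myId" FROM test;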

-
-

Note

-

Quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional update, the server will respond with a result-set containing a special result named "[applied]". If you've declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred, but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by square brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

-
-

More formally, we have:

-
-identifier          ::=  unquoted_identifier | quoted_identifier
-unquoted_identifier ::=  re('[a-zA-Z][a-zA-Z0-9_]*')
-quoted_identifier   ::=  '"' (any character where " can appear if doubled)+ '"'
-
-
-
-

Constants

-

CQL defines the following kind of constants:

-
-constant ::=  string | integer | float | boolean | uuid | blob | NULL
-string   ::=  '\'' (any character where ' can appear if doubled)+ '\''
-              '$$' (any character other than '$$') '$$'
-integer  ::=  re('-?[0-9]+')
-float    ::=  re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY
-boolean  ::=  TRUE | FALSE
-uuid     ::=  hex{8}-hex{4}-hex{4}-hex{4}-hex{12}
-hex      ::=  re("[0-9a-fA-F]")
-blob     ::=  '0' ('x' | 'X') hex+
-
-

In other words:

-
    -
  • A string constant is an arbitrary sequence of characters enclosed by single-quotes ('). A single-quote can be included by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted identifiers, which use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence of characters by two dollar characters, in which case single-quotes can be used without escaping ($$It's raining today$$). That latter form is often used when defining user-defined functions, to avoid having to escape single-quote characters in the function body (as they are more likely to occur than $$).
  • Integer, float and boolean constants are defined as expected. Note however that float allows the special NaN and Infinity constants.
  • -
  • CQL supports UUID constants.
  • -
  • Blobs content are provided in hexadecimal and prefixed by 0x.
  • -
  • The special NULL constant denotes the absence of value.
  • -
-

For how these constants are typed, see the Data Types section.

-
-
-

Terms

-

CQL has the notion of a term, which denotes the kind of values that CQL support. Terms are defined by:

-
-term          ::=  constant | literal | function_call | type_hint | bind_marker
-literal       ::=  collection_literal | udt_literal | tuple_literal
-function_call ::=  identifier '(' [ term (',' term)* ] ')'
-type_hint     ::=  '(' cql_type ')' term
-bind_marker   ::=  '?' | ':' identifier
-
-

A term is thus one of:

-
    -
  • A constant.
  • -
  • A literal for either a collection, a user-defined type or a tuple -(see the linked sections for details).
  • -
  • A function call: see the section on functions for details on which native function exists and how to define your own user-defined ones.
  • -
  • A type hint: see the related section for details.
  • -
  • A bind marker, which denotes a variable to be bound at execution time. See the section on Prepared Statements -for details. A bind marker can be either anonymous (?) or named (:some_name). The latter form provides a more -convenient way to refer to the variable for binding it and should generally be preferred.
  • -
-
-
-

Comments

-

A comment in CQL is a line beginning by either double dashes (--) or double slash (//).

-

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-
-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-
-
-
-
-

Statements

-

CQL consists of statements that can be divided into the following categories:

  • data definition (DDL) statements
  • data manipulation (DML) statements
  • secondary index statements
  • materialized view statements
  • role and permission statements
  • user-defined function (UDF) statements
  • user-defined type (UDT) statements
  • trigger statements

All the statements are listed below and are described in the rest of this documentation (see links above):

-
-cql_statement                ::=  statement [ ';' ]
-statement                    ::=  ddl_statement
-                                  | dml_statement
-                                  | secondary_index_statement
-                                  | materialized_view_statement
-                                  | role_or_permission_statement
-                                  | udf_statement
-                                  | udt_statement
-                                  | trigger_statement
-ddl_statement                ::=  use_statement
-                                  | create_keyspace_statement
-                                  | alter_keyspace_statement
-                                  | drop_keyspace_statement
-                                  | create_table_statement
-                                  | alter_table_statement
-                                  | drop_table_statement
-                                  | truncate_statement
-dml_statement                ::=  select_statement
-                                  | insert_statement
-                                  | update_statement
-                                  | delete_statement
-                                  | batch_statement
-secondary_index_statement    ::=  create_index_statement
-                                  | drop_index_statement
-materialized_view_statement  ::=  create_materialized_view_statement
-                                  | drop_materialized_view_statement
-role_or_permission_statement ::=  create_role_statement
-                                  | alter_role_statement
-                                  | drop_role_statement
-                                  | grant_role_statement
-                                  | revoke_role_statement
-                                  | list_roles_statement
-                                  | grant_permission_statement
-                                  | revoke_permission_statement
-                                  | list_permissions_statement
-                                  | create_user_statement
-                                  | alter_user_statement
-                                  | drop_user_statement
-                                  | list_users_statement
-udf_statement                ::=  create_function_statement
-                                  | drop_function_statement
-                                  | create_aggregate_statement
-                                  | drop_aggregate_statement
-udt_statement                ::=  create_type_statement
-                                  | alter_type_statement
-                                  | drop_type_statement
-trigger_statement            ::=  create_trigger_statement
-                                  | drop_trigger_statement
-
-
-
-

Prepared Statements

-

CQL supports prepared statements. Prepared statements are an optimization that allows parsing a query only once and executing it multiple times with different concrete values.

Any statement that uses at least one bind marker (see bind_marker) will need to be prepared. After that, the statement can be executed by providing concrete values for each of its markers. The exact details of how a statement is prepared and then executed depend on the CQL driver used; refer to your driver documentation.
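For instance, the following statements use an anonymous and a named bind marker respectively (the users table is hypothetical; how the statements are actually prepared and executed depends on your driver):

SELECT name, occupation FROM users WHERE userid = ?;
INSERT INTO users (userid, name, occupation) VALUES (:id, :name, :occupation);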

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/dml.html b/src/doc/3.11.6/cql/dml.html deleted file mode 100644 index 96d59dbb4..000000000 --- a/src/doc/3.11.6/cql/dml.html +++ /dev/null @@ -1,558 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Manipulation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Manipulation

-

This section describes the statements supported by CQL to insert, update, delete and query data.

-
-

SELECT

-

Querying data from tables is done using a SELECT statement:

-
-select_statement ::=  SELECT [ JSON | DISTINCT ] ( select_clause | '*' )
-                      FROM table_name
-                      [ WHERE where_clause ]
-                      [ GROUP BY group_by_clause ]
-                      [ ORDER BY ordering_clause ]
-                      [ PER PARTITION LIMIT (integer | bind_marker) ]
-                      [ LIMIT (integer | bind_marker) ]
-                      [ ALLOW FILTERING ]
-select_clause    ::=  selector [ AS identifier ] ( ',' selector [ AS identifier ] )*
-selector         ::=  column_name
-                      | term
-                      | CAST '(' selector AS cql_type ')'
-                      | function_name '(' [ selector ( ',' selector )* ] ')'
-                      | COUNT '(' '*' ')'
-where_clause     ::=  relation ( AND relation )*
-relation         ::=  column_name operator term
-                      '(' column_name ( ',' column_name )* ')' operator tuple_literal
-                      TOKEN '(' column_name ( ',' column_name )* ')' operator term
-operator         ::=  '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY
-group_by_clause  ::=  column_name ( ',' column_name )*
-ordering_clause  ::=  column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )*
-
-

For instance:

-
SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT (*) AS user_count FROM users;
-
-
-

The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of the rows matching the request, where each row contains the values for the selection corresponding to the query. Additionally, functions, including aggregation functions, can be applied to the result.

-

A SELECT statement contains at least a selection clause and the name of the table on which the selection is made (note that CQL does not support joins or sub-queries and thus a select statement only applies to a single table). In most cases, a select will also have a where clause and it can optionally have additional clauses to order or limit the results. Lastly, queries that require filtering can be allowed if the ALLOW FILTERING flag is provided.

-
-

Selection clause

-

The select_clause determines which columns need to be queried and returned in the result-set, as well as any transformations to apply to this result before returning. It consists of a comma-separated list of selectors or, alternatively, of the wildcard character (*) to select all the columns defined in the table.

-
-

Selectors

-

A selector can be one of:

-
    -
  • A column name of the table selected, to retrieve the values for that column.
  • -
  • A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the -corresponding column of the result-set will simply have the value of this term for every row returned).
  • -
  • A casting, which allows converting a nested selector to a (compatible) type.
  • A function call, where the arguments are selectors themselves. See the section on functions for more details.
  • -
  • The special call COUNT(*) to the COUNT function, which counts all non-null results.
  • -
-
-
-

Aliases

-

Every top-level selector can also be aliased (using AS). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:

-
// Without alias
-SELECT intAsBlob(4) FROM t;
-
-//  intAsBlob(4)
-// --------------
-//  0x00000004
-
-// With alias
-SELECT intAsBlob(4) AS four FROM t;
-
-//  four
-// ------------
-//  0x00000004
-
-
-
-

Note

-

Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the WHERE clause, not in the ORDER BY clause, …). You must use the original column name instead.

-
-
-
-

WRITETIME and TTL function

-

Selection supports two special functions (that aren't allowed anywhere else): WRITETIME and TTL. Both functions take only one argument, and that argument must be a column name (so for instance TTL(3) is invalid).

Those functions allow retrieving meta-information that is stored internally for each column, namely:

  • the timestamp of the value of the column for WRITETIME.
  • the remaining time to live (in seconds) for the value of the column if it is set to expire (and null otherwise).
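For instance, a sketch reusing the users table from the SELECT examples above (occupation is a regular, non-primary-key column):

SELECT WRITETIME(occupation), TTL(occupation) FROM users WHERE userid = 199;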
-
-
-
-

The WHERE clause

-

The WHERE clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the PRIMARY KEY and/or have a secondary index defined on them.

-

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered as an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations on them are restricted to those that allow selecting a contiguous (for the ordering) set of rows. For instance, given:

-
CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-
-
-

The following query is allowed:

-
SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND blog_title='John''s Blog'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):

-
// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, -rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -token(-1) > token(0) in particular). Example:

-
SELECT * FROM posts
- WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-
-
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full -primary key.

-

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-
-
-

will request all rows that sort after the one having "John's Blog" as blog_title and '2012-01-01' for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which would not be the case for:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND blog_title > 'John''s Blog'
-   AND posted_at > '2012-01-01'
-
-
-

The tuple notation may also be used for IN clauses on clustering columns:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-
-
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the -map keys.

-
-
-

Grouping results

-

The GROUP BY option allows condensing all selected rows that share the same values for a set of columns into a single row.

Using the GROUP BY option, it is only possible to group rows at the partition key level or at a clustering column level. Consequently, the GROUP BY option only accepts primary key column names, in primary key order, as arguments. If a primary key column is restricted by an equality restriction, it is not required to be present in the GROUP BY clause.

-

Aggregate functions will produce a separate value for each group. If no GROUP BY clause is specified, aggregate functions will produce a single value for all the rows.

If a column is selected without an aggregate function in a statement with a GROUP BY, the first value encountered in each group will be returned.
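For instance, a sketch reusing the posts table from the WHERE clause examples above, grouping each user's posts by blog and returning the most recent posted_at per group:

SELECT userid, blog_title, max(posted_at) AS latest_post
  FROM posts
 GROUP BY userid, blog_title;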

-
-
-

Ordering results

-

The ORDER BY clause allows selecting the order of the returned results. It takes as argument a list of column names along with the order for the column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited by the clustering order defined on the table:

-
    -
  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • -
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
  • -
-
-
-

Limiting results

-

The LIMIT option to a SELECT statement limits the number of rows returned by a query, while the PER PARTITION LIMIT option limits the number of rows returned for a given partition by the query. Note that both types of limit can be used in the same statement.
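For instance, a sketch reusing the posts table from above, returning at most the first two posts (in clustering order) of each queried partition, and no more than ten rows overall:

SELECT * FROM posts PER PARTITION LIMIT 2 LIMIT 10;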

-
-
-

Allowing filtering

-

By default, CQL only allows select queries that don't involve "filtering" server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those "non filtering" queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

-

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (for the definition above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

-

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:

-
CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-
-
-

Then the following queries are valid:

-
SELECT * FROM users;
-SELECT * FROM users WHERE birth_year = 1981;
-
-
-

because in both cases, Cassandra guarantees that these queries' performance will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored; nevertheless, the number of nodes will always be multiple orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

-

However, the following query will be rejected:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-
-

because Cassandra cannot guarantee that it won't have to scan a large amount of data even if the result of the query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you "know what you are doing", you can force the execution of this query by using ALLOW FILTERING and so the following query is valid:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-
-
-
-
-

INSERT

-

Inserting data for a row is done using an INSERT statement:

-
-insert_statement ::=  INSERT INTO table_name ( names_values | json_clause )
-                      [ IF NOT EXISTS ]
-                      [ USING update_parameter ( AND update_parameter )* ]
-names_values     ::=  names VALUES tuple_literal
-json_clause      ::=  JSON string [ DEFAULT ( NULL | UNSET ) ]
-names            ::=  '(' column_name ( ',' column_name )* ')'
-
-

For instance:

-
INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-      USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                              "director": "Joss Whedon",
-                              "year": 2005}';
-
-
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by -its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the -section on JSON support for more detail.

-

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means to know whether a creation or an update happened.

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the insertion. But please note that using IF NOT EXISTS will incur a non-negligible performance cost (internally, Paxos will be used) so this should be used sparingly.

-

All updates for an INSERT are applied atomically and in isolation.

-

Please refer to the UPDATE section for information on the update_parameter.

-

Also note that INSERT does not support counters, while UPDATE does.

-
-
-

UPDATE

-

Updating a row is done using an UPDATE statement:

-
-update_statement ::=  UPDATE table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      SET assignment ( ',' assignment )*
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-update_parameter ::=  ( TIMESTAMP | TTL ) ( integer | bind_marker )
-assignment       ::=  simple_selection '=' term
-                     | column_name '=' column_name ( '+' | '-' ) term
-                     | column_name '=' list_literal '+' column_name
-simple_selection ::=  column_name
-                     | column_name '[' term ']'
-                     | column_name '.' field_name
-condition        ::=  simple_selection operator term
-
-

For instance:

-
UPDATE NerdMovies USING TTL 400
-   SET director   = 'Joss Whedon',
-       main_actor = 'Nathan Fillion',
-       year       = 2005
- WHERE movie = 'Serenity';
-
-UPDATE UserActions
-   SET total = total + 2
-   WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14
-     AND action = 'click';
-
-
-

The UPDATE statement writes one or more columns for a given row in a table. The where_clause is used to -select the row to update and must include all columns composing the PRIMARY KEY. Non primary key columns are then -set using the SET keyword.

-

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through IF, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred.

-

It is however possible to use the conditions on some columns through IF, in which case the row will not be updated -unless the conditions are met. But, please note that using IF conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly.

-

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

-

Regarding the assignment:

-
    -
  • c = c + 3 is used to increment/decrement counters. The column name after the '=' sign must be the same as the one before the '=' sign. Note that increment/decrement is only allowed on counters, and these are the only update operations allowed on counters. See the section on counters for details.
  • -
  • id = id + <some-collection> and id[value1] = value2 are for collections, see the relevant section for details.
  • -
  • id.field = 3 is for setting the value of a field on a non-frozen user-defined types. see the relevant section for details.
  • -
-
-

Update parameters

-

The UPDATE, INSERT (and DELETE and BATCH for the TIMESTAMP) statements support the following -parameters:

-
    -
  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in -microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • -
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are -automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not -the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL -is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a -default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of null is equivalent -to inserting with a TTL of 0.
  • -
-
-
-
-

DELETE

-

Deleting rows or parts of rows uses the DELETE statement:

-
-delete_statement ::=  DELETE [ simple_selection ( ',' simple_selection ) ]
-                      FROM table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-
-

For instance:

-
DELETE FROM NerdMovies USING TIMESTAMP 1240003134
- WHERE movie = 'Serenity';
-
-DELETE phone FROM Users
- WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-
-
-

The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, -only those columns are deleted from the row indicated by the WHERE clause. Otherwise, whole rows are removed.

-

The WHERE clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -IN operator. A range of rows may be deleted using an inequality operator (such as >=).
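For instance, a sketch reusing the posts table from the SELECT examples above (range deletions on clustering columns require Cassandra 3.0 or later):

DELETE FROM posts
 WHERE userid = 'john doe'
   AND blog_title = 'John''s Blog'
   AND posted_at >= '2012-01-01' AND posted_at < '2012-02-01';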

-

DELETE supports the TIMESTAMP option with the same semantics as in updates.

-

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

-

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT -statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly.

-
-
-

BATCH

-

Multiple INSERT, UPDATE and DELETE can be executed in a single statement by grouping them through a -BATCH statement:

-
-batch_statement        ::=  BEGIN [ UNLOGGED | COUNTER ] BATCH
-                            [ USING update_parameter ( AND update_parameter )* ]
-                            modification_statement ( ';' modification_statement )*
-                            APPLY BATCH
-modification_statement ::=  insert_statement | update_statement | delete_statement
-
-

For instance:

-
BEGIN BATCH
-   INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-   DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-
-
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

-
    -
  • It saves network round-trips between the client and the server (and sometimes between the server coordinator and the -replicas) when batching multiple updates.
  • -
  • All updates in a BATCH belonging to a given partition key are performed in isolation.
  • -
  • By default, all operations in the batch are performed as logged, to ensure all mutations eventually complete (or -none will). See the notes on UNLOGGED batches for more details.
  • -
-

Note that:

-
    -
  • BATCH statements may only contain UPDATE, INSERT and DELETE statements (not other batches for instance).
  • -
  • Batches are not a full analogue for SQL transactions.
  • -
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp -(either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra’s conflict -resolution procedure in the case of timestamp ties, operations may -be applied in an order that is different from the order they are listed in the BATCH statement. To force a -particular operation ordering, you must specify per-operation timestamps.
  • -
  • A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
  • -
-
-

UNLOGGED batches

-

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition).

-

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.

-
-
-

COUNTER batches

-

Use the COUNTER option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent.
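For instance, a minimal sketch assuming a hypothetical page_views table whose views column is a counter:

BEGIN COUNTER BATCH
   UPDATE page_views SET views = views + 1 WHERE page = '/home';
   UPDATE page_views SET views = views + 1 WHERE page = '/download';
APPLY BATCH;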

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/functions.html b/src/doc/3.11.6/cql/functions.html deleted file mode 100644 index c72ebdd98..000000000 --- a/src/doc/3.11.6/cql/functions.html +++ /dev/null @@ -1,666 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Functions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Functions

-

CQL supports 2 main categories of functions:

-
    -
  • the scalar functions, which simply take a number of values and produce an output from them.
  • the aggregate functions, which are used to aggregate the results of multiple rows from a SELECT statement.
  • -
-

In both cases, CQL provides a number of native “hard-coded” functions as well as the ability to create new user-defined -functions.

-
-

Note

-

The use of user-defined functions is disabled by default for security concerns (even when enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do evil, but no sandbox is perfect so using user-defined functions is opt-in). See the enable_user_defined_functions setting in cassandra.yaml to enable them.

-
-

A function is identified by its name:

-
-function_name ::=  [ keyspace_name '.' ] name
-
-
-

Scalar functions

-
-

Native functions

-
-

Cast

-

The cast function can be used to convert one native datatype to another.

-

The following table describes the conversions supported by the cast function. Cassandra will silently ignore any -cast converting a datatype into its own datatype.

From      | To
ascii     | text, varchar
bigint    | tinyint, smallint, int, float, double, decimal, varint, text, varchar
boolean   | text, varchar
counter   | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
date      | timestamp
decimal   | tinyint, smallint, int, bigint, float, double, varint, text, varchar
double    | tinyint, smallint, int, bigint, float, decimal, varint, text, varchar
float     | tinyint, smallint, int, bigint, double, decimal, varint, text, varchar
inet      | text, varchar
int       | tinyint, smallint, bigint, float, double, decimal, varint, text, varchar
smallint  | tinyint, int, bigint, float, double, decimal, varint, text, varchar
time      | text, varchar
timestamp | date, text, varchar
timeuuid  | timestamp, date, text, varchar
tinyint   | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
uuid      | text, varchar
varint    | tinyint, smallint, int, bigint, float, double, decimal, text, varchar
-

The conversions rely strictly on Java’s semantics. For example, the double value 1 will be converted to the text value -‘1.0’. For instance:

-
SELECT avg(cast(count as double)) FROM myTable
-
-
-
-
-

Token

-

The token function allows computing the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

The types of the arguments of token depend on the types of the partition key columns. The return type depends on the partitioner in use:

-
    -
  • For Murmur3Partitioner, the return type is bigint.
  • -
  • For RandomPartitioner, the return type is varint.
  • -
  • For ByteOrderedPartitioner, the return type is blob.
  • -
-

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:

-
CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-)
-
-
-

then the token function will take a single argument of type text (in that case, the partition key is userid (there are no clustering columns so the partition key is the same as the primary key)), and the return type will be bigint.
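For instance, a minimal sketch continuing the example above:

SELECT userid, token(userid) FROM users;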

-
-
-

Uuid

-

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or -UPDATE statements.

-
-
-

Timeuuid functions

-
-
now
-

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time where -the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in -WHERE clauses. For instance, a query of the form:

-
SELECT * FROM myTable WHERE t = now()
-
-
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

-
-
-
minTimeuuid and maxTimeuuid
-

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having timestamp t. So for instance:

-
SELECT * FROM myTable
- WHERE t > maxTimeuuid('2013-01-01 00:05+0000')
-   AND t < minTimeuuid('2013-02-02 10:00+0000')
-
-
-

will select all rows where the timeuuid column t is strictly older than '2013-01-01 00:05+0000' but strictly -younger than '2013-02-02 10:00+0000'. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still -not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > -maxTimeuuid('2013-01-01 00:05+0000').

-
-

Note

-

We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the Time-Based UUID generation process specified by RFC 4122. In particular, the value returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

-
-
-
-
-

Time conversion functions

-

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native -type.

Function name   | Input type | Description
toDate          | timeuuid   | Converts the timeuuid argument into a date type
toDate          | timestamp  | Converts the timestamp argument into a date type
toTimestamp     | timeuuid   | Converts the timeuuid argument into a timestamp type
toTimestamp     | date       | Converts the date argument into a timestamp type
toUnixTimestamp | timeuuid   | Converts the timeuuid argument into a bigInt raw value
toUnixTimestamp | timestamp  | Converts the timestamp argument into a bigInt raw value
toUnixTimestamp | date       | Converts the date argument into a bigInt raw value
dateOf          | timeuuid   | Similar to toTimestamp(timeuuid) (DEPRECATED)
unixTimestampOf | timeuuid   | Similar to toUnixTimestamp(timeuuid) (DEPRECATED)
-
-
-

Blob conversion functions

-

A number of functions are provided to "convert" the native types into binary data (blob). For every <native-type> type supported by CQL (a notable exception is blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a blob argument and converts it back to a value of that type (for instance, blobAsBigint takes a 64-bit blob and converts it to a bigint). And so, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.

-
-
-
-

User-defined functions

-

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath.

-

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

-

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

-
CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-
-
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing.

-

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types.

-

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

-

Note that you can use the double dollar-sign ($$) string syntax to enclose the UDF source code. For example:

-
CREATE FUNCTION some_function ( arg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS int
-    LANGUAGE java
-    AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-
-CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen<custom_type> )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$ return udtarg.getString("txt"); $$;
-
-
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

-

The implicitly available udfContext field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:

-
CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( somearg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS custom_type
-    LANGUAGE java
-    AS $$
-        UDTValue udt = udfContext.newReturnUDTValue();
-        udt.setString("txt", "some string");
-        udt.setInt("i", 42);
-        return udt;
-    $$;
-
-
-

The definition of the UDFContext interface can be found in the Apache Cassandra source code for -org.apache.cassandra.cql3.functions.UDFContext.

-
public interface UDFContext
-{
-    UDTValue newArgUDTValue(String argName);
-    UDTValue newArgUDTValue(int argNum);
-    UDTValue newReturnUDTValue();
-    UDTValue newUDTValue(String udtName);
-    TupleValue newArgTupleValue(String argName);
-    TupleValue newArgTupleValue(int argNum);
-    TupleValue newReturnTupleValue();
-    TupleValue newTupleValue(String cqlDefinition);
-}
-
-
-

Java UDFs already have some imports for common interfaces and classes defined. These imports are:

-
import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.cassandra.cql3.functions.UDFContext;
-import com.datastax.driver.core.TypeCodec;
-import com.datastax.driver.core.TupleValue;
-import com.datastax.driver.core.UDTValue;
-
-
-

Please note that these convenience imports are not available for script UDFs.

-
-

CREATE FUNCTION

-

Creating a new user-defined function uses the CREATE FUNCTION statement:

-
-create_function_statement ::=  CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS]
-                                   function_name '(' arguments_declaration ')'
-                                   [ CALLED | RETURNS NULL ] ON NULL INPUT
-                                   RETURNS cql_type
-                                   LANGUAGE identifier
-                                   AS string
-arguments_declaration     ::=  identifier cql_type ( ',' identifier cql_type )*
-
-

For instance:

-
CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list)
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int)
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-
-

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with -the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already -exists.

-

If the optional IF NOT EXISTS keywords are used, the function will -only be created if another function with the same signature does not -exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

Behavior on invocation with null values must be defined for each -function. There are two options:

-
    -
  1. RETURNS NULL ON NULL INPUT declares that the function will always return null if any of the input arguments is null.
  2. CALLED ON NULL INPUT declares that the function will always be executed.
-
-
Function Signature
-

Signatures are used to distinguish individual functions. The signature consists of:

-
    -
  1. The fully qualified function name, i.e. keyspace plus function name.
  2. The concatenated list of all argument types.
-

Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules.

-

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. -the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the -system keyspaces.

-
-
-
-

DROP FUNCTION

-

Dropping a function uses the DROP FUNCTION statement:

-
-drop_function_statement ::=  DROP FUNCTION [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-arguments_signature     ::=  cql_type ( ',' cql_type )*
-
-

For instance:

-
DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-
-
-

You must specify the argument types (arguments_signature) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions).

-

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists, but does not throw an error if -it doesn’t

-
-
-
-
-

Aggregate functions

-

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.

-

If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query.

-
-

Native aggregates

-
-

Count

-

The count function can be used to count the rows returned by a query. Example:

-
SELECT COUNT (*) FROM plays;
-SELECT COUNT (1) FROM plays;
-
-
-

It also can be used to count the non-null values of a given column:

-
SELECT COUNT (scores) FROM plays;
-
-
-
-
-

Max and Min

-

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:

-
SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake';
-
-
-
-
-

Sum

-

The sum function can be used to sum up all the values returned by a query for a given column. For instance:

-
SELECT SUM (players) FROM plays;
-
-
-
-
-

Avg

-

The avg function can be used to compute the average of all the values returned by a query for a given column. For -instance:

-
SELECT AVG (players) FROM plays;
-
-
-
-
-
-

User-Defined Aggregates

-

User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -count, min, and max.

-

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first argument of the state function must have type STYPE. The remaining arguments of the state function must match the types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with the last state value as its argument.

-

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate).

-

User-defined aggregates can be used in SELECT statements.

-

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE -statement):

-
CREATE OR REPLACE FUNCTION averageState(state tuple<int,bigint>, val int)
-    CALLED ON NULL INPUT
-    RETURNS tuple<int,bigint>
-    LANGUAGE java
-    AS $$
-        if (val != null) {
-            state.setInt(0, state.getInt(0)+1);
-            state.setLong(1, state.getLong(1)+val.intValue());
-        }
-        return state;
-    $$;
-
-CREATE OR REPLACE FUNCTION averageFinal (state tuple<int,bigint>)
-    CALLED ON NULL INPUT
-    RETURNS double
-    LANGUAGE java
-    AS $$
-        double r = 0;
-        if (state.getInt(0) == 0) return null;
-        r = state.getLong(1);
-        r /= state.getInt(0);
-        return Double.valueOf(r);
-    $$;
-
-CREATE OR REPLACE AGGREGATE average(int)
-    SFUNC averageState
-    STYPE tuple<int,bigint>
-    FINALFUNC averageFinal
-    INITCOND (0, 0);
-
-CREATE TABLE atable (
-    pk int PRIMARY KEY,
-    val int
-);
-
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-
-SELECT average(val) FROM atable;
-
-
-
-

CREATE AGGREGATE

-

Creating (or replacing) a user-defined aggregate function uses the CREATE AGGREGATE statement:

-
-create_aggregate_statement ::=  CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ]
-                                    function_name '(' arguments_signature ')'
-                                    SFUNC function_name
-                                    STYPE cql_type
-                                    [ FINALFUNC function_name ]
-                                    [ INITCOND term ]
-
-

See above for a complete example.

-

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one -with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature -already exists.

-

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

STYPE defines the type of the state value and must be specified.

-

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null -INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

-

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the -state function must match STYPE. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called -with null.

-

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with -type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS -NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

-

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is -defined, it is the return type of that function.

-
-
-

DROP AGGREGATE

-

Dropping a user-defined aggregate function uses the DROP AGGREGATE statement:

-
-drop_aggregate_statement ::=  DROP AGGREGATE [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-
-

For instance:

-
DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-
-
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates).

-

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist.

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/index.html b/src/doc/3.11.6/cql/index.html deleted file mode 100644 index e2678ca67..000000000 --- a/src/doc/3.11.6/cql/index.html +++ /dev/null @@ -1,239 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "The Cassandra Query Language (CQL)" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

The Cassandra Query Language (CQL)

-

This document describes the Cassandra Query Language (CQL) [1]. Note that this document describes the last version of the language. However, the changes section provides the diff between the different versions of CQL.

-

CQL offers a model close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL. But please note that as such, they do not refer to the concept of rows and columns found in the deprecated thrift API (and earlier versions 1 and 2 of CQL).

[1]Technically, this document describes CQL version 3, which is not backward compatible with CQL versions 1 and 2 (which have been deprecated and removed) and differs from them in numerous ways.
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/indexes.html b/src/doc/3.11.6/cql/indexes.html deleted file mode 100644 index a20a857ec..000000000 --- a/src/doc/3.11.6/cql/indexes.html +++ /dev/null @@ -1,168 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Secondary Indexes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Secondary Indexes

-

CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by:

-
-index_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE INDEX

-

Creating a secondary index on a table uses the CREATE INDEX statement:

-
-create_index_statement ::=  CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ index_name ]
-                                ON table_name '(' index_identifier ')'
-                                [ USING string [ WITH OPTIONS = map_literal ] ]
-index_identifier       ::=  column_name
-                           | ( KEYS | VALUES | ENTRIES | FULL ) '(' column_name ')'
-
-

For instance:

-
CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-
-
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time.

-

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it -is used, the statement will be a no-op if the index already exists.

-
-

Indexes on Map Keys

-

When creating an index on a map, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.

-
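For instance, given the index on keys(favs) created in the example above, a query of the following shape becomes possible (the 'band' key is illustrative):
SELECT * FROM users WHERE favs CONTAINS KEY 'band';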
-
-
-

DROP INDEX

-

Dropping a secondary index uses the DROP INDEX statement:

-
-drop_index_statement ::=  DROP INDEX [ IF EXISTS ] index_name
-
-

The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index.

-

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
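For instance, to drop the index created in the CREATE INDEX example above:
DROP INDEX userIndex;
DROP INDEX IF EXISTS userIndex;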
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/json.html b/src/doc/3.11.6/cql/json.html deleted file mode 100644 index 35143615f..000000000 --- a/src/doc/3.11.6/cql/json.html +++ /dev/null @@ -1,315 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "JSON Support" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

JSON Support

-

Cassandra 2.2 introduces JSON support to SELECT and INSERT -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents.

-
-

SELECT JSON

-

With SELECT statements, the JSON keyword can be used to return each row as a single JSON encoded map. The -remainder of the SELECT statement behavior is the same.

-

The result map keys are the same as the column names in a normal result set. For example, a statement like SELECT JSON a, ttl(b) FROM ... would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, SELECT JSON myColumn FROM ... would result in a map key "\"myColumn\"" (note the escaped quotes).

-

The map values will be JSON-encoded representations (as described below) of the result set values.

-
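For instance, a sketch using the two-column table from the INSERT JSON section below (the returned row content is illustrative):
SELECT JSON "myKey", value FROM mytable;
-- each row comes back as a single JSON map, e.g. {"\"myKey\"": 0, "value": 0}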
-
-

INSERT JSON

-

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single -row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named “myKey” and “value”, you would do the following:

-
INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-
-

By default (or if DEFAULT NULL is explicitly used), a column omitted from the JSON map will be set to NULL, meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). Alternatively, if the DEFAULT UNSET directive is used after the value, omitted column values will be left unset, meaning that pre-existing values for those columns will be preserved.

-
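For instance, a sketch against the same illustrative table: the following sets "myKey" only and, thanks to DEFAULT UNSET, leaves any pre-existing value of the omitted value column untouched:
INSERT INTO mytable JSON '{ "\"myKey\"": 1 }' DEFAULT UNSET;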
-
-

JSON Encoding of Cassandra Data Types

-

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string -representation of the collection.

-

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and toJson()):

Type      | Formats accepted       | Return format | Notes
ascii     | string                 | string        | Uses JSON's \u character escape
bigint    | integer, string        | integer       | String must be valid 64 bit integer
blob      | string                 | string        | String should be 0x followed by an even number of hex digits
boolean   | boolean, string        | boolean       | String must be "true" or "false"
date      | string                 | string        | Date in format YYYY-MM-DD, timezone UTC
decimal   | integer, float, string | float         | May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder
double    | integer, float, string | float         | String must be valid integer or float
float     | integer, float, string | float         | String must be valid integer or float
inet      | string                 | string        | IPv4 or IPv6 address
int       | integer, string        | integer       | String must be valid 32 bit integer
list      | list, string           | list          | Uses JSON's native list representation
map       | map, string            | map           | Uses JSON's native map representation
smallint  | integer, string        | integer       | String must be valid 16 bit integer
set       | list, string           | list          | Uses JSON's native list representation
text      | string                 | string        | Uses JSON's \u character escape
time      | string                 | string        | Time of day in format HH-MM-SS[.fffffffff]
timestamp | integer, string        | string        | A timestamp. String constants allow timestamps to be input as dates; datestamps with format YYYY-MM-DD HH:MM:SS.SSS are returned
timeuuid  | string                 | string        | Type 1 UUID. See constant for the UUID format
tinyint   | integer, string        | integer       | String must be valid 8 bit integer
tuple     | list, string           | list          | Uses JSON's native list representation
UDT       | map, string            | map           | Uses JSON's native map representation with field names as keys
uuid      | string                 | string        | See constant for the UUID format
varchar   | string                 | string        | Uses JSON's \u character escape
varint    | integer, string        | integer       | Variable length; may overflow 32 or 64 bit integers in client-side decoder
-
-
-

The fromJson() Function

-

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used -in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or -SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.

-
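For instance, a sketch against the same illustrative two-column table used in the INSERT JSON section:
INSERT INTO mytable ("myKey", value) VALUES (fromJson('1'), 0);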
-
-

The toJson() Function

-

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used -in the selection clause of a SELECT statement.

-
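For instance, a sketch against the same illustrative table:
SELECT toJson("myKey"), value FROM mytable;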
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/mvs.html b/src/doc/3.11.6/cql/mvs.html deleted file mode 100644 index c0e9510ee..000000000 --- a/src/doc/3.11.6/cql/mvs.html +++ /dev/null @@ -1,241 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Materialized Views" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Materialized Views

-

Materialized view names are defined by:

-
-view_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE MATERIALIZED VIEW

-

You can create a materialized view on a table using a CREATE MATERIALIZED VIEW statement:

-
-create_materialized_view_statement ::=  CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] view_name AS
-                                            select_statement
-                                            PRIMARY KEY '(' primary_key ')'
-                                            WITH table_options
-
-

For instance:

-
CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT * FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-
-
-

The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which -corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view.

-

Creating a materialized view has 3 main parts:
  • The select statement that restricts the data included in the view.
  • The primary key definition for the view.
  • The options of the view.

- -

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is -used. If it is used, the statement will be a no-op if the materialized view already exists.

-
-

MV select statement

-

The select statement of a materialized view creation defines which rows of the base table are included in the view. That statement is limited in a number of ways:

-
    -
  • the selection is limited to those that only select columns of the base table. In other -words, you can’t use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can -however use * as a shortcut of selecting all columns. Further, static columns cannot be -included in a materialized view (which means SELECT * isn’t allowed if the base table has static columns).
  • -
  • the WHERE clause has the following restrictions:
      -
    • it cannot include any bind_marker.
    • -
    • the columns that are not part of the base table primary key can only be restricted by an IS NOT NULL -restriction. No other restriction is allowed.
    • -
    • as the columns that are part of the view primary key cannot be null, they must always be at least restricted by a -IS NOT NULL restriction (or any other restriction, but they must have one).
    • -
    -
  • -
  • it cannot have an ordering clause, a limit, or ALLOW FILTERING.
  • -
-
-
-

MV primary key

-

A view must have a primary key and that primary key must conform to the following restrictions:

-
    -
  • it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to exactly one row of the base table.
  • -
  • it can only contain a single column that is not a primary key column in the base table.
  • -
-

So for instance, given the following base table definition:

-
CREATE TABLE t (
-    k int,
-    c1 int,
-    c2 int,
-    v1 int,
-    v2 int,
-    PRIMARY KEY (k, c1, c2)
-)
-
-
-

then the following view definitions are allowed:

-
CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, k, c2)
-
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (v1, k, c1, c2)
-
-
-

but the following ones are not allowed:

-
// Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-    PRIMARY KEY (v1, v2, k, c1, c2)
-
-// Error: must include k in the primary as it's a base table primary key column
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, c2)
-
-
-
-
-

MV options

-

A materialized view is internally implemented by a table and as such, creating an MV allows the same options as creating a table.

-
-
-
-

ALTER MATERIALIZED VIEW

-

After creation, you can alter the options of a materialized view using the ALTER MATERIALIZED VIEW statement:

-
-alter_materialized_view_statement ::=  ALTER MATERIALIZED VIEW view_name WITH table_options
-
-

The options that can be updated are the same as at creation time and thus the same as for tables.

-
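For instance, to change the comment of the view created above (the new comment text is illustrative):
ALTER MATERIALIZED VIEW monkeySpecies_by_population WITH comment = 'Population-first view of monkeySpecies';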
-
-

DROP MATERIALIZED VIEW

-

Dropping a materialized view uses the DROP MATERIALIZED VIEW statement:

-
-drop_materialized_view_statement ::=  DROP MATERIALIZED VIEW [ IF EXISTS ] view_name;
-
-

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
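For instance, to drop the view created above:
DROP MATERIALIZED VIEW monkeySpecies_by_population;
DROP MATERIALIZED VIEW IF EXISTS monkeySpecies_by_population;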
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/security.html b/src/doc/3.11.6/cql/security.html deleted file mode 100644 index 3ea999a48..000000000 --- a/src/doc/3.11.6/cql/security.html +++ /dev/null @@ -1,704 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-
-

Database Roles

-

CQL uses database roles to represent users and groups of users. Syntactically, a role is defined by:

-
-role_name ::=  identifier | string
-
-
-

CREATE ROLE

-

Creating a role uses the CREATE ROLE statement:

-
-create_role_statement ::=  CREATE ROLE [ IF NOT EXISTS ] role_name
-                               [ WITH role_options ]
-role_options          ::=  role_option ( AND role_option )*
-role_option           ::=  PASSWORD '=' string
-                          | LOGIN '=' boolean
-                          | SUPERUSER '=' boolean
-                          | OPTIONS '=' map_literal
-
-

For instance:

-
CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-
-
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

-

Permissions on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is -not.

-

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role.

-

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

-

Role names should be quoted if they contain non-alphanumeric characters.

-
-

Setting credentials for internal authentication

-

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single -quotation marks.

-

If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD -clause is not necessary.

-
-
-

Creating a role conditionally

-

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. -If the option is used and the role exists, the statement is a no-op:

-
CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-
-
-
-
-
-

ALTER ROLE

-

Altering the options of a role uses the ALTER ROLE statement:

-
-alter_role_statement ::=  ALTER ROLE role_name WITH role_options
-
-

For instance:

-
ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-
-
-

Conditions on executing ALTER ROLE statements:

-
    -
  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • -
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • -
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • -
  • To modify properties of a role, the client must be granted ALTER permission on that role
  • -
-
-
-

DROP ROLE

-

Dropping a role uses the DROP ROLE statement:

-
-drop_role_statement ::=  DROP ROLE [ IF EXISTS ] role_name
-
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.

-

Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is -used. If the option is used and the role does not exist the statement is a no-op.

-
-
-

GRANT ROLE

-

Granting a role to another uses the GRANT ROLE statement:

-
-grant_role_statement ::=  GRANT role_name TO role_name
-
-

For instance:

-
GRANT report_writer TO alice;
-
-
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also -acquired by alice.

-

Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:

-
GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
-GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-
-
-
-
-

REVOKE ROLE

-

Revoking a role uses the REVOKE ROLE statement:

-
-revoke_role_statement ::=  REVOKE role_name FROM role_name
-
-

For instance:

-
REVOKE report_writer FROM alice;
-
-
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the -report_writer role are also revoked.

-
-
-

LIST ROLES

-

All the known roles (in the system or granted to a specific role) can be listed using the LIST ROLES statement:

-
-list_roles_statement ::=  LIST ROLES [ OF role_name ] [ NORECURSIVE ]
-
-

For instance:

-
LIST ROLES;
-
-
-

returns all known roles in the system; this requires DESCRIBE permission on the database roles resource. And:

-
LIST ROLES OF alice;
-
-
-

enumerates all roles granted to alice, including those transitively acquired. But:

-
LIST ROLES OF bob NORECURSIVE
-
-
-

lists all roles directly granted to bob without including any of the transitively acquired ones.

-
-
-
-

Users

-

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -USER. For backward compatibility, the legacy syntax has been preserved with USER centric statements becoming -synonyms for the ROLE based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role.

-
-

CREATE USER

-

Creating a user uses the CREATE USER statement:

-
-create_user_statement ::=  CREATE USER [ IF NOT EXISTS ] role_name [ WITH PASSWORD string ] [ user_option ]
-user_option           ::=  SUPERUSER | NOSUPERUSER
-
-

For instance:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-
-
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of -statements are equivalent:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-
-
-
-

ALTER USER

-

Altering the options of a user uses the ALTER USER statement:

-
-alter_user_statement ::=  ALTER USER role_name [ WITH PASSWORD string ] [ user_option ]
-
-

For instance:

-
ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-
-
-
-
-

DROP USER

-

Dropping a user uses the DROP USER statement:

-
-drop_user_statement ::=  DROP USER [ IF EXISTS ] role_name
-
-
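For instance, to drop the users created in the examples above (the IF EXISTS form does nothing if the user has already been removed):
DROP USER bob;
DROP USER IF EXISTS alice;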
-
-

LIST USERS

-

Existing users can be listed using the LIST USERS statement:

-
-list_users_statement ::=  LIST USERS
-
-

Note that this statement is equivalent to:

-
LIST ROLES;
-
-
-

but only roles with the LOGIN privilege are included in the output.

-
-
-
-

Data Control

-
-

Permissions

-

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically:

-
    -
  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> -TABLE.
  • -
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • -
  • Resources representing roles have the structure ALL ROLES -> ROLE
  • -
  • Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ALL MBEANS -> -MBEAN
  • -
-

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting -a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace.

-

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes.

-

The full set of available permissions is:

-
    -
  • CREATE
  • -
  • ALTER
  • -
  • DROP
  • -
  • SELECT
  • -
  • MODIFY
  • -
  • AUTHORIZE
  • -
  • DESCRIBE
  • -
  • EXECUTE
  • -
-

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context of functions or mbeans; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT a permission on a resource to which it cannot be applied results in an error response. The following illustrates which permissions can be granted on which types of resource, and which statements are enabled by that permission.

Permission | Resource                  | Operations
CREATE     | ALL KEYSPACES             | CREATE KEYSPACE and CREATE TABLE in any keyspace
CREATE     | KEYSPACE                  | CREATE TABLE in specified keyspace
CREATE     | ALL FUNCTIONS             | CREATE FUNCTION in any keyspace and CREATE AGGREGATE in any keyspace
CREATE     | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE in specified keyspace
CREATE     | ALL ROLES                 | CREATE ROLE
ALTER      | ALL KEYSPACES             | ALTER KEYSPACE and ALTER TABLE in any keyspace
ALTER      | KEYSPACE                  | ALTER KEYSPACE and ALTER TABLE in specified keyspace
ALTER      | TABLE                     | ALTER TABLE
ALTER      | ALL FUNCTIONS             | CREATE FUNCTION and CREATE AGGREGATE: replacing any existing
ALTER      | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE: replacing existing in specified keyspace
ALTER      | FUNCTION                  | CREATE FUNCTION and CREATE AGGREGATE: replacing existing
ALTER      | ALL ROLES                 | ALTER ROLE on any role
ALTER      | ROLE                      | ALTER ROLE
DROP       | ALL KEYSPACES             | DROP KEYSPACE and DROP TABLE in any keyspace
DROP       | KEYSPACE                  | DROP TABLE in specified keyspace
DROP       | TABLE                     | DROP TABLE
DROP       | ALL FUNCTIONS             | DROP FUNCTION and DROP AGGREGATE in any keyspace
DROP       | ALL FUNCTIONS IN KEYSPACE | DROP FUNCTION and DROP AGGREGATE in specified keyspace
DROP       | FUNCTION                  | DROP FUNCTION
DROP       | ALL ROLES                 | DROP ROLE on any role
DROP       | ROLE                      | DROP ROLE
SELECT     | ALL KEYSPACES             | SELECT on any table
SELECT     | KEYSPACE                  | SELECT on any table in specified keyspace
SELECT     | TABLE                     | SELECT on specified table
SELECT     | ALL MBEANS                | Call getter methods on any mbean
SELECT     | MBEANS                    | Call getter methods on any mbean matching a wildcard pattern
SELECT     | MBEAN                     | Call getter methods on named mbean
MODIFY     | ALL KEYSPACES             | INSERT, UPDATE, DELETE and TRUNCATE on any table
MODIFY     | KEYSPACE                  | INSERT, UPDATE, DELETE and TRUNCATE on any table in specified keyspace
MODIFY     | TABLE                     | INSERT, UPDATE, DELETE and TRUNCATE on specified table
MODIFY     | ALL MBEANS                | Call setter methods on any mbean
MODIFY     | MBEANS                    | Call setter methods on any mbean matching a wildcard pattern
MODIFY     | MBEAN                     | Call setter methods on named mbean
AUTHORIZE  | ALL KEYSPACES             | GRANT PERMISSION and REVOKE PERMISSION on any table
AUTHORIZE  | KEYSPACE                  | GRANT PERMISSION and REVOKE PERMISSION on any table in specified keyspace
AUTHORIZE  | TABLE                     | GRANT PERMISSION and REVOKE PERMISSION on specified table
AUTHORIZE  | ALL FUNCTIONS             | GRANT PERMISSION and REVOKE PERMISSION on any function
AUTHORIZE  | ALL FUNCTIONS IN KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION in specified keyspace
AUTHORIZE  | FUNCTION                  | GRANT PERMISSION and REVOKE PERMISSION on specified function
AUTHORIZE  | ALL MBEANS                | GRANT PERMISSION and REVOKE PERMISSION on any mbean
AUTHORIZE  | MBEANS                    | GRANT PERMISSION and REVOKE PERMISSION on any mbean matching a wildcard pattern
AUTHORIZE  | MBEAN                     | GRANT PERMISSION and REVOKE PERMISSION on named mbean
AUTHORIZE  | ALL ROLES                 | GRANT ROLE and REVOKE ROLE on any role
AUTHORIZE  | ROLES                     | GRANT ROLE and REVOKE ROLE on specified roles
DESCRIBE   | ALL ROLES                 | LIST ROLES on all roles or only roles granted to another, specified role
DESCRIBE   | ALL MBEANS                | Retrieve metadata about any mbean from the platform's MBeanServer
DESCRIBE   | MBEANS                    | Retrieve metadata about any mbean matching a wildcard pattern from the platform's MBeanServer
DESCRIBE   | MBEAN                     | Retrieve metadata about a named mbean from the platform's MBeanServer
EXECUTE    | ALL FUNCTIONS             | SELECT, INSERT and UPDATE using any function, and use of any function in CREATE AGGREGATE
EXECUTE    | ALL FUNCTIONS IN KEYSPACE | SELECT, INSERT and UPDATE using any function in specified keyspace and use of any function in keyspace in CREATE AGGREGATE
EXECUTE    | FUNCTION                  | SELECT, INSERT and UPDATE using specified function and use of the function in CREATE AGGREGATE
EXECUTE    | ALL MBEANS                | Execute operations on any mbean
EXECUTE    | MBEANS                    | Execute operations on any mbean matching a wildcard pattern
EXECUTE    | MBEAN                     | Execute operations on named mbean
-
-
-

GRANT PERMISSION

-

Granting a permission uses the GRANT PERMISSION statement:

-
-grant_permission_statement ::=  GRANT permissions ON resource TO role_name
-permissions                ::=  ALL [ PERMISSIONS ] | permission [ PERMISSION ]
-permission                 ::=  CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-resource                   ::=  ALL KEYSPACES
-                               | KEYSPACE keyspace_name
-                               | [ TABLE ] table_name
-                               | ALL ROLES
-                               | ROLE role_name
-                               | ALL FUNCTIONS [ IN KEYSPACE keyspace_name ]
-                               | FUNCTION function_name '(' [ cql_type ( ',' cql_type )* ] ')'
-                               | ALL MBEANS
-                               | ( MBEAN | MBEANS ) string
-
-

For instance:

-
GRANT SELECT ON ALL KEYSPACES TO data_reader;
-
-
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all -keyspaces:

-
GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-
-
-

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace:

-
GRANT DROP ON keyspace1.table1 TO schema_owner;
-
-
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1:

-
GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-
-
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries -which use the function keyspace1.user_function( int ):

-
GRANT DESCRIBE ON ALL ROLES TO role_admin;
-
-
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST -ROLES statement

-
-

GRANT ALL

-

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target -resource.

-
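For instance, a sketch granting every permission applicable to a table, reusing the keyspace1.table1 and schema_owner names from the earlier examples:
GRANT ALL PERMISSIONS ON keyspace1.table1 TO schema_owner;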
-
-

Automatic Granting

-

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or -CREATE ROLE statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource.

-
-
-
-

REVOKE PERMISSION

-

Revoking a permission from a role uses the REVOKE PERMISSION statement:

-
-revoke_permission_statement ::=  REVOKE permissions ON resource FROM role_name
-
-

For instance:

-
REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-
-
-
-
-

LIST PERMISSIONS

-

Listing granted permissions uses the LIST PERMISSIONS statement:

-
-list_permissions_statement ::=  LIST permissions [ ON resource ] [ OF role_name [ NORECURSIVE ] ]
-
-

For instance:

-
LIST ALL PERMISSIONS OF alice;
-
-
-

Show all permissions granted to alice, including those acquired transitively from any other roles:

-
LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-
-
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. -For example, should bob have ALTER permission on keyspace1, that would be included in the results of this -query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to -bob or one of bob’s roles:

-
LIST SELECT PERMISSIONS OF carlos;
-
-
-

Show any permissions granted to carlos or any of carlos’s roles, limited to SELECT permissions on any -resource.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/triggers.html b/src/doc/3.11.6/cql/triggers.html deleted file mode 100644 index 642a02348..000000000 --- a/src/doc/3.11.6/cql/triggers.html +++ /dev/null @@ -1,153 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Triggers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Triggers

-

Triggers are identified by a name defined by:

-
-trigger_name ::=  identifier
-
-
-

CREATE TRIGGER

-

Creating a new trigger uses the CREATE TRIGGER statement:

-
-create_trigger_statement ::=  CREATE TRIGGER [ IF NOT EXISTS ] trigger_name
-                                  ON table_name
-                                  USING string
-
-

For instance:

-
CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-
-
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. Place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it is loaded at cluster startup and must exist on every node that participates in the cluster. A trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

-
-
-

DROP TRIGGER

-

Dropping a trigger uses the DROP TRIGGER statement:

-
-drop_trigger_statement ::=  DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name
-
-

For instance:

-
DROP TRIGGER myTrigger ON myTable;
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/cql/types.html b/src/doc/3.11.6/cql/types.html deleted file mode 100644 index 13cfd9225..000000000 --- a/src/doc/3.11.6/cql/types.html +++ /dev/null @@ -1,697 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Types" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Types

-

CQL is a typed language and supports a rich set of data types, including native types, -collection types, user-defined types, tuple types and custom -types:

-
-cql_type ::=  native_type | collection_type | user_defined_type | tuple_type | custom_type
-
-
-

Native Types

-

The native types supported by CQL are:

-
-native_type ::=  ASCII
-                 | BIGINT
-                 | BLOB
-                 | BOOLEAN
-                 | COUNTER
-                 | DATE
-                 | DECIMAL
-                 | DOUBLE
-                 | DURATION
-                 | FLOAT
-                 | INET
-                 | INT
-                 | SMALLINT
-                 | TEXT
-                 | TIME
-                 | TIMESTAMP
-                 | TIMEUUID
-                 | TINYINT
-                 | UUID
-                 | VARCHAR
-                 | VARINT
-
-

The following table gives additional information on the native data types, and on which kinds of constants each type supports:

type      | constants supported | description
ascii     | string              | ASCII character string
bigint    | integer             | 64-bit signed long
blob      | blob                | Arbitrary bytes (no validation)
boolean   | boolean             | Either true or false
counter   | integer             | Counter column (64-bit signed value). See Counters for details
date      | integer, string     | A date (with no corresponding time value). See Working with dates below for details
decimal   | integer, float      | Variable-precision decimal
double    | integer, float      | 64-bit IEEE-754 floating point
duration  | duration            | A duration with nanosecond precision. See Working with durations below for details
float     | integer, float      | 32-bit IEEE-754 floating point
inet      | string              | An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that there is no inet constant, IP addresses should be input as strings
int       | integer             | 32-bit signed int
smallint  | integer             | 16-bit signed int
text      | string              | UTF8 encoded string
time      | integer, string     | A time (with no corresponding date value) with nanosecond precision. See Working with times below for details
timestamp | integer, string     | A timestamp (date and time) with millisecond precision. See Working with timestamps below for details
timeuuid  | uuid                | Version 1 UUID, generally used as a "conflict-free" timestamp. Also see Timeuuid functions
tinyint   | integer             | 8-bit signed int
uuid      | uuid                | A UUID (of any version)
varchar   | string              | UTF8 encoded string
varint    | integer             | Arbitrary-precision integer
-
-

Counters

-

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the UPDATE statement for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0.

-
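As a minimal sketch (the table and column names are illustrative), a counter column is declared like any other column and is modified only through increments and decrements:
CREATE TABLE page_view_counts (
    page text PRIMARY KEY,
    views counter
);

UPDATE page_view_counts SET views = views + 1 WHERE page = '/home';
UPDATE page_view_counts SET views = views - 1 WHERE page = '/home';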

Counters have a number of important limitations:

-
    -
  • They cannot be used for columns part of the PRIMARY KEY of a table.
  • -
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside -the PRIMARY KEY have the counter type, or none of them have it.
  • -
  • Counters do not support expiration.
  • -
  • The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other -words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
  • -
  • Counter updates are, by nature, not idempotent. An important consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), the client has no way to know if the update has been applied or not. In particular, replaying the update may or may not lead to an over count.
  • -
-
-
-
-

Working with timestamps

-

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as the epoch: January 1 1970 at 00:00:00 GMT.

-

Timestamps can be input in CQL either using their value as an integer, or using a string that -represents an ISO 8601 date. For instance, all of the values below are -valid timestamp values for Mar 2, 2011, at 04:05:00 AM, GMT:

-
    -
  • 1299038700000
  • -
  • '2011-02-03 04:05+0000'
  • -
  • '2011-02-03 04:05:00+0000'
  • -
  • '2011-02-03 04:05:00.000+0000'
  • -
  • '2011-02-03T04:05+0000'
  • -
  • '2011-02-03T04:05:00+0000'
  • -
  • '2011-02-03T04:05:00.000+0000'
  • -
-

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is --0800. The time zone may be omitted if desired ('2011-02-03 04:05:00'), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible.

-

The time of day may also be omitted ('2011-02-03' or '2011-02-03+0000'), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the date type.

-
-
-

Working with dates

-

Values of the date type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at -the center of the range (2^31). Epoch is January 1st, 1970

-

As for timestamp, a date can be input either as an integer or using a date string. In the latter case, the format should be yyyy-mm-dd (so '2011-02-03' for instance).

-
-
-

Working with times

-

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

-

As for timestamp, a time can be input either as an integer or using a string representing the time. In the latter case, the format should be hh:mm:ss[.fffffffff] (where the sub-second precision is optional and, if provided, can have less than nanosecond precision). So for instance, the following are valid inputs for a time:

-
    -
  • '08:12:54'
  • -
  • '08:12:54.123'
  • -
  • '08:12:54.123456'
  • -
  • '08:12:54.123456789'
  • -
-
-
-

Working with durations

-

Values of the duration type are encoded as 3 signed integers of variable length. The first integer represents the number of months, the second the number of days and the third the number of nanoseconds. This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving time. Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded as a 64-bit integer.

-

A duration can be input as:

-
-
    -
  1. (quantity unit)+ like 12h30m where the unit can be:

    -
    -
      -
    • y: years (12 months)
    • -
    • mo: months (1 month)
    • -
    • w: weeks (7 days)
    • -
    • d: days (1 day)
    • -
    • h: hours (3,600,000,000,000 nanoseconds)
    • -
    • m: minutes (60,000,000,000 nanoseconds)
    • -
    • s: seconds (1,000,000,000 nanoseconds)
    • -
    • ms: milliseconds (1,000,000 nanoseconds)
    • -
    • us or µs : microseconds (1000 nanoseconds)
    • -
    • ns: nanoseconds (1 nanosecond)
    • -
    -
    -
  2. -
  3. ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W

    -
  4. -
  5. ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]

    -
  6. -
-
-

For example:

-
INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-
-

Duration columns cannot be used in a table’s PRIMARY KEY. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if 1mo is greater than 29d without a date -context.

-

A 1d duration is not equal to a 24h one, as the duration type was created to be able to support daylight saving time.

-
-
-

Collections

-

CQL supports 3 kinds of collections: Maps, Sets and Lists. The types of those collections are defined by:

-
-collection_type ::=  MAP '<' cql_type ',' cql_type '>'
-                     | SET '<' cql_type '>'
-                     | LIST '<' cql_type '>'
-
-

and their values can be input using collection literals:

-
-collection_literal ::=  map_literal | set_literal | list_literal
-map_literal        ::=  '{' [ term ':' term (',' term : term)* ] '}'
-set_literal        ::=  '{' [ term (',' term)* ] '}'
-list_literal       ::=  '[' [ term (',' term)* ] ']'
-
-

Note however that neither bind_marker nor NULL are supported inside collection literals.

-
-

Noteworthy characteristics

-

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like "the phone numbers of a given user", "labels applied to an email", etc. But when items are expected to grow unbounded ("all messages sent by a user", "events registered by a sensor"…), then collections are not appropriate and a specific table (with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy characteristics and limitations:

-
    -
  • Individual collections are not indexed internally. This means that even to access a single element of a collection, the whole collection has to be read (and reading one is not paged internally).
  • -
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. -Further, some lists operations are not idempotent by nature (see the section on lists below for -details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when -possible.
  • -
-

Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an anti-pattern to use a (single) collection to store large amounts of data.

-
-
-

Maps

-

A map is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:

-
CREATE TABLE users (
-    id text PRIMARY KEY,
-    name text,
-    favs map<text, text> // A map of text keys, and text values
-);
-
-INSERT INTO users (id, name, favs)
-           VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-// Replace the existing map entirely.
-UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-
-

Further, maps support:

-
    -
  • Updating or inserting one or more elements:

    -
    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    -UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
    -
    -
    -
  • -
  • Removing one or more element (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    -UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
    -
    -
    -

    Note that for removing multiple elements in a map, you remove from it a set of keys.

    -
  • -
-

Lastly, TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated elements. In other words:

-
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

-
-
-

Sets

-

A set is a (sorted) collection of unique values. You can define and insert a set with:

-
CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    tags set<text> // A set of text values
-);
-
-INSERT INTO images (name, owner, tags)
-            VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-// Replace the existing set entirely
-UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-
-

Further, sets support:

-
    -
  • Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):

    -
    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
  • Removing one or multiple elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
-

Lastly, as for maps, TTLs if used only apply to the newly inserted values.

-
-
-

Lists

-
-

Note

-

As mentioned above and further discussed at the end of this section, lists have limitations and specific -performance considerations that you should take into account before using them. In general, if you can use a -set instead of list, always prefer a set.

-
-

A list is a (sorted) collection of non-unique values where elements are ordered by their position in the list. You can define and insert a list with:

-
CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int> // A list of integers
-)
-
-INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-// Replace the existing list entirely
-UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-
-

Further, lists support:

-
    -
  • Appending and prepending values to a list:

    -
    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    -UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
    -
    -
    -
  • -
  • Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small:

    -
    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
    -
    -
    -
  • -
  • Removing an element by its position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted:

    -
    DELETE scores[1] FROM plays WHERE id = '123-afde';
    -
    -
    -
  • -
  • Deleting all the occurrences of particular values in the list (if a particular element doesn’t occur at all in the -list, it is simply ignored and no error is thrown):

    -
    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
    -
    -
    -
  • -
-
-

Warning

-

The append and prepend operations are not idempotent by nature. So in particular, if one of these operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value twice.

-
-
-

Warning

-

Setting and removing an element by position and removing occurrences of particular values incur an internal read-before-write. They will thus run more slowly and take more resources than usual updates (with the exclusion of conditional writes that have their own cost).

-
-

Lastly, as for maps, TTLs when used only apply to the newly inserted values.

-
-
-
-

User-Defined Types

-

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using the create_type_statement, alter_type_statement and drop_type_statement described below. But once created, a UDT is simply referred to by its name:

-
-user_defined_type ::=  udt_name
-udt_name          ::=  [ keyspace_name '.' ] identifier
-
-
-

Creating a UDT

-

Creating a new user-defined type is done using a CREATE TYPE statement defined by:

-
-create_type_statement ::=  CREATE TYPE [ IF NOT EXISTS ] udt_name
-                               '(' field_definition ( ',' field_definition )* ')'
-field_definition      ::=  identifier cql_type
-
-

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any type, including collections or other UDTs. For instance:

-
CREATE TYPE phone (
-    country_code int,
-    number text,
-)
-
-CREATE TYPE address (
-    street text,
-    city text,
-    zip text,
-    phones map<text, phone>
-)
-
-CREATE TABLE user (
-    name text PRIMARY KEY,
-    addresses map<text, frozen<address>>
-)
-
-
-

Note that:

-
    -
  • Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the type already exists.
  • -
  • A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At -creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in -the current keyspace.
  • -
  • As of Cassandra 3.11.6, UDT have to be frozen in most cases, hence the frozen<address> in the table definition -above. Please see the section on frozen for more details.
  • -
-
-
-
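As a sketch of the first two points above (the keyspace name is illustrative), a type can be created idempotently and bound to an explicit keyspace:

CREATE TYPE IF NOT EXISTS my_keyspace.phone (
    country_code int,
    number text
);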

UDT literals

-

Once a user-defined type has been created, values can be input using a UDT literal:

-
-udt_literal ::=  '{' identifier ':' term ( ',' identifier ':' term )* '}'
-
-

In other words, a UDT literal is like a map literal but its keys are the names of the fields of the type. For instance, one could insert into the table defined in the previous section using:

-
INSERT INTO user (name, addresses)
-          VALUES ('z3 Pr3z1den7', {
-              'home' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                            'landline' : { country_code: 1, number: '...' } }
-              },
-              'work' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'fax' : { country_code: 1, number: '...' } }
-              }
-          })
-
-
-

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields (in which case those will be null).
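For instance, a sketch reusing the user table defined above, in which every field not listed in the literal (street, zip, phones) ends up null:

INSERT INTO user (name, addresses)
          VALUES ('another user', { 'home' : { city: 'Washington' } });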

-
-
-

Altering a UDT

-

An existing user-defined type can be modified using an ALTER TYPE statement:

-
-alter_type_statement    ::=  ALTER TYPE udt_name alter_type_modification
-alter_type_modification ::=  ADD field_definition
-                             | RENAME identifier TO identifier ( identifier TO identifier )*
-
-

You can:

-
    -
  • add a new field to the type (ALTER TYPE address ADD country text). That new field will be null for any values -of the type created before the addition.
  • -
  • rename the fields of the type (ALTER TYPE address RENAME zip TO zipcode).
  • -
-
-
-

Dropping a UDT

-

You can drop an existing user-defined type using a DROP TYPE statement:

-
-drop_type_statement ::=  DROP TYPE [ IF EXISTS ] udt_name
-
-

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error.

-

If the type dropped does not exist, an error will be returned unless IF EXISTS is used, in which case the operation -is a no-op.

-
-
-
-

Tuples

-

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

-
-tuple_type    ::=  TUPLE '<' cql_type ( ',' cql_type )* '>'
-tuple_literal ::=  '(' term ( ',' term )* ')'
-
-

and can be used thusly:

-
CREATE TABLE durations (
-    event text,
-    duration tuple<int, text>,
-)
-
-INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-
-

Unlike other “composed” types (collections and UDTs), a tuple is always frozen (without the need for the frozen keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of those values can be null but they need to be explicitly declared as such).
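For example, a sketch reusing the durations table above, with the second element of the tuple explicitly set to null:

INSERT INTO durations (event, duration) VALUES ('ev2', (3, null));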

-
-
-

Custom Types

-
-

Note

-

Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is complex, not user friendly, and the other provided types, particularly user-defined types, should almost always be enough.

-
-

A custom type is defined by:

-
-custom_type ::=  string
-
-

A custom type is a string that contains the name of a Java class that extends the server side AbstractType class and that can be loaded by Cassandra (it should thus be in the CLASSPATH of every node running Cassandra). That class will define what values are valid for the type and how the values sort when used for a clustering column. For any other purpose, a value of a custom type is the same as that of a blob, and can in particular be input using the blob literal syntax.
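As a sketch only (com.example.MyCustomType is a hypothetical class name, not something shipped with Cassandra), a column of a custom type would be declared like this:

CREATE TABLE products (
    id int PRIMARY KEY,
    value 'com.example.MyCustomType'
);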

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/data_modeling/index.html b/src/doc/3.11.6/data_modeling/index.html deleted file mode 100644 index a534c57ae..000000000 --- a/src/doc/3.11.6/data_modeling/index.html +++ /dev/null @@ -1,104 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Data Modeling" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Modeling

-
-

Todo

-

TODO

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/development/code_style.html b/src/doc/3.11.6/development/code_style.html deleted file mode 100644 index 0320d4a50..000000000 --- a/src/doc/3.11.6/development/code_style.html +++ /dev/null @@ -1,208 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Code Style" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Code Style

-
-

General Code Conventions

-
-
-
-
-
-

Exception handling

-
-
    -
  • Never ever write catch (...) {} or catch (...) { logger.error() } merely to satisfy Java’s compile-time exception checking. Always propagate the exception up or throw RuntimeException (or, if it “can’t happen,” AssertionError). This makes the exceptions visible to automated tests.
  • -
  • Avoid propagating up checked exceptions that no caller handles. Rethrow as RuntimeException (or IOError, if that is more applicable).
  • -
  • Similarly, logger.warn() is often a cop-out: is this an error or not? If it is, don’t hide it behind a warn; if it isn’t, there is no need for the warning.
  • -
  • If you genuinely know an exception indicates an expected condition, it’s okay to ignore it BUT this must be explicitly explained in a comment.
  • -
-
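As a minimal sketch of the second point above (the variable and exception types are illustrative, not taken from the Cassandra code base):

try
{
    channel.close();
}
catch (IOException e)
{
    // Propagate instead of swallowing, so tests and operators can see the failure.
    throw new RuntimeException(e);
}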
-
-
-

Boilerplate

-
-
    -
  • Avoid redundant @Override annotations when implementing abstract or interface methods.
  • -
  • Do not implement equals or hashcode methods unless they are actually needed.
  • -
  • Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in “real” methods to either.)
  • -
  • Prefer requiring initialization in the constructor to setters.
  • -
  • Avoid redundant this references to member fields or methods.
  • -
  • Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it.
  • -
  • Always include braces for nested levels of conditionals and loops. Only avoid braces for single level.
  • -
-
-
-
-

Multiline statements

-
-
    -
  • Try to keep lines under 120 characters, but use good judgement – it’s better to exceed 120 by a little, than split a line that has no natural splitting points.
  • -
  • When splitting inside a method call, use one line per parameter and align them, like this:
  • -
-
SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(),
-                                         columnFamilies.size(),
-                                         StorageService.getPartitioner());
-
-
-
    -
  • When splitting a ternary, use one line per clause, carry the operator, and align like this:
  • -
-
var = bar == null
-    ? doFoo()
-    : doBar();
-
-
-
-
-
-

Whitespace

-
-
    -
  • Please make sure to use 4 spaces instead of the tab character for all your indentation.
  • -
  • Many lines in many files have a bunch of trailing whitespace… Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn’t have to pay attention to whitespace diffs.
  • -
-
-
-
-

Imports

-

Please observe the following order for your imports:

-
java
-[blank line]
-com.google.common
-org.apache.commons
-org.junit
-org.slf4j
-[blank line]
-everything else alphabetically
-
-
-
-
-

Format files for IDEs

-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/development/how_to_commit.html b/src/doc/3.11.6/development/how_to_commit.html deleted file mode 100644 index 277304953..000000000 --- a/src/doc/3.11.6/development/how_to_commit.html +++ /dev/null @@ -1,180 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "How-to Commit" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

How-to Commit

-

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself.

-

Here is how committing, merging, and pushing will usually look for tickets that follow the convention (if patch-based):

-

Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).

-
-
On cassandra-3.0:
-
    -
  1. git am -3 12345-3.0.patch (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git apply -3 12345-3.3.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git apply -3 12345-trunk.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-

Same scenario, but a branch-based contribution:

-
-
On cassandra-3.0:
-
    -
  1. git cherry-pick <sha-of-3.0-commit> (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git format-patch -1 <sha-of-3.3-commit>
  4. -
  5. git apply -3 <sha-of-3.3-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git format-patch -1 <sha-of-trunk-commit>
  4. -
  5. git apply -3 <sha-of-trunk-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-
-

Tip

-

Notes on git flags: the -3 flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply.

-

The --atomic flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue.

-
-
-

Tip

-

The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. -curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/development/how_to_review.html b/src/doc/3.11.6/development/how_to_review.html deleted file mode 100644 index b985d2bc5..000000000 --- a/src/doc/3.11.6/development/how_to_review.html +++ /dev/null @@ -1,172 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Review Checklist" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Review Checklist

-

When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process:

-

General

-
-
    -
  • Does it conform to the Code Style guidelines?
  • -
  • Is there any redundant or duplicate code?
  • -
  • Is the code as modular as possible?
  • -
  • Can any singletons be avoided?
  • -
  • Can any of the code be replaced with library functions?
  • -
  • Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem?
  • -
-
-

Error-Handling

-
-
    -
  • Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded?
  • -
  • Where third-party utilities are used, are returning errors being caught?
  • -
  • Are invalid parameter values handled?
  • -
  • Are any Throwable/Exceptions passed to the JVMStabilityInspector?
  • -
  • Are errors well-documented? Does the error message tell the user how to proceed?
  • -
  • Do exceptions propagate to the appropriate level in the code?
  • -
-
-

Documentation

-
-
    -
  • Do comments exist and describe the intent of the code (the “why”, not the “how”)?
  • -
  • Are javadocs added where appropriate?
  • -
  • Is any unusual behavior or edge-case handling described?
  • -
  • Are data structures and units of measurement explained?
  • -
  • Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’?
  • -
  • Does the code self-document via clear naming, abstractions, and flow control?
  • -
  • Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed?
  • -
  • Is the ticket tagged with “client-impacting” and “doc-impacting”, where appropriate?
  • -
  • Has lib/licences been updated for third-party libs? Are they Apache License compatible?
  • -
  • Is the Component on the JIRA ticket set appropriately?
  • -
-
-

Testing

-
-
    -
  • Is the code testable? i.e. it doesn’t add too many dependencies or hide them, objects can be initialized, and test frameworks can exercise its methods, etc.
  • -
  • Do tests exist and are they comprehensive?
  • -
  • Do unit tests actually test that the code is performing the intended functionality?
  • -
  • Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse?
  • -
  • If the code may be affected by multi-node clusters, are there dtests?
  • -
  • If the code may take a long time to test properly, are there CVH tests?
  • -
  • Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions?
  • -
  • If patch affects read/write path, did we test for performance regressions w/multiple workloads?
  • -
  • If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature?
  • -
-
-

Logging

-
-
    -
  • Are logging statements logged at the correct level?
  • -
  • Are there logs in the critical path that could affect performance?
  • -
  • Is there any log that could be added to communicate status or troubleshoot potential problems in this feature?
  • -
  • Can any unnecessary logging statement be removed?
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/development/ide.html b/src/doc/3.11.6/development/ide.html deleted file mode 100644 index c864af709..000000000 --- a/src/doc/3.11.6/development/ide.html +++ /dev/null @@ -1,234 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Building and IDE Integration" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Building and IDE Integration

-
-

Building From Source

-

Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using Java 8, Git and Ant.

-

The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:

-
git clone http://git-wip-us.apache.org/repos/asf/cassandra.git cassandra-trunk
-
-
-

Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:

-
git checkout cassandra-3.0
-
-
-

You can get a list of available branches with git branch.

-

Finally build Cassandra using ant:

-
ant
-
-
-

This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled.

-
-

Hint

-

You can set up multiple working trees for different Cassandra versions from the same repository using git-worktree.
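For example, a sketch (the target directory name is illustrative):

git worktree add ../cassandra-3.0 cassandra-3.0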

-
-
-

Note

-

Bleeding edge development snapshots of Cassandra are available from Jenkins continuous integration.

-
-
-
-

Setting up Cassandra in IntelliJ IDEA

-

IntelliJ IDEA by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra.

-
-

Setup Cassandra as a Project (C* 2.1 and newer)

-

Since 2.1.5, there is a new ant target: generate-idea-files. Please see our wiki for instructions for older Cassandra versions.

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Once Cassandra is built, generate the IDEA files using ant:
  2. -
-
ant generate-idea-files
-
-
-
    -
  1. Start IDEA
  2. -
  3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA’s File menu
  4. -
-

The project generated by the ant task generate-idea-files contains nearly everything you need to debug Cassandra and execute unit tests.

-
-
    -
  • Run/debug defaults for JUnit
  • -
  • Run/debug configuration for Cassandra daemon
  • -
  • License header for Java source files
  • -
  • Cassandra code style
  • -
  • Inspections
  • -
-
-
-
-
-

Setting up Cassandra in Eclipse

-

Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the download page. The following guide was created with “Eclipse IDE for Java Developers”.

-

These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x.

-
-

Project Settings

-

It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.

-
-
    -
  • Clone and build Cassandra as described above.
  • -
  • Run ant generate-eclipse-files to create the Eclipse settings.
  • -
  • Start Eclipse.
  • -
  • Select File->Import->Existing Projects into Workspace->Select git directory.
  • -
  • Make sure “cassandra-trunk” is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above).
  • -
  • Confirm “Finish” to have your project imported.
  • -
-
-

You should now be able to find the project as part of the “Package Explorer” or “Project Explorer” without having Eclipse complain about any errors after building the project automatically.

-
-
-

Unit Tests

-

Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting Run As->JUnit Test. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting Debug As->JUnit Test.

-

Alternatively all unit tests can be run from the command line as described in Testing

-
-
-

Debugging Cassandra Using Eclipse

-

There are two ways to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ./bin/cassandra script and connect to the JVM remotely from Eclipse, or start Cassandra from Eclipse right away.

-
-

Starting Cassandra From Command Line

-
-
    -
  • Set environment variable to define remote debugging options for the JVM: -export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"
  • -
  • Start Cassandra by executing the ./bin/cassandra
  • -
-
-

Afterwards you should be able to connect to the running Cassandra process through the following steps:

-

From the menu, select Run->Debug Configurations..

-../_images/eclipse_debug0.png -

Create new remote application

-../_images/eclipse_debug1.png -

Configure connection settings by specifying a name and port 1414

-../_images/eclipse_debug2.png -

Afterwards confirm “Debug” to connect to the JVM and start debugging Cassandra!

-
-
-

Starting Cassandra From Eclipse

-

Cassandra can also be started directly from Eclipse if you don’t want to use the command line.

-

From the menu, select Run->Run Configurations..

-../_images/eclipse_debug3.png -

Create new application

-../_images/eclipse_debug4.png -

Specify name, project and main class org.apache.cassandra.service.CassandraDaemon

-../_images/eclipse_debug5.png -

Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed.

-
-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true
-
-
-../_images/eclipse_debug6.png -

Now just confirm “Debug” and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging!

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/development/index.html b/src/doc/3.11.6/development/index.html deleted file mode 100644 index 06a49208c..000000000 --- a/src/doc/3.11.6/development/index.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Development" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.6/development/patches.html b/src/doc/3.11.6/development/patches.html deleted file mode 100644 index cf991eba7..000000000 --- a/src/doc/3.11.6/development/patches.html +++ /dev/null @@ -1,247 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Contributing Code Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contributing Code Changes

-
-

Choosing What to Work on

-

Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java and Python), documentation, testing or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you’re addressing.

-
-
As a general rule of thumb:
-
    -
  • Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the developer community
  • -
  • Bug fixes take higher priority compared to features
  • -
  • The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
  • -
  • Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately
  • -
-
-
-
-

Hint

-

Not sure what to work on? Just pick an issue tagged with the low hanging fruit label in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

-
-
-
-

Before You Start Coding

-

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it’s generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or IRC channel listed on our community page.

-
-
You should also
-
    -
  • Avoid redundant work by searching for already reported issues in JIRA
  • -
  • Create a new issue early in the process describing what you’re working on - not just after finishing your patch
  • -
  • Link related JIRA issues with your own ticket to provide a better context
  • -
  • Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
  • -
  • Ping people you would actively like to ask for advice on JIRA by mentioning them
  • -
-
-
There are also some fixed rules that you need to be aware of:
-
    -
  • Patches will only be applied to branches by following the release model
  • -
  • Code must be testable
  • -
  • Code must follow the Code Style convention
  • -
  • Changes must not break compatibility between different Cassandra versions
  • -
  • Contributions must be covered by the Apache License
  • -
-
-
-
-

Choosing the Right Branches to Work on

-

There are currently multiple Cassandra versions maintained in individual branches:

Version   Policy
3.x       Tick-tock (see below)
3.0       Bug fixes only
2.2       Bug fixes only
2.1       Critical bug fixes only
-

Corresponding branches in git are easy to recognize as they are named cassandra-<release> (e.g. cassandra-3.0). The trunk branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

-
-

Tick-Tock Releases

-

New releases created as part of the tick-tock release process will either focus on stability (odd version numbers) or introduce new features (even version numbers). Any code for new Cassandra features should be based on the latest, unreleased 3.x branch with an even version number, or based on trunk.

-
-
-

Bug Fixes

-

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be cassandra-2.1 -> cassandra-2.2 -> cassandra-3.0 -> cassandra-3.x -> trunk. But don’t worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn’t very common. As a contributor, you’re also not expected to provide a single patch for each version. What you need to do however is:

-
-
    -
  • Be clear about which versions you could verify to be affected by the bug
  • -
  • For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on a case-by-case basis
  • -
  • If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0)
  • -
  • Test if the patch can be merged cleanly across branches in the direction listed above
  • -
  • Be clear which branches may need attention by the committer or even create custom patches for those if you can
  • -
-
-
-
-
-
-

Creating a Patch

-

So you’ve finished coding and the great moment arrives: it’s time to submit your patch!

-
-
    -
  1. Create a branch for your changes if you haven’t done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. git checkout -b 12345-3.0
  2. -
  3. Verify that you follow Cassandra’s Code Style
  4. -
  5. Make sure all tests (including yours) pass using ant as described in Testing. If you suspect a test failure is unrelated to your change, it may be useful to check the test’s status by searching the issue tracker or looking at CI results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites.
  6. -
  7. Consider going through the Review Checklist for your code. This will help you to understand how others will consider your change for inclusion.
  8. -
  9. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either:
  10. -
-
-
    -
  1. Attach a patch to JIRA with a single squashed commit in it (per branch), or
  2. -
  3. Squash the commits in-place in your branches into one
  4. -
-
-
    -
  1. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch ending with the following statement on the last line: patch by X; reviewed by Y for CASSANDRA-ZZZZZ
  2. -
  3. When you’re happy with the result, create a patch:
  4. -
-
-
git add <any new or modified file>
-git commit -m '<message>'
-git format-patch HEAD~1
-mv <patch-file> <ticket-branchname.txt> (e.g. 12345-trunk.txt, 12345-3.0.txt)
-
-
-

Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch:

-
git push --set-upstream origin 12345-3.0
-
-
-
-
    -
  1. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless.
  2. -
  3. Attach the newly generated patch to the ticket/add a link to your branch and click “Submit Patch” at the top of the ticket. This will move the ticket into “Patch Available” status, indicating that your submission is ready for review.
  4. -
  5. Wait for other developers or committers to review it and hopefully +1 the ticket (see Review Checklist). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable.
  6. -
  7. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into “Patch Available” once again.
  8. -
-
-

Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/development/testing.html b/src/doc/3.11.6/development/testing.html deleted file mode 100644 index 4d891f50f..000000000 --- a/src/doc/3.11.6/development/testing.html +++ /dev/null @@ -1,171 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Testing" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Testing

-

Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you’re working on.

-
-

Unit Testing

-

The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the test/unit directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example.

-
@Test
-public void testBatchAndList() throws Throwable
-{
-   createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
-   execute("BEGIN BATCH " +
-           "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
-           "APPLY BATCH");
-
-   assertRows(execute("SELECT l FROM %s WHERE k = 0"),
-              row(list(1, 2, 3)));
-}
-
-
-

Unit tests can be run from the command line using the ant test command, ant test -Dtest.name=<simple_classname> to execute a test suite or ant testsome -Dtest.name=<FQCN> -Dtest.methods=<testmethod1>[,testmethod2] for individual tests. For example, to run all test methods in the org.apache.cassandra.cql3.SimpleQueryTest class, you would run:

-
ant test -Dtest.name=SimpleQueryTest
-
-
-

To run only the testStaticCompactTables() test method from that class, you would run:

-
ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables
-
-
-
-

Long running tests

-

Tests that consume a significant amount of time during execution can be found in the test/long directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under test/long only when using the ant long-test target.
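For example:

ant long-test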

-
-
-
-

DTests

-

One way of doing integration or system testing at larger scale is by using dtest, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ccmlib from the ccm project. Dtests will setup clusters using this library just as you do running ad-hoc ccm commands on your local machine. Afterwards dtests will use the Python driver to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes.

-

Using dtests helps us to prevent regression bugs by continually executing tests on the CI server against new patches. For frequent contributors, this Jenkins is set up to build branches from their GitHub repositories. It is likely that your reviewer will use this Jenkins instance to run tests for your patch. Read more on the motivation behind the CI server here.

-

The best way to learn how to write dtests is probably by reading the introduction “How to Write a Dtest” and by looking at existing, recently updated tests in the project. New tests must follow certain style conventions that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR.

-

Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you.

-
-
-

Performance Testing

-

Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable.

-
-

Cassandra Stress Tool

-

TODO: CASSANDRA-12365

-
-
-

cstar_perf

-

Another tool available on github is cstar_perf that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it.

-
-
-

CircleCI

-

Cassandra ships with a default CircleCI configuration. To enable running tests on your branches, you need to go to the CircleCI website, click “Login” and log in with your github account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click “Projects”, then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ant eclipse-warnings and ant test will be run. If you up the parallelism to 4, it also runs ant long-test, ant test-compression and ant stress-test.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/faq/index.html b/src/doc/3.11.6/faq/index.html deleted file mode 100644 index c56237922..000000000 --- a/src/doc/3.11.6/faq/index.html +++ /dev/null @@ -1,315 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Frequently Asked Questions" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Frequently Asked Questions

- -
-

Why can’t I set listen_address to listen on 0.0.0.0 (all my addresses)?

-

Cassandra is a gossip-based distributed system and listen_address is the address a node tells other nodes to reach -it at. Telling other nodes “contact me on any of my addresses” is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen.

-

If you don’t want to manually specify an IP to listen_address for each node in your cluster (understandable!), leave -it blank and Cassandra will use InetAddress.getLocalHost() to pick an address. Then it’s up to you or your ops team -to make things resolve correctly (/etc/hosts/, dns, etc).

-

One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769).

-

See CASSANDRA-256 and CASSANDRA-43 for more gory details.

-
-
-

What ports does Cassandra use?

-

By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX (and 9160 for the deprecated Thrift interface). The internode communication and native protocol ports -are configurable in the Cassandra Configuration File. The JMX port is configurable in cassandra-env.sh (through JVM -options). All ports are TCP.

-
-
-

What happens to existing data in my cluster when I add new nodes?

-

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See Adding, replacing, moving and removing nodes.

-
-
-

I delete data from Cassandra, but disk usage stays the same. What gives?

-

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can’t actually be removed -when you perform a delete, instead, a marker (also called a “tombstone”) is written to indicate the value’s new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See Compaction for more detail.

-
-
-

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?

-

This happens when you have the same token assigned to each node. Don’t do that.

-

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

-

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart.

-
-
-

Can I change the replication factor (of a keyspace) on a live cluster?

-

Yes, but it will require running repair (or cleanup) to change the replica count of existing data:

-
    -
  • Alter the replication factor for desired keyspace (using cqlsh for instance).
  • -
  • If you’re reducing the replication factor, run nodetool cleanup on the cluster to remove surplus replicated data. -Cleanup runs on a per-node basis.
  • -
  • If you’re increasing the replication factor, run nodetool repair to ensure data is replicated according to the new -configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster -performance. It’s highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will -most likely swamp it.
  • -
-
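For instance, increasing the replication factor and then repairing might look like the following sketch (the keyspace name and data centre settings are illustrative; run the ALTER KEYSPACE statement in cqlsh and the repair from a shell):

ALTER KEYSPACE my_keyspace WITH replication = { 'class' : 'NetworkTopologyStrategy', 'dc1' : 3 };

$ nodetool repair my_keyspace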
-
-

Can I Store (large) BLOBs in Cassandra?

-

Cassandra isn’t optimized for large file or BLOB storage and a single blob value is always read and sent to the client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

-

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the max_mutation_size_in_kb configuration of the Cassandra Configuration File (which defaults to half of commitlog_segment_size_in_mb, which itself defaults to 32MB).
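If larger mutations are genuinely required, a sketch of raising the cap in cassandra.yaml (the values are illustrative; max_mutation_size_in_kb should normally be left at half of the segment size):

commitlog_segment_size_in_mb: 64
max_mutation_size_in_kb: 32768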

-
-
-

Nodetool says “Connection refused to host: 127.0.1.1” for any remote host. What gives?

-

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

-

If you are not using DNS, then make sure that your /etc/hosts files are accurate on both ends. If that fails, try -setting the -Djava.rmi.server.hostname=<public name> JVM option near the bottom of cassandra-env.sh to an -interface that you can reach from the remote machine.
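For example, a sketch of that option in cassandra-env.sh (the address is illustrative):

JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=203.0.113.42"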

-
-
-

Will batching my operations speed up my bulk load?

-

No. Using batches to load data will generally just add “spikes” of latency. Use asynchronous INSERTs instead, or use -true Bulk Loading.

-

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch stays reasonable). But never ever blindly batch everything!

-
-
-

On RHEL nodes are unable to join the ring

-

Check if SELinux is on; if it is, turn it off.

-
-
-

How do I unsubscribe from the email list?

-

Send an email to user-unsubscribe@cassandra.apache.org.

-
-
-

Why does top report that Cassandra is using a lot more memory than the Java heap max?

-

Cassandra uses Memory Mapped Files (mmap) internally. That is, we -use the operating system’s virtual memory system to map a number of on-disk files into the Cassandra process’ address -space. This will “use” virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that.

-

What matters from the perspective of “memory use” in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap’d /dev/zero, which represent real memory used. The key issue is that for a mmap’d file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write.

-

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don’t -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail here.

-
-
-

What are seeds?

-

Seeds are used during startup to discover the cluster.

-

If you configure your nodes to refer to some node as a seed, nodes in your ring tend to send Gossip messages to seeds more often (also see the section on gossip) than to non-seeds. In other words, seeds work as hubs of the Gossip network. With seeds, each node can detect status changes of other nodes quickly.

-

Seeds are also referred to by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node joins the ring, it learns about the other nodes, so it doesn’t need seeds on subsequent boots.

-

You can make a node a seed at any time. There is nothing special about seed nodes. If you list the node in the seed list, it is a seed.

-

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you do not have to worry about bootstrap at all.

-

Recommended usage of seeds:

-
    -
  • pick two (or more) nodes per data center as seed nodes.
  • -
  • sync the seed list to all your nodes
  • -
-
-
-

Does single seed mean single point of failure?

-

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

-
-
-

Why can’t I call jmx method X on jconsole?

-

Some JMX operations use array arguments and as jconsole doesn’t support array arguments, those operations can’t be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations or use an array-capable JMX monitoring tool.

-
-
-

Why do I see “… messages dropped …” in the logs?

-

This is a symptom of load shedding – Cassandra defending itself against more requests than it can handle.

-

Internode messages which are received by a node, but do not get processed within their proper timeout (see read_request_timeout, write_request_timeout, … in the Cassandra Configuration File), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

-

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

-

For reads, this means a read request may not have completed.

-

Load shedding is part of the Cassandra architecture, if this is a persistent issue it is generally a sign of an -overloaded node or cluster.

-
-
-

Cassandra dies with java.lang.OutOfMemoryError: Map failed

-

If Cassandra is dying specifically with the “Map failed” message, it means the OS is denying java the ability to -lock more memory. In linux, this typically means memlock is limited. Check /proc/<pid of cassandra>/limits to verify -this and raise it (eg, via ulimit in bash). You may also need to increase vm.max_map_count. Note that the debian -package handles this for you automatically.
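For example, a sketch of checking and raising these limits (the values are illustrative; persist them via limits.conf and sysctl.conf as appropriate):

$ grep "Max locked memory" /proc/$(pgrep -f CassandraDaemon)/limits
$ ulimit -l unlimited
$ sudo sysctl -w vm.max_map_count=1048575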

-
-
-

What happens if two updates are made with the same timestamp?

-

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected.

-
-
-

Why does bootstrapping a new node fail with a “Stream failed” error?

-

Two main possibilities:

-
    -
  1. the GC may be creating long pauses disrupting the streaming process
  2. -
  3. compactions happening in the background hold streaming long enough that the TCP connection fails
  4. -
-

In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (the default is very high on Linux). Try to just run the following:

-
$ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-
-

To make those settings permanent, add them to your /etc/sysctl.conf file.

-

Note: GCE’s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/genindex.html b/src/doc/3.11.6/genindex.html deleted file mode 100644 index 82c5b8d90..000000000 --- a/src/doc/3.11.6/genindex.html +++ /dev/null @@ -1,93 +0,0 @@ - ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Index" -doc-header-links: ' - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.6/getting_started/configuring.html b/src/doc/3.11.6/getting_started/configuring.html deleted file mode 100644 index 5d5e6d0eb..000000000 --- a/src/doc/3.11.6/getting_started/configuring.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

For running Cassandra on a single node, the steps above are enough; you don’t really need to change any configuration. However, when you deploy a cluster of nodes, or use clients that are not on the same host, then there are some parameters that must be changed.

-

The Cassandra configuration files can be found in the conf directory of tarballs. For packages, the configuration -files will be located in /etc/cassandra.

-
-

Main runtime properties

-

Most of configuration in Cassandra is done via yaml properties that can be set in cassandra.yaml. At a minimum you -should consider setting the following properties:

-
    -
  • cluster_name: the name of your cluster.
  • -
  • seeds: a comma separated list of the IP addresses of your cluster seeds.
  • -
  • storage_port: you don’t necessarily need to change this but make sure that there are no firewalls blocking this -port.
  • -
  • listen_address: the IP address of your node, this is what allows other nodes to communicate with this node so it is important that you change it. Alternatively, you can set listen_interface to tell Cassandra which interface to use, and consequently which address to use. Set only one, not both.
  • -
  • native_transport_port: as for storage_port, make sure this port is not blocked by firewalls as clients will -communicate with Cassandra on this port.
  • -
-
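A sketch of what these settings might look like in cassandra.yaml (the addresses are illustrative; note that the seed list lives under seed_provider in the actual file):

cluster_name: 'My Cluster'
listen_address: 192.0.2.10
storage_port: 7000
native_transport_port: 9042
seed_provider:
    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
      parameters:
          - seeds: "192.0.2.10,192.0.2.11"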
-
-

Changing the location of directories

-

The following yaml properties control the location of directories:

-
    -
  • data_file_directories: one or more directories where data files are located.
  • -
  • commitlog_directory: the directory where commitlog files are located.
  • -
  • saved_caches_directory: the directory where saved caches are located.
  • -
  • hints_directory: the directory where hints are located.
  • -
-

For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks.
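For instance, a sketch placing the commit log on its own disk (the paths are illustrative):

data_file_directories:
    - /mnt/data-disk/cassandra/data
commitlog_directory: /mnt/commitlog-disk/cassandra/commitlog
saved_caches_directory: /var/lib/cassandra/saved_caches
hints_directory: /var/lib/cassandra/hints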

-
-
-

Environment variables

-

JVM-level settings such as heap size can be set in cassandra-env.sh. You can add any additional JVM command line -argument to the JVM_OPTS environment variable; when Cassandra starts these arguments will be passed to the JVM.
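For example, a sketch of pinning the heap size and adding an extra JVM argument in cassandra-env.sh (the values are illustrative, not recommendations):

MAX_HEAP_SIZE="4G"
HEAP_NEWSIZE="800M"
JVM_OPTS="$JVM_OPTS -Dcassandra.ring_delay_ms=15000"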

-
-
-

Logging

-

The logger in use is logback. You can change logging properties by editing logback.xml. By default it will log at -INFO level into a file called system.log and at debug level into a file called debug.log. When running in the -foreground, it will also log at INFO level to the console.
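For instance, a sketch of turning up the log level for a single package by adding a logger element to logback.xml (the package chosen is illustrative):

<logger name="org.apache.cassandra.gms" level="DEBUG"/>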

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/getting_started/drivers.html b/src/doc/3.11.6/getting_started/drivers.html deleted file mode 100644 index 719b80c40..000000000 --- a/src/doc/3.11.6/getting_started/drivers.html +++ /dev/null @@ -1,223 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Client drivers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Client drivers

-

Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver.

- -
-

Python

- -
-

Clojure

- -
-
-

Erlang

- -
-
-

Go

- -
-
-

Haskell

- -
-
-

Rust

- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/getting_started/index.html b/src/doc/3.11.6/getting_started/index.html deleted file mode 100644 index 88f25e177..000000000 --- a/src/doc/3.11.6/getting_started/index.html +++ /dev/null @@ -1,146 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Getting Started" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.6/getting_started/installing.html b/src/doc/3.11.6/getting_started/installing.html deleted file mode 100644 index 78ff0f74f..000000000 --- a/src/doc/3.11.6/getting_started/installing.html +++ /dev/null @@ -1,196 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Installing Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Installing Cassandra

-
-

Prerequisites

-
    -
  • The latest version of Java 8, either the Oracle Java Standard Edition 8 or OpenJDK 8. To -verify that you have the correct version of java installed, type java -version.
  • -
  • For using cqlsh, the latest version of Python 2.7. To verify that you have -the correct version of Python installed, type python --version.
  • -
-
-
-

Installation from binary tarball files

- -
tar -xvf apache-cassandra-3.6-bin.tar.gz cassandra
-
-
-

The files will be extracted into apache-cassandra-3.6; substitute 3.6 with the release number that you have downloaded.

-
    -
  • Optionally add apache-cassandra-3.6/bin to your path.
  • -
  • Start Cassandra in the foreground by invoking bin/cassandra -f from the command line. Press “Control-C” to stop -Cassandra. Start Cassandra in the background by invoking bin/cassandra from the command line. Invoke kill pid -or pkill -f CassandraDaemon to stop Cassandra, where pid is the Cassandra process id, which you can find for -example by invoking pgrep -f CassandraDaemon.
  • -
  • Verify that Cassandra is running by invoking bin/nodetool status from the command line.
  • -
  • Configuration files are located in the conf sub-directory.
  • -
  • Since Cassandra 2.1, log and data directories are located in the logs and data sub-directories respectively. -Older versions defaulted to /var/log/cassandra and /var/lib/cassandra. Due to this, it is necessary to either -start Cassandra with root privileges or change conf/cassandra.yaml to use directories owned by the current user, -as explained below in the section on changing the location of directories.
  • -
-
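Taken together, a minimal first run from the tarball might look like the following sketch. The 3.6 version string and the paths are placeholders for whatever release you actually downloaded:

```bash
# Extract the tarball and move into the release directory (substitute your version).
tar -xvf apache-cassandra-3.6-bin.tar.gz
cd apache-cassandra-3.6

# Optionally make the bundled tools available on your PATH for this shell session.
export PATH="$PWD/bin:$PATH"

# Start Cassandra in the background, then confirm the node reports status UN (Up/Normal).
bin/cassandra
bin/nodetool status

# Stop the node again when finished.
pkill -f CassandraDaemon
```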
-
-

Installation from Debian packages

-
    -
  • Add the Apache repository of Cassandra to /etc/apt/sources.list.d/cassandra.sources.list, for example for version -3.6:
  • -
-
echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-
-
-
    -
  • Add the Apache Cassandra repository keys:
  • -
-
curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
-
-
-
    -
  • Update the repositories:
  • -
-
sudo apt-get update
-
-
-
    -
  • If you encounter this error:
  • -
-
GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA
-
-
-

Then add the public key A278B781FE4B2BDA as follows:

-
sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA
-
-
-

and repeat sudo apt-get update. The actual key may be different; you get it from the error message itself. For a full list of Apache contributors' public keys, you can refer to this link.

-
    -
  • Install Cassandra:
  • -
-
sudo apt-get install cassandra
-
-
-
    -
  • You can start Cassandra with sudo service cassandra start and stop it with sudo service cassandra stop. -However, normally the service will start automatically. For this reason be sure to stop it if you need to make any -configuration changes.
  • -
  • Verify that Cassandra is running by invoking nodetool status from the command line.
  • -
  • The default location of configuration files is /etc/cassandra.
  • -
  • The default location of log and data directories is /var/log/cassandra/ and /var/lib/cassandra.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/getting_started/querying.html b/src/doc/3.11.6/getting_started/querying.html deleted file mode 100644 index a294fdd5a..000000000 --- a/src/doc/3.11.6/getting_started/querying.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Inserting and querying" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Inserting and querying

-

The API to Cassandra is CQL, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done:

-
    -
  • either using cqlsh,
  • -
  • or through a client driver for Cassandra.
  • -
-
-

CQLSH

-

cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:

-
$ bin/cqlsh localhost
-Connected to Test Cluster at localhost:9042.
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-Use HELP for help.
-cqlsh> SELECT cluster_name, listen_address FROM system.local;
-
- cluster_name | listen_address
---------------+----------------
- Test Cluster |      127.0.0.1
-
-(1 rows)
-cqlsh>
-
-
-

See the cqlsh section for full documentation.

-
-
-

Client drivers

-

A lot of client drivers are provided by the Community and a list of known drivers is provided in the next section. You should refer to the documentation of each driver for more information on how to use it.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/index.html b/src/doc/3.11.6/index.html deleted file mode 100644 index 4f48db0e8..000000000 --- a/src/doc/3.11.6/index.html +++ /dev/null @@ -1,75 +0,0 @@ ---- -layout: doclandingpage -title: "Documentation" -is_homepage: false -is_sphinx_doc: false ---- - -

Apache Cassandra Documentation v3.11.6

- -
This documentation is currently a work-in-progress and contains a number of TODO sections. - Contributions are welcome.
- -

Main documentation

- - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - -
- - - -
- - - -
- - - -
- -

Meta information

- - - - diff --git a/src/doc/3.11.6/objects.inv b/src/doc/3.11.6/objects.inv deleted file mode 100644 index 1468441b2..000000000 Binary files a/src/doc/3.11.6/objects.inv and /dev/null differ diff --git a/src/doc/3.11.6/operating/backups.html b/src/doc/3.11.6/operating/backups.html deleted file mode 100644 index 330070401..000000000 --- a/src/doc/3.11.6/operating/backups.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Backups" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.6/operating/bloom_filters.html b/src/doc/3.11.6/operating/bloom_filters.html deleted file mode 100644 index dd6685efc..000000000 --- a/src/doc/3.11.6/operating/bloom_filters.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bloom Filters" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bloom Filters

-

In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter.

-

Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states:
  • The data definitely does not exist in the given file, or
  • The data probably exists in the given file.

-

While bloom filters cannot guarantee that the data exists in a given SSTable, bloom filters can be made more accurate by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the bloom_filter_fp_chance to a float between 0 and 1.

-

The default value for bloom_filter_fp_chance is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases.

-

Bloom filters are stored in RAM, but off-heap, so operators should not consider bloom filters when selecting the maximum heap size. As accuracy improves (as the bloom_filter_fp_chance gets closer to 0), memory usage increases non-linearly - the bloom filter for bloom_filter_fp_chance = 0.01 will require about three times as much memory as the same table with bloom_filter_fp_chance = 0.1.

-

Typical values for bloom_filter_fp_chance are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case:

-
    -
  • Users with more RAM and slower disks may benefit from setting the bloom_filter_fp_chance to a numerically lower -number (such as 0.01) to avoid excess IO operations
  • -
  • Users with less RAM, more dense nodes, or very fast disks may tolerate a higher bloom_filter_fp_chance in order to -save RAM at the expense of excess IO operations
  • -
  • In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics -workloads), setting the bloom_filter_fp_chance to a much higher number is acceptable.
  • -
-
-

Changing

-

The bloom filter false positive chance is visible in the DESCRIBE TABLE output as the field -bloom_filter_fp_chance. Operators can change the value with an ALTER TABLE statement:

-
ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01
-
-
-

Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ALTER TABLE statement, new files on disk will be written with the new bloom_filter_fp_chance, but existing sstables will not be modified until they are compacted - if an operator needs a change to bloom_filter_fp_chance to take effect, they can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the sstables on disk, regenerating the bloom filters in the process.

-
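As a concrete sketch of that workflow (the keyspace and table names below are placeholders), the change and the optional SSTable rewrite can be driven from the shell:

```bash
# Lower the false positive chance for one table (placeholder keyspace/table names).
cqlsh -e "ALTER TABLE my_keyspace.my_table WITH bloom_filter_fp_chance = 0.01;"

# Optionally rewrite the existing SSTables so the new bloom filters take effect immediately.
nodetool upgradesstables -a my_keyspace my_table
```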
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/operating/bulk_loading.html b/src/doc/3.11.6/operating/bulk_loading.html deleted file mode 100644 index b3d60827c..000000000 --- a/src/doc/3.11.6/operating/bulk_loading.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bulk Loading" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.6/operating/cdc.html b/src/doc/3.11.6/operating/cdc.html deleted file mode 100644 index 66a494a82..000000000 --- a/src/doc/3.11.6/operating/cdc.html +++ /dev/null @@ -1,186 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Change Data Capture" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Change Data Capture

-
-

Overview

-

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the combined flushed and unflushed CDC-log is reached. An operator can -enable CDC on a table by setting the table property cdc=true (either when creating the table or altering it), after which any CommitLogSegments containing -data for a CDC-enabled table are moved to the directory specified in cassandra.yaml on segment discard. A threshold -of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will not allow CDC -data until a consumer parses and removes data from the destination archival directory.

-
-
-

Configuration

-
-

Enabling or disabling CDC on a table

-

CDC is enabled or disabled through the cdc table property, for instance:

-
CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=false;
-
-
-
-
-

cassandra.yaml parameters

-

The following cassandra.yaml options are available for CDC:

-
-
cdc_enabled (default: false)
-
Enable or disable CDC operations node-wide.
-
cdc_raw_directory (default: $CASSANDRA_HOME/data/cdc_raw)
-
Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
-
cdc_free_space_in_mb: (default: min of 4096 and 1/8th volume space)
-
Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in -cdc_raw_directory.
-
cdc_free_space_check_interval_ms (default: 250)
-
When at capacity, we limit the frequency with which we re-calculate the space taken up by cdc_raw_directory to -prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-
-
-
-

Reading CommitLogSegments

-

This implementation included a refactor of CommitLogReplayer into CommitLogReader.java. -Usage is fairly straightforward -with a variety of signatures -available for use. In order to handle mutations read from disk, implement CommitLogReadHandler.

-
-
-

Warnings

-

Do not enable CDC without some kind of consumption process in-place.

-

The initial implementation of Change Data Capture does not include a parser (see Reading CommitLogSegments above) -so, if CDC is enabled on a node and then on a table, the cdc_free_space_in_mb will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place.

-
-
-

Further Reading

- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/operating/compaction.html b/src/doc/3.11.6/operating/compaction.html deleted file mode 100644 index 0d758e9f0..000000000 --- a/src/doc/3.11.6/operating/compaction.html +++ /dev/null @@ -1,514 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compaction" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compaction

-
-

Types of compaction

-

The concept of compaction is used for different kinds of operations in Cassandra; the common thing about these operations is that they take one or more sstables and output new sstables. The types of compaction are:

-
-
Minor compaction
-
triggered automatically in Cassandra.
-
Major compaction
-
a user executes a compaction over all sstables on the node.
-
User defined compaction
-
a user triggers a compaction on a given set of sstables.
-
Scrub
-
try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you -will need to run a full repair on the node.
-
Upgradesstables
-
upgrade sstables to the latest version. Run this after upgrading to a new major version.
-
Cleanup
-
remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been -bootstrapped since that node will take ownership of some ranges from those nodes.
-
Secondary index rebuild
-
rebuild the secondary indexes on the node.
-
Anticompaction
-
after repair the ranges that were actually repaired are split out of the sstables that existed when repair started.
-
Sub range compaction
-
It is possible to only compact a given sub range - this could be useful if you know a token that has been -misbehaving - either gathering many updates or many deletes. (nodetool compact -st x -et y) will pick -all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will -most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS -the resulting sstable will end up in L0.
-
-
-
-

When is a minor compaction triggered?

-

  • When an sstable is added to the node through flushing/streaming etc.
  • When autocompaction is enabled after being disabled (nodetool enableautocompaction).
  • When compaction adds new sstables.
  • A check for new minor compactions every 5 minutes.

-
-
-

Merging sstables

-

Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently.

-
-
-

Tombstones and Garbage Collection (GC) Grace

-
-

Why Tombstones

-

When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra.

-
-
-

Deletes without tombstones

-

Imagine a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If one of the nodes fails and our delete operation only removes existing values, we can end up with a cluster that looks like:

-
[], [], [A]
-
-
-

Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:

-
[A], [A], [A]
-
-
-

This would cause our data to be resurrected even though it had been -deleted.

-
-
-

Deletes with Tombstones

-

Starting again with a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If instead of removing data we add a tombstone record, our single node failure situation will look like this.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A]
-
-
-

Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]]
-
-
-

Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as gc_grace_seconds for every table in Cassandra.

-
-
-

The gc_grace_seconds parameter and Tombstone Removal

-

The table level gc_grace_seconds parameter controls how long Cassandra will retain tombstones through compaction events before finally removing them. This duration should directly reflect the amount of time a user expects to allow before recovering a failed node. After gc_grace_seconds has expired the tombstone may be removed (meaning there will no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to be able to drop an actual tombstone the following needs to be true:

-
    -
  • The tombstone must be older than gc_grace_seconds
  • -
  • If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older -than the tombstone containing X must be included in the same compaction. We don’t need to care if the partition is in -an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older -than the data it cannot shadow that data.
  • -
  • If the option only_purge_repaired_tombstones is enabled, tombstones are only removed if the data has also been -repaired.
  • -
-

If a node remains down or disconnected for longer than gc_grace_seconds, its deleted data will be repaired back to the other nodes and re-appear in the cluster. This is basically the same as in the “Deletes without Tombstones” section. Note that tombstones will not be removed until a compaction event even if gc_grace_seconds has elapsed.

-

The default value for gc_grace_seconds is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using WITH gc_grace_seconds.

-
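For example, setting a shorter grace period on a single table could look like the following sketch (the one-day value and the table name are illustrative only):

```bash
# Keep tombstones for one day instead of the default ten (placeholder table name).
cqlsh -e "ALTER TABLE my_keyspace.my_table WITH gc_grace_seconds = 86400;"
```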
-
-
-

TTL

-

Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least gc_grace_seconds. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once.

-
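As a small illustration (the table and values below are hypothetical), a TTL can be supplied per write; once it expires the row becomes a tombstone that is kept for at least gc_grace_seconds:

```bash
# Write a row that expires after one hour (3600 seconds).
cqlsh -e "INSERT INTO my_keyspace.events (id, payload) VALUES (uuid(), 'hello') USING TTL 3600;"
```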
-
-

Fully expired sstables

-

If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called sstableexpiredblockers that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -TimeWindowCompactionStrategy (and the deprecated DateTieredCompactionStrategy).

-
-
-

Repaired/unrepaired data

-

With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables.

-
-
-

Data directories

-

Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making data live, tombstones and actual data are always kept in the same data directory. This way, if a disk is lost, all versions of a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is run in addition to the compaction strategy instances containing repaired/unrepaired data; this means that if you have 4 data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding data getting undeleted:

-
    -
  • It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings -and each one can run compactions independently from the others.
  • -
  • Users can backup and restore a single data directory.
  • -
  • Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk backing two data directories, the big one will be limited by the small one. One workaround is to create more data directories backed by the big disk.
  • -
-
-
-

Single sstable tombstone compaction

-

When an sstable is written, a histogram of the tombstone expiry times is created and this is used to try to find sstables with very many tombstones and run single sstable compaction on that sstable in the hope of being able to drop tombstones in that sstable. Before starting this it is also checked how likely it is that any tombstones will actually be able to be dropped, based on how much this sstable overlaps with other sstables. To avoid most of these checks the compaction option unchecked_tombstone_compaction can be enabled.

-
-
-

Common options

-

There are a number of common options for all the compaction strategies:

-
-
enabled (default: true)
-
Whether minor compactions should run. Note that you can have ‘enabled’: true as a compaction option and then do -‘nodetool enableautocompaction’ to start running compactions.
-
tombstone_threshold (default: 0.2)
-
How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-
tombstone_compaction_interval (default: 86400s (1 day))
-
Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure -that one sstable is not constantly getting recompacted - this option states how often we should try for a given -sstable.
-
log_all (default: false)
-
New detailed compaction logging, see below.
-
unchecked_tombstone_compaction (default: false)
-
The single sstable compaction has quite strict checks for whether it should be started, this option disables those -checks and for some usecases this might be needed. Note that this does not change anything for the actual -compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able -to drop any tombstones.
-
only_purge_repaired_tombstone (default: false)
-
Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-
min_threshold (default: 4)
-
Lower limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
max_threshold (default: 32)
-
Upper limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
-

Further, see the section on each strategy for specific additional options.

-
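As an illustration (keyspace and table names are placeholders), several of the common options above can be combined in a single ALTER TABLE; the class must be restated whenever the compaction map is changed:

```bash
# All compaction option values are passed as text in the CQL map literal.
cqlsh -e "ALTER TABLE my_keyspace.my_table WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'tombstone_threshold': '0.2', 'unchecked_tombstone_compaction': 'true', 'min_threshold': '4', 'max_threshold': '32'};"
```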
-
-

Compaction nodetool commands

-

The nodetool utility provides a number of commands related to compaction:

-
-
enableautocompaction
-
Enable compaction.
-
disableautocompaction
-
Disable compaction.
-
setcompactionthroughput
-
How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this -throughput.
-
compactionstats
-
Statistics about current and pending compactions.
-
compactionhistory
-
List details about the last compactions.
-
setcompactionthreshold
-
Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
-
-
-
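A few of these in practice (the throughput value is chosen purely as an illustration):

```bash
# Show current and pending compactions, then recent compaction history.
nodetool compactionstats
nodetool compactionhistory

# Temporarily raise the compaction throughput cap to 64 MB/s (0 removes the cap).
nodetool setcompactionthroughput 64

# Re-enable automatic minor compactions for one table (placeholder names).
nodetool enableautocompaction my_keyspace my_table
```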
-

Switching the compaction strategy and options using JMX

-

It is possible to switch compaction strategies and their options on just a single node using JMX; this is a great way to experiment with settings without affecting the whole cluster. The mbean is:

-
org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>
-
-
-

and the attribute to change is CompactionParameters or CompactionParametersJson if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an ALTER TABLE statement - -for example:

-
{ 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}
-
-
-

The setting is kept until someone executes an ALTER TABLE that touches the compaction -settings or restarts the node.

-
-
-

More detailed compaction logging

-

Enable with the compaction option log_all and a more detailed compaction log file will be produced in your log -directory.

-
-
-

Size Tiered Compaction Strategy

-

The basic idea of SizeTieredCompactionStrategy (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within bucket_low and bucket_high of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket’s sstables takes the most reads.

-
-

Major compaction

-

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%… of the total size.

-
-
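A sketch of both variants (names are placeholders):

```bash
# Major compaction of one table; produces one sstable per data directory
# for repaired data and one for unrepaired data.
nodetool compact my_keyspace my_table

# The same, but split the output into progressively smaller sstables.
nodetool compact -s my_keyspace my_table
```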
-

STCS options

-
-
min_sstable_size (default: 50MB)
-
Sstables smaller than this are put in the same bucket.
-
bucket_low (default: 0.5)
-
How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if bucket_low * avg_bucket_size < sstable_size (and the bucket_high condition holds, see below), then -the sstable is added to the bucket.
-
bucket_high (default: 1.5)
-
How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if sstable_size < bucket_high * avg_bucket_size (and the bucket_low condition holds, see above), then -the sstable is added to the bucket.
-
-
-
-

Defragmentation

-

Defragmentation is done when many sstables are touched during a read. The result of the read is put into the memtable so that the next read will not have to touch as many sstables. This can cause writes on a read-only cluster.

-
-
-
-

Leveled Compaction Strategy

-

The idea of LeveledCompactionStrategy (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here.

-

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can’t compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory.

-

When deciding which level to compact, LCS checks the higher levels first (with LCS, a “higher” level is one with a higher number, L0 being the lowest one) and if the level is behind, a compaction will be started in that level.

-
-

Major compaction

-

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, it continues with L2 etc. This is suboptimal and will change to create all the sstables in a high level instead; see CASSANDRA-11817.

-
-
-

Bootstrapping

-

During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

-
-
-

STCS in L0

-

If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better.

-
-
-

Starved sstables

-

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is only enough data to actually get an L4 on the node, the leftover sstables in L6 will get starved and not compacted. This can happen if a user changes sstable_size_in_mb from 5MB to 160MB for example. To avoid this LCS tries to include those starved high level sstables in other compactions if there have been 25 compaction rounds where the highest level has not been involved.

-
-
-

LCS options

-
-
sstable_size_in_mb (default: 160MB)
-
The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very -large partitions on the node.
-
fanout_size (default: 10)
-
The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning -this option.
-
-

LCS also supports the cassandra.disable_stcs_in_l0 startup option (-Dcassandra.disable_stcs_in_l0=true) to avoid doing STCS in L0.

-
-
-
-

Time Window CompactionStrategy

-

TimeWindowCompactionStrategy (TWCS) is designed specifically for workloads where it’s beneficial to have data on disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using SizeTieredCompactionStrategy or LeveledCompactionStrategy. The basic concept is that TimeWindowCompactionStrategy will create one sstable per time window, where a window is simply calculated as the combination of two primary options:

-
-
compaction_window_unit (default: DAYS)
-
A Java TimeUnit (MINUTES, HOURS, or DAYS).
-
compaction_window_size (default: 1)
-
The number of units that make up a window.
-
-

Taken together, the operator can specify windows of virtually any size, and TimeWindowCompactionStrategy will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using SizeTieredCompactionStrategy.

-

Ideally, operators should select a compaction_window_unit and compaction_window_size pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -('compaction_window_unit':'DAYS','compaction_window_size':3).

-
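For instance, the 3-day window from the example above could be applied to an existing table like this (a sketch; the table name is a placeholder):

```bash
# Apply TWCS with a 3-day window to an existing table.
cqlsh -e "ALTER TABLE my_keyspace.sensor_data WITH compaction = {'class': 'TimeWindowCompactionStrategy', 'compaction_window_unit': 'DAYS', 'compaction_window_size': '3'};"
```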
-

TimeWindowCompactionStrategy Operational Concerns

-

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

-
    -
  • If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables -and flushed into the same SSTable, where it will remain comingled.
  • -
  • If the user’s read requests for old data cause read repairs that pull old data into the current memtable, that data -will be comingled and flushed into the same SSTable.
  • -
-

While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL USING TIMESTAMP. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled), and disable background read -repair by setting the table’s read_repair_chance and dclocal_read_repair_chance to 0.

-
-
-

Changing TimeWindowCompactionStrategy Options

-

Operators wishing to enable TimeWindowCompactionStrategy on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected.

-

Operators wishing to change compaction_window_unit or compaction_window_size can do so, but may trigger additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple windows.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/operating/compression.html b/src/doc/3.11.6/operating/compression.html deleted file mode 100644 index e5c193b46..000000000 --- a/src/doc/3.11.6/operating/compression.html +++ /dev/null @@ -1,187 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compression" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compression

-

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of data on disk by compressing the SSTable in user-configurable compression chunks (chunk_length_in_kb). Because Cassandra SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so on).

-
-

Configuring Compression

-

Compression is configured on a per-table basis as an optional argument to CREATE TABLE or ALTER TABLE. By -default, three options are relevant:

-
    -
  • class specifies the compression class - Cassandra provides three classes (LZ4Compressor, -SnappyCompressor, and DeflateCompressor ). The default is LZ4Compressor.
  • -
  • chunk_length_in_kb specifies the number of kilobytes of data per compression chunk. The default is 64KB.
  • -
  • crc_check_chance determines how likely Cassandra is to verify the checksum on each compression chunk during -reads. The default is 1.0.
  • -
-

Users can set compression using the following syntax:

-
CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-
-

Or

-
ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};
-
-
-

Once enabled, compression can be disabled with ALTER TABLE setting enabled to false:

-
ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-
-

Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ALTER TABLE, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the SSTables on disk, -re-compressing the data in the process.

-
-
-

Benefits and Uses

-

Compression’s primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk.

-

Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well.

-
-
-

Operational Impact

-
    -
  • Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per -terabyte of data on disk, though the exact usage varies with chunk_length_in_kb and compression ratios.
  • -
  • Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as -non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.
  • -
  • The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a -way to ensure correctness of data on disk, compressed tables allow the user to set crc_check_chance (a float from -0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.
  • -
-
-
-

Advanced Use

-

Advanced users can provide their own compression class by implementing the interface at -org.apache.cassandra.io.compress.ICompressor.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/operating/hardware.html b/src/doc/3.11.6/operating/hardware.html deleted file mode 100644 index f2c573f07..000000000 --- a/src/doc/3.11.6/operating/hardware.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hardware Choices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hardware Choices

-

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM.

-
-

CPU

-

Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes.

-
-
-

Memory

-

Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java’s Xmx system parameter). In addition to the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system’s page cache, storing recently accessed portions of files in RAM for rapid re-use.

-

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest:

-
    -
  • ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
  • -
  • The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM
  • -
  • Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
  • -
  • Heaps larger than 12GB should consider G1GC
  • -
-
-
-

Disks

-

Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables.

-

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files.

-

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra’s sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it’s important that the commitlog -(commitlog_directory) be on one physical disk (not simply a partition, but a physical disk), and the data files -(data_file_directories) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk.

-

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it’s typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5.

-
-
-

Common Cloud Choices

-

Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include:

-
    -
  • m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate -workloads
  • -
  • i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs
  • -
  • m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) -storage
  • -
-

Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/operating/hints.html b/src/doc/3.11.6/operating/hints.html deleted file mode 100644 index 394e65a4e..000000000 --- a/src/doc/3.11.6/operating/hints.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hints" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.6/operating/index.html b/src/doc/3.11.6/operating/index.html deleted file mode 100644 index 34cc9bdac..000000000 --- a/src/doc/3.11.6/operating/index.html +++ /dev/null @@ -1,215 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Operating Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Operating Cassandra

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/operating/metrics.html b/src/doc/3.11.6/operating/metrics.html deleted file mode 100644 index b87d0a463..000000000 --- a/src/doc/3.11.6/operating/metrics.html +++ /dev/null @@ -1,1601 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Monitoring" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Monitoring

-

Metrics in Cassandra are managed using the Dropwizard Metrics library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of built in and third party reporter plugins.

-

Metrics are collected for a single node. It’s up to the operator to use an external monitoring system to aggregate them.

-
-

Metric Types

-

All metrics reported by Cassandra fit into one of the following types.

-
-
Gauge
-
An instantaneous measurement of a value.
-
Counter
-
A gauge for an AtomicLong instance. Typically this is consumed by monitoring the change since the last call to -see if there is a large increase compared to the norm.
-
Histogram
-

Measures the statistical distribution of values in a stream of data.

-

In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th -percentiles.

-
-
Timer
-
Measures both the rate that a particular piece of code is called and the histogram of its duration.
-
Latency
-
Special type that tracks latency (in microseconds) with a Timer plus a Counter that tracks the total latency -accrued since starting. The former is useful if you track the change in total latency since the last check. Each -metric name of this type will have ‘Latency’ and ‘TotalLatency’ appended to it.
-
Meter
-
A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving -average throughputs.
-
-
-
-

Table Metrics

-

Each table in Cassandra has metrics responsible for tracking its state and performance.

-

The metric names are all appended with the specific Keyspace and Table name.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table> name=<MetricName>
-
-
-

Note

-

There is a special table called ‘all’ without a keyspace. This represents the aggregation of metrics across -all tables and keyspaces on the node.

-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
MemtableOnHeapSizeGauge<Long>Total amount of data stored in the memtable that resides on-heap, including column related overhead and partitions overwritten.
MemtableOffHeapSizeGauge<Long>Total amount of data stored in the memtable that resides off-heap, including column related overhead and partitions overwritten.
MemtableLiveDataSizeGauge<Long>Total amount of live data stored in the memtable, excluding any data structure overhead.
AllMemtablesOnHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides on-heap.
AllMemtablesOffHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides off-heap.
AllMemtablesLiveDataSizeGauge<Long>Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead.
MemtableColumnsCountGauge<Long>Total number of columns present in the memtable.
MemtableSwitchCountCounterNumber of times flush has resulted in the memtable being switched out.
CompressionRatioGauge<Double>Current compression ratio for all SSTables.
EstimatedPartitionSizeHistogramGauge<long[]>Histogram of estimated partition size (in bytes).
EstimatedPartitionCountGauge<Long>Approximate number of keys in table.
EstimatedColumnCountHistogramGauge<long[]>Histogram of estimated number of columns.
SSTablesPerReadHistogramHistogramHistogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into account.
ReadLatencyLatencyLocal read latency for this table.
RangeLatencyLatencyLocal range scan latency for this table.
WriteLatencyLatencyLocal write latency for this table.
CoordinatorReadLatencyTimerCoordinator read latency for this table.
CoordinatorScanLatencyTimerCoordinator range scan latency for this table.
PendingFlushesCounterEstimated number of flush tasks pending for this table.
BytesFlushedCounterTotal number of bytes flushed since server [re]start.
CompactionBytesWrittenCounterTotal number of bytes written by compaction since server [re]start.
PendingCompactionsGauge<Integer>Estimate of number of pending compactions for this table.
LiveSSTableCountGauge<Integer>Number of SSTables on disk for this table.
LiveDiskSpaceUsedCounterDisk space used by SSTables belonging to this table (in bytes).
TotalDiskSpaceUsedCounterTotal disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC’d.
MinPartitionSizeGauge<Long>Size of the smallest compacted partition (in bytes).
MaxPartitionSizeGauge<Long>Size of the largest compacted partition (in bytes).
MeanPartitionSizeGauge<Long>Size of the average compacted partition (in bytes).
BloomFilterFalsePositivesGauge<Long>Number of false positives on table’s bloom filter.
BloomFilterFalseRatioGauge<Double>False positive ratio of table’s bloom filter.
BloomFilterDiskSpaceUsedGauge<Long>Disk space used by bloom filter (in bytes).
BloomFilterOffHeapMemoryUsedGauge<Long>Off-heap memory used by bloom filter.
IndexSummaryOffHeapMemoryUsedGauge<Long>Off-heap memory used by index summary.
CompressionMetadataOffHeapMemoryUsedGauge<Long>Off-heap memory used by compression meta data.
KeyCacheHitRateGauge<Double>Key cache hit rate for this table.
TombstoneScannedHistogramHistogramHistogram of tombstones scanned in queries on this table.
LiveScannedHistogramHistogramHistogram of live cells scanned in queries on this table.
ColUpdateTimeDeltaHistogramHistogramHistogram of column update time delta on this table.
ViewLockAcquireTimeTimerTime taken acquiring a partition lock for materialized view updates on this table.
ViewReadTimeTimerTime taken during the local read of a materialized view update.
TrueSnapshotsSizeGauge<Long>Disk space used by snapshots of this table including all SSTable components.
RowCacheHitOutOfRangeCounterNumber of table row cache hits that do not satisfy the query filter, thus went to disk.
RowCacheHitCounterNumber of table row cache hits.
RowCacheMissCounterNumber of table row cache misses.
CasPrepareLatencyLatency of paxos prepare round.
CasProposeLatencyLatency of paxos propose round.
CasCommitLatencyLatency of paxos commit round.
PercentRepairedGauge<Double>Percent of table data that is repaired on disk.
SpeculativeRetriesCounterNumber of times speculative retries were sent for this table.
WaitingOnFreeMemtableSpaceHistogramHistogram of time spent waiting for free memtable space, either on- or off-heap.
DroppedMutationsCounterNumber of dropped mutations on this table.
-
-
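Besides JMX, many of the per-table metrics above are summarised by nodetool; as a quick sketch (placeholder name):

```bash
# Prints read/write latencies, sstable counts, bloom filter false positives,
# partition size estimates, etc. for the given table.
nodetool tablestats my_keyspace.my_table
```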
-

Keyspace Metrics

-

Each keyspace in Cassandra has metrics responsible for tracking its state and performance.

-

These metrics are the same as the Table Metrics above, only they are aggregated at the Keyspace level.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.keyspace.<MetricName>.<Keyspace>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Keyspace scope=<Keyspace> name=<MetricName>
-
-
-
-

ThreadPool Metrics

-

Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It’s important to monitor the state of these thread pools since they can tell you how saturated a -node is.

-
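A quick way to eyeball thread pool saturation without a JMX client is nodetool, for example:

```bash
# Shows active, pending, completed and blocked task counts per thread pool,
# plus dropped message counts.
nodetool tpstats
```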

The metric names are all appended with the specific ThreadPool name. The thread pools are also categorized under a -specific type.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ThreadPools.<MetricName>.<Path>.<ThreadPoolName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ThreadPools scope=<ThreadPoolName> type=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ActiveTasksGauge<Integer>Number of tasks being actively worked on by this pool.
PendingTasksGauge<Integer>Number of tasks queued up on this pool.
CompletedTasksCounterNumber of tasks completed.
TotalBlockedTasksCounterNumber of tasks that were blocked due to queue saturation.
CurrentlyBlockedTaskCounterNumber of tasks that are currently blocked due to queue saturation but on retry will become unblocked.
MaxPoolSizeGauge<Integer>The maximum number of threads in this pool.
-

The following thread pools can be monitored.

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Native-Transport-RequeststransportHandles client CQL requests
CounterMutationStagerequestResponsible for counter writes
ViewMutationStagerequestResponsible for materialized view writes
MutationStagerequestResponsible for all other writes
ReadRepairStagerequestReadRepair happens on this thread pool
ReadStagerequestLocal reads run on this thread pool
RequestResponseStagerequestCoordinator requests to the cluster run on this thread pool
AntiEntropyStageinternalBuilds merkle tree for repairs
CacheCleanupExecutorinternalCache maintenance performed on this thread pool
CompactionExecutorinternalCompactions are run on these threads
GossipStageinternalHandles gossip requests
HintsDispatcherinternalPerforms hinted handoff
InternalResponseStageinternalResponsible for intra-cluster callbacks
MemtableFlushWriterinternalWrites memtables to disk
MemtablePostFlushinternalCleans up commit log after memtable is written to disk
MemtableReclaimMemoryinternalMemtable recycling
MigrationStageinternalRuns schema migrations
MiscStageinternalMiscellaneous tasks run here
PendingRangeCalculatorinternalCalculates token range
PerDiskMemtableFlushWriter_0internalResponsible for writing a spec (there is one of these per disk 0-N)
SamplerinternalResponsible for re-sampling the index summaries of SStables
SecondaryIndexManagementinternalPerforms updates to secondary indexes
ValidationExecutorinternalPerforms validation compaction or scrubbing
-
-
-

Client Request Metrics

-

Client requests have their own set of metrics that encapsulate the work happening at coordinator level.

-

Different types of client requests are broken down by RequestType.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ClientRequest.<MetricName>.<RequestType>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ClientRequest scope=<RequestType> name=<MetricName>
-
RequestType:

CASRead

-
Description:

Metrics related to transactional read requests.

-
Metrics:
Name | Type | Description
Timeouts | Counter | Number of timeouts encountered.
Failures | Counter | Number of transaction failures encountered.
Latency | Latency | Transaction read latency.
Unavailables | Counter | Number of unavailable exceptions encountered.
UnfinishedCommit | Counter | Number of transactions that were committed on read.
ConditionNotMet | Counter | Number of transaction preconditions that did not match current values.
ContentionHistogram | Histogram | How many contended reads were encountered.
-
RequestType:

CASWrite

-
Description:

Metrics related to transactional write requests.

-
Metrics:
Name | Type | Description
Timeouts | Counter | Number of timeouts encountered.
Failures | Counter | Number of transaction failures encountered.
Latency | Latency | Transaction write latency.
UnfinishedCommit | Counter | Number of transactions that were committed on write.
ConditionNotMet | Counter | Number of transaction preconditions that did not match current values.
ContentionHistogram | Histogram | How many contended writes were encountered.
-
RequestType:

Read

-
Description:

Metrics related to standard read requests.

-
Metrics:
Name | Type | Description
Timeouts | Counter | Number of timeouts encountered.
Failures | Counter | Number of read failures encountered.
Latency | Latency | Read latency.
Unavailables | Counter | Number of unavailable exceptions encountered.
-
RequestType:

RangeSlice

-
Description:

Metrics related to token range read requests.

-
Metrics:
Name | Type | Description
Timeouts | Counter | Number of timeouts encountered.
Failures | Counter | Number of range query failures encountered.
Latency | Latency | Range query latency.
Unavailables | Counter | Number of unavailable exceptions encountered.
-
RequestType:

Write

-
Description:

Metrics related to regular write requests.

-
Metrics:
Name | Type | Description
Timeouts | Counter | Number of timeouts encountered.
Failures | Counter | Number of write failures encountered.
Latency | Latency | Write latency.
Unavailables | Counter | Number of unavailable exceptions encountered.
-
RequestType:

ViewWrite

-
Description:

Metrics related to materialized view writes.

-
Metrics:
Name | Type | Description
Timeouts | Counter | Number of timeouts encountered.
Failures | Counter | Number of transaction failures encountered.
Unavailables | Counter | Number of unavailable exceptions encountered.
ViewReplicasAttempted | Counter | Total number of attempted view replica writes.
ViewReplicasSuccess | Counter | Total number of succeeded view replica writes.
ViewPendingMutations | Gauge&lt;Long&gt; | ViewReplicasAttempted - ViewReplicasSuccess.
ViewWriteLatency | Timer | Time between when the mutation is applied to the base table and when CL.ONE is achieved on the view.
-
-
-
-

Cache Metrics

-

Cassandra caches have metrics to track their effectiveness, though the Table Metrics might be more useful.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>
-
Name | Type | Description
Capacity | Gauge&lt;Long&gt; | Cache capacity in bytes.
Entries | Gauge&lt;Integer&gt; | Total number of cache entries.
FifteenMinuteCacheHitRate | Gauge&lt;Double&gt; | 15m cache hit rate.
FiveMinuteCacheHitRate | Gauge&lt;Double&gt; | 5m cache hit rate.
OneMinuteCacheHitRate | Gauge&lt;Double&gt; | 1m cache hit rate.
HitRate | Gauge&lt;Double&gt; | All-time cache hit rate.
Hits | Meter | Total number of cache hits.
Misses | Meter | Total number of cache misses.
MissLatency | Timer | Latency of misses.
Requests | Gauge&lt;Long&gt; | Total number of cache requests.
Size | Gauge&lt;Long&gt; | Total size of occupied cache, in bytes.
-

The following caches are covered:

Name | Description
CounterCache | Keeps hot counters in memory for performance.
ChunkCache | In-process uncompressed page cache.
KeyCache | Cache for partition to sstable offsets.
RowCache | Cache for rows kept in memory.
-
-

Note

-

Misses and MissLatency are only defined for the ChunkCache

-
-
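For a quick command-line check of cache effectiveness, nodetool also surfaces these values; the invocation below is illustrative and assumes a locally running node with default JMX settings.

```bash
# The node summary includes key, row and counter cache sizes and hit rates
nodetool info
```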
-
-

CQL Metrics

-

Metrics specific to CQL prepared statement caching.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CQL.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CQL name=<MetricName>
-
Name | Type | Description
PreparedStatementsCount | Gauge&lt;Integer&gt; | Number of cached prepared statements.
PreparedStatementsEvicted | Counter | Number of prepared statements evicted from the prepared statement cache.
PreparedStatementsExecuted | Counter | Number of prepared statements executed.
RegularStatementsExecuted | Counter | Number of non-prepared statements executed.
PreparedStatementsRatio | Gauge&lt;Double&gt; | Percentage of statements that are prepared vs unprepared.
-
-
-

DroppedMessage Metrics

-

Metrics specific to tracking dropped messages for different types of requests. Dropped writes are stored and retried by Hinted Handoff.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.DroppedMessages.<MetricName>.<Type>
-
JMX MBean
-
org.apache.cassandra.metrics:type=DroppedMetrics scope=<Type> name=<MetricName>
-
Name | Type | Description
CrossNodeDroppedLatency | Timer | The dropped latency across nodes.
InternalDroppedLatency | Timer | The dropped latency within node.
Dropped | Meter | Number of dropped messages.
-

The different types of messages tracked are:

Name | Description
BATCH_STORE | Batchlog write
BATCH_REMOVE | Batchlog cleanup (after successfully applied)
COUNTER_MUTATION | Counter writes
HINT | Hint replay
MUTATION | Regular writes
READ | Regular reads
READ_REPAIR | Read repair
PAGED_SLICE | Paged read
RANGE_SLICE | Token range read
REQUEST_RESPONSE | RPC callbacks
_TRACE | Tracing writes
-
-
-

Streaming Metrics

-

Metrics reported during Streaming operations, such as repair, bootstrap, rebuild.

-

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>
-
Name | Type | Description
IncomingBytes | Counter | Number of bytes streamed to this node from the peer.
OutgoingBytes | Counter | Number of bytes streamed to the peer endpoint from this node.
-
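During an active streaming operation (repair, bootstrap, rebuild) the byte counts above can also be observed per peer from the command line; this sketch assumes default JMX settings on the local node.

```bash
# Show active streaming sessions and bytes sent/received per peer
nodetool netstats
```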
-
-

Compaction Metrics

-

Metrics specific to Compaction work.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Compaction.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Compaction name=<MetricName>
-
Name | Type | Description
BytesCompacted | Counter | Total number of bytes compacted since server [re]start.
PendingTasks | Gauge&lt;Integer&gt; | Estimated number of compactions remaining to perform.
CompletedTasks | Gauge&lt;Long&gt; | Number of completed compactions since server [re]start.
TotalCompactionsCompleted | Meter | Throughput of completed compactions since server [re]start.
PendingTasksByTableName | Gauge&lt;Map&lt;String, Map&lt;String, Integer&gt;&gt;&gt; | Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in Table Metrics.
-
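The same pending/completed compaction information is available from the command line; the invocation below is illustrative.

```bash
# Show pending compaction tasks and the progress of currently running compactions
nodetool compactionstats
```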
-
-

CommitLog Metrics

-

Metrics specific to the CommitLog

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CommitLog.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CommitLog name=<MetricName>
-
Name | Type | Description
CompletedTasks | Gauge&lt;Long&gt; | Total number of commit log messages written since [re]start.
PendingTasks | Gauge&lt;Long&gt; | Number of commit log messages written but yet to be fsync'd.
TotalCommitLogSize | Gauge&lt;Long&gt; | Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocation | Timer | Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommit | Timer | The time spent waiting on CL fsync; for Periodic this only occurs when the sync is lagging its sync interval.
-
-
-

Storage Metrics

-

Metrics specific to the storage engine.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Storage.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Storage name=<MetricName>
-
Name | Type | Description
Exceptions | Counter | Number of internal exceptions caught. Under normal conditions this should be zero.
Load | Counter | Size, in bytes, of the on-disk data this node manages.
TotalHints | Counter | Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgress | Counter | Number of hints attempting to be sent currently.
-
-
-

HintedHandoff Metrics

-

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>
-
Name | Type | Description
Hints_created-&lt;PeerIP&gt; | Counter | Number of hints on disk for this peer.
Hints_not_stored-&lt;PeerIP&gt; | Counter | Number of hints not stored for this peer, due to being down past the configured hint window.
-
-
-

SSTable Index Metrics

-

Metrics specific to the SSTable index metadata.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry
-
JMX MBean
-
org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>
-
Name | Type | Description
IndexedEntrySize | Histogram | Histogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCount | Histogram | Histogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGets | Histogram | Histogram of the number of index seeks performed per SSTable.
-
-
-

BufferPool Metrics

-

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC lower by recycling on- and off-heap buffers.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.BufferPool.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=BufferPool name=<MetricName>
-
Name | Type | Description
Size | Gauge&lt;Long&gt; | Size, in bytes, of the managed buffer pool.
Misses | Meter | The rate of misses in the pool. The higher this is the more allocations incurred.
-
-
-

Client Metrics

-

Metrics specific to client management.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Client.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Client name=<MetricName>
-
Name | Type | Description
connectedNativeClients | Counter | Number of clients connected to this node's native protocol server.
connectedThriftClients | Counter | Number of clients connected to this node's Thrift protocol server.
-
-
-

JVM Metrics

-

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using Metric Reporters.

-
-

BufferPool

-
-
Metric Name
-
jvm.buffers.<direct|mapped>.<MetricName>
-
JMX MBean
-
java.nio:type=BufferPool name=<direct|mapped>
-
Name | Type | Description
Capacity | Gauge&lt;Long&gt; | Estimated total capacity of the buffers in this pool
Count | Gauge&lt;Long&gt; | Estimated number of buffers in the pool
Used | Gauge&lt;Long&gt; | Estimated memory that the Java virtual machine is using for this buffer pool
-
-
-

FileDescriptorRatio

-
-
Metric Name
-
jvm.fd.<MetricName>
-
JMX MBean
-
java.lang:type=OperatingSystem name=<OpenFileDescriptorCount|MaxFileDescriptorCount>
-
Name | Type | Description
Usage | Ratio | Ratio of used to total file descriptors
-
-
-

GarbageCollector

-
-
Metric Name
-
jvm.gc.<gc_type>.<MetricName>
-
JMX MBean
-
java.lang:type=GarbageCollector name=<gc_type>
-
Name | Type | Description
Count | Gauge&lt;Long&gt; | Total number of collections that have occurred
Time | Gauge&lt;Long&gt; | Approximate accumulated collection elapsed time in milliseconds
-
-
-

Memory

-
-
Metric Name
-
jvm.memory.<heap/non-heap/total>.<MetricName>
-
JMX MBean
-
java.lang:type=Memory
-
Name | Type | Description
Committed | Gauge&lt;Long&gt; | Amount of memory in bytes that is committed for the JVM to use
Init | Gauge&lt;Long&gt; | Amount of memory in bytes that the JVM initially requests from the OS
Max | Gauge&lt;Long&gt; | Maximum amount of memory in bytes that can be used for memory management
Usage | Ratio | Ratio of used to maximum memory
Used | Gauge&lt;Long&gt; | Amount of used memory in bytes
-
-
-

MemoryPool

-
-
Metric Name
-
jvm.memory.pools.<memory_pool>.<MetricName>
-
JMX MBean
-
java.lang:type=MemoryPool name=<memory_pool>
-
Name | Type | Description
Committed | Gauge&lt;Long&gt; | Amount of memory in bytes that is committed for the JVM to use
Init | Gauge&lt;Long&gt; | Amount of memory in bytes that the JVM initially requests from the OS
Max | Gauge&lt;Long&gt; | Maximum amount of memory in bytes that can be used for memory management
Usage | Ratio | Ratio of used to maximum memory
Used | Gauge&lt;Long&gt; | Amount of used memory in bytes
-
-
-
-

JMX

-

Any JMX-based client can access metrics from Cassandra.

-

If you wish to access JMX metrics over HTTP, it's possible to download Mx4jTool and place mx4j-tools.jar into the classpath. On startup you will see in the log:

-
HttpAdaptor version 3.0.2 started on port 8081
-
-
-

To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default), edit conf/cassandra-env.sh and uncomment:

-
#MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0"
#MX4J_PORT="-Dmx4jport=8081"
-
-
-
-
-

Metric Reporters

-

As mentioned at the top of this section on monitoring, Cassandra metrics can be exported to a number of monitoring systems via a number of built-in and third-party reporter plugins.

-

The configuration of these plugins is managed by the metrics reporter config project. There is a sample configuration file located at conf/metrics-reporter-config-sample.yaml.

-

Once configured, you simply start Cassandra with the flag -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml. The specified .yaml file plus any third-party reporter jars must all be in Cassandra's classpath.

-
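As a rough, hedged sketch only (the exact schema is defined by the external metrics-reporter-config project and its sample file in conf/; the host, port, prefix and metric-name pattern below are placeholders), a Graphite reporter entry might look something like this:

```yaml
# Illustrative metrics-reporter-config entry for a Graphite backend.
# Host, port, prefix and the metric name pattern are placeholders, not recommendations.
graphite:
  - period: 60
    timeunit: 'SECONDS'
    prefix: 'cassandra.node1'
    hosts:
      - host: 'graphite.example.com'
        port: 2003
    predicate:
      color: 'white'
      useQualifiedName: true
      patterns:
        - '^org.apache.cassandra.metrics.+'
```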
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/operating/read_repair.html b/src/doc/3.11.6/operating/read_repair.html deleted file mode 100644 index a9bc385ef..000000000 --- a/src/doc/3.11.6/operating/read_repair.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Read repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.6/operating/repair.html b/src/doc/3.11.6/operating/repair.html deleted file mode 100644 index d05746b16..000000000 --- a/src/doc/3.11.6/operating/repair.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.6/operating/security.html b/src/doc/3.11.6/operating/security.html deleted file mode 100644 index 35753bf98..000000000 --- a/src/doc/3.11.6/operating/security.html +++ /dev/null @@ -1,446 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-

There are three main components to the security features provided by Cassandra:

-
    -
  • TLS/SSL encryption for client and inter-node communication
  • -
  • Client authentication
  • -
  • Authorization
  • -
-
-

TLS/SSL Encryption

-

Cassandra provides secure communication between a client machine and a database cluster and between nodes within a cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for client-to-node and node-to-node encryption are managed separately and may be configured independently.

-

In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can be overridden using the settings in cassandra.yaml, but this is not recommended unless there are policies in place which dictate certain settings, or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be updated.

-

FIPS-compliant settings can be configured at the JVM level and should not involve changing encryption settings in cassandra.yaml. See the Java documentation on FIPS for more details.

-

For information on generating the keystore and truststore files used in SSL communications, see the Java documentation on creating keystores.

-
-

Inter-node Encryption

-

The settings for managing inter-node encryption are found in cassandra.yaml in the server_encryption_options section. To enable inter-node encryption, change the internode_encryption setting from its default value of none to one of: rack, dc or all.

-
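A minimal sketch of what this might look like in cassandra.yaml is shown below; the keystore and truststore paths and passwords are placeholders, and all other options are left at their defaults.

```yaml
# Sketch of inter-node encryption settings (placeholders, not a hardened configuration)
server_encryption_options:
    internode_encryption: all        # none | rack | dc | all
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra
```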
-
-

Client to Node Encryption

-

The settings for managing client-to-node encryption are found in cassandra.yaml in the client_encryption_options section. There are two primary toggles here for enabling encryption, enabled and optional.

-
    -
  • If neither is set to true, client connections are entirely unencrypted.
  • -
  • If enabled is set to true and optional is set to false, all client connections must be secured.
  • -
  • If both options are set to true, both encrypted and unencrypted connections are supported using the same port. Client connections using encryption with this configuration will be automatically detected and handled by the server.
  • -
-

As an alternative to the optional setting, separate ports can also be configured for secure and insecure connections where operational requirements demand it. To do so, set optional to false and use the native_transport_port_ssl setting in cassandra.yaml to specify the port to be used for secure client communication.

-
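Again as a sketch with placeholder keystore path and password, enabling mandatory client encryption in cassandra.yaml might look like:

```yaml
# Sketch of client-to-node encryption settings (placeholders only)
client_encryption_options:
    enabled: true
    optional: false
    keystore: conf/.keystore
    keystore_password: cassandra
```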
-
-
-

Roles

-

Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and permissions management. Role management is an extension point in Cassandra and may be configured using the role_manager setting in cassandra.yaml. The default setting uses CassandraRoleManager, an implementation which stores role information in the tables of the system_auth keyspace.

-

See also the CQL documentation on roles.

-
-
-

Authentication

-

Authentication is pluggable in Cassandra and is configured using the authenticator setting in cassandra.yaml. Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthenticator, which performs no authentication checks and therefore requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions.

-

The default distribution also includes PasswordAuthenticator, which stores encrypted credentials in a system table. This can be used to enable simple username/password authentication.

-
-

Enabling Password Authentication

-

Before enabling client authentication on the cluster, client applications should be pre-configured with their intended credentials. When a connection is initiated, the server will only ask for credentials once authentication is enabled, so setting up the client-side config in advance is safe. In contrast, as soon as a server has authentication enabled, any connection attempt without proper credentials will be rejected, which may cause availability problems for client applications. Once clients are set up and ready for authentication to be enabled, follow this procedure to enable it on the cluster.

-

Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect to this node during the setup process, so you may want to remove it from client config, block it at the network level, or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps:

-
    -
  1. Open a cqlsh session and change the replication factor of the system_auth keyspace. By default, this keyspace uses SimpleStrategy and a replication_factor of 1. It is recommended to change this for any non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to configure a replication factor of 3 to 5 per DC.
-
ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3};
-
-
-
    -
  2. Edit cassandra.yaml to change the authenticator option like so:
-
authenticator: PasswordAuthenticator
-
-
-
    -
  3. Restart the node.
  4. Open a new cqlsh session using the credentials of the default superuser:
-
cqlsh -u cassandra -p cassandra
-
-
-
    -
  5. During login, the credentials for the default superuser are read with a consistency level of QUORUM, whereas those for all other users (including superusers) are read at LOCAL_ONE. In the interests of performance and availability, as well as security, operators should create another superuser and disable the default one. This step is optional, but highly recommended. While logged in as the default superuser, create another superuser role which can be used to bootstrap further configuration.
-
# create a new superuser
CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super';
-
-
-
    -
  6. Start a new cqlsh session, this time logging in as the new superuser, and disable the default superuser.
-
ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false;
-
-
-
    -
  7. Finally, set up the roles and credentials for your application users with CREATE ROLE statements.
-

At the end of these steps, the one node is configured to use password authentication. To roll that out across the cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be fully enabled throughout the cluster.

-

Note that using PasswordAuthenticator also requires the use of CassandraRoleManager.

-

See also: Setting credentials for internal authentication, CREATE ROLE, ALTER ROLE, ALTER KEYSPACE and GRANT PERMISSION.

-
-
-
-

Authorization

-

Authorization is pluggable in Cassandra and is configured using the authorizer setting in cassandra.yaml. Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthorizer, which performs no checking and so effectively grants all permissions to all roles. This must be used if AllowAllAuthenticator is the configured authenticator.

-

The default distribution also includes CassandraAuthorizer, which does implement full permissions management functionality and stores its data in Cassandra system tables.

-
-

Enabling Internal Authorization

-

Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on a node which is not processing client requests.

-

The following assumes that authentication has already been enabled via the process outlined in Enabling Password Authentication. Perform these steps to enable internal authorization across the cluster:

-
    -
  1. On the selected node, edit cassandra.yaml to change the authorizer option like so:
-
authorizer: CassandraAuthorizer
-
-
-
    -
  2. Restart the node.
  3. Open a new cqlsh session using the credentials of a role with superuser credentials:
-
cqlsh -u dba -p super
-
-
-
    -
  4. Configure the appropriate access privileges for your clients using GRANT PERMISSION statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect, so disruption to clients is avoided.
-
GRANT SELECT ON ks.t1 TO db_user;
-
-
-
    -
  5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node restarts and clients reconnect, the enforcement of the granted permissions will begin.
-

See also: GRANT PERMISSION, GRANT ALL and REVOKE PERMISSION.

-
-
-
-

Caching

-

Enabling authentication and authorization places additional load on the cluster by frequently reading from the system_auth tables. Furthermore, these reads are in the critical paths of many client operations, and so have the potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role details are cached for a configurable period. The caching can be configured (and even disabled) from cassandra.yaml or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX are not persistent and will be re-read from cassandra.yaml when the node is restarted.

-

Each cache has 3 options which can be set:

-
-
Validity Period
-
Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache.
-
Refresh Rate
-
Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a shorter time than the validity period.
-
Max Entries
-
Controls the upper bound on cache size.
-
-

The naming for these options in cassandra.yaml follows the convention:

-
    -
  • <type>_validity_in_ms
  • -
  • <type>_update_interval_in_ms
  • -
  • <type>_cache_max_entries
  • -
-

Where <type> is one of credentials, permissions, or roles.

-
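For example, the roles, permissions and credentials caches could be tuned in cassandra.yaml roughly as follows; the values shown are placeholders, not recommendations.

```yaml
# Illustrative auth cache tuning (values are placeholders)
roles_validity_in_ms: 2000
roles_update_interval_in_ms: 1000
permissions_validity_in_ms: 2000
permissions_update_interval_in_ms: 1000
credentials_validity_in_ms: 2000
credentials_update_interval_in_ms: 1000
```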

As mentioned, these are also exposed via JMX in the mbeans under the org.apache.cassandra.auth domain.

-
-
-

JMX access

-

Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two providers are available: the first based on standard JMX security and the second which integrates more closely with Cassandra's own auth subsystem.

-

The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit cassandra-env.sh (or cassandra-env.ps1 on Windows) to change the LOCAL_JMX setting to yes. Under the standard configuration, when remote JMX connections are enabled, standard JMX authentication is also switched on.

-

Note that by default, local-only connections are not subject to authentication, but this can be enabled.

-

If enabling remote connections, it is recommended to also use SSL connections.

-

Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as nodetool, are correctly configured and working as expected.

-
-

Standard JMX Auth

-

Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in cassandra-env.sh by the line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

Edit the password file to add username/password pairs:

-
jmx_user jmx_password
-
-
-

Secure the credentials file so that only the user running the Cassandra process can read it:

-
$ chown cassandra:cassandra /etc/cassandra/jmxremote.password
$ chmod 400 /etc/cassandra/jmxremote.password
-
-
-

Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly blunt instrument in this context, as most operational tools in Cassandra require full read/write access. To configure a simple access file, uncomment this line in cassandra-env.sh:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

Then edit the access file to grant your JMX user readwrite permission:

-
jmx_user readwrite
-
-
-

Cassandra must be restarted to pick up the new settings.

-

See also : Using File-Based Password Authentication In JMX

-
-
-

Cassandra Integrated Auth

-

An alternative to the out-of-the-box JMX auth is to use Cassandra's own authentication and/or authorization providers for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not available until after a node has joined the ring, because the auth subsystem is not fully configured until that point. However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is recommended, where possible, to use local-only JMX auth during bootstrap and then, if remote connectivity is required, to switch to integrated auth once the node has joined the ring and initial setup is complete.

-

With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates can be managed centrally using just cqlsh. Furthermore, fine-grained control over exactly which operations are permitted on particular MBeans can be achieved via GRANT PERMISSION.

-

To enable integrated authentication, edit cassandra-env.sh to uncomment these lines:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
-
-
-

And disable the JMX standard auth by commenting this line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

To enable integrated authorization, uncomment this line:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
-
-
-

Check standard access control is off by ensuring this line is commented out:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

With integrated authentication and authorization enabled, operators can define specific roles and grant them access to the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as jconsole or jmc in read-only mode would be defined as:

-
CREATE ROLE jmx WITH LOGIN = false;
GRANT SELECT ON ALL MBEANS TO jmx;
GRANT DESCRIBE ON ALL MBEANS TO jmx;
GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx;
GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx;

# Grant the jmx role to one with login permissions so that it can access the JMX tooling
CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false;
GRANT jmx TO ks_user;
-
-
-

Fine grained access control to individual MBeans is also supported:

-
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user;
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner;
-
-
-

This permits the ks_user role to invoke methods on the MBean representing a single table in test_keyspace, while granting the same permission for all table-level MBeans in that keyspace to the ks_owner role.

-

Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so no further restarts are required if permissions are altered.

-

See also: Permissions.

-
-
-

JMX With SSL

-

JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit the relevant lines in cassandra-env.sh (or cassandra-env.ps1 on Windows) to uncomment and set the values of these properties as required:

-
-
com.sun.management.jmxremote.ssl
-
set to true to enable SSL
-
com.sun.management.jmxremote.ssl.need.client.auth
-
set to true to enable validation of client certificates
-
com.sun.management.jmxremote.registry.ssl
-
enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub
-
com.sun.management.jmxremote.ssl.enabled.protocols
-
by default, the protocols supported by the JVM will be used; override with a comma-separated list. Note that this is not usually necessary and using the defaults is the preferred option.
-
com.sun.management.jmxremote.ssl.enabled.cipher.suites
-
by default, the cipher suites supported by the JVM will be used; override with a comma-separated list. Note that this is not usually necessary and using the defaults is the preferred option.
-
javax.net.ssl.keyStore
-
set the path on the local filesystem of the keystore containing server private keys and public certificates
-
javax.net.ssl.keyStorePassword
-
set the password of the keystore file
-
javax.net.ssl.trustStore
-
if validation of client certificates is required, use this property to specify the path of the truststore containing the public certificates of trusted clients
-
javax.net.ssl.trustStorePassword
-
set the password of the truststore file
-
-
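Putting a few of these together, an illustrative cassandra-env.sh fragment might look like the following; the keystore path and password are placeholders.

```bash
# Illustrative cassandra-env.sh fragment enabling SSL for remote JMX.
# The keystore path and password are placeholders, not recommendations.
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=true"
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.registry.ssl=true"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/etc/cassandra/server-keystore.jks"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=changeit"
```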

See also: Oracle Java7 Docs, Monitor Java with JMX.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/operating/snitch.html b/src/doc/3.11.6/operating/snitch.html deleted file mode 100644 index 8072e67ed..000000000 --- a/src/doc/3.11.6/operating/snitch.html +++ /dev/null @@ -1,176 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Snitch" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Snitch

-

In Cassandra, the snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route requests efficiently.
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping machines into “datacenters” and “racks.” Cassandra will do its best not to have more than one replica on the same “rack” (which may not actually be a physical location).
  • -
-
-

Dynamic snitching

-

The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is configured with the following properties in cassandra.yaml (an illustrative settings block follows the list below):

-
    -
  • dynamic_snitch: whether the dynamic snitch should be enabled or disabled.
  • dynamic_snitch_update_interval_in_ms: controls how often to perform the more expensive part of host score calculation.
  • dynamic_snitch_reset_interval_in_ms: if set greater than zero and read_repair_chance is < 1.0, this will allow ‘pinning’ of replicas to hosts in order to increase cache capacity.
  • dynamic_snitch_badness_threshold: the badness threshold controls how much worse the pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned host was 20% worse than the fastest.
-
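An illustrative cassandra.yaml fragment for these settings is shown below; the values approximate the shipped defaults but should be treated as placeholders rather than recommendations.

```yaml
# Illustrative dynamic snitch settings (values approximate the shipped defaults)
dynamic_snitch: true
dynamic_snitch_update_interval_in_ms: 100
dynamic_snitch_reset_interval_in_ms: 600000
dynamic_snitch_badness_threshold: 0.1
```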
-
-

Snitch classes

-

The endpoint_snitch parameter in cassandra.yaml should be set to the class that implements IEndPointSnitch, which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center or on the same rack. Out of the box, Cassandra provides the following snitch implementations:

-
-
GossipingPropertyFileSnitch
This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in cassandra-rackdc.properties and propagated to other nodes via gossip (an example file is shown after this list). If cassandra-topology.properties exists, it is used as a fallback, allowing migration from the PropertyFileSnitch.

SimpleSnitch
Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for single-datacenter deployments.

PropertyFileSnitch
Proximity is determined by rack and data center, which are explicitly configured in cassandra-topology.properties.

Ec2Snitch
Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter and the Availability Zone as the rack. Only private IPs are used, so this will not work across multiple regions.

Ec2MultiRegionSnitch
Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the public IP as well). You will need to open the storage_port or ssl_storage_port on the public IP firewall (for intra-Region traffic, Cassandra will switch to the private IP after establishing a connection).

RackInferringSnitch
Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octets of each node’s IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an example of writing a custom Snitch class and is provided in that spirit.
-
-
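For reference, the cassandra-rackdc.properties file read by GossipingPropertyFileSnitch is a plain properties file; the datacenter and rack names below are placeholders.

```properties
# Hypothetical cassandra-rackdc.properties for GossipingPropertyFileSnitch
dc=DC1
rack=RAC1
```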
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/operating/topo_changes.html b/src/doc/3.11.6/operating/topo_changes.html deleted file mode 100644 index bd3b162a8..000000000 --- a/src/doc/3.11.6/operating/topo_changes.html +++ /dev/null @@ -1,214 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Adding, replacing, moving and removing nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Adding, replacing, moving and removing nodes

-
-

Bootstrap

-

Adding new nodes is called “bootstrapping”. The num_tokens parameter defines the number of virtual nodes (tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) the node will become responsible for.

-
-

Token allocation

-

With the default token allocation algorithm the new node will pick num_tokens random tokens to become responsible for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but it also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with acceptable overhead.

-

On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, the new node must be started with the JVM option -Dcassandra.allocate_tokens_for_keyspace=<keyspace>, where <keyspace> is the keyspace from which the algorithm can find the load information to optimize token assignment for.

-
-

Manual token assignment

-

You may specify a comma-separated list of tokens manually with the initial_token cassandra.yaml parameter, and if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment with an external tool or when restoring a node with its previous tokens.

-
-
-
-

Range streaming

-

After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become responsible for to stream data from. By default it will stream from the primary replica of each token range in order to guarantee that data in the new node will be consistent with the current state.

-

In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and potentially miss data from an unavailable replica, set the JVM flag -Dcassandra.consistent.rangemovement=false.

-
-
-

Resuming failed/hanged bootstrap

-

On 2.2+, if the bootstrap process fails, it’s possible to resume bootstrap from the previous saved state by calling nodetool bootstrap resume. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply restarting the node. In order to clean up bootstrap state and start fresh, you may set the JVM startup flag -Dcassandra.reset_bootstrap_progress=true.

-

On lower versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart the bootstrap process again.

-
-
-

Manual bootstrapping

-

It’s possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter auto_bootstrap: false. This may be useful when restoring a node from a backup or creating a new data center.

-
-
-
-

Removing nodes

-

You can take a node out of the cluster with nodetool decommission (run on the live node itself), or with nodetool removenode (run from any other machine) to remove a dead one. This will assign the ranges the old node was responsible for to other nodes, and replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If removenode is used, the data will stream from the remaining replicas.

-

No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at a different token on the ring, it should be removed manually.

-
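A sketch of the two removal paths from the command line; the host ID below is a placeholder taken from nodetool status.

```bash
# Run on the node that is leaving the cluster (live node)
nodetool decommission

# Run from any other node to remove a dead node, identified by its Host ID
nodetool removenode <host-id>
```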
-
-

Moving nodes

-

When num_tokens: 1 it’s possible to move the node position in the ring with nodetool move. Moving is both a convenience over, and more efficient than, decommission + bootstrap. After moving a node, nodetool cleanup should be run to remove any unnecessary data.

-
-
-

Replacing a dead node

-

In order to replace a dead node, start Cassandra with the JVM startup flag -Dcassandra.replace_address_first_boot=<dead_node_ip>. Once this property is enabled the node starts in a hibernate state, during which all the other nodes will see this node as down.

-

The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. The main difference from normal bootstrapping of a new node is that this new node will not accept any writes during this phase.

-

Once the bootstrapping is complete, the node will be marked “UP”; we rely on hinted handoff for making this node consistent (since we don’t accept writes since the start of the bootstrap).

-
-

Note

-

If the replacement process takes longer than max_hint_window_in_ms you MUST run repair to make the replaced node consistent again, since it missed ongoing writes during bootstrapping.

-
-
-
-

Monitoring progress

-

Bootstrap, replace, move and remove progress can be monitored using nodetool netstats, which will show the progress of the streaming operations.

-
-
-

Cleanup data after range movements

-

As a safety measure, Cassandra does not automatically remove data from nodes that “lose” part of their token range due to a range movement operation (bootstrap, move, replace). Run nodetool cleanup on the nodes that lost ranges to the joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be counted against the load on that node.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/search.html b/src/doc/3.11.6/search.html deleted file mode 100644 index b06aab8e1..000000000 --- a/src/doc/3.11.6/search.html +++ /dev/null @@ -1,103 +0,0 @@ ---- -layout: docpage - -title: "Search" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "" -doc-header-links: ' - -' -doc-search-path: "#" - -extra-footer: ' - - - - -' - ---- -
-
- -
-
-
- - - - -
- -
- - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/searchindex.js b/src/doc/3.11.6/searchindex.js deleted file mode 100644 index c7b0903f3..000000000 --- a/src/doc/3.11.6/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["architecture/dynamo","architecture/guarantees","architecture/index","architecture/overview","architecture/storage_engine","bugs","configuration/cassandra_config_file","configuration/index","contactus","cql/appendices","cql/changes","cql/ddl","cql/definitions","cql/dml","cql/functions","cql/index","cql/indexes","cql/json","cql/mvs","cql/security","cql/triggers","cql/types","data_modeling/index","development/code_style","development/how_to_commit","development/how_to_review","development/ide","development/index","development/patches","development/testing","faq/index","getting_started/configuring","getting_started/drivers","getting_started/index","getting_started/installing","getting_started/querying","index","operating/backups","operating/bloom_filters","operating/bulk_loading","operating/cdc","operating/compaction","operating/compression","operating/hardware","operating/hints","operating/index","operating/metrics","operating/read_repair","operating/repair","operating/security","operating/snitch","operating/topo_changes","tools/cqlsh","tools/index","tools/nodetool","tools/nodetool/assassinate","tools/nodetool/bootstrap","tools/nodetool/cleanup","tools/nodetool/clearsnapshot","tools/nodetool/clientstats","tools/nodetool/compact","tools/nodetool/compactionhistory","tools/nodetool/compactionstats","tools/nodetool/decommission","tools/nodetool/describecluster","tools/nodetool/describering","tools/nodetool/disableauditlog","tools/nodetool/disableautocompaction","tools/nodetool/disablebackup","tools/nodetool/disablebinary","tools/nodetool/disablefullquerylog","tools/nodetool/disablegossip","tools/nodetool/disablehandoff","tools/nodetool/disablehintsfordc","tools/nodetool/disableoldprotocolversions","tools/nodetool/drain","tools/nodetool/enableauditlog","tools/nodetool/enableautocompaction","tools/nodetool/enablebackup","tools/nodetool/enablebinary","tools/nodetool/enablefullquerylog","tools/nodetool/enablegossip","tools/nodetool/enablehandoff","tools/nodetool/enablehintsfordc","tools/nodetool/enableoldprotocolversions","tools/nodetool/failuredetector","tools/nodetool/flush","tools/nodetool/garbagecollect","tools/nodetool/gcstats","tools/nodetool/getbatchlogreplaythrottle","tools/nodetool/getcompactionthreshold","tools/nodetool/getcompactionthroughput","tools/nodetool/getconcurrency","tools/nodetool/getconcurrentcompactors","tools/nodetool/getconcurrentviewbuilders","tools/nodetool/getendpoints","tools/nodetool/getinterdcstreamthroughput","tools/nodetool/getlogginglevels","tools/nodetool/getmaxhintwindow","tools/nodetool/getreplicas","tools/nodetool/getseeds","tools/nodetool/getsstables","tools/nodetool/getstreamthroughput","tools/nodetool/gettimeout","tools/nodetool/gettraceprobability","tools/nodetool/gossipinfo","tools/nodetool/handoffwindow","tools/nodetool/help","tools/nodetool/import","tools/nodetool/info","tools/nodetool/invalidatecountercache","tools/nodetool/invalidatekeycache","tools/nodetool/invalidaterowcache","tools/nodetool/join","tools/nodetool/listsnapshots","tools/nodetool/move","tools/nodetool/netstats","tools/nodetool/nodetool","tools/nodetool/pausehandoff","tools/nodetool/profileload","tools/nodetool/proxyhistograms","tools/nodetool/rangekeysample","tools/nodetool/rebuild","tools/nodetool/rebuild_index","tools/nodetool/refresh","tools/n
odetool/refreshsizeestimates","tools/nodetool/reloadlocalschema","tools/nodetool/reloadseeds","tools/nodetool/reloadssl","tools/nodetool/reloadtriggers","tools/nodetool/relocatesstables","tools/nodetool/removenode","tools/nodetool/repair","tools/nodetool/repair_admin","tools/nodetool/replaybatchlog","tools/nodetool/resetfullquerylog","tools/nodetool/resetlocalschema","tools/nodetool/resumehandoff","tools/nodetool/ring","tools/nodetool/scrub","tools/nodetool/setbatchlogreplaythrottle","tools/nodetool/setcachecapacity","tools/nodetool/setcachekeystosave","tools/nodetool/setcompactionthreshold","tools/nodetool/setcompactionthroughput","tools/nodetool/setconcurrency","tools/nodetool/setconcurrentcompactors","tools/nodetool/setconcurrentviewbuilders","tools/nodetool/sethintedhandoffthrottlekb","tools/nodetool/setinterdcstreamthroughput","tools/nodetool/setlogginglevel","tools/nodetool/setmaxhintwindow","tools/nodetool/setstreamthroughput","tools/nodetool/settimeout","tools/nodetool/settraceprobability","tools/nodetool/sjk","tools/nodetool/snapshot","tools/nodetool/status","tools/nodetool/statusautocompaction","tools/nodetool/statusbackup","tools/nodetool/statusbinary","tools/nodetool/statusgossip","tools/nodetool/statushandoff","tools/nodetool/stop","tools/nodetool/stopdaemon","tools/nodetool/tablehistograms","tools/nodetool/tablestats","tools/nodetool/toppartitions","tools/nodetool/tpstats","tools/nodetool/truncatehints","tools/nodetool/upgradesstables","tools/nodetool/verify","tools/nodetool/version","tools/nodetool/viewbuildstatus","troubleshooting/index"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.todo":1,sphinx:54},filenames:["architecture/dynamo.rst","architecture/guarantees.rst","architecture/index.rst","architecture/overview.rst","architecture/storage_engine.rst","bugs.rst","configuration/cassandra_config_file.rst","configuration/index.rst","contactus.rst","cql/appendices.rst","cql/changes.rst","cql/ddl.rst","cql/definitions.rst","cql/dml.rst","cql/functions.rst","cql/index.rst","cql/indexes.rst","cql/json.rst","cql/mvs.rst","cql/security.rst","cql/triggers.rst","cql/types.rst","data_modeling/index.rst","development/code_style.rst","development/how_to_commit.rst","development/how_to_review.rst","development/ide.rst","development/index.rst","development/patches.rst","development/testing.rst","faq/index.rst","getting_started/configuring.rst","getting_started/drivers.rst","getting_started/index.rst","getting_started/installing.rst","getting_started/querying.rst","index.rst","operating/backups.rst","operating/bloom_filters.rst","operating/bulk_loading.rst","operating/cdc.rst","operating/compaction.rst","operating/compression.rst","operating/hardware.rst","operating/hints.rst","operating/index.rst","operating/metrics.rst","operating/read_repair.rst","operating/repair.rst","operating/security.rst","operating/snitch.rst","operating/topo_changes.rst","tools/cqlsh.rst","tools/index.rst","tools/nodetool.rst","tools/nodetool/assassinate.rst","tools/nodetool/bootstrap.rst","tools/nodetool/cleanup.rst","tools/nodetool/clearsnapshot.rst","tools/nodetool/clientstats.rst","tools/nodetool/compact.rst","tools/nodetool/compactionhistory.rst","tools/nodetool/compactionstats.rst","tools/nodetool/decommission.rst","tools/nodetool/describecluster.rst","tools/nodetool/describering.rst","tools/nodetool/disableauditlog.rst","tools/nodet
ool/disableautocompaction.rst","tools/nodetool/disablebackup.rst","tools/nodetool/disablebinary.rst","tools/nodetool/disablefullquerylog.rst","tools/nodetool/disablegossip.rst","tools/nodetool/disablehandoff.rst","tools/nodetool/disablehintsfordc.rst","tools/nodetool/disableoldprotocolversions.rst","tools/nodetool/drain.rst","tools/nodetool/enableauditlog.rst","tools/nodetool/enableautocompaction.rst","tools/nodetool/enablebackup.rst","tools/nodetool/enablebinary.rst","tools/nodetool/enablefullquerylog.rst","tools/nodetool/enablegossip.rst","tools/nodetool/enablehandoff.rst","tools/nodetool/enablehintsfordc.rst","tools/nodetool/enableoldprotocolversions.rst","tools/nodetool/failuredetector.rst","tools/nodetool/flush.rst","tools/nodetool/garbagecollect.rst","tools/nodetool/gcstats.rst","tools/nodetool/getbatchlogreplaythrottle.rst","tools/nodetool/getcompactionthreshold.rst","tools/nodetool/getcompactionthroughput.rst","tools/nodetool/getconcurrency.rst","tools/nodetool/getconcurrentcompactors.rst","tools/nodetool/getconcurrentviewbuilders.rst","tools/nodetool/getendpoints.rst","tools/nodetool/getinterdcstreamthroughput.rst","tools/nodetool/getlogginglevels.rst","tools/nodetool/getmaxhintwindow.rst","tools/nodetool/getreplicas.rst","tools/nodetool/getseeds.rst","tools/nodetool/getsstables.rst","tools/nodetool/getstreamthroughput.rst","tools/nodetool/gettimeout.rst","tools/nodetool/gettraceprobability.rst","tools/nodetool/gossipinfo.rst","tools/nodetool/handoffwindow.rst","tools/nodetool/help.rst","tools/nodetool/import.rst","tools/nodetool/info.rst","tools/nodetool/invalidatecountercache.rst","tools/nodetool/invalidatekeycache.rst","tools/nodetool/invalidaterowcache.rst","tools/nodetool/join.rst","tools/nodetool/listsnapshots.rst","tools/nodetool/move.rst","tools/nodetool/netstats.rst","tools/nodetool/nodetool.rst","tools/nodetool/pausehandoff.rst","tools/nodetool/profileload.rst","tools/nodetool/proxyhistograms.rst","tools/nodetool/rangekeysample.rst","tools/nodetool/rebuild.rst","tools/nodetool/rebuild_index.rst","tools/nodetool/refresh.rst","tools/nodetool/refreshsizeestimates.rst","tools/nodetool/reloadlocalschema.rst","tools/nodetool/reloadseeds.rst","tools/nodetool/reloadssl.rst","tools/nodetool/reloadtriggers.rst","tools/nodetool/relocatesstables.rst","tools/nodetool/removenode.rst","tools/nodetool/repair.rst","tools/nodetool/repair_admin.rst","tools/nodetool/replaybatchlog.rst","tools/nodetool/resetfullquerylog.rst","tools/nodetool/resetlocalschema.rst","tools/nodetool/resumehandoff.rst","tools/nodetool/ring.rst","tools/nodetool/scrub.rst","tools/nodetool/setbatchlogreplaythrottle.rst","tools/nodetool/setcachecapacity.rst","tools/nodetool/setcachekeystosave.rst","tools/nodetool/setcompactionthreshold.rst","tools/nodetool/setcompactionthroughput.rst","tools/nodetool/setconcurrency.rst","tools/nodetool/setconcurrentcompactors.rst","tools/nodetool/setconcurrentviewbuilders.rst","tools/nodetool/sethintedhandoffthrottlekb.rst","tools/nodetool/setinterdcstreamthroughput.rst","tools/nodetool/setlogginglevel.rst","tools/nodetool/setmaxhintwindow.rst","tools/nodetool/setstreamthroughput.rst","tools/nodetool/settimeout.rst","tools/nodetool/settraceprobability.rst","tools/nodetool/sjk.rst","tools/nodetool/snapshot.rst","tools/nodetool/status.rst","tools/nodetool/statusautocompaction.rst","tools/nodetool/statusbackup.rst","tools/nodetool/statusbinary.rst","tools/nodetool/statusgossip.rst","tools/nodetool/statushandoff.rst","tools/nodetool/stop.rst","tools/nodetool/stopdaemon.rst","tools/nodetool
/tablehistograms.rst","tools/nodetool/tablestats.rst","tools/nodetool/toppartitions.rst","tools/nodetool/tpstats.rst","tools/nodetool/truncatehints.rst","tools/nodetool/upgradesstables.rst","tools/nodetool/verify.rst","tools/nodetool/version.rst","tools/nodetool/viewbuildstatus.rst","troubleshooting/index.rst"],objects:{},objnames:{},objtypes:{},terms:{"00t89":21,"03t04":21,"0x0000000000000003":14,"0x00000004":13,"100mb":6,"10mb":6,"10s":52,"10x":[6,41],"11e6":52,"128th":4,"12gb":43,"12h30m":21,"15m":46,"160mb":41,"16mb":[30,41],"180kb":6,"19t03":139,"1mo":21,"1st":21,"24h":21,"250m":6,"256mb":6,"256th":6,"29d":21,"2e10":10,"2gb":43,"2nd":[6,11,50],"2xlarg":43,"300s":6,"327e":52,"32gb":43,"32mb":[6,30],"36x":34,"3ff3e5109f22":13,"3gb":42,"3rd":[6,46,50],"40f3":13,"4ae3":13,"4kb":11,"4xlarg":43,"50kb":6,"50mb":[6,41],"512mb":6,"5573e5b09f14":13,"5kb":6,"5mb":41,"64k":6,"64kb":42,"6ms":6,"6tb":43,"7374e9b5ab08c1f1e612bf72293ea14c959b0c3c":24,"75th":46,"86400s":41,"89h4m48":21,"8gb":43,"8th":[6,40],"90th":46,"95ac6470":52,"95th":46,"98th":46,"99th":46,"9th":46,"\u00eatre":9,"abstract":[23,25],"boolean":[9,12,14,17,19,21,52],"break":[28,41],"byte":[6,9,13,21,46,62,80,116,166],"case":[6,10,11,12,13,14,16,17,18,21,24,25,28,29,30,38,43,49,51,52],"catch":23,"class":[6,11,14,21,23,26,29,41,42,45,49,117,129,150],"default":[4,6,10,11,13,14,17,19,21,26,29,30,31,34,38,40,41,42,46,49,51,52,57,76,80,87,116,117,119,122,132,133,139,154,156,167],"enum":9,"export":[26,46,52],"final":[14,19,23,26,41,43,49,133],"float":[9,10,11,12,14,17,21,38,42],"function":[6,9,10,11,12,15,16,18,19,21,25,32,36,49,50,52],"import":[11,14,21,26,27,29,31,41,43,46,52,117],"int":[9,10,11,13,14,17,18,19,21,29,40,42],"long":[6,13,21,24,25,30,41,46],"new":[0,4,6,10,11,14,16,17,18,19,20,21,23,25,26,28,29,33,36,38,41,43,49,51,108,115,117],"null":[9,10,12,13,14,17,18,21,23,52],"public":[6,14,23,29,30,34,49,50],"return":[6,9,11,13,14,16,17,18,19,21,25,132],"short":[6,21],"static":[6,9,10,18,50],"super":49,"switch":[6,10,19,26,30,45,46,49,50],"throw":[6,14,23,29],"true":[6,11,12,17,19,21,26,30,40,41,49,51,52,114,117],"try":[6,11,23,26,28,30,41,54,132],"var":[6,23,34],"void":29,"while":[6,10,11,12,13,21,24,28,38,41,42,43,49,52],AES:6,AND:[9,11,13,14,18,19,49,52],AWS:43,Added:10,Adding:[6,11,19,21,30,36,45,49],And:[11,14,19,49],Are:25,Ave:21,BUT:23,But:[13,15,19,21,23,28,30,52],CAS:6,CFs:[132,139],CLS:52,DCs:6,DNS:30,Doing:10,EBS:43,For:[0,4,6,9,10,11,12,13,14,15,16,17,18,19,20,21,28,29,30,31,34,35,41,43,49,50,52],GCs:6,Has:[6,25],IDE:[27,36],IDEs:[26,27],IDs:[117,157],INTO:[6,9,11,13,14,17,21],IPs:[6,50,138,157],Ids:163,JKS:6,KBs:6,LCS:11,NFS:43,NOT:[6,9,10,11,13,14,16,18,19,20,21],Not:[13,19,28,41,42],ONE:[0,6,46,52],One:[6,29,30,41],PFS:6,Pis:43,Such:21,THE:6,TLS:[6,45],That:[11,12,18,21,28,30,41,52],The:[0,4,6,8,9,10,12,14,16,18,19,20,21,23,24,26,28,29,30,31,34,35,36,38,40,42,43,46,49,50,51,52,57,60,65,67,73,77,83,86,87,90,95,99,101,103,108,115,117,119,123,124,130,132,139,142,143,150,156,157,158,165,167,170,171,173],Their:21,Then:[13,29,30,34,41,49],There:[0,6,10,11,12,13,14,21,26,28,29,30,41,46,49],These:[4,6,11,14,26,46,49,52],USE:[9,14,15],USING:[9,13,16,20,21,41],Use:[11,13,19,30,35,45,52,55,60,117,122,132,163,170],Used:46,Uses:[6,17,45,50],Using:[11,13,29,30,49],WILL:6,WITH:[9,11,12,16,18,19,38,40,41,42,49,52],Will:[6,36,80,117,150],With:[6,13,17,30,41,51,56],Yes:30,_cache_max_entri:49,_if_:6,_must_:6,_trace:46,_udt:14,_update_interval_in_m:49,_use:14,_validity_in_m:49,a278b781fe4b2bda:34,abil:[14,30,42],abilityid:16,abl:[6,14,21,26,
29,30,41],about:[4,6,19,26,28,29,30,38,41,50,52,59,117,138],abov:[6,8,11,12,13,14,21,26,28,30,31,40,41,46],absenc:12,abstracttyp:21,accept:[0,6,10,11,12,13,17,28,29,38,51,75,117],access:[6,10,21,26,28,43,45,46],accompani:6,accord:[6,30],accordingli:[6,14,30],account:[6,21,29],accru:[41,46],accumul:[6,41,46],accur:[6,30,38,138],accuraci:[38,119,167],acheiv:49,achiev:[41,46],achil:32,ack:6,acoount:46,acquir:[19,46],across:[6,11,19,28,46,49,50,117,121],action:[6,13],activ:[4,6,28,40,46,52,117,119,167],activetask:46,actual:[4,6,13,20,23,25,30,34,41,50,132],acycl:19,add:[0,6,9,10,11,21,24,25,28,31,34,36,41,49],addamsfamili:11,added:[0,6,10,11,14,25,41],adding:[6,13,14,25,43,52],addit:[0,6,9,11,13,19,21,26,28,31,41,43,46,49,52],addition:[11,13,41],address:[6,8,17,21,26,28,31,36,46,50,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],adher:10,adjac:41,adjust:[6,38],adv:34,advanc:[6,45,49],advantag:43,advers:30,advic:[28,30],advis:[6,12,21,30],af08:13,afd:21,affect:[6,25,28,30,41,139],afford:6,after:[5,6,10,11,12,13,14,16,17,18,26,28,30,40,41,43,45,46,49,50,52],afterward:[26,29],afunct:14,again:[6,28,41,51,52],against:[6,11,14,28,29,30,43,51,52,132],agent:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],agentlib:26,aggreg:[6,9,10,13,15,18,19,46,52],aid:12,aim:6,akeyspac:14,algorithm:[6,11,51],alia:[10,13,32],alias:[6,10,18],alic:19,align:23,aliv:6,all:[0,6,9,11,12,13,14,17,18,21,23,24,25,26,28,29,36,38,40,41,46,49,51,52,57,58,59,75,87,92,108,109,114,117,119,121,130,133,139,154,156,158,167,169,170,171],allmemtableslivedatas:46,allmemtablesoffheaps:46,allmemtablesonheaps:46,alloc:[6,30,40,43,46],allocate_tokens_for_keyspac:51,allow:[0,4,6,9,10,11,12,14,16,17,18,21,31,38,40,41,42,43,50],allowallauthent:[6,49],allowallauthor:[6,49],allowallinternodeauthent:6,almost:[6,14,21,41],alon:[6,23],along:[6,13,114,117],alongsid:[35,52],alphabet:23,alphanumer:[11,19],alreadi:[6,11,14,16,18,21,28,41,49,170],also:[0,4,6,10,11,12,13,14,17,18,19,21,26,28,29,30,31,41,43,46,49,51,52,87,171],alter:[9,10,15,17,30,38,40,41,42,49],alter_keyspace_stat:12,alter_role_stat:12,alter_table_instruct:11,alter_table_stat:12,alter_type_modif:21,alter_type_stat:[12,21],alter_user_stat:12,altern:[6,10,11,12,13,17,21,26,28,31,43,49],although:[6,28],alwai:[0,6,9,10,11,13,14,18,21,23,28,29,30,41,43],amend:24,amongst:11,amount:[6,11,13,21,26,28,29,30,41,42,43,46,51,52,132],amplif:[41,43],anaggreg:14,analogu:13,analyt:38,analyz:29,ani:[0,6,10,11,12,13,14,17,18,19,20,21,24,25,26,28,29,31,34,36,40,41,43,46,49,51,52,55,108,114,117,122,139,154],annot:23,anonym:[12,21],anoth:[6,11,14,19,21,29,41,49,52],anotherarg:14,ant:[26,28,29],anti:[6,21],anticip:11,anticompact:[41,163],antientropystag:46,antipattern:43,anymor:[24,41],anyon:23,anyth:41,anywai:6,anywher:13,apach:[2,5,6,7,14,20,23,24,25,26,28,29,30,33,3
4,41,42,46,49,53],api:[6,8,11,15,17,35,50],appear:[12,14,41,52],append:[21,24,43,46,52],appendic:[15,36],appendix:[12,15],appl:21,appli:[6,9,10,11,12,13,19,21,24,28,29,30,46,52],applic:[6,11,19,23,25,26,49],appreci:28,approach:[4,41,51],appropri:[6,11,19,21,25,28,49,50,51],approxim:[41,46],apt:34,arbitrari:[11,12,21],architectur:[30,36],archiv:[6,40,80],archive_command:80,archive_retri:80,aren:13,arg:[14,117,155],argnam:14,argnum:14,argument:[6,11,13,14,16,17,30,31,42,52,55,56,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],arguments_declar:14,arguments_signatur:14,around:[6,19,41,43,50],arrai:[6,30],arriv:[6,28,30],artifact:26,artifici:11,asap:10,asc:[9,11,13],ascend:[11,13],ascii:[9,14,17,21],asf:26,ask:[5,28,29,36,49],aspect:11,assassin:117,assertionerror:23,assertrow:29,assign:[6,13,30],associ:[6,11],assum:[6,11,14,26,49,50],assumpt:49,astyanax:32,async:[6,49],asynchron:[6,16,30,43],asynchroni:46,atabl:14,atom:[11,13,20,24],atomiclong:46,attach:28,attemp:46,attempt:[0,6,11,16,18,19,21,30,41,46,49,52,133],attent:[23,28],attribut:41,audit:[66,76,117],auditlog:76,auth:6,authent:[10,45,52],authenticatedus:6,author:[9,19,21,45],authorizationproxi:49,auto:[6,30,158],auto_bootstrap:51,autocompact:[41,67,77,117,158],autogener:54,autom:[8,23],automat:[6,13,14,16,26,29,30,34,41,49,51],avail:[0,6,8,11,14,19,26,28,29,34,40,49,50,52,57,87,130,139,150,170],availabil:6,averag:[6,14,41,46],average_live_cells_per_slice_last_five_minut:166,average_s:11,average_tombstones_per_slice_last_five_minut:166,averagefin:14,averagest:14,avg_bucket_s:41,avoid:[6,11,12,23,25,28,38,41,43,49,50,52,171],awai:[26,51,52],awar:[0,11,28,38,42,138],azur:43,b124:13,b70de1d0:13,back:[6,41,46,51,114,117],backend:6,background:[30,34,41,49],backlog:6,backpressur:6,backpressurestrategi:6,backup:[6,36,41,45,51,52,68,78,117,159],backward:[6,10,11,15,19,21],bad:[6,14,30,50],balanc:51,banana:21,band:21,bar:[12,23],bardet:21,bare:6,base:[0,4,6,10,11,13,14,18,19,21,24,28,29,30,41,43,46,49,51],bash:30,basi:[6,30,42],basic:[11,41,43],batch:[6,9,15,29,36,52],batch_remov:46,batch_stat:12,batch_stor:46,batchlog:[13,46,89,117,134,140],be34:13,beatl:21,beca:52,becaus:[6,13,14,34,41,42,49],becom:[4,6,11,14,19,28,41,46,49,51],been:[0,4,6,10,13,14,15,19,21,25,28,41,43,49,139],befor:[0,6,10,11,13,14,16,20,21,26,27,29,32,41,49,50,52,80,156],begin:[9,12,13,29,49,52],beginn:28,begintoken:52,behavior:[0,6,10,14,17,21,23,25,38,41,51,133],behind:[6,23,29,30,41],being:[6,11,13,17,21,25,29,30,38,41,46,51],belong:[11,13,14,46,57,117],below:[6,11,12,13,17,19,21,28,34,41,52,63],benchmark:43,benefici:41,benefit:[6,38,41,43,45],besid:6,best:[6,29,41,49,50],best_effort:6,better:[6,23,28,41,43],between:[0,6,9,10,13,15,28,30,38,41,46,49,51,132,154],beyond:[6,52,171],big:[41,60],bigger:[11,41],biggest:14,bigint:[9,14,17,21],bigintasblob:14,bin:[26,34,35,52],binari:[14,33,69,79,117,160],binauditlogg:76,bind:[6,10,12,14,30],bind_mark:[12,13,18,21],biolog:11,birth:13,birth_year:13,bit:[6,14,17,21,28,30,42,43],bite:30,bitrot:11,bitstr:9,black:6,blank:[6,23,30],bleed:26,blindli:30,blob:[9,10,12,17,21,36,42],blobasbigint:14,blobastyp:14,block:[4,6,11,24,31,41,43,46,49,80],blockedonalloc:6,blog:[6,11,13],blog_til:13,blog_titl:13,bloom:[4,11,36,43,45,46],bloom_filter_false_posit:166,bloom_filter_false_ratio:166,bloom_filter_fp_ch:[11,38],bloom_filter_off_heap_memory_us:166,bloom_filter_space_us:166,bloomfilterdisks
paceus:46,bloomfilterfalseposit:46,bloomfilterfalseratio:46,bloomfilteroffheapmemoryus:46,blunt:49,bnf:12,bob:[13,19],bodi:[11,12],boilerpl:27,boolstyl:52,boost:6,boot:30,bootstrap:[0,6,36,42,45,46,49,117,122,150],born:13,both:[0,6,11,13,14,18,21,24,25,28,30,31,38,41,42,43,46,49,51,52],bottleneck:6,bottom:30,bound:[6,11,12,21,43,49],box:[6,49,50],brace:23,bracket:12,braket:12,branch:[24,25,26,29],branchnam:28,breakpoint:26,breed:29,bring:6,brk:30,broadcast:6,broadcast_address:50,broken:[41,46],brows:6,browser:52,bucket:41,bucket_high:41,bucket_low:41,buffer:[4,6,46],bufferpool:45,bug:[10,24,29,30,36],build:[8,27,29,36,46,117,173],builder:[94,117,147],built:[26,46],bulk:[36,45],bump:10,bunch:23,burn:40,button:30,bytebuff:14,byteorderedpartition:[6,14],bytescompact:46,bytesflush:46,bytestyp:9,c73de1d3:13,cach:[6,30,31,43,45,50,108,110,111,112,117,141,142],cachecleanupexecutor:46,cachenam:46,calcul:[6,38,40,41,46,50],call:[9,11,12,13,14,19,23,31,36,41,43,46,51,117,150],callback:46,caller:23,can:[0,4,5,6,8,9,10,11,12,13,14,16,17,18,19,20,21,23,24,25,26,28,29,31,34,35,36,38,40,41,42,43,46,49,50,51,52,55,57,58,60,65,67,73,77,80,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],cancel:[10,133],candid:41,cannot:[6,9,11,13,14,17,18,19,21,41,49,55,117],cap:[12,91,96,102,117,144,149,152],capabl:[6,30,52],capac:[6,40,46,50,117,119,141,167],captur:[6,36,45],care:[6,41,132],carlo:19,carri:[23,132],cascommit:46,cascontent:[103,153],casprepar:46,caspropos:46,casread:46,cassablanca:21,cassafort:32,cassandra:[0,2,4,5,8,10,11,13,14,19,20,21,23,24,28,32,33,35,38,41,42,43,46,50,51,52,76,80,117,128,132,135,139,164,172],cassandra_hom:[6,40,49],cassandraauthor:[6,49],cassandradaemon:[26,34],cassandralogin:49,cassandrarolemanag:[6,49],casser:32,cassi:32,cast:[10,13,18],caswrit:46,cat:21,categor:46,categori:[11,12,13,14,76],caught:[25,46],caus:[6,18,30,41,49],caution:6,caveat:49,cbc:6,ccm:[25,29],ccmlib:29,cdc:[6,11],cdc_enabl:40,cdc_free_space_check_interval_m:40,cdc_free_space_in_mb:40,cdc_raw:[6,40],cdc_raw_directori:40,cdccompactor:6,cell:[6,21,46,87,171],center:[6,11,21,30,50,51,73,83,117,132],central:[26,49,52],centric:19,certain:[6,9,11,19,29,41,49],certainli:14,certif:[49,117,128],cfname:[101,119,167],cfs:23,chain:19,chanc:38,chang:[6,11,12,15,19,21,24,26,27,33,34,36,42,45,46,49,150],channel:[5,8,28],charact:[11,12,13,17,19,21,23,52],chat:8,cheap:6,check:[0,6,11,13,23,25,26,28,29,30,38,40,41,46,49,108,117,132,171],checklist:[27,28,36],checkout:[26,28],checksum:[11,42,117,171],cherri:24,chess:13,child:52,chmod:49,choic:[6,11,36,41,45],choos:[0,6,11,27,32,43,46],chosen:[0,6,11,14],chown:49,christoph:21,chrome:52,chunk:[4,6,30,42,52],chunk_length_in_kb:[11,42],chunk_length_kb:6,chunk_lenth_in_kb:11,chunkcach:46,chunksiz:52,churn:6,cipher:[6,49],cipher_suit:6,circular:19,citi:21,clash:12,class_nam:6,classpath:[6,14,21,46],claus:[10,11,14,16,17,18,19,23],clean:[6,23,46,57,117,135],cleanli:28,cleanup:[30,41,45,46,87,117,163],clear:[25,28,59,108],clearsnapshot:117,click:[13,26,28,29],client:[0,6,8,10,11,13,17,19,21,25,30,31,33,36,43,45,52,59,117],client_encryption_opt:49,clientrequest:46,clientstat:117,clock:6,clockr:6,clojur:33,clone:[26,30,52],close:[6,15,49],closer:38,cloud:45,cluster:[0,4,6,9,10,13,14,20,21,25,29,31,35,36,41,43,46,49,50,51,52,64,85,89,105,117,140,157],cluster_nam:[31,35],clustering_column:11,clustering_ord:11,cmsparallelremarken:26,coalesc:6,coalescingstrategi:
6,code:[6,10,12,14,20,24,25,26,27,29,36,42,46],codestyl:23,col:14,cold:6,collat:6,collect:[6,10,11,12,13,14,15,17,43,45,46,87],collection_liter:12,collection_typ:21,color:[21,52],column1:9,column:[6,9,10,12,13,14,15,16,17,18,21,42,46,52,101,119,139,156,167],column_definit:11,column_nam:[11,13,16],columnfamili:[6,9,23,41],colupdatetimedeltahistogram:46,com:[6,11,14,23,24,49],combin:[4,6,10,40,41],come:[6,9,49],comingl:41,comma:[6,11,12,13,31,49,51,52,76,119,122,167],command:[0,6,24,29,30,31,34,35,42,45,53,55,56,57,58,60,65,67,73,77,80,83,86,87,90,92,95,99,101,103,107,108,115,117,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],comment:[6,11,15,18,23,25,49],commit:[6,8,11,27,28,36,46],commitlog:[2,6,30,31,43,45],commitlog_archiv:6,commitlog_directori:[31,43],commitlog_segment_size_in_mb:30,commitlogread:40,commitlogreadhandl:40,commitlogreplay:40,commitlogseg:[6,45,46],committ:[24,28,29],common:[0,14,15,23,25,28,45,52],common_nam:11,commonli:117,commun:[6,8,25,26,28,30,31,35,49],commut:30,compact:[4,6,15,30,36,38,42,43,45,57,61,62,87,90,91,117,130,139,143,144,150,158,163,170],compacted_partition_maximum_byt:166,compacted_partition_mean_byt:166,compacted_partition_minimum_byt:166,compaction_:163,compaction_window_s:41,compaction_window_unit:41,compactionbyteswritten:46,compactionexecutor:46,compactionhistori:[41,117],compactionid:163,compactionparamet:41,compactionparametersjson:41,compactionstat:[41,117],compactionstrategi:45,compactor:[93,117,146],compar:[6,28,41,46],compat:[6,9,10,11,13,15,19,25,28],compatilibi:21,compet:6,compil:[23,26,52],complain:26,complet:[6,13,14,28,30,41,46,49,51,52,117,131,133],completedtask:46,complex:[6,9,14,21,28],complexarg:14,compliant:[6,14,49],complic:28,compon:[4,11,25,38,46,49,117,150],compos:[11,13,21],composit:11,compound:17,comprehens:25,compress:[4,6,29,36,41,43,45,46],compression_metadata_off_heap_memory_us:166,compressioninfo:4,compressionmetadataoffheapmemoryus:46,compressionratio:46,compressor:[6,11],compris:[4,11,42],compromis:49,comput:[6,14],concaten:14,concept:[15,19,41],concern:[13,14],concret:[12,21],concurr:[6,43,92,93,94,117,132,145,146,147],concurrentmarksweep:43,condens:13,condit:[6,10,12,13,19,21,23,24,41,46,49,52],conditionnotmet:46,conf:[6,30,31,34,46,49,52],config:[46,49,52],configur:[0,4,11,21,26,29,30,33,34,36,45,46,49,50,52,63,80,117,135,150],confirm:[6,8,25,26],conflict:[13,21,24],conform:[18,25],confus:[10,12,30],conjunct:52,connect:[6,11,19,21,26,35,36,46,49,50,52,59,63,116,117],connectednativecli:46,connectedthriftcli:46,connector:[30,32,49],consecut:31,consequ:[11,13,21,43],conserv:6,consid:[0,6,13,21,28,31,38,41,43],consider:[13,21],consist:[2,11,12,13,14,25,49,51],consol:[26,31,52],constant:[10,11,15,17,21],constantli:[6,41],construct:12,constructor:[6,23],consum:[6,29,38,40,46],consumpt:40,contact:[6,11,30,36],contain:[0,6,8,9,10,11,12,13,15,16,18,19,21,26,28,40,41,42,49,52,156],contend:[6,46],content:[4,6,11,12,13,36,41,52,80],contentionhistogram:46,context:[6,9,19,21,28,30,49],contigu:13,continu:[0,6,23,26,29,41,49,50],contrarili:12,contrast:[29,49],contribut:[24,27,29,36],contributor:[24,28,29,34],control:[0,6,10,11,13,15,25,31,34,41,49,50,52],conveni:[9,12,14,17,29,51],convent:[6,11,14,15,24,27,28,29,49,50],convers:10,convert:[10,13,14,41],coordin:[0,6,11,13,14,21,30,46,133],coordinatorreadlat:46,coordinatorscanlat:46,cop:23,copi:[0,30,41],core:[6,14,43,145],correct:[10,25,34,41,42,117,130],correctli:[6,11,30,41,49],correl:[6,
10,50],correspond:[6,9,11,13,14,18,21,28,29,30,40,50],corrupt:[6,11,41,42,43,139,171],cost:[6,13,21,42],could:[6,12,21,25,28,41,52],couldn:34,count:[6,9,13,21,30,41,46,51],counter:[6,9,14,43,46,110,117,139,141,142],counter_mut:46,countercach:46,countermutationstag:46,counterwrit:[103,153],countri:[13,21],country_cod:21,coupl:[0,6],cours:[6,13],cover:[25,28,29,30,33,41,46],cpu:[6,11,40,42,45],cqerl:32,cql3:[11,14,25,29,52],cql:[6,10,11,12,13,14,16,17,19,21,29,32,35,36,41,45,49,53,150],cql_type:[11,12,13,14,19,21],cqlc:32,cqldefinit:14,cqlsh:[30,33,34,36,49,53],cqltester:[25,29],crash:43,crc32:4,crc:4,crc_check_chanc:[11,42],creat:[6,9,10,12,13,15,17,26,27,29,30,40,41,42,49,51,52,60],create_aggregate_stat:12,create_function_stat:12,create_index_stat:12,create_keyspace_stat:12,create_materialized_view_stat:12,create_role_stat:12,create_table_stat:12,create_trigger_stat:12,create_type_stat:[12,21],create_user_stat:12,createkeystor:6,createt:29,creation:[6,10,11,13,14,18,21],creator:19,credenti:[6,49],critic:[25,28,49],cross:[6,30,50],crossnodedroppedlat:46,cryptographi:6,csv:52,cuddli:21,curl:[24,34],current:[6,9,11,13,14,19,21,26,28,34,41,46,51,52,82,100,104,106,108,117,131,162,170],currentlyblockedtask:46,custom:[6,9,10,11,14,15,16,19,28,50,52],custom_option1:19,custom_option2:19,custom_typ:[14,21],cute:21,cvh:25,cycl:[6,40,80],daemon:[26,117,164],dai:[17,21,41],daili:80,danger:6,dash:12,data:[0,4,6,10,12,14,15,16,18,25,31,34,36,38,42,43,45,46,49,50,52,55,60,73,80,83,87,108,117,122,132,156,171],data_file_directori:[31,43],data_read:19,data_writ:19,databas:[12,13,15,20,41,43,49],datacent:[0,6,50,73,83,96,117,132,149],datacenter1:6,dataset:6,datastax:[6,11,14,32],datatyp:14,date:[9,10,14,15,17,139],dateof:[10,14],datestamp:17,datetieredcompactionstrategi:[11,41],daylight:21,db_user:49,dba:49,dc1:[6,11,49],dc2:[6,11,49],dcassandra:[41,46,49,51],dclocal_read_repair_ch:[0,11,41],dcom:49,dcpar:132,ddl:[11,52],ddl_statement:12,dead:[6,45,55,117],dead_node_ip:51,deb:34,debian:[30,33],debug:[31,52],decid:[9,41,50],decim:[9,14,17,21,52],decimalsep:52,declar:[11,12,14,21],decod:[17,21],decommiss:[6,51,117],decompress:42,decreas:[6,41],decrement:[13,21],decrypt:6,dedic:6,dedupl:[114,117],deem:6,deeper:28,default_time_to_l:[10,11,13],default_weight:6,defend:30,defin:[0,6,9,10,11,12,13,15,16,17,18,19,20,26,41,46,49,50,51,52,60,117],definit:[9,13,14,15,18,21,36,38],deflat:6,deflatecompressor:[11,42],degrad:6,delet:[6,9,10,11,12,15,17,19,21,28,36,52,80,87,117,169],delete_stat:[12,13],delimit:6,deliv:[0,6],deliveri:[6,117,118,137,148],delta:46,demand:49,deni:30,denorm:21,denot:12,dens:38,depend:[4,6,11,12,13,14,21,25,26,28,29,41],deploi:[30,31],deploy:[6,49,50],deprec:[6,10,11,14,15,30,41],depth:6,desc:[9,11,13,52],descend:[11,13],describ:[2,6,7,9,10,11,12,13,14,15,17,19,21,25,26,28,38,49,53,117],describeclust:117,descript:[6,10,11,14,21,46,52],descriptor:46,design:[14,40,41,43],desir:[16,21,30],destin:[40,52],detail:[5,6,10,11,12,13,14,21,30,45,49,52],detect:[2,6,11,24,30,49],detector:[85,117],determin:[0,6,13,19,38,42,50,132],determinist:30,dev:[6,8,11,30],develop:[5,8,26,28,29,36,43],dfb660d92ad8:52,dfp:171,dht:6,dictat:[6,49],did:[25,46],die:6,dies:36,diff:[15,23],differ:[0,6,11,12,13,14,15,19,21,24,26,28,29,30,31,34,41,42,43,46,51],difficult:[6,29],difficulti:21,digest:4,digit:[17,21,30],diminish:21,direct:[6,11,17,19,28,46],directli:[13,18,19,26,41],director:13,directori:[6,20,26,29,30,33,34,35,40,43,45,52,108,117,135],dirti:6,disabl:[6,11,14,41,42,49,50,52,66,67,68,69,70,71,72,73,74,83,117,140,142,144,
149,152,153,154],disable_stcs_in_l0:41,disableauditlog:117,disableautocompact:[41,117],disablebackup:117,disablebinari:117,disablefullquerylog:117,disablegossip:117,disablehandoff:117,disablehintsfordc:117,disableoldprotocolvers:117,disablesnapshot:139,disallow:6,disambigu:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],discard:[6,40],disconnect:41,discourag:[11,21,28],discov:30,discuss:[8,21,28],disk:[4,6,11,31,36,38,40,41,42,45,46,80,114,117,130,171],displai:[11,52,56,62,92,107,109,116,117,166],disrupt:[30,49],dist:34,distinct:[0,9,10,13],distinguish:[9,14],distribut:[6,29,30,41,46,49,51],divid:12,djava:[26,30,49],dml:20,dml_statement:12,dmx4jaddress:46,dmx4jport:46,dns:30,dobar:23,doc:[6,25,40,49],document:[5,12,14,15,17,25,28,35,49,52],doe:[6,11,13,14,16,17,18,19,21,24,25,28,36,38,40,41,42,49,50,51,114,117],doesn:[6,14,21,23,29,30],dofoo:23,doing:[6,13,29,30,41,51],dollar:[10,12],domain:[49,138,157],don:[5,13,23,24,25,26,28,30,31,41,51,108,132],done:[6,11,13,21,28,29,31,35,41],doubl:[6,9,10,11,12,14,17,21,26,46,50],down:[6,19,41,46,50,51,71,117,132],download:[6,26,34,46],downward:19,drain:117,drive:[6,41,43],driver:[6,12,14,29,33,36,52],drop:[6,10,15,36,41,46,80],drop_aggregate_stat:12,drop_function_stat:12,drop_index_stat:12,drop_keyspace_stat:12,drop_materialized_view_stat:12,drop_role_stat:12,drop_table_stat:12,drop_trigger_stat:12,drop_type_stat:[12,21],drop_user_stat:12,droppabl:[6,41],dropped_mut:166,droppedmessag:45,droppedmetr:46,droppedmut:46,dropwizard:46,dt_socket:26,dtest:[25,27],due:[11,13,21,30,34,46,51],dump:52,duplic:25,durable_writ:11,durat:[6,10,15,19,41,46,119,167],dure:[6,11,14,20,28,29,30,41,42,46,49,51,52,139],dying:30,dynam:[6,45,49],dynamic_snitch:50,dynamic_snitch_badness_threshold:50,dynamic_snitch_reset_interval_in_m:50,dynamic_snitch_update_interval_in_m:50,dynamo:[2,36],each:[0,4,6,10,11,12,13,14,17,18,19,21,24,28,35,36,41,42,43,46,49,50,51,52,117,142,158,171],each_quorum:0,earli:[6,12,28],earlier:15,easi:[9,28],easier:[0,28],easiest:30,ec2:[6,43,50],ec2multiregionsnitch:[6,50],ec2snitch:[6,50],ecc:43,echo:34,eclips:[23,27,29],ecosystem:25,edg:[25,26],edit:[26,31,34,46,49],effect:[6,11,21,28,30,38,42,49,71,117],effectiv:46,effici:[6,11,41,50,51],effort:6,either:[6,8,12,13,14,16,21,23,24,26,28,30,34,35,40,41,46,49,169],elaps:[41,46],element:[21,52],elig:6,els:[11,13,23,28],email:[8,16,21,36],embed:29,emploi:38,empti:[6,9,10,11,12,52],emptytyp:9,enabl:[6,11,14,17,19,29,30,41,42,50,51,52,76,77,78,80,83,84,117,154],enable_user_defined_funct:14,enableauditlog:117,enableautocompact:[41,117],enablebackup:117,enablebinari:117,enablefullquerylog:117,enablegossip:117,enablehandoff:117,enablehintsfordc:117,enableoldprotocolvers:117,encapsul:[23,46],enclos:[9,10,12,14,19],enclosur:12,encod:[15,21,25,52],encount:[5,13,34,46],encourag:[6,11],encrypt:[6,45],encryption_opt:6,end:[21,28,30,41,49,52,60,95,117,132],end_token:[60,132],end_token_1:122,end_token_2:122,end_token_n:122,endpoint:[46,50,55,95,117,132,169],endpoint_snitch:50,endtoken:52,enforc:[17,49],engin:[2,11,28,36,46],enhanc:43,enough:[0,6,21,30,31,41,50,52],enqueu:6,ensur:[11,13,18,20,30,42,49],entail:30,enter:[30,52],entir:[0,4,6,14,21,30,38
,41,49,51,52],entri:[4,6,9,13,16,28,36,46,49,52],entropi:6,entry_titl:13,enumer:19,env:[30,31,46,49],environ:[0,5,6,26,30,33,43],ephemer:43,epoch:21,equal:[0,6,10,11,13,21,23,41],equival:[10,11,12,13,14,19,24,41],eras:11,erlang:33,erlcass:32,err:52,errfil:52,error:[6,11,12,14,16,18,19,21,23,25,26,34,36,52,133],escap:[12,17],especi:[28,30,41,52],essenti:[0,6,14,30,52],establish:[6,19,50],estim:46,estimatedcolumncounthistogram:46,estimatedpartitioncount:46,estimatedpartitionsizehistogram:46,etc:[6,18,21,23,25,30,31,34,41,46,49],eth0:6,eth1:6,ev1:21,even:[0,6,10,12,13,14,17,21,28,36,41,49,52,63,139,170],evenli:6,event:[13,21,41,52,132],event_typ:13,eventu:[4,13],ever:[23,29,30,43],everi:[4,6,11,13,14,18,19,20,21,35,38,41,43,52],everyth:[12,23,26,30],evict:46,evil:[6,14],exact:[11,12,14,42],exactli:[11,14,18,49],exampl:[0,6,11,13,14,17,19,21,29,34,35,41,49,50,52],exaust:6,excalibur:11,exce:[4,6,17,23],exceed:[6,43],excel:11,excelsior:11,except:[0,13,14,17,25,27,28,29,30,46],excess:38,exchang:[6,30],exclud:[46,76,100,117],excluded_categori:76,excluded_keyspac:76,excluded_us:76,exclus:[21,29,132],execut:[6,9,11,12,13,14,19,26,29,35,41,46,49,52],exhaust:6,exhibit:13,exist:[6,9,10,11,12,13,14,16,17,18,19,20,21,25,26,29,36,38,41,42,50,51],expect:[6,10,12,21,23,25,28,41,49],expens:[6,38,50],experi:[6,41],experienc:6,experiment:132,expir:[6,10,11,13,21,45,49,139],expiri:41,explain:[23,25,28,34],explicit:10,explicitli:[6,10,13,17,21,23,41,50],explor:26,expon:10,exponenti:46,expos:[6,9,49],express:[0,6,10,12,50],expung:30,extend:[21,28,29,108,171],extens:[6,11,49],extern:[46,51],extra:[0,6,11,41],extract:[23,34],extrem:[6,13],fact:[21,29,30],factor:[0,6,11,36,42,49],fail:[6,13,14,21,36,41,52,117,133],failur:[2,6,28,36,41,43,46,50,85,117,171],failuredetector:117,fairli:[6,40,49],fake:14,fall:6,fallback:[6,50],fals:[6,11,12,17,19,21,38,40,41,42,46,49,51,52,139],famili:[6,43,101,119,156,167],fanout_s:41,fast:[6,38,41],faster:[6,28,42,43,117,142],fastest:[6,24,50],fatal:6,fault:30,fav:[16,21],fax:21,fct:14,fct_using_udt:14,fear:30,feasibl:21,featur:[25,26,28,49],fed:6,feedback:28,feel:24,fetch:[6,11,52],few:[41,43],fewer:[6,28],fffffffff:[17,21],field:[10,13,14,17,21,23,38],field_definit:21,field_nam:13,fifteen:46,fifteenminutecachehitr:46,figur:41,file:[4,7,11,26,27,28,29,30,31,33,36,38,41,43,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],filenam:[11,52,101,117],filesystem:49,fill:[40,41],fillion:13,filter:[4,9,11,18,36,43,45,46,132],finalfunc:[9,14],find:[6,26,29,34,38,41,51,95,99],fine:[6,28,49],finer:6,finish:[26,28,117,134],fip:[6,49],fire:20,firefox:52,firewal:[6,30,31,50],first:[5,6,11,13,14,21,28,30,33,41,43,49,52,132,139],firstnam:13,fit:[6,41,46],five:46,fiveminutecachehitr:46,fix:[6,10,12,24,30,41,43],flag:[6,13,24,25,28,40,46,51],flexibl:49,flight:[6,49],flip:11,floor:6,flow:[6,19,25],fluent:32,flush:[4,6,40,41,43,46,75,117,156],fname:14,focu:28,folder:[26,163],follow:[0,5,6,8,9,10,11,12,13,14,17,18,19,21,23,24,25,26,28,29,30,31,34,36,40,41,42,46,49,50,52,57,60,67,77,86,87,123,132,139,153,158,170,171],font:12,foo:[11,12,40],footprint:[117,119],forc:[4,6,11,13,52,60,63,117,131,132,133],forcefulli:[55,117],foregrou
nd:[31,34],forev:41,forget:5,fork:28,form:[6,10,11,12,14,19,62,116,166],formal:12,format:[6,10,17,21,24,25,27,28,46,52,61,80,101,122,166,168],former:[6,46],forward:[6,11],found:[5,12,14,15,28,29,31,35,49,52,163,171],four:13,fqcn:29,fraction:6,frame:6,framework:[25,29],franc:[13,21],free:[6,11,21,24,26,46],freed:4,freenod:8,frequenc:[6,40],frequent:[6,29,36,41,49],fresh:51,friendli:[6,21,29],from:[0,4,6,9,11,12,13,14,15,17,18,19,21,24,27,28,29,33,35,36,38,40,41,42,43,46,49,50,51,54,55,57,58,60,65,67,73,76,77,83,86,87,90,92,95,99,101,103,107,108,115,117,119,122,123,124,126,127,130,131,132,133,135,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],fromjson:15,froom:21,frozen:[9,10,11,13,14,21],fruit:[21,28],fsync:[6,46],full:[6,9,11,13,16,19,28,34,35,41,42,49,52,70,80,117,123,132,135],full_nam:166,fulli:[6,11,12,14,45,49],function_cal:12,function_nam:[13,14,19],fundament:17,further:[5,6,11,18,21,41,45,49],furthermor:[10,13,49],futur:[6,9,10,11,21,28,82,117,162],g1gc:43,game:[14,21],garbag:[11,43,45,46,87],garbage_collect:163,garbagecollect:117,gather:41,gaug:46,gaurante:0,gc_grace_second:11,gc_type:46,gce:[30,43],gcg:6,gcstat:117,gener:[0,2,4,6,8,11,12,13,14,17,21,25,26,27,28,30,43,49,52,103,139,153],genuin:23,get:[6,8,24,26,28,30,34,36,38,41,92,93,94,97,100,117],getbatchlogreplaythrottl:117,getcompactionthreshold:117,getcompactionthroughput:117,getconcurr:117,getconcurrentcompactor:117,getconcurrentviewbuild:117,getendpoint:117,getint:14,getinterdcstreamthroughput:117,getlocalhost:[6,30],getlogginglevel:117,getlong:14,getmaxhintwindow:117,getpartition:23,getreplica:117,getse:117,getsstabl:117,getstr:14,getstreamthroughput:117,gettempsstablepath:23,getter:[19,23],gettimeout:117,gettraceprob:117,gib:[62,116,166],gist:23,git:[5,24,26,28],github:[23,24,28,29],give:[18,19,21,28,29,36,52],given:[0,6,11,12,13,14,16,21,28,38,41,49,51,52,58,60,65,67,77,90,99,103,117,123,143,150,154,158,165],global:[6,52,117,141],gmt:21,goal:[6,41],gocassa:32,gocql:32,going:[6,28,41],gone:6,good:[6,23,28,29,30,52],googl:[23,52],gori:30,gossip:[2,6,30,46,50,71,81,105,117,161],gossipinfo:117,gossipingpropertyfilesnitch:[6,50],gossipstag:46,got:6,gp2:43,gpg:34,grace:45,grai:21,grain:49,grammar:[11,12],grant:[6,9,49],grant_permission_stat:12,grant_role_stat:12,granular:[6,87],graph:19,gravesit:11,great:[28,41],greater:[0,6,21,30,50,146,147],greatli:6,green:21,group:[6,10,11,19,41,46,49,50],group_by_claus:13,grow:21,guarante:[0,2,11,13,14,21,28,36,38,41,51,52],guid:[6,26],guidelin:[10,25,43],had:[9,10,41],half:[6,24,30],hand:[6,13,43],handl:[6,14,25,27,28,30,40,43,46,49,80],handoff:[6,46,51,72,106,117,148],handoffwindow:117,hang:28,happen:[6,13,23,24,28,36,41,46,50],happi:28,happili:43,hard:[6,14,41,43],harder:6,hardwar:[6,36,45],has:[0,4,6,10,11,12,13,14,18,19,21,23,28,30,41,43,46,49,50,52],hash:[4,6,41],hashcod:23,haskel:33,hasn:80,have:[0,5,6,9,10,11,12,13,14,15,18,19,21,23,24,25,26,28,29,30,31,34,38,41,42,43,46,49,50,80,139],haven:28,hayt:32,hdd:[6,43],head:28,header:[26,52],headroom:6,heap:[4,6,26,31,36,38,42,43,46],heap_buff:6,heavi:6,heavili:43,held:[6,43,117,121],help:[5,6,10,28,29,35,54,56,117,155],helper:29,henc:[5,6,11,21],here:[6,24,29,30,32,41,46,49],hex:[12,17,101],hexadecim:[10,12,101],hibern:51,hidden:51,hide:[23,25],hierarch:19,hierarchi:19,high:[0,6,30,41,43],higher:[0,19,28,38,41,46,51,119,167],highest:41,highli:[28,30,43,49],hint:[0,6,11,12,30,31,36,45,46,51,72,73,82,83,98,106,117,118,137,148,151,162,169],hintedhandoff:[6,45],hintedhandoff
manag:46,hints_creat:46,hints_directori:31,hints_not_stor:46,hintsdispatch:46,histogram:[41,46,117,120,165],histor:28,histori:[23,59,61,117],hit:[6,41,46],hitrat:46,hoc:29,hold:[0,6,10,13,19,30,41,52],home:[21,52],hope:41,hopefulli:28,host:[6,31,36,46,50,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],hostnam:[6,30,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],hot:[6,46],hotspot:11,hotspotdiagnost:49,hottest:6,hour:[6,21,28,41],hourli:80,how:[0,5,6,7,8,11,12,21,25,26,27,28,29,33,35,36,41,42,46,50,52,80],howev:[6,9,10,11,12,13,15,17,18,21,28,29,30,31,34,38,42,43,49,52],hsha:6,html:6,http:[6,23,24,26,34,46],httpadaptor:46,hub:30,human:[11,62,116,166],hypothet:24,iauthent:6,iauthor:6,icompressor:42,idea:[6,14,27,28,29,30,41,52],ideal:[6,29,41,49],idempot:[13,21],idemptot:21,ident:0,identifi:[6,9,10,11,13,14,15,16,19,20,21],idiomat:8,idl:6,ieee:[17,21],iendpointsnitch:[6,50],ignor:[0,6,10,14,21,23,52,166],iinternodeauthent:6,illeg:14,illustr:19,imag:21,imagin:41,immedi:[6,11,21,28,38,42,57,117],immut:[4,30,42,43],impact:[6,11,25,41,45,49],implement:[6,10,13,14,18,19,23,29,30,40,42,49,50],implementor:6,impli:[11,12,21],implic:[0,49],implicitli:14,import_:52,imposs:41,improv:[0,6,11,21,28,29,38,41,43,50,51,52],inact:30,includ:[4,6,10,11,12,13,18,19,21,23,28,40,41,43,46,49,52,76,133,170],included_categori:76,included_keyspac:76,included_us:76,inclus:[28,132],incom:6,incomingbyt:46,incompat:[6,10],incomplet:25,inconsist:[0,30],incorrect:30,increas:[6,11,30,38,41,42,43,46,50,51,132],increment:[6,10,13,21,28,41,68,78,117,133,139,159],incur:[13,21,46],indent:23,independ:[11,41,43,49],index:[4,6,9,10,11,12,13,15,21,36,41,45,52,117,123],index_build:163,index_identifi:16,index_nam:16,index_summari:163,index_summary_off_heap_memory_us:166,indexclass:16,indexedentrys:46,indexinfocount:46,indexinfoget:46,indexnam:123,indexsummaryoffheapmemoryus:46,indic:[5,6,12,13,23,28,30,132],indirectli:13,individu:[6,10,14,21,28,29,43,49],induc:13,inequ:[10,13],inet:[9,11,14,17,21],inetaddress:[6,30],inexpens:43,infin:[9,10,12],influenc:11,info:[6,31,46,65,117],inform:[4,6,12,13,21,35,49,50,51,52,56,59,85,105,107,108,109,116,117,138,155,157],ingest:6,ingestr:52,inher:[11,21],inherit:19,init:46,initcond:[9,14],initi:[6,14,23,25,40,46,49,52,117,150],initial_token:51,input:[9,10,14,17,21,25,52],inputd:21,inreleas:34,insensit:[11,12],insert:[6,9,10,11,12,14,15,16,19,21,30,33,36,43,52],insert_stat:[12,13],insid:[6,11,12,13,21,23,52],inspect:[6,26,52],instabl:6,instal:[6,20,30,33,36,52],instanc:[6,10,11,12,13,14,16,18,19,20,21,26,29,30,40,41,43,46],instantan:46,instanti:10,instantli:6,instead:[10,11,13,18,21,23,30,41,138,157],instruct:[6,8,11,24,26,36],instrument:49,intasblob:13,integ:[0,10,11,12,13,17,21,46],integr:[27,29,36],intellij:[23,27],intend:[25,49],intens:[6,29,30],intent:2
5,inter:[6,96,117,149],interact:[29,35,52],interest:[0,41,49],interfac:[6,10,14,23,30,31,42,49],intern:[6,9,11,13,18,21,25,30,43,46],internaldroppedlat:46,internalresponsestag:46,internet:6,internod:[6,30],internode_encrypt:[6,49],internodeconnect:[103,153],internodeus:[103,153],interpret:[10,21,52],interrupt:30,interv:[6,9,46],intra:[6,46,50],intrins:21,introduc:[6,10,17,28,51],introduct:[10,19,29],intvalu:14,invalid:[6,13,19,25,49,108,110,111,112,117],invalidatecountercach:117,invalidatekeycach:117,invalidaterowcach:117,invertedindex:20,investig:6,invoc:14,invok:[24,34,49,171],involv:[6,13,41,42,49],ioerror:23,ip1:6,ip2:6,ip3:6,ip_address:55,ipv4:[6,17,21,30],ipv6:[6,17,21],irc:[5,28,36],irolemanag:6,irrevers:[11,21],isn:[0,18,23,28,30],iso:21,isol:[6,11,13],issu:[0,6,19,24,28,29,30,38,41,42,132],item:[12,21,25,26],iter:[0,6],its:[4,6,11,12,13,14,21,26,30,41,46,49,50,51],itself:[6,11,16,30,34],iv_length:6,jaa:49,jacki:24,jamm:26,januari:21,jar:[14,23,26,46],java7:49,java:[6,14,20,21,23,26,28,33,34,36,40,41,43,46,49,117,155],javaag:26,javadoc:[23,25],javas:6,javascript:[6,14],javax:49,jbod:43,jce8:6,jce:6,jcek:6,jconsol:[36,41,49],jdk:6,jdwp:26,jenkin:[26,29],jetbrain:26,jira:[5,6,25,28,29,40],jkskeyprovid:6,jmc:[41,49],jmx:[6,19,36,45,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],jmx_password:49,jmx_user:49,jmxremot:49,job:[28,57,87,130,132,139,170],job_thread:132,john:[13,21],join:[6,8,13,36,41,49,51,117],joss:13,jpg:21,jsmith:21,json:[9,10,13,15,36,41,42,61,166,168],json_claus:13,jsr:[6,14],jsse:6,jsserefguid:6,judgement:23,junit:[23,26,29],jurisdict:6,just:[6,14,19,26,28,29,30,41,49],jvm:[6,20,26,30,31,45,49,51],jvm_extra_opt:26,jvm_opt:[31,49],jvmstabilityinspector:25,keep:[6,8,11,23,28,30,41,46,108],keepal:[6,30],kei:[4,6,9,10,13,14,17,21,29,30,34,40,41,42,43,46,49,57,95,99,101,111,117,121,141,142,166],kept:[6,41,46],kernel:[6,30],key_alia:6,key_password:6,key_provid:6,keycach:46,keycachehitr:46,keyserv:34,keyspac:[0,6,9,10,12,14,15,16,19,21,36,38,41,42,45,49,51,52,57,58,60,65,67,76,77,86,87,90,95,99,101,108,117,119,121,122,123,124,130,132,138,139,143,156,157,158,165,166,167,170,171,173],keyspace1:[6,19],keyspace2:6,keyspace_nam:[11,14,19,21,41],keystor:[6,49],keystore_password:6,keystorepassword:49,keyword:[10,11,13,14,15,16,17,21],kib:[62,116,166],kick:[117,134],kill:[6,34],kilobyt:42,kind:[11,12,21,28,40,41],kitten:21,knife:[117,155],know:[6,13,21,23,41],known:[19,21,32,35,38,41],ks_owner:49,ks_user:49,ktlist:156,kundera:32,label:[21,28],lag:46,land:42,landlin:21,lang:[36,46,49],languag:[6,9,10,12,14,20,21,32,35,36,52],larg:[6,11,13,14,21,29,36,41,43,46,52],larger:[6,29,30,41,42,43],largest:[6,46],last:[6,12,13,14,15,28,41,46,55,117],lastli:[13,21],lastnam:13,latenc:[0,6,30,46,50],later:[0,11,21,23,28,30],latest:[0,28,34,41,52,171],latter:12,layer:43,layout:11,lazi:11,lazili:11,lead:[6,10,21,41],learn:[6,29,30,52],least:[0,6,11,12,13,18,30,41,43],leav:[6,12,13,23,29,30,52],left:[6,17,41],legaci:[6,19],legal:10,length:[4,6,10,17,21,25,41],less:[6,21,28,30,38,43],let:[6,41],letter:17,level:[6,10,11,13,19,23,25,31,43,45,46,49,52,97,108,117,150],leveledcompactionstrategi:[11,38,41],lexic:30,lib:[6,20,25,26,34]
,libqtcassandra:32,librari:[8,25,29,32,46,52],licenc:25,licens:[25,26,28],life:28,lifespan:43,like:[0,6,12,13,14,17,21,23,24,25,28,29,30,36,41,42,43,49],likewis:19,limit:[6,9,10,11,18,19,21,30,40,41,42,49],line:[12,23,28,29,31,34,35,49,53,55,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],linear:43,linearli:38,link:[6,8,11,12,28,29,34],linux:[6,30],list:[4,5,6,9,10,11,12,13,14,17,26,28,29,31,34,35,36,41,49,51,52,55,57,58,59,60,65,67,73,76,77,83,86,87,90,92,95,99,100,101,103,107,108,114,115,117,119,122,123,124,127,130,131,132,133,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],list_liter:[13,21],list_permissions_stat:12,list_roles_stat:12,list_users_stat:12,listarg:14,listen:[6,36,46],listen_address:[31,35,36],listen_interfac:31,listsnapshot:117,liter:[10,12,14,17,52],littl:23,live:[13,36,41,46,51],livediskspaceus:46,livescannedhistogram:46,livesstablecount:46,load:[0,6,11,20,21,36,45,46,49,50,51,109,117,124,132,157],local:[0,6,11,26,28,29,35,43,46,49,50,52,117,126,132,136,169],local_jmx:49,local_on:[0,49,52],local_quorum:[0,52],local_read_count:166,local_read_latency_m:166,local_seri:52,local_write_latency_m:166,localhost:[6,35,49],locat:[6,33,34,42,46,49,50,52,163],lock:[6,30,46],log:[6,11,13,25,29,33,34,36,40,45,46,49,66,70,76,80,97,117,132,135,150,163],log_al:41,logback:31,logger:[23,31,76],logic:[6,20],login:[6,9,19,29,49],lol:21,longer:[6,9,10,30,41,51,57,117],look:[6,12,24,28,29,41,43],lookup:46,loop:23,lose:[6,41,51],loss:[6,21],lost:[41,51],lot:[6,35,36],low:[6,28,117,119],lower:[0,6,11,12,13,19,30,38,41,46,51],lowercas:12,lowest:[28,41],lz4:6,lz4compressor:[6,11,42],macaddr:9,machin:[6,11,29,30,46,49,50,51],made:[6,21,36,38,43,49],magnet:6,magnitud:13,mai:[0,4,6,9,10,11,13,14,16,17,19,21,25,26,28,29,30,34,38,41,49,50,51,52,139],mail:[5,28,36],main:[0,14,18,26,30,33,34,49,51,52],main_actor:13,mainli:[6,11],maintain:[6,28],mainten:46,major:[0,10,28,49,60,117],make:[0,6,8,9,20,21,23,26,28,29,30,31,34,41,49,51,52,114,117],man:6,manag:[6,19,26,29,46,49,51,56,117],mandatori:[11,14],mani:[0,6,11,23,25,28,41,42,43,46,49,52,57,60,67,77,80,86,87,132,139,158,170,171],manipul:[12,15,29,36],manual:[6,24,30],map:[6,9,10,11,13,14,17,19,36,46],map_liter:[11,16,19,21],mar:21,mark:[6,19,41,51,71,117],marker:[6,11,12,25,30],match:[6,12,13,14,17,19,46,50],materi:[6,10,11,12,15,36,46,52,117,173],materialized_view_stat:12,matter:[11,30],max:[6,36,41,46,49,52,80,90,98,117,132,143,151],max_hint_window_in_m:51,max_log_s:80,max_map_count:30,max_mutation_size_in_kb:[6,30],max_queue_weight:80,max_thread:6,max_threshold:41,maxattempt:52,maxbatchs:52,maxfiledescriptorcount:46,maxhintwindow:151,maxim:43,maximum:[4,6,14,38,46,52,80,92,117,139,145],maximum_live_cells_per_slice_last_five_minut:166,maximum_tombstones_per_slice_last_five_minut:166,maxinserterror:52,maxoutputs:52,maxparseerror:52,maxpartitions:46,maxpools:46,maxrequest:52,maxrow:52,maxthreshold:143,maxtimeuuid:10,mayb:13,mbean:[6,19,41,46,49],mbeanserv:19,mbp:6,mct:6,mean:[6,9,11,12,13,14,17,18,21,36,41,46,50,52,132],meaning:13,meanpartitions:46,meant:[21,30,46],measur:[6,25,29,46,51,52],mechan:40,median:46,meet:[6,25],megabyt:6,member:23,membership:6,memlock:30,memori:[4,6,11,36,38,41,45],memory_pool:46,memtabl:[2,6,38,40,41,42,43,46,156],memtable_allocation_typ:4,memtable_cell_count:166,memtable_cleanup_threshold:4,memtable_d
ata_s:166,memtable_off_heap_memory_us:166,memtable_switch_count:166,memtablecolumnscount:46,memtableflushwrit:46,memtablelivedatas:46,memtableoffheaps:46,memtableonheaps:46,memtablepool:6,memtablepostflush:46,memtablereclaimmemori:46,memtableswitchcount:46,mention:[6,21,28,46,49],menu:26,mere:23,merg:[24,28,38,42,43,45],mergetool:24,merkl:[6,46],mess:[28,29],messag:[6,21,25,28,34,36,46],met:13,meta:[13,46],metadata:[4,19,42,43,46],metal:6,meter:46,method:[10,13,14,19,23,25,26,29,36,49],metric:[6,45],metricnam:46,metricsreporterconfigfil:46,mib:[6,62,116,166],microsecond:[6,11,13,21,46],midnight:21,might:[6,13,41,46,55,57,58,60,65,67,73,77,80,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],migrat:[6,46,50],migrationstag:46,millisecond:[6,10,21,46,119,139,167],min:[6,30,40,41,46,52,90,117,143],min_sstable_s:41,min_threshold:41,minbatchs:52,mind:6,minim:[6,41,43],minimum:[6,11,14,31,46],minor:[10,12,45],minpartitions:46,minthreshold:143,mintimeuuid:10,minut:[6,21,41,46,80],misbehav:41,misc:[103,153],miscelen:46,miscellan:6,miscstag:46,miss:[11,41,46,51],misslat:46,mistaken:[55,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],mitig:[6,49],mix:[6,41],mmap:30,mnt:16,mock:29,mode:[6,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],model:[11,15,19,28,36,49],moder:43,modern:43,modif:[13,19],modifi:[6,9,10,11,14,19,21,28,38,41,42],modification_stat:13,modul:52,modular:25,moment:[6,28],monitor:[30,36,45,49,50,56,117],monkeyspeci:[11,18],monkeyspecies_by_popul:18,month:21,more:[0,4,6,10,11,12,13,21,23,28,29,31,35,36,38,43,45,46,49,50,51,60,86,87,117,119,132,139,155,167,171],moreov:13,most:[6,11,12,13,21,26,28,29,30,31,41,42,43,49,52,59,117,167],mostli:[6,11,21],motiv:[29,41],mount:6,move:[6,28,30,36,40,45,46,117],movement:45,movi:[13,21],movingaverag:6,mtime:11,much:[0,5,6,11,38,41,50],multi:[0,6,12,25],multilin:27,multipl:[4,6,10,11,12,13,14,21,23,25,26,28,30,31,41,43,50,122],multipli:41,murmur3partit:4,murmur3partition:[6,14,52],must:[0,6,10,11,13,14,17,18,19,23,28,29,30,31,41,46,49,51,52,156],mutant:16,mutat:[0,6,13,30,40,46,171],mutationstag:46,mv1:18,mx4j:46,mx4j_address:46,mx4j_port:46,mx4jtool:46,mxbean:19,myaggreg:14,mycolumn:17,mydir:52,myevent:13,myfunct:14,myid:12,mykei:17,mykeyspac:14,mytabl:[11,14,17,20],mytrigg:20,nairo:21,name:[6,9,10,11,12,13,14,16,17,18,19,20,21,25,26,28,29,30,31,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],names_valu:13,nan:[9,10,12],nanosecond:21,nathan:13,nativ:[6,10,12,15,17,25,30,35,46,52,69,79,117,123,160],nati
ve_transport_min_thread:6,native_transport_port:31,native_transport_port_ssl:49,native_typ:21,natur:[11,21,23,41,42],nearli:26,neccessari:6,necessari:[6,11,14,19,28,34,42,49],necessarili:[6,12,31],need:[0,6,10,11,12,13,19,21,23,25,26,28,29,30,31,34,35,38,41,42,43,49,50,52,95,99],neg:6,neglig:13,neighbour:41,neither:[18,21,49],neon:26,nerdmovi:[13,16],nest:[12,13,23],net:[6,26,30,33,34,49],netstat:[51,117],network:[6,13,30,43,49,50,116,117,120],networktopologystrategi:[11,49],never:[6,10,11,12,13,14,21,23,30,41],nevertheless:13,new_rol:19,new_superus:49,newargtuplevalu:14,newargudtvalu:14,newer:[41,43,52,87],newest:[11,41],newli:[11,21,28,40,117,124],newreturntuplevalu:14,newreturnudtvalu:14,newtuplevalu:14,newudtvalu:14,next:[6,30,35,41,52],ngem3b:13,ngem3c:13,nifti:24,nio:[6,14,46],no_pubkei:34,node:[0,4,6,11,13,14,20,21,25,29,31,32,35,36,38,40,41,43,45,46,50,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],nodej:33,nodetool:[34,36,38,42,45,49,51,53,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],nologin:9,non:[6,9,10,11,12,13,14,19,21,30,38,42,46,49,52],none:[6,11,13,21,49],nonsens:19,nor:[11,18,21],norecurs:[9,19],norm:46,normal:[14,17,26,30,34,46,51,52],noschedul:6,nosuperus:[9,19],notabl:[14,17],notat:[10,12,13,52],note:[0,5,6,10,11,12,13,14,15,17,19,21,24,28,30,41,49],noth:[6,11,14,24,29,30],notic:6,notif:8,notion:[11,12],now:[10,23,26,41,51],ntp:6,nullval:52,num_cor:52,num_token:51,number:[0,6,10,11,12,13,14,17,18,21,26,28,29,30,34,38,41,42,46,49,51,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],number_of_cor:6,number_of_dr:6,number_of_partitions_estim:166,numer:[15,38],numprocess:52,object:[6,11,12,25],objectnam:19,observ:23,obsolet:[6,43,46],obtain:[12,49],obviou:[14,24],obvious:11,occup:13,occupi:[6,46],occur:[10,12,13,20,21,30,41,43,46],occurr:21,octet:[6,50],odd:28,off:[4,6,30,42,46,49,52,117,134],off_heap_memory_used_tot:166,offer:[15,29,42],offheap:[38,43],offheap_buff:6,offheap_object:6,offici:[36,52],offset:[4,46],often:[6,11,12,23,28,29,30,41,42,43,49,50,52,80],ohc:6,ohcprovid:6,okai:23,old:[4,6,41,51,74,84,117],older:[6,14,26,34,41,43,52],oldest:[6,11],omit:[6,10,11,13,17,21,150],onc:[4,6,11,12,14,21,24,26,28,29,30,40,41,42,43,46,49,51,52],one:[0,4,6,9,10,11,12,13,14,17,18,19,21,23,26,28,29,31,36,38,41,43,46,49,50,51,52,57,60,67,77,86,87,103,117,132,139,153,156,158,170,171],oneminutecachehitr:46,ones:[6,11,12,13,14,18,19,46],ongo:[41,51],onli:[0,6,9
,11,12,13,14,17,18,19,21,23,28,29,31,36,38,41,42,43,46,49,50,52,132,156,166],onlin:52,only_purge_repaired_tombston:41,onto:[4,41],open:[5,6,26,49,50],openfiledescriptorcount:46,openjdk:34,oper:[0,6,10,11,13,16,18,19,21,23,36,38,40,43,46,49,51,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],operatingsystem:46,opertaion:6,opportun:38,ops:30,opt:14,optim:[6,11,12,13,30,41,43,51],optimis:132,option1_valu:19,option:[4,6,9,10,12,13,14,16,19,21,26,29,30,34,42,43,45,49,51,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],oracl:[6,34,49],order:[0,4,6,9,10,14,18,21,23,28,30,38,40,41,50,51,52],ordering_claus:13,orderpreservingpartition:6,org:[6,14,20,23,26,29,30,34,41,42,46,49],organ:[4,26,32],origin:[9,24,28,139],orign:13,other:[0,4,6,10,12,13,14,18,19,21,24,26,28,31,36,38,41,43,46,49,50,51,117,122,133],other_rol:19,otherwis:[0,9,12,13,16,21,92],our:[5,6,8,24,26,28,41],ourselv:24,out:[6,12,23,26,28,41,46,49,50,51,132],outbound:6,outboundtcpconnect:6,outgo:6,outgoingbyt:46,outlin:49,outofmemoryerror:36,output:[14,19,25,26,38,41,52,60,61,166,168],outsid:[11,20,21],over:[0,6,11,21,30,41,46,49,50,51],overal:14,overflow:[17,139],overhead:[6,30,42,46,51],overidden:49,overlap:[0,41],overload:[6,14,30],overrid:[6,23,49,51,139],overridden:[6,11],overview:[2,36,45],overwhelm:6,overwrit:[42,43],overwritten:[46,87],own:[0,6,11,12,14,21,28,30,34,41,42,46,49,95,101,108,117,171],owner:21,ownership:[41,138],p0000:21,pacif:21,packag:[26,30,31,33,35,52],packet:6,page:[6,21,26,28,29,30,43,46],paged_slic:46,pages:52,pagetimeout:52,pai:23,pair:[6,11,19,21,41,49],parallel:[29,41,132],paramet:[6,14,23,25,26,31,38,43,50,51,117,150],paranoid:6,parenthesi:[11,52],parnew:43,pars:[6,12,40,52],parser:[9,10,40],part:[0,5,6,11,13,14,18,21,25,26,28,29,30,50,51,52],parti:[25,46],partial:4,particip:[0,20],particular:[11,12,13,14,17,19,21,30,43,46,49],particularli:[12,21,49],partit:[4,6,10,13,14,30,38,41,43,46,87,95,99,117,139,167],partition:[4,10,13,14,52,64,117,132],partition_kei:[11,13],partli:13,pass:[25,28,31,52,155],password:[6,9,13,19,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],password_a:19,password_b:19,passwordauthent:[6,49],passwordfilepath:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,13
1,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],past:[6,46],patch:[10,13,23,24,25,27,29,36],path:[5,6,16,25,34,38,41,42,43,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],patter:19,pattern:[6,19,21],paus:[6,30,117,118],pausehandoff:117,paxo:[13,46,52],peer:[6,46],peerip:46,penalti:[6,13],pend:[41,46,117,131],pending_flush:166,pendingcompact:46,pendingflush:46,pendingrangecalcul:46,pendingtask:46,pendingtasksbytablenam:46,pennsylvania:21,peopl:[28,30],per:[0,4,6,10,11,13,23,24,28,30,38,40,41,42,46,49,52,117,140,148],percent:46,percent_repair:166,percentag:[6,46,50],percentil:46,percentrepair:46,perdiskmemtableflushwriter_0:46,perfect:14,perform:[6,11,13,19,21,24,25,27,30,31,38,41,43,46,49,50,52,132],period:[6,43,46,49,117,119],perman:[11,30,41,43],permiss:[6,9,12,29,49],permit:[6,19,40,49],persist:[4,30,38,43,49],perspect:30,pet:21,pgrep:34,phantom:32,phase:[51,52],phi:6,phone:[13,21],php:33,physic:[0,6,11,30,43,50],pick:[24,28,30,41,49,51,122],pid:[30,34],piec:[12,41,46],pile:6,pin:[6,50],ping:28,pkcs5pad:6,pkill:34,place:[5,6,16,20,23,24,28,40,41,46,49,52,117,124],placehold:[14,52],plai:[14,21],plain:4,plan:[11,24,28],platform:19,platter:[6,43],player:[14,21],playorm:32,pleas:[5,6,11,13,14,15,21,23,26,29,30],plu:[14,41,46],plug:6,pluggabl:[19,49],plugin:46,poe:21,point:[6,10,17,21,23,26,36,49,52,95,117],pointer:14,polici:[6,28,49,171],pool:[6,34,46,117,145,168],popul:[11,18],popular:[26,43],port:[6,26,31,36,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],portion:[43,52],posit:[4,6,10,11,21,38,46,51],possbili:6,possess:19,possibl:[6,10,11,13,14,17,19,21,25,28,29,30,38,41,43,46,49,51],post:[13,117,142],post_at:13,posted_at:13,posted_bi:11,posted_month:11,posted_tim:11,potenti:[0,6,9,11,12,14,25,41,43,49,51,139],power:6,pr3z1den7:21,practic:[11,12,13,49],pre:[6,17,21,43,49],preced:30,precis:[10,17,21,41],precondit:46,predefin:11,predict:13,prefer:[0,6,11,12,21,23,28,49,50],preferipv4stack:26,prefix:[11,12,21],prepar:[6,14,15,46],preparedstatementscount:46,preparedstatementsevict:46,preparedstatementsexecut:46,preparedstatementsratio:46,prepend:21,prerequisit:33,present:[12,13,18,46],preserv:[6,17,19],press:34,pressur:[6,46],pretti:52,prevent:[6,29,40],preview:132,previou:[6,10,11,21,41,51],previous:6,previsouli:[83,117],primari:[9,10,13,14,21,29,40,41,42,49,51],primarili:[6,11],primary_kei:[11,18],print:[52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,
145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],prior:[6,13,19,21],prioriti:28,privat:[6,23,49,50],privileg:[19,34,49],probabilist:[38,42],probabl:[6,11,29,38,41,104,117,154],problem:[5,6,14,24,25,30,49],problemat:21,proc:[6,30],proce:[25,42,51],procedur:[13,49],process:[0,6,14,24,25,26,28,29,30,34,40,42,43,46,49,51,52,56,92,117,118,137,145],prod_clust:52,produc:[13,14,41,80],product:[6,28,30,43,50],profil:[13,117,119],profileload:117,program:[14,29],progress:[23,24,28,38,45,117,173],project:[23,29,46],promin:11,prompt:52,propag:[6,11,14,23,25,50],proper:[11,21,30,49],properli:[6,25],properti:[6,11,19,33,40,41,49,50,51],propertyfilesnitch:[6,50],proport:[6,13],proportion:[6,89,117,140],propos:[6,46],protect:[6,43],protocol:[6,25,30,35,46,49,52,59,69,74,79,84,117,160],provid:[0,5,6,11,12,13,14,15,17,21,26,28,35,40,41,42,43,46,49,50,51,53,116,117,127,131],proxim:[6,50],proxyhistogram:117,prv:132,ps1:49,ps22dhd:13,pt89h8m53:21,pull:[29,41,46,132],purg:43,purpos:[11,12,13,21,43,49],push:[24,28,46],put:[15,28,31,41,51,108,132],pwf:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],python:[14,28,29,33,34,52],quak:[14,21],qualifi:[6,11,14,28],qualiti:49,quantiti:21,queri:[6,10,11,12,13,14,16,18,19,33,36,41,46,52,70,80,117,135],question:[8,19,36],queu:[6,46],queue:[6,46,80],quick:[108,171],quickli:[30,41],quill:32,quintana:21,quit:[41,52],quorum:[0,49,52],quot:[9,10,11,12,14,17,19,52],quotat:19,quoted_identifi:12,quoted_nam:11,race:[21,24],rack1:6,rack:[0,6,49,50],rackdc:[6,50],rackinferringsnitch:[6,50],raid0:43,raid1:43,raid5:43,rain:12,rais:[12,30],raison:9,ram:[38,42,43],random:[11,14,30,51],randomli:[0,6,51],randompartition:[6,13,14],rang:[2,6,10,11,13,21,25,41,45,46,52,60,65,103,117,122,132,153],range_slic:46,rangekeysampl:117,rangelat:46,rangemov:51,rangeslic:46,rapid:43,rare:[10,38],raspberri:43,rate:[6,11,46,49,52],ratebasedbackpressur:6,ratefil:52,rather:[13,30,41,43],ratio:[6,42,43,46],raw:[6,14],reach:[6,28,30,40,41],read:[0,6,11,13,21,23,25,29,30,33,36,38,41,42,43,45,46,49,50,52,103,153,166,171],read_lat:166,read_repair:46,read_repair_ch:[0,6,11,41,50],read_request_timeout:30,readabl:[11,62,116,166],readi:[28,49],readlat:46,readrepair:46,readrepairstag:46,readstag:46,readwrit:49,real:[8,11,23,30],realiz:41,realli:[6,29,31],reason:[0,6,13,14,15,30,31,34,41,43,49,51],rebuild:[38,41,42,46,117,123,139],rebuild_index:117,receiv:[6,14,28,30,41,43],recent:[6,28,29,43,59],reclaim:41,recogn:[13,26,28],recommend:[6,11,21,30,43,49,51],recompact:41,recompress:42,reconnect:49,record:[11,13,21,28,41],recov:[6,30,41],recoveri:6,recreat:52,recurs:80,recv:34,recycl:[6,46],redistribut:6,redo:28,reduc:[6,30,41,42,63,89,117,132,140],reduct:6,redund:[0,6,23,25,28,43],reenabl:[79,81,82,117],refactor:40,refer:[6,11,12,13,14,15,21,23,29,30,34,35,52],referenc:6,reflect:41,refresh:[6,49,52,117,125],refreshsizeestim:117,refus:36,regard:[11,13],regardless:[0,6,19,28],regener:38,regexp:12,region:[6,50],regist:21,registri:49,regress:[25,29],regular:[9,12,26,29,30,46,52],regularstatementsexecut:46,reinsert:139,reject:[6,13,30,40,49],rel:[6,21,52],relat:[8,10,12,13,26,28,41,46],
releas:[6,10,34,52],relev:[13,19,21,28,42,49],reli:[6,14,21,30,51],reliabl:41,reload:[6,117,126,127,128,129],reloadlocalschema:117,reloadse:117,reloadssl:117,reloadtrigg:117,reloc:[117,130,163],relocatesst:117,remain:[6,13,14,21,24,41,46,51,166],remaind:[17,42],remedi:41,remot:[0,24,26,36,41,49,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],remov:[4,6,10,11,12,13,14,15,17,21,25,30,36,40,45,49,55,58,87,117,131],removenod:[51,55,117],renam:[9,21],reorder:6,repair:[0,4,6,11,30,36,42,45,46,50,51,108,117,133,150,171],repair_admin:117,repeat:[12,34,42,49],replac:[6,9,14,19,21,25,30,36,41,45,80],replace_address_first_boot:51,replai:[0,21,43,46,89,117,134,140],replaybatchlog:117,replic:[2,6,11,36,41,43,49,51,55,117],replica:[0,6,11,13,30,41,46,50,51,63,99,117],replication_factor:[0,11,49],repo:[24,26],report:[28,36,45],report_writ:19,reportfrequ:52,repositori:[5,8,26,28,29,34],repres:[6,10,17,19,21,30,41,46,49,50,52],represent:[10,17],request:[0,6,13,19,20,29,30,38,41,43,45,49,50,52,117,154,170],request_respons:46,requestresponsestag:46,requestschedul:6,requesttyp:46,requir:[0,6,11,13,14,19,23,24,25,26,28,30,38,42,43,49],require_client_auth:6,require_endpoint_verif:6,resampl:6,reserv:[6,10,12,15],reset:[6,13,117,136,150],reset_bootstrap_progress:51,resetfullquerylog:117,resetlocalschema:117,resid:[6,13,30,46],resolut:[6,13,30],resolv:[24,30,138,157],resort:[55,117],resourc:[19,49],resp:14,respect:[6,10,14,34,50,80],respond:[0,6,12],respons:[0,6,19,30,46,51],ressourc:21,rest:[6,11,12,21,25,51],restart:[30,41,49,51,117,124,142],restor:[41,51,52],restrict:[10,11,13,18,19],result:[0,6,8,10,11,12,14,17,19,21,28,30,41,46,52],resum:[56,117,137],resumehandoff:117,resurrect:41,resync:[117,136],retain:[30,41],rethrow:23,retri:[0,6,21,46,80],retriev:[11,13,19],reus:25,revers:13,review:[11,23,27,28,29,36],revok:[9,49],revoke_permission_stat:12,revoke_role_stat:12,rewrit:[38,41,42,117,139,170],rewritten:[43,139],rfc:[14,21],rhel:36,rich:21,rider:21,riderresult:21,right:[6,26,30,52],ring:[2,6,36,49,51,52,113,115,117,150],risk:11,rmem_max:6,rmi:[30,49],robin:6,rogu:14,role:[6,9,10,12,15,45],role_a:19,role_admin:19,role_b:19,role_c:19,role_manag:49,role_nam:19,role_opt:19,role_or_permission_stat:12,role_permiss:6,roll:[30,49,80],roll_cycl:80,romain:21,root:[6,24,28,34],rotat:6,roughli:6,round:[6,13,41,46],roundrobin:6,roundrobinschedul:6,rout:[6,50],row:[0,4,6,10,11,13,14,15,17,18,29,35,38,42,43,46,52,87,108,112,117,139,141,142],rowcach:46,rowcachehit:46,rowcachehitoutofrang:46,rowcachemiss:46,rowindexentri:46,rows_per_partit:11,rpc:[6,46],rpc_min:6,rpc_timeout_in_m:[103,153],rsc:171,rubi:[14,33],rule:[6,12,14,28,30],run:[5,6,12,21,24,26,28,30,31,34,41,43,46,49,51,108,117,132,155],runtim:[6,33,97,117],runtimeexcept:23,rust:33,safe:[6,14,21,41,49],safeguard:43,safeti:[41,51],sai:36,said:[11,28,30,117,170],same:[0,5,6,11,12,13,14,15,17,18,19,21,24,26,28,31,36,38,41,46,49,50,132],sampl:[4,6,12,14,46,52,80,117,119,121,167],sampler:[46,119,167],san:43,sandbox:[6,14],sasi:6,satisfi:[0,23,43,46,51],satur:[6,46],save:[6,13,21,30,31,38,42,43,51,117,142],saved_cach:6,saved_caches_directori:31,sbin:30,scala:[14,33],scalar:15,scale:[6,29,42],scan:[6,13,38,
46],scenario:24,scene:30,schedul:6,schema:[0,9,11,14,17,46,52,64,117,126,136],schema_own:19,scope:[19,46,49],score:[6,14,21,50],script:[6,14,26,29,80],scrub:[38,41,42,46,117,163],search:28,second:[6,11,12,13,21,30,40,43,49,52,117,140,148],secondari:[10,12,13,15,36,41,46,117,123],secondary_index_stat:12,secondaryindexmanag:46,section:[2,5,7,10,11,12,13,15,19,21,30,33,34,35,41,46,49,51,53],secur:[6,14,15,36,45],see:[0,4,6,10,11,12,13,14,17,19,21,26,28,35,36,40,41,46,49,51,52,87,117,132],seed:[6,31,36,50,100,117,127],seedprovid:6,seek:[6,43,46],seen:[6,11],segment:[4,6,40,46,52,80],select:[6,9,10,11,12,14,15,19,26,29,30,35,38,41,49,52,122],select_claus:13,select_stat:[12,18],self:25,selinux:30,semant:[10,13,14],semi:30,send:[6,8,30],sens:[6,10,13,15,30],sensic:14,sensit:[11,12,14,17],sensor:21,sent:[0,6,21,30,46],separ:[4,6,11,13,23,28,31,41,43,49,51,52,55,57,58,60,65,67,73,76,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],seq:[6,132],sequenc:12,sequenti:[6,43,132],seren:13,seri:[11,41,52],serial:6,serializingcacheprovid:6,serv:[13,43,49],server:[6,12,13,21,26,29,30,43,46,49],server_encryption_opt:49,servic:[6,26,34,49,51],session:[6,19,49,117,133],set:[0,6,9,10,11,12,13,14,17,18,25,27,28,29,31,36,38,40,41,42,43,46,49,50,51,52,57,76,87,117,130,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,170],set_liter:21,setbatchlogreplaythrottl:117,setcachecapac:117,setcachekeystosav:117,setcompactionthreshold:[41,117],setcompactionthroughput:[41,117],setconcurr:117,setconcurrentcompactor:117,setconcurrentviewbuild:117,sethintedhandoffthrottlekb:117,setint:14,setinterdcstreamthroughput:117,setlogginglevel:117,setlong:14,setmaxhintwindow:117,setstr:14,setstreamthroughput:117,setter:[19,23],settimeout:117,settraceprob:117,setup:[28,29,49],sever:[4,13,19,41,49],sfunc:[9,14],sha:24,shadow:41,share:[11,13,26],sharedpool:52,sharp:32,shed:30,shell:[35,36,53],shift:21,ship:[29,35,49,52],shortcut:18,shorter:49,shorthand:52,should:[0,5,6,10,11,12,13,14,17,19,21,25,26,28,29,30,31,32,33,35,38,41,42,43,46,49,50,51,52,122,132,153],shouldn:11,show:[19,36,51,65,85,105,117,121,131,138,157,158,166,173],shown:[12,52,166],shrink:6,shut:6,shutdown:[6,43],side:[11,13,17,21,49],sign:[13,21,30],signal:[117,128],signatur:[34,40],signific:[6,26,28,29,43],significantli:6,silent:14,similar:[6,13,14,42,43],similarli:[0,10,17,23,43,117,122],simpl:[6,11,26,29,49],simple_classnam:29,simple_select:13,simplequerytest:29,simplereplicationstrategi:49,simpleseedprovid:6,simplesnitch:[6,50],simplestrategi:11,simpli:[0,6,11,13,14,17,21,26,29,41,43,46,51,171],simul:29,simultan:[6,43,52,57,87,130,139,170],sinc:[6,11,13,14,21,26,30,34,41,46,51],singl:[0,6,10,11,12,13,14,17,18,19,21,23,28,31,35,36,45,46,49,50,52,60],singleton:25,situat:[6,29,41],size:[4,6,11,21,23,30,31,38,40,42,43,45,46,49,52,80,114,117],size_estim:[117,125],sizetieredcompactionstrategi:[11,41],sjk:117,skip:[6,13,46,51,52,139,156],skipcol:52,skiprow:52,sks:34,sla:25,slash:12,slf4j:23,slightli:6,slow:[6,50],slower:[6,11,38],slowest:6,slowli:[6,21],small:[6,11,13,21,30,41,43],smaller:[6,30,41,43,52],smallest:[0,11,14,46],smallint:[9,10,14,17,21],smith:21,smoother:10,smoothli:6,snappi:6,snappycompressor:[11,42],snapshot:[6,26,46,58,114,117,139],snapshot_nam:58,snapshotnam:[58,117],snitch:[6,36,45,64,117],socket:[6,49,153],sole:11,solid:[6,43],some:[0,6,9,11,12,13,14,21,26,28,29,30,31,40,41,42,46,49,51,52],some_funct:14,some_nam
:12,someaggreg:14,somearg:14,somefunct:14,someon:[24,41],sometim:[6,12,13],someudt:14,somewher:34,soon:49,sooner:6,sort:[4,11,13,21,41,43,166],sort_kei:166,sourc:[5,6,8,14,27,34,46,122],source_elaps:52,space:[6,23,30,40,41,43,46],space_used_by_snapshots_tot:166,space_used_l:166,space_used_tot:166,span:[6,13,41],sparingli:13,spark:32,spec:[25,35,46,52],speci:[11,18],special:[12,13,29,30,41,46],specif:[6,9,11,12,13,19,21,26,28,30,32,40,41,46,49,52,117,122,132],specifc:46,specifi:[0,6,10,11,12,13,14,16,18,19,21,26,30,35,40,41,42,46,49,51,52,58,60,101,117,122,132,138,151,153,156,163,166,169],specific_dc:132,specific_host:132,specific_keyspac:122,specific_sourc:122,specific_token:122,specul:[0,46],speculativeretri:46,speed:[6,36],spent:46,spike:30,spin:[6,43],spindl:6,spirit:[6,50],split:[23,30,41,46,52,60],spread:[6,50],sql:[13,15],squar:12,squash:28,src:122,ssd:[6,16,43],ssl:[6,30,45,52,117,128],ssl_storage_port:50,sss:17,sstabl:[2,6,11,30,38,42,43,45,57,60,87,101,108,114,117,124,130,139,170,171],sstable_compression_ratio:166,sstable_count:166,sstable_s:41,sstable_size_in_mb:41,sstableexpiredblock:41,sstablesperreadhistogram:46,sstablewrit:23,stabil:28,stabl:[34,52],stack:6,stage:[28,92,117,145],stai:[36,41],stale:49,stall:[6,51],stand:[6,29],standalon:29,standard:[6,21,30,34,46],start:[0,6,9,13,27,30,31,34,36,41,43,46,49,51,60,132,163],start_token:[60,132],start_token_1:122,start_token_2:122,start_token_n:122,starter:28,startup:[6,20,26,30,41,46,51],starvat:6,state:[6,14,38,41,43,46,51,117,157],statement:[6,9,10,11,13,14,15,16,17,19,20,21,25,27,28,38,41,46,49,52],static0:11,static1:11,statist:[4,41,46,52,62,88,117,120,165,166,168],statu:[19,25,28,30,34,52,117,131,158,159,160,161,162,171],statusautocompact:117,statusbackup:117,statusbinari:117,statusgossip:117,statushandoff:117,stc:11,stdin:52,stdout:52,step:[6,26,31,49],still:[0,6,10,13,14,17,21,23,49,51,52],stop:[6,34,52,75,117,135,164],stop_commit:6,stop_paranoid:6,stopdaemon:117,storag:[2,11,15,16,28,30,36,42,43,45],storage_port:[31,50],storageservic:[6,23],store:[0,4,6,10,11,12,13,21,36,38,41,42,43,46,49,52,72,80,82,117,162],store_typ:6,straight:51,straightforward:40,strategi:[0,6,11,45,50],stream:[4,6,36,41,42,45,56,96,102,117,122,132,149,150,152,153],street:21,strength:6,strict:[10,41],strictli:[8,11,14],string:[6,10,11,12,13,14,16,17,19,20,21,46,52,101],strong:0,strongli:[6,11,12,49],structur:[4,6,9,19,25,38,46],stub:49,style:[6,25,26,27,28,29,36],stype:[9,14],sub:[11,13,21,34,41],subclass:6,subdirectori:[6,20],subject:[6,14,49],submiss:[6,28],submit:[28,29,36,60],subscrib:8,subscript:8,subsequ:[6,13,30,41,42],subset:[19,41,52],substitut:34,subsystem:49,subvert:41,succed:46,succesfulli:46,success:[0,52],sudden:6,sudo:[30,34],suffici:[6,43],suggest:[12,28,43],suit:[6,28,29,49],suitabl:[13,14,25,28],sum:40,summari:[4,6,46],sun:[23,49],sunx509:6,supercolumn:9,supersed:[10,139],superus:[9,19,49],suppli:[13,24],support:[0,6,9,10,11,12,13,14,15,16,18,19,21,28,29,30,32,36,41,49,52,139,163],suppos:13,sure:[6,8,23,26,28,29,30,31,34,41],surplu:30,surpris:0,surprisingli:6,surround:[17,52],suscept:14,suspect:[5,28],suspend:26,swamp:30,swap:6,swiss:[117,155],symmetri:17,symptom:30,sync:[6,30,46,132],synchron:6,synonym:19,synopsi:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,1
48,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],syntact:[11,19],syntax:[10,12,13,14,19,21,41,42],sys:6,sysctl:30,sysintern:6,system:[6,11,14,19,29,30,31,35,41,43,46,49,52,91,93,94,96,102,108,117,124,125,126,144,146,147,149,152],system_auth:[6,49],system_trac:132,tab:23,tabl:[0,4,6,9,10,12,13,14,15,16,17,18,19,20,21,29,38,41,42,45,49,52,57,60,67,75,77,86,87,90,95,99,108,117,123,124,126,130,132,139,143,156,158,163,165,166,170,171],table1:19,table_nam:[11,13,16,19,20,41,166],table_opt:[11,18],tablehistogram:117,tablestat:117,tag:[21,25,28,156],take:[6,10,11,13,14,21,25,26,28,30,38,41,42,43,51,117,156],taken:[6,40,41,46],tar:34,tarbal:[31,33,52],target:[11,19,26,29,41],task:[6,26,28,46,52],tcp:[6,30],tcp_keepalive_intvl:30,tcp_keepalive_prob:30,tcp_keepalive_tim:30,tcp_nodelai:6,tcp_wmem:6,teach:[6,50],team:30,technetwork:6,technic:[11,15],technot:6,tee:34,tell:[6,13,25,30,31,46],temporari:49,temporarili:6,tenanc:6,tend:[6,30,43],tendenc:6,terabyt:42,term:[6,13,14,15,18,21],termin:[12,52],ternari:23,test:[6,8,23,25,27,28,35,36,43,52],test_keyspac:49,testabl:[25,28],testbatchandlist:29,testmethod1:29,testmethod2:29,testsom:29,teststaticcompactt:29,text:[4,9,11,12,13,14,17,21,40,42,49],than:[0,6,11,12,13,14,15,18,21,23,28,36,41,42,43,49,50,51,133,146,147],thei:[6,9,10,11,12,13,14,15,18,19,21,23,25,28,29,36,38,41,42,43,46,49],them:[6,10,11,13,14,21,23,28,29,30,35,38,41,46,49,117,170],themselv:[13,19],theoret:11,therefor:[28,29,49],thi:[0,2,4,5,6,7,10,11,12,13,14,15,17,18,19,21,23,24,25,26,28,29,30,31,33,34,36,38,40,41,42,43,46,49,50,51,52,53,54,55,57,58,60,63,65,67,73,77,83,86,87,89,90,92,95,99,101,103,107,108,115,117,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],thing:[6,21,24,28,30,33,41],think:6,third:[21,25,46],thobb:52,those:[11,12,13,14,16,17,18,19,21,28,30,40,41,49,52,170],though:[6,10,12,21,36,41,42,46],thousand:52,thousandssep:52,thread:[6,43,46,49,57,87,117,130,132,139,148,168,170],threadpool:45,threadpoolnam:46,threadprioritypolici:26,three:[0,6,38,41,42,49,52],threshold:[4,40,43,50,90,117,143,150],thrift:[6,9,11,15,30,46],throttl:[6,89,117,140,144,148,149,152],throttle_limit:6,through:[0,5,9,10,11,12,13,26,28,30,35,40,41,52],throughout:49,throughput:[0,6,41,42,43,46,91,96,102,117,144,149,152],throwabl:[25,29],thrown:21,thu:[6,10,11,12,13,18,21,30,46,50,51,117,170],thumb:[6,28],thusli:21,tib:[62,116,166],ticket:[5,24,25,28,29,40],tie:30,tier:45,ties:13,tighter:6,tightli:6,tild:52,time:[0,6,8,9,10,11,12,13,15,16,17,18,23,25,26,28,29,30,38,40,42,45,46,49,52,117,119],timehorizon:6,timelin:11,timeout:[6,21,30,46,52,103,117,153],timeout_in_m:153,timeout_typ:[103,153],timer:[6,46],timestamp:[4,9,10,11,13,14,15,17,36,41,52,139],timeunit:41,timeuuid:[9,10,11,17,21],timewindowcompactionstrategi:11,timezon:[17,52],tini:[6,41],tinyint:[9,10,14,17,21],tjake:23,tls_dhe_rsa_with_aes_128_cbc_sha:6,tls_dhe_rsa_with_aes_256_cbc_sha:6,tls_ecdhe_rsa_with_aes_128_cbc_sha:6,tls_ecdhe_rsa_with_aes_256_cbc_sha:6,tls_rsa_with_aes_128_cbc_sha:6,tls_rsa_with_aes_256_cbc_sha:6,toc:4,todai:12,todat:14,todo:[25,29],togeth:[6,11,13,14,41],toggl:49,tojson:15,token:[2,4,6,9,10,12,13,30,41,46,52,60,65,108,109,115,117,122,132,138,171],toler:38,tom:13,tombston:[4,6,11,17,30,45,46,87,139],tombstone_compact:163,tombstone_compaction_interv:41,tombstone_threshold:41,tombstonescannedhistogram:46,ton:29,too:[6,11,12,14,21,25,41],tool:[6,12,28,30,36,41,46,49,51],top:[13,21,28
,36,46,119,166,167],topcount:[119,167],topic:52,topolog:[6,50,138],toppartit:117,total:[6,13,40,41,46,114,117],totalblockedtask:46,totalcommitlogs:46,totalcompactionscomplet:46,totaldiskspaceus:46,totalhint:46,totalhintsinprogress:46,totallat:46,totimestamp:14,touch:[8,30,41],tough:29,tounixtimestamp:14,tour:21,toward:11,tpstat:117,trace:[6,46,104,117,132,154],track:[6,41,46],tracker:28,tradeoff:[0,6],tradit:[41,42],traffic:[6,50],trail:23,transact:[13,20,46,163],transfer:[6,30,49],transform:13,transit:[10,19],translat:6,transpar:[6,30],transport:[6,26,46,69,79,117,160],treat:[0,6,10,30,50],tree:[6,26,46],tri:41,trigger:[4,6,9,12,15,36,38,42,45,57,117,129],trigger_nam:20,trigger_stat:12,trip:[6,13],trivial:49,troubleshoot:[25,36],truediskspaceus:[114,117],truesnapshotss:46,truli:9,truncat:[6,9,10,15,19,103,117,153,169],truncate_stat:12,truncatehint:117,trunk:[24,25,26,28],trust:49,trustor:6,truststor:[6,49],truststore_password:6,truststorepassword:49,tserverfactori:6,ttl:[4,6,9,10,11,14,17,21,45,139],tty:52,tunabl:2,tune:[30,38,41,43],tupl:[6,9,10,12,13,14,15,17],tuple_liter:[12,13],tuple_typ:21,tuplevalu:[10,14],turn:[0,6,28,30,49],twc:[11,41],twice:[6,21],two:[0,6,11,12,13,14,17,26,36,38,41,43,49,50,52],txt:[4,14,24,25,28],type:[0,6,10,11,12,13,14,15,19,25,34,36,43,45,49,52,103,117,153,163],type_hint:12,typeasblob:14,typecodec:14,typic:[0,6,13,30,38,41,43,46,49,52],ubuntu:26,udf:[6,14],udf_stat:12,udfcontext:[10,14],udt:[14,17],udt_liter:12,udt_nam:21,udt_stat:12,udtarg:14,udtnam:14,udtvalu:[10,14],ulimit:30,unabl:[6,25,36],unaffect:21,unavail:[6,11,46,49,51],unblock:46,unbound:21,unchecked_tombstone_compact:41,uncom:[6,46,49],uncommon:28,uncompress:[6,42,46],undelet:41,under:[6,21,23,29,46,49],underli:[6,18,41,49],understand:[6,28,30],unencrypt:[6,49],unexpectedli:21,unfinishedcommit:46,unflush:[40,156],unfortun:29,uniqu:[11,14,21],unit:[21,25,27,41,117,141],unixtimestampof:[10,14],unless:[6,11,13,16,18,19,21,23,40,49,50],unlik:[6,10,13,21],unlimit:[6,30,52],unlog:9,unnecessari:[25,51],unnecessarili:40,unpredict:13,unprepar:46,unquot:12,unquoted_identifi:12,unquoted_nam:11,unrel:28,unreleas:28,unrepair:45,unsecur:49,unset:[6,10,13,17],unsign:21,unspecifi:6,unsubscrib:[8,36],untar:34,until:[0,6,21,38,40,41,42,49,50],unus:6,unusu:25,updat:[6,9,10,11,12,14,15,17,18,19,21,25,28,29,34,36,41,42,46,49,52],update_paramet:13,update_stat:[12,13],upgrad:[6,41,117,170],upgrade_sst:163,upgradesst:[38,41,42,117],upload:28,upon:[6,21,38,42],upper:[12,17,41,49],ups:43,upstream:28,uptim:[109,117],url:24,usag:[4,6,11,21,36,38,40,42,46,52],use:[6,9,10,11,12,13,14,16,17,18,19,21,23,25,26,28,29,31,34,35,36,38,40,41,43,46,49,50,51,52,57,87,100,117,119,130,139,167,170],use_stat:12,usecas:41,useconcmarksweepgc:26,usecondcardmark:26,used:[0,4,6,9,10,11,12,13,14,15,16,17,18,19,21,25,26,28,29,30,41,43,46,49,50,51,52,55,57,58,60,65,67,73,76,77,83,86,87,90,92,95,99,101,103,107,108,115,117,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],useecassandra:49,useful:[0,6,11,14,28,41,42,46,51,52,55,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],useparnewgc:26,user1:13,user2:13,user3:13,user4:13,user:[5,6,8,9,10,11,12,13,15,16,17,18,25,28,30,34,38,41,42,43,49,52,60,76,117],user_count:13,user_defined_typ:21,user_funct:19,user_nam:13,user_occup:13,user_opt:19,useract:13,user
id:[11,13,14],userindex:16,usernam:[6,13,14,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],uses:[0,4,6,11,12,13,14,16,19,20,29,30,49],usethreadprior:26,using:[4,6,10,11,12,13,14,18,19,21,26,28,29,33,34,35,36,38,42,43,45,46,49,51,52,60,122,139,156],usr:52,usual:[6,13,21,24,29,38,49,132],utc:[17,52],utd:11,utf8:[21,52],utf8typ:9,utf:52,util:[14,25,41,52],uuid:[9,10,11,12,17,21],val0:11,val1:11,val:14,valid:[6,10,11,12,13,14,17,21,30,41,42,46,49,52,132,139,163],validationexecutor:46,valu:[6,9,10,11,12,13,14,16,17,21,25,26,30,38,41,46,49,50,52,76,104,108,117,140,144,146,147,148,149,151,152,153,154],value1:13,value2:13,value_in_kb_per_sec:[140,148],value_in_m:151,value_in_mb:[144,149,152],valueof:14,varchar:[9,11,14,17,21],vari:[6,42],variabl:[6,10,12,17,21,26,33],variant:12,varieti:40,varint:[9,11,14,17,21],variou:[26,29,43,49],veri:[6,11,13,28,29,30,38,41,42,43],verifi:[28,30,32,34,42,108,117,163],version:[5,6,9,11,14,15,21,26,28,32,34,41,46,51,59,64,74,84,117,170,171],vertic:52,via:[6,8,10,19,25,30,31,41,42,43,46,49,50],view:[6,10,11,12,15,19,36,46,52,94,117,147,173],view_build:163,view_nam:18,viewbuildstatu:117,viewlockacquiretim:46,viewmutationstag:46,viewpendingmut:46,viewreadtim:46,viewreplicasattempt:46,viewreplicassuccess:46,viewwrit:46,viewwritelat:46,virtual:[0,6,30,41,46,51],visibl:[11,19,23,38],vnode:[6,42],volum:[6,40,42],vulner:[6,49],wai:[4,6,12,15,17,18,21,24,26,29,30,41,42,132],wait:[0,6,11,28,30,46,117,134],waitingoncommit:46,waitingonfreememtablespac:46,waitingonsegmentalloc:46,want:[6,11,13,26,28,29,30,49,51],warmup:[117,142],warn:[6,11,23,29,45,132],washington:21,wasn:10,wast:6,watch:29,weaker:0,websit:[29,34],week:21,weight:[6,46,80],welcom:8,well:[6,11,13,14,17,21,25,26,40,42,43,49,50,117,135],went:46,were:[6,9,10,19,25,26,41,46],what:[11,13,21,27,29,31,36,41,43,49,52],whatev:[10,13,30],whedon:13,when:[4,6,9,10,11,12,13,14,15,16,17,19,21,23,25,28,29,31,36,38,40,42,43,45,46,49,50,51,52,55,57,58,60,63,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],where:[0,4,6,9,10,11,12,14,16,17,18,19,21,25,29,31,34,38,41,42,49,51,52,80,132],where_claus:13,wherea:[21,49],whether:[0,6,9,11,13,26,41,50,52,80],which:[0,4,5,6,10,11,12,13,14,15,16,18,19,20,21,28,29,30,31,34,35,38,40,41,42,43,46,49,50,51,60,95,99,108,114,117,122,132],whichev:[0,6],whitelist:49,whitespac:27,who:[19,28,30],whole:[6,11,13,14,21,41],whose:[11,21,163],why:[25,28,36],wide:[4,40],width:12,wiki:[6,26],wildcard:[13,19],window:[0,6,45,46,49,98,106,117,151],winner:30,wip:[26,28],wipe:[30,51],wire:30,wise:11,wish:[6,41,46],within:[0,4,6,11,12,13,16,28,30,41,43,46,49],withing:6,without:[6,11,12,13,14,19,21,24,26,28,29,30,40,43,46,49,52,55,108,117,124],wmem_max:6,won:[6,13,24],wont:41,word:[10,11,12,18,19,21,30],work:[6,10,11,14,15,17,23,24,26,27,29,30,41,43,46,49,50,51,52],worker:52,workload:[6,25,38,41,43],workspac:26,worktre:26,worri:[28,30],wors:[6,50],worst:[6,28],worthwhil:6,would:[6,12,13,14,17,19,26,28,29,36,41,42,43,49,50],wrap:50,write:[0,4,6,10,11,13,21,23,25,29,30,40,41,42,43,46,49,50,5
1,52,75,103,117,153,166],write_lat:166,write_request_timeout:30,writelat:46,writer:[6,23],writetim:[9,14],writetimeoutexcept:6,written:[4,6,20,30,38,41,42,46],wrong:6,wrte:46,www:[6,11,34],xlarg:43,xml:31,xmn220m:26,xms1024m:26,xmx1024m:26,xmx:43,xss256k:26,xvf:34,yaml:[6,14,31,34,46,49,50,51,61,76,80,117,135,166,168],year:[13,21],yes:[9,11,49],yet:[11,46],yield:[13,51],you:[5,6,8,10,11,12,13,14,16,17,18,20,21,23,24,26,27,29,30,31,32,33,34,35,36,41,46,49,50,51,52,55,117,156],younger:14,your:[0,5,6,8,10,11,12,23,26,28,29,30,31,34,36,41,43,49,50,52],yourself:[24,29],yyyi:[17,21],z_0:[11,16,18],zero:[6,10,30,46,50],zip:21,zipcod:21,zone:[6,21,50],zzzzz:28},titles:["Dynamo","Guarantees","Architecture","Overview","Storage Engine","Reporting Bugs and Contributing","Cassandra Configuration File","Configuring Cassandra","Contact us","Appendices","Changes","Data Definition","Definitions","Data Manipulation","Functions","The Cassandra Query Language (CQL)","Secondary Indexes","JSON Support","Materialized Views","Security","Triggers","Data Types","Data Modeling","Code Style","How-to Commit","Review Checklist","Building and IDE Integration","Cassandra Development","Contributing Code Changes","Testing","Frequently Asked Questions","Configuring Cassandra","Client drivers","Getting Started","Installing Cassandra","Inserting and querying","Welcome to Apache Cassandra\u2019s documentation!","Backups","Bloom Filters","Bulk Loading","Change Data Capture","Compaction","Compression","Hardware Choices","Hints","Operating Cassandra","Monitoring","Read repair","Repair","Security","Snitch","Adding, replacing, moving and removing nodes","cqlsh: the CQL shell","Cassandra Tools","Nodetool","assassinate","bootstrap","cleanup","clearsnapshot","clientstats","compact","compactionhistory","compactionstats","decommission","describecluster","describering","disableauditlog","disableautocompaction","disablebackup","disablebinary","disablefullquerylog","disablegossip","disablehandoff","disablehintsfordc","disableoldprotocolversions","drain","enableauditlog","enableautocompaction","enablebackup","enablebinary","enablefullquerylog","enablegossip","enablehandoff","enablehintsfordc","enableoldprotocolversions","failuredetector","flush","garbagecollect","gcstats","getbatchlogreplaythrottle","getcompactionthreshold","getcompactionthroughput","getconcurrency","getconcurrentcompactors","getconcurrentviewbuilders","getendpoints","getinterdcstreamthroughput","getlogginglevels","getmaxhintwindow","getreplicas","getseeds","getsstables","getstreamthroughput","gettimeout","gettraceprobability","gossipinfo","handoffwindow","help","import","info","invalidatecountercache","invalidatekeycache","invalidaterowcache","join","listsnapshots","move","netstats","Nodetool","pausehandoff","profileload","proxyhistograms","rangekeysample","rebuild","rebuild_index","refresh","refreshsizeestimates","reloadlocalschema","reloadseeds","reloadssl","reloadtriggers","relocatesstables","removenode","repair","repair_admin","replaybatchlog","resetfullquerylog","resetlocalschema","resumehandoff","ring","scrub","setbatchlogreplaythrottle","setcachecapacity","setcachekeystosave","setcompactionthreshold","setcompactionthroughput","setconcurrency","setconcurrentcompactors","setconcurrentviewbuilders","sethintedhandoffthrottlekb","setinterdcstreamthroughput","setlogginglevel","setmaxhintwindow","setstreamthroughput","settimeout","settraceprobability","sjk","snapshot","status","statusautocompaction","statusbackup","statusbinary","statusgossip","statushandoff","stop","stopda
emon","tablehistograms","tablestats","toppartitions","tpstats","truncatehints","upgradesstables","verify","version","viewbuildstatus","Troubleshooting"],titleterms:{"class":50,"function":[13,14,17],"import":[23,108],"long":29,"new":30,"static":11,"switch":41,Adding:51,IDE:26,IDEs:23,LCS:41,TLS:49,The:[11,13,15,17,41],USE:11,Use:42,Uses:42,Using:26,Will:30,With:49,access:49,add:30,address:30,advanc:42,after:51,aggreg:14,alias:13,all:[19,30],alloc:51,allocate_tokens_for_keyspac:6,allow:13,alter:[11,18,19,21],ani:30,apach:36,appendic:9,appendix:9,architectur:2,ask:30,assassin:55,assign:51,auth:49,authent:[6,19,49],author:[6,49],auto_snapshot:6,automat:19,avg:14,back_pressure_en:6,back_pressure_strategi:6,backup:37,batch:[13,30],batch_size_fail_threshold_in_kb:6,batch_size_warn_threshold_in_kb:6,batchlog_replay_throttle_in_kb:6,befor:28,benefit:42,binari:34,blob:[14,30],bloom:38,boilerpl:23,bootstrap:[30,41,51,56],branch:28,broadcast_address:6,broadcast_rpc_address:6,buffer_pool_use_heap_if_exhaust:6,bufferpool:46,bug:[5,28],build:26,bulk:[30,39],cach:[11,46,49],call:30,can:30,captur:[40,52],cas_contention_timeout_in_m:6,cassandra:[6,7,15,17,26,27,29,30,31,34,36,40,45,49,53],cast:14,cdc:40,cdc_enabl:6,cdc_free_space_check_interval_m:6,cdc_raw_directori:6,cdc_total_space_in_mb:6,chang:[10,28,30,31,38,40,41],characterist:21,checklist:25,choic:43,choos:28,circleci:29,claus:13,cleanup:[51,57],clear:52,clearsnapshot:58,client:[32,35,46,49],client_encryption_opt:6,clientstat:59,clojur:32,cloud:43,cluster:[11,30],cluster_nam:6,code:[23,28],collect:[21,41],column:11,column_index_cache_size_in_kb:6,column_index_size_in_kb:6,command:[26,41,52],comment:12,commit:24,commit_failure_polici:6,commitlog:[4,46],commitlog_compress:6,commitlog_directori:6,commitlog_segment_size_in_mb:6,commitlog_sync:6,commitlog_sync_batch_window_in_m:6,commitlog_sync_period_in_m:6,commitlog_total_space_in_mb:6,commitlogseg:40,common:[11,41,43],compact:[9,11,41,46,60],compaction_large_partition_warning_threshold_mb:6,compaction_throughput_mb_per_sec:6,compactionhistori:61,compactionstat:62,compactionstrategi:41,compat:52,compress:[11,42],concern:41,concurrent_compactor:6,concurrent_counter_writ:6,concurrent_materialized_view_writ:6,concurrent_read:6,concurrent_writ:6,condition:19,configur:[6,7,31,40,42],connect:30,consider:11,consist:[0,52],constant:12,contact:8,contribut:[5,28],control:19,convent:[12,23],convers:14,copi:52,count:14,counter:[13,21],counter_cache_keys_to_sav:6,counter_cache_save_period:6,counter_cache_size_in_mb:6,counter_write_request_timeout_in_m:6,cpu:43,cql:[9,15,46,52],cqlsh:[35,52],cqlshrc:52,creat:[11,14,16,18,19,20,21,28],credenti:19,credentials_update_interval_in_m:6,credentials_validity_in_m:6,cross_node_timeout:6,cstar_perf:29,custom:21,cython:52,data:[11,13,17,19,21,22,30,40,41,51],data_file_directori:6,databas:19,date:21,dead:51,debian:34,debug:26,decommiss:63,defin:[14,21],definit:[11,12],defragment:41,delet:[13,30,41],depend:52,describ:[52,65],describeclust:64,detail:41,detect:0,develop:27,dies:30,directori:[31,41],disabl:40,disableauditlog:66,disableautocompact:67,disablebackup:68,disablebinari:69,disablefullquerylog:70,disablegossip:71,disablehandoff:72,disablehintsfordc:73,disableoldprotocolvers:74,disk:[30,43],disk_failure_polici:6,disk_optimization_strategi:6,document:36,doe:30,drain:75,driver:[32,35],drop:[9,11,14,16,18,19,20,21,30],droppedmessag:46,dtest:29,durat:21,dynam:50,dynamic_snitch_badness_threshold:6,dynamic_snitch_reset_interval_in_m:6,dynamic_snitch_update_interval_in_m:6,dynamo:0,
each:30,eclips:26,email:30,enabl:[40,49],enable_materialized_view:6,enable_sasi_index:6,enable_scripted_user_defined_funct:6,enable_user_defined_funct:6,enableauditlog:76,enableautocompact:77,enablebackup:78,enablebinari:79,enablefullquerylog:80,enablegossip:81,enablehandoff:82,enablehintsfordc:83,enableoldprotocolvers:84,encod:17,encrypt:49,endpoint_snitch:6,engin:4,entri:30,environ:31,erlang:32,error:30,even:30,except:23,exist:30,exit:52,expand:52,experiment:6,expir:41,factor:30,fail:[30,51],failur:[0,30],failuredetector:85,featur:6,file:[6,23,34],file_cache_size_in_mb:6,filedescriptorratio:46,filter:[13,38],fix:28,flush:86,format:23,frequent:30,from:[26,30,34,52],fromjson:17,fulli:41,further:40,garbag:41,garbagecollect:87,garbagecollector:46,gc_grace_second:41,gc_log_threshold_in_m:6,gc_warn_threshold_in_m:6,gcstat:88,gener:23,get:33,getbatchlogreplaythrottl:89,getcompactionthreshold:90,getcompactionthroughput:91,getconcurr:92,getconcurrentcompactor:93,getconcurrentviewbuild:94,getendpoint:95,getinterdcstreamthroughput:96,getlogginglevel:97,getmaxhintwindow:98,getreplica:99,getse:100,getsstabl:101,getstreamthroughput:102,gettimeout:103,gettraceprob:104,give:30,gossip:0,gossipinfo:105,grace:41,grant:19,group:13,guarante:1,handl:23,handoffwindow:106,hang:51,happen:30,hardwar:43,haskel:32,heap:30,help:[52,107],hint:44,hinted_handoff_disabled_datacent:6,hinted_handoff_en:6,hinted_handoff_throttle_in_kb:6,hintedhandoff:46,hints_compress:6,hints_directori:6,hints_flush_period_in_m:6,host:[30,52],how:[24,30],idea:26,identifi:12,impact:42,incremental_backup:6,index:[16,46],index_summary_capacity_in_mb:6,index_summary_resize_interval_in_minut:6,info:109,initial_token:6,insert:[13,17,35],instal:34,integr:[26,49],intellij:26,inter:49,inter_dc_stream_throughput_outbound_megabits_per_sec:6,inter_dc_tcp_nodelai:6,intern:[19,49],internode_authent:6,internode_compress:6,internode_recv_buff_size_in_byt:6,internode_send_buff_size_in_byt:6,invalidatecountercach:110,invalidatekeycach:111,invalidaterowcach:112,irc:8,java:[30,32],jconsol:30,jmx:[30,41,46,49],join:[30,113],json:17,jvm:46,kei:[11,16,18],key_cache_keys_to_sav:6,key_cache_save_period:6,key_cache_size_in_mb:6,keyspac:[11,30,46],keyword:[9,12],lang:30,languag:15,larg:30,level:[0,41],limit:13,line:[26,52],list:[8,19,21,30],listen:30,listen_address:[6,30],listen_interfac:6,listen_interface_prefer_ipv6:6,listen_on_broadcast_address:6,listsnapshot:114,liter:21,live:30,load:[30,39],locat:31,log:[30,31,41],login:52,lot:30,made:30,mail:8,main:31,major:41,manipul:13,manual:51,map:[16,21,30],materi:18,max:[14,30],max_hint_window_in_m:6,max_hints_delivery_thread:6,max_hints_file_size_in_mb:6,max_value_size_in_mb:6,maxtimeuuid:14,mean:30,memori:[30,43,46],memorypool:46,memtabl:4,memtable_allocation_typ:6,memtable_cleanup_threshold:6,memtable_flush_writ:6,memtable_heap_space_in_mb:6,memtable_offheap_space_in_mb:6,merg:41,messag:30,method:30,metric:46,min:14,minor:41,mintimeuuid:14,model:22,monitor:[46,51],more:[30,41],move:[51,115],movement:51,multilin:23,nativ:[14,21],native_transport_max_concurrent_connect:6,native_transport_max_concurrent_connections_per_ip:6,native_transport_max_frame_size_in_mb:6,native_transport_max_thread:6,native_transport_port:6,native_transport_port_ssl:6,net:32,netstat:116,networktopologystrategi:0,newer:26,node:[30,49,51],nodej:32,nodetool:[30,41,54,117],noteworthi:21,now:14,num_token:6,one:30,onli:30,oper:[30,41,42,45],option:[11,18,41,52],order:[11,13],otc_backlog_expiration_interval_m:6,otc_coalescing_enough_coalesced_messag:6,o
tc_coalescing_strategi:6,otc_coalescing_window_u:6,other:[11,30],outofmemoryerror:30,overview:[3,40],packag:34,page:52,paramet:[13,40,41],partit:11,partition:6,password:49,patch:28,pausehandoff:118,perform:29,permiss:19,permissions_update_interval_in_m:6,permissions_validity_in_m:6,phi_convict_threshold:6,php:32,pick:0,point:30,port:30,prepar:12,prepared_statements_cache_size_mb:6,prerequisit:34,primari:[11,18],profileload:119,progress:51,project:26,properti:31,proxyhistogram:120,python:32,pytz:52,queri:[15,35],question:30,rang:[0,51],range_request_timeout_in_m:6,rangekeysampl:121,read:[40,47],read_request_timeout_in_m:6,rebuild:122,rebuild_index:123,refresh:124,refreshsizeestim:125,refus:30,releas:28,reloadlocalschema:126,reloadse:127,reloadssl:128,reloadtrigg:129,relocatesst:130,remot:30,remov:[41,51],removenod:131,repair:[41,47,48,132],repair_admin:133,repair_session_max_tree_depth:6,replac:51,replaybatchlog:134,replic:[0,30],report:[5,30,46],request:46,request_schedul:6,request_scheduler_id:6,request_scheduler_opt:6,request_timeout_in_m:6,reserv:9,resetfullquerylog:135,resetlocalschema:136,result:13,resum:51,resumehandoff:137,revers:11,review:25,revok:19,rhel:30,right:28,ring:[0,30,138],role:[19,49],role_manag:6,roles_update_interval_in_m:6,roles_validity_in_m:6,row_cache_class_nam:6,row_cache_keys_to_sav:6,row_cache_save_period:6,row_cache_size_in_mb:6,rpc_address:6,rpc_interfac:6,rpc_interface_prefer_ipv6:6,rpc_keepal:6,rpc_max_thread:6,rpc_min_thread:6,rpc_port:6,rpc_recv_buff_size_in_byt:6,rpc_send_buff_size_in_byt:6,rpc_server_typ:6,rubi:32,run:29,runtim:31,rust:32,safeti:6,sai:30,same:30,saved_caches_directori:6,scala:32,scalar:14,scrub:139,secondari:16,secur:[19,49],see:30,seed:30,seed_provid:6,select:[13,17,18],selector:13,serial:52,server_encryption_opt:6,session:52,set:[19,21,26,30],setbatchlogreplaythrottl:140,setcachecapac:141,setcachekeystosav:142,setcompactionthreshold:143,setcompactionthroughput:144,setconcurr:145,setconcurrentcompactor:146,setconcurrentviewbuild:147,sethintedhandoffthrottlekb:148,setinterdcstreamthroughput:149,setlogginglevel:150,setmaxhintwindow:151,setstreamthroughput:152,settimeout:153,settraceprob:154,setup:26,share:52,shell:52,show:[30,52],signatur:14,simplestrategi:0,singl:[30,41],size:41,sjk:155,slow_query_log_timeout_in_m:6,snapshot:156,snapshot_before_compact:6,snitch:50,sourc:[26,52],special:52,speed:30,ssl:49,ssl_storage_port:6,sstabl:[4,41,46],sstable_preemptive_open_interval_in_mb:6,stai:30,standard:49,start:[26,28,33],start_native_transport:6,start_rpc:6,starv:41,statement:[12,18,23],statu:157,statusautocompact:158,statusbackup:159,statusbinari:160,statusgossip:161,statushandoff:162,stc:41,stop:163,stopdaemon:164,storag:[4,9,46],storage_port:6,store:30,strategi:41,stream:[30,46,51],stream_throughput_outbound_megabits_per_sec:6,streaming_keep_alive_period_in_sec:6,stress:29,style:23,sum:14,support:17,tabl:[11,40,46],tablehistogram:165,tablestat:166,tarbal:34,term:12,test:[26,29],than:30,thei:30,though:30,threadpool:46,threshold:6,thrift_framed_transport_size_in_mb:6,thrift_prepared_statements_cache_size_mb:6,tick:28,tier:41,time:[14,21,41],timestamp:[21,30],timeuuid:14,timewindowcompactionstrategi:41,tock:28,todo:[0,1,3,4,11,22,37,39,44,47,48,54],tojson:17,token:[0,14,51],tombston:41,tombstone_failure_threshold:6,tombstone_warn_threshold:6,tool:[29,53],top:30,toppartit:167,tpstat:168,trace:52,tracetype_query_ttl:6,tracetype_repair_ttl:6,transparent_data_encryption_opt:6,trickle_fsync:6,trickle_fsync_interval_in_kb:6,trigger:[20,41],troublesh
oot:174,truncat:11,truncate_request_timeout_in_m:6,truncatehint:169,ttl:[13,41],tunabl:0,tupl:21,two:30,type:[9,17,21,41,46],udt:21,unabl:30,unit:[26,29],unlog:13,unlogged_batch_across_partitions_warn_threshold:6,unrepair:41,unsubscrib:30,updat:[13,30],upgradesst:170,usag:[30,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],use:30,user:[14,19,21],using:[30,41],uuid:14,variabl:31,verifi:171,version:[10,52,172],view:18,viewbuildstatu:173,warn:40,welcom:36,what:[28,30],when:[30,41],where:13,whitespac:23,why:[30,41],window:41,windows_timer_interv:6,without:41,work:[21,28],write_request_timeout_in_m:6,writetim:13,yaml:40,you:28}}) \ No newline at end of file diff --git a/src/doc/3.11.6/tools/cqlsh.html b/src/doc/3.11.6/tools/cqlsh.html deleted file mode 100644 index 24d469b90..000000000 --- a/src/doc/3.11.6/tools/cqlsh.html +++ /dev/null @@ -1,481 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "cqlsh: the CQL shell" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cqlsh: the CQL shell

-

cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line.

-
-

Compatibility

-

cqlsh is compatible with Python 2.7.

-

In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.

-
-
-

Optional Dependencies

-

cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.

-
-

pytz

-

By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the pytz library must be installed. See the timezone option in cqlshrc for -specifying a timezone to use.
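If it is missing, pytz can usually be added with pip (the exact command depends on how Python is managed on the host):

pip install pytz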

-
-
-

cython

-

The performance of cqlsh’s COPY operations can be improved by installing cython. This will -compile the python modules that are central to the performance of COPY.
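cython is likewise a regular PyPI package, so a typical (environment-dependent) installation is:

pip install cython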

-
-
-
-

cqlshrc

-

The cqlshrc file holds configuration options for cqlsh. By default this is in the user’s home directory at -~/.cassandra/cqlshrc, but a custom location can be specified with the --cqlshrc option.

-

Example config values and documentation can be found in the conf/cqlshrc.sample file of a tarball installation. You -can also view the latest version of cqlshrc online.
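As a rough illustration, a minimal cqlshrc could look like the following; the section and option names are taken from the sample file, while the values shown here are placeholders:

[connection]
hostname = 127.0.0.1
port = 9042

[ui]
timezone = Etc/UTC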

-
-
-

Command Line Options

-

Usage:

-

cqlsh [options] [host [port]]

-

Options:

-
-
-C --color
-
Force color output
-
--no-color
-
Disable color output
-
--browser
-
Specify the browser to use for displaying cqlsh help. This can be one of the supported browser names (e.g. firefox) or a browser path followed by %s (e.g. -/usr/bin/google-chrome-stable %s).
-
--ssl
-
Use SSL when connecting to Cassandra
-
-u --user
-
Username to authenticate against Cassandra with
-
-p --password
-
Password to authenticate against Cassandra with, should -be used in conjunction with --user
-
-k --keyspace
-
Keyspace to authenticate to, should be used in conjunction -with --user
-
-f --file
-
Execute commands from the given file, then exit
-
--debug
-
Print additional debugging information
-
--encoding
-
Specify a non-default encoding for output (defaults to UTF-8)
-
--cqlshrc
-
Specify a non-default location for the cqlshrc file
-
-e --execute
-
Execute the given statement, then exit
-
--connect-timeout
-
Specify the connection timeout in seconds (defaults to 2s)
-
--request-timeout
-
Specify the request timeout in seconds (defaults to 10s)
-
-t --tty
-
Force tty mode (command prompt)
-
-
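For example, a hypothetical invocation that connects to a remote node over SSL as a named user (the address, port, and credentials are placeholders):

cqlsh --ssl -u admin -p secretpassword 10.0.0.1 9042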
-
-

Special Commands

-

In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below.

-
-

CONSISTENCY

-

Usage: CONSISTENCY <consistency level>

-

Sets the consistency level for operations to follow. Valid arguments include:

-
    -
  • ANY
  • -
  • ONE
  • -
  • TWO
  • -
  • THREE
  • -
  • QUORUM
  • -
  • ALL
  • -
  • LOCAL_QUORUM
  • -
  • LOCAL_ONE
  • -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-
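For example, to require a quorum of replicas for the statements that follow (the confirmation message may differ slightly between versions):

cqlsh> CONSISTENCY QUORUM
Consistency level set to QUORUM.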
-
-

SERIAL CONSISTENCY

-

Usage: SERIAL CONSISTENCY <consistency level>

-

Sets the serial consistency level for operations to follow. Valid arguments include:

-
    -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-

The serial consistency level is only used by conditional updates (INSERT, UPDATE and DELETE with an IF -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of QUORUM (and -is successful), then a QUORUM read is guaranteed to see that write. But if the regular consistency level of that -write is ANY, then only a read with a consistency level of SERIAL is guaranteed to see it (even a read with -consistency ALL is not guaranteed to be enough).
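As a sketch, assuming a hypothetical users table, the conditional update below would use LOCAL_SERIAL for its paxos phase and the regular consistency level for its learn phase:

cqlsh> SERIAL CONSISTENCY LOCAL_SERIAL
cqlsh> UPDATE users SET email = 'alice@example.com' WHERE id = 1 IF EXISTS;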

-
-
-

SHOW VERSION

-

Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:

-
cqlsh> SHOW VERSION
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-
-
-
-
-

SHOW HOST

-

Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:

-
cqlsh> SHOW HOST
-Connected to Prod_Cluster at 192.0.0.1:9042.
-
-
-
-
-

SHOW SESSION

-

Pretty prints a specific tracing session.

-

Usage: SHOW SESSION <session id>

-

Example usage:

-
cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8
-
-Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8
-
- activity                                                  | timestamp                  | source    | source_elapsed | client
------------------------------------------------------------+----------------------------+-----------+----------------+-----------
-                                        Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 |              0 | 127.0.0.1
- Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 |           3843 | 127.0.0.1
-...
-
-
-
-
-

SOURCE

-

Reads the contents of a file and executes each line as a CQL statement or special cqlsh command.

-

Usage: SOURCE <string filename>

-

Example usage:

-
cqlsh> SOURCE '/home/thobbs/commands.cql'
-
-
-
-
-

CAPTURE

-

Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured.

-

Usage:

-
CAPTURE '<file>';
-CAPTURE OFF;
-CAPTURE;
-
-
-

That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME.

-

Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session.

-

To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF.

-

To inspect the current capture configuration, use CAPTURE with no arguments.
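A short illustrative session (the path is a placeholder; the SELECT results are appended to the file rather than echoed to the console):

cqlsh> CAPTURE '~/query_output.txt';
cqlsh> SELECT * FROM system.local;
cqlsh> CAPTURE OFF;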

-
-
-

HELP

-

Gives information about cqlsh commands. To see available topics, enter HELP without any arguments. To see help on a -topic, use HELP <topic>. Also see the --browser argument for controlling what browser is used to display help.

-
-
-

TRACING

-

Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed.

-

Usage:

-
TRACING ON
-TRACING OFF
-
-
-
-
-

PAGING

-

Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it’s a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once.

-

Usage:

-
PAGING ON
-PAGING OFF
-PAGING <page size in rows>
-
-
-
-
-

EXPAND

-

Enables or disables vertical printing of rows. Enabling EXPAND is useful when many columns are fetched, or the -contents of a single column are large.

-

Usage:

-
EXPAND ON
-EXPAND OFF
-
-
-
-
-

LOGIN

-

Authenticate as a specified Cassandra user for the current session.

-

Usage:

-
LOGIN <username> [<password>]
-
-
-
-
-

EXIT

-

Ends the current session and terminates the cqlsh process.

-

Usage:

-
EXIT
-QUIT
-
-
-
-
-

CLEAR

-

Clears the console.

-

Usage:

-
CLEAR
-CLS
-
-
-
-
-

DESCRIBE

-

Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema.

-

Usage:

-
DESCRIBE CLUSTER
-DESCRIBE SCHEMA
-DESCRIBE KEYSPACES
-DESCRIBE KEYSPACE <keyspace name>
-DESCRIBE TABLES
-DESCRIBE TABLE <table name>
-DESCRIBE INDEX <index name>
-DESCRIBE MATERIALIZED VIEW <view name>
-DESCRIBE TYPES
-DESCRIBE TYPE <type name>
-DESCRIBE FUNCTIONS
-DESCRIBE FUNCTION <function name>
-DESCRIBE AGGREGATES
-DESCRIBE AGGREGATE <aggregate function name>
-
-
-

In any of the commands, DESC may be used in place of DESCRIBE.

-

The DESCRIBE CLUSTER command prints the cluster name and partitioner:

-
cqlsh> DESCRIBE CLUSTER
-
-Cluster: Test Cluster
-Partitioner: Murmur3Partitioner
-
-
-

The DESCRIBE SCHEMA command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup.
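One common pattern, assuming cqlsh can reach a node non-interactively, is to redirect the schema to a file that can later be replayed with SOURCE:

$ cqlsh -e "DESCRIBE SCHEMA" > schema.cql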

-
-
-

COPY TO

-

Copies data from a table to a CSV file.

-

Usage:

-
COPY <table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value STDOUT (without single quotes) to print the CSV to stdout.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.
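An illustrative export, assuming a hypothetical my_keyspace.users table:

cqlsh> COPY my_keyspace.users (id, name, email) TO 'users.csv' WITH HEADER = true AND PAGESIZE = 500;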

-
-

Options for COPY TO

-
-
MAXREQUESTS
-
The maximum number of token ranges to fetch simultaneously. Defaults to 6.
-
PAGESIZE
-
The number of rows to fetch in a single page. Defaults to 1000.
-
PAGETIMEOUT
-
By default, the page timeout is 10 seconds per 1000 entries -in the page size, or 10 seconds if the page size is smaller than 1000.
-
BEGINTOKEN, ENDTOKEN
-
Token range to export. Defaults to exporting the full ring.
-
MAXOUTPUTSIZE
-
The maximum size of the output file measured in number of lines; -beyond this maximum the output file will be split into segments. --1 means unlimited, and is the default.
-
ENCODING
-
The encoding used for characters. Defaults to utf8.
-
-
-
-
-

COPY FROM

-

Copies data from a CSV file to a table.

-

Usage:

-
COPY <table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value STDIN (without single quotes) to read the -CSV data from stdin.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.
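An illustrative import into the same hypothetical table, where the first CSV line holds the column names:

cqlsh> COPY my_keyspace.users (id, name, email) FROM 'users.csv' WITH HEADER = true AND MAXROWS = 10000;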

-
-

Options for COPY FROM

-
-
INGESTRATE
-
The maximum number of rows to process per second. Defaults to 100000.
-
MAXROWS
-
The maximum number of rows to import. -1 means unlimited, and is the default.
-
SKIPROWS
-
A number of initial rows to skip. Defaults to 0.
-
SKIPCOLS
-
A comma-separated list of column names to ignore. By default, no columns are skipped.
-
MAXPARSEERRORS
-
The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default.
-
MAXINSERTERRORS
-
The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000.
-
ERRFILE =
-
A file to store all rows that could not be imported. By default this is import_<ks>_<table>.err, where <ks> is -your keyspace and <table> is your table name.
-
MAXBATCHSIZE
-
The max number of rows inserted in a single batch. Defaults to 20.
-
MINBATCHSIZE
-
The min number of rows inserted in a single batch. Defaults to 2.
-
CHUNKSIZE
-
The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000.
-
-
-
-

Shared COPY Options

-

Options that are common to both COPY TO and COPY FROM.

-
-
NULLVAL
-
The string placeholder for null values. Defaults to null.
-
HEADER
-
For COPY TO, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, -specifies whether the first line in the CSV input file contains column names. Defaults to false.
-
DECIMALSEP
-
The character that is used as the decimal point separator. Defaults to ..
-
THOUSANDSSEP
-
The character that is used to separate thousands. Defaults to the empty string.
-
BOOLSTYLE
-
The string literal format for boolean values. Defaults to True,False.
-
NUMPROCESSES
-
The number of child worker processes to create for COPY tasks. Defaults to a max of 4 for COPY FROM and 16 -for COPY TO. However, at most (num_cores - 1) processes will be created.
-
MAXATTEMPTS
-
The maximum number of failed attempts to fetch a range of data (when using COPY TO) or insert a chunk of data -(when using COPY FROM) before giving up. Defaults to 5.
-
REPORTFREQUENCY
-
How often status updates are refreshed, in seconds. Defaults to 0.25.
-
RATEFILE
-
An optional file to output rate statistics to. By default, statistics are not output to a file.
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/index.html b/src/doc/3.11.6/tools/index.html deleted file mode 100644 index 6cab900b7..000000000 --- a/src/doc/3.11.6/tools/index.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Tools" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Tools

-

This section describes the command line tools provided with Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool.html b/src/doc/3.11.6/tools/nodetool.html deleted file mode 100644 index 938a3d9a2..000000000 --- a/src/doc/3.11.6/tools/nodetool.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-

Todo

-

Try to autogenerate this from Nodetool’s help.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/assassinate.html b/src/doc/3.11.6/tools/nodetool/assassinate.html deleted file mode 100644 index 11c8d285f..000000000 --- a/src/doc/3.11.6/tools/nodetool/assassinate.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "assassinate" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

assassinate

-
-
-

Usage

-
NAME
-        nodetool assassinate - Forcefully remove a dead node without
-        re-replicating any data. Use as a last resort if you cannot removenode
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] assassinate [--] <ip_address>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <ip_address>
-            IP address of the endpoint to assassinate
-
-
-
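A hypothetical invocation (the JMX host/port and the target IP address are placeholders):

nodetool -h 10.0.0.5 -p 7199 assassinate 10.0.0.12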
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/bootstrap.html b/src/doc/3.11.6/tools/nodetool/bootstrap.html deleted file mode 100644 index a5c2eb227..000000000 --- a/src/doc/3.11.6/tools/nodetool/bootstrap.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "bootstrap" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

bootstrap

-
-
-

Usage

-
NAME
-        nodetool bootstrap - Monitor/manage node's bootstrap process
-
-SYNOPSIS
-        nodetool bootstrap
-        nodetool [(-u <username> | --username <username>)]
-                [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-pp | --print-port)] bootstrap resume
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-COMMANDS
-        With no arguments, Display help information
-
-        resume
-            Resume bootstrap streaming
-
-
-
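For example, to resume an interrupted bootstrap on the local node:

nodetool bootstrap resume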
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/cleanup.html b/src/doc/3.11.6/tools/nodetool/cleanup.html deleted file mode 100644 index 79232bffa..000000000 --- a/src/doc/3.11.6/tools/nodetool/cleanup.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "cleanup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cleanup

-
-
-

Usage

-
NAME
-        nodetool cleanup - Triggers the immediate cleanup of keys no longer
-        belonging to a node. By default, clean all keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] cleanup
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to cleanup simultanously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
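
An illustrative invocation, assuming a keyspace `my_keyspace` with a table `my_table` (placeholder names):

```bash
# Clean up every keyspace on the node
nodetool cleanup

# Clean up a single table, running two cleanup jobs in parallel
nodetool cleanup -j 2 -- my_keyspace my_table
```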
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/clearsnapshot.html b/src/doc/3.11.6/tools/nodetool/clearsnapshot.html deleted file mode 100644 index befb347ea..000000000 --- a/src/doc/3.11.6/tools/nodetool/clearsnapshot.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clearsnapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clearsnapshot

-
-
-

Usage

-
NAME
-        nodetool clearsnapshot - Remove the snapshot with the given name from
-        the given keyspaces. If no snapshotName is specified we will remove all
-        snapshots
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clearsnapshot [--all]
-                [-t <snapshot_name>] [--] [<keyspaces>...]
-
-OPTIONS
-        --all
-            Removes all snapshots
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -t <snapshot_name>
-            Remove the snapshot with a given name
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspaces>...]
-            Remove snapshots from the given keyspaces
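
A sketch of typical usage, where `pre_upgrade` and `my_keyspace` are placeholder names:

```bash
# Remove the snapshot named "pre_upgrade" from one keyspace
nodetool clearsnapshot -t pre_upgrade -- my_keyspace

# Remove every snapshot on the node
nodetool clearsnapshot --all
```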
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/clientstats.html b/src/doc/3.11.6/tools/nodetool/clientstats.html deleted file mode 100644 index 55901e93a..000000000 --- a/src/doc/3.11.6/tools/nodetool/clientstats.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clientstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clientstats

-
-
-

Usage

-
NAME
-        nodetool clientstats - Print information about connected clients
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clientstats [--all]
-                [--by-protocol] [--clear-history]
-
-OPTIONS
-        --all
-            Lists all connections
-
-        --by-protocol
-            Lists most recent client connections by protocol version
-
-        --clear-history
-            Clear the history of connected clients
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
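
For example, the connection summary and history could be inspected as follows:

```bash
# Summarise connected clients by native protocol version
nodetool clientstats --by-protocol

# List every connection, then clear the recorded history
nodetool clientstats --all
nodetool clientstats --clear-history
```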
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/compact.html b/src/doc/3.11.6/tools/nodetool/compact.html deleted file mode 100644 index cba5758a0..000000000 --- a/src/doc/3.11.6/tools/nodetool/compact.html +++ /dev/null @@ -1,149 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compact" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compact

-
-
-

Usage

-
NAME
-        nodetool compact - Force a (major) compaction on one or more tables or
-        user-defined compaction on given SSTables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compact
-                [(-et <end_token> | --end-token <end_token>)] [(-s | --split-output)]
-                [(-st <start_token> | --start-token <start_token>)] [--user-defined]
-                [--] [<keyspace> <tables>...] or <SSTable file>...
-
-OPTIONS
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which compaction range ends
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s, --split-output
-            Use -s to not create a single big file
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the compaction range starts
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --user-defined
-            Use --user-defined to submit listed files for user-defined
-            compaction
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...] or <SSTable file>...
-            The keyspace followed by one or many tables or list of SSTable data
-            files when using --user-defined
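
An illustrative example, assuming placeholder keyspace/table names and SSTable path:

```bash
# Major compaction of one table, splitting the output into several SSTables
nodetool compact -s my_keyspace my_table

# User-defined compaction of a specific SSTable data file
nodetool compact --user-defined /var/lib/cassandra/data/my_keyspace/my_table/md-42-big-Data.db
```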
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/compactionhistory.html b/src/doc/3.11.6/tools/nodetool/compactionhistory.html deleted file mode 100644 index fdd5a5e49..000000000 --- a/src/doc/3.11.6/tools/nodetool/compactionhistory.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionhistory" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionhistory

-
-
-

Usage

-
NAME
-        nodetool compactionhistory - Print history of compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionhistory
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
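
For example, to get machine-readable output:

```bash
# Print the node's compaction history as JSON
nodetool compactionhistory -F json
```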
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/compactionstats.html b/src/doc/3.11.6/tools/nodetool/compactionstats.html deleted file mode 100644 index 132d569ec..000000000 --- a/src/doc/3.11.6/tools/nodetool/compactionstats.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionstats

-
-
-

Usage

-
NAME
-        nodetool compactionstats - Print statistics on compactions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/decommission.html b/src/doc/3.11.6/tools/nodetool/decommission.html deleted file mode 100644 index e216d1a64..000000000 --- a/src/doc/3.11.6/tools/nodetool/decommission.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "decommission" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

decommission

-
-
-

Usage

-
NAME
-        nodetool decommission - Decommission the *node I am connecting to*
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] decommission [(-f | --force)]
-
-OPTIONS
-        -f, --force
-            Force decommission of this node even when it reduces the number of
-            replicas to below configured RF
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
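
A minimal sketch of both forms:

```bash
# Stream this node's data to the remaining replicas and leave the ring
nodetool decommission

# Force the decommission even if it drops replicas below the configured RF
nodetool decommission --force
```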
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/describecluster.html b/src/doc/3.11.6/tools/nodetool/describecluster.html deleted file mode 100644 index a346f3ba0..000000000 --- a/src/doc/3.11.6/tools/nodetool/describecluster.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describecluster" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describecluster

-
-
-

Usage

-
NAME
-        nodetool describecluster - Print the name, snitch, partitioner and
-        schema version of a cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describecluster
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/describering.html b/src/doc/3.11.6/tools/nodetool/describering.html deleted file mode 100644 index 45c56dfe5..000000000 --- a/src/doc/3.11.6/tools/nodetool/describering.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describering" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describering

-
-
-

Usage

-
NAME
-        nodetool describering - Shows the token ranges info of a given keyspace
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describering [--] <keyspace>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace>
-            The keyspace name
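
For example, with a placeholder keyspace name:

```bash
# Show token range ownership for one keyspace
nodetool describering my_keyspace
```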
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/disableauditlog.html b/src/doc/3.11.6/tools/nodetool/disableauditlog.html deleted file mode 100644 index d8a27eba1..000000000 --- a/src/doc/3.11.6/tools/nodetool/disableauditlog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableauditlog

-
-
-

Usage

-
NAME
-        nodetool disableauditlog - Disable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableauditlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/disableautocompaction.html b/src/doc/3.11.6/tools/nodetool/disableautocompaction.html deleted file mode 100644 index 5a6009132..000000000 --- a/src/doc/3.11.6/tools/nodetool/disableautocompaction.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableautocompaction

-
-
-

Usage

-
NAME
-        nodetool disableautocompaction - Disable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/disablebackup.html b/src/doc/3.11.6/tools/nodetool/disablebackup.html deleted file mode 100644 index 9e4490755..000000000 --- a/src/doc/3.11.6/tools/nodetool/disablebackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebackup

-
-
-

Usage

-
NAME
-        nodetool disablebackup - Disable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/disablebinary.html b/src/doc/3.11.6/tools/nodetool/disablebinary.html deleted file mode 100644 index 7bec563dc..000000000 --- a/src/doc/3.11.6/tools/nodetool/disablebinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebinary

-
-
-

Usage

-
NAME
-        nodetool disablebinary - Disable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/disablefullquerylog.html b/src/doc/3.11.6/tools/nodetool/disablefullquerylog.html deleted file mode 100644 index a179fcd27..000000000 --- a/src/doc/3.11.6/tools/nodetool/disablefullquerylog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool disablefullquerylog - Disable the full query log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablefullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/disablegossip.html b/src/doc/3.11.6/tools/nodetool/disablegossip.html deleted file mode 100644 index 0fa71daeb..000000000 --- a/src/doc/3.11.6/tools/nodetool/disablegossip.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablegossip

-
-
-

Usage

-
NAME
-        nodetool disablegossip - Disable gossip (effectively marking the node
-        down)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/disablehandoff.html b/src/doc/3.11.6/tools/nodetool/disablehandoff.html deleted file mode 100644 index 86380b21e..000000000 --- a/src/doc/3.11.6/tools/nodetool/disablehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehandoff

-
-
-

Usage

-
NAME
-        nodetool disablehandoff - Disable storing hinted handoffs
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/disablehintsfordc.html b/src/doc/3.11.6/tools/nodetool/disablehintsfordc.html deleted file mode 100644 index 0b0222cbc..000000000 --- a/src/doc/3.11.6/tools/nodetool/disablehintsfordc.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool disablehintsfordc - Disable hints for a data center
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to disable
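
For example, with a placeholder data center name:

```bash
# Stop storing hints destined for the data center "dc2"
nodetool disablehintsfordc dc2
```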
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/disableoldprotocolversions.html b/src/doc/3.11.6/tools/nodetool/disableoldprotocolversions.html deleted file mode 100644 index e671a9429..000000000 --- a/src/doc/3.11.6/tools/nodetool/disableoldprotocolversions.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool disableoldprotocolversions - Disable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/drain.html b/src/doc/3.11.6/tools/nodetool/drain.html deleted file mode 100644 index da8c55612..000000000 --- a/src/doc/3.11.6/tools/nodetool/drain.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "drain" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

drain

-
-
-

Usage

-
NAME
-        nodetool drain - Drain the node (stop accepting writes and flush all
-        tables)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] drain
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/enableauditlog.html b/src/doc/3.11.6/tools/nodetool/enableauditlog.html deleted file mode 100644 index db5d237d1..000000000 --- a/src/doc/3.11.6/tools/nodetool/enableauditlog.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableauditlog

-
-
-

Usage

-
NAME
-        nodetool enableauditlog - Enable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableauditlog
-                [--excluded-categories <excluded_categories>]
-                [--excluded-keyspaces <excluded_keyspaces>]
-                [--excluded-users <excluded_users>]
-                [--included-categories <included_categories>]
-                [--included-keyspaces <included_keyspaces>]
-                [--included-users <included_users>] [--logger <logger>]
-
-OPTIONS
-        --excluded-categories <excluded_categories>
-            Comma separated list of Audit Log Categories to be excluded for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --excluded-keyspaces <excluded_keyspaces>
-            Comma separated list of keyspaces to be excluded for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --excluded-users <excluded_users>
-            Comma separated list of users to be excluded for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --included-categories <included_categories>
-            Comma separated list of Audit Log Categories to be included for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --included-keyspaces <included_keyspaces>
-            Comma separated list of keyspaces to be included for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --included-users <included_users>
-            Comma separated list of users to be included for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        --logger <logger>
-            Logger name to be used for AuditLogging. Default BinAuditLogger. If
-            not set the value from cassandra.yaml will be used
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
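
An illustrative invocation, where the keyspace and user names are placeholders:

```bash
# Audit only two keyspaces and skip a service account
nodetool enableauditlog --included-keyspaces ks1,ks2 --excluded-users svc_reporting
```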
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/enableautocompaction.html b/src/doc/3.11.6/tools/nodetool/enableautocompaction.html deleted file mode 100644 index 4d1f05776..000000000 --- a/src/doc/3.11.6/tools/nodetool/enableautocompaction.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableautocompaction

-
-
-

Usage

-
NAME
-        nodetool enableautocompaction - Enable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/enablebackup.html b/src/doc/3.11.6/tools/nodetool/enablebackup.html deleted file mode 100644 index cfbb41dbe..000000000 --- a/src/doc/3.11.6/tools/nodetool/enablebackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebackup

-
-
-

Usage

-
NAME
-        nodetool enablebackup - Enable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/enablebinary.html b/src/doc/3.11.6/tools/nodetool/enablebinary.html deleted file mode 100644 index 3f0b41f6e..000000000 --- a/src/doc/3.11.6/tools/nodetool/enablebinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebinary

-
-
-

Usage

-
NAME
-        nodetool enablebinary - Reenable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/enablefullquerylog.html b/src/doc/3.11.6/tools/nodetool/enablefullquerylog.html deleted file mode 100644 index b5a617907..000000000 --- a/src/doc/3.11.6/tools/nodetool/enablefullquerylog.html +++ /dev/null @@ -1,154 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool enablefullquerylog - Enable full query logging, defaults for
-        the options are configured in cassandra.yaml
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablefullquerylog
-                [--archive-command <archive_command>] [--blocking]
-                [--max-archive-retries <archive_retries>]
-                [--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-                [--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-        --archive-command <archive_command>
-            Command that will handle archiving rolled full query log files.
-            Format is "/path/to/script.sh %path" where %path will be replaced
-            with the file to archive
-
-        --blocking
-            If the queue is full whether to block producers or drop samples.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --max-archive-retries <archive_retries>
-            Max number of archive retries.
-
-        --max-log-size <max_log_size>
-            How many bytes of log data to store before dropping segments. Might
-            not be respected if a log file hasn't rolled so it can be deleted.
-
-        --max-queue-weight <max_queue_weight>
-            Maximum number of bytes of query data to queue to disk before
-            blocking or dropping samples.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        --path <path>
-            Path to store the full query log at. Will have its contents
-            recursively deleted.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        --roll-cycle <roll_cycle>
-            How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-        -u <username>, --username <username>
-            Remote jmx agent username
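
A sketch of a typical invocation; the log directory is a placeholder (note that its contents are deleted recursively):

```bash
# Log full queries under a dedicated directory, rolling the file hourly
nodetool enablefullquerylog --path /var/lib/cassandra/fql --roll-cycle HOURLY
```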
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/enablegossip.html b/src/doc/3.11.6/tools/nodetool/enablegossip.html deleted file mode 100644 index d67ca2cc3..000000000 --- a/src/doc/3.11.6/tools/nodetool/enablegossip.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablegossip

-
-
-

Usage

-
NAME
-        nodetool enablegossip - Reenable gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/enablehandoff.html b/src/doc/3.11.6/tools/nodetool/enablehandoff.html deleted file mode 100644 index 81998a9c7..000000000 --- a/src/doc/3.11.6/tools/nodetool/enablehandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehandoff

-
-
-

Usage

-
NAME
-        nodetool enablehandoff - Reenable future hints storing on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/enablehintsfordc.html b/src/doc/3.11.6/tools/nodetool/enablehintsfordc.html deleted file mode 100644 index f2479c082..000000000 --- a/src/doc/3.11.6/tools/nodetool/enablehintsfordc.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool enablehintsfordc - Enable hints for a data center that was
-        previously disabled
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to enable
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/enableoldprotocolversions.html b/src/doc/3.11.6/tools/nodetool/enableoldprotocolversions.html deleted file mode 100644 index e5698ec92..000000000 --- a/src/doc/3.11.6/tools/nodetool/enableoldprotocolversions.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool enableoldprotocolversions - Enable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/failuredetector.html b/src/doc/3.11.6/tools/nodetool/failuredetector.html deleted file mode 100644 index 8558ca1f7..000000000 --- a/src/doc/3.11.6/tools/nodetool/failuredetector.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "failuredetector" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

failuredetector

-
-
-

Usage

-
NAME
-        nodetool failuredetector - Shows the failure detector information for
-        the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] failuredetector
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/flush.html b/src/doc/3.11.6/tools/nodetool/flush.html deleted file mode 100644 index 8a11c45e9..000000000 --- a/src/doc/3.11.6/tools/nodetool/flush.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "flush" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

flush

-
-
-

Usage

-
NAME
-        nodetool flush - Flush one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] flush [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
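
For example, with placeholder keyspace and table names:

```bash
# Flush every memtable on the node
nodetool flush

# Flush only one table
nodetool flush my_keyspace my_table
```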
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/garbagecollect.html b/src/doc/3.11.6/tools/nodetool/garbagecollect.html deleted file mode 100644 index db65d85dc..000000000 --- a/src/doc/3.11.6/tools/nodetool/garbagecollect.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "garbagecollect" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

garbagecollect

-
-
-

Usage

-
NAME
-        nodetool garbagecollect - Remove deleted data from one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] garbagecollect
-                [(-g <granularity> | --granularity <granularity>)]
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -g <granularity>, --granularity <granularity>
-            Granularity of garbage removal. ROW (default) removes deleted
-            partitions and rows, CELL also removes overwritten or deleted cells.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to clean up simultaneously, set to 0 to use all
-            available compaction threads. Defaults to 1 so that collections of
-            newer tables can see the data is deleted and also remove tombstones.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
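
An illustrative invocation against a single table (placeholder names):

```bash
# Also drop overwritten/deleted cells, processing two SSTables at a time
nodetool garbagecollect -g CELL -j 2 my_keyspace my_table
```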
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/gcstats.html b/src/doc/3.11.6/tools/nodetool/gcstats.html deleted file mode 100644 index 46aa6e6ff..000000000 --- a/src/doc/3.11.6/tools/nodetool/gcstats.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gcstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gcstats

-
-
-

Usage

-
NAME
-        nodetool gcstats - Print GC Statistics
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gcstats
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getbatchlogreplaythrottle.html b/src/doc/3.11.6/tools/nodetool/getbatchlogreplaythrottle.html deleted file mode 100644 index 406577cc8..000000000 --- a/src/doc/3.11.6/tools/nodetool/getbatchlogreplaythrottle.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool getbatchlogreplaythrottle - Print batchlog replay throttle in
-        KB/s. This is reduced proportionally to the number of nodes in the
-        cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getbatchlogreplaythrottle
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getcompactionthreshold.html b/src/doc/3.11.6/tools/nodetool/getcompactionthreshold.html deleted file mode 100644 index 7e2e0011b..000000000 --- a/src/doc/3.11.6/tools/nodetool/getcompactionthreshold.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool getcompactionthreshold - Print min and max compaction
-        thresholds for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthreshold [--]
-                <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <keyspace> <table>
-            The keyspace with a table
-
-
-
-
-
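For illustration, assuming a hypothetical keyspace `my_keyspace` and table `my_table`:

```bash
# Print the current min/max compaction thresholds for one table
nodetool getcompactionthreshold my_keyspace my_table
```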
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getcompactionthroughput.html b/src/doc/3.11.6/tools/nodetool/getcompactionthroughput.html deleted file mode 100644 index 8cdba5ac9..000000000 --- a/src/doc/3.11.6/tools/nodetool/getcompactionthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool getcompactionthroughput - Print the MB/s throughput cap for
-        compaction in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getconcurrency.html b/src/doc/3.11.6/tools/nodetool/getconcurrency.html deleted file mode 100644 index 36fc2187f..000000000 --- a/src/doc/3.11.6/tools/nodetool/getconcurrency.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrency

-
-
-

Usage

-
NAME
-        nodetool getconcurrency - Get maximum concurrency for processing stages
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrency [--]
-                [stage-names]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        [stage-names]
-            optional list of stage names, otherwise display all stages
-
-
-
-
-
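A possible invocation; the stage names below are given only as examples:

```bash
# Show concurrency for all stages
nodetool getconcurrency

# Restrict the output to specific stages (names are illustrative)
nodetool getconcurrency -- ReadStage MutationStage
```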
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getconcurrentcompactors.html b/src/doc/3.11.6/tools/nodetool/getconcurrentcompactors.html deleted file mode 100644 index b8fd33d8f..000000000 --- a/src/doc/3.11.6/tools/nodetool/getconcurrentcompactors.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool getconcurrentcompactors - Get the number of concurrent
-        compactors in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentcompactors
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getconcurrentviewbuilders.html b/src/doc/3.11.6/tools/nodetool/getconcurrentviewbuilders.html deleted file mode 100644 index 761a3114d..000000000 --- a/src/doc/3.11.6/tools/nodetool/getconcurrentviewbuilders.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool getconcurrentviewbuilders - Get the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentviewbuilders
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getendpoints.html b/src/doc/3.11.6/tools/nodetool/getendpoints.html deleted file mode 100644 index 6a2cc0498..000000000 --- a/src/doc/3.11.6/tools/nodetool/getendpoints.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getendpoints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getendpoints

-
-
-

Usage

-
NAME
-        nodetool getendpoints - Print the endpoints that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getendpoints [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find the endpoint
-
-
-
-
-
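Sketch of a call with made-up keyspace, table, and partition key values:

```bash
# List the nodes that own the partition key 'jsmith' in my_keyspace.users
nodetool getendpoints my_keyspace users jsmith
```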
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getinterdcstreamthroughput.html b/src/doc/3.11.6/tools/nodetool/getinterdcstreamthroughput.html deleted file mode 100644 index 232084bee..000000000 --- a/src/doc/3.11.6/tools/nodetool/getinterdcstreamthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getinterdcstreamthroughput - Print the Mb/s throughput cap for
-        inter-datacenter streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getinterdcstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getlogginglevels.html b/src/doc/3.11.6/tools/nodetool/getlogginglevels.html deleted file mode 100644 index 50afe9165..000000000 --- a/src/doc/3.11.6/tools/nodetool/getlogginglevels.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getlogginglevels" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getlogginglevels

-
-
-

Usage

-
NAME
-        nodetool getlogginglevels - Get the runtime logging levels
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getlogginglevels
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getmaxhintwindow.html b/src/doc/3.11.6/tools/nodetool/getmaxhintwindow.html deleted file mode 100644 index 68a34fa75..000000000 --- a/src/doc/3.11.6/tools/nodetool/getmaxhintwindow.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool getmaxhintwindow - Print the max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getmaxhintwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getreplicas.html b/src/doc/3.11.6/tools/nodetool/getreplicas.html deleted file mode 100644 index 213d9e6c4..000000000 --- a/src/doc/3.11.6/tools/nodetool/getreplicas.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getreplicas" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getreplicas

-
-
-

Usage

-
NAME
-        nodetool getreplicas - Print replicas for a given key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getreplicas [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find replicas
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getseeds.html b/src/doc/3.11.6/tools/nodetool/getseeds.html deleted file mode 100644 index e70eec8bd..000000000 --- a/src/doc/3.11.6/tools/nodetool/getseeds.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getseeds

-
-
-

Usage

-
NAME
-        nodetool getseeds - Get the currently in-use seed node IP list, excluding
-        this node's own IP
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getsstables.html b/src/doc/3.11.6/tools/nodetool/getsstables.html deleted file mode 100644 index 9dd872658..000000000 --- a/src/doc/3.11.6/tools/nodetool/getsstables.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getsstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getsstables

-
-
-

Usage

-
NAME
-        nodetool getsstables - Print the sstable filenames that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getsstables
-                [(-hf | --hex-format)] [--] <keyspace> <cfname> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hf, --hex-format
-            Specify the key in hexadecimal string format
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <keyspace> <cfname> <key>
-            The keyspace, the column family, and the key
-
-
-
-
-
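Illustrative only; the keyspace, table, and key below are placeholders:

```bash
# SSTable files that contain the key 'jsmith'
nodetool getsstables my_keyspace users jsmith

# Same lookup with the key supplied as a hex string ('jsmith' encoded)
nodetool getsstables --hex-format my_keyspace users 6a736d697468
```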
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/getstreamthroughput.html b/src/doc/3.11.6/tools/nodetool/getstreamthroughput.html deleted file mode 100644 index efce57853..000000000 --- a/src/doc/3.11.6/tools/nodetool/getstreamthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getstreamthroughput - Print the Mb/s throughput cap for
-        streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/gettimeout.html b/src/doc/3.11.6/tools/nodetool/gettimeout.html deleted file mode 100644 index eb2508eba..000000000 --- a/src/doc/3.11.6/tools/nodetool/gettimeout.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettimeout

-
-
-

Usage

-
NAME
-        nodetool gettimeout - Print the timeout of the given type in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettimeout [--] <timeout_type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <timeout_type>
-            The timeout type, one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
-
-
-
-
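For example, to read back one of the timeout types listed above:

```bash
# Print the configured read timeout in milliseconds
nodetool gettimeout read
```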
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/gettraceprobability.html b/src/doc/3.11.6/tools/nodetool/gettraceprobability.html deleted file mode 100644 index c8a90a702..000000000 --- a/src/doc/3.11.6/tools/nodetool/gettraceprobability.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettraceprobability

-
-
-

Usage

-
NAME
-        nodetool gettraceprobability - Print the current trace probability value
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettraceprobability
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/gossipinfo.html b/src/doc/3.11.6/tools/nodetool/gossipinfo.html deleted file mode 100644 index 90c623dc8..000000000 --- a/src/doc/3.11.6/tools/nodetool/gossipinfo.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gossipinfo" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gossipinfo

-
-
-

Usage

-
NAME
-        nodetool gossipinfo - Shows the gossip information for the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gossipinfo
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/handoffwindow.html b/src/doc/3.11.6/tools/nodetool/handoffwindow.html deleted file mode 100644 index b1226f7d8..000000000 --- a/src/doc/3.11.6/tools/nodetool/handoffwindow.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "handoffwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

handoffwindow

-
-
-

Usage

-
NAME
-        nodetool handoffwindow - Print current hinted handoff window
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] handoffwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/help.html b/src/doc/3.11.6/tools/nodetool/help.html deleted file mode 100644 index 33a44a93f..000000000 --- a/src/doc/3.11.6/tools/nodetool/help.html +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "help" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

help

-
-
-

Usage

-
NAME
-        nodetool help - Display help information
-
-SYNOPSIS
-        nodetool help [--] [<command>...]
-
-OPTIONS
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <command>
-
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/import.html b/src/doc/3.11.6/tools/nodetool/import.html deleted file mode 100644 index 00a8f2f88..000000000 --- a/src/doc/3.11.6/tools/nodetool/import.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "import" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

import

-
-
-

Usage

-
NAME
-        nodetool import - Import new SSTables to the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] import
-                [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-                [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-                [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-                <directory> ...
-
-OPTIONS
-        -c, --no-invalidate-caches
-            Don't invalidate the row cache when importing
-
-        -e, --extended-verify
-            Run an extended verify, verifying all values in the new sstables
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --keep-level
-            Keep the level on the new sstables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick import without verifying sstables, clearing row cache or
-            checking in which data directory to put the file
-
-        -r, --keep-repaired
-            Keep any repaired information from the sstables
-
-        -t, --no-tokens
-            Don't verify that all tokens in the new sstable are owned by the
-            current node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -v, --no-verify
-            Don't verify new sstables
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <keyspace> <table> <directory> ...
-            The keyspace, table name and directories to import sstables from
-
-
-
-
-
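A hedged sketch of an import; the keyspace, table, and staging directory path are made up:

```bash
# Import SSTables from a staging directory into my_keyspace.my_table,
# skipping verification and cache invalidation for speed (see -q/--quick above)
nodetool import --quick my_keyspace my_table /var/lib/cassandra/staging/my_table
```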
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/info.html b/src/doc/3.11.6/tools/nodetool/info.html deleted file mode 100644 index 64f323981..000000000 --- a/src/doc/3.11.6/tools/nodetool/info.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "info" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

info

-
-
-

Usage

-
NAME
-        nodetool info - Print node information (uptime, load, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] info [(-T | --tokens)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -T, --tokens
-            Display all tokens
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/invalidatecountercache.html b/src/doc/3.11.6/tools/nodetool/invalidatecountercache.html deleted file mode 100644 index 9aae28f65..000000000 --- a/src/doc/3.11.6/tools/nodetool/invalidatecountercache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatecountercache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatecountercache

-
-
-

Usage

-
NAME
-        nodetool invalidatecountercache - Invalidate the counter cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatecountercache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/invalidatekeycache.html b/src/doc/3.11.6/tools/nodetool/invalidatekeycache.html deleted file mode 100644 index 8487fe0ce..000000000 --- a/src/doc/3.11.6/tools/nodetool/invalidatekeycache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatekeycache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatekeycache

-
-
-

Usage

-
NAME
-        nodetool invalidatekeycache - Invalidate the key cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatekeycache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/invalidaterowcache.html b/src/doc/3.11.6/tools/nodetool/invalidaterowcache.html deleted file mode 100644 index a30aa31c1..000000000 --- a/src/doc/3.11.6/tools/nodetool/invalidaterowcache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidaterowcache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidaterowcache

-
-
-

Usage

-
NAME
-        nodetool invalidaterowcache - Invalidate the row cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidaterowcache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/join.html b/src/doc/3.11.6/tools/nodetool/join.html deleted file mode 100644 index 4b905b2a3..000000000 --- a/src/doc/3.11.6/tools/nodetool/join.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "join" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

join

-
-
-

Usage

-
NAME
-        nodetool join - Join the ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] join
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/listsnapshots.html b/src/doc/3.11.6/tools/nodetool/listsnapshots.html deleted file mode 100644 index 8cb54cfb2..000000000 --- a/src/doc/3.11.6/tools/nodetool/listsnapshots.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "listsnapshots" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

listsnapshots

-
-
-

Usage

-
NAME
-        nodetool listsnapshots - Lists all the snapshots along with the size on
-        disk and true size. True size is the total size of all SSTables which
-        are not backed up to disk. Size on disk is the total size of the snapshot
-        on disk. Total TrueDiskSpaceUsed does not account for SSTable deduplication.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] listsnapshots
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/move.html b/src/doc/3.11.6/tools/nodetool/move.html deleted file mode 100644 index f8b21eb3e..000000000 --- a/src/doc/3.11.6/tools/nodetool/move.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "move" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

move

-
-
-

Usage

-
NAME
-        nodetool move - Move node on the token ring to a new token
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] move [--] <new token>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <new token>
-            The new token.
-
-
-
-
-
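An example invocation; the token value is arbitrary, and `--` keeps a negative token from being parsed as an option:

```bash
# Move this node to a new (illustrative) token on the ring
nodetool move -- -1844674407370955161
```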
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/netstats.html b/src/doc/3.11.6/tools/nodetool/netstats.html deleted file mode 100644 index c3952611c..000000000 --- a/src/doc/3.11.6/tools/nodetool/netstats.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "netstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

netstats

-
-
-

Usage

-
NAME
-        nodetool netstats - Print network information on provided host
-        (connecting node by default)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] netstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
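Illustrative remote invocation; the host, port, and credentials are placeholders:

```bash
# Network stats for a remote node over JMX, with human-readable byte counts
nodetool -h 10.0.0.12 -p 7199 -u cassandra -pw cassandra netstats --human-readable
```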
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/nodetool.html b/src/doc/3.11.6/tools/nodetool/nodetool.html deleted file mode 100644 index bfcd96b28..000000000 --- a/src/doc/3.11.6/tools/nodetool/nodetool.html +++ /dev/null @@ -1,223 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Nodetool" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-
-

Usage

-
-
usage: nodetool [(-u <username> | --username <username>)]
-
[(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-[(-pw <password> | --password <password>)]
-[(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-[(-pp | --print-port)] <command> [<args>]
-
-

The most commonly used nodetool commands are:

-
-

assassinate - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode

-

bootstrap - Monitor/manage node’s bootstrap process

-

cleanup - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces

-

clearsnapshot - Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots

-

clientstats - Print information about connected clients

-

compact - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables

-

compactionhistory - Print history of compaction

-

compactionstats - Print statistics on compactions

-

decommission - Decommission the node I am connecting to

-

describecluster - Print the name, snitch, partitioner and schema version of a cluster

-

describering - Shows the token ranges info of a given keyspace

-

disableauditlog - Disable the audit log

-

disableautocompaction - Disable autocompaction for the given keyspace and table

-

disablebackup - Disable incremental backup

-

disablebinary - Disable native transport (binary protocol)

-

disablefullquerylog - Disable the full query log

-

disablegossip - Disable gossip (effectively marking the node down)

-

disablehandoff - Disable storing hinted handoffs

-

disablehintsfordc - Disable hints for a data center

-

disableoldprotocolversions - Disable old protocol versions

-

drain - Drain the node (stop accepting writes and flush all tables)

-

enableauditlog - Enable the audit log

-

enableautocompaction - Enable autocompaction for the given keyspace and table

-

enablebackup - Enable incremental backup

-

enablebinary - Reenable native transport (binary protocol)

-

enablefullquerylog - Enable full query logging, defaults for the options are configured in cassandra.yaml

-

enablegossip - Reenable gossip

-

enablehandoff - Reenable future hints storing on the current node

-

enablehintsfordc - Enable hints for a data center that was previously disabled

-

enableoldprotocolversions - Enable old protocol versions

-

failuredetector - Shows the failure detector information for the cluster

-

flush - Flush one or more tables

-

garbagecollect - Remove deleted data from one or more tables

-

gcstats - Print GC Statistics

-

getbatchlogreplaythrottle - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster.

-

getcompactionthreshold - Print min and max compaction thresholds for a given table

-

getcompactionthroughput - Print the MB/s throughput cap for compaction in the system

-

getconcurrency - Get maximum concurrency for processing stages

-

getconcurrentcompactors - Get the number of concurrent compactors in the system.

-

getconcurrentviewbuilders - Get the number of concurrent view builders in the system

-

getendpoints - Print the endpoints that own the key

-

getinterdcstreamthroughput - Print the Mb/s throughput cap for inter-datacenter streaming in the system

-

getlogginglevels - Get the runtime logging levels

-

getmaxhintwindow - Print the max hint window in ms

-

getreplicas - Print replicas for a given key

-

getseeds - Get the currently in-use seed node IP list, excluding this node's own IP

-

getsstables - Print the sstable filenames that own the key

-

getstreamthroughput - Print the Mb/s throughput cap for streaming in the system

-

gettimeout - Print the timeout of the given type in ms

-

gettraceprobability - Print the current trace probability value

-

gossipinfo - Shows the gossip information for the cluster

-

handoffwindow - Print current hinted handoff window

-

help - Display help information

-

import - Import new SSTables to the system

-

info - Print node information (uptime, load, …)

-

invalidatecountercache - Invalidate the counter cache

-

invalidatekeycache - Invalidate the key cache

-

invalidaterowcache - Invalidate the row cache

-

join - Join the ring

-

listsnapshots - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is the total size of the snapshot on disk. Total TrueDiskSpaceUsed does not account for SSTable deduplication.

-

move - Move node on the token ring to a new token

-

netstats - Print network information on provided host (connecting node by default)

-

pausehandoff - Pause hints delivery process

-

profileload - Low footprint profiling of activity for a period of time

-

proxyhistograms - Print statistic histograms for network operations

-

rangekeysample - Shows the sampled keys held across all keyspaces

-

rebuild - Rebuild data by streaming from other nodes (similarly to bootstrap)

-

rebuild_index - A full rebuild of native secondary indexes for a given table

-

refresh - Load newly placed SSTables to the system without restart

-

refreshsizeestimates - Refresh system.size_estimates

-

reloadlocalschema - Reload local node schema from system tables

-

reloadseeds - Reload the seed node list from the seed node provider

-

reloadssl - Signals Cassandra to reload SSL certificates

-

reloadtriggers - Reload trigger classes

-

relocatesstables - Relocates sstables to the correct disk

-

removenode - Show status of current node removal, force completion of pending removal or remove provided ID

-

repair - Repair one or more tables

-

repair_admin - list and fail incremental repair sessions

-

replaybatchlog - Kick off batchlog replay and wait for finish

-

resetfullquerylog - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX

-

resetlocalschema - Reset node’s local schema and resync

-

resumehandoff - Resume hints delivery process

-

ring - Print information about the token ring

-

scrub - Scrub (rebuild sstables for) one or more tables

-

setbatchlogreplaythrottle - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster.

-

setcachecapacity - Set global key, row, and counter cache capacities (in MB units)

-

setcachekeystosave - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable

-

setcompactionthreshold - Set min and max compaction thresholds for a given table

-

setcompactionthroughput - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling

-

setconcurrency - Set maximum concurrency for processing stage

-

setconcurrentcompactors - Set number of concurrent compactors in the system.

-

setconcurrentviewbuilders - Set the number of concurrent view builders in the system

-

sethintedhandoffthrottlekb - Set hinted handoff throttle in kb per second, per delivery thread.

-

setinterdcstreamthroughput - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling

-

setlogginglevel - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters.

-

setmaxhintwindow - Set the specified max hint window in ms

-

setstreamthroughput - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling

-

settimeout - Set the specified timeout in ms, or 0 to disable timeout

-

settraceprobability - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default

-

sjk - Run commands of ‘Swiss Java Knife’. Run ‘nodetool sjk --help’ for more information.

-

snapshot - Take a snapshot of specified keyspaces or a snapshot of the specified table

-

status - Print cluster information (state, load, IDs, …)

-

statusautocompaction - status of autocompaction of the given keyspace and table

-

statusbackup - Status of incremental backup

-

statusbinary - Status of native transport (binary protocol)

-

statusgossip - Status of gossip

-

statushandoff - Status of storing future hints on the current node

-

stop - Stop compaction

-

stopdaemon - Stop cassandra daemon

-

tablehistograms - Print statistic histograms for a given table

-

tablestats - Print statistics on tables

-

toppartitions - Sample and print the most active partitions

-

tpstats - Print usage statistics of thread pools

-

truncatehints - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified.

-

upgradesstables - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version)

-

verify - Verify (check data checksum for) one or more tables

-

version - Print cassandra version

-

viewbuildstatus - Show progress of a materialized view build

-
-

See ‘nodetool help <command>’ for more information on a specific command.

-
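The general calling convention, sketched with placeholder connection details:

```bash
# Global JMX options come first, then the subcommand and its arguments
nodetool -h 10.0.0.12 -p 7199 status

# Detailed built-in help for any of the commands listed above
nodetool help garbagecollect
```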
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/pausehandoff.html b/src/doc/3.11.6/tools/nodetool/pausehandoff.html deleted file mode 100644 index 2d65ede66..000000000 --- a/src/doc/3.11.6/tools/nodetool/pausehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "pausehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

pausehandoff

-
-
-

Usage

-
NAME
-        nodetool pausehandoff - Pause hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] pausehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/profileload.html b/src/doc/3.11.6/tools/nodetool/profileload.html deleted file mode 100644 index 907c93db8..000000000 --- a/src/doc/3.11.6/tools/nodetool/profileload.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "profileload" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

profileload

-
-
-

Usage

-
NAME
-        nodetool profileload - Low footprint profiling of activity for a period
-        of time
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] profileload [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
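A minimal sketch of an invocation, using a hypothetical keyspace and table and sampling for 10 seconds (10000 ms):

```bash
# Sample the most active partitions of my_keyspace.my_table for 10 seconds
nodetool profileload my_keyspace my_table 10000
```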
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/proxyhistograms.html b/src/doc/3.11.6/tools/nodetool/proxyhistograms.html deleted file mode 100644 index 9bc4cb39d..000000000 --- a/src/doc/3.11.6/tools/nodetool/proxyhistograms.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "proxyhistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

proxyhistograms

-
-
-

Usage

-
NAME
-        nodetool proxyhistograms - Print statistic histograms for network
-        operations
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] proxyhistograms
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/rangekeysample.html b/src/doc/3.11.6/tools/nodetool/rangekeysample.html deleted file mode 100644 index bca56e9b1..000000000 --- a/src/doc/3.11.6/tools/nodetool/rangekeysample.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rangekeysample" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rangekeysample

-
-
-

Usage

-
NAME
-        nodetool rangekeysample - Shows the sampled keys held across all
-        keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rangekeysample
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/rebuild.html b/src/doc/3.11.6/tools/nodetool/rebuild.html deleted file mode 100644 index b11a8a273..000000000 --- a/src/doc/3.11.6/tools/nodetool/rebuild.html +++ /dev/null @@ -1,148 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild

-
-
-

Usage

-
NAME
-        nodetool rebuild - Rebuild data by streaming from other nodes (similarly
-        to bootstrap)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild
-                [(-ks <specific_keyspace> | --keyspace <specific_keyspace>)]
-                [(-s <specific_sources> | --sources <specific_sources>)]
-                [(-ts <specific_tokens> | --tokens <specific_tokens>)] [--]
-                <src-dc-name>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -ks <specific_keyspace>, --keyspace <specific_keyspace>
-            Use -ks to rebuild specific keyspace.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <specific_sources>, --sources <specific_sources>
-            Use -s to specify hosts that this node should stream from when -ts
-            is used. Multiple hosts should be separated using commas (e.g.
-            127.0.0.1,127.0.0.2,...)
-
-        -ts <specific_tokens>, --tokens <specific_tokens>
-            Use -ts to rebuild specific token ranges, in the format of "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]".
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <src-dc-name>
-            Name of DC from which to select sources for streaming. By default,
-            pick any DC
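A hedged example, assuming a source datacenter named DC1 and a hypothetical keyspace:

```bash
# Rebuild only my_keyspace, streaming data from nodes in DC1
nodetool rebuild -ks my_keyspace DC1
```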
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/rebuild_index.html b/src/doc/3.11.6/tools/nodetool/rebuild_index.html deleted file mode 100644 index 552a89a65..000000000 --- a/src/doc/3.11.6/tools/nodetool/rebuild_index.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild_index" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild_index

-
-
-

Usage

-
NAME
-        nodetool rebuild_index - A full rebuild of native secondary indexes for
-        a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild_index [--] <keyspace>
-                <table> <indexName...>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <indexName...>
-            The keyspace and table name followed by a list of index names
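A sketch with hypothetical keyspace, table, and index names (the exact index-name format accepted can vary between Cassandra versions):

```bash
# Rebuild a single secondary index from scratch
nodetool rebuild_index my_keyspace my_table my_index
```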
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/refresh.html b/src/doc/3.11.6/tools/nodetool/refresh.html deleted file mode 100644 index 80a621829..000000000 --- a/src/doc/3.11.6/tools/nodetool/refresh.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refresh" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refresh

-
-
-

Usage

-
NAME
-        nodetool refresh - Load newly placed SSTables to the system without
-        restart
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refresh [--] <keyspace>
-                <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
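A minimal example, assuming SSTable files have already been copied into the table's data directory; the keyspace and table names are placeholders:

```bash
# Load newly placed SSTables without restarting the node
nodetool refresh my_keyspace my_table
```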
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/refreshsizeestimates.html b/src/doc/3.11.6/tools/nodetool/refreshsizeestimates.html deleted file mode 100644 index 4b1f22183..000000000 --- a/src/doc/3.11.6/tools/nodetool/refreshsizeestimates.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refreshsizeestimates" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refreshsizeestimates

-
-
-

Usage

-
NAME
-        nodetool refreshsizeestimates - Refresh system.size_estimates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refreshsizeestimates
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/reloadlocalschema.html b/src/doc/3.11.6/tools/nodetool/reloadlocalschema.html deleted file mode 100644 index 12cc0d814..000000000 --- a/src/doc/3.11.6/tools/nodetool/reloadlocalschema.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadlocalschema

-
-
-

Usage

-
NAME
-        nodetool reloadlocalschema - Reload local node schema from system tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/reloadseeds.html b/src/doc/3.11.6/tools/nodetool/reloadseeds.html deleted file mode 100644 index cc0a7b8e2..000000000 --- a/src/doc/3.11.6/tools/nodetool/reloadseeds.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadseeds

-
-
-

Usage

-
NAME
-        nodetool reloadseeds - Reload the seed node list from the seed node
-        provider
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/reloadssl.html b/src/doc/3.11.6/tools/nodetool/reloadssl.html deleted file mode 100644 index e3159a12a..000000000 --- a/src/doc/3.11.6/tools/nodetool/reloadssl.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadssl" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadssl

-
-
-

Usage

-
NAME
-        nodetool reloadssl - Signals Cassandra to reload SSL certificates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadssl
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/reloadtriggers.html b/src/doc/3.11.6/tools/nodetool/reloadtriggers.html deleted file mode 100644 index c94211f34..000000000 --- a/src/doc/3.11.6/tools/nodetool/reloadtriggers.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadtriggers" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadtriggers

-
-
-

Usage

-
NAME
-        nodetool reloadtriggers - Reload trigger classes
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadtriggers
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/relocatesstables.html b/src/doc/3.11.6/tools/nodetool/relocatesstables.html deleted file mode 100644 index 4608f5317..000000000 --- a/src/doc/3.11.6/tools/nodetool/relocatesstables.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "relocatesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

relocatesstables

-
-
-

Usage

-
NAME
-        nodetool relocatesstables - Relocates sstables to the correct disk
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] relocatesstables
-                [(-j <jobs> | --jobs <jobs>)] [--] <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to relocate simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
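A sketch using hypothetical names and two parallel relocation jobs:

```bash
# Move any misplaced SSTables of my_keyspace.my_table to the correct disk
nodetool relocatesstables -j 2 my_keyspace my_table
```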
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/removenode.html b/src/doc/3.11.6/tools/nodetool/removenode.html deleted file mode 100644 index 87945d179..000000000 --- a/src/doc/3.11.6/tools/nodetool/removenode.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "removenode" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

removenode

-
-
-

Usage

-
NAME
-        nodetool removenode - Show status of current node removal, force
-        completion of pending removal or remove provided ID
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] removenode [--]
-                <status>|<force>|<ID>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <status>|<force>|<ID>
-            Show status of current node removal, force completion of pending
-            removal, or remove provided ID
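A hedged pair of invocations; the host ID shown is a placeholder that would normally be copied from `nodetool status` output:

```bash
# Check whether a removal is already in progress
nodetool removenode status
# Remove the dead node identified by its host ID (placeholder value)
nodetool removenode 12345678-1234-1234-1234-123456789abc
```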
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/repair.html b/src/doc/3.11.6/tools/nodetool/repair.html deleted file mode 100644 index 2221d70c1..000000000 --- a/src/doc/3.11.6/tools/nodetool/repair.html +++ /dev/null @@ -1,197 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair

-
-
-

Usage

-
NAME
-        nodetool repair - Repair one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair
-                [(-dc <specific_dc> | --in-dc <specific_dc>)...]
-                [(-dcpar | --dc-parallel)] [(-et <end_token> | --end-token <end_token>)]
-                [(-force | --force)] [(-full | --full)]
-                [(-hosts <specific_host> | --in-hosts <specific_host>)...]
-                [(-j <job_threads> | --job-threads <job_threads>)]
-                [(-local | --in-local-dc)] [(-os | --optimise-streams)] [(-pl | --pull)]
-                [(-pr | --partitioner-range)] [(-prv | --preview)]
-                [(-seq | --sequential)]
-                [(-st <start_token> | --start-token <start_token>)] [(-tr | --trace)]
-                [(-vd | --validate)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -dc <specific_dc>, --in-dc <specific_dc>
-            Use -dc to repair specific datacenters
-
-        -dcpar, --dc-parallel
-            Use -dcpar to repair data centers in parallel.
-
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which repair range ends (inclusive)
-
-        -force, --force
-            Use -force to filter out down endpoints
-
-        -full, --full
-            Use -full to issue a full repair.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hosts <specific_host>, --in-hosts <specific_host>
-            Use -hosts to repair specific hosts
-
-        -j <job_threads>, --job-threads <job_threads>
-            Number of threads to run repair jobs. Usually this means number of
-            CFs to repair concurrently. WARNING: increasing this puts more load
-            on repairing nodes, so be careful. (default: 1, max: 4)
-
-        -local, --in-local-dc
-            Use -local to only repair against nodes in the same datacenter
-
-        -os, --optimise-streams
-            Use --optimise-streams to try to reduce the number of streams we do
-            (EXPERIMENTAL, see CASSANDRA-3200).
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pl, --pull
-            Use --pull to perform a one way repair where data is only streamed
-            from a remote node to this node.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pr, --partitioner-range
-            Use -pr to repair only the first range returned by the partitioner
-
-        -prv, --preview
-            Determine ranges and amount of data to be streamed, but don't
-            actually perform repair
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -seq, --sequential
-            Use -seq to carry out a sequential repair
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the repair range starts
-            (exclusive)
-
-        -tr, --trace
-            Use -tr to trace the repair. Traces are logged to
-            system_traces.events.
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -vd, --validate
-            Checks that repaired data is in sync between nodes. Out of sync
-            repaired data indicates a full repair should be run.
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
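Two illustrative invocations with a hypothetical keyspace, using only flags listed above:

```bash
# Full repair of every table in my_keyspace
nodetool repair -full my_keyspace
# Repair only this node's primary token ranges (typically run on each node in turn)
nodetool repair -pr my_keyspace
```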
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/repair_admin.html b/src/doc/3.11.6/tools/nodetool/repair_admin.html deleted file mode 100644 index e1b68e2cf..000000000 --- a/src/doc/3.11.6/tools/nodetool/repair_admin.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair_admin" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair_admin

-
-
-

Usage

-
NAME
-        nodetool repair_admin - list and fail incremental repair sessions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair_admin [(-a | --all)]
-                [(-f | --force)] [(-l | --list)] [(-x <cancel> | --cancel <cancel>)]
-
-OPTIONS
-        -a, --all
-            include completed and failed sessions
-
-        -f, --force
-            cancel repair session from a node other than the repair coordinator.
-            Attempting to cancel FINALIZED or FAILED sessions is an error.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --list
-            list repair sessions (default behavior)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -x <cancel>, --cancel <cancel>
-            cancel an incremental repair session
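A brief sketch; the session ID passed to --cancel is a placeholder that would normally be copied from the --list output:

```bash
# List active incremental repair sessions
nodetool repair_admin --list
# Cancel one session by its ID (placeholder value)
nodetool repair_admin --cancel 7a30b1f0-0000-0000-0000-000000000000
```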
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/replaybatchlog.html b/src/doc/3.11.6/tools/nodetool/replaybatchlog.html deleted file mode 100644 index c7779b78e..000000000 --- a/src/doc/3.11.6/tools/nodetool/replaybatchlog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "replaybatchlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

replaybatchlog

-
-
-

Usage

-
NAME
-        nodetool replaybatchlog - Kick off batchlog replay and wait for finish
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] replaybatchlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/resetfullquerylog.html b/src/doc/3.11.6/tools/nodetool/resetfullquerylog.html deleted file mode 100644 index 2926edbe6..000000000 --- a/src/doc/3.11.6/tools/nodetool/resetfullquerylog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetfullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetfullquerylog

-
-
-

Usage

-
NAME
-        nodetool resetfullquerylog - Stop the full query log and clean files in
-        the configured full query log directory from cassandra.yaml as well as
-        JMX
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetfullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/resetlocalschema.html b/src/doc/3.11.6/tools/nodetool/resetlocalschema.html deleted file mode 100644 index ae4b6f73d..000000000 --- a/src/doc/3.11.6/tools/nodetool/resetlocalschema.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetlocalschema

-
-
-

Usage

-
NAME
-        nodetool resetlocalschema - Reset node's local schema and resync
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/resumehandoff.html b/src/doc/3.11.6/tools/nodetool/resumehandoff.html deleted file mode 100644 index 22d8c6ef0..000000000 --- a/src/doc/3.11.6/tools/nodetool/resumehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resumehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resumehandoff

-
-
-

Usage

-
NAME
-        nodetool resumehandoff - Resume hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resumehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/ring.html b/src/doc/3.11.6/tools/nodetool/ring.html deleted file mode 100644 index fa70e7371..000000000 --- a/src/doc/3.11.6/tools/nodetool/ring.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "ring" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

ring

-
-
-

Usage

-
NAME
-        nodetool ring - Print information about the token ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] ring [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace>
-            Specify a keyspace for accurate ownership information (topology
-            awareness)
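For example, assuming a keyspace named my_keyspace:

```bash
# Show token ownership for my_keyspace, resolving IPs to hostnames
nodetool ring -r my_keyspace
```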
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/scrub.html b/src/doc/3.11.6/tools/nodetool/scrub.html deleted file mode 100644 index 374fc9192..000000000 --- a/src/doc/3.11.6/tools/nodetool/scrub.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "scrub" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

scrub

-
-
-

Usage

-
NAME
-        nodetool scrub - Scrub (rebuild sstables for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] scrub
-                [(-j <jobs> | --jobs <jobs>)] [(-n | --no-validate)]
-                [(-ns | --no-snapshot)] [(-r | --reinsert-overflowed-ttl)]
-                [(-s | --skip-corrupted)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to scrub simultaneously, set to 0 to use all
-            available compaction threads
-
-        -n, --no-validate
-            Do not validate columns using column validator
-
-        -ns, --no-snapshot
-            Scrubbed CFs will be snapshotted first, if disableSnapshot is false.
-            (default false)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --reinsert-overflowed-ttl
-            Rewrites rows with overflowed expiration date affected by
-            CASSANDRA-14092 with the maximum supported expiration date of
-            2038-01-19T03:14:06+00:00. The rows are rewritten with the original
-            timestamp incremented by one millisecond to override/supersede any
-            potential tombstone that may have been generated during compaction
-            of the affected rows.
-
-        -s, --skip-corrupted
-            Skip corrupted partitions even when scrubbing counter tables.
-            (default false)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
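A hedged example with hypothetical names:

```bash
# Scrub one table with two parallel jobs, skipping corrupted partitions
nodetool scrub -j 2 -s my_keyspace my_table
```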
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setbatchlogreplaythrottle.html b/src/doc/3.11.6/tools/nodetool/setbatchlogreplaythrottle.html deleted file mode 100644 index 04c68bf04..000000000 --- a/src/doc/3.11.6/tools/nodetool/setbatchlogreplaythrottle.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool setbatchlogreplaythrottle - Set batchlog replay throttle in KB
-        per second, or 0 to disable throttling. This will be reduced
-        proportionally to the number of nodes in the cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setbatchlogreplaythrottle [--]
-                <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second, 0 to disable throttling
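For example, with an arbitrary illustrative rate:

```bash
# Throttle batchlog replay to 1024 KB/s across the cluster
nodetool setbatchlogreplaythrottle 1024
# Remove the throttle entirely
nodetool setbatchlogreplaythrottle 0
```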
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setcachecapacity.html b/src/doc/3.11.6/tools/nodetool/setcachecapacity.html deleted file mode 100644 index 565e44ca6..000000000 --- a/src/doc/3.11.6/tools/nodetool/setcachecapacity.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachecapacity" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachecapacity

-
-
-

Usage

-
NAME
-        nodetool setcachecapacity - Set global key, row, and counter cache
-        capacities (in MB units)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachecapacity [--]
-                <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-            Key cache, row cache, and counter cache (in MB)
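An illustrative invocation; the sizes are arbitrary example values:

```bash
# 100 MB key cache, row cache disabled, 50 MB counter cache
nodetool setcachecapacity 100 0 50
```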
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setcachekeystosave.html b/src/doc/3.11.6/tools/nodetool/setcachekeystosave.html deleted file mode 100644 index 01c44fdef..000000000 --- a/src/doc/3.11.6/tools/nodetool/setcachekeystosave.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachekeystosave" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachekeystosave

-
-
-

Usage

-
NAME
-        nodetool setcachekeystosave - Set number of keys saved by each cache for
-        faster post-restart warmup. 0 to disable
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachekeystosave [--]
-                <key-cache-keys-to-save> <row-cache-keys-to-save>
-                <counter-cache-keys-to-save>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-keys-to-save> <row-cache-keys-to-save>
-        <counter-cache-keys-to-save>
-            The number of keys saved by each cache. 0 to disable
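For instance, with arbitrary example values:

```bash
# Save 100 key-cache keys and 50 counter-cache keys; disable row-cache key saving
nodetool setcachekeystosave 100 0 50
```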
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setcompactionthreshold.html b/src/doc/3.11.6/tools/nodetool/setcompactionthreshold.html deleted file mode 100644 index c352f1654..000000000 --- a/src/doc/3.11.6/tools/nodetool/setcompactionthreshold.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool setcompactionthreshold - Set min and max compaction thresholds
-        for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthreshold [--]
-                <keyspace> <table> <minthreshold> <maxthreshold>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <minthreshold> <maxthreshold>
-            The keyspace, the table, min and max threshold
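A sketch with hypothetical names, using the commonly cited default thresholds of 4 and 32:

```bash
# Set min/max compaction thresholds for one table
nodetool setcompactionthreshold my_keyspace my_table 4 32
```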
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setcompactionthroughput.html b/src/doc/3.11.6/tools/nodetool/setcompactionthroughput.html deleted file mode 100644 index 5bb719b09..000000000 --- a/src/doc/3.11.6/tools/nodetool/setcompactionthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool setcompactionthroughput - Set the MB/s throughput cap for
-        compaction in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in MB, 0 to disable throttling
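For example, with an arbitrary illustrative cap:

```bash
# Cap compaction throughput at 64 MB/s
nodetool setcompactionthroughput 64
# Disable compaction throttling
nodetool setcompactionthroughput 0
```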
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setconcurrency.html b/src/doc/3.11.6/tools/nodetool/setconcurrency.html deleted file mode 100644 index 03df37eba..000000000 --- a/src/doc/3.11.6/tools/nodetool/setconcurrency.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrency

-
-
-

Usage

-
NAME
-        nodetool setconcurrency - Set maximum concurrency for processing stage
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrency [--]
-                <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-                <maximum-concurrency>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-        <maximum-concurrency>
-            Set concurrency for processing stage
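A hedged example; ReadStage is used here only as an illustrative stage name, and the value is arbitrary:

```bash
# Allow up to 64 concurrent tasks in the read stage
nodetool setconcurrency ReadStage 64
```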
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setconcurrentcompactors.html b/src/doc/3.11.6/tools/nodetool/setconcurrentcompactors.html deleted file mode 100644 index b6f0c8b1a..000000000 --- a/src/doc/3.11.6/tools/nodetool/setconcurrentcompactors.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool setconcurrentcompactors - Set number of concurrent compactors
-        in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentcompactors [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent compactors, greater than 0.
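For example, with an arbitrary illustrative value:

```bash
# Allow four compactions to run at once
nodetool setconcurrentcompactors 4
```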
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setconcurrentviewbuilders.html b/src/doc/3.11.6/tools/nodetool/setconcurrentviewbuilders.html deleted file mode 100644 index 8e06194dc..000000000 --- a/src/doc/3.11.6/tools/nodetool/setconcurrentviewbuilders.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool setconcurrentviewbuilders - Set the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentviewbuilders [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent view builders, greater than 0.
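For instance (illustrative value):

```bash
# Build at most 2 materialized views concurrently
$ nodetool setconcurrentviewbuilders 2
```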
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/sethintedhandoffthrottlekb.html b/src/doc/3.11.6/tools/nodetool/sethintedhandoffthrottlekb.html deleted file mode 100644 index 1ea674e53..000000000 --- a/src/doc/3.11.6/tools/nodetool/sethintedhandoffthrottlekb.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sethintedhandoffthrottlekb" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sethintedhandoffthrottlekb

-
-
-

Usage

-
NAME
-        nodetool sethintedhandoffthrottlekb - Set hinted handoff throttle in kb
-        per second, per delivery thread.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sethintedhandoffthrottlekb
-                [--] <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second
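An example invocation, with an arbitrarily chosen value:

```bash
# Throttle hint delivery to 2048 KB per second, per delivery thread
$ nodetool sethintedhandoffthrottlekb 2048
```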
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setinterdcstreamthroughput.html b/src/doc/3.11.6/tools/nodetool/setinterdcstreamthroughput.html deleted file mode 100644 index 1285c1550..000000000 --- a/src/doc/3.11.6/tools/nodetool/setinterdcstreamthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setinterdcstreamthroughput - Set the Mb/s throughput cap for
-        inter-datacenter streaming in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setinterdcstreamthroughput
-                [--] <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
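For example (the cap shown is arbitrary):

```bash
# Cap cross-datacenter streaming at 100 Mb/s
$ nodetool setinterdcstreamthroughput 100

# Remove the cap entirely
$ nodetool setinterdcstreamthroughput 0
```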
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setlogginglevel.html b/src/doc/3.11.6/tools/nodetool/setlogginglevel.html deleted file mode 100644 index 07d6a7314..000000000 --- a/src/doc/3.11.6/tools/nodetool/setlogginglevel.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setlogginglevel" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setlogginglevel

-
-
-

Usage

-
NAME
-        nodetool setlogginglevel - Set the log level threshold for a given
-        component or class. Will reset to the initial configuration if called
-        with no parameters.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setlogginglevel [--]
-                <component|class> <level>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <component|class> <level>
-            The component or class to change the level for and the log level
-            threshold to set. Will reset to initial level if omitted. Available
-            components: bootstrap, compaction, repair, streaming, cql, ring
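A sketch of typical use; the logger class named below is only an example of a fully qualified class name, not a recommendation:

```bash
# Turn up logging for one class (any fully qualified class name can be used)
$ nodetool setlogginglevel org.apache.cassandra.transport.Message TRACE

# Calling the command with no arguments restores the configured defaults
$ nodetool setlogginglevel
```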
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setmaxhintwindow.html b/src/doc/3.11.6/tools/nodetool/setmaxhintwindow.html deleted file mode 100644 index b87095255..000000000 --- a/src/doc/3.11.6/tools/nodetool/setmaxhintwindow.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool setmaxhintwindow - Set the specified max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setmaxhintwindow [--]
-                <value_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_ms>
-            Value of maxhintwindow in ms
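For example:

```bash
# Keep generating hints for a down node for up to 3 hours (10800000 ms)
$ nodetool setmaxhintwindow 10800000
```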
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/setstreamthroughput.html b/src/doc/3.11.6/tools/nodetool/setstreamthroughput.html deleted file mode 100644 index 67e493417..000000000 --- a/src/doc/3.11.6/tools/nodetool/setstreamthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setstreamthroughput - Set the Mb/s throughput cap for streaming
-        in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setstreamthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
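For example (arbitrary cap):

```bash
# Cap streaming at 200 Mb/s; passing 0 disables throttling
$ nodetool setstreamthroughput 200
```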
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/settimeout.html b/src/doc/3.11.6/tools/nodetool/settimeout.html deleted file mode 100644 index 5ca79d765..000000000 --- a/src/doc/3.11.6/tools/nodetool/settimeout.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settimeout

-
-
-

Usage

-
NAME
-        nodetool settimeout - Set the specified timeout in ms, or 0 to disable
-        timeout
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settimeout [--] <timeout_type>
-                <timeout_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type> <timeout_in_ms>
-            Timeout type followed by value in ms (0 disables socket streaming
-            timeout). Type should be one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
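Hypothetical invocations using two of the timeout types listed above:

```bash
# Set the read timeout to 10000 ms
$ nodetool settimeout read 10000

# Set the write timeout to 5000 ms
$ nodetool settimeout write 5000
```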
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/settraceprobability.html b/src/doc/3.11.6/tools/nodetool/settraceprobability.html deleted file mode 100644 index 192ac2541..000000000 --- a/src/doc/3.11.6/tools/nodetool/settraceprobability.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settraceprobability

-
-
-

Usage

-
NAME
-        nodetool settraceprobability - Sets the probability for tracing any
-        given request to the given value. 0 disables tracing, 1 enables it for
-        all requests; 0 is the default
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settraceprobability [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Trace probability between 0 and 1 (ex: 0.2)
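For example:

```bash
# Trace roughly 1 in every 1000 requests
$ nodetool settraceprobability 0.001

# Turn request tracing off again
$ nodetool settraceprobability 0
```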
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/sjk.html b/src/doc/3.11.6/tools/nodetool/sjk.html deleted file mode 100644 index 105debca7..000000000 --- a/src/doc/3.11.6/tools/nodetool/sjk.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sjk" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sjk

-
-
-

Usage

-
NAME
-        nodetool sjk - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk
-        --help' for more information.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sjk [--] [<args>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <args>
-            Arguments passed as is to 'Swiss Java Knife'.
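A sketch only; `ttop` is assumed here to be one of the SJK subcommands available in the bundled version:

```bash
# Show what the bundled Swiss Java Knife can do
$ nodetool sjk --help

# Run one of its subcommands, e.g. a live thread-top view
$ nodetool sjk ttop
```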
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/snapshot.html b/src/doc/3.11.6/tools/nodetool/snapshot.html deleted file mode 100644 index 2166da9d2..000000000 --- a/src/doc/3.11.6/tools/nodetool/snapshot.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "snapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

snapshot

-
-
-

Usage

-
NAME
-        nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-        of the specified table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] snapshot
-                [(-cf <table> | --column-family <table> | --table <table>)]
-                [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-                [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-        -cf <table>, --column-family <table>, --table <table>
-            The table name (you must specify one and only one keyspace for using
-            this option)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-            The list of keyspace.table entries to snapshot (you must not
-            specify a keyspace alone)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -sf, --skip-flush
-            Do not flush memtables before snapshotting (snapshot will not
-            contain unflushed data)
-
-        -t <tag>, --tag <tag>
-            The name of the snapshot
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspaces...>]
-            List of keyspaces. By default, all keyspaces
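Illustrative invocations; the keyspace, table and tag names are placeholders:

```bash
# Snapshot a single keyspace under an explicit tag
$ nodetool snapshot -t pre_upgrade my_keyspace

# Snapshot specific keyspace.table pairs without flushing memtables first
$ nodetool snapshot -sf -t quick_check -kt my_keyspace.users,my_keyspace.events
```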
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/status.html b/src/doc/3.11.6/tools/nodetool/status.html deleted file mode 100644 index 359dbe55b..000000000 --- a/src/doc/3.11.6/tools/nodetool/status.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "status" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

status

-
-
-

Usage

-
NAME
-        nodetool status - Print cluster information (state, load, IDs, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] status [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace>]
-            The keyspace name
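Typical usage (the keyspace name is a placeholder):

```bash
# Cluster-wide overview
$ nodetool status

# Ownership for one keyspace, with hostnames instead of IP addresses
$ nodetool status -r my_keyspace
```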
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/statusautocompaction.html b/src/doc/3.11.6/tools/nodetool/statusautocompaction.html deleted file mode 100644 index f29e44127..000000000 --- a/src/doc/3.11.6/tools/nodetool/statusautocompaction.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusautocompaction

-
-
-

Usage

-
NAME
-        nodetool statusautocompaction - status of autocompaction of the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusautocompaction
-                [(-a | --all)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --all
-            Show auto compaction status for each keyspace/table
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
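For example (placeholder names):

```bash
# Report for a single table
$ nodetool statusautocompaction my_keyspace my_table

# Report for every keyspace and table
$ nodetool statusautocompaction --all
```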
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/statusbackup.html b/src/doc/3.11.6/tools/nodetool/statusbackup.html deleted file mode 100644 index b964ba8a4..000000000 --- a/src/doc/3.11.6/tools/nodetool/statusbackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbackup

-
-
-

Usage

-
NAME
-        nodetool statusbackup - Status of incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/statusbinary.html b/src/doc/3.11.6/tools/nodetool/statusbinary.html deleted file mode 100644 index f0c772963..000000000 --- a/src/doc/3.11.6/tools/nodetool/statusbinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbinary

-
-
-

Usage

-
NAME
-        nodetool statusbinary - Status of native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/statusgossip.html b/src/doc/3.11.6/tools/nodetool/statusgossip.html deleted file mode 100644 index b60de03f4..000000000 --- a/src/doc/3.11.6/tools/nodetool/statusgossip.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusgossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusgossip

-
-
-

Usage

-
NAME
-        nodetool statusgossip - Status of gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusgossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/statushandoff.html b/src/doc/3.11.6/tools/nodetool/statushandoff.html deleted file mode 100644 index e6ed7f123..000000000 --- a/src/doc/3.11.6/tools/nodetool/statushandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statushandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statushandoff

-
-
-

Usage

-
NAME
-        nodetool statushandoff - Status of storing future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statushandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/stop.html b/src/doc/3.11.6/tools/nodetool/stop.html deleted file mode 100644 index b480b6678..000000000 --- a/src/doc/3.11.6/tools/nodetool/stop.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stop" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stop

-
-
-

Usage

-
NAME
-        nodetool stop - Stop compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stop
-                [(-id <compactionId> | --compaction-id <compactionId>)] [--] <compaction
-                type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -id <compactionId>, --compaction-id <compactionId>
-            Use -id to stop a compaction by the specified id. Ids can be found
-            in the transaction log files whose names start with compaction_,
-            located in the table transactions folder.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <compaction type>
-            Supported types are COMPACTION, VALIDATION, CLEANUP, SCRUB,
-            UPGRADE_SSTABLES, INDEX_BUILD, TOMBSTONE_COMPACTION, ANTICOMPACTION,
-            VERIFY, VIEW_BUILD, INDEX_SUMMARY, RELOCATE, GARBAGE_COLLECT
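For example, using two of the compaction types listed above:

```bash
# Stop all regular compactions currently running on this node
$ nodetool stop COMPACTION

# Stop validation compactions (such as those triggered by repair)
$ nodetool stop VALIDATION
```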
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/stopdaemon.html b/src/doc/3.11.6/tools/nodetool/stopdaemon.html deleted file mode 100644 index a706d8cee..000000000 --- a/src/doc/3.11.6/tools/nodetool/stopdaemon.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stopdaemon" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stopdaemon

-
-
-

Usage

-
NAME
-        nodetool stopdaemon - Stop cassandra daemon
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stopdaemon
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/tablehistograms.html b/src/doc/3.11.6/tools/nodetool/tablehistograms.html deleted file mode 100644 index f99b2a340..000000000 --- a/src/doc/3.11.6/tools/nodetool/tablehistograms.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablehistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablehistograms

-
-
-

Usage

-
NAME
-        nodetool tablehistograms - Print statistic histograms for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablehistograms [--]
-                [<keyspace> <table> | <keyspace.table>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <table> | <keyspace.table>]
-            The keyspace and table name
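Either argument form works; the names below are placeholders:

```bash
$ nodetool tablehistograms my_keyspace my_table
$ nodetool tablehistograms my_keyspace.my_table
```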
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/tablestats.html b/src/doc/3.11.6/tools/nodetool/tablestats.html deleted file mode 100644 index e6aac0bd9..000000000 --- a/src/doc/3.11.6/tools/nodetool/tablestats.html +++ /dev/null @@ -1,167 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablestats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablestats

-
-
-

Usage

-
NAME
-        nodetool tablestats - Print statistics on tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablestats
-                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]
-                [(-s <sort_key> | --sort <sort_key>)] [(-t <top> | --top <top>)] [--]
-                [<keyspace.table>...]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -i
-            Ignore the list of tables and display the remaining tables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <sort_key>, --sort <sort_key>
-            Sort tables by specified sort key
-            (average_live_cells_per_slice_last_five_minutes,
-            average_tombstones_per_slice_last_five_minutes,
-            bloom_filter_false_positives, bloom_filter_false_ratio,
-            bloom_filter_off_heap_memory_used, bloom_filter_space_used,
-            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,
-            compacted_partition_minimum_bytes,
-            compression_metadata_off_heap_memory_used, dropped_mutations,
-            full_name, index_summary_off_heap_memory_used, local_read_count,
-            local_read_latency_ms, local_write_latency_ms,
-            maximum_live_cells_per_slice_last_five_minutes,
-            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,
-            memtable_data_size, memtable_off_heap_memory_used,
-            memtable_switch_count, number_of_partitions_estimate,
-            off_heap_memory_used_total, pending_flushes, percent_repaired,
-            read_latency, reads, space_used_by_snapshots_total, space_used_live,
-            space_used_total, sstable_compression_ratio, sstable_count,
-            table_name, write_latency, writes)
-
-        -t <top>, --top <top>
-            Show only the top K tables for the sort key (specify the number K of
-            tables to be shown)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace.table>...]
-            List of tables (or keyspace) names
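Two illustrative invocations (placeholder table name):

```bash
# Human-readable statistics for a single table
$ nodetool tablestats -H my_keyspace.my_table

# The five tables with the highest read latency, rendered as JSON
$ nodetool tablestats -F json -s read_latency -t 5
```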
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/toppartitions.html b/src/doc/3.11.6/tools/nodetool/toppartitions.html deleted file mode 100644 index 89f240e79..000000000 --- a/src/doc/3.11.6/tools/nodetool/toppartitions.html +++ /dev/null @@ -1,141 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "toppartitions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

toppartitions

-
-
-

Usage

-
NAME
-        nodetool toppartitions - Sample and print the most active partitions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] toppartitions [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
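For example, sampling for 10 seconds (names are placeholders):

```bash
$ nodetool toppartitions my_keyspace my_table 10000

# Narrow the report to the top 5 samples with a larger sampler capacity
$ nodetool toppartitions -k 5 -s 512 my_keyspace my_table 10000
```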
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/tpstats.html b/src/doc/3.11.6/tools/nodetool/tpstats.html deleted file mode 100644 index ad26b902e..000000000 --- a/src/doc/3.11.6/tools/nodetool/tpstats.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tpstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tpstats

-
-
-

Usage

-
NAME
-        nodetool tpstats - Print usage statistics of thread pools
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tpstats
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/truncatehints.html b/src/doc/3.11.6/tools/nodetool/truncatehints.html deleted file mode 100644 index 258d93698..000000000 --- a/src/doc/3.11.6/tools/nodetool/truncatehints.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "truncatehints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

truncatehints

-
-
-

Usage

-
NAME
-        nodetool truncatehints - Truncate all hints on the local node, or
-        truncate hints for the endpoint(s) specified.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] truncatehints [--] [endpoint
-                ... ]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [endpoint ... ]
-            Endpoint address(es) to delete hints for, either ip address
-            ("127.0.0.1") or hostname
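For example (the addresses shown are placeholders):

```bash
# Drop every hint stored on the local node
$ nodetool truncatehints

# Drop only the hints destined for specific endpoints
$ nodetool truncatehints 10.0.0.12 10.0.0.13
```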
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/upgradesstables.html b/src/doc/3.11.6/tools/nodetool/upgradesstables.html deleted file mode 100644 index 9216a90b9..000000000 --- a/src/doc/3.11.6/tools/nodetool/upgradesstables.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "upgradesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

upgradesstables

-
-
-

Usage

-
NAME
-        nodetool upgradesstables - Rewrite sstables (for the requested tables)
-        that are not on the current version (thus upgrading them to said current
-        version)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] upgradesstables
-                [(-a | --include-all-sstables)] [(-j <jobs> | --jobs <jobs>)] [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --include-all-sstables
-            Use -a to include all sstables, even those already on the current
-            version
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to upgrade simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
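Illustrative invocations (placeholder names):

```bash
# Rewrite out-of-date sstables for one keyspace, two at a time
$ nodetool upgradesstables -j 2 my_keyspace

# Rewrite every sstable of one table, even those already on the current version
$ nodetool upgradesstables -a my_keyspace my_table
```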
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/verify.html b/src/doc/3.11.6/tools/nodetool/verify.html deleted file mode 100644 index adc6f7808..000000000 --- a/src/doc/3.11.6/tools/nodetool/verify.html +++ /dev/null @@ -1,152 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "verify" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

verify

-
-
-

Usage

-
NAME
-        nodetool verify - Verify (check data checksum for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] verify
-                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]
-                [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)] [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -c, --check-version
-            Also check that all sstables are the latest version
-
-        -d, --dfp
-            Invoke the disk failure policy if a corrupt sstable is found
-
-        -e, --extended-verify
-            Verify each cell data, beyond simply checking sstable checksums
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick check - avoid reading all data to verify checksums
-
-        -r, --rsc
-            Mutate the repair status on corrupt sstables
-
-        -t, --check-tokens
-            Verify that all tokens in sstables are owned by this node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
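For example (placeholder names):

```bash
# Checksum verification of a single table
$ nodetool verify my_keyspace my_table

# Extended verification that inspects every cell, not just sstable checksums
$ nodetool verify -e my_keyspace my_table
```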
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/version.html b/src/doc/3.11.6/tools/nodetool/version.html deleted file mode 100644 index 7822eadcf..000000000 --- a/src/doc/3.11.6/tools/nodetool/version.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "version" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

version

-
-
-

Usage

-
NAME
-        nodetool version - Print cassandra version
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] version
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/tools/nodetool/viewbuildstatus.html b/src/doc/3.11.6/tools/nodetool/viewbuildstatus.html deleted file mode 100644 index 4aa0b3374..000000000 --- a/src/doc/3.11.6/tools/nodetool/viewbuildstatus.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "viewbuildstatus" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

viewbuildstatus

-
-
-

Usage

-
NAME
-        nodetool viewbuildstatus - Show progress of a materialized view build
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] viewbuildstatus [--]
-                <keyspace> <view> | <keyspace.view>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <view> | <keyspace.view>
-            The keyspace and view name
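Either argument form is accepted (placeholder names):

```bash
$ nodetool viewbuildstatus my_keyspace my_view
$ nodetool viewbuildstatus my_keyspace.my_view
```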
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.6/troubleshooting/index.html b/src/doc/3.11.6/troubleshooting/index.html deleted file mode 100644 index 91a630416..000000000 --- a/src/doc/3.11.6/troubleshooting/index.html +++ /dev/null @@ -1,100 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Troubleshooting" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Troubleshooting

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/.buildinfo b/src/doc/3.11.7/.buildinfo deleted file mode 100644 index 91fcb4b63..000000000 --- a/src/doc/3.11.7/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 304ed7f739f697e55c2d4dd55696f9b2 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/src/doc/3.11.7/_images/eclipse_debug0.png b/src/doc/3.11.7/_images/eclipse_debug0.png deleted file mode 100644 index 79fc5fd5b..000000000 Binary files a/src/doc/3.11.7/_images/eclipse_debug0.png and /dev/null differ diff --git a/src/doc/3.11.7/_images/eclipse_debug1.png b/src/doc/3.11.7/_images/eclipse_debug1.png deleted file mode 100644 index 87b8756a3..000000000 Binary files a/src/doc/3.11.7/_images/eclipse_debug1.png and /dev/null differ diff --git a/src/doc/3.11.7/_images/eclipse_debug2.png b/src/doc/3.11.7/_images/eclipse_debug2.png deleted file mode 100644 index df4eddbd7..000000000 Binary files a/src/doc/3.11.7/_images/eclipse_debug2.png and /dev/null differ diff --git a/src/doc/3.11.7/_images/eclipse_debug3.png b/src/doc/3.11.7/_images/eclipse_debug3.png deleted file mode 100644 index 23178142c..000000000 Binary files a/src/doc/3.11.7/_images/eclipse_debug3.png and /dev/null differ diff --git a/src/doc/3.11.7/_images/eclipse_debug4.png b/src/doc/3.11.7/_images/eclipse_debug4.png deleted file mode 100644 index 5063d4891..000000000 Binary files a/src/doc/3.11.7/_images/eclipse_debug4.png and /dev/null differ diff --git a/src/doc/3.11.7/_images/eclipse_debug5.png b/src/doc/3.11.7/_images/eclipse_debug5.png deleted file mode 100644 index ab68e68a3..000000000 Binary files a/src/doc/3.11.7/_images/eclipse_debug5.png and /dev/null differ diff --git a/src/doc/3.11.7/_images/eclipse_debug6.png b/src/doc/3.11.7/_images/eclipse_debug6.png deleted file mode 100644 index 61ef30bfe..000000000 Binary files a/src/doc/3.11.7/_images/eclipse_debug6.png and /dev/null differ diff --git a/src/doc/3.11.7/_sources/architecture/dynamo.rst.txt b/src/doc/3.11.7/_sources/architecture/dynamo.rst.txt deleted file mode 100644 index a7dbb8750..000000000 --- a/src/doc/3.11.7/_sources/architecture/dynamo.rst.txt +++ /dev/null @@ -1,139 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dynamo ------- - -.. _gossip: - -Gossip -^^^^^^ - -.. todo:: todo - -Failure Detection -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -Token Ring/Ranges -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -.. _replication-strategy: - -Replication -^^^^^^^^^^^ - -The replication strategy of a keyspace determines which nodes are replicas for a given token range. 
The two main replication strategies are :ref:`simple-strategy` and :ref:`network-topology-strategy`.
-
-.. _simple-strategy:
-
-SimpleStrategy
-~~~~~~~~~~~~~~
-
-SimpleStrategy allows a single integer ``replication_factor`` to be defined. This determines the number of nodes that should contain a copy of each row. For example, if ``replication_factor`` is 3, then three different nodes should store a copy of each row.
-
-SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to the set. This process continues until ``replication_factor`` distinct nodes have been added to the set of replicas.
-
-.. _network-topology-strategy:
-
-NetworkTopologyStrategy
-~~~~~~~~~~~~~~~~~~~~~~~
-
-NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier to add new physical or virtual datacenters to the cluster later.
-
-In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially `surprising implications `_. For example, if the racks do not contain an equal number of nodes, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to configure all nodes on a single "rack".
-
-Tunable Consistency
-^^^^^^^^^^^^^^^^^^^
-
-Cassandra supports a per-operation tradeoff between consistency and availability through *Consistency Levels*. Essentially, an operation's consistency level specifies how many of the replicas need to respond to the coordinator in order to consider the operation a success.
-
-The following consistency levels are available:
-
-``ONE``
-  Only a single replica must respond.
-
-``TWO``
-  Two replicas must respond.
-
-``THREE``
-  Three replicas must respond.
-
-``QUORUM``
-  A majority (n/2 + 1) of the replicas must respond.
-
-``ALL``
-  All of the replicas must respond.
-
-``LOCAL_QUORUM``
-  A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
-``EACH_QUORUM``
-  A majority of the replicas in each datacenter must respond.
-
-``LOCAL_ONE``
-  Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not sent to replicas in a remote datacenter.
-
-``ANY``
-  A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for write operations.
-
-Write operations are always sent to all replicas, regardless of consistency level.
The consistency level simply -controls how many responses the coordinator waits for before responding to the client. - -For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level. There are a couple of exceptions to this: - -- Speculative retry may issue a redundant read request to an extra replica if the other replicas have not responded - within a specified time window. -- Based on ``read_repair_chance`` and ``dclocal_read_repair_chance`` (part of a table's schema), read requests may be - randomly sent to all replicas in order to repair potentially inconsistent data. - -Picking Consistency Levels -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is common to pick read and write consistency levels that are high enough to overlap, resulting in "strong" -consistency. This is typically expressed as ``W + R > RF``, where ``W`` is the write consistency level, ``R`` is the -read consistency level, and ``RF`` is the replication factor. For example, if ``RF = 3``, a ``QUORUM`` request will -require responses from at least two of the three replicas. If ``QUORUM`` is used for both writes and reads, at least -one of the replicas is guaranteed to participate in *both* the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, ``LOCAL_QUORUM`` can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter. - -If this type of strong consistency isn't required, lower consistency levels like ``ONE`` may be used to improve -throughput, latency, and availability. diff --git a/src/doc/3.11.7/_sources/architecture/guarantees.rst.txt b/src/doc/3.11.7/_sources/architecture/guarantees.rst.txt deleted file mode 100644 index c0b58d880..000000000 --- a/src/doc/3.11.7/_sources/architecture/guarantees.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Guarantees ----------- - -.. todo:: todo diff --git a/src/doc/3.11.7/_sources/architecture/index.rst.txt b/src/doc/3.11.7/_sources/architecture/index.rst.txt deleted file mode 100644 index 58eda1377..000000000 --- a/src/doc/3.11.7/_sources/architecture/index.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Architecture -============ - -This section describes the general architecture of Apache Cassandra. - -.. toctree:: - :maxdepth: 2 - - overview - dynamo - storage_engine - guarantees - diff --git a/src/doc/3.11.7/_sources/architecture/overview.rst.txt b/src/doc/3.11.7/_sources/architecture/overview.rst.txt deleted file mode 100644 index 005b15b94..000000000 --- a/src/doc/3.11.7/_sources/architecture/overview.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Overview --------- - -.. todo:: todo diff --git a/src/doc/3.11.7/_sources/architecture/storage_engine.rst.txt b/src/doc/3.11.7/_sources/architecture/storage_engine.rst.txt deleted file mode 100644 index e4114e5af..000000000 --- a/src/doc/3.11.7/_sources/architecture/storage_engine.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Storage Engine --------------- - -.. _commit-log: - -CommitLog -^^^^^^^^^ - -.. todo:: todo - -.. _memtables: - -Memtables -^^^^^^^^^ - -Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable `SSTables`_. This can be triggered in several -ways: - -- The memory usage of the memtables exceeds the configured threshold (see ``memtable_cleanup_threshold``) -- The :ref:`commit-log` approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to - be freed - -Memtables may be stored entirely on-heap or partially off-heap, depending on ``memtable_allocation_type``. - -SSTables -^^^^^^^^ - -SSTables are the immutable data files that Cassandra uses for persisting data on disk. 
-
-As SSTables are flushed to disk from :ref:`memtables` or are streamed from other nodes, Cassandra triggers compactions which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.
-
-Each SSTable is comprised of multiple components stored in separate files:
-
-``Data.db``
-  The actual data, i.e. the contents of rows.
-
-``Index.db``
-  An index from partition keys to positions in the ``Data.db`` file. For wide partitions, this may also include an index to rows within a partition.
-
-``Summary.db``
-  A sampling of (by default) every 128th entry in the ``Index.db`` file.
-
-``Filter.db``
-  A Bloom Filter of the partition keys in the SSTable.
-
-``CompressionInfo.db``
-  Metadata about the offsets and lengths of compression chunks in the ``Data.db`` file.
-
-``Statistics.db``
-  Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, repair, compression, TTLs, and more.
-
-``Digest.crc32``
-  A CRC-32 digest of the ``Data.db`` file.
-
-``TOC.txt``
-  A plain text list of the component files for the SSTable.
-
-Within the ``Data.db`` file, rows are organized by partition. These partitions are sorted in token order (i.e. by a hash of the partition key when the default partitioner, ``Murmur3Partitioner``, is used). Within a partition, rows are stored in the order of their clustering keys.
-
-SSTables can be optionally compressed using block-based compression.
diff --git a/src/doc/3.11.7/_sources/bugs.rst.txt b/src/doc/3.11.7/_sources/bugs.rst.txt deleted file mode 100644 index 240cfd495..000000000 --- a/src/doc/3.11.7/_sources/bugs.rst.txt +++ /dev/null @@ -1,30 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-Reporting Bugs and Contributing
-===============================
-
-If you encounter a problem with Cassandra, the first places to ask for help are the :ref:`user mailing list ` and the ``#cassandra`` :ref:`IRC channel `.
-
-If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a ticket through the `Apache Cassandra JIRA `__. Please provide as much detail as you can about your problem, and don't forget to indicate which version of Cassandra you are running and in which environment.
-
-Further details on how to contribute can be found in our :doc:`development/index` section. Please note that the source of this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the same path.
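The replication and tunable-consistency material in the dynamo.rst.txt page removed above maps directly onto client code. The sketch below is not part of the deleted documentation; it is a minimal illustration assuming a locally running node, the DataStax `cassandra-driver` Python package, and hypothetical names (a `demo` keyspace, a `users` table, and a datacenter called `dc1`). With a replication factor of 3 and `QUORUM` for both writes and reads, W + R = 4 > RF = 3, so every read overlaps at least one replica that acknowledged the write.

```python
from cassandra.cluster import Cluster
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement

# Connect to a single local node (contact point is an assumption).
cluster = Cluster(["127.0.0.1"])
session = cluster.connect()

# NetworkTopologyStrategy with RF=3 in a datacenter assumed to be named "dc1".
session.execute("""
    CREATE KEYSPACE IF NOT EXISTS demo
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3}
""")
session.set_keyspace("demo")
session.execute("CREATE TABLE IF NOT EXISTS users (id int PRIMARY KEY, name text)")

# QUORUM write: the coordinator waits for 2 of the 3 replicas to acknowledge.
write = SimpleStatement(
    "INSERT INTO users (id, name) VALUES (%s, %s)",
    consistency_level=ConsistencyLevel.QUORUM)
session.execute(write, (1, "alice"))

# QUORUM read: 2 of 3 replicas respond, guaranteeing overlap with the write set.
read = SimpleStatement(
    "SELECT name FROM users WHERE id = %s",
    consistency_level=ConsistencyLevel.QUORUM)
print(session.execute(read, (1,)).one().name)

cluster.shutdown()
```

The same pattern applies to a single-datacenter SimpleStrategy keyspace; only the replication map in the `CREATE KEYSPACE` statement changes.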
diff --git a/src/doc/3.11.7/_sources/configuration/cassandra_config_file.rst.txt b/src/doc/3.11.7/_sources/configuration/cassandra_config_file.rst.txt deleted file mode 100644 index f205f7d30..000000000 --- a/src/doc/3.11.7/_sources/configuration/cassandra_config_file.rst.txt +++ /dev/null @@ -1,1911 +0,0 @@ -.. _cassandra-yaml: - -Cassandra Configuration File -============================ - -``cluster_name`` ----------------- -The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another. - -*Default Value:* 'Test Cluster' - -``num_tokens`` --------------- - -This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability. - -If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below. - -Specifying initial_token will override this setting on the node's initial start, -on subsequent starts, this setting will apply even if initial token is set. - -If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations - -*Default Value:* 256 - -``allocate_tokens_for_keyspace`` --------------------------------- -*This option is commented out by default.* - -Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace. - -The load assigned to each node will be close to proportional to its number of -vnodes. - -Only supported with the Murmur3Partitioner. - -*Default Value:* KEYSPACE - -``initial_token`` ------------------ -*This option is commented out by default.* - -initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) -- in which case you should provide a -comma-separated list -- it's primarily used when adding nodes to legacy clusters -that do not have vnodes enabled. - -``hinted_handoff_enabled`` --------------------------- - -See http://wiki.apache.org/cassandra/HintedHandoff -May either be "true" or "false" to enable globally - -*Default Value:* true - -``hinted_handoff_disabled_datacenters`` ---------------------------------------- -*This option is commented out by default.* - -When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff - -*Default Value (complex option)*:: - - # - DC1 - # - DC2 - -``max_hint_window_in_ms`` -------------------------- -this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again. - -*Default Value:* 10800000 # 3 hours - -``hinted_handoff_throttle_in_kb`` ---------------------------------- - -Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.) 
- -*Default Value:* 1024 - -``max_hints_delivery_threads`` ------------------------------- - -Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower - -*Default Value:* 2 - -``hints_directory`` -------------------- -*This option is commented out by default.* - -Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints. - -*Default Value:* /var/lib/cassandra/hints - -``hints_flush_period_in_ms`` ----------------------------- - -How often hints should be flushed from the internal buffers to disk. -Will *not* trigger fsync. - -*Default Value:* 10000 - -``max_hints_file_size_in_mb`` ------------------------------ - -Maximum size for a single hints file, in megabytes. - -*Default Value:* 128 - -``hints_compression`` ---------------------- -*This option is commented out by default.* - -Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``batchlog_replay_throttle_in_kb`` ----------------------------------- -Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster. - -*Default Value:* 1024 - -``authenticator`` ------------------ - -Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}. - -- AllowAllAuthenticator performs no checks - set it to disable authentication. -- PasswordAuthenticator relies on username/password pairs to authenticate - users. It keeps usernames and hashed passwords in system_auth.roles table. - Please increase system_auth keyspace replication factor if you use this authenticator. - If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) - -*Default Value:* AllowAllAuthenticator - -``authorizer`` --------------- - -Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}. - -- AllowAllAuthorizer allows any action to any user - set it to disable authorization. -- CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllAuthorizer - -``role_manager`` ----------------- - -Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable. - -- CassandraRoleManager stores role data in the system_auth keyspace. Please - increase system_auth keyspace replication factor if you use this role manager. 
- -*Default Value:* CassandraRoleManager - -``roles_validity_in_ms`` ------------------------- - -Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator. - -*Default Value:* 2000 - -``roles_update_interval_in_ms`` -------------------------------- -*This option is commented out by default.* - -Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms. - -*Default Value:* 2000 - -``permissions_validity_in_ms`` ------------------------------- - -Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer. - -*Default Value:* 2000 - -``permissions_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms. - -*Default Value:* 2000 - -``credentials_validity_in_ms`` ------------------------------- - -Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching. - -*Default Value:* 2000 - -``credentials_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms. - -*Default Value:* 2000 - -``partitioner`` ---------------- - -The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. You should leave this -alone for new clusters. The partitioner can NOT be changed without -reloading all data, so when upgrading you should set this to the -same partitioner you were already using. - -Besides Murmur3Partitioner, partitioners included for backwards -compatibility include RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner. 
- - -*Default Value:* org.apache.cassandra.dht.Murmur3Partitioner - -``data_file_directories`` -------------------------- -*This option is commented out by default.* - -Directories where Cassandra should store data on disk. Cassandra -will spread data evenly across them, subject to the granularity of -the configured compaction strategy. -If not set, the default directory is $CASSANDRA_HOME/data/data. - -*Default Value (complex option)*:: - - # - /var/lib/cassandra/data - -``commitlog_directory`` ------------------------ -*This option is commented out by default.* -commit log. when running on magnetic HDD, this should be a -separate spindle than the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -``cdc_enabled`` ---------------- - -Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory). - -*Default Value:* false - -``cdc_raw_directory`` ---------------------- -*This option is commented out by default.* - -CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw. - -*Default Value:* /var/lib/cassandra/cdc_raw - -``disk_failure_policy`` ------------------------ - -Policy for data disk failures: - -die - shut down gossip and client transports and kill the JVM for any fs errors or - single-sstable errors, so the node can be replaced. - -stop_paranoid - shut down gossip and client transports even for single-sstable errors, - kill the JVM for errors during startup. - -stop - shut down gossip and client transports, leaving the node effectively dead, but - can still be inspected via JMX, kill the JVM for errors during startup. - -best_effort - stop using the failed disk and respond to requests based on - remaining available sstables. This means you WILL see obsolete - data at CL.ONE! - -ignore - ignore fatal errors and let requests fail, as in pre-1.2 Cassandra - -*Default Value:* stop - -``commit_failure_policy`` -------------------------- - -Policy for commit disk failures: - -die - shut down gossip and Thrift and kill the JVM, so the node can be replaced. - -stop - shut down gossip and Thrift, leaving the node effectively dead, but - can still be inspected via JMX. - -stop_commit - shutdown the commit log, letting writes collect but - continuing to service reads, as in pre-2.0.5 Cassandra - -ignore - ignore fatal errors and let the batches fail - -*Default Value:* stop - -``prepared_statements_cache_size_mb`` -------------------------------------- - -Maximum size of the native protocol prepared statement cache - -Valid values are either "auto" (omitting the value) or a value greater 0. - -Note that specifying a too large value will result in long running GCs and possbily -out-of-memory errors. Keep the value at a small fraction of the heap. - -If you constantly see "prepared statements discarded in the last minute because -cache limit reached" messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts. 
- -Do only change the default value, if you really have more prepared statements than -fit in the cache. In most cases it is not neccessary to change this value. -Constantly re-preparing statements is a performance penalty. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``thrift_prepared_statements_cache_size_mb`` --------------------------------------------- - -Maximum size of the Thrift prepared statement cache - -If you do not use Thrift at all, it is safe to leave this value at "auto". - -See description of 'prepared_statements_cache_size_mb' above for more information. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``key_cache_size_in_mb`` ------------------------- - -Maximum size of the key cache in memory. - -Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it's worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It's best to only use the -row cache if you have hot rows or static rows. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. - -``key_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 14400 or 4 hours. - -*Default Value:* 14400 - -``key_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``row_cache_class_name`` ------------------------- -*This option is commented out by default.* - -Row cache implementation class name. Available implementations: - -org.apache.cassandra.cache.OHCProvider - Fully off-heap row cache implementation (default). - -org.apache.cassandra.cache.SerializingCacheProvider - This is the row cache implementation availabile - in previous releases of Cassandra. - -*Default Value:* org.apache.cassandra.cache.OHCProvider - -``row_cache_size_in_mb`` ------------------------- - -Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory that the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Do never allow your system to swap. - -Default value is 0, to disable row caching. - -*Default Value:* 0 - -``row_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. 
- -Default is 0 to disable saving the row cache. - -*Default Value:* 0 - -``row_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved - -*Default Value:* 100 - -``counter_cache_size_in_mb`` ----------------------------- - -Maximum size of the counter cache in memory. - -Counter cache helps to reduce counter locks' contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it's relatively cheap. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. - -``counter_cache_save_period`` ------------------------------ - -Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file. - -Default is 7200 or 2 hours. - -*Default Value:* 7200 - -``counter_cache_keys_to_save`` ------------------------------- -*This option is commented out by default.* - -Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``saved_caches_directory`` --------------------------- -*This option is commented out by default.* - -saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. - -*Default Value:* /var/lib/cassandra/saved_caches - -``commitlog_sync`` ------------------- -*This option is commented out by default.* - -commitlog_sync may be either "periodic" or "batch." - -When in batch mode, Cassandra won't ack writes until the commit log -has been fsynced to disk. It will wait -commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -This window should be kept short because the writer threads will -be unable to do extra work while waiting. (You may need to increase -concurrent_writes for the same reason.) - - -*Default Value:* batch - -``commitlog_sync_batch_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -*Default Value:* 2 - -``commitlog_sync`` ------------------- - -the other option is "periodic" where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds. - -*Default Value:* periodic - -``commitlog_sync_period_in_ms`` -------------------------------- - -*Default Value:* 10000 - -``commitlog_segment_size_in_mb`` --------------------------------- - -The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables. - -The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. 
-Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048. - -NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024 - - -*Default Value:* 32 - -``commitlog_compression`` -------------------------- -*This option is commented out by default.* - -Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``seed_provider`` ------------------ -any class that implements the SeedProvider interface and has a -constructor that takes a Map of parameters will do. - -*Default Value (complex option)*:: - - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: ",," - - seeds: "127.0.0.1" - -``concurrent_reads`` --------------------- -For workloads with more data than can fit in memory, Cassandra's -bottleneck will be reads that need to fetch data from -disk. "concurrent_reads" should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -"concurrent_counter_writes", since counter writes read the current -values before incrementing and writing them back. - -On the other hand, since writes are almost never IO bound, the ideal -number of "concurrent_writes" is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb. - -*Default Value:* 32 - -``concurrent_writes`` ---------------------- - -*Default Value:* 32 - -``concurrent_counter_writes`` ------------------------------ - -*Default Value:* 32 - -``concurrent_materialized_view_writes`` ---------------------------------------- - -For materialized view writes, as there is a read involved, so this should -be limited by the less of concurrent reads or concurrent writes. - -*Default Value:* 32 - -``file_cache_size_in_mb`` -------------------------- -*This option is commented out by default.* - -Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as an -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed. - -*Default Value:* 512 - -``buffer_pool_use_heap_if_exhausted`` -------------------------------------- -*This option is commented out by default.* - -Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. 
- - -*Default Value:* true - -``disk_optimization_strategy`` ------------------------------- -*This option is commented out by default.* - -The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks) - -*Default Value:* ssd - -``memtable_heap_space_in_mb`` ------------------------------ -*This option is commented out by default.* - -Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap. - -*Default Value:* 2048 - -``memtable_offheap_space_in_mb`` --------------------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``memtable_cleanup_threshold`` ------------------------------- -*This option is commented out by default.* - -memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information. - -Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load. - -memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) - -*Default Value:* 0.11 - -``memtable_allocation_type`` ----------------------------- - -Specify the way Cassandra allocates and manages memtable memory. -Options are: - -heap_buffers - on heap nio buffers - -offheap_buffers - off heap (direct) nio buffers - -offheap_objects - off heap objects - -*Default Value:* heap_buffers - -``repair_session_max_tree_depth`` ---------------------------------- -*This option is commented out by default.* - -Limits the maximum Merkle tree depth to avoid consuming too much -memory during repairs. - -The default setting of 18 generates trees of maximum size around -50 MiB / tree. If you are running out of memory during repairs consider -lowering this to 15 (~6 MiB / tree) or lower, but try not to lower it -too much past that or you will lose too much resolution and stream -too much redundant data during repair. Cannot be set lower than 10. - -For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096. - - -*Default Value:* 18 - -``commitlog_total_space_in_mb`` -------------------------------- -*This option is commented out by default.* - -Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume. - - -*Default Value:* 8192 - -``memtable_flush_writers`` --------------------------- -*This option is commented out by default.* - -This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound. - -Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. 
-At that point you need multiple flush threads. At some point in the future it may become CPU bound all the time.
-
-You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation metric which should be 0, but will be non-zero if threads are blocked waiting on flushing to free memory.
-
-memtable_flush_writers defaults to two for a single data directory. This means that two memtables can be flushed concurrently to the single data directory. If you have multiple data directories the default is one memtable flushing at a time, but the flush will use a thread per data directory so you will get two or more writers.
-
-Two is generally enough to flush on a fast disk [array] mounted as a single data directory. Adding more flush writers will result in smaller, more frequent flushes that introduce more compaction overhead.
-
-There is a direct tradeoff between the number of memtables that can be flushed concurrently and flush size and frequency. More is not better; you just need enough flush writers to never stall waiting for flushing to free memory.
-
-*Default Value:* 2
-
-``cdc_total_space_in_mb``
--------------------------
-*This option is commented out by default.*
-
-Total space to use for change-data-capture logs on disk.
-
-If space gets above this value, Cassandra will throw WriteTimeoutException on Mutations including tables with CDC enabled. A CDCCompactor is responsible for parsing the raw CDC logs and deleting them when parsing is completed.
-
-The default value is the min of 4096 MB and 1/8th of the total space of the drive where cdc_raw_directory resides.
-
-*Default Value:* 4096
-
-``cdc_free_space_check_interval_ms``
-------------------------------------
-*This option is commented out by default.*
-
-When we hit our cdc_raw limit and the CDCCompactor is either running behind or experiencing backpressure, we check at the following interval to see if any new space for cdc-tracked tables has been made available. Defaults to 250ms.
-
-*Default Value:* 250
-
-``index_summary_capacity_in_mb``
---------------------------------
-
-A fixed memory pool size in MB for SSTable index summaries. If left empty, this will default to 5% of the heap size. If the memory usage of all index summaries exceeds this limit, SSTables with low read rates will shrink their index summaries in order to meet this limit. However, this is a best-effort process. In extreme conditions Cassandra may need to use more than this amount of memory.
-
-``index_summary_resize_interval_in_minutes``
---------------------------------------------
-
-How frequently index summaries should be resampled. This is done periodically to redistribute memory from the fixed-size pool to sstables proportional to their recent read rates. Setting to -1 will disable this process, leaving existing index summaries at their current sampling level.
-
-*Default Value:* 60
-
-``trickle_fsync``
------------------
-
-Whether to, when doing sequential writing, fsync() at intervals in order to force the operating system to flush the dirty buffers. Enable this to avoid sudden dirty buffer flushing from impacting read latencies. Almost always a good idea on SSDs; not necessarily on platters.
-
-*Default Value:* false
-
-``trickle_fsync_interval_in_kb``
---------------------------------
-
-*Default Value:* 10240
-
-``storage_port``
-----------------
-
-TCP port, for commands and data. For security reasons, you should not expose this port to the internet. Firewall it if needed.
- -*Default Value:* 7000 - -``ssl_storage_port`` --------------------- - -SSL port, for encrypted communication. Unused unless enabled in -encryption_options -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7001 - -``listen_address`` ------------------- - -Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate! - -Set listen_address OR listen_interface, not both. - -Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be). - -Setting listen_address to 0.0.0.0 is always wrong. - - -*Default Value:* localhost - -``listen_interface`` --------------------- -*This option is commented out by default.* - -Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth0 - -``listen_interface_prefer_ipv6`` --------------------------------- -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_address`` ---------------------- -*This option is commented out by default.* - -Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address - -*Default Value:* 1.2.3.4 - -``listen_on_broadcast_address`` -------------------------------- -*This option is commented out by default.* - -When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2. - -*Default Value:* false - -``internode_authenticator`` ---------------------------- -*This option is commented out by default.* - -Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes. - -*Default Value:* org.apache.cassandra.auth.AllowAllInternodeAuthenticator - -``start_native_transport`` --------------------------- - -Whether to start the native transport server. -Please note that the address on which the native transport is bound is the -same as the rpc_address. The port however is different and specified below. - -*Default Value:* true - -``native_transport_port`` -------------------------- -port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 9042 - -``native_transport_port_ssl`` ------------------------------ -*This option is commented out by default.* -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. 
-Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted. - -*Default Value:* 9142 - -``native_transport_max_threads`` --------------------------------- -*This option is commented out by default.* -The maximum threads for handling requests when the native transport is used. -This is similar to rpc_max_threads though the default differs slightly (and -there is no native_transport_min_threads, idle threads will always be stopped -after 30 seconds). - -*Default Value:* 128 - -``native_transport_max_frame_size_in_mb`` ------------------------------------------ -*This option is commented out by default.* - -The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you're changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048. - -*Default Value:* 256 - -``native_transport_max_concurrent_connections`` ------------------------------------------------ -*This option is commented out by default.* - -The maximum number of concurrent client connections. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_max_concurrent_connections_per_ip`` ------------------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``start_rpc`` -------------- - -Whether to start the thrift rpc server. - -*Default Value:* false - -``rpc_address`` ---------------- - -The address or interface to bind the Thrift RPC service and native transport -server to. - -Set rpc_address OR rpc_interface, not both. - -Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node). - -Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0. - -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* localhost - -``rpc_interface`` ------------------ -*This option is commented out by default.* - -Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth1 - -``rpc_interface_prefer_ipv6`` ------------------------------ -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``rpc_port`` ------------- - -port for Thrift to listen for clients on - -*Default Value:* 9160 - -``broadcast_rpc_address`` -------------------------- -*This option is commented out by default.* - -RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set. 
- -*Default Value:* 1.2.3.4 - -``rpc_keepalive`` ------------------ - -enable or disable keepalive on rpc/native connections - -*Default Value:* true - -``rpc_server_type`` -------------------- - -Cassandra provides two out-of-the-box options for the RPC Server: - -sync - One thread per thrift connection. For a very large number of clients, memory - will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size - per thread, and that will correspond to your use of virtual memory (but physical memory - may be limited depending on use of stack space). - -hsha - Stands for "half synchronous, half asynchronous." All thrift clients are handled - asynchronously using a small number of threads that does not vary with the amount - of thrift clients (and thus scales well to many clients). The rpc requests are still - synchronous (one thread per active request). If hsha is selected then it is essential - that rpc_max_threads is changed from the default value of unlimited. - -The default is sync because on Windows hsha is about 30% slower. On Linux, -sync/hsha performance is about the same, with hsha of course using less memory. - -Alternatively, can provide your own RPC server by providing the fully-qualified class name -of an o.a.c.t.TServerFactory that can create an instance of it. - -*Default Value:* sync - -``rpc_min_threads`` -------------------- -*This option is commented out by default.* - -Uncomment rpc_min|max_thread to set request pool size limits. - -Regardless of your choice of RPC server (see above), the number of maximum requests in the -RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -RPC server, it also dictates the number of clients that can be connected at all). - -The default is unlimited and thus provides no protection against clients overwhelming the server. You are -encouraged to set a maximum that makes sense for you in production, but do keep in mind that -rpc_max_threads represents the maximum number of client requests this server may execute concurrently. - - -*Default Value:* 16 - -``rpc_max_threads`` -------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``rpc_send_buff_size_in_bytes`` -------------------------------- -*This option is commented out by default.* - -uncomment to set socket buffer sizes on rpc connections - -``rpc_recv_buff_size_in_bytes`` -------------------------------- -*This option is commented out by default.* - -``internode_send_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and 'man tcp' - -``internode_recv_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem - -``thrift_framed_transport_size_in_mb`` --------------------------------------- - -Frame size for thrift (maximum message length). 
- -*Default Value:* 15 - -``incremental_backups`` ------------------------ - -Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator's -responsibility. - -*Default Value:* false - -``snapshot_before_compaction`` ------------------------------- - -Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won't clean up the -snapshots for you. Mostly useful if you're paranoid when there -is a data format change. - -*Default Value:* false - -``auto_snapshot`` ------------------ - -Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop. - -*Default Value:* true - -``column_index_size_in_kb`` ---------------------------- - -Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these: - -- a smaller granularity means more index entries are generated - and looking up rows withing the partition by collation column - is faster -- but, Cassandra will keep the collation index in memory for hot - rows (as part of the key cache), so a larger granularity means - you can cache more hot rows - -*Default Value:* 64 - -``column_index_cache_size_in_kb`` ---------------------------------- - -Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk. - -Note that this size refers to the size of the -serialized index information and not the size of the partition. - -*Default Value:* 2 - -``concurrent_compactors`` -------------------------- -*This option is commented out by default.* - -Number of simultaneous compactions to allow, NOT including -validation "compactions" for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long running compactions. The default is usually -fine and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first. - -concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8. - -If your data directories are backed by SSD, you should increase this -to the number of cores. - -*Default Value:* 1 - -``compaction_throughput_mb_per_sec`` ------------------------------------- - -Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this account for all types -of compaction, including validation compaction. 
- -*Default Value:* 16 - -``sstable_preemptive_open_interval_in_mb`` ------------------------------------------- - -When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot - -*Default Value:* 50 - -``stream_throughput_outbound_megabits_per_sec`` ------------------------------------------------ -*This option is commented out by default.* - -Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s. - -*Default Value:* 200 - -``inter_dc_stream_throughput_outbound_megabits_per_sec`` --------------------------------------------------------- -*This option is commented out by default.* - -Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s - -*Default Value:* 200 - -``read_request_timeout_in_ms`` ------------------------------- - -How long the coordinator should wait for read operations to complete - -*Default Value:* 5000 - -``range_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for seq or index scans to complete - -*Default Value:* 10000 - -``write_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for writes to complete - -*Default Value:* 2000 - -``counter_write_request_timeout_in_ms`` ---------------------------------------- -How long the coordinator should wait for counter writes to complete - -*Default Value:* 5000 - -``cas_contention_timeout_in_ms`` --------------------------------- -How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row - -*Default Value:* 1000 - -``truncate_request_timeout_in_ms`` ----------------------------------- -How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) - -*Default Value:* 60000 - -``request_timeout_in_ms`` -------------------------- -The default timeout for other, miscellaneous operations - -*Default Value:* 10000 - -``slow_query_log_timeout_in_ms`` --------------------------------- - -How long before a node logs slow queries. Select queries that take longer than -this timeout to execute, will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging. - -*Default Value:* 500 - -``cross_node_timeout`` ----------------------- - -Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests. 
- -Warning: before enabling this property make sure to ntp is installed -and the times are synchronized between the nodes. - -*Default Value:* false - -``streaming_keep_alive_period_in_secs`` ---------------------------------------- -*This option is commented out by default.* - -Set keep-alive period for streaming -This node will send a keep-alive message periodically with this period. -If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles the stream session times out and fail -Default value is 300s (5 minutes), which means stalled stream -times out in 10 minutes by default - -*Default Value:* 300 - -``phi_convict_threshold`` -------------------------- -*This option is commented out by default.* - -phi value that must be reached for a host to be marked down. -most users should never need to adjust this. - -*Default Value:* 8 - -``endpoint_snitch`` -------------------- - -endpoint_snitch -- Set this to a class that implements -IEndpointSnitch. The snitch has two functions: - -- it teaches Cassandra enough about your network topology to route - requests efficiently -- it allows Cassandra to spread replicas around your cluster to avoid - correlated failures. It does this by grouping machines into - "datacenters" and "racks." Cassandra will do its best not to have - more than one replica on the same "rack" (which may not actually - be a physical location) - -CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on "rack1" in "datacenter1", your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new "datacenter") and -decommissioning the old ones. - -Out of the box, Cassandra provides: - -SimpleSnitch: - Treats Strategy order as proximity. This can improve cache - locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack - and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via - gossip. If cassandra-topology.properties exists, it is used as a - fallback, allowing migration from the PropertyFileSnitch. - -PropertyFileSnitch: - Proximity is determined by rack and data center, which are - explicitly configured in cassandra-topology.properties. - -Ec2Snitch: - Appropriate for EC2 deployments in a single Region. Loads Region - and Availability Zone information from the EC2 API. The Region is - treated as the datacenter, and the Availability Zone as the rack. - Only private IPs are used, so this will not work across multiple - Regions. - -Ec2MultiRegionSnitch: - Uses public IPs as broadcast_address to allow cross-region - connectivity. (Thus, you should set seed addresses to the public - IP as well.) You will need to open the storage_port or - ssl_storage_port on the public IP firewall. (For intra-Region - traffic, Cassandra will switch to the private IP after - establishing a connection.) - -RackInferringSnitch: - Proximity is determined by rack and data center, which are - assumed to correspond to the 3rd and 2nd octet of each node's IP - address, respectively. 
Unless this happens to match your - deployment conventions, this is best used as an example of - writing a custom Snitch class and is provided in that spirit. - -You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath. - -*Default Value:* SimpleSnitch - -``dynamic_snitch_update_interval_in_ms`` ----------------------------------------- - -controls how often to perform the more expensive part of host score -calculation - -*Default Value:* 100 - -``dynamic_snitch_reset_interval_in_ms`` ---------------------------------------- -controls how often to reset all host scores, allowing a bad host to -possibly recover - -*Default Value:* 600000 - -``dynamic_snitch_badness_threshold`` ------------------------------------- -if set greater than zero and read_repair_chance is < 1.0, this will allow -'pinning' of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest. - -*Default Value:* 0.1 - -``request_scheduler`` ---------------------- - -request_scheduler -- Set this to a class that implements -RequestScheduler, which will schedule incoming client requests -according to the specific policy. This is useful for multi-tenancy -with a single Cassandra cluster. -NOTE: This is specifically for requests from the client and does -not affect inter node communication. -org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -client requests to a node with a separate queue for each -request_scheduler_id. The scheduler is further customized by -request_scheduler_options as described below. - -*Default Value:* org.apache.cassandra.scheduler.NoScheduler - -``request_scheduler_options`` ------------------------------ -*This option is commented out by default.* - -Scheduler Options vary based on the type of scheduler - -NoScheduler - Has no options - -RoundRobin - throttle_limit - The throttle_limit is the number of in-flight - requests per client. Requests beyond - that limit are queued up until - running requests can complete. - The value of 80 here is twice the number of - concurrent_reads + concurrent_writes. - default_weight - default_weight is optional and allows for - overriding the default which is 1. - weights - Weights are optional and will default to 1 or the - overridden default_weight. The weight translates into how - many requests are handled during each turn of the - RoundRobin, based on the scheduler id. - - -*Default Value (complex option)*:: - - # throttle_limit: 80 - # default_weight: 5 - # weights: - # Keyspace1: 1 - # Keyspace2: 5 - -``request_scheduler_id`` ------------------------- -*This option is commented out by default.* -request_scheduler_id -- An identifier based on which to perform -the request scheduling. Currently the only valid option is keyspace. - -*Default Value:* keyspace - -``server_encryption_options`` ------------------------------ - -Enable or disable inter-node encryption -JVM defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. 
This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html -*NOTE* No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack - -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks - -The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore - - -*Default Value (complex option)*:: - - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -``client_encryption_options`` ------------------------------ -enable or disable client/server encryption. - -*Default Value (complex option)*:: - - enabled: false - # If enabled and optional is set to true encrypted and unencrypted connections are handled. - optional: false - keystore: conf/.keystore - keystore_password: cassandra - # require_client_auth: false - # Set trustore and truststore_password if require_client_auth is true - # truststore: conf/.truststore - # truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - -``internode_compression`` -------------------------- -internode_compression controls whether traffic between nodes is -compressed. -Can be: - -all - all traffic is compressed - -dc - traffic between different datacenters is compressed - -none - nothing is compressed. - -*Default Value:* dc - -``inter_dc_tcp_nodelay`` ------------------------- - -Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses. - -*Default Value:* false - -``tracetype_query_ttl`` ------------------------ - -TTL for different trace types used during logging of the repair process. 
-
-*Default Value:* 86400
-
-``tracetype_repair_ttl``
-------------------------
-
-*Default Value:* 604800
-
-``gc_log_threshold_in_ms``
---------------------------
-*This option is commented out by default.*
-
-By default, Cassandra logs GC Pauses greater than 200 ms at INFO level.
-This threshold can be adjusted to minimize logging if necessary.
-
-*Default Value:* 200
-
-``enable_user_defined_functions``
----------------------------------
-
-If unset, all GC Pauses greater than gc_log_threshold_in_ms will be logged at
-INFO level.
-UDFs (user defined functions) are disabled by default.
-As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
-
-*Default Value:* false
-
-``enable_scripted_user_defined_functions``
--------------------------------------------
-
-Enables scripted UDFs (JavaScript UDFs).
-Java UDFs are always enabled if enable_user_defined_functions is true.
-Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
-This option has no effect if enable_user_defined_functions is false.
-
-*Default Value:* false
-
-``windows_timer_interval``
---------------------------
-
-The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
-Lowering this value on Windows can provide much tighter latency and better throughput; however,
-some virtualized environments may see a negative performance impact from changing this setting
-below their system default. The sysinternals 'clockres' tool can confirm your system's default
-setting.
-
-*Default Value:* 1
-
-``transparent_data_encryption_options``
-----------------------------------------
-
-Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
-a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
-the "key_alias" is the only key that will be used for encrypt operations; previously used keys
-can still (and should!) be in the keystore and will be used on decrypt operations
-(to handle the case of key rotation).
-
-It is strongly recommended to download and install Java Cryptography Extension (JCE)
-Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
-(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
-
-Currently, only the following file types are supported for transparent data encryption, although
-more are coming in future cassandra releases: commitlog, hints
-
-*Default Value (complex option)*::
-
-    enabled: false
-    chunk_length_kb: 64
-    cipher: AES/CBC/PKCS5Padding
-    key_alias: testing:1
-    # CBC IV length for AES needs to be 16 bytes (which is also the default size)
-    # iv_length: 16
-    key_provider:
-      - class_name: org.apache.cassandra.security.JKSKeyProvider
-        parameters:
-          - keystore: conf/.keystore
-            keystore_password: cassandra
-            store_type: JCEKS
-            key_password: cassandra
-
-``tombstone_warn_threshold``
-----------------------------
-
-####################
-SAFETY THRESHOLDS #
-####################
-
-When executing a scan, within or across a partition, we need to keep the
-tombstones seen in memory so we can return them to the coordinator, which
-will use them to make sure other replicas also know about the deleted rows.
-With workloads that generate a lot of tombstones, this can cause performance
-problems and even exhaust the server heap.
-(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -Adjust the thresholds here if you understand the dangers and want to -scan more tombstones anyway. These thresholds may also be adjusted at runtime -using the StorageService mbean. - -*Default Value:* 1000 - -``tombstone_failure_threshold`` -------------------------------- - -*Default Value:* 100000 - -``batch_size_warn_threshold_in_kb`` ------------------------------------ - -Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability. - -*Default Value:* 5 - -``batch_size_fail_threshold_in_kb`` ------------------------------------ - -Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default. - -*Default Value:* 50 - -``unlogged_batch_across_partitions_warn_threshold`` ---------------------------------------------------- - -Log WARN on any batches not of type LOGGED than span across more partitions than this limit - -*Default Value:* 10 - -``compaction_large_partition_warning_threshold_mb`` ---------------------------------------------------- - -Log a warning when compacting partitions larger than this value - -*Default Value:* 100 - -``gc_warn_threshold_in_ms`` ---------------------------- - -GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level - -*Default Value:* 1000 - -``max_value_size_in_mb`` ------------------------- -*This option is commented out by default.* - -Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048. - -*Default Value:* 256 - -``back_pressure_enabled`` -------------------------- - -Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas. - -*Default Value:* false - -``back_pressure_strategy`` --------------------------- -The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map. - -``otc_coalescing_strategy`` ---------------------------- -*This option is commented out by default.* - -Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). 
-On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal -doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details. - -Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name. - -*Default Value:* DISABLED - -``otc_coalescing_window_us`` ----------------------------- -*This option is commented out by default.* - -How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled. - -*Default Value:* 200 - -``otc_coalescing_enough_coalesced_messages`` --------------------------------------------- -*This option is commented out by default.* - -Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128. - -*Default Value:* 8 - -``otc_backlog_expiration_interval_ms`` --------------------------------------- -*This option is commented out by default.* - -How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions. - - -*Default Value:* 200 - -``enable_materialized_views`` ------------------------------ - - -######################## -EXPERIMENTAL FEATURES # -######################## - -Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use. - -*Default Value:* true - -``enable_sasi_indexes`` ------------------------ - -Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use. 
- -*Default Value:* true diff --git a/src/doc/3.11.7/_sources/configuration/index.rst.txt b/src/doc/3.11.7/_sources/configuration/index.rst.txt deleted file mode 100644 index f774fdad6..000000000 --- a/src/doc/3.11.7/_sources/configuration/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra -===================== - -This section describes how to configure Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cassandra_config_file diff --git a/src/doc/3.11.7/_sources/contactus.rst.txt b/src/doc/3.11.7/_sources/contactus.rst.txt deleted file mode 100644 index 8d0f5dd04..000000000 --- a/src/doc/3.11.7/_sources/contactus.rst.txt +++ /dev/null @@ -1,53 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contact us -========== - -You can get in touch with the Cassandra community either via the mailing lists or the freenode IRC channels. - -.. _mailing-lists: - -Mailing lists -------------- - -The following mailing lists are available: - -- `Users `__ – General discussion list for users - `Subscribe - `__ -- `Developers `__ – Development related discussion - `Subscribe - `__ -- `Commits `__ – Commit notification source repository - - `Subscribe `__ -- `Client Libraries `__ – Discussion related to the - development of idiomatic client APIs - `Subscribe `__ - -Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe. - -.. _irc-channels: - -IRC ---- - -To chat with developers or users in real-time, join our channels on `IRC freenode `__. The -following channels are available: - -- ``#cassandra`` - for user questions and general discussions. -- ``#cassandra-dev`` - strictly for questions or discussions related to Cassandra development. -- ``#cassandra-builds`` - results of automated test builds. 
- diff --git a/src/doc/3.11.7/_sources/cql/appendices.rst.txt b/src/doc/3.11.7/_sources/cql/appendices.rst.txt deleted file mode 100644 index 480b78ea2..000000000 --- a/src/doc/3.11.7/_sources/cql/appendices.rst.txt +++ /dev/null @@ -1,330 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Appendices ----------- - -.. _appendix-A: - -Appendix A: CQL Keywords -~~~~~~~~~~~~~~~~~~~~~~~~ - -CQL distinguishes between *reserved* and *non-reserved* keywords. -Reserved keywords cannot be used as identifier, they are truly reserved -for the language (but one can enclose a reserved keyword by -double-quotes to use it as an identifier). Non-reserved keywords however -only have a specific meaning in certain context but can used as -identifier otherwise. The only *raison d’être* of these non-reserved -keywords is convenience: some keyword are non-reserved when it was -always easy for the parser to decide whether they were used as keywords -or not. - -+--------------------+-------------+ -| Keyword | Reserved? 
| -+====================+=============+ -| ``ADD`` | yes | -+--------------------+-------------+ -| ``AGGREGATE`` | no | -+--------------------+-------------+ -| ``ALL`` | no | -+--------------------+-------------+ -| ``ALLOW`` | yes | -+--------------------+-------------+ -| ``ALTER`` | yes | -+--------------------+-------------+ -| ``AND`` | yes | -+--------------------+-------------+ -| ``APPLY`` | yes | -+--------------------+-------------+ -| ``AS`` | no | -+--------------------+-------------+ -| ``ASC`` | yes | -+--------------------+-------------+ -| ``ASCII`` | no | -+--------------------+-------------+ -| ``AUTHORIZE`` | yes | -+--------------------+-------------+ -| ``BATCH`` | yes | -+--------------------+-------------+ -| ``BEGIN`` | yes | -+--------------------+-------------+ -| ``BIGINT`` | no | -+--------------------+-------------+ -| ``BLOB`` | no | -+--------------------+-------------+ -| ``BOOLEAN`` | no | -+--------------------+-------------+ -| ``BY`` | yes | -+--------------------+-------------+ -| ``CALLED`` | no | -+--------------------+-------------+ -| ``CLUSTERING`` | no | -+--------------------+-------------+ -| ``COLUMNFAMILY`` | yes | -+--------------------+-------------+ -| ``COMPACT`` | no | -+--------------------+-------------+ -| ``CONTAINS`` | no | -+--------------------+-------------+ -| ``COUNT`` | no | -+--------------------+-------------+ -| ``COUNTER`` | no | -+--------------------+-------------+ -| ``CREATE`` | yes | -+--------------------+-------------+ -| ``CUSTOM`` | no | -+--------------------+-------------+ -| ``DATE`` | no | -+--------------------+-------------+ -| ``DECIMAL`` | no | -+--------------------+-------------+ -| ``DELETE`` | yes | -+--------------------+-------------+ -| ``DESC`` | yes | -+--------------------+-------------+ -| ``DESCRIBE`` | yes | -+--------------------+-------------+ -| ``DISTINCT`` | no | -+--------------------+-------------+ -| ``DOUBLE`` | no | -+--------------------+-------------+ -| ``DROP`` | yes | -+--------------------+-------------+ -| ``ENTRIES`` | yes | -+--------------------+-------------+ -| ``EXECUTE`` | yes | -+--------------------+-------------+ -| ``EXISTS`` | no | -+--------------------+-------------+ -| ``FILTERING`` | no | -+--------------------+-------------+ -| ``FINALFUNC`` | no | -+--------------------+-------------+ -| ``FLOAT`` | no | -+--------------------+-------------+ -| ``FROM`` | yes | -+--------------------+-------------+ -| ``FROZEN`` | no | -+--------------------+-------------+ -| ``FULL`` | yes | -+--------------------+-------------+ -| ``FUNCTION`` | no | -+--------------------+-------------+ -| ``FUNCTIONS`` | no | -+--------------------+-------------+ -| ``GRANT`` | yes | -+--------------------+-------------+ -| ``IF`` | yes | -+--------------------+-------------+ -| ``IN`` | yes | -+--------------------+-------------+ -| ``INDEX`` | yes | -+--------------------+-------------+ -| ``INET`` | no | -+--------------------+-------------+ -| ``INFINITY`` | yes | -+--------------------+-------------+ -| ``INITCOND`` | no | -+--------------------+-------------+ -| ``INPUT`` | no | -+--------------------+-------------+ -| ``INSERT`` | yes | -+--------------------+-------------+ -| ``INT`` | no | -+--------------------+-------------+ -| ``INTO`` | yes | -+--------------------+-------------+ -| ``JSON`` | no | -+--------------------+-------------+ -| ``KEY`` | no | -+--------------------+-------------+ -| ``KEYS`` | no | -+--------------------+-------------+ -| ``KEYSPACE`` | yes | 
-+--------------------+-------------+ -| ``KEYSPACES`` | no | -+--------------------+-------------+ -| ``LANGUAGE`` | no | -+--------------------+-------------+ -| ``LIMIT`` | yes | -+--------------------+-------------+ -| ``LIST`` | no | -+--------------------+-------------+ -| ``LOGIN`` | no | -+--------------------+-------------+ -| ``MAP`` | no | -+--------------------+-------------+ -| ``MODIFY`` | yes | -+--------------------+-------------+ -| ``NAN`` | yes | -+--------------------+-------------+ -| ``NOLOGIN`` | no | -+--------------------+-------------+ -| ``NORECURSIVE`` | yes | -+--------------------+-------------+ -| ``NOSUPERUSER`` | no | -+--------------------+-------------+ -| ``NOT`` | yes | -+--------------------+-------------+ -| ``NULL`` | yes | -+--------------------+-------------+ -| ``OF`` | yes | -+--------------------+-------------+ -| ``ON`` | yes | -+--------------------+-------------+ -| ``OPTIONS`` | no | -+--------------------+-------------+ -| ``OR`` | yes | -+--------------------+-------------+ -| ``ORDER`` | yes | -+--------------------+-------------+ -| ``PASSWORD`` | no | -+--------------------+-------------+ -| ``PERMISSION`` | no | -+--------------------+-------------+ -| ``PERMISSIONS`` | no | -+--------------------+-------------+ -| ``PRIMARY`` | yes | -+--------------------+-------------+ -| ``RENAME`` | yes | -+--------------------+-------------+ -| ``REPLACE`` | yes | -+--------------------+-------------+ -| ``RETURNS`` | no | -+--------------------+-------------+ -| ``REVOKE`` | yes | -+--------------------+-------------+ -| ``ROLE`` | no | -+--------------------+-------------+ -| ``ROLES`` | no | -+--------------------+-------------+ -| ``SCHEMA`` | yes | -+--------------------+-------------+ -| ``SELECT`` | yes | -+--------------------+-------------+ -| ``SET`` | yes | -+--------------------+-------------+ -| ``SFUNC`` | no | -+--------------------+-------------+ -| ``SMALLINT`` | no | -+--------------------+-------------+ -| ``STATIC`` | no | -+--------------------+-------------+ -| ``STORAGE`` | no | -+--------------------+-------------+ -| ``STYPE`` | no | -+--------------------+-------------+ -| ``SUPERUSER`` | no | -+--------------------+-------------+ -| ``TABLE`` | yes | -+--------------------+-------------+ -| ``TEXT`` | no | -+--------------------+-------------+ -| ``TIME`` | no | -+--------------------+-------------+ -| ``TIMESTAMP`` | no | -+--------------------+-------------+ -| ``TIMEUUID`` | no | -+--------------------+-------------+ -| ``TINYINT`` | no | -+--------------------+-------------+ -| ``TO`` | yes | -+--------------------+-------------+ -| ``TOKEN`` | yes | -+--------------------+-------------+ -| ``TRIGGER`` | no | -+--------------------+-------------+ -| ``TRUNCATE`` | yes | -+--------------------+-------------+ -| ``TTL`` | no | -+--------------------+-------------+ -| ``TUPLE`` | no | -+--------------------+-------------+ -| ``TYPE`` | no | -+--------------------+-------------+ -| ``UNLOGGED`` | yes | -+--------------------+-------------+ -| ``UPDATE`` | yes | -+--------------------+-------------+ -| ``USE`` | yes | -+--------------------+-------------+ -| ``USER`` | no | -+--------------------+-------------+ -| ``USERS`` | no | -+--------------------+-------------+ -| ``USING`` | yes | -+--------------------+-------------+ -| ``UUID`` | no | -+--------------------+-------------+ -| ``VALUES`` | no | -+--------------------+-------------+ -| ``VARCHAR`` | no | -+--------------------+-------------+ -| ``VARINT`` | no | 
-+--------------------+-------------+ -| ``WHERE`` | yes | -+--------------------+-------------+ -| ``WITH`` | yes | -+--------------------+-------------+ -| ``WRITETIME`` | no | -+--------------------+-------------+ - -Appendix B: CQL Reserved Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name. - -+-----------------+ -| type | -+=================+ -| ``bitstring`` | -+-----------------+ -| ``byte`` | -+-----------------+ -| ``complex`` | -+-----------------+ -| ``enum`` | -+-----------------+ -| ``interval`` | -+-----------------+ -| ``macaddr`` | -+-----------------+ - - -Appendix C: Dropping Compact Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting version 4.0, Thrift and COMPACT STORAGE is no longer supported. - -'ALTER ... DROP COMPACT STORAGE' statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables: - -- CQL-created Compact Tables that have no clustering columns, will expose an - additional clustering column ``column1`` with ``UTF8Type``. -- CQL-created Compact Tables that had no regular columns, will expose a - regular column ``value`` with ``BytesType``. -- For CQL-Created Compact Tables, all columns originally defined as - ``regular`` will be come ``static`` -- CQL-created Compact Tables that have clustering but have no regular - columns will have an empty value column (of ``EmptyType``) -- SuperColumn Tables (can only be created through Thrift) will expose - a compact value map with an empty name. -- Thrift-created Compact Tables will have types corresponding to their - Thrift definition. diff --git a/src/doc/3.11.7/_sources/cql/changes.rst.txt b/src/doc/3.11.7/_sources/cql/changes.rst.txt deleted file mode 100644 index 1eee5369a..000000000 --- a/src/doc/3.11.7/_sources/cql/changes.rst.txt +++ /dev/null @@ -1,204 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Changes -------- - -The following describes the changes in each version of CQL. - -3.4.4 -^^^^^ - -- ``ALTER TABLE`` ``ALTER`` has been removed; a column's type may not be changed after creation (:jira:`12443`). -- ``ALTER TYPE`` ``ALTER`` has been removed; a field's type may not be changed after creation (:jira:`12443`). - -3.4.3 -^^^^^ - -- Adds a new ``duration `` :ref:`data types ` (:jira:`11873`). -- Support for ``GROUP BY`` (:jira:`10707`). -- Adds a ``DEFAULT UNSET`` option for ``INSERT JSON`` to ignore omitted columns (:jira:`11424`). -- Allows ``null`` as a legal value for TTL on insert and update. It will be treated as equivalent to -inserting a 0 (:jira:`12216`). 
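-
-As an illustrative aside (not part of the original changelog), the 3.4.3 additions above
-might be exercised as follows; the ``events`` table and its column names are hypothetical
-and chosen only for this sketch::
-
-    -- the lag column uses the new duration type (CASSANDRA-11873)
-    CREATE TABLE events (pk int, ck int, val int, lag duration, PRIMARY KEY (pk, ck));
-
-    -- GROUP BY on primary key columns (CASSANDRA-10707)
-    SELECT pk, max(val) FROM events GROUP BY pk;
-
-    -- DEFAULT UNSET leaves omitted columns untouched instead of deleting them (CASSANDRA-11424)
-    INSERT INTO events JSON '{"pk": 1, "ck": 2, "val": 10}' DEFAULT UNSET;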
- -3.4.2 -^^^^^ - -- If a table has a non zero ``default_time_to_live``, then explicitly specifying a TTL of 0 in an ``INSERT`` or - ``UPDATE`` statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels - the ``default_time_to_live``). This wasn't the case before and the ``default_time_to_live`` was applied even though a - TTL had been explicitly set. -- ``ALTER TABLE`` ``ADD`` and ``DROP`` now allow multiple columns to be added/removed. -- New ``PER PARTITION LIMIT`` option for ``SELECT`` statements (see `CASSANDRA-7017 - `__. -- :ref:`User-defined functions ` can now instantiate ``UDTValue`` and ``TupleValue`` instances via the - new ``UDFContext`` interface (see `CASSANDRA-10818 `__. -- :ref:`User-defined types ` may now be stored in a non-frozen form, allowing individual fields to be updated and - deleted in ``UPDATE`` statements and ``DELETE`` statements, respectively. (`CASSANDRA-7423 - `__). - -3.4.1 -^^^^^ - -- Adds ``CAST`` functions. - -3.4.0 -^^^^^ - -- Support for :ref:`materialized views `. -- ``DELETE`` support for inequality expressions and ``IN`` restrictions on any primary key columns. -- ``UPDATE`` support for ``IN`` restrictions on any primary key columns. - -3.3.1 -^^^^^ - -- The syntax ``TRUNCATE TABLE X`` is now accepted as an alias for ``TRUNCATE X``. - -3.3.0 -^^^^^ - -- :ref:`User-defined functions and aggregates ` are now supported. -- Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings. -- Introduces Roles to supersede user based authentication and access control -- New ``date``, ``time``, ``tinyint`` and ``smallint`` :ref:`data types ` have been added. -- :ref:`JSON support ` has been added -- Adds new time conversion functions and deprecate ``dateOf`` and ``unixTimestampOf``. - -3.2.0 -^^^^^ - -- :ref:`User-defined types ` supported. -- ``CREATE INDEX`` now supports indexing collection columns, including indexing the keys of map collections through the - ``keys()`` function -- Indexes on collections may be queried using the new ``CONTAINS`` and ``CONTAINS KEY`` operators -- :ref:`Tuple types ` were added to hold fixed-length sets of typed positional fields. -- ``DROP INDEX`` now supports optionally specifying a keyspace. - -3.1.7 -^^^^^ - -- ``SELECT`` statements now support selecting multiple rows in a single partition using an ``IN`` clause on combinations - of clustering columns. -- ``IF NOT EXISTS`` and ``IF EXISTS`` syntax is now supported by ``CREATE USER`` and ``DROP USER`` statements, - respectively. - -3.1.6 -^^^^^ - -- A new ``uuid()`` method has been added. -- Support for ``DELETE ... IF EXISTS`` syntax. - -3.1.5 -^^^^^ - -- It is now possible to group clustering columns in a relation, see :ref:`WHERE ` clauses. -- Added support for :ref:`static columns `. - -3.1.4 -^^^^^ - -- ``CREATE INDEX`` now allows specifying options when creating CUSTOM indexes. - -3.1.3 -^^^^^ - -- Millisecond precision formats have been added to the :ref:`timestamp ` parser. - -3.1.2 -^^^^^ - -- ``NaN`` and ``Infinity`` has been added as valid float constants. They are now reserved keywords. In the unlikely case - you we using them as a column identifier (or keyspace/table one), you will now need to double quote them. - -3.1.1 -^^^^^ - -- ``SELECT`` statement now allows listing the partition keys (using the ``DISTINCT`` modifier). See `CASSANDRA-4536 - `__. -- The syntax ``c IN ?`` is now supported in ``WHERE`` clauses. 
In that case, the value expected for the bind variable - will be a list of whatever type ``c`` is. -- It is now possible to use named bind variables (using ``:name`` instead of ``?``). - -3.1.0 -^^^^^ - -- ``ALTER TABLE`` ``DROP`` option added. -- ``SELECT`` statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported. -- ``CREATE`` statements for ``KEYSPACE``, ``TABLE`` and ``INDEX`` now supports an ``IF NOT EXISTS`` condition. - Similarly, ``DROP`` statements support a ``IF EXISTS`` condition. -- ``INSERT`` statements optionally supports a ``IF NOT EXISTS`` condition and ``UPDATE`` supports ``IF`` conditions. - -3.0.5 -^^^^^ - -- ``SELECT``, ``UPDATE``, and ``DELETE`` statements now allow empty ``IN`` relations (see `CASSANDRA-5626 - `__. - -3.0.4 -^^^^^ - -- Updated the syntax for custom :ref:`secondary indexes `. -- Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not - correct (the order was **not** the one of the type of the partition key). Instead, the ``token`` method should always - be used for range queries on the partition key (see :ref:`WHERE clauses `). - -3.0.3 -^^^^^ - -- Support for custom :ref:`secondary indexes ` has been added. - -3.0.2 -^^^^^ - -- Type validation for the :ref:`constants ` has been fixed. For instance, the implementation used to allow - ``'2'`` as a valid value for an ``int`` column (interpreting it has the equivalent of ``2``), or ``42`` as a valid - ``blob`` value (in which case ``42`` was interpreted as an hexadecimal representation of the blob). This is no longer - the case, type validation of constants is now more strict. See the :ref:`data types ` section for details - on which constant is allowed for which type. -- The type validation fixed of the previous point has lead to the introduction of blobs constants to allow the input of - blobs. Do note that while the input of blobs as strings constant is still supported by this version (to allow smoother - transition to blob constant), it is now deprecated and will be removed by a future version. If you were using strings - as blobs, you should thus update your client code ASAP to switch blob constants. -- A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is - now also allowed in select clauses. See the :ref:`section on functions ` for details. - -3.0.1 -^^^^^ - -- Date strings (and timestamps) are no longer accepted as valid ``timeuuid`` values. Doing so was a bug in the sense - that date string are not valid ``timeuuid``, and it was thus resulting in `confusing behaviors - `__. However, the following new methods have been added to help - working with ``timeuuid``: ``now``, ``minTimeuuid``, ``maxTimeuuid`` , - ``dateOf`` and ``unixTimestampOf``. -- Float constants now support the exponent notation. In other words, ``4.2E10`` is now a valid floating point value. - -Versioning -^^^^^^^^^^ - -Versioning of the CQL language adheres to the `Semantic Versioning `__ guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version. 
- -========= ============================================================================================================= - version description -========= ============================================================================================================= - Major The major version *must* be bumped when backward incompatible changes are introduced. This should rarely - occur. - Minor Minor version increments occur when new, but backward compatible, functionality is introduced. - Patch The patch version is incremented when bugs are fixed. -========= ============================================================================================================= diff --git a/src/doc/3.11.7/_sources/cql/ddl.rst.txt b/src/doc/3.11.7/_sources/cql/ddl.rst.txt deleted file mode 100644 index 302777544..000000000 --- a/src/doc/3.11.7/_sources/cql/ddl.rst.txt +++ /dev/null @@ -1,649 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-definition: - -Data Definition ---------------- - -CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in -*keyspaces*. A keyspace defines a number of options that applies to all the tables it contains, most prominently of -which is the :ref:`replication strategy ` used by the keyspace. It is generally encouraged to use -one keyspace by *application*, and thus many cluster may define only one keyspace. - -This section describes the statements used to create, modify, and remove those keyspace and tables. - -Common definitions -^^^^^^^^^^^^^^^^^^ - -The names of the keyspaces and tables are defined by the following grammar: - -.. productionlist:: - keyspace_name: `name` - table_name: [ `keyspace_name` '.' ] `name` - name: `unquoted_name` | `quoted_name` - unquoted_name: re('[a-zA-Z_0-9]{1, 48}') - quoted_name: '"' `unquoted_name` '"' - -Both keyspace and table name should be comprised of only alphanumeric characters, cannot be empty and are limited in -size to 48 characters (that limit exists mostly to avoid filenames (which may include the keyspace and table name) to go -over the limits of certain file systems). By default, keyspace and table names are case insensitive (``myTable`` is -equivalent to ``mytable``) but case sensitivity can be forced by using double-quotes (``"myTable"`` is different from -``mytable``). - -Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is -part of. If is is not fully-qualified, the table is assumed to be in the *current* keyspace (see :ref:`USE statement -`). - -Further, the valid names for columns is simply defined as: - -.. 
productionlist:: - column_name: `identifier` - -We also define the notion of statement options for use in the following section: - -.. productionlist:: - options: `option` ( AND `option` )* - option: `identifier` '=' ( `identifier` | `constant` | `map_literal` ) - -.. _create-keyspace-statement: - -CREATE KEYSPACE -^^^^^^^^^^^^^^^ - -A keyspace is created using a ``CREATE KEYSPACE`` statement: - -.. productionlist:: - create_keyspace_statement: CREATE KEYSPACE [ IF NOT EXISTS ] `keyspace_name` WITH `options` - -For instance:: - - CREATE KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - - CREATE KEYSPACE Excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3} - AND durable_writes = false; - - -The supported ``options`` are: - -=================== ========== =========== ========= =================================================================== -name kind mandatory default description -=================== ========== =========== ========= =================================================================== -``replication`` *map* yes The replication strategy and options to use for the keyspace (see - details below). -``durable_writes`` *simple* no true Whether to use the commit log for updates on this keyspace - (disable this option at your own risk!). -=================== ========== =========== ========= =================================================================== - -The ``replication`` property is mandatory and must at least contains the ``'class'`` sub-option which defines the -:ref:`replication strategy ` class to use. The rest of the sub-options depends on what replication -strategy is used. By default, Cassandra support the following ``'class'``: - -- ``'SimpleStrategy'``: A simple strategy that defines a replication factor for the whole cluster. The only sub-options - supported is ``'replication_factor'`` to define that replication factor and is mandatory. -- ``'NetworkTopologyStrategy'``: A replication strategy that allows to set the replication factor independently for - each data-center. The rest of the sub-options are key-value pairs where a key is a data-center name and its value is - the associated replication factor. - -Attempting to create a keyspace that already exists will return an error unless the ``IF NOT EXISTS`` option is used. If -it is used, the statement will be a no-op if the keyspace already exists. - -.. _use-statement: - -USE -^^^ - -The ``USE`` statement allows to change the *current* keyspace (for the *connection* on which it is executed). A number -of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the -default keyspace used when those objects are referred without a fully-qualified name (that is, without being prefixed a -keyspace name). A ``USE`` statement simply takes the keyspace to use as current as argument: - -.. productionlist:: - use_statement: USE `keyspace_name` - -.. _alter-keyspace-statement: - -ALTER KEYSPACE -^^^^^^^^^^^^^^ - -An ``ALTER KEYSPACE`` statement allows to modify the options of a keyspace: - -.. productionlist:: - alter_keyspace_statement: ALTER KEYSPACE `keyspace_name` WITH `options` - -For instance:: - - ALTER KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4}; - -The supported options are the same than for :ref:`creating a keyspace `. - -.. 
_drop-keyspace-statement: - -DROP KEYSPACE -^^^^^^^^^^^^^ - -Dropping a keyspace can be done using the ``DROP KEYSPACE`` statement: - -.. productionlist:: - drop_keyspace_statement: DROP KEYSPACE [ IF EXISTS ] `keyspace_name` - -For instance:: - - DROP KEYSPACE Excelsior; - -Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UTD and -functions in it, and all the data contained in those tables. - -If the keyspace does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _create-table-statement: - -CREATE TABLE -^^^^^^^^^^^^ - -Creating a new table uses the ``CREATE TABLE`` statement: - -.. productionlist:: - create_table_statement: CREATE TABLE [ IF NOT EXISTS ] `table_name` - : '(' - : `column_definition` - : ( ',' `column_definition` )* - : [ ',' PRIMARY KEY '(' `primary_key` ')' ] - : ')' [ WITH `table_options` ] - column_definition: `column_name` `cql_type` [ STATIC ] [ PRIMARY KEY] - primary_key: `partition_key` [ ',' `clustering_columns` ] - partition_key: `column_name` - : | '(' `column_name` ( ',' `column_name` )* ')' - clustering_columns: `column_name` ( ',' `column_name` )* - table_options: COMPACT STORAGE [ AND `table_options` ] - : | CLUSTERING ORDER BY '(' `clustering_order` ')' [ AND `table_options` ] - : | `options` - clustering_order: `column_name` (ASC | DESC) ( ',' `column_name` (ASC | DESC) )* - -For instance:: - - CREATE TABLE monkeySpecies ( - species text PRIMARY KEY, - common_name text, - population varint, - average_size int - ) WITH comment='Important biological records' - AND read_repair_chance = 1.0; - - CREATE TABLE timeline ( - userid uuid, - posted_month int, - posted_time uuid, - body text, - posted_by text, - PRIMARY KEY (userid, posted_month, posted_time) - ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }; - - CREATE TABLE loads ( - machine inet, - cpu int, - mtime timeuuid, - load float, - PRIMARY KEY ((machine, cpu), mtime) - ) WITH CLUSTERING ORDER BY (mtime DESC); - -A CQL table has a name and is composed of a set of *rows*. Creating a table amounts to defining which :ref:`columns -` the rows will be composed, which of those columns compose the :ref:`primary key `, as -well as optional :ref:`options ` for the table. - -Attempting to create an already existing table will return an error unless the ``IF NOT EXISTS`` directive is used. If -it is used, the statement will be a no-op if the table already exists. - - -.. _column-definition: - -Column definitions -~~~~~~~~~~~~~~~~~~ - -Every rows in a CQL table has a set of predefined columns defined at the time of the table creation (or added later -using an :ref:`alter statement`). - -A :token:`column_definition` is primarily comprised of the name of the column defined and it's :ref:`type `, -which restrict which values are accepted for that column. Additionally, a column definition can have the following -modifiers: - -``STATIC`` - it declares the column as being a :ref:`static column `. - -``PRIMARY KEY`` - it declares the column as being the sole component of the :ref:`primary key ` of the table. - -.. _static-columns: - -Static columns -`````````````` -Some columns can be declared as ``STATIC`` in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same :ref:`partition key `). 
For instance:: - - CREATE TABLE t ( - pk int, - t int, - v text, - s text static, - PRIMARY KEY (pk, t) - ); - - INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0'); - INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1'); - - SELECT * FROM t; - pk | t | v | s - ----+---+--------+----------- - 0 | 0 | 'val0' | 'static1' - 0 | 1 | 'val1' | 'static1' - -As can be seen, the ``s`` value is the same (``static1``) for both of the row in the partition (the partition key in -that example being ``pk``, both rows are in that same partition): the 2nd insertion has overridden the value for ``s``. - -The use of static columns as the following restrictions: - -- tables with the ``COMPACT STORAGE`` option (see below) cannot use them. -- a table without clustering columns cannot have static columns (in a table without clustering columns, every partition - has only one row, and so every column is inherently static). -- only non ``PRIMARY KEY`` columns can be static. - -.. _primary-key: - -The Primary key -~~~~~~~~~~~~~~~ - -Within a table, a row is uniquely identified by its ``PRIMARY KEY``, and hence all table **must** define a PRIMARY KEY -(and only one). A ``PRIMARY KEY`` definition is composed of one or more of the columns defined in the table. -Syntactically, the primary key is defined the keywords ``PRIMARY KEY`` followed by comma-separated list of the column -names composing it within parenthesis, but if the primary key has only one column, one can alternatively follow that -column definition by the ``PRIMARY KEY`` keywords. The order of the columns in the primary key definition matter. - -A CQL primary key is composed of 2 parts: - -- the :ref:`partition key ` part. It is the first component of the primary key definition. It can be a - single column or, using additional parenthesis, can be multiple columns. A table always have at least a partition key, - the smallest possible table definition is:: - - CREATE TABLE t (k text PRIMARY KEY); - -- the :ref:`clustering columns `. Those are the columns after the first component of the primary key - definition, and the order of those columns define the *clustering order*. - -Some example of primary key definition are: - -- ``PRIMARY KEY (a)``: ``a`` is the partition key and there is no clustering columns. -- ``PRIMARY KEY (a, b, c)`` : ``a`` is the partition key and ``b`` and ``c`` are the clustering columns. -- ``PRIMARY KEY ((a, b), c)`` : ``a`` and ``b`` compose the partition key (this is often called a *composite* partition - key) and ``c`` is the clustering column. - - -.. _partition-key: - -The partition key -````````````````` - -Within a table, CQL defines the notion of a *partition*. A partition is simply the set of rows that share the same value -for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same -partition only they have the same values for all those partition key column. So for instance, given the following table -definition and content:: - - CREATE TABLE t ( - a int, - b int, - c int, - d int, - PRIMARY KEY ((a, b), c, d) - ); - - SELECT * FROM t; - a | b | c | d - ---+---+---+--- - 0 | 0 | 0 | 0 // row 1 - 0 | 0 | 1 | 1 // row 2 - 0 | 1 | 2 | 2 // row 3 - 0 | 1 | 3 | 3 // row 4 - 1 | 1 | 4 | 4 // row 5 - -``row 1`` and ``row 2`` are in the same partition, ``row 3`` and ``row 4`` are also in the same partition (but a -different one) and ``row 5`` is in yet another partition. 
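-
-As a follow-on sketch (not part of the original example), a query that restricts the full
-composite partition key reads from exactly one of the partitions shown above; restricting
-only part of the partition key would not identify a single partition::
-
-    SELECT * FROM t WHERE a = 0 AND b = 0;
-     a | b | c | d
-    ---+---+---+---
-     0 | 0 | 0 | 0  // row 1
-     0 | 0 | 1 | 1  // row 2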
-
-Note that a table always has a partition key, and that if the table has no :ref:`clustering columns
-`, then every partition of that table is only comprised of a single row (since the primary key
-uniquely identifies rows and the primary key is equal to the partition key if there are no clustering columns).
-
-The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be
-stored on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be
-located together in the cluster, and it is thus important to choose your partition key wisely so that rows that need
-to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of
-nodes).
-
-Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to
-be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot.
-
-Another useful property of a partition is that when writing data, all the updates belonging to a single partition are
-done *atomically* and in *isolation*, which is not the case across partitions.
-
-The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects
-of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficient they are.
-
-
-.. _clustering-columns:
-
-The clustering columns
-``````````````````````
-
-The clustering columns of a table define the clustering order for the partitions of that table. For a given
-:ref:`partition `, all the rows are physically ordered inside Cassandra by that clustering order. For
-instance, given::
-
-    CREATE TABLE t (
-        a int,
-        b int,
-        c int,
-        PRIMARY KEY (a, b, c)
-    );
-
-    SELECT * FROM t;
-       a | b | c
-      ---+---+---
-       0 | 0 | 4     // row 1
-       0 | 1 | 9     // row 2
-       0 | 2 | 2     // row 3
-       0 | 3 | 3     // row 4
-
-then the rows (which all belong to the same partition) are all stored internally in the order of the values of their
-``b`` column (the order they are displayed above). So where the partition key of the table allows rows to be grouped on
-the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the
-retrieval of a range of rows within a partition (for instance, in the example above, ``SELECT * FROM t WHERE a = 0 AND
-b > 1 AND b <= 3``) to be very efficient.
-
-
-.. _create-table-options:
-
-Table options
-~~~~~~~~~~~~~
-
-A CQL table has a number of options that can be set at creation (and, for most of them, :ref:`altered
-` later). These options are specified after the ``WITH`` keyword.
-
-Amongst those options, two important ones cannot be changed after creation and influence which queries can be done
-against the table: the ``COMPACT STORAGE`` option and the ``CLUSTERING ORDER`` option. Those, as well as the other
-options of a table, are described in the following sections.
-
-.. _compact-tables:
-
-Compact tables
-``````````````
-
-.. warning:: Since Cassandra 3.0, compact tables have the exact same layout internally as non-compact ones (for the
-   same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition
-   and usage that are necessary to ensure backward compatibility with the deprecated Thrift API.
And as ``COMPACT - STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new table with the - ``COMPACT STORAGE`` option. - -A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is mainly targeted towards backward -compatibility for definitions created before CQL version 3 (see `www.datastax.com/dev/blog/thrift-to-cql3 -`__ for more details) and shouldn't be used for new tables. Declaring a -table with this option creates limitations for the table which are largely arbitrary but necessary for backward -compatibility with the (deprecated) Thrift API. Amongst those limitation: - -- a compact table cannot use collections nor static columns. -- if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary - key ones. This imply you cannot add or remove columns after creation in particular. -- a compact table is limited in the indexes it can create, and no materialized view can be created on it. - -.. _clustering-order: - -Reversing the clustering order -`````````````````````````````` - -The clustering order of a table is defined by the :ref:`clustering columns ` of that table. By -default, that ordering is based on natural order of those clustering order, but the ``CLUSTERING ORDER`` allows to -change that clustering order to use the *reverse* natural order for some (potentially all) of the columns. - -The ``CLUSTERING ORDER`` option takes the comma-separated list of the clustering column, each with a ``ASC`` (for -*ascendant*, e.g. the natural order) or ``DESC`` (for *descendant*, e.g. the reverse natural order). Note in particular -that the default (if the ``CLUSTERING ORDER`` option is not used) is strictly equivalent to using the option with all -clustering columns using the ``ASC`` modifier. - -Note that this option is basically a hint for the storage engine to change the order in which it stores the row but it -has 3 visible consequences: - -# it limits which ``ORDER BY`` clause are allowed for :ref:`selects ` on that table. You can only - order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering column - ``a`` and ``b`` and you defined ``WITH CLUSTERING ORDER (a DESC, b ASC)``, then in queries you will be allowed to use - ``ORDER BY (a DESC, b ASC)`` and (reverse clustering order) ``ORDER BY (a ASC, b DESC)`` but **not** ``ORDER BY (a - ASC, b ASC)`` (nor ``ORDER BY (a DESC, b DESC)``). -# it also change the default order of results when queried (if no ``ORDER BY`` is provided). Results are always returned - in clustering order (within a partition). -# it has a small performance impact on some queries as queries in reverse clustering order are slower than the one in - forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of - your columns (which is common with time series for instance where you often want data from the newest to the oldest), - it is an optimization to declare a descending clustering order. - -.. _create-table-general-options: - -Other table options -``````````````````` - -.. 
todo:: review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance) - -A table supports the following options: - -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| option | kind | default | description | -+================================+==========+=============+===========================================================+ -| ``comment`` | *simple* | none | A free-form, human-readable comment. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``read_repair_chance`` | *simple* | 0.1 | The probability with which to query extra nodes (e.g. | -| | | | more nodes than required by the consistency level) for | -| | | | the purpose of read repairs. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``dclocal_read_repair_chance`` | *simple* | 0 | The probability with which to query extra nodes (e.g. | -| | | | more nodes than required by the consistency level) | -| | | | belonging to the same data center than the read | -| | | | coordinator for the purpose of read repairs. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``gc_grace_seconds`` | *simple* | 864000 | Time to wait before garbage collecting tombstones | -| | | | (deletion markers). | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``bloom_filter_fp_chance`` | *simple* | 0.00075 | The target probability of false positive of the sstable | -| | | | bloom filters. Said bloom filters will be sized to provide| -| | | | the provided probability (thus lowering this value impact | -| | | | the size of bloom filters in-memory and on-disk) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``default_time_to_live`` | *simple* | 0 | The default expiration time (“TTL”) in seconds for a | -| | | | table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compaction`` | *map* | *see below* | :ref:`Compaction options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compression`` | *map* | *see below* | :ref:`Compression options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``caching`` | *map* | *see below* | :ref:`Caching options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ - -.. _cql-compaction-options: - -Compaction options -################## - -The ``compaction`` options must at least define the ``'class'`` sub-option, that defines the compaction strategy class -to use. The default supported class are ``'SizeTieredCompactionStrategy'`` (:ref:`STCS `), -``'LeveledCompactionStrategy'`` (:ref:`LCS `) and ``'TimeWindowCompactionStrategy'`` (:ref:`TWCS `) (the -``'DateTieredCompactionStrategy'`` is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be -preferred instead). Custom strategy can be provided by specifying the full class name as a :ref:`string constant -`. 
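-
-For example (the table name and threshold value below are purely illustrative), the strategy class and its sub-options
-are supplied together as a map::
-
-    ALTER TABLE events
-       WITH compaction = { 'class' : 'SizeTieredCompactionStrategy', 'min_threshold' : 6 };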
- -All default strategies support a number of :ref:`common options `, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS `, :ref:`LCS -` and :ref:`TWCS `). - -.. _cql-compression-options: - -Compression options -################### - -The ``compression`` options define if and how the sstables of the table are compressed. The following sub-options are -available: - -========================= =============== ============================================================================= - Option Default Description -========================= =============== ============================================================================= - ``class`` LZ4Compressor The compression algorithm to use. Default compressor are: LZ4Compressor, - SnappyCompressor and DeflateCompressor. Use ``'enabled' : false`` to disable - compression. Custom compressor can be provided by specifying the full class - name as a “string constant”:#constants. - ``enabled`` true Enable/disable sstable compression. - ``chunk_length_in_kb`` 64 On disk SSTables are compressed by block (to allow random reads). This - defines the size (in KB) of said block. Bigger values may improve the - compression rate, but increases the minimum size of data to be read from disk - for a read - ``crc_check_chance`` 1.0 When compression is enabled, each compressed block includes a checksum of - that block for the purpose of detecting disk bitrot and avoiding the - propagation of corruption to other replica. This option defines the - probability with which those checksums are checked during read. By default - they are always checked. Set to 0 to disable checksum checking and to 0.5 for - instance to check them every other read | -========================= =============== ============================================================================= - - -For instance, to create a table with LZ4Compressor and a chunk_lenth_in_kb of 4KB:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4}; - - -.. _cql-caching-options: - -Caching options -############### - -The ``caching`` options allows to configure both the *key cache* and the *row cache* for the table. The following -sub-options are available: - -======================== ========= ==================================================================================== - Option Default Description -======================== ========= ==================================================================================== - ``keys`` ALL Whether to cache keys (“key cache”) for this table. Valid values are: ``ALL`` and - ``NONE``. - ``rows_per_partition`` NONE The amount of rows to cache per partition (“row cache”). If an integer ``n`` is - specified, the first ``n`` queried rows of a partition will be cached. Other - possible options are ``ALL``, to cache all rows of a queried partition, or ``NONE`` - to disable row caching. -======================== ========= ==================================================================================== - - -For instance, to create a table with both a key cache and 10 rows per partition:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10}; - - -Other considerations: -##################### - -- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. 
There is thus no need to try to
-  anticipate future usage when creating a table.
-
-.. _alter-table-statement:
-
-ALTER TABLE
-^^^^^^^^^^^
-
-Altering an existing table uses the ``ALTER TABLE`` statement:
-
-.. productionlist::
-   alter_table_statement: ALTER TABLE `table_name` `alter_table_instruction`
-   alter_table_instruction: ADD `column_name` `cql_type` ( ',' `column_name` `cql_type` )*
-                          : | DROP `column_name` ( `column_name` )*
-                          : | WITH `options`
-
-For instance::
-
-    ALTER TABLE addamsFamily ADD gravesite varchar;
-
-    ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table'
-       AND read_repair_chance = 0.2;
-
-The ``ALTER TABLE`` statement can:
-
-- Add new column(s) to the table (through the ``ADD`` instruction). Note that the primary key of a table cannot be
-  changed and thus newly added columns will, by extension, never be part of the primary key. Also note that :ref:`compact
-  tables ` have restrictions regarding column addition. Note that this is a constant (in the amount of
-  data the cluster contains) time operation.
-- Remove column(s) from the table. This drops both the column and all its content, but note that while the column
-  becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings
-  below. Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the
-  cluster) time operation.
-- Change some of the table options (through the ``WITH`` instruction). The :ref:`supported options
-  ` are the same as when creating a table (outside of ``COMPACT STORAGE`` and ``CLUSTERING
-  ORDER``, which cannot be changed after creation). Note that setting any ``compaction`` sub-options has the effect of
-  erasing all previous ``compaction`` options, so you need to re-specify all the sub-options if you want to keep them.
-  The same note applies to the set of ``compression`` sub-options.
-
-.. warning:: Dropping a column assumes that the timestamps used for the values of this column are "real" timestamps in
-   microseconds. Using "real" timestamps in microseconds is the default and is **strongly** recommended, but as
-   Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another
-   convention. Please be aware that if you do so, dropping a column will not work correctly.
-
-.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one
-   **unless** the type of the dropped column was a (non-frozen) collection (due to an internal technical limitation).
-
-
-.. _drop-table-statement:
-
-DROP TABLE
-^^^^^^^^^^
-
-Dropping a table uses the ``DROP TABLE`` statement:
-
-.. productionlist::
-   drop_table_statement: DROP TABLE [ IF EXISTS ] `table_name`
-
-Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.
-
-If the table does not exist, the statement will return an error, unless ``IF EXISTS`` is used, in which case the
-operation is a no-op.
-
-.. _truncate-statement:
-
-TRUNCATE
-^^^^^^^^
-
-A table can be truncated using the ``TRUNCATE`` statement:
-
-.. productionlist::
-   truncate_statement: TRUNCATE [ TABLE ] `table_name`
-
-Note that ``TRUNCATE TABLE foo`` is allowed for consistency with other DDL statements, but tables are currently the only
-objects that can be truncated and so the ``TABLE`` keyword can be omitted.
-
-Truncating a table permanently removes all existing data from the table, but without removing the table itself.
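-
-For instance, assuming a table named ``users`` exists, the two following statements are equivalent::
-
-    TRUNCATE users;
-    TRUNCATE TABLE users;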
diff --git a/src/doc/3.11.7/_sources/cql/definitions.rst.txt b/src/doc/3.11.7/_sources/cql/definitions.rst.txt deleted file mode 100644 index d4a5b59b9..000000000 --- a/src/doc/3.11.7/_sources/cql/definitions.rst.txt +++ /dev/null @@ -1,232 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. highlight:: cql - -Definitions ------------ - -.. _conventions: - -Conventions -^^^^^^^^^^^ - -To aid in specifying the CQL syntax, we will use the following conventions in this document: - -- Language rules will be given in an informal `BNF variant - `_ notation. In particular, we'll use square brakets - (``[ item ]``) for optional items, ``*`` and ``+`` for repeated items (where ``+`` imply at least one). -- The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to - their definition) while terminal keywords will be provided "all caps". Note however that keywords are - :ref:`identifiers` and are thus case insensitive in practice. We will also define some early construction using - regexp, which we'll indicate with ``re()``. -- The grammar is provided for documentation purposes and leave some minor details out. For instance, the comma on the - last column definition in a ``CREATE TABLE`` statement is optional but supported if present even though the grammar in - this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL. -- References to keywords or pieces of CQL code in running text will be shown in a ``fixed-width font``. - - -.. _identifiers: - -Identifiers and keywords -^^^^^^^^^^^^^^^^^^^^^^^^ - -The CQL language uses *identifiers* (or *names*) to identify tables, columns and other objects. An identifier is a token -matching the regular expression ``[a-zA-Z][a-zA-Z0-9_]*``. - -A number of such identifiers, like ``SELECT`` or ``WITH``, are *keywords*. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in :ref:`appendix-A`. - -Identifiers and (unquoted) keywords are case insensitive. Thus ``SELECT`` is the same than ``select`` or ``sElEcT``, and -``myId`` is the same than ``myid`` or ``MYID``. A convention often used (in particular by the samples of this -documentation) is to use upper case for keywords and lower case for other identifiers. - -There is a second kind of identifiers called *quoted identifiers* defined by enclosing an arbitrary sequence of -characters (non empty) in double-quotes(``"``). Quoted identifiers are never keywords. Thus ``"select"`` is not a -reserved keyword and can be used to refer to a column (note that using this is particularly advised), while ``select`` -would raise a parsing error. 
Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case -sensitive (``"My Quoted Id"`` is *different* from ``"my quoted id"``). A fully lowercase quoted identifier that matches -``[a-zA-Z][a-zA-Z0-9_]*`` is however *equivalent* to the unquoted identifier obtained by removing the double-quote (so -``"myid"`` is equivalent to ``myid`` and to ``myId`` but different from ``"myId"``). Inside a quoted identifier, the -double-quote character can be repeated to escape it, so ``"foo "" bar"`` is a valid identifier. - -.. note:: *quoted identifiers* allows to declare columns with arbitrary names, and those can sometime clash with - specific names used by the server. For instance, when using conditional update, the server will respond with a - result-set containing a special result named ``"[applied]"``. If you’ve declared a column with such a name, this - could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but - if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like - ``"[applied]"``) and any name that looks like a function call (like ``"f(x)"``). - -More formally, we have: - -.. productionlist:: - identifier: `unquoted_identifier` | `quoted_identifier` - unquoted_identifier: re('[a-zA-Z][a-zA-Z0-9_]*') - quoted_identifier: '"' (any character where " can appear if doubled)+ '"' - -.. _constants: - -Constants -^^^^^^^^^ - -CQL defines the following kind of *constants*: - -.. productionlist:: - constant: `string` | `integer` | `float` | `boolean` | `uuid` | `blob` | NULL - string: '\'' (any character where ' can appear if doubled)+ '\'' - : '$$' (any character other than '$$') '$$' - integer: re('-?[0-9]+') - float: re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY - boolean: TRUE | FALSE - uuid: `hex`{8}-`hex`{4}-`hex`{4}-`hex`{4}-`hex`{12} - hex: re("[0-9a-fA-F]") - blob: '0' ('x' | 'X') `hex`+ - -In other words: - -- A string constant is an arbitrary sequence of characters enclosed by single-quote(``'``). A single-quote - can be included by repeating it, e.g. ``'It''s raining today'``. Those are not to be confused with quoted - :ref:`identifiers` that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence - of characters by two dollar characters, in which case single-quote can be used without escaping (``$$It's raining - today$$``). That latter form is often used when defining :ref:`user-defined functions ` to avoid having to - escape single-quote characters in function body (as they are more likely to occur than ``$$``). -- Integer, float and boolean constant are defined as expected. Note however than float allows the special ``NaN`` and - ``Infinity`` constants. -- CQL supports UUID_ constants. -- Blobs content are provided in hexadecimal and prefixed by ``0x``. -- The special ``NULL`` constant denotes the absence of value. - -For how these constants are typed, see the :ref:`data-types` section. - -Terms -^^^^^ - -CQL has the notion of a *term*, which denotes the kind of values that CQL support. Terms are defined by: - -.. productionlist:: - term: `constant` | `literal` | `function_call` | `type_hint` | `bind_marker` - literal: `collection_literal` | `udt_literal` | `tuple_literal` - function_call: `identifier` '(' [ `term` (',' `term`)* ] ')' - type_hint: '(' `cql_type` `)` term - bind_marker: '?' | ':' `identifier` - -A term is thus one of: - -- A :ref:`constant `. 
-- A literal for either :ref:`a collection `, :ref:`a user-defined type ` or :ref:`a tuple ` - (see the linked sections for details). -- A function call: see :ref:`the section on functions ` for details on which :ref:`native function - ` exists and how to define your own :ref:`user-defined ones `. -- A *type hint*: see the :ref:`related section ` for details. -- A bind marker, which denotes a variable to be bound at execution time. See the section on :ref:`prepared-statements` - for details. A bind marker can be either anonymous (``?``) or named (``:some_name``). The latter form provides a more - convenient way to refer to the variable for binding it and should generally be preferred. - - -Comments -^^^^^^^^ - -A comment in CQL is a line beginning by either double dashes (``--``) or double slash (``//``). - -Multi-line comments are also supported through enclosure within ``/*`` and ``*/`` (but nesting is not supported). - -:: - - -- This is a comment - // This is a comment too - /* This is - a multi-line comment */ - -Statements -^^^^^^^^^^ - -CQL consists of statements that can be divided in the following categories: - -- :ref:`data-definition` statements, to define and change how the data is stored (keyspaces and tables). -- :ref:`data-manipulation` statements, for selecting, inserting and deleting data. -- :ref:`secondary-indexes` statements. -- :ref:`materialized-views` statements. -- :ref:`cql-roles` statements. -- :ref:`cql-permissions` statements. -- :ref:`User-Defined Functions ` statements. -- :ref:`udts` statements. -- :ref:`cql-triggers` statements. - -All the statements are listed below and are described in the rest of this documentation (see links above): - -.. productionlist:: - cql_statement: `statement` [ ';' ] - statement: `ddl_statement` - : | `dml_statement` - : | `secondary_index_statement` - : | `materialized_view_statement` - : | `role_or_permission_statement` - : | `udf_statement` - : | `udt_statement` - : | `trigger_statement` - ddl_statement: `use_statement` - : | `create_keyspace_statement` - : | `alter_keyspace_statement` - : | `drop_keyspace_statement` - : | `create_table_statement` - : | `alter_table_statement` - : | `drop_table_statement` - : | `truncate_statement` - dml_statement: `select_statement` - : | `insert_statement` - : | `update_statement` - : | `delete_statement` - : | `batch_statement` - secondary_index_statement: `create_index_statement` - : | `drop_index_statement` - materialized_view_statement: `create_materialized_view_statement` - : | `drop_materialized_view_statement` - role_or_permission_statement: `create_role_statement` - : | `alter_role_statement` - : | `drop_role_statement` - : | `grant_role_statement` - : | `revoke_role_statement` - : | `list_roles_statement` - : | `grant_permission_statement` - : | `revoke_permission_statement` - : | `list_permissions_statement` - : | `create_user_statement` - : | `alter_user_statement` - : | `drop_user_statement` - : | `list_users_statement` - udf_statement: `create_function_statement` - : | `drop_function_statement` - : | `create_aggregate_statement` - : | `drop_aggregate_statement` - udt_statement: `create_type_statement` - : | `alter_type_statement` - : | `drop_type_statement` - trigger_statement: `create_trigger_statement` - : | `drop_trigger_statement` - -.. _prepared-statements: - -Prepared Statements -^^^^^^^^^^^^^^^^^^^ - -CQL supports *prepared statements*. 
Prepared statements are an optimization that allows to parse a query only once but -execute it multiple times with different concrete values. - -Any statement that uses at least one bind marker (see :token:`bind_marker`) will need to be *prepared*. After which the statement -can be *executed* by provided concrete values for each of its marker. The exact details of how a statement is prepared -and then executed depends on the CQL driver used and you should refer to your driver documentation. diff --git a/src/doc/3.11.7/_sources/cql/dml.rst.txt b/src/doc/3.11.7/_sources/cql/dml.rst.txt deleted file mode 100644 index 1308de57e..000000000 --- a/src/doc/3.11.7/_sources/cql/dml.rst.txt +++ /dev/null @@ -1,522 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-manipulation: - -Data Manipulation ------------------ - -This section describes the statements supported by CQL to insert, update, delete and query data. - -.. _select-statement: - -SELECT -^^^^^^ - -Querying data from data is done using a ``SELECT`` statement: - -.. productionlist:: - select_statement: SELECT [ JSON | DISTINCT ] ( `select_clause` | '*' ) - : FROM `table_name` - : [ WHERE `where_clause` ] - : [ GROUP BY `group_by_clause` ] - : [ ORDER BY `ordering_clause` ] - : [ PER PARTITION LIMIT (`integer` | `bind_marker`) ] - : [ LIMIT (`integer` | `bind_marker`) ] - : [ ALLOW FILTERING ] - select_clause: `selector` [ AS `identifier` ] ( ',' `selector` [ AS `identifier` ] ) - selector: `column_name` - : | `term` - : | CAST '(' `selector` AS `cql_type` ')' - : | `function_name` '(' [ `selector` ( ',' `selector` )* ] ')' - : | COUNT '(' '*' ')' - where_clause: `relation` ( AND `relation` )* - relation: `column_name` `operator` `term` - : '(' `column_name` ( ',' `column_name` )* ')' `operator` `tuple_literal` - : TOKEN '(' `column_name` ( ',' `column_name` )* ')' `operator` `term` - operator: '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY - group_by_clause: `column_name` ( ',' `column_name` )* - ordering_clause: `column_name` [ ASC | DESC ] ( ',' `column_name` [ ASC | DESC ] )* - -For instance:: - - SELECT name, occupation FROM users WHERE userid IN (199, 200, 207); - SELECT JSON name, occupation FROM users WHERE userid = 199; - SELECT name AS user_name, occupation AS user_occupation FROM users; - - SELECT time, value - FROM events - WHERE event_type = 'myEvent' - AND time > '2011-02-03' - AND time <= '2012-01-01' - - SELECT COUNT (*) AS user_count FROM users; - -The ``SELECT`` statements reads one or more columns for one or more rows in a table. It returns a result-set of the rows -matching the request, where each row contains the values for the selection corresponding to the query. 
Additionally, -:ref:`functions ` including :ref:`aggregation ` ones can be applied to the result. - -A ``SELECT`` statement contains at least a :ref:`selection clause ` and the name of the table on which -the selection is on (note that CQL does **not** joins or sub-queries and thus a select statement only apply to a single -table). In most case, a select will also have a :ref:`where clause ` and it can optionally have additional -clauses to :ref:`order ` or :ref:`limit ` the results. Lastly, :ref:`queries that require -filtering ` can be allowed if the ``ALLOW FILTERING`` flag is provided. - -.. _selection-clause: - -Selection clause -~~~~~~~~~~~~~~~~ - -The :token:`select_clause` determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. It consists of a comma-separated list of *selectors* or, -alternatively, of the wildcard character (``*``) to select all the columns defined in the table. - -Selectors -````````` - -A :token:`selector` can be one of: - -- A column name of the table selected, to retrieve the values for that column. -- A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the - corresponding column of the result-set will simply have the value of this term for every row returned). -- A casting, which allows to convert a nested selector to a (compatible) type. -- A function call, where the arguments are selector themselves. See the section on :ref:`functions ` for - more details. -- The special call ``COUNT(*)`` to the :ref:`COUNT function `, which counts all non-null results. - -Aliases -``````` - -Every *top-level* selector can also be aliased (using `AS`). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:: - - // Without alias - SELECT intAsBlob(4) FROM t; - - // intAsBlob(4) - // -------------- - // 0x00000004 - - // With alias - SELECT intAsBlob(4) AS four FROM t; - - // four - // ------------ - // 0x00000004 - -.. note:: Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the ``WHERE`` - clause, not in the ``ORDER BY`` clause, ...). You must use the orignal column name instead. - - -``WRITETIME`` and ``TTL`` function -``````````````````````````````````` - -Selection supports two special functions (that aren't allowed anywhere else): ``WRITETIME`` and ``TTL``. Both function -take only one argument and that argument *must* be a column name (so for instance ``TTL(3)`` is invalid). - -Those functions allow to retrieve meta-information that are stored internally for each column, namely: - -- the timestamp of the value of the column for ``WRITETIME``. -- the remaining time to live (in seconds) for the value of the column if it set to expire (and ``null`` otherwise). - -.. _where-clause: - -The ``WHERE`` clause -~~~~~~~~~~~~~~~~~~~~ - -The ``WHERE`` clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the ``PRIMARY KEY`` and/or have a `secondary index <#createIndexStmt>`__ defined on them. - -Not all relations are allowed in a query. For instance, non-equal relations (where ``IN`` is considered as an equal -relation) on a partition key are not supported (but see the use of the ``TOKEN`` method below to do non-equal queries on -the partition key). 
Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations -on them is restricted to the relations that allow to select a **contiguous** (for the ordering) set of rows. For -instance, given:: - - CREATE TABLE posts ( - userid text, - blog_title text, - posted_at timestamp, - entry_title text, - content text, - category int, - PRIMARY KEY (userid, blog_title, posted_at) - ) - -The following query is allowed:: - - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND blog_title='John''s Blog' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):: - - // Needs a blog_title to be set to select ranges of posted_at - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -When specifying relations, the ``TOKEN`` function can be used on the ``PARTITION KEY`` column to query. In that case, -rows will be selected based on the token of their ``PARTITION_KEY`` rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -``token(-1) > token(0)`` in particular). Example:: - - SELECT * FROM posts - WHERE token(userid) > token('tom') AND token(userid) < token('bob') - -Moreover, the ``IN`` relation is only allowed on the last column of the partition key and on the last column of the full -primary key. - -It is also possible to “group” ``CLUSTERING COLUMNS`` together in a relation using the tuple notation. For instance:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01') - -will request all rows that sorts after the one having “John's Blog” as ``blog_tile`` and '2012-01-01' for ``posted_at`` -in the clustering order. In particular, rows having a ``post_at <= '2012-01-01'`` will be returned as long as their -``blog_title > 'John''s Blog'``, which would not be the case for:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND blog_title > 'John''s Blog' - AND posted_at > '2012-01-01' - -The tuple notation may also be used for ``IN`` clauses on clustering columns:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01')) - -The ``CONTAINS`` operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -``CONTAINS`` applies to the map values. The ``CONTAINS KEY`` operator may only be used on map columns and applies to the -map keys. - -.. _group-by-clause: - -Grouping results -~~~~~~~~~~~~~~~~ - -The ``GROUP BY`` option allows to condense into a single row all selected rows that share the same values for a set -of columns. - -Using the ``GROUP BY`` option, it is only possible to group rows at the partition key level or at a clustering column -level. By consequence, the ``GROUP BY`` option only accept as arguments primary key column names in the primary key -order. If a primary key column is restricted by an equality restriction it is not required to be present in the -``GROUP BY`` clause. - -Aggregate functions will produce a separate value for each group. 
If no ``GROUP BY`` clause is specified, -aggregates functions will produce a single value for all the rows. - -If a column is selected without an aggregate function, in a statement with a ``GROUP BY``, the first value encounter -in each group will be returned. - -.. _ordering-clause: - -Ordering results -~~~~~~~~~~~~~~~~ - -The ``ORDER BY`` clause allows to select the order of the returned results. It takes as argument a list of column names -along with the order for the column (``ASC`` for ascendant and ``DESC`` for descendant, omitting the order being -equivalent to ``ASC``). Currently the possible orderings are limited by the :ref:`clustering order ` -defined on the table: - -- if the table has been defined without any specific ``CLUSTERING ORDER``, then then allowed orderings are the order - induced by the clustering columns and the reverse of that one. -- otherwise, the orderings allowed are the order of the ``CLUSTERING ORDER`` option and the reversed one. - -.. _limit-clause: - -Limiting results -~~~~~~~~~~~~~~~~ - -The ``LIMIT`` option to a ``SELECT`` statement limits the number of rows returned by a query, while the ``PER PARTITION -LIMIT`` option limits the number of rows returned for a given partition by the query. Note that both type of limit can -used in the same statement. - -.. _allow-filtering: - -Allowing filtering -~~~~~~~~~~~~~~~~~~ - -By default, CQL only allows select queries that don't involve “filtering” server side, i.e. queries where we know that -all (live) record read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” -queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of -data **returned** by the query (which can be controlled through ``LIMIT``). - -The ``ALLOW FILTERING`` option allows to explicitly allow (some) queries that require filtering. Please note that a -query using ``ALLOW FILTERING`` may thus have unpredictable performance (for the definition above), i.e. even a query -that selects a handful of records **may** exhibit performance that depends on the total amount of data stored in the -cluster. - -For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:: - - CREATE TABLE users ( - username text PRIMARY KEY, - firstname text, - lastname text, - birth_year int, - country text - ) - - CREATE INDEX ON users(birth_year); - -Then the following queries are valid:: - - SELECT * FROM users; - SELECT * FROM users WHERE birth_year = 1981; - -because in both case, Cassandra guarantees that these queries performance will be proportional to the amount of data -returned. In particular, if no users are born in 1981, then the second query performance will not depend of the number -of user profile stored in the database (not directly at least: due to secondary index implementation consideration, this -query may still depend on the number of node in the cluster, which indirectly depends on the amount of data stored. -Nevertheless, the number of nodes will always be multiple number of magnitude lower than the number of user profile -stored). Of course, both query may return very large result set in practice, but the amount of data returned can always -be controlled by adding a ``LIMIT``. 
- -However, the following query will be rejected:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR'; - -because Cassandra cannot guarantee that it won't have to scan large amount of data even if the result to those query is -small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from -France. However, if you “know what you are doing”, you can force the execution of this query by using ``ALLOW -FILTERING`` and so the following query is valid:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING; - -.. _insert-statement: - -INSERT -^^^^^^ - -Inserting data for a row is done using an ``INSERT`` statement: - -.. productionlist:: - insert_statement: INSERT INTO `table_name` ( `names_values` | `json_clause` ) - : [ IF NOT EXISTS ] - : [ USING `update_parameter` ( AND `update_parameter` )* ] - names_values: `names` VALUES `tuple_literal` - json_clause: JSON `string` [ DEFAULT ( NULL | UNSET ) ] - names: '(' `column_name` ( ',' `column_name` )* ')' - -For instance:: - - INSERT INTO NerdMovies (movie, director, main_actor, year) - VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005) - USING TTL 86400; - - INSERT INTO NerdMovies JSON '{"movie": "Serenity", - "director": "Joss Whedon", - "year": 2005}'; - -The ``INSERT`` statement writes one or more columns for a given row in a table. Note that since a row is identified by -its ``PRIMARY KEY``, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the ``VALUES`` syntax. When using the ``JSON`` syntax, they are optional. See the -section on :ref:`JSON support ` for more detail. - -Note that unlike in SQL, ``INSERT`` does not check the prior existence of the row by default: the row is created if none -existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened. - -It is however possible to use the ``IF NOT EXISTS`` condition to only insert if the row does not exist prior to the -insertion. But please note that using ``IF NOT EXISTS`` will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly. - -All updates for an ``INSERT`` are applied atomically and in isolation. - -Please refer to the :ref:`UPDATE ` section for informations on the :token:`update_parameter`. - -Also note that ``INSERT`` does not support counters, while ``UPDATE`` does. - -.. _update-statement: - -UPDATE -^^^^^^ - -Updating a row is done using an ``UPDATE`` statement: - -.. productionlist:: - update_statement: UPDATE `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : SET `assignment` ( ',' `assignment` )* - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - update_parameter: ( TIMESTAMP | TTL ) ( `integer` | `bind_marker` ) - assignment: `simple_selection` '=' `term` - :| `column_name` '=' `column_name` ( '+' | '-' ) `term` - :| `column_name` '=' `list_literal` '+' `column_name` - simple_selection: `column_name` - :| `column_name` '[' `term` ']' - :| `column_name` '.' 
`field_name - condition: `simple_selection` `operator` `term` - -For instance:: - - UPDATE NerdMovies USING TTL 400 - SET director = 'Joss Whedon', - main_actor = 'Nathan Fillion', - year = 2005 - WHERE movie = 'Serenity'; - - UPDATE UserActions - SET total = total + 2 - WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 - AND action = 'click'; - -The ``UPDATE`` statement writes one or more columns for a given row in a table. The :token:`where_clause` is used to -select the row to update and must include all columns composing the ``PRIMARY KEY``. Non primary key columns are then -set using the ``SET`` keyword. - -Note that unlike in SQL, ``UPDATE`` does not check the prior existence of the row by default (except through ``IF``, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred. - -It is however possible to use the conditions on some columns through ``IF``, in which case the row will not be updated -unless the conditions are met. But, please note that using ``IF`` conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly. - -In an ``UPDATE`` statement, all updates within the same partition key are applied atomically and in isolation. - -Regarding the :token:`assignment`: - -- ``c = c + 3`` is used to increment/decrement counters. The column name after the '=' sign **must** be the same than - the one before the '=' sign. Note that increment/decrement is only allowed on counters, and are the *only* update - operations allowed on counters. See the section on :ref:`counters ` for details. -- ``id = id + `` and ``id[value1] = value2`` are for collections, see the :ref:`relevant section - ` for details. -- ``id.field = 3`` is for setting the value of a field on a non-frozen user-defined types. see the :ref:`relevant section - ` for details. - -.. _update-parameters: - -Update parameters -~~~~~~~~~~~~~~~~~ - -The ``UPDATE``, ``INSERT`` (and ``DELETE`` and ``BATCH`` for the ``TIMESTAMP``) statements support the following -parameters: - -- ``TIMESTAMP``: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in - microseconds) at the start of statement execution as the timestamp. This is usually a suitable default. -- ``TTL``: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are - automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not - the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL - is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a - default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of ``null`` is equivalent - to inserting with a TTL of 0. - -.. _delete_statement: - -DELETE -^^^^^^ - -Deleting rows or parts of rows uses the ``DELETE`` statement: - -.. 
productionlist:: - delete_statement: DELETE [ `simple_selection` ( ',' `simple_selection` ) ] - : FROM `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - -For instance:: - - DELETE FROM NerdMovies USING TIMESTAMP 1240003134 - WHERE movie = 'Serenity'; - - DELETE phone FROM Users - WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); - -The ``DELETE`` statement deletes columns and rows. If column names are provided directly after the ``DELETE`` keyword, -only those columns are deleted from the row indicated by the ``WHERE`` clause. Otherwise, whole rows are removed. - -The ``WHERE`` clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -``IN`` operator. A range of rows may be deleted using an inequality operator (such as ``>=``). - -``DELETE`` supports the ``TIMESTAMP`` option with the same semantics as in :ref:`updates `. - -In a ``DELETE`` statement, all deletions within the same partition key are applied atomically and in isolation. - -A ``DELETE`` operation can be conditional through the use of an ``IF`` clause, similar to ``UPDATE`` and ``INSERT`` -statements. However, as with ``INSERT`` and ``UPDATE`` statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly. - -.. _batch_statement: - -BATCH -^^^^^ - -Multiple ``INSERT``, ``UPDATE`` and ``DELETE`` can be executed in a single statement by grouping them through a -``BATCH`` statement: - -.. productionlist:: - batch_statement: BEGIN [ UNLOGGED | COUNTER ] BATCH - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : `modification_statement` ( ';' `modification_statement` )* - : APPLY BATCH - modification_statement: `insert_statement` | `update_statement` | `delete_statement` - -For instance:: - - BEGIN BATCH - INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user'); - UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3'; - INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c'); - DELETE name FROM users WHERE userid = 'user1'; - APPLY BATCH; - -The ``BATCH`` statement group multiple modification statements (insertions/updates and deletions) into a single -statement. It serves several purposes: - -- It saves network round-trips between the client and the server (and sometimes between the server coordinator and the - replicas) when batching multiple updates. -- All updates in a ``BATCH`` belonging to a given partition key are performed in isolation. -- By default, all operations in the batch are performed as *logged*, to ensure all mutations eventually complete (or - none will). See the notes on :ref:`UNLOGGED batches ` for more details. - -Note that: - -- ``BATCH`` statements may only contain ``UPDATE``, ``INSERT`` and ``DELETE`` statements (not other batches for instance). -- Batches are *not* a full analogue for SQL transactions. -- If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp - (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra's conflict - resolution procedure in the case of `timestamp ties `__, operations may - be applied in an order that is different from the order they are listed in the ``BATCH`` statement. To force a - particular operation ordering, you must specify per-operation timestamps. 
-- A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization. - -.. _unlogged-batches: - -``UNLOGGED`` batches -~~~~~~~~~~~~~~~~~~~~ - -By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition). - -There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur -this penalty, you can tell Cassandra to skip the batchlog with the ``UNLOGGED`` option. If the ``UNLOGGED`` option is -used, a failed batch might leave the patch only partly applied. - -``COUNTER`` batches -~~~~~~~~~~~~~~~~~~~ - -Use the ``COUNTER`` option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent. diff --git a/src/doc/3.11.7/_sources/cql/functions.rst.txt b/src/doc/3.11.7/_sources/cql/functions.rst.txt deleted file mode 100644 index 47026cd94..000000000 --- a/src/doc/3.11.7/_sources/cql/functions.rst.txt +++ /dev/null @@ -1,558 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-functions: - -.. Need some intro for UDF and native functions in general and point those to it. -.. _udfs: -.. _native-functions: - -Functions ---------- - -CQL supports 2 main categories of functions: - -- the :ref:`scalar functions `, which simply take a number of values and produce an output with it. -- the :ref:`aggregate functions `, which are used to aggregate multiple rows results from a - ``SELECT`` statement. - -In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined -functions. - -.. note:: By default, the use of user-defined functions is disabled by default for security concerns (even when - enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do - evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions`` - in ``cassandra.yaml`` to enable them. - -A function is identifier by its name: - -.. productionlist:: - function_name: [ `keyspace_name` '.' ] `name` - -.. _scalar-functions: - -Scalar functions -^^^^^^^^^^^^^^^^ - -.. _scalar-native-functions: - -Native functions -~~~~~~~~~~~~~~~~ - -Cast -```` - -The ``cast`` function can be used to converts one native datatype to another. - -The following table describes the conversions supported by the ``cast`` function. Cassandra will silently ignore any -cast converting a datatype into its own datatype. 
- -=============== ======================================================================================================= - From To -=============== ======================================================================================================= - ``ascii`` ``text``, ``varchar`` - ``bigint`` ``tinyint``, ``smallint``, ``int``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``boolean`` ``text``, ``varchar`` - ``counter`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``date`` ``timestamp`` - ``decimal`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``varint``, ``text``, - ``varchar`` - ``double`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``float`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``inet`` ``text``, ``varchar`` - ``int`` ``tinyint``, ``smallint``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``smallint`` ``tinyint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``time`` ``text``, ``varchar`` - ``timestamp`` ``date``, ``text``, ``varchar`` - ``timeuuid`` ``timestamp``, ``date``, ``text``, ``varchar`` - ``tinyint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``uuid`` ``text``, ``varchar`` - ``varint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``text``, - ``varchar`` -=============== ======================================================================================================= - -The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value -'1.0'. For instance:: - - SELECT avg(cast(count as double)) FROM myTable - -Token -````` - -The ``token`` function allows to compute the token for a given partition key. The exact signature of the token function -depends on the table concerned and of the partitioner used by the cluster. - -The type of the arguments of the ``token`` depend on the type of the partition key columns. The return type depend on -the partitioner in use: - -- For Murmur3Partitioner, the return type is ``bigint``. -- For RandomPartitioner, the return type is ``varint``. -- For ByteOrderedPartitioner, the return type is ``blob``. - -For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:: - - CREATE TABLE users ( - userid text PRIMARY KEY, - username text, - ) - -then the ``token`` function will take a single argument of type ``text`` (in that case, the partition key is ``userid`` -(there is no clustering columns so the partition key is the same than the primary key)), and the return type will be -``bigint``. - -Uuid -```` -The ``uuid`` function takes no parameters and generates a random type 4 uuid suitable for use in ``INSERT`` or -``UPDATE`` statements. - -.. _timeuuid-functions: - -Timeuuid functions -`````````````````` - -``now`` -####### - -The ``now`` function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time where -the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in -``WHERE`` clauses. 
For instance, a query of the form:: - - SELECT * FROM myTable WHERE t = now() - -will never return any result by design, since the value returned by ``now()`` is guaranteed to be unique. - -``minTimeuuid`` and ``maxTimeuuid`` -################################### - -The ``minTimeuuid`` (resp. ``maxTimeuuid``) function takes a ``timestamp`` value ``t`` (which can be `either a timestamp -or a date string `) and return a *fake* ``timeuuid`` corresponding to the *smallest* (resp. *biggest*) -possible ``timeuuid`` having for timestamp ``t``. So for instance:: - - SELECT * FROM myTable - WHERE t > maxTimeuuid('2013-01-01 00:05+0000') - AND t < minTimeuuid('2013-02-02 10:00+0000') - -will select all rows where the ``timeuuid`` column ``t`` is strictly older than ``'2013-01-01 00:05+0000'`` but strictly -younger than ``'2013-02-02 10:00+0000'``. Please note that ``t >= maxTimeuuid('2013-01-01 00:05+0000')`` would still -*not* select a ``timeuuid`` generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to ``t > -maxTimeuuid('2013-01-01 00:05+0000')``. - -.. note:: We called the values generated by ``minTimeuuid`` and ``maxTimeuuid`` *fake* UUID because they do no respect - the Time-Based UUID generation process specified by the `RFC 4122 `__. In - particular, the value returned by these 2 methods will not be unique. This means you should only use those methods - for querying (as in the example above). Inserting the result of those methods is almost certainly *a bad idea*. - -Time conversion functions -````````````````````````` - -A number of functions are provided to “convert” a ``timeuuid``, a ``timestamp`` or a ``date`` into another ``native`` -type. - -===================== =============== =================================================================== - Function name Input type Description -===================== =============== =================================================================== - ``toDate`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``date`` type - ``toDate`` ``timestamp`` Converts the ``timestamp`` argument into a ``date`` type - ``toTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``timestamp`` type - ``toTimestamp`` ``date`` Converts the ``date`` argument into a ``timestamp`` type - ``toUnixTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``timestamp`` Converts the ``timestamp`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``date`` Converts the ``date`` argument into a ``bigInt`` raw value - ``dateOf`` ``timeuuid`` Similar to ``toTimestamp(timeuuid)`` (DEPRECATED) - ``unixTimestampOf`` ``timeuuid`` Similar to ``toUnixTimestamp(timeuuid)`` (DEPRECATED) -===================== =============== =================================================================== - -Blob conversion functions -````````````````````````` -A number of functions are provided to “convert” the native types into binary data (``blob``). For every -```` ``type`` supported by CQL (a notable exceptions is ``blob``, for obvious reasons), the function -``typeAsBlob`` takes a argument of type ``type`` and return it as a ``blob``. Conversely, the function ``blobAsType`` -takes a 64-bit ``blob`` argument and convert it to a ``bigint`` value. And so for instance, ``bigintAsBlob(3)`` is -``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``. - -.. 
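Returning to the time conversion functions listed above, the following short sketch assumes a hypothetical table ``events`` with a ``timeuuid`` column ``id`` and a ``timestamp`` column ``created_at``::

    SELECT toDate(id) AS event_date,
           toTimestamp(id) AS event_ts,
           toUnixTimestamp(created_at) AS epoch_millis
    FROM events;

Here ``toDate(id)`` yields a ``date``, ``toTimestamp(id)`` a ``timestamp``, and ``toUnixTimestamp(created_at)`` a ``bigint``, as described in the table of time conversion functions.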
_user-defined-scalar-functions: - -User-defined functions -~~~~~~~~~~~~~~~~~~~~~~ - -User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in *Java* and *JavaScript*. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath. - -UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster. - -UDFs can be *overloaded* - i.e. multiple UDFs with different argument types but the same function name. Example:: - - CREATE FUNCTION sample ( arg int ) ...; - CREATE FUNCTION sample ( arg text ) ...; - -User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing. - -It is valid to use *complex* types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types. - -Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too. - -Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:: - - CREATE FUNCTION some_function ( arg int ) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE java - AS $$ return arg; $$; - - SELECT some_function(column) FROM atable ...; - UPDATE atable SET col = some_function(?) ...; - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct_using_udt ( udtarg frozen ) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ return udtarg.getString("txt"); $$; - -User-defined functions can be used in ``SELECT``, ``INSERT`` and ``UPDATE`` statements. - -The implicitly available ``udfContext`` field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:: - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct\_using\_udt ( somearg int ) - RETURNS NULL ON NULL INPUT - RETURNS custom_type - LANGUAGE java - AS $$ - UDTValue udt = udfContext.newReturnUDTValue(); - udt.setString("txt", "some string"); - udt.setInt("i", 42); - return udt; - $$; - -The definition of the ``UDFContext`` interface can be found in the Apache Cassandra source code for -``org.apache.cassandra.cql3.functions.UDFContext``. - -.. code-block:: java - - public interface UDFContext - { - UDTValue newArgUDTValue(String argName); - UDTValue newArgUDTValue(int argNum); - UDTValue newReturnUDTValue(); - UDTValue newUDTValue(String udtName); - TupleValue newArgTupleValue(String argName); - TupleValue newArgTupleValue(int argNum); - TupleValue newReturnTupleValue(); - TupleValue newTupleValue(String cqlDefinition); - } - -Java UDFs already have some imports for common interfaces and classes defined. These imports are: - -.. 
code-block:: java - - import java.nio.ByteBuffer; - import java.util.List; - import java.util.Map; - import java.util.Set; - import org.apache.cassandra.cql3.functions.UDFContext; - import com.datastax.driver.core.TypeCodec; - import com.datastax.driver.core.TupleValue; - import com.datastax.driver.core.UDTValue; - -Please note, that these convenience imports are not available for script UDFs. - -.. _create-function-statement: - -CREATE FUNCTION -``````````````` - -Creating a new user-defined function uses the ``CREATE FUNCTION`` statement: - -.. productionlist:: - create_function_statement: CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS] - : `function_name` '(' `arguments_declaration` ')' - : [ CALLED | RETURNS NULL ] ON NULL INPUT - : RETURNS `cql_type` - : LANGUAGE `identifier` - : AS `string` - arguments_declaration: `identifier` `cql_type` ( ',' `identifier` `cql_type` )* - -For instance:: - - CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen, listarg list) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - - CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int) - CALLED ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - -``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords either creates a function or replaces an existing one with -the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE`` fails if a function with the same signature already -exists. - -If the optional ``IF NOT EXISTS`` keywords are used, the function will -only be created if another function with the same signature does not -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -Behavior on invocation with ``null`` values must be defined for each -function. There are two options: - -#. ``RETURNS NULL ON NULL INPUT`` declares that the function will always - return ``null`` if any of the input arguments is ``null``. -#. ``CALLED ON NULL INPUT`` declares that the function will always be - executed. - -Function Signature -################## - -Signatures are used to distinguish individual functions. The signature consists of: - -#. The fully qualified function name - i.e *keyspace* plus *function-name* -#. The concatenated list of all argument types - -Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules. - -Functions belong to a keyspace. If no keyspace is specified in ````, the current keyspace is used (i.e. -the keyspace specified using the ``USE`` statement). It is not possible to create a user-defined function in one of the -system keyspaces. - -.. _drop-function-statement: - -DROP FUNCTION -````````````` - -Dropping a function uses the ``DROP FUNCTION`` statement: - -.. productionlist:: - drop_function_statement: DROP FUNCTION [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - arguments_signature: `cql_type` ( ',' `cql_type` )* - -For instance:: - - DROP FUNCTION myfunction; - DROP FUNCTION mykeyspace.afunction; - DROP FUNCTION afunction ( int ); - DROP FUNCTION afunction ( text ); - -You must specify the argument types (:token:`arguments_signature`) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions). - -``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if -it doesn't - -.. 
_aggregate-functions: - -Aggregate functions -^^^^^^^^^^^^^^^^^^^ - -Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set. - -If ``normal`` columns, ``scalar functions``, ``UDT`` fields, ``writetime`` or ``ttl`` are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query. - -Native aggregates -~~~~~~~~~~~~~~~~~ - -.. _count-function: - -Count -````` - -The ``count`` function can be used to count the rows returned by a query. Example:: - - SELECT COUNT (*) FROM plays; - SELECT COUNT (1) FROM plays; - -It also can be used to count the non null value of a given column:: - - SELECT COUNT (scores) FROM plays; - -Max and Min -``````````` - -The ``max`` and ``min`` functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:: - - SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake'; - -Sum -``` - -The ``sum`` function can be used to sum up all the values returned by a query for a given column. For instance:: - - SELECT SUM (players) FROM plays; - -Avg -``` - -The ``avg`` function can be used to compute the average of all the values returned by a query for a given column. For -instance:: - - SELECT AVG (players) FROM plays; - -.. _user-defined-aggregates-functions: - -User-Defined Aggregates -~~~~~~~~~~~~~~~~~~~~~~~ - -User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -*count*, *min*, and *max*. - -Each aggregate requires an *initial state* (``INITCOND``, which defaults to ``null``) of type ``STYPE``. The first -argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional ``FINALFUNC`` is executed with last -state value as its argument. - -``STYPE`` is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate). - -User-defined aggregates can be used in ``SELECT`` statement. - -A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` -statement):: - - CREATE OR REPLACE FUNCTION averageState(state tuple, val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS $$ - if (val != null) { - state.setInt(0, state.getInt(0)+1); - state.setLong(1, state.getLong(1)+val.intValue()); - } - return state; - $$; - - CREATE OR REPLACE FUNCTION averageFinal (state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS $$ - double r = 0; - if (state.getInt(0) == 0) return null; - r = state.getLong(1); - r /= state.getInt(0); - return Double.valueOf(r); - $$; - - CREATE OR REPLACE AGGREGATE average(int) - SFUNC averageState - STYPE tuple - FINALFUNC averageFinal - INITCOND (0, 0); - - CREATE TABLE atable ( - pk int PRIMARY KEY, - val int - ); - - INSERT INTO atable (pk, val) VALUES (1,1); - INSERT INTO atable (pk, val) VALUES (2,2); - INSERT INTO atable (pk, val) VALUES (3,3); - INSERT INTO atable (pk, val) VALUES (4,4); - - SELECT average(val) FROM atable; - -.. 
_create-aggregate-statement: - -CREATE AGGREGATE -```````````````` - -Creating (or replacing) a user-defined aggregate function uses the ``CREATE AGGREGATE`` statement: - -.. productionlist:: - create_aggregate_statement: CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ] - : `function_name` '(' `arguments_signature` ')' - : SFUNC `function_name` - : STYPE `cql_type` - : [ FINALFUNC `function_name` ] - : [ INITCOND `term` ] - -See above for a complete example. - -``CREATE AGGREGATE`` with the optional ``OR REPLACE`` keywords either creates an aggregate or replaces an existing one -with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature -already exists. - -``CREATE AGGREGATE`` with the optional ``IF NOT EXISTS`` keywords either creates an aggregate if it does not already -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -``STYPE`` defines the type of the state value and must be specified. - -The optional ``INITCOND`` defines the initial state value for the aggregate. It defaults to ``null``. A non-\ ``null`` -``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``. - -``SFUNC`` references an existing function to be used as the state modifying function. The type of first argument of the -state function must match ``STYPE``. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called -with ``null``. - -The optional ``FINALFUNC`` is called just before the aggregate result is returned. It must take only one argument with -type ``STYPE``. The return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS -NULL ON NULL INPUT`` means that the aggregate's return value will be ``null``, if the last state is ``null``. - -If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is -defined, it is the return type of that function. - -.. _drop-aggregate-statement: - -DROP AGGREGATE -`````````````` - -Dropping an user-defined aggregate function uses the ``DROP AGGREGATE`` statement: - -.. productionlist:: - drop_aggregate_statement: DROP AGGREGATE [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - -For instance:: - - DROP AGGREGATE myAggregate; - DROP AGGREGATE myKeyspace.anAggregate; - DROP AGGREGATE someAggregate ( int ); - DROP AGGREGATE someAggregate ( text ); - -The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates). - -``DROP AGGREGATE`` with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist. diff --git a/src/doc/3.11.7/_sources/cql/index.rst.txt b/src/doc/3.11.7/_sources/cql/index.rst.txt deleted file mode 100644 index 00d90e41e..000000000 --- a/src/doc/3.11.7/_sources/cql/index.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _cql: - -The Cassandra Query Language (CQL) -================================== - -This document describes the Cassandra Query Language (CQL) [#]_. Note that this document describes the last version of -the languages. However, the `changes <#changes>`_ section provides the diff between the different versions of CQL. - -CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For -that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have -in SQL. But please note that as such, they do **not** refer to the concept of rows and columns found in the deprecated -thrift API (and earlier version 1 and 2 of CQL). - -.. toctree:: - :maxdepth: 2 - - definitions - types - ddl - dml - indexes - mvs - security - functions - json - triggers - appendices - changes - -.. [#] Technically, this document CQL version 3, which is not backward compatible with CQL version 1 and 2 (which have - been deprecated and remove) and differs from it in numerous ways. diff --git a/src/doc/3.11.7/_sources/cql/indexes.rst.txt b/src/doc/3.11.7/_sources/cql/indexes.rst.txt deleted file mode 100644 index 81fe429d0..000000000 --- a/src/doc/3.11.7/_sources/cql/indexes.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _secondary-indexes: - -Secondary Indexes ------------------ - -CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by: - -.. productionlist:: - index_name: re('[a-zA-Z_0-9]+') - - - -.. _create-index-statement: - -CREATE INDEX -^^^^^^^^^^^^ - -Creating a secondary index on a table uses the ``CREATE INDEX`` statement: - -.. 
productionlist:: - create_index_statement: CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ `index_name` ] - : ON `table_name` '(' `index_identifier` ')' - : [ USING `string` [ WITH OPTIONS = `map_literal` ] ] - index_identifier: `column_name` - :| ( KEYS | VALUES | ENTRIES | FULL ) '(' `column_name` ')' - -For instance:: - - CREATE INDEX userIndex ON NerdMovies (user); - CREATE INDEX ON Mutants (abilityId); - CREATE INDEX ON users (keys(favs)); - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass'; - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'}; - -The ``CREATE INDEX`` statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ``ON`` keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time. - -Attempting to create an already existing index will return an error unless the ``IF NOT EXISTS`` option is used. If it -is used, the statement will be a no-op if the index already exists. - -Indexes on Map Keys -~~~~~~~~~~~~~~~~~~~ - -When creating an index on a :ref:`maps `, you may index either the keys or the values. If the column identifier is -placed within the ``keys()`` function, the index will be on the map keys, allowing you to use ``CONTAINS KEY`` in -``WHERE`` clauses. Otherwise, the index will be on the map values. - -.. _drop-index-statement: - -DROP INDEX -^^^^^^^^^^ - -Dropping a secondary index uses the ``DROP INDEX`` statement: - -.. productionlist:: - drop_index_statement: DROP INDEX [ IF EXISTS ] `index_name` - -The ``DROP INDEX`` statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index. - -If the index does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. diff --git a/src/doc/3.11.7/_sources/cql/json.rst.txt b/src/doc/3.11.7/_sources/cql/json.rst.txt deleted file mode 100644 index 539180aed..000000000 --- a/src/doc/3.11.7/_sources/cql/json.rst.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-json: - -JSON Support ------------- - -Cassandra 2.2 introduces JSON support to :ref:`SELECT ` and :ref:`INSERT ` -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents. 
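As a quick illustration of the two statements described in the following sections (the ``users`` table and its ``id`` and ``name`` columns are hypothetical)::

    INSERT INTO users JSON '{"id": 1, "name": "alice"}';
    SELECT JSON id, name FROM users;

The ``INSERT`` consumes a JSON-encoded map for a single row, and the ``SELECT`` returns each matching row as a single JSON-encoded map.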
-
-SELECT JSON
-^^^^^^^^^^^
-
-With ``SELECT`` statements, the ``JSON`` keyword can be used to return each row as a single ``JSON`` encoded map. The
-remainder of the ``SELECT`` statement behavior is the same.
-
-The result map keys are the same as the column names in a normal result set. For example, a statement like ``SELECT JSON
-a, ttl(b) FROM ...`` would result in a map with keys ``"a"`` and ``"ttl(b)"``. However, there is one notable exception:
-for symmetry with ``INSERT JSON`` behavior, case-sensitive column names with upper-case letters will be surrounded with
-double quotes. For example, ``SELECT JSON myColumn FROM ...`` would result in a map key ``"\"myColumn\""`` (note the
-escaped quotes).
-
-The map values will be ``JSON``-encoded representations (as described below) of the result set values.
-
-INSERT JSON
-^^^^^^^^^^^
-
-With ``INSERT`` statements, the new ``JSON`` keyword can be used to enable inserting a ``JSON`` encoded map as a single
-row. The format of the ``JSON`` map should generally match that returned by a ``SELECT JSON`` statement on the same
-table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a
-table with two columns named "myKey" and "value", you would do the following::
-
-    INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-By default (or if ``DEFAULT NULL`` is explicitly used), a column omitted from the ``JSON`` map will be set to ``NULL``,
-meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created).
-Alternatively, if the ``DEFAULT UNSET`` directive is used after the value, omitted column values will be left unset,
-meaning that pre-existing values for those columns will be preserved.
-
-
-JSON Encoding of Cassandra Data Types
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Where possible, Cassandra will represent and accept data types in their native ``JSON`` representation. Cassandra will
-also accept string representations matching the CQL literal format for all single-field types. For example, floats,
-ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples,
-and user-defined types, must be represented by native ``JSON`` collections (maps and lists) or a JSON-encoded string
-representation of the collection.
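For example, assuming a hypothetical ``items`` table with a ``list`` column ``tags`` and a ``map`` column ``attrs``, the collections are written as native JSON structures::

    INSERT INTO items JSON '{"id": 1, "tags": ["red", "blue"], "attrs": {"size": "L"}}';

A JSON-encoded string holding the same list or map would also be accepted, as noted above.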
- -The following table describes the encodings that Cassandra will accept in ``INSERT JSON`` values (and ``fromJson()`` -arguments) as well as the format Cassandra will use when returning data for ``SELECT JSON`` statements (and -``fromJson()``): - -=============== ======================== =============== ============================================================== - Type Formats accepted Return format Notes -=============== ======================== =============== ============================================================== - ``ascii`` string string Uses JSON's ``\u`` character escape - ``bigint`` integer, string integer String must be valid 64 bit integer - ``blob`` string string String should be 0x followed by an even number of hex digits - ``boolean`` boolean, string boolean String must be "true" or "false" - ``date`` string string Date in format ``YYYY-MM-DD``, timezone UTC - ``decimal`` integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in - client-side decoder - ``double`` integer, float, string float String must be valid integer or float - ``float`` integer, float, string float String must be valid integer or float - ``inet`` string string IPv4 or IPv6 address - ``int`` integer, string integer String must be valid 32 bit integer - ``list`` list, string list Uses JSON's native list representation - ``map`` map, string map Uses JSON's native map representation - ``smallint`` integer, string integer String must be valid 16 bit integer - ``set`` list, string list Uses JSON's native list representation - ``text`` string string Uses JSON's ``\u`` character escape - ``time`` string string Time of day in format ``HH-MM-SS[.fffffffff]`` - ``timestamp`` integer, string string A timestamp. Strings constant allows to input :ref:`timestamps - as dates `. Datestamps with format ``YYYY-MM-DD - HH:MM:SS.SSS`` are returned. - ``timeuuid`` string string Type 1 UUID. See :token:`constant` for the UUID format - ``tinyint`` integer, string integer String must be valid 8 bit integer - ``tuple`` list, string list Uses JSON's native list representation - ``UDT`` map, string map Uses JSON's native map representation with field names as keys - ``uuid`` string string See :token:`constant` for the UUID format - ``varchar`` string string Uses JSON's ``\u`` character escape - ``varint`` integer, string integer Variable length; may overflow 32 or 64 bit integers in - client-side decoder -=============== ======================== =============== ============================================================== - -The fromJson() Function -^^^^^^^^^^^^^^^^^^^^^^^ - -The ``fromJson()`` function may be used similarly to ``INSERT JSON``, but for a single column value. It may only be used -in the ``VALUES`` clause of an ``INSERT`` statement or as one of the column values in an ``UPDATE``, ``DELETE``, or -``SELECT`` statement. For example, it cannot be used in the selection clause of a ``SELECT`` statement. - -The toJson() Function -^^^^^^^^^^^^^^^^^^^^^ - -The ``toJson()`` function may be used similarly to ``SELECT JSON``, but for a single column value. It may only be used -in the selection clause of a ``SELECT`` statement. diff --git a/src/doc/3.11.7/_sources/cql/mvs.rst.txt b/src/doc/3.11.7/_sources/cql/mvs.rst.txt deleted file mode 100644 index aabea10d8..000000000 --- a/src/doc/3.11.7/_sources/cql/mvs.rst.txt +++ /dev/null @@ -1,166 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _materialized-views: - -Materialized Views ------------------- - -Materialized views names are defined by: - -.. productionlist:: - view_name: re('[a-zA-Z_0-9]+') - - -.. _create-materialized-view-statement: - -CREATE MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^^ - -You can create a materialized view on a table using a ``CREATE MATERIALIZED VIEW`` statement: - -.. productionlist:: - create_materialized_view_statement: CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] `view_name` AS - : `select_statement` - : PRIMARY KEY '(' `primary_key` ')' - : WITH `table_options` - -For instance:: - - CREATE MATERIALIZED VIEW monkeySpecies_by_population AS - SELECT * FROM monkeySpecies - WHERE population IS NOT NULL AND species IS NOT NULL - PRIMARY KEY (population, species) - WITH comment='Allow query by population instead of species'; - -The ``CREATE MATERIALIZED VIEW`` statement creates a new materialized view. Each such view is a set of *rows* which -corresponds to rows which are present in the underlying, or base, table specified in the ``SELECT`` statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view. - -Creating a materialized view has 3 main parts: - -- The :ref:`select statement ` that restrict the data included in the view. -- The :ref:`primary key ` definition for the view. -- The :ref:`options ` for the view. - -Attempting to create an already existing materialized view will return an error unless the ``IF NOT EXISTS`` option is -used. If it is used, the statement will be a no-op if the materialized view already exists. - -.. _mv-select: - -MV select statement -``````````````````` - -The select statement of a materialized view creation defines which of the base table is included in the view. That -statement is limited in a number of ways: - -- the :ref:`selection ` is limited to those that only select columns of the base table. In other - words, you can't use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can - however use `*` as a shortcut of selecting all columns. Further, :ref:`static columns ` cannot be - included in a materialized view (which means ``SELECT *`` isn't allowed if the base table has static columns). -- the ``WHERE`` clause have the following restrictions: - - - it cannot include any :token:`bind_marker`. - - the columns that are not part of the *base table* primary key can only be restricted by an ``IS NOT NULL`` - restriction. No other restriction is allowed. - - as the columns that are part of the *view* primary key cannot be null, they must always be at least restricted by a - ``IS NOT NULL`` restriction (or any other restriction, but they must have one). - -- it cannot have neither an :ref:`ordering clause `, nor a :ref:`limit `, nor :ref:`ALLOW - FILTERING `. - -.. 
_mv-primary-key: - -MV primary key -`````````````` - -A view must have a primary key and that primary key must conform to the following restrictions: - -- it must contain all the primary key columns of the base table. This ensures that every row of the view correspond to - exactly one row of the base table. -- it can only contain a single column that is not a primary key column in the base table. - -So for instance, give the following base table definition:: - - CREATE TABLE t ( - k int, - c1 int, - c2 int, - v1 int, - v2 int, - PRIMARY KEY (k, c1, c2) - ) - -then the following view definitions are allowed:: - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, k, c2) - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (v1, k, c1, c2) - -but the following ones are **not** allowed:: - - // Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL - PRIMARY KEY (v1, v2, k, c1, c2) - - // Error: must include k in the primary as it's a base table primary key column - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, c2) - - -.. _mv-options: - -MV options -`````````` - -A materialized view is internally implemented by a table and as such, creating a MV allows the :ref:`same options than -creating a table `. - - -.. _alter-materialized-view-statement: - -ALTER MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^ - -After creation, you can alter the options of a materialized view using the ``ALTER MATERIALIZED VIEW`` statement: - -.. productionlist:: - alter_materialized_view_statement: ALTER MATERIALIZED VIEW `view_name` WITH `table_options` - -The options that can be updated are the same than at creation time and thus the :ref:`same than for tables -`. - -.. _drop-materialized-view-statement: - -DROP MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^ - -Dropping a materialized view users the ``DROP MATERIALIZED VIEW`` statement: - -.. productionlist:: - drop_materialized_view_statement: DROP MATERIALIZED VIEW [ IF EXISTS ] `view_name`; - -If the materialized view does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case -the operation is a no-op. diff --git a/src/doc/3.11.7/_sources/cql/security.rst.txt b/src/doc/3.11.7/_sources/cql/security.rst.txt deleted file mode 100644 index 099fcc48e..000000000 --- a/src/doc/3.11.7/_sources/cql/security.rst.txt +++ /dev/null @@ -1,502 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. 
_cql-security: - -Security --------- - -.. _cql-roles: - -Database Roles -^^^^^^^^^^^^^^ - -CQL uses database roles to represent users and group of users. Syntactically, a role is defined by: - -.. productionlist:: - role_name: `identifier` | `string` - -.. _create-role-statement: - -CREATE ROLE -~~~~~~~~~~~ - -Creating a role uses the ``CREATE ROLE`` statement: - -.. productionlist:: - create_role_statement: CREATE ROLE [ IF NOT EXISTS ] `role_name` - : [ WITH `role_options` ] - role_options: `role_option` ( AND `role_option` )* - role_option: PASSWORD '=' `string` - :| LOGIN '=' `boolean` - :| SUPERUSER '=' `boolean` - :| OPTIONS '=' `map_literal` - -For instance:: - - CREATE ROLE new_role; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true; - CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 }; - -By default roles do not possess ``LOGIN`` privileges or ``SUPERUSER`` status. - -:ref:`Permissions ` on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and ``SUPERUSER`` status are inherited, but the ``LOGIN`` privilege is -not. - -If a role has the ``LOGIN`` privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role. - -Only a client with with the ``CREATE`` permission on the database roles resource may issue ``CREATE ROLE`` requests (see -the :ref:`relevant section ` below), unless the client is a ``SUPERUSER``. Role management in Cassandra -is pluggable and custom implementations may support only a subset of the listed options. - -Role names should be quoted if they contain non-alphanumeric characters. - -.. _setting-credentials-for-internal-authentication: - -Setting credentials for internal authentication -``````````````````````````````````````````````` - -Use the ``WITH PASSWORD`` clause to set a password for internal authentication, enclosing the password in single -quotation marks. - -If internal authentication has not been set up or the role does not have ``LOGIN`` privileges, the ``WITH PASSWORD`` -clause is not necessary. - -Creating a role conditionally -````````````````````````````` - -Attempting to create an existing role results in an invalid query condition unless the ``IF NOT EXISTS`` option is used. -If the option is used and the role exists, the statement is a no-op:: - - CREATE ROLE other_role; - CREATE ROLE IF NOT EXISTS other_role; - - -.. _alter-role-statement: - -ALTER ROLE -~~~~~~~~~~ - -Altering a role options uses the ``ALTER ROLE`` statement: - -.. productionlist:: - alter_role_statement: ALTER ROLE `role_name` WITH `role_options` - -For instance:: - - ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false; - -Conditions on executing ``ALTER ROLE`` statements: - -- A client must have ``SUPERUSER`` status to alter the ``SUPERUSER`` status of another role -- A client cannot alter the ``SUPERUSER`` status of any role it currently holds -- A client can only modify certain properties of the role with which it identified at login (e.g. ``PASSWORD``) -- To modify properties of a role, the client must be granted ``ALTER`` :ref:`permission ` on that role - -.. 
_drop-role-statement: - -DROP ROLE -~~~~~~~~~ - -Dropping a role uses the ``DROP ROLE`` statement: - -.. productionlist:: - drop_role_statement: DROP ROLE [ IF EXISTS ] `role_name` - -``DROP ROLE`` requires the client to have ``DROP`` :ref:`permission ` on the role in question. In -addition, client may not ``DROP`` the role with which it identified at login. Finally, only a client with ``SUPERUSER`` -status may ``DROP`` another ``SUPERUSER`` role. - -Attempting to drop a role which does not exist results in an invalid query condition unless the ``IF EXISTS`` option is -used. If the option is used and the role does not exist the statement is a no-op. - -.. _grant-role-statement: - -GRANT ROLE -~~~~~~~~~~ - -Granting a role to another uses the ``GRANT ROLE`` statement: - -.. productionlist:: - grant_role_statement: GRANT `role_name` TO `role_name` - -For instance:: - - GRANT report_writer TO alice; - -This statement grants the ``report_writer`` role to ``alice``. Any permissions granted to ``report_writer`` are also -acquired by ``alice``. - -Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:: - - GRANT role_a TO role_b; - GRANT role_b TO role_a; - - GRANT role_a TO role_b; - GRANT role_b TO role_c; - GRANT role_c TO role_a; - -.. _revoke-role-statement: - -REVOKE ROLE -~~~~~~~~~~~ - -Revoking a role uses the ``REVOKE ROLE`` statement: - -.. productionlist:: - revoke_role_statement: REVOKE `role_name` FROM `role_name` - -For instance:: - - REVOKE report_writer FROM alice; - -This statement revokes the ``report_writer`` role from ``alice``. Any permissions that ``alice`` has acquired via the -``report_writer`` role are also revoked. - -.. _list-roles-statement: - -LIST ROLES -~~~~~~~~~~ - -All the known roles (in the system or granted to specific role) can be listed using the ``LIST ROLES`` statement: - -.. productionlist:: - list_roles_statement: LIST ROLES [ OF `role_name` ] [ NORECURSIVE ] - -For instance:: - - LIST ROLES; - -returns all known roles in the system, this requires ``DESCRIBE`` permission on the database roles resource. And:: - - LIST ROLES OF alice; - -enumerates all roles granted to ``alice``, including those transitively acquired. But:: - - LIST ROLES OF bob NORECURSIVE - -lists all roles directly granted to ``bob`` without including any of the transitively acquired ones. - -Users -^^^^^ - -Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -``USER``. For backward compatibility, the legacy syntax has been preserved with ``USER`` centric statements becoming -synonyms for the ``ROLE`` based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role. - -.. _create-user-statement: - -CREATE USER -~~~~~~~~~~~ - -Creating a user uses the ``CREATE USER`` statement: - -.. productionlist:: - create_user_statement: CREATE USER [ IF NOT EXISTS ] `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - user_option: SUPERUSER | NOSUPERUSER - -For instance:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER; - -``CREATE USER`` is equivalent to ``CREATE ROLE`` where the ``LOGIN`` option is ``true``. 
So, the following pairs of -statements are equivalent:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - - CREATE USER alice WITH PASSWORD 'password_a'; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - -.. _alter-user-statement: - -ALTER USER -~~~~~~~~~~ - -Altering the options of a user uses the ``ALTER USER`` statement: - -.. productionlist:: - alter_user_statement: ALTER USER `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - -For instance:: - - ALTER USER alice WITH PASSWORD 'PASSWORD_A'; - ALTER USER bob SUPERUSER; - -.. _drop-user-statement: - -DROP USER -~~~~~~~~~ - -Dropping a user uses the ``DROP USER`` statement: - -.. productionlist:: - drop_user_statement: DROP USER [ IF EXISTS ] `role_name` - -.. _list-users-statement: - -LIST USERS -~~~~~~~~~~ - -Existing users can be listed using the ``LIST USERS`` statement: - -.. productionlist:: - list_users_statement: LIST USERS - -Note that this statement is equivalent to:: - - LIST ROLES; - -but only roles with the ``LOGIN`` privilege are included in the output. - -Data Control -^^^^^^^^^^^^ - -.. _cql-permissions: - -Permissions -~~~~~~~~~~~ - -Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically: - -- The hierarchy of Data resources, Keyspaces and Tables has the structure ``ALL KEYSPACES`` -> ``KEYSPACE`` -> - ``TABLE``. -- Function resources have the structure ``ALL FUNCTIONS`` -> ``KEYSPACE`` -> ``FUNCTION`` -- Resources representing roles have the structure ``ALL ROLES`` -> ``ROLE`` -- Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ``ALL MBEANS`` -> - ``MBEAN`` - -Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting ``SELECT`` on a ``KEYSPACE`` automatically grants it on all ``TABLES`` in that ``KEYSPACE``. Likewise, granting -a permission on ``ALL FUNCTIONS`` grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace. - -Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes. - -The full set of available permissions is: - -- ``CREATE`` -- ``ALTER`` -- ``DROP`` -- ``SELECT`` -- ``MODIFY`` -- ``AUTHORIZE`` -- ``DESCRIBE`` -- ``EXECUTE`` - -Not all permissions are applicable to every type of resource. For instance, ``EXECUTE`` is only relevant in the context -of functions or mbeans; granting ``EXECUTE`` on a resource representing a table is nonsensical. Attempting to ``GRANT`` -a permission on resource to which it cannot be applied results in an error response. 
The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission. - -=============== =============================== ======================================================================= - Permission Resource Operations -=============== =============================== ======================================================================= - ``CREATE`` ``ALL KEYSPACES`` ``CREATE KEYSPACE`` and ``CREATE TABLE`` in any keyspace - ``CREATE`` ``KEYSPACE`` ``CREATE TABLE`` in specified keyspace - ``CREATE`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` in any keyspace and ``CREATE AGGREGATE`` in any - keyspace - ``CREATE`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE`` in specified keyspace - ``CREATE`` ``ALL ROLES`` ``CREATE ROLE`` - ``ALTER`` ``ALL KEYSPACES`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in any keyspace - ``ALTER`` ``KEYSPACE`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in specified keyspace - ``ALTER`` ``TABLE`` ``ALTER TABLE`` - ``ALTER`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing any existing - ``ALTER`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing in - specified keyspace - ``ALTER`` ``FUNCTION`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing - ``ALTER`` ``ALL ROLES`` ``ALTER ROLE`` on any role - ``ALTER`` ``ROLE`` ``ALTER ROLE`` - ``DROP`` ``ALL KEYSPACES`` ``DROP KEYSPACE`` and ``DROP TABLE`` in any keyspace - ``DROP`` ``KEYSPACE`` ``DROP TABLE`` in specified keyspace - ``DROP`` ``TABLE`` ``DROP TABLE`` - ``DROP`` ``ALL FUNCTIONS`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in any keyspace - ``DROP`` ``ALL FUNCTIONS IN KEYSPACE`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in specified keyspace - ``DROP`` ``FUNCTION`` ``DROP FUNCTION`` - ``DROP`` ``ALL ROLES`` ``DROP ROLE`` on any role - ``DROP`` ``ROLE`` ``DROP ROLE`` - ``SELECT`` ``ALL KEYSPACES`` ``SELECT`` on any table - ``SELECT`` ``KEYSPACE`` ``SELECT`` on any table in specified keyspace - ``SELECT`` ``TABLE`` ``SELECT`` on specified table - ``SELECT`` ``ALL MBEANS`` Call getter methods on any mbean - ``SELECT`` ``MBEANS`` Call getter methods on any mbean matching a wildcard pattern - ``SELECT`` ``MBEAN`` Call getter methods on named mbean - ``MODIFY`` ``ALL KEYSPACES`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table - ``MODIFY`` ``KEYSPACE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table in - specified keyspace - ``MODIFY`` ``TABLE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on specified table - ``MODIFY`` ``ALL MBEANS`` Call setter methods on any mbean - ``MODIFY`` ``MBEANS`` Call setter methods on any mbean matching a wildcard pattern - ``MODIFY`` ``MBEAN`` Call setter methods on named mbean - ``AUTHORIZE`` ``ALL KEYSPACES`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table - ``AUTHORIZE`` ``KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table in - specified keyspace - ``AUTHORIZE`` ``TABLE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified table - ``AUTHORIZE`` ``ALL FUNCTIONS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any function - ``AUTHORIZE`` ``ALL FUNCTIONS IN KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` in specified keyspace - ``AUTHORIZE`` ``FUNCTION`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified function - ``AUTHORIZE`` ``ALL MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean - 
``AUTHORIZE`` ``MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean matching - a wildcard pattern - ``AUTHORIZE`` ``MBEAN`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on named mbean - ``AUTHORIZE`` ``ALL ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on any role - ``AUTHORIZE`` ``ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on specified roles - ``DESCRIBE`` ``ALL ROLES`` ``LIST ROLES`` on all roles or only roles granted to another, - specified role - ``DESCRIBE`` ``ALL MBEANS`` Retrieve metadata about any mbean from the platform's MBeanServer - ``DESCRIBE`` ``MBEANS`` Retrieve metadata about any mbean matching a wildcard patter from the - platform's MBeanServer - ``DESCRIBE`` ``MBEAN`` Retrieve metadata about a named mbean from the platform's MBeanServer - ``EXECUTE`` ``ALL FUNCTIONS`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function, and use of - any function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL FUNCTIONS IN KEYSPACE`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function in specified - keyspace and use of any function in keyspace in ``CREATE AGGREGATE`` - ``EXECUTE`` ``FUNCTION`` ``SELECT``, ``INSERT`` and ``UPDATE`` using specified function and use - of the function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL MBEANS`` Execute operations on any mbean - ``EXECUTE`` ``MBEANS`` Execute operations on any mbean matching a wildcard pattern - ``EXECUTE`` ``MBEAN`` Execute operations on named mbean -=============== =============================== ======================================================================= - -.. _grant-permission-statement: - -GRANT PERMISSION -~~~~~~~~~~~~~~~~ - -Granting a permission uses the ``GRANT PERMISSION`` statement: - -.. productionlist:: - grant_permission_statement: GRANT `permissions` ON `resource` TO `role_name` - permissions: ALL [ PERMISSIONS ] | `permission` [ PERMISSION ] - permission: CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE - resource: ALL KEYSPACES - :| KEYSPACE `keyspace_name` - :| [ TABLE ] `table_name` - :| ALL ROLES - :| ROLE `role_name` - :| ALL FUNCTIONS [ IN KEYSPACE `keyspace_name` ] - :| FUNCTION `function_name` '(' [ `cql_type` ( ',' `cql_type` )* ] ')' - :| ALL MBEANS - :| ( MBEAN | MBEANS ) `string` - -For instance:: - - GRANT SELECT ON ALL KEYSPACES TO data_reader; - -This gives any user with the role ``data_reader`` permission to execute ``SELECT`` statements on any table across all -keyspaces:: - - GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer; - -This give any user with the role ``data_writer`` permission to perform ``UPDATE``, ``INSERT``, ``UPDATE``, ``DELETE`` -and ``TRUNCATE`` queries on all tables in the ``keyspace1`` keyspace:: - - GRANT DROP ON keyspace1.table1 TO schema_owner; - -This gives any user with the ``schema_owner`` role permissions to ``DROP`` ``keyspace1.table1``:: - - GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer; - -This grants any user with the ``report_writer`` role permission to execute ``SELECT``, ``INSERT`` and ``UPDATE`` queries -which use the function ``keyspace1.user_function( int )``:: - - GRANT DESCRIBE ON ALL ROLES TO role_admin; - -This grants any user with the ``role_admin`` role permission to view any and all roles in the system with a ``LIST -ROLES`` statement - -.. _grant-all: - -GRANT ALL -````````` - -When the ``GRANT ALL`` form is used, the appropriate set of permissions is determined automatically based on the target -resource. 
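For instance, assuming a hypothetical ``ops_admin`` role and ``keyspace1`` keyspace::

    GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO ops_admin;

Per the table above, for a keyspace resource this resolves to the permissions that apply to data resources (``CREATE``, ``ALTER``, ``DROP``, ``SELECT``, ``MODIFY`` and ``AUTHORIZE``); permissions that cannot apply to a keyspace, such as ``EXECUTE``, are not granted.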
- -Automatic Granting -`````````````````` - -When a resource is created, via a ``CREATE KEYSPACE``, ``CREATE TABLE``, ``CREATE FUNCTION``, ``CREATE AGGREGATE`` or -``CREATE ROLE`` statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource. - -.. _revoke-permission-statement: - -REVOKE PERMISSION -~~~~~~~~~~~~~~~~~ - -Revoking a permission from a role uses the ``REVOKE PERMISSION`` statement: - -.. productionlist:: - revoke_permission_statement: REVOKE `permissions` ON `resource` FROM `role_name` - -For instance:: - - REVOKE SELECT ON ALL KEYSPACES FROM data_reader; - REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer; - REVOKE DROP ON keyspace1.table1 FROM schema_owner; - REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer; - REVOKE DESCRIBE ON ALL ROLES FROM role_admin; - -.. _list-permissions-statement: - -LIST PERMISSIONS -~~~~~~~~~~~~~~~~ - -Listing granted permissions uses the ``LIST PERMISSIONS`` statement: - -.. productionlist:: - list_permissions_statement: LIST `permissions` [ ON `resource` ] [ OF `role_name` [ NORECURSIVE ] ] - -For instance:: - - LIST ALL PERMISSIONS OF alice; - -Show all permissions granted to ``alice``, including those acquired transitively from any other roles:: - - LIST ALL PERMISSIONS ON keyspace1.table1 OF bob; - -Show all permissions on ``keyspace1.table1`` granted to ``bob``, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to ``keyspace1.table1``. -For example, should ``bob`` have ``ALTER`` permission on ``keyspace1``, that would be included in the results of this -query. Adding the ``NORECURSIVE`` switch restricts the results to only those permissions which were directly granted to -``bob`` or one of ``bob``'s roles:: - - LIST SELECT PERMISSIONS OF carlos; - -Show any permissions granted to ``carlos`` or any of ``carlos``'s roles, limited to ``SELECT`` permissions on any -resource. diff --git a/src/doc/3.11.7/_sources/cql/triggers.rst.txt b/src/doc/3.11.7/_sources/cql/triggers.rst.txt deleted file mode 100644 index db3f53e38..000000000 --- a/src/doc/3.11.7/_sources/cql/triggers.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-triggers: - -Triggers --------- - -Triggers are identified by a name defined by: - -.. productionlist:: - trigger_name: `identifier` - - -.. _create-trigger-statement: - -CREATE TRIGGER -^^^^^^^^^^^^^^ - -Creating a new trigger uses the ``CREATE TRIGGER`` statement: - -.. 
productionlist:: - create_trigger_statement: CREATE TRIGGER [ IF NOT EXISTS ] `trigger_name` - : ON `table_name` - : USING `string` - -For instance:: - - CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex'; - -The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a ``lib/triggers`` subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction. - -.. _drop-trigger-statement: - -DROP TRIGGER -^^^^^^^^^^^^ - -Dropping a trigger uses the ``DROP TRIGGER`` statement: - -.. productionlist:: - drop_trigger_statement: DROP TRIGGER [ IF EXISTS ] `trigger_name` ON `table_name` - -For instance:: - - DROP TRIGGER myTrigger ON myTable; diff --git a/src/doc/3.11.7/_sources/cql/types.rst.txt b/src/doc/3.11.7/_sources/cql/types.rst.txt deleted file mode 100644 index 509a7565e..000000000 --- a/src/doc/3.11.7/_sources/cql/types.rst.txt +++ /dev/null @@ -1,559 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. _data-types: - -Data Types ----------- - -CQL is a typed language and supports a rich set of data types, including :ref:`native types `, -:ref:`collection types `, :ref:`user-defined types `, :ref:`tuple types ` and :ref:`custom -types `: - -.. productionlist:: - cql_type: `native_type` | `collection_type` | `user_defined_type` | `tuple_type` | `custom_type` - - -.. _native-types: - -Native Types -^^^^^^^^^^^^ - -The native types supported by CQL are: - -.. 
productionlist:: - native_type: ASCII - : | BIGINT - : | BLOB - : | BOOLEAN - : | COUNTER - : | DATE - : | DECIMAL - : | DOUBLE - : | DURATION - : | FLOAT - : | INET - : | INT - : | SMALLINT - : | TEXT - : | TIME - : | TIMESTAMP - : | TIMEUUID - : | TINYINT - : | UUID - : | VARCHAR - : | VARINT - -The following table gives additional informations on the native data types, and on which kind of :ref:`constants -` each type supports: - -=============== ===================== ================================================================================== - type constants supported description -=============== ===================== ================================================================================== - ``ascii`` :token:`string` ASCII character string - ``bigint`` :token:`integer` 64-bit signed long - ``blob`` :token:`blob` Arbitrary bytes (no validation) - ``boolean`` :token:`boolean` Either ``true`` or ``false`` - ``counter`` :token:`integer` Counter column (64-bit signed value). See :ref:`counters` for details - ``date`` :token:`integer`, A date (with no corresponding time value). See :ref:`dates` below for details - :token:`string` - ``decimal`` :token:`integer`, Variable-precision decimal - :token:`float` - ``double`` :token:`integer` 64-bit IEEE-754 floating point - :token:`float` - ``duration`` :token:`duration`, A duration with nanosecond precision. See :ref:`durations` below for details - ``float`` :token:`integer`, 32-bit IEEE-754 floating point - :token:`float` - ``inet`` :token:`string` An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that - there is no ``inet`` constant, IP address should be input as strings - ``int`` :token:`integer` 32-bit signed int - ``smallint`` :token:`integer` 16-bit signed int - ``text`` :token:`string` UTF8 encoded string - ``time`` :token:`integer`, A time (with no corresponding date value) with nanosecond precision. See - :token:`string` :ref:`times` below for details - ``timestamp`` :token:`integer`, A timestamp (date and time) with millisecond precision. See :ref:`timestamps` - :token:`string` below for details - ``timeuuid`` :token:`uuid` Version 1 UUID_, generally used as a “conflict-free” timestamp. Also see - :ref:`timeuuid-functions` - ``tinyint`` :token:`integer` 8-bit signed int - ``uuid`` :token:`uuid` A UUID_ (of any version) - ``varchar`` :token:`string` UTF8 encoded string - ``varint`` :token:`integer` Arbitrary-precision integer -=============== ===================== ================================================================================== - -.. _counters: - -Counters -~~~~~~~~ - -The ``counter`` type is used to define *counter columns*. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the :ref:`UPDATE statement -` for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0. - -.. _counter-limitations: - -Counters have a number of important limitations: - -- They cannot be used for columns part of the ``PRIMARY KEY`` of a table. -- A table that contains a counter can only contain counters. In other words, either all the columns of a table outside - the ``PRIMARY KEY`` have the ``counter`` type, or none of them have it. -- Counters do not support :ref:`expiration `. 
-- The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other - words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed). -- Counter updates are, by nature, not `idemptotent `__. An important - consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), - the client has no way to know if the update has been applied or not. In particular, replaying the update may or may - not lead to an over count. - -.. _timestamps: - -Working with timestamps -^^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``timestamp`` type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as `the epoch `__: January 1 1970 at 00:00:00 GMT. - -Timestamps can be input in CQL either using their value as an :token:`integer`, or using a :token:`string` that -represents an `ISO 8601 `__ date. For instance, all of the values below are -valid ``timestamp`` values for Mar 2, 2011, at 04:05:00 AM, GMT: - -- ``1299038700000`` -- ``'2011-02-03 04:05+0000'`` -- ``'2011-02-03 04:05:00+0000'`` -- ``'2011-02-03 04:05:00.000+0000'`` -- ``'2011-02-03T04:05+0000'`` -- ``'2011-02-03T04:05:00+0000'`` -- ``'2011-02-03T04:05:00.000+0000'`` - -The ``+0000`` above is an RFC 822 4-digit time zone specification; ``+0000`` refers to GMT. US Pacific Standard Time is -``-0800``. The time zone may be omitted if desired (``'2011-02-03 04:05:00'``), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible. - -The time of day may also be omitted (``'2011-02-03'`` or ``'2011-02-03+0000'``), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the :ref:`date ` type. - -.. _dates: - -Working with dates -^^^^^^^^^^^^^^^^^^ - -Values of the ``date`` type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at -the center of the range (2^31). Epoch is January 1st, 1970 - -As for :ref:`timestamp `, a date can be input either as an :token:`integer` or using a date -:token:`string`. In the later case, the format should be ``yyyy-mm-dd`` (so ``'2011-02-03'`` for instance). - -.. _times: - -Working with times -^^^^^^^^^^^^^^^^^^ - -Values of the ``time`` type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight. - -As for :ref:`timestamp `, a time can be input either as an :token:`integer` or using a :token:`string` -representing the time. In the later case, the format should be ``hh:mm:ss[.fffffffff]`` (where the sub-second precision -is optional and if provided, can be less than the nanosecond). So for instance, the following are valid inputs for a -time: - -- ``'08:12:54'`` -- ``'08:12:54.123'`` -- ``'08:12:54.123456'`` -- ``'08:12:54.123456789'`` - -.. _durations: - -Working with durations -^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``duration`` type are encoded as 3 signed integer of variable lengths. The first integer represents the -number of months, the second the number of days and the third the number of nanoseconds. 
This is due to the fact that -the number of days in a month can change, and a day can have 23 or 25 hours depending on the daylight saving. -Internally, the number of months and days are decoded as 32 bits integers whereas the number of nanoseconds is decoded -as a 64 bits integer. - -A duration can be input as: - - #. ``(quantity unit)+`` like ``12h30m`` where the unit can be: - - * ``y``: years (12 months) - * ``mo``: months (1 month) - * ``w``: weeks (7 days) - * ``d``: days (1 day) - * ``h``: hours (3,600,000,000,000 nanoseconds) - * ``m``: minutes (60,000,000,000 nanoseconds) - * ``s``: seconds (1,000,000,000 nanoseconds) - * ``ms``: milliseconds (1,000,000 nanoseconds) - * ``us`` or ``µs`` : microseconds (1000 nanoseconds) - * ``ns``: nanoseconds (1 nanosecond) - #. ISO 8601 format: ``P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W`` - #. ISO 8601 alternative format: ``P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]`` - -For example:: - - INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s); - INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S); - INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09); - -.. _duration-limitation: - -Duration columns cannot be used in a table's ``PRIMARY KEY``. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if ``1mo`` is greater than ``29d`` without a date -context. - -A ``1d`` duration is not equals to a ``24h`` one as the duration type has been created to be able to support daylight -saving. - -.. _collections: - -Collections -^^^^^^^^^^^ - -CQL supports 3 kind of collections: :ref:`maps`, :ref:`sets` and :ref:`lists`. The types of those collections is defined -by: - -.. productionlist:: - collection_type: MAP '<' `cql_type` ',' `cql_type` '>' - : | SET '<' `cql_type` '>' - : | LIST '<' `cql_type` '>' - -and their values can be inputd using collection literals: - -.. productionlist:: - collection_literal: `map_literal` | `set_literal` | `list_literal` - map_literal: '{' [ `term` ':' `term` (',' `term` : `term`)* ] '}' - set_literal: '{' [ `term` (',' `term`)* ] '}' - list_literal: '[' [ `term` (',' `term`)* ] ']' - -Note however that neither :token:`bind_marker` nor ``NULL`` are supported inside collection literals. - -Noteworthy characteristics -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Collections are meant for storing/denormalizing relatively small amount of data. They work well for things like “the -phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all -messages sent by a user”, “events registered by a sensor”...), then collections are not appropriate and a specific table -(with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy -characteristics and limitations: - -- Individual collections are not indexed internally. Which means that even to access a single element of a collection, - the while collection has to be read (and reading one is not paged internally). -- While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. - Further, some lists operations are not idempotent by nature (see the section on :ref:`lists ` below for - details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when - possible. 
- -Please note that while some of those limitations may or may not be removed/improved upon in the future, it is a -anti-pattern to use a (single) collection to store large amounts of data. - -.. _maps: - -Maps -~~~~ - -A ``map`` is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:: - - CREATE TABLE users ( - id text PRIMARY KEY, - name text, - favs map // A map of text keys, and text values - ); - - INSERT INTO users (id, name, favs) - VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' }); - - // Replace the existing map entirely. - UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith'; - -Further, maps support: - -- Updating or inserting one or more elements:: - - UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith'; - UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith'; - -- Removing one or more element (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - DELETE favs['author'] FROM users WHERE id = 'jsmith'; - UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith'; - - Note that for removing multiple elements in a ``map``, you remove from it a ``set`` of keys. - -Lastly, TTLs are allowed for both ``INSERT`` and ``UPDATE``, but in both case the TTL set only apply to the newly -inserted/updated elements. In other words:: - - UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith'; - -will only apply the TTL to the ``{ 'color' : 'green' }`` record, the rest of the map remaining unaffected. - - -.. _sets: - -Sets -~~~~ - -A ``set`` is a (sorted) collection of unique values. You can define and insert a map with:: - - CREATE TABLE images ( - name text PRIMARY KEY, - owner text, - tags set // A set of text values - ); - - INSERT INTO images (name, owner, tags) - VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' }); - - // Replace the existing set entirely - UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg'; - -Further, sets support: - -- Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):: - - UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg'; - -- Removing one or multiple elements (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg'; - -Lastly, as for :ref:`maps `, TTLs if used only apply to the newly inserted values. - -.. _lists: - -Lists -~~~~~ - -.. note:: As mentioned above and further discussed at the end of this section, lists have limitations and specific - performance considerations that you should take into account before using them. In general, if you can use a - :ref:`set ` instead of list, always prefer a set. - -A ``list`` is a (sorted) collection of non-unique values where elements are ordered by there position in the list. 
You -can define and insert a list with:: - - CREATE TABLE plays ( - id text PRIMARY KEY, - game text, - players int, - scores list // A list of integers - ) - - INSERT INTO plays (id, game, players, scores) - VALUES ('123-afde', 'quake', 3, [17, 4, 2]); - - // Replace the existing list entirely - UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde'; - -Further, lists support: - -- Appending and prepending values to a list:: - - UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde'; - UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde'; - -- Setting the value at a particular position in the list. This imply that the list has a pre-existing element for that - position or an error will be thrown that the list is too small:: - - UPDATE plays SET scores[1] = 7 WHERE id = '123-afde'; - -- Removing an element by its position in the list. This imply that the list has a pre-existing element for that position - or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the - list size will be diminished by 1, shifting the position of all the elements following the one deleted:: - - DELETE scores[1] FROM plays WHERE id = '123-afde'; - -- Deleting *all* the occurrences of particular values in the list (if a particular element doesn't occur at all in the - list, it is simply ignored and no error is thrown):: - - UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; - -.. warning:: The append and prepend operations are not idempotent by nature. So in particular, if one of these operation - timeout, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value - twice. - -.. warning:: Setting and removing an element by position and removing occurences of particular values incur an internal - *read-before-write*. They will thus run more slowly and take more ressources than usual updates (with the exclusion - of conditional write that have their own cost). - -Lastly, as for :ref:`maps `, TTLs when used only apply to the newly inserted values. - -.. _udts: - -User-Defined Types -^^^^^^^^^^^^^^^^^^ - -CQL support the definition of user-defined types (UDT for short). Such a type can be created, modified and removed using -the :token:`create_type_statement`, :token:`alter_type_statement` and :token:`drop_type_statement` described below. But -once created, a UDT is simply referred to by its name: - -.. productionlist:: - user_defined_type: `udt_name` - udt_name: [ `keyspace_name` '.' ] `identifier` - - -Creating a UDT -~~~~~~~~~~~~~~ - -Creating a new user-defined type is done using a ``CREATE TYPE`` statement defined by: - -.. productionlist:: - create_type_statement: CREATE TYPE [ IF NOT EXISTS ] `udt_name` - : '(' `field_definition` ( ',' `field_definition` )* ')' - field_definition: `identifier` `cql_type` - -A UDT has a name (used to declared columns of that type) and is a set of named and typed fields. Fields name can be any -type, including collections or other UDT. For instance:: - - CREATE TYPE phone ( - country_code int, - number text, - ) - - CREATE TYPE address ( - street text, - city text, - zip text, - phones map - ) - - CREATE TABLE user ( - name text PRIMARY KEY, - addresses map> - ) - -Note that: - -- Attempting to create an already existing type will result in an error unless the ``IF NOT EXISTS`` option is used. If - it is used, the statement will be a no-op if the type already exists. 
-- A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At - creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in - the current keyspace. -- As of Cassandra |version|, UDTs have to be frozen in most cases, hence the ``frozen
`` in the table definition - above. Please see the section on :ref:`frozen ` for more details. - -UDT literals -~~~~~~~~~~~~ - -Once a used-defined type has been created, value can be input using a UDT literal: - -.. productionlist:: - udt_literal: '{' `identifier` ':' `term` ( ',' `identifier` ':' `term` )* '}' - -In other words, a UDT literal is like a :ref:`map ` literal but its keys are the names of the fields of the type. -For instance, one could insert into the table define in the previous section using:: - - INSERT INTO user (name, addresses) - VALUES ('z3 Pr3z1den7', { - 'home' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'cell' : { country_code: 1, number: '202 456-1111' }, - 'landline' : { country_code: 1, number: '...' } } - }, - 'work' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'fax' : { country_code: 1, number: '...' } } - } - }) - -To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some field -(in which case those will be ``null``). - -Altering a UDT -~~~~~~~~~~~~~~ - -An existing user-defined type can be modified using an ``ALTER TYPE`` statement: - -.. productionlist:: - alter_type_statement: ALTER TYPE `udt_name` `alter_type_modification` - alter_type_modification: ADD `field_definition` - : | RENAME `identifier` TO `identifier` ( `identifier` TO `identifier` )* - -You can: - -- add a new field to the type (``ALTER TYPE address ADD country text``). That new field will be ``null`` for any values - of the type created before the addition. -- rename the fields of the type (``ALTER TYPE address RENAME zip TO zipcode``). - -Dropping a UDT -~~~~~~~~~~~~~~ - -You can drop an existing user-defined type using a ``DROP TYPE`` statement: - -.. productionlist:: - drop_type_statement: DROP TYPE [ IF EXISTS ] `udt_name` - -Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error. - -If the type dropped does not exist, an error will be returned unless ``IF EXISTS`` is used, in which case the operation -is a no-op. - -.. _tuples: - -Tuples -^^^^^^ - -CQL also support tuples and tuple types (where the elements can be of different types). Functionally, tuples can be -though as anonymous UDT with anonymous fields. Tuple types and tuple literals are defined by: - -.. productionlist:: - tuple_type: TUPLE '<' `cql_type` ( ',' `cql_type` )* '>' - tuple_literal: '(' `term` ( ',' `term` )* ')' - -and can be used thusly:: - - CREATE TABLE durations ( - event text, - duration tuple, - ) - - INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours')); - -Unlike other "composed" types (collections and UDT), a tuple is always :ref:`frozen ` (without the need of the -`frozen` keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). -Also, a tuple literal should always have the same number of value than declared in the type it is a tuple of (some of -those values can be null but they need to be explicitly declared as so). - -.. _custom-types: - -Custom Types -^^^^^^^^^^^^ - -.. note:: Custom types exists mostly for backward compatiliby purposes and their usage is discouraged. Their usage is - complex, not user friendly and the other provided types, particularly :ref:`user-defined types `, should almost - always be enough. - -A custom type is defined by: - -.. 
productionlist:: - custom_type: `string` - -A custom type is a :token:`string` that contains the name of Java class that extends the server side ``AbstractType`` -class and that can be loaded by Cassandra (it should thus be in the ``CLASSPATH`` of every node running Cassandra). That -class will define what values are valid for the type and how the time sorts when used for a clustering column. For any -other purpose, a value of a custom type is the same than that of a ``blob``, and can in particular be input using the -:token:`blob` literal syntax. diff --git a/src/doc/3.11.7/_sources/data_modeling/index.rst.txt b/src/doc/3.11.7/_sources/data_modeling/index.rst.txt deleted file mode 100644 index dde031a19..000000000 --- a/src/doc/3.11.7/_sources/data_modeling/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Data Modeling -============= - -.. todo:: TODO diff --git a/src/doc/3.11.7/_sources/development/code_style.rst.txt b/src/doc/3.11.7/_sources/development/code_style.rst.txt deleted file mode 100644 index 5a486a4a3..000000000 --- a/src/doc/3.11.7/_sources/development/code_style.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Code Style -========== - -General Code Conventions ------------------------- - - - The Cassandra project follows `Sun's Java coding conventions `_ with an important exception: ``{`` and ``}`` are always placed on a new line - -Exception handling ------------------- - - - Never ever write ``catch (...) {}`` or ``catch (...) { logger.error() }`` merely to satisfy Java's compile-time exception checking. Always propagate the exception up or throw ``RuntimeException`` (or, if it "can't happen," ``AssertionError``). This makes the exceptions visible to automated tests. - - Avoid propagating up checked exceptions that no caller handles. Rethrow as ``RuntimeException`` (or ``IOError``, if that is more applicable). 
- - Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don't hide it behind a warn; if it isn't, no need for the warning. - - If you genuinely know an exception indicates an expected condition, it's okay to ignore it BUT this must be explicitly explained in a comment. - -Boilerplate ------------ - - - Avoid redundant ``@Override`` annotations when implementing abstract or interface methods. - - Do not implement equals or hashcode methods unless they are actually needed. - - Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in "real" methods to either.) - - Prefer requiring initialization in the constructor to setters. - - Avoid redundant ``this`` references to member fields or methods. - - Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it. - - Always include braces for nested levels of conditionals and loops. Only avoid braces for single level. - -Multiline statements --------------------- - - - Try to keep lines under 120 characters, but use good judgement -- it's better to exceed 120 by a little, than split a line that has no natural splitting points. - - When splitting inside a method call, use one line per parameter and align them, like this: - - :: - - SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), - columnFamilies.size(), - StorageService.getPartitioner()); - - - When splitting a ternary, use one line per clause, carry the operator, and align like this: - - :: - - var = bar == null - ? doFoo() - : doBar(); - -Whitespace ----------- - - - Please make sure to use 4 spaces instead of the tab character for all your indentation. - - Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn't have to pay attention to whitespace diffs. - -Imports -------- - -Please observe the following order for your imports:: - - java - [blank line] - com.google.common - org.apache.commons - org.junit - org.slf4j - [blank line] - everything else alphabetically - -Format files for IDEs ---------------------- - - - IntelliJ: `intellij-codestyle.jar `_ - - IntelliJ 13: `gist for IntelliJ 13 `_ (this is a work in progress, still working on javadoc, ternary style, line continuations, etc) - - Eclipse (https://github.com/tjake/cassandra-style-eclipse) - - - diff --git a/src/doc/3.11.7/_sources/development/how_to_commit.rst.txt b/src/doc/3.11.7/_sources/development/how_to_commit.rst.txt deleted file mode 100644 index d956c72d8..000000000 --- a/src/doc/3.11.7/_sources/development/how_to_commit.rst.txt +++ /dev/null @@ -1,75 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. 
highlight:: none - -How-to Commit -============= - -If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself. - -Here is how committing and merging will usually look for merging and pushing for tickets that follow the convention (if patch-based): - -Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch). - -On cassandra-3.0: - #. ``git am -3 12345-3.0.patch`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git apply -3 12345-3.3.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git apply -3 12345-trunk.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk —atomic`` - -Same scenario, but a branch-based contribution: - -On cassandra-3.0: - #. ``git cherry-pick `` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit —amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk —atomic`` - -.. tip:: - - Notes on git flags: - ``-3`` flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply. - - ``—atomic`` flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per each branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue. - -.. tip:: - - The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. - curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch - diff --git a/src/doc/3.11.7/_sources/development/how_to_review.rst.txt b/src/doc/3.11.7/_sources/development/how_to_review.rst.txt deleted file mode 100644 index dc9774362..000000000 --- a/src/doc/3.11.7/_sources/development/how_to_review.rst.txt +++ /dev/null @@ -1,71 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. 
-.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Review Checklist -**************** - -When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process: - -**General** - - * Does it conform to the :doc:`code_style` guidelines? - * Is there any redundant or duplicate code? - * Is the code as modular as possible? - * Can any singletons be avoided? - * Can any of the code be replaced with library functions? - * Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem? - -**Error-Handling** - - * Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded? - * Where third-party utilities are used, are returning errors being caught? - * Are invalid parameter values handled? - * Are any Throwable/Exceptions passed to the JVMStabilityInspector? - * Are errors well-documented? Does the error message tell the user how to proceed? - * Do exceptions propagate to the appropriate level in the code? - -**Documentation** - - * Do comments exist and describe the intent of the code (the "why", not the "how")? - * Are javadocs added where appropriate? - * Is any unusual behavior or edge-case handling described? - * Are data structures and units of measurement explained? - * Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’? - * Does the code self-document via clear naming, abstractions, and flow control? - * Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed? - * Is the ticket tagged with "client-impacting" and "doc-impacting", where appropriate? - * Has lib/licences been updated for third-party libs? Are they Apache License compatible? - * Is the Component on the JIRA ticket set appropriately? - -**Testing** - - * Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc. - * Do tests exist and are they comprehensive? - * Do unit tests actually test that the code is performing the intended functionality? - * Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse? - * If the code may be affected by multi-node clusters, are there dtests? - * If the code may take a long time to test properly, are there CVH tests? - * Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions? - * If patch affects read/write path, did we test for performance regressions w/multiple workloads? - * If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature? - -**Logging** - - * Are logging statements logged at the correct level? - * Are there logs in the critical path that could affect performance? - * Is there any log that could be added to communicate status or troubleshoot potential problems in this feature? - * Can any unnecessary logging statement be removed? 
- diff --git a/src/doc/3.11.7/_sources/development/ide.rst.txt b/src/doc/3.11.7/_sources/development/ide.rst.txt deleted file mode 100644 index 298649576..000000000 --- a/src/doc/3.11.7/_sources/development/ide.rst.txt +++ /dev/null @@ -1,161 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Building and IDE Integration -**************************** - -Building From Source -==================== - -Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using `Java 8 `_, `Git `_ and `Ant `_. - -The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:: - - git clone http://git-wip-us.apache.org/repos/asf/cassandra.git cassandra-trunk - -Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:: - - git checkout cassandra-3.0 - -You can get a list of available branches with ``git branch``. - -Finally build Cassandra using ant:: - - ant - -This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled. - -.. hint:: - - You can setup multiple working trees for different Cassandra versions from the same repository using `git-worktree `_. - -.. note:: - - `Bleeding edge development snapshots `_ of Cassandra are available from Jenkins continuous integration. - -Setting up Cassandra in IntelliJ IDEA -===================================== - -`IntelliJ IDEA `_ by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra. - -Setup Cassandra as a Project (C* 2.1 and newer) ------------------------------------------------ - -Since 2.1.5, there is a new ant target: ``generate-idea-files``. Please see our `wiki `_ for instructions for older Cassandra versions. - -Please clone and build Cassandra as described above and execute the following steps: - -1. Once Cassandra is built, generate the IDEA files using ant: - -:: - - ant generate-idea-files - -2. Start IDEA - -3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA's File menu - -The project generated by the ant task ``generate-idea-files`` contains nearly everything you need to debug Cassandra and execute unit tests. 
- - * Run/debug defaults for JUnit - * Run/debug configuration for Cassandra daemon - * License header for Java source files - * Cassandra code style - * Inspections - -Setting up Cassandra in Eclipse -=============================== - -Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the `download page `_. The following guide was created with "Eclipse IDE for Java Developers". - -These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x. - -Project Settings ----------------- - -**It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.** - - * Clone and build Cassandra as described above. - * Run ``ant generate-eclipse-files`` to create the Eclipse settings. - * Start Eclipse. - * Select ``File->Import->Existing Projects into Workspace->Select git directory``. - * Make sure "cassandra-trunk" is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above). - * Confirm "Finish" to have your project imported. - -You should now be able to find the project as part of the "Package Explorer" or "Project Explorer" without having Eclipse complain about any errors after building the project automatically. - -Unit Tests ----------- - -Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting ``Run As->JUnit Test``. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting ``Debug As->JUnit Test``. - -Alternatively all unit tests can be run from the command line as described in :doc:`testing` - -Debugging Cassandra Using Eclipse ---------------------------------- - -There are two ways how to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ``./bin/cassandra`` script and connect to the JVM through `remotely `_ from Eclipse or start Cassandra from Eclipse right away. - -Starting Cassandra From Command Line -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * Set environment variable to define remote debugging options for the JVM: - ``export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`` - * Start Cassandra by executing the ``./bin/cassandra`` - -Afterwards you should be able to connect to the running Cassandra process through the following steps: - -From the menu, select ``Run->Debug Configurations..`` - -.. image:: images/eclipse_debug0.png - -Create new remote application - -.. image:: images/eclipse_debug1.png - -Configure connection settings by specifying a name and port 1414 - -.. image:: images/eclipse_debug2.png - -Afterwards confirm "Debug" to connect to the JVM and start debugging Cassandra! - -Starting Cassandra From Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cassandra can also be started directly from Eclipse if you don't want to use the command line. - -From the menu, select ``Run->Run Configurations..`` - -.. image:: images/eclipse_debug3.png - -Create new application - -.. image:: images/eclipse_debug4.png - -Specify name, project and main class ``org.apache.cassandra.service.CassandraDaemon`` - -.. image:: images/eclipse_debug5.png - -Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed. 
- -:: - - -Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true - -.. image:: images/eclipse_debug6.png - -Now just confirm "Debug" and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging! - diff --git a/src/doc/3.11.7/_sources/development/index.rst.txt b/src/doc/3.11.7/_sources/development/index.rst.txt deleted file mode 100644 index aefc5999c..000000000 --- a/src/doc/3.11.7/_sources/development/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Development -********************* - -.. toctree:: - :maxdepth: 2 - - ide - testing - patches - code_style - how_to_review - how_to_commit diff --git a/src/doc/3.11.7/_sources/development/patches.rst.txt b/src/doc/3.11.7/_sources/development/patches.rst.txt deleted file mode 100644 index e3d968fab..000000000 --- a/src/doc/3.11.7/_sources/development/patches.rst.txt +++ /dev/null @@ -1,125 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Contributing Code Changes -************************* - -Choosing What to Work on -======================== - -Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that requires changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing. 
- -As a general rule of thumb: - * Major new features and significant changes to the code based will likely not going to be accepted without deeper discussion within the `developer community `_ - * Bug fixes take higher priority compared to features - * The extend to which tests are required depend on how likely your changes will effect the stability of Cassandra in production. Tooling changes requires fewer tests than storage engine changes. - * Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately - -.. hint:: - - Not sure what to work? Just pick an issue tagged with the `low hanging fruit label `_ in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners. - -Before You Start Coding -======================= - -Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefor it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or IRC channel listed on our `community page `_. - -You should also - * Avoid redundant work by searching for already reported issues in `JIRA `_ - * Create a new issue early in the process describing what you're working on - not just after finishing your patch - * Link related JIRA issues with your own ticket to provide a better context - * Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code - * Ping people who you actively like to ask for advice on JIRA by `mentioning users `_ - -There are also some fixed rules that you need to be aware: - * Patches will only be applied to branches by following the release model - * Code must be testable - * Code must follow the :doc:`code_style` convention - * Changes must not break compatibility between different Cassandra versions - * Contributions must be covered by the Apache License - -Choosing the Right Branches to Work on -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are currently multiple Cassandra versions maintained in individual branches: - -======= ====== -Version Policy -======= ====== -3.x Tick-tock (see below) -3.0 Bug fixes only -2.2 Bug fixes only -2.1 Critical bug fixes only -======= ====== - -Corresponding branches in git are easy to recognize as they are named ``cassandra-`` (e.g. ``cassandra-3.0``). The ``trunk`` branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases. - -Tick-Tock Releases -"""""""""""""""""" - -New releases created as part of the `tick-tock release process `_ will either focus on stability (odd version numbers) or introduce new features (even version numbers). Any code for new Cassandra features you should be based on the latest, unreleased 3.x branch with even version number or based on trunk. - -Bug Fixes -""""""""" - -Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be ``cassandra-2.1`` -> ``cassandra-2.2`` -> ``cassandra-3.0`` -> ``cassandra-3.x`` -> ``trunk``. But don't worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn't very common. 
As a contributor, you're also not expected to provide a single patch for each version. What you need to do however is: - - * Be clear about which versions you could verify to be affected by the bug - * For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on case by case bases - * If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0) - * Test if the patch can be merged cleanly across branches in the direction listed above - * Be clear which branches may need attention by the committer or even create custom patches for those if you can - -Creating a Patch -================ - -So you've finished coding and the great moment arrives: it's time to submit your patch! - - 1. Create a branch for your changes if you haven't done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. ``git checkout -b 12345-3.0`` - 2. Verify that you follow Cassandra's :doc:`code_style` - 3. Make sure all tests (including yours) pass using ant as described in :doc:`testing`. If you suspect a test failure is unrelated to your change, it may be useful to check the test's status by searching the issue tracker or looking at `CI `_ results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites. - 4. Consider going through the :doc:`how_to_review` for your code. This will help you to understand how others will consider your change for inclusion. - 5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either: - - a. Attach a patch to JIRA with a single squashed commit in it (per branch), or - b. Squash the commits in-place in your branches into one - - 6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch ending with the following statement on the last line: ``patch by X; reviewed by Y for CASSANDRA-ZZZZZ`` - 7. When you're happy with the result, create a patch: - - :: - - git add - git commit -m '' - git format-patch HEAD~1 - mv (e.g. 12345-trunk.txt, 12345-3.0.txt) - - Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch: - - :: - - git push --set-upstream origin 12345-3.0 - - 8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless. - 9. Attach the newly generated patch to the ticket/add a link to your branch and click "Submit Patch" at the top of the ticket. This will move the ticket into "Patch Available" status, indicating that your submission is ready for review. - 10. Wait for other developers or committers to review it and hopefully +1 the ticket (see :doc:`how_to_review`). If your change does not receive a +1, do not be discouraged. 
If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable. - 11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into "Patch Available" once again. - -Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work. - - diff --git a/src/doc/3.11.7/_sources/development/testing.rst.txt b/src/doc/3.11.7/_sources/development/testing.rst.txt deleted file mode 100644 index b8eea6b28..000000000 --- a/src/doc/3.11.7/_sources/development/testing.rst.txt +++ /dev/null @@ -1,89 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Testing -******* - -Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you're working on. - - -Unit Testing -============ - -The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the ``test/unit`` directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example. - -.. code-block:: java - - @Test - public void testBatchAndList() throws Throwable - { - createTable("CREATE TABLE %s (k int PRIMARY KEY, l list)"); - execute("BEGIN BATCH " + - "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " + - "APPLY BATCH"); - - assertRows(execute("SELECT l FROM %s WHERE k = 0"), - row(list(1, 2, 3))); - } - -Unit tests can be run from the command line using the ``ant test`` command, ``ant test -Dtest.name=`` to execute a test suite or ``ant testsome -Dtest.name= -Dtest.methods=[,testmethod2]`` for individual tests. 
For example, to run all test methods in the ``org.apache.cassandra.cql3.SimpleQueryTest`` class, you would run:: - - ant test -Dtest.name=SimpleQueryTest - -To run only the ``testStaticCompactTables()`` test method from that class, you would run:: - - ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables - -Long running tests ------------------- - -Test that consume a significant amount of time during execution can be found in the ``test/long`` directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under ``test/long`` only when using the ``ant long-test`` target. - -DTests -====== - -One way of doing integration or system testing at larger scale is by using `dtest `_, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ``ccmlib`` from the `ccm `_ project. Dtests will setup clusters using this library just as you do running ad-hoc ``ccm`` commands on your local machine. Afterwards dtests will use the `Python driver `_ to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes. - -Using dtests helps us to prevent regression bugs by continually executing tests on the `CI server `_ against new patches. For frequent contributors, this Jenkins is set up to build branches from their GitHub repositories. It is likely that your reviewer will use this Jenkins instance to run tests for your patch. Read more on the motivation behind the CI server `here `_. - -The best way to learn how to write dtests is probably by reading the introduction "`How to Write a Dtest `_" and by looking at existing, recently updated tests in the project. New tests must follow certain `style conventions `_ that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefor you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR. - -Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will able to support you, and in some cases they may offer to write a dtest for you. - -Performance Testing -=================== - -Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable. - -Cassandra Stress Tool ---------------------- - -TODO: `CASSANDRA-12365 `_ - -cstar_perf ----------- - -Another tool available on github is `cstar_perf `_ that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it. - -CircleCI --------- -Cassandra ships with a default `CircleCI `_ configuration, to enable running tests on your branches, you need to go the CircleCI website, click "Login" and log in with your github account. Then you need to give CircleCI permission to watch your repositories. 
Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ``ant eclipse-warnings`` and ``ant test`` will be run. If you up the parallelism to 4, it also runs ``ant long-test``, ``ant test-compression`` and ``ant stress-test`` - - diff --git a/src/doc/3.11.7/_sources/faq/index.rst.txt b/src/doc/3.11.7/_sources/faq/index.rst.txt deleted file mode 100644 index d985e3716..000000000 --- a/src/doc/3.11.7/_sources/faq/index.rst.txt +++ /dev/null @@ -1,298 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Frequently Asked Questions -========================== - -- :ref:`why-cant-list-all` -- :ref:`what-ports` -- :ref:`what-happens-on-joins` -- :ref:`asynch-deletes` -- :ref:`one-entry-ring` -- :ref:`can-large-blob` -- :ref:`nodetool-connection-refused` -- :ref:`to-batch-or-not-to-batch` -- :ref:`selinux` -- :ref:`how-to-unsubscribe` -- :ref:`cassandra-eats-all-my-memory` -- :ref:`what-are-seeds` -- :ref:`are-seeds-SPOF` -- :ref:`why-message-dropped` -- :ref:`oom-map-failed` -- :ref:`what-on-same-timestamp-update` -- :ref:`why-bootstrapping-stream-error` - -.. _why-cant-list-all: - -Why can't I set ``listen_address`` to listen on 0.0.0.0 (all my addresses)? ---------------------------------------------------------------------------- - -Cassandra is a gossip-based distributed system and ``listen_address`` is the address a node tells other nodes to reach -it at. Telling other nodes "contact me on any of my addresses" is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen. - -If you don't want to manually specify an IP to ``listen_address`` for each node in your cluster (understandable!), leave -it blank and Cassandra will use ``InetAddress.getLocalHost()`` to pick an address. Then it's up to you or your ops team -to make things resolve correctly (``/etc/hosts/``, dns, etc). - -One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769). - -See :jira:`256` and :jira:`43` for more gory details. - -.. _what-ports: - -What ports does Cassandra use? ------------------------------- - -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX (and 9160 for the deprecated Thrift interface). The internode communication and native protocol ports -are configurable in the :ref:`cassandra-yaml`. The JMX port is configurable in ``cassandra-env.sh`` (through JVM -options). All ports are TCP. - -.. _what-happens-on-joins: - -What happens to existing data in my cluster when I add new nodes? 
------------------------------------------------------------------ - -When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See :ref:`topology-changes`. - -.. _asynch-deletes: - -I delete data from Cassandra, but disk usage stays the same. What gives? ------------------------------------------------------------------------- - -Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can't actually be removed when you perform a delete; instead, a marker (also called a "tombstone") is written to indicate the value's new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged completely and the corresponding disk space recovered. See :ref:`compaction` for more detail. - -.. _one-entry-ring: - -Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring? ------------------------------------------------------------------------------------------------------------------- - -This happens when you have the same token assigned to each node. Don't do that. - -Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes. - -The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random token on the next restart. - -.. _change-replication-factor: - -Can I change the replication factor (of a keyspace) on a live cluster? ---------------------------------------------------------------------- - -Yes, but it will require running repair (or cleanup) to change the replica count of existing data: - -- :ref:`Alter ` the replication factor for the desired keyspace (using cqlsh for instance). -- If you're reducing the replication factor, run ``nodetool cleanup`` on the cluster to remove surplus replicated data. - Cleanup runs on a per-node basis. -- If you're increasing the replication factor, run ``nodetool repair`` to ensure data is replicated according to the new - configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster - performance. It's highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will - most likely swamp it. - -.. _can-large-blob: - -Can I Store (large) BLOBs in Cassandra? ---------------------------------------- - -Cassandra isn't optimized for large file or BLOB storage and a single ``blob`` value is always read and sent to the client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks. - -Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the ``max_mutation_size_in_kb`` configuration of the :ref:`cassandra-yaml` file (which defaults to half of ``commitlog_segment_size_in_mb``, which itself defaults to 32MB). - -.. _nodetool-connection-refused: - -Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives? --------------------------------------------------------------------------------------- - -Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on each end of the exchange.
Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions. - -If you are not using DNS, then make sure that your ``/etc/hosts`` files are accurate on both ends. If that fails, try -setting the ``-Djava.rmi.server.hostname=`` JVM option near the bottom of ``cassandra-env.sh`` to an -interface that you can reach from the remote machine. - -.. _to-batch-or-not-to-batch: - -Will batching my operations speed up my bulk load? --------------------------------------------------- - -No. Using batches to load data will generally just add "spikes" of latency. Use asynchronous INSERTs instead, or use -true :ref:`bulk-loading`. - -An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch -stay reasonable). But never ever blindly batch everything! - -.. _selinux: - -On RHEL nodes are unable to join the ring ------------------------------------------ - -Check if `SELinux `__ is on; if it is, turn it off. - -.. _how-to-unsubscribe: - -How do I unsubscribe from the email list? ------------------------------------------ - -Send an email to ``user-unsubscribe@cassandra.apache.org``. - -.. _cassandra-eats-all-my-memory: - -Why does top report that Cassandra is using a lot more memory than the Java heap max? -------------------------------------------------------------------------------------- - -Cassandra uses `Memory Mapped Files `__ (mmap) internally. That is, we -use the operating system's virtual memory system to map a number of on-disk files into the Cassandra process' address -space. This will "use" virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that. - -What matters from the perspective of "memory use" in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap'd /dev/zero, which represent real memory used. The key issue is that for a mmap'd file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write. - -The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don't -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail `here `__. - -.. _what-are-seeds: - -What are seeds? ---------------- - -Seeds are used during startup to discover the cluster. - -If you configure your nodes to refer some node as seed, nodes in your ring tend to send Gossip message to seeds more -often (also see the :ref:`section on gossip `) than to non-seeds. In other words, seeds are worked as hubs of -Gossip network. With seeds, each node can detect status changes of other nodes quickly. - -Seeds are also referred by new nodes on bootstrap to learn other nodes in ring. When you add a new node to ring, you -need to specify at least one live seed to contact. 
Once a node joins the ring, it learns about the other nodes, so it doesn't need seeds on subsequent boots. - -You can make a node a seed at any time. There is nothing special about seed nodes. If you list the node in the seed list it is a seed. - -Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you do not have to worry about bootstrap at all. - -Recommended usage of seeds: - -- pick two (or more) nodes per data center as seed nodes. -- sync the seed list to all your nodes - -.. _are-seeds-SPOF: - -Does single seed mean single point of failure? ----------------------------------------------- - -The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems. - -.. _cant-call-jmx-method: - -Why can't I call jmx method X on jconsole? ------------------------------------------- - -Some JMX operations use array arguments and, as jconsole doesn't support array arguments, those operations can't be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations or need an array-capable JMX monitoring tool. - -.. _why-message-dropped: - -Why do I see "... messages dropped ..." in the logs? ----------------------------------------------------- - -This is a symptom of load shedding -- Cassandra defending itself against more requests than it can handle. - -Internode messages which are received by a node, but do not get to be processed within their proper timeout (see ``read_request_timeout``, ``write_request_timeout``, ... in the :ref:`cassandra-yaml`), are dropped rather than processed (since the coordinator node will no longer be waiting for a response). - -For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result. - -For reads, this means a read request may not have completed. - -Load shedding is part of the Cassandra architecture; if this is a persistent issue it is generally a sign of an overloaded node or cluster. - -.. _oom-map-failed: - -Cassandra dies with ``java.lang.OutOfMemoryError: Map failed`` --------------------------------------------------------------- - -If Cassandra is dying **specifically** with the "Map failed" message, it means the OS is denying Java the ability to lock more memory. In Linux, this typically means memlock is limited. Check ``/proc/<pid>/limits`` to verify this and raise it (e.g., via ulimit in bash). You may also need to increase ``vm.max_map_count``. Note that the Debian package handles this for you automatically. - - -.. _what-on-same-timestamp-update: - -What happens if two updates are made with the same timestamp? ------------------------------------------------------------- - -Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: first, deletes take precedence over inserts/updates.
Second, if there are two updates, the one with the lexically larger -value is selected. - -.. _why-bootstrapping-stream-error: - -Why bootstrapping a new node fails with a "Stream failed" error? ----------------------------------------------------------------- - -Two main possibilities: - -#. the GC may be creating long pauses disrupting the streaming process -#. compactions happening in the background hold streaming long enough that the TCP connection fails - -In the first case, regular GC tuning advices apply. In the second case, you need to set TCP keepalive to a lower value -(default is very high on Linux). Try to just run the following:: - - $ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5 - -To make those settings permanent, add them to your ``/etc/sysctl.conf`` file. - -Note: `GCE `__'s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment. - - - - - - - - - - - diff --git a/src/doc/3.11.7/_sources/getting_started/configuring.rst.txt b/src/doc/3.11.7/_sources/getting_started/configuring.rst.txt deleted file mode 100644 index 27fac7872..000000000 --- a/src/doc/3.11.7/_sources/getting_started/configuring.rst.txt +++ /dev/null @@ -1,67 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra ---------------------- - -For running Cassandra on a single node, the steps above are enough, you don't really need to change any configuration. -However, when you deploy a cluster of nodes, or use clients that are not on the same host, then there are some -parameters that must be changed. - -The Cassandra configuration files can be found in the ``conf`` directory of tarballs. For packages, the configuration -files will be located in ``/etc/cassandra``. - -Main runtime properties -^^^^^^^^^^^^^^^^^^^^^^^ - -Most of configuration in Cassandra is done via yaml properties that can be set in ``cassandra.yaml``. At a minimum you -should consider setting the following properties: - -- ``cluster_name``: the name of your cluster. -- ``seeds``: a comma separated list of the IP addresses of your cluster seeds. -- ``storage_port``: you don't necessarily need to change this but make sure that there are no firewalls blocking this - port. -- ``listen_address``: the IP address of your node, this is what allows other nodes to communicate with this node so it - is important that you change it. Alternatively, you can set ``listen_interface`` to tell Cassandra which interface to - use, and consecutively which address to use. Set only one, not both. 
-- ``native_transport_port``: as for storage\_port, make sure this port is not blocked by firewalls as clients will - communicate with Cassandra on this port. - -Changing the location of directories -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following yaml properties control the location of directories: - -- ``data_file_directories``: one or more directories where data files are located. -- ``commitlog_directory``: the directory where commitlog files are located. -- ``saved_caches_directory``: the directory where saved caches are located. -- ``hints_directory``: the directory where hints are located. - -For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks. - -Environment variables -^^^^^^^^^^^^^^^^^^^^^ - -JVM-level settings such as heap size can be set in ``cassandra-env.sh``. You can add any additional JVM command line -argument to the ``JVM_OPTS`` environment variable; when Cassandra starts these arguments will be passed to the JVM. - -Logging -^^^^^^^ - -The logger in use is logback. You can change logging properties by editing ``logback.xml``. By default it will log at -INFO level into a file called ``system.log`` and at debug level into a file called ``debug.log``. When running in the -foreground, it will also log at INFO level to the console. - diff --git a/src/doc/3.11.7/_sources/getting_started/drivers.rst.txt b/src/doc/3.11.7/_sources/getting_started/drivers.rst.txt deleted file mode 100644 index baec82378..000000000 --- a/src/doc/3.11.7/_sources/getting_started/drivers.rst.txt +++ /dev/null @@ -1,107 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _client-drivers: - -Client drivers --------------- - -Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver. 
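As a quick illustration of what using one of these drivers looks like in practice, the sketch below connects with the DataStax Java driver (3.x ``Cluster``/``Session`` API) and reads the node's release version. The contact point, the chosen driver and the driver version are assumptions for the example only, not a recommendation of any particular driver.

.. code-block:: java

    import com.datastax.driver.core.Cluster;
    import com.datastax.driver.core.ResultSet;
    import com.datastax.driver.core.Row;
    import com.datastax.driver.core.Session;

    public class QuickConnect
    {
        public static void main(String[] args)
        {
            // Assumes a single local node listening on the default native protocol port (9042)
            try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
                 Session session = cluster.connect())
            {
                ResultSet rs = session.execute("SELECT release_version FROM system.local");
                Row row = rs.one();
                System.out.println("Connected to Cassandra " + row.getString("release_version"));
            }
        }
    }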
- -Java -^^^^ - -- `Achilles `__ -- `Astyanax `__ -- `Casser `__ -- `Datastax Java driver `__ -- `Kundera `__ -- `PlayORM `__ - -Python -^^^^^^ - -- `Datastax Python driver `__ - -Ruby -^^^^ - -- `Datastax Ruby driver `__ - -C# / .NET -^^^^^^^^^ - -- `Cassandra Sharp `__ -- `Datastax C# driver `__ -- `Fluent Cassandra `__ - -Nodejs -^^^^^^ - -- `Datastax Nodejs driver `__ -- `Node-Cassandra-CQL `__ - -PHP -^^^ - -- `CQL \| PHP `__ -- `Datastax PHP driver `__ -- `PHP-Cassandra `__ -- `PHP Library for Cassandra `__ - -C++ -^^^ - -- `Datastax C++ driver `__ -- `libQTCassandra `__ - -Scala -^^^^^ - -- `Datastax Spark connector `__ -- `Phantom `__ -- `Quill `__ - -Clojure -^^^^^^^ - -- `Alia `__ -- `Cassaforte `__ -- `Hayt `__ - -Erlang -^^^^^^ - -- `CQerl `__ -- `Erlcass `__ - -Go -^^ - -- `CQLc `__ -- `Gocassa `__ -- `GoCQL `__ - -Haskell -^^^^^^^ - -- `Cassy `__ - -Rust -^^^^ - -- `Rust CQL `__ diff --git a/src/doc/3.11.7/_sources/getting_started/index.rst.txt b/src/doc/3.11.7/_sources/getting_started/index.rst.txt deleted file mode 100644 index 4ca9c4d40..000000000 --- a/src/doc/3.11.7/_sources/getting_started/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Getting Started -=============== - -This section covers how to get started using Apache Cassandra and should be the first thing to read if you are new to -Cassandra. - -.. toctree:: - :maxdepth: 2 - - installing - configuring - querying - drivers - - diff --git a/src/doc/3.11.7/_sources/getting_started/installing.rst.txt b/src/doc/3.11.7/_sources/getting_started/installing.rst.txt deleted file mode 100644 index 9be85e587..000000000 --- a/src/doc/3.11.7/_sources/getting_started/installing.rst.txt +++ /dev/null @@ -1,106 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Installing Cassandra --------------------- - -Prerequisites -^^^^^^^^^^^^^ - -- The latest version of Java 8, either the `Oracle Java Standard Edition 8 - `__ or `OpenJDK 8 `__. 
To - verify that you have the correct version of java installed, type ``java -version``. - -- For using cqlsh, the latest version of `Python 2.7 `__. To verify that you have - the correct version of Python installed, type ``python --version``. - -Installation from binary tarball files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Download the latest stable release from the `Apache Cassandra downloads website `__. - -- Untar the file somewhere, for example: - -:: - - tar -xvf apache-cassandra-3.6-bin.tar.gz cassandra - -The files will be extracted into ``apache-cassandra-3.6``, you need to substitute 3.6 with the release number that you -have downloaded. - -- Optionally add ``apache-cassandra-3.6\bin`` to your path. -- Start Cassandra in the foreground by invoking ``bin/cassandra -f`` from the command line. Press "Control-C" to stop - Cassandra. Start Cassandra in the background by invoking ``bin/cassandra`` from the command line. Invoke ``kill pid`` - or ``pkill -f CassandraDaemon`` to stop Cassandra, where pid is the Cassandra process id, which you can find for - example by invoking ``pgrep -f CassandraDaemon``. -- Verify that Cassandra is running by invoking ``bin/nodetool status`` from the command line. -- Configuration files are located in the ``conf`` sub-directory. -- Since Cassandra 2.1, log and data directories are located in the ``logs`` and ``data`` sub-directories respectively. - Older versions defaulted to ``/var/log/cassandra`` and ``/var/lib/cassandra``. Due to this, it is necessary to either - start Cassandra with root privileges or change ``conf/cassandra.yaml`` to use directories owned by the current user, - as explained below in the section on changing the location of directories. - -Installation from Debian packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Add the Apache repository of Cassandra to ``/etc/apt/sources.list.d/cassandra.sources.list``, for example for version - 3.6: - -:: - - echo "deb https://downloads.apache.org/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list - -- Add the Apache Cassandra repository keys: - -:: - - curl https://downloads.apache.org/cassandra/KEYS | sudo apt-key add - - -- Update the repositories: - -:: - - sudo apt-get update - -- If you encounter this error: - -:: - - GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA - -Then add the public key A278B781FE4B2BDA as follows: - -:: - - sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA - -and repeat ``sudo apt-get update``. The actual key may be different, you get it from the error message itself. For a -full list of Apache contributors public keys, you can refer to `this link `__. - -- Install Cassandra: - -:: - - sudo apt-get install cassandra - -- You can start Cassandra with ``sudo service cassandra start`` and stop it with ``sudo service cassandra stop``. - However, normally the service will start automatically. For this reason be sure to stop it if you need to make any - configuration changes. -- Verify that Cassandra is running by invoking ``nodetool status`` from the command line. -- The default location of configuration files is ``/etc/cassandra``. -- The default location of log and data directories is ``/var/log/cassandra/`` and ``/var/lib/cassandra``. 
diff --git a/src/doc/3.11.7/_sources/getting_started/querying.rst.txt b/src/doc/3.11.7/_sources/getting_started/querying.rst.txt deleted file mode 100644 index 55b162bb4..000000000 --- a/src/doc/3.11.7/_sources/getting_started/querying.rst.txt +++ /dev/null @@ -1,52 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Inserting and querying ----------------------- - -The API to Cassandra is :ref:`CQL `, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done: - -- either using cqlsh, -- or through a client driver for Cassandra. - -CQLSH -^^^^^ - -cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:: - - $ bin/cqlsh localhost - Connected to Test Cluster at localhost:9042. - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - Use HELP for help. - cqlsh> SELECT cluster_name, listen_address FROM system.local; - - cluster_name | listen_address - --------------+---------------- - Test Cluster | 127.0.0.1 - - (1 rows) - cqlsh> - -See the :ref:`cqlsh section ` for full documentation. - -Client drivers -^^^^^^^^^^^^^^ - -A lot of client drivers are provided by the Community and a list of known drivers is provided in :ref:`the next section -`. You should refer to the documentation of each drivers for more information on how to use them. diff --git a/src/doc/3.11.7/_sources/index.rst.txt b/src/doc/3.11.7/_sources/index.rst.txt deleted file mode 100644 index 562603d19..000000000 --- a/src/doc/3.11.7/_sources/index.rst.txt +++ /dev/null @@ -1,41 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Welcome to Apache Cassandra's documentation! -============================================ - -This is the official documentation for `Apache Cassandra `__ |version|. 
If you would like -to contribute to this documentation, you are welcome to do so by submitting your contribution like any other patch -following `these instructions `__. - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting_started/index - architecture/index - data_modeling/index - cql/index - configuration/index - operating/index - tools/index - troubleshooting/index - development/index - faq/index - - bugs - contactus diff --git a/src/doc/3.11.7/_sources/operating/backups.rst.txt b/src/doc/3.11.7/_sources/operating/backups.rst.txt deleted file mode 100644 index c071e83b5..000000000 --- a/src/doc/3.11.7/_sources/operating/backups.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Backups -======= - -.. todo:: TODO diff --git a/src/doc/3.11.7/_sources/operating/bloom_filters.rst.txt b/src/doc/3.11.7/_sources/operating/bloom_filters.rst.txt deleted file mode 100644 index 0b37c18da..000000000 --- a/src/doc/3.11.7/_sources/operating/bloom_filters.rst.txt +++ /dev/null @@ -1,65 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Bloom Filters -------------- - -In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter. - -Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file. - -While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the -the ``bloom_filter_fp_chance`` to a float between 0 and 1. 
- -The default value for ``bloom_filter_fp_chance`` is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases. - -Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the ``bloom_filter_fp_chance`` gets closer to 0), memory usage -increases non-linearly - the bloom filter for ``bloom_filter_fp_chance = 0.01`` will require about three times as much -memory as the same table with ``bloom_filter_fp_chance = 0.1``. - -Typical values for ``bloom_filter_fp_chance`` are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case: - -- Users with more RAM and slower disks may benefit from setting the ``bloom_filter_fp_chance`` to a numerically lower - number (such as 0.01) to avoid excess IO operations -- Users with less RAM, more dense nodes, or very fast disks may tolerate a higher ``bloom_filter_fp_chance`` in order to - save RAM at the expense of excess IO operations -- In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics - workloads), setting the ``bloom_filter_fp_chance`` to a much higher number is acceptable. - -Changing -^^^^^^^^ - -The bloom filter false positive chance is visible in the ``DESCRIBE TABLE`` output as the field -``bloom_filter_fp_chance``. Operators can change the value with an ``ALTER TABLE`` statement: -:: - - ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01 - -Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ``ALTER TABLE`` statement, new -files on disk will be written with the new ``bloom_filter_fp_chance``, but existing sstables will not be modified until -they are compacted - if an operator needs a change to ``bloom_filter_fp_chance`` to take effect, they can trigger an -SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the progress. diff --git a/src/doc/3.11.7/_sources/operating/bulk_loading.rst.txt b/src/doc/3.11.7/_sources/operating/bulk_loading.rst.txt deleted file mode 100644 index c8224d5cb..000000000 --- a/src/doc/3.11.7/_sources/operating/bulk_loading.rst.txt +++ /dev/null @@ -1,24 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _bulk-loading: - -Bulk Loading ------------- - -.. 
todo:: TODO diff --git a/src/doc/3.11.7/_sources/operating/cdc.rst.txt b/src/doc/3.11.7/_sources/operating/cdc.rst.txt deleted file mode 100644 index 192f62a09..000000000 --- a/src/doc/3.11.7/_sources/operating/cdc.rst.txt +++ /dev/null @@ -1,89 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Change Data Capture ------------------- - -Overview -^^^^^^^^ - -Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those tables once a configurable size-on-disk for the combined flushed and unflushed CDC-log is reached. An operator can enable CDC on a table by setting the table property ``cdc=true`` (either when :ref:`creating the table ` or :ref:`altering it `), after which any CommitLogSegments containing data for a CDC-enabled table are moved to the directory specified in ``cassandra.yaml`` on segment discard. A threshold of total disk space allowed is specified in the yaml, at which point newly allocated CommitLogSegments will not allow CDC data until a consumer parses and removes data from the destination archival directory. - -Configuration -^^^^^^^^^^^^^ - -Enabling or disabling CDC on a table -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -CDC is enabled or disabled through the `cdc` table property, for instance:: - - CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true; - - ALTER TABLE foo WITH cdc=true; - - ALTER TABLE foo WITH cdc=false; - -cassandra.yaml parameters -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following `cassandra.yaml` parameters are available for CDC: - -``cdc_enabled`` (default: false) - Enable or disable CDC operations node-wide. -``cdc_raw_directory`` (default: ``$CASSANDRA_HOME/data/cdc_raw``) - Destination for CommitLogSegments to be moved after all corresponding memtables are flushed. -``cdc_free_space_in_mb`` (default: min of 4096 and 1/8th volume space) - Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in - ``cdc_raw_directory``. -``cdc_free_space_check_interval_ms`` (default: 250) - When at capacity, we limit the frequency with which we re-calculate the space taken up by ``cdc_raw_directory`` to - prevent burning CPU cycles unnecessarily. Default is to check 4 times per second. - -.. _reading-commitlogsegments: - -Reading CommitLogSegments -^^^^^^^^^^^^^^^^^^^^^^^^^ -This implementation included a refactor of CommitLogReplayer into `CommitLogReader.java -`__. -Usage is `fairly straightforward -`__ -with a `variety of signatures -`__ -available for use. In order to handle mutations read from disk, implement `CommitLogReadHandler -`__.
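As a rough sketch of what a consumer could look like, the class below implements the handler and simply prints each mutation it is given. The method names follow the ``CommitLogReadHandler`` interface linked above, but the class name, the logging and the skip-on-error policy are illustrative assumptions and should be checked against the interface in your Cassandra version.

.. code-block:: java

    import org.apache.cassandra.db.Mutation;
    import org.apache.cassandra.db.commitlog.CommitLogDescriptor;
    import org.apache.cassandra.db.commitlog.CommitLogReadHandler;

    // Illustrative only: a consumer would normally hand each mutation to downstream processing.
    public class LoggingCdcConsumer implements CommitLogReadHandler
    {
        public boolean shouldSkipSegmentOnError(CommitLogReadException exception)
        {
            // Skip unreadable segments rather than aborting the whole pass
            return true;
        }

        public void handleUnrecoverableError(CommitLogReadException exception)
        {
            throw new RuntimeException(exception);
        }

        public void handleMutation(Mutation mutation, int size, int entryLocation, CommitLogDescriptor descriptor)
        {
            // A real consumer would parse and forward the mutation here
            System.out.println("CDC mutation for keyspace " + mutation.getKeyspaceName());
        }
    }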
- -Warnings -^^^^^^^^ - -**Do not enable CDC without some kind of consumption process in-place.** - -The initial implementation of Change Data Capture does not include a parser (see :ref:`reading-commitlogsegments` above) -so, if CDC is enabled on a node and then on a table, the ``cdc_free_space_in_mb`` will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place. - -Further Reading -^^^^^^^^^^^^^^^ - -- `Design doc `__ -- `JIRA ticket `__ diff --git a/src/doc/3.11.7/_sources/operating/compaction.rst.txt b/src/doc/3.11.7/_sources/operating/compaction.rst.txt deleted file mode 100644 index 0f3900042..000000000 --- a/src/doc/3.11.7/_sources/operating/compaction.rst.txt +++ /dev/null @@ -1,442 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _compaction: - -Compaction ----------- - -Types of compaction -^^^^^^^^^^^^^^^^^^^ - -The concept of compaction is used for different kinds of operations in Cassandra, the common thing about these -operations is that it takes one or more sstables and output new sstables. The types of compactions are; - -Minor compaction - triggered automatically in Cassandra. -Major compaction - a user executes a compaction over all sstables on the node. -User defined compaction - a user triggers a compaction on a given set of sstables. -Scrub - try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you - will need to run a full repair on the node. -Upgradesstables - upgrade sstables to the latest version. Run this after upgrading to a new major version. -Cleanup - remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been - bootstrapped since that node will take ownership of some ranges from those nodes. -Secondary index rebuild - rebuild the secondary indexes on the node. -Anticompaction - after repair the ranges that were actually repaired are split out of the sstables that existed when repair started. -Sub range compaction - It is possible to only compact a given sub range - this could be useful if you know a token that has been - misbehaving - either gathering many updates or many deletes. (``nodetool compact -st x -et y``) will pick - all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will - most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS - the resulting sstable will end up in L0. - -When is a minor compaction triggered? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -# When an sstable is added to the node through flushing/streaming etc. 
-# When autocompaction is enabled after being disabled (``nodetool enableautocompaction``) -# When compaction adds new sstables. -# A check for new minor compactions every 5 minutes. - -Merging sstables -^^^^^^^^^^^^^^^^ - -Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently. - -Tombstones and Garbage Collection (GC) Grace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Why Tombstones -~~~~~~~~~~~~~~ - -When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra. - -Deletes without tombstones -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Imagine a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If one of the nodes fails and and our delete operation only removes existing values we can end up with a cluster that -looks like:: - - [], [], [A] - -Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:: - - [A], [A], [A] - -This would cause our data to be resurrected even though it had been -deleted. - -Deletes with Tombstones -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting again with a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If instead of removing data we add a tombstone record, our single node failure situation will look like this.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A] - -Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]] - -Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as ``gc_grace_seconds`` for every table in Cassandra. - -The gc_grace_seconds parameter and Tombstone Removal -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The table level ``gc_grace_seconds`` parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After ``gc_grace_seconds`` has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstable for a tombstone to be removed. More precisely, to -be able to drop an actual tombstone the following needs to be true; - -- The tombstone must be older than ``gc_grace_seconds`` -- If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older - than the tombstone containing X must be included in the same compaction. 
We don't need to care if the partition is in - an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older - than the data it cannot shadow that data. -- If the option ``only_purge_repaired_tombstones`` is enabled, tombstones are only removed if the data has also been - repaired. - -If a node remains down or disconnected for longer than ``gc_grace_seconds`` it's deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the "Deletes without Tombstones" section. -Note that tombstones will not be removed until a compaction event even if ``gc_grace_seconds`` has elapsed. - -The default value for ``gc_grace_seconds`` is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using ``WITH gc_grace_seconds``. - -TTL -^^^ - -Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least ``gc_grace_seconds``. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once. - -Fully expired sstables -^^^^^^^^^^^^^^^^^^^^^^ - -If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called ``sstableexpiredblockers`` that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -``TimeWindowCompactionStrategy`` (and the deprecated ``DateTieredCompactionStrategy``). - -Repaired/unrepaired data -^^^^^^^^^^^^^^^^^^^^^^^^ - -With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables. - -Data directories -^^^^^^^^^^^^^^^^ - -Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. 
This has a few more benefits than just avoiding data getting undeleted: - -- It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings - and each one can run compactions independently from the others. -- Users can back up and restore a single data directory. -- Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk - backing two data directories, the big one will be limited by the small one. One workaround for this is to create - more data directories backed by the big disk. - -Single sstable tombstone compaction -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When an sstable is written a histogram with the tombstone expiry times is created and this is used to try to find sstables with very many tombstones and run single sstable compaction on that sstable in the hope of being able to drop tombstones in that sstable. Before starting this it is also checked how likely it is that any tombstones will actually be able to be dropped and how much this sstable overlaps with other sstables. To avoid most of these checks the compaction option ``unchecked_tombstone_compaction`` can be enabled. - -.. _compaction-options: - -Common options -^^^^^^^^^^^^^^ - -There are a number of common options for all the compaction strategies: - -``enabled`` (default: true) - Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do - 'nodetool enableautocompaction' to start running compactions. -``tombstone_threshold`` (default: 0.2) - How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable. -``tombstone_compaction_interval`` (default: 86400s (1 day)) - Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure - that one sstable is not constantly getting recompacted - this option states how often we should try for a given - sstable. -``log_all`` (default: false) - New detailed compaction logging, see :ref:`below `. -``unchecked_tombstone_compaction`` (default: false) - The single sstable compaction has quite strict checks for whether it should be started; this option disables those - checks and for some use cases this might be needed. Note that this does not change anything for the actual - compaction; tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able - to drop any tombstones. -``only_purge_repaired_tombstone`` (default: false) - Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired. -``min_threshold`` (default: 4) - Lower limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. -``max_threshold`` (default: 32) - Upper limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. - -Further, see the section on each strategy for specific additional options. - -Compaction nodetool commands -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :ref:`nodetool ` utility provides a number of commands related to compaction: - -``enableautocompaction`` - Enable compaction. -``disableautocompaction`` - Disable compaction. -``setcompactionthroughput`` - How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this - throughput. -``compactionstats`` - Statistics about current and pending compactions.
``compactionhistory``
    List details about the last compactions.
``setcompactionthreshold``
    Set the min/max sstable count for when to trigger compaction, defaults to 4/32.

Switching the compaction strategy and options using JMX
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It is possible to switch compaction strategies and their options on just a single node using JMX; this is a great way to
experiment with settings without affecting the whole cluster. The mbean is::

    org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>

and the attribute to change is ``CompactionParameters`` or ``CompactionParametersJson`` if you use jconsole or jmc. The
syntax for the json version is the same as you would use in an :ref:`ALTER TABLE ` statement -
for example::

    { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}

The setting is kept until someone executes an :ref:`ALTER TABLE ` that touches the compaction
settings or restarts the node.

.. _detailed-compaction-logging:

More detailed compaction logging
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Enable the compaction option ``log_all`` and a more detailed compaction log file will be produced in your log
directory.

.. _STCS:

Size Tiered Compaction Strategy
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The basic idea of ``SizeTieredCompactionStrategy`` (STCS) is to merge sstables of approximately the same size. All
sstables are put in different buckets depending on their size. An sstable is added to a bucket if the size of the
sstable is within ``bucket_low`` and ``bucket_high`` of the current average size of the sstables already in the bucket.
This will create several buckets, and the most interesting of those buckets will be compacted. The most interesting one
is decided by figuring out which bucket's sstables take the most reads.

Major compaction
~~~~~~~~~~~~~~~~

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data
and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several
sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size.

.. _stcs-options:

STCS options
~~~~~~~~~~~~

``min_sstable_size`` (default: 50MB)
    Sstables smaller than this are put in the same bucket.
``bucket_low`` (default: 0.5)
    How much smaller than the average size of a bucket an sstable should be before not being included in the bucket.
    That is, if ``bucket_low * avg_bucket_size < sstable_size`` (and the ``bucket_high`` condition holds, see below),
    then the sstable is added to the bucket.
``bucket_high`` (default: 1.5)
    How much bigger than the average size of a bucket an sstable should be before not being included in the bucket.
    That is, if ``sstable_size < bucket_high * avg_bucket_size`` (and the ``bucket_low`` condition holds, see above),
    then the sstable is added to the bucket.

Defragmentation
~~~~~~~~~~~~~~~

Defragmentation is done when many sstables are touched during a read. The result of the read is put into the memtable
so that the next read will not have to touch as many sstables. This can cause writes on a read-only cluster.

.. _LCS:

Leveled Compaction Strategy
^^^^^^^^^^^^^^^^^^^^^^^^^^^

The idea of ``LeveledCompactionStrategy`` (LCS) is that all sstables are put into different levels where we guarantee
that no overlapping sstables are in the same level.
By overlapping we mean that the token range (first to last token) of one sstable never overlaps with that of another
sstable in the same level. This means that for a SELECT we will only have to look for the partition key in a single
sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 is where
sstables are streamed/flushed - no overlap guarantees are given here.

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level.
This is done by always including all overlapping sstables in the next level. For example, if we select an sstable in L3,
we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions
will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that
we won't create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0
sstables cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since
that can use too much memory.

When deciding which level to compact, LCS checks the higher levels first (with LCS, a "higher" level is one with a
higher number, L0 being the lowest one) and if a level is behind, a compaction will be started in that level.

Major compaction
~~~~~~~~~~~~~~~~

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then, once L1 is full,
it continues with L2 and so on. This is suboptimal and will change to create all the sstables in a high level instead;
see CASSANDRA-11817.

Bootstrapping
~~~~~~~~~~~~~

During bootstrap, sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many
compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data
from a remote node - these writes are flushed to L0 like all other writes, and to avoid those sstables blocking the
remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

STCS in L0
~~~~~~~~~~

If LCS accumulates very many L0 sstables, reads are going to hit all (or most) of the L0 sstables since they are likely
to be overlapping. To remedy this more quickly, LCS does STCS compactions in L0 if there are more than 32 sstables
there. This should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you
keep getting too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work
out better.

Starved sstables
~~~~~~~~~~~~~~~~

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted, they
might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is
only enough data to actually get an L4 on the node, the leftover sstables in L6 will get starved and not compacted.
This can happen if a user changes ``sstable_size_in_mb`` from 5MB to 160MB, for example. To avoid this, LCS tries to
include those starved high level sstables in other compactions if there have been 25 compaction rounds where the
highest level has not been involved.

.. _lcs-options:

LCS options
~~~~~~~~~~~

``sstable_size_in_mb`` (default: 160MB)
    The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very
    large partitions on the node.
``fanout_size`` (default: 10)
    The target size of each level increases by this fanout_size multiplier. You can reduce the space amplification by
    tuning this option.

LCS also supports the ``cassandra.disable_stcs_in_l0`` startup option (``-Dcassandra.disable_stcs_in_l0=true``) to avoid
doing STCS in L0.

.. _TWCS:

Time Window Compaction Strategy
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``TimeWindowCompactionStrategy`` (TWCS) is designed specifically for workloads where it's beneficial to have data on
disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is
written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the
same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using
``SizeTieredCompactionStrategy`` or ``LeveledCompactionStrategy``. The basic concept is that
``TimeWindowCompactionStrategy`` will create a single sstable for a given window, where a window is simply calculated
as the combination of two primary options:

``compaction_window_unit`` (default: DAYS)
    A Java TimeUnit (MINUTES, HOURS, or DAYS).
``compaction_window_size`` (default: 1)
    The number of units that make up a window.

Taken together, the operator can specify windows of virtually any size, and `TimeWindowCompactionStrategy` will work to
create a single sstable for writes within that window. For efficiency during writing, the newest window will be
compacted using `SizeTieredCompactionStrategy`.

Ideally, operators should select a ``compaction_window_unit`` and ``compaction_window_size`` pair that produces
approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3-day window would be a reasonable choice
(``'compaction_window_unit':'DAYS','compaction_window_size':3``).

TimeWindowCompactionStrategy Operational Concerns
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop
more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of
order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

- If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables
  and flushed into the same SSTable, where it will remain comingled.
- If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data
  will be comingled and flushed into the same SSTable.

While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically,
users should avoid queries that explicitly set the timestamp via CQL ``USING TIMESTAMP``. Additionally, users should run
frequent repairs (which stream data in such a way that it does not become comingled), and disable background read
repair by setting the table's ``read_repair_chance`` and ``dclocal_read_repair_chance`` to 0.

Changing TimeWindowCompactionStrategy Options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Operators wishing to enable ``TimeWindowCompactionStrategy`` on existing data should consider running a major compaction
first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables
as expected.
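
For illustration, switching an existing table over to TWCS with a three-day window might look like the following (the
keyspace and table names here are placeholders)::

    ALTER TABLE keyspace.table WITH compaction = {
        'class': 'TimeWindowCompactionStrategy',
        'compaction_window_unit': 'DAYS',
        'compaction_window_size': 3
    };

The major compaction mentioned above can then be triggered with ``nodetool compact keyspace table``.
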
Operators wishing to change ``compaction_window_unit`` or ``compaction_window_size`` can do so, but may trigger
additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24
hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple
windows.
diff --git a/src/doc/3.11.7/_sources/operating/compression.rst.txt b/src/doc/3.11.7/_sources/operating/compression.rst.txt
deleted file mode 100644
index 01da34b6d..000000000
--- a/src/doc/3.11.7/_sources/operating/compression.rst.txt
+++ /dev/null
@@ -1,94 +0,0 @@
.. Licensed to the Apache Software Foundation (ASF) under one
.. or more contributor license agreements. See the NOTICE file
.. distributed with this work for additional information
.. regarding copyright ownership. The ASF licenses this file
.. to you under the Apache License, Version 2.0 (the
.. "License"); you may not use this file except in compliance
.. with the License. You may obtain a copy of the License at
..
..     http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.

.. highlight:: none

Compression
-----------

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of
data on disk by compressing the SSTable in user-configurable chunks, sized by ``chunk_length_in_kb``. Because Cassandra
SSTables are immutable, the CPU cost of compression is only incurred when the SSTable is written - subsequent updates
to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when
UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full
chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so
on).

Configuring Compression
^^^^^^^^^^^^^^^^^^^^^^^

Compression is configured on a per-table basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. By
default, three options are relevant:

- ``class`` specifies the compression class - Cassandra provides three classes (``LZ4Compressor``,
  ``SnappyCompressor``, and ``DeflateCompressor``). The default is ``LZ4Compressor``.
- ``chunk_length_in_kb`` specifies the number of kilobytes of data per compression chunk. The default is 64KB.
- ``crc_check_chance`` determines how likely Cassandra is to verify the checksum on each compression chunk during
  reads. The default is 1.0.

Users can set compression using the following syntax:

::

    CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};

Or

::

    ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};

Once enabled, compression can be disabled with ``ALTER TABLE`` setting ``enabled`` to ``false``:

::

    ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};

Operators should be aware, however, that changing compression is not immediate.
The data is compressed when the SSTable is written, and as SSTables are immutable, the compression will not be modified
until the table is compacted. Upon issuing a change to the compression options via ``ALTER TABLE``, the existing
SSTables will not be modified until they are compacted - if an operator needs compression changes to take effect
immediately, the operator can trigger an SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``,
both of which will rebuild the SSTables on disk, re-compressing the data in the process.

Benefits and Uses
^^^^^^^^^^^^^^^^^

Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save
in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is
typically lower than the cost of reading or writing the larger volume of uncompressed data from disk.

Compression is most useful in tables composed of many rows, where the rows are similar in nature. Tables containing
similar text columns (such as repeated JSON blobs) often compress very well.

Operational Impact
^^^^^^^^^^^^^^^^^^

- Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per
  terabyte of data on disk, though the exact usage varies with ``chunk_length_in_kb`` and compression ratios.

- Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as
  non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.

- The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a
  way to ensure correctness of data on disk, compressed tables allow the user to set ``crc_check_chance`` (a float from
  0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.

Advanced Use
^^^^^^^^^^^^

Advanced users can provide their own compression class by implementing the interface at
``org.apache.cassandra.io.compress.ICompressor``.
diff --git a/src/doc/3.11.7/_sources/operating/hardware.rst.txt b/src/doc/3.11.7/_sources/operating/hardware.rst.txt
deleted file mode 100644
index ad3aa8d21..000000000
--- a/src/doc/3.11.7/_sources/operating/hardware.rst.txt
+++ /dev/null
@@ -1,87 +0,0 @@
.. Licensed to the Apache Software Foundation (ASF) under one
.. or more contributor license agreements. See the NOTICE file
.. distributed with this work for additional information
.. regarding copyright ownership. The ASF licenses this file
.. to you under the Apache License, Version 2.0 (the
.. "License"); you may not use this file except in compliance
.. with the License. You may obtain a copy of the License at
..
..     http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.

Hardware Choices
----------------

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can
be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production
server requires at least 2 cores, and at least 8GB of RAM.
Typical production servers have 8 or more cores and at least 32GB of RAM.

CPU
^^^
Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running
on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and
then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding
additional CPU cores often increases throughput of both reads and writes.

Memory
^^^^^^
Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (Java's ``-Xmx`` parameter). In addition to
the heap, Cassandra will use significant amounts of RAM off-heap for compression metadata, bloom filters, row, key, and
counter caches, and an in-process page cache. Finally, Cassandra will take advantage of the operating system's page
cache, storing recently accessed portions of files in RAM for rapid re-use.

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However,
basic guidelines suggest:

- ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
- The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM
- Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
- Heaps larger than 12GB should consider G1GC

Disks
^^^^^
Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made
so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are
exceeded and memtables are flushed to disk as SSTables.

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are
only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to
satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data
held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be
periodically purged once it is flushed to the SSTable data files.

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra's sorted
immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of
SSDs by avoiding write amplification. However, when using spinning disks, it's important that the commitlog
(``commitlog_directory``) be on one physical disk (not simply a partition, but a physical disk), and the data files
(``data_file_directories``) be set to a separate physical disk. By separating the commitlog from the data directory,
writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request
data from various SSTables on disk.

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this
reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided.
Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it's typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5. - -Common Cloud Choices -^^^^^^^^^^^^^^^^^^^^ - -Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include: - -- m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate - workloads -- i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs -- m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) - storage - -Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives. diff --git a/src/doc/3.11.7/_sources/operating/hints.rst.txt b/src/doc/3.11.7/_sources/operating/hints.rst.txt deleted file mode 100644 index f79f18ab7..000000000 --- a/src/doc/3.11.7/_sources/operating/hints.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Hints ------ - -.. todo:: todo diff --git a/src/doc/3.11.7/_sources/operating/index.rst.txt b/src/doc/3.11.7/_sources/operating/index.rst.txt deleted file mode 100644 index e2cead255..000000000 --- a/src/doc/3.11.7/_sources/operating/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Operating Cassandra -=================== - -.. 
toctree:: - :maxdepth: 2 - - snitch - topo_changes - repair - read_repair - hints - compaction - bloom_filters - compression - cdc - backups - bulk_loading - metrics - security - hardware - diff --git a/src/doc/3.11.7/_sources/operating/metrics.rst.txt b/src/doc/3.11.7/_sources/operating/metrics.rst.txt deleted file mode 100644 index 04abb48e9..000000000 --- a/src/doc/3.11.7/_sources/operating/metrics.rst.txt +++ /dev/null @@ -1,706 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Monitoring ----------- - -Metrics in Cassandra are managed using the `Dropwizard Metrics `__ library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of `built in -`__ and `third party -`__ reporter plugins. - -Metrics are collected for a single node. It's up to the operator to use an external monitoring system to aggregate them. - -Metric Types -^^^^^^^^^^^^ -All metrics reported by cassandra fit into one of the following types. - -``Gauge`` - An instantaneous measurement of a value. - -``Counter`` - A gauge for an ``AtomicLong`` instance. Typically this is consumed by monitoring the change since the last call to - see if there is a large increase compared to the norm. - -``Histogram`` - Measures the statistical distribution of values in a stream of data. - - In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th - percentiles. - -``Timer`` - Measures both the rate that a particular piece of code is called and the histogram of its duration. - -``Latency`` - Special type that tracks latency (in microseconds) with a ``Timer`` plus a ``Counter`` that tracks the total latency - accrued since starting. The former is useful if you track the change in total latency since the last check. Each - metric name of this type will have 'Latency' and 'TotalLatency' appended to it. - -``Meter`` - A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving - average throughputs. - -Table Metrics -^^^^^^^^^^^^^ - -Each table in Cassandra has metrics responsible for tracking its state and performance. - -The metric names are all appended with the specific ``Keyspace`` and ``Table`` name. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Table...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Table keyspace= scope=
name=`` - -.. NOTE:: - There is a special table called '``all``' without a keyspace. This represents the aggregation of metrics across - **all** tables and keyspaces on the node. - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -MemtableOnHeapSize Gauge Total amount of data stored in the memtable that resides **on**-heap, including column related overhead and partitions overwritten. -MemtableOffHeapSize Gauge Total amount of data stored in the memtable that resides **off**-heap, including column related overhead and partitions overwritten. -MemtableLiveDataSize Gauge Total amount of live data stored in the memtable, excluding any data structure overhead. -AllMemtablesOnHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **on**-heap. -AllMemtablesOffHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **off**-heap. -AllMemtablesLiveDataSize Gauge Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead. -MemtableColumnsCount Gauge Total number of columns present in the memtable. -MemtableSwitchCount Counter Number of times flush has resulted in the memtable being switched out. -CompressionRatio Gauge Current compression ratio for all SSTables. -EstimatedPartitionSizeHistogram Gauge Histogram of estimated partition size (in bytes). -EstimatedPartitionCount Gauge Approximate number of keys in table. -EstimatedColumnCountHistogram Gauge Histogram of estimated number of columns. -SSTablesPerReadHistogram Histogram Histogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into acoount. -ReadLatency Latency Local read latency for this table. -RangeLatency Latency Local range scan latency for this table. -WriteLatency Latency Local write latency for this table. -CoordinatorReadLatency Timer Coordinator read latency for this table. -CoordinatorScanLatency Timer Coordinator range scan latency for this table. -PendingFlushes Counter Estimated number of flush tasks pending for this table. -BytesFlushed Counter Total number of bytes flushed since server [re]start. -CompactionBytesWritten Counter Total number of bytes written by compaction since server [re]start. -PendingCompactions Gauge Estimate of number of pending compactions for this table. -LiveSSTableCount Gauge Number of SSTables on disk for this table. -LiveDiskSpaceUsed Counter Disk space used by SSTables belonging to this table (in bytes). -TotalDiskSpaceUsed Counter Total disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC'd. -MinPartitionSize Gauge Size of the smallest compacted partition (in bytes). -MaxPartitionSize Gauge Size of the largest compacted partition (in bytes). -MeanPartitionSize Gauge Size of the average compacted partition (in bytes). -BloomFilterFalsePositives Gauge Number of false positives on table's bloom filter. -BloomFilterFalseRatio Gauge False positive ratio of table's bloom filter. -BloomFilterDiskSpaceUsed Gauge Disk space used by bloom filter (in bytes). -BloomFilterOffHeapMemoryUsed Gauge Off-heap memory used by bloom filter. -IndexSummaryOffHeapMemoryUsed Gauge Off-heap memory used by index summary. 
-CompressionMetadataOffHeapMemoryUsed Gauge Off-heap memory used by compression meta data. -KeyCacheHitRate Gauge Key cache hit rate for this table. -TombstoneScannedHistogram Histogram Histogram of tombstones scanned in queries on this table. -LiveScannedHistogram Histogram Histogram of live cells scanned in queries on this table. -ColUpdateTimeDeltaHistogram Histogram Histogram of column update time delta on this table. -ViewLockAcquireTime Timer Time taken acquiring a partition lock for materialized view updates on this table. -ViewReadTime Timer Time taken during the local read of a materialized view update. -TrueSnapshotsSize Gauge Disk space used by snapshots of this table including all SSTable components. -RowCacheHitOutOfRange Counter Number of table row cache hits that do not satisfy the query filter, thus went to disk. -RowCacheHit Counter Number of table row cache hits. -RowCacheMiss Counter Number of table row cache misses. -CasPrepare Latency Latency of paxos prepare round. -CasPropose Latency Latency of paxos propose round. -CasCommit Latency Latency of paxos commit round. -PercentRepaired Gauge Percent of table data that is repaired on disk. -SpeculativeRetries Counter Number of times speculative retries were sent for this table. -WaitingOnFreeMemtableSpace Histogram Histogram of time spent waiting for free memtable space, either on- or off-heap. -DroppedMutations Counter Number of dropped mutations on this table. -======================================= ============== =========== - -Keyspace Metrics -^^^^^^^^^^^^^^^^ -Each keyspace in Cassandra has metrics responsible for tracking its state and performance. - -These metrics are the same as the ``Table Metrics`` above, only they are aggregated at the Keyspace level. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.keyspace..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Keyspace scope= name=`` - -ThreadPool Metrics -^^^^^^^^^^^^^^^^^^ - -Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It's important to monitor the state of these thread pools since they can tell you how saturated a -node is. - -The metric names are all appended with the specific ``ThreadPool`` name. The thread pools are also categorized under a -specific type. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ThreadPools...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ThreadPools scope= type= name=`` - -===================== ============== =========== -Name Type Description -===================== ============== =========== -ActiveTasks Gauge Number of tasks being actively worked on by this pool. -PendingTasks Gauge Number of queued tasks queued up on this pool. -CompletedTasks Counter Number of tasks completed. -TotalBlockedTasks Counter Number of tasks that were blocked due to queue saturation. -CurrentlyBlockedTask Counter Number of tasks that are currently blocked due to queue saturation but on retry will become unblocked. -MaxPoolSize Gauge The maximum number of threads in this pool. -===================== ============== =========== - -The following thread pools can be monitored. 

============================ ============== ===========
Name                         Type           Description
============================ ============== ===========
Native-Transport-Requests   transport      Handles client CQL requests
CounterMutationStage        request        Responsible for counter writes
ViewMutationStage           request        Responsible for materialized view writes
MutationStage               request        Responsible for all other writes
ReadRepairStage             request        ReadRepair happens on this thread pool
ReadStage                   request        Local reads run on this thread pool
RequestResponseStage        request        Coordinator requests to the cluster run on this thread pool
AntiEntropyStage            internal       Builds merkle tree for repairs
CacheCleanupExecutor        internal       Cache maintenance performed on this thread pool
CompactionExecutor          internal       Compactions are run on these threads
GossipStage                 internal       Handles gossip requests
HintsDispatcher             internal       Performs hinted handoff
InternalResponseStage       internal       Responsible for intra-cluster callbacks
MemtableFlushWriter         internal       Writes memtables to disk
MemtablePostFlush           internal       Cleans up commit log after memtable is written to disk
MemtableReclaimMemory       internal       Memtable recycling
MigrationStage              internal       Runs schema migrations
MiscStage                   internal       Miscellaneous tasks run here
PendingRangeCalculator      internal       Calculates token range
PerDiskMemtableFlushWriter_0 internal      Responsible for flushing memtables to a specific disk (there is one of these per disk, 0-N)
Sampler                     internal       Responsible for re-sampling the index summaries of SSTables
SecondaryIndexManagement    internal       Performs updates to secondary indexes
ValidationExecutor          internal       Performs validation compaction or scrubbing
============================ ============== ===========

.. |nbsp| unicode:: 0xA0 .. nonbreaking space

Client Request Metrics
^^^^^^^^^^^^^^^^^^^^^^

Client requests have their own set of metrics that encapsulate the work happening at the coordinator level.

Different types of client requests are broken down by ``RequestType``.

Reported name format:

**Metric Name**
    ``org.apache.cassandra.metrics.ClientRequest..``

**JMX MBean**
    ``org.apache.cassandra.metrics:type=ClientRequest scope= name=``


:RequestType: CASRead
:Description: Metrics related to transactional read requests.
:Metrics:
    ===================== ============== =============================================================
    Name                  Type           Description
    ===================== ============== =============================================================
    Timeouts              Counter        Number of timeouts encountered.
    Failures              Counter        Number of transaction failures encountered.
    |nbsp|                Latency        Transaction read latency.
    Unavailables          Counter        Number of unavailable exceptions encountered.
    UnfinishedCommit      Counter        Number of transactions that were committed on read.
    ConditionNotMet       Counter        Number of transactions whose preconditions did not match current values.
    ContentionHistogram   Histogram      How many contended reads were encountered.
    ===================== ============== =============================================================

:RequestType: CASWrite
:Description: Metrics related to transactional write requests.
:Metrics:
    ===================== ============== =============================================================
    Name                  Type           Description
    ===================== ============== =============================================================
    Timeouts              Counter        Number of timeouts encountered.
    Failures              Counter        Number of transaction failures encountered.
    |nbsp|                Latency        Transaction write latency.
    UnfinishedCommit      Counter        Number of transactions that were committed on write.
    ConditionNotMet       Counter        Number of transactions whose preconditions did not match current values.
    ContentionHistogram   Histogram      How many contended writes were encountered.
    ===================== ============== =============================================================


:RequestType: Read
:Description: Metrics related to standard read requests.
:Metrics:
    ===================== ============== =============================================================
    Name                  Type           Description
    ===================== ============== =============================================================
    Timeouts              Counter        Number of timeouts encountered.
    Failures              Counter        Number of read failures encountered.
    |nbsp|                Latency        Read latency.
    Unavailables          Counter        Number of unavailable exceptions encountered.
    ===================== ============== =============================================================

:RequestType: RangeSlice
:Description: Metrics related to token range read requests.
:Metrics:
    ===================== ============== =============================================================
    Name                  Type           Description
    ===================== ============== =============================================================
    Timeouts              Counter        Number of timeouts encountered.
    Failures              Counter        Number of range query failures encountered.
    |nbsp|                Latency        Range query latency.
    Unavailables          Counter        Number of unavailable exceptions encountered.
    ===================== ============== =============================================================

:RequestType: Write
:Description: Metrics related to regular write requests.
:Metrics:
    ===================== ============== =============================================================
    Name                  Type           Description
    ===================== ============== =============================================================
    Timeouts              Counter        Number of timeouts encountered.
    Failures              Counter        Number of write failures encountered.
    |nbsp|                Latency        Write latency.
    Unavailables          Counter        Number of unavailable exceptions encountered.
    ===================== ============== =============================================================


:RequestType: ViewWrite
:Description: Metrics related to materialized view write requests.
:Metrics:
    ===================== ============== =============================================================
    Timeouts              Counter        Number of timeouts encountered.
    Failures              Counter        Number of transaction failures encountered.
    Unavailables          Counter        Number of unavailable exceptions encountered.
    ViewReplicasAttempted Counter        Total number of attempted view replica writes.
    ViewReplicasSuccess   Counter        Total number of succeeded view replica writes.
    ViewPendingMutations  Gauge          ViewReplicasAttempted - ViewReplicasSuccess.
    ViewWriteLatency      Timer          Time between when mutation is applied to base table and when CL.ONE is achieved on view.
    ===================== ============== =============================================================

Cache Metrics
^^^^^^^^^^^^^

Cassandra caches have metrics to track their effectiveness, though the ``Table Metrics`` might be more useful.
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Cache..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Cache scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Cache capacity in bytes. -Entries Gauge Total number of cache entries. -FifteenMinuteCacheHitRate Gauge 15m cache hit rate. -FiveMinuteCacheHitRate Gauge 5m cache hit rate. -OneMinuteCacheHitRate Gauge 1m cache hit rate. -HitRate Gauge All time cache hit rate. -Hits Meter Total number of cache hits. -Misses Meter Total number of cache misses. -MissLatency Timer Latency of misses. -Requests Gauge Total number of cache requests. -Size Gauge Total size of occupied cache, in bytes. -========================== ============== =========== - -The following caches are covered: - -============================ =========== -Name Description -============================ =========== -CounterCache Keeps hot counters in memory for performance. -ChunkCache In process uncompressed page cache. -KeyCache Cache for partition to sstable offsets. -RowCache Cache for rows kept in memory. -============================ =========== - -.. NOTE:: - Misses and MissLatency are only defined for the ChunkCache - -CQL Metrics -^^^^^^^^^^^ - -Metrics specific to CQL prepared statement caching. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CQL.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CQL name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -PreparedStatementsCount Gauge Number of cached prepared statements. -PreparedStatementsEvicted Counter Number of prepared statements evicted from the prepared statement cache -PreparedStatementsExecuted Counter Number of prepared statements executed. -RegularStatementsExecuted Counter Number of **non** prepared statements executed. -PreparedStatementsRatio Gauge Percentage of statements that are prepared vs unprepared. -========================== ============== =========== - - -DroppedMessage Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by ``Hinted Handoff`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.DroppedMessages..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=DroppedMetrics scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CrossNodeDroppedLatency Timer The dropped latency across nodes. -InternalDroppedLatency Timer The dropped latency within node. -Dropped Meter Number of dropped messages. -========================== ============== =========== - -The different types of messages tracked are: - -============================ =========== -Name Description -============================ =========== -BATCH_STORE Batchlog write -BATCH_REMOVE Batchlog cleanup (after succesfully applied) -COUNTER_MUTATION Counter writes -HINT Hint replay -MUTATION Regular writes -READ Regular reads -READ_REPAIR Read repair -PAGED_SLICE Paged read -RANGE_SLICE Token range read -REQUEST_RESPONSE RPC Callbacks -_TRACE Tracing writes -============================ =========== - -Streaming Metrics -^^^^^^^^^^^^^^^^^ - -Metrics reported during ``Streaming`` operations, such as repair, bootstrap, rebuild. 

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

Reported name format:

**Metric Name**
    ``org.apache.cassandra.metrics.Streaming..``

**JMX MBean**
    ``org.apache.cassandra.metrics:type=Streaming scope= name=``

========================== ============== ===========
Name                       Type           Description
========================== ============== ===========
IncomingBytes              Counter        Number of bytes streamed to this node from the peer.
OutgoingBytes              Counter        Number of bytes streamed to the peer endpoint from this node.
========================== ============== ===========


Compaction Metrics
^^^^^^^^^^^^^^^^^^

Metrics specific to ``Compaction`` work.

Reported name format:

**Metric Name**
    ``org.apache.cassandra.metrics.Compaction.``

**JMX MBean**
    ``org.apache.cassandra.metrics:type=Compaction name=``

========================== ======================================== ===============================================
Name                       Type                                     Description
========================== ======================================== ===============================================
BytesCompacted             Counter                                  Total number of bytes compacted since server [re]start.
PendingTasks               Gauge                                    Estimated number of compactions remaining to perform.
CompletedTasks             Gauge                                    Number of completed compactions since server [re]start.
TotalCompactionsCompleted  Meter                                    Throughput of completed compactions since server [re]start.
PendingTasksByTableName    Gauge<Map<String, Map<String, Integer>>> Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in ``Table Metrics``.
========================== ======================================== ===============================================

CommitLog Metrics
^^^^^^^^^^^^^^^^^

Metrics specific to the ``CommitLog``.

Reported name format:

**Metric Name**
    ``org.apache.cassandra.metrics.CommitLog.``

**JMX MBean**
    ``org.apache.cassandra.metrics:type=CommitLog name=``

========================== ============== ===========
Name                       Type           Description
========================== ============== ===========
CompletedTasks             Gauge          Total number of commit log messages written since [re]start.
PendingTasks               Gauge          Number of commit log messages written but yet to be fsync'd.
TotalCommitLogSize         Gauge          Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocation Timer          Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommit            Timer          The time spent waiting on CL fsync; for Periodic this only occurs when the sync is lagging its sync interval.
========================== ============== ===========

Storage Metrics
^^^^^^^^^^^^^^^

Metrics specific to the storage engine.

Reported name format:

**Metric Name**
    ``org.apache.cassandra.metrics.Storage.``

**JMX MBean**
    ``org.apache.cassandra.metrics:type=Storage name=``

========================== ============== ===========
Name                       Type           Description
========================== ============== ===========
Exceptions                 Counter        Number of internal exceptions caught. Under normal conditions this should be zero.
Load                       Counter        Size, in bytes, of the on disk data size this node manages.
TotalHints                 Counter        Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgress       Counter        Number of hints attempting to be sent currently.
========================== ============== ===========

HintedHandoff Metrics
^^^^^^^^^^^^^^^^^^^^^

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in ``Storage Metrics``.

These metrics include the peer endpoint **in the metric name**.

Reported name format:

**Metric Name**
    ``org.apache.cassandra.metrics.HintedHandOffManager.``

**JMX MBean**
    ``org.apache.cassandra.metrics:type=HintedHandOffManager name=``

=========================== ============== ===========
Name                        Type           Description
=========================== ============== ===========
Hints_created-              Counter        Number of hints on disk for this peer.
Hints_not_stored-           Counter        Number of hints not stored for this peer, due to being down past the configured hint window.
=========================== ============== ===========

SSTable Index Metrics
^^^^^^^^^^^^^^^^^^^^^

Metrics specific to the SSTable index metadata.

Reported name format:

**Metric Name**
    ``org.apache.cassandra.metrics.Index..RowIndexEntry``

**JMX MBean**
    ``org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=``

=========================== ============== ===========
Name                        Type           Description
=========================== ============== ===========
IndexedEntrySize            Histogram      Histogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCount              Histogram      Histogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGets               Histogram      Histogram of the number of index seeks performed per SSTable.
=========================== ============== ===========

BufferPool Metrics
^^^^^^^^^^^^^^^^^^

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC
lower by recycling on- and off-heap buffers.

Reported name format:

**Metric Name**
    ``org.apache.cassandra.metrics.BufferPool.``

**JMX MBean**
    ``org.apache.cassandra.metrics:type=BufferPool name=``

=========================== ============== ===========
Name                        Type           Description
=========================== ============== ===========
Size                        Gauge          Size, in bytes, of the managed buffer pool.
Misses                      Meter          The rate of misses in the pool. The higher this is, the more allocations are incurred.
=========================== ============== ===========


Client Metrics
^^^^^^^^^^^^^^

Metrics specific to client management.

Reported name format:

**Metric Name**
    ``org.apache.cassandra.metrics.Client.``

**JMX MBean**
    ``org.apache.cassandra.metrics:type=Client name=``

=========================== ============== ===========
Name                        Type           Description
=========================== ============== ===========
connectedNativeClients      Counter        Number of clients connected to this node's native protocol server
connectedThriftClients      Counter        Number of clients connected to this node's thrift protocol server
=========================== ============== ===========

JVM Metrics
^^^^^^^^^^^

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using `Metric Reporters`_.
- -BufferPool -++++++++++ - -**Metric Name** - ``jvm.buffers..`` - -**JMX MBean** - ``java.nio:type=BufferPool name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Estimated total capacity of the buffers in this pool -Count Gauge Estimated number of buffers in the pool -Used Gauge Estimated memory that the Java virtual machine is using for this buffer pool -========================== ============== =========== - -FileDescriptorRatio -+++++++++++++++++++ - -**Metric Name** - ``jvm.fd.`` - -**JMX MBean** - ``java.lang:type=OperatingSystem name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Usage Ratio Ratio of used to total file descriptors -========================== ============== =========== - -GarbageCollector -++++++++++++++++ - -**Metric Name** - ``jvm.gc..`` - -**JMX MBean** - ``java.lang:type=GarbageCollector name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Count Gauge Total number of collections that have occurred -Time Gauge Approximate accumulated collection elapsed time in milliseconds -========================== ============== =========== - -Memory -++++++ - -**Metric Name** - ``jvm.memory..`` - -**JMX MBean** - ``java.lang:type=Memory`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -MemoryPool -++++++++++ - -**Metric Name** - ``jvm.memory.pools..`` - -**JMX MBean** - ``java.lang:type=MemoryPool name=`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -JMX -^^^ - -Any JMX based client can access metrics from cassandra. - -If you wish to access JMX metrics over http it's possible to download `Mx4jTool `__ and -place ``mx4j-tools.jar`` into the classpath. On startup you will see in the log:: - - HttpAdaptor version 3.0.2 started on port 8081 - -To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -``conf/cassandra-env.sh`` and uncomment:: - - #MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0" - - #MX4J_PORT="-Dmx4jport=8081" - - -Metric Reporters -^^^^^^^^^^^^^^^^ - -As mentioned at the top of this section on monitoring the Cassandra metrics can be exported to a number of monitoring -system a number of `built in `__ and `third party -`__ reporter plugins. - -The configuration of these plugins is managed by the `metrics reporter config project -`__. There is a sample configuration file located at -``conf/metrics-reporter-config-sample.yaml``. 
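
As a rough illustration only - the bundled sample file is the authoritative reference for the schema - a Graphite
reporter entry in such a configuration file might look something like the following, where the host, port, period and
metric name pattern are all placeholders to adapt to your environment::

    graphite:
      -
        period: 60
        timeunit: 'SECONDS'
        hosts:
          - host: 'graphite.example.com'
            port: 2003
        predicate:
          patterns:
            - '^org.apache.cassandra.metrics.+'
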
- -Once configured, you simply start cassandra with the flag -``-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml``. The specified .yaml file plus any 3rd party -reporter jars must all be in Cassandra's classpath. diff --git a/src/doc/3.11.7/_sources/operating/read_repair.rst.txt b/src/doc/3.11.7/_sources/operating/read_repair.rst.txt deleted file mode 100644 index 0e52bf523..000000000 --- a/src/doc/3.11.7/_sources/operating/read_repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Read repair ------------ - -.. todo:: todo diff --git a/src/doc/3.11.7/_sources/operating/repair.rst.txt b/src/doc/3.11.7/_sources/operating/repair.rst.txt deleted file mode 100644 index 97d8ce8ba..000000000 --- a/src/doc/3.11.7/_sources/operating/repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Repair ------- - -.. todo:: todo diff --git a/src/doc/3.11.7/_sources/operating/security.rst.txt b/src/doc/3.11.7/_sources/operating/security.rst.txt deleted file mode 100644 index dfcd9e6c5..000000000 --- a/src/doc/3.11.7/_sources/operating/security.rst.txt +++ /dev/null @@ -1,410 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -.. highlight:: none - -Security --------- - -There are three main components to the security features provided by Cassandra: - -- TLS/SSL encryption for client and inter-node communication -- Client authentication -- Authorization - -TLS/SSL Encryption -^^^^^^^^^^^^^^^^^^ -Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently. - -In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can -be overidden using the settings in ``cassandra.yaml``, but this is not recommended unless there are policies in place -which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be -updated. - -FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See `the java document on FIPS `__ -for more details. - -For information on generating the keystore and truststore files used in SSL communications, see the -`java documentation on creating keystores `__ - -Inter-node Encryption -~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing inter-node encryption are found in ``cassandra.yaml`` in the ``server_encryption_options`` -section. To enable inter-node encryption, change the ``internode_encryption`` setting from its default value of ``none`` -to one value from: ``rack``, ``dc`` or ``all``. - -Client to Node Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing client to node encryption are found in ``cassandra.yaml`` in the ``client_encryption_options`` -section. There are two primary toggles here for enabling encryption, ``enabled`` and ``optional``. - -- If neither is set to ``true``, client connections are entirely unencrypted. -- If ``enabled`` is set to ``true`` and ``optional`` is set to ``false``, all client connections must be secured. -- If both options are set to ``true``, both encrypted and unencrypted connections are supported using the same port. - Client connections using encryption with this configuration will be automatically detected and handled by the server. - -As an alternative to the ``optional`` setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set ``optional`` to false and use the ``native_transport_port_ssl`` -setting in ``cassandra.yaml`` to specify the port to be used for secure client communication. - -.. _operation-roles: - -Roles -^^^^^ - -Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -``role_manager`` setting in ``cassandra.yaml``. The default setting uses ``CassandraRoleManager``, an implementation -which stores role information in the tables of the ``system_auth`` keyspace. - -See also the :ref:`CQL documentation on roles `. - -Authentication -^^^^^^^^^^^^^^ - -Authentication is pluggable in Cassandra and is configured using the ``authenticator`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. 
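(A brief aside on the client-to-node encryption options described above: once ``client_encryption_options`` requires encryption, client applications need a matching TLS configuration of their own. The following is only a hedged sketch using the DataStax Python driver; the ``ssl_context`` argument, contact point and certificate path are illustrative assumptions rather than anything prescribed by this documentation.)

::

    import ssl
    from cassandra.cluster import Cluster

    # Trust the CA that signed the node certificates; the path is a placeholder.
    ssl_context = ssl.create_default_context(cafile='/path/to/rootca.crt')
    # Hostname verification assumes the node certificate matches the contact point;
    # adjust the contact points (or the certificate) accordingly in a real cluster.

    # Recent driver versions accept ssl_context directly; older ones use ssl_options.
    cluster = Cluster(['127.0.0.1'], port=9042, ssl_context=ssl_context)
    session = cluster.connect()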
- -By default, Cassandra is configured with ``AllowAllAuthenticator`` which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions. - -The default distribution also includes ``PasswordAuthenticator``, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication. - -.. _password-authentication: - -Enabling Password Authentication -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster. - -Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps: - -1. Open a ``cqlsh`` session and change the replication factor of the ``system_auth`` keyspace. By default, this keyspace - uses ``SimpleReplicationStrategy`` and a ``replication_factor`` of 1. It is recommended to change this for any - non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to - configure a replication factor of 3 to 5 per-DC. - -:: - - ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3}; - -2. Edit ``cassandra.yaml`` to change the ``authenticator`` option like so: - -:: - - authenticator: PasswordAuthenticator - -3. Restart the node. - -4. Open a new ``cqlsh`` session using the credentials of the default superuser: - -:: - - cqlsh -u cassandra -p cassandra - -5. During login, the credentials for the default superuser are read with a consistency level of ``QUORUM``, whereas - those for all other users (including superusers) are read at ``LOCAL_ONE``. In the interests of performance and - availability, as well as security, operators should create another superuser and disable the default one. This step - is optional, but highly recommended. While logged in as the default superuser, create another superuser role which - can be used to bootstrap further configuration. - -:: - - # create a new superuser - CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super'; - -6. Start a new cqlsh session, this time logging in as the new_superuser and disable the default superuser. - -:: - - ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false; - -7. Finally, set up the roles and credentials for your application users with :ref:`CREATE ROLE ` - statements. - -At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. 
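(As an aside, the client-side pre-configuration described above might, for an application using the DataStax Python driver, look roughly like the hedged sketch below. The ``dba``/``super`` credentials are simply the ones created in step 5; the driver only presents them once the server begins requesting authentication, which is why configuring them ahead of time is safe.)

::

    from cassandra.cluster import Cluster
    from cassandra.auth import PlainTextAuthProvider

    # Credentials can be configured before authentication is enabled server-side;
    # they are only sent when the server actually asks for them.
    auth_provider = PlainTextAuthProvider(username='dba', password='super')

    cluster = Cluster(['127.0.0.1'], auth_provider=auth_provider)
    session = cluster.connect()
    print(session.execute("SELECT release_version FROM system.local").one())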
Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster. - -Note that using ``PasswordAuthenticator`` also requires the use of :ref:`CassandraRoleManager `. - -See also: :ref:`setting-credentials-for-internal-authentication`, :ref:`CREATE ROLE `, -:ref:`ALTER ROLE `, :ref:`ALTER KEYSPACE ` and :ref:`GRANT PERMISSION -`, - -Authorization -^^^^^^^^^^^^^ - -Authorization is pluggable in Cassandra and is configured using the ``authorizer`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthorizer`` which performs no checking and so effectively grants all -permissions to all roles. This must be used if ``AllowAllAuthenticator`` is the configured authenticator. - -The default distribution also includes ``CassandraAuthorizer``, which does implement full permissions management -functionality and stores its data in Cassandra system tables. - -Enabling Internal Authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests. - -The following assumes that authentication has already been enabled via the process outlined in -:ref:`password-authentication`. Perform these steps to enable internal authorization across the cluster: - -1. On the selected node, edit ``cassandra.yaml`` to change the ``authorizer`` option like so: - -:: - - authorizer: CassandraAuthorizer - -2. Restart the node. - -3. Open a new ``cqlsh`` session using the credentials of a role with superuser credentials: - -:: - - cqlsh -u dba -p super - -4. Configure the appropriate access privileges for your clients using `GRANT PERMISSION `_ - statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so - disruption to clients is avoided. - -:: - - GRANT SELECT ON ks.t1 TO db_user; - -5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node - restarts and clients reconnect, the enforcement of the granted permissions will begin. - -See also: :ref:`GRANT PERMISSION `, `GRANT ALL ` and :ref:`REVOKE PERMISSION -` - -Caching -^^^^^^^ - -Enabling authentication and authorization places additional load on the cluster by frequently reading from the -``system_auth`` tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from ``cassandra.yaml`` -or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from ``cassandra.yaml`` when the node is restarted. - -Each cache has 3 options which can be set: - -Validity Period - Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache. -Refresh Rate - Controls the rate at which background reads are performed to pick up any changes to the underlying data. 
While these - async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a - shorter time than the validity period. -Max Entries - Controls the upper bound on cache size. - -The naming for these options in ``cassandra.yaml`` follows the convention: - -* ``<type>_validity_in_ms`` -* ``<type>_update_interval_in_ms`` -* ``<type>_cache_max_entries`` - -Where ``<type>`` is one of ``credentials``, ``permissions``, or ``roles``. - -As mentioned, these are also exposed via JMX in the MBeans under the ``org.apache.cassandra.auth`` domain. - -JMX access ^^^^^^^^^^ - -Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first is based on standard JMX security and the second integrates more closely with -Cassandra's own auth subsystem. - -The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to change the ``LOCAL_JMX`` setting to ``yes``. Under the -standard configuration, when remote JMX connections are enabled, :ref:`standard JMX authentication ` -is also switched on. - -Note that by default, local-only connections are not subject to authentication, but this can be enabled. - -If enabling remote connections, it is recommended to also use :ref:`SSL ` connections. - -Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as :ref:`nodetool `, are -correctly configured and working as expected. - -.. _standard-jmx-auth: - -Standard JMX Auth ~~~~~~~~~~~~~~~~~ - -Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -``cassandra-env.sh`` by the line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -Edit the password file to add username/password pairs: - -:: - - jmx_user jmx_password - -Secure the credentials file so that only the user running the Cassandra process can read it: - -:: - - $ chown cassandra:cassandra /etc/cassandra/jmxremote.password - $ chmod 400 /etc/cassandra/jmxremote.password - -Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in ``cassandra-env.sh``: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -Then edit the access file to grant your JMX user readwrite permission: - -:: - - jmx_user readwrite - -Cassandra must be restarted to pick up the new settings. - -See also: `Using File-Based Password Authentication In JMX -`__ - - -Cassandra Integrated Auth ~~~~~~~~~~~~~~~~~~~~~~~~~ - -An alternative to the out-of-the-box JMX auth is to use Cassandra's own authentication and/or authorization providers -for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not -available until `after` a node has joined the ring, because the auth subsystem is not fully configured until that point. -However, it is often critical for monitoring purposes to have JMX access particularly during bootstrap.
So it is -recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, -to switch to integrated auth once the node has joined the ring and initial setup is complete. - -With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates -can be managed centrally using just ``cqlsh``. Furthermore, fine grained control over exactly which operations are -permitted on particular MBeans can be acheived via :ref:`GRANT PERMISSION `. - -To enable integrated authentication, edit ``cassandra-env.sh`` to uncomment these lines: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" - #JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" - -And disable the JMX standard auth by commenting this line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -To enable integrated authorization, uncomment this line: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" - -Check standard access control is off by ensuring this line is commented out: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as: - -:: - - CREATE ROLE jmx WITH LOGIN = false; - GRANT SELECT ON ALL MBEANS TO jmx; - GRANT DESCRIBE ON ALL MBEANS TO jmx; - GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx; - GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx; - - # Grant the jmx role to one with login permissions so that it can access the JMX tooling - CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false; - GRANT jmx TO ks_user; - -Fine grained access control to individual MBeans is also supported: - -:: - - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner; - -This permits the ``ks_user`` role to invoke methods on the MBean representing a single table in ``test_keyspace``, while -granting the same permission for all table level MBeans in that keyspace to the ``ks_owner`` role. - -Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered. - -See also: :ref:`Permissions `. - -.. _jmx-with-ssl: - -JMX With SSL -~~~~~~~~~~~~ - -JMX SSL configuration is controlled by a number of system properties, some of which are optional. 
To turn on SSL, edit -the relevant lines in ``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to uncomment and set the values of these -properties as required: - -``com.sun.management.jmxremote.ssl`` - set to true to enable SSL -``com.sun.management.jmxremote.ssl.need.client.auth`` - set to true to enable validation of client certificates -``com.sun.management.jmxremote.registry.ssl`` - enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub -``com.sun.management.jmxremote.ssl.enabled.protocols`` - by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is - not usually necessary and using the defaults is the preferred option. -``com.sun.management.jmxremote.ssl.enabled.cipher.suites`` - by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that - this is not usually necessary and using the defaults is the preferred option. -``javax.net.ssl.keyStore`` - set the path on the local filesystem of the keystore containing server private keys and public certificates -``javax.net.ssl.keyStorePassword`` - set the password of the keystore file -``javax.net.ssl.trustStore`` - if validation of client certificates is required, use this property to specify the path of the truststore containing - the public certificates of trusted clients -``javax.net.ssl.trustStorePassword`` - set the password of the truststore file - -See also: `Oracle Java7 Docs `__, -`Monitor Java with JMX `__ diff --git a/src/doc/3.11.7/_sources/operating/snitch.rst.txt b/src/doc/3.11.7/_sources/operating/snitch.rst.txt deleted file mode 100644 index faea0b3e1..000000000 --- a/src/doc/3.11.7/_sources/operating/snitch.rst.txt +++ /dev/null @@ -1,78 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Snitch ------- - -In cassandra, the snitch has two functions: - -- it teaches Cassandra enough about your network topology to route requests efficiently. -- it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping - machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same - "rack" (which may not actually be a physical location). - -Dynamic snitching -^^^^^^^^^^^^^^^^^ - -The dynamic snitch monitor read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is -configured with the following properties on ``cassandra.yaml``: - -- ``dynamic_snitch``: whether the dynamic snitch should be enabled or disabled. -- ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the more expensive part of host score - calculation. 
-- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero and read_repair_chance is < 1.0, this will allow - 'pinning' of replicas to hosts in order to increase cache capacity. -- ``dynamic_snitch_badness_threshold``: The badness threshold will control how much worse the pinned host has to be - before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a - percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned - host was 20% worse than the fastest. - -Snitch classes -^^^^^^^^^^^^^^ - -The ``endpoint_snitch`` parameter in ``cassandra.yaml`` should be set to the class that implements -``IEndpointSnitch``, which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the snitch implementations: - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via gossip. If ``cassandra-topology.properties`` exists, - it is used as a fallback, allowing migration from the PropertyFileSnitch. - -SimpleSnitch - Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -PropertyFileSnitch - Proximity is determined by rack and data center, which are explicitly configured in - ``cassandra-topology.properties``. - -Ec2Snitch - Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. - The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this - will not work across multiple regions. - -Ec2MultiRegionSnitch - Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the - public IP as well). You will need to open the ``storage_port`` or ``ssl_storage_port`` on the public IP firewall - (for intra-Region traffic, Cassandra will switch to the private IP after establishing a connection). - -RackInferringSnitch - Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each - node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an - example of writing a custom Snitch class and is provided in that spirit. diff --git a/src/doc/3.11.7/_sources/operating/topo_changes.rst.txt b/src/doc/3.11.7/_sources/operating/topo_changes.rst.txt deleted file mode 100644 index c42708e02..000000000 --- a/src/doc/3.11.7/_sources/operating/topo_changes.rst.txt +++ /dev/null @@ -1,124 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -..
See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _topology-changes: - -Adding, replacing, moving and removing nodes --------------------------------------------- - -Bootstrap -^^^^^^^^^ - -Adding new nodes is called "bootstrapping". The ``num_tokens`` parameter will define the amount of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for. - -Token allocation -~~~~~~~~~~~~~~~~ - -With the default token allocation algorithm the new node will pick ``num_tokens`` random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher amount of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead. - -On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option ``-Dcassandra.allocate_tokens_for_keyspace=``, where -```` is the keyspace from which the algorithm can find the load information to optimize token assignment for. - -Manual token assignment -""""""""""""""""""""""" - -You may specify a comma-separated list of tokens manually with the ``initial_token`` ``cassandra.yaml`` parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens. - -Range streaming -~~~~~~~~~~~~~~~~ - -After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state. - -In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag ``-Dcassandra.consistent.rangemovement=false``. - -Resuming failed/hanged bootstrap -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On 2.2+, if the bootstrap process fails, it's possible to resume bootstrap from the previous saved state by calling -``nodetool bootstrap resume``. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag -``-Dcassandra.reset_bootstrap_progress=true``. - -On lower versions, when the bootstrap proces fails it is recommended to wipe the node (remove all the data), and restart -the bootstrap process again. - -Manual bootstrapping -~~~~~~~~~~~~~~~~~~~~ - -It's possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -``auto_bootstrap: false``. This may be useful when restoring a node from a backup or creating a new data-center. - -Removing nodes -^^^^^^^^^^^^^^ - -You can take a node out of the cluster with ``nodetool decommission`` to a live node, or ``nodetool removenode`` (to any -other machine) to remove a dead one. 
This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas. - -No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually. - -Moving nodes -^^^^^^^^^^^^ - -When ``num_tokens: 1`` it's possible to move the node position in the ring with ``nodetool move``. Moving is both more -convenient and more efficient than a decommission followed by a bootstrap. After moving a node, ``nodetool cleanup`` should be -run to remove any unnecessary data. - -Replacing a dead node -^^^^^^^^^^^^^^^^^^^^^ - -In order to replace a dead node, start cassandra with the JVM startup flag -``-Dcassandra.replace_address_first_boot=<dead_node_ip>``. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node as down. - -The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. The main difference -from normal bootstrapping of a new node is that this new node will not accept any writes during this phase. - -Once the bootstrapping is complete the node will be marked "UP"; we rely on hinted handoff to make this node -consistent (since it does not accept writes from the start of the bootstrap). - -.. Note:: If the replacement process takes longer than ``max_hint_window_in_ms`` you **MUST** run repair to make the - replaced node consistent again, since it missed ongoing writes during bootstrapping. - -Monitoring progress -^^^^^^^^^^^^^^^^^^^ - -Bootstrap, replace, move and remove progress can be monitored using ``nodetool netstats`` which will show the progress -of the streaming operations. - -Cleanup data after range movements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As a safety measure, Cassandra does not automatically remove data from nodes that "lose" part of their token range due -to a range movement operation (bootstrap, move, replace). Run ``nodetool cleanup`` on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node. diff --git a/src/doc/3.11.7/_sources/tools/cqlsh.rst.txt b/src/doc/3.11.7/_sources/tools/cqlsh.rst.txt deleted file mode 100644 index 45e2db8fc..000000000 --- a/src/doc/3.11.7/_sources/tools/cqlsh.rst.txt +++ /dev/null @@ -1,455 +0,0 @@ -.. highlight:: none - -.. _cqlsh: - -cqlsh: the CQL shell -------------------- - -cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line. - - -Compatibility -^^^^^^^^^^^^^ - -cqlsh is compatible with Python 2.7. - -In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported. - - -Optional Dependencies -^^^^^^^^^^^^^^^^^^^^^ - -cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.
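(For example, the ``pytz`` dependency described below is what allows timestamps to be rendered in a timezone other than UTC. The following is a hedged illustration of that same conversion, done with the Python driver rather than cqlsh itself; the contact point and timezone name are illustrative only.)

::

    import pytz
    from cassandra.cluster import Cluster

    session = Cluster(['127.0.0.1']).connect()

    # The driver returns timestamps as naive datetimes in UTC; pytz converts them
    # for display, which is essentially what cqlsh's timezone option relies on.
    row = session.execute("SELECT toTimestamp(now()) AS ts FROM system.local").one()
    melbourne = pytz.timezone('Australia/Melbourne')
    print(pytz.utc.localize(row.ts).astimezone(melbourne).isoformat())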
- -pytz -~~~~ - -By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the `pytz `__ library must be installed. See the ``timezone`` option in cqlshrc_ for -specifying a timezone to use. - -cython -~~~~~~ - -The performance of cqlsh's ``COPY`` operations can be improved by installing `cython `__. This will -compile the python modules that are central to the performance of ``COPY``. - -cqlshrc -^^^^^^^ - -The ``cqlshrc`` file holds configuration options for cqlsh. By default this is in the user's home directory at -``~/.cassandra/cqlsh``, but a custom location can be specified with the ``--cqlshrc`` option. - -Example config values and documentation can be found in the ``conf/cqlshrc.sample`` file of a tarball installation. You -can also view the latest version of `cqlshrc online `__. - - -Command Line Options -^^^^^^^^^^^^^^^^^^^^ - -Usage: - -``cqlsh [options] [host [port]]`` - -Options: - -``-C`` ``--color`` - Force color output - -``--no-color`` - Disable color output - -``--browser`` - Specify the browser to use for displaying cqlsh help. This can be one of the `supported browser names - `__ (e.g. ``firefox``) or a browser path followed by ``%s`` (e.g. - ``/usr/bin/google-chrome-stable %s``). - -``--ssl`` - Use SSL when connecting to Cassandra - -``-u`` ``--user`` - Username to authenticate against Cassandra with - -``-p`` ``--password`` - Password to authenticate against Cassandra with, should - be used in conjunction with ``--user`` - -``-k`` ``--keyspace`` - Keyspace to authenticate to, should be used in conjunction - with ``--user`` - -``-f`` ``--file`` - Execute commands from the given file, then exit - -``--debug`` - Print additional debugging information - -``--encoding`` - Specify a non-default encoding for output (defaults to UTF-8) - -``--cqlshrc`` - Specify a non-default location for the ``cqlshrc`` file - -``-e`` ``--execute`` - Execute the given statement, then exit - -``--connect-timeout`` - Specify the connection timeout in seconds (defaults to 2s) - -``--request-timeout`` - Specify the request timeout in seconds (defaults to 10s) - -``-t`` ``--tty`` - Force tty mode (command prompt) - - -Special Commands -^^^^^^^^^^^^^^^^ - -In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below. - -``CONSISTENCY`` -~~~~~~~~~~~~~~~ - -`Usage`: ``CONSISTENCY `` - -Sets the consistency level for operations to follow. Valid arguments include: - -- ``ANY`` -- ``ONE`` -- ``TWO`` -- ``THREE`` -- ``QUORUM`` -- ``ALL`` -- ``LOCAL_QUORUM`` -- ``LOCAL_ONE`` -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -``SERIAL CONSISTENCY`` -~~~~~~~~~~~~~~~~~~~~~~ - -`Usage`: ``SERIAL CONSISTENCY `` - -Sets the serial consistency level for operations to follow. Valid arguments include: - -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of ``QUORUM`` (and -is successful), then a ``QUORUM`` read is guaranteed to see that write. 
But if the regular consistency level of that -write is ``ANY``, then only a read with a consistency level of ``SERIAL`` is guaranteed to see it (even a read with -consistency ``ALL`` is not guaranteed to be enough). - -``SHOW VERSION`` -~~~~~~~~~~~~~~~~ -Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:: - - cqlsh> SHOW VERSION - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - -``SHOW HOST`` -~~~~~~~~~~~~~ - -Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:: - - cqlsh> SHOW HOST - Connected to Prod_Cluster at 192.0.0.1:9042. - -``SHOW SESSION`` -~~~~~~~~~~~~~~~~ - -Pretty prints a specific tracing session. - -`Usage`: ``SHOW SESSION `` - -Example usage:: - - cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8 - - Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8 - - activity | timestamp | source | source_elapsed | client - -----------------------------------------------------------+----------------------------+-----------+----------------+----------- - Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 | 0 | 127.0.0.1 - Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 | 3843 | 127.0.0.1 - ... - - -``SOURCE`` -~~~~~~~~~~ - -Reads the contents of a file and executes each line as a CQL statement or special cqlsh command. - -`Usage`: ``SOURCE `` - -Example usage:: - - cqlsh> SOURCE '/home/thobbs/commands.cql' - -``CAPTURE`` -~~~~~~~~~~~ - -Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured. - -`Usage`:: - - CAPTURE ''; - CAPTURE OFF; - CAPTURE; - -That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation (``'~/mydir'``) is supported for referring to ``$HOME``. - -Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session. - -To stop capturing output and show it in the cqlsh session again, use ``CAPTURE OFF``. - -To inspect the current capture configuration, use ``CAPTURE`` with no arguments. - -``HELP`` -~~~~~~~~ - -Gives information about cqlsh commands. To see available topics, enter ``HELP`` without any arguments. To see help on a -topic, use ``HELP ``. Also see the ``--browser`` argument for controlling what browser is used to display help. - -``TRACING`` -~~~~~~~~~~~ - -Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed. - -`Usage`:: - - TRACING ON - TRACING OFF - -``PAGING`` -~~~~~~~~~~ - -Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it's a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once. - -`Usage`:: - - PAGING ON - PAGING OFF - PAGING - -``EXPAND`` -~~~~~~~~~~ - -Enables or disables vertical printing of rows. Enabling ``EXPAND`` is useful when many columns are fetched, or the -contents of a single column are large. - -`Usage`:: - - EXPAND ON - EXPAND OFF - -``LOGIN`` -~~~~~~~~~ - -Authenticate as a specified Cassandra user for the current session. 
- -`Usage`:: - - LOGIN [] - -``EXIT`` -~~~~~~~~~ - -Ends the current session and terminates the cqlsh process. - -`Usage`:: - - EXIT - QUIT - -``CLEAR`` -~~~~~~~~~ - -Clears the console. - -`Usage`:: - - CLEAR - CLS - -``DESCRIBE`` -~~~~~~~~~~~~ - -Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema. - -`Usage`:: - - DESCRIBE CLUSTER - DESCRIBE SCHEMA - DESCRIBE KEYSPACES - DESCRIBE KEYSPACE - DESCRIBE TABLES - DESCRIBE TABLE
- DESCRIBE INDEX - DESCRIBE MATERIALIZED VIEW - DESCRIBE TYPES - DESCRIBE TYPE - DESCRIBE FUNCTIONS - DESCRIBE FUNCTION - DESCRIBE AGGREGATES - DESCRIBE AGGREGATE - -In any of the commands, ``DESC`` may be used in place of ``DESCRIBE``. - -The ``DESCRIBE CLUSTER`` command prints the cluster name and partitioner:: - - cqlsh> DESCRIBE CLUSTER - - Cluster: Test Cluster - Partitioner: Murmur3Partitioner - -The ``DESCRIBE SCHEMA`` command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup. - -``COPY TO`` -~~~~~~~~~~~ - -Copies data from a table to a CSV file. - -`Usage`:: - - COPY
<table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...] - -If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name. - - -The ``<file name>`` should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value ``STDOUT`` (without single quotes) to print the CSV to stdout. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` ``````````````````````` - -``MAXREQUESTS`` - The maximum number of token ranges to fetch simultaneously. Defaults to 6. - -``PAGESIZE`` - The number of rows to fetch in a single page. Defaults to 1000. - -``PAGETIMEOUT`` - By default the page timeout is 10 seconds per 1000 entries - in the page size or 10 seconds if pagesize is smaller. - -``BEGINTOKEN``, ``ENDTOKEN`` - Token range to export. Defaults to exporting the full ring. - -``MAXOUTPUTSIZE`` - The maximum size of the output file measured in number of lines; - beyond this maximum the output file will be split into segments. - -1 means unlimited, and is the default. - -``ENCODING`` - The encoding used for characters. Defaults to ``utf8``. - -``COPY FROM`` ~~~~~~~~~~~~~ -Copies data from a CSV file to a table. - -`Usage`:: - - COPY
<table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...] - -If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name. - -The ``<file name>`` should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value ``STDIN`` (without single quotes) to read the -CSV data from stdin. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY FROM`` ````````````````````````` - -``INGESTRATE`` - The maximum number of rows to process per second. Defaults to 100000. - -``MAXROWS`` - The maximum number of rows to import. -1 means unlimited, and is the default. - -``SKIPROWS`` - A number of initial rows to skip. Defaults to 0. - -``SKIPCOLS`` - A comma-separated list of column names to ignore. By default, no columns are skipped. - -``MAXPARSEERRORS`` - The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default. - -``MAXINSERTERRORS`` - The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000. - -``ERRFILE`` = - A file to store all rows that could not be imported, by default this is ``import_<ks>_<table>
.err`` where ``<ks>`` is - your keyspace and ``<table>
`` is your table name. - -``MAXBATCHSIZE`` - The max number of rows inserted in a single batch. Defaults to 20. - -``MINBATCHSIZE`` - The min number of rows inserted in a single batch. Defaults to 2. - -``CHUNKSIZE`` - The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000. - -.. _shared-copy-options: - -Shared COPY Options -``````````````````` - -Options that are common to both ``COPY TO`` and ``COPY FROM``. - -``NULLVAL`` - The string placeholder for null values. Defaults to ``null``. - -``HEADER`` - For ``COPY TO``, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, - specifies whether the first line in the CSV input file contains column names. Defaults to ``false``. - -``DECIMALSEP`` - The character that is used as the decimal point separator. Defaults to ``.``. - -``THOUSANDSSEP`` - The character that is used to separate thousands. Defaults to the empty string. - -``BOOLSTYlE`` - The string literal format for boolean values. Defaults to ``True,False``. - -``NUMPROCESSES`` - The number of child worker processes to create for ``COPY`` tasks. Defaults to a max of 4 for ``COPY FROM`` and 16 - for ``COPY TO``. However, at most (num_cores - 1) processes will be created. - -``MAXATTEMPTS`` - The maximum number of failed attempts to fetch a range of data (when using ``COPY TO``) or insert a chunk of data - (when using ``COPY FROM``) before giving up. Defaults to 5. - -``REPORTFREQUENCY`` - How often status updates are refreshed, in seconds. Defaults to 0.25. - -``RATEFILE`` - An optional file to output rate statistics to. By default, statistics are not output to a file. diff --git a/src/doc/3.11.7/_sources/tools/index.rst.txt b/src/doc/3.11.7/_sources/tools/index.rst.txt deleted file mode 100644 index 5a5e4d5ae..000000000 --- a/src/doc/3.11.7/_sources/tools/index.rst.txt +++ /dev/null @@ -1,26 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Tools -=============== - -This section describes the command line tools provided with Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cqlsh - nodetool diff --git a/src/doc/3.11.7/_sources/tools/nodetool.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool.rst.txt deleted file mode 100644 index e37303110..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _nodetool: - -Nodetool --------- - -.. todo:: Try to autogenerate this from Nodetool’s help. diff --git a/src/doc/3.11.7/_sources/tools/nodetool/assassinate.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/assassinate.rst.txt deleted file mode 100644 index 0ec5dc8f4..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/assassinate.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_assassinate: - -assassinate ------------ - -Usage ---------- - -.. include:: assassinate.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/bootstrap.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/bootstrap.rst.txt deleted file mode 100644 index e280fee01..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/bootstrap.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_bootstrap: - -bootstrap ---------- - -Usage ---------- - -.. include:: bootstrap.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/cleanup.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/cleanup.rst.txt deleted file mode 100644 index 70ba8f9f7..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/cleanup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_cleanup: - -cleanup -------- - -Usage ---------- - -.. include:: cleanup.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/clearsnapshot.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/clearsnapshot.rst.txt deleted file mode 100644 index 878147511..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/clearsnapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clearsnapshot: - -clearsnapshot -------------- - -Usage ---------- - -.. include:: clearsnapshot.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/clientstats.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/clientstats.rst.txt deleted file mode 100644 index 7f5e47ab7..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/clientstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clientstats: - -clientstats ------------ - -Usage ---------- - -.. include:: clientstats.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/compact.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/compact.rst.txt deleted file mode 100644 index a26347c57..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/compact.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compact: - -compact -------- - -Usage ---------- - -.. include:: compact.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/compactionhistory.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/compactionhistory.rst.txt deleted file mode 100644 index 896433e86..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/compactionhistory.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionhistory: - -compactionhistory ------------------ - -Usage ---------- - -.. 
include:: compactionhistory.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/compactionstats.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/compactionstats.rst.txt deleted file mode 100644 index 612822535..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/compactionstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionstats: - -compactionstats ---------------- - -Usage ---------- - -.. include:: compactionstats.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/decommission.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/decommission.rst.txt deleted file mode 100644 index 8b00ff40c..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/decommission.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_decommission: - -decommission ------------- - -Usage ---------- - -.. include:: decommission.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/describecluster.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/describecluster.rst.txt deleted file mode 100644 index 55df135de..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/describecluster.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describecluster: - -describecluster ---------------- - -Usage ---------- - -.. include:: describecluster.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/describering.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/describering.rst.txt deleted file mode 100644 index 3a964e8ee..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/describering.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describering: - -describering ------------- - -Usage ---------- - -.. include:: describering.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/disableauditlog.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/disableauditlog.rst.txt deleted file mode 100644 index 4b20b0a9b..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/disableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableauditlog: - -disableauditlog ---------------- - -Usage ---------- - -.. include:: disableauditlog.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/disableautocompaction.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/disableautocompaction.rst.txt deleted file mode 100644 index 16549f202..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/disableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableautocompaction: - -disableautocompaction ---------------------- - -Usage ---------- - -.. include:: disableautocompaction.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/disablebackup.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/disablebackup.rst.txt deleted file mode 100644 index c9537e011..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/disablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebackup: - -disablebackup -------------- - -Usage ---------- - -.. include:: disablebackup.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/disablebinary.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/disablebinary.rst.txt deleted file mode 100644 index 86e981f6e..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/disablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebinary: - -disablebinary -------------- - -Usage ---------- - -.. 
include:: disablebinary.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/disablefullquerylog.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/disablefullquerylog.rst.txt deleted file mode 100644 index d68cd492e..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/disablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablefullquerylog: - -disablefullquerylog -------------------- - -Usage ---------- - -.. include:: disablefullquerylog.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/disablegossip.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/disablegossip.rst.txt deleted file mode 100644 index 76f3d064b..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/disablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablegossip: - -disablegossip -------------- - -Usage ---------- - -.. include:: disablegossip.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/disablehandoff.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/disablehandoff.rst.txt deleted file mode 100644 index 35f11334b..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/disablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehandoff: - -disablehandoff --------------- - -Usage ---------- - -.. include:: disablehandoff.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/disablehintsfordc.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/disablehintsfordc.rst.txt deleted file mode 100644 index 892c31ba5..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/disablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehintsfordc: - -disablehintsfordc ------------------ - -Usage ---------- - -.. include:: disablehintsfordc.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/disableoldprotocolversions.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/disableoldprotocolversions.rst.txt deleted file mode 100644 index 09d8a150b..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/disableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableoldprotocolversions: - -disableoldprotocolversions --------------------------- - -Usage ---------- - -.. include:: disableoldprotocolversions.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/drain.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/drain.rst.txt deleted file mode 100644 index 03093ac7c..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/drain.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_drain: - -drain ------ - -Usage ---------- - -.. include:: drain.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/enableauditlog.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/enableauditlog.rst.txt deleted file mode 100644 index 7936c7a86..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/enableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableauditlog: - -enableauditlog --------------- - -Usage ---------- - -.. include:: enableauditlog.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/enableautocompaction.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/enableautocompaction.rst.txt deleted file mode 100644 index 2ddf60f8e..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/enableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableautocompaction: - -enableautocompaction --------------------- - -Usage ---------- - -.. 
include:: enableautocompaction.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/enablebackup.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/enablebackup.rst.txt deleted file mode 100644 index 9a06c6669..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/enablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebackup: - -enablebackup ------------- - -Usage ---------- - -.. include:: enablebackup.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/enablebinary.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/enablebinary.rst.txt deleted file mode 100644 index 5b1c6da72..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/enablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebinary: - -enablebinary ------------- - -Usage ---------- - -.. include:: enablebinary.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/enablefullquerylog.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/enablefullquerylog.rst.txt deleted file mode 100644 index ec871c283..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/enablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablefullquerylog: - -enablefullquerylog ------------------- - -Usage ---------- - -.. include:: enablefullquerylog.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/enablegossip.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/enablegossip.rst.txt deleted file mode 100644 index ae66186ca..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/enablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablegossip: - -enablegossip ------------- - -Usage ---------- - -.. include:: enablegossip.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/enablehandoff.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/enablehandoff.rst.txt deleted file mode 100644 index d764c3a9a..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/enablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehandoff: - -enablehandoff -------------- - -Usage ---------- - -.. include:: enablehandoff.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/enablehintsfordc.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/enablehintsfordc.rst.txt deleted file mode 100644 index 6c42087c3..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/enablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehintsfordc: - -enablehintsfordc ----------------- - -Usage ---------- - -.. include:: enablehintsfordc.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/enableoldprotocolversions.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/enableoldprotocolversions.rst.txt deleted file mode 100644 index e44dc377a..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/enableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableoldprotocolversions: - -enableoldprotocolversions -------------------------- - -Usage ---------- - -.. include:: enableoldprotocolversions.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/failuredetector.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/failuredetector.rst.txt deleted file mode 100644 index 25d02b04a..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/failuredetector.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_failuredetector: - -failuredetector ---------------- - -Usage ---------- - -.. 
include:: failuredetector.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/flush.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/flush.rst.txt deleted file mode 100644 index 45e9b2c5e..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/flush.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_flush: - -flush ------ - -Usage ---------- - -.. include:: flush.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/garbagecollect.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/garbagecollect.rst.txt deleted file mode 100644 index 3af605aad..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/garbagecollect.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_garbagecollect: - -garbagecollect --------------- - -Usage ---------- - -.. include:: garbagecollect.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/gcstats.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/gcstats.rst.txt deleted file mode 100644 index 3e4b45930..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/gcstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gcstats: - -gcstats -------- - -Usage ---------- - -.. include:: gcstats.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 6f56f7d6d..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getbatchlogreplaythrottle: - -getbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: getbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getcompactionthreshold.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getcompactionthreshold.rst.txt deleted file mode 100644 index e17f4b2e4..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthreshold: - -getcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: getcompactionthreshold.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getcompactionthroughput.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getcompactionthroughput.rst.txt deleted file mode 100644 index ef41115ee..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthroughput: - -getcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: getcompactionthroughput.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getconcurrency.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getconcurrency.rst.txt deleted file mode 100644 index 9b0373480..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrency: - -getconcurrency --------------- - -Usage ---------- - -.. include:: getconcurrency.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getconcurrentcompactors.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getconcurrentcompactors.rst.txt deleted file mode 100644 index 8e137e0ed..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentcompactors: - -getconcurrentcompactors ------------------------ - -Usage ---------- - -.. 
include:: getconcurrentcompactors.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt deleted file mode 100644 index e113d74c5..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentviewbuilders: - -getconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: getconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getendpoints.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getendpoints.rst.txt deleted file mode 100644 index ac0d43c7a..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getendpoints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getendpoints: - -getendpoints ------------- - -Usage ---------- - -.. include:: getendpoints.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt deleted file mode 100644 index abdd7e7f0..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getinterdcstreamthroughput: - -getinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: getinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getlogginglevels.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getlogginglevels.rst.txt deleted file mode 100644 index 9936e8d45..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getlogginglevels.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getlogginglevels: - -getlogginglevels ----------------- - -Usage ---------- - -.. include:: getlogginglevels.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getmaxhintwindow.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getmaxhintwindow.rst.txt deleted file mode 100644 index 1a0fe017b..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getmaxhintwindow: - -getmaxhintwindow ----------------- - -Usage ---------- - -.. include:: getmaxhintwindow.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getreplicas.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getreplicas.rst.txt deleted file mode 100644 index 342e72b57..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getreplicas.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getreplicas: - -getreplicas ------------ - -Usage ---------- - -.. include:: getreplicas.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getseeds.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getseeds.rst.txt deleted file mode 100644 index e3156300e..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getseeds: - -getseeds --------- - -Usage ---------- - -.. include:: getseeds.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getsstables.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getsstables.rst.txt deleted file mode 100644 index 1a866ccec..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getsstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getsstables: - -getsstables ------------ - -Usage ---------- - -.. 
include:: getsstables.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/getstreamthroughput.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/getstreamthroughput.rst.txt deleted file mode 100644 index 6d7dbc422..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/getstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getstreamthroughput: - -getstreamthroughput -------------------- - -Usage ---------- - -.. include:: getstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/gettimeout.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/gettimeout.rst.txt deleted file mode 100644 index 9d2e9edd8..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/gettimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettimeout: - -gettimeout ----------- - -Usage ---------- - -.. include:: gettimeout.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/gettraceprobability.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/gettraceprobability.rst.txt deleted file mode 100644 index 3f5783fd0..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/gettraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettraceprobability: - -gettraceprobability -------------------- - -Usage ---------- - -.. include:: gettraceprobability.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/gossipinfo.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/gossipinfo.rst.txt deleted file mode 100644 index cd3513a89..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/gossipinfo.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gossipinfo: - -gossipinfo ----------- - -Usage ---------- - -.. include:: gossipinfo.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/handoffwindow.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/handoffwindow.rst.txt deleted file mode 100644 index 87fe61dc2..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/handoffwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_handoffwindow: - -handoffwindow -------------- - -Usage ---------- - -.. include:: handoffwindow.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/help.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/help.rst.txt deleted file mode 100644 index 8cab88f74..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/help.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_help: - -help ----- - -Usage ---------- - -.. include:: help.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/import.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/import.rst.txt deleted file mode 100644 index 532ba2dcd..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/import.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_import: - -import ------- - -Usage ---------- - -.. include:: import.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/info.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/info.rst.txt deleted file mode 100644 index 74012e93f..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/info.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_info: - -info ----- - -Usage ---------- - -.. include:: info.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/invalidatecountercache.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/invalidatecountercache.rst.txt deleted file mode 100644 index ce1a94d1e..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/invalidatecountercache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. 
_nodetool_invalidatecountercache: - -invalidatecountercache ----------------------- - -Usage ---------- - -.. include:: invalidatecountercache.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/invalidatekeycache.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/invalidatekeycache.rst.txt deleted file mode 100644 index 3e80511a6..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/invalidatekeycache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatekeycache: - -invalidatekeycache ------------------- - -Usage ---------- - -.. include:: invalidatekeycache.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/invalidaterowcache.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/invalidaterowcache.rst.txt deleted file mode 100644 index fd84f1d5a..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/invalidaterowcache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidaterowcache: - -invalidaterowcache ------------------- - -Usage ---------- - -.. include:: invalidaterowcache.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/join.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/join.rst.txt deleted file mode 100644 index a2819eb6a..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/join.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_join: - -join ----- - -Usage ---------- - -.. include:: join.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/listsnapshots.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/listsnapshots.rst.txt deleted file mode 100644 index d897cfa2b..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/listsnapshots.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_listsnapshots: - -listsnapshots -------------- - -Usage ---------- - -.. include:: listsnapshots.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/move.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/move.rst.txt deleted file mode 100644 index 04b3bdba1..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/move.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_move: - -move ----- - -Usage ---------- - -.. include:: move.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/netstats.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/netstats.rst.txt deleted file mode 100644 index b94a09e7d..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/netstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_netstats: - -netstats --------- - -Usage ---------- - -.. include:: netstats.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/nodetool.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/nodetool.rst.txt deleted file mode 100644 index c20d0ac21..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/nodetool.rst.txt +++ /dev/null @@ -1,256 +0,0 @@ -.. _nodetool - -Nodetool --------- - -Usage ---------- - -usage: nodetool [(-u | --username )] - [(-h | --host )] [(-p | --port )] - [(-pw | --password )] - [(-pwf | --password-file )] - [(-pp | --print-port)] [] - -The most commonly used nodetool commands are: - - :doc:`assassinate` - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode - - :doc:`bootstrap` - Monitor/manage node's bootstrap process - - :doc:`cleanup` - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces - - :doc:`clearsnapshot` - Remove the snapshot with the given name from the given keyspaces. 
If no snapshotName is specified we will remove all snapshots - - :doc:`clientstats` - Print information about connected clients - - :doc:`compact` - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables - - :doc:`compactionhistory` - Print history of compaction - - :doc:`compactionstats` - Print statistics on compactions - - :doc:`decommission` - Decommission the *node I am connecting to* - - :doc:`describecluster` - Print the name, snitch, partitioner and schema version of a cluster - - :doc:`describering` - Shows the token ranges info of a given keyspace - - :doc:`disableauditlog` - Disable the audit log - - :doc:`disableautocompaction` - Disable autocompaction for the given keyspace and table - - :doc:`disablebackup` - Disable incremental backup - - :doc:`disablebinary` - Disable native transport (binary protocol) - - :doc:`disablefullquerylog` - Disable the full query log - - :doc:`disablegossip` - Disable gossip (effectively marking the node down) - - :doc:`disablehandoff` - Disable storing hinted handoffs - - :doc:`disablehintsfordc` - Disable hints for a data center - - :doc:`disableoldprotocolversions` - Disable old protocol versions - - :doc:`drain` - Drain the node (stop accepting writes and flush all tables) - - :doc:`enableauditlog` - Enable the audit log - - :doc:`enableautocompaction` - Enable autocompaction for the given keyspace and table - - :doc:`enablebackup` - Enable incremental backup - - :doc:`enablebinary` - Reenable native transport (binary protocol) - - :doc:`enablefullquerylog` - Enable full query logging, defaults for the options are configured in cassandra.yaml - - :doc:`enablegossip` - Reenable gossip - - :doc:`enablehandoff` - Reenable future hints storing on the current node - - :doc:`enablehintsfordc` - Enable hints for a data center that was previsouly disabled - - :doc:`enableoldprotocolversions` - Enable old protocol versions - - :doc:`failuredetector` - Shows the failure detector information for the cluster - - :doc:`flush` - Flush one or more tables - - :doc:`garbagecollect` - Remove deleted data from one or more tables - - :doc:`gcstats` - Print GC Statistics - - :doc:`getbatchlogreplaythrottle` - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster. - - :doc:`getcompactionthreshold` - Print min and max compaction thresholds for a given table - - :doc:`getcompactionthroughput` - Print the MB/s throughput cap for compaction in the system - - :doc:`getconcurrency` - Get maximum concurrency for processing stages - - :doc:`getconcurrentcompactors` - Get the number of concurrent compactors in the system. 
- - :doc:`getconcurrentviewbuilders` - Get the number of concurrent view builders in the system - - :doc:`getendpoints` - Print the end points that owns the key - - :doc:`getinterdcstreamthroughput` - Print the Mb/s throughput cap for inter-datacenter streaming in the system - - :doc:`getlogginglevels` - Get the runtime logging levels - - :doc:`getmaxhintwindow` - Print the max hint window in ms - - :doc:`getreplicas` - Print replicas for a given key - - :doc:`getseeds` - Get the currently in use seed node IP list excluding the node IP - - :doc:`getsstables` - Print the sstable filenames that own the key - - :doc:`getstreamthroughput` - Print the Mb/s throughput cap for streaming in the system - - :doc:`gettimeout` - Print the timeout of the given type in ms - - :doc:`gettraceprobability` - Print the current trace probability value - - :doc:`gossipinfo` - Shows the gossip information for the cluster - - :doc:`handoffwindow` - Print current hinted handoff window - - :doc:`help` - Display help information - - :doc:`import` - Import new SSTables to the system - - :doc:`info` - Print node information (uptime, load, ...) - - :doc:`invalidatecountercache` - Invalidate the counter cache - - :doc:`invalidatekeycache` - Invalidate the key cache - - :doc:`invalidaterowcache` - Invalidate the row cache - - :doc:`join` - Join the ring - - :doc:`listsnapshots` - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is total size of the snapshot on disk. Total TrueDiskSpaceUsed does not make any SSTable deduplication. - - :doc:`move` - Move node on the token ring to a new token - - :doc:`netstats` - Print network information on provided host (connecting node by default) - - :doc:`pausehandoff` - Pause hints delivery process - - :doc:`profileload` - Low footprint profiling of activity for a period of time - - :doc:`proxyhistograms` - Print statistic histograms for network operations - - :doc:`rangekeysample` - Shows the sampled keys held across all keyspaces - - :doc:`rebuild` - Rebuild data by streaming from other nodes (similarly to bootstrap) - - :doc:`rebuild_index` - A full rebuild of native secondary indexes for a given table - - :doc:`refresh` - Load newly placed SSTables to the system without restart - - :doc:`refreshsizeestimates` - Refresh system.size_estimates - - :doc:`reloadlocalschema` - Reload local node schema from system tables - - :doc:`reloadseeds` - Reload the seed node list from the seed node provider - - :doc:`reloadssl` - Signals Cassandra to reload SSL certificates - - :doc:`reloadtriggers` - Reload trigger classes - - :doc:`relocatesstables` - Relocates sstables to the correct disk - - :doc:`removenode` - Show status of current node removal, force completion of pending removal or remove provided ID - - :doc:`repair` - Repair one or more tables - - :doc:`repair_admin` - - :doc:`list` - and fail incremental repair sessions - - :doc:`replaybatchlog` - Kick off batchlog replay and wait for finish - - :doc:`resetfullquerylog` - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX - - :doc:`resetlocalschema` - Reset node's local schema and resync - - :doc:`resumehandoff` - Resume hints delivery process - - :doc:`ring` - Print information about the token ring - - :doc:`scrub` - Scrub (rebuild sstables for) one or more tables - - :doc:`setbatchlogreplaythrottle` - Set batchlog replay throttle in KB per second, or 0 to 
disable throttling. This will be reduced proportionally to the number of nodes in the cluster. - - :doc:`setcachecapacity` - Set global key, row, and counter cache capacities (in MB units) - - :doc:`setcachekeystosave` - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable - - :doc:`setcompactionthreshold` - Set min and max compaction thresholds for a given table - - :doc:`setcompactionthroughput` - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling - - :doc:`setconcurrency` - Set maximum concurrency for processing stage - - :doc:`setconcurrentcompactors` - Set number of concurrent compactors in the system. - - :doc:`setconcurrentviewbuilders` - Set the number of concurrent view builders in the system - - :doc:`sethintedhandoffthrottlekb` - Set hinted handoff throttle in kb per second, per delivery thread. - - :doc:`setinterdcstreamthroughput` - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling - - :doc:`setlogginglevel` - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters. - - :doc:`setmaxhintwindow` - Set the specified max hint window in ms - - :doc:`setstreamthroughput` - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling - - :doc:`settimeout` - Set the specified timeout in ms, or 0 to disable timeout - - :doc:`settraceprobability` - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default - - :doc:`sjk` - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk --help' for more information. - - :doc:`snapshot` - Take a snapshot of specified keyspaces or a snapshot of the specified table - - :doc:`status` - Print cluster information (state, load, IDs, ...) - - :doc:`statusautocompaction` - - :doc:`status` - of autocompaction of the given keyspace and table - - :doc:`statusbackup` - Status of incremental backup - - :doc:`statusbinary` - Status of native transport (binary protocol) - - :doc:`statusgossip` - Status of gossip - - :doc:`statushandoff` - Status of storing future hints on the current node - - :doc:`stop` - Stop compaction - - :doc:`stopdaemon` - Stop cassandra daemon - - :doc:`tablehistograms` - Print statistic histograms for a given table - - :doc:`tablestats` - Print statistics on tables - - :doc:`toppartitions` - Sample and print the most active partitions - - :doc:`tpstats` - Print usage statistics of thread pools - - :doc:`truncatehints` - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified. - - :doc:`upgradesstables` - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version) - - :doc:`verify` - Verify (check data checksum for) one or more tables - - :doc:`version` - Print cassandra version - - :doc:`viewbuildstatus` - Show progress of a materialized view build - -See 'nodetool help ' for more information on a specific command. - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/pausehandoff.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/pausehandoff.rst.txt deleted file mode 100644 index 85ea996f9..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/pausehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_pausehandoff: - -pausehandoff ------------- - -Usage ---------- - -.. 
include:: pausehandoff.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/profileload.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/profileload.rst.txt deleted file mode 100644 index aff289f9f..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/profileload.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_profileload: - -profileload ------------ - -Usage ---------- - -.. include:: profileload.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/proxyhistograms.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/proxyhistograms.rst.txt deleted file mode 100644 index c4f333fb7..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/proxyhistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_proxyhistograms: - -proxyhistograms ---------------- - -Usage ---------- - -.. include:: proxyhistograms.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/rangekeysample.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/rangekeysample.rst.txt deleted file mode 100644 index 983ce93d0..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/rangekeysample.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rangekeysample: - -rangekeysample --------------- - -Usage ---------- - -.. include:: rangekeysample.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/rebuild.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/rebuild.rst.txt deleted file mode 100644 index 7a94ce4ed..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/rebuild.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild: - -rebuild -------- - -Usage ---------- - -.. include:: rebuild.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/rebuild_index.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/rebuild_index.rst.txt deleted file mode 100644 index a1ea4f5a2..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/rebuild_index.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild_index: - -rebuild_index -------------- - -Usage ---------- - -.. include:: rebuild_index.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/refresh.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/refresh.rst.txt deleted file mode 100644 index f68f040cd..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/refresh.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refresh: - -refresh -------- - -Usage ---------- - -.. include:: refresh.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/refreshsizeestimates.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/refreshsizeestimates.rst.txt deleted file mode 100644 index 2f3610afe..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/refreshsizeestimates.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refreshsizeestimates: - -refreshsizeestimates --------------------- - -Usage ---------- - -.. include:: refreshsizeestimates.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/reloadlocalschema.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/reloadlocalschema.rst.txt deleted file mode 100644 index 7ccc0c5e3..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/reloadlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadlocalschema: - -reloadlocalschema ------------------ - -Usage ---------- - -.. 
include:: reloadlocalschema.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/reloadseeds.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/reloadseeds.rst.txt deleted file mode 100644 index 5c6751d77..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/reloadseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadseeds: - -reloadseeds ------------ - -Usage ---------- - -.. include:: reloadseeds.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/reloadssl.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/reloadssl.rst.txt deleted file mode 100644 index 9781b295b..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/reloadssl.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadssl: - -reloadssl ---------- - -Usage ---------- - -.. include:: reloadssl.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/reloadtriggers.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/reloadtriggers.rst.txt deleted file mode 100644 index 2f7959d8c..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/reloadtriggers.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadtriggers: - -reloadtriggers --------------- - -Usage ---------- - -.. include:: reloadtriggers.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/relocatesstables.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/relocatesstables.rst.txt deleted file mode 100644 index 9951d3398..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/relocatesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_relocatesstables: - -relocatesstables ----------------- - -Usage ---------- - -.. include:: relocatesstables.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/removenode.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/removenode.rst.txt deleted file mode 100644 index fe0a041d1..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/removenode.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_removenode: - -removenode ----------- - -Usage ---------- - -.. include:: removenode.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/repair.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/repair.rst.txt deleted file mode 100644 index b43baba71..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/repair.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair: - -repair ------- - -Usage ---------- - -.. include:: repair.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/repair_admin.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/repair_admin.rst.txt deleted file mode 100644 index 1212c399d..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/repair_admin.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair_admin: - -repair_admin ------------- - -Usage ---------- - -.. include:: repair_admin.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/replaybatchlog.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/replaybatchlog.rst.txt deleted file mode 100644 index 073f091db..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/replaybatchlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_replaybatchlog: - -replaybatchlog --------------- - -Usage ---------- - -.. 
include:: replaybatchlog.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/resetfullquerylog.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/resetfullquerylog.rst.txt deleted file mode 100644 index a7661ec98..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/resetfullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetfullquerylog: - -resetfullquerylog ------------------ - -Usage ---------- - -.. include:: resetfullquerylog.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/resetlocalschema.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/resetlocalschema.rst.txt deleted file mode 100644 index cd1b75d33..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/resetlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetlocalschema: - -resetlocalschema ----------------- - -Usage ---------- - -.. include:: resetlocalschema.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/resumehandoff.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/resumehandoff.rst.txt deleted file mode 100644 index 48a0451a3..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/resumehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resumehandoff: - -resumehandoff -------------- - -Usage ---------- - -.. include:: resumehandoff.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/ring.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/ring.rst.txt deleted file mode 100644 index 7b3c195bd..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/ring.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_ring: - -ring ----- - -Usage ---------- - -.. include:: ring.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/scrub.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/scrub.rst.txt deleted file mode 100644 index fc926eb14..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/scrub.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_scrub: - -scrub ------ - -Usage ---------- - -.. include:: scrub.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 2ae628a35..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setbatchlogreplaythrottle: - -setbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: setbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setcachecapacity.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setcachecapacity.rst.txt deleted file mode 100644 index 92c7d6389..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setcachecapacity.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachecapacity: - -setcachecapacity ----------------- - -Usage ---------- - -.. include:: setcachecapacity.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setcachekeystosave.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setcachekeystosave.rst.txt deleted file mode 100644 index 639179f99..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setcachekeystosave.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachekeystosave: - -setcachekeystosave ------------------- - -Usage ---------- - -.. 
include:: setcachekeystosave.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setcompactionthreshold.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setcompactionthreshold.rst.txt deleted file mode 100644 index 3a3e88b08..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthreshold: - -setcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: setcompactionthreshold.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setcompactionthroughput.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setcompactionthroughput.rst.txt deleted file mode 100644 index 27185da30..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthroughput: - -setcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: setcompactionthroughput.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setconcurrency.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setconcurrency.rst.txt deleted file mode 100644 index 75b09531f..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrency: - -setconcurrency --------------- - -Usage ---------- - -.. include:: setconcurrency.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setconcurrentcompactors.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setconcurrentcompactors.rst.txt deleted file mode 100644 index 75969de4e..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentcompactors: - -setconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: setconcurrentcompactors.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt deleted file mode 100644 index 26f53a171..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentviewbuilders: - -setconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: setconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt deleted file mode 100644 index 9986ca29a..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sethintedhandoffthrottlekb: - -sethintedhandoffthrottlekb --------------------------- - -Usage ---------- - -.. include:: sethintedhandoffthrottlekb.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt deleted file mode 100644 index ed406a7bc..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setinterdcstreamthroughput: - -setinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. 
include:: setinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setlogginglevel.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setlogginglevel.rst.txt deleted file mode 100644 index eaa4030c3..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setlogginglevel.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setlogginglevel: - -setlogginglevel ---------------- - -Usage ---------- - -.. include:: setlogginglevel.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setmaxhintwindow.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setmaxhintwindow.rst.txt deleted file mode 100644 index 0c62c3289..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setmaxhintwindow: - -setmaxhintwindow ----------------- - -Usage ---------- - -.. include:: setmaxhintwindow.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/setstreamthroughput.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/setstreamthroughput.rst.txt deleted file mode 100644 index 76447f112..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/setstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setstreamthroughput: - -setstreamthroughput -------------------- - -Usage ---------- - -.. include:: setstreamthroughput.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/settimeout.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/settimeout.rst.txt deleted file mode 100644 index 4ec9a6e4d..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/settimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settimeout: - -settimeout ----------- - -Usage ---------- - -.. include:: settimeout.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/settraceprobability.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/settraceprobability.rst.txt deleted file mode 100644 index a95b48560..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/settraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settraceprobability: - -settraceprobability -------------------- - -Usage ---------- - -.. include:: settraceprobability.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/sjk.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/sjk.rst.txt deleted file mode 100644 index 19bf1d605..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/sjk.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sjk: - -sjk ---- - -Usage ---------- - -.. include:: sjk.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/snapshot.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/snapshot.rst.txt deleted file mode 100644 index 097a655b2..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/snapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_snapshot: - -snapshot --------- - -Usage ---------- - -.. include:: snapshot.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/status.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/status.rst.txt deleted file mode 100644 index 4d3050ea1..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/status.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_status: - -status ------- - -Usage ---------- - -.. 
include:: status.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/statusautocompaction.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/statusautocompaction.rst.txt deleted file mode 100644 index 3748e0e4e..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/statusautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusautocompaction: - -statusautocompaction --------------------- - -Usage ---------- - -.. include:: statusautocompaction.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/statusbackup.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/statusbackup.rst.txt deleted file mode 100644 index 6546ec07f..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/statusbackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbackup: - -statusbackup ------------- - -Usage ---------- - -.. include:: statusbackup.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/statusbinary.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/statusbinary.rst.txt deleted file mode 100644 index 0bb5011c3..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/statusbinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbinary: - -statusbinary ------------- - -Usage ---------- - -.. include:: statusbinary.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/statusgossip.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/statusgossip.rst.txt deleted file mode 100644 index 7dc57eda7..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/statusgossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusgossip: - -statusgossip ------------- - -Usage ---------- - -.. include:: statusgossip.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/statushandoff.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/statushandoff.rst.txt deleted file mode 100644 index aa1c4eb6b..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/statushandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statushandoff: - -statushandoff -------------- - -Usage ---------- - -.. include:: statushandoff.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/stop.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/stop.rst.txt deleted file mode 100644 index 1e44dbe79..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/stop.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stop: - -stop ----- - -Usage ---------- - -.. include:: stop.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/stopdaemon.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/stopdaemon.rst.txt deleted file mode 100644 index 4ae951098..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/stopdaemon.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stopdaemon: - -stopdaemon ----------- - -Usage ---------- - -.. include:: stopdaemon.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/tablehistograms.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/tablehistograms.rst.txt deleted file mode 100644 index 79d2b4ccb..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/tablehistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablehistograms: - -tablehistograms ---------------- - -Usage ---------- - -.. 
include:: tablehistograms.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/tablestats.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/tablestats.rst.txt deleted file mode 100644 index 5b2c02d98..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/tablestats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablestats: - -tablestats ----------- - -Usage ---------- - -.. include:: tablestats.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/toppartitions.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/toppartitions.rst.txt deleted file mode 100644 index 711816313..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/toppartitions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_toppartitions: - -toppartitions -------------- - -Usage ---------- - -.. include:: toppartitions.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/tpstats.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/tpstats.rst.txt deleted file mode 100644 index c6b662012..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/tpstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tpstats: - -tpstats -------- - -Usage ---------- - -.. include:: tpstats.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/truncatehints.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/truncatehints.rst.txt deleted file mode 100644 index 4b75391a6..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/truncatehints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_truncatehints: - -truncatehints -------------- - -Usage ---------- - -.. include:: truncatehints.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/upgradesstables.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/upgradesstables.rst.txt deleted file mode 100644 index 505cc148a..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/upgradesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_upgradesstables: - -upgradesstables ---------------- - -Usage ---------- - -.. include:: upgradesstables.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/verify.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/verify.rst.txt deleted file mode 100644 index dbd152cfb..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/verify.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_verify: - -verify ------- - -Usage ---------- - -.. include:: verify.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/version.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/version.rst.txt deleted file mode 100644 index fca4e3f44..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/version.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_version: - -version -------- - -Usage ---------- - -.. include:: version.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/tools/nodetool/viewbuildstatus.rst.txt b/src/doc/3.11.7/_sources/tools/nodetool/viewbuildstatus.rst.txt deleted file mode 100644 index 758fe502b..000000000 --- a/src/doc/3.11.7/_sources/tools/nodetool/viewbuildstatus.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_viewbuildstatus: - -viewbuildstatus ---------------- - -Usage ---------- - -.. include:: viewbuildstatus.txt - :literal: - diff --git a/src/doc/3.11.7/_sources/troubleshooting/index.rst.txt b/src/doc/3.11.7/_sources/troubleshooting/index.rst.txt deleted file mode 100644 index 2e5cf106d..000000000 --- a/src/doc/3.11.7/_sources/troubleshooting/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. 
Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Troubleshooting -=============== - -.. TODO: todo diff --git a/src/doc/3.11.7/_static/ajax-loader.gif b/src/doc/3.11.7/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab..000000000 Binary files a/src/doc/3.11.7/_static/ajax-loader.gif and /dev/null differ diff --git a/src/doc/3.11.7/_static/basic.css b/src/doc/3.11.7/_static/basic.css deleted file mode 100644 index 0807176ec..000000000 --- a/src/doc/3.11.7/_static/basic.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page 
------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { 
- text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - 
-div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/src/doc/3.11.7/_static/comment-bright.png b/src/doc/3.11.7/_static/comment-bright.png deleted file mode 100644 index 15e27edb1..000000000 Binary files a/src/doc/3.11.7/_static/comment-bright.png and /dev/null differ diff --git a/src/doc/3.11.7/_static/comment-close.png b/src/doc/3.11.7/_static/comment-close.png deleted file mode 100644 index 4d91bcf57..000000000 Binary files a/src/doc/3.11.7/_static/comment-close.png and /dev/null differ diff --git a/src/doc/3.11.7/_static/comment.png b/src/doc/3.11.7/_static/comment.png deleted file mode 100644 index dfbc0cbd5..000000000 Binary files a/src/doc/3.11.7/_static/comment.png and /dev/null differ diff --git a/src/doc/3.11.7/_static/doctools.js b/src/doc/3.11.7/_static/doctools.js deleted file mode 100644 index 344db17dd..000000000 --- a/src/doc/3.11.7/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. 
Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var bbox = span.getBBox(); - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - var parentOfText = node.parentNode.parentNode; - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 
0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && 
activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git a/src/doc/3.11.7/_static/documentation_options.js b/src/doc/3.11.7/_static/documentation_options.js deleted file mode 100644 index d28647eb8..000000000 --- a/src/doc/3.11.7/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, -}; \ No newline at end of file diff --git a/src/doc/3.11.7/_static/down-pressed.png b/src/doc/3.11.7/_static/down-pressed.png deleted file mode 100644 index 5756c8cad..000000000 Binary files a/src/doc/3.11.7/_static/down-pressed.png and /dev/null differ diff --git a/src/doc/3.11.7/_static/down.png b/src/doc/3.11.7/_static/down.png deleted file mode 100644 index 1b3bdad2c..000000000 Binary files a/src/doc/3.11.7/_static/down.png and /dev/null differ diff --git a/src/doc/3.11.7/_static/extra.css b/src/doc/3.11.7/_static/extra.css deleted file mode 100644 index 715e2a850..000000000 --- a/src/doc/3.11.7/_static/extra.css +++ /dev/null @@ -1,59 +0,0 @@ -div:not(.highlight) > pre { - background: #fff; - border: 1px solid #e1e4e5; - color: #404040; - margin: 1px 0 24px 0; - overflow-x: auto; - padding: 12px 12px; - font-size: 12px; -} - -a.reference.internal code.literal { - border: none; - font-size: 12px; - color: #2980B9; - padding: 0; - background: none; -} - -a.reference.internal:visited code.literal { - color: #9B59B6; - padding: 0; - background: none; -} - - -/* override table width restrictions */ -.wy-table-responsive table td, .wy-table-responsive table th { - white-space: normal; -} - -.wy-table-responsive { - margin-bottom: 24px; - max-width: 100%; - overflow: visible; -} - -table.contentstable { - margin: 0; -} - -td.rightcolumn { - padding-left: 30px; -} - -div#wipwarning { - font-size: 14px; - border: 1px solid #ecc; - color: #f66; - background: #ffe8e8; - padding: 10px 30px; - margin-bottom: 30px; -} -.content-container{ - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; - width:100%; -} diff --git a/src/doc/3.11.7/_static/file.png b/src/doc/3.11.7/_static/file.png deleted file mode 100644 index a858a410e..000000000 Binary files a/src/doc/3.11.7/_static/file.png and /dev/null differ diff --git a/src/doc/3.11.7/_static/jquery-3.2.1.js b/src/doc/3.11.7/_static/jquery-3.2.1.js deleted file mode 100644 index d2d8ca479..000000000 --- a/src/doc/3.11.7/_static/jquery-3.2.1.js +++ /dev/null @@ -1,10253 +0,0 @@ -/*! 
- * jQuery JavaScript Library v3.2.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2017-03-20T18:59Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.2.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from 
the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && Array.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? 
- [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
-function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. 
- if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - disabledAncestor( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. - } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( el ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push("~="); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push(".#.+[+~]"); - } - }); - - assert(function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement("input"); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - 
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - if ( support.matchesSelector && documentIsHTML && - !compilerCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) {} - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? 
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -var risSimple = /^.[^:#\[\.,]*$/; - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Simple selector that can be filtered directly, removing non-Elements - if ( risSimple.test( qualifier ) ) { - return jQuery.filter( qualifier, elements, not ); - } - - // Complex selector, compare the two sets, removing non-Elements - qualifier = jQuery.filter( qualifier, elements ); - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; - } ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? - jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( nodeName( elem, "iframe" ) ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( jQuery.isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( jQuery.isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? 
- [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. 
- // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. - } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "
", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps 
(WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( ">tbody", elem )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( isFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl ) { - jQuery._evalUrl( node.src ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = jQuery.contains( elem.ownerDocument, elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rmargin = ( /^margin/ ); - -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - div.style.cssText = - "box-sizing:border-box;" + - "position:relative;display:block;" + - "margin:auto;border:1px;padding:1px;" + - "top:1%;width:50%"; - div.innerHTML = ""; - documentElement.appendChild( container ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = divStyle.marginLeft === "2px"; - boxSizingReliableVal = divStyle.width === "4px"; - - // Support: Android 4.0 - 4.3 only - // Some styles come back with percentage values, even though they shouldn't - div.style.marginRight = "50%"; - pixelMarginRightVal = divStyle.marginRight === "4px"; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + - "padding:0;margin-top:1px;position:absolute"; - container.appendChild( div ); - - jQuery.extend( support, { - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelMarginRight: function() { - computeStyleTests(); - return pixelMarginRightVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }, - - cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style; - -// Return a css property mapped to a potentially vendor prefixed property -function vendorPropName( name ) { - - // Shortcut for names that are not vendor prefixed - if ( name in emptyStyle ) { - return name; - } - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a property mapped along what jQuery.cssProps suggests or to -// a vendor prefixed property. -function finalPropName( name ) { - var ret = jQuery.cssProps[ name ]; - if ( !ret ) { - ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; - } - return ret; -} - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i, - val = 0; - - // If we already have the right measurement, avoid augmentation - if ( extra === ( isBorderBox ? "border" : "content" ) ) { - i = 4; - - // Otherwise initialize for horizontal or vertical properties - } else { - i = name === "width" ? 
1 : 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // At this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - - // At this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // At this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with computed style - var valueIsBorderBox, - styles = getStyles( elem ), - val = curCSS( elem, name, styles ), - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test( val ) ) { - return val; - } - - // Check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && - ( support.boxSizingReliable() || val === elem.style[ name ] ); - - // Fall back to offsetWidth/Height when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - if ( val === "auto" ) { - val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; - } - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - - // Use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - "float": "cssFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - if ( type === "number" ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
- swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - } ) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = extra && getStyles( elem ), - subtract = extra && augmentWidthOrHeight( - elem, - name, - extra, - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ); - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ name ] = value; - value = jQuery.css( elem, name ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && - ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || - jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = jQuery.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 13 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( 
restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( jQuery.isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - jQuery.proxy( result.stop, result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( type === "string" ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = value.match( rnothtmlwhite ) || []; - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, isFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 
2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; -} ); - -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - - - - -support.focusin = "onfocusin" in window; - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = jQuery.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport 
); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - 
// If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - 
contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -' - ---- -
-
- -
-
-
- -
-

Dynamo

-
-

Gossip

-
-

Todo

-

todo

-
-
-
-

Failure Detection

-
-

Todo

-

todo

-
-
-
-

Token Ring/Ranges

-
-

Todo

-

todo

-
-
-
-

Replication

-

The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are SimpleStrategy and NetworkTopologyStrategy.

-
-

SimpleStrategy

-

SimpleStrategy allows a single integer replication_factor to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if replication_factor is 3, then three different nodes should store -a copy of each row.

-

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until replication_factor distinct nodes have been added to the set of replicas.

-
-
-

NetworkTopologyStrategy

-

NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier to add new physical or virtual datacenters to the cluster later.

-

In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially surprising implications. For example, if the racks do not hold an equal number of nodes, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to configure all nodes on a single “rack”.

-
-
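As a concrete illustration, the replication strategy is chosen per keyspace in CQL; a keyspace replicated three times in each of two datacenters (the keyspace and datacenter names below are purely hypothetical) could be created with:

    CREATE KEYSPACE example_ks
      WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3};

A SimpleStrategy keyspace instead takes a single replication_factor, e.g. {'class': 'SimpleStrategy', 'replication_factor': 3}.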
-
-

Tunable Consistency

-

Cassandra supports a per-operation tradeoff between consistency and availability through Consistency Levels. -Essentially, an operation’s consistency level specifies how many of the replicas need to respond to the coordinator in -order to consider the operation a success.

-

The following consistency levels are available:

-
-
ONE
-
Only a single replica must respond.
-
TWO
-
Two replicas must respond.
-
THREE
-
Three replicas must respond.
-
QUORUM
-
A majority (n/2 + 1) of the replicas must respond.
-
ALL
-
All of the replicas must respond.
-
LOCAL_QUORUM
-
A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
EACH_QUORUM
-
A majority of the replicas in each datacenter must respond.
-
LOCAL_ONE
-
Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not sent to replicas in a remote datacenter.
-
ANY
-
A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later -attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for -write operations.
-
-

Write operations are always sent to all replicas, regardless of consistency level. The consistency level simply -controls how many responses the coordinator waits for before responding to the client.

-

For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level. There are a couple of exceptions to this:

-
    -
  • Speculative retry may issue a redundant read request to an extra replica if the other replicas have not responded -within a specified time window.
  • -
  • Based on read_repair_chance and dclocal_read_repair_chance (part of a table’s schema), read requests may be -randomly sent to all replicas in order to repair potentially inconsistent data.
  • -
-
-

Picking Consistency Levels

-

It is common to pick read and write consistency levels that are high enough to overlap, resulting in “strong” -consistency. This is typically expressed as W + R > RF, where W is the write consistency level, R is the -read consistency level, and RF is the replication factor. For example, if RF = 3, a QUORUM request will -require responses from at least two of the three replicas. If QUORUM is used for both writes and reads, at least -one of the replicas is guaranteed to participate in both the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, LOCAL_QUORUM can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter.

-

If this type of strong consistency isn’t required, lower consistency levels like ONE may be used to improve -throughput, latency, and availability.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/architecture/guarantees.html b/src/doc/3.11.7/architecture/guarantees.html deleted file mode 100644 index 03edee06a..000000000 --- a/src/doc/3.11.7/architecture/guarantees.html +++ /dev/null @@ -1,113 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Guarantees" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Guarantees

-
-

Todo

-

todo

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/architecture/index.html b/src/doc/3.11.7/architecture/index.html deleted file mode 100644 index c50de3f1e..000000000 --- a/src/doc/3.11.7/architecture/index.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Architecture" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Architecture

-

This section describes the general architecture of Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/architecture/overview.html b/src/doc/3.11.7/architecture/overview.html deleted file mode 100644 index 216a7e1ba..000000000 --- a/src/doc/3.11.7/architecture/overview.html +++ /dev/null @@ -1,113 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Overview" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
- -
\ No newline at end of file diff --git a/src/doc/3.11.7/architecture/storage_engine.html b/src/doc/3.11.7/architecture/storage_engine.html deleted file mode 100644 index 97ef46967..000000000 --- a/src/doc/3.11.7/architecture/storage_engine.html +++ /dev/null @@ -1,164 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Storage Engine" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Storage Engine

-
-

CommitLog

-
-

Todo

-

todo

-
-
-
-

Memtables

-

Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable SSTables. This can be triggered in several -ways:

-
    -
  • The memory usage of the memtables exceeds the configured threshold (see memtable_cleanup_threshold)
  • -
  • The CommitLog approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to -be freed
  • -
-

Memtables may be stored entirely on-heap or partially off-heap, depending on memtable_allocation_type.

-
-
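The memtable-related knobs live in cassandra.yaml; a minimal illustrative sketch (the values shown are the common defaults, not tuning advice) looks like:

    memtable_allocation_type: heap_buffers
    memtable_heap_space_in_mb: 2048
    memtable_offheap_space_in_mb: 2048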
-

SSTables

-

SSTables are the immutable data files that Cassandra uses for persisting data on disk.

-

As SSTables are flushed to disk from Memtables or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.

-

Each SSTable is comprised of multiple components stored in separate files:

-
-
Data.db
-
The actual data, i.e. the contents of rows.
-
Index.db
-
An index from partition keys to positions in the Data.db file. For wide partitions, this may also include an -index to rows within a partition.
-
Summary.db
-
A sampling of (by default) every 128th entry in the Index.db file.
-
Filter.db
-
A Bloom Filter of the partition keys in the SSTable.
-
CompressionInfo.db
-
Metadata about the offsets and lengths of compression chunks in the Data.db file.
-
Statistics.db
-
Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, -repair, compression, TTLs, and more.
-
Digest.crc32
-
A CRC-32 digest of the Data.db file.
-
TOC.txt
-
A plain text list of the component files for the SSTable.
-
-

Within the Data.db file, rows are organized by partition. These partitions are sorted in token order (i.e. by a hash of the partition key when the default partitioner, Murmur3Partitioner, is used). Within a partition, rows are stored in the order of their clustering keys.

-

SSTables can be optionally compressed using block-based compression.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/bugs.html b/src/doc/3.11.7/bugs.html deleted file mode 100644 index 4138e67d6..000000000 --- a/src/doc/3.11.7/bugs.html +++ /dev/null @@ -1,108 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Reporting Bugs and Contributing" -doc-header-links: ' - - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Reporting Bugs and Contributing

-

If you encounter a problem with Cassandra, the first places to ask for help are the user mailing list and the #cassandra IRC channel.

-

If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a ticket through the Apache Cassandra JIRA. Please provide as much detail as you can about your problem, and don’t forget to indicate which version of Cassandra you are running and in which environment.

-

Further details on how to contribute can be found at our Cassandra Development section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/configuration/cassandra_config_file.html b/src/doc/3.11.7/configuration/cassandra_config_file.html deleted file mode 100644 index 0758590fa..000000000 --- a/src/doc/3.11.7/configuration/cassandra_config_file.html +++ /dev/null @@ -1,1826 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Configuring Cassandra" - -doc-title: "Cassandra Configuration File" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Cassandra Configuration File

-
-

cluster_name

-

The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another.

-

Default Value: ‘Test Cluster’

-
-
-

num_tokens

-

This defines the number of tokens randomly assigned to this node on the ring. The more tokens, relative to other nodes, the larger the proportion of data that this node will store. You probably want all nodes to have the same number of tokens, assuming they have equal hardware capability.

-

If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below.

-

Specifying initial_token will override this setting on the node’s initial start; on subsequent starts, this setting will apply even if initial_token is set.

-

If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations

-

Default Value: 256

-
-
-

allocate_tokens_for_keyspace

-

This option is commented out by default.

-

Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace.

-

The load assigned to each node will be close to proportional to its number of -vnodes.

-

Only supported with the Murmur3Partitioner.

-

Default Value: KEYSPACE

-
-
-

initial_token

-

This option is commented out by default.

-

initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) – in which case you should provide a -comma-separated list – it’s primarily used when adding nodes to legacy clusters -that do not have vnodes enabled.

-
-
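Putting the three token options together, a typical vnode-enabled node might carry something like the following (the keyspace name is purely illustrative):

    num_tokens: 256
    # allocate_tokens_for_keyspace: my_keyspace
    # initial_token: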
-

hinted_handoff_enabled

-

See http://wiki.apache.org/cassandra/HintedHandoff -May either be “true” or “false” to enable globally

-

Default Value: true

-
-
-

hinted_handoff_disabled_datacenters

-

This option is commented out by default.

-

When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff

-

Default Value (complex option):

-
#    - DC1
-#    - DC2
-
-
-
-
-

max_hint_window_in_ms

-

this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again.

-

Default Value: 10800000 # 3 hours

-
-
-

hinted_handoff_throttle_in_kb

-

Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.)

-

Default Value: 1024

-
-
-

max_hints_delivery_threads

-

Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower

-

Default Value: 2

-
-
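Taken together, the hinted handoff block with the documented defaults looks roughly like:

    hinted_handoff_enabled: true
    max_hint_window_in_ms: 10800000 # 3 hours
    hinted_handoff_throttle_in_kb: 1024
    max_hints_delivery_threads: 2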
-

hints_directory

-

This option is commented out by default.

-

Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints.

-

Default Value: /var/lib/cassandra/hints

-
-
-

hints_flush_period_in_ms

-

How often hints should be flushed from the internal buffers to disk. -Will not trigger fsync.

-

Default Value: 10000

-
-
-

max_hints_file_size_in_mb

-

Maximum size for a single hints file, in megabytes.

-

Default Value: 128

-
-
-

hints_compression

-

This option is commented out by default.

-

Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

batchlog_replay_throttle_in_kb

-

Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster.

-

Default Value: 1024

-
-
-

authenticator

-

Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}.

-
    -
  • AllowAllAuthenticator performs no checks - set it to disable authentication.
  • -
  • PasswordAuthenticator relies on username/password pairs to authenticate -users. It keeps usernames and hashed passwords in system_auth.roles table. -Please increase system_auth keyspace replication factor if you use this authenticator. -If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
  • -
-

Default Value: AllowAllAuthenticator

-
-
-

authorizer

-

Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}.

-
    -
  • AllowAllAuthorizer allows any action to any user - set it to disable authorization.
  • -
  • CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllAuthorizer

-
-
-

role_manager

-

Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable.

-
    -
  • CassandraRoleManager stores role data in the system_auth keyspace. Please -increase system_auth keyspace replication factor if you use this role manager.
  • -
-

Default Value: CassandraRoleManager

-
-
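When internal authentication and authorization are turned on, the three settings above are normally changed together; an illustrative sketch (remember to raise the system_auth replication factor as noted above) is:

    authenticator: PasswordAuthenticator
    authorizer: CassandraAuthorizer
    role_manager: CassandraRoleManager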
-

roles_validity_in_ms

-

Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator.

-

Default Value: 2000

-
-
-

roles_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for roles cache (if enabled). After this interval, cache entries become eligible for refresh. Upon next access, an async reload is scheduled and the old value returned until it completes. If roles_validity_in_ms is non-zero, then this must be non-zero as well. Defaults to the same value as roles_validity_in_ms.

-

Default Value: 2000

-
-
-

permissions_validity_in_ms

-

Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer.

-

Default Value: 2000

-
-
-

permissions_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for permissions cache (if enabled). After this interval, cache entries become eligible for refresh. Upon next access, an async reload is scheduled and the old value returned until it completes. If permissions_validity_in_ms is non-zero, then this must be non-zero as well. Defaults to the same value as permissions_validity_in_ms.

-

Default Value: 2000

-
-
-

credentials_validity_in_ms

-

Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching.

-

Default Value: 2000

-
-
-

credentials_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for credentials cache (if enabled). After this interval, cache entries become eligible for refresh. Upon next access, an async reload is scheduled and the old value returned until it completes. If credentials_validity_in_ms is non-zero, then this must be non-zero as well. Defaults to the same value as credentials_validity_in_ms.

-

Default Value: 2000

-
-
-

partitioner

-

The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. You should leave this -alone for new clusters. The partitioner can NOT be changed without -reloading all data, so when upgrading you should set this to the -same partitioner you were already using.

-

Besides Murmur3Partitioner, partitioners included for backwards -compatibility include RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner.

-

Default Value: org.apache.cassandra.dht.Murmur3Partitioner

-
-
-

data_file_directories

-

This option is commented out by default.

-

Directories where Cassandra should store data on disk. Cassandra -will spread data evenly across them, subject to the granularity of -the configured compaction strategy. -If not set, the default directory is $CASSANDRA_HOME/data/data.

-

Default Value (complex option):

-
#     - /var/lib/cassandra/data
-
-
-
-
-

commitlog_directory

-

This option is commented out by default. Directory where Cassandra should store the commit log. When running on magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.

-

Default Value: /var/lib/cassandra/commitlog

-
-
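The directory options are commonly set as a group; using the documented defaults this looks like:

    data_file_directories:
        - /var/lib/cassandra/data
    commitlog_directory: /var/lib/cassandra/commitlog
    hints_directory: /var/lib/cassandra/hints
    saved_caches_directory: /var/lib/cassandra/saved_caches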
-

cdc_enabled

-

Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory).

-

Default Value: false

-
-
-

cdc_raw_directory

-

This option is commented out by default.

-

CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw.

-

Default Value: /var/lib/cassandra/cdc_raw

-
-
-

disk_failure_policy

-

Policy for data disk failures:

-
-
die
-
shut down gossip and client transports and kill the JVM for any fs errors or -single-sstable errors, so the node can be replaced.
-
stop_paranoid
-
shut down gossip and client transports even for single-sstable errors, -kill the JVM for errors during startup.
-
stop
-
shut down gossip and client transports, leaving the node effectively dead, but -can still be inspected via JMX, kill the JVM for errors during startup.
-
best_effort
-
stop using the failed disk and respond to requests based on -remaining available sstables. This means you WILL see obsolete -data at CL.ONE!
-
ignore
-
ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-
-

Default Value: stop

-
-
-

commit_failure_policy

-

Policy for commit disk failures:

-
-
die
-
shut down gossip and Thrift and kill the JVM, so the node can be replaced.
-
stop
-
shut down gossip and Thrift, leaving the node effectively dead, but -can still be inspected via JMX.
-
stop_commit
-
shutdown the commit log, letting writes collect but -continuing to service reads, as in pre-2.0.5 Cassandra
-
ignore
-
ignore fatal errors and let the batches fail
-
-

Default Value: stop

-
-
-

prepared_statements_cache_size_mb

-

Maximum size of the native protocol prepared statement cache

-

Valid values are either “auto” (omitting the value) or a value greater than 0.

-

Note that specifying too large a value will result in long running GCs and possibly out-of-memory errors. Keep the value at a small fraction of the heap.

-

If you constantly see “prepared statements discarded in the last minute because -cache limit reached” messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts.

-

Only change the default value if you really have more prepared statements than fit in the cache. In most cases it is not necessary to change this value. Constantly re-preparing statements is a performance penalty.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

thrift_prepared_statements_cache_size_mb

-

Maximum size of the Thrift prepared statement cache

-

If you do not use Thrift at all, it is safe to leave this value at “auto”.

-

See description of ‘prepared_statements_cache_size_mb’ above for more information.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

key_cache_size_in_mb

-

Maximum size of the key cache in memory.

-

Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it’s worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It’s best to only use the -row cache if you have hot rows or static rows.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.

-
-
-

key_cache_save_period

-

Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in terms of I/O for the key cache. Row cache saving is much more expensive and has limited use.

-

Default is 14400 or 4 hours.

-

Default Value: 14400

-
-
-

key_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

row_cache_class_name

-

This option is commented out by default.

-

Row cache implementation class name. Available implementations:

-
-
org.apache.cassandra.cache.OHCProvider
-
Fully off-heap row cache implementation (default).
-
org.apache.cassandra.cache.SerializingCacheProvider
-
This is the row cache implementation available in previous releases of Cassandra.
-
-

Default Value: org.apache.cassandra.cache.OHCProvider

-
-
-

row_cache_size_in_mb

-

Maximum size of the row cache in memory. Please note that the OHC cache implementation requires some additional off-heap memory to manage the map structures and some in-flight memory during operations before/after cache entries can be accounted against the cache capacity. This overhead is usually small compared to the whole capacity. Do not specify more memory than the system can afford in the worst usual situation, and leave some headroom for the OS block level cache. Never allow your system to swap.

-

Default value is 0, to disable row caching.

-

Default Value: 0

-
-
-

row_cache_save_period

-

Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in terms of I/O for the key cache. Row cache saving is much more expensive and has limited use.

-

Default is 0 to disable saving the row cache.

-

Default Value: 0

-
-
-

row_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved

-

Default Value: 100

-
-
-

counter_cache_size_in_mb

-

Maximum size of the counter cache in memory.

-

Counter cache helps to reduce counter locks’ contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it’s relatively cheap.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.

-
-
-

counter_cache_save_period

-

Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Default is 7200 or 2 hours.

-

Default Value: 7200

-
-
-

counter_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
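For reference, a cache block spelled out with the documented defaults (a blank value means “auto”) would be:

    key_cache_size_in_mb:
    key_cache_save_period: 14400
    row_cache_size_in_mb: 0
    row_cache_save_period: 0
    counter_cache_size_in_mb:
    counter_cache_save_period: 7200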
-

saved_caches_directory

-

This option is commented out by default.

-

Directory where Cassandra should store the saved caches. If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.

-

Default Value: /var/lib/cassandra/saved_caches

-
-
-

commitlog_sync

-

This option is commented out by default.

-

commitlog_sync may be either “periodic” or “batch.”

-

When in batch mode, Cassandra won’t ack writes until the commit log -has been fsynced to disk. It will wait -commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -This window should be kept short because the writer threads will -be unable to do extra work while waiting. (You may need to increase -concurrent_writes for the same reason.)

-

Default Value: batch

-
-
-

commitlog_sync_batch_window_in_ms

-

This option is commented out by default.

-

Default Value: 2

-
-
-

commitlog_sync

-

the other option is “periodic” where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds.

-

Default Value: periodic

-
-
-

commitlog_sync_period_in_ms

-

Default Value: 10000

-
-
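The two sync modes are configured as a pair; illustrative sketches of each, using the defaults quoted above (only one pair should be active at a time):

    commitlog_sync: periodic
    commitlog_sync_period_in_ms: 10000

    # commitlog_sync: batch
    # commitlog_sync_batch_window_in_ms: 2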
-

commitlog_segment_size_in_mb

-

The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables.

-

The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via the max_mutation_size_in_kb setting in cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024. This should be positive and less than 2048.

-

NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024

-

Default Value: 32

-
-
-

commitlog_compression

-

This option is commented out by default.

-

Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

seed_provider

-

any class that implements the SeedProvider interface and has a -constructor that takes a Map<String, String> of parameters will do.

-

Default Value (complex option):

-
# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring.  You must change this if you are running
-# multiple nodes!
-- class_name: org.apache.cassandra.locator.SimpleSeedProvider
-  parameters:
-      # seeds is actually a comma-delimited list of addresses.
-      # Ex: "<ip1>,<ip2>,<ip3>"
-      - seeds: "127.0.0.1"
-
-
-
-
-
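A sketch of the same seed_provider block for a multi-node cluster; the addresses below are hypothetical placeholders:

```yaml
seed_provider:
  - class_name: org.apache.cassandra.locator.SimpleSeedProvider
    parameters:
      # A comma-delimited list of a few (not all) nodes, e.g. two per datacenter.
      - seeds: "10.0.1.1,10.0.1.2,10.0.2.1"
```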

concurrent_reads

-

For workloads with more data than can fit in memory, Cassandra’s -bottleneck will be reads that need to fetch data from -disk. “concurrent_reads” should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. The same applies to -“concurrent_counter_writes”, since counter writes read the current -values before incrementing and writing them back.

-

On the other hand, since writes are almost never IO bound, the ideal -number of “concurrent_writes” is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb.

-

Default Value: 32

-
-
-
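Applying the rules of thumb above to a hypothetical node with 2 data drives and 8 cores gives the following sketch (values are illustrative, not prescriptive):

```yaml
concurrent_reads: 32            # 16 * 2 drives
concurrent_counter_writes: 32   # same guidance as concurrent_reads
concurrent_writes: 64           # 8 * 8 cores
```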

concurrent_writes

-

Default Value: 32

-
-
-

concurrent_counter_writes

-

Default Value: 32

-
-
-

concurrent_materialized_view_writes

-

For materialized view writes, as there is a read involved, this should -be limited by the lesser of concurrent reads and concurrent writes.

-

Default Value: 32

-
-
-

file_cache_size_in_mb

-

This option is commented out by default.

-

Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as a -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed.

-

Default Value: 512

-
-
-
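A worked example of the on-heap overhead mentioned above, assuming the default 512 MB cache and 64 KB chunks:

```yaml
# file_cache_size_in_mb: 512
# chunks           = 512 MB / 64 KB = 8192
# on-heap overhead ≈ 8192 * 128 B  ≈ 1 MB (about 0.2% of the reserved size)
```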

buffer_pool_use_heap_if_exhausted

-

This option is commented out by default.

-

Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

-

Default Value: true

-
-
-

disk_optimization_strategy

-

This option is commented out by default.

-

The strategy for optimizing disk reads. -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks)

-

Default Value: ssd

-
-
-

memtable_heap_space_in_mb

-

This option is commented out by default.

-

Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold. -If omitted, Cassandra will set both to 1/4 the size of the heap.

-

Default Value: 2048

-
-
-

memtable_offheap_space_in_mb

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

memtable_cleanup_threshold

-

This option is commented out by default.

-

memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information.

-

Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. A larger memtable_cleanup_threshold will -mean larger flushes and hence less compaction, but also less concurrent -flush activity, which can make it difficult to keep your disks fed -under heavy write load.

-

memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)

-

Default Value: 0.11

-
-
-
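Working the stated formula through, as a non-authoritative illustration:

```yaml
# memtable_cleanup_threshold = 1 / (memtable_flush_writers + 1)
# With the default of 2 flush writers: 1 / (2 + 1) ≈ 0.33
# The commented-out 0.11 shown above would correspond to 8 flush writers: 1 / (8 + 1) ≈ 0.11
```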

memtable_allocation_type

-

Specify the way Cassandra allocates and manages memtable memory. -Options are:

-
-
heap_buffers
-
on heap nio buffers
-
offheap_buffers
-
off heap (direct) nio buffers
-
offheap_objects
-
off heap objects
-
-

Default Value: heap_buffers

-
-
-

repair_session_max_tree_depth

-

This option is commented out by default.

-

Limits the maximum Merkle tree depth to avoid consuming too much -memory during repairs.

-

The default setting of 18 generates trees of maximum size around -50 MiB / tree. If you are running out of memory during repairs consider -lowering this to 15 (~6 MiB / tree) or lower, but try not to lower it -too much past that or you will lose too much resolution and stream -too much redundant data during repair. Cannot be set lower than 10.

-

For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.

-

Default Value: 18

-
-
-

commitlog_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for commit logs on disk.

-

If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume.

-

Default Value: 8192

-
-
-

memtable_flush_writers

-

This option is commented out by default.

-

This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound.

-

Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time.

-

You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory.

-

memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers.

-

Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead.

-

There is a direct tradeoff between the number of memtables that can be flushed concurrently -and flush size and frequency. More is not better; you just need enough flush writers -to never stall waiting for flushing to free memory.

-

Default Value: 2

-
-
-

cdc_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for change-data-capture logs on disk.

-

If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed.

-

The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides.

-

Default Value: 4096

-
-
-

cdc_free_space_check_interval_ms

-

This option is commented out by default.

-

When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Defaults to 250ms.

-

Default Value: 250

-
-
-

index_summary_capacity_in_mb

-

A fixed memory pool size in MB for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory.

-
-
-

index_summary_resize_interval_in_minutes

-

How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional to their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level.

-

Default Value: 60

-
-
-

trickle_fsync

-

Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters.

-

Default Value: false

-
-
-

trickle_fsync_interval_in_kb

-

Default Value: 10240

-
-
-
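A minimal sketch of the SSD-oriented tuning described above (assumption: the node's data directories are on SSDs):

```yaml
trickle_fsync: true
trickle_fsync_interval_in_kb: 10240
```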

storage_port

-

TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7000

-
-
-

ssl_storage_port

-

SSL port, for encrypted communication. Unused unless enabled in -encryption_options -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7001

-
-
-

listen_address

-

Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate!

-

Set listen_address OR listen_interface, not both.

-

Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be).

-

Setting listen_address to 0.0.0.0 is always wrong.

-

Default Value: localhost

-
-
-

listen_interface

-

This option is commented out by default.

-

Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth0

-
-
-

listen_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_address

-

This option is commented out by default.

-

Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address

-

Default Value: 1.2.3.4

-
-
-

listen_on_broadcast_address

-

This option is commented out by default.

-

When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate on both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks, such as on EC2.

-

Default Value: false

-
-
-

internode_authenticator

-

This option is commented out by default.

-

Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes.

-

Default Value: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

-
-
-

start_native_transport

-

Whether to start the native transport server. -Please note that the address on which the native transport is bound is the -same as the rpc_address. The port however is different and specified below.

-

Default Value: true

-
-
-

native_transport_port

-

port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 9042

-
-
-

native_transport_port_ssl

-

This option is commented out by default. -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted.

-

Default Value: 9142

-
-
-
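A hedged sketch of the "dedicated SSL port" arrangement described above: client encryption is enabled, 9042 stays unencrypted, and 9142 serves encrypted traffic (see client_encryption_options further down for the full keystore settings):

```yaml
native_transport_port: 9042
native_transport_port_ssl: 9142
client_encryption_options:
  enabled: true
  keystore: conf/.keystore
  keystore_password: cassandra
```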

native_transport_max_threads

-

This option is commented out by default. -The maximum number of threads for handling requests when the native transport is used. -This is similar to rpc_max_threads, though the default differs slightly (and -there is no native_transport_min_threads; idle threads will always be stopped -after 30 seconds).

-

Default Value: 128

-
-
-

native_transport_max_frame_size_in_mb

-

This option is commented out by default.

-

The maximum allowed frame size. Frames (requests) larger than this will -be rejected as invalid. The default is 256MB. If you’re changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

native_transport_max_concurrent_connections

-

This option is commented out by default.

-

The maximum number of concurrent client connections. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_max_concurrent_connections_per_ip

-

This option is commented out by default.

-

The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

start_rpc

-

Whether to start the thrift rpc server.

-

Default Value: false

-
-
-

rpc_address

-

The address or interface to bind the Thrift RPC service and native transport -server to.

-

Set rpc_address OR rpc_interface, not both.

-

Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node).

-

Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0.

-

For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: localhost

-
-
-

rpc_interface

-

This option is commented out by default.

-

Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth1

-
-
-

rpc_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

rpc_port

-

port for Thrift to listen for clients on

-

Default Value: 9160

-
-
-

broadcast_rpc_address

-

This option is commented out by default.

-

RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set.

-

Default Value: 1.2.3.4

-
-
-
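Putting the listen_address / rpc_address / broadcast_rpc_address rules together for a hypothetical node at 10.0.0.5 (addresses are placeholders):

```yaml
listen_address: 10.0.0.5          # internode traffic
rpc_address: 0.0.0.0              # accept client connections on all interfaces
broadcast_rpc_address: 10.0.0.5   # required because rpc_address is 0.0.0.0
```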

rpc_keepalive

-

enable or disable keepalive on rpc/native connections

-

Default Value: true

-
-
-

rpc_server_type

-

Cassandra provides two out-of-the-box options for the RPC Server:

-
-
sync
-
One thread per thrift connection. For a very large number of clients, memory -will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size -per thread, and that will correspond to your use of virtual memory (but physical memory -may be limited depending on use of stack space).
-
hsha
-
Stands for “half synchronous, half asynchronous.” All thrift clients are handled -asynchronously using a small number of threads that does not vary with the number -of thrift clients (and thus scales well to many clients). The rpc requests are still -synchronous (one thread per active request). If hsha is selected then it is essential -that rpc_max_threads is changed from the default value of unlimited.
-
-

The default is sync because on Windows hsha is about 30% slower. On Linux, -sync/hsha performance is about the same, with hsha of course using less memory.

-

Alternatively, you can provide your own RPC server by specifying the fully-qualified class name -of an o.a.c.t.TServerFactory that can create an instance of it.

-

Default Value: sync

-
-
-

rpc_min_threads

-

This option is commented out by default.

-

Uncomment rpc_min_threads and rpc_max_threads to set request pool size limits.

-

Regardless of your choice of RPC server (see above), the number of maximum requests in the -RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -RPC server, it also dictates the number of clients that can be connected at all).

-

The default is unlimited and thus provides no protection against clients overwhelming the server. You are -encouraged to set a maximum that makes sense for you in production, but do keep in mind that -rpc_max_threads represents the maximum number of client requests this server may execute concurrently.

-

Default Value: 16

-
-
-

rpc_max_threads

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

rpc_send_buff_size_in_bytes

-

This option is commented out by default.

-

uncomment to set socket buffer sizes on rpc connections

-
-
-

rpc_recv_buff_size_in_bytes

-

This option is commented out by default.

-
-
-

internode_send_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set the socket buffer size for internode communication. -Note that when setting this, the buffer size is limited by net.core.wmem_max, -and when not setting it, it is defined by net.ipv4.tcp_wmem. -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_rmem -and ‘man tcp’

-
-
-

internode_recv_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set the socket buffer size for internode communication. -Note that when setting this, the buffer size is limited by net.core.rmem_max, -and when not setting it, it is defined by net.ipv4.tcp_rmem.

-
-
-

thrift_framed_transport_size_in_mb

-

Frame size for thrift (maximum message length).

-

Default Value: 15

-
-
-

incremental_backups

-

Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator’s -responsibility.

-

Default Value: false

-
-
-

snapshot_before_compaction

-

Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won’t clean up the -snapshots for you. Mostly useful if you’re paranoid when there -is a data format change.

-

Default Value: false

-
-
-

auto_snapshot

-

Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop.

-

Default Value: true

-
-
-

column_index_size_in_kb

-

Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these:

-
    -
  • a smaller granularity means more index entries are generated -and looking up rows within the partition by collation column -is faster
  • -
  • but, Cassandra will keep the collation index in memory for hot -rows (as part of the key cache), so a larger granularity means -you can cache more hot rows
  • -
-

Default Value: 64

-
-
-

column_index_cache_size_in_kb

-

Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk.

-

Note that this size refers to the size of the -serialized index information and not the size of the partition.

-

Default Value: 2

-
-
-

concurrent_compactors

-

This option is commented out by default.

-

Number of simultaneous compactions to allow, NOT including -validation “compactions” for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long-running compaction. The default is usually -fine, and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first.

-

concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8.

-

If your data directories are backed by SSD, you should increase this -to the number of cores.

-

Default Value: 1

-
-
-

compaction_throughput_mb_per_sec

-

Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this accounts for all types -of compaction, including validation compaction.

-

Default Value: 16

-
-
-

sstable_preemptive_open_interval_in_mb

-

When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot

-

Default Value: 50

-
-
-

stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

inter_dc_stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all streaming file transfers between datacenters. -This setting allows users to throttle inter-dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

read_request_timeout_in_ms

-

How long the coordinator should wait for read operations to complete

-

Default Value: 5000

-
-
-

range_request_timeout_in_ms

-

How long the coordinator should wait for seq or index scans to complete

-

Default Value: 10000

-
-
-

write_request_timeout_in_ms

-

How long the coordinator should wait for writes to complete

-

Default Value: 2000

-
-
-

counter_write_request_timeout_in_ms

-

How long the coordinator should wait for counter writes to complete

-

Default Value: 5000

-
-
-

cas_contention_timeout_in_ms

-

How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row

-

Default Value: 1000

-
-
-

truncate_request_timeout_in_ms

-

How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.)

-

Default Value: 60000

-
-
-

request_timeout_in_ms

-

The default timeout for other, miscellaneous operations

-

Default Value: 10000

-
-
-

slow_query_log_timeout_in_ms

-

How long before a node logs slow queries. Select queries that take longer than -this timeout to execute will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging.

-

Default Value: 500

-
-
-

cross_node_timeout

-

Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests.

-

Warning: before enabling this property make sure ntp is installed -and the times are synchronized between the nodes.

-

Default Value: false

-
-
-

streaming_keep_alive_period_in_secs

-

This option is commented out by default.

-

Set the keep-alive period for streaming. -This node will send a keep-alive message periodically with this period. -If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles, the stream session times out and fails. -Default value is 300s (5 minutes), which means a stalled stream -times out in 10 minutes by default.

-

Default Value: 300

-
-
-

phi_convict_threshold

-

This option is commented out by default.

-

The phi value that must be reached for a host to be marked down. -Most users should never need to adjust this.

-

Default Value: 8

-
-
-

endpoint_snitch

-

endpoint_snitch – Set this to a class that implements -IEndpointSnitch. The snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route -requests efficiently
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid -correlated failures. It does this by grouping machines into -“datacenters” and “racks.” Cassandra will do its best not to have -more than one replica on the same “rack” (which may not actually -be a physical location)
  • -
-

CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on “rack1” in “datacenter1”, your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new “datacenter”) and -decommissioning the old ones.

-

Out of the box, Cassandra provides:

-
-
SimpleSnitch:
-
Treats Strategy order as proximity. This can improve cache -locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack -and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via -gossip. If cassandra-topology.properties exists, it is used as a -fallback, allowing migration from the PropertyFileSnitch.
-
PropertyFileSnitch:
-
Proximity is determined by rack and data center, which are -explicitly configured in cassandra-topology.properties.
-
Ec2Snitch:
-
Appropriate for EC2 deployments in a single Region. Loads Region -and Availability Zone information from the EC2 API. The Region is -treated as the datacenter, and the Availability Zone as the rack. -Only private IPs are used, so this will not work across multiple -Regions.
-
Ec2MultiRegionSnitch:
-
Uses public IPs as broadcast_address to allow cross-region -connectivity. (Thus, you should set seed addresses to the public -IP as well.) You will need to open the storage_port or -ssl_storage_port on the public IP firewall. (For intra-Region -traffic, Cassandra will switch to the private IP after -establishing a connection.)
-
RackInferringSnitch:
-
Proximity is determined by rack and data center, which are -assumed to correspond to the 3rd and 2nd octet of each node’s IP -address, respectively. Unless this happens to match your -deployment conventions, this is best used as an example of -writing a custom Snitch class and is provided in that spirit.
-
-

You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath.

-

Default Value: SimpleSnitch

-
-
-
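A sketch of the production-oriented choice described above; the dc/rack values mentioned in the comment are hypothetical:

```yaml
endpoint_snitch: GossipingPropertyFileSnitch
# The local node's datacenter and rack then come from conf/cassandra-rackdc.properties,
# e.g. (placeholder values):
#   dc=dc1
#   rack=rack1
```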

dynamic_snitch_update_interval_in_ms

-

controls how often to perform the more expensive part of host score -calculation

-

Default Value: 100

-
-
-

dynamic_snitch_reset_interval_in_ms

-

controls how often to reset all host scores, allowing a bad host to -possibly recover

-

Default Value: 600000

-
-
-

dynamic_snitch_badness_threshold

-

if set greater than zero and read_repair_chance is < 1.0, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest.

-

Default Value: 0.1

-
-
-

request_scheduler

-

request_scheduler – Set this to a class that implements -RequestScheduler, which will schedule incoming client requests -according to the specific policy. This is useful for multi-tenancy -with a single Cassandra cluster. -NOTE: This is specifically for requests from the client and does -not affect inter node communication. -org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -client requests to a node with a separate queue for each -request_scheduler_id. The scheduler is further customized by -request_scheduler_options as described below.

-

Default Value: org.apache.cassandra.scheduler.NoScheduler

-
-
-

request_scheduler_options

-

This option is commented out by default.

-

Scheduler Options vary based on the type of scheduler

-
-
NoScheduler
-
Has no options
-
RoundRobin
-
-
throttle_limit
-
The throttle_limit is the number of in-flight -requests per client. Requests beyond -that limit are queued up until -running requests can complete. -The value of 80 here is twice the number of -concurrent_reads + concurrent_writes.
-
default_weight
-
default_weight is optional and allows for -overriding the default which is 1.
-
weights
-
Weights are optional and will default to 1 or the -overridden default_weight. The weight translates into how -many requests are handled during each turn of the -RoundRobin, based on the scheduler id.
-
-
-
-

Default Value (complex option):

-
#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-
-
-
-

request_scheduler_id

-

This option is commented out by default. -request_scheduler_id – An identifier based on which to perform -the request scheduling. Currently the only valid option is keyspace.

-

Default Value: keyspace

-
-
-

server_encryption_options

-

Enable or disable inter-node encryption. -JVM defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at the JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html -NOTE: No custom encryption options are enabled at the moment. -The available internode options are: all, none, dc, rack.

-

If set to dc, Cassandra will encrypt the traffic between the DCs. -If set to rack, Cassandra will encrypt the traffic between the racks.

-

The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore

-

Default Value (complex option):

-
internode_encryption: none
-keystore: conf/.keystore
-keystore_password: cassandra
-truststore: conf/.truststore
-truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# algorithm: SunX509
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-# require_client_auth: false
-# require_endpoint_verification: false
-
-
-
-
-
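As a hedged illustration of the dc option mentioned above (encrypting only inter-datacenter traffic), reusing the keystore paths from the default block:

```yaml
server_encryption_options:
  internode_encryption: dc
  keystore: conf/.keystore
  keystore_password: cassandra
  truststore: conf/.truststore
  truststore_password: cassandra
```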

client_encryption_options

-

enable or disable client/server encryption.

-

Default Value (complex option):

-
enabled: false
-# If enabled and optional is set to true encrypted and unencrypted connections are handled.
-optional: false
-keystore: conf/.keystore
-keystore_password: cassandra
-# require_client_auth: false
-# Set truststore and truststore_password if require_client_auth is true
-# truststore: conf/.truststore
-# truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# algorithm: SunX509
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-
-
-
-
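A minimal sketch of turning client encryption on while still accepting plaintext connections during a rollout (assumes the bundled conf/.keystore paths from the default block above):

```yaml
client_encryption_options:
  enabled: true
  optional: true        # handle both encrypted and unencrypted connections
  keystore: conf/.keystore
  keystore_password: cassandra
```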

internode_compression

-

internode_compression controls whether traffic between nodes is -compressed. -Can be:

-
-
all
-
all traffic is compressed
-
dc
-
traffic between different datacenters is compressed
-
none
-
nothing is compressed.
-
-

Default Value: dc

-
-
-

inter_dc_tcp_nodelay

-

Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses.

-

Default Value: false

-
-
-

tracetype_query_ttl

-

TTL for different trace types used during logging of the repair process.

-

Default Value: 86400

-
-
-

tracetype_repair_ttl

-

Default Value: 604800

-
-
-

gc_log_threshold_in_ms

-

This option is commented out by default.

-

By default, Cassandra logs GC Pauses greater than 200 ms at INFO level -This threshold can be adjusted to minimize logging if necessary

-

Default Value: 200

-
-
-

enable_user_defined_functions

-

If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level. -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent the execution of evil code.

-

Default Value: false

-
-
-

enable_scripted_user_defined_functions

-

Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with “language javascript” or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false.

-

Default Value: false

-
-
-

windows_timer_interval

-

The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals ‘clockres’ tool can confirm your system’s default -setting.

-

Default Value: 1

-
-
-

transparent_data_encryption_options

-

Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from -a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by -the “key_alias” is the only key that will be used for encrypt operations; previously used keys -can still (and should!) be in the keystore and will be used on decrypt operations -(to handle the case of key rotation).

-

It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)

-

Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints

-

Default Value (complex option):

-
enabled: false
-chunk_length_kb: 64
-cipher: AES/CBC/PKCS5Padding
-key_alias: testing:1
-# CBC IV length for AES needs to be 16 bytes (which is also the default size)
-# iv_length: 16
-key_provider:
-  - class_name: org.apache.cassandra.security.JKSKeyProvider
-    parameters:
-      - keystore: conf/.keystore
-        keystore_password: cassandra
-        store_type: JCEKS
-        key_password: cassandra
-
-
-
-
-

tombstone_warn_threshold

-
-

SAFETY THRESHOLDS #

-

When executing a scan, within or across a partition, we need to keep the -tombstones seen in memory so we can return them to the coordinator, which -will use them to make sure other replicas also know about the deleted rows. -With workloads that generate a lot of tombstones, this can cause performance -problems and even exhaust the server heap. -(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -Adjust the thresholds here if you understand the dangers and want to -scan more tombstones anyway. These thresholds may also be adjusted at runtime -using the StorageService mbean.

-

Default Value: 1000

-
-
-
-

tombstone_failure_threshold

-

Default Value: 100000

-
-
-

batch_size_warn_threshold_in_kb

-

Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability.

-

Default Value: 5

-
-
-

batch_size_fail_threshold_in_kb

-

Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.

-

Default Value: 50

-
-
-

unlogged_batch_across_partitions_warn_threshold

-

Log WARN on any batches not of type LOGGED that span across more partitions than this limit.

-

Default Value: 10

-
-
-

compaction_large_partition_warning_threshold_mb

-

Log a warning when compacting partitions larger than this value

-

Default Value: 100

-
-
-

gc_warn_threshold_in_ms

-

GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement -By default, Cassandra logs GC Pauses greater than 200 ms at INFO level

-

Default Value: 1000

-
-
-

max_value_size_in_mb

-

This option is commented out by default.

-

Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result in marking an SSTable -as corrupted. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

back_pressure_enabled

-

Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas.

-

Default Value: false

-
-
-

back_pressure_strategy

-

The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it’s rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map<String, Object>.

-
-
-
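A sketch of the RateBasedBackPressure configuration described above; the parameter values mirror what the stock cassandra.yaml suggests in its commented-out default, but treat them as a starting point rather than a recommendation:

```yaml
back_pressure_strategy:
  - class_name: org.apache.cassandra.net.RateBasedBackPressure
    parameters:
      - high_ratio: 0.90
        factor: 5
        flow: FAST
```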

otc_coalescing_strategy

-

This option is commented out by default.

-

Coalescing Strategies # -Coalescing multiple messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won’t notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It’s not that bare metal -doesn’t benefit from coalescing messages, it’s that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details.

-

Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.

-

Default Value: DISABLED

-
-
-

otc_coalescing_window_us

-

This option is commented out by default.

-

How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled.

-

Default Value: 200

-
-
-

otc_coalescing_enough_coalesced_messages

-

This option is commented out by default.

-

Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.

-

Default Value: 8

-
-
-

otc_backlog_expiration_interval_ms

-

This option is commented out by default.

-

How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.

-

Default Value: 200

-
-
-

enable_materialized_views

-
-

EXPERIMENTAL FEATURES #

-

Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use.

-

Default Value: true

-
-
-
-

enable_sasi_indexes

-

Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use.

-

Default Value: true

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/configuration/index.html b/src/doc/3.11.7/configuration/index.html deleted file mode 100644 index 43d8a1efa..000000000 --- a/src/doc/3.11.7/configuration/index.html +++ /dev/null @@ -1,109 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

This section describes how to configure Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/contactus.html b/src/doc/3.11.7/contactus.html deleted file mode 100644 index 49c9732e8..000000000 --- a/src/doc/3.11.7/contactus.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contact us" -doc-header-links: ' - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contact us

-

You can get in touch with the Cassandra community either via the mailing lists or the freenode IRC channels.

-
-

Mailing lists

-

The following mailing lists are available:

- -

Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe.

-
-
-

IRC

-

To chat with developers or users in real-time, join our channels on IRC freenode. The -following channels are available:

-
    -
  • #cassandra - for user questions and general discussions.
  • -
  • #cassandra-dev - strictly for questions or discussions related to Cassandra development.
  • -
  • #cassandra-builds - results of automated test builds.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/appendices.html b/src/doc/3.11.7/cql/appendices.html deleted file mode 100644 index 18be07d5b..000000000 --- a/src/doc/3.11.7/cql/appendices.html +++ /dev/null @@ -1,565 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Appendices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Appendices

-
-

Appendix A: CQL Keywords

-

CQL distinguishes between reserved and non-reserved keywords. -Reserved keywords cannot be used as identifier, they are truly reserved -for the language (but one can enclose a reserved keyword by -double-quotes to use it as an identifier). Non-reserved keywords however -only have a specific meaning in certain context but can used as -identifier otherwise. The only raison d’être of these non-reserved -keywords is convenience: some keyword are non-reserved when it was -always easy for the parser to decide whether they were used as keywords -or not.

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
KeywordReserved?
ADDyes
AGGREGATEno
ALLno
ALLOWyes
ALTERyes
ANDyes
APPLYyes
ASno
ASCyes
ASCIIno
AUTHORIZEyes
BATCHyes
BEGINyes
BIGINTno
BLOBno
BOOLEANno
BYyes
CALLEDno
CLUSTERINGno
COLUMNFAMILYyes
COMPACTno
CONTAINSno
COUNTno
COUNTERno
CREATEyes
CUSTOMno
DATEno
DECIMALno
DELETEyes
DESCyes
DESCRIBEyes
DISTINCTno
DOUBLEno
DROPyes
ENTRIESyes
EXECUTEyes
EXISTSno
FILTERINGno
FINALFUNCno
FLOATno
FROMyes
FROZENno
FULLyes
FUNCTIONno
FUNCTIONSno
GRANTyes
IFyes
INyes
INDEXyes
INETno
INFINITYyes
INITCONDno
INPUTno
INSERTyes
INTno
INTOyes
JSONno
KEYno
KEYSno
KEYSPACEyes
KEYSPACESno
LANGUAGEno
LIMITyes
LISTno
LOGINno
MAPno
MODIFYyes
NANyes
NOLOGINno
NORECURSIVEyes
NOSUPERUSERno
NOTyes
NULLyes
OFyes
ONyes
OPTIONSno
ORyes
ORDERyes
PASSWORDno
PERMISSIONno
PERMISSIONSno
PRIMARYyes
RENAMEyes
REPLACEyes
RETURNSno
REVOKEyes
ROLEno
ROLESno
SCHEMAyes
SELECTyes
SETyes
SFUNCno
SMALLINTno
STATICno
STORAGEno
STYPEno
SUPERUSERno
TABLEyes
TEXTno
TIMEno
TIMESTAMPno
TIMEUUIDno
TINYINTno
TOyes
TOKENyes
TRIGGERno
TRUNCATEyes
TTLno
TUPLEno
TYPEno
UNLOGGEDyes
UPDATEyes
USEyes
USERno
USERSno
USINGyes
UUIDno
VALUESno
VARCHARno
VARINTno
WHEREyes
WITHyes
WRITETIMEno
-
-
-

Appendix B: CQL Reserved Types

-

The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name.

- --- - - - - - - - - - - - - - - - - - - -
type
bitstring
byte
complex
enum
interval
macaddr
-
-
-

Appendix C: Dropping Compact Storage

-

Starting with version 4.0, Thrift and COMPACT STORAGE are no longer supported.

-

The ‘ALTER … DROP COMPACT STORAGE’ statement makes Compact Tables CQL-compatible, -exposing the internal structure of Thrift/Compact Tables:

-
    -
  • CQL-created Compact Tables that have no clustering columns will expose an -additional clustering column column1 with UTF8Type.
  • -
  • CQL-created Compact Tables that had no regular columns will expose a -regular column value with BytesType.
  • -
  • For CQL-created Compact Tables, all columns originally defined as -regular will become static.
  • -
  • CQL-created Compact Tables that have clustering but have no regular -columns will have an empty value column (of EmptyType)
  • -
  • SuperColumn Tables (can only be created through Thrift) will expose -a compact value map with an empty name.
  • -
  • Thrift-created Compact Tables will have types corresponding to their -Thrift definition.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/changes.html b/src/doc/3.11.7/cql/changes.html deleted file mode 100644 index c8debc82e..000000000 --- a/src/doc/3.11.7/cql/changes.html +++ /dev/null @@ -1,353 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Changes

-

The following describes the changes in each version of CQL.

-
-

3.4.4

-
    -
  • ALTER TABLE ALTER has been removed; a column’s type may not be changed after creation (CASSANDRA-12443).
  • -
  • ALTER TYPE ALTER has been removed; a field’s type may not be changed after creation (CASSANDRA-12443).
  • -
-
-
-

3.4.3

-
    -
  • Adds a new duration data type (CASSANDRA-11873).
  • -
  • Support for GROUP BY (CASSANDRA-10707).
  • -
  • Adds a DEFAULT UNSET option for INSERT JSON to ignore omitted columns (CASSANDRA-11424).
  • -
  • Allows null as a legal value for TTL on insert and update. It will be treated as equivalent to -inserting a 0 (CASSANDRA-12216).
  • -
-
-
-
-

3.4.2

-
    -
  • If a table has a non zero default_time_to_live, then explicitly specifying a TTL of 0 in an INSERT or -UPDATE statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels -the default_time_to_live). This wasn’t the case before and the default_time_to_live was applied even though a -TTL had been explicitly set.
  • -
  • ALTER TABLE ADD and DROP now allow multiple columns to be added/removed.
  • -
  • New PER PARTITION LIMIT option for SELECT statements (see CASSANDRA-7017.
  • -
  • User-defined functions can now instantiate UDTValue and TupleValue instances via the -new UDFContext interface (see CASSANDRA-10818.
  • -
  • User-defined types may now be stored in a non-frozen form, allowing individual fields to be updated and -deleted in UPDATE statements and DELETE statements, respectively. (CASSANDRA-7423).
  • -
-
-
-

3.4.1

-
    -
  • Adds CAST functions.
  • -
-
-
-

3.4.0

-
    -
  • Support for materialized views.
  • -
  • DELETE support for inequality expressions and IN restrictions on any primary key columns.
  • -
  • UPDATE support for IN restrictions on any primary key columns.
  • -
-
-
-

3.3.1

-
    -
  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X.
  • -
-
-
-

3.3.0

-
    -
  • User-defined functions and aggregates are now supported.
  • -
  • Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings.
  • -
  • Introduces Roles to supersede user based authentication and access control
  • -
  • New date, time, tinyint and smallint data types have been added.
  • -
  • JSON support has been added
  • -
  • Adds new time conversion functions and deprecate dateOf and unixTimestampOf.
  • -
-
-
-

3.2.0

-
    -
  • User-defined types supported.
  • -
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the -keys() function
  • -
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • -
  • Tuple types were added to hold fixed-length sets of typed positional fields.
  • -
  • DROP INDEX now supports optionally specifying a keyspace.
  • -
-
-
-

3.1.7

-
    -
  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations -of clustering columns.
  • -
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, -respectively.
  • -
-
-
-

3.1.6

-
    -
  • A new uuid() method has been added.
  • -
  • Support for DELETE ... IF EXISTS syntax.
  • -
-
-
-

3.1.5

-
    -
  • It is now possible to group clustering columns in a relation, see WHERE clauses.
  • -
  • Added support for static columns.
  • -
-
-
-

3.1.4

-
    -
  • CREATE INDEX now allows specifying options when creating CUSTOM indexes.
  • -
-
-
-

3.1.3

-
    -
  • Millisecond precision formats have been added to the timestamp parser.
  • -
-
-
-

3.1.2

-
    -
  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case -you were using them as a column identifier (or keyspace/table one), you will now need to double quote them.
  • -
-
-
-

3.1.1

-
    -
  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • -
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable -will be a list of whatever type c is.
  • -
  • It is now possible to use named bind variables (using :name instead of ?).
  • -
-
-
-

3.1.0

-
    -
  • ALTER TABLE DROP option added.
  • -
  • SELECT statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
  • -
  • CREATE statements for KEYSPACE, TABLE and INDEX now supports an IF NOT EXISTS condition. -Similarly, DROP statements support a IF EXISTS condition.
  • -
  • INSERT statements optionally supports a IF NOT EXISTS condition and UPDATE supports IF conditions.
  • -
-
-
-

3.0.5

-
    -
  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626.
  • -
-
-
-

3.0.4

-
    -
  • Updated the syntax for custom secondary indexes.
  • -
  • Non-equal conditions on the partition key are now never supported, even for ordering partitioners, as this was not -correct (the order was not the one of the type of the partition key). Instead, the token method should always -be used for range queries on the partition key (see WHERE clauses).
  • -
-
-
-

3.0.3

- -
-
-

3.0.2

-
    -
  • Type validation for the constants has been fixed. For instance, the implementation used to allow -'2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid -blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer -the case; type validation of constants is now more strict. See the data types section for details -on which constant is allowed for which type.
  • -
  • The type validation fix of the previous point has led to the introduction of blob constants to allow the input of -blobs. Do note that while the input of blobs as string constants is still supported by this version (to allow a smoother -transition to blob constants), it is now deprecated and will be removed by a future version. If you were using strings -as blobs, you should thus update your client code ASAP to switch to blob constants.
  • -
  • A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is -now also allowed in select clauses. See the section on functions for details.
  • -
-
-
-

3.0.1

-
    -
  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense -that date string are not valid timeuuid, and it was thus resulting in confusing behaviors. However, the following new methods have been added to help -working with timeuuid: now, minTimeuuid, maxTimeuuid , -dateOf and unixTimestampOf.
  • -
  • Float constants now support the exponent notation. In other words, 4.2E10 is now a valid floating point value.
  • -
-
-
-

Versioning

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no correlation between Cassandra release versions and the CQL language version.

  version | description
  --------+------------------------------------------------------------
  Major   | The major version must be bumped when backward incompatible changes are introduced. This should rarely occur.
  Minor   | Minor version increments occur when new, but backward compatible, functionality is introduced.
  Patch   | The patch version is incremented when bugs are fixed.
\ No newline at end of file
diff --git a/src/doc/3.11.7/cql/ddl.html b/src/doc/3.11.7/cql/ddl.html
deleted file mode 100644
index 4acac0d1c..000000000
--- a/src/doc/3.11.7/cql/ddl.html
+++ /dev/null
@@ -1,765 +0,0 @@
---
layout: docpage
title: "Documentation"
is_homepage: false
is_sphinx_doc: true
doc-parent: "The Cassandra Query Language (CQL)"
doc-title: "Data Definition"
doc-search-path: "../search.html"
---

Data Definition

-

CQL stores data in tables, whose schema defines the layout of said data in the table, and those tables are grouped in keyspaces. A keyspace defines a number of options that apply to all the tables it contains, most prominent of which is the replication strategy used by the keyspace. It is generally encouraged to use one keyspace per application, and thus many clusters may define only one keyspace.

This section describes the statements used to create, modify, and remove those keyspaces and tables.

-
-

Common definitions

-

The names of the keyspaces and tables are defined by the following grammar:

-
-keyspace_name ::=  name
-table_name    ::=  [ keyspace_name '.' ] name
-name          ::=  unquoted_name | quoted_name
-unquoted_name ::=  re('[a-zA-Z_0-9]{1, 48}')
-quoted_name   ::=  '"' unquoted_name '"'
-
-

Both keyspace and table names should be comprised of only alphanumeric characters, cannot be empty and are limited in size to 48 characters (that limit exists mostly to avoid filenames, which may include the keyspace and table name, going over the limits of certain file systems). By default, keyspace and table names are case insensitive (myTable is equivalent to mytable) but case sensitivity can be forced by using double-quotes ("myTable" is different from mytable).

Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is part of. If it is not fully-qualified, the table is assumed to be in the current keyspace (see USE statement).

Further, the valid names for columns are simply defined as:

-
-column_name ::=  identifier
-
-

We also define the notion of statement options for use in the following section:

-
-options ::=  option ( AND option )*
-option  ::=  identifier '=' ( identifier | constant | map_literal )
-
-
-
-

CREATE KEYSPACE

-

A keyspace is created using a CREATE KEYSPACE statement:

-
-create_keyspace_statement ::=  CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options
-
-

For instance:

-
CREATE KEYSPACE Excelsior
-           WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE Excalibur
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-            AND durable_writes = false;
-
-
-

The supported options are:

  name           | kind   | mandatory | default | description
  ---------------+--------+-----------+---------+------------------------------------------------------------
  replication    | map    | yes       |         | The replication strategy and options to use for the keyspace (see details below).
  durable_writes | simple | no        | true    | Whether to use the commit log for updates on this keyspace (disable this option at your own risk!).

The replication property is mandatory and must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on which replication strategy is used. By default, Cassandra supports the following 'class' values:

  • 'SimpleStrategy': A simple strategy that defines a replication factor for the whole cluster. The only sub-option supported is 'replication_factor', which defines that replication factor and is mandatory.
  • 'NetworkTopologyStrategy': A replication strategy that allows setting the replication factor independently for each data-center. The rest of the sub-options are key-value pairs where a key is a data-center name and its value is the associated replication factor.

Attempting to create a keyspace that already exists will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the keyspace already exists.

-
-
-

USE

-

The USE statement allows changing the current keyspace (for the connection on which it is executed). A number of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a keyspace name). A USE statement simply takes the keyspace to use as current as argument:

-
-use_statement ::=  USE keyspace_name
-
-
-
-
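For instance, assuming the Excelsior keyspace from the CREATE KEYSPACE example above, the following illustrative statement makes it the current keyspace:

USE Excelsior;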

ALTER KEYSPACE

-

An ALTER KEYSPACE statement allows modifying the options of a keyspace:

-
-alter_keyspace_statement ::=  ALTER KEYSPACE keyspace_name WITH options
-
-

For instance:

-
ALTER KEYSPACE Excelsior
-          WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-
-

The supported options are the same as for creating a keyspace.

-
-
-

DROP KEYSPACE

-

Dropping a keyspace can be done using the DROP KEYSPACE statement:

-
-drop_keyspace_statement ::=  DROP KEYSPACE [ IF EXISTS ] keyspace_name
-
-

For instance:

-
DROP KEYSPACE Excelsior;
-
-
-

Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UDTs and functions in it, and all the data contained in those tables.

-

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used in which case the operation is a no-op.

-
-
-

CREATE TABLE

-

Creating a new table uses the CREATE TABLE statement:

-
-create_table_statement ::=  CREATE TABLE [ IF NOT EXISTS ] table_name
-                            '('
-                                column_definition
-                                ( ',' column_definition )*
-                                [ ',' PRIMARY KEY '(' primary_key ')' ]
-                            ')' [ WITH table_options ]
-column_definition      ::=  column_name cql_type [ STATIC ] [ PRIMARY KEY]
-primary_key            ::=  partition_key [ ',' clustering_columns ]
-partition_key          ::=  column_name
-                            | '(' column_name ( ',' column_name )* ')'
-clustering_columns     ::=  column_name ( ',' column_name )*
-table_options          ::=  COMPACT STORAGE [ AND table_options ]
-                            | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ]
-                            | options
-clustering_order       ::=  column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )*
-
-

For instance:

-
CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records'
-   AND read_repair_chance = 1.0;
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-CREATE TABLE loads (
-    machine inet,
-    cpu int,
-    mtime timeuuid,
-    load float,
-    PRIMARY KEY ((machine, cpu), mtime)
-) WITH CLUSTERING ORDER BY (mtime DESC);
-
-
-

A CQL table has a name and is composed of a set of rows. Creating a table amounts to defining which columns the rows will be composed of, which of those columns compose the primary key, as well as optional options for the table.

-

Attempting to create an already existing table will return an error unless the IF NOT EXISTS directive is used. If -it is used, the statement will be a no-op if the table already exists.

-
-

Column definitions

-

Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later using an alter statement).

A column_definition is primarily comprised of the name of the column defined and its type, which restricts which values are accepted for that column. Additionally, a column definition can have the following modifiers:

-
-
STATIC
    it declares the column as being a static column.
PRIMARY KEY
    it declares the column as being the sole component of the primary key of the table.
-

Static columns

-

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same partition key). For instance:

-
CREATE TABLE t (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-
-INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-SELECT * FROM t;
-   pk | t | v      | s
-  ----+---+--------+-----------
-   0  | 0 | 'val0' | 'static1'
-   0  | 1 | 'val1' | 'static1'
-
-
-

As can be seen, the s value is the same (static1) for both of the rows in the partition (the partition key in that example being pk, both rows are in that same partition): the 2nd insertion has overridden the value for s.

The use of static columns has the following restrictions:

-
    -
  • tables with the COMPACT STORAGE option (see below) cannot use them.
  • -
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition -has only one row, and so every column is inherently static).
  • -
  • only non PRIMARY KEY columns can be static.
  • -
-
-
-
-

The Primary key

-

Within a table, a row is uniquely identified by its PRIMARY KEY, and hence all tables must define a PRIMARY KEY (and only one). A PRIMARY KEY definition is composed of one or more of the columns defined in the table. Syntactically, the primary key is defined by the keywords PRIMARY KEY followed by a comma-separated list of the column names composing it within parenthesis, but if the primary key has only one column, one can alternatively follow that column definition by the PRIMARY KEY keywords. The order of the columns in the primary key definition matters.

-

A CQL primary key is composed of 2 parts:

-
    -
  • the partition key part. It is the first component of the primary key definition. It can be a single column or, using additional parenthesis, can be multiple columns. A table always has at least a partition key, and the smallest possible table definition is:

    -
    CREATE TABLE t (k text PRIMARY KEY);
    -
    -
    -
  • -
  • the clustering columns. Those are the columns after the first component of the primary key -definition, and the order of those columns define the clustering order.

    -
  • -
-

Some examples of primary key definitions are:

  • PRIMARY KEY (a): a is the partition key and there are no clustering columns.
  • -
  • PRIMARY KEY (a, b, c) : a is the partition key and b and c are the clustering columns.
  • -
  • PRIMARY KEY ((a, b), c) : a and b compose the partition key (this is often called a composite partition -key) and c is the clustering column.
  • -
-
-

The partition key

-

Within a table, CQL defines the notion of a partition. A partition is simply the set of rows that share the same value for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same partition only if they have the same values for all those partition key columns. So for instance, given the following table definition and content:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    d int,
-    PRIMARY KEY ((a, b), c, d)
-);
-
-SELECT * FROM t;
-   a | b | c | d
-  ---+---+---+---
-   0 | 0 | 0 | 0    // row 1
-   0 | 0 | 1 | 1    // row 2
-   0 | 1 | 2 | 2    // row 3
-   0 | 1 | 3 | 3    // row 4
-   1 | 1 | 4 | 4    // row 5
-
-
-

row 1 and row 2 are in the same partition, row 3 and row 4 are also in the same partition (but a -different one) and row 5 is in yet another partition.

-

Note that a table always has a partition key, and that if the table has no clustering columns, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns).

-

The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be localized together in the cluster, and it is thus important to choose your partition key wisely so that rows that need to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of nodes).

-

Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to -be stored on the same set of replica node, a partition key that groups too much data can create a hotspot.

-

Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done atomically and in isolation, which is not the case across partitions.

-

The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they are.

-
-
-

The clustering columns

-

The clustering columns of a table define the clustering order for the partitions of that table. For a given partition, all the rows are physically ordered inside Cassandra by that clustering order. For instance, given:

CREATE TABLE t (
    a int,
    b int,
    c int,
    PRIMARY KEY (a, b, c)
);

SELECT * FROM t;
   a | b | c
  ---+---+---
   0 | 0 | 4     // row 1
   0 | 1 | 9     // row 2
   0 | 2 | 2     // row 3
   0 | 3 | 3     // row 4

then the rows (which all belong to the same partition) are all stored internally in the order of the values of their b column (the order they are displayed above). So where the partition key of the table allows grouping rows on the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the retrieval of a range of rows within a partition (for instance, in the example above, SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3) to be very efficient.

-
-
-
-

Table options

-

A CQL table has a number of options that can be set at creation (and, for most of them, altered later). These options are specified after the WITH keyword.

-

Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the COMPACT STORAGE option and the CLUSTERING ORDER option. Those, as well as the other -options of a table are described in the following sections.

-
-

Compact tables

-
-

Warning

-

Since Cassandra 3.0, compact tables have the exact same layout internally as non compact ones (for the same schema obviously), and declaring a table compact only creates artificial limitations on the table definition and usage that are necessary to ensure backward compatibility with the deprecated Thrift API. And as COMPACT STORAGE cannot, as of Cassandra 3.11.7, be removed, it is strongly discouraged to create new tables with the COMPACT STORAGE option.

-
-

A compact table is one defined with the COMPACT STORAGE option. This option is mainly targeted towards backward compatibility for definitions created before CQL version 3 (see www.datastax.com/dev/blog/thrift-to-cql3 for more details) and shouldn't be used for new tables. Declaring a table with this option creates limitations for the table which are largely arbitrary but necessary for backward compatibility with the (deprecated) Thrift API. Amongst those limitations:

  • a compact table cannot use collections nor static columns.
  • if a compact table has at least one clustering column, then it must have exactly one column outside of the primary key ones. This implies in particular that you cannot add or remove columns after creation.
  • -
  • a compact table is limited in the indexes it can create, and no materialized view can be created on it.
  • -
-
-
-

Reversing the clustering order

-

The clustering order of a table is defined by the clustering columns of that table. By default, that ordering is based on the natural order of those clustering columns, but the CLUSTERING ORDER option allows changing that clustering order to use the reverse natural order for some (potentially all) of the columns.

The CLUSTERING ORDER option takes a comma-separated list of the clustering columns, each with an ASC (for ascending, i.e. the natural order) or DESC (for descending, i.e. the reverse natural order) modifier. Note in particular that the default (if the CLUSTERING ORDER option is not used) is strictly equivalent to using the option with all clustering columns using the ASC modifier.

-

Note that this option is basically a hint for the storage engine to change the order in which it stores the rows, but it has 3 visible consequences:

  1. it limits which ORDER BY clauses are allowed for selects on that table. You can only order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering columns a and b and you defined WITH CLUSTERING ORDER (a DESC, b ASC), then in queries you will be allowed to use ORDER BY (a DESC, b ASC) and (reverse clustering order) ORDER BY (a ASC, b DESC) but not ORDER BY (a ASC, b ASC) (nor ORDER BY (a DESC, b DESC)).
  2. it also changes the default order of results when queried (if no ORDER BY is provided). Results are always returned in clustering order (within a partition).
  3. it has a small performance impact on some queries, as queries in reverse clustering order are slower than the ones in forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of your columns (which is common with time series, for instance, where you often want data from the newest to the oldest), it is an optimization to declare a descending clustering order; a sketch of such a table is shown below.
-
-
-
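As a sketch of the time-series pattern mentioned above (the table and column names here are hypothetical, purely illustrative), a descending clustering order returns the newest rows of each partition first by default:

CREATE TABLE events_by_day (
    day date,
    occurred_at timestamp,
    payload text,
    PRIMARY KEY (day, occurred_at)
) WITH CLUSTERING ORDER BY (occurred_at DESC);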

Other table options

-
-

Todo

-

review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance)

-
-

A table supports the following options:

  option                     | kind   | default   | description
  ---------------------------+--------+-----------+------------------------------------------------------------
  comment                    | simple | none      | A free-form, human-readable comment.
  read_repair_chance         | simple | 0.1       | The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpose of read repairs.
  dclocal_read_repair_chance | simple | 0         | The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center as the read coordinator for the purpose of read repairs.
  gc_grace_seconds           | simple | 864000    | Time to wait before garbage collecting tombstones (deletion markers).
  bloom_filter_fp_chance     | simple | 0.00075   | The target probability of false positives of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impacts the size of bloom filters in-memory and on-disk).
  default_time_to_live       | simple | 0         | The default expiration time ("TTL") in seconds for a table.
  compaction                 | map    | see below | Compaction options.
  compression                | map    | see below | Compression options.
  caching                    | map    | see below | Caching options.
-
Compaction options
-

The compaction options must at least define the 'class' sub-option, which defines the compaction strategy class to use. The default supported classes are 'SizeTieredCompactionStrategy' (STCS), 'LeveledCompactionStrategy' (LCS) and 'TimeWindowCompactionStrategy' (TWCS) ('DateTieredCompactionStrategy' is also supported but is deprecated and 'TimeWindowCompactionStrategy' should be preferred instead). A custom strategy can be provided by specifying the full class name as a string constant.

All default strategies support a number of common options, as well as options specific to the strategy chosen (see the section corresponding to your strategy for details: STCS, LCS and TWCS); a sketch of setting such options is shown below.

-
-
-
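For instance, a table could pick the size-tiered strategy and tune one of its common sub-options (min_threshold). The table and column names below are hypothetical and only illustrate the option syntax:

CREATE TABLE audit_log (
    actor text,
    logged_at timeuuid,
    action text,
    PRIMARY KEY (actor, logged_at)
) WITH compaction = { 'class' : 'SizeTieredCompactionStrategy', 'min_threshold' : 6 };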
Compression options
-

The compression options define if and how the sstables of the table are compressed. The following sub-options are -available:

  Option             | Default       | Description
  -------------------+---------------+------------------------------------------------------------
  class              | LZ4Compressor | The compression algorithm to use. Default compressors are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
  enabled            | true          | Enable/disable sstable compression.
  chunk_length_in_kb | 64            | On disk, SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increase the minimum size of data to be read from disk for a read.
  crc_check_chance   | 1.0           | When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replicas. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking and to 0.5, for instance, to check them on every other read.

For instance, to create a table with LZ4Compressor and a chunk_length_in_kb of 4KB:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
-
-
-
-
-
Caching options
-

The caching options allow configuring both the key cache and the row cache for the table. The following sub-options are available:

  Option             | Default | Description
  -------------------+---------+------------------------------------------------------------
  keys               | ALL     | Whether to cache keys ("key cache") for this table. Valid values are: ALL and NONE.
  rows_per_partition | NONE    | The amount of rows to cache per partition ("row cache"). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.

For instance, to create a table with both a key cache and 10 rows per partition:

-
CREATE TABLE simple (
    id int,
    key text,
    value text,
    PRIMARY KEY (key, value)
) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10};
-
-
-
-
-
Other considerations:
-
    -
  • Adding new columns (see ALTER TABLE below) is a constant time operation. There is thus no need to try to -anticipate future usage when creating a table.
  • -
-
-
-
-
-
-

ALTER TABLE

-

Altering an existing table uses the ALTER TABLE statement:

-
-alter_table_statement   ::=  ALTER TABLE table_name alter_table_instruction
-alter_table_instruction ::=  ADD column_name cql_type ( ',' column_name cql_type )*
-                             | DROP column_name ( column_name )*
-                             | WITH options
-
-

For instance:

-
ALTER TABLE addamsFamily ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table'
-       AND read_repair_chance = 0.2;
-
-
-

The ALTER TABLE statement can:

-
    -
  • Add new column(s) to the table (through the ADD instruction). Note that the primary key of a table cannot be changed and thus newly added columns will, by extension, never be part of the primary key. Also note that compact tables have restrictions regarding column addition. Note that this is a constant time operation (in the amount of data the cluster contains).
  • Remove column(s) from the table (through the DROP instruction; see the illustrative statement after the warnings below). This drops both the column and all its content, but note that while the column becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings below. Due to lazy removal, the altering itself is a constant time operation (in the amount of data removed or contained in the cluster).
  • Change some of the table options (through the WITH instruction). The supported options are the same as when creating a table (except for COMPACT STORAGE and CLUSTERING ORDER, which cannot be changed after creation). Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.
  • -
-
-

Warning

-

Dropping a column assumes that the timestamps used for the values of this column are "real" timestamps in microseconds. Using "real" timestamps in microseconds is the default and is strongly recommended, but as Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another convention. Please be aware that if you do so, dropping a column will not work correctly.

-
-
-

Warning

-

Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one, unless the type of the dropped column was a (non-frozen) collection (due to an internal technical limitation).

-
-
-
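For instance, the column added in the ADD example above could later be removed with a statement like the following (illustrative only, subject to the caveats in the warnings above):

ALTER TABLE addamsFamily DROP gravesite;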
-

DROP TABLE

-

Dropping a table uses the DROP TABLE statement:

-
-drop_table_statement ::=  DROP TABLE [ IF EXISTS ] table_name
-
-
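For instance, an illustrative statement reusing the monkeySpecies table created earlier on this page:

DROP TABLE IF EXISTS monkeySpecies;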

Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.

-

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the -operation is a no-op.

-
-
-

TRUNCATE

-

A table can be truncated using the TRUNCATE statement:

-
-truncate_statement ::=  TRUNCATE [ TABLE ] table_name
-
-
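For instance (illustrative, again reusing the monkeySpecies table from the CREATE TABLE examples):

TRUNCATE monkeySpecies;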

Note that TRUNCATE TABLE foo is allowed for consistency with other DDL statements, but tables are currently the only object that can be truncated and so the TABLE keyword can be omitted.

-

Truncating a table permanently removes all existing data from the table, but without removing the table itself.

\ No newline at end of file
diff --git a/src/doc/3.11.7/cql/definitions.html b/src/doc/3.11.7/cql/definitions.html
deleted file mode 100644
index e4282356b..000000000
--- a/src/doc/3.11.7/cql/definitions.html
+++ /dev/null
@@ -1,312 +0,0 @@
---
layout: docpage
title: "Documentation"
is_homepage: false
is_sphinx_doc: true
doc-parent: "The Cassandra Query Language (CQL)"
doc-title: "Definitions"
doc-search-path: "../search.html"
---

Definitions

-
-

Conventions

-

To aid in specifying the CQL syntax, we will use the following conventions in this document:

-
    -
  • Language rules will be given in an informal BNF variant notation. In particular, we'll use square brackets ([ item ]) for optional items, * and + for repeated items (where + implies at least one).
  • The grammar will also use the following convention for convenience: non-terminal terms will be lowercase (and link to their definition) while terminal keywords will be provided "all caps". Note however that keywords are Identifiers and keywords and are thus case insensitive in practice. We will also define some early constructions using regexps, which we'll indicate with re(<some regular expression>).
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the comma on the last column definition in a CREATE TABLE statement is optional but supported if present even though the grammar in this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
  • -
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.
  • -
-
-
-

Identifiers and keywords

-

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token -matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

-

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in Appendix A: CQL Keywords.

-

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

-

There is a second kind of identifier called a quoted identifier, defined by enclosing an arbitrary sequence of characters (non empty) in double-quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column (note that doing so is not particularly advised), while select would raise a parsing error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is however equivalent to the unquoted identifier obtained by removing the double-quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.
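As a short, hypothetical illustration of these quoting rules (the table and column names are invented for this example):

CREATE TABLE readings ("Sensor-Id" text PRIMARY KEY, reading int);
SELECT "Sensor-Id" FROM readings;   -- must repeat the exact quoted spelling
SELECT "sensor-id" FROM readings;   -- refers to a different, undefined column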

-
-

Note

-

Quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional update, the server will respond with a result-set containing a special result named "[applied]". If you've declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by square brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

-
-

More formally, we have:

-
-identifier          ::=  unquoted_identifier | quoted_identifier
-unquoted_identifier ::=  re('[a-zA-Z][a-zA-Z0-9_]*')
-quoted_identifier   ::=  '"' (any character where " can appear if doubled)+ '"'
-
-
-
-

Constants

-

CQL defines the following kind of constants:

-
-constant ::=  string | integer | float | boolean | uuid | blob | NULL
-string   ::=  '\'' (any character where ' can appear if doubled)+ '\''
-              '$$' (any character other than '$$') '$$'
-integer  ::=  re('-?[0-9]+')
-float    ::=  re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY
-boolean  ::=  TRUE | FALSE
-uuid     ::=  hex{8}-hex{4}-hex{4}-hex{4}-hex{12}
-hex      ::=  re("[0-9a-fA-F]")
-blob     ::=  '0' ('x' | 'X') hex+
-
-

In other words:

-
    -
  • A string constant is an arbitrary sequence of characters enclosed by single-quotes ('). A single-quote can be included by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted Identifiers and keywords that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence of characters between two dollar-dollar markers, in which case single-quotes can be used without escaping ($$It's raining today$$). That latter form is often used when defining user-defined functions to avoid having to escape single-quote characters in the function body (as they are more likely to occur than $$).
  • Integer, float and boolean constants are defined as expected. Note however that float allows the special NaN and Infinity constants.
  • CQL supports UUID constants.
  • Blob content is provided in hexadecimal and prefixed by 0x.
  • The special NULL constant denotes the absence of value.
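For instance, assuming a hypothetical table named sample with columns of types int, text, float, uuid and blob, a single statement could use all of these constant forms:

INSERT INTO sample (id, note, score, owner, payload)
VALUES (42, $$It's a 'quoted' note$$, 4.2E10, 123e4567-e89b-12d3-a456-426655440000, 0xCAFEBABE);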

For how these constants are typed, see the Data Types section.

-
-
-

Terms

-

CQL has the notion of a term, which denotes the kind of values that CQL supports. Terms are defined by:

term          ::=  constant | literal | function_call | type_hint | bind_marker
literal       ::=  collection_literal | udt_literal | tuple_literal
function_call ::=  identifier '(' [ term (',' term)* ] ')'
type_hint     ::=  '(' cql_type ')' term
bind_marker   ::=  '?' | ':' identifier
-
-

A term is thus one of:

-
    -
  • A constant.
  • -
  • A literal for either a collection, a user-defined type or a tuple -(see the linked sections for details).
  • -
  • A function call: see the section on functions for details on which native function exists and how to define your own user-defined ones.
  • -
  • A type hint: see the related section for details.
  • -
  • A bind marker, which denotes a variable to be bound at execution time. See the section on Prepared Statements -for details. A bind marker can be either anonymous (?) or named (:some_name). The latter form provides a more -convenient way to refer to the variable for binding it and should generally be preferred.
  • -
-
-
-

Comments

-

A comment in CQL is a line beginning with either double dashes (--) or a double slash (//).

-

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-
-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-
-
-
-
-

Statements

-

CQL consists of statements that can be divided in the following categories:

  • data definition statements
  • data manipulation statements
  • secondary index statements
  • materialized view statements
  • role and permission statements
  • user-defined function (UDF) statements
  • user-defined type (UDT) statements
  • trigger statements

All the statements are listed below and are described in the rest of this documentation (see links above):

-
-cql_statement                ::=  statement [ ';' ]
-statement                    ::=  ddl_statement
-                                  | dml_statement
-                                  | secondary_index_statement
-                                  | materialized_view_statement
-                                  | role_or_permission_statement
-                                  | udf_statement
-                                  | udt_statement
-                                  | trigger_statement
-ddl_statement                ::=  use_statement
-                                  | create_keyspace_statement
-                                  | alter_keyspace_statement
-                                  | drop_keyspace_statement
-                                  | create_table_statement
-                                  | alter_table_statement
-                                  | drop_table_statement
-                                  | truncate_statement
-dml_statement                ::=  select_statement
-                                  | insert_statement
-                                  | update_statement
-                                  | delete_statement
-                                  | batch_statement
-secondary_index_statement    ::=  create_index_statement
-                                  | drop_index_statement
-materialized_view_statement  ::=  create_materialized_view_statement
-                                  | drop_materialized_view_statement
-role_or_permission_statement ::=  create_role_statement
-                                  | alter_role_statement
-                                  | drop_role_statement
-                                  | grant_role_statement
-                                  | revoke_role_statement
-                                  | list_roles_statement
-                                  | grant_permission_statement
-                                  | revoke_permission_statement
-                                  | list_permissions_statement
-                                  | create_user_statement
-                                  | alter_user_statement
-                                  | drop_user_statement
-                                  | list_users_statement
-udf_statement                ::=  create_function_statement
-                                  | drop_function_statement
-                                  | create_aggregate_statement
-                                  | drop_aggregate_statement
-udt_statement                ::=  create_type_statement
-                                  | alter_type_statement
-                                  | drop_type_statement
-trigger_statement            ::=  create_trigger_statement
-                                  | drop_trigger_statement
-
-
-
-

Prepared Statements

-

CQL supports prepared statements. Prepared statements are an optimization that allows parsing a query only once but executing it multiple times with different concrete values.

Any statement that uses at least one bind marker (see bind_marker) will need to be prepared. After that, the statement can be executed by providing concrete values for each of its markers. The exact details of how a statement is prepared and then executed depend on the CQL driver used and you should refer to your driver documentation.
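For instance, the following statements each contain bind markers (one anonymous, one named) and would typically be prepared once by a driver and then executed repeatedly with different values; they assume the users table used in the SELECT examples later in this document (illustrative only, the prepare/execute API itself is driver-specific):

SELECT name, occupation FROM users WHERE userid = ?;
INSERT INTO users (userid, name, occupation) VALUES (:id, :name, :occupation);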

\ No newline at end of file
diff --git a/src/doc/3.11.7/cql/dml.html b/src/doc/3.11.7/cql/dml.html
deleted file mode 100644
index 0045bdd93..000000000
--- a/src/doc/3.11.7/cql/dml.html
+++ /dev/null
@@ -1,558 +0,0 @@
---
layout: docpage
title: "Documentation"
is_homepage: false
is_sphinx_doc: true
doc-parent: "The Cassandra Query Language (CQL)"
doc-title: "Data Manipulation"
doc-search-path: "../search.html"
---

Data Manipulation

-

This section describes the statements supported by CQL to insert, update, delete and query data.

-
-

SELECT

-

Querying data from tables is done using a SELECT statement:

-
-select_statement ::=  SELECT [ JSON | DISTINCT ] ( select_clause | '*' )
-                      FROM table_name
-                      [ WHERE where_clause ]
-                      [ GROUP BY group_by_clause ]
-                      [ ORDER BY ordering_clause ]
-                      [ PER PARTITION LIMIT (integer | bind_marker) ]
-                      [ LIMIT (integer | bind_marker) ]
-                      [ ALLOW FILTERING ]
-select_clause    ::=  selector [ AS identifier ] ( ',' selector [ AS identifier ] )
-selector         ::=  column_name
-                      | term
-                      | CAST '(' selector AS cql_type ')'
-                      | function_name '(' [ selector ( ',' selector )* ] ')'
-                      | COUNT '(' '*' ')'
-where_clause     ::=  relation ( AND relation )*
-relation         ::=  column_name operator term
-                      '(' column_name ( ',' column_name )* ')' operator tuple_literal
-                      TOKEN '(' column_name ( ',' column_name )* ')' operator term
-operator         ::=  '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY
-group_by_clause  ::=  column_name ( ',' column_name )*
-ordering_clause  ::=  column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )*
-
-

For instance:

-
SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT (*) AS user_count FROM users;
-
-
-

The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of the rows matching the request, where each row contains the values for the selection corresponding to the query. Additionally, functions (including aggregation ones) can be applied to the result.

A SELECT statement contains at least a selection clause and the name of the table on which the selection is made (note that CQL does not support joins or sub-queries and thus a select statement only applies to a single table). In most cases, a select will also have a where clause and it can optionally have additional clauses to order or limit the results. Lastly, queries that require filtering can be allowed if the ALLOW FILTERING flag is provided.

-
-

Selection clause

-

The select_clause determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. It consists of a comma-separated list of selectors or, -alternatively, of the wildcard character (*) to select all the columns defined in the table.

-
-

Selectors

-

A selector can be one of:

-
    -
  • A column name of the table selected, to retrieve the values for that column.
  • -
  • A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the -corresponding column of the result-set will simply have the value of this term for every row returned).
  • -
  • A casting, which allows to convert a nested selector to a (compatible) type.
  • -
  • A function call, where the arguments are selector themselves. See the section on functions for -more details.
  • -
  • The special call COUNT(*) to the COUNT function, which counts all non-null results.
  • -
-
-
-

Aliases

-

Every top-level selector can also be aliased (using AS). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:

-
// Without alias
-SELECT intAsBlob(4) FROM t;
-
-//  intAsBlob(4)
-// --------------
-//  0x00000004
-
-// With alias
-SELECT intAsBlob(4) AS four FROM t;
-
-//  four
-// ------------
-//  0x00000004
-
-
-
-

Note

-

Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the WHERE clause, not in the ORDER BY clause, ...). You must use the original column name instead.

-
-
-
-

WRITETIME and TTL function

-

Selection supports two special functions (that aren't allowed anywhere else): WRITETIME and TTL. Both functions take only one argument and that argument must be a column name (so for instance TTL(3) is invalid).

Those functions allow retrieving meta-information that is stored internally for each column, namely:

  • the timestamp of the value of the column for WRITETIME.
  • the remaining time to live (in seconds) for the value of the column if it is set to expire (and null otherwise). A short illustration follows this list.
-
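For instance, reusing the users table from the earlier SELECT examples (illustrative only):

SELECT WRITETIME(occupation), TTL(occupation) FROM users WHERE userid = 199;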
-
-
-

The WHERE clause

-

The WHERE clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the PRIMARY KEY and/or have a secondary index defined on them.

-

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered an equality relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations on them are restricted to those that allow selecting a contiguous (for the ordering) set of rows. For instance, given:

-
CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-
-
-

The following query is allowed:

-
SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND blog_title='John''s Blog'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):

-
// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, -rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -token(-1) > token(0) in particular). Example:

-
SELECT * FROM posts
- WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-
-
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full -primary key.

-

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-
-
-

will request all rows that sort after the one having "John's Blog" as blog_title and '2012-01-01' for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which would not be the case for:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND blog_title > 'John''s Blog'
-   AND posted_at > '2012-01-01'
-
-
-

The tuple notation may also be used for IN clauses on clustering columns:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-
-
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the -map keys.

-
-
-

Grouping results

-

The GROUP BY option allows condensing into a single row all selected rows that share the same values for a set of columns.

Using the GROUP BY option, it is only possible to group rows at the partition key level or at a clustering column level. As a consequence, the GROUP BY option only accepts as arguments primary key column names in the primary key order. If a primary key column is restricted by an equality restriction, it is not required to be present in the GROUP BY clause.

Aggregate functions will produce a separate value for each group. If no GROUP BY clause is specified, aggregate functions will produce a single value for all the rows.

If a column is selected without an aggregate function, in a statement with a GROUP BY, the first value encountered in each group will be returned. A sketch of a grouped query is shown below.

-
-
-
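For instance, assuming a hypothetical table users_by_country with PRIMARY KEY ((country), user_id), the following query produces one row per partition, i.e. per country (illustrative only):

SELECT country, count(*) AS users_in_country
  FROM users_by_country
 GROUP BY country;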

Ordering results

-

The ORDER BY clause allows selecting the order of the returned results. It takes as argument a list of column names along with the order for the column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited by the clustering order defined on the table:

  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
  • -
-
-
-

Limiting results

-

The LIMIT option to a SELECT statement limits the number of rows returned by a query, while the PER PARTITION LIMIT option limits the number of rows returned for a given partition by the query. Note that both types of limit can be used in the same statement; an illustrative query combining ordering and limiting is shown below.

-
-
-
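For instance, combining ordering and limiting on the posts table defined earlier in this section (illustrative only), the following returns at most 10 rows of one user's partition in reverse clustering order:

SELECT blog_title, posted_at, entry_title
  FROM posts
 WHERE userid = 'john doe'
 ORDER BY blog_title DESC, posted_at DESC
 LIMIT 10;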

Allowing filtering

-

By default, CQL only allows select queries that don’t involve “filtering” server side, i.e. queries where we know that -all (live) record read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” -queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of -data returned by the query (which can be controlled through LIMIT).

-

The ALLOW FILTERING option allows to explicitly allow (some) queries that require filtering. Please note that a -query using ALLOW FILTERING may thus have unpredictable performance (for the definition above), i.e. even a query -that selects a handful of records may exhibit performance that depends on the total amount of data stored in the -cluster.

-

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:

-
CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-
-
-

Then the following queries are valid:

-
SELECT * FROM users;
-SELECT * FROM users WHERE birth_year = 1981;
-
-
-

because in both cases, Cassandra guarantees that these queries' performance will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored. Nevertheless, the number of nodes will always be many orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

-

However, the following query will be rejected:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-
-

because Cassandra cannot guarantee that it won't have to scan a large amount of data even if the result of the query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you "know what you are doing", you can force the execution of this query by using ALLOW FILTERING, and so the following query is valid:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-
-
-
-
-

INSERT

-

Inserting data for a row is done using an INSERT statement:

-
-insert_statement ::=  INSERT INTO table_name ( names_values | json_clause )
-                      [ IF NOT EXISTS ]
-                      [ USING update_parameter ( AND update_parameter )* ]
-names_values     ::=  names VALUES tuple_literal
-json_clause      ::=  JSON string [ DEFAULT ( NULL | UNSET ) ]
-names            ::=  '(' column_name ( ',' column_name )* ')'
-
-

For instance:

-
INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-      USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                              "director": "Joss Whedon",
-                              "year": 2005}';
-
-
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by -its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the -section on JSON support for more detail.

-

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means to know which of creation or update happened.

-

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the -insertion. But please note that using IF NOT EXISTS will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly.

-

All updates for an INSERT are applied atomically and in isolation.

-

Please refer to the UPDATE section for informations on the update_parameter.

-

Also note that INSERT does not support counters, while UPDATE does.

-
-
-

UPDATE

-

Updating a row is done using an UPDATE statement:

-
-update_statement ::=  UPDATE table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      SET assignment ( ',' assignment )*
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-update_parameter ::=  ( TIMESTAMP | TTL ) ( integer | bind_marker )
-assignment       ::=  simple_selection '=' term
-                     | column_name '=' column_name ( '+' | '-' ) term
-                     | column_name '=' list_literal '+' column_name
-simple_selection ::=  column_name
-                     | column_name '[' term ']'
-                     | column_name '.' `field_name
-condition        ::=  simple_selection operator term
-
-

For instance:

-
UPDATE NerdMovies USING TTL 400
-   SET director   = 'Joss Whedon',
-       main_actor = 'Nathan Fillion',
-       year       = 2005
- WHERE movie = 'Serenity';
-
-UPDATE UserActions
-   SET total = total + 2
-   WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14
-     AND action = 'click';
-
-
-

The UPDATE statement writes one or more columns for a given row in a table. The where_clause is used to -select the row to update and must include all columns composing the PRIMARY KEY. Non primary key columns are then -set using the SET keyword.

-

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through IF, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred.

-

It is however possible to use the conditions on some columns through IF, in which case the row will not be updated -unless the conditions are met. But, please note that using IF conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly.

-

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

-

Regarding the assignment:

-
    -
  • c = c + 3 is used to increment/decrement counters. The column name after the '=' sign must be the same as the one before the '=' sign. Note that increment/decrement is only allowed on counters, and these are the only update operations allowed on counters. See the section on counters for details.
  • -
  • id = id + <some-collection> and id[value1] = value2 are for collections, see the relevant section for details.
  • -
  • id.field = 3 is for setting the value of a field on a non-frozen user-defined types. see the relevant section for details.
  • -
-
-

Update parameters

-

The UPDATE, INSERT (and DELETE and BATCH for the TIMESTAMP) statements support the following -parameters:

-
    -
  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in -microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • -
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are -automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not -the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL -is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a -default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of null is equivalent -to inserting with a TTL of 0.
  • -
-
-
-
-

DELETE

-

Deleting rows or parts of rows uses the DELETE statement:

-
-delete_statement ::=  DELETE [ simple_selection ( ',' simple_selection ) ]
-                      FROM table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-
-

For instance:

-
DELETE FROM NerdMovies USING TIMESTAMP 1240003134
- WHERE movie = 'Serenity';
-
-DELETE phone FROM Users
- WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-
-
-

The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, -only those columns are deleted from the row indicated by the WHERE clause. Otherwise, whole rows are removed.

-

The WHERE clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -IN operator. A range of rows may be deleted using an inequality operator (such as >=).

-
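As an illustrative sketch (assuming a hypothetical table plays with partition key game and clustering column round), a contiguous range of rows in one partition can be removed with an inequality on a clustering column:

-- hypothetical table: plays(game text, round int, ..., PRIMARY KEY (game, round))
DELETE FROM plays WHERE game = 'quake' AND round >= 5;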

DELETE supports the TIMESTAMP option with the same semantics as in updates.

-

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

-

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT -statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly.

-
-
-

BATCH

-

Multiple INSERT, UPDATE and DELETE can be executed in a single statement by grouping them through a -BATCH statement:

-
-batch_statement        ::=  BEGIN [ UNLOGGED | COUNTER ] BATCH
-                            [ USING update_parameter ( AND update_parameter )* ]
-                            modification_statement ( ';' modification_statement )*
-                            APPLY BATCH
-modification_statement ::=  insert_statement | update_statement | delete_statement
-
-

For instance:

-
BEGIN BATCH
-   INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-   DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-
-
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

-
    -
  • It saves network round-trips between the client and the server (and sometimes between the server coordinator and the -replicas) when batching multiple updates.
  • -
  • All updates in a BATCH belonging to a given partition key are performed in isolation.
  • -
  • By default, all operations in the batch are performed as logged, to ensure all mutations eventually complete (or -none will). See the notes on UNLOGGED batches for more details.
  • -
-

Note that:

-
    -
  • BATCH statements may only contain UPDATE, INSERT and DELETE statements (not other batches for instance).
  • -
  • Batches are not a full analogue for SQL transactions.
  • -
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp -(either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra’s conflict -resolution procedure in the case of timestamp ties, operations may -be applied in an order that is different from the order they are listed in the BATCH statement. To force a -particular operation ordering, you must specify per-operation timestamps.
  • -
  • A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
  • -
-
-

UNLOGGED batches

-

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition).

-

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.

-
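For illustration, an unlogged batch only adds the UNLOGGED keyword; this sketch reuses the hypothetical users table from the BATCH example above:

-- sketch only: users is the example table from the BATCH section above
BEGIN UNLOGGED BATCH
   INSERT INTO users (userid, password) VALUES ('user5', 'ch@ngem3d');
   UPDATE users SET password = 'ps33dhds' WHERE userid = 'user6';
APPLY BATCH;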
-
-

COUNTER batches

-

Use the COUNTER option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent.

-
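A minimal sketch, assuming a hypothetical counter table page_views with a counter column views:

-- hypothetical table: page_views(page text PRIMARY KEY, views counter)
BEGIN COUNTER BATCH
   UPDATE page_views SET views = views + 1 WHERE page = '/home';
   UPDATE page_views SET views = views + 1 WHERE page = '/doc';
APPLY BATCH;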
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/functions.html b/src/doc/3.11.7/cql/functions.html deleted file mode 100644 index c6c968993..000000000 --- a/src/doc/3.11.7/cql/functions.html +++ /dev/null @@ -1,666 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Functions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Functions

-

CQL supports 2 main categories of functions:

-
    -
  • the scalar functions, which simply take a number of values and produce an output with it.
  • -
  • the aggregate functions, which are used to aggregate multiple rows results from a -SELECT statement.
  • -
-

In both cases, CQL provides a number of native “hard-coded” functions as well as the ability to create new user-defined -functions.

-
-

Note

-

By default, the use of user-defined functions is disabled for security concerns (even when enabled, the execution of user-defined functions is sandboxed and a “rogue” function should not be allowed to do evil, but no sandbox is perfect so using user-defined functions is opt-in). See the enable_user_defined_functions setting in cassandra.yaml to enable them.

-
-

A function is identified by its name:

-
-function_name ::=  [ keyspace_name '.' ] name
-
-
-

Scalar functions

-
-

Native functions

-
-

Cast

-

The cast function can be used to convert one native datatype to another.

-

The following table describes the conversions supported by the cast function. Cassandra will silently ignore any -cast converting a datatype into its own datatype.

From        To
ascii       text, varchar
bigint      tinyint, smallint, int, float, double, decimal, varint, text, varchar
boolean     text, varchar
counter     tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
date        timestamp
decimal     tinyint, smallint, int, bigint, float, double, varint, text, varchar
double      tinyint, smallint, int, bigint, float, decimal, varint, text, varchar
float       tinyint, smallint, int, bigint, double, decimal, varint, text, varchar
inet        text, varchar
int         tinyint, smallint, bigint, float, double, decimal, varint, text, varchar
smallint    tinyint, int, bigint, float, double, decimal, varint, text, varchar
time        text, varchar
timestamp   date, text, varchar
timeuuid    timestamp, date, text, varchar
tinyint     tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
uuid        text, varchar
varint      tinyint, smallint, int, bigint, float, double, decimal, text, varchar
-

The conversions rely strictly on Java’s semantics. For example, the double value 1 will be converted to the text value -‘1.0’. For instance:

-
SELECT avg(cast(count as double)) FROM myTable
-
-
-
-
-

Token

-

The token function allows computing the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

-

The types of the arguments of the token function depend on the types of the partition key columns. The return type depends on the partitioner in use:

-
    -
  • For Murmur3Partitioner, the return type is bigint.
  • -
  • For RandomPartitioner, the return type is varint.
  • -
  • For ByteOrderedPartitioner, the return type is blob.
  • -
-

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:

-
CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-)
-
-
-

then the token function will take a single argument of type text (in that case, the partition key is userid (there are no clustering columns, so the partition key is the same as the primary key)), and the return type will be bigint.

-
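For illustration, against the users table above, token can be used to page through partitions (a sketch; the starting value is arbitrary):

-- sketch only: users is the table defined just above
SELECT userid, token(userid) FROM users WHERE token(userid) > token('tom');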
-
-

Uuid

-

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or -UPDATE statements.

-
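A minimal sketch, assuming a hypothetical sessions table with an id uuid column:

-- hypothetical table: sessions(id uuid PRIMARY KEY, username text)
INSERT INTO sessions (id, username) VALUES (uuid(), 'alice');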
-
-

Timeuuid functions

-
-
now
-

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time where -the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in -WHERE clauses. For instance, a query of the form:

-
SELECT * FROM myTable WHERE t = now()
-
-
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

-
-
-
minTimeuuid and maxTimeuuid
-

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp -or a date string <timestamps>) and return a fake timeuuid corresponding to the smallest (resp. biggest) -possible timeuuid having for timestamp t. So for instance:

-
SELECT * FROM myTable
- WHERE t > maxTimeuuid('2013-01-01 00:05+0000')
-   AND t < minTimeuuid('2013-02-02 10:00+0000')
-
-
-

will select all rows where the timeuuid column t is strictly older than '2013-01-01 00:05+0000' but strictly -younger than '2013-02-02 10:00+0000'. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still -not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > -maxTimeuuid('2013-01-01 00:05+0000').

-
-

Note

-

We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the Time-Based UUID generation process specified by RFC 4122. In particular, the value returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

-
-
-
-
-

Time conversion functions

-

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native -type.

Function name     Input type   Description
toDate            timeuuid     Converts the timeuuid argument into a date type
toDate            timestamp    Converts the timestamp argument into a date type
toTimestamp       timeuuid     Converts the timeuuid argument into a timestamp type
toTimestamp       date         Converts the date argument into a timestamp type
toUnixTimestamp   timeuuid     Converts the timeuuid argument into a bigInt raw value
toUnixTimestamp   timestamp    Converts the timestamp argument into a bigInt raw value
toUnixTimestamp   date         Converts the date argument into a bigInt raw value
dateOf            timeuuid     Similar to toTimestamp(timeuuid) (DEPRECATED)
unixTimestampOf   timeuuid     Similar to toUnixTimestamp(timeuuid) (DEPRECATED)
-
-
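As an illustrative sketch (the events table and its created_at timeuuid column are hypothetical), these functions are typically applied in the selection clause:

-- hypothetical table: events(id int PRIMARY KEY, created_at timeuuid)
SELECT toDate(created_at), toTimestamp(created_at), toUnixTimestamp(created_at) FROM events;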
-

Blob conversion functions

-

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL (a notable exception is blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a 64-bit blob argument and converts it to a bigint value. So, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.

-
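A minimal sketch, assuming a hypothetical table bits with an id int primary key and a data blob column:

-- hypothetical table: bits(id int PRIMARY KEY, data blob)
INSERT INTO bits (id, data) VALUES (1, bigintAsBlob(3));
SELECT blobAsBigint(data) FROM bits WHERE id = 1;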
-
-
-

User-defined functions

-

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath.

-

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

-

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

-
CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-
-
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing.

-

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types.

-

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

-

Note that you can use the double dollar-sign syntax ($$ ... $$) to enclose the UDF source code. For example:

-
CREATE FUNCTION some_function ( arg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS int
-    LANGUAGE java
-    AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-
-CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen<custom_type> )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$ return udtarg.getString("txt"); $$;
-
-
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

-

The implicitly available udfContext field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:

-
CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( somearg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS custom_type
-    LANGUAGE java
-    AS $$
-        UDTValue udt = udfContext.newReturnUDTValue();
-        udt.setString("txt", "some string");
-        udt.setInt("i", 42);
-        return udt;
-    $$;
-
-
-

The definition of the UDFContext interface can be found in the Apache Cassandra source code for -org.apache.cassandra.cql3.functions.UDFContext.

-
public interface UDFContext
-{
-    UDTValue newArgUDTValue(String argName);
-    UDTValue newArgUDTValue(int argNum);
-    UDTValue newReturnUDTValue();
-    UDTValue newUDTValue(String udtName);
-    TupleValue newArgTupleValue(String argName);
-    TupleValue newArgTupleValue(int argNum);
-    TupleValue newReturnTupleValue();
-    TupleValue newTupleValue(String cqlDefinition);
-}
-
-
-

Java UDFs already have some imports for common interfaces and classes defined. These imports are:

-
import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.cassandra.cql3.functions.UDFContext;
-import com.datastax.driver.core.TypeCodec;
-import com.datastax.driver.core.TupleValue;
-import com.datastax.driver.core.UDTValue;
-
-
-

Please note, that these convenience imports are not available for script UDFs.

-
-

CREATE FUNCTION

-

Creating a new user-defined function uses the CREATE FUNCTION statement:

-
-create_function_statement ::=  CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS]
-                                   function_name '(' arguments_declaration ')'
-                                   [ CALLED | RETURNS NULL ] ON NULL INPUT
-                                   RETURNS cql_type
-                                   LANGUAGE identifier
-                                   AS string
-arguments_declaration     ::=  identifier cql_type ( ',' identifier cql_type )*
-
-

For instance:

-
CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list<bigint>)
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int)
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-
-

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with -the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already -exists.

-

If the optional IF NOT EXISTS keywords are used, the function will -only be created if another function with the same signature does not -exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

Behavior on invocation with null values must be defined for each -function. There are two options:

-
    -
  1. RETURNS NULL ON NULL INPUT declares that the function will always -return null if any of the input arguments is null.
  2. -
  3. CALLED ON NULL INPUT declares that the function will always be -executed.
  4. -
-
-
Function Signature
-

Signatures are used to distinguish individual functions. The signature consists of:

-
    -
  1. The fully qualified function name - i.e keyspace plus function-name
  2. -
  3. The concatenated list of all argument types
  4. -
-

Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules.

-

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. -the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the -system keyspaces.

-
-
-
-

DROP FUNCTION

-

Dropping a function uses the DROP FUNCTION statement:

-
-drop_function_statement ::=  DROP FUNCTION [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-arguments_signature     ::=  cql_type ( ',' cql_type )*
-
-

For instance:

-
DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-
-
-

You must specify the argument types (arguments_signature) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions).

-

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists, but does not throw an error if -it doesn’t

-
-
-
-
-

Aggregate functions

-

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.

-

If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query.

-
-

Native aggregates

-
-

Count

-

The count function can be used to count the rows returned by a query. Example:

-
SELECT COUNT (*) FROM plays;
-SELECT COUNT (1) FROM plays;
-
-
-

It can also be used to count the non-null values of a given column:

-
SELECT COUNT (scores) FROM plays;
-
-
-
-
-

Max and Min

-

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:

-
SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake';
-
-
-
-
-

Sum

-

The sum function can be used to sum up all the values returned by a query for a given column. For instance:

-
SELECT SUM (players) FROM plays;
-
-
-
-
-

Avg

-

The avg function can be used to compute the average of all the values returned by a query for a given column. For -instance:

-
SELECT AVG (players) FROM plays;
-
-
-
-
-
-

User-Defined Aggregates

-

User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -count, min, and max.

-

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first -argument of the state function must have type STYPE. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with last -state value as its argument.

-

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate).

-

User-defined aggregates can be used in SELECT statement.

-

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE -statement):

-
CREATE OR REPLACE FUNCTION averageState(state tuple<int,bigint>, val int)
-    CALLED ON NULL INPUT
-    RETURNS tuple<int,bigint>
-    LANGUAGE java
-    AS $$
-        if (val != null) {
-            state.setInt(0, state.getInt(0)+1);
-            state.setLong(1, state.getLong(1)+val.intValue());
-        }
-        return state;
-    $$;
-
-CREATE OR REPLACE FUNCTION averageFinal (state tuple<int,bigint>)
-    CALLED ON NULL INPUT
-    RETURNS double
-    LANGUAGE java
-    AS $$
-        double r = 0;
-        if (state.getInt(0) == 0) return null;
-        r = state.getLong(1);
-        r /= state.getInt(0);
-        return Double.valueOf(r);
-    $$;
-
-CREATE OR REPLACE AGGREGATE average(int)
-    SFUNC averageState
-    STYPE tuple<int,bigint>
-    FINALFUNC averageFinal
-    INITCOND (0, 0);
-
-CREATE TABLE atable (
-    pk int PRIMARY KEY,
-    val int
-);
-
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-
-SELECT average(val) FROM atable;
-
-
-
-

CREATE AGGREGATE

-

Creating (or replacing) a user-defined aggregate function uses the CREATE AGGREGATE statement:

-
-create_aggregate_statement ::=  CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ]
-                                    function_name '(' arguments_signature ')'
-                                    SFUNC function_name
-                                    STYPE cql_type
-                                    [ FINALFUNC function_name ]
-                                    [ INITCOND term ]
-
-

See above for a complete example.

-

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one -with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature -already exists.

-

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

STYPE defines the type of the state value and must be specified.

-

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null -INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

-

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the -state function must match STYPE. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called -with null.

-

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with -type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS -NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

-

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is -defined, it is the return type of that function.

-
-
-

DROP AGGREGATE

-

Dropping a user-defined aggregate function uses the DROP AGGREGATE statement:

-
-drop_aggregate_statement ::=  DROP AGGREGATE [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-
-

For instance:

-
DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-
-
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates).

-

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist.

-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/index.html b/src/doc/3.11.7/cql/index.html deleted file mode 100644 index 75a9154d2..000000000 --- a/src/doc/3.11.7/cql/index.html +++ /dev/null @@ -1,239 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "The Cassandra Query Language (CQL)" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

The Cassandra Query Language (CQL)

-

This document describes the Cassandra Query Language (CQL) [1]. Note that this document describes the latest version of the language. However, the changes section provides the diff between the different versions of CQL.

-

CQL offers a model close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL. But please note that, as such, they do not refer to the concept of rows and columns found in the deprecated Thrift API (and earlier versions 1 and 2 of CQL).

- - - - - - -
[1]Technically, this document describes CQL version 3, which is not backward compatible with CQL versions 1 and 2 (which have been deprecated and removed) and differs from them in numerous ways.
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/indexes.html b/src/doc/3.11.7/cql/indexes.html deleted file mode 100644 index caf007fd9..000000000 --- a/src/doc/3.11.7/cql/indexes.html +++ /dev/null @@ -1,168 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Secondary Indexes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Secondary Indexes

-

CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by:

-
-index_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE INDEX

-

Creating a secondary index on a table uses the CREATE INDEX statement:

-
-create_index_statement ::=  CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ index_name ]
-                                ON table_name '(' index_identifier ')'
-                                [ USING string [ WITH OPTIONS = map_literal ] ]
-index_identifier       ::=  column_name
-                           | ( KEYS | VALUES | ENTRIES | FULL ) '(' column_name ')'
-
-

For instance:

-
CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-
-
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time.

-

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it -is used, the statement will be a no-op if the index already exists.

-
-

Indexes on Map Keys

-

When creating an index on a map, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.

-
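For illustration, building on the users / favs example above (a sketch; favs is assumed to be a map column):

-- sketch only: favs is assumed to be a map column on the users table above
CREATE INDEX ON users (keys(favs));
SELECT * FROM users WHERE favs CONTAINS KEY 'movies';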
-
-
-

DROP INDEX

-

Dropping a secondary index uses the DROP INDEX statement:

-
-drop_index_statement ::=  DROP INDEX [ IF EXISTS ] index_name
-
-

The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index.

-

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
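For instance (an illustrative sketch reusing the userIndex example created above):

-- sketch only: userIndex is the index created in the CREATE INDEX example above
DROP INDEX userIndex;
DROP INDEX IF EXISTS userIndex;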
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/json.html b/src/doc/3.11.7/cql/json.html deleted file mode 100644 index 336e6e048..000000000 --- a/src/doc/3.11.7/cql/json.html +++ /dev/null @@ -1,315 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "JSON Support" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

JSON Support

-

Cassandra 2.2 introduces JSON support to SELECT and INSERT -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents.

-
-

SELECT JSON

-

With SELECT statements, the JSON keyword can be used to return each row as a single JSON encoded map. The -remainder of the SELECT statement behavior is the same.

-

The result map keys are the same as the column names in a normal result set. For example, a statement like SELECT JSON a, ttl(b) FROM ... would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, SELECT JSON myColumn FROM ... would result in a map key "\"myColumn\"" (note the escaped quotes).

-

The map values will be JSON-encoded representations (as described below) of the result set values.

-
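A minimal sketch (mytable and its columns are hypothetical, matching the INSERT JSON example below):

-- hypothetical table with case-sensitive column "myKey" and column value
SELECT JSON "myKey", value FROM mytable;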
-
-

INSERT JSON

-

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single -row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named “myKey” and “value”, you would do the following:

-
INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-
-

By default (or if DEFAULT NULL is explicitly used), a column omitted from the JSON map will be set to NULL, meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). Alternatively, if the DEFAULT UNSET directive is used after the value, omitted column values will be left unset, meaning that pre-existing values for those columns will be preserved.

-
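For illustration (a sketch reusing the hypothetical mytable above), DEFAULT UNSET is appended after the JSON value:

-- sketch only: leaves the omitted value column untouched instead of writing a null
INSERT INTO mytable JSON '{ "\"myKey\"": 1 }' DEFAULT UNSET;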
-
-

JSON Encoding of Cassandra Data Types

-

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string -representation of the collection.

-

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and toJson()):

Type       Formats accepted         Return format   Notes
ascii      string                   string          Uses JSON’s \u character escape
bigint     integer, string          integer         String must be valid 64 bit integer
blob       string                   string          String should be 0x followed by an even number of hex digits
boolean    boolean, string          boolean         String must be “true” or “false”
date       string                   string          Date in format YYYY-MM-DD, timezone UTC
decimal    integer, float, string   float           May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder
double     integer, float, string   float           String must be valid integer or float
float      integer, float, string   float           String must be valid integer or float
inet       string                   string          IPv4 or IPv6 address
int        integer, string          integer         String must be valid 32 bit integer
list       list, string             list            Uses JSON’s native list representation
map        map, string              map             Uses JSON’s native map representation
smallint   integer, string          integer         String must be valid 16 bit integer
set        list, string             list            Uses JSON’s native list representation
text       string                   string          Uses JSON’s \u character escape
time       string                   string          Time of day in format HH-MM-SS[.fffffffff]
timestamp  integer, string          string          A timestamp. String constants allow inputting timestamps as dates; timestamps with format YYYY-MM-DD HH:MM:SS.SSS are returned.
timeuuid   string                   string          Type 1 UUID. See constant for the UUID format
tinyint    integer, string          integer         String must be valid 8 bit integer
tuple      list, string             list            Uses JSON’s native list representation
UDT        map, string              map             Uses JSON’s native map representation with field names as keys
uuid       string                   string          See constant for the UUID format
varchar    string                   string          Uses JSON’s \u character escape
varint     integer, string          integer         Variable length; may overflow 32 or 64 bit integers in client-side decoder
-
-
-

The fromJson() Function

-

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used -in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or -SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.

-
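A minimal sketch (mytable and its columns are hypothetical):

-- sketch only: fromJson() supplies a single column value
UPDATE mytable SET value = fromJson('42') WHERE "myKey" = 0;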
-
-

The toJson() Function

-

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used -in the selection clause of a SELECT statement.

-
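A minimal sketch (again using the hypothetical mytable):

-- sketch only: toJson() renders a single column value as JSON
SELECT "myKey", toJson(value) FROM mytable;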
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/mvs.html b/src/doc/3.11.7/cql/mvs.html deleted file mode 100644 index b5f65ed02..000000000 --- a/src/doc/3.11.7/cql/mvs.html +++ /dev/null @@ -1,241 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Materialized Views" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Materialized Views

-

Materialized view names are defined by:

-
-view_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE MATERIALIZED VIEW

-

You can create a materialized view on a table using a CREATE MATERIALIZED VIEW statement:

-
-create_materialized_view_statement ::=  CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] view_name AS
-                                            select_statement
-                                            PRIMARY KEY '(' primary_key ')'
-                                            WITH table_options
-
-

For instance:

-
CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT * FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-
-
-

The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which -corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view.

-

Creating a materialized view has 3 main parts:

  • the select statement that restricts the data included in the view.
  • the primary key definition for the view.
  • the options of the view.

- -

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is -used. If it is used, the statement will be a no-op if the materialized view already exists.

-
-

MV select statement

-

The select statement of a materialized view creation defines which rows of the base table are included in the view. That statement is limited in a number of ways:

-
    -
  • the selection is limited to those that only select columns of the base table. In other -words, you can’t use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can -however use * as a shortcut of selecting all columns. Further, static columns cannot be -included in a materialized view (which means SELECT * isn’t allowed if the base table has static columns).
  • -
  • the WHERE clause has the following restrictions:
      -
    • it cannot include any bind_marker.
    • -
    • the columns that are not part of the base table primary key can only be restricted by an IS NOT NULL -restriction. No other restriction is allowed.
    • -
    • as the columns that are part of the view primary key cannot be null, they must always be at least restricted by a -IS NOT NULL restriction (or any other restriction, but they must have one).
    • -
    -
  • -
  • it cannot have an ordering clause, a limit, or ALLOW FILTERING.
  • -
-
-
-

MV primary key

-

A view must have a primary key and that primary key must conform to the following restrictions:

-
    -
  • it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to exactly one row of the base table.
  • -
  • it can only contain a single column that is not a primary key column in the base table.
  • -
-

So for instance, given the following base table definition:

-
CREATE TABLE t (
-    k int,
-    c1 int,
-    c2 int,
-    v1 int,
-    v2 int,
-    PRIMARY KEY (k, c1, c2)
-)
-
-
-

then the following view definitions are allowed:

-
CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, k, c2)
-
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (v1, k, c1, c2)
-
-
-

but the following ones are not allowed:

-
// Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-    PRIMARY KEY (v1, v2, k, c1, c2)
-
-// Error: must include k in the primary as it's a base table primary key column
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, c2)
-
-
-
-
-

MV options

-

A materialized view is internally implemented by a table and as such, creating an MV allows the same options as creating a table.

-
-
-
-

ALTER MATERIALIZED VIEW

-

After creation, you can alter the options of a materialized view using the ALTER MATERIALIZED VIEW statement:

-
-alter_materialized_view_statement ::=  ALTER MATERIALIZED VIEW view_name WITH table_options
-
-

The options that can be updated are the same as at creation time and thus the same as for tables.

-
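For instance (an illustrative sketch reusing the monkeySpecies_by_population view created above; the option values are arbitrary):

-- sketch only: any table option may be set here
ALTER MATERIALIZED VIEW monkeySpecies_by_population WITH comment = 'Allow query by population instead of species' AND gc_grace_seconds = 864000;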
-
-

DROP MATERIALIZED VIEW

-

Dropping a materialized view uses the DROP MATERIALIZED VIEW statement:

-
-drop_materialized_view_statement ::=  DROP MATERIALIZED VIEW [ IF EXISTS ] view_name;
-
-

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/security.html b/src/doc/3.11.7/cql/security.html deleted file mode 100644 index 20a9b1a75..000000000 --- a/src/doc/3.11.7/cql/security.html +++ /dev/null @@ -1,704 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-
-

Database Roles

-

CQL uses database roles to represent users and groups of users. Syntactically, a role is defined by:

-
-role_name ::=  identifier | string
-
-
-

CREATE ROLE

-

Creating a role uses the CREATE ROLE statement:

-
-create_role_statement ::=  CREATE ROLE [ IF NOT EXISTS ] role_name
-                               [ WITH role_options ]
-role_options          ::=  role_option ( AND role_option )*
-role_option           ::=  PASSWORD '=' string
-                          | LOGIN '=' boolean
-                          | SUPERUSER '=' boolean
-                          | OPTIONS '=' map_literal
-
-

For instance:

-
CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-
-
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

-

Permissions on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is -not.

-

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role.

-

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

-

Role names should be quoted if they contain non-alphanumeric characters.

-
-

Setting credentials for internal authentication

-

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single -quotation marks.

-

If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD -clause is not necessary.

-
-
-

Creating a role conditionally

-

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. -If the option is used and the role exists, the statement is a no-op:

-
CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-
-
-
-
-
-

ALTER ROLE

-

Altering the options of a role uses the ALTER ROLE statement:

-
-alter_role_statement ::=  ALTER ROLE role_name WITH role_options
-
-

For instance:

-
ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-
-
-

Conditions on executing ALTER ROLE statements:

-
    -
  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • -
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • -
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • -
  • To modify properties of a role, the client must be granted ALTER permission on that role
  • -
-
-
-

DROP ROLE

-

Dropping a role uses the DROP ROLE statement:

-
-drop_role_statement ::=  DROP ROLE [ IF EXISTS ] role_name
-
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.

-

Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is -used. If the option is used and the role does not exist the statement is a no-op.

-
-
-

GRANT ROLE

-

Granting a role to another uses the GRANT ROLE statement:

-
-grant_role_statement ::=  GRANT role_name TO role_name
-
-

For instance:

-
GRANT report_writer TO alice;
-
-
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also -acquired by alice.

-

Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:

-
GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
-GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-
-
-
-
-

REVOKE ROLE

-

Revoking a role uses the REVOKE ROLE statement:

-
-revoke_role_statement ::=  REVOKE role_name FROM role_name
-
-

For instance:

-
REVOKE report_writer FROM alice;
-
-
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the -report_writer role are also revoked.

-
-
-

LIST ROLES

-

All the known roles (in the system or granted to specific role) can be listed using the LIST ROLES statement:

-
-list_roles_statement ::=  LIST ROLES [ OF role_name ] [ NORECURSIVE ]
-
-

For instance:

-
LIST ROLES;
-
-
-

returns all known roles in the system, this requires DESCRIBE permission on the database roles resource. And:

-
LIST ROLES OF alice;
-
-
-

enumerates all roles granted to alice, including those transitively acquired. But:

-
LIST ROLES OF bob NORECURSIVE
-
-
-

lists all roles directly granted to bob without including any of the transitively acquired ones.

-
-
-
-

Users

-

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -USER. For backward compatibility, the legacy syntax has been preserved with USER centric statements becoming -synonyms for the ROLE based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role.

-
-

CREATE USER

-

Creating a user uses the CREATE USER statement:

-
-create_user_statement ::=  CREATE USER [ IF NOT EXISTS ] role_name [ WITH PASSWORD string ] [ user_option ]
-user_option           ::=  SUPERUSER | NOSUPERUSER
-
-

For instance:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-
-
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of -statements are equivalent:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-
-
-
-

ALTER USER

-

Altering the options of a user uses the ALTER USER statement:

-
-alter_user_statement ::=  ALTER USER role_name [ WITH PASSWORD string ] [ user_option ]
-
-

For instance:

-
ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-
-
-
-
-

DROP USER

-

Dropping a user uses the DROP USER statement:

-
-drop_user_statement ::=  DROP USER [ IF EXISTS ] role_name
-
-
-
-

LIST USERS

-

Existing users can be listed using the LIST USERS statement:

-
-list_users_statement ::=  LIST USERS
-
-

Note that this statement is equivalent to:

-
LIST ROLES;
-
-
-

but only roles with the LOGIN privilege are included in the output.

-
-
-
-

Data Control

-
-

Permissions

-

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically:

-
    -
  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> -TABLE.
  • -
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • -
  • Resources representing roles have the structure ALL ROLES -> ROLE
  • -
  • Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ALL MBEANS -> -MBEAN
  • -
-

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting -a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace.

-

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes.

-

The full set of available permissions is:

-
    -
  • CREATE
  • -
  • ALTER
  • -
  • DROP
  • -
  • SELECT
  • -
  • MODIFY
  • -
  • AUTHORIZE
  • -
  • DESCRIBE
  • -
  • EXECUTE
  • -
-

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context -of functions or mbeans; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT -a permission on resource to which it cannot be applied results in an error response. The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission.

Permission   Resource                    Operations
CREATE       ALL KEYSPACES               CREATE KEYSPACE and CREATE TABLE in any keyspace
CREATE       KEYSPACE                    CREATE TABLE in specified keyspace
CREATE       ALL FUNCTIONS               CREATE FUNCTION in any keyspace and CREATE AGGREGATE in any keyspace
CREATE       ALL FUNCTIONS IN KEYSPACE   CREATE FUNCTION and CREATE AGGREGATE in specified keyspace
CREATE       ALL ROLES                   CREATE ROLE
ALTER        ALL KEYSPACES               ALTER KEYSPACE and ALTER TABLE in any keyspace
ALTER        KEYSPACE                    ALTER KEYSPACE and ALTER TABLE in specified keyspace
ALTER        TABLE                       ALTER TABLE
ALTER        ALL FUNCTIONS               CREATE FUNCTION and CREATE AGGREGATE: replacing any existing
ALTER        ALL FUNCTIONS IN KEYSPACE   CREATE FUNCTION and CREATE AGGREGATE: replacing existing in specified keyspace
ALTER        FUNCTION                    CREATE FUNCTION and CREATE AGGREGATE: replacing existing
ALTER        ALL ROLES                   ALTER ROLE on any role
ALTER        ROLE                        ALTER ROLE
DROP         ALL KEYSPACES               DROP KEYSPACE and DROP TABLE in any keyspace
DROP         KEYSPACE                    DROP TABLE in specified keyspace
DROP         TABLE                       DROP TABLE
DROP         ALL FUNCTIONS               DROP FUNCTION and DROP AGGREGATE in any keyspace
DROP         ALL FUNCTIONS IN KEYSPACE   DROP FUNCTION and DROP AGGREGATE in specified keyspace
DROP         FUNCTION                    DROP FUNCTION
DROP         ALL ROLES                   DROP ROLE on any role
DROP         ROLE                        DROP ROLE
SELECT       ALL KEYSPACES               SELECT on any table
SELECT       KEYSPACE                    SELECT on any table in specified keyspace
SELECT       TABLE                       SELECT on specified table
SELECT       ALL MBEANS                  Call getter methods on any mbean
SELECT       MBEANS                      Call getter methods on any mbean matching a wildcard pattern
SELECT       MBEAN                       Call getter methods on named mbean
MODIFY       ALL KEYSPACES               INSERT, UPDATE, DELETE and TRUNCATE on any table
MODIFY       KEYSPACE                    INSERT, UPDATE, DELETE and TRUNCATE on any table in specified keyspace
MODIFY       TABLE                       INSERT, UPDATE, DELETE and TRUNCATE on specified table
MODIFY       ALL MBEANS                  Call setter methods on any mbean
MODIFY       MBEANS                      Call setter methods on any mbean matching a wildcard pattern
MODIFY       MBEAN                       Call setter methods on named mbean
AUTHORIZE    ALL KEYSPACES               GRANT PERMISSION and REVOKE PERMISSION on any table
AUTHORIZE    KEYSPACE                    GRANT PERMISSION and REVOKE PERMISSION on any table in specified keyspace
AUTHORIZE    TABLE                       GRANT PERMISSION and REVOKE PERMISSION on specified table
AUTHORIZE    ALL FUNCTIONS               GRANT PERMISSION and REVOKE PERMISSION on any function
AUTHORIZE    ALL FUNCTIONS IN KEYSPACE   GRANT PERMISSION and REVOKE PERMISSION in specified keyspace
AUTHORIZE    FUNCTION                    GRANT PERMISSION and REVOKE PERMISSION on specified function
AUTHORIZE    ALL MBEANS                  GRANT PERMISSION and REVOKE PERMISSION on any mbean
AUTHORIZE    MBEANS                      GRANT PERMISSION and REVOKE PERMISSION on any mbean matching a wildcard pattern
AUTHORIZE    MBEAN                       GRANT PERMISSION and REVOKE PERMISSION on named mbean
AUTHORIZE    ALL ROLES                   GRANT ROLE and REVOKE ROLE on any role
AUTHORIZE    ROLES                       GRANT ROLE and REVOKE ROLE on specified roles
DESCRIBE     ALL ROLES                   LIST ROLES on all roles or only roles granted to another, specified role
DESCRIBE     ALL MBEANS                  Retrieve metadata about any mbean from the platform’s MBeanServer
DESCRIBE     MBEANS                      Retrieve metadata about any mbean matching a wildcard pattern from the platform’s MBeanServer
DESCRIBE     MBEAN                       Retrieve metadata about a named mbean from the platform’s MBeanServer
EXECUTE      ALL FUNCTIONS               SELECT, INSERT and UPDATE using any function, and use of any function in CREATE AGGREGATE
EXECUTE      ALL FUNCTIONS IN KEYSPACE   SELECT, INSERT and UPDATE using any function in specified keyspace and use of any function in keyspace in CREATE AGGREGATE
EXECUTE      FUNCTION                    SELECT, INSERT and UPDATE using specified function and use of the function in CREATE AGGREGATE
EXECUTE      ALL MBEANS                  Execute operations on any mbean
EXECUTE      MBEANS                      Execute operations on any mbean matching a wildcard pattern
EXECUTE      MBEAN                       Execute operations on named mbean
-
-
-

GRANT PERMISSION

-

Granting a permission uses the GRANT PERMISSION statement:

-
-grant_permission_statement ::=  GRANT permissions ON resource TO role_name
-permissions                ::=  ALL [ PERMISSIONS ] | permission [ PERMISSION ]
-permission                 ::=  CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-resource                   ::=  ALL KEYSPACES
-                               | KEYSPACE keyspace_name
-                               | [ TABLE ] table_name
-                               | ALL ROLES
-                               | ROLE role_name
-                               | ALL FUNCTIONS [ IN KEYSPACE keyspace_name ]
-                               | FUNCTION function_name '(' [ cql_type ( ',' cql_type )* ] ')'
-                               | ALL MBEANS
-                               | ( MBEAN | MBEANS ) string
-
-

For instance:

-
GRANT SELECT ON ALL KEYSPACES TO data_reader;
-
-
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all -keyspaces:

-
GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-
-
-

This gives any user with the role data_writer permission to perform UPDATE, INSERT, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace:

-
GRANT DROP ON keyspace1.table1 TO schema_owner;
-
-
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1:

-
GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-
-
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries -which use the function keyspace1.user_function( int ):

-
GRANT DESCRIBE ON ALL ROLES TO role_admin;
-
-
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST -ROLES statement

-
-

GRANT ALL

-

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target -resource.

-
-
-

Automatic Granting

-

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or -CREATE ROLE statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource.

-
-
-
-

REVOKE PERMISSION

-

Revoking a permission from a role uses the REVOKE PERMISSION statement:

-
-revoke_permission_statement ::=  REVOKE permissions ON resource FROM role_name
-
-

For instance:

-
REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-
-
-
-
-

LIST PERMISSIONS

-

Listing granted permissions uses the LIST PERMISSIONS statement:

-
-list_permissions_statement ::=  LIST permissions [ ON resource ] [ OF role_name [ NORECURSIVE ] ]
-
-

For instance:

-
LIST ALL PERMISSIONS OF alice;
-
-
-

Show all permissions granted to alice, including those acquired transitively from any other roles:

-
LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-
-
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. -For example, should bob have ALTER permission on keyspace1, that would be included in the results of this -query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to -bob or one of bob’s roles:

-
LIST SELECT PERMISSIONS OF carlos;
-
-
-

Show any permissions granted to carlos or any of carlos’s roles, limited to SELECT permissions on any -resource.
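As a further sketch of the NORECURSIVE switch described above (reusing the hypothetical role bob and the keyspace1.table1 resource from the earlier example), the following restricts the listing to permissions granted directly to bob:

LIST ALL PERMISSIONS ON keyspace1.table1 OF bob NORECURSIVE;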

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/triggers.html b/src/doc/3.11.7/cql/triggers.html deleted file mode 100644 index 80d189371..000000000 --- a/src/doc/3.11.7/cql/triggers.html +++ /dev/null @@ -1,153 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Triggers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Triggers

-

Triggers are identified by a name defined by:

-
-trigger_name ::=  identifier
-
-
-

CREATE TRIGGER

-

Creating a new trigger uses the CREATE TRIGGER statement:

-
-create_trigger_statement ::=  CREATE TRIGGER [ IF NOT EXISTS ] trigger_name
-                                  ON table_name
-                                  USING string
-
-

For instance:

-
CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-
-
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. You place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it loads during cluster startup and exists on every node that participates in a cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

-
-
-

DROP TRIGGER

-

Dropping a trigger uses the DROP TRIGGER statement:

-
-drop_trigger_statement ::=  DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name
-
-

For instance:

-
DROP TRIGGER myTrigger ON myTable;
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/cql/types.html b/src/doc/3.11.7/cql/types.html deleted file mode 100644 index a2ff1f8f6..000000000 --- a/src/doc/3.11.7/cql/types.html +++ /dev/null @@ -1,697 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Types" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Types

-

CQL is a typed language and supports a rich set of data types, including native types, -collection types, user-defined types, tuple types and custom -types:

-
-cql_type ::=  native_type | collection_type | user_defined_type | tuple_type | custom_type
-
-
-

Native Types

-

The native types supported by CQL are:

-
-native_type ::=  ASCII
-                 | BIGINT
-                 | BLOB
-                 | BOOLEAN
-                 | COUNTER
-                 | DATE
-                 | DECIMAL
-                 | DOUBLE
-                 | DURATION
-                 | FLOAT
-                 | INET
-                 | INT
-                 | SMALLINT
-                 | TEXT
-                 | TIME
-                 | TIMESTAMP
-                 | TIMEUUID
-                 | TINYINT
-                 | UUID
-                 | VARCHAR
-                 | VARINT
-
-

The following table gives additional information on the native data types, and on which kinds of constants each type supports:

type | constants supported | description
ascii | string | ASCII character string
bigint | integer | 64-bit signed long
blob | blob | Arbitrary bytes (no validation)
boolean | boolean | Either true or false
counter | integer | Counter column (64-bit signed value). See Counters for details
date | integer, string | A date (with no corresponding time value). See Working with dates below for details
decimal | integer, float | Variable-precision decimal
double | integer, float | 64-bit IEEE-754 floating point
duration | duration | A duration with nanosecond precision. See Working with durations below for details
float | integer, float | 32-bit IEEE-754 floating point
inet | string | An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that there is no inet constant; IP addresses should be input as strings
int | integer | 32-bit signed int
smallint | integer | 16-bit signed int
text | string | UTF8 encoded string
time | integer, string | A time (with no corresponding date value) with nanosecond precision. See Working with times below for details
timestamp | integer, string | A timestamp (date and time) with millisecond precision. See Working with timestamps below for details
timeuuid | uuid | Version 1 UUID, generally used as a "conflict-free" timestamp. Also see Timeuuid functions
tinyint | integer | 8-bit signed int
uuid | uuid | A UUID (of any version)
varchar | string | UTF8 encoded string
varint | integer | Arbitrary-precision integer
-

Counters

-

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the UPDATE statement for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0.
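As a brief sketch (the page_views table and its columns are purely illustrative):

CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter
);

UPDATE page_views SET views = views + 1 WHERE page = '/index.html';
UPDATE page_views SET views = views - 1 WHERE page = '/index.html';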

-

Counters have a number of important limitations:

-
    -
  • They cannot be used for columns part of the PRIMARY KEY of a table.
  • -
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside -the PRIMARY KEY have the counter type, or none of them have it.
  • -
  • Counters do not support expiration.
  • -
  • The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other -words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
  • -
  • Counter updates are, by nature, not idempotent. An important consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), the client has no way to know if the update has been applied or not. In particular, replaying the update may or may not lead to an over count.
  • -
-
-
-
-

Working with timestamps

-

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as the epoch: January 1 1970 at 00:00:00 GMT.

-

Timestamps can be input in CQL either using their value as an integer, or using a string that -represents an ISO 8601 date. For instance, all of the values below are -valid timestamp values for Mar 2, 2011, at 04:05:00 AM, GMT:

-
    -
  • 1299038700000
  • '2011-03-02 04:05+0000'
  • '2011-03-02 04:05:00+0000'
  • '2011-03-02 04:05:00.000+0000'
  • '2011-03-02T04:05+0000'
  • '2011-03-02T04:05:00+0000'
  • '2011-03-02T04:05:00.000+0000'
-

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is --0800. The time zone may be omitted if desired ('2011-02-03 04:05:00'), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible.

-

The time of day may also be omitted ('2011-02-03' or '2011-02-03+0000'), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the date type.

-
-
-

Working with dates

-

Values of the date type are encoded as 32-bit unsigned integers representing a number of days, with "the epoch" at the center of the range (2^31). The epoch is January 1st, 1970.

-

As for timestamp, a date can be input either as an integer or using a date string. In the latter case, the format should be yyyy-mm-dd (so '2011-02-03' for instance).
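For instance, assuming a purely illustrative events table with an id int primary key and an event_date date column, both forms below should be accepted per the description above:

INSERT INTO events (id, event_date) VALUES (1, '2011-02-03');  -- date string
INSERT INTO events (id, event_date) VALUES (2, 2147483648);    -- integer; 2^31 corresponds to the epoch, 1970-01-01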

-
-
-

Working with times

-

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

-

As for timestamp, a time can be input either as an integer or using a string representing the time. In the latter case, the format should be hh:mm:ss[.fffffffff] (where the sub-second precision is optional and, if provided, can use fewer than nine digits). So for instance, the following are valid inputs for a time:

-
    -
  • '08:12:54'
  • -
  • '08:12:54.123'
  • -
  • '08:12:54.123456'
  • -
  • '08:12:54.123456789'
  • -
-
-
-

Working with durations

-

Values of the duration type are encoded as 3 signed integers of variable length. The first integer represents the number of months, the second the number of days and the third the number of nanoseconds. This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving time. Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded as a 64-bit integer.

-

A duration can be input as:

-
-
    -
  1. (quantity unit)+ like 12h30m where the unit can be:

    -
    -
      -
    • y: years (12 months)
    • -
    • mo: months (1 month)
    • -
    • w: weeks (7 days)
    • -
    • d: days (1 day)
    • -
    • h: hours (3,600,000,000,000 nanoseconds)
    • -
    • m: minutes (60,000,000,000 nanoseconds)
    • -
    • s: seconds (1,000,000,000 nanoseconds)
    • -
    • ms: milliseconds (1,000,000 nanoseconds)
    • -
    • us or µs : microseconds (1000 nanoseconds)
    • -
    • ns: nanoseconds (1 nanosecond)
    • -
    -
    -
  2. -
  3. ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W

    -
  4. -
  5. ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]

    -
  6. -
-
-

For example:

-
INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-
-

Duration columns cannot be used in a table’s PRIMARY KEY. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if 1mo is greater than 29d without a date -context.

-

A 1d duration is not equal to a 24h one, as the duration type has been created to be able to support daylight saving time.

-
-
-

Collections

-

CQL supports 3 kinds of collections: Maps, Sets and Lists. The types of those collections are defined by:

-
-collection_type ::=  MAP '<' cql_type ',' cql_type '>'
-                     | SET '<' cql_type '>'
-                     | LIST '<' cql_type '>'
-
-

and their values can be input using collection literals:

-
-collection_literal ::=  map_literal | set_literal | list_literal
-map_literal        ::=  '{' [ term ':' term (',' term : term)* ] '}'
-set_literal        ::=  '{' [ term (',' term)* ] '}'
-list_literal       ::=  '[' [ term (',' term)* ] ']'
-
-

Note however that neither bind_marker nor NULL are supported inside collection literals.

-
-

Noteworthy characteristics

-

Collections are meant for storing/denormalizing relatively small amount of data. They work well for things like “the -phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all -messages sent by a user”, “events registered by a sensor”…), then collections are not appropriate and a specific table -(with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy -characteristics and limitations:

-
    -
  • Individual collections are not indexed internally, which means that even to access a single element of a collection, the whole collection has to be read (and reading one is not paged internally).
  • -
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. Further, some list operations are not idempotent by nature (see the section on lists below for details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when possible.
  • -
-

Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an anti-pattern to use a (single) collection to store large amounts of data.

-
-
-

Maps

-

A map is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:

-
CREATE TABLE users (
-    id text PRIMARY KEY,
-    name text,
-    favs map<text, text> // A map of text keys, and text values
-);
-
-INSERT INTO users (id, name, favs)
-           VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-// Replace the existing map entirely.
-UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-
-

Further, maps support:

-
    -
  • Updating or inserting one or more elements:

    -
    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    -UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
    -
    -
    -
  • -
  • Removing one or more element (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    -UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
    -
    -
    -

    Note that for removing multiple elements in a map, you remove from it a set of keys.

    -
  • -
-

Lastly, TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated elements. In other words:

-
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

-
-
-

Sets

-

A set is a (sorted) collection of unique values. You can define and insert a set with:

-
CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    tags set<text> // A set of text values
-);
-
-INSERT INTO images (name, owner, tags)
-            VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-// Replace the existing set entirely
-UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-
-

Further, sets support:

-
    -
  • Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):

    -
    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
  • Removing one or multiple elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
-

Lastly, as for maps, TTLs if used only apply to the newly inserted values.

-
-
-

Lists

-
-

Note

-

As mentioned above and further discussed at the end of this section, lists have limitations and specific performance considerations that you should take into account before using them. In general, if you can use a set instead of a list, always prefer a set.

-
-

A list is a (sorted) collection of non-unique values where elements are ordered by their position in the list. You can define and insert a list with:

-
CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int> // A list of integers
-)
-
-INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-// Replace the existing list entirely
-UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-
-

Further, lists support:

-
    -
  • Appending and prepending values to a list:

    -
    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    -UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
    -
    -
    -
  • -
  • Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that position, or an error will be thrown that the list is too small:

    -
    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
    -
    -
    -
  • -
  • Removing an element by its position in the list. This implies that the list has a pre-existing element for that position, or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted:

    -
    DELETE scores[1] FROM plays WHERE id = '123-afde';
    -
    -
    -
  • -
  • Deleting all the occurrences of particular values in the list (if a particular element doesn’t occur at all in the -list, it is simply ignored and no error is thrown):

    -
    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
    -
    -
    -
  • -
-
-

Warning

-

The append and prepend operations are not idempotent by nature. So in particular, if one of these operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value twice.

-
-
-

Warning

-

Setting and removing an element by position and removing occurrences of particular values incur an internal read-before-write. They will thus run more slowly and take more resources than usual updates (with the exception of conditional writes, which have their own cost).

-
-

Lastly, as for maps, TTLs when used only apply to the newly inserted values.

-
-
-
-

User-Defined Types

-

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using the create_type_statement, alter_type_statement and drop_type_statement described below. But once created, a UDT is simply referred to by its name:

-
-user_defined_type ::=  udt_name
-udt_name          ::=  [ keyspace_name '.' ] identifier
-
-
-

Creating a UDT

-

Creating a new user-defined type is done using a CREATE TYPE statement defined by:

-
-create_type_statement ::=  CREATE TYPE [ IF NOT EXISTS ] udt_name
-                               '(' field_definition ( ',' field_definition )* ')'
-field_definition      ::=  identifier cql_type
-
-

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any type, including collections or other UDTs. For instance:

-
CREATE TYPE phone (
-    country_code int,
-    number text,
-)
-
-CREATE TYPE address (
-    street text,
-    city text,
-    zip text,
-    phones map<text, phone>
-)
-
-CREATE TABLE user (
-    name text PRIMARY KEY,
-    addresses map<text, frozen<address>>
-)
-
-
-

Note that:

-
    -
  • Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the type already exists.
  • -
  • A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At -creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in -the current keyspace.
  • -
  • As of Cassandra 3.11.7, UDTs have to be frozen in most cases, hence the frozen<address> in the table definition above. Please see the section on frozen for more details.
  • -
-
-
-

UDT literals

-

Once a user-defined type has been created, values can be input using a UDT literal:

-
-udt_literal ::=  '{' identifier ':' term ( ',' identifier ':' term )* '}'
-
-

In other words, a UDT literal is like a map literal but its keys are the names of the fields of the type. For instance, one could insert into the table defined in the previous section using:

-
INSERT INTO user (name, addresses)
-          VALUES ('z3 Pr3z1den7', {
-              'home' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                            'landline' : { country_code: 1, number: '...' } }
-              },
-              'work' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'fax' : { country_code: 1, number: '...' } }
-              }
-          })
-
-
-

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields (in which case those will be null).

-
-
-

Altering a UDT

-

An existing user-defined type can be modified using an ALTER TYPE statement:

-
-alter_type_statement    ::=  ALTER TYPE udt_name alter_type_modification
-alter_type_modification ::=  ADD field_definition
-                             | RENAME identifier TO identifier ( identifier TO identifier )*
-
-

You can:

-
    -
  • add a new field to the type (ALTER TYPE address ADD country text). That new field will be null for any values -of the type created before the addition.
  • -
  • rename the fields of the type (ALTER TYPE address RENAME zip TO zipcode).
  • -
-
-
-

Dropping a UDT

-

You can drop an existing user-defined type using a DROP TYPE statement:

-
-drop_type_statement ::=  DROP TYPE [ IF EXISTS ] udt_name
-
-

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error.

-

If the type dropped does not exist, an error will be returned unless IF EXISTS is used, in which case the operation -is a no-op.

-
-
-
-

Tuples

-

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

-
-tuple_type    ::=  TUPLE '<' cql_type ( ',' cql_type )* '>'
-tuple_literal ::=  '(' term ( ',' term )* ')'
-
-

and can be used thusly:

-
CREATE TABLE durations (
-    event text,
-    duration tuple<int, text>,
-)
-
-INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-
-

Unlike other "composed" types (collections and UDTs), a tuple is always frozen (without the need for the frozen keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of those values can be null, but they need to be explicitly declared as such).

-
-
-

Custom Types

-
-

Note

-

Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is complex, not user friendly, and the other provided types, particularly user-defined types, should almost always be enough.

-
-

A custom type is defined by:

-
-custom_type ::=  string
-
-

A custom type is a string that contains the name of a Java class that extends the server-side AbstractType class and that can be loaded by Cassandra (it should thus be in the CLASSPATH of every node running Cassandra). That class will define what values are valid for the type and how values sort when used for a clustering column. For any other purpose, a value of a custom type is the same as that of a blob, and can in particular be input using the blob literal syntax.
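As a sketch, a custom type appears in a schema as a quoted Java class name. The table below is illustrative only and uses the BytesType class that ships with Cassandra:

CREATE TABLE legacy_data (
    id int PRIMARY KEY,
    payload 'org.apache.cassandra.db.marshal.BytesType'
);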

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/data_modeling/index.html b/src/doc/3.11.7/data_modeling/index.html deleted file mode 100644 index e28c43627..000000000 --- a/src/doc/3.11.7/data_modeling/index.html +++ /dev/null @@ -1,104 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Data Modeling" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Modeling

-
-

Todo

-

TODO

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/development/code_style.html b/src/doc/3.11.7/development/code_style.html deleted file mode 100644 index df275a5a8..000000000 --- a/src/doc/3.11.7/development/code_style.html +++ /dev/null @@ -1,208 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Code Style" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Code Style

-
-

General Code Conventions

-
-
-
-
-
-

Exception handling

-
-
    -
  • Never ever write catch (...) {} or catch (...) { logger.error() } merely to satisfy Java’s compile-time exception checking. Always propagate the exception up or throw RuntimeException (or, if it “can’t happen,” AssertionError). This makes the exceptions visible to automated tests.
  • -
  • Avoid propagating up checked exceptions that no caller handles. Rethrow as RuntimeException (or IOError, if that is more applicable).
  • -
  • Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don’t hide it behind a warn; if it isn’t, no need for the warning.
  • -
  • If you genuinely know an exception indicates an expected condition, it’s okay to ignore it BUT this must be explicitly explained in a comment.
  • -
-
-
-
-

Boilerplate

-
-
    -
  • Avoid redundant @Override annotations when implementing abstract or interface methods.
  • -
  • Do not implement equals or hashcode methods unless they are actually needed.
  • -
  • Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in “real” methods to either.)
  • -
  • Prefer requiring initialization in the constructor to setters.
  • -
  • Avoid redundant this references to member fields or methods.
  • -
  • Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it.
  • -
  • Always include braces for nested levels of conditionals and loops. Only avoid braces for single level.
  • -
-
-
-
-

Multiline statements

-
-
    -
  • Try to keep lines under 120 characters, but use good judgement – it’s better to exceed 120 by a little, than split a line that has no natural splitting points.
  • -
  • When splitting inside a method call, use one line per parameter and align them, like this:
  • -
-
SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(),
-                                         columnFamilies.size(),
-                                         StorageService.getPartitioner());
-
-
-
    -
  • When splitting a ternary, use one line per clause, carry the operator, and align like this:
  • -
-
var = bar == null
-    ? doFoo()
-    : doBar();
-
-
-
-
-
-

Whitespace

-
-
    -
  • Please make sure to use 4 spaces instead of the tab character for all your indentation.
  • -
  • Many lines in many files have a bunch of trailing whitespace… Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn’t have to pay attention to whitespace diffs.
  • -
-
-
-
-

Imports

-

Please observe the following order for your imports:

-
java
-[blank line]
-com.google.common
-org.apache.commons
-org.junit
-org.slf4j
-[blank line]
-everything else alphabetically
-
-
-
-
-

Format files for IDEs

-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/development/how_to_commit.html b/src/doc/3.11.7/development/how_to_commit.html deleted file mode 100644 index a103dc5bd..000000000 --- a/src/doc/3.11.7/development/how_to_commit.html +++ /dev/null @@ -1,180 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "How-to Commit" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

How-to Commit

-

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself.

-

Here is how committing and merging will usually look for merging and pushing for tickets that follow the convention (if patch-based):

-

Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).

-
-
On cassandra-3.0:
-
    -
  1. git am -3 12345-3.0.patch (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git apply -3 12345-3.3.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git apply -3 12345-trunk.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-

Same scenario, but a branch-based contribution:

-
-
On cassandra-3.0:
-
    -
  1. git cherry-pick <sha-of-3.0-commit> (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git format-patch -1 <sha-of-3.3-commit>
  4. -
  5. git apply -3 <sha-of-3.3-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git format-patch -1 <sha-of-trunk-commit>
  4. -
  5. git apply -3 <sha-of-trunk-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-
-

Tip

-

Notes on git flags: the -3 flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply.

-

The --atomic flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per branch. This is nifty in case a race condition happens - you won't push half the branches, blocking other committers' progress while you are resolving the issue.

-
-
-

Tip

-

The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. -curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/development/how_to_review.html b/src/doc/3.11.7/development/how_to_review.html deleted file mode 100644 index e3310f95e..000000000 --- a/src/doc/3.11.7/development/how_to_review.html +++ /dev/null @@ -1,172 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Review Checklist" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Review Checklist

-

When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process:

-

General

-
-
    -
  • Does it conform to the Code Style guidelines?
  • -
  • Is there any redundant or duplicate code?
  • -
  • Is the code as modular as possible?
  • -
  • Can any singletons be avoided?
  • -
  • Can any of the code be replaced with library functions?
  • -
  • Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem?
  • -
-
-

Error-Handling

-
-
    -
  • Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded?
  • -
  • Where third-party utilities are used, are returning errors being caught?
  • -
  • Are invalid parameter values handled?
  • -
  • Are any Throwable/Exceptions passed to the JVMStabilityInspector?
  • -
  • Are errors well-documented? Does the error message tell the user how to proceed?
  • -
  • Do exceptions propagate to the appropriate level in the code?
  • -
-
-

Documentation

-
-
    -
  • Do comments exist and describe the intent of the code (the “why”, not the “how”)?
  • -
  • Are javadocs added where appropriate?
  • -
  • Is any unusual behavior or edge-case handling described?
  • -
  • Are data structures and units of measurement explained?
  • -
  • Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’?
  • -
  • Does the code self-document via clear naming, abstractions, and flow control?
  • -
  • Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed?
  • -
  • Is the ticket tagged with “client-impacting” and “doc-impacting”, where appropriate?
  • -
  • Has lib/licences been updated for third-party libs? Are they Apache License compatible?
  • -
  • Is the Component on the JIRA ticket set appropriately?
  • -
-
-

Testing

-
-
    -
  • Is the code testable? i.e. it doesn't add too many or hidden dependencies, objects can be initialized, test frameworks can call its methods, etc.
  • -
  • Do tests exist and are they comprehensive?
  • -
  • Do unit tests actually test that the code is performing the intended functionality?
  • -
  • Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse?
  • -
  • If the code may be affected by multi-node clusters, are there dtests?
  • -
  • If the code may take a long time to test properly, are there CVH tests?
  • -
  • Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions?
  • -
  • If patch affects read/write path, did we test for performance regressions w/multiple workloads?
  • -
  • If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature?
  • -
-
-

Logging

-
-
    -
  • Are logging statements logged at the correct level?
  • -
  • Are there logs in the critical path that could affect performance?
  • -
  • Is there any log that could be added to communicate status or troubleshoot potential problems in this feature?
  • -
  • Can any unnecessary logging statement be removed?
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/development/ide.html b/src/doc/3.11.7/development/ide.html deleted file mode 100644 index ac7f23424..000000000 --- a/src/doc/3.11.7/development/ide.html +++ /dev/null @@ -1,234 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Building and IDE Integration" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Building and IDE Integration

-
-

Building From Source

-

Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using Java 8, Git and Ant.

-

The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:

-
git clone http://git-wip-us.apache.org/repos/asf/cassandra.git cassandra-trunk
-
-
-

Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:

-
git checkout cassandra-3.0
-
-
-

You can get a list of available branches with git branch.

-

Finally build Cassandra using ant:

-
ant
-
-
-

This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled.

-
-

Hint

-

You can setup multiple working trees for different Cassandra versions from the same repository using git-worktree.

-
-
-

Note

-

Bleeding edge development snapshots of Cassandra are available from Jenkins continuous integration.

-
-
-
-

Setting up Cassandra in IntelliJ IDEA

-

IntelliJ IDEA by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra.

-
-

Setup Cassandra as a Project (C* 2.1 and newer)

-

Since 2.1.5, there is a new ant target: generate-idea-files. Please see our wiki for instructions for older Cassandra versions.

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Once Cassandra is built, generate the IDEA files using ant:
  2. -
-
ant generate-idea-files
-
-
-
    -
  1. Start IDEA
  2. -
  3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA’s File menu
  4. -
-

The project generated by the ant task generate-idea-files contains nearly everything you need to debug Cassandra and execute unit tests.

-
-
    -
  • Run/debug defaults for JUnit
  • -
  • Run/debug configuration for Cassandra daemon
  • -
  • License header for Java source files
  • -
  • Cassandra code style
  • -
  • Inspections
  • -
-
-
-
-
-

Setting up Cassandra in Eclipse

-

Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the download page. The following guide was created with “Eclipse IDE for Java Developers”.

-

These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x.

-
-

Project Settings

-

It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.

-
-
    -
  • Clone and build Cassandra as described above.
  • -
  • Run ant generate-eclipse-files to create the Eclipse settings.
  • -
  • Start Eclipse.
  • -
  • Select File->Import->Existing Projects into Workspace->Select git directory.
  • -
  • Make sure “cassandra-trunk” is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above).
  • -
  • Confirm “Finish” to have your project imported.
  • -
-
-

You should now be able to find the project as part of the “Package Explorer” or “Project Explorer” without having Eclipse complain about any errors after building the project automatically.

-
-
-

Unit Tests

-

Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting Run As->JUnit Test. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting Debug As->JUnit Test.

-

Alternatively all unit tests can be run from the command line as described in Testing

-
-
-

Debugging Cassandra Using Eclipse

-

There are two ways to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ./bin/cassandra script and connect to the JVM remotely from Eclipse, or start Cassandra from Eclipse right away.

-
-

Starting Cassandra From Command Line

-
-
    -
  • Set environment variable to define remote debugging options for the JVM: -export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"
  • -
  • Start Cassandra by executing the ./bin/cassandra
  • -
-
-

Afterwards you should be able to connect to the running Cassandra process through the following steps:

-

From the menu, select Run->Debug Configurations..

-../_images/eclipse_debug0.png -

Create new remote application

-../_images/eclipse_debug1.png -

Configure connection settings by specifying a name and port 1414

-../_images/eclipse_debug2.png -

Afterwards confirm “Debug” to connect to the JVM and start debugging Cassandra!

-
-
-

Starting Cassandra From Eclipse

-

Cassandra can also be started directly from Eclipse if you don’t want to use the command line.

-

From the menu, select Run->Run Configurations..

-../_images/eclipse_debug3.png -

Create new application

-../_images/eclipse_debug4.png -

Specify name, project and main class org.apache.cassandra.service.CassandraDaemon

-../_images/eclipse_debug5.png -

Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed.

-
-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true
-
-
-../_images/eclipse_debug6.png -

Now just confirm “Debug” and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging!

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/development/index.html b/src/doc/3.11.7/development/index.html deleted file mode 100644 index 32e125c0d..000000000 --- a/src/doc/3.11.7/development/index.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Development" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.7/development/patches.html b/src/doc/3.11.7/development/patches.html deleted file mode 100644 index a45cb35ac..000000000 --- a/src/doc/3.11.7/development/patches.html +++ /dev/null @@ -1,247 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Contributing Code Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contributing Code Changes

-
-

Choosing What to Work on

-

Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing.

-
-
As a general rule of thumb:
-
    -
  • Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the developer community
  • -
  • Bug fixes take higher priority compared to features
  • -
  • The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
  • -
  • Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately
  • -
-
-
-
-

Hint

-

Not sure what to work on? Just pick an issue tagged with the low hanging fruit label in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

-
-
-
-

Before You Start Coding

-

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or IRC channel listed on our community page.

-
-
You should also
-
    -
  • Avoid redundant work by searching for already reported issues in JIRA
  • -
  • Create a new issue early in the process describing what you’re working on - not just after finishing your patch
  • -
  • Link related JIRA issues with your own ticket to provide a better context
  • -
  • Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
  • -
  • Ping people from whom you would like advice on JIRA by mentioning them
  • -
-
-
There are also some fixed rules that you need to be aware of:
-
    -
  • Patches will only be applied to branches by following the release model
  • -
  • Code must be testable
  • -
  • Code must follow the Code Style convention
  • -
  • Changes must not break compatibility between different Cassandra versions
  • -
  • Contributions must be covered by the Apache License
  • -
-
-
-
-

Choosing the Right Branches to Work on

-

There are currently multiple Cassandra versions maintained in individual branches:

Version | Policy
3.x | Tick-tock (see below)
3.0 | Bug fixes only
2.2 | Bug fixes only
2.1 | Critical bug fixes only

Corresponding branches in git are easy to recognize as they are named cassandra-<release> (e.g. cassandra-3.0). The trunk branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

-
-

Tick-Tock Releases

-

New releases created as part of the tick-tock release process will either focus on stability (odd version numbers) or introduce new features (even version numbers). Any code for new Cassandra features should be based on the latest, unreleased 3.x branch with an even version number, or based on trunk.

-
-
-

Bug Fixes

-

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be cassandra-2.1 -> cassandra-2.2 -> cassandra-3.0 -> cassandra-3.x -> trunk. But don’t worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn’t very common. As a contributor, you’re also not expected to provide a single patch for each version. What you need to do however is:

-
-
    -
  • Be clear about which versions you could verify to be affected by the bug
  • -
  • For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on a case-by-case basis
  • -
  • If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0)
  • -
  • Test if the patch can be merged cleanly across branches in the direction listed above
  • -
  • Be clear which branches may need attention by the committer or even create custom patches for those if you can
  • -
-
-
-
-
-
-

Creating a Patch

-

So you’ve finished coding and the great moment arrives: it’s time to submit your patch!

-
-
    -
  1. Create a branch for your changes if you haven’t done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. git checkout -b 12345-3.0
  2. -
  3. Verify that you follow Cassandra’s Code Style
  4. -
  5. Make sure all tests (including yours) pass using ant as described in Testing. If you suspect a test failure is unrelated to your change, it may be useful to check the test’s status by searching the issue tracker or looking at CI results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites.
  6. -
  7. Consider going through the Review Checklist for your code. This will help you to understand how others will consider your change for inclusion.
  8. -
  9. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either:
  10. -
-
-
    -
  1. Attach a patch to JIRA with a single squashed commit in it (per branch), or
  2. -
  3. Squash the commits in-place in your branches into one
  4. -
-
-
    -
  1. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch ending with the following statement on the last line: patch by X; reviewed by Y for CASSANDRA-ZZZZZ
  2. -
  3. When you’re happy with the result, create a patch:
  4. -
-
-
git add <any new or modified file>
-git commit -m '<message>'
-git format-patch HEAD~1
-mv <patch-file> <ticket-branchname.txt> (e.g. 12345-trunk.txt, 12345-3.0.txt)
-
-
-

Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch:

-
git push --set-upstream origin 12345-3.0
-
-
-
-
    -
  1. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless.
  2. -
  3. Attach the newly generated patch to the ticket/add a link to your branch and click “Submit Patch” at the top of the ticket. This will move the ticket into “Patch Available” status, indicating that your submission is ready for review.
  4. -
  5. Wait for other developers or committers to review it and hopefully +1 the ticket (see Review Checklist). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable.
  6. -
  7. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into “Patch Available” once again.
  8. -
-
-

Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/development/testing.html b/src/doc/3.11.7/development/testing.html deleted file mode 100644 index 524cbea18..000000000 --- a/src/doc/3.11.7/development/testing.html +++ /dev/null @@ -1,171 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Development" - -doc-title: "Testing" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Testing

-

Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you’re working on.

-
-

Unit Testing

-

The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the test/unit directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example.

-
@Test
-public void testBatchAndList() throws Throwable
-{
-   createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
-   execute("BEGIN BATCH " +
-           "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
-           "APPLY BATCH");
-
-   assertRows(execute("SELECT l FROM %s WHERE k = 0"),
-              row(list(1, 2, 3)));
-}
-
-
-

Unit tests can be run from the command line using the ant test command, ant test -Dtest.name=<simple_classname> to execute a test suite or ant testsome -Dtest.name=<FQCN> -Dtest.methods=<testmethod1>[,testmethod2] for individual tests. For example, to run all test methods in the org.apache.cassandra.cql3.SimpleQueryTest class, you would run:

-
ant test -Dtest.name=SimpleQueryTest
-
-
-

To run only the testStaticCompactTables() test method from that class, you would run:

-
ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables
-
-
-
-

Long running tests

-

Tests that consume a significant amount of time during execution can be found in the test/long directory and executed as regular JUnit tests or standalone programs. Except for the execution time, there's nothing really special about them. However, ant will execute tests under test/long only when using the ant long-test target.

-
-
-
-

DTests

-

One way of doing integration or system testing at larger scale is by using dtest, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ccmlib from the ccm project. Dtests will setup clusters using this library just as you do running ad-hoc ccm commands on your local machine. Afterwards dtests will use the Python driver to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes.

-

Using dtests helps us to prevent regression bugs by continually executing tests on the CI server against new patches. For frequent contributors, this Jenkins is set up to build branches from their GitHub repositories. It is likely that your reviewer will use this Jenkins instance to run tests for your patch. Read more on the motivation behind the CI server here.

-

The best way to learn how to write dtests is probably by reading the introduction "How to Write a Dtest" and by looking at existing, recently updated tests in the project. New tests must follow certain style conventions that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR.

-

Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you.

-
-
-

Performance Testing

-

Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable.

-
-

Cassandra Stress Tool

-

TODO: CASSANDRA-12365

-
-
-

cstar_perf

-

Another tool available on github is cstar_perf that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it.

-
-
-

CircleCI

-

Cassandra ships with a default CircleCI configuration. To enable running tests on your branches, go to the CircleCI website, click “Login” and log in with your GitHub account, then give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel: click “Projects”, then your GitHub account, and then the settings for the project. If you leave the parallelism at 1 for Cassandra, only ant eclipse-warnings and ant test will be run. If you raise the parallelism to 4, it also runs ant long-test, ant test-compression and ant stress-test.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/faq/index.html b/src/doc/3.11.7/faq/index.html deleted file mode 100644 index c5db1a3a4..000000000 --- a/src/doc/3.11.7/faq/index.html +++ /dev/null @@ -1,315 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Frequently Asked Questions" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Frequently Asked Questions

- -
-

Why can’t I set listen_address to listen on 0.0.0.0 (all my addresses)?

-

Cassandra is a gossip-based distributed system and listen_address is the address a node tells other nodes to reach -it at. Telling other nodes “contact me on any of my addresses” is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen.

-

If you don’t want to manually specify an IP to listen_address for each node in your cluster (understandable!), leave -it blank and Cassandra will use InetAddress.getLocalHost() to pick an address. Then it’s up to you or your ops team -to make things resolve correctly (/etc/hosts/, dns, etc).

-

One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769).

-

See CASSANDRA-256 and CASSANDRA-43 for more gory details.

-
-
-

What ports does Cassandra use?

-

By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX (and 9160 for the deprecated Thrift interface). The internode communication and native protocol ports -are configurable in the Cassandra Configuration File. The JMX port is configurable in cassandra-env.sh (through JVM -options). All ports are TCP.

-
-
-

What happens to existing data in my cluster when I add new nodes?

-

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See Adding, replacing, moving and removing nodes.

-
-
-

I delete data from Cassandra, but disk usage stays the same. What gives?

-

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can’t actually be removed when you perform a delete; instead, a marker (also called a “tombstone”) is written to indicate the value’s new status. Never fear though: on the first compaction that occurs between the data and the tombstone, the data will be expunged completely and the corresponding disk space recovered. See Compaction for more detail.

-
-
-

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?

-

This happens when you have the same token assigned to each node. Don’t do that.

-

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

-

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart.
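For example, on a package installation that uses the default directories (a sketch only; stop Cassandra first and adjust the paths if you changed data_file_directories or commitlog_directory):

$ sudo rm -rf /var/lib/cassandra/data/* /var/lib/cassandra/commitlog/*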

-
-
-

Can I change the replication factor (of a keyspace) on a live cluster?

-

Yes, but it will require running repair (or cleanup) to change the replica count of existing data (see the example after this list):

-
    -
  • Alter the replication factor for desired keyspace (using cqlsh for instance).
  • -
  • If you’re reducing the replication factor, run nodetool cleanup on the cluster to remove surplus replicated data. -Cleanup runs on a per-node basis.
  • -
  • If you’re increasing the replication factor, run nodetool repair to ensure data is replicated according to the new -configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster -performance. It’s highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will -most likely swamp it.
  • -
-
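As a sketch, for a hypothetical keyspace ks whose replication factor is being raised to 3 in a single-datacenter cluster using SimpleStrategy:

$ cqlsh -e "ALTER KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};"
$ nodetool repair ks     # run on each node in turn; if you reduced the factor instead, run 'nodetool cleanup ks' on each node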
-
-

Can I Store (large) BLOBs in Cassandra?

-

Cassandra isn’t optimized for large file or BLOB storage, and a single blob value is always read and sent to the client in its entirety. As such, storing small blobs (less than single-digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

-

Please note in particular that, by default, any value greater than 16MB will be rejected by Cassandra due to the max_mutation_size_in_kb setting in the Cassandra Configuration File (which defaults to half of commitlog_segment_size_in_mb, which itself defaults to 32MB).
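One common way to do the chunking described above is a client-side table layout along these lines (the names and the chunk size the client picks are purely illustrative):

$ cqlsh -e "CREATE TABLE ks.file_chunks (file_id uuid, chunk_index int, data blob, PRIMARY KEY (file_id, chunk_index));"

The client then splits each large blob into, say, 1MB pieces before inserting them and reassembles the pieces on read.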

-
-
-

Nodetool says “Connection refused to host: 127.0.1.1” for any remote host. What gives?

-

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

-

If you are not using DNS, then make sure that your /etc/hosts files are accurate on both ends. If that fails, try -setting the -Djava.rmi.server.hostname=<public name> JVM option near the bottom of cassandra-env.sh to an -interface that you can reach from the remote machine.
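For example, the option can be appended to JVM_OPTS in cassandra-env.sh (the address below is only a placeholder for the node’s publicly reachable name or IP):

JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=203.0.113.10"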

-
-
-

Will batching my operations speed up my bulk load?

-

No. Using batches to load data will generally just add “spikes” of latency. Use asynchronous INSERTs instead, or use -true Bulk Loading.

-

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch stays reasonable). But never, ever blindly batch everything!

-
-
-

On RHEL nodes are unable to join the ring

-

Check if SELinux is on; if it is, turn it off.

-
-
-

How do I unsubscribe from the email list?

-

Send an email to user-unsubscribe@cassandra.apache.org.

-
-
-

Why does top report that Cassandra is using a lot more memory than the Java heap max?

-

Cassandra uses Memory Mapped Files (mmap) internally. That is, we -use the operating system’s virtual memory system to map a number of on-disk files into the Cassandra process’ address -space. This will “use” virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that.

-

What matters from the perspective of “memory use” in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap’d /dev/zero, which represent real memory used. The key issue is that for a mmap’d file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write.

-

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don’t -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail here.

-
-
-

What are seeds?

-

Seeds are used during startup to discover the cluster.

-

If you configure your nodes to refer to some node as a seed, nodes in your ring tend to send gossip messages to seeds more often (also see the section on gossip) than to non-seeds. In other words, seeds work as hubs of the gossip network. With seeds, each node can detect status changes of other nodes quickly.

-

Seeds are also referred to by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node has joined the ring, it learns about the other nodes, so it doesn’t need a seed on subsequent boots.

-

You can make a node a seed at any time. There is nothing special about seed nodes: if you list the node in the seed list, it is a seed.

-

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to the seed list later. If you have no data (a new install) you do not have to worry about bootstrapping at all.

-

Recommended usage of seeds:

-
    -
  • pick two (or more) nodes per data center as seed nodes.
  • -
  • sync the seed list to all your nodes
  • -
-
-
-

Does single seed mean single point of failure?

-

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

-
-
-

Why can’t I call jmx method X on jconsole?

-

Some JMX operations use array arguments and, as jconsole doesn’t support array arguments, those operations can’t be called from jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.

-
-
-

Why do I see “… messages dropped …” in the logs?

-

This is a symptom of load shedding – Cassandra defending itself against more requests than it can handle.

-

Internode messages which are received by a node, but are not processed within their proper timeout (see read_request_timeout, write_request_timeout, … in the Cassandra Configuration File), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

-

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

-

For reads, this means a read request may not have completed.

-

Load shedding is part of the Cassandra architecture; if this is a persistent issue it is generally a sign of an overloaded node or cluster.

-
-
-

Cassandra dies with java.lang.OutOfMemoryError: Map failed

-

If Cassandra is dying specifically with the “Map failed” message, it means the OS is denying Java the ability to lock more memory. On Linux, this typically means memlock is limited. Check /proc/<pid of cassandra>/limits to verify this and raise it (e.g., via ulimit in bash). You may also need to increase vm.max_map_count. Note that the Debian package handles this for you automatically.
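A sketch of the checks and changes described above (the vm.max_map_count value shown is a commonly used figure, not an official requirement):

$ grep "Max locked memory" /proc/$(pgrep -f CassandraDaemon)/limits
$ ulimit -l unlimited                        # raise memlock in the shell that starts Cassandra
$ sudo sysctl -w vm.max_map_count=1048575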

-
-
-

What happens if two updates are made with the same timestamp?

-

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected.

-
-
-

Why does bootstrapping a new node fail with a “Stream failed” error?

-

Two main possibilities:

-
    -
  1. the GC may be creating long pauses disrupting the streaming process
  2. -
  3. compactions happening in the background hold streaming long enough that the TCP connection fails
  4. -
-

In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (the default is very high on Linux). Try just running the following:

-
$ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-
-

To make those settings permanent, add them to your /etc/sysctl.conf file.
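For example, the settings can be appended to /etc/sysctl.conf and reloaded (a sketch; your distribution may prefer a drop-in file under /etc/sysctl.d/ instead):

$ printf '%s\n' 'net.ipv4.tcp_keepalive_time=60' 'net.ipv4.tcp_keepalive_intvl=60' 'net.ipv4.tcp_keepalive_probes=5' | sudo tee -a /etc/sysctl.conf
$ sudo sysctl -p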

-

Note: GCE’s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/genindex.html b/src/doc/3.11.7/genindex.html deleted file mode 100644 index 10a40b50c..000000000 --- a/src/doc/3.11.7/genindex.html +++ /dev/null @@ -1,93 +0,0 @@ - ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Index" -doc-header-links: ' - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.7/getting_started/configuring.html b/src/doc/3.11.7/getting_started/configuring.html deleted file mode 100644 index 52d7f4db6..000000000 --- a/src/doc/3.11.7/getting_started/configuring.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

For running Cassandra on a single node, the steps above are enough; you don’t really need to change any configuration. However, when you deploy a cluster of nodes, or use clients that are not on the same host, there are some parameters that must be changed.

-

The Cassandra configuration files can be found in the conf directory of tarballs. For packages, the configuration -files will be located in /etc/cassandra.

-
-

Main runtime properties

-

Most of the configuration in Cassandra is done via yaml properties that can be set in cassandra.yaml. At a minimum you should consider setting the following properties:

-
    -
  • cluster_name: the name of your cluster.
  • -
  • seeds: a comma separated list of the IP addresses of your cluster seeds.
  • -
  • storage_port: you don’t necessarily need to change this but make sure that there are no firewalls blocking this -port.
  • -
  • listen_address: the IP address of your node; this is what allows other nodes to communicate with this node, so it is important that you change it. Alternatively, you can set listen_interface to tell Cassandra which interface to use, and consequently which address to use. Set only one, not both.
  • -
  • native_transport_port: as for storage_port, make sure this port is not blocked by firewalls as clients will -communicate with Cassandra on this port.
  • -
-
-
-

Changing the location of directories

-

The following yaml properties control the location of directories:

-
    -
  • data_file_directories: one or more directories where data files are located.
  • -
  • commitlog_directory: the directory where commitlog files are located.
  • -
  • saved_caches_directory: the directory where saved caches are located.
  • -
  • hints_directory: the directory where hints are located.
  • -
-

For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks.

-
-
-

Environment variables

-

JVM-level settings such as heap size can be set in cassandra-env.sh. You can add any additional JVM command line -argument to the JVM_OPTS environment variable; when Cassandra starts these arguments will be passed to the JVM.
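For example, to pass an explicit heap size, one could add a line like the following to cassandra-env.sh (the 4G figure is purely illustrative; appropriate sizing depends on your hardware and workload):

JVM_OPTS="$JVM_OPTS -Xms4G -Xmx4G"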

-
-
-

Logging

-

The logger in use is logback. You can change logging properties by editing logback.xml. By default it will log at -INFO level into a file called system.log and at debug level into a file called debug.log. When running in the -foreground, it will also log at INFO level to the console.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/getting_started/drivers.html b/src/doc/3.11.7/getting_started/drivers.html deleted file mode 100644 index 5fd53f70b..000000000 --- a/src/doc/3.11.7/getting_started/drivers.html +++ /dev/null @@ -1,223 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Client drivers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Client drivers

-

Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver.

- -
-

Python

- -
- - - - - - -
-

Clojure

- -
-
-

Erlang

- -
-
-

Go

- -
-
-

Haskell

- -
-
-

Rust

- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/getting_started/index.html b/src/doc/3.11.7/getting_started/index.html deleted file mode 100644 index c76f1c8be..000000000 --- a/src/doc/3.11.7/getting_started/index.html +++ /dev/null @@ -1,146 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Getting Started" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.7/getting_started/installing.html b/src/doc/3.11.7/getting_started/installing.html deleted file mode 100644 index fd4de7db7..000000000 --- a/src/doc/3.11.7/getting_started/installing.html +++ /dev/null @@ -1,196 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Installing Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Installing Cassandra

-
-

Prerequisites

-
    -
  • The latest version of Java 8, either the Oracle Java Standard Edition 8 or OpenJDK 8. To -verify that you have the correct version of java installed, type java -version.
  • -
  • For using cqlsh, the latest version of Python 2.7. To verify that you have -the correct version of Python installed, type python --version.
  • -
-
-
-

Installation from binary tarball files

- -
tar -xvf apache-cassandra-3.6-bin.tar.gz cassandra
-
-
-

The files will be extracted into apache-cassandra-3.6; substitute 3.6 with the release number that you have downloaded.

-
    -
  • Optionally add apache-cassandra-3.6/bin to your path.
  • -
  • Start Cassandra in the foreground by invoking bin/cassandra -f from the command line. Press “Control-C” to stop -Cassandra. Start Cassandra in the background by invoking bin/cassandra from the command line. Invoke kill pid -or pkill -f CassandraDaemon to stop Cassandra, where pid is the Cassandra process id, which you can find for -example by invoking pgrep -f CassandraDaemon.
  • -
  • Verify that Cassandra is running by invoking bin/nodetool status from the command line.
  • -
  • Configuration files are located in the conf sub-directory.
  • -
  • Since Cassandra 2.1, log and data directories are located in the logs and data sub-directories respectively. -Older versions defaulted to /var/log/cassandra and /var/lib/cassandra. Due to this, it is necessary to either -start Cassandra with root privileges or change conf/cassandra.yaml to use directories owned by the current user, -as explained below in the section on changing the location of directories.
  • -
-
-
-

Installation from Debian packages

-
    -
  • Add the Apache repository of Cassandra to /etc/apt/sources.list.d/cassandra.sources.list, for example for version -3.6:
  • -
-
echo "deb https://downloads.apache.org/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-
-
-
    -
  • Add the Apache Cassandra repository keys:
  • -
-
curl https://downloads.apache.org/cassandra/KEYS | sudo apt-key add -
-
-
-
    -
  • Update the repositories:
  • -
-
sudo apt-get update
-
-
-
    -
  • If you encounter this error:
  • -
-
GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA
-
-
-

Then add the public key A278B781FE4B2BDA as follows:

-
sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA
-
-
-

and repeat sudo apt-get update. The actual key may be different; you get it from the error message itself. For a full list of Apache contributors’ public keys, you can refer to this link.

-
    -
  • Install Cassandra:
  • -
-
sudo apt-get install cassandra
-
-
-
    -
  • You can start Cassandra with sudo service cassandra start and stop it with sudo service cassandra stop. -However, normally the service will start automatically. For this reason be sure to stop it if you need to make any -configuration changes.
  • -
  • Verify that Cassandra is running by invoking nodetool status from the command line.
  • -
  • The default location of configuration files is /etc/cassandra.
  • -
  • The default location of log and data directories is /var/log/cassandra/ and /var/lib/cassandra.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/getting_started/querying.html b/src/doc/3.11.7/getting_started/querying.html deleted file mode 100644 index 033b65a11..000000000 --- a/src/doc/3.11.7/getting_started/querying.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Inserting and querying" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Inserting and querying

-

The API to Cassandra is CQL, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done:

-
    -
  • either using cqlsh,
  • -
  • or through a client driver for Cassandra.
  • -
-
-

CQLSH

-

cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:

-
$ bin/cqlsh localhost
-Connected to Test Cluster at localhost:9042.
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-Use HELP for help.
-cqlsh> SELECT cluster_name, listen_address FROM system.local;
-
- cluster_name | listen_address
---------------+----------------
- Test Cluster |      127.0.0.1
-
-(1 rows)
-cqlsh>
-
-
-

See the cqlsh section for full documentation.

-
-
-

Client drivers

-

A lot of client drivers are provided by the community, and a list of known drivers is provided in the next section. You should refer to the documentation of each driver for more information on how to use them.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/index.html b/src/doc/3.11.7/index.html deleted file mode 100644 index 16c04122c..000000000 --- a/src/doc/3.11.7/index.html +++ /dev/null @@ -1,75 +0,0 @@ ---- -layout: doclandingpage -title: "Documentation" -is_homepage: false -is_sphinx_doc: false ---- - -

Apache Cassandra Documentation v3.11.7

- -
This documentation is currently a work-in-progress and contains a number of TODO sections. - Contributions are welcome.
- -

Main documentation

- - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - -
- - - -
- - - -
- - - -
- -

Meta information

- - - - diff --git a/src/doc/3.11.7/objects.inv b/src/doc/3.11.7/objects.inv deleted file mode 100644 index 5d83d29fe..000000000 Binary files a/src/doc/3.11.7/objects.inv and /dev/null differ diff --git a/src/doc/3.11.7/operating/backups.html b/src/doc/3.11.7/operating/backups.html deleted file mode 100644 index f413a617f..000000000 --- a/src/doc/3.11.7/operating/backups.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Backups" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.7/operating/bloom_filters.html b/src/doc/3.11.7/operating/bloom_filters.html deleted file mode 100644 index aa9292001..000000000 --- a/src/doc/3.11.7/operating/bloom_filters.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bloom Filters" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bloom Filters

-

In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter.

-

Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: the data definitely does not exist in the given file, or the data probably exists in the given file.

-

While bloom filters cannot guarantee that the data exists in a given SSTable, bloom filters can be made more accurate by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting bloom_filter_fp_chance to a float between 0 and 1.

-

The default value for bloom_filter_fp_chance is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases.

-

Bloom filters are stored in RAM, but off-heap, so operators should not consider bloom filters when selecting the maximum heap size. As accuracy improves (as the bloom_filter_fp_chance gets closer to 0), memory usage increases non-linearly - the bloom filter for bloom_filter_fp_chance = 0.01 will require about three times as much memory as the same table with bloom_filter_fp_chance = 0.1.

-

Typical values for bloom_filter_fp_chance are usually between 0.01 (1%) and 0.1 (10%) false-positive chance, where Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned by use case:

-
    -
  • Users with more RAM and slower disks may benefit from setting the bloom_filter_fp_chance to a numerically lower -number (such as 0.01) to avoid excess IO operations
  • -
  • Users with less RAM, more dense nodes, or very fast disks may tolerate a higher bloom_filter_fp_chance in order to -save RAM at the expense of excess IO operations
  • -
  • In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics -workloads), setting the bloom_filter_fp_chance to a much higher number is acceptable.
  • -
-
-

Changing

-

The bloom filter false positive chance is visible in the DESCRIBE TABLE output as the field -bloom_filter_fp_chance. Operators can change the value with an ALTER TABLE statement:

-
ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01
-
-
-

Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ALTER TABLE statement, new files on disk will be written with the new bloom_filter_fp_chance, but existing sstables will not be modified until they are compacted - if an operator needs a change to bloom_filter_fp_chance to take effect, they can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the sstables on disk, regenerating the bloom filters in the process.
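For example, to force such a rewrite for a single table (keyspace and table names are placeholders):

$ nodetool upgradesstables -a keyspace table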

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/operating/bulk_loading.html b/src/doc/3.11.7/operating/bulk_loading.html deleted file mode 100644 index 7d8701f63..000000000 --- a/src/doc/3.11.7/operating/bulk_loading.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bulk Loading" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.7/operating/cdc.html b/src/doc/3.11.7/operating/cdc.html deleted file mode 100644 index 2f133d99f..000000000 --- a/src/doc/3.11.7/operating/cdc.html +++ /dev/null @@ -1,186 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Change Data Capture" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Change Data Capture

-
-

Overview

-

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the combined flushed and unflushed CDC-log is reached. An operator can -enable CDC on a table by setting the table property cdc=true (either when creating the table or altering it), after which any CommitLogSegments containing -data for a CDC-enabled table are moved to the directory specified in cassandra.yaml on segment discard. A threshold -of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will not allow CDC -data until a consumer parses and removes data from the destination archival directory.

-
-
-

Configuration

-
-

Enabling or disabling CDC on a table

-

CDC is enabled or disabled through the cdc table property, for instance:

-
CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=false;
-
-
-
-
-

cassandra.yaml parameters

-

The following cassandra.yaml options are available for CDC:

-
-
cdc_enabled (default: false)
-
Enable or disable CDC operations node-wide.
-
cdc_raw_directory (default: $CASSANDRA_HOME/data/cdc_raw)
-
Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
-
cdc_free_space_in_mb: (default: min of 4096 and 1/8th volume space)
-
Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in -cdc_raw_directory.
-
cdc_free_space_check_interval_ms (default: 250)
-
When at capacity, we limit the frequency with which we re-calculate the space taken up by cdc_raw_directory to -prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-
-
-
-

Reading CommitLogSegments

-

This implementation included a refactor of CommitLogReplayer into CommitLogReader.java. Usage is fairly straightforward, with a variety of signatures available for use. In order to handle mutations read from disk, implement CommitLogReadHandler.

-
-
-

Warnings

-

Do not enable CDC without some kind of consumption process in-place.

-

The initial implementation of Change Data Capture does not include a parser (see Reading CommitLogSegments above) -so, if CDC is enabled on a node and then on a table, the cdc_free_space_in_mb will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place.

-
-
-

Further Reading

- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/operating/compaction.html b/src/doc/3.11.7/operating/compaction.html deleted file mode 100644 index f7ad12868..000000000 --- a/src/doc/3.11.7/operating/compaction.html +++ /dev/null @@ -1,514 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compaction" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compaction

-
-

Types of compaction

-

The concept of compaction is used for different kinds of operations in Cassandra; the common thing about these operations is that they take one or more sstables and output new sstables. The types of compaction are:

-
-
Minor compaction
-
triggered automatically in Cassandra.
-
Major compaction
-
a user executes a compaction over all sstables on the node.
-
User defined compaction
-
a user triggers a compaction on a given set of sstables.
-
Scrub
-
try to fix any broken sstables. This can actually remove valid data if that data is corrupted; if that happens you will need to run a full repair on the node.
-
Upgradesstables
-
upgrade sstables to the latest version. Run this after upgrading to a new major version.
-
Cleanup
-
remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been -bootstrapped since that node will take ownership of some ranges from those nodes.
-
Secondary index rebuild
-
rebuild the secondary indexes on the node.
-
Anticompaction
-
after repair the ranges that were actually repaired are split out of the sstables that existed when repair started.
-
Sub range compaction
-
It is possible to only compact a given sub range - this could be useful if you know a token that has been -misbehaving - either gathering many updates or many deletes. (nodetool compact -st x -et y) will pick -all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will -most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS -the resulting sstable will end up in L0.
-
-
-
-

When is a minor compaction triggered?

-

1. When an sstable is added to the node through flushing/streaming etc.
2. When autocompaction is enabled after being disabled (nodetool enableautocompaction).
3. When compaction adds new sstables.
4. A check for new minor compactions runs every 5 minutes.

-
-
-

Merging sstables

-

Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently.

-
-
-

Tombstones and Garbage Collection (GC) Grace

-
-

Why Tombstones

-

When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra.

-
-
-

Deletes without tombstones

-

Imagine a three node cluster which has the value [A] replicated to every node:

-
[A], [A], [A]
-
-
-

If one of the nodes fails and our delete operation only removes existing values we can end up with a cluster that looks like:

-
[], [], [A]
-
-
-

Then a repair operation would replace the value of [A] back onto the two nodes which are missing the value:

-
[A], [A], [A]
-
-
-

This would cause our data to be resurrected even though it had been -deleted.

-
-
-

Deletes with Tombstones

-

Starting again with a three node cluster which has the value [A] replicated to every node:

-
[A], [A], [A]
-
-
-

If instead of removing data we add a tombstone record, our single node failure situation will look like this:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A]
-
-
-

Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being resurrected:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]]
-
-
-

Our repair operation will correctly set the state of the system to what we expect, with the record [A] marked as deleted on all nodes. This does mean we will end up accruing tombstones, which would permanently consume disk space. To avoid keeping tombstones forever we have a parameter known as gc_grace_seconds for every table in Cassandra.

-
-
-

The gc_grace_seconds parameter and Tombstone Removal

-

The table level gc_grace_seconds parameter controls how long Cassandra will retain tombstones through compaction events before finally removing them. This duration should directly reflect the amount of time a user expects to allow before recovering a failed node. After gc_grace_seconds has expired the tombstone may be removed (meaning there will no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to be able to drop an actual tombstone the following needs to be true:

-
    -
  • The tombstone must be older than gc_grace_seconds
  • -
  • If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older -than the tombstone containing X must be included in the same compaction. We don’t need to care if the partition is in -an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older -than the data it cannot shadow that data.
  • -
  • If the option only_purge_repaired_tombstones is enabled, tombstones are only removed if the data has also been -repaired.
  • -
-

If a node remains down or disconnected for longer than gc_grace_seconds, its deleted data will be repaired back to the other nodes and re-appear in the cluster. This is basically the same as in the “Deletes without Tombstones” section. Note that tombstones will not be removed until a compaction event even if gc_grace_seconds has elapsed.

-

The default value for gc_grace_seconds is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using WITH gc_grace_seconds.
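For example (keyspace and table names are placeholders; the value shown is simply the 10-day default):

$ cqlsh -e "ALTER TABLE keyspace.table WITH gc_grace_seconds = 864000;"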

-
-
-
-

TTL

-

Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least gc_grace_seconds. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once.

-
-
-

Fully expired sstables

-

If an sstable contains only tombstones and it is guaranteed that the sstable is not shadowing data in any other sstable, compaction can drop that sstable. If you see sstables with only tombstones (note that TTL’d data is considered tombstones once the time to live has expired) but they are not being dropped by compaction, it is likely that other sstables contain older data. There is a tool called sstableexpiredblockers that will list which sstables are droppable and which are blocking them from being dropped. This is especially useful for time series compaction with TimeWindowCompactionStrategy (and the deprecated DateTieredCompactionStrategy).
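The tool is run against a specific table, for example (keyspace and table names are placeholders):

$ sstableexpiredblockers keyspace table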

-
-
-

Repaired/unrepaired data

-

With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables.

-
-
-

Data directories

-

Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making data live, tombstones and actual data are always kept in the same data directory. This way, if a disk is lost, all versions of a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is run in addition to the compaction strategy instances containing repaired/unrepaired data; this means that if you have 4 data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding data getting undeleted:

-
    -
  • It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings -and each one can run compactions independently from the others.
  • -
  • Users can backup and restore a single data directory.
  • -
  • Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk backing two data directories, the big one will be limited by the small one. One workaround is to create more data directories backed by the big disk.
  • -
-
-
-

Single sstable tombstone compaction

-

When an sstable is written, a histogram of the tombstone expiry times is created and used to find sstables with very many tombstones, so that a single sstable compaction can be run on that sstable in the hope of being able to drop its tombstones. Before starting this, it is also checked how likely it is that any tombstones will actually be able to be dropped, and how much this sstable overlaps with other sstables. To avoid most of these checks the compaction option unchecked_tombstone_compaction can be enabled.

-
-
-

Common options

-

There are a number of common options for all the compaction strategies:

-
-
enabled (default: true)
-
Whether minor compactions should run. Note that you can have ‘enabled’: true as a compaction option and then do -‘nodetool enableautocompaction’ to start running compactions.
-
tombstone_threshold (default: 0.2)
-
How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-
tombstone_compaction_interval (default: 86400s (1 day))
-
Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure -that one sstable is not constantly getting recompacted - this option states how often we should try for a given -sstable.
-
log_all (default: false)
-
New detailed compaction logging, see below.
-
unchecked_tombstone_compaction (default: false)
-
The single sstable compaction has quite strict checks for whether it should be started, this option disables those -checks and for some usecases this might be needed. Note that this does not change anything for the actual -compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able -to drop any tombstones.
-
only_purge_repaired_tombstone (default: false)
-
Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-
min_threshold (default: 4)
-
Lower limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
max_threshold (default: 32)
-
Upper limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
-

Further, see the section on each strategy for specific additional options.
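As a sketch of how these common options are applied (table name and values are placeholders; the options map uses the same syntax for every strategy):

$ cqlsh -e "ALTER TABLE keyspace.table WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'min_threshold': 4, 'max_threshold': 32, 'tombstone_threshold': 0.2};"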

-
-
-

Compaction nodetool commands

-

The nodetool utility provides a number of commands related to compaction (a few examples follow the list):

-
-
enableautocompaction
-
Enable compaction.
-
disableautocompaction
-
Disable compaction.
-
setcompactionthroughput
-
How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this -throughput.
-
compactionstats
-
Statistics about current and pending compactions.
-
compactionhistory
-
List details about the last compactions.
-
setcompactionthreshold
-
Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
-
-
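For example (the throughput value is only illustrative; keyspace and table names are placeholders):

$ nodetool setcompactionthroughput 64                 # cap compaction at 64 MB/s
$ nodetool compactionstats                            # show current and pending compactions
$ nodetool setcompactionthreshold keyspace table 4 32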
-
-

Switching the compaction strategy and options using JMX

-

It is possible to switch the compaction strategy and its options on just a single node using JMX; this is a great way to experiment with settings without affecting the whole cluster. The mbean is:

-
org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>
-
-
-

and the attribute to change is CompactionParameters or CompactionParametersJson if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an ALTER TABLE statement - -for example:

-
{ 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}
-
-
-

The setting is kept until someone executes an ALTER TABLE that touches the compaction -settings or restarts the node.

-
-
-

More detailed compaction logging

-

Enable with the compaction option log_all and a more detailed compaction log file will be produced in your log -directory.

-
-
-

Size Tiered Compaction Strategy

-

The basic idea of SizeTieredCompactionStrategy (STCS) is to merge sstables of approximately the same size. All sstables are put in different buckets depending on their size. An sstable is added to the bucket if the size of the sstable is within bucket_low and bucket_high of the current average size of the sstables already in the bucket. This will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is decided by figuring out which bucket’s sstables take the most reads.

-
-

Major compaction

-

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%… of the total size.

-
-
-

STCS options

-
-
min_sstable_size (default: 50MB)
-
Sstables smaller than this are put in the same bucket.
-
bucket_low (default: 0.5)
-
How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if bucket_low * avg_bucket_size < sstable_size (and the bucket_high condition holds, see below), then -the sstable is added to the bucket.
-
bucket_high (default: 1.5)
-
How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if sstable_size < bucket_high * avg_bucket_size (and the bucket_low condition holds, see above), then -the sstable is added to the bucket.
-
-
-
-

Defragmentation

-

Defragmentation is done when many sstables are touched during a read. The result of the read is put into the memtable so that the next read will not have to touch as many sstables. This can cause writes on a read-only cluster.

-
-
-
-

Leveled Compaction Strategy

-

The idea of LeveledCompactionStrategy (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here.

-

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. This is done by always including all overlapping sstables in the next level. For example, if we select an sstable in L3, we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that we won’t create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables cover the full range. We also can’t compact all L0 sstables with all L1 sstables in a single compaction since that can use too much memory.

-

When deciding which level to compact LCS checks the higher levels first (with LCS, a “higher” level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level.

-
-

Major compaction

-

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817.

-
-
-

Bootstrapping

-

During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

-
-
-

STCS in L0

-

If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better.

-
-
-

Starved sstables

-

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is only enough data to actually get an L4 on the node, the leftover sstables in L6 will get starved and not compacted. This can happen if a user changes sstable_size_in_mb from 5MB to 160MB for example. To avoid this LCS tries to include those starved high level sstables in other compactions if there have been 25 compaction rounds where the highest level has not been involved.

-
-
-

LCS options

-
-
sstable_size_in_mb (default: 160MB)
-
The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very -large partitions on the node.
-
fanout_size (default: 10)
-
The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning -this option.
-
-

LCS also supports the cassandra.disable_stcs_in_l0 startup option (-Dcassandra.disable_stcs_in_l0=true) to avoid doing STCS in L0.
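Since this is a JVM system property, one way to set it is to append it to JVM_OPTS in cassandra-env.sh, for example:

JVM_OPTS="$JVM_OPTS -Dcassandra.disable_stcs_in_l0=true"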

-
-
-
-

Time Window CompactionStrategy

-

TimeWindowCompactionStrategy (TWCS) is designed specifically for workloads where it’s beneficial to have data on disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using SizeTieredCompactionStrategy or LeveledCompactionStrategy. The basic concept is that TimeWindowCompactionStrategy will create one sstable for a given window, where a window is simply calculated as the combination of two primary options:

-
-
compaction_window_unit (default: DAYS)
-
A Java TimeUnit (MINUTES, HOURS, or DAYS).
-
compaction_window_size (default: 1)
-
The number of units that make up a window.
-
-

Taken together, the operator can specify windows of virtually any size, and TimeWindowCompactionStrategy will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using SizeTieredCompactionStrategy.

-

Ideally, operators should select a compaction_window_unit and compaction_window_size pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -('compaction_window_unit':'DAYS','compaction_window_size':3).

-
-

TimeWindowCompactionStrategy Operational Concerns

-

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

-
    -
  • If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables -and flushed into the same SSTable, where it will remain comingled.
  • -
  • If the user’s read requests for old data cause read repairs that pull old data into the current memtable, that data -will be comingled and flushed into the same SSTable.
  • -
-

While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL USING TIMESTAMP. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled), and disable background read -repair by setting the table’s read_repair_chance and dclocal_read_repair_chance to 0.
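For example, background read repair can be disabled on a TWCS table like this (keyspace and table names are placeholders):

$ cqlsh -e "ALTER TABLE keyspace.table WITH read_repair_chance = 0 AND dclocal_read_repair_chance = 0;"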

-
-
-

Changing TimeWindowCompactionStrategy Options

-

Operators wishing to enable TimeWindowCompactionStrategy on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected.

-

Operators wishing to change compaction_window_unit or compaction_window_size can do so, but may trigger additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple windows.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/operating/compression.html b/src/doc/3.11.7/operating/compression.html deleted file mode 100644 index 8185448ef..000000000 --- a/src/doc/3.11.7/operating/compression.html +++ /dev/null @@ -1,187 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compression" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compression

-

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of data on disk by compressing the SSTable in user-configurable chunks of chunk_length_in_kb kilobytes. Because Cassandra SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so on).

-
-

Configuring Compression

-

Compression is configured on a per-table basis as an optional argument to CREATE TABLE or ALTER TABLE. By -default, three options are relevant:

-
    -
  • class specifies the compression class - Cassandra provides three classes (LZ4Compressor, -SnappyCompressor, and DeflateCompressor ). The default is LZ4Compressor.
  • -
  • chunk_length_in_kb specifies the number of kilobytes of data per compression chunk. The default is 64KB.
  • -
  • crc_check_chance determines how likely Cassandra is to verify the checksum on each compression chunk during -reads. The default is 1.0.
  • -
-

Users can set compression using the following syntax:

-
CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-
-

Or

-
ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};
-
-
-

Once enabled, compression can be disabled with ALTER TABLE setting enabled to false:

-
ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-
-

Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ALTER TABLE, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the SSTables on disk, -re-compressing the data in the process.

-
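For example (keyspace and table names are placeholders), an immediate rewrite of a single table's SSTables on a node can be forced with:

```bash
# Rebuild every SSTable of ks.events on this node, re-compressing the data
# with the table's current compression options
nodetool upgradesstables -a ks events
```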
-
-

Benefits and Uses

-

Compression’s primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save in storage requirements, it often increases read and write throughput, as the CPU cost of compressing the data is lower than the cost of reading or writing the larger volume of uncompressed data from disk.

-

Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well.

-
-
-

Operational Impact

-
    -
  • Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per -terabyte of data on disk, though the exact usage varies with chunk_length_in_kb and compression ratios.
  • -
  • Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as -non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.
  • -
  • The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a -way to ensure correctness of data on disk, compressed tables allow the user to set crc_check_chance (a float from -0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.
  • -
-
-
-

Advanced Use

-

Advanced users can provide their own compression class by implementing the interface at -org.apache.cassandra.io.compress.ICompressor.

-
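As a sketch, assuming a hypothetical com.example.MyCompressor that implements ICompressor and whose jar is already on every node's classpath, the class is then referenced by its fully-qualified name in the table's compression options:

```bash
# The custom class name simply replaces the built-in LZ4Compressor/SnappyCompressor/DeflateCompressor
cqlsh -e "ALTER TABLE ks.events WITH compression = {'class': 'com.example.MyCompressor'};"
```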
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/operating/hardware.html b/src/doc/3.11.7/operating/hardware.html deleted file mode 100644 index 7d6c22f80..000000000 --- a/src/doc/3.11.7/operating/hardware.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hardware Choices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hardware Choices

-

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM.

-
-

CPU

-

Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes.

-
-
-

Memory

-

Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (Java’s Xmx system parameter). In addition to the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system’s page cache, storing recently accessed portions of files in RAM for rapid re-use.

-

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest:

-
    -
  • ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
  • -
  • The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM (see the sketch after this list)
  • -
  • Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
  • -
  • Heaps larger than 12GB should consider G1GC
  • -
-
-
-
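A minimal sketch of applying the heap guideline above, assuming the packaged cassandra-env.sh (which honours these environment variables) and a 32GB host:

```bash
# Cap the heap at 8GB - comfortably below 50% of system RAM - and size the young
# generation explicitly (cassandra-env.sh expects both variables to be set together)
export MAX_HEAP_SIZE="8G"
export HEAP_NEWSIZE="800M"
cassandra -f
```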

Disks

-

Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables.

-

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files.

-

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra’s sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it’s important that the commitlog -(commitlog_directory) be on one physical disk (not simply a partition, but a physical disk), and the data files -(data_file_directories) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk.

-
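As an illustrative check (the mount points shown are assumptions, not defaults), the relevant cassandra.yaml settings for separating the two can be inspected with:

```bash
grep -A 2 -E '^(commitlog_directory|data_file_directories)' conf/cassandra.yaml
# commitlog_directory: /mnt/disk1/cassandra/commitlog
# data_file_directories:
#     - /mnt/disk2/cassandra/data
```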

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it’s typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5.

-
-
-

Common Cloud Choices

-

Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include:

-
    -
  • m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate -workloads
  • -
  • i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs
  • -
  • m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) -storage
  • -
-

Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/operating/hints.html b/src/doc/3.11.7/operating/hints.html deleted file mode 100644 index a6c7cab42..000000000 --- a/src/doc/3.11.7/operating/hints.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hints" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.7/operating/index.html b/src/doc/3.11.7/operating/index.html deleted file mode 100644 index 0c755e485..000000000 --- a/src/doc/3.11.7/operating/index.html +++ /dev/null @@ -1,215 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Operating Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Operating Cassandra

- -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/operating/metrics.html b/src/doc/3.11.7/operating/metrics.html deleted file mode 100644 index b68318f72..000000000 --- a/src/doc/3.11.7/operating/metrics.html +++ /dev/null @@ -1,1601 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Monitoring" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Monitoring

-

Metrics in Cassandra are managed using the Dropwizard Metrics library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of built in and third party reporter plugins.

-

Metrics are collected for a single node. It’s up to the operator to use an external monitoring system to aggregate them.

-
-

Metric Types

-

All metrics reported by cassandra fit into one of the following types.

-
-
Gauge
-
An instantaneous measurement of a value.
-
Counter
-
A gauge for an AtomicLong instance. Typically this is consumed by monitoring the change since the last call to -see if there is a large increase compared to the norm.
-
Histogram
-

Measures the statistical distribution of values in a stream of data.

-

In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th -percentiles.

-
-
Timer
-
Measures both the rate that a particular piece of code is called and the histogram of its duration.
-
Latency
-
Special type that tracks latency (in microseconds) with a Timer plus a Counter that tracks the total latency -accrued since starting. The former is useful if you track the change in total latency since the last check. Each -metric name of this type will have ‘Latency’ and ‘TotalLatency’ appended to it.
-
Meter
-
A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving -average throughputs.
-
-
-
-

Table Metrics

-

Each table in Cassandra has metrics responsible for tracking its state and performance.

-

The metric names are all appended with the specific Keyspace and Table name.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table> name=<MetricName>
-
-
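For example (keyspace and table names here are made up), the local read latency of a single table is exposed as org.apache.cassandra.metrics.Table.ReadLatency.ks.events, and a per-table summary of many of these metrics can be read with nodetool:

```bash
# Prints SSTable counts, latencies, bloom filter statistics, etc. for the given table
nodetool tablestats ks.events
```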
-

Note

-

There is a special table called ‘all’ without a keyspace. This represents the aggregation of metrics across -all tables and keyspaces on the node.

-
NameTypeDescription
MemtableOnHeapSizeGauge<Long>Total amount of data stored in the memtable that resides on-heap, including column related overhead and partitions overwritten.
MemtableOffHeapSizeGauge<Long>Total amount of data stored in the memtable that resides off-heap, including column related overhead and partitions overwritten.
MemtableLiveDataSizeGauge<Long>Total amount of live data stored in the memtable, excluding any data structure overhead.
AllMemtablesOnHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides on-heap.
AllMemtablesOffHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides off-heap.
AllMemtablesLiveDataSizeGauge<Long>Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead.
MemtableColumnsCountGauge<Long>Total number of columns present in the memtable.
MemtableSwitchCountCounterNumber of times flush has resulted in the memtable being switched out.
CompressionRatioGauge<Double>Current compression ratio for all SSTables.
EstimatedPartitionSizeHistogramGauge<long[]>Histogram of estimated partition size (in bytes).
EstimatedPartitionCountGauge<Long>Approximate number of keys in table.
EstimatedColumnCountHistogramGauge<long[]>Histogram of estimated number of columns.
SSTablesPerReadHistogramHistogramHistogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into account.
ReadLatencyLatencyLocal read latency for this table.
RangeLatencyLatencyLocal range scan latency for this table.
WriteLatencyLatencyLocal write latency for this table.
CoordinatorReadLatencyTimerCoordinator read latency for this table.
CoordinatorScanLatencyTimerCoordinator range scan latency for this table.
PendingFlushesCounterEstimated number of flush tasks pending for this table.
BytesFlushedCounterTotal number of bytes flushed since server [re]start.
CompactionBytesWrittenCounterTotal number of bytes written by compaction since server [re]start.
PendingCompactionsGauge<Integer>Estimate of number of pending compactions for this table.
LiveSSTableCountGauge<Integer>Number of SSTables on disk for this table.
LiveDiskSpaceUsedCounterDisk space used by SSTables belonging to this table (in bytes).
TotalDiskSpaceUsedCounterTotal disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC’d.
MinPartitionSizeGauge<Long>Size of the smallest compacted partition (in bytes).
MaxPartitionSizeGauge<Long>Size of the largest compacted partition (in bytes).
MeanPartitionSizeGauge<Long>Size of the average compacted partition (in bytes).
BloomFilterFalsePositivesGauge<Long>Number of false positives on table’s bloom filter.
BloomFilterFalseRatioGauge<Double>False positive ratio of table’s bloom filter.
BloomFilterDiskSpaceUsedGauge<Long>Disk space used by bloom filter (in bytes).
BloomFilterOffHeapMemoryUsedGauge<Long>Off-heap memory used by bloom filter.
IndexSummaryOffHeapMemoryUsedGauge<Long>Off-heap memory used by index summary.
CompressionMetadataOffHeapMemoryUsedGauge<Long>Off-heap memory used by compression meta data.
KeyCacheHitRateGauge<Double>Key cache hit rate for this table.
TombstoneScannedHistogramHistogramHistogram of tombstones scanned in queries on this table.
LiveScannedHistogramHistogramHistogram of live cells scanned in queries on this table.
ColUpdateTimeDeltaHistogramHistogramHistogram of column update time delta on this table.
ViewLockAcquireTimeTimerTime taken acquiring a partition lock for materialized view updates on this table.
ViewReadTimeTimerTime taken during the local read of a materialized view update.
TrueSnapshotsSizeGauge<Long>Disk space used by snapshots of this table including all SSTable components.
RowCacheHitOutOfRangeCounterNumber of table row cache hits that do not satisfy the query filter, thus went to disk.
RowCacheHitCounterNumber of table row cache hits.
RowCacheMissCounterNumber of table row cache misses.
CasPrepareLatencyLatency of paxos prepare round.
CasProposeLatencyLatency of paxos propose round.
CasCommitLatencyLatency of paxos commit round.
PercentRepairedGauge<Double>Percent of table data that is repaired on disk.
SpeculativeRetriesCounterNumber of times speculative retries were sent for this table.
WaitingOnFreeMemtableSpaceHistogramHistogram of time spent waiting for free memtable space, either on- or off-heap.
DroppedMutationsCounterNumber of dropped mutations on this table.
-
-
-

Keyspace Metrics

-

Each keyspace in Cassandra has metrics responsible for tracking its state and performance.

-

These metrics are the same as the Table Metrics above, only they are aggregated at the Keyspace level.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.keyspace.<MetricName>.<Keyspace>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Keyspace scope=<Keyspace> name=<MetricName>
-
-
-
-

ThreadPool Metrics

-

Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It’s important to monitor the state of these thread pools since they can tell you how saturated a -node is.

-
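A quick way to check thread pool saturation on a node without a full monitoring stack is nodetool; sustained non-zero pending or blocked counts are the usual warning signs:

```bash
# Prints active, pending, completed and blocked task counts for every thread pool
nodetool tpstats
```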

The metric names are all appended with the specific ThreadPool name. The thread pools are also categorized under a -specific type.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ThreadPools.<MetricName>.<Path>.<ThreadPoolName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ThreadPools scope=<ThreadPoolName> type=<Type> name=<MetricName>
-
NameTypeDescription
ActiveTasksGauge<Integer>Number of tasks being actively worked on by this pool.
PendingTasksGauge<Integer>Number of queued tasks queued up on this pool.
CompletedTasksCounterNumber of tasks completed.
TotalBlockedTasksCounterNumber of tasks that were blocked due to queue saturation.
CurrentlyBlockedTaskCounterNumber of tasks that are currently blocked due to queue saturation but on retry will become unblocked.
MaxPoolSizeGauge<Integer>The maximum number of threads in this pool.
-

The following thread pools can be monitored.

NameTypeDescription
Native-Transport-RequeststransportHandles client CQL requests
CounterMutationStagerequestResponsible for counter writes
ViewMutationStagerequestResponsible for materialized view writes
MutationStagerequestResponsible for all other writes
ReadRepairStagerequestReadRepair happens on this thread pool
ReadStagerequestLocal reads run on this thread pool
RequestResponseStagerequestCoordinator requests to the cluster run on this thread pool
AntiEntropyStageinternalBuilds merkle tree for repairs
CacheCleanupExecutorinternalCache maintenance performed on this thread pool
CompactionExecutorinternalCompactions are run on these threads
GossipStageinternalHandles gossip requests
HintsDispatcherinternalPerforms hinted handoff
InternalResponseStageinternalResponsible for intra-cluster callbacks
MemtableFlushWriterinternalWrites memtables to disk
MemtablePostFlushinternalCleans up commit log after memtable is written to disk
MemtableReclaimMemoryinternalMemtable recycling
MigrationStageinternalRuns schema migrations
MiscStageinternalMiscellaneous tasks run here
PendingRangeCalculatorinternalCalculates token range
PerDiskMemtableFlushWriter_0internalResponsible for writing a spec (there is one of these per disk 0-N)
SamplerinternalResponsible for re-sampling the index summaries of SStables
SecondaryIndexManagementinternalPerforms updates to secondary indexes
ValidationExecutorinternalPerforms validation compaction or scrubbing
-
-
-

Client Request Metrics

-

Client requests have their own set of metrics that encapsulate the work happening at coordinator level.

-

Different types of client requests are broken down by RequestType.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ClientRequest.<MetricName>.<RequestType>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ClientRequest scope=<RequestType> name=<MetricName>
-
RequestType:

CASRead

-
Description:

Metrics related to transactional read requests.

-
Metrics:
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction read latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
UnfinishedCommitCounterNumber of transactions that were committed on read.
ConditionNotMetCounterNumber of transaction preconditions did not match current values.
ContentionHistogramHistogramHow many contended reads were encountered
-
RequestType:

CASWrite

-
Description:

Metrics related to transactional write requests.

-
Metrics:
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction write latency.
UnfinishedCommitCounterNumber of transactions that were committed on write.
ConditionNotMetCounterNumber of transaction preconditions did not match current values.
ContentionHistogramHistogramHow many contended writes were encountered
-
RequestType:

Read

-
Description:

Metrics related to standard read requests.

-
Metrics:
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of read failures encountered.
 LatencyRead latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

RangeSlice

-
Description:

Metrics related to token range read requests.

-
Metrics:
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of range query failures encountered.
 LatencyRange query latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

Write

-
Description:

Metrics related to regular write requests.

-
Metrics:
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of write failures encountered.
 LatencyWrite latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

ViewWrite

-
Description:

Metrics related to materialized view writes.

-
Metrics:
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
UnavailablesCounterNumber of unavailable exceptions encountered.
ViewReplicasAttemptedCounterTotal number of attempted view replica writes.
ViewReplicasSuccessCounterTotal number of succeeded view replica writes.
ViewPendingMutationsGauge<Long>ViewReplicasAttempted - ViewReplicasSuccess.
ViewWriteLatencyTimerTime between when mutation is applied to base table and when CL.ONE is achieved on view.
-
-
-
-

Cache Metrics

-

Cassandra caches have metrics to track the effectiveness of the caches, though the Table Metrics above are often more useful in practice.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>
-
NameTypeDescription
CapacityGauge<Long>Cache capacity in bytes.
EntriesGauge<Integer>Total number of cache entries.
FifteenMinuteCacheHitRateGauge<Double>15m cache hit rate.
FiveMinuteCacheHitRateGauge<Double>5m cache hit rate.
OneMinuteCacheHitRateGauge<Double>1m cache hit rate.
HitRateGauge<Double>All time cache hit rate.
HitsMeterTotal number of cache hits.
MissesMeterTotal number of cache misses.
MissLatencyTimerLatency of misses.
RequestsGauge<Long>Total number of cache requests.
SizeGauge<Long>Total size of occupied cache, in bytes.
-

The following caches are covered:

NameDescription
CounterCacheKeeps hot counters in memory for performance.
ChunkCacheIn process uncompressed page cache.
KeyCacheCache for partition to sstable offsets.
RowCacheCache for rows kept in memory.
-
-

Note

-

Misses and MissLatency are only defined for the ChunkCache

-
-
-
-

CQL Metrics

-

Metrics specific to CQL prepared statement caching.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CQL.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CQL name=<MetricName>
-
NameTypeDescription
PreparedStatementsCountGauge<Integer>Number of cached prepared statements.
PreparedStatementsEvictedCounterNumber of prepared statements evicted from the prepared statement cache
PreparedStatementsExecutedCounterNumber of prepared statements executed.
RegularStatementsExecutedCounterNumber of non prepared statements executed.
PreparedStatementsRatioGauge<Double>Percentage of statements that are prepared vs unprepared.
-
-
-

DroppedMessage Metrics

-

Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by Hinted Handoff

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.DroppedMessages.<MetricName>.<Type>
-
JMX MBean
-
org.apache.cassandra.metrics:type=DroppedMetrics scope=<Type> name=<MetricName>
-
NameTypeDescription
CrossNodeDroppedLatencyTimerThe dropped latency across nodes.
InternalDroppedLatencyTimerThe dropped latency within node.
DroppedMeterNumber of dropped messages.
-

The different types of messages tracked are:

NameDescription
BATCH_STOREBatchlog write
BATCH_REMOVEBatchlog cleanup (after successfully applied)
COUNTER_MUTATIONCounter writes
HINTHint replay
MUTATIONRegular writes
READRegular reads
READ_REPAIRRead repair
PAGED_SLICEPaged read
RANGE_SLICEToken range read
REQUEST_RESPONSERPC Callbacks
_TRACETracing writes
-
-
-

Streaming Metrics

-

Metrics reported during Streaming operations, such as repair, bootstrap, rebuild.

-

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>
-
NameTypeDescription
IncomingBytesCounterNumber of bytes streamed to this node from the peer.
OutgoingBytesCounterNumber of bytes streamed to the peer endpoint from this node.
-
-
-

Compaction Metrics

-

Metrics specific to Compaction work.

-
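The same information can also be sampled on the command line, for example:

```bash
# Shows the number of pending compaction tasks and the progress of running compactions
nodetool compactionstats
```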

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Compaction.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Compaction name=<MetricName>
-
NameTypeDescription
BytesCompactedCounterTotal number of bytes compacted since server [re]start.
PendingTasksGauge<Integer>Estimated number of compactions remaining to perform.
CompletedTasksGauge<Long>Number of completed compactions since server [re]start.
TotalCompactionsCompletedMeterThroughput of completed compactions since server [re]start.
PendingTasksByTableNameGauge<Map<String, Map<String, Integer>>>Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in Table Metrics.
-
-
-

CommitLog Metrics

-

Metrics specific to the CommitLog

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CommitLog.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CommitLog name=<MetricName>
-
NameTypeDescription
CompletedTasksGauge<Long>Total number of commit log messages written since [re]start.
PendingTasksGauge<Long>Number of commit log messages written but yet to be fsync’d.
TotalCommitLogSizeGauge<Long>Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocationTimerTime spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommitTimerThe time spent waiting on commit log fsync; for the periodic sync mode this only occurs when the sync is lagging its sync interval.
-
-
-

Storage Metrics

-

Metrics specific to the storage engine.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Storage.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Storage name=<MetricName>
-
NameTypeDescription
ExceptionsCounterNumber of internal exceptions caught. Under normal exceptions this should be zero.
LoadCounterSize, in bytes, of the on disk data size this node manages.
TotalHintsCounterNumber of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgressCounterNumber of hints attempting to be sent currently.
-
-
-

HintedHandoff Metrics

-

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>
-
NameTypeDescription
Hints_created-<PeerIP>CounterNumber of hints on disk for this peer.
Hints_not_stored-<PeerIP>CounterNumber of hints not stored for this peer, due to being down past the configured hint window.
-
-
-

SSTable Index Metrics

-

Metrics specific to the SSTable index metadata.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry
-
JMX MBean
-
org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>
-
NameTypeDescription
IndexedEntrySizeHistogramHistogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCountHistogramHistogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGetsHistogramHistogram of the number index seeks performed per SSTable.
-
-
-

BufferPool Metrics

-

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.BufferPool.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=BufferPool name=<MetricName>
-
NameTypeDescription
SizeGauge<Long>Size, in bytes, of the managed buffer pool
MissesMeterThe rate of misses in the pool. The higher this is the more allocations incurred.
-
-
-

Client Metrics

-

Metrics specific to client management.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Client.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Client name=<MetricName>
-
NameTypeDescription
connectedNativeClientsCounterNumber of clients connected to this node’s native protocol server
connectedThriftClientsCounterNumber of clients connected to this node’s thrift protocol server
-
-
-

JVM Metrics

-

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using Metric Reporters.

-
-

BufferPool

-
-
Metric Name
-
jvm.buffers.<direct|mapped>.<MetricName>
-
JMX MBean
-
java.nio:type=BufferPool name=<direct|mapped>
-
NameTypeDescription
CapacityGauge<Long>Estimated total capacity of the buffers in this pool
CountGauge<Long>Estimated number of buffers in the pool
UsedGauge<Long>Estimated memory that the Java virtual machine is using for this buffer pool
-
-
-

FileDescriptorRatio

-
-
Metric Name
-
jvm.fd.<MetricName>
-
JMX MBean
-
java.lang:type=OperatingSystem name=<OpenFileDescriptorCount|MaxFileDescriptorCount>
-
NameTypeDescription
UsageRatioRatio of used to total file descriptors
-
-
-

GarbageCollector

-
-
Metric Name
-
jvm.gc.<gc_type>.<MetricName>
-
JMX MBean
-
java.lang:type=GarbageCollector name=<gc_type>
-
NameTypeDescription
CountGauge<Long>Total number of collections that have occurred
TimeGauge<Long>Approximate accumulated collection elapsed time in milliseconds
-
-
-

Memory

-
-
Metric Name
-
jvm.memory.<heap/non-heap/total>.<MetricName>
-
JMX MBean
-
java.lang:type=Memory
-
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-

MemoryPool

-
-
Metric Name
-
jvm.memory.pools.<memory_pool>.<MetricName>
-
JMX MBean
-
java.lang:type=MemoryPool name=<memory_pool>
-
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-
-

JMX

-

Any JMX based client can access metrics from cassandra.

-

If you wish to access JMX metrics over http it’s possible to download Mx4jTool and -place mx4j-tools.jar into the classpath. On startup you will see in the log:

-
HttpAdaptor version 3.0.2 started on port 8081
-
-
-

To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -conf/cassandra-env.sh and uncomment:

-
#MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0"
-
-#MX4J_PORT="-Dmx4jport=8081"
-
-
-
-
-

Metric Reporters

-

As mentioned at the top of this section on monitoring, the Cassandra metrics can be exported to a number of monitoring systems via a number of built-in and third-party reporter plugins.

-

The configuration of these plugins is managed by the metrics reporter config project. There is a sample configuration file located at -conf/metrics-reporter-config-sample.yaml.

-

Once configured, you simply start cassandra with the flag -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml. The specified .yaml file plus any 3rd party reporter jars must all be in Cassandra’s classpath.

-
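A minimal sketch, assuming the packaged cassandra-env.sh (which appends JVM_EXTRA_OPTS to the JVM options) and a reporter config already placed in the conf directory:

```bash
# Start the node with the metrics reporter configuration enabled
JVM_EXTRA_OPTS="-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml" cassandra -f
```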
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/operating/read_repair.html b/src/doc/3.11.7/operating/read_repair.html deleted file mode 100644 index b4320f6a8..000000000 --- a/src/doc/3.11.7/operating/read_repair.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Read repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.7/operating/repair.html b/src/doc/3.11.7/operating/repair.html deleted file mode 100644 index 9f3f3d92b..000000000 --- a/src/doc/3.11.7/operating/repair.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/3.11.7/operating/security.html b/src/doc/3.11.7/operating/security.html deleted file mode 100644 index 71af24df5..000000000 --- a/src/doc/3.11.7/operating/security.html +++ /dev/null @@ -1,446 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-

There are three main components to the security features provided by Cassandra:

-
    -
  • TLS/SSL encryption for client and inter-node communication
  • -
  • Client authentication
  • -
  • Authorization
  • -
-
-

TLS/SSL Encryption

-

Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently.

-

In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can be overridden using the settings in cassandra.yaml, but this is not recommended unless there are policies in place which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be updated.

-

FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See the java document on FIPS -for more details.

-

For information on generating the keystore and truststore files used in SSL communications, see the -java documentation on creating keystores

-
-

Inter-node Encryption

-

The settings for managing inter-node encryption are found in cassandra.yaml in the server_encryption_options -section. To enable inter-node encryption, change the internode_encryption setting from its default value of none -to one value from: rack, dc or all.

-
-
-

Client to Node Encryption

-

The settings for managing client to node encryption are found in cassandra.yaml in the client_encryption_options -section. There are two primary toggles here for enabling encryption, enabled and optional.

-
    -
  • If neither is set to true, client connections are entirely unencrypted.
  • -
  • If enabled is set to true and optional is set to false, all client connections must be secured.
  • -
  • If both options are set to true, both encrypted and unencrypted connections are supported using the same port. -Client connections using encryption with this configuration will be automatically detected and handled by the server.
  • -
-

As an alternative to the optional setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set optional to false and use the native_transport_port_ssl -setting in cassandra.yaml to specify the port to be used for secure client communication.

-
-
-
-

Roles

-

Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -role_manager setting in cassandra.yaml. The default setting uses CassandraRoleManager, an implementation -which stores role information in the tables of the system_auth keyspace.

-

See also the CQL documentation on roles.

-
-
-

Authentication

-

Authentication is pluggable in Cassandra and is configured using the authenticator setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthenticator which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra’s permissions subsystem, so if authentication is disabled, effectively so are permissions.

-

The default distribution also includes PasswordAuthenticator, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication.

-
-

Enabling Password Authentication

-

Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster.

-

Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps:

-
    -
  1. Open a cqlsh session and change the replication factor of the system_auth keyspace. By default, this keyspace uses SimpleStrategy and a replication_factor of 1. It is recommended to change this for any non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to configure a replication factor of 3 to 5 per-DC.
-
ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3};
-
-
-
    -
  2. Edit cassandra.yaml to change the authenticator option like so:
-
authenticator: PasswordAuthenticator
-
-
-
    -
  3. Restart the node.
  4. Open a new cqlsh session using the credentials of the default superuser:
-
cqlsh -u cassandra -p cassandra
-
-
-
    -
  5. During login, the credentials for the default superuser are read with a consistency level of QUORUM, whereas those for all other users (including superusers) are read at LOCAL_ONE. In the interests of performance and availability, as well as security, operators should create another superuser and disable the default one. This step is optional, but highly recommended. While logged in as the default superuser, create another superuser role which can be used to bootstrap further configuration.
-
# create a new superuser
-CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super';
-
-
-
    -
  6. Start a new cqlsh session, this time logging in as the new superuser, and disable the default superuser.
-
ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false;
-
-
-
    -
  7. Finally, set up the roles and credentials for your application users with CREATE ROLE statements.
-

At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster.

-

Note that using PasswordAuthenticator also requires the use of CassandraRoleManager.

-

See also: Setting credentials for internal authentication, CREATE ROLE, -ALTER ROLE, ALTER KEYSPACE and GRANT PERMISSION,

-
-
-
-

Authorization

-

Authorization is pluggable in Cassandra and is configured using the authorizer setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthorizer which performs no checking and so effectively grants all -permissions to all roles. This must be used if AllowAllAuthenticator is the configured authenticator.

-

The default distribution also includes CassandraAuthorizer, which does implement full permissions management -functionality and stores its data in Cassandra system tables.

-
-

Enabling Internal Authorization

-

Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests.

-

The following assumes that authentication has already been enabled via the process outlined in -Enabling Password Authentication. Perform these steps to enable internal authorization across the cluster:

-
    -
  1. On the selected node, edit cassandra.yaml to change the authorizer option like so:
-
authorizer: CassandraAuthorizer
-
-
-
    -
  2. Restart the node.
  3. Open a new cqlsh session using the credentials of a role with superuser credentials:
-
cqlsh -u dba -p super
-
-
-
    -
  1. Configure the appropriate access privileges for your clients using GRANT PERMISSION -statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so -disruption to clients is avoided.
  2. -
-
GRANT SELECT ON ks.t1 TO db_user;
-
-
-
    -
  1. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node -restarts and clients reconnect, the enforcement of the granted permissions will begin.
  2. -
-

See also: GRANT PERMISSION, GRANT ALL <grant-all> and REVOKE PERMISSION

-
-
-
-

Caching

-

Enabling authentication and authorization places additional load on the cluster by frequently reading from the -system_auth tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from cassandra.yaml -or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from cassandra.yaml when the node is restarted.

-

Each cache has 3 options which can be set:

-
-
Validity Period
-
Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache.
-
Refresh Rate
-
Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these -async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a -shorter time than the validity period.
-
Max Entries
-
Controls the upper bound on cache size.
-
-

The naming for these options in cassandra.yaml follows the convention:

-
    -
  • <type>_validity_in_ms
  • -
  • <type>_update_interval_in_ms
  • -
  • <type>_cache_max_entries
  • -
-

Where <type> is one of credentials, permissions, or roles.

-
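An illustrative sketch only (the values are placeholders, not recommendations): tightening the roles cache following the convention above would mean settings of this form in cassandra.yaml:

```bash
# Append example roles cache settings to cassandra.yaml (values are placeholders)
cat >> conf/cassandra.yaml <<'EOF'
roles_validity_in_ms: 2000
roles_update_interval_in_ms: 1000
roles_cache_max_entries: 1000
EOF
```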

As mentioned, these are also exposed via JMX in the mbeans under the org.apache.cassandra.auth domain.

-
-
-

JMX access

-

Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra’s own auth subsystem.

-

The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -cassandra-env.sh (or cassandra-env.ps1 on Windows) to change the LOCAL_JMX setting to yes. Under the -standard configuration, when remote JMX connections are enabled, standard JMX authentication -is also switched on.

-

Note that by default, local-only connections are not subject to authentication, but this can be enabled.

-

If enabling remote connections, it is recommended to also use SSL connections.

-

Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as nodetool, are -correctly configured and working as expected.

-
-

Standard JMX Auth

-

Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -cassandra-env.sh by the line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

Edit the password file to add username/password pairs:

-
jmx_user jmx_password
-
-
-

Secure the credentials file so that only the user running the Cassandra process can read it :

-
$ chown cassandra:cassandra /etc/cassandra/jmxremote.password
-$ chmod 400 /etc/cassandra/jmxremote.password
-
-
-

Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in cassandra-env.sh:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

Then edit the access file to grant your JMX user readwrite permission:

-
jmx_user readwrite
-
-
-

Cassandra must be restarted to pick up the new settings.

-

See also : Using File-Based Password Authentication In JMX

-
-
-

Cassandra Integrated Auth

-

An alternative to the out-of-the-box JMX auth is to use Cassandra’s own authentication and/or authorization providers for JMX clients. This is potentially more flexible and secure but it comes with one major caveat. Namely that it is not available until after a node has joined the ring, because the auth subsystem is not fully configured until that point. However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, to switch to integrated auth once the node has joined the ring and initial setup is complete.

-

With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates can be managed centrally using just cqlsh. Furthermore, fine grained control over exactly which operations are permitted on particular MBeans can be achieved via GRANT PERMISSION.

-

To enable integrated authentication, edit cassandra-env.sh to uncomment these lines:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
-#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
-
-
-

And disable the JMX standard auth by commenting this line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

To enable integrated authorization, uncomment this line:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
-
-
-

Check standard access control is off by ensuring this line is commented out:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as:

-
CREATE ROLE jmx WITH LOGIN = false;
-GRANT SELECT ON ALL MBEANS TO jmx;
-GRANT DESCRIBE ON ALL MBEANS TO jmx;
-GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx;
-GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx;
-
-# Grant the jmx role to one with login permissions so that it can access the JMX tooling
-CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false;
-GRANT jmx TO ks_user;
-
-
-

Fine grained access control to individual MBeans is also supported:

-
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner;
-
-
-

This permits the ks_user role to invoke methods on the MBean representing a single table in test_keyspace, while -granting the same permission for all table level MBeans in that keyspace to the ks_owner role.

-

Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered.

-

See also: Permissions.

-
-
-

JMX With SSL

-

JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit -the relevant lines in cassandra-env.sh (or cassandra-env.ps1 on Windows) to uncomment and set the values of these -properties as required:

-
-
com.sun.management.jmxremote.ssl
-
set to true to enable SSL
-
com.sun.management.jmxremote.ssl.need.client.auth
-
set to true to enable validation of client certificates
-
com.sun.management.jmxremote.registry.ssl
-
enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub
-
com.sun.management.jmxremote.ssl.enabled.protocols
-
by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is -not usually necessary and using the defaults is the preferred option.
-
com.sun.management.jmxremote.ssl.enabled.cipher.suites
-
by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that -this is not usually necessary and using the defaults is the preferred option.
-
javax.net.ssl.keyStore
-
set the path on the local filesystem of the keystore containing server private keys and public certificates
-
javax.net.ssl.keyStorePassword
-
set the password of the keystore file
-
javax.net.ssl.trustStore
-
if validation of client certificates is required, use this property to specify the path of the truststore containing -the public certificates of trusted clients
-
javax.net.ssl.trustStorePassword
-
set the password of the truststore file
-
-

See also: Oracle Java7 Docs, -Monitor Java with JMX

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/operating/snitch.html b/src/doc/3.11.7/operating/snitch.html deleted file mode 100644 index 9aae4b334..000000000 --- a/src/doc/3.11.7/operating/snitch.html +++ /dev/null @@ -1,176 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Snitch" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Snitch

-

In cassandra, the snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route requests efficiently.
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping -machines into “datacenters” and “racks.” Cassandra will do its best not to have more than one replica on the same -“rack” (which may not actually be a physical location).
  • -
-
-

Dynamic snitching

-

The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is configured with the following properties in cassandra.yaml:

-
    -
  • dynamic_snitch: whether the dynamic snitch should be enabled or disabled.
  • -
  • dynamic_snitch_update_interval_in_ms: controls how often to perform the more expensive part of host score -calculation.
  • -
  • dynamic_snitch_reset_interval_in_ms: if set greater than zero and read_repair_chance is < 1.0, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity.
  • -
  • dynamic_snitch_badness_threshold: The badness threshold will control how much worse the pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned host was 20% worse than the fastest.
  • -
-
-
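A quick, hedged way to see which of these settings are explicitly configured on a node (the file location below assumes a package install; tarball installs keep cassandra.yaml under conf/):

```bash
# List any dynamic snitch settings explicitly set for this node
grep -E '^dynamic_snitch' /etc/cassandra/cassandra.yaml
```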
-

Snitch classes

-

The endpoint_snitch parameter in cassandra.yaml should be set to the class that implements IEndpointSnitch, which will be wrapped by the dynamic snitch and decides if two endpoints are in the same data center or on the same rack. Out of the box, Cassandra provides the following snitch implementations:

-
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in cassandra-rackdc.properties and propagated to other nodes via gossip. If cassandra-topology.properties exists, it is used as a fallback, allowing migration from the PropertyFileSnitch. A configuration sketch for this snitch follows the list below.
-
SimpleSnitch
-
Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for single-datacenter deployments.
-
PropertyFileSnitch
-
Proximity is determined by rack and data center, which are explicitly configured in cassandra-topology.properties.
-
Ec2Snitch
-
Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this will not work across multiple regions.
-
Ec2MultiRegionSnitch
-
Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the public IP as well). You will need to open the storage_port or ssl_storage_port on the public IP firewall (for intra-Region traffic, Cassandra will switch to the private IP after establishing a connection).
-
RackInferringSnitch
-
Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each node’s IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an example of writing a custom Snitch class and is provided in that spirit.
-
-
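To make the recommended production setup concrete, a minimal sketch that selects the GossipingPropertyFileSnitch and declares the local node's topology. It assumes a tarball layout (configs under conf/) and GNU sed; the datacenter and rack names are placeholders.

```bash
# Select the snitch in cassandra.yaml (endpoint_snitch key as documented above)
sed -i 's/^endpoint_snitch:.*/endpoint_snitch: GossipingPropertyFileSnitch/' conf/cassandra.yaml

# Declare this node's datacenter and rack (placeholder names)
cat > conf/cassandra-rackdc.properties <<'EOF'
dc=DC1
rack=RACK1
EOF
```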
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/operating/topo_changes.html b/src/doc/3.11.7/operating/topo_changes.html deleted file mode 100644 index a062745dc..000000000 --- a/src/doc/3.11.7/operating/topo_changes.html +++ /dev/null @@ -1,214 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Adding, replacing, moving and removing nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Adding, replacing, moving and removing nodes

-
-

Bootstrap

-

Adding new nodes is called “bootstrapping”. The num_tokens parameter defines the number of virtual nodes (tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) the node will become responsible for.

-
-

Token allocation

-

With the default token allocation algorithm the new node will pick num_tokens random tokens to become responsible for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but it also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with acceptable overhead.

-

On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, the new node must be started with the JVM option -Dcassandra.allocate_tokens_for_keyspace=<keyspace>, where <keyspace> is the keyspace from which the algorithm can find the load information to optimize token assignment for.

-
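As a sketch, one way to pass that flag on 3.x is via conf/jvm.options, which is read at startup; the keyspace name below is a placeholder for an existing keyspace with the desired replication settings.

```bash
# Enable the load-aware token allocator on the joining node only
# ("my_keyspace" is a placeholder for an existing keyspace)
echo '-Dcassandra.allocate_tokens_for_keyspace=my_keyspace' >> conf/jvm.options
bin/cassandra   # start the node and let it bootstrap
```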
-

Manual token assignment

-

You may specify a comma-separated list of tokens manually with the initial_token cassandra.yaml parameter, and if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment with an external tool or when restoring a node with its previous tokens.

-
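For illustration, a sketch that pins a node to three explicit tokens. The token values are evenly spaced Murmur3 tokens chosen for the example; num_tokens must match the number of tokens in the list, and the conf/ paths assume a tarball install.

```bash
# Illustrative: assign explicit tokens and skip automatic allocation
sed -i 's/^num_tokens:.*/num_tokens: 3/' conf/cassandra.yaml
echo 'initial_token: -9223372036854775808,-3074457345618258603,3074457345618258602' >> conf/cassandra.yaml
```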
-
-
-

Range streaming

-

After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become responsible for to stream data from. By default it will stream from the primary replica of each token range in order to guarantee data in the new node will be consistent with the current state.

-

In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and potentially miss data from an unavailable replica, set the JVM flag -Dcassandra.consistent.rangemovement=false.

-
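A hedged one-liner for the flag mentioned above, again assuming conf/jvm.options is how you pass JVM flags to the node:

```bash
# Allow bootstrap to proceed despite unavailable replicas,
# accepting that data held only on those replicas may be missed
echo '-Dcassandra.consistent.rangemovement=false' >> conf/jvm.options
```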
-
-

Resuming a failed or hung bootstrap

-

On 2.2+, if the bootstrap process fails, it’s possible to resume bootstrap from the previously saved state by calling nodetool bootstrap resume. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply restarting the node. In order to clean up bootstrap state and start fresh, you may set the JVM startup flag -Dcassandra.reset_bootstrap_progress=true.

-
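For example (the command and flag are as documented above; passing the flag via conf/jvm.options is an assumption about how you configure JVM options):

```bash
# Resume a failed or stalled bootstrap from its saved state (2.2+)
nodetool bootstrap resume

# Or discard the saved state and bootstrap from scratch on the next start
echo '-Dcassandra.reset_bootstrap_progress=true' >> conf/jvm.options
```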

On earlier versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart the bootstrap process.

-
-
-

Manual bootstrapping

-

It’s possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter auto_bootstrap: false. This may be useful when restoring a node from a backup or creating a new datacenter.

-
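A minimal sketch, assuming the tarball layout; since auto_bootstrap is not present in the shipped cassandra.yaml, it can simply be appended.

```bash
# Join the ring without streaming any data (e.g. when restoring from a backup);
# use with care, since the node will serve reads for its ranges immediately
echo 'auto_bootstrap: false' >> conf/cassandra.yaml
```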
-
-
-

Removing nodes

-

You can take a live node out of the cluster with nodetool decommission (run on the node itself), or remove a dead one with nodetool removenode (run from any other machine). This will assign the ranges the old node was responsible for to other nodes, and replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If removenode is used, the data will stream from the remaining replicas.

-
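For example (the host ID is a placeholder; take the real value from nodetool status):

```bash
# Remove a live node: run on the node being decommissioned
nodetool decommission

# Remove a dead node: run from any live node, using the dead node's Host ID
nodetool removenode 11111111-2222-3333-4444-555555555555
```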

No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at a different token on the ring, the data should be removed manually.

-
-
-

Moving nodes

-

When num_tokens: 1 it’s possible to move the node’s position in the ring with nodetool move. Moving is both more convenient and more efficient than a decommission followed by a bootstrap. After moving a node, nodetool cleanup should be run to remove any unnecessary data.

-
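For example (the token value is a placeholder; this only applies to nodes configured with num_tokens: 1):

```bash
# Move this single-token node to a new token, then drop data it no longer owns
nodetool move 3074457345618258602
nodetool cleanup
```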
-
-

Replacing a dead node

-

In order to replace a dead node, start Cassandra with the JVM startup flag -Dcassandra.replace_address_first_boot=<dead_node_ip>. Once this property is enabled the node starts in a hibernate state, during which all the other nodes will see this node as down.

-
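A sketch of the first start of the replacement node; the IP address is a placeholder for the dead node's address, and conf/jvm.options is just one way to pass the flag.

```bash
# On the replacement node, before its very first start
echo '-Dcassandra.replace_address_first_boot=10.0.0.42' >> conf/jvm.options
bin/cassandra
```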

The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. The main difference from normal bootstrapping of a new node is that this new node will not accept any writes during this phase.

-

Once the bootstrapping is complete the node will be marked “UP”. We rely on hinted handoff to make this node consistent, since it does not accept writes during the bootstrap.

-
-

Note

-

If the replacement process takes longer than max_hint_window_in_ms you MUST run repair to make the replaced node consistent again, since it missed ongoing writes during bootstrapping.

-
-
-
-

Monitoring progress

-

Bootstrap, replace, move and remove progress can be monitored using nodetool netstats, which will show the progress of the streaming operations.

-
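For example, polling the streaming progress every ten seconds (watch is assumed to be available on the host):

```bash
# Refresh nodetool netstats output every 10 seconds while streaming runs
watch -n 10 nodetool netstats
```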
-
-

Cleanup data after range movements

-

As a safety measure, Cassandra does not automatically remove data from nodes that “lose” part of their token range due to a range movement operation (bootstrap, move, replace). Run nodetool cleanup on the nodes that lost ranges to the joining node when you are satisfied the new node is up and working. If you do not do this, the old data will still be counted against the load on that node.

-
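For example (the keyspace argument is optional and a placeholder; without it, cleanup runs over all keyspaces on the node):

```bash
# Reclaim space on a node that gave up ranges to the new node;
# this rewrites SSTables and can be I/O intensive
nodetool cleanup my_keyspace
```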
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/search.html b/src/doc/3.11.7/search.html deleted file mode 100644 index fb9944986..000000000 --- a/src/doc/3.11.7/search.html +++ /dev/null @@ -1,103 +0,0 @@ ---- -layout: docpage - -title: "Search" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "" -doc-header-links: ' - -' -doc-search-path: "#" - -extra-footer: ' - - - - -' - ---- -
-
- -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/searchindex.js b/src/doc/3.11.7/searchindex.js deleted file mode 100644 index 9e174ae11..000000000 --- a/src/doc/3.11.7/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["architecture/dynamo","architecture/guarantees","architecture/index","architecture/overview","architecture/storage_engine","bugs","configuration/cassandra_config_file","configuration/index","contactus","cql/appendices","cql/changes","cql/ddl","cql/definitions","cql/dml","cql/functions","cql/index","cql/indexes","cql/json","cql/mvs","cql/security","cql/triggers","cql/types","data_modeling/index","development/code_style","development/how_to_commit","development/how_to_review","development/ide","development/index","development/patches","development/testing","faq/index","getting_started/configuring","getting_started/drivers","getting_started/index","getting_started/installing","getting_started/querying","index","operating/backups","operating/bloom_filters","operating/bulk_loading","operating/cdc","operating/compaction","operating/compression","operating/hardware","operating/hints","operating/index","operating/metrics","operating/read_repair","operating/repair","operating/security","operating/snitch","operating/topo_changes","tools/cqlsh","tools/index","tools/nodetool","tools/nodetool/assassinate","tools/nodetool/bootstrap","tools/nodetool/cleanup","tools/nodetool/clearsnapshot","tools/nodetool/clientstats","tools/nodetool/compact","tools/nodetool/compactionhistory","tools/nodetool/compactionstats","tools/nodetool/decommission","tools/nodetool/describecluster","tools/nodetool/describering","tools/nodetool/disableauditlog","tools/nodetool/disableautocompaction","tools/nodetool/disablebackup","tools/nodetool/disablebinary","tools/nodetool/disablefullquerylog","tools/nodetool/disablegossip","tools/nodetool/disablehandoff","tools/nodetool/disablehintsfordc","tools/nodetool/disableoldprotocolversions","tools/nodetool/drain","tools/nodetool/enableauditlog","tools/nodetool/enableautocompaction","tools/nodetool/enablebackup","tools/nodetool/enablebinary","tools/nodetool/enablefullquerylog","tools/nodetool/enablegossip","tools/nodetool/enablehandoff","tools/nodetool/enablehintsfordc","tools/nodetool/enableoldprotocolversions","tools/nodetool/failuredetector","tools/nodetool/flush","tools/nodetool/garbagecollect","tools/nodetool/gcstats","tools/nodetool/getbatchlogreplaythrottle","tools/nodetool/getcompactionthreshold","tools/nodetool/getcompactionthroughput","tools/nodetool/getconcurrency","tools/nodetool/getconcurrentcompactors","tools/nodetool/getconcurrentviewbuilders","tools/nodetool/getendpoints","tools/nodetool/getinterdcstreamthroughput","tools/nodetool/getlogginglevels","tools/nodetool/getmaxhintwindow","tools/nodetool/getreplicas","tools/nodetool/getseeds","tools/nodetool/getsstables","tools/nodetool/getstreamthroughput","tools/nodetool/gettimeout","tools/nodetool/gettraceprobability","tools/nodetool/gossipinfo","tools/nodetool/handoffwindow","tools/nodetool/help","tools/nodetool/import","tools/nodetool/info","tools/nodetool/invalidatecountercache","tools/nodetool/invalidatekeycache","tools/nodetool/invalidaterowcache","tools/nodetool/join","tools/nodetool/listsnapshots","tools/nodetool/move","tools/nodetool/netstats","tools/nodetool/nodetool","tools/nodetool/pausehandoff","tools/nodetool/profileload","tools/nodetool/proxyhistograms","tools/nodetool/rangekeysample","tools/nodetool/rebuild","tools/nodetool/rebuild_index","tools/nodetool/refresh","tools/n
odetool/refreshsizeestimates","tools/nodetool/reloadlocalschema","tools/nodetool/reloadseeds","tools/nodetool/reloadssl","tools/nodetool/reloadtriggers","tools/nodetool/relocatesstables","tools/nodetool/removenode","tools/nodetool/repair","tools/nodetool/repair_admin","tools/nodetool/replaybatchlog","tools/nodetool/resetfullquerylog","tools/nodetool/resetlocalschema","tools/nodetool/resumehandoff","tools/nodetool/ring","tools/nodetool/scrub","tools/nodetool/setbatchlogreplaythrottle","tools/nodetool/setcachecapacity","tools/nodetool/setcachekeystosave","tools/nodetool/setcompactionthreshold","tools/nodetool/setcompactionthroughput","tools/nodetool/setconcurrency","tools/nodetool/setconcurrentcompactors","tools/nodetool/setconcurrentviewbuilders","tools/nodetool/sethintedhandoffthrottlekb","tools/nodetool/setinterdcstreamthroughput","tools/nodetool/setlogginglevel","tools/nodetool/setmaxhintwindow","tools/nodetool/setstreamthroughput","tools/nodetool/settimeout","tools/nodetool/settraceprobability","tools/nodetool/sjk","tools/nodetool/snapshot","tools/nodetool/status","tools/nodetool/statusautocompaction","tools/nodetool/statusbackup","tools/nodetool/statusbinary","tools/nodetool/statusgossip","tools/nodetool/statushandoff","tools/nodetool/stop","tools/nodetool/stopdaemon","tools/nodetool/tablehistograms","tools/nodetool/tablestats","tools/nodetool/toppartitions","tools/nodetool/tpstats","tools/nodetool/truncatehints","tools/nodetool/upgradesstables","tools/nodetool/verify","tools/nodetool/version","tools/nodetool/viewbuildstatus","troubleshooting/index"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.todo":1,sphinx:54},filenames:["architecture/dynamo.rst","architecture/guarantees.rst","architecture/index.rst","architecture/overview.rst","architecture/storage_engine.rst","bugs.rst","configuration/cassandra_config_file.rst","configuration/index.rst","contactus.rst","cql/appendices.rst","cql/changes.rst","cql/ddl.rst","cql/definitions.rst","cql/dml.rst","cql/functions.rst","cql/index.rst","cql/indexes.rst","cql/json.rst","cql/mvs.rst","cql/security.rst","cql/triggers.rst","cql/types.rst","data_modeling/index.rst","development/code_style.rst","development/how_to_commit.rst","development/how_to_review.rst","development/ide.rst","development/index.rst","development/patches.rst","development/testing.rst","faq/index.rst","getting_started/configuring.rst","getting_started/drivers.rst","getting_started/index.rst","getting_started/installing.rst","getting_started/querying.rst","index.rst","operating/backups.rst","operating/bloom_filters.rst","operating/bulk_loading.rst","operating/cdc.rst","operating/compaction.rst","operating/compression.rst","operating/hardware.rst","operating/hints.rst","operating/index.rst","operating/metrics.rst","operating/read_repair.rst","operating/repair.rst","operating/security.rst","operating/snitch.rst","operating/topo_changes.rst","tools/cqlsh.rst","tools/index.rst","tools/nodetool.rst","tools/nodetool/assassinate.rst","tools/nodetool/bootstrap.rst","tools/nodetool/cleanup.rst","tools/nodetool/clearsnapshot.rst","tools/nodetool/clientstats.rst","tools/nodetool/compact.rst","tools/nodetool/compactionhistory.rst","tools/nodetool/compactionstats.rst","tools/nodetool/decommission.rst","tools/nodetool/describecluster.rst","tools/nodetool/describering.rst","tools/nodetool/disableauditlog.rst","tools/nodet
ool/disableautocompaction.rst","tools/nodetool/disablebackup.rst","tools/nodetool/disablebinary.rst","tools/nodetool/disablefullquerylog.rst","tools/nodetool/disablegossip.rst","tools/nodetool/disablehandoff.rst","tools/nodetool/disablehintsfordc.rst","tools/nodetool/disableoldprotocolversions.rst","tools/nodetool/drain.rst","tools/nodetool/enableauditlog.rst","tools/nodetool/enableautocompaction.rst","tools/nodetool/enablebackup.rst","tools/nodetool/enablebinary.rst","tools/nodetool/enablefullquerylog.rst","tools/nodetool/enablegossip.rst","tools/nodetool/enablehandoff.rst","tools/nodetool/enablehintsfordc.rst","tools/nodetool/enableoldprotocolversions.rst","tools/nodetool/failuredetector.rst","tools/nodetool/flush.rst","tools/nodetool/garbagecollect.rst","tools/nodetool/gcstats.rst","tools/nodetool/getbatchlogreplaythrottle.rst","tools/nodetool/getcompactionthreshold.rst","tools/nodetool/getcompactionthroughput.rst","tools/nodetool/getconcurrency.rst","tools/nodetool/getconcurrentcompactors.rst","tools/nodetool/getconcurrentviewbuilders.rst","tools/nodetool/getendpoints.rst","tools/nodetool/getinterdcstreamthroughput.rst","tools/nodetool/getlogginglevels.rst","tools/nodetool/getmaxhintwindow.rst","tools/nodetool/getreplicas.rst","tools/nodetool/getseeds.rst","tools/nodetool/getsstables.rst","tools/nodetool/getstreamthroughput.rst","tools/nodetool/gettimeout.rst","tools/nodetool/gettraceprobability.rst","tools/nodetool/gossipinfo.rst","tools/nodetool/handoffwindow.rst","tools/nodetool/help.rst","tools/nodetool/import.rst","tools/nodetool/info.rst","tools/nodetool/invalidatecountercache.rst","tools/nodetool/invalidatekeycache.rst","tools/nodetool/invalidaterowcache.rst","tools/nodetool/join.rst","tools/nodetool/listsnapshots.rst","tools/nodetool/move.rst","tools/nodetool/netstats.rst","tools/nodetool/nodetool.rst","tools/nodetool/pausehandoff.rst","tools/nodetool/profileload.rst","tools/nodetool/proxyhistograms.rst","tools/nodetool/rangekeysample.rst","tools/nodetool/rebuild.rst","tools/nodetool/rebuild_index.rst","tools/nodetool/refresh.rst","tools/nodetool/refreshsizeestimates.rst","tools/nodetool/reloadlocalschema.rst","tools/nodetool/reloadseeds.rst","tools/nodetool/reloadssl.rst","tools/nodetool/reloadtriggers.rst","tools/nodetool/relocatesstables.rst","tools/nodetool/removenode.rst","tools/nodetool/repair.rst","tools/nodetool/repair_admin.rst","tools/nodetool/replaybatchlog.rst","tools/nodetool/resetfullquerylog.rst","tools/nodetool/resetlocalschema.rst","tools/nodetool/resumehandoff.rst","tools/nodetool/ring.rst","tools/nodetool/scrub.rst","tools/nodetool/setbatchlogreplaythrottle.rst","tools/nodetool/setcachecapacity.rst","tools/nodetool/setcachekeystosave.rst","tools/nodetool/setcompactionthreshold.rst","tools/nodetool/setcompactionthroughput.rst","tools/nodetool/setconcurrency.rst","tools/nodetool/setconcurrentcompactors.rst","tools/nodetool/setconcurrentviewbuilders.rst","tools/nodetool/sethintedhandoffthrottlekb.rst","tools/nodetool/setinterdcstreamthroughput.rst","tools/nodetool/setlogginglevel.rst","tools/nodetool/setmaxhintwindow.rst","tools/nodetool/setstreamthroughput.rst","tools/nodetool/settimeout.rst","tools/nodetool/settraceprobability.rst","tools/nodetool/sjk.rst","tools/nodetool/snapshot.rst","tools/nodetool/status.rst","tools/nodetool/statusautocompaction.rst","tools/nodetool/statusbackup.rst","tools/nodetool/statusbinary.rst","tools/nodetool/statusgossip.rst","tools/nodetool/statushandoff.rst","tools/nodetool/stop.rst","tools/nodetool/stopdaemon.rst","tools/nodetool
/tablehistograms.rst","tools/nodetool/tablestats.rst","tools/nodetool/toppartitions.rst","tools/nodetool/tpstats.rst","tools/nodetool/truncatehints.rst","tools/nodetool/upgradesstables.rst","tools/nodetool/verify.rst","tools/nodetool/version.rst","tools/nodetool/viewbuildstatus.rst","troubleshooting/index.rst"],objects:{},objnames:{},objtypes:{},terms:{"00t89":21,"03t04":21,"0x0000000000000003":14,"0x00000004":13,"100mb":6,"10mb":6,"10s":52,"10x":[6,41],"11e6":52,"128th":4,"12gb":43,"12h30m":21,"15m":46,"160mb":41,"16mb":[30,41],"180kb":6,"19t03":139,"1mo":21,"1st":21,"24h":21,"250m":6,"256mb":6,"256th":6,"29d":21,"2e10":10,"2gb":43,"2nd":[6,11,50],"2xlarg":43,"300s":6,"327e":52,"32gb":43,"32mb":[6,30],"36x":34,"3ff3e5109f22":13,"3gb":42,"3rd":[6,46,50],"40f3":13,"4ae3":13,"4kb":11,"4xlarg":43,"50kb":6,"50mb":[6,41],"512mb":6,"5573e5b09f14":13,"5kb":6,"5mb":41,"64k":6,"64kb":42,"6ms":6,"6tb":43,"7374e9b5ab08c1f1e612bf72293ea14c959b0c3c":24,"75th":46,"86400s":41,"89h4m48":21,"8gb":43,"8th":[6,40],"90th":46,"95ac6470":52,"95th":46,"98th":46,"99th":46,"9th":46,"\u00eatre":9,"abstract":[23,25],"boolean":[9,12,14,17,19,21,52],"break":[28,41],"byte":[6,9,13,21,46,62,80,116,166],"case":[6,10,11,12,13,14,16,17,18,21,24,25,28,29,30,38,43,49,51,52],"catch":23,"class":[6,11,14,21,23,26,29,41,42,45,49,117,129,150],"default":[4,6,10,11,13,14,17,19,21,26,29,30,31,34,38,40,41,42,46,49,51,52,57,76,80,87,116,117,119,122,132,133,139,154,156,167],"enum":9,"export":[26,46,52],"final":[14,19,23,26,41,43,49,133],"float":[9,10,11,12,14,17,21,38,42],"function":[6,9,10,11,12,15,16,18,19,21,25,32,36,49,50,52],"import":[11,14,21,26,27,29,31,41,43,46,52,117],"int":[9,10,11,13,14,17,18,19,21,29,40,42],"long":[6,13,21,24,25,30,41,46],"new":[0,4,6,10,11,14,16,17,18,19,20,21,23,25,26,28,29,33,36,38,41,43,49,51,108,115,117],"null":[9,10,12,13,14,17,18,21,23,52],"public":[6,14,23,29,30,34,49,50],"return":[6,9,11,13,14,16,17,18,19,21,25,132],"short":[6,21],"static":[6,9,10,18,50],"super":49,"switch":[6,10,19,26,30,45,46,49,50],"throw":[6,14,23,29],"true":[6,11,12,17,19,21,26,30,40,41,49,51,52,114,117],"try":[6,11,23,26,28,30,41,54,132],"var":[6,23,34],"void":29,"while":[6,10,11,12,13,21,24,28,38,41,42,43,49,52],AES:6,AND:[9,11,13,14,18,19,49,52],AWS:43,Added:10,Adding:[6,11,19,21,30,36,45,49],And:[11,14,19,49],Are:25,Ave:21,BUT:23,But:[13,15,19,21,23,28,30,52],CAS:6,CFs:[132,139],CLS:52,DCs:6,DNS:30,Doing:10,EBS:43,For:[0,4,6,9,10,11,12,13,14,15,16,17,18,19,20,21,28,29,30,31,34,35,41,43,49,50,52],GCs:6,Has:[6,25],IDE:[27,36],IDEs:[26,27],IDs:[117,157],INTO:[6,9,11,13,14,17,21],IPs:[6,50,138,157],Ids:163,JKS:6,KBs:6,LCS:11,NFS:43,NOT:[6,9,10,11,13,14,16,18,19,20,21],Not:[13,19,28,41,42],ONE:[0,6,46,52],One:[6,29,30,41],PFS:6,Pis:43,Such:21,THE:6,TLS:[6,45],That:[11,12,18,21,28,30,41,52],The:[0,4,6,8,9,10,12,14,16,18,19,20,21,23,24,26,28,29,30,31,34,35,36,38,40,42,43,46,49,50,51,52,57,60,65,67,73,77,83,86,87,90,95,99,101,103,108,115,117,119,123,124,130,132,139,142,143,150,156,157,158,165,167,170,171,173],Their:21,Then:[13,29,30,34,41,49],There:[0,6,10,11,12,13,14,21,26,28,29,30,41,46,49],These:[4,6,11,14,26,46,49,52],USE:[9,14,15],USING:[9,13,16,20,21,41],Use:[11,13,19,30,35,45,52,55,60,117,122,132,163,170],Used:46,Uses:[6,17,45,50],Using:[11,13,29,30,49],WILL:6,WITH:[9,11,12,16,18,19,38,40,41,42,49,52],Will:[6,36,80,117,150],With:[6,13,17,30,41,51,56],Yes:30,_cache_max_entri:49,_if_:6,_must_:6,_trace:46,_udt:14,_update_interval_in_m:49,_use:14,_validity_in_m:49,a278b781fe4b2bda:34,abil:[14,30,42],abilityid:16,abl:[6,14,21,26,
29,30,41],about:[4,6,19,26,28,29,30,38,41,50,52,59,117,138],abov:[6,8,11,12,13,14,21,26,28,30,31,40,41,46],absenc:12,abstracttyp:21,accept:[0,6,10,11,12,13,17,28,29,38,51,75,117],access:[6,10,21,26,28,43,45,46],accompani:6,accord:[6,30],accordingli:[6,14,30],account:[6,21,29],accru:[41,46],accumul:[6,41,46],accur:[6,30,38,138],accuraci:[38,119,167],acheiv:49,achiev:[41,46],achil:32,ack:6,acoount:46,acquir:[19,46],across:[6,11,19,28,46,49,50,117,121],action:[6,13],activ:[4,6,28,40,46,52,117,119,167],activetask:46,actual:[4,6,13,20,23,25,30,34,41,50,132],acycl:19,add:[0,6,9,10,11,21,24,25,28,31,34,36,41,49],addamsfamili:11,added:[0,6,10,11,14,25,41],adding:[6,13,14,25,43,52],addit:[0,6,9,11,13,19,21,26,28,31,41,43,46,49,52],addition:[11,13,41],address:[6,8,17,21,26,28,31,36,46,50,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],adher:10,adjac:41,adjust:[6,38],adv:34,advanc:[6,45,49],advantag:43,advers:30,advic:[28,30],advis:[6,12,21,30],af08:13,afd:21,affect:[6,25,28,30,41,139],afford:6,after:[5,6,10,11,12,13,14,16,17,18,26,28,30,40,41,43,45,46,49,50,52],afterward:[26,29],afunct:14,again:[6,28,41,51,52],against:[6,11,14,28,29,30,43,51,52,132],agent:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],agentlib:26,aggreg:[6,9,10,13,15,18,19,46,52],aid:12,aim:6,akeyspac:14,algorithm:[6,11,51],alia:[10,13,32],alias:[6,10,18],alic:19,align:23,aliv:6,all:[0,6,9,11,12,13,14,17,18,21,23,24,25,26,28,29,36,38,40,41,46,49,51,52,57,58,59,75,87,92,108,109,114,117,119,121,130,133,139,154,156,158,167,169,170,171],allmemtableslivedatas:46,allmemtablesoffheaps:46,allmemtablesonheaps:46,alloc:[6,30,40,43,46],allocate_tokens_for_keyspac:51,allow:[0,4,6,9,10,11,12,14,16,17,18,21,31,38,40,41,42,43,50],allowallauthent:[6,49],allowallauthor:[6,49],allowallinternodeauthent:6,almost:[6,14,21,41],alon:[6,23],along:[6,13,114,117],alongsid:[35,52],alphabet:23,alphanumer:[11,19],alreadi:[6,11,14,16,18,21,28,41,49,170],also:[0,4,6,10,11,12,13,14,17,18,19,21,26,28,29,30,31,41,43,46,49,51,52,87,171],alter:[9,10,15,17,30,38,40,41,42,49],alter_keyspace_stat:12,alter_role_stat:12,alter_table_instruct:11,alter_table_stat:12,alter_type_modif:21,alter_type_stat:[12,21],alter_user_stat:12,altern:[6,10,11,12,13,17,21,26,28,31,43,49],although:[6,28],alwai:[0,6,9,10,11,13,14,18,21,23,28,29,30,41,43],amend:24,amongst:11,amount:[6,11,13,21,26,28,29,30,41,42,43,46,51,52,132],amplif:[41,43],anaggreg:14,analogu:13,analyt:38,analyz:29,ani:[0,6,10,11,12,13,14,17,18,19,20,21,24,25,26,28,29,31,34,36,40,41,43,46,49,51,52,55,108,114,117,122,139,154],annot:23,anonym:[12,21],anoth:[6,11,14,19,21,29,41,49,52],anotherarg:14,ant:[26,28,29],anti:[6,21],anticip:11,anticompact:[41,163],antientropystag:46,antipattern:43,anymor:[24,41],anyon:23,anyth:41,anywai:6,anywher:13,apach:[2,5,6,7,14,20,23,24,25,26,28,29,30,33,3
4,41,42,46,49,53],api:[6,8,11,15,17,35,50],appear:[12,14,41,52],append:[21,24,43,46,52],appendic:[15,36],appendix:[12,15],appl:21,appli:[6,9,10,11,12,13,19,21,24,28,29,30,46,52],applic:[6,11,19,23,25,26,49],appreci:28,approach:[4,41,51],appropri:[6,11,19,21,25,28,49,50,51],approxim:[41,46],apt:34,arbitrari:[11,12,21],architectur:[30,36],archiv:[6,40,80],archive_command:80,archive_retri:80,aren:13,arg:[14,117,155],argnam:14,argnum:14,argument:[6,11,13,14,16,17,30,31,42,52,55,56,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],arguments_declar:14,arguments_signatur:14,around:[6,19,41,43,50],arrai:[6,30],arriv:[6,28,30],artifact:26,artifici:11,asap:10,asc:[9,11,13],ascend:[11,13],ascii:[9,14,17,21],asf:26,ask:[5,28,29,36,49],aspect:11,assassin:117,assertionerror:23,assertrow:29,assign:[6,13,30],associ:[6,11],assum:[6,11,14,26,49,50],assumpt:49,astyanax:32,async:[6,49],asynchron:[6,16,30,43],asynchroni:46,atabl:14,atom:[11,13,20,24],atomiclong:46,attach:28,attemp:46,attempt:[0,6,11,16,18,19,21,30,41,46,49,52,133],attent:[23,28],attribut:41,audit:[66,76,117],auditlog:76,auth:6,authent:[10,45,52],authenticatedus:6,author:[9,19,21,45],authorizationproxi:49,auto:[6,30,158],auto_bootstrap:51,autocompact:[41,67,77,117,158],autogener:54,autom:[8,23],automat:[6,13,14,16,26,29,30,34,41,49,51],avail:[0,6,8,11,14,19,26,28,29,34,40,49,50,52,57,87,130,139,150,170],availabil:6,averag:[6,14,41,46],average_live_cells_per_slice_last_five_minut:166,average_s:11,average_tombstones_per_slice_last_five_minut:166,averagefin:14,averagest:14,avg_bucket_s:41,avoid:[6,11,12,23,25,28,38,41,43,49,50,52,171],awai:[26,51,52],awar:[0,11,28,38,42,138],azur:43,b124:13,b70de1d0:13,back:[6,41,46,51,114,117],backend:6,background:[30,34,41,49],backlog:6,backpressur:6,backpressurestrategi:6,backup:[6,36,41,45,51,52,68,78,117,159],backward:[6,10,11,15,19,21],bad:[6,14,30,50],balanc:51,banana:21,band:21,bar:[12,23],bardet:21,bare:6,base:[0,4,6,10,11,13,14,18,19,21,24,28,29,30,41,43,46,49,51],bash:30,basi:[6,30,42],basic:[11,41,43],batch:[6,9,15,29,36,52],batch_remov:46,batch_stat:12,batch_stor:46,batchlog:[13,46,89,117,134,140],be34:13,beatl:21,beca:52,becaus:[6,13,14,34,41,42,49],becom:[4,6,11,14,19,28,41,46,49,51],been:[0,4,6,10,13,14,15,19,21,25,28,41,43,49,139],befor:[0,6,10,11,13,14,16,20,21,26,27,29,32,41,49,50,52,80,156],begin:[9,12,13,29,49,52],beginn:28,begintoken:52,behavior:[0,6,10,14,17,21,23,25,38,41,51,133],behind:[6,23,29,30,41],being:[6,11,13,17,21,25,29,30,38,41,46,51],belong:[11,13,14,46,57,117],below:[6,11,12,13,17,19,21,28,34,41,52,63],benchmark:43,benefici:41,benefit:[6,38,41,43,45],besid:6,best:[6,29,41,49,50],best_effort:6,better:[6,23,28,41,43],between:[0,6,9,10,13,15,28,30,38,41,46,49,51,132,154],beyond:[6,52,171],big:[41,60],bigger:[11,41],biggest:14,bigint:[9,14,17,21],bigintasblob:14,bin:[26,34,35,52],binari:[14,33,69,79,117,160],binauditlogg:76,bind:[6,10,12,14,30],bind_mark:[12,13,18,21],biolog:11,birth:13,birth_year:13,bit:[6,14,17,21,28,30,42,43],bite:30,bitrot:11,bitstr:9,black:6,blank:[6,23,30],bleed:26,blindli:30,blob:[9,10,12,17,21,36,42],blobasbigint:14,blobastyp:14,block:[4,6,11,24,31,41,43,46,49,80],blockedonalloc:6,blog:[6,11,13],blog_til:13,blog_titl:13,bloom:[4,11,36,43,45,46],bloom_filter_false_posit:166,bloom_filter_false_ratio:166,bloom_filter_fp_ch:[11,38],bloom_filter_off_heap_memory_us:166,bloom_filter_space_us:166,bloomfilterdisks
paceus:46,bloomfilterfalseposit:46,bloomfilterfalseratio:46,bloomfilteroffheapmemoryus:46,blunt:49,bnf:12,bob:[13,19],bodi:[11,12],boilerpl:27,boolstyl:52,boost:6,boot:30,bootstrap:[0,6,36,42,45,46,49,117,122,150],born:13,both:[0,6,11,13,14,18,21,24,25,28,30,31,38,41,42,43,46,49,51,52],bottleneck:6,bottom:30,bound:[6,11,12,21,43,49],box:[6,49,50],brace:23,bracket:12,braket:12,branch:[24,25,26,29],branchnam:28,breakpoint:26,breed:29,bring:6,brk:30,broadcast:6,broadcast_address:50,broken:[41,46],brows:6,browser:52,bucket:41,bucket_high:41,bucket_low:41,buffer:[4,6,46],bufferpool:45,bug:[10,24,29,30,36],build:[8,27,29,36,46,117,173],builder:[94,117,147],built:[26,46],bulk:[36,45],bump:10,bunch:23,burn:40,button:30,bytebuff:14,byteorderedpartition:[6,14],bytescompact:46,bytesflush:46,bytestyp:9,c73de1d3:13,cach:[6,30,31,43,45,50,108,110,111,112,117,141,142],cachecleanupexecutor:46,cachenam:46,calcul:[6,38,40,41,46,50],call:[9,11,12,13,14,19,23,31,36,41,43,46,51,117,150],callback:46,caller:23,can:[0,4,5,6,8,9,10,11,12,13,14,16,17,18,19,20,21,23,24,25,26,28,29,31,34,35,36,38,40,41,42,43,46,49,50,51,52,55,57,58,60,65,67,73,77,80,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],cancel:[10,133],candid:41,cannot:[6,9,11,13,14,17,18,19,21,41,49,55,117],cap:[12,91,96,102,117,144,149,152],capabl:[6,30,52],capac:[6,40,46,50,117,119,141,167],captur:[6,36,45],care:[6,41,132],carlo:19,carri:[23,132],cascommit:46,cascontent:[103,153],casprepar:46,caspropos:46,casread:46,cassablanca:21,cassafort:32,cassandra:[0,2,4,5,8,10,11,13,14,19,20,21,23,24,28,32,33,35,38,41,42,43,46,50,51,52,76,80,117,128,132,135,139,164,172],cassandra_hom:[6,40,49],cassandraauthor:[6,49],cassandradaemon:[26,34],cassandralogin:49,cassandrarolemanag:[6,49],casser:32,cassi:32,cast:[10,13,18],caswrit:46,cat:21,categor:46,categori:[11,12,13,14,76],caught:[25,46],caus:[6,18,30,41,49],caution:6,caveat:49,cbc:6,ccm:[25,29],ccmlib:29,cdc:[6,11],cdc_enabl:40,cdc_free_space_check_interval_m:40,cdc_free_space_in_mb:40,cdc_raw:[6,40],cdc_raw_directori:40,cdccompactor:6,cell:[6,21,46,87,171],center:[6,11,21,30,50,51,73,83,117,132],central:[26,49,52],centric:19,certain:[6,9,11,19,29,41,49],certainli:14,certif:[49,117,128],cfname:[101,119,167],cfs:23,chain:19,chanc:38,chang:[6,11,12,15,19,21,24,26,27,33,34,36,42,45,46,49,150],channel:[5,8,28],charact:[11,12,13,17,19,21,23,52],chat:8,cheap:6,check:[0,6,11,13,23,25,26,28,29,30,38,40,41,46,49,108,117,132,171],checklist:[27,28,36],checkout:[26,28],checksum:[11,42,117,171],cherri:24,chess:13,child:52,chmod:49,choic:[6,11,36,41,45],choos:[0,6,11,27,32,43,46],chosen:[0,6,11,14],chown:49,christoph:21,chrome:52,chunk:[4,6,30,42,52],chunk_length_in_kb:[11,42],chunk_length_kb:6,chunk_lenth_in_kb:11,chunkcach:46,chunksiz:52,churn:6,cipher:[6,49],cipher_suit:6,circular:19,citi:21,clash:12,class_nam:6,classpath:[6,14,21,46],claus:[10,11,14,16,17,18,19,23],clean:[6,23,46,57,117,135],cleanli:28,cleanup:[30,41,45,46,87,117,163],clear:[25,28,59,108],clearsnapshot:117,click:[13,26,28,29],client:[0,6,8,10,11,13,17,19,21,25,30,31,33,36,43,45,52,59,117],client_encryption_opt:49,clientrequest:46,clientstat:117,clock:6,clockr:6,clojur:33,clone:[26,30,52],close:[6,15,49],closer:38,cloud:45,cluster:[0,4,6,9,10,13,14,20,21,25,29,31,35,36,41,43,46,49,50,51,52,64,85,89,105,117,140,157],cluster_nam:[31,35],clustering_column:11,clustering_ord:11,cmsparallelremarken:26,coalesc:6,coalescingstrategi:
6,code:[6,10,12,14,20,24,25,26,27,29,36,42,46],codestyl:23,col:14,cold:6,collat:6,collect:[6,10,11,12,13,14,15,17,43,45,46,87],collection_liter:12,collection_typ:21,color:[21,52],column1:9,column:[6,9,10,12,13,14,15,16,17,18,21,42,46,52,101,119,139,156,167],column_definit:11,column_nam:[11,13,16],columnfamili:[6,9,23,41],colupdatetimedeltahistogram:46,com:[6,11,14,23,24,49],combin:[4,6,10,40,41],come:[6,9,49],comingl:41,comma:[6,11,12,13,31,49,51,52,76,119,122,167],command:[0,6,24,29,30,31,34,35,42,45,53,55,56,57,58,60,65,67,73,77,80,83,86,87,90,92,95,99,101,103,107,108,115,117,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],comment:[6,11,15,18,23,25,49],commit:[6,8,11,27,28,36,46],commitlog:[2,6,30,31,43,45],commitlog_archiv:6,commitlog_directori:[31,43],commitlog_segment_size_in_mb:30,commitlogread:40,commitlogreadhandl:40,commitlogreplay:40,commitlogseg:[6,45,46],committ:[24,28,29],common:[0,14,15,23,25,28,45,52],common_nam:11,commonli:117,commun:[6,8,25,26,28,30,31,35,49],commut:30,compact:[4,6,15,30,36,38,42,43,45,57,61,62,87,90,91,117,130,139,143,144,150,158,163,170],compacted_partition_maximum_byt:166,compacted_partition_mean_byt:166,compacted_partition_minimum_byt:166,compaction_:163,compaction_window_s:41,compaction_window_unit:41,compactionbyteswritten:46,compactionexecutor:46,compactionhistori:[41,117],compactionid:163,compactionparamet:41,compactionparametersjson:41,compactionstat:[41,117],compactionstrategi:45,compactor:[93,117,146],compar:[6,28,41,46],compat:[6,9,10,11,13,15,19,25,28],compatilibi:21,compet:6,compil:[23,26,52],complain:26,complet:[6,13,14,28,30,41,46,49,51,52,117,131,133],completedtask:46,complex:[6,9,14,21,28],complexarg:14,compliant:[6,14,49],complic:28,compon:[4,11,25,38,46,49,117,150],compos:[11,13,21],composit:11,compound:17,comprehens:25,compress:[4,6,29,36,41,43,45,46],compression_metadata_off_heap_memory_us:166,compressioninfo:4,compressionmetadataoffheapmemoryus:46,compressionratio:46,compressor:[6,11],compris:[4,11,42],compromis:49,comput:[6,14],concaten:14,concept:[15,19,41],concern:[13,14],concret:[12,21],concurr:[6,43,92,93,94,117,132,145,146,147],concurrentmarksweep:43,condens:13,condit:[6,10,12,13,19,21,23,24,41,46,49,52],conditionnotmet:46,conf:[6,30,31,34,46,49,52],config:[46,49,52],configur:[0,4,11,21,26,29,30,33,34,36,45,46,49,50,52,63,80,117,135,150],confirm:[6,8,25,26],conflict:[13,21,24],conform:[18,25],confus:[10,12,30],conjunct:52,connect:[6,11,19,21,26,35,36,46,49,50,52,59,63,116,117],connectednativecli:46,connectedthriftcli:46,connector:[30,32,49],consecut:31,consequ:[11,13,21,43],conserv:6,consid:[0,6,13,21,28,31,38,41,43],consider:[13,21],consist:[2,11,12,13,14,25,49,51],consol:[26,31,52],constant:[10,11,15,17,21],constantli:[6,41],construct:12,constructor:[6,23],consum:[6,29,38,40,46],consumpt:40,contact:[6,11,30,36],contain:[0,6,8,9,10,11,12,13,15,16,18,19,21,26,28,40,41,42,49,52,156],contend:[6,46],content:[4,6,11,12,13,36,41,52,80],contentionhistogram:46,context:[6,9,19,21,28,30,49],contigu:13,continu:[0,6,23,26,29,41,49,50],contrarili:12,contrast:[29,49],contribut:[24,27,29,36],contributor:[24,28,29,34],control:[0,6,10,11,13,15,25,31,34,41,49,50,52],conveni:[9,12,14,17,29,51],convent:[6,11,14,15,24,27,28,29,49,50],convers:10,convert:[10,13,14,41],coordin:[0,6,11,13,14,21,30,46,133],coordinatorreadlat:46,coordinatorscanlat:46,cop:23,copi:[0,30,41],core:[6,14,43,145],correct:[10,25,34,41,42,117,130],correctli:[6,11,30,41,49],correl:[6,
10,50],correspond:[6,9,11,13,14,18,21,28,29,30,40,50],corrupt:[6,11,41,42,43,139,171],cost:[6,13,21,42],could:[6,12,21,25,28,41,52],couldn:34,count:[6,9,13,21,30,41,46,51],counter:[6,9,14,43,46,110,117,139,141,142],counter_mut:46,countercach:46,countermutationstag:46,counterwrit:[103,153],countri:[13,21],country_cod:21,coupl:[0,6],cours:[6,13],cover:[25,28,29,30,33,41,46],cpu:[6,11,40,42,45],cqerl:32,cql3:[11,14,25,29,52],cql:[6,10,11,12,13,14,16,17,19,21,29,32,35,36,41,45,49,53,150],cql_type:[11,12,13,14,19,21],cqlc:32,cqldefinit:14,cqlsh:[30,33,34,36,49,53],cqltester:[25,29],crash:43,crc32:4,crc:4,crc_check_chanc:[11,42],creat:[6,9,10,12,13,15,17,26,27,29,30,40,41,42,49,51,52,60],create_aggregate_stat:12,create_function_stat:12,create_index_stat:12,create_keyspace_stat:12,create_materialized_view_stat:12,create_role_stat:12,create_table_stat:12,create_trigger_stat:12,create_type_stat:[12,21],create_user_stat:12,createkeystor:6,createt:29,creation:[6,10,11,13,14,18,21],creator:19,credenti:[6,49],critic:[25,28,49],cross:[6,30,50],crossnodedroppedlat:46,cryptographi:6,csv:52,cuddli:21,curl:[24,34],current:[6,9,11,13,14,19,21,26,28,34,41,46,51,52,82,100,104,106,108,117,131,162,170],currentlyblockedtask:46,custom:[6,9,10,11,14,15,16,19,28,50,52],custom_option1:19,custom_option2:19,custom_typ:[14,21],cute:21,cvh:25,cycl:[6,40,80],daemon:[26,117,164],dai:[17,21,41],daili:80,danger:6,dash:12,data:[0,4,6,10,12,14,15,16,18,25,31,34,36,38,42,43,45,46,49,50,52,55,60,73,80,83,87,108,117,122,132,156,171],data_file_directori:[31,43],data_read:19,data_writ:19,databas:[12,13,15,20,41,43,49],datacent:[0,6,50,73,83,96,117,132,149],datacenter1:6,dataset:6,datastax:[6,11,14,32],datatyp:14,date:[9,10,14,15,17,139],dateof:[10,14],datestamp:17,datetieredcompactionstrategi:[11,41],daylight:21,db_user:49,dba:49,dc1:[6,11,49],dc2:[6,11,49],dcassandra:[41,46,49,51],dclocal_read_repair_ch:[0,11,41],dcom:49,dcpar:132,ddl:[11,52],ddl_statement:12,dead:[6,45,55,117],dead_node_ip:51,deb:34,debian:[30,33],debug:[31,52],decid:[9,41,50],decim:[9,14,17,21,52],decimalsep:52,declar:[11,12,14,21],decod:[17,21],decommiss:[6,51,117],decompress:42,decreas:[6,41],decrement:[13,21],decrypt:6,dedic:6,dedupl:[114,117],deem:6,deeper:28,default_time_to_l:[10,11,13],default_weight:6,defend:30,defin:[0,6,9,10,11,12,13,15,16,17,18,19,20,26,41,46,49,50,51,52,60,117],definit:[9,13,14,15,18,21,36,38],deflat:6,deflatecompressor:[11,42],degrad:6,delet:[6,9,10,11,12,15,17,19,21,28,36,52,80,87,117,169],delete_stat:[12,13],delimit:6,deliv:[0,6],deliveri:[6,117,118,137,148],delta:46,demand:49,deni:30,denorm:21,denot:12,dens:38,depend:[4,6,11,12,13,14,21,25,26,28,29,41],deploi:[30,31],deploy:[6,49,50],deprec:[6,10,11,14,15,30,41],depth:6,desc:[9,11,13,52],descend:[11,13],describ:[2,6,7,9,10,11,12,13,14,15,17,19,21,25,26,28,38,49,53,117],describeclust:117,descript:[6,10,11,14,21,46,52],descriptor:46,design:[14,40,41,43],desir:[16,21,30],destin:[40,52],detail:[5,6,10,11,12,13,14,21,30,45,49,52],detect:[2,6,11,24,30,49],detector:[85,117],determin:[0,6,13,19,38,42,50,132],determinist:30,dev:[6,8,11,30],develop:[5,8,26,28,29,36,43],dfb660d92ad8:52,dfp:171,dht:6,dictat:[6,49],did:[25,46],die:6,dies:36,diff:[15,23],differ:[0,6,11,12,13,14,15,19,21,24,26,28,29,30,31,34,41,42,43,46,51],difficult:[6,29],difficulti:21,digest:4,digit:[17,21,30],diminish:21,direct:[6,11,17,19,28,46],directli:[13,18,19,26,41],director:13,directori:[6,20,26,29,30,33,34,35,40,43,45,52,108,117,135],dirti:6,disabl:[6,11,14,41,42,49,50,52,66,67,68,69,70,71,72,73,74,83,117,140,142,144,
149,152,153,154],disable_stcs_in_l0:41,disableauditlog:117,disableautocompact:[41,117],disablebackup:117,disablebinari:117,disablefullquerylog:117,disablegossip:117,disablehandoff:117,disablehintsfordc:117,disableoldprotocolvers:117,disablesnapshot:139,disallow:6,disambigu:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],discard:[6,40],disconnect:41,discourag:[11,21,28],discov:30,discuss:[8,21,28],disk:[4,6,11,31,36,38,40,41,42,45,46,80,114,117,130,171],displai:[11,52,56,62,92,107,109,116,117,166],disrupt:[30,49],distinct:[0,9,10,13],distinguish:[9,14],distribut:[6,29,30,41,46,49,51],divid:12,djava:[26,30,49],dml:20,dml_statement:12,dmx4jaddress:46,dmx4jport:46,dns:30,dobar:23,doc:[6,25,40,49],document:[5,12,14,15,17,25,28,35,49,52],doe:[6,11,13,14,16,17,18,19,21,24,25,28,36,38,40,41,42,49,50,51,114,117],doesn:[6,14,21,23,29,30],dofoo:23,doing:[6,13,29,30,41,51],dollar:[10,12],domain:[49,138,157],don:[5,13,23,24,25,26,28,30,31,41,51,108,132],done:[6,11,13,21,28,29,31,35,41],doubl:[6,9,10,11,12,14,17,21,26,46,50],down:[6,19,41,46,50,51,71,117,132],download:[6,26,34,46],downward:19,drain:117,drive:[6,41,43],driver:[6,12,14,29,33,36,52],drop:[6,10,15,36,41,46,80],drop_aggregate_stat:12,drop_function_stat:12,drop_index_stat:12,drop_keyspace_stat:12,drop_materialized_view_stat:12,drop_role_stat:12,drop_table_stat:12,drop_trigger_stat:12,drop_type_stat:[12,21],drop_user_stat:12,droppabl:[6,41],dropped_mut:166,droppedmessag:45,droppedmetr:46,droppedmut:46,dropwizard:46,dt_socket:26,dtest:[25,27],due:[11,13,21,30,34,46,51],dump:52,duplic:25,durable_writ:11,durat:[6,10,15,19,41,46,119,167],dure:[6,11,14,20,28,29,30,41,42,46,49,51,52,139],dying:30,dynam:[6,45,49],dynamic_snitch:50,dynamic_snitch_badness_threshold:50,dynamic_snitch_reset_interval_in_m:50,dynamic_snitch_update_interval_in_m:50,dynamo:[2,36],each:[0,4,6,10,11,12,13,14,17,18,19,21,24,28,35,36,41,42,43,46,49,50,51,52,117,142,158,171],each_quorum:0,earli:[6,12,28],earlier:15,easi:[9,28],easier:[0,28],easiest:30,ec2:[6,43,50],ec2multiregionsnitch:[6,50],ec2snitch:[6,50],ecc:43,echo:34,eclips:[23,27,29],ecosystem:25,edg:[25,26],edit:[26,31,34,46,49],effect:[6,11,21,28,30,38,42,49,71,117],effectiv:46,effici:[6,11,41,50,51],effort:6,either:[6,8,12,13,14,16,21,23,24,26,28,30,34,35,40,41,46,49,169],elaps:[41,46],element:[21,52],elig:6,els:[11,13,23,28],email:[8,16,21,36],embed:29,emploi:38,empti:[6,9,10,11,12,52],emptytyp:9,enabl:[6,11,14,17,19,29,30,41,42,50,51,52,76,77,78,80,83,84,117,154],enable_user_defined_funct:14,enableauditlog:117,enableautocompact:[41,117],enablebackup:117,enablebinari:117,enablefullquerylog:117,enablegossip:117,enablehandoff:117,enablehintsfordc:117,enableoldprotocolvers:117,encapsul:[23,46],enclos:[9,10,12,14,19],enclosur:12,encod:[15,21,25,52],encount:[5,13,34,46],encourag:[6,11],encrypt:[6,45],encryption_opt:6,end:[21,28,30,41,49,52,60,95,117,132],end_token:[60,132],end_token_1:122,end_token_2:122,end_token_n:122,endpoint:[46,50,55,95,117,132,169],endpoint_snitch:50,endtoken:52,enforc:[17,49],engin:[2,11,28,36,46],enhanc:43,enough:[0,6,21,30,31,41,50,52],enqueu:6,ensur:[11,13,18,20,30,42,49],entail:30,enter:[30,52],entir:[0,4,6,14,21,30,38,41,49,5
1,52],entri:[4,6,9,13,16,28,36,46,49,52],entropi:6,entry_titl:13,enumer:19,env:[30,31,46,49],environ:[0,5,6,26,30,33,43],ephemer:43,epoch:21,equal:[0,6,10,11,13,21,23,41],equival:[10,11,12,13,14,19,24,41],eras:11,erlang:33,erlcass:32,err:52,errfil:52,error:[6,11,12,14,16,18,19,21,23,25,26,34,36,52,133],escap:[12,17],especi:[28,30,41,52],essenti:[0,6,14,30,52],establish:[6,19,50],estim:46,estimatedcolumncounthistogram:46,estimatedpartitioncount:46,estimatedpartitionsizehistogram:46,etc:[6,18,21,23,25,30,31,34,41,46,49],eth0:6,eth1:6,ev1:21,even:[0,6,10,12,13,14,17,21,28,36,41,49,52,63,139,170],evenli:6,event:[13,21,41,52,132],event_typ:13,eventu:[4,13],ever:[23,29,30,43],everi:[4,6,11,13,14,18,19,20,21,35,38,41,43,52],everyth:[12,23,26,30],evict:46,evil:[6,14],exact:[11,12,14,42],exactli:[11,14,18,49],exampl:[0,6,11,13,14,17,19,21,29,34,35,41,49,50,52],exaust:6,excalibur:11,exce:[4,6,17,23],exceed:[6,43],excel:11,excelsior:11,except:[0,13,14,17,25,27,28,29,30,46],excess:38,exchang:[6,30],exclud:[46,76,100,117],excluded_categori:76,excluded_keyspac:76,excluded_us:76,exclus:[21,29,132],execut:[6,9,11,12,13,14,19,26,29,35,41,46,49,52],exhaust:6,exhibit:13,exist:[6,9,10,11,12,13,14,16,17,18,19,20,21,25,26,29,36,38,41,42,50,51],expect:[6,10,12,21,23,25,28,41,49],expens:[6,38,50],experi:[6,41],experienc:6,experiment:132,expir:[6,10,11,13,21,45,49,139],expiri:41,explain:[23,25,28,34],explicit:10,explicitli:[6,10,13,17,21,23,41,50],explor:26,expon:10,exponenti:46,expos:[6,9,49],express:[0,6,10,12,50],expung:30,extend:[21,28,29,108,171],extens:[6,11,49],extern:[46,51],extra:[0,6,11,41],extract:[23,34],extrem:[6,13],fact:[21,29,30],factor:[0,6,11,36,42,49],fail:[6,13,14,21,36,41,52,117,133],failur:[2,6,28,36,41,43,46,50,85,117,171],failuredetector:117,fairli:[6,40,49],fake:14,fall:6,fallback:[6,50],fals:[6,11,12,17,19,21,38,40,41,42,46,49,51,52,139],famili:[6,43,101,119,156,167],fanout_s:41,fast:[6,38,41],faster:[6,28,42,43,117,142],fastest:[6,24,50],fatal:6,fault:30,fav:[16,21],fax:21,fct:14,fct_using_udt:14,fear:30,feasibl:21,featur:[25,26,28,49],fed:6,feedback:28,feel:24,fetch:[6,11,52],few:[41,43],fewer:[6,28],fffffffff:[17,21],field:[10,13,14,17,21,23,38],field_definit:21,field_nam:13,fifteen:46,fifteenminutecachehitr:46,figur:41,file:[4,7,11,26,27,28,29,30,31,33,36,38,41,43,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],filenam:[11,52,101,117],filesystem:49,fill:[40,41],fillion:13,filter:[4,9,11,18,36,43,45,46,132],finalfunc:[9,14],find:[6,26,29,34,38,41,51,95,99],fine:[6,28,49],finer:6,finish:[26,28,117,134],fip:[6,49],fire:20,firefox:52,firewal:[6,30,31,50],first:[5,6,11,13,14,21,28,30,33,41,43,49,52,132,139],firstnam:13,fit:[6,41,46],five:46,fiveminutecachehitr:46,fix:[6,10,12,24,30,41,43],flag:[6,13,24,25,28,40,46,51],flexibl:49,flight:[6,49],flip:11,floor:6,flow:[6,19,25],fluent:32,flush:[4,6,40,41,43,46,75,117,156],fname:14,focu:28,folder:[26,163],follow:[0,5,6,8,9,10,11,12,13,14,17,18,19,21,23,24,25,26,28,29,30,31,34,36,40,41,42,46,49,50,52,57,60,67,77,86,87,123,132,139,153,158,170,171],font:12,foo:[11,12,40],footprint:[117,119],forc:[4,6,11,13,52,60,63,117,131,132,133],forcefulli:[55,117],foreground:[31,3
4],forev:41,forget:5,fork:28,form:[6,10,11,12,14,19,62,116,166],formal:12,format:[6,10,17,21,24,25,27,28,46,52,61,80,101,122,166,168],former:[6,46],forward:[6,11],found:[5,12,14,15,28,29,31,35,49,52,163,171],four:13,fqcn:29,fraction:6,frame:6,framework:[25,29],franc:[13,21],free:[6,11,21,24,26,46],freed:4,freenod:8,frequenc:[6,40],frequent:[6,29,36,41,49],fresh:51,friendli:[6,21,29],from:[0,4,6,9,11,12,13,14,15,17,18,19,21,24,27,28,29,33,35,36,38,40,41,42,43,46,49,50,51,54,55,57,58,60,65,67,73,76,77,83,86,87,90,92,95,99,101,103,107,108,115,117,119,122,123,124,126,127,130,131,132,133,135,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],fromjson:15,froom:21,frozen:[9,10,11,13,14,21],fruit:[21,28],fsync:[6,46],full:[6,9,11,13,16,19,28,34,35,41,42,49,52,70,80,117,123,132,135],full_nam:166,fulli:[6,11,12,14,45,49],function_cal:12,function_nam:[13,14,19],fundament:17,further:[5,6,11,18,21,41,45,49],furthermor:[10,13,49],futur:[6,9,10,11,21,28,82,117,162],g1gc:43,game:[14,21],garbag:[11,43,45,46,87],garbage_collect:163,garbagecollect:117,gather:41,gaug:46,gaurante:0,gc_grace_second:11,gc_type:46,gce:[30,43],gcg:6,gcstat:117,gener:[0,2,4,6,8,11,12,13,14,17,21,25,26,27,28,30,43,49,52,103,139,153],genuin:23,get:[6,8,24,26,28,30,34,36,38,41,92,93,94,97,100,117],getbatchlogreplaythrottl:117,getcompactionthreshold:117,getcompactionthroughput:117,getconcurr:117,getconcurrentcompactor:117,getconcurrentviewbuild:117,getendpoint:117,getint:14,getinterdcstreamthroughput:117,getlocalhost:[6,30],getlogginglevel:117,getlong:14,getmaxhintwindow:117,getpartition:23,getreplica:117,getse:117,getsstabl:117,getstr:14,getstreamthroughput:117,gettempsstablepath:23,getter:[19,23],gettimeout:117,gettraceprob:117,gib:[62,116,166],gist:23,git:[5,24,26,28],github:[23,24,28,29],give:[18,19,21,28,29,36,52],given:[0,6,11,12,13,14,16,21,28,38,41,49,51,52,58,60,65,67,77,90,99,103,117,123,143,150,154,158,165],global:[6,52,117,141],gmt:21,goal:[6,41],gocassa:32,gocql:32,going:[6,28,41],gone:6,good:[6,23,28,29,30,52],googl:[23,52],gori:30,gossip:[2,6,30,46,50,71,81,105,117,161],gossipinfo:117,gossipingpropertyfilesnitch:[6,50],gossipstag:46,got:6,gp2:43,gpg:34,grace:45,grai:21,grain:49,grammar:[11,12],grant:[6,9,49],grant_permission_stat:12,grant_role_stat:12,granular:[6,87],graph:19,gravesit:11,great:[28,41],greater:[0,6,21,30,50,146,147],greatli:6,green:21,group:[6,10,11,19,41,46,49,50],group_by_claus:13,grow:21,guarante:[0,2,11,13,14,21,28,36,38,41,51,52],guid:[6,26],guidelin:[10,25,43],had:[9,10,41],half:[6,24,30],hand:[6,13,43],handl:[6,14,25,27,28,30,40,43,46,49,80],handoff:[6,46,51,72,106,117,148],handoffwindow:117,hang:28,happen:[6,13,23,24,28,36,41,46,50],happi:28,happili:43,hard:[6,14,41,43],harder:6,hardwar:[6,36,45],has:[0,4,6,10,11,12,13,14,18,19,21,23,28,30,41,43,46,49,50,52],hash:[4,6,41],hashcod:23,haskel:33,hasn:80,have:[0,5,6,9,10,11,12,13,14,15,18,19,21,23,24,25,26,28,29,30,31,34,38,41,42,43,46,49,50,80,139],haven:28,hayt:32,hdd:[6,43],head:28,header:[26,52],headroom:6,heap:[4,6,26,31,36,38,42,43,46],heap_buff:6,heavi:6,heavili:43,held:[6,43,117,121],help:[5,6,10,28,29,35,54,56,117,155],helper:29,henc:[5,6,11,21],here:[6,24,29,30,32,41,46,49],hex:[12,17,101],hexadecim:[10,12,101],hibern:51,hidden:51,hide:[23,25],hierarch:19,hierarchi:19,high:[0,6,30,41,43],higher:[0,19,28,38,41,46,51,119,167],highest:41,highli:[28,30,43,49],hint:[0,6,11,12,30,31,36,45,46,51,72,73,82,83,98,106,117,118,137,148,151,162,169],hintedhandoff:[6,45],hintedhandoffmanag:46
,hints_creat:46,hints_directori:31,hints_not_stor:46,hintsdispatch:46,histogram:[41,46,117,120,165],histor:28,histori:[23,59,61,117],hit:[6,41,46],hitrat:46,hoc:29,hold:[0,6,10,13,19,30,41,52],home:[21,52],hope:41,hopefulli:28,host:[6,31,36,46,50,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],hostnam:[6,30,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],hot:[6,46],hotspot:11,hotspotdiagnost:49,hottest:6,hour:[6,21,28,41],hourli:80,how:[0,5,6,7,8,11,12,21,25,26,27,28,29,33,35,36,41,42,46,50,52,80],howev:[6,9,10,11,12,13,15,17,18,21,28,29,30,31,34,38,42,43,49,52],hsha:6,html:6,http:[6,23,24,26,34,46],httpadaptor:46,hub:30,human:[11,62,116,166],hypothet:24,iauthent:6,iauthor:6,icompressor:42,idea:[6,14,27,28,29,30,41,52],ideal:[6,29,41,49],idempot:[13,21],idemptot:21,ident:0,identifi:[6,9,10,11,13,14,15,16,19,20,21],idiomat:8,idl:6,ieee:[17,21],iendpointsnitch:[6,50],ignor:[0,6,10,14,21,23,52,166],iinternodeauthent:6,illeg:14,illustr:19,imag:21,imagin:41,immedi:[6,11,21,28,38,42,57,117],immut:[4,30,42,43],impact:[6,11,25,41,45,49],implement:[6,10,13,14,18,19,23,29,30,40,42,49,50],implementor:6,impli:[11,12,21],implic:[0,49],implicitli:14,import_:52,imposs:41,improv:[0,6,11,21,28,29,38,41,43,50,51,52],inact:30,includ:[4,6,10,11,12,13,18,19,21,23,28,40,41,43,46,49,52,76,133,170],included_categori:76,included_keyspac:76,included_us:76,inclus:[28,132],incom:6,incomingbyt:46,incompat:[6,10],incomplet:25,inconsist:[0,30],incorrect:30,increas:[6,11,30,38,41,42,43,46,50,51,132],increment:[6,10,13,21,28,41,68,78,117,133,139,159],incur:[13,21,46],indent:23,independ:[11,41,43,49],index:[4,6,9,10,11,12,13,15,21,36,41,45,52,117,123],index_build:163,index_identifi:16,index_nam:16,index_summari:163,index_summary_off_heap_memory_us:166,indexclass:16,indexedentrys:46,indexinfocount:46,indexinfoget:46,indexnam:123,indexsummaryoffheapmemoryus:46,indic:[5,6,12,13,23,28,30,132],indirectli:13,individu:[6,10,14,21,28,29,43,49],induc:13,inequ:[10,13],inet:[9,11,14,17,21],inetaddress:[6,30],inexpens:43,infin:[9,10,12],influenc:11,info:[6,31,46,65,117],inform:[4,6,12,13,21,35,49,50,51,52,56,59,85,105,107,108,109,116,117,138,155,157],ingest:6,ingestr:52,inher:[11,21],inherit:19,init:46,initcond:[9,14],initi:[6,14,23,25,40,46,49,52,117,150],initial_token:51,input:[9,10,14,17,21,25,52],inputd:21,inreleas:34,insensit:[11,12],insert:[6,9,10,11,12,14,15,16,19,21,30,33,36,43,52],insert_stat:[12,13],insid:[6,11,12,13,21,23,52],inspect:[6,26,52],instabl:6,instal:[6,20,30,33,36,52],instanc:[6,10,11,12,13,14,16,18,19,20,21,26,29,30,40,41,43,46],instantan:46,instanti:10,instantli:6,instead:[10,11,13,18,21,23,30,41,138,157],instruct:[6,8,11,24,26,36],instrument:49,intasblob:13,integ:[0,10,11,12,13,17,21,46],integr:[27,29,36],intellij:[23,27],intend:[25,49],intens:[6,29,30],intent:25,inter:
[6,96,117,149],interact:[29,35,52],interest:[0,41,49],interfac:[6,10,14,23,30,31,42,49],intern:[6,9,11,13,18,21,25,30,43,46],internaldroppedlat:46,internalresponsestag:46,internet:6,internod:[6,30],internode_encrypt:[6,49],internodeconnect:[103,153],internodeus:[103,153],interpret:[10,21,52],interrupt:30,interv:[6,9,46],intra:[6,46,50],intrins:21,introduc:[6,10,17,28,51],introduct:[10,19,29],intvalu:14,invalid:[6,13,19,25,49,108,110,111,112,117],invalidatecountercach:117,invalidatekeycach:117,invalidaterowcach:117,invertedindex:20,investig:6,invoc:14,invok:[24,34,49,171],involv:[6,13,41,42,49],ioerror:23,ip1:6,ip2:6,ip3:6,ip_address:55,ipv4:[6,17,21,30],ipv6:[6,17,21],irc:[5,28,36],irolemanag:6,irrevers:[11,21],isn:[0,18,23,28,30],iso:21,isol:[6,11,13],issu:[0,6,19,24,28,29,30,38,41,42,132],item:[12,21,25,26],iter:[0,6],its:[4,6,11,12,13,14,21,26,30,41,46,49,50,51],itself:[6,11,16,30,34],iv_length:6,jaa:49,jacki:24,jamm:26,januari:21,jar:[14,23,26,46],java7:49,java:[6,14,20,21,23,26,28,33,34,36,40,41,43,46,49,117,155],javaag:26,javadoc:[23,25],javas:6,javascript:[6,14],javax:49,jbod:43,jce8:6,jce:6,jcek:6,jconsol:[36,41,49],jdk:6,jdwp:26,jenkin:[26,29],jetbrain:26,jira:[5,6,25,28,29,40],jkskeyprovid:6,jmc:[41,49],jmx:[6,19,36,45,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],jmx_password:49,jmx_user:49,jmxremot:49,job:[28,57,87,130,132,139,170],job_thread:132,john:[13,21],join:[6,8,13,36,41,49,51,117],joss:13,jpg:21,jsmith:21,json:[9,10,13,15,36,41,42,61,166,168],json_claus:13,jsr:[6,14],jsse:6,jsserefguid:6,judgement:23,junit:[23,26,29],jurisdict:6,just:[6,14,19,26,28,29,30,41,49],jvm:[6,20,26,30,31,45,49,51],jvm_extra_opt:26,jvm_opt:[31,49],jvmstabilityinspector:25,keep:[6,8,11,23,28,30,41,46,108],keepal:[6,30],kei:[4,6,9,10,13,14,17,21,29,30,34,40,41,42,43,46,49,57,95,99,101,111,117,121,141,142,166],kept:[6,41,46],kernel:[6,30],key_alia:6,key_password:6,key_provid:6,keycach:46,keycachehitr:46,keyserv:34,keyspac:[0,6,9,10,12,14,15,16,19,21,36,38,41,42,45,49,51,52,57,58,60,65,67,76,77,86,87,90,95,99,101,108,117,119,121,122,123,124,130,132,138,139,143,156,157,158,165,166,167,170,171,173],keyspace1:[6,19],keyspace2:6,keyspace_nam:[11,14,19,21,41],keystor:[6,49],keystore_password:6,keystorepassword:49,keyword:[10,11,13,14,15,16,17,21],kib:[62,116,166],kick:[117,134],kill:[6,34],kilobyt:42,kind:[11,12,21,28,40,41],kitten:21,knife:[117,155],know:[6,13,21,23,41],known:[19,21,32,35,38,41],ks_owner:49,ks_user:49,ktlist:156,kundera:32,label:[21,28],lag:46,land:42,landlin:21,lang:[36,46,49],languag:[6,9,10,12,14,20,21,32,35,36,52],larg:[6,11,13,14,21,29,36,41,43,46,52],larger:[6,29,30,41,42,43],largest:[6,46],last:[6,12,13,14,15,28,41,46,55,117],lastli:[13,21],lastnam:13,latenc:[0,6,30,46,50],later:[0,11,21,23,28,30],latest:[0,28,34,41,52,171],latter:12,layer:43,layout:11,lazi:11,lazili:11,lead:[6,10,21,41],learn:[6,29,30,52],least:[0,6,11,12,13,18,30,41,43],leav:[6,12,13,23,29,30,52],left:[6,17,41],legaci:[6,19],legal:10,length:[4,6,10,17,21,25,41],less:[6,21,28,30,38,43],let:[6,41],letter:17,level:[6,10,11,13,19,23,25,31,43,45,46,49,52,97,108,117,150],leveledcompactionstrategi:[11,38,41],lexic:30,lib:[6,20,25,26,34],libqtca
ssandra:32,librari:[8,25,29,32,46,52],licenc:25,licens:[25,26,28],life:28,lifespan:43,like:[0,6,12,13,14,17,21,23,24,25,28,29,30,36,41,42,43,49],likewis:19,limit:[6,9,10,11,18,19,21,30,40,41,42,49],line:[12,23,28,29,31,34,35,49,53,55,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],linear:43,linearli:38,link:[6,8,11,12,28,29,34],linux:[6,30],list:[4,5,6,9,10,11,12,13,14,17,26,28,29,31,34,35,36,41,49,51,52,55,57,58,59,60,65,67,73,76,77,83,86,87,90,92,95,99,100,101,103,107,108,114,115,117,119,122,123,124,127,130,131,132,133,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],list_liter:[13,21],list_permissions_stat:12,list_roles_stat:12,list_users_stat:12,listarg:14,listen:[6,36,46],listen_address:[31,35,36],listen_interfac:31,listsnapshot:117,liter:[10,12,14,17,52],littl:23,live:[13,36,41,46,51],livediskspaceus:46,livescannedhistogram:46,livesstablecount:46,load:[0,6,11,20,21,36,45,46,49,50,51,109,117,124,132,157],local:[0,6,11,26,28,29,35,43,46,49,50,52,117,126,132,136,169],local_jmx:49,local_on:[0,49,52],local_quorum:[0,52],local_read_count:166,local_read_latency_m:166,local_seri:52,local_write_latency_m:166,localhost:[6,35,49],locat:[6,33,34,42,46,49,50,52,163],lock:[6,30,46],log:[6,11,13,25,29,33,34,36,40,45,46,49,66,70,76,80,97,117,132,135,150,163],log_al:41,logback:31,logger:[23,31,76],logic:[6,20],login:[6,9,19,29,49],lol:21,longer:[6,9,10,30,41,51,57,117],look:[6,12,24,28,29,41,43],lookup:46,loop:23,lose:[6,41,51],loss:[6,21],lost:[41,51],lot:[6,35,36],low:[6,28,117,119],lower:[0,6,11,12,13,19,30,38,41,46,51],lowercas:12,lowest:[28,41],lz4:6,lz4compressor:[6,11,42],macaddr:9,machin:[6,11,29,30,46,49,50,51],made:[6,21,36,38,43,49],magnet:6,magnitud:13,mai:[0,4,6,9,10,11,13,14,16,17,19,21,25,26,28,29,30,34,38,41,49,50,51,52,139],mail:[5,28,36],main:[0,14,18,26,30,33,34,49,51,52],main_actor:13,mainli:[6,11],maintain:[6,28],mainten:46,major:[0,10,28,49,60,117],make:[0,6,8,9,20,21,23,26,28,29,30,31,34,41,49,51,52,114,117],man:6,manag:[6,19,26,29,46,49,51,56,117],mandatori:[11,14],mani:[0,6,11,23,25,28,41,42,43,46,49,52,57,60,67,77,80,86,87,132,139,158,170,171],manipul:[12,15,29,36],manual:[6,24,30],map:[6,9,10,11,13,14,17,19,36,46],map_liter:[11,16,19,21],mar:21,mark:[6,19,41,51,71,117],marker:[6,11,12,25,30],match:[6,12,13,14,17,19,46,50],materi:[6,10,11,12,15,36,46,52,117,173],materialized_view_stat:12,matter:[11,30],max:[6,36,41,46,49,52,80,90,98,117,132,143,151],max_hint_window_in_m:51,max_log_s:80,max_map_count:30,max_mutation_size_in_kb:[6,30],max_queue_weight:80,max_thread:6,max_threshold:41,maxattempt:52,maxbatchs:52,maxfiledescriptorcount:46,maxhintwindow:151,maxim:43,maximum:[4,6,14,38,46,52,80,92,117,139,145],maximum_live_cells_per_slice_last_five_minut:166,maximum_tombstones_per_slice_last_five_minut:166,maxinserterror:52,maxoutputs:52,maxparseerror:52,maxpartitions:46,maxpools:46,maxrequest:52,maxrow:52,maxthreshold:143,maxtimeuuid:10,mayb:13,mbean:[6,19,41,46,49],mbeanserv:19,mbp:6,mct:6,mean:[6,9,11,12,13,14,17,18,21,36,41,46,50,52,132],meaning:13,meanpartitions:46,meant:[21,30,46],measur:[6,25,29,46,51,52],mechan:40,median:46,meet:[6,25],megabyt:6,member:23,membership:6,memlock:30,memori:[4,6,11,36,38,41,45],memory_pool:46,memtabl:[2,6,38,40,41,42,43,46,156],memtable_allocation_typ:4,memtable_cell_count:166,memtable_cleanup_threshold:4,memtable_data_s:16
6,memtable_off_heap_memory_us:166,memtable_switch_count:166,memtablecolumnscount:46,memtableflushwrit:46,memtablelivedatas:46,memtableoffheaps:46,memtableonheaps:46,memtablepool:6,memtablepostflush:46,memtablereclaimmemori:46,memtableswitchcount:46,mention:[6,21,28,46,49],menu:26,mere:23,merg:[24,28,38,42,43,45],mergetool:24,merkl:[6,46],mess:[28,29],messag:[6,21,25,28,34,36,46],met:13,meta:[13,46],metadata:[4,19,42,43,46],metal:6,meter:46,method:[10,13,14,19,23,25,26,29,36,49],metric:[6,45],metricnam:46,metricsreporterconfigfil:46,mib:[6,62,116,166],microsecond:[6,11,13,21,46],midnight:21,might:[6,13,41,46,55,57,58,60,65,67,73,77,80,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],migrat:[6,46,50],migrationstag:46,millisecond:[6,10,21,46,119,139,167],min:[6,30,40,41,46,52,90,117,143],min_sstable_s:41,min_threshold:41,minbatchs:52,mind:6,minim:[6,41,43],minimum:[6,11,14,31,46],minor:[10,12,45],minpartitions:46,minthreshold:143,mintimeuuid:10,minut:[6,21,41,46,80],misbehav:41,misc:[103,153],miscelen:46,miscellan:6,miscstag:46,miss:[11,41,46,51],misslat:46,mistaken:[55,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],mitig:[6,49],mix:[6,41],mmap:30,mnt:16,mock:29,mode:[6,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],model:[11,15,19,28,36,49],moder:43,modern:43,modif:[13,19],modifi:[6,9,10,11,14,19,21,28,38,41,42],modification_stat:13,modul:52,modular:25,moment:[6,28],monitor:[30,36,45,49,50,56,117],monkeyspeci:[11,18],monkeyspecies_by_popul:18,month:21,more:[0,4,6,10,11,12,13,21,23,28,29,31,35,36,38,43,45,46,49,50,51,60,86,87,117,119,132,139,155,167,171],moreov:13,most:[6,11,12,13,21,26,28,29,30,31,41,42,43,49,52,59,117,167],mostli:[6,11,21],motiv:[29,41],mount:6,move:[6,28,30,36,40,45,46,117],movement:45,movi:[13,21],movingaverag:6,mtime:11,much:[0,5,6,11,38,41,50],multi:[0,6,12,25],multilin:27,multipl:[4,6,10,11,12,13,14,21,23,25,26,28,30,31,41,43,50,122],multipli:41,murmur3partit:4,murmur3partition:[6,14,52],must:[0,6,10,11,13,14,17,18,19,23,28,29,30,31,41,46,49,51,52,156],mutant:16,mutat:[0,6,13,30,40,46,171],mutationstag:46,mv1:18,mx4j:46,mx4j_address:46,mx4j_port:46,mx4jtool:46,mxbean:19,myaggreg:14,mycolumn:17,mydir:52,myevent:13,myfunct:14,myid:12,mykei:17,mykeyspac:14,mytabl:[11,14,17,20],mytrigg:20,nairo:21,name:[6,9,10,11,12,13,14,16,17,18,19,20,21,25,26,28,29,30,31,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],names_valu:13,nan:[9,10,12],nanosecond:21,nathan:13,nativ:[6,10,12,15,17,25,30,35,46,52,69,79,117,123,160],native_trans
port_min_thread:6,native_transport_port:31,native_transport_port_ssl:49,native_typ:21,natur:[11,21,23,41,42],nearli:26,neccessari:6,necessari:[6,11,14,19,28,34,42,49],necessarili:[6,12,31],need:[0,6,10,11,12,13,19,21,23,25,26,28,29,30,31,34,35,38,41,42,43,49,50,52,95,99],neg:6,neglig:13,neighbour:41,neither:[18,21,49],neon:26,nerdmovi:[13,16],nest:[12,13,23],net:[6,26,30,33,34,49],netstat:[51,117],network:[6,13,30,43,49,50,116,117,120],networktopologystrategi:[11,49],never:[6,10,11,12,13,14,21,23,30,41],nevertheless:13,new_rol:19,new_superus:49,newargtuplevalu:14,newargudtvalu:14,newer:[41,43,52,87],newest:[11,41],newli:[11,21,28,40,117,124],newreturntuplevalu:14,newreturnudtvalu:14,newtuplevalu:14,newudtvalu:14,next:[6,30,35,41,52],ngem3b:13,ngem3c:13,nifti:24,nio:[6,14,46],no_pubkei:34,node:[0,4,6,11,13,14,20,21,25,29,31,32,35,36,38,40,41,43,45,46,50,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],nodej:33,nodetool:[34,36,38,42,45,49,51,53,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],nologin:9,non:[6,9,10,11,12,13,14,19,21,30,38,42,46,49,52],none:[6,11,13,21,49],nonsens:19,nor:[11,18,21],norecurs:[9,19],norm:46,normal:[14,17,26,30,34,46,51,52],noschedul:6,nosuperus:[9,19],notabl:[14,17],notat:[10,12,13,52],note:[0,5,6,10,11,12,13,14,15,17,19,21,24,28,30,41,49],noth:[6,11,14,24,29,30],notic:6,notif:8,notion:[11,12],now:[10,23,26,41,51],ntp:6,nullval:52,num_cor:52,num_token:51,number:[0,6,10,11,12,13,14,17,18,21,26,28,29,30,34,38,41,42,46,49,51,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],number_of_cor:6,number_of_dr:6,number_of_partitions_estim:166,numer:[15,38],numprocess:52,object:[6,11,12,25],objectnam:19,observ:23,obsolet:[6,43,46],obtain:[12,49],obviou:[14,24],obvious:11,occup:13,occupi:[6,46],occur:[10,12,13,20,21,30,41,43,46],occurr:21,octet:[6,50],odd:28,off:[4,6,30,42,46,49,52,117,134],off_heap_memory_used_tot:166,offer:[15,29,42],offheap:[38,43],offheap_buff:6,offheap_object:6,offici:[36,52],offset:[4,46],often:[6,11,12,23,28,29,30,41,42,43,49,50,52,80],ohc:6,ohcprovid:6,okai:23,old:[4,6,41,51,74,84,117],older:[6,14,26,34,41,43,52],oldest:[6,11],omit:[6,10,11,13,17,21,150],onc:[4,6,11,12,14,21,24,26,28,29,30,40,41,42,43,46,49,51,52],one:[0,4,6,9,10,11,12,13,14,17,18,19,21,23,26,28,29,31,36,38,41,43,46,49,50,51,52,57,60,67,77,86,87,103,117,132,139,153,156,158,170,171],oneminutecachehitr:46,ones:[6,11,12,13,14,18,19,46],ongo:[41,51],onli:[0,6,9,11,12,1
3,14,17,18,19,21,23,28,29,31,36,38,41,42,43,46,49,50,52,132,156,166],onlin:52,only_purge_repaired_tombston:41,onto:[4,41],open:[5,6,26,49,50],openfiledescriptorcount:46,openjdk:34,oper:[0,6,10,11,13,16,18,19,21,23,36,38,40,43,46,49,51,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],operatingsystem:46,opertaion:6,opportun:38,ops:30,opt:14,optim:[6,11,12,13,30,41,43,51],optimis:132,option1_valu:19,option:[4,6,9,10,12,13,14,16,19,21,26,29,30,34,42,43,45,49,51,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],oracl:[6,34,49],order:[0,4,6,9,10,14,18,21,23,28,30,38,40,41,50,51,52],ordering_claus:13,orderpreservingpartition:6,org:[6,14,20,23,26,29,30,34,41,42,46,49],organ:[4,26,32],origin:[9,24,28,139],orign:13,other:[0,4,6,10,12,13,14,18,19,21,24,26,28,31,36,38,41,43,46,49,50,51,117,122,133],other_rol:19,otherwis:[0,9,12,13,16,21,92],our:[5,6,8,24,26,28,41],ourselv:24,out:[6,12,23,26,28,41,46,49,50,51,132],outbound:6,outboundtcpconnect:6,outgo:6,outgoingbyt:46,outlin:49,outofmemoryerror:36,output:[14,19,25,26,38,41,52,60,61,166,168],outsid:[11,20,21],over:[0,6,11,21,30,41,46,49,50,51],overal:14,overflow:[17,139],overhead:[6,30,42,46,51],overidden:49,overlap:[0,41],overload:[6,14,30],overrid:[6,23,49,51,139],overridden:[6,11],overview:[2,36,45],overwhelm:6,overwrit:[42,43],overwritten:[46,87],own:[0,6,11,12,14,21,28,30,34,41,42,46,49,95,101,108,117,171],owner:21,ownership:[41,138],p0000:21,pacif:21,packag:[26,30,31,33,35,52],packet:6,page:[6,21,26,28,29,30,43,46],paged_slic:46,pages:52,pagetimeout:52,pai:23,pair:[6,11,19,21,41,49],parallel:[29,41,132],paramet:[6,14,23,25,26,31,38,43,50,51,117,150],paranoid:6,parenthesi:[11,52],parnew:43,pars:[6,12,40,52],parser:[9,10,40],part:[0,5,6,11,13,14,18,21,25,26,28,29,30,50,51,52],parti:[25,46],partial:4,particip:[0,20],particular:[11,12,13,14,17,19,21,30,43,46,49],particularli:[12,21,49],partit:[4,6,10,13,14,30,38,41,43,46,87,95,99,117,139,167],partition:[4,10,13,14,52,64,117,132],partition_kei:[11,13],partli:13,pass:[25,28,31,52,155],password:[6,9,13,19,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],password_a:19,password_b:19,passwordauthent:[6,49],passwordfilepath:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,13
3,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],past:[6,46],patch:[10,13,23,24,25,27,29,36],path:[5,6,16,25,34,38,41,42,43,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],patter:19,pattern:[6,19,21],paus:[6,30,117,118],pausehandoff:117,paxo:[13,46,52],peer:[6,46],peerip:46,penalti:[6,13],pend:[41,46,117,131],pending_flush:166,pendingcompact:46,pendingflush:46,pendingrangecalcul:46,pendingtask:46,pendingtasksbytablenam:46,pennsylvania:21,peopl:[28,30],per:[0,4,6,10,11,13,23,24,28,30,38,40,41,42,46,49,52,117,140,148],percent:46,percent_repair:166,percentag:[6,46,50],percentil:46,percentrepair:46,perdiskmemtableflushwriter_0:46,perfect:14,perform:[6,11,13,19,21,24,25,27,30,31,38,41,43,46,49,50,52,132],period:[6,43,46,49,117,119],perman:[11,30,41,43],permiss:[6,9,12,29,49],permit:[6,19,40,49],persist:[4,30,38,43,49],perspect:30,pet:21,pgrep:34,phantom:32,phase:[51,52],phi:6,phone:[13,21],php:33,physic:[0,6,11,30,43,50],pick:[24,28,30,41,49,51,122],pid:[30,34],piec:[12,41,46],pile:6,pin:[6,50],ping:28,pkcs5pad:6,pkill:34,place:[5,6,16,20,23,24,28,40,41,46,49,52,117,124],placehold:[14,52],plai:[14,21],plain:4,plan:[11,24,28],platform:19,platter:[6,43],player:[14,21],playorm:32,pleas:[5,6,11,13,14,15,21,23,26,29,30],plu:[14,41,46],plug:6,pluggabl:[19,49],plugin:46,poe:21,point:[6,10,17,21,23,26,36,49,52,95,117],pointer:14,polici:[6,28,49,171],pool:[6,34,46,117,145,168],popul:[11,18],popular:[26,43],port:[6,26,31,36,46,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],portion:[43,52],posit:[4,6,10,11,21,38,46,51],possbili:6,possess:19,possibl:[6,10,11,13,14,17,19,21,25,28,29,30,38,41,43,46,49,51],post:[13,117,142],post_at:13,posted_at:13,posted_bi:11,posted_month:11,posted_tim:11,potenti:[0,6,9,11,12,14,25,41,43,49,51,139],power:6,pr3z1den7:21,practic:[11,12,13,49],pre:[6,17,21,43,49],preced:30,precis:[10,17,21,41],precondit:46,predefin:11,predict:13,prefer:[0,6,11,12,21,23,28,49,50],preferipv4stack:26,prefix:[11,12,21],prepar:[6,14,15,46],preparedstatementscount:46,preparedstatementsevict:46,preparedstatementsexecut:46,preparedstatementsratio:46,prepend:21,prerequisit:33,present:[12,13,18,46],preserv:[6,17,19],press:34,pressur:[6,46],pretti:52,prevent:[6,29,40],preview:132,previou:[6,10,11,21,41,51],previous:6,previsouli:[83,117],primari:[9,10,13,14,21,29,40,41,42,49,51],primarili:[6,11],primary_kei:[11,18],print:[52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,
147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],prior:[6,13,19,21],prioriti:28,privat:[6,23,49,50],privileg:[19,34,49],probabilist:[38,42],probabl:[6,11,29,38,41,104,117,154],problem:[5,6,14,24,25,30,49],problemat:21,proc:[6,30],proce:[25,42,51],procedur:[13,49],process:[0,6,14,24,25,26,28,29,30,34,40,42,43,46,49,51,52,56,92,117,118,137,145],prod_clust:52,produc:[13,14,41,80],product:[6,28,30,43,50],profil:[13,117,119],profileload:117,program:[14,29],progress:[23,24,28,38,45,117,173],project:[23,29,46],promin:11,prompt:52,propag:[6,11,14,23,25,50],proper:[11,21,30,49],properli:[6,25],properti:[6,11,19,33,40,41,49,50,51],propertyfilesnitch:[6,50],proport:[6,13],proportion:[6,89,117,140],propos:[6,46],protect:[6,43],protocol:[6,25,30,35,46,49,52,59,69,74,79,84,117,160],provid:[0,5,6,11,12,13,14,15,17,21,26,28,35,40,41,42,43,46,49,50,51,53,116,117,127,131],proxim:[6,50],proxyhistogram:117,prv:132,ps1:49,ps22dhd:13,pt89h8m53:21,pull:[29,41,46,132],purg:43,purpos:[11,12,13,21,43,49],push:[24,28,46],put:[15,28,31,41,51,108,132],pwf:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],python:[14,28,29,33,34,52],quak:[14,21],qualifi:[6,11,14,28],qualiti:49,quantiti:21,queri:[6,10,11,12,13,14,16,18,19,33,36,41,46,52,70,80,117,135],question:[8,19,36],queu:[6,46],queue:[6,46,80],quick:[108,171],quickli:[30,41],quill:32,quintana:21,quit:[41,52],quorum:[0,49,52],quot:[9,10,11,12,14,17,19,52],quotat:19,quoted_identifi:12,quoted_nam:11,race:[21,24],rack1:6,rack:[0,6,49,50],rackdc:[6,50],rackinferringsnitch:[6,50],raid0:43,raid1:43,raid5:43,rain:12,rais:[12,30],raison:9,ram:[38,42,43],random:[11,14,30,51],randomli:[0,6,51],randompartition:[6,13,14],rang:[2,6,10,11,13,21,25,41,45,46,52,60,65,103,117,122,132,153],range_slic:46,rangekeysampl:117,rangelat:46,rangemov:51,rangeslic:46,rapid:43,rare:[10,38],raspberri:43,rate:[6,11,46,49,52],ratebasedbackpressur:6,ratefil:52,rather:[13,30,41,43],ratio:[6,42,43,46],raw:[6,14],reach:[6,28,30,40,41],read:[0,6,11,13,21,23,25,29,30,33,36,38,41,42,43,45,46,49,50,52,103,153,166,171],read_lat:166,read_repair:46,read_repair_ch:[0,6,11,41,50],read_request_timeout:30,readabl:[11,62,116,166],readi:[28,49],readlat:46,readrepair:46,readrepairstag:46,readstag:46,readwrit:49,real:[8,11,23,30],realiz:41,realli:[6,29,31],reason:[0,6,13,14,15,30,31,34,41,43,49,51],rebuild:[38,41,42,46,117,123,139],rebuild_index:117,receiv:[6,14,28,30,41,43],recent:[6,28,29,43,59],reclaim:41,recogn:[13,26,28],recommend:[6,11,21,30,43,49,51],recompact:41,recompress:42,reconnect:49,record:[11,13,21,28,41],recov:[6,30,41],recoveri:6,recreat:52,recurs:80,recv:34,recycl:[6,46],redistribut:6,redo:28,reduc:[6,30,41,42,63,89,117,132,140],reduct:6,redund:[0,6,23,25,28,43],reenabl:[79,81,82,117],refactor:40,refer:[6,11,12,13,14,15,21,23,29,30,34,35,52],referenc:6,reflect:41,refresh:[6,49,52,117,125],refreshsizeestim:117,refus:36,regard:[11,13],regardless:[0,6,19,28],regener:38,regexp:12,region:[6,50],regist:21,registri:49,regress:[25,29],regular:[9,12,26,29,30,46,52],regularstatementsexecut:46,reinsert:139,reject:[6,13,30,40,49],rel:[6,21,52],relat:[8,10,12,13,26,28,41,46],releas:[
6,10,34,52],relev:[13,19,21,28,42,49],reli:[6,14,21,30,51],reliabl:41,reload:[6,117,126,127,128,129],reloadlocalschema:117,reloadse:117,reloadssl:117,reloadtrigg:117,reloc:[117,130,163],relocatesst:117,remain:[6,13,14,21,24,41,46,51,166],remaind:[17,42],remedi:41,remot:[0,24,26,36,41,49,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],remov:[4,6,10,11,12,13,14,15,17,21,25,30,36,40,45,49,55,58,87,117,131],removenod:[51,55,117],renam:[9,21],reorder:6,repair:[0,4,6,11,30,36,42,45,46,50,51,108,117,133,150,171],repair_admin:117,repeat:[12,34,42,49],replac:[6,9,14,19,21,25,30,36,41,45,80],replace_address_first_boot:51,replai:[0,21,43,46,89,117,134,140],replaybatchlog:117,replic:[2,6,11,36,41,43,49,51,55,117],replica:[0,6,11,13,30,41,46,50,51,63,99,117],replication_factor:[0,11,49],repo:[24,26],report:[28,36,45],report_writ:19,reportfrequ:52,repositori:[5,8,26,28,29,34],repres:[6,10,17,19,21,30,41,46,49,50,52],represent:[10,17],request:[0,6,13,19,20,29,30,38,41,43,45,49,50,52,117,154,170],request_respons:46,requestresponsestag:46,requestschedul:6,requesttyp:46,requir:[0,6,11,13,14,19,23,24,25,26,28,30,38,42,43,49],require_client_auth:6,require_endpoint_verif:6,resampl:6,reserv:[6,10,12,15],reset:[6,13,117,136,150],reset_bootstrap_progress:51,resetfullquerylog:117,resetlocalschema:117,resid:[6,13,30,46],resolut:[6,13,30],resolv:[24,30,138,157],resort:[55,117],resourc:[19,49],resp:14,respect:[6,10,14,34,50,80],respond:[0,6,12],respons:[0,6,19,30,46,51],ressourc:21,rest:[6,11,12,21,25,51],restart:[30,41,49,51,117,124,142],restor:[41,51,52],restrict:[10,11,13,18,19],result:[0,6,8,10,11,12,14,17,19,21,28,30,41,46,52],resum:[56,117,137],resumehandoff:117,resurrect:41,resync:[117,136],retain:[30,41],rethrow:23,retri:[0,6,21,46,80],retriev:[11,13,19],reus:25,revers:13,review:[11,23,27,28,29,36],revok:[9,49],revoke_permission_stat:12,revoke_role_stat:12,rewrit:[38,41,42,117,139,170],rewritten:[43,139],rfc:[14,21],rhel:36,rich:21,rider:21,riderresult:21,right:[6,26,30,52],ring:[2,6,36,49,51,52,113,115,117,150],risk:11,rmem_max:6,rmi:[30,49],robin:6,rogu:14,role:[6,9,10,12,15,45],role_a:19,role_admin:19,role_b:19,role_c:19,role_manag:49,role_nam:19,role_opt:19,role_or_permission_stat:12,role_permiss:6,roll:[30,49,80],roll_cycl:80,romain:21,root:[6,24,28,34],rotat:6,roughli:6,round:[6,13,41,46],roundrobin:6,roundrobinschedul:6,rout:[6,50],row:[0,4,6,10,11,13,14,15,17,18,29,35,38,42,43,46,52,87,108,112,117,139,141,142],rowcach:46,rowcachehit:46,rowcachehitoutofrang:46,rowcachemiss:46,rowindexentri:46,rows_per_partit:11,rpc:[6,46],rpc_min:6,rpc_timeout_in_m:[103,153],rsc:171,rubi:[14,33],rule:[6,12,14,28,30],run:[5,6,12,21,24,26,28,30,31,34,41,43,46,49,51,108,117,132,155],runtim:[6,33,97,117],runtimeexcept:23,rust:33,safe:[6,14,21,41,49],safeguard:43,safeti:[41,51],sai:36,said:[11,28,30,117,170],same:[0,5,6,11,12,13,14,15,17,18,19,21,24,26,28,31,36,38,41,46,49,50,132],sampl:[4,6,12,14,46,52,80,117,119,121,167],sampler:[46,119,167],san:43,sandbox:[6,14],sasi:6,satisfi:[0,23,43,46,51],satur:[6,46],save:[6,13,21,30,31,38,42,43,51,117,142],saved_cach:6,saved_caches_directori:31,sbin:30,scala:[14,33],scalar:15,scale:[6,29,42],scan:[6,13,38,46],scen
ario:24,scene:30,schedul:6,schema:[0,9,11,14,17,46,52,64,117,126,136],schema_own:19,scope:[19,46,49],score:[6,14,21,50],script:[6,14,26,29,80],scrub:[38,41,42,46,117,163],search:28,second:[6,11,12,13,21,30,40,43,49,52,117,140,148],secondari:[10,12,13,15,36,41,46,117,123],secondary_index_stat:12,secondaryindexmanag:46,section:[2,5,7,10,11,12,13,15,19,21,30,33,34,35,41,46,49,51,53],secur:[6,14,15,36,45],see:[0,4,6,10,11,12,13,14,17,19,21,26,28,35,36,40,41,46,49,51,52,87,117,132],seed:[6,31,36,50,100,117,127],seedprovid:6,seek:[6,43,46],seen:[6,11],segment:[4,6,40,46,52,80],select:[6,9,10,11,12,14,15,19,26,29,30,35,38,41,49,52,122],select_claus:13,select_stat:[12,18],self:25,selinux:30,semant:[10,13,14],semi:30,send:[6,8,30],sens:[6,10,13,15,30],sensic:14,sensit:[11,12,14,17],sensor:21,sent:[0,6,21,30,46],separ:[4,6,11,13,23,28,31,41,43,49,51,52,55,57,58,60,65,67,73,76,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],seq:[6,132],sequenc:12,sequenti:[6,43,132],seren:13,seri:[11,41,52],serial:6,serializingcacheprovid:6,serv:[13,43,49],server:[6,12,13,21,26,29,30,43,46,49],server_encryption_opt:49,servic:[6,26,34,49,51],session:[6,19,49,117,133],set:[0,6,9,10,11,12,13,14,17,18,25,27,28,29,31,36,38,40,41,42,43,46,49,50,51,52,57,76,87,117,130,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,170],set_liter:21,setbatchlogreplaythrottl:117,setcachecapac:117,setcachekeystosav:117,setcompactionthreshold:[41,117],setcompactionthroughput:[41,117],setconcurr:117,setconcurrentcompactor:117,setconcurrentviewbuild:117,sethintedhandoffthrottlekb:117,setint:14,setinterdcstreamthroughput:117,setlogginglevel:117,setlong:14,setmaxhintwindow:117,setstr:14,setstreamthroughput:117,setter:[19,23],settimeout:117,settraceprob:117,setup:[28,29,49],sever:[4,13,19,41,49],sfunc:[9,14],sha:24,shadow:41,share:[11,13,26],sharedpool:52,sharp:32,shed:30,shell:[35,36,53],shift:21,ship:[29,35,49,52],shortcut:18,shorter:49,shorthand:52,should:[0,5,6,10,11,12,13,14,17,19,21,25,26,28,29,30,31,32,33,35,38,41,42,43,46,49,50,51,52,122,132,153],shouldn:11,show:[19,36,51,65,85,105,117,121,131,138,157,158,166,173],shown:[12,52,166],shrink:6,shut:6,shutdown:[6,43],side:[11,13,17,21,49],sign:[13,21,30],signal:[117,128],signatur:[34,40],signific:[6,26,28,29,43],significantli:6,silent:14,similar:[6,13,14,42,43],similarli:[0,10,17,23,43,117,122],simpl:[6,11,26,29,49],simple_classnam:29,simple_select:13,simplequerytest:29,simplereplicationstrategi:49,simpleseedprovid:6,simplesnitch:[6,50],simplestrategi:11,simpli:[0,6,11,13,14,17,21,26,29,41,43,46,51,171],simul:29,simultan:[6,43,52,57,87,130,139,170],sinc:[6,11,13,14,21,26,30,34,41,46,51],singl:[0,6,10,11,12,13,14,17,18,19,21,23,28,31,35,36,45,46,49,50,52,60],singleton:25,situat:[6,29,41],size:[4,6,11,21,23,30,31,38,40,42,43,45,46,49,52,80,114,117],size_estim:[117,125],sizetieredcompactionstrategi:[11,41],sjk:117,skip:[6,13,46,51,52,139,156],skipcol:52,skiprow:52,sks:34,sla:25,slash:12,slf4j:23,slightli:6,slow:[6,50],slower:[6,11,38],slowest:6,slowli:[6,21],small:[6,11,13,21,30,41,43],smaller:[6,30,41,43,52],smallest:[0,11,14,46],smallint:[9,10,14,17,21],smith:21,smoother:10,smoothli:6,snappi:6,snappycompressor:[11,42],snapshot:[6,26,46,58,114,117,139],snapshot_nam:58,snapshotnam:[58,117],snitch:[6,36,45,64,117],socket:[6,49,153],sole:11,solid:[6,43],some:[0,6,9,11,12,13,14,21,26,28,29,30,31,40,41,42,46,49,51,52],some_funct:14,some_nam:12,some
aggreg:14,somearg:14,somefunct:14,someon:[24,41],sometim:[6,12,13],someudt:14,somewher:34,soon:49,sooner:6,sort:[4,11,13,21,41,43,166],sort_kei:166,sourc:[5,6,8,14,27,34,46,122],source_elaps:52,space:[6,23,30,40,41,43,46],space_used_by_snapshots_tot:166,space_used_l:166,space_used_tot:166,span:[6,13,41],sparingli:13,spark:32,spec:[25,35,46,52],speci:[11,18],special:[12,13,29,30,41,46],specif:[6,9,11,12,13,19,21,26,28,30,32,40,41,46,49,52,117,122,132],specifc:46,specifi:[0,6,10,11,12,13,14,16,18,19,21,26,30,35,40,41,42,46,49,51,52,58,60,101,117,122,132,138,151,153,156,163,166,169],specific_dc:132,specific_host:132,specific_keyspac:122,specific_sourc:122,specific_token:122,specul:[0,46],speculativeretri:46,speed:[6,36],spent:46,spike:30,spin:[6,43],spindl:6,spirit:[6,50],split:[23,30,41,46,52,60],spread:[6,50],sql:[13,15],squar:12,squash:28,src:122,ssd:[6,16,43],ssl:[6,30,45,52,117,128],ssl_storage_port:50,sss:17,sstabl:[2,6,11,30,38,42,43,45,57,60,87,101,108,114,117,124,130,139,170,171],sstable_compression_ratio:166,sstable_count:166,sstable_s:41,sstable_size_in_mb:41,sstableexpiredblock:41,sstablesperreadhistogram:46,sstablewrit:23,stabil:28,stabl:[34,52],stack:6,stage:[28,92,117,145],stai:[36,41],stale:49,stall:[6,51],stand:[6,29],standalon:29,standard:[6,21,30,34,46],start:[0,6,9,13,27,30,31,34,36,41,43,46,49,51,60,132,163],start_token:[60,132],start_token_1:122,start_token_2:122,start_token_n:122,starter:28,startup:[6,20,26,30,41,46,51],starvat:6,state:[6,14,38,41,43,46,51,117,157],statement:[6,9,10,11,13,14,15,16,17,19,20,21,25,27,28,38,41,46,49,52],static0:11,static1:11,statist:[4,41,46,52,62,88,117,120,165,166,168],statu:[19,25,28,30,34,52,117,131,158,159,160,161,162,171],statusautocompact:117,statusbackup:117,statusbinari:117,statusgossip:117,statushandoff:117,stc:11,stdin:52,stdout:52,step:[6,26,31,49],still:[0,6,10,13,14,17,21,23,49,51,52],stop:[6,34,52,75,117,135,164],stop_commit:6,stop_paranoid:6,stopdaemon:117,storag:[2,11,15,16,28,30,36,42,43,45],storage_port:[31,50],storageservic:[6,23],store:[0,4,6,10,11,12,13,21,36,38,41,42,43,46,49,52,72,80,82,117,162],store_typ:6,straight:51,straightforward:40,strategi:[0,6,11,45,50],stream:[4,6,36,41,42,45,56,96,102,117,122,132,149,150,152,153],street:21,strength:6,strict:[10,41],strictli:[8,11,14],string:[6,10,11,12,13,14,16,17,19,20,21,46,52,101],strong:0,strongli:[6,11,12,49],structur:[4,6,9,19,25,38,46],stub:49,style:[6,25,26,27,28,29,36],stype:[9,14],sub:[11,13,21,34,41],subclass:6,subdirectori:[6,20],subject:[6,14,49],submiss:[6,28],submit:[28,29,36,60],subscrib:8,subscript:8,subsequ:[6,13,30,41,42],subset:[19,41,52],substitut:34,subsystem:49,subvert:41,succed:46,succesfulli:46,success:[0,52],sudden:6,sudo:[30,34],suffici:[6,43],suggest:[12,28,43],suit:[6,28,29,49],suitabl:[13,14,25,28],sum:40,summari:[4,6,46],sun:[23,49],sunx509:6,supercolumn:9,supersed:[10,139],superus:[9,19,49],suppli:[13,24],support:[0,6,9,10,11,12,13,14,15,16,18,19,21,28,29,30,32,36,41,49,52,139,163],suppos:13,sure:[6,8,23,26,28,29,30,31,34,41],surplu:30,surpris:0,surprisingli:6,surround:[17,52],suscept:14,suspect:[5,28],suspend:26,swamp:30,swap:6,swiss:[117,155],symmetri:17,symptom:30,sync:[6,30,46,132],synchron:6,synonym:19,synopsi:[55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,1
50,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],syntact:[11,19],syntax:[10,12,13,14,19,21,41,42],sys:6,sysctl:30,sysintern:6,system:[6,11,14,19,29,30,31,35,41,43,46,49,52,91,93,94,96,102,108,117,124,125,126,144,146,147,149,152],system_auth:[6,49],system_trac:132,tab:23,tabl:[0,4,6,9,10,12,13,14,15,16,17,18,19,20,21,29,38,41,42,45,49,52,57,60,67,75,77,86,87,90,95,99,108,117,123,124,126,130,132,139,143,156,158,163,165,166,170,171],table1:19,table_nam:[11,13,16,19,20,41,166],table_opt:[11,18],tablehistogram:117,tablestat:117,tag:[21,25,28,156],take:[6,10,11,13,14,21,25,26,28,30,38,41,42,43,51,117,156],taken:[6,40,41,46],tar:34,tarbal:[31,33,52],target:[11,19,26,29,41],task:[6,26,28,46,52],tcp:[6,30],tcp_keepalive_intvl:30,tcp_keepalive_prob:30,tcp_keepalive_tim:30,tcp_nodelai:6,tcp_wmem:6,teach:[6,50],team:30,technetwork:6,technic:[11,15],technot:6,tee:34,tell:[6,13,25,30,31,46],temporari:49,temporarili:6,tenanc:6,tend:[6,30,43],tendenc:6,terabyt:42,term:[6,13,14,15,18,21],termin:[12,52],ternari:23,test:[6,8,23,25,27,28,35,36,43,52],test_keyspac:49,testabl:[25,28],testbatchandlist:29,testmethod1:29,testmethod2:29,testsom:29,teststaticcompactt:29,text:[4,9,11,12,13,14,17,21,40,42,49],than:[0,6,11,12,13,14,15,18,21,23,28,36,41,42,43,49,50,51,133,146,147],thei:[6,9,10,11,12,13,14,15,18,19,21,23,25,28,29,36,38,41,42,43,46,49],them:[6,10,11,13,14,21,23,28,29,30,35,38,41,46,49,117,170],themselv:[13,19],theoret:11,therefor:[28,29,49],thi:[0,2,4,5,6,7,10,11,12,13,14,15,17,18,19,21,23,24,25,26,28,29,30,31,33,34,36,38,40,41,42,43,46,49,50,51,52,53,54,55,57,58,60,63,65,67,73,77,83,86,87,89,90,92,95,99,101,103,107,108,115,117,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],thing:[6,21,24,28,30,33,41],think:6,third:[21,25,46],thobb:52,those:[11,12,13,14,16,17,18,19,21,28,30,40,41,49,52,170],though:[6,10,12,21,36,41,42,46],thousand:52,thousandssep:52,thread:[6,43,46,49,57,87,117,130,132,139,148,168,170],threadpool:45,threadpoolnam:46,threadprioritypolici:26,three:[0,6,38,41,42,49,52],threshold:[4,40,43,50,90,117,143,150],thrift:[6,9,11,15,30,46],throttl:[6,89,117,140,144,148,149,152],throttle_limit:6,through:[0,5,9,10,11,12,13,26,28,30,35,40,41,52],throughout:49,throughput:[0,6,41,42,43,46,91,96,102,117,144,149,152],throwabl:[25,29],thrown:21,thu:[6,10,11,12,13,18,21,30,46,50,51,117,170],thumb:[6,28],thusli:21,tib:[62,116,166],ticket:[5,24,25,28,29,40],tie:30,tier:45,ties:13,tighter:6,tightli:6,tild:52,time:[0,6,8,9,10,11,12,13,15,16,17,18,23,25,26,28,29,30,38,40,42,45,46,49,52,117,119],timehorizon:6,timelin:11,timeout:[6,21,30,46,52,103,117,153],timeout_in_m:153,timeout_typ:[103,153],timer:[6,46],timestamp:[4,9,10,11,13,14,15,17,36,41,52,139],timeunit:41,timeuuid:[9,10,11,17,21],timewindowcompactionstrategi:11,timezon:[17,52],tini:[6,41],tinyint:[9,10,14,17,21],tjake:23,tls_dhe_rsa_with_aes_128_cbc_sha:6,tls_dhe_rsa_with_aes_256_cbc_sha:6,tls_ecdhe_rsa_with_aes_128_cbc_sha:6,tls_ecdhe_rsa_with_aes_256_cbc_sha:6,tls_rsa_with_aes_128_cbc_sha:6,tls_rsa_with_aes_256_cbc_sha:6,toc:4,todai:12,todat:14,todo:[25,29],togeth:[6,11,13,14,41],toggl:49,tojson:15,token:[2,4,6,9,10,12,13,30,41,46,52,60,65,108,109,115,117,122,132,138,171],toler:38,tom:13,tombston:[4,6,11,17,30,45,46,87,139],tombstone_compact:163,tombstone_compaction_interv:41,tombstone_threshold:41,tombstonescannedhistogram:46,ton:29,too:[6,11,12,14,21,25,41],tool:[6,12,28,30,36,41,46,49,51],top:[13,21,28,36,46,1
19,166,167],topcount:[119,167],topic:52,topolog:[6,50,138],toppartit:117,total:[6,13,40,41,46,114,117],totalblockedtask:46,totalcommitlogs:46,totalcompactionscomplet:46,totaldiskspaceus:46,totalhint:46,totalhintsinprogress:46,totallat:46,totimestamp:14,touch:[8,30,41],tough:29,tounixtimestamp:14,tour:21,toward:11,tpstat:117,trace:[6,46,104,117,132,154],track:[6,41,46],tracker:28,tradeoff:[0,6],tradit:[41,42],traffic:[6,50],trail:23,transact:[13,20,46,163],transfer:[6,30,49],transform:13,transit:[10,19],translat:6,transpar:[6,30],transport:[6,26,46,69,79,117,160],treat:[0,6,10,30,50],tree:[6,26,46],tri:41,trigger:[4,6,9,12,15,36,38,42,45,57,117,129],trigger_nam:20,trigger_stat:12,trip:[6,13],trivial:49,troubleshoot:[25,36],truediskspaceus:[114,117],truesnapshotss:46,truli:9,truncat:[6,9,10,15,19,103,117,153,169],truncate_stat:12,truncatehint:117,trunk:[24,25,26,28],trust:49,trustor:6,truststor:[6,49],truststore_password:6,truststorepassword:49,tserverfactori:6,ttl:[4,6,9,10,11,14,17,21,45,139],tty:52,tunabl:2,tune:[30,38,41,43],tupl:[6,9,10,12,13,14,15,17],tuple_liter:[12,13],tuple_typ:21,tuplevalu:[10,14],turn:[0,6,28,30,49],twc:[11,41],twice:[6,21],two:[0,6,11,12,13,14,17,26,36,38,41,43,49,50,52],txt:[4,14,24,25,28],type:[0,6,10,11,12,13,14,15,19,25,34,36,43,45,49,52,103,117,153,163],type_hint:12,typeasblob:14,typecodec:14,typic:[0,6,13,30,38,41,43,46,49,52],ubuntu:26,udf:[6,14],udf_stat:12,udfcontext:[10,14],udt:[14,17],udt_liter:12,udt_nam:21,udt_stat:12,udtarg:14,udtnam:14,udtvalu:[10,14],ulimit:30,unabl:[6,25,36],unaffect:21,unavail:[6,11,46,49,51],unblock:46,unbound:21,unchecked_tombstone_compact:41,uncom:[6,46,49],uncommon:28,uncompress:[6,42,46],undelet:41,under:[6,21,23,29,46,49],underli:[6,18,41,49],understand:[6,28,30],unencrypt:[6,49],unexpectedli:21,unfinishedcommit:46,unflush:[40,156],unfortun:29,uniqu:[11,14,21],unit:[21,25,27,41,117,141],unixtimestampof:[10,14],unless:[6,11,13,16,18,19,21,23,40,49,50],unlik:[6,10,13,21],unlimit:[6,30,52],unlog:9,unnecessari:[25,51],unnecessarili:40,unpredict:13,unprepar:46,unquot:12,unquoted_identifi:12,unquoted_nam:11,unrel:28,unreleas:28,unrepair:45,unsecur:49,unset:[6,10,13,17],unsign:21,unspecifi:6,unsubscrib:[8,36],untar:34,until:[0,6,21,38,40,41,42,49,50],unus:6,unusu:25,updat:[6,9,10,11,12,14,15,17,18,19,21,25,28,29,34,36,41,42,46,49,52],update_paramet:13,update_stat:[12,13],upgrad:[6,41,117,170],upgrade_sst:163,upgradesst:[38,41,42,117],upload:28,upon:[6,21,38,42],upper:[12,17,41,49],ups:43,upstream:28,uptim:[109,117],url:24,usag:[4,6,11,21,36,38,40,42,46,52],use:[6,9,10,11,12,13,14,16,17,18,19,21,23,25,26,28,29,31,34,35,36,38,40,41,43,46,49,50,51,52,57,87,100,117,119,130,139,167,170],use_stat:12,usecas:41,useconcmarksweepgc:26,usecondcardmark:26,used:[0,4,6,9,10,11,12,13,14,15,16,17,18,19,21,25,26,28,29,30,41,43,46,49,50,51,52,55,57,58,60,65,67,73,76,77,83,86,87,90,92,95,99,101,103,107,108,115,117,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],useecassandra:49,useful:[0,6,11,14,28,41,42,46,51,52,55,57,58,60,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],useparnewgc:26,user1:13,user2:13,user3:13,user4:13,user:[5,6,8,9,10,11,12,13,15,16,17,18,25,28,30,34,38,41,42,43,49,52,60,76,117],user_count:13,user_defined_typ:21,user_funct:19,user_nam:13,user_occup:13,user_opt:19,useract:13,userid:[11,1
3,14],userindex:16,usernam:[6,13,14,49,52,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],uses:[0,4,6,11,12,13,14,16,19,20,29,30,49],usethreadprior:26,using:[4,6,10,11,12,13,14,18,19,21,26,28,29,33,34,35,36,38,42,43,45,46,49,51,52,60,122,139,156],usr:52,usual:[6,13,21,24,29,38,49,132],utc:[17,52],utd:11,utf8:[21,52],utf8typ:9,utf:52,util:[14,25,41,52],uuid:[9,10,11,12,17,21],val0:11,val1:11,val:14,valid:[6,10,11,12,13,14,17,21,30,41,42,46,49,52,132,139,163],validationexecutor:46,valu:[6,9,10,11,12,13,14,16,17,21,25,26,30,38,41,46,49,50,52,76,104,108,117,140,144,146,147,148,149,151,152,153,154],value1:13,value2:13,value_in_kb_per_sec:[140,148],value_in_m:151,value_in_mb:[144,149,152],valueof:14,varchar:[9,11,14,17,21],vari:[6,42],variabl:[6,10,12,17,21,26,33],variant:12,varieti:40,varint:[9,11,14,17,21],variou:[26,29,43,49],veri:[6,11,13,28,29,30,38,41,42,43],verifi:[28,30,32,34,42,108,117,163],version:[5,6,9,11,14,15,21,26,28,32,34,41,46,51,59,64,74,84,117,170,171],vertic:52,via:[6,8,10,19,25,30,31,41,42,43,46,49,50],view:[6,10,11,12,15,19,36,46,52,94,117,147,173],view_build:163,view_nam:18,viewbuildstatu:117,viewlockacquiretim:46,viewmutationstag:46,viewpendingmut:46,viewreadtim:46,viewreplicasattempt:46,viewreplicassuccess:46,viewwrit:46,viewwritelat:46,virtual:[0,6,30,41,46,51],visibl:[11,19,23,38],vnode:[6,42],volum:[6,40,42],vulner:[6,49],wai:[4,6,12,15,17,18,21,24,26,29,30,41,42,132],wait:[0,6,11,28,30,46,117,134],waitingoncommit:46,waitingonfreememtablespac:46,waitingonsegmentalloc:46,want:[6,11,13,26,28,29,30,49,51],warmup:[117,142],warn:[6,11,23,29,45,132],washington:21,wasn:10,wast:6,watch:29,weaker:0,websit:[29,34],week:21,weight:[6,46,80],welcom:8,well:[6,11,13,14,17,21,25,26,40,42,43,49,50,117,135],went:46,were:[6,9,10,19,25,26,41,46],what:[11,13,21,27,29,31,36,41,43,49,52],whatev:[10,13,30],whedon:13,when:[4,6,9,10,11,12,13,14,15,16,17,19,21,23,25,28,29,31,36,38,40,42,43,45,46,49,50,51,52,55,57,58,60,63,65,67,73,77,83,86,87,90,92,95,99,101,103,107,108,115,119,122,123,124,130,131,132,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,165,166,167,169,170,171,173],where:[0,4,6,9,10,11,12,14,16,17,18,19,21,25,29,31,34,38,41,42,49,51,52,80,132],where_claus:13,wherea:[21,49],whether:[0,6,9,11,13,26,41,50,52,80],which:[0,4,5,6,10,11,12,13,14,15,16,18,19,20,21,28,29,30,31,34,35,38,40,41,42,43,46,49,50,51,60,95,99,108,114,117,122,132],whichev:[0,6],whitelist:49,whitespac:27,who:[19,28,30],whole:[6,11,13,14,21,41],whose:[11,21,163],why:[25,28,36],wide:[4,40],width:12,wiki:[6,26],wildcard:[13,19],window:[0,6,45,46,49,98,106,117,151],winner:30,wip:[26,28],wipe:[30,51],wire:30,wise:11,wish:[6,41,46],within:[0,4,6,11,12,13,16,28,30,41,43,46,49],withing:6,without:[6,11,12,13,14,19,21,24,26,28,29,30,40,43,46,49,52,55,108,117,124],wmem_max:6,won:[6,13,24],wont:41,word:[10,11,12,18,19,21,30],work:[6,10,11,14,15,17,23,24,26,27,29,30,41,43,46,49,50,51,52],worker:52,workload:[6,25,38,41,43],workspac:26,worktre:26,worri:[28,30],wors:[6,50],worst:[6,28],worthwhil:6,would:[6,12,13,14,17,19,26,28,29,36,41,42,43,49,50],wrap:50,write:[0,4,6,10,11,13,21,23,25,29,30,40,41,42,43,46,49,50,51,52,75,
103,117,153,166],write_lat:166,write_request_timeout:30,writelat:46,writer:[6,23],writetim:[9,14],writetimeoutexcept:6,written:[4,6,20,30,38,41,42,46],wrong:6,wrte:46,www:[6,11,34],xlarg:43,xml:31,xmn220m:26,xms1024m:26,xmx1024m:26,xmx:43,xss256k:26,xvf:34,yaml:[6,14,31,34,46,49,50,51,61,76,80,117,135,166,168],year:[13,21],yes:[9,11,49],yet:[11,46],yield:[13,51],you:[5,6,8,10,11,12,13,14,16,17,18,20,21,23,24,26,27,29,30,31,32,33,34,35,36,41,46,49,50,51,52,55,117,156],younger:14,your:[0,5,6,8,10,11,12,23,26,28,29,30,31,34,36,41,43,49,50,52],yourself:[24,29],yyyi:[17,21],z_0:[11,16,18],zero:[6,10,30,46,50],zip:21,zipcod:21,zone:[6,21,50],zzzzz:28},titles:["Dynamo","Guarantees","Architecture","Overview","Storage Engine","Reporting Bugs and Contributing","Cassandra Configuration File","Configuring Cassandra","Contact us","Appendices","Changes","Data Definition","Definitions","Data Manipulation","Functions","The Cassandra Query Language (CQL)","Secondary Indexes","JSON Support","Materialized Views","Security","Triggers","Data Types","Data Modeling","Code Style","How-to Commit","Review Checklist","Building and IDE Integration","Cassandra Development","Contributing Code Changes","Testing","Frequently Asked Questions","Configuring Cassandra","Client drivers","Getting Started","Installing Cassandra","Inserting and querying","Welcome to Apache Cassandra\u2019s documentation!","Backups","Bloom Filters","Bulk Loading","Change Data Capture","Compaction","Compression","Hardware Choices","Hints","Operating Cassandra","Monitoring","Read repair","Repair","Security","Snitch","Adding, replacing, moving and removing nodes","cqlsh: the CQL shell","Cassandra Tools","Nodetool","assassinate","bootstrap","cleanup","clearsnapshot","clientstats","compact","compactionhistory","compactionstats","decommission","describecluster","describering","disableauditlog","disableautocompaction","disablebackup","disablebinary","disablefullquerylog","disablegossip","disablehandoff","disablehintsfordc","disableoldprotocolversions","drain","enableauditlog","enableautocompaction","enablebackup","enablebinary","enablefullquerylog","enablegossip","enablehandoff","enablehintsfordc","enableoldprotocolversions","failuredetector","flush","garbagecollect","gcstats","getbatchlogreplaythrottle","getcompactionthreshold","getcompactionthroughput","getconcurrency","getconcurrentcompactors","getconcurrentviewbuilders","getendpoints","getinterdcstreamthroughput","getlogginglevels","getmaxhintwindow","getreplicas","getseeds","getsstables","getstreamthroughput","gettimeout","gettraceprobability","gossipinfo","handoffwindow","help","import","info","invalidatecountercache","invalidatekeycache","invalidaterowcache","join","listsnapshots","move","netstats","Nodetool","pausehandoff","profileload","proxyhistograms","rangekeysample","rebuild","rebuild_index","refresh","refreshsizeestimates","reloadlocalschema","reloadseeds","reloadssl","reloadtriggers","relocatesstables","removenode","repair","repair_admin","replaybatchlog","resetfullquerylog","resetlocalschema","resumehandoff","ring","scrub","setbatchlogreplaythrottle","setcachecapacity","setcachekeystosave","setcompactionthreshold","setcompactionthroughput","setconcurrency","setconcurrentcompactors","setconcurrentviewbuilders","sethintedhandoffthrottlekb","setinterdcstreamthroughput","setlogginglevel","setmaxhintwindow","setstreamthroughput","settimeout","settraceprobability","sjk","snapshot","status","statusautocompaction","statusbackup","statusbinary","statusgossip","statushandoff","stop","stopdaemon","t
ablehistograms","tablestats","toppartitions","tpstats","truncatehints","upgradesstables","verify","version","viewbuildstatus","Troubleshooting"],titleterms:{"class":50,"function":[13,14,17],"import":[23,108],"long":29,"new":30,"static":11,"switch":41,Adding:51,IDE:26,IDEs:23,LCS:41,TLS:49,The:[11,13,15,17,41],USE:11,Use:42,Uses:42,Using:26,Will:30,With:49,access:49,add:30,address:30,advanc:42,after:51,aggreg:14,alias:13,all:[19,30],alloc:51,allocate_tokens_for_keyspac:6,allow:13,alter:[11,18,19,21],ani:30,apach:36,appendic:9,appendix:9,architectur:2,ask:30,assassin:55,assign:51,auth:49,authent:[6,19,49],author:[6,49],auto_snapshot:6,automat:19,avg:14,back_pressure_en:6,back_pressure_strategi:6,backup:37,batch:[13,30],batch_size_fail_threshold_in_kb:6,batch_size_warn_threshold_in_kb:6,batchlog_replay_throttle_in_kb:6,befor:28,benefit:42,binari:34,blob:[14,30],bloom:38,boilerpl:23,bootstrap:[30,41,51,56],branch:28,broadcast_address:6,broadcast_rpc_address:6,buffer_pool_use_heap_if_exhaust:6,bufferpool:46,bug:[5,28],build:26,bulk:[30,39],cach:[11,46,49],call:30,can:30,captur:[40,52],cas_contention_timeout_in_m:6,cassandra:[6,7,15,17,26,27,29,30,31,34,36,40,45,49,53],cast:14,cdc:40,cdc_enabl:6,cdc_free_space_check_interval_m:6,cdc_raw_directori:6,cdc_total_space_in_mb:6,chang:[10,28,30,31,38,40,41],characterist:21,checklist:25,choic:43,choos:28,circleci:29,claus:13,cleanup:[51,57],clear:52,clearsnapshot:58,client:[32,35,46,49],client_encryption_opt:6,clientstat:59,clojur:32,cloud:43,cluster:[11,30],cluster_nam:6,code:[23,28],collect:[21,41],column:11,column_index_cache_size_in_kb:6,column_index_size_in_kb:6,command:[26,41,52],comment:12,commit:24,commit_failure_polici:6,commitlog:[4,46],commitlog_compress:6,commitlog_directori:6,commitlog_segment_size_in_mb:6,commitlog_sync:6,commitlog_sync_batch_window_in_m:6,commitlog_sync_period_in_m:6,commitlog_total_space_in_mb:6,commitlogseg:40,common:[11,41,43],compact:[9,11,41,46,60],compaction_large_partition_warning_threshold_mb:6,compaction_throughput_mb_per_sec:6,compactionhistori:61,compactionstat:62,compactionstrategi:41,compat:52,compress:[11,42],concern:41,concurrent_compactor:6,concurrent_counter_writ:6,concurrent_materialized_view_writ:6,concurrent_read:6,concurrent_writ:6,condition:19,configur:[6,7,31,40,42],connect:30,consider:11,consist:[0,52],constant:12,contact:8,contribut:[5,28],control:19,convent:[12,23],convers:14,copi:52,count:14,counter:[13,21],counter_cache_keys_to_sav:6,counter_cache_save_period:6,counter_cache_size_in_mb:6,counter_write_request_timeout_in_m:6,cpu:43,cql:[9,15,46,52],cqlsh:[35,52],cqlshrc:52,creat:[11,14,16,18,19,20,21,28],credenti:19,credentials_update_interval_in_m:6,credentials_validity_in_m:6,cross_node_timeout:6,cstar_perf:29,custom:21,cython:52,data:[11,13,17,19,21,22,30,40,41,51],data_file_directori:6,databas:19,date:21,dead:51,debian:34,debug:26,decommiss:63,defin:[14,21],definit:[11,12],defragment:41,delet:[13,30,41],depend:52,describ:[52,65],describeclust:64,detail:41,detect:0,develop:27,dies:30,directori:[31,41],disabl:40,disableauditlog:66,disableautocompact:67,disablebackup:68,disablebinari:69,disablefullquerylog:70,disablegossip:71,disablehandoff:72,disablehintsfordc:73,disableoldprotocolvers:74,disk:[30,43],disk_failure_polici:6,disk_optimization_strategi:6,document:36,doe:30,drain:75,driver:[32,35],drop:[9,11,14,16,18,19,20,21,30],droppedmessag:46,dtest:29,durat:21,dynam:50,dynamic_snitch_badness_threshold:6,dynamic_snitch_reset_interval_in_m:6,dynamic_snitch_update_interval_in_m:6,dynamo:0,each:30,
eclips:26,email:30,enabl:[40,49],enable_materialized_view:6,enable_sasi_index:6,enable_scripted_user_defined_funct:6,enable_user_defined_funct:6,enableauditlog:76,enableautocompact:77,enablebackup:78,enablebinari:79,enablefullquerylog:80,enablegossip:81,enablehandoff:82,enablehintsfordc:83,enableoldprotocolvers:84,encod:17,encrypt:49,endpoint_snitch:6,engin:4,entri:30,environ:31,erlang:32,error:30,even:30,except:23,exist:30,exit:52,expand:52,experiment:6,expir:41,factor:30,fail:[30,51],failur:[0,30],failuredetector:85,featur:6,file:[6,23,34],file_cache_size_in_mb:6,filedescriptorratio:46,filter:[13,38],fix:28,flush:86,format:23,frequent:30,from:[26,30,34,52],fromjson:17,fulli:41,further:40,garbag:41,garbagecollect:87,garbagecollector:46,gc_grace_second:41,gc_log_threshold_in_m:6,gc_warn_threshold_in_m:6,gcstat:88,gener:23,get:33,getbatchlogreplaythrottl:89,getcompactionthreshold:90,getcompactionthroughput:91,getconcurr:92,getconcurrentcompactor:93,getconcurrentviewbuild:94,getendpoint:95,getinterdcstreamthroughput:96,getlogginglevel:97,getmaxhintwindow:98,getreplica:99,getse:100,getsstabl:101,getstreamthroughput:102,gettimeout:103,gettraceprob:104,give:30,gossip:0,gossipinfo:105,grace:41,grant:19,group:13,guarante:1,handl:23,handoffwindow:106,hang:51,happen:30,hardwar:43,haskel:32,heap:30,help:[52,107],hint:44,hinted_handoff_disabled_datacent:6,hinted_handoff_en:6,hinted_handoff_throttle_in_kb:6,hintedhandoff:46,hints_compress:6,hints_directori:6,hints_flush_period_in_m:6,host:[30,52],how:[24,30],idea:26,identifi:12,impact:42,incremental_backup:6,index:[16,46],index_summary_capacity_in_mb:6,index_summary_resize_interval_in_minut:6,info:109,initial_token:6,insert:[13,17,35],instal:34,integr:[26,49],intellij:26,inter:49,inter_dc_stream_throughput_outbound_megabits_per_sec:6,inter_dc_tcp_nodelai:6,intern:[19,49],internode_authent:6,internode_compress:6,internode_recv_buff_size_in_byt:6,internode_send_buff_size_in_byt:6,invalidatecountercach:110,invalidatekeycach:111,invalidaterowcach:112,irc:8,java:[30,32],jconsol:30,jmx:[30,41,46,49],join:[30,113],json:17,jvm:46,kei:[11,16,18],key_cache_keys_to_sav:6,key_cache_save_period:6,key_cache_size_in_mb:6,keyspac:[11,30,46],keyword:[9,12],lang:30,languag:15,larg:30,level:[0,41],limit:13,line:[26,52],list:[8,19,21,30],listen:30,listen_address:[6,30],listen_interfac:6,listen_interface_prefer_ipv6:6,listen_on_broadcast_address:6,listsnapshot:114,liter:21,live:30,load:[30,39],locat:31,log:[30,31,41],login:52,lot:30,made:30,mail:8,main:31,major:41,manipul:13,manual:51,map:[16,21,30],materi:18,max:[14,30],max_hint_window_in_m:6,max_hints_delivery_thread:6,max_hints_file_size_in_mb:6,max_value_size_in_mb:6,maxtimeuuid:14,mean:30,memori:[30,43,46],memorypool:46,memtabl:4,memtable_allocation_typ:6,memtable_cleanup_threshold:6,memtable_flush_writ:6,memtable_heap_space_in_mb:6,memtable_offheap_space_in_mb:6,merg:41,messag:30,method:30,metric:46,min:14,minor:41,mintimeuuid:14,model:22,monitor:[46,51],more:[30,41],move:[51,115],movement:51,multilin:23,nativ:[14,21],native_transport_max_concurrent_connect:6,native_transport_max_concurrent_connections_per_ip:6,native_transport_max_frame_size_in_mb:6,native_transport_max_thread:6,native_transport_port:6,native_transport_port_ssl:6,net:32,netstat:116,networktopologystrategi:0,newer:26,node:[30,49,51],nodej:32,nodetool:[30,41,54,117],noteworthi:21,now:14,num_token:6,one:30,onli:30,oper:[30,41,42,45],option:[11,18,41,52],order:[11,13],otc_backlog_expiration_interval_m:6,otc_coalescing_enough_coalesced_messag:6,otc_coale
scing_strategi:6,otc_coalescing_window_u:6,other:[11,30],outofmemoryerror:30,overview:[3,40],packag:34,page:52,paramet:[13,40,41],partit:11,partition:6,password:49,patch:28,pausehandoff:118,perform:29,permiss:19,permissions_update_interval_in_m:6,permissions_validity_in_m:6,phi_convict_threshold:6,php:32,pick:0,point:30,port:30,prepar:12,prepared_statements_cache_size_mb:6,prerequisit:34,primari:[11,18],profileload:119,progress:51,project:26,properti:31,proxyhistogram:120,python:32,pytz:52,queri:[15,35],question:30,rang:[0,51],range_request_timeout_in_m:6,rangekeysampl:121,read:[40,47],read_request_timeout_in_m:6,rebuild:122,rebuild_index:123,refresh:124,refreshsizeestim:125,refus:30,releas:28,reloadlocalschema:126,reloadse:127,reloadssl:128,reloadtrigg:129,relocatesst:130,remot:30,remov:[41,51],removenod:131,repair:[41,47,48,132],repair_admin:133,repair_session_max_tree_depth:6,replac:51,replaybatchlog:134,replic:[0,30],report:[5,30,46],request:46,request_schedul:6,request_scheduler_id:6,request_scheduler_opt:6,request_timeout_in_m:6,reserv:9,resetfullquerylog:135,resetlocalschema:136,result:13,resum:51,resumehandoff:137,revers:11,review:25,revok:19,rhel:30,right:28,ring:[0,30,138],role:[19,49],role_manag:6,roles_update_interval_in_m:6,roles_validity_in_m:6,row_cache_class_nam:6,row_cache_keys_to_sav:6,row_cache_save_period:6,row_cache_size_in_mb:6,rpc_address:6,rpc_interfac:6,rpc_interface_prefer_ipv6:6,rpc_keepal:6,rpc_max_thread:6,rpc_min_thread:6,rpc_port:6,rpc_recv_buff_size_in_byt:6,rpc_send_buff_size_in_byt:6,rpc_server_typ:6,rubi:32,run:29,runtim:31,rust:32,safeti:6,sai:30,same:30,saved_caches_directori:6,scala:32,scalar:14,scrub:139,secondari:16,secur:[19,49],see:30,seed:30,seed_provid:6,select:[13,17,18],selector:13,serial:52,server_encryption_opt:6,session:52,set:[19,21,26,30],setbatchlogreplaythrottl:140,setcachecapac:141,setcachekeystosav:142,setcompactionthreshold:143,setcompactionthroughput:144,setconcurr:145,setconcurrentcompactor:146,setconcurrentviewbuild:147,sethintedhandoffthrottlekb:148,setinterdcstreamthroughput:149,setlogginglevel:150,setmaxhintwindow:151,setstreamthroughput:152,settimeout:153,settraceprob:154,setup:26,share:52,shell:52,show:[30,52],signatur:14,simplestrategi:0,singl:[30,41],size:41,sjk:155,slow_query_log_timeout_in_m:6,snapshot:156,snapshot_before_compact:6,snitch:50,sourc:[26,52],special:52,speed:30,ssl:49,ssl_storage_port:6,sstabl:[4,41,46],sstable_preemptive_open_interval_in_mb:6,stai:30,standard:49,start:[26,28,33],start_native_transport:6,start_rpc:6,starv:41,statement:[12,18,23],statu:157,statusautocompact:158,statusbackup:159,statusbinari:160,statusgossip:161,statushandoff:162,stc:41,stop:163,stopdaemon:164,storag:[4,9,46],storage_port:6,store:30,strategi:41,stream:[30,46,51],stream_throughput_outbound_megabits_per_sec:6,streaming_keep_alive_period_in_sec:6,stress:29,style:23,sum:14,support:17,tabl:[11,40,46],tablehistogram:165,tablestat:166,tarbal:34,term:12,test:[26,29],than:30,thei:30,though:30,threadpool:46,threshold:6,thrift_framed_transport_size_in_mb:6,thrift_prepared_statements_cache_size_mb:6,tick:28,tier:41,time:[14,21,41],timestamp:[21,30],timeuuid:14,timewindowcompactionstrategi:41,tock:28,todo:[0,1,3,4,11,22,37,39,44,47,48,54],tojson:17,token:[0,14,51],tombston:41,tombstone_failure_threshold:6,tombstone_warn_threshold:6,tool:[29,53],top:30,toppartit:167,tpstat:168,trace:52,tracetype_query_ttl:6,tracetype_repair_ttl:6,transparent_data_encryption_opt:6,trickle_fsync:6,trickle_fsync_interval_in_kb:6,trigger:[20,41],troubleshoot:174,
truncat:11,truncate_request_timeout_in_m:6,truncatehint:169,ttl:[13,41],tunabl:0,tupl:21,two:30,type:[9,17,21,41,46],udt:21,unabl:30,unit:[26,29],unlog:13,unlogged_batch_across_partitions_warn_threshold:6,unrepair:41,unsubscrib:30,updat:[13,30],upgradesst:170,usag:[30,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173],use:30,user:[14,19,21],using:[30,41],uuid:14,variabl:31,verifi:171,version:[10,52,172],view:18,viewbuildstatu:173,warn:40,welcom:36,what:[28,30],when:[30,41],where:13,whitespac:23,why:[30,41],window:41,windows_timer_interv:6,without:41,work:[21,28],write_request_timeout_in_m:6,writetim:13,yaml:40,you:28}}) \ No newline at end of file diff --git a/src/doc/3.11.7/tools/cqlsh.html b/src/doc/3.11.7/tools/cqlsh.html deleted file mode 100644 index caa550ed2..000000000 --- a/src/doc/3.11.7/tools/cqlsh.html +++ /dev/null @@ -1,481 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "cqlsh: the CQL shell" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cqlsh: the CQL shell

-

cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line.

-
-

Compatibility

-

cqlsh is compatible with Python 2.7.

-

In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.

-
-
-

Optional Dependencies

-

cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.

-
-

pytz

-

By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the pytz library must be installed. See the timezone option in cqlshrc for -specifying a timezone to use.

-
-
-

cython

-

The performance of cqlsh’s COPY operations can be improved by installing cython. This will -compile the python modules that are central to the performance of COPY.

-
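If the optional dependencies are wanted, they can usually be installed from PyPI with pip on the machine running cqlsh (exact package management will vary by environment):

$ pip install pytz cython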
-
-
-

cqlshrc

-

The cqlshrc file holds configuration options for cqlsh. By default this is in the user’s home directory at ~/.cassandra/cqlshrc, but a custom location can be specified with the --cqlshrc option.

-

Example config values and documentation can be found in the conf/cqlshrc.sample file of a tarball installation. You -can also view the latest version of cqlshrc online.

-
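As an illustration only, a minimal cqlshrc might look like the following. The values are placeholders, and the section and option names should be confirmed against the conf/cqlshrc.sample shipped with the Cassandra version in use:

[authentication]
username = my_user
password = my_password

[connection]
hostname = 127.0.0.1
port = 9042

[ui]
timezone = Etc/UTC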
-
-

Command Line Options

-

Usage:

-

cqlsh [options] [host [port]]

-
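For example, a typical invocation that connects to a specific node with authentication might look like this (the hostname and credentials are placeholders):

$ cqlsh -u my_user -p my_password cassandra-node1.example.com 9042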

Options:

-
-
-C --color
-
Force color output
-
--no-color
-
Disable color output
-
--browser
-
Specify the browser to use for displaying cqlsh help. This can be one of the supported browser names (e.g. firefox) or a browser path followed by %s (e.g. -/usr/bin/google-chrome-stable %s).
-
--ssl
-
Use SSL when connecting to Cassandra
-
-u --user
-
Username to authenticate against Cassandra with
-
-p --password
-
Password to authenticate against Cassandra with, should -be used in conjunction with --user
-
-k --keyspace
-
Keyspace to authenticate to, should be used in conjunction -with --user
-
-f --file
-
Execute commands from the given file, then exit
-
--debug
-
Print additional debugging information
-
--encoding
-
Specify a non-default encoding for output (defaults to UTF-8)
-
--cqlshrc
-
Specify a non-default location for the cqlshrc file
-
-e --execute
-
Execute the given statement, then exit
-
--connect-timeout
-
Specify the connection timeout in seconds (defaults to 2s)
-
--request-timeout
-
Specify the request timeout in seconds (defaults to 10s)
-
-t --tty
-
Force tty mode (command prompt)
-
-
-
-

Special Commands

-

In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below.

-
-

CONSISTENCY

-

Usage: CONSISTENCY <consistency level>

-

Sets the consistency level for operations to follow. Valid arguments include:

-
    -
  • ANY
  • -
  • ONE
  • -
  • TWO
  • -
  • THREE
  • -
  • QUORUM
  • -
  • ALL
  • -
  • LOCAL_QUORUM
  • -
  • LOCAL_ONE
  • -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-
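For example, to require a quorum of replicas for subsequent reads and writes in the current session:

cqlsh> CONSISTENCY QUORUM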
-
-

SERIAL CONSISTENCY

-

Usage: SERIAL CONSISTENCY <consistency level>

-

Sets the serial consistency level for operations to follow. Valid arguments include:

-
    -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-

The serial consistency level is only used by conditional updates (INSERT, UPDATE and DELETE with an IF -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of QUORUM (and -is successful), then a QUORUM read is guaranteed to see that write. But if the regular consistency level of that -write is ANY, then only a read with a consistency level of SERIAL is guaranteed to see it (even a read with -consistency ALL is not guaranteed to be enough).

-
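As an illustration (the keyspace and table names are hypothetical), the serial consistency level only comes into play for conditional statements such as:

cqlsh> SERIAL CONSISTENCY LOCAL_SERIAL
cqlsh> INSERT INTO my_keyspace.users (id, name) VALUES (1, 'alice') IF NOT EXISTS;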
-
-

SHOW VERSION

-

Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:

-
cqlsh> SHOW VERSION
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-
-
-
-
-

SHOW HOST

-

Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:

-
cqlsh> SHOW HOST
-Connected to Prod_Cluster at 192.0.0.1:9042.
-
-
-
-
-

SHOW SESSION

-

Pretty prints a specific tracing session.

-

Usage: SHOW SESSION <session id>

-

Example usage:

-
cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8
-
-Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8
-
- activity                                                  | timestamp                  | source    | source_elapsed | client
------------------------------------------------------------+----------------------------+-----------+----------------+-----------
-                                        Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 |              0 | 127.0.0.1
- Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 |           3843 | 127.0.0.1
-...
-
-
-
-
-

SOURCE

-

Reads the contents of a file and executes each line as a CQL statement or special cqlsh command.

-

Usage: SOURCE <string filename>

-

Example usage:

-
cqlsh> SOURCE '/home/thobbs/commands.cql'
-
-
-
-
-

CAPTURE

-

Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured.

-

Usage:

-
CAPTURE '<file>';
-CAPTURE OFF;
-CAPTURE;
-
-
-

That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME.

-

Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session.

-

To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF.

-

To inspect the current capture configuration, use CAPTURE with no arguments.

-
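For example, the following appends query results to a file instead of echoing them, then turns capturing back off (the output path is a placeholder):

cqlsh> CAPTURE '~/cqlsh_output.txt';
cqlsh> SELECT * FROM system.local;
cqlsh> CAPTURE OFF;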
-
-

HELP

-

Gives information about cqlsh commands. To see available topics, enter HELP without any arguments. To see help on a -topic, use HELP <topic>. Also see the --browser argument for controlling what browser is used to display help.

-
-
-

TRACING

-

Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed.

-

Usage:

-
TRACING ON
-TRACING OFF
-
-
-
-
-

PAGING

-

Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it’s a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once.

-

Usage:

-
PAGING ON
-PAGING OFF
-PAGING <page size in rows>
-
-
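For example, to fetch results 100 rows at a time:

cqlsh> PAGING 100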
-
-
-

EXPAND

-

Enables or disables vertical printing of rows. Enabling EXPAND is useful when many columns are fetched, or the -contents of a single column are large.

-

Usage:

-
EXPAND ON
-EXPAND OFF
-
-
-
-
-

LOGIN

-

Authenticate as a specified Cassandra user for the current session.

-

Usage:

-
LOGIN <username> [<password>]
-
-
-
-
-

EXIT

-

Ends the current session and terminates the cqlsh process.

-

Usage:

-
EXIT
-QUIT
-
-
-
-
-

CLEAR

-

Clears the console.

-

Usage:

-
CLEAR
-CLS
-
-
-
-
-

DESCRIBE

-

Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema.

-

Usage:

-
DESCRIBE CLUSTER
-DESCRIBE SCHEMA
-DESCRIBE KEYSPACES
-DESCRIBE KEYSPACE <keyspace name>
-DESCRIBE TABLES
-DESCRIBE TABLE <table name>
-DESCRIBE INDEX <index name>
-DESCRIBE MATERIALIZED VIEW <view name>
-DESCRIBE TYPES
-DESCRIBE TYPE <type name>
-DESCRIBE FUNCTIONS
-DESCRIBE FUNCTION <function name>
-DESCRIBE AGGREGATES
-DESCRIBE AGGREGATE <aggregate function name>
-
-
-

In any of the commands, DESC may be used in place of DESCRIBE.

-

The DESCRIBE CLUSTER command prints the cluster name and partitioner:

-
cqlsh> DESCRIBE CLUSTER
-
-Cluster: Test Cluster
-Partitioner: Murmur3Partitioner
-
-
-

The DESCRIBE SCHEMA command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup.

-
-
-

COPY TO

-

Copies data from a table to a CSV file.

-

Usage:

-
COPY <table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the destination file. This can also be the special value STDOUT (without single quotes) to print the CSV to stdout.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY TO

-
-
MAXREQUESTS
-
The maximum number of token ranges to fetch simultaneously. Defaults to 6.
-
PAGESIZE
-
The number of rows to fetch in a single page. Defaults to 1000.
-
PAGETIMEOUT
-
The page timeout in seconds. By default this is 10 seconds per 1000 entries in the page size, or 10 seconds if the page size is smaller than 1000.
-
BEGINTOKEN, ENDTOKEN
-
Token range to export. Defaults to exporting the full ring.
-
MAXOUTPUTSIZE
-
The maximum size of the output file measured in number of lines; -beyond this maximum the output file will be split into segments. --1 means unlimited, and is the default.
-
ENCODING
-
The encoding used for characters. Defaults to utf8.
-
-
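As an illustration (the keyspace, table, column, and file names are placeholders), the following exports three columns to a CSV file with a header row and a larger page size:

cqlsh> COPY my_keyspace.users (id, name, email) TO '/tmp/users.csv' WITH HEADER = true AND PAGESIZE = 2000;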
-
-
-

COPY FROM

-

Copies data from a CSV file to a table.

-

Usage:

-
COPY <table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the CSV file will be copied to the table. A subset of columns to copy may be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the source file. This can also be the special value STDIN (without single quotes) to read the CSV data from stdin.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY FROM

-
-
INGESTRATE
-
The maximum number of rows to process per second. Defaults to 100000.
-
MAXROWS
-
The maximum number of rows to import. -1 means unlimited, and is the default.
-
SKIPROWS
-
A number of initial rows to skip. Defaults to 0.
-
SKIPCOLS
-
A comma-separated list of column names to ignore. By default, no columns are skipped.
-
MAXPARSEERRORS
-
The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default.
-
MAXINSERTERRORS
-
The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000.
-
ERRFILE
-
A file to store all rows that could not be imported; by default this is import_<ks>_<table>.err where <ks> is your keyspace and <table> is your table name.
-
MAXBATCHSIZE
-
The max number of rows inserted in a single batch. Defaults to 20.
-
MINBATCHSIZE
-
The min number of rows inserted in a single batch. Defaults to 2.
-
CHUNKSIZE
-
The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000.
-
-
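As an illustration (the keyspace, table, column, and file names are placeholders), the following imports a CSV file that contains a header row, throttling the ingest rate and using smaller chunks:

cqlsh> COPY my_keyspace.users (id, name, email) FROM '/tmp/users.csv' WITH HEADER = true AND INGESTRATE = 50000 AND CHUNKSIZE = 500;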
-
-

Shared COPY Options

-

Options that are common to both COPY TO and COPY FROM.

-
-
NULLVAL
-
The string placeholder for null values. Defaults to null.
-
HEADER
-
For COPY TO, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, -specifies whether the first line in the CSV input file contains column names. Defaults to false.
-
DECIMALSEP
-
The character that is used as the decimal point separator. Defaults to '.' (a period).
-
THOUSANDSSEP
-
The character that is used to separate thousands. Defaults to the empty string.
-
BOOLSTYLE
-
The string literal format for boolean values. Defaults to True,False.
-
NUMPROCESSES
-
The number of child worker processes to create for COPY tasks. Defaults to a max of 4 for COPY FROM and 16 -for COPY TO. However, at most (num_cores - 1) processes will be created.
-
MAXATTEMPTS
-
The maximum number of failed attempts to fetch a range of data (when using COPY TO) or insert a chunk of data -(when using COPY FROM) before giving up. Defaults to 5.
-
REPORTFREQUENCY
-
How often status updates are refreshed, in seconds. Defaults to 0.25.
-
RATEFILE
-
An optional file to output rate statistics to. By default, statistics are not output to a file.
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/index.html b/src/doc/3.11.7/tools/index.html deleted file mode 100644 index 773866cd4..000000000 --- a/src/doc/3.11.7/tools/index.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Tools" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Tools

-

This section describes the command line tools provided with Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool.html b/src/doc/3.11.7/tools/nodetool.html deleted file mode 100644 index 9384d34a9..000000000 --- a/src/doc/3.11.7/tools/nodetool.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-

Todo

-

Try to autogenerate this from Nodetool’s help.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/assassinate.html b/src/doc/3.11.7/tools/nodetool/assassinate.html deleted file mode 100644 index 414c6e898..000000000 --- a/src/doc/3.11.7/tools/nodetool/assassinate.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "assassinate" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

assassinate

-
-
-

Usage

-
NAME
-        nodetool assassinate - Forcefully remove a dead node without
-        re-replicating any data. Use as a last resort if you cannot removenode
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] assassinate [--] <ip_address>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <ip_address>
-            IP address of the endpoint to assassinate
-
-
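For example, to force-remove a dead node that could not be removed with removenode (the address below is a placeholder):

$ nodetool assassinate 192.0.2.15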
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/bootstrap.html b/src/doc/3.11.7/tools/nodetool/bootstrap.html deleted file mode 100644 index d97fc0c31..000000000 --- a/src/doc/3.11.7/tools/nodetool/bootstrap.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "bootstrap" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

bootstrap

-
-
-

Usage

-
NAME
-        nodetool bootstrap - Monitor/manage node's bootstrap process
-
-SYNOPSIS
-        nodetool bootstrap
-        nodetool [(-u <username> | --username <username>)]
-                [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-pp | --print-port)] bootstrap resume
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-COMMANDS
-        With no arguments, Display help information
-
-        resume
-            Resume bootstrap streaming
-
-
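For example, to resume an interrupted bootstrap on the local node:

$ nodetool bootstrap resume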
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/cleanup.html b/src/doc/3.11.7/tools/nodetool/cleanup.html deleted file mode 100644 index f2360d869..000000000 --- a/src/doc/3.11.7/tools/nodetool/cleanup.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "cleanup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cleanup

-
-
-

Usage

-
NAME
-        nodetool cleanup - Triggers the immediate cleanup of keys no longer
-        belonging to a node. By default, clean all keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] cleanup
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to cleanup simultanously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
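For example, to clean up a single keyspace using two concurrent compaction threads (the keyspace name is a placeholder):

$ nodetool cleanup -j 2 my_keyspace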
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/clearsnapshot.html b/src/doc/3.11.7/tools/nodetool/clearsnapshot.html deleted file mode 100644 index 76560d27a..000000000 --- a/src/doc/3.11.7/tools/nodetool/clearsnapshot.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clearsnapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clearsnapshot

-
-
-

Usage

-
NAME
-        nodetool clearsnapshot - Remove the snapshot with the given name from
-        the given keyspaces. If no snapshotName is specified we will remove all
-        snapshots
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clearsnapshot [--all]
-                [-t <snapshot_name>] [--] [<keyspaces>...]
-
-OPTIONS
-        --all
-            Removes all snapshots
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -t <snapshot_name>
-            Remove the snapshot with a given name
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces>...]
-            Remove snapshots from the given keyspaces
-
-
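For example, to remove a named snapshot from one keyspace (the snapshot and keyspace names are placeholders):

$ nodetool clearsnapshot -t before_upgrade -- my_keyspace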
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/clientstats.html b/src/doc/3.11.7/tools/nodetool/clientstats.html deleted file mode 100644 index 720487b15..000000000 --- a/src/doc/3.11.7/tools/nodetool/clientstats.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clientstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clientstats

-
-
-

Usage

-
NAME
-        nodetool clientstats - Print information about connected clients
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clientstats [--all]
-                [--by-protocol] [--clear-history]
-
-OPTIONS
-        --all
-            Lists all connections
-
-        --by-protocol
-            Lists most recent client connections by protocol version
-
-        --clear-history
-            Clear the history of connected clients
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/compact.html b/src/doc/3.11.7/tools/nodetool/compact.html deleted file mode 100644 index 2f46d498d..000000000 --- a/src/doc/3.11.7/tools/nodetool/compact.html +++ /dev/null @@ -1,149 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compact" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compact

-
-
-

Usage

-
NAME
-        nodetool compact - Force a (major) compaction on one or more tables or
-        user-defined compaction on given SSTables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compact
-                [(-et <end_token> | --end-token <end_token>)] [(-s | --split-output)]
-                [(-st <start_token> | --start-token <start_token>)] [--user-defined]
-                [--] [<keyspace> <tables>...] or <SSTable file>...
-
-OPTIONS
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which compaction range ends
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s, --split-output
-            Use -s to not create a single big file
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the compaction range starts
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --user-defined
-            Use --user-defined to submit listed files for user-defined
-            compaction
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...] or <SSTable file>...
-            The keyspace followed by one or many tables or list of SSTable data
-            files when using --user-defined
-
-
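For example, to force a major compaction of a single table (the keyspace and table names are placeholders):

$ nodetool compact my_keyspace my_table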
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/compactionhistory.html b/src/doc/3.11.7/tools/nodetool/compactionhistory.html deleted file mode 100644 index eba07d124..000000000 --- a/src/doc/3.11.7/tools/nodetool/compactionhistory.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionhistory" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionhistory

-
-
-

Usage

-
NAME
-        nodetool compactionhistory - Print history of compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionhistory
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/compactionstats.html b/src/doc/3.11.7/tools/nodetool/compactionstats.html deleted file mode 100644 index 8344ecffb..000000000 --- a/src/doc/3.11.7/tools/nodetool/compactionstats.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionstats

-
-
-

Usage

-
NAME
-        nodetool compactionstats - Print statistics on compactions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/decommission.html b/src/doc/3.11.7/tools/nodetool/decommission.html deleted file mode 100644 index 9e8cd4b27..000000000 --- a/src/doc/3.11.7/tools/nodetool/decommission.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "decommission" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

decommission

-
-
-

Usage

-
NAME
-        nodetool decommission - Decommission the *node I am connecting to*
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] decommission [(-f | --force)]
-
-OPTIONS
-        -f, --force
-            Force decommission of this node even when it reduces the number of
-            replicas to below configured RF
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/describecluster.html b/src/doc/3.11.7/tools/nodetool/describecluster.html deleted file mode 100644 index 92bbf95c8..000000000 --- a/src/doc/3.11.7/tools/nodetool/describecluster.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describecluster" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describecluster

-
-
-

Usage

-
NAME
-        nodetool describecluster - Print the name, snitch, partitioner and
-        schema version of a cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describecluster
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/describering.html b/src/doc/3.11.7/tools/nodetool/describering.html deleted file mode 100644 index e2189ec8e..000000000 --- a/src/doc/3.11.7/tools/nodetool/describering.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describering" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describering

-
-
-

Usage

-
NAME
-        nodetool describering - Shows the token ranges info of a given keyspace
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describering [--] <keyspace>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            The keyspace name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/disableauditlog.html b/src/doc/3.11.7/tools/nodetool/disableauditlog.html deleted file mode 100644 index c9c791db0..000000000 --- a/src/doc/3.11.7/tools/nodetool/disableauditlog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableauditlog

-
-
-

Usage

-
NAME
-        nodetool disableauditlog - Disable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableauditlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/disableautocompaction.html b/src/doc/3.11.7/tools/nodetool/disableautocompaction.html deleted file mode 100644 index 3926764ac..000000000 --- a/src/doc/3.11.7/tools/nodetool/disableautocompaction.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableautocompaction

-
-
-

Usage

-
NAME
-        nodetool disableautocompaction - Disable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/disablebackup.html b/src/doc/3.11.7/tools/nodetool/disablebackup.html deleted file mode 100644 index 775e10c4f..000000000 --- a/src/doc/3.11.7/tools/nodetool/disablebackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebackup

-
-
-

Usage

-
NAME
-        nodetool disablebackup - Disable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/disablebinary.html b/src/doc/3.11.7/tools/nodetool/disablebinary.html deleted file mode 100644 index 420c4081b..000000000 --- a/src/doc/3.11.7/tools/nodetool/disablebinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebinary

-
-
-

Usage

-
NAME
-        nodetool disablebinary - Disable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/disablefullquerylog.html b/src/doc/3.11.7/tools/nodetool/disablefullquerylog.html deleted file mode 100644 index 678098636..000000000 --- a/src/doc/3.11.7/tools/nodetool/disablefullquerylog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool disablefullquerylog - Disable the full query log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablefullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/disablegossip.html b/src/doc/3.11.7/tools/nodetool/disablegossip.html deleted file mode 100644 index dd123799b..000000000 --- a/src/doc/3.11.7/tools/nodetool/disablegossip.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablegossip

-
-
-

Usage

-
NAME
-        nodetool disablegossip - Disable gossip (effectively marking the node
-        down)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/disablehandoff.html b/src/doc/3.11.7/tools/nodetool/disablehandoff.html deleted file mode 100644 index cca69e5a5..000000000 --- a/src/doc/3.11.7/tools/nodetool/disablehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehandoff

-
-
-

Usage

-
NAME
-        nodetool disablehandoff - Disable storing hinted handoffs
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/disablehintsfordc.html b/src/doc/3.11.7/tools/nodetool/disablehintsfordc.html deleted file mode 100644 index b6b2307e9..000000000 --- a/src/doc/3.11.7/tools/nodetool/disablehintsfordc.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool disablehintsfordc - Disable hints for a data center
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <datacenter>
-            The data center to disable
-
-
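For example, to stop storing hints destined for one data center (the data center name is a placeholder):

$ nodetool disablehintsfordc DC2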
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/disableoldprotocolversions.html b/src/doc/3.11.7/tools/nodetool/disableoldprotocolversions.html deleted file mode 100644 index f40a8b95a..000000000 --- a/src/doc/3.11.7/tools/nodetool/disableoldprotocolversions.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool disableoldprotocolversions - Disable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/drain.html b/src/doc/3.11.7/tools/nodetool/drain.html deleted file mode 100644 index 0a246054f..000000000 --- a/src/doc/3.11.7/tools/nodetool/drain.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "drain" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

drain

-
-
-

Usage

-
NAME
-        nodetool drain - Drain the node (stop accepting writes and flush all
-        tables)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] drain
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/enableauditlog.html b/src/doc/3.11.7/tools/nodetool/enableauditlog.html deleted file mode 100644 index d57ec5acb..000000000 --- a/src/doc/3.11.7/tools/nodetool/enableauditlog.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableauditlog

-
-
-

Usage

-
NAME
-        nodetool enableauditlog - Enable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableauditlog
-                [--excluded-categories <excluded_categories>]
-                [--excluded-keyspaces <excluded_keyspaces>]
-                [--excluded-users <excluded_users>]
-                [--included-categories <included_categories>]
-                [--included-keyspaces <included_keyspaces>]
-                [--included-users <included_users>] [--logger <logger>]
-
-OPTIONS
-        --excluded-categories <excluded_categories>
-            Comma separated list of Audit Log Categories to be excluded for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --excluded-keyspaces <excluded_keyspaces>
-            Comma separated list of keyspaces to be excluded for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --excluded-users <excluded_users>
-            Comma separated list of users to be excluded for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --included-categories <included_categories>
-            Comma separated list of Audit Log Categories to be included for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --included-keyspaces <included_keyspaces>
-            Comma separated list of keyspaces to be included for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --included-users <included_users>
-            Comma separated list of users to be included for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        --logger <logger>
-            Logger name to be used for AuditLogging. Default BinAuditLogger. If
-            not set the value from cassandra.yaml will be used
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
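As an illustration using only the options shown above (the keyspace and user names are placeholders), audit logging can be enabled for a single keyspace while excluding a user:

$ nodetool enableauditlog --included-keyspaces my_keyspace --excluded-users anonymous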
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/enableautocompaction.html b/src/doc/3.11.7/tools/nodetool/enableautocompaction.html deleted file mode 100644 index d3edcc52e..000000000 --- a/src/doc/3.11.7/tools/nodetool/enableautocompaction.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableautocompaction

-
-
-

Usage

-
NAME
-        nodetool enableautocompaction - Enable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
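For illustration only (keyspace and table names are made up), re-enabling autocompaction for two tables might look like this, with `--` separating the options from the arguments:

```bash
# Re-enable autocompaction for the users and events tables in the app_data keyspace
nodetool enableautocompaction -- app_data users events
```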
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/enablebackup.html b/src/doc/3.11.7/tools/nodetool/enablebackup.html deleted file mode 100644 index 5a2150f33..000000000 --- a/src/doc/3.11.7/tools/nodetool/enablebackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebackup

-
-
-

Usage

-
NAME
-        nodetool enablebackup - Enable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/enablebinary.html b/src/doc/3.11.7/tools/nodetool/enablebinary.html deleted file mode 100644 index e9daeeda2..000000000 --- a/src/doc/3.11.7/tools/nodetool/enablebinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebinary

-
-
-

Usage

-
NAME
-        nodetool enablebinary - Reenable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/enablefullquerylog.html b/src/doc/3.11.7/tools/nodetool/enablefullquerylog.html deleted file mode 100644 index fa999eacf..000000000 --- a/src/doc/3.11.7/tools/nodetool/enablefullquerylog.html +++ /dev/null @@ -1,154 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool enablefullquerylog - Enable full query logging; defaults for
-        the options are configured in cassandra.yaml
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablefullquerylog
-                [--archive-command <archive_command>] [--blocking]
-                [--max-archive-retries <archive_retries>]
-                [--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-                [--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-        --archive-command <archive_command>
-            Command that will handle archiving rolled full query log files.
-            Format is "/path/to/script.sh %path" where %path will be replaced
-            with the file to archive
-
-        --blocking
-            Whether to block producers or drop samples if the queue is full.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --max-archive-retries <archive_retries>
-            Max number of archive retries.
-
-        --max-log-size <max_log_size>
-            How many bytes of log data to store before dropping segments. Might
-            not be respected if a log file hasn't rolled so it can be deleted.
-
-        --max-queue-weight <max_queue_weight>
-            Maximum number of bytes of query data to queue to disk before
-            blocking or dropping samples.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        --path <path>
-            Path to store the full query log at. Will have its contents
-            recursively deleted.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        --roll-cycle <roll_cycle>
-            How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
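A minimal sketch of enabling the log, assuming a writable directory at /var/log/cassandra/fql; the path, roll cycle, and size cap are illustrative values rather than defaults:

```bash
# Log full queries to a dedicated directory, rolling hourly and keeping
# roughly 1 GiB of segments before older ones are dropped.
nodetool enablefullquerylog \
    --path /var/log/cassandra/fql \
    --roll-cycle HOURLY \
    --max-log-size 1073741824
```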
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/enablegossip.html b/src/doc/3.11.7/tools/nodetool/enablegossip.html deleted file mode 100644 index 72edf0dee..000000000 --- a/src/doc/3.11.7/tools/nodetool/enablegossip.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablegossip

-
-
-

Usage

-
NAME
-        nodetool enablegossip - Reenable gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/enablehandoff.html b/src/doc/3.11.7/tools/nodetool/enablehandoff.html deleted file mode 100644 index b34b60ac1..000000000 --- a/src/doc/3.11.7/tools/nodetool/enablehandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehandoff

-
-
-

Usage

-
NAME
-        nodetool enablehandoff - Reenable future hints storing on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/enablehintsfordc.html b/src/doc/3.11.7/tools/nodetool/enablehintsfordc.html deleted file mode 100644 index 83604681f..000000000 --- a/src/doc/3.11.7/tools/nodetool/enablehintsfordc.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool enablehintsfordc - Enable hints for a data center that was
-        previously disabled
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to enable
-
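As an illustration (the data center name dc2 is made up), hints for a previously disabled data center could be re-enabled with:

```bash
# Re-enable hint delivery for the dc2 data center
nodetool enablehintsfordc -- dc2
```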
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/enableoldprotocolversions.html b/src/doc/3.11.7/tools/nodetool/enableoldprotocolversions.html deleted file mode 100644 index 1daaab88e..000000000 --- a/src/doc/3.11.7/tools/nodetool/enableoldprotocolversions.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool enableoldprotocolversions - Enable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/failuredetector.html b/src/doc/3.11.7/tools/nodetool/failuredetector.html deleted file mode 100644 index 266dd7d15..000000000 --- a/src/doc/3.11.7/tools/nodetool/failuredetector.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "failuredetector" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

failuredetector

-
-
-

Usage

-
NAME
-        nodetool failuredetector - Shows the failure detector information for
-        the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] failuredetector
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/flush.html b/src/doc/3.11.7/tools/nodetool/flush.html deleted file mode 100644 index 2dd1293dd..000000000 --- a/src/doc/3.11.7/tools/nodetool/flush.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "flush" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

flush

-
-
-

Usage

-
NAME
-        nodetool flush - Flush one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] flush [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
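For example (keyspace and table names are illustrative), flushing two specific tables might look like this; run with no arguments, the command flushes every keyspace:

```bash
# Flush the memtables of the users and events tables in the app_data keyspace
nodetool flush -- app_data users events
```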
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/garbagecollect.html b/src/doc/3.11.7/tools/nodetool/garbagecollect.html deleted file mode 100644 index 84d59386b..000000000 --- a/src/doc/3.11.7/tools/nodetool/garbagecollect.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "garbagecollect" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

garbagecollect

-
-
-

Usage

-
NAME
-        nodetool garbagecollect - Remove deleted data from one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] garbagecollect
-                [(-g <granularity> | --granularity <granularity>)]
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -g <granularity>, --granularity <granularity>
-            Granularity of garbage removal. ROW (default) removes deleted
-            partitions and rows, CELL also removes overwritten or deleted cells.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to clean up simultaneously, set to 0 to use all
-            available compaction threads. Defaults to 1 so that collections of
-            newer tables can see the data is deleted and also remove tombstones.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
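A hedged example, with made-up keyspace and table names, showing the granularity and jobs options together:

```bash
# Remove deleted data from app_data.users at CELL granularity,
# using two compaction threads instead of the default single job.
nodetool garbagecollect -g CELL -j 2 -- app_data users
```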
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/gcstats.html b/src/doc/3.11.7/tools/nodetool/gcstats.html deleted file mode 100644 index d5c3bed30..000000000 --- a/src/doc/3.11.7/tools/nodetool/gcstats.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gcstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gcstats

-
-
-

Usage

-
NAME
-        nodetool gcstats - Print GC Statistics
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gcstats
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getbatchlogreplaythrottle.html b/src/doc/3.11.7/tools/nodetool/getbatchlogreplaythrottle.html deleted file mode 100644 index 680cdca31..000000000 --- a/src/doc/3.11.7/tools/nodetool/getbatchlogreplaythrottle.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool getbatchlogreplaythrottle - Print batchlog replay throttle in
-        KB/s. This is reduced proportionally to the number of nodes in the
-        cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getbatchlogreplaythrottle
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getcompactionthreshold.html b/src/doc/3.11.7/tools/nodetool/getcompactionthreshold.html deleted file mode 100644 index 0487f652c..000000000 --- a/src/doc/3.11.7/tools/nodetool/getcompactionthreshold.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool getcompactionthreshold - Print min and max compaction
-        thresholds for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthreshold [--]
-                <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace with a table
-
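For illustration (the keyspace and table names are made up), checking the thresholds for a single table might look like:

```bash
# Show the min/max SSTable compaction thresholds for app_data.users
nodetool getcompactionthreshold -- app_data users
```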
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getcompactionthroughput.html b/src/doc/3.11.7/tools/nodetool/getcompactionthroughput.html deleted file mode 100644 index 7cb0de0bb..000000000 --- a/src/doc/3.11.7/tools/nodetool/getcompactionthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool getcompactionthroughput - Print the MB/s throughput cap for
-        compaction in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getconcurrency.html b/src/doc/3.11.7/tools/nodetool/getconcurrency.html deleted file mode 100644 index 354ccea6f..000000000 --- a/src/doc/3.11.7/tools/nodetool/getconcurrency.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrency

-
-
-

Usage

-
NAME
-        nodetool getconcurrency - Get maximum concurrency for processing stages
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrency [--]
-                [stage-names]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [stage-names]
-            optional list of stage names, otherwise display all stages
-
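A small sketch of querying a couple of stages; the stage names below are examples, and omitting them lists every stage:

```bash
# Show the configured concurrency for the read and mutation stages
nodetool getconcurrency -- ReadStage MutationStage
```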
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getconcurrentcompactors.html b/src/doc/3.11.7/tools/nodetool/getconcurrentcompactors.html deleted file mode 100644 index 55bb097ee..000000000 --- a/src/doc/3.11.7/tools/nodetool/getconcurrentcompactors.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool getconcurrentcompactors - Get the number of concurrent
-        compactors in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentcompactors
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getconcurrentviewbuilders.html b/src/doc/3.11.7/tools/nodetool/getconcurrentviewbuilders.html deleted file mode 100644 index f93b19f1d..000000000 --- a/src/doc/3.11.7/tools/nodetool/getconcurrentviewbuilders.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool getconcurrentviewbuilders - Get the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentviewbuilders
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getendpoints.html b/src/doc/3.11.7/tools/nodetool/getendpoints.html deleted file mode 100644 index c059c2b0b..000000000 --- a/src/doc/3.11.7/tools/nodetool/getendpoints.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getendpoints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getendpoints

-
-
-

Usage

-
NAME
-        nodetool getendpoints - Print the endpoints that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getendpoints [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find the endpoint
-
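As an illustrative example (keyspace, table, and key are made up):

```bash
# List the endpoints that own partition key "42" in app_data.users
nodetool getendpoints -- app_data users 42
```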
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getinterdcstreamthroughput.html b/src/doc/3.11.7/tools/nodetool/getinterdcstreamthroughput.html deleted file mode 100644 index 7132dba7e..000000000 --- a/src/doc/3.11.7/tools/nodetool/getinterdcstreamthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getinterdcstreamthroughput - Print the Mb/s throughput cap for
-        inter-datacenter streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getinterdcstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getlogginglevels.html b/src/doc/3.11.7/tools/nodetool/getlogginglevels.html deleted file mode 100644 index 9a0fde4e3..000000000 --- a/src/doc/3.11.7/tools/nodetool/getlogginglevels.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getlogginglevels" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getlogginglevels

-
-
-

Usage

-
NAME
-        nodetool getlogginglevels - Get the runtime logging levels
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getlogginglevels
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getmaxhintwindow.html b/src/doc/3.11.7/tools/nodetool/getmaxhintwindow.html deleted file mode 100644 index 2b7251e5c..000000000 --- a/src/doc/3.11.7/tools/nodetool/getmaxhintwindow.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool getmaxhintwindow - Print the max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getmaxhintwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getreplicas.html b/src/doc/3.11.7/tools/nodetool/getreplicas.html deleted file mode 100644 index dc397a95f..000000000 --- a/src/doc/3.11.7/tools/nodetool/getreplicas.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getreplicas" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getreplicas

-
-
-

Usage

-
NAME
-        nodetool getreplicas - Print replicas for a given key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getreplicas [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find replicas
-
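For illustration, with made-up keyspace, table, and key values:

```bash
# Print the replicas holding partition key "42" in app_data.users
nodetool getreplicas -- app_data users 42
```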
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getseeds.html b/src/doc/3.11.7/tools/nodetool/getseeds.html deleted file mode 100644 index ba1ae7762..000000000 --- a/src/doc/3.11.7/tools/nodetool/getseeds.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getseeds

-
-
-

Usage

-
NAME
-        nodetool getseeds - Get the list of seed node IPs currently in use,
-        excluding this node's own IP
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getsstables.html b/src/doc/3.11.7/tools/nodetool/getsstables.html deleted file mode 100644 index c6df9235a..000000000 --- a/src/doc/3.11.7/tools/nodetool/getsstables.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getsstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getsstables

-
-
-

Usage

-
NAME
-        nodetool getsstables - Print the sstable filenames that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getsstables
-                [(-hf | --hex-format)] [--] <keyspace> <cfname> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hf, --hex-format
-            Specify the key in hexadecimal string format
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <key>
-            The keyspace, the column family, and the key
-
-
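A hedged example (keyspace, table, and key are made up); -hf would be added if the key were supplied as a hexadecimal string:

```bash
# Print the SSTable files that contain partition key "42" in app_data.users
nodetool getsstables -- app_data users 42
```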
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/getstreamthroughput.html b/src/doc/3.11.7/tools/nodetool/getstreamthroughput.html deleted file mode 100644 index 683c71f44..000000000 --- a/src/doc/3.11.7/tools/nodetool/getstreamthroughput.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getstreamthroughput - Print the Mb/s throughput cap for
-        streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/gettimeout.html b/src/doc/3.11.7/tools/nodetool/gettimeout.html deleted file mode 100644 index 1903721e5..000000000 --- a/src/doc/3.11.7/tools/nodetool/gettimeout.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettimeout

-
-
-

Usage

-
NAME
-        nodetool gettimeout - Print the timeout of the given type in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettimeout [--] <timeout_type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type>
-            The timeout type, one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
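For example, querying one of the timeout types listed above:

```bash
# Print the configured read timeout in milliseconds
nodetool gettimeout read
```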
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/gettraceprobability.html b/src/doc/3.11.7/tools/nodetool/gettraceprobability.html deleted file mode 100644 index 9b1fa7739..000000000 --- a/src/doc/3.11.7/tools/nodetool/gettraceprobability.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettraceprobability

-
-
-

Usage

-
NAME
-        nodetool gettraceprobability - Print the current trace probability value
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettraceprobability
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/gossipinfo.html b/src/doc/3.11.7/tools/nodetool/gossipinfo.html deleted file mode 100644 index 48b90216a..000000000 --- a/src/doc/3.11.7/tools/nodetool/gossipinfo.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gossipinfo" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gossipinfo

-
-
-

Usage

-
NAME
-        nodetool gossipinfo - Shows the gossip information for the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gossipinfo
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/handoffwindow.html b/src/doc/3.11.7/tools/nodetool/handoffwindow.html deleted file mode 100644 index 1a0d0c7d5..000000000 --- a/src/doc/3.11.7/tools/nodetool/handoffwindow.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "handoffwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

handoffwindow

-
-
-

Usage

-
NAME
-        nodetool handoffwindow - Print current hinted handoff window
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] handoffwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/help.html b/src/doc/3.11.7/tools/nodetool/help.html deleted file mode 100644 index faa6f46b9..000000000 --- a/src/doc/3.11.7/tools/nodetool/help.html +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "help" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

help

-
-
-

Usage

-
NAME
-        nodetool help - Display help information
-
-SYNOPSIS
-        nodetool help [--] [<command>...]
-
-OPTIONS
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <command>
-
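For example, to show the detailed help for a single command:

```bash
# Display the full help text for the garbagecollect command
nodetool help garbagecollect
```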
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/import.html b/src/doc/3.11.7/tools/nodetool/import.html deleted file mode 100644 index 765cdca86..000000000 --- a/src/doc/3.11.7/tools/nodetool/import.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "import" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

import

-
-
-

Usage

-
NAME
-        nodetool import - Import new SSTables to the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] import
-                [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-                [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-                [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-                <directory> ...
-
-OPTIONS
-        -c, --no-invalidate-caches
-            Don't invalidate the row cache when importing
-
-        -e, --extended-verify
-            Run an extended verify, verifying all values in the new sstables
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --keep-level
-            Keep the level on the new sstables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick import without verifying sstables, clearing row cache or
-            checking in which data directory to put the file
-
-        -r, --keep-repaired
-            Keep any repaired information from the sstables
-
-        -t, --no-tokens
-            Don't verify that all tokens in the new sstable are owned by the
-            current node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -v, --no-verify
-            Don't verify new sstables
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <directory> ...
-            The keyspace, table name and directories to import sstables from
-
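A hedged sketch, with made-up keyspace, table, and directory values, running the extended verification before the SSTables are accepted:

```bash
# Import SSTables for app_data.users from a staging directory with extended verification
nodetool import -e -- app_data users /var/lib/cassandra/staging/users
```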
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/info.html b/src/doc/3.11.7/tools/nodetool/info.html deleted file mode 100644 index adca8bd59..000000000 --- a/src/doc/3.11.7/tools/nodetool/info.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "info" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

info

-
-
-

Usage

-
NAME
-        nodetool info - Print node information (uptime, load, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] info [(-T | --tokens)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -T, --tokens
-            Display all tokens
-
-        -u <username>, --username <username>
-            Remote jmx agent username
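
A sketch of a typical invocation based on the synopsis above (host and port are hypothetical):

```bash
# Print node information, including all of the node's tokens
nodetool -h 127.0.0.1 -p 7199 info --tokens
```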
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/invalidatecountercache.html b/src/doc/3.11.7/tools/nodetool/invalidatecountercache.html deleted file mode 100644 index 126dcd1d8..000000000 --- a/src/doc/3.11.7/tools/nodetool/invalidatecountercache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatecountercache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-invalidatecountercache
-
-Usage
-
NAME
-        nodetool invalidatecountercache - Invalidate the counter cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatecountercache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/invalidatekeycache.html b/src/doc/3.11.7/tools/nodetool/invalidatekeycache.html deleted file mode 100644 index 2efbcf9f2..000000000 --- a/src/doc/3.11.7/tools/nodetool/invalidatekeycache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatekeycache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-invalidatekeycache
-
-Usage
-
NAME
-        nodetool invalidatekeycache - Invalidate the key cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatekeycache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/invalidaterowcache.html b/src/doc/3.11.7/tools/nodetool/invalidaterowcache.html deleted file mode 100644 index 62e6cad07..000000000 --- a/src/doc/3.11.7/tools/nodetool/invalidaterowcache.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidaterowcache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-invalidaterowcache
-
-Usage
-
NAME
-        nodetool invalidaterowcache - Invalidate the row cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidaterowcache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/join.html b/src/doc/3.11.7/tools/nodetool/join.html deleted file mode 100644 index b3fc04b36..000000000 --- a/src/doc/3.11.7/tools/nodetool/join.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "join" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-join
-
-Usage
-
NAME
-        nodetool join - Join the ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] join
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/listsnapshots.html b/src/doc/3.11.7/tools/nodetool/listsnapshots.html deleted file mode 100644 index 0a2b313c3..000000000 --- a/src/doc/3.11.7/tools/nodetool/listsnapshots.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "listsnapshots" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-listsnapshots
-
-Usage
-
NAME
-        nodetool listsnapshots - Lists all the snapshots along with the size on
-        disk and true size. True size is the total size of all SSTables which
-        are not backed up to disk. Size on disk is the total size of the snapshot on
-        disk. Total TrueDiskSpaceUsed does not perform any SSTable deduplication.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] listsnapshots
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/move.html b/src/doc/3.11.7/tools/nodetool/move.html deleted file mode 100644 index 7b0d22767..000000000 --- a/src/doc/3.11.7/tools/nodetool/move.html +++ /dev/null @@ -1,131 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "move" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-move
-
-Usage
-
NAME
-        nodetool move - Move node on the token ring to a new token
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] move [--] <new token>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <new token>
-            The new token.
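
As a sketch, and because a negative token looks like a command-line option, the '--' separator is useful here (the token value below is purely illustrative):

```bash
# Move this node to a new (hypothetical) token on the ring
nodetool move -- -9223372036854775808
```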
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/netstats.html b/src/doc/3.11.7/tools/nodetool/netstats.html deleted file mode 100644 index 3947b1cd4..000000000 --- a/src/doc/3.11.7/tools/nodetool/netstats.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "netstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-netstats
-
-Usage
-
NAME
-        nodetool netstats - Print network information on provided host
-        (connecting node by default)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] netstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/nodetool.html b/src/doc/3.11.7/tools/nodetool/nodetool.html deleted file mode 100644 index 86cb14c00..000000000 --- a/src/doc/3.11.7/tools/nodetool/nodetool.html +++ /dev/null @@ -1,223 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Nodetool" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-Nodetool
-
-Usage
-
-
-usage: nodetool [(-u <username> | --username <username>)]
-        [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-        [(-pw <password> | --password <password>)]
-        [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-        [(-pp | --print-port)] <command> [<args>]
-
-The most commonly used nodetool commands are:
-
-    assassinate - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode
-    bootstrap - Monitor/manage node's bootstrap process
-    cleanup - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces
-    clearsnapshot - Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots
-    clientstats - Print information about connected clients
-    compact - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables
-    compactionhistory - Print history of compaction
-    compactionstats - Print statistics on compactions
-    decommission - Decommission the node I am connecting to
-    describecluster - Print the name, snitch, partitioner and schema version of a cluster
-    describering - Shows the token ranges info of a given keyspace
-    disableauditlog - Disable the audit log
-    disableautocompaction - Disable autocompaction for the given keyspace and table
-    disablebackup - Disable incremental backup
-    disablebinary - Disable native transport (binary protocol)
-    disablefullquerylog - Disable the full query log
-    disablegossip - Disable gossip (effectively marking the node down)
-    disablehandoff - Disable storing hinted handoffs
-    disablehintsfordc - Disable hints for a data center
-    disableoldprotocolversions - Disable old protocol versions
-    drain - Drain the node (stop accepting writes and flush all tables)
-    enableauditlog - Enable the audit log
-    enableautocompaction - Enable autocompaction for the given keyspace and table
-    enablebackup - Enable incremental backup
-    enablebinary - Reenable native transport (binary protocol)
-    enablefullquerylog - Enable full query logging, defaults for the options are configured in cassandra.yaml
-    enablegossip - Reenable gossip
-    enablehandoff - Reenable future hints storing on the current node
-    enablehintsfordc - Enable hints for a data center that was previously disabled
-    enableoldprotocolversions - Enable old protocol versions
-    failuredetector - Shows the failure detector information for the cluster
-    flush - Flush one or more tables
-    garbagecollect - Remove deleted data from one or more tables
-    gcstats - Print GC Statistics
-    getbatchlogreplaythrottle - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster.
-    getcompactionthreshold - Print min and max compaction thresholds for a given table
-    getcompactionthroughput - Print the MB/s throughput cap for compaction in the system
-    getconcurrency - Get maximum concurrency for processing stages
-    getconcurrentcompactors - Get the number of concurrent compactors in the system.
-    getconcurrentviewbuilders - Get the number of concurrent view builders in the system
-    getendpoints - Print the end points that own the key
-    getinterdcstreamthroughput - Print the Mb/s throughput cap for inter-datacenter streaming in the system
-    getlogginglevels - Get the runtime logging levels
-    getmaxhintwindow - Print the max hint window in ms
-    getreplicas - Print replicas for a given key
-    getseeds - Get the currently in use seed node IP list excluding the node IP
-    getsstables - Print the sstable filenames that own the key
-    getstreamthroughput - Print the Mb/s throughput cap for streaming in the system
-    gettimeout - Print the timeout of the given type in ms
-    gettraceprobability - Print the current trace probability value
-    gossipinfo - Shows the gossip information for the cluster
-    handoffwindow - Print current hinted handoff window
-    help - Display help information
-    import - Import new SSTables to the system
-    info - Print node information (uptime, load, ...)
-    invalidatecountercache - Invalidate the counter cache
-    invalidatekeycache - Invalidate the key cache
-    invalidaterowcache - Invalidate the row cache
-    join - Join the ring
-    listsnapshots - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is the total size of the snapshot on disk. Total TrueDiskSpaceUsed does not perform any SSTable deduplication.
-    move - Move node on the token ring to a new token
-    netstats - Print network information on provided host (connecting node by default)
-    pausehandoff - Pause hints delivery process
-    profileload - Low footprint profiling of activity for a period of time
-    proxyhistograms - Print statistic histograms for network operations
-    rangekeysample - Shows the sampled keys held across all keyspaces
-    rebuild - Rebuild data by streaming from other nodes (similarly to bootstrap)
-    rebuild_index - A full rebuild of native secondary indexes for a given table
-    refresh - Load newly placed SSTables to the system without restart
-    refreshsizeestimates - Refresh system.size_estimates
-    reloadlocalschema - Reload local node schema from system tables
-    reloadseeds - Reload the seed node list from the seed node provider
-    reloadssl - Signals Cassandra to reload SSL certificates
-    reloadtriggers - Reload trigger classes
-    relocatesstables - Relocates sstables to the correct disk
-    removenode - Show status of current node removal, force completion of pending removal or remove provided ID
-    repair - Repair one or more tables
-    repair_admin - List and fail incremental repair sessions
-    replaybatchlog - Kick off batchlog replay and wait for finish
-    resetfullquerylog - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX
-    resetlocalschema - Reset node's local schema and resync
-    resumehandoff - Resume hints delivery process
-    ring - Print information about the token ring
-    scrub - Scrub (rebuild sstables for) one or more tables
-    setbatchlogreplaythrottle - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster.
-    setcachecapacity - Set global key, row, and counter cache capacities (in MB units)
-    setcachekeystosave - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable
-    setcompactionthreshold - Set min and max compaction thresholds for a given table
-    setcompactionthroughput - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling
-    setconcurrency - Set maximum concurrency for processing stage
-    setconcurrentcompactors - Set number of concurrent compactors in the system.
-    setconcurrentviewbuilders - Set the number of concurrent view builders in the system
-    sethintedhandoffthrottlekb - Set hinted handoff throttle in kb per second, per delivery thread.
-    setinterdcstreamthroughput - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling
-    setlogginglevel - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters.
-    setmaxhintwindow - Set the specified max hint window in ms
-    setstreamthroughput - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling
-    settimeout - Set the specified timeout in ms, or 0 to disable timeout
-    settraceprobability - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default
-    sjk - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk --help' for more information.
-    snapshot - Take a snapshot of specified keyspaces or a snapshot of the specified table
-    status - Print cluster information (state, load, IDs, ...)
-    statusautocompaction - Status of autocompaction of the given keyspace and table
-    statusbackup - Status of incremental backup
-    statusbinary - Status of native transport (binary protocol)
-    statusgossip - Status of gossip
-    statushandoff - Status of storing future hints on the current node
-    stop - Stop compaction
-    stopdaemon - Stop cassandra daemon
-    tablehistograms - Print statistic histograms for a given table
-    tablestats - Print statistics on tables
-    toppartitions - Sample and print the most active partitions
-    tpstats - Print usage statistics of thread pools
-    truncatehints - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified.
-    upgradesstables - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version)
-    verify - Verify (check data checksum for) one or more tables
-    version - Print cassandra version
-    viewbuildstatus - Show progress of a materialized view build
-
-See 'nodetool help <command>' for more information on a specific command.
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/pausehandoff.html b/src/doc/3.11.7/tools/nodetool/pausehandoff.html deleted file mode 100644 index 4b18ce5d0..000000000 --- a/src/doc/3.11.7/tools/nodetool/pausehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "pausehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-pausehandoff
-
-Usage
-
NAME
-        nodetool pausehandoff - Pause hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] pausehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/profileload.html b/src/doc/3.11.7/tools/nodetool/profileload.html deleted file mode 100644 index 295568b45..000000000 --- a/src/doc/3.11.7/tools/nodetool/profileload.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "profileload" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-profileload
-
-Usage
-
NAME
-        nodetool profileload - Low footprint profiling of activity for a period
-        of time
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] profileload [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
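
A hypothetical invocation based on the synopsis above (keyspace, table, and duration are made up for illustration):

```bash
# Sample activity on ks1.tbl1 for 10 seconds, reporting the top 5 results per sampler
nodetool profileload -k 5 -- ks1 tbl1 10000
```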
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/proxyhistograms.html b/src/doc/3.11.7/tools/nodetool/proxyhistograms.html deleted file mode 100644 index c9fc06220..000000000 --- a/src/doc/3.11.7/tools/nodetool/proxyhistograms.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "proxyhistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-proxyhistograms
-
-Usage
-
NAME
-        nodetool proxyhistograms - Print statistic histograms for network
-        operations
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] proxyhistograms
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/rangekeysample.html b/src/doc/3.11.7/tools/nodetool/rangekeysample.html deleted file mode 100644 index 8321ae391..000000000 --- a/src/doc/3.11.7/tools/nodetool/rangekeysample.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rangekeysample" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-rangekeysample
-
-Usage
-
NAME
-        nodetool rangekeysample - Shows the sampled keys held across all
-        keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rangekeysample
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/rebuild.html b/src/doc/3.11.7/tools/nodetool/rebuild.html deleted file mode 100644 index a065741ae..000000000 --- a/src/doc/3.11.7/tools/nodetool/rebuild.html +++ /dev/null @@ -1,148 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-rebuild
-
-Usage
-
NAME
-        nodetool rebuild - Rebuild data by streaming from other nodes (similarly
-        to bootstrap)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild
-                [(-ks <specific_keyspace> | --keyspace <specific_keyspace>)]
-                [(-s <specific_sources> | --sources <specific_sources>)]
-                [(-ts <specific_tokens> | --tokens <specific_tokens>)] [--]
-                <src-dc-name>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -ks <specific_keyspace>, --keyspace <specific_keyspace>
-            Use -ks to rebuild a specific keyspace.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <specific_sources>, --sources <specific_sources>
-            Use -s to specify hosts that this node should stream from when -ts
-            is used. Multiple hosts should be separated using commas (e.g.
-            127.0.0.1,127.0.0.2,...)
-
-        -ts <specific_tokens>, --tokens <specific_tokens>
-            Use -ts to rebuild specific token ranges, in the format of "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]".
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <src-dc-name>
-            Name of DC from which to select sources for streaming. By default,
-            pick any DC
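
For illustration, a rebuild of a single keyspace streamed from one datacenter could look like this (keyspace and datacenter names are hypothetical):

```bash
# Rebuild only ks1 on this node, streaming from nodes in DC1
nodetool rebuild -ks ks1 -- DC1
```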
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/rebuild_index.html b/src/doc/3.11.7/tools/nodetool/rebuild_index.html deleted file mode 100644 index 6dcf17e77..000000000 --- a/src/doc/3.11.7/tools/nodetool/rebuild_index.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild_index" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-rebuild_index
-
-Usage
-
NAME
-        nodetool rebuild_index - A full rebuild of native secondary indexes for
-        a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild_index [--] <keyspace>
-                <table> <indexName...>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <indexName...>
-            The keyspace and table name followed by a list of index names
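
A hypothetical example (keyspace, table, and index names are invented):

```bash
# Rebuild two secondary indexes on ks1.tbl1
nodetool rebuild_index -- ks1 tbl1 tbl1_email_idx tbl1_country_idx
```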
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/refresh.html b/src/doc/3.11.7/tools/nodetool/refresh.html deleted file mode 100644 index 539326a4e..000000000 --- a/src/doc/3.11.7/tools/nodetool/refresh.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refresh" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-refresh
-
-Usage
-
NAME
-        nodetool refresh - Load newly placed SSTables to the system without
-        restart
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refresh [--] <keyspace>
-                <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
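
A minimal sketch based on the synopsis above (keyspace and table names are hypothetical); the SSTable files are assumed to have already been copied into the table's data directory:

```bash
# Load newly copied SSTables for ks1.tbl1 without restarting the node
nodetool refresh -- ks1 tbl1
```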
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/refreshsizeestimates.html b/src/doc/3.11.7/tools/nodetool/refreshsizeestimates.html deleted file mode 100644 index b4f278bac..000000000 --- a/src/doc/3.11.7/tools/nodetool/refreshsizeestimates.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refreshsizeestimates" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-refreshsizeestimates
-
-Usage
-
NAME
-        nodetool refreshsizeestimates - Refresh system.size_estimates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refreshsizeestimates
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/reloadlocalschema.html b/src/doc/3.11.7/tools/nodetool/reloadlocalschema.html deleted file mode 100644 index 7d3a30b79..000000000 --- a/src/doc/3.11.7/tools/nodetool/reloadlocalschema.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-reloadlocalschema
-
-Usage
-
NAME
-        nodetool reloadlocalschema - Reload local node schema from system tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/reloadseeds.html b/src/doc/3.11.7/tools/nodetool/reloadseeds.html deleted file mode 100644 index 24f25a9a9..000000000 --- a/src/doc/3.11.7/tools/nodetool/reloadseeds.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-reloadseeds
-
-Usage
-
NAME
-        nodetool reloadseeds - Reload the seed node list from the seed node
-        provider
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/reloadssl.html b/src/doc/3.11.7/tools/nodetool/reloadssl.html deleted file mode 100644 index c7e84296a..000000000 --- a/src/doc/3.11.7/tools/nodetool/reloadssl.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadssl" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-reloadssl
-
-Usage
-
NAME
-        nodetool reloadssl - Signals Cassandra to reload SSL certificates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadssl
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/reloadtriggers.html b/src/doc/3.11.7/tools/nodetool/reloadtriggers.html deleted file mode 100644 index ae6176fc5..000000000 --- a/src/doc/3.11.7/tools/nodetool/reloadtriggers.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadtriggers" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-reloadtriggers
-
-Usage
-
NAME
-        nodetool reloadtriggers - Reload trigger classes
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadtriggers
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/relocatesstables.html b/src/doc/3.11.7/tools/nodetool/relocatesstables.html deleted file mode 100644 index d69baa7f6..000000000 --- a/src/doc/3.11.7/tools/nodetool/relocatesstables.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "relocatesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-relocatesstables
-
-Usage
-
NAME
-        nodetool relocatesstables - Relocates sstables to the correct disk
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] relocatesstables
-                [(-j <jobs> | --jobs <jobs>)] [--] <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to relocate simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
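
An illustrative invocation (keyspace and table names are hypothetical):

```bash
# Relocate ks1.tbl1 SSTables to the correct disks using two concurrent jobs
nodetool relocatesstables -j 2 -- ks1 tbl1
```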
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/removenode.html b/src/doc/3.11.7/tools/nodetool/removenode.html deleted file mode 100644 index 15731f33e..000000000 --- a/src/doc/3.11.7/tools/nodetool/removenode.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "removenode" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-removenode
-
-Usage
-
NAME
-        nodetool removenode - Show status of current node removal, force
-        completion of pending removal or remove provided ID
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] removenode [--]
-                <status>|<force>|<ID>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <status>|<force>|<ID>
-            Show status of current node removal, force completion of pending
-            removal, or remove provided ID
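
For illustration, a typical sequence first checks progress and then removes a node by host ID (the UUID below is made up):

```bash
# Check the status of any in-progress removal
nodetool removenode status
# Remove a dead node by its host ID
nodetool removenode -- 192a3b84-5c3e-4d70-9f0a-1d2e3f4a5b6c
```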
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/repair.html b/src/doc/3.11.7/tools/nodetool/repair.html deleted file mode 100644 index 07b7f6b73..000000000 --- a/src/doc/3.11.7/tools/nodetool/repair.html +++ /dev/null @@ -1,197 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-repair
-
-Usage
-
NAME
-        nodetool repair - Repair one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair
-                [(-dc <specific_dc> | --in-dc <specific_dc>)...]
-                [(-dcpar | --dc-parallel)] [(-et <end_token> | --end-token <end_token>)]
-                [(-force | --force)] [(-full | --full)]
-                [(-hosts <specific_host> | --in-hosts <specific_host>)...]
-                [(-j <job_threads> | --job-threads <job_threads>)]
-                [(-local | --in-local-dc)] [(-os | --optimise-streams)] [(-pl | --pull)]
-                [(-pr | --partitioner-range)] [(-prv | --preview)]
-                [(-seq | --sequential)]
-                [(-st <start_token> | --start-token <start_token>)] [(-tr | --trace)]
-                [(-vd | --validate)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -dc <specific_dc>, --in-dc <specific_dc>
-            Use -dc to repair specific datacenters
-
-        -dcpar, --dc-parallel
-            Use -dcpar to repair data centers in parallel.
-
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which repair range ends (inclusive)
-
-        -force, --force
-            Use -force to filter out down endpoints
-
-        -full, --full
-            Use -full to issue a full repair.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hosts <specific_host>, --in-hosts <specific_host>
-            Use -hosts to repair specific hosts
-
-        -j <job_threads>, --job-threads <job_threads>
-            Number of threads to run repair jobs. Usually this means number of
-            CFs to repair concurrently. WARNING: increasing this puts more load
-            on repairing nodes, so be careful. (default: 1, max: 4)
-
-        -local, --in-local-dc
-            Use -local to only repair against nodes in the same datacenter
-
-        -os, --optimise-streams
-            Use --optimise-streams to try to reduce the number of streams we do
-            (EXPERIMENTAL, see CASSANDRA-3200).
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pl, --pull
-            Use --pull to perform a one way repair where data is only streamed
-            from a remote node to this node.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pr, --partitioner-range
-            Use -pr to repair only the first range returned by the partitioner
-
-        -prv, --preview
-            Determine ranges and amount of data to be streamed, but don't
-            actually perform repair
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -seq, --sequential
-            Use -seq to carry out a sequential repair
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the repair range starts
-            (exclusive)
-
-        -tr, --trace
-            Use -tr to trace the repair. Traces are logged to
-            system_traces.events.
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -vd, --validate
-            Checks that repaired data is in sync between nodes. Out of sync
-            repaired data indicates a full repair should be run.
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
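
As a sketch of how the options above combine (keyspace and table names are hypothetical):

```bash
# Full repair of ks1.tbl1, limited to replicas in the local datacenter
nodetool repair -full -local -- ks1 tbl1
```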
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/repair_admin.html b/src/doc/3.11.7/tools/nodetool/repair_admin.html deleted file mode 100644 index 73e7b8138..000000000 --- a/src/doc/3.11.7/tools/nodetool/repair_admin.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair_admin" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-repair_admin
-
-Usage
-
NAME
-        nodetool repair_admin - list and fail incremental repair sessions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair_admin [(-a | --all)]
-                [(-f | --force)] [(-l | --list)] [(-x <cancel> | --cancel <cancel>)]
-
-OPTIONS
-        -a, --all
-            include completed and failed sessions
-
-        -f, --force
-            cancel repair session from a node other than the repair coordinator.
-            Attempting to cancel FINALIZED or FAILED sessions is an error.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --list
-            list repair sessions (default behavior)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -x <cancel>, --cancel <cancel>
-            cancel an incremental repair session
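
A hypothetical workflow using the options above (the session id is invented):

```bash
# List all incremental repair sessions, including completed and failed ones
nodetool repair_admin --list --all
# Cancel a specific session by id
nodetool repair_admin --cancel 5f70e990-6321-11ec-8f19-123456789abc
```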
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/replaybatchlog.html b/src/doc/3.11.7/tools/nodetool/replaybatchlog.html deleted file mode 100644 index c20f4ad6e..000000000 --- a/src/doc/3.11.7/tools/nodetool/replaybatchlog.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "replaybatchlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-replaybatchlog
-
-Usage
-
NAME
-        nodetool replaybatchlog - Kick off batchlog replay and wait for finish
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] replaybatchlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/resetfullquerylog.html b/src/doc/3.11.7/tools/nodetool/resetfullquerylog.html deleted file mode 100644 index 513a03848..000000000 --- a/src/doc/3.11.7/tools/nodetool/resetfullquerylog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetfullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-resetfullquerylog
-
-Usage
-
NAME
-        nodetool resetfullquerylog - Stop the full query log and clean files in
-        the configured full query log directory from cassandra.yaml as well as
-        JMX
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetfullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/resetlocalschema.html b/src/doc/3.11.7/tools/nodetool/resetlocalschema.html deleted file mode 100644 index 422128180..000000000 --- a/src/doc/3.11.7/tools/nodetool/resetlocalschema.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-
-resetlocalschema
-
-Usage
-
NAME
-        nodetool resetlocalschema - Reset node's local schema and resync
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/resumehandoff.html b/src/doc/3.11.7/tools/nodetool/resumehandoff.html deleted file mode 100644 index 689173a49..000000000 --- a/src/doc/3.11.7/tools/nodetool/resumehandoff.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resumehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resumehandoff

-
-
-

Usage

-
NAME
-        nodetool resumehandoff - Resume hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resumehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/ring.html b/src/doc/3.11.7/tools/nodetool/ring.html deleted file mode 100644 index 9a163d0a8..000000000 --- a/src/doc/3.11.7/tools/nodetool/ring.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "ring" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

ring

-
-
-

Usage

-
NAME
-        nodetool ring - Print information about the token ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] ring [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace>
-            Specify a keyspace for accurate ownership information (topology
-            awareness)
-
-
-
-
-
- - - - -
-
-
-
-
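The page removed above documents `nodetool ring`. As a quick illustration of its synopsis (the keyspace name `mykeyspace` is a placeholder, not taken from the original page):

```bash
# Print the token ring for one keyspace, resolving IPs to hostnames
$ nodetool ring --resolve-ip -- mykeyspace
```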
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/scrub.html b/src/doc/3.11.7/tools/nodetool/scrub.html deleted file mode 100644 index 514d24d0e..000000000 --- a/src/doc/3.11.7/tools/nodetool/scrub.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "scrub" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

scrub

-
-
-

Usage

-
NAME
-        nodetool scrub - Scrub (rebuild sstables for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] scrub
-                [(-j <jobs> | --jobs <jobs>)] [(-n | --no-validate)]
-                [(-ns | --no-snapshot)] [(-r | --reinsert-overflowed-ttl)]
-                [(-s | --skip-corrupted)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to scrub simultaneously, set to 0 to use all
-            available compaction threads
-
-        -n, --no-validate
-            Do not validate columns using column validator
-
-        -ns, --no-snapshot
-            Do not snapshot the tables before scrubbing (by default a
-            pre-scrub snapshot is taken)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --reinsert-overflowed-ttl
-            Rewrites rows with overflowed expiration date affected by
-            CASSANDRA-14092 with the maximum supported expiration date of
-            2038-01-19T03:14:06+00:00. The rows are rewritten with the original
-            timestamp incremented by one millisecond to override/supersede any
-            potential tombstone that may have been generated during compaction
-            of the affected rows.
-
-        -s, --skip-corrupted
-            Skip corrupted partitions even when scrubbing counter tables.
-            (default false)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
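For reference, a sketch of how the removed `nodetool scrub` page's synopsis is typically used (the keyspace and table names are placeholders):

```bash
# Scrub two tables with 2 concurrent jobs, skipping the pre-scrub snapshot
$ nodetool scrub --jobs 2 --no-snapshot -- mykeyspace table1 table2
```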
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setbatchlogreplaythrottle.html b/src/doc/3.11.7/tools/nodetool/setbatchlogreplaythrottle.html deleted file mode 100644 index 23458af3a..000000000 --- a/src/doc/3.11.7/tools/nodetool/setbatchlogreplaythrottle.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool setbatchlogreplaythrottle - Set batchlog replay throttle in KB
-        per second, or 0 to disable throttling. This will be reduced
-        proportionally to the number of nodes in the cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setbatchlogreplaythrottle [--]
-                <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
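An illustrative call matching the removed `setbatchlogreplaythrottle` page (the throttle value is an arbitrary example):

```bash
# Throttle batchlog replay to 1024 KB per second; 0 would disable throttling
$ nodetool setbatchlogreplaythrottle 1024
```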
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setcachecapacity.html b/src/doc/3.11.7/tools/nodetool/setcachecapacity.html deleted file mode 100644 index 867ba1b37..000000000 --- a/src/doc/3.11.7/tools/nodetool/setcachecapacity.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachecapacity" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachecapacity

-
-
-

Usage

-
NAME
-        nodetool setcachecapacity - Set global key, row, and counter cache
-        capacities (in MB units)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachecapacity [--]
-                <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-            Key cache, row cache, and counter cache (in MB)
-
-
-
-
-
- - - - -
-
-
-
-
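A hedged example for the removed `setcachecapacity` page; the capacities below are arbitrary:

```bash
# Key cache 100 MB, row cache 0 MB (disabled), counter cache 50 MB
$ nodetool setcachecapacity 100 0 50
```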
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setcachekeystosave.html b/src/doc/3.11.7/tools/nodetool/setcachekeystosave.html deleted file mode 100644 index e3fe84df9..000000000 --- a/src/doc/3.11.7/tools/nodetool/setcachekeystosave.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachekeystosave" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachekeystosave

-
-
-

Usage

-
NAME
-        nodetool setcachekeystosave - Set number of keys saved by each cache for
-        faster post-restart warmup. 0 to disable
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachekeystosave [--]
-                <key-cache-keys-to-save> <row-cache-keys-to-save>
-                <counter-cache-keys-to-save>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-keys-to-save> <row-cache-keys-to-save>
-        <counter-cache-keys-to-save>
-            The number of keys saved by each cache. 0 to disable
-
-
-
-
-
- - - - -
-
-
-
-
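An illustrative invocation for the removed `setcachekeystosave` page (the values are arbitrary):

```bash
# Save 100 key-cache, 50 row-cache and 50 counter-cache keys for post-restart warmup
$ nodetool setcachekeystosave 100 50 50
```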
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setcompactionthreshold.html b/src/doc/3.11.7/tools/nodetool/setcompactionthreshold.html deleted file mode 100644 index c99a9a421..000000000 --- a/src/doc/3.11.7/tools/nodetool/setcompactionthreshold.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool setcompactionthreshold - Set min and max compaction thresholds
-        for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthreshold [--]
-                <keyspace> <table> <minthreshold> <maxthreshold>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <minthreshold> <maxthreshold>
-            The keyspace, the table, min and max threshold
-
-
-
-
-
- - - - -
-
-
-
-
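A sketch of the removed `setcompactionthreshold` synopsis in use (keyspace, table and threshold values are placeholders):

```bash
# Set min/max compaction thresholds of 4 and 32 for one table
$ nodetool setcompactionthreshold mykeyspace mytable 4 32
```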
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setcompactionthroughput.html b/src/doc/3.11.7/tools/nodetool/setcompactionthroughput.html deleted file mode 100644 index 08a176962..000000000 --- a/src/doc/3.11.7/tools/nodetool/setcompactionthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool setcompactionthroughput - Set the MB/s throughput cap for
-        compaction in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in MB, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
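For the removed `setcompactionthroughput` page, an illustrative (arbitrary) value:

```bash
# Cap compaction at 64 MB/s; 0 would disable throttling
$ nodetool setcompactionthroughput 64
```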
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setconcurrency.html b/src/doc/3.11.7/tools/nodetool/setconcurrency.html deleted file mode 100644 index 03d4a1da9..000000000 --- a/src/doc/3.11.7/tools/nodetool/setconcurrency.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrency

-
-
-

Usage

-
NAME
-        nodetool setconcurrency - Set maximum concurrency for processing stage
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrency [--]
-                <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-                <maximum-concurrency>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-        <maximum-concurrency>
-            Set concurrency for processing stage
-
-
-
-
-
- - - - -
-
-
-
-
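A hedged example for the removed `setconcurrency` page; the stage name and value below are assumptions, not taken from the page:

```bash
# Set the maximum concurrency for one processing stage (stage name is illustrative)
$ nodetool setconcurrency ReadStage 32
```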
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setconcurrentcompactors.html b/src/doc/3.11.7/tools/nodetool/setconcurrentcompactors.html deleted file mode 100644 index 884c78898..000000000 --- a/src/doc/3.11.7/tools/nodetool/setconcurrentcompactors.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool setconcurrentcompactors - Set number of concurrent compactors
-        in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentcompactors [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent compactors, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
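An illustrative value for the removed `setconcurrentcompactors` page:

```bash
# Allow four compactions to run concurrently
$ nodetool setconcurrentcompactors 4
```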
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setconcurrentviewbuilders.html b/src/doc/3.11.7/tools/nodetool/setconcurrentviewbuilders.html deleted file mode 100644 index a749a1c79..000000000 --- a/src/doc/3.11.7/tools/nodetool/setconcurrentviewbuilders.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool setconcurrentviewbuilders - Set the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentviewbuilders [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent view builders, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
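An illustrative value for the removed `setconcurrentviewbuilders` page:

```bash
# Allow two materialized-view builders to run concurrently
$ nodetool setconcurrentviewbuilders 2
```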
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/sethintedhandoffthrottlekb.html b/src/doc/3.11.7/tools/nodetool/sethintedhandoffthrottlekb.html deleted file mode 100644 index a14207e03..000000000 --- a/src/doc/3.11.7/tools/nodetool/sethintedhandoffthrottlekb.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sethintedhandoffthrottlekb" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sethintedhandoffthrottlekb

-
-
-

Usage

-
NAME
-        nodetool sethintedhandoffthrottlekb - Set hinted handoff throttle in kb
-        per second, per delivery thread.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sethintedhandoffthrottlekb
-                [--] <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second
-
-
-
-
-
- - - - -
-
-
-
-
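An arbitrary example for the removed `sethintedhandoffthrottlekb` page:

```bash
# Throttle hinted handoff to 2048 KB per second, per delivery thread
$ nodetool sethintedhandoffthrottlekb 2048
```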
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setinterdcstreamthroughput.html b/src/doc/3.11.7/tools/nodetool/setinterdcstreamthroughput.html deleted file mode 100644 index 5cefa88b8..000000000 --- a/src/doc/3.11.7/tools/nodetool/setinterdcstreamthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setinterdcstreamthroughput - Set the Mb/s throughput cap for
-        inter-datacenter streaming in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setinterdcstreamthroughput
-                [--] <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
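An arbitrary example for the removed `setinterdcstreamthroughput` page:

```bash
# Cap inter-datacenter streaming at 100 Mb/s; 0 would disable throttling
$ nodetool setinterdcstreamthroughput 100
```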
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setlogginglevel.html b/src/doc/3.11.7/tools/nodetool/setlogginglevel.html deleted file mode 100644 index 98bb95b03..000000000 --- a/src/doc/3.11.7/tools/nodetool/setlogginglevel.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setlogginglevel" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setlogginglevel

-
-
-

Usage

-
NAME
-        nodetool setlogginglevel - Set the log level threshold for a given
-        component or class. Will reset to the initial configuration if called
-        with no parameters.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setlogginglevel [--]
-                <component|class> <level>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <component|class> <level>
-            The component or class to change the level for and the log level
-            threshold to set. Will reset to initial level if omitted. Available
-            components: bootstrap, compaction, repair, streaming, cql, ring
-
-
-
-
-
- - - - -
-
-
-
-
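A sketch for the removed `setlogginglevel` page, using one of the components listed in its help text:

```bash
# Raise the compaction component to DEBUG
$ nodetool setlogginglevel compaction DEBUG

# Reset logging to the initial configuration
$ nodetool setlogginglevel
```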
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setmaxhintwindow.html b/src/doc/3.11.7/tools/nodetool/setmaxhintwindow.html deleted file mode 100644 index 42b51e29b..000000000 --- a/src/doc/3.11.7/tools/nodetool/setmaxhintwindow.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool setmaxhintwindow - Set the specified max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setmaxhintwindow [--]
-                <value_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_ms>
-            Value of maxhintwindow in ms
-
-
-
-
-
- - - - -
-
-
-
-
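An arbitrary example for the removed `setmaxhintwindow` page:

```bash
# Set the max hint window to three hours (10800000 ms)
$ nodetool setmaxhintwindow 10800000
```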
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/setstreamthroughput.html b/src/doc/3.11.7/tools/nodetool/setstreamthroughput.html deleted file mode 100644 index 1f0cfd735..000000000 --- a/src/doc/3.11.7/tools/nodetool/setstreamthroughput.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setstreamthroughput - Set the Mb/s throughput cap for streaming
-        in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setstreamthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
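An arbitrary example for the removed `setstreamthroughput` page:

```bash
# Cap streaming at 200 Mb/s; 0 would disable throttling
$ nodetool setstreamthroughput 200
```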
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/settimeout.html b/src/doc/3.11.7/tools/nodetool/settimeout.html deleted file mode 100644 index b7a166441..000000000 --- a/src/doc/3.11.7/tools/nodetool/settimeout.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settimeout

-
-
-

Usage

-
NAME
-        nodetool settimeout - Set the specified timeout in ms, or 0 to disable
-        timeout
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settimeout [--] <timeout_type>
-                <timeout_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type> <timeout_in_ms>
-            Timeout type followed by value in ms (0 disables socket streaming
-            timeout). Type should be one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
-
-
-
-
- - - - -
-
-
-
-
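A sketch for the removed `settimeout` page, using one of the timeout types listed in its help text (the value is arbitrary):

```bash
# Set the read timeout to 10000 ms
$ nodetool settimeout read 10000
```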
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/settraceprobability.html b/src/doc/3.11.7/tools/nodetool/settraceprobability.html deleted file mode 100644 index 0816d83b8..000000000 --- a/src/doc/3.11.7/tools/nodetool/settraceprobability.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settraceprobability

-
-
-

Usage

-
NAME
-        nodetool settraceprobability - Sets the probability of tracing any
-        given request. 0 disables tracing, 1 traces every request; the
-        default is 0
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settraceprobability [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Trace probability between 0 and 1 (ex: 0.2)
-
-
-
-
-
- - - - -
-
-
-
-
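An arbitrary example for the removed `settraceprobability` page:

```bash
# Trace roughly one request in a thousand
$ nodetool settraceprobability 0.001
```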
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/sjk.html b/src/doc/3.11.7/tools/nodetool/sjk.html deleted file mode 100644 index d28546f02..000000000 --- a/src/doc/3.11.7/tools/nodetool/sjk.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sjk" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sjk

-
-
-

Usage

-
NAME
-        nodetool sjk - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk
-        --help' for more information.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sjk [--] [<args>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <args>
-            Arguments passed as is to 'Swiss Java Knife'.
-
-
-
-
-
- - - - -
-
-
-
-
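A minimal sketch for the removed `sjk` page; arguments after `sjk` are passed through to 'Swiss Java Knife' unchanged:

```bash
# List the available 'Swiss Java Knife' commands
$ nodetool sjk --help
```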
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/snapshot.html b/src/doc/3.11.7/tools/nodetool/snapshot.html deleted file mode 100644 index 174c0dbae..000000000 --- a/src/doc/3.11.7/tools/nodetool/snapshot.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "snapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

snapshot

-
-
-

Usage

-
NAME
-        nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-        of the specified table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] snapshot
-                [(-cf <table> | --column-family <table> | --table <table>)]
-                [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-                [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-        -cf <table>, --column-family <table>, --table <table>
-            The table name (you must specify one and only one keyspace for using
-            this option)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-            The list of keyspace.table names to snapshot (when using this
-            option you must not specify only a keyspace)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -sf, --skip-flush
-            Do not flush memtables before snapshotting (snapshot will not
-            contain unflushed data)
-
-        -t <tag>, --tag <tag>
-            The name of the snapshot
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspaces...>]
-            List of keyspaces. By default, all keyspaces
-
-
-
-
-
- - - - -
-
-
-
-
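A sketch for the removed `snapshot` page (keyspace, table and tag names are placeholders):

```bash
# Snapshot a single table with a custom tag
$ nodetool snapshot --table mytable --tag before-upgrade -- mykeyspace

# Snapshot all keyspaces with the default tag
$ nodetool snapshot
```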
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/status.html b/src/doc/3.11.7/tools/nodetool/status.html deleted file mode 100644 index f60541911..000000000 --- a/src/doc/3.11.7/tools/nodetool/status.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "status" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

status

-
-
-

Usage

-
NAME
-        nodetool status - Print cluster information (state, load, IDs, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] status [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace>]
-            The keyspace name
-
-
-
-
-
- - - - -
-
-
-
-
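A sketch for the removed `status` page (the keyspace name is a placeholder):

```bash
# Show cluster status scoped to one keyspace, resolving IPs to hostnames
$ nodetool status --resolve-ip -- mykeyspace
```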
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/statusautocompaction.html b/src/doc/3.11.7/tools/nodetool/statusautocompaction.html deleted file mode 100644 index a786e172e..000000000 --- a/src/doc/3.11.7/tools/nodetool/statusautocompaction.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusautocompaction

-
-
-

Usage

-
NAME
-        nodetool statusautocompaction - Status of autocompaction of the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusautocompaction
-                [(-a | --all)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --all
-            Show auto compaction status for each keyspace/table
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
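A sketch for the removed `statusautocompaction` page (keyspace and table names are placeholders):

```bash
# Check auto-compaction status for a single table
$ nodetool statusautocompaction -- mykeyspace mytable

# Check it for every keyspace and table
$ nodetool statusautocompaction --all
```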
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/statusbackup.html b/src/doc/3.11.7/tools/nodetool/statusbackup.html deleted file mode 100644 index 1b189bb4d..000000000 --- a/src/doc/3.11.7/tools/nodetool/statusbackup.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbackup

-
-
-

Usage

-
NAME
-        nodetool statusbackup - Status of incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/statusbinary.html b/src/doc/3.11.7/tools/nodetool/statusbinary.html deleted file mode 100644 index 160bde1ca..000000000 --- a/src/doc/3.11.7/tools/nodetool/statusbinary.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbinary

-
-
-

Usage

-
NAME
-        nodetool statusbinary - Status of native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/statusgossip.html b/src/doc/3.11.7/tools/nodetool/statusgossip.html deleted file mode 100644 index 218870a59..000000000 --- a/src/doc/3.11.7/tools/nodetool/statusgossip.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusgossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusgossip

-
-
-

Usage

-
NAME
-        nodetool statusgossip - Status of gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusgossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/statushandoff.html b/src/doc/3.11.7/tools/nodetool/statushandoff.html deleted file mode 100644 index b651a9f29..000000000 --- a/src/doc/3.11.7/tools/nodetool/statushandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statushandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statushandoff

-
-
-

Usage

-
NAME
-        nodetool statushandoff - Status of storing future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statushandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/stop.html b/src/doc/3.11.7/tools/nodetool/stop.html deleted file mode 100644 index 67c796a6e..000000000 --- a/src/doc/3.11.7/tools/nodetool/stop.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stop" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stop

-
-
-

Usage

-
NAME
-        nodetool stop - Stop compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stop
-                [(-id <compactionId> | --compaction-id <compactionId>)] [--] <compaction
-                type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -id <compactionId>, --compaction-id <compactionId>
-            Use -id to stop a compaction by the specified id. Ids can be found
-            in the transaction log files whose name starts with compaction_,
-            located in the table transactions folder.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <compaction type>
-            Supported types are COMPACTION, VALIDATION, CLEANUP, SCRUB,
-            UPGRADE_SSTABLES, INDEX_BUILD, TOMBSTONE_COMPACTION, ANTICOMPACTION,
-            VERIFY, VIEW_BUILD, INDEX_SUMMARY, RELOCATE, GARBAGE_COLLECT
-
-
-
-
-
- - - - -
-
-
-
-
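A sketch for the removed `stop` page, using one of the compaction types listed in its help text:

```bash
# Stop all running VALIDATION compactions
$ nodetool stop -- VALIDATION
```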
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/stopdaemon.html b/src/doc/3.11.7/tools/nodetool/stopdaemon.html deleted file mode 100644 index 35611aefe..000000000 --- a/src/doc/3.11.7/tools/nodetool/stopdaemon.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stopdaemon" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stopdaemon

-
-
-

Usage

-
NAME
-        nodetool stopdaemon - Stop cassandra daemon
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stopdaemon
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/tablehistograms.html b/src/doc/3.11.7/tools/nodetool/tablehistograms.html deleted file mode 100644 index 661b64881..000000000 --- a/src/doc/3.11.7/tools/nodetool/tablehistograms.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablehistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablehistograms

-
-
-

Usage

-
NAME
-        nodetool tablehistograms - Print statistic histograms for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablehistograms [--]
-                [<keyspace> <table> | <keyspace.table>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <table> | <keyspace.table>]
-            The keyspace and table name
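For example, either argument form from the synopsis can be used; the keyspace and table names here are placeholders:

```bash
$ nodetool tablehistograms mykeyspace mytable
$ nodetool tablehistograms mykeyspace.mytable
```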
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/tablestats.html b/src/doc/3.11.7/tools/nodetool/tablestats.html deleted file mode 100644 index ee3e034dd..000000000 --- a/src/doc/3.11.7/tools/nodetool/tablestats.html +++ /dev/null @@ -1,167 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablestats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablestats

-
-
-

Usage

-
NAME
-        nodetool tablestats - Print statistics on tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablestats
-                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]
-                [(-s <sort_key> | --sort <sort_key>)] [(-t <top> | --top <top>)] [--]
-                [<keyspace.table>...]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human-readable form, i.e. KiB, MiB, GiB, TiB
-
-        -i
-            Ignore the list of tables and display the remaining tables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <sort_key>, --sort <sort_key>
-            Sort tables by specified sort key
-            (average_live_cells_per_slice_last_five_minutes,
-            average_tombstones_per_slice_last_five_minutes,
-            bloom_filter_false_positives, bloom_filter_false_ratio,
-            bloom_filter_off_heap_memory_used, bloom_filter_space_used,
-            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,
-            compacted_partition_minimum_bytes,
-            compression_metadata_off_heap_memory_used, dropped_mutations,
-            full_name, index_summary_off_heap_memory_used, local_read_count,
-            local_read_latency_ms, local_write_latency_ms,
-            maximum_live_cells_per_slice_last_five_minutes,
-            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,
-            memtable_data_size, memtable_off_heap_memory_used,
-            memtable_switch_count, number_of_partitions_estimate,
-            off_heap_memory_used_total, pending_flushes, percent_repaired,
-            read_latency, reads, space_used_by_snapshots_total, space_used_live,
-            space_used_total, sstable_compression_ratio, sstable_count,
-            table_name, write_latency, writes)
-
-        -t <top>, --top <top>
-            Show only the top K tables for the sort key (specify the number K of
-            tables to be shown)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace.table>...]
-            List of tables (or keyspace) names
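As an illustration (the table name is a placeholder), statistics can be limited to one table with human-readable sizes, emitted as JSON, or sorted to show only the busiest tables:

```bash
$ nodetool tablestats -H mykeyspace.mytable
$ nodetool tablestats -F json
$ nodetool tablestats --sort read_latency --top 10
```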
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/toppartitions.html b/src/doc/3.11.7/tools/nodetool/toppartitions.html deleted file mode 100644 index 10282993e..000000000 --- a/src/doc/3.11.7/tools/nodetool/toppartitions.html +++ /dev/null @@ -1,141 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "toppartitions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

toppartitions

-
-
-

Usage

-
NAME
-        nodetool toppartitions - Sample and print the most active partitions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] toppartitions [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma-separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
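A hypothetical run that samples one table for ten seconds (10000 ms) and lists the 20 most active partitions; the keyspace and table names are placeholders:

```bash
$ nodetool toppartitions -k 20 mykeyspace mytable 10000
```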
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/tpstats.html b/src/doc/3.11.7/tools/nodetool/tpstats.html deleted file mode 100644 index 7e4256965..000000000 --- a/src/doc/3.11.7/tools/nodetool/tpstats.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tpstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tpstats

-
-
-

Usage

-
NAME
-        nodetool tpstats - Print usage statistics of thread pools
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tpstats
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
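For instance, thread pool statistics can be printed in the default plain layout or as YAML:

```bash
$ nodetool tpstats
$ nodetool tpstats -F yaml
```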
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/truncatehints.html b/src/doc/3.11.7/tools/nodetool/truncatehints.html deleted file mode 100644 index b8ceac820..000000000 --- a/src/doc/3.11.7/tools/nodetool/truncatehints.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "truncatehints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

truncatehints

-
-
-

Usage

-
NAME
-        nodetool truncatehints - Truncate all hints on the local node, or
-        truncate hints for the endpoint(s) specified.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] truncatehints [--] [endpoint
-                ... ]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [endpoint ... ]
-            Endpoint address(es) to delete hints for, either ip address
-            ("127.0.0.1") or hostname
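Two sketched invocations, one truncating every hint on the node and one limited to specific endpoints (the addresses are placeholders):

```bash
$ nodetool truncatehints
$ nodetool truncatehints -- 192.168.1.10 192.168.1.11
```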
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/upgradesstables.html b/src/doc/3.11.7/tools/nodetool/upgradesstables.html deleted file mode 100644 index 7d7072bab..000000000 --- a/src/doc/3.11.7/tools/nodetool/upgradesstables.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "upgradesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

upgradesstables

-
-
-

Usage

-
NAME
-        nodetool upgradesstables - Rewrite sstables (for the requested tables)
-        that are not on the current version (thus upgrading them to said current
-        version)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] upgradesstables
-                [(-a | --include-all-sstables)] [(-j <jobs> | --jobs <jobs>)] [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --include-all-sstables
-            Use -a to include all sstables, even those already on the current
-            version
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to upgrade simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
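A typical sketch (keyspace and table names are placeholders); the second form also rewrites sstables that are already on the current version, two at a time:

```bash
$ nodetool upgradesstables mykeyspace mytable
$ nodetool upgradesstables -a -j 2 mykeyspace
```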
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/verify.html b/src/doc/3.11.7/tools/nodetool/verify.html deleted file mode 100644 index f10069748..000000000 --- a/src/doc/3.11.7/tools/nodetool/verify.html +++ /dev/null @@ -1,152 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "verify" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

verify

-
-
-

Usage

-
NAME
-        nodetool verify - Verify (check data checksum for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] verify
-                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]
-                [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)] [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -c, --check-version
-            Also check that all sstables are the latest version
-
-        -d, --dfp
-            Invoke the disk failure policy if a corrupt sstable is found
-
-        -e, --extended-verify
-            Verify each cell's data, beyond simply checking sstable checksums
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick check - avoid reading all data to verify checksums
-
-        -r, --rsc
-            Mutate the repair status on corrupt sstables
-
-        -t, --check-tokens
-            Verify that all tokens in sstables are owned by this node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
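As a hedged example (names are placeholders), a quick checksum check versus an extended per-cell verification of a single table:

```bash
$ nodetool verify -q mykeyspace mytable
$ nodetool verify -e mykeyspace mytable
```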
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/version.html b/src/doc/3.11.7/tools/nodetool/version.html deleted file mode 100644 index 031ff5e21..000000000 --- a/src/doc/3.11.7/tools/nodetool/version.html +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "version" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

version

-
-
-

Usage

-
NAME
-        nodetool version - Print cassandra version
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] version
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
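For completeness, a minimal invocation against the local node:

```bash
$ nodetool version
```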
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/tools/nodetool/viewbuildstatus.html b/src/doc/3.11.7/tools/nodetool/viewbuildstatus.html deleted file mode 100644 index 1c299fb41..000000000 --- a/src/doc/3.11.7/tools/nodetool/viewbuildstatus.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "viewbuildstatus" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

viewbuildstatus

-
-
-

Usage

-
NAME
-        nodetool viewbuildstatus - Show progress of a materialized view build
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] viewbuildstatus [--]
-                <keyspace> <view> | <keyspace.view>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <view> | <keyspace.view>
-            The keyspace and view name
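Both argument styles shown in the synopsis are accepted; the keyspace and view names below are placeholders:

```bash
$ nodetool viewbuildstatus mykeyspace myview
$ nodetool viewbuildstatus mykeyspace.myview
```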
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/3.11.7/troubleshooting/index.html b/src/doc/3.11.7/troubleshooting/index.html deleted file mode 100644 index f9b762731..000000000 --- a/src/doc/3.11.7/troubleshooting/index.html +++ /dev/null @@ -1,100 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Troubleshooting" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Troubleshooting

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/.buildinfo b/src/doc/4.0-alpha1/.buildinfo deleted file mode 100644 index a4c4dab70..000000000 --- a/src/doc/4.0-alpha1/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 0f0bdbd91badf17e7d9d141e1a363890 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/src/doc/4.0-alpha1/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml b/src/doc/4.0-alpha1/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml deleted file mode 100644 index fc5db0814..000000000 --- a/src/doc/4.0-alpha1/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# Keyspace Name -keyspace: stresscql - -# The CQL for creating a keyspace (optional if it already exists) -# Would almost always be network topology unless running something locall -keyspace_definition: | - CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; - -# Table name -table: blogposts - -# The CQL for creating a table you wish to stress (optional if it already exists) -table_definition: | - CREATE TABLE blogposts ( - domain text, - published_date timeuuid, - url text, - author text, - title text, - body text, - PRIMARY KEY(domain, published_date) - ) WITH CLUSTERING ORDER BY (published_date DESC) - AND compaction = { 'class':'LeveledCompactionStrategy' } - AND comment='A table to hold blog posts' - -### Column Distribution Specifications ### - -columnspec: - - name: domain - size: gaussian(5..100) #domain names are relatively short - population: uniform(1..10M) #10M possible domains to pick from - - - name: published_date - cluster: fixed(1000) #under each domain we will have max 1000 posts - - - name: url - size: uniform(30..300) - - - name: title #titles shouldn't go beyond 200 chars - size: gaussian(10..200) - - - name: author - size: uniform(5..20) #author names should be short - - - name: body - size: gaussian(100..5000) #the body of the blog post can be long - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # Our partition key is the domain so only insert one per batch - - select: fixed(1)/1000 # We have 1000 posts per domain so 1/1000 will allow 1 post per batch - - batchtype: UNLOGGED # Unlogged batches - - -# -# A list of queries you wish to run against the schema -# -queries: - singlepost: - cql: select * from blogposts where domain = ? LIMIT 1 - fields: samerow - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? 
- fields: samerow diff --git a/src/doc/4.0-alpha1/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml b/src/doc/4.0-alpha1/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml deleted file mode 100644 index 17161af27..000000000 --- a/src/doc/4.0-alpha1/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml +++ /dev/null @@ -1,44 +0,0 @@ -spacenam: example # idenitifier for this spec if running with multiple yaml files -keyspace: example - -# Would almost always be network topology unless running something locally -keyspace_definition: | - CREATE KEYSPACE example WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -table: staff_activities - -# The table under test. Start with a partition per staff member -# Is this a good idea? -table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when) - ) - -columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -insert: - # we only update a single partition in any given insert - partitions: fixed(1) - # we want to insert a single row per partition and we have between 20 and 500 - # rows per partition - select: fixed(1)/500 - batchtype: UNLOGGED # Single partition unlogged batches are essentially noops - -queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? LIMIT 1 - fields: samerow - diff --git a/src/doc/4.0-alpha1/_images/eclipse_debug0.png b/src/doc/4.0-alpha1/_images/eclipse_debug0.png deleted file mode 100644 index 79fc5fd5b..000000000 Binary files a/src/doc/4.0-alpha1/_images/eclipse_debug0.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_images/eclipse_debug1.png b/src/doc/4.0-alpha1/_images/eclipse_debug1.png deleted file mode 100644 index 87b8756a3..000000000 Binary files a/src/doc/4.0-alpha1/_images/eclipse_debug1.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_images/eclipse_debug2.png b/src/doc/4.0-alpha1/_images/eclipse_debug2.png deleted file mode 100644 index df4eddbd7..000000000 Binary files a/src/doc/4.0-alpha1/_images/eclipse_debug2.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_images/eclipse_debug3.png b/src/doc/4.0-alpha1/_images/eclipse_debug3.png deleted file mode 100644 index 23178142c..000000000 Binary files a/src/doc/4.0-alpha1/_images/eclipse_debug3.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_images/eclipse_debug4.png b/src/doc/4.0-alpha1/_images/eclipse_debug4.png deleted file mode 100644 index 5063d4891..000000000 Binary files a/src/doc/4.0-alpha1/_images/eclipse_debug4.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_images/eclipse_debug5.png b/src/doc/4.0-alpha1/_images/eclipse_debug5.png deleted file mode 100644 index ab68e68a3..000000000 Binary files a/src/doc/4.0-alpha1/_images/eclipse_debug5.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_images/eclipse_debug6.png b/src/doc/4.0-alpha1/_images/eclipse_debug6.png deleted file mode 100644 index 61ef30bfe..000000000 Binary files a/src/doc/4.0-alpha1/_images/eclipse_debug6.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_images/example-stress-graph.png b/src/doc/4.0-alpha1/_images/example-stress-graph.png deleted file mode 100644 
index a65b08b16..000000000 Binary files a/src/doc/4.0-alpha1/_images/example-stress-graph.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_sources/architecture/dynamo.rst.txt b/src/doc/4.0-alpha1/_sources/architecture/dynamo.rst.txt deleted file mode 100644 index 12c586e2c..000000000 --- a/src/doc/4.0-alpha1/_sources/architecture/dynamo.rst.txt +++ /dev/null @@ -1,164 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dynamo ------- - -.. _gossip: - -Gossip -^^^^^^ - -.. todo:: todo - -Failure Detection -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -Token Ring/Ranges -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -.. _replication-strategy: - -Replication -^^^^^^^^^^^ - -The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are :ref:`simple-strategy` and :ref:`network-topology-strategy`. - -.. _simple-strategy: - -SimpleStrategy -~~~~~~~~~~~~~~ - -SimpleStrategy allows a single integer ``replication_factor`` to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if ``replication_factor`` is 3, then three different nodes should store -a copy of each row. - -SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until ``replication_factor`` distinct nodes have been added to the set of replicas. - -.. _network-topology-strategy: - -NetworkTopologyStrategy -~~~~~~~~~~~~~~~~~~~~~~~ - -NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your -cluster only uses a single datacenter, NetworkTopologyStrategy should be prefered over SimpleStrategy to make it easier -to add new physical or virtual datacenters to the cluster later. - -In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose -replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication -factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one -replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially `surprising -implications `_. For example, if there are not an even number of -nodes in each rack, the data load on the smallest rack may be much higher. 
Similarly, if a single node is bootstrapped -into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to -configure all nodes on a single "rack". - -.. _transient-replication: - -Transient Replication -~~~~~~~~~~~~~~~~~~~~~ - -Transient replication allows you to configure a subset of replicas to only replicate data that hasn't been incrementally -repaired. This allows you to decouple data redundancy from availability. For instance, if you have a keyspace replicated -at rf 3, and alter it to rf 5 with 2 transient replicas, you go from being able to tolerate one failed replica to being -able to tolerate two, without corresponding increase in storage usage. This is because 3 nodes will replicate all the data -for a given token range, and the other 2 will only replicate data that hasn't been incrementally repaired. - -To use transient replication, you first need to enable it in ``cassandra.yaml``. Once enabled, both SimpleStrategy and -NetworkTopologyStrategy can be configured to transiently replicate data. You configure it by specifying replication factor -as ``/ RF``, where ``W`` is the write consistency level, ``R`` is the -read consistency level, and ``RF`` is the replication factor. For example, if ``RF = 3``, a ``QUORUM`` request will -require responses from at least two of the three replicas. If ``QUORUM`` is used for both writes and reads, at least -one of the replicas is guaranteed to participate in *both* the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, ``LOCAL_QUORUM`` can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter. - -If this type of strong consistency isn't required, lower consistency levels like ``ONE`` may be used to improve -throughput, latency, and availability. diff --git a/src/doc/4.0-alpha1/_sources/architecture/guarantees.rst.txt b/src/doc/4.0-alpha1/_sources/architecture/guarantees.rst.txt deleted file mode 100644 index c0b58d880..000000000 --- a/src/doc/4.0-alpha1/_sources/architecture/guarantees.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Guarantees ----------- - -.. todo:: todo diff --git a/src/doc/4.0-alpha1/_sources/architecture/index.rst.txt b/src/doc/4.0-alpha1/_sources/architecture/index.rst.txt deleted file mode 100644 index 58eda1377..000000000 --- a/src/doc/4.0-alpha1/_sources/architecture/index.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. 
The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Architecture -============ - -This section describes the general architecture of Apache Cassandra. - -.. toctree:: - :maxdepth: 2 - - overview - dynamo - storage_engine - guarantees - diff --git a/src/doc/4.0-alpha1/_sources/architecture/overview.rst.txt b/src/doc/4.0-alpha1/_sources/architecture/overview.rst.txt deleted file mode 100644 index 005b15b94..000000000 --- a/src/doc/4.0-alpha1/_sources/architecture/overview.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Overview --------- - -.. todo:: todo diff --git a/src/doc/4.0-alpha1/_sources/architecture/storage_engine.rst.txt b/src/doc/4.0-alpha1/_sources/architecture/storage_engine.rst.txt deleted file mode 100644 index 23b738de7..000000000 --- a/src/doc/4.0-alpha1/_sources/architecture/storage_engine.rst.txt +++ /dev/null @@ -1,208 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Storage Engine --------------- - -.. _commit-log: - -CommitLog -^^^^^^^^^ - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables. - -All mutations write optimized by storing in commitlog segments, reducing the number of seeks needed to write to disk. 
Commitlog Segments are limited by the "commitlog_segment_size_in_mb" option, once the size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all its data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running "nodetool drain" before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup. - -- ``commitlog_segment_size_in_mb``: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via max_mutation_size_in_kb setting in cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. - -***NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*** - -*Default Value:* 32 - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied. - -- ``commitlog_sync``: may be either “periodic” or “batch.” - - - ``batch``: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait "commitlog_sync_batch_window_in_ms" milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason. - - - ``commitlog_sync_batch_window_in_ms``: Time to wait between "batch" fsyncs - *Default Value:* 2 - - - ``periodic``: In periodic mode, writes are immediately ack'ed, and the CommitLog is simply synced every "commitlog_sync_period_in_ms" milliseconds. - - - ``commitlog_sync_period_in_ms``: Time to wait between "periodic" fsyncs - *Default Value:* 10000 - -*Default Value:* batch - -*** NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period or more if the sync is delayed. If using "batch" mode, it is recommended to store commitlogs in a separate, dedicated device.** - - -- ``commitlog_directory``: This option is commented out by default When running on magnetic HDD, this should be a separate spindle than the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -- ``commitlog_compression``: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported. - -(Default Value: (complex option):: - - # - class_name: LZ4Compressor - # parameters: - # - - -- ``commitlog_total_space_in_mb``: Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume. - -*Default Value:* 8192 - -.. _memtables: - -Memtables -^^^^^^^^^ - -Memtables are in-memory structures where Cassandra buffers writes. 
In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable `SSTables`_. This can be triggered in several -ways: - -- The memory usage of the memtables exceeds the configured threshold (see ``memtable_cleanup_threshold``) -- The :ref:`commit-log` approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to - be freed - -Memtables may be stored entirely on-heap or partially off-heap, depending on ``memtable_allocation_type``. - -SSTables -^^^^^^^^ - -SSTables are the immutable data files that Cassandra uses for persisting data on disk. - -As SSTables are flushed to disk from :ref:`memtables` or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed. - -Each SSTable is comprised of multiple components stored in separate files: - -``Data.db`` - The actual data, i.e. the contents of rows. - -``Index.db`` - An index from partition keys to positions in the ``Data.db`` file. For wide partitions, this may also include an - index to rows within a partition. - -``Summary.db`` - A sampling of (by default) every 128th entry in the ``Index.db`` file. - -``Filter.db`` - A Bloom Filter of the partition keys in the SSTable. - -``CompressionInfo.db`` - Metadata about the offsets and lengths of compression chunks in the ``Data.db`` file. - -``Statistics.db`` - Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, - repair, compression, TTLs, and more. - -``Digest.crc32`` - A CRC-32 digest of the ``Data.db`` file. - -``TOC.txt`` - A plain text list of the component files for the SSTable. - -Within the ``Data.db`` file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, ``Murmur3Partition``, is used). Within a partition, rows are -stored in the order of their clustering keys. - -SSTables can be optionally compressed using block-based compression. - -SSTable Versions -^^^^^^^^^^^^^^^^ - -This section was created using the following -`gist `_ -which utilized this original -`source `_. 
- -The version numbers, to date are: - -Version 0 -~~~~~~~~~ - -* b (0.7.0): added version to sstable filenames -* c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings -* d (0.7.0): row size in data component becomes a long instead of int -* e (0.7.0): stores undecorated keys in data and index components -* f (0.7.0): switched bloom filter implementations in data component -* g (0.8): tracks flushed-at context in metadata component - -Version 1 -~~~~~~~~~ - -* h (1.0): tracks max client timestamp in metadata component -* hb (1.0.3): records compression ration in metadata component -* hc (1.0.4): records partitioner in metadata component -* hd (1.0.10): includes row tombstones in maxtimestamp -* he (1.1.3): includes ancestors generation in metadata component -* hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782) -* ia (1.2.0): - - * column indexes are promoted to the index file - * records estimated histogram of deletion times in tombstones - * bloom filter (keys and columns) upgraded to Murmur3 -* ib (1.2.1): tracks min client timestamp in metadata component -* ic (1.2.5): omits per-row bloom filter of column names - -Version 2 -~~~~~~~~~ - -* ja (2.0.0): - - * super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. We do need a major version bump however, because we should not allow streaming of super columns into this new format) - * tracks max local deletiontime in sstable metadata - * records bloom_filter_fp_chance in metadata component - * remove data size and column count from data file (CASSANDRA-4180) - * tracks max/min column values (according to comparator) -* jb (2.0.1): - - * switch from crc32 to adler32 for compression checksums - * checksum the compressed data -* ka (2.1.0): - - * new Statistics.db file format - * index summaries can be downsampled and the sampling level is persisted - * switch uncompressed checksums to adler32 - * tracks presense of legacy (local and remote) counter shards -* la (2.2.0): new file name format -* lb (2.2.7): commit log lower bound included - -Version 3 -~~~~~~~~~ - -* ma (3.0.0): - - * swap bf hash order - * store rows natively -* mb (3.0.7, 3.7): commit log lower bound included -* mc (3.0.8, 3.9): commit log intervals included - -Example Code -~~~~~~~~~~~~ - -The following example is useful for finding all sstables that do not match the "ib" SSTable version - -.. code-block:: bash - - find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots" diff --git a/src/doc/4.0-alpha1/_sources/bugs.rst.txt b/src/doc/4.0-alpha1/_sources/bugs.rst.txt deleted file mode 100644 index 32d676f9d..000000000 --- a/src/doc/4.0-alpha1/_sources/bugs.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Reporting Bugs -============== - -If you encounter a problem with Cassandra, the first places to ask for help are the :ref:`user mailing list -` and the ``cassandra`` :ref:`Slack room `. - -If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the `Apache Cassandra JIRA `__. Please provide as much -details as you can on your problem, and don't forget to indicate which version of Cassandra you are running and on which -environment. - -Further details on how to contribute can be found at our :doc:`development/index` section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path. diff --git a/src/doc/4.0-alpha1/_sources/configuration/cassandra_config_file.rst.txt b/src/doc/4.0-alpha1/_sources/configuration/cassandra_config_file.rst.txt deleted file mode 100644 index fdf88d558..000000000 --- a/src/doc/4.0-alpha1/_sources/configuration/cassandra_config_file.rst.txt +++ /dev/null @@ -1,2048 +0,0 @@ -.. _cassandra-yaml: - -Cassandra Configuration File -============================ - -``cluster_name`` ----------------- -The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another. - -*Default Value:* 'Test Cluster' - -``num_tokens`` --------------- - -This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability. - -If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below. - -Specifying initial_token will override this setting on the node's initial start, -on subsequent starts, this setting will apply even if initial token is set. - -If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations - -*Default Value:* 256 - -``allocate_tokens_for_keyspace`` --------------------------------- -*This option is commented out by default.* - -Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace. - -The load assigned to each node will be close to proportional to its number of -vnodes. - -Only supported with the Murmur3Partitioner. - -*Default Value:* KEYSPACE - -``initial_token`` ------------------ -*This option is commented out by default.* - -initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) -- in which case you should provide a -comma-separated list -- it's primarily used when adding nodes to legacy clusters -that do not have vnodes enabled. 
- -``hinted_handoff_enabled`` --------------------------- - -See http://wiki.apache.org/cassandra/HintedHandoff -May either be "true" or "false" to enable globally - -*Default Value:* true - -``hinted_handoff_disabled_datacenters`` ---------------------------------------- -*This option is commented out by default.* - -When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff - -*Default Value (complex option)*:: - - # - DC1 - # - DC2 - -``max_hint_window_in_ms`` -------------------------- -this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again. - -*Default Value:* 10800000 # 3 hours - -``hinted_handoff_throttle_in_kb`` ---------------------------------- - -Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.) - -*Default Value:* 1024 - -``max_hints_delivery_threads`` ------------------------------- - -Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower - -*Default Value:* 2 - -``hints_directory`` -------------------- -*This option is commented out by default.* - -Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints. - -*Default Value:* /var/lib/cassandra/hints - -``hints_flush_period_in_ms`` ----------------------------- - -How often hints should be flushed from the internal buffers to disk. -Will *not* trigger fsync. - -*Default Value:* 10000 - -``max_hints_file_size_in_mb`` ------------------------------ - -Maximum size for a single hints file, in megabytes. - -*Default Value:* 128 - -``hints_compression`` ---------------------- -*This option is commented out by default.* - -Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``batchlog_replay_throttle_in_kb`` ----------------------------------- -Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster. - -*Default Value:* 1024 - -``authenticator`` ------------------ - -Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}. - -- AllowAllAuthenticator performs no checks - set it to disable authentication. -- PasswordAuthenticator relies on username/password pairs to authenticate - users. It keeps usernames and hashed passwords in system_auth.roles table. - Please increase system_auth keyspace replication factor if you use this authenticator. - If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) - -*Default Value:* AllowAllAuthenticator - -``authorizer`` --------------- - -Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}. 
- -- AllowAllAuthorizer allows any action to any user - set it to disable authorization. -- CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllAuthorizer - -``role_manager`` ----------------- - -Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable. - -- CassandraRoleManager stores role data in the system_auth keyspace. Please - increase system_auth keyspace replication factor if you use this role manager. - -*Default Value:* CassandraRoleManager - -``network_authorizer`` ----------------------- - -Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}. - -- AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization. -- CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllNetworkAuthorizer - -``roles_validity_in_ms`` ------------------------- - -Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator. - -*Default Value:* 2000 - -``roles_update_interval_in_ms`` -------------------------------- -*This option is commented out by default.* - -Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms. - -*Default Value:* 2000 - -``permissions_validity_in_ms`` ------------------------------- - -Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer. - -*Default Value:* 2000 - -``permissions_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms. - -*Default Value:* 2000 - -``credentials_validity_in_ms`` ------------------------------- - -Validity period for credentials cache. 
This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching. - -*Default Value:* 2000 - -``credentials_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms. - -*Default Value:* 2000 - -``partitioner`` ---------------- - -The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. The partitioner can NOT be -changed without reloading all data. If you are adding nodes or upgrading, -you should set this to the same partitioner that you are currently using. - -The default partitioner is the Murmur3Partitioner. Older partitioners -such as the RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner have been included for backward compatibility only. -For new clusters, you should NOT change this value. - - -*Default Value:* org.apache.cassandra.dht.Murmur3Partitioner - -``data_file_directories`` -------------------------- -*This option is commented out by default.* - -Directories where Cassandra should store data on disk. If multiple -directories are specified, Cassandra will spread data evenly across -them by partitioning the token ranges. -If not set, the default directory is $CASSANDRA_HOME/data/data. - -*Default Value (complex option)*:: - - # - /var/lib/cassandra/data - -``commitlog_directory`` ------------------------ -*This option is commented out by default.* -commit log. when running on magnetic HDD, this should be a -separate spindle than the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -``cdc_enabled`` ---------------- - -Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory). - -*Default Value:* false - -``cdc_raw_directory`` ---------------------- -*This option is commented out by default.* - -CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw. - -*Default Value:* /var/lib/cassandra/cdc_raw - -``disk_failure_policy`` ------------------------ - -Policy for data disk failures: - -die - shut down gossip and client transports and kill the JVM for any fs errors or - single-sstable errors, so the node can be replaced. 
- -stop_paranoid - shut down gossip and client transports even for single-sstable errors, - kill the JVM for errors during startup. - -stop - shut down gossip and client transports, leaving the node effectively dead, but - can still be inspected via JMX, kill the JVM for errors during startup. - -best_effort - stop using the failed disk and respond to requests based on - remaining available sstables. This means you WILL see obsolete - data at CL.ONE! - -ignore - ignore fatal errors and let requests fail, as in pre-1.2 Cassandra - -*Default Value:* stop - -``commit_failure_policy`` -------------------------- - -Policy for commit disk failures: - -die - shut down the node and kill the JVM, so the node can be replaced. - -stop - shut down the node, leaving the node effectively dead, but - can still be inspected via JMX. - -stop_commit - shutdown the commit log, letting writes collect but - continuing to service reads, as in pre-2.0.5 Cassandra - -ignore - ignore fatal errors and let the batches fail - -*Default Value:* stop - -``prepared_statements_cache_size_mb`` -------------------------------------- - -Maximum size of the native protocol prepared statement cache - -Valid values are either "auto" (omitting the value) or a value greater 0. - -Note that specifying a too large value will result in long running GCs and possbily -out-of-memory errors. Keep the value at a small fraction of the heap. - -If you constantly see "prepared statements discarded in the last minute because -cache limit reached" messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts. - -Do only change the default value, if you really have more prepared statements than -fit in the cache. In most cases it is not neccessary to change this value. -Constantly re-preparing statements is a performance penalty. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``key_cache_size_in_mb`` ------------------------- - -Maximum size of the key cache in memory. - -Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it's worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It's best to only use the -row cache if you have hot rows or static rows. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. - -``key_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 14400 or 4 hours. - -*Default Value:* 14400 - -``key_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``row_cache_class_name`` ------------------------- -*This option is commented out by default.* - -Row cache implementation class name. 
Available implementations: - -org.apache.cassandra.cache.OHCProvider - Fully off-heap row cache implementation (default). - -org.apache.cassandra.cache.SerializingCacheProvider - This is the row cache implementation availabile - in previous releases of Cassandra. - -*Default Value:* org.apache.cassandra.cache.OHCProvider - -``row_cache_size_in_mb`` ------------------------- - -Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory that the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Do never allow your system to swap. - -Default value is 0, to disable row caching. - -*Default Value:* 0 - -``row_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 0 to disable saving the row cache. - -*Default Value:* 0 - -``row_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved - -*Default Value:* 100 - -``counter_cache_size_in_mb`` ----------------------------- - -Maximum size of the counter cache in memory. - -Counter cache helps to reduce counter locks' contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it's relatively cheap. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. - -``counter_cache_save_period`` ------------------------------ - -Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file. - -Default is 7200 or 2 hours. - -*Default Value:* 7200 - -``counter_cache_keys_to_save`` ------------------------------- -*This option is commented out by default.* - -Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``saved_caches_directory`` --------------------------- -*This option is commented out by default.* - -saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. - -*Default Value:* /var/lib/cassandra/saved_caches - -``commitlog_sync_batch_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -commitlog_sync may be either "periodic", "group", or "batch." 
- -When in batch mode, Cassandra won't ack writes until the commit log -has been flushed to disk. Each incoming write will trigger the flush task. -commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had -almost no value, and is being removed. - - -*Default Value:* 2 - -``commitlog_sync_group_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -group mode is similar to batch mode, where Cassandra will not ack writes -until the commit log has been flushed to disk. The difference is group -mode will wait up to commitlog_sync_group_window_in_ms between flushes. - - -*Default Value:* 1000 - -``commitlog_sync`` ------------------- - -the default option is "periodic" where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds. - -*Default Value:* periodic - -``commitlog_sync_period_in_ms`` -------------------------------- - -*Default Value:* 10000 - -``periodic_commitlog_sync_lag_block_in_ms`` -------------------------------------------- -*This option is commented out by default.* - -When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete. - -``commitlog_segment_size_in_mb`` --------------------------------- - -The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables. - -The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048. - -NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024 - - -*Default Value:* 32 - -``commitlog_compression`` -------------------------- -*This option is commented out by default.* - -Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``seed_provider`` ------------------ -any class that implements the SeedProvider interface and has a -constructor that takes a Map of parameters will do. - -*Default Value (complex option)*:: - - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: ",," - - seeds: "127.0.0.1:7000" - -``concurrent_reads`` --------------------- -For workloads with more data than can fit in memory, Cassandra's -bottleneck will be reads that need to fetch data from -disk. "concurrent_reads" should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. 
Same applies to -"concurrent_counter_writes", since counter writes read the current -values before incrementing and writing them back. - -On the other hand, since writes are almost never IO bound, the ideal -number of "concurrent_writes" is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb. - -*Default Value:* 32 - -``concurrent_writes`` ---------------------- - -*Default Value:* 32 - -``concurrent_counter_writes`` ------------------------------ - -*Default Value:* 32 - -``concurrent_materialized_view_writes`` ---------------------------------------- - -For materialized view writes, as there is a read involved, so this should -be limited by the less of concurrent reads or concurrent writes. - -*Default Value:* 32 - -``file_cache_size_in_mb`` -------------------------- -*This option is commented out by default.* - -Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as an -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed. - -*Default Value:* 512 - -``buffer_pool_use_heap_if_exhausted`` -------------------------------------- -*This option is commented out by default.* - -Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. - - -*Default Value:* true - -``disk_optimization_strategy`` ------------------------------- -*This option is commented out by default.* - -The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks) - -*Default Value:* ssd - -``memtable_heap_space_in_mb`` ------------------------------ -*This option is commented out by default.* - -Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap. - -*Default Value:* 2048 - -``memtable_offheap_space_in_mb`` --------------------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``memtable_cleanup_threshold`` ------------------------------- -*This option is commented out by default.* - -memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information. - -Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load. - -memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) - -*Default Value:* 0.11 - -``memtable_allocation_type`` ----------------------------- - -Specify the way Cassandra allocates and manages memtable memory. 
-Options are: - -heap_buffers - on heap nio buffers - -offheap_buffers - off heap (direct) nio buffers - -offheap_objects - off heap objects - -*Default Value:* heap_buffers - -``repair_session_space_in_mb`` ------------------------------- -*This option is commented out by default.* - -Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair. - -For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096. - - -``commitlog_total_space_in_mb`` -------------------------------- -*This option is commented out by default.* - -Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume. - - -*Default Value:* 8192 - -``memtable_flush_writers`` --------------------------- -*This option is commented out by default.* - -This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound. - -Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time. - -You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory. - -memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers. - -Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead. - -There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory. - - -*Default Value:* 2 - -``cdc_total_space_in_mb`` -------------------------- -*This option is commented out by default.* - -Total space to use for change-data-capture logs on disk. - -If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed. - -The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides. 
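-
-Taken together, a node that captures change data might combine the CDC-related
-options along these lines (a sketch only; the path and sizes shown are the
-documented defaults, not tuning advice)::
-
-    cdc_enabled: true
-    cdc_raw_directory: /var/lib/cassandra/cdc_raw
-    cdc_total_space_in_mb: 4096
-    cdc_free_space_check_interval_ms: 250
-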
- -*Default Value:* 4096 - -``cdc_free_space_check_interval_ms`` ------------------------------------- -*This option is commented out by default.* - -When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms - -*Default Value:* 250 - -``index_summary_capacity_in_mb`` --------------------------------- - -A fixed memory pool size in MB for for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory. - -``index_summary_resize_interval_in_minutes`` --------------------------------------------- - -How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level. - -*Default Value:* 60 - -``trickle_fsync`` ------------------ - -Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters. - -*Default Value:* false - -``trickle_fsync_interval_in_kb`` --------------------------------- - -*Default Value:* 10240 - -``storage_port`` ----------------- - -TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7000 - -``ssl_storage_port`` --------------------- - -SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7001 - -``listen_address`` ------------------- - -Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate! - -Set listen_address OR listen_interface, not both. - -Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be). - -Setting listen_address to 0.0.0.0 is always wrong. - - -*Default Value:* localhost - -``listen_interface`` --------------------- -*This option is commented out by default.* - -Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth0 - -``listen_interface_prefer_ipv6`` --------------------------------- -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. 
If true the first ipv6 address will be used. Defaults to false, preferring
-ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
-
-*Default Value:* false
-
-``broadcast_address``
----------------------
-*This option is commented out by default.*
-
-Address to broadcast to other Cassandra nodes.
-Leaving this blank will set it to the same value as listen_address.
-
-*Default Value:* 1.2.3.4
-
-``listen_on_broadcast_address``
--------------------------------
-*This option is commented out by default.*
-
-When using multiple physical network interfaces, set this
-to true to listen on broadcast_address in addition to
-the listen_address, allowing nodes to communicate on both
-interfaces.
-Ignore this property if the network configuration automatically
-routes between the public and private networks, as on EC2.
-
-*Default Value:* false
-
-``internode_authenticator``
----------------------------
-*This option is commented out by default.*
-
-Internode authentication backend, implementing IInternodeAuthenticator;
-used to allow/disallow connections from peer nodes.
-
-*Default Value:* org.apache.cassandra.auth.AllowAllInternodeAuthenticator
-
-``start_native_transport``
---------------------------
-
-Whether to start the native transport server.
-The address on which the native transport is bound is defined by rpc_address.
-
-*Default Value:* true
-
-``native_transport_port``
--------------------------
-Port for the CQL native transport to listen for clients on.
-For security reasons, you should not expose this port to the internet. Firewall it if needed.
-
-*Default Value:* 9042
-
-``native_transport_port_ssl``
------------------------------
-*This option is commented out by default.*
-Enabling native transport encryption in client_encryption_options allows you to either use
-encryption for the standard port or to use a dedicated, additional port along with the unencrypted
-standard native_transport_port.
-Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
-for native_transport_port. Setting native_transport_port_ssl to a different value
-from native_transport_port will use encryption for native_transport_port_ssl while
-keeping native_transport_port unencrypted.
-
-*Default Value:* 9142
-
-``native_transport_max_threads``
---------------------------------
-*This option is commented out by default.*
-The maximum number of threads for handling requests (note that idle threads are stopped
-after 30 seconds, so there is no corresponding minimum setting).
-
-*Default Value:* 128
-
-``native_transport_max_frame_size_in_mb``
------------------------------------------
-*This option is commented out by default.*
-
-The maximum allowed frame size. Frames (requests) larger than this will
-be rejected as invalid. The default is 256MB. If you're changing this parameter,
-you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.
-
-*Default Value:* 256
-
-``native_transport_frame_block_size_in_kb``
--------------------------------------------
-*This option is commented out by default.*
-
-If checksumming is enabled as a protocol option, denotes the size of the chunks into which
-frame bodies will be broken and checksummed.
-
-*Default Value:* 32
-
-``native_transport_max_concurrent_connections``
-------------------------------------------------
-*This option is commented out by default.*
-
-The maximum number of concurrent client connections.
-The default is -1, which means unlimited.
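-
-For example, a node that wants to cap client-facing resources might set the
-native transport options described in this group as follows (a sketch only; the
-connection limits are illustrative values, the rest are the documented defaults)::
-
-    native_transport_port: 9042
-    native_transport_max_threads: 128
-    native_transport_max_frame_size_in_mb: 256
-    native_transport_max_concurrent_connections: 1000
-    native_transport_max_concurrent_connections_per_ip: 50
-    native_transport_idle_timeout_in_ms: 60000
-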
- -*Default Value:* -1 - -``native_transport_max_concurrent_connections_per_ip`` ------------------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_allow_older_protocols`` ------------------------------------------- - -Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored. - -*Default Value:* true - -``native_transport_idle_timeout_in_ms`` ---------------------------------------- -*This option is commented out by default.* - -Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period. - -Clients may implement heartbeats by sending OPTIONS native protocol message after a timeout, which -will reset idle timeout timer on the server side. To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side. - -Idle connection timeouts are disabled by default. - -*Default Value:* 60000 - -``rpc_address`` ---------------- - -The address or interface to bind the native transport server to. - -Set rpc_address OR rpc_interface, not both. - -Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node). - -Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0. - -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* localhost - -``rpc_interface`` ------------------ -*This option is commented out by default.* - -Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth1 - -``rpc_interface_prefer_ipv6`` ------------------------------ -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_rpc_address`` -------------------------- -*This option is commented out by default.* - -RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set. 
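-
-As described above, binding the native transport to all interfaces requires an
-explicit broadcast address. For example (the address shown is a placeholder)::
-
-    rpc_address: 0.0.0.0
-    broadcast_rpc_address: 203.0.113.10
-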
- -*Default Value:* 1.2.3.4 - -``rpc_keepalive`` ------------------ - -enable or disable keepalive on rpc/native connections - -*Default Value:* true - -``internode_send_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and 'man tcp' - -``internode_recv_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem - -``incremental_backups`` ------------------------ - -Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator's -responsibility. - -*Default Value:* false - -``snapshot_before_compaction`` ------------------------------- - -Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won't clean up the -snapshots for you. Mostly useful if you're paranoid when there -is a data format change. - -*Default Value:* false - -``auto_snapshot`` ------------------ - -Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop. - -*Default Value:* true - -``column_index_size_in_kb`` ---------------------------- - -Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these: - -- a smaller granularity means more index entries are generated - and looking up rows withing the partition by collation column - is faster -- but, Cassandra will keep the collation index in memory for hot - rows (as part of the key cache), so a larger granularity means - you can cache more hot rows - -*Default Value:* 64 - -``column_index_cache_size_in_kb`` ---------------------------------- - -Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk. - -Note that this size refers to the size of the -serialized index information and not the size of the partition. - -*Default Value:* 2 - -``concurrent_compactors`` -------------------------- -*This option is commented out by default.* - -Number of simultaneous compactions to allow, NOT including -validation "compactions" for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long running compactions. The default is usually -fine and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first. 
- -concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8. - -If your data directories are backed by SSD, you should increase this -to the number of cores. - -*Default Value:* 1 - -``concurrent_validations`` --------------------------- -*This option is commented out by default.* - -Number of simultaneous repair validations to allow. Default is unbounded -Values less than one are interpreted as unbounded (the default) - -*Default Value:* 0 - -``concurrent_materialized_view_builders`` ------------------------------------------ - -Number of simultaneous materialized view builder tasks to allow. - -*Default Value:* 1 - -``compaction_throughput_mb_per_sec`` ------------------------------------- - -Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this account for all types -of compaction, including validation compaction. - -*Default Value:* 16 - -``sstable_preemptive_open_interval_in_mb`` ------------------------------------------- - -When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot - -*Default Value:* 50 - -``stream_entire_sstables`` --------------------------- -*This option is commented out by default.* - -When enabled, permits Cassandra to zero-copy stream entire eligible -SSTables between nodes, including every component. -This speeds up the network transfer significantly subject to -throttling specified by stream_throughput_outbound_megabits_per_sec. -Enabling this will reduce the GC pressure on sending and receiving node. -When unset, the default is enabled. While this feature tries to keep the -disks balanced, it cannot guarantee it. This feature will be automatically -disabled if internode encryption is enabled. Currently this can be used with -Leveled Compaction. Once CASSANDRA-14586 is fixed other compaction strategies -will benefit as well when used in combination with CASSANDRA-6696. - -*Default Value:* true - -``stream_throughput_outbound_megabits_per_sec`` ------------------------------------------------ -*This option is commented out by default.* - -Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s. 
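-
-To make the relationship between these throttles concrete, an operator who wants
-to halve the streaming defaults while leaving compaction throughput at its default
-might use (illustrative values only)::
-
-    compaction_throughput_mb_per_sec: 16
-    stream_throughput_outbound_megabits_per_sec: 100
-    inter_dc_stream_throughput_outbound_megabits_per_sec: 100
-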
- -*Default Value:* 200 - -``inter_dc_stream_throughput_outbound_megabits_per_sec`` --------------------------------------------------------- -*This option is commented out by default.* - -Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s - -*Default Value:* 200 - -``read_request_timeout_in_ms`` ------------------------------- - -How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``range_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 10000 - -``write_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 2000 - -``counter_write_request_timeout_in_ms`` ---------------------------------------- -How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``cas_contention_timeout_in_ms`` --------------------------------- -How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms. - -*Default Value:* 1000 - -``truncate_request_timeout_in_ms`` ----------------------------------- -How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms. - -*Default Value:* 60000 - -``request_timeout_in_ms`` -------------------------- -The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms. - -*Default Value:* 10000 - -``internode_application_send_queue_capacity_in_bytes`` ------------------------------------------------------- -*This option is commented out by default.* - -Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details. - -The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000 - -The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000 - -The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000 - -Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received. - -The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. 
So any given node may have a maximum of
-N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes)
-messages queued without any coordination between them, although in practice, with token-aware routing, only RF*tokens
-nodes should need to communicate with significant bandwidth.
-
-The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit,
-on all links to or from a single node in the cluster.
-The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit,
-on all links to or from any node in the cluster.
-
-
-*Default Value:* 4194304 #4MiB
-
-``internode_application_send_queue_reserve_endpoint_capacity_in_bytes``
-------------------------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 134217728 #128MiB
-
-``internode_application_send_queue_reserve_global_capacity_in_bytes``
-----------------------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 536870912 #512MiB
-
-``internode_application_receive_queue_capacity_in_bytes``
-----------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 4194304 #4MiB
-
-``internode_application_receive_queue_reserve_endpoint_capacity_in_bytes``
----------------------------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 134217728 #128MiB
-
-``internode_application_receive_queue_reserve_global_capacity_in_bytes``
--------------------------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 536870912 #512MiB
-
-``slow_query_log_timeout_in_ms``
---------------------------------
-
-How long before a node logs slow queries. SELECT queries that take longer than
-this timeout to execute will generate an aggregated log message, so that slow queries
-can be identified. Set this value to zero to disable slow query logging.
-
-*Default Value:* 500
-
-``cross_node_timeout``
-----------------------
-
-Enable operation timeout information exchange between nodes to accurately
-measure request timeouts. If disabled, replicas will assume that requests
-were forwarded to them instantly by the coordinator, which means that
-under overload conditions we will waste that much extra time processing
-already-timed-out requests.
-
-Warning: before enabling this property make sure NTP is installed
-and the times are synchronized between the nodes.
-
-*Default Value:* false
-
-``streaming_keep_alive_period_in_secs``
-----------------------------------------
-*This option is commented out by default.*
-
-Set the keep-alive period for streaming.
-This node will send a keep-alive message periodically with this period.
-If the node does not receive a keep-alive message from the peer for
-2 keep-alive cycles, the stream session times out and fails.
-The default value is 300s (5 minutes), which means a stalled stream
-times out in 10 minutes by default.
-
-*Default Value:* 300
-
-``streaming_connections_per_host``
------------------------------------
-*This option is commented out by default.*
-
-Limit the number of connections per host for streaming.
-Increase this when you notice that joins are CPU-bound rather than network
-bound (for example a few nodes with big files).
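-
-For instance, a cluster whose joins are CPU-bound on the sending side might raise
-the connection count while keeping the default keep-alive period (illustrative
-values only)::
-
-    streaming_keep_alive_period_in_secs: 300
-    streaming_connections_per_host: 2
-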
- -*Default Value:* 1 - -``phi_convict_threshold`` -------------------------- -*This option is commented out by default.* - - -phi value that must be reached for a host to be marked down. -most users should never need to adjust this. - -*Default Value:* 8 - -``endpoint_snitch`` -------------------- - -endpoint_snitch -- Set this to a class that implements -IEndpointSnitch. The snitch has two functions: - -- it teaches Cassandra enough about your network topology to route - requests efficiently -- it allows Cassandra to spread replicas around your cluster to avoid - correlated failures. It does this by grouping machines into - "datacenters" and "racks." Cassandra will do its best not to have - more than one replica on the same "rack" (which may not actually - be a physical location) - -CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on "rack1" in "datacenter1", your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new "datacenter") and -decommissioning the old ones. - -Out of the box, Cassandra provides: - -SimpleSnitch: - Treats Strategy order as proximity. This can improve cache - locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack - and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via - gossip. If cassandra-topology.properties exists, it is used as a - fallback, allowing migration from the PropertyFileSnitch. - -PropertyFileSnitch: - Proximity is determined by rack and data center, which are - explicitly configured in cassandra-topology.properties. - -Ec2Snitch: - Appropriate for EC2 deployments in a single Region. Loads Region - and Availability Zone information from the EC2 API. The Region is - treated as the datacenter, and the Availability Zone as the rack. - Only private IPs are used, so this will not work across multiple - Regions. - -Ec2MultiRegionSnitch: - Uses public IPs as broadcast_address to allow cross-region - connectivity. (Thus, you should set seed addresses to the public - IP as well.) You will need to open the storage_port or - ssl_storage_port on the public IP firewall. (For intra-Region - traffic, Cassandra will switch to the private IP after - establishing a connection.) - -RackInferringSnitch: - Proximity is determined by rack and data center, which are - assumed to correspond to the 3rd and 2nd octet of each node's IP - address, respectively. Unless this happens to match your - deployment conventions, this is best used as an example of - writing a custom Snitch class and is provided in that spirit. - -You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath. 
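-
-For production use, the guidance above points at GossipingPropertyFileSnitch. A
-minimal sketch of that setup is::
-
-    endpoint_snitch: GossipingPropertyFileSnitch
-
-with each node's location declared in cassandra-rackdc.properties, for example
-(the names are placeholders)::
-
-    dc=dc1
-    rack=rack1
-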
- -*Default Value:* SimpleSnitch - -``dynamic_snitch_update_interval_in_ms`` ----------------------------------------- - -controls how often to perform the more expensive part of host score -calculation - -*Default Value:* 100 - -``dynamic_snitch_reset_interval_in_ms`` ---------------------------------------- -controls how often to reset all host scores, allowing a bad host to -possibly recover - -*Default Value:* 600000 - -``dynamic_snitch_badness_threshold`` ------------------------------------- -if set greater than zero, this will allow -'pinning' of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest. - -*Default Value:* 0.1 - -``server_encryption_options`` ------------------------------ - -Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html - -*NOTE* No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks - -The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore - - -*Default Value (complex option)*:: - - # set to true for allowing secure incoming connections - enabled: false - # If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port - optional: false - # if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used - # during upgrade to 4.0; otherwise, set to false. - enable_legacy_ssl_storage_port: false - # on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true. - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -``client_encryption_options`` ------------------------------ -enable or disable client-to-server encryption. - -*Default Value (complex option)*:: - - enabled: false - # If enabled and optional is set to true encrypted and unencrypted connections are handled. 
- optional: false - keystore: conf/.keystore - keystore_password: cassandra - # require_client_auth: false - # Set trustore and truststore_password if require_client_auth is true - # truststore: conf/.truststore - # truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - -``internode_compression`` -------------------------- -internode_compression controls whether traffic between nodes is -compressed. -Can be: - -all - all traffic is compressed - -dc - traffic between different datacenters is compressed - -none - nothing is compressed. - -*Default Value:* dc - -``inter_dc_tcp_nodelay`` ------------------------- - -Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses. - -*Default Value:* false - -``tracetype_query_ttl`` ------------------------ - -TTL for different trace types used during logging of the repair process. - -*Default Value:* 86400 - -``tracetype_repair_ttl`` ------------------------- - -*Default Value:* 604800 - -``enable_user_defined_functions`` ---------------------------------- - -If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. - -*Default Value:* false - -``enable_scripted_user_defined_functions`` ------------------------------------------- - -Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false. - -*Default Value:* false - -``windows_timer_interval`` --------------------------- - -The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals 'clockres' tool can confirm your system's default -setting. - -*Default Value:* 1 - -``transparent_data_encryption_options`` ---------------------------------------- - - -Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from -a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by -the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys -can still (and should!) be in the keystore and will be used on decrypt operations -(to handle the case of key rotation). - -It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. 
-(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) - -Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints - -*Default Value (complex option)*:: - - enabled: false - chunk_length_kb: 64 - cipher: AES/CBC/PKCS5Padding - key_alias: testing:1 - # CBC IV length for AES needs to be 16 bytes (which is also the default size) - # iv_length: 16 - key_provider: - - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: - - keystore: conf/.keystore - keystore_password: cassandra - store_type: JCEKS - key_password: cassandra - -``tombstone_warn_threshold`` ----------------------------- - -#################### -SAFETY THRESHOLDS # -#################### - -When executing a scan, within or across a partition, we need to keep the -tombstones seen in memory so we can return them to the coordinator, which -will use them to make sure other replicas also know about the deleted rows. -With workloads that generate a lot of tombstones, this can cause performance -problems and even exaust the server heap. -(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -Adjust the thresholds here if you understand the dangers and want to -scan more tombstones anyway. These thresholds may also be adjusted at runtime -using the StorageService mbean. - -*Default Value:* 1000 - -``tombstone_failure_threshold`` -------------------------------- - -*Default Value:* 100000 - -``batch_size_warn_threshold_in_kb`` ------------------------------------ - -Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability. - -*Default Value:* 5 - -``batch_size_fail_threshold_in_kb`` ------------------------------------ - -Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default. - -*Default Value:* 50 - -``unlogged_batch_across_partitions_warn_threshold`` ---------------------------------------------------- - -Log WARN on any batches not of type LOGGED than span across more partitions than this limit - -*Default Value:* 10 - -``compaction_large_partition_warning_threshold_mb`` ---------------------------------------------------- - -Log a warning when compacting partitions larger than this value - -*Default Value:* 100 - -``gc_log_threshold_in_ms`` --------------------------- -*This option is commented out by default.* - -GC Pauses greater than 200 ms will be logged at INFO level -This threshold can be adjusted to minimize logging if necessary - -*Default Value:* 200 - -``gc_warn_threshold_in_ms`` ---------------------------- -*This option is commented out by default.* - -GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement. Setting to 0 -will deactivate the feature. - -*Default Value:* 1000 - -``max_value_size_in_mb`` ------------------------- -*This option is commented out by default.* - -Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048. 
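-
-For reference, the safety thresholds in this group are often reviewed together;
-the documented defaults correspond to the following settings (shown here only to
-make the group explicit, not as tuning advice)::
-
-    tombstone_warn_threshold: 1000
-    tombstone_failure_threshold: 100000
-    batch_size_warn_threshold_in_kb: 5
-    batch_size_fail_threshold_in_kb: 50
-    unlogged_batch_across_partitions_warn_threshold: 10
-    compaction_large_partition_warning_threshold_mb: 100
-    max_value_size_in_mb: 256
-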
- -*Default Value:* 256 - -``back_pressure_enabled`` -------------------------- - -Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas. - -*Default Value:* false - -``back_pressure_strategy`` --------------------------- -The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map. - -``otc_coalescing_strategy`` ---------------------------- -*This option is commented out by default.* - -Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal -doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details. - -Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name. - -*Default Value:* DISABLED - -``otc_coalescing_window_us`` ----------------------------- -*This option is commented out by default.* - -How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled. - -*Default Value:* 200 - -``otc_coalescing_enough_coalesced_messages`` --------------------------------------------- -*This option is commented out by default.* - -Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128. 
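-
-If you do decide to experiment with coalescing, the related options are usually
-changed together. For example, using one of the strategies listed above
-(illustrative values only)::
-
-    otc_coalescing_strategy: TIMEHORIZON
-    otc_coalescing_window_us: 200
-    otc_coalescing_enough_coalesced_messages: 8
-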
-
-*Default Value:* 8
-
-``otc_backlog_expiration_interval_ms``
---------------------------------------
-*This option is commented out by default.*
-
-How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection.
-Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory
-taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value
-will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU
-time and queue contention while iterating the backlog of messages.
-An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.
-
-
-*Default Value:* 200
-
-``ideal_consistency_level``
----------------------------
-*This option is commented out by default.*
-
-Track a metric per keyspace indicating whether replication achieved the ideal consistency
-level for writes without timing out. This is different from the consistency level requested by
-each write, which may be lower in order to facilitate availability.
-
-*Default Value:* EACH_QUORUM
-
-``full_query_log_dir``
-----------------------
-*This option is commented out by default.*
-
-Path to write full query log data to when the full query log is enabled.
-The full query log will recursively delete the contents of this path at
-times. Don't place links in this directory to other parts of the filesystem.
-
-*Default Value:* /tmp/cassandrafullquerylog
-
-``automatic_sstable_upgrade``
------------------------------
-*This option is commented out by default.*
-
-Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the
-oldest non-upgraded sstable will get upgraded to the latest version
-
-*Default Value:* false
-
-``max_concurrent_automatic_sstable_upgrades``
----------------------------------------------
-*This option is commented out by default.*
-Limit the number of concurrent sstable upgrades
-
-*Default Value:* 1
-
-``audit_logging_options``
--------------------------
-
-Audit logging - Logs every incoming CQL command request and authentication attempt to a node. See the docs
-on audit_logging for full details about the various configuration options.
-
-``full_query_logging_options``
-------------------------------
-*This option is commented out by default.*
-
-
-default options for full query logging - these can be overridden from command line when executing
-nodetool enablefullquerylog
-
-``corrupted_tombstone_strategy``
---------------------------------
-*This option is commented out by default.*
-
-validate tombstones on reads and compaction
-can be either "disabled", "warn" or "exception"
-
-*Default Value:* disabled
-
-``diagnostic_events_enabled``
------------------------------
-
-Diagnostic Events #
-If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details
-on internal state and temporal relationships across events, accessible by clients via JMX.
-
-*Default Value:* false
-
-``native_transport_flush_in_batches_legacy``
---------------------------------------------
-*This option is commented out by default.*
-
-Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in
-particular you run an old kernel or have very few client connections, this option might be worth evaluating.
- -*Default Value:* false - -``repaired_data_tracking_for_range_reads_enabled`` --------------------------------------------------- - -Enable tracking of repaired state of data during reads and comparison between replicas -Mismatches between the repaired sets of replicas can be characterized as either confirmed -or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair -sessions, unrepaired partition tombstones, or some other condition means that the disparity -cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation -as they may be indicative of corruption or data loss. -There are separate flags for range vs partition reads as single partition reads are only tracked -when CL > 1 and a digest mismatch occurs. Currently, range queries don't use digests so if -enabled for range reads, all range reads will include repaired data tracking. As this adds -some overhead, operators may wish to disable it whilst still enabling it for partition reads - -*Default Value:* false - -``repaired_data_tracking_for_partition_reads_enabled`` ------------------------------------------------------- - -*Default Value:* false - -``report_unconfirmed_repaired_data_mismatches`` ------------------------------------------------ -If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed -mismatches will also be recorded. This is to avoid potential signal:noise issues are unconfirmed -mismatches are less actionable than confirmed ones. - -*Default Value:* false - -``enable_materialized_views`` ------------------------------ - -######################## -EXPERIMENTAL FEATURES # -######################## - -Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use. - -*Default Value:* false - -``enable_sasi_indexes`` ------------------------ - -Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use. - -*Default Value:* false - -``enable_transient_replication`` --------------------------------- - -Enables creation of transiently replicated keyspaces on this node. -Transient replication is experimental and is not recommended for production use. - -*Default Value:* false diff --git a/src/doc/4.0-alpha1/_sources/configuration/index.rst.txt b/src/doc/4.0-alpha1/_sources/configuration/index.rst.txt deleted file mode 100644 index f774fdad6..000000000 --- a/src/doc/4.0-alpha1/_sources/configuration/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra -===================== - -This section describes how to configure Apache Cassandra. - -.. 
toctree:: - :maxdepth: 1 - - cassandra_config_file diff --git a/src/doc/4.0-alpha1/_sources/contactus.rst.txt b/src/doc/4.0-alpha1/_sources/contactus.rst.txt deleted file mode 100644 index 3ed9004dd..000000000 --- a/src/doc/4.0-alpha1/_sources/contactus.rst.txt +++ /dev/null @@ -1,50 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contact us -========== - -You can get in touch with the Cassandra community either via the mailing lists or :ref:`Slack rooms `. - -.. _mailing-lists: - -Mailing lists -------------- - -The following mailing lists are available: - -- `Users `__ – General discussion list for users - `Subscribe - `__ -- `Developers `__ – Development related discussion - `Subscribe - `__ -- `Commits `__ – Commit notification source repository - - `Subscribe `__ -- `Client Libraries `__ – Discussion related to the - development of idiomatic client APIs - `Subscribe `__ - -Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe. - -.. _slack: - -Slack ------ -To chat with developers or users in real-time, join our rooms on `ASF Slack `__: - -- ``cassandra`` - for user questions and general discussions. -- ``cassandra-dev`` - strictly for questions or discussions related to Cassandra development. - diff --git a/src/doc/4.0-alpha1/_sources/cql/appendices.rst.txt b/src/doc/4.0-alpha1/_sources/cql/appendices.rst.txt deleted file mode 100644 index 480b78ea2..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/appendices.rst.txt +++ /dev/null @@ -1,330 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Appendices ----------- - -.. _appendix-A: - -Appendix A: CQL Keywords -~~~~~~~~~~~~~~~~~~~~~~~~ - -CQL distinguishes between *reserved* and *non-reserved* keywords. 
-Reserved keywords cannot be used as identifier, they are truly reserved -for the language (but one can enclose a reserved keyword by -double-quotes to use it as an identifier). Non-reserved keywords however -only have a specific meaning in certain context but can used as -identifier otherwise. The only *raison d’être* of these non-reserved -keywords is convenience: some keyword are non-reserved when it was -always easy for the parser to decide whether they were used as keywords -or not. - -+--------------------+-------------+ -| Keyword | Reserved? | -+====================+=============+ -| ``ADD`` | yes | -+--------------------+-------------+ -| ``AGGREGATE`` | no | -+--------------------+-------------+ -| ``ALL`` | no | -+--------------------+-------------+ -| ``ALLOW`` | yes | -+--------------------+-------------+ -| ``ALTER`` | yes | -+--------------------+-------------+ -| ``AND`` | yes | -+--------------------+-------------+ -| ``APPLY`` | yes | -+--------------------+-------------+ -| ``AS`` | no | -+--------------------+-------------+ -| ``ASC`` | yes | -+--------------------+-------------+ -| ``ASCII`` | no | -+--------------------+-------------+ -| ``AUTHORIZE`` | yes | -+--------------------+-------------+ -| ``BATCH`` | yes | -+--------------------+-------------+ -| ``BEGIN`` | yes | -+--------------------+-------------+ -| ``BIGINT`` | no | -+--------------------+-------------+ -| ``BLOB`` | no | -+--------------------+-------------+ -| ``BOOLEAN`` | no | -+--------------------+-------------+ -| ``BY`` | yes | -+--------------------+-------------+ -| ``CALLED`` | no | -+--------------------+-------------+ -| ``CLUSTERING`` | no | -+--------------------+-------------+ -| ``COLUMNFAMILY`` | yes | -+--------------------+-------------+ -| ``COMPACT`` | no | -+--------------------+-------------+ -| ``CONTAINS`` | no | -+--------------------+-------------+ -| ``COUNT`` | no | -+--------------------+-------------+ -| ``COUNTER`` | no | -+--------------------+-------------+ -| ``CREATE`` | yes | -+--------------------+-------------+ -| ``CUSTOM`` | no | -+--------------------+-------------+ -| ``DATE`` | no | -+--------------------+-------------+ -| ``DECIMAL`` | no | -+--------------------+-------------+ -| ``DELETE`` | yes | -+--------------------+-------------+ -| ``DESC`` | yes | -+--------------------+-------------+ -| ``DESCRIBE`` | yes | -+--------------------+-------------+ -| ``DISTINCT`` | no | -+--------------------+-------------+ -| ``DOUBLE`` | no | -+--------------------+-------------+ -| ``DROP`` | yes | -+--------------------+-------------+ -| ``ENTRIES`` | yes | -+--------------------+-------------+ -| ``EXECUTE`` | yes | -+--------------------+-------------+ -| ``EXISTS`` | no | -+--------------------+-------------+ -| ``FILTERING`` | no | -+--------------------+-------------+ -| ``FINALFUNC`` | no | -+--------------------+-------------+ -| ``FLOAT`` | no | -+--------------------+-------------+ -| ``FROM`` | yes | -+--------------------+-------------+ -| ``FROZEN`` | no | -+--------------------+-------------+ -| ``FULL`` | yes | -+--------------------+-------------+ -| ``FUNCTION`` | no | -+--------------------+-------------+ -| ``FUNCTIONS`` | no | -+--------------------+-------------+ -| ``GRANT`` | yes | -+--------------------+-------------+ -| ``IF`` | yes | -+--------------------+-------------+ -| ``IN`` | yes | -+--------------------+-------------+ -| ``INDEX`` | yes | -+--------------------+-------------+ -| ``INET`` | no | 
-+--------------------+-------------+ -| ``INFINITY`` | yes | -+--------------------+-------------+ -| ``INITCOND`` | no | -+--------------------+-------------+ -| ``INPUT`` | no | -+--------------------+-------------+ -| ``INSERT`` | yes | -+--------------------+-------------+ -| ``INT`` | no | -+--------------------+-------------+ -| ``INTO`` | yes | -+--------------------+-------------+ -| ``JSON`` | no | -+--------------------+-------------+ -| ``KEY`` | no | -+--------------------+-------------+ -| ``KEYS`` | no | -+--------------------+-------------+ -| ``KEYSPACE`` | yes | -+--------------------+-------------+ -| ``KEYSPACES`` | no | -+--------------------+-------------+ -| ``LANGUAGE`` | no | -+--------------------+-------------+ -| ``LIMIT`` | yes | -+--------------------+-------------+ -| ``LIST`` | no | -+--------------------+-------------+ -| ``LOGIN`` | no | -+--------------------+-------------+ -| ``MAP`` | no | -+--------------------+-------------+ -| ``MODIFY`` | yes | -+--------------------+-------------+ -| ``NAN`` | yes | -+--------------------+-------------+ -| ``NOLOGIN`` | no | -+--------------------+-------------+ -| ``NORECURSIVE`` | yes | -+--------------------+-------------+ -| ``NOSUPERUSER`` | no | -+--------------------+-------------+ -| ``NOT`` | yes | -+--------------------+-------------+ -| ``NULL`` | yes | -+--------------------+-------------+ -| ``OF`` | yes | -+--------------------+-------------+ -| ``ON`` | yes | -+--------------------+-------------+ -| ``OPTIONS`` | no | -+--------------------+-------------+ -| ``OR`` | yes | -+--------------------+-------------+ -| ``ORDER`` | yes | -+--------------------+-------------+ -| ``PASSWORD`` | no | -+--------------------+-------------+ -| ``PERMISSION`` | no | -+--------------------+-------------+ -| ``PERMISSIONS`` | no | -+--------------------+-------------+ -| ``PRIMARY`` | yes | -+--------------------+-------------+ -| ``RENAME`` | yes | -+--------------------+-------------+ -| ``REPLACE`` | yes | -+--------------------+-------------+ -| ``RETURNS`` | no | -+--------------------+-------------+ -| ``REVOKE`` | yes | -+--------------------+-------------+ -| ``ROLE`` | no | -+--------------------+-------------+ -| ``ROLES`` | no | -+--------------------+-------------+ -| ``SCHEMA`` | yes | -+--------------------+-------------+ -| ``SELECT`` | yes | -+--------------------+-------------+ -| ``SET`` | yes | -+--------------------+-------------+ -| ``SFUNC`` | no | -+--------------------+-------------+ -| ``SMALLINT`` | no | -+--------------------+-------------+ -| ``STATIC`` | no | -+--------------------+-------------+ -| ``STORAGE`` | no | -+--------------------+-------------+ -| ``STYPE`` | no | -+--------------------+-------------+ -| ``SUPERUSER`` | no | -+--------------------+-------------+ -| ``TABLE`` | yes | -+--------------------+-------------+ -| ``TEXT`` | no | -+--------------------+-------------+ -| ``TIME`` | no | -+--------------------+-------------+ -| ``TIMESTAMP`` | no | -+--------------------+-------------+ -| ``TIMEUUID`` | no | -+--------------------+-------------+ -| ``TINYINT`` | no | -+--------------------+-------------+ -| ``TO`` | yes | -+--------------------+-------------+ -| ``TOKEN`` | yes | -+--------------------+-------------+ -| ``TRIGGER`` | no | -+--------------------+-------------+ -| ``TRUNCATE`` | yes | -+--------------------+-------------+ -| ``TTL`` | no | -+--------------------+-------------+ -| ``TUPLE`` | no | -+--------------------+-------------+ -| ``TYPE`` | no | 
-+--------------------+-------------+ -| ``UNLOGGED`` | yes | -+--------------------+-------------+ -| ``UPDATE`` | yes | -+--------------------+-------------+ -| ``USE`` | yes | -+--------------------+-------------+ -| ``USER`` | no | -+--------------------+-------------+ -| ``USERS`` | no | -+--------------------+-------------+ -| ``USING`` | yes | -+--------------------+-------------+ -| ``UUID`` | no | -+--------------------+-------------+ -| ``VALUES`` | no | -+--------------------+-------------+ -| ``VARCHAR`` | no | -+--------------------+-------------+ -| ``VARINT`` | no | -+--------------------+-------------+ -| ``WHERE`` | yes | -+--------------------+-------------+ -| ``WITH`` | yes | -+--------------------+-------------+ -| ``WRITETIME`` | no | -+--------------------+-------------+ - -Appendix B: CQL Reserved Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name. - -+-----------------+ -| type | -+=================+ -| ``bitstring`` | -+-----------------+ -| ``byte`` | -+-----------------+ -| ``complex`` | -+-----------------+ -| ``enum`` | -+-----------------+ -| ``interval`` | -+-----------------+ -| ``macaddr`` | -+-----------------+ - - -Appendix C: Dropping Compact Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting version 4.0, Thrift and COMPACT STORAGE is no longer supported. - -'ALTER ... DROP COMPACT STORAGE' statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables: - -- CQL-created Compact Tables that have no clustering columns, will expose an - additional clustering column ``column1`` with ``UTF8Type``. -- CQL-created Compact Tables that had no regular columns, will expose a - regular column ``value`` with ``BytesType``. -- For CQL-Created Compact Tables, all columns originally defined as - ``regular`` will be come ``static`` -- CQL-created Compact Tables that have clustering but have no regular - columns will have an empty value column (of ``EmptyType``) -- SuperColumn Tables (can only be created through Thrift) will expose - a compact value map with an empty name. -- Thrift-created Compact Tables will have types corresponding to their - Thrift definition. diff --git a/src/doc/4.0-alpha1/_sources/cql/changes.rst.txt b/src/doc/4.0-alpha1/_sources/cql/changes.rst.txt deleted file mode 100644 index 6691f156a..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/changes.rst.txt +++ /dev/null @@ -1,211 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Changes -------- - -The following describes the changes in each version of CQL. 
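As a concrete illustration of the ``ALTER ... DROP COMPACT STORAGE`` statement described in Appendix C above, a compact table could hypothetically be migrated with a single statement (the keyspace and table names here are illustrative only, not part of the original documentation)::

    ALTER TABLE legacy_ks.legacy_table DROP COMPACT STORAGE;

After this statement the table is exposed through CQL with the additional columns described in Appendix C.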
- -3.4.5 -^^^^^ - -- Adds support for arithmetic operators (:jira:`11935`) -- Adds support for ``+`` and ``-`` operations on dates (:jira:`11936`) -- Adds ``currentTimestamp``, ``currentDate``, ``currentTime`` and ``currentTimeUUID`` functions (:jira:`13132`) - - -3.4.4 -^^^^^ - -- ``ALTER TABLE`` ``ALTER`` has been removed; a column's type may not be changed after creation (:jira:`12443`). -- ``ALTER TYPE`` ``ALTER`` has been removed; a field's type may not be changed after creation (:jira:`12443`). - -3.4.3 -^^^^^ - -- Adds a new ``duration `` :ref:`data types ` (:jira:`11873`). -- Support for ``GROUP BY`` (:jira:`10707`). -- Adds a ``DEFAULT UNSET`` option for ``INSERT JSON`` to ignore omitted columns (:jira:`11424`). -- Allows ``null`` as a legal value for TTL on insert and update. It will be treated as equivalent to inserting a 0 (:jira:`12216`). - -3.4.2 -^^^^^ - -- If a table has a non zero ``default_time_to_live``, then explicitly specifying a TTL of 0 in an ``INSERT`` or - ``UPDATE`` statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels - the ``default_time_to_live``). This wasn't the case before and the ``default_time_to_live`` was applied even though a - TTL had been explicitly set. -- ``ALTER TABLE`` ``ADD`` and ``DROP`` now allow multiple columns to be added/removed. -- New ``PER PARTITION LIMIT`` option for ``SELECT`` statements (see `CASSANDRA-7017 - `__. -- :ref:`User-defined functions ` can now instantiate ``UDTValue`` and ``TupleValue`` instances via the - new ``UDFContext`` interface (see `CASSANDRA-10818 `__. -- :ref:`User-defined types ` may now be stored in a non-frozen form, allowing individual fields to be updated and - deleted in ``UPDATE`` statements and ``DELETE`` statements, respectively. (`CASSANDRA-7423 - `__). - -3.4.1 -^^^^^ - -- Adds ``CAST`` functions. - -3.4.0 -^^^^^ - -- Support for :ref:`materialized views `. -- ``DELETE`` support for inequality expressions and ``IN`` restrictions on any primary key columns. -- ``UPDATE`` support for ``IN`` restrictions on any primary key columns. - -3.3.1 -^^^^^ - -- The syntax ``TRUNCATE TABLE X`` is now accepted as an alias for ``TRUNCATE X``. - -3.3.0 -^^^^^ - -- :ref:`User-defined functions and aggregates ` are now supported. -- Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings. -- Introduces Roles to supersede user based authentication and access control -- New ``date``, ``time``, ``tinyint`` and ``smallint`` :ref:`data types ` have been added. -- :ref:`JSON support ` has been added -- Adds new time conversion functions and deprecate ``dateOf`` and ``unixTimestampOf``. - -3.2.0 -^^^^^ - -- :ref:`User-defined types ` supported. -- ``CREATE INDEX`` now supports indexing collection columns, including indexing the keys of map collections through the - ``keys()`` function -- Indexes on collections may be queried using the new ``CONTAINS`` and ``CONTAINS KEY`` operators -- :ref:`Tuple types ` were added to hold fixed-length sets of typed positional fields. -- ``DROP INDEX`` now supports optionally specifying a keyspace. - -3.1.7 -^^^^^ - -- ``SELECT`` statements now support selecting multiple rows in a single partition using an ``IN`` clause on combinations - of clustering columns. -- ``IF NOT EXISTS`` and ``IF EXISTS`` syntax is now supported by ``CREATE USER`` and ``DROP USER`` statements, - respectively. - -3.1.6 -^^^^^ - -- A new ``uuid()`` method has been added. -- Support for ``DELETE ... IF EXISTS`` syntax. 
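To make the 3.1.7/3.1.6 additions listed above more concrete, the following hypothetical statements sketch how they can be used (the ``events`` and ``users`` tables and their columns are assumed for illustration only)::

    -- 3.1.7: IN on a combination of clustering columns within one partition
    SELECT * FROM events WHERE pk = 0 AND (c1, c2) IN ((0, 0), (1, 1));

    -- 3.1.6: conditional deletion with IF EXISTS
    DELETE FROM events WHERE pk = 0 AND c1 = 0 AND c2 = 0 IF EXISTS;

    -- 3.1.6: generating a random UUID at insert time
    INSERT INTO users (id, name) VALUES (uuid(), 'alice');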
- -3.1.5 -^^^^^ - -- It is now possible to group clustering columns in a relation, see :ref:`WHERE ` clauses. -- Added support for :ref:`static columns `. - -3.1.4 -^^^^^ - -- ``CREATE INDEX`` now allows specifying options when creating CUSTOM indexes. - -3.1.3 -^^^^^ - -- Millisecond precision formats have been added to the :ref:`timestamp ` parser. - -3.1.2 -^^^^^ - -- ``NaN`` and ``Infinity`` has been added as valid float constants. They are now reserved keywords. In the unlikely case - you we using them as a column identifier (or keyspace/table one), you will now need to double quote them. - -3.1.1 -^^^^^ - -- ``SELECT`` statement now allows listing the partition keys (using the ``DISTINCT`` modifier). See `CASSANDRA-4536 - `__. -- The syntax ``c IN ?`` is now supported in ``WHERE`` clauses. In that case, the value expected for the bind variable - will be a list of whatever type ``c`` is. -- It is now possible to use named bind variables (using ``:name`` instead of ``?``). - -3.1.0 -^^^^^ - -- ``ALTER TABLE`` ``DROP`` option added. -- ``SELECT`` statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported. -- ``CREATE`` statements for ``KEYSPACE``, ``TABLE`` and ``INDEX`` now supports an ``IF NOT EXISTS`` condition. - Similarly, ``DROP`` statements support a ``IF EXISTS`` condition. -- ``INSERT`` statements optionally supports a ``IF NOT EXISTS`` condition and ``UPDATE`` supports ``IF`` conditions. - -3.0.5 -^^^^^ - -- ``SELECT``, ``UPDATE``, and ``DELETE`` statements now allow empty ``IN`` relations (see `CASSANDRA-5626 - `__. - -3.0.4 -^^^^^ - -- Updated the syntax for custom :ref:`secondary indexes `. -- Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not - correct (the order was **not** the one of the type of the partition key). Instead, the ``token`` method should always - be used for range queries on the partition key (see :ref:`WHERE clauses `). - -3.0.3 -^^^^^ - -- Support for custom :ref:`secondary indexes ` has been added. - -3.0.2 -^^^^^ - -- Type validation for the :ref:`constants ` has been fixed. For instance, the implementation used to allow - ``'2'`` as a valid value for an ``int`` column (interpreting it has the equivalent of ``2``), or ``42`` as a valid - ``blob`` value (in which case ``42`` was interpreted as an hexadecimal representation of the blob). This is no longer - the case, type validation of constants is now more strict. See the :ref:`data types ` section for details - on which constant is allowed for which type. -- The type validation fixed of the previous point has lead to the introduction of blobs constants to allow the input of - blobs. Do note that while the input of blobs as strings constant is still supported by this version (to allow smoother - transition to blob constant), it is now deprecated and will be removed by a future version. If you were using strings - as blobs, you should thus update your client code ASAP to switch blob constants. -- A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is - now also allowed in select clauses. See the :ref:`section on functions ` for details. - -3.0.1 -^^^^^ - -- Date strings (and timestamps) are no longer accepted as valid ``timeuuid`` values. Doing so was a bug in the sense - that date string are not valid ``timeuuid``, and it was thus resulting in `confusing behaviors - `__. 
However, the following new methods have been added to help - working with ``timeuuid``: ``now``, ``minTimeuuid``, ``maxTimeuuid`` , - ``dateOf`` and ``unixTimestampOf``. -- Float constants now support the exponent notation. In other words, ``4.2E10`` is now a valid floating point value. - -Versioning -^^^^^^^^^^ - -Versioning of the CQL language adheres to the `Semantic Versioning `__ guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version. - -========= ============================================================================================================= - version description -========= ============================================================================================================= - Major The major version *must* be bumped when backward incompatible changes are introduced. This should rarely - occur. - Minor Minor version increments occur when new, but backward compatible, functionality is introduced. - Patch The patch version is incremented when bugs are fixed. -========= ============================================================================================================= diff --git a/src/doc/4.0-alpha1/_sources/cql/ddl.rst.txt b/src/doc/4.0-alpha1/_sources/cql/ddl.rst.txt deleted file mode 100644 index afb130e48..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/ddl.rst.txt +++ /dev/null @@ -1,788 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-definition: - -Data Definition ---------------- - -CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in -*keyspaces*. A keyspace defines a number of options that applies to all the tables it contains, most prominently of -which is the :ref:`replication strategy ` used by the keyspace. It is generally encouraged to use -one keyspace by *application*, and thus many cluster may define only one keyspace. - -This section describes the statements used to create, modify, and remove those keyspace and tables. - -Common definitions -^^^^^^^^^^^^^^^^^^ - -The names of the keyspaces and tables are defined by the following grammar: - -.. productionlist:: - keyspace_name: `name` - table_name: [ `keyspace_name` '.' 
] `name` - name: `unquoted_name` | `quoted_name` - unquoted_name: re('[a-zA-Z_0-9]{1, 48}') - quoted_name: '"' `unquoted_name` '"' - -Both keyspace and table name should be comprised of only alphanumeric characters, cannot be empty and are limited in -size to 48 characters (that limit exists mostly to avoid filenames (which may include the keyspace and table name) to go -over the limits of certain file systems). By default, keyspace and table names are case insensitive (``myTable`` is -equivalent to ``mytable``) but case sensitivity can be forced by using double-quotes (``"myTable"`` is different from -``mytable``). - -Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is -part of. If is is not fully-qualified, the table is assumed to be in the *current* keyspace (see :ref:`USE statement -`). - -Further, the valid names for columns is simply defined as: - -.. productionlist:: - column_name: `identifier` - -We also define the notion of statement options for use in the following section: - -.. productionlist:: - options: `option` ( AND `option` )* - option: `identifier` '=' ( `identifier` | `constant` | `map_literal` ) - -.. _create-keyspace-statement: - -CREATE KEYSPACE -^^^^^^^^^^^^^^^ - -A keyspace is created using a ``CREATE KEYSPACE`` statement: - -.. productionlist:: - create_keyspace_statement: CREATE KEYSPACE [ IF NOT EXISTS ] `keyspace_name` WITH `options` - -For instance:: - - CREATE KEYSPACE excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3} - AND durable_writes = false; - -Attempting to create a keyspace that already exists will return an error unless the ``IF NOT EXISTS`` option is used. If -it is used, the statement will be a no-op if the keyspace already exists. - -The supported ``options`` are: - -=================== ========== =========== ========= =================================================================== -name kind mandatory default description -=================== ========== =========== ========= =================================================================== -``replication`` *map* yes The replication strategy and options to use for the keyspace (see - details below). -``durable_writes`` *simple* no true Whether to use the commit log for updates on this keyspace - (disable this option at your own risk!). -=================== ========== =========== ========= =================================================================== - -The ``replication`` property is mandatory and must at least contains the ``'class'`` sub-option which defines the -:ref:`replication strategy ` class to use. The rest of the sub-options depends on what replication -strategy is used. By default, Cassandra support the following ``'class'``: - -``SimpleStrategy`` -"""""""""""""""""" - -A simple strategy that defines a replication factor for data to be spread -across the entire cluster. This is generally not a wise choice for production -because it does not respect datacenter layouts and can lead to wildly varying -query latency. For a production ready strategy, see -``NetworkTopologyStrategy``. 
``SimpleStrategy`` supports a single mandatory argument:
-
-========================= ====== ======= =============================================
-sub-option                type   since   description
-========================= ====== ======= =============================================
-``'replication_factor'`` int    all     The number of replicas to store per range
-========================= ====== ======= =============================================
-
-``NetworkTopologyStrategy``
-"""""""""""""""""""""""""""
-
-A production-ready replication strategy that allows you to set the replication
-factor independently for each data-center. The rest of the sub-options are
-key-value pairs where a key is a data-center name and its value is the
-associated replication factor. Options:
-
-===================================== ====== ====== =============================================
-sub-option                            type   since  description
-===================================== ====== ====== =============================================
-``''``                                int    all    The number of replicas to store per range in
-                                                    the provided datacenter.
-``'replication_factor'``              int    4.0    The number of replicas to use as a default
-                                                    per datacenter if not specifically provided.
-                                                    Note that this always defers to existing
-                                                    definitions or explicit datacenter settings.
-                                                    For example, to have three replicas per
-                                                    datacenter, supply this with a value of 3.
-===================================== ====== ====== =============================================
-
-Note that when ``ALTER`` ing keyspaces and supplying ``replication_factor``,
-auto-expansion will only *add* new datacenters for safety; it will not alter
-existing datacenters or remove any even if they are no longer in the cluster.
-If you want to remove datacenters while still supplying ``replication_factor``,
-explicitly zero out the datacenter for which you want to have zero replicas.
-
-An example of auto-expanding datacenters with two datacenters: ``DC1`` and ``DC2``::
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3}
-
-    DESCRIBE KEYSPACE excalibur
-        CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true;
-
-
-An example of auto-expanding and overriding a datacenter::
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2}
-
-    DESCRIBE KEYSPACE excalibur
-        CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true;
-
-An example that excludes a datacenter while using ``replication_factor``::
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0} ;
-
-    DESCRIBE KEYSPACE excalibur
-        CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true;
-
-If :ref:`transient replication ` has been enabled, transient replicas can be configured for both
-SimpleStrategy and NetworkTopologyStrategy by defining replication factors in the format ``'/'``
-
-For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient::
-
-    CREATE KEYSPACE some_keyspace
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1', 'DC2' : '5/2'};
-
-..
_use-statement: - -USE -^^^ - -The ``USE`` statement allows to change the *current* keyspace (for the *connection* on which it is executed). A number -of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the -default keyspace used when those objects are referred without a fully-qualified name (that is, without being prefixed a -keyspace name). A ``USE`` statement simply takes the keyspace to use as current as argument: - -.. productionlist:: - use_statement: USE `keyspace_name` - -.. _alter-keyspace-statement: - -ALTER KEYSPACE -^^^^^^^^^^^^^^ - -An ``ALTER KEYSPACE`` statement allows to modify the options of a keyspace: - -.. productionlist:: - alter_keyspace_statement: ALTER KEYSPACE `keyspace_name` WITH `options` - -For instance:: - - ALTER KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4}; - -The supported options are the same than for :ref:`creating a keyspace `. - -.. _drop-keyspace-statement: - -DROP KEYSPACE -^^^^^^^^^^^^^ - -Dropping a keyspace can be done using the ``DROP KEYSPACE`` statement: - -.. productionlist:: - drop_keyspace_statement: DROP KEYSPACE [ IF EXISTS ] `keyspace_name` - -For instance:: - - DROP KEYSPACE Excelsior; - -Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UTD and -functions in it, and all the data contained in those tables. - -If the keyspace does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _create-table-statement: - -CREATE TABLE -^^^^^^^^^^^^ - -Creating a new table uses the ``CREATE TABLE`` statement: - -.. productionlist:: - create_table_statement: CREATE TABLE [ IF NOT EXISTS ] `table_name` - : '(' - : `column_definition` - : ( ',' `column_definition` )* - : [ ',' PRIMARY KEY '(' `primary_key` ')' ] - : ')' [ WITH `table_options` ] - column_definition: `column_name` `cql_type` [ STATIC ] [ PRIMARY KEY] - primary_key: `partition_key` [ ',' `clustering_columns` ] - partition_key: `column_name` - : | '(' `column_name` ( ',' `column_name` )* ')' - clustering_columns: `column_name` ( ',' `column_name` )* - table_options: COMPACT STORAGE [ AND `table_options` ] - : | CLUSTERING ORDER BY '(' `clustering_order` ')' [ AND `table_options` ] - : | `options` - clustering_order: `column_name` (ASC | DESC) ( ',' `column_name` (ASC | DESC) )* - -For instance:: - - CREATE TABLE monkeySpecies ( - species text PRIMARY KEY, - common_name text, - population varint, - average_size int - ) WITH comment='Important biological records'; - - CREATE TABLE timeline ( - userid uuid, - posted_month int, - posted_time uuid, - body text, - posted_by text, - PRIMARY KEY (userid, posted_month, posted_time) - ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }; - - CREATE TABLE loads ( - machine inet, - cpu int, - mtime timeuuid, - load float, - PRIMARY KEY ((machine, cpu), mtime) - ) WITH CLUSTERING ORDER BY (mtime DESC); - -A CQL table has a name and is composed of a set of *rows*. Creating a table amounts to defining which :ref:`columns -` the rows will be composed, which of those columns compose the :ref:`primary key `, as -well as optional :ref:`options ` for the table. - -Attempting to create an already existing table will return an error unless the ``IF NOT EXISTS`` directive is used. If -it is used, the statement will be a no-op if the table already exists. - - -.. 
_column-definition:
-
-Column definitions
-~~~~~~~~~~~~~~~~~~
-
-Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later
-using an :ref:`alter statement`).
-
-A :token:`column_definition` is primarily comprised of the name of the column defined and its :ref:`type `,
-which restricts which values are accepted for that column. Additionally, a column definition can have the following
-modifiers:
-
-``STATIC``
-    it declares the column as being a :ref:`static column `.
-
-``PRIMARY KEY``
-    it declares the column as being the sole component of the :ref:`primary key ` of the table.
-
-.. _static-columns:
-
-Static columns
-``````````````
-Some columns can be declared as ``STATIC`` in a table definition. A column that is static will be “shared” by all the
-rows belonging to the same partition (having the same :ref:`partition key `). For instance::
-
-    CREATE TABLE t (
-        pk int,
-        t int,
-        v text,
-        s text static,
-        PRIMARY KEY (pk, t)
-    );
-
-    INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-    INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-    SELECT * FROM t;
-       pk | t | v      | s
-      ----+---+--------+-----------
-        0 | 0 | 'val0' | 'static1'
-        0 | 1 | 'val1' | 'static1'
-
-As can be seen, the ``s`` value is the same (``static1``) for both of the rows in the partition (the partition key in
-that example being ``pk``, both rows are in that same partition): the 2nd insertion has overridden the value for ``s``.
-
-The use of static columns has the following restrictions:
-
-- tables with the ``COMPACT STORAGE`` option (see below) cannot use them.
-- a table without clustering columns cannot have static columns (in a table without clustering columns, every partition
-  has only one row, and so every column is inherently static).
-- only non ``PRIMARY KEY`` columns can be static.
-
-.. _primary-key:
-
-The Primary key
-~~~~~~~~~~~~~~~
-
-Within a table, a row is uniquely identified by its ``PRIMARY KEY``, and hence all tables **must** define a PRIMARY KEY
-(and only one). A ``PRIMARY KEY`` definition is composed of one or more of the columns defined in the table.
-Syntactically, the primary key is defined by the keywords ``PRIMARY KEY`` followed by a comma-separated list of the column
-names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that
-column definition by the ``PRIMARY KEY`` keywords. The order of the columns in the primary key definition matters.
-
-A CQL primary key is composed of 2 parts:
-
-- the :ref:`partition key ` part. It is the first component of the primary key definition. It can be a
-  single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key;
-  the smallest possible table definition is::
-
-      CREATE TABLE t (k text PRIMARY KEY);
-
-- the :ref:`clustering columns `. Those are the columns after the first component of the primary key
-  definition, and the order of those columns defines the *clustering order*.
-
-Some examples of primary key definitions are:
-
-- ``PRIMARY KEY (a)``: ``a`` is the partition key and there are no clustering columns.
-- ``PRIMARY KEY (a, b, c)`` : ``a`` is the partition key and ``b`` and ``c`` are the clustering columns.
-- ``PRIMARY KEY ((a, b), c)`` : ``a`` and ``b`` compose the partition key (this is often called a *composite* partition
-  key) and ``c`` is the clustering column.
-
-
-..
_partition-key: - -The partition key -````````````````` - -Within a table, CQL defines the notion of a *partition*. A partition is simply the set of rows that share the same value -for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same -partition only they have the same values for all those partition key column. So for instance, given the following table -definition and content:: - - CREATE TABLE t ( - a int, - b int, - c int, - d int, - PRIMARY KEY ((a, b), c, d) - ); - - SELECT * FROM t; - a | b | c | d - ---+---+---+--- - 0 | 0 | 0 | 0 // row 1 - 0 | 0 | 1 | 1 // row 2 - 0 | 1 | 2 | 2 // row 3 - 0 | 1 | 3 | 3 // row 4 - 1 | 1 | 4 | 4 // row 5 - -``row 1`` and ``row 2`` are in the same partition, ``row 3`` and ``row 4`` are also in the same partition (but a -different one) and ``row 5`` is in yet another partition. - -Note that a table always has a partition key, and that if the table has no :ref:`clustering columns -`, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns). - -The most important property of partition is that all the rows belonging to the same partition are guarantee to be stored -on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be -localized together in the Cluster, and it is thus important to choose your partition key wisely so that rows that needs -to be fetch together are in the same partition (so that querying those rows together require contacting a minimum of -nodes). - -Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to -be stored on the same set of replica node, a partition key that groups too much data can create a hotspot. - -Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done *atomically* and in *isolation*, which is not the case across partitions. - -The proper choice of the partition key and clustering columns for a table is probably one of the most important aspect -of data modeling in Cassandra, and it largely impact which queries can be performed, and how efficiently they are. - - -.. _clustering-columns: - -The clustering columns -`````````````````````` - -The clustering columns of a table defines the clustering order for the partition of that table. For a given -:ref:`partition `, all the rows are physically ordered inside Cassandra by that clustering order. For -instance, given:: - - CREATE TABLE t ( - a int, - b int, - c int, - PRIMARY KEY (a, b, c) - ); - - SELECT * FROM t; - a | b | c - ---+---+--- - 0 | 0 | 4 // row 1 - 0 | 1 | 9 // row 2 - 0 | 2 | 2 // row 3 - 0 | 3 | 3 // row 4 - -then the rows (which all belong to the same partition) are all stored internally in the order of the values of their -``b`` column (the order they are displayed above). So where the partition key of the table allows to group rows on the -same replica set, the clustering columns controls how those rows are stored on the replica. That sorting allows the -retrieval of a range of rows within a partition (for instance, in the example above, ``SELECT * FROM t WHERE a = 0 AND b -> 1 and b <= 3``) to be very efficient. - - -.. 
_create-table-options: - -Table options -~~~~~~~~~~~~~ - -A CQL table has a number of options that can be set at creation (and, for most of them, :ref:`altered -` later). These options are specified after the ``WITH`` keyword. - -Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the ``COMPACT STORAGE`` option and the ``CLUSTERING ORDER`` option. Those, as well as the other -options of a table are described in the following sections. - -.. _compact-tables: - -Compact tables -`````````````` - -.. warning:: Since Cassandra 3.0, compact tables have the exact same layout internally than non compact ones (for the - same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition - and usage. It only exists for historical reason and is preserved for backward compatibility And as ``COMPACT - STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new table with the - ``COMPACT STORAGE`` option. - -A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is only maintained for backward -compatibility for definitions created before CQL version 3 and shouldn't be used for new tables. Declaring a -table with this option creates limitations for the table which are largely arbitrary (and exists for historical -reasons). Amongst those limitation: - -- a compact table cannot use collections nor static columns. -- if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary - key ones. This imply you cannot add or remove columns after creation in particular. -- a compact table is limited in the indexes it can create, and no materialized view can be created on it. - -.. _clustering-order: - -Reversing the clustering order -`````````````````````````````` - -The clustering order of a table is defined by the :ref:`clustering columns ` of that table. By -default, that ordering is based on natural order of those clustering order, but the ``CLUSTERING ORDER`` allows to -change that clustering order to use the *reverse* natural order for some (potentially all) of the columns. - -The ``CLUSTERING ORDER`` option takes the comma-separated list of the clustering column, each with a ``ASC`` (for -*ascendant*, e.g. the natural order) or ``DESC`` (for *descendant*, e.g. the reverse natural order). Note in particular -that the default (if the ``CLUSTERING ORDER`` option is not used) is strictly equivalent to using the option with all -clustering columns using the ``ASC`` modifier. - -Note that this option is basically a hint for the storage engine to change the order in which it stores the row but it -has 3 visible consequences: - -# it limits which ``ORDER BY`` clause are allowed for :ref:`selects ` on that table. You can only - order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering column - ``a`` and ``b`` and you defined ``WITH CLUSTERING ORDER (a DESC, b ASC)``, then in queries you will be allowed to use - ``ORDER BY (a DESC, b ASC)`` and (reverse clustering order) ``ORDER BY (a ASC, b DESC)`` but **not** ``ORDER BY (a - ASC, b ASC)`` (nor ``ORDER BY (a DESC, b DESC)``). -# it also change the default order of results when queried (if no ``ORDER BY`` is provided). Results are always returned - in clustering order (within a partition). 
-# it has a small performance impact on some queries as queries in reverse clustering order are slower than the one in - forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of - your columns (which is common with time series for instance where you often want data from the newest to the oldest), - it is an optimization to declare a descending clustering order. - -.. _create-table-general-options: - -Other table options -``````````````````` - -.. todo:: review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance) - -A table supports the following options: - -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| option | kind | default | description | -+================================+==========+=============+===========================================================+ -| ``comment`` | *simple* | none | A free-form, human-readable comment. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``speculative_retry`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``additional_write_policy`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``gc_grace_seconds`` | *simple* | 864000 | Time to wait before garbage collecting tombstones | -| | | | (deletion markers). | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``bloom_filter_fp_chance`` | *simple* | 0.00075 | The target probability of false positive of the sstable | -| | | | bloom filters. Said bloom filters will be sized to provide| -| | | | the provided probability (thus lowering this value impact | -| | | | the size of bloom filters in-memory and on-disk) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``default_time_to_live`` | *simple* | 0 | The default expiration time (“TTL”) in seconds for a | -| | | | table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compaction`` | *map* | *see below* | :ref:`Compaction options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compression`` | *map* | *see below* | :ref:`Compression options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``caching`` | *map* | *see below* | :ref:`Caching options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``memtable_flush_period_in_ms``| *simple* | 0 | Time (in ms) before Cassandra flushes memtables to disk. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``read_repair`` | *simple* | BLOCKING | Sets read repair behavior (see below) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ - -.. 
_speculative-retry-options: - -Speculative retry options -######################### - -By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ``ONE``, a quorum for ``QUORUM``, and so on. -``speculative_retry`` determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. ``additional_write_policy`` specifies the threshold at which -a cheap quorum write will be upgraded to include transient replicas. The following are legal values (case-insensitive): - -============================ ======================== ============================================================================= - Format Example Description -============================ ======================== ============================================================================= - ``XPERCENTILE`` 90.5PERCENTILE Coordinators record average per-table response times for all replicas. - If a replica takes longer than ``X`` percent of this table's average - response time, the coordinator queries an additional replica. - ``X`` must be between 0 and 100. - ``XP`` 90.5P Synonym for ``XPERCENTILE`` - ``Yms`` 25ms If a replica takes more than ``Y`` milliseconds to respond, - the coordinator queries an additional replica. - ``MIN(XPERCENTILE,YMS)`` MIN(99PERCENTILE,35MS) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is lower at the time of calculation. - Parameters are ``XPERCENTILE``, ``XP``, or ``Yms``. - This is helpful to help protect against a single slow instance; in the - happy case the 99th percentile is normally lower than the specified - fixed value however, a slow host may skew the percentile very high - meaning the slower the cluster gets, the higher the value of the percentile, - and the higher the calculated time used to determine if we should - speculate or not. This allows us to set an upper limit that we want to - speculate at, but avoid skewing the tail latencies by speculating at the - lower value when the percentile is less than the specified fixed upper bound. - ``MAX(XPERCENTILE,YMS)`` MAX(90.5P,25ms) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is higher at the time of calculation. - ``ALWAYS`` Coordinators always query all replicas. - ``NEVER`` Coordinators never query additional replicas. -============================ =================== ============================================================================= - -This setting does not affect reads with consistency level ``ALL`` because they already query all replicas. - -Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default ``99PERCENTILE``. - -.. _cql-compaction-options: - -Compaction options -################## - -The ``compaction`` options must at least define the ``'class'`` sub-option, that defines the compaction strategy class -to use. The default supported class are ``'SizeTieredCompactionStrategy'`` (:ref:`STCS `), -``'LeveledCompactionStrategy'`` (:ref:`LCS `) and ``'TimeWindowCompactionStrategy'`` (:ref:`TWCS `) (the -``'DateTieredCompactionStrategy'`` is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be -preferred instead). Custom strategy can be provided by specifying the full class name as a :ref:`string constant -`. 
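As a sketch of how the two preceding groups of options can be applied together, the following statement sets a hybrid speculative retry policy and a compaction strategy on an assumed ``events`` table (the table name and the option values are illustrative only)::

    ALTER TABLE events
        WITH speculative_retry = 'MIN(99PERCENTILE,50ms)'
        AND compaction = {'class': 'SizeTieredCompactionStrategy', 'min_threshold': 6};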
- -All default strategies support a number of :ref:`common options `, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS `, :ref:`LCS -` and :ref:`TWCS `). - -.. _cql-compression-options: - -Compression options -################### - -The ``compression`` options define if and how the sstables of the table are compressed. The following sub-options are -available: - -========================= =============== ============================================================================= - Option Default Description -========================= =============== ============================================================================= - ``class`` LZ4Compressor The compression algorithm to use. Default compressor are: LZ4Compressor, - SnappyCompressor and DeflateCompressor. Use ``'enabled' : false`` to disable - compression. Custom compressor can be provided by specifying the full class - name as a “string constant”:#constants. - ``enabled`` true Enable/disable sstable compression. - ``chunk_length_in_kb`` 64 On disk SSTables are compressed by block (to allow random reads). This - defines the size (in KB) of said block. Bigger values may improve the - compression rate, but increases the minimum size of data to be read from disk - for a read - ``crc_check_chance`` 1.0 When compression is enabled, each compressed block includes a checksum of - that block for the purpose of detecting disk bitrot and avoiding the - propagation of corruption to other replica. This option defines the - probability with which those checksums are checked during read. By default - they are always checked. Set to 0 to disable checksum checking and to 0.5 for - instance to check them every other read | -========================= =============== ============================================================================= - - -For instance, to create a table with LZ4Compressor and a chunk_lenth_in_kb of 4KB:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4}; - - -.. _cql-caching-options: - -Caching options -############### - -The ``caching`` options allows to configure both the *key cache* and the *row cache* for the table. The following -sub-options are available: - -======================== ========= ==================================================================================== - Option Default Description -======================== ========= ==================================================================================== - ``keys`` ALL Whether to cache keys (“key cache”) for this table. Valid values are: ``ALL`` and - ``NONE``. - ``rows_per_partition`` NONE The amount of rows to cache per partition (“row cache”). If an integer ``n`` is - specified, the first ``n`` queried rows of a partition will be cached. Other - possible options are ``ALL``, to cache all rows of a queried partition, or ``NONE`` - to disable row caching. 
-======================== ========= ==================================================================================== - - -For instance, to create a table with both a key cache and 10 rows per partition:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10}; - - -Read Repair options -################### - -The ``read_repair`` options configures the read repair behavior to allow tuning for various performance and -consistency behaviors. Two consistency properties are affected by read repair behavior. - -- Monotonic Quorum Reads: Provided by ``BLOCKING``. Monotonic quorum reads prevents reads from appearing to go back - in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of - replicas, it may be visible in one read, and then disappear in a subsequent read. -- Write Atomicity: Provided by ``NONE``. Write atomicity prevents reads from returning partially applied writes. - Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement - is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it - is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a - batch, but then select a single row by specifying the clustering column in a SELECT statement. - -The available read repair settings are: - -Blocking -```````` -The default setting. When ``read_repair`` is set to ``BLOCKING``, and a read repair is triggered, the read will block -on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition -level write atomicity - -None -```` - -When ``read_repair`` is set to ``NONE``, the coordinator will reconcile any differences between replicas, but will not -attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads. - - -Other considerations: -##################### - -- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. There is thus no need to try to - anticipate future usage when creating a table. - -.. _alter-table-statement: - -ALTER TABLE -^^^^^^^^^^^ - -Altering an existing table uses the ``ALTER TABLE`` statement: - -.. productionlist:: - alter_table_statement: ALTER TABLE `table_name` `alter_table_instruction` - alter_table_instruction: ADD `column_name` `cql_type` ( ',' `column_name` `cql_type` )* - : | DROP `column_name` ( `column_name` )* - : | WITH `options` - -For instance:: - - ALTER TABLE addamsFamily ADD gravesite varchar; - - ALTER TABLE addamsFamily - WITH comment = 'A most excellent and useful table'; - -The ``ALTER TABLE`` statement can: - -- Add new column(s) to the table (through the ``ADD`` instruction). Note that the primary key of a table cannot be - changed and thus newly added column will, by extension, never be part of the primary key. Also note that :ref:`compact - tables ` have restrictions regarding column addition. Note that this is constant (in the amount of - data the cluster contains) time operation. -- Remove column(s) from the table. This drops both the column and all its content, but note that while the column - becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings - below. 
Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the - cluster) time operation. -- Change some of the table options (through the ``WITH`` instruction). The :ref:`supported options - ` are the same that when creating a table (outside of ``COMPACT STORAGE`` and ``CLUSTERING - ORDER`` that cannot be changed after creation). Note that setting any ``compaction`` sub-options has the effect of - erasing all previous ``compaction`` options, so you need to re-specify all the sub-options if you want to keep them. - The same note applies to the set of ``compression`` sub-options. - -.. warning:: Dropping a column assumes that the timestamps used for the value of this column are "real" timestamp in - microseconds. Using "real" timestamps in microseconds is the default is and is **strongly** recommended but as - Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another - convention. Please be aware that if you do so, dropping a column will not work correctly. - -.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name than the dropped one - **unless** the type of the dropped column was a (non-frozen) column (due to an internal technical limitation). - - -.. _drop-table-statement: - -DROP TABLE -^^^^^^^^^^ - -Dropping a table uses the ``DROP TABLE`` statement: - -.. productionlist:: - drop_table_statement: DROP TABLE [ IF EXISTS ] `table_name` - -Dropping a table results in the immediate, irreversible removal of the table, including all data it contains. - -If the table does not exist, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _truncate-statement: - -TRUNCATE -^^^^^^^^ - -A table can be truncated using the ``TRUNCATE`` statement: - -.. productionlist:: - truncate_statement: TRUNCATE [ TABLE ] `table_name` - -Note that ``TRUNCATE TABLE foo`` is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the ``TABLE`` keyword can be omitted. - -Truncating a table permanently removes all existing data from the table, but without removing the table itself. diff --git a/src/doc/4.0-alpha1/_sources/cql/definitions.rst.txt b/src/doc/4.0-alpha1/_sources/cql/definitions.rst.txt deleted file mode 100644 index 3df6f2099..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/definitions.rst.txt +++ /dev/null @@ -1,234 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. highlight:: cql - -Definitions ------------ - -.. 
_conventions:
-
-Conventions
-^^^^^^^^^^^
-
-To aid in specifying the CQL syntax, we will use the following conventions in this document:
-
-- Language rules will be given in an informal `BNF variant `_ notation. In particular, we'll use square brackets
-  (``[ item ]``) for optional items, ``*`` and ``+`` for repeated items (where ``+`` implies at least one).
-- The grammar will also use the following convention for convenience: non-terminal terms will be lowercase (and link to
-  their definition) while terminal keywords will be provided in "all caps". Note however that keywords are
-  :ref:`identifiers` and are thus case insensitive in practice. We will also define some early constructions using
-  regexps, which we'll indicate with ``re()``.
-- The grammar is provided for documentation purposes and leaves some minor details out. For instance, the comma on the
-  last column definition in a ``CREATE TABLE`` statement is optional but supported if present even though the grammar in
-  this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
-- References to keywords or pieces of CQL code in running text will be shown in a ``fixed-width font``.
-
-
-.. _identifiers:
-
-Identifiers and keywords
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-The CQL language uses *identifiers* (or *names*) to identify tables, columns and other objects. An identifier is a token
-matching the regular expression ``[a-zA-Z][a-zA-Z0-9_]*``.
-
-A number of such identifiers, like ``SELECT`` or ``WITH``, are *keywords*. They have a fixed meaning for the language
-and most are reserved. The list of those keywords can be found in :ref:`appendix-A`.
-
-Identifiers and (unquoted) keywords are case insensitive. Thus ``SELECT`` is the same as ``select`` or ``sElEcT``, and
-``myId`` is the same as ``myid`` or ``MYID``. A convention often used (in particular by the samples of this
-documentation) is to use upper case for keywords and lower case for other identifiers.
-
-There is a second kind of identifier, called a *quoted identifier*, defined by enclosing an arbitrary sequence of
-characters (non empty) in double-quotes (``"``). Quoted identifiers are never keywords. Thus ``"select"`` is not a
-reserved keyword and can be used to refer to a column (note that using this is particularly ill-advised), while ``select``
-would raise a parsing error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case
-sensitive (``"My Quoted Id"`` is *different* from ``"my quoted id"``). A fully lowercase quoted identifier that matches
-``[a-zA-Z][a-zA-Z0-9_]*`` is however *equivalent* to the unquoted identifier obtained by removing the double-quotes (so
-``"myid"`` is equivalent to ``myid`` and to ``myId`` but different from ``"myId"``). Inside a quoted identifier, the
-double-quote character can be repeated to escape it, so ``"foo "" bar"`` is a valid identifier.
-
-.. note:: *quoted identifiers* allow declaring columns with arbitrary names, and those can sometimes clash with
-   specific names used by the server. For instance, when using conditional update, the server will respond with a
-   result-set containing a special result named ``"[applied]"``. If you've declared a column with such a name, this
-   could potentially confuse some tools and should be avoided.
In general, unquoted identifiers should be preferred but - if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like - ``"[applied]"``) and any name that looks like a function call (like ``"f(x)"``). - -More formally, we have: - -.. productionlist:: - identifier: `unquoted_identifier` | `quoted_identifier` - unquoted_identifier: re('[a-zA-Z][a-zA-Z0-9_]*') - quoted_identifier: '"' (any character where " can appear if doubled)+ '"' - -.. _constants: - -Constants -^^^^^^^^^ - -CQL defines the following kind of *constants*: - -.. productionlist:: - constant: `string` | `integer` | `float` | `boolean` | `uuid` | `blob` | NULL - string: '\'' (any character where ' can appear if doubled)+ '\'' - : '$$' (any character other than '$$') '$$' - integer: re('-?[0-9]+') - float: re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY - boolean: TRUE | FALSE - uuid: `hex`{8}-`hex`{4}-`hex`{4}-`hex`{4}-`hex`{12} - hex: re("[0-9a-fA-F]") - blob: '0' ('x' | 'X') `hex`+ - -In other words: - -- A string constant is an arbitrary sequence of characters enclosed by single-quote(``'``). A single-quote - can be included by repeating it, e.g. ``'It''s raining today'``. Those are not to be confused with quoted - :ref:`identifiers` that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence - of characters by two dollar characters, in which case single-quote can be used without escaping (``$$It's raining - today$$``). That latter form is often used when defining :ref:`user-defined functions ` to avoid having to - escape single-quote characters in function body (as they are more likely to occur than ``$$``). -- Integer, float and boolean constant are defined as expected. Note however than float allows the special ``NaN`` and - ``Infinity`` constants. -- CQL supports UUID_ constants. -- Blobs content are provided in hexadecimal and prefixed by ``0x``. -- The special ``NULL`` constant denotes the absence of value. - -For how these constants are typed, see the :ref:`data-types` section. - -Terms -^^^^^ - -CQL has the notion of a *term*, which denotes the kind of values that CQL support. Terms are defined by: - -.. productionlist:: - term: `constant` | `literal` | `function_call` | `arithmetic_operation` | `type_hint` | `bind_marker` - literal: `collection_literal` | `udt_literal` | `tuple_literal` - function_call: `identifier` '(' [ `term` (',' `term`)* ] ')' - arithmetic_operation: '-' `term` | `term` ('+' | '-' | '*' | '/' | '%') `term` - type_hint: '(' `cql_type` `)` term - bind_marker: '?' | ':' `identifier` - -A term is thus one of: - -- A :ref:`constant `. -- A literal for either :ref:`a collection `, :ref:`a user-defined type ` or :ref:`a tuple ` - (see the linked sections for details). -- A function call: see :ref:`the section on functions ` for details on which :ref:`native function - ` exists and how to define your own :ref:`user-defined ones `. -- An arithmetic operation between terms. see :ref:`the section on arithmetic operations ` -- A *type hint*: see the :ref:`related section ` for details. -- A bind marker, which denotes a variable to be bound at execution time. See the section on :ref:`prepared-statements` - for details. A bind marker can be either anonymous (``?``) or named (``:some_name``). The latter form provides a more - convenient way to refer to the variable for binding it and should generally be preferred. 
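To tie several of these constructs together, here is a small, hedged illustration (the ``quotes`` table and its columns are invented for the example); it uses a single-quoted string with an escaped quote, a dollar-quoted string, a ``uuid`` constant, a ``blob`` constant and a named bind marker::

    INSERT INTO quotes (id, author, body, raw)
    VALUES (123e4567-e89b-12d3-a456-426614174000,
            'O''Brien',
            $$It's raining today$$,
            0xCAFE);

    SELECT body FROM quotes WHERE id = :quote_id;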
- - -Comments -^^^^^^^^ - -A comment in CQL is a line beginning by either double dashes (``--``) or double slash (``//``). - -Multi-line comments are also supported through enclosure within ``/*`` and ``*/`` (but nesting is not supported). - -:: - - -- This is a comment - // This is a comment too - /* This is - a multi-line comment */ - -Statements -^^^^^^^^^^ - -CQL consists of statements that can be divided in the following categories: - -- :ref:`data-definition` statements, to define and change how the data is stored (keyspaces and tables). -- :ref:`data-manipulation` statements, for selecting, inserting and deleting data. -- :ref:`secondary-indexes` statements. -- :ref:`materialized-views` statements. -- :ref:`cql-roles` statements. -- :ref:`cql-permissions` statements. -- :ref:`User-Defined Functions ` statements. -- :ref:`udts` statements. -- :ref:`cql-triggers` statements. - -All the statements are listed below and are described in the rest of this documentation (see links above): - -.. productionlist:: - cql_statement: `statement` [ ';' ] - statement: `ddl_statement` - : | `dml_statement` - : | `secondary_index_statement` - : | `materialized_view_statement` - : | `role_or_permission_statement` - : | `udf_statement` - : | `udt_statement` - : | `trigger_statement` - ddl_statement: `use_statement` - : | `create_keyspace_statement` - : | `alter_keyspace_statement` - : | `drop_keyspace_statement` - : | `create_table_statement` - : | `alter_table_statement` - : | `drop_table_statement` - : | `truncate_statement` - dml_statement: `select_statement` - : | `insert_statement` - : | `update_statement` - : | `delete_statement` - : | `batch_statement` - secondary_index_statement: `create_index_statement` - : | `drop_index_statement` - materialized_view_statement: `create_materialized_view_statement` - : | `drop_materialized_view_statement` - role_or_permission_statement: `create_role_statement` - : | `alter_role_statement` - : | `drop_role_statement` - : | `grant_role_statement` - : | `revoke_role_statement` - : | `list_roles_statement` - : | `grant_permission_statement` - : | `revoke_permission_statement` - : | `list_permissions_statement` - : | `create_user_statement` - : | `alter_user_statement` - : | `drop_user_statement` - : | `list_users_statement` - udf_statement: `create_function_statement` - : | `drop_function_statement` - : | `create_aggregate_statement` - : | `drop_aggregate_statement` - udt_statement: `create_type_statement` - : | `alter_type_statement` - : | `drop_type_statement` - trigger_statement: `create_trigger_statement` - : | `drop_trigger_statement` - -.. _prepared-statements: - -Prepared Statements -^^^^^^^^^^^^^^^^^^^ - -CQL supports *prepared statements*. Prepared statements are an optimization that allows to parse a query only once but -execute it multiple times with different concrete values. - -Any statement that uses at least one bind marker (see :token:`bind_marker`) will need to be *prepared*. After which the statement -can be *executed* by provided concrete values for each of its marker. The exact details of how a statement is prepared -and then executed depends on the CQL driver used and you should refer to your driver documentation. diff --git a/src/doc/4.0-alpha1/_sources/cql/dml.rst.txt b/src/doc/4.0-alpha1/_sources/cql/dml.rst.txt deleted file mode 100644 index 1308de57e..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/dml.rst.txt +++ /dev/null @@ -1,522 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. 
or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-manipulation: - -Data Manipulation ------------------ - -This section describes the statements supported by CQL to insert, update, delete and query data. - -.. _select-statement: - -SELECT -^^^^^^ - -Querying data from data is done using a ``SELECT`` statement: - -.. productionlist:: - select_statement: SELECT [ JSON | DISTINCT ] ( `select_clause` | '*' ) - : FROM `table_name` - : [ WHERE `where_clause` ] - : [ GROUP BY `group_by_clause` ] - : [ ORDER BY `ordering_clause` ] - : [ PER PARTITION LIMIT (`integer` | `bind_marker`) ] - : [ LIMIT (`integer` | `bind_marker`) ] - : [ ALLOW FILTERING ] - select_clause: `selector` [ AS `identifier` ] ( ',' `selector` [ AS `identifier` ] ) - selector: `column_name` - : | `term` - : | CAST '(' `selector` AS `cql_type` ')' - : | `function_name` '(' [ `selector` ( ',' `selector` )* ] ')' - : | COUNT '(' '*' ')' - where_clause: `relation` ( AND `relation` )* - relation: `column_name` `operator` `term` - : '(' `column_name` ( ',' `column_name` )* ')' `operator` `tuple_literal` - : TOKEN '(' `column_name` ( ',' `column_name` )* ')' `operator` `term` - operator: '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY - group_by_clause: `column_name` ( ',' `column_name` )* - ordering_clause: `column_name` [ ASC | DESC ] ( ',' `column_name` [ ASC | DESC ] )* - -For instance:: - - SELECT name, occupation FROM users WHERE userid IN (199, 200, 207); - SELECT JSON name, occupation FROM users WHERE userid = 199; - SELECT name AS user_name, occupation AS user_occupation FROM users; - - SELECT time, value - FROM events - WHERE event_type = 'myEvent' - AND time > '2011-02-03' - AND time <= '2012-01-01' - - SELECT COUNT (*) AS user_count FROM users; - -The ``SELECT`` statements reads one or more columns for one or more rows in a table. It returns a result-set of the rows -matching the request, where each row contains the values for the selection corresponding to the query. Additionally, -:ref:`functions ` including :ref:`aggregation ` ones can be applied to the result. - -A ``SELECT`` statement contains at least a :ref:`selection clause ` and the name of the table on which -the selection is on (note that CQL does **not** joins or sub-queries and thus a select statement only apply to a single -table). In most case, a select will also have a :ref:`where clause ` and it can optionally have additional -clauses to :ref:`order ` or :ref:`limit ` the results. Lastly, :ref:`queries that require -filtering ` can be allowed if the ``ALLOW FILTERING`` flag is provided. - -.. _selection-clause: - -Selection clause -~~~~~~~~~~~~~~~~ - -The :token:`select_clause` determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. 
It consists of a comma-separated list of *selectors* or, -alternatively, of the wildcard character (``*``) to select all the columns defined in the table. - -Selectors -````````` - -A :token:`selector` can be one of: - -- A column name of the table selected, to retrieve the values for that column. -- A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the - corresponding column of the result-set will simply have the value of this term for every row returned). -- A casting, which allows to convert a nested selector to a (compatible) type. -- A function call, where the arguments are selector themselves. See the section on :ref:`functions ` for - more details. -- The special call ``COUNT(*)`` to the :ref:`COUNT function `, which counts all non-null results. - -Aliases -``````` - -Every *top-level* selector can also be aliased (using `AS`). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:: - - // Without alias - SELECT intAsBlob(4) FROM t; - - // intAsBlob(4) - // -------------- - // 0x00000004 - - // With alias - SELECT intAsBlob(4) AS four FROM t; - - // four - // ------------ - // 0x00000004 - -.. note:: Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the ``WHERE`` - clause, not in the ``ORDER BY`` clause, ...). You must use the orignal column name instead. - - -``WRITETIME`` and ``TTL`` function -``````````````````````````````````` - -Selection supports two special functions (that aren't allowed anywhere else): ``WRITETIME`` and ``TTL``. Both function -take only one argument and that argument *must* be a column name (so for instance ``TTL(3)`` is invalid). - -Those functions allow to retrieve meta-information that are stored internally for each column, namely: - -- the timestamp of the value of the column for ``WRITETIME``. -- the remaining time to live (in seconds) for the value of the column if it set to expire (and ``null`` otherwise). - -.. _where-clause: - -The ``WHERE`` clause -~~~~~~~~~~~~~~~~~~~~ - -The ``WHERE`` clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the ``PRIMARY KEY`` and/or have a `secondary index <#createIndexStmt>`__ defined on them. - -Not all relations are allowed in a query. For instance, non-equal relations (where ``IN`` is considered as an equal -relation) on a partition key are not supported (but see the use of the ``TOKEN`` method below to do non-equal queries on -the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations -on them is restricted to the relations that allow to select a **contiguous** (for the ordering) set of rows. 
For -instance, given:: - - CREATE TABLE posts ( - userid text, - blog_title text, - posted_at timestamp, - entry_title text, - content text, - category int, - PRIMARY KEY (userid, blog_title, posted_at) - ) - -The following query is allowed:: - - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND blog_title='John''s Blog' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):: - - // Needs a blog_title to be set to select ranges of posted_at - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -When specifying relations, the ``TOKEN`` function can be used on the ``PARTITION KEY`` column to query. In that case, -rows will be selected based on the token of their ``PARTITION_KEY`` rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -``token(-1) > token(0)`` in particular). Example:: - - SELECT * FROM posts - WHERE token(userid) > token('tom') AND token(userid) < token('bob') - -Moreover, the ``IN`` relation is only allowed on the last column of the partition key and on the last column of the full -primary key. - -It is also possible to “group” ``CLUSTERING COLUMNS`` together in a relation using the tuple notation. For instance:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01') - -will request all rows that sorts after the one having “John's Blog” as ``blog_tile`` and '2012-01-01' for ``posted_at`` -in the clustering order. In particular, rows having a ``post_at <= '2012-01-01'`` will be returned as long as their -``blog_title > 'John''s Blog'``, which would not be the case for:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND blog_title > 'John''s Blog' - AND posted_at > '2012-01-01' - -The tuple notation may also be used for ``IN`` clauses on clustering columns:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01')) - -The ``CONTAINS`` operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -``CONTAINS`` applies to the map values. The ``CONTAINS KEY`` operator may only be used on map columns and applies to the -map keys. - -.. _group-by-clause: - -Grouping results -~~~~~~~~~~~~~~~~ - -The ``GROUP BY`` option allows to condense into a single row all selected rows that share the same values for a set -of columns. - -Using the ``GROUP BY`` option, it is only possible to group rows at the partition key level or at a clustering column -level. By consequence, the ``GROUP BY`` option only accept as arguments primary key column names in the primary key -order. If a primary key column is restricted by an equality restriction it is not required to be present in the -``GROUP BY`` clause. - -Aggregate functions will produce a separate value for each group. If no ``GROUP BY`` clause is specified, -aggregates functions will produce a single value for all the rows. - -If a column is selected without an aggregate function, in a statement with a ``GROUP BY``, the first value encounter -in each group will be returned. - -.. 
_ordering-clause: - -Ordering results -~~~~~~~~~~~~~~~~ - -The ``ORDER BY`` clause allows to select the order of the returned results. It takes as argument a list of column names -along with the order for the column (``ASC`` for ascendant and ``DESC`` for descendant, omitting the order being -equivalent to ``ASC``). Currently the possible orderings are limited by the :ref:`clustering order ` -defined on the table: - -- if the table has been defined without any specific ``CLUSTERING ORDER``, then then allowed orderings are the order - induced by the clustering columns and the reverse of that one. -- otherwise, the orderings allowed are the order of the ``CLUSTERING ORDER`` option and the reversed one. - -.. _limit-clause: - -Limiting results -~~~~~~~~~~~~~~~~ - -The ``LIMIT`` option to a ``SELECT`` statement limits the number of rows returned by a query, while the ``PER PARTITION -LIMIT`` option limits the number of rows returned for a given partition by the query. Note that both type of limit can -used in the same statement. - -.. _allow-filtering: - -Allowing filtering -~~~~~~~~~~~~~~~~~~ - -By default, CQL only allows select queries that don't involve “filtering” server side, i.e. queries where we know that -all (live) record read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” -queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of -data **returned** by the query (which can be controlled through ``LIMIT``). - -The ``ALLOW FILTERING`` option allows to explicitly allow (some) queries that require filtering. Please note that a -query using ``ALLOW FILTERING`` may thus have unpredictable performance (for the definition above), i.e. even a query -that selects a handful of records **may** exhibit performance that depends on the total amount of data stored in the -cluster. - -For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:: - - CREATE TABLE users ( - username text PRIMARY KEY, - firstname text, - lastname text, - birth_year int, - country text - ) - - CREATE INDEX ON users(birth_year); - -Then the following queries are valid:: - - SELECT * FROM users; - SELECT * FROM users WHERE birth_year = 1981; - -because in both case, Cassandra guarantees that these queries performance will be proportional to the amount of data -returned. In particular, if no users are born in 1981, then the second query performance will not depend of the number -of user profile stored in the database (not directly at least: due to secondary index implementation consideration, this -query may still depend on the number of node in the cluster, which indirectly depends on the amount of data stored. -Nevertheless, the number of nodes will always be multiple number of magnitude lower than the number of user profile -stored). Of course, both query may return very large result set in practice, but the amount of data returned can always -be controlled by adding a ``LIMIT``. - -However, the following query will be rejected:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR'; - -because Cassandra cannot guarantee that it won't have to scan large amount of data even if the result to those query is -small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from -France. 
However, if you “know what you are doing”, you can force the execution of this query by using ``ALLOW -FILTERING`` and so the following query is valid:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING; - -.. _insert-statement: - -INSERT -^^^^^^ - -Inserting data for a row is done using an ``INSERT`` statement: - -.. productionlist:: - insert_statement: INSERT INTO `table_name` ( `names_values` | `json_clause` ) - : [ IF NOT EXISTS ] - : [ USING `update_parameter` ( AND `update_parameter` )* ] - names_values: `names` VALUES `tuple_literal` - json_clause: JSON `string` [ DEFAULT ( NULL | UNSET ) ] - names: '(' `column_name` ( ',' `column_name` )* ')' - -For instance:: - - INSERT INTO NerdMovies (movie, director, main_actor, year) - VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005) - USING TTL 86400; - - INSERT INTO NerdMovies JSON '{"movie": "Serenity", - "director": "Joss Whedon", - "year": 2005}'; - -The ``INSERT`` statement writes one or more columns for a given row in a table. Note that since a row is identified by -its ``PRIMARY KEY``, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the ``VALUES`` syntax. When using the ``JSON`` syntax, they are optional. See the -section on :ref:`JSON support ` for more detail. - -Note that unlike in SQL, ``INSERT`` does not check the prior existence of the row by default: the row is created if none -existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened. - -It is however possible to use the ``IF NOT EXISTS`` condition to only insert if the row does not exist prior to the -insertion. But please note that using ``IF NOT EXISTS`` will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly. - -All updates for an ``INSERT`` are applied atomically and in isolation. - -Please refer to the :ref:`UPDATE ` section for informations on the :token:`update_parameter`. - -Also note that ``INSERT`` does not support counters, while ``UPDATE`` does. - -.. _update-statement: - -UPDATE -^^^^^^ - -Updating a row is done using an ``UPDATE`` statement: - -.. productionlist:: - update_statement: UPDATE `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : SET `assignment` ( ',' `assignment` )* - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - update_parameter: ( TIMESTAMP | TTL ) ( `integer` | `bind_marker` ) - assignment: `simple_selection` '=' `term` - :| `column_name` '=' `column_name` ( '+' | '-' ) `term` - :| `column_name` '=' `list_literal` '+' `column_name` - simple_selection: `column_name` - :| `column_name` '[' `term` ']' - :| `column_name` '.' `field_name - condition: `simple_selection` `operator` `term` - -For instance:: - - UPDATE NerdMovies USING TTL 400 - SET director = 'Joss Whedon', - main_actor = 'Nathan Fillion', - year = 2005 - WHERE movie = 'Serenity'; - - UPDATE UserActions - SET total = total + 2 - WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 - AND action = 'click'; - -The ``UPDATE`` statement writes one or more columns for a given row in a table. The :token:`where_clause` is used to -select the row to update and must include all columns composing the ``PRIMARY KEY``. Non primary key columns are then -set using the ``SET`` keyword. 
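As a hedged illustration of that last point (the ``readings`` table below is invented for the example), a table with both a partition key and a clustering column requires every primary key column in the ``WHERE`` clause, while only regular columns may appear after ``SET``::

    CREATE TABLE readings (
        sensor_id uuid,
        ts timestamp,
        value double,
        status text,
        PRIMARY KEY (sensor_id, ts)
    );

    UPDATE readings
    SET value = 21.5, status = 'ok'
    WHERE sensor_id = 123e4567-e89b-12d3-a456-426614174000
      AND ts = '2020-01-01 00:00+0000';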
- -Note that unlike in SQL, ``UPDATE`` does not check the prior existence of the row by default (except through ``IF``, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred. - -It is however possible to use the conditions on some columns through ``IF``, in which case the row will not be updated -unless the conditions are met. But, please note that using ``IF`` conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly. - -In an ``UPDATE`` statement, all updates within the same partition key are applied atomically and in isolation. - -Regarding the :token:`assignment`: - -- ``c = c + 3`` is used to increment/decrement counters. The column name after the '=' sign **must** be the same than - the one before the '=' sign. Note that increment/decrement is only allowed on counters, and are the *only* update - operations allowed on counters. See the section on :ref:`counters ` for details. -- ``id = id + `` and ``id[value1] = value2`` are for collections, see the :ref:`relevant section - ` for details. -- ``id.field = 3`` is for setting the value of a field on a non-frozen user-defined types. see the :ref:`relevant section - ` for details. - -.. _update-parameters: - -Update parameters -~~~~~~~~~~~~~~~~~ - -The ``UPDATE``, ``INSERT`` (and ``DELETE`` and ``BATCH`` for the ``TIMESTAMP``) statements support the following -parameters: - -- ``TIMESTAMP``: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in - microseconds) at the start of statement execution as the timestamp. This is usually a suitable default. -- ``TTL``: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are - automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not - the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL - is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a - default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of ``null`` is equivalent - to inserting with a TTL of 0. - -.. _delete_statement: - -DELETE -^^^^^^ - -Deleting rows or parts of rows uses the ``DELETE`` statement: - -.. productionlist:: - delete_statement: DELETE [ `simple_selection` ( ',' `simple_selection` ) ] - : FROM `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - -For instance:: - - DELETE FROM NerdMovies USING TIMESTAMP 1240003134 - WHERE movie = 'Serenity'; - - DELETE phone FROM Users - WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); - -The ``DELETE`` statement deletes columns and rows. If column names are provided directly after the ``DELETE`` keyword, -only those columns are deleted from the row indicated by the ``WHERE`` clause. Otherwise, whole rows are removed. - -The ``WHERE`` clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -``IN`` operator. A range of rows may be deleted using an inequality operator (such as ``>=``). - -``DELETE`` supports the ``TIMESTAMP`` option with the same semantics as in :ref:`updates `. 
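Since no example of a range deletion is shown above, here is a hedged sketch reusing the ``posts`` table from the ``SELECT`` section; the earlier clustering column is pinned with an equality so that the deleted rows form a contiguous range::

    DELETE FROM posts
    WHERE userid = 'john doe'
      AND blog_title = 'John''s Blog'
      AND posted_at >= '2012-01-01' AND posted_at < '2012-02-01';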
-
-In a ``DELETE`` statement, all deletions within the same partition key are applied atomically and in isolation.
-
-A ``DELETE`` operation can be conditional through the use of an ``IF`` clause, similar to ``UPDATE`` and ``INSERT``
-statements. However, as with ``INSERT`` and ``UPDATE`` statements, this will incur a non-negligible performance cost
-(internally, Paxos will be used) and so should be used sparingly.
-
-.. _batch_statement:
-
-BATCH
-^^^^^
-
-Multiple ``INSERT``, ``UPDATE`` and ``DELETE`` statements can be executed in a single statement by grouping them through a
-``BATCH`` statement:
-
-.. productionlist::
-   batch_statement: BEGIN [ UNLOGGED | COUNTER ] BATCH
-   : [ USING `update_parameter` ( AND `update_parameter` )* ]
-   : `modification_statement` ( ';' `modification_statement` )*
-   : APPLY BATCH
-   modification_statement: `insert_statement` | `update_statement` | `delete_statement`
-
-For instance::
-
-   BEGIN BATCH
-      INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-      UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-      INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-      DELETE name FROM users WHERE userid = 'user1';
-   APPLY BATCH;
-
-The ``BATCH`` statement groups multiple modification statements (insertions/updates and deletions) into a single
-statement. It serves several purposes:
-
-- It saves network round-trips between the client and the server (and sometimes between the server coordinator and the
-  replicas) when batching multiple updates.
-- All updates in a ``BATCH`` belonging to a given partition key are performed in isolation.
-- By default, all operations in the batch are performed as *logged*, to ensure all mutations eventually complete (or
-  none will). See the notes on :ref:`UNLOGGED batches ` for more details.
-
-Note that:
-
-- ``BATCH`` statements may only contain ``UPDATE``, ``INSERT`` and ``DELETE`` statements (not other batches for instance).
-- Batches are *not* a full analogue for SQL transactions.
-- If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp
-  (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra's conflict
-  resolution procedure in the case of `timestamp ties `__, operations may
-  be applied in an order that is different from the order they are listed in the ``BATCH`` statement. To force a
-  particular operation ordering, you must specify per-operation timestamps.
-- A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
-
-.. _unlogged-batches:
-
-``UNLOGGED`` batches
-~~~~~~~~~~~~~~~~~~~~
-
-By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note
-however that operations are only isolated within a single partition).
-
-There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur
-this penalty, you can tell Cassandra to skip the batchlog with the ``UNLOGGED`` option. If the ``UNLOGGED`` option is
-used, a failed batch might be left only partly applied.
-
-``COUNTER`` batches
-~~~~~~~~~~~~~~~~~~~
-
-Use the ``COUNTER`` option for batched counter updates. Unlike other
-updates in Cassandra, counter updates are not idempotent.
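As a hedged sketch of such a counter batch (the ``page_views`` table is invented for the example; recall that in a counter table every non-counter column must be part of the primary key)::

    CREATE TABLE page_views (
        page text PRIMARY KEY,
        views counter
    );

    BEGIN COUNTER BATCH
        UPDATE page_views SET views = views + 1 WHERE page = '/home';
        UPDATE page_views SET views = views + 1 WHERE page = '/docs';
    APPLY BATCH;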
diff --git a/src/doc/4.0-alpha1/_sources/cql/functions.rst.txt b/src/doc/4.0-alpha1/_sources/cql/functions.rst.txt deleted file mode 100644 index 965125a79..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/functions.rst.txt +++ /dev/null @@ -1,581 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-functions: - -.. Need some intro for UDF and native functions in general and point those to it. -.. _udfs: -.. _native-functions: - -Functions ---------- - -CQL supports 2 main categories of functions: - -- the :ref:`scalar functions `, which simply take a number of values and produce an output with it. -- the :ref:`aggregate functions `, which are used to aggregate multiple rows results from a - ``SELECT`` statement. - -In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined -functions. - -.. note:: By default, the use of user-defined functions is disabled by default for security concerns (even when - enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do - evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions`` - in ``cassandra.yaml`` to enable them. - -A function is identifier by its name: - -.. productionlist:: - function_name: [ `keyspace_name` '.' ] `name` - -.. _scalar-functions: - -Scalar functions -^^^^^^^^^^^^^^^^ - -.. _scalar-native-functions: - -Native functions -~~~~~~~~~~~~~~~~ - -Cast -```` - -The ``cast`` function can be used to converts one native datatype to another. - -The following table describes the conversions supported by the ``cast`` function. Cassandra will silently ignore any -cast converting a datatype into its own datatype. 
- -=============== ======================================================================================================= - From To -=============== ======================================================================================================= - ``ascii`` ``text``, ``varchar`` - ``bigint`` ``tinyint``, ``smallint``, ``int``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``boolean`` ``text``, ``varchar`` - ``counter`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``date`` ``timestamp`` - ``decimal`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``varint``, ``text``, - ``varchar`` - ``double`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``float`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``inet`` ``text``, ``varchar`` - ``int`` ``tinyint``, ``smallint``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``smallint`` ``tinyint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``time`` ``text``, ``varchar`` - ``timestamp`` ``date``, ``text``, ``varchar`` - ``timeuuid`` ``timestamp``, ``date``, ``text``, ``varchar`` - ``tinyint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``uuid`` ``text``, ``varchar`` - ``varint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``text``, - ``varchar`` -=============== ======================================================================================================= - -The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value -'1.0'. For instance:: - - SELECT avg(cast(count as double)) FROM myTable - -Token -````` - -The ``token`` function allows to compute the token for a given partition key. The exact signature of the token function -depends on the table concerned and of the partitioner used by the cluster. - -The type of the arguments of the ``token`` depend on the type of the partition key columns. The return type depend on -the partitioner in use: - -- For Murmur3Partitioner, the return type is ``bigint``. -- For RandomPartitioner, the return type is ``varint``. -- For ByteOrderedPartitioner, the return type is ``blob``. - -For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:: - - CREATE TABLE users ( - userid text PRIMARY KEY, - username text, - ) - -then the ``token`` function will take a single argument of type ``text`` (in that case, the partition key is ``userid`` -(there is no clustering columns so the partition key is the same than the primary key)), and the return type will be -``bigint``. - -Uuid -```` -The ``uuid`` function takes no parameters and generates a random type 4 uuid suitable for use in ``INSERT`` or -``UPDATE`` statements. - -.. _timeuuid-functions: - -Timeuuid functions -`````````````````` - -``now`` -####### - -The ``now`` function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the -time the function is invoked. Note that this method is useful for insertion but is largely non-sensical in -``WHERE`` clauses. 
For instance, a query of the form:: - - SELECT * FROM myTable WHERE t = now() - -will never return any result by design, since the value returned by ``now()`` is guaranteed to be unique. - -``currentTimeUUID`` is an alias of ``now``. - -``minTimeuuid`` and ``maxTimeuuid`` -################################### - -The ``minTimeuuid`` (resp. ``maxTimeuuid``) function takes a ``timestamp`` value ``t`` (which can be `either a timestamp -or a date string `) and return a *fake* ``timeuuid`` corresponding to the *smallest* (resp. *biggest*) -possible ``timeuuid`` having for timestamp ``t``. So for instance:: - - SELECT * FROM myTable - WHERE t > maxTimeuuid('2013-01-01 00:05+0000') - AND t < minTimeuuid('2013-02-02 10:00+0000') - -will select all rows where the ``timeuuid`` column ``t`` is strictly older than ``'2013-01-01 00:05+0000'`` but strictly -younger than ``'2013-02-02 10:00+0000'``. Please note that ``t >= maxTimeuuid('2013-01-01 00:05+0000')`` would still -*not* select a ``timeuuid`` generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to ``t > -maxTimeuuid('2013-01-01 00:05+0000')``. - -.. note:: We called the values generated by ``minTimeuuid`` and ``maxTimeuuid`` *fake* UUID because they do no respect - the Time-Based UUID generation process specified by the `RFC 4122 `__. In - particular, the value returned by these 2 methods will not be unique. This means you should only use those methods - for querying (as in the example above). Inserting the result of those methods is almost certainly *a bad idea*. - -Datetime functions -`````````````````` - -Retrieving the current date/time -################################ - -The following functions can be used to retrieve the date/time at the time where the function is invoked: - -===================== =============== - Function name Output type -===================== =============== - ``currentTimestamp`` ``timestamp`` - ``currentDate`` ``date`` - ``currentTime`` ``time`` - ``currentTimeUUID`` ``timeUUID`` -===================== =============== - -For example the last 2 days of data can be retrieved using:: - - SELECT * FROM myTable WHERE date >= currentDate() - 2d - -Time conversion functions -######################### - -A number of functions are provided to “convert” a ``timeuuid``, a ``timestamp`` or a ``date`` into another ``native`` -type. 
- -===================== =============== =================================================================== - Function name Input type Description -===================== =============== =================================================================== - ``toDate`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``date`` type - ``toDate`` ``timestamp`` Converts the ``timestamp`` argument into a ``date`` type - ``toTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``timestamp`` type - ``toTimestamp`` ``date`` Converts the ``date`` argument into a ``timestamp`` type - ``toUnixTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``timestamp`` Converts the ``timestamp`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``date`` Converts the ``date`` argument into a ``bigInt`` raw value - ``dateOf`` ``timeuuid`` Similar to ``toTimestamp(timeuuid)`` (DEPRECATED) - ``unixTimestampOf`` ``timeuuid`` Similar to ``toUnixTimestamp(timeuuid)`` (DEPRECATED) -===================== =============== =================================================================== - -Blob conversion functions -````````````````````````` -A number of functions are provided to “convert” the native types into binary data (``blob``). For every -```` ``type`` supported by CQL (a notable exceptions is ``blob``, for obvious reasons), the function -``typeAsBlob`` takes a argument of type ``type`` and return it as a ``blob``. Conversely, the function ``blobAsType`` -takes a 64-bit ``blob`` argument and convert it to a ``bigint`` value. And so for instance, ``bigintAsBlob(3)`` is -``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``. - -.. _user-defined-scalar-functions: - -User-defined functions -~~~~~~~~~~~~~~~~~~~~~~ - -User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in *Java* and *JavaScript*. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath. - -UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster. - -UDFs can be *overloaded* - i.e. multiple UDFs with different argument types but the same function name. Example:: - - CREATE FUNCTION sample ( arg int ) ...; - CREATE FUNCTION sample ( arg text ) ...; - -User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing. - -It is valid to use *complex* types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types. - -Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too. - -Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:: - - CREATE FUNCTION some_function ( arg int ) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE java - AS $$ return arg; $$; - - SELECT some_function(column) FROM atable ...; - UPDATE atable SET col = some_function(?) 
...; - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct_using_udt ( udtarg frozen ) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ return udtarg.getString("txt"); $$; - -User-defined functions can be used in ``SELECT``, ``INSERT`` and ``UPDATE`` statements. - -The implicitly available ``udfContext`` field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:: - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct\_using\_udt ( somearg int ) - RETURNS NULL ON NULL INPUT - RETURNS custom_type - LANGUAGE java - AS $$ - UDTValue udt = udfContext.newReturnUDTValue(); - udt.setString("txt", "some string"); - udt.setInt("i", 42); - return udt; - $$; - -The definition of the ``UDFContext`` interface can be found in the Apache Cassandra source code for -``org.apache.cassandra.cql3.functions.UDFContext``. - -.. code-block:: java - - public interface UDFContext - { - UDTValue newArgUDTValue(String argName); - UDTValue newArgUDTValue(int argNum); - UDTValue newReturnUDTValue(); - UDTValue newUDTValue(String udtName); - TupleValue newArgTupleValue(String argName); - TupleValue newArgTupleValue(int argNum); - TupleValue newReturnTupleValue(); - TupleValue newTupleValue(String cqlDefinition); - } - -Java UDFs already have some imports for common interfaces and classes defined. These imports are: - -.. code-block:: java - - import java.nio.ByteBuffer; - import java.util.List; - import java.util.Map; - import java.util.Set; - import org.apache.cassandra.cql3.functions.UDFContext; - import com.datastax.driver.core.TypeCodec; - import com.datastax.driver.core.TupleValue; - import com.datastax.driver.core.UDTValue; - -Please note, that these convenience imports are not available for script UDFs. - -.. _create-function-statement: - -CREATE FUNCTION -``````````````` - -Creating a new user-defined function uses the ``CREATE FUNCTION`` statement: - -.. productionlist:: - create_function_statement: CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS] - : `function_name` '(' `arguments_declaration` ')' - : [ CALLED | RETURNS NULL ] ON NULL INPUT - : RETURNS `cql_type` - : LANGUAGE `identifier` - : AS `string` - arguments_declaration: `identifier` `cql_type` ( ',' `identifier` `cql_type` )* - -For instance:: - - CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen, listarg list) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - - CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int) - CALLED ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - -``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords either creates a function or replaces an existing one with -the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE`` fails if a function with the same signature already -exists. - -If the optional ``IF NOT EXISTS`` keywords are used, the function will -only be created if another function with the same signature does not -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -Behavior on invocation with ``null`` values must be defined for each -function. There are two options: - -#. ``RETURNS NULL ON NULL INPUT`` declares that the function will always - return ``null`` if any of the input arguments is ``null``. -#. ``CALLED ON NULL INPUT`` declares that the function will always be - executed. 
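As a hedged illustration of the difference (the function name and body are invented for the example), a ``CALLED ON NULL INPUT`` function has to handle ``null`` arguments in its own code, whereas a ``RETURNS NULL ON NULL INPUT`` function is simply never executed with them::

    CREATE OR REPLACE FUNCTION len_or_zero ( s text )
        CALLED ON NULL INPUT
        RETURNS int
        LANGUAGE java
        AS $$ return (s == null) ? 0 : s.length(); $$;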
- -Function Signature -################## - -Signatures are used to distinguish individual functions. The signature consists of: - -#. The fully qualified function name - i.e *keyspace* plus *function-name* -#. The concatenated list of all argument types - -Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules. - -Functions belong to a keyspace. If no keyspace is specified in ````, the current keyspace is used (i.e. -the keyspace specified using the ``USE`` statement). It is not possible to create a user-defined function in one of the -system keyspaces. - -.. _drop-function-statement: - -DROP FUNCTION -````````````` - -Dropping a function uses the ``DROP FUNCTION`` statement: - -.. productionlist:: - drop_function_statement: DROP FUNCTION [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - arguments_signature: `cql_type` ( ',' `cql_type` )* - -For instance:: - - DROP FUNCTION myfunction; - DROP FUNCTION mykeyspace.afunction; - DROP FUNCTION afunction ( int ); - DROP FUNCTION afunction ( text ); - -You must specify the argument types (:token:`arguments_signature`) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions). - -``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if -it doesn't - -.. _aggregate-functions: - -Aggregate functions -^^^^^^^^^^^^^^^^^^^ - -Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set. - -If ``normal`` columns, ``scalar functions``, ``UDT`` fields, ``writetime`` or ``ttl`` are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query. - -Native aggregates -~~~~~~~~~~~~~~~~~ - -.. _count-function: - -Count -````` - -The ``count`` function can be used to count the rows returned by a query. Example:: - - SELECT COUNT (*) FROM plays; - SELECT COUNT (1) FROM plays; - -It also can be used to count the non null value of a given column:: - - SELECT COUNT (scores) FROM plays; - -Max and Min -``````````` - -The ``max`` and ``min`` functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:: - - SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake'; - -Sum -``` - -The ``sum`` function can be used to sum up all the values returned by a query for a given column. For instance:: - - SELECT SUM (players) FROM plays; - -Avg -``` - -The ``avg`` function can be used to compute the average of all the values returned by a query for a given column. For -instance:: - - SELECT AVG (players) FROM plays; - -.. _user-defined-aggregates-functions: - -User-Defined Aggregates -~~~~~~~~~~~~~~~~~~~~~~~ - -User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -*count*, *min*, and *max*. - -Each aggregate requires an *initial state* (``INITCOND``, which defaults to ``null``) of type ``STYPE``. The first -argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional ``FINALFUNC`` is executed with last -state value as its argument. 
- -``STYPE`` is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate). - -User-defined aggregates can be used in ``SELECT`` statement. - -A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` -statement):: - - CREATE OR REPLACE FUNCTION averageState(state tuple, val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS $$ - if (val != null) { - state.setInt(0, state.getInt(0)+1); - state.setLong(1, state.getLong(1)+val.intValue()); - } - return state; - $$; - - CREATE OR REPLACE FUNCTION averageFinal (state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS $$ - double r = 0; - if (state.getInt(0) == 0) return null; - r = state.getLong(1); - r /= state.getInt(0); - return Double.valueOf(r); - $$; - - CREATE OR REPLACE AGGREGATE average(int) - SFUNC averageState - STYPE tuple - FINALFUNC averageFinal - INITCOND (0, 0); - - CREATE TABLE atable ( - pk int PRIMARY KEY, - val int - ); - - INSERT INTO atable (pk, val) VALUES (1,1); - INSERT INTO atable (pk, val) VALUES (2,2); - INSERT INTO atable (pk, val) VALUES (3,3); - INSERT INTO atable (pk, val) VALUES (4,4); - - SELECT average(val) FROM atable; - -.. _create-aggregate-statement: - -CREATE AGGREGATE -```````````````` - -Creating (or replacing) a user-defined aggregate function uses the ``CREATE AGGREGATE`` statement: - -.. productionlist:: - create_aggregate_statement: CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ] - : `function_name` '(' `arguments_signature` ')' - : SFUNC `function_name` - : STYPE `cql_type` - : [ FINALFUNC `function_name` ] - : [ INITCOND `term` ] - -See above for a complete example. - -``CREATE AGGREGATE`` with the optional ``OR REPLACE`` keywords either creates an aggregate or replaces an existing one -with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature -already exists. - -``CREATE AGGREGATE`` with the optional ``IF NOT EXISTS`` keywords either creates an aggregate if it does not already -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -``STYPE`` defines the type of the state value and must be specified. - -The optional ``INITCOND`` defines the initial state value for the aggregate. It defaults to ``null``. A non-\ ``null`` -``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``. - -``SFUNC`` references an existing function to be used as the state modifying function. The type of first argument of the -state function must match ``STYPE``. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called -with ``null``. - -The optional ``FINALFUNC`` is called just before the aggregate result is returned. It must take only one argument with -type ``STYPE``. The return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS -NULL ON NULL INPUT`` means that the aggregate's return value will be ``null``, if the last state is ``null``. - -If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is -defined, it is the return type of that function. - -.. 
_drop-aggregate-statement: - -DROP AGGREGATE -`````````````` - -Dropping an user-defined aggregate function uses the ``DROP AGGREGATE`` statement: - -.. productionlist:: - drop_aggregate_statement: DROP AGGREGATE [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - -For instance:: - - DROP AGGREGATE myAggregate; - DROP AGGREGATE myKeyspace.anAggregate; - DROP AGGREGATE someAggregate ( int ); - DROP AGGREGATE someAggregate ( text ); - -The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates). - -``DROP AGGREGATE`` with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist. diff --git a/src/doc/4.0-alpha1/_sources/cql/index.rst.txt b/src/doc/4.0-alpha1/_sources/cql/index.rst.txt deleted file mode 100644 index b4c21cf6c..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/index.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _cql: - -The Cassandra Query Language (CQL) -================================== - -This document describes the Cassandra Query Language (CQL) [#]_. Note that this document describes the last version of -the languages. However, the `changes <#changes>`_ section provides the diff between the different versions of CQL. - -CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For -that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have -in SQL. - -.. toctree:: - :maxdepth: 2 - - definitions - types - ddl - dml - indexes - mvs - security - functions - operators - json - triggers - appendices - changes - -.. [#] Technically, this document CQL version 3, which is not backward compatible with CQL version 1 and 2 (which have - been deprecated and remove) and differs from it in numerous ways. diff --git a/src/doc/4.0-alpha1/_sources/cql/indexes.rst.txt b/src/doc/4.0-alpha1/_sources/cql/indexes.rst.txt deleted file mode 100644 index 81fe429d0..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/indexes.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. 
http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _secondary-indexes: - -Secondary Indexes ------------------ - -CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by: - -.. productionlist:: - index_name: re('[a-zA-Z_0-9]+') - - - -.. _create-index-statement: - -CREATE INDEX -^^^^^^^^^^^^ - -Creating a secondary index on a table uses the ``CREATE INDEX`` statement: - -.. productionlist:: - create_index_statement: CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ `index_name` ] - : ON `table_name` '(' `index_identifier` ')' - : [ USING `string` [ WITH OPTIONS = `map_literal` ] ] - index_identifier: `column_name` - :| ( KEYS | VALUES | ENTRIES | FULL ) '(' `column_name` ')' - -For instance:: - - CREATE INDEX userIndex ON NerdMovies (user); - CREATE INDEX ON Mutants (abilityId); - CREATE INDEX ON users (keys(favs)); - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass'; - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'}; - -The ``CREATE INDEX`` statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ``ON`` keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time. - -Attempting to create an already existing index will return an error unless the ``IF NOT EXISTS`` option is used. If it -is used, the statement will be a no-op if the index already exists. - -Indexes on Map Keys -~~~~~~~~~~~~~~~~~~~ - -When creating an index on a :ref:`maps `, you may index either the keys or the values. If the column identifier is -placed within the ``keys()`` function, the index will be on the map keys, allowing you to use ``CONTAINS KEY`` in -``WHERE`` clauses. Otherwise, the index will be on the map values. - -.. _drop-index-statement: - -DROP INDEX -^^^^^^^^^^ - -Dropping a secondary index uses the ``DROP INDEX`` statement: - -.. productionlist:: - drop_index_statement: DROP INDEX [ IF EXISTS ] `index_name` - -The ``DROP INDEX`` statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index. - -If the index does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. diff --git a/src/doc/4.0-alpha1/_sources/cql/json.rst.txt b/src/doc/4.0-alpha1/_sources/cql/json.rst.txt deleted file mode 100644 index 539180aed..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/json.rst.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-json: - -JSON Support ------------- - -Cassandra 2.2 introduces JSON support to :ref:`SELECT ` and :ref:`INSERT ` -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents. - -SELECT JSON -^^^^^^^^^^^ - -With ``SELECT`` statements, the ``JSON`` keyword can be used to return each row as a single ``JSON`` encoded map. The -remainder of the ``SELECT`` statement behavior is the same. - -The result map keys are the same as the column names in a normal result set. For example, a statement like ``SELECT JSON -a, ttl(b) FROM ...`` would result in a map with keys ``"a"`` and ``"ttl(b)"``. However, this is one notable exception: -for symmetry with ``INSERT JSON`` behavior, case-sensitive column names with upper-case letters will be surrounded with -double quotes. For example, ``SELECT JSON myColumn FROM ...`` would result in a map key ``"\"myColumn\""`` (note the -escaped quotes). - -The map values will ``JSON``-encoded representations (as described below) of the result set values. - -INSERT JSON -^^^^^^^^^^^ - -With ``INSERT`` statements, the new ``JSON`` keyword can be used to enable inserting a ``JSON`` encoded map as a single -row. The format of the ``JSON`` map should generally match that returned by a ``SELECT JSON`` statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named "myKey" and "value", you would do the following:: - - INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}' - -By default (or if ``DEFAULT NULL`` is explicitly used), a column omitted from the ``JSON`` map will be set to ``NULL``, -meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). -Alternatively, if the ``DEFAULT UNSET`` directive is used after the value, omitted column values will be left unset, -meaning that pre-existing values for those column will be preserved. - - -JSON Encoding of Cassandra Data Types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Where possible, Cassandra will represent and accept data types in their native ``JSON`` representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native ``JSON`` collections (maps and lists) or a JSON-encoded string -representation of the collection. 
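-
-For instance (a sketch assuming a hypothetical table ``events`` with an ``int`` partition key ``id`` and a
-``set<text>`` column ``tags``), a collection can be supplied either as a native JSON list or as a JSON-encoded
-string containing that list::
-
-    INSERT INTO events JSON '{ "id": 1, "tags": ["cql", "json"] }';
-    INSERT INTO events JSON '{ "id": 2, "tags": "[\"cql\", \"json\"]" }';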
- -The following table describes the encodings that Cassandra will accept in ``INSERT JSON`` values (and ``fromJson()`` -arguments) as well as the format Cassandra will use when returning data for ``SELECT JSON`` statements (and -``fromJson()``): - -=============== ======================== =============== ============================================================== - Type Formats accepted Return format Notes -=============== ======================== =============== ============================================================== - ``ascii`` string string Uses JSON's ``\u`` character escape - ``bigint`` integer, string integer String must be valid 64 bit integer - ``blob`` string string String should be 0x followed by an even number of hex digits - ``boolean`` boolean, string boolean String must be "true" or "false" - ``date`` string string Date in format ``YYYY-MM-DD``, timezone UTC - ``decimal`` integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in - client-side decoder - ``double`` integer, float, string float String must be valid integer or float - ``float`` integer, float, string float String must be valid integer or float - ``inet`` string string IPv4 or IPv6 address - ``int`` integer, string integer String must be valid 32 bit integer - ``list`` list, string list Uses JSON's native list representation - ``map`` map, string map Uses JSON's native map representation - ``smallint`` integer, string integer String must be valid 16 bit integer - ``set`` list, string list Uses JSON's native list representation - ``text`` string string Uses JSON's ``\u`` character escape - ``time`` string string Time of day in format ``HH-MM-SS[.fffffffff]`` - ``timestamp`` integer, string string A timestamp. Strings constant allows to input :ref:`timestamps - as dates `. Datestamps with format ``YYYY-MM-DD - HH:MM:SS.SSS`` are returned. - ``timeuuid`` string string Type 1 UUID. See :token:`constant` for the UUID format - ``tinyint`` integer, string integer String must be valid 8 bit integer - ``tuple`` list, string list Uses JSON's native list representation - ``UDT`` map, string map Uses JSON's native map representation with field names as keys - ``uuid`` string string See :token:`constant` for the UUID format - ``varchar`` string string Uses JSON's ``\u`` character escape - ``varint`` integer, string integer Variable length; may overflow 32 or 64 bit integers in - client-side decoder -=============== ======================== =============== ============================================================== - -The fromJson() Function -^^^^^^^^^^^^^^^^^^^^^^^ - -The ``fromJson()`` function may be used similarly to ``INSERT JSON``, but for a single column value. It may only be used -in the ``VALUES`` clause of an ``INSERT`` statement or as one of the column values in an ``UPDATE``, ``DELETE``, or -``SELECT`` statement. For example, it cannot be used in the selection clause of a ``SELECT`` statement. - -The toJson() Function -^^^^^^^^^^^^^^^^^^^^^ - -The ``toJson()`` function may be used similarly to ``SELECT JSON``, but for a single column value. It may only be used -in the selection clause of a ``SELECT`` statement. diff --git a/src/doc/4.0-alpha1/_sources/cql/mvs.rst.txt b/src/doc/4.0-alpha1/_sources/cql/mvs.rst.txt deleted file mode 100644 index 200090a60..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/mvs.rst.txt +++ /dev/null @@ -1,179 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. 
See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _materialized-views: - -Materialized Views ------------------- - -Materialized views names are defined by: - -.. productionlist:: - view_name: re('[a-zA-Z_0-9]+') - - -.. _create-materialized-view-statement: - -CREATE MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^^ - -You can create a materialized view on a table using a ``CREATE MATERIALIZED VIEW`` statement: - -.. productionlist:: - create_materialized_view_statement: CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] `view_name` AS - : `select_statement` - : PRIMARY KEY '(' `primary_key` ')' - : WITH `table_options` - -For instance:: - - CREATE MATERIALIZED VIEW monkeySpecies_by_population AS - SELECT * FROM monkeySpecies - WHERE population IS NOT NULL AND species IS NOT NULL - PRIMARY KEY (population, species) - WITH comment='Allow query by population instead of species'; - -The ``CREATE MATERIALIZED VIEW`` statement creates a new materialized view. Each such view is a set of *rows* which -corresponds to rows which are present in the underlying, or base, table specified in the ``SELECT`` statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view. - -Creating a materialized view has 3 main parts: - -- The :ref:`select statement ` that restrict the data included in the view. -- The :ref:`primary key ` definition for the view. -- The :ref:`options ` for the view. - -Attempting to create an already existing materialized view will return an error unless the ``IF NOT EXISTS`` option is -used. If it is used, the statement will be a no-op if the materialized view already exists. - -.. note:: By default, materialized views are built in a single thread. The initial build can be parallelized by - increasing the number of threads specified by the property ``concurrent_materialized_view_builders`` in - ``cassandra.yaml``. This property can also be manipulated at runtime through both JMX and the - ``setconcurrentviewbuilders`` and ``getconcurrentviewbuilders`` nodetool commands. - -.. _mv-select: - -MV select statement -``````````````````` - -The select statement of a materialized view creation defines which of the base table is included in the view. That -statement is limited in a number of ways: - -- the :ref:`selection ` is limited to those that only select columns of the base table. In other - words, you can't use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can - however use `*` as a shortcut of selecting all columns. Further, :ref:`static columns ` cannot be - included in a materialized view (which means ``SELECT *`` isn't allowed if the base table has static columns). -- the ``WHERE`` clause have the following restrictions: - - - it cannot include any :token:`bind_marker`. 
- - the columns that are not part of the *base table* primary key can only be restricted by an ``IS NOT NULL`` - restriction. No other restriction is allowed. - - as the columns that are part of the *view* primary key cannot be null, they must always be at least restricted by a - ``IS NOT NULL`` restriction (or any other restriction, but they must have one). - -- it cannot have neither an :ref:`ordering clause `, nor a :ref:`limit `, nor :ref:`ALLOW - FILTERING `. - -.. _mv-primary-key: - -MV primary key -`````````````` - -A view must have a primary key and that primary key must conform to the following restrictions: - -- it must contain all the primary key columns of the base table. This ensures that every row of the view correspond to - exactly one row of the base table. -- it can only contain a single column that is not a primary key column in the base table. - -So for instance, give the following base table definition:: - - CREATE TABLE t ( - k int, - c1 int, - c2 int, - v1 int, - v2 int, - PRIMARY KEY (k, c1, c2) - ) - -then the following view definitions are allowed:: - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, k, c2) - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (v1, k, c1, c2) - -but the following ones are **not** allowed:: - - // Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL - PRIMARY KEY (v1, v2, k, c1, c2) - - // Error: must include k in the primary as it's a base table primary key column - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, c2) - - -.. _mv-options: - -MV options -`````````` - -A materialized view is internally implemented by a table and as such, creating a MV allows the :ref:`same options than -creating a table `. - - -.. _alter-materialized-view-statement: - -ALTER MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^ - -After creation, you can alter the options of a materialized view using the ``ALTER MATERIALIZED VIEW`` statement: - -.. productionlist:: - alter_materialized_view_statement: ALTER MATERIALIZED VIEW `view_name` WITH `table_options` - -The options that can be updated are the same than at creation time and thus the :ref:`same than for tables -`. - -.. _drop-materialized-view-statement: - -DROP MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^ - -Dropping a materialized view users the ``DROP MATERIALIZED VIEW`` statement: - -.. productionlist:: - drop_materialized_view_statement: DROP MATERIALIZED VIEW [ IF EXISTS ] `view_name`; - -If the materialized view does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case -the operation is a no-op. - -MV Limitations -``````````````` - -.. Note:: Removal of columns not selected in the Materialized View (via ``UPDATE base SET unselected_column = null`` or - ``DELETE unselected_column FROM base``) may shadow missed updates to other columns received by hints or repair. - For this reason, we advise against doing deletions on base columns not selected in views until this is - fixed on CASSANDRA-13826. 
diff --git a/src/doc/4.0-alpha1/_sources/cql/operators.rst.txt b/src/doc/4.0-alpha1/_sources/cql/operators.rst.txt deleted file mode 100644 index 1faf0d045..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/operators.rst.txt +++ /dev/null @@ -1,74 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _arithmetic_operators: - -Arithmetic Operators --------------------- - -CQL supports the following operators: - -=============== ======================================================================================================= - Operator Description -=============== ======================================================================================================= - \- (unary) Negates operand - \+ Addition - \- Substraction - \* Multiplication - / Division - % Returns the remainder of a division -=============== ======================================================================================================= - -.. _number-arithmetic: - -Number Arithmetic -^^^^^^^^^^^^^^^^^ - -All arithmetic operations are supported on numeric types or counters. - -The return type of the operation will be based on the operand types: - -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - left/right tinyint smallint int bigint counter float double varint decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - **tinyint** tinyint smallint int bigint bigint float double varint decimal - **smallint** smallint smallint int bigint bigint float double varint decimal - **int** int int int bigint bigint float double varint decimal - **bigint** bigint bigint bigint bigint bigint double double varint decimal - **counter** bigint bigint bigint bigint bigint double double varint decimal - **float** float float float double double float double decimal decimal - **double** double double double double double double double decimal decimal - **varint** varint varint varint decimal decimal decimal decimal decimal decimal - **decimal** decimal decimal decimal decimal decimal decimal decimal decimal decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - -``*``, ``/`` and ``%`` operators have a higher precedence level than ``+`` and ``-`` operator. By consequence, -they will be evaluated before. If two operator in an expression have the same precedence level, they will be evaluated -left to right based on their position in the expression. - -.. _datetime--arithmetic: - -Datetime Arithmetic -^^^^^^^^^^^^^^^^^^^ - -A ``duration`` can be added (+) or substracted (-) from a ``timestamp`` or a ``date`` to create a new -``timestamp`` or ``date``. 
So for instance:: - - SELECT * FROM myTable WHERE t = '2017-01-01' - 2d - -will select all the records with a value of ``t`` which is in the last 2 days of 2016. diff --git a/src/doc/4.0-alpha1/_sources/cql/security.rst.txt b/src/doc/4.0-alpha1/_sources/cql/security.rst.txt deleted file mode 100644 index 429a1ef0d..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/security.rst.txt +++ /dev/null @@ -1,538 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-security: - -Security --------- - -.. _cql-roles: - -Database Roles -^^^^^^^^^^^^^^ - -CQL uses database roles to represent users and group of users. Syntactically, a role is defined by: - -.. productionlist:: - role_name: `identifier` | `string` - -.. _create-role-statement: - -CREATE ROLE -~~~~~~~~~~~ - -Creating a role uses the ``CREATE ROLE`` statement: - -.. productionlist:: - create_role_statement: CREATE ROLE [ IF NOT EXISTS ] `role_name` - : [ WITH `role_options` ] - role_options: `role_option` ( AND `role_option` )* - role_option: PASSWORD '=' `string` - :| LOGIN '=' `boolean` - :| SUPERUSER '=' `boolean` - :| OPTIONS '=' `map_literal` - :| ACCESS TO DATACENTERS `set_literal` - :| ACCESS TO ALL DATACENTERS - -For instance:: - - CREATE ROLE new_role; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true; - CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 }; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'}; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS; - -By default roles do not possess ``LOGIN`` privileges or ``SUPERUSER`` status. - -:ref:`Permissions ` on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and ``SUPERUSER`` status are inherited, but the ``LOGIN`` privilege is -not. - -If a role has the ``LOGIN`` privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role. - -Only a client with with the ``CREATE`` permission on the database roles resource may issue ``CREATE ROLE`` requests (see -the :ref:`relevant section ` below), unless the client is a ``SUPERUSER``. Role management in Cassandra -is pluggable and custom implementations may support only a subset of the listed options. - -Role names should be quoted if they contain non-alphanumeric characters. - -.. 
_setting-credentials-for-internal-authentication: - -Setting credentials for internal authentication -``````````````````````````````````````````````` - -Use the ``WITH PASSWORD`` clause to set a password for internal authentication, enclosing the password in single -quotation marks. - -If internal authentication has not been set up or the role does not have ``LOGIN`` privileges, the ``WITH PASSWORD`` -clause is not necessary. - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. Not specifiying -datacenters implicitly grants access to all datacenters. The clause ``ACCESS TO ALL DATACENTERS`` can be used for -explicitness, but there's no functional difference. - -Creating a role conditionally -````````````````````````````` - -Attempting to create an existing role results in an invalid query condition unless the ``IF NOT EXISTS`` option is used. -If the option is used and the role exists, the statement is a no-op:: - - CREATE ROLE other_role; - CREATE ROLE IF NOT EXISTS other_role; - - -.. _alter-role-statement: - -ALTER ROLE -~~~~~~~~~~ - -Altering a role options uses the ``ALTER ROLE`` statement: - -.. productionlist:: - alter_role_statement: ALTER ROLE `role_name` WITH `role_options` - -For instance:: - - ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false; - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ``ACCESS TO ALL DATACENTERS`` clause. - -Conditions on executing ``ALTER ROLE`` statements: - -- A client must have ``SUPERUSER`` status to alter the ``SUPERUSER`` status of another role -- A client cannot alter the ``SUPERUSER`` status of any role it currently holds -- A client can only modify certain properties of the role with which it identified at login (e.g. ``PASSWORD``) -- To modify properties of a role, the client must be granted ``ALTER`` :ref:`permission ` on that role - -.. _drop-role-statement: - -DROP ROLE -~~~~~~~~~ - -Dropping a role uses the ``DROP ROLE`` statement: - -.. productionlist:: - drop_role_statement: DROP ROLE [ IF EXISTS ] `role_name` - -``DROP ROLE`` requires the client to have ``DROP`` :ref:`permission ` on the role in question. In -addition, client may not ``DROP`` the role with which it identified at login. Finally, only a client with ``SUPERUSER`` -status may ``DROP`` another ``SUPERUSER`` role. - -Attempting to drop a role which does not exist results in an invalid query condition unless the ``IF EXISTS`` option is -used. If the option is used and the role does not exist the statement is a no-op. - -.. note:: DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain - connected and will retain the ability to perform any database actions which do not require :ref:`authorization`. - However, if authorization is enabled, :ref:`permissions` of the dropped role are also revoked, - subject to the :ref:`caching options` configured in :ref:`cassandra.yaml`. 
- Should a dropped role be subsequently recreated and have new :ref:`permissions` or - :ref:`roles` granted to it, any client sessions still connected will acquire the newly granted - permissions and roles. - -.. _grant-role-statement: - -GRANT ROLE -~~~~~~~~~~ - -Granting a role to another uses the ``GRANT ROLE`` statement: - -.. productionlist:: - grant_role_statement: GRANT `role_name` TO `role_name` - -For instance:: - - GRANT report_writer TO alice; - -This statement grants the ``report_writer`` role to ``alice``. Any permissions granted to ``report_writer`` are also -acquired by ``alice``. - -Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:: - - GRANT role_a TO role_b; - GRANT role_b TO role_a; - - GRANT role_a TO role_b; - GRANT role_b TO role_c; - GRANT role_c TO role_a; - -.. _revoke-role-statement: - -REVOKE ROLE -~~~~~~~~~~~ - -Revoking a role uses the ``REVOKE ROLE`` statement: - -.. productionlist:: - revoke_role_statement: REVOKE `role_name` FROM `role_name` - -For instance:: - - REVOKE report_writer FROM alice; - -This statement revokes the ``report_writer`` role from ``alice``. Any permissions that ``alice`` has acquired via the -``report_writer`` role are also revoked. - -.. _list-roles-statement: - -LIST ROLES -~~~~~~~~~~ - -All the known roles (in the system or granted to specific role) can be listed using the ``LIST ROLES`` statement: - -.. productionlist:: - list_roles_statement: LIST ROLES [ OF `role_name` ] [ NORECURSIVE ] - -For instance:: - - LIST ROLES; - -returns all known roles in the system, this requires ``DESCRIBE`` permission on the database roles resource. And:: - - LIST ROLES OF alice; - -enumerates all roles granted to ``alice``, including those transitively acquired. But:: - - LIST ROLES OF bob NORECURSIVE - -lists all roles directly granted to ``bob`` without including any of the transitively acquired ones. - -Users -^^^^^ - -Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -``USER``. For backward compatibility, the legacy syntax has been preserved with ``USER`` centric statements becoming -synonyms for the ``ROLE`` based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role. - -.. _create-user-statement: - -CREATE USER -~~~~~~~~~~~ - -Creating a user uses the ``CREATE USER`` statement: - -.. productionlist:: - create_user_statement: CREATE USER [ IF NOT EXISTS ] `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - user_option: SUPERUSER | NOSUPERUSER - -For instance:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER; - -``CREATE USER`` is equivalent to ``CREATE ROLE`` where the ``LOGIN`` option is ``true``. 
So, the following pairs of -statements are equivalent:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - - CREATE USER alice WITH PASSWORD 'password_a'; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - -.. _alter-user-statement: - -ALTER USER -~~~~~~~~~~ - -Altering the options of a user uses the ``ALTER USER`` statement: - -.. productionlist:: - alter_user_statement: ALTER USER `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - -For instance:: - - ALTER USER alice WITH PASSWORD 'PASSWORD_A'; - ALTER USER bob SUPERUSER; - -.. _drop-user-statement: - -DROP USER -~~~~~~~~~ - -Dropping a user uses the ``DROP USER`` statement: - -.. productionlist:: - drop_user_statement: DROP USER [ IF EXISTS ] `role_name` - -.. _list-users-statement: - -LIST USERS -~~~~~~~~~~ - -Existing users can be listed using the ``LIST USERS`` statement: - -.. productionlist:: - list_users_statement: LIST USERS - -Note that this statement is equivalent to:: - - LIST ROLES; - -but only roles with the ``LOGIN`` privilege are included in the output. - -Data Control -^^^^^^^^^^^^ - -.. _cql-permissions: - -Permissions -~~~~~~~~~~~ - -Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically: - -- The hierarchy of Data resources, Keyspaces and Tables has the structure ``ALL KEYSPACES`` -> ``KEYSPACE`` -> - ``TABLE``. -- Function resources have the structure ``ALL FUNCTIONS`` -> ``KEYSPACE`` -> ``FUNCTION`` -- Resources representing roles have the structure ``ALL ROLES`` -> ``ROLE`` -- Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ``ALL MBEANS`` -> - ``MBEAN`` - -Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting ``SELECT`` on a ``KEYSPACE`` automatically grants it on all ``TABLES`` in that ``KEYSPACE``. Likewise, granting -a permission on ``ALL FUNCTIONS`` grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace. - -Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes. - -The full set of available permissions is: - -- ``CREATE`` -- ``ALTER`` -- ``DROP`` -- ``SELECT`` -- ``MODIFY`` -- ``AUTHORIZE`` -- ``DESCRIBE`` -- ``EXECUTE`` - -Not all permissions are applicable to every type of resource. For instance, ``EXECUTE`` is only relevant in the context -of functions or mbeans; granting ``EXECUTE`` on a resource representing a table is nonsensical. Attempting to ``GRANT`` -a permission on resource to which it cannot be applied results in an error response. 
The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission. - -=============== =============================== ======================================================================= - Permission Resource Operations -=============== =============================== ======================================================================= - ``CREATE`` ``ALL KEYSPACES`` ``CREATE KEYSPACE`` and ``CREATE TABLE`` in any keyspace - ``CREATE`` ``KEYSPACE`` ``CREATE TABLE`` in specified keyspace - ``CREATE`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` in any keyspace and ``CREATE AGGREGATE`` in any - keyspace - ``CREATE`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE`` in specified keyspace - ``CREATE`` ``ALL ROLES`` ``CREATE ROLE`` - ``ALTER`` ``ALL KEYSPACES`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in any keyspace - ``ALTER`` ``KEYSPACE`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in specified keyspace - ``ALTER`` ``TABLE`` ``ALTER TABLE`` - ``ALTER`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing any existing - ``ALTER`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing in - specified keyspace - ``ALTER`` ``FUNCTION`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing - ``ALTER`` ``ALL ROLES`` ``ALTER ROLE`` on any role - ``ALTER`` ``ROLE`` ``ALTER ROLE`` - ``DROP`` ``ALL KEYSPACES`` ``DROP KEYSPACE`` and ``DROP TABLE`` in any keyspace - ``DROP`` ``KEYSPACE`` ``DROP TABLE`` in specified keyspace - ``DROP`` ``TABLE`` ``DROP TABLE`` - ``DROP`` ``ALL FUNCTIONS`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in any keyspace - ``DROP`` ``ALL FUNCTIONS IN KEYSPACE`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in specified keyspace - ``DROP`` ``FUNCTION`` ``DROP FUNCTION`` - ``DROP`` ``ALL ROLES`` ``DROP ROLE`` on any role - ``DROP`` ``ROLE`` ``DROP ROLE`` - ``SELECT`` ``ALL KEYSPACES`` ``SELECT`` on any table - ``SELECT`` ``KEYSPACE`` ``SELECT`` on any table in specified keyspace - ``SELECT`` ``TABLE`` ``SELECT`` on specified table - ``SELECT`` ``ALL MBEANS`` Call getter methods on any mbean - ``SELECT`` ``MBEANS`` Call getter methods on any mbean matching a wildcard pattern - ``SELECT`` ``MBEAN`` Call getter methods on named mbean - ``MODIFY`` ``ALL KEYSPACES`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table - ``MODIFY`` ``KEYSPACE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table in - specified keyspace - ``MODIFY`` ``TABLE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on specified table - ``MODIFY`` ``ALL MBEANS`` Call setter methods on any mbean - ``MODIFY`` ``MBEANS`` Call setter methods on any mbean matching a wildcard pattern - ``MODIFY`` ``MBEAN`` Call setter methods on named mbean - ``AUTHORIZE`` ``ALL KEYSPACES`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table - ``AUTHORIZE`` ``KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table in - specified keyspace - ``AUTHORIZE`` ``TABLE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified table - ``AUTHORIZE`` ``ALL FUNCTIONS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any function - ``AUTHORIZE`` ``ALL FUNCTIONS IN KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` in specified keyspace - ``AUTHORIZE`` ``FUNCTION`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified function - ``AUTHORIZE`` ``ALL MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean - 
``AUTHORIZE`` ``MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean matching - a wildcard pattern - ``AUTHORIZE`` ``MBEAN`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on named mbean - ``AUTHORIZE`` ``ALL ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on any role - ``AUTHORIZE`` ``ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on specified roles - ``DESCRIBE`` ``ALL ROLES`` ``LIST ROLES`` on all roles or only roles granted to another, - specified role - ``DESCRIBE`` ``ALL MBEANS`` Retrieve metadata about any mbean from the platform's MBeanServer - ``DESCRIBE`` ``MBEANS`` Retrieve metadata about any mbean matching a wildcard patter from the - platform's MBeanServer - ``DESCRIBE`` ``MBEAN`` Retrieve metadata about a named mbean from the platform's MBeanServer - ``EXECUTE`` ``ALL FUNCTIONS`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function, and use of - any function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL FUNCTIONS IN KEYSPACE`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function in specified - keyspace and use of any function in keyspace in ``CREATE AGGREGATE`` - ``EXECUTE`` ``FUNCTION`` ``SELECT``, ``INSERT`` and ``UPDATE`` using specified function and use - of the function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL MBEANS`` Execute operations on any mbean - ``EXECUTE`` ``MBEANS`` Execute operations on any mbean matching a wildcard pattern - ``EXECUTE`` ``MBEAN`` Execute operations on named mbean -=============== =============================== ======================================================================= - -.. _grant-permission-statement: - -GRANT PERMISSION -~~~~~~~~~~~~~~~~ - -Granting a permission uses the ``GRANT PERMISSION`` statement: - -.. productionlist:: - grant_permission_statement: GRANT `permissions` ON `resource` TO `role_name` - permissions: ALL [ PERMISSIONS ] | `permission` [ PERMISSION ] - permission: CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE - resource: ALL KEYSPACES - :| KEYSPACE `keyspace_name` - :| [ TABLE ] `table_name` - :| ALL ROLES - :| ROLE `role_name` - :| ALL FUNCTIONS [ IN KEYSPACE `keyspace_name` ] - :| FUNCTION `function_name` '(' [ `cql_type` ( ',' `cql_type` )* ] ')' - :| ALL MBEANS - :| ( MBEAN | MBEANS ) `string` - -For instance:: - - GRANT SELECT ON ALL KEYSPACES TO data_reader; - -This gives any user with the role ``data_reader`` permission to execute ``SELECT`` statements on any table across all -keyspaces:: - - GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer; - -This give any user with the role ``data_writer`` permission to perform ``UPDATE``, ``INSERT``, ``UPDATE``, ``DELETE`` -and ``TRUNCATE`` queries on all tables in the ``keyspace1`` keyspace:: - - GRANT DROP ON keyspace1.table1 TO schema_owner; - -This gives any user with the ``schema_owner`` role permissions to ``DROP`` ``keyspace1.table1``:: - - GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer; - -This grants any user with the ``report_writer`` role permission to execute ``SELECT``, ``INSERT`` and ``UPDATE`` queries -which use the function ``keyspace1.user_function( int )``:: - - GRANT DESCRIBE ON ALL ROLES TO role_admin; - -This grants any user with the ``role_admin`` role permission to view any and all roles in the system with a ``LIST -ROLES`` statement - -.. _grant-all: - -GRANT ALL -````````` - -When the ``GRANT ALL`` form is used, the appropriate set of permissions is determined automatically based on the target -resource. 
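-
-For instance (a sketch reusing the ``schema_owner`` role and ``keyspace1`` keyspace from the examples above)::
-
-    GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO schema_owner;
-
-This grants the permissions that are applicable to a keyspace (such as ``ALTER``, ``DROP``, ``SELECT`` and
-``MODIFY``) on ``keyspace1`` and, because permissions flow downwards, on every table in it.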
- -Automatic Granting -`````````````````` - -When a resource is created, via a ``CREATE KEYSPACE``, ``CREATE TABLE``, ``CREATE FUNCTION``, ``CREATE AGGREGATE`` or -``CREATE ROLE`` statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource. - -.. _revoke-permission-statement: - -REVOKE PERMISSION -~~~~~~~~~~~~~~~~~ - -Revoking a permission from a role uses the ``REVOKE PERMISSION`` statement: - -.. productionlist:: - revoke_permission_statement: REVOKE `permissions` ON `resource` FROM `role_name` - -For instance:: - - REVOKE SELECT ON ALL KEYSPACES FROM data_reader; - REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer; - REVOKE DROP ON keyspace1.table1 FROM schema_owner; - REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer; - REVOKE DESCRIBE ON ALL ROLES FROM role_admin; - -Because of their function in normal driver operations, certain tables cannot have their `SELECT` permissions -revoked. The following tables will be available to all authorized users regardless of their assigned role:: - -* `system_schema.keyspaces` -* `system_schema.columns` -* `system_schema.tables` -* `system.local` -* `system.peers` - -.. _list-permissions-statement: - -LIST PERMISSIONS -~~~~~~~~~~~~~~~~ - -Listing granted permissions uses the ``LIST PERMISSIONS`` statement: - -.. productionlist:: - list_permissions_statement: LIST `permissions` [ ON `resource` ] [ OF `role_name` [ NORECURSIVE ] ] - -For instance:: - - LIST ALL PERMISSIONS OF alice; - -Show all permissions granted to ``alice``, including those acquired transitively from any other roles:: - - LIST ALL PERMISSIONS ON keyspace1.table1 OF bob; - -Show all permissions on ``keyspace1.table1`` granted to ``bob``, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to ``keyspace1.table1``. -For example, should ``bob`` have ``ALTER`` permission on ``keyspace1``, that would be included in the results of this -query. Adding the ``NORECURSIVE`` switch restricts the results to only those permissions which were directly granted to -``bob`` or one of ``bob``'s roles:: - - LIST SELECT PERMISSIONS OF carlos; - -Show any permissions granted to ``carlos`` or any of ``carlos``'s roles, limited to ``SELECT`` permissions on any -resource. diff --git a/src/doc/4.0-alpha1/_sources/cql/triggers.rst.txt b/src/doc/4.0-alpha1/_sources/cql/triggers.rst.txt deleted file mode 100644 index db3f53e38..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/triggers.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. 
_cql-triggers: - -Triggers --------- - -Triggers are identified by a name defined by: - -.. productionlist:: - trigger_name: `identifier` - - -.. _create-trigger-statement: - -CREATE TRIGGER -^^^^^^^^^^^^^^ - -Creating a new trigger uses the ``CREATE TRIGGER`` statement: - -.. productionlist:: - create_trigger_statement: CREATE TRIGGER [ IF NOT EXISTS ] `trigger_name` - : ON `table_name` - : USING `string` - -For instance:: - - CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex'; - -The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a ``lib/triggers`` subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction. - -.. _drop-trigger-statement: - -DROP TRIGGER -^^^^^^^^^^^^ - -Dropping a trigger uses the ``DROP TRIGGER`` statement: - -.. productionlist:: - drop_trigger_statement: DROP TRIGGER [ IF EXISTS ] `trigger_name` ON `table_name` - -For instance:: - - DROP TRIGGER myTrigger ON myTable; diff --git a/src/doc/4.0-alpha1/_sources/cql/types.rst.txt b/src/doc/4.0-alpha1/_sources/cql/types.rst.txt deleted file mode 100644 index 509a7565e..000000000 --- a/src/doc/4.0-alpha1/_sources/cql/types.rst.txt +++ /dev/null @@ -1,559 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. _data-types: - -Data Types ----------- - -CQL is a typed language and supports a rich set of data types, including :ref:`native types `, -:ref:`collection types `, :ref:`user-defined types `, :ref:`tuple types ` and :ref:`custom -types `: - -.. productionlist:: - cql_type: `native_type` | `collection_type` | `user_defined_type` | `tuple_type` | `custom_type` - - -.. _native-types: - -Native Types -^^^^^^^^^^^^ - -The native types supported by CQL are: - -.. 
productionlist:: - native_type: ASCII - : | BIGINT - : | BLOB - : | BOOLEAN - : | COUNTER - : | DATE - : | DECIMAL - : | DOUBLE - : | DURATION - : | FLOAT - : | INET - : | INT - : | SMALLINT - : | TEXT - : | TIME - : | TIMESTAMP - : | TIMEUUID - : | TINYINT - : | UUID - : | VARCHAR - : | VARINT - -The following table gives additional informations on the native data types, and on which kind of :ref:`constants -` each type supports: - -=============== ===================== ================================================================================== - type constants supported description -=============== ===================== ================================================================================== - ``ascii`` :token:`string` ASCII character string - ``bigint`` :token:`integer` 64-bit signed long - ``blob`` :token:`blob` Arbitrary bytes (no validation) - ``boolean`` :token:`boolean` Either ``true`` or ``false`` - ``counter`` :token:`integer` Counter column (64-bit signed value). See :ref:`counters` for details - ``date`` :token:`integer`, A date (with no corresponding time value). See :ref:`dates` below for details - :token:`string` - ``decimal`` :token:`integer`, Variable-precision decimal - :token:`float` - ``double`` :token:`integer` 64-bit IEEE-754 floating point - :token:`float` - ``duration`` :token:`duration`, A duration with nanosecond precision. See :ref:`durations` below for details - ``float`` :token:`integer`, 32-bit IEEE-754 floating point - :token:`float` - ``inet`` :token:`string` An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that - there is no ``inet`` constant, IP address should be input as strings - ``int`` :token:`integer` 32-bit signed int - ``smallint`` :token:`integer` 16-bit signed int - ``text`` :token:`string` UTF8 encoded string - ``time`` :token:`integer`, A time (with no corresponding date value) with nanosecond precision. See - :token:`string` :ref:`times` below for details - ``timestamp`` :token:`integer`, A timestamp (date and time) with millisecond precision. See :ref:`timestamps` - :token:`string` below for details - ``timeuuid`` :token:`uuid` Version 1 UUID_, generally used as a “conflict-free” timestamp. Also see - :ref:`timeuuid-functions` - ``tinyint`` :token:`integer` 8-bit signed int - ``uuid`` :token:`uuid` A UUID_ (of any version) - ``varchar`` :token:`string` UTF8 encoded string - ``varint`` :token:`integer` Arbitrary-precision integer -=============== ===================== ================================================================================== - -.. _counters: - -Counters -~~~~~~~~ - -The ``counter`` type is used to define *counter columns*. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the :ref:`UPDATE statement -` for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0. - -.. _counter-limitations: - -Counters have a number of important limitations: - -- They cannot be used for columns part of the ``PRIMARY KEY`` of a table. -- A table that contains a counter can only contain counters. In other words, either all the columns of a table outside - the ``PRIMARY KEY`` have the ``counter`` type, or none of them have it. -- Counters do not support :ref:`expiration `. 
-- The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other - words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed). -- Counter updates are, by nature, not `idemptotent `__. An important - consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), - the client has no way to know if the update has been applied or not. In particular, replaying the update may or may - not lead to an over count. - -.. _timestamps: - -Working with timestamps -^^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``timestamp`` type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as `the epoch `__: January 1 1970 at 00:00:00 GMT. - -Timestamps can be input in CQL either using their value as an :token:`integer`, or using a :token:`string` that -represents an `ISO 8601 `__ date. For instance, all of the values below are -valid ``timestamp`` values for Mar 2, 2011, at 04:05:00 AM, GMT: - -- ``1299038700000`` -- ``'2011-02-03 04:05+0000'`` -- ``'2011-02-03 04:05:00+0000'`` -- ``'2011-02-03 04:05:00.000+0000'`` -- ``'2011-02-03T04:05+0000'`` -- ``'2011-02-03T04:05:00+0000'`` -- ``'2011-02-03T04:05:00.000+0000'`` - -The ``+0000`` above is an RFC 822 4-digit time zone specification; ``+0000`` refers to GMT. US Pacific Standard Time is -``-0800``. The time zone may be omitted if desired (``'2011-02-03 04:05:00'``), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible. - -The time of day may also be omitted (``'2011-02-03'`` or ``'2011-02-03+0000'``), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the :ref:`date ` type. - -.. _dates: - -Working with dates -^^^^^^^^^^^^^^^^^^ - -Values of the ``date`` type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at -the center of the range (2^31). Epoch is January 1st, 1970 - -As for :ref:`timestamp `, a date can be input either as an :token:`integer` or using a date -:token:`string`. In the later case, the format should be ``yyyy-mm-dd`` (so ``'2011-02-03'`` for instance). - -.. _times: - -Working with times -^^^^^^^^^^^^^^^^^^ - -Values of the ``time`` type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight. - -As for :ref:`timestamp `, a time can be input either as an :token:`integer` or using a :token:`string` -representing the time. In the later case, the format should be ``hh:mm:ss[.fffffffff]`` (where the sub-second precision -is optional and if provided, can be less than the nanosecond). So for instance, the following are valid inputs for a -time: - -- ``'08:12:54'`` -- ``'08:12:54.123'`` -- ``'08:12:54.123456'`` -- ``'08:12:54.123456789'`` - -.. _durations: - -Working with durations -^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``duration`` type are encoded as 3 signed integer of variable lengths. The first integer represents the -number of months, the second the number of days and the third the number of nanoseconds. 
This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on
daylight saving. Internally, the number of months and days are decoded as 32-bit integers whereas the number of
nanoseconds is decoded as a 64-bit integer.

A duration can be input as:

 #. ``(quantity unit)+`` like ``12h30m`` where the unit can be:

    * ``y``: years (12 months)
    * ``mo``: months (1 month)
    * ``w``: weeks (7 days)
    * ``d``: days (1 day)
    * ``h``: hours (3,600,000,000,000 nanoseconds)
    * ``m``: minutes (60,000,000,000 nanoseconds)
    * ``s``: seconds (1,000,000,000 nanoseconds)
    * ``ms``: milliseconds (1,000,000 nanoseconds)
    * ``us`` or ``µs``: microseconds (1000 nanoseconds)
    * ``ns``: nanoseconds (1 nanosecond)
 #. ISO 8601 format: ``P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W``
 #. ISO 8601 alternative format: ``P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]``

For example::

    INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
    INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
    INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);

.. _duration-limitation:

Duration columns cannot be used in a table's ``PRIMARY KEY``. This limitation is due to the fact that durations cannot
be ordered. It is effectively not possible to know if ``1mo`` is greater than ``29d`` without a date context.

A ``1d`` duration is not equal to a ``24h`` one as the duration type has been created to be able to support daylight
saving.

.. _collections:

Collections
^^^^^^^^^^^

CQL supports 3 kinds of collections: :ref:`maps`, :ref:`sets` and :ref:`lists`. The types of those collections are
defined by:

.. productionlist::
   collection_type: MAP '<' `cql_type` ',' `cql_type` '>'
                  : | SET '<' `cql_type` '>'
                  : | LIST '<' `cql_type` '>'

and their values can be input using collection literals:

.. productionlist::
   collection_literal: `map_literal` | `set_literal` | `list_literal`
   map_literal: '{' [ `term` ':' `term` (',' `term` ':' `term`)* ] '}'
   set_literal: '{' [ `term` (',' `term`)* ] '}'
   list_literal: '[' [ `term` (',' `term`)* ] ']'

Note however that neither :token:`bind_marker` nor ``NULL`` are supported inside collection literals.

Noteworthy characteristics
~~~~~~~~~~~~~~~~~~~~~~~~~~

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like "the
phone numbers of a given user", "labels applied to an email", etc. But when items are expected to grow unbounded ("all
messages sent by a user", "events registered by a sensor"...), then collections are not appropriate and a specific
table (with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy
characteristics and limitations:

- Individual collections are not indexed internally. This means that even to access a single element of a collection,
  the whole collection has to be read (and reading one is not paged internally).
- While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do.
  Further, some list operations are not idempotent by nature (see the section on :ref:`lists <lists>` below for
  details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when
  possible.
Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an
anti-pattern to use a (single) collection to store large amounts of data.

.. _maps:

Maps
~~~~

A ``map`` is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define
and insert a map with::

    CREATE TABLE users (
        id text PRIMARY KEY,
        name text,
        favs map<text, text>   // A map of text keys, and text values
    );

    INSERT INTO users (id, name, favs)
        VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });

    // Replace the existing map entirely.
    UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';

Further, maps support:

- Updating or inserting one or more elements::

    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    UPDATE users SET favs = favs + { 'movie' : 'Casablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';

- Removing one or more elements (if an element doesn't exist, removing it is a no-op but no error is thrown)::

    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    UPDATE users SET favs = favs - { 'movie', 'band' } WHERE id = 'jsmith';

  Note that for removing multiple elements in a ``map``, you remove from it a ``set`` of keys.

Lastly, TTLs are allowed for both ``INSERT`` and ``UPDATE``, but in both cases the TTL set only applies to the newly
inserted/updated elements. In other words::

    UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';

will only apply the TTL to the ``{ 'color' : 'green' }`` record, the rest of the map remaining unaffected.


.. _sets:

Sets
~~~~

A ``set`` is a (sorted) collection of unique values. You can define and insert a set with::

    CREATE TABLE images (
        name text PRIMARY KEY,
        owner text,
        tags set<text>   // A set of text values
    );

    INSERT INTO images (name, owner, tags)
        VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });

    // Replace the existing set entirely
    UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';

Further, sets support:

- Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op)::

    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';

- Removing one or multiple elements (if an element doesn't exist, removing it is a no-op but no error is thrown)::

    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';

Lastly, as for :ref:`maps <maps>`, TTLs if used only apply to the newly inserted values.

.. _lists:

Lists
~~~~~

.. note:: As mentioned above and further discussed at the end of this section, lists have limitations and specific
   performance considerations that you should take into account before using them. In general, if you can use a
   :ref:`set <sets>` instead of a list, always prefer a set.

A ``list`` is a (sorted) collection of non-unique values where elements are ordered by their position in the list.
You can define and insert a list with::

    CREATE TABLE plays (
        id text PRIMARY KEY,
        game text,
        players int,
        scores list<int>   // A list of integers
    )

    INSERT INTO plays (id, game, players, scores)
        VALUES ('123-afde', 'quake', 3, [17, 4, 2]);

    // Replace the existing list entirely
    UPDATE plays SET scores = [ 3, 9, 4 ] WHERE id = '123-afde';

Further, lists support:

- Appending and prepending values to a list::

    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';

- Setting the value at a particular position in the list. This implies that the list has a pre-existing element for
  that position or an error will be thrown that the list is too small::

    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';

- Removing an element by its position in the list. This implies that the list has a pre-existing element for that
  position or an error will be thrown that the list is too small. Further, as the operation removes an element from the
  list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted::

    DELETE scores[1] FROM plays WHERE id = '123-afde';

- Deleting *all* the occurrences of particular values in the list (if a particular element doesn't occur at all in the
  list, it is simply ignored and no error is thrown)::

    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';

.. warning:: The append and prepend operations are not idempotent by nature. So in particular, if one of these
   operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending
   the value twice.

.. warning:: Setting and removing an element by position and removing occurrences of particular values incur an
   internal *read-before-write*. They will thus run more slowly and take more resources than usual updates (with the
   exclusion of conditional writes that have their own cost).

Lastly, as for :ref:`maps <maps>`, TTLs when used only apply to the newly inserted values.

.. _udts:

User-Defined Types
^^^^^^^^^^^^^^^^^^

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed
using the :token:`create_type_statement`, :token:`alter_type_statement` and :token:`drop_type_statement` described
below. But once created, a UDT is simply referred to by its name:

.. productionlist::
   user_defined_type: `udt_name`
   udt_name: [ `keyspace_name` '.' ] `identifier`


Creating a UDT
~~~~~~~~~~~~~~

Creating a new user-defined type is done using a ``CREATE TYPE`` statement defined by:

.. productionlist::
   create_type_statement: CREATE TYPE [ IF NOT EXISTS ] `udt_name`
                        : '(' `field_definition` ( ',' `field_definition` )* ')'
   field_definition: `identifier` `cql_type`

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any
type, including collections or other UDTs. For instance::

    CREATE TYPE phone (
        country_code int,
        number text,
    )

    CREATE TYPE address (
        street text,
        city text,
        zip text,
        phones map<text, frozen<phone>>
    )

    CREATE TABLE user (
        name text PRIMARY KEY,
        addresses map<text, frozen<address>>
    )

Note that:

- Attempting to create an already existing type will result in an error unless the ``IF NOT EXISTS`` option is used. If
  it is used, the statement will be a no-op if the type already exists.
- A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At
  creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created
  in the current keyspace.
- As of Cassandra |version|, UDTs have to be frozen in most cases, hence the ``frozen<address>`` in the table
  definition above. Please see the section on :ref:`frozen ` for more details.

UDT literals
~~~~~~~~~~~~

Once a user-defined type has been created, values can be input using a UDT literal:

.. productionlist::
   udt_literal: '{' `identifier` ':' `term` ( ',' `identifier` ':' `term` )* '}'

In other words, a UDT literal is like a :ref:`map <maps>` literal but its keys are the names of the fields of the type.
For instance, one could insert into the table defined in the previous section using::

    INSERT INTO user (name, addresses)
        VALUES ('z3 Pr3z1den7', {
            'home' : {
                street: '1600 Pennsylvania Ave NW',
                city: 'Washington',
                zip: '20500',
                phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
                          'landline' : { country_code: 1, number: '...' } }
            },
            'work' : {
                street: '1600 Pennsylvania Ave NW',
                city: 'Washington',
                zip: '20500',
                phones: { 'fax' : { country_code: 1, number: '...' } }
            }
        })

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some
fields (in which case those will be ``null``).

Altering a UDT
~~~~~~~~~~~~~~

An existing user-defined type can be modified using an ``ALTER TYPE`` statement:

.. productionlist::
   alter_type_statement: ALTER TYPE `udt_name` `alter_type_modification`
   alter_type_modification: ADD `field_definition`
                          : | RENAME `identifier` TO `identifier` ( `identifier` TO `identifier` )*

You can:

- add a new field to the type (``ALTER TYPE address ADD country text``). That new field will be ``null`` for any values
  of the type created before the addition.
- rename the fields of the type (``ALTER TYPE address RENAME zip TO zipcode``).

Dropping a UDT
~~~~~~~~~~~~~~

You can drop an existing user-defined type using a ``DROP TYPE`` statement:

.. productionlist::
   drop_type_statement: DROP TYPE [ IF EXISTS ] `udt_name`

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is
still in use by another type, table or function will result in an error.

If the type dropped does not exist, an error will be returned unless ``IF EXISTS`` is used, in which case the operation
is a no-op.

.. _tuples:

Tuples
^^^^^^

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be
thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

.. productionlist::
   tuple_type: TUPLE '<' `cql_type` ( ',' `cql_type` )* '>'
   tuple_literal: '(' `term` ( ',' `term` )* ')'

and can be used thusly::

    CREATE TABLE durations (
        event text,
        duration tuple<int, text>,
    )

    INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));

Unlike other "composed" types (collections and UDTs), a tuple is always :ref:`frozen ` (without the need of the
`frozen` keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple).
Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of
those values can be null but they need to be explicitly declared as such).

.. _custom-types:

Custom Types
^^^^^^^^^^^^

.. note:: Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is
   complex, not user friendly and the other provided types, particularly :ref:`user-defined types <udts>`, should
   almost always be enough.

A custom type is defined by:

.. 
productionlist:: - custom_type: `string` - -A custom type is a :token:`string` that contains the name of Java class that extends the server side ``AbstractType`` -class and that can be loaded by Cassandra (it should thus be in the ``CLASSPATH`` of every node running Cassandra). That -class will define what values are valid for the type and how the time sorts when used for a clustering column. For any -other purpose, a value of a custom type is the same than that of a ``blob``, and can in particular be input using the -:token:`blob` literal syntax. diff --git a/src/doc/4.0-alpha1/_sources/data_modeling/index.rst.txt b/src/doc/4.0-alpha1/_sources/data_modeling/index.rst.txt deleted file mode 100644 index dde031a19..000000000 --- a/src/doc/4.0-alpha1/_sources/data_modeling/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Data Modeling -============= - -.. todo:: TODO diff --git a/src/doc/4.0-alpha1/_sources/development/ci.rst.txt b/src/doc/4.0-alpha1/_sources/development/ci.rst.txt deleted file mode 100644 index 192b18862..000000000 --- a/src/doc/4.0-alpha1/_sources/development/ci.rst.txt +++ /dev/null @@ -1,72 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Jenkins CI Environment -********************** - -About CI testing and Apache Cassandra -===================================== - -Cassandra can be automatically tested using various test suites, that are either implemented based on JUnit or the `dtest `_ scripts written in Python. As outlined in :doc:`testing`, each kind of test suite addresses a different way how to test Cassandra. But in the end, all of them will be executed together on our CI platform at `builds.apache.org `_, running `Jenkins `_. - - - -Setting up your own Jenkins server -================================== - -Jenkins is an open source solution that can be installed on a large number of platforms. 
Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution. - -Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment. - -Required plugins ----------------- - -The following plugins need to be installed additionally to the standard plugins (git, ant, ..). - -You can install any missing plugins through the install manager. - -Go to ``Manage Jenkins -> Manage Plugins -> Available`` and install the following plugins and respective dependencies: - -* Job DSL -* Javadoc Plugin -* description setter plugin -* Throttle Concurrent Builds Plug-in -* Test stability history -* Hudson Post build task - - -Setup seed job --------------- - -Config ``New Item`` - -* Name it ``Cassandra-Job-DSL`` -* Select ``Freestyle project`` - -Under ``Source Code Management`` select Git using the repository: ``https://github.com/apache/cassandra-builds`` - -Under ``Build``, confirm ``Add build step`` -> ``Process Job DSLs`` and enter at ``Look on Filesystem``: ``jenkins-dsl/cassandra_job_dsl_seed.groovy`` - -Generated jobs will be created based on the Groovy script's default settings. You may want to override settings by checking ``This project is parameterized`` and add ``String Parameter`` for on the variables that can be found in the top of the script. This will allow you to setup jobs for your own repository and branches (e.g. working branches). - -**When done, confirm "Save"** - -You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message `"Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use"`. Goto ``Manage Jenkins`` -> ``In-process Script Approval`` to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates. - -Jobs are triggered by either changes in Git or are scheduled to execute periodically, e.g. on daily basis. Jenkins will use any available executor with the label "cassandra", once the job is to be run. Please make sure to make any executors available by selecting ``Build Executor Status`` -> ``Configure`` -> Add "``cassandra``" as label and save. - - - diff --git a/src/doc/4.0-alpha1/_sources/development/code_style.rst.txt b/src/doc/4.0-alpha1/_sources/development/code_style.rst.txt deleted file mode 100644 index 5a486a4a3..000000000 --- a/src/doc/4.0-alpha1/_sources/development/code_style.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Code Style -========== - -General Code Conventions ------------------------- - - - The Cassandra project follows `Sun's Java coding conventions `_ with an important exception: ``{`` and ``}`` are always placed on a new line - -Exception handling ------------------- - - - Never ever write ``catch (...) {}`` or ``catch (...) { logger.error() }`` merely to satisfy Java's compile-time exception checking. Always propagate the exception up or throw ``RuntimeException`` (or, if it "can't happen," ``AssertionError``). This makes the exceptions visible to automated tests. - - Avoid propagating up checked exceptions that no caller handles. Rethrow as ``RuntimeException`` (or ``IOError``, if that is more applicable). - - Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don't hide it behind a warn; if it isn't, no need for the warning. - - If you genuinely know an exception indicates an expected condition, it's okay to ignore it BUT this must be explicitly explained in a comment. - -Boilerplate ------------ - - - Avoid redundant ``@Override`` annotations when implementing abstract or interface methods. - - Do not implement equals or hashcode methods unless they are actually needed. - - Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in "real" methods to either.) - - Prefer requiring initialization in the constructor to setters. - - Avoid redundant ``this`` references to member fields or methods. - - Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it. - - Always include braces for nested levels of conditionals and loops. Only avoid braces for single level. - -Multiline statements --------------------- - - - Try to keep lines under 120 characters, but use good judgement -- it's better to exceed 120 by a little, than split a line that has no natural splitting points. - - When splitting inside a method call, use one line per parameter and align them, like this: - - :: - - SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), - columnFamilies.size(), - StorageService.getPartitioner()); - - - When splitting a ternary, use one line per clause, carry the operator, and align like this: - - :: - - var = bar == null - ? doFoo() - : doBar(); - -Whitespace ----------- - - - Please make sure to use 4 spaces instead of the tab character for all your indentation. - - Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn't have to pay attention to whitespace diffs. 
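A quick way to spot accidental trailing whitespace or stray tabs in your own changes before submitting a patch is
git's built-in whitespace check. This is plain git functionality rather than project-specific tooling, shown here only
as a convenience::

    git diff --check            # flag whitespace errors in unstaged changes
    git diff --cached --check   # same check for changes already staged for commit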
- -Imports -------- - -Please observe the following order for your imports:: - - java - [blank line] - com.google.common - org.apache.commons - org.junit - org.slf4j - [blank line] - everything else alphabetically - -Format files for IDEs ---------------------- - - - IntelliJ: `intellij-codestyle.jar `_ - - IntelliJ 13: `gist for IntelliJ 13 `_ (this is a work in progress, still working on javadoc, ternary style, line continuations, etc) - - Eclipse (https://github.com/tjake/cassandra-style-eclipse) - - - diff --git a/src/doc/4.0-alpha1/_sources/development/dependencies.rst.txt b/src/doc/4.0-alpha1/_sources/development/dependencies.rst.txt deleted file mode 100644 index 7d230d3ae..000000000 --- a/src/doc/4.0-alpha1/_sources/development/dependencies.rst.txt +++ /dev/null @@ -1,54 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dependency Management -********************* - -Managing libraries for Cassandra is a bit less straight forward compared to other projects, as the build process is based on ant, maven and manually managed jars. Make sure to follow the steps below carefully and pay attention to any emerging issues in the :doc:`ci` and reported related issues on Jira/ML, in case of any project dependency changes. - -As Cassandra is an Apache product, all included libraries must follow Apache's `software license requirements `_. - -Required steps to add or update libraries -========================================= - -* Add or replace jar file in ``lib`` directory -* Add or update ``lib/license`` files -* Update dependencies in ``build.xml`` - - * Add to ``parent-pom`` with correct version - * Add to ``all-pom`` if simple Cassandra dependency (see below) - - -POM file types -============== - -* **parent-pom** - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here. -* **build-deps-pom(-sources)** + **coverage-deps-pom** - used by ``ant build`` compile target. Listed dependenices will be resolved and copied to ``build/lib/{jar,sources}`` by executing the ``maven-ant-tasks-retrieve-build`` target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution. -* **test-deps-pom** - refered by ``maven-ant-tasks-retrieve-test`` to retrieve and save dependencies to ``build/test/lib``. Exclusively used during JUnit test execution. -* **all-pom** - pom for `cassandra-all.jar `_ that can be installed or deployed to public maven repos via ``ant publish`` -* **dist-pom** - pom for tarball distribution (cassandra-{bin,src}.tar.gz) created by ``ant artifacts``. Should be left as is, but needed for installing or deploying releases. 
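After editing ``build.xml`` and the license files under ``lib``, it can be worth re-resolving dependencies locally
before posting a patch. A minimal sketch using the ant targets named above (exact output locations may differ slightly
between versions)::

    ant realclean                        # start from a clean build directory
    ant maven-ant-tasks-retrieve-build   # resolve build dependencies into build/lib
    ant maven-ant-tasks-retrieve-test    # resolve test dependencies into build/test/lib
    ls build/lib                         # confirm the expected jar versions were fetched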
- - -Troubleshooting and conflict resolution -======================================= - -Here are some useful commands that may help you out resolving conflicts. - -* ``ant realclean`` - gets rid of the build directory, including build artifacts. -* ``mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j`` - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ``ant mvn-install``. -* ``rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/`` - removes cached local Cassandra maven artifacts - - diff --git a/src/doc/4.0-alpha1/_sources/development/documentation.rst.txt b/src/doc/4.0-alpha1/_sources/development/documentation.rst.txt deleted file mode 100644 index 8b7cd4e4e..000000000 --- a/src/doc/4.0-alpha1/_sources/development/documentation.rst.txt +++ /dev/null @@ -1,104 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - -Working on Documentation -************************* - -How Cassandra is documented -=========================== - -The official Cassandra documentation lives in the project's git repository. We use a static site generator, `Sphinx `_, to create pages hosted at `cassandra.apache.org `_. You'll also find developer centric content about Cassandra internals in our retired `wiki `_ (not covered by this guide). - -Using a static site generator often requires to use a markup language instead of visual editors (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses `reStructuredText `_ for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at `existing documents <..>`_ to get a better idea how we use reStructuredText to write our documents. - -So how do you actually start making contributions? - -GitHub based work flow -====================== - -*Recommended for shorter documents and minor changes on existing content (e.g. fixing typos or updating descriptions)* - -Follow these steps to contribute using GitHub. It's assumed that you're logged in with an existing account. - -1. Fork the GitHub mirror of the `Cassandra repository `_ - -.. image:: images/docs_fork.png - -2. Create a new branch that you can use to make your edits. It's recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later to when you decide you’re ready to contribute your work. - -.. image:: images/docs_create_branch.png - -3. Navigate to document sources ``doc/source`` to find the ``.rst`` file to edit. The URL of the document should correspond to the directory structure. 
New files can be created using the "Create new file" button: - -.. image:: images/docs_create_file.png - -4. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and add some content. Have a look at other existing ``.rst`` files to get a better idea what format elements to use. - -.. image:: images/docs_editor.png - -Make sure to preview added content before committing any changes. - -.. image:: images/docs_preview.png - -5. Commit your work when you're done. Make sure to add a short description of all your edits since the last time you committed before. - -.. image:: images/docs_commit.png - -6. Finally if you decide that you're done working on your branch, it's time to create a pull request! - -.. image:: images/docs_pr.png - -Afterwards the GitHub Cassandra mirror will list your pull request and you're done. Congratulations! Please give us some time to look at your suggested changes before we get back to you. - - -Jira based work flow -==================== - -*Recommended for major changes* - -Significant changes to the documentation are best managed through our Jira issue tracker. Please follow the same `contribution guides `_ as for regular code contributions. Creating high quality content takes a lot of effort. It’s therefor always a good idea to create a ticket before you start and explain what you’re planing to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed. - -Working on documents locally using Sphinx -========================================= - -*Recommended for advanced editing* - -Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefor it’s a good idea to setup Sphinx locally for any serious editing. Please follow the instructions in the Cassandra source directory at ``doc/README.md``. Setup is very easy (at least on OSX and Linux). - -Notes for committers -==================== - -Please feel free to get involved and merge pull requests created on the GitHub mirror if you're a committer. As this is a read-only repository, you won't be able to merge a PR directly on GitHub. You'll have to commit the changes against the Apache repository with a comment that will close the PR when the committ syncs with GitHub. - -You may use a git work flow like this:: - - git remote add github https://github.com/apache/cassandra.git - git fetch github pull//head: - git checkout - -Now either rebase or squash the commit, e.g. for squashing:: - - git reset --soft origin/trunk - git commit --author - -Make sure to add a proper commit message including a "Closes #" text to automatically close the PR. - -Publishing ----------- - -Details for building and publishing of the site at cassandra.apache.org can be found `here `_. - diff --git a/src/doc/4.0-alpha1/_sources/development/gettingstarted.rst.txt b/src/doc/4.0-alpha1/_sources/development/gettingstarted.rst.txt deleted file mode 100644 index c2f5ef36e..000000000 --- a/src/doc/4.0-alpha1/_sources/development/gettingstarted.rst.txt +++ /dev/null @@ -1,60 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. 
to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _gettingstarted: - -Getting Started -************************* - -Initial Contributions -======================== - -Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). So, before firing up your IDE to create that new feature, we'd suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work. - * Add to or update the documentation - * Answer questions on the user list - * Review and test a submitted patch - * Investigate and fix a reported bug - * Create unit tests and d-tests - -Updating documentation -======================== - -The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (:ref:`patches`). - -Answering questions on the user list -==================================== - -Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! -See the `community `_ page for details on how to subscribe to the mailing list. - -Reviewing and testing a submitted patch -======================================= - -Reviewing patches is not the sole domain of committers, if others have reviewed a patch it can reduce the load on the committers allowing them to write more great features or review more patches. Follow the instructions in :ref:`_development_how_to_review` or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, "I tested this performance enhacement on our application's standard production load test and found a 3% improvement.") - -Investigate and/or fix a reported bug -===================================== - -Often, the hardest work in fixing a bug is reproducing it. Even if you don't have the knowledge to produce a fix, figuring out a way to reliable reproduce an issues can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (:ref:`patches`). - -Create unit tests and Dtests -============================ - -Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. 
This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your
change introducing new issues. See :ref:`testing` and :ref:`patches` for more detail.

diff --git a/src/doc/4.0-alpha1/_sources/development/how_to_commit.rst.txt b/src/doc/4.0-alpha1/_sources/development/how_to_commit.rst.txt deleted file mode 100644 index dff39832d..000000000 --- a/src/doc/4.0-alpha1/_sources/development/how_to_commit.rst.txt +++ /dev/null @@ -1,75 +0,0 @@

.. Licensed to the Apache Software Foundation (ASF) under one
.. or more contributor license agreements. See the NOTICE file
.. distributed with this work for additional information
.. regarding copyright ownership. The ASF licenses this file
.. to you under the Apache License, Version 2.0 (the
.. "License"); you may not use this file except in compliance
.. with the License. You may obtain a copy of the License at
..
.. http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.

.. highlight:: none

How-to Commit
=============

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the
work yourself.

Here is how committing and merging will usually look for tickets that follow the convention (if patch-based):

Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3
and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining
branches (12345-3.3.patch, 12345-trunk.patch).

On cassandra-3.0:
   #. ``git am -3 12345-3.0.patch`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)

On cassandra-3.3:
   #. ``git merge cassandra-3.0 -s ours``
   #. ``git apply -3 12345-3.3.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
   #. ``git commit --amend``

On trunk:
   #. ``git merge cassandra-3.3 -s ours``
   #. ``git apply -3 12345-trunk.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
   #. ``git commit --amend``

On any branch:
   #. ``git push origin cassandra-3.0 cassandra-3.3 trunk --atomic``

Same scenario, but a branch-based contribution:

On cassandra-3.0:
   #. ``git cherry-pick <sha-of-3.0-commit>`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)

On cassandra-3.3:
   #. ``git merge cassandra-3.0 -s ours``
   #. ``git format-patch -1 <sha-of-3.3-commit>``
   #. ``git apply -3 <formatted-patch-name>.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
   #. ``git commit --amend``

On trunk:
   #. ``git merge cassandra-3.3 -s ours``
   #. ``git format-patch -1 <sha-of-trunk-commit>``
   #. ``git apply -3 <formatted-patch-name>.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
   #. ``git commit --amend``

On any branch:
   #. ``git push origin cassandra-3.0 cassandra-3.3 trunk --atomic``

.. tip::

   Notes on git flags:
   ``-3`` flag to am and apply will instruct git to perform a 3-way merge for you.
If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply. - - ``-atomic`` flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per each branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue. - -.. tip:: - - The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. - curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch - diff --git a/src/doc/4.0-alpha1/_sources/development/how_to_review.rst.txt b/src/doc/4.0-alpha1/_sources/development/how_to_review.rst.txt deleted file mode 100644 index 4778b6946..000000000 --- a/src/doc/4.0-alpha1/_sources/development/how_to_review.rst.txt +++ /dev/null @@ -1,73 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _how_to_review: - -Review Checklist -**************** - -When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process: - -**General** - - * Does it conform to the :doc:`code_style` guidelines? - * Is there any redundant or duplicate code? - * Is the code as modular as possible? - * Can any singletons be avoided? - * Can any of the code be replaced with library functions? - * Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem? - -**Error-Handling** - - * Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded? - * Where third-party utilities are used, are returning errors being caught? - * Are invalid parameter values handled? - * Are any Throwable/Exceptions passed to the JVMStabilityInspector? - * Are errors well-documented? Does the error message tell the user how to proceed? - * Do exceptions propagate to the appropriate level in the code? - -**Documentation** - - * Do comments exist and describe the intent of the code (the "why", not the "how")? - * Are javadocs added where appropriate? - * Is any unusual behavior or edge-case handling described? - * Are data structures and units of measurement explained? - * Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’? - * Does the code self-document via clear naming, abstractions, and flow control? - * Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed? - * Is the ticket tagged with "client-impacting" and "doc-impacting", where appropriate? - * Has lib/licences been updated for third-party libs? Are they Apache License compatible? 
- * Is the Component on the JIRA ticket set appropriately? - -**Testing** - - * Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc. - * Do tests exist and are they comprehensive? - * Do unit tests actually test that the code is performing the intended functionality? - * Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse? - * If the code may be affected by multi-node clusters, are there dtests? - * If the code may take a long time to test properly, are there CVH tests? - * Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions? - * If patch affects read/write path, did we test for performance regressions w/multiple workloads? - * If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature? - -**Logging** - - * Are logging statements logged at the correct level? - * Are there logs in the critical path that could affect performance? - * Is there any log that could be added to communicate status or troubleshoot potential problems in this feature? - * Can any unnecessary logging statement be removed? - diff --git a/src/doc/4.0-alpha1/_sources/development/ide.rst.txt b/src/doc/4.0-alpha1/_sources/development/ide.rst.txt deleted file mode 100644 index 97c73ae61..000000000 --- a/src/doc/4.0-alpha1/_sources/development/ide.rst.txt +++ /dev/null @@ -1,185 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Building and IDE Integration -**************************** - -Building From Source -==================== - -Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using `Java 8 `_, `Git `_ and `Ant `_. - -The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:: - - git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk - -Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:: - - git checkout cassandra-3.0 - -You can get a list of available branches with ``git branch``. - -Finally build Cassandra using ant:: - - ant - -This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled. - -.. hint:: - - You can setup multiple working trees for different Cassandra versions from the same repository using `git-worktree `_. 
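   For example, a minimal sketch (branch names and paths are only illustrations) that keeps one working tree per
   release line while sharing a single object store::

      git worktree add ../cassandra-3.0 cassandra-3.0
      git worktree add ../cassandra-3.11 cassandra-3.11
      git worktree list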
- -| - -Setting up Cassandra in IntelliJ IDEA -===================================== - -`IntelliJ IDEA `_ by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra. - -Setup Cassandra as a Project (C* 2.1 and newer) ------------------------------------------------ - -Since 2.1.5, there is a new ant target: ``generate-idea-files``. Please see our `wiki `_ for instructions for older Cassandra versions. - -Please clone and build Cassandra as described above and execute the following steps: - -1. Once Cassandra is built, generate the IDEA files using ant: - -:: - - ant generate-idea-files - -2. Start IDEA - -3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA's File menu - -The project generated by the ant task ``generate-idea-files`` contains nearly everything you need to debug Cassandra and execute unit tests. - - * Run/debug defaults for JUnit - * Run/debug configuration for Cassandra daemon - * License header for Java source files - * Cassandra code style - * Inspections - -| - -Opening Cassandra in Apache NetBeans -======================================= - -`Apache NetBeans `_ is the elder of the open sourced java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans. - -Open Cassandra as a Project (C* 4.0 and newer) ------------------------------------------------ - -Please clone and build Cassandra as described above and execute the following steps: - -1. Start Apache NetBeans - -2. Open the NetBeans project from the `ide/` folder of the checked out Cassandra directory using the menu item "Open Project…" in NetBeans' File menu - -The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant `build.xml` script. - - * Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu. - * Profile Project is available via the Profile menu. In the opened Profiler tab, click the green "Profile" button. - * Cassandra's code style is honored in `ide/nbproject/project.properties` - -The `JAVA8_HOME` system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute. - -| - -Setting up Cassandra in Eclipse -=============================== - -Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the `download page `_. The following guide was created with "Eclipse IDE for Java Developers". - -These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x. - -Project Settings ----------------- - -**It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.** - - * Clone and build Cassandra as described above. - * Run ``ant generate-eclipse-files`` to create the Eclipse settings. - * Start Eclipse. - * Select ``File->Import->Existing Projects into Workspace->Select git directory``. - * Make sure "cassandra-trunk" is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above). - * Confirm "Finish" to have your project imported. 
- -You should now be able to find the project as part of the "Package Explorer" or "Project Explorer" without having Eclipse complain about any errors after building the project automatically. - -Unit Tests ----------- - -Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting ``Run As->JUnit Test``. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting ``Debug As->JUnit Test``. - -Alternatively all unit tests can be run from the command line as described in :doc:`testing` - -Debugging Cassandra Using Eclipse ---------------------------------- - -There are two ways how to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ``./bin/cassandra`` script and connect to the JVM through `remotely `_ from Eclipse or start Cassandra from Eclipse right away. - -Starting Cassandra From Command Line -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * Set environment variable to define remote debugging options for the JVM: - ``export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`` - * Start Cassandra by executing the ``./bin/cassandra`` - -Afterwards you should be able to connect to the running Cassandra process through the following steps: - -From the menu, select ``Run->Debug Configurations..`` - -.. image:: images/eclipse_debug0.png - -Create new remote application - -.. image:: images/eclipse_debug1.png - -Configure connection settings by specifying a name and port 1414 - -.. image:: images/eclipse_debug2.png - -Afterwards confirm "Debug" to connect to the JVM and start debugging Cassandra! - -Starting Cassandra From Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cassandra can also be started directly from Eclipse if you don't want to use the command line. - -From the menu, select ``Run->Run Configurations..`` - -.. image:: images/eclipse_debug3.png - -Create new application - -.. image:: images/eclipse_debug4.png - -Specify name, project and main class ``org.apache.cassandra.service.CassandraDaemon`` - -.. image:: images/eclipse_debug5.png - -Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed. - -:: - - -Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true - -.. image:: images/eclipse_debug6.png - -Now just confirm "Debug" and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging! - diff --git a/src/doc/4.0-alpha1/_sources/development/index.rst.txt b/src/doc/4.0-alpha1/_sources/development/index.rst.txt deleted file mode 100644 index ffa7134dd..000000000 --- a/src/doc/4.0-alpha1/_sources/development/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. 
-.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contributing to Cassandra -************************* - -.. toctree:: - :maxdepth: 2 - - gettingstarted - ide - testing - patches - code_style - how_to_review - how_to_commit - documentation - ci - dependencies - release_process diff --git a/src/doc/4.0-alpha1/_sources/development/patches.rst.txt b/src/doc/4.0-alpha1/_sources/development/patches.rst.txt deleted file mode 100644 index f3a2cca0f..000000000 --- a/src/doc/4.0-alpha1/_sources/development/patches.rst.txt +++ /dev/null @@ -1,141 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _patches: - -Contributing Code Changes -************************* - -Choosing What to Work on -======================== - -Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that requires changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing. - -As a general rule of thumb: - * Major new features and significant changes to the code based will likely not going to be accepted without deeper discussion within the `developer community `_ - * Bug fixes take higher priority compared to features - * The extend to which tests are required depend on how likely your changes will effect the stability of Cassandra in production. Tooling changes requires fewer tests than storage engine changes. - * Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately - -.. hint:: - - Not sure what to work? Just pick an issue marked as `Low Hanging Fruit `_ Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners. - -Before You Start Coding -======================= - -Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or :ref:`Slack `. 
- -You should also - * Avoid redundant work by searching for already reported issues in `JIRA `_ - * Create a new issue early in the process describing what you're working on - not just after finishing your patch - * Link related JIRA issues with your own ticket to provide a better context - * Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code - * Ping people who you actively like to ask for advice on JIRA by `mentioning users `_ - -There are also some fixed rules that you need to be aware: - * Patches will only be applied to branches by following the release model - * Code must be testable - * Code must follow the :doc:`code_style` convention - * Changes must not break compatibility between different Cassandra versions - * Contributions must be covered by the Apache License - -Choosing the Right Branches to Work on -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are currently multiple Cassandra versions maintained in individual branches: - -======= ====== -Version Policy -======= ====== -4.0 Code freeze (see below) -3.11 Critical bug fixes only -3.0 Critical bug fixes only -2.2 Critical bug fixes only -2.1 Critical bug fixes only -======= ====== - -Corresponding branches in git are easy to recognize as they are named ``cassandra-`` (e.g. ``cassandra-3.0``). The ``trunk`` branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases. - -4.0 Code Freeze -""""""""""""""" - -Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. During that time, only the following patches will be considered for acceptance: - - * Bug fixes - * Measurable performance improvements - * Changes not distributed as part of the release such as: - * Testing related improvements and fixes - * Build and infrastructure related changes - * Documentation - -Bug Fixes -""""""""" - -Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be ``cassandra-2.1`` -> ``cassandra-2.2`` -> ``cassandra-3.0`` -> ``cassandra-3.x`` -> ``trunk``. But don't worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn't very common. As a contributor, you're also not expected to provide a single patch for each version. What you need to do however is: - - * Be clear about which versions you could verify to be affected by the bug - * For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on case by case bases - * If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0) - * Test if the patch can be merged cleanly across branches in the direction listed above - * Be clear which branches may need attention by the committer or even create custom patches for those if you can - -Creating a Patch -================ - -So you've finished coding and the great moment arrives: it's time to submit your patch! - - 1. Create a branch for your changes if you haven't done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. ``git checkout -b 12345-3.0`` - 2. 
Verify that you follow Cassandra's :doc:`code_style` - 3. Make sure all tests (including yours) pass using ant as described in :doc:`testing`. If you suspect a test failure is unrelated to your change, it may be useful to check the test's status by searching the issue tracker or looking at `CI `_ results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites. - 4. Consider going through the :doc:`how_to_review` for your code. This will help you to understand how others will consider your change for inclusion. - 5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either: - - a. Attach a patch to JIRA with a single squashed commit in it (per branch), or - b. Squash the commits in-place in your branches into one - - 6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below. - - :: - - - - patch by ; reviewed by for CASSANDRA-##### - - 7. When you're happy with the result, create a patch: - - :: - - git add - git commit -m '' - git format-patch HEAD~1 - mv (e.g. 12345-trunk.txt, 12345-3.0.txt) - - Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch: - - :: - - git push --set-upstream origin 12345-3.0 - - 8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless. - 9. Attach the newly generated patch to the ticket/add a link to your branch and click "Submit Patch" at the top of the ticket. This will move the ticket into "Patch Available" status, indicating that your submission is ready for review. - 10. Wait for other developers or committers to review it and hopefully +1 the ticket (see :doc:`how_to_review`). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable. - 11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into "Patch Available" once again. - -Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work. - - diff --git a/src/doc/4.0-alpha1/_sources/development/release_process.rst.txt b/src/doc/4.0-alpha1/_sources/development/release_process.rst.txt deleted file mode 100644 index b3c403215..000000000 --- a/src/doc/4.0-alpha1/_sources/development/release_process.rst.txt +++ /dev/null @@ -1,259 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. release_process: - -Release Process -*************** - -.. contents:: :depth: 3 - -|  -| - -.. attention:: - - WORK IN PROGRESS - * A number of these steps still have been finalised/tested. - * The use of people.apache.org needs to be replaced with svnpubsub and dist.apache.org - - -The steps for Release Managers to create, vote and publish releases for Apache Cassandra. - -While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC can complete the process of publishing and announcing the release. - - -Prerequisites -============= - -Background docs - * `ASF Release Policy `_ - * `ASF Release Distribution Policy `_ - * `ASF Release Best Practices `_ - - -A debian based linux OS is required to run the release steps from. Debian-based distros provide the required RPM, dpkg and repository management tools. - - -Create and publish your GPG key -------------------------------- - -To create a GPG key, follow the `guidelines `_. -Include your public key in:: - - https://dist.apache.org/repos/dist/release/cassandra/KEYS - - -Publish your GPG key in a PGP key server, such as `MIT Keyserver `_. - - -Create Release Artifacts -======================== - -Any committer can perform the following steps to create and call a vote on a proposed release. - -Check that no open jira tickets are urgent and currently being worked on. -Also check with a PMC that there's security vulnerabilities currently being worked on in private. - -Perform the Release -------------------- - -Run the following commands to generate and upload release artifacts, to a nexus staging repository and distribution location:: - - - cd ~/git - git clone https://github.com/apache/cassandra-builds.git - # Edit the variables at the top of `cassandra-builds/cassandra-release/prepare_release.sh` - - # After cloning cassandra-builds repo, the prepare_release.sh is run from the actual cassandra git checkout, - # on the branch/commit that we wish to tag for the tentative release along with version number to tag. - # For example here might be `3.11` and `3.11.3` - cd ~/git/cassandra/ - git checkout cassandra- - ../cassandra-builds/cassandra-release/prepare_release.sh -v - -If successful, take note of the email text output which can be used in the next section "Call for a Vote". - -The ``prepare_release.sh`` script does not yet generate and upload the rpm distribution packages. -To generate and upload them do:: - - cd ~/git/cassandra-build - docker build -f docker/centos7-image.docker docker/ - docker run --rm -v `pwd`/dist:/dist `docker images -f label=org.cassandra.buildenv=centos -q` /home/build/build-rpms.sh -tentative - rpmsign --addsign dist/*.rpm - -For more information on the above steps see the `cassandra-builds documentation `_. 
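Before moving on, it can be worth sanity-checking the signatures that were just added to the packages; a quick, illustrative check (paths as in the commands above)::

    $ rpm --checksig dist/*.rpm    # each package should report an OK gpg signature
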
-The next step is to copy and commit these binaries to staging svnpubsub:: - - # FIXME the following commands is wrong while people.apache.org is still used instead of svnpubsub and dist.apache.org - cd ~/git - svn co https://dist.apache.org/repos/dist/dev/cassandra cassandra-dist-dev - mkdir cassandra-dist-dev/ - cp cassandra-build/dist/*.rpm cassandra-dist-dev// - - svn add cassandra-dist-dev/ - svn ci cassandra-dist-dev/ - - -Call for a Vote -=============== - -Fill out the following email template and send to the dev mailing list:: - - I propose the following artifacts for release as . - - sha1: - - Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/-tentative - - Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-/org/apache/cassandra/apache-cassandra// - - Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-/ - - The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/ - - The vote will be open for 72 hours (longer if needed). - - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=-tentative - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=-tentative - - - -Post-vote operations -==================== - -Any PMC can perform the following steps to formalize and publish a successfully voted release. - -Publish Artifacts ------------------ - -Run the following commands to publish the voted release artifacts:: - - cd ~/git - git clone https://github.com/apache/cassandra-builds.git - # edit the variables at the top of `finish_release.sh` - - # After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout, - # on the tentative release tag that we wish to tag for the final release version number tag. - cd ~/git/cassandra/ - git checkout -tentative - ../cassandra-builds/cassandra-release/finish_release.sh -v - -If successful, take note of the email text output which can be used in the next section "Send Release Announcement". -The output will also list the next steps that are required. The first of these is to commit changes made to your https://dist.apache.org/repos/dist/release/cassandra/ checkout. - - -Promote Nexus Repository ------------------------- - - * Login to `Nexus repository `_ again. - * Click on "Staging" and then on the repository with id "cassandra-staging". - * Find your closed staging repository, right click on it and choose "Promote". - * Select the "Releases" repository and click "Promote". - * Next click on "Repositories", select the "Releases" repository and validate that your artifacts exist as you expect them. - -Sign and Upload Distribution Packages to Bintray ---------------------------------------- - -Run the following command:: - - cd ~/git - # FIXME the next command is wrong while people.apache.org is used instead of svnpubsub and dist.apache.org - svn mv https://dist.apache.org/repos/dist/dev/cassandra/ https://dist.apache.org/repos/dist/release/cassandra/ - - # Create the yum metadata, sign the metadata, and sign some files within the signed repo metadata that the ASF sig tool errors out on - svn co https://dist.apache.org/repos/dist/release/cassandra/redhat/ cassandra-dist-redhat - cd cassandra-dist-redhat/x/ - createrepo . 
- gpg --detach-sign --armor repodata/repomd.xml - for f in `find repodata/ -name *.bz2`; do - gpg --detach-sign --armor $f; - done - - svn co https://dist.apache.org/repos/dist/release/cassandra/ cassandra-dist- - cd cassandra-dist- - cassandra-build/cassandra-release/upload_bintray.sh cassandra-dist- - - -Update and Publish Website --------------------------- - -See `docs https://svn.apache.org/repos/asf/cassandra/site/src/README`_ for building and publishing the website. -Also update the CQL doc if appropriate. - -Release version in JIRA ------------------------ - -Release the JIRA version. - - * In JIRA go to the version that you want to release and release it. - * Create a new version, if it has not been done before. - -Update to Next Development Version ----------------------------------- - -Edit and commit ``build.xml`` so the base.version property points to the next version. - -Wait for Artifacts to Sync --------------------------- - -Wait for the artifacts to sync at http://www.apache.org/dist/cassandra/ - -Send Release Announcement -------------------------- - -Fill out the following email template and send to both user and dev mailing lists:: - - The Cassandra team is pleased to announce the release of Apache Cassandra version . - - Apache Cassandra is a fully distributed database. It is the right choice - when you need scalability and high availability without compromising - performance. - - http://cassandra.apache.org/ - - Downloads of source and binary distributions are listed in our download - section: - - http://cassandra.apache.org/download/ - - This version is release[1] on the series. As always, - please pay attention to the release notes[2] and let us know[3] if you - were to encounter any problem. - - Enjoy! - - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb= - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb= - [3]: https://issues.apache.org/jira/browse/CASSANDRA - -Update Slack Cassandra topic ---------------------------- - -Update topic in ``cassandra`` :ref:`Slack room ` - /topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don't ask to ask - -Tweet from @Cassandra ---------------------- - -Tweet the new release, from the @Cassandra account - -Delete Old Releases -------------------- - -As described in `When to Archive `_. -Also check people.apache.org as previous release scripts used it. diff --git a/src/doc/4.0-alpha1/_sources/development/testing.rst.txt b/src/doc/4.0-alpha1/_sources/development/testing.rst.txt deleted file mode 100644 index 7f38fe590..000000000 --- a/src/doc/4.0-alpha1/_sources/development/testing.rst.txt +++ /dev/null @@ -1,98 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. 
limitations under the License. - -.. highlight:: none -.. _testing: - -Testing -******* - -Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you're working on. - - -Unit Testing -============ - -The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the ``test/unit`` directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example. - -.. code-block:: java - - @Test - public void testBatchAndList() throws Throwable - { - createTable("CREATE TABLE %s (k int PRIMARY KEY, l list)"); - execute("BEGIN BATCH " + - "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " + - "APPLY BATCH"); - - assertRows(execute("SELECT l FROM %s WHERE k = 0"), - row(list(1, 2, 3))); - } - -Unit tests can be run from the command line using the ``ant test`` command, ``ant test -Dtest.name=`` to execute a test suite or ``ant testsome -Dtest.name= -Dtest.methods=[,testmethod2]`` for individual tests. For example, to run all test methods in the ``org.apache.cassandra.cql3.SimpleQueryTest`` class, you would run:: - - ant test -Dtest.name=SimpleQueryTest - -To run only the ``testStaticCompactTables()`` test method from that class, you would run:: - - ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables - -If you see an error like this:: - - Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found: - org/apache/tools/ant/taskdefs/optional/junit/JUnitTask using the classloader - AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar] - -You will need to install the ant-optional package since it contains the ``JUnitTask`` class. - -Long running tests ------------------- - -Test that consume a significant amount of time during execution can be found in the ``test/long`` directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under ``test/long`` only when using the ``ant long-test`` target. - -DTests -====== - -One way of doing integration or system testing at larger scale is by using `dtest `_, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ``ccmlib`` from the `ccm `_ project. Dtests will setup clusters using this library just as you do running ad-hoc ``ccm`` commands on your local machine. Afterwards dtests will use the `Python driver `_ to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes. 
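For a sense of what the dtests drive under the hood, an ad-hoc ``ccm`` session on a local machine looks roughly like the following; the cluster name and version are only examples::

    $ ccm create test -v 3.11.4 -n 3 -s   # create and start a local three-node cluster
    $ ccm status                          # confirm all nodes are up
    $ ccm node1 ring                      # run a nodetool-style command against one node
    $ ccm remove test                     # tear the cluster down again
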
- -Using dtests helps us to prevent regression bugs by continually executing tests on the `CI server `_ against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration `here `_. - -The best way to learn how to write dtests is probably by reading the introduction "`How to Write a Dtest `_" and by looking at existing, recently updated tests in the project. New tests must follow certain `style conventions `_ that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefor you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR. - -Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will able to support you, and in some cases they may offer to write a dtest for you. - -Performance Testing -=================== - -Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable. - -Cassandra Stress Tool ---------------------- - -See :ref:`cassandra_stress` - -cstar_perf ----------- - -Another tool available on github is `cstar_perf `_ that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it. - -CircleCI --------- -Cassandra ships with a default `CircleCI `_ configuration, to enable running tests on your branches, you need to go the CircleCI website, click "Login" and log in with your github account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ``ant eclipse-warnings`` and ``ant test`` will be run. If you up the parallelism to 4, it also runs ``ant long-test``, ``ant test-compression`` and ``ant stress-test`` - - diff --git a/src/doc/4.0-alpha1/_sources/faq/index.rst.txt b/src/doc/4.0-alpha1/_sources/faq/index.rst.txt deleted file mode 100644 index acb7538d6..000000000 --- a/src/doc/4.0-alpha1/_sources/faq/index.rst.txt +++ /dev/null @@ -1,299 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -Frequently Asked Questions -========================== - -- :ref:`why-cant-list-all` -- :ref:`what-ports` -- :ref:`what-happens-on-joins` -- :ref:`asynch-deletes` -- :ref:`one-entry-ring` -- :ref:`can-large-blob` -- :ref:`nodetool-connection-refused` -- :ref:`to-batch-or-not-to-batch` -- :ref:`selinux` -- :ref:`how-to-unsubscribe` -- :ref:`cassandra-eats-all-my-memory` -- :ref:`what-are-seeds` -- :ref:`are-seeds-SPOF` -- :ref:`why-message-dropped` -- :ref:`oom-map-failed` -- :ref:`what-on-same-timestamp-update` -- :ref:`why-bootstrapping-stream-error` - -.. _why-cant-list-all: - -Why can't I set ``listen_address`` to listen on 0.0.0.0 (all my addresses)? ---------------------------------------------------------------------------- - -Cassandra is a gossip-based distributed system and ``listen_address`` is the address a node tells other nodes to reach -it at. Telling other nodes "contact me on any of my addresses" is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen. - -If you don't want to manually specify an IP to ``listen_address`` for each node in your cluster (understandable!), leave -it blank and Cassandra will use ``InetAddress.getLocalHost()`` to pick an address. Then it's up to you or your ops team -to make things resolve correctly (``/etc/hosts/``, dns, etc). - -One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769). - -See :jira:`256` and :jira:`43` for more gory details. - -.. _what-ports: - -What ports does Cassandra use? ------------------------------- - -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX. The internode communication and native protocol ports -are configurable in the :ref:`cassandra-yaml`. The JMX port is configurable in ``cassandra-env.sh`` (through JVM -options). All ports are TCP. - -.. _what-happens-on-joins: - -What happens to existing data in my cluster when I add new nodes? ------------------------------------------------------------------ - -When a new nodes joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data -to itself. See :ref:`topology-changes`. - -.. _asynch-deletes: - -I delete data from Cassandra, but disk usage stays the same. What gives? ------------------------------------------------------------------------- - -Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can't actually be removed -when you perform a delete, instead, a marker (also called a "tombstone") is written to indicate the value's new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See :ref:`compaction` for more detail. - -.. _one-entry-ring: - -Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring? ------------------------------------------------------------------------------------------------------------------- - -This happens when you have the same token assigned to each node. Don't do that. - -Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes. 
- -The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart. - -.. _change-replication-factor: - -Can I change the replication factor (a a keyspace) on a live cluster? ---------------------------------------------------------------------- - -Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data: - -- :ref:`Alter ` the replication factor for desired keyspace (using cqlsh for instance). -- If you're reducing the replication factor, run ``nodetool cleanup`` on the cluster to remove surplus replicated data. - Cleanup runs on a per-node basis. -- If you're increasing the replication factor, run ``nodetool repair -full`` to ensure data is replicated according to the new - configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster - performance. It's highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will - most likely swamp it. Note that you will need to run a full repair (``-full``) to make sure that already repaired - sstables are not skipped. - -.. _can-large-blob: - -Can I Store (large) BLOBs in Cassandra? ---------------------------------------- - -Cassandra isn't optimized for large file or BLOB storage and a single ``blob`` value is always read and send to the -client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to -manually split large blobs into smaller chunks. - -Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due the -``max_mutation_size_in_kb`` configuration of the :ref:`cassandra-yaml` file (which default to half of -``commitlog_segment_size_in_mb``, which itself default to 32MB). - -.. _nodetool-connection-refused: - -Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives? --------------------------------------------------------------------------------------- - -Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions. - -If you are not using DNS, then make sure that your ``/etc/hosts`` files are accurate on both ends. If that fails, try -setting the ``-Djava.rmi.server.hostname=`` JVM option near the bottom of ``cassandra-env.sh`` to an -interface that you can reach from the remote machine. - -.. _to-batch-or-not-to-batch: - -Will batching my operations speed up my bulk load? --------------------------------------------------- - -No. Using batches to load data will generally just add "spikes" of latency. Use asynchronous INSERTs instead, or use -true :ref:`bulk-loading`. - -An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch -stay reasonable). But never ever blindly batch everything! - -.. _selinux: - -On RHEL nodes are unable to join the ring ------------------------------------------ - -Check if `SELinux `__ is on; if it is, turn it off. - -.. _how-to-unsubscribe: - -How do I unsubscribe from the email list? ------------------------------------------ - -Send an email to ``user-unsubscribe@cassandra.apache.org``. - -.. 
_cassandra-eats-all-my-memory: - -Why does top report that Cassandra is using a lot more memory than the Java heap max? -------------------------------------------------------------------------------------- - -Cassandra uses `Memory Mapped Files `__ (mmap) internally. That is, we -use the operating system's virtual memory system to map a number of on-disk files into the Cassandra process' address -space. This will "use" virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that. - -What matters from the perspective of "memory use" in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap'd /dev/zero, which represent real memory used. The key issue is that for a mmap'd file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write. - -The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don't -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail `here `__. - -.. _what-are-seeds: - -What are seeds? ---------------- - -Seeds are used during startup to discover the cluster. - -If you configure your nodes to refer some node as seed, nodes in your ring tend to send Gossip message to seeds more -often (also see the :ref:`section on gossip `) than to non-seeds. In other words, seeds are worked as hubs of -Gossip network. With seeds, each node can detect status changes of other nodes quickly. - -Seeds are also referred by new nodes on bootstrap to learn other nodes in ring. When you add a new node to ring, you -need to specify at least one live seed to contact. Once a node join the ring, it learns about the other nodes, so it -doesn't need seed on subsequent boot. - -You can make a seed a node at any time. There is nothing special about seed nodes. If you list the node in seed list it -is a seed - -Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself) -If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you -do not have to worry about bootstrap at all. - -Recommended usage of seeds: - -- pick two (or more) nodes per data center as seed nodes. -- sync the seed list to all your nodes - -.. _are-seeds-SPOF: - -Does single seed mean single point of failure? ----------------------------------------------- - -The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is -recommended to configure multiple seeds in production system. - -.. _cant-call-jmx-method: - -Why can't I call jmx method X on jconsole? ------------------------------------------- - -Some of JMX operations use array argument and as jconsole doesn't support array argument, those operations can't be -called with jconsole (the buttons are inactive for them). 
You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.
-
-.. _why-message-dropped:
-
-Why do I see "... messages dropped ..." in the logs?
------------------------------------------------------
-
-This is a symptom of load shedding -- Cassandra defending itself against more requests than it can handle.
-
-Internode messages which are received by a node, but do not get processed within their proper timeout (see ``read_request_timeout``, ``write_request_timeout``, ... in the :ref:`cassandra-yaml`), are dropped rather than processed, as the coordinator node will no longer be waiting for a response.
-
-For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.
-
-For reads, this means a read request may not have completed.
-
-Load shedding is part of the Cassandra architecture; if this is a persistent issue it is generally a sign of an overloaded node or cluster.
-
-.. _oom-map-failed:
-
-Cassandra dies with ``java.lang.OutOfMemoryError: Map failed``
---------------------------------------------------------------
-
-If Cassandra is dying **specifically** with the "Map failed" message, it means the OS is denying java the ability to lock more memory. On Linux, this typically means memlock is limited. Check ``/proc//limits`` to verify this and raise it (e.g. via ulimit in bash). You may also need to increase ``vm.max_map_count``. Note that the Debian package handles this for you automatically.
-
-
-.. _what-on-same-timestamp-update:
-
-What happens if two updates are made with the same timestamp?
--------------------------------------------------------------
-
-Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger value is selected.
-
-.. _why-bootstrapping-stream-error:
-
-Why does bootstrapping a new node fail with a "Stream failed" error?
----------------------------------------------------------------------
-
-Two main possibilities:
-
-#. the GC may be creating long pauses disrupting the streaming process
-#. compactions happening in the background hold streaming long enough that the TCP connection fails
-
-In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (the default is very high on Linux). Try running the following::
-
-   $ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-To make those settings permanent, add them to your ``/etc/sysctl.conf`` file.
-
-Note: `GCE `__'s firewall will always interrupt TCP connections that are inactive for more than 10 min. Running the above command is highly recommended in that environment.
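For completeness, making those keepalive settings permanent just means adding the same three values to ``/etc/sysctl.conf`` (or a drop-in file under ``/etc/sysctl.d/``) and reloading them with ``sudo sysctl -p``::

    net.ipv4.tcp_keepalive_time=60
    net.ipv4.tcp_keepalive_intvl=60
    net.ipv4.tcp_keepalive_probes=5
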
- - - - - - - - - - - diff --git a/src/doc/4.0-alpha1/_sources/getting_started/configuring.rst.txt b/src/doc/4.0-alpha1/_sources/getting_started/configuring.rst.txt deleted file mode 100644 index e71eeedbe..000000000 --- a/src/doc/4.0-alpha1/_sources/getting_started/configuring.rst.txt +++ /dev/null @@ -1,67 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra ---------------------- - -For running Cassandra on a single node, the default configuration file present at ``./conf/cassandra.yaml`` is enough, -you shouldn't need to change any configuration. However, when you deploy a cluster of nodes, or use clients that -are not on the same host, then there are some parameters that must be changed. - -The Cassandra configuration files can be found in the ``conf`` directory of tarballs. For packages, the configuration -files will be located in ``/etc/cassandra``. - -Main runtime properties -^^^^^^^^^^^^^^^^^^^^^^^ - -Most of configuration in Cassandra is done via yaml properties that can be set in ``cassandra.yaml``. At a minimum you -should consider setting the following properties: - -- ``cluster_name``: the name of your cluster. -- ``seeds``: a comma separated list of the IP addresses of your cluster seeds. -- ``storage_port``: you don't necessarily need to change this but make sure that there are no firewalls blocking this - port. -- ``listen_address``: the IP address of your node, this is what allows other nodes to communicate with this node so it - is important that you change it. Alternatively, you can set ``listen_interface`` to tell Cassandra which interface to - use, and consecutively which address to use. Set only one, not both. -- ``native_transport_port``: as for storage\_port, make sure this port is not blocked by firewalls as clients will - communicate with Cassandra on this port. - -Changing the location of directories -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following yaml properties control the location of directories: - -- ``data_file_directories``: one or more directories where data files are located. -- ``commitlog_directory``: the directory where commitlog files are located. -- ``saved_caches_directory``: the directory where saved caches are located. -- ``hints_directory``: the directory where hints are located. - -For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks. - -Environment variables -^^^^^^^^^^^^^^^^^^^^^ - -JVM-level settings such as heap size can be set in ``cassandra-env.sh``. You can add any additional JVM command line -argument to the ``JVM_OPTS`` environment variable; when Cassandra starts these arguments will be passed to the JVM. - -Logging -^^^^^^^ - -The logger in use is logback. 
You can change logging properties by editing ``logback.xml``. By default it will log at -INFO level into a file called ``system.log`` and at debug level into a file called ``debug.log``. When running in the -foreground, it will also log at INFO level to the console. - diff --git a/src/doc/4.0-alpha1/_sources/getting_started/drivers.rst.txt b/src/doc/4.0-alpha1/_sources/getting_started/drivers.rst.txt deleted file mode 100644 index 9a2c1567a..000000000 --- a/src/doc/4.0-alpha1/_sources/getting_started/drivers.rst.txt +++ /dev/null @@ -1,123 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _client-drivers: - -Client drivers --------------- - -Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver. - -Java -^^^^ - -- `Achilles `__ -- `Astyanax `__ -- `Casser `__ -- `Datastax Java driver `__ -- `Kundera `__ -- `PlayORM `__ - -Python -^^^^^^ - -- `Datastax Python driver `__ - -Ruby -^^^^ - -- `Datastax Ruby driver `__ - -C# / .NET -^^^^^^^^^ - -- `Cassandra Sharp `__ -- `Datastax C# driver `__ -- `Fluent Cassandra `__ - -Nodejs -^^^^^^ - -- `Datastax Nodejs driver `__ -- `Node-Cassandra-CQL `__ - -PHP -^^^ - -- `CQL \| PHP `__ -- `Datastax PHP driver `__ -- `PHP-Cassandra `__ -- `PHP Library for Cassandra `__ - -C++ -^^^ - -- `Datastax C++ driver `__ -- `libQTCassandra `__ - -Scala -^^^^^ - -- `Datastax Spark connector `__ -- `Phantom `__ -- `Quill `__ - -Clojure -^^^^^^^ - -- `Alia `__ -- `Cassaforte `__ -- `Hayt `__ - -Erlang -^^^^^^ - -- `CQerl `__ -- `Erlcass `__ - -Go -^^ - -- `CQLc `__ -- `Gocassa `__ -- `GoCQL `__ - -Haskell -^^^^^^^ - -- `Cassy `__ - -Rust -^^^^ - -- `Rust CQL `__ - -Perl -^^^^ - -- `Cassandra::Client and DBD::Cassandra `__ - -Elixir -^^^^^^ - -- `Xandra `__ -- `CQEx `__ - -Dart -^^^^ - -- `dart_cassandra_cql `__ diff --git a/src/doc/4.0-alpha1/_sources/getting_started/index.rst.txt b/src/doc/4.0-alpha1/_sources/getting_started/index.rst.txt deleted file mode 100644 index 4ca9c4d40..000000000 --- a/src/doc/4.0-alpha1/_sources/getting_started/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. 
distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Getting Started -=============== - -This section covers how to get started using Apache Cassandra and should be the first thing to read if you are new to -Cassandra. - -.. toctree:: - :maxdepth: 2 - - installing - configuring - querying - drivers - - diff --git a/src/doc/4.0-alpha1/_sources/getting_started/installing.rst.txt b/src/doc/4.0-alpha1/_sources/getting_started/installing.rst.txt deleted file mode 100644 index fb8a0463f..000000000 --- a/src/doc/4.0-alpha1/_sources/getting_started/installing.rst.txt +++ /dev/null @@ -1,106 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Installing Cassandra --------------------- - -Prerequisites -^^^^^^^^^^^^^ - -- The latest version of Java 8, either the `Oracle Java Standard Edition 8 - `__ or `OpenJDK 8 `__. To - verify that you have the correct version of java installed, type ``java -version``. - -- For using cqlsh, the latest version of `Python 2.7 `__. To verify that you have - the correct version of Python installed, type ``python --version``. - -Installation from binary tarball files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Download the latest stable release from the `Apache Cassandra downloads website `__. - -- Untar the file somewhere, for example: - -:: - - tar -xzvf apache-cassandra-3.6-bin.tar.gz - -The files will be extracted into ``apache-cassandra-3.6``, you need to substitute 3.6 with the release number that you -have downloaded. - -- Optionally add ``apache-cassandra-3.6\bin`` to your path. -- Start Cassandra in the foreground by invoking ``bin/cassandra -f`` from the command line. Press "Control-C" to stop - Cassandra. Start Cassandra in the background by invoking ``bin/cassandra`` from the command line. Invoke ``kill pid`` - or ``pkill -f CassandraDaemon`` to stop Cassandra, where pid is the Cassandra process id, which you can find for - example by invoking ``pgrep -f CassandraDaemon``. -- Verify that Cassandra is running by invoking ``bin/nodetool status`` from the command line. -- Configuration files are located in the ``conf`` sub-directory. -- Since Cassandra 2.1, log and data directories are located in the ``logs`` and ``data`` sub-directories respectively. - Older versions defaulted to ``/var/log/cassandra`` and ``/var/lib/cassandra``. Due to this, it is necessary to either - start Cassandra with root privileges or change ``conf/cassandra.yaml`` to use directories owned by the current user, - as explained below in the section on changing the location of directories. 
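Put together, a first run from the tarball is only a handful of commands; substitute the release number you actually downloaded for 3.6::

    $ tar -xzvf apache-cassandra-3.6-bin.tar.gz
    $ cd apache-cassandra-3.6
    $ bin/cassandra -f        # runs in the foreground; press Control-C to stop it
    $ bin/nodetool status     # from a second terminal, verify the node is up and normal
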
- -Installation from Debian packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Add the Apache repository of Cassandra to ``/etc/apt/sources.list.d/cassandra.sources.list``, for example for version - 3.6: - -:: - - echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list - -- Add the Apache Cassandra repository keys: - -:: - - curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add - - -- Update the repositories: - -:: - - sudo apt-get update - -- If you encounter this error: - -:: - - GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA - -Then add the public key A278B781FE4B2BDA as follows: - -:: - - sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA - -and repeat ``sudo apt-get update``. The actual key may be different, you get it from the error message itself. For a -full list of Apache contributors public keys, you can refer to `this link `__. - -- Install Cassandra: - -:: - - sudo apt-get install cassandra - -- You can start Cassandra with ``sudo service cassandra start`` and stop it with ``sudo service cassandra stop``. - However, normally the service will start automatically. For this reason be sure to stop it if you need to make any - configuration changes. -- Verify that Cassandra is running by invoking ``nodetool status`` from the command line. -- The default location of configuration files is ``/etc/cassandra``. -- The default location of log and data directories is ``/var/log/cassandra/`` and ``/var/lib/cassandra``. diff --git a/src/doc/4.0-alpha1/_sources/getting_started/querying.rst.txt b/src/doc/4.0-alpha1/_sources/getting_started/querying.rst.txt deleted file mode 100644 index 55b162bb4..000000000 --- a/src/doc/4.0-alpha1/_sources/getting_started/querying.rst.txt +++ /dev/null @@ -1,52 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Inserting and querying ----------------------- - -The API to Cassandra is :ref:`CQL `, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done: - -- either using cqlsh, -- or through a client driver for Cassandra. - -CQLSH -^^^^^ - -cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:: - - $ bin/cqlsh localhost - Connected to Test Cluster at localhost:9042. - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - Use HELP for help. 
- cqlsh> SELECT cluster_name, listen_address FROM system.local; - - cluster_name | listen_address - --------------+---------------- - Test Cluster | 127.0.0.1 - - (1 rows) - cqlsh> - -See the :ref:`cqlsh section ` for full documentation. - -Client drivers -^^^^^^^^^^^^^^ - -A lot of client drivers are provided by the Community and a list of known drivers is provided in :ref:`the next section -`. You should refer to the documentation of each drivers for more information on how to use them. diff --git a/src/doc/4.0-alpha1/_sources/index.rst.txt b/src/doc/4.0-alpha1/_sources/index.rst.txt deleted file mode 100644 index 9f8016b9b..000000000 --- a/src/doc/4.0-alpha1/_sources/index.rst.txt +++ /dev/null @@ -1,42 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Welcome to Apache Cassandra's documentation! -============================================ - -This is the official documentation for `Apache Cassandra `__ |version|. If you would like -to contribute to this documentation, you are welcome to do so by submitting your contribution like any other patch -following `these instructions `__. - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting_started/index - architecture/index - data_modeling/index - cql/index - configuration/index - operating/index - tools/index - troubleshooting/index - development/index - faq/index - plugins/index - - bugs - contactus diff --git a/src/doc/4.0-alpha1/_sources/operating/audit_logging.rst.txt b/src/doc/4.0-alpha1/_sources/operating/audit_logging.rst.txt deleted file mode 100644 index 068209ee8..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/audit_logging.rst.txt +++ /dev/null @@ -1,236 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - - - -Audit Logging ------------------- - -Audit logging in Cassandra logs every incoming CQL command request, Authentication (successful as well as unsuccessful login) -to C* node. 
Two implementations are currently provided; a custom logger can also be implemented and injected with its -class name as a parameter in cassandra.yaml. - -- ``BinAuditLogger`` An efficient way to log events to file in a binary format. -- ``FileAuditLogger`` Logs events to the ``audit/audit.log`` file using the slf4j logger. - -*Recommendation:* ``BinAuditLogger`` is the community-recommended logger because of its performance. - -What does it capture -^^^^^^^^^^^^^^^^^^^^^^^ - -Audit logging captures the following events: - -- Successful as well as unsuccessful login attempts. - -- All database commands executed via the Native protocol (CQL), whether attempted or successfully executed. - -Limitations -^^^^^^^^^^^ - -Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution time stamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log. - -What does it log -^^^^^^^^^^^^^^^^^^^ -Each audit log implementation has access to the following attributes, and for the default text based logger these fields are concatenated with ``|`` to yield the final message. - - - ``user``: User name (if available) - - ``host``: Host IP, where the command is being executed - - ``source ip address``: Source IP address from where the request initiated - - ``source port``: Source port number from where the request initiated - - ``timestamp``: Unix time stamp - - ``type``: Type of the request (SELECT, INSERT, etc.) - - ``category`` - Category of the request (DDL, DML, etc.) - - ``keyspace`` - Keyspace (if applicable) on which the request is targeted to be executed - - ``scope`` - Table/Aggregate name/ function name/ trigger name etc., as applicable - - ``operation`` - CQL command being executed - -How to configure -^^^^^^^^^^^^^^^^^^ -The audit log can be configured using cassandra.yaml. If you want to try audit logging on one node, it can also be enabled and configured using ``nodetool``. - -cassandra.yaml configurations for AuditLog -""""""""""""""""""""""""""""""""""""""""""""" - - ``enabled``: This option enables/disables the audit log - - ``logger``: Class name of the logger/custom logger. - - ``audit_logs_dir``: Audit logs directory location; if not set, defaults to `cassandra.logdir.audit` or `cassandra.logdir` + /audit/ - - ``included_keyspaces``: Comma separated list of keyspaces to be included in audit log, default - includes all keyspaces - - ``excluded_keyspaces``: Comma separated list of keyspaces to be excluded from audit log, default - excludes no keyspace except `system`, `system_schema` and `system_virtual_schema` - - ``included_categories``: Comma separated list of Audit Log Categories to be included in audit log, default - includes all categories - - ``excluded_categories``: Comma separated list of Audit Log Categories to be excluded from audit log, default - excludes no category - - ``included_users``: Comma separated list of users to be included in audit log, default - includes all users - - ``excluded_users``: Comma separated list of users to be excluded from audit log, default - excludes no user - - -The list of available categories is: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE - -NodeTool command to enable AuditLog -""""""""""""""""""""""""""""""""""""" -``enableauditlog``: Enables the audit log with the yaml defaults. The yaml configurations can be overridden using options via the nodetool command. 
- -:: - - nodetool enableauditlog - -Options -********** - - -``--excluded-categories`` - Comma separated list of Audit Log Categories to be excluded for - audit log. If not set the value from cassandra.yaml will be used - -``--excluded-keyspaces`` - Comma separated list of keyspaces to be excluded for audit log. If - not set the value from cassandra.yaml will be used. - Please remember that `system`, `system_schema` and `system_virtual_schema` are excluded by default; - if you are overwriting this option via nodetool, - remember to add these keyspaces back if you don't want them in audit logs - -``--excluded-users`` - Comma separated list of users to be excluded for audit log. If not - set the value from cassandra.yaml will be used - -``--included-categories`` - Comma separated list of Audit Log Categories to be included for - audit log. If not set the value from cassandra.yaml will be used - -``--included-keyspaces`` - Comma separated list of keyspaces to be included for audit log. If - not set the value from cassandra.yaml will be used - -``--included-users`` - Comma separated list of users to be included for audit log. If not - set the value from cassandra.yaml will be used - -``--logger`` - Logger name to be used for AuditLogging. Default BinAuditLogger. If - not set the value from cassandra.yaml will be used - - -NodeTool command to disable AuditLog -""""""""""""""""""""""""""""""""""""""" - -``disableauditlog``: Disables AuditLog. - -:: - - nodetool disableauditlog - - - - - - - -NodeTool command to reload AuditLog filters -""""""""""""""""""""""""""""""""""""""""""""" - -``enableauditlog``: The nodetool enableauditlog command can be used to reload audit log filters when called with the default or previous ``loggername`` and updated filters - -E.g., - -:: - - nodetool enableauditlog --loggername --included-keyspaces - - - -View the contents of AuditLog Files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -``auditlogviewer`` is a new tool introduced to help view the contents of the binary log files in human readable text format. - -:: - - auditlogviewer [...] [options] - -Options -"""""""" - -``-f,--follow`` - Upon reaching the end of the log continue indefinitely - waiting for more records -``-r,--roll_cycle`` - How often the log file was rolled. May be - necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY, - DAILY). Default HOURLY. - -``-h,--help`` - Display this help message - -For example, to dump the contents of audit log files on the console: - -:: - - auditlogviewer /logs/cassandra/audit - -Sample output -""""""""""""" - -:: - - LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1" - - - -Configuring BinAuditLogger -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To use ``BinAuditLogger`` as a logger in AuditLogging, set the logger to ``BinAuditLogger`` in cassandra.yaml under the ``audit_logging_options`` section. ``BinAuditLogger`` can be further configured using its advanced options in cassandra.yaml. - - -Advanced Options for BinAuditLogger -"""""""""""""""""""""""""""""""""""""" - -``block`` - Indicates if the AuditLog should block if it falls behind or should drop audit log records. Default is set to ``true`` so that AuditLog records won't be lost - -``max_queue_weight`` - Maximum weight of in memory queue for records waiting to be written to the audit log file before blocking or dropping the log records. 
Default is set to ``256 * 1024 * 1024`` - -``max_log_size`` - Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is set to ``16L * 1024L * 1024L * 1024L`` - -``roll_cycle`` - How often to roll Audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY.For more options, refer: net.openhft.chronicle.queue.RollCycles. Default is set to ``"HOURLY"`` - -Configuring FileAuditLogger -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To use ``FileAuditLogger`` as a logger in AuditLogging, apart from setting the class name in cassandra.yaml, following configuration is needed to have the audit log events to flow through separate log file instead of system.log - - -.. code-block:: xml - - - - ${cassandra.logdir}/audit/audit.log - - - ${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip - - 50MB - 30 - 5GB - - - %-5level [%thread] %date{ISO8601} %F:%L - %msg%n - - - - - - - diff --git a/src/doc/4.0-alpha1/_sources/operating/backups.rst.txt b/src/doc/4.0-alpha1/_sources/operating/backups.rst.txt deleted file mode 100644 index c071e83b5..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/backups.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Backups -======= - -.. todo:: TODO diff --git a/src/doc/4.0-alpha1/_sources/operating/bloom_filters.rst.txt b/src/doc/4.0-alpha1/_sources/operating/bloom_filters.rst.txt deleted file mode 100644 index 0b37c18da..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/bloom_filters.rst.txt +++ /dev/null @@ -1,65 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Bloom Filters -------------- - -In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter. 
- -Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file. - -While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the -the ``bloom_filter_fp_chance`` to a float between 0 and 1. - -The default value for ``bloom_filter_fp_chance`` is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases. - -Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the ``bloom_filter_fp_chance`` gets closer to 0), memory usage -increases non-linearly - the bloom filter for ``bloom_filter_fp_chance = 0.01`` will require about three times as much -memory as the same table with ``bloom_filter_fp_chance = 0.1``. - -Typical values for ``bloom_filter_fp_chance`` are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case: - -- Users with more RAM and slower disks may benefit from setting the ``bloom_filter_fp_chance`` to a numerically lower - number (such as 0.01) to avoid excess IO operations -- Users with less RAM, more dense nodes, or very fast disks may tolerate a higher ``bloom_filter_fp_chance`` in order to - save RAM at the expense of excess IO operations -- In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics - workloads), setting the ``bloom_filter_fp_chance`` to a much higher number is acceptable. - -Changing -^^^^^^^^ - -The bloom filter false positive chance is visible in the ``DESCRIBE TABLE`` output as the field -``bloom_filter_fp_chance``. Operators can change the value with an ``ALTER TABLE`` statement: -:: - - ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01 - -Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ``ALTER TABLE`` statement, new -files on disk will be written with the new ``bloom_filter_fp_chance``, but existing sstables will not be modified until -they are compacted - if an operator needs a change to ``bloom_filter_fp_chance`` to take effect, they can trigger an -SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the progress. diff --git a/src/doc/4.0-alpha1/_sources/operating/bulk_loading.rst.txt b/src/doc/4.0-alpha1/_sources/operating/bulk_loading.rst.txt deleted file mode 100644 index c8224d5cb..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/bulk_loading.rst.txt +++ /dev/null @@ -1,24 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. 
http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _bulk-loading: - -Bulk Loading ------------- - -.. todo:: TODO diff --git a/src/doc/4.0-alpha1/_sources/operating/cdc.rst.txt b/src/doc/4.0-alpha1/_sources/operating/cdc.rst.txt deleted file mode 100644 index a7177b544..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/cdc.rst.txt +++ /dev/null @@ -1,96 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Change Data Capture ------------------- - -Overview -^^^^^^^^ - -Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the -table property ``cdc=true`` (either when :ref:`creating the table ` or -:ref:`altering it `). Upon CommitLogSegment creation, a hard-link to the segment is created in the -directory specified in ``cassandra.yaml``. On segment fsync to disk, if CDC data is present anywhere in the segment a -_cdc.idx file is also created with the integer offset of how much data in the original segment is persisted -to disk. Upon final segment flush, a second line with the human-readable word "COMPLETED" will be added to the _cdc.idx -file indicating that Cassandra has completed all processing on the file. - -We use an index file rather than just encouraging clients to parse the log in real time off a memory mapped handle because data -can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx -file will ensure that you only parse CDC data for data that is durable. - -A threshold of total disk space allowed is specified in the yaml; once it is reached, newly allocated CommitLogSegments will -not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory. - -Configuration -^^^^^^^^^^^^^ - -Enabling or disabling CDC on a table -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -CDC is enabled or disabled through the `cdc` table property, for instance:: - - CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true; - - ALTER TABLE foo WITH cdc=true; - - ALTER TABLE foo WITH cdc=false; - -cassandra.yaml parameters -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following `cassandra.yaml` options are available for CDC: - -``cdc_enabled`` (default: false) - Enable or disable CDC operations node-wide. 
-``cdc_raw_directory`` (default: ``$CASSANDRA_HOME/data/cdc_raw``) - Destination for CommitLogSegments to be moved after all corresponding memtables are flushed. -``cdc_free_space_in_mb``: (default: min of 4096 and 1/8th volume space) - Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in - ``cdc_raw_directory``. -``cdc_free_space_check_interval_ms`` (default: 250) - When at capacity, we limit the frequency with which we re-calculate the space taken up by ``cdc_raw_directory`` to - prevent burning CPU cycles unnecessarily. Default is to check 4 times per second. - -.. _reading-commitlogsegments: - -Reading CommitLogSegments -^^^^^^^^^^^^^^^^^^^^^^^^^ -Use a `CommitLogReader.java -`__. -Usage is `fairly straightforward -`__ -with a `variety of signatures -`__ -available for use. In order to handle mutations read from disk, implement `CommitLogReadHandler -`__. - -Warnings -^^^^^^^^ - -**Do not enable CDC without some kind of consumption process in-place.** - -If CDC is enabled on a node and then on a table, the ``cdc_free_space_in_mb`` will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place. - -Further Reading -^^^^^^^^^^^^^^^ - -- `JIRA ticket `__ -- `JIRA ticket `__ diff --git a/src/doc/4.0-alpha1/_sources/operating/compaction.rst.txt b/src/doc/4.0-alpha1/_sources/operating/compaction.rst.txt deleted file mode 100644 index ace9aa9e4..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/compaction.rst.txt +++ /dev/null @@ -1,447 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _compaction: - -Compaction ----------- - -Types of compaction -^^^^^^^^^^^^^^^^^^^ - -The concept of compaction is used for different kinds of operations in Cassandra, the common thing about these -operations is that it takes one or more sstables and output new sstables. The types of compactions are; - -Minor compaction - triggered automatically in Cassandra. -Major compaction - a user executes a compaction over all sstables on the node. -User defined compaction - a user triggers a compaction on a given set of sstables. -Scrub - try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you - will need to run a full repair on the node. -Upgradesstables - upgrade sstables to the latest version. Run this after upgrading to a new major version. -Cleanup - remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been - bootstrapped since that node will take ownership of some ranges from those nodes. -Secondary index rebuild - rebuild the secondary indexes on the node. 
-Anticompaction - after repair the ranges that were actually repaired are split out of the sstables that existed when repair started. -Sub range compaction - It is possible to only compact a given sub range - this could be useful if you know a token that has been - misbehaving - either gathering many updates or many deletes. (``nodetool compact -st x -et y``) will pick - all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will - most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS - the resulting sstable will end up in L0. - -When is a minor compaction triggered? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -# When an sstable is added to the node through flushing/streaming etc. -# When autocompaction is enabled after being disabled (``nodetool enableautocompaction``) -# When compaction adds new sstables. -# A check for new minor compactions every 5 minutes. - -Merging sstables -^^^^^^^^^^^^^^^^ - -Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently. - -Tombstones and Garbage Collection (GC) Grace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Why Tombstones -~~~~~~~~~~~~~~ - -When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra. - -Deletes without tombstones -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Imagine a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If one of the nodes fails and and our delete operation only removes existing values we can end up with a cluster that -looks like:: - - [], [], [A] - -Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:: - - [A], [A], [A] - -This would cause our data to be resurrected even though it had been -deleted. - -Deletes with Tombstones -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting again with a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If instead of removing data we add a tombstone record, our single node failure situation will look like this.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A] - -Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]] - -Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as ``gc_grace_seconds`` for every table in Cassandra. - -The gc_grace_seconds parameter and Tombstone Removal -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The table level ``gc_grace_seconds`` parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. 
This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After ``gc_grace_seconds`` has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstable for a tombstone to be removed. More precisely, to -be able to drop an actual tombstone the following needs to be true; - -- The tombstone must be older than ``gc_grace_seconds`` -- If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older - than the tombstone containing X must be included in the same compaction. We don't need to care if the partition is in - an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older - than the data it cannot shadow that data. -- If the option ``only_purge_repaired_tombstones`` is enabled, tombstones are only removed if the data has also been - repaired. - -If a node remains down or disconnected for longer than ``gc_grace_seconds`` it's deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the "Deletes without Tombstones" section. -Note that tombstones will not be removed until a compaction event even if ``gc_grace_seconds`` has elapsed. - -The default value for ``gc_grace_seconds`` is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using ``WITH gc_grace_seconds``. - -TTL -^^^ - -Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least ``gc_grace_seconds``. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once. - -Fully expired sstables -^^^^^^^^^^^^^^^^^^^^^^ - -If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called ``sstableexpiredblockers`` that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -``TimeWindowCompactionStrategy`` (and the deprecated ``DateTieredCompactionStrategy``). With ``TimeWindowCompactionStrategy`` -it is possible to remove the guarantee (not check for shadowing data) by enabling ``unsafe_aggressive_sstable_expiration``. - -Repaired/unrepaired data -^^^^^^^^^^^^^^^^^^^^^^^^ - -With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. 
This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables. - -Data directories -^^^^^^^^^^^^^^^^ - -Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding -data getting undeleted: - -- It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings - and each one can run compactions independently from the others. -- Users can backup and restore a single data directory. -- Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk - backing two data directories, the big one will be limited the by the small one. One work around to this is to create - more data directories backed by the big disk. - -Single sstable tombstone compaction -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When an sstable is written a histogram with the tombstone expiry times is created and this is used to try to find -sstables with very many tombstones and run single sstable compaction on that sstable in hope of being able to drop -tombstones in that sstable. Before starting this it is also checked how likely it is that any tombstones will actually -will be able to be dropped how much this sstable overlaps with other sstables. To avoid most of these checks the -compaction option ``unchecked_tombstone_compaction`` can be enabled. - -.. _compaction-options: - -Common options -^^^^^^^^^^^^^^ - -There is a number of common options for all the compaction strategies; - -``enabled`` (default: true) - Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do - 'nodetool enableautocompaction' to start running compactions. -``tombstone_threshold`` (default: 0.2) - How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable. -``tombstone_compaction_interval`` (default: 86400s (1 day)) - Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure - that one sstable is not constantly getting recompacted - this option states how often we should try for a given - sstable. -``log_all`` (default: false) - New detailed compaction logging, see :ref:`below `. -``unchecked_tombstone_compaction`` (default: false) - The single sstable compaction has quite strict checks for whether it should be started, this option disables those - checks and for some usecases this might be needed. Note that this does not change anything for the actual - compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able - to drop any tombstones. 
-``only_purge_repaired_tombstone`` (default: false) - Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired. -``min_threshold`` (default: 4) - Lower limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. -``max_threshold`` (default: 32) - Upper limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. - -Further, see the section on each strategy for specific additional options. - -Compaction nodetool commands -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :ref:`nodetool ` utility provides a number of commands related to compaction: - -``enableautocompaction`` - Enable compaction. -``disableautocompaction`` - Disable compaction. -``setcompactionthroughput`` - How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this - throughput. -``compactionstats`` - Statistics about current and pending compactions. -``compactionhistory`` - List details about the last compactions. -``setcompactionthreshold`` - Set the min/max sstable count for when to trigger compaction, defaults to 4/32. - -Switching the compaction strategy and options using JMX -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:: - - org.apache.cassandra.db:type=ColumnFamilies,keyspace=,columnfamily= - -and the attribute to change is ``CompactionParameters`` or ``CompactionParametersJson`` if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an :ref:`ALTER TABLE ` statement - -for example:: - - { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10} - -The setting is kept until someone executes an :ref:`ALTER TABLE ` that touches the compaction -settings or restarts the node. - -.. _detailed-compaction-logging: - -More detailed compaction logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Enable with the compaction option ``log_all`` and a more detailed compaction log file will be produced in your log -directory. - -.. _STCS: - -Size Tiered Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The basic idea of ``SizeTieredCompactionStrategy`` (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within ``bucket_low`` and ``bucket_high`` of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket's sstables takes the most reads. - -Major compaction -~~~~~~~~~~~~~~~~ - -When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size. - -.. _stcs-options: - -STCS options -~~~~~~~~~~~~ - -``min_sstable_size`` (default: 50MB) - Sstables smaller than this are put in the same bucket. 
-``bucket_low`` (default: 0.5) - How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``bucket_low * avg_bucket_size < sstable_size`` (and the ``bucket_high`` condition holds, see below), then - the sstable is added to the bucket. -``bucket_high`` (default: 1.5) - How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``sstable_size < bucket_high * avg_bucket_size`` (and the ``bucket_low`` condition holds, see above), then - the sstable is added to the bucket. - -Defragmentation -~~~~~~~~~~~~~~~ - -Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster. - -.. _LCS: - -Leveled Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The idea of ``LeveledCompactionStrategy`` (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here. - -When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory. - -When deciding which level to compact LCS checks the higher levels first (with LCS, a "higher" level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level. - -Major compaction -~~~~~~~~~~~~~~~~ - -It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817. - -Bootstrapping -~~~~~~~~~~~~~ - -During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done. - -STCS in L0 -~~~~~~~~~~ - -If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. 
This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better. - -Starved sstables -~~~~~~~~~~~~~~~~ - -If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable\_size\_in\_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved. - -.. _lcs-options: - -LCS options -~~~~~~~~~~~ - -``sstable_size_in_mb`` (default: 160MB) - The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very - large partitions on the node. - -``fanout_size`` (default: 10) - The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning - this option. - -LCS also support the ``cassandra.disable_stcs_in_l0`` startup option (``-Dcassandra.disable_stcs_in_l0=true``) to avoid -doing STCS in L0. - -.. _TWCS: - -Time Window CompactionStrategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``TimeWindowCompactionStrategy`` (TWCS) is designed specifically for workloads where it's beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -``SizeTieredCompactionStrategy`` or ``LeveledCompactionStrategy``. The basic concept is that -``TimeWindowCompactionStrategy`` will create 1 sstable per file for a given window, where a window is simply calculated -as the combination of two primary options: - -``compaction_window_unit`` (default: DAYS) - A Java TimeUnit (MINUTES, HOURS, or DAYS). -``compaction_window_size`` (default: 1) - The number of units that make up a window. -``unsafe_aggressive_sstable_expiration`` (default: false) - Expired sstables will be dropped without checking its data is shadowing other sstables. This is a potentially - risky option that can lead to data loss or deleted data re-appearing, going beyond what - `unchecked_tombstone_compaction` does for single sstable compaction. Due to the risk the jvm must also be - started with `-Dcassandra.unsafe_aggressive_sstable_expiration=true`. - -Taken together, the operator can specify windows of virtually any size, and `TimeWindowCompactionStrategy` will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using `SizeTieredCompactionStrategy`. - -Ideally, operators should select a ``compaction_window_unit`` and ``compaction_window_size`` pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -(``'compaction_window_unit':'DAYS','compaction_window_size':3``). 
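As a concrete sketch of that 90 day TTL example (the keyspace, table and column names below are purely illustrative), the window settings and a table-level TTL could be declared together at creation time::

    CREATE TABLE ks.sensor_readings (
        sensor_id    int,
        reading_time timestamp,
        value        double,
        PRIMARY KEY (sensor_id, reading_time)
    ) WITH compaction = {'class': 'TimeWindowCompactionStrategy',
                         'compaction_window_unit': 'DAYS',
                         'compaction_window_size': 3}
      AND default_time_to_live = 7776000;  -- 90 days, giving roughly 30 three-day windows

The same compaction map can be applied to an existing table with ``ALTER TABLE ... WITH compaction = {...}``, keeping in mind the advice on changing TWCS options in the sections below.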
- -TimeWindowCompactionStrategy Operational Concerns -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways: - -- If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables - and flushed into the same SSTable, where it will remain comingled. -- If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data - will be comingled and flushed into the same SSTable. - -While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL ``USING TIMESTAMP``. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled). - -Changing TimeWindowCompactionStrategy Options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Operators wishing to enable ``TimeWindowCompactionStrategy`` on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected. - -Operators wishing to change ``compaction_window_unit`` or ``compaction_window_size`` can do so, but may trigger -additional compactions as adjacent windows are joined together. If the window size is decrease d (for example, from 24 -hours to 12 hours), then the existing SSTables will not be modified - TWCS can not split existing SSTables into multiple -windows. diff --git a/src/doc/4.0-alpha1/_sources/operating/compression.rst.txt b/src/doc/4.0-alpha1/_sources/operating/compression.rst.txt deleted file mode 100644 index 42a057b24..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/compression.rst.txt +++ /dev/null @@ -1,95 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Compression ------------ - -Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of -data on disk by compressing the SSTable in user-configurable compression ``chunk_length_in_kb``. Because Cassandra -SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates -to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when -UPDATE commands are issued. 
On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full -chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so -on). - -Configuring Compression -^^^^^^^^^^^^^^^^^^^^^^^ - -Compression is configured on a per-table basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. By -default, three options are relevant: - -- ``class`` specifies the compression class - Cassandra provides four classes (``LZ4Compressor``, - ``SnappyCompressor``, ``DeflateCompressor`` and ``ZstdCompressor``). The default is ``LZ4Compressor``. -- ``chunk_length_in_kb`` specifies the number of kilobytes of data per compression chunk. The default is 64KB. -- ``crc_check_chance`` determines how likely Cassandra is to verify the checksum on each compression chunk during - reads. The default is 1.0. -- ``compression_level`` is only applicable for ``ZstdCompressor`` and accepts values between ``-131072`` and ``2``. - -Users can set compression using the following syntax: - -:: - - CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'}; - -Or - -:: - - ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5}; - -Once enabled, compression can be disabled with ``ALTER TABLE`` setting ``enabled`` to ``false``: - -:: - - ALTER TABLE keyspace.table WITH compression = {'enabled':'false'}; - -Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ``ALTER TABLE``, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the SSTables on disk, -re-compressing the data in the process. - -Benefits and Uses -^^^^^^^^^^^^^^^^^ - -Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk. - -Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well. - -Operational Impact -^^^^^^^^^^^^^^^^^^ - -- Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per - terabyte of data on disk, though the exact usage varies with ``chunk_length_in_kb`` and compression ratios. - -- Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as - non-vnode bootstrap), the CPU overhead of compression can be a limiting factor. - -- The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a - way to ensure correctness of data on disk, compressed tables allow the user to set ``crc_check_chance`` (a float from - 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt. 
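For example, an operator willing to accept slightly weaker read-time verification on a particular table could lower the check probability (``keyspace.table`` is a placeholder, as in the earlier examples); note that in Cassandra 3.0 and later ``crc_check_chance`` can also be set as a top-level table option, independent of the ``compression`` map::

    ALTER TABLE keyspace.table WITH crc_check_chance = 0.5;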
- -Advanced Use -^^^^^^^^^^^^ - -Advanced users can provide their own compression class by implementing the interface at -``org.apache.cassandra.io.compress.ICompressor``. diff --git a/src/doc/4.0-alpha1/_sources/operating/hardware.rst.txt b/src/doc/4.0-alpha1/_sources/operating/hardware.rst.txt deleted file mode 100644 index ad3aa8d21..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/hardware.rst.txt +++ /dev/null @@ -1,87 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Hardware Choices ----------------- - -Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM. - -CPU -^^^ -Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes. - -Memory -^^^^^^ -Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java's Xmx system parameter). In addition to -the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and -counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system's page -cache, storing recently accessed portions files in RAM for rapid re-use. - -For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest: - -- ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption -- The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM -- Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection -- Heaps larger than 12GB should consider G1GC - -Disks -^^^^^ -Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables. - -Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. 
SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files. - -Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra's sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it's important that the commitlog -(``commitlog_directory``) be on one physical disk (not simply a partition, but a physical disk), and the data files -(``data_file_directories``) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk. - -In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it's typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5. - -Common Cloud Choices -^^^^^^^^^^^^^^^^^^^^ - -Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include: - -- m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate - workloads -- i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs -- m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) - storage - -Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives. diff --git a/src/doc/4.0-alpha1/_sources/operating/hints.rst.txt b/src/doc/4.0-alpha1/_sources/operating/hints.rst.txt deleted file mode 100644 index f79f18ab7..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/hints.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Hints ------ - -.. todo:: todo diff --git a/src/doc/4.0-alpha1/_sources/operating/index.rst.txt b/src/doc/4.0-alpha1/_sources/operating/index.rst.txt deleted file mode 100644 index e2cead255..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Operating Cassandra -=================== - -.. toctree:: - :maxdepth: 2 - - snitch - topo_changes - repair - read_repair - hints - compaction - bloom_filters - compression - cdc - backups - bulk_loading - metrics - security - hardware - diff --git a/src/doc/4.0-alpha1/_sources/operating/metrics.rst.txt b/src/doc/4.0-alpha1/_sources/operating/metrics.rst.txt deleted file mode 100644 index e87bd5ac1..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/metrics.rst.txt +++ /dev/null @@ -1,789 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _monitoring-metrics: - -Monitoring ----------- - -Metrics in Cassandra are managed using the `Dropwizard Metrics `__ library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of `built in -`__ and `third party -`__ reporter plugins. - -Metrics are collected for a single node. It's up to the operator to use an external monitoring system to aggregate them. - -Metric Types -^^^^^^^^^^^^ -All metrics reported by cassandra fit into one of the following types. - -``Gauge`` - An instantaneous measurement of a value. - -``Counter`` - A gauge for an ``AtomicLong`` instance. Typically this is consumed by monitoring the change since the last call to - see if there is a large increase compared to the norm. - -``Histogram`` - Measures the statistical distribution of values in a stream of data. 
- - In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th - percentiles. - -``Timer`` - Measures both the rate that a particular piece of code is called and the histogram of its duration. - -``Latency`` - Special type that tracks latency (in microseconds) with a ``Timer`` plus a ``Counter`` that tracks the total latency - accrued since starting. The former is useful if you track the change in total latency since the last check. Each - metric name of this type will have 'Latency' and 'TotalLatency' appended to it. - -``Meter`` - A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving - average throughputs. - -.. _table-metrics: - -Table Metrics -^^^^^^^^^^^^^ - -Each table in Cassandra has metrics responsible for tracking its state and performance. - -The metric names are all appended with the specific ``Keyspace`` and ``Table`` name. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Table...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Table keyspace= scope=
name=`` - -.. NOTE:: - There is a special table called '``all``' without a keyspace. This represents the aggregation of metrics across - **all** tables and keyspaces on the node. - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -MemtableOnHeapSize Gauge Total amount of data stored in the memtable that resides **on**-heap, including column related overhead and partitions overwritten. -MemtableOffHeapSize Gauge Total amount of data stored in the memtable that resides **off**-heap, including column related overhead and partitions overwritten. -MemtableLiveDataSize Gauge Total amount of live data stored in the memtable, excluding any data structure overhead. -AllMemtablesOnHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **on**-heap. -AllMemtablesOffHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **off**-heap. -AllMemtablesLiveDataSize Gauge Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead. -MemtableColumnsCount Gauge Total number of columns present in the memtable. -MemtableSwitchCount Counter Number of times flush has resulted in the memtable being switched out. -CompressionRatio Gauge Current compression ratio for all SSTables. -EstimatedPartitionSizeHistogram Gauge Histogram of estimated partition size (in bytes). -EstimatedPartitionCount Gauge Approximate number of keys in table. -EstimatedColumnCountHistogram Gauge Histogram of estimated number of columns. -SSTablesPerReadHistogram Histogram Histogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into acoount. -ReadLatency Latency Local read latency for this table. -RangeLatency Latency Local range scan latency for this table. -WriteLatency Latency Local write latency for this table. -CoordinatorReadLatency Timer Coordinator read latency for this table. -CoordinatorWriteLatency Timer Coordinator write latency for this table. -CoordinatorScanLatency Timer Coordinator range scan latency for this table. -PendingFlushes Counter Estimated number of flush tasks pending for this table. -BytesFlushed Counter Total number of bytes flushed since server [re]start. -CompactionBytesWritten Counter Total number of bytes written by compaction since server [re]start. -PendingCompactions Gauge Estimate of number of pending compactions for this table. -LiveSSTableCount Gauge Number of SSTables on disk for this table. -LiveDiskSpaceUsed Counter Disk space used by SSTables belonging to this table (in bytes). -TotalDiskSpaceUsed Counter Total disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC'd. -MinPartitionSize Gauge Size of the smallest compacted partition (in bytes). -MaxPartitionSize Gauge Size of the largest compacted partition (in bytes). -MeanPartitionSize Gauge Size of the average compacted partition (in bytes). -BloomFilterFalsePositives Gauge Number of false positives on table's bloom filter. -BloomFilterFalseRatio Gauge False positive ratio of table's bloom filter. -BloomFilterDiskSpaceUsed Gauge Disk space used by bloom filter (in bytes). -BloomFilterOffHeapMemoryUsed Gauge Off-heap memory used by bloom filter. 
-IndexSummaryOffHeapMemoryUsed Gauge Off-heap memory used by index summary. -CompressionMetadataOffHeapMemoryUsed Gauge Off-heap memory used by compression meta data. -KeyCacheHitRate Gauge Key cache hit rate for this table. -TombstoneScannedHistogram Histogram Histogram of tombstones scanned in queries on this table. -LiveScannedHistogram Histogram Histogram of live cells scanned in queries on this table. -ColUpdateTimeDeltaHistogram Histogram Histogram of column update time delta on this table. -ViewLockAcquireTime Timer Time taken acquiring a partition lock for materialized view updates on this table. -ViewReadTime Timer Time taken during the local read of a materialized view update. -TrueSnapshotsSize Gauge Disk space used by snapshots of this table including all SSTable components. -RowCacheHitOutOfRange Counter Number of table row cache hits that do not satisfy the query filter, thus went to disk. -RowCacheHit Counter Number of table row cache hits. -RowCacheMiss Counter Number of table row cache misses. -CasPrepare Latency Latency of paxos prepare round. -CasPropose Latency Latency of paxos propose round. -CasCommit Latency Latency of paxos commit round. -PercentRepaired Gauge Percent of table data that is repaired on disk. -BytesRepaired Gauge Size of table data repaired on disk -BytesUnrepaired Gauge Size of table data unrepaired on disk -BytesPendingRepair Gauge Size of table data isolated for an ongoing incremental repair -SpeculativeRetries Counter Number of times speculative retries were sent for this table. -SpeculativeFailedRetries Counter Number of speculative retries that failed to prevent a timeout -SpeculativeInsufficientReplicas Counter Number of speculative retries that couldn't be attempted due to lack of replicas -SpeculativeSampleLatencyNanos Gauge Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency. -WaitingOnFreeMemtableSpace Histogram Histogram of time spent waiting for free memtable space, either on- or off-heap. -DroppedMutations Counter Number of dropped mutations on this table. -AnticompactionTime Timer Time spent anticompacting before a consistent repair. -ValidationTime Timer Time spent doing validation compaction during repair. -SyncTime Timer Time spent doing streaming during repair. -BytesValidated Histogram Histogram over the amount of bytes read during validation. -PartitionsValidated Histogram Histogram over the number of partitions read during validation. -BytesAnticompacted Counter How many bytes we anticompacted. -BytesMutatedAnticompaction Counter How many bytes we avoided anticompacting because the sstable was fully contained in the repaired range. -MutatedAnticompactionGauge Gauge Ratio of bytes mutated vs total bytes repaired. -======================================= ============== =========== - -Keyspace Metrics -^^^^^^^^^^^^^^^^ -Each keyspace in Cassandra has metrics responsible for tracking its state and performance. - -Most of these metrics are the same as the ``Table Metrics`` above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.keyspace..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Keyspace scope= name=`` - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -WriteFailedIdealCL Counter Number of writes that failed to achieve the configured ideal consistency level, or 0 if none is configured -IdealCLWriteLatency Latency Coordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured -RepairTime Timer Total time spent as repair coordinator. -RepairPrepareTime Timer Total time spent preparing for repair. -======================================= ============== =========== - -ThreadPool Metrics -^^^^^^^^^^^^^^^^^^ - -Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It's important to monitor the state of these thread pools since they can tell you how saturated a -node is. - -The metric names are all appended with the specific ``ThreadPool`` name. The thread pools are also categorized under a -specific type. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ThreadPools...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ThreadPools path= scope= name=`` - -===================== ============== =========== -Name Type Description -===================== ============== =========== -ActiveTasks Gauge Number of tasks being actively worked on by this pool. -PendingTasks Gauge Number of tasks queued up on this pool. -CompletedTasks Counter Number of tasks completed. -TotalBlockedTasks Counter Number of tasks that were blocked due to queue saturation. -CurrentlyBlockedTask Counter Number of tasks that are currently blocked due to queue saturation but on retry will become unblocked. -MaxPoolSize Gauge The maximum number of threads in this pool. -MaxTasksQueued Gauge The maximum number of tasks queued before a task gets blocked. -===================== ============== =========== - -The following thread pools can be monitored.
- -============================ ============== =========== -Name Type Description -============================ ============== =========== -Native-Transport-Requests transport Handles client CQL requests -CounterMutationStage request Responsible for counter writes -ViewMutationStage request Responsible for materialized view writes -MutationStage request Responsible for all other writes -ReadRepairStage request ReadRepair happens on this thread pool -ReadStage request Local reads run on this thread pool -RequestResponseStage request Coordinator requests to the cluster run on this thread pool -AntiEntropyStage internal Builds merkle trees for repairs -CacheCleanupExecutor internal Cache maintenance performed on this thread pool -CompactionExecutor internal Compactions are run on these threads -GossipStage internal Handles gossip requests -HintsDispatcher internal Performs hinted handoff -InternalResponseStage internal Responsible for intra-cluster callbacks -MemtableFlushWriter internal Writes memtables to disk -MemtablePostFlush internal Cleans up commit log after memtable is written to disk -MemtableReclaimMemory internal Memtable recycling -MigrationStage internal Runs schema migrations -MiscStage internal Miscellaneous tasks run here -PendingRangeCalculator internal Calculates token range -PerDiskMemtableFlushWriter_0 internal Responsible for writing to a specific disk (there is one of these per disk 0-N) -Sampler internal Responsible for re-sampling the index summaries of SSTables -SecondaryIndexManagement internal Performs updates to secondary indexes -ValidationExecutor internal Performs validation compaction or scrubbing -ViewBuildExecutor internal Performs materialized views initial build -============================ ============== =========== - -.. |nbsp| unicode:: 0xA0 .. nonbreaking space - -Client Request Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Client requests have their own set of metrics that encapsulate the work happening at coordinator level. - -Different types of client requests are broken down by ``RequestType``. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ClientRequest..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ClientRequest scope= name=`` - - -:RequestType: CASRead -:Description: Metrics related to transactional read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction read latency. - Unavailables Counter Number of unavailable exceptions encountered. - UnfinishedCommit Counter Number of transactions that were committed on read. - ConditionNotMet Counter Number of transactions whose preconditions did not match the current values. - ContentionHistogram Histogram How many contended reads were encountered. - ===================== ============== ============================================================= - -:RequestType: CASWrite -:Description: Metrics related to transactional write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered.
- Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction write latency. - UnfinishedCommit Counter Number of transactions that were committed on write. - ConditionNotMet Counter Number of transactions whose preconditions did not match the current values. - ContentionHistogram Histogram How many contended writes were encountered. - MutationSizeHistogram Histogram Total size in bytes of the request's mutations. - ===================== ============== ============================================================= - - -:RequestType: Read -:Description: Metrics related to standard read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of read failures encountered. - |nbsp| Latency Read latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: RangeSlice -:Description: Metrics related to token range read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of range query failures encountered. - |nbsp| Latency Range query latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: Write -:Description: Metrics related to regular write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of write failures encountered. - |nbsp| Latency Write latency. - Unavailables Counter Number of unavailable exceptions encountered. - MutationSizeHistogram Histogram Total size in bytes of the request's mutations. - ===================== ============== ============================================================= - - -:RequestType: ViewWrite -:Description: Metrics related to materialized view writes. -:Metrics: - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - Unavailables Counter Number of unavailable exceptions encountered. - ViewReplicasAttempted Counter Total number of attempted view replica writes. - ViewReplicasSuccess Counter Total number of succeeded view replica writes. - ViewPendingMutations Gauge ViewReplicasAttempted - ViewReplicasSuccess. - ViewWriteLatency Timer Time between when mutation is applied to base table and when CL.ONE is achieved on view. - ===================== ============== ============================================================= - -Cache Metrics -^^^^^^^^^^^^^ - -Cassandra caches have metrics to track the effectiveness of the caches, though the ``Table Metrics`` might be more useful.
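For a quick look at cache behaviour without attaching a JMX client, the key, row, counter and chunk cache sizes and hit rates are also summarised by ``nodetool``; a minimal sketch, assuming a locally reachable node with default JMX settings::

    nodetool info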
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Cache..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Cache scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Cache capacity in bytes. -Entries Gauge Total number of cache entries. -FifteenMinuteCacheHitRate Gauge 15m cache hit rate. -FiveMinuteCacheHitRate Gauge 5m cache hit rate. -OneMinuteCacheHitRate Gauge 1m cache hit rate. -HitRate Gauge All time cache hit rate. -Hits Meter Total number of cache hits. -Misses Meter Total number of cache misses. -MissLatency Timer Latency of misses. -Requests Gauge Total number of cache requests. -Size Gauge Total size of occupied cache, in bytes. -========================== ============== =========== - -The following caches are covered: - -============================ =========== -Name Description -============================ =========== -CounterCache Keeps hot counters in memory for performance. -ChunkCache In process uncompressed page cache. -KeyCache Cache for partition to sstable offsets. -RowCache Cache for rows kept in memory. -============================ =========== - -.. NOTE:: - Misses and MissLatency are only defined for the ChunkCache - -CQL Metrics -^^^^^^^^^^^ - -Metrics specific to CQL prepared statement caching. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CQL.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CQL name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -PreparedStatementsCount Gauge Number of cached prepared statements. -PreparedStatementsEvicted Counter Number of prepared statements evicted from the prepared statement cache -PreparedStatementsExecuted Counter Number of prepared statements executed. -RegularStatementsExecuted Counter Number of **non** prepared statements executed. -PreparedStatementsRatio Gauge Percentage of statements that are prepared vs unprepared. -========================== ============== =========== - -.. _dropped-metrics: - -DroppedMessage Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by ``Hinted Handoff`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.DroppedMessage..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=DroppedMessage scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CrossNodeDroppedLatency Timer The dropped latency across nodes. -InternalDroppedLatency Timer The dropped latency within node. -Dropped Meter Number of dropped messages. -========================== ============== =========== - -The different types of messages tracked are: - -============================ =========== -Name Description -============================ =========== -BATCH_STORE Batchlog write -BATCH_REMOVE Batchlog cleanup (after succesfully applied) -COUNTER_MUTATION Counter writes -HINT Hint replay -MUTATION Regular writes -READ Regular reads -READ_REPAIR Read repair -PAGED_SLICE Paged read -RANGE_SLICE Token range read -REQUEST_RESPONSE RPC Callbacks -_TRACE Tracing writes -============================ =========== - -Streaming Metrics -^^^^^^^^^^^^^^^^^ - -Metrics reported during ``Streaming`` operations, such as repair, bootstrap, rebuild. 
- -These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Streaming..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Streaming scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -IncomingBytes Counter Number of bytes streamed to this node from the peer. -OutgoingBytes Counter Number of bytes streamed to the peer endpoint from this node. -========================== ============== =========== - - -Compaction Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to ``Compaction`` work. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Compaction.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Compaction name=`` - -========================== ======================================== =============================================== -Name Type Description -========================== ======================================== =============================================== -BytesCompacted Counter Total number of bytes compacted since server [re]start. -PendingTasks Gauge Estimated number of compactions remaining to perform. -CompletedTasks Gauge Number of completed compactions since server [re]start. -TotalCompactionsCompleted Meter Throughput of completed compactions since server [re]start. -PendingTasksByTableName Gauge>> Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in ``Table Metrics``. -========================== ======================================== =============================================== - -CommitLog Metrics -^^^^^^^^^^^^^^^^^ - -Metrics specific to the ``CommitLog`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CommitLog.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CommitLog name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CompletedTasks Gauge Total number of commit log messages written since [re]start. -PendingTasks Gauge Number of commit log messages written but yet to be fsync'd. -TotalCommitLogSize Gauge Current size, in bytes, used by all the commit log segments. -WaitingOnSegmentAllocation Timer Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero. -WaitingOnCommit Timer The time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval. -========================== ============== =========== - -Storage Metrics -^^^^^^^^^^^^^^^ - -Metrics specific to the storage engine. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Storage.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Storage name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Exceptions Counter Number of internal exceptions caught. Under normal exceptions this should be zero. -Load Counter Size, in bytes, of the on disk data size this node manages. -TotalHints Counter Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint. -TotalHintsInProgress Counter Number of hints attemping to be sent currently. 
-========================== ============== =========== - -HintedHandoff Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in ``Storage Metrics``. - -These metrics include the peer endpoint **in the metric name**. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintedHandOffManager.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintedHandOffManager name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Hints_created- Counter Number of hints on disk for this peer. -Hints_not_stored- Counter Number of hints not stored for this peer, due to being down past the configured hint window. -=========================== ============== =========== - -HintsService Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in ``Storage Metrics``. - -These metrics include the peer endpoint **in the metric name**. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintsService.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintsService name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -HintsSucceeded Meter A meter of the hints successfully delivered -HintsFailed Meter A meter of the hints that failed to be delivered -HintsTimedOut Meter A meter of the hints that timed out -Hint_delays Histogram Histogram of hint delivery delays (in milliseconds) -Hint_delays- Histogram Histogram of hint delivery delays (in milliseconds) per peer -=========================== ============== =========== - -SSTable Index Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the SSTable index metadata. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Index..RowIndexEntry`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -IndexedEntrySize Histogram Histogram of the on-heap size, in bytes, of the index across all SSTables. -IndexInfoCount Histogram Histogram of the number of on-heap index entries managed across all SSTables. -IndexInfoGets Histogram Histogram of the number of index seeks performed per SSTable. -=========================== ============== =========== - -BufferPool Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.BufferPool.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=BufferPool name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Size Gauge Size, in bytes, of the managed buffer pool -Misses Meter The rate of misses in the pool. The higher this is the more allocations incurred. -=========================== ============== =========== - - -Client Metrics -^^^^^^^^^^^^^^ - -Metrics specific to client management.
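These connection-level metrics can also be inspected from the command line; a minimal sketch using ``nodetool`` (the ``clientstats`` command is available from Cassandra 4.0 onwards)::

    # list the native protocol clients currently connected to this node
    nodetool clientstats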
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Client.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Client name=`` - -============================== =============================== =========== -Name Type Description -============================== =============================== =========== -connectedNativeClients Gauge Number of clients connected to this node's native protocol server -connections Gauge> List of all connections and their state information -connectedNativeClientsByUser Gauge Number of connected native clients by username -============================== =============================== =========== - - -Batch Metrics -^^^^^^^^^^^^^ - -Metrics specific to batch statements. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Batch.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Batch name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -PartitionsPerCounterBatch Histogram Distribution of the number of partitions processed per counter batch -PartitionsPerLoggedBatch Histogram Distribution of the number of partitions processed per logged batch -PartitionsPerUnloggedBatch Histogram Distribution of the number of partitions processed per unlogged batch -=========================== ============== =========== - - -JVM Metrics -^^^^^^^^^^^ - -JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using `Metric Reporters`_. - -BufferPool -++++++++++ - -**Metric Name** - ``jvm.buffers..`` - -**JMX MBean** - ``java.nio:type=BufferPool name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Estimated total capacity of the buffers in this pool -Count Gauge Estimated number of buffers in the pool -Used Gauge Estimated memory that the Java virtual machine is using for this buffer pool -========================== ============== =========== - -FileDescriptorRatio -+++++++++++++++++++ - -**Metric Name** - ``jvm.fd.`` - -**JMX MBean** - ``java.lang:type=OperatingSystem name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Usage Ratio Ratio of used to total file descriptors -========================== ============== =========== - -GarbageCollector -++++++++++++++++ - -**Metric Name** - ``jvm.gc..`` - -**JMX MBean** - ``java.lang:type=GarbageCollector name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Count Gauge Total number of collections that have occurred -Time Gauge Approximate accumulated collection elapsed time in milliseconds -========================== ============== =========== - -Memory -++++++ - -**Metric Name** - ``jvm.memory..`` - -**JMX MBean** - ``java.lang:type=Memory`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -MemoryPool -++++++++++ - -**Metric Name** -
``jvm.memory.pools..`` - -**JMX MBean** - ``java.lang:type=MemoryPool name=`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -JMX -^^^ - -Any JMX based client can access metrics from cassandra. - -If you wish to access JMX metrics over http it's possible to download `Mx4jTool `__ and -place ``mx4j-tools.jar`` into the classpath. On startup you will see in the log:: - - HttpAdaptor version 3.0.2 started on port 8081 - -To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -``conf/cassandra-env.sh`` and uncomment:: - - #MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0" - - #MX4J_PORT="-Dmx4jport=8081" - - -Metric Reporters -^^^^^^^^^^^^^^^^ - -As mentioned at the top of this section on monitoring the Cassandra metrics can be exported to a number of monitoring -system a number of `built in `__ and `third party -`__ reporter plugins. - -The configuration of these plugins is managed by the `metrics reporter config project -`__. There is a sample configuration file located at -``conf/metrics-reporter-config-sample.yaml``. - -Once configured, you simply start cassandra with the flag -``-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml``. The specified .yaml file plus any 3rd party -reporter jars must all be in Cassandra's classpath. diff --git a/src/doc/4.0-alpha1/_sources/operating/read_repair.rst.txt b/src/doc/4.0-alpha1/_sources/operating/read_repair.rst.txt deleted file mode 100644 index 0e52bf523..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/read_repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Read repair ------------ - -.. todo:: todo diff --git a/src/doc/4.0-alpha1/_sources/operating/repair.rst.txt b/src/doc/4.0-alpha1/_sources/operating/repair.rst.txt deleted file mode 100644 index 97115dc66..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/repair.rst.txt +++ /dev/null @@ -1,107 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _repair: - -Repair ------- - -Cassandra is designed to remain available if one of it's nodes is down or unreachable. However, when a node is down or -unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but -are a best effort, and aren't guaranteed to inform a node of 100% of the writes it missed. These inconsistencies can -eventually result in data loss as nodes are replaced or tombstones expire. - -These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their -respective datasets for their common token ranges, and streaming the differences for any out of sync sections between -the nodes. It compares the data with merkle trees, which are a hierarchy of hashes. - -Incremental and Full Repairs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that's been written since the previous incremental repair. - -Incremental repairs are the default repair type, and if run regularly, can significantly reduce the time and io cost of -performing a repair. However, it's important to understand that once an incremental repair marks data as repaired, it won't -try to repair it again. This is fine for syncing up missed writes, but it doesn't protect against things like disk corruption, -data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally. - -Usage and Best Practices -^^^^^^^^^^^^^^^^^^^^^^^^ - -Since repair can result in a lot of disk and network io, it's not run automatically by Cassandra. It is run by the operator -via nodetool. - -Incremental repair is the default and is run with the following command: - -:: - - nodetool repair - -A full repair can be run with the following command: - -:: - - nodetool repair --full - -Additionally, repair can be run on a single keyspace: - -:: - - nodetool repair [options] - -Or even on specific tables: - -:: - - nodetool repair [options] - - -The repair command only repairs token ranges on the node being repaired, it doesn't repair the whole cluster. By default, repair -will operate on all token ranges replicated by the node you're running repair on, which will cause duplicate work if you run it -on every node. The ``-pr`` flag will only repair the "primary" ranges on a node, so you can repair your entire cluster by running -``nodetool repair -pr`` on each node in a single datacenter. - -The specific frequency of repair that's right for your cluster, of course, depends on several factors. However, if you're -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don't want to run incremental repairs, a full repair every 5 days is a good place -to start. - -At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. 
Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays. - -Other Options -^^^^^^^^^^^^^ - -``-pr, --partitioner-range`` - Restricts repair to the 'primary' token ranges of the node being repaired. A primary range is just a token range for - which a node is the first replica in the ring. - -``-prv, --preview`` - Estimates the amount of streaming that would occur for the given repair command. This builds the merkle trees, and prints - the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated, - add the ``--full`` flag to estimate a full repair. - -``-vd, --validate`` - Verifies that the repaired data is the same across all nodes. Similiar to ``--preview``, this builds and compares merkle - trees of repaired data, but doesn't do any streaming. This is useful for troubleshooting. If this shows that the repaired - data is out of sync, a full repair should be run. - -.. seealso:: - :ref:`nodetool repair docs ` diff --git a/src/doc/4.0-alpha1/_sources/operating/security.rst.txt b/src/doc/4.0-alpha1/_sources/operating/security.rst.txt deleted file mode 100644 index c2d8b79b0..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/security.rst.txt +++ /dev/null @@ -1,441 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Security --------- -There are three main components to the security features provided by Cassandra: - -- TLS/SSL encryption for client and inter-node communication -- Client authentication -- Authorization - -By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still: - -- Craft internode messages to insert users into authentication schema -- Craft internode messages to truncate or drop schema -- Use tools such as ``sstableloader`` to overwrite ``system_auth`` tables -- Attach to the cluster directly to capture write traffic - -Correct configuration of all three security components should negate theses vectors. Therefore, understanding Cassandra's -security features is crucial to configuring your cluster to meet your security needs. - - -TLS/SSL Encryption -^^^^^^^^^^^^^^^^^^ -Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. 
Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently. - -In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can -be overidden using the settings in ``cassandra.yaml``, but this is not recommended unless there are policies in place -which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be -updated. - -FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See `the java document on FIPS `__ -for more details. - -For information on generating the keystore and truststore files used in SSL communications, see the -`java documentation on creating keystores `__ - -SSL Certificate Hot Reloading -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes. - -Certificate Hot reloading may also be triggered using the ``nodetool reloadssl`` command. Use this if you want to Cassandra to -immediately notice the changed certificates. - -Inter-node Encryption -~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing inter-node encryption are found in ``cassandra.yaml`` in the ``server_encryption_options`` -section. To enable inter-node encryption, change the ``internode_encryption`` setting from its default value of ``none`` -to one value from: ``rack``, ``dc`` or ``all``. - -Client to Node Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing client to node encryption are found in ``cassandra.yaml`` in the ``client_encryption_options`` -section. There are two primary toggles here for enabling encryption, ``enabled`` and ``optional``. - -- If neither is set to ``true``, client connections are entirely unencrypted. -- If ``enabled`` is set to ``true`` and ``optional`` is set to ``false``, all client connections must be secured. -- If both options are set to ``true``, both encrypted and unencrypted connections are supported using the same port. - Client connections using encryption with this configuration will be automatically detected and handled by the server. - -As an alternative to the ``optional`` setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set ``optional`` to false and use the ``native_transport_port_ssl`` -setting in ``cassandra.yaml`` to specify the port to be used for secure client communication. - -.. _operation-roles: - -Roles -^^^^^ - -Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -``role_manager`` setting in ``cassandra.yaml``. The default setting uses ``CassandraRoleManager``, an implementation -which stores role information in the tables of the ``system_auth`` keyspace. 
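A minimal sketch of the corresponding ``cassandra.yaml`` entry, shown here with the default implementation; substitute your own class name if role management has been customised::

    # store role information in the system_auth keyspace (the default)
    role_manager: CassandraRoleManager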
- -See also the :ref:`CQL documentation on roles `. - -Authentication -^^^^^^^^^^^^^^ - -Authentication is pluggable in Cassandra and is configured using the ``authenticator`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthenticator`` which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions. - -The default distribution also includes ``PasswordAuthenticator``, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication. - -.. _password-authentication: - -Enabling Password Authentication -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster. - -Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps: - -1. Open a ``cqlsh`` session and change the replication factor of the ``system_auth`` keyspace. By default, this keyspace - uses ``SimpleReplicationStrategy`` and a ``replication_factor`` of 1. It is recommended to change this for any - non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to - configure a replication factor of 3 to 5 per-DC. - -:: - - ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3}; - -2. Edit ``cassandra.yaml`` to change the ``authenticator`` option like so: - -:: - - authenticator: PasswordAuthenticator - -3. Restart the node. - -4. Open a new ``cqlsh`` session using the credentials of the default superuser: - -:: - - cqlsh -u cassandra -p cassandra - -5. During login, the credentials for the default superuser are read with a consistency level of ``QUORUM``, whereas - those for all other users (including superusers) are read at ``LOCAL_ONE``. In the interests of performance and - availability, as well as security, operators should create another superuser and disable the default one. This step - is optional, but highly recommended. While logged in as the default superuser, create another superuser role which - can be used to bootstrap further configuration. - -:: - - # create a new superuser - CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super'; - -6. Start a new cqlsh session, this time logging in as the new_superuser and disable the default superuser. - -:: - - ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false; - -7. 
Finally, set up the roles and credentials for your application users with :ref:`CREATE ROLE ` - statements. - -At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster. - -Note that using ``PasswordAuthenticator`` also requires the use of :ref:`CassandraRoleManager `. - -See also: :ref:`setting-credentials-for-internal-authentication`, :ref:`CREATE ROLE `, -:ref:`ALTER ROLE `, :ref:`ALTER KEYSPACE ` and :ref:`GRANT PERMISSION -`, - -.. _authorization: - -Authorization -^^^^^^^^^^^^^ - -Authorization is pluggable in Cassandra and is configured using the ``authorizer`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthorizer`` which performs no checking and so effectively grants all -permissions to all roles. This must be used if ``AllowAllAuthenticator`` is the configured authenticator. - -The default distribution also includes ``CassandraAuthorizer``, which does implement full permissions management -functionality and stores its data in Cassandra system tables. - -Enabling Internal Authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests. - -The following assumes that authentication has already been enabled via the process outlined in -:ref:`password-authentication`. Perform these steps to enable internal authorization across the cluster: - -1. On the selected node, edit ``cassandra.yaml`` to change the ``authorizer`` option like so: - -:: - - authorizer: CassandraAuthorizer - -2. Restart the node. - -3. Open a new ``cqlsh`` session using the credentials of a role with superuser credentials: - -:: - - cqlsh -u dba -p super - -4. Configure the appropriate access privileges for your clients using `GRANT PERMISSION `_ - statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so - disruption to clients is avoided. - -:: - - GRANT SELECT ON ks.t1 TO db_user; - -5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node - restarts and clients reconnect, the enforcement of the granted permissions will begin. - -See also: :ref:`GRANT PERMISSION `, `GRANT ALL ` and :ref:`REVOKE PERMISSION -` - -.. _auth-caching: - -Caching -^^^^^^^ - -Enabling authentication and authorization places additional load on the cluster by frequently reading from the -``system_auth`` tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from ``cassandra.yaml`` -or using a JMX client. 
The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from ``cassandra.yaml`` when the node is restarted. - -Each cache has 3 options which can be set: - -Validity Period - Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache. -Refresh Rate - Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these - async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a - shorter time than the validity period. -Max Entries - Controls the upper bound on cache size. - -The naming for these options in ``cassandra.yaml`` follows the convention: - -* ``_validity_in_ms`` -* ``_update_interval_in_ms`` -* ``_cache_max_entries`` - -Where ```` is one of ``credentials``, ``permissions``, or ``roles``. - -As mentioned, these are also exposed via JMX in the mbeans under the ``org.apache.cassandra.auth`` domain. - -JMX access -^^^^^^^^^^ - -Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra's own auth subsystem. - -The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to change the ``LOCAL_JMX`` setting to ``yes``. Under the -standard configuration, when remote JMX connections are enabled, :ref:`standard JMX authentication ` -is also switched on. - -Note that by default, local-only connections are not subject to authentication, but this can be enabled. - -If enabling remote connections, it is recommended to also use :ref:`SSL ` connections. - -Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as :ref:`nodetool `, are -correctly configured and working as expected. - -.. _standard-jmx-auth: - -Standard JMX Auth -~~~~~~~~~~~~~~~~~ - -Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -``cassandra-env.sh`` by the line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -Edit the password file to add username/password pairs: - -:: - - jmx_user jmx_password - -Secure the credentials file so that only the user running the Cassandra process can read it : - -:: - - $ chown cassandra:cassandra /etc/cassandra/jmxremote.password - $ chmod 400 /etc/cassandra/jmxremote.password - -Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in ``cassandra-env.sh``: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -Then edit the access file to grant your JMX user readwrite permission: - -:: - - jmx_user readwrite - -Cassandra must be restarted to pick up the new settings. - -See also : `Using File-Based Password Authentication In JMX -`__ - - -Cassandra Integrated Auth -~~~~~~~~~~~~~~~~~~~~~~~~~ - -An alternative to the out-of-the-box JMX auth is to useeCassandra's own authentication and/or authorization providers -for JMX clients. 
This is potentially more flexible and secure, but it comes with one major caveat: it is not -available until `after` a node has joined the ring, because the auth subsystem is not fully configured until that point. -However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is -recommended, where possible, to use local-only JMX auth during bootstrap and then, if remote connectivity is required, -to switch to integrated auth once the node has joined the ring and initial setup is complete. - -With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates -can be managed centrally using just ``cqlsh``. Furthermore, fine-grained control over exactly which operations are -permitted on particular MBeans can be achieved via :ref:`GRANT PERMISSION `. - -To enable integrated authentication, edit ``cassandra-env.sh`` to uncomment these lines: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" - #JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" - -And disable the standard JMX auth by commenting out this line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -To enable integrated authorization, uncomment this line: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" - -Check that standard access control is off by ensuring this line is commented out: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as: - -:: - - CREATE ROLE jmx WITH LOGIN = false; - GRANT SELECT ON ALL MBEANS TO jmx; - GRANT DESCRIBE ON ALL MBEANS TO jmx; - GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx; - GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx; - - # Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx; - - # Grant the jmx role to one with login permissions so that it can access the JMX tooling - CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false; - GRANT jmx TO ks_user; - -Fine-grained access control to individual MBeans is also supported: - -:: - - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner; - -This permits the ``ks_user`` role to invoke methods on the MBean representing a single table in ``test_keyspace``, while -granting the same permission for all table level MBeans in that keyspace to the ``ks_owner`` role. - -Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered. - -See also: :ref:`Permissions `. - ..
_jmx-with-ssl: - -JMX With SSL -~~~~~~~~~~~~ - -JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit -the relevant lines in ``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to uncomment and set the values of these -properties as required: - -``com.sun.management.jmxremote.ssl`` - set to true to enable SSL -``com.sun.management.jmxremote.ssl.need.client.auth`` - set to true to enable validation of client certificates -``com.sun.management.jmxremote.registry.ssl`` - enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub -``com.sun.management.jmxremote.ssl.enabled.protocols`` - by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is - not usually necessary and using the defaults is the preferred option. -``com.sun.management.jmxremote.ssl.enabled.cipher.suites`` - by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that - this is not usually necessary and using the defaults is the preferred option. -``javax.net.ssl.keyStore`` - set the path on the local filesystem of the keystore containing server private keys and public certificates -``javax.net.ssl.keyStorePassword`` - set the password of the keystore file -``javax.net.ssl.trustStore`` - if validation of client certificates is required, use this property to specify the path of the truststore containing - the public certificates of trusted clients -``javax.net.ssl.trustStorePassword`` - set the password of the truststore file - -See also: `Oracle Java7 Docs `__, -`Monitor Java with JMX `__ diff --git a/src/doc/4.0-alpha1/_sources/operating/snitch.rst.txt b/src/doc/4.0-alpha1/_sources/operating/snitch.rst.txt deleted file mode 100644 index 5f6760a41..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/snitch.rst.txt +++ /dev/null @@ -1,78 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Snitch ------- - -In cassandra, the snitch has two functions: - -- it teaches Cassandra enough about your network topology to route requests efficiently. -- it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping - machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same - "rack" (which may not actually be a physical location). - -Dynamic snitching -^^^^^^^^^^^^^^^^^ - -The dynamic snitch monitor read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is -configured with the following properties on ``cassandra.yaml``: - -- ``dynamic_snitch``: whether the dynamic snitch should be enabled or disabled. 
-- ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the more expensive part of host score - calculation. -- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero, this will allow 'pinning' of replicas to hosts - in order to increase cache capacity. -- ``dynamic_snitch_badness_threshold:``: The badness threshold will control how much worse the pinned host has to be - before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a - percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned - host was 20% worse than the fastest. - -Snitch classes -^^^^^^^^^^^^^^ - -The ``endpoint_snitch`` parameter in ``cassandra.yaml`` should be set to the class that implements -``IEndPointSnitch`` which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the snitch implementations: - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via gossip. If ``cassandra-topology.properties`` exists, - it is used as a fallback, allowing migration from the PropertyFileSnitch. - -SimpleSnitch - Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -PropertyFileSnitch - Proximity is determined by rack and data center, which are explicitly configured in - ``cassandra-topology.properties``. - -Ec2Snitch - Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. - The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this - will not work across multiple regions. - -Ec2MultiRegionSnitch - Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the - public IP as well). You will need to open the ``storage_port`` or ``ssl_storage_port`` on the public IP firewall - (For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection). - -RackInferringSnitch - Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each - node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an - example of writing a custom Snitch class and is provided in that spirit. diff --git a/src/doc/4.0-alpha1/_sources/operating/topo_changes.rst.txt b/src/doc/4.0-alpha1/_sources/operating/topo_changes.rst.txt deleted file mode 100644 index 6c8f8ecdf..000000000 --- a/src/doc/4.0-alpha1/_sources/operating/topo_changes.rst.txt +++ /dev/null @@ -1,129 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _topology-changes: - -Adding, replacing, moving and removing nodes --------------------------------------------- - -Bootstrap -^^^^^^^^^ - -Adding new nodes is called "bootstrapping". The ``num_tokens`` parameter will define the amount of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for. - -Token allocation -~~~~~~~~~~~~~~~~ - -With the default token allocation algorithm the new node will pick ``num_tokens`` random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher amount of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead. - -On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option ``-Dcassandra.allocate_tokens_for_keyspace=``, where -```` is the keyspace from which the algorithm can find the load information to optimize token assignment for. - -Manual token assignment -""""""""""""""""""""""" - -You may specify a comma-separated list of tokens manually with the ``initial_token`` ``cassandra.yaml`` parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens. - -Range streaming -~~~~~~~~~~~~~~~~ - -After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state. - -In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag ``-Dcassandra.consistent.rangemovement=false``. - -Resuming failed/hanged bootstrap -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On 2.2+, if the bootstrap process fails, it's possible to resume bootstrap from the previous saved state by calling -``nodetool bootstrap resume``. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag -``-Dcassandra.reset_bootstrap_progress=true``. - -On lower versions, when the bootstrap proces fails it is recommended to wipe the node (remove all the data), and restart -the bootstrap process again. - -Manual bootstrapping -~~~~~~~~~~~~~~~~~~~~ - -It's possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -``auto_bootstrap: false``. This may be useful when restoring a node from a backup or creating a new data-center. - -Removing nodes -^^^^^^^^^^^^^^ - -You can take a node out of the cluster with ``nodetool decommission`` to a live node, or ``nodetool removenode`` (to any -other machine) to remove a dead one. 
This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas. - -No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually. - -Moving nodes -^^^^^^^^^^^^ - -When ``num_tokens: 1`` it's possible to move the node position in the ring with ``nodetool move``. Moving is both a -convenience over and more efficient than decommission + bootstrap. After moving a node, ``nodetool cleanup`` should be -run to remove any unnecessary data. - -Replacing a dead node -^^^^^^^^^^^^^^^^^^^^^ - -In order to replace a dead node, start cassandra with the JVM startup flag -``-Dcassandra.replace_address_first_boot=``. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node to be DOWN (DN), however this node will see itself as UP -(UN). Accurate replacement state can be found in ``nodetool netstats``. - -The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will -only receive writes during the bootstrapping phase if it has a different ip address to the node that is being replaced. -(See CASSANDRA-8523 and CASSANDRA-12344) - -Once the bootstrapping is complete the node will be marked "UP". - -.. Note:: If any of the following cases apply, you **MUST** run repair to make the replaced node consistent again, since - it missed ongoing writes during/prior to bootstrapping. The *replacement* timeframe refers to the period from when the - node initially dies to when a new node completes the replacement process. - - 1. The node is down for longer than ``max_hint_window_in_ms`` before being replaced. - 2. You are replacing using the same IP address as the dead node **and** replacement takes longer than ``max_hint_window_in_ms``. - -Monitoring progress -^^^^^^^^^^^^^^^^^^^ - -Bootstrap, replace, move and remove progress can be monitored using ``nodetool netstats`` which will show the progress -of the streaming operations. - -Cleanup data after range movements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As a safety measure, Cassandra does not automatically remove data from nodes that "lose" part of their token range due -to a range movement operation (bootstrap, move, replace). Run ``nodetool cleanup`` on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node. diff --git a/src/doc/4.0-alpha1/_sources/plugins/index.rst.txt b/src/doc/4.0-alpha1/_sources/plugins/index.rst.txt deleted file mode 100644 index 4073a92cb..000000000 --- a/src/doc/4.0-alpha1/_sources/plugins/index.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Third-Party Plugins -=================== - -Available third-party plugins for Apache Cassandra - -CAPI-Rowcache -------------- - -The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments. - -The official page for the `CAPI-Rowcache plugin `__ contains further details how to build/run/download the plugin. - - -Stratio’s Cassandra Lucene Index --------------------------------- - -Stratio’s Lucene index is a Cassandra secondary index implementation based on `Apache Lucene `__. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities such as with ElasticSearch or `Apache Solr `__, including full text search capabilities, free multivariable, geospatial and bitemporal search, relevance queries and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability is guaranteed. - -The official Github repository `Cassandra Lucene Index `__ contains everything you need to build/run/configure the plugin. \ No newline at end of file diff --git a/src/doc/4.0-alpha1/_sources/tools/cassandra_stress.rst.txt b/src/doc/4.0-alpha1/_sources/tools/cassandra_stress.rst.txt deleted file mode 100644 index bcac54ec1..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/cassandra_stress.rst.txt +++ /dev/null @@ -1,269 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: yaml - -.. _cassandra_stress: - -Cassandra Stress ----------------- - -cassandra-stress is a tool for benchmarking and load testing a Cassandra -cluster. cassandra-stress supports testing arbitrary CQL tables and queries -to allow users to benchmark their data model. - -This documentation focuses on user mode as this allows the testing of your -actual schema. 
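For example, before turning to user mode, a quick functional check of a single local node can be run with the basic ``write`` and ``read`` commands described below (the node address, operation count and thread count here are illustrative values, not tuning recommendations)::

    cassandra-stress write n=100000 -rate threads=16 -node 127.0.0.1
    cassandra-stress read n=100000 -rate threads=16 -node 127.0.0.1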
- -Usage -^^^^^ -There are several operation types: - - * write-only, read-only, and mixed workloads of standard data - * write-only and read-only workloads for counter columns - * user-configured workloads, running custom queries on custom schemas - -The syntax is `cassandra-stress [options]`. If you want more information on a given command -or options, just run `cassandra-stress help `. - -Commands: - read: - Multiple concurrent reads - the cluster must first be populated by a write test - write: - Multiple concurrent writes against the cluster - mixed: - Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test - counter_write: - Multiple concurrent updates of counters. - counter_read: - Multiple concurrent reads of counters. The cluster must first be populated by a counter_write test. - user: - Interleaving of user provided queries, with configurable ratio and distribution. - help: - Print help for a command or option - print: - Inspect the output of a distribution definition - legacy: - Legacy support mode - -Primary Options: - -pop: - Population distribution and intra-partition visit order - -insert: - Insert specific options relating to various methods for batching and splitting partition updates - -col: - Column details such as size and count distribution, data generator, names, comparator and if super columns should be used - -rate: - Thread count, rate limit or automatic mode (default is auto) - -mode: - Thrift or CQL with options - -errors: - How to handle errors when encountered during stress - -sample: - Specify the number of samples to collect for measuring latency - -schema: - Replication settings, compression, compaction, etc. - -node: - Nodes to connect to - -log: - Where to log progress to, and the interval at which to do it - -transport: - Custom transport factories - -port: - The port to connect to Cassandra nodes on - -sendto: - Specify a stress server to send this command to - -graph: - Graph recorded metrics - -tokenrange: - Token range settings - - -Suboptions: - Every command and primary option has its own collection of suboptions. These are too numerous to list here. - For information on the suboptions for each command or option, please use the help command, - `cassandra-stress help `. - -User mode -^^^^^^^^^ - -User mode allows you to stress test your own schemas. This can save time in -the long run rather than building an application and then realising your schema -doesn't scale. - -Profile -+++++++ - -User mode requires a profile defined in YAML. -Multiple YAML files may be specified, in which case operations in the ops argument are referenced as specname.opname. - -An identifier for the profile:: - - specname: staff_activities - -The keyspace for the test:: - - keyspace: staff - -CQL for the keyspace. Optional if the keyspace already exists:: - - keyspace_definition: | - CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -The table to be stressed:: - - table: staff_activities - -CQL for the table. Optional if the table already exists:: - - table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when, what) - ) - - -Optional meta information on the generated columns in the above table. -The min and max only apply to text and blob types.
-The distribution field represents the total unique population -distribution of that column across rows:: - - columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -Supported types are: - -An exponential distribution over the range [min..max]:: - - EXP(min..max) - -An extreme value (Weibull) distribution over the range [min..max]:: - - EXTREME(min..max,shape) - -A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:: - - GAUSSIAN(min..max,stdvrng) - -A gaussian/normal distribution, with explicitly defined mean and stdev:: - - GAUSSIAN(min..max,mean,stdev) - -A uniform distribution over the range [min, max]:: - - UNIFORM(min..max) - -A fixed distribution, always returning the same value:: - - FIXED(val) - -If preceded by ~, the distribution is inverted - -Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1) - -Insert distributions:: - - insert: - # How many partition to insert per batch - partitions: fixed(1) - # How many rows to update per partition - select: fixed(1)/500 - # UNLOGGED or LOGGED batch for insert - batchtype: UNLOGGED - - -Currently all inserts are done inside batches. - -Read statements to use during the test:: - - queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? LIMIT 1 - fields: samerow - -Running a user mode test:: - - cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once - -This will create the schema then run tests for 1 minute with an equal number of inserts, latest_event queries and events -queries. Additionally the table will be truncated once before the test. - -The full example can be found here :download:`yaml <./stress-example.yaml>` - -Running a user mode test with multiple yaml files:: - cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once - -This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table - although care must be taken that the table definition is identical (data generation specs can be different). - -Lightweight transaction support -+++++++++++++++++++++++++++++++ - -cassandra-stress supports lightweight transactions. In this it will first read current data from Cassandra and then uses read value(s) -to fulfill lightweight transaction condition(s). - -Lightweight transaction update query:: - - queries: - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? - fields: samerow - -The full example can be found here :download:`yaml <./stress-lwt-example.yaml>` - -Graphing -^^^^^^^^ - -Graphs can be generated for each run of stress. - -.. 
image:: example-stress-graph.png - -To create a new graph:: - - cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" - -To add a new run to an existing graph point to an existing file and add a revision name:: - - cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run" - -FAQ -^^^^ - -**How do you use NetworkTopologyStrategy for the keyspace?** - -Use the schema option making sure to either escape the parenthesis or enclose in quotes:: - - cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)" - -**How do you use SSL?** - -Use the transport option:: - - cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra" \ No newline at end of file diff --git a/src/doc/4.0-alpha1/_sources/tools/cqlsh.rst.txt b/src/doc/4.0-alpha1/_sources/tools/cqlsh.rst.txt deleted file mode 100644 index 45e2db8fc..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/cqlsh.rst.txt +++ /dev/null @@ -1,455 +0,0 @@ -.. highlight:: none - -.. _cqlsh: - -cqlsh: the CQL shell --------------------- - -cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line. - - -Compatibility -^^^^^^^^^^^^^ - -cqlsh is compatible with Python 2.7. - -In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh make work with older or newer versions of Cassandra, but this is not officially supported. - - -Optional Dependencies -^^^^^^^^^^^^^^^^^^^^^ - -cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh. - -pytz -~~~~ - -By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the `pytz `__ library must be installed. See the ``timezone`` option in cqlshrc_ for -specifying a timezone to use. - -cython -~~~~~~ - -The performance of cqlsh's ``COPY`` operations can be improved by installing `cython `__. This will -compile the python modules that are central to the performance of ``COPY``. - -cqlshrc -^^^^^^^ - -The ``cqlshrc`` file holds configuration options for cqlsh. By default this is in the user's home directory at -``~/.cassandra/cqlsh``, but a custom location can be specified with the ``--cqlshrc`` option. - -Example config values and documentation can be found in the ``conf/cqlshrc.sample`` file of a tarball installation. You -can also view the latest version of `cqlshrc online `__. - - -Command Line Options -^^^^^^^^^^^^^^^^^^^^ - -Usage: - -``cqlsh [options] [host [port]]`` - -Options: - -``-C`` ``--color`` - Force color output - -``--no-color`` - Disable color output - -``--browser`` - Specify the browser to use for displaying cqlsh help. This can be one of the `supported browser names - `__ (e.g. ``firefox``) or a browser path followed by ``%s`` (e.g. - ``/usr/bin/google-chrome-stable %s``). 
- -``--ssl`` - Use SSL when connecting to Cassandra - -``-u`` ``--user`` - Username to authenticate against Cassandra with - -``-p`` ``--password`` - Password to authenticate against Cassandra with, should - be used in conjunction with ``--user`` - -``-k`` ``--keyspace`` - Keyspace to authenticate to, should be used in conjunction - with ``--user`` - -``-f`` ``--file`` - Execute commands from the given file, then exit - -``--debug`` - Print additional debugging information - -``--encoding`` - Specify a non-default encoding for output (defaults to UTF-8) - -``--cqlshrc`` - Specify a non-default location for the ``cqlshrc`` file - -``-e`` ``--execute`` - Execute the given statement, then exit - -``--connect-timeout`` - Specify the connection timeout in seconds (defaults to 2s) - -``--request-timeout`` - Specify the request timeout in seconds (defaults to 10s) - -``-t`` ``--tty`` - Force tty mode (command prompt) - - -Special Commands -^^^^^^^^^^^^^^^^ - -In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below. - -``CONSISTENCY`` -~~~~~~~~~~~~~~~ - -`Usage`: ``CONSISTENCY `` - -Sets the consistency level for operations to follow. Valid arguments include: - -- ``ANY`` -- ``ONE`` -- ``TWO`` -- ``THREE`` -- ``QUORUM`` -- ``ALL`` -- ``LOCAL_QUORUM`` -- ``LOCAL_ONE`` -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -``SERIAL CONSISTENCY`` -~~~~~~~~~~~~~~~~~~~~~~ - -`Usage`: ``SERIAL CONSISTENCY `` - -Sets the serial consistency level for operations to follow. Valid arguments include: - -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of ``QUORUM`` (and -is successful), then a ``QUORUM`` read is guaranteed to see that write. But if the regular consistency level of that -write is ``ANY``, then only a read with a consistency level of ``SERIAL`` is guaranteed to see it (even a read with -consistency ``ALL`` is not guaranteed to be enough). - -``SHOW VERSION`` -~~~~~~~~~~~~~~~~ -Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:: - - cqlsh> SHOW VERSION - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - -``SHOW HOST`` -~~~~~~~~~~~~~ - -Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:: - - cqlsh> SHOW HOST - Connected to Prod_Cluster at 192.0.0.1:9042. - -``SHOW SESSION`` -~~~~~~~~~~~~~~~~ - -Pretty prints a specific tracing session. - -`Usage`: ``SHOW SESSION `` - -Example usage:: - - cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8 - - Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8 - - activity | timestamp | source | source_elapsed | client - -----------------------------------------------------------+----------------------------+-----------+----------------+----------- - Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 | 0 | 127.0.0.1 - Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 | 3843 | 127.0.0.1 - ... 
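If you do not already have a tracing session id to inspect, a simple way to obtain one is to enable tracing, run a statement, and note the ``Tracing session`` id printed with the trace output. A sketch of that workflow, reusing the example session id from above, is::

    cqlsh> TRACING ON
    cqlsh> SELECT * FROM system.local;
    cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8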
- - -``SOURCE`` -~~~~~~~~~~ - -Reads the contents of a file and executes each line as a CQL statement or special cqlsh command. - -`Usage`: ``SOURCE `` - -Example usage:: - - cqlsh> SOURCE '/home/thobbs/commands.cql' - -``CAPTURE`` -~~~~~~~~~~~ - -Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured. - -`Usage`:: - - CAPTURE ''; - CAPTURE OFF; - CAPTURE; - -That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation (``'~/mydir'``) is supported for referring to ``$HOME``. - -Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session. - -To stop capturing output and show it in the cqlsh session again, use ``CAPTURE OFF``. - -To inspect the current capture configuration, use ``CAPTURE`` with no arguments. - -``HELP`` -~~~~~~~~ - -Gives information about cqlsh commands. To see available topics, enter ``HELP`` without any arguments. To see help on a -topic, use ``HELP ``. Also see the ``--browser`` argument for controlling what browser is used to display help. - -``TRACING`` -~~~~~~~~~~~ - -Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed. - -`Usage`:: - - TRACING ON - TRACING OFF - -``PAGING`` -~~~~~~~~~~ - -Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it's a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once. - -`Usage`:: - - PAGING ON - PAGING OFF - PAGING - -``EXPAND`` -~~~~~~~~~~ - -Enables or disables vertical printing of rows. Enabling ``EXPAND`` is useful when many columns are fetched, or the -contents of a single column are large. - -`Usage`:: - - EXPAND ON - EXPAND OFF - -``LOGIN`` -~~~~~~~~~ - -Authenticate as a specified Cassandra user for the current session. - -`Usage`:: - - LOGIN [] - -``EXIT`` -~~~~~~~~~ - -Ends the current session and terminates the cqlsh process. - -`Usage`:: - - EXIT - QUIT - -``CLEAR`` -~~~~~~~~~ - -Clears the console. - -`Usage`:: - - CLEAR - CLS - -``DESCRIBE`` -~~~~~~~~~~~~ - -Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema. - -`Usage`:: - - DESCRIBE CLUSTER - DESCRIBE SCHEMA - DESCRIBE KEYSPACES - DESCRIBE KEYSPACE - DESCRIBE TABLES - DESCRIBE TABLE
- DESCRIBE INDEX - DESCRIBE MATERIALIZED VIEW - DESCRIBE TYPES - DESCRIBE TYPE - DESCRIBE FUNCTIONS - DESCRIBE FUNCTION - DESCRIBE AGGREGATES - DESCRIBE AGGREGATE - -In any of the commands, ``DESC`` may be used in place of ``DESCRIBE``. - -The ``DESCRIBE CLUSTER`` command prints the cluster name and partitioner:: - - cqlsh> DESCRIBE CLUSTER - - Cluster: Test Cluster - Partitioner: Murmur3Partitioner - -The ``DESCRIBE SCHEMA`` command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup. - -``COPY TO`` -~~~~~~~~~~~ - -Copies data from a table to a CSV file. - -`Usage`:: - - COPY
[(, ...)] TO WITH [AND ...] - -If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name. - - -The ```` should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value ``STDOUT`` (without single quotes) to print the CSV to stdout. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``MAXREQUESTS`` - The maximum number of token ranges to fetch simultaneously. Defaults to 6. - -``PAGESIZE`` - The number of rows to fetch in a single page. Defaults to 1000. - -``PAGETIMEOUT`` - By default the page timeout is 10 seconds per 1000 entries - in the page size or 10 seconds if pagesize is smaller. - -``BEGINTOKEN``, ``ENDTOKEN`` - Token range to export. Defaults to exporting the full ring. - -``MAXOUTPUTSIZE`` - The maximum size of the output file measured in number of lines; - beyond this maximum the output file will be split into segments. - -1 means unlimited, and is the default. - -``ENCODING`` - The encoding used for characters. Defaults to ``utf8``. - -``COPY FROM`` -~~~~~~~~~~~~~ -Copies data from a CSV file to a table. - -`Usage`:: - - COPY
[(, ...)] FROM WITH [AND ...] - -If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name. - -The ```` should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value ``STDIN`` (without single quotes) to read the -CSV data from stdin. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY FROM`` -````````````````````````` - -``INGESTRATE`` - The maximum number of rows to process per second. Defaults to 100000. - -``MAXROWS`` - The maximum number of rows to import. -1 means unlimited, and is the default. - -``SKIPROWS`` - A number of initial rows to skip. Defaults to 0. - -``SKIPCOLS`` - A comma-separated list of column names to ignore. By default, no columns are skipped. - -``MAXPARSEERRORS`` - The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default. - -``MAXINSERTERRORS`` - The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000. - -``ERRFILE`` = - A file to store all rows that could not be imported, by default this is ``import__
.err`` where ```` is - your keyspace and ``
`` is your table name. - -``MAXBATCHSIZE`` - The max number of rows inserted in a single batch. Defaults to 20. - -``MINBATCHSIZE`` - The min number of rows inserted in a single batch. Defaults to 2. - -``CHUNKSIZE`` - The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000. - -.. _shared-copy-options: - -Shared COPY Options -``````````````````` - -Options that are common to both ``COPY TO`` and ``COPY FROM``. - -``NULLVAL`` - The string placeholder for null values. Defaults to ``null``. - -``HEADER`` - For ``COPY TO``, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, - specifies whether the first line in the CSV input file contains column names. Defaults to ``false``. - -``DECIMALSEP`` - The character that is used as the decimal point separator. Defaults to ``.``. - -``THOUSANDSSEP`` - The character that is used to separate thousands. Defaults to the empty string. - -``BOOLSTYlE`` - The string literal format for boolean values. Defaults to ``True,False``. - -``NUMPROCESSES`` - The number of child worker processes to create for ``COPY`` tasks. Defaults to a max of 4 for ``COPY FROM`` and 16 - for ``COPY TO``. However, at most (num_cores - 1) processes will be created. - -``MAXATTEMPTS`` - The maximum number of failed attempts to fetch a range of data (when using ``COPY TO``) or insert a chunk of data - (when using ``COPY FROM``) before giving up. Defaults to 5. - -``REPORTFREQUENCY`` - How often status updates are refreshed, in seconds. Defaults to 0.25. - -``RATEFILE`` - An optional file to output rate statistics to. By default, statistics are not output to a file. diff --git a/src/doc/4.0-alpha1/_sources/tools/index.rst.txt b/src/doc/4.0-alpha1/_sources/tools/index.rst.txt deleted file mode 100644 index d28929c84..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Tools -=============== - -This section describes the command line tools provided with Apache Cassandra. - -.. toctree:: - :maxdepth: 3 - - cqlsh - nodetool/nodetool - sstable/index - cassandra_stress diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/assassinate.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/assassinate.rst.txt deleted file mode 100644 index 0ec5dc8f4..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/assassinate.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_assassinate: - -assassinate ------------ - -Usage ---------- - -.. 
include:: assassinate.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/bootstrap.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/bootstrap.rst.txt deleted file mode 100644 index e280fee01..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/bootstrap.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_bootstrap: - -bootstrap ---------- - -Usage ---------- - -.. include:: bootstrap.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/cleanup.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/cleanup.rst.txt deleted file mode 100644 index 70ba8f9f7..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/cleanup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_cleanup: - -cleanup -------- - -Usage ---------- - -.. include:: cleanup.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/clearsnapshot.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/clearsnapshot.rst.txt deleted file mode 100644 index 878147511..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/clearsnapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clearsnapshot: - -clearsnapshot -------------- - -Usage ---------- - -.. include:: clearsnapshot.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/clientstats.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/clientstats.rst.txt deleted file mode 100644 index 7f5e47ab7..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/clientstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clientstats: - -clientstats ------------ - -Usage ---------- - -.. include:: clientstats.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/compact.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/compact.rst.txt deleted file mode 100644 index a26347c57..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/compact.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compact: - -compact -------- - -Usage ---------- - -.. include:: compact.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/compactionhistory.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/compactionhistory.rst.txt deleted file mode 100644 index 896433e86..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/compactionhistory.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionhistory: - -compactionhistory ------------------ - -Usage ---------- - -.. include:: compactionhistory.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/compactionstats.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/compactionstats.rst.txt deleted file mode 100644 index 612822535..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/compactionstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionstats: - -compactionstats ---------------- - -Usage ---------- - -.. include:: compactionstats.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/decommission.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/decommission.rst.txt deleted file mode 100644 index 8b00ff40c..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/decommission.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_decommission: - -decommission ------------- - -Usage ---------- - -.. 
include:: decommission.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/describecluster.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/describecluster.rst.txt deleted file mode 100644 index 55df135de..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/describecluster.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describecluster: - -describecluster ---------------- - -Usage ---------- - -.. include:: describecluster.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/describering.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/describering.rst.txt deleted file mode 100644 index 3a964e8ee..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/describering.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describering: - -describering ------------- - -Usage ---------- - -.. include:: describering.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/disableauditlog.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/disableauditlog.rst.txt deleted file mode 100644 index 4b20b0a9b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/disableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableauditlog: - -disableauditlog ---------------- - -Usage ---------- - -.. include:: disableauditlog.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/disableautocompaction.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/disableautocompaction.rst.txt deleted file mode 100644 index 16549f202..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/disableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableautocompaction: - -disableautocompaction ---------------------- - -Usage ---------- - -.. include:: disableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablebackup.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/disablebackup.rst.txt deleted file mode 100644 index c9537e011..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebackup: - -disablebackup -------------- - -Usage ---------- - -.. include:: disablebackup.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablebinary.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/disablebinary.rst.txt deleted file mode 100644 index 86e981f6e..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebinary: - -disablebinary -------------- - -Usage ---------- - -.. include:: disablebinary.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablefullquerylog.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/disablefullquerylog.rst.txt deleted file mode 100644 index d68cd492e..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablefullquerylog: - -disablefullquerylog -------------------- - -Usage ---------- - -.. include:: disablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablegossip.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/disablegossip.rst.txt deleted file mode 100644 index 76f3d064b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablegossip: - -disablegossip -------------- - -Usage ---------- - -.. 
include:: disablegossip.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablehandoff.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/disablehandoff.rst.txt deleted file mode 100644 index 35f11334b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehandoff: - -disablehandoff --------------- - -Usage ---------- - -.. include:: disablehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablehintsfordc.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/disablehintsfordc.rst.txt deleted file mode 100644 index 892c31ba5..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/disablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehintsfordc: - -disablehintsfordc ------------------ - -Usage ---------- - -.. include:: disablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/disableoldprotocolversions.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/disableoldprotocolversions.rst.txt deleted file mode 100644 index 09d8a150b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/disableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableoldprotocolversions: - -disableoldprotocolversions --------------------------- - -Usage ---------- - -.. include:: disableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/drain.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/drain.rst.txt deleted file mode 100644 index 03093ac7c..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/drain.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_drain: - -drain ------ - -Usage ---------- - -.. include:: drain.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/enableauditlog.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/enableauditlog.rst.txt deleted file mode 100644 index 7936c7a86..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/enableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableauditlog: - -enableauditlog --------------- - -Usage ---------- - -.. include:: enableauditlog.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/enableautocompaction.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/enableautocompaction.rst.txt deleted file mode 100644 index 2ddf60f8e..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/enableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableautocompaction: - -enableautocompaction --------------------- - -Usage ---------- - -.. include:: enableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablebackup.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/enablebackup.rst.txt deleted file mode 100644 index 9a06c6669..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebackup: - -enablebackup ------------- - -Usage ---------- - -.. include:: enablebackup.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablebinary.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/enablebinary.rst.txt deleted file mode 100644 index 5b1c6da72..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebinary: - -enablebinary ------------- - -Usage ---------- - -.. 
include:: enablebinary.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablefullquerylog.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/enablefullquerylog.rst.txt deleted file mode 100644 index ec871c283..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablefullquerylog: - -enablefullquerylog ------------------- - -Usage ---------- - -.. include:: enablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablegossip.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/enablegossip.rst.txt deleted file mode 100644 index ae66186ca..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablegossip: - -enablegossip ------------- - -Usage ---------- - -.. include:: enablegossip.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablehandoff.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/enablehandoff.rst.txt deleted file mode 100644 index d764c3a9a..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehandoff: - -enablehandoff -------------- - -Usage ---------- - -.. include:: enablehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablehintsfordc.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/enablehintsfordc.rst.txt deleted file mode 100644 index 6c42087c3..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/enablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehintsfordc: - -enablehintsfordc ----------------- - -Usage ---------- - -.. include:: enablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/enableoldprotocolversions.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/enableoldprotocolversions.rst.txt deleted file mode 100644 index e44dc377a..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/enableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableoldprotocolversions: - -enableoldprotocolversions -------------------------- - -Usage ---------- - -.. include:: enableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/failuredetector.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/failuredetector.rst.txt deleted file mode 100644 index 25d02b04a..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/failuredetector.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_failuredetector: - -failuredetector ---------------- - -Usage ---------- - -.. include:: failuredetector.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/flush.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/flush.rst.txt deleted file mode 100644 index 45e9b2c5e..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/flush.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_flush: - -flush ------ - -Usage ---------- - -.. include:: flush.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/garbagecollect.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/garbagecollect.rst.txt deleted file mode 100644 index 3af605aad..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/garbagecollect.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_garbagecollect: - -garbagecollect --------------- - -Usage ---------- - -.. 
include:: garbagecollect.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/gcstats.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/gcstats.rst.txt deleted file mode 100644 index 3e4b45930..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/gcstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gcstats: - -gcstats -------- - -Usage ---------- - -.. include:: gcstats.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 6f56f7d6d..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getbatchlogreplaythrottle: - -getbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: getbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getcompactionthreshold.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getcompactionthreshold.rst.txt deleted file mode 100644 index e17f4b2e4..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthreshold: - -getcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: getcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getcompactionthroughput.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getcompactionthroughput.rst.txt deleted file mode 100644 index ef41115ee..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthroughput: - -getcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: getcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getconcurrency.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getconcurrency.rst.txt deleted file mode 100644 index 9b0373480..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrency: - -getconcurrency --------------- - -Usage ---------- - -.. include:: getconcurrency.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getconcurrentcompactors.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getconcurrentcompactors.rst.txt deleted file mode 100644 index 8e137e0ed..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentcompactors: - -getconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: getconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt deleted file mode 100644 index e113d74c5..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentviewbuilders: - -getconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. 
include:: getconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getendpoints.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getendpoints.rst.txt deleted file mode 100644 index ac0d43c7a..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getendpoints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getendpoints: - -getendpoints ------------- - -Usage ---------- - -.. include:: getendpoints.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt deleted file mode 100644 index abdd7e7f0..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getinterdcstreamthroughput: - -getinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: getinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getlogginglevels.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getlogginglevels.rst.txt deleted file mode 100644 index 9936e8d45..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getlogginglevels.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getlogginglevels: - -getlogginglevels ----------------- - -Usage ---------- - -.. include:: getlogginglevels.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getmaxhintwindow.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getmaxhintwindow.rst.txt deleted file mode 100644 index 1a0fe017b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getmaxhintwindow: - -getmaxhintwindow ----------------- - -Usage ---------- - -.. include:: getmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getreplicas.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getreplicas.rst.txt deleted file mode 100644 index 342e72b57..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getreplicas.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getreplicas: - -getreplicas ------------ - -Usage ---------- - -.. include:: getreplicas.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getseeds.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getseeds.rst.txt deleted file mode 100644 index e3156300e..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getseeds: - -getseeds --------- - -Usage ---------- - -.. include:: getseeds.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getsstables.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getsstables.rst.txt deleted file mode 100644 index 1a866ccec..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getsstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getsstables: - -getsstables ------------ - -Usage ---------- - -.. include:: getsstables.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/getstreamthroughput.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/getstreamthroughput.rst.txt deleted file mode 100644 index 6d7dbc422..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/getstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getstreamthroughput: - -getstreamthroughput -------------------- - -Usage ---------- - -.. 
include:: getstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/gettimeout.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/gettimeout.rst.txt deleted file mode 100644 index 9d2e9edd8..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/gettimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettimeout: - -gettimeout ----------- - -Usage ---------- - -.. include:: gettimeout.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/gettraceprobability.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/gettraceprobability.rst.txt deleted file mode 100644 index 3f5783fd0..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/gettraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettraceprobability: - -gettraceprobability -------------------- - -Usage ---------- - -.. include:: gettraceprobability.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/gossipinfo.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/gossipinfo.rst.txt deleted file mode 100644 index cd3513a89..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/gossipinfo.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gossipinfo: - -gossipinfo ----------- - -Usage ---------- - -.. include:: gossipinfo.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/handoffwindow.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/handoffwindow.rst.txt deleted file mode 100644 index 87fe61dc2..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/handoffwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_handoffwindow: - -handoffwindow -------------- - -Usage ---------- - -.. include:: handoffwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/help.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/help.rst.txt deleted file mode 100644 index 8cab88f74..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/help.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_help: - -help ----- - -Usage ---------- - -.. include:: help.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/import.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/import.rst.txt deleted file mode 100644 index 532ba2dcd..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/import.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_import: - -import ------- - -Usage ---------- - -.. include:: import.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/info.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/info.rst.txt deleted file mode 100644 index 74012e93f..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/info.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_info: - -info ----- - -Usage ---------- - -.. include:: info.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/invalidatecountercache.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/invalidatecountercache.rst.txt deleted file mode 100644 index ce1a94d1e..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/invalidatecountercache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatecountercache: - -invalidatecountercache ----------------------- - -Usage ---------- - -.. 
include:: invalidatecountercache.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/invalidatekeycache.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/invalidatekeycache.rst.txt deleted file mode 100644 index 3e80511a6..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/invalidatekeycache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatekeycache: - -invalidatekeycache ------------------- - -Usage ---------- - -.. include:: invalidatekeycache.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/invalidaterowcache.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/invalidaterowcache.rst.txt deleted file mode 100644 index fd84f1d5a..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/invalidaterowcache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidaterowcache: - -invalidaterowcache ------------------- - -Usage ---------- - -.. include:: invalidaterowcache.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/join.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/join.rst.txt deleted file mode 100644 index a2819eb6a..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/join.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_join: - -join ----- - -Usage ---------- - -.. include:: join.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/listsnapshots.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/listsnapshots.rst.txt deleted file mode 100644 index d897cfa2b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/listsnapshots.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_listsnapshots: - -listsnapshots -------------- - -Usage ---------- - -.. include:: listsnapshots.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/move.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/move.rst.txt deleted file mode 100644 index 04b3bdba1..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/move.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_move: - -move ----- - -Usage ---------- - -.. include:: move.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/netstats.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/netstats.rst.txt deleted file mode 100644 index b94a09e7d..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/netstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_netstats: - -netstats --------- - -Usage ---------- - -.. include:: netstats.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/nodetool.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/nodetool.rst.txt deleted file mode 100644 index 8cd0d7cf1..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/nodetool.rst.txt +++ /dev/null @@ -1,249 +0,0 @@ -.. _nodetool - -Nodetool --------- - -Usage ---------- - -usage: nodetool [(-pwf | --password-file )] - [(-pp | --print-port)] [(-pw | --password )] - [(-p | --port )] [(-u | --username )] - [(-h | --host )] [] - -The most commonly used nodetool commands are: - - :doc:`assassinate` - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode - - :doc:`bootstrap` - Monitor/manage node's bootstrap process - - :doc:`cleanup` - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces - - :doc:`clearsnapshot` - Remove the snapshot with the given name from the given keyspaces. 
If no snapshotName is specified we will remove all snapshots - - :doc:`clientstats` - Print information about connected clients - - :doc:`compact` - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables - - :doc:`compactionhistory` - Print history of compaction - - :doc:`compactionstats` - Print statistics on compactions - - :doc:`decommission` - Decommission the *node I am connecting to* - - :doc:`describecluster` - Print the name, snitch, partitioner and schema version of a cluster - - :doc:`describering` - Shows the token ranges info of a given keyspace - - :doc:`disableauditlog` - Disable the audit log - - :doc:`disableautocompaction` - Disable autocompaction for the given keyspace and table - - :doc:`disablebackup` - Disable incremental backup - - :doc:`disablebinary` - Disable native transport (binary protocol) - - :doc:`disablefullquerylog` - Disable the full query log - - :doc:`disablegossip` - Disable gossip (effectively marking the node down) - - :doc:`disablehandoff` - Disable storing hinted handoffs - - :doc:`disablehintsfordc` - Disable hints for a data center - - :doc:`disableoldprotocolversions` - Disable old protocol versions - - :doc:`drain` - Drain the node (stop accepting writes and flush all tables) - - :doc:`enableauditlog` - Enable the audit log - - :doc:`enableautocompaction` - Enable autocompaction for the given keyspace and table - - :doc:`enablebackup` - Enable incremental backup - - :doc:`enablebinary` - Reenable native transport (binary protocol) - - :doc:`enablefullquerylog` - Enable full query logging, defaults for the options are configured in cassandra.yaml - - :doc:`enablegossip` - Reenable gossip - - :doc:`enablehandoff` - Reenable future hints storing on the current node - - :doc:`enablehintsfordc` - Enable hints for a data center that was previsouly disabled - - :doc:`enableoldprotocolversions` - Enable old protocol versions - - :doc:`failuredetector` - Shows the failure detector information for the cluster - - :doc:`flush` - Flush one or more tables - - :doc:`garbagecollect` - Remove deleted data from one or more tables - - :doc:`gcstats` - Print GC Statistics - - :doc:`getbatchlogreplaythrottle` - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster. - - :doc:`getcompactionthreshold` - Print min and max compaction thresholds for a given table - - :doc:`getcompactionthroughput` - Print the MB/s throughput cap for compaction in the system - - :doc:`getconcurrentcompactors` - Get the number of concurrent compactors in the system. 
- - :doc:`getconcurrentviewbuilders` - Get the number of concurrent view builders in the system - - :doc:`getendpoints` - Print the end points that owns the key - - :doc:`getinterdcstreamthroughput` - Print the Mb/s throughput cap for inter-datacenter streaming in the system - - :doc:`getlogginglevels` - Get the runtime logging levels - - :doc:`getmaxhintwindow` - Print the max hint window in ms - - :doc:`getreplicas` - Print replicas for a given key - - :doc:`getseeds` - Get the currently in use seed node IP list excluding the node IP - - :doc:`getsstables` - Print the sstable filenames that own the key - - :doc:`getstreamthroughput` - Print the Mb/s throughput cap for streaming in the system - - :doc:`gettimeout` - Print the timeout of the given type in ms - - :doc:`gettraceprobability` - Print the current trace probability value - - :doc:`gossipinfo` - Shows the gossip information for the cluster - - :doc:`handoffwindow` - Print current hinted handoff window - - :doc:`help` - Display help information - - :doc:`import` - Import new SSTables to the system - - :doc:`info` - Print node information (uptime, load, ...) - - :doc:`invalidatecountercache` - Invalidate the counter cache - - :doc:`invalidatekeycache` - Invalidate the key cache - - :doc:`invalidaterowcache` - Invalidate the row cache - - :doc:`join` - Join the ring - - :doc:`listsnapshots` - Lists all the snapshots along with the size on disk and true size. - - :doc:`move` - Move node on the token ring to a new token - - :doc:`netstats` - Print network information on provided host (connecting node by default) - - :doc:`pausehandoff` - Pause hints delivery process - - :doc:`profileload` - Low footprint profiling of activity for a period of time - - :doc:`proxyhistograms` - Print statistic histograms for network operations - - :doc:`rangekeysample` - Shows the sampled keys held across all keyspaces - - :doc:`rebuild` - Rebuild data by streaming from other nodes (similarly to bootstrap) - - :doc:`rebuild_index` - A full rebuild of native secondary indexes for a given table - - :doc:`refresh` - Load newly placed SSTables to the system without restart - - :doc:`refreshsizeestimates` - Refresh system.size_estimates - - :doc:`reloadlocalschema` - Reload local node schema from system tables - - :doc:`reloadseeds` - Reload the seed node list from the seed node provider - - :doc:`reloadssl` - Signals Cassandra to reload SSL certificates - - :doc:`reloadtriggers` - Reload trigger classes - - :doc:`relocatesstables` - Relocates sstables to the correct disk - - :doc:`removenode` - Show status of current node removal, force completion of pending removal or remove provided ID - - :doc:`repair` - Repair one or more tables - - :doc:`repair_admin` - - :doc:`list` - and fail incremental repair sessions - - :doc:`replaybatchlog` - Kick off batchlog replay and wait for finish - - :doc:`resetfullquerylog` - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX - - :doc:`resetlocalschema` - Reset node's local schema and resync - - :doc:`resumehandoff` - Resume hints delivery process - - :doc:`ring` - Print information about the token ring - - :doc:`scrub` - Scrub (rebuild sstables for) one or more tables - - :doc:`setbatchlogreplaythrottle` - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster. 
- - :doc:`setcachecapacity` - Set global key, row, and counter cache capacities (in MB units) - - :doc:`setcachekeystosave` - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable - - :doc:`setcompactionthreshold` - Set min and max compaction thresholds for a given table - - :doc:`setcompactionthroughput` - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling - - :doc:`setconcurrentcompactors` - Set number of concurrent compactors in the system. - - :doc:`setconcurrentviewbuilders` - Set the number of concurrent view builders in the system - - :doc:`sethintedhandoffthrottlekb` - Set hinted handoff throttle in kb per second, per delivery thread. - - :doc:`setinterdcstreamthroughput` - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling - - :doc:`setlogginglevel` - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters. - - :doc:`setmaxhintwindow` - Set the specified max hint window in ms - - :doc:`setstreamthroughput` - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling - - :doc:`settimeout` - Set the specified timeout in ms, or 0 to disable timeout - - :doc:`settraceprobability` - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default - - :doc:`snapshot` - Take a snapshot of specified keyspaces or a snapshot of the specified table - - :doc:`status` - Print cluster information (state, load, IDs, ...) - - :doc:`statusautocompaction` - - :doc:`status` - of autocompaction of the given keyspace and table - - :doc:`statusbackup` - Status of incremental backup - - :doc:`statusbinary` - Status of native transport (binary protocol) - - :doc:`statusgossip` - Status of gossip - - :doc:`statushandoff` - Status of storing future hints on the current node - - :doc:`stop` - Stop compaction - - :doc:`stopdaemon` - Stop cassandra daemon - - :doc:`tablehistograms` - Print statistic histograms for a given table - - :doc:`tablestats` - Print statistics on tables - - :doc:`toppartitions` - Sample and print the most active partitions - - :doc:`tpstats` - Print usage statistics of thread pools - - :doc:`truncatehints` - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified. - - :doc:`upgradesstables` - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version) - - :doc:`verify` - Verify (check data checksum for) one or more tables - - :doc:`version` - Print cassandra version - - :doc:`viewbuildstatus` - Show progress of a materialized view build - -See 'nodetool help ' for more information on a specific command. - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/pausehandoff.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/pausehandoff.rst.txt deleted file mode 100644 index 85ea996f9..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/pausehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_pausehandoff: - -pausehandoff ------------- - -Usage ---------- - -.. include:: pausehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/profileload.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/profileload.rst.txt deleted file mode 100644 index aff289f9f..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/profileload.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. 
_nodetool_profileload: - -profileload ------------ - -Usage ---------- - -.. include:: profileload.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/proxyhistograms.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/proxyhistograms.rst.txt deleted file mode 100644 index c4f333fb7..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/proxyhistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_proxyhistograms: - -proxyhistograms ---------------- - -Usage ---------- - -.. include:: proxyhistograms.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/rangekeysample.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/rangekeysample.rst.txt deleted file mode 100644 index 983ce93d0..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/rangekeysample.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rangekeysample: - -rangekeysample --------------- - -Usage ---------- - -.. include:: rangekeysample.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/rebuild.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/rebuild.rst.txt deleted file mode 100644 index 7a94ce4ed..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/rebuild.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild: - -rebuild -------- - -Usage ---------- - -.. include:: rebuild.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/rebuild_index.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/rebuild_index.rst.txt deleted file mode 100644 index a1ea4f5a2..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/rebuild_index.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild_index: - -rebuild_index -------------- - -Usage ---------- - -.. include:: rebuild_index.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/refresh.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/refresh.rst.txt deleted file mode 100644 index f68f040cd..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/refresh.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refresh: - -refresh -------- - -Usage ---------- - -.. include:: refresh.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/refreshsizeestimates.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/refreshsizeestimates.rst.txt deleted file mode 100644 index 2f3610afe..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/refreshsizeestimates.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refreshsizeestimates: - -refreshsizeestimates --------------------- - -Usage ---------- - -.. include:: refreshsizeestimates.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadlocalschema.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadlocalschema.rst.txt deleted file mode 100644 index 7ccc0c5e3..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadlocalschema: - -reloadlocalschema ------------------ - -Usage ---------- - -.. include:: reloadlocalschema.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadseeds.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadseeds.rst.txt deleted file mode 100644 index 5c6751d77..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadseeds: - -reloadseeds ------------ - -Usage ---------- - -.. 
include:: reloadseeds.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadssl.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadssl.rst.txt deleted file mode 100644 index 9781b295b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadssl.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadssl: - -reloadssl ---------- - -Usage ---------- - -.. include:: reloadssl.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadtriggers.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadtriggers.rst.txt deleted file mode 100644 index 2f7959d8c..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/reloadtriggers.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadtriggers: - -reloadtriggers --------------- - -Usage ---------- - -.. include:: reloadtriggers.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/relocatesstables.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/relocatesstables.rst.txt deleted file mode 100644 index 9951d3398..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/relocatesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_relocatesstables: - -relocatesstables ----------------- - -Usage ---------- - -.. include:: relocatesstables.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/removenode.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/removenode.rst.txt deleted file mode 100644 index fe0a041d1..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/removenode.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_removenode: - -removenode ----------- - -Usage ---------- - -.. include:: removenode.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/repair.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/repair.rst.txt deleted file mode 100644 index b43baba71..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/repair.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair: - -repair ------- - -Usage ---------- - -.. include:: repair.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/repair_admin.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/repair_admin.rst.txt deleted file mode 100644 index 1212c399d..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/repair_admin.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair_admin: - -repair_admin ------------- - -Usage ---------- - -.. include:: repair_admin.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/replaybatchlog.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/replaybatchlog.rst.txt deleted file mode 100644 index 073f091db..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/replaybatchlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_replaybatchlog: - -replaybatchlog --------------- - -Usage ---------- - -.. include:: replaybatchlog.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/resetfullquerylog.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/resetfullquerylog.rst.txt deleted file mode 100644 index a7661ec98..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/resetfullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetfullquerylog: - -resetfullquerylog ------------------ - -Usage ---------- - -.. 
include:: resetfullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/resetlocalschema.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/resetlocalschema.rst.txt deleted file mode 100644 index cd1b75d33..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/resetlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetlocalschema: - -resetlocalschema ----------------- - -Usage ---------- - -.. include:: resetlocalschema.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/resumehandoff.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/resumehandoff.rst.txt deleted file mode 100644 index 48a0451a3..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/resumehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resumehandoff: - -resumehandoff -------------- - -Usage ---------- - -.. include:: resumehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/ring.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/ring.rst.txt deleted file mode 100644 index 7b3c195bd..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/ring.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_ring: - -ring ----- - -Usage ---------- - -.. include:: ring.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/scrub.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/scrub.rst.txt deleted file mode 100644 index fc926eb14..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/scrub.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_scrub: - -scrub ------ - -Usage ---------- - -.. include:: scrub.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 2ae628a35..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setbatchlogreplaythrottle: - -setbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: setbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setcachecapacity.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setcachecapacity.rst.txt deleted file mode 100644 index 92c7d6389..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setcachecapacity.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachecapacity: - -setcachecapacity ----------------- - -Usage ---------- - -.. include:: setcachecapacity.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setcachekeystosave.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setcachekeystosave.rst.txt deleted file mode 100644 index 639179f99..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setcachekeystosave.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachekeystosave: - -setcachekeystosave ------------------- - -Usage ---------- - -.. include:: setcachekeystosave.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setcompactionthreshold.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setcompactionthreshold.rst.txt deleted file mode 100644 index 3a3e88b08..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthreshold: - -setcompactionthreshold ----------------------- - -Usage ---------- - -.. 
include:: setcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setcompactionthroughput.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setcompactionthroughput.rst.txt deleted file mode 100644 index 27185da30..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthroughput: - -setcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: setcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setconcurrency.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setconcurrency.rst.txt deleted file mode 100644 index 75b09531f..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrency: - -setconcurrency --------------- - -Usage ---------- - -.. include:: setconcurrency.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setconcurrentcompactors.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setconcurrentcompactors.rst.txt deleted file mode 100644 index 75969de4e..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentcompactors: - -setconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: setconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt deleted file mode 100644 index 26f53a171..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentviewbuilders: - -setconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: setconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt deleted file mode 100644 index 9986ca29a..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sethintedhandoffthrottlekb: - -sethintedhandoffthrottlekb --------------------------- - -Usage ---------- - -.. include:: sethintedhandoffthrottlekb.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt deleted file mode 100644 index ed406a7bc..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setinterdcstreamthroughput: - -setinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: setinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setlogginglevel.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setlogginglevel.rst.txt deleted file mode 100644 index eaa4030c3..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setlogginglevel.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setlogginglevel: - -setlogginglevel ---------------- - -Usage ---------- - -.. 
include:: setlogginglevel.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setmaxhintwindow.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setmaxhintwindow.rst.txt deleted file mode 100644 index 0c62c3289..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setmaxhintwindow: - -setmaxhintwindow ----------------- - -Usage ---------- - -.. include:: setmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/setstreamthroughput.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/setstreamthroughput.rst.txt deleted file mode 100644 index 76447f112..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/setstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setstreamthroughput: - -setstreamthroughput -------------------- - -Usage ---------- - -.. include:: setstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/settimeout.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/settimeout.rst.txt deleted file mode 100644 index 4ec9a6e4d..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/settimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settimeout: - -settimeout ----------- - -Usage ---------- - -.. include:: settimeout.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/settraceprobability.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/settraceprobability.rst.txt deleted file mode 100644 index a95b48560..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/settraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settraceprobability: - -settraceprobability -------------------- - -Usage ---------- - -.. include:: settraceprobability.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/sjk.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/sjk.rst.txt deleted file mode 100644 index 19bf1d605..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/sjk.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sjk: - -sjk ---- - -Usage ---------- - -.. include:: sjk.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/snapshot.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/snapshot.rst.txt deleted file mode 100644 index 097a655b2..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/snapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_snapshot: - -snapshot --------- - -Usage ---------- - -.. include:: snapshot.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/status.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/status.rst.txt deleted file mode 100644 index 4d3050ea1..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/status.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_status: - -status ------- - -Usage ---------- - -.. include:: status.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/statusautocompaction.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/statusautocompaction.rst.txt deleted file mode 100644 index 3748e0e4e..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/statusautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusautocompaction: - -statusautocompaction --------------------- - -Usage ---------- - -.. 
include:: statusautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/statusbackup.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/statusbackup.rst.txt deleted file mode 100644 index 6546ec07f..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/statusbackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbackup: - -statusbackup ------------- - -Usage ---------- - -.. include:: statusbackup.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/statusbinary.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/statusbinary.rst.txt deleted file mode 100644 index 0bb5011c3..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/statusbinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbinary: - -statusbinary ------------- - -Usage ---------- - -.. include:: statusbinary.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/statusgossip.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/statusgossip.rst.txt deleted file mode 100644 index 7dc57eda7..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/statusgossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusgossip: - -statusgossip ------------- - -Usage ---------- - -.. include:: statusgossip.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/statushandoff.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/statushandoff.rst.txt deleted file mode 100644 index aa1c4eb6b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/statushandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statushandoff: - -statushandoff -------------- - -Usage ---------- - -.. include:: statushandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/stop.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/stop.rst.txt deleted file mode 100644 index 1e44dbe79..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/stop.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stop: - -stop ----- - -Usage ---------- - -.. include:: stop.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/stopdaemon.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/stopdaemon.rst.txt deleted file mode 100644 index 4ae951098..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/stopdaemon.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stopdaemon: - -stopdaemon ----------- - -Usage ---------- - -.. include:: stopdaemon.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/tablehistograms.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/tablehistograms.rst.txt deleted file mode 100644 index 79d2b4ccb..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/tablehistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablehistograms: - -tablehistograms ---------------- - -Usage ---------- - -.. include:: tablehistograms.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/tablestats.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/tablestats.rst.txt deleted file mode 100644 index 5b2c02d98..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/tablestats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablestats: - -tablestats ----------- - -Usage ---------- - -.. 
include:: tablestats.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/toppartitions.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/toppartitions.rst.txt deleted file mode 100644 index 711816313..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/toppartitions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_toppartitions: - -toppartitions -------------- - -Usage ---------- - -.. include:: toppartitions.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/tpstats.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/tpstats.rst.txt deleted file mode 100644 index c6b662012..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/tpstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tpstats: - -tpstats -------- - -Usage ---------- - -.. include:: tpstats.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/truncatehints.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/truncatehints.rst.txt deleted file mode 100644 index 4b75391a6..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/truncatehints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_truncatehints: - -truncatehints -------------- - -Usage ---------- - -.. include:: truncatehints.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/upgradesstables.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/upgradesstables.rst.txt deleted file mode 100644 index 505cc148a..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/upgradesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_upgradesstables: - -upgradesstables ---------------- - -Usage ---------- - -.. include:: upgradesstables.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/verify.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/verify.rst.txt deleted file mode 100644 index dbd152cfb..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/verify.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_verify: - -verify ------- - -Usage ---------- - -.. include:: verify.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/version.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/version.rst.txt deleted file mode 100644 index fca4e3f44..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/version.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_version: - -version -------- - -Usage ---------- - -.. include:: version.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/nodetool/viewbuildstatus.rst.txt b/src/doc/4.0-alpha1/_sources/tools/nodetool/viewbuildstatus.rst.txt deleted file mode 100644 index 758fe502b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/nodetool/viewbuildstatus.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_viewbuildstatus: - -viewbuildstatus ---------------- - -Usage ---------- - -.. include:: viewbuildstatus.txt - :literal: - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/index.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/index.rst.txt deleted file mode 100644 index b9e483f45..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -SSTable Tools -============= - -This section describes the functionality of the various sstable tools. - -Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped. - -.. toctree:: - :maxdepth: 2 - - sstabledump - sstableexpiredblockers - sstablelevelreset - sstableloader - sstablemetadata - sstableofflinerelevel - sstablerepairedset - sstablescrub - sstablesplit - sstableupgrade - sstableutil - sstableverify - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstabledump.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstabledump.rst.txt deleted file mode 100644 index 8f38afa09..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstabledump.rst.txt +++ /dev/null @@ -1,294 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstabledump ------------ - -Dump contents of a given SSTable to standard output in JSON format. - -You must supply exactly one sstable. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstabledump - -=================================== ================================================================================ --d CQL row per line internal representation --e Enumerate partition keys only --k Partition key --x Excluded partition key(s) --t Print raw timestamps instead of iso8601 date strings --l Output each row as a separate JSON object -=================================== ================================================================================ - -If necessary, use sstableutil first to find out the sstables used by a table. - -Dump entire table -^^^^^^^^^^^^^^^^^ - -Dump the entire table without any options. 
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26 - - cat eventlog_dump_2018Jul26 - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - ] - -Dump table in a more manageable format -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. ref: https://issues.apache.org/jira/browse/CASSANDRA-13848 - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines - - cat eventlog_dump_2018Jul26_justlines - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Dump only keys -^^^^^^^^^^^^^^ - -Dump only the keys by using the -e option. 
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys - - cat eventlog_dump_2018Jul26b - [ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ] - -Dump row for a single key -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a single key using the -k option. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey - - cat eventlog_dump_2018Jul26_singlekey - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Exclude a key or keys in dump of rows -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a table except for the rows excluded with the -x option. Multiple keys can be used. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e > eventlog_dump_2018Jul26_excludekeys - - cat eventlog_dump_2018Jul26_excludekeys - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Display raw timestamps -^^^^^^^^^^^^^^^^^^^^^^ - -By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times - - cat eventlog_dump_2018Jul26_times - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "1532118147028809" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - - -Display internal structure in output -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump the table in a format that reflects the internal structure. 
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d - - cat eventlog_dump_2018Jul26_d - [3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]: | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711] - [d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]: | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522] - [cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]: | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809] - - - - - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableexpiredblockers.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstableexpiredblockers.rst.txt deleted file mode 100644 index ec837944c..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableexpiredblockers.rst.txt +++ /dev/null @@ -1,48 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableexpiredblockers ----------------------- - -During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable. - -This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-10015 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ - -sstableexpiredblockers
- -Output blocked sstables -^^^^^^^^^^^^^^^^^^^^^^^ - -If the sstables exist for the table, but no tables have older data than the newest tombstone in an expired sstable, the script will return nothing. - -Otherwise, the script will return ` blocks <#> expired sstables from getting dropped` followed by a list of the blocked sstables. - -Example:: - - sstableexpiredblockers keyspace1 standard1 - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablelevelreset.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstablelevelreset.rst.txt deleted file mode 100644 index 7069094dd..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablelevelreset.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablelevelreset ------------------ - -If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration. - -See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5271 - -Usage -^^^^^ - -sstablelevelreset --really-reset
-
-The really-reset flag is required to ensure this intrusive command is not run accidentally.
-
-Table not found
-^^^^^^^^^^^^^^^
-
-If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error.
-
-Example::
-
-    ColumnFamily not found: keyspace/evenlog.
-
-Table has no sstables
-^^^^^^^^^^^^^^^^^^^^^
-
-Example::
-
-    Found no sstables, did you give the correct keyspace/table?
-
-
-Table already at level 0
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-The script will not set the level if it is already set to 0.
-
-Example::
-
-    Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0
-
-Table levels reduced to 0
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If the level is not already 0, then this will reset it to 0.
-
-Example::
-
-    sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-    SSTable Level: 1
-
-    sstablelevelreset --really-reset keyspace eventlog
-    Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-    sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-    SSTable Level: 0
-
-
-
-
-
-
-
diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableloader.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstableloader.rst.txt
deleted file mode 100644
index a9b37342c..000000000
--- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableloader.rst.txt
+++ /dev/null
@@ -1,273 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-sstableloader
----------------
-
-Bulk-load the sstables found in the given directory to the configured cluster. The parent directories of the provided path are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files.
-
-Several of the options listed below don't work quite as intended, and in those cases, workarounds are mentioned for specific use cases.
-
-To avoid the sstables being compacted while they are read for loading, place the files in an alternate keyspace/table path rather than in the data directory.
-
-ref: https://issues.apache.org/jira/browse/CASSANDRA-1278
-
-Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.
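Because the last two path components name the target keyspace and table, a common preparation step (shown here purely as an illustration) is to stage the files under a throw-away `<keyspace>/<table>` directory outside the live data directory before loading. The staging path below is an assumption; the snapshot directory and contact node mirror the examples later in this section.

```bash
# Sketch only: stage sstables under <keyspace>/<table> outside the data directory,
# then let sstableloader derive the target keyspace/table from the path.
mkdir -p /tmp/load-staging/keyspace1/standard1
cp /var/lib/cassandra/data/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/snapshots/1535397029191/* \
   /tmp/load-staging/keyspace1/standard1/

sstableloader --nodes 172.17.0.2 /tmp/load-staging/keyspace1/standard1/
```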
- -Usage -^^^^^ - -sstableloader - -=================================================== ================================================================================ --d, --nodes Required. Try to connect to these hosts (comma-separated) - initially for ring information --u, --username username for Cassandra authentication --pw, --password password for Cassandra authentication --p, --port port used for native connection (default 9042) --sp, --storage-port port used for internode communication (default 7000) --ssp, --ssl-storage-port port used for TLS internode communication (default 7001) ---no-progress don't display progress --t, --throttle throttle speed in Mbits (default unlimited) --idct, --inter-dc-throttle inter-datacenter throttle speed in Mbits (default unlimited) --cph, --connections-per-host number of concurrent connections-per-host --i, --ignore don't stream to this (comma separated) list of nodes --alg, --ssl-alg Client SSL: algorithm (default: SunX509) --ciphers, --ssl-ciphers Client SSL: comma-separated list of encryption suites to use --ks, --keystore Client SSL: full path to keystore --kspw, --keystore-password Client SSL: password of the keystore --st, --store-type Client SSL: type of store --ts, --truststore Client SSL: full path to truststore --tspw, --truststore-password Client SSL: password of the truststore --prtcl, --ssl-protocol Client SSL: connections protocol to use (default: TLS) --ap, --auth-provider custom AuthProvider class name for cassandra authentication --f, --conf-path cassandra.yaml file path for streaming throughput and client/server SSL --v, --verbose verbose output --h, --help display this help message -=================================================== ================================================================================ - -You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options. - -Load sstables from a Snapshot -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Copy the snapshot sstables into an accessible directory and use sstableloader to restore them. - -Example:: - - cp snapshots/1535397029191/* /path/to/keyspace1/standard1/ - - sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4700000 - Total duration (ms): : 4390 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -The -d or --nodes option is required, or the script will not run. 
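In scripts it can be convenient (purely as an illustration, not part of the original page) to fail fast when no contact host has been supplied, since the loader itself aborts in that case, as the captured output below shows.

```bash
# Illustrative guard: refuse to call sstableloader without a contact node.
: "${NODES:?set NODES to a comma-separated list of initial hosts, e.g. 172.17.0.2}"
sstableloader --nodes "${NODES}" /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
```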
- -Example:: - - sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Initial hosts must be specified (-d) - -Use a Config File for SSL Clusters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool to the cassandra.yaml with the relevant server_encryption_options (e.g., truststore location, algorithm). This will work better than passing individual ssl options shown above to sstableloader on the command line. - -Example:: - - sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 9.165KiB/s (avg: 9.165KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 5.147MiB/s (avg: 18.299KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 9.751MiB/s (avg: 27.423KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 8.203MiB/s (avg: 36.524KiB/s) - ... - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 9356 ms - Average transfer rate : 480.105KiB/s - Peak transfer rate : 586.410KiB/s - -Hide Progress Output -^^^^^^^^^^^^^^^^^^^^ - -To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option. - -Example:: - - sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2] - -Get More Detail -^^^^^^^^^^^^^^^ - -Using the --verbose option will provide much more progress output. 
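If you want to keep that output for later inspection, one illustrative approach is to tee it into a timestamped log file while the load runs; the command and path here mirror the captured run below, while the log file name is just an example.

```bash
# Illustrative: keep the verbose progress output in a log file as well as on screen.
sstableloader --nodes 172.17.0.2 --verbose \
  /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/ \
  2>&1 | tee "sstableloader-$(date +%Y%m%d-%H%M%S).log"
```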
- -Example:: - - sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 12.056KiB/s (avg: 12.056KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 9.092MiB/s (avg: 24.081KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 18.832MiB/s (avg: 36.099KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 2.253MiB/s (avg: 47.882KiB/s) - progress: [/172.17.0.2]0:0/1 7 % total: 7% 6.388MiB/s (avg: 59.743KiB/s) - progress: [/172.17.0.2]0:0/1 8 % total: 8% 14.606MiB/s (avg: 71.635KiB/s) - progress: [/172.17.0.2]0:0/1 9 % total: 9% 8.880MiB/s (avg: 83.465KiB/s) - progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s) - progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s) - progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s) - progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s) - progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s) - progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s) - progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s) - progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s) - progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s) - progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s) - progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s) - progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s) - progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s) - progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s) - progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s) - progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s) - progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s) - progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s) - progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s) - progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s) - progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s) - progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s) - progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s) - progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s) - progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s) - progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s) - progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s) - progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s) - progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s) - progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s) - progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s) - progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s) - progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s) - progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s 
(avg: 462.545KiB/s) - progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s) - progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s) - progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s) - progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s) - progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s) - progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s) - progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s) - progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s) - progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s) - progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s) - progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s) - progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s) - progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s) - progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s) - progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s) - progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s) - progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s) - progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s) - progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s) - progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s) - progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s) - progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s) - progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s) - progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s) - progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s) - progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s) - progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s) - progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s) - progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 6706 ms - Average transfer rate : 669.835KiB/s - Peak transfer rate : 767.802KiB/s - - -Throttling Load -^^^^^^^^^^^^^^^ - -To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below. 
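As a rough back-of-the-envelope check (not part of the original page): a --throttle value of 1 means roughly 1 megabit per second, about 0.125 MB per second, so the roughly 4.6 MB sstable in the example below should take on the order of 37 seconds, which matches the reported total duration.

```bash
# Back-of-the-envelope: bytes transferred / (1 Mbit/s expressed in bytes/s).
awk 'BEGIN { printf "%.1f seconds\n", 4595705 / (1000000 / 8) }'   # ~36.8 seconds
```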
- -Example:: - - sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 0 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 37634 - Average transfer rate (MB/s): : 0 - Peak transfer rate (MB/s): : 0 - -Speeding up Load -^^^^^^^^^^^^^^^^ - -To speed up the load process, the number of connections per host can be increased. - -Example:: - - sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 100 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 3486 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -This small data set doesn't benefit much from the increase in connections per host, but note that the total duration has decreased in this example. - - - - - - - - - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablemetadata.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstablemetadata.rst.txt deleted file mode 100644 index 0a7a42211..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablemetadata.rst.txt +++ /dev/null @@ -1,300 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablemetadata ---------------- - -Print information about an sstable from the related Statistics.db and Summary.db files to standard output. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
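The full output (shown under "Print all the metadata" below) is long. Purely as an illustration, a handful of commonly checked fields can be filtered out of it, reusing the grep style that appears later on this page; the sstable path matches the example below.

```bash
# Illustrative: pull a few frequently inspected fields out of sstablemetadata output.
sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db \
  | grep -E 'Minimum timestamp|Maximum timestamp|Estimated droppable tombstones|SSTable Level|Repaired at'
```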
- -Usage -^^^^^ - -sstablemetadata - -========================= ================================================================================ ---gc_grace_seconds The gc_grace_seconds to use when calculating droppable tombstones -========================= ================================================================================ - -Print all the metadata -^^^^^^^^^^^^^^^^^^^^^^ - -Run sstablemetadata against the *Data.db file(s) related to a table. If necessary, find the *Data.db file(s) using sstableutil. - -Example:: - - sstableutil keyspace1 standard1 | grep Data - /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - SSTable: /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big - Partitioner: org.apache.cassandra.dht.Murmur3Partitioner - Bloom Filter FP chance: 0.010000 - Minimum timestamp: 1535025576141000 - Maximum timestamp: 1535025604309000 - SSTable min local deletion time: 2147483647 - SSTable max local deletion time: 2147483647 - Compressor: org.apache.cassandra.io.compress.LZ4Compressor - TTL min: 86400 - TTL max: 86400 - First token: -9223004712949498654 (key=39373333373831303130) - Last token: 9222554117157811897 (key=4f3438394e39374d3730) - Estimated droppable tombstones: 0.9188263888888889 - SSTable Level: 0 - Repaired at: 0 - Replay positions covered: {CommitLogPosition(segmentId=1535025390651, position=226400)=CommitLogPosition(segmentId=1535025390651, position=6849139)} - totalColumnsSet: 100000 - totalRows: 20000 - Estimated tombstone drop times: - 1535039100: 80390 - 1535039160: 5645 - 1535039220: 13965 - Count Row Size Cell Count - 1 0 0 - 2 0 0 - 3 0 0 - 4 0 0 - 5 0 20000 - 6 0 0 - 7 0 0 - 8 0 0 - 10 0 0 - 12 0 0 - 14 0 0 - 17 0 0 - 20 0 0 - 24 0 0 - 29 0 0 - 35 0 0 - 42 0 0 - 50 0 0 - 60 0 0 - 72 0 0 - 86 0 0 - 103 0 0 - 124 0 0 - 149 0 0 - 179 0 0 - 215 0 0 - 258 20000 0 - 310 0 0 - 372 0 0 - 446 0 0 - 535 0 0 - 642 0 0 - 770 0 0 - 924 0 0 - 1109 0 0 - 1331 0 0 - 1597 0 0 - 1916 0 0 - 2299 0 0 - 2759 0 0 - 3311 0 0 - 3973 0 0 - 4768 0 0 - 5722 0 0 - 6866 0 0 - 8239 0 0 - 9887 0 0 - 11864 0 0 - 14237 0 0 - 17084 0 0 - 20501 0 0 - 24601 0 0 - 29521 0 0 - 35425 0 0 - 42510 0 0 - 51012 0 0 - 61214 0 0 - 73457 0 0 - 88148 0 0 - 105778 0 0 - 126934 0 0 - 152321 0 0 - 182785 0 0 - 219342 0 0 - 263210 0 0 - 315852 0 0 - 379022 0 0 - 454826 0 0 - 545791 0 0 - 654949 0 0 - 785939 0 0 - 943127 0 0 - 1131752 0 0 - 1358102 0 0 - 1629722 0 0 - 1955666 0 0 - 2346799 0 0 - 2816159 0 0 - 3379391 0 0 - 4055269 0 0 - 4866323 0 0 - 5839588 0 0 - 7007506 0 0 - 8409007 0 0 - 10090808 0 0 - 12108970 0 0 - 14530764 0 0 - 17436917 0 0 - 20924300 0 0 - 25109160 0 0 - 30130992 0 0 - 36157190 0 0 - 43388628 0 0 - 52066354 0 0 - 62479625 0 0 - 74975550 0 0 - 89970660 0 0 - 107964792 0 0 - 129557750 0 0 - 155469300 0 0 - 186563160 0 0 - 223875792 0 0 - 268650950 0 0 - 322381140 0 0 - 386857368 0 0 - 464228842 0 0 - 557074610 0 0 - 668489532 0 0 - 802187438 0 0 - 962624926 0 0 - 1155149911 0 0 - 1386179893 0 0 - 1663415872 0 0 - 1996099046 0 0 - 2395318855 0 0 - 2874382626 0 - 3449259151 0 - 4139110981 0 - 4966933177 0 - 5960319812 0 - 7152383774 0 - 8582860529 0 - 10299432635 0 - 12359319162 0 - 14831182994 0 - 17797419593 0 - 21356903512 0 - 25628284214 0 - 30753941057 0 - 36904729268 0 - 44285675122 0 - 53142810146 0 - 63771372175 0 - 76525646610 0 - 91830775932 0 - 110196931118 0 - 132236317342 0 
- 158683580810 0 - 190420296972 0 - 228504356366 0 - 274205227639 0 - 329046273167 0 - 394855527800 0 - 473826633360 0 - 568591960032 0 - 682310352038 0 - 818772422446 0 - 982526906935 0 - 1179032288322 0 - 1414838745986 0 - Estimated cardinality: 20196 - EncodingStats minTTL: 0 - EncodingStats minLocalDeletionTime: 1442880000 - EncodingStats minTimestamp: 1535025565275000 - KeyType: org.apache.cassandra.db.marshal.BytesType - ClusteringTypes: [org.apache.cassandra.db.marshal.UTF8Type] - StaticColumns: {C3:org.apache.cassandra.db.marshal.BytesType, C4:org.apache.cassandra.db.marshal.BytesType, C0:org.apache.cassandra.db.marshal.BytesType, C1:org.apache.cassandra.db.marshal.BytesType, C2:org.apache.cassandra.db.marshal.BytesType} - RegularColumns: {} - -Specify gc grace seconds -^^^^^^^^^^^^^^^^^^^^^^^^ - -To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn't access the schema directly, this is a way to more accurately estimate droppable tombstones -- for example, if you pass in gc_grace_seconds matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the curent machine time (in seconds). - -ref: https://issues.apache.org/jira/browse/CASSANDRA-12208 - -Example:: - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4 - Estimated tombstone drop times: - 1536599100: 1 - 1536599640: 1 - 1536599700: 2 - - echo $(date +%s) - 1536602005 - - # if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 4.0E-5 - - # if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 9.61111111111111E-6 - - # if gc_grace_seconds was configured at 100, none of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 0.0 - -Explanation of each value printed above -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -=================================== ================================================================================ - Value Explanation -=================================== ================================================================================ -SSTable prefix of the sstable filenames related to this sstable -Partitioner partitioner type used to distribute data across nodes; defined in cassandra.yaml -Bloom Filter FP precision of Bloom filter used in reads; defined in the table definition -Minimum timestamp minimum timestamp of any entry in this sstable, in epoch microseconds -Maximum timestamp maximum timestamp of any entry in this sstable, in epoch microseconds -SSTable min local deletion time minimum timestamp of deletion date, based on TTL, in epoch seconds -SSTable max local deletion time maximum timestamp of deletion date, based on TTL, in epoch seconds -Compressor blank (-) by 
default; if not blank, indicates type of compression enabled on the table -TTL min time-to-live in seconds; default 0 unless defined in the table definition -TTL max time-to-live in seconds; default 0 unless defined in the table definition -First token lowest token and related key found in the sstable summary -Last token highest token and related key found in the sstable summary -Estimated droppable tombstones ratio of tombstones to columns, using configured gc grace seconds if relevant -SSTable level compaction level of this sstable, if leveled compaction (LCS) is used -Repaired at the timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds -Replay positions covered the interval of time and commitlog positions related to this sstable -totalColumnsSet number of cells in the table -totalRows number of rows in the table -Estimated tombstone drop times approximate number of rows that will expire, ordered by epoch seconds -Count Row Size Cell Count two histograms in two columns; one represents distribution of Row Size - and the other represents distribution of Cell Count -Estimated cardinality an estimate of unique values, used for compaction -EncodingStats* minTTL in epoch milliseconds -EncodingStats* minLocalDeletionTime in epoch seconds -EncodingStats* minTimestamp in epoch microseconds -KeyType the type of partition key, useful in reading and writing data - from/to storage; defined in the table definition -ClusteringTypes the type of clustering key, useful in reading and writing data - from/to storage; defined in the table definition -StaticColumns a list of the shared columns in the table -RegularColumns a list of non-static, non-key columns in the table -=================================== ================================================================================ -* For the encoding stats values, the delta of this and the current epoch time is used when encoding and storing data in the most optimal way. - - - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableofflinerelevel.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstableofflinerelevel.rst.txt deleted file mode 100644 index c031d2987..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableofflinerelevel.rst.txt +++ /dev/null @@ -1,95 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableofflinerelevel ---------------------- - -When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-8301 - -The way this is done is: sstables are storted by their last token. 
Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size):: - - L3 [][][][][][][][][][][] - L2 [ ][ ][ ][ ] - L1 [ ][ ] - L0 [ ] - -Will look like this after being dropped to L0 and sorted by last token (and, to illustrate overlap, the overlapping ones are put on a new line):: - - [][][] - [ ][][][] - [ ] - [ ] - ... - -Then, we start iterating from the smallest last-token and adding all sstables that do not cause an overlap to a level. We will reconstruct the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below. - -If we end up with more levels than expected, we put all levels exceeding the expected in L0, for example, original L0 files will most likely be put in a level of its own since they most often overlap many other sstables. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ - -sstableofflinerelevel [--dry-run]
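Putting the two modes together, an illustrative workflow with the examples that follow is to preview the proposed change first and only then apply it, with Cassandra stopped in both cases.

```bash
# Illustrative: preview the new leveling, then apply it (Cassandra must be stopped).
sstableofflinerelevel --dry-run keyspace eventlog
sstableofflinerelevel keyspace eventlog
```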
- -Doing a dry run -^^^^^^^^^^^^^^^ - -Use the --dry-run option to see the current level distribution and predicted level after the change. - -Example:: - - sstableofflinerelevel --dry-run keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - Potential leveling: - L0=1 - L1=1 - -Running a relevel -^^^^^^^^^^^^^^^^^ - -Example:: - - sstableofflinerelevel keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - New leveling: - L0=1 - L1=1 - -Keyspace or table not found -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If an invalid keyspace and/or table is provided, an exception will be thrown. - -Example:: - - sstableofflinerelevel --dry-run keyspace evenlog - - Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog - at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96) - - - - - - - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablerepairedset.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstablerepairedset.rst.txt deleted file mode 100644 index ebacef335..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablerepairedset.rst.txt +++ /dev/null @@ -1,79 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablerepairedset ------------------- - -Repairs can take a very long time in some environments, for large sizes of data. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can be run on only un-repaired sstables if desired. - -Note that running a repair (e.g., via nodetool repair) doesn't set the status of this metadata. Only setting the status of this metadata via this tool does. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5351 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablerepairedset --really-set [-f | ] - -=================================== ================================================================================ ---really-set required if you want to really set the status ---is-repaired set the repairedAt status to the last modified time ---is-unrepaired set the repairedAt status to 0 --f use a file containing a list of sstables as the input -=================================== ================================================================================ - -Set a lot of sstables to unrepaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are many ways to do this programmatically. This way would likely include variables for the keyspace and table. 
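As a sketch of that idea (the table-id suffix below is only an example value), the find/xargs pattern from the example that follows can be parameterised with shell variables for the keyspace and table directory.

```bash
# Illustrative: same find/xargs pattern as below, with the paths pulled into variables.
KEYSPACE="keyspace1"
TABLE_DIR="standard1-d936bd20a17c11e8bc92a55ed562cd82"   # <table>-<tableid>, example value
DATA_DIR="/var/lib/cassandra/data/${KEYSPACE}/${TABLE_DIR}"

find "${DATA_DIR}" -name "*Data.db" -print0 \
  | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %
```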
-
-Example::
-
-    find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %
-
-Set one to many sstables to repaired status
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice.
-
-Example::
-
-    nodetool repair keyspace1 standard1
-    find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired %
-
-Print metadata showing repaired status
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-sstablemetadata can be used to view the status set or unset using this command.
-
-Example::
-
-    sstablerepairedset --really-set --is-repaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db
-    sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at"
-    Repaired at: 1534443974000
-
-    sstablerepairedset --really-set --is-unrepaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db
-    sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at"
-    Repaired at: 0
-
-Using command in a script
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If you know you ran repair 2 weeks ago, you can do something like the following::
-
-    sstablerepairedset --really-set --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14)
-
diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablescrub.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstablescrub.rst.txt
deleted file mode 100644
index 0bbda9f32..000000000
--- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablescrub.rst.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-sstablescrub
-------------
-
-Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair.
-
-ref: https://issues.apache.org/jira/browse/CASSANDRA-4321
-
-Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.
-
-Usage
-^^^^^
-sstablescrub <keyspace> <table>
- -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --m,--manifest-check only check and repair the leveled manifest, without actually scrubbing the sstables --n,--no-validate do not validate columns using column validator --r,--reinsert-overflowed-ttl Rewrites rows with overflowed expiration date affected by CASSANDRA-14092 - with the maximum supported expiration date of 2038-01-19T03:14:06+00:00. The rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows. --s,--skip-corrupted skip corrupt rows in counter tables --v,--verbose verbose output -=================================== ================================================================================ - -Basic Scrub -^^^^^^^^^^^ - -The scrub without options will do a snapshot first, then write all non-corrupted files to a new sstable. - -Example:: - - sstablescrub keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped - Checking leveled manifest - -Scrub without Validation -^^^^^^^^^^^^^^^^^^^^^^^^ -ref: https://issues.apache.org/jira/browse/CASSANDRA-9406 - -Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but not corrupt. This data usually doesn not present any errors to the client. - -Example:: - - sstablescrub --no-validate keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned - -Skip Corrupted Counter Tables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5930 - -If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+. - -Example:: - - sstablescrub --skip-corrupted keyspace1 counter1 - -Dealing with Overflow Dates -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-14092 - -Using the option --reinsert-overflowed-ttl allows a rewriting of rows that had a max TTL going over the maximum (causing an overflow). - -Example:: - - sstablescrub --reinsert-overflowed-ttl keyspace1 counter1 - -Manifest Check -^^^^^^^^^^^^^^ - -As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata. - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablesplit.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstablesplit.rst.txt deleted file mode 100644 index 5386fa48b..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstablesplit.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -.. 
Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablesplit ------------- - -Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-4766 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablesplit - -=================================== ================================================================================ ---debug display stack traces --h, --help display this help message ---no-snapshot don't snapshot the sstables before splitting --s, --size maximum size in MB for the output sstables (default: 50) -=================================== ================================================================================ - -This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped. - -Split a File -^^^^^^^^^^^^ - -Split a large sstable into smaller sstables. By default, unless the option --no-snapshot is added, a snapshot will be done of the original sstable and placed in the snapshots folder. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - - Pre-split sstables snapshotted into snapshot pre-split-1533144514795 - -Split Multiple Files -^^^^^^^^^^^^^^^^^^^^ - -Wildcards can be used in the filename portion of the command to split multiple files. - -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1* - -Attempt to Split a Small File -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the file is already smaller than the split size provided, the sstable will not be split. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB) - No sstables needed splitting. - -Split a File into Specified Size -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB). Specify only the number, not the units. For example --size 50 is correct, but --size 50MB is not. 
- -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db - Pre-split sstables snapshotted into snapshot pre-split-1533144996008 - - -Split Without Snapshot -^^^^^^^^^^^^^^^^^^^^^^ - -By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it. - -Example:: - - sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db - -Note: There is no output, but you can see the results in your file system. - - - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableupgrade.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstableupgrade.rst.txt deleted file mode 100644 index 66386aca1..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableupgrade.rst.txt +++ /dev/null @@ -1,137 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableupgrade --------------- - -Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version. - -The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableupgrade
[snapshot_name] - -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --k,--keep-source do not delete the source sstables -=================================== ================================================================================ - -Rewrite tables to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Start with a set of sstables in one version of Cassandra:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables:: - - sstableupgrade keyspace1 standard1 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 13:48 backups - -rw-r--r-- 1 user wheel 292 Aug 22 13:48 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4599475 Aug 22 13:48 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:48 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 13:48 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330807 Aug 22 13:48 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 13:48 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 13:48 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 13:48 mc-2-big-TOC.txt - -Rewrite tables to the current Cassandra version, and keep tables in old version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Again, starting with a set of sstables in one version:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:: - - sstableupgrade keyspace1 standard1 -k - Found 1 sstables that need upgrading. 
- Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 14:00 backups - -rw-r--r--@ 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r--@ 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r--@ 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r--@ 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r--@ 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r--@ 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r--@ 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r--@ 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -rw-r--r-- 1 user wheel 292 Aug 22 14:01 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4596370 Aug 22 14:01 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 14:01 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 14:01 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330801 Aug 22 14:01 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 14:01 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 14:01 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 14:01 mc-2-big-TOC.txt - - -Rewrite a snapshot to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Find the snapshot name:: - - nodetool listsnapshots - - Snapshot Details: - Snapshot name Keyspace name Column family name True size Size on disk - ... - 1534962986979 keyspace1 standard1 5.85 MB 5.85 MB - -Then rewrite the snapshot:: - - sstableupgrade keyspace1 standard1 1534962986979 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete. - - - - - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableutil.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstableutil.rst.txt deleted file mode 100644 index 30becd0e0..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableutil.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
-
-sstableutil
------------
-
-List sstable files for the provided table.
-
-ref: https://issues.apache.org/jira/browse/CASSANDRA-7066
-
-Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.
-
-Usage
-^^^^^
-sstableutil <keyspace> <table>
- -=================================== ================================================================================ --c, --cleanup clean up any outstanding transactions --d, --debug display stack traces --h, --help display this help message --o, --oplog include operation logs --t, --type all (list all files, final or temporary), tmp (list temporary files only), - final (list final files only), --v, --verbose verbose output -=================================== ================================================================================ - -List all sstables -^^^^^^^^^^^^^^^^^ - -The basic command lists the sstables associated with a given keyspace/table. - -Example:: - - sstableutil keyspace eventlog - Listing files... - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt - -List only temporary sstables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `tmp` will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra. - -List only final sstables -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `final` will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option. - -Include transaction logs -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -o option will include transaction logs in the listing, in the format above. - -Clean up sstables -^^^^^^^^^^^^^^^^^ - -Using the -c option removes any transactions left over from incomplete writes or compactions. - -From the 3.0 upgrade notes: - -New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. 
Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix "add:" or "remove:". They also contain a special line "commit", only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the "add" prefix) and delete the old sstables (those with the "remove" prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first. - - - diff --git a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableverify.rst.txt b/src/doc/4.0-alpha1/_sources/tools/sstable/sstableverify.rst.txt deleted file mode 100644 index dad3f4487..000000000 --- a/src/doc/4.0-alpha1/_sources/tools/sstable/sstableverify.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableverify -------------- - -Check sstable(s) for errors or corruption, for the provided table. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5791 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableverify
- -=================================== ================================================================================ ---debug display stack traces --e, --extended extended verification --h, --help display this help message --v, --verbose verbose output -=================================== ================================================================================ - -Basic Verification -^^^^^^^^^^^^^^^^^^ - -This is the basic verification. It is not a very quick process, and uses memory. You might need to increase your memory settings if you have many sstables. - -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - -Extended Verification -^^^^^^^^^^^^^^^^^^^^^ - -During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time. - -Example:: - - root@DC1C1:/# sstableverify -e keyspace eventlog - WARN 14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully - -Corrupted File -^^^^^^^^^^^^^^ - -Corrupted files are listed if they are detected by the script. 
- -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db - -A similar (but less verbose) tool will show the suggested actions:: - - nodetool verify keyspace eventlog - error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair - - - diff --git a/src/doc/4.0-alpha1/_sources/troubleshooting/finding_nodes.rst.txt b/src/doc/4.0-alpha1/_sources/troubleshooting/finding_nodes.rst.txt deleted file mode 100644 index df5e16c93..000000000 --- a/src/doc/4.0-alpha1/_sources/troubleshooting/finding_nodes.rst.txt +++ /dev/null @@ -1,149 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Find The Misbehaving Nodes -========================== - -The first step to troubleshooting a Cassandra issue is to use error messages, -metrics and monitoring information to identify if the issue lies with the -clients or the server and if it does lie with the server find the problematic -nodes in the Cassandra cluster. The goal is to determine if this is a systemic -issue (e.g. a query pattern that affects the entire cluster) or isolated to a -subset of nodes (e.g. neighbors holding a shared token range or even a single -node with bad hardware). - -There are many sources of information that help determine where the problem -lies. Some of the most common are mentioned below. - -Client Logs and Errors ----------------------- -Clients of the cluster often leave the best breadcrumbs to follow. Perhaps -client latencies or error rates have increased in a particular datacenter -(likely eliminating other datacenter's nodes), or clients are receiving a -particular kind of error code indicating a particular kind of problem. -Troubleshooters can often rule out many failure modes just by reading the error -messages. In fact, many Cassandra error messages include the last coordinator -contacted to help operators find nodes to start with. - -Some common errors (likely culprit in parenthesis) assuming the client has -similar error names as the Datastax :ref:`drivers `: - -* ``SyntaxError`` (**client**). 
This and other ``QueryValidationException`` - indicate that the client sent a malformed request. These are rarely server - issues and usually indicate bad queries. -* ``UnavailableException`` (**server**): This means that the Cassandra - coordinator node has rejected the query as it believes that insufficient - replica nodes are available. If many coordinators are throwing this error it - likely means that there really are (typically) multiple nodes down in the - cluster and you can identify them using :ref:`nodetool status - `. If only a single coordinator is throwing this error it may - mean that node has been partitioned from the rest. -* ``OperationTimedOutException`` (**server**): This is the most frequent - timeout message raised when clients set timeouts and means that the query - took longer than the supplied timeout. This is a *client side* timeout - meaning that it took longer than the client specified timeout. The error - message will include the coordinator node that was last tried which is - usually a good starting point. This error usually indicates either - aggressive client timeout values or latent server coordinators/replicas. -* ``ReadTimeoutException`` or ``WriteTimeoutException`` (**server**): These - are raised when clients do not specify lower timeouts and there are - *coordinator* timeouts based on the values supplied in the ``cassandra.yaml`` - configuration file. They usually indicate a serious server side problem as - the default values are usually multiple seconds. - -Metrics ------- - -If you have Cassandra :ref:`metrics ` reporting to a -centralized location such as `Graphite `_ or -`Grafana `_ you can typically use those to narrow down -the problem. At this stage narrowing down the issue to a particular -datacenter, rack, or even group of nodes is the main goal. Some helpful metrics -to look at are: - -Errors -^^^^^^ -Cassandra refers to internode messaging errors as "drops", and provides a -number of :ref:`Dropped Message Metrics ` to help narrow -down errors. If particular nodes are actively dropping messages, they are -likely related to the issue. - -Latency -^^^^^^^ -For timeouts or latency related issues you can start with :ref:`Table -Metrics ` by comparing Coordinator level metrics e.g. -``CoordinatorReadLatency`` or ``CoordinatorWriteLatency`` with their associated -replica metrics e.g. ``ReadLatency`` or ``WriteLatency``. Issues usually show -up on the ``99th`` percentile before they show up on the ``50th`` percentile or -the ``mean``. While ``maximum`` coordinator latencies are not typically very -helpful due to the exponentially decaying reservoir used internally to produce -metrics, ``maximum`` replica latencies that correlate with increased ``99th`` -percentiles on coordinators can help narrow down the problem. - -There are usually three main possibilities: - -1. Coordinator latencies are high on all nodes, but only a few nodes' local - read latencies are high. This points to slow replica nodes and the - coordinator latencies are just side-effects. This usually happens when clients are - not token aware. -2. Coordinator latencies and replica latencies increase at the - same time on a few nodes. If clients are token aware this is almost - always what happens and points to slow replicas of a subset of token - ranges (only part of the ring). -3. Coordinator and local latencies are high on many nodes. This usually - indicates either a tipping point in the cluster capacity (too many writes or - reads per second), or a new query pattern.
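If you do not have dashboards handy, a quick way to make the coordinator-versus-replica comparison is to sample the same table histograms on several nodes and compare their local read latencies side by side. A minimal sketch (the host names are placeholders, and this assumes ``nodetool`` can reach each node remotely; the keyspace/table names follow the examples used elsewhere in this documentation)::

    $ for host in host1 host2 host3; do
    >   echo "== ${host} =="
    >   nodetool -h ${host} tablehistograms keyspace table | grep -E '99%|Max'
    > done

If one or two hosts stand out with much higher local read latencies than the rest, possibilities 1 or 2 above are more likely; if every host looks equally slow, possibility 3 is the better bet.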
- -It's important to remember that depending on the client's load balancing -behavior and consistency levels coordinator and replica metrics may or may -not correlate. In particular if you use ``TokenAware`` policies the same -node's coordinator and replica latencies will often increase together, but if -you just use normal ``DCAwareRoundRobin`` coordinator latencies can increase -with unrelated replica node's latencies. For example: - -* ``TokenAware`` + ``LOCAL_ONE``: should always have coordinator and replica - latencies on the same node rise together -* ``TokenAware`` + ``LOCAL_QUORUM``: should always have coordinator and - multiple replica latencies rise together in the same datacenter. -* ``TokenAware`` + ``QUORUM``: replica latencies in other datacenters can - affect coordinator latencies. -* ``DCAwareRoundRobin`` + ``LOCAL_ONE``: coordinator latencies and unrelated - replica node's latencies will rise together. -* ``DCAwareRoundRobin`` + ``LOCAL_QUORUM``: different coordinator and replica - latencies will rise together with little correlation. - -Query Rates -^^^^^^^^^^^ -Sometimes the :ref:`Table ` query rate metrics can help -narrow down load issues as "small" increase in coordinator queries per second -(QPS) may correlate with a very large increase in replica level QPS. This most -often happens with ``BATCH`` writes, where a client may send a single ``BATCH`` -query that might contain 50 statements in it, which if you have 9 copies (RF=3, -three datacenters) means that every coordinator ``BATCH`` write turns into 450 -replica writes! This is why keeping ``BATCH``'s to the same partition is so -critical, otherwise you can exhaust significant CPU capacitity with a "single" -query. - - -Next Step: Investigate the Node(s) ----------------------------------- - -Once you have narrowed down the problem as much as possible (datacenter, rack -, node), login to one of the nodes using SSH and proceed to debug using -:ref:`logs `, :ref:`nodetool `, and -:ref:`os tools `. If you are not able to login you may still -have access to :ref:`logs ` and :ref:`nodetool ` -remotely. diff --git a/src/doc/4.0-alpha1/_sources/troubleshooting/index.rst.txt b/src/doc/4.0-alpha1/_sources/troubleshooting/index.rst.txt deleted file mode 100644 index 79b46d636..000000000 --- a/src/doc/4.0-alpha1/_sources/troubleshooting/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Troubleshooting -=============== - -As any distributed database does, sometimes Cassandra breaks and you will have -to troubleshoot what is going on. 
Generally speaking you can debug Cassandra -like any other distributed Java program, meaning that you have to find which -machines in your cluster are misbehaving and then isolate the problem using -logs and tools. Luckily Cassandra has a great set of introspection tools to -help you. - -These pages include a number of command examples demonstrating various -debugging and analysis techniques, mostly for Linux/Unix systems. If you don't -have access to the machines running Cassandra, or are running on Windows or -another operating system, you may not be able to use the exact commands but -there are likely equivalent tools you can use. - -.. toctree:: - :maxdepth: 2 - - finding_nodes - reading_logs - use_nodetool - use_tools diff --git a/src/doc/4.0-alpha1/_sources/troubleshooting/reading_logs.rst.txt b/src/doc/4.0-alpha1/_sources/troubleshooting/reading_logs.rst.txt deleted file mode 100644 index 08f7d4da6..000000000 --- a/src/doc/4.0-alpha1/_sources/troubleshooting/reading_logs.rst.txt +++ /dev/null @@ -1,267 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _reading-logs: - -Cassandra Logs -============== -Cassandra has rich support for logging and attempts to give operators maximum -insight into the database while at the same time limiting noise to the logs. - -Common Log Files ---------------- -Cassandra has three main logs, the ``system.log``, ``debug.log`` and -``gc.log`` which hold general logging messages, debugging logging messages, and -java garbage collection logs respectively. - -These logs by default live in ``${CASSANDRA_HOME}/logs``, but most Linux -distributions relocate logs to ``/var/log/cassandra``. Operators can tune -this location as well as what levels are logged using the provided -``logback.xml`` file. - -``system.log`` -^^^^^^^^^^^^^^ -This log is the default Cassandra log and is a good place to start any -investigation. Some examples of activities logged to this log: - -* Uncaught exceptions. These can be very useful for debugging errors. -* ``GCInspector`` messages indicating long garbage collector pauses. When long - pauses happen Cassandra will print how long and also what was the state of - the system (thread state) at the time of that pause. This can help narrow - down a capacity issue (either not enough heap or not enough spare CPU). -* Information about nodes joining and leaving the cluster as well as token - metadata (data ownership) changes. This is useful for debugging network - partitions, data movements, and more. -* Keyspace/Table creation, modification, deletion. -* ``StartupChecks`` that ensure optimal configuration of the operating system - to run Cassandra. -* Information about some background operational tasks (e.g. Index - Redistribution).
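For instance, to pull the ``GCInspector`` pauses and gossip up/down events mentioned above out of a busy log, a couple of illustrative greps (a sketch only; the exact message wording can differ between Cassandra versions)::

    $ # Long garbage collection pauses reported by GCInspector
    $ grep 'GCInspector' system.log | tail

    $ # Nodes being marked up or down by gossip
    $ grep -i 'is now UP\|is now DOWN' system.log | tail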
- -As with any application, looking for ``ERROR`` or ``WARN`` lines can be a -great first step:: - - $ # Search for warnings or errors in the latest system.log - $ grep 'WARN\|ERROR' system.log | tail - ... - - $ # Search for warnings or errors in all rotated system.log - $ zgrep 'WARN\|ERROR' system.log.* | less - ... - -``debug.log`` -^^^^^^^^^^^^^^ -This log contains additional debugging information that may be useful when -troubleshooting but may be much noiser than the normal ``system.log``. Some -examples of activities logged to this log: - -* Information about compactions, including when they start, which sstables - they contain, and when they finish. -* Information about memtable flushes to disk, including when they happened, - how large the flushes were, and which commitlog segments the flush impacted. - -This log can be *very* noisy, so it is highly recommended to use ``grep`` and -other log analysis tools to dive deep. For example:: - - $ # Search for messages involving a CompactionTask with 5 lines of context - $ grep CompactionTask debug.log -C 5 - ... - - $ # Look at the distribution of flush tasks per keyspace - $ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c - 6 compaction_history: - 1 test_keyspace: - 2 local: - 17 size_estimates: - 17 sstable_activity: - - -``gc.log`` -^^^^^^^^^^^^^^ -The gc log is a standard Java GC log. With the default ``jvm.options`` -settings you get a lot of valuable information in this log such as -application pause times, and why pauses happened. This may help narrow -down throughput or latency issues to a mistuned JVM. For example you can -view the last few pauses:: - - $ grep stopped gc.log.0.current | tail - 2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds - 2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds - 2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds - 2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds - 2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds - 2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds - 2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds - 2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds - 2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds - 2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds - - -This shows a lot of valuable information including how long the application -was paused (meaning zero user queries were being serviced during the e.g. 33ms -JVM pause) as well as how long it took to enter the safepoint. 
You can use this -raw data to e.g. get the longest pauses:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current | sort -k 1 - 2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds - 2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds - 2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds - 2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds - 2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds - 2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds - 2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds - 2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds - 2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds - 2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds - -In this case any client waiting on a query would have experienced a `56ms` -latency at 17:13:41. - -Note that GC pauses are not _only_ garbage collection, although -generally speaking high pauses with fast safepoints indicate a lack of JVM heap -or mistuned JVM GC algorithm. High pauses with slow safepoints typically -indicate that the JVM is having trouble entering a safepoint which usually -indicates slow disk drives (Cassandra makes heavy use of memory mapped reads -which the JVM doesn't know could have disk latency, so the JVM safepoint logic -doesn't handle a blocking memory mapped read particularly well). - -Using these logs you can even get a pause distribution with something like -`histogram.py `_:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py - # NumSamples = 410293; Min = 0.00; Max = 11.49 - # Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498 - # each ∎ represents a count of 5470 - 0.0001 - 1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎ - 1.1496 - 2.2991 [ 15]: - 2.2991 - 3.4486 [ 5]: - 3.4486 - 4.5981 [ 1]: - 4.5981 - 5.7475 [ 5]: - 5.7475 - 6.8970 [ 9]: - 6.8970 - 8.0465 [ 1]: - 8.0465 - 9.1960 [ 0]: - 9.1960 - 10.3455 [ 0]: - 10.3455 - 11.4949 [ 2]: - -We can see in this case while we have very good average performance something -is causing multi second JVM pauses ... 
In this case it was mostly safepoint -pauses caused by slow disks:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current| sort -k 1 - 2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds - 2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds - 2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds - 2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds - 2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds - 2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds - 2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds - 2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds - 2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds - 2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds - -Sometimes reading and understanding java GC logs is hard, but you can take the -raw GC files and visualize them using tools such as `GCViewer -`_ which take the Cassandra GC log as -input and show you detailed visual information on your garbage collection -performance. This includes pause analysis as well as throughput information. -For a stable Cassandra JVM you probably want to aim for pauses less than -`200ms` and GC throughput greater than `99%` (ymmv). - -Java GC pauses are one of the leading causes of tail latency in Cassandra -(along with drive latency) so sometimes this information can be crucial -while debugging tail latency issues. - - -Getting More Information ------------------------- - -If the default logging levels are insuficient, ``nodetool`` can set higher -or lower logging levels for various packages and classes using the -``nodetool setlogginglevel`` command. Start by viewing the current levels:: - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - -Perhaps the ``Gossiper`` is acting up and we wish to enable it at ``TRACE`` -level for even more insight:: - - - $ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - org.apache.cassandra.gms.Gossiper TRACE - - $ grep TRACE debug.log | tail -2 - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating - heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ... - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local - heartbeat version 2341 greater than 2340 for 127.0.0.1:7000 - - -Note that any changes made this way are reverted on next Cassandra process -restart. 
To make the changes permanent add the appropriate rule to -``logback.xml``. - -.. code-block:: diff - - diff --git a/conf/logback.xml b/conf/logback.xml - index b2c5b10..71b0a49 100644 - --- a/conf/logback.xml - +++ b/conf/logback.xml - @@ -98,4 +98,5 @@ appender reference in the root level section below. - - - - + - - -Full Query Logger -^^^^^^^^^^^^^^^^^ - -Cassandra 4.0 additionally ships with support for full query logging. This -is a highly performant binary logging tool which captures Cassandra queries -in real time, writes them (if possible) to a log file, and ensures the total -size of the capture does not exceed a particular limit. FQL is enabled with -``nodetool`` and the logs are read with the provided ``bin/fqltool`` utility:: - - $ mkdir /var/tmp/fql_logs - $ nodetool enablefullquerylog --path /var/tmp/fql_logs - - # ... do some querying - - $ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail - Query time: 1530750927224 - Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name = - 'system_views' AND table_name = 'sstable_tasks'; - Values: - - Type: single - Protocol version: 4 - Query time: 1530750934072 - Query: select * from keyspace1.standard1 ; - Values: - - $ nodetool disablefullquerylog - -Note that if you want more information than this tool provides, there are other -live capture options available such as :ref:`packet capture `. diff --git a/src/doc/4.0-alpha1/_sources/troubleshooting/use_nodetool.rst.txt b/src/doc/4.0-alpha1/_sources/troubleshooting/use_nodetool.rst.txt deleted file mode 100644 index 5072f85d1..000000000 --- a/src/doc/4.0-alpha1/_sources/troubleshooting/use_nodetool.rst.txt +++ /dev/null @@ -1,245 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-nodetool: - -Use Nodetool -============ - -Cassandra's ``nodetool`` allows you to narrow problems from the cluster down -to a particular node and gives a lot of insight into the state of the Cassandra -process itself. There are dozens of useful commands (see ``nodetool help`` -for all the commands), but briefly some of the most useful for troubleshooting: - -.. _nodetool-status: - -Cluster Status --------------- - -You can use ``nodetool status`` to assess status of the cluster:: - - $ nodetool status - - Datacenter: dc1 - ======================= - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - UN 127.0.1.1 4.69 GiB 1 100.0% 35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e r1 - UN 127.0.1.2 4.71 GiB 1 100.0% 752e278f-b7c5-4f58-974b-9328455af73f r2 - UN 127.0.1.3 4.69 GiB 1 100.0% 9dc1a293-2cc0-40fa-a6fd-9e6054da04a7 r3 - -In this case we can see that we have three nodes in one datacenter with about -4.6GB of data each and they are all "up". 
The up/down status of a node is -independently determined by every node in the cluster, so you may have to run -``nodetool status`` on multiple nodes in a cluster to see the full view. - -You can use ``nodetool status`` plus a little grep to see which nodes are -down:: - - $ nodetool status | grep -v '^UN' - Datacenter: dc1 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - Datacenter: dc2 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - DN 127.0.0.5 105.73 KiB 1 33.3% df303ac7-61de-46e9-ac79-6e630115fd75 r1 - -In this case there are two datacenters and there is one node down in datacenter -``dc2`` and rack ``r1``. This may indicate an issue on ``127.0.0.5`` -warranting investigation. - -.. _nodetool-proxyhistograms: - -Coordinator Query Latency -------------------------- -You can view latency distributions of coordinator read and write latency -to help narrow down latency issues using ``nodetool proxyhistograms``:: - - $ nodetool proxyhistograms - Percentile Read Latency Write Latency Range Latency CAS Read Latency CAS Write Latency View Write Latency - (micros) (micros) (micros) (micros) (micros) (micros) - 50% 454.83 219.34 0.00 0.00 0.00 0.00 - 75% 545.79 263.21 0.00 0.00 0.00 0.00 - 95% 654.95 315.85 0.00 0.00 0.00 0.00 - 98% 785.94 379.02 0.00 0.00 0.00 0.00 - 99% 3379.39 2346.80 0.00 0.00 0.00 0.00 - Min 42.51 105.78 0.00 0.00 0.00 0.00 - Max 25109.16 43388.63 0.00 0.00 0.00 0.00 - -Here you can see the full latency distribution of reads, writes, range requests -(e.g. ``select * from keyspace.table``), CAS read (compare phase of CAS) and -CAS write (set phase of compare and set). These can be useful for narrowing -down high level latency problems, for example in this case if a client had a -20 millisecond timeout on their reads they might experience the occasional -timeout from this node but less than 1% (since the 99% read latency is 3.3 -milliseconds < 20 milliseconds). - -.. _nodetool-tablehistograms: - -Local Query Latency -------------------- - -If you know which table is having latency/error issues, you can use -``nodetool tablehistograms`` to get a better idea of what is happening -locally on a node:: - - $ nodetool tablehistograms keyspace table - Percentile SSTables Write Latency Read Latency Partition Size Cell Count - (micros) (micros) (bytes) - 50% 0.00 73.46 182.79 17084 103 - 75% 1.00 88.15 315.85 17084 103 - 95% 2.00 126.93 545.79 17084 103 - 98% 2.00 152.32 654.95 17084 103 - 99% 2.00 182.79 785.94 17084 103 - Min 0.00 42.51 24.60 14238 87 - Max 2.00 12108.97 17436.92 17084 103 - -This shows you percentile breakdowns particularly critical metrics. - -The first column contains how many sstables were read per logical read. A very -high number here indicates that you may have chosen the wrong compaction -strategy, e.g. ``SizeTieredCompactionStrategy`` typically has many more reads -per read than ``LeveledCompactionStrategy`` does for update heavy workloads. - -The second column shows you a latency breakdown of *local* write latency. In -this case we see that while the p50 is quite good at 73 microseconds, the -maximum latency is quite slow at 12 milliseconds. High write max latencies -often indicate a slow commitlog volume (slow to fsync) or large writes -that quickly saturate commitlog segments. - -The third column shows you a latency breakdown of *local* read latency. 
We can -see that local Cassandra reads are (as expected) slower than local writes, and -the read speed correlates highly with the number of sstables read per read. - -The fourth and fifth columns show distributions of partition size and column -count per partition. These are useful for determining if the table has on -average skinny or wide partitions and can help you isolate bad data patterns. -For example if you have a single cell that is 2 megabytes, that is probably -going to cause some heap pressure when it's read. - -.. _nodetool-tpstats: - -Threadpool State ----------------- - -You can use ``nodetool tpstats`` to view the current outstanding requests on -a particular node. This is useful for trying to find out which resource -(read threads, write threads, compaction, request response threads) the -Cassandra process lacks. For example:: - - $ nodetool tpstats - Pool Name Active Pending Completed Blocked All time blocked - ReadStage 2 0 12 0 0 - MiscStage 0 0 0 0 0 - CompactionExecutor 0 0 1940 0 0 - MutationStage 0 0 0 0 0 - GossipStage 0 0 10293 0 0 - Repair-Task 0 0 0 0 0 - RequestResponseStage 0 0 16 0 0 - ReadRepairStage 0 0 0 0 0 - CounterMutationStage 0 0 0 0 0 - MemtablePostFlush 0 0 83 0 0 - ValidationExecutor 0 0 0 0 0 - MemtableFlushWriter 0 0 30 0 0 - ViewMutationStage 0 0 0 0 0 - CacheCleanupExecutor 0 0 0 0 0 - MemtableReclaimMemory 0 0 30 0 0 - PendingRangeCalculator 0 0 11 0 0 - SecondaryIndexManagement 0 0 0 0 0 - HintsDispatcher 0 0 0 0 0 - Native-Transport-Requests 0 0 192 0 0 - MigrationStage 0 0 14 0 0 - PerDiskMemtableFlushWriter_0 0 0 30 0 0 - Sampler 0 0 0 0 0 - ViewBuildExecutor 0 0 0 0 0 - InternalResponseStage 0 0 0 0 0 - AntiEntropyStage 0 0 0 0 0 - - Message type Dropped Latency waiting in queue (micros) - 50% 95% 99% Max - READ 0 N/A N/A N/A N/A - RANGE_SLICE 0 0.00 0.00 0.00 0.00 - _TRACE 0 N/A N/A N/A N/A - HINT 0 N/A N/A N/A N/A - MUTATION 0 N/A N/A N/A N/A - COUNTER_MUTATION 0 N/A N/A N/A N/A - BATCH_STORE 0 N/A N/A N/A N/A - BATCH_REMOVE 0 N/A N/A N/A N/A - REQUEST_RESPONSE 0 0.00 0.00 0.00 0.00 - PAGED_RANGE 0 N/A N/A N/A N/A - READ_REPAIR 0 N/A N/A N/A N/A - -This command shows you all kinds of interesting statistics. The first section -shows a detailed breakdown of threadpools for each Cassandra stage, including -how many threads are current executing (Active) and how many are waiting to -run (Pending). Typically if you see pending executions in a particular -threadpool that indicates a problem localized to that type of operation. For -example if the ``RequestResponseState`` queue is backing up, that means -that the coordinators are waiting on a lot of downstream replica requests and -may indicate a lack of token awareness, or very high consistency levels being -used on read requests (for example reading at ``ALL`` ties up RF -``RequestResponseState`` threads whereas ``LOCAL_ONE`` only uses a single -thread in the ``ReadStage`` threadpool). On the other hand if you see a lot of -pending compactions that may indicate that your compaction threads cannot keep -up with the volume of writes and you may need to tune either the compaction -strategy or the ``concurrent_compactors`` or ``compaction_throughput`` options. - -The second section shows drops (errors) and latency distributions for all the -major request types. Drops are cumulative since process start, but if you -have any that indicate a serious problem as the default timeouts to qualify as -a drop are quite high (~5-10 seconds). Dropped messages often warrants further -investigation. - -.. 
_nodetool-compactionstats: - -Compaction State ----------------- - -As Cassandra is a LSM datastore, Cassandra sometimes has to compact sstables -together, which can have adverse effects on performance. In particular, -compaction uses a reasonable quantity of CPU resources, invalidates large -quantities of the OS `page cache `_, -and can put a lot of load on your disk drives. There are great -:ref:`os tools ` to determine if this is the case, but often it's a -good idea to check if compactions are even running using -``nodetool compactionstats``:: - - $ nodetool compactionstats - pending tasks: 2 - - keyspace.table: 2 - - id compaction type keyspace table completed total unit progress - 2062b290-7f3a-11e8-9358-cd941b956e60 Compaction keyspace table 21848273 97867583 bytes 22.32% - Active compaction remaining time : 0h00m04s - -In this case there is a single compaction running on the ``keyspace.table`` -table, has completed 21.8 megabytes of 97 and Cassandra estimates (based on -the configured compaction throughput) that this will take 4 seconds. You can -also pass ``-H`` to get the units in a human readable format. - -Generally each running compaction can consume a single core, but the more -you do in parallel the faster data compacts. Compaction is crucial to ensuring -good read performance so having the right balance of concurrent compactions -such that compactions complete quickly but don't take too many resources -away from query threads is very important for performance. If you notice -compaction unable to keep up, try tuning Cassandra's ``concurrent_compactors`` -or ``compaction_throughput`` options. diff --git a/src/doc/4.0-alpha1/_sources/troubleshooting/use_tools.rst.txt b/src/doc/4.0-alpha1/_sources/troubleshooting/use_tools.rst.txt deleted file mode 100644 index b1347cc6d..000000000 --- a/src/doc/4.0-alpha1/_sources/troubleshooting/use_tools.rst.txt +++ /dev/null @@ -1,542 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-os-tools: - -Diving Deep, Use External Tools -=============================== - -Machine access allows operators to dive even deeper than logs and ``nodetool`` -allow. While every Cassandra operator may have their personal favorite -toolsets for troubleshooting issues, this page contains some of the most common -operator techniques and examples of those tools. Many of these commands work -only on Linux, but if you are deploying on a different operating system you may -have access to other substantially similar tools that assess similar OS level -metrics and processes. - -JVM Tooling ------------ -The JVM ships with a number of useful tools. Some of them are useful for -debugging Cassandra issues, especially related to heap and execution stacks. 
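Most of the JVM tools below want the Cassandra process id. A quick way to find it (a sketch; this assumes the database runs as the ``cassandra`` user, and the pid shown is just a placeholder)::

    $ pgrep -u cassandra -f CassandraDaemon
    12345

That pid is what you pass to ``jstat`` and ``jstack``, and per the note below you should run those tools as the same user the database runs as.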
- -**NOTE**: There are two common gotchas with JVM tooling and Cassandra: - -1. By default Cassandra ships with ``-XX:+PerfDisableSharedMem`` set to prevent - long pauses (see ``CASSANDRA-9242`` and ``CASSANDRA-9483`` for details). If - you want to use JVM tooling you can instead have ``/tmp`` mounted on an in - memory ``tmpfs`` which also effectively works around ``CASSANDRA-9242``. -2. Make sure you run the tools as the same user as Cassandra is running as, - e.g. if the database is running as ``cassandra`` the tool also has to be - run as ``cassandra``, e.g. via ``sudo -u cassandra ``. - -Garbage Collection State (jstat) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you suspect heap pressure you can use ``jstat`` to dive deep into the -garbage collection state of a Cassandra process. This command is always -safe to run and yields detailed heap information including eden heap usage (E), -old generation heap usage (O), count of eden collections (YGC), time spend in -eden collections (YGCT), old/mixed generation collections (FGC) and time spent -in old/mixed generation collections (FGCT):: - - - jstat -gcutil 500ms - S0 S1 E O M CCS YGC YGCT FGC FGCT GCT - 0.00 0.00 81.53 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.94 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - -In this case we see we have a relatively healthy heap profile, with 31.16% -old generation heap usage and 83% eden. If the old generation routinely is -above 75% then you probably need more heap (assuming CMS with a 75% occupancy -threshold). If you do have such persistently high old gen that often means you -either have under-provisioned the old generation heap, or that there is too -much live data on heap for Cassandra to collect (e.g. because of memtables). -Another thing to watch for is time between young garbage collections (YGC), -which indicate how frequently the eden heap is collected. Each young gc pause -is about 20-50ms, so if you have a lot of them your clients will notice in -their high percentile latencies. - -Thread Information (jstack) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To get a point in time snapshot of exactly what Cassandra is doing, run -``jstack`` against the Cassandra PID. **Note** that this does pause the JVM for -a very brief period (<20ms).:: - - $ jstack > threaddump - - # display the threaddump - $ cat threaddump - ... 
- - # look at runnable threads - $grep RUNNABLE threaddump -B 1 - "Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000] - java.lang.Thread.State: RUNNABLE - -- - "Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - ... - - # Note that the nid is the Linux thread id - -Some of the most important information in the threaddumps are waiting/blocking -threads, including what locks or monitors the thread is blocking/waiting on. - -Basic OS Tooling ----------------- -A great place to start when debugging a Cassandra issue is understanding how -Cassandra is interacting with system resources. The following are all -resources that Cassandra makes heavy uses of: - -* CPU cores. For executing concurrent user queries -* CPU processing time. For query activity (data decompression, row merging, - etc...) -* CPU processing time (low priority). For background tasks (compaction, - streaming, etc ...) -* RAM for Java Heap. Used to hold internal data-structures and by default the - Cassandra memtables. Heap space is a crucial component of write performance - as well as generally. -* RAM for OS disk cache. Used to cache frequently accessed SSTable blocks. OS - disk cache is a crucial component of read performance. -* Disks. Cassandra cares a lot about disk read latency, disk write throughput, - and of course disk space. -* Network latency. Cassandra makes many internode requests, so network latency - between nodes can directly impact performance. -* Network throughput. Cassandra (as other databases) frequently have the - so called "incast" problem where a small request (e.g. ``SELECT * from - foo.bar``) returns a massively large result set (e.g. the entire dataset). - In such situations outgoing bandwidth is crucial. - -Often troubleshooting Cassandra comes down to troubleshooting what resource -the machine or cluster is running out of. Then you create more of that resource -or change the query pattern to make less use of that resource. - -High Level Resource Usage (top/htop) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra makes signifiant use of system resources, and often the very first -useful action is to run ``top`` or ``htop`` (`website -`_)to see the state of the machine. - -Useful things to look at: - -* System load levels. While these numbers can be confusing, generally speaking - if the load average is greater than the number of CPU cores, Cassandra - probably won't have very good (sub 100 millisecond) latencies. See - `Linux Load Averages `_ - for more information. -* CPU utilization. ``htop`` in particular can help break down CPU utilization - into ``user`` (low and normal priority), ``system`` (kernel), and ``io-wait`` - . Cassandra query threads execute as normal priority ``user`` threads, while - compaction threads execute as low priority ``user`` threads. 
High ``system`` - time could indicate problems like thread contention, and high ``io-wait`` - may indicate slow disk drives. This can help you understand what Cassandra - is spending processing resources doing. -* Memory usage. Look for which programs have the most resident memory, it is - probably Cassandra. The number for Cassandra is likely inaccurately high due - to how Linux (as of 2018) accounts for memory mapped file memory. - -.. _os-iostat: - -IO Usage (iostat) -^^^^^^^^^^^^^^^^^ -Use iostat to determine how data drives are faring, including latency -distributions, throughput, and utilization:: - - $ sudo iostat -xdm 2 - Linux 4.13.0-13-generic (hostname) 07/03/2018 _x86_64_ (8 CPU) - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.28 0.32 5.42 0.01 0.13 48.55 0.01 2.21 0.26 2.32 0.64 0.37 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 79.34 0.00 0.20 0.20 0.00 0.16 0.00 - sdc 0.34 0.27 0.76 0.36 0.01 0.02 47.56 0.03 26.90 2.98 77.73 9.21 1.03 - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.00 2.00 32.00 0.01 4.04 244.24 0.54 16.00 0.00 17.00 1.06 3.60 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 - sdc 0.00 24.50 0.00 114.00 0.00 11.62 208.70 5.56 48.79 0.00 48.79 1.12 12.80 - - -In this case we can see that ``/dev/sdc1`` is a very slow drive, having an -``await`` close to 50 milliseconds and an ``avgqu-sz`` close to 5 ios. The -drive is not particularly saturated (utilization is only 12.8%), but we should -still be concerned about how this would affect our p99 latency since 50ms is -quite long for typical Cassandra operations. That being said, in this case -most of the latency is present in writes (typically writes are more latent -than reads), which due to the LSM nature of Cassandra is often hidden from -the user. - -Important metrics to assess using iostat: - -* Reads and writes per second. These numbers will change with the workload, - but generally speaking the more reads Cassandra has to do from disk the - slower Cassandra read latencies are. Large numbers of reads per second - can be a dead giveaway that the cluster has insufficient memory for OS - page caching. -* Write throughput. Cassandra's LSM model defers user writes and batches them - together, which means that throughput to the underlying medium is the most - important write metric for Cassandra. -* Read latency (``r_await``). When Cassandra missed the OS page cache and reads - from SSTables, the read latency directly determines how fast Cassandra can - respond with the data. -* Write latency. Cassandra is less sensitive to write latency except when it - syncs the commit log. This typically enters into the very high percentiles of - write latency. - -Note that to get detailed latency breakdowns you will need a more advanced -tool such as :ref:`bcc-tools `. - -OS page Cache Usage -^^^^^^^^^^^^^^^^^^^ -As Cassandra makes heavy use of memory mapped files, the health of the -operating system's `Page Cache `_ is -crucial to performance. Start by finding how much available cache is in the -system:: - - $ free -g - total used free shared buff/cache available - Mem: 15 9 2 0 3 5 - Swap: 0 0 0 - -In this case 9GB of memory is used by user processes (Cassandra heap) and 8GB -is available for OS page cache. Of that, 3GB is actually used to cache files. -If most memory is used and unavailable to the page cache, Cassandra performance -can suffer significantly. 
This is why Cassandra starts with a reasonably small -amount of memory reserved for the heap. - -If you suspect that you are missing the OS page cache frequently you can use -advanced tools like :ref:`cachestat ` or -:ref:`vmtouch ` to dive deeper. - -Network Latency and Reliability -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Whenever Cassandra does writes or reads that involve other replicas, -``LOCAL_QUORUM`` reads for example, one of the dominant effects on latency is -network latency. When trying to debug issues with multi machine operations, -the network can be an important resource to investigate. You can determine -internode latency using tools like ``ping`` and ``traceroute`` or most -effectively ``mtr``:: - - $ mtr -nr www.google.com - Start: Sun Jul 22 13:10:28 2018 - HOST: hostname Loss% Snt Last Avg Best Wrst StDev - 1.|-- 192.168.1.1 0.0% 10 2.0 1.9 1.1 3.7 0.7 - 2.|-- 96.123.29.15 0.0% 10 11.4 11.0 9.0 16.4 1.9 - 3.|-- 68.86.249.21 0.0% 10 10.6 10.7 9.0 13.7 1.1 - 4.|-- 162.141.78.129 0.0% 10 11.5 10.6 9.6 12.4 0.7 - 5.|-- 162.151.78.253 0.0% 10 10.9 12.1 10.4 20.2 2.8 - 6.|-- 68.86.143.93 0.0% 10 12.4 12.6 9.9 23.1 3.8 - 7.|-- 96.112.146.18 0.0% 10 11.9 12.4 10.6 15.5 1.6 - 9.|-- 209.85.252.250 0.0% 10 13.7 13.2 12.5 13.9 0.0 - 10.|-- 108.170.242.238 0.0% 10 12.7 12.4 11.1 13.0 0.5 - 11.|-- 74.125.253.149 0.0% 10 13.4 13.7 11.8 19.2 2.1 - 12.|-- 216.239.62.40 0.0% 10 13.4 14.7 11.5 26.9 4.6 - 13.|-- 108.170.242.81 0.0% 10 14.4 13.2 10.9 16.0 1.7 - 14.|-- 72.14.239.43 0.0% 10 12.2 16.1 11.0 32.8 7.1 - 15.|-- 216.58.195.68 0.0% 10 25.1 15.3 11.1 25.1 4.8 - -In this example of ``mtr``, we can rapidly assess the path that your packets -are taking, as well as what their typical loss and latency are. Packet loss -typically leads to between ``200ms`` and ``3s`` of additional latency, so that -can be a common cause of latency issues. - -Network Throughput -^^^^^^^^^^^^^^^^^^ -As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is -useful to determine if network throughput is limited. One handy tool to do -this is `iftop `_ which -shows both bandwidth usage as well as connection information at a glance. 
An -example showing traffic during a stress run against a local ``ccm`` cluster:: - - $ # remove the -t for ncurses instead of pure text - $ sudo iftop -nNtP -i lo - interface: lo - IP address is: 127.0.0.1 - MAC address is: 00:00:00:00:00:00 - Listening on lo - # Host name (port/service if enabled) last 2s last 10s last 40s cumulative - -------------------------------------------------------------------------------------------- - 1 127.0.0.1:58946 => 869Kb 869Kb 869Kb 217KB - 127.0.0.3:9042 <= 0b 0b 0b 0B - 2 127.0.0.1:54654 => 736Kb 736Kb 736Kb 184KB - 127.0.0.1:9042 <= 0b 0b 0b 0B - 3 127.0.0.1:51186 => 669Kb 669Kb 669Kb 167KB - 127.0.0.2:9042 <= 0b 0b 0b 0B - 4 127.0.0.3:9042 => 3.30Kb 3.30Kb 3.30Kb 845B - 127.0.0.1:58946 <= 0b 0b 0b 0B - 5 127.0.0.1:9042 => 2.79Kb 2.79Kb 2.79Kb 715B - 127.0.0.1:54654 <= 0b 0b 0b 0B - 6 127.0.0.2:9042 => 2.54Kb 2.54Kb 2.54Kb 650B - 127.0.0.1:51186 <= 0b 0b 0b 0B - 7 127.0.0.1:36894 => 1.65Kb 1.65Kb 1.65Kb 423B - 127.0.0.5:7000 <= 0b 0b 0b 0B - 8 127.0.0.1:38034 => 1.50Kb 1.50Kb 1.50Kb 385B - 127.0.0.2:7000 <= 0b 0b 0b 0B - 9 127.0.0.1:56324 => 1.50Kb 1.50Kb 1.50Kb 383B - 127.0.0.1:7000 <= 0b 0b 0b 0B - 10 127.0.0.1:53044 => 1.43Kb 1.43Kb 1.43Kb 366B - 127.0.0.4:7000 <= 0b 0b 0b 0B - -------------------------------------------------------------------------------------------- - Total send rate: 2.25Mb 2.25Mb 2.25Mb - Total receive rate: 0b 0b 0b - Total send and receive rate: 2.25Mb 2.25Mb 2.25Mb - -------------------------------------------------------------------------------------------- - Peak rate (sent/received/total): 2.25Mb 0b 2.25Mb - Cumulative (sent/received/total): 576KB 0B 576KB - ============================================================================================ - -In this case we can see that bandwidth is fairly shared between many peers, -but if the total was getting close to the rated capacity of the NIC or was focussed -on a single client, that may indicate a clue as to what issue is occurring. - -Advanced tools --------------- -Sometimes as an operator you may need to really dive deep. This is where -advanced OS tooling can come in handy. - -.. _use-bcc-tools: - -bcc-tools -^^^^^^^^^ -Most modern Linux distributions (kernels newer than ``4.1``) support `bcc-tools -`_ for diving deep into performance problems. -First install ``bcc-tools``, e.g. via ``apt`` on Debian:: - - $ apt install bcc-tools - -Then you can use all the tools that ``bcc-tools`` contains. One of the most -useful tools is ``cachestat`` -(`cachestat examples `_) -which allows you to determine exactly how many OS page cache hits and misses -are happening:: - - $ sudo /usr/share/bcc/tools/cachestat -T 1 - TIME TOTAL MISSES HITS DIRTIES BUFFERS_MB CACHED_MB - 18:44:08 66 66 0 64 88 4427 - 18:44:09 40 40 0 75 88 4427 - 18:44:10 4353 45 4308 203 88 4427 - 18:44:11 84 77 7 13 88 4428 - 18:44:12 2511 14 2497 14 88 4428 - 18:44:13 101 98 3 18 88 4428 - 18:44:14 16741 0 16741 58 88 4428 - 18:44:15 1935 36 1899 18 88 4428 - 18:44:16 89 34 55 18 88 4428 - -In this case there are not too many page cache ``MISSES`` which indicates a -reasonably sized cache. These metrics are the most direct measurement of your -Cassandra node's "hot" dataset. If you don't have enough cache, ``MISSES`` will -be high and performance will be slow. If you have enough cache, ``MISSES`` will -be low and performance will be fast (as almost all reads are being served out -of memory). 
- -You can also measure disk latency distributions using ``biolatency`` -(`biolatency examples `_) -to get an idea of how slow Cassandra will be when reads miss the OS page cache -and have to hit disks:: - - $ sudo /usr/share/bcc/tools/biolatency -D 10 - Tracing block device I/O... Hit Ctrl-C to end. - - - disk = 'sda' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 12 |****************************************| - 32 -> 63 : 9 |****************************** | - 64 -> 127 : 1 |*** | - 128 -> 255 : 3 |********** | - 256 -> 511 : 7 |*********************** | - 512 -> 1023 : 2 |****** | - - disk = 'sdc' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 0 | | - 32 -> 63 : 0 | | - 64 -> 127 : 41 |************ | - 128 -> 255 : 17 |***** | - 256 -> 511 : 13 |*** | - 512 -> 1023 : 2 | | - 1024 -> 2047 : 0 | | - 2048 -> 4095 : 0 | | - 4096 -> 8191 : 56 |***************** | - 8192 -> 16383 : 131 |****************************************| - 16384 -> 32767 : 9 |** | - -In this case most I/Os on the data drive (``sdc``) are fast, but many take -between 8 and 16 milliseconds. - -Finally ``biosnoop`` (`examples `_) -can be used to dive even deeper and see per-IO latencies:: - - $ sudo /usr/share/bcc/tools/biosnoop | grep java | head - 0.000000000 java 17427 sdc R 3972458600 4096 13.58 - 0.000818000 java 17427 sdc R 3972459408 4096 0.35 - 0.007098000 java 17416 sdc R 3972401824 4096 5.81 - 0.007896000 java 17416 sdc R 3972489960 4096 0.34 - 0.008920000 java 17416 sdc R 3972489896 4096 0.34 - 0.009487000 java 17427 sdc R 3972401880 4096 0.32 - 0.010238000 java 17416 sdc R 3972488368 4096 0.37 - 0.010596000 java 17427 sdc R 3972488376 4096 0.34 - 0.011236000 java 17410 sdc R 3972488424 4096 0.32 - 0.011825000 java 17427 sdc R 3972488576 16384 0.65 - ... time passes - 8.032687000 java 18279 sdc R 10899712 122880 3.01 - 8.033175000 java 18279 sdc R 10899952 8192 0.46 - 8.073295000 java 18279 sdc R 23384320 122880 3.01 - 8.073768000 java 18279 sdc R 23384560 8192 0.46 - - -With ``biosnoop`` you see every single IO and how long it takes. This data -can be used to construct the latency distributions in ``biolatency`` but can -also be used to better understand how disk latency affects performance. For -example, this particular drive takes ~3ms to service a memory mapped read due to -the large default value (``128kb``) of ``read_ahead_kb``. To improve point read -performance you may want to decrease ``read_ahead_kb`` on fast data volumes -such as SSDs, while a higher value like ``128kb`` is probably -right for HDs. There are tradeoffs involved, see `queue-sysfs -`_ docs for more -information, but regardless ``biosnoop`` is useful for understanding *how* -Cassandra uses drives. - -.. _use-vmtouch: - -vmtouch -^^^^^^^ -Sometimes it's useful to know how much of the Cassandra data files are being -cached by the OS. A great tool for answering this question is -`vmtouch `_. - -First install it:: - - $ git clone https://github.com/hoytech/vmtouch.git - $ cd vmtouch - $ make - -Then run it on the Cassandra data directory:: - - $ ./vmtouch /var/lib/cassandra/data/ - Files: 312 - Directories: 92 - Resident Pages: 62503/64308 244M/251M 97.2% - Elapsed: 0.005657 seconds - -In this case almost the entire dataset is hot in the OS page cache. Generally -speaking the percentage doesn't really matter unless reads are missing the -cache (per e.g.
:ref:`cachestat <use-bcc-tools>`), in which case having -additional memory may help read performance. - -CPU Flamegraphs -^^^^^^^^^^^^^^^ -Cassandra often uses a lot of CPU, but telling *what* it is doing can prove -difficult. One of the best ways to analyze where Cassandra is spending CPU time is to use -`CPU Flamegraphs `_ -which display in a useful way which areas of Cassandra code are using CPU. This -may help narrow down a compaction problem to a "compaction problem dropping -tombstones" or just generally help you understand what Cassandra is doing -while it is having an issue. To get CPU flamegraphs follow the instructions for -`Java Flamegraphs -`_. - -Generally: - -1. Enable the ``-XX:+PreserveFramePointer`` option in Cassandra's - ``jvm.options`` configuration file. This has a negligible performance impact - but allows you to actually see what Cassandra is doing. -2. Run ``perf`` to get some data. -3. Send that data through the relevant scripts in the FlameGraph toolset and - convert the data into a pretty flamegraph. View the resulting SVG image in - a browser or other image browser. - -For example, cloning straight off GitHub, we first install the -``perf-map-agent`` to the location of our JVMs (assumed to be -``/usr/lib/jvm``):: - - $ sudo bash - $ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/ - $ cd /usr/lib/jvm - $ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent - $ cd perf-map-agent - $ cmake . - $ make - -Now to get a flamegraph:: - - $ git clone --depth=1 https://github.com/brendangregg/FlameGraph - $ sudo bash - $ cd FlameGraph - $ # Record traces of Cassandra and map symbols for all java processes - $ perf record -F 49 -a -g -p <CASSANDRA PID> -- sleep 30; ./jmaps - $ # Translate the data - $ perf script > cassandra_stacks - $ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \ - ./flamegraph.pl --color=java --hash > cassandra_flames.svg - - -The resulting SVG is searchable, zoomable, and generally easy to introspect -using a browser. - -.. _packet-capture: - -Packet Capture -^^^^^^^^^^^^^^ -Sometimes you have to understand what queries a Cassandra node is performing -*right now* to troubleshoot an issue. For these times, trusty packet capture -tools like ``tcpdump`` and `Wireshark -`_ can be very helpful for dissecting packet captures. -Wireshark even has native `CQL support -`_ although it sometimes has -compatibility issues with newer Cassandra protocol releases. - -To get a packet capture, first capture some packets:: - - $ sudo tcpdump -U -s0 -i <INTERFACE> -w cassandra.pcap -n "tcp port 9042" - -Now open it up with Wireshark:: - - $ wireshark cassandra.pcap - -If you don't see CQL-like statements, try telling Wireshark to decode as CQL by right -clicking on a packet going to 9042 -> ``Decode as`` -> select CQL from the -dropdown for port 9042. - -If you don't want to do this manually or use a GUI, you can also use something -like `cqltrace `_ to ease obtaining and -parsing CQL packet captures. diff --git a/src/doc/4.0-alpha1/_static/ajax-loader.gif b/src/doc/4.0-alpha1/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab..000000000 Binary files a/src/doc/4.0-alpha1/_static/ajax-loader.gif and /dev/null differ diff --git a/src/doc/4.0-alpha1/_static/basic.css b/src/doc/4.0-alpha1/_static/basic.css deleted file mode 100644 index 0807176ec..000000000 --- a/src/doc/4.0-alpha1/_static/basic.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 
450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: 
manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - 
div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/src/doc/4.0-alpha1/_static/comment-bright.png b/src/doc/4.0-alpha1/_static/comment-bright.png deleted file mode 100644 index 15e27edb1..000000000 Binary files a/src/doc/4.0-alpha1/_static/comment-bright.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_static/comment-close.png b/src/doc/4.0-alpha1/_static/comment-close.png deleted file mode 100644 index 4d91bcf57..000000000 Binary files a/src/doc/4.0-alpha1/_static/comment-close.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_static/comment.png b/src/doc/4.0-alpha1/_static/comment.png deleted file mode 100644 index dfbc0cbd5..000000000 Binary files a/src/doc/4.0-alpha1/_static/comment.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_static/doctools.js b/src/doc/4.0-alpha1/_static/doctools.js deleted file mode 100644 index 344db17dd..000000000 --- a/src/doc/4.0-alpha1/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. 
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var bbox = span.getBBox(); - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - var parentOfText = node.parentNode.parentNode; - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? 
singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git 
a/src/doc/4.0-alpha1/_static/documentation_options.js b/src/doc/4.0-alpha1/_static/documentation_options.js deleted file mode 100644 index d28647eb8..000000000 --- a/src/doc/4.0-alpha1/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, -}; \ No newline at end of file diff --git a/src/doc/4.0-alpha1/_static/down-pressed.png b/src/doc/4.0-alpha1/_static/down-pressed.png deleted file mode 100644 index 5756c8cad..000000000 Binary files a/src/doc/4.0-alpha1/_static/down-pressed.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_static/down.png b/src/doc/4.0-alpha1/_static/down.png deleted file mode 100644 index 1b3bdad2c..000000000 Binary files a/src/doc/4.0-alpha1/_static/down.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_static/extra.css b/src/doc/4.0-alpha1/_static/extra.css deleted file mode 100644 index 715e2a850..000000000 --- a/src/doc/4.0-alpha1/_static/extra.css +++ /dev/null @@ -1,59 +0,0 @@ -div:not(.highlight) > pre { - background: #fff; - border: 1px solid #e1e4e5; - color: #404040; - margin: 1px 0 24px 0; - overflow-x: auto; - padding: 12px 12px; - font-size: 12px; -} - -a.reference.internal code.literal { - border: none; - font-size: 12px; - color: #2980B9; - padding: 0; - background: none; -} - -a.reference.internal:visited code.literal { - color: #9B59B6; - padding: 0; - background: none; -} - - -/* override table width restrictions */ -.wy-table-responsive table td, .wy-table-responsive table th { - white-space: normal; -} - -.wy-table-responsive { - margin-bottom: 24px; - max-width: 100%; - overflow: visible; -} - -table.contentstable { - margin: 0; -} - -td.rightcolumn { - padding-left: 30px; -} - -div#wipwarning { - font-size: 14px; - border: 1px solid #ecc; - color: #f66; - background: #ffe8e8; - padding: 10px 30px; - margin-bottom: 30px; -} -.content-container{ - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; - width:100%; -} diff --git a/src/doc/4.0-alpha1/_static/file.png b/src/doc/4.0-alpha1/_static/file.png deleted file mode 100644 index a858a410e..000000000 Binary files a/src/doc/4.0-alpha1/_static/file.png and /dev/null differ diff --git a/src/doc/4.0-alpha1/_static/jquery-3.2.1.js b/src/doc/4.0-alpha1/_static/jquery-3.2.1.js deleted file mode 100644 index d2d8ca479..000000000 --- a/src/doc/4.0-alpha1/_static/jquery-3.2.1.js +++ /dev/null @@ -1,10253 +0,0 @@ -/*! - * jQuery JavaScript Library v3.2.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2017-03-20T18:59Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? 
- factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.2.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. 
- each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && Array.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? 
- [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
-function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. 
- if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - disabledAncestor( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. - } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( el ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push("~="); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push(".#.+[+~]"); - } - }); - - assert(function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement("input"); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - 
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - if ( support.matchesSelector && documentIsHTML && - !compilerCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) {} - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? 
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -var risSimple = /^.[^:#\[\.,]*$/; - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Simple selector that can be filtered directly, removing non-Elements - if ( risSimple.test( qualifier ) ) { - return jQuery.filter( qualifier, elements, not ); - } - - // Complex selector, compare the two sets, removing non-Elements - qualifier = jQuery.filter( qualifier, elements ); - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; - } ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? - jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( nodeName( elem, "iframe" ) ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( jQuery.isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( jQuery.isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? 
- [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. 
- // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. - } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "
", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps 
(WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( ">tbody", elem )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( isFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl ) { - jQuery._evalUrl( node.src ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = jQuery.contains( elem.ownerDocument, elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rmargin = ( /^margin/ ); - -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - div.style.cssText = - "box-sizing:border-box;" + - "position:relative;display:block;" + - "margin:auto;border:1px;padding:1px;" + - "top:1%;width:50%"; - div.innerHTML = ""; - documentElement.appendChild( container ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = divStyle.marginLeft === "2px"; - boxSizingReliableVal = divStyle.width === "4px"; - - // Support: Android 4.0 - 4.3 only - // Some styles come back with percentage values, even though they shouldn't - div.style.marginRight = "50%"; - pixelMarginRightVal = divStyle.marginRight === "4px"; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + - "padding:0;margin-top:1px;position:absolute"; - container.appendChild( div ); - - jQuery.extend( support, { - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelMarginRight: function() { - computeStyleTests(); - return pixelMarginRightVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }, - - cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style; - -// Return a css property mapped to a potentially vendor prefixed property -function vendorPropName( name ) { - - // Shortcut for names that are not vendor prefixed - if ( name in emptyStyle ) { - return name; - } - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a property mapped along what jQuery.cssProps suggests or to -// a vendor prefixed property. -function finalPropName( name ) { - var ret = jQuery.cssProps[ name ]; - if ( !ret ) { - ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; - } - return ret; -} - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i, - val = 0; - - // If we already have the right measurement, avoid augmentation - if ( extra === ( isBorderBox ? "border" : "content" ) ) { - i = 4; - - // Otherwise initialize for horizontal or vertical properties - } else { - i = name === "width" ? 
1 : 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // At this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - - // At this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // At this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with computed style - var valueIsBorderBox, - styles = getStyles( elem ), - val = curCSS( elem, name, styles ), - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test( val ) ) { - return val; - } - - // Check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && - ( support.boxSizingReliable() || val === elem.style[ name ] ); - - // Fall back to offsetWidth/Height when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - if ( val === "auto" ) { - val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; - } - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - - // Use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - "float": "cssFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - if ( type === "number" ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
- swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - } ) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = extra && getStyles( elem ), - subtract = extra && augmentWidthOrHeight( - elem, - name, - extra, - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ); - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ name ] = value; - value = jQuery.css( elem, name ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && - ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || - jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = jQuery.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 13 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( 
restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( jQuery.isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - jQuery.proxy( result.stop, result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( type === "string" ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = value.match( rnothtmlwhite ) || []; - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, isFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 
2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; -} ); - -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - - - - -support.focusin = "onfocusin" in window; - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = jQuery.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport 
); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - 
// If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - 
contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -' - ---- -
-
- -
-
-
- -
-

Dynamo

-
-

Gossip

-
-

Todo

-

todo

-
-
-
-

Failure Detection

-
-

Todo

-

todo

-
-
-
-

Token Ring/Ranges

-
-

Todo

-

todo

-
-
-
-

Replication

-

The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are SimpleStrategy and NetworkTopologyStrategy.

-
-

SimpleStrategy

-

SimpleStrategy allows a single integer replication_factor to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if replication_factor is 3, then three different nodes should store -a copy of each row.

-

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until replication_factor distinct nodes have been added to the set of replicas.
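As a rough illustration of the above (the keyspace name and contact address below are placeholders for this sketch, not values defined elsewhere in these docs), a keyspace using SimpleStrategy with replication_factor 3 could be created from a shell:

```bash
# Hypothetical sketch: create a keyspace whose rows are stored on 3 distinct nodes.
# "demo_ks" and 127.0.0.1 are placeholders for illustration only.
cqlsh 127.0.0.1 -e "CREATE KEYSPACE IF NOT EXISTS demo_ks
  WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};"

# Show the keyspace definition, including its replication settings.
cqlsh 127.0.0.1 -e "DESCRIBE KEYSPACE demo_ks;"
```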

-
-
-

NetworkTopologyStrategy

-

NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier to add new physical or virtual datacenters to the cluster later.

-

In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially surprising implications. For example, if the racks do not hold an equal number of nodes, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to configure all nodes in a single "rack".
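For example (the datacenter names and factors below are illustrative assumptions, not values from this document), per-datacenter replication factors can be declared like this:

```bash
# Hypothetical sketch: keep 3 replicas in datacenter "DC1" and 2 in "DC2".
# Datacenter names must match what the cluster's snitch reports (see `nodetool status`).
cqlsh -e "CREATE KEYSPACE IF NOT EXISTS demo_ks
  WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 2};"
```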

-
-
-

Transient Replication

-

Transient replication allows you to configure a subset of replicas to only replicate data that hasn't been incrementally repaired. This allows you to decouple data redundancy from availability. For instance, if you have a keyspace replicated at rf 3, and alter it to rf 5 with 2 transient replicas, you go from being able to tolerate one failed replica to being able to tolerate two, without a corresponding increase in storage usage. This is because 3 nodes will replicate all the data for a given token range, and the other 2 will only replicate data that hasn't been incrementally repaired.

-

To use transient replication, you first need to enable it in cassandra.yaml. Once enabled, both SimpleStrategy and NetworkTopologyStrategy can be configured to transiently replicate data. You configure it by specifying the replication factor as <total_replicas>/<transient_replicas>.
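As a minimal sketch of the <total_replicas>/<transient_replicas> format (assuming transient replication has already been enabled in cassandra.yaml; the keyspace and datacenter names are placeholders):

```bash
# Hypothetical sketch: 5 replicas in total for DC1, 2 of which are transient.
cqlsh -e "ALTER KEYSPACE demo_ks
  WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '5/2'};"
```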

-

Transiently replicated keyspaces only support tables created with read_repair set to NONE, and monotonic reads are not currently supported. You also can't use LWT, logged batches, or counters in 4.0. You will possibly never be able to use materialized views with transiently replicated keyspaces, and probably never be able to use secondary indexes (2i) with them.

-

Transient replication is an experimental feature that may not be ready for production use. The expected audience is experienced users of Cassandra who are capable of fully validating a deployment of their particular application. That means being able to check that operations like reads, writes, decommission, remove, rebuild, repair, and replace all work with your queries, data, configuration, operational practices, and availability requirements.

-

It is anticipated that 4.next will support monotonic reads with transient replication as well as LWT, logged batches, and -counters.

-
-
-
-

Tunable Consistency

-

Cassandra supports a per-operation tradeoff between consistency and availability through Consistency Levels. -Essentially, an operation’s consistency level specifies how many of the replicas need to respond to the coordinator in -order to consider the operation a success.

-

The following consistency levels are available:

-
-
ONE
-
Only a single replica must respond.
-
TWO
-
Two replicas must respond.
-
THREE
-
Three replicas must respond.
-
QUORUM
-
A majority (n/2 + 1) of the replicas must respond.
-
ALL
-
All of the replicas must respond.
-
LOCAL_QUORUM
-
A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
EACH_QUORUM
-
A majority of the replicas in each datacenter must respond.
-
LOCAL_ONE
-
Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not sent to replicas in a remote datacenter.
-
ANY
-
A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later -attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for -write operations.
-
-

Write operations are always sent to all replicas, regardless of consistency level. The consistency level simply -controls how many responses the coordinator waits for before responding to the client.

-

For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level, with one exception. Speculative retry may issue a redundant read request to an extra replica if the other replicas -have not responded within a specified time window.

-
-

Picking Consistency Levels

-

It is common to pick read and write consistency levels that are high enough to overlap, resulting in “strong” -consistency. This is typically expressed as W + R > RF, where W is the write consistency level, R is the -read consistency level, and RF is the replication factor. For example, if RF = 3, a QUORUM request will -require responses from at least two of the three replicas. If QUORUM is used for both writes and reads, at least -one of the replicas is guaranteed to participate in both the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, LOCAL_QUORUM can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter.
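For example, with RF = 3, a QUORUM write (2 replicas) plus a QUORUM read (2 replicas) gives W + R = 4 > 3, so the write and read replica sets must overlap. A hedged sketch of exercising this from the shell (keyspace and table names are placeholders):

```bash
# Hypothetical sketch: set the session consistency level to QUORUM, then read.
# With RF=3, QUORUM writes and QUORUM reads overlap on at least one replica.
cqlsh -e "CONSISTENCY QUORUM; SELECT * FROM demo_ks.demo_table LIMIT 1;"
```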

-

If this type of strong consistency isn’t required, lower consistency levels like ONE may be used to improve -throughput, latency, and availability.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/architecture/guarantees.html b/src/doc/4.0-alpha1/architecture/guarantees.html deleted file mode 100644 index 18bfded39..000000000 --- a/src/doc/4.0-alpha1/architecture/guarantees.html +++ /dev/null @@ -1,114 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Guarantees" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
- -
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/architecture/index.html b/src/doc/4.0-alpha1/architecture/index.html deleted file mode 100644 index 0ebcb8a78..000000000 --- a/src/doc/4.0-alpha1/architecture/index.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Architecture" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha1/architecture/overview.html b/src/doc/4.0-alpha1/architecture/overview.html deleted file mode 100644 index 27cc5a101..000000000 --- a/src/doc/4.0-alpha1/architecture/overview.html +++ /dev/null @@ -1,114 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Overview" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha1/architecture/storage_engine.html b/src/doc/4.0-alpha1/architecture/storage_engine.html deleted file mode 100644 index dc7f6b321..000000000 --- a/src/doc/4.0-alpha1/architecture/storage_engine.html +++ /dev/null @@ -1,293 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Storage Engine" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Storage Engine

-
-

CommitLog

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables.

-

All mutations are write-optimized by being stored in commitlog segments, reducing the number of seeks needed to write to disk. Commitlog segments are limited by the "commitlog_segment_size_in_mb" option; once that size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all their data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running "nodetool drain" before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup.
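For example, a clean shutdown that avoids commitlog replay on the next start might look like the following sketch (the service name is an assumption and varies by installation):

```bash
# Flush all memtables to SSTables so the commitlog does not need to be replayed on restart.
nodetool drain
# Then stop the node; the service name here is installation-specific.
sudo systemctl stop cassandra
```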

-
    -
  • commitlog_segment_size_in_mb: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via the max_mutation_size_in_kb setting in cassandra.yaml. Its default is half of commitlog_segment_size_in_mb * 1024.
  • -
-

*NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*

-

Default Value: 32

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied.

-
    -
  • commitlog_sync: may be either “periodic” or “batch.”

    -
      -
    • batch: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait “commitlog_sync_batch_window_in_ms” milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason.

      -
        -
      • commitlog_sync_batch_window_in_ms: Time to wait between “batch” fsyncs
      • -
      -

      Default Value: 2

      -
    • -
    • periodic: In periodic mode, writes are immediately ack’ed, and the CommitLog is simply synced every “commitlog_sync_period_in_ms” milliseconds.

      -
        -
      • commitlog_sync_period_in_ms: Time to wait between “periodic” fsyncs
      • -
      -

      Default Value: 10000

      -
    • -
    -
  • -
-

Default Value: batch

-

* NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period or more if the sync is delayed. If using “batch” mode, it is recommended to store commitlogs in a separate, dedicated device.
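One quick, hypothetical way to see which sync mode and window a node is using is to inspect its cassandra.yaml; the path below is distribution-specific:

```bash
# Show the active commitlog sync settings; adjust the path for your installation.
grep -E '^(commitlog_sync|commitlog_sync_period_in_ms|commitlog_sync_batch_window_in_ms)' \
  /etc/cassandra/cassandra.yaml
```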

-
    -
  • commitlog_directory: This option is commented out by default. When running on magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
  • -
-

Default Value: /var/lib/cassandra/commitlog

-
    -
  • commitlog_compression: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported.
  • -
-

Default Value: (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
    -
  • commitlog_total_space_in_mb: Total space to use for commit logs on disk.
  • -
-

If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume.

-

Default Value: 8192

-
-
-

Memtables

-

Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable SSTables. This can be triggered in several -ways:

-
    -
  • The memory usage of the memtables exceeds the configured threshold (see memtable_cleanup_threshold)
  • -
  • The CommitLog approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to -be freed
  • -
-

Memtables may be stored entirely on-heap or partially off-heap, depending on memtable_allocation_type.
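A flush can also be requested manually, which is sometimes useful when testing; a sketch (the keyspace and table names are placeholders):

```bash
# Hypothetical sketch: flush the memtables of one table to SSTables on the local node.
nodetool flush demo_ks demo_table
```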

-
-
-

SSTables

-

SSTables are the immutable data files that Cassandra uses for persisting data on disk.

-

As SSTables are flushed to disk from Memtables or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.

-

Each SSTable is comprised of multiple components stored in separate files:

-
-
Data.db
-
The actual data, i.e. the contents of rows.
-
Index.db
-
An index from partition keys to positions in the Data.db file. For wide partitions, this may also include an -index to rows within a partition.
-
Summary.db
-
A sampling of (by default) every 128th entry in the Index.db file.
-
Filter.db
-
A Bloom Filter of the partition keys in the SSTable.
-
CompressionInfo.db
-
Metadata about the offsets and lengths of compression chunks in the Data.db file.
-
Statistics.db
-
Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, -repair, compression, TTLs, and more.
-
Digest.crc32
-
A CRC-32 digest of the Data.db file.
-
TOC.txt
-
A plain text list of the component files for the SSTable.
-
-

Within the Data.db file, rows are organized by partition. These partitions are sorted in token order (i.e. by a hash of the partition key when the default partitioner, Murmur3Partitioner, is used). Within a partition, rows are stored in the order of their clustering keys.

-

SSTables can be optionally compressed using block-based compression.
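As a rough illustration, the component files described above can be seen on disk; the data directory, keyspace, and table names below are placeholders:

```bash
# List SSTable component files for one table; the path is installation-specific and
# the keyspace/table directory name includes a generated table ID suffix.
ls /var/lib/cassandra/data/demo_ks/demo_table-*/
# Expect files matching *-Data.db, *-Index.db, *-Summary.db, *-Filter.db,
# *-CompressionInfo.db, *-Statistics.db, *-Digest.crc32 and *-TOC.txt.
```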

-
-
-

SSTable Versions

-

This section was created using the following -gist -which utilized this original -source.

-

The version numbers, to date are:

-
-

Version 0

-
    -
  • b (0.7.0): added version to sstable filenames
  • -
  • c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings
  • -
  • d (0.7.0): row size in data component becomes a long instead of int
  • -
  • e (0.7.0): stores undecorated keys in data and index components
  • -
  • f (0.7.0): switched bloom filter implementations in data component
  • -
  • g (0.8): tracks flushed-at context in metadata component
  • -
-
-
-

Version 1

-
    -
  • h (1.0): tracks max client timestamp in metadata component
  • -
  • hb (1.0.3): records compression ratio in metadata component
  • -
  • hc (1.0.4): records partitioner in metadata component
  • -
  • hd (1.0.10): includes row tombstones in maxtimestamp
  • -
  • he (1.1.3): includes ancestors generation in metadata component
  • -
  • hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782)
  • -
  • ia (1.2.0):
      -
    • column indexes are promoted to the index file
    • -
    • records estimated histogram of deletion times in tombstones
    • -
    • bloom filter (keys and columns) upgraded to Murmur3
    • -
    -
  • -
  • ib (1.2.1): tracks min client timestamp in metadata component
  • -
  • ic (1.2.5): omits per-row bloom filter of column names
  • -
-
-
-

Version 2

-
    -
  • ja (2.0.0):
      -
    • super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. We do need a major version bump however, because we should not allow streaming of super columns into this new format)
    • -
    • tracks max local deletiontime in sstable metadata
    • -
    • records bloom_filter_fp_chance in metadata component
    • -
    • remove data size and column count from data file (CASSANDRA-4180)
    • -
    • tracks max/min column values (according to comparator)
    • -
    -
  • -
  • jb (2.0.1):
      -
    • switch from crc32 to adler32 for compression checksums
    • -
    • checksum the compressed data
    • -
    -
  • -
  • ka (2.1.0):
      -
    • new Statistics.db file format
    • -
    • index summaries can be downsampled and the sampling level is persisted
    • -
    • switch uncompressed checksums to adler32
    • -
    • tracks presence of legacy (local and remote) counter shards
    • -
    -
  • -
  • la (2.2.0): new file name format
  • -
  • lb (2.2.7): commit log lower bound included
  • -
-
-
-

Version 3

-
    -
  • ma (3.0.0):
      -
    • swap bf hash order
    • -
    • store rows natively
    • -
    -
  • -
  • mb (3.0.7, 3.7): commit log lower bound included
  • -
  • mc (3.0.8, 3.9): commit log intervals included
  • -
-
-
-

Example Code

-

The following example is useful for finding all sstables that do not match the “ib” SSTable version

-
find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots"
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/bugs.html b/src/doc/4.0-alpha1/bugs.html deleted file mode 100644 index 9dead540a..000000000 --- a/src/doc/4.0-alpha1/bugs.html +++ /dev/null @@ -1,109 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Reporting Bugs" -doc-header-links: ' - - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Reporting Bugs

-

If you encounter a problem with Cassandra, the first places to ask for help are the user mailing list and the cassandra Slack room.

-

If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the Apache Cassandra JIRA. Please provide as much -detail as you can on your problem, and don’t forget to indicate which version of Cassandra you are running and in which -environment.

-

Further details on how to contribute can be found at our Contributing to Cassandra section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path.

-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/configuration/cassandra_config_file.html b/src/doc/4.0-alpha1/configuration/cassandra_config_file.html deleted file mode 100644 index e26162331..000000000 --- a/src/doc/4.0-alpha1/configuration/cassandra_config_file.html +++ /dev/null @@ -1,1948 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Configuring Cassandra" - -doc-title: "Cassandra Configuration File" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Cassandra Configuration File

-
-

cluster_name

-

The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another.

-

Default Value: ‘Test Cluster’

-
-
-

num_tokens

-

This defines the number of tokens randomly assigned to this node on the ring. -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability.

-

If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below.

-

Specifying initial_token will override this setting on the node’s initial start; -on subsequent starts, this setting will apply even if initial_token is set.

-

If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations

-

Default Value: 256
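For illustration, a hypothetical cassandra.yaml fragment giving every node the same (default) token count could look like this:

num_tokens: 256
# leave initial_token unset so tokens are assigned randomly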

-
-
-

allocate_tokens_for_keyspace

-

This option is commented out by default.

-

Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replication strategy used by the specified -keyspace.

-

The load assigned to each node will be close to proportional to its number of -vnodes.

-

Only supported with the Murmur3Partitioner.

-

Default Value: KEYSPACE

-
-
-

initial_token

-

This option is commented out by default.

-

initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) – in which case you should provide a -comma-separated list – it’s primarily used when adding nodes to legacy clusters -that do not have vnodes enabled.
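As a hedged sketch only, a node in a legacy single-token cluster might pin its token explicitly, while a vnode setup would supply a comma-separated list (the token placeholders below are examples, not recommendations):

# single-token (legacy) node:
# initial_token: "<token>"
# vnode setup (num_tokens > 1):
# initial_token: "<token1>,<token2>,<token3>"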

-
-
-

hinted_handoff_enabled

-

See http://wiki.apache.org/cassandra/HintedHandoff -May either be “true” or “false” to enable globally

-

Default Value: true

-
-
-

hinted_handoff_disabled_datacenters

-

This option is commented out by default.

-

When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff

-

Default Value (complex option):

-
#    - DC1
-#    - DC2
-
-
-
-
-

max_hint_window_in_ms

-

this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again.

-

Default Value: 10800000 # 3 hours
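A minimal sketch combining the hinted handoff settings described above (values are the documented defaults; 10800000 ms is 3 hours):

hinted_handoff_enabled: true
max_hint_window_in_ms: 10800000 # 3 hours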

-
-
-

hinted_handoff_throttle_in_kb

-

Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.)

-

Default Value: 1024

-
-
-

max_hints_delivery_threads

-

Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower

-

Default Value: 2

-
-
-

hints_directory

-

This option is commented out by default.

-

Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints.

-

Default Value: /var/lib/cassandra/hints

-
-
-

hints_flush_period_in_ms

-

How often hints should be flushed from the internal buffers to disk. -Will not trigger fsync.

-

Default Value: 10000

-
-
-

max_hints_file_size_in_mb

-

Maximum size for a single hints file, in megabytes.

-

Default Value: 128

-
-
-

hints_compression

-

This option is commented out by default.

-

Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

batchlog_replay_throttle_in_kb

-

Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster.

-

Default Value: 1024

-
-
-

authenticator

-

Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}.

-
    -
  • AllowAllAuthenticator performs no checks - set it to disable authentication.
  • -
  • PasswordAuthenticator relies on username/password pairs to authenticate -users. It keeps usernames and hashed passwords in system_auth.roles table. -Please increase system_auth keyspace replication factor if you use this authenticator. -If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
  • -
-

Default Value: AllowAllAuthenticator

-
-
-

authorizer

-

Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}.

-
    -
  • AllowAllAuthorizer allows any action to any user - set it to disable authorization.
  • -
  • CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllAuthorizer

-
-
-

role_manager

-

Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable.

-
    -
  • CassandraRoleManager stores role data in the system_auth keyspace. Please -increase system_auth keyspace replication factor if you use this role manager.
  • -
-

Default Value: CassandraRoleManager
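For illustration only, a sketch that turns on password-based authentication and permission checks using the implementations listed above (remember to increase the system_auth keyspace replication factor, as noted for each option):

authenticator: PasswordAuthenticator
authorizer: CassandraAuthorizer
role_manager: CassandraRoleManager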

-
-
-

network_authorizer

-

Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}.

-
    -
  • AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization.
  • -
  • CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllNetworkAuthorizer

-
-
-

roles_validity_in_ms

-

Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator.

-

Default Value: 2000

-
-
-

roles_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms.

-

Default Value: 2000

-
-
-

permissions_validity_in_ms

-

Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer.

-

Default Value: 2000

-
-
-

permissions_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms.

-

Default Value: 2000

-
-
-

credentials_validity_in_ms

-

Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching.

-

Default Value: 2000

-
-
-

credentials_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms.

-

Default Value: 2000

-
-
-

partitioner

-

The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. The partitioner can NOT be -changed without reloading all data. If you are adding nodes or upgrading, -you should set this to the same partitioner that you are currently using.

-

The default partitioner is the Murmur3Partitioner. Older partitioners -such as the RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner have been included for backward compatibility only. -For new clusters, you should NOT change this value.

-

Default Value: org.apache.cassandra.dht.Murmur3Partitioner

-
-
-

data_file_directories

-

This option is commented out by default.

-

Directories where Cassandra should store data on disk. If multiple -directories are specified, Cassandra will spread data evenly across -them by partitioning the token ranges. -If not set, the default directory is $CASSANDRA_HOME/data/data.

-

Default Value (complex option):

-
#     - /var/lib/cassandra/data
-
-
-
-
-

commitlog_directory

-

This option is commented out by default. -Directory where Cassandra should store the commit log. When running on magnetic HDD, this should be a -separate spindle from the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog.

-

Default Value: /var/lib/cassandra/commitlog
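A hypothetical layout putting the commit log on its own spindle, separate from two data directories (the paths are examples only):

data_file_directories:
    - /mnt/disk1/cassandra/data
    - /mnt/disk2/cassandra/data
commitlog_directory: /mnt/disk3/cassandra/commitlog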

-
-
-

cdc_enabled

-

Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory).

-

Default Value: false

-
-
-

cdc_raw_directory

-

This option is commented out by default.

-

CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw.

-

Default Value: /var/lib/cassandra/cdc_raw

-
-
-

disk_failure_policy

-

Policy for data disk failures:

-
-
die
-
shut down gossip and client transports and kill the JVM for any fs errors or -single-sstable errors, so the node can be replaced.
-
stop_paranoid
-
shut down gossip and client transports even for single-sstable errors, -kill the JVM for errors during startup.
-
stop
-
shut down gossip and client transports, leaving the node effectively dead, but -can still be inspected via JMX, kill the JVM for errors during startup.
-
best_effort
-
stop using the failed disk and respond to requests based on -remaining available sstables. This means you WILL see obsolete -data at CL.ONE!
-
ignore
-
ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-
-

Default Value: stop

-
-
-

commit_failure_policy

-

Policy for commit disk failures:

-
-
die
-
shut down the node and kill the JVM, so the node can be replaced.
-
stop
-
shut down the node, leaving the node effectively dead, but -can still be inspected via JMX.
-
stop_commit
-
shutdown the commit log, letting writes collect but -continuing to service reads, as in pre-2.0.5 Cassandra
-
ignore
-
ignore fatal errors and let the batches fail
-
-

Default Value: stop

-
-
-

prepared_statements_cache_size_mb

-

Maximum size of the native protocol prepared statement cache

-

Valid values are either “auto” (omitting the value) or a value greater than 0.

-

Note that specifying too large a value will result in long-running GCs and possibly -out-of-memory errors. Keep the value at a small fraction of the heap.

-

If you constantly see “prepared statements discarded in the last minute because -cache limit reached” messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts.

-

Only change the default value if you really have more prepared statements than -fit in the cache. In most cases it is not necessary to change this value. -Constantly re-preparing statements is a performance penalty.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater
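As a worked example under an assumed 8 GB (8192 MB) heap, the “auto” default would be max(8192 / 256, 10) = 32 MB; setting the value explicitly is rarely needed:

# prepared_statements_cache_size_mb:    # "auto": 1/256th of heap, at least 10 MB
# with an 8192 MB heap, "auto" resolves to 32 MB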

-
-
-

key_cache_size_in_mb

-

Maximum size of the key cache in memory.

-

Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it’s worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It’s best to only use the -row cache if you have hot rows or static rows.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.

-
-
-

key_cache_save_period

-

Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 14400 or 4 hours.

-

Default Value: 14400

-
-
-

key_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

row_cache_class_name

-

This option is commented out by default.

-

Row cache implementation class name. Available implementations:

-
-
org.apache.cassandra.cache.OHCProvider
-
Fully off-heap row cache implementation (default).
-
org.apache.cassandra.cache.SerializingCacheProvider
-
This is the row cache implementation available -in previous releases of Cassandra.
-
-

Default Value: org.apache.cassandra.cache.OHCProvider

-
-
-

row_cache_size_in_mb

-

Maximum size of the row cache in memory. -Please note that the OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory than the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Never allow your system to swap.

-

Default value is 0, to disable row caching.

-

Default Value: 0

-
-
-

row_cache_save_period

-

Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 0 to disable saving the row cache.

-

Default Value: 0

-
-
-

row_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved

-

Default Value: 100

-
-
-

counter_cache_size_in_mb

-

Maximum size of the counter cache in memory.

-

Counter cache helps to reduce counter locks’ contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it’s relatively cheap.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.

-
-
-

counter_cache_save_period

-

Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Default is 7200 or 2 hours.

-

Default Value: 7200

-
-
-

counter_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

saved_caches_directory

-

This option is commented out by default.

-

saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.

-

Default Value: /var/lib/cassandra/saved_caches

-
-
-

commitlog_sync_batch_window_in_ms

-

This option is commented out by default.

-

commitlog_sync may be either “periodic”, “group”, or “batch.”

-

When in batch mode, Cassandra won’t ack writes until the commit log -has been flushed to disk. Each incoming write will trigger the flush task. -commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had -almost no value, and is being removed.

-

Default Value: 2

-
-
-

commitlog_sync_group_window_in_ms

-

This option is commented out by default.

-

group mode is similar to batch mode, where Cassandra will not ack writes -until the commit log has been flushed to disk. The difference is group -mode will wait up to commitlog_sync_group_window_in_ms between flushes.

-

Default Value: 1000

-
-
-

commitlog_sync

-

the default option is “periodic” where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds.

-

Default Value: periodic

-
-
-

commitlog_sync_period_in_ms

-

Default Value: 10000
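A sketch of the default periodic configuration, tying commitlog_sync to its period (both values are the documented defaults):

commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000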

-
-
-

periodic_commitlog_sync_lag_block_in_ms

-

This option is commented out by default.

-

When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete.

-
-
-

commitlog_segment_size_in_mb

-

The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables.

-

The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via the max_mutation_size_in_kb setting in -cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048.

-

NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024

-

Default Value: 32
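As an illustrative calculation, with the default segment size the implied mutation limit is half of 32 * 1024 = 16384 KB; conversely, if max_mutation_size_in_kb is set explicitly, the segment size must be at least twice max_mutation_size_in_kb / 1024:

commitlog_segment_size_in_mb: 32
# implied default: max_mutation_size_in_kb = 32 * 1024 / 2 = 16384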

-
-
-

commitlog_compression

-

This option is commented out by default.

-

Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

seed_provider

-

any class that implements the SeedProvider interface and has a -constructor that takes a Map<String, String> of parameters will do.

-

Default Value (complex option):

-
# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring.  You must change this if you are running
-# multiple nodes!
-- class_name: org.apache.cassandra.locator.SimpleSeedProvider
-  parameters:
-      # seeds is actually a comma-delimited list of addresses.
-      # Ex: "<ip1>,<ip2>,<ip3>"
-      - seeds: "127.0.0.1:7000"
-
-
-
-
-

concurrent_reads

-

For workloads with more data than can fit in memory, Cassandra’s -bottleneck will be reads that need to fetch data from -disk. “concurrent_reads” should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -“concurrent_counter_writes”, since counter writes read the current -values before incrementing and writing them back.

-

On the other hand, since writes are almost never IO bound, the ideal -number of “concurrent_writes” is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb.

-

Default Value: 32
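Applying the rules of thumb above to a hypothetical host with 4 data drives and 16 cores (concurrent_writes and concurrent_counter_writes are documented just below):

concurrent_reads: 64            # 16 * 4 drives
concurrent_writes: 128          # 8 * 16 cores
concurrent_counter_writes: 64   # same rule as reads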

-
-
-

concurrent_writes

-

Default Value: 32

-
-
-

concurrent_counter_writes

-

Default Value: 32

-
-
-

concurrent_materialized_view_writes

-

For materialized view writes, as there is a read involved, this should -be limited by the lesser of concurrent reads or concurrent writes.

-

Default Value: 32

-
-
-

file_cache_size_in_mb

-

This option is commented out by default.

-

Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as a -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed.

-

Default Value: 512

-
-
-

buffer_pool_use_heap_if_exhausted

-

This option is commented out by default.

-

Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

-

Default Value: true

-
-
-

disk_optimization_strategy

-

This option is commented out by default.

-

The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks)

-

Default Value: ssd

-
-
-

memtable_heap_space_in_mb

-

This option is commented out by default.

-

Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold. -If omitted, Cassandra will set both to 1/4 the size of the heap.

-

Default Value: 2048

-
-
-

memtable_offheap_space_in_mb

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

memtable_cleanup_threshold

-

This option is commented out by default.

-

memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information.

-

Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load.

-

memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)

-

Default Value: 0.11
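As a worked instance of the formula above (for illustration only, since the option is deprecated):

# memtable_flush_writers: 2  =>  memtable_cleanup_threshold = 1 / (2 + 1) ≈ 0.33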

-
-
-

memtable_allocation_type

-

Specify the way Cassandra allocates and manages memtable memory. -Options are:

-
-
heap_buffers
-
on heap nio buffers
-
offheap_buffers
-
off heap (direct) nio buffers
-
offheap_objects
-
off heap objects
-
-

Default Value: heap_buffers

-
-
-

repair_session_space_in_mb

-

This option is commented out by default.

-

Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair.

-

For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.

-
-
-

commitlog_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for commit logs on disk.

-

If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume.

-

Default Value: 8192

-
-
-

memtable_flush_writers

-

This option is commented out by default.

-

This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound.

-

Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time.

-

You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory.

-

memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers.

-

Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead.

-

There is a direct tradeoff between the number of memtables that can be flushed concurrently -and flush size and frequency. More is not better; you just need enough flush writers -to never stall waiting for flushing to free memory.

-

Default Value: 2

-
-
-

cdc_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for change-data-capture logs on disk.

-

If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed.

-

The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides.

-

Default Value: 4096

-
-
-

cdc_free_space_check_interval_ms

-

This option is commented out by default.

-

When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms

-

Default Value: 250

-
-
-

index_summary_capacity_in_mb

-

A fixed memory pool size in MB for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory.

-
-
-

index_summary_resize_interval_in_minutes

-

How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional to their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level.

-

Default Value: 60

-
-
-

trickle_fsync

-

Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters.

-

Default Value: false

-
-
-

trickle_fsync_interval_in_kb

-

Default Value: 10240

-
-
-

storage_port

-

TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7000

-
-
-

ssl_storage_port

-

SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7001

-
-
-

listen_address

-

Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate!

-

Set listen_address OR listen_interface, not both.

-

Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be).

-

Setting listen_address to 0.0.0.0 is always wrong.

-

Default Value: localhost
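A hedged sketch for a multi-node cluster, binding to the node’s own private address (the address below is a placeholder; set listen_address or listen_interface, never both):

listen_address: <private_ip_of_this_node>
# listen_interface: eth0    # alternative; do not set both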

-
-
-

listen_interface

-

This option is commented out by default.

-

Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth0

-
-
-

listen_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_address

-

This option is commented out by default.

-

Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address

-

Default Value: 1.2.3.4

-
-
-

listen_on_broadcast_address

-

This option is commented out by default.

-

When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2.

-

Default Value: false

-
-
-

internode_authenticator

-

This option is commented out by default.

-

Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes.

-

Default Value: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

-
-
-

start_native_transport

-

Whether to start the native transport server. -The address on which the native transport is bound is defined by rpc_address.

-

Default Value: true

-
-
-

native_transport_port

-

port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 9042

-
-
-

native_transport_port_ssl

-

This option is commented out by default. -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted.

-

Default Value: 9142
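For illustration, a sketch that keeps the standard port unencrypted while serving encrypted clients on a dedicated port, as described above (client_encryption_options must also be enabled; see further down this page):

native_transport_port: 9042
native_transport_port_ssl: 9142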

-
-
-

native_transport_max_threads

-

This option is commented out by default. -The maximum threads for handling requests (note that idle threads are stopped -after 30 seconds so there is no corresponding minimum setting).

-

Default Value: 128

-
-
-

native_transport_max_frame_size_in_mb

-

This option is commented out by default.

-

The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you’re changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

native_transport_frame_block_size_in_kb

-

This option is commented out by default.

-

If checksumming is enabled as a protocol option, denotes the size of the chunks into which frame -bodies will be broken and checksummed.

-

Default Value: 32

-
-
-

native_transport_max_concurrent_connections

-

This option is commented out by default.

-

The maximum number of concurrent client connections. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_max_concurrent_connections_per_ip

-

This option is commented out by default.

-

The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_allow_older_protocols

-

Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored.

-

Default Value: true

-
-
-

native_transport_idle_timeout_in_ms

-

This option is commented out by default.

-

Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period.

-

Clients may implement heartbeats by sending OPTIONS native protocol message after a timeout, which -will reset idle timeout timer on the server side. To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side.

-

Idle connection timeouts are disabled by default.

-

Default Value: 60000

-
-
-

rpc_address

-

The address or interface to bind the native transport server to.

-

Set rpc_address OR rpc_interface, not both.

-

Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node).

-

Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0.

-

For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: localhost
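A hypothetical sketch binding the native transport on all interfaces, which then requires an explicit broadcast address (the address shown is a placeholder):

rpc_address: 0.0.0.0
broadcast_rpc_address: <routable_ip_of_this_node>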

-
-
-

rpc_interface

-

This option is commented out by default.

-

Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth1

-
-
-

rpc_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_rpc_address

-

This option is commented out by default.

-

RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set.

-

Default Value: 1.2.3.4

-
-
-

rpc_keepalive

-

enable or disable keepalive on rpc/native connections

-

Default Value: true

-
-
-

internode_send_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication. -Note that when setting this, the buffer size is limited by net.core.wmem_max, -and when not setting it, it is defined by net.ipv4.tcp_wmem. -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_rmem -and ‘man tcp’

-
-
-

internode_recv_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem

-
-
-

incremental_backups

-

Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator’s -responsibility.

-

Default Value: false

-
-
-

snapshot_before_compaction

-

Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won’t clean up the -snapshots for you. Mostly useful if you’re paranoid when there -is a data format change.

-

Default Value: false

-
-
-

auto_snapshot

-

Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop.

-

Default Value: true

-
-
-

column_index_size_in_kb

-

Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these:

-
    -
  • a smaller granularity means more index entries are generated -and looking up rows within the partition by collation column -is faster
  • -
  • but, Cassandra will keep the collation index in memory for hot -rows (as part of the key cache), so a larger granularity means -you can cache more hot rows
  • -
-

Default Value: 64

-
-
-

column_index_cache_size_in_kb

-

Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk.

-

Note that this size refers to the size of the -serialized index information and not the size of the partition.

-

Default Value: 2

-
-
-

concurrent_compactors

-

This option is commented out by default.

-

Number of simultaneous compactions to allow, NOT including -validation “compactions” for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long-running compaction. The default is usually -fine, and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first.

-

concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8.

-

If your data directories are backed by SSD, you should increase this -to the number of cores.

-

Default Value: 1

-
-
-

concurrent_validations

-

This option is commented out by default.

-

Number of simultaneous repair validations to allow. Default is unbounded. -Values less than one are interpreted as unbounded (the default).

-

Default Value: 0

-
-
-

concurrent_materialized_view_builders

-

Number of simultaneous materialized view builder tasks to allow.

-

Default Value: 1

-
-
-

compaction_throughput_mb_per_sec

-

Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this accounts for all types -of compaction, including validation compaction.

-

Default Value: 16

-
-
-

sstable_preemptive_open_interval_in_mb

-

When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot

-

Default Value: 50

-
-
-

stream_entire_sstables

-

This option is commented out by default.

-

When enabled, permits Cassandra to zero-copy stream entire eligible -SSTables between nodes, including every component. -This speeds up the network transfer significantly subject to -throttling specified by stream_throughput_outbound_megabits_per_sec. -Enabling this will reduce the GC pressure on sending and receiving node. -When unset, the default is enabled. While this feature tries to keep the -disks balanced, it cannot guarantee it. This feature will be automatically -disabled if internode encryption is enabled. Currently this can be used with -Leveled Compaction. Once CASSANDRA-14586 is fixed other compaction strategies -will benefit as well when used in combination with CASSANDRA-6696.

-

Default Value: true

-
-
-

stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

inter_dc_stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s

-

Default Value: 200

-
-
-

read_request_timeout_in_ms

-

How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

range_request_timeout_in_ms

-

How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 10000

-
-
-

write_request_timeout_in_ms

-

How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 2000

-
-
-

counter_write_request_timeout_in_ms

-

How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

cas_contention_timeout_in_ms

-

How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms.

-

Default Value: 1000

-
-
-

truncate_request_timeout_in_ms

-

How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms.

-

Default Value: 60000

-
-
-

request_timeout_in_ms

-

The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms.

-

Default Value: 10000

-
-
-

internode_application_send_queue_capacity_in_bytes

-

This option is commented out by default.

-

Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details.

-

The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000

-

The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000

-

The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000

-

Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received.

-

The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. So any given node may have a maximum of -N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes) -messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens -nodes should need to communicate with significant bandwidth.

-

The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, -on all links to or from a single node in the cluster. -The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, -on all links to or from any node in the cluster.

-

Default Value: 4194304 #4MiB

-
-
-

internode_application_send_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_send_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

internode_application_receive_queue_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 4194304 #4MiB

-
-
-

internode_application_receive_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_receive_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

slow_query_log_timeout_in_ms

-

How long before a node logs slow queries. Select queries that take longer than -this timeout to execute will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging.

-

Default Value: 500

-
-
-

cross_node_timeout

-

Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests.

-

Warning: before enabling this property make sure ntp is installed -and the times are synchronized between the nodes.

-

Default Value: false

-
-
-

streaming_keep_alive_period_in_secs

-

This option is commented out by default.

-

Set keep-alive period for streaming. -This node will send a keep-alive message periodically with this period. -If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles, the stream session times out and fails. -Default value is 300s (5 minutes), which means a stalled stream -times out in 10 minutes by default.

-

Default Value: 300

-
-
-

streaming_connections_per_host

-

This option is commented out by default.

-

Limit number of connections per host for streaming. -Increase this when you notice that joins are CPU-bound rather than network -bound (for example, a few nodes with big files).

-

Default Value: 1

-
-
-

phi_convict_threshold

-

This option is commented out by default.

-

phi value that must be reached for a host to be marked down. -most users should never need to adjust this.

-

Default Value: 8

-
-
-

endpoint_snitch

-

endpoint_snitch – Set this to a class that implements -IEndpointSnitch. The snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route -requests efficiently
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid -correlated failures. It does this by grouping machines into -“datacenters” and “racks.” Cassandra will do its best not to have -more than one replica on the same “rack” (which may not actually -be a physical location)
  • -
-

CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on “rack1” in “datacenter1”, your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new “datacenter”) and -decommissioning the old ones.

-

Out of the box, Cassandra provides:

-
-
SimpleSnitch:
-
Treats Strategy order as proximity. This can improve cache -locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack -and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via -gossip. If cassandra-topology.properties exists, it is used as a -fallback, allowing migration from the PropertyFileSnitch.
-
PropertyFileSnitch:
-
Proximity is determined by rack and data center, which are -explicitly configured in cassandra-topology.properties.
-
Ec2Snitch:
-
Appropriate for EC2 deployments in a single Region. Loads Region -and Availability Zone information from the EC2 API. The Region is -treated as the datacenter, and the Availability Zone as the rack. -Only private IPs are used, so this will not work across multiple -Regions.
-
Ec2MultiRegionSnitch:
-
Uses public IPs as broadcast_address to allow cross-region -connectivity. (Thus, you should set seed addresses to the public -IP as well.) You will need to open the storage_port or -ssl_storage_port on the public IP firewall. (For intra-Region -traffic, Cassandra will switch to the private IP after -establishing a connection.)
-
RackInferringSnitch:
-
Proximity is determined by rack and data center, which are -assumed to correspond to the 3rd and 2nd octet of each node’s IP -address, respectively. Unless this happens to match your -deployment conventions, this is best used as an example of -writing a custom Snitch class and is provided in that spirit.
-
-

You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath.

-

Default Value: SimpleSnitch
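As a sketch only, a production-style configuration would typically pick the snitch recommended above and describe the local rack and datacenter in cassandra-rackdc.properties:

endpoint_snitch: GossipingPropertyFileSnitch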

-
-
-

dynamic_snitch_update_interval_in_ms

-

controls how often to perform the more expensive part of host score -calculation

-

Default Value: 100

-
-
-

dynamic_snitch_reset_interval_in_ms

-

controls how often to reset all host scores, allowing a bad host to -possibly recover

-

Default Value: 600000

-
-
-

dynamic_snitch_badness_threshold

-

if set greater than zero, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest.

-

Default Value: 0.1
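Taken together, the three dynamic snitch settings above map to the following cassandra.yaml entries; the values shown simply restate the documented defaults:

```yaml
dynamic_snitch_update_interval_in_ms: 100   # expensive part of host score calculation
dynamic_snitch_reset_interval_in_ms: 600000 # reset all host scores, letting a bad host recover
dynamic_snitch_badness_threshold: 0.1       # prefer the pinned replica until it is 10% worse
```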

-
-
-

server_encryption_options

-

Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html

-

NOTE No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks

-

The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore

-

Default Value (complex option):

-
# set to true for allowing secure incoming connections
-enabled: false
-# If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port
-optional: false
-# if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used
-# during upgrade to 4.0; otherwise, set to false.
-enable_legacy_ssl_storage_port: false
-# on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true.
-internode_encryption: none
-keystore: conf/.keystore
-keystore_password: cassandra
-truststore: conf/.truststore
-truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-# require_client_auth: false
-# require_endpoint_verification: false
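For example, to encrypt only the traffic that crosses datacenter boundaries, a sketch built from the options shown above (keystore paths and passwords are placeholders and must match how the stores were generated):

```yaml
server_encryption_options:
    internode_encryption: dc      # encrypt traffic between datacenters only
    enabled: true                 # allow secure incoming connections
    keystore: conf/.keystore      # placeholder path
    keystore_password: changeit   # placeholder password
    truststore: conf/.truststore
    truststore_password: changeit
```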
-
-
-
-
-

client_encryption_options

-

enable or disable client-to-server encryption.

-

Default Value (complex option):

-
enabled: false
-# If enabled and optional is set to true encrypted and unencrypted connections are handled.
-optional: false
-keystore: conf/.keystore
-keystore_password: cassandra
-# require_client_auth: false
-# Set trustore and truststore_password if require_client_auth is true
-# truststore: conf/.truststore
-# truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
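Similarly, a sketch of turning on client-to-server encryption while still accepting unencrypted connections during a rollout (paths and passwords are placeholders):

```yaml
client_encryption_options:
    enabled: true
    optional: true                # also handle unencrypted connections during migration
    keystore: conf/.keystore      # placeholder path
    keystore_password: changeit   # placeholder password
```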
-
-
-
-
-

internode_compression

-

internode_compression controls whether traffic between nodes is -compressed. -Can be:

-
-
all
-
all traffic is compressed
-
dc
-
traffic between different datacenters is compressed
-
none
-
nothing is compressed.
-
-

Default Value: dc

-
-
-

inter_dc_tcp_nodelay

-

Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses.

-

Default Value: false

-
-
-

tracetype_query_ttl

-

TTL for different trace types used during logging of the repair process.

-

Default Value: 86400

-
-
-

tracetype_repair_ttl

-

Default Value: 604800

-
-
-

enable_user_defined_functions

-

If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.

-

Default Value: false

-
-
-

enable_scripted_user_defined_functions

-

Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with “language javascript” or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false.

-

Default Value: false
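For reference, enabling sandboxed Java UDFs while leaving scripted (JavaScript/JSR-223) UDFs disabled would look like this in cassandra.yaml:

```yaml
enable_user_defined_functions: true            # Java UDFs, executed in the sandbox
enable_scripted_user_defined_functions: false  # keep scripted UDFs disabled
```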

-
-
-

windows_timer_interval

-

The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals ‘clockres’ tool can confirm your system’s default -setting.

-

Default Value: 1

-
-
-

transparent_data_encryption_options

-

Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by the “key_alias” is the only key that will be used for encrypt operations; previously used keys can still (and should!) be in the keystore and will be used on decrypt operations (to handle the case of key rotation).

-

It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)

-

Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints

-

Default Value (complex option):

-
enabled: false
-chunk_length_kb: 64
-cipher: AES/CBC/PKCS5Padding
-key_alias: testing:1
-# CBC IV length for AES needs to be 16 bytes (which is also the default size)
-# iv_length: 16
-key_provider:
-  - class_name: org.apache.cassandra.security.JKSKeyProvider
-    parameters:
-      - keystore: conf/.keystore
-        keystore_password: cassandra
-        store_type: JCEKS
-        key_password: cassandra
-
-
-
-
-

tombstone_warn_threshold

-
-

SAFETY THRESHOLDS #

-

When executing a scan, within or across a partition, we need to keep the tombstones seen in memory so we can return them to the coordinator, which will use them to make sure other replicas also know about the deleted rows. With workloads that generate a lot of tombstones, this can cause performance problems and even exhaust the server heap. (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) Adjust the thresholds here if you understand the dangers and want to scan more tombstones anyway. These thresholds may also be adjusted at runtime using the StorageService mbean.

-

Default Value: 1000

-
-
-
-

tombstone_failure_threshold

-

Default Value: 100000
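The two thresholds above correspond to the following cassandra.yaml entries; the values restate the documented defaults, and raising them is only advisable if you understand the heap implications described earlier:

```yaml
tombstone_warn_threshold: 1000       # warn when a scan encounters this many tombstones
tombstone_failure_threshold: 100000  # abort the query beyond this many tombstones
```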

-
-
-

batch_size_warn_threshold_in_kb

-

Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability.

-

Default Value: 5

-
-
-

batch_size_fail_threshold_in_kb

-

Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.

-

Default Value: 50
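Restated as cassandra.yaml entries with their documented defaults:

```yaml
batch_size_warn_threshold_in_kb: 5   # warn on multi-partition batches larger than 5 KB
batch_size_fail_threshold_in_kb: 50  # reject batches larger than 50 KB (10x the warn threshold)
```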

-
-
-

unlogged_batch_across_partitions_warn_threshold

-

Log WARN on any batches not of type LOGGED that span more partitions than this limit.

-

Default Value: 10

-
-
-

compaction_large_partition_warning_threshold_mb

-

Log a warning when compacting partitions larger than this value

-

Default Value: 100

-
-
-

gc_log_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than 200 ms will be logged at INFO level -This threshold can be adjusted to minimize logging if necessary

-

Default Value: 200

-
-
-

gc_warn_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement. Setting to 0 -will deactivate the feature.

-

Default Value: 1000
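Both GC thresholds are commented out by default; uncommenting them with their documented defaults would look like:

```yaml
gc_log_threshold_in_ms: 200    # GC pauses longer than this are logged at INFO
gc_warn_threshold_in_ms: 1000  # GC pauses longer than this are logged at WARN (0 disables)
```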

-
-
-

max_value_size_in_mb

-

This option is commented out by default.

-

Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

back_pressure_enabled

-

Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas.

-

Default Value: false

-
-
-

back_pressure_strategy

-

The back-pressure strategy applied. The default implementation, RateBasedBackPressure, takes three arguments: high ratio, factor, and flow type. It uses the ratio between incoming mutation responses and outgoing mutation requests. If below the high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; if above the high ratio, the rate limiting is increased by the given factor. The factor is usually best configured between 1 and 10; use larger values for a faster recovery at the expense of potentially more dropped mutations. The rate limiting is applied according to the flow type: if FAST, it is rate limited at the speed of the fastest replica; if SLOW, at the speed of the slowest one. New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and provide a public constructor accepting a Map<String, Object>.
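A sketch of what a corresponding cassandra.yaml block could look like for the default strategy; the parameter values here are illustrative rather than authoritative defaults:

```yaml
back_pressure_strategy:
    - class_name: org.apache.cassandra.net.RateBasedBackPressure
      parameters:
        - high_ratio: 0.90   # illustrative ratio of incoming responses to outgoing requests
          factor: 5          # illustrative rate-limiting adjustment factor (usually 1 to 10)
          flow: FAST         # rate limit at the speed of the fastest replica
```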

-
-
-

otc_coalescing_strategy

-

This option is commented out by default.

-

Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won’t notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It’s not that bare metal -doesn’t benefit from coalescing messages, it’s that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details.

-

Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.

-

Default Value: DISABLED

-
-
-

otc_coalescing_window_us

-

This option is commented out by default.

-

How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled.

-

Default Value: 200

-
-
-

otc_coalescing_enough_coalesced_messages

-

This option is commented out by default.

-

Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.

-

Default Value: 8

-
-
-

otc_backlog_expiration_interval_ms

-

This option is commented out by default.

-

How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.

-

Default Value: 200

-
-
-

ideal_consistency_level

-

This option is commented out by default.

-

Track a metric per keyspace indicating whether replication achieved the ideal consistency -level for writes without timing out. This is different from the consistency level requested by -each write which may be lower in order to facilitate availability.

-

Default Value: EACH_QUORUM

-
-
-

full_query_log_dir

-

This option is commented out by default.

-

Path to write full query log data to when the full query log is enabled. The full query log will recursively delete the contents of this path at times. Don’t place links in this directory to other parts of the filesystem.

-

Default Value: /tmp/cassandrafullquerylog

-
-
-

automatic_sstable_upgrade

-

This option is commented out by default.

-

Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the -oldest non-upgraded sstable will get upgraded to the latest version

-

Default Value: false

-
-
-

max_concurrent_automatic_sstable_upgrades

-

This option is commented out by default. -Limit the number of concurrent sstable upgrades

-

Default Value: 1

-
-
-

audit_logging_options

-

Audit logging - Logs every incoming CQL command request, authentication to a node. See the docs -on audit_logging for full details about the various configuration options.

-
-
-

full_query_logging_options

-

This option is commented out by default.

-

Default options for full query logging. These can be overridden from the command line when executing nodetool enablefullquerylog.
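A hedged sketch of such an options block; the sub-option names below (log_dir, roll_cycle, block) are assumptions based on common usage of the full query log and should be checked against your Cassandra version:

```yaml
full_query_logging_options:
    log_dir: /tmp/cassandrafullquerylog  # matches full_query_log_dir above
    roll_cycle: HOURLY                   # assumed sub-option: how often log segments roll
    block: true                          # assumed sub-option: block writes if logging falls behind
```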

-
-
-

corrupted_tombstone_strategy

-

This option is commented out by default.

-

validate tombstones on reads and compaction -can be either “disabled”, “warn” or “exception”

-

Default Value: disabled

-
-
-

diagnostic_events_enabled

-

Diagnostic Events # -If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details -on internal state and temporal relationships across events, accessible by clients via JMX.

-

Default Value: false

-
-
-

native_transport_flush_in_batches_legacy

-

This option is commented out by default.

-

Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in particular if you run an old kernel or have very few client connections, this option might be worth evaluating.

-

Default Value: false

-
-
-

repaired_data_tracking_for_range_reads_enabled

-

Enable tracking of repaired state of data during reads and comparison between replicas -Mismatches between the repaired sets of replicas can be characterized as either confirmed -or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair -sessions, unrepaired partition tombstones, or some other condition means that the disparity -cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation -as they may be indicative of corruption or data loss. -There are separate flags for range vs partition reads as single partition reads are only tracked -when CL > 1 and a digest mismatch occurs. Currently, range queries don’t use digests so if -enabled for range reads, all range reads will include repaired data tracking. As this adds -some overhead, operators may wish to disable it whilst still enabling it for partition reads

-

Default Value: false

-
-
-

repaired_data_tracking_for_partition_reads_enabled

-

Default Value: false

-
-
-

report_unconfirmed_repaired_data_mismatches

-

If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed mismatches will also be recorded. This is to avoid potential signal-to-noise issues, as unconfirmed mismatches are less actionable than confirmed ones.

-

Default Value: false

-
-
-

enable_materialized_views

-
-

EXPERIMENTAL FEATURES #

-

Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use.

-

Default Value: false

-
-
-
-

enable_sasi_indexes

-

Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use.

-

Default Value: false

-
-
-

enable_transient_replication

-

Enables creation of transiently replicated keyspaces on this node. -Transient replication is experimental and is not recommended for production use.

-

Default Value: false

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/configuration/index.html b/src/doc/4.0-alpha1/configuration/index.html deleted file mode 100644 index 1ac2678b5..000000000 --- a/src/doc/4.0-alpha1/configuration/index.html +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

This section describes how to configure Apache Cassandra.

- -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/contactus.html b/src/doc/4.0-alpha1/contactus.html deleted file mode 100644 index 85e0a6a82..000000000 --- a/src/doc/4.0-alpha1/contactus.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contact us" -doc-header-links: ' - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contact us

-

You can get in touch with the Cassandra community either via the mailing lists or Slack rooms.

-
-

Mailing lists

-

The following mailing lists are available:

- -

Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe.

-
-
-

Slack

-

To chat with developers or users in real-time, join our rooms on ASF Slack:

-
    -
  • cassandra - for user questions and general discussions.
  • -
  • cassandra-dev - strictly for questions or discussions related to Cassandra development.
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/appendices.html b/src/doc/4.0-alpha1/cql/appendices.html deleted file mode 100644 index c6b7241cb..000000000 --- a/src/doc/4.0-alpha1/cql/appendices.html +++ /dev/null @@ -1,567 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Appendices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Appendices

-
-

Appendix A: CQL Keywords

-

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (although a reserved keyword can be enclosed in double-quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d’être of these non-reserved keywords is convenience: a keyword is non-reserved when it was always easy for the parser to decide whether it was used as a keyword or not.
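For example, the following hypothetical table uses the reserved keyword select as a column name by double-quoting it; without the quotes the statement would be rejected:

```sql
-- "select" is reserved, so it must be double-quoted to be used as an identifier
CREATE TABLE ks.words (
    id int PRIMARY KEY,
    "select" text
);
```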

| Keyword | Reserved? |
|---|---|
| ADD | yes |
| AGGREGATE | no |
| ALL | no |
| ALLOW | yes |
| ALTER | yes |
| AND | yes |
| APPLY | yes |
| AS | no |
| ASC | yes |
| ASCII | no |
| AUTHORIZE | yes |
| BATCH | yes |
| BEGIN | yes |
| BIGINT | no |
| BLOB | no |
| BOOLEAN | no |
| BY | yes |
| CALLED | no |
| CLUSTERING | no |
| COLUMNFAMILY | yes |
| COMPACT | no |
| CONTAINS | no |
| COUNT | no |
| COUNTER | no |
| CREATE | yes |
| CUSTOM | no |
| DATE | no |
| DECIMAL | no |
| DELETE | yes |
| DESC | yes |
| DESCRIBE | yes |
| DISTINCT | no |
| DOUBLE | no |
| DROP | yes |
| ENTRIES | yes |
| EXECUTE | yes |
| EXISTS | no |
| FILTERING | no |
| FINALFUNC | no |
| FLOAT | no |
| FROM | yes |
| FROZEN | no |
| FULL | yes |
| FUNCTION | no |
| FUNCTIONS | no |
| GRANT | yes |
| IF | yes |
| IN | yes |
| INDEX | yes |
| INET | no |
| INFINITY | yes |
| INITCOND | no |
| INPUT | no |
| INSERT | yes |
| INT | no |
| INTO | yes |
| JSON | no |
| KEY | no |
| KEYS | no |
| KEYSPACE | yes |
| KEYSPACES | no |
| LANGUAGE | no |
| LIMIT | yes |
| LIST | no |
| LOGIN | no |
| MAP | no |
| MODIFY | yes |
| NAN | yes |
| NOLOGIN | no |
| NORECURSIVE | yes |
| NOSUPERUSER | no |
| NOT | yes |
| NULL | yes |
| OF | yes |
| ON | yes |
| OPTIONS | no |
| OR | yes |
| ORDER | yes |
| PASSWORD | no |
| PERMISSION | no |
| PERMISSIONS | no |
| PRIMARY | yes |
| RENAME | yes |
| REPLACE | yes |
| RETURNS | no |
| REVOKE | yes |
| ROLE | no |
| ROLES | no |
| SCHEMA | yes |
| SELECT | yes |
| SET | yes |
| SFUNC | no |
| SMALLINT | no |
| STATIC | no |
| STORAGE | no |
| STYPE | no |
| SUPERUSER | no |
| TABLE | yes |
| TEXT | no |
| TIME | no |
| TIMESTAMP | no |
| TIMEUUID | no |
| TINYINT | no |
| TO | yes |
| TOKEN | yes |
| TRIGGER | no |
| TRUNCATE | yes |
| TTL | no |
| TUPLE | no |
| TYPE | no |
| UNLOGGED | yes |
| UPDATE | yes |
| USE | yes |
| USER | no |
| USERS | no |
| USING | yes |
| UUID | no |
| VALUES | no |
| VARCHAR | no |
| VARINT | no |
| WHERE | yes |
| WITH | yes |
| WRITETIME | no |
-
-
-

Appendix B: CQL Reserved Types

-

The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name.

- --- - - - - - - - - - - - - - - - - - - -
type
bitstring
byte
complex
enum
interval
macaddr
-
-
-

Appendix C: Dropping Compact Storage

-

Starting with version 4.0, Thrift and COMPACT STORAGE are no longer supported.

-

The ‘ALTER … DROP COMPACT STORAGE’ statement makes Compact Tables CQL-compatible, exposing the internal structure of Thrift/Compact Tables (see the example after the list):

-
    -
  • CQL-created Compact Tables that have no clustering columns, will expose an -additional clustering column column1 with UTF8Type.
  • -
  • CQL-created Compact Tables that had no regular columns, will expose a -regular column value with BytesType.
  • -
  • For CQL-created Compact Tables, all columns originally defined as regular will become static
  • -
  • CQL-created Compact Tables that have clustering but have no regular -columns will have an empty value column (of EmptyType)
  • -
  • SuperColumn Tables (can only be created through Thrift) will expose -a compact value map with an empty name.
  • -
  • Thrift-created Compact Tables will have types corresponding to their -Thrift definition.
  • -
-
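A sketch of the statement itself, run against a hypothetical compact table (the keyspace and table names are placeholders):

```sql
ALTER TABLE my_keyspace.legacy_table DROP COMPACT STORAGE;
```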
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/changes.html b/src/doc/4.0-alpha1/cql/changes.html deleted file mode 100644 index 586049b18..000000000 --- a/src/doc/4.0-alpha1/cql/changes.html +++ /dev/null @@ -1,363 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Changes

-

The following describes the changes in each version of CQL.

-
-

3.4.5

- -
-
-

3.4.4

-
    -
  • ALTER TABLE ALTER has been removed; a column’s type may not be changed after creation (CASSANDRA-12443).
  • -
  • ALTER TYPE ALTER has been removed; a field’s type may not be changed after creation (CASSANDRA-12443).
  • -
-
-
-

3.4.3

- -
-
-

3.4.2

-
    -
  • If a table has a non zero default_time_to_live, then explicitly specifying a TTL of 0 in an INSERT or -UPDATE statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels -the default_time_to_live). This wasn’t the case before and the default_time_to_live was applied even though a -TTL had been explicitly set.
  • -
  • ALTER TABLE ADD and DROP now allow multiple columns to be added/removed.
  • -
  • New PER PARTITION LIMIT option for SELECT statements (see CASSANDRA-7017.
  • -
  • User-defined functions can now instantiate UDTValue and TupleValue instances via the -new UDFContext interface (see CASSANDRA-10818.
  • -
  • User-defined types may now be stored in a non-frozen form, allowing individual fields to be updated and -deleted in UPDATE statements and DELETE statements, respectively. (CASSANDRA-7423).
  • -
-
-
-

3.4.1

-
    -
  • Adds CAST functions.
  • -
-
-
-

3.4.0

-
    -
  • Support for materialized views.
  • -
  • DELETE support for inequality expressions and IN restrictions on any primary key columns.
  • -
  • UPDATE support for IN restrictions on any primary key columns.
  • -
-
-
-

3.3.1

-
    -
  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X.
  • -
-
-
-

3.3.0

-
    -
  • User-defined functions and aggregates are now supported.
  • -
  • Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings.
  • -
  • Introduces Roles to supersede user based authentication and access control
  • -
  • New date, time, tinyint and smallint data types have been added.
  • -
  • JSON support has been added
  • -
  • Adds new time conversion functions and deprecate dateOf and unixTimestampOf.
  • -
-
-
-

3.2.0

-
    -
  • User-defined types supported.
  • -
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the -keys() function
  • -
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • -
  • Tuple types were added to hold fixed-length sets of typed positional fields.
  • -
  • DROP INDEX now supports optionally specifying a keyspace.
  • -
-
-
-

3.1.7

-
    -
  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations -of clustering columns.
  • -
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, -respectively.
  • -
-
-
-

3.1.6

-
    -
  • A new uuid() method has been added.
  • -
  • Support for DELETE ... IF EXISTS syntax.
  • -
-
-
-

3.1.5

-
    -
  • It is now possible to group clustering columns in a relation, see WHERE clauses.
  • -
  • Added support for static columns.
  • -
-
-
-

3.1.4

-
    -
  • CREATE INDEX now allows specifying options when creating CUSTOM indexes.
  • -
-
-
-

3.1.3

-
    -
  • Millisecond precision formats have been added to the timestamp parser.
  • -
-
-
-

3.1.2

-
    -
  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or a keyspace/table one), you will now need to double-quote them.
  • -
-
-
-

3.1.1

-
    -
  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • -
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable -will be a list of whatever type c is.
  • -
  • It is now possible to use named bind variables (using :name instead of ?).
  • -
-
-
-

3.1.0

-
    -
  • ALTER TABLE DROP option added.
  • -
  • SELECT statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
  • -
  • CREATE statements for KEYSPACE, TABLE and INDEX now supports an IF NOT EXISTS condition. -Similarly, DROP statements support a IF EXISTS condition.
  • -
  • INSERT statements optionally supports a IF NOT EXISTS condition and UPDATE supports IF conditions.
  • -
-
-
-

3.0.5

-
    -
  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626.
  • -
-
-
-

3.0.4

-
    -
  • Updated the syntax for custom secondary indexes.
  • -
  • Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not -correct (the order was not the one of the type of the partition key). Instead, the token method should always -be used for range queries on the partition key (see WHERE clauses).
  • -
-
-
-

3.0.3

- -
-
-

3.0.2

-
    -
  • Type validation for the constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now more strict. See the data types section for details on which constant is allowed for which type.
  • -
  • The type validation fix of the previous point has led to the introduction of blob constants to allow the input of blobs. Do note that while the input of blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated and will be removed by a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • -
  • A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is -now also allowed in select clauses. See the section on functions for details.
  • -
-
-
-

3.0.1

-
    -
  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense -that date string are not valid timeuuid, and it was thus resulting in confusing behaviors. However, the following new methods have been added to help -working with timeuuid: now, minTimeuuid, maxTimeuuid , -dateOf and unixTimestampOf.
  • -
  • Float constants now support the exponent notation. In other words, 4.2E10 is now a valid floating point value.
  • -
-
-
-

Versioning

-

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version.

| Version | Description |
|---|---|
| Major | The major version must be bumped when backward incompatible changes are introduced. This should rarely occur. |
| Minor | Minor version increments occur when new, but backward compatible, functionality is introduced. |
| Patch | The patch version is incremented when bugs are fixed. |
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/ddl.html b/src/doc/4.0-alpha1/cql/ddl.html deleted file mode 100644 index 24736f5d3..000000000 --- a/src/doc/4.0-alpha1/cql/ddl.html +++ /dev/null @@ -1,856 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Definition" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Definition

-

CQL stores data in tables, whose schema defines the layout of said data in the table, and those tables are grouped in keyspaces. A keyspace defines a number of options that apply to all the tables it contains, the most prominent of which is the replication strategy used by the keyspace. It is generally encouraged to use one keyspace per application, and thus many clusters may define only one keyspace.

-

This section describes the statements used to create, modify, and remove those keyspace and tables.

-
-

Common definitions

-

The names of the keyspaces and tables are defined by the following grammar:

-
-keyspace_name ::=  name
-table_name    ::=  [ keyspace_name '.' ] name
-name          ::=  unquoted_name | quoted_name
-unquoted_name ::=  re('[a-zA-Z_0-9]{1, 48}')
-quoted_name   ::=  '"' unquoted_name '"'
-
-

Both keyspace and table names should be comprised of only alphanumeric characters, cannot be empty, and are limited in size to 48 characters (that limit exists mostly to prevent filenames, which may include the keyspace and table name, from going over the limits of certain file systems). By default, keyspace and table names are case insensitive (myTable is equivalent to mytable) but case sensitivity can be forced by using double-quotes ("myTable" is different from mytable).

-

Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is part of. If it is not fully-qualified, the table is assumed to be in the current keyspace (see USE statement).

-

Further, the valid names for columns is simply defined as:

-
-column_name ::=  identifier
-
-

We also define the notion of statement options for use in the following section:

-
-options ::=  option ( AND option )*
-option  ::=  identifier '=' ( identifier | constant | map_literal )
-
-
-
-

CREATE KEYSPACE

-

A keyspace is created using a CREATE KEYSPACE statement:

-
-create_keyspace_statement ::=  CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options
-
-

For instance:

-
CREATE KEYSPACE excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-    AND durable_writes = false;
-
-
-

Attempting to create a keyspace that already exists will return an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the keyspace already exists.

-

The supported options are:

| name | kind | mandatory | default | description |
|---|---|---|---|---|
| replication | map | yes | | The replication strategy and options to use for the keyspace (see details below). |
| durable_writes | simple | no | true | Whether to use the commit log for updates on this keyspace (disable this option at your own risk!). |
-

The replication property is mandatory and must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on which replication strategy is used. By default, Cassandra supports the following 'class':

-
-

SimpleStrategy

-

A simple strategy that defines a replication factor for data to be spread -across the entire cluster. This is generally not a wise choice for production -because it does not respect datacenter layouts and can lead to wildly varying -query latency. For a production ready strategy, see -NetworkTopologyStrategy. SimpleStrategy supports a single mandatory argument:

| sub-option | type | since | description |
|---|---|---|---|
| 'replication_factor' | int | all | The number of replicas to store per range |
-
-
-

NetworkTopologyStrategy

-

A production ready replication strategy that allows to set the replication -factor independently for each data-center. The rest of the sub-options are -key-value pairs where a key is a data-center name and its value is the -associated replication factor. Options:

| sub-option | type | since | description |
|---|---|---|---|
| '&lt;datacenter&gt;' | int | all | The number of replicas to store per range in the provided datacenter. |
| 'replication_factor' | int | 4.0 | The number of replicas to use as a default per datacenter if not specifically provided. Note that this always defers to existing definitions or explicit datacenter settings. For example, to have three replicas per datacenter, supply this with a value of 3. |
-

Note that when ALTER ing keyspaces and supplying replication_factor, -auto-expansion will only add new datacenters for safety, it will not alter -existing datacenters or remove any even if they are no longer in the cluster. -If you want to remove datacenters while still supplying replication_factor, -explicitly zero out the datacenter you want to have zero replicas.

-

An example of auto-expanding datacenters with two datacenters: DC1 and DC2:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true;
-
-
-

An example of auto-expanding and overriding a datacenter:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true;
-
-
-

An example that excludes a datacenter while using replication_factor:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0} ;
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true;
-
-
-

If transient replication has been enabled, transient replicas can be configured for both -SimpleStrategy and NetworkTopologyStrategy by defining replication factors in the format '<total_replicas>/<transient_replicas>'

-

For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:

-
CREATE KEYSPACE some_keyspace
    WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1', 'DC2' : '5/2'};
-
-
-
-
-
-

USE

-

The USE statement allows you to change the current keyspace (for the connection on which it is executed). A number of objects in CQL are bound to a keyspace (tables, user-defined types, functions, …) and the current keyspace is the default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a keyspace name). A USE statement simply takes the keyspace to use as current as its argument:

-
-use_statement ::=  USE keyspace_name
-
-
-
-

ALTER KEYSPACE

-

An ALTER KEYSPACE statement allows to modify the options of a keyspace:

-
-alter_keyspace_statement ::=  ALTER KEYSPACE keyspace_name WITH options
-
-

For instance:

-
ALTER KEYSPACE Excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-
-

The supported options are the same as for creating a keyspace.

-
-
-

DROP KEYSPACE

-

Dropping a keyspace can be done using the DROP KEYSPACE statement:

-
-drop_keyspace_statement ::=  DROP KEYSPACE [ IF EXISTS ] keyspace_name
-
-

For instance:

-
DROP KEYSPACE Excelsior;
-
-
-

Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UDTs and functions in it, and all the data contained in those tables.

-

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-

CREATE TABLE

-

Creating a new table uses the CREATE TABLE statement:

-
-create_table_statement ::=  CREATE TABLE [ IF NOT EXISTS ] table_name
-                            '('
-                                column_definition
-                                ( ',' column_definition )*
-                                [ ',' PRIMARY KEY '(' primary_key ')' ]
-                            ')' [ WITH table_options ]
-column_definition      ::=  column_name cql_type [ STATIC ] [ PRIMARY KEY]
-primary_key            ::=  partition_key [ ',' clustering_columns ]
-partition_key          ::=  column_name
-                            | '(' column_name ( ',' column_name )* ')'
-clustering_columns     ::=  column_name ( ',' column_name )*
-table_options          ::=  COMPACT STORAGE [ AND table_options ]
-                            | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ]
-                            | options
-clustering_order       ::=  column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )*
-
-

For instance:

-
CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records';
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-CREATE TABLE loads (
-    machine inet,
-    cpu int,
-    mtime timeuuid,
-    load float,
-    PRIMARY KEY ((machine, cpu), mtime)
-) WITH CLUSTERING ORDER BY (mtime DESC);
-
-
-

A CQL table has a name and is composed of a set of rows. Creating a table amounts to defining which columns the rows will be composed of, which of those columns make up the primary key, as well as optional options for the table.

-

Attempting to create an already existing table will return an error unless the IF NOT EXISTS directive is used. If -it is used, the statement will be a no-op if the table already exists.

-

Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later using an alter statement).

-

A column_definition is primarily comprised of the name of the column defined and its type, which restricts which values are accepted for that column. Additionally, a column definition can have the following modifiers:

-
-
STATIC
-
it declares the column as being a static column.
-
PRIMARY KEY
-
it declares the column as being the sole component of the primary key of the table.
-
-

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same partition key). For instance:

-
CREATE TABLE t (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-
-INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-SELECT * FROM t;
-   pk | t | v      | s
-  ----+---+--------+-----------
-   0  | 0 | 'val0' | 'static1'
-   0  | 1 | 'val1' | 'static1'
-
-
-

As can be seen, the s value is the same (static1) for both of the rows in the partition (the partition key in that example being pk, both rows are in that same partition): the 2nd insertion has overridden the value for s.

-

The use of static columns has the following restrictions:

-
    -
  • tables with the COMPACT STORAGE option (see below) cannot use them.
  • -
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition -has only one row, and so every column is inherently static).
  • -
  • only non PRIMARY KEY columns can be static.
  • -
-

Within a table, a row is uniquely identified by its PRIMARY KEY, and hence all tables must define a PRIMARY KEY (and only one). A PRIMARY KEY definition is composed of one or more of the columns defined in the table. Syntactically, the primary key is defined by the keywords PRIMARY KEY followed by a comma-separated list of the column names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that column definition by the PRIMARY KEY keywords. The order of the columns in the primary key definition matters.

-

A CQL primary key is composed of 2 parts:

-
    -
  • the partition key part. It is the first component of the primary key definition. It can be a single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key; the smallest possible table definition is:

    -
    CREATE TABLE t (k text PRIMARY KEY);
    -
    -
    -
  • -
  • the clustering columns. Those are the columns after the first component of the primary key -definition, and the order of those columns define the clustering order.

    -
  • -
-

Some example of primary key definition are:

-
    -
  • PRIMARY KEY (a): a is the partition key and there is no clustering columns.
  • -
  • PRIMARY KEY (a, b, c) : a is the partition key and b and c are the clustering columns.
  • -
  • PRIMARY KEY ((a, b), c) : a and b compose the partition key (this is often called a composite partition -key) and c is the clustering column.
  • -
-

Within a table, CQL defines the notion of a partition. A partition is simply the set of rows that share the same value for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same partition only if they have the same values for all those partition key columns. So for instance, given the following table definition and content:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    d int,
-    PRIMARY KEY ((a, b), c, d)
-);
-
-SELECT * FROM t;
-   a | b | c | d
-  ---+---+---+---
-   0 | 0 | 0 | 0    // row 1
-   0 | 0 | 1 | 1    // row 2
-   0 | 1 | 2 | 2    // row 3
-   0 | 1 | 3 | 3    // row 4
-   1 | 1 | 4 | 4    // row 5
-
-
-

row 1 and row 2 are in the same partition, row 3 and row 4 are also in the same partition (but a -different one) and row 5 is in yet another partition.

-

Note that a table always has a partition key, and that if the table has no clustering columns, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns).

-

The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be localized together in the cluster, and it is thus important to choose your partition key wisely so that rows that need to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of nodes).

-

Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to -be stored on the same set of replica node, a partition key that groups too much data can create a hotspot.

-

Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done atomically and in isolation, which is not the case across partitions.

-

The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they are.

-

The clustering columns of a table defines the clustering order for the partition of that table. For a given -partition, all the rows are physically ordered inside Cassandra by that clustering order. For -instance, given:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    PRIMARY KEY (a, b, c)
-);
-
-SELECT * FROM t;
-   a | b | c
-  ---+---+---
-   0 | 0 | 4     // row 1
-   0 | 1 | 9     // row 2
-   0 | 2 | 2     // row 3
-   0 | 3 | 3     // row 4
-
-
-

then the rows (which all belong to the same partition) are all stored internally in the order of the values of their b column (the order they are displayed above). So where the partition key of the table allows grouping rows on the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the retrieval of a range of rows within a partition (for instance, in the example above, SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3) to be very efficient.

-

A CQL table has a number of options that can be set at creation (and, for most of them, altered later). These options are specified after the WITH keyword.

-

Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the COMPACT STORAGE option and the CLUSTERING ORDER option. Those, as well as the other -options of a table are described in the following sections.

-
-

Warning

-

Since Cassandra 3.0, compact tables have the exact same layout internally as non-compact ones (for the same schema, obviously), and declaring a table compact only creates artificial limitations on the table definition and usage. It only exists for historical reasons and is preserved for backward compatibility. As COMPACT STORAGE cannot, as of Cassandra 4.0-alpha1, be removed, it is strongly discouraged to create new tables with the COMPACT STORAGE option.

-
-

A compact table is one defined with the COMPACT STORAGE option. This option is only maintained for backward compatibility for definitions created before CQL version 3 and shouldn’t be used for new tables. Declaring a table with this option creates limitations for the table which are largely arbitrary (and exist for historical reasons). Amongst those limitations:

-
    -
  • a compact table cannot use collections nor static columns.
  • -
  • if a compact table has at least one clustering column, then it must have exactly one column outside of the primary key ones. This implies you cannot add or remove columns after creation in particular.
  • -
  • a compact table is limited in the indexes it can create, and no materialized view can be created on it.
  • -
-

The clustering order of a table is defined by the clustering columns of that table. By default, that ordering is based on the natural order of those clustering columns, but the CLUSTERING ORDER option allows changing that clustering order to use the reverse natural order for some (potentially all) of the columns.

-

The CLUSTERING ORDER option takes a comma-separated list of the clustering columns, each with an ASC (for ascending, i.e. the natural order) or DESC (for descending, i.e. the reverse natural order) modifier. Note in particular that the default (if the CLUSTERING ORDER option is not used) is strictly equivalent to using the option with all clustering columns using the ASC modifier.

-

Note that this option is basically a hint for the storage engine to change the order in which it stores the rows, but it has 3 visible consequences:

1. It limits which ORDER BY clauses are allowed for selects on that table. You can only order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering columns a and b and you defined WITH CLUSTERING ORDER (a DESC, b ASC), then in queries you will be allowed to use ORDER BY (a DESC, b ASC) and (reverse clustering order) ORDER BY (a ASC, b DESC) but not ORDER BY (a ASC, b ASC) (nor ORDER BY (a DESC, b DESC)).
2. It also changes the default order of results when queried (if no ORDER BY is provided). Results are always returned in clustering order (within a partition).
3. It has a small performance impact on some queries, as queries in reverse clustering order are slower than the ones in forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of your columns (which is common with time series, for instance, where you often want data from the newest to the oldest), it is an optimization to declare a descending clustering order.
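To make the first consequence concrete, a small sketch using a hypothetical time-series table (the table and column names are illustrative):

```sql
CREATE TABLE readings (
    sensor_id int,
    reported_at timestamp,
    value double,
    PRIMARY KEY (sensor_id, reported_at)
) WITH CLUSTERING ORDER BY (reported_at DESC);

-- Allowed: the declared clustering order, or its exact reverse
SELECT * FROM readings WHERE sensor_id = 1 ORDER BY reported_at DESC;
SELECT * FROM readings WHERE sensor_id = 1 ORDER BY reported_at ASC;
```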
-
-
-

Todo

-

review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance)

-
-

A table supports the following options:

| option | kind | default | description |
|---|---|---|---|
| comment | simple | none | A free-form, human-readable comment. |
| speculative_retry | simple | 99PERCENTILE | Speculative retry options. |
| additional_write_policy | simple | 99PERCENTILE | Speculative retry options. |
| gc_grace_seconds | simple | 864000 | Time to wait before garbage collecting tombstones (deletion markers). |
| bloom_filter_fp_chance | simple | 0.00075 | The target probability of false positive of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impacts the size of bloom filters in-memory and on-disk). |
| default_time_to_live | simple | 0 | The default expiration time (“TTL”) in seconds for a table. |
| compaction | map | see below | Compaction options. |
| compression | map | see below | Compression options. |
| caching | map | see below | Caching options. |
| memtable_flush_period_in_ms | simple | 0 | Time (in ms) before Cassandra flushes memtables to disk. |
| read_repair | simple | BLOCKING | Sets read repair behavior (see below). |
-

By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ONE, a quorum for QUORUM, and so on. -speculative_retry determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. additional_write_policy specifies the threshold at which -a cheap quorum write will be upgraded to include transient replicas. The following are legal values (case-insensitive):

-

This setting does not affect reads with consistency level ALL because they already query all replicas.

-

Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default 99PERCENTILE.
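As a hedged example, percentile-based and fixed-delay settings such as the following are commonly used; ks.t is a placeholder table and the exact set of accepted values should be checked against your version:

```sql
-- keep the default percentile-based behaviour, stated explicitly
ALTER TABLE ks.t WITH speculative_retry = '99PERCENTILE';

-- or retry against another replica after a fixed delay (value is illustrative)
ALTER TABLE ks.t WITH speculative_retry = '50ms';
```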

-

The compaction options must at least define the 'class' sub-option, which defines the compaction strategy class to use. The default supported classes are 'SizeTieredCompactionStrategy' (STCS), 'LeveledCompactionStrategy' (LCS) and 'TimeWindowCompactionStrategy' (TWCS) (the 'DateTieredCompactionStrategy' is also supported but is deprecated, and 'TimeWindowCompactionStrategy' should be preferred instead). A custom strategy can be provided by specifying the full class name as a string constant.

-

All default strategies support a number of common options, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: STCS, LCS and TWCS).

-

The compression options define if and how the sstables of the table are compressed. The following sub-options are -available:

| Option | Default | Description |
|---|---|---|
| class | LZ4Compressor | The compression algorithm to use. Default compressors are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant. |
| enabled | true | Enable/disable sstable compression. |
| chunk_length_in_kb | 64 | On disk, SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increase the minimum size of data to be read from disk for a read. |
| crc_check_chance | 1.0 | When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replicas. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking, or to 0.5, for instance, to check them every other read. |
-

For instance, to create a table with LZ4Compressor and a chunk_length_in_kb of 4 KB:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
-
-
-

The caching options allow configuring both the key cache and the row cache for the table. The following sub-options are available:

| Option | Default | Description |
|---|---|---|
| keys | ALL | Whether to cache keys (“key cache”) for this table. Valid values are: ALL and NONE. |
| rows_per_partition | NONE | The amount of rows to cache per partition (“row cache”). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching. |
-

For instance, to create a table with both a key cache and 10 rows per partition:

-
CREATE TABLE simple (
-id int,
-key text,
-value text,
-PRIMARY KEY (key, value)
-) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10};
-
-
-

The read_repair options configures the read repair behavior to allow tuning for various performance and -consistency behaviors. Two consistency properties are affected by read repair behavior.

-
  • Monotonic Quorum Reads: Provided by BLOCKING. Monotonic quorum reads prevent reads from appearing to go back in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of replicas, it may be visible in one read and then disappear in a subsequent read.
  • Write Atomicity: Provided by NONE. Write atomicity prevents reads from returning partially applied writes. Cassandra attempts to provide partition-level write atomicity, but since only the data covered by a SELECT statement is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it is written. For example, read repair can break write atomicity if you write multiple rows to a clustered partition in a batch, but then select a single row by specifying the clustering column in a SELECT statement.
-

The available read repair settings are:

-

The default setting. When read_repair is set to BLOCKING, and a read repair is triggered, the read will block on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition-level write atomicity.

-

When read_repair is set to NONE, the coordinator will reconcile any differences between replicas, but will not attempt to repair them. Provides partition-level write atomicity, but not monotonic quorum reads.

-
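The read_repair option is set like any other table option. A minimal sketch, reusing the hypothetical simple table from the examples above:

ALTER TABLE simple WITH read_repair = 'NONE';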
    -
  • Adding new columns (see ALTER TABLE below) is a constant time operation. There is thus no need to try to anticipate future usage when creating a table.
  • -
-
-
-

ALTER TABLE

-

Altering an existing table uses the ALTER TABLE statement:

-
-alter_table_statement   ::=  ALTER TABLE table_name alter_table_instruction
-alter_table_instruction ::=  ADD column_name cql_type ( ',' column_name cql_type )*
-                             | DROP column_name ( column_name )*
-                             | WITH options
-
-

For instance:

-
ALTER TABLE addamsFamily ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table';
-
-
-

The ALTER TABLE statement can:

-
  • Add new column(s) to the table (through the ADD instruction). Note that the primary key of a table cannot be changed and thus newly added columns will, by extension, never be part of the primary key. Also note that compact tables have restrictions regarding column addition. Note that this is a constant time operation (in the amount of data the cluster contains).
  • Remove column(s) from the table. This drops both the column and all its content, but note that while the column becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings below. Due to lazy removal, the altering itself is a constant time operation (in the amount of data removed or contained in the cluster).
  • Change some of the table options (through the WITH instruction). The supported options are the same as when creating a table (except for COMPACT STORAGE and CLUSTERING ORDER, which cannot be changed after creation). Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them; see the example after this list. The same note applies to the set of compression sub-options.
-
-
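For instance, to change a single compaction sub-option, every sub-option you want to keep must be repeated. A sketch only; the table name and values are hypothetical:

ALTER TABLE simple
  WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'max_threshold': 64};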

Warning

-

Dropping a column assumes that the timestamps used for the values of this column are "real" timestamps in microseconds. Using "real" timestamps in microseconds is the default and is strongly recommended, but as Cassandra allows the client to provide any timestamp on any table, it is theoretically possible to use another convention. Please be aware that if you do so, dropping a column will not work correctly.

-
-
-

Warning

-

Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one, unless the type of the dropped column was a (non-frozen) column (due to an internal technical limitation).

-
-
-
-

DROP TABLE

-

Dropping a table uses the DROP TABLE statement:

-
-drop_table_statement ::=  DROP TABLE [ IF EXISTS ] table_name
-
-

Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.

-

If the table does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
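For instance (the table name is just an illustration):

DROP TABLE IF EXISTS simple;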
-
-

TRUNCATE

-

A table can be truncated using the TRUNCATE statement:

-
-truncate_statement ::=  TRUNCATE [ TABLE ] table_name
-
-

Note that TRUNCATE TABLE foo is allowed for consistency with other DDL statements, but tables are the only object that can be truncated currently, and so the TABLE keyword can be omitted.

-

Truncating a table permanently removes all existing data from the table, but without removing the table itself.

-
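For instance (again using the hypothetical table from the examples above):

TRUNCATE TABLE simple;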
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/definitions.html b/src/doc/4.0-alpha1/cql/definitions.html deleted file mode 100644 index cbd2f632b..000000000 --- a/src/doc/4.0-alpha1/cql/definitions.html +++ /dev/null @@ -1,316 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Definitions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Definitions

-
-

Conventions

-

To aid in specifying the CQL syntax, we will use the following conventions in this document:

-
  • Language rules will be given in an informal BNF variant notation. In particular, we'll use square brackets ([ item ]) for optional items, and * and + for repeated items (where + implies at least one).
  • The grammar will also use the following convention for convenience: non-terminal terms will be lowercase (and link to their definition) while terminal keywords will be provided in all caps. Note however that keywords are case insensitive in practice (see Identifiers and keywords). We will also define some early constructions using regexp, which we'll indicate with re(<some regular expression>).
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the comma on the last column definition in a CREATE TABLE statement is optional but supported if present, even though the grammar in this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.
-
-
-

Identifiers and keywords

-

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token -matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

-

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in Appendix A: CQL Keywords.

-

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

-

There is a second kind of identifiers called quoted identifiers, defined by enclosing an arbitrary sequence of characters (non empty) in double-quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column (though this is not particularly advised), while select would raise a parsing error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is however equivalent to the unquoted identifier obtained by removing the double-quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.

-
-

Note

-

Quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional updates, the server will respond with a result-set containing a special result named "[applied]". If you've declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred, but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by square brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

-
-

More formally, we have:

-
-identifier          ::=  unquoted_identifier | quoted_identifier
-unquoted_identifier ::=  re('[a-zA-Z][a-zA-Z0-9_]*')
-quoted_identifier   ::=  '"' (any character where " can appear if doubled)+ '"'
-
-
-
-

Constants

-

CQL defines the following kind of constants:

-
-constant ::=  string | integer | float | boolean | uuid | blob | NULL
-string   ::=  '\'' (any character where ' can appear if doubled)+ '\''
-              '$$' (any character other than '$$') '$$'
-integer  ::=  re('-?[0-9]+')
-float    ::=  re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY
-boolean  ::=  TRUE | FALSE
-uuid     ::=  hex{8}-hex{4}-hex{4}-hex{4}-hex{12}
-hex      ::=  re("[0-9a-fA-F]")
-blob     ::=  '0' ('x' | 'X') hex+
-
-

In other words:

-
  • A string constant is an arbitrary sequence of characters enclosed by single-quotes ('). A single-quote can be included by repeating it, e.g. 'It''s raining today'. These are not to be confused with quoted identifiers, which use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence of characters by two dollar characters, in which case single-quotes can be used without escaping ($$It's raining today$$). The latter form is often used when defining user-defined functions, to avoid having to escape single-quote characters in the function body (as they are more likely to occur than $$).
  • Integer, float and boolean constants are defined as expected. Note however that float allows the special NaN and Infinity constants.
  • CQL supports UUID constants.
  • Blob content is provided in hexadecimal and prefixed by 0x.
  • The special NULL constant denotes the absence of value.
-

For how these constants are typed, see the Data Types section.

-
-
-

Terms

-

CQL has the notion of a term, which denotes the kind of values that CQL supports. Terms are defined by:

-
-term                 ::=  constant | literal | function_call | arithmetic_operation | type_hint | bind_marker
-literal              ::=  collection_literal | udt_literal | tuple_literal
-function_call        ::=  identifier '(' [ term (',' term)* ] ')'
-arithmetic_operation ::=  '-' term | term ('+' | '-' | '*' | '/' | '%') term
-type_hint            ::=  '(' cql_type ')' term
-bind_marker          ::=  '?' | ':' identifier
-
-

A term is thus one of:

  • a constant
  • a literal (a collection, user-defined type or tuple literal)
  • a function call
  • an arithmetic operation between terms
  • a type hint
  • a bind marker, which denotes a variable to be bound at execution time; a bind marker is either anonymous (?) or named (:some_name)
-
-

Comments

-

A comment in CQL is a line beginning with either a double dash (--) or a double slash (//).

-

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-
-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-
-
-
-
-

Statements

-

CQL consists of statements that can be divided into the following categories:

  • Data definition (DDL) statements, to define and change how data is stored (keyspaces and tables).
  • Data manipulation (DML) statements, for selecting, inserting and deleting data.
  • Secondary index statements.
  • Materialized view statements.
  • Role and permission statements.
  • User-defined function (UDF) statements.
  • User-defined type (UDT) statements.
  • Trigger statements.

All the statements are listed below and are described in the rest of this documentation (see links above):

-
-cql_statement                ::=  statement [ ';' ]
-statement                    ::=  ddl_statement
-                                  | dml_statement
-                                  | secondary_index_statement
-                                  | materialized_view_statement
-                                  | role_or_permission_statement
-                                  | udf_statement
-                                  | udt_statement
-                                  | trigger_statement
-ddl_statement                ::=  use_statement
-                                  | create_keyspace_statement
-                                  | alter_keyspace_statement
-                                  | drop_keyspace_statement
-                                  | create_table_statement
-                                  | alter_table_statement
-                                  | drop_table_statement
-                                  | truncate_statement
-dml_statement                ::=  select_statement
-                                  | insert_statement
-                                  | update_statement
-                                  | delete_statement
-                                  | batch_statement
-secondary_index_statement    ::=  create_index_statement
-                                  | drop_index_statement
-materialized_view_statement  ::=  create_materialized_view_statement
-                                  | drop_materialized_view_statement
-role_or_permission_statement ::=  create_role_statement
-                                  | alter_role_statement
-                                  | drop_role_statement
-                                  | grant_role_statement
-                                  | revoke_role_statement
-                                  | list_roles_statement
-                                  | grant_permission_statement
-                                  | revoke_permission_statement
-                                  | list_permissions_statement
-                                  | create_user_statement
-                                  | alter_user_statement
-                                  | drop_user_statement
-                                  | list_users_statement
-udf_statement                ::=  create_function_statement
-                                  | drop_function_statement
-                                  | create_aggregate_statement
-                                  | drop_aggregate_statement
-udt_statement                ::=  create_type_statement
-                                  | alter_type_statement
-                                  | drop_type_statement
-trigger_statement            ::=  create_trigger_statement
-                                  | drop_trigger_statement
-
-
-
-

Prepared Statements

-

CQL supports prepared statements. Prepared statements are an optimization that allows parsing a query only once and executing it multiple times with different concrete values.

-

Any statement that uses at least one bind marker (see bind_marker) will need to be prepared, after which the statement can be executed by providing concrete values for each of its markers. The exact details of how a statement is prepared and then executed depend on the CQL driver used, and you should refer to your driver documentation.

-
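For instance, the following statements use an anonymous and a named bind marker respectively and would typically be prepared by a driver. A sketch only; the table and column names are hypothetical:

SELECT * FROM users WHERE userid = ?;
INSERT INTO users (userid, name) VALUES (:id, :name);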
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/dml.html b/src/doc/4.0-alpha1/cql/dml.html deleted file mode 100644 index 6a9a6b34f..000000000 --- a/src/doc/4.0-alpha1/cql/dml.html +++ /dev/null @@ -1,560 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Manipulation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Manipulation

-

This section describes the statements supported by CQL to insert, update, delete and query data.

-
-

SELECT

-

Querying data from tables is done using a SELECT statement:

-
-select_statement ::=  SELECT [ JSON | DISTINCT ] ( select_clause | '*' )
-                      FROM table_name
-                      [ WHERE where_clause ]
-                      [ GROUP BY group_by_clause ]
-                      [ ORDER BY ordering_clause ]
-                      [ PER PARTITION LIMIT (integer | bind_marker) ]
-                      [ LIMIT (integer | bind_marker) ]
-                      [ ALLOW FILTERING ]
-select_clause    ::=  selector [ AS identifier ] ( ',' selector [ AS identifier ] )
-selector         ::=  column_name
-                      | term
-                      | CAST '(' selector AS cql_type ')'
-                      | function_name '(' [ selector ( ',' selector )* ] ')'
-                      | COUNT '(' '*' ')'
-where_clause     ::=  relation ( AND relation )*
-relation         ::=  column_name operator term
-                      '(' column_name ( ',' column_name )* ')' operator tuple_literal
-                      TOKEN '(' column_name ( ',' column_name )* ')' operator term
-operator         ::=  '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY
-group_by_clause  ::=  column_name ( ',' column_name )*
-ordering_clause  ::=  column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )*
-
-

For instance:

-
SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT (*) AS user_count FROM users;
-
-
-

The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of the rows matching the request, where each row contains the values for the selection corresponding to the query. Additionally, functions, including aggregation ones, can be applied to the result.

-

A SELECT statement contains at least a selection clause and the name of the table on which the selection is made (note that CQL does not support joins or sub-queries, and thus a select statement only applies to a single table). In most cases, a select will also have a where clause, and it can optionally have additional clauses to order or limit the results. Lastly, queries that require filtering can be allowed if the ALLOW FILTERING flag is provided.

-
-

Selection clause

-

The select_clause determines which columns need to be queried and returned in the result-set, as well as any transformations to apply to this result before returning. It consists of a comma-separated list of selectors or, alternatively, of the wildcard character (*) to select all the columns defined in the table.

-
-

Selectors

-

A selector can be one of:

-
  • A column name of the table selected, to retrieve the values for that column.
  • A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the corresponding column of the result-set will simply have the value of this term for every row returned).
  • A casting, which allows converting a nested selector to a (compatible) type.
  • A function call, where the arguments are selectors themselves. See the section on functions for more details.
  • The special call COUNT(*) to the COUNT function, which counts all non-null results.
-
-
-

Aliases

-

Every top-level selector can also be aliased (using AS). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:

-
// Without alias
-SELECT intAsBlob(4) FROM t;
-
-//  intAsBlob(4)
-// --------------
-//  0x00000004
-
-// With alias
-SELECT intAsBlob(4) AS four FROM t;
-
-//  four
-// ------------
-//  0x00000004
-
-
-
-

Note

-

Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the WHERE clause, not in the ORDER BY clause, etc.). You must use the original column name instead.

-
-
-
-

WRITETIME and TTL function

-

Selection supports two special functions (that aren't allowed anywhere else): WRITETIME and TTL. Both functions take only one argument, and that argument must be a column name (so for instance TTL(3) is invalid).

-

Those functions allow retrieving meta-information that is stored internally for each column, namely:

-
  • the timestamp of the value of the column for WRITETIME.
  • the remaining time to live (in seconds) for the value of the column, if it is set to expire (and null otherwise).
-
-
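For instance, both can be selected alongside regular columns. A sketch only, reusing the hypothetical events table from the SELECT examples above:

SELECT value, WRITETIME(value), TTL(value)
FROM events
WHERE event_type = 'myEvent' AND time > '2011-02-03';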
-
-

The WHERE clause

-

The WHERE clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the PRIMARY KEY and/or have a secondary index defined on them.

-

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations on them are restricted to those that select a contiguous (for the ordering) set of rows. For instance, given:

-
CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-
-
-

The following query is allowed:

-
SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND blog_title='John''s Blog'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):

-
// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, token(-1) > token(0) in particular). Example:

-
SELECT * FROM posts
- WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-
-
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full -primary key.

-

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-
-
-

will request all rows that sort after the one having "John's Blog" as blog_title and '2012-01-01' for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which would not be the case for:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND blog_title > 'John''s Blog'
-   AND posted_at > '2012-01-01'
-
-
-

The tuple notation may also be used for IN clauses on clustering columns:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-
-
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the -map keys.

-
-
-

Grouping results

-

The GROUP BY option allows condensing all selected rows that share the same values for a set of columns into a single row.

-

Using the GROUP BY option, it is only possible to group rows at the partition key level or at a clustering column level. Consequently, the GROUP BY option only accepts as arguments primary key column names in the primary key order. If a primary key column is restricted by an equality restriction, it is not required to be present in the GROUP BY clause.

-

Aggregate functions will produce a separate value for each group. If no GROUP BY clause is specified, aggregate functions will produce a single value for all the rows.

-

If a column is selected without an aggregate function in a statement with a GROUP BY, the first value encountered in each group will be returned.

-
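For instance, reusing the posts table from the examples above (a sketch only):

SELECT userid, max(posted_at) FROM posts GROUP BY userid;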
-
-

Ordering results

-

The ORDER BY clause allows selecting the order of the returned results. It takes as argument a list of column names along with the order for each column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited by the clustering order defined on the table:

-
  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
-
-
-

Limiting results

-

The LIMIT option to a SELECT statement limits the number of rows returned by a query, while the PER PARTITION LIMIT option limits the number of rows returned for a given partition by the query. Note that both types of limit can be used in the same statement.

-
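For instance, the following would return at most two posts per user, and no more than ten rows overall. A sketch only, reusing the posts table from the examples above:

SELECT * FROM posts PER PARTITION LIMIT 2 LIMIT 10;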
-
-

Allowing filtering

-

By default, CQL only allows select queries that don't involve "filtering" server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those "non filtering" queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

-

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (for the definition above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

-

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:

-
CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-
-
-

Then the following queries are valid:

-
SELECT * FROM users;
-SELECT * FROM users WHERE birth_year = 1981;
-
-
-

because in both cases, Cassandra guarantees that these queries' performance will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored; nevertheless, the number of nodes will always be multiple orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

-

However, the following query will be rejected:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-
-

because Cassandra cannot guarantee that it won't have to scan a large amount of data even if the result of those queries is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you "know what you are doing", you can force the execution of this query by using ALLOW FILTERING, and so the following query is valid:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-
-
-
-
-

INSERT

-

Inserting data for a row is done using an INSERT statement:

-
-insert_statement ::=  INSERT INTO table_name ( names_values | json_clause )
-                      [ IF NOT EXISTS ]
-                      [ USING update_parameter ( AND update_parameter )* ]
-names_values     ::=  names VALUES tuple_literal
-json_clause      ::=  JSON string [ DEFAULT ( NULL | UNSET ) ]
-names            ::=  '(' column_name ( ',' column_name )* ')'
-
-

For instance:

-
INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-      USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                              "director": "Joss Whedon",
-                              "year": 2005}';
-
-
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by -its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the -section on JSON support for more detail.

-

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means to know whether a creation or an update happened.

-

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the -insertion. But please note that using IF NOT EXISTS will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly.

-

All updates for an INSERT are applied atomically and in isolation.

-

Please refer to the UPDATE section for information on the update_parameter.

-

Also note that INSERT does not support counters, while UPDATE does.

-
-
-

UPDATE

-

Updating a row is done using an UPDATE statement:

-
-update_statement ::=  UPDATE table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      SET assignment ( ',' assignment )*
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-update_parameter ::=  ( TIMESTAMP | TTL ) ( integer | bind_marker )
-assignment       ::=  simple_selection '=' term
-                     | column_name '=' column_name ( '+' | '-' ) term
-                     | column_name '=' list_literal '+' column_name
-simple_selection ::=  column_name
-                     | column_name '[' term ']'
-                     | column_name '.' `field_name
-condition        ::=  simple_selection operator term
-
-

For instance:

-
UPDATE NerdMovies USING TTL 400
-   SET director   = 'Joss Whedon',
-       main_actor = 'Nathan Fillion',
-       year       = 2005
- WHERE movie = 'Serenity';
-
-UPDATE UserActions
-   SET total = total + 2
-   WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14
-     AND action = 'click';
-
-
-

The UPDATE statement writes one or more columns for a given row in a table. The where_clause is used to -select the row to update and must include all columns composing the PRIMARY KEY. Non primary key columns are then -set using the SET keyword.

-

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through IF, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred.

-

It is however possible to use the conditions on some columns through IF, in which case the row will not be updated -unless the conditions are met. But, please note that using IF conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly.

-

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

-

Regarding the assignment:

-
  • c = c + 3 is used to increment/decrement counters. The column name after the '=' sign must be the same as the one before the '=' sign. Note that increment/decrement is only allowed on counters, and these are the only update operations allowed on counters. See the section on counters for details.
  • id = id + <some-collection> and id[value1] = value2 are for collections, see the relevant section for details.
  • id.field = 3 is for setting the value of a field on a non-frozen user-defined type. See the relevant section for details.
-
-

Update parameters

-

The UPDATE, INSERT (and DELETE and BATCH for the TIMESTAMP) statements support the following -parameters:

-
    -
  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in -microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • -
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are -automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not -the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL -is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a -default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of null is equivalent -to inserting with a TTL of 0.
  • -
-
-
-
-

DELETE

-

Deleting rows or parts of rows uses the DELETE statement:

-
-delete_statement ::=  DELETE [ simple_selection ( ',' simple_selection ) ]
-                      FROM table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-
-

For instance:

-
DELETE FROM NerdMovies USING TIMESTAMP 1240003134
- WHERE movie = 'Serenity';
-
-DELETE phone FROM Users
- WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-
-
-

The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, -only those columns are deleted from the row indicated by the WHERE clause. Otherwise, whole rows are removed.

-

The WHERE clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -IN operator. A range of rows may be deleted using an inequality operator (such as >=).

-
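For instance, a contiguous range of rows within a partition can be deleted with an inequality on a clustering column. A sketch only, reusing the posts table from the examples above:

DELETE FROM posts
 WHERE userid = 'john doe'
   AND blog_title >= 'A' AND blog_title < 'M';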

DELETE supports the TIMESTAMP option with the same semantics as in updates.

-

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

-

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT -statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly.

-
-
-

BATCH

-

Multiple INSERT, UPDATE and DELETE can be executed in a single statement by grouping them through a -BATCH statement:

-
-batch_statement        ::=  BEGIN [ UNLOGGED | COUNTER ] BATCH
-                            [ USING update_parameter ( AND update_parameter )* ]
-                            modification_statement ( ';' modification_statement )*
-                            APPLY BATCH
-modification_statement ::=  insert_statement | update_statement | delete_statement
-
-

For instance:

-
BEGIN BATCH
-   INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-   DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-
-
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

-
    -
  • It saves network round-trips between the client and the server (and sometimes between the server coordinator and the -replicas) when batching multiple updates.
  • -
  • All updates in a BATCH belonging to a given partition key are performed in isolation.
  • -
  • By default, all operations in the batch are performed as logged, to ensure all mutations eventually complete (or -none will). See the notes on UNLOGGED batches for more details.
  • -
-

Note that:

-
    -
  • BATCH statements may only contain UPDATE, INSERT and DELETE statements (not other batches for instance).
  • -
  • Batches are not a full analogue for SQL transactions.
  • -
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp -(either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra’s conflict -resolution procedure in the case of timestamp ties, operations may -be applied in an order that is different from the order they are listed in the BATCH statement. To force a -particular operation ordering, you must specify per-operation timestamps.
  • -
  • A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
  • -
-
-

UNLOGGED batches

-

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition).

-

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.

-
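For instance (a sketch, reusing the users table from the BATCH example above):

BEGIN UNLOGGED BATCH
   INSERT INTO users (userid, password) VALUES ('user5', 'ch@ngem3d');
   UPDATE users SET password = 'ps33dhds' WHERE userid = 'user5';
APPLY BATCH;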
-
-

COUNTER batches

-

Use the COUNTER option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent.

-
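For instance (a sketch, reusing the counter table from the UPDATE example above):

BEGIN COUNTER BATCH
   UPDATE UserActions SET total = total + 2 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';
   UPDATE UserActions SET total = total + 1 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'view';
APPLY BATCH;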
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/functions.html b/src/doc/4.0-alpha1/cql/functions.html deleted file mode 100644 index 760c7145f..000000000 --- a/src/doc/4.0-alpha1/cql/functions.html +++ /dev/null @@ -1,705 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Functions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Functions

-

CQL supports 2 main categories of functions:

-
  • the scalar functions, which simply take a number of values and produce an output from them.
  • the aggregate functions, which are used to aggregate the results of multiple rows from a SELECT statement.
-

In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined functions.

-
-

Note

-

The use of user-defined functions is disabled by default for security concerns (even when enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do evil, but no sandbox is perfect so using user-defined functions is opt-in). See the enable_user_defined_functions option in cassandra.yaml to enable them.

-
-

A function is identified by its name:

-
-function_name ::=  [ keyspace_name '.' ] name
-
-
-

Scalar functions

-
-

Native functions

-
-

Cast

-

The cast function can be used to convert one native datatype to another.

-

The following table describes the conversions supported by the cast function. Cassandra will silently ignore any -cast converting a datatype into its own datatype.

From         To
ascii        text, varchar
bigint       tinyint, smallint, int, float, double, decimal, varint, text, varchar
boolean      text, varchar
counter      tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
date         timestamp
decimal      tinyint, smallint, int, bigint, float, double, varint, text, varchar
double       tinyint, smallint, int, bigint, float, decimal, varint, text, varchar
float        tinyint, smallint, int, bigint, double, decimal, varint, text, varchar
inet         text, varchar
int          tinyint, smallint, bigint, float, double, decimal, varint, text, varchar
smallint     tinyint, int, bigint, float, double, decimal, varint, text, varchar
time         text, varchar
timestamp    date, text, varchar
timeuuid     timestamp, date, text, varchar
tinyint      tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
uuid         text, varchar
varint       tinyint, smallint, int, bigint, float, double, decimal, text, varchar
-

The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value '1.0'. For instance:

-
SELECT avg(cast(count as double)) FROM myTable
-
-
-
-
-

Token

-

The token function allows computing the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

-

The types of the arguments of the token function depend on the types of the partition key columns. The return type depends on the partitioner in use:

-
    -
  • For Murmur3Partitioner, the return type is bigint.
  • -
  • For RandomPartitioner, the return type is varint.
  • -
  • For ByteOrderedPartitioner, the return type is blob.
  • -
-

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:

-
CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-)
-
-
-

then the token function will take a single argument of type text (in that case, the partition key is userid; there are no clustering columns, so the partition key is the same as the primary key), and the return type will be bigint.

-
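For instance, the token can be selected directly (a sketch using the users table defined just above):

SELECT userid, token(userid) FROM users;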
-
-

Uuid

-

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or -UPDATE statements.

-
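For instance (a sketch; the table and columns are hypothetical):

INSERT INTO sensor_readings (id, value) VALUES (uuid(), 42);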
-
-

Timeuuid functions

-
-
now
-

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the time the function is invoked. Note that this method is useful for insertion but is largely nonsensical in WHERE clauses. For instance, a query of the form:

-
SELECT * FROM myTable WHERE t = now()
-
-
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

-

currentTimeUUID is an alias of now.

-
-
-
minTimeuuid and maxTimeuuid
-

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having timestamp t. So for instance:

-
SELECT * FROM myTable
- WHERE t > maxTimeuuid('2013-01-01 00:05+0000')
-   AND t < minTimeuuid('2013-02-02 10:00+0000')
-
-
-

will select all rows where the timeuuid column t is strictly older than '2013-01-01 00:05+0000' but strictly younger than '2013-02-02 10:00+0000'. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still not select a timeuuid generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to t > maxTimeuuid('2013-01-01 00:05+0000').

-
-

Note

-

We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the Time-Based UUID generation process specified by RFC 4122. In particular, the values returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

-
-
-
-
-

Datetime functions

-
-
Retrieving the current date/time
-

The following functions can be used to retrieve the date/time at the time where the function is invoked:

Function name       Output type
currentTimestamp    timestamp
currentDate         date
currentTime         time
currentTimeUUID     timeUUID
-

For example the last 2 days of data can be retrieved using:

-
SELECT * FROM myTable WHERE date >= currentDate() - 2d
-
-
-
-
-
Time conversion functions
-

A number of functions are provided to "convert" a timeuuid, a timestamp or a date into another native type.

Function name      Input type    Description
toDate             timeuuid      Converts the timeuuid argument into a date type
toDate             timestamp     Converts the timestamp argument into a date type
toTimestamp        timeuuid      Converts the timeuuid argument into a timestamp type
toTimestamp        date          Converts the date argument into a timestamp type
toUnixTimestamp    timeuuid      Converts the timeuuid argument into a bigInt raw value
toUnixTimestamp    timestamp     Converts the timestamp argument into a bigInt raw value
toUnixTimestamp    date          Converts the date argument into a bigInt raw value
dateOf             timeuuid      Similar to toTimestamp(timeuuid) (DEPRECATED)
unixTimestampOf    timeuuid      Similar to toUnixTimestamp(timeuuid) (DEPRECATED)
-
-
-
-

Blob conversion functions

-

A number of functions are provided to "convert" the native types into binary data (blob). For every <native-type> type supported by CQL (a notable exception is blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a 64-bit blob argument and converts it to a bigint value. So, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.

-
-
-
-

User-defined functions

-

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath.

-

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

-

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

-
CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-
-
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing.

-

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types.

-

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

-

Note that you can use the dollar-quoted string syntax ($$ ... $$) to enclose the UDF source code. For example:

-
CREATE FUNCTION some_function ( arg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS int
-    LANGUAGE java
-    AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-
-CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen<custom_type> )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$ return udtarg.getString("txt"); $$;
-
-
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

-

The implicitly available udfContext field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:

-
CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( somearg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS custom_type
-    LANGUAGE java
-    AS $$
-        UDTValue udt = udfContext.newReturnUDTValue();
-        udt.setString("txt", "some string");
-        udt.setInt("i", 42);
-        return udt;
-    $$;
-
-
-

The definition of the UDFContext interface can be found in the Apache Cassandra source code for -org.apache.cassandra.cql3.functions.UDFContext.

-
public interface UDFContext
-{
-    UDTValue newArgUDTValue(String argName);
-    UDTValue newArgUDTValue(int argNum);
-    UDTValue newReturnUDTValue();
-    UDTValue newUDTValue(String udtName);
-    TupleValue newArgTupleValue(String argName);
-    TupleValue newArgTupleValue(int argNum);
-    TupleValue newReturnTupleValue();
-    TupleValue newTupleValue(String cqlDefinition);
-}
-
-
-

Java UDFs already have some imports for common interfaces and classes defined. These imports are:

-
import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.cassandra.cql3.functions.UDFContext;
-import com.datastax.driver.core.TypeCodec;
-import com.datastax.driver.core.TupleValue;
-import com.datastax.driver.core.UDTValue;
-
-
-

Please note that these convenience imports are not available for script UDFs.

-
-

CREATE FUNCTION

-

Creating a new user-defined function uses the CREATE FUNCTION statement:

-
-create_function_statement ::=  CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS]
-                                   function_name '(' arguments_declaration ')'
-                                   [ CALLED | RETURNS NULL ] ON NULL INPUT
-                                   RETURNS cql_type
-                                   LANGUAGE identifier
-                                   AS string
-arguments_declaration     ::=  identifier cql_type ( ',' identifier cql_type )*
-
-

For instance:

-
CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list<bigint>)
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int)
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-
-

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with -the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already -exists.

-

If the optional IF NOT EXISTS keywords are used, the function will -only be created if another function with the same signature does not -exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

Behavior on invocation with null values must be defined for each -function. There are two options:

-
  1. RETURNS NULL ON NULL INPUT declares that the function will always return null if any of the input arguments is null.
  2. CALLED ON NULL INPUT declares that the function will always be executed.
-
-
Function Signature
-

Signatures are used to distinguish individual functions. The signature consists of:

-
  1. The fully qualified function name, i.e. keyspace plus function name.
  2. The concatenated list of all argument types.
-

Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules.

-

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. -the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the -system keyspaces.

-
-
-
-

DROP FUNCTION

-

Dropping a function uses the DROP FUNCTION statement:

-
-drop_function_statement ::=  DROP FUNCTION [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-arguments_signature     ::=  cql_type ( ',' cql_type )*
-
-

For instance:

-
DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-
-
-

You must specify the argument types (arguments_signature) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions).

-

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists, but does not throw an error if it doesn't.

-
-
-
-
-

Aggregate functions

-

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.

-

If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query.

-
-

Native aggregates

-
-

Count

-

The count function can be used to count the rows returned by a query. Example:

-
SELECT COUNT (*) FROM plays;
-SELECT COUNT (1) FROM plays;
-
-
-

It can also be used to count the non-null values of a given column:

-
SELECT COUNT (scores) FROM plays;
-
-
-
-
-

Max and Min

-

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:

-
SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake';
-
-
-
-
-

Sum

-

The sum function can be used to sum up all the values returned by a query for a given column. For instance:

-
SELECT SUM (players) FROM plays;
-
-
-
-
-

Avg

-

The avg function can be used to compute the average of all the values returned by a query for a given column. For -instance:

-
SELECT AVG (players) FROM plays;
-
-
-
-
-
-

User-Defined Aggregates

-

User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -count, min, and max.

-

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first argument of the state function must have type STYPE. The remaining arguments of the state function must match the types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with the last state value as its argument.

-

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate).

-

User-defined aggregates can be used in SELECT statements.

-

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE -statement):

-
CREATE OR REPLACE FUNCTION averageState(state tuple<int,bigint>, val int)
-    CALLED ON NULL INPUT
-    RETURNS tuple<int, bigint>
-    LANGUAGE java
-    AS $$
-        if (val != null) {
-            state.setInt(0, state.getInt(0)+1);
-            state.setLong(1, state.getLong(1)+val.intValue());
-        }
-        return state;
-    $$;
-
-CREATE OR REPLACE FUNCTION averageFinal (state tuple<int,bigint>)
-    CALLED ON NULL INPUT
-    RETURNS double
-    LANGUAGE java
-    AS $$
-        double r = 0;
-        if (state.getInt(0) == 0) return null;
-        r = state.getLong(1);
-        r /= state.getInt(0);
-        return Double.valueOf(r);
-    $$;
-
-CREATE OR REPLACE AGGREGATE average(int)
-    SFUNC averageState
-    STYPE tuple<int, bigint>
-    FINALFUNC averageFinal
-    INITCOND (0, 0);
-
-CREATE TABLE atable (
-    pk int PRIMARY KEY,
-    val int
-);
-
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-
-SELECT average(val) FROM atable;
-
-
-
-

CREATE AGGREGATE

-

Creating (or replacing) a user-defined aggregate function uses the CREATE AGGREGATE statement:

-
-create_aggregate_statement ::=  CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ]
-                                    function_name '(' arguments_signature ')'
-                                    SFUNC function_name
-                                    STYPE cql_type
-                                    [ FINALFUNC function_name ]
-                                    [ INITCOND term ]
-
-

See above for a complete example.

-

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one -with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature -already exists.

-

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

STYPE defines the type of the state value and must be specified.

-

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null -INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

-

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the -state function must match STYPE. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called -with null.

-

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with -type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS -NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

-

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is -defined, it is the return type of that function.

-
-
-

DROP AGGREGATE

-

Dropping a user-defined aggregate function uses the DROP AGGREGATE statement:

-
-drop_aggregate_statement ::=  DROP AGGREGATE [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-
-

For instance:

-
DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-
-
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates).

-

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if an aggregate with that signature does not exist.

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/index.html b/src/doc/4.0-alpha1/cql/index.html deleted file mode 100644 index d9bdc1067..000000000 --- a/src/doc/4.0-alpha1/cql/index.html +++ /dev/null @@ -1,246 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "The Cassandra Query Language (CQL)" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

The Cassandra Query Language (CQL)

-

This document describes the Cassandra Query Language (CQL) [1]. Note that this document describes the latest version of the language. However, the changes section provides the diff between the different versions of CQL.

-

CQL offers a model close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL.

[1] Technically, this document describes CQL version 3, which is not backward compatible with CQL versions 1 and 2 (which have been deprecated and removed) and differs from them in numerous ways.
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/indexes.html b/src/doc/4.0-alpha1/cql/indexes.html deleted file mode 100644 index 8de351256..000000000 --- a/src/doc/4.0-alpha1/cql/indexes.html +++ /dev/null @@ -1,170 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Secondary Indexes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Secondary Indexes

-

CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by:

-
-index_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE INDEX

-

Creating a secondary index on a table uses the CREATE INDEX statement:

-
-create_index_statement ::=  CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ index_name ]
-                                ON table_name '(' index_identifier ')'
-                                [ USING string [ WITH OPTIONS = map_literal ] ]
-index_identifier       ::=  column_name
-                           | ( KEYS | VALUES | ENTRIES | FULL ) '(' column_name ')'
-
-

For instance:

-
CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-
-
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time.

-

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it -is used, the statement will be a no-op if the index already exists.

-
-

Indexes on Map Keys

-

When creating an index on a map, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.

-
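For example, a minimal sketch reusing the users table and the keys(favs) index created in the examples above (the query itself is illustrative, not taken from this page):

SELECT * FROM users WHERE favs CONTAINS KEY 'fruit';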
-
-
-

DROP INDEX

-

Dropping a secondary index uses the DROP INDEX statement:

-
-drop_index_statement ::=  DROP INDEX [ IF EXISTS ] index_name
-
-

The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index.

-

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
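For instance, using the userIndex created earlier (the keyspace-qualified form below is illustrative):

DROP INDEX userIndex;
DROP INDEX IF EXISTS userkeyspace.userIndex;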
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/json.html b/src/doc/4.0-alpha1/cql/json.html deleted file mode 100644 index 619f554cf..000000000 --- a/src/doc/4.0-alpha1/cql/json.html +++ /dev/null @@ -1,317 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "JSON Support" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

JSON Support

-

Cassandra 2.2 introduces JSON support to SELECT and INSERT statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced); it simply provides a convenient way to work with JSON documents.

-
-

SELECT JSON

-

With SELECT statements, the JSON keyword can be used to return each row as a single JSON encoded map. The -remainder of the SELECT statement behavior is the same.

-

The result map keys are the same as the column names in a normal result set. For example, a statement like SELECT JSON a, ttl(b) FROM ... would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, SELECT JSON myColumn FROM ... would result in a map key "\"myColumn\"" (note the escaped quotes).

-

The map values will be JSON-encoded representations (as described below) of the result set values.

-
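A minimal sketch, assuming a hypothetical table mytable with columns a and b (the resulting map keys follow the rule above):

SELECT JSON a, ttl(b) FROM mytable;  -- each row comes back as a single JSON map with keys "a" and "ttl(b)"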
-
-

INSERT JSON

-

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single -row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named “myKey” and “value”, you would do the following:

-
INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-
-

By default (or if DEFAULT NULL is explicitly used), a column omitted from the JSON map will be set to NULL, meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). Alternatively, if the DEFAULT UNSET directive is used after the value, omitted column values will be left unset, meaning that pre-existing values for those columns will be preserved.

-
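For example, a sketch of the two behaviours against the mytable example above (the rows are illustrative):

INSERT INTO mytable JSON '{ "\"myKey\"": 0 }';               -- omitted columns are set to NULL (DEFAULT NULL)
INSERT INTO mytable JSON '{ "\"myKey\"": 0 }' DEFAULT UNSET; -- omitted columns keep any pre-existing values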
-
-

JSON Encoding of Cassandra Data Types

-

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string -representation of the collection.

-

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and toJson()):

Type | Formats accepted | Return format | Notes
ascii | string | string | Uses JSON’s \u character escape
bigint | integer, string | integer | String must be valid 64 bit integer
blob | string | string | String should be 0x followed by an even number of hex digits
boolean | boolean, string | boolean | String must be “true” or “false”
date | string | string | Date in format YYYY-MM-DD, timezone UTC
decimal | integer, float, string | float | May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder
double | integer, float, string | float | String must be valid integer or float
float | integer, float, string | float | String must be valid integer or float
inet | string | string | IPv4 or IPv6 address
int | integer, string | integer | String must be valid 32 bit integer
list | list, string | list | Uses JSON’s native list representation
map | map, string | map | Uses JSON’s native map representation
smallint | integer, string | integer | String must be valid 16 bit integer
set | list, string | list | Uses JSON’s native list representation
text | string | string | Uses JSON’s \u character escape
time | string | string | Time of day in format HH-MM-SS[.fffffffff]
timestamp | integer, string | string | A timestamp. String constants allow inputting timestamps as dates. Timestamps with format YYYY-MM-DD HH:MM:SS.SSS are returned
timeuuid | string | string | Type 1 UUID. See constant for the UUID format
tinyint | integer, string | integer | String must be valid 8 bit integer
tuple | list, string | list | Uses JSON’s native list representation
UDT | map, string | map | Uses JSON’s native map representation with field names as keys
uuid | string | string | See constant for the UUID format
varchar | string | string | Uses JSON’s \u character escape
varint | integer, string | integer | Variable length; may overflow 32 or 64 bit integers in client-side decoder
-
-
-

The fromJson() Function

-

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used -in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or -SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.

-
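A minimal sketch, assuming a hypothetical table with a map column:

CREATE TABLE players (id int PRIMARY KEY, scores map<text, int>);
INSERT INTO players (id, scores) VALUES (0, fromJson('{"quake": 17, "doom": 4}'));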
-
-

The toJson() Function

-

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used -in the selection clause of a SELECT statement.

-
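A minimal sketch, reusing the hypothetical players table above:

SELECT id, toJson(scores) FROM players WHERE id = 0;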
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/mvs.html b/src/doc/4.0-alpha1/cql/mvs.html deleted file mode 100644 index acf7e2d18..000000000 --- a/src/doc/4.0-alpha1/cql/mvs.html +++ /dev/null @@ -1,260 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Materialized Views" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Materialized Views

-

Materialized view names are defined by:

-
-view_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE MATERIALIZED VIEW

-

You can create a materialized view on a table using a CREATE MATERIALIZED VIEW statement:

-
-create_materialized_view_statement ::=  CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] view_name AS
-                                            select_statement
-                                            PRIMARY KEY '(' primary_key ')'
-                                            WITH table_options
-
-

For instance:

-
CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT * FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-
-
-

The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which -corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view.

-

Creating a materialized view has 3 main parts:

• The select statement that restricts the data included in the view.
• The primary key definition for the view.
• The options of the view.

- -

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is -used. If it is used, the statement will be a no-op if the materialized view already exists.

-
-

Note

-

By default, materialized views are built in a single thread. The initial build can be parallelized by -increasing the number of threads specified by the property concurrent_materialized_view_builders in -cassandra.yaml. This property can also be manipulated at runtime through both JMX and the -setconcurrentviewbuilders and getconcurrentviewbuilders nodetool commands.

-
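For example, a sketch of changing and inspecting the setting at runtime (the value 4 is illustrative):

nodetool setconcurrentviewbuilders 4
nodetool getconcurrentviewbuilders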
-
-

MV select statement

-

The select statement of a materialized view creation defines which rows of the base table are included in the view. That statement is limited in a number of ways:

-
  • the selection is limited to those that only select columns of the base table. In other words, you can’t use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can however use * as a shortcut for selecting all columns. Further, static columns cannot be included in a materialized view (which means SELECT * isn’t allowed if the base table has static columns).
  • the WHERE clause has the following restrictions:
    • it cannot include any bind_marker.
    • the columns that are not part of the base table primary key can only be restricted by an IS NOT NULL restriction. No other restriction is allowed.
    • as the columns that are part of the view primary key cannot be null, they must always be at least restricted by an IS NOT NULL restriction (or any other restriction, but they must have one).
  • it cannot have an ordering clause, a limit, or ALLOW FILTERING.
-
-
-

MV primary key

-

A view must have a primary key and that primary key must conform to the following restrictions:

-
    -
  • it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to exactly one row of the base table.
  • -
  • it can only contain a single column that is not a primary key column in the base table.
  • -
-

So for instance, given the following base table definition:

-
CREATE TABLE t (
-    k int,
-    c1 int,
-    c2 int,
-    v1 int,
-    v2 int,
-    PRIMARY KEY (k, c1, c2)
-)
-
-
-

then the following view definitions are allowed:

-
CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, k, c2)
-
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (v1, k, c1, c2)
-
-
-

but the following ones are not allowed:

-
// Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-    PRIMARY KEY (v1, v2, k, c1, c2)
-
-// Error: must include k in the primary key as it's a base table primary key column
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, c2)
-
-
-
-
-

MV options

-

A materialized view is internally implemented by a table and as such, creating a MV allows the same options as creating a table.

-
-
-
-

ALTER MATERIALIZED VIEW

-

After creation, you can alter the options of a materialized view using the ALTER MATERIALIZED VIEW statement:

-
-alter_materialized_view_statement ::=  ALTER MATERIALIZED VIEW view_name WITH table_options
-
-

The options that can be updated are the same as at creation time and are thus the same as for tables.

-
-
-

DROP MATERIALIZED VIEW

-

Dropping a materialized view uses the DROP MATERIALIZED VIEW statement:

-
-drop_materialized_view_statement ::=  DROP MATERIALIZED VIEW [ IF EXISTS ] view_name;
-
-

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-

MV Limitations

-
-

Note

-

Removal of columns not selected in the Materialized View (via UPDATE base SET unselected_column = null or -DELETE unselected_column FROM base) may shadow missed updates to other columns received by hints or repair. -For this reason, we advise against doing deletions on base columns not selected in views until this is -fixed on CASSANDRA-13826.

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/operators.html b/src/doc/4.0-alpha1/cql/operators.html deleted file mode 100644 index b642097ab..000000000 --- a/src/doc/4.0-alpha1/cql/operators.html +++ /dev/null @@ -1,300 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Arithmetic Operators" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Arithmetic Operators

-

CQL supports the following operators:

Operator | Description
- (unary) | Negates operand
+ | Addition
- | Subtraction
* | Multiplication
/ | Division
% | Returns the remainder of a division
-
-

Number Arithmetic

-

All arithmetic operations are supported on numeric types or counters.

-

The return type of the operation will be based on the operand types:

left/right | tinyint | smallint | int | bigint | counter | float | double | varint | decimal
tinyint | tinyint | smallint | int | bigint | bigint | float | double | varint | decimal
smallint | smallint | smallint | int | bigint | bigint | float | double | varint | decimal
int | int | int | int | bigint | bigint | float | double | varint | decimal
bigint | bigint | bigint | bigint | bigint | bigint | double | double | varint | decimal
counter | bigint | bigint | bigint | bigint | bigint | double | double | varint | decimal
float | float | float | float | double | double | float | double | decimal | decimal
double | double | double | double | double | double | double | double | decimal | decimal
varint | varint | varint | varint | decimal | decimal | decimal | decimal | decimal | decimal
decimal | decimal | decimal | decimal | decimal | decimal | decimal | decimal | decimal | decimal
-

The *, / and % operators have a higher precedence level than the + and - operators. Consequently, they will be evaluated first. If two operators in an expression have the same precedence level, they will be evaluated left to right based on their position in the expression.

-
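As an illustrative sketch, assuming a hypothetical table scores(id int PRIMARY KEY, a int, b int) and that the operators are used in the selection clause:

SELECT a + b * 2 FROM scores WHERE id = 0;  -- b * 2 is evaluated first, then added to a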
-
-

Datetime Arithmetic

-

A duration can be added (+) or subtracted (-) from a timestamp or a date to create a new timestamp or date. So for instance:

-
SELECT * FROM myTable WHERE t = '2017-01-01' - 2d
-
-
-

will select all the records with a value of t which is in the last 2 days of 2016.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/security.html b/src/doc/4.0-alpha1/cql/security.html deleted file mode 100644 index 5e78523c9..000000000 --- a/src/doc/4.0-alpha1/cql/security.html +++ /dev/null @@ -1,742 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-
-

Database Roles

-

CQL uses database roles to represent users and groups of users. Syntactically, a role is defined by:

-
-role_name ::=  identifier | string
-
-
-

CREATE ROLE

-

Creating a role uses the CREATE ROLE statement:

-
-create_role_statement ::=  CREATE ROLE [ IF NOT EXISTS ] role_name
-                               [ WITH role_options ]
-role_options          ::=  role_option ( AND role_option )*
-role_option           ::=  PASSWORD '=' string
-                          | LOGIN '=' boolean
-                          | SUPERUSER '=' boolean
-                          | OPTIONS '=' map_literal
-                          | ACCESS TO DATACENTERS set_literal
-                          | ACCESS TO ALL DATACENTERS
-
-

For instance:

-
CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'};
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS;
-
-
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

-

Permissions on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is -not.

-

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role.

-

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

-

Role names should be quoted if they contain non-alphanumeric characters.

-
-

Setting credentials for internal authentication

-

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single -quotation marks.

-

If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD -clause is not necessary.

-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. Not specifying datacenters implicitly grants access to all datacenters. The clause ACCESS TO ALL DATACENTERS can be used for explicitness, but there’s no functional difference.

-
-
-

Creating a role conditionally

-

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. -If the option is used and the role exists, the statement is a no-op:

-
CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-
-
-
-
-
-

ALTER ROLE

-

Altering the options of a role uses the ALTER ROLE statement:

-
-alter_role_statement ::=  ALTER ROLE role_name WITH role_options
-
-

For instance:

-
ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-
-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the -ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ACCESS TO ALL DATACENTERS clause.

-
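For instance, a sketch reusing the alice role from the examples above (the datacenter names are illustrative):

ALTER ROLE alice WITH ACCESS TO DATACENTERS {'DC1'};
ALTER ROLE alice WITH ACCESS TO ALL DATACENTERS;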

Conditions on executing ALTER ROLE statements:

-
    -
  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • -
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • -
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • -
  • To modify properties of a role, the client must be granted ALTER permission on that role
  • -
-
-
-
-

DROP ROLE

-

Dropping a role uses the DROP ROLE statement:

-
-drop_role_statement ::=  DROP ROLE [ IF EXISTS ] role_name
-
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.

-

Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is -used. If the option is used and the role does not exist the statement is a no-op.

-
-

Note

-

DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain -connected and will retain the ability to perform any database actions which do not require authorization. -However, if authorization is enabled, permissions of the dropped role are also revoked, -subject to the caching options configured in cassandra.yaml. -Should a dropped role be subsequently recreated and have new permissions or -roles granted to it, any client sessions still connected will acquire the newly granted -permissions and roles.

-
-
-
-

GRANT ROLE

-

Granting a role to another uses the GRANT ROLE statement:

-
-grant_role_statement ::=  GRANT role_name TO role_name
-
-

For instance:

-
GRANT report_writer TO alice;
-
-
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also -acquired by alice.

-

Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:

-
GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
-GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-
-
-
-
-

REVOKE ROLE

-

Revoking a role uses the REVOKE ROLE statement:

-
-revoke_role_statement ::=  REVOKE role_name FROM role_name
-
-

For instance:

-
REVOKE report_writer FROM alice;
-
-
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the -report_writer role are also revoked.

-
-
-

LIST ROLES

-

All the known roles (in the system or granted to a specific role) can be listed using the LIST ROLES statement:

-
-list_roles_statement ::=  LIST ROLES [ OF role_name ] [ NORECURSIVE ]
-
-

For instance:

-
LIST ROLES;
-
-
-

returns all known roles in the system; this requires DESCRIBE permission on the database roles resource. And:

-
LIST ROLES OF alice;
-
-
-

enumerates all roles granted to alice, including those transitively acquired. But:

-
LIST ROLES OF bob NORECURSIVE
-
-
-

lists all roles directly granted to bob without including any of the transitively acquired ones.

-
-
-
-

Users

-

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -USER. For backward compatibility, the legacy syntax has been preserved with USER centric statements becoming -synonyms for the ROLE based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role.

-
-

CREATE USER

-

Creating a user uses the CREATE USER statement:

-
-create_user_statement ::=  CREATE USER [ IF NOT EXISTS ] role_name [ WITH PASSWORD string ] [ user_option ]
-user_option           ::=  SUPERUSER | NOSUPERUSER
-
-

For instance:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-
-
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of -statements are equivalent:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-
-
-
-

ALTER USER

-

Altering the options of a user uses the ALTER USER statement:

-
-alter_user_statement ::=  ALTER USER role_name [ WITH PASSWORD string ] [ user_option ]
-
-

For instance:

-
ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-
-
-
-
-

DROP USER

-

Dropping a user uses the DROP USER statement:

-
-drop_user_statement ::=  DROP USER [ IF EXISTS ] role_name
-
-
-
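For instance, reusing the users created in the earlier examples:

DROP USER alice;
DROP USER IF EXISTS bob;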
-

LIST USERS

-

Existing users can be listed using the LIST USERS statement:

-
-list_users_statement ::=  LIST USERS
-
-

Note that this statement is equivalent to:

-
LIST ROLES;
-
-
-

but only roles with the LOGIN privilege are included in the output.

-
-
-
-

Data Control

-
-

Permissions

-

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically:

-
    -
  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> -TABLE.
  • -
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • -
  • Resources representing roles have the structure ALL ROLES -> ROLE
  • -
  • Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ALL MBEANS -> -MBEAN
  • -
-

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting -a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace.

-

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes.

-

The full set of available permissions is:

-
    -
  • CREATE
  • -
  • ALTER
  • -
  • DROP
  • -
  • SELECT
  • -
  • MODIFY
  • -
  • AUTHORIZE
  • -
  • DESCRIBE
  • -
  • EXECUTE
  • -
-

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context -of functions or mbeans; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT -a permission on resource to which it cannot be applied results in an error response. The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission.

Permission | Resource | Operations
CREATE | ALL KEYSPACES | CREATE KEYSPACE and CREATE TABLE in any keyspace
CREATE | KEYSPACE | CREATE TABLE in specified keyspace
CREATE | ALL FUNCTIONS | CREATE FUNCTION in any keyspace and CREATE AGGREGATE in any keyspace
CREATE | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE in specified keyspace
CREATE | ALL ROLES | CREATE ROLE
ALTER | ALL KEYSPACES | ALTER KEYSPACE and ALTER TABLE in any keyspace
ALTER | KEYSPACE | ALTER KEYSPACE and ALTER TABLE in specified keyspace
ALTER | TABLE | ALTER TABLE
ALTER | ALL FUNCTIONS | CREATE FUNCTION and CREATE AGGREGATE: replacing any existing
ALTER | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE: replacing existing in specified keyspace
ALTER | FUNCTION | CREATE FUNCTION and CREATE AGGREGATE: replacing existing
ALTER | ALL ROLES | ALTER ROLE on any role
ALTER | ROLE | ALTER ROLE
DROP | ALL KEYSPACES | DROP KEYSPACE and DROP TABLE in any keyspace
DROP | KEYSPACE | DROP TABLE in specified keyspace
DROP | TABLE | DROP TABLE
DROP | ALL FUNCTIONS | DROP FUNCTION and DROP AGGREGATE in any keyspace
DROP | ALL FUNCTIONS IN KEYSPACE | DROP FUNCTION and DROP AGGREGATE in specified keyspace
DROP | FUNCTION | DROP FUNCTION
DROP | ALL ROLES | DROP ROLE on any role
DROP | ROLE | DROP ROLE
SELECT | ALL KEYSPACES | SELECT on any table
SELECT | KEYSPACE | SELECT on any table in specified keyspace
SELECT | TABLE | SELECT on specified table
SELECT | ALL MBEANS | Call getter methods on any mbean
SELECT | MBEANS | Call getter methods on any mbean matching a wildcard pattern
SELECT | MBEAN | Call getter methods on named mbean
MODIFY | ALL KEYSPACES | INSERT, UPDATE, DELETE and TRUNCATE on any table
MODIFY | KEYSPACE | INSERT, UPDATE, DELETE and TRUNCATE on any table in specified keyspace
MODIFY | TABLE | INSERT, UPDATE, DELETE and TRUNCATE on specified table
MODIFY | ALL MBEANS | Call setter methods on any mbean
MODIFY | MBEANS | Call setter methods on any mbean matching a wildcard pattern
MODIFY | MBEAN | Call setter methods on named mbean
AUTHORIZE | ALL KEYSPACES | GRANT PERMISSION and REVOKE PERMISSION on any table
AUTHORIZE | KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION on any table in specified keyspace
AUTHORIZE | TABLE | GRANT PERMISSION and REVOKE PERMISSION on specified table
AUTHORIZE | ALL FUNCTIONS | GRANT PERMISSION and REVOKE PERMISSION on any function
AUTHORIZE | ALL FUNCTIONS IN KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION in specified keyspace
AUTHORIZE | FUNCTION | GRANT PERMISSION and REVOKE PERMISSION on specified function
AUTHORIZE | ALL MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean
AUTHORIZE | MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean matching a wildcard pattern
AUTHORIZE | MBEAN | GRANT PERMISSION and REVOKE PERMISSION on named mbean
AUTHORIZE | ALL ROLES | GRANT ROLE and REVOKE ROLE on any role
AUTHORIZE | ROLES | GRANT ROLE and REVOKE ROLE on specified roles
DESCRIBE | ALL ROLES | LIST ROLES on all roles or only roles granted to another, specified role
DESCRIBE | ALL MBEANS | Retrieve metadata about any mbean from the platform’s MBeanServer
DESCRIBE | MBEANS | Retrieve metadata about any mbean matching a wildcard pattern from the platform’s MBeanServer
DESCRIBE | MBEAN | Retrieve metadata about a named mbean from the platform’s MBeanServer
EXECUTE | ALL FUNCTIONS | SELECT, INSERT and UPDATE using any function, and use of any function in CREATE AGGREGATE
EXECUTE | ALL FUNCTIONS IN KEYSPACE | SELECT, INSERT and UPDATE using any function in specified keyspace and use of any function in keyspace in CREATE AGGREGATE
EXECUTE | FUNCTION | SELECT, INSERT and UPDATE using specified function and use of the function in CREATE AGGREGATE
EXECUTE | ALL MBEANS | Execute operations on any mbean
EXECUTE | MBEANS | Execute operations on any mbean matching a wildcard pattern
EXECUTE | MBEAN | Execute operations on named mbean
-
-
-

GRANT PERMISSION

-

Granting a permission uses the GRANT PERMISSION statement:

-
-grant_permission_statement ::=  GRANT permissions ON resource TO role_name
-permissions                ::=  ALL [ PERMISSIONS ] | permission [ PERMISSION ]
-permission                 ::=  CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-resource                   ::=  ALL KEYSPACES
-                               | KEYSPACE keyspace_name
-                               | [ TABLE ] table_name
-                               | ALL ROLES
-                               | ROLE role_name
-                               | ALL FUNCTIONS [ IN KEYSPACE keyspace_name ]
-                               | FUNCTION function_name '(' [ cql_type ( ',' cql_type )* ] ')'
-                               | ALL MBEANS
-                               | ( MBEAN | MBEANS ) string
-
-

For instance:

-
GRANT SELECT ON ALL KEYSPACES TO data_reader;
-
-
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all -keyspaces:

-
GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-
-
-

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace:

-
GRANT DROP ON keyspace1.table1 TO schema_owner;
-
-
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1:

-
GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-
-
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries -which use the function keyspace1.user_function( int ):

-
GRANT DESCRIBE ON ALL ROLES TO role_admin;
-
-
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST ROLES statement.

-
-

GRANT ALL

-

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target -resource.

-
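For instance, a sketch of the GRANT ALL form (the manager role is illustrative):

GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO manager;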
-
-

Automatic Granting

-

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or CREATE ROLE statement, the creator (the role the database user who issues the statement is identified as) is automatically granted all applicable permissions on the new resource.

-
-
-
-

REVOKE PERMISSION

-

Revoking a permission from a role uses the REVOKE PERMISSION statement:

-
-revoke_permission_statement ::=  REVOKE permissions ON resource FROM role_name
-
-

For instance:

-
REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-
-
-

Because of their function in normal driver operations, certain tables cannot have their SELECT permissions -revoked. The following tables will be available to all authorized users regardless of their assigned role:

-
* `system_schema.keyspaces`
-* `system_schema.columns`
-* `system_schema.tables`
-* `system.local`
-* `system.peers`
-
-
-
-
-

LIST PERMISSIONS

-

Listing granted permissions uses the LIST PERMISSIONS statement:

-
-list_permissions_statement ::=  LIST permissions [ ON resource ] [ OF role_name [ NORECURSIVE ] ]
-
-

For instance:

-
LIST ALL PERMISSIONS OF alice;
-
-
-

Show all permissions granted to alice, including those acquired transitively from any other roles:

-
LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-
-
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. -For example, should bob have ALTER permission on keyspace1, that would be included in the results of this -query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to -bob or one of bob’s roles:

-
LIST SELECT PERMISSIONS OF carlos;
-
-
-

Show any permissions granted to carlos or any of carlos’s roles, limited to SELECT permissions on any -resource.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/triggers.html b/src/doc/4.0-alpha1/cql/triggers.html deleted file mode 100644 index 3e523392f..000000000 --- a/src/doc/4.0-alpha1/cql/triggers.html +++ /dev/null @@ -1,155 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Triggers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Triggers

-

Triggers are identified by a name defined by:

-
-trigger_name ::=  identifier
-
-
-

CREATE TRIGGER

-

Creating a new trigger uses the CREATE TRIGGER statement:

-
-create_trigger_statement ::=  CREATE TRIGGER [ IF NOT EXISTS ] trigger_name
-                                  ON table_name
-                                  USING string
-
-

For instance:

-
CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-
-
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. You place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it loads during cluster startup and exists on every node that participates in a cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

-
-
-

DROP TRIGGER

-

Dropping a trigger uses the DROP TRIGGER statement:

-
-drop_trigger_statement ::=  DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name
-
-

For instance:

-
DROP TRIGGER myTrigger ON myTable;
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/cql/types.html b/src/doc/4.0-alpha1/cql/types.html deleted file mode 100644 index 445f8cb77..000000000 --- a/src/doc/4.0-alpha1/cql/types.html +++ /dev/null @@ -1,699 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Types" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Types

-

CQL is a typed language and supports a rich set of data types, including native types, -collection types, user-defined types, tuple types and custom -types:

-
-cql_type ::=  native_type | collection_type | user_defined_type | tuple_type | custom_type
-
-
-

Native Types

-

The native types supported by CQL are:

-
-native_type ::=  ASCII
-                 | BIGINT
-                 | BLOB
-                 | BOOLEAN
-                 | COUNTER
-                 | DATE
-                 | DECIMAL
-                 | DOUBLE
-                 | DURATION
-                 | FLOAT
-                 | INET
-                 | INT
-                 | SMALLINT
-                 | TEXT
-                 | TIME
-                 | TIMESTAMP
-                 | TIMEUUID
-                 | TINYINT
-                 | UUID
-                 | VARCHAR
-                 | VARINT
-
-

The following table gives additional information on the native data types, and on which kind of constants each type supports:

type | constants supported | description
ascii | string | ASCII character string
bigint | integer | 64-bit signed long
blob | blob | Arbitrary bytes (no validation)
boolean | boolean | Either true or false
counter | integer | Counter column (64-bit signed value). See Counters for details
date | integer, string | A date (with no corresponding time value). See Working with dates below for details
decimal | integer, float | Variable-precision decimal
double | integer, float | 64-bit IEEE-754 floating point
duration | duration | A duration with nanosecond precision. See Working with durations below for details
float | integer, float | 32-bit IEEE-754 floating point
inet | string | An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that there is no inet constant, IP address should be input as strings
int | integer | 32-bit signed int
smallint | integer | 16-bit signed int
text | string | UTF8 encoded string
time | integer, string | A time (with no corresponding date value) with nanosecond precision. See Working with times below for details
timestamp | integer, string | A timestamp (date and time) with millisecond precision. See Working with timestamps below for details
timeuuid | uuid | Version 1 UUID, generally used as a “conflict-free” timestamp. Also see Timeuuid functions
tinyint | integer | 8-bit signed int
uuid | uuid | A UUID (of any version)
varchar | string | UTF8 encoded string
varint | integer | Arbitrary-precision integer
-
-

Counters

-

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the UPDATE statement for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0.

-
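For instance, a minimal sketch of a counter column and the two supported operations (the table and values are illustrative):

CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter
);

UPDATE page_views SET views = views + 1 WHERE page = '/home';  -- increment
UPDATE page_views SET views = views - 1 WHERE page = '/home';  -- decrement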

Counters have a number of important limitations:

-
    -
  • They cannot be used for columns part of the PRIMARY KEY of a table.
  • -
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside -the PRIMARY KEY have the counter type, or none of them have it.
  • -
  • Counters do not support expiration.
  • -
  • The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other -words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
  • -
  • Counter updates are, by nature, not idempotent. An important consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), the client has no way to know if the update has been applied or not. In particular, replaying the update may or may not lead to an over count.
  • -
-
-
-
-

Working with timestamps

-

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as the epoch: January 1 1970 at 00:00:00 GMT.

-

Timestamps can be input in CQL either using their value as an integer, or using a string that -represents an ISO 8601 date. For instance, all of the values below are -valid timestamp values for Mar 2, 2011, at 04:05:00 AM, GMT:

-
    -
  • 1299038700000
  • -
  • '2011-02-03 04:05+0000'
  • -
  • '2011-02-03 04:05:00+0000'
  • -
  • '2011-02-03 04:05:00.000+0000'
  • -
  • '2011-02-03T04:05+0000'
  • -
  • '2011-02-03T04:05:00+0000'
  • -
  • '2011-02-03T04:05:00.000+0000'
  • -
-

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is --0800. The time zone may be omitted if desired ('2011-02-03 04:05:00'), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible.

-

The time of day may also be omitted ('2011-02-03' or '2011-02-03+0000'), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the date type.

-
-
-

Working with dates

-

Values of the date type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at the center of the range (2^31). The epoch is January 1st, 1970.

-

As for timestamp, a date can be input either as an integer or using a date string. In the latter case, the format should be yyyy-mm-dd (so '2011-02-03' for instance).

-
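For example, a sketch assuming a hypothetical table events(id int PRIMARY KEY, day date):

INSERT INTO events (id, day) VALUES (1, '2011-02-03');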
-
-

Working with times

-

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

-

As for timestamp, a time can be input either as an integer or using a string representing the time. In the latter case, the format should be hh:mm:ss[.fffffffff] (where the sub-second precision is optional and, if provided, can be less than the nanosecond). So for instance, the following are valid inputs for a time:

-
    -
  • '08:12:54'
  • -
  • '08:12:54.123'
  • -
  • '08:12:54.123456'
  • -
  • '08:12:54.123456789'
  • -
-
-
-

Working with durations

-

Values of the duration type are encoded as 3 signed integers of variable length. The first integer represents the number of months, the second the number of days and the third the number of nanoseconds. This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving. Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded as a 64-bit integer.

-

A duration can be input as:

  1. (quantity unit)+ like 12h30m where the unit can be:
     • y: years (12 months)
     • mo: months (1 month)
     • w: weeks (7 days)
     • d: days (1 day)
     • h: hours (3,600,000,000,000 nanoseconds)
     • m: minutes (60,000,000,000 nanoseconds)
     • s: seconds (1,000,000,000 nanoseconds)
     • ms: milliseconds (1,000,000 nanoseconds)
     • us or µs: microseconds (1000 nanoseconds)
     • ns: nanoseconds (1 nanosecond)
  2. ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W
  3. ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]
-

For example:

-
INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-
-

Duration columns cannot be used in a table’s PRIMARY KEY. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if 1mo is greater than 29d without a date -context.

-

A 1d duration is not equal to a 24h one as the duration type has been created to be able to support daylight saving.

-
-
-

Collections

-

CQL supports 3 kinds of collections: Maps, Sets and Lists. The types of these collections are defined by:

-
-collection_type ::=  MAP '<' cql_type ',' cql_type '>'
-                     | SET '<' cql_type '>'
-                     | LIST '<' cql_type '>'
-
-

and their values can be input using collection literals:

-
-collection_literal ::=  map_literal | set_literal | list_literal
-map_literal        ::=  '{' [ term ':' term (',' term : term)* ] '}'
-set_literal        ::=  '{' [ term (',' term)* ] '}'
-list_literal       ::=  '[' [ term (',' term)* ] ']'
-
-

Note however that neither bind_marker nor NULL are supported inside collection literals.

-
-

Noteworthy characteristics

-

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all messages sent by a user”, “events registered by a sensor”…), then collections are not appropriate and a specific table (with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy characteristics and limitations:

-
    -
  • Individual collections are not indexed internally, which means that even to access a single element of a collection, the whole collection has to be read (and reading one is not paged internally).
  • -
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. Further, some list operations are not idempotent by nature (see the section on lists below for details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when possible.
  • -
-

Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an anti-pattern to use a (single) collection to store large amounts of data.

-
-
-

Maps

-

A map is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:

-
CREATE TABLE users (
-    id text PRIMARY KEY,
-    name text,
-    favs map<text, text> // A map of text keys, and text values
-);
-
-INSERT INTO users (id, name, favs)
-           VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-// Replace the existing map entirely.
-UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-
-

Further, maps support:

-
    -
  • Updating or inserting one or more elements:

    -
    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    -UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
    -
    -
    -
  • -
  • Removing one or more element (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    -UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
    -
    -
    -

    Note that for removing multiple elements in a map, you remove from it a set of keys.

    -
  • -
-

Lastly, TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated elements. In other words:

-
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

-
-
-

Sets

-

A set is a (sorted) collection of unique values. You can define and insert a set with:

-
CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    tags set<text> // A set of text values
-);
-
-INSERT INTO images (name, owner, tags)
-            VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-// Replace the existing set entirely
-UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-
-

Further, sets support:

-
    -
  • Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):

    -
    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
  • Removing one or multiple elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
-

Lastly, as for maps, TTLs if used only apply to the newly inserted values.

-
-
-

Lists

-
-

Note

-

As mentioned above and further discussed at the end of this section, lists have limitations and specific -performance considerations that you should take into account before using them. In general, if you can use a -set instead of list, always prefer a set.

-
-

A list is a (sorted) collection of non-unique values where elements are ordered by their position in the list. You can define and insert a list with:

-
CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int> // A list of integers
-)
-
-INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-// Replace the existing list entirely
-UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-
-

Further, lists support:

-
    -
  • Appending and prepending values to a list:

    -
    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    -UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
    -
    -
    -
  • -
  • Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small:

    -
    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
    -
    -
    -
  • -
  • Removing an element by its position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted:

    -
    DELETE scores[1] FROM plays WHERE id = '123-afde';
    -
    -
    -
  • -
  • Deleting all the occurrences of particular values in the list (if a particular element doesn’t occur at all in the -list, it is simply ignored and no error is thrown):

    -
    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
    -
    -
    -
  • -
-
-

Warning

-

The append and prepend operations are not idempotent by nature. So in particular, if one of these operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value twice.

-
-
-

Warning

-

Setting and removing an element by position and removing occurrences of particular values incur an internal read-before-write. They will thus run more slowly and take more resources than usual updates (with the exclusion of conditional writes, which have their own cost).

-
-

Lastly, as for maps, TTLs, when used, only apply to the newly inserted values.

-
-
-
-

User-Defined Types

-

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using the create_type_statement, alter_type_statement and drop_type_statement described below. But once created, a UDT is simply referred to by its name:

-
-user_defined_type ::=  udt_name
-udt_name          ::=  [ keyspace_name '.' ] identifier
-
-
-

Creating a UDT

-

Creating a new user-defined type is done using a CREATE TYPE statement defined by:

-
-create_type_statement ::=  CREATE TYPE [ IF NOT EXISTS ] udt_name
-                               '(' field_definition ( ',' field_definition )* ')'
-field_definition      ::=  identifier cql_type
-
-

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any type, including collections or other UDTs. For instance:

-
CREATE TYPE phone (
-    country_code int,
-    number text,
-)
-
-CREATE TYPE address (
-    street text,
-    city text,
-    zip text,
-    phones map<text, phone>
-)
-
-CREATE TABLE user (
-    name text PRIMARY KEY,
-    addresses map<text, frozen<address>>
-)
-
-
-

Note that:

-
    -
  • Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the type already exists (see the sketch after this list).
  • -
  • A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in the current keyspace.
  • -
  • As of Cassandra 4.0-alpha1, UDTs have to be frozen in most cases, hence the frozen<address> in the table definition above. Please see the section on frozen for more details.
  • -
-
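As a minimal sketch of the first two points (re-running the phone definition from above; the ks keyspace is purely hypothetical and would have to exist already):

// No-op, since phone was already created above
CREATE TYPE IF NOT EXISTS phone (
    country_code int,
    number text
);

// Hypothetical: create the type explicitly in keyspace 'ks' rather than the current keyspace
CREATE TYPE IF NOT EXISTS ks.phone (
    country_code int,
    number text
);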
-
-

UDT literals

-

Once a user-defined type has been created, values can be input using a UDT literal:

-
-udt_literal ::=  '{' identifier ':' term ( ',' identifier ':' term )* '}'
-
-

In other words, a UDT literal is like a map literal but its keys are the names of the fields of the type. For instance, one could insert into the table defined in the previous section using:

-
INSERT INTO user (name, addresses)
-          VALUES ('z3 Pr3z1den7', {
-              'home' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                            'landline' : { country_code: 1, number: '...' } }
-              },
-              'work' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'fax' : { country_code: 1, number: '...' } }
-              }
-          })
-
-
-

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields (in which case those will be null).
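For example, a sketch against the user table above (the name 'jdoe' is hypothetical) in which most fields are omitted; every field not listed ends up null:

// street, zip and phones of the 'home' address will be null
INSERT INTO user (name, addresses)
          VALUES ('jdoe', { 'home' : { city: 'Washington' } });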

-
-
-

Altering a UDT

-

An existing user-defined type can be modified using an ALTER TYPE statement:

-
-alter_type_statement    ::=  ALTER TYPE udt_name alter_type_modification
-alter_type_modification ::=  ADD field_definition
-                             | RENAME identifier TO identifier ( identifier TO identifier )*
-
-

You can:

-
    -
  • add a new field to the type (ALTER TYPE address ADD country text). That new field will be null for any values of the type created before the addition.
  • -
  • rename the fields of the type (ALTER TYPE address RENAME zip TO zipcode).
  • -
-
-
-

Dropping a UDT

-

You can drop an existing user-defined type using a DROP TYPE statement:

-
-drop_type_statement ::=  DROP TYPE [ IF EXISTS ] udt_name
-
-

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is still in use by another type, table or function will result in an error.

-

If the type dropped does not exist, an error will be returned unless IF EXISTS is used, in which case the operation is a no-op.
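A hedged example, reusing the address type defined earlier (it will only succeed once no table, type or function references the type any more):

DROP TYPE IF EXISTS address;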

-
-
-
-

Tuples

-

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

-
-tuple_type    ::=  TUPLE '<' cql_type ( ',' cql_type )* '>'
-tuple_literal ::=  '(' term ( ',' term )* ')'
-
-

and can be used as follows:

-
CREATE TABLE durations (
-    event text,
-    duration tuple<int, text>,
-)
-
-INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-
-

Unlike other “composed” types (collections and UDTs), a tuple is always frozen (without the need for the frozen keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of those values can be null, but they need to be explicitly declared as such).
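For instance, a sketch on the durations table above (the values 'ev2' and 90 are made up for illustration); the second position has to be supplied explicitly, even if only as null:

INSERT INTO durations (event, duration) VALUES ('ev2', (90, null));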

-
-
-

Custom Types

-
-

Note

-

Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is complex, not user friendly, and the other provided types, particularly user-defined types, should almost always be enough.

-
-

A custom type is defined by:

-
-custom_type ::=  string
-
-

A custom type is a string that contains the name of a Java class that extends the server side AbstractType class and that can be loaded by Cassandra (it should thus be in the CLASSPATH of every node running Cassandra). That class will define what values are valid for the type and how values sort when used for a clustering column. For any other purpose, a value of a custom type is the same as that of a blob, and can in particular be input using the blob literal syntax.
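Purely as an illustrative sketch (the legacy table is hypothetical; org.apache.cassandra.db.marshal.BytesType is one of the classes shipped with Cassandra), the quoted Java class name takes the place of a CQL type in a column definition:

CREATE TABLE legacy (
    id int PRIMARY KEY,
    val 'org.apache.cassandra.db.marshal.BytesType' // behaves like a blob for most purposes
);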

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/data_modeling/index.html b/src/doc/4.0-alpha1/data_modeling/index.html deleted file mode 100644 index 1e0df9f8c..000000000 --- a/src/doc/4.0-alpha1/data_modeling/index.html +++ /dev/null @@ -1,105 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Data Modeling" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Modeling

-
-

Todo

-

TODO

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/ci.html b/src/doc/4.0-alpha1/development/ci.html deleted file mode 100644 index bd7cf4aeb..000000000 --- a/src/doc/4.0-alpha1/development/ci.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Jenkins CI Environment" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Jenkins CI Environment

-
-

About CI testing and Apache Cassandra

-

Cassandra can be automatically tested using various test suites that are either implemented based on JUnit or the dtest scripts written in Python. As outlined in Testing, each kind of test suite addresses a different way to test Cassandra. But in the end, all of them will be executed together on our CI platform at builds.apache.org, running Jenkins.

-
-
-

Setting up your own Jenkins server

-

Jenkins is an open source solution that can be installed on a large number of platforms. Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution.

-

Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment.

-
-

Required plugins

-

The following plugins need to be installed in addition to the standard plugins (git, ant, ..).

-

You can install any missing plugins through the install manager.

-

Go to Manage Jenkins -> Manage Plugins -> Available and install the following plugins and respective dependencies:

-
    -
  • Job DSL
  • -
  • Javadoc Plugin
  • -
  • description setter plugin
  • -
  • Throttle Concurrent Builds Plug-in
  • -
  • Test stability history
  • -
  • Hudson Post build task
  • -
-
-
-

Setup seed job

-

Config New Item

-
    -
  • Name it Cassandra-Job-DSL
  • -
  • Select Freestyle project
  • -
-

Under Source Code Management select Git using the repository: https://github.com/apache/cassandra-builds

-

Under Build, confirm Add build step -> Process Job DSLs and enter at Look on Filesystem: jenkins-dsl/cassandra_job_dsl_seed.groovy

-

Generated jobs will be created based on the Groovy script’s default settings. You may want to override settings by checking This project is parameterized and adding a String Parameter for each of the variables found at the top of the script. This will allow you to set up jobs for your own repository and branches (e.g. working branches).

-

When done, confirm “Save”

-

You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message “Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use”. Go to Manage Jenkins -> In-process Script Approval to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates.

-

Jobs are either triggered by changes in Git or scheduled to execute periodically, e.g. on a daily basis. Jenkins will use any available executor with the label “cassandra”, once the job is to be run. Please make sure to make any executors available by selecting Build Executor Status -> Configure -> Add “cassandra” as label and save.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/code_style.html b/src/doc/4.0-alpha1/development/code_style.html deleted file mode 100644 index 3856195e8..000000000 --- a/src/doc/4.0-alpha1/development/code_style.html +++ /dev/null @@ -1,214 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Code Style" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Code Style

-
-

General Code Conventions

-
-
-
-
-
-

Exception handling

-
-
    -
  • Never ever write catch (...) {} or catch (...) { logger.error() } merely to satisfy Java’s compile-time exception checking. Always propagate the exception up or throw RuntimeException (or, if it “can’t happen,” AssertionError). This makes the exceptions visible to automated tests.
  • -
  • Avoid propagating up checked exceptions that no caller handles. Rethrow as RuntimeException (or IOError, if that is more applicable).
  • -
  • Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don’t hide it behind a warn; if it isn’t, no need for the warning.
  • -
  • If you genuinely know an exception indicates an expected condition, it’s okay to ignore it BUT this must be explicitly explained in a comment.
  • -
-
-
-
-

Boilerplate

-
-
    -
  • Avoid redundant @Override annotations when implementing abstract or interface methods.
  • -
  • Do not implement equals or hashcode methods unless they are actually needed.
  • -
  • Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in “real” methods to either.)
  • -
  • Prefer requiring initialization in the constructor to setters.
  • -
  • Avoid redundant this references to member fields or methods.
  • -
  • Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it.
  • -
  • Always include braces for nested levels of conditionals and loops. Only avoid braces for single level.
  • -
-
-
-
-

Multiline statements

-
-
    -
  • Try to keep lines under 120 characters, but use good judgement – it’s better to exceed 120 by a little, than split a line that has no natural splitting points.
  • -
  • When splitting inside a method call, use one line per parameter and align them, like this:
  • -
-
SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(),
-                                         columnFamilies.size(),
-                                         StorageService.getPartitioner());
-
-
-
    -
  • When splitting a ternary, use one line per clause, carry the operator, and align like this:
  • -
-
var = bar == null
-    ? doFoo()
-    : doBar();
-
-
-
-
-
-

Whitespace

-
-
    -
  • Please make sure to use 4 spaces instead of the tab character for all your indentation.
  • -
  • Many lines in many files have a bunch of trailing whitespace… Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn’t have to pay attention to whitespace diffs.
  • -
-
-
-
-

Imports

-

Please observe the following order for your imports:

-
java
-[blank line]
-com.google.common
-org.apache.commons
-org.junit
-org.slf4j
-[blank line]
-everything else alphabetically
-
-
-
-
-

Format files for IDEs

-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/dependencies.html b/src/doc/4.0-alpha1/development/dependencies.html deleted file mode 100644 index 991c9b7c0..000000000 --- a/src/doc/4.0-alpha1/development/dependencies.html +++ /dev/null @@ -1,155 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Dependency Management" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Dependency Management

-

Managing libraries for Cassandra is a bit less straightforward compared to other projects, as the build process is based on ant, maven and manually managed jars. Make sure to follow the steps below carefully and pay attention to any emerging issues in the Jenkins CI Environment and reported related issues on Jira/ML, in case of any project dependency changes.

-

As Cassandra is an Apache product, all included libraries must follow Apache’s software license requirements.

-
-

Required steps to add or update libraries

-
    -
  • Add or replace jar file in lib directory
  • -
  • Add or update lib/license files
  • -
  • Update dependencies in build.xml
      -
    • Add to parent-pom with correct version
    • -
    • Add to all-pom if simple Cassandra dependency (see below)
    • -
    -
  • -
-
-
-

POM file types

-
    -
  • parent-pom - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here.
  • -
  • build-deps-pom(-sources) + coverage-deps-pom - used by ant build compile target. Listed dependencies will be resolved and copied to build/lib/{jar,sources} by executing the maven-ant-tasks-retrieve-build target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution.
  • -
  • test-deps-pom - referred to by maven-ant-tasks-retrieve-test to retrieve and save dependencies to build/test/lib. Exclusively used during JUnit test execution.
  • -
  • all-pom - pom for cassandra-all.jar that can be installed or deployed to public maven repos via ant publish
  • -
  • dist-pom - pom for tarball distribution (cassandra-{bin,src}.tar.gz) created by ant artifacts. Should be left as is, but needed for installing or deploying releases.
  • -
-
-
-

Troubleshooting and conflict resolution

-

Here are some useful commands that may help you out resolving conflicts.

-
    -
  • ant realclean - gets rid of the build directory, including build artifacts.
  • -
  • mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ant mvn-install.
  • -
  • rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/ - removes cached local Cassandra maven artifacts
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/documentation.html b/src/doc/4.0-alpha1/development/documentation.html deleted file mode 100644 index 0d319adfd..000000000 --- a/src/doc/4.0-alpha1/development/documentation.html +++ /dev/null @@ -1,192 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Working on Documentation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Working on Documentation

-
-

How Cassandra is documented

-

The official Cassandra documentation lives in the project’s git repository. We use a static site generator, Sphinx, to create pages hosted at cassandra.apache.org. You’ll also find developer centric content about Cassandra internals in our retired wiki (not covered by this guide).

-

Using a static site generator often requires using a markup language instead of visual editors (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses reStructuredText for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at existing documents to get a better idea of how we use reStructuredText to write our documents.

-

So how do you actually start making contributions?

-
-
-

GitHub based work flow

-

Recommended for shorter documents and minor changes on existing content (e.g. fixing typos or updating descriptions)

-

Follow these steps to contribute using GitHub. It’s assumed that you’re logged in with an existing account.

-
    -
  1. Fork the GitHub mirror of the Cassandra repository
  2. -
-../_images/docs_fork.png -
    -
  1. Create a new branch that you can use to make your edits. It’s recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later, when you decide you’re ready to contribute your work.
  2. -
-../_images/docs_create_branch.png -
    -
  1. Navigate to document sources doc/source to find the .rst file to edit. The URL of the document should correspond to the directory structure. New files can be created using the “Create new file” button:
  2. -
-../_images/docs_create_file.png -
    -
  1. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and add some content. Have a look at other existing .rst files to get a better idea what format elements to use.
  2. -
-../_images/docs_editor.png -

Make sure to preview added content before committing any changes.

-../_images/docs_preview.png -
    -
  1. Commit your work when you’re done. Make sure to add a short description of all your edits since the last time you committed before.
  2. -
-../_images/docs_commit.png -
    -
  1. Finally if you decide that you’re done working on your branch, it’s time to create a pull request!
  2. -
-../_images/docs_pr.png -

Afterwards the GitHub Cassandra mirror will list your pull request and you’re done. Congratulations! Please give us some time to look at your suggested changes before we get back to you.

-
-
-

Jira based work flow

-

Recommended for major changes

-

Significant changes to the documentation are best managed through our Jira issue tracker. Please follow the same contribution guides as for regular code contributions. Creating high quality content takes a lot of effort. It’s therefore always a good idea to create a ticket before you start and explain what you’re planning to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed.

-
-
-

Working on documents locally using Sphinx

-

Recommended for advanced editing

-

Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefore it’s a good idea to set up Sphinx locally for any serious editing. Please follow the instructions in the Cassandra source directory at doc/README.md. Setup is very easy (at least on OSX and Linux).

-
-
-

Notes for committers

-

Please feel free to get involved and merge pull requests created on the GitHub mirror if you’re a committer. As this is a read-only repository, you won’t be able to merge a PR directly on GitHub. You’ll have to commit the changes against the Apache repository with a comment that will close the PR when the commit syncs with GitHub.

-

You may use a git work flow like this:

-
git remote add github https://github.com/apache/cassandra.git
-git fetch github pull/<PR-ID>/head:<PR-ID>
-git checkout <PR-ID>
-
-
-

Now either rebase or squash the commit, e.g. for squashing:

-
git reset --soft origin/trunk
-git commit --author <PR Author>
-
-
-

Make sure to add a proper commit message including a “Closes #<PR-ID>” text to automatically close the PR.

-
-

Publishing

-

Details for building and publishing of the site at cassandra.apache.org can be found here.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/gettingstarted.html b/src/doc/4.0-alpha1/development/gettingstarted.html deleted file mode 100644 index 31273e3b4..000000000 --- a/src/doc/4.0-alpha1/development/gettingstarted.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Getting Started" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Getting Started

-
-

Initial Contributions

-
-
Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). So, before firing up your IDE to create that new feature, we’d suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work.
-
    -
  • Add to or update the documentation
  • -
  • Answer questions on the user list
  • -
  • Review and test a submitted patch
  • -
  • Investigate and fix a reported bug
  • -
  • Create unit tests and d-tests
  • -
-
-
-
-
-

Updating documentation

-

The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (Contributing Code Changes).

-
-
-

Answering questions on the user list

-

Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! See the community page for details on how to subscribe to the mailing list.

-
-
-

Reviewing and testing a submitted patch

-

Reviewing patches is not the sole domain of committers. If others have reviewed a patch, it can reduce the load on the committers, allowing them to write more great features or review more patches. Follow the instructions in _development_how_to_review or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, “I tested this performance enhancement on our application’s standard production load test and found a 3% improvement.”)

-
-
-

Investigate and/or fix a reported bug

-

Often, the hardest work in fixing a bug is reproducing it. Even if you don’t have the knowledge to produce a fix, figuring out a way to reliably reproduce an issue can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (Contributing Code Changes).

-
-
-

Create unit tests and Dtests

-

Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your change introducing new issues. See Testing and Contributing Code Changes for more detail.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/how_to_commit.html b/src/doc/4.0-alpha1/development/how_to_commit.html deleted file mode 100644 index 5345546e7..000000000 --- a/src/doc/4.0-alpha1/development/how_to_commit.html +++ /dev/null @@ -1,186 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "How-to Commit" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

How-to Commit

-

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself.

-

Here is how committing and merging will usually look for merging and pushing tickets that follow the convention (if patch-based):

-

Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).

-
-
On cassandra-3.0:
-
    -
  1. git am -3 12345-3.0.patch (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git apply -3 12345-3.3.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git apply -3 12345-trunk.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-

Same scenario, but a branch-based contribution:

-
-
On cassandra-3.0:
-
    -
  1. git cherry-pick <sha-of-3.0-commit> (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git format-patch -1 <sha-of-3.3-commit>
  4. -
  5. git apply -3 <sha-of-3.3-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git format-patch -1 <sha-of-trunk-commit>
  4. -
  5. git apply -3 <sha-of-trunk-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-
-

Tip

-

Notes on git flags: the -3 flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply.

-

The --atomic flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue.

-
-
-

Tip

-

The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/how_to_review.html b/src/doc/4.0-alpha1/development/how_to_review.html deleted file mode 100644 index ffb014f56..000000000 --- a/src/doc/4.0-alpha1/development/how_to_review.html +++ /dev/null @@ -1,178 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Review Checklist" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Review Checklist

-

When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process:

-

General

-
-
    -
  • Does it conform to the Code Style guidelines?
  • -
  • Is there any redundant or duplicate code?
  • -
  • Is the code as modular as possible?
  • -
  • Can any singletons be avoided?
  • -
  • Can any of the code be replaced with library functions?
  • -
  • Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem?
  • -
-
-

Error-Handling

-
-
    -
  • Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded?
  • -
  • Where third-party utilities are used, are returning errors being caught?
  • -
  • Are invalid parameter values handled?
  • -
  • Are any Throwable/Exceptions passed to the JVMStabilityInspector?
  • -
  • Are errors well-documented? Does the error message tell the user how to proceed?
  • -
  • Do exceptions propagate to the appropriate level in the code?
  • -
-
-

Documentation

-
-
    -
  • Do comments exist and describe the intent of the code (the “why”, not the “how”)?
  • -
  • Are javadocs added where appropriate?
  • -
  • Is any unusual behavior or edge-case handling described?
  • -
  • Are data structures and units of measurement explained?
  • -
  • Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’?
  • -
  • Does the code self-document via clear naming, abstractions, and flow control?
  • -
  • Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed?
  • -
  • Is the ticket tagged with “client-impacting” and “doc-impacting”, where appropriate?
  • -
  • Has lib/licences been updated for third-party libs? Are they Apache License compatible?
  • -
  • Is the Component on the JIRA ticket set appropriately?
  • -
-
-

Testing

-
-
    -
  • Is the code testable? i.e. it doesn’t add too many dependencies or hide them, objects can be initialized, test frameworks can exercise its methods, etc.
  • -
  • Do tests exist and are they comprehensive?
  • -
  • Do unit tests actually test that the code is performing the intended functionality?
  • -
  • Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse?
  • -
  • If the code may be affected by multi-node clusters, are there dtests?
  • -
  • If the code may take a long time to test properly, are there CVH tests?
  • -
  • Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions?
  • -
  • If patch affects read/write path, did we test for performance regressions w/multiple workloads?
  • -
  • If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature?
  • -
-
-

Logging

-
-
    -
  • Are logging statements logged at the correct level?
  • -
  • Are there logs in the critical path that could affect performance?
  • -
  • Is there any log that could be added to communicate status or troubleshoot potential problems in this feature?
  • -
  • Can any unnecessary logging statement be removed?
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/ide.html b/src/doc/4.0-alpha1/development/ide.html deleted file mode 100644 index 524e16498..000000000 --- a/src/doc/4.0-alpha1/development/ide.html +++ /dev/null @@ -1,267 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Building and IDE Integration" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Building and IDE Integration

-
-

Building From Source

-

Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using Java 8, Git and Ant.

-

The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:

-
git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk
-
-
-

Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:

-
git checkout cassandra-3.0
-
-
-

You can get a list of available branches with git branch.

-

Finally build Cassandra using ant:

-
ant
-
-
-

This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled.

-
-

Hint

-

You can setup multiple working trees for different Cassandra versions from the same repository using git-worktree.

-
-
-

-
-
-
-

Setting up Cassandra in IntelliJ IDEA

-

IntelliJ IDEA by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra.

-
-

Setup Cassandra as a Project (C* 2.1 and newer)

-

Since 2.1.5, there is a new ant target: generate-idea-files. Please see our wiki for instructions for older Cassandra versions.

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Once Cassandra is built, generate the IDEA files using ant:
  2. -
-
ant generate-idea-files
-
-
-
    -
  1. Start IDEA
  2. -
  3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA’s File menu
  4. -
-

The project generated by the ant task generate-idea-files contains nearly everything you need to debug Cassandra and execute unit tests.

-
-
    -
  • Run/debug defaults for JUnit
  • -
  • Run/debug configuration for Cassandra daemon
  • -
  • License header for Java source files
  • -
  • Cassandra code style
  • -
  • Inspections
  • -
-
-
-

-
-
-
-
-

Opening Cassandra in Apache NetBeans

-

Apache NetBeans is the elder of the open sourced java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans.

-
-

Open Cassandra as a Project (C* 4.0 and newer)

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Start Apache NetBeans
  2. -
  3. Open the NetBeans project from the ide/ folder of the checked out Cassandra directory using the menu item “Open Project…” in NetBeans’ File menu
  4. -
-

The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant build.xml script.

-
-
    -
  • Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu.
  • -
  • Profile Project is available via the Profile menu. In the opened Profiler tab, click the green “Profile” button.
  • -
  • Cassandra’s code style is honored in ide/nbproject/project.properties
  • -
-
-

The JAVA8_HOME system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute.

-
-

-
-
-
-
-

Setting up Cassandra in Eclipse

-

Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the download page. The following guide was created with “Eclipse IDE for Java Developers”.

-

These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x.

-
-

Project Settings

-

It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.

-
-
    -
  • Clone and build Cassandra as described above.
  • -
  • Run ant generate-eclipse-files to create the Eclipse settings.
  • -
  • Start Eclipse.
  • -
  • Select File->Import->Existing Projects into Workspace->Select git directory.
  • -
  • Make sure “cassandra-trunk” is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above).
  • -
  • Confirm “Finish” to have your project imported.
  • -
-
-

You should now be able to find the project as part of the “Package Explorer” or “Project Explorer” without having Eclipse complain about any errors after building the project automatically.

-
-
-

Unit Tests

-

Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting Run As->JUnit Test. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting Debug As->JUnit Test.

-

Alternatively all unit tests can be run from the command line as described in Testing

-
-
-

Debugging Cassandra Using Eclipse

-

There are two ways to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ./bin/cassandra script and connect to the JVM remotely from Eclipse, or start Cassandra from Eclipse right away.

-
-

Starting Cassandra From Command Line

-
-
    -
  • Set environment variable to define remote debugging options for the JVM: export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"
  • -
  • Start Cassandra by executing the ./bin/cassandra
  • -
-
-

Afterwards you should be able to connect to the running Cassandra process through the following steps:

-

From the menu, select Run->Debug Configurations..

-../_images/eclipse_debug0.png -

Create new remote application

-../_images/eclipse_debug1.png -

Configure connection settings by specifying a name and port 1414

-../_images/eclipse_debug2.png -

Afterwards confirm “Debug” to connect to the JVM and start debugging Cassandra!

-
-
-

Starting Cassandra From Eclipse

-

Cassandra can also be started directly from Eclipse if you don’t want to use the command line.

-

From the menu, select Run->Run Configurations..

-../_images/eclipse_debug3.png -

Create new application

-../_images/eclipse_debug4.png -

Specify name, project and main class org.apache.cassandra.service.CassandraDaemon

-../_images/eclipse_debug5.png -

Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed.

-
-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true
-
-
-../_images/eclipse_debug6.png -

Now just confirm “Debug” and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging!

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/index.html b/src/doc/4.0-alpha1/development/index.html deleted file mode 100644 index 1d4d01b2b..000000000 --- a/src/doc/4.0-alpha1/development/index.html +++ /dev/null @@ -1,184 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contributing to Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- - -
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/patches.html b/src/doc/4.0-alpha1/development/patches.html deleted file mode 100644 index 09b5d05ec..000000000 --- a/src/doc/4.0-alpha1/development/patches.html +++ /dev/null @@ -1,273 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Contributing Code Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contributing Code Changes

-
-

Choosing What to Work on

-

Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you’re addressing.

-
-
As a general rule of thumb:
-
    -
  • Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the developer community
  • -
  • Bug fixes take higher priority compared to features
  • -
  • The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
  • -
  • Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately
  • -
-
-
-
-

Hint

-

Not sure what to work on? Just pick an issue marked as Low Hanging Fruit Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

-
-
-
-

Before You Start Coding

-

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it’s generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or Slack.

-
-
You should also
-
    -
  • Avoid redundant work by searching for already reported issues in JIRA
  • -
  • Create a new issue early in the process describing what you’re working on - not just after finishing your patch
  • -
  • Link related JIRA issues with your own ticket to provide a better context
  • -
  • Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
  • -
  • Ping the people you would like to ask for advice on JIRA by mentioning them
  • -
-
-
There are also some fixed rules that you need to be aware of:
-
    -
  • Patches will only be applied to branches by following the release model
  • -
  • Code must be testable
  • -
  • Code must follow the Code Style convention
  • -
  • Changes must not break compatibility between different Cassandra versions
  • -
  • Contributions must be covered by the Apache License
  • -
-
-
-
-

Choosing the Right Branches to Work on

-

There are currently multiple Cassandra versions maintained in individual branches:

- ---- - - - - - - - - - - - - - - - - - - - - - - -
VersionPolicy
4.0Code freeze (see below)
3.11Critical bug fixes only
3.0Critical bug fixes only
2.2Critical bug fixes only
2.1Critical bug fixes only
-

Corresponding branches in git are easy to recognize as they are named cassandra-<release> (e.g. cassandra-3.0). The trunk branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

-
-

4.0 Code Freeze

-

Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. During that time, only the following patches will be considered for acceptance:

-
-
    -
  • Bug fixes
  • -
  • Measurable performance improvements
  • -
  • Changes not distributed as part of the release such as:
  • -
  • Testing related improvements and fixes
  • -
  • Build and infrastructure related changes
  • -
  • Documentation
  • -
-
-
-
-

Bug Fixes

-

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be cassandra-2.1 -> cassandra-2.2 -> cassandra-3.0 -> cassandra-3.x -> trunk. But don’t worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn’t very common. As a contributor, you’re also not expected to provide a single patch for each version. What you need to do however is:

-
-
    -
  • Be clear about which versions you could verify to be affected by the bug
  • -
  • For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on a case-by-case basis
  • -
  • If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0)
  • -
  • Test if the patch can be merged cleanly across branches in the direction listed above
  • -
  • Be clear which branches may need attention by the committer or even create custom patches for those if you can
  • -
-
-
-
-
-
-

Creating a Patch

-

So you’ve finished coding and the great moment arrives: it’s time to submit your patch!

-
-
    -
  1. Create a branch for your changes if you haven’t done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. git checkout -b 12345-3.0
  2. -
  3. Verify that you follow Cassandra’s Code Style
  4. -
  5. Make sure all tests (including yours) pass using ant as described in Testing. If you suspect a test failure is unrelated to your change, it may be useful to check the test’s status by searching the issue tracker or looking at CI results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites.
  6. -
  7. Consider going through the Review Checklist for your code. This will help you to understand how others will consider your change for inclusion.
  8. -
  9. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either:
  10. -
-
-
    -
  1. Attach a patch to JIRA with a single squashed commit in it (per branch), or
  2. -
  3. Squash the commits in-place in your branches into one
  4. -
-
-
    -
  1. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below.

    -
    <One sentence description, usually Jira title and CHANGES.txt summary>
    -<Optional lengthier description>
    -patch by <Authors>; reviewed by <Reviewers> for CASSANDRA-#####
    -
    -
    -
  2. -
  3. When you’re happy with the result, create a patch:

    -
  4. -
-
-
git add <any new or modified file>
-git commit -m '<message>'
-git format-patch HEAD~1
-mv <patch-file> <ticket-branchname.txt> (e.g. 12345-trunk.txt, 12345-3.0.txt)
-
-
-

Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch:

-
git push --set-upstream origin 12345-3.0
-
-
-
-
    -
  1. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless.
  2. -
  3. Attach the newly generated patch to the ticket/add a link to your branch and click “Submit Patch” at the top of the ticket. This will move the ticket into “Patch Available” status, indicating that your submission is ready for review.
  4. -
  5. Wait for other developers or committers to review it and hopefully +1 the ticket (see Review Checklist). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable.
  6. -
  7. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into “Patch Available” once again.
  8. -
-
-

Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/release_process.html b/src/doc/4.0-alpha1/development/release_process.html deleted file mode 100644 index 7e9835929..000000000 --- a/src/doc/4.0-alpha1/development/release_process.html +++ /dev/null @@ -1,380 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Release Process" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Release Process

- -
-

-

-
-
-

Attention

-
-
WORK IN PROGRESS
-
    -
  • A number of these steps still have to be finalised/tested.
  • -
  • The use of people.apache.org needs to be replaced with svnpubsub and dist.apache.org
  • -
-
-
-
-

The steps for Release Managers to create, vote and publish releases for Apache Cassandra.

-

While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC can complete the process of publishing and announcing the release.

-
-

Prerequisites

-
-
Background docs
-
-
-
-

A debian based linux OS is required to run the release steps from. Debian-based distros provide the required RPM, dpkg and repository management tools.

-
-

Create and publish your GPG key

-

To create a GPG key, follow the guidelines. Include your public key in:

-
https://dist.apache.org/repos/dist/release/cassandra/KEYS
-
-
-

Publish your GPG key in a PGP key server, such as MIT Keyserver.

-
-
-
-

Create Release Artifacts

-

Any committer can perform the following steps to create and call a vote on a proposed release.

-

Check that no open Jira tickets are urgent and currently being worked on. Also check with a PMC member whether there are security vulnerabilities currently being worked on in private.

-
-

Perform the Release

-

Run the following commands to generate and upload release artifacts, to a nexus staging repository and distribution location:

-
cd ~/git
-git clone https://github.com/apache/cassandra-builds.git
-# Edit the variables at the top of `cassandra-builds/cassandra-release/prepare_release.sh`
-
-# After cloning cassandra-builds repo, the prepare_release.sh is run from the actual cassandra git checkout,
-# on the branch/commit that we wish to tag for the tentative release along with version number to tag.
-# For example here <version-branch> might be `3.11` and <version> `3.11.3`
-cd ~/git/cassandra/
-git checkout cassandra-<version-branch>
-../cassandra-builds/cassandra-release/prepare_release.sh -v <version>
-
-
-

If successful, take note of the email text output which can be used in the next section “Call for a Vote”.

-

The prepare_release.sh script does not yet generate and upload the rpm distribution packages. To generate and upload them do:

-
cd ~/git/cassandra-build
-docker build -f docker/centos7-image.docker docker/
-docker run --rm -v `pwd`/dist:/dist `docker images -f label=org.cassandra.buildenv=centos -q` /home/build/build-rpms.sh <version>-tentative
-rpmsign --addsign dist/*.rpm
-
-
-

For more information on the above steps see the cassandra-builds documentation. The next step is to copy and commit these binaries to staging svnpubsub:

-
# FIXME the following commands is wrong while people.apache.org is still used instead of svnpubsub and dist.apache.org
-cd ~/git
-svn co https://dist.apache.org/repos/dist/dev/cassandra cassandra-dist-dev
-mkdir cassandra-dist-dev/<version>
-cp cassandra-build/dist/*.rpm cassandra-dist-dev/<version>/
-
-svn add cassandra-dist-dev/<version>
-svn ci cassandra-dist-dev/<version>
-
-
-
-
-
-

Call for a Vote

-

Fill out the following email template and send to the dev mailing list:

-
I propose the following artifacts for release as <version>.
-
-sha1: <git-sha>
-
-Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/<version>-tentative
-
-Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/org/apache/cassandra/apache-cassandra/<version>/
-
-Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/
-
-The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/
-
-The vote will be open for 72 hours (longer if needed).
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>-tentative
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>-tentative
-
-
-
-
-

Post-vote operations

-

Any PMC can perform the following steps to formalize and publish a successfully voted release.

-
-

Publish Artifacts

-

Run the following commands to publish the voted release artifacts:

-
cd ~/git
-git clone https://github.com/apache/cassandra-builds.git
-# edit the variables at the top of `finish_release.sh`
-
-# After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout,
-# on the tentative release tag that we wish to tag for the final release version number tag.
-cd ~/git/cassandra/
-git checkout <version>-tentative
-../cassandra-builds/cassandra-release/finish_release.sh -v <version> <staging_number>
-
-
-

If successful, take note of the email text output which can be used in the next section “Send Release Announcement”. The output will also list the next steps that are required. The first of these is to commit changes made to your https://dist.apache.org/repos/dist/release/cassandra/ checkout.

-
-
-

Promote Nexus Repository

-
-
    -
  • Login to Nexus repository again.
  • -
  • Click on “Staging” and then on the repository with id “cassandra-staging”.
  • -
  • Find your closed staging repository, right click on it and choose “Promote”.
  • -
  • Select the “Releases” repository and click “Promote”.
  • -
  • Next click on “Repositories”, select the “Releases” repository and validate that your artifacts exist as you expect them.
  • -
-
-
-
-

Sign and Upload Distribution Packages to Bintray

-

Run the following command:

-
cd ~/git
-# FIXME the next command is wrong while people.apache.org is used instead of svnpubsub and dist.apache.org
-svn mv https://dist.apache.org/repos/dist/dev/cassandra/<version> https://dist.apache.org/repos/dist/release/cassandra/
-
-# Create the yum metadata, sign the metadata, and sign some files within the signed repo metadata that the ASF sig tool errors out on
-svn co https://dist.apache.org/repos/dist/release/cassandra/redhat/ cassandra-dist-redhat
-cd cassandra-dist-redhat/<abbreviated-version>x/
-createrepo .
-gpg --detach-sign --armor repodata/repomd.xml
-for f in `find repodata/ -name '*.bz2'`; do
-  gpg --detach-sign --armor $f;
-done
-
-svn co https://dist.apache.org/repos/dist/release/cassandra/<version> cassandra-dist-<version>
-cd cassandra-dist-<version>
-cassandra-build/cassandra-release/upload_bintray.sh cassandra-dist-<version>
-
-
-
-
-

Update and Publish Website

-

See the docs at https://svn.apache.org/repos/asf/cassandra/site/src/README for building and publishing the website. Also update the CQL doc if appropriate.

-
-
-

Release version in JIRA

-

Release the JIRA version.

-
-
    -
  • In JIRA go to the version that you want to release and release it.
  • -
  • Create a new version, if it has not been done before.
  • -
-
-
-
-

Update to Next Development Version

-

Edit and commit build.xml so the base.version property points to the next version.

-
- -
-

Send Release Announcement

-

Fill out the following email template and send to both user and dev mailing lists:

-
The Cassandra team is pleased to announce the release of Apache Cassandra version <version>.
-
-Apache Cassandra is a fully distributed database. It is the right choice
-when you need scalability and high availability without compromising
-performance.
-
- http://cassandra.apache.org/
-
-Downloads of source and binary distributions are listed in our download
-section:
-
- http://cassandra.apache.org/download/
-
-This version is <the first|a bug fix> release[1] on the <version-base> series. As always,
-please pay attention to the release notes[2] and let us know[3] if you
-were to encounter any problem.
-
-Enjoy!
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>
-[3]: https://issues.apache.org/jira/browse/CASSANDRA
-
-
-
-
-

Update Slack Cassandra topic

-
-
Update topic in cassandra Slack room
-
/topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don’t ask to ask
-
-
-
-

Tweet from @Cassandra

-

Tweet the new release, from the @Cassandra account

-
-
-

Delete Old Releases

-

As described in When to Archive. Also check people.apache.org, as previous release scripts used it.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/development/testing.html b/src/doc/4.0-alpha1/development/testing.html deleted file mode 100644 index 32912d932..000000000 --- a/src/doc/4.0-alpha1/development/testing.html +++ /dev/null @@ -1,184 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Testing" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Testing

-

Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you’re working on.

-
-

Unit Testing

-

The simplest way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the test/unit directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible, as Cassandra doesn’t have a very mock-friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods, as in the following example.

-
@Test
-public void testBatchAndList() throws Throwable
-{
-   createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
-   execute("BEGIN BATCH " +
-           "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
-           "APPLY BATCH");
-
-   assertRows(execute("SELECT l FROM %s WHERE k = 0"),
-              row(list(1, 2, 3)));
-}
-
-
-

Unit tests can be run from the command line using the ant test command: ant test -Dtest.name=<simple_classname> executes a test suite, while ant testsome -Dtest.name=<FQCN> -Dtest.methods=<testmethod1>[,testmethod2] runs individual tests. For example, to run all test methods in the org.apache.cassandra.cql3.SimpleQueryTest class, you would run:

-
ant test -Dtest.name=SimpleQueryTest
-
-
-

To run only the testStaticCompactTables() test method from that class, you would run:

-
ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables
-
-
-

If you see an error like this:

-
Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found:
-org/apache/tools/ant/taskdefs/optional/junit/JUnitTask  using the classloader
-AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar]
-
-
-

You will need to install the ant-optional package since it contains the JUnitTask class.

-
-

Long running tests

-

Tests that consume a significant amount of time during execution can be found in the test/long directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under test/long only when using the ant long-test target.

-
-
-
-

DTests

-

One way of doing integration or system testing at larger scale is by using dtest, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ccmlib from the ccm project. Dtests will setup clusters using this library just as you do running ad-hoc ccm commands on your local machine. Afterwards dtests will use the Python driver to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes.

-

Using dtests helps us to prevent regression bugs by continually executing tests on the CI server against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration here.

-

The best way to learn how to write dtests is probably by reading the introduction “How to Write a Dtest” and by looking at existing, recently updated tests in the project. New tests must follow certain style conventions that are checked before accepting contributions. In contrast to Cassandra, dtest issues and pull requests are managed on GitHub, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR.

-

Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you.

-
-
-

Performance Testing

-

Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable.

-
-

Cassandra Stress Tool

-

See Cassandra Stress

-
-
-

cstar_perf

-

Another tool available on github is cstar_perf that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it.

-
-
-

CircleCI

-

Cassandra ships with a default CircleCI configuration. To enable running tests on your branches, go to the CircleCI website, click “Login” and log in with your GitHub account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel: click “Projects”, then your GitHub account and then the settings for the project. If you leave the parallelism at 1 for Cassandra, only ant eclipse-warnings and ant test will be run. If you up the parallelism to 4, it also runs ant long-test, ant test-compression and ant stress-test.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/faq/index.html b/src/doc/4.0-alpha1/faq/index.html deleted file mode 100644 index 322a11ac3..000000000 --- a/src/doc/4.0-alpha1/faq/index.html +++ /dev/null @@ -1,317 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Frequently Asked Questions" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
- -
-
-
-
- -
-

Frequently Asked Questions

- -
-

Why can’t I set listen_address to listen on 0.0.0.0 (all my addresses)?

-

Cassandra is a gossip-based distributed system and listen_address is the address a node tells other nodes to reach it at. Telling other nodes “contact me on any of my addresses” is a bad idea; if different nodes in the cluster pick different addresses for you, Bad Things happen.

-

If you don’t want to manually specify an IP to listen_address for each node in your cluster (understandable!), leave it blank and Cassandra will use InetAddress.getLocalHost() to pick an address. Then it’s up to you or your ops team to make things resolve correctly (/etc/hosts/, dns, etc).

-

One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769).

-

See CASSANDRA-256 and CASSANDRA-43 for more gory details.

-
-
-

What ports does Cassandra use?

-

By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, and 7199 for JMX. The internode communication and native protocol ports are configurable in the Cassandra Configuration File. The JMX port is configurable in cassandra-env.sh (through JVM options). All ports are TCP.
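As a quick check on a running node (a minimal sketch; it assumes the default ports above and that the ss utility from iproute2 is installed):

$ ss -ltn '( sport = :7000 or sport = :9042 or sport = :7199 )'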

-
-
-

What happens to existing data in my cluster when I add new nodes?

-

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See Adding, replacing, moving and removing nodes.

-
-
-

I delete data from Cassandra, but disk usage stays the same. What gives?

-

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can’t actually be removed when you perform a delete; instead, a marker (also called a “tombstone”) is written to indicate the value’s new status. Never fear though: on the first compaction that occurs between the data and the tombstone, the data will be expunged completely and the corresponding disk space recovered. See Compaction for more detail.

-
-
-

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?

-

This happens when you have the same token assigned to each node. Don’t do that.

-

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

-

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random token on the next restart.

-
-
-

Can I change the replication factor (of a keyspace) on a live cluster?

-

Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data (a sketch of the commands follows the list below):

-
    -
  • Alter the replication factor for desired keyspace (using cqlsh for instance).
  • -
  • If you’re reducing the replication factor, run nodetool cleanup on the cluster to remove surplus replicated data. Cleanup runs on a per-node basis.
  • -
  • If you’re increasing the replication factor, run nodetool repair -full to ensure data is replicated according to the new configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster performance. It’s highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will most likely swamp it. Note that you will need to run a full repair (-full) to make sure that already repaired sstables are not skipped.
  • -
-
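As a sketch of the steps above (the keyspace and datacenter names ks1 and dc1 are hypothetical; adjust to your topology):

$ cqlsh -e "ALTER KEYSPACE ks1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};"
$ nodetool cleanup ks1        # on every node, if the replication factor was reduced
$ nodetool repair -full ks1   # on every node (rolling), if the replication factor was increased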
-
-

Can I Store (large) BLOBs in Cassandra?

-

Cassandra isn’t optimized for large file or BLOB storage and a single blob value is always read and sent to the client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.
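One common way to do that (a sketch only; the keyspace and table names are hypothetical) is to store one chunk per clustering row:

$ cqlsh -e "CREATE TABLE ks1.blob_chunks (object_id uuid, chunk_id int, data blob, PRIMARY KEY (object_id, chunk_id));"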

-

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the max_mutation_size_in_kb configuration of the Cassandra Configuration File (which defaults to half of commitlog_segment_size_in_mb, which itself defaults to 32MB).

-
-
-

Nodetool says “Connection refused to host: 127.0.1.1” for any remote host. What gives?

-

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

-

If you are not using DNS, then make sure that your /etc/hosts files are accurate on both ends. If that fails, try setting the -Djava.rmi.server.hostname=<public name> JVM option near the bottom of cassandra-env.sh to an interface that you can reach from the remote machine.
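For example, the line added to cassandra-env.sh could look like the following (a sketch; node1.example.com is a placeholder for a name the remote machine can resolve):

JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=node1.example.com"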

-
-
-

Will batching my operations speed up my bulk load?

-

No. Using batches to load data will generally just add “spikes” of latency. Use asynchronous INSERTs instead, or use true Bulk Loading.

-

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch stays reasonable). But never ever blindly batch everything!

-
-
-

On RHEL nodes are unable to join the ring

-

Check if SELinux is on; if it is, turn it off.

-
-
-

How do I unsubscribe from the email list?

-

Send an email to user-unsubscribe@cassandra.apache.org.

-
-
-

Why does top report that Cassandra is using a lot more memory than the Java heap max?

-

Cassandra uses Memory Mapped Files (mmap) internally. That is, we use the operating system’s virtual memory system to map a number of on-disk files into the Cassandra process’ address space. This will “use” virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64-bit systems virtual address space is effectively unlimited so you should not worry about that.

-

What matters from the perspective of “memory use” in the sense as it is normally meant, is the amount of data allocated on brk() or mmap’d /dev/zero, which represent real memory used. The key issue is that for a mmap’d file, there is never a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that you read/write.

-

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don’t even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more detail here.

-
-
-

What are seeds?

-

Seeds are used during startup to discover the cluster.

-

If you configure your nodes to refer to some node as a seed, nodes in your ring tend to send gossip messages to seeds more often (also see the section on gossip) than to non-seeds. In other words, seeds work as hubs of the gossip network. With seeds, each node can detect status changes of other nodes quickly.

-

Seeds are also referred to by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node joins the ring, it learns about the other nodes, so it doesn’t need a seed on subsequent boots.

-

You can make a node a seed at any time. There is nothing special about seed nodes. If you list the node in the seed list, it is a seed.

-

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to the seeds later. If you have no data (new install) you do not have to worry about bootstrap at all.

-

Recommended usage of seeds:

-
    -
  • pick two (or more) nodes per data center as seed nodes.
  • -
  • sync the seed list to all your nodes
  • -
-
-
-

Does single seed mean single point of failure?

-

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

-
-
-

Why can’t I call jmx method X on jconsole?

-

Some JMX operations use array arguments and, as jconsole doesn’t support array arguments, those operations can’t be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.

-
-
-

Why do I see “… messages dropped …” in the logs?

-

This is a symptom of load shedding – Cassandra defending itself against more requests than it can handle.

-

Internode messages which are received by a node, but do not get processed within their proper timeout (see read_request_timeout, write_request_timeout, … in the Cassandra Configuration File), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

-

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

-

For reads, this means a read request may not have completed.

-

Load shedding is part of the Cassandra architecture; if this is a persistent issue it is generally a sign of an overloaded node or cluster.

-
-
-

Cassandra dies with java.lang.OutOfMemoryError: Map failed

-

If Cassandra is dying specifically with the “Map failed” message, it means the OS is denying Java the ability to lock more memory. In Linux, this typically means memlock is limited. Check /proc/<pid of cassandra>/limits to verify this and raise it (eg, via ulimit in bash). You may also need to increase vm.max_map_count. Note that the Debian package handles this for you automatically.
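A quick way to inspect and adjust these settings (a sketch; it assumes a running CassandraDaemon process and root access):

$ grep "Max locked memory" /proc/$(pgrep -f CassandraDaemon)/limits
$ sudo sysctl -w vm.max_map_count=1048575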

-
-
-

What happens if two updates are made with the same timestamp?

-

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger value is selected.

-
-
-

Why does bootstrapping a new node fail with a “Stream failed” error?

-

Two main possibilities:

-
    -
  1. the GC may be creating long pauses disrupting the streaming process
  2. -
  3. compactions happening in the background hold streaming long enough that the TCP connection fails
  4. -
-

In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (the default is very high on Linux). Try to just run the following:

-
$ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-
-

To make those settings permanent, add them to your /etc/sysctl.conf file.
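For example (a sketch; requires root):

$ printf 'net.ipv4.tcp_keepalive_time=60\nnet.ipv4.tcp_keepalive_intvl=60\nnet.ipv4.tcp_keepalive_probes=5\n' | sudo tee -a /etc/sysctl.conf
$ sudo sysctl -p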

-

Note: GCE’s firewall will always interrupt TCP connections that are inactive for more than 10 min. Running the above command is highly recommended in that environment.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/genindex.html b/src/doc/4.0-alpha1/genindex.html deleted file mode 100644 index ca6a21665..000000000 --- a/src/doc/4.0-alpha1/genindex.html +++ /dev/null @@ -1,94 +0,0 @@ - ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Index" -doc-header-links: ' - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha1/getting_started/configuring.html b/src/doc/4.0-alpha1/getting_started/configuring.html deleted file mode 100644 index 493474ce1..000000000 --- a/src/doc/4.0-alpha1/getting_started/configuring.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

For running Cassandra on a single node, the default configuration file present at ./conf/cassandra.yaml is enough; you shouldn’t need to change any configuration. However, when you deploy a cluster of nodes, or use clients that are not on the same host, there are some parameters that must be changed.

-

The Cassandra configuration files can be found in the conf directory of tarballs. For packages, the configuration files will be located in /etc/cassandra.

-
-

Main runtime properties

-

Most of the configuration in Cassandra is done via yaml properties that can be set in cassandra.yaml. At a minimum you should consider setting the following properties (a quick way to review them is sketched after this list):

-
    -
  • cluster_name: the name of your cluster.
  • -
  • seeds: a comma separated list of the IP addresses of your cluster seeds.
  • -
  • storage_port: you don’t necessarily need to change this but make sure that there are no firewalls blocking this port.
  • -
  • listen_address: the IP address of your node; this is what allows other nodes to communicate with this node, so it is important that you change it. Alternatively, you can set listen_interface to tell Cassandra which interface to use, and consequently which address to use. Set only one, not both.
  • -
  • native_transport_port: as for storage_port, make sure this port is not blocked by firewalls as clients will communicate with Cassandra on this port.
  • -
-
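A quick way to review what a node currently has for these settings (a minimal sketch; assumes a tarball layout with the configuration at conf/cassandra.yaml):

$ grep -nE 'cluster_name:|seeds:|storage_port:|listen_address:|listen_interface:|native_transport_port:' conf/cassandra.yaml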
-
-

Changing the location of directories

-

The following yaml properties control the location of directories:

-
    -
  • data_file_directories: one or more directories where data files are located.
  • -
  • commitlog_directory: the directory where commitlog files are located.
  • -
  • saved_caches_directory: the directory where saved caches are located.
  • -
  • hints_directory: the directory where hints are located.
  • -
-

For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks.

-
-
-

Environment variables

-

JVM-level settings such as heap size can be set in cassandra-env.sh. You can add any additional JVM command line argument to the JVM_OPTS environment variable; when Cassandra starts these arguments will be passed to the JVM.

-
-
-

Logging

-

The logger in use is logback. You can change logging properties by editing logback.xml. By default it will log at INFO level into a file called system.log and at DEBUG level into a file called debug.log. When running in the foreground, it will also log at INFO level to the console.
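Log levels can also be changed on a running node without editing logback.xml, for example (a sketch; the logger name shown is just an illustration):

$ nodetool setlogginglevel org.apache.cassandra.db DEBUG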

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/getting_started/drivers.html b/src/doc/4.0-alpha1/getting_started/drivers.html deleted file mode 100644 index 28706aa4b..000000000 --- a/src/doc/4.0-alpha1/getting_started/drivers.html +++ /dev/null @@ -1,246 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Client drivers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Client drivers

-

Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra version and functionality supported by a specific driver.

- -
-

Python

- -
- - - - - - -
-

Clojure

- -
-
-

Erlang

- -
-
-

Go

- -
-
-

Haskell

- -
-
-

Rust

- -
- -
-

Elixir

- -
-
-

Dart

- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/getting_started/index.html b/src/doc/4.0-alpha1/getting_started/index.html deleted file mode 100644 index cf402d61a..000000000 --- a/src/doc/4.0-alpha1/getting_started/index.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Getting Started" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha1/getting_started/installing.html b/src/doc/4.0-alpha1/getting_started/installing.html deleted file mode 100644 index 42a1c94f3..000000000 --- a/src/doc/4.0-alpha1/getting_started/installing.html +++ /dev/null @@ -1,197 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Installing Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Installing Cassandra

-
-

Prerequisites

-
    -
  • The latest version of Java 8, either the Oracle Java Standard Edition 8 or OpenJDK 8. To verify that you have the correct version of java installed, type java -version.
  • -
  • For using cqlsh, the latest version of Python 2.7. To verify that you have the correct version of Python installed, type python --version.
  • -
-
-
-

Installation from binary tarball files

- -
tar -xzvf apache-cassandra-3.6-bin.tar.gz
-
-
-

The files will be extracted into apache-cassandra-3.6; substitute 3.6 with the release number that you have downloaded.

-
    -
  • Optionally add apache-cassandra-3.6/bin to your path.
  • -
  • Start Cassandra in the foreground by invoking bin/cassandra -f from the command line. Press “Control-C” to stop Cassandra. Start Cassandra in the background by invoking bin/cassandra from the command line. Invoke kill pid or pkill -f CassandraDaemon to stop Cassandra, where pid is the Cassandra process id, which you can find for example by invoking pgrep -f CassandraDaemon.
  • -
  • Verify that Cassandra is running by invoking bin/nodetool status from the command line.
  • -
  • Configuration files are located in the conf sub-directory.
  • -
  • Since Cassandra 2.1, log and data directories are located in the logs and data sub-directories respectively. Older versions defaulted to /var/log/cassandra and /var/lib/cassandra. Due to this, it is necessary to either start Cassandra with root privileges or change conf/cassandra.yaml to use directories owned by the current user, as explained below in the section on changing the location of directories.
  • -
-
-
-

Installation from Debian packages

-
    -
  • Add the Apache repository of Cassandra to /etc/apt/sources.list.d/cassandra.sources.list, for example for version 3.6:
  • -
-
echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-
-
-
    -
  • Add the Apache Cassandra repository keys:
  • -
-
curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
-
-
-
    -
  • Update the repositories:
  • -
-
sudo apt-get update
-
-
-
    -
  • If you encounter this error:
  • -
-
GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA
-
-
-

Then add the public key A278B781FE4B2BDA as follows:

-
sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA
-
-
-

and repeat sudo apt-get update. The actual key may be different; you get it from the error message itself. For a full list of Apache contributors’ public keys, you can refer to this link.

-
    -
  • Install Cassandra:
  • -
-
sudo apt-get install cassandra
-
-
-
    -
  • You can start Cassandra with sudo service cassandra start and stop it with sudo service cassandra stop. However, normally the service will start automatically. For this reason be sure to stop it if you need to make any configuration changes.
  • -
  • Verify that Cassandra is running by invoking nodetool status from the command line.
  • -
  • The default location of configuration files is /etc/cassandra.
  • -
  • The default location of log and data directories is /var/log/cassandra/ and /var/lib/cassandra.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/getting_started/querying.html b/src/doc/4.0-alpha1/getting_started/querying.html deleted file mode 100644 index f52cf735f..000000000 --- a/src/doc/4.0-alpha1/getting_started/querying.html +++ /dev/null @@ -1,145 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Inserting and querying" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Inserting and querying

-

The API to Cassandra is CQL, the Cassandra Query Language. To use CQL, you will need to connect to the cluster, which can be done:

-
    -
  • either using cqlsh,
  • -
  • or through a client driver for Cassandra.
  • -
-
-

CQLSH

-

cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on the command line. For example:

-
$ bin/cqlsh localhost
-Connected to Test Cluster at localhost:9042.
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-Use HELP for help.
-cqlsh> SELECT cluster_name, listen_address FROM system.local;
-
- cluster_name | listen_address
---------------+----------------
- Test Cluster |      127.0.0.1
-
-(1 rows)
-cqlsh>
-
-
-

See the cqlsh section for full documentation.

-
-
-

Client drivers

-

A lot of client drivers are provided by the community and a list of known drivers is provided in the next section. You should refer to the documentation of each driver for more information on how to use it.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/index.html b/src/doc/4.0-alpha1/index.html deleted file mode 100644 index ac1f77b1b..000000000 --- a/src/doc/4.0-alpha1/index.html +++ /dev/null @@ -1,86 +0,0 @@ ---- -layout: doclandingpage -title: "Documentation" -is_homepage: false -is_sphinx_doc: false ---- - -

Apache Cassandra Documentation v4.0-alpha1

- -
This documentation is currently a work-in-progress and contains a number of TODO sections. Contributions are welcome.
- -

Main documentation

- -
-
-
- - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - -
- - - -
- - - -
- - - -
- -

Meta information

- - - - diff --git a/src/doc/4.0-alpha1/objects.inv b/src/doc/4.0-alpha1/objects.inv deleted file mode 100644 index 489d9416c..000000000 Binary files a/src/doc/4.0-alpha1/objects.inv and /dev/null differ diff --git a/src/doc/4.0-alpha1/operating/audit_logging.html b/src/doc/4.0-alpha1/operating/audit_logging.html deleted file mode 100644 index 4d38ea9a6..000000000 --- a/src/doc/4.0-alpha1/operating/audit_logging.html +++ /dev/null @@ -1,280 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Audit Logging" -doc-header-links: ' - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Audit Logging

-

Audit logging in Cassandra logs every incoming CQL command request, as well as authentication (successful as well as unsuccessful login), to a C* node. Currently, two implementations are provided; a custom logger can be implemented and injected with the class name as a parameter in cassandra.yaml.

-
    -
  • BinAuditLogger An efficient way to log events to file in a binary format.
  • -
  • FileAuditLogger Logs events to audit/audit.log file using slf4j logger.
  • -
-

Recommendation: BinAuditLogger is the community-recommended logger, considering its performance.

-
-

What does it capture

-

Audit logging captures the following events:

-
    -
  • Successful as well as unsuccessful login attempts.
  • -
  • All database commands executed via Native protocol (CQL) attempted or successfully executed.
  • -
-
-
-

Limitations

-

Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution time stamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log.

-
-
-

What does it log

-

Each audit log implementation has access to the following attributes, and for the default text based logger these fields are concatenated with | s to yield the final message.

-
-
    -
  • user: User name(if available)
  • -
  • host: Host IP, where the command is being executed
  • -
  • source ip address: Source IP address from where the request initiated
  • -
  • source port: Source port number from where the request initiated
  • -
  • timestamp: unix time stamp
  • -
  • type: Type of the request (SELECT, INSERT, etc.,)
  • -
  • category - Category of the request (DDL, DML, etc.,)
  • -
  • keyspace - Keyspace(If applicable) on which request is targeted to be executed
  • -
  • scope - Table/Aggregate name/ function name/ trigger name etc., as applicable
  • -
  • operation - CQL command being executed
  • -
-
-
-
-

How to configure

-

Auditlog can be configured using cassandra.yaml. If you want to try Auditlog on one node, it can also be enabled and configured using nodetool.

-
-

cassandra.yaml configurations for AuditLog

-
-
    -
  • enabled: This option enables/ disables audit log
  • -
  • logger: Class name of the logger/ custom logger.
  • -
  • audit_logs_dir: Auditlogs directory location, if not set, default to cassandra.logdir.audit or cassandra.logdir + /audit/
  • -
  • included_keyspaces: Comma separated list of keyspaces to be included in audit log, default - includes all keyspaces
  • -
  • excluded_keyspaces: Comma separated list of keyspaces to be excluded from audit log, default - excludes no keyspace except system, system_schema and system_virtual_schema
  • -
  • included_categories: Comma separated list of Audit Log Categories to be included in audit log, default - includes all categories
  • -
  • excluded_categories: Comma separated list of Audit Log Categories to be excluded from audit log, default - excludes no category
  • -
  • included_users: Comma separated list of users to be included in audit log, default - includes all users
  • -
  • excluded_users: Comma separated list of users to be excluded from audit log, default - excludes no user
  • -
-
-

List of available categories are: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE

-
-
-

NodeTool command to enable AuditLog

-

enableauditlog: Enables AuditLog with yaml defaults. yaml configurations can be overridden using options via nodetool command.

-
nodetool enableauditlog
-
-
-
-

Options

-
-
--excluded-categories
-
Comma separated list of Audit Log Categories to be excluded from the audit log. If not set, the value from cassandra.yaml will be used.
-
--excluded-keyspaces
-
Comma separated list of keyspaces to be excluded from the audit log. If not set, the value from cassandra.yaml will be used. Please remember that system, system_schema and system_virtual_schema are excluded by default; if you are overriding this option via nodetool, remember to add these keyspaces back if you don’t want them in audit logs.
-
--excluded-users
-
Comma separated list of users to be excluded from the audit log. If not set, the value from cassandra.yaml will be used.
-
--included-categories
-
Comma separated list of Audit Log Categories to be included in the audit log. If not set, the value from cassandra.yaml will be used.
-
--included-keyspaces
-
Comma separated list of keyspaces to be included in the audit log. If not set, the value from cassandra.yaml will be used.
-
--included-users
-
Comma separated list of users to be included in the audit log. If not set, the value from cassandra.yaml will be used.
-
--logger
-
Logger name to be used for AuditLogging. Default BinAuditLogger. If not set, the value from cassandra.yaml will be used.
-
-
-
-
-

NodeTool command to disable AuditLog

-

disableauditlog: Disables AuditLog.

-
nodetool disableauditlog
-
-
-
-
-

NodeTool command to reload AuditLog filters

-

enableauditlog: NodeTool enableauditlog command can be used to reload auditlog filters when called with default or previous loggername and updated filters

-

E.g.,

-
nodetool enableauditlog --loggername <Default/ existing loggerName> --included-keyspaces <New Filter values>
-
-
-
-
-
-

View the contents of AuditLog Files

-

auditlogviewer is the new tool introduced to help view the contents of the binary log files in human-readable text format.

-
auditlogviewer <path1> [<path2>...<pathN>] [options]
-
-
-
-

Options

-
-
-f,--follow
-
-
Upon reaching the end of the log, continue indefinitely
-
waiting for more records
-
-
-
-r,--roll_cycle
-
-
How often the log file was rolled. May be
-
necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY, DAILY). Default HOURLY.
-
-
-
-h,--help
-
display this help message
-
-

For example, to dump the contents of audit log files on the console

-
auditlogviewer /logs/cassandra/audit
-
-
-
-
-

Sample output

-
LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1"
-
-
-
-
-
-

Configuring BinAuditLogger

-

To use BinAuditLogger as a logger in AuditLogging, set the logger to BinAuditLogger in cassandra.yaml under the audit_logging_options section. BinAuditLogger can be further configured using its advanced options in cassandra.yaml.

-
-

Advanced Options for BinAuditLogger

-
-
block
-
Indicates if the AuditLog should block if it falls behind, or should drop audit log records. Default is set to true so that AuditLog records won’t be lost.
-
max_queue_weight
-
Maximum weight of in memory queue for records waiting to be written to the audit log file before blocking or dropping the log records. Default is set to 256 * 1024 * 1024
-
max_log_size
-
Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is set to 16L * 1024L * 1024L * 1024L
-
roll_cycle
-
How often to roll Audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY.For more options, refer: net.openhft.chronicle.queue.RollCycles. Default is set to "HOURLY"
-
-
-
-
-

Configuring FileAuditLogger

-

To use FileAuditLogger as a logger in AuditLogging, apart from setting the class name in cassandra.yaml, the following configuration is needed to have the audit log events flow to a separate log file instead of system.log:

-
<!-- Audit Logging (FileAuditLogger) rolling file appender to audit.log -->
-<appender name="AUDIT" class="ch.qos.logback.core.rolling.RollingFileAppender">
-  <file>${cassandra.logdir}/audit/audit.log</file>
-  <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
-    <!-- rollover daily -->
-    <fileNamePattern>${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
-    <!-- each file should be at most 50MB, keep 30 days worth of history, but at most 5GB -->
-    <maxFileSize>50MB</maxFileSize>
-    <maxHistory>30</maxHistory>
-    <totalSizeCap>5GB</totalSizeCap>
-  </rollingPolicy>
-  <encoder>
-    <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern>
-  </encoder>
-</appender>
-
-<!-- Audit Logging additivity to redirect audt logging events to audit/audit.log -->
-<logger name="org.apache.cassandra.audit" additivity="false" level="INFO">
-        <appender-ref ref="AUDIT"/>
-</logger>
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/backups.html b/src/doc/4.0-alpha1/operating/backups.html deleted file mode 100644 index ed84955f0..000000000 --- a/src/doc/4.0-alpha1/operating/backups.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Backups" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/bloom_filters.html b/src/doc/4.0-alpha1/operating/bloom_filters.html deleted file mode 100644 index d647fb92c..000000000 --- a/src/doc/4.0-alpha1/operating/bloom_filters.html +++ /dev/null @@ -1,161 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bloom Filters" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bloom Filters

-

In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter.

-

Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: the data definitely does not exist in the given file, or the data probably exists in the given file.

-

While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the bloom_filter_fp_chance to a float between 0 and 1.

-

The default value for bloom_filter_fp_chance is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all other cases.

-

Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting the maximum heap size. As accuracy improves (as the bloom_filter_fp_chance gets closer to 0), memory usage increases non-linearly - the bloom filter for bloom_filter_fp_chance = 0.01 will require about three times as much memory as the same table with bloom_filter_fp_chance = 0.1.

-

Typical values for bloom_filter_fp_chance are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned by use case:

-
    -
  • Users with more RAM and slower disks may benefit from setting the bloom_filter_fp_chance to a numerically lower number (such as 0.01) to avoid excess IO operations
  • -
  • Users with less RAM, more dense nodes, or very fast disks may tolerate a higher bloom_filter_fp_chance in order to save RAM at the expense of excess IO operations
  • -
  • In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics workloads), setting the bloom_filter_fp_chance to a much higher number is acceptable.
  • -
-
-

Changing

-

The bloom filter false positive chance is visible in the DESCRIBE TABLE output as the field bloom_filter_fp_chance. Operators can change the value with an ALTER TABLE statement:

-
ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01
-
-
-

Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ALTER TABLE statement, new files on disk will be written with the new bloom_filter_fp_chance, but existing sstables will not be modified until they are compacted - if an operator needs a change to bloom_filter_fp_chance to take effect, they can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the sstables on disk, regenerating the bloom filters in the process.
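For example, to force the existing sstables of one table to be rewritten with the new setting (a sketch; ks1 and table1 are hypothetical names):

$ nodetool upgradesstables -a ks1 table1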

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/bulk_loading.html b/src/doc/4.0-alpha1/operating/bulk_loading.html deleted file mode 100644 index f79183a7f..000000000 --- a/src/doc/4.0-alpha1/operating/bulk_loading.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bulk Loading" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/cdc.html b/src/doc/4.0-alpha1/operating/cdc.html deleted file mode 100644 index b6275d6dc..000000000 --- a/src/doc/4.0-alpha1/operating/cdc.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Change Data Capture" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Change Data Capture

-
-

Overview

-

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the table property cdc=true (either when creating the table or altering it). Upon CommitLogSegment creation, a hard-link to the segment is created in the directory specified in cassandra.yaml. On segment fsync to disk, if CDC data is present anywhere in the segment a <segment_name>_cdc.idx file is also created with the integer offset of how much data in the original segment is persisted to disk. Upon final segment flush, a second line with the human-readable word “COMPLETED” will be added to the _cdc.idx file indicating that Cassandra has completed all processing on the file.

-

We use an index file rather than just encouraging clients to parse the log in realtime off a memory-mapped handle because data can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx file will ensure that you only parse CDC data for data that is durable.

-

A threshold of total disk space allowed is specified in the yaml; once it is reached, newly allocated CommitLogSegments will not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory.

-
-
-

Configuration

-
-

Enabling or disabling CDC on a table

-

CDC is enabled or disabled through the cdc table property, for instance:

-
CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=false;
-
-
-
-
-

cassandra.yaml parameters

-

The following cassandra.yaml options are available for CDC:

-
-
cdc_enabled (default: false)
-
Enable or disable CDC operations node-wide.
-
cdc_raw_directory (default: $CASSANDRA_HOME/data/cdc_raw)
-
Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
-
cdc_free_space_in_mb: (default: min of 4096 and 1/8th volume space)
-
Calculated as the sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in cdc_raw_directory.
-
cdc_free_space_check_interval_ms (default: 250)
-
When at capacity, we limit the frequency with which we re-calculate the space taken up by cdc_raw_directory to prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-
-
-
-

Reading CommitLogSegments

-

Use a CommitLogReader.java. Usage is fairly straightforward, with a variety of signatures available for use. In order to handle mutations read from disk, implement CommitLogReadHandler.

-
-
-

Warnings

-

Do not enable CDC without some kind of consumption process in-place.

-

If CDC is enabled on a node and then on a table, the cdc_free_space_in_mb will fill up and then writes to CDC-enabled tables will be rejected unless some consumption process is in place.

-
-
-

Further Reading

- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/compaction.html b/src/doc/4.0-alpha1/operating/compaction.html deleted file mode 100644 index e00a4cf48..000000000 --- a/src/doc/4.0-alpha1/operating/compaction.html +++ /dev/null @@ -1,520 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compaction" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compaction

-
-

Types of compaction

-

The concept of compaction is used for different kinds of operations in Cassandra; the common thing about these operations is that they take one or more sstables and output new sstables. The types of compactions are:

-
-
Minor compaction
-
triggered automatically in Cassandra.
-
Major compaction
-
a user executes a compaction over all sstables on the node.
-
User defined compaction
-
a user triggers a compaction on a given set of sstables.
-
Scrub
-
try to fix any broken sstables. This can actually remove valid data if that data is corrupted; if that happens you will need to run a full repair on the node.
-
Upgradesstables
-
upgrade sstables to the latest version. Run this after upgrading to a new major version.
-
Cleanup
-
remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been bootstrapped since that node will take ownership of some ranges from those nodes.
-
Secondary index rebuild
-
rebuild the secondary indexes on the node.
-
Anticompaction
-
after repair the ranges that were actually repaired are split out of the sstables that existed when repair started.
-
Sub range compaction
-
It is possible to only compact a given sub range - this could be useful if you know a token that has been misbehaving - either gathering many updates or many deletes. (nodetool compact -st x -et y) will pick all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS the resulting sstable will end up in L0.
-
-
-
-

When is a minor compaction triggered?

-

  1. When an sstable is added to the node through flushing/streaming etc.
  2. When autocompaction is enabled after being disabled (nodetool enableautocompaction).
  3. When compaction adds new sstables.
  4. A check for new minor compactions every 5 minutes.

-
-
-

Merging sstables

-

Compaction is about merging sstables; since partitions in sstables are sorted based on the hash of the partition key, it is possible to efficiently merge separate sstables. The content of each partition is also sorted, so each partition can be merged efficiently.

-
-
-

Tombstones and Garbage Collection (GC) Grace

-
-

Why Tombstones

-

When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values because of the distributed nature of Cassandra.

-
-
-

Deletes without tombstones

-

Imagine a three node cluster which has the value [A] replicated to every node:

-
[A], [A], [A]
-
-
-

If one of the nodes fails and our delete operation only removes existing values, we can end up with a cluster that looks like:

-
[], [], [A]
-
-
-

Then a repair operation would replace the value of [A] back onto the two nodes which are missing the value:

-
[A], [A], [A]
-
-
-

This would cause our data to be resurrected even though it had been deleted.

-
-
-

Deletes with Tombstones

-

Starting again with a three node cluster which has the value [A] replicated to every node:

-
[A], [A], [A]
-
-
-

If instead of removing data we add a tombstone record, our single node failure situation will look like this:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A]
-
-
-

Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being resurrected:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]]
-
-
-

Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted on all nodes. This does mean we will end up accruing tombstones which will permanently consume disk space. To avoid keeping tombstones forever we have a parameter known as gc_grace_seconds for every table in Cassandra.

-
-
-

The gc_grace_seconds parameter and Tombstone Removal

-

The table level gc_grace_seconds parameter controls how long Cassandra will retain tombstones through compaction events before finally removing them. This duration should directly reflect the amount of time a user expects to allow before recovering a failed node. After gc_grace_seconds has expired the tombstone may be removed (meaning there will no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to be able to drop an actual tombstone the following needs to be true:

-
    -
  • The tombstone must be older than gc_grace_seconds
  • -
  • If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older than the tombstone containing X must be included in the same compaction. We don’t need to care if the partition is in an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older than the data it cannot shadow that data.
  • -
  • If the option only_purge_repaired_tombstones is enabled, tombstones are only removed if the data has also been repaired.
  • -
-

If a node remains down or disconnected for longer than gc_grace_seconds, its deleted data will be repaired back to the other nodes and re-appear in the cluster. This is basically the same as in the “Deletes without Tombstones” section. Note that tombstones will not be removed until a compaction event even if gc_grace_seconds has elapsed.

-

The default value for gc_grace_seconds is 864000, which is equivalent to 10 days. This can be set when creating or altering a table using WITH gc_grace_seconds.
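For example (a sketch; the keyspace and table names are hypothetical, and 5 days is just an illustration):

$ cqlsh -e "ALTER TABLE ks1.events WITH gc_grace_seconds = 432000;"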

-
-
-
-

TTL

-

Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for at least gc_grace_seconds. Note that if you mix data with TTL and data without TTL (or just different lengths of TTL) Cassandra will have a hard time dropping the tombstones created, since the partition might span many sstables and not all are compacted at once.

-
-
-

Fully expired sstables

-

If an sstable contains only tombstones and it is guaranteed that the sstable is not shadowing data in any other sstable, compaction can drop that sstable. If you see sstables with only tombstones (note that TTLed data is considered tombstones once the time to live has expired) but they are not being dropped by compaction, it is likely that other sstables contain older data. There is a tool called sstableexpiredblockers that will list which sstables are droppable and which are blocking them from being dropped. This is especially useful for time series compaction with TimeWindowCompactionStrategy (and the deprecated DateTieredCompactionStrategy). With TimeWindowCompactionStrategy it is possible to remove the guarantee (not check for shadowing data) by enabling unsafe_aggressive_sstable_expiration.
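The tool takes the keyspace and table to inspect; assuming it is on your PATH and using illustrative names:

# list the sstables that block other, fully expired sstables from being dropped
$ sstableexpiredblockers mykeyspace mytable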

-
-
-

Repaired/unrepaired data

-

With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again, separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired (probably newer) sstables.

-
-
-

Data directories

-

Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making data live, tombstones and actual data are always kept in the same data directory. This way, if a disk is lost, all versions of a partition are lost and no data can get undeleted. To achieve this, a compaction strategy instance per data directory is run in addition to the compaction strategy instances containing repaired/unrepaired data; this means that if you have 4 data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding data getting undeleted:

-
    -
  • It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings and each one can run compactions independently from the others.
  • -
  • Users can backup and restore a single data directory.
  • -
  • Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk backing two data directories, the big one will be limited by the small one. One workaround for this is to create more data directories backed by the big disk.
  • -
-
-
-

Single sstable tombstone compaction

-

When an sstable is written, a histogram of the tombstone expiry times is created and this is used to try to find sstables with very many tombstones and run single sstable compaction on that sstable in the hope of being able to drop tombstones in it. Before starting this, Cassandra also checks how likely it is that any tombstones can actually be dropped and how much this sstable overlaps with other sstables. To avoid most of these checks the compaction option unchecked_tombstone_compaction can be enabled.

-
-
-

Common options

-

There are a number of common options for all the compaction strategies:

-
-
enabled (default: true)
-
Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do 'nodetool enableautocompaction' to start running compactions.
-
tombstone_threshold (default: 0.2)
-
How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-
tombstone_compaction_interval (default: 86400s (1 day))
-
Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure that one sstable is not constantly getting recompacted - this option states how often we should try for a given sstable.
-
log_all (default: false)
-
New detailed compaction logging, see below.
-
unchecked_tombstone_compaction (default: false)
-
The single sstable compaction has quite strict checks for whether it should be started; this option disables those checks, which might be needed for some use cases. Note that this does not change anything for the actual compaction: tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able to drop any tombstones.
-
only_purge_repaired_tombstone (default: false)
-
Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-
min_threshold (default: 4)
-
Lower limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
max_threshold (default: 32)
-
Upper limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
-

Further, see the section on each strategy for specific additional options.
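These options are supplied in the compaction map when creating or altering a table; for example (illustrative keyspace and table names, values chosen only to show the syntax):

-- set common compaction options alongside the strategy class
ALTER TABLE mykeyspace.mytable WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'min_threshold': 6, 'tombstone_threshold': 0.3, 'unchecked_tombstone_compaction': 'true'};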

-
-
-

Compaction nodetool commands

-

The nodetool utility provides a number of commands related to compaction:

-
-
enableautocompaction
-
Enable compaction.
-
disableautocompaction
-
Disable compaction.
-
setcompactionthroughput
-
How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this throughput.
-
compactionstats
-
Statistics about current and pending compactions.
-
compactionhistory
-
List details about the last compactions.
-
setcompactionthreshold
-
Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
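For example (real nodetool subcommands, with illustrative values and table names):

# raise the compaction throughput cap to 64 MB/s and check progress
$ nodetool setcompactionthroughput 64
$ nodetool compactionstats
# disable, then re-enable, automatic compaction for a single table
$ nodetool disableautocompaction mykeyspace mytable
$ nodetool enableautocompaction mykeyspace mytable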
-
-
-
-

Switching the compaction strategy and options using JMX

-

It is possible to switch the compaction strategy and its options on just a single node using JMX; this is a great way to experiment with settings without affecting the whole cluster. The mbean is:

-
org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>
-
-
-

and the attribute to change is CompactionParameters or CompactionParametersJson if you use jconsole or jmc. The syntax for the JSON version is the same as you would use in an ALTER TABLE statement - for example:

-
{ 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}
-
-
-

The setting is kept until someone executes an ALTER TABLE that touches the compaction settings or restarts the node.

-
-
-

More detailed compaction logging

-

Enable with the compaction option log_all and a more detailed compaction log file will be produced in your log directory.

-
-
-

Size Tiered Compaction Strategy

-

The basic idea of SizeTieredCompactionStrategy (STCS) is to merge sstables of approximately the same size. All sstables are put in different buckets depending on their size. An sstable is added to a bucket if its size is within bucket_low and bucket_high of the current average size of the sstables already in the bucket. This will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is decided by figuring out which bucket’s sstables take the most reads.

-
-

Major compaction

-

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%… of the total size.

-
-
-

STCS options

-
-
min_sstable_size (default: 50MB)
-
Sstables smaller than this are put in the same bucket.
-
bucket_low (default: 0.5)
-
How much smaller than the average size of a bucket an sstable should be before not being included in the bucket. That is, if bucket_low * avg_bucket_size < sstable_size (and the bucket_high condition holds, see below), then the sstable is added to the bucket.
-
bucket_high (default: 1.5)
-
How much bigger than the average size of a bucket an sstable should be before not being included in the bucket. That is, if sstable_size < bucket_high * avg_bucket_size (and the bucket_low condition holds, see above), then the sstable is added to the bucket.
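As a sketch, the STCS-specific options are set in the same compaction map (illustrative names; the min_sstable_size value below assumes the option is given in bytes, here 50MB):

ALTER TABLE mykeyspace.mytable WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'min_sstable_size': 52428800, 'bucket_low': 0.5, 'bucket_high': 1.5};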
-
-
-
-

Defragmentation

-

Defragmentation is done when many sstables are touched during a read. The result of the read is put into the memtable so that the next read will not have to touch as many sstables. This can cause writes on a read-only cluster.

-
-
-
-

Leveled Compaction Strategy

-

The idea of LeveledCompactionStrategy (LCS) is that all sstables are put into different levels where we guarantee that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 is where sstables are streamed/flushed - no overlap guarantees are given here.

-

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. This is done by always including all overlapping sstables in the next level. For example, if we select an sstable in L3, we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that we won’t create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables cover the full range. We also can’t compact all L0 sstables with all L1 sstables in a single compaction since that can use too much memory.

-

When deciding which level to compact, LCS checks the higher levels first (with LCS, a “higher” level is one with a higher number, L0 being the lowest one) and if the level is behind, a compaction will be started in that level.

-
-

Major compaction

-

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, it continues with L2 etc. This is suboptimal and will change to create all the sstables in a high level instead, see CASSANDRA-11817.

-
-
-

Bootstrapping

-

During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data from a remote node - these writes are flushed to L0 like all other writes, and to avoid those sstables blocking the remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

-
-
-

STCS in L0

-

If LCS gets very many L0 sstables, reads are going to hit all (or most) of the L0 sstables since they are likely to be overlapping. To more quickly remedy this, LCS does STCS compactions in L0 if there are more than 32 sstables there. This should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better.

-
-
-

Starved sstables

-

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted, they might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is only enough data to actually fill L4 on the node, the leftover sstables in L6 will get starved and not compacted. This can happen if a user changes sstable_size_in_mb from 5MB to 160MB, for example. To avoid this, LCS tries to include those starved high level sstables in other compactions if there have been 25 compaction rounds where the highest level has not been involved.

-
-
-

LCS options

-
-
sstable_size_in_mb (default: 160MB)
-
The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very large partitions on the node.
-
fanout_size (default: 10)
-
The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning this option.
-
-

LCS also supports the cassandra.disable_stcs_in_l0 startup option (-Dcassandra.disable_stcs_in_l0=true) to avoid doing STCS in L0.
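A minimal LCS configuration, using the defaults described above (illustrative keyspace and table names):

ALTER TABLE mykeyspace.mytable WITH compaction = {'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 160, 'fanout_size': 10};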

-
-
-
-

Time Window Compaction Strategy

-

TimeWindowCompactionStrategy (TWCS) is designed specifically for workloads where it’s beneficial to have data on disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using SizeTieredCompactionStrategy or LeveledCompactionStrategy. The basic concept is that TimeWindowCompactionStrategy will create one sstable per time window, where a window is simply calculated as the combination of two primary options:

-
-
compaction_window_unit (default: DAYS)
-
A Java TimeUnit (MINUTES, HOURS, or DAYS).
-
compaction_window_size (default: 1)
-
The number of units that make up a window.
-
unsafe_aggressive_sstable_expiration (default: false)
-
Expired sstables will be dropped without checking whether their data shadows data in other sstables. This is a potentially risky option that can lead to data loss or deleted data re-appearing, going beyond what unchecked_tombstone_compaction does for single sstable compaction. Due to the risk the JVM must also be started with -Dcassandra.unsafe_aggressive_sstable_expiration=true.
-
-

Taken together, the operator can specify windows of virtually any size, and TimeWindowCompactionStrategy will work to create a single sstable for writes within that window. For efficiency during writing, the newest window will be compacted using SizeTieredCompactionStrategy.

-

Ideally, operators should select a compaction_window_unit and compaction_window_size pair that produces approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 day window would be a reasonable choice ('compaction_window_unit':'DAYS','compaction_window_size':3).
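Putting that recommendation into CQL, a time-series table with a 90 day default TTL and 3 day windows might be configured like this (a sketch with illustrative names):

ALTER TABLE mykeyspace.mytable WITH compaction = {'class': 'TimeWindowCompactionStrategy', 'compaction_window_unit': 'DAYS', 'compaction_window_size': 3} AND default_time_to_live = 7776000;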

-
-

TimeWindowCompactionStrategy Operational Concerns

-

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

-
    -
  • If the user mixes old data and new data in the traditional write path, the data will be commingled in the memtables and flushed into the same SSTable, where it will remain commingled.
  • -
  • If the user’s read requests for old data cause read repairs that pull old data into the current memtable, that data will be commingled and flushed into the same SSTable.
  • -
-

While TWCS tries to minimize the impact of commingled data, users should attempt to avoid this behavior. Specifically, users should avoid queries that explicitly set the timestamp via CQL USING TIMESTAMP. Additionally, users should run frequent repairs (which stream data in such a way that it does not become commingled).

-
-
-

Changing TimeWindowCompactionStrategy Options

-

Operators wishing to enable TimeWindowCompactionStrategy on existing data should consider running a major compaction first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables as expected.

-

Operators wishing to change compaction_window_unit or compaction_window_size can do so, but may trigger additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple windows.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/compression.html b/src/doc/4.0-alpha1/operating/compression.html deleted file mode 100644 index 89ba3313a..000000000 --- a/src/doc/4.0-alpha1/operating/compression.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compression" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compression

-

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of data on disk by compressing each SSTable in user-configurable chunks (chunk_length_in_kb). Because Cassandra SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so on).

-
-

Configuring Compression

-

Compression is configured on a per-table basis as an optional argument to CREATE TABLE or ALTER TABLE. The following options are relevant:

-
    -
  • class specifies the compression class - Cassandra provides four classes (LZ4Compressor, SnappyCompressor, DeflateCompressor and ZstdCompressor). The default is LZ4Compressor.
  • -
  • chunk_length_in_kb specifies the number of kilobytes of data per compression chunk. The default is 64KB.
  • -
  • crc_check_chance determines how likely Cassandra is to verify the checksum on each compression chunk during reads. The default is 1.0.
  • -
  • compression_level is only applicable for ZstdCompressor and accepts values between -131072 and 22.
  • -
-

Users can set compression using the following syntax:

-
CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-
-

Or

-
ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};
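Similarly, ZstdCompressor can be configured with an explicit compression level (a sketch; the keyspace and table names are illustrative):

ALTER TABLE keyspace.table WITH compression = {'class': 'ZstdCompressor', 'compression_level': 3};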
-
-
-

Once enabled, compression can be disabled with ALTER TABLE setting enabled to false:

-
ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-
-

Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon issuing a change to the compression options via ALTER TABLE, the existing SSTables will not be modified until they are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the SSTables on disk, re-compressing the data in the process.

-
-
-

Benefits and Uses

-

Compression’s primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save in storage requirements, it often increases read and write throughput, as the CPU cost of compressing and decompressing data is typically lower than the cost of reading or writing the larger volume of uncompressed data from disk.

-

Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing similar text columns (such as repeated JSON blobs) often compress very well.

-
-
-

Operational Impact

-
    -
  • Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per terabyte of data on disk, though the exact usage varies with chunk_length_in_kb and compression ratios.
  • -
  • Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.
  • -
  • The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a way to ensure correctness of data on disk, compressed tables allow the user to set crc_check_chance (a float from 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.
  • -
-
-
-

Advanced Use

-

Advanced users can provide their own compression class by implementing the interface at org.apache.cassandra.io.compress.ICompressor.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/hardware.html b/src/doc/4.0-alpha1/operating/hardware.html deleted file mode 100644 index 56157f9a0..000000000 --- a/src/doc/4.0-alpha1/operating/hardware.html +++ /dev/null @@ -1,190 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hardware Choices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hardware Choices

-

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production server requires at least 2 cores and at least 8GB of RAM. Typical production servers have 8 or more cores and at least 32GB of RAM.

-
-

CPU

-

Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding additional CPU cores often increases throughput of both reads and writes.

-
-
-

Memory

-

Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (Java’s -Xmx parameter). In addition to the heap, Cassandra will use significant amounts of RAM off-heap for compression metadata, bloom filters, row, key, and counter caches, and an in-process page cache. Finally, Cassandra will take advantage of the operating system’s page cache, storing recently accessed portions of files in RAM for rapid re-use.

-

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, basic guidelines suggest:

-
    -
  • ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
  • -
  • The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM
  • -
  • Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
  • -
  • Heaps larger than 12GB should consider G1GC
  • -
-
-
-

Disks

-

Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are exceeded and memtables are flushed to disk as SSTables.

-

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be periodically purged once it is flushed to the SSTable data files.

-

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra’s sorted immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of SSDs by avoiding write amplification. However, when using spinning disks, it’s important that the commitlog (commitlog_directory) be on one physical disk (not simply a partition, but a physical disk), and the data files (data_file_directories) be set to a separate physical disk. By separating the commitlog from the data directory, writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request data from various SSTables on disk.
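In cassandra.yaml, that separation might look like the following (the mount points are illustrative; only the option names are real):

# commitlog on its own physical disk
commitlog_directory: /mnt/disk1/cassandra/commitlog
# data files on a separate physical disk
data_file_directories:
    - /mnt/disk2/cassandra/data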

-

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by Cassandra obsoletes the need for replication at the disk layer, so it’s typically recommended that operators take advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5.

-
-
-

Common Cloud Choices

-

Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular options include:

-
    -
  • m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate workloads
  • -
  • i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs
  • -
  • m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) storage
  • -
-

Generally, disk and network performance increases with instance size and generation, so newer generations of instances and larger instance types within each family often perform better than their smaller or older alternatives.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/hints.html b/src/doc/4.0-alpha1/operating/hints.html deleted file mode 100644 index 4ca83a3be..000000000 --- a/src/doc/4.0-alpha1/operating/hints.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hints" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/index.html b/src/doc/4.0-alpha1/operating/index.html deleted file mode 100644 index f21ead791..000000000 --- a/src/doc/4.0-alpha1/operating/index.html +++ /dev/null @@ -1,224 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Operating Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Operating Cassandra

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/metrics.html b/src/doc/4.0-alpha1/operating/metrics.html deleted file mode 100644 index 58a550b19..000000000 --- a/src/doc/4.0-alpha1/operating/metrics.html +++ /dev/null @@ -1,1800 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Monitoring" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Monitoring

-

Metrics in Cassandra are managed using the Dropwizard Metrics library. These metrics can be queried via JMX or pushed to external monitoring systems using a number of built-in and third-party reporter plugins.

-

Metrics are collected for a single node. It’s up to the operator to use an external monitoring system to aggregate them.

-
-

Metric Types

-

All metrics reported by Cassandra fit into one of the following types.

-
-
Gauge
-
An instantaneous measurement of a value.
-
Counter
-
A gauge for an AtomicLong instance. Typically this is consumed by monitoring the change since the last call to see if there is a large increase compared to the norm.
-
Histogram
-

Measures the statistical distribution of values in a stream of data.

-

In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th percentiles.

-
-
Timer
-
Measures both the rate that a particular piece of code is called and the histogram of its duration.
-
Latency
-
Special type that tracks latency (in microseconds) with a Timer plus a Counter that tracks the total latency accrued since starting. The former is useful if you track the change in total latency since the last check. Each metric name of this type will have ‘Latency’ and ‘TotalLatency’ appended to it.
-
Meter
-
A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving average throughputs.
-
-
-
-

Table Metrics

-

Each table in Cassandra has metrics responsible for tracking its state and performance.

-

The metric names are all appended with the specific Keyspace and Table name.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table> name=<MetricName>
-
-
-

Note

-

There is a special table called ‘all’ without a keyspace. This represents the aggregation of metrics across all tables and keyspaces on the node.

-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
MemtableOnHeapSizeGauge<Long>Total amount of data stored in the memtable that resides on-heap, including column related overhead and partitions overwritten.
MemtableOffHeapSizeGauge<Long>Total amount of data stored in the memtable that resides off-heap, including column related overhead and partitions overwritten.
MemtableLiveDataSizeGauge<Long>Total amount of live data stored in the memtable, excluding any data structure overhead.
AllMemtablesOnHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides on-heap.
AllMemtablesOffHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides off-heap.
AllMemtablesLiveDataSizeGauge<Long>Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead.
MemtableColumnsCountGauge<Long>Total number of columns present in the memtable.
MemtableSwitchCountCounterNumber of times flush has resulted in the memtable being switched out.
CompressionRatioGauge<Double>Current compression ratio for all SSTables.
EstimatedPartitionSizeHistogramGauge<long[]>Histogram of estimated partition size (in bytes).
EstimatedPartitionCountGauge<Long>Approximate number of keys in table.
EstimatedColumnCountHistogramGauge<long[]>Histogram of estimated number of columns.
SSTablesPerReadHistogramHistogramHistogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into account.
ReadLatencyLatencyLocal read latency for this table.
RangeLatencyLatencyLocal range scan latency for this table.
WriteLatencyLatencyLocal write latency for this table.
CoordinatorReadLatencyTimerCoordinator read latency for this table.
CoordinatorWriteLatencyTimerCoordinator write latency for this table.
CoordinatorScanLatencyTimerCoordinator range scan latency for this table.
PendingFlushesCounterEstimated number of flush tasks pending for this table.
BytesFlushedCounterTotal number of bytes flushed since server [re]start.
CompactionBytesWrittenCounterTotal number of bytes written by compaction since server [re]start.
PendingCompactionsGauge<Integer>Estimate of number of pending compactions for this table.
LiveSSTableCountGauge<Integer>Number of SSTables on disk for this table.
LiveDiskSpaceUsedCounterDisk space used by SSTables belonging to this table (in bytes).
TotalDiskSpaceUsedCounterTotal disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC’d.
MinPartitionSizeGauge<Long>Size of the smallest compacted partition (in bytes).
MaxPartitionSizeGauge<Long>Size of the largest compacted partition (in bytes).
MeanPartitionSizeGauge<Long>Size of the average compacted partition (in bytes).
BloomFilterFalsePositivesGauge<Long>Number of false positives on table’s bloom filter.
BloomFilterFalseRatioGauge<Double>False positive ratio of table’s bloom filter.
BloomFilterDiskSpaceUsedGauge<Long>Disk space used by bloom filter (in bytes).
BloomFilterOffHeapMemoryUsedGauge<Long>Off-heap memory used by bloom filter.
IndexSummaryOffHeapMemoryUsedGauge<Long>Off-heap memory used by index summary.
CompressionMetadataOffHeapMemoryUsedGauge<Long>Off-heap memory used by compression meta data.
KeyCacheHitRateGauge<Double>Key cache hit rate for this table.
TombstoneScannedHistogramHistogramHistogram of tombstones scanned in queries on this table.
LiveScannedHistogramHistogramHistogram of live cells scanned in queries on this table.
ColUpdateTimeDeltaHistogramHistogramHistogram of column update time delta on this table.
ViewLockAcquireTimeTimerTime taken acquiring a partition lock for materialized view updates on this table.
ViewReadTimeTimerTime taken during the local read of a materialized view update.
TrueSnapshotsSizeGauge<Long>Disk space used by snapshots of this table including all SSTable components.
RowCacheHitOutOfRangeCounterNumber of table row cache hits that do not satisfy the query filter, thus went to disk.
RowCacheHitCounterNumber of table row cache hits.
RowCacheMissCounterNumber of table row cache misses.
CasPrepareLatencyLatency of paxos prepare round.
CasProposeLatencyLatency of paxos propose round.
CasCommitLatencyLatency of paxos commit round.
PercentRepairedGauge<Double>Percent of table data that is repaired on disk.
BytesRepairedGauge<Long>Size of table data repaired on disk
BytesUnrepairedGauge<Long>Size of table data unrepaired on disk
BytesPendingRepairGauge<Long>Size of table data isolated for an ongoing incremental repair
SpeculativeRetriesCounterNumber of times speculative retries were sent for this table.
SpeculativeFailedRetriesCounterNumber of speculative retries that failed to prevent a timeout
SpeculativeInsufficientReplicasCounterNumber of speculative retries that couldn’t be attempted due to lack of replicas
SpeculativeSampleLatencyNanosGauge<Long>Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency.
WaitingOnFreeMemtableSpaceHistogramHistogram of time spent waiting for free memtable space, either on- or off-heap.
DroppedMutationsCounterNumber of dropped mutations on this table.
AnticompactionTimeTimerTime spent anticompacting before a consistent repair.
ValidationTimeTimerTime spent doing validation compaction during repair.
SyncTimeTimerTime spent doing streaming during repair.
BytesValidatedHistogramHistogram over the amount of bytes read during validation.
PartitionsValidatedHistogramHistogram over the number of partitions read during validation.
BytesAnticompactedCounterHow many bytes we anticompacted.
BytesMutatedAnticompactionCounterHow many bytes we avoided anticompacting because the sstable was fully contained in the repaired range.
MutatedAnticompactionGaugeGauge<Double>Ratio of bytes mutated vs total bytes repaired.
-
-
-

Keyspace Metrics

-

Each keyspace in Cassandra has metrics responsible for tracking its state and performance.

-

Most of these metrics are the same as the Table Metrics above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.keyspace.<MetricName>.<Keyspace>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Keyspace scope=<Keyspace> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
WriteFailedIdeaCLCounterNumber of writes that failed to achieve the configured ideal consistency level or 0 if none is configured
IdealCLWriteLatencyLatencyCoordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured
RepairTimeTimerTotal time spent as repair coordinator.
RepairPrepareTimeTimerTotal time spent preparing for repair.
-
-
-

ThreadPool Metrics

-

Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for requests on a node. It’s important to monitor the state of these thread pools since they can tell you how saturated a node is.

-

The metric names are all appended with the specific ThreadPool name. The thread pools are also categorized under a specific type.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ThreadPools.<MetricName>.<Path>.<ThreadPoolName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ThreadPools path=<Path> scope=<ThreadPoolName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ActiveTasksGauge<Integer>Number of tasks being actively worked on by this pool.
PendingTasksGauge<Integer>Number of tasks queued up on this pool.
CompletedTasksCounterNumber of tasks completed.
TotalBlockedTasksCounterNumber of tasks that were blocked due to queue saturation.
CurrentlyBlockedTaskCounterNumber of tasks that are currently blocked due to queue saturation but on retry will become unblocked.
MaxPoolSizeGauge<Integer>The maximum number of threads in this pool.
MaxTasksQueuedGauge<Integer>The maximum number of tasks queued before a task gets blocked.
-

The following thread pools can be monitored.

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Native-Transport-RequeststransportHandles client CQL requests
CounterMutationStagerequestResponsible for counter writes
ViewMutationStagerequestResponsible for materialized view writes
MutationStagerequestResponsible for all other writes
ReadRepairStagerequestReadRepair happens on this thread pool
ReadStagerequestLocal reads run on this thread pool
RequestResponseStagerequestCoordinator requests to the cluster run on this thread pool
AntiEntropyStageinternalBuilds merkle tree for repairs
CacheCleanupExecutorinternalCache maintenance performed on this thread pool
CompactionExecutorinternalCompactions are run on these threads
GossipStageinternalHandles gossip requests
HintsDispatcherinternalPerforms hinted handoff
InternalResponseStageinternalResponsible for intra-cluster callbacks
MemtableFlushWriterinternalWrites memtables to disk
MemtablePostFlushinternalCleans up commit log after memtable is written to disk
MemtableReclaimMemoryinternalMemtable recycling
MigrationStageinternalRuns schema migrations
MiscStageinternalMiscellaneous tasks run here
PendingRangeCalculatorinternalCalculates token range
PerDiskMemtableFlushWriter_0internalResponsible for flushing memtables to a specific disk (there is one of these per disk, 0-N)
SamplerinternalResponsible for re-sampling the index summaries of SStables
SecondaryIndexManagementinternalPerforms updates to secondary indexes
ValidationExecutorinternalPerforms validation compaction or scrubbing
ViewBuildExecutorinternalPerforms materialized views initial build
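As a quick operational check, the same pools can also be inspected from the command line; nodetool tpstats reports active, pending, completed and blocked counts per pool:

$ nodetool tpstats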
-
-
-

Client Request Metrics

-

Client requests have their own set of metrics that encapsulate the work happening at coordinator level.

-

Different types of client requests are broken down by RequestType.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ClientRequest.<MetricName>.<RequestType>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ClientRequest scope=<RequestType> name=<MetricName>
-
- --- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
RequestType:

CASRead

-
Description:

Metrics related to transactional read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction read latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
UnfinishedCommitCounterNumber of transactions that were committed on read.
ConditionNotMetCounterNumber of transactions whose preconditions did not match current values.
ContentionHistogramHistogramHow many contended reads were encountered
-
RequestType:

CASWrite

-
Description:

Metrics related to transactional write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction write latency.
UnfinishedCommitCounterNumber of transactions that were committed on write.
ConditionNotMetCounterNumber of transactions whose preconditions did not match current values.
ContentionHistogramHistogramHow many contended writes were encountered
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

Read

-
Description:

Metrics related to standard read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of read failures encountered.
 LatencyRead latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

RangeSlice

-
Description:

Metrics related to token range read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of range query failures encountered.
 LatencyRange query latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

Write

-
Description:

Metrics related to regular write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of write failures encountered.
 LatencyWrite latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

ViewWrite

-
Description:

Metrics related to materialized view writes.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
UnavailablesCounterNumber of unavailable exceptions encountered.
ViewReplicasAttemptedCounterTotal number of attempted view replica writes.
ViewReplicasSuccessCounterTotal number of succeeded view replica writes.
ViewPendingMutationsGauge<Long>ViewReplicasAttempted - ViewReplicasSuccess.
ViewWriteLatencyTimerTime between when mutation is applied to base table and when CL.ONE is achieved on view.
-
-
-
-

Cache Metrics

-

Cassandra caches have metrics to track the effectiveness of the caches, though the Table Metrics might be more useful.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Cache capacity in bytes.
EntriesGauge<Integer>Total number of cache entries.
FifteenMinuteCacheHitRateGauge<Double>15m cache hit rate.
FiveMinuteCacheHitRateGauge<Double>5m cache hit rate.
OneMinuteCacheHitRateGauge<Double>1m cache hit rate.
HitRateGauge<Double>All time cache hit rate.
HitsMeterTotal number of cache hits.
MissesMeterTotal number of cache misses.
MissLatencyTimerLatency of misses.
RequestsGauge<Long>Total number of cache requests.
SizeGauge<Long>Total size of occupied cache, in bytes.
-

The following caches are covered:

- ---- - - - - - - - - - - - - - - - - - - - -
NameDescription
CounterCacheKeeps hot counters in memory for performance.
ChunkCacheIn process uncompressed page cache.
KeyCacheCache for partition to sstable offsets.
RowCacheCache for rows kept in memory.
-
-

Note

-

Misses and MissLatency are only defined for the ChunkCache

-
-
-
-

CQL Metrics

-

Metrics specific to CQL prepared statement caching.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CQL.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CQL name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PreparedStatementsCountGauge<Integer>Number of cached prepared statements.
PreparedStatementsEvictedCounterNumber of prepared statements evicted from the prepared statement cache
PreparedStatementsExecutedCounterNumber of prepared statements executed.
RegularStatementsExecutedCounterNumber of non prepared statements executed.
PreparedStatementsRatioGauge<Double>Percentage of statements that are prepared vs unprepared.
-
-
-

DroppedMessage Metrics

-

Metrics specific to tracking dropped messages for different types of requests. Dropped writes are stored and retried by Hinted Handoff.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.DroppedMessage.<MetricName>.<Type>
-
JMX MBean
-
org.apache.cassandra.metrics:type=DroppedMessage scope=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CrossNodeDroppedLatencyTimerThe dropped latency across nodes.
InternalDroppedLatencyTimerThe dropped latency within node.
DroppedMeterNumber of dropped messages.
-

The different types of messages tracked are:

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameDescription
BATCH_STOREBatchlog write
BATCH_REMOVEBatchlog cleanup (after successfully applied)
COUNTER_MUTATIONCounter writes
HINTHint replay
MUTATIONRegular writes
READRegular reads
READ_REPAIRRead repair
PAGED_SLICEPaged read
RANGE_SLICEToken range read
REQUEST_RESPONSERPC Callbacks
_TRACETracing writes
-
-
-

Streaming Metrics

-

Metrics reported during Streaming operations, such as repair, bootstrap, rebuild.

-

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
IncomingBytesCounterNumber of bytes streamed to this node from the peer.
OutgoingBytesCounterNumber of bytes streamed to the peer endpoint from this node.
-
-
-

Compaction Metrics

-

Metrics specific to Compaction work.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Compaction.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Compaction name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
BytesCompactedCounterTotal number of bytes compacted since server [re]start.
PendingTasksGauge<Integer>Estimated number of compactions remaining to perform.
CompletedTasksGauge<Long>Number of completed compactions since server [re]start.
TotalCompactionsCompletedMeterThroughput of completed compactions since server [re]start.
PendingTasksByTableNameGauge<Map<String, Map<String, Integer>>>Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in Table Metrics.
-
-
-

CommitLog Metrics

-

Metrics specific to the CommitLog

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CommitLog.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CommitLog name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CompletedTasksGauge<Long>Total number of commit log messages written since [re]start.
PendingTasksGauge<Long>Number of commit log messages written but yet to be fsync’d.
TotalCommitLogSizeGauge<Long>Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocationTimerTime spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommitTimerThe time spent waiting on CL fsync; for Periodic this only occurs when the sync is lagging its sync interval.
-
-
-

Storage Metrics

-

Metrics specific to the storage engine.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Storage.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Storage name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ExceptionsCounterNumber of internal exceptions caught. Under normal conditions this should be zero.
LoadCounterSize, in bytes, of the on disk data size this node manages.
TotalHintsCounterNumber of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgressCounterNumber of hints attempting to be sent currently.
-
-
-

HintedHandoff Metrics

-

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
Hints_created-<PeerIP>CounterNumber of hints on disk for this peer.
Hints_not_stored-<PeerIP>CounterNumber of hints not stored for this peer, due to being down past the configured hint window.
-
-
-

HintsService Metrics

-

Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintsService.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintsService name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
HintsSucceededMeterA meter of the hints successfully delivered
HintsFailedMeterA meter of the hints that failed to be delivered
HintsTimedOutMeterA meter of the hints that timed out
Hint_delaysHistogramHistogram of hint delivery delays (in milliseconds)
Hint_delays-<PeerIP>HistogramHistogram of hint delivery delays (in milliseconds) per peer
-
-
-

SSTable Index Metrics

-

Metrics specific to the SSTable index metadata.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry
-
JMX MBean
-
org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
IndexedEntrySizeHistogramHistogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCountHistogramHistogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGetsHistogramHistogram of the number of index seeks performed per SSTable.
-
-
-

BufferPool Metrics

-

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC lower by recycling on- and off-heap buffers.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.BufferPool.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=BufferPool name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
SizeGauge<Long>Size, in bytes, of the managed buffer pool
MissesMeterThe rate of misses in the pool. The higher this is the more allocations incurred.
-
-
-

Client Metrics

-

Metrics specific to client management.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Client.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Client name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
connectedNativeClientsGauge<Integer>Number of clients connected to this node’s native protocol server
connectionsGauge<List<Map<String, String>>List of all connections and their state information
connectedNativeClientsByUserGauge<Map<String, Int>Number of connected native clients by username
-
-
-

Batch Metrics

-

Metrics specific to batch statements.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Batch.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Batch name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PartitionsPerCounterBatchHistogramDistribution of the number of partitions processed per counter batch
PartitionsPerLoggedBatchHistogramDistribution of the number of partitions processed per logged batch
PartitionsPerUnloggedBatchHistogramDistribution of the number of partitions processed per unlogged batch
-
-
-

JVM Metrics

-

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using Metric Reporters.

-
-

BufferPool

-
-
Metric Name
-
jvm.buffers.<direct|mapped>.<MetricName>
-
JMX MBean
-
java.nio:type=BufferPool name=<direct|mapped>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Estimated total capacity of the buffers in this pool
CountGauge<Long>Estimated number of buffers in the pool
UsedGauge<Long>Estimated memory that the Java virtual machine is using for this buffer pool
-
-
-

FileDescriptorRatio

-
-
Metric Name
-
jvm.fd.<MetricName>
-
JMX MBean
-
java.lang:type=OperatingSystem name=<OpenFileDescriptorCount|MaxFileDescriptorCount>
-
- ----- - - - - - - - - - - - - -
NameTypeDescription
UsageRatioRatio of used to total file descriptors
-
-
-

GarbageCollector

-
-
Metric Name
-
jvm.gc.<gc_type>.<MetricName>
-
JMX MBean
-
java.lang:type=GarbageCollector name=<gc_type>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
CountGauge<Long>Total number of collections that have occurred
TimeGauge<Long>Approximate accumulated collection elapsed time in milliseconds
-
-
-

Memory

-
-
Metric Name
-
jvm.memory.<heap/non-heap/total>.<MetricName>
-
JMX MBean
-
java.lang:type=Memory
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-

MemoryPool

-
-
Metric Name
-
jvm.memory.pools.<memory_pool>.<MetricName>
-
JMX MBean
-
java.lang:type=MemoryPool name=<memory_pool>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-
-

JMX

-

Any JMX-based client can access metrics from Cassandra.

-

If you wish to access JMX metrics over HTTP, it's possible to download Mx4jTool and place mx4j-tools.jar into the classpath. On startup you will see in the log:

-
HttpAdaptor version 3.0.2 started on port 8081
-
-
-

To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default), edit conf/cassandra-env.sh and uncomment:

-
#MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0"
-
-#MX4J_PORT="-Dmx4jport=8081"
-
-
-
-
-

Metric Reporters

-

As mentioned at the top of this section on monitoring, the Cassandra metrics can be exported to a number of monitoring systems via a number of built-in and third-party reporter plugins.

-

The configuration of these plugins is managed by the metrics reporter config project. There is a sample configuration file located at conf/metrics-reporter-config-sample.yaml.

-

Once configured, you simply start Cassandra with the flag -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml. The specified .yaml file plus any third-party reporter jars must all be in Cassandra's classpath.

-
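As a minimal sketch, one way to pass the flag is via cassandra-env.sh (the file name is a placeholder; base the reporter configuration itself on conf/metrics-reporter-config-sample.yaml):

# cassandra-env.sh -- assumes metrics-reporter-config.yaml and any reporter jars are already on the classpath
JVM_OPTS="$JVM_OPTS -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml"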
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/read_repair.html b/src/doc/4.0-alpha1/operating/read_repair.html deleted file mode 100644 index 5387f0a5b..000000000 --- a/src/doc/4.0-alpha1/operating/read_repair.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Read repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/repair.html b/src/doc/4.0-alpha1/operating/repair.html deleted file mode 100644 index 6c678807e..000000000 --- a/src/doc/4.0-alpha1/operating/repair.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Repair

-

Cassandra is designed to remain available if one of its nodes is down or unreachable. However, when a node is down or unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but are a best effort, and aren't guaranteed to inform a node of 100% of the writes it missed. These inconsistencies can eventually result in data loss as nodes are replaced or tombstones expire.

-

These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their respective datasets for their common token ranges, and streaming the differences for any out of sync sections between the nodes. It compares the data with Merkle trees, which are a hierarchy of hashes.

-
-

Incremental and Full Repairs

-

There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that’s been written since the previous incremental repair.

-

Incremental repairs are the default repair type, and if run regularly, can significantly reduce the time and io cost of -performing a repair. However, it’s important to understand that once an incremental repair marks data as repaired, it won’t -try to repair it again. This is fine for syncing up missed writes, but it doesn’t protect against things like disk corruption, -data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally.

-
-
-

Usage and Best Practices

-

Since repair can result in a lot of disk and network io, it’s not run automatically by Cassandra. It is run by the operator -via nodetool.

-

Incremental repair is the default and is run with the following command:

-
nodetool repair
-
-
-

A full repair can be run with the following command:

-
nodetool repair --full
-
-
-

Additionally, repair can be run on a single keyspace:

-
nodetool repair [options] <keyspace_name>
-
-
-

Or even on specific tables:

-
nodetool repair [options] <keyspace_name> <table1> <table2>
-
-
-

The repair command only repairs token ranges on the node being repaired; it doesn't repair the whole cluster. By default, repair will operate on all token ranges replicated by the node you're running repair on, which will cause duplicate work if you run it on every node. The -pr flag will only repair the "primary" ranges on a node, so you can repair your entire cluster by running nodetool repair -pr on each node in a single datacenter.

-

The specific frequency of repair that’s right for your cluster, of course, depends on several factors. However, if you’re -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don’t want to run incremental repairs, a full repair every 5 days is a good place -to start.

-

At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays.

-
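As an illustrative sketch only (the schedule, user, and file location are placeholders, not a recommendation beyond the frequency guidance above), a weekly full repair of each node's primary ranges could be driven by cron:

# /etc/cron.d/cassandra-repair -- stagger the day/hour per node so repairs don't all run at once
0 3 * * 0  cassandra  nodetool repair -pr --full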
-
-

Other Options

-
-
-pr, --partitioner-range
-
Restricts repair to the ‘primary’ token ranges of the node being repaired. A primary range is just a token range for -which a node is the first replica in the ring.
-
-prv, --preview
-
Estimates the amount of streaming that would occur for the given repair command. This builds the Merkle trees and prints the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated; add the --full flag to estimate a full repair (see the combined example after this list).
-
-vd, --validate
-
Verifies that the repaired data is the same across all nodes. Similar to --preview, this builds and compares Merkle trees of repaired data, but doesn't do any streaming. This is useful for troubleshooting. If this shows that the repaired data is out of sync, a full repair should be run.
-
-
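For example, combining these options (a sketch; the keyspace name is a placeholder):

# estimate the streaming a full repair of one keyspace would perform
nodetool repair --preview --full my_keyspace

# check whether already-repaired data is in sync across replicas
nodetool repair --validate my_keyspace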
-

See also

-

nodetool repair docs

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/security.html b/src/doc/4.0-alpha1/operating/security.html deleted file mode 100644 index 55efe265c..000000000 --- a/src/doc/4.0-alpha1/operating/security.html +++ /dev/null @@ -1,473 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-

There are three main components to the security features provided by Cassandra:

-
    -
  • TLS/SSL encryption for client and inter-node communication
  • -
  • Client authentication
  • -
  • Authorization
  • -
-

By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still:

-
    -
  • Craft internode messages to insert users into authentication schema
  • -
  • Craft internode messages to truncate or drop schema
  • -
  • Use tools such as sstableloader to overwrite system_auth tables
  • -
  • Attach to the cluster directly to capture write traffic
  • -
-

Correct configuration of all three security components should negate these vectors. Therefore, understanding Cassandra's security features is crucial to configuring your cluster to meet your security needs.

-
-

TLS/SSL Encryption

-

Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently.

-

In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can be overridden using the settings in cassandra.yaml, but this is not recommended unless there are policies in place which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be updated.

-

FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See the java document on FIPS -for more details.

-

For information on generating the keystore and truststore files used in SSL communications, see the Java documentation on creating keystores.

-
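As a minimal sketch of that process with the JDK keytool (alias, file names, passwords and validity period are all placeholders):

# generate a keystore containing a private key for this node
keytool -genkeypair -keyalg RSA -alias node1 -validity 365 -keystore node1.keystore -storepass mykeypass

# export the node's certificate and import it into a shared truststore
keytool -exportcert -alias node1 -keystore node1.keystore -storepass mykeypass -file node1.cer
keytool -importcert -alias node1 -file node1.cer -keystore server.truststore -storepass mytrustpass -noprompt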
-
-

SSL Certificate Hot Reloading

-

Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes.

-

Certificate hot reloading may also be triggered using the nodetool reloadssl command. Use this if you want Cassandra to immediately notice the changed certificates.

-
-

Inter-node Encryption

-

The settings for managing inter-node encryption are found in cassandra.yaml in the server_encryption_options -section. To enable inter-node encryption, change the internode_encryption setting from its default value of none -to one value from: rack, dc or all.

-
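A minimal sketch of the relevant cassandra.yaml section (keystore paths and passwords are placeholders; keep the other options already present in your configuration file):

server_encryption_options:
    internode_encryption: all
    keystore: conf/node1.keystore
    keystore_password: mykeypass
    truststore: conf/server.truststore
    truststore_password: mytrustpass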
-
-

Client to Node Encryption

-

The settings for managing client to node encryption are found in cassandra.yaml in the client_encryption_options -section. There are two primary toggles here for enabling encryption, enabled and optional.

-
    -
  • If neither is set to true, client connections are entirely unencrypted.
  • -
  • If enabled is set to true and optional is set to false, all client connections must be secured.
  • -
  • If both options are set to true, both encrypted and unencrypted connections are supported using the same port. -Client connections using encryption with this configuration will be automatically detected and handled by the server.
  • -
-

As an alternative to the optional setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set optional to false and use the native_transport_port_ssl -setting in cassandra.yaml to specify the port to be used for secure client communication.

-
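A minimal sketch of the corresponding cassandra.yaml section, assuming all client connections must be encrypted (paths and passwords are placeholders):

client_encryption_options:
    enabled: true
    optional: false
    keystore: conf/node1.keystore
    keystore_password: mykeypass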
-
-
-

Roles

-

Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -role_manager setting in cassandra.yaml. The default setting uses CassandraRoleManager, an implementation -which stores role information in the tables of the system_auth keyspace.

-

See also the CQL documentation on roles.

-
-
-

Authentication

-

Authentication is pluggable in Cassandra and is configured using the authenticator setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthenticator which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra’s permissions subsystem, so if authentication is disabled, effectively so are permissions.

-

The default distribution also includes PasswordAuthenticator, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication.

-
-

Enabling Password Authentication

-

Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster.

-

Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps:

-
    -
  1. Open a cqlsh session and change the replication factor of the system_auth keyspace. By default, this keyspace uses SimpleStrategy and a replication_factor of 1. It is recommended to change this for any non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to configure a replication factor of 3 to 5 per-DC.
  2. -
-
ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3};
-
-
-
    -
  1. Edit cassandra.yaml to change the authenticator option like so:
  2. -
-
authenticator: PasswordAuthenticator
-
-
-
    -
  1. Restart the node.
  2. -
  3. Open a new cqlsh session using the credentials of the default superuser:
  4. -
-
cqlsh -u cassandra -p cassandra
-
-
-
    -
  1. During login, the credentials for the default superuser are read with a consistency level of QUORUM, whereas -those for all other users (including superusers) are read at LOCAL_ONE. In the interests of performance and -availability, as well as security, operators should create another superuser and disable the default one. This step -is optional, but highly recommended. While logged in as the default superuser, create another superuser role which -can be used to bootstrap further configuration.
  2. -
-
# create a new superuser
-CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super';
-
-
-
    -
  1. Start a new cqlsh session, this time logging in as the new superuser, and disable the default superuser.
  2. -
-
ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false;
-
-
-
    -
  1. Finally, set up the roles and credentials for your application users with CREATE ROLE -statements.
  2. -
-

At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster.

-

Note that using PasswordAuthenticator also requires the use of CassandraRoleManager.

-

See also: Setting credentials for internal authentication, CREATE ROLE, ALTER ROLE, ALTER KEYSPACE and GRANT PERMISSION.

-
-
-
-

Authorization

-

Authorization is pluggable in Cassandra and is configured using the authorizer setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthorizer which performs no checking and so effectively grants all -permissions to all roles. This must be used if AllowAllAuthenticator is the configured authenticator.

-

The default distribution also includes CassandraAuthorizer, which does implement full permissions management -functionality and stores its data in Cassandra system tables.

-
-

Enabling Internal Authorization

-

Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests.

-

The following assumes that authentication has already been enabled via the process outlined in -Enabling Password Authentication. Perform these steps to enable internal authorization across the cluster:

-
    -
  1. On the selected node, edit cassandra.yaml to change the authorizer option like so:
  2. -
-
authorizer: CassandraAuthorizer
-
-
-
    -
  1. Restart the node.
  2. -
  3. Open a new cqlsh session using the credentials of a role with superuser credentials:
  4. -
-
cqlsh -u dba -p super
-
-
-
    -
  1. Configure the appropriate access privileges for your clients using GRANT PERMISSION -statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so -disruption to clients is avoided.
  2. -
-
GRANT SELECT ON ks.t1 TO db_user;
-
-
-
    -
  1. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node -restarts and clients reconnect, the enforcement of the granted permissions will begin.
  2. -
-

See also: GRANT PERMISSION, GRANT ALL <grant-all> and REVOKE PERMISSION

-
-
-
-

Caching

-

Enabling authentication and authorization places additional load on the cluster by frequently reading from the system_auth tables. Furthermore, these reads are in the critical paths of many client operations, and so have the potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role details are cached for a configurable period. The caching can be configured (and even disabled) from cassandra.yaml or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX are not persistent and will be re-read from cassandra.yaml when the node is restarted.

-

Each cache has 3 options which can be set:

-
-
Validity Period
-
Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache.
-
Refresh Rate
-
Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these -async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a -shorter time than the validity period.
-
Max Entries
-
Controls the upper bound on cache size.
-
-

The naming for these options in cassandra.yaml follows the convention:

-
    -
  • <type>_validity_in_ms
  • -
  • <type>_update_interval_in_ms
  • -
  • <type>_cache_max_entries
  • -
-

Where <type> is one of credentials, permissions, or roles.

-
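For example, the roles cache could be tuned in cassandra.yaml like this (the values are illustrative only):

roles_validity_in_ms: 2000
roles_update_interval_in_ms: 1000
roles_cache_max_entries: 1000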

As mentioned, these are also exposed via JMX in the mbeans under the org.apache.cassandra.auth domain.

-
-
-

JMX access

-

Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra’s own auth subsystem.

-

The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit cassandra-env.sh (or cassandra-env.ps1 on Windows) to change the LOCAL_JMX setting to no. Under the standard configuration, when remote JMX connections are enabled, standard JMX authentication is also switched on.

-

Note that by default, local-only connections are not subject to authentication, but this can be enabled.

-

If enabling remote connections, it is recommended to also use SSL connections.

-

Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as nodetool, are -correctly configured and working as expected.

-
-

Standard JMX Auth

-

Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -cassandra-env.sh by the line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

Edit the password file to add username/password pairs:

-
jmx_user jmx_password
-
-
-

Secure the credentials file so that only the user running the Cassandra process can read it:

-
$ chown cassandra:cassandra /etc/cassandra/jmxremote.password
-$ chmod 400 /etc/cassandra/jmxremote.password
-
-
-

Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in cassandra-env.sh:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

Then edit the access file to grant your JMX user readwrite permission:

-
jmx_user readwrite
-
-
-

Cassandra must be restarted to pick up the new settings.

-

See also : Using File-Based Password Authentication In JMX

-
-
-

Cassandra Integrated Auth

-

An alternative to the out-of-the-box JMX auth is to use Cassandra's own authentication and/or authorization providers for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not available until after a node has joined the ring, because the auth subsystem is not fully configured until that point. However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, to switch to integrated auth once the node has joined the ring and initial setup is complete.

-

With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates can be managed centrally using just cqlsh. Furthermore, fine grained control over exactly which operations are permitted on particular MBeans can be achieved via GRANT PERMISSION.

-

To enable integrated authentication, edit cassandra-env.sh to uncomment these lines:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
-#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
-
-
-

And disable the JMX standard auth by commenting this line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

To enable integrated authorization, uncomment this line:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
-
-
-

Check standard access control is off by ensuring this line is commented out:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as:

-
CREATE ROLE jmx WITH LOGIN = false;
-GRANT SELECT ON ALL MBEANS TO jmx;
-GRANT DESCRIBE ON ALL MBEANS TO jmx;
-GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx;
-GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx;
-
-# Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx;
-
-# Grant the jmx role to one with login permissions so that it can access the JMX tooling
-CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false;
-GRANT jmx TO ks_user;
-
-
-

Fine grained access control to individual MBeans is also supported:

-
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner;
-
-
-

This permits the ks_user role to invoke methods on the MBean representing a single table in test_keyspace, while -granting the same permission for all table level MBeans in that keyspace to the ks_owner role.

-

Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered.

-

See also: Permissions.

-
-
-

JMX With SSL

-

JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit the relevant lines in cassandra-env.sh (or cassandra-env.ps1 on Windows) to uncomment and set the values of these properties as required (a combined example follows the list):

-
-
com.sun.management.jmxremote.ssl
-
set to true to enable SSL
-
com.sun.management.jmxremote.ssl.need.client.auth
-
set to true to enable validation of client certificates
-
com.sun.management.jmxremote.registry.ssl
-
enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub
-
com.sun.management.jmxremote.ssl.enabled.protocols
-
by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is -not usually necessary and using the defaults is the preferred option.
-
com.sun.management.jmxremote.ssl.enabled.cipher.suites
-
by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that -this is not usually necessary and using the defaults is the preferred option.
-
javax.net.ssl.keyStore
-
set the path on the local filesystem of the keystore containing server private keys and public certificates
-
javax.net.ssl.keyStorePassword
-
set the password of the keystore file
-
javax.net.ssl.trustStore
-
if validation of client certificates is required, use this property to specify the path of the truststore containing -the public certificates of trusted clients
-
javax.net.ssl.trustStorePassword
-
set the password of the truststore file
-
-
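A minimal combined sketch for cassandra-env.sh (paths and passwords are placeholders; set only the properties you actually need):

JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=true"
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.registry.ssl=true"
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.need.client.auth=true"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/etc/cassandra/conf/node1.keystore"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=mykeypass"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStore=/etc/cassandra/conf/server.truststore"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStorePassword=mytrustpass"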

See also: Oracle Java7 Docs, -Monitor Java with JMX

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/snitch.html b/src/doc/4.0-alpha1/operating/snitch.html deleted file mode 100644 index e8178721d..000000000 --- a/src/doc/4.0-alpha1/operating/snitch.html +++ /dev/null @@ -1,177 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Snitch" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Snitch

-

In Cassandra, the snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route requests efficiently.
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping -machines into “datacenters” and “racks.” Cassandra will do its best not to have more than one replica on the same -“rack” (which may not actually be a physical location).
  • -
-
-

Dynamic snitching

-

The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is configured with the following properties in cassandra.yaml (an example follows the list):

-
    -
  • dynamic_snitch: whether the dynamic snitch should be enabled or disabled.
  • -
  • dynamic_snitch_update_interval_in_ms: controls how often to perform the more expensive part of host score -calculation.
  • -
  • dynamic_snitch_reset_interval_in_ms: if set greater than zero, this will allow ‘pinning’ of replicas to hosts -in order to increase cache capacity.
  • -
  • dynamic_snitch_badness_threshold: The badness threshold controls how much worse the pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned host was 20% worse than the fastest.
  • -
-
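A minimal sketch of these settings with typical values (illustrative; check the defaults shipped in your cassandra.yaml):

dynamic_snitch: true
dynamic_snitch_update_interval_in_ms: 100
dynamic_snitch_reset_interval_in_ms: 600000
dynamic_snitch_badness_threshold: 0.1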
-
-

Snitch classes

-

The endpoint_snitch parameter in cassandra.yaml should be set to the class that implements IEndpointSnitch, which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center or on the same rack. Out of the box, Cassandra provides the following snitch implementations (an example configuration follows the list):

-
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via gossip. If cassandra-topology.properties exists, -it is used as a fallback, allowing migration from the PropertyFileSnitch.
-
SimpleSnitch
-
Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
PropertyFileSnitch
-
Proximity is determined by rack and data center, which are explicitly configured in -cassandra-topology.properties.
-
Ec2Snitch
-
Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. -The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this -will not work across multiple regions.
-
Ec2MultiRegionSnitch
-
Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the -public IP as well). You will need to open the storage_port or ssl_storage_port on the public IP firewall -(For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection).
-
RackInferringSnitch
-
Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each -node’s IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an -example of writing a custom Snitch class and is provided in that spirit.
-
-
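A minimal sketch of configuring the recommended GossipingPropertyFileSnitch (datacenter and rack names are placeholders):

# cassandra.yaml
endpoint_snitch: GossipingPropertyFileSnitch

# cassandra-rackdc.properties on each node
dc=DC1
rack=RACK1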
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/operating/topo_changes.html b/src/doc/4.0-alpha1/operating/topo_changes.html deleted file mode 100644 index dbf39c1c2..000000000 --- a/src/doc/4.0-alpha1/operating/topo_changes.html +++ /dev/null @@ -1,221 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Adding, replacing, moving and removing nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Adding, replacing, moving and removing nodes

-
-

Bootstrap

-

Adding new nodes is called “bootstrapping”. The num_tokens parameter will define the amount of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for.

-
-

Token allocation

-

With the default token allocation algorithm the new node will pick num_tokens random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher amount of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead.

-

On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option -Dcassandra.allocate_tokens_for_keyspace=<keyspace>, where -<keyspace> is the keyspace from which the algorithm can find the load information to optimize token assignment for.

-
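One way to pass the flag, sketched for cassandra-env.sh on the joining node (the keyspace name is a placeholder; remove the option once the node has joined the ring):

JVM_OPTS="$JVM_OPTS -Dcassandra.allocate_tokens_for_keyspace=my_keyspace"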
-

Manual token assignment

-

You may specify a comma-separated list of tokens manually with the initial_token cassandra.yaml parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens.

-
-
-
-

Range streaming

-

After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state.

-

In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag -Dcassandra.consistent.rangemovement=false.

-
-
-

Resuming failed/hanged bootstrap

-

On 2.2+, if the bootstrap process fails, it’s possible to resume bootstrap from the previous saved state by calling -nodetool bootstrap resume. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag --Dcassandra.reset_bootstrap_progress=true.

-

On lower versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart the bootstrap process.

-
-
-

Manual bootstrapping

-

It’s possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -auto_bootstrap: false. This may be useful when restoring a node from a backup or creating a new data-center.

-
-
-
-

Removing nodes

-

You can take a node out of the cluster with nodetool decommission to a live node, or nodetool removenode (to any -other machine) to remove a dead one. This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas.

-
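For example (a sketch; the host ID is taken from the output of nodetool status on any live node):

# on the live node being taken out of the cluster
nodetool decommission

# from any other node, to remove a dead node
nodetool removenode <host-id>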

No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually.

-
-
-

Moving nodes

-

When num_tokens: 1 it’s possible to move the node position in the ring with nodetool move. Moving is both a -convenience over and more efficient than decommission + bootstrap. After moving a node, nodetool cleanup should be -run to remove any unnecessary data.

-
-
-

Replacing a dead node

-

In order to replace a dead node, start Cassandra with the JVM startup flag -Dcassandra.replace_address_first_boot=<dead_node_ip>. Once this property is enabled the node starts in a hibernate state, during which all the other nodes will see this node as DOWN (DN); however, this node will see itself as UP (UN). Accurate replacement state can be found in nodetool netstats.

-
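For example, in cassandra-env.sh on the replacement node (the IP address is a placeholder; remove the option after the replacement completes):

JVM_OPTS="$JVM_OPTS -Dcassandra.replace_address_first_boot=10.0.1.25"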

The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will only receive writes during the bootstrapping phase if it has a different IP address from the node that is being replaced. (See CASSANDRA-8523 and CASSANDRA-12344)

-

Once the bootstrapping is complete the node will be marked “UP”.

-
-

Note

-

If any of the following cases apply, you MUST run repair to make the replaced node consistent again, since -it missed ongoing writes during/prior to bootstrapping. The replacement timeframe refers to the period from when the -node initially dies to when a new node completes the replacement process.

-
    -
  1. The node is down for longer than max_hint_window_in_ms before being replaced.
  2. -
  3. You are replacing using the same IP address as the dead node and replacement takes longer than max_hint_window_in_ms.
  4. -
-
-
-
-

Monitoring progress

-

Bootstrap, replace, move and remove progress can be monitored using nodetool netstats which will show the progress -of the streaming operations.

-
-
-

Cleanup data after range movements

-

As a safety measure, Cassandra does not automatically remove data from nodes that “lose” part of their token range due -to a range movement operation (bootstrap, move, replace). Run nodetool cleanup on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/plugins/index.html b/src/doc/4.0-alpha1/plugins/index.html deleted file mode 100644 index 5a31da591..000000000 --- a/src/doc/4.0-alpha1/plugins/index.html +++ /dev/null @@ -1,116 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Third-Party Plugins" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Third-Party Plugins

-

Available third-party plugins for Apache Cassandra

-
-

CAPI-Rowcache

-

The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments.

-

The official page for the CAPI-Rowcache plugin contains further details how to build/run/download the plugin.

-
-
-

Stratio’s Cassandra Lucene Index

-

Stratio’s Lucene index is a Cassandra secondary index implementation based on Apache Lucene. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities such as with ElasticSearch or Apache Solr, including full text search capabilities, free multivariable, geospatial and bitemporal search, relevance queries and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability is guaranteed.

-

The official Github repository Cassandra Lucene Index contains everything you need to build/run/configure the plugin.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/search.html b/src/doc/4.0-alpha1/search.html deleted file mode 100644 index 2386872b2..000000000 --- a/src/doc/4.0-alpha1/search.html +++ /dev/null @@ -1,104 +0,0 @@ ---- -layout: docpage - -title: "Search" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "" -doc-header-links: ' - -' -doc-search-path: "#" - -extra-footer: ' - - - - -' - ---- -
-
- -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/searchindex.js b/src/doc/4.0-alpha1/searchindex.js deleted file mode 100644 index 0f0bd0d1e..000000000 --- a/src/doc/4.0-alpha1/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["architecture/dynamo","architecture/guarantees","architecture/index","architecture/overview","architecture/storage_engine","bugs","configuration/cassandra_config_file","configuration/index","contactus","cql/appendices","cql/changes","cql/ddl","cql/definitions","cql/dml","cql/functions","cql/index","cql/indexes","cql/json","cql/mvs","cql/operators","cql/security","cql/triggers","cql/types","data_modeling/index","development/ci","development/code_style","development/dependencies","development/documentation","development/gettingstarted","development/how_to_commit","development/how_to_review","development/ide","development/index","development/patches","development/release_process","development/testing","faq/index","getting_started/configuring","getting_started/drivers","getting_started/index","getting_started/installing","getting_started/querying","index","operating/audit_logging","operating/backups","operating/bloom_filters","operating/bulk_loading","operating/cdc","operating/compaction","operating/compression","operating/hardware","operating/hints","operating/index","operating/metrics","operating/read_repair","operating/repair","operating/security","operating/snitch","operating/topo_changes","plugins/index","tools/cassandra_stress","tools/cqlsh","tools/index","tools/nodetool/assassinate","tools/nodetool/bootstrap","tools/nodetool/cleanup","tools/nodetool/clearsnapshot","tools/nodetool/clientstats","tools/nodetool/compact","tools/nodetool/compactionhistory","tools/nodetool/compactionstats","tools/nodetool/decommission","tools/nodetool/describecluster","tools/nodetool/describering","tools/nodetool/disableauditlog","tools/nodetool/disableautocompaction","tools/nodetool/disablebackup","tools/nodetool/disablebinary","tools/nodetool/disablefullquerylog","tools/nodetool/disablegossip","tools/nodetool/disablehandoff","tools/nodetool/disablehintsfordc","tools/nodetool/disableoldprotocolversions","tools/nodetool/drain","tools/nodetool/enableauditlog","tools/nodetool/enableautocompaction","tools/nodetool/enablebackup","tools/nodetool/enablebinary","tools/nodetool/enablefullquerylog","tools/nodetool/enablegossip","tools/nodetool/enablehandoff","tools/nodetool/enablehintsfordc","tools/nodetool/enableoldprotocolversions","tools/nodetool/failuredetector","tools/nodetool/flush","tools/nodetool/garbagecollect","tools/nodetool/gcstats","tools/nodetool/getbatchlogreplaythrottle","tools/nodetool/getcompactionthreshold","tools/nodetool/getcompactionthroughput","tools/nodetool/getconcurrency","tools/nodetool/getconcurrentcompactors","tools/nodetool/getconcurrentviewbuilders","tools/nodetool/getendpoints","tools/nodetool/getinterdcstreamthroughput","tools/nodetool/getlogginglevels","tools/nodetool/getmaxhintwindow","tools/nodetool/getreplicas","tools/nodetool/getseeds","tools/nodetool/getsstables","tools/nodetool/getstreamthroughput","tools/nodetool/gettimeout","tools/nodetool/gettraceprobability","tools/nodetool/gossipinfo","tools/nodetool/handoffwindow","tools/nodetool/help","tools/nodetool/import","tools/nodetool/info","tools/nodetool/invalidatecountercache","tools/nodetool/invalidatekeycache","tools/nodetool/invalidaterowcache","tools/nodetool/join","tools/nodetool/listsnapshots","tools/nodetool/move","tools/nodetool/netstats","tools/nodetool/nodetool","too
ls/nodetool/pausehandoff","tools/nodetool/profileload","tools/nodetool/proxyhistograms","tools/nodetool/rangekeysample","tools/nodetool/rebuild","tools/nodetool/rebuild_index","tools/nodetool/refresh","tools/nodetool/refreshsizeestimates","tools/nodetool/reloadlocalschema","tools/nodetool/reloadseeds","tools/nodetool/reloadssl","tools/nodetool/reloadtriggers","tools/nodetool/relocatesstables","tools/nodetool/removenode","tools/nodetool/repair","tools/nodetool/repair_admin","tools/nodetool/replaybatchlog","tools/nodetool/resetfullquerylog","tools/nodetool/resetlocalschema","tools/nodetool/resumehandoff","tools/nodetool/ring","tools/nodetool/scrub","tools/nodetool/setbatchlogreplaythrottle","tools/nodetool/setcachecapacity","tools/nodetool/setcachekeystosave","tools/nodetool/setcompactionthreshold","tools/nodetool/setcompactionthroughput","tools/nodetool/setconcurrency","tools/nodetool/setconcurrentcompactors","tools/nodetool/setconcurrentviewbuilders","tools/nodetool/sethintedhandoffthrottlekb","tools/nodetool/setinterdcstreamthroughput","tools/nodetool/setlogginglevel","tools/nodetool/setmaxhintwindow","tools/nodetool/setstreamthroughput","tools/nodetool/settimeout","tools/nodetool/settraceprobability","tools/nodetool/sjk","tools/nodetool/snapshot","tools/nodetool/status","tools/nodetool/statusautocompaction","tools/nodetool/statusbackup","tools/nodetool/statusbinary","tools/nodetool/statusgossip","tools/nodetool/statushandoff","tools/nodetool/stop","tools/nodetool/stopdaemon","tools/nodetool/tablehistograms","tools/nodetool/tablestats","tools/nodetool/toppartitions","tools/nodetool/tpstats","tools/nodetool/truncatehints","tools/nodetool/upgradesstables","tools/nodetool/verify","tools/nodetool/version","tools/nodetool/viewbuildstatus","tools/sstable/index","tools/sstable/sstabledump","tools/sstable/sstableexpiredblockers","tools/sstable/sstablelevelreset","tools/sstable/sstableloader","tools/sstable/sstablemetadata","tools/sstable/sstableofflinerelevel","tools/sstable/sstablerepairedset","tools/sstable/sstablescrub","tools/sstable/sstablesplit","tools/sstable/sstableupgrade","tools/sstable/sstableutil","tools/sstable/sstableverify","troubleshooting/finding_nodes","troubleshooting/index","troubleshooting/reading_logs","troubleshooting/use_nodetool","troubleshooting/use_tools"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.todo":1,sphinx:54},filenames:["architecture/dynamo.rst","architecture/guarantees.rst","architecture/index.rst","architecture/overview.rst","architecture/storage_engine.rst","bugs.rst","configuration/cassandra_config_file.rst","configuration/index.rst","contactus.rst","cql/appendices.rst","cql/changes.rst","cql/ddl.rst","cql/definitions.rst","cql/dml.rst","cql/functions.rst","cql/index.rst","cql/indexes.rst","cql/json.rst","cql/mvs.rst","cql/operators.rst","cql/security.rst","cql/triggers.rst","cql/types.rst","data_modeling/index.rst","development/ci.rst","development/code_style.rst","development/dependencies.rst","development/documentation.rst","development/gettingstarted.rst","development/how_to_commit.rst","development/how_to_review.rst","development/ide.rst","development/index.rst","development/patches.rst","development/release_process.rst","development/testing.rst","faq/index.rst","getting_started/configuring.rst","getting_started/drivers.rst","getting_started/index.rst","getting_started/installi
ng.rst","getting_started/querying.rst","index.rst","operating/audit_logging.rst","operating/backups.rst","operating/bloom_filters.rst","operating/bulk_loading.rst","operating/cdc.rst","operating/compaction.rst","operating/compression.rst","operating/hardware.rst","operating/hints.rst","operating/index.rst","operating/metrics.rst","operating/read_repair.rst","operating/repair.rst","operating/security.rst","operating/snitch.rst","operating/topo_changes.rst","plugins/index.rst","tools/cassandra_stress.rst","tools/cqlsh.rst","tools/index.rst","tools/nodetool/assassinate.rst","tools/nodetool/bootstrap.rst","tools/nodetool/cleanup.rst","tools/nodetool/clearsnapshot.rst","tools/nodetool/clientstats.rst","tools/nodetool/compact.rst","tools/nodetool/compactionhistory.rst","tools/nodetool/compactionstats.rst","tools/nodetool/decommission.rst","tools/nodetool/describecluster.rst","tools/nodetool/describering.rst","tools/nodetool/disableauditlog.rst","tools/nodetool/disableautocompaction.rst","tools/nodetool/disablebackup.rst","tools/nodetool/disablebinary.rst","tools/nodetool/disablefullquerylog.rst","tools/nodetool/disablegossip.rst","tools/nodetool/disablehandoff.rst","tools/nodetool/disablehintsfordc.rst","tools/nodetool/disableoldprotocolversions.rst","tools/nodetool/drain.rst","tools/nodetool/enableauditlog.rst","tools/nodetool/enableautocompaction.rst","tools/nodetool/enablebackup.rst","tools/nodetool/enablebinary.rst","tools/nodetool/enablefullquerylog.rst","tools/nodetool/enablegossip.rst","tools/nodetool/enablehandoff.rst","tools/nodetool/enablehintsfordc.rst","tools/nodetool/enableoldprotocolversions.rst","tools/nodetool/failuredetector.rst","tools/nodetool/flush.rst","tools/nodetool/garbagecollect.rst","tools/nodetool/gcstats.rst","tools/nodetool/getbatchlogreplaythrottle.rst","tools/nodetool/getcompactionthreshold.rst","tools/nodetool/getcompactionthroughput.rst","tools/nodetool/getconcurrency.rst","tools/nodetool/getconcurrentcompactors.rst","tools/nodetool/getconcurrentviewbuilders.rst","tools/nodetool/getendpoints.rst","tools/nodetool/getinterdcstreamthroughput.rst","tools/nodetool/getlogginglevels.rst","tools/nodetool/getmaxhintwindow.rst","tools/nodetool/getreplicas.rst","tools/nodetool/getseeds.rst","tools/nodetool/getsstables.rst","tools/nodetool/getstreamthroughput.rst","tools/nodetool/gettimeout.rst","tools/nodetool/gettraceprobability.rst","tools/nodetool/gossipinfo.rst","tools/nodetool/handoffwindow.rst","tools/nodetool/help.rst","tools/nodetool/import.rst","tools/nodetool/info.rst","tools/nodetool/invalidatecountercache.rst","tools/nodetool/invalidatekeycache.rst","tools/nodetool/invalidaterowcache.rst","tools/nodetool/join.rst","tools/nodetool/listsnapshots.rst","tools/nodetool/move.rst","tools/nodetool/netstats.rst","tools/nodetool/nodetool.rst","tools/nodetool/pausehandoff.rst","tools/nodetool/profileload.rst","tools/nodetool/proxyhistograms.rst","tools/nodetool/rangekeysample.rst","tools/nodetool/rebuild.rst","tools/nodetool/rebuild_index.rst","tools/nodetool/refresh.rst","tools/nodetool/refreshsizeestimates.rst","tools/nodetool/reloadlocalschema.rst","tools/nodetool/reloadseeds.rst","tools/nodetool/reloadssl.rst","tools/nodetool/reloadtriggers.rst","tools/nodetool/relocatesstables.rst","tools/nodetool/removenode.rst","tools/nodetool/repair.rst","tools/nodetool/repair_admin.rst","tools/nodetool/replaybatchlog.rst","tools/nodetool/resetfullquerylog.rst","tools/nodetool/resetlocalschema.rst","tools/nodetool/resumehandoff.rst","tools/nodetool/ring.rst","tools/nodetool/scrub.rs
t","tools/nodetool/setbatchlogreplaythrottle.rst","tools/nodetool/setcachecapacity.rst","tools/nodetool/setcachekeystosave.rst","tools/nodetool/setcompactionthreshold.rst","tools/nodetool/setcompactionthroughput.rst","tools/nodetool/setconcurrency.rst","tools/nodetool/setconcurrentcompactors.rst","tools/nodetool/setconcurrentviewbuilders.rst","tools/nodetool/sethintedhandoffthrottlekb.rst","tools/nodetool/setinterdcstreamthroughput.rst","tools/nodetool/setlogginglevel.rst","tools/nodetool/setmaxhintwindow.rst","tools/nodetool/setstreamthroughput.rst","tools/nodetool/settimeout.rst","tools/nodetool/settraceprobability.rst","tools/nodetool/sjk.rst","tools/nodetool/snapshot.rst","tools/nodetool/status.rst","tools/nodetool/statusautocompaction.rst","tools/nodetool/statusbackup.rst","tools/nodetool/statusbinary.rst","tools/nodetool/statusgossip.rst","tools/nodetool/statushandoff.rst","tools/nodetool/stop.rst","tools/nodetool/stopdaemon.rst","tools/nodetool/tablehistograms.rst","tools/nodetool/tablestats.rst","tools/nodetool/toppartitions.rst","tools/nodetool/tpstats.rst","tools/nodetool/truncatehints.rst","tools/nodetool/upgradesstables.rst","tools/nodetool/verify.rst","tools/nodetool/version.rst","tools/nodetool/viewbuildstatus.rst","tools/sstable/index.rst","tools/sstable/sstabledump.rst","tools/sstable/sstableexpiredblockers.rst","tools/sstable/sstablelevelreset.rst","tools/sstable/sstableloader.rst","tools/sstable/sstablemetadata.rst","tools/sstable/sstableofflinerelevel.rst","tools/sstable/sstablerepairedset.rst","tools/sstable/sstablescrub.rst","tools/sstable/sstablesplit.rst","tools/sstable/sstableupgrade.rst","tools/sstable/sstableutil.rst","tools/sstable/sstableverify.rst","troubleshooting/finding_nodes.rst","troubleshooting/index.rst","troubleshooting/reading_logs.rst","troubleshooting/use_nodetool.rst","troubleshooting/use_tools.rst"],objects:{},objnames:{},objtypes:{},terms:{"000kib":186,"00t89":22,"011mib":186,"018kib":186,"01t02":197,"021kib":186,"028809z":183,"031mib":186,"03t04":22,"054mib":186,"055z":183,"056kib":186,"061kib":186,"062mib":186,"063kib":186,"064kib":186,"0665ae80b2d711e886c66d2c86545d91":184,"06t22":197,"077mib":186,"078kib":186,"081kib":186,"082kib":186,"090kib":186,"092mib":186,"096gib":194,"0974e5a0aa5811e8a0a06d2c86545d91":186,"099kib":186,"0ee8b91fdd0":198,"0h00m04":198,"0x0000000000000000":199,"0x0000000000000003":14,"0x00000004":13,"0x00007f829c001000":199,"0x00007f82d0856000":199,"0x00007f82e800e000":199,"0x00007f82e80cc000":199,"0x00007f82e80d7000":199,"0x00007f82e84d0800":199,"0x2a19":199,"0x2a29":199,"0x2a2a":199,"0x2a2c":199,"0x3a74":199,"100b":60,"100k":60,"100mb":6,"1024l":43,"105kib":186,"10mb":6,"10s":[61,199],"10x":[6,48],"115mib":186,"11e6":61,"11e8":198,"122kib":186,"128kb":199,"128mib":6,"128th":4,"12gb":50,"12h30m":22,"130mib":186,"142mib":190,"147mib":186,"14t00":197,"150kib":186,"155kib":186,"15m":53,"160mb":48,"162kib":186,"165kib":186,"167kb":199,"16l":43,"16mb":[36,48],"16th":6,"173kib":186,"176kib":186,"17t06":197,"184kb":199,"19t03":[147,190],"1mo":22,"1st":22,"200m":[197,199],"203mib":186,"2062b290":198,"20m":199,"20t20":183,"217kb":199,"217mib":186,"22z":183,"232mib":186,"23t06":197,"23z":183,"244m":199,"245mib":186,"247mib":186,"24h":22,"25005289beb2":183,"250m":6,"251m":199,"253mib":186,"256mb":6,"256th":6,"258mib":186,"25mb":199,"265kib":186,"270mib":186,"27t04":197,"280mib":186,"28t17":197,"295kib":186,"299kib":186,"29d":22,"29t00":197,"2cc0":198,"2e10":10,"2gb":50,"2nd":[6,11,57],"2xlarg":50,"300mib":186,"300s":6,"307kib":186,"30
kb":199,"30s":6,"30t23":197,"314kib":186,"322kib":186,"325kib":186,"327e":61,"32gb":50,"32mb":[6,36],"331mib":186,"333kib":186,"33m":197,"348mib":186,"353mib":194,"3578d7de":183,"35ea8c9f":198,"361kib":186,"366b":199,"36x":40,"370mib":186,"378711z":183,"383b":199,"384z":183,"385b":199,"386kib":186,"387mib":186,"388mib":186,"392kib":186,"392mib":186,"394kib":186,"3f22a07b2bc6":183,"3ff3e5109f22":13,"3gb":[49,199],"3ms":199,"3rd":[6,53,57],"401mib":186,"406mib":186,"40a7":198,"40f3":13,"40fa":198,"40s":199,"410kib":186,"412kib":186,"416mib":194,"41b52700b4ed11e896476d2c86545d91":187,"423b":199,"423kib":186,"4248dc9d790e":183,"431kib":186,"43kb":199,"440kib":186,"443kib":186,"449mib":186,"452kib":186,"457mib":186,"458mib":186,"461mib":186,"465kib":186,"46e9":198,"476mib":186,"481mib":186,"482mib":190,"48d6":183,"4ae3":13,"4d40":183,"4f3438394e39374d3730":187,"4f58":198,"4kb":11,"4mib":6,"4xlarg":50,"500m":199,"501mib":186,"50kb":[6,199],"50m":199,"50mb":[6,43,48,191],"50th":195,"512mb":6,"512mib":6,"513kib":186,"521kib":186,"524kib":186,"536kib":186,"543mib":186,"545kib":186,"54kb":199,"550mib":186,"5573e5b09f14":13,"559kib":186,"561mib":186,"563kib":186,"563mib":186,"56m":197,"571kib":186,"576kb":199,"5850e9f0a63711e8a5c5091830ac5256":192,"591mib":186,"592kib":186,"5gb":43,"5kb":6,"5level":43,"5mb":48,"603kib":186,"606mib":186,"61111111111111e":187,"613mib":186,"619kib":186,"61de":198,"635kib":186,"6365332094dd11e88f324f9c503e4753":[185,188,190,191,193,194],"638mib":186,"640kib":186,"646mib":186,"64k":6,"64kb":49,"650b":199,"65c429e08c5a11e8939edf4f403979ef":[183,185],"65kb":199,"663kib":186,"665kib":186,"669kb":199,"684mib":186,"688kib":186,"690mib":186,"6e630115fd75":198,"6gb":198,"6ms":6,"6tb":50,"701mib":186,"715b":199,"718mib":186,"71b0a49":197,"725mib":186,"730kib":186,"732mib":186,"734mib":186,"736kb":199,"7374e9b5ab08c1f1e612bf72293ea14c959b0c3c":29,"737mib":186,"738mib":186,"743kib":186,"744mib":186,"751mib":186,"752e278f":198,"75th":53,"771mib":186,"775mib":194,"780mib":186,"782kib":186,"783522z":183,"789z":183,"791mib":186,"793kib":186,"798mib":186,"79kb":199,"7f3a":198,"802kib":186,"812mib":186,"813kib":186,"814kib":186,"832mib":186,"835kib":186,"840kib":186,"843mib":186,"845b":199,"846kib":186,"848kib":186,"84fc":183,"861mib":186,"86400s":48,"869kb":199,"872kib":186,"877mib":186,"880mib":186,"882kib":186,"889mib":186,"892kib":186,"894mib":186,"89h4m48":22,"8gb":[50,199],"8th":[6,47],"903mib":186,"90th":53,"911kib":186,"920kib":186,"920mib":186,"9328455af73f":198,"938kib":186,"954kib":186,"957mib":186,"95ac6470":61,"95th":53,"965kib":186,"9695b790a63211e8a6fb091830ac5256":192,"974b":198,"975kib":186,"983kib":186,"98th":53,"993mib":186,"996kib":186,"99percentil":11,"99th":[53,195],"9dc1a293":198,"9e6054da04a7":198,"9gb":199,"9th":53,"\u00eatre":9,"abstract":[25,30],"boolean":[9,12,14,17,20,22,61],"break":[11,33,48,192,196,199],"byte":[4,6,9,13,22,53,70,88,124,174,186,198],"case":[4,6,10,11,12,13,14,16,17,18,22,26,29,30,33,35,36,45,50,56,58,60,61,186,197,198,199],"catch":[25,188],"class":[6,11,14,22,25,31,35,43,48,49,52,56,60,125,137,158,186,197],"default":[4,6,10,11,13,14,17,18,20,22,24,31,35,36,37,40,43,45,47,48,49,53,55,56,58,60,61,65,84,88,95,124,125,127,130,140,141,147,162,164,175,183,186,187,191,195,197,198,199],"enum":9,"export":[31,53,61,199],"final":[14,20,25,27,31,34,43,47,48,50,56,62,141,182,199],"float":[9,10,11,12,14,17,19,22,45,49],"function":[6,9,10,11,12,15,16,18,20,22,30,38,42,43,56,57,59,61,182],"goto":24,"import":[11,14,22,31,32,35,37,48,50,53,55,61,125,195,198,19
9],"int":[4,9,10,11,13,14,17,18,19,20,22,35,47,49,53],"long":[4,6,13,22,29,30,36,48,53,60,189,190,197,199],"new":[0,4,6,10,11,14,16,17,18,19,20,21,22,24,25,27,28,30,31,33,34,35,39,42,43,45,48,50,56,58,60,116,123,125,185,186,188,190,193,195],"null":[9,10,12,13,14,17,18,22,25,61],"public":[6,14,25,26,34,35,36,40,56,57],"return":[6,9,11,13,14,16,17,18,19,20,22,30,60,140,184,185,199],"short":[4,22,27],"static":[6,9,10,11,18,27,53,57,187],"super":[4,56,59,60],"switch":[4,6,10,20,31,36,52,53,56,57],"throw":[6,14,25,35,195],"transient":[6,11],"true":[6,11,12,17,20,22,31,36,43,47,48,56,58,61,122,125,192],"try":[6,11,25,26,31,33,36,43,48,55,140,186,198,199],"var":[4,6,25,40,183,184,185,186,187,188,189,190,191,192,193,194,197,199],"void":35,"while":[4,6,10,11,12,13,22,29,33,34,45,48,49,50,55,56,61,186,195,197,198,199],AES:6,AND:[9,11,13,14,18,20,56,60,61,197],AWS:50,Added:10,Adding:[6,11,20,22,36,42,52,56],And:[11,14,20,56],Are:30,Ave:22,BUT:25,But:[13,20,22,24,25,33,36,61],CAS:[6,198],CCS:199,CFs:[140,147],CLS:61,CMS:199,DCs:6,DNS:36,Doing:[10,62,182],EBS:50,For:[0,4,6,9,10,11,12,13,14,15,16,17,18,20,21,22,28,33,34,35,36,37,40,41,43,48,50,55,56,57,60,61,186,187,188,191,195,197,198,199],GCs:6,HDs:199,Has:30,IDE:[28,32,42],IDEs:[31,32],IDs:[125,165],INTO:[6,9,11,13,14,17,22],IPs:[6,57,146,165],Ids:171,JKS:6,JPS:199,KBs:6,LCS:[11,187],NFS:50,NOT:[6,9,10,11,13,14,16,18,20,21,22],Not:[13,20,33,48,49],ONE:[0,6,11,53,60,61],One:[33,35,36,48,199],PFS:6,Pis:50,QPS:195,Such:22,THE:6,TLS:[6,52,186],That:[0,11,12,18,22,33,36,48,61,199],The:[0,4,6,8,9,10,11,12,14,16,18,19,20,21,22,24,25,27,28,29,31,33,34,35,36,37,40,41,42,45,47,49,50,53,55,56,57,58,59,60,61,65,68,73,75,81,85,91,94,95,98,103,107,109,111,116,123,125,127,131,132,138,140,147,150,151,158,164,165,166,173,175,178,179,181,185,186,187,188,190,191,192,193,196,197,198,199],Their:22,Then:[13,35,36,40,48,56,188,192,199],There:[6,10,11,12,13,14,22,31,33,35,36,48,53,55,56,60,189,191,195,198,199],These:[4,6,11,14,31,53,55,56,60,61,193,195,196,197,198,199],USE:[9,14,15,43],USING:[9,13,16,21,22,48],Use:[6,11,13,20,36,41,42,47,52,56,60,61,62,63,68,125,130,140,171,178,182,183,188,189,190,193,196],Used:[53,199],Useful:199,Uses:[6,17,52,57],Using:[11,13,27,35,36,56,62,182,183,186,190,193,197],WILL:6,WITH:[9,11,12,16,18,20,45,47,48,49,56,60,61],Will:[6,42,88,125,158,188],With:[6,13,17,36,48,55,58,64,197,199],Yes:36,_cache_max_entri:56,_cdc:47,_development_how_to_review:28,_if_:6,_must_:6,_only_:197,_trace:[53,198],_udt:14,_update_interval_in_m:56,_use:14,_validity_in_m:56,_x86_64_:199,a278b781fe4b2bda:40,a6fd:198,abbrevi:34,abil:[14,20,36,49],abilityid:16,abl:[0,6,14,22,24,27,31,35,36,48,56,195,196],abort:24,about:[4,6,20,27,31,32,33,35,36,45,48,57,61,67,125,146,187,197,198,199],abov:[4,6,8,11,12,13,14,22,26,31,33,34,36,48,53,60,62,182,186,193,199],absenc:12,abstracttyp:22,ac79:198,acceler:59,accept:[0,6,10,11,12,13,17,33,35,45,49,58,83,125],access:[6,10,20,22,31,33,43,50,52,53,59,186,187,195,196,199],accident:185,accompani:6,accord:[4,6,36],accordingli:[6,14,36],account:[6,22,27,34,35,199],accru:[48,53],accumul:[6,48,53],accur:[6,36,45,58,146,187],accuraci:[45,127,175],acheiv:56,achiev:[6,48,53],achil:38,ack:[4,6],acoount:53,acquir:[20,53],across:[6,11,20,33,53,55,56,57,60,125,129,187,194],act:197,action:[6,13,20,31,194,199],activ:[4,6,28,33,47,53,55,61,125,127,175,195,197,198,199],activetask:53,actor:56,actual:[4,6,13,21,25,27,30,34,36,40,43,48,55,57,60,140,190,199],acycl:20,add:[0,6,9,10,11,22,24,27,28,29,30,32,33,34,37,40,42,43,48,55,56,60,188,193,197],addamsfamil
i:11,added:[0,4,6,10,11,14,19,27,30,47,48,191],adding:[6,13,14,30,50,61,188,194],addit:[0,6,9,11,13,19,20,22,31,33,37,43,48,50,53,56,61,197,199],addition:[11,13,24,48,55,60,197],additional_write_polici:11,address:[6,8,17,22,24,31,33,37,42,43,53,57,58,59,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,198,199],addsign:34,adher:10,adjac:48,adjust:[6,45],adler32:4,adv:40,advanc:[6,27,43,52,56,196],advantag:50,advers:[36,198],advic:[33,36],advis:[6,12,18,22,36],aefb:183,af08:13,afd:22,affect:[11,30,33,36,48,147,190,195,199],afford:6,after:[5,6,10,11,12,13,14,16,17,18,31,33,34,36,47,48,50,52,53,56,57,61,188,189,192],afterward:[24,27,31,35],afunct:14,again:[6,33,34,48,55,58,61,189,192],against:[6,11,14,18,27,33,35,36,50,55,58,60,61,140,187,199],agent:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,199],agentlib:31,aggreg:[6,9,10,13,15,18,20,43,53,61],aggress:195,ago:189,aid:12,aim:[6,197],akeyspac:14,alg:186,algorithm:[6,11,58,186,197],alia:[10,13,14,38],alias:[6,10,18],alic:20,align:25,aliv:6,all:[0,4,6,9,11,12,13,14,17,18,19,22,24,25,26,27,29,30,31,33,35,42,43,45,47,48,53,55,56,58,60,61,62,65,66,67,83,95,100,116,117,122,125,127,129,138,141,147,162,164,166,175,177,178,179,182,184,188,190,194,195,197,198,199],allmemtableslivedatas:53,allmemtablesoffheaps:53,allmemtablesonheaps:53,alloc:[6,36,47,50,53],allocate_tokens_for_keyspac:58,allow:[0,4,6,9,10,11,12,14,16,17,18,22,24,27,28,37,45,47,48,49,50,55,57,60,190,198,199],allowallauthent:[6,56],allowallauthor:[6,56],allowallinternodeauthent:6,allowallnetworkauthor:6,almost:[4,6,14,22,48,195,199],alon:25,along:[6,13,28,34,43,122,125,197],alongsid:[41,61],alpha1:[11,22,42],alphabet:25,alphanumer:[11,20],alreadi:[6,11,14,16,18,22,33,36,48,56,60,62,178,182,191],also:[0,4,6,10,11,12,13,14,17,18,20,22,24,27,31,33,34,35,36,37,43,47,48,50,53,56,58,61,95,179,192,193,197,198,199],alter:[0,9,10,15,17,36,45,47,48,49,56],alter_keyspace_stat:12,alter_role_stat:12,alter_table_instruct:11,alter_table_stat:12,alter_type_modif:22,alter_type_stat:[12,22],alter_user_stat:12,altern:[10,11,12,13,17,22,31,33,37,50,56,186],although:[6,33,60,197,199],alwai:[0,4,6,9,10,11,13,14,18,22,25,27,33,34,35,36,48,50,60,195,199],amend:29,amongst:11,amount:[6,11,13,22,31,33,35,36,48,49,50,53,55,58,61,140,199],amplif:[48,50],anaggreg:14,analogu:13,analysi:[196,197],analyt:45,analyz:[35,199],ancestor:[4,193],ani:[0,4,6,10,11,12,13,14,17,18,20,21,22,24,26,27,29,30,31,33,34,35,37,40,42,48,50,53,55,56,58,60,61,63,116,125,130,147,162,183,187,190,192,193,196,197,198],annot:25,anonym:[12,22,43],anoth:[6,11,14,20,22,35,48,56,61,184,191,196,199],anotherarg:14,answer:[32,199],ant:[24,26,31,33,35],antclassload:35,anti:[6,22],anticip:[0,11],anticompact:[48,53,191],anticompactiontim:53,antientropystag:[53,198],antipattern:50,anymor:[29,48],anyon
:25,anyth:48,anywai:6,anywher:[13,47],apach:[2,5,6,7,14,21,25,26,27,29,30,32,33,34,35,36,39,40,43,48,49,53,56,59,62,183,184,185,186,187,188,189,190,191,193,194,197],apart:43,api:[6,8,17,41,57],appear:[6,11,12,14,48,61],append:[4,22,29,43,50,53,61,197],appendic:[15,42],appendix:[12,15],appl:22,appli:[4,6,9,10,11,12,13,20,22,29,33,35,36,53,58,60,61],applic:[0,6,11,20,25,28,30,31,43,49,56,60,197],appreci:33,approach:[4,48,58],appropri:[6,11,20,22,30,33,34,56,57,58,197],approv:24,approxim:[48,53,187],apt:[40,199],arbitrari:[11,12,22,60],architectur:[36,42,59],archiv:[4,6,34,47,88],archive_command:88,archive_retri:88,area:[28,199],aren:[13,55],arg:[14,125,163,183,187,193],argnam:14,argnum:14,argument:[6,11,13,14,16,17,36,37,49,60,61,63,64,65,66,68,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181],arguments_declar:14,arguments_signatur:14,arithmet:[10,12,15,42],arithmetic_oper:12,armor:34,around:[6,20,48,50,57,199],arrai:[6,36],arriv:[6,33,36],artifact:[26,31,32],artifici:11,asap:10,asc:[9,11,13],ascend:[11,13],ascii:[9,14,17,22],asdf:183,asf:[8,31,34],ask:[5,33,34,35,42,56],aspect:11,assassin:125,assertionerror:25,assertrow:35,assess:[198,199],assign:[6,13,20,36],associ:[6,11,59,193,195],assum:[6,11,14,27,31,56,57,195,199],assumpt:56,astyanax:38,async:[6,56],asynchron:[16,36,50],asynchroni:53,atabl:14,atom:[11,13,21,29],atomiclong:53,attach:[28,33,56,59,199],attack:56,attemp:53,attempt:[0,6,11,16,18,20,22,36,43,48,53,55,56,61,62,141,182,192,197],attent:[25,26,33,34],attribut:[43,48],audienc:0,audit:[6,74,84,125],audit_log:6,audit_logging_opt:43,audit_logs_dir:43,auditlog:84,auditlogview:43,audt:43,aug:192,auth:[6,43,186],authent:[10,43,52,61,186],authenticatedus:6,author:[9,20,22,27,33,52,60],authorizationproxi:56,authprovid:186,auto:[6,11,36,60,166],auto_bootstrap:58,autocompact:[48,75,85,125,166],autom:[25,28],automat:[6,13,14,16,24,27,31,35,36,40,48,55,56,58,60],avail:[0,6,8,11,14,20,24,31,33,34,35,40,43,47,55,56,57,59,61,65,95,138,147,158,178,195,197,199],availabil:6,averag:[6,14,48,53,186,197,198,199],average_live_cells_per_slice_last_five_minut:174,average_s:11,average_tombstones_per_slice_last_five_minut:174,averagefin:14,averagest:14,avg:[186,199],avg_bucket_s:48,avgqu:199,avgrq:199,avoid:[6,11,12,25,30,33,45,48,50,53,56,57,61,179,186],awai:[31,58,61,198],await:199,awar:[0,6,11,33,45,49,146,195,198],awesom:60,azur:50,b124:13,b2c5b10:197,b70de1d0:13,b7a2:198,b7c5:198,b957:183,b9c5:198,back:[6,11,27,43,48,53,58,198],backend:6,background:[34,36,40,56,197,199],backlog:6,backpressur:6,backpressurestrategi:6,backup:[6,42,48,52,58,61,76,86,125,167,192,193],backward:[6,10,11,15,20,22],bad:[6,14,36,56,57,195,198],balanc:[6,58,195,198],banana:22,band:22,bandwidth:[6,59,199],bar:[12,25,199],bardet:22,bare:6,base:[4,6,10,11,13,14,18,19,20,22,24,26,28,29,32,33,34,35,36,43,48,50,53,56,58,59,187,195,198],bash:[36,199],basi:[6,24,36,49],basic:[6,11,48,50,60,62,182,193,196],batch:[0,4,6,9,11,15,35,42,52,60,61,195,199],batch_remov:[53,198],batch_stat:12,batch_stor:[53,198],batchlog:[13,53,97,125,142,148],batchtyp:60,bc9cf530b1da11e886c66d2c86545d91:190,be34:13,beatl:22,beca:61,becaus:[0,4,6,11,13,14,20,40,48,49,53,56,187,190,199],becom:[4,6,11,14,20,33,48,53,56,58],been:[0,4,6,10,11,13,14,15,20,22,30,33,34,48,50,55,56,147,190,193,195],befor:[0,4,6,10,11,13,14,16,19,21,22,24,27,28,31,32,34,35,38,43,48,53,56,57,58,60,61,88,164,182,183,184,185,186,187,188,189,
190,191,192,193,194,195],begin:[9,12,13,35,56,61],beginn:33,begintoken:61,behav:6,behavior:[0,6,10,11,14,17,22,25,30,45,48,58,141,195],behind:[6,25,35,36,43,48],being:[0,4,6,11,13,17,22,30,34,35,36,43,45,48,53,55,58,188,197,198,199],believ:195,belong:[11,13,14,53,65,125],below:[6,11,12,13,17,20,22,26,33,40,43,48,53,61,71,186,188,195,197],benchmark:[50,60],benefici:48,benefit:[6,28,45,48,50,52,186],best:[6,27,34,35,48,52,56,57,195,199],best_effort:6,better:[6,25,27,28,33,48,50,186,198,199],between:[0,4,6,9,10,11,12,13,15,33,36,45,48,49,53,55,56,59,60,140,162,199],beyond:[6,48,61,179],big:[6,48,68,183,184,185,186,187,189,190,191,192,193,194],bigger:[11,48],biggest:14,bigint:[9,14,17,19,22],bigintasblob:14,bigtableread:[184,190,192,194],bin:[26,31,40,41,61,197],binari:[14,34,39,43,56,77,87,125,168,197],binauditlogg:84,bind:[6,10,12,14,36],bind_mark:[12,13,18,22],binlog:43,biolat:199,biolog:11,biosnoop:199,birth:13,birth_year:13,bit:[14,17,22,26,33,36,49,50],bite:36,bitempor:59,bitrot:11,bitstr:9,black:6,blank:[6,25,36,187],blindli:36,blob:[9,10,12,17,22,42,49,60],blob_plain:34,blobasbigint:14,blobastyp:14,block:[4,6,11,29,37,43,48,50,53,56,62,88,182,197,198,199],blockedonalloc:6,blog:[6,13],blog_til:13,blog_titl:13,blogpost:60,bloom:[4,11,42,50,52,53,187],bloom_filter_false_posit:174,bloom_filter_false_ratio:174,bloom_filter_fp_ch:[4,11,45],bloom_filter_off_heap_memory_us:174,bloom_filter_space_us:174,bloomfilterdiskspaceus:53,bloomfilterfalseposit:53,bloomfilterfalseratio:53,bloomfilteroffheapmemoryus:53,blunt:56,bnf:12,bob:[13,20],bodi:[6,11,12,60],boilerpl:32,boolstyl:61,boost:6,boot:36,bootstrap:[0,6,42,49,52,53,56,125,130,158,188],born:13,both:[0,6,11,13,14,18,22,28,29,30,33,34,36,37,45,48,49,50,53,56,58,60,61,193,199],bottleneck:6,bottom:36,bound:[4,6,11,12,22,43,50,56],boundari:188,box:[6,56,57],brace:25,bracket:12,braket:12,branch:[24,27,29,30,31,34,35],branchnam:33,breadcrumb:195,breakdown:[198,199],breakpoint:31,breed:35,brendangregg:199,brief:199,briefli:198,bring:6,brk:36,broadcast:6,broadcast_address:57,broken:[6,48,53,190],brows:[6,34,183,184,185,186,187,188,189,190,191,193,194],browser:[61,199],bucket:48,bucket_high:48,bucket_low:48,buff:199,buffer:[4,6,47,53],bufferpool:52,buffers_mb:199,bug:[10,29,32,34,35,36,42,55],build:[18,24,26,27,28,32,33,34,35,42,53,55,59,60,125,181],buildenv:34,builder:[6,102,125,155],built:[18,31,53],bulk:[42,52,186],bump:[4,10,188],bunch:25,burn:47,button:[27,31,36],bytebuff:14,byteorderedpartition:[6,14],bytesanticompact:53,bytescompact:53,bytesflush:53,bytesmutatedanticompact:53,bytespendingrepair:53,bytesrepair:53,bytestyp:[9,187],bytesunrepair:53,bytesvalid:53,bz2:34,c60d:183,c73de1d3:13,cach:[6,11,20,26,36,37,50,52,57,116,118,119,120,125,149,150,198],cachecleanupexecutor:[53,198],cached_mb:199,cachenam:53,cachestat:199,calcul:[6,45,47,48,53,57,186,187],call:[9,11,12,13,14,20,25,27,32,37,42,43,48,50,53,58,125,158,199],callback:53,caller:25,can:[0,4,5,6,8,9,10,11,12,13,14,16,17,18,19,20,21,22,24,25,26,27,28,29,30,31,33,34,35,37,40,41,42,43,45,47,48,49,50,53,55,56,57,58,60,61,63,65,66,68,73,75,81,85,88,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,183,184,185,186,188,189,190,191,192,195,196,197,198,199],cancel:[10,141],candid:48,cannot:[6,9,11,13,14,17,18,20,22,35,48,56,63,125,198],cap:[12,99,104,110,125,152,157,160],capabl:[0,6,36,59,61],capac:[6,47,53,57,59,125,127,149,175,194,195,197,199],capacit:195,capi:4
2,captur:[6,42,52,56,62,197],cardin:187,care:[6,48,60,140,199],carefulli:26,carlo:20,carri:[25,140],cascommit:53,cascontent:[111,161],casprepar:53,caspropos:53,casread:53,cassablanca:22,cassafort:38,cassandra:[0,2,4,5,8,10,11,13,14,18,20,21,22,25,26,28,29,33,38,39,41,45,48,49,50,53,55,57,58,61,84,88,125,136,140,143,147,172,180,182,183,184,185,186,187,188,189,190,191,193,194,195,196,198,199],cassandra_flam:199,cassandra_hom:[4,6,47,56,197],cassandra_job_dsl_se:24,cassandra_stack:199,cassandraauthor:[6,56],cassandradaemon:[31,40],cassandrafullquerylog:6,cassandralogin:56,cassandranetworkauthor:6,cassandrarolemanag:[6,56],casser:38,cassi:38,cast:[10,13,18],caswrit:53,cat:[22,183,199],categor:53,categori:[11,12,13,14,43,84],caught:[30,53],caus:[4,6,18,36,48,55,56,188,190,197,198,199],caution:6,caveat:56,cbc:6,ccm:[30,35,199],ccmlib:35,cd941b956e60:198,cdc:[6,11],cdc_enabl:47,cdc_free_space_check_interval_m:47,cdc_free_space_in_mb:47,cdc_raw:[6,47],cdc_raw_directori:47,cdccompactor:6,cell:[6,22,53,95,179,183,187,198],center:[6,11,20,22,36,57,58,81,91,125,140],cento:34,centos7:34,central:[31,56,61,195],centric:[20,27],certain:[4,6,9,11,20,27,35,48,56,184],certainli:14,certif:[52,125,136],cf188983:183,cfname:[109,127,175],cfs:25,chain:20,challeng:[28,59],chanc:[28,45,187],chang:[4,6,11,12,15,20,22,24,26,27,28,29,31,32,34,39,40,42,49,52,53,56,158,185,188,197,199],charact:[11,12,13,17,20,22,25,60,61],character:6,chat:8,cheap:[6,11],check:[0,6,11,13,24,25,30,31,33,34,35,36,45,47,48,53,56,62,116,125,140,179,182,194,198],checklist:[32,33,42],checkout:[27,31,33,34],checksum:[4,6,11,49,125,179,193],cherri:29,chess:13,child:61,chmod:56,choic:[6,11,34,42,48,52,189],choos:[0,6,11,32,34,38,50,53],chosen:[0,6,11,14,198],chown:56,christoph:22,chrome:61,chronicl:43,chunk:[4,6,36,49,61],chunk_length_in_kb:[11,49],chunk_length_kb:6,chunk_lenth_in_kb:11,chunkcach:53,chunksiz:61,churn:6,cipher:[6,56,186],cipher_suit:6,circular:20,circumst:11,citi:22,clash:12,class_nam:[4,6],classload:35,classpath:[6,14,22,53],claus:[10,11,14,16,17,18,20,25],clean:[6,25,53,62,65,125,143,182,186],cleanli:33,cleanup:[36,48,52,53,95,125,171,193],clear:[30,33,62,67,116],clearsnapshot:125,click:[13,31,33,34,35,199],client:[0,4,6,8,10,11,13,17,20,22,30,36,37,39,42,43,47,50,52,61,67,125,186,190,196,197,198,199],client_encryption_opt:[56,186],clientrequest:53,clientstat:125,clock:6,clockr:6,clojur:39,clone:[31,34,36,61,199],close:[6,15,27,34,56,199],closer:45,cloud:52,clue:199,cluster:[0,4,6,9,10,11,13,14,21,22,30,35,37,41,42,48,50,53,55,56,57,58,60,61,62,72,93,97,113,125,148,165,182,187,194,195,196,197,199],cluster_nam:[37,41],clustering_column:11,clustering_ord:11,clusteringtyp:187,cmake:199,cmd:199,cmsparallelremarken:31,coalesc:6,coalescingstrategi:6,code:[6,10,12,14,21,24,27,28,29,30,31,32,35,42,49,53,195,199],codestyl:25,coher:59,col:[14,60],cold:6,collat:6,collect:[6,10,11,12,13,14,15,17,50,52,53,60,95,197],collection_liter:12,collection_typ:22,collector:197,color:[22,61,199],column1:9,column:[4,6,9,10,11,12,13,14,15,16,17,18,20,22,49,53,59,60,61,109,127,147,164,175,187,190,192,197,198],column_definit:11,column_nam:[11,13,16],columnfamili:[4,6,9,25,48,185,188],columnspec:60,colupdatetimedeltahistogram:53,com:[6,14,24,25,27,29,34,56,199],combin:[4,6,10,48],come:[6,9,56,199],comingl:48,comma:[6,11,12,13,37,43,56,58,61,84,127,130,175,186],command:[0,6,18,26,29,34,35,36,37,40,41,49,52,55,56,60,62,63,64,65,66,68,73,75,81,85,88,91,94,95,98,100,103,107,109,111,115,116,123,125,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,1
55,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,182,185,186,191,192,193,196,197,198,199],comment:[4,6,11,15,18,25,27,28,30,56],commit:[4,6,8,11,27,32,33,34,42,53,193,199],commitlog:[2,6,36,37,50,52,187,197,198],commitlog_archiv:[4,6],commitlog_compress:4,commitlog_directori:[4,37,50],commitlog_segment_size_in_mb:[4,36],commitlog_sync:4,commitlog_sync_batch_window_in_m:4,commitlog_sync_period_in_m:4,commitlog_total_space_in_mb:4,commitlogposit:187,commitlogread:47,commitlogreadhandl:47,commitlogseg:[6,52,53],committ:[28,29,32,33,34,35],common:[0,14,15,25,27,30,33,52,55,61,195,196,199],common_nam:11,commonli:125,commun:[6,8,28,30,31,33,36,37,41,43,56,186],commut:36,compact:[4,6,11,15,36,42,45,49,50,52,60,65,69,70,95,98,99,125,138,147,151,152,158,166,171,178,184,185,186,187,188,190,193,196,197,199],compacted_partition_maximum_byt:174,compacted_partition_mean_byt:174,compacted_partition_minimum_byt:174,compaction_:171,compaction_histori:197,compaction_throughput:198,compaction_window_s:48,compaction_window_unit:48,compactionbyteswritten:53,compactionexecutor:[53,198],compactionhistori:[48,125],compactionid:171,compactionparamet:48,compactionparametersjson:48,compactions_in_progress:193,compactionstat:[48,125,198],compactionstrategi:52,compactiontask:197,compactor:[101,125,154],compar:[4,6,26,33,48,53,55,60,195,198],comparison:6,compat:[6,9,10,11,13,15,20,30,33,62,199],compatilibi:22,compet:6,compil:[25,26,31,61],compilerthread3:199,complain:31,complet:[6,13,14,33,34,36,47,48,53,56,58,61,125,139,141,190,192,193,198],completedtask:53,complex:[4,6,9,14,22,33],complexarg:14,compliant:[6,14,56],complic:33,compon:[4,6,11,30,45,53,56,125,158,199],compos:[11,13,22],composit:[4,11],compound:17,comprehens:30,compress:[4,6,11,35,42,48,50,52,53,60,187],compression_level:49,compression_metadata_off_heap_memory_us:174,compressioninfo:4,compressionmetadataoffheapmemoryus:53,compressionratio:53,compressor:[4,6,11,187],compris:[4,11,49],compromis:[34,56],comput:[4,6,14,194],concaten:[14,43],concept:[20,48],concern:[13,14,199],conclus:6,concret:[12,22],concurr:[6,24,50,60,100,101,102,125,140,153,154,155,186,198,199],concurrent_compactor:198,concurrent_materialized_view_build:18,concurrent_writ:4,concurrentmarksweep:50,condens:13,condit:[6,10,12,13,20,22,25,29,48,53,56,60,61,199],conditionnotmet:53,conf:[6,36,37,40,53,56,61,186,197],config:[24,53,56,61,62,182],configu:[43,199],configur:[0,4,11,20,22,24,31,35,36,39,40,42,52,53,56,57,59,60,61,71,88,125,143,158,185,186,187,195,197,198],confirm:[6,8,24,30,31],conflict:[13,22,29,32],conform:[18,30],confus:[10,12,36,199],congratul:27,conjunct:61,connect:[6,11,22,31,41,42,53,56,57,60,61,67,71,124,125,186,199],connectednativecli:53,connectednativeclientsbyus:53,connectionsperhost:186,connector:[36,38,56],connnect:53,consecut:37,consequ:[11,13,19,22,50],conserv:6,consid:[0,6,13,22,28,33,37,43,45,48,50,194],consider:[13,22],consist:[2,6,11,12,13,14,30,53,56,58,62,195,198],consol:[31,37,43,61],constant:[10,11,15,17,22],constantli:[6,48],construct:[12,199],constructor:[6,25],consum:[6,35,45,47,53,198],consumpt:47,contact:[6,11,36,42,195],contain:[0,6,8,9,10,11,12,13,15,16,18,20,22,26,31,33,35,48,49,53,56,59,61,164,184,189,193,195,197,198,199],contend:[6,53],content:[4,6,11,12,13,27,42,48,61,88,183,199],contentionhistogram:53,context:[4,6,9,20,22,31,33,36,56,197],contigu:13,continu:[0,6,25,35,43,48,56,57],contrarili:12,contrast:[35,56],contribut:[5,24,27,29,35,42],contributor:[27,29,33,40],control:[0,6,10,11,13,15,30,37,40,48,56,57,61],conveni:[
9,12,14,17,35,58],convent:[6,11,14,15,27,29,32,33,35,56,57],convers:10,convert:[10,13,14,48,199],coordin:[0,6,11,13,14,22,36,53,141,195,196],coordinatorreadlat:[53,195],coordinatorscanlat:53,coordinatorwritelat:[53,195],cop:25,copi:[0,6,26,34,36,48,62,186,195],core:[6,14,43,50,59,153,198,199],correct:[10,26,30,40,48,49,56,125,138,185,191],correctli:[6,11,27,36,43,48,56],correl:[6,10,57,195,198],correspond:[0,4,6,9,11,13,14,18,22,27,33,35,36,47,57,186],corrupt:[6,11,48,49,50,55,62,147,179,182],cost:[6,13,22,49,55],could:[6,12,22,28,30,33,48,55,61,197,199],couldn:[40,53],count:[4,6,9,13,22,36,48,53,58,60,187,197,198,199],counter1:190,counter:[0,4,6,9,14,19,50,53,60,62,118,125,147,149,150,182],counter_mut:[53,198],counter_read:60,counter_writ:60,countercach:53,countermutationstag:[53,198],counterwrit:[60,111,161],countri:[13,22],country_cod:22,coupl:6,cours:[13,55,194,199],cover:[11,27,30,33,35,36,39,48,53,187],coverag:[26,28],cph:186,cpu:[6,11,47,49,52,195,197,198],cpu_idl:199,cq4:197,cqerl:38,cqex:38,cql3:[14,30,35,61],cql:[6,10,11,12,13,14,16,17,19,20,22,34,35,38,41,42,43,48,52,56,60,62,158,183,199],cql_type:[11,12,13,14,20,22],cqlc:38,cqldefinit:14,cqlsh:[36,39,40,42,56,62],cqlshrc:62,cqltester:[30,35],cqltrace:199,craft:56,crash:50,crc32:[4,192,193],crc:[4,192,193],crc_check_chanc:[11,49],creat:[0,4,6,9,10,12,13,15,17,19,24,26,27,31,32,35,36,47,48,49,56,58,60,61,68,186,191,199],create_aggregate_stat:12,create_function_stat:12,create_index_stat:12,create_keyspace_stat:12,create_materialized_view_stat:12,create_role_stat:12,create_table_stat:12,create_trigger_stat:12,create_type_stat:[12,22],create_user_stat:12,createkeystor:6,createrepo:34,createt:35,creation:[6,10,11,13,14,18,22,47,197],creator:20,credenti:[6,56],critic:[30,33,56,195,198],cross:[6,36,57],crossnodedroppedlat:53,crucial:[56,197,198,199],cryptographi:6,csv:61,ctrl:199,cuddli:22,culprit:195,cumul:[198,199],curent:187,curl:[29,40],current:[0,6,9,11,13,20,22,31,33,34,40,43,48,53,58,60,61,62,90,108,112,114,116,125,139,170,178,182,187,188,193,197,198],currentd:[10,14],currentlyblockedtask:53,currenttim:[10,14],currenttimestamp:[10,14],currenttimeuuid:[10,14],custom:[6,9,10,11,14,15,16,20,24,33,43,57,60,61,186],custom_option1:20,custom_option2:20,custom_typ:[14,22],cut:197,cute:22,cvh:30,cycl:[6,47,88],cython:62,d18250c0:183,d85b:183,d936bd20a17c11e8bc92a55ed562cd82:189,daemon:[31,125,172,199],dai:[14,17,19,22,43,48,55],daili:[24,43,88],danger:6,dart:39,dart_cassandra_cql:38,dash:12,data:[0,4,6,10,12,14,15,16,18,30,37,40,42,45,49,50,52,53,55,56,57,59,60,61,63,68,81,88,91,95,116,125,130,140,164,179,183,184,185,186,187,188,189,190,191,192,193,194,197,198,199],data_file_directori:[37,50],data_read:20,data_writ:20,databas:[12,13,15,21,34,43,48,50,56,196,197,199],datacent:[0,6,11,55,57,81,91,104,125,140,157,186,195,198],datacenter1:[6,60],dataset:[6,55,199],datastax:[6,14,38,195],datastor:198,datatyp:14,date:[4,9,10,15,17,19,43,62,147,182,183,187],dateof:[10,14],datestamp:17,datetieredcompactionstrategi:[11,48],datetim:15,daylight:22,db532690a63411e8b4ae091830ac5256:192,db_user:56,dba:56,dbd:38,dc1:[6,11,20,56,198],dc1c1:194,dc2:[6,11,56,198],dc3:20,dcassandra:[48,53,56,58],dcawareroundrobin:195,dcl:43,dcom:56,dcpar:140,ddl:[11,43,61],ddl_statement:12,deactiv:6,dead:[6,52,63,125,199],dead_node_ip:58,deal:[62,182],deb:40,debian:[34,36,39,199],debug:[37,61,190,191,192,193,194,195,196,199],decai:195,decid:[9,27,48,57],decim:[9,14,17,19,22,61],decimalsep:61,declar:[11,12,14,22],decod:[17,22,199],decommiss:[0,6,58,125],decompress:[49,199],d
ecoupl:0,decreas:[6,48,186,199],decrement:[13,22],decrypt:6,dedic:[4,6],deem:6,deep:[42,196,197],deeper:[33,199],default_time_to_l:[10,11,13],defend:36,defens:6,defer:[11,199],defin:[0,6,9,10,11,12,13,15,16,17,18,20,21,31,48,53,56,57,58,60,61,68,125,187],definit:[9,13,14,15,18,22,42,45,60,187],deflat:[4,6],deflatecompressor:[11,49],degrad:6,delai:[4,53,55],deleg:31,delet:[4,6,9,10,11,12,15,17,18,20,22,33,42,43,55,61,88,95,125,177,187,192,193,197],delete_stat:[12,13],deletiontim:4,delimit:6,deliv:[0,6,53],deliveri:[6,53,125,126,145,156],delta:[53,187],demand:56,demonstr:196,deni:36,denorm:22,denot:[6,12],dens:45,dep:26,depend:[4,6,11,12,13,14,22,24,27,30,31,32,33,35,42,48,55,62,195],dependenic:26,deploi:[26,36,37,199],deploy:[0,6,56,57,59],deprec:[6,10,11,14,15,48],depth:199,desc:[9,11,13,61],descend:[11,13],describ:[2,6,7,9,10,11,12,13,14,15,17,20,22,30,31,33,34,45,56,62,125,182],describeclust:125,descript:[10,11,14,19,22,24,27,33,53,61],descriptor:[53,193],deseri:194,design:[14,48,50,55],desir:[16,22,24,36,189],destin:[47,61],destroyjavavm:199,detach:34,detail:[5,6,10,11,12,13,14,22,24,27,28,36,52,56,59,60,61,62,182,192,197,198,199],detect:[2,6,11,29,36,56,194],detector:[93,125],determin:[0,6,11,13,20,45,49,57,140,195,198,199],determinist:36,dev1:43,dev:[6,8,34,36,199],develop:[8,27,28,31,33,35,50],devic:[4,59,199],df303ac7:198,dfb660d92ad8:61,dfp:179,dht:[6,187],diagnost:6,dictat:[6,56],did:[30,53,185],die:6,dies:[42,58],diff:[15,25,197],differ:[0,6,11,12,13,14,15,20,22,24,29,31,33,35,36,37,40,48,49,50,53,55,58,60,195,199],difficult:[6,35,199],difficulti:22,digest:[4,6,192,193],digit:[17,22,36],diminish:22,dinclud:26,dir_path:186,direct:[6,11,17,20,33,53,199],directli:[13,18,20,27,31,48,56,187,199],director:13,directori:[4,6,21,26,27,31,35,36,39,40,41,43,47,50,52,61,116,125,143,186,199],dirti:[4,6,199],disabl:[6,11,14,48,49,56,57,61,74,75,76,77,78,79,80,81,82,91,125,148,150,152,157,160,161,162],disable_stcs_in_l0:48,disableauditlog:[43,125],disableautocompact:[48,125],disablebackup:125,disablebinari:125,disablefullquerylog:[125,197],disablegossip:125,disablehandoff:125,disablehintsfordc:125,disableoldprotocolvers:125,disablesnapshot:147,disableuditlog:43,disallow:6,disambigu:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],disappear:11,discard:6,disconnect:48,discourag:[11,22,33],discov:[36,55],discuss:[8,22,33],disk:[4,6,11,37,42,43,45,47,48,49,52,53,55,88,122,125,138,179,184,188,191,192,197,198,199],dispar:6,displai:[11,43,61,62,64,70,100,115,117,124,125,174,182,186,190,191,192,193,194,199],disrupt:[36,56],dissect:199,dist:[26,34,40],distanc:59,distinct:[0,9,10,13],distinguish:[9,14],distribut:[6,26,33,35,36,48,53,56,58,59,60,187,188,196,197,198,199],distro:34,dive:[42,196,197],divid:12,divis:19,djava:[31,36,56],dml:[21,43],dml_statement:12,dmx4jaddress:53,dmx4jport:53,dns:36,dobar:25,doc:[6,26,27,30,34,55,56,185,199],docker:34,document:[5,12,14,15,17,24,30,32,33,34,41,56,60,61],doe:[6,11,13,14,16,17,18,20,22,29,30,33,34,42,45,48,49,55,56,57,58,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199],doesn:[6,14,22,25,35,36,55,60,186,187,189,190,197,199],dofoo:25,doing:[6,13,18,35,36,48,53,58,199],
dollar:[10,12],domain:[28,56,60,146,165],domin:199,don:[5,6,13,25,28,29,30,31,33,34,36,37,48,55,116,140,186,191,196,198,199],done:[6,11,13,22,24,27,28,33,34,35,37,41,48,60,188,191,192],dont:43,doubl:[6,9,10,11,12,14,17,19,22,31,53,57],doubt:11,down:[6,20,48,53,55,57,58,79,125,140,188,195,197,198,199],downgrad:192,download:[6,24,31,34,40,53,59],downsampl:4,downstream:198,downward:20,dozen:198,dpkg:34,drain:[4,125],drive:[6,48,50,197,198,199],driver:[6,12,14,20,35,39,42,61,195],drop:[6,10,15,42,43,48,53,56,88,184,187,188,190,195,198,199],drop_aggregate_stat:12,drop_function_stat:12,drop_index_stat:12,drop_keyspace_stat:12,drop_materialized_view_stat:12,drop_role_stat:12,drop_table_stat:12,drop_trigger_stat:12,drop_type_stat:[12,22],drop_user_stat:12,dropdown:199,droppabl:[6,48,187],dropped_mut:174,droppedmessag:52,droppedmut:53,dropwizard:53,drwxr:192,dry:[62,182],dsl:24,dt_socket:31,dtest:[24,30,32],due:[11,13,22,26,36,40,48,53,58,195,199],dump:[43,61,62,182,197],duplic:[30,55,193],durabl:[4,47],durable_writ:11,durat:[6,10,15,19,20,48,53,60,127,175,186],dure:[6,11,14,21,26,33,35,36,48,49,53,56,58,60,61,147,184,190,194,197,199],dverbos:26,dying:36,dynam:[6,52,56],dynamic_snitch:57,dynamic_snitch_badness_threshold:57,dynamic_snitch_reset_interval_in_m:57,dynamic_snitch_update_interval_in_m:57,dynamo:[2,42],each:[0,4,6,10,11,12,13,14,17,18,20,22,24,27,29,33,41,42,43,48,49,50,53,55,56,57,58,59,60,61,62,125,150,166,179,182,183,197,198,199],each_quorum:[0,6],earli:[6,12,33],earlier:33,eas:199,easi:[9,27,33,199],easier:[0,27,33,183],easiest:36,easili:56,ec2:[6,50,57],ec2multiregionsnitch:[6,57],ec2snitch:[6,57],ecc:50,echo:[40,187],eclips:[25,32,35],ecosystem:30,eden:199,edg:30,edit:[27,31,34,37,40,53,56,193],editor:27,effect:[6,11,22,33,36,45,49,56,79,125,195,198,199],effectiv:53,effici:[6,11,43,48,57,58],effort:[6,27,33,55],either:[4,6,8,12,13,14,16,22,24,25,27,29,31,33,36,40,41,47,48,53,56,60,177,195,197,198,199],elaps:[48,53,199],elasticsearch:59,elder:31,element:[22,27,61],elig:6,elimin:195,elixir:39,els:[11,13,25,33],email:[8,16,22,34,42],embed:35,emerg:26,emit:6,emploi:45,empti:[6,9,10,11,12,61,190],emptytyp:9,enabl:[0,6,11,14,17,20,35,36,48,49,57,58,61,84,85,86,88,91,92,125,162,186,187,197,199],enable_legacy_ssl_storage_port:6,enable_user_defined_funct:14,enableauditlog:[43,125],enableautocompact:[48,125],enablebackup:125,enablebinari:125,enablefullquerylog:[6,125,197],enablegossip:125,enablehandoff:125,enablehintsfordc:125,enableoldprotocolvers:125,encapsul:[25,53],enclos:[9,10,12,14,20,60],enclosur:12,encod:[15,22,30,43,61,187],encodingstat:187,encount:[5,13,34,40,53,60],encourag:[11,47],encrypt:[6,52,186],end:[22,24,36,43,48,56,61,68,103,125,140,188,193,199],end_token:[68,140],end_token_1:130,end_token_2:130,end_token_n:130,endpoint:[6,53,57,63,103,125,140,177],endpoint_snitch:57,endpointsnitchinfo:56,endtoken:61,enforc:[17,56],engin:[2,11,33,42,53,59],enhac:28,enhanc:[28,50],enjoi:34,enough:[0,6,22,36,37,48,55,57,61,197,199],enqueu:[6,197],ensur:[13,18,21,36,47,49,56,185,197,198],entail:36,enter:[24,36,61,197,199],entir:[0,4,6,11,14,22,36,45,48,55,56,58,61,62,182,184,195,199],entri:[4,6,9,13,16,24,33,42,53,56,61,187],entropi:6,entry_titl:13,enumer:[20,183],env:[36,37,53,56],environ:[0,5,6,26,31,32,35,36,39,42,50,189],ephemer:50,epoch:[22,187],epol:6,equal:[0,6,10,11,13,22,25,48,60],equival:[10,11,12,13,14,20,29,48,196],eras:11,erlang:39,erlcass:38,err:61,errfil:61,error:[6,11,12,14,16,18,20,22,24,25,30,31,34,35,40,42,43,55,60,61,141,185,190,194,196,197,198],escap:[12,17,60],especi:[33,3
6,48,61,199],essenti:[0,14,36,61],establish:[6,20,57,186],estim:[4,53,55,187,198],estimatedcolumncounthistogram:53,estimatedpartitioncount:53,estimatedpartitionsizehistogram:53,etc:[6,18,22,25,30,36,37,40,43,48,53,56,60,186,199],eth0:6,eth1:6,ev1:22,evalu:[6,19],even:[0,6,10,11,12,13,14,17,22,28,33,42,48,55,56,61,71,147,178,195,197,198,199],evenli:6,evenlog:[185,188],event:[4,6,13,22,43,48,60,61,140,183],event_typ:13,eventlog:[183,185,188,191,193,194],eventlog_dump_2018jul26:183,eventlog_dump_2018jul26_d:183,eventlog_dump_2018jul26_excludekei:183,eventlog_dump_2018jul26_justkei:183,eventlog_dump_2018jul26_justlin:183,eventlog_dump_2018jul26_singlekei:183,eventlog_dump_2018jul26_tim:183,eventlog_dump_2018jul26b:183,eventu:[4,13,27,55],ever:[25,35,36,50],everi:[4,6,11,13,14,18,20,21,22,41,43,45,48,50,55,60,61,195,198,199],everyth:[4,12,25,31,36,59],evict:53,evil:[6,14],ex1:60,ex2:60,exact:[11,12,14,49,196],exactli:[11,14,18,56,183,199],exampl:[0,6,11,13,14,17,20,22,28,34,35,40,41,43,48,56,57,60,61,183,184,185,186,187,188,189,190,191,193,194,195,196,197,198,199],example2:60,exaust:6,excalibur:11,exce:[4,6,17,25,197],exceed:[6,50,188],excel:11,excelsior:11,except:[0,6,13,14,17,30,32,33,35,36,43,53,183,188,197,199],excess:45,exchang:[6,36],exclud:[11,43,53,62,84,108,125,182],excluded_categori:[43,84],excluded_keyspac:[43,84],excluded_us:[43,84],exclus:[22,26,35],execut:[6,9,11,12,13,14,20,24,26,31,35,41,43,48,53,56,61,182,183,184,185,186,187,188,189,190,191,192,193,194,198,199],executor:24,exhaust:[6,195],exhibit:13,exist:[6,9,10,11,12,13,14,16,17,18,20,21,22,27,28,30,31,34,35,42,43,45,48,49,57,58,60,184],exit:[62,193],exp:60,expand:[11,62],expans:11,expect:[0,4,6,10,12,22,25,30,33,34,48,55,56,188,198],expens:[6,45,57],experi:[6,48,198],experienc:[0,6,197],experiment:[0,140],expir:[6,10,11,13,22,52,55,56,147,184,187,190],expiri:48,explain:[25,27,30,33,40],explan:[62,182],explicit:[10,11,20],explicitli:[4,6,10,11,13,17,22,25,48,57,60],explor:31,expon:10,exponenti:[53,60,195],expos:[6,9,56],express:[0,6,10,12,19,57],expung:36,extend:[22,33,35,59,62,116,179,182],extens:[6,11,56],extern:[42,53,58,59,196],extra:[0,4,6,48],extract:[25,40],extrem:[6,13,60],f6845640a6cb11e8b6836d2c86545d91:187,f8a4fa30aa2a11e8af27091830ac5256:186,facilit:6,fact:[22,28,35,36,195],factor:[0,6,11,42,49,55,56],factori:60,fail:[0,6,11,13,14,22,24,26,42,48,53,61,125,141],failur:[2,6,33,42,48,50,53,57,93,125,179,195],failuredetector:125,fairli:[6,47,56,199],fake:14,fall:[6,43],fallback:[6,57],fals:[6,11,12,17,20,22,43,45,47,48,49,53,56,58,61,147],famili:[6,50,109,127,164,175,192],fanout_s:48,faq:62,far:[27,28],fare:199,fast:[6,45,48,59,197,199],faster:[6,33,49,50,125,150,198],fastest:[6,29,57],fatal:6,fault:36,fav:[16,22],favorit:199,fax:22,fct:14,fct_using_udt:14,fear:36,feasibl:22,featur:[0,28,30,31,33,56],fed:6,feedback:33,feel:[27,29],fetch:[6,11,27,61],few:[6,48,50,195,197],fewer:[6,33],fffffffff:[17,22],fgc:199,fgct:199,field:[10,13,14,17,22,25,43,45,60,190],field_definit:22,field_nam:13,fifteen:53,fifteenminutecachehitr:53,fifth:198,figur:[28,48,184],file:[4,7,11,27,31,32,33,34,35,36,37,39,42,45,47,48,50,53,56,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,1
75,176,177,178,179,180,181,182,183,187,188,189,190,192,193,195,196,199],filenam:[4,11,61,109,125,187,191],filenamepattern:43,filesystem:[6,24,56],fill:[34,47,48],fillion:13,filter:[4,9,11,18,42,50,52,53,140,187,192,193],finalfunc:[9,14],finalis:34,find:[4,6,24,27,31,34,35,40,42,45,48,56,58,103,107,183,187,189,192,196,198,199],fine:[4,6,33,55,56],finer:[4,6],finish:[31,33,125,142,197],finish_releas:34,fip:[6,56],fire:[21,28],firefox:61,firewal:[6,36,37,57],first:[0,4,5,6,11,13,14,22,33,34,36,39,48,50,55,56,60,61,140,147,183,187,190,193,195,197,198,199],firstnam:13,fit:[6,48,53],five:53,fiveminutecachehitr:53,fix:[6,10,12,18,24,27,29,32,34,36,48,50,55,60,190],fixm:34,flag:[6,13,29,30,33,47,53,55,58,185],flash:59,flexibl:56,flight:[6,56],flip:11,floor:6,flow:[6,20,30,32,43],fluent:38,flush:[4,6,11,47,48,50,53,83,125,164,193,197],fname:14,focu:[24,33],focus:60,focuss:199,folder:[31,171,191],follow:[0,4,5,6,8,9,10,11,12,13,14,17,18,19,20,22,24,25,26,27,28,29,30,31,33,34,35,36,37,40,42,43,47,48,49,53,55,56,57,58,61,65,68,75,85,94,95,131,140,147,161,166,178,179,184,189,190,193,195,199],font:12,foo:[11,12,47,199],footprint:[125,127],forc:[4,6,11,13,61,68,71,125,139,140,141,194],forcefulli:[63,125],foreground:[37,40],forev:48,forget:5,fork:[27,33],form:[6,10,11,12,14,20,70,124,174],formal:[12,27,34],format:[4,6,10,11,17,22,27,29,30,32,33,43,53,61,62,69,88,109,130,174,176,182,193,198],former:[6,53],forward:[6,11,26],found:[5,6,12,14,24,27,28,33,35,37,41,56,58,60,61,62,171,179,182,186,187,192,193],four:[13,49],fourth:198,fqcn:35,fql:197,fql_log:197,fqltool:197,fraction:6,frame:6,framework:[30,35],franc:[13,22],free:[6,11,22,27,29,31,53,59,194,199],freed:4,freestyl:24,frequenc:[6,47,55],frequent:[6,11,42,48,56,195,199],fresh:58,friendli:[6,22,35],from:[0,4,6,9,11,12,13,14,15,17,18,19,20,22,28,29,32,33,35,39,41,42,43,45,47,48,49,50,53,56,57,58,60,62,63,65,66,68,73,75,81,84,85,91,94,95,98,100,103,107,109,111,115,116,123,125,127,130,131,132,134,135,138,139,140,141,143,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,182,184,185,187,188,190,193,195,197,198,199],fromjson:15,froom:22,frozen:[9,10,11,13,14,22],fruit:[22,33],fsync:[4,6,47,53,198],fulfil:60,full:[6,9,11,13,16,20,33,36,40,41,48,49,52,56,59,60,61,78,88,125,131,140,143,186,188,193,198],full_nam:174,fulli:[0,6,11,12,14,34,52,53,56],function_cal:12,function_nam:[13,14,20],fundament:17,further:[5,11,18,22,48,52,56,59,198],furthermor:[10,13,56],futher:43,futur:[6,9,10,11,22,33,90,125,170],g1gc:50,game:[14,22],garbag:[11,50,52,53,95,197],garbagecollect:125,gather:48,gaug:53,gaurante:0,gaussian:60,gc_grace_second:[11,187],gc_type:53,gce:[36,50],gcg:6,gcinspector:197,gcstat:125,gct:199,gcutil:199,gcviewer:197,gen:199,gener:[0,2,4,6,8,11,12,13,14,17,22,24,27,30,31,32,33,34,36,50,56,59,60,61,111,147,161,190,196,197,198,199],genuin:25,geospati:59,get:[4,6,8,24,26,27,29,31,32,33,36,40,42,45,48,53,62,100,101,102,105,108,125,182,184,188,196,198,199],getbatchlogreplaythrottl:125,getcompactionthreshold:125,getcompactionthroughput:125,getconcurrentcompactor:125,getconcurrentviewbuild:[18,125],getendpoint:125,getint:14,getinterdcstreamthroughput:125,getlocalhost:[6,36],getlogginglevel:[125,197],getlong:14,getmaxhintwindow:125,getpartition:25,getreplica:125,getse:125,getsstabl:125,getstr:14,getstreamthroughput:125,gettempsstablepath:25,getter:[20,25],gettimeout:125,gettraceprob:125,gib:[70,124,174,198],gist:[4,25],git1:34,git:[5,24,27,29,31,33,34,197,199],gitbox:[31,34],github:[24,25,29,32,33,34,35,
59,199],give:[18,20,22,27,33,35,42,61,185,197,198],giveawai:199,given:[0,6,11,12,13,14,16,22,24,33,45,48,55,56,58,60,61,66,68,73,75,85,98,107,111,125,131,151,158,162,166,173,183,185,187,188,189,192,193],glanc:199,global:[6,61,125,149],gms:197,gmt:22,goal:[6,48,195],gocassa:38,gocql:38,going:[6,33,48,190,196,198,199],gone:6,good:[6,25,27,33,35,36,55,61,189,195,197,198,199],googl:[25,61,199],gori:36,gossip:[2,6,36,53,57,79,89,113,125,169,197],gossipinfo:125,gossipingpropertyfilesnitch:[6,57],gossipstag:[53,197,198],got:6,gotcha:199,gp2:50,gpg:40,grace:[52,55,62,182],grafana:195,grai:22,grain:56,grammar:[11,12,26],grant:[6,9,56],grant_permission_stat:12,grant_role_stat:12,granular:[4,6,11,95],graph:[20,62],graphit:195,gravesit:11,great:[28,33,48,196,197,198,199],greater:[0,6,22,36,57,154,155,197,199],greatli:6,green:[22,31],grep:[4,185,187,189,197,198,199],groovi:24,group:[6,10,11,20,48,53,56,57,195],group_by_claus:13,grow:[22,59],guarante:[0,2,6,11,13,14,22,33,42,45,48,55,58,59,61,184],gui:199,guid:[6,27,31],guidelin:[10,30,34,50],had:[6,9,10,48,190,196,198],half:[4,6,29,36],hand:[6,13,50,198],handi:199,handl:[6,14,30,32,33,36,47,50,53,56,60,88,197],handoff:[6,53,80,114,125,156],handoffwindow:125,hang:33,happen:[6,13,25,29,33,42,48,53,57,195,197,198,199],happi:33,happili:50,hard:[6,14,47,48,50,192,197],harder:6,hardest:28,hardwar:[6,24,42,52,195],has:[0,4,6,10,11,12,13,14,18,20,22,25,33,34,36,43,47,48,50,53,56,57,58,60,61,62,182,186,195,197,198,199],hash:[4,6,48,55,194,199],hashcod:25,haskel:39,hasn:[0,88],have:[0,5,6,9,10,11,12,13,14,15,18,19,20,22,24,25,27,28,29,30,31,33,34,35,36,37,40,43,45,48,49,50,53,56,57,88,147,184,186,188,190,193,194,195,196,197,198,199],haven:33,hayt:38,hdd:[4,6,50],head:[27,33,199],header:[31,61],headroom:6,health:199,healthi:199,heap:[4,6,31,37,42,45,49,50,53,197,198,199],heap_buff:6,heartbeat:[6,197],heavi:[6,197,198,199],heavili:50,held:[6,50,125,129],help:[5,6,10,26,28,33,35,41,43,60,62,64,125,163,186,190,191,192,193,194,195,196,197,198,199],helper:35,henc:[5,6,11,22],here:[6,26,27,29,34,35,36,38,48,53,56,60,198],hex:[12,17,109],hexadecim:[10,12,109],hibern:58,hidden:[58,199],hide:[25,30,62,182],hierarch:20,hierarchi:[20,55],high:[0,6,27,34,36,48,50,59,195,197,198],higher:[0,19,20,33,45,48,53,58,127,175,197,199],highest:[48,187,188],highli:[33,36,50,56,197,198],hint:[0,6,11,12,18,36,37,42,52,53,55,80,81,90,91,106,114,125,126,145,156,159,170,177,198],hint_delai:53,hintedhandoff:[6,52],hintedhandoffmanag:53,hints_creat:53,hints_directori:37,hints_not_stor:53,hintsdispatch:[53,198],hintsfail:53,hintsservic:52,hintssucceed:53,hintstimedout:53,histogram:[4,48,53,125,128,173,187,197],histor:[11,33],histori:[24,25,43,67,69,125],hit:[6,48,53,199],hitrat:53,hoc:35,hold:[0,6,10,13,20,36,48,61,195,197,199],home:[22,34,60,61],honor:[6,31],hope:48,hopefulli:33,host:[6,27,37,42,43,53,57,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186,198,199],hostnam:[6,36,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,13
3,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,199],hot:[6,52,53,199],hotspot:11,hotspotdiagnost:56,hottest:6,hour:[6,22,33,34,48],hourli:[43,88],how:[0,5,6,7,8,11,12,22,24,28,30,31,32,33,35,39,41,42,47,48,49,53,57,59,60,61,88,185,197,198,199],howev:[4,6,9,10,11,12,13,15,17,18,20,22,24,33,35,36,37,40,45,49,50,55,56,58,61],hoytech:199,html:[6,60,185],http:[6,24,25,27,29,31,34,40,53,183,184,185,186,187,188,189,190,191,193,194,199],httpadaptor:53,hub:36,hudson:24,huge_daili:43,human:[11,43,47,70,124,174,198],hurt:11,hypothet:29,iauthent:6,iauthor:6,ibm:59,icompressor:49,idct:186,ide:31,idea:[6,14,27,32,33,35,36,48,61,198,199],ideal:[6,35,48,53,56],idealclwritelat:53,idempot:[13,22],idemptot:22,ident:[0,60],identifi:[6,9,10,11,13,14,15,16,20,21,22,60,195],idiomat:8,idl:6,idx:47,ieee:[17,22],iendpointsnitch:[6,57],iftop:199,ignor:[0,6,10,14,22,25,61,174,186],iinternodeauthent:6,illeg:14,illegalargumentexcept:188,illustr:[20,188],imag:[22,27,34,199],imagin:48,immedi:[4,6,11,22,33,45,49,56,65,125],immut:[4,36,49,50],impact:[6,11,30,48,52,56,197,199],implement:[4,6,10,13,14,18,20,24,25,35,36,43,47,49,56,57,59],implementor:6,impli:[11,12,22],implic:[0,56],implicitli:[14,20],import_:61,impos:6,imposs:48,improv:[0,6,11,22,28,33,35,45,48,50,57,58,61,199],inaccur:199,inact:36,inam:189,incast:199,includ:[4,6,10,11,12,13,18,20,22,24,25,26,27,33,34,43,48,50,53,56,59,61,62,84,141,178,182,189,195,196,197,198,199],included_categori:[43,84],included_keyspac:[43,84],included_us:[43,84],inclus:33,incom:[6,43],incomingbyt:53,incompat:[6,10],incomplet:[30,193],inconsist:[36,55],incorrect:36,increas:[0,4,6,11,18,36,45,48,49,50,53,57,58,140,186,194,195],increment:[0,6,10,13,22,33,48,52,53,76,86,125,141,147,167,190,193],incur:[13,22,53],indefinit:43,indent:25,independ:[11,48,50,56,198],index:[4,6,9,10,11,12,13,15,22,42,47,48,52,61,125,131,186,192,193,197],index_build:171,index_identifi:16,index_nam:16,index_summary_off_heap_memory_us:174,indexclass:16,indexedentrys:53,indexinfocount:53,indexinfoget:53,indexnam:131,indexsummaryoffheapmemoryus:53,indic:[5,6,12,13,25,33,36,43,47,140,187,188,195,197,198,199],indirectli:13,individu:[6,10,14,22,33,35,50,56,186,194],induc:13,inequ:[10,13],inet:[9,11,14,17,22],inetaddress:[6,36],inetworkauthor:6,inexpens:50,infin:[9,10,12],influenc:11,info:[6,37,43,53,73,125,183,197],inform:[4,6,12,13,22,34,41,53,55,56,57,58,60,61,64,67,93,113,115,116,117,124,125,146,163,165,185,186,187,195,196],infrastructur:[33,59],ing:11,ingest:6,ingestr:61,inher:[11,22],inherit:20,init:53,initcond:[9,14],initi:[6,14,18,25,30,32,34,43,53,56,58,61,125,158,186],initial_token:58,inject:43,innov:59,input:[9,10,14,17,22,30,61,189,197],inputd:22,inreleas:40,insecur:6,insensit:[11,12],insert:[6,9,10,11,12,14,15,16,20,22,36,39,42,43,50,56,60,61,193],insert_stat:[12,13],insertedtimestamp:183,insid:[6,11,12,13,22,25,60,61],insight:[197,198],inspect:[6,31,60,61,194],instabl:6,instal:[6,21,24,26,35,36,39,42,56,61,192,199],instanc:[0,10,11,12,13,14,16,18,19,20,21,22,24,31,35,36,47,48,50,53],instantan:53,instanti:10,instantli:6,instead:[4,10,11,13,18,22,25,27,34,36,43,48,146,165,183,199],instrospect:196,instruct:[6,8,11,27,28,29,31,42,199],instrument:[26,56],insuffic:195,insuffici:199,insufici:197,intasblob:13,integ:[0,10,11,12,13,17,22,47,53,190],integr:[32,35,42,59],intellij:[25,32],intend:[30,56,186],intens:[6,35,36],intent:30,intention:20,inter:[6,104,125,157,186],intera
ct:[35,41,61,199],interest:[0,48,56,198],interfac:[6,10,14,25,27,36,37,49,56,59,199],interleav:60,intern:[6,9,11,13,18,22,27,30,36,50,53,62,182,195,199],internaldroppedlat:53,internalresponsestag:[53,198],internet:6,internod:[6,36,56,186,195,199],internode_application_timeout_in_m:6,internode_encrypt:[6,56],internode_tcp_connect_timeout_in_m:6,internode_tcp_user_timeout_in_m:6,internodeconnect:[111,161],internodeus:[111,161],interpret:[6,10,22,61],interrupt:36,interv:[4,6,9,53,56,60,187],intra:[6,53,57,60],intrins:22,introduc:[6,10,17,28,43,58,193],introduct:[10,20,35],introspect:199,intrus:185,intvalu:14,invalid:[6,13,20,30,56,116,118,119,120,125,188,194,198],invalidatecountercach:125,invalidatekeycach:125,invalidaterowcach:125,invert:60,invertedindex:21,investig:[6,32,196,197,198,199],invoc:14,invok:[14,29,40,56,179],involv:[6,13,27,48,49,56,193,197,199],ioerror:25,ios:199,ip1:6,ip2:6,ip3:6,ip_address:63,ipv4:[6,17,22,36],ipv6:[6,17,22],irolemanag:6,irrevers:[11,22],isn:[0,18,25,33,36],iso8601:[43,183],iso:22,isol:[6,11,13,53,195,196,198],issu:[0,6,20,24,26,27,28,29,33,34,35,36,45,48,49,140,183,184,185,186,187,188,189,190,191,193,194,195,197,198,199],item:[12,22,24,30,31],iter:[0,6,188],its:[4,6,11,12,13,14,22,31,36,43,48,53,56,57,58,59,60,184,188],itself:[6,11,16,36,40,58,198],iv_length:6,jaa:56,jacki:29,jamm:31,januari:22,jar:[14,25,26,31,35,53],java7:56,java8_hom:31,java:[6,14,21,22,25,31,33,39,40,42,47,48,50,53,56,163,188,196,197,199],java_hom:199,javaag:31,javadoc:[24,25,30],javas:6,javascript:[6,14],javax:56,jbod:50,jce8:6,jce:6,jcek:6,jconsol:[42,48,56],jdk:6,jdwp:31,jenkin:[26,32,42],jetbrain:31,jira:[5,6,26,28,30,32,33,35,47,183,184,185,186,187,188,189,190,191,193,194],jks:60,jkskeyprovid:6,jmap:199,jmc:[48,56],jmx:[6,18,20,42,52,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],jmx_password:56,jmx_user:56,jmxremot:56,job:[33,65,95,138,140,147,178],job_thread:140,john:[13,22],join:[6,8,13,42,48,56,58,125,197,198],joss:13,jpg:22,jsmith:22,json:[9,10,13,15,42,48,49,69,174,176,183],json_claus:13,jsr:[6,14],jsse:6,jsserefguid:6,jstackjunit:35,jstackjunittask:35,judgement:25,jul:199,junit:[24,25,26,31,35],junittask:35,jurisdict:6,just:[6,14,20,28,31,33,35,36,47,48,55,56,60,195,199],jvm:[6,21,31,36,37,48,52,56,58,196,197],jvm_extra_opt:31,jvm_opt:[37,56],jvmstabilityinspector:30,keep:[6,8,11,25,28,33,36,43,48,53,62,116,182,193,195,198,199],keepal:[6,36],kei:[4,6,9,10,11,13,14,17,22,35,36,40,47,48,49,50,53,56,59,60,62,65,103,107,109,119,125,129,149,150,174,182,187],kept:[4,6,48,53,193],kernel:[6,36,47,199],key_alia:6,key_password:6,key_provid:6,keycach:53,keycachehitr:53,keyserv:[34,40],keyspac:[0,6,9,10,12,14,15,16,20,22,42,43,45,48,49,52,55,56,58,60,61,62,65,66,68,73,75,84,85,94,95,98,103,107,109,116,125,127,129,130,131,132,138,140,146,147,151,164,165,166,173,174,175,178,179,181,182,183,184,185,186,189,190,191,192,193,194,197,198],keyspace1:[20,184,186,187,188,189,190,192,197],keyspace_definit:60,keyspace_nam:[11,14,20,22,48,55,197],keystor:[6,56,186],keystore_password:6,keystorepassword:56,keytyp:187,keyword:[10,11,13,14,15,16,17,22],kib:[70,124,174,198],kick:[125,142],kill:[6,40],kilobyt:49,kind:[11,12,2
2,24,33,47,48,195,198],kitten:22,knife:163,know:[4,6,13,22,25,28,34,48,189,197,198,199],knowledg:28,known:[20,22,38,41,45,48],krumma:35,ks_owner:56,ks_user:56,kspw:186,ktlist:164,kundera:38,label:[22,24,34],lack:[53,197,198],lag:53,land:49,landlin:22,lang:[42,53,56,188,199],languag:[6,9,10,12,14,21,22,27,38,41,42,61],larg:[6,11,13,14,22,24,35,42,48,50,53,56,59,61,183,189,191,195,197,198,199],large_daili:43,larger:[6,35,36,48,49,50],largest:[6,53],last:[6,12,13,14,15,19,27,48,53,63,125,187,188,189,195,197,199],lastli:[13,22],lastnam:13,latenc:[0,6,11,36,53,57,59,60,196,197],latent:[195,199],later:[0,11,22,25,27,33,36],latest:[0,6,34,40,48,61,179,185,197],latest_ev:60,latter:12,layer:50,layout:[11,27],lazi:11,lazili:11,lead:[6,10,11,22,48,197,199],learn:[6,35,36,61],least:[0,4,6,11,12,13,18,27,36,48,50,55],leav:[6,12,13,25,35,36,61,195,197,198],left:[6,17,19,26,48,193],legaci:[4,6,20,60],legal:[10,11],length:[4,6,10,17,22,30,48],lengthier:33,less:[4,6,22,26,33,36,45,50,191,194,197,198,199],let:[6,28,34,48],letter:17,level:[4,6,10,11,13,19,20,25,30,37,43,50,52,53,56,61,62,105,116,125,158,182,187,188,190,195,197,198],leveledcompactionstrategi:[11,45,48,185,188,198],lexic:36,lib:[4,6,21,26,30,31,35,40,183,184,185,186,187,188,189,190,191,192,193,194,199],libqtcassandra:38,librari:[8,30,32,35,38,53,61],licenc:30,licens:[26,30,31,33],lie:195,lies:195,life:33,lifespan:50,lightweight:62,like:[0,6,12,13,14,17,22,25,27,29,30,33,35,36,42,48,49,50,55,56,188,189,190,195,196,197,199],likewis:20,limit:[4,6,9,10,11,20,22,36,47,48,49,56,60,197,199],line:[6,12,25,33,35,37,40,41,47,56,62,63,65,66,68,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,183,186,188,193,197],linear:50,linearli:45,link:[6,8,11,12,33,35,40,47,192],linux:[6,27,34,36,196,197,199],list:[4,5,6,9,10,11,12,13,14,17,24,26,27,31,32,33,34,35,37,40,41,42,43,47,48,53,56,58,60,61,62,63,65,66,67,68,73,75,81,84,85,91,94,95,98,100,103,107,108,109,111,115,116,122,123,125,127,130,131,132,135,138,139,140,141,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,182,184,186,187,188,189,194],list_liter:[13,22],list_permissions_stat:12,list_roles_stat:12,list_users_stat:12,listarg:14,listen:[6,42,53,199],listen_address:[37,41,42],listen_interfac:37,listsnapshot:[125,192],liter:[10,12,14,17,20,61],littl:[25,195,198],live:[13,27,42,48,53,58,187,192,193,197,199],livediskspaceus:53,liveness_info:183,livescannedhistogram:53,livesstablecount:53,load:[0,6,11,21,22,28,42,52,53,56,57,58,60,62,117,125,132,140,165,182,195,198,199],loader:186,loadm:186,local:[0,4,6,11,20,26,31,32,33,35,41,50,53,56,57,61,125,134,140,144,177,187,195,196,197,199],local_jmx:56,local_on:[0,56,61,195,198],local_quorum:[0,61,195,199],local_read_count:174,local_read_latency_m:174,local_seri:61,local_write_latency_m:174,localhost:[6,41,43,56],locat:[6,34,39,40,43,49,53,56,57,61,171,186,195,197,199],lock:[6,36,53,199],log:[0,4,6,11,13,27,30,35,39,40,42,47,52,53,56,60,62,74,78,84,88,105,125,140,143,158,171,182,196,199],log_al:48,logback:[37,43,197],logdir:43,logger:[25,37,43,84],loggernam:43,logic:[6,21,197,198],login:[6,9,20,34,35,43,56,62,195],logmessag:43,lol:22,longer:[6,9,10,11,34,36,48,58,65,125,190,193,195],longest:197,look:[6,12,24,27,28,29,33,35,48,50,55,188,190,195,197,199],lookup:53,loop:25,lose:[4,6,48,58,193],loss:[6,22,48,55,199],lost:[43,48,58,190],lot:[6,27,41,42,55,62,18
2,191,197,198,199],low:[6,33,59,125,127,199],lower:[0,4,6,11,12,13,20,36,45,48,53,58,195,197],lowercas:12,lowest:[6,33,48,187],lsm:[198,199],lucen:42,luckili:196,lwt:0,lz4:[4,6],lz4compressor:[4,6,11,49,187],mac:199,macaddr:9,machin:[6,11,35,36,53,56,57,58,187,196,199],made:[6,22,34,42,45,50,56,197],magnet:[4,6],magnitud:13,mai:[0,4,6,9,10,11,13,14,16,17,18,20,22,24,26,27,30,31,33,35,36,40,43,45,48,53,56,57,58,60,61,147,188,190,195,196,197,198,199],mail:[5,28,33,34,42],main:[0,6,14,18,31,36,39,40,56,61,188,195,197],main_actor:13,mainli:6,maintain:[6,11,28,33],mainten:53,major:[0,4,10,27,33,56,68,125,192,198],make:[0,6,8,9,21,22,24,25,26,27,28,31,33,35,36,37,40,48,56,58,60,61,183,197,199],malform:195,malici:56,man:6,manag:[6,20,24,27,31,32,34,35,42,53,56,58,62,64,125,182],mandatori:[11,14],mani:[0,6,11,25,30,33,48,49,50,53,56,60,61,62,65,68,75,85,88,94,95,140,147,166,178,179,182,188,194,195,198,199],manifest:[62,182],manipul:[12,15,18,35,42,183],manual:[6,26,29,36,193,199],map:[6,9,10,11,13,14,17,20,42,47,53,197,199],map_liter:[11,16,20,22],mar:22,mark:[6,20,33,48,55,58,79,125,187,189,193],marker:[4,6,11,12,30,36,193],markup:27,marshal:187,massiv:[28,199],match:[4,6,12,13,14,17,20,53,57,187,192],materi:[0,6,10,11,12,15,42,53,61,125,181],materialized_view_stat:12,matter:[11,36,199],maven:26,max:[4,6,42,48,53,56,60,61,88,98,106,125,140,151,159,187,190,197,198],max_hint_window_in_m:58,max_log_s:[43,88],max_map_count:36,max_mutation_size_in_kb:[4,6,36],max_queue_weight:[43,88],max_threshold:48,maxattempt:61,maxbatchs:61,maxfiledescriptorcount:53,maxfiles:43,maxhintwindow:159,maxhistori:43,maxim:50,maximum:[4,6,14,43,45,53,61,88,100,147,153,187,190,191,195,197,198],maximum_live_cells_per_slice_last_five_minut:174,maximum_tombstones_per_slice_last_five_minut:174,maxinserterror:61,maxldt:184,maxoutputs:61,maxparseerror:61,maxpartitions:53,maxpools:53,maxrequest:61,maxrow:61,maxt:184,maxtasksqueu:53,maxthreshold:151,maxtimestamp:4,maxtimeuuid:10,mayb:13,mbean:[6,20,48,53,56],mbeanserv:20,mbit:186,mbp:6,mct:6,mean:[0,6,9,11,12,13,14,17,18,22,42,48,53,57,60,61,140,195,196,197,198,199],meaning:13,meanpartitions:53,meant:[22,36,53],measur:[6,30,33,35,53,58,60,61,199],mechan:47,median:[53,197],medium:199,meet:[6,30,56],megabit:186,megabyt:[6,191,198],mem:199,member:[25,56,60],membership:6,memlock:36,memori:[4,6,11,42,43,45,47,48,52,59,194,197,199],memory_pool:53,memtabl:[2,6,11,45,47,48,49,50,53,164,197,199],memtable_allocation_typ:4,memtable_cell_count:174,memtable_cleanup_threshold:4,memtable_data_s:174,memtable_flush_period_in_m:11,memtable_off_heap_memory_us:174,memtable_switch_count:174,memtablecolumnscount:53,memtableflushwrit:[53,198],memtablelivedatas:53,memtableoffheaps:53,memtableonheaps:53,memtablepool:6,memtablepostflush:[53,198],memtablereclaimmemori:[53,198],memtableswitchcount:53,mention:[6,22,33,53,56,186,195],menu:31,mere:25,merg:[27,29,33,45,49,50,52,199],mergetool:29,merkl:[6,53,55],mess:[33,35],messag:[6,22,24,27,30,33,40,42,43,53,56,186,190,191,192,193,194,195,197,198],met:13,meta:[13,53,60],metadata:[4,20,34,49,50,53,62,182,190,193,194,197],metal:6,meter:53,method:[10,13,14,20,25,28,30,31,35,42,56,60],metric:[6,52,60,196,198,199],metricnam:53,metricsreporterconfigfil:53,mib:[70,124,174],micro:198,microsecond:[6,11,13,22,53,187,198],midnight:22,might:[6,13,34,48,53,63,65,66,68,73,75,81,85,88,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,194,195
,198],migrat:[6,53,57],migrationstag:[53,198],milli:4,millisecond:[4,6,10,22,53,127,147,175,187,190,198,199],min:[4,6,36,47,48,53,60,61,98,125,151,187,197,198],min_sstable_s:48,min_threshold:48,minbatchs:61,minim:[6,48,50],minimum:[6,11,14,37,53,55,185,187],minlocaldeletiontim:187,minor:[10,12,27,52],minpartitions:53,mint:184,minthreshold:151,mintimestamp:187,mintimeuuid:10,minttl:187,minut:[6,22,43,48,53,56,60,88],mirror:27,misbehav:[42,48,196],misc:[111,161],miscelen:53,miscellan:6,miscstag:[53,198],mismatch:6,misrepres:190,miss:[11,18,24,26,48,53,55,58,193,199],misslat:53,misspel:185,mistaken:[63,65,66,68,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181],mistun:197,mit:34,mitig:[6,56],mix:[6,48,60,199],mkdir:[34,197],mmap:36,mnt:16,mock:35,mode:[4,6,56,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,195],model:[11,15,20,33,42,56,60,199],moder:50,modern:[50,199],modif:[13,20,197],modifi:[6,9,10,11,14,20,22,33,45,48,49,189],modification_stat:13,modul:61,modular:30,moment:[6,33],monitor:[36,42,52,56,57,64,125,195,199],monkeyspeci:[11,18],monkeyspecies_by_popul:18,monoton:[0,11],month:22,more:[0,4,6,10,11,12,13,22,25,27,28,33,34,35,37,41,42,43,45,50,52,53,56,57,58,60,62,68,94,95,125,127,140,147,163,175,179,182,187,188,194,196,198,199],moreov:13,most:[6,11,12,13,22,27,28,31,33,35,36,37,43,48,49,50,53,56,61,67,125,175,187,188,195,197,198,199],mostli:[4,6,11,22,196,197],motiv:[35,48],mount:[6,199],move:[6,33,36,42,47,52,53,125,190,193,198],movement:[52,197],movi:[13,22],movingaverag:6,msg:43,mtime:[11,189],mtr:199,much:[0,5,6,11,45,47,48,57,186,195,197,199],multi:[0,6,12,30,197,199],multilin:32,multipl:[4,6,10,11,12,13,14,19,22,25,30,31,33,36,37,48,50,57,60,62,130,182,183,195,198],multipli:48,multivari:59,murmur3:4,murmur3partit:4,murmur3partition:[6,14,61,187],must:[0,4,6,10,11,13,14,17,18,20,25,26,31,33,35,36,37,48,53,56,58,60,61,164,182,183,184,185,186,187,188,189,190,191,192,193,194],mutant:16,mutat:[0,4,6,13,36,47,53,179,198],mutatedanticompactiongaug:53,mutationsizehistogram:53,mutationstag:[53,198],mv1:18,mvn:26,mx4j:53,mx4j_address:53,mx4j_port:53,mx4jtool:53,mxbean:20,myaggreg:14,mycolumn:17,mydir:61,myevent:13,myfunct:14,myid:12,mykei:17,mykeyspac:14,mytabl:[11,14,17,19,21],mytrigg:21,nairo:22,name:[4,6,9,10,11,12,13,14,16,17,18,20,21,22,24,27,30,31,33,34,35,36,37,43,53,56,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,183,185,186,189,192,195,197,198,199],names_valu:13,nan:[9,10,12],nanosecond:[22,53],narrow:[195,197,198,199],nathan:13,nativ:[4,6,10,12,15,17,30,36,41,43,53,61,77,87,125,131,168,186,198,199],native_transport_port:37,native_transport_port_ssl:56,nativ
e_typ:22,natur:[11,22,25,48,49,199],navig:27,nbproject:31,ncurs:199,nearli:31,neccessari:6,necessari:[6,11,14,20,33,40,43,49,56,183,187,190],necessarili:[6,12,37],need:[0,4,6,10,11,12,13,20,22,24,25,26,30,31,33,34,35,36,37,40,41,43,45,48,49,50,55,56,57,59,61,103,107,186,191,192,194,198,199],neg:6,negat:[19,56],neglig:[13,199],neighbor:195,neighbour:48,neither:[6,18,22,56],neon:31,nerdmovi:[13,16],nest:[12,13,25],net:[6,31,36,39,40,43,56],netbean:32,netstat:[58,125],netti:6,network:[6,13,36,50,55,56,57,124,125,128,197],network_author:20,network_permiss:6,networktopologystrategi:[56,60],never:[0,6,10,11,12,13,14,22,25,36,48,55,188],nevertheless:13,new_rol:20,new_superus:56,newargtuplevalu:14,newargudtvalu:14,newer:[48,50,61,95,184,199],newest:[11,48,184],newli:[11,20,22,33,47,125,132],newreturntuplevalu:14,newreturnudtvalu:14,newtuplevalu:14,newudtvalu:14,next:[0,6,36,41,48,61,196,197],ngem3b:13,ngem3c:13,nic:199,nid:199,nifti:29,nio:[6,14,53],nntp:199,no_pubkei:40,node:[0,4,6,11,13,14,21,22,30,35,37,38,41,42,43,45,47,48,50,52,53,55,57,59,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186,187,188,196,197,198,199],nodej:39,nodetool:[4,6,18,40,42,45,49,52,55,56,58,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,189,192,194,195,196,197,199],nois:[6,197],noiser:197,noisi:197,nologin:9,non:[6,9,10,11,12,13,14,20,22,36,45,49,53,56,61,187,190],none:[0,6,11,13,22,53,56,187],nonsens:20,nor:[6,11,18,22],norecurs:[9,20],norm:53,normal:[14,17,20,31,36,40,53,60,61,195,197,198,199],nosql:59,nosuperus:[9,20],notabl:[14,17],notat:[10,12,13,61],note:[0,4,5,6,10,11,12,13,14,15,17,20,22,29,32,33,34,36,48,56,182,183,184,185,186,187,188,189,190,191,192,193,194,197,199],noth:[6,11,14,29,35,36,184],notic:[6,56,198,199],notif:8,notion:[11,12],now:[10,24,25,27,31,48,58,199],ntp:6,nullval:61,num_cor:61,num_token:58,number:[0,4,6,10,11,12,13,14,15,17,18,22,24,31,33,34,35,36,40,43,45,48,49,53,56,58,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186,187,191,195,196,198,199],number_of_cor:6,number_of_dr:6,number_of_partitions_estim:174,numer:[15,19,24,45,60],numprocess:61,numsampl:197,object:[6,11,12,30,183],objectnam:20,observ:25,obsolet:[6,50,53,194],obtain:[12,56,199],obviou:[14,29],obvious:11,occasion:[55,198],occup:[13,199],occupi:[6,53],occur:[6,10,12,13,21,22,36,48,50,53,55,182,183,184,185,186,187,188,189,190,191,192,193,194,199],occurr:22,octet:[6,57],oddli:6
,off:[4,6,36,47,49,53,56,61,125,142,199],off_heap_memory_used_tot:174,offer:[15,35,49],offheap:[45,50],offheap_buff:6,offheap_object:6,offici:[27,33,42,59,61],offset:[4,47,53],often:[6,11,12,25,27,28,33,35,36,43,48,49,50,55,56,57,61,88,188,195,198,199],ohc:6,ohcprovid:6,okai:25,old:[4,6,48,58,62,82,92,125,182,193,199],older:[4,6,14,31,40,48,50,61,184,192],oldest:[4,6,11,43,184],omit:[4,6,10,11,13,17,22,158],onc:[0,4,6,11,12,14,22,24,29,31,33,35,36,47,48,49,50,53,55,56,58,60,61,188,195],one:[0,4,6,9,10,11,12,13,14,17,18,20,22,25,28,31,33,35,37,42,43,45,48,50,53,55,56,57,58,61,62,65,68,75,85,94,95,111,125,140,147,161,164,166,178,179,182,183,187,190,192,193,195,197,198,199],oneminutecachehitr:53,ones:[6,11,12,13,14,18,20,53,188],ongo:[28,48,53,58],onli:[0,4,6,9,11,12,13,14,17,18,20,22,25,27,33,34,35,37,42,45,47,48,49,50,53,55,56,57,58,60,61,62,140,164,174,182,184,186,189,190,191,192,194,195,198,199],onlin:61,only_purge_repaired_tombston:48,onto:[4,48],open:[5,6,20,24,28,32,34,56,57,59,186,199],openfiledescriptorcount:53,openhft:43,openjdk:40,oper:[0,6,10,11,12,13,15,16,18,20,22,25,32,42,43,45,47,50,53,55,56,58,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,185,192,193,195,196,197,198,199],operand:19,operatingsystem:53,operationtimedoutexcept:195,opertaion:6,oplog:193,opnam:60,opportun:[27,45],ops:[36,60],opt:14,optim:[4,6,11,12,13,36,48,50,58,187,197],optimis:140,option1_valu:20,option:[4,6,9,10,11,12,13,14,16,20,22,27,31,33,35,36,40,49,50,52,56,58,60,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,183,186,187,188,189,190,191,192,193,194,197,198,199],oracl:[6,40,56,199],order:[0,4,6,9,10,11,14,18,22,25,33,36,45,47,48,57,58,60,61,187],ordering_claus:13,orderpreservingpartition:6,ordinari:6,org:[6,14,21,24,25,26,27,31,34,35,36,40,43,48,49,53,56,183,184,185,186,187,188,189,190,191,193,194,197],organ:[4,24,31,38],orgapachecassandra:34,origin:[4,9,27,29,33,47,147,188,190,191,192],orign:13,os_prio:199,osx:27,other:[0,4,6,10,11,12,13,14,18,20,22,26,27,28,29,31,33,37,42,43,45,48,50,52,53,56,57,58,125,130,141,184,187,188,193,195,196,197,198,199],other_rol:20,otherwis:[0,6,9,12,13,16,22,55,100,184,195],our:[5,6,8,24,27,28,29,31,34,48,199],ourselv:29,out:[4,6,11,12,25,26,28,31,33,34,48,53,55,56,57,58,59,140,183,184,195,198,199],outbound:6,outboundtcpconnect:6,outgo:[6,199],outgoingbyt:53,outlin:[24,56],outofmemoryerror:42,output:[14,20,30,31,34,45,48,60,61,62,68,69,174,176,182,187,190,191,193,194],outsid:[11,21,22],outstand:[193,198],over:[0,4,6,11,22,36,48,53,55,56,57,58,60,188,190,193],overal:14,overflow:[17,62,147,182],overhead:[6,36,49,53,58],overidden:56,overlap:[0,48,188],overload:[6,14,36,186],overrid:[6,11,24,25,56,58,147,186,190],overridden:[6,11,43],overview:[2,42,52],overwrit:[43,49,50,56],overwritten:[53,95],own:[0,11,1
2,14,22,28,32,33,36,40,48,49,53,56,59,60,103,109,116,125,179,188,198],owner:22,ownership:[48,146],ownersip:197,p0000:22,p50:198,p99:199,pacif:22,packag:[31,35,36,37,39,41,61,197],packet:[6,197],page:[6,22,24,27,28,31,35,36,50,53,59,62,196,198],paged_rang:198,paged_slic:53,pages:61,pagetimeout:61,pai:[25,26,34],pair:[6,11,20,22,48,56],parallel:[18,35,48,140,198],paramet:[4,6,14,24,25,30,31,37,43,45,50,57,58,125,158],parameter:24,paranoid:6,parent:[26,186],parenthesi:[11,60,61,195],parnew:50,pars:[6,12,43,47,61,199],parser:[9,10],part:[5,6,11,13,14,18,22,26,30,31,33,35,36,56,57,58,61,186,195],parti:[30,42,53,183],partial:[4,11,193],particip:[0,21],particular:[0,6,11,12,13,14,17,20,22,36,50,53,56,195,197,198,199],particularli:[12,22,56,197,198,199],partit:[4,6,10,11,13,14,36,45,48,50,53,60,95,103,107,125,147,175,183,187,195,197,198],partition:[4,10,13,14,55,61,72,125,140,187],partition_kei:[11,13],partitionspercounterbatch:53,partitionsperloggedbatch:53,partitionsperunloggedbatch:53,partitionsvalid:53,partli:13,pass:[30,33,37,61,163,186,187,198,199],password:[6,9,13,20,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186],password_a:20,password_b:20,passwordauthent:[6,56],passwordfilepath:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],past:53,patch:[10,13,24,25,27,29,30,32,35,42],path1:43,path2:43,path:[5,6,16,30,40,45,48,49,50,53,56,59,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,183,184,186,190,192,194,197,199],pathn:43,patter:20,pattern:[6,20,22,43,195,198,199],paus:[6,36,125,126,197,199],pausehandoff:125,paxo:[13,53,61],pcap:199,peak:[186,199],peer:[6,20,53,199],peerip:53,penalti:[6,13],pend:[6,48,53,125,139,198],pending_flush:174,pendingcompact:53,pendingflush:53,pendingrangecalcul:[53,198],pendingtask:53,pendingtasksbytablenam:53,pennsylvania:22,peopl:[27,33,34,36],per:[0,4,6,10,11,13,25,29,33,36,45,47,48,49,53,56,60,61,125,148,156,183,186,193,195,197,198,199],percent:53,percent_repair:174,percentag:[6,53,57,199],percentil:[53,195,198,199],percentrepair:53,perdiskmemtableflushwriter_0:[53,198],perf:199,perfdisablesharedmem:199,perfect:14,perform:[6,11,13,20,22,28,29,30,32,33,36,37,43,45,48,50,53,55,56,57,61,140,197,198,199],perhap:[195,197],period:[4,6,24,50,53,55,56,58,125,127,199],perl:39,perman:[11,36,48,50,197],permiss:[6,9,12,35,56],permit:[6,20,47,56],persist:[4,36,45,47,50,56,199],person:199,perspect:36,pet:22,pgp:34,pgrep:40,phanto
m:38,phase:[58,61,198],phi:6,phone:[13,22],php:39,physic:[0,6,11,36,50,57],pick:[6,29,33,36,48,56,58,60,130],pid:[36,40,199],piec:[12,48,53],pile:6,pin:[6,57],ping:[33,199],pkcs5pad:6,pkill:40,place:[5,6,16,21,25,29,33,47,48,53,55,56,61,125,132,186,191,197,199],placehold:[14,61],plai:[14,22],plain:4,plan:[11,29,33],plane:27,platform:[20,24,59],platter:[6,50],player:[14,22],playorm:38,pleas:[5,6,11,13,14,22,24,25,27,31,34,35,36,43,56,60,194],plu:[14,48,53,198],plug:[6,24],pluggabl:[20,56],plugin:[42,53],pmc:34,poe:22,point:[4,6,10,17,22,25,27,31,34,42,56,60,61,103,125,186,195,199],pointer:14,polici:[6,33,34,56,179,195],poll:56,pom:32,pool:[6,40,53,125,153,176,198,199],pop:60,popul:[11,18,60],popular:[31,50],port:[6,31,37,42,43,53,56,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186,199],portion:[50,61,191],posit:[4,6,10,11,19,22,45,53,58,183,187],possbili:6,possess:20,possibl:[0,6,10,11,13,14,17,20,22,24,30,33,35,36,45,48,50,53,56,58,60,188,195,197],post:[13,24,32,125,150],post_at:13,posted_at:13,posted_bi:11,posted_month:11,posted_tim:11,potenti:[0,6,9,11,12,14,30,43,48,50,56,58,147,188,190],power8:59,power:[6,59],pr3z1den7:22,practic:[0,6,11,12,13,34,52,56],pre:[6,17,22,50,56,190,191,193],preced:[19,36,60],precis:[10,17,22,48,187],precondit:53,predefin:11,predict:[13,188],prefer:[0,6,11,12,22,25,33,56,57],preferipv4stack:31,prefix:[11,12,22,187,193],prepar:[6,14,15,43,53],prepare_releas:34,preparedstatementscount:53,preparedstatementsevict:53,preparedstatementsexecut:53,preparedstatementsratio:53,prepend:22,prerequisit:[32,39],presenc:6,presens:4,present:[12,13,18,37,47,53,56,190,199],preserv:[6,11,17,20],preserveframepoint:199,press:40,pressur:[6,53,198,199],pretti:[61,199],prevent:[6,11,35,47,53,55,186,190,199],preview:[27,55,140],previou:[6,10,11,22,34,43,48,55,58,192],previous:[6,193],previsouli:[91,125],primari:[9,10,11,13,14,22,35,47,48,49,55,56,58,60],primarili:[6,11],primary_kei:[11,18],print0:189,print:[55,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,197],prio:199,prior:[6,13,20,22,58],prioriti:[33,199],privat:[6,25,34,56,57],privileg:[20,40,56],probabilist:[45,49],probabl:[0,4,6,11,35,45,48,55,112,125,162,197,198,199],problem:[5,6,14,29,30,34,36,56,195,196,198,199],problemat:[22,195],proc:[6,36],proce:[30,49,58,195],procedur:[13,56],proceed:194,process:[0,6,14,24,26,28,29,30,31,32,33,35,36,40,42,47,49,50,53,55,56,58,59,61,64,100,125,126,145,153,185,186,190,192,193,194,197,198,199],prod_clust:61,produc:[13,14,28,48,88,195],product:[0,6,11,26,28,33,36,50,57],profil:[13,31,62,125,127,199],profileload:125,program:[14,35,196,199],programmat:189,progress:[25,29,33,34,45,52,60,62,125,181,182,193,198],project:[24,25,26,27,28,35,53],promin:11,promot:4,prompt:61,propag:[6,11,14,25,30,57],proper:[11,22,2
7,36,56],properli:[6,30],properti:[4,6,11,18,20,31,34,39,47,48,56,57,58],propertyfilesnitch:[6,57],proport:[6,13],proportion:[6,97,125,148],propos:[6,34,53],protect:[6,50,55,56,193],protocol:[6,30,36,41,43,53,56,61,67,77,82,87,92,125,168,186,197,199],prove:199,provid:[0,4,5,6,11,12,13,14,15,17,22,31,33,34,41,43,47,48,49,50,53,55,56,57,58,59,60,62,124,125,135,139,186,187,188,191,193,194,195,197],provis:199,proxim:[6,57],proxyhistogram:[125,198],prtcl:186,prv:[55,140],ps1:56,ps22dhd:13,pt89h8m53:22,publish:26,published_d:60,pull:[27,35,48,53,140],pure:199,purg:50,purpos:[11,12,13,22,50,56],push:[29,33,53],put:[15,33,37,48,58,116,140,188,198],pwd:34,pwf:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],python:[14,24,33,35,39,40,61],pytz:62,qos:43,quak:[14,22],qualifi:[11,14,33,198],qualiti:[27,56],quantiti:[22,198],queri:[0,6,10,11,12,13,14,16,18,20,39,42,43,48,53,59,60,61,78,88,125,143,196,199],queryvalidationexcept:195,question:[8,20,32,42,199],queu:[6,53],queue:[6,43,53,88,198,199],quick:[116,179,194],quickli:[36,48,198],quill:38,quintana:22,quit:[48,61,186,198,199],quorum:[0,11,56,61,195],quot:[9,10,11,12,14,17,20,60,61],quotat:20,quoted_identifi:12,quoted_nam:11,r_await:199,race:[22,29],rack1:6,rack:[0,6,56,57,195,198],rackdc:[6,57],rackinferringsnitch:[6,57],raid0:50,raid1:50,raid5:50,rain:12,rais:[6,12,36,195],raison:9,ram:[45,49,50,199],ran:189,random:[11,14,36,58],randomli:[6,58],randompartition:[6,13,14],rang:[2,6,10,11,13,22,30,48,52,53,55,60,61,68,73,111,125,130,140,161,195,198],range_slic:[53,198],rangekeysampl:125,rangelat:53,rangemov:58,rangeslic:53,rapid:50,rapidli:199,rare:[10,45,195],raspberri:50,rate:[6,11,53,56,60,61,186,199],ratebasedbackpressur:6,ratefil:61,rather:[6,13,36,47,48,50,60],ratio:[6,49,50,53,60,187],ration:4,raw:[4,6,14,62,182,197],reacah:43,reach:[4,6,11,33,36,47,48,188],read:[0,6,11,13,22,25,27,30,35,36,39,42,45,48,49,50,52,53,56,57,60,61,111,161,174,179,186,187,194,195,197,198,199],read_ahead_kb:199,read_lat:174,read_repair:[0,11,53,198],read_request_timeout:36,readabl:[11,43,47,70,124,174,198],readi:[0,11,27,33,56],readlat:[53,195],readm:[27,34],readrepair:53,readrepairstag:[53,198],readstag:[53,198],readtimeoutexcept:195,readwrit:56,real:[4,8,11,25,36,59,197],realclean:26,realis:60,realiz:48,realli:[6,35,185,189,195,199],realtim:47,reappear:55,reason:[0,4,6,11,13,14,15,18,36,37,40,48,50,55,56,58,198,199],rebas:27,rebuild:[0,45,48,49,53,125,131,147],rebuild_index:125,receiv:[6,14,18,33,36,48,50,58,195,199],recent:[6,33,35,50,67,188,193],reclaim:[43,48],recogn:[13,31,33],recommend:[4,6,11,22,27,36,43,50,56,58,197],recompact:48,recompress:49,reconcil:11,reconnect:56,reconstruct:188,record:[4,6,11,13,19,22,33,43,48,53,60,199],recov:[6,36,48],recoveri:6,recreat:[20,61],recrus:6,recurs:88,recv:40,recycl:[4,6,53],redhat:34,redirect:43,redistribut:[6,197],redo:33,reduc:[4,6,28,36,48,49,55,62,71,97,125,140,148,182],reduct:6,redund:[0,25,30,33,50],reenabl:[87,89,90,125],ref:[34,43,183,184,185,186,187,188,189,190,191,193,194],refer:[6,11,12,13,14,22,24,25,26,35,36,40,41,43,58,60,61,195,197],referenc:[6,60],reflect:[47,48,183],refresh:[6,56,61,125,133],refreshsizeestim:125,refus:42,regar
d:[11,13],regardless:[0,6,20,33,199],regener:45,regexp:12,region:[6,57],regist:22,registri:56,regress:[30,35],regular:[9,12,27,31,35,36,53,61],regularcolumn:187,regularli:55,regularstatementsexecut:53,regularupd:60,reinsert:[147,190],reject:[6,13,36,47,56,195],rel:[6,22,61,199],relat:[8,10,12,13,26,31,33,48,53,60,187,195,199],relationship:6,releas:[6,10,26,32,33,40,42,61,199],relev:[13,20,22,33,49,56,59,186,187,190,199],relevel:[62,182],reli:[6,14,22,36],reliabl:[28,48],reload:[6,52,125,134,135,136,137],reloadlocalschema:125,reloadse:125,reloadssl:[56,125],reloadtrigg:125,reloc:[125,138,197],relocatesst:125,remain:[6,13,14,20,22,29,48,53,55,58,174,198],remaind:[17,19,49],remeb:43,remedi:48,rememb:[43,195],remot:[0,4,27,29,31,42,48,56,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,195],remov:[0,4,6,10,11,12,13,14,15,17,18,20,22,26,30,36,42,47,52,56,63,66,95,125,139,188,193,194,199],removenod:[58,63,125],renam:[9,22],render:27,reorder:6,repair:[0,4,6,11,18,36,42,49,52,53,57,58,62,116,125,141,158,179,182,187,190,194,198],repair_admin:125,repairedat:189,repairpreparetim:53,repairtim:53,repeat:[12,40,49,56],replac:[0,6,9,14,20,22,26,30,34,36,42,48,52,55,88,192,193],replace_address_first_boot:58,replai:[0,4,22,50,53,97,125,142,148,187],replaybatchlog:125,repli:28,replic:[2,6,11,42,48,50,55,56,58,60,63,125],replica:[0,6,11,13,36,48,53,55,57,58,71,107,125,195,198,199],replication_factor:[0,11,56,60],repo:[26,29,31,34],repodata:34,repomd:34,report:[6,26,32,33,42,52,195],report_writ:20,reportfrequ:61,repositori:[5,8,24,26,27,28,31,33,35,40,59],repres:[6,10,17,20,22,36,48,53,56,57,60,61,187,197],represent:[10,17,183],reproduc:28,reproduct:28,request:[0,6,13,20,21,27,35,36,43,45,48,50,52,56,57,61,125,162,178,194,195,198,199],request_respons:[53,198],requestresponsest:198,requestresponsestag:[53,198],requesttyp:53,requir:[0,6,11,13,14,20,25,27,29,30,31,32,33,34,36,45,49,50,56,60,185,186,189,192],require_client_auth:6,require_endpoint_verif:6,resampl:6,reserv:[6,10,12,15,199],reservoir:195,reset:[6,13,27,125,144,158,185],reset_bootstrap_progress:58,resetfullquerylog:125,resetlocalschema:125,resid:[6,13,36,53,199],resolut:[6,13,32,36],resolv:[26,29,36,146,165],resort:[63,125],resourc:[20,56,186,198],resp:14,respect:[6,10,11,14,24,26,40,55,57,88,197],respond:[0,6,12,199],respons:[0,6,20,36,53,58,198],ressourc:22,rest:[6,11,12,22,24,30,58,195],restart:[36,48,56,58,125,132,150,185,197],restor:[48,58,61,186,192,193],restrict:[6,10,11,13,18,55],restructuredtext:27,result:[0,6,10,11,12,14,17,20,22,28,33,36,48,53,55,61,182,183,184,185,186,187,188,189,190,191,192,193,194,199],resum:[64,125,145],resumehandoff:125,resurrect:48,resync:[125,144],retain:[20,36,43,48,190,192],rethrow:25,retir:27,retri:[0,6,11,22,53,88],retriev:[11,13,20,26],reus:30,revers:[11,13],revert:197,review:[11,25,27,32,33,35,42],revis:60,revok:[9,56],revoke_permission_stat:12,revoke_role_stat:12,rewrit:[45,48,49,62,125,147,178,182,190],rewritten:[50,147,190],rfc:[14,22],rhel:42,rich:[22,197],rid:26,rider:22,riderresult:22,right:[6,19,31,34,36,55,61,198,199],ring:[2,6,42,55,56,58,61,121,123,125,158,186,195],rise:195,risk:[11,48],riski:48,rmb:199,rmem_max:6,rmi:[36,56],
rogu:14,role:[6,9,10,12,15,52],role_a:20,role_admin:20,role_b:20,role_c:20,role_manag:56,role_nam:20,role_opt:20,role_or_permission_stat:12,role_permiss:6,roll:[36,43,56,88],roll_cycl:[43,88],rollcycl:43,rollingfileappend:43,rollingpolici:43,rollov:43,romain:22,room:[5,8,34],root:[6,29,33,40,194,197],rotat:[6,197],roughli:6,round:[13,48,53],rout:[6,57],routin:199,row:[0,4,6,10,11,13,14,15,17,18,35,41,45,49,50,53,60,61,62,95,116,120,125,147,149,150,182,187,190,194,199],rowcach:[42,53],rowcachehit:53,rowcachehitoutofrang:53,rowcachemiss:53,rowindexentri:53,rows_per_partit:11,rpc:[6,53],rpc_timeout_in_m:[111,161],rpm:34,rpmsign:34,rrqm:199,rsc:179,rst:27,rubi:[14,39],rule:[6,12,14,33,36,195,197],run:[4,5,6,12,22,24,26,29,31,33,34,36,37,40,48,50,53,55,56,58,59,60,62,116,140,163,182,185,186,187,189,191,192,196,197,198,199],runnabl:199,runtim:[6,18,39,105,125],runtimeexcept:25,rust:39,safe:[14,22,48,56,199],safeguard:50,safepoint:197,safeti:[11,48,58],sai:42,said:[11,33,36,125,178,199],same:[0,4,5,6,11,12,13,14,15,17,18,19,20,22,27,29,31,33,37,42,45,48,53,55,56,57,58,60,140,188,193,195,197,199],samerow:60,sampl:[4,6,12,14,53,60,61,88,125,127,129,175],sampler:[53,127,175,198],san:50,sandbox:[6,14],sasi:6,satisfi:[0,11,25,50,53,58],satur:[6,53,198,199],save:[6,13,22,24,26,36,37,45,49,50,58,60,125,150],saved_cach:6,saved_caches_directori:37,sbin:36,scala:[14,39],scalabl:[34,59],scalar:15,scale:[35,49,59,60],scan:[6,13,45,53],scenario:29,scene:36,schedul:[6,24],schema:[9,11,14,17,53,56,60,61,72,125,134,144,185,187],schema_own:20,scope:[20,43,53,56],score:[6,14,22,57],script:[6,14,24,31,34,35,62,88,182,183,184,185,186,187,188,190,191,192,193,194,199],scrub:[45,48,49,53,62,125,171,182],sda:199,sdb:199,sdc1:199,sdc:199,search:[33,59,197],searchabl:199,second:[6,11,12,13,22,36,47,50,56,60,61,62,125,148,156,182,195,197,198,199],secondari:[10,12,13,15,42,48,53,59,125,131],secondary_index_stat:12,secondaryindexmanag:[53,198],section:[2,4,5,7,10,11,12,13,15,20,22,34,36,39,40,41,43,48,53,55,56,58,62,182,186,197,198],secur:[6,14,15,34,42,52],see:[0,4,6,10,11,12,13,14,17,20,22,26,28,31,33,34,35,41,42,43,48,53,56,58,61,95,125,140,185,187,188,191,197,198,199],seed:[6,37,42,57,108,125,135],seedprovid:6,seek:[4,6,50,53],seen:[6,11],segment:[4,6,43,47,53,61,88,197,198],segment_nam:47,segmentid:187,select:[6,9,10,11,12,14,15,19,20,24,31,34,35,36,41,43,45,48,56,60,61,130,197,198,199],select_claus:13,select_stat:[12,18],self:30,selinux:36,semant:[10,13,14],semi:36,send:[6,8,36,60,195,199],sendto:60,sens:[10,13,15,36],sensic:14,sensit:[11,12,14,17,199],sensor:22,sent:[0,6,11,22,36,53,195,199],sentenc:33,separ:[4,6,11,13,25,27,33,37,43,48,50,56,58,61,63,65,66,68,73,75,81,84,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,183,186,190],septemb:33,seq:[6,140],sequenc:12,sequenti:[6,50,140],seren:13,seri:[11,34,48,61],serial:[4,6,62],serializingcacheprovid:6,seriou:[27,195,198],serv:[13,50,56,199],server:[6,12,13,22,31,32,34,35,36,50,53,56,59,60,186,195],server_encryption_opt:[56,186],servic:[6,31,40,53,56,58,197,199],session:[6,20,56,62,125,141],set:[0,4,6,9,10,11,12,13,14,17,18,27,30,32,33,35,37,42,43,45,47,48,49,50,53,56,57,58,60,61,62,65,84,95,125,138,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,178,182,183,185,186,192,194,195,196,197,198,199],set_liter:[20,22],setbatchlogreplaythrottl:125,setcachecapac:125,setcachekeystosav:125,setcompactionthreshold:[48,125],s
etcompactionthroughput:[48,125],setconcurrentcompactor:125,setconcurrentviewbuild:[18,125],sethintedhandoffthrottlekb:125,setint:14,setinterdcstreamthroughput:125,setlogginglevel:[125,197],setlong:14,setmaxhintwindow:125,setstr:14,setstreamthroughput:125,setter:[20,24,25],settimeout:125,settraceprob:125,setup:[27,33,35,56],sever:[4,13,20,48,55,56,60,186],sfunc:[9,14],sha1:[34,192],sha:[29,34],shadow:[18,48],shape:60,shard:4,share:[11,13,31,187,195,199],sharedpool:61,sharp:38,shed:36,shell:[41,42,62],shift:22,ship:[26,35,41,56,61,197,199],shortcut:18,shorter:[27,56],shorthand:61,shortlog:34,should:[0,4,5,6,10,11,12,13,14,17,20,22,24,26,27,30,31,33,35,36,37,38,39,41,43,45,48,49,50,53,55,56,57,58,60,61,130,140,161,191,193,195,199],shouldn:[11,37],show:[20,26,42,43,55,58,62,73,93,113,125,129,139,146,165,166,174,181,182,194,195,197,198,199],shown:[12,61,174,186],shrink:6,shut:6,shutdown:[4,6,50],side:[6,11,13,17,22,56,195],sig:34,sign:[13,22,36],signal:[6,125,136],signatur:[40,47],signifi:199,signific:[6,27,31,33,35,50,195],significantli:[6,55,199],silent:14,similar:[6,13,14,49,50,194,195,199],similarli:[0,10,17,25,50,125,130],similiar:55,simpl:[6,11,26,28,31,35,56],simple_classnam:35,simple_select:13,simplequerytest:35,simplereplicationstrategi:56,simpleseedprovid:6,simplesnitch:[6,57],simplestrategi:60,simpli:[0,4,6,11,13,14,17,22,31,35,48,50,53,58,179],simul:35,simultan:[6,50,61,65,95,138,147,178],sinc:[6,11,13,14,22,27,31,35,36,40,48,53,55,58,185,188,190,198,199],singl:[0,6,10,11,12,13,14,17,18,20,22,25,33,37,41,42,52,53,55,56,57,61,62,68,182,195,197,198,199],singleton:30,site:[27,34],situat:[6,35,48,199],size:[4,6,11,22,25,36,37,43,45,47,49,50,52,53,56,60,61,62,88,122,125,182,185,187,188,189,192,197,198,199],size_estim:[125,133,197],sizeandtimebasedrollingpolici:43,sizetieredcompactionstrategi:[11,48,198],skinni:198,skip:[6,13,36,53,58,61,62,147,164,182,185,191],skipcol:61,skiprow:61,sks:40,sla:30,slack:[5,33,42,55],slash:12,slave:24,sleep:199,slf4j:[25,26,43],slightli:6,slow:[6,11,57,195,197,198,199],slower:[6,11,45,198,199],slowest:6,slowli:[6,22],small:[4,6,11,13,22,36,48,50,62,182,186,195,199],smaller:[4,6,36,48,50,61,191],smallest:[0,11,14,53,188],smallint:[9,10,14,17,19,22],smith:22,smoother:10,smoothli:6,snappi:[4,6],snappycompressor:[11,49],snapshot:[4,6,26,53,62,66,122,125,147,182,190,194,199],snapshot_nam:[66,192],snapshotnam:[66,125],snitch:[6,42,52,72,125],snt:199,socket:[6,56,161],soft:27,softwar:26,sole:[11,28],solid:[6,50],solr:59,solut:24,some:[0,6,9,11,12,13,14,22,26,27,28,31,33,34,35,36,37,47,48,49,53,56,58,61,187,189,195,197,198,199],some_funct:14,some_keysopac:11,some_nam:12,someaggreg:14,somearg:14,somefunct:14,someon:[29,48],someth:[6,189,197,199],sometim:[6,12,13,195,196,197,198,199],someudt:14,somewher:[40,55],soon:56,sooner:6,sort:[4,11,13,22,48,50,59,174,188,197],sort_kei:174,sourc:[4,5,6,8,14,24,26,27,28,32,34,40,43,53,62,130,183,192,195],source_elaps:61,space:[4,6,25,36,47,48,50,53,191,199],space_used_by_snapshots_tot:174,space_used_l:174,space_used_tot:174,span:[6,13,48],spare:[24,197],sparingli:13,spark:38,speak:[196,197,199],spec:[30,41,53,60,61],speci:[11,18],special:[12,13,35,36,48,53,62,193],specif:[9,11,12,13,22,27,31,33,36,38,47,48,53,55,56,60,61,125,130,140,186],specifc:53,specifi:[0,6,10,11,12,13,14,16,18,20,22,26,31,36,41,47,48,49,53,56,58,60,61,62,66,68,109,125,130,140,146,159,161,164,171,174,177,182,186,192,195],specific_dc:140,specific_host:140,specific_keyspac:130,specific_sourc:130,specific_token:130,specifii:20,specnam:60,specul:[0,11,53],specula
tive_retri:11,speculativefailedretri:53,speculativeinsufficientreplica:53,speculativeretri:53,speculativesamplelatencynano:53,speed:[6,42,62,182,198],spend:199,spent:[53,199],sphinx:32,spike:36,spin:[6,50],spindl:[4,6],spirit:[6,57],split:[25,36,48,53,60,61,62,68,182],spread:[6,11,57],sql:[13,15],squar:12,squash:[27,33],src:[26,34,130],ssd:[6,16,50,199],ssh:195,ssl:[6,36,52,60,61,62,125,136,182],ssl_storage_port:57,ssp:186,sss:17,sstabl:[2,6,11,36,42,45,49,50,52,62,65,68,95,109,116,125,132,138,147,178,179,183,187,188,190,191,192,194,197,198,199],sstable_act:197,sstable_compression_ratio:174,sstable_count:174,sstable_s:48,sstable_size_in_mb:48,sstable_task:197,sstabledump:[62,182],sstableexpiredblock:[48,62,182],sstablelevelreset:[62,182],sstableload:[56,62,182],sstablemetadata:[62,182,185,189],sstableofflinerelevel:[62,182],sstablerepairedset:[62,182,187],sstablerepairset:189,sstablescrub:[62,182],sstablesperreadhistogram:53,sstablesplit:[62,182],sstableupgrad:[62,182],sstableutil:[62,182,183,187],sstableverifi:[62,182],sstablewrit:25,stabil:[24,33],stabl:[40,61,197],stack:[6,190,191,192,193,194,199],stackcollaps:199,staff:60,staff_act:60,stage:[33,34,100,153,195,198],staging_numb:34,stai:[42,48],stale:56,stall:[6,58],stamp:43,stand:35,standalon:35,standard1:[184,186,187,189,190,192,197],standard:[6,22,24,28,36,40,53,60,183,187,197],start:[0,6,9,13,27,32,36,37,40,42,48,50,53,55,56,58,68,140,171,188,192,195,197,198,199],start_token:[68,140],start_token_1:130,start_token_2:130,start_token_n:130,starter:33,startup:[4,6,21,31,36,48,53,58,193],startupcheck:197,starvat:6,stat:187,state:[6,14,45,48,50,53,58,125,165,196,197],statement:[6,9,10,11,13,14,15,16,17,20,21,22,30,32,43,45,48,53,56,60,61,195,199],static0:11,static1:11,staticcolumn:187,statist:[4,48,53,61,70,96,125,128,173,174,176,186,187,192,193,198],statu:[20,24,30,33,36,40,56,61,62,125,139,166,167,168,169,170,179,182,195,196],statusautocompact:125,statusbackup:125,statusbinari:125,statusgossip:125,statushandoff:125,stc:11,stdev:[60,199],stdin:61,stdout:61,stdvrng:60,step:[6,24,27,31,32,34,56,196,197],still:[0,6,10,11,13,14,17,20,22,24,25,34,55,56,58,61,184,195,199],stop:[4,6,40,61,83,125,143,172,182,183,184,185,186,187,188,189,190,191,192,193,194,197],stop_commit:6,stop_paranoid:6,stopdaemon:125,storag:[0,2,11,15,16,33,36,42,49,50,52,59,186,187],storage_port:[37,57],storageservic:[6,25,56],store:[0,4,6,10,11,12,13,22,42,45,48,49,50,53,56,59,61,80,88,90,125,170,186,187,190],store_typ:6,stort:188,straight:[26,58,199],straightforward:47,strategi:[0,6,11,52,57,60,185,198],stratio:42,stream:[4,6,42,48,49,52,55,64,104,110,125,130,140,157,158,160,161,186,193,199],stream_throughput_outbound_megabits_per_sec:186,street:22,strength:6,stress:[42,62,199],stresscql:60,strict:[10,48],strictli:[8,11,14],string:[4,6,10,11,12,13,14,16,17,20,21,22,24,53,61,109,183],strong:0,strongli:[6,11,12,56],structur:[4,6,9,20,27,30,45,53,62,182,199],stub:56,stuck:188,style:[6,30,31,32,33,35,42],stype:[9,14],sub:[11,13,22,40,48,199],subclass:6,subdirectori:[6,21],subject:[6,14,20,56],submiss:[6,33],submit:[32,33,35,42,68],subopt:60,subrang:6,subscrib:[8,28],subscript:8,subsequ:[6,11,13,20,36,48,49,56],subset:[0,20,48,61,195],substanti:199,substitut:40,substract:19,subsystem:56,subtract:187,subvert:48,succed:53,succeed:194,succesfulli:53,success:[0,34,43,61],successfulli:[34,43,53,194],sudden:6,sudo:[36,40,199],suffer:199,suffici:[6,50,56],suggest:[12,27,28,33,50,194],suit:[6,24,33,35,56,186],suitabl:[13,14,30,33],sum:47,summari:[4,6,33,53,186,187,192,193],sun:[25,56,19
9],sunx509:186,supercolumn:9,supersed:[10,147,190],superus:[9,20,56],suppli:[11,13,29,183,195],support:[0,4,6,9,10,11,12,13,14,15,16,18,19,20,22,28,31,33,35,36,38,42,48,56,61,62,147,171,190,197,199],suppos:13,sure:[6,8,24,25,26,27,28,31,33,35,36,37,40,48,60,199],surfac:56,surplu:36,surpris:0,surprisingli:6,surround:[17,61],suscept:14,suspect:[5,33,199],suspend:31,svctm:199,svg:199,svn:34,svnpubsub:34,swamp:36,swap:[4,6,199],swiss:163,symbol:199,symmetri:17,symptom:36,sync:[4,6,27,36,53,55,140,199],synchron:[6,55],synctim:53,synonym:20,synopsi:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],syntact:[11,20],syntax:[10,12,13,14,20,22,27,48,49,60],syntaxerror:195,sys:6,sysctl:[6,36],sysf:199,sysintern:6,system:[6,11,14,20,31,35,36,37,41,43,48,50,53,56,59,61,99,101,102,104,110,116,125,132,133,134,152,154,155,157,160,186,191,193,195,196,199],system_auth:[6,56],system_schema:[20,43],system_trac:140,system_view:197,system_virtual_schema:[43,197],tab:[25,31],tabl:[0,4,6,9,10,12,13,14,15,16,17,18,20,21,22,35,43,45,48,49,52,55,56,60,61,62,65,68,75,83,85,94,95,98,103,107,116,125,131,132,134,138,140,147,151,164,166,171,173,174,178,179,182,184,186,187,189,193,194,195,197,198],table1:[20,55],table2:55,table_definit:60,table_nam:[11,13,16,20,21,48,174,197],table_opt:[11,18],tablehistogram:[125,198],tablestat:125,tag:[22,30,34,164],tail:197,take:[6,10,11,13,14,22,27,30,31,33,34,36,45,48,49,50,58,125,164,189,191,194,197,198,199],taken:[6,47,48,53,60,192],tar:[26,40],tarbal:[26,37,39,61],target:[11,20,26,31,35,43,48,186],task:[6,24,26,28,31,33,53,61,197,198,199],taskdef:35,tcp:[6,36,199],tcp_keepalive_intvl:36,tcp_keepalive_prob:36,tcp_keepalive_tim:36,tcp_nodelai:6,tcp_retries2:6,tcp_wmem:6,tcpdump:199,teach:[6,57],team:[34,36],technetwork:6,technic:[11,15],techniqu:[196,199],technot:6,tee:40,tell:[6,13,30,36,37,53,199],templat:[24,34],tempor:6,temporari:[56,62,182],temporarili:6,tend:[4,6,36,50],tendenc:6,tent:34,terabyt:49,term:[6,13,14,15,18,22,59],termin:[12,20,61],ternari:25,test:[6,25,26,30,32,33,34,41,42,50,60,61],test_keyspac:[56,197],testabl:[30,33],testbatchandlist:35,testmethod1:35,testmethod2:35,testsom:35,teststaticcompactt:35,text:[4,9,11,12,13,14,17,22,27,34,43,47,49,56,59,60,199],than:[0,4,6,11,12,13,14,15,18,19,22,25,33,42,47,48,49,50,56,57,58,60,141,154,155,184,186,188,191,192,195,197,198,199],thei:[6,9,10,11,12,13,14,15,18,19,20,22,25,30,33,35,42,43,45,48,49,50,53,56,184,188,193,194,195,197,198,199],them:[0,6,10,11,13,14,22,24,25,28,33,34,35,36,41,43,45,48,53,56,125,178,186,193,195,197,199],themselv:[13,20],theoret:11,therefor:[27,33,35,56,185,193],theses:56,thi:[0,2,4,5,6,7,10,11,12,13,14,15,17,18,20,22,24,25,26,27,28,29,30,31,33,34,35,36,37,39,40,42,43,45,48,49,50,53,55,56,57,58,60,61,62,63,65,66,68,71,73,75,81,85,91,94,95,97,98,100,103,107,109,111,115,116,123,125,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,197,198,199],thing:[6,22,28,29,33,36,39,48,55,199],think:6,third:[22,30,42,53,198],thobb:61,those:[11,12,13,14,16,17,18,20,22,33,36,47,48,56,61,178,
186,190,191,193,195,199],though:[10,12,22,42,48,49,53],thought:191,thousand:61,thousandssep:61,thread:[4,6,18,43,50,53,56,60,65,95,125,138,140,147,156,176,178,188,197,198],threaddump:199,threadpool:[52,196],threadpoolnam:53,threadprioritypolici:31,three:[0,6,11,45,48,49,56,61,195,197,198],threshold:[4,11,47,50,57,98,125,151,158,199],thrift:[9,60],throttl:[6,24,62,97,125,148,152,156,157,160,182],through:[0,5,9,10,11,12,13,18,24,27,31,33,36,41,43,47,48,61,199],throughout:56,throughput:[0,6,48,49,50,53,99,104,110,125,152,157,160,186,197,198],throwabl:[30,35],thrown:[22,188],thu:[6,10,11,12,13,18,22,36,53,57,58,125,178],thumb:[6,33],thusli:22,tib:[70,124,174],tick:33,ticket:[5,27,28,29,30,33,34,35,47],tid:199,tie:36,tier:52,ties:[13,198],tighter:6,tightli:6,tild:61,time:[0,4,6,8,9,10,11,12,13,15,16,17,18,25,27,30,31,33,35,36,43,45,47,49,52,53,55,56,59,60,61,125,127,187,189,194,195,197,198,199],timefram:58,timehorizon:6,timelin:11,timeout:[6,22,36,53,61,111,125,161,195,198],timeout_in_m:161,timeout_typ:[111,161],timer:[6,53],timestamp:[4,9,10,11,13,14,15,17,19,42,43,48,61,62,147,182,184,187,190],timeunit:48,timeuuid:[9,10,11,17,22,60],timewindowcompactionstrategi:11,timezon:[17,61],tini:[6,48],tinyint:[9,10,14,17,19,22],tip:195,titl:[33,60],tjake:25,tls_dhe_rsa_with_aes_128_cbc_sha:6,tls_dhe_rsa_with_aes_256_cbc_sha:6,tls_ecdhe_rsa_with_aes_128_cbc_sha:6,tls_ecdhe_rsa_with_aes_256_cbc_sha:6,tls_rsa_with_aes_128_cbc_sha:6,tls_rsa_with_aes_256_cbc_sha:6,tmp:[6,192,193,197,199],tmpf:199,tmplink:193,toc:[4,192,193],tock:33,todai:12,todat:14,todo:30,togeth:[6,11,13,14,24,48,195,198,199],toggl:56,tojson:15,token:[2,4,6,9,10,12,13,36,48,53,55,60,61,68,73,116,117,123,125,130,140,146,179,187,188,195,197,198],tokenawar:195,tokenrang:60,toler:[0,45],tom:13,tombston:[4,6,11,17,36,52,53,55,95,147,184,187,190,199],tombstone_compaction_interv:48,tombstone_threshold:48,tombstonescannedhistogram:53,ton:35,too:[6,11,12,14,22,30,48,60,195,198,199],took:[195,197],tool:[6,12,26,27,33,34,36,42,43,48,53,56,58,60,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198],toolset:199,top:[13,22,24,33,34,42,53,127,174,175,188],topcount:[127,175],topic:61,topolog:[6,57,146],toppartit:125,total:[4,6,13,47,48,53,60,186,197,198,199],total_replica:[0,11],totalblockedtask:53,totalcolumnsset:187,totalcommitlogs:53,totalcompactionscomplet:53,totaldiskspaceus:53,totalhint:53,totalhintsinprogress:53,totallat:53,totalrow:187,totalsizecap:43,totimestamp:14,touch:[8,36,48],tough:35,tounixtimestamp:14,tour:22,tpstat:[125,198],trace:[6,53,62,112,125,140,162,190,191,192,193,194,197,199],tracerout:199,track:[4,6,48,53],tracker:[27,33],tradeoff:[0,6,199],tradit:[48,49],traffic:[6,56,57,199],trail:25,transact:[13,21,53,62,171,182],transfer:[6,36,56,186],transform:13,transient_replica:[0,11],transit:[10,20,26],translat:199,transpar:[6,36],transport:[6,31,53,60,77,87,125,168,186,198],treat:[0,6,10,36,57],tree:[6,26,31,53,55],tri:[6,48,195],trigger:[4,6,9,11,12,15,24,42,43,45,49,52,56,65,125,137],trigger_nam:21,trigger_stat:12,trip:[6,13],trivial:56,troubl:197,troubleshoot:[6,30,32,42,55,195,197,198,199],truesnapshotss:53,truli:9,truncat:[4,6,9,10,15,20,56,60,111,125,161,177],truncate_stat:12,truncatehint:125,trunk:[27,29,30,31,33,35],trust:56,trusti:199,trustor:6,truststor:[6,56,60,186],truststore_password:6,truststorepassword:56,tspw:186,tstamp:183,ttl:[4,6,9,10,11,14,17,22,52,147,187,190],tty:61,tunabl:2,tune:[11,36,45,48,50,197,198],tupl:[6,9,10,12,13,14,15,17],tuple_liter:[12,13],tuple_typ:22,tuplevalu:[10,14],turn:[0,6,33,36,56,19
5],twc:[11,48],twice:[4,6,22],two:[0,6,11,12,13,14,17,19,31,42,43,45,48,50,56,57,61,187,198,199],txt:[4,14,29,30,33,34,192,193],type:[0,4,6,10,11,12,13,14,15,19,20,30,32,40,42,43,50,52,55,56,60,61,111,125,161,171,183,186,187,191,193,197,198],type_hint:12,typeasblob:14,typecodec:14,typic:[0,6,13,36,45,48,50,53,56,59,61,192,195,197,198,199],typo:27,ubuntu:31,udf:[6,14],udf_stat:12,udfcontext:[10,14],udt:[14,17],udt_liter:12,udt_nam:22,udt_stat:12,udtarg:14,udtnam:14,udtvalu:[10,14],ulimit:36,unabl:[4,30,42,198],unacknowledg:6,unaffect:22,unari:19,unavail:[6,11,53,56,58,199],unavailableexcept:195,unblock:53,unbound:[6,22],uncaught:197,unchecked_tombstone_compact:48,uncom:[6,53,56],uncommon:33,uncompress:[4,6,49,53],unconfirm:6,undecor:4,undelet:48,under:[6,22,24,25,35,43,53,56,199],underli:[6,18,48,56,199],understand:[6,33,36,55,56,197,199],unencrypt:[6,56],unexpect:[4,182,183,184,185,186,187,188,189,190,191,192,193,194],unexpectedli:22,unfinishedcommit:53,unflush:164,unfortun:35,uniform:60,uniq:197,uniqu:[11,14,22,60,187],unit:[22,30,32,48,125,149,186,191,198],unix:[43,196],unixtimestampof:[10,14],unknown:188,unless:[6,11,13,16,18,20,22,25,47,56,57,187,191,199],unlik:[6,10,13,22],unlimit:[6,36,61,186],unlog:[9,53,60],unnecessari:[30,58],unnecessarili:47,unpredict:13,unprepar:53,unquot:12,unquoted_identifi:12,unquoted_nam:11,unreach:55,unrel:[33,195],unrepair:[6,52,53,55,62,182],unrespons:11,unsafe_aggressive_sstable_expir:48,unsecur:56,unselected_column:18,unset:[6,10,13,17,189],unsign:22,unspecifi:6,unsubscrib:[8,42],unsuccess:43,untar:40,until:[0,4,6,11,18,22,45,47,48,49,56,57],unus:6,unusu:30,unwrit:6,updat:[6,9,10,11,12,14,15,17,18,20,22,27,30,32,33,35,40,42,43,48,49,53,56,60,61,197,198],update_paramet:13,update_stat:[12,13],updatewithlwt:60,upgrad:[4,6,11,48,125,178,192,193],upgradesst:[45,48,49,125],upload:33,upload_bintrai:34,upon:[6,22,43,45,47,49],upper:[12,17,48,56],ups:50,upstream:33,uptim:[117,125],urgent:[6,34],url:[27,29,60],usag:[0,4,6,11,22,42,45,47,49,52,53,61,62,182],use:[0,4,6,9,10,11,12,13,14,16,17,18,20,22,24,25,27,30,31,33,34,35,37,40,41,42,43,45,47,48,50,53,56,57,58,60,61,65,95,108,125,127,138,147,175,178,183,186,187,189,190,191,193,195,196,197,198,199],use_k:43,use_stat:12,usec:199,usecas:48,useconcmarksweepgc:31,usecondcardmark:31,used:[0,4,6,9,10,11,12,13,14,15,16,17,18,20,22,26,30,31,33,34,35,36,43,48,50,53,56,57,58,60,61,63,65,66,68,73,75,81,84,85,91,94,95,98,100,103,107,109,111,115,116,123,125,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,183,184,185,186,187,188,189,191,192,193,195,198,199],useecassandra:56,useful:[0,4,6,11,14,26,33,48,49,53,55,58,61,63,65,66,68,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,185,187,197,198,199],useparnewgc:31,user1:13,user2:13,user3:13,user4:13,user:[0,5,6,8,9,10,11,12,13,15,16,17,18,24,30,32,33,34,36,40,43,45,48,49,50,56,61,62,68,84,125,184,192,197,199],user_count:13,user_defined_typ:22,user_funct:20,user_nam:13,user_occup:13,user_opt:20,useract:13,userid:[11,13,14],userindex:16,usernam:[6,13,14,53,56,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186],uses:[0,4,6,11,12,13,14,16,20,21,27,35,36,56,60,194,198,199],usethreadprior:31,using:[4,6,10,11,12,13,14,18,20,22,24,31,32,33,35,39,40,41,42,43,45,49,50,52,53,56,58,61,68,130,147,164,183,185,187,188,189,190,193,195,196,197,198,199],usr:[61,199],usual:[6,13,22,29,33,35,45,56,140,190,195,197],utc:[17,61],utd:11,utf8:[22,61],utf8typ:[9,187],utf:61,util:[4,14,30,48,61,197,199],uuid:[9,10,11,12,17,22],val0:11,val1:11,val:[14,60],valid:[0,6,10,11,12,13,14,17,22,34,36,48,49,53,55,56,61,62,140,147,171,182,194],validationexecutor:[53,198],validationtim:53,valu:[4,6,9,10,11,12,13,14,16,17,19,22,30,31,36,43,45,48,49,53,56,57,59,60,61,62,84,112,116,125,148,152,154,155,156,157,159,160,161,162,182,183,194,195,197,199],valuabl:197,value1:13,value2:13,value_in_kb_per_sec:[148,156],value_in_m:159,value_in_mb:[152,157,160],valueof:14,varchar:[9,11,14,17,22],vari:[11,49],variabl:[6,10,12,17,22,24,31,34,39,189],varianc:197,variant:12,varieti:47,varint:[9,11,14,17,19,22],variou:[6,11,24,31,35,50,56,60,182,196,197],vector:56,verbos:[186,190,193,194],veri:[6,11,13,27,33,35,36,45,48,49,50,189,194,195,197,198,199],verif:[62,182],verifi:[33,36,38,40,49,55,116,125,171,182,183,184,185,186,187,188,189,190,191,192,193,194],versa:193,version:[2,5,6,9,11,14,15,22,26,31,33,38,40,48,53,58,62,67,72,82,92,125,178,179,182,190,193,197],vertic:61,via:[4,6,8,10,18,20,26,30,31,36,37,43,48,49,50,53,55,56,57,187,189,199],vice:193,view:[0,6,10,11,12,15,20,42,53,61,102,125,155,181,189,197,198,199],view_nam:18,viewbuildexecutor:[53,198],viewbuildstatu:125,viewlockacquiretim:53,viewmutationstag:[53,198],viewpendingmut:53,viewreadtim:53,viewreplicasattempt:53,viewreplicassuccess:53,viewwrit:53,viewwritelat:53,virtual:[0,6,36,48,53,58],visibl:[11,20,25,45],visit:60,visual:[27,197],vnode:[6,49],volum:[4,6,47,49,194,198,199],vote:32,vulner:[6,34,56],w_await:199,wai:[4,6,12,15,17,18,22,24,28,29,31,35,36,43,48,49,140,187,188,189,190,197,199],wait:[0,4,6,11,33,36,43,53,125,142,197,198,199],waitingoncommit:53,waitingonfreememtablespac:53,waitingonsegmentalloc:53,want:[4,6,11,13,24,31,33,34,35,36,43,55,56,58,60,185,186,189,197,199],warmup:[60,125,150],warn:[6,11,25,35,52,140,194,197],warrant:198,washington:22,wasn:10,wast:6,watch:[35,199],weaker:0,web:27,websit:[35,40,199],week:[22,55,189],weibul:60,weight:[43,53,88],welcom:8,well:[0,6,11,13,14,17,22,30,31,43,47,49,50,56,57,125,143,192,197,199],went:53,were:[6,9,10,20,30,31,34,48,53,190,193,197,198],west:34,what:[11,13,22,27,28,32,35,37,42,48,50,56,60,61,187,195,196,197,198,199],whatev:[10,13,36],whedon:13,wheel:192,when:[4,6,9,10,11,12,13,14,15,16,17,20,22,24,25,27,30,33,34,35,37,42,43,45,47,49,50,52,53,55,56,57,58,60,61,63,65,66,68,71,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,187,188,193,195,197,198,199],whenev:[188,199],where:[0,4,6,9,10,11,12,14,16,17,18,19,20,22,30,35,37,40,43,45,48,49,56,58,60,61,88,140,195,197,199],where_claus:13,wherea:[22,56,198],whether:[0,6,9,11,13,31,48,57,61,88],which:[0,4,5,6,10,11,12,13,14,15,16,18,19,20,21,22,27,33,34,35,36,37,40,41,43,45,47,48,49,50,53,55,56,57,58,60,68,103,107,116,130,140,193,195,196,197,198,199],whichev:[0,6],whilst:6,whitelist:56,whitespac:32,who:[20,24,33,36],whole:[6,13,14,22,48,55],whose:[11,22,171],why:[30,33,42,184,195,197,199],
wide:[4,47,198],width:12,wiki:[6,27,31],wildcard:[13,20,191],wildli:11,window:[0,4,6,52,53,56,106,114,125,159,196],winner:36,wip:33,wipe:[36,58],wire:[6,36],wireshark:199,wise:11,wish:[6,34,48,53,197],within:[0,4,6,11,12,13,16,31,33,34,36,48,50,53,56],withing:6,without:[0,6,11,12,13,14,20,22,29,31,33,34,35,36,47,50,53,56,61,62,63,116,125,132,182,183],wmb:199,wmem_max:6,won:[4,6,13,27,29,55,199],wont:[43,48],word:[10,11,12,18,20,22,36,47,56],work:[0,4,6,10,11,14,15,17,24,25,28,29,31,32,34,35,36,42,48,50,53,55,56,57,58,61,186,199],workaround:[186,190],worker:61,workload:[6,28,30,45,48,50,60,198,199],workspac:31,worktre:31,worri:[33,36],wors:[6,57],worst:[6,33],worth:[6,43],worthwhil:6,would:[6,12,13,14,17,20,27,31,33,35,42,48,49,50,55,56,57,187,189,193,197,199],wrap:57,write:[0,4,6,10,11,13,22,25,27,28,30,35,36,47,48,49,50,53,55,56,57,58,60,61,83,111,125,161,174,187,190,193,195,197,198,199],write_lat:174,write_request_timeout:36,writefailedideacl:53,writelat:[53,195],writer:[4,6,25],writetim:[9,14],writetimeoutexcept:[6,195],written:[4,6,11,21,24,36,43,45,48,49,53,55],wrong:[6,34,198],wrqm:199,wrst:199,wrte:53,www:[6,34,40,199],x86:59,xandra:38,xarg:[189,197],xdm:199,xlarg:50,xlarge_daili:43,xml:[26,31,34,35,37,197],xmn220m:31,xms1024m:31,xmx1024m:31,xmx:50,xss256k:31,xzvf:40,yaml:[0,4,6,14,18,20,37,40,53,56,57,58,60,69,84,88,125,143,174,176,186,187,195],year:[13,22],yes:[9,11,56],yet:[6,11,24,28,34,47,53,193],ygc:199,ygct:199,yield:[13,43,58,199],ymmv:197,you:[0,4,5,6,8,10,11,12,13,14,16,17,18,20,21,22,24,25,26,27,28,29,31,32,34,35,36,37,38,39,40,41,42,43,47,48,53,55,56,57,58,59,60,61,63,125,164,183,185,186,187,189,190,191,193,194,195,196,197,198,199],young:199,younger:14,your:[0,5,6,8,10,11,12,25,27,28,31,32,33,35,36,37,40,42,48,50,55,56,57,60,61,186,191,194,196,197,198,199],yourself:[28,29,35],yum:34,yyyi:[17,22,43],z_0:[11,16,18],zero:[6,10,11,36,53,57,197],zgrep:197,zip:[22,43],zipcod:22,zone:[6,22,57],zoomabl:199,zstd:4,zstdcompressor:49},titles:["Dynamo","Guarantees","Architecture","Overview","Storage Engine","Reporting Bugs","Cassandra Configuration File","Configuring Cassandra","Contact us","Appendices","Changes","Data Definition","Definitions","Data Manipulation","Functions","The Cassandra Query Language (CQL)","Secondary Indexes","JSON Support","Materialized Views","Arithmetic Operators","Security","Triggers","Data Types","Data Modeling","Jenkins CI Environment","Code Style","Dependency Management","Working on Documentation","Getting Started","How-to Commit","Review Checklist","Building and IDE Integration","Contributing to Cassandra","Contributing Code Changes","Release Process","Testing","Frequently Asked Questions","Configuring Cassandra","Client drivers","Getting Started","Installing Cassandra","Inserting and querying","Welcome to Apache Cassandra\u2019s documentation!","Audit Logging","Backups","Bloom Filters","Bulk Loading","Change Data Capture","Compaction","Compression","Hardware Choices","Hints","Operating Cassandra","Monitoring","Read repair","Repair","Security","Snitch","Adding, replacing, moving and removing nodes","Third-Party Plugins","Cassandra Stress","cqlsh: the CQL shell","Cassandra 
Tools","assassinate","bootstrap","cleanup","clearsnapshot","clientstats","compact","compactionhistory","compactionstats","decommission","describecluster","describering","disableauditlog","disableautocompaction","disablebackup","disablebinary","disablefullquerylog","disablegossip","disablehandoff","disablehintsfordc","disableoldprotocolversions","drain","enableauditlog","enableautocompaction","enablebackup","enablebinary","enablefullquerylog","enablegossip","enablehandoff","enablehintsfordc","enableoldprotocolversions","failuredetector","flush","garbagecollect","gcstats","getbatchlogreplaythrottle","getcompactionthreshold","getcompactionthroughput","getconcurrency","getconcurrentcompactors","getconcurrentviewbuilders","getendpoints","getinterdcstreamthroughput","getlogginglevels","getmaxhintwindow","getreplicas","getseeds","getsstables","getstreamthroughput","gettimeout","gettraceprobability","gossipinfo","handoffwindow","help","import","info","invalidatecountercache","invalidatekeycache","invalidaterowcache","join","listsnapshots","move","netstats","Nodetool","pausehandoff","profileload","proxyhistograms","rangekeysample","rebuild","rebuild_index","refresh","refreshsizeestimates","reloadlocalschema","reloadseeds","reloadssl","reloadtriggers","relocatesstables","removenode","repair","repair_admin","replaybatchlog","resetfullquerylog","resetlocalschema","resumehandoff","ring","scrub","setbatchlogreplaythrottle","setcachecapacity","setcachekeystosave","setcompactionthreshold","setcompactionthroughput","setconcurrency","setconcurrentcompactors","setconcurrentviewbuilders","sethintedhandoffthrottlekb","setinterdcstreamthroughput","setlogginglevel","setmaxhintwindow","setstreamthroughput","settimeout","settraceprobability","sjk","snapshot","status","statusautocompaction","statusbackup","statusbinary","statusgossip","statushandoff","stop","stopdaemon","tablehistograms","tablestats","toppartitions","tpstats","truncatehints","upgradesstables","verify","version","viewbuildstatus","SSTable Tools","sstabledump","sstableexpiredblockers","sstablelevelreset","sstableloader","sstablemetadata","sstableofflinerelevel","sstablerepairedset","sstablescrub","sstablesplit","sstableupgrade","sstableutil","sstableverify","Find The Misbehaving Nodes","Troubleshooting","Cassandra Logs","Use Nodetool","Diving Deep, Use External 
Tools"],titleterms:{"class":57,"final":193,"function":[13,14,17],"import":[25,116],"long":35,"new":36,"switch":48,"transient":0,Adding:58,Doing:188,IDE:31,IDEs:25,LCS:48,TLS:56,The:[13,15,17,48,195],USE:11,Use:[49,186,198,199],Uses:49,Using:[31,189],Will:36,With:56,about:24,abov:187,access:56,adcanc:43,add:[26,36],address:36,advanc:[49,199],after:58,aggreg:14,alias:13,all:[20,36,187,193],alloc:58,allocate_tokens_for_keyspac:6,allow:13,alreadi:185,alter:[11,18,20,22],ani:36,announc:34,answer:28,apach:[24,31,42],appendic:9,appendix:9,architectur:2,arithmet:19,artifact:34,ask:36,assassin:63,assign:58,attempt:191,audit:43,audit_logging_opt:6,auditlog:43,auth:56,authent:[6,20,56],author:[6,56],auto_snapshot:6,automat:20,automatic_sstable_upgrad:6,avg:14,back_pressure_en:6,back_pressure_strategi:6,backup:44,base:27,basic:[190,194,199],batch:[13,36,53],batch_size_fail_threshold_in_kb:6,batch_size_warn_threshold_in_kb:6,batchlog_replay_throttle_in_kb:6,bcc:199,befor:33,benefit:49,best:55,binari:40,binauditlogg:43,bintrai:34,blob:[14,36],block:184,bloom:45,boilerpl:25,bootstrap:[36,48,58,64],branch:33,broadcast_address:6,broadcast_rpc_address:6,buffer_pool_use_heap_if_exhaust:6,bufferpool:53,bug:[5,28,33],build:31,bulk:[36,46],cach:[53,56,199],call:[34,36],can:36,capi:59,captur:[43,47,61,199],cas_contention_timeout_in_m:6,cassandra:[6,7,15,17,24,27,31,32,34,35,36,37,40,42,43,47,52,56,59,60,62,192,197],cast:14,cdc:47,cdc_enabl:6,cdc_free_space_check_interval_m:6,cdc_raw_directori:6,cdc_total_space_in_mb:6,certif:56,chang:[10,33,36,37,45,47,48],characterist:22,check:190,checklist:30,choic:50,choos:33,circleci:35,claus:13,clean:193,cleanup:[58,65],clear:61,clearsnapshot:66,client:[38,41,53,56,195],client_encryption_opt:6,clientstat:67,clojur:38,cloud:50,cluster:[36,186,198],cluster_nam:6,code:[4,25,33],collect:[22,48,199],column_index_cache_size_in_kb:6,column_index_size_in_kb:6,command:[31,43,48,61,189],comment:12,commit:29,commit_failure_polici:6,commitlog:[4,53],commitlog_compress:6,commitlog_directori:6,commitlog_segment_size_in_mb:6,commitlog_sync:6,commitlog_sync_batch_window_in_m:6,commitlog_sync_group_window_in_m:6,commitlog_sync_period_in_m:6,commitlog_total_space_in_mb:6,commitlogseg:47,committ:27,common:[11,48,50,197],compact:[9,48,53,68,198],compaction_large_partition_warning_threshold_mb:6,compaction_throughput_mb_per_sec:6,compactionhistori:69,compactionstat:70,compactionstrategi:48,compat:61,compress:49,concern:48,concurrent_compactor:6,concurrent_counter_writ:6,concurrent_materialized_view_build:6,concurrent_materialized_view_writ:6,concurrent_read:6,concurrent_valid:6,concurrent_writ:6,condition:20,config:186,configur:[6,7,37,43,47,49],conflict:26,connect:[20,36],consist:[0,61],constant:12,contact:8,content:[34,43],contribut:[28,32,33],control:20,convent:[12,25],convers:14,coordin:198,copi:61,corrupt:[190,194],corrupted_tombstone_strategi:6,count:14,counter:[13,22,190],counter_cache_keys_to_sav:6,counter_cache_save_period:6,counter_cache_size_in_mb:6,counter_write_request_timeout_in_m:6,cpu:[50,199],cql:[9,15,53,61],cqlsh:[41,61],cqlshrc:61,creat:[11,14,16,18,20,21,22,28,33,34],credenti:20,credentials_update_interval_in_m:6,credentials_validity_in_m:6,cross_node_timeout:6,cstar_perf:35,current:[14,192],custom:22,cython:61,dart:38,data:[11,13,17,20,22,23,36,47,48,58],data_file_directori:6,databas:20,datacent:20,date:[14,22,190],datetim:[14,19],dead:58,deal:190,debian:40,debug:[31,197],decommiss:71,deep:199,defin:[14,22],definit:[11,12],defragment:48,delet:[13,34,36,48],depend:[26,61],de
scrib:[61,73],describeclust:72,detail:[48,186],detect:0,develop:34,diagnostic_events_en:6,dies:36,directori:[37,48],disabl:[43,47],disableauditlog:74,disableautocompact:75,disablebackup:76,disablebinari:77,disablefullquerylog:78,disablegossip:79,disablehandoff:80,disablehintsfordc:81,disableoldprotocolvers:82,disk:[36,50],disk_failure_polici:6,disk_optimization_strategi:6,displai:183,distribut:34,dive:199,document:[27,28,42],doe:[36,43],drain:83,driver:[38,41],drop:[9,11,14,16,18,20,21,22,36],droppedmessag:53,dry:188,dtest:[28,35],dump:183,durat:22,dynam:57,dynamic_snitch_badness_threshold:6,dynamic_snitch_reset_interval_in_m:6,dynamic_snitch_update_interval_in_m:6,dynamo:0,each:[36,187],eclips:31,elixir:38,email:36,enabl:[43,47,56],enable_materialized_view:6,enable_sasi_index:6,enable_scripted_user_defined_funct:6,enable_transient_repl:6,enable_user_defined_funct:6,enableauditlog:84,enableautocompact:85,enablebackup:86,enablebinari:87,enablefullquerylog:88,enablegossip:89,enablehandoff:90,enablehintsfordc:91,enableoldprotocolvers:92,encod:17,encrypt:56,endpoint_snitch:6,engin:4,entir:183,entri:36,environ:[24,37],erlang:38,error:[36,195],even:36,exampl:4,except:25,exclud:183,exist:36,exit:61,expand:61,experiment:6,expir:48,explan:187,extend:194,extern:199,factor:36,fail:[36,58],failur:[0,36],failuredetector:93,faq:60,featur:6,file:[6,25,26,40,43,186,191,194,197],file_cache_size_in_mb:6,fileauditlogg:43,filedescriptorratio:53,filter:[13,43,45],find:195,fix:[28,33],flamegraph:199,flow:27,flush:94,format:[25,183],found:[185,188],freez:33,frequent:36,from:[31,34,36,40,61,186],fromjson:17,full:[55,197],full_query_log_dir:6,full_query_logging_opt:6,fulli:48,further:47,garbag:[48,199],garbagecollect:95,garbagecollector:53,gc_grace_second:48,gc_log_threshold_in_m:6,gc_warn_threshold_in_m:6,gcstat:96,gener:25,get:[28,39,186,197],getbatchlogreplaythrottl:97,getcompactionthreshold:98,getcompactionthroughput:99,getconcurr:100,getconcurrentcompactor:101,getconcurrentviewbuild:102,getendpoint:103,getinterdcstreamthroughput:104,getlogginglevel:105,getmaxhintwindow:106,getreplica:107,getse:108,getsstabl:109,getstreamthroughput:110,gettimeout:111,gettraceprob:112,github:27,give:36,gossip:0,gossipinfo:113,gpg:34,grace:[48,187],grant:20,graph:60,group:13,guarante:1,handl:25,handoffwindow:114,hang:58,happen:36,hardwar:50,has:185,haskel:38,heap:36,help:[61,115],hide:186,high:199,hint:51,hinted_handoff_disabled_datacent:6,hinted_handoff_en:6,hinted_handoff_throttle_in_kb:6,hintedhandoff:53,hints_compress:6,hints_directori:6,hints_flush_period_in_m:6,hintsservic:53,host:[36,61],hot:56,how:[27,29,36,43],htop:199,idea:31,ideal_consistency_level:6,identifi:12,impact:49,includ:193,increment:55,incremental_backup:6,index:[16,53,59],index_summary_capacity_in_mb:6,index_summary_resize_interval_in_minut:6,info:117,inform:[197,199],initi:28,initial_token:6,insert:[13,17,41],instal:40,integr:[31,56],intellij:31,inter:56,inter_dc_stream_throughput_outbound_megabits_per_sec:6,inter_dc_tcp_nodelai:6,intern:[20,56,183],internode_application_receive_queue_capacity_in_byt:6,internode_application_receive_queue_reserve_endpoint_capacity_in_byt:6,internode_application_receive_queue_reserve_global_capacity_in_byt:6,internode_application_send_queue_capacity_in_byt:6,internode_application_send_queue_reserve_endpoint_capacity_in_byt:6,internode_application_send_queue_reserve_global_capacity_in_byt:6,internode_authent:6,internode_compress:6,internode_recv_buff_size_in_byt:6,internode_send_buff_size_in_byt:6,invalidatecountercach:118,inv
alidatekeycach:119,invalidaterowcach:120,investig:[28,195],iostat:199,java:[36,38],jconsol:36,jenkin:24,jira:[27,34],jmx:[36,48,53,56],job:24,join:[36,121],json:17,jstack:199,jstat:199,jvm:[53,199],keep:192,kei:[16,18,34,183],key_cache_keys_to_sav:6,key_cache_save_period:6,key_cache_size_in_mb:6,keyspac:[11,36,53,188],keyword:[9,12],lang:36,languag:15,larg:36,latenc:[195,198,199],level:[0,48,185,199],librari:26,lightweight:60,limit:[13,18,43],line:[31,61],list:[8,20,22,28,36,193],listen:36,listen_address:[6,36],listen_interfac:6,listen_interface_prefer_ipv6:6,listen_on_broadcast_address:6,listsnapshot:122,liter:22,live:36,load:[36,46,186],local:[27,198],locat:37,log:[36,37,43,48,193,195,197],logger:197,login:61,lot:[36,189],lucen:59,made:36,mail:8,main:37,major:48,manag:[26,183],mani:189,manifest:190,manipul:13,manual:58,map:[16,22,36],materi:18,max:[14,36],max_concurrent_automatic_sstable_upgrad:6,max_hint_window_in_m:6,max_hints_delivery_thread:6,max_hints_file_size_in_mb:6,max_value_size_in_mb:6,maxtimeuuid:14,mean:36,memori:[36,50,53],memorypool:53,memtabl:4,memtable_allocation_typ:6,memtable_cleanup_threshold:6,memtable_flush_writ:6,memtable_heap_space_in_mb:6,memtable_offheap_space_in_mb:6,merg:48,messag:36,metadata:[187,189],method:36,metric:[53,195],min:14,minor:48,mintimeuuid:14,misbehav:195,mode:60,model:23,monitor:[53,58],more:[36,48,183,186,197],move:[58,123],movement:58,multilin:25,multipl:191,nativ:[14,22],native_transport_allow_older_protocol:6,native_transport_flush_in_batches_legaci:6,native_transport_frame_block_size_in_kb:6,native_transport_idle_timeout_in_m:6,native_transport_max_concurrent_connect:6,native_transport_max_concurrent_connections_per_ip:6,native_transport_max_frame_size_in_mb:6,native_transport_max_thread:6,native_transport_port:6,native_transport_port_ssl:6,net:38,netbean:31,netstat:124,network:199,network_author:6,networktopologystrategi:[0,11],newer:31,next:[34,195],nexu:34,node:[36,56,58,195],nodej:38,nodetool:[36,43,48,125,198],note:27,noteworthi:22,now:14,num_token:6,number:19,old:[34,192],one:[36,189],onli:[36,183,193],open:31,oper:[19,34,36,48,49,52],option:[18,43,48,55,61],order:13,otc_backlog_expiration_interval_m:6,otc_coalescing_enough_coalesced_messag:6,otc_coalescing_strategi:6,otc_coalescing_window_u:6,other:[36,55],outofmemoryerror:36,output:[43,183,184,186],overflow:190,overview:[3,47],own:24,packag:[34,40],packet:199,page:[61,199],paramet:[13,47,48],parti:59,partition:6,password:56,patch:[28,33],pausehandoff:126,perform:[34,35],periodic_commitlog_sync_lag_block_in_m:6,perl:38,permiss:20,permissions_update_interval_in_m:6,permissions_validity_in_m:6,phi_convict_threshold:6,php:38,pick:0,plugin:[24,59],point:36,pom:26,port:36,post:34,practic:55,prepar:12,prepared_statements_cache_size_mb:6,prerequisit:[34,40],primari:18,print:[187,189],process:34,profil:60,profileload:127,progress:[58,186],project:31,promot:34,properti:37,proxyhistogram:128,publish:[27,34],python:38,pytz:61,queri:[15,41,195,197,198],question:[28,36],rang:[0,58],range_request_timeout_in_m:6,rangekeysampl:129,rate:195,raw:183,read:[47,54],read_request_timeout_in_m:6,rebuild:130,rebuild_index:131,reduc:185,refresh:132,refreshsizeestim:133,refus:36,releas:34,relevel:188,reliabl:199,reload:[43,56],reloadlocalschema:134,reloadse:135,reloadssl:136,reloadtrigg:137,relocatesst:138,remot:36,remov:[48,58],removenod:139,repair:[48,54,55,140,189],repair_admin:141,repair_session_space_in_mb:6,repaired_data_tracking_for_partition_reads_en:6,repaired_data_tracking_for_range_reads_en:6,repla
c:58,replaybatchlog:142,replic:[0,36],report:[5,28,36,53],report_unconfirmed_repaired_data_mismatch:6,repositori:34,request:53,request_timeout_in_m:6,requir:[24,26],reserv:9,resetfullquerylog:143,resetlocalschema:144,resolut:26,resourc:199,restrict:20,result:13,resum:58,resumehandoff:145,retriev:14,review:[28,30],revok:20,rewrit:192,rhel:36,right:33,ring:[0,36,146],role:[20,56],role_manag:6,roles_update_interval_in_m:6,roles_validity_in_m:6,row:183,row_cache_class_nam:6,row_cache_keys_to_sav:6,row_cache_save_period:6,row_cache_size_in_mb:6,rowcach:59,rpc_address:6,rpc_interfac:6,rpc_interface_prefer_ipv6:6,rpc_keepal:6,rubi:38,run:[35,188],runtim:37,rust:38,safeti:6,sai:36,same:36,sampl:43,saved_caches_directori:6,scala:38,scalar:14,script:189,scrub:[147,190],second:187,secondari:16,secur:[20,56],see:36,seed:[24,36],seed_provid:6,select:[13,17,18],selector:13,send:34,serial:61,server:24,server_encryption_opt:6,session:61,set:[20,22,24,31,36,189],setbatchlogreplaythrottl:148,setcachecapac:149,setcachekeystosav:150,setcompactionthreshold:151,setcompactionthroughput:152,setconcurr:153,setconcurrentcompactor:154,setconcurrentviewbuild:155,sethintedhandoffthrottlekb:156,setinterdcstreamthroughput:157,setlogginglevel:158,setmaxhintwindow:159,setstreamthroughput:160,settimeout:161,settraceprob:162,setup:[24,31],share:61,shell:61,show:[36,61,189],sign:34,signatur:14,simplestrategi:[0,11],singl:[36,48,183],size:[48,191],sjk:163,skip:190,slack:[8,34],slow_query_log_timeout_in_m:6,small:191,snapshot:[164,186,191,192],snapshot_before_compact:6,snitch:57,sourc:[31,61],special:61,specif:20,specifi:[187,191],speed:[36,186],sphinx:27,split:191,ssl:[56,186],ssl_storage_port:6,sstabl:[4,48,53,182,184,185,186,189,193],sstable_preemptive_open_interval_in_mb:6,sstabledump:183,sstableexpiredblock:184,sstablelevelreset:185,sstableload:186,sstablemetadata:187,sstableofflinerelevel:188,sstablerepairedset:189,sstablescrub:190,sstablesplit:191,sstableupgrad:192,sstableutil:193,sstableverifi:194,stai:36,standard:56,start:[28,31,33,39],start_native_transport:6,starv:48,state:[198,199],statement:[12,18,25],statu:[165,189,198],statusautocompact:166,statusbackup:167,statusbinari:168,statusgossip:169,statushandoff:170,stc:48,step:[26,195],stop:171,stopdaemon:172,storag:[4,9,53],storage_port:6,store:36,strategi:48,stratio:59,stream:[36,53,58],stream_entire_sst:6,stream_throughput_outbound_megabits_per_sec:6,streaming_connections_per_host:6,streaming_keep_alive_period_in_sec:6,stress:[35,60],structur:183,style:25,submit:28,sum:14,support:[17,60],sync:34,system:197,tabl:[11,47,53,183,185,188,190,192],tablehistogram:173,tablestat:174,tarbal:40,temporari:193,term:12,test:[24,28,31,35],than:36,thei:36,third:59,though:36,thread:199,threadpool:[53,198],threshold:6,throttl:186,throughput:199,tier:48,time:[14,22,48],timestamp:[22,36,183],timeuuid:14,timewindowcompactionstrategi:48,todo:[0,1,3,11,23,44,46,51,54],tojson:17,token:[0,14,58],tombston:48,tombstone_failure_threshold:6,tombstone_warn_threshold:6,tool:[35,62,182,199],top:[36,199],topic:34,toppartit:175,tpstat:176,trace:61,tracetype_query_ttl:6,tracetype_repair_ttl:6,transact:[60,193],transparent_data_encryption_opt:6,trickle_fsync:6,trickle_fsync_interval_in_kb:6,trigger:[21,48],troubleshoot:[26,196],truncat:11,truncate_request_timeout_in_m:6,truncatehint:177,ttl:[13,48],tunabl:0,tupl:22,tweet:34,two:36,type:[9,17,22,26,48,53],udt:22,unabl:36,unit:[28,31,35],unlog:13,unlogged_batch_across_partitions_warn_threshold:6,unrepair:[48,189],unsubscrib:36,updat:[13,26,28,34,36],upgr
adesst:178,upload:34,usag:[36,55,60,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,183,184,185,186,187,188,189,190,191,192,193,194,199],use:36,user:[14,20,22,28,60],using:[27,36,48],uuid:14,valid:190,valu:187,variabl:37,verif:194,verifi:179,version:[4,10,34,61,180,192],view:[18,43],viewbuildstatu:181,vmtouch:199,vote:34,wait:34,warn:47,websit:34,welcom:42,what:[33,36,43],when:[36,48],where:13,whitespac:25,why:[36,48],window:48,windows_timer_interv:6,without:[48,190,191],work:[22,27,33],write_request_timeout_in_m:6,writetim:13,yaml:[43,47],you:33,your:[24,34]}}) \ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/cassandra_stress.html b/src/doc/4.0-alpha1/tools/cassandra_stress.html deleted file mode 100644 index cf47c9604..000000000 --- a/src/doc/4.0-alpha1/tools/cassandra_stress.html +++ /dev/null @@ -1,352 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Cassandra Stress" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Stress

-

cassandra-stress is a tool for benchmarking and load testing a Cassandra -cluster. cassandra-stress supports testing arbitrary CQL tables and queries -to allow users to benchmark their data model.

-

This documentation focuses on user mode as this allows the testing of your -actual schema.

-
-

Usage

-

There are several operation types:

-
-
    -
  • write-only, read-only, and mixed workloads of standard data
  • -
  • write-only and read-only workloads for counter columns
  • -
  • user configured workloads, running custom queries on custom schemas
  • -
-
-

The syntax is cassandra-stress <command> [options]. If you want more information on a given command -or options, just run cassandra-stress help <command|option>.

-
-
Commands:
-
-
read:
-
Multiple concurrent reads - the cluster must first be populated by a write test
-
write:
-
Multiple concurrent writes against the cluster
-
mixed:
-
Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test
-
counter_write:
-
Multiple concurrent updates of counters.
-
counter_read:
-
Multiple concurrent reads of counters. The cluster must first be populated by a counterwrite test.
-
user:
-
Interleaving of user provided queries, with configurable ratio and distribution.
-
help:
-
Print help for a command or option
-
print:
-
Inspect the output of a distribution definition
-
legacy:
-
Legacy support mode
-
-
-
Primary Options:
-
-
-pop:
-
Population distribution and intra-partition visit order
-
-insert:
-
Insert specific options relating to various methods for batching and splitting partition updates
-
-col:
-
Column details such as size and count distribution, data generator, names, comparator and if super columns should be used
-
-rate:
-
Thread count, rate limit or automatic mode (default is auto)
-
-mode:
-
Thrift or CQL with options
-
-errors:
-
How to handle errors when encountered during stress
-
-sample:
-
Specify the number of samples to collect for measuring latency
-
-schema:
-
Replication settings, compression, compaction, etc.
-
-node:
-
Nodes to connect to
-
-log:
-
Where to log progress to, and the interval at which to do it
-
-transport:
-
Custom transport factories
-
-port:
-
The port to connect to cassandra nodes on
-
-sendto:
-
Specify a stress server to send this command to
-
-graph:
-
Graph recorded metrics
-
-tokenrange:
-
Token range settings
-
-
-
Suboptions:
-
Every command and primary option has its own collection of suboptions. These are too numerous to list here. -For information on the suboptions for each command or option, please use the help command, -cassandra-stress help <command|option>.
-
-
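As a quick illustration (the row count and thread count below are arbitrary, and a locally reachable cluster is assumed), a basic write workload and a request for the suboptions of a primary option might look like:

```bash
# Populate the cluster with one million rows using 50 client threads
$ cassandra-stress write n=1000000 -rate threads=50

# List the suboptions available for the -schema primary option
$ cassandra-stress help -schema
```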
-
-

User mode

-

User mode allows you to stress your own schemas. This can save time in -the long run rather than building an application and then realising your schema -doesn’t scale.

-
-

Profile

-

User mode requires a profile defined in YAML. -Multiple YAML files may be specified in which case operations in the ops argument are referenced as specname.opname.

-

An identifier for the profile:

-
specname: staff_activities
-
-
-

The keyspace for the test:

-
keyspace: staff
-
-
-

CQL for the keyspace. Optional if the keyspace already exists:

-
keyspace_definition: |
- CREATE KEYSPACE staff WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
-
-
-

The table to be stressed:

-
table: staff_activities
-
-
-

CQL for the table. Optional if the table already exists:

-
table_definition: |
-  CREATE TABLE staff_activities (
-      name text,
-      when timeuuid,
-      what text,
-      PRIMARY KEY(name, when, what)
-  )
-
-
-

Optional meta information on the generated columns in the above table. -The min and max only apply to text and blob types. -The distribution field represents the total unique population -distribution of that column across rows:

-
columnspec:
-  - name: name
-    size: uniform(5..10) # The names of the staff members are between 5-10 characters
-    population: uniform(1..10) # 10 possible staff members to pick from
-  - name: when
-    cluster: uniform(20..500) # Staff members do between 20 and 500 events
-  - name: what
-    size: normal(10..100,50)
-
-
-

Supported types are:

-

An exponential distribution over the range [min..max]:

-
EXP(min..max)
-
-
-

An extreme value (Weibull) distribution over the range [min..max]:

-
EXTREME(min..max,shape)
-
-
-

A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:

-
GAUSSIAN(min..max,stdvrng)
-
-
-

A gaussian/normal distribution, with explicitly defined mean and stdev:

-
GAUSSIAN(min..max,mean,stdev)
-
-
-

A uniform distribution over the range [min, max]:

-
UNIFORM(min..max)
-
-
-

A fixed distribution, always returning the same value:

-
FIXED(val)
-
-
-

If preceded by ~, the distribution is inverted

-

Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)

-

Insert distributions:

-
insert:
-  # How many partitions to insert per batch
-  partitions: fixed(1)
-  # How many rows to update per partition
-  select: fixed(1)/500
-  # UNLOGGED or LOGGED batch for insert
-  batchtype: UNLOGGED
-
-
-

Currently all inserts are done inside batches.

-

Read statements to use during the test:

-
queries:
-   events:
-      cql: select *  from staff_activities where name = ?
-      fields: samerow
-   latest_event:
-      cql: select * from staff_activities where name = ?  LIMIT 1
-      fields: samerow
-
-
-

Running a user mode test:

-
cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once
-
-
-

This will create the schema then run tests for 1 minute with an equal number of inserts, latest_event queries and events -queries. Additionally the table will be truncated once before the test.

-
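A weighted mix is expressed the same way; for example (the duration, weights and thread count here are purely illustrative), three events queries for every insert:

```bash
$ cassandra-stress user profile=./example.yaml duration=5m "ops(insert=1,events=3)" truncate=once -rate threads=16
```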

The full example can be found here yaml

-
-
Running a user mode test with multiple yaml files:
-
cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once
-
This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table
-
although care must be taken that the table definition is identical (data generation specs can be different).
-
-
-
-

Lightweight transaction support

-

cassandra-stress supports lightweight transactions. In this mode it will first read current data from Cassandra and then use the read value(s) -to fulfill the lightweight transaction condition(s).

-

Lightweight transaction update query:

-
queries:
-  regularupdate:
-      cql: update blogposts set author = ? where domain = ? and published_date = ?
-      fields: samerow
-  updatewithlwt:
-      cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ?
-      fields: samerow
-
-
-

The full example can be found here yaml

-
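A run that exercises the LWT queries above might look like the following (the profile file name is hypothetical; it is assumed to define the blogposts table and the two queries shown above):

```bash
$ cassandra-stress user profile=./blogpost-example.yaml n=10000 "ops(regularupdate=1,updatewithlwt=1)" -rate threads=8
```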
-
-
-

Graphing

-

Graphs can be generated for each run of stress.

-../_images/example-stress-graph.png -

To create a new graph:

-
cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph"
-
-
-

To add a new run to an existing graph point to an existing file and add a revision name:

-
cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run"
-
-
-
-
-

FAQ

-

How do you use NetworkTopologyStrategy for the keyspace?

-

Use the schema option making sure to either escape the parenthesis or enclose in quotes:

-
cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)"
-
-
-

How do you use SSL?

-

Use the transport option:

-
cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra"
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/cqlsh.html b/src/doc/4.0-alpha1/tools/cqlsh.html deleted file mode 100644 index fadda53cd..000000000 --- a/src/doc/4.0-alpha1/tools/cqlsh.html +++ /dev/null @@ -1,485 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "cqlsh: the CQL shell" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cqlsh: the CQL shell

-

cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line.

-
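For example (the address below is a placeholder; with no arguments cqlsh connects to 127.0.0.1 on port 9042):

```bash
$ cqlsh 10.0.1.25 9042
```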
-

Compatibility

-

cqlsh is compatible with Python 2.7.

-

In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.

-
-
-

Optional Dependencies

-

cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.

-
-

pytz

-

By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the pytz library must be installed. See the timezone option in cqlshrc for -specifying a timezone to use.

-
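A minimal setup might look like this (the timezone value is only an example; the cqlshrc snippet is shown as comments):

```bash
# Install pytz for the Python interpreter that runs cqlsh
$ pip install pytz

# Then set the timezone in cqlshrc, for example:
#   [ui]
#   timezone = Australia/Sydney
```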
-
-

cython

-

The performance of cqlsh’s COPY operations can be improved by installing cython. This will -compile the python modules that are central to the performance of COPY.

-
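Installing it is a one-liner (use the same Python environment that cqlsh runs under):

```bash
$ pip install cython
```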
-
-
-

cqlshrc

-

The cqlshrc file holds configuration options for cqlsh. By default this is in the user’s home directory at -~/.cassandra/cqlshrc, but a custom location can be specified with the --cqlshrc option.

-

Example config values and documentation can be found in the conf/cqlshrc.sample file of a tarball installation. You -can also view the latest version of cqlshrc online.

-
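For instance, to point cqlsh at a cqlshrc kept somewhere other than the default location (the path below is a placeholder):

```bash
$ cqlsh --cqlshrc=/etc/cassandra/cqlshrc-prod
```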
-
-

Command Line Options

-

Usage:

-

cqlsh [options] [host [port]]

-
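A couple of invocations combining the options below (host, credentials and file names are placeholders; see the following list for what each flag does):

```bash
# Run a single statement non-interactively
$ cqlsh -u cassandra -p cassandra -e "DESCRIBE KEYSPACES" 10.0.1.25

# Execute a file of CQL statements with a longer request timeout
$ cqlsh -f ./setup.cql --request-timeout=20
```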

Options:

-
-
-C --color
-
Force color output
-
--no-color
-
Disable color output
-
--browser
-
Specify the browser to use for displaying cqlsh help. This can be one of the supported browser names (e.g. firefox) or a browser path followed by %s (e.g. -/usr/bin/google-chrome-stable %s).
-
--ssl
-
Use SSL when connecting to Cassandra
-
-u --user
-
Username to authenticate against Cassandra with
-
-p --password
-
Password to authenticate against Cassandra with, should -be used in conjunction with --user
-
-k --keyspace
-
Keyspace to authenticate to, should be used in conjunction -with --user
-
-f --file
-
Execute commands from the given file, then exit
-
--debug
-
Print additional debugging information
-
--encoding
-
Specify a non-default encoding for output (defaults to UTF-8)
-
--cqlshrc
-
Specify a non-default location for the cqlshrc file
-
-e --execute
-
Execute the given statement, then exit
-
--connect-timeout
-
Specify the connection timeout in seconds (defaults to 2s)
-
--request-timeout
-
Specify the request timeout in seconds (defaults to 10s)
-
-t --tty
-
Force tty mode (command prompt)
-
-
-
-

Special Commands

-

In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below.

-
-

CONSISTENCY

-

Usage: CONSISTENCY <consistency level>

-

Sets the consistency level for operations to follow. Valid arguments include:

-
    -
  • ANY
  • -
  • ONE
  • -
  • TWO
  • -
  • THREE
  • -
  • QUORUM
  • -
  • ALL
  • -
  • LOCAL_QUORUM
  • -
  • LOCAL_ONE
  • -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-
-
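For example, to require a quorum of replicas for subsequent requests, or to check the current setting (running CONSISTENCY with no argument prints the level currently in effect):

```
cqlsh> CONSISTENCY QUORUM
cqlsh> CONSISTENCY
```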
-

SERIAL CONSISTENCY

-

Usage: SERIAL CONSISTENCY <consistency level>

-

Sets the serial consistency level for operations to follow. Valid arguments include:

-
    -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-

The serial consistency level is only used by conditional updates (INSERT, UPDATE and DELETE with an IF -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of QUORUM (and -is successful), then a QUORUM read is guaranteed to see that write. But if the regular consistency level of that -write is ANY, then only a read with a consistency level of SERIAL is guaranteed to see it (even a read with -consistency ALL is not guaranteed to be enough).

-
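For example, to use LOCAL_SERIAL for the paxos phase of subsequent conditional updates:

```
cqlsh> SERIAL CONSISTENCY LOCAL_SERIAL
```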
-
-

SHOW VERSION

-

Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:

-
cqlsh> SHOW VERSION
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-
-
-
-
-

SHOW HOST

-

Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:

-
cqlsh> SHOW HOST
-Connected to Prod_Cluster at 192.0.0.1:9042.
-
-
-
-
-

SHOW SESSION

-

Pretty prints a specific tracing session.

-

Usage: SHOW SESSION <session id>

-

Example usage:

-
cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8
-
-Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8
-
- activity                                                  | timestamp                  | source    | source_elapsed | client
------------------------------------------------------------+----------------------------+-----------+----------------+-----------
-                                        Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 |              0 | 127.0.0.1
- Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 |           3843 | 127.0.0.1
-...
-
-
-
-
-

SOURCE

-

Reads the contents of a file and executes each line as a CQL statement or special cqlsh command.

-

Usage: SOURCE <string filename>

-

Example usage:

-
cqlsh> SOURCE '/home/thobbs/commands.cql'
-
-
-
-
-

CAPTURE

-

Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured.

-

Usage:

-
CAPTURE '<file>';
-CAPTURE OFF;
-CAPTURE;
-
-
-

That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME.

-
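A typical capture session might look like this (the file name is only an example):

```
cqlsh> CAPTURE '~/queries_output.txt'
cqlsh> SELECT * FROM system.local;
cqlsh> CAPTURE OFF
```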

Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session.

-

To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF.

-

To inspect the current capture configuration, use CAPTURE with no arguments.

-
-
-

HELP

-

Gives information about cqlsh commands. To see available topics, enter HELP without any arguments. To see help on a -topic, use HELP <topic>. Also see the --browser argument for controlling what browser is used to display help.

-
-
-

TRACING

-

Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed.

-

Usage:

-
TRACING ON
-TRACING OFF
-
-
-
-
-

PAGING

-

Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it’s a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once.

-

Usage:

-
PAGING ON
-PAGING OFF
-PAGING <page size in rows>
-
-
-
-
-

EXPAND

-

Enables or disables vertical printing of rows. Enabling EXPAND is useful when many columns are fetched, or the -contents of a single column are large.

-

Usage:

-
EXPAND ON
-EXPAND OFF
-
-
-
-
-

LOGIN

-

Authenticate as a specified Cassandra user for the current session.

-

Usage:

-
LOGIN <username> [<password>]
-
-
-
-
-

EXIT

-

Ends the current session and terminates the cqlsh process.

-

Usage:

-
EXIT
-QUIT
-
-
-
-
-

CLEAR

-

Clears the console.

-

Usage:

-
CLEAR
-CLS
-
-
-
-
-

DESCRIBE

-

Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema.

-

Usage:

-
DESCRIBE CLUSTER
-DESCRIBE SCHEMA
-DESCRIBE KEYSPACES
-DESCRIBE KEYSPACE <keyspace name>
-DESCRIBE TABLES
-DESCRIBE TABLE <table name>
-DESCRIBE INDEX <index name>
-DESCRIBE MATERIALIZED VIEW <view name>
-DESCRIBE TYPES
-DESCRIBE TYPE <type name>
-DESCRIBE FUNCTIONS
-DESCRIBE FUNCTION <function name>
-DESCRIBE AGGREGATES
-DESCRIBE AGGREGATE <aggregate function name>
-
-
-

In any of the commands, DESC may be used in place of DESCRIBE.

-

The DESCRIBE CLUSTER command prints the cluster name and partitioner:

-
cqlsh> DESCRIBE CLUSTER
-
-Cluster: Test Cluster
-Partitioner: Murmur3Partitioner
-
-
-

The DESCRIBE SCHEMA command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup.

-
-
-

COPY TO

-

Copies data from a table to a CSV file.

-

Usage:

-
COPY <table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parenthesis after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value STDOUT (without single quotes) to print the CSV to stdout.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY TO

-
-
MAXREQUESTS
-
The maximum number of token ranges to fetch simultaneously. Defaults to 6.
-
PAGESIZE
-
The number of rows to fetch in a single page. Defaults to 1000.
-
PAGETIMEOUT
-
By default the page timeout is 10 seconds per 1000 entries -in the page size or 10 seconds if pagesize is smaller.
-
BEGINTOKEN, ENDTOKEN
-
Token range to export. Defaults to exporting the full ring.
-
MAXOUTPUTSIZE
-
The maximum size of the output file measured in number of lines; -beyond this maximum the output file will be split into segments. --1 means unlimited, and is the default.
-
ENCODING
-
The encoding used for characters. Defaults to utf8.
-
-
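Putting a few of these together (keyspace, table, column and file names are placeholders):

```
cqlsh> COPY myks.users (id, name, email) TO 'users.csv' WITH HEADER = TRUE AND PAGESIZE = 2000;
```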
-
-
-

COPY FROM

-

Copies data from a CSV file to table.

-

Usage:

-
COPY <table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parenthesis after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value STDIN (without single quotes) to read the -CSV data from stdin.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY FROM

-
-
INGESTRATE
-
The maximum number of rows to process per second. Defaults to 100000.
-
MAXROWS
-
The maximum number of rows to import. -1 means unlimited, and is the default.
-
SKIPROWS
-
A number of initial rows to skip. Defaults to 0.
-
SKIPCOLS
-
A comma-separated list of column names to ignore. By default, no columns are skipped.
-
MAXPARSEERRORS
-
The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default.
-
MAXINSERTERRORS
-
The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000.
-
ERRFILE =
-
A file to store all rows that could not be imported, by default this is import_<ks>_<table>.err where <ks> is -your keyspace and <table> is your table name.
-
MAXBATCHSIZE
-
The max number of rows inserted in a single batch. Defaults to 20.
-
MINBATCHSIZE
-
The min number of rows inserted in a single batch. Defaults to 2.
-
CHUNKSIZE
-
The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000.
-
-
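For example (keyspace, table, column and file names are placeholders):

```
cqlsh> COPY myks.users (id, name, email) FROM 'users.csv' WITH HEADER = TRUE AND INGESTRATE = 50000;
```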
-
-

Shared COPY Options

-

Options that are common to both COPY TO and COPY FROM.

-
-
NULLVAL
-
The string placeholder for null values. Defaults to null.
-
HEADER
-
For COPY TO, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, -specifies whether the first line in the CSV input file contains column names. Defaults to false.
-
DECIMALSEP
-
The character that is used as the decimal point separator. Defaults to ..
-
THOUSANDSSEP
-
The character that is used to separate thousands. Defaults to the empty string.
-
BOOLSTYLE
-
The string literal format for boolean values. Defaults to True,False.
-
NUMPROCESSES
-
The number of child worker processes to create for COPY tasks. Defaults to a max of 4 for COPY FROM and 16 -for COPY TO. However, at most (num_cores - 1) processes will be created.
-
MAXATTEMPTS
-
The maximum number of failed attempts to fetch a range of data (when using COPY TO) or insert a chunk of data -(when using COPY FROM) before giving up. Defaults to 5.
-
REPORTFREQUENCY
-
How often status updates are refreshed, in seconds. Defaults to 0.25.
-
RATEFILE
-
An optional file to output rate statistics to. By default, statistics are not output to a file.
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/index.html b/src/doc/4.0-alpha1/tools/index.html deleted file mode 100644 index ff7b50e35..000000000 --- a/src/doc/4.0-alpha1/tools/index.html +++ /dev/null @@ -1,257 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Tools" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Tools

-

This section describes the command line tools provided with Apache Cassandra.

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/assassinate.html b/src/doc/4.0-alpha1/tools/nodetool/assassinate.html deleted file mode 100644 index 81409370f..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/assassinate.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "assassinate" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

assassinate

-
-
-

Usage

-
NAME
-        nodetool assassinate - Forcefully remove a dead node without
-        re-replicating any data. Use as a last resort if you cannot removenode
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] assassinate [--] <ip_address>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <ip_address>
-            IP address of the endpoint to assassinate
-
-
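Example (the addresses and credentials are placeholders; the connection flags are only needed when JMX is remote or secured):

```bash
$ nodetool -h 10.0.1.10 -u jmxuser -pw jmxpass assassinate 10.0.1.23
```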
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/bootstrap.html b/src/doc/4.0-alpha1/tools/nodetool/bootstrap.html deleted file mode 100644 index 0aeabc27a..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/bootstrap.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "bootstrap" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

bootstrap

-
-
-

Usage

-
NAME
-        nodetool bootstrap - Monitor/manage node's bootstrap process
-
-SYNOPSIS
-        nodetool bootstrap
-        nodetool
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-p <port> | --port <port>)] [(-u <username> | --username <username>)]
-                [(-h <host> | --host <host>)] bootstrap resume
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-COMMANDS
-        With no arguments, Display help information
-
-        resume
-            Resume bootstrap streaming
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/cleanup.html b/src/doc/4.0-alpha1/tools/nodetool/cleanup.html deleted file mode 100644 index 8eb8730ac..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/cleanup.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "cleanup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cleanup

-
-
-

Usage

-
NAME
-        nodetool cleanup - Triggers the immediate cleanup of keys no longer
-        belonging to a node. By default, clean all keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] cleanup
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to cleanup simultanously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
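Example (keyspace and table names are placeholders; -j 2 limits the cleanup to two concurrent compaction threads):

```bash
$ nodetool cleanup -j 2 my_keyspace my_table
```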
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/clearsnapshot.html b/src/doc/4.0-alpha1/tools/nodetool/clearsnapshot.html deleted file mode 100644 index 2b3309b2b..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/clearsnapshot.html +++ /dev/null @@ -1,141 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clearsnapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clearsnapshot

-
-
-

Usage

-
NAME
-        nodetool clearsnapshot - Remove the snapshot with the given name from
-        the given keyspaces. If no snapshotName is specified we will remove all
-        snapshots
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clearsnapshot [--all]
-                [-t <snapshot_name>] [--] [<keyspaces>...]
-
-OPTIONS
-        --all
-            Removes all snapshots
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -t <snapshot_name>
-            Remove the snapshot with a given name
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces>...]
-            Remove snapshots from the given keyspaces
-
-
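Examples (the snapshot and keyspace names are placeholders):

```bash
# Remove a single named snapshot from one keyspace
$ nodetool clearsnapshot -t backup_2020_01_01 my_keyspace

# Remove every snapshot on the node
$ nodetool clearsnapshot --all
```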
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/clientstats.html b/src/doc/4.0-alpha1/tools/nodetool/clientstats.html deleted file mode 100644 index 6526139c2..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/clientstats.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clientstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clientstats

-
-
-

Usage

-
NAME
-        nodetool clientstats - Print information about connected clients
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clientstats [--all]
-                [--by-protocol] [--clear-history]
-
-OPTIONS
-        --all
-            Lists all connections
-
-        --by-protocol
-            Lists most recent client connections by protocol version
-
-        --clear-history
-            Clear the history of connected clients
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/compact.html b/src/doc/4.0-alpha1/tools/nodetool/compact.html deleted file mode 100644 index 240b6b26f..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/compact.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compact" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compact

-
-
-

Usage

-
NAME
-        nodetool compact - Force a (major) compaction on one or more tables or
-        user-defined compaction on given SSTables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compact
-                [(-et <end_token> | --end-token <end_token>)] [(-s | --split-output)]
-                [(-st <start_token> | --start-token <start_token>)] [--user-defined]
-                [--] [<keyspace> <tables>...] or <SSTable file>...
-
-OPTIONS
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which compaction range ends
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s, --split-output
-            Use -s to not create a single big file
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the compaction range starts
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --user-defined
-            Use --user-defined to submit listed files for user-defined
-            compaction
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...] or <SSTable file>...
-            The keyspace followed by one or many tables or list of SSTable data
-            files when using --user-defined
-
-
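Examples (keyspace, table and SSTable paths are placeholders):

```bash
# Major compaction of a single table
$ nodetool compact my_keyspace my_table

# User-defined compaction of specific SSTable data files
$ nodetool compact --user-defined /var/lib/cassandra/data/my_keyspace/my_table-<id>/<generation>-big-Data.db
```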
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/compactionhistory.html b/src/doc/4.0-alpha1/tools/nodetool/compactionhistory.html deleted file mode 100644 index dc85e8139..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/compactionhistory.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionhistory" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionhistory

-
-
-

Usage

-
NAME
-        nodetool compactionhistory - Print history of compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionhistory
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/compactionstats.html b/src/doc/4.0-alpha1/tools/nodetool/compactionstats.html deleted file mode 100644 index 86e24bd51..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/compactionstats.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionstats

-
-
-

Usage

-
NAME
-        nodetool compactionstats - Print statistics on compactions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/decommission.html b/src/doc/4.0-alpha1/tools/nodetool/decommission.html deleted file mode 100644 index c223570c6..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/decommission.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "decommission" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

decommission

-
-
-

Usage

-
NAME
-        nodetool decommission - Decommission the *node I am connecting to*
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] decommission [(-f | --force)]
-
-OPTIONS
-        -f, --force
-            Force decommission of this node even when it reduces the number of
-            replicas to below configured RF
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/describecluster.html b/src/doc/4.0-alpha1/tools/nodetool/describecluster.html deleted file mode 100644 index 441fa8c1a..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/describecluster.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describecluster" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describecluster

-
-
-

Usage

-
NAME
-        nodetool describecluster - Print the name, snitch, partitioner and
-        schema version of a cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describecluster
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/describering.html b/src/doc/4.0-alpha1/tools/nodetool/describering.html deleted file mode 100644 index 94cdb7fd5..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/describering.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describering" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describering

-
-
-

Usage

-
NAME
-        nodetool describering - Shows the token ranges info of a given keyspace
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describering [--] <keyspace>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            The keyspace name
-
-
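Example (the keyspace name is a placeholder):

```bash
$ nodetool describering my_keyspace
```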
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/disableauditlog.html b/src/doc/4.0-alpha1/tools/nodetool/disableauditlog.html deleted file mode 100644 index f30127b61..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/disableauditlog.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableauditlog

-
-
-

Usage

-
NAME
-        nodetool disableauditlog - Disable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableauditlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/disableautocompaction.html b/src/doc/4.0-alpha1/tools/nodetool/disableautocompaction.html deleted file mode 100644 index ddf9abb37..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/disableautocompaction.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableautocompaction

-
-
-

Usage

-
NAME
-        nodetool disableautocompaction - Disable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
-
-
-
-
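A sketch of typical `disableautocompaction` invocations based on the synopsis above; keyspace and table names are placeholders:

```bash
# Disable automatic compaction for every table in "my_keyspace"
nodetool disableautocompaction my_keyspace

# Disable it only for the "users" and "events" tables
nodetool disableautocompaction my_keyspace users events
```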
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/disablebackup.html b/src/doc/4.0-alpha1/tools/nodetool/disablebackup.html deleted file mode 100644 index f1af71f5e..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/disablebackup.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebackup

-
-
-

Usage

-
NAME
-        nodetool disablebackup - Disable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/disablebinary.html b/src/doc/4.0-alpha1/tools/nodetool/disablebinary.html deleted file mode 100644 index 1615c24b4..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/disablebinary.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebinary

-
-
-

Usage

-
NAME
-        nodetool disablebinary - Disable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/disablefullquerylog.html b/src/doc/4.0-alpha1/tools/nodetool/disablefullquerylog.html deleted file mode 100644 index 2540cf7ac..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/disablefullquerylog.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool disablefullquerylog - Disable the full query log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablefullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/disablegossip.html b/src/doc/4.0-alpha1/tools/nodetool/disablegossip.html deleted file mode 100644 index 77e7ceaf4..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/disablegossip.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablegossip

-
-
-

Usage

-
NAME
-        nodetool disablegossip - Disable gossip (effectively marking the node
-        down)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/disablehandoff.html b/src/doc/4.0-alpha1/tools/nodetool/disablehandoff.html deleted file mode 100644 index c8556bc59..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/disablehandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehandoff

-
-
-

Usage

-
NAME
-        nodetool disablehandoff - Disable storing hinted handoffs
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/disablehintsfordc.html b/src/doc/4.0-alpha1/tools/nodetool/disablehintsfordc.html deleted file mode 100644 index d8a6756fd..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/disablehintsfordc.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool disablehintsfordc - Disable hints for a data center
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <datacenter>
-            The data center to disable
-
-
-
-
-
-
-
-
-
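For illustration, assuming a data center named "DC2" (a placeholder), hints for that data center could be disabled with:

```bash
# Stop storing hints destined for nodes in the data center "DC2"
nodetool disablehintsfordc -- DC2
```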
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/disableoldprotocolversions.html b/src/doc/4.0-alpha1/tools/nodetool/disableoldprotocolversions.html deleted file mode 100644 index 5853e1ebe..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/disableoldprotocolversions.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool disableoldprotocolversions - Disable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/drain.html b/src/doc/4.0-alpha1/tools/nodetool/drain.html deleted file mode 100644 index e3cc20f06..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/drain.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "drain" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

drain

-
-
-

Usage

-
NAME
-        nodetool drain - Drain the node (stop accepting writes and flush all
-        tables)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] drain
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
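A minimal example of the `drain` command above; it takes no arguments and is commonly run before a planned node shutdown or upgrade:

```bash
# Stop accepting writes and flush all memtables to disk before stopping the node
nodetool drain
```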
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/enableauditlog.html b/src/doc/4.0-alpha1/tools/nodetool/enableauditlog.html deleted file mode 100644 index de9582c9c..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/enableauditlog.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableauditlog

-
-
-

Usage

-
NAME
-        nodetool enableauditlog - Enable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableauditlog
-                [--excluded-categories <excluded_categories>]
-                [--excluded-keyspaces <excluded_keyspaces>]
-                [--excluded-users <excluded_users>]
-                [--included-categories <included_categories>]
-                [--included-keyspaces <included_keyspaces>]
-                [--included-users <included_users>] [--logger <logger>]
-
-OPTIONS
-        --excluded-categories <excluded_categories>
-            Comma separated list of Audit Log Categories to be excluded for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --excluded-keyspaces <excluded_keyspaces>
-            Comma separated list of keyspaces to be excluded for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --excluded-users <excluded_users>
-            Comma separated list of users to be excluded for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --included-categories <included_categories>
-            Comma separated list of Audit Log Categories to be included for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --included-keyspaces <included_keyspaces>
-            Comma separated list of keyspaces to be included for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --included-users <included_users>
-            Comma separated list of users to be included for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        --logger <logger>
-            Logger name to be used for AuditLogging. Default BinAuditLogger. If
-            not set the value from cassandra.yaml will be used
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
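An illustrative sketch of `enableauditlog` based on the options above; the keyspace name is a placeholder and the category names assume the standard audit log categories:

```bash
# Enable audit logging with the defaults from cassandra.yaml
nodetool enableauditlog

# Enable it with an explicit logger, limited to DML and DDL statements in one keyspace
nodetool enableauditlog --logger BinAuditLogger \
    --included-categories DML,DDL \
    --included-keyspaces my_keyspace
```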
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/enableautocompaction.html b/src/doc/4.0-alpha1/tools/nodetool/enableautocompaction.html deleted file mode 100644 index dbe6f567c..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/enableautocompaction.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableautocompaction

-
-
-

Usage

-
NAME
-        nodetool enableautocompaction - Enable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/enablebackup.html b/src/doc/4.0-alpha1/tools/nodetool/enablebackup.html deleted file mode 100644 index bd3cb0d1d..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/enablebackup.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebackup

-
-
-

Usage

-
NAME
-        nodetool enablebackup - Enable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/enablebinary.html b/src/doc/4.0-alpha1/tools/nodetool/enablebinary.html deleted file mode 100644 index 9acb4f3d4..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/enablebinary.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebinary

-
-
-

Usage

-
NAME
-        nodetool enablebinary - Reenable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/enablefullquerylog.html b/src/doc/4.0-alpha1/tools/nodetool/enablefullquerylog.html deleted file mode 100644 index b4c86be8f..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/enablefullquerylog.html +++ /dev/null @@ -1,155 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool enablefullquerylog - Enable full query logging, defaults for
-        the options are configured in cassandra.yaml
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablefullquerylog
-                [--archive-command <archive_command>] [--blocking]
-                [--max-archive-retries <archive_retries>]
-                [--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-                [--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-        --archive-command <archive_command>
-            Command that will handle archiving rolled full query log files.
-            Format is "/path/to/script.sh %path" where %path will be replaced
-            with the file to archive
-
-        --blocking
-            If the queue is full whether to block producers or drop samples.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --max-archive-retries <archive_retries>
-            Max number of archive retries.
-
-        --max-log-size <max_log_size>
-            How many bytes of log data to store before dropping segments. Might
-            not be respected if a log file hasn't rolled so it can be deleted.
-
-        --max-queue-weight <max_queue_weight>
-            Maximum number of bytes of query data to queue to disk before
-            blocking or dropping samples.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        --path <path>
-            Path to store the full query log at. Will have its contents
-            recursively deleted.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        --roll-cycle <roll_cycle>
-            How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
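A hedged example of `enablefullquerylog` using the options above; the log directory is a placeholder and, per the `--path` description, its contents are deleted recursively:

```bash
# Enable full query logging into a dedicated directory, rolling the log hourly
nodetool enablefullquerylog --path /var/lib/cassandra/fql --roll-cycle HOURLY
```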
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/enablegossip.html b/src/doc/4.0-alpha1/tools/nodetool/enablegossip.html deleted file mode 100644 index bb55dee81..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/enablegossip.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablegossip

-
-
-

Usage

-
NAME
-        nodetool enablegossip - Reenable gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/enablehandoff.html b/src/doc/4.0-alpha1/tools/nodetool/enablehandoff.html deleted file mode 100644 index 934479a06..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/enablehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehandoff

-
-
-

Usage

-
NAME
-        nodetool enablehandoff - Reenable future hints storing on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/enablehintsfordc.html b/src/doc/4.0-alpha1/tools/nodetool/enablehintsfordc.html deleted file mode 100644 index a255065af..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/enablehintsfordc.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool enablehintsfordc - Enable hints for a data center that was
-        previously disabled
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <datacenter>
-            The data center to enable
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/enableoldprotocolversions.html b/src/doc/4.0-alpha1/tools/nodetool/enableoldprotocolversions.html deleted file mode 100644 index 675658600..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/enableoldprotocolversions.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool enableoldprotocolversions - Enable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/failuredetector.html b/src/doc/4.0-alpha1/tools/nodetool/failuredetector.html deleted file mode 100644 index c9d88559d..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/failuredetector.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "failuredetector" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

failuredetector

-
-
-

Usage

-
NAME
-        nodetool failuredetector - Shows the failure detector information for
-        the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] failuredetector
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/flush.html b/src/doc/4.0-alpha1/tools/nodetool/flush.html deleted file mode 100644 index c5e96a7fb..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/flush.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "flush" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

flush

-
-
-

Usage

-
NAME
-        nodetool flush - Flush one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] flush [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
-
-
-
-
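For illustration, typical `flush` invocations based on the synopsis above; keyspace and table names are placeholders:

```bash
# Flush every table in "my_keyspace"
nodetool flush my_keyspace

# Flush only the "users" and "events" tables
nodetool flush my_keyspace users events
```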
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/garbagecollect.html b/src/doc/4.0-alpha1/tools/nodetool/garbagecollect.html deleted file mode 100644 index 4e9f03ab4..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/garbagecollect.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "garbagecollect" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

garbagecollect

-
-
-

Usage

-
NAME
-        nodetool garbagecollect - Remove deleted data from one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] garbagecollect
-                [(-g <granularity> | --granularity <granularity>)]
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -g <granularity>, --granularity <granularity>
-            Granularity of garbage removal. ROW (default) removes deleted
-            partitions and rows, CELL also removes overwritten or deleted cells.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to clean up simultaneously, set to 0 to use all
-            available compaction threads. Defaults to 1 so that collections of
-            newer tables can see the data is deleted and also remove tombstones.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
-
-
-
-
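A sketch of `garbagecollect` usage based on the options above; keyspace and table names are placeholders:

```bash
# Remove deleted partitions and rows (default ROW granularity) from one table
nodetool garbagecollect my_keyspace events

# Also remove overwritten or deleted cells, using two concurrent jobs
nodetool garbagecollect -g CELL -j 2 my_keyspace events
```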
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/gcstats.html b/src/doc/4.0-alpha1/tools/nodetool/gcstats.html deleted file mode 100644 index 3f43f52f7..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/gcstats.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gcstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gcstats

-
-
-

Usage

-
NAME
-        nodetool gcstats - Print GC Statistics
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gcstats
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getbatchlogreplaythrottle.html b/src/doc/4.0-alpha1/tools/nodetool/getbatchlogreplaythrottle.html deleted file mode 100644 index b171a7546..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getbatchlogreplaythrottle.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool getbatchlogreplaythrottle - Print batchlog replay throttle in
-        KB/s. This is reduced proportionally to the number of nodes in the
-        cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getbatchlogreplaythrottle
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getcompactionthreshold.html b/src/doc/4.0-alpha1/tools/nodetool/getcompactionthreshold.html deleted file mode 100644 index 621ed0f96..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getcompactionthreshold.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool getcompactionthreshold - Print min and max compaction
-        thresholds for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthreshold [--]
-                <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table>
-            The keyspace with a table
-
-
-
-
-
-
-
-
-
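An illustrative `getcompactionthreshold` call based on the synopsis above; keyspace and table names are placeholders:

```bash
# Print the min and max compaction thresholds configured for my_keyspace.events
nodetool getcompactionthreshold my_keyspace events
```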
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getcompactionthroughput.html b/src/doc/4.0-alpha1/tools/nodetool/getcompactionthroughput.html deleted file mode 100644 index bf2c9d577..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getcompactionthroughput.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool getcompactionthroughput - Print the MB/s throughput cap for
-        compaction in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getconcurrency.html b/src/doc/4.0-alpha1/tools/nodetool/getconcurrency.html deleted file mode 100644 index b65af1fb6..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getconcurrency.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrency

-
-
-

Usage

-
NAME
-        nodetool getconcurrency - Get maximum concurrency for processing stages
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrency [--]
-                [stage-names]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [stage-names]
-            optional list of stage names, otherwise display all stages
-
-
-
-
-
-
-
-
-
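For illustration only; the stage names below are assumptions, not values taken from this page:

```bash
# Show the maximum concurrency for all processing stages
nodetool getconcurrency

# Restrict the output to particular stages (stage names are illustrative)
nodetool getconcurrency MutationStage ReadStage
```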
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getconcurrentcompactors.html b/src/doc/4.0-alpha1/tools/nodetool/getconcurrentcompactors.html deleted file mode 100644 index dfb728991..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getconcurrentcompactors.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool getconcurrentcompactors - Get the number of concurrent
-        compactors in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentcompactors
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getconcurrentviewbuilders.html b/src/doc/4.0-alpha1/tools/nodetool/getconcurrentviewbuilders.html deleted file mode 100644 index 5697b45b0..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getconcurrentviewbuilders.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool getconcurrentviewbuilders - Get the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentviewbuilders
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getendpoints.html b/src/doc/4.0-alpha1/tools/nodetool/getendpoints.html deleted file mode 100644 index fcd5af325..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getendpoints.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getendpoints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getendpoints

-
-
-

Usage

-
NAME
-        nodetool getendpoints - Print the end points that owns the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getendpoints [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find the endpoint
-
-
-
-
-
-
-
-
-
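A minimal sketch of `getendpoints`; keyspace, table, and partition key are placeholders:

```bash
# List the nodes that own the partition with key "alice" in my_keyspace.users
nodetool getendpoints my_keyspace users alice
```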
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getinterdcstreamthroughput.html b/src/doc/4.0-alpha1/tools/nodetool/getinterdcstreamthroughput.html deleted file mode 100644 index e8ef24730..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getinterdcstreamthroughput.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getinterdcstreamthroughput - Print the Mb/s throughput cap for
-        inter-datacenter streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getinterdcstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getlogginglevels.html b/src/doc/4.0-alpha1/tools/nodetool/getlogginglevels.html deleted file mode 100644 index 3273115f0..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getlogginglevels.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getlogginglevels" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getlogginglevels

-
-
-

Usage

-
NAME
-        nodetool getlogginglevels - Get the runtime logging levels
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getlogginglevels
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getmaxhintwindow.html b/src/doc/4.0-alpha1/tools/nodetool/getmaxhintwindow.html deleted file mode 100644 index 79934662a..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getmaxhintwindow.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool getmaxhintwindow - Print the max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getmaxhintwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getreplicas.html b/src/doc/4.0-alpha1/tools/nodetool/getreplicas.html deleted file mode 100644 index de155d3bd..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getreplicas.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getreplicas" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getreplicas

-
-
-

Usage

-
NAME
-        nodetool getreplicas - Print replicas for a given key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getreplicas [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find replicas
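
A minimal usage sketch follows; the keyspace `ks1`, table `users`, and key `alice` are hypothetical values, and `--` is the documented separator between options and arguments.

```bash
# Sketch: ks1, users and alice are made-up example values.
nodetool getreplicas -- ks1 users alice
```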
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getseeds.html b/src/doc/4.0-alpha1/tools/nodetool/getseeds.html deleted file mode 100644 index 3118bc3f9..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getseeds.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getseeds

-
-
-

Usage

-
NAME
-        nodetool getseeds - Get the currently in use seed node IP list excluding
-        the node IP
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getsstables.html b/src/doc/4.0-alpha1/tools/nodetool/getsstables.html deleted file mode 100644 index 666af03bb..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getsstables.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getsstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getsstables

-
-
-

Usage

-
NAME
-        nodetool getsstables - Print the sstable filenames that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getsstables
-                [(-hf | --hex-format)] [--] <keyspace> <cfname> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hf, --hex-format
-            Specify the key in hexadecimal string format
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <cfname> <key>
-            The keyspace, the column family, and the key
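
For illustration, a hypothetical keyspace, table, and key can be looked up as plain text or, with the documented `-hf` flag, as a hexadecimal key.

```bash
# Sketch: example names only. Plain-text key lookup.
nodetool getsstables -- ks1 users alice

# The same key supplied as a hexadecimal string.
nodetool getsstables -hf -- ks1 users 616c696365
```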
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/getstreamthroughput.html b/src/doc/4.0-alpha1/tools/nodetool/getstreamthroughput.html deleted file mode 100644 index 7ae8b62ce..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/getstreamthroughput.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getstreamthroughput - Print the Mb/s throughput cap for
-        streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/gettimeout.html b/src/doc/4.0-alpha1/tools/nodetool/gettimeout.html deleted file mode 100644 index 3b938c900..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/gettimeout.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettimeout

-
-
-

Usage

-
NAME
-        nodetool gettimeout - Print the timeout of the given type in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettimeout [--] <timeout_type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <timeout_type>
-            The timeout type, one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
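
For example, querying two of the timeout types listed above might look like this sketch.

```bash
# Print the current read and write timeouts (in ms) of the connected node.
nodetool gettimeout read
nodetool gettimeout write
```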
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/gettraceprobability.html b/src/doc/4.0-alpha1/tools/nodetool/gettraceprobability.html deleted file mode 100644 index 458c738db..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/gettraceprobability.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettraceprobability

-
-
-

Usage

-
NAME
-        nodetool gettraceprobability - Print the current trace probability value
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettraceprobability
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/gossipinfo.html b/src/doc/4.0-alpha1/tools/nodetool/gossipinfo.html deleted file mode 100644 index 74d04991f..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/gossipinfo.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gossipinfo" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gossipinfo

-
-
-

Usage

-
NAME
-        nodetool gossipinfo - Shows the gossip information for the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gossipinfo
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/handoffwindow.html b/src/doc/4.0-alpha1/tools/nodetool/handoffwindow.html deleted file mode 100644 index 0482d62bd..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/handoffwindow.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "handoffwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

handoffwindow

-
-
-

Usage

-
NAME
-        nodetool handoffwindow - Print current hinted handoff window
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] handoffwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/help.html b/src/doc/4.0-alpha1/tools/nodetool/help.html deleted file mode 100644 index b9658be12..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/help.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "help" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

help

-
-
-

Usage

-
NAME
-        nodetool help - Display help information
-
-SYNOPSIS
-        nodetool help [--] [<command>...]
-
-OPTIONS
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <command>
-
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/import.html b/src/doc/4.0-alpha1/tools/nodetool/import.html deleted file mode 100644 index e63eac796..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/import.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "import" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

import

-
-
-

Usage

-
NAME
-        nodetool import - Import new SSTables to the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] import
-                [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-                [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-                [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-                <directory> ...
-
-OPTIONS
-        -c, --no-invalidate-caches
-            Don't invalidate the row cache when importing
-
-        -e, --extended-verify
-            Run an extended verify, verifying all values in the new sstables
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --keep-level
-            Keep the level on the new sstables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick import without verifying sstables, clearing row cache or
-            checking in which data directory to put the file
-
-        -r, --keep-repaired
-            Keep any repaired information from the sstables
-
-        -t, --no-tokens
-            Don't verify that all tokens in the new sstable are owned by the
-            current node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -v, --no-verify
-            Don't verify new sstables
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <directory> ...
-            The keyspace, table name and directories to import sstables from
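
A hedged sketch of a typical invocation; the keyspace, table, and staging directory below are assumptions made for the example.

```bash
# Sketch: ks1, users and the staging path are example values.
# Import SSTables, keeping their compaction level (-l) and repaired state (-r).
nodetool import -l -r -- ks1 users /var/lib/cassandra/staging/ks1/users
```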
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/info.html b/src/doc/4.0-alpha1/tools/nodetool/info.html deleted file mode 100644 index 3cd8721a2..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/info.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "info" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

info

-
-
-

Usage

-
NAME
-        nodetool info - Print node information (uptime, load, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] info [(-T | --tokens)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -T, --tokens
-            Display all tokens
-
-        -u <username>, --username <username>
-            Remote jmx agent username
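
Two quick forms, sketched for illustration: the plain node summary, and the same report with the token list added via the documented `-T` flag.

```bash
# Print uptime, load, heap usage and other node details.
nodetool info

# Include every token owned by the node.
nodetool info -T
```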
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/invalidatecountercache.html b/src/doc/4.0-alpha1/tools/nodetool/invalidatecountercache.html deleted file mode 100644 index 934638993..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/invalidatecountercache.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatecountercache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatecountercache

-
-
-

Usage

-
NAME
-        nodetool invalidatecountercache - Invalidate the counter cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatecountercache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/invalidatekeycache.html b/src/doc/4.0-alpha1/tools/nodetool/invalidatekeycache.html deleted file mode 100644 index 5f39ee2d5..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/invalidatekeycache.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatekeycache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatekeycache

-
-
-

Usage

-
NAME
-        nodetool invalidatekeycache - Invalidate the key cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatekeycache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/invalidaterowcache.html b/src/doc/4.0-alpha1/tools/nodetool/invalidaterowcache.html deleted file mode 100644 index 6ce338589..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/invalidaterowcache.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidaterowcache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidaterowcache

-
-
-

Usage

-
NAME
-        nodetool invalidaterowcache - Invalidate the row cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidaterowcache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/join.html b/src/doc/4.0-alpha1/tools/nodetool/join.html deleted file mode 100644 index 90955c8e1..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/join.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "join" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

join

-
-
-

Usage

-
NAME
-        nodetool join - Join the ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] join
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/listsnapshots.html b/src/doc/4.0-alpha1/tools/nodetool/listsnapshots.html deleted file mode 100644 index ba22ac131..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/listsnapshots.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "listsnapshots" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

listsnapshots

-
-
-

Usage

-
NAME
-        nodetool listsnapshots - Lists all the snapshots along with the size on
-        disk and true size.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] listsnapshots
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/move.html b/src/doc/4.0-alpha1/tools/nodetool/move.html deleted file mode 100644 index 97d9422b9..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/move.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "move" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

move

-
-
-

Usage

-
NAME
-        nodetool move - Move node on the token ring to a new token
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] move [--] <new token>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <new token>
-            The new token.
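
A sketch of moving a single-token node; the token value is an arbitrary example, and `--` is needed here because the token itself begins with a minus sign.

```bash
# Sketch: the target token is an arbitrary example value.
nodetool move -- -9223372036854775808
```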
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/netstats.html b/src/doc/4.0-alpha1/tools/nodetool/netstats.html deleted file mode 100644 index 033b46e44..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/netstats.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "netstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

netstats

-
-
-

Usage

-
NAME
-        nodetool netstats - Print network information on provided host
-        (connecting node by default)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] netstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
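
Sketched usage, with and without the documented `-H` flag for human-readable byte units.

```bash
# Show streaming and messaging statistics for the connected node.
nodetool netstats

# Same report with byte counts shown as KiB/MiB/GiB/TiB.
nodetool netstats -H
```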
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/nodetool.html b/src/doc/4.0-alpha1/tools/nodetool/nodetool.html deleted file mode 100644 index ddea3c298..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/nodetool.html +++ /dev/null @@ -1,241 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-
-

Usage

-
-
usage: nodetool [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
        [(-pp | --print-port)] [(-pw <password> | --password <password>)]
        [(-p <port> | --port <port>)] [(-u <username> | --username <username>)]
        [(-h <host> | --host <host>)] <command> [<args>]
-
-

The most commonly used nodetool commands are:

-
-

assassinate - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode

-

bootstrap - Monitor/manage node’s bootstrap process

-

cleanup - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces

-

clearsnapshot - Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots

-

clientstats - Print information about connected clients

-

compact - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables

-

compactionhistory - Print history of compaction

-

compactionstats - Print statistics on compactions

-

decommission - Decommission the node I am connecting to

-

describecluster - Print the name, snitch, partitioner and schema version of a cluster

-

describering - Shows the token ranges info of a given keyspace

-

disableauditlog - Disable the audit log

-

disableautocompaction - Disable autocompaction for the given keyspace and table

-

disablebackup - Disable incremental backup

-

disablebinary - Disable native transport (binary protocol)

-

disablefullquerylog - Disable the full query log

-

disablegossip - Disable gossip (effectively marking the node down)

-

disablehandoff - Disable storing hinted handoffs

-

disablehintsfordc - Disable hints for a data center

-

disableoldprotocolversions - Disable old protocol versions

-

drain - Drain the node (stop accepting writes and flush all tables)

-

enableauditlog - Enable the audit log

-

enableautocompaction - Enable autocompaction for the given keyspace and table

-

enablebackup - Enable incremental backup

-

enablebinary - Reenable native transport (binary protocol)

-

enablefullquerylog - Enable full query logging, defaults for the options are configured in cassandra.yaml

-

enablegossip - Reenable gossip

-

enablehandoff - Reenable future hints storing on the current node

-

enablehintsfordc - Enable hints for a data center that was previously disabled

-

enableoldprotocolversions - Enable old protocol versions

-

failuredetector - Shows the failure detector information for the cluster

-

flush - Flush one or more tables

-

garbagecollect - Remove deleted data from one or more tables

-

gcstats - Print GC Statistics

-

getbatchlogreplaythrottle - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster.

-

getcompactionthreshold - Print min and max compaction thresholds for a given table

-

getcompactionthroughput - Print the MB/s throughput cap for compaction in the system

-

getconcurrentcompactors - Get the number of concurrent compactors in the system.

-

getconcurrentviewbuilders - Get the number of concurrent view builders in the system

-

getendpoints - Print the end points that owns the key

-

getinterdcstreamthroughput - Print the Mb/s throughput cap for inter-datacenter streaming in the system

-

getlogginglevels - Get the runtime logging levels

-

getmaxhintwindow - Print the max hint window in ms

-

getreplicas - Print replicas for a given key

-

getseeds - Get the currently in use seed node IP list excluding the node IP

-

getsstables - Print the sstable filenames that own the key

-

getstreamthroughput - Print the Mb/s throughput cap for streaming in the system

-

gettimeout - Print the timeout of the given type in ms

-

gettraceprobability - Print the current trace probability value

-

gossipinfo - Shows the gossip information for the cluster

-

handoffwindow - Print current hinted handoff window

-

help - Display help information

-

import - Import new SSTables to the system

-

info - Print node information (uptime, load, …)

-

invalidatecountercache - Invalidate the counter cache

-

invalidatekeycache - Invalidate the key cache

-

invalidaterowcache - Invalidate the row cache

-

join - Join the ring

-

listsnapshots - Lists all the snapshots along with the size on disk and true size.

-

move - Move node on the token ring to a new token

-

netstats - Print network information on provided host (connecting node by default)

-

pausehandoff - Pause hints delivery process

-

profileload - Low footprint profiling of activity for a period of time

-

proxyhistograms - Print statistic histograms for network operations

-

rangekeysample - Shows the sampled keys held across all keyspaces

-

rebuild - Rebuild data by streaming from other nodes (similarly to bootstrap)

-

rebuild_index - A full rebuild of native secondary indexes for a given table

-

refresh - Load newly placed SSTables to the system without restart

-

refreshsizeestimates - Refresh system.size_estimates

-

reloadlocalschema - Reload local node schema from system tables

-

reloadseeds - Reload the seed node list from the seed node provider

-

reloadssl - Signals Cassandra to reload SSL certificates

-

reloadtriggers - Reload trigger classes

-

relocatesstables - Relocates sstables to the correct disk

-

removenode - Show status of current node removal, force completion of pending removal or remove provided ID

-

repair - Repair one or more tables

-

repair_admin - -list - and fail incremental repair sessions

-

replaybatchlog - Kick off batchlog replay and wait for finish

-

resetfullquerylog - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX

-

resetlocalschema - Reset node’s local schema and resync

-

resumehandoff - Resume hints delivery process

-

ring - Print information about the token ring

-

scrub - Scrub (rebuild sstables for) one or more tables

-

setbatchlogreplaythrottle - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster.

-

setcachecapacity - Set global key, row, and counter cache capacities (in MB units)

-

setcachekeystosave - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable

-

setcompactionthreshold - Set min and max compaction thresholds for a given table

-

setcompactionthroughput - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling

-

setconcurrentcompactors - Set number of concurrent compactors in the system.

-

setconcurrentviewbuilders - Set the number of concurrent view builders in the system

-

sethintedhandoffthrottlekb - Set hinted handoff throttle in kb per second, per delivery thread.

-

setinterdcstreamthroughput - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling

-

setlogginglevel - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters.

-

setmaxhintwindow - Set the specified max hint window in ms

-

setstreamthroughput - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling

-

settimeout - Set the specified timeout in ms, or 0 to disable timeout

-

settraceprobability - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default

-

snapshot - Take a snapshot of specified keyspaces or a snapshot of the specified table

-

status - Print cluster information (state, load, IDs, …)

-

statusautocompaction - -status - of autocompaction of the given keyspace and table

-

statusbackup - Status of incremental backup

-

statusbinary - Status of native transport (binary protocol)

-

statusgossip - Status of gossip

-

statushandoff - Status of storing future hints on the current node

-

stop - Stop compaction

-

stopdaemon - Stop cassandra daemon

-

tablehistograms - Print statistic histograms for a given table

-

tablestats - Print statistics on tables

-

toppartitions - Sample and print the most active partitions

-

tpstats - Print usage statistics of thread pools

-

truncatehints - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified.

-

upgradesstables - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version)

-

verify - Verify (check data checksum for) one or more tables

-

version - Print cassandra version

-

viewbuildstatus - Show progress of a materialized view build

-
-

See ‘nodetool help <command>’ for more information on a specific command.

-
- - - - - - -
-
-
-
-
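
As a closing sketch for this overview page, the per-command help is reached through the `help` subcommand, and the global connection options always precede the subcommand name; the host and port values below are assumptions.

```bash
# Show the full option listing for one subcommand from the list above.
nodetool help getreplicas

# Global connection options come before the subcommand (example host/port).
nodetool -h 10.0.0.1 -p 7199 status
```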
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/pausehandoff.html b/src/doc/4.0-alpha1/tools/nodetool/pausehandoff.html deleted file mode 100644 index 9a478d5a8..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/pausehandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "pausehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

pausehandoff

-
-
-

Usage

-
NAME
-        nodetool pausehandoff - Pause hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] pausehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/profileload.html b/src/doc/4.0-alpha1/tools/nodetool/profileload.html deleted file mode 100644 index d3201bf4b..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/profileload.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "profileload" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

profileload

-
-
-

Usage

-
NAME
-        nodetool profileload - Low footprint profiling of activity for a period
-        of time
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] profileload [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
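
For example, sampling one table for ten seconds and listing the top five results might look like the sketch below; the keyspace and table names are hypothetical.

```bash
# Sketch: ks1 and users are example names; the duration is given in milliseconds.
nodetool profileload -k 5 -- ks1 users 10000
```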
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/proxyhistograms.html b/src/doc/4.0-alpha1/tools/nodetool/proxyhistograms.html deleted file mode 100644 index 496f2f948..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/proxyhistograms.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "proxyhistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

proxyhistograms

-
-
-

Usage

-
NAME
-        nodetool proxyhistograms - Print statistic histograms for network
-        operations
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] proxyhistograms
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/rangekeysample.html b/src/doc/4.0-alpha1/tools/nodetool/rangekeysample.html deleted file mode 100644 index 76353e4f3..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/rangekeysample.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rangekeysample" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rangekeysample

-
-
-

Usage

-
NAME
-        nodetool rangekeysample - Shows the sampled keys held across all
-        keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rangekeysample
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/rebuild.html b/src/doc/4.0-alpha1/tools/nodetool/rebuild.html deleted file mode 100644 index 1f06dac2c..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/rebuild.html +++ /dev/null @@ -1,149 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild

-
-
-

Usage

-
NAME
-        nodetool rebuild - Rebuild data by streaming from other nodes (similarly
-        to bootstrap)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild
-                [(-ks <specific_keyspace> | --keyspace <specific_keyspace>)]
-                [(-s <specific_sources> | --sources <specific_sources>)]
-                [(-ts <specific_tokens> | --tokens <specific_tokens>)] [--]
-                <src-dc-name>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -ks <specific_keyspace>, --keyspace <specific_keyspace>
-            Use -ks to rebuild specific keyspace.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <specific_sources>, --sources <specific_sources>
-            Use -s to specify hosts that this node should stream from when -ts
-            is used. Multiple hosts should be separated using commas (e.g.
-            127.0.0.1,127.0.0.2,...)
-
-        -ts <specific_tokens>, --tokens <specific_tokens>
-            Use -ts to rebuild specific token ranges, in the format of "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]".
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <src-dc-name>
-            Name of DC from which to select sources for streaming. By default,
-            pick any DC
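
A sketch of rebuilding one keyspace from a named datacenter; `ks1` and `DC1` are example values.

```bash
# Stream all data for keyspace ks1 from nodes in datacenter DC1 (example names).
nodetool rebuild -ks ks1 -- DC1
```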
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/rebuild_index.html b/src/doc/4.0-alpha1/tools/nodetool/rebuild_index.html deleted file mode 100644 index 802f24f43..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/rebuild_index.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild_index" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild_index

-
-
-

Usage

-
NAME
-        nodetool rebuild_index - A full rebuild of native secondary indexes for
-        a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild_index [--] <keyspace>
-                <table> <indexName...>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <indexName...>
-            The keyspace and table name followed by a list of index names
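
Sketched usage, assuming a hypothetical keyspace, table, and secondary index name.

```bash
# Sketch: ks1, users and users_email_idx are made-up example names.
nodetool rebuild_index -- ks1 users users_email_idx
```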
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/refresh.html b/src/doc/4.0-alpha1/tools/nodetool/refresh.html deleted file mode 100644 index 335d2b842..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/refresh.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refresh" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refresh

-
-
-

Usage

-
NAME
-        nodetool refresh - Load newly placed SSTables to the system without
-        restart
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refresh [--] <keyspace>
-                <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table>
-            The keyspace and table name
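
A minimal sketch, assuming SSTable files have already been copied into the table's data directory for the hypothetical keyspace and table below.

```bash
# Sketch: ks1 and users are example names.
nodetool refresh -- ks1 users
```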
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/refreshsizeestimates.html b/src/doc/4.0-alpha1/tools/nodetool/refreshsizeestimates.html deleted file mode 100644 index f40785411..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/refreshsizeestimates.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refreshsizeestimates" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refreshsizeestimates

-
-
-

Usage

-
NAME
-        nodetool refreshsizeestimates - Refresh system.size_estimates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refreshsizeestimates
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/reloadlocalschema.html b/src/doc/4.0-alpha1/tools/nodetool/reloadlocalschema.html deleted file mode 100644 index f1a9140eb..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/reloadlocalschema.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadlocalschema

-
-
-

Usage

-
NAME
-        nodetool reloadlocalschema - Reload local node schema from system tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/reloadseeds.html b/src/doc/4.0-alpha1/tools/nodetool/reloadseeds.html deleted file mode 100644 index fed2ddd96..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/reloadseeds.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadseeds

-
-
-

Usage

-
NAME
-        nodetool reloadseeds - Reload the seed node list from the seed node
-        provider
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/reloadssl.html b/src/doc/4.0-alpha1/tools/nodetool/reloadssl.html deleted file mode 100644 index 7bd3a1b67..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/reloadssl.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadssl" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadssl

-
-
-

Usage

-
NAME
-        nodetool reloadssl - Signals Cassandra to reload SSL certificates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadssl
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/reloadtriggers.html b/src/doc/4.0-alpha1/tools/nodetool/reloadtriggers.html deleted file mode 100644 index 4c8c675ca..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/reloadtriggers.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadtriggers" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadtriggers

-
-
-

Usage

-
NAME
-        nodetool reloadtriggers - Reload trigger classes
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadtriggers
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/relocatesstables.html b/src/doc/4.0-alpha1/tools/nodetool/relocatesstables.html deleted file mode 100644 index ab4cd4ea6..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/relocatesstables.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "relocatesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

relocatesstables

-
-
-

Usage

-
NAME
-        nodetool relocatesstables - Relocates sstables to the correct disk
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] relocatesstables
-                [(-j <jobs> | --jobs <jobs>)] [--] <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to relocate simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
-
-
-
-
-
- - - - -
-
-
-
-
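For illustration only (the keyspace and table names are placeholders, not values from this page), a typical `relocatesstables` invocation looks like:

```bash
# Relocate sstables for one table, two at a time
nodetool relocatesstables -j 2 -- my_keyspace my_table

# Or let it use every available compaction thread
nodetool relocatesstables -j 0 -- my_keyspace my_table
```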
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/removenode.html b/src/doc/4.0-alpha1/tools/nodetool/removenode.html deleted file mode 100644 index ca7543e65..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/removenode.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "removenode" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

removenode

-
-
-

Usage

-
NAME
-        nodetool removenode - Show status of current node removal, force
-        completion of pending removal or remove provided ID
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] removenode [--]
-                <status>|<force>|<ID>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <status>|<force>|<ID>
-            Show status of current node removal, force completion of pending
-            removal, or remove provided ID
-
-
-
-
-
- - - - -
-
-
-
-
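A sketch of the usual `removenode` workflow; the host ID below is a placeholder, not a real node:

```bash
# Start removal of a dead node by its host ID
nodetool removenode 12345678-1234-1234-1234-123456789012

# Check how the removal is progressing
nodetool removenode status

# Force completion if streaming has stalled
nodetool removenode force
```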
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/repair.html b/src/doc/4.0-alpha1/tools/nodetool/repair.html deleted file mode 100644 index d1c56f68e..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/repair.html +++ /dev/null @@ -1,197 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair

-
-
-

Usage

-
NAME
-        nodetool repair - Repair one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair
-                [(-dc <specific_dc> | --in-dc <specific_dc>)...]
-                [(-dcpar | --dc-parallel)] [(-et <end_token> | --end-token <end_token>)]
-                [(-force | --force)] [(-full | --full)]
-                [(-hosts <specific_host> | --in-hosts <specific_host>)...]
-                [(-j <job_threads> | --job-threads <job_threads>)]
-                [(-local | --in-local-dc)] [(-os | --optimise-streams)] [(-pl | --pull)]
-                [(-pr | --partitioner-range)] [(-prv | --preview)]
-                [(-seq | --sequential)]
-                [(-st <start_token> | --start-token <start_token>)] [(-tr | --trace)]
-                [(-vd | --validate)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -dc <specific_dc>, --in-dc <specific_dc>
-            Use -dc to repair specific datacenters
-
-        -dcpar, --dc-parallel
-            Use -dcpar to repair data centers in parallel.
-
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which repair range ends
-
-        -force, --force
-            Use -force to filter out down endpoints
-
-        -full, --full
-            Use -full to issue a full repair.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hosts <specific_host>, --in-hosts <specific_host>
-            Use -hosts to repair specific hosts
-
-        -j <job_threads>, --job-threads <job_threads>
-            Number of threads to run repair jobs. Usually this means number of
-            CFs to repair concurrently. WARNING: increasing this puts more load
-            on repairing nodes, so be careful. (default: 1, max: 4)
-
-        -local, --in-local-dc
-            Use -local to only repair against nodes in the same datacenter
-
-        -os, --optimise-streams
-            Use --optimise-streams to try to reduce the number of streams we do
-            (EXPERIMENTAL, see CASSANDRA-3200).
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pl, --pull
-            Use --pull to perform a one way repair where data is only streamed
-            from a remote node to this node.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pr, --partitioner-range
-            Use -pr to repair only the first range returned by the partitioner
-
-        -prv, --preview
-            Determine ranges and amount of data to be streamed, but don't
-            actually perform repair
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -seq, --sequential
-            Use -seq to carry out a sequential repair
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the repair range starts
-
-        -tr, --trace
-            Use -tr to trace the repair. Traces are logged to
-            system_traces.events.
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -vd, --validate
-            Checks that repaired data is in sync between nodes. Out of sync
-            repaired data indicates a full repair should be run.
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
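To make the flags above concrete (keyspace and table names are placeholders), a few common `repair` invocations:

```bash
# Full repair of one keyspace, datacenters repaired in parallel
nodetool repair -full -dcpar my_keyspace

# Incremental repair of a single table, limited to the local datacenter
nodetool repair -local my_keyspace my_table

# Preview the ranges and data volume a repair would stream, without repairing
nodetool repair -prv my_keyspace
```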
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/repair_admin.html b/src/doc/4.0-alpha1/tools/nodetool/repair_admin.html deleted file mode 100644 index 3ff8277de..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/repair_admin.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair_admin" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair_admin

-
-
-

Usage

-
NAME
-        nodetool repair_admin - list and fail incremental repair sessions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair_admin [(-a | --all)]
-                [(-f | --force)] [(-l | --list)] [(-x <cancel> | --cancel <cancel>)]
-
-OPTIONS
-        -a, --all
-            include completed and failed sessions
-
-        -f, --force
-            cancel repair session from a node other than the repair coordinator.
-            Attempting to cancel FINALIZED or FAILED sessions is an error.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --list
-            list repair sessions (default behavior)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -x <cancel>, --cancel <cancel>
-            cancel an incremental repair session
-
-
-
-
-
- - - - -
-
-
-
-
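For illustration, typical `repair_admin` usage might look like the following; the session UUID is a placeholder:

```bash
# List active incremental repair sessions (the default action)
nodetool repair_admin --list

# Include completed and failed sessions in the listing
nodetool repair_admin --list --all

# Cancel a session by its id
nodetool repair_admin --cancel 12345678-1234-1234-1234-123456789012
```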
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/replaybatchlog.html b/src/doc/4.0-alpha1/tools/nodetool/replaybatchlog.html deleted file mode 100644 index e6bdee06a..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/replaybatchlog.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "replaybatchlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

replaybatchlog

-
-
-

Usage

-
NAME
-        nodetool replaybatchlog - Kick off batchlog replay and wait for finish
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] replaybatchlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/resetfullquerylog.html b/src/doc/4.0-alpha1/tools/nodetool/resetfullquerylog.html deleted file mode 100644 index 6ad4368b4..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/resetfullquerylog.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetfullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetfullquerylog

-
-
-

Usage

-
NAME
-        nodetool resetfullquerylog - Stop the full query log and clean files in
-        the configured full query log directory from cassandra.yaml as well as
-        JMX
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetfullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/resetlocalschema.html b/src/doc/4.0-alpha1/tools/nodetool/resetlocalschema.html deleted file mode 100644 index a63f1c144..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/resetlocalschema.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetlocalschema

-
-
-

Usage

-
NAME
-        nodetool resetlocalschema - Reset node's local schema and resync
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/resumehandoff.html b/src/doc/4.0-alpha1/tools/nodetool/resumehandoff.html deleted file mode 100644 index 6b52a2a01..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/resumehandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resumehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resumehandoff

-
-
-

Usage

-
NAME
-        nodetool resumehandoff - Resume hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resumehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/ring.html b/src/doc/4.0-alpha1/tools/nodetool/ring.html deleted file mode 100644 index 1b53d9e3b..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/ring.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "ring" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

ring

-
-
-

Usage

-
NAME
-        nodetool ring - Print information about the token ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] ring [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace>
-            Specify a keyspace for accurate ownership information (topology
-            awareness)
-
-
-
-
-
- - - - -
-
-
-
-
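A minimal example of the two forms above; `my_keyspace` is a placeholder name:

```bash
# Token ownership across the whole cluster
nodetool ring

# Ownership for one keyspace, with IP addresses resolved to hostnames
nodetool ring -r -- my_keyspace
```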
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/scrub.html b/src/doc/4.0-alpha1/tools/nodetool/scrub.html deleted file mode 100644 index 33909c577..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/scrub.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "scrub" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

scrub

-
-
-

Usage

-
NAME
-        nodetool scrub - Scrub (rebuild sstables for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] scrub
-                [(-j <jobs> | --jobs <jobs>)] [(-n | --no-validate)]
-                [(-ns | --no-snapshot)] [(-r | --reinsert-overflowed-ttl)]
-                [(-s | --skip-corrupted)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to scrub simultaneously, set to 0 to use all
-            available compaction threads
-
-        -n, --no-validate
-            Do not validate columns using column validator
-
-        -ns, --no-snapshot
-            Skip the pre-scrub snapshot. By default (disableSnapshot is false)
-            the affected tables are snapshotted before scrubbing.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --reinsert-overflowed-ttl
-            Rewrites rows with overflowed expiration date affected by
-            CASSANDRA-14092 with the maximum supported expiration date of
-            2038-01-19T03:14:06+00:00. The rows are rewritten with the original
-            timestamp incremented by one millisecond to override/supersede any
-            potential tombstone that may have been generated during compaction
-            of the affected rows.
-
-        -s, --skip-corrupted
-            Skip corrupted partitions even when scrubbing counter tables.
-            (default false)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
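For illustration only (keyspace and table names are placeholders), typical `scrub` invocations:

```bash
# Rebuild the sstables of one table, two at a time
nodetool scrub -j 2 -- my_keyspace my_table

# Scrub an entire keyspace without taking the pre-scrub snapshot
nodetool scrub --no-snapshot my_keyspace
```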
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setbatchlogreplaythrottle.html b/src/doc/4.0-alpha1/tools/nodetool/setbatchlogreplaythrottle.html deleted file mode 100644 index beb644854..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setbatchlogreplaythrottle.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool setbatchlogreplaythrottle - Set batchlog replay throttle in KB
-        per second, or 0 to disable throttling. This will be reduced
-        proportionally to the number of nodes in the cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setbatchlogreplaythrottle [--]
-                <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
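A short illustration; the throttle value is arbitrary:

```bash
# Throttle batchlog replay to roughly 1 MB/s cluster-wide
nodetool setbatchlogreplaythrottle 1024

# Remove the throttle entirely
nodetool setbatchlogreplaythrottle 0
```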
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setcachecapacity.html b/src/doc/4.0-alpha1/tools/nodetool/setcachecapacity.html deleted file mode 100644 index 5f6f5f49f..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setcachecapacity.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachecapacity" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachecapacity

-
-
-

Usage

-
NAME
-        nodetool setcachecapacity - Set global key, row, and counter cache
-        capacities (in MB units)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachecapacity [--]
-                <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-            Key cache, row cache, and counter cache (in MB)
-
-
-
-
-
- - - - -
-
-
-
-
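For example, with purely illustrative sizes:

```bash
# 100 MB key cache, row cache disabled, 50 MB counter cache
nodetool setcachecapacity 100 0 50
```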
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setcachekeystosave.html b/src/doc/4.0-alpha1/tools/nodetool/setcachekeystosave.html deleted file mode 100644 index cc2649b15..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setcachekeystosave.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachekeystosave" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachekeystosave

-
-
-

Usage

-
NAME
-        nodetool setcachekeystosave - Set number of keys saved by each cache for
-        faster post-restart warmup. 0 to disable
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachekeystosave [--]
-                <key-cache-keys-to-save> <row-cache-keys-to-save>
-                <counter-cache-keys-to-save>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-keys-to-save> <row-cache-keys-to-save>
-        <counter-cache-keys-to-save>
-            The number of keys saved by each cache. 0 to disable
-
-
-
-
-
- - - - -
-
-
-
-
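An illustrative call, with arbitrary values:

```bash
# Save 100 key-cache keys and 10 row-cache keys; never save counter-cache keys
nodetool setcachekeystosave 100 10 0
```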
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setcompactionthreshold.html b/src/doc/4.0-alpha1/tools/nodetool/setcompactionthreshold.html deleted file mode 100644 index d4a6b427c..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setcompactionthreshold.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool setcompactionthreshold - Set min and max compaction thresholds
-        for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthreshold [--]
-                <keyspace> <table> <minthreshold> <maxthreshold>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <minthreshold> <maxthreshold>
-            The keyspace, the table, min and max threshold
-
-
-
-
-
- - - - -
-
-
-
-
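For illustration (keyspace, table and thresholds are placeholders):

```bash
# Compact my_keyspace.my_table once 4 sstables accumulate, never more than 32 at once
nodetool setcompactionthreshold my_keyspace my_table 4 32
```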
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setcompactionthroughput.html b/src/doc/4.0-alpha1/tools/nodetool/setcompactionthroughput.html deleted file mode 100644 index 5c19692f1..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setcompactionthroughput.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool setcompactionthroughput - Set the MB/s throughput cap for
-        compaction in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in MB, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
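A brief example; the cap value is arbitrary:

```bash
# Cap compaction at 64 MB/s
nodetool setcompactionthroughput 64

# Remove the cap
nodetool setcompactionthroughput 0
```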
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setconcurrency.html b/src/doc/4.0-alpha1/tools/nodetool/setconcurrency.html deleted file mode 100644 index 5e92740eb..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setconcurrency.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrency

-
-
-

Usage

-
NAME
-        nodetool setconcurrency - Set maximum concurrency for processing stage
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrency [--]
-                <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-                <maximum-concurrency>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-        <maximum-concurrency>
-            Set concurrency for processing stage
-
-
-
-
-
- - - - -
-
-
-
-
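A sketch of both argument forms; the stage name and values below are purely illustrative:

```bash
# Raise the maximum concurrency of a single stage
nodetool setconcurrency ReadStage 64

# Set the core pool size and the maximum together
nodetool setconcurrency ReadStage 32 64
```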
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setconcurrentcompactors.html b/src/doc/4.0-alpha1/tools/nodetool/setconcurrentcompactors.html deleted file mode 100644 index f4590eba1..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setconcurrentcompactors.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool setconcurrentcompactors - Set number of concurrent compactors
-        in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentcompactors [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent compactors, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
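For example, with an arbitrary value:

```bash
# Allow four compactions to run at the same time
nodetool setconcurrentcompactors 4
```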
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setconcurrentviewbuilders.html b/src/doc/4.0-alpha1/tools/nodetool/setconcurrentviewbuilders.html deleted file mode 100644 index 285be5616..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setconcurrentviewbuilders.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool setconcurrentviewbuilders - Set the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentviewbuilders [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent view builders, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/sethintedhandoffthrottlekb.html b/src/doc/4.0-alpha1/tools/nodetool/sethintedhandoffthrottlekb.html deleted file mode 100644 index bc4511944..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/sethintedhandoffthrottlekb.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sethintedhandoffthrottlekb" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sethintedhandoffthrottlekb

-
-
-

Usage

-
NAME
-        nodetool sethintedhandoffthrottlekb - Set hinted handoff throttle in kb
-        per second, per delivery thread.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sethintedhandoffthrottlekb
-                [--] <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second
-
-
-
-
-
- - - - -
-
-
-
-
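A brief illustration, with an arbitrary rate:

```bash
# Limit hint delivery to 2048 KB/s per delivery thread
nodetool sethintedhandoffthrottlekb 2048
```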
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setinterdcstreamthroughput.html b/src/doc/4.0-alpha1/tools/nodetool/setinterdcstreamthroughput.html deleted file mode 100644 index c9f363608..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setinterdcstreamthroughput.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setinterdcstreamthroughput - Set the Mb/s throughput cap for
-        inter-datacenter streaming in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setinterdcstreamthroughput
-                [--] <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setlogginglevel.html b/src/doc/4.0-alpha1/tools/nodetool/setlogginglevel.html deleted file mode 100644 index f7fd32a87..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setlogginglevel.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setlogginglevel" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setlogginglevel

-
-
-

Usage

-
NAME
-        nodetool setlogginglevel - Set the log level threshold for a given
-        component or class. Will reset to the initial configuration if called
-        with no parameters.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setlogginglevel [--]
-                <component|class> <level>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <component|class> <level>
-            The component or class to change the level for and the log level
-            threshold to set. Will reset to initial level if omitted. Available
-            components: bootstrap, compaction, repair, streaming, cql, ring
-
-
-
-
-
- - - - -
-
-
-
-
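For illustration, using the component names listed above (the class name is only an example):

```bash
# Enable debug logging for the compaction component
nodetool setlogginglevel compaction DEBUG

# Or target a specific logger/class
nodetool setlogginglevel org.apache.cassandra.db DEBUG

# Reset all loggers back to the configured levels
nodetool setlogginglevel
```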
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setmaxhintwindow.html b/src/doc/4.0-alpha1/tools/nodetool/setmaxhintwindow.html deleted file mode 100644 index 077e1afec..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setmaxhintwindow.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool setmaxhintwindow - Set the specified max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setmaxhintwindow [--]
-                <value_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_ms>
-            Value of maxhintwindow in ms
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/setstreamthroughput.html b/src/doc/4.0-alpha1/tools/nodetool/setstreamthroughput.html deleted file mode 100644 index e612fe186..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/setstreamthroughput.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setstreamthroughput - Set the Mb/s throughput cap for streaming
-        in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setstreamthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
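A brief example; the cap value is arbitrary:

```bash
# Cap streaming at 200 Mb/s
nodetool setstreamthroughput 200
```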
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/settimeout.html b/src/doc/4.0-alpha1/tools/nodetool/settimeout.html deleted file mode 100644 index e0326cdc6..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/settimeout.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settimeout

-
-
-

Usage

-
NAME
-        nodetool settimeout - Set the specified timeout in ms, or 0 to disable
-        timeout
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settimeout [--] <timeout_type>
-                <timeout_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type> <timeout_in_ms>
-            Timeout type followed by value in ms (0 disables socket streaming
-            timeout). Type should be one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
-
-
-
-
- - - - -
-
-
-
-
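For illustration, using two of the timeout types listed above and arbitrary values:

```bash
# Set the read timeout to 10 seconds (the value is given in milliseconds)
nodetool settimeout read 10000

# Disable the truncate timeout
nodetool settimeout truncate 0
```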
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/settraceprobability.html b/src/doc/4.0-alpha1/tools/nodetool/settraceprobability.html deleted file mode 100644 index 15df1a08f..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/settraceprobability.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settraceprobability

-
-
-

Usage

-
NAME
-        nodetool settraceprobability - Sets the probability for tracing any
-        given request to value. 0 disables, 1 enables for all requests, 0 is the
-        default
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settraceprobability [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Trace probability between 0 and 1 (ex: 0.2)
-
-
-
-
-
- - - - -
-
-
-
-
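For example, with an arbitrary probability:

```bash
# Trace roughly one request in fifty
nodetool settraceprobability 0.02

# Turn request tracing back off
nodetool settraceprobability 0
```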
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/sjk.html b/src/doc/4.0-alpha1/tools/nodetool/sjk.html deleted file mode 100644 index a6ef085c5..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/sjk.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sjk" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sjk

-
-
-

Usage

-
NAME
-        nodetool sjk - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk
-        --help' for more information.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sjk [--] [<args>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <args>
-            Arguments passed as is to 'Swiss Java Knife'.
-
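As a hedged example (assuming the bundled Swiss Java Knife exposes its usual ttop command; run 'nodetool sjk --help' to confirm what is available), a thread-level CPU view could be requested with:

nodetool sjk ttop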
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/snapshot.html b/src/doc/4.0-alpha1/tools/nodetool/snapshot.html deleted file mode 100644 index c58196b8b..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/snapshot.html +++ /dev/null @@ -1,151 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "snapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

snapshot

-
-
-

Usage

-
NAME
-        nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-        of the specified table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] snapshot
-                [(-cf <table> | --column-family <table> | --table <table>)]
-                [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-                [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-        -cf <table>, --column-family <table>, --table <table>
-            The table name (you must specify one and only one keyspace for using
-            this option)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-            The list of Keyspace.table entries to snapshot (you must not specify
-            a keyspace on its own)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -sf, --skip-flush
-            Do not flush memtables before snapshotting (snapshot will not
-            contain unflushed data)
-
-        -t <tag>, --tag <tag>
-            The name of the snapshot
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspaces...>]
-            List of keyspaces. By default, all keyspaces
-
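As an illustrative example (the keyspace, table and tag names are placeholders), a named snapshot of a single keyspace, and then of a single table via the -cf option, could be taken with:

nodetool snapshot -t backup_2018_08 keyspace1
nodetool snapshot -t backup_2018_08 -cf standard1 keyspace1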
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/status.html b/src/doc/4.0-alpha1/tools/nodetool/status.html deleted file mode 100644 index ab329a726..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/status.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "status" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

status

-
-
-

Usage

-
NAME
-        nodetool status - Print cluster information (state, load, IDs, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] status [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace>]
-            The keyspace name
-
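For illustration (the keyspace name is a placeholder), cluster state for all keyspaces, and then ownership for one keyspace with hostnames resolved, could be printed with:

nodetool status
nodetool status -r keyspace1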
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/statusautocompaction.html b/src/doc/4.0-alpha1/tools/nodetool/statusautocompaction.html deleted file mode 100644 index 75816e649..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/statusautocompaction.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusautocompaction

-
-
-

Usage

-
NAME
-        nodetool statusautocompaction - status of autocompaction of the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusautocompaction
-                [(-a | --all)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --all
-            Show auto compaction status for each keyspace/table
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
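As an illustrative invocation (keyspace and table names are placeholders):

nodetool statusautocompaction keyspace1 standard1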
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/statusbackup.html b/src/doc/4.0-alpha1/tools/nodetool/statusbackup.html deleted file mode 100644 index 8b1e40f90..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/statusbackup.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbackup

-
-
-

Usage

-
NAME
-        nodetool statusbackup - Status of incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/statusbinary.html b/src/doc/4.0-alpha1/tools/nodetool/statusbinary.html deleted file mode 100644 index 584a60093..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/statusbinary.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbinary

-
-
-

Usage

-
NAME
-        nodetool statusbinary - Status of native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/statusgossip.html b/src/doc/4.0-alpha1/tools/nodetool/statusgossip.html deleted file mode 100644 index ae5bc0423..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/statusgossip.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusgossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusgossip

-
-
-

Usage

-
NAME
-        nodetool statusgossip - Status of gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusgossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/statushandoff.html b/src/doc/4.0-alpha1/tools/nodetool/statushandoff.html deleted file mode 100644 index 64c272b7d..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/statushandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statushandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statushandoff

-
-
-

Usage

-
NAME
-        nodetool statushandoff - Status of storing future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statushandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/stop.html b/src/doc/4.0-alpha1/tools/nodetool/stop.html deleted file mode 100644 index 30b7c39a1..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/stop.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stop" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stop

-
-
-

Usage

-
NAME
-        nodetool stop - Stop compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stop
-                [(-id <compactionId> | --compaction-id <compactionId>)] [--] <compaction
-                type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -id <compactionId>, --compaction-id <compactionId>
-            Use -id to stop a compaction by the specified id. Ids can be found
-            in the transaction log files whose name starts with compaction_,
-            located in the table transactions folder.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <compaction type>
-            Supported types are COMPACTION, VALIDATION, CLEANUP, SCRUB, VERIFY,
-            INDEX_BUILD
-
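For illustration, a running compaction could be stopped by type, or by id using the -id option (the UUID shown is a placeholder taken from a transaction log file name):

nodetool stop COMPACTION
nodetool stop -id 5a588dd0-b2d7-11e8-86c6-6d2c86545d91 COMPACTION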
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/stopdaemon.html b/src/doc/4.0-alpha1/tools/nodetool/stopdaemon.html deleted file mode 100644 index 62bf24e4c..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/stopdaemon.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stopdaemon" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stopdaemon

-
-
-

Usage

-
NAME
-        nodetool stopdaemon - Stop cassandra daemon
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stopdaemon
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/tablehistograms.html b/src/doc/4.0-alpha1/tools/nodetool/tablehistograms.html deleted file mode 100644 index e9c0505b0..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/tablehistograms.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablehistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablehistograms

-
-
-

Usage

-
NAME
-        nodetool tablehistograms - Print statistic histograms for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablehistograms [--]
-                [<keyspace> <table> | <keyspace.table>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <table> | <keyspace.table>]
-            The keyspace and table name
-
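As an illustrative example (names are placeholders), either argument form shown in the synopsis could be used:

nodetool tablehistograms keyspace1 standard1
nodetool tablehistograms keyspace1.standard1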
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/tablestats.html b/src/doc/4.0-alpha1/tools/nodetool/tablestats.html deleted file mode 100644 index 43abbc85e..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/tablestats.html +++ /dev/null @@ -1,168 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablestats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablestats

-
-
-

Usage

-
NAME
-        nodetool tablestats - Print statistics on tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablestats
-                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]
-                [(-s <sort_key> | --sort <sort_key>)] [(-t <top> | --top <top>)] [--]
-                [<keyspace.table>...]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -i
-            Ignore the list of tables and display the remaining tables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <sort_key>, --sort <sort_key>
-            Sort tables by specified sort key
-            (average_live_cells_per_slice_last_five_minutes,
-            average_tombstones_per_slice_last_five_minutes,
-            bloom_filter_false_positives, bloom_filter_false_ratio,
-            bloom_filter_off_heap_memory_used, bloom_filter_space_used,
-            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,
-            compacted_partition_minimum_bytes,
-            compression_metadata_off_heap_memory_used, dropped_mutations,
-            full_name, index_summary_off_heap_memory_used, local_read_count,
-            local_read_latency_ms, local_write_latency_ms,
-            maximum_live_cells_per_slice_last_five_minutes,
-            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,
-            memtable_data_size, memtable_off_heap_memory_used,
-            memtable_switch_count, number_of_partitions_estimate,
-            off_heap_memory_used_total, pending_flushes, percent_repaired,
-            read_latency, reads, space_used_by_snapshots_total, space_used_live,
-            space_used_total, sstable_compression_ratio, sstable_count,
-            table_name, write_latency, writes)
-
-        -t <top>, --top <top>
-            Show only the top K tables for the sort key (specify the number K of
-            tables to be shown)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace.table>...]
-            List of table (or keyspace) names
-
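For illustration (keyspace and table names are placeholders), human-readable statistics for one table, and then the ten tables with the highest read latency, could be printed with:

nodetool tablestats -H keyspace1.standard1
nodetool tablestats --sort read_latency --top 10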
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/toppartitions.html b/src/doc/4.0-alpha1/tools/nodetool/toppartitions.html deleted file mode 100644 index e82e67bad..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/toppartitions.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "toppartitions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

toppartitions

-
-
-

Usage

-
NAME
-        nodetool toppartitions - Sample and print the most active partitions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] toppartitions [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
-
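As an illustrative example (names are placeholders), the most active partitions in one table could be sampled for ten seconds (10000 ms) with:

nodetool toppartitions keyspace1 standard1 10000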
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/tpstats.html b/src/doc/4.0-alpha1/tools/nodetool/tpstats.html deleted file mode 100644 index 213b4e992..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/tpstats.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tpstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tpstats

-
-
-

Usage

-
NAME
-        nodetool tpstats - Print usage statistics of thread pools
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tpstats
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
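For illustration, thread pool statistics could be emitted as JSON for easier scripting with:

nodetool tpstats -F json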
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/truncatehints.html b/src/doc/4.0-alpha1/tools/nodetool/truncatehints.html deleted file mode 100644 index cf91a026b..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/truncatehints.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "truncatehints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

truncatehints

-
-
-

Usage

-
NAME
-        nodetool truncatehints - Truncate all hints on the local node, or
-        truncate hints for the endpoint(s) specified.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] truncatehints [--] [endpoint
-                ... ]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [endpoint ... ]
-            Endpoint address(es) to delete hints for, either ip address
-            ("127.0.0.1") or hostname
-
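As an illustrative example (the address is a placeholder), all local hints, or only those destined for one endpoint, could be removed with:

nodetool truncatehints
nodetool truncatehints 192.168.1.20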
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/upgradesstables.html b/src/doc/4.0-alpha1/tools/nodetool/upgradesstables.html deleted file mode 100644 index c18846edc..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/upgradesstables.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "upgradesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

upgradesstables

-
-
-

Usage

-
NAME
-        nodetool upgradesstables - Rewrite sstables (for the requested tables)
-        that are not on the current version (thus upgrading them to said current
-        version)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] upgradesstables
-                [(-a | --include-all-sstables)] [(-j <jobs> | --jobs <jobs>)] [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --include-all-sstables
-            Use -a to include all sstables, even those already on the current
-            version
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to upgrade simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
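For illustration (the keyspace name is a placeholder), every sstable in a keyspace, including those already on the current version, could be rewritten using two concurrent jobs with:

nodetool upgradesstables -a -j 2 keyspace1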
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/verify.html b/src/doc/4.0-alpha1/tools/nodetool/verify.html deleted file mode 100644 index 4811ff1c7..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/verify.html +++ /dev/null @@ -1,153 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "verify" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

verify

-
-
-

Usage

-
NAME
-        nodetool verify - Verify (check data checksum for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] verify
-                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]
-                [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)] [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -c, --check-version
-            Also check that all sstables are the latest version
-
-        -d, --dfp
-            Invoke the disk failure policy if a corrupt sstable is found
-
-        -e, --extended-verify
-            Verify each cell data, beyond simply checking sstable checksums
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick check - avoid reading all data to verify checksums
-
-        -r, --rsc
-            Mutate the repair status on corrupt sstables
-
-        -t, --check-tokens
-            Verify that all tokens in sstables are owned by this node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
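As an illustrative example (names are placeholders), an extended verification of one table could be run with:

nodetool verify -e keyspace1 standard1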
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/version.html b/src/doc/4.0-alpha1/tools/nodetool/version.html deleted file mode 100644 index 62d6b4c32..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/version.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "version" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

version

-
-
-

Usage

-
NAME
-        nodetool version - Print cassandra version
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] version
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/nodetool/viewbuildstatus.html b/src/doc/4.0-alpha1/tools/nodetool/viewbuildstatus.html deleted file mode 100644 index 7851f0000..000000000 --- a/src/doc/4.0-alpha1/tools/nodetool/viewbuildstatus.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "viewbuildstatus" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

viewbuildstatus

-
-
-

Usage

-
NAME
-        nodetool viewbuildstatus - Show progress of a materialized view build
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] viewbuildstatus [--]
-                <keyspace> <view> | <keyspace.view>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <view> | <keyspace.view>
-            The keyspace and view name
-
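For illustration (the keyspace and view names are placeholders), either argument form from the synopsis could be used:

nodetool viewbuildstatus keyspace1 users_by_email
nodetool viewbuildstatus keyspace1.users_by_email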
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/index.html b/src/doc/4.0-alpha1/tools/sstable/index.html deleted file mode 100644 index 974a9c549..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/index.html +++ /dev/null @@ -1,228 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "SSTable Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

SSTable Tools

-

This section describes the functionality of the various sstable tools.

-

Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped.

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstabledump.html b/src/doc/4.0-alpha1/tools/sstable/sstabledump.html deleted file mode 100644 index ba34d4542..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstabledump.html +++ /dev/null @@ -1,403 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstabledump" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstabledump

-

Dump contents of a given SSTable to standard output in JSON format.

-

You must supply exactly one sstable.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstabledump <options> <sstable file path>

- ---- - - - - - - - - - - - - - - - - - - - - -
-d          CQL row per line internal representation
-e          Enumerate partition keys only
-k <arg>    Partition key
-x <arg>    Excluded partition key(s)
-t          Print raw timestamps instead of iso8601 date strings
-l          Output each row as a separate JSON object
-

If necessary, use sstableutil first to find out the sstables used by a table.

-
-
-

Dump entire table

-

Dump the entire table without any options.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26
-
-cat eventlog_dump_2018Jul26
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-]
-
-
-
-
-

Dump table in a more manageable format

-

Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. ref: https://issues.apache.org/jira/browse/CASSANDRA-13848

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines
-
-cat eventlog_dump_2018Jul26_justlines
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Dump only keys

-

Dump only the keys by using the -e option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys
-
-cat eventlog_dump_2018Jul26_justkeys
-[ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ]
-
-
-
-
-

Dump row for a single key

-

Dump a single key using the -k option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey
-
-cat eventlog_dump_2018Jul26_singlekey
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Exclude a key or keys in dump of rows

-

Dump a table except for the rows excluded with the -x option. Multiple keys can be used.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e  > eventlog_dump_2018Jul26_excludekeys
-
-cat eventlog_dump_2018Jul26_excludekeys
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display raw timestamps

-

By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times
-
-cat eventlog_dump_2018Jul26_times
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "1532118147028809" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display internal structure in output

-

Dump the table in a format that reflects the internal structure.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d
-
-cat eventlog_dump_2018Jul26_d
-[3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]:  | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711]
-[d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]:  | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522]
-[cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]:  | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809]
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstableexpiredblockers.html b/src/doc/4.0-alpha1/tools/sstable/sstableexpiredblockers.html deleted file mode 100644 index 468ba2712..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstableexpiredblockers.html +++ /dev/null @@ -1,148 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableexpiredblockers" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableexpiredblockers

-

During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable.

-

This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-10015

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableexpiredblockers <keyspace> <table>

-
-
-

Output blocked sstables

-

If the sstables exist for the table, but no tables have older data than the newest tombstone in an expired sstable, the script will return nothing.

-

Otherwise, the script will return <sstable> blocks <#> expired sstables from getting dropped followed by a list of the blocked sstables.

-

Example:

-
sstableexpiredblockers keyspace1 standard1
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstablelevelreset.html b/src/doc/4.0-alpha1/tools/sstable/sstablelevelreset.html deleted file mode 100644 index 3a4f73ff1..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstablelevelreset.html +++ /dev/null @@ -1,174 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablelevelreset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablelevelreset

-

If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration.

-

See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5271

-
-

Usage

-

sstablelevelreset --really-reset <keyspace> <table>

-

The --really-reset flag is required to ensure this intrusive command is not run accidentally.

-
-
-

Table not found

-

If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error.

-

Example:

-
ColumnFamily not found: keyspace/evenlog.
-
-
-
-
-

Table has no sstables

-

Example:

-
Found no sstables, did you give the correct keyspace/table?
-
-
-
-
-

Table already at level 0

-

The script will not set the level if it is already set to 0.

-

Example:

-
Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0
-
-
-
-
-

Table levels reduced to 0

-

If the level is not already 0, then this will reset it to 0.

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 1
-
-sstablelevelreset --really-reset keyspace eventlog
-Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 0
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstableloader.html b/src/doc/4.0-alpha1/tools/sstable/sstableloader.html deleted file mode 100644 index 06f5f2b8c..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstableloader.html +++ /dev/null @@ -1,408 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableloader" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableloader

-

Bulk-load the sstables found in the directory <dir_path> to the configured cluster. The parent directories of <dir_path> are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files.

-

Several of the options listed below don’t work quite as intended, and in those cases, workarounds are mentioned for specific use cases.

-

To avoid having the sstable files that are being loaded compacted while they are read, place the files in an alternate keyspace/table path outside the data directory.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-1278

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableloader <options> <dir_path>

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-d, --nodes <initial hosts>                          Required. Try to connect to these hosts (comma separated) initially for ring information
-u, --username <username>                            username for Cassandra authentication
-pw, --password <password>                           password for Cassandra authentication
-p, --port <native transport port>                   port used for native connection (default 9042)
-sp, --storage-port <storage port>                   port used for internode communication (default 7000)
-ssp, --ssl-storage-port <ssl storage port>          port used for TLS internode communication (default 7001)
--no-progress                                        don't display progress
-t, --throttle <throttle>                            throttle speed in Mbits (default unlimited)
-idct, --inter-dc-throttle <inter-dc-throttle>       inter-datacenter throttle speed in Mbits (default unlimited)
-cph, --connections-per-host <connectionsPerHost>    number of concurrent connections-per-host
-i, --ignore <NODES>                                 don't stream to this (comma separated) list of nodes
-alg, --ssl-alg <ALGORITHM>                          Client SSL: algorithm (default: SunX509)
-ciphers, --ssl-ciphers <CIPHER-SUITES>              Client SSL: comma-separated list of encryption suites to use
-ks, --keystore <KEYSTORE>                           Client SSL: full path to keystore
-kspw, --keystore-password <KEYSTORE-PASSWORD>       Client SSL: password of the keystore
-st, --store-type <STORE-TYPE>                       Client SSL: type of store
-ts, --truststore <TRUSTSTORE>                       Client SSL: full path to truststore
-tspw, --truststore-password <TRUSTSTORE-PASSWORD>   Client SSL: password of the truststore
-prtcl, --ssl-protocol <PROTOCOL>                    Client SSL: connections protocol to use (default: TLS)
-ap, --auth-provider <auth provider>                 custom AuthProvider class name for cassandra authentication
-f, --conf-path <path to config file>                cassandra.yaml file path for streaming throughput and client/server SSL
-v, --verbose                                        verbose output
-h, --help                                           display this help message
-

You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options.

-
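As a hedged example combining the options above (the host, throttle value and directory path are placeholders), streaming could be capped at 100 Mbits while reading encryption settings from cassandra.yaml:

sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml --throttle 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/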
-
-

Load sstables from a Snapshot

-

Copy the snapshot sstables into an accessible directory and use sstableloader to restore them.

-

Example:

-
cp snapshots/1535397029191/* /path/to/keyspace1/standard1/
-
-sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4700000
-   Total duration (ms):          : 4390
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

The -d or --nodes option is required, or the script will not run.

-

Example:

-
sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Initial hosts must be specified (-d)
-
-
-
-
-

Use a Config File for SSL Clusters

-

If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool at the cassandra.yaml containing the relevant server_encryption_options (e.g., truststore location, algorithm). This works better than passing the individual SSL options shown above to sstableloader on the command line.

-

Example:

-
sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 9.165KiB/s (avg: 9.165KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 5.147MiB/s (avg: 18.299KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 9.751MiB/s (avg: 27.423KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 8.203MiB/s (avg: 36.524KiB/s)
-...
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 9356 ms
-   Average transfer rate   : 480.105KiB/s
-   Peak transfer rate      : 586.410KiB/s
-
-
-
-
-

Hide Progress Output

-

To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option.

-

Example:

-
sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2]
-
-
-
-
-

Get More Detail

-

Using the --verbose option will provide much more progress output.

-

Example:

-
sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 12.056KiB/s (avg: 12.056KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 9.092MiB/s (avg: 24.081KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 18.832MiB/s (avg: 36.099KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 2.253MiB/s (avg: 47.882KiB/s)
-progress: [/172.17.0.2]0:0/1 7  % total: 7% 6.388MiB/s (avg: 59.743KiB/s)
-progress: [/172.17.0.2]0:0/1 8  % total: 8% 14.606MiB/s (avg: 71.635KiB/s)
-progress: [/172.17.0.2]0:0/1 9  % total: 9% 8.880MiB/s (avg: 83.465KiB/s)
-progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s)
-progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s)
-progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s)
-progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s)
-progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s)
-progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s)
-progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s)
-progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s)
-progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s)
-progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s)
-progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s)
-progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s)
-progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s)
-progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s)
-progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s)
-progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s)
-progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s)
-progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s)
-progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s)
-progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s)
-progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s)
-progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s)
-progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s)
-progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s)
-progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s)
-progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s)
-progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s)
-progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s)
-progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s)
-progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s)
-progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s)
-progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s)
-progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s)
-progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s (avg: 462.545KiB/s)
-progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s)
-progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s)
-progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s)
-progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s)
-progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s)
-progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s)
-progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s)
-progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s)
-progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s)
-progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s)
-progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s)
-progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s)
-progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s)
-progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s)
-progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s)
-progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s)
-progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s)
-progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s)
-progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s)
-progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s)
-progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s)
-progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s)
-progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s)
-progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s)
-progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s)
-progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s)
-progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s)
-progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s)
-progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 6706 ms
-   Average transfer rate   : 669.835KiB/s
-   Peak transfer rate      : 767.802KiB/s
-
-
-
-
-

Throttling Load

-

To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below.

-

Example:

-
sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 0 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 37634
-   Average transfer rate (MB/s): : 0
-   Peak transfer rate (MB/s):    : 0
-
-
-
-
-

Speeding up Load

-

To speed up the load process, the number of connections per host can be increased.

-

Example:

-
sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 100
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 3486
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

This small data set doesn’t benefit much from the increase in connections per host, but note that the total duration has decreased in this example.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstablemetadata.html b/src/doc/4.0-alpha1/tools/sstable/sstablemetadata.html deleted file mode 100644 index aee726e60..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstablemetadata.html +++ /dev/null @@ -1,472 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablemetadata" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablemetadata

-

Print information about an sstable from the related Statistics.db and Summary.db files to standard output.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablemetadata <options> <sstable filename(s)>

- ---- - - - - - -
–gc_grace_seconds <arg>The gc_grace_seconds to use when calculating droppable tombstones
-
- -
-

Specify gc grace seconds

-

To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn't access the schema directly, this is a way to more accurately estimate droppable tombstones, for example by passing in a gc_grace_seconds value matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the current machine time (in seconds).

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-12208

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4
-Estimated tombstone drop times:
-1536599100:         1
-1536599640:         1
-1536599700:         2
-
-echo $(date +%s)
-1536602005
-
-# if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 4.0E-5
-
-# if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 9.61111111111111E-6
-
-# if gc_grace_seconds was configured at 100, none of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 0.0
-
-
-
-
-

Explanation of each value printed above

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ValueExplanation
SSTableprefix of the sstable filenames related to this sstable
Partitionerpartitioner type used to distribute data across nodes; defined in cassandra.yaml
Bloom Filter FPprecision of Bloom filter used in reads; defined in the table definition
Minimum timestampminimum timestamp of any entry in this sstable, in epoch microseconds
Maximum timestampmaximum timestamp of any entry in this sstable, in epoch microseconds
SSTable min local deletion timeminimum timestamp of deletion date, based on TTL, in epoch seconds
SSTable max local deletion timemaximum timestamp of deletion date, based on TTL, in epoch seconds
Compressorblank (-) by default; if not blank, indicates type of compression enabled on the table
TTL mintime-to-live in seconds; default 0 unless defined in the table definition
TTL maxtime-to-live in seconds; default 0 unless defined in the table definition
First tokenlowest token and related key found in the sstable summary
Last tokenhighest token and related key found in the sstable summary
Estimated droppable tombstonesratio of tombstones to columns, using configured gc grace seconds if relevant
SSTable levelcompaction level of this sstable, if leveled compaction (LCS) is used
Repaired atthe timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds
Replay positions coveredthe interval of time and commitlog positions related to this sstable
totalColumnsSetnumber of cells in the table
totalRowsnumber of rows in the table
Estimated tombstone drop timesapproximate number of rows that will expire, ordered by epoch seconds
Count Row Size Cell Counttwo histograms in two columns; one represents distribution of Row Size -and the other represents distribution of Cell Count
Estimated cardinalityan estimate of unique values, used for compaction
EncodingStats* minTTLin epoch milliseconds
EncodingStats* minLocalDeletionTimein epoch seconds
EncodingStats* minTimestampin epoch microseconds
KeyTypethe type of partition key, useful in reading and writing data -from/to storage; defined in the table definition
ClusteringTypesthe type of clustering key, useful in reading and writing data -from/to storage; defined in the table definition
StaticColumnsa list of the shared columns in the table
RegularColumnsa list of non-static, non-key columns in the table
-
    -
  • For the encoding stats values, the delta between these values and the current epoch time is used when encoding and storing data in the most optimal way.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstableofflinerelevel.html b/src/doc/4.0-alpha1/tools/sstable/sstableofflinerelevel.html deleted file mode 100644 index 2d1491a25..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstableofflinerelevel.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableofflinerelevel" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableofflinerelevel

-

When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-8301

-

It works as follows: the sstables are sorted by their last token. Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size):

-
L3 [][][][][][][][][][][]
-L2 [    ][    ][    ][  ]
-L1 [          ][        ]
-L0 [                    ]
-
-
-

After being dropped to L0 and sorted by last token, the sstables will look like this (to illustrate overlap, the overlapping ones are put on a new line):

-
[][][]
-[    ][][][]
-    [    ]
-[          ]
-...
-
-
-

Then, we start iterating from the smallest last-token and adding all sstables that do not cause an overlap to a level. We will reconstruct the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below.

-

If we end up with more levels than expected, all levels exceeding the expected count are put in L0. For example, the original L0 files will most likely end up in a level of their own, since they usually overlap many other sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableofflinerelevel [--dry-run] <keyspace> <table>

-
-
-

Doing a dry run

-

Use the --dry-run option to see the current level distribution and predicted level after the change.

-

Example:

-
sstableofflinerelevel --dry-run keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-Potential leveling:
-L0=1
-L1=1
-
-
-
-
-

Running a relevel

-

Example:

-
sstableofflinerelevel keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-New leveling:
-L0=1
-L1=1
-
-
-
-
-

Keyspace or table not found

-

If an invalid keyspace and/or table is provided, an exception will be thrown.

-

Example:

-
sstableofflinerelevel --dry-run keyspace evenlog
-
-Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog
-    at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstablerepairedset.html b/src/doc/4.0-alpha1/tools/sstable/sstablerepairedset.html deleted file mode 100644 index e8de2c988..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstablerepairedset.html +++ /dev/null @@ -1,192 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablerepairedset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablerepairedset

-

Repairs can take a very long time in some environments, for large sizes of data. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can be run on only un-repaired sstables if desired.

-

Note that running a repair (e.g., via nodetool repair) doesn't set this metadata; it is only set by running this tool.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5351

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablerepairedset --really-set <options> [-f <sstable-list> | <sstables>]

- ---- - - - - - - - - - - - - - - -
–really-setrequired if you want to really set the status
–is-repairedset the repairedAt status to the last modified time
–is-unrepairedset the repairedAt status to 0
-fuse a file containing a list of sstables as the input
-
-
-

Set a lot of sstables to unrepaired status

-

There are many ways to do this programmatically; in practice, the keyspace and table would likely be shell variables (a parameterised variant is sketched after the example below).

-

Example:

-
find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %
-
-
-
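A parameterised sketch of the same command, assuming KEYSPACE, TABLE and DATA_DIR are placeholder shell variables set for your environment (the trailing glob matches the table directory's UUID suffix):

KEYSPACE=keyspace1
TABLE=standard1
DATA_DIR=/var/lib/cassandra/data

# mark every sstable belonging to the table as unrepaired
find "${DATA_DIR}/${KEYSPACE}/${TABLE}-"* -name "*Data.db" -print0 | \
    xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %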
-
-

Set one to many sstables to repaired status

-

Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice.

-

Example:

-
nodetool repair keyspace1 standard1
-find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired %
-
-
-
- -
-

Using command in a script

-

If you know you ran repair 2 weeks ago, you can do something like the following:

-
sstablerepairedset --really-set --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstablescrub.html b/src/doc/4.0-alpha1/tools/sstable/sstablescrub.html deleted file mode 100644 index b102fca7e..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstablescrub.html +++ /dev/null @@ -1,210 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablescrub" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablescrub

-

Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4321

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablescrub <options> <keyspace> <table>

- ---- - - - - - - - - - - - - - - - - - - - - - - - -
–debugdisplay stack traces
-h,–helpdisplay this help message
-m,–manifest-checkonly check and repair the leveled manifest, without actually scrubbing the sstables
-n,–no-validatedo not validate columns using column validator
-r,–reinsert-overflowed-ttlRewrites rows with overflowed expiration date affected by CASSANDRA-14092 -with the maximum supported expiration date of 2038-01-19T03:14:06+00:00. The rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows.
-s,–skip-corruptedskip corrupt rows in counter tables
-v,–verboseverbose output
-
-
-

Basic Scrub

-

Run without options, scrub will take a snapshot first, then write all non-corrupted data to a new sstable.

-

Example:

-
sstablescrub keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped
-Checking leveled manifest
-
-
-
-
-

Scrub without Validation

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-9406

-

Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but is not corrupt. This data usually doesn't present any errors to the client.

-

Example:

-
sstablescrub --no-validate keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned
-
-
-
-
-

Skip Corrupted Counter Tables

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5930

-

If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+.

-

Example:

-
sstablescrub --skip-corrupted keyspace1 counter1
-
-
-
-
-

Dealing with Overflow Dates

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-14092

-

Using the --reinsert-overflowed-ttl option rewrites rows whose TTL-based expiration date went over the maximum supported date (causing an overflow).

-

Example:

-
sstablescrub --reinsert-overflowed-ttl keyspace1 counter1
-
-
-
-
-

Manifest Check

-

As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstablesplit.html b/src/doc/4.0-alpha1/tools/sstable/sstablesplit.html deleted file mode 100644 index 67881a8a6..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstablesplit.html +++ /dev/null @@ -1,201 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablesplit" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablesplit

-

Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4766

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablesplit <options> <filename>

- ---- - - - - - - - - - - - - - - -
–debugdisplay stack traces
-h, –helpdisplay this help message
–no-snapshotdon’t snapshot the sstables before splitting
-s, –size <size>maximum size in MB for the output sstables (default: 50)
-

This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped.

-
-
-

Split a File

-

Split a large sstable into smaller sstables. By default, unless the --no-snapshot option is added, a snapshot of the original sstable will be taken and placed in the snapshots folder.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-Pre-split sstables snapshotted into snapshot pre-split-1533144514795
-
-
-
-
-

Split Multiple Files

-

Wildcards can be used in the filename portion of the command to split multiple files.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1*
-
-
-
-
-

Attempt to Split a Small File

-

If the file is already smaller than the split size provided, the sstable will not be split.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB)
-No sstables needed splitting.
-
-
-
-
-

Split a File into Specified Size

-

The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB). Specify only the number, not the units. For example --size 50 is correct, but --size 50MB is not.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db
-Pre-split sstables snapshotted into snapshot pre-split-1533144996008
-
-
-
-
-

Split Without Snapshot

-

By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it.

-

Example:

-
sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db
-
-
-

Note: There is no output, but you can see the results in your file system.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstableupgrade.html b/src/doc/4.0-alpha1/tools/sstable/sstableupgrade.html deleted file mode 100644 index b6df310b4..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstableupgrade.html +++ /dev/null @@ -1,248 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableupgrade" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableupgrade

-

Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version.

-

The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableupgrade <options> <keyspace> <table> [snapshot_name]

- ---- - - - - - - - - - - - -
–debugdisplay stack traces
-h,–helpdisplay this help message
-k,–keep-sourcedo not delete the source sstables
-
-
-

Rewrite tables to the current Cassandra version

-

Start with a set of sstables in one version of Cassandra:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables:

-
sstableupgrade keyspace1 standard1
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 13:48 backups
--rw-r--r--   1 user  wheel      292 Aug 22 13:48 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4599475 Aug 22 13:48 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:48 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 13:48 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330807 Aug 22 13:48 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 13:48 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 13:48 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 13:48 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite tables to the current Cassandra version, and keep tables in old version

-

Again, starting with a set of sstables in one version:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:

-
sstableupgrade keyspace1 standard1 -k
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 14:00 backups
--rw-r--r--@  1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--@  1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--@  1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--@  1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--@  1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--@  1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--@  1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--@  1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
--rw-r--r--   1 user  wheel      292 Aug 22 14:01 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4596370 Aug 22 14:01 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 14:01 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 14:01 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330801 Aug 22 14:01 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 14:01 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 14:01 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 14:01 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite a snapshot to the current Cassandra version

-

Find the snapshot name:

-
nodetool listsnapshots
-
-Snapshot Details:
-Snapshot name       Keyspace name                Column family name           True size          Size on disk
-...
-1534962986979       keyspace1                    standard1                    5.85 MB            5.85 MB
-
-
-

Then rewrite the snapshot:

-
sstableupgrade keyspace1 standard1 1534962986979
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete.
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstableutil.html b/src/doc/4.0-alpha1/tools/sstable/sstableutil.html deleted file mode 100644 index 5f08cab39..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstableutil.html +++ /dev/null @@ -1,204 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableutil" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableutil

-

List sstable files for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7066

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableutil <options> <keyspace> <table>

- ---- - - - - - - - - - - - - - - - - - - - - -
-c, –cleanupclean up any outstanding transactions
-d, –debugdisplay stack traces
-h, –helpdisplay this help message
-o, –oploginclude operation logs
-t, –type <arg>all (list all files, final or temporary), tmp (list temporary files only), -final (list final files only),
-v, –verboseverbose output
-
-
-

List all sstables

-

The basic command lists the sstables associated with a given keyspace/table.

-

Example:

-
sstableutil keyspace eventlog
-Listing files...
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt
-
-
-
-
-

List only temporary sstables

-

Using the -t option followed by tmp will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra.

-
-
-

List only final sstables

-

Using the -t option followed by final will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option.

-
-
-

Include transaction logs

-

Using the -o option will include transaction logs in the listing, in the format above.

-
-
-

Clean up sstables

-

Using the -c option removes any transactions left over from incomplete writes or compactions.

-

From the 3.0 upgrade notes:

-

New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix “add:” or “remove:”. They also contain a special line “commit”, only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the “add” prefix) and delete the old sstables (those with the “remove” prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first.

-
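For example, cleaning up leftover transactions for a single table after restoring a backup might look like this (the keyspace and table names are placeholders):

sstableutil --cleanup keyspace eventlog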
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/tools/sstable/sstableverify.html b/src/doc/4.0-alpha1/tools/sstable/sstableverify.html deleted file mode 100644 index fe642681e..000000000 --- a/src/doc/4.0-alpha1/tools/sstable/sstableverify.html +++ /dev/null @@ -1,204 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableverify" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableverify

-

Check sstable(s) for errors or corruption, for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5791

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableverify <options> <keyspace> <table>

- ---- - - - - - - - - - - - - - - -
–debugdisplay stack traces
-e, –extendedextended verification
-h, –helpdisplay this help message
-v, –verboseverbose output
-
-
-

Basic Verification

-

This is the basic verification. It is not a very quick process, and uses memory. You might need to increase your memory settings if you have many sstables.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-
-
-
-
-

Extended Verification

-

During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time.

-

Example:

-
root@DC1C1:/# sstableverify -e keyspace eventlog
-WARN  14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully
-
-
-
-
-

Corrupted File

-

Corrupted files are listed if they are detected by the script.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db
-
-
-

A similar (but less verbose) tool will show the suggested actions:

-
nodetool verify keyspace eventlog
-error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/troubleshooting/finding_nodes.html b/src/doc/4.0-alpha1/troubleshooting/finding_nodes.html deleted file mode 100644 index 3a2cfed8a..000000000 --- a/src/doc/4.0-alpha1/troubleshooting/finding_nodes.html +++ /dev/null @@ -1,240 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Find The Misbehaving Nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Find The Misbehaving Nodes

-

The first step to troubleshooting a Cassandra issue is to use error messages, metrics and monitoring information to identify whether the issue lies with the clients or the server, and, if it lies with the server, to find the problematic nodes in the Cassandra cluster. The goal is to determine whether this is a systemic issue (e.g. a query pattern that affects the entire cluster) or isolated to a subset of nodes (e.g. neighbors holding a shared token range, or even a single node with bad hardware).

-

There are many sources of information that help determine where the problem -lies. Some of the most common are mentioned below.

-
-

Client Logs and Errors

-

Clients of the cluster often leave the best breadcrumbs to follow. Perhaps client latencies or error rates have increased in a particular datacenter (likely eliminating other datacenters' nodes), or clients are receiving a particular kind of error code indicating a particular kind of problem. Troubleshooters can often rule out many failure modes just by reading the error messages. In fact, many Cassandra error messages include the last coordinator contacted, to help operators find nodes to start with.

-

Some common errors (likely culprit in parentheses), assuming the client has error names similar to the DataStax drivers:

-
    -
  • SyntaxError (client). This and other QueryValidationException -indicate that the client sent a malformed request. These are rarely server -issues and usually indicate bad queries.
  • -
  • UnavailableException (server): This means that the Cassandra coordinator node has rejected the query because it believes that insufficient replica nodes are available. If many coordinators are throwing this error, it likely means that there really are (typically) multiple nodes down in the cluster, and you can identify them using nodetool status (see the example after this list). If only a single coordinator is throwing this error, it may mean that node has been partitioned from the rest.
  • -
  • OperationTimedOutException (server): This is the most frequent timeout message raised when clients set timeouts. It is a client-side timeout, meaning the query took longer than the client-specified timeout. The error message will include the coordinator node that was last tried, which is usually a good starting point. This error usually indicates either aggressive client timeout values or latent server coordinators/replicas.
  • -
  • ReadTimeoutException or WriteTimeoutException (server): These are raised when clients do not specify lower timeouts and the coordinator times out based on the values supplied in the cassandra.yaml configuration file. They usually indicate a serious server-side problem, as the default values are usually multiple seconds.
  • -
-
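For example, a quick way to list any down nodes from a reachable node is the sketch below; in nodetool status output the status column starts with DN for nodes that are Down/Normal:

nodetool status | grep '^DN'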
-
-

Metrics

-

If you have Cassandra metrics reporting to a -centralized location such as Graphite or -Grafana you can typically use those to narrow down -the problem. At this stage narrowing down the issue to a particular -datacenter, rack, or even group of nodes is the main goal. Some helpful metrics -to look at are:

-
-

Errors

-

Cassandra refers to internode messaging errors as “drops”, and provides a number of Dropped Message Metrics to help narrow down errors. If particular nodes are actively dropping messages, they are likely related to the issue.

-
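One quick way to check for drops from a shell on a suspect node is nodetool tpstats, whose output ends with a per-message-type dropped count (a sketch; the exact layout varies between versions):

nodetool tpstats | grep -A 20 -i dropped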
-
-

Latency

-

For timeouts or latency related issues you can start with Table -Metrics by comparing Coordinator level metrics e.g. -CoordinatorReadLatency or CoordinatorWriteLatency with their associated -replica metrics e.g. ReadLatency or WriteLatency. Issues usually show -up on the 99th percentile before they show up on the 50th percentile or -the mean. While maximum coordinator latencies are not typically very -helpful due to the exponentially decaying reservoir used internally to produce -metrics, maximum replica latencies that correlate with increased 99th -percentiles on coordinators can help narrow down the problem.

-
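For a quick look at replica-side latency percentiles on a particular node, nodetool can print per-table histograms (a sketch; keyspace1 and standard1 are placeholder names):

nodetool tablehistograms keyspace1 standard1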

There are usually three main possibilities:

-
    -
  1. Coordinator latencies are high on all nodes, but only a few nodes' local read latencies are high. This points to slow replica nodes, and the coordinators are just side-effects. This usually happens when clients are not token aware.
  2. -
  3. Coordinator latencies and replica latencies increase at the same time on a few nodes. If clients are token aware this is almost always what happens, and it points to slow replicas of a subset of token ranges (only part of the ring).
  4. -
  5. Coordinator and local latencies are high on many nodes. This usually -indicates either a tipping point in the cluster capacity (too many writes or -reads per second), or a new query pattern.
  6. -
-

It’s important to remember that, depending on the client’s load balancing behavior and consistency levels, coordinator and replica metrics may or may not correlate. In particular, if you use TokenAware policies the same node’s coordinator and replica latencies will often increase together, but if you just use normal DCAwareRoundRobin, coordinator latencies can increase with unrelated replica nodes’ latencies. For example:

-
    -
  • TokenAware + LOCAL_ONE: should always have coordinator and replica -latencies on the same node rise together
  • -
  • TokenAware + LOCAL_QUORUM: should always have coordinator and -multiple replica latencies rise together in the same datacenter.
  • -
  • TokenAware + QUORUM: replica latencies in other datacenters can -affect coordinator latencies.
  • -
  • DCAwareRoundRobin + LOCAL_ONE: coordinator latencies and unrelated replica nodes’ latencies will rise together.
  • -
  • DCAwareRoundRobin + LOCAL_QUORUM: different coordinator and replica -latencies will rise together with little correlation.
  • -
-
-
-

Query Rates

-

Sometimes the Table query rate metrics can help narrow down load issues, as a “small” increase in coordinator queries per second (QPS) may correlate with a very large increase in replica-level QPS. This most often happens with BATCH writes, where a client may send a single BATCH query that might contain 50 statements in it, which if you have 9 copies (RF=3, three datacenters) means that every coordinator BATCH write turns into 450 replica writes! This is why keeping BATCHes to the same partition is so critical, otherwise you can exhaust significant CPU capacity with a “single” query.

-
-
-
-

Next Step: Investigate the Node(s)

-

Once you have narrowed down the problem as much as possible (datacenter, rack, node), log in to one of the nodes using SSH and proceed to debug using logs, nodetool, and os tools. If you are not able to log in you may still have access to logs and nodetool remotely.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/troubleshooting/index.html b/src/doc/4.0-alpha1/troubleshooting/index.html deleted file mode 100644 index 9ab5af5e1..000000000 --- a/src/doc/4.0-alpha1/troubleshooting/index.html +++ /dev/null @@ -1,147 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Troubleshooting" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Troubleshooting

-

Like any distributed database, Cassandra sometimes breaks and you will have to troubleshoot what is going on. Generally speaking you can debug Cassandra like any other distributed Java program, meaning that you have to find which machines in your cluster are misbehaving and then isolate the problem using logs and tools. Luckily Cassandra has a great set of introspection tools to help you.

-

These pages include a number of command examples demonstrating various debugging and analysis techniques, mostly for Linux/Unix systems. If you don't have access to the machines running Cassandra, or are running on Windows or another operating system, you may not be able to use the exact commands, but there are likely equivalent tools you can use.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/troubleshooting/reading_logs.html b/src/doc/4.0-alpha1/troubleshooting/reading_logs.html deleted file mode 100644 index 158740a3a..000000000 --- a/src/doc/4.0-alpha1/troubleshooting/reading_logs.html +++ /dev/null @@ -1,350 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Cassandra Logs" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Logs

-

Cassandra has rich support for logging and attempts to give operators maximum -insight into the database while at the same time limiting noise to the logs.

-
-

Common Log Files

-

Cassandra has three main logs, the system.log, debug.log and -gc.log which hold general logging messages, debugging logging messages, and -java garbage collection logs respectively.

-

These logs by default live in ${CASSANDRA_HOME}/logs, but most Linux -distributions relocate logs to /var/log/cassandra. Operators can tune -this location as well as what levels are logged using the provided -logback.xml file.

-
-

system.log

-

This log is the default Cassandra log and is a good place to start any -investigation. Some examples of activities logged to this log:

-
  • Uncaught exceptions. These can be very useful for debugging errors.
  • GCInspector messages indicating long garbage collector pauses. When long pauses happen Cassandra will print how long the pause was and also what the state of the system (thread state) was at the time of that pause. This can help narrow down a capacity issue (either not enough heap or not enough spare CPU).
  • Information about nodes joining and leaving the cluster as well as token metadata (data ownership) changes. This is useful for debugging network partitions, data movements, and more.
  • Keyspace/Table creation, modification, deletion.
  • StartupChecks that ensure optimal configuration of the operating system to run Cassandra.
  • Information about some background operational tasks (e.g. Index Redistribution).
-

As with any application, looking for ERROR or WARN lines can be a -great first step:

-
$ # Search for warnings or errors in the latest system.log
-$ grep 'WARN\|ERROR' system.log | tail
-...
-
-$ # Search for warnings or errors in all rotated system.log
-$ zgrep 'WARN\|ERROR' system.log.* | less
-...
-
-
-
-
-

debug.log

-

This log contains additional debugging information that may be useful when troubleshooting, but may be much noisier than the normal system.log. Some examples of activities logged to this log:

-
    -
  • Information about compactions, including when they start, which sstables -they contain, and when they finish.
  • -
  • Information about memtable flushes to disk, including when they happened, -how large the flushes were, and which commitlog segments the flush impacted.
  • -
-

This log can be very noisy, so it is highly recommended to use grep and -other log analysis tools to dive deep. For example:

-
$ # Search for messages involving a CompactionTask with 5 lines of context
-$ grep CompactionTask debug.log -C 5
-...
-
-$ # Look at the distribution of flush tasks per keyspace
-$ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c
-    6 compaction_history:
-    1 test_keyspace:
-    2 local:
-    17 size_estimates:
-    17 sstable_activity:
-
-
-
-
-

gc.log

-

The gc log is a standard Java GC log. With the default jvm.options -settings you get a lot of valuable information in this log such as -application pause times, and why pauses happened. This may help narrow -down throughput or latency issues to a mistuned JVM. For example you can -view the last few pauses:

-
$ grep stopped gc.log.0.current | tail
-2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds
-2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds
-2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds
-2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds
-2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds
-2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds
-2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds
-2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds
-2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds
-2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds
-
-
-

This shows a lot of valuable information including how long the application -was paused (meaning zero user queries were being serviced during the e.g. 33ms -JVM pause) as well as how long it took to enter the safepoint. You can use this -raw data to e.g. get the longest pauses:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n  | tail | xargs -IX grep X gc.log.0.current | sort -k 1
-2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds
-2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds
-2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds
-2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds
-2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds
-2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds
-2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds
-2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds
-2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds
-2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds
-
-
-

In this case any client waiting on a query would have experienced a 56ms -latency at 17:13:41.

-

Note that GC pauses are not _only_ garbage collection, although -generally speaking high pauses with fast safepoints indicate a lack of JVM heap -or mistuned JVM GC algorithm. High pauses with slow safepoints typically -indicate that the JVM is having trouble entering a safepoint which usually -indicates slow disk drives (Cassandra makes heavy use of memory mapped reads -which the JVM doesn’t know could have disk latency, so the JVM safepoint logic -doesn’t handle a blocking memory mapped read particularly well).

-

Using these logs you can even get a pause distribution with something like -histogram.py:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py
-# NumSamples = 410293; Min = 0.00; Max = 11.49
-# Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498
-# each ∎ represents a count of 5470
-    0.0001 -     1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
-    1.1496 -     2.2991 [    15]:
-    2.2991 -     3.4486 [     5]:
-    3.4486 -     4.5981 [     1]:
-    4.5981 -     5.7475 [     5]:
-    5.7475 -     6.8970 [     9]:
-    6.8970 -     8.0465 [     1]:
-    8.0465 -     9.1960 [     0]:
-    9.1960 -    10.3455 [     0]:
-   10.3455 -    11.4949 [     2]:
-
-
-

We can see in this case that while we have very good average performance, something is causing multi-second JVM pauses. In this case it was mostly safepoint pauses caused by slow disks:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X  gc.log.0.current| sort -k 1
-2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds
-2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds
-2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds
-2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds
-2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds
-2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds
-2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds
-2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds
-2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds
-2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds
-
-
-

Sometimes reading and understanding Java GC logs is hard, but you can take the raw GC files and visualize them using tools such as GCViewer, which takes the Cassandra GC log as input and shows you detailed visual information on your garbage collection performance. This includes pause analysis as well as throughput information. For a stable Cassandra JVM you probably want to aim for pauses less than 200ms and GC throughput greater than 99% (ymmv).
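As a rough back-of-the-envelope alternative to a full GCViewer analysis, you can approximate total pause time directly from the log by summing the pause durations. This sketch assumes the same log format and field positions used in the examples above:

$ # Total stopped time and number of safepoints in this log segment
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | awk '{sum += $1; n++} END {printf "%.2f seconds paused across %d safepoints\n", sum, n}'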

-

Java GC pauses are one of the leading causes of tail latency in Cassandra -(along with drive latency) so sometimes this information can be crucial -while debugging tail latency issues.

-
-
-
-

Getting More Information

-

If the default logging levels are insufficient, nodetool can set higher or lower logging levels for various packages and classes using the nodetool setlogginglevel command. Start by viewing the current levels:

-
$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-
-
-

Perhaps the Gossiper is acting up and we wish to enable it at TRACE -level for even more insight:

-
$ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE
-
-$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-org.apache.cassandra.gms.Gossiper                      TRACE
-
-$ grep TRACE debug.log | tail -2
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating
-heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ...
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local
-heartbeat version 2341 greater than 2340 for 127.0.0.1:7000
-
-
-

Note that any changes made this way are reverted on next Cassandra process -restart. To make the changes permanent add the appropriate rule to -logback.xml.

-
diff --git a/conf/logback.xml b/conf/logback.xml
-index b2c5b10..71b0a49 100644
---- a/conf/logback.xml
-+++ b/conf/logback.xml
-@@ -98,4 +98,5 @@ appender reference in the root level section below.
-   </root>
-
-   <logger name="org.apache.cassandra" level="DEBUG"/>
-+  <logger name="org.apache.cassandra.gms.Gossiper" level="TRACE"/>
- </configuration>
-
-
-
-

Full Query Logger

-

Cassandra 4.0 additionally ships with support for full query logging. This -is a highly performant binary logging tool which captures Cassandra queries -in real time, writes them (if possible) to a log file, and ensures the total -size of the capture does not exceed a particular limit. FQL is enabled with -nodetool and the logs are read with the provided bin/fqltool utility:

-
$ mkdir /var/tmp/fql_logs
-$ nodetool enablefullquerylog --path /var/tmp/fql_logs
-
-# ... do some querying
-
-$ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail
-Query time: 1530750927224
-Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name =
-'system_views' AND table_name = 'sstable_tasks';
-Values:
-
-Type: single
-Protocol version: 4
-Query time: 1530750934072
-Query: select * from keyspace1.standard1 ;
-Values:
-
-$ nodetool disablefullquerylog
-
-
-

Note that if you want more information than this tool provides, there are other -live capture options available such as packet capture.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/troubleshooting/use_nodetool.html b/src/doc/4.0-alpha1/troubleshooting/use_nodetool.html deleted file mode 100644 index 68570e5cd..000000000 --- a/src/doc/4.0-alpha1/troubleshooting/use_nodetool.html +++ /dev/null @@ -1,320 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Use Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Use Nodetool

-

Cassandra’s nodetool allows you to narrow problems from the cluster down to a particular node and gives a lot of insight into the state of the Cassandra process itself. There are dozens of useful commands (see nodetool help for all the commands), but briefly, here are some of the most useful for troubleshooting:

-
-

Cluster Status

-

You can use nodetool status to assess status of the cluster:

-
$ nodetool status <optional keyspace>
-
-Datacenter: dc1
-=======================
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-UN  127.0.1.1  4.69 GiB   1            100.0%            35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e  r1
-UN  127.0.1.2  4.71 GiB   1            100.0%            752e278f-b7c5-4f58-974b-9328455af73f  r2
-UN  127.0.1.3  4.69 GiB   1            100.0%            9dc1a293-2cc0-40fa-a6fd-9e6054da04a7  r3
-
-
-

In this case we can see that we have three nodes in one datacenter with about -4.6GB of data each and they are all “up”. The up/down status of a node is -independently determined by every node in the cluster, so you may have to run -nodetool status on multiple nodes in a cluster to see the full view.

-

You can use nodetool status plus a little grep to see which nodes are -down:

-
$ nodetool status | grep -v '^UN'
-Datacenter: dc1
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-Datacenter: dc2
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-DN  127.0.0.5  105.73 KiB  1            33.3%             df303ac7-61de-46e9-ac79-6e630115fd75  r1
-
-
-

In this case there are two datacenters and there is one node down in datacenter -dc2 and rack r1. This may indicate an issue on 127.0.0.5 -warranting investigation.
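Because each node maintains its own view of cluster membership, it can be useful to spot-check the status output from several nodes at once. A minimal sketch, assuming SSH access and using illustrative hostnames:

$ for h in cass1 cass2 cass3; do echo "== $h =="; ssh "$h" nodetool status | grep -v '^UN'; done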

-
-
-

Coordinator Query Latency

-

You can view latency distributions of coordinator read and write latency -to help narrow down latency issues using nodetool proxyhistograms:

-
$ nodetool proxyhistograms
-Percentile       Read Latency      Write Latency      Range Latency   CAS Read Latency  CAS Write Latency View Write Latency
-                     (micros)           (micros)           (micros)           (micros)           (micros)           (micros)
-50%                    454.83             219.34               0.00               0.00               0.00               0.00
-75%                    545.79             263.21               0.00               0.00               0.00               0.00
-95%                    654.95             315.85               0.00               0.00               0.00               0.00
-98%                    785.94             379.02               0.00               0.00               0.00               0.00
-99%                   3379.39            2346.80               0.00               0.00               0.00               0.00
-Min                     42.51             105.78               0.00               0.00               0.00               0.00
-Max                  25109.16           43388.63               0.00               0.00               0.00               0.00
-
-
-

Here you can see the full latency distribution of reads, writes, range requests (e.g. select * from keyspace.table), CAS reads (compare phase of CAS) and CAS writes (set phase of compare and set). These can be useful for narrowing down high-level latency problems; for example, in this case if a client had a 20 millisecond timeout on their reads they might experience the occasional timeout from this node, but fewer than 1% of reads would time out (since the 99% read latency is 3.3 milliseconds < 20 milliseconds).
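Since these histograms reflect recent activity, it can help to watch them while reproducing a problem, for example:

$ # Refresh coordinator latency percentiles every 10 seconds, highlighting changes
$ watch -d -n 10 nodetool proxyhistograms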

-
-
-

Local Query Latency

-

If you know which table is having latency/error issues, you can use -nodetool tablehistograms to get a better idea of what is happening -locally on a node:

-
$ nodetool tablehistograms keyspace table
-Percentile  SSTables     Write Latency      Read Latency    Partition Size        Cell Count
-                              (micros)          (micros)           (bytes)
-50%             0.00             73.46            182.79             17084               103
-75%             1.00             88.15            315.85             17084               103
-95%             2.00            126.93            545.79             17084               103
-98%             2.00            152.32            654.95             17084               103
-99%             2.00            182.79            785.94             17084               103
-Min             0.00             42.51             24.60             14238                87
-Max             2.00          12108.97          17436.92             17084               103
-
-
-

This shows you percentile breakdowns of several particularly critical metrics.

-

The first column contains how many sstables were read per logical read. A very high number here indicates that you may have chosen the wrong compaction strategy, e.g. SizeTieredCompactionStrategy typically requires many more sstables to be read per read than LeveledCompactionStrategy does for update heavy workloads.
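If the sstables-per-read column looks high, it may be worth confirming which compaction strategy the table is actually using. One way to check, assuming cqlsh access on the node and using illustrative keyspace/table names:

$ cqlsh -e "DESCRIBE TABLE keyspace1.standard1" | grep -A 2 compaction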

-

The second column shows you a latency breakdown of local write latency. In -this case we see that while the p50 is quite good at 73 microseconds, the -maximum latency is quite slow at 12 milliseconds. High write max latencies -often indicate a slow commitlog volume (slow to fsync) or large writes -that quickly saturate commitlog segments.

-

The third column shows you a latency breakdown of local read latency. We can -see that local Cassandra reads are (as expected) slower than local writes, and -the read speed correlates highly with the number of sstables read per read.

-

The fourth and fifth columns show distributions of partition size and column -count per partition. These are useful for determining if the table has on -average skinny or wide partitions and can help you isolate bad data patterns. -For example if you have a single cell that is 2 megabytes, that is probably -going to cause some heap pressure when it’s read.

-
-
-

Threadpool State

-

You can use nodetool tpstats to view the current outstanding requests on -a particular node. This is useful for trying to find out which resource -(read threads, write threads, compaction, request response threads) the -Cassandra process lacks. For example:

-
$ nodetool tpstats
-Pool Name                         Active   Pending      Completed   Blocked  All time blocked
-ReadStage                              2         0             12         0                 0
-MiscStage                              0         0              0         0                 0
-CompactionExecutor                     0         0           1940         0                 0
-MutationStage                          0         0              0         0                 0
-GossipStage                            0         0          10293         0                 0
-Repair-Task                            0         0              0         0                 0
-RequestResponseStage                   0         0             16         0                 0
-ReadRepairStage                        0         0              0         0                 0
-CounterMutationStage                   0         0              0         0                 0
-MemtablePostFlush                      0         0             83         0                 0
-ValidationExecutor                     0         0              0         0                 0
-MemtableFlushWriter                    0         0             30         0                 0
-ViewMutationStage                      0         0              0         0                 0
-CacheCleanupExecutor                   0         0              0         0                 0
-MemtableReclaimMemory                  0         0             30         0                 0
-PendingRangeCalculator                 0         0             11         0                 0
-SecondaryIndexManagement               0         0              0         0                 0
-HintsDispatcher                        0         0              0         0                 0
-Native-Transport-Requests              0         0            192         0                 0
-MigrationStage                         0         0             14         0                 0
-PerDiskMemtableFlushWriter_0           0         0             30         0                 0
-Sampler                                0         0              0         0                 0
-ViewBuildExecutor                      0         0              0         0                 0
-InternalResponseStage                  0         0              0         0                 0
-AntiEntropyStage                       0         0              0         0                 0
-
-Message type           Dropped                  Latency waiting in queue (micros)
-                                             50%               95%               99%               Max
-READ                         0               N/A               N/A               N/A               N/A
-RANGE_SLICE                  0              0.00              0.00              0.00              0.00
-_TRACE                       0               N/A               N/A               N/A               N/A
-HINT                         0               N/A               N/A               N/A               N/A
-MUTATION                     0               N/A               N/A               N/A               N/A
-COUNTER_MUTATION             0               N/A               N/A               N/A               N/A
-BATCH_STORE                  0               N/A               N/A               N/A               N/A
-BATCH_REMOVE                 0               N/A               N/A               N/A               N/A
-REQUEST_RESPONSE             0              0.00              0.00              0.00              0.00
-PAGED_RANGE                  0               N/A               N/A               N/A               N/A
-READ_REPAIR                  0               N/A               N/A               N/A               N/A
-
-
-

This command shows you all kinds of interesting statistics. The first section shows a detailed breakdown of threadpools for each Cassandra stage, including how many threads are currently executing (Active) and how many are waiting to run (Pending). Typically, if you see pending executions in a particular threadpool that indicates a problem localized to that type of operation. For example if the RequestResponseStage queue is backing up, that means that the coordinators are waiting on a lot of downstream replica requests and may indicate a lack of token awareness, or very high consistency levels being used on read requests (for example reading at ALL ties up RF RequestResponseStage threads whereas LOCAL_ONE only uses a single thread in the ReadStage threadpool). On the other hand if you see a lot of pending compactions that may indicate that your compaction threads cannot keep up with the volume of writes and you may need to tune either the compaction strategy or the concurrent_compactors or compaction_throughput options.

-

The second section shows drops (errors) and latency distributions for all the major request types. Drops are cumulative since process start, but having any at all often indicates a serious problem, as the default timeouts to qualify as a drop are quite high (~5-10 seconds). Dropped messages usually warrant further investigation.

-
-
-

Compaction State

-

As Cassandra is an LSM datastore, it sometimes has to compact sstables together, which can have adverse effects on performance. In particular, compaction uses a reasonable quantity of CPU resources, invalidates large quantities of the OS page cache, and can put a lot of load on your disk drives. There are great OS tools to determine if this is the case, but often it’s a good idea to check if compactions are even running using nodetool compactionstats:

-
$ nodetool compactionstats
-pending tasks: 2
-- keyspace.table: 2
-
-id                                   compaction type keyspace table completed total    unit  progress
-2062b290-7f3a-11e8-9358-cd941b956e60 Compaction      keyspace table 21848273  97867583 bytes 22.32%
-Active compaction remaining time :   0h00m04s
-
-
-

In this case there is a single compaction running on the keyspace.table table; it has completed 21.8 megabytes of 97.9 and Cassandra estimates (based on the configured compaction throughput) that this will take 4 seconds. You can also pass -H to get the units in a human readable format, as shown below.
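For example, the same view with human readable units:

$ nodetool compactionstats -H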

-

Generally each running compaction can consume a single core, but the more you do in parallel the faster data compacts. Compaction is crucial to ensuring good read performance, so having the right balance of concurrent compactions, such that compactions complete quickly but don’t take too many resources away from query threads, is very important for performance. If you notice compaction unable to keep up, try tuning Cassandra’s concurrent_compactors or compaction_throughput options.
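Both settings can be adjusted at runtime without a restart; a hedged sketch (the values here are purely illustrative and should be chosen based on your hardware):

$ # Check and raise the compaction throughput cap (MB/s)
$ nodetool getcompactionthroughput
$ nodetool setcompactionthroughput 64
$ # Allow more compactions to run in parallel
$ nodetool setconcurrentcompactors 4

As with the logging levels discussed earlier, changes made via nodetool revert on the next process restart; the corresponding permanent settings live in cassandra.yaml.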

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha1/troubleshooting/use_tools.html b/src/doc/4.0-alpha1/troubleshooting/use_tools.html deleted file mode 100644 index 2a3044e8b..000000000 --- a/src/doc/4.0-alpha1/troubleshooting/use_tools.html +++ /dev/null @@ -1,608 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Diving Deep, Use External Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Diving Deep, Use External Tools

-

Machine access allows operators to dive even deeper than logs and nodetool -allow. While every Cassandra operator may have their personal favorite -toolsets for troubleshooting issues, this page contains some of the most common -operator techniques and examples of those tools. Many of these commands work -only on Linux, but if you are deploying on a different operating system you may -have access to other substantially similar tools that assess similar OS level -metrics and processes.

-
-

JVM Tooling

-

The JVM ships with a number of useful tools. Some of them are useful for -debugging Cassandra issues, especially related to heap and execution stacks.

-

NOTE: There are two common gotchas with JVM tooling and Cassandra:

-
  1. By default Cassandra ships with -XX:+PerfDisableSharedMem set to prevent long pauses (see CASSANDRA-9242 and CASSANDRA-9483 for details). If you want to use JVM tooling you can instead have /tmp mounted on an in-memory tmpfs, which also effectively works around CASSANDRA-9242.
  2. Make sure you run the tools as the same user as Cassandra is running as, e.g. if the database is running as cassandra the tool also has to be run as cassandra, e.g. via sudo -u cassandra <cmd>.
-
-

Garbage Collection State (jstat)

-

If you suspect heap pressure you can use jstat to dive deep into the garbage collection state of a Cassandra process. This command is always safe to run and yields detailed heap information including eden heap usage (E), old generation heap usage (O), count of eden collections (YGC), time spent in eden collections (YGCT), old/mixed generation collections (FGC) and time spent in old/mixed generation collections (FGCT):

-
jstat -gcutil <cassandra pid> 500ms
- S0     S1     E      O      M     CCS    YGC     YGCT    FGC    FGCT     GCT
- 0.00   0.00  81.53  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.94  31.16  93.07  88.20     12    0.151     3    0.257    0.408
-
-
-

In this case we see we have a relatively healthy heap profile, with 31.16% -old generation heap usage and 83% eden. If the old generation routinely is -above 75% then you probably need more heap (assuming CMS with a 75% occupancy -threshold). If you do have such persistently high old gen that often means you -either have under-provisioned the old generation heap, or that there is too -much live data on heap for Cassandra to collect (e.g. because of memtables). -Another thing to watch for is time between young garbage collections (YGC), -which indicate how frequently the eden heap is collected. Each young gc pause -is about 20-50ms, so if you have a lot of them your clients will notice in -their high percentile latencies.

-
-
-

Thread Information (jstack)

-

To get a point-in-time snapshot of exactly what Cassandra is doing, run jstack against the Cassandra PID. Note that this does pause the JVM for a very brief period (<20ms):

-
$ jstack <cassandra pid> > threaddump
-
-# display the threaddump
-$ cat threaddump
-...
-
-# look at runnable threads
-$grep RUNNABLE threaddump -B 1
-"Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000]
-   java.lang.Thread.State: RUNNABLE
---
-"Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-...
-
-# Note that the nid is the Linux thread id
-
-
-

Some of the most important information in the threaddumps are waiting/blocking -threads, including what locks or monitors the thread is blocking/waiting on.

-
-
-
-

Basic OS Tooling

-

A great place to start when debugging a Cassandra issue is understanding how Cassandra is interacting with system resources. The following are all resources that Cassandra makes heavy use of:

-
  • CPU cores. For executing concurrent user queries.
  • CPU processing time. For query activity (data decompression, row merging, etc.).
  • CPU processing time (low priority). For background tasks (compaction, streaming, etc.).
  • RAM for the Java heap. Used to hold internal data structures and, by default, the Cassandra memtables. Heap space is a crucial component of write performance as well as of performance in general.
  • RAM for OS disk cache. Used to cache frequently accessed SSTable blocks. OS disk cache is a crucial component of read performance.
  • Disks. Cassandra cares a lot about disk read latency, disk write throughput, and of course disk space.
  • Network latency. Cassandra makes many internode requests, so network latency between nodes can directly impact performance.
  • Network throughput. Cassandra (like other databases) frequently has the so-called “incast” problem, where a small request (e.g. SELECT * from foo.bar) returns a massively large result set (e.g. the entire dataset). In such situations outgoing bandwidth is crucial.
-

Often troubleshooting Cassandra comes down to troubleshooting what resource -the machine or cluster is running out of. Then you create more of that resource -or change the query pattern to make less use of that resource.

-
-

High Level Resource Usage (top/htop)

-

Cassandra makes significant use of system resources, and often the very first useful action is to run top or htop (website) to see the state of the machine.
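For example (htop may need to be installed first via your distribution’s package manager, and the one-shot top flags below assume a recent procps-ng version):

$ # Interactive per-process and per-core view
$ htop
$ # Or a one-shot, scriptable snapshot sorted by memory usage
$ top -b -n 1 -o %MEM | head -20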

-

Useful things to look at:

-
  • System load levels. While these numbers can be confusing, generally speaking if the load average is greater than the number of CPU cores, Cassandra probably won’t have very good (sub 100 millisecond) latencies. See Linux Load Averages for more information.
  • CPU utilization. htop in particular can help break down CPU utilization into user (low and normal priority), system (kernel), and io-wait. Cassandra query threads execute as normal priority user threads, while compaction threads execute as low priority user threads. High system time could indicate problems like thread contention, and high io-wait may indicate slow disk drives. This can help you understand what Cassandra is spending processing resources doing.
  • Memory usage. Look for which programs have the most resident memory; it is probably Cassandra. The number for Cassandra is likely inaccurately high due to how Linux (as of 2018) accounts for memory mapped file memory.
-
-
-

IO Usage (iostat)

-

Use iostat to determine how data drives are faring, including latency -distributions, throughput, and utilization:

-
$ sudo iostat -xdm 2
-Linux 4.13.0-13-generic (hostname)     07/03/2018     _x86_64_    (8 CPU)
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.28    0.32    5.42     0.01     0.13    48.55     0.01    2.21    0.26    2.32   0.64   0.37
-sdb               0.00     0.00    0.00    0.00     0.00     0.00    79.34     0.00    0.20    0.20    0.00   0.16   0.00
-sdc               0.34     0.27    0.76    0.36     0.01     0.02    47.56     0.03   26.90    2.98   77.73   9.21   1.03
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.00    2.00   32.00     0.01     4.04   244.24     0.54   16.00    0.00   17.00   1.06   3.60
-sdb               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00    0.00    0.00   0.00   0.00
-sdc               0.00    24.50    0.00  114.00     0.00    11.62   208.70     5.56   48.79    0.00   48.79   1.12  12.80
-
-
-

In this case we can see that /dev/sdc is a very slow drive, having an await close to 50 milliseconds and an avgqu-sz close to 5 ios. The drive is not particularly saturated (utilization is only 12.8%), but we should still be concerned about how this would affect our p99 latency since 50ms is quite long for typical Cassandra operations. That being said, in this case most of the latency is present in writes (typically writes are more latent than reads), which due to the LSM nature of Cassandra is often hidden from the user.

-

Important metrics to assess using iostat:

-
    -
  • Reads and writes per second. These numbers will change with the workload, -but generally speaking the more reads Cassandra has to do from disk the -slower Cassandra read latencies are. Large numbers of reads per second -can be a dead giveaway that the cluster has insufficient memory for OS -page caching.
  • -
  • Write throughput. Cassandra’s LSM model defers user writes and batches them -together, which means that throughput to the underlying medium is the most -important write metric for Cassandra.
  • -
  • Read latency (r_await). When Cassandra misses the OS page cache and reads from SSTables, the read latency directly determines how fast Cassandra can respond with the data.
  • -
  • Write latency. Cassandra is less sensitive to write latency except when it -syncs the commit log. This typically enters into the very high percentiles of -write latency.
  • -
-

Note that to get detailed latency breakdowns you will need a more advanced -tool such as bcc-tools.

-
-
-

OS page Cache Usage

-

As Cassandra makes heavy use of memory mapped files, the health of the -operating system’s Page Cache is -crucial to performance. Start by finding how much available cache is in the -system:

-
$ free -g
-              total        used        free      shared  buff/cache   available
-Mem:             15           9           2           0           3           5
-Swap:             0           0           0
-
-
-

In this case 9GB of memory is used by user processes (Cassandra heap) and 8GB -is available for OS page cache. Of that, 3GB is actually used to cache files. -If most memory is used and unavailable to the page cache, Cassandra performance -can suffer significantly. This is why Cassandra starts with a reasonably small -amount of memory reserved for the heap.

-

If you suspect that you are missing the OS page cache frequently you can use -advanced tools like cachestat or -vmtouch to dive deeper.

-
-
-

Network Latency and Reliability

-

Whenever Cassandra does writes or reads that involve other replicas, -LOCAL_QUORUM reads for example, one of the dominant effects on latency is -network latency. When trying to debug issues with multi machine operations, -the network can be an important resource to investigate. You can determine -internode latency using tools like ping and traceroute or most -effectively mtr:

-
$ mtr -nr www.google.com
-Start: Sun Jul 22 13:10:28 2018
-HOST: hostname                     Loss%   Snt   Last   Avg  Best  Wrst StDev
-  1.|-- 192.168.1.1                0.0%    10    2.0   1.9   1.1   3.7   0.7
-  2.|-- 96.123.29.15               0.0%    10   11.4  11.0   9.0  16.4   1.9
-  3.|-- 68.86.249.21               0.0%    10   10.6  10.7   9.0  13.7   1.1
-  4.|-- 162.141.78.129             0.0%    10   11.5  10.6   9.6  12.4   0.7
-  5.|-- 162.151.78.253             0.0%    10   10.9  12.1  10.4  20.2   2.8
-  6.|-- 68.86.143.93               0.0%    10   12.4  12.6   9.9  23.1   3.8
-  7.|-- 96.112.146.18              0.0%    10   11.9  12.4  10.6  15.5   1.6
-  9.|-- 209.85.252.250             0.0%    10   13.7  13.2  12.5  13.9   0.0
- 10.|-- 108.170.242.238            0.0%    10   12.7  12.4  11.1  13.0   0.5
- 11.|-- 74.125.253.149             0.0%    10   13.4  13.7  11.8  19.2   2.1
- 12.|-- 216.239.62.40              0.0%    10   13.4  14.7  11.5  26.9   4.6
- 13.|-- 108.170.242.81             0.0%    10   14.4  13.2  10.9  16.0   1.7
- 14.|-- 72.14.239.43               0.0%    10   12.2  16.1  11.0  32.8   7.1
- 15.|-- 216.58.195.68              0.0%    10   25.1  15.3  11.1  25.1   4.8
-
-
-

In this example of mtr, we can rapidly assess the path that your packets -are taking, as well as what their typical loss and latency are. Packet loss -typically leads to between 200ms and 3s of additional latency, so that -can be a common cause of latency issues.

-
-
-

Network Throughput

-

As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is -useful to determine if network throughput is limited. One handy tool to do -this is iftop which -shows both bandwidth usage as well as connection information at a glance. An -example showing traffic during a stress run against a local ccm cluster:

-
$ # remove the -t for ncurses instead of pure text
-$ sudo iftop -nNtP -i lo
-interface: lo
-IP address is: 127.0.0.1
-MAC address is: 00:00:00:00:00:00
-Listening on lo
-   # Host name (port/service if enabled)            last 2s   last 10s   last 40s cumulative
---------------------------------------------------------------------------------------------
-   1 127.0.0.1:58946                          =>      869Kb      869Kb      869Kb      217KB
-     127.0.0.3:9042                           <=         0b         0b         0b         0B
-   2 127.0.0.1:54654                          =>      736Kb      736Kb      736Kb      184KB
-     127.0.0.1:9042                           <=         0b         0b         0b         0B
-   3 127.0.0.1:51186                          =>      669Kb      669Kb      669Kb      167KB
-     127.0.0.2:9042                           <=         0b         0b         0b         0B
-   4 127.0.0.3:9042                           =>     3.30Kb     3.30Kb     3.30Kb       845B
-     127.0.0.1:58946                          <=         0b         0b         0b         0B
-   5 127.0.0.1:9042                           =>     2.79Kb     2.79Kb     2.79Kb       715B
-     127.0.0.1:54654                          <=         0b         0b         0b         0B
-   6 127.0.0.2:9042                           =>     2.54Kb     2.54Kb     2.54Kb       650B
-     127.0.0.1:51186                          <=         0b         0b         0b         0B
-   7 127.0.0.1:36894                          =>     1.65Kb     1.65Kb     1.65Kb       423B
-     127.0.0.5:7000                           <=         0b         0b         0b         0B
-   8 127.0.0.1:38034                          =>     1.50Kb     1.50Kb     1.50Kb       385B
-     127.0.0.2:7000                           <=         0b         0b         0b         0B
-   9 127.0.0.1:56324                          =>     1.50Kb     1.50Kb     1.50Kb       383B
-     127.0.0.1:7000                           <=         0b         0b         0b         0B
-  10 127.0.0.1:53044                          =>     1.43Kb     1.43Kb     1.43Kb       366B
-     127.0.0.4:7000                           <=         0b         0b         0b         0B
---------------------------------------------------------------------------------------------
-Total send rate:                                     2.25Mb     2.25Mb     2.25Mb
-Total receive rate:                                      0b         0b         0b
-Total send and receive rate:                         2.25Mb     2.25Mb     2.25Mb
---------------------------------------------------------------------------------------------
-Peak rate (sent/received/total):                     2.25Mb         0b     2.25Mb
-Cumulative (sent/received/total):                     576KB         0B      576KB
-============================================================================================
-
-
-

In this case we can see that bandwidth is fairly evenly shared between many peers, but if the total were getting close to the rated capacity of the NIC, or were focused on a single client, that may be a clue as to what issue is occurring.

-
-
-
-

Advanced tools

-

Sometimes as an operator you may need to really dive deep. This is where -advanced OS tooling can come in handy.

-
-

bcc-tools

-

Most modern Linux distributions (kernels newer than 4.1) support bcc-tools for diving deep into performance problems. -First install bcc-tools, e.g. via apt on Debian:

-
$ apt install bcc-tools
-
-
-

Then you can use all the tools that bcc-tools contains. One of the most -useful tools is cachestat -(cachestat examples) -which allows you to determine exactly how many OS page cache hits and misses -are happening:

-
$ sudo /usr/share/bcc/tools/cachestat -T 1
-TIME        TOTAL   MISSES     HITS  DIRTIES   BUFFERS_MB  CACHED_MB
-18:44:08       66       66        0       64           88       4427
-18:44:09       40       40        0       75           88       4427
-18:44:10     4353       45     4308      203           88       4427
-18:44:11       84       77        7       13           88       4428
-18:44:12     2511       14     2497       14           88       4428
-18:44:13      101       98        3       18           88       4428
-18:44:14    16741        0    16741       58           88       4428
-18:44:15     1935       36     1899       18           88       4428
-18:44:16       89       34       55       18           88       4428
-
-
-

In this case there are not too many page cache MISSES which indicates a -reasonably sized cache. These metrics are the most direct measurement of your -Cassandra node’s “hot” dataset. If you don’t have enough cache, MISSES will -be high and performance will be slow. If you have enough cache, MISSES will -be low and performance will be fast (as almost all reads are being served out -of memory).

-

You can also measure disk latency distributions using biolatency -(biolatency examples) -to get an idea of how slow Cassandra will be when reads miss the OS page Cache -and have to hit disks:

-
$ sudo /usr/share/bcc/tools/biolatency -D 10
-Tracing block device I/O... Hit Ctrl-C to end.
-
-
-disk = 'sda'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 12       |****************************************|
-        32 -> 63         : 9        |******************************          |
-        64 -> 127        : 1        |***                                     |
-       128 -> 255        : 3        |**********                              |
-       256 -> 511        : 7        |***********************                 |
-       512 -> 1023       : 2        |******                                  |
-
-disk = 'sdc'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 0        |                                        |
-        32 -> 63         : 0        |                                        |
-        64 -> 127        : 41       |************                            |
-       128 -> 255        : 17       |*****                                   |
-       256 -> 511        : 13       |***                                     |
-       512 -> 1023       : 2        |                                        |
-      1024 -> 2047       : 0        |                                        |
-      2048 -> 4095       : 0        |                                        |
-      4096 -> 8191       : 56       |*****************                       |
-      8192 -> 16383      : 131      |****************************************|
-     16384 -> 32767      : 9        |**                                      |
-
-
-

In this case most ios on the data drive (sdc) are fast, but many take -between 8 and 16 milliseconds.

-

Finally biosnoop (examples) -can be used to dive even deeper and see per IO latencies:

-
$ sudo /usr/share/bcc/tools/biosnoop | grep java | head
-0.000000000    java           17427  sdc     R  3972458600 4096      13.58
-0.000818000    java           17427  sdc     R  3972459408 4096       0.35
-0.007098000    java           17416  sdc     R  3972401824 4096       5.81
-0.007896000    java           17416  sdc     R  3972489960 4096       0.34
-0.008920000    java           17416  sdc     R  3972489896 4096       0.34
-0.009487000    java           17427  sdc     R  3972401880 4096       0.32
-0.010238000    java           17416  sdc     R  3972488368 4096       0.37
-0.010596000    java           17427  sdc     R  3972488376 4096       0.34
-0.011236000    java           17410  sdc     R  3972488424 4096       0.32
-0.011825000    java           17427  sdc     R  3972488576 16384      0.65
-... time passes
-8.032687000    java           18279  sdc     R  10899712  122880     3.01
-8.033175000    java           18279  sdc     R  10899952  8192       0.46
-8.073295000    java           18279  sdc     R  23384320  122880     3.01
-8.073768000    java           18279  sdc     R  23384560  8192       0.46
-
-
-

With biosnoop you see every single IO and how long it takes. This data can be used to construct the latency distributions in biolatency, but can also be used to better understand how disk latency affects performance. For example this particular drive takes ~3ms to service a memory mapped read due to the large default value (128kb) of read_ahead_kb. To improve point read performance you may want to decrease read_ahead_kb on fast data volumes such as SSDs, while keeping a higher value like 128kb is probably right for HDDs. There are tradeoffs involved (see the queue-sysfs docs for more information), but regardless biosnoop is useful for understanding how Cassandra uses drives.
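If you want to experiment with readahead, the setting is exposed through sysfs. A minimal sketch, assuming the data volume is /dev/sdc (adjust the device name; the change does not persist across reboots):

$ # Inspect the current readahead for the data drive
$ cat /sys/block/sdc/queue/read_ahead_kb
128
$ # Temporarily lower it for point-read-heavy SSD workloads
$ echo 8 | sudo tee /sys/block/sdc/queue/read_ahead_kb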

-
-
-

vmtouch

-

Sometimes it’s useful to know how much of the Cassandra data files are being -cached by the OS. A great tool for answering this question is -vmtouch.

-

First install it:

-
$ git clone https://github.com/hoytech/vmtouch.git
-$ cd vmtouch
-$ make
-
-
-

Then run it on the Cassandra data directory:

-
$ ./vmtouch /var/lib/cassandra/data/
-           Files: 312
-     Directories: 92
-  Resident Pages: 62503/64308  244M/251M  97.2%
-         Elapsed: 0.005657 seconds
-
-
-

In this case almost the entire dataset is hot in OS page Cache. Generally -speaking the percentage doesn’t really matter unless reads are missing the -cache (per e.g. cachestat), in which case having -additional memory may help read performance.

-
-
-

CPU Flamegraphs

-

Cassandra often uses a lot of CPU, but telling what it is doing can prove -difficult. One of the best ways to analyze Cassandra on CPU time is to use -CPU Flamegraphs -which display in a useful way which areas of Cassandra code are using CPU. This -may help narrow down a compaction problem to a “compaction problem dropping -tombstones” or just generally help you narrow down what Cassandra is doing -while it is having an issue. To get CPU flamegraphs follow the instructions for -Java Flamegraphs.

-

Generally:

-
  1. Enable the -XX:+PreserveFramePointer option in Cassandra’s jvm.options configuration file. This has a negligible performance impact but allows you to actually see what Cassandra is doing.
  2. Run perf to get some data.
  3. Send that data through the relevant scripts in the FlameGraph toolset and convert the data into a pretty flamegraph. View the resulting SVG image in a browser or other image viewer.
-

For example just cloning straight off github we first install the -perf-map-agent to the location of our JVMs (assumed to be -/usr/lib/jvm):

-
$ sudo bash
-$ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/
-$ cd /usr/lib/jvm
-$ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent
-$ cd perf-map-agent
-$ cmake .
-$ make
-
-
-

Now to get a flamegraph:

-
$ git clone --depth=1 https://github.com/brendangregg/FlameGraph
-$ sudo bash
-$ cd FlameGraph
-$ # Record traces of Cassandra and map symbols for all java processes
-$ perf record -F 49 -a -g -p <CASSANDRA PID> -- sleep 30; ./jmaps
-$ # Translate the data
-$ perf script > cassandra_stacks
-$ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \
-    ./flamegraph.pl --color=java --hash > cassandra_flames.svg
-
-
-

The resulting SVG is searchable, zoomable, and generally easy to introspect -using a browser.

-
-
-

Packet Capture

-

Sometimes you have to understand what queries a Cassandra node is performing -right now to troubleshoot an issue. For these times trusty packet capture -tools like tcpdump and Wireshark can be very helpful to dissect packet captures. -Wireshark even has native CQL support although it sometimes has -compatibility issues with newer Cassandra protocol releases.

-

To get a packet capture first capture some packets:

-
$ sudo tcpdump -U -s0 -i <INTERFACE> -w cassandra.pcap -n "tcp port 9042"
-
-
-

Now open it up with wireshark:

-
$ wireshark cassandra.pcap
-
-
-

If you don’t see CQL-like statements, try telling Wireshark to decode the packets as CQL by right clicking on a packet going to 9042 -> Decode as -> select CQL from the dropdown for port 9042.

-

If you don’t want to do this manually or use a GUI, you can also use something -like cqltrace to ease obtaining and -parsing CQL packet captures.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/.buildinfo b/src/doc/4.0-alpha2/.buildinfo deleted file mode 100644 index b6680cb71..000000000 --- a/src/doc/4.0-alpha2/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: f049887e2a20253c713dd3548147c786 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/src/doc/4.0-alpha2/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml b/src/doc/4.0-alpha2/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml deleted file mode 100644 index fc5db0814..000000000 --- a/src/doc/4.0-alpha2/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# Keyspace Name -keyspace: stresscql - -# The CQL for creating a keyspace (optional if it already exists) -# Would almost always be network topology unless running something locall -keyspace_definition: | - CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; - -# Table name -table: blogposts - -# The CQL for creating a table you wish to stress (optional if it already exists) -table_definition: | - CREATE TABLE blogposts ( - domain text, - published_date timeuuid, - url text, - author text, - title text, - body text, - PRIMARY KEY(domain, published_date) - ) WITH CLUSTERING ORDER BY (published_date DESC) - AND compaction = { 'class':'LeveledCompactionStrategy' } - AND comment='A table to hold blog posts' - -### Column Distribution Specifications ### - -columnspec: - - name: domain - size: gaussian(5..100) #domain names are relatively short - population: uniform(1..10M) #10M possible domains to pick from - - - name: published_date - cluster: fixed(1000) #under each domain we will have max 1000 posts - - - name: url - size: uniform(30..300) - - - name: title #titles shouldn't go beyond 200 chars - size: gaussian(10..200) - - - name: author - size: uniform(5..20) #author names should be short - - - name: body - size: gaussian(100..5000) #the body of the blog post can be long - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # Our partition key is the domain so only insert one per batch - - select: fixed(1)/1000 # We have 1000 posts per domain so 1/1000 will allow 1 post per batch - - batchtype: UNLOGGED # Unlogged batches - - -# -# A list of queries you wish to run against the schema -# -queries: - singlepost: - cql: select * from blogposts where domain = ? LIMIT 1 - fields: samerow - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? 
- fields: samerow diff --git a/src/doc/4.0-alpha2/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml b/src/doc/4.0-alpha2/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml deleted file mode 100644 index 17161af27..000000000 --- a/src/doc/4.0-alpha2/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml +++ /dev/null @@ -1,44 +0,0 @@ -spacenam: example # idenitifier for this spec if running with multiple yaml files -keyspace: example - -# Would almost always be network topology unless running something locally -keyspace_definition: | - CREATE KEYSPACE example WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -table: staff_activities - -# The table under test. Start with a partition per staff member -# Is this a good idea? -table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when) - ) - -columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -insert: - # we only update a single partition in any given insert - partitions: fixed(1) - # we want to insert a single row per partition and we have between 20 and 500 - # rows per partition - select: fixed(1)/500 - batchtype: UNLOGGED # Single partition unlogged batches are essentially noops - -queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? LIMIT 1 - fields: samerow - diff --git a/src/doc/4.0-alpha2/_images/docs_commit.png b/src/doc/4.0-alpha2/_images/docs_commit.png deleted file mode 100644 index d90d96a88..000000000 Binary files a/src/doc/4.0-alpha2/_images/docs_commit.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/docs_create_branch.png b/src/doc/4.0-alpha2/_images/docs_create_branch.png deleted file mode 100644 index a04cb54f3..000000000 Binary files a/src/doc/4.0-alpha2/_images/docs_create_branch.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/docs_create_file.png b/src/doc/4.0-alpha2/_images/docs_create_file.png deleted file mode 100644 index b51e37035..000000000 Binary files a/src/doc/4.0-alpha2/_images/docs_create_file.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/docs_editor.png b/src/doc/4.0-alpha2/_images/docs_editor.png deleted file mode 100644 index 5b9997bcc..000000000 Binary files a/src/doc/4.0-alpha2/_images/docs_editor.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/docs_fork.png b/src/doc/4.0-alpha2/_images/docs_fork.png deleted file mode 100644 index 20a592a98..000000000 Binary files a/src/doc/4.0-alpha2/_images/docs_fork.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/docs_pr.png b/src/doc/4.0-alpha2/_images/docs_pr.png deleted file mode 100644 index 211eb25ef..000000000 Binary files a/src/doc/4.0-alpha2/_images/docs_pr.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/docs_preview.png b/src/doc/4.0-alpha2/_images/docs_preview.png deleted file mode 100644 index 207f0ac43..000000000 Binary files a/src/doc/4.0-alpha2/_images/docs_preview.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/eclipse_debug0.png b/src/doc/4.0-alpha2/_images/eclipse_debug0.png deleted file mode 100644 index 79fc5fd5b..000000000 Binary files 
a/src/doc/4.0-alpha2/_images/eclipse_debug0.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/eclipse_debug1.png b/src/doc/4.0-alpha2/_images/eclipse_debug1.png deleted file mode 100644 index 87b8756a3..000000000 Binary files a/src/doc/4.0-alpha2/_images/eclipse_debug1.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/eclipse_debug2.png b/src/doc/4.0-alpha2/_images/eclipse_debug2.png deleted file mode 100644 index df4eddbd7..000000000 Binary files a/src/doc/4.0-alpha2/_images/eclipse_debug2.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/eclipse_debug3.png b/src/doc/4.0-alpha2/_images/eclipse_debug3.png deleted file mode 100644 index 23178142c..000000000 Binary files a/src/doc/4.0-alpha2/_images/eclipse_debug3.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/eclipse_debug4.png b/src/doc/4.0-alpha2/_images/eclipse_debug4.png deleted file mode 100644 index 5063d4891..000000000 Binary files a/src/doc/4.0-alpha2/_images/eclipse_debug4.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/eclipse_debug5.png b/src/doc/4.0-alpha2/_images/eclipse_debug5.png deleted file mode 100644 index ab68e68a3..000000000 Binary files a/src/doc/4.0-alpha2/_images/eclipse_debug5.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/eclipse_debug6.png b/src/doc/4.0-alpha2/_images/eclipse_debug6.png deleted file mode 100644 index 61ef30bfe..000000000 Binary files a/src/doc/4.0-alpha2/_images/eclipse_debug6.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_images/example-stress-graph.png b/src/doc/4.0-alpha2/_images/example-stress-graph.png deleted file mode 100644 index a65b08b16..000000000 Binary files a/src/doc/4.0-alpha2/_images/example-stress-graph.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_sources/architecture/dynamo.rst.txt b/src/doc/4.0-alpha2/_sources/architecture/dynamo.rst.txt deleted file mode 100644 index 12c586e2c..000000000 --- a/src/doc/4.0-alpha2/_sources/architecture/dynamo.rst.txt +++ /dev/null @@ -1,164 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dynamo ------- - -.. _gossip: - -Gossip -^^^^^^ - -.. todo:: todo - -Failure Detection -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -Token Ring/Ranges -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -.. _replication-strategy: - -Replication -^^^^^^^^^^^ - -The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are :ref:`simple-strategy` and :ref:`network-topology-strategy`. - -.. _simple-strategy: - -SimpleStrategy -~~~~~~~~~~~~~~ - -SimpleStrategy allows a single integer ``replication_factor`` to be defined. This determines the number of nodes that -should contain a copy of each row. 
For example, if ``replication_factor`` is 3, then three different nodes should store
a copy of each row.

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas
for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For
each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to
the set. This process continues until ``replication_factor`` distinct nodes have been added to the set of replicas.

.. _network-topology-strategy:

NetworkTopologyStrategy
~~~~~~~~~~~~~~~~~~~~~~~

NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your
cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier
to add new physical or virtual datacenters to the cluster later.

In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose
replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication
factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one
replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially surprising
implications. For example, if the racks do not hold an equal number of nodes, the data load on the smallest rack may be
much higher. Similarly, if a single node is bootstrapped into a new rack, it will be considered a replica for the entire
ring. For this reason, many operators choose to configure all nodes on a single "rack".

.. _transient-replication:

Transient Replication
~~~~~~~~~~~~~~~~~~~~~

Transient replication allows you to configure a subset of replicas to only replicate data that hasn't been incrementally
repaired. This allows you to decouple data redundancy from availability. For instance, if you have a keyspace replicated
at RF 3, and alter it to RF 5 with 2 transient replicas, you go from being able to tolerate one failed replica to being
able to tolerate two, without a corresponding increase in storage usage. This is because 3 nodes will replicate all the
data for a given token range, and the other 2 will only replicate data that hasn't been incrementally repaired.

To use transient replication, you first need to enable it in ``cassandra.yaml``. Once enabled, both SimpleStrategy and
NetworkTopologyStrategy can be configured to transiently replicate data, by specifying the replication factor for a
datacenter as ``<total_replicas>/<transient_replicas>``.

Read and write replica sets are guaranteed to overlap when ``W + R > RF``, where ``W`` is the write consistency level,
``R`` is the read consistency level, and ``RF`` is the replication factor. For example, if ``RF = 3``, a ``QUORUM``
request will require responses from at least two of the three replicas. If ``QUORUM`` is used for both writes and reads,
at least one of the replicas is guaranteed to participate in *both* the write and the read request, which in turn
guarantees that the latest write will be read. In a multi-datacenter environment, ``LOCAL_QUORUM`` can be used to
provide a weaker but still useful guarantee: reads are guaranteed to see the latest write from within the same
datacenter.

If this type of strong consistency isn't required, lower consistency levels like ``ONE`` may be used to improve
throughput, latency, and availability.
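To make the replication-strategy discussion above concrete, here is a small sketch showing how a keyspace using NetworkTopologyStrategy could be created and inspected from the shell. The keyspace name `demo_ks` and datacenter name `dc1` are illustrative assumptions, and the commands assume `cqlsh` can reach a local node.

```bash
# Create a keyspace replicated 3 ways in a datacenter called "dc1"
# (hypothetical names; substitute your cluster's datacenter names).
$ cqlsh -e "CREATE KEYSPACE IF NOT EXISTS demo_ks
              WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};"

# Confirm how the keyspace is replicated.
$ cqlsh -e "DESCRIBE KEYSPACE demo_ks;"
```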
diff --git a/src/doc/4.0-alpha2/_sources/architecture/guarantees.rst.txt b/src/doc/4.0-alpha2/_sources/architecture/guarantees.rst.txt deleted file mode 100644 index c0b58d880..000000000 --- a/src/doc/4.0-alpha2/_sources/architecture/guarantees.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Guarantees ----------- - -.. todo:: todo diff --git a/src/doc/4.0-alpha2/_sources/architecture/index.rst.txt b/src/doc/4.0-alpha2/_sources/architecture/index.rst.txt deleted file mode 100644 index 58eda1377..000000000 --- a/src/doc/4.0-alpha2/_sources/architecture/index.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Architecture -============ - -This section describes the general architecture of Apache Cassandra. - -.. toctree:: - :maxdepth: 2 - - overview - dynamo - storage_engine - guarantees - diff --git a/src/doc/4.0-alpha2/_sources/architecture/overview.rst.txt b/src/doc/4.0-alpha2/_sources/architecture/overview.rst.txt deleted file mode 100644 index 005b15b94..000000000 --- a/src/doc/4.0-alpha2/_sources/architecture/overview.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Overview --------- - -.. 
todo:: todo diff --git a/src/doc/4.0-alpha2/_sources/architecture/storage_engine.rst.txt b/src/doc/4.0-alpha2/_sources/architecture/storage_engine.rst.txt deleted file mode 100644 index 23b738de7..000000000 --- a/src/doc/4.0-alpha2/_sources/architecture/storage_engine.rst.txt +++ /dev/null @@ -1,208 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Storage Engine --------------- - -.. _commit-log: - -CommitLog -^^^^^^^^^ - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables. - -All mutations write optimized by storing in commitlog segments, reducing the number of seeks needed to write to disk. Commitlog Segments are limited by the "commitlog_segment_size_in_mb" option, once the size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all its data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running "nodetool drain" before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup. - -- ``commitlog_segment_size_in_mb``: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via max_mutation_size_in_kb setting in cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. - -***NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*** - -*Default Value:* 32 - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied. - -- ``commitlog_sync``: may be either “periodic” or “batch.” - - - ``batch``: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait "commitlog_sync_batch_window_in_ms" milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason. 
- - - ``commitlog_sync_batch_window_in_ms``: Time to wait between "batch" fsyncs - *Default Value:* 2 - - - ``periodic``: In periodic mode, writes are immediately ack'ed, and the CommitLog is simply synced every "commitlog_sync_period_in_ms" milliseconds. - - - ``commitlog_sync_period_in_ms``: Time to wait between "periodic" fsyncs - *Default Value:* 10000 - -*Default Value:* batch - -*** NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period or more if the sync is delayed. If using "batch" mode, it is recommended to store commitlogs in a separate, dedicated device.** - - -- ``commitlog_directory``: This option is commented out by default When running on magnetic HDD, this should be a separate spindle than the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -- ``commitlog_compression``: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported. - -(Default Value: (complex option):: - - # - class_name: LZ4Compressor - # parameters: - # - - -- ``commitlog_total_space_in_mb``: Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume. - -*Default Value:* 8192 - -.. _memtables: - -Memtables -^^^^^^^^^ - -Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable `SSTables`_. This can be triggered in several -ways: - -- The memory usage of the memtables exceeds the configured threshold (see ``memtable_cleanup_threshold``) -- The :ref:`commit-log` approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to - be freed - -Memtables may be stored entirely on-heap or partially off-heap, depending on ``memtable_allocation_type``. - -SSTables -^^^^^^^^ - -SSTables are the immutable data files that Cassandra uses for persisting data on disk. - -As SSTables are flushed to disk from :ref:`memtables` or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed. - -Each SSTable is comprised of multiple components stored in separate files: - -``Data.db`` - The actual data, i.e. the contents of rows. - -``Index.db`` - An index from partition keys to positions in the ``Data.db`` file. For wide partitions, this may also include an - index to rows within a partition. - -``Summary.db`` - A sampling of (by default) every 128th entry in the ``Index.db`` file. - -``Filter.db`` - A Bloom Filter of the partition keys in the SSTable. - -``CompressionInfo.db`` - Metadata about the offsets and lengths of compression chunks in the ``Data.db`` file. - -``Statistics.db`` - Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, - repair, compression, TTLs, and more. - -``Digest.crc32`` - A CRC-32 digest of the ``Data.db`` file. - -``TOC.txt`` - A plain text list of the component files for the SSTable. - -Within the ``Data.db`` file, rows are organized by partition. 
These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, ``Murmur3Partition``, is used). Within a partition, rows are -stored in the order of their clustering keys. - -SSTables can be optionally compressed using block-based compression. - -SSTable Versions -^^^^^^^^^^^^^^^^ - -This section was created using the following -`gist `_ -which utilized this original -`source `_. - -The version numbers, to date are: - -Version 0 -~~~~~~~~~ - -* b (0.7.0): added version to sstable filenames -* c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings -* d (0.7.0): row size in data component becomes a long instead of int -* e (0.7.0): stores undecorated keys in data and index components -* f (0.7.0): switched bloom filter implementations in data component -* g (0.8): tracks flushed-at context in metadata component - -Version 1 -~~~~~~~~~ - -* h (1.0): tracks max client timestamp in metadata component -* hb (1.0.3): records compression ration in metadata component -* hc (1.0.4): records partitioner in metadata component -* hd (1.0.10): includes row tombstones in maxtimestamp -* he (1.1.3): includes ancestors generation in metadata component -* hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782) -* ia (1.2.0): - - * column indexes are promoted to the index file - * records estimated histogram of deletion times in tombstones - * bloom filter (keys and columns) upgraded to Murmur3 -* ib (1.2.1): tracks min client timestamp in metadata component -* ic (1.2.5): omits per-row bloom filter of column names - -Version 2 -~~~~~~~~~ - -* ja (2.0.0): - - * super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. We do need a major version bump however, because we should not allow streaming of super columns into this new format) - * tracks max local deletiontime in sstable metadata - * records bloom_filter_fp_chance in metadata component - * remove data size and column count from data file (CASSANDRA-4180) - * tracks max/min column values (according to comparator) -* jb (2.0.1): - - * switch from crc32 to adler32 for compression checksums - * checksum the compressed data -* ka (2.1.0): - - * new Statistics.db file format - * index summaries can be downsampled and the sampling level is persisted - * switch uncompressed checksums to adler32 - * tracks presense of legacy (local and remote) counter shards -* la (2.2.0): new file name format -* lb (2.2.7): commit log lower bound included - -Version 3 -~~~~~~~~~ - -* ma (3.0.0): - - * swap bf hash order - * store rows natively -* mb (3.0.7, 3.7): commit log lower bound included -* mc (3.0.8, 3.9): commit log intervals included - -Example Code -~~~~~~~~~~~~ - -The following example is useful for finding all sstables that do not match the "ib" SSTable version - -.. code-block:: bash - - find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots" diff --git a/src/doc/4.0-alpha2/_sources/bugs.rst.txt b/src/doc/4.0-alpha2/_sources/bugs.rst.txt deleted file mode 100644 index 32d676f9d..000000000 --- a/src/doc/4.0-alpha2/_sources/bugs.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. 
to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Reporting Bugs -============== - -If you encounter a problem with Cassandra, the first places to ask for help are the :ref:`user mailing list -` and the ``cassandra`` :ref:`Slack room `. - -If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the `Apache Cassandra JIRA `__. Please provide as much -details as you can on your problem, and don't forget to indicate which version of Cassandra you are running and on which -environment. - -Further details on how to contribute can be found at our :doc:`development/index` section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path. diff --git a/src/doc/4.0-alpha2/_sources/configuration/cassandra_config_file.rst.txt b/src/doc/4.0-alpha2/_sources/configuration/cassandra_config_file.rst.txt deleted file mode 100644 index 09a126596..000000000 --- a/src/doc/4.0-alpha2/_sources/configuration/cassandra_config_file.rst.txt +++ /dev/null @@ -1,2059 +0,0 @@ -.. _cassandra-yaml: - -Cassandra Configuration File -============================ - -``cluster_name`` ----------------- -The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another. - -*Default Value:* 'Test Cluster' - -``num_tokens`` --------------- - -This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability. - -If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below. - -Specifying initial_token will override this setting on the node's initial start, -on subsequent starts, this setting will apply even if initial token is set. - -If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations - -*Default Value:* 256 - -``allocate_tokens_for_keyspace`` --------------------------------- -*This option is commented out by default.* - -Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replica factor. - -The load assigned to each node will be close to proportional to its number of -vnodes. - -Only supported with the Murmur3Partitioner. - -Replica factor is determined via the replication strategy used by the specified -keyspace. 
- -*Default Value:* KEYSPACE - -``allocate_tokens_for_local_replication_factor`` ------------------------------------------------- -*This option is commented out by default.* - -Replica factor is explicitly set, regardless of keyspace or datacenter. -This is the replica factor within the datacenter, like NTS. - -*Default Value:* 3 - -``initial_token`` ------------------ -*This option is commented out by default.* - -initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) -- in which case you should provide a -comma-separated list -- it's primarily used when adding nodes to legacy clusters -that do not have vnodes enabled. - -``hinted_handoff_enabled`` --------------------------- - -See http://wiki.apache.org/cassandra/HintedHandoff -May either be "true" or "false" to enable globally - -*Default Value:* true - -``hinted_handoff_disabled_datacenters`` ---------------------------------------- -*This option is commented out by default.* - -When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff - -*Default Value (complex option)*:: - - # - DC1 - # - DC2 - -``max_hint_window_in_ms`` -------------------------- -this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again. - -*Default Value:* 10800000 # 3 hours - -``hinted_handoff_throttle_in_kb`` ---------------------------------- - -Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.) - -*Default Value:* 1024 - -``max_hints_delivery_threads`` ------------------------------- - -Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower - -*Default Value:* 2 - -``hints_directory`` -------------------- -*This option is commented out by default.* - -Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints. - -*Default Value:* /var/lib/cassandra/hints - -``hints_flush_period_in_ms`` ----------------------------- - -How often hints should be flushed from the internal buffers to disk. -Will *not* trigger fsync. - -*Default Value:* 10000 - -``max_hints_file_size_in_mb`` ------------------------------ - -Maximum size for a single hints file, in megabytes. - -*Default Value:* 128 - -``hints_compression`` ---------------------- -*This option is commented out by default.* - -Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``batchlog_replay_throttle_in_kb`` ----------------------------------- -Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster. - -*Default Value:* 1024 - -``authenticator`` ------------------ - -Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}. 
- -- AllowAllAuthenticator performs no checks - set it to disable authentication. -- PasswordAuthenticator relies on username/password pairs to authenticate - users. It keeps usernames and hashed passwords in system_auth.roles table. - Please increase system_auth keyspace replication factor if you use this authenticator. - If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) - -*Default Value:* AllowAllAuthenticator - -``authorizer`` --------------- - -Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}. - -- AllowAllAuthorizer allows any action to any user - set it to disable authorization. -- CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllAuthorizer - -``role_manager`` ----------------- - -Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable. - -- CassandraRoleManager stores role data in the system_auth keyspace. Please - increase system_auth keyspace replication factor if you use this role manager. - -*Default Value:* CassandraRoleManager - -``network_authorizer`` ----------------------- - -Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}. - -- AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization. -- CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllNetworkAuthorizer - -``roles_validity_in_ms`` ------------------------- - -Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator. - -*Default Value:* 2000 - -``roles_update_interval_in_ms`` -------------------------------- -*This option is commented out by default.* - -Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms. - -*Default Value:* 2000 - -``permissions_validity_in_ms`` ------------------------------- - -Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. 
-Will be disabled automatically for AllowAllAuthorizer. - -*Default Value:* 2000 - -``permissions_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms. - -*Default Value:* 2000 - -``credentials_validity_in_ms`` ------------------------------- - -Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching. - -*Default Value:* 2000 - -``credentials_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms. - -*Default Value:* 2000 - -``partitioner`` ---------------- - -The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. The partitioner can NOT be -changed without reloading all data. If you are adding nodes or upgrading, -you should set this to the same partitioner that you are currently using. - -The default partitioner is the Murmur3Partitioner. Older partitioners -such as the RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner have been included for backward compatibility only. -For new clusters, you should NOT change this value. - - -*Default Value:* org.apache.cassandra.dht.Murmur3Partitioner - -``data_file_directories`` -------------------------- -*This option is commented out by default.* - -Directories where Cassandra should store data on disk. If multiple -directories are specified, Cassandra will spread data evenly across -them by partitioning the token ranges. -If not set, the default directory is $CASSANDRA_HOME/data/data. - -*Default Value (complex option)*:: - - # - /var/lib/cassandra/data - -``commitlog_directory`` ------------------------ -*This option is commented out by default.* -commit log. when running on magnetic HDD, this should be a -separate spindle than the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -``cdc_enabled`` ---------------- - -Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory). 
- -*Default Value:* false - -``cdc_raw_directory`` ---------------------- -*This option is commented out by default.* - -CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw. - -*Default Value:* /var/lib/cassandra/cdc_raw - -``disk_failure_policy`` ------------------------ - -Policy for data disk failures: - -die - shut down gossip and client transports and kill the JVM for any fs errors or - single-sstable errors, so the node can be replaced. - -stop_paranoid - shut down gossip and client transports even for single-sstable errors, - kill the JVM for errors during startup. - -stop - shut down gossip and client transports, leaving the node effectively dead, but - can still be inspected via JMX, kill the JVM for errors during startup. - -best_effort - stop using the failed disk and respond to requests based on - remaining available sstables. This means you WILL see obsolete - data at CL.ONE! - -ignore - ignore fatal errors and let requests fail, as in pre-1.2 Cassandra - -*Default Value:* stop - -``commit_failure_policy`` -------------------------- - -Policy for commit disk failures: - -die - shut down the node and kill the JVM, so the node can be replaced. - -stop - shut down the node, leaving the node effectively dead, but - can still be inspected via JMX. - -stop_commit - shutdown the commit log, letting writes collect but - continuing to service reads, as in pre-2.0.5 Cassandra - -ignore - ignore fatal errors and let the batches fail - -*Default Value:* stop - -``prepared_statements_cache_size_mb`` -------------------------------------- - -Maximum size of the native protocol prepared statement cache - -Valid values are either "auto" (omitting the value) or a value greater 0. - -Note that specifying a too large value will result in long running GCs and possbily -out-of-memory errors. Keep the value at a small fraction of the heap. - -If you constantly see "prepared statements discarded in the last minute because -cache limit reached" messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts. - -Do only change the default value, if you really have more prepared statements than -fit in the cache. In most cases it is not neccessary to change this value. -Constantly re-preparing statements is a performance penalty. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``key_cache_size_in_mb`` ------------------------- - -Maximum size of the key cache in memory. - -Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it's worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It's best to only use the -row cache if you have hot rows or static rows. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. - -``key_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should -save the key cache. 
Caches are saved to saved_caches_directory as -specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 14400 or 4 hours. - -*Default Value:* 14400 - -``key_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``row_cache_class_name`` ------------------------- -*This option is commented out by default.* - -Row cache implementation class name. Available implementations: - -org.apache.cassandra.cache.OHCProvider - Fully off-heap row cache implementation (default). - -org.apache.cassandra.cache.SerializingCacheProvider - This is the row cache implementation availabile - in previous releases of Cassandra. - -*Default Value:* org.apache.cassandra.cache.OHCProvider - -``row_cache_size_in_mb`` ------------------------- - -Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory that the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Do never allow your system to swap. - -Default value is 0, to disable row caching. - -*Default Value:* 0 - -``row_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 0 to disable saving the row cache. - -*Default Value:* 0 - -``row_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved - -*Default Value:* 100 - -``counter_cache_size_in_mb`` ----------------------------- - -Maximum size of the counter cache in memory. - -Counter cache helps to reduce counter locks' contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it's relatively cheap. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. - -``counter_cache_save_period`` ------------------------------ - -Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file. - -Default is 7200 or 2 hours. 
- -*Default Value:* 7200 - -``counter_cache_keys_to_save`` ------------------------------- -*This option is commented out by default.* - -Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``saved_caches_directory`` --------------------------- -*This option is commented out by default.* - -saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. - -*Default Value:* /var/lib/cassandra/saved_caches - -``commitlog_sync_batch_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -commitlog_sync may be either "periodic", "group", or "batch." - -When in batch mode, Cassandra won't ack writes until the commit log -has been flushed to disk. Each incoming write will trigger the flush task. -commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had -almost no value, and is being removed. - - -*Default Value:* 2 - -``commitlog_sync_group_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -group mode is similar to batch mode, where Cassandra will not ack writes -until the commit log has been flushed to disk. The difference is group -mode will wait up to commitlog_sync_group_window_in_ms between flushes. - - -*Default Value:* 1000 - -``commitlog_sync`` ------------------- - -the default option is "periodic" where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds. - -*Default Value:* periodic - -``commitlog_sync_period_in_ms`` -------------------------------- - -*Default Value:* 10000 - -``periodic_commitlog_sync_lag_block_in_ms`` -------------------------------------------- -*This option is commented out by default.* - -When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete. - -``commitlog_segment_size_in_mb`` --------------------------------- - -The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables. - -The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048. - -NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024 - - -*Default Value:* 32 - -``commitlog_compression`` -------------------------- -*This option is commented out by default.* - -Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``seed_provider`` ------------------ -any class that implements the SeedProvider interface and has a -constructor that takes a Map of parameters will do. - -*Default Value (complex option)*:: - - # Addresses of hosts that are deemed contact points. 
- # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: ",," - - seeds: "127.0.0.1:7000" - -``concurrent_reads`` --------------------- -For workloads with more data than can fit in memory, Cassandra's -bottleneck will be reads that need to fetch data from -disk. "concurrent_reads" should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -"concurrent_counter_writes", since counter writes read the current -values before incrementing and writing them back. - -On the other hand, since writes are almost never IO bound, the ideal -number of "concurrent_writes" is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb. - -*Default Value:* 32 - -``concurrent_writes`` ---------------------- - -*Default Value:* 32 - -``concurrent_counter_writes`` ------------------------------ - -*Default Value:* 32 - -``concurrent_materialized_view_writes`` ---------------------------------------- - -For materialized view writes, as there is a read involved, so this should -be limited by the less of concurrent reads or concurrent writes. - -*Default Value:* 32 - -``file_cache_size_in_mb`` -------------------------- -*This option is commented out by default.* - -Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as an -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed. - -*Default Value:* 512 - -``buffer_pool_use_heap_if_exhausted`` -------------------------------------- -*This option is commented out by default.* - -Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. - - -*Default Value:* true - -``disk_optimization_strategy`` ------------------------------- -*This option is commented out by default.* - -The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks) - -*Default Value:* ssd - -``memtable_heap_space_in_mb`` ------------------------------ -*This option is commented out by default.* - -Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap. - -*Default Value:* 2048 - -``memtable_offheap_space_in_mb`` --------------------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``memtable_cleanup_threshold`` ------------------------------- -*This option is commented out by default.* - -memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. 
See the comments on memtable_flush_writers -for more information. - -Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load. - -memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) - -*Default Value:* 0.11 - -``memtable_allocation_type`` ----------------------------- - -Specify the way Cassandra allocates and manages memtable memory. -Options are: - -heap_buffers - on heap nio buffers - -offheap_buffers - off heap (direct) nio buffers - -offheap_objects - off heap objects - -*Default Value:* heap_buffers - -``repair_session_space_in_mb`` ------------------------------- -*This option is commented out by default.* - -Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair. - -For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096. - - -``commitlog_total_space_in_mb`` -------------------------------- -*This option is commented out by default.* - -Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume. - - -*Default Value:* 8192 - -``memtable_flush_writers`` --------------------------- -*This option is commented out by default.* - -This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound. - -Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time. - -You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory. - -memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers. - -Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead. - -There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory. 
- - -*Default Value:* 2 - -``cdc_total_space_in_mb`` -------------------------- -*This option is commented out by default.* - -Total space to use for change-data-capture logs on disk. - -If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed. - -The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides. - -*Default Value:* 4096 - -``cdc_free_space_check_interval_ms`` ------------------------------------- -*This option is commented out by default.* - -When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms - -*Default Value:* 250 - -``index_summary_capacity_in_mb`` --------------------------------- - -A fixed memory pool size in MB for for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory. - -``index_summary_resize_interval_in_minutes`` --------------------------------------------- - -How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level. - -*Default Value:* 60 - -``trickle_fsync`` ------------------ - -Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters. - -*Default Value:* false - -``trickle_fsync_interval_in_kb`` --------------------------------- - -*Default Value:* 10240 - -``storage_port`` ----------------- - -TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7000 - -``ssl_storage_port`` --------------------- - -SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7001 - -``listen_address`` ------------------- - -Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate! - -Set listen_address OR listen_interface, not both. - -Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be). - -Setting listen_address to 0.0.0.0 is always wrong. 
- - -*Default Value:* localhost - -``listen_interface`` --------------------- -*This option is commented out by default.* - -Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth0 - -``listen_interface_prefer_ipv6`` --------------------------------- -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_address`` ---------------------- -*This option is commented out by default.* - -Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address - -*Default Value:* 1.2.3.4 - -``listen_on_broadcast_address`` -------------------------------- -*This option is commented out by default.* - -When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2. - -*Default Value:* false - -``internode_authenticator`` ---------------------------- -*This option is commented out by default.* - -Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes. - -*Default Value:* org.apache.cassandra.auth.AllowAllInternodeAuthenticator - -``start_native_transport`` --------------------------- - -Whether to start the native transport server. -The address on which the native transport is bound is defined by rpc_address. - -*Default Value:* true - -``native_transport_port`` -------------------------- -port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 9042 - -``native_transport_port_ssl`` ------------------------------ -*This option is commented out by default.* -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted. - -*Default Value:* 9142 - -``native_transport_max_threads`` --------------------------------- -*This option is commented out by default.* -The maximum threads for handling requests (note that idle threads are stopped -after 30 seconds so there is not corresponding minimum setting). - -*Default Value:* 128 - -``native_transport_max_frame_size_in_mb`` ------------------------------------------ -*This option is commented out by default.* - -The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you're changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. 
This should be positive and less than 2048. - -*Default Value:* 256 - -``native_transport_frame_block_size_in_kb`` -------------------------------------------- -*This option is commented out by default.* - -If checksumming is enabled as a protocol option, denotes the size of the chunks into which frame -are bodies will be broken and checksummed. - -*Default Value:* 32 - -``native_transport_max_concurrent_connections`` ------------------------------------------------ -*This option is commented out by default.* - -The maximum number of concurrent client connections. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_max_concurrent_connections_per_ip`` ------------------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_allow_older_protocols`` ------------------------------------------- - -Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored. - -*Default Value:* true - -``native_transport_idle_timeout_in_ms`` ---------------------------------------- -*This option is commented out by default.* - -Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period. - -Clients may implement heartbeats by sending OPTIONS native protocol message after a timeout, which -will reset idle timeout timer on the server side. To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side. - -Idle connection timeouts are disabled by default. - -*Default Value:* 60000 - -``rpc_address`` ---------------- - -The address or interface to bind the native transport server to. - -Set rpc_address OR rpc_interface, not both. - -Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node). - -Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0. - -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* localhost - -``rpc_interface`` ------------------ -*This option is commented out by default.* - -Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth1 - -``rpc_interface_prefer_ipv6`` ------------------------------ -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_rpc_address`` -------------------------- -*This option is commented out by default.* - -RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set. 
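For example (the addresses are documentation placeholders), binding the native transport to all interfaces requires advertising a concrete address to drivers::

    # Illustrative values only
    rpc_address: 0.0.0.0
    # Required whenever rpc_address is 0.0.0.0
    broadcast_rpc_address: 192.0.2.11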
- -*Default Value:* 1.2.3.4 - -``rpc_keepalive`` ------------------ - -enable or disable keepalive on rpc/native connections - -*Default Value:* true - -``internode_send_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and 'man tcp' - -``internode_recv_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem - -``incremental_backups`` ------------------------ - -Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator's -responsibility. - -*Default Value:* false - -``snapshot_before_compaction`` ------------------------------- - -Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won't clean up the -snapshots for you. Mostly useful if you're paranoid when there -is a data format change. - -*Default Value:* false - -``auto_snapshot`` ------------------ - -Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop. - -*Default Value:* true - -``column_index_size_in_kb`` ---------------------------- - -Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these: - -- a smaller granularity means more index entries are generated - and looking up rows withing the partition by collation column - is faster -- but, Cassandra will keep the collation index in memory for hot - rows (as part of the key cache), so a larger granularity means - you can cache more hot rows - -*Default Value:* 64 - -``column_index_cache_size_in_kb`` ---------------------------------- - -Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk. - -Note that this size refers to the size of the -serialized index information and not the size of the partition. - -*Default Value:* 2 - -``concurrent_compactors`` -------------------------- -*This option is commented out by default.* - -Number of simultaneous compactions to allow, NOT including -validation "compactions" for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long running compactions. The default is usually -fine and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first. 
- -concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8. - -If your data directories are backed by SSD, you should increase this -to the number of cores. - -*Default Value:* 1 - -``concurrent_validations`` --------------------------- -*This option is commented out by default.* - -Number of simultaneous repair validations to allow. Default is unbounded -Values less than one are interpreted as unbounded (the default) - -*Default Value:* 0 - -``concurrent_materialized_view_builders`` ------------------------------------------ - -Number of simultaneous materialized view builder tasks to allow. - -*Default Value:* 1 - -``compaction_throughput_mb_per_sec`` ------------------------------------- - -Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this account for all types -of compaction, including validation compaction. - -*Default Value:* 16 - -``sstable_preemptive_open_interval_in_mb`` ------------------------------------------- - -When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot - -*Default Value:* 50 - -``stream_entire_sstables`` --------------------------- -*This option is commented out by default.* - -When enabled, permits Cassandra to zero-copy stream entire eligible -SSTables between nodes, including every component. -This speeds up the network transfer significantly subject to -throttling specified by stream_throughput_outbound_megabits_per_sec. -Enabling this will reduce the GC pressure on sending and receiving node. -When unset, the default is enabled. While this feature tries to keep the -disks balanced, it cannot guarantee it. This feature will be automatically -disabled if internode encryption is enabled. Currently this can be used with -Leveled Compaction. Once CASSANDRA-14586 is fixed other compaction strategies -will benefit as well when used in combination with CASSANDRA-6696. - -*Default Value:* true - -``stream_throughput_outbound_megabits_per_sec`` ------------------------------------------------ -*This option is commented out by default.* - -Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s. 
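By way of illustration only (the numbers are arbitrary, not tuning advice), the compaction and streaming throttles described above might be set together::

    # Illustrative values only
    # Throttle compaction across the node
    compaction_throughput_mb_per_sec: 64
    # Throttle outbound streaming file transfers
    stream_throughput_outbound_megabits_per_sec: 400
    # Permit zero-copy streaming of entire eligible SSTables
    stream_entire_sstables: true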
- -*Default Value:* 200 - -``inter_dc_stream_throughput_outbound_megabits_per_sec`` --------------------------------------------------------- -*This option is commented out by default.* - -Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s - -*Default Value:* 200 - -``read_request_timeout_in_ms`` ------------------------------- - -How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``range_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 10000 - -``write_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 2000 - -``counter_write_request_timeout_in_ms`` ---------------------------------------- -How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``cas_contention_timeout_in_ms`` --------------------------------- -How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms. - -*Default Value:* 1000 - -``truncate_request_timeout_in_ms`` ----------------------------------- -How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms. - -*Default Value:* 60000 - -``request_timeout_in_ms`` -------------------------- -The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms. - -*Default Value:* 10000 - -``internode_application_send_queue_capacity_in_bytes`` ------------------------------------------------------- -*This option is commented out by default.* - -Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details. - -The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000 - -The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000 - -The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000 - -Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received. - -The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. 
So any given node may have a maximum of
-N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes)
-messages queued without any coordination between them, although in practice, with token-aware routing, only RF*tokens
-nodes should need to communicate with significant bandwidth.
-
-The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit,
-on all links to or from a single node in the cluster.
-The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit,
-on all links to or from any node in the cluster.
-
-
-*Default Value:* 4194304 #4MiB
-
-``internode_application_send_queue_reserve_endpoint_capacity_in_bytes``
-------------------------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 134217728 #128MiB
-
-``internode_application_send_queue_reserve_global_capacity_in_bytes``
-----------------------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 536870912 #512MiB
-
-``internode_application_receive_queue_capacity_in_bytes``
----------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 4194304 #4MiB
-
-``internode_application_receive_queue_reserve_endpoint_capacity_in_bytes``
---------------------------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 134217728 #128MiB
-
-``internode_application_receive_queue_reserve_global_capacity_in_bytes``
-------------------------------------------------------------------------
-*This option is commented out by default.*
-
-*Default Value:* 536870912 #512MiB
-
-``slow_query_log_timeout_in_ms``
---------------------------------
-
-
-How long before a node logs slow queries. Select queries that take longer than
-this timeout to execute will generate an aggregated log message, so that slow queries
-can be identified. Set this value to zero to disable slow query logging.
-
-*Default Value:* 500
-
-``cross_node_timeout``
-----------------------
-
-Enable operation timeout information exchange between nodes to accurately
-measure request timeouts. If disabled, replicas will assume that requests
-were forwarded to them instantly by the coordinator, which means that
-under overload conditions we will waste that much extra time processing
-already-timed-out requests.
-
-Warning: before enabling this property make sure ntp is installed
-and the times are synchronized between the nodes.
-
-*Default Value:* false
-
-``streaming_keep_alive_period_in_secs``
----------------------------------------
-*This option is commented out by default.*
-
-Set the keep-alive period for streaming.
-This node will send a keep-alive message periodically with this period.
-If the node does not receive a keep-alive message from the peer for
-2 keep-alive cycles the stream session times out and fails.
-Default value is 300s (5 minutes), which means a stalled stream
-times out in 10 minutes by default.
-
-*Default Value:* 300
-
-``streaming_connections_per_host``
-----------------------------------
-*This option is commented out by default.*
-
-Limit the number of connections per host for streaming.
-Increase this when you notice that joins are CPU-bound rather than network
-bound (for example a few nodes with big files).
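As a purely illustrative sketch (these option names appear in the section above; the values are placeholders rather than recommendations), the streaming knobs might be combined in cassandra.yaml like this::

    # Illustrative values only
    # Send a streaming keep-alive every 5 minutes; the session fails after 2 missed cycles
    streaming_keep_alive_period_in_secs: 300
    # Allow a second streaming connection per host when joins are CPU-bound
    streaming_connections_per_host: 2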
- -*Default Value:* 1 - -``phi_convict_threshold`` -------------------------- -*This option is commented out by default.* - - -phi value that must be reached for a host to be marked down. -most users should never need to adjust this. - -*Default Value:* 8 - -``endpoint_snitch`` -------------------- - -endpoint_snitch -- Set this to a class that implements -IEndpointSnitch. The snitch has two functions: - -- it teaches Cassandra enough about your network topology to route - requests efficiently -- it allows Cassandra to spread replicas around your cluster to avoid - correlated failures. It does this by grouping machines into - "datacenters" and "racks." Cassandra will do its best not to have - more than one replica on the same "rack" (which may not actually - be a physical location) - -CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on "rack1" in "datacenter1", your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new "datacenter") and -decommissioning the old ones. - -Out of the box, Cassandra provides: - -SimpleSnitch: - Treats Strategy order as proximity. This can improve cache - locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack - and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via - gossip. If cassandra-topology.properties exists, it is used as a - fallback, allowing migration from the PropertyFileSnitch. - -PropertyFileSnitch: - Proximity is determined by rack and data center, which are - explicitly configured in cassandra-topology.properties. - -Ec2Snitch: - Appropriate for EC2 deployments in a single Region. Loads Region - and Availability Zone information from the EC2 API. The Region is - treated as the datacenter, and the Availability Zone as the rack. - Only private IPs are used, so this will not work across multiple - Regions. - -Ec2MultiRegionSnitch: - Uses public IPs as broadcast_address to allow cross-region - connectivity. (Thus, you should set seed addresses to the public - IP as well.) You will need to open the storage_port or - ssl_storage_port on the public IP firewall. (For intra-Region - traffic, Cassandra will switch to the private IP after - establishing a connection.) - -RackInferringSnitch: - Proximity is determined by rack and data center, which are - assumed to correspond to the 3rd and 2nd octet of each node's IP - address, respectively. Unless this happens to match your - deployment conventions, this is best used as an example of - writing a custom Snitch class and is provided in that spirit. - -You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath. 
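As a short illustrative example (a sketch, not a recommendation), a production cluster would typically move off the default snitch::

    # Rack and datacenter for the local node are then read from cassandra-rackdc.properties
    endpoint_snitch: GossipingPropertyFileSnitch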
- -*Default Value:* SimpleSnitch - -``dynamic_snitch_update_interval_in_ms`` ----------------------------------------- - -controls how often to perform the more expensive part of host score -calculation - -*Default Value:* 100 - -``dynamic_snitch_reset_interval_in_ms`` ---------------------------------------- -controls how often to reset all host scores, allowing a bad host to -possibly recover - -*Default Value:* 600000 - -``dynamic_snitch_badness_threshold`` ------------------------------------- -if set greater than zero, this will allow -'pinning' of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest. - -*Default Value:* 0.1 - -``server_encryption_options`` ------------------------------ - -Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html - -*NOTE* No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks - -The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore - - -*Default Value (complex option)*:: - - # set to true for allowing secure incoming connections - enabled: false - # If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port - optional: false - # if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used - # during upgrade to 4.0; otherwise, set to false. - enable_legacy_ssl_storage_port: false - # on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true. - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -``client_encryption_options`` ------------------------------ -enable or disable client-to-server encryption. - -*Default Value (complex option)*:: - - enabled: false - # If enabled and optional is set to true encrypted and unencrypted connections are handled. 
-    optional: false
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    # require_client_auth: false
-    # Set truststore and truststore_password if require_client_auth is true
-    # truststore: conf/.truststore
-    # truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-``internode_compression``
--------------------------
-internode_compression controls whether traffic between nodes is
-compressed.
-Can be:
-
-all
-  all traffic is compressed
-
-dc
-  traffic between different datacenters is compressed
-
-none
-  nothing is compressed.
-
-*Default Value:* dc
-
-``inter_dc_tcp_nodelay``
-------------------------
-
-Enable or disable tcp_nodelay for inter-dc communication.
-Disabling it will result in larger (but fewer) network packets being sent,
-reducing overhead from the TCP protocol itself, at the cost of increasing
-latency if you block for cross-datacenter responses.
-
-*Default Value:* false
-
-``tracetype_query_ttl``
------------------------
-
-TTL for different trace types used during logging of the repair process.
-
-*Default Value:* 86400
-
-``tracetype_repair_ttl``
-------------------------
-
-*Default Value:* 604800
-
-``enable_user_defined_functions``
----------------------------------
-
-If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
-INFO level
-UDFs (user defined functions) are disabled by default.
-As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
-
-*Default Value:* false
-
-``enable_scripted_user_defined_functions``
-------------------------------------------
-
-Enables scripted UDFs (JavaScript UDFs).
-Java UDFs are always enabled if enable_user_defined_functions is true.
-Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
-This option has no effect if enable_user_defined_functions is false.
-
-*Default Value:* false
-
-``windows_timer_interval``
---------------------------
-
-The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
-Lowering this value on Windows can provide much tighter latency and better throughput, however
-some virtualized environments may see a negative performance impact from changing this setting
-below their system default. The sysinternals 'clockres' tool can confirm your system's default
-setting.
-
-*Default Value:* 1
-
-``transparent_data_encryption_options``
----------------------------------------
-
-
-Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
-a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
-the "key_alias" is the only key that will be used for encrypt operations; previously used keys
-can still (and should!) be in the keystore and will be used on decrypt operations
-(to handle the case of key rotation).
-
-It is strongly recommended to download and install Java Cryptography Extension (JCE)
-Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
-(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
-
-Currently, only the following file types are supported for transparent data encryption, although
-more are coming in future cassandra releases: commitlog, hints
-
-*Default Value (complex option)*::
-
-    enabled: false
-    chunk_length_kb: 64
-    cipher: AES/CBC/PKCS5Padding
-    key_alias: testing:1
-    # CBC IV length for AES needs to be 16 bytes (which is also the default size)
-    # iv_length: 16
-    key_provider:
-      - class_name: org.apache.cassandra.security.JKSKeyProvider
-        parameters:
-          - keystore: conf/.keystore
-            keystore_password: cassandra
-            store_type: JCEKS
-            key_password: cassandra
-
-``tombstone_warn_threshold``
-----------------------------
-
-####################
-SAFETY THRESHOLDS #
-####################
-
-When executing a scan, within or across a partition, we need to keep the
-tombstones seen in memory so we can return them to the coordinator, which
-will use them to make sure other replicas also know about the deleted rows.
-With workloads that generate a lot of tombstones, this can cause performance
-problems and even exhaust the server heap.
-(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
-Adjust the thresholds here if you understand the dangers and want to
-scan more tombstones anyway. These thresholds may also be adjusted at runtime
-using the StorageService mbean.
-
-*Default Value:* 1000
-
-``tombstone_failure_threshold``
--------------------------------
-
-*Default Value:* 100000
-
-``batch_size_warn_threshold_in_kb``
------------------------------------
-
-Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default.
-Caution should be taken on increasing the size of this threshold as it can lead to node instability.
-
-*Default Value:* 5
-
-``batch_size_fail_threshold_in_kb``
------------------------------------
-
-Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.
-
-*Default Value:* 50
-
-``unlogged_batch_across_partitions_warn_threshold``
-----------------------------------------------------
-
-Log WARN on any batches not of type LOGGED that span across more partitions than this limit
-
-*Default Value:* 10
-
-``compaction_large_partition_warning_threshold_mb``
-----------------------------------------------------
-
-Log a warning when compacting partitions larger than this value
-
-*Default Value:* 100
-
-``gc_log_threshold_in_ms``
---------------------------
-*This option is commented out by default.*
-
-GC Pauses greater than 200 ms will be logged at INFO level
-This threshold can be adjusted to minimize logging if necessary
-
-*Default Value:* 200
-
-``gc_warn_threshold_in_ms``
----------------------------
-*This option is commented out by default.*
-
-GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
-Adjust the threshold based on your application throughput requirement. Setting to 0
-will deactivate the feature.
-
-*Default Value:* 1000
-
-``max_value_size_in_mb``
-------------------------
-*This option is commented out by default.*
-
-Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
-early. Any value size larger than this threshold will result in marking an SSTable
-as corrupted. This should be positive and less than 2048.
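For illustration only (the thresholds below are arbitrary values chosen to show the shape of the settings, not tuning advice), the safety thresholds above would be set in cassandra.yaml as::

    # Illustrative values only
    tombstone_warn_threshold: 2000
    tombstone_failure_threshold: 200000
    # Warn on multi-partition batches above 10kb and fail above 100kb
    batch_size_warn_threshold_in_kb: 10
    batch_size_fail_threshold_in_kb: 100
    # Keep the SSTable value-size safety limit at its default
    # max_value_size_in_mb: 256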
- -*Default Value:* 256 - -``back_pressure_enabled`` -------------------------- - -Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas. - -*Default Value:* false - -``back_pressure_strategy`` --------------------------- -The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map. - -``otc_coalescing_strategy`` ---------------------------- -*This option is commented out by default.* - -Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal -doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details. - -Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name. - -*Default Value:* DISABLED - -``otc_coalescing_window_us`` ----------------------------- -*This option is commented out by default.* - -How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled. - -*Default Value:* 200 - -``otc_coalescing_enough_coalesced_messages`` --------------------------------------------- -*This option is commented out by default.* - -Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128. 
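A hedged example (the values are illustrative, and the strategy name is one of those listed above), enabling back-pressure and message coalescing might look like::

    # Illustrative values only
    # Apply the rate-based back-pressure strategy to mutations sent to replicas
    back_pressure_enabled: true
    # Coalesce outbound internode messages using a moving average window
    otc_coalescing_strategy: movingaverage
    otc_coalescing_window_us: 200
    otc_coalescing_enough_coalesced_messages: 16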
-
-*Default Value:* 8
-
-``otc_backlog_expiration_interval_ms``
---------------------------------------
-*This option is commented out by default.*
-
-How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection.
-Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory
-taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value
-will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU
-time and queue contention while iterating the backlog of messages.
-An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.
-
-
-*Default Value:* 200
-
-``ideal_consistency_level``
----------------------------
-*This option is commented out by default.*
-
-Track a metric per keyspace indicating whether replication achieved the ideal consistency
-level for writes without timing out. This is different from the consistency level requested by
-each write which may be lower in order to facilitate availability.
-
-*Default Value:* EACH_QUORUM
-
-``full_query_log_dir``
-----------------------
-*This option is commented out by default.*
-
-Path to write full query log data to when the full query log is enabled
-The full query log will recursively delete the contents of this path at
-times. Don't place links in this directory to other parts of the filesystem.
-
-*Default Value:* /tmp/cassandrafullquerylog
-
-``automatic_sstable_upgrade``
------------------------------
-*This option is commented out by default.*
-
-Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the
-oldest non-upgraded sstable will get upgraded to the latest version
-
-*Default Value:* false
-
-``max_concurrent_automatic_sstable_upgrades``
----------------------------------------------
-*This option is commented out by default.*
-Limit the number of concurrent sstable upgrades
-
-*Default Value:* 1
-
-``audit_logging_options``
--------------------------
-
-Audit logging - Logs every incoming CQL command request and authentication to a node. See the docs
-on audit_logging for full details about the various configuration options.
-
-``full_query_logging_options``
-------------------------------
-*This option is commented out by default.*
-
-
-default options for full query logging - these can be overridden from command line when executing
-nodetool enablefullquerylog
-
-``corrupted_tombstone_strategy``
---------------------------------
-*This option is commented out by default.*
-
-validate tombstones on reads and compaction
-can be either "disabled", "warn" or "exception"
-
-*Default Value:* disabled
-
-``diagnostic_events_enabled``
------------------------------
-
-Diagnostic Events #
-If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details
-on internal state and temporal relationships across events, accessible by clients via JMX.
-
-*Default Value:* false
-
-``native_transport_flush_in_batches_legacy``
---------------------------------------------
-*This option is commented out by default.*
-
-Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in
-particular if you run an old kernel or have very few client connections, this option might be worth evaluating.
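As a sketch only (the option names are from this section; the values are illustrative), several of the operational options above could be enabled together::

    # Illustrative values only
    # Validate tombstones on reads and compaction, logging a warning on corruption
    corrupted_tombstone_strategy: warn
    # Emit diagnostic events, accessible by clients via JMX
    diagnostic_events_enabled: true
    # Upgrade old sstables in the background, one at a time
    automatic_sstable_upgrade: true
    max_concurrent_automatic_sstable_upgrades: 1
    # Re-enable native transport TCP message coalescing if throughput regressed after upgrading to 4.0
    native_transport_flush_in_batches_legacy: true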
- -*Default Value:* false - -``repaired_data_tracking_for_range_reads_enabled`` --------------------------------------------------- - -Enable tracking of repaired state of data during reads and comparison between replicas -Mismatches between the repaired sets of replicas can be characterized as either confirmed -or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair -sessions, unrepaired partition tombstones, or some other condition means that the disparity -cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation -as they may be indicative of corruption or data loss. -There are separate flags for range vs partition reads as single partition reads are only tracked -when CL > 1 and a digest mismatch occurs. Currently, range queries don't use digests so if -enabled for range reads, all range reads will include repaired data tracking. As this adds -some overhead, operators may wish to disable it whilst still enabling it for partition reads - -*Default Value:* false - -``repaired_data_tracking_for_partition_reads_enabled`` ------------------------------------------------------- - -*Default Value:* false - -``report_unconfirmed_repaired_data_mismatches`` ------------------------------------------------ -If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed -mismatches will also be recorded. This is to avoid potential signal:noise issues are unconfirmed -mismatches are less actionable than confirmed ones. - -*Default Value:* false - -``enable_materialized_views`` ------------------------------ - -######################## -EXPERIMENTAL FEATURES # -######################## - -Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use. - -*Default Value:* false - -``enable_sasi_indexes`` ------------------------ - -Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use. - -*Default Value:* false - -``enable_transient_replication`` --------------------------------- - -Enables creation of transiently replicated keyspaces on this node. -Transient replication is experimental and is not recommended for production use. - -*Default Value:* false diff --git a/src/doc/4.0-alpha2/_sources/configuration/index.rst.txt b/src/doc/4.0-alpha2/_sources/configuration/index.rst.txt deleted file mode 100644 index f774fdad6..000000000 --- a/src/doc/4.0-alpha2/_sources/configuration/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra -===================== - -This section describes how to configure Apache Cassandra. - -.. 
toctree:: - :maxdepth: 1 - - cassandra_config_file diff --git a/src/doc/4.0-alpha2/_sources/contactus.rst.txt b/src/doc/4.0-alpha2/_sources/contactus.rst.txt deleted file mode 100644 index 3ed9004dd..000000000 --- a/src/doc/4.0-alpha2/_sources/contactus.rst.txt +++ /dev/null @@ -1,50 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contact us -========== - -You can get in touch with the Cassandra community either via the mailing lists or :ref:`Slack rooms `. - -.. _mailing-lists: - -Mailing lists -------------- - -The following mailing lists are available: - -- `Users `__ – General discussion list for users - `Subscribe - `__ -- `Developers `__ – Development related discussion - `Subscribe - `__ -- `Commits `__ – Commit notification source repository - - `Subscribe `__ -- `Client Libraries `__ – Discussion related to the - development of idiomatic client APIs - `Subscribe `__ - -Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe. - -.. _slack: - -Slack ------ -To chat with developers or users in real-time, join our rooms on `ASF Slack `__: - -- ``cassandra`` - for user questions and general discussions. -- ``cassandra-dev`` - strictly for questions or discussions related to Cassandra development. - diff --git a/src/doc/4.0-alpha2/_sources/cql/appendices.rst.txt b/src/doc/4.0-alpha2/_sources/cql/appendices.rst.txt deleted file mode 100644 index 480b78ea2..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/appendices.rst.txt +++ /dev/null @@ -1,330 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Appendices ----------- - -.. _appendix-A: - -Appendix A: CQL Keywords -~~~~~~~~~~~~~~~~~~~~~~~~ - -CQL distinguishes between *reserved* and *non-reserved* keywords. 
-Reserved keywords cannot be used as identifier, they are truly reserved -for the language (but one can enclose a reserved keyword by -double-quotes to use it as an identifier). Non-reserved keywords however -only have a specific meaning in certain context but can used as -identifier otherwise. The only *raison d’être* of these non-reserved -keywords is convenience: some keyword are non-reserved when it was -always easy for the parser to decide whether they were used as keywords -or not. - -+--------------------+-------------+ -| Keyword | Reserved? | -+====================+=============+ -| ``ADD`` | yes | -+--------------------+-------------+ -| ``AGGREGATE`` | no | -+--------------------+-------------+ -| ``ALL`` | no | -+--------------------+-------------+ -| ``ALLOW`` | yes | -+--------------------+-------------+ -| ``ALTER`` | yes | -+--------------------+-------------+ -| ``AND`` | yes | -+--------------------+-------------+ -| ``APPLY`` | yes | -+--------------------+-------------+ -| ``AS`` | no | -+--------------------+-------------+ -| ``ASC`` | yes | -+--------------------+-------------+ -| ``ASCII`` | no | -+--------------------+-------------+ -| ``AUTHORIZE`` | yes | -+--------------------+-------------+ -| ``BATCH`` | yes | -+--------------------+-------------+ -| ``BEGIN`` | yes | -+--------------------+-------------+ -| ``BIGINT`` | no | -+--------------------+-------------+ -| ``BLOB`` | no | -+--------------------+-------------+ -| ``BOOLEAN`` | no | -+--------------------+-------------+ -| ``BY`` | yes | -+--------------------+-------------+ -| ``CALLED`` | no | -+--------------------+-------------+ -| ``CLUSTERING`` | no | -+--------------------+-------------+ -| ``COLUMNFAMILY`` | yes | -+--------------------+-------------+ -| ``COMPACT`` | no | -+--------------------+-------------+ -| ``CONTAINS`` | no | -+--------------------+-------------+ -| ``COUNT`` | no | -+--------------------+-------------+ -| ``COUNTER`` | no | -+--------------------+-------------+ -| ``CREATE`` | yes | -+--------------------+-------------+ -| ``CUSTOM`` | no | -+--------------------+-------------+ -| ``DATE`` | no | -+--------------------+-------------+ -| ``DECIMAL`` | no | -+--------------------+-------------+ -| ``DELETE`` | yes | -+--------------------+-------------+ -| ``DESC`` | yes | -+--------------------+-------------+ -| ``DESCRIBE`` | yes | -+--------------------+-------------+ -| ``DISTINCT`` | no | -+--------------------+-------------+ -| ``DOUBLE`` | no | -+--------------------+-------------+ -| ``DROP`` | yes | -+--------------------+-------------+ -| ``ENTRIES`` | yes | -+--------------------+-------------+ -| ``EXECUTE`` | yes | -+--------------------+-------------+ -| ``EXISTS`` | no | -+--------------------+-------------+ -| ``FILTERING`` | no | -+--------------------+-------------+ -| ``FINALFUNC`` | no | -+--------------------+-------------+ -| ``FLOAT`` | no | -+--------------------+-------------+ -| ``FROM`` | yes | -+--------------------+-------------+ -| ``FROZEN`` | no | -+--------------------+-------------+ -| ``FULL`` | yes | -+--------------------+-------------+ -| ``FUNCTION`` | no | -+--------------------+-------------+ -| ``FUNCTIONS`` | no | -+--------------------+-------------+ -| ``GRANT`` | yes | -+--------------------+-------------+ -| ``IF`` | yes | -+--------------------+-------------+ -| ``IN`` | yes | -+--------------------+-------------+ -| ``INDEX`` | yes | -+--------------------+-------------+ -| ``INET`` | no | 
-+--------------------+-------------+ -| ``INFINITY`` | yes | -+--------------------+-------------+ -| ``INITCOND`` | no | -+--------------------+-------------+ -| ``INPUT`` | no | -+--------------------+-------------+ -| ``INSERT`` | yes | -+--------------------+-------------+ -| ``INT`` | no | -+--------------------+-------------+ -| ``INTO`` | yes | -+--------------------+-------------+ -| ``JSON`` | no | -+--------------------+-------------+ -| ``KEY`` | no | -+--------------------+-------------+ -| ``KEYS`` | no | -+--------------------+-------------+ -| ``KEYSPACE`` | yes | -+--------------------+-------------+ -| ``KEYSPACES`` | no | -+--------------------+-------------+ -| ``LANGUAGE`` | no | -+--------------------+-------------+ -| ``LIMIT`` | yes | -+--------------------+-------------+ -| ``LIST`` | no | -+--------------------+-------------+ -| ``LOGIN`` | no | -+--------------------+-------------+ -| ``MAP`` | no | -+--------------------+-------------+ -| ``MODIFY`` | yes | -+--------------------+-------------+ -| ``NAN`` | yes | -+--------------------+-------------+ -| ``NOLOGIN`` | no | -+--------------------+-------------+ -| ``NORECURSIVE`` | yes | -+--------------------+-------------+ -| ``NOSUPERUSER`` | no | -+--------------------+-------------+ -| ``NOT`` | yes | -+--------------------+-------------+ -| ``NULL`` | yes | -+--------------------+-------------+ -| ``OF`` | yes | -+--------------------+-------------+ -| ``ON`` | yes | -+--------------------+-------------+ -| ``OPTIONS`` | no | -+--------------------+-------------+ -| ``OR`` | yes | -+--------------------+-------------+ -| ``ORDER`` | yes | -+--------------------+-------------+ -| ``PASSWORD`` | no | -+--------------------+-------------+ -| ``PERMISSION`` | no | -+--------------------+-------------+ -| ``PERMISSIONS`` | no | -+--------------------+-------------+ -| ``PRIMARY`` | yes | -+--------------------+-------------+ -| ``RENAME`` | yes | -+--------------------+-------------+ -| ``REPLACE`` | yes | -+--------------------+-------------+ -| ``RETURNS`` | no | -+--------------------+-------------+ -| ``REVOKE`` | yes | -+--------------------+-------------+ -| ``ROLE`` | no | -+--------------------+-------------+ -| ``ROLES`` | no | -+--------------------+-------------+ -| ``SCHEMA`` | yes | -+--------------------+-------------+ -| ``SELECT`` | yes | -+--------------------+-------------+ -| ``SET`` | yes | -+--------------------+-------------+ -| ``SFUNC`` | no | -+--------------------+-------------+ -| ``SMALLINT`` | no | -+--------------------+-------------+ -| ``STATIC`` | no | -+--------------------+-------------+ -| ``STORAGE`` | no | -+--------------------+-------------+ -| ``STYPE`` | no | -+--------------------+-------------+ -| ``SUPERUSER`` | no | -+--------------------+-------------+ -| ``TABLE`` | yes | -+--------------------+-------------+ -| ``TEXT`` | no | -+--------------------+-------------+ -| ``TIME`` | no | -+--------------------+-------------+ -| ``TIMESTAMP`` | no | -+--------------------+-------------+ -| ``TIMEUUID`` | no | -+--------------------+-------------+ -| ``TINYINT`` | no | -+--------------------+-------------+ -| ``TO`` | yes | -+--------------------+-------------+ -| ``TOKEN`` | yes | -+--------------------+-------------+ -| ``TRIGGER`` | no | -+--------------------+-------------+ -| ``TRUNCATE`` | yes | -+--------------------+-------------+ -| ``TTL`` | no | -+--------------------+-------------+ -| ``TUPLE`` | no | -+--------------------+-------------+ -| ``TYPE`` | no | 
-+--------------------+-------------+ -| ``UNLOGGED`` | yes | -+--------------------+-------------+ -| ``UPDATE`` | yes | -+--------------------+-------------+ -| ``USE`` | yes | -+--------------------+-------------+ -| ``USER`` | no | -+--------------------+-------------+ -| ``USERS`` | no | -+--------------------+-------------+ -| ``USING`` | yes | -+--------------------+-------------+ -| ``UUID`` | no | -+--------------------+-------------+ -| ``VALUES`` | no | -+--------------------+-------------+ -| ``VARCHAR`` | no | -+--------------------+-------------+ -| ``VARINT`` | no | -+--------------------+-------------+ -| ``WHERE`` | yes | -+--------------------+-------------+ -| ``WITH`` | yes | -+--------------------+-------------+ -| ``WRITETIME`` | no | -+--------------------+-------------+ - -Appendix B: CQL Reserved Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name. - -+-----------------+ -| type | -+=================+ -| ``bitstring`` | -+-----------------+ -| ``byte`` | -+-----------------+ -| ``complex`` | -+-----------------+ -| ``enum`` | -+-----------------+ -| ``interval`` | -+-----------------+ -| ``macaddr`` | -+-----------------+ - - -Appendix C: Dropping Compact Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting version 4.0, Thrift and COMPACT STORAGE is no longer supported. - -'ALTER ... DROP COMPACT STORAGE' statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables: - -- CQL-created Compact Tables that have no clustering columns, will expose an - additional clustering column ``column1`` with ``UTF8Type``. -- CQL-created Compact Tables that had no regular columns, will expose a - regular column ``value`` with ``BytesType``. -- For CQL-Created Compact Tables, all columns originally defined as - ``regular`` will be come ``static`` -- CQL-created Compact Tables that have clustering but have no regular - columns will have an empty value column (of ``EmptyType``) -- SuperColumn Tables (can only be created through Thrift) will expose - a compact value map with an empty name. -- Thrift-created Compact Tables will have types corresponding to their - Thrift definition. diff --git a/src/doc/4.0-alpha2/_sources/cql/changes.rst.txt b/src/doc/4.0-alpha2/_sources/cql/changes.rst.txt deleted file mode 100644 index 6691f156a..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/changes.rst.txt +++ /dev/null @@ -1,211 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Changes -------- - -The following describes the changes in each version of CQL. 
- -3.4.5 -^^^^^ - -- Adds support for arithmetic operators (:jira:`11935`) -- Adds support for ``+`` and ``-`` operations on dates (:jira:`11936`) -- Adds ``currentTimestamp``, ``currentDate``, ``currentTime`` and ``currentTimeUUID`` functions (:jira:`13132`) - - -3.4.4 -^^^^^ - -- ``ALTER TABLE`` ``ALTER`` has been removed; a column's type may not be changed after creation (:jira:`12443`). -- ``ALTER TYPE`` ``ALTER`` has been removed; a field's type may not be changed after creation (:jira:`12443`). - -3.4.3 -^^^^^ - -- Adds a new ``duration `` :ref:`data types ` (:jira:`11873`). -- Support for ``GROUP BY`` (:jira:`10707`). -- Adds a ``DEFAULT UNSET`` option for ``INSERT JSON`` to ignore omitted columns (:jira:`11424`). -- Allows ``null`` as a legal value for TTL on insert and update. It will be treated as equivalent to inserting a 0 (:jira:`12216`). - -3.4.2 -^^^^^ - -- If a table has a non zero ``default_time_to_live``, then explicitly specifying a TTL of 0 in an ``INSERT`` or - ``UPDATE`` statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels - the ``default_time_to_live``). This wasn't the case before and the ``default_time_to_live`` was applied even though a - TTL had been explicitly set. -- ``ALTER TABLE`` ``ADD`` and ``DROP`` now allow multiple columns to be added/removed. -- New ``PER PARTITION LIMIT`` option for ``SELECT`` statements (see `CASSANDRA-7017 - `__. -- :ref:`User-defined functions ` can now instantiate ``UDTValue`` and ``TupleValue`` instances via the - new ``UDFContext`` interface (see `CASSANDRA-10818 `__. -- :ref:`User-defined types ` may now be stored in a non-frozen form, allowing individual fields to be updated and - deleted in ``UPDATE`` statements and ``DELETE`` statements, respectively. (`CASSANDRA-7423 - `__). - -3.4.1 -^^^^^ - -- Adds ``CAST`` functions. - -3.4.0 -^^^^^ - -- Support for :ref:`materialized views `. -- ``DELETE`` support for inequality expressions and ``IN`` restrictions on any primary key columns. -- ``UPDATE`` support for ``IN`` restrictions on any primary key columns. - -3.3.1 -^^^^^ - -- The syntax ``TRUNCATE TABLE X`` is now accepted as an alias for ``TRUNCATE X``. - -3.3.0 -^^^^^ - -- :ref:`User-defined functions and aggregates ` are now supported. -- Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings. -- Introduces Roles to supersede user based authentication and access control -- New ``date``, ``time``, ``tinyint`` and ``smallint`` :ref:`data types ` have been added. -- :ref:`JSON support ` has been added -- Adds new time conversion functions and deprecate ``dateOf`` and ``unixTimestampOf``. - -3.2.0 -^^^^^ - -- :ref:`User-defined types ` supported. -- ``CREATE INDEX`` now supports indexing collection columns, including indexing the keys of map collections through the - ``keys()`` function -- Indexes on collections may be queried using the new ``CONTAINS`` and ``CONTAINS KEY`` operators -- :ref:`Tuple types ` were added to hold fixed-length sets of typed positional fields. -- ``DROP INDEX`` now supports optionally specifying a keyspace. - -3.1.7 -^^^^^ - -- ``SELECT`` statements now support selecting multiple rows in a single partition using an ``IN`` clause on combinations - of clustering columns. -- ``IF NOT EXISTS`` and ``IF EXISTS`` syntax is now supported by ``CREATE USER`` and ``DROP USER`` statements, - respectively. - -3.1.6 -^^^^^ - -- A new ``uuid()`` method has been added. -- Support for ``DELETE ... IF EXISTS`` syntax. 
- -3.1.5 -^^^^^ - -- It is now possible to group clustering columns in a relation, see :ref:`WHERE ` clauses. -- Added support for :ref:`static columns `. - -3.1.4 -^^^^^ - -- ``CREATE INDEX`` now allows specifying options when creating CUSTOM indexes. - -3.1.3 -^^^^^ - -- Millisecond precision formats have been added to the :ref:`timestamp ` parser. - -3.1.2 -^^^^^ - -- ``NaN`` and ``Infinity`` has been added as valid float constants. They are now reserved keywords. In the unlikely case - you we using them as a column identifier (or keyspace/table one), you will now need to double quote them. - -3.1.1 -^^^^^ - -- ``SELECT`` statement now allows listing the partition keys (using the ``DISTINCT`` modifier). See `CASSANDRA-4536 - `__. -- The syntax ``c IN ?`` is now supported in ``WHERE`` clauses. In that case, the value expected for the bind variable - will be a list of whatever type ``c`` is. -- It is now possible to use named bind variables (using ``:name`` instead of ``?``). - -3.1.0 -^^^^^ - -- ``ALTER TABLE`` ``DROP`` option added. -- ``SELECT`` statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported. -- ``CREATE`` statements for ``KEYSPACE``, ``TABLE`` and ``INDEX`` now supports an ``IF NOT EXISTS`` condition. - Similarly, ``DROP`` statements support a ``IF EXISTS`` condition. -- ``INSERT`` statements optionally supports a ``IF NOT EXISTS`` condition and ``UPDATE`` supports ``IF`` conditions. - -3.0.5 -^^^^^ - -- ``SELECT``, ``UPDATE``, and ``DELETE`` statements now allow empty ``IN`` relations (see `CASSANDRA-5626 - `__. - -3.0.4 -^^^^^ - -- Updated the syntax for custom :ref:`secondary indexes `. -- Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not - correct (the order was **not** the one of the type of the partition key). Instead, the ``token`` method should always - be used for range queries on the partition key (see :ref:`WHERE clauses `). - -3.0.3 -^^^^^ - -- Support for custom :ref:`secondary indexes ` has been added. - -3.0.2 -^^^^^ - -- Type validation for the :ref:`constants ` has been fixed. For instance, the implementation used to allow - ``'2'`` as a valid value for an ``int`` column (interpreting it has the equivalent of ``2``), or ``42`` as a valid - ``blob`` value (in which case ``42`` was interpreted as an hexadecimal representation of the blob). This is no longer - the case, type validation of constants is now more strict. See the :ref:`data types ` section for details - on which constant is allowed for which type. -- The type validation fixed of the previous point has lead to the introduction of blobs constants to allow the input of - blobs. Do note that while the input of blobs as strings constant is still supported by this version (to allow smoother - transition to blob constant), it is now deprecated and will be removed by a future version. If you were using strings - as blobs, you should thus update your client code ASAP to switch blob constants. -- A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is - now also allowed in select clauses. See the :ref:`section on functions ` for details. - -3.0.1 -^^^^^ - -- Date strings (and timestamps) are no longer accepted as valid ``timeuuid`` values. Doing so was a bug in the sense - that date string are not valid ``timeuuid``, and it was thus resulting in `confusing behaviors - `__. 
However, the following new methods have been added to help
-  working with ``timeuuid``: ``now``, ``minTimeuuid``, ``maxTimeuuid``,
-  ``dateOf`` and ``unixTimestampOf``.
-- Float constants now support the exponent notation. In other words, ``4.2E10`` is now a valid floating point value.
-
-Versioning
-^^^^^^^^^^
-
-Versioning of the CQL language adheres to the `Semantic Versioning `__ guidelines. Versions take the
-form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no
-correlation between Cassandra release versions and the CQL language version.
-
-========= =============================================================================================================
- version   description
-========= =============================================================================================================
- Major     The major version *must* be bumped when backward incompatible changes are introduced. This should rarely
-           occur.
- Minor     Minor version increments occur when new, but backward compatible, functionality is introduced.
- Patch     The patch version is incremented when bugs are fixed.
-========= =============================================================================================================
diff --git a/src/doc/4.0-alpha2/_sources/cql/ddl.rst.txt b/src/doc/4.0-alpha2/_sources/cql/ddl.rst.txt
deleted file mode 100644
index afb130e48..000000000
--- a/src/doc/4.0-alpha2/_sources/cql/ddl.rst.txt
+++ /dev/null
@@ -1,788 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-.. highlight:: cql
-
-.. _data-definition:
-
-Data Definition
----------------
-
-CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in
-*keyspaces*. A keyspace defines a number of options that apply to all the tables it contains, the most prominent of
-which is the :ref:`replication strategy ` used by the keyspace. It is generally encouraged to use
-one keyspace per *application*, and thus many clusters may define only one keyspace.
-
-This section describes the statements used to create, modify, and remove those keyspaces and tables.
-
-Common definitions
-^^^^^^^^^^^^^^^^^^
-
-The names of the keyspaces and tables are defined by the following grammar:
-
-.. productionlist::
-   keyspace_name: `name`
-   table_name: [ `keyspace_name` '.' 
] `name`
-   name: `unquoted_name` | `quoted_name`
-   unquoted_name: re('[a-zA-Z_0-9]{1, 48}')
-   quoted_name: '"' `unquoted_name` '"'
-
-Both keyspace and table names should be comprised of only alphanumeric characters, cannot be empty and are limited in
-size to 48 characters (that limit exists mostly to prevent filenames (which may include the keyspace and table name) from
-going over the limits of certain file systems). By default, keyspace and table names are case insensitive (``myTable`` is
-equivalent to ``mytable``) but case sensitivity can be forced by using double-quotes (``"myTable"`` is different from
-``mytable``).
-
-Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is
-part of. If it is not fully-qualified, the table is assumed to be in the *current* keyspace (see :ref:`USE statement
-`).
-
-Further, the valid names for columns are simply defined as:
-
-.. productionlist::
-   column_name: `identifier`
-
-We also define the notion of statement options for use in the following section:
-
-.. productionlist::
-   options: `option` ( AND `option` )*
-   option: `identifier` '=' ( `identifier` | `constant` | `map_literal` )
-
-.. _create-keyspace-statement:
-
-CREATE KEYSPACE
-^^^^^^^^^^^^^^^
-
-A keyspace is created using a ``CREATE KEYSPACE`` statement:
-
-.. productionlist::
-   create_keyspace_statement: CREATE KEYSPACE [ IF NOT EXISTS ] `keyspace_name` WITH `options`
-
-For instance::
-
-    CREATE KEYSPACE excelsior
-        WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-        AND durable_writes = false;
-
-Attempting to create a keyspace that already exists will return an error unless the ``IF NOT EXISTS`` option is used. If
-it is used, the statement will be a no-op if the keyspace already exists.
-
-The supported ``options`` are:
-
-=================== ========== =========== ========= ===================================================================
-name                kind       mandatory   default   description
-=================== ========== =========== ========= ===================================================================
-``replication``     *map*      yes                   The replication strategy and options to use for the keyspace (see
-                                                      details below).
-``durable_writes``  *simple*   no          true      Whether to use the commit log for updates on this keyspace
-                                                      (disable this option at your own risk!).
-=================== ========== =========== ========= ===================================================================
-
-The ``replication`` property is mandatory and must at least contain the ``'class'`` sub-option which defines the
-:ref:`replication strategy ` class to use. The rest of the sub-options depend on what replication
-strategy is used. By default, Cassandra supports the following ``'class'``:
-
-``SimpleStrategy``
-""""""""""""""""""
-
-A simple strategy that defines a replication factor for data to be spread
-across the entire cluster. This is generally not a wise choice for production
-because it does not respect datacenter layouts and can lead to wildly varying
-query latency. For a production-ready strategy, see
-``NetworkTopologyStrategy``.
``SimpleStrategy`` supports a single mandatory argument: - -========================= ====== ======= ============================================= -sub-option type since description -========================= ====== ======= ============================================= -``'replication_factor'`` int all The number of replicas to store per range -========================= ====== ======= ============================================= - -``NetworkTopologyStrategy`` -""""""""""""""""""""""""""" - -A production ready replication strategy that allows to set the replication -factor independently for each data-center. The rest of the sub-options are -key-value pairs where a key is a data-center name and its value is the -associated replication factor. Options: - -===================================== ====== ====== ============================================= -sub-option type since description -===================================== ====== ====== ============================================= -``''`` int all The number of replicas to store per range in - the provided datacenter. -``'replication_factor'`` int 4.0 The number of replicas to use as a default - per datacenter if not specifically provided. - Note that this always defers to existing - definitions or explicit datacenter settings. - For example, to have three replicas per - datacenter, supply this with a value of 3. -===================================== ====== ====== ============================================= - -Note that when ``ALTER`` ing keyspaces and supplying ``replication_factor``, -auto-expansion will only *add* new datacenters for safety, it will not alter -existing datacenters or remove any even if they are no longer in the cluster. -If you want to remove datacenters while still supplying ``replication_factor``, -explicitly zero out the datacenter you want to have zero replicas. - -An example of auto-expanding datacenters with two datacenters: ``DC1`` and ``DC2``:: - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3} - - DESCRIBE KEYSPACE excalibur - CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true; - - -An example of auto-expanding and overriding a datacenter:: - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2} - - DESCRIBE KEYSPACE excalibur - CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true; - -An example that excludes a datacenter while using ``replication_factor``:: - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0} ; - - DESCRIBE KEYSPACE excalibur - CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true; - -If :ref:`transient replication ` has been enabled, transient replicas can be configured for both -SimpleStrategy and NetworkTopologyStrategy by defining replication factors in the format ``'/'`` - -For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:: - - CREATE KEYSPACE some_keysopace - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1'', 'DC2' : '5/2'}; - -.. 
_use-statement: - -USE -^^^ - -The ``USE`` statement allows to change the *current* keyspace (for the *connection* on which it is executed). A number -of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the -default keyspace used when those objects are referred without a fully-qualified name (that is, without being prefixed a -keyspace name). A ``USE`` statement simply takes the keyspace to use as current as argument: - -.. productionlist:: - use_statement: USE `keyspace_name` - -.. _alter-keyspace-statement: - -ALTER KEYSPACE -^^^^^^^^^^^^^^ - -An ``ALTER KEYSPACE`` statement allows to modify the options of a keyspace: - -.. productionlist:: - alter_keyspace_statement: ALTER KEYSPACE `keyspace_name` WITH `options` - -For instance:: - - ALTER KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4}; - -The supported options are the same than for :ref:`creating a keyspace `. - -.. _drop-keyspace-statement: - -DROP KEYSPACE -^^^^^^^^^^^^^ - -Dropping a keyspace can be done using the ``DROP KEYSPACE`` statement: - -.. productionlist:: - drop_keyspace_statement: DROP KEYSPACE [ IF EXISTS ] `keyspace_name` - -For instance:: - - DROP KEYSPACE Excelsior; - -Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UTD and -functions in it, and all the data contained in those tables. - -If the keyspace does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _create-table-statement: - -CREATE TABLE -^^^^^^^^^^^^ - -Creating a new table uses the ``CREATE TABLE`` statement: - -.. productionlist:: - create_table_statement: CREATE TABLE [ IF NOT EXISTS ] `table_name` - : '(' - : `column_definition` - : ( ',' `column_definition` )* - : [ ',' PRIMARY KEY '(' `primary_key` ')' ] - : ')' [ WITH `table_options` ] - column_definition: `column_name` `cql_type` [ STATIC ] [ PRIMARY KEY] - primary_key: `partition_key` [ ',' `clustering_columns` ] - partition_key: `column_name` - : | '(' `column_name` ( ',' `column_name` )* ')' - clustering_columns: `column_name` ( ',' `column_name` )* - table_options: COMPACT STORAGE [ AND `table_options` ] - : | CLUSTERING ORDER BY '(' `clustering_order` ')' [ AND `table_options` ] - : | `options` - clustering_order: `column_name` (ASC | DESC) ( ',' `column_name` (ASC | DESC) )* - -For instance:: - - CREATE TABLE monkeySpecies ( - species text PRIMARY KEY, - common_name text, - population varint, - average_size int - ) WITH comment='Important biological records'; - - CREATE TABLE timeline ( - userid uuid, - posted_month int, - posted_time uuid, - body text, - posted_by text, - PRIMARY KEY (userid, posted_month, posted_time) - ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }; - - CREATE TABLE loads ( - machine inet, - cpu int, - mtime timeuuid, - load float, - PRIMARY KEY ((machine, cpu), mtime) - ) WITH CLUSTERING ORDER BY (mtime DESC); - -A CQL table has a name and is composed of a set of *rows*. Creating a table amounts to defining which :ref:`columns -` the rows will be composed, which of those columns compose the :ref:`primary key `, as -well as optional :ref:`options ` for the table. - -Attempting to create an already existing table will return an error unless the ``IF NOT EXISTS`` directive is used. If -it is used, the statement will be a no-op if the table already exists. - - -.. 
_column-definition:
-
-Column definitions
-~~~~~~~~~~~~~~~~~~
-
-Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later
-using an :ref:`alter statement`).
-
-A :token:`column_definition` is primarily comprised of the name of the column defined and its :ref:`type `,
-which restricts which values are accepted for that column. Additionally, a column definition can have the following
-modifiers:
-
-``STATIC``
-    it declares the column as being a :ref:`static column `.
-
-``PRIMARY KEY``
-    it declares the column as being the sole component of the :ref:`primary key ` of the table.
-
-.. _static-columns:
-
-Static columns
-``````````````
-Some columns can be declared as ``STATIC`` in a table definition. A column that is static will be "shared" by all the
-rows belonging to the same partition (having the same :ref:`partition key `). For instance::
-
-    CREATE TABLE t (
-        pk int,
-        t int,
-        v text,
-        s text static,
-        PRIMARY KEY (pk, t)
-    );
-
-    INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-    INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-    SELECT * FROM t;
-       pk | t | v      | s
-      ----+---+--------+-----------
-        0 | 0 | 'val0' | 'static1'
-        0 | 1 | 'val1' | 'static1'
-
-As can be seen, the ``s`` value is the same (``static1``) for both of the rows in the partition (the partition key in
-that example being ``pk``, both rows are in that same partition): the 2nd insertion has overridden the value for ``s``.
-
-The use of static columns has the following restrictions:
-
-- tables with the ``COMPACT STORAGE`` option (see below) cannot use them.
-- a table without clustering columns cannot have static columns (in a table without clustering columns, every partition
-  has only one row, and so every column is inherently static).
-- only non ``PRIMARY KEY`` columns can be static.
-
-.. _primary-key:
-
-The Primary key
-~~~~~~~~~~~~~~~
-
-Within a table, a row is uniquely identified by its ``PRIMARY KEY``, and hence all tables **must** define a PRIMARY KEY
-(and only one). A ``PRIMARY KEY`` definition is composed of one or more of the columns defined in the table.
-Syntactically, the primary key is defined by the keywords ``PRIMARY KEY`` followed by a comma-separated list of the column
-names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that
-column definition by the ``PRIMARY KEY`` keywords. The order of the columns in the primary key definition matters.
-
-A CQL primary key is composed of 2 parts:
-
-- the :ref:`partition key ` part. It is the first component of the primary key definition. It can be a
-  single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key;
-  the smallest possible table definition is::
-
-      CREATE TABLE t (k text PRIMARY KEY);
-
-- the :ref:`clustering columns `. Those are the columns after the first component of the primary key
-  definition, and the order of those columns defines the *clustering order*.
-
-Some examples of primary key definitions are:
-
-- ``PRIMARY KEY (a)``: ``a`` is the partition key and there are no clustering columns.
-- ``PRIMARY KEY (a, b, c)`` : ``a`` is the partition key and ``b`` and ``c`` are the clustering columns.
-- ``PRIMARY KEY ((a, b), c)`` : ``a`` and ``b`` compose the partition key (this is often called a *composite* partition
-  key) and ``c`` is the clustering column.
-
-
-.. 
_partition-key:
-
-The partition key
-`````````````````
-
-Within a table, CQL defines the notion of a *partition*. A partition is simply the set of rows that share the same value
-for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same
-partition only if they have the same values for all those partition key columns. So for instance, given the following table
-definition and content::
-
-    CREATE TABLE t (
-        a int,
-        b int,
-        c int,
-        d int,
-        PRIMARY KEY ((a, b), c, d)
-    );
-
-    SELECT * FROM t;
-       a | b | c | d
-      ---+---+---+---
-       0 | 0 | 0 | 0    // row 1
-       0 | 0 | 1 | 1    // row 2
-       0 | 1 | 2 | 2    // row 3
-       0 | 1 | 3 | 3    // row 4
-       1 | 1 | 4 | 4    // row 5
-
-``row 1`` and ``row 2`` are in the same partition, ``row 3`` and ``row 4`` are also in the same partition (but a
-different one) and ``row 5`` is in yet another partition.
-
-Note that a table always has a partition key, and that if the table has no :ref:`clustering columns
-`, then every partition of that table is only comprised of a single row (since the primary key
-uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns).
-
-The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored
-on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be
-located together in the cluster, and it is thus important to choose your partition key wisely so that rows that need
-to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of
-nodes).
-
-Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to
-be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot.
-
-Another useful property of a partition is that when writing data, all the updates belonging to a single partition are
-done *atomically* and in *isolation*, which is not the case across partitions.
-
-The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects
-of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficient they are.
-
-
-.. _clustering-columns:
-
-The clustering columns
-``````````````````````
-
-The clustering columns of a table define the clustering order for the partitions of that table. For a given
-:ref:`partition `, all the rows are physically ordered inside Cassandra by that clustering order. For
-instance, given::
-
-    CREATE TABLE t (
-        a int,
-        b int,
-        c int,
-        PRIMARY KEY (a, b, c)
-    );
-
-    SELECT * FROM t;
-       a | b | c
-      ---+---+---
-       0 | 0 | 4     // row 1
-       0 | 1 | 9     // row 2
-       0 | 2 | 2     // row 3
-       0 | 3 | 3     // row 4
-
-then the rows (which all belong to the same partition) are all stored internally in the order of the values of their
-``b`` column (the order they are displayed above). So where the partition key of the table allows grouping rows on the
-same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the
-retrieval of a range of rows within a partition (for instance, in the example above, ``SELECT * FROM t WHERE a = 0 AND b
-> 1 and b <= 3``) to be very efficient.
-
-
-.. 
_create-table-options: - -Table options -~~~~~~~~~~~~~ - -A CQL table has a number of options that can be set at creation (and, for most of them, :ref:`altered -` later). These options are specified after the ``WITH`` keyword. - -Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the ``COMPACT STORAGE`` option and the ``CLUSTERING ORDER`` option. Those, as well as the other -options of a table are described in the following sections. - -.. _compact-tables: - -Compact tables -`````````````` - -.. warning:: Since Cassandra 3.0, compact tables have the exact same layout internally than non compact ones (for the - same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition - and usage. It only exists for historical reason and is preserved for backward compatibility And as ``COMPACT - STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new table with the - ``COMPACT STORAGE`` option. - -A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is only maintained for backward -compatibility for definitions created before CQL version 3 and shouldn't be used for new tables. Declaring a -table with this option creates limitations for the table which are largely arbitrary (and exists for historical -reasons). Amongst those limitation: - -- a compact table cannot use collections nor static columns. -- if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary - key ones. This imply you cannot add or remove columns after creation in particular. -- a compact table is limited in the indexes it can create, and no materialized view can be created on it. - -.. _clustering-order: - -Reversing the clustering order -`````````````````````````````` - -The clustering order of a table is defined by the :ref:`clustering columns ` of that table. By -default, that ordering is based on natural order of those clustering order, but the ``CLUSTERING ORDER`` allows to -change that clustering order to use the *reverse* natural order for some (potentially all) of the columns. - -The ``CLUSTERING ORDER`` option takes the comma-separated list of the clustering column, each with a ``ASC`` (for -*ascendant*, e.g. the natural order) or ``DESC`` (for *descendant*, e.g. the reverse natural order). Note in particular -that the default (if the ``CLUSTERING ORDER`` option is not used) is strictly equivalent to using the option with all -clustering columns using the ``ASC`` modifier. - -Note that this option is basically a hint for the storage engine to change the order in which it stores the row but it -has 3 visible consequences: - -# it limits which ``ORDER BY`` clause are allowed for :ref:`selects ` on that table. You can only - order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering column - ``a`` and ``b`` and you defined ``WITH CLUSTERING ORDER (a DESC, b ASC)``, then in queries you will be allowed to use - ``ORDER BY (a DESC, b ASC)`` and (reverse clustering order) ``ORDER BY (a ASC, b DESC)`` but **not** ``ORDER BY (a - ASC, b ASC)`` (nor ``ORDER BY (a DESC, b DESC)``). -# it also change the default order of results when queried (if no ``ORDER BY`` is provided). Results are always returned - in clustering order (within a partition). 
-# it has a small performance impact on some queries as queries in reverse clustering order are slower than the one in - forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of - your columns (which is common with time series for instance where you often want data from the newest to the oldest), - it is an optimization to declare a descending clustering order. - -.. _create-table-general-options: - -Other table options -``````````````````` - -.. todo:: review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance) - -A table supports the following options: - -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| option | kind | default | description | -+================================+==========+=============+===========================================================+ -| ``comment`` | *simple* | none | A free-form, human-readable comment. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``speculative_retry`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``additional_write_policy`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``gc_grace_seconds`` | *simple* | 864000 | Time to wait before garbage collecting tombstones | -| | | | (deletion markers). | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``bloom_filter_fp_chance`` | *simple* | 0.00075 | The target probability of false positive of the sstable | -| | | | bloom filters. Said bloom filters will be sized to provide| -| | | | the provided probability (thus lowering this value impact | -| | | | the size of bloom filters in-memory and on-disk) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``default_time_to_live`` | *simple* | 0 | The default expiration time (“TTL”) in seconds for a | -| | | | table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compaction`` | *map* | *see below* | :ref:`Compaction options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compression`` | *map* | *see below* | :ref:`Compression options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``caching`` | *map* | *see below* | :ref:`Caching options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``memtable_flush_period_in_ms``| *simple* | 0 | Time (in ms) before Cassandra flushes memtables to disk. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``read_repair`` | *simple* | BLOCKING | Sets read repair behavior (see below) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ - -.. 
_speculative-retry-options: - -Speculative retry options -######################### - -By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ``ONE``, a quorum for ``QUORUM``, and so on. -``speculative_retry`` determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. ``additional_write_policy`` specifies the threshold at which -a cheap quorum write will be upgraded to include transient replicas. The following are legal values (case-insensitive): - -============================ ======================== ============================================================================= - Format Example Description -============================ ======================== ============================================================================= - ``XPERCENTILE`` 90.5PERCENTILE Coordinators record average per-table response times for all replicas. - If a replica takes longer than ``X`` percent of this table's average - response time, the coordinator queries an additional replica. - ``X`` must be between 0 and 100. - ``XP`` 90.5P Synonym for ``XPERCENTILE`` - ``Yms`` 25ms If a replica takes more than ``Y`` milliseconds to respond, - the coordinator queries an additional replica. - ``MIN(XPERCENTILE,YMS)`` MIN(99PERCENTILE,35MS) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is lower at the time of calculation. - Parameters are ``XPERCENTILE``, ``XP``, or ``Yms``. - This is helpful to help protect against a single slow instance; in the - happy case the 99th percentile is normally lower than the specified - fixed value however, a slow host may skew the percentile very high - meaning the slower the cluster gets, the higher the value of the percentile, - and the higher the calculated time used to determine if we should - speculate or not. This allows us to set an upper limit that we want to - speculate at, but avoid skewing the tail latencies by speculating at the - lower value when the percentile is less than the specified fixed upper bound. - ``MAX(XPERCENTILE,YMS)`` MAX(90.5P,25ms) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is higher at the time of calculation. - ``ALWAYS`` Coordinators always query all replicas. - ``NEVER`` Coordinators never query additional replicas. -============================ =================== ============================================================================= - -This setting does not affect reads with consistency level ``ALL`` because they already query all replicas. - -Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default ``99PERCENTILE``. - -.. _cql-compaction-options: - -Compaction options -################## - -The ``compaction`` options must at least define the ``'class'`` sub-option, that defines the compaction strategy class -to use. The default supported class are ``'SizeTieredCompactionStrategy'`` (:ref:`STCS `), -``'LeveledCompactionStrategy'`` (:ref:`LCS `) and ``'TimeWindowCompactionStrategy'`` (:ref:`TWCS `) (the -``'DateTieredCompactionStrategy'`` is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be -preferred instead). Custom strategy can be provided by specifying the full class name as a :ref:`string constant -`. 
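-
-For instance, a table holding time-series data might pair ``TimeWindowCompactionStrategy`` with some of its
-strategy-specific sub-options. The sketch below is illustrative only: the table and column names are hypothetical, and
-the window settings shown are ``TimeWindowCompactionStrategy`` sub-options rather than general table options::
-
-    CREATE TABLE readings_by_sensor (
-        sensor_id int,
-        reading_time timestamp,
-        reading double,
-        PRIMARY KEY (sensor_id, reading_time)
-    ) WITH compaction = {'class': 'TimeWindowCompactionStrategy',
-                         'compaction_window_unit': 'DAYS',
-                         'compaction_window_size': 1};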
- -All default strategies support a number of :ref:`common options `, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS `, :ref:`LCS -` and :ref:`TWCS `). - -.. _cql-compression-options: - -Compression options -################### - -The ``compression`` options define if and how the sstables of the table are compressed. The following sub-options are -available: - -========================= =============== ============================================================================= - Option Default Description -========================= =============== ============================================================================= - ``class`` LZ4Compressor The compression algorithm to use. Default compressor are: LZ4Compressor, - SnappyCompressor and DeflateCompressor. Use ``'enabled' : false`` to disable - compression. Custom compressor can be provided by specifying the full class - name as a “string constant”:#constants. - ``enabled`` true Enable/disable sstable compression. - ``chunk_length_in_kb`` 64 On disk SSTables are compressed by block (to allow random reads). This - defines the size (in KB) of said block. Bigger values may improve the - compression rate, but increases the minimum size of data to be read from disk - for a read - ``crc_check_chance`` 1.0 When compression is enabled, each compressed block includes a checksum of - that block for the purpose of detecting disk bitrot and avoiding the - propagation of corruption to other replica. This option defines the - probability with which those checksums are checked during read. By default - they are always checked. Set to 0 to disable checksum checking and to 0.5 for - instance to check them every other read | -========================= =============== ============================================================================= - - -For instance, to create a table with LZ4Compressor and a chunk_lenth_in_kb of 4KB:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4}; - - -.. _cql-caching-options: - -Caching options -############### - -The ``caching`` options allows to configure both the *key cache* and the *row cache* for the table. The following -sub-options are available: - -======================== ========= ==================================================================================== - Option Default Description -======================== ========= ==================================================================================== - ``keys`` ALL Whether to cache keys (“key cache”) for this table. Valid values are: ``ALL`` and - ``NONE``. - ``rows_per_partition`` NONE The amount of rows to cache per partition (“row cache”). If an integer ``n`` is - specified, the first ``n`` queried rows of a partition will be cached. Other - possible options are ``ALL``, to cache all rows of a queried partition, or ``NONE`` - to disable row caching. 
-======================== ========= ==================================================================================== - - -For instance, to create a table with both a key cache and 10 rows per partition:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10}; - - -Read Repair options -################### - -The ``read_repair`` options configures the read repair behavior to allow tuning for various performance and -consistency behaviors. Two consistency properties are affected by read repair behavior. - -- Monotonic Quorum Reads: Provided by ``BLOCKING``. Monotonic quorum reads prevents reads from appearing to go back - in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of - replicas, it may be visible in one read, and then disappear in a subsequent read. -- Write Atomicity: Provided by ``NONE``. Write atomicity prevents reads from returning partially applied writes. - Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement - is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it - is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a - batch, but then select a single row by specifying the clustering column in a SELECT statement. - -The available read repair settings are: - -Blocking -```````` -The default setting. When ``read_repair`` is set to ``BLOCKING``, and a read repair is triggered, the read will block -on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition -level write atomicity - -None -```` - -When ``read_repair`` is set to ``NONE``, the coordinator will reconcile any differences between replicas, but will not -attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads. - - -Other considerations: -##################### - -- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. There is thus no need to try to - anticipate future usage when creating a table. - -.. _alter-table-statement: - -ALTER TABLE -^^^^^^^^^^^ - -Altering an existing table uses the ``ALTER TABLE`` statement: - -.. productionlist:: - alter_table_statement: ALTER TABLE `table_name` `alter_table_instruction` - alter_table_instruction: ADD `column_name` `cql_type` ( ',' `column_name` `cql_type` )* - : | DROP `column_name` ( `column_name` )* - : | WITH `options` - -For instance:: - - ALTER TABLE addamsFamily ADD gravesite varchar; - - ALTER TABLE addamsFamily - WITH comment = 'A most excellent and useful table'; - -The ``ALTER TABLE`` statement can: - -- Add new column(s) to the table (through the ``ADD`` instruction). Note that the primary key of a table cannot be - changed and thus newly added column will, by extension, never be part of the primary key. Also note that :ref:`compact - tables ` have restrictions regarding column addition. Note that this is constant (in the amount of - data the cluster contains) time operation. -- Remove column(s) from the table. This drops both the column and all its content, but note that while the column - becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings - below. 
Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the - cluster) time operation. -- Change some of the table options (through the ``WITH`` instruction). The :ref:`supported options - ` are the same that when creating a table (outside of ``COMPACT STORAGE`` and ``CLUSTERING - ORDER`` that cannot be changed after creation). Note that setting any ``compaction`` sub-options has the effect of - erasing all previous ``compaction`` options, so you need to re-specify all the sub-options if you want to keep them. - The same note applies to the set of ``compression`` sub-options. - -.. warning:: Dropping a column assumes that the timestamps used for the value of this column are "real" timestamp in - microseconds. Using "real" timestamps in microseconds is the default is and is **strongly** recommended but as - Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another - convention. Please be aware that if you do so, dropping a column will not work correctly. - -.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name than the dropped one - **unless** the type of the dropped column was a (non-frozen) column (due to an internal technical limitation). - - -.. _drop-table-statement: - -DROP TABLE -^^^^^^^^^^ - -Dropping a table uses the ``DROP TABLE`` statement: - -.. productionlist:: - drop_table_statement: DROP TABLE [ IF EXISTS ] `table_name` - -Dropping a table results in the immediate, irreversible removal of the table, including all data it contains. - -If the table does not exist, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _truncate-statement: - -TRUNCATE -^^^^^^^^ - -A table can be truncated using the ``TRUNCATE`` statement: - -.. productionlist:: - truncate_statement: TRUNCATE [ TABLE ] `table_name` - -Note that ``TRUNCATE TABLE foo`` is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the ``TABLE`` keyword can be omitted. - -Truncating a table permanently removes all existing data from the table, but without removing the table itself. diff --git a/src/doc/4.0-alpha2/_sources/cql/definitions.rst.txt b/src/doc/4.0-alpha2/_sources/cql/definitions.rst.txt deleted file mode 100644 index 3df6f2099..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/definitions.rst.txt +++ /dev/null @@ -1,234 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. highlight:: cql - -Definitions ------------ - -.. 
_conventions: - -Conventions -^^^^^^^^^^^ - -To aid in specifying the CQL syntax, we will use the following conventions in this document: - -- Language rules will be given in an informal `BNF variant - `_ notation. In particular, we'll use square brakets - (``[ item ]``) for optional items, ``*`` and ``+`` for repeated items (where ``+`` imply at least one). -- The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to - their definition) while terminal keywords will be provided "all caps". Note however that keywords are - :ref:`identifiers` and are thus case insensitive in practice. We will also define some early construction using - regexp, which we'll indicate with ``re()``. -- The grammar is provided for documentation purposes and leave some minor details out. For instance, the comma on the - last column definition in a ``CREATE TABLE`` statement is optional but supported if present even though the grammar in - this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL. -- References to keywords or pieces of CQL code in running text will be shown in a ``fixed-width font``. - - -.. _identifiers: - -Identifiers and keywords -^^^^^^^^^^^^^^^^^^^^^^^^ - -The CQL language uses *identifiers* (or *names*) to identify tables, columns and other objects. An identifier is a token -matching the regular expression ``[a-zA-Z][a-zA-Z0-9_]*``. - -A number of such identifiers, like ``SELECT`` or ``WITH``, are *keywords*. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in :ref:`appendix-A`. - -Identifiers and (unquoted) keywords are case insensitive. Thus ``SELECT`` is the same than ``select`` or ``sElEcT``, and -``myId`` is the same than ``myid`` or ``MYID``. A convention often used (in particular by the samples of this -documentation) is to use upper case for keywords and lower case for other identifiers. - -There is a second kind of identifiers called *quoted identifiers* defined by enclosing an arbitrary sequence of -characters (non empty) in double-quotes(``"``). Quoted identifiers are never keywords. Thus ``"select"`` is not a -reserved keyword and can be used to refer to a column (note that using this is particularly advised), while ``select`` -would raise a parsing error. Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case -sensitive (``"My Quoted Id"`` is *different* from ``"my quoted id"``). A fully lowercase quoted identifier that matches -``[a-zA-Z][a-zA-Z0-9_]*`` is however *equivalent* to the unquoted identifier obtained by removing the double-quote (so -``"myid"`` is equivalent to ``myid`` and to ``myId`` but different from ``"myId"``). Inside a quoted identifier, the -double-quote character can be repeated to escape it, so ``"foo "" bar"`` is a valid identifier. - -.. note:: *quoted identifiers* allows to declare columns with arbitrary names, and those can sometime clash with - specific names used by the server. For instance, when using conditional update, the server will respond with a - result-set containing a special result named ``"[applied]"``. If you’ve declared a column with such a name, this - could potentially confuse some tools and should be avoided. 
In general, unquoted identifiers should be preferred but - if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like - ``"[applied]"``) and any name that looks like a function call (like ``"f(x)"``). - -More formally, we have: - -.. productionlist:: - identifier: `unquoted_identifier` | `quoted_identifier` - unquoted_identifier: re('[a-zA-Z][a-zA-Z0-9_]*') - quoted_identifier: '"' (any character where " can appear if doubled)+ '"' - -.. _constants: - -Constants -^^^^^^^^^ - -CQL defines the following kind of *constants*: - -.. productionlist:: - constant: `string` | `integer` | `float` | `boolean` | `uuid` | `blob` | NULL - string: '\'' (any character where ' can appear if doubled)+ '\'' - : '$$' (any character other than '$$') '$$' - integer: re('-?[0-9]+') - float: re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY - boolean: TRUE | FALSE - uuid: `hex`{8}-`hex`{4}-`hex`{4}-`hex`{4}-`hex`{12} - hex: re("[0-9a-fA-F]") - blob: '0' ('x' | 'X') `hex`+ - -In other words: - -- A string constant is an arbitrary sequence of characters enclosed by single-quote(``'``). A single-quote - can be included by repeating it, e.g. ``'It''s raining today'``. Those are not to be confused with quoted - :ref:`identifiers` that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence - of characters by two dollar characters, in which case single-quote can be used without escaping (``$$It's raining - today$$``). That latter form is often used when defining :ref:`user-defined functions ` to avoid having to - escape single-quote characters in function body (as they are more likely to occur than ``$$``). -- Integer, float and boolean constant are defined as expected. Note however than float allows the special ``NaN`` and - ``Infinity`` constants. -- CQL supports UUID_ constants. -- Blobs content are provided in hexadecimal and prefixed by ``0x``. -- The special ``NULL`` constant denotes the absence of value. - -For how these constants are typed, see the :ref:`data-types` section. - -Terms -^^^^^ - -CQL has the notion of a *term*, which denotes the kind of values that CQL support. Terms are defined by: - -.. productionlist:: - term: `constant` | `literal` | `function_call` | `arithmetic_operation` | `type_hint` | `bind_marker` - literal: `collection_literal` | `udt_literal` | `tuple_literal` - function_call: `identifier` '(' [ `term` (',' `term`)* ] ')' - arithmetic_operation: '-' `term` | `term` ('+' | '-' | '*' | '/' | '%') `term` - type_hint: '(' `cql_type` `)` term - bind_marker: '?' | ':' `identifier` - -A term is thus one of: - -- A :ref:`constant `. -- A literal for either :ref:`a collection `, :ref:`a user-defined type ` or :ref:`a tuple ` - (see the linked sections for details). -- A function call: see :ref:`the section on functions ` for details on which :ref:`native function - ` exists and how to define your own :ref:`user-defined ones `. -- An arithmetic operation between terms. see :ref:`the section on arithmetic operations ` -- A *type hint*: see the :ref:`related section ` for details. -- A bind marker, which denotes a variable to be bound at execution time. See the section on :ref:`prepared-statements` - for details. A bind marker can be either anonymous (``?``) or named (``:some_name``). The latter form provides a more - convenient way to refer to the variable for binding it and should generally be preferred. 
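-
-As a brief illustration (the table and column names below are hypothetical and serve only as an example), a single
-statement can combine several kinds of terms, here function calls, a string constant and a named bind marker::
-
-    INSERT INTO sensor_readings (id, recorded_at, location, reading)
-    VALUES (uuid(), currentTimestamp(), 'roof', :reading);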
- - -Comments -^^^^^^^^ - -A comment in CQL is a line beginning by either double dashes (``--``) or double slash (``//``). - -Multi-line comments are also supported through enclosure within ``/*`` and ``*/`` (but nesting is not supported). - -:: - - -- This is a comment - // This is a comment too - /* This is - a multi-line comment */ - -Statements -^^^^^^^^^^ - -CQL consists of statements that can be divided in the following categories: - -- :ref:`data-definition` statements, to define and change how the data is stored (keyspaces and tables). -- :ref:`data-manipulation` statements, for selecting, inserting and deleting data. -- :ref:`secondary-indexes` statements. -- :ref:`materialized-views` statements. -- :ref:`cql-roles` statements. -- :ref:`cql-permissions` statements. -- :ref:`User-Defined Functions ` statements. -- :ref:`udts` statements. -- :ref:`cql-triggers` statements. - -All the statements are listed below and are described in the rest of this documentation (see links above): - -.. productionlist:: - cql_statement: `statement` [ ';' ] - statement: `ddl_statement` - : | `dml_statement` - : | `secondary_index_statement` - : | `materialized_view_statement` - : | `role_or_permission_statement` - : | `udf_statement` - : | `udt_statement` - : | `trigger_statement` - ddl_statement: `use_statement` - : | `create_keyspace_statement` - : | `alter_keyspace_statement` - : | `drop_keyspace_statement` - : | `create_table_statement` - : | `alter_table_statement` - : | `drop_table_statement` - : | `truncate_statement` - dml_statement: `select_statement` - : | `insert_statement` - : | `update_statement` - : | `delete_statement` - : | `batch_statement` - secondary_index_statement: `create_index_statement` - : | `drop_index_statement` - materialized_view_statement: `create_materialized_view_statement` - : | `drop_materialized_view_statement` - role_or_permission_statement: `create_role_statement` - : | `alter_role_statement` - : | `drop_role_statement` - : | `grant_role_statement` - : | `revoke_role_statement` - : | `list_roles_statement` - : | `grant_permission_statement` - : | `revoke_permission_statement` - : | `list_permissions_statement` - : | `create_user_statement` - : | `alter_user_statement` - : | `drop_user_statement` - : | `list_users_statement` - udf_statement: `create_function_statement` - : | `drop_function_statement` - : | `create_aggregate_statement` - : | `drop_aggregate_statement` - udt_statement: `create_type_statement` - : | `alter_type_statement` - : | `drop_type_statement` - trigger_statement: `create_trigger_statement` - : | `drop_trigger_statement` - -.. _prepared-statements: - -Prepared Statements -^^^^^^^^^^^^^^^^^^^ - -CQL supports *prepared statements*. Prepared statements are an optimization that allows to parse a query only once but -execute it multiple times with different concrete values. - -Any statement that uses at least one bind marker (see :token:`bind_marker`) will need to be *prepared*. After which the statement -can be *executed* by provided concrete values for each of its marker. The exact details of how a statement is prepared -and then executed depends on the CQL driver used and you should refer to your driver documentation. diff --git a/src/doc/4.0-alpha2/_sources/cql/dml.rst.txt b/src/doc/4.0-alpha2/_sources/cql/dml.rst.txt deleted file mode 100644 index 1308de57e..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/dml.rst.txt +++ /dev/null @@ -1,522 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. 
or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-manipulation: - -Data Manipulation ------------------ - -This section describes the statements supported by CQL to insert, update, delete and query data. - -.. _select-statement: - -SELECT -^^^^^^ - -Querying data from data is done using a ``SELECT`` statement: - -.. productionlist:: - select_statement: SELECT [ JSON | DISTINCT ] ( `select_clause` | '*' ) - : FROM `table_name` - : [ WHERE `where_clause` ] - : [ GROUP BY `group_by_clause` ] - : [ ORDER BY `ordering_clause` ] - : [ PER PARTITION LIMIT (`integer` | `bind_marker`) ] - : [ LIMIT (`integer` | `bind_marker`) ] - : [ ALLOW FILTERING ] - select_clause: `selector` [ AS `identifier` ] ( ',' `selector` [ AS `identifier` ] ) - selector: `column_name` - : | `term` - : | CAST '(' `selector` AS `cql_type` ')' - : | `function_name` '(' [ `selector` ( ',' `selector` )* ] ')' - : | COUNT '(' '*' ')' - where_clause: `relation` ( AND `relation` )* - relation: `column_name` `operator` `term` - : '(' `column_name` ( ',' `column_name` )* ')' `operator` `tuple_literal` - : TOKEN '(' `column_name` ( ',' `column_name` )* ')' `operator` `term` - operator: '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY - group_by_clause: `column_name` ( ',' `column_name` )* - ordering_clause: `column_name` [ ASC | DESC ] ( ',' `column_name` [ ASC | DESC ] )* - -For instance:: - - SELECT name, occupation FROM users WHERE userid IN (199, 200, 207); - SELECT JSON name, occupation FROM users WHERE userid = 199; - SELECT name AS user_name, occupation AS user_occupation FROM users; - - SELECT time, value - FROM events - WHERE event_type = 'myEvent' - AND time > '2011-02-03' - AND time <= '2012-01-01' - - SELECT COUNT (*) AS user_count FROM users; - -The ``SELECT`` statements reads one or more columns for one or more rows in a table. It returns a result-set of the rows -matching the request, where each row contains the values for the selection corresponding to the query. Additionally, -:ref:`functions ` including :ref:`aggregation ` ones can be applied to the result. - -A ``SELECT`` statement contains at least a :ref:`selection clause ` and the name of the table on which -the selection is on (note that CQL does **not** joins or sub-queries and thus a select statement only apply to a single -table). In most case, a select will also have a :ref:`where clause ` and it can optionally have additional -clauses to :ref:`order ` or :ref:`limit ` the results. Lastly, :ref:`queries that require -filtering ` can be allowed if the ``ALLOW FILTERING`` flag is provided. - -.. _selection-clause: - -Selection clause -~~~~~~~~~~~~~~~~ - -The :token:`select_clause` determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. 
It consists of a comma-separated list of *selectors* or,
-alternatively, of the wildcard character (``*``) to select all the columns defined in the table.
-
-Selectors
-`````````
-
-A :token:`selector` can be one of:
-
-- A column name of the table selected, to retrieve the values for that column.
-- A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the
-  corresponding column of the result-set will simply have the value of this term for every row returned).
-- A cast, which converts a nested selector to a (compatible) type.
-- A function call, where the arguments are selectors themselves. See the section on :ref:`functions ` for
-  more details.
-- The special call ``COUNT(*)`` to the :ref:`COUNT function `, which counts all non-null results.
-
-Aliases
-```````
-
-Every *top-level* selector can also be aliased (using ``AS``). If so, the name of the corresponding column in the result
-set will be that of the alias. For instance::
-
-    // Without alias
-    SELECT intAsBlob(4) FROM t;
-
-    //  intAsBlob(4)
-    //  --------------
-    //    0x00000004
-
-    // With alias
-    SELECT intAsBlob(4) AS four FROM t;
-
-    //  four
-    //  ------------
-    //    0x00000004
-
-.. note:: Currently, aliases are not recognized anywhere else in the statement in which they are used (not in the ``WHERE``
-   clause, not in the ``ORDER BY`` clause, ...). You must use the original column name instead.
-
-
-``WRITETIME`` and ``TTL`` functions
-````````````````````````````````````
-
-Selection supports two special functions (that aren't allowed anywhere else): ``WRITETIME`` and ``TTL``. Both functions
-take only one argument, and that argument *must* be a column name (so, for instance, ``TTL(3)`` is invalid).
-
-These functions allow retrieving meta-information that is stored internally for each column, namely:
-
-- the timestamp of the value of the column for ``WRITETIME``.
-- the remaining time to live (in seconds) for the value of the column if it is set to expire (and ``null`` otherwise).
-
-.. _where-clause:
-
-The ``WHERE`` clause
-~~~~~~~~~~~~~~~~~~~~
-
-The ``WHERE`` clause specifies which rows must be queried. It is composed of relations on the columns that are part of
-the ``PRIMARY KEY`` and/or have a `secondary index <#createIndexStmt>`__ defined on them.
-
-Not all relations are allowed in a query. For instance, non-equality relations (``IN`` being considered an equality
-relation) on a partition key are not supported (but see the use of the ``TOKEN`` method below for non-equality queries on
-the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations
-on them are restricted to those that select a **contiguous** (for the ordering) set of rows.
For -instance, given:: - - CREATE TABLE posts ( - userid text, - blog_title text, - posted_at timestamp, - entry_title text, - content text, - category int, - PRIMARY KEY (userid, blog_title, posted_at) - ) - -The following query is allowed:: - - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND blog_title='John''s Blog' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):: - - // Needs a blog_title to be set to select ranges of posted_at - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -When specifying relations, the ``TOKEN`` function can be used on the ``PARTITION KEY`` column to query. In that case, -rows will be selected based on the token of their ``PARTITION_KEY`` rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -``token(-1) > token(0)`` in particular). Example:: - - SELECT * FROM posts - WHERE token(userid) > token('tom') AND token(userid) < token('bob') - -Moreover, the ``IN`` relation is only allowed on the last column of the partition key and on the last column of the full -primary key. - -It is also possible to “group” ``CLUSTERING COLUMNS`` together in a relation using the tuple notation. For instance:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01') - -will request all rows that sorts after the one having “John's Blog” as ``blog_tile`` and '2012-01-01' for ``posted_at`` -in the clustering order. In particular, rows having a ``post_at <= '2012-01-01'`` will be returned as long as their -``blog_title > 'John''s Blog'``, which would not be the case for:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND blog_title > 'John''s Blog' - AND posted_at > '2012-01-01' - -The tuple notation may also be used for ``IN`` clauses on clustering columns:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01')) - -The ``CONTAINS`` operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -``CONTAINS`` applies to the map values. The ``CONTAINS KEY`` operator may only be used on map columns and applies to the -map keys. - -.. _group-by-clause: - -Grouping results -~~~~~~~~~~~~~~~~ - -The ``GROUP BY`` option allows to condense into a single row all selected rows that share the same values for a set -of columns. - -Using the ``GROUP BY`` option, it is only possible to group rows at the partition key level or at a clustering column -level. By consequence, the ``GROUP BY`` option only accept as arguments primary key column names in the primary key -order. If a primary key column is restricted by an equality restriction it is not required to be present in the -``GROUP BY`` clause. - -Aggregate functions will produce a separate value for each group. If no ``GROUP BY`` clause is specified, -aggregates functions will produce a single value for all the rows. - -If a column is selected without an aggregate function, in a statement with a ``GROUP BY``, the first value encounter -in each group will be returned. - -.. 
_ordering-clause:
-
-Ordering results
-~~~~~~~~~~~~~~~~
-
-The ``ORDER BY`` clause allows selecting the order of the returned results. It takes as argument a list of column names
-along with the order for each column (``ASC`` for ascending and ``DESC`` for descending, omitting the order being
-equivalent to ``ASC``). Currently, the possible orderings are limited by the :ref:`clustering order `
-defined on the table:
-
-- if the table has been defined without any specific ``CLUSTERING ORDER``, then the allowed orderings are the order
-  induced by the clustering columns and the reverse of that order.
-- otherwise, the allowed orderings are the order of the ``CLUSTERING ORDER`` option and the reversed one.
-
-.. _limit-clause:
-
-Limiting results
-~~~~~~~~~~~~~~~~
-
-The ``LIMIT`` option to a ``SELECT`` statement limits the number of rows returned by a query, while the ``PER PARTITION
-LIMIT`` option limits the number of rows returned for a given partition by the query. Note that both types of limit can
-be used in the same statement.
-
-.. _allow-filtering:
-
-Allowing filtering
-~~~~~~~~~~~~~~~~~~
-
-By default, CQL only allows select queries that don't involve “filtering” server side, i.e. queries where we know that
-all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering”
-queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of
-data **returned** by the query (which can be controlled through ``LIMIT``).
-
-The ``ALLOW FILTERING`` option explicitly allows (some) queries that require filtering. Please note that a
-query using ``ALLOW FILTERING`` may thus have unpredictable performance (by the definition above), i.e. even a query
-that selects a handful of records **may** exhibit performance that depends on the total amount of data stored in the
-cluster.
-
-For instance, consider the following table holding user profiles with their year of birth (with a secondary index on
-it) and country of residence::
-
-    CREATE TABLE users (
-        username text PRIMARY KEY,
-        firstname text,
-        lastname text,
-        birth_year int,
-        country text
-    )
-
-    CREATE INDEX ON users(birth_year);
-
-Then the following queries are valid::
-
-    SELECT * FROM users;
-    SELECT * FROM users WHERE birth_year = 1981;
-
-because in both cases, Cassandra guarantees that the performance of these queries will be proportional to the amount of
-data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the
-number of user profiles stored in the database (not directly at least: due to secondary index implementation
-considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount
-of data stored; nevertheless, the number of nodes will always be multiple orders of magnitude lower than the number of
-user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data
-returned can always be controlled by adding a ``LIMIT``.
-
-However, the following query will be rejected::
-
-    SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-because Cassandra cannot guarantee that it won't have to scan a large amount of data even if the result of the query is
-small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from
-France.
However, if you “know what you are doing”, you can force the execution of this query by using ``ALLOW -FILTERING`` and so the following query is valid:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING; - -.. _insert-statement: - -INSERT -^^^^^^ - -Inserting data for a row is done using an ``INSERT`` statement: - -.. productionlist:: - insert_statement: INSERT INTO `table_name` ( `names_values` | `json_clause` ) - : [ IF NOT EXISTS ] - : [ USING `update_parameter` ( AND `update_parameter` )* ] - names_values: `names` VALUES `tuple_literal` - json_clause: JSON `string` [ DEFAULT ( NULL | UNSET ) ] - names: '(' `column_name` ( ',' `column_name` )* ')' - -For instance:: - - INSERT INTO NerdMovies (movie, director, main_actor, year) - VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005) - USING TTL 86400; - - INSERT INTO NerdMovies JSON '{"movie": "Serenity", - "director": "Joss Whedon", - "year": 2005}'; - -The ``INSERT`` statement writes one or more columns for a given row in a table. Note that since a row is identified by -its ``PRIMARY KEY``, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the ``VALUES`` syntax. When using the ``JSON`` syntax, they are optional. See the -section on :ref:`JSON support ` for more detail. - -Note that unlike in SQL, ``INSERT`` does not check the prior existence of the row by default: the row is created if none -existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened. - -It is however possible to use the ``IF NOT EXISTS`` condition to only insert if the row does not exist prior to the -insertion. But please note that using ``IF NOT EXISTS`` will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly. - -All updates for an ``INSERT`` are applied atomically and in isolation. - -Please refer to the :ref:`UPDATE ` section for informations on the :token:`update_parameter`. - -Also note that ``INSERT`` does not support counters, while ``UPDATE`` does. - -.. _update-statement: - -UPDATE -^^^^^^ - -Updating a row is done using an ``UPDATE`` statement: - -.. productionlist:: - update_statement: UPDATE `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : SET `assignment` ( ',' `assignment` )* - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - update_parameter: ( TIMESTAMP | TTL ) ( `integer` | `bind_marker` ) - assignment: `simple_selection` '=' `term` - :| `column_name` '=' `column_name` ( '+' | '-' ) `term` - :| `column_name` '=' `list_literal` '+' `column_name` - simple_selection: `column_name` - :| `column_name` '[' `term` ']' - :| `column_name` '.' `field_name - condition: `simple_selection` `operator` `term` - -For instance:: - - UPDATE NerdMovies USING TTL 400 - SET director = 'Joss Whedon', - main_actor = 'Nathan Fillion', - year = 2005 - WHERE movie = 'Serenity'; - - UPDATE UserActions - SET total = total + 2 - WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 - AND action = 'click'; - -The ``UPDATE`` statement writes one or more columns for a given row in a table. The :token:`where_clause` is used to -select the row to update and must include all columns composing the ``PRIMARY KEY``. Non primary key columns are then -set using the ``SET`` keyword. 
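-
-As an illustration only, a conditional update using the ``IF`` clause described below could look like the following
-(the table and values are hypothetical)::
-
-    UPDATE users
-    SET password = 'n3w_p4ss'
-    WHERE userid = 'user2'
-    IF password = 'ch@ngem3b';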
- -Note that unlike in SQL, ``UPDATE`` does not check the prior existence of the row by default (except through ``IF``, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred. - -It is however possible to use the conditions on some columns through ``IF``, in which case the row will not be updated -unless the conditions are met. But, please note that using ``IF`` conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly. - -In an ``UPDATE`` statement, all updates within the same partition key are applied atomically and in isolation. - -Regarding the :token:`assignment`: - -- ``c = c + 3`` is used to increment/decrement counters. The column name after the '=' sign **must** be the same than - the one before the '=' sign. Note that increment/decrement is only allowed on counters, and are the *only* update - operations allowed on counters. See the section on :ref:`counters ` for details. -- ``id = id + `` and ``id[value1] = value2`` are for collections, see the :ref:`relevant section - ` for details. -- ``id.field = 3`` is for setting the value of a field on a non-frozen user-defined types. see the :ref:`relevant section - ` for details. - -.. _update-parameters: - -Update parameters -~~~~~~~~~~~~~~~~~ - -The ``UPDATE``, ``INSERT`` (and ``DELETE`` and ``BATCH`` for the ``TIMESTAMP``) statements support the following -parameters: - -- ``TIMESTAMP``: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in - microseconds) at the start of statement execution as the timestamp. This is usually a suitable default. -- ``TTL``: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are - automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not - the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL - is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a - default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of ``null`` is equivalent - to inserting with a TTL of 0. - -.. _delete_statement: - -DELETE -^^^^^^ - -Deleting rows or parts of rows uses the ``DELETE`` statement: - -.. productionlist:: - delete_statement: DELETE [ `simple_selection` ( ',' `simple_selection` ) ] - : FROM `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - -For instance:: - - DELETE FROM NerdMovies USING TIMESTAMP 1240003134 - WHERE movie = 'Serenity'; - - DELETE phone FROM Users - WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); - -The ``DELETE`` statement deletes columns and rows. If column names are provided directly after the ``DELETE`` keyword, -only those columns are deleted from the row indicated by the ``WHERE`` clause. Otherwise, whole rows are removed. - -The ``WHERE`` clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -``IN`` operator. A range of rows may be deleted using an inequality operator (such as ``>=``). - -``DELETE`` supports the ``TIMESTAMP`` option with the same semantics as in :ref:`updates `. 
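-
-As an illustration of the range deletion mentioned above, a sketch reusing the ``posts`` table from the ``SELECT``
-examples::
-
-    DELETE FROM posts
-    WHERE userid = 'john doe'
-    AND blog_title = 'John''s Blog'
-    AND posted_at >= '2012-01-01' AND posted_at < '2012-02-01';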
- -In a ``DELETE`` statement, all deletions within the same partition key are applied atomically and in isolation. - -A ``DELETE`` operation can be conditional through the use of an ``IF`` clause, similar to ``UPDATE`` and ``INSERT`` -statements. However, as with ``INSERT`` and ``UPDATE`` statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly. - -.. _batch_statement: - -BATCH -^^^^^ - -Multiple ``INSERT``, ``UPDATE`` and ``DELETE`` can be executed in a single statement by grouping them through a -``BATCH`` statement: - -.. productionlist:: - batch_statement: BEGIN [ UNLOGGED | COUNTER ] BATCH - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : `modification_statement` ( ';' `modification_statement` )* - : APPLY BATCH - modification_statement: `insert_statement` | `update_statement` | `delete_statement` - -For instance:: - - BEGIN BATCH - INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user'); - UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3'; - INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c'); - DELETE name FROM users WHERE userid = 'user1'; - APPLY BATCH; - -The ``BATCH`` statement group multiple modification statements (insertions/updates and deletions) into a single -statement. It serves several purposes: - -- It saves network round-trips between the client and the server (and sometimes between the server coordinator and the - replicas) when batching multiple updates. -- All updates in a ``BATCH`` belonging to a given partition key are performed in isolation. -- By default, all operations in the batch are performed as *logged*, to ensure all mutations eventually complete (or - none will). See the notes on :ref:`UNLOGGED batches ` for more details. - -Note that: - -- ``BATCH`` statements may only contain ``UPDATE``, ``INSERT`` and ``DELETE`` statements (not other batches for instance). -- Batches are *not* a full analogue for SQL transactions. -- If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp - (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra's conflict - resolution procedure in the case of `timestamp ties `__, operations may - be applied in an order that is different from the order they are listed in the ``BATCH`` statement. To force a - particular operation ordering, you must specify per-operation timestamps. -- A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization. - -.. _unlogged-batches: - -``UNLOGGED`` batches -~~~~~~~~~~~~~~~~~~~~ - -By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition). - -There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur -this penalty, you can tell Cassandra to skip the batchlog with the ``UNLOGGED`` option. If the ``UNLOGGED`` option is -used, a failed batch might leave the patch only partly applied. - -``COUNTER`` batches -~~~~~~~~~~~~~~~~~~~ - -Use the ``COUNTER`` option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent. 
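-
-As an illustration only, a counter batch could look like the following (the ``page_views`` table is hypothetical)::
-
-    CREATE TABLE page_views (
-        page text PRIMARY KEY,
-        views counter
-    );
-
-    BEGIN COUNTER BATCH
-        UPDATE page_views SET views = views + 1 WHERE page = '/home';
-        UPDATE page_views SET views = views + 1 WHERE page = '/docs';
-    APPLY BATCH;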
diff --git a/src/doc/4.0-alpha2/_sources/cql/functions.rst.txt b/src/doc/4.0-alpha2/_sources/cql/functions.rst.txt deleted file mode 100644 index 965125a79..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/functions.rst.txt +++ /dev/null @@ -1,581 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-functions: - -.. Need some intro for UDF and native functions in general and point those to it. -.. _udfs: -.. _native-functions: - -Functions ---------- - -CQL supports 2 main categories of functions: - -- the :ref:`scalar functions `, which simply take a number of values and produce an output with it. -- the :ref:`aggregate functions `, which are used to aggregate multiple rows results from a - ``SELECT`` statement. - -In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined -functions. - -.. note:: By default, the use of user-defined functions is disabled by default for security concerns (even when - enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do - evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions`` - in ``cassandra.yaml`` to enable them. - -A function is identifier by its name: - -.. productionlist:: - function_name: [ `keyspace_name` '.' ] `name` - -.. _scalar-functions: - -Scalar functions -^^^^^^^^^^^^^^^^ - -.. _scalar-native-functions: - -Native functions -~~~~~~~~~~~~~~~~ - -Cast -```` - -The ``cast`` function can be used to converts one native datatype to another. - -The following table describes the conversions supported by the ``cast`` function. Cassandra will silently ignore any -cast converting a datatype into its own datatype. 
- -=============== ======================================================================================================= - From To -=============== ======================================================================================================= - ``ascii`` ``text``, ``varchar`` - ``bigint`` ``tinyint``, ``smallint``, ``int``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``boolean`` ``text``, ``varchar`` - ``counter`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``date`` ``timestamp`` - ``decimal`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``varint``, ``text``, - ``varchar`` - ``double`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``float`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``inet`` ``text``, ``varchar`` - ``int`` ``tinyint``, ``smallint``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``smallint`` ``tinyint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``time`` ``text``, ``varchar`` - ``timestamp`` ``date``, ``text``, ``varchar`` - ``timeuuid`` ``timestamp``, ``date``, ``text``, ``varchar`` - ``tinyint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``uuid`` ``text``, ``varchar`` - ``varint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``text``, - ``varchar`` -=============== ======================================================================================================= - -The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value -'1.0'. For instance:: - - SELECT avg(cast(count as double)) FROM myTable - -Token -````` - -The ``token`` function allows to compute the token for a given partition key. The exact signature of the token function -depends on the table concerned and of the partitioner used by the cluster. - -The type of the arguments of the ``token`` depend on the type of the partition key columns. The return type depend on -the partitioner in use: - -- For Murmur3Partitioner, the return type is ``bigint``. -- For RandomPartitioner, the return type is ``varint``. -- For ByteOrderedPartitioner, the return type is ``blob``. - -For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:: - - CREATE TABLE users ( - userid text PRIMARY KEY, - username text, - ) - -then the ``token`` function will take a single argument of type ``text`` (in that case, the partition key is ``userid`` -(there is no clustering columns so the partition key is the same than the primary key)), and the return type will be -``bigint``. - -Uuid -```` -The ``uuid`` function takes no parameters and generates a random type 4 uuid suitable for use in ``INSERT`` or -``UPDATE`` statements. - -.. _timeuuid-functions: - -Timeuuid functions -`````````````````` - -``now`` -####### - -The ``now`` function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the -time the function is invoked. Note that this method is useful for insertion but is largely non-sensical in -``WHERE`` clauses. 
For instance, a query of the form:: - - SELECT * FROM myTable WHERE t = now() - -will never return any result by design, since the value returned by ``now()`` is guaranteed to be unique. - -``currentTimeUUID`` is an alias of ``now``. - -``minTimeuuid`` and ``maxTimeuuid`` -################################### - -The ``minTimeuuid`` (resp. ``maxTimeuuid``) function takes a ``timestamp`` value ``t`` (which can be `either a timestamp -or a date string `) and return a *fake* ``timeuuid`` corresponding to the *smallest* (resp. *biggest*) -possible ``timeuuid`` having for timestamp ``t``. So for instance:: - - SELECT * FROM myTable - WHERE t > maxTimeuuid('2013-01-01 00:05+0000') - AND t < minTimeuuid('2013-02-02 10:00+0000') - -will select all rows where the ``timeuuid`` column ``t`` is strictly older than ``'2013-01-01 00:05+0000'`` but strictly -younger than ``'2013-02-02 10:00+0000'``. Please note that ``t >= maxTimeuuid('2013-01-01 00:05+0000')`` would still -*not* select a ``timeuuid`` generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to ``t > -maxTimeuuid('2013-01-01 00:05+0000')``. - -.. note:: We called the values generated by ``minTimeuuid`` and ``maxTimeuuid`` *fake* UUID because they do no respect - the Time-Based UUID generation process specified by the `RFC 4122 `__. In - particular, the value returned by these 2 methods will not be unique. This means you should only use those methods - for querying (as in the example above). Inserting the result of those methods is almost certainly *a bad idea*. - -Datetime functions -`````````````````` - -Retrieving the current date/time -################################ - -The following functions can be used to retrieve the date/time at the time where the function is invoked: - -===================== =============== - Function name Output type -===================== =============== - ``currentTimestamp`` ``timestamp`` - ``currentDate`` ``date`` - ``currentTime`` ``time`` - ``currentTimeUUID`` ``timeUUID`` -===================== =============== - -For example the last 2 days of data can be retrieved using:: - - SELECT * FROM myTable WHERE date >= currentDate() - 2d - -Time conversion functions -######################### - -A number of functions are provided to “convert” a ``timeuuid``, a ``timestamp`` or a ``date`` into another ``native`` -type. 
- -===================== =============== =================================================================== - Function name Input type Description -===================== =============== =================================================================== - ``toDate`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``date`` type - ``toDate`` ``timestamp`` Converts the ``timestamp`` argument into a ``date`` type - ``toTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``timestamp`` type - ``toTimestamp`` ``date`` Converts the ``date`` argument into a ``timestamp`` type - ``toUnixTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``timestamp`` Converts the ``timestamp`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``date`` Converts the ``date`` argument into a ``bigInt`` raw value - ``dateOf`` ``timeuuid`` Similar to ``toTimestamp(timeuuid)`` (DEPRECATED) - ``unixTimestampOf`` ``timeuuid`` Similar to ``toUnixTimestamp(timeuuid)`` (DEPRECATED) -===================== =============== =================================================================== - -Blob conversion functions -````````````````````````` -A number of functions are provided to “convert” the native types into binary data (``blob``). For every -```` ``type`` supported by CQL (a notable exceptions is ``blob``, for obvious reasons), the function -``typeAsBlob`` takes a argument of type ``type`` and return it as a ``blob``. Conversely, the function ``blobAsType`` -takes a 64-bit ``blob`` argument and convert it to a ``bigint`` value. And so for instance, ``bigintAsBlob(3)`` is -``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``. - -.. _user-defined-scalar-functions: - -User-defined functions -~~~~~~~~~~~~~~~~~~~~~~ - -User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in *Java* and *JavaScript*. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath. - -UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster. - -UDFs can be *overloaded* - i.e. multiple UDFs with different argument types but the same function name. Example:: - - CREATE FUNCTION sample ( arg int ) ...; - CREATE FUNCTION sample ( arg text ) ...; - -User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing. - -It is valid to use *complex* types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types. - -Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too. - -Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:: - - CREATE FUNCTION some_function ( arg int ) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE java - AS $$ return arg; $$; - - SELECT some_function(column) FROM atable ...; - UPDATE atable SET col = some_function(?) 
...; - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct_using_udt ( udtarg frozen ) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ return udtarg.getString("txt"); $$; - -User-defined functions can be used in ``SELECT``, ``INSERT`` and ``UPDATE`` statements. - -The implicitly available ``udfContext`` field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:: - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct\_using\_udt ( somearg int ) - RETURNS NULL ON NULL INPUT - RETURNS custom_type - LANGUAGE java - AS $$ - UDTValue udt = udfContext.newReturnUDTValue(); - udt.setString("txt", "some string"); - udt.setInt("i", 42); - return udt; - $$; - -The definition of the ``UDFContext`` interface can be found in the Apache Cassandra source code for -``org.apache.cassandra.cql3.functions.UDFContext``. - -.. code-block:: java - - public interface UDFContext - { - UDTValue newArgUDTValue(String argName); - UDTValue newArgUDTValue(int argNum); - UDTValue newReturnUDTValue(); - UDTValue newUDTValue(String udtName); - TupleValue newArgTupleValue(String argName); - TupleValue newArgTupleValue(int argNum); - TupleValue newReturnTupleValue(); - TupleValue newTupleValue(String cqlDefinition); - } - -Java UDFs already have some imports for common interfaces and classes defined. These imports are: - -.. code-block:: java - - import java.nio.ByteBuffer; - import java.util.List; - import java.util.Map; - import java.util.Set; - import org.apache.cassandra.cql3.functions.UDFContext; - import com.datastax.driver.core.TypeCodec; - import com.datastax.driver.core.TupleValue; - import com.datastax.driver.core.UDTValue; - -Please note, that these convenience imports are not available for script UDFs. - -.. _create-function-statement: - -CREATE FUNCTION -``````````````` - -Creating a new user-defined function uses the ``CREATE FUNCTION`` statement: - -.. productionlist:: - create_function_statement: CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS] - : `function_name` '(' `arguments_declaration` ')' - : [ CALLED | RETURNS NULL ] ON NULL INPUT - : RETURNS `cql_type` - : LANGUAGE `identifier` - : AS `string` - arguments_declaration: `identifier` `cql_type` ( ',' `identifier` `cql_type` )* - -For instance:: - - CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen, listarg list) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - - CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int) - CALLED ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - -``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords either creates a function or replaces an existing one with -the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE`` fails if a function with the same signature already -exists. - -If the optional ``IF NOT EXISTS`` keywords are used, the function will -only be created if another function with the same signature does not -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -Behavior on invocation with ``null`` values must be defined for each -function. There are two options: - -#. ``RETURNS NULL ON NULL INPUT`` declares that the function will always - return ``null`` if any of the input arguments is ``null``. -#. ``CALLED ON NULL INPUT`` declares that the function will always be - executed. 
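-
-As an illustration only, a sketch of a function declared with ``CALLED ON NULL INPUT`` that handles ``null`` itself
-(the function name and body are hypothetical)::
-
-    CREATE OR REPLACE FUNCTION len_or_zero ( txt text )
-        CALLED ON NULL INPUT
-        RETURNS int
-        LANGUAGE java
-        AS $$ return txt == null ? 0 : txt.length(); $$;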
- -Function Signature -################## - -Signatures are used to distinguish individual functions. The signature consists of: - -#. The fully qualified function name - i.e *keyspace* plus *function-name* -#. The concatenated list of all argument types - -Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules. - -Functions belong to a keyspace. If no keyspace is specified in ````, the current keyspace is used (i.e. -the keyspace specified using the ``USE`` statement). It is not possible to create a user-defined function in one of the -system keyspaces. - -.. _drop-function-statement: - -DROP FUNCTION -````````````` - -Dropping a function uses the ``DROP FUNCTION`` statement: - -.. productionlist:: - drop_function_statement: DROP FUNCTION [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - arguments_signature: `cql_type` ( ',' `cql_type` )* - -For instance:: - - DROP FUNCTION myfunction; - DROP FUNCTION mykeyspace.afunction; - DROP FUNCTION afunction ( int ); - DROP FUNCTION afunction ( text ); - -You must specify the argument types (:token:`arguments_signature`) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions). - -``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if -it doesn't - -.. _aggregate-functions: - -Aggregate functions -^^^^^^^^^^^^^^^^^^^ - -Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set. - -If ``normal`` columns, ``scalar functions``, ``UDT`` fields, ``writetime`` or ``ttl`` are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query. - -Native aggregates -~~~~~~~~~~~~~~~~~ - -.. _count-function: - -Count -````` - -The ``count`` function can be used to count the rows returned by a query. Example:: - - SELECT COUNT (*) FROM plays; - SELECT COUNT (1) FROM plays; - -It also can be used to count the non null value of a given column:: - - SELECT COUNT (scores) FROM plays; - -Max and Min -``````````` - -The ``max`` and ``min`` functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:: - - SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake'; - -Sum -``` - -The ``sum`` function can be used to sum up all the values returned by a query for a given column. For instance:: - - SELECT SUM (players) FROM plays; - -Avg -``` - -The ``avg`` function can be used to compute the average of all the values returned by a query for a given column. For -instance:: - - SELECT AVG (players) FROM plays; - -.. _user-defined-aggregates-functions: - -User-Defined Aggregates -~~~~~~~~~~~~~~~~~~~~~~~ - -User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -*count*, *min*, and *max*. - -Each aggregate requires an *initial state* (``INITCOND``, which defaults to ``null``) of type ``STYPE``. The first -argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional ``FINALFUNC`` is executed with last -state value as its argument. 
- -``STYPE`` is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate). - -User-defined aggregates can be used in ``SELECT`` statement. - -A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` -statement):: - - CREATE OR REPLACE FUNCTION averageState(state tuple, val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS $$ - if (val != null) { - state.setInt(0, state.getInt(0)+1); - state.setLong(1, state.getLong(1)+val.intValue()); - } - return state; - $$; - - CREATE OR REPLACE FUNCTION averageFinal (state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS $$ - double r = 0; - if (state.getInt(0) == 0) return null; - r = state.getLong(1); - r /= state.getInt(0); - return Double.valueOf(r); - $$; - - CREATE OR REPLACE AGGREGATE average(int) - SFUNC averageState - STYPE tuple - FINALFUNC averageFinal - INITCOND (0, 0); - - CREATE TABLE atable ( - pk int PRIMARY KEY, - val int - ); - - INSERT INTO atable (pk, val) VALUES (1,1); - INSERT INTO atable (pk, val) VALUES (2,2); - INSERT INTO atable (pk, val) VALUES (3,3); - INSERT INTO atable (pk, val) VALUES (4,4); - - SELECT average(val) FROM atable; - -.. _create-aggregate-statement: - -CREATE AGGREGATE -```````````````` - -Creating (or replacing) a user-defined aggregate function uses the ``CREATE AGGREGATE`` statement: - -.. productionlist:: - create_aggregate_statement: CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ] - : `function_name` '(' `arguments_signature` ')' - : SFUNC `function_name` - : STYPE `cql_type` - : [ FINALFUNC `function_name` ] - : [ INITCOND `term` ] - -See above for a complete example. - -``CREATE AGGREGATE`` with the optional ``OR REPLACE`` keywords either creates an aggregate or replaces an existing one -with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature -already exists. - -``CREATE AGGREGATE`` with the optional ``IF NOT EXISTS`` keywords either creates an aggregate if it does not already -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -``STYPE`` defines the type of the state value and must be specified. - -The optional ``INITCOND`` defines the initial state value for the aggregate. It defaults to ``null``. A non-\ ``null`` -``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``. - -``SFUNC`` references an existing function to be used as the state modifying function. The type of first argument of the -state function must match ``STYPE``. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called -with ``null``. - -The optional ``FINALFUNC`` is called just before the aggregate result is returned. It must take only one argument with -type ``STYPE``. The return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS -NULL ON NULL INPUT`` means that the aggregate's return value will be ``null``, if the last state is ``null``. - -If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is -defined, it is the return type of that function. - -.. 
_drop-aggregate-statement: - -DROP AGGREGATE -`````````````` - -Dropping an user-defined aggregate function uses the ``DROP AGGREGATE`` statement: - -.. productionlist:: - drop_aggregate_statement: DROP AGGREGATE [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - -For instance:: - - DROP AGGREGATE myAggregate; - DROP AGGREGATE myKeyspace.anAggregate; - DROP AGGREGATE someAggregate ( int ); - DROP AGGREGATE someAggregate ( text ); - -The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates). - -``DROP AGGREGATE`` with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist. diff --git a/src/doc/4.0-alpha2/_sources/cql/index.rst.txt b/src/doc/4.0-alpha2/_sources/cql/index.rst.txt deleted file mode 100644 index b4c21cf6c..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/index.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _cql: - -The Cassandra Query Language (CQL) -================================== - -This document describes the Cassandra Query Language (CQL) [#]_. Note that this document describes the last version of -the languages. However, the `changes <#changes>`_ section provides the diff between the different versions of CQL. - -CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For -that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have -in SQL. - -.. toctree:: - :maxdepth: 2 - - definitions - types - ddl - dml - indexes - mvs - security - functions - operators - json - triggers - appendices - changes - -.. [#] Technically, this document CQL version 3, which is not backward compatible with CQL version 1 and 2 (which have - been deprecated and remove) and differs from it in numerous ways. diff --git a/src/doc/4.0-alpha2/_sources/cql/indexes.rst.txt b/src/doc/4.0-alpha2/_sources/cql/indexes.rst.txt deleted file mode 100644 index 81fe429d0..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/indexes.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. 
http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _secondary-indexes: - -Secondary Indexes ------------------ - -CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by: - -.. productionlist:: - index_name: re('[a-zA-Z_0-9]+') - - - -.. _create-index-statement: - -CREATE INDEX -^^^^^^^^^^^^ - -Creating a secondary index on a table uses the ``CREATE INDEX`` statement: - -.. productionlist:: - create_index_statement: CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ `index_name` ] - : ON `table_name` '(' `index_identifier` ')' - : [ USING `string` [ WITH OPTIONS = `map_literal` ] ] - index_identifier: `column_name` - :| ( KEYS | VALUES | ENTRIES | FULL ) '(' `column_name` ')' - -For instance:: - - CREATE INDEX userIndex ON NerdMovies (user); - CREATE INDEX ON Mutants (abilityId); - CREATE INDEX ON users (keys(favs)); - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass'; - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'}; - -The ``CREATE INDEX`` statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ``ON`` keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time. - -Attempting to create an already existing index will return an error unless the ``IF NOT EXISTS`` option is used. If it -is used, the statement will be a no-op if the index already exists. - -Indexes on Map Keys -~~~~~~~~~~~~~~~~~~~ - -When creating an index on a :ref:`maps `, you may index either the keys or the values. If the column identifier is -placed within the ``keys()`` function, the index will be on the map keys, allowing you to use ``CONTAINS KEY`` in -``WHERE`` clauses. Otherwise, the index will be on the map values. - -.. _drop-index-statement: - -DROP INDEX -^^^^^^^^^^ - -Dropping a secondary index uses the ``DROP INDEX`` statement: - -.. productionlist:: - drop_index_statement: DROP INDEX [ IF EXISTS ] `index_name` - -The ``DROP INDEX`` statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index. - -If the index does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. diff --git a/src/doc/4.0-alpha2/_sources/cql/json.rst.txt b/src/doc/4.0-alpha2/_sources/cql/json.rst.txt deleted file mode 100644 index 539180aed..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/json.rst.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-json: - -JSON Support ------------- - -Cassandra 2.2 introduces JSON support to :ref:`SELECT ` and :ref:`INSERT ` -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents. - -SELECT JSON -^^^^^^^^^^^ - -With ``SELECT`` statements, the ``JSON`` keyword can be used to return each row as a single ``JSON`` encoded map. The -remainder of the ``SELECT`` statement behavior is the same. - -The result map keys are the same as the column names in a normal result set. For example, a statement like ``SELECT JSON -a, ttl(b) FROM ...`` would result in a map with keys ``"a"`` and ``"ttl(b)"``. However, this is one notable exception: -for symmetry with ``INSERT JSON`` behavior, case-sensitive column names with upper-case letters will be surrounded with -double quotes. For example, ``SELECT JSON myColumn FROM ...`` would result in a map key ``"\"myColumn\""`` (note the -escaped quotes). - -The map values will ``JSON``-encoded representations (as described below) of the result set values. - -INSERT JSON -^^^^^^^^^^^ - -With ``INSERT`` statements, the new ``JSON`` keyword can be used to enable inserting a ``JSON`` encoded map as a single -row. The format of the ``JSON`` map should generally match that returned by a ``SELECT JSON`` statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named "myKey" and "value", you would do the following:: - - INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}' - -By default (or if ``DEFAULT NULL`` is explicitly used), a column omitted from the ``JSON`` map will be set to ``NULL``, -meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). -Alternatively, if the ``DEFAULT UNSET`` directive is used after the value, omitted column values will be left unset, -meaning that pre-existing values for those column will be preserved. - - -JSON Encoding of Cassandra Data Types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Where possible, Cassandra will represent and accept data types in their native ``JSON`` representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native ``JSON`` collections (maps and lists) or a JSON-encoded string -representation of the collection. 
- -The following table describes the encodings that Cassandra will accept in ``INSERT JSON`` values (and ``fromJson()`` -arguments) as well as the format Cassandra will use when returning data for ``SELECT JSON`` statements (and -``fromJson()``): - -=============== ======================== =============== ============================================================== - Type Formats accepted Return format Notes -=============== ======================== =============== ============================================================== - ``ascii`` string string Uses JSON's ``\u`` character escape - ``bigint`` integer, string integer String must be valid 64 bit integer - ``blob`` string string String should be 0x followed by an even number of hex digits - ``boolean`` boolean, string boolean String must be "true" or "false" - ``date`` string string Date in format ``YYYY-MM-DD``, timezone UTC - ``decimal`` integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in - client-side decoder - ``double`` integer, float, string float String must be valid integer or float - ``float`` integer, float, string float String must be valid integer or float - ``inet`` string string IPv4 or IPv6 address - ``int`` integer, string integer String must be valid 32 bit integer - ``list`` list, string list Uses JSON's native list representation - ``map`` map, string map Uses JSON's native map representation - ``smallint`` integer, string integer String must be valid 16 bit integer - ``set`` list, string list Uses JSON's native list representation - ``text`` string string Uses JSON's ``\u`` character escape - ``time`` string string Time of day in format ``HH-MM-SS[.fffffffff]`` - ``timestamp`` integer, string string A timestamp. Strings constant allows to input :ref:`timestamps - as dates `. Datestamps with format ``YYYY-MM-DD - HH:MM:SS.SSS`` are returned. - ``timeuuid`` string string Type 1 UUID. See :token:`constant` for the UUID format - ``tinyint`` integer, string integer String must be valid 8 bit integer - ``tuple`` list, string list Uses JSON's native list representation - ``UDT`` map, string map Uses JSON's native map representation with field names as keys - ``uuid`` string string See :token:`constant` for the UUID format - ``varchar`` string string Uses JSON's ``\u`` character escape - ``varint`` integer, string integer Variable length; may overflow 32 or 64 bit integers in - client-side decoder -=============== ======================== =============== ============================================================== - -The fromJson() Function -^^^^^^^^^^^^^^^^^^^^^^^ - -The ``fromJson()`` function may be used similarly to ``INSERT JSON``, but for a single column value. It may only be used -in the ``VALUES`` clause of an ``INSERT`` statement or as one of the column values in an ``UPDATE``, ``DELETE``, or -``SELECT`` statement. For example, it cannot be used in the selection clause of a ``SELECT`` statement. - -The toJson() Function -^^^^^^^^^^^^^^^^^^^^^ - -The ``toJson()`` function may be used similarly to ``SELECT JSON``, but for a single column value. It may only be used -in the selection clause of a ``SELECT`` statement. diff --git a/src/doc/4.0-alpha2/_sources/cql/mvs.rst.txt b/src/doc/4.0-alpha2/_sources/cql/mvs.rst.txt deleted file mode 100644 index 200090a60..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/mvs.rst.txt +++ /dev/null @@ -1,179 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. 
See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _materialized-views: - -Materialized Views ------------------- - -Materialized views names are defined by: - -.. productionlist:: - view_name: re('[a-zA-Z_0-9]+') - - -.. _create-materialized-view-statement: - -CREATE MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^^ - -You can create a materialized view on a table using a ``CREATE MATERIALIZED VIEW`` statement: - -.. productionlist:: - create_materialized_view_statement: CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] `view_name` AS - : `select_statement` - : PRIMARY KEY '(' `primary_key` ')' - : WITH `table_options` - -For instance:: - - CREATE MATERIALIZED VIEW monkeySpecies_by_population AS - SELECT * FROM monkeySpecies - WHERE population IS NOT NULL AND species IS NOT NULL - PRIMARY KEY (population, species) - WITH comment='Allow query by population instead of species'; - -The ``CREATE MATERIALIZED VIEW`` statement creates a new materialized view. Each such view is a set of *rows* which -corresponds to rows which are present in the underlying, or base, table specified in the ``SELECT`` statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view. - -Creating a materialized view has 3 main parts: - -- The :ref:`select statement ` that restrict the data included in the view. -- The :ref:`primary key ` definition for the view. -- The :ref:`options ` for the view. - -Attempting to create an already existing materialized view will return an error unless the ``IF NOT EXISTS`` option is -used. If it is used, the statement will be a no-op if the materialized view already exists. - -.. note:: By default, materialized views are built in a single thread. The initial build can be parallelized by - increasing the number of threads specified by the property ``concurrent_materialized_view_builders`` in - ``cassandra.yaml``. This property can also be manipulated at runtime through both JMX and the - ``setconcurrentviewbuilders`` and ``getconcurrentviewbuilders`` nodetool commands. - -.. _mv-select: - -MV select statement -``````````````````` - -The select statement of a materialized view creation defines which of the base table is included in the view. That -statement is limited in a number of ways: - -- the :ref:`selection ` is limited to those that only select columns of the base table. In other - words, you can't use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can - however use `*` as a shortcut of selecting all columns. Further, :ref:`static columns ` cannot be - included in a materialized view (which means ``SELECT *`` isn't allowed if the base table has static columns). -- the ``WHERE`` clause have the following restrictions: - - - it cannot include any :token:`bind_marker`. 
- - the columns that are not part of the *base table* primary key can only be restricted by an ``IS NOT NULL`` - restriction. No other restriction is allowed. - - as the columns that are part of the *view* primary key cannot be null, they must always be at least restricted by a - ``IS NOT NULL`` restriction (or any other restriction, but they must have one). - -- it cannot have neither an :ref:`ordering clause `, nor a :ref:`limit `, nor :ref:`ALLOW - FILTERING `. - -.. _mv-primary-key: - -MV primary key -`````````````` - -A view must have a primary key and that primary key must conform to the following restrictions: - -- it must contain all the primary key columns of the base table. This ensures that every row of the view correspond to - exactly one row of the base table. -- it can only contain a single column that is not a primary key column in the base table. - -So for instance, give the following base table definition:: - - CREATE TABLE t ( - k int, - c1 int, - c2 int, - v1 int, - v2 int, - PRIMARY KEY (k, c1, c2) - ) - -then the following view definitions are allowed:: - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, k, c2) - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (v1, k, c1, c2) - -but the following ones are **not** allowed:: - - // Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL - PRIMARY KEY (v1, v2, k, c1, c2) - - // Error: must include k in the primary as it's a base table primary key column - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, c2) - - -.. _mv-options: - -MV options -`````````` - -A materialized view is internally implemented by a table and as such, creating a MV allows the :ref:`same options than -creating a table `. - - -.. _alter-materialized-view-statement: - -ALTER MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^ - -After creation, you can alter the options of a materialized view using the ``ALTER MATERIALIZED VIEW`` statement: - -.. productionlist:: - alter_materialized_view_statement: ALTER MATERIALIZED VIEW `view_name` WITH `table_options` - -The options that can be updated are the same than at creation time and thus the :ref:`same than for tables -`. - -.. _drop-materialized-view-statement: - -DROP MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^ - -Dropping a materialized view users the ``DROP MATERIALIZED VIEW`` statement: - -.. productionlist:: - drop_materialized_view_statement: DROP MATERIALIZED VIEW [ IF EXISTS ] `view_name`; - -If the materialized view does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case -the operation is a no-op. - -MV Limitations -``````````````` - -.. Note:: Removal of columns not selected in the Materialized View (via ``UPDATE base SET unselected_column = null`` or - ``DELETE unselected_column FROM base``) may shadow missed updates to other columns received by hints or repair. - For this reason, we advise against doing deletions on base columns not selected in views until this is - fixed on CASSANDRA-13826. 
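As an illustrative sketch of the pattern the note above advises against (the ``base`` table, the ``mv`` view and all column names are hypothetical)::

    CREATE TABLE base (k int PRIMARY KEY, v1 int, v2 int);

    CREATE MATERIALIZED VIEW mv AS
        SELECT k, v1 FROM base
        WHERE k IS NOT NULL AND v1 IS NOT NULL
        PRIMARY KEY (v1, k);

    // v2 is not selected by mv, so until CASSANDRA-13826 is resolved it is
    // safer not to remove it on the base table with statements such as:
    DELETE v2 FROM base WHERE k = 0;
    UPDATE base SET v2 = null WHERE k = 0;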
diff --git a/src/doc/4.0-alpha2/_sources/cql/operators.rst.txt b/src/doc/4.0-alpha2/_sources/cql/operators.rst.txt deleted file mode 100644 index 1faf0d045..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/operators.rst.txt +++ /dev/null @@ -1,74 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _arithmetic_operators: - -Arithmetic Operators --------------------- - -CQL supports the following operators: - -=============== ======================================================================================================= - Operator Description -=============== ======================================================================================================= - \- (unary) Negates operand - \+ Addition - \- Substraction - \* Multiplication - / Division - % Returns the remainder of a division -=============== ======================================================================================================= - -.. _number-arithmetic: - -Number Arithmetic -^^^^^^^^^^^^^^^^^ - -All arithmetic operations are supported on numeric types or counters. - -The return type of the operation will be based on the operand types: - -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - left/right tinyint smallint int bigint counter float double varint decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - **tinyint** tinyint smallint int bigint bigint float double varint decimal - **smallint** smallint smallint int bigint bigint float double varint decimal - **int** int int int bigint bigint float double varint decimal - **bigint** bigint bigint bigint bigint bigint double double varint decimal - **counter** bigint bigint bigint bigint bigint double double varint decimal - **float** float float float double double float double decimal decimal - **double** double double double double double double double decimal decimal - **varint** varint varint varint decimal decimal decimal decimal decimal decimal - **decimal** decimal decimal decimal decimal decimal decimal decimal decimal decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - -``*``, ``/`` and ``%`` operators have a higher precedence level than ``+`` and ``-`` operator. By consequence, -they will be evaluated before. If two operator in an expression have the same precedence level, they will be evaluated -left to right based on their position in the expression. - -.. _datetime--arithmetic: - -Datetime Arithmetic -^^^^^^^^^^^^^^^^^^^ - -A ``duration`` can be added (+) or substracted (-) from a ``timestamp`` or a ``date`` to create a new -``timestamp`` or ``date``. 
So for instance:: - - SELECT * FROM myTable WHERE t = '2017-01-01' - 2d - -will select all the records with a value of ``t`` which is in the last 2 days of 2016. diff --git a/src/doc/4.0-alpha2/_sources/cql/security.rst.txt b/src/doc/4.0-alpha2/_sources/cql/security.rst.txt deleted file mode 100644 index 429a1ef0d..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/security.rst.txt +++ /dev/null @@ -1,538 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-security: - -Security --------- - -.. _cql-roles: - -Database Roles -^^^^^^^^^^^^^^ - -CQL uses database roles to represent users and group of users. Syntactically, a role is defined by: - -.. productionlist:: - role_name: `identifier` | `string` - -.. _create-role-statement: - -CREATE ROLE -~~~~~~~~~~~ - -Creating a role uses the ``CREATE ROLE`` statement: - -.. productionlist:: - create_role_statement: CREATE ROLE [ IF NOT EXISTS ] `role_name` - : [ WITH `role_options` ] - role_options: `role_option` ( AND `role_option` )* - role_option: PASSWORD '=' `string` - :| LOGIN '=' `boolean` - :| SUPERUSER '=' `boolean` - :| OPTIONS '=' `map_literal` - :| ACCESS TO DATACENTERS `set_literal` - :| ACCESS TO ALL DATACENTERS - -For instance:: - - CREATE ROLE new_role; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true; - CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 }; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'}; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS; - -By default roles do not possess ``LOGIN`` privileges or ``SUPERUSER`` status. - -:ref:`Permissions ` on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and ``SUPERUSER`` status are inherited, but the ``LOGIN`` privilege is -not. - -If a role has the ``LOGIN`` privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role. - -Only a client with with the ``CREATE`` permission on the database roles resource may issue ``CREATE ROLE`` requests (see -the :ref:`relevant section ` below), unless the client is a ``SUPERUSER``. Role management in Cassandra -is pluggable and custom implementations may support only a subset of the listed options. - -Role names should be quoted if they contain non-alphanumeric characters. - -.. 
_setting-credentials-for-internal-authentication: - -Setting credentials for internal authentication -``````````````````````````````````````````````` - -Use the ``WITH PASSWORD`` clause to set a password for internal authentication, enclosing the password in single -quotation marks. - -If internal authentication has not been set up or the role does not have ``LOGIN`` privileges, the ``WITH PASSWORD`` -clause is not necessary. - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. Not specifiying -datacenters implicitly grants access to all datacenters. The clause ``ACCESS TO ALL DATACENTERS`` can be used for -explicitness, but there's no functional difference. - -Creating a role conditionally -````````````````````````````` - -Attempting to create an existing role results in an invalid query condition unless the ``IF NOT EXISTS`` option is used. -If the option is used and the role exists, the statement is a no-op:: - - CREATE ROLE other_role; - CREATE ROLE IF NOT EXISTS other_role; - - -.. _alter-role-statement: - -ALTER ROLE -~~~~~~~~~~ - -Altering a role options uses the ``ALTER ROLE`` statement: - -.. productionlist:: - alter_role_statement: ALTER ROLE `role_name` WITH `role_options` - -For instance:: - - ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false; - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ``ACCESS TO ALL DATACENTERS`` clause. - -Conditions on executing ``ALTER ROLE`` statements: - -- A client must have ``SUPERUSER`` status to alter the ``SUPERUSER`` status of another role -- A client cannot alter the ``SUPERUSER`` status of any role it currently holds -- A client can only modify certain properties of the role with which it identified at login (e.g. ``PASSWORD``) -- To modify properties of a role, the client must be granted ``ALTER`` :ref:`permission ` on that role - -.. _drop-role-statement: - -DROP ROLE -~~~~~~~~~ - -Dropping a role uses the ``DROP ROLE`` statement: - -.. productionlist:: - drop_role_statement: DROP ROLE [ IF EXISTS ] `role_name` - -``DROP ROLE`` requires the client to have ``DROP`` :ref:`permission ` on the role in question. In -addition, client may not ``DROP`` the role with which it identified at login. Finally, only a client with ``SUPERUSER`` -status may ``DROP`` another ``SUPERUSER`` role. - -Attempting to drop a role which does not exist results in an invalid query condition unless the ``IF EXISTS`` option is -used. If the option is used and the role does not exist the statement is a no-op. - -.. note:: DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain - connected and will retain the ability to perform any database actions which do not require :ref:`authorization`. - However, if authorization is enabled, :ref:`permissions` of the dropped role are also revoked, - subject to the :ref:`caching options` configured in :ref:`cassandra.yaml`. 
- Should a dropped role be subsequently recreated and have new :ref:`permissions` or - :ref:`roles` granted to it, any client sessions still connected will acquire the newly granted - permissions and roles. - -.. _grant-role-statement: - -GRANT ROLE -~~~~~~~~~~ - -Granting a role to another uses the ``GRANT ROLE`` statement: - -.. productionlist:: - grant_role_statement: GRANT `role_name` TO `role_name` - -For instance:: - - GRANT report_writer TO alice; - -This statement grants the ``report_writer`` role to ``alice``. Any permissions granted to ``report_writer`` are also -acquired by ``alice``. - -Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:: - - GRANT role_a TO role_b; - GRANT role_b TO role_a; - - GRANT role_a TO role_b; - GRANT role_b TO role_c; - GRANT role_c TO role_a; - -.. _revoke-role-statement: - -REVOKE ROLE -~~~~~~~~~~~ - -Revoking a role uses the ``REVOKE ROLE`` statement: - -.. productionlist:: - revoke_role_statement: REVOKE `role_name` FROM `role_name` - -For instance:: - - REVOKE report_writer FROM alice; - -This statement revokes the ``report_writer`` role from ``alice``. Any permissions that ``alice`` has acquired via the -``report_writer`` role are also revoked. - -.. _list-roles-statement: - -LIST ROLES -~~~~~~~~~~ - -All the known roles (in the system or granted to specific role) can be listed using the ``LIST ROLES`` statement: - -.. productionlist:: - list_roles_statement: LIST ROLES [ OF `role_name` ] [ NORECURSIVE ] - -For instance:: - - LIST ROLES; - -returns all known roles in the system, this requires ``DESCRIBE`` permission on the database roles resource. And:: - - LIST ROLES OF alice; - -enumerates all roles granted to ``alice``, including those transitively acquired. But:: - - LIST ROLES OF bob NORECURSIVE - -lists all roles directly granted to ``bob`` without including any of the transitively acquired ones. - -Users -^^^^^ - -Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -``USER``. For backward compatibility, the legacy syntax has been preserved with ``USER`` centric statements becoming -synonyms for the ``ROLE`` based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role. - -.. _create-user-statement: - -CREATE USER -~~~~~~~~~~~ - -Creating a user uses the ``CREATE USER`` statement: - -.. productionlist:: - create_user_statement: CREATE USER [ IF NOT EXISTS ] `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - user_option: SUPERUSER | NOSUPERUSER - -For instance:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER; - -``CREATE USER`` is equivalent to ``CREATE ROLE`` where the ``LOGIN`` option is ``true``. 
So, the following pairs of -statements are equivalent:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - - CREATE USER alice WITH PASSWORD 'password_a'; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - -.. _alter-user-statement: - -ALTER USER -~~~~~~~~~~ - -Altering the options of a user uses the ``ALTER USER`` statement: - -.. productionlist:: - alter_user_statement: ALTER USER `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - -For instance:: - - ALTER USER alice WITH PASSWORD 'PASSWORD_A'; - ALTER USER bob SUPERUSER; - -.. _drop-user-statement: - -DROP USER -~~~~~~~~~ - -Dropping a user uses the ``DROP USER`` statement: - -.. productionlist:: - drop_user_statement: DROP USER [ IF EXISTS ] `role_name` - -.. _list-users-statement: - -LIST USERS -~~~~~~~~~~ - -Existing users can be listed using the ``LIST USERS`` statement: - -.. productionlist:: - list_users_statement: LIST USERS - -Note that this statement is equivalent to:: - - LIST ROLES; - -but only roles with the ``LOGIN`` privilege are included in the output. - -Data Control -^^^^^^^^^^^^ - -.. _cql-permissions: - -Permissions -~~~~~~~~~~~ - -Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically: - -- The hierarchy of Data resources, Keyspaces and Tables has the structure ``ALL KEYSPACES`` -> ``KEYSPACE`` -> - ``TABLE``. -- Function resources have the structure ``ALL FUNCTIONS`` -> ``KEYSPACE`` -> ``FUNCTION`` -- Resources representing roles have the structure ``ALL ROLES`` -> ``ROLE`` -- Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ``ALL MBEANS`` -> - ``MBEAN`` - -Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting ``SELECT`` on a ``KEYSPACE`` automatically grants it on all ``TABLES`` in that ``KEYSPACE``. Likewise, granting -a permission on ``ALL FUNCTIONS`` grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace. - -Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes. - -The full set of available permissions is: - -- ``CREATE`` -- ``ALTER`` -- ``DROP`` -- ``SELECT`` -- ``MODIFY`` -- ``AUTHORIZE`` -- ``DESCRIBE`` -- ``EXECUTE`` - -Not all permissions are applicable to every type of resource. For instance, ``EXECUTE`` is only relevant in the context -of functions or mbeans; granting ``EXECUTE`` on a resource representing a table is nonsensical. Attempting to ``GRANT`` -a permission on resource to which it cannot be applied results in an error response. 
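For instance, the following would be rejected with an error, since ``EXECUTE`` does not apply to tables (``ks1.t1`` and ``report_writer`` are illustrative names)::

    GRANT EXECUTE ON TABLE ks1.t1 TO report_writer;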
The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission. - -=============== =============================== ======================================================================= - Permission Resource Operations -=============== =============================== ======================================================================= - ``CREATE`` ``ALL KEYSPACES`` ``CREATE KEYSPACE`` and ``CREATE TABLE`` in any keyspace - ``CREATE`` ``KEYSPACE`` ``CREATE TABLE`` in specified keyspace - ``CREATE`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` in any keyspace and ``CREATE AGGREGATE`` in any - keyspace - ``CREATE`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE`` in specified keyspace - ``CREATE`` ``ALL ROLES`` ``CREATE ROLE`` - ``ALTER`` ``ALL KEYSPACES`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in any keyspace - ``ALTER`` ``KEYSPACE`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in specified keyspace - ``ALTER`` ``TABLE`` ``ALTER TABLE`` - ``ALTER`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing any existing - ``ALTER`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing in - specified keyspace - ``ALTER`` ``FUNCTION`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing - ``ALTER`` ``ALL ROLES`` ``ALTER ROLE`` on any role - ``ALTER`` ``ROLE`` ``ALTER ROLE`` - ``DROP`` ``ALL KEYSPACES`` ``DROP KEYSPACE`` and ``DROP TABLE`` in any keyspace - ``DROP`` ``KEYSPACE`` ``DROP TABLE`` in specified keyspace - ``DROP`` ``TABLE`` ``DROP TABLE`` - ``DROP`` ``ALL FUNCTIONS`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in any keyspace - ``DROP`` ``ALL FUNCTIONS IN KEYSPACE`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in specified keyspace - ``DROP`` ``FUNCTION`` ``DROP FUNCTION`` - ``DROP`` ``ALL ROLES`` ``DROP ROLE`` on any role - ``DROP`` ``ROLE`` ``DROP ROLE`` - ``SELECT`` ``ALL KEYSPACES`` ``SELECT`` on any table - ``SELECT`` ``KEYSPACE`` ``SELECT`` on any table in specified keyspace - ``SELECT`` ``TABLE`` ``SELECT`` on specified table - ``SELECT`` ``ALL MBEANS`` Call getter methods on any mbean - ``SELECT`` ``MBEANS`` Call getter methods on any mbean matching a wildcard pattern - ``SELECT`` ``MBEAN`` Call getter methods on named mbean - ``MODIFY`` ``ALL KEYSPACES`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table - ``MODIFY`` ``KEYSPACE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table in - specified keyspace - ``MODIFY`` ``TABLE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on specified table - ``MODIFY`` ``ALL MBEANS`` Call setter methods on any mbean - ``MODIFY`` ``MBEANS`` Call setter methods on any mbean matching a wildcard pattern - ``MODIFY`` ``MBEAN`` Call setter methods on named mbean - ``AUTHORIZE`` ``ALL KEYSPACES`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table - ``AUTHORIZE`` ``KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table in - specified keyspace - ``AUTHORIZE`` ``TABLE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified table - ``AUTHORIZE`` ``ALL FUNCTIONS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any function - ``AUTHORIZE`` ``ALL FUNCTIONS IN KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` in specified keyspace - ``AUTHORIZE`` ``FUNCTION`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified function - ``AUTHORIZE`` ``ALL MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean - 
``AUTHORIZE`` ``MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean matching - a wildcard pattern - ``AUTHORIZE`` ``MBEAN`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on named mbean - ``AUTHORIZE`` ``ALL ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on any role - ``AUTHORIZE`` ``ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on specified roles - ``DESCRIBE`` ``ALL ROLES`` ``LIST ROLES`` on all roles or only roles granted to another, - specified role - ``DESCRIBE`` ``ALL MBEANS`` Retrieve metadata about any mbean from the platform's MBeanServer - ``DESCRIBE`` ``MBEANS`` Retrieve metadata about any mbean matching a wildcard patter from the - platform's MBeanServer - ``DESCRIBE`` ``MBEAN`` Retrieve metadata about a named mbean from the platform's MBeanServer - ``EXECUTE`` ``ALL FUNCTIONS`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function, and use of - any function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL FUNCTIONS IN KEYSPACE`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function in specified - keyspace and use of any function in keyspace in ``CREATE AGGREGATE`` - ``EXECUTE`` ``FUNCTION`` ``SELECT``, ``INSERT`` and ``UPDATE`` using specified function and use - of the function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL MBEANS`` Execute operations on any mbean - ``EXECUTE`` ``MBEANS`` Execute operations on any mbean matching a wildcard pattern - ``EXECUTE`` ``MBEAN`` Execute operations on named mbean -=============== =============================== ======================================================================= - -.. _grant-permission-statement: - -GRANT PERMISSION -~~~~~~~~~~~~~~~~ - -Granting a permission uses the ``GRANT PERMISSION`` statement: - -.. productionlist:: - grant_permission_statement: GRANT `permissions` ON `resource` TO `role_name` - permissions: ALL [ PERMISSIONS ] | `permission` [ PERMISSION ] - permission: CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE - resource: ALL KEYSPACES - :| KEYSPACE `keyspace_name` - :| [ TABLE ] `table_name` - :| ALL ROLES - :| ROLE `role_name` - :| ALL FUNCTIONS [ IN KEYSPACE `keyspace_name` ] - :| FUNCTION `function_name` '(' [ `cql_type` ( ',' `cql_type` )* ] ')' - :| ALL MBEANS - :| ( MBEAN | MBEANS ) `string` - -For instance:: - - GRANT SELECT ON ALL KEYSPACES TO data_reader; - -This gives any user with the role ``data_reader`` permission to execute ``SELECT`` statements on any table across all -keyspaces:: - - GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer; - -This give any user with the role ``data_writer`` permission to perform ``UPDATE``, ``INSERT``, ``UPDATE``, ``DELETE`` -and ``TRUNCATE`` queries on all tables in the ``keyspace1`` keyspace:: - - GRANT DROP ON keyspace1.table1 TO schema_owner; - -This gives any user with the ``schema_owner`` role permissions to ``DROP`` ``keyspace1.table1``:: - - GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer; - -This grants any user with the ``report_writer`` role permission to execute ``SELECT``, ``INSERT`` and ``UPDATE`` queries -which use the function ``keyspace1.user_function( int )``:: - - GRANT DESCRIBE ON ALL ROLES TO role_admin; - -This grants any user with the ``role_admin`` role permission to view any and all roles in the system with a ``LIST -ROLES`` statement - -.. _grant-all: - -GRANT ALL -````````` - -When the ``GRANT ALL`` form is used, the appropriate set of permissions is determined automatically based on the target -resource. 
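For instance, a short sketch (``keyspace1`` and the ``manager`` role are illustrative)::

    GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO manager;

Per the table above, this would grant ``manager`` every permission that is applicable to a keyspace: ``CREATE``, ``ALTER``, ``DROP``, ``SELECT``, ``MODIFY`` and ``AUTHORIZE``.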
- -Automatic Granting -`````````````````` - -When a resource is created, via a ``CREATE KEYSPACE``, ``CREATE TABLE``, ``CREATE FUNCTION``, ``CREATE AGGREGATE`` or -``CREATE ROLE`` statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource. - -.. _revoke-permission-statement: - -REVOKE PERMISSION -~~~~~~~~~~~~~~~~~ - -Revoking a permission from a role uses the ``REVOKE PERMISSION`` statement: - -.. productionlist:: - revoke_permission_statement: REVOKE `permissions` ON `resource` FROM `role_name` - -For instance:: - - REVOKE SELECT ON ALL KEYSPACES FROM data_reader; - REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer; - REVOKE DROP ON keyspace1.table1 FROM schema_owner; - REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer; - REVOKE DESCRIBE ON ALL ROLES FROM role_admin; - -Because of their function in normal driver operations, certain tables cannot have their `SELECT` permissions -revoked. The following tables will be available to all authorized users regardless of their assigned role:: - -* `system_schema.keyspaces` -* `system_schema.columns` -* `system_schema.tables` -* `system.local` -* `system.peers` - -.. _list-permissions-statement: - -LIST PERMISSIONS -~~~~~~~~~~~~~~~~ - -Listing granted permissions uses the ``LIST PERMISSIONS`` statement: - -.. productionlist:: - list_permissions_statement: LIST `permissions` [ ON `resource` ] [ OF `role_name` [ NORECURSIVE ] ] - -For instance:: - - LIST ALL PERMISSIONS OF alice; - -Show all permissions granted to ``alice``, including those acquired transitively from any other roles:: - - LIST ALL PERMISSIONS ON keyspace1.table1 OF bob; - -Show all permissions on ``keyspace1.table1`` granted to ``bob``, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to ``keyspace1.table1``. -For example, should ``bob`` have ``ALTER`` permission on ``keyspace1``, that would be included in the results of this -query. Adding the ``NORECURSIVE`` switch restricts the results to only those permissions which were directly granted to -``bob`` or one of ``bob``'s roles:: - - LIST SELECT PERMISSIONS OF carlos; - -Show any permissions granted to ``carlos`` or any of ``carlos``'s roles, limited to ``SELECT`` permissions on any -resource. diff --git a/src/doc/4.0-alpha2/_sources/cql/triggers.rst.txt b/src/doc/4.0-alpha2/_sources/cql/triggers.rst.txt deleted file mode 100644 index db3f53e38..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/triggers.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. 
_cql-triggers: - -Triggers --------- - -Triggers are identified by a name defined by: - -.. productionlist:: - trigger_name: `identifier` - - -.. _create-trigger-statement: - -CREATE TRIGGER -^^^^^^^^^^^^^^ - -Creating a new trigger uses the ``CREATE TRIGGER`` statement: - -.. productionlist:: - create_trigger_statement: CREATE TRIGGER [ IF NOT EXISTS ] `trigger_name` - : ON `table_name` - : USING `string` - -For instance:: - - CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex'; - -The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a ``lib/triggers`` subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction. - -.. _drop-trigger-statement: - -DROP TRIGGER -^^^^^^^^^^^^ - -Dropping a trigger uses the ``DROP TRIGGER`` statement: - -.. productionlist:: - drop_trigger_statement: DROP TRIGGER [ IF EXISTS ] `trigger_name` ON `table_name` - -For instance:: - - DROP TRIGGER myTrigger ON myTable; diff --git a/src/doc/4.0-alpha2/_sources/cql/types.rst.txt b/src/doc/4.0-alpha2/_sources/cql/types.rst.txt deleted file mode 100644 index 509a7565e..000000000 --- a/src/doc/4.0-alpha2/_sources/cql/types.rst.txt +++ /dev/null @@ -1,559 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. _data-types: - -Data Types ----------- - -CQL is a typed language and supports a rich set of data types, including :ref:`native types `, -:ref:`collection types `, :ref:`user-defined types `, :ref:`tuple types ` and :ref:`custom -types `: - -.. productionlist:: - cql_type: `native_type` | `collection_type` | `user_defined_type` | `tuple_type` | `custom_type` - - -.. _native-types: - -Native Types -^^^^^^^^^^^^ - -The native types supported by CQL are: - -.. 
productionlist:: - native_type: ASCII - : | BIGINT - : | BLOB - : | BOOLEAN - : | COUNTER - : | DATE - : | DECIMAL - : | DOUBLE - : | DURATION - : | FLOAT - : | INET - : | INT - : | SMALLINT - : | TEXT - : | TIME - : | TIMESTAMP - : | TIMEUUID - : | TINYINT - : | UUID - : | VARCHAR - : | VARINT - -The following table gives additional informations on the native data types, and on which kind of :ref:`constants -` each type supports: - -=============== ===================== ================================================================================== - type constants supported description -=============== ===================== ================================================================================== - ``ascii`` :token:`string` ASCII character string - ``bigint`` :token:`integer` 64-bit signed long - ``blob`` :token:`blob` Arbitrary bytes (no validation) - ``boolean`` :token:`boolean` Either ``true`` or ``false`` - ``counter`` :token:`integer` Counter column (64-bit signed value). See :ref:`counters` for details - ``date`` :token:`integer`, A date (with no corresponding time value). See :ref:`dates` below for details - :token:`string` - ``decimal`` :token:`integer`, Variable-precision decimal - :token:`float` - ``double`` :token:`integer` 64-bit IEEE-754 floating point - :token:`float` - ``duration`` :token:`duration`, A duration with nanosecond precision. See :ref:`durations` below for details - ``float`` :token:`integer`, 32-bit IEEE-754 floating point - :token:`float` - ``inet`` :token:`string` An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that - there is no ``inet`` constant, IP address should be input as strings - ``int`` :token:`integer` 32-bit signed int - ``smallint`` :token:`integer` 16-bit signed int - ``text`` :token:`string` UTF8 encoded string - ``time`` :token:`integer`, A time (with no corresponding date value) with nanosecond precision. See - :token:`string` :ref:`times` below for details - ``timestamp`` :token:`integer`, A timestamp (date and time) with millisecond precision. See :ref:`timestamps` - :token:`string` below for details - ``timeuuid`` :token:`uuid` Version 1 UUID_, generally used as a “conflict-free” timestamp. Also see - :ref:`timeuuid-functions` - ``tinyint`` :token:`integer` 8-bit signed int - ``uuid`` :token:`uuid` A UUID_ (of any version) - ``varchar`` :token:`string` UTF8 encoded string - ``varint`` :token:`integer` Arbitrary-precision integer -=============== ===================== ================================================================================== - -.. _counters: - -Counters -~~~~~~~~ - -The ``counter`` type is used to define *counter columns*. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the :ref:`UPDATE statement -` for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0. - -.. _counter-limitations: - -Counters have a number of important limitations: - -- They cannot be used for columns part of the ``PRIMARY KEY`` of a table. -- A table that contains a counter can only contain counters. In other words, either all the columns of a table outside - the ``PRIMARY KEY`` have the ``counter`` type, or none of them have it. -- Counters do not support :ref:`expiration `. 
-- The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other - words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed). -- Counter updates are, by nature, not `idemptotent `__. An important - consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), - the client has no way to know if the update has been applied or not. In particular, replaying the update may or may - not lead to an over count. - -.. _timestamps: - -Working with timestamps -^^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``timestamp`` type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as `the epoch `__: January 1 1970 at 00:00:00 GMT. - -Timestamps can be input in CQL either using their value as an :token:`integer`, or using a :token:`string` that -represents an `ISO 8601 `__ date. For instance, all of the values below are -valid ``timestamp`` values for Mar 2, 2011, at 04:05:00 AM, GMT: - -- ``1299038700000`` -- ``'2011-02-03 04:05+0000'`` -- ``'2011-02-03 04:05:00+0000'`` -- ``'2011-02-03 04:05:00.000+0000'`` -- ``'2011-02-03T04:05+0000'`` -- ``'2011-02-03T04:05:00+0000'`` -- ``'2011-02-03T04:05:00.000+0000'`` - -The ``+0000`` above is an RFC 822 4-digit time zone specification; ``+0000`` refers to GMT. US Pacific Standard Time is -``-0800``. The time zone may be omitted if desired (``'2011-02-03 04:05:00'``), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible. - -The time of day may also be omitted (``'2011-02-03'`` or ``'2011-02-03+0000'``), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the :ref:`date ` type. - -.. _dates: - -Working with dates -^^^^^^^^^^^^^^^^^^ - -Values of the ``date`` type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at -the center of the range (2^31). Epoch is January 1st, 1970 - -As for :ref:`timestamp `, a date can be input either as an :token:`integer` or using a date -:token:`string`. In the later case, the format should be ``yyyy-mm-dd`` (so ``'2011-02-03'`` for instance). - -.. _times: - -Working with times -^^^^^^^^^^^^^^^^^^ - -Values of the ``time`` type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight. - -As for :ref:`timestamp `, a time can be input either as an :token:`integer` or using a :token:`string` -representing the time. In the later case, the format should be ``hh:mm:ss[.fffffffff]`` (where the sub-second precision -is optional and if provided, can be less than the nanosecond). So for instance, the following are valid inputs for a -time: - -- ``'08:12:54'`` -- ``'08:12:54.123'`` -- ``'08:12:54.123456'`` -- ``'08:12:54.123456789'`` - -.. _durations: - -Working with durations -^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``duration`` type are encoded as 3 signed integer of variable lengths. The first integer represents the -number of months, the second the number of days and the third the number of nanoseconds. 
This is due to the fact that -the number of days in a month can change, and a day can have 23 or 25 hours depending on the daylight saving. -Internally, the number of months and days are decoded as 32 bits integers whereas the number of nanoseconds is decoded -as a 64 bits integer. - -A duration can be input as: - - #. ``(quantity unit)+`` like ``12h30m`` where the unit can be: - - * ``y``: years (12 months) - * ``mo``: months (1 month) - * ``w``: weeks (7 days) - * ``d``: days (1 day) - * ``h``: hours (3,600,000,000,000 nanoseconds) - * ``m``: minutes (60,000,000,000 nanoseconds) - * ``s``: seconds (1,000,000,000 nanoseconds) - * ``ms``: milliseconds (1,000,000 nanoseconds) - * ``us`` or ``µs`` : microseconds (1000 nanoseconds) - * ``ns``: nanoseconds (1 nanosecond) - #. ISO 8601 format: ``P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W`` - #. ISO 8601 alternative format: ``P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]`` - -For example:: - - INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s); - INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S); - INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09); - -.. _duration-limitation: - -Duration columns cannot be used in a table's ``PRIMARY KEY``. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if ``1mo`` is greater than ``29d`` without a date -context. - -A ``1d`` duration is not equals to a ``24h`` one as the duration type has been created to be able to support daylight -saving. - -.. _collections: - -Collections -^^^^^^^^^^^ - -CQL supports 3 kind of collections: :ref:`maps`, :ref:`sets` and :ref:`lists`. The types of those collections is defined -by: - -.. productionlist:: - collection_type: MAP '<' `cql_type` ',' `cql_type` '>' - : | SET '<' `cql_type` '>' - : | LIST '<' `cql_type` '>' - -and their values can be inputd using collection literals: - -.. productionlist:: - collection_literal: `map_literal` | `set_literal` | `list_literal` - map_literal: '{' [ `term` ':' `term` (',' `term` : `term`)* ] '}' - set_literal: '{' [ `term` (',' `term`)* ] '}' - list_literal: '[' [ `term` (',' `term`)* ] ']' - -Note however that neither :token:`bind_marker` nor ``NULL`` are supported inside collection literals. - -Noteworthy characteristics -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Collections are meant for storing/denormalizing relatively small amount of data. They work well for things like “the -phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all -messages sent by a user”, “events registered by a sensor”...), then collections are not appropriate and a specific table -(with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy -characteristics and limitations: - -- Individual collections are not indexed internally. Which means that even to access a single element of a collection, - the while collection has to be read (and reading one is not paged internally). -- While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. - Further, some lists operations are not idempotent by nature (see the section on :ref:`lists ` below for - details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when - possible. 
- -Please note that while some of those limitations may or may not be removed/improved upon in the future, it is a -anti-pattern to use a (single) collection to store large amounts of data. - -.. _maps: - -Maps -~~~~ - -A ``map`` is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:: - - CREATE TABLE users ( - id text PRIMARY KEY, - name text, - favs map // A map of text keys, and text values - ); - - INSERT INTO users (id, name, favs) - VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' }); - - // Replace the existing map entirely. - UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith'; - -Further, maps support: - -- Updating or inserting one or more elements:: - - UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith'; - UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith'; - -- Removing one or more element (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - DELETE favs['author'] FROM users WHERE id = 'jsmith'; - UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith'; - - Note that for removing multiple elements in a ``map``, you remove from it a ``set`` of keys. - -Lastly, TTLs are allowed for both ``INSERT`` and ``UPDATE``, but in both case the TTL set only apply to the newly -inserted/updated elements. In other words:: - - UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith'; - -will only apply the TTL to the ``{ 'color' : 'green' }`` record, the rest of the map remaining unaffected. - - -.. _sets: - -Sets -~~~~ - -A ``set`` is a (sorted) collection of unique values. You can define and insert a map with:: - - CREATE TABLE images ( - name text PRIMARY KEY, - owner text, - tags set // A set of text values - ); - - INSERT INTO images (name, owner, tags) - VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' }); - - // Replace the existing set entirely - UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg'; - -Further, sets support: - -- Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):: - - UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg'; - -- Removing one or multiple elements (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg'; - -Lastly, as for :ref:`maps `, TTLs if used only apply to the newly inserted values. - -.. _lists: - -Lists -~~~~~ - -.. note:: As mentioned above and further discussed at the end of this section, lists have limitations and specific - performance considerations that you should take into account before using them. In general, if you can use a - :ref:`set ` instead of list, always prefer a set. - -A ``list`` is a (sorted) collection of non-unique values where elements are ordered by there position in the list. 
You -can define and insert a list with:: - - CREATE TABLE plays ( - id text PRIMARY KEY, - game text, - players int, - scores list // A list of integers - ) - - INSERT INTO plays (id, game, players, scores) - VALUES ('123-afde', 'quake', 3, [17, 4, 2]); - - // Replace the existing list entirely - UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde'; - -Further, lists support: - -- Appending and prepending values to a list:: - - UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde'; - UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde'; - -- Setting the value at a particular position in the list. This imply that the list has a pre-existing element for that - position or an error will be thrown that the list is too small:: - - UPDATE plays SET scores[1] = 7 WHERE id = '123-afde'; - -- Removing an element by its position in the list. This imply that the list has a pre-existing element for that position - or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the - list size will be diminished by 1, shifting the position of all the elements following the one deleted:: - - DELETE scores[1] FROM plays WHERE id = '123-afde'; - -- Deleting *all* the occurrences of particular values in the list (if a particular element doesn't occur at all in the - list, it is simply ignored and no error is thrown):: - - UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; - -.. warning:: The append and prepend operations are not idempotent by nature. So in particular, if one of these operation - timeout, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value - twice. - -.. warning:: Setting and removing an element by position and removing occurences of particular values incur an internal - *read-before-write*. They will thus run more slowly and take more ressources than usual updates (with the exclusion - of conditional write that have their own cost). - -Lastly, as for :ref:`maps `, TTLs when used only apply to the newly inserted values. - -.. _udts: - -User-Defined Types -^^^^^^^^^^^^^^^^^^ - -CQL support the definition of user-defined types (UDT for short). Such a type can be created, modified and removed using -the :token:`create_type_statement`, :token:`alter_type_statement` and :token:`drop_type_statement` described below. But -once created, a UDT is simply referred to by its name: - -.. productionlist:: - user_defined_type: `udt_name` - udt_name: [ `keyspace_name` '.' ] `identifier` - - -Creating a UDT -~~~~~~~~~~~~~~ - -Creating a new user-defined type is done using a ``CREATE TYPE`` statement defined by: - -.. productionlist:: - create_type_statement: CREATE TYPE [ IF NOT EXISTS ] `udt_name` - : '(' `field_definition` ( ',' `field_definition` )* ')' - field_definition: `identifier` `cql_type` - -A UDT has a name (used to declared columns of that type) and is a set of named and typed fields. Fields name can be any -type, including collections or other UDT. For instance:: - - CREATE TYPE phone ( - country_code int, - number text, - ) - - CREATE TYPE address ( - street text, - city text, - zip text, - phones map - ) - - CREATE TABLE user ( - name text PRIMARY KEY, - addresses map> - ) - -Note that: - -- Attempting to create an already existing type will result in an error unless the ``IF NOT EXISTS`` option is used. If - it is used, the statement will be a no-op if the type already exists. 
-- A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At - creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in - the current keyspace. -- As of Cassandra |version|, UDTs have to be frozen in most cases, hence the ``frozen
`` in the table definition - above. Please see the section on :ref:`frozen ` for more details. - -UDT literals -~~~~~~~~~~~~ - -Once a used-defined type has been created, value can be input using a UDT literal: - -.. productionlist:: - udt_literal: '{' `identifier` ':' `term` ( ',' `identifier` ':' `term` )* '}' - -In other words, a UDT literal is like a :ref:`map ` literal but its keys are the names of the fields of the type. -For instance, one could insert into the table define in the previous section using:: - - INSERT INTO user (name, addresses) - VALUES ('z3 Pr3z1den7', { - 'home' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'cell' : { country_code: 1, number: '202 456-1111' }, - 'landline' : { country_code: 1, number: '...' } } - }, - 'work' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'fax' : { country_code: 1, number: '...' } } - } - }) - -To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some field -(in which case those will be ``null``). - -Altering a UDT -~~~~~~~~~~~~~~ - -An existing user-defined type can be modified using an ``ALTER TYPE`` statement: - -.. productionlist:: - alter_type_statement: ALTER TYPE `udt_name` `alter_type_modification` - alter_type_modification: ADD `field_definition` - : | RENAME `identifier` TO `identifier` ( `identifier` TO `identifier` )* - -You can: - -- add a new field to the type (``ALTER TYPE address ADD country text``). That new field will be ``null`` for any values - of the type created before the addition. -- rename the fields of the type (``ALTER TYPE address RENAME zip TO zipcode``). - -Dropping a UDT -~~~~~~~~~~~~~~ - -You can drop an existing user-defined type using a ``DROP TYPE`` statement: - -.. productionlist:: - drop_type_statement: DROP TYPE [ IF EXISTS ] `udt_name` - -Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error. - -If the type dropped does not exist, an error will be returned unless ``IF EXISTS`` is used, in which case the operation -is a no-op. - -.. _tuples: - -Tuples -^^^^^^ - -CQL also support tuples and tuple types (where the elements can be of different types). Functionally, tuples can be -though as anonymous UDT with anonymous fields. Tuple types and tuple literals are defined by: - -.. productionlist:: - tuple_type: TUPLE '<' `cql_type` ( ',' `cql_type` )* '>' - tuple_literal: '(' `term` ( ',' `term` )* ')' - -and can be used thusly:: - - CREATE TABLE durations ( - event text, - duration tuple, - ) - - INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours')); - -Unlike other "composed" types (collections and UDT), a tuple is always :ref:`frozen ` (without the need of the -`frozen` keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). -Also, a tuple literal should always have the same number of value than declared in the type it is a tuple of (some of -those values can be null but they need to be explicitly declared as so). - -.. _custom-types: - -Custom Types -^^^^^^^^^^^^ - -.. note:: Custom types exists mostly for backward compatiliby purposes and their usage is discouraged. Their usage is - complex, not user friendly and the other provided types, particularly :ref:`user-defined types `, should almost - always be enough. - -A custom type is defined by: - -.. 
productionlist:: - custom_type: `string` - -A custom type is a :token:`string` that contains the name of Java class that extends the server side ``AbstractType`` -class and that can be loaded by Cassandra (it should thus be in the ``CLASSPATH`` of every node running Cassandra). That -class will define what values are valid for the type and how the time sorts when used for a clustering column. For any -other purpose, a value of a custom type is the same than that of a ``blob``, and can in particular be input using the -:token:`blob` literal syntax. diff --git a/src/doc/4.0-alpha2/_sources/data_modeling/index.rst.txt b/src/doc/4.0-alpha2/_sources/data_modeling/index.rst.txt deleted file mode 100644 index dde031a19..000000000 --- a/src/doc/4.0-alpha2/_sources/data_modeling/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Data Modeling -============= - -.. todo:: TODO diff --git a/src/doc/4.0-alpha2/_sources/development/ci.rst.txt b/src/doc/4.0-alpha2/_sources/development/ci.rst.txt deleted file mode 100644 index 192b18862..000000000 --- a/src/doc/4.0-alpha2/_sources/development/ci.rst.txt +++ /dev/null @@ -1,72 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Jenkins CI Environment -********************** - -About CI testing and Apache Cassandra -===================================== - -Cassandra can be automatically tested using various test suites, that are either implemented based on JUnit or the `dtest `_ scripts written in Python. As outlined in :doc:`testing`, each kind of test suite addresses a different way how to test Cassandra. But in the end, all of them will be executed together on our CI platform at `builds.apache.org `_, running `Jenkins `_. - - - -Setting up your own Jenkins server -================================== - -Jenkins is an open source solution that can be installed on a large number of platforms. 
Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution. - -Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment. - -Required plugins ----------------- - -The following plugins need to be installed additionally to the standard plugins (git, ant, ..). - -You can install any missing plugins through the install manager. - -Go to ``Manage Jenkins -> Manage Plugins -> Available`` and install the following plugins and respective dependencies: - -* Job DSL -* Javadoc Plugin -* description setter plugin -* Throttle Concurrent Builds Plug-in -* Test stability history -* Hudson Post build task - - -Setup seed job --------------- - -Config ``New Item`` - -* Name it ``Cassandra-Job-DSL`` -* Select ``Freestyle project`` - -Under ``Source Code Management`` select Git using the repository: ``https://github.com/apache/cassandra-builds`` - -Under ``Build``, confirm ``Add build step`` -> ``Process Job DSLs`` and enter at ``Look on Filesystem``: ``jenkins-dsl/cassandra_job_dsl_seed.groovy`` - -Generated jobs will be created based on the Groovy script's default settings. You may want to override settings by checking ``This project is parameterized`` and add ``String Parameter`` for on the variables that can be found in the top of the script. This will allow you to setup jobs for your own repository and branches (e.g. working branches). - -**When done, confirm "Save"** - -You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message `"Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use"`. Goto ``Manage Jenkins`` -> ``In-process Script Approval`` to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates. - -Jobs are triggered by either changes in Git or are scheduled to execute periodically, e.g. on daily basis. Jenkins will use any available executor with the label "cassandra", once the job is to be run. Please make sure to make any executors available by selecting ``Build Executor Status`` -> ``Configure`` -> Add "``cassandra``" as label and save. - - - diff --git a/src/doc/4.0-alpha2/_sources/development/code_style.rst.txt b/src/doc/4.0-alpha2/_sources/development/code_style.rst.txt deleted file mode 100644 index 5a486a4a3..000000000 --- a/src/doc/4.0-alpha2/_sources/development/code_style.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Code Style -========== - -General Code Conventions ------------------------- - - - The Cassandra project follows `Sun's Java coding conventions `_ with an important exception: ``{`` and ``}`` are always placed on a new line - -Exception handling ------------------- - - - Never ever write ``catch (...) {}`` or ``catch (...) { logger.error() }`` merely to satisfy Java's compile-time exception checking. Always propagate the exception up or throw ``RuntimeException`` (or, if it "can't happen," ``AssertionError``). This makes the exceptions visible to automated tests. - - Avoid propagating up checked exceptions that no caller handles. Rethrow as ``RuntimeException`` (or ``IOError``, if that is more applicable). - - Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don't hide it behind a warn; if it isn't, no need for the warning. - - If you genuinely know an exception indicates an expected condition, it's okay to ignore it BUT this must be explicitly explained in a comment. - -Boilerplate ------------ - - - Avoid redundant ``@Override`` annotations when implementing abstract or interface methods. - - Do not implement equals or hashcode methods unless they are actually needed. - - Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in "real" methods to either.) - - Prefer requiring initialization in the constructor to setters. - - Avoid redundant ``this`` references to member fields or methods. - - Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it. - - Always include braces for nested levels of conditionals and loops. Only avoid braces for single level. - -Multiline statements --------------------- - - - Try to keep lines under 120 characters, but use good judgement -- it's better to exceed 120 by a little, than split a line that has no natural splitting points. - - When splitting inside a method call, use one line per parameter and align them, like this: - - :: - - SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), - columnFamilies.size(), - StorageService.getPartitioner()); - - - When splitting a ternary, use one line per clause, carry the operator, and align like this: - - :: - - var = bar == null - ? doFoo() - : doBar(); - -Whitespace ----------- - - - Please make sure to use 4 spaces instead of the tab character for all your indentation. - - Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn't have to pay attention to whitespace diffs. 
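One convenient way to catch trailing whitespace before it ends up in a patch is git's built-in whitespace check (a suggestion using standard git options, not a project requirement)::

    git diff --check           # flag trailing whitespace and other whitespace errors in unstaged changes
    git diff --cached --check  # the same check against staged changes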
- -Imports -------- - -Please observe the following order for your imports:: - - java - [blank line] - com.google.common - org.apache.commons - org.junit - org.slf4j - [blank line] - everything else alphabetically - -Format files for IDEs ---------------------- - - - IntelliJ: `intellij-codestyle.jar `_ - - IntelliJ 13: `gist for IntelliJ 13 `_ (this is a work in progress, still working on javadoc, ternary style, line continuations, etc) - - Eclipse (https://github.com/tjake/cassandra-style-eclipse) - - - diff --git a/src/doc/4.0-alpha2/_sources/development/dependencies.rst.txt b/src/doc/4.0-alpha2/_sources/development/dependencies.rst.txt deleted file mode 100644 index 7d230d3ae..000000000 --- a/src/doc/4.0-alpha2/_sources/development/dependencies.rst.txt +++ /dev/null @@ -1,54 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dependency Management -********************* - -Managing libraries for Cassandra is a bit less straight forward compared to other projects, as the build process is based on ant, maven and manually managed jars. Make sure to follow the steps below carefully and pay attention to any emerging issues in the :doc:`ci` and reported related issues on Jira/ML, in case of any project dependency changes. - -As Cassandra is an Apache product, all included libraries must follow Apache's `software license requirements `_. - -Required steps to add or update libraries -========================================= - -* Add or replace jar file in ``lib`` directory -* Add or update ``lib/license`` files -* Update dependencies in ``build.xml`` - - * Add to ``parent-pom`` with correct version - * Add to ``all-pom`` if simple Cassandra dependency (see below) - - -POM file types -============== - -* **parent-pom** - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here. -* **build-deps-pom(-sources)** + **coverage-deps-pom** - used by ``ant build`` compile target. Listed dependenices will be resolved and copied to ``build/lib/{jar,sources}`` by executing the ``maven-ant-tasks-retrieve-build`` target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution. -* **test-deps-pom** - refered by ``maven-ant-tasks-retrieve-test`` to retrieve and save dependencies to ``build/test/lib``. Exclusively used during JUnit test execution. -* **all-pom** - pom for `cassandra-all.jar `_ that can be installed or deployed to public maven repos via ``ant publish`` -* **dist-pom** - pom for tarball distribution (cassandra-{bin,src}.tar.gz) created by ``ant artifacts``. Should be left as is, but needed for installing or deploying releases. 
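As a rough sketch of the required steps above (``examplelib-1.2.3.jar`` is a purely hypothetical artifact), adding a library usually boils down to::

    # copy the new jar into lib and add the matching lib/license file, as described above
    cp ~/Downloads/examplelib-1.2.3.jar lib/
    # edit build.xml: add the artifact to parent-pom, and to all-pom if it is a plain Cassandra dependency;
    # then rebuild from scratch to confirm everything still resolves
    ant realclean
    ant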
- - -Troubleshooting and conflict resolution -======================================= - -Here are some useful commands that may help you out resolving conflicts. - -* ``ant realclean`` - gets rid of the build directory, including build artifacts. -* ``mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j`` - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ``ant mvn-install``. -* ``rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/`` - removes cached local Cassandra maven artifacts - - diff --git a/src/doc/4.0-alpha2/_sources/development/documentation.rst.txt b/src/doc/4.0-alpha2/_sources/development/documentation.rst.txt deleted file mode 100644 index 8b7cd4e4e..000000000 --- a/src/doc/4.0-alpha2/_sources/development/documentation.rst.txt +++ /dev/null @@ -1,104 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - -Working on Documentation -************************* - -How Cassandra is documented -=========================== - -The official Cassandra documentation lives in the project's git repository. We use a static site generator, `Sphinx `_, to create pages hosted at `cassandra.apache.org `_. You'll also find developer centric content about Cassandra internals in our retired `wiki `_ (not covered by this guide). - -Using a static site generator often requires to use a markup language instead of visual editors (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses `reStructuredText `_ for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at `existing documents <..>`_ to get a better idea how we use reStructuredText to write our documents. - -So how do you actually start making contributions? - -GitHub based work flow -====================== - -*Recommended for shorter documents and minor changes on existing content (e.g. fixing typos or updating descriptions)* - -Follow these steps to contribute using GitHub. It's assumed that you're logged in with an existing account. - -1. Fork the GitHub mirror of the `Cassandra repository `_ - -.. image:: images/docs_fork.png - -2. Create a new branch that you can use to make your edits. It's recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later to when you decide you’re ready to contribute your work. - -.. image:: images/docs_create_branch.png - -3. Navigate to document sources ``doc/source`` to find the ``.rst`` file to edit. The URL of the document should correspond to the directory structure. 
New files can be created using the "Create new file" button: - -.. image:: images/docs_create_file.png - -4. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and add some content. Have a look at other existing ``.rst`` files to get a better idea what format elements to use. - -.. image:: images/docs_editor.png - -Make sure to preview added content before committing any changes. - -.. image:: images/docs_preview.png - -5. Commit your work when you're done. Make sure to add a short description of all your edits since the last time you committed before. - -.. image:: images/docs_commit.png - -6. Finally if you decide that you're done working on your branch, it's time to create a pull request! - -.. image:: images/docs_pr.png - -Afterwards the GitHub Cassandra mirror will list your pull request and you're done. Congratulations! Please give us some time to look at your suggested changes before we get back to you. - - -Jira based work flow -==================== - -*Recommended for major changes* - -Significant changes to the documentation are best managed through our Jira issue tracker. Please follow the same `contribution guides `_ as for regular code contributions. Creating high quality content takes a lot of effort. It’s therefor always a good idea to create a ticket before you start and explain what you’re planing to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed. - -Working on documents locally using Sphinx -========================================= - -*Recommended for advanced editing* - -Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefor it’s a good idea to setup Sphinx locally for any serious editing. Please follow the instructions in the Cassandra source directory at ``doc/README.md``. Setup is very easy (at least on OSX and Linux). - -Notes for committers -==================== - -Please feel free to get involved and merge pull requests created on the GitHub mirror if you're a committer. As this is a read-only repository, you won't be able to merge a PR directly on GitHub. You'll have to commit the changes against the Apache repository with a comment that will close the PR when the committ syncs with GitHub. - -You may use a git work flow like this:: - - git remote add github https://github.com/apache/cassandra.git - git fetch github pull//head: - git checkout - -Now either rebase or squash the commit, e.g. for squashing:: - - git reset --soft origin/trunk - git commit --author - -Make sure to add a proper commit message including a "Closes #" text to automatically close the PR. - -Publishing ----------- - -Details for building and publishing of the site at cassandra.apache.org can be found `here `_. - diff --git a/src/doc/4.0-alpha2/_sources/development/gettingstarted.rst.txt b/src/doc/4.0-alpha2/_sources/development/gettingstarted.rst.txt deleted file mode 100644 index c2f5ef36e..000000000 --- a/src/doc/4.0-alpha2/_sources/development/gettingstarted.rst.txt +++ /dev/null @@ -1,60 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. 
to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _gettingstarted: - -Getting Started -************************* - -Initial Contributions -======================== - -Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). So, before firing up your IDE to create that new feature, we'd suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work. - * Add to or update the documentation - * Answer questions on the user list - * Review and test a submitted patch - * Investigate and fix a reported bug - * Create unit tests and d-tests - -Updating documentation -======================== - -The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (:ref:`patches`). - -Answering questions on the user list -==================================== - -Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! -See the `community `_ page for details on how to subscribe to the mailing list. - -Reviewing and testing a submitted patch -======================================= - -Reviewing patches is not the sole domain of committers, if others have reviewed a patch it can reduce the load on the committers allowing them to write more great features or review more patches. Follow the instructions in :ref:`_development_how_to_review` or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, "I tested this performance enhacement on our application's standard production load test and found a 3% improvement.") - -Investigate and/or fix a reported bug -===================================== - -Often, the hardest work in fixing a bug is reproducing it. Even if you don't have the knowledge to produce a fix, figuring out a way to reliable reproduce an issues can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (:ref:`patches`). - -Create unit tests and Dtests -============================ - -Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. 
This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your change in introducing new issues. See :ref:`testing` and :ref:`patches` for more detail. - - - diff --git a/src/doc/4.0-alpha2/_sources/development/how_to_commit.rst.txt b/src/doc/4.0-alpha2/_sources/development/how_to_commit.rst.txt deleted file mode 100644 index dff39832d..000000000 --- a/src/doc/4.0-alpha2/_sources/development/how_to_commit.rst.txt +++ /dev/null @@ -1,75 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -How-to Commit -============= - -If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself. - -Here is how committing and merging will usually look for merging and pushing for tickets that follow the convention (if patch-based): - -Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch). - -On cassandra-3.0: - #. ``git am -3 12345-3.0.patch`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git apply -3 12345-3.3.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git apply -3 12345-trunk.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk -atomic`` - -Same scenario, but a branch-based contribution: - -On cassandra-3.0: - #. ``git cherry-pick `` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk -atomic`` - -.. tip:: - - Notes on git flags: - ``-3`` flag to am and apply will instruct git to perform a 3-way merge for you. 
If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply. - - ``-atomic`` flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per each branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue. - -.. tip:: - - The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. - curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch - diff --git a/src/doc/4.0-alpha2/_sources/development/how_to_review.rst.txt b/src/doc/4.0-alpha2/_sources/development/how_to_review.rst.txt deleted file mode 100644 index 4778b6946..000000000 --- a/src/doc/4.0-alpha2/_sources/development/how_to_review.rst.txt +++ /dev/null @@ -1,73 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _how_to_review: - -Review Checklist -**************** - -When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process: - -**General** - - * Does it conform to the :doc:`code_style` guidelines? - * Is there any redundant or duplicate code? - * Is the code as modular as possible? - * Can any singletons be avoided? - * Can any of the code be replaced with library functions? - * Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem? - -**Error-Handling** - - * Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded? - * Where third-party utilities are used, are returning errors being caught? - * Are invalid parameter values handled? - * Are any Throwable/Exceptions passed to the JVMStabilityInspector? - * Are errors well-documented? Does the error message tell the user how to proceed? - * Do exceptions propagate to the appropriate level in the code? - -**Documentation** - - * Do comments exist and describe the intent of the code (the "why", not the "how")? - * Are javadocs added where appropriate? - * Is any unusual behavior or edge-case handling described? - * Are data structures and units of measurement explained? - * Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’? - * Does the code self-document via clear naming, abstractions, and flow control? - * Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed? - * Is the ticket tagged with "client-impacting" and "doc-impacting", where appropriate? - * Has lib/licences been updated for third-party libs? Are they Apache License compatible? 
- * Is the Component on the JIRA ticket set appropriately? - -**Testing** - - * Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc. - * Do tests exist and are they comprehensive? - * Do unit tests actually test that the code is performing the intended functionality? - * Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse? - * If the code may be affected by multi-node clusters, are there dtests? - * If the code may take a long time to test properly, are there CVH tests? - * Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions? - * If patch affects read/write path, did we test for performance regressions w/multiple workloads? - * If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature? - -**Logging** - - * Are logging statements logged at the correct level? - * Are there logs in the critical path that could affect performance? - * Is there any log that could be added to communicate status or troubleshoot potential problems in this feature? - * Can any unnecessary logging statement be removed? - diff --git a/src/doc/4.0-alpha2/_sources/development/ide.rst.txt b/src/doc/4.0-alpha2/_sources/development/ide.rst.txt deleted file mode 100644 index 97c73ae61..000000000 --- a/src/doc/4.0-alpha2/_sources/development/ide.rst.txt +++ /dev/null @@ -1,185 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Building and IDE Integration -**************************** - -Building From Source -==================== - -Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using `Java 8 `_, `Git `_ and `Ant `_. - -The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:: - - git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk - -Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:: - - git checkout cassandra-3.0 - -You can get a list of available branches with ``git branch``. - -Finally build Cassandra using ant:: - - ant - -This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled. - -.. hint:: - - You can setup multiple working trees for different Cassandra versions from the same repository using `git-worktree `_. 
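For example, a second working tree tracking the ``cassandra-3.0`` branch can sit next to an existing trunk checkout (the paths here are only illustrative)::

    git worktree add ../cassandra-3.0 cassandra-3.0
    cd ../cassandra-3.0
    ant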
- -| - -Setting up Cassandra in IntelliJ IDEA -===================================== - -`IntelliJ IDEA `_ by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra. - -Setup Cassandra as a Project (C* 2.1 and newer) ------------------------------------------------ - -Since 2.1.5, there is a new ant target: ``generate-idea-files``. Please see our `wiki `_ for instructions for older Cassandra versions. - -Please clone and build Cassandra as described above and execute the following steps: - -1. Once Cassandra is built, generate the IDEA files using ant: - -:: - - ant generate-idea-files - -2. Start IDEA - -3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA's File menu - -The project generated by the ant task ``generate-idea-files`` contains nearly everything you need to debug Cassandra and execute unit tests. - - * Run/debug defaults for JUnit - * Run/debug configuration for Cassandra daemon - * License header for Java source files - * Cassandra code style - * Inspections - -| - -Opening Cassandra in Apache NetBeans -======================================= - -`Apache NetBeans `_ is the elder of the open sourced java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans. - -Open Cassandra as a Project (C* 4.0 and newer) ------------------------------------------------ - -Please clone and build Cassandra as described above and execute the following steps: - -1. Start Apache NetBeans - -2. Open the NetBeans project from the `ide/` folder of the checked out Cassandra directory using the menu item "Open Project…" in NetBeans' File menu - -The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant `build.xml` script. - - * Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu. - * Profile Project is available via the Profile menu. In the opened Profiler tab, click the green "Profile" button. - * Cassandra's code style is honored in `ide/nbproject/project.properties` - -The `JAVA8_HOME` system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute. - -| - -Setting up Cassandra in Eclipse -=============================== - -Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the `download page `_. The following guide was created with "Eclipse IDE for Java Developers". - -These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x. - -Project Settings ----------------- - -**It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.** - - * Clone and build Cassandra as described above. - * Run ``ant generate-eclipse-files`` to create the Eclipse settings. - * Start Eclipse. - * Select ``File->Import->Existing Projects into Workspace->Select git directory``. - * Make sure "cassandra-trunk" is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above). - * Confirm "Finish" to have your project imported. 
- -You should now be able to find the project as part of the "Package Explorer" or "Project Explorer" without having Eclipse complain about any errors after building the project automatically. - -Unit Tests ----------- - -Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting ``Run As->JUnit Test``. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting ``Debug As->JUnit Test``. - -Alternatively all unit tests can be run from the command line as described in :doc:`testing` - -Debugging Cassandra Using Eclipse ---------------------------------- - -There are two ways how to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ``./bin/cassandra`` script and connect to the JVM through `remotely `_ from Eclipse or start Cassandra from Eclipse right away. - -Starting Cassandra From Command Line -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * Set environment variable to define remote debugging options for the JVM: - ``export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`` - * Start Cassandra by executing the ``./bin/cassandra`` - -Afterwards you should be able to connect to the running Cassandra process through the following steps: - -From the menu, select ``Run->Debug Configurations..`` - -.. image:: images/eclipse_debug0.png - -Create new remote application - -.. image:: images/eclipse_debug1.png - -Configure connection settings by specifying a name and port 1414 - -.. image:: images/eclipse_debug2.png - -Afterwards confirm "Debug" to connect to the JVM and start debugging Cassandra! - -Starting Cassandra From Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cassandra can also be started directly from Eclipse if you don't want to use the command line. - -From the menu, select ``Run->Run Configurations..`` - -.. image:: images/eclipse_debug3.png - -Create new application - -.. image:: images/eclipse_debug4.png - -Specify name, project and main class ``org.apache.cassandra.service.CassandraDaemon`` - -.. image:: images/eclipse_debug5.png - -Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed. - -:: - - -Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true - -.. image:: images/eclipse_debug6.png - -Now just confirm "Debug" and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging! - diff --git a/src/doc/4.0-alpha2/_sources/development/index.rst.txt b/src/doc/4.0-alpha2/_sources/development/index.rst.txt deleted file mode 100644 index ffa7134dd..000000000 --- a/src/doc/4.0-alpha2/_sources/development/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. 
-.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contributing to Cassandra -************************* - -.. toctree:: - :maxdepth: 2 - - gettingstarted - ide - testing - patches - code_style - how_to_review - how_to_commit - documentation - ci - dependencies - release_process diff --git a/src/doc/4.0-alpha2/_sources/development/patches.rst.txt b/src/doc/4.0-alpha2/_sources/development/patches.rst.txt deleted file mode 100644 index f3a2cca0f..000000000 --- a/src/doc/4.0-alpha2/_sources/development/patches.rst.txt +++ /dev/null @@ -1,141 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _patches: - -Contributing Code Changes -************************* - -Choosing What to Work on -======================== - -Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java and Python), documentation, testing or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing. - -As a general rule of thumb: - * Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the `developer community `_ - * Bug fixes take higher priority compared to features - * The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes. - * Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately - -.. hint:: - - Not sure what to work on? Just pick an issue marked as `Low Hanging Fruit `_ Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners. - -Before You Start Coding -======================= - -Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or :ref:`Slack `.
- -You should also - * Avoid redundant work by searching for already reported issues in `JIRA `_ - * Create a new issue early in the process describing what you're working on - not just after finishing your patch - * Link related JIRA issues with your own ticket to provide a better context - * Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code - * Ping people who you actively like to ask for advice on JIRA by `mentioning users `_ - -There are also some fixed rules that you need to be aware: - * Patches will only be applied to branches by following the release model - * Code must be testable - * Code must follow the :doc:`code_style` convention - * Changes must not break compatibility between different Cassandra versions - * Contributions must be covered by the Apache License - -Choosing the Right Branches to Work on -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are currently multiple Cassandra versions maintained in individual branches: - -======= ====== -Version Policy -======= ====== -4.0 Code freeze (see below) -3.11 Critical bug fixes only -3.0 Critical bug fixes only -2.2 Critical bug fixes only -2.1 Critical bug fixes only -======= ====== - -Corresponding branches in git are easy to recognize as they are named ``cassandra-`` (e.g. ``cassandra-3.0``). The ``trunk`` branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases. - -4.0 Code Freeze -""""""""""""""" - -Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. During that time, only the following patches will be considered for acceptance: - - * Bug fixes - * Measurable performance improvements - * Changes not distributed as part of the release such as: - * Testing related improvements and fixes - * Build and infrastructure related changes - * Documentation - -Bug Fixes -""""""""" - -Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be ``cassandra-2.1`` -> ``cassandra-2.2`` -> ``cassandra-3.0`` -> ``cassandra-3.x`` -> ``trunk``. But don't worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn't very common. As a contributor, you're also not expected to provide a single patch for each version. What you need to do however is: - - * Be clear about which versions you could verify to be affected by the bug - * For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on case by case bases - * If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0) - * Test if the patch can be merged cleanly across branches in the direction listed above - * Be clear which branches may need attention by the committer or even create custom patches for those if you can - -Creating a Patch -================ - -So you've finished coding and the great moment arrives: it's time to submit your patch! - - 1. Create a branch for your changes if you haven't done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. ``git checkout -b 12345-3.0`` - 2. 
Verify that you follow Cassandra's :doc:`code_style` - 3. Make sure all tests (including yours) pass using ant as described in :doc:`testing`. If you suspect a test failure is unrelated to your change, it may be useful to check the test's status by searching the issue tracker or looking at `CI `_ results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites. - 4. Consider going through the :doc:`how_to_review` for your code. This will help you to understand how others will consider your change for inclusion. - 5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either: - - a. Attach a patch to JIRA with a single squashed commit in it (per branch), or - b. Squash the commits in-place in your branches into one - - 6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below. - - :: - - - - patch by ; reviewed by for CASSANDRA-##### - - 7. When you're happy with the result, create a patch: - - :: - - git add - git commit -m '' - git format-patch HEAD~1 - mv (e.g. 12345-trunk.txt, 12345-3.0.txt) - - Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch: - - :: - - git push --set-upstream origin 12345-3.0 - - 8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless. - 9. Attach the newly generated patch to the ticket/add a link to your branch and click "Submit Patch" at the top of the ticket. This will move the ticket into "Patch Available" status, indicating that your submission is ready for review. - 10. Wait for other developers or committers to review it and hopefully +1 the ticket (see :doc:`how_to_review`). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable. - 11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into "Patch Available" once again. - -Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work. - - diff --git a/src/doc/4.0-alpha2/_sources/development/release_process.rst.txt b/src/doc/4.0-alpha2/_sources/development/release_process.rst.txt deleted file mode 100644 index 0ab6dff1a..000000000 --- a/src/doc/4.0-alpha2/_sources/development/release_process.rst.txt +++ /dev/null @@ -1,268 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. release_process: - -Release Process -*************** - -.. contents:: :depth: 3 - -|  -| - -.. attention:: - - WORK IN PROGRESS - * A number of these steps still have been finalised/tested. - * The use of people.apache.org needs to be replaced with svnpubsub and dist.apache.org - - -The steps for Release Managers to create, vote and publish releases for Apache Cassandra. - -While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC can complete the process of publishing and announcing the release. - - -Prerequisites -============= - -Background docs - * `ASF Release Policy `_ - * `ASF Release Distribution Policy `_ - * `ASF Release Best Practices `_ - - -A debian based linux OS is required to run the release steps from. Debian-based distros provide the required RPM, dpkg and repository management tools. - - -Create and publish your GPG key -------------------------------- - -To create a GPG key, follow the `guidelines `_. -Include your public key in:: - - https://dist.apache.org/repos/dist/release/cassandra/KEYS - - -Publish your GPG key in a PGP key server, such as `MIT Keyserver `_. - - -Create Release Artifacts -======================== - -Any committer can perform the following steps to create and call a vote on a proposed release. - -Check that no open jira tickets are urgent and currently being worked on. -Also check with a PMC that there's security vulnerabilities currently being worked on in private. - -Perform the Release -------------------- - -Run the following commands to generate and upload release artifacts, to a nexus staging repository and distribution location:: - - - cd ~/git - git clone https://github.com/apache/cassandra-builds.git - # Edit the variables at the top of `cassandra-builds/cassandra-release/prepare_release.sh` - - # After cloning cassandra-builds repo, the prepare_release.sh is run from the actual cassandra git checkout, - # on the branch/commit that we wish to tag for the tentative release along with version number to tag. - # For example here might be `3.11` and `3.11.3` - cd ~/git/cassandra/ - git checkout cassandra- - ../cassandra-builds/cassandra-release/prepare_release.sh -v - -If successful, take note of the email text output which can be used in the next section "Call for a Vote". - -The ``prepare_release.sh`` script does not yet generate and upload the rpm distribution packages. -To generate and upload them do:: - - cd ~/git/cassandra-build - docker build -f docker/centos7-image.docker docker/ - docker run --rm -v `pwd`/dist:/dist `docker images -f label=org.cassandra.buildenv=centos -q` /home/build/build-rpms.sh -tentative - rpmsign --addsign dist/*.rpm - -For more information on the above steps see the `cassandra-builds documentation `_. 
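Before copying the packages anywhere, it can be worth sanity-checking the signatures locally; ``rpm --checksig`` is a standard rpm command rather than part of the release scripts::

    rpm --checksig dist/*.rpm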
-The next step is to copy and commit these binaries to staging svnpubsub:: - - # FIXME the following commands is wrong while people.apache.org is still used instead of svnpubsub and dist.apache.org - cd ~/git - svn co https://dist.apache.org/repos/dist/dev/cassandra cassandra-dist-dev - mkdir cassandra-dist-dev/ - cp cassandra-build/dist/*.rpm cassandra-dist-dev// - - svn add cassandra-dist-dev/ - svn ci cassandra-dist-dev/ - -After committing the binaries to staging, increment the version number in Cassandra on the `cassandra-` - - cd ~/git/cassandra/ - git checkout cassandra- - edit build.xml # update ` ` - edit debian/changelog # add entry for new version - edit CHANGES.txt # add entry for new version - git commit -m "Update version to " build.xml debian/changelog CHANGES.txt - git push - -Call for a Vote -=============== - -Fill out the following email template and send to the dev mailing list:: - - I propose the following artifacts for release as . - - sha1: - - Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/-tentative - - Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-/org/apache/cassandra/apache-cassandra// - - Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-/ - - The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/ - - The vote will be open for 72 hours (longer if needed). - - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=-tentative - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=-tentative - - - -Post-vote operations -==================== - -Any PMC can perform the following steps to formalize and publish a successfully voted release. - -Publish Artifacts ------------------ - -Run the following commands to publish the voted release artifacts:: - - cd ~/git - git clone https://github.com/apache/cassandra-builds.git - # edit the variables at the top of `finish_release.sh` - - # After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout, - # on the tentative release tag that we wish to tag for the final release version number tag. - cd ~/git/cassandra/ - git checkout -tentative - ../cassandra-builds/cassandra-release/finish_release.sh -v - -If successful, take note of the email text output which can be used in the next section "Send Release Announcement". -The output will also list the next steps that are required. The first of these is to commit changes made to your https://dist.apache.org/repos/dist/release/cassandra/ checkout. - - -Promote Nexus Repository ------------------------- - - * Login to `Nexus repository `_ again. - * Click on "Staging" and then on the repository with id "cassandra-staging". - * Find your closed staging repository, right click on it and choose "Promote". - * Select the "Releases" repository and click "Promote". - * Next click on "Repositories", select the "Releases" repository and validate that your artifacts exist as you expect them. 
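Once the promoted artifacts have had time to sync, one quick way to confirm they resolve is standard Maven tooling (adjust the version accordingly)::

    mvn dependency:get -Dartifact=org.apache.cassandra:cassandra-all:<version>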
- -Sign and Upload Distribution Packages to Bintray ---------------------------------------- - -Run the following command:: - - cd ~/git - # FIXME the next command is wrong while people.apache.org is used instead of svnpubsub and dist.apache.org - svn mv https://dist.apache.org/repos/dist/dev/cassandra/ https://dist.apache.org/repos/dist/release/cassandra/ - - # Create the yum metadata, sign the metadata, and sign some files within the signed repo metadata that the ASF sig tool errors out on - svn co https://dist.apache.org/repos/dist/release/cassandra/redhat/ cassandra-dist-redhat - cd cassandra-dist-redhat/x/ - createrepo . - gpg --detach-sign --armor repodata/repomd.xml - for f in `find repodata/ -name *.bz2`; do - gpg --detach-sign --armor $f; - done - - svn co https://dist.apache.org/repos/dist/release/cassandra/ cassandra-dist- - cd cassandra-dist- - cassandra-build/cassandra-release/upload_bintray.sh cassandra-dist- - - -Update and Publish Website --------------------------- - -See `docs https://svn.apache.org/repos/asf/cassandra/site/src/README`_ for building and publishing the website. -Also update the CQL doc if appropriate. - -Release version in JIRA ------------------------ - -Release the JIRA version. - - * In JIRA go to the version that you want to release and release it. - * Create a new version, if it has not been done before. - -Update to Next Development Version ----------------------------------- - -Edit and commit ``build.xml`` so the base.version property points to the next version. - -Wait for Artifacts to Sync --------------------------- - -Wait for the artifacts to sync at http://www.apache.org/dist/cassandra/ - -Send Release Announcement -------------------------- - -Fill out the following email template and send to both user and dev mailing lists:: - - The Cassandra team is pleased to announce the release of Apache Cassandra version . - - Apache Cassandra is a fully distributed database. It is the right choice - when you need scalability and high availability without compromising - performance. - - http://cassandra.apache.org/ - - Downloads of source and binary distributions are listed in our download - section: - - http://cassandra.apache.org/download/ - - This version is release[1] on the series. As always, - please pay attention to the release notes[2] and let us know[3] if you - were to encounter any problem. - - Enjoy! - - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb= - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb= - [3]: https://issues.apache.org/jira/browse/CASSANDRA - -Update Slack Cassandra topic ---------------------------- - -Update topic in ``cassandra`` :ref:`Slack room ` - /topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don't ask to ask - -Tweet from @Cassandra ---------------------- - -Tweet the new release, from the @Cassandra account - -Delete Old Releases -------------------- - -As described in `When to Archive `_. -Also check people.apache.org as previous release scripts used it. diff --git a/src/doc/4.0-alpha2/_sources/development/testing.rst.txt b/src/doc/4.0-alpha2/_sources/development/testing.rst.txt deleted file mode 100644 index 7f38fe590..000000000 --- a/src/doc/4.0-alpha2/_sources/development/testing.rst.txt +++ /dev/null @@ -1,98 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _testing: - -Testing -******* - -Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you're working on. - - -Unit Testing -============ - -The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the ``test/unit`` directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example. - -.. code-block:: java - - @Test - public void testBatchAndList() throws Throwable - { - createTable("CREATE TABLE %s (k int PRIMARY KEY, l list)"); - execute("BEGIN BATCH " + - "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " + - "APPLY BATCH"); - - assertRows(execute("SELECT l FROM %s WHERE k = 0"), - row(list(1, 2, 3))); - } - -Unit tests can be run from the command line using the ``ant test`` command, ``ant test -Dtest.name=`` to execute a test suite or ``ant testsome -Dtest.name= -Dtest.methods=[,testmethod2]`` for individual tests. For example, to run all test methods in the ``org.apache.cassandra.cql3.SimpleQueryTest`` class, you would run:: - - ant test -Dtest.name=SimpleQueryTest - -To run only the ``testStaticCompactTables()`` test method from that class, you would run:: - - ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables - -If you see an error like this:: - - Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found: - org/apache/tools/ant/taskdefs/optional/junit/JUnitTask using the classloader - AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar] - -You will need to install the ant-optional package since it contains the ``JUnitTask`` class. - -Long running tests ------------------- - -Test that consume a significant amount of time during execution can be found in the ``test/long`` directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under ``test/long`` only when using the ``ant long-test`` target. 
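A minimal sketch tying the commands above together on a Debian-based machine (package names are assumed from the error message; adjust for your distribution)::

    sudo apt-get install ant ant-optional   # provides the JUnitTask class mentioned above
    ant test -Dtest.name=SimpleQueryTest    # run a single unit test class
    ant long-test                           # run the tests under test/long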
- -DTests -====== - -One way of doing integration or system testing at larger scale is by using `dtest `_, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ``ccmlib`` from the `ccm `_ project. Dtests will setup clusters using this library just as you do running ad-hoc ``ccm`` commands on your local machine. Afterwards dtests will use the `Python driver `_ to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes. - -Using dtests helps us to prevent regression bugs by continually executing tests on the `CI server `_ against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration `here `_. - -The best way to learn how to write dtests is probably by reading the introduction "`How to Write a Dtest `_" and by looking at existing, recently updated tests in the project. New tests must follow certain `style conventions `_ that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefor you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR. - -Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will able to support you, and in some cases they may offer to write a dtest for you. - -Performance Testing -=================== - -Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable. - -Cassandra Stress Tool ---------------------- - -See :ref:`cassandra_stress` - -cstar_perf ----------- - -Another tool available on github is `cstar_perf `_ that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it. - -CircleCI --------- -Cassandra ships with a default `CircleCI `_ configuration, to enable running tests on your branches, you need to go the CircleCI website, click "Login" and log in with your github account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ``ant eclipse-warnings`` and ``ant test`` will be run. If you up the parallelism to 4, it also runs ``ant long-test``, ``ant test-compression`` and ``ant stress-test`` - - diff --git a/src/doc/4.0-alpha2/_sources/faq/index.rst.txt b/src/doc/4.0-alpha2/_sources/faq/index.rst.txt deleted file mode 100644 index acb7538d6..000000000 --- a/src/doc/4.0-alpha2/_sources/faq/index.rst.txt +++ /dev/null @@ -1,299 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Frequently Asked Questions -========================== - -- :ref:`why-cant-list-all` -- :ref:`what-ports` -- :ref:`what-happens-on-joins` -- :ref:`asynch-deletes` -- :ref:`one-entry-ring` -- :ref:`can-large-blob` -- :ref:`nodetool-connection-refused` -- :ref:`to-batch-or-not-to-batch` -- :ref:`selinux` -- :ref:`how-to-unsubscribe` -- :ref:`cassandra-eats-all-my-memory` -- :ref:`what-are-seeds` -- :ref:`are-seeds-SPOF` -- :ref:`why-message-dropped` -- :ref:`oom-map-failed` -- :ref:`what-on-same-timestamp-update` -- :ref:`why-bootstrapping-stream-error` - -.. _why-cant-list-all: - -Why can't I set ``listen_address`` to listen on 0.0.0.0 (all my addresses)? ---------------------------------------------------------------------------- - -Cassandra is a gossip-based distributed system and ``listen_address`` is the address a node tells other nodes to reach -it at. Telling other nodes "contact me on any of my addresses" is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen. - -If you don't want to manually specify an IP to ``listen_address`` for each node in your cluster (understandable!), leave -it blank and Cassandra will use ``InetAddress.getLocalHost()`` to pick an address. Then it's up to you or your ops team -to make things resolve correctly (``/etc/hosts/``, dns, etc). - -One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769). - -See :jira:`256` and :jira:`43` for more gory details. - -.. _what-ports: - -What ports does Cassandra use? ------------------------------- - -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX. The internode communication and native protocol ports -are configurable in the :ref:`cassandra-yaml`. The JMX port is configurable in ``cassandra-env.sh`` (through JVM -options). All ports are TCP. - -.. _what-happens-on-joins: - -What happens to existing data in my cluster when I add new nodes? ------------------------------------------------------------------ - -When a new nodes joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data -to itself. See :ref:`topology-changes`. - -.. _asynch-deletes: - -I delete data from Cassandra, but disk usage stays the same. What gives? ------------------------------------------------------------------------- - -Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can't actually be removed -when you perform a delete, instead, a marker (also called a "tombstone") is written to indicate the value's new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See :ref:`compaction` for more detail. 
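A quick way to sanity-check the default ports listed above from another machine (the hostname is a placeholder; substitute one of your nodes)::

    nc -zv node1.example.com 7000   # inter-node communication (7001 if SSL is enabled)
    nc -zv node1.example.com 9042   # native protocol clients
    nc -zv node1.example.com 7199   # JMX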
- -.. _one-entry-ring: - -Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring? ------------------------------------------------------------------------------------------------------------------- - -This happens when you have the same token assigned to each node. Don't do that. - -Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes. - -The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart. - -.. _change-replication-factor: - -Can I change the replication factor (of a keyspace) on a live cluster? ----------------------------------------------------------------------- - -Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data: - -- :ref:`Alter ` the replication factor for the desired keyspace (using cqlsh for instance). -- If you're reducing the replication factor, run ``nodetool cleanup`` on the cluster to remove surplus replicated data. - Cleanup runs on a per-node basis. -- If you're increasing the replication factor, run ``nodetool repair -full`` to ensure data is replicated according to the new - configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster - performance. It's highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will - most likely swamp it. Note that you will need to run a full repair (``-full``) to make sure that already repaired - sstables are not skipped. - -.. _can-large-blob: - -Can I Store (large) BLOBs in Cassandra? ---------------------------------------- - -Cassandra isn't optimized for large file or BLOB storage and a single ``blob`` value is always read and sent to the -client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to -manually split large blobs into smaller chunks. - -Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the -``max_mutation_size_in_kb`` configuration of the :ref:`cassandra-yaml` file (which defaults to half of -``commitlog_segment_size_in_mb``, which itself defaults to 32MB). - -.. _nodetool-connection-refused: - -Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives? --------------------------------------------------------------------------------------- - -Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions. - -If you are not using DNS, then make sure that your ``/etc/hosts`` files are accurate on both ends. If that fails, try -setting the ``-Djava.rmi.server.hostname=`` JVM option near the bottom of ``cassandra-env.sh`` to an -interface that you can reach from the remote machine. - -.. _to-batch-or-not-to-batch: - -Will batching my operations speed up my bulk load? --------------------------------------------------- - -No. Using batches to load data will generally just add "spikes" of latency.
Use asynchronous INSERTs instead, or use -true :ref:`bulk-loading`. - -An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch -stay reasonable). But never ever blindly batch everything! - -.. _selinux: - -On RHEL nodes are unable to join the ring ------------------------------------------ - -Check if `SELinux `__ is on; if it is, turn it off. - -.. _how-to-unsubscribe: - -How do I unsubscribe from the email list? ------------------------------------------ - -Send an email to ``user-unsubscribe@cassandra.apache.org``. - -.. _cassandra-eats-all-my-memory: - -Why does top report that Cassandra is using a lot more memory than the Java heap max? -------------------------------------------------------------------------------------- - -Cassandra uses `Memory Mapped Files `__ (mmap) internally. That is, we -use the operating system's virtual memory system to map a number of on-disk files into the Cassandra process' address -space. This will "use" virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that. - -What matters from the perspective of "memory use" in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap'd /dev/zero, which represent real memory used. The key issue is that for a mmap'd file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write. - -The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don't -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail `here `__. - -.. _what-are-seeds: - -What are seeds? ---------------- - -Seeds are used during startup to discover the cluster. - -If you configure your nodes to refer some node as seed, nodes in your ring tend to send Gossip message to seeds more -often (also see the :ref:`section on gossip `) than to non-seeds. In other words, seeds are worked as hubs of -Gossip network. With seeds, each node can detect status changes of other nodes quickly. - -Seeds are also referred by new nodes on bootstrap to learn other nodes in ring. When you add a new node to ring, you -need to specify at least one live seed to contact. Once a node join the ring, it learns about the other nodes, so it -doesn't need seed on subsequent boot. - -You can make a seed a node at any time. There is nothing special about seed nodes. If you list the node in seed list it -is a seed - -Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself) -If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you -do not have to worry about bootstrap at all. - -Recommended usage of seeds: - -- pick two (or more) nodes per data center as seed nodes. -- sync the seed list to all your nodes - -.. _are-seeds-SPOF: - -Does single seed mean single point of failure? 
---------------------------------------------- - -The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is -recommended to configure multiple seeds in a production system. - -.. _cant-call-jmx-method: - -Why can't I call jmx method X on jconsole? ------------------------------------------- - -Some JMX operations use array arguments and, as jconsole doesn't support array arguments, those operations can't be -called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations or use an -array-capable JMX monitoring tool. - -.. _why-message-dropped: - -Why do I see "... messages dropped ..." in the logs? ----------------------------------------------------- - -This is a symptom of load shedding -- Cassandra defending itself against more requests than it can handle. - -Internode messages which are received by a node, but do not get to be processed within their proper timeout (see -``read_request_timeout``, ``write_request_timeout``, ... in the :ref:`cassandra-yaml`), are dropped rather than -processed (since the coordinator node will no longer be waiting for a response). - -For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be -repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result. - -For reads, this means a read request may not have completed. - -Load shedding is part of the Cassandra architecture; if this is a persistent issue it is generally a sign of an -overloaded node or cluster. - -.. _oom-map-failed: - -Cassandra dies with ``java.lang.OutOfMemoryError: Map failed`` --------------------------------------------------------------- - -If Cassandra is dying **specifically** with the "Map failed" message, it means the OS is denying Java the ability to -lock more memory. In Linux, this typically means memlock is limited. Check ``/proc//limits`` to verify -this and raise it (e.g., via ulimit in bash). You may also need to increase ``vm.max_map_count``. Note that the Debian -package handles this for you automatically. - - -.. _what-on-same-timestamp-update: - -What happens if two updates are made with the same timestamp? -------------------------------------------------------------- - -Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected. - -.. _why-bootstrapping-stream-error: - -Why does bootstrapping a new node fail with a "Stream failed" error? ---------------------------------------------------------------------- - -Two main possibilities: - -#. the GC may be creating long pauses disrupting the streaming process -#. compactions happening in the background hold streaming long enough that the TCP connection fails - -In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value -(default is very high on Linux).
Try to just run the following:: - - $ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5 - -To make those settings permanent, add them to your ``/etc/sysctl.conf`` file. - -Note: `GCE `__'s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment. - - - - - - - - - - - diff --git a/src/doc/4.0-alpha2/_sources/getting_started/configuring.rst.txt b/src/doc/4.0-alpha2/_sources/getting_started/configuring.rst.txt deleted file mode 100644 index e71eeedbe..000000000 --- a/src/doc/4.0-alpha2/_sources/getting_started/configuring.rst.txt +++ /dev/null @@ -1,67 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra ---------------------- - -For running Cassandra on a single node, the default configuration file present at ``./conf/cassandra.yaml`` is enough, -you shouldn't need to change any configuration. However, when you deploy a cluster of nodes, or use clients that -are not on the same host, then there are some parameters that must be changed. - -The Cassandra configuration files can be found in the ``conf`` directory of tarballs. For packages, the configuration -files will be located in ``/etc/cassandra``. - -Main runtime properties -^^^^^^^^^^^^^^^^^^^^^^^ - -Most of configuration in Cassandra is done via yaml properties that can be set in ``cassandra.yaml``. At a minimum you -should consider setting the following properties: - -- ``cluster_name``: the name of your cluster. -- ``seeds``: a comma separated list of the IP addresses of your cluster seeds. -- ``storage_port``: you don't necessarily need to change this but make sure that there are no firewalls blocking this - port. -- ``listen_address``: the IP address of your node, this is what allows other nodes to communicate with this node so it - is important that you change it. Alternatively, you can set ``listen_interface`` to tell Cassandra which interface to - use, and consecutively which address to use. Set only one, not both. -- ``native_transport_port``: as for storage\_port, make sure this port is not blocked by firewalls as clients will - communicate with Cassandra on this port. - -Changing the location of directories -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following yaml properties control the location of directories: - -- ``data_file_directories``: one or more directories where data files are located. -- ``commitlog_directory``: the directory where commitlog files are located. -- ``saved_caches_directory``: the directory where saved caches are located. -- ``hints_directory``: the directory where hints are located. 
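A minimal sketch of setting two of the runtime properties above from the shell, assuming a tarball install where these keys are present and uncommented in ``conf/cassandra.yaml`` (the cluster name and address are example values only)::

    sed -i \
        -e "s/^cluster_name:.*/cluster_name: 'My Cluster'/" \
        -e "s/^listen_address:.*/listen_address: 10.0.0.1/" \
        conf/cassandra.yaml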
- -For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks. - -Environment variables -^^^^^^^^^^^^^^^^^^^^^ - -JVM-level settings such as heap size can be set in ``cassandra-env.sh``. You can add any additional JVM command line -argument to the ``JVM_OPTS`` environment variable; when Cassandra starts these arguments will be passed to the JVM. - -Logging -^^^^^^^ - -The logger in use is logback. You can change logging properties by editing ``logback.xml``. By default it will log at -INFO level into a file called ``system.log`` and at debug level into a file called ``debug.log``. When running in the -foreground, it will also log at INFO level to the console. - diff --git a/src/doc/4.0-alpha2/_sources/getting_started/drivers.rst.txt b/src/doc/4.0-alpha2/_sources/getting_started/drivers.rst.txt deleted file mode 100644 index 9a2c1567a..000000000 --- a/src/doc/4.0-alpha2/_sources/getting_started/drivers.rst.txt +++ /dev/null @@ -1,123 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _client-drivers: - -Client drivers --------------- - -Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver. - -Java -^^^^ - -- `Achilles `__ -- `Astyanax `__ -- `Casser `__ -- `Datastax Java driver `__ -- `Kundera `__ -- `PlayORM `__ - -Python -^^^^^^ - -- `Datastax Python driver `__ - -Ruby -^^^^ - -- `Datastax Ruby driver `__ - -C# / .NET -^^^^^^^^^ - -- `Cassandra Sharp `__ -- `Datastax C# driver `__ -- `Fluent Cassandra `__ - -Nodejs -^^^^^^ - -- `Datastax Nodejs driver `__ -- `Node-Cassandra-CQL `__ - -PHP -^^^ - -- `CQL \| PHP `__ -- `Datastax PHP driver `__ -- `PHP-Cassandra `__ -- `PHP Library for Cassandra `__ - -C++ -^^^ - -- `Datastax C++ driver `__ -- `libQTCassandra `__ - -Scala -^^^^^ - -- `Datastax Spark connector `__ -- `Phantom `__ -- `Quill `__ - -Clojure -^^^^^^^ - -- `Alia `__ -- `Cassaforte `__ -- `Hayt `__ - -Erlang -^^^^^^ - -- `CQerl `__ -- `Erlcass `__ - -Go -^^ - -- `CQLc `__ -- `Gocassa `__ -- `GoCQL `__ - -Haskell -^^^^^^^ - -- `Cassy `__ - -Rust -^^^^ - -- `Rust CQL `__ - -Perl -^^^^ - -- `Cassandra::Client and DBD::Cassandra `__ - -Elixir -^^^^^^ - -- `Xandra `__ -- `CQEx `__ - -Dart -^^^^ - -- `dart_cassandra_cql `__ diff --git a/src/doc/4.0-alpha2/_sources/getting_started/index.rst.txt b/src/doc/4.0-alpha2/_sources/getting_started/index.rst.txt deleted file mode 100644 index 4ca9c4d40..000000000 --- a/src/doc/4.0-alpha2/_sources/getting_started/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Getting Started -=============== - -This section covers how to get started using Apache Cassandra and should be the first thing to read if you are new to -Cassandra. - -.. toctree:: - :maxdepth: 2 - - installing - configuring - querying - drivers - - diff --git a/src/doc/4.0-alpha2/_sources/getting_started/installing.rst.txt b/src/doc/4.0-alpha2/_sources/getting_started/installing.rst.txt deleted file mode 100644 index fb8a0463f..000000000 --- a/src/doc/4.0-alpha2/_sources/getting_started/installing.rst.txt +++ /dev/null @@ -1,106 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Installing Cassandra --------------------- - -Prerequisites -^^^^^^^^^^^^^ - -- The latest version of Java 8, either the `Oracle Java Standard Edition 8 - `__ or `OpenJDK 8 `__. To - verify that you have the correct version of java installed, type ``java -version``. - -- For using cqlsh, the latest version of `Python 2.7 `__. To verify that you have - the correct version of Python installed, type ``python --version``. - -Installation from binary tarball files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Download the latest stable release from the `Apache Cassandra downloads website `__. - -- Untar the file somewhere, for example: - -:: - - tar -xzvf apache-cassandra-3.6-bin.tar.gz - -The files will be extracted into ``apache-cassandra-3.6``, you need to substitute 3.6 with the release number that you -have downloaded. - -- Optionally add ``apache-cassandra-3.6\bin`` to your path. -- Start Cassandra in the foreground by invoking ``bin/cassandra -f`` from the command line. Press "Control-C" to stop - Cassandra. Start Cassandra in the background by invoking ``bin/cassandra`` from the command line. Invoke ``kill pid`` - or ``pkill -f CassandraDaemon`` to stop Cassandra, where pid is the Cassandra process id, which you can find for - example by invoking ``pgrep -f CassandraDaemon``. -- Verify that Cassandra is running by invoking ``bin/nodetool status`` from the command line. -- Configuration files are located in the ``conf`` sub-directory. 
-- Since Cassandra 2.1, log and data directories are located in the ``logs`` and ``data`` sub-directories respectively. - Older versions defaulted to ``/var/log/cassandra`` and ``/var/lib/cassandra``. Due to this, it is necessary to either - start Cassandra with root privileges or change ``conf/cassandra.yaml`` to use directories owned by the current user, - as explained below in the section on changing the location of directories. - -Installation from Debian packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Add the Apache repository of Cassandra to ``/etc/apt/sources.list.d/cassandra.sources.list``, for example for version - 3.6: - -:: - - echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list - -- Add the Apache Cassandra repository keys: - -:: - - curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add - - -- Update the repositories: - -:: - - sudo apt-get update - -- If you encounter this error: - -:: - - GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA - -Then add the public key A278B781FE4B2BDA as follows: - -:: - - sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA - -and repeat ``sudo apt-get update``. The actual key may be different, you get it from the error message itself. For a -full list of Apache contributors public keys, you can refer to `this link `__. - -- Install Cassandra: - -:: - - sudo apt-get install cassandra - -- You can start Cassandra with ``sudo service cassandra start`` and stop it with ``sudo service cassandra stop``. - However, normally the service will start automatically. For this reason be sure to stop it if you need to make any - configuration changes. -- Verify that Cassandra is running by invoking ``nodetool status`` from the command line. -- The default location of configuration files is ``/etc/cassandra``. -- The default location of log and data directories is ``/var/log/cassandra/`` and ``/var/lib/cassandra``. diff --git a/src/doc/4.0-alpha2/_sources/getting_started/querying.rst.txt b/src/doc/4.0-alpha2/_sources/getting_started/querying.rst.txt deleted file mode 100644 index 55b162bb4..000000000 --- a/src/doc/4.0-alpha2/_sources/getting_started/querying.rst.txt +++ /dev/null @@ -1,52 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Inserting and querying ----------------------- - -The API to Cassandra is :ref:`CQL `, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done: - -- either using cqlsh, -- or through a client driver for Cassandra. - -CQLSH -^^^^^ - -cqlsh is a command line shell for interacting with Cassandra through CQL. 
It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:: - - $ bin/cqlsh localhost - Connected to Test Cluster at localhost:9042. - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - Use HELP for help. - cqlsh> SELECT cluster_name, listen_address FROM system.local; - - cluster_name | listen_address - --------------+---------------- - Test Cluster | 127.0.0.1 - - (1 rows) - cqlsh> - -See the :ref:`cqlsh section ` for full documentation. - -Client drivers -^^^^^^^^^^^^^^ - -A lot of client drivers are provided by the Community and a list of known drivers is provided in :ref:`the next section -`. You should refer to the documentation of each drivers for more information on how to use them. diff --git a/src/doc/4.0-alpha2/_sources/index.rst.txt b/src/doc/4.0-alpha2/_sources/index.rst.txt deleted file mode 100644 index 9f8016b9b..000000000 --- a/src/doc/4.0-alpha2/_sources/index.rst.txt +++ /dev/null @@ -1,42 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Welcome to Apache Cassandra's documentation! -============================================ - -This is the official documentation for `Apache Cassandra `__ |version|. If you would like -to contribute to this documentation, you are welcome to do so by submitting your contribution like any other patch -following `these instructions `__. - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting_started/index - architecture/index - data_modeling/index - cql/index - configuration/index - operating/index - tools/index - troubleshooting/index - development/index - faq/index - plugins/index - - bugs - contactus diff --git a/src/doc/4.0-alpha2/_sources/operating/audit_logging.rst.txt b/src/doc/4.0-alpha2/_sources/operating/audit_logging.rst.txt deleted file mode 100644 index 068209ee8..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/audit_logging.rst.txt +++ /dev/null @@ -1,236 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - - - -Audit Logging ------------------- - -Audit logging in Cassandra logs every incoming CQL command request, Authentication (successful as well as unsuccessful login) -to C* node. Currently, there are two implementations provided, the custom logger can be implemented and injected with the -class name as a parameter in cassandra.yaml. - -- ``BinAuditLogger`` An efficient way to log events to file in a binary format. -- ``FileAuditLogger`` Logs events to ``audit/audit.log`` file using slf4j logger. - -*Recommendation* ``BinAuditLogger`` is a community recommended logger considering the performance - -What does it capture -^^^^^^^^^^^^^^^^^^^^^^^ - -Audit logging captures following events - -- Successful as well as unsuccessful login attempts. - -- All database commands executed via Native protocol (CQL) attempted or successfully executed. - -Limitations -^^^^^^^^^^^ - -Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution time stamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log. - -What does it log -^^^^^^^^^^^^^^^^^^^ -Each audit log implementation has access to the following attributes, and for the default text based logger these fields are concatenated with `|` s to yield the final message. - - - ``user``: User name(if available) - - ``host``: Host IP, where the command is being executed - - ``source ip address``: Source IP address from where the request initiated - - ``source port``: Source port number from where the request initiated - - ``timestamp``: unix time stamp - - ``type``: Type of the request (SELECT, INSERT, etc.,) - - ``category`` - Category of the request (DDL, DML, etc.,) - - ``keyspace`` - Keyspace(If applicable) on which request is targeted to be executed - - ``scope`` - Table/Aggregate name/ function name/ trigger name etc., as applicable - - ``operation`` - CQL command being executed - -How to configure -^^^^^^^^^^^^^^^^^^ -Auditlog can be configured using cassandra.yaml. If you want to try Auditlog on one node, it can also be enabled and configured using ``nodetool``. - -cassandra.yaml configurations for AuditLog -""""""""""""""""""""""""""""""""""""""""""""" - - ``enabled``: This option enables/ disables audit log - - ``logger``: Class name of the logger/ custom logger. 
- - ``audit_logs_dir``: Auditlogs directory location, if not set, defaults to `cassandra.logdir.audit` or `cassandra.logdir` + /audit/ - - ``included_keyspaces``: Comma separated list of keyspaces to be included in audit log, default - includes all keyspaces - - ``excluded_keyspaces``: Comma separated list of keyspaces to be excluded from audit log, default - excludes no keyspace except `system`, `system_schema` and `system_virtual_schema` - - ``included_categories``: Comma separated list of Audit Log Categories to be included in audit log, default - includes all categories - - ``excluded_categories``: Comma separated list of Audit Log Categories to be excluded from audit log, default - excludes no category - - ``included_users``: Comma separated list of users to be included in audit log, default - includes all users - - ``excluded_users``: Comma separated list of users to be excluded from audit log, default - excludes no user - - -The list of available categories is: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE - -NodeTool command to enable AuditLog -""""""""""""""""""""""""""""""""""""" -``enableauditlog``: Enables AuditLog with the yaml defaults. The yaml configurations can be overridden using options via the nodetool command. - -:: - - nodetool enableauditlog - -Options -********** - - -``--excluded-categories`` - Comma separated list of Audit Log Categories to be excluded for - audit log. If not set the value from cassandra.yaml will be used - -``--excluded-keyspaces`` - Comma separated list of keyspaces to be excluded for audit log. If - not set the value from cassandra.yaml will be used. - Please remember that `system`, `system_schema` and `system_virtual_schema` are excluded by default, - if you are overwriting this option via nodetool, - remember to add these keyspaces back if you don't want them in audit logs - -``--excluded-users`` - Comma separated list of users to be excluded for audit log. If not - set the value from cassandra.yaml will be used - -``--included-categories`` - Comma separated list of Audit Log Categories to be included for - audit log. If not set the value from cassandra.yaml will be used - -``--included-keyspaces`` - Comma separated list of keyspaces to be included for audit log. If - not set the value from cassandra.yaml will be used - -``--included-users`` - Comma separated list of users to be included for audit log. If not - set the value from cassandra.yaml will be used - -``--logger`` - Logger name to be used for AuditLogging. Default BinAuditLogger. If - not set the value from cassandra.yaml will be used - - -NodeTool command to disable AuditLog -""""""""""""""""""""""""""""""""""""""" - -``disableauditlog``: Disables AuditLog. - -:: - - nodetool disableauditlog - - - - - - - -NodeTool command to reload AuditLog filters -""""""""""""""""""""""""""""""""""""""""""""" - -``enableauditlog``: The NodeTool enableauditlog command can be used to reload auditlog filters when called with the default or previous ``loggername`` and updated filters - -E.g., - -:: - - nodetool enableauditlog --loggername --included-keyspaces - - - -View the contents of AuditLog Files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -``auditlogviewer`` is the tool introduced to help view the contents of the binlog file in a human-readable text format. - -:: - - auditlogviewer [...] [options] - -Options -"""""""" - -``-f,--follow`` - Upon reaching the end of the log continue indefinitely - waiting for more records -``-r,--roll_cycle`` - How often the log file was rolled.
May be - necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY, - DAILY). Default HOURLY. - -``-h,--help`` - display this help message - -For example, to dump the contents of audit log files on the console - -:: - - auditlogviewer /logs/cassandra/audit - -Sample output -""""""""""""" - -:: - - LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1" - - - -Configuring BinAuditLogger -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To use ``BinAuditLogger`` as a logger in AuditLogging, set the logger to ``BinAuditLogger`` in cassandra.yaml under the ``audit_logging_options`` section. ``BinAuditLogger`` can be further configured using its advanced options in cassandra.yaml. - - -Advanced Options for BinAuditLogger -"""""""""""""""""""""""""""""""""""""" - -``block`` - Indicates if the AuditLog should block if it falls behind or should drop audit log records. Default is set to ``true`` so that AuditLog records won't be lost - -``max_queue_weight`` - Maximum weight of in memory queue for records waiting to be written to the audit log file before blocking or dropping the log records. Default is set to ``256 * 1024 * 1024`` - -``max_log_size`` - Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is set to ``16L * 1024L * 1024L * 1024L`` - -``roll_cycle`` - How often to roll Audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY. For more options, refer: net.openhft.chronicle.queue.RollCycles. Default is set to ``"HOURLY"`` - -Configuring FileAuditLogger -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To use ``FileAuditLogger`` as a logger in AuditLogging, apart from setting the class name in cassandra.yaml, the following configuration is needed to have the audit log events flow through a separate log file instead of system.log - - -.. code-block:: xml - - - - ${cassandra.logdir}/audit/audit.log - - - ${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip - - 50MB - 30 - 5GB - - - %-5level [%thread] %date{ISO8601} %F:%L - %msg%n - - - - - - - diff --git a/src/doc/4.0-alpha2/_sources/operating/backups.rst.txt deleted file mode 100644 index c071e83b5..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/backups.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Backups -======= - -..
todo:: TODO diff --git a/src/doc/4.0-alpha2/_sources/operating/bloom_filters.rst.txt b/src/doc/4.0-alpha2/_sources/operating/bloom_filters.rst.txt deleted file mode 100644 index 0b37c18da..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/bloom_filters.rst.txt +++ /dev/null @@ -1,65 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Bloom Filters -------------- - -In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter. - -Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file. - -While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the -the ``bloom_filter_fp_chance`` to a float between 0 and 1. - -The default value for ``bloom_filter_fp_chance`` is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases. - -Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the ``bloom_filter_fp_chance`` gets closer to 0), memory usage -increases non-linearly - the bloom filter for ``bloom_filter_fp_chance = 0.01`` will require about three times as much -memory as the same table with ``bloom_filter_fp_chance = 0.1``. - -Typical values for ``bloom_filter_fp_chance`` are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case: - -- Users with more RAM and slower disks may benefit from setting the ``bloom_filter_fp_chance`` to a numerically lower - number (such as 0.01) to avoid excess IO operations -- Users with less RAM, more dense nodes, or very fast disks may tolerate a higher ``bloom_filter_fp_chance`` in order to - save RAM at the expense of excess IO operations -- In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics - workloads), setting the ``bloom_filter_fp_chance`` to a much higher number is acceptable. - -Changing -^^^^^^^^ - -The bloom filter false positive chance is visible in the ``DESCRIBE TABLE`` output as the field -``bloom_filter_fp_chance``. 
Operators can change the value with an ``ALTER TABLE`` statement: -:: - - ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01 - -Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ``ALTER TABLE`` statement, new -files on disk will be written with the new ``bloom_filter_fp_chance``, but existing sstables will not be modified until -they are compacted - if an operator needs a change to ``bloom_filter_fp_chance`` to take effect, they can trigger an -SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the progress. diff --git a/src/doc/4.0-alpha2/_sources/operating/bulk_loading.rst.txt b/src/doc/4.0-alpha2/_sources/operating/bulk_loading.rst.txt deleted file mode 100644 index c8224d5cb..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/bulk_loading.rst.txt +++ /dev/null @@ -1,24 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _bulk-loading: - -Bulk Loading ------------- - -.. todo:: TODO diff --git a/src/doc/4.0-alpha2/_sources/operating/cdc.rst.txt b/src/doc/4.0-alpha2/_sources/operating/cdc.rst.txt deleted file mode 100644 index a7177b544..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/cdc.rst.txt +++ /dev/null @@ -1,96 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Change Data Capture -------------------- - -Overview -^^^^^^^^ - -Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the -table property ``cdc=true`` (either when :ref:`creating the table ` or -:ref:`altering it `). 
Upon CommitLogSegment creation, a hard-link to the segment is created in the -directory specified in ``cassandra.yaml``. On segment fsync to disk, if CDC data is present anywhere in the segment a -_cdc.idx file is also created with the integer offset of how much data in the original segment is persisted -to disk. Upon final segment flush, a second line with the human-readable word "COMPLETED" will be added to the _cdc.idx -file indicating that Cassandra has completed all processing on the file. - -We use an index file rather than just encouraging clients to parse the log in real time off a memory-mapped handle as data -can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx -file will ensure that you only parse CDC data for data that is durable. - -A threshold of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will -not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory. - -Configuration -^^^^^^^^^^^^^ - -Enabling or disabling CDC on a table -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -CDC is enabled or disabled through the `cdc` table property, for instance:: - - CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true; - - ALTER TABLE foo WITH cdc=true; - - ALTER TABLE foo WITH cdc=false; - -cassandra.yaml parameters -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following `cassandra.yaml` parameters are available for CDC: - -``cdc_enabled`` (default: false) - Enable or disable CDC operations node-wide. -``cdc_raw_directory`` (default: ``$CASSANDRA_HOME/data/cdc_raw``) - Destination for CommitLogSegments to be moved after all corresponding memtables are flushed. -``cdc_free_space_in_mb`` (default: min of 4096 and 1/8th volume space) - Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in - ``cdc_raw_directory``. -``cdc_free_space_check_interval_ms`` (default: 250) - When at capacity, we limit the frequency with which we re-calculate the space taken up by ``cdc_raw_directory`` to - prevent burning CPU cycles unnecessarily. Default is to check 4 times per second. - -.. _reading-commitlogsegments: - -Reading CommitLogSegments -^^^^^^^^^^^^^^^^^^^^^^^^^ -Use a `CommitLogReader.java -`__. -Usage is `fairly straightforward -`__ -with a `variety of signatures -`__ -available for use. In order to handle mutations read from disk, implement `CommitLogReadHandler -`__. - -Warnings -^^^^^^^^ - -**Do not enable CDC without some kind of consumption process in-place.** - -If CDC is enabled on a node and then on a table, the space defined by ``cdc_free_space_in_mb`` will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place. - -Further Reading -^^^^^^^^^^^^^^^ - -- `JIRA ticket `__ -- `JIRA ticket `__ diff --git a/src/doc/4.0-alpha2/_sources/operating/compaction.rst.txt b/src/doc/4.0-alpha2/_sources/operating/compaction.rst.txt deleted file mode 100644 index ace9aa9e4..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/compaction.rst.txt +++ /dev/null @@ -1,447 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License.
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _compaction: - -Compaction ----------- - -Types of compaction -^^^^^^^^^^^^^^^^^^^ - -The concept of compaction is used for different kinds of operations in Cassandra, the common thing about these -operations is that it takes one or more sstables and output new sstables. The types of compactions are; - -Minor compaction - triggered automatically in Cassandra. -Major compaction - a user executes a compaction over all sstables on the node. -User defined compaction - a user triggers a compaction on a given set of sstables. -Scrub - try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you - will need to run a full repair on the node. -Upgradesstables - upgrade sstables to the latest version. Run this after upgrading to a new major version. -Cleanup - remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been - bootstrapped since that node will take ownership of some ranges from those nodes. -Secondary index rebuild - rebuild the secondary indexes on the node. -Anticompaction - after repair the ranges that were actually repaired are split out of the sstables that existed when repair started. -Sub range compaction - It is possible to only compact a given sub range - this could be useful if you know a token that has been - misbehaving - either gathering many updates or many deletes. (``nodetool compact -st x -et y``) will pick - all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will - most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS - the resulting sstable will end up in L0. - -When is a minor compaction triggered? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -# When an sstable is added to the node through flushing/streaming etc. -# When autocompaction is enabled after being disabled (``nodetool enableautocompaction``) -# When compaction adds new sstables. -# A check for new minor compactions every 5 minutes. - -Merging sstables -^^^^^^^^^^^^^^^^ - -Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently. - -Tombstones and Garbage Collection (GC) Grace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Why Tombstones -~~~~~~~~~~~~~~ - -When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra. 
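-As an illustration (the table and column names are placeholders), a statement such as::
-
-    DELETE FROM keyspace.table WHERE id = 1;
-
-does not remove the matching row in place; it writes a tombstone that shadows the old values during reads and compaction.
-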
 - -Deletes without tombstones -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Imagine a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If one of the nodes fails and our delete operation only removes existing values we can end up with a cluster that -looks like:: - - [], [], [A] - -Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:: - - [A], [A], [A] - -This would cause our data to be resurrected even though it had been -deleted. - -Deletes with Tombstones -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting again with a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If instead of removing data we add a tombstone record, our single node failure situation will look like this.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A] - -Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]] - -Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing tombstones, which would permanently consume disk space if they were never removed. To avoid -keeping tombstones forever we have a parameter known as ``gc_grace_seconds`` for every table in Cassandra. - -The gc_grace_seconds parameter and Tombstone Removal -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The table level ``gc_grace_seconds`` parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After ``gc_grace_seconds`` has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to -be able to drop an actual tombstone the following needs to be true: - -- The tombstone must be older than ``gc_grace_seconds`` -- If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older - than the tombstone containing X must be included in the same compaction. We don't need to care if the partition is in - an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older - than the data it cannot shadow that data. -- If the option ``only_purge_repaired_tombstones`` is enabled, tombstones are only removed if the data has also been - repaired. - -If a node remains down or disconnected for longer than ``gc_grace_seconds`` its deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the "Deletes without Tombstones" section. -Note that tombstones will not be removed until a compaction event even if ``gc_grace_seconds`` has elapsed. - -The default value for ``gc_grace_seconds`` is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using ``WITH gc_grace_seconds``. - -TTL -^^^ - -Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached.
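-A TTL can be supplied per write, or as a table default via the ``default_time_to_live`` table option; for example (illustrative names)::
-
-    INSERT INTO keyspace.table (id, value) VALUES (1, 'x') USING TTL 86400;
-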
Once the TTL has expired the data is converted to a tombstone which stays around for -at least ``gc_grace_seconds``. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once. - -Fully expired sstables -^^^^^^^^^^^^^^^^^^^^^^ - -If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called ``sstableexpiredblockers`` that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -``TimeWindowCompactionStrategy`` (and the deprecated ``DateTieredCompactionStrategy``). With ``TimeWindowCompactionStrategy`` -it is possible to remove the guarantee (not check for shadowing data) by enabling ``unsafe_aggressive_sstable_expiration``. - -Repaired/unrepaired data -^^^^^^^^^^^^^^^^^^^^^^^^ - -With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables. - -Data directories -^^^^^^^^^^^^^^^^ - -Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding -data getting undeleted: - -- It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings - and each one can run compactions independently from the others. -- Users can backup and restore a single data directory. -- Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk - backing two data directories, the big one will be limited the by the small one. One work around to this is to create - more data directories backed by the big disk. 
 - -Single sstable tombstone compaction -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When an sstable is written a histogram with the tombstone expiry times is created and this is used to try to find -sstables with very many tombstones and run single sstable compaction on that sstable in the hope of being able to drop -tombstones in that sstable. Before starting this, Cassandra also checks how likely it is that any tombstones can actually -be dropped, based on how much this sstable overlaps with other sstables. To avoid most of these checks the -compaction option ``unchecked_tombstone_compaction`` can be enabled. - -.. _compaction-options: - -Common options -^^^^^^^^^^^^^^ - -There are a number of common options for all the compaction strategies: - -``enabled`` (default: true) - Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do - 'nodetool enableautocompaction' to start running compactions. -``tombstone_threshold`` (default: 0.2) - How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable. -``tombstone_compaction_interval`` (default: 86400s (1 day)) - Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure - that one sstable is not constantly getting recompacted - this option states how often we should try for a given - sstable. -``log_all`` (default: false) - New detailed compaction logging, see :ref:`below `. -``unchecked_tombstone_compaction`` (default: false) - The single sstable compaction has quite strict checks for whether it should be started, this option disables those - checks and for some use cases this might be needed. Note that this does not change anything for the actual - compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able - to drop any tombstones. -``only_purge_repaired_tombstone`` (default: false) - Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired. -``min_threshold`` (default: 4) - Lower limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. -``max_threshold`` (default: 32) - Upper limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. - -Further, see the section on each strategy for specific additional options. - -Compaction nodetool commands -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :ref:`nodetool ` utility provides a number of commands related to compaction: - -``enableautocompaction`` - Enable compaction. -``disableautocompaction`` - Disable compaction. -``setcompactionthroughput`` - How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this - throughput. -``compactionstats`` - Statistics about current and pending compactions. -``compactionhistory`` - List details about the last compactions. -``setcompactionthreshold`` - Set the min/max sstable count for when to trigger compaction, defaults to 4/32. - -Switching the compaction strategy and options using JMX -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It is possible to switch compaction strategies and their options on just a single node using JMX; this is a great way to -experiment with settings without affecting the whole cluster.
The mbean is:: - - org.apache.cassandra.db:type=ColumnFamilies,keyspace=,columnfamily= - -and the attribute to change is ``CompactionParameters`` or ``CompactionParametersJson`` if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an :ref:`ALTER TABLE ` statement - -for example:: - - { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10} - -The setting is kept until someone executes an :ref:`ALTER TABLE ` that touches the compaction -settings or restarts the node. - -.. _detailed-compaction-logging: - -More detailed compaction logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Enable with the compaction option ``log_all`` and a more detailed compaction log file will be produced in your log -directory. - -.. _STCS: - -Size Tiered Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The basic idea of ``SizeTieredCompactionStrategy`` (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within ``bucket_low`` and ``bucket_high`` of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket's sstables takes the most reads. - -Major compaction -~~~~~~~~~~~~~~~~ - -When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size. - -.. _stcs-options: - -STCS options -~~~~~~~~~~~~ - -``min_sstable_size`` (default: 50MB) - Sstables smaller than this are put in the same bucket. -``bucket_low`` (default: 0.5) - How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``bucket_low * avg_bucket_size < sstable_size`` (and the ``bucket_high`` condition holds, see below), then - the sstable is added to the bucket. -``bucket_high`` (default: 1.5) - How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``sstable_size < bucket_high * avg_bucket_size`` (and the ``bucket_low`` condition holds, see above), then - the sstable is added to the bucket. - -Defragmentation -~~~~~~~~~~~~~~~ - -Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster. - -.. _LCS: - -Leveled Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The idea of ``LeveledCompactionStrategy`` (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here. 
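-As an illustration (keyspace and table names are placeholders), a table can be switched to LCS with::
-
-    ALTER TABLE keyspace.table WITH compaction = { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 160 };
-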
- -When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory. - -When deciding which level to compact LCS checks the higher levels first (with LCS, a "higher" level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level. - -Major compaction -~~~~~~~~~~~~~~~~ - -It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817. - -Bootstrapping -~~~~~~~~~~~~~ - -During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done. - -STCS in L0 -~~~~~~~~~~ - -If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better. - -Starved sstables -~~~~~~~~~~~~~~~~ - -If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable\_size\_in\_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved. - -.. _lcs-options: - -LCS options -~~~~~~~~~~~ - -``sstable_size_in_mb`` (default: 160MB) - The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very - large partitions on the node. - -``fanout_size`` (default: 10) - The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning - this option. - -LCS also support the ``cassandra.disable_stcs_in_l0`` startup option (``-Dcassandra.disable_stcs_in_l0=true``) to avoid -doing STCS in L0. - -.. 
_TWCS: - -Time Window CompactionStrategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``TimeWindowCompactionStrategy`` (TWCS) is designed specifically for workloads where it's beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -``SizeTieredCompactionStrategy`` or ``LeveledCompactionStrategy``. The basic concept is that -``TimeWindowCompactionStrategy`` will create 1 sstable per file for a given window, where a window is simply calculated -as the combination of two primary options: - -``compaction_window_unit`` (default: DAYS) - A Java TimeUnit (MINUTES, HOURS, or DAYS). -``compaction_window_size`` (default: 1) - The number of units that make up a window. -``unsafe_aggressive_sstable_expiration`` (default: false) - Expired sstables will be dropped without checking its data is shadowing other sstables. This is a potentially - risky option that can lead to data loss or deleted data re-appearing, going beyond what - `unchecked_tombstone_compaction` does for single sstable compaction. Due to the risk the jvm must also be - started with `-Dcassandra.unsafe_aggressive_sstable_expiration=true`. - -Taken together, the operator can specify windows of virtually any size, and `TimeWindowCompactionStrategy` will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using `SizeTieredCompactionStrategy`. - -Ideally, operators should select a ``compaction_window_unit`` and ``compaction_window_size`` pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -(``'compaction_window_unit':'DAYS','compaction_window_size':3``). - -TimeWindowCompactionStrategy Operational Concerns -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways: - -- If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables - and flushed into the same SSTable, where it will remain comingled. -- If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data - will be comingled and flushed into the same SSTable. - -While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL ``USING TIMESTAMP``. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled). - -Changing TimeWindowCompactionStrategy Options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Operators wishing to enable ``TimeWindowCompactionStrategy`` on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected. 
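-For example (keyspace and table names are placeholders), TWCS with a three day window can be enabled with::
-
-    ALTER TABLE keyspace.table WITH compaction = { 'class': 'TimeWindowCompactionStrategy', 'compaction_window_unit': 'DAYS', 'compaction_window_size': 3 };
-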
 - -Operators wishing to change ``compaction_window_unit`` or ``compaction_window_size`` can do so, but may trigger -additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 -hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple -windows. diff --git a/src/doc/4.0-alpha2/_sources/operating/compression.rst.txt b/src/doc/4.0-alpha2/_sources/operating/compression.rst.txt deleted file mode 100644 index b4308b31a..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/compression.rst.txt +++ /dev/null @@ -1,97 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Compression ----------- - -Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of -data on disk by compressing the SSTable in user-configurable compression chunks of ``chunk_length_in_kb``. Because Cassandra -SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates -to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when -UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full -chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so -on). - -Configuring Compression -^^^^^^^^^^^^^^^^^^^^^^^ - -Compression is configured on a per-table basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. -Four options are relevant: - -- ``class`` specifies the compression class - Cassandra provides four classes (``LZ4Compressor``, - ``SnappyCompressor``, ``DeflateCompressor`` and ``ZstdCompressor``). The default is ``LZ4Compressor``. -- ``chunk_length_in_kb`` specifies the number of kilobytes of data per compression chunk. The default is 64KB. -- ``crc_check_chance`` determines how likely Cassandra is to verify the checksum on each compression chunk during - reads. The default is 1.0. -- ``compression_level`` is only applicable for ``ZstdCompressor`` and accepts values between ``-131072`` and ``22``. - The lower the level, the faster the speed (at the cost of compression). Values from 20 to 22 are called - "ultra levels" and should be used with caution, as they require more memory. The default is 3.
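-As a concrete illustration of the ``ZstdCompressor``-specific option (keyspace and table names are placeholders), an explicit level can be supplied alongside the class::
-
-    ALTER TABLE keyspace.table WITH compression = {'class': 'ZstdCompressor', 'compression_level': 5};
-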
- -Users can set compression using the following syntax: - -:: - - CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'}; - -Or - -:: - - ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5}; - -Once enabled, compression can be disabled with ``ALTER TABLE`` setting ``enabled`` to ``false``: - -:: - - ALTER TABLE keyspace.table WITH compression = {'enabled':'false'}; - -Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ``ALTER TABLE``, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the SSTables on disk, -re-compressing the data in the process. - -Benefits and Uses -^^^^^^^^^^^^^^^^^ - -Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk. - -Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well. - -Operational Impact -^^^^^^^^^^^^^^^^^^ - -- Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per - terabyte of data on disk, though the exact usage varies with ``chunk_length_in_kb`` and compression ratios. - -- Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as - non-vnode bootstrap), the CPU overhead of compression can be a limiting factor. - -- The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a - way to ensure correctness of data on disk, compressed tables allow the user to set ``crc_check_chance`` (a float from - 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt. - -Advanced Use -^^^^^^^^^^^^ - -Advanced users can provide their own compression class by implementing the interface at -``org.apache.cassandra.io.compress.ICompressor``. diff --git a/src/doc/4.0-alpha2/_sources/operating/hardware.rst.txt b/src/doc/4.0-alpha2/_sources/operating/hardware.rst.txt deleted file mode 100644 index ad3aa8d21..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/hardware.rst.txt +++ /dev/null @@ -1,87 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. 
distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Hardware Choices ----------------- - -Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM. - -CPU -^^^ -Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes. - -Memory -^^^^^^ -Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java's Xmx system parameter). In addition to -the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and -counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system's page -cache, storing recently accessed portions files in RAM for rapid re-use. - -For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest: - -- ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption -- The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM -- Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection -- Heaps larger than 12GB should consider G1GC - -Disks -^^^^^ -Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables. - -Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files. - -Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra's sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it's important that the commitlog -(``commitlog_directory``) be on one physical disk (not simply a partition, but a physical disk), and the data files -(``data_file_directories``) be set to a separate physical disk. 
By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk. - -In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it's typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5. - -Common Cloud Choices -^^^^^^^^^^^^^^^^^^^^ - -Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include: - -- m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate - workloads -- i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs -- m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) - storage - -Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives. diff --git a/src/doc/4.0-alpha2/_sources/operating/hints.rst.txt b/src/doc/4.0-alpha2/_sources/operating/hints.rst.txt deleted file mode 100644 index f79f18ab7..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/hints.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Hints ------ - -.. todo:: todo diff --git a/src/doc/4.0-alpha2/_sources/operating/index.rst.txt b/src/doc/4.0-alpha2/_sources/operating/index.rst.txt deleted file mode 100644 index e2cead255..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Operating Cassandra -=================== - -.. toctree:: - :maxdepth: 2 - - snitch - topo_changes - repair - read_repair - hints - compaction - bloom_filters - compression - cdc - backups - bulk_loading - metrics - security - hardware - diff --git a/src/doc/4.0-alpha2/_sources/operating/metrics.rst.txt b/src/doc/4.0-alpha2/_sources/operating/metrics.rst.txt deleted file mode 100644 index e87bd5ac1..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/metrics.rst.txt +++ /dev/null @@ -1,789 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _monitoring-metrics: - -Monitoring ----------- - -Metrics in Cassandra are managed using the `Dropwizard Metrics `__ library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of `built in -`__ and `third party -`__ reporter plugins. - -Metrics are collected for a single node. It's up to the operator to use an external monitoring system to aggregate them. - -Metric Types -^^^^^^^^^^^^ -All metrics reported by cassandra fit into one of the following types. - -``Gauge`` - An instantaneous measurement of a value. - -``Counter`` - A gauge for an ``AtomicLong`` instance. Typically this is consumed by monitoring the change since the last call to - see if there is a large increase compared to the norm. - -``Histogram`` - Measures the statistical distribution of values in a stream of data. - - In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th - percentiles. - -``Timer`` - Measures both the rate that a particular piece of code is called and the histogram of its duration. - -``Latency`` - Special type that tracks latency (in microseconds) with a ``Timer`` plus a ``Counter`` that tracks the total latency - accrued since starting. The former is useful if you track the change in total latency since the last check. Each - metric name of this type will have 'Latency' and 'TotalLatency' appended to it. - -``Meter`` - A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving - average throughputs. - -.. _table-metrics: - -Table Metrics -^^^^^^^^^^^^^ - -Each table in Cassandra has metrics responsible for tracking its state and performance. - -The metric names are all appended with the specific ``Keyspace`` and ``Table`` name. 
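-For example, the ``ReadLatency`` metric for a table ``t`` in a keyspace ``k`` (hypothetical names) would be reported as::
-
-    org.apache.cassandra.metrics.Table.ReadLatency.k.t
-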
 - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table>
name=`` - -.. NOTE:: - There is a special table called '``all``' without a keyspace. This represents the aggregation of metrics across - **all** tables and keyspaces on the node. - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -MemtableOnHeapSize Gauge Total amount of data stored in the memtable that resides **on**-heap, including column related overhead and partitions overwritten. -MemtableOffHeapSize Gauge Total amount of data stored in the memtable that resides **off**-heap, including column related overhead and partitions overwritten. -MemtableLiveDataSize Gauge Total amount of live data stored in the memtable, excluding any data structure overhead. -AllMemtablesOnHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **on**-heap. -AllMemtablesOffHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **off**-heap. -AllMemtablesLiveDataSize Gauge Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead. -MemtableColumnsCount Gauge Total number of columns present in the memtable. -MemtableSwitchCount Counter Number of times flush has resulted in the memtable being switched out. -CompressionRatio Gauge Current compression ratio for all SSTables. -EstimatedPartitionSizeHistogram Gauge Histogram of estimated partition size (in bytes). -EstimatedPartitionCount Gauge Approximate number of keys in table. -EstimatedColumnCountHistogram Gauge Histogram of estimated number of columns. -SSTablesPerReadHistogram Histogram Histogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into acoount. -ReadLatency Latency Local read latency for this table. -RangeLatency Latency Local range scan latency for this table. -WriteLatency Latency Local write latency for this table. -CoordinatorReadLatency Timer Coordinator read latency for this table. -CoordinatorWriteLatency Timer Coordinator write latency for this table. -CoordinatorScanLatency Timer Coordinator range scan latency for this table. -PendingFlushes Counter Estimated number of flush tasks pending for this table. -BytesFlushed Counter Total number of bytes flushed since server [re]start. -CompactionBytesWritten Counter Total number of bytes written by compaction since server [re]start. -PendingCompactions Gauge Estimate of number of pending compactions for this table. -LiveSSTableCount Gauge Number of SSTables on disk for this table. -LiveDiskSpaceUsed Counter Disk space used by SSTables belonging to this table (in bytes). -TotalDiskSpaceUsed Counter Total disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC'd. -MinPartitionSize Gauge Size of the smallest compacted partition (in bytes). -MaxPartitionSize Gauge Size of the largest compacted partition (in bytes). -MeanPartitionSize Gauge Size of the average compacted partition (in bytes). -BloomFilterFalsePositives Gauge Number of false positives on table's bloom filter. -BloomFilterFalseRatio Gauge False positive ratio of table's bloom filter. -BloomFilterDiskSpaceUsed Gauge Disk space used by bloom filter (in bytes). -BloomFilterOffHeapMemoryUsed Gauge Off-heap memory used by bloom filter. 
-IndexSummaryOffHeapMemoryUsed Gauge Off-heap memory used by index summary. -CompressionMetadataOffHeapMemoryUsed Gauge Off-heap memory used by compression meta data. -KeyCacheHitRate Gauge Key cache hit rate for this table. -TombstoneScannedHistogram Histogram Histogram of tombstones scanned in queries on this table. -LiveScannedHistogram Histogram Histogram of live cells scanned in queries on this table. -ColUpdateTimeDeltaHistogram Histogram Histogram of column update time delta on this table. -ViewLockAcquireTime Timer Time taken acquiring a partition lock for materialized view updates on this table. -ViewReadTime Timer Time taken during the local read of a materialized view update. -TrueSnapshotsSize Gauge Disk space used by snapshots of this table including all SSTable components. -RowCacheHitOutOfRange Counter Number of table row cache hits that do not satisfy the query filter, thus went to disk. -RowCacheHit Counter Number of table row cache hits. -RowCacheMiss Counter Number of table row cache misses. -CasPrepare Latency Latency of paxos prepare round. -CasPropose Latency Latency of paxos propose round. -CasCommit Latency Latency of paxos commit round. -PercentRepaired Gauge Percent of table data that is repaired on disk. -BytesRepaired Gauge Size of table data repaired on disk -BytesUnrepaired Gauge Size of table data unrepaired on disk -BytesPendingRepair Gauge Size of table data isolated for an ongoing incremental repair -SpeculativeRetries Counter Number of times speculative retries were sent for this table. -SpeculativeFailedRetries Counter Number of speculative retries that failed to prevent a timeout -SpeculativeInsufficientReplicas Counter Number of speculative retries that couldn't be attempted due to lack of replicas -SpeculativeSampleLatencyNanos Gauge Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency. -WaitingOnFreeMemtableSpace Histogram Histogram of time spent waiting for free memtable space, either on- or off-heap. -DroppedMutations Counter Number of dropped mutations on this table. -AnticompactionTime Timer Time spent anticompacting before a consistent repair. -ValidationTime Timer Time spent doing validation compaction during repair. -SyncTime Timer Time spent doing streaming during repair. -BytesValidated Histogram Histogram over the amount of bytes read during validation. -PartitionsValidated Histogram Histogram over the number of partitions read during validation. -BytesAnticompacted Counter How many bytes we anticompacted. -BytesMutatedAnticompaction Counter How many bytes we avoided anticompacting because the sstable was fully contained in the repaired range. -MutatedAnticompactionGauge Gauge Ratio of bytes mutated vs total bytes repaired. -======================================= ============== =========== - -Keyspace Metrics -^^^^^^^^^^^^^^^^ -Each keyspace in Cassandra has metrics responsible for tracking its state and performance. - -Most of these metrics are the same as the ``Table Metrics`` above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.keyspace..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Keyspace scope= name=`` - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -WriteFailedIdeaCL Counter Number of writes that failed to achieve the configured ideal consistency level or 0 if none is configured -IdealCLWriteLatency Latency Coordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured -RepairTime Timer Total time spent as repair coordinator. -RepairPrepareTime Timer Total time spent preparing for repair. -======================================= ============== =========== - -ThreadPool Metrics -^^^^^^^^^^^^^^^^^^ - -Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It's important to monitor the state of these thread pools since they can tell you how saturated a -node is. - -The metric names are all appended with the specific ``ThreadPool`` name. The thread pools are also categorized under a -specific type. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ThreadPools...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ThreadPools path= scope= name=`` - -===================== ============== =========== -Name Type Description -===================== ============== =========== -ActiveTasks Gauge Number of tasks being actively worked on by this pool. -PendingTasks Gauge Number of queued tasks queued up on this pool. -CompletedTasks Counter Number of tasks completed. -TotalBlockedTasks Counter Number of tasks that were blocked due to queue saturation. -CurrentlyBlockedTask Counter Number of tasks that are currently blocked due to queue saturation but on retry will become unblocked. -MaxPoolSize Gauge The maximum number of threads in this pool. -MaxTasksQueued Gauge The maximum number of tasks queued before a task get blocked. -===================== ============== =========== - -The following thread pools can be monitored. 
- -============================ ============== =========== -Name Type Description -============================ ============== =========== -Native-Transport-Requests transport Handles client CQL requests -CounterMutationStage request Responsible for counter writes -ViewMutationStage request Responsible for materialized view writes -MutationStage request Responsible for all other writes -ReadRepairStage request ReadRepair happens on this thread pool -ReadStage request Local reads run on this thread pool -RequestResponseStage request Coordinator requests to the cluster run on this thread pool -AntiEntropyStage internal Builds merkle tree for repairs -CacheCleanupExecutor internal Cache maintenance performed on this thread pool -CompactionExecutor internal Compactions are run on these threads -GossipStage internal Handles gossip requests -HintsDispatcher internal Performs hinted handoff -InternalResponseStage internal Responsible for intra-cluster callbacks -MemtableFlushWriter internal Writes memtables to disk -MemtablePostFlush internal Cleans up commit log after memtable is written to disk -MemtableReclaimMemory internal Memtable recycling -MigrationStage internal Runs schema migrations -MiscStage internal Misceleneous tasks run here -PendingRangeCalculator internal Calculates token range -PerDiskMemtableFlushWriter_0 internal Responsible for writing a spec (there is one of these per disk 0-N) -Sampler internal Responsible for re-sampling the index summaries of SStables -SecondaryIndexManagement internal Performs updates to secondary indexes -ValidationExecutor internal Performs validation compaction or scrubbing -ViewBuildExecutor internal Performs materialized views initial build -============================ ============== =========== - -.. |nbsp| unicode:: 0xA0 .. nonbreaking space - -Client Request Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Client requests have their own set of metrics that encapsulate the work happening at coordinator level. - -Different types of client requests are broken down by ``RequestType``. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ClientRequest..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ClientRequest scope= name=`` - - -:RequestType: CASRead -:Description: Metrics related to transactional read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction read latency. - Unavailables Counter Number of unavailable exceptions encountered. - UnfinishedCommit Counter Number of transactions that were committed on read. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended reads were encountered - ===================== ============== ============================================================= - -:RequestType: CASWrite -:Description: Metrics related to transactional write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. 
- Failures Counter Number of transaction failures encountered.
- |nbsp| Latency Transaction write latency.
- UnfinishedCommit Counter Number of transactions that were committed on write.
- ConditionNotMet Counter Number of transactions whose preconditions did not match the current values.
- ContentionHistogram Histogram Number of contended writes encountered.
- MutationSizeHistogram Histogram Total size in bytes of the request's mutations.
- ===================== ============== =============================================================
-
-
-:RequestType: Read
-:Description: Metrics related to standard read requests.
-:Metrics:
- ===================== ============== =============================================================
- Name Type Description
- ===================== ============== =============================================================
- Timeouts Counter Number of timeouts encountered.
- Failures Counter Number of read failures encountered.
- |nbsp| Latency Read latency.
- Unavailables Counter Number of unavailable exceptions encountered.
- ===================== ============== =============================================================
-
-:RequestType: RangeSlice
-:Description: Metrics related to token range read requests.
-:Metrics:
- ===================== ============== =============================================================
- Name Type Description
- ===================== ============== =============================================================
- Timeouts Counter Number of timeouts encountered.
- Failures Counter Number of range query failures encountered.
- |nbsp| Latency Range query latency.
- Unavailables Counter Number of unavailable exceptions encountered.
- ===================== ============== =============================================================
-
-:RequestType: Write
-:Description: Metrics related to regular write requests.
-:Metrics:
- ===================== ============== =============================================================
- Name Type Description
- ===================== ============== =============================================================
- Timeouts Counter Number of timeouts encountered.
- Failures Counter Number of write failures encountered.
- |nbsp| Latency Write latency.
- Unavailables Counter Number of unavailable exceptions encountered.
- MutationSizeHistogram Histogram Total size in bytes of the request's mutations.
- ===================== ============== =============================================================
-
-
-:RequestType: ViewWrite
-:Description: Metrics related to materialized view writes.
-:Metrics:
- ===================== ============== =============================================================
- Name Type Description
- ===================== ============== =============================================================
- Timeouts Counter Number of timeouts encountered.
- Failures Counter Number of transaction failures encountered.
- Unavailables Counter Number of unavailable exceptions encountered.
- ViewReplicasAttempted Counter Total number of attempted view replica writes.
- ViewReplicasSuccess Counter Total number of succeeded view replica writes.
- ViewPendingMutations Gauge ViewReplicasAttempted - ViewReplicasSuccess.
- ViewWriteLatency Timer Time between when mutation is applied to base table and when CL.ONE is achieved on view.
- ===================== ============== =============================================================
-
-Cache Metrics
-^^^^^^^^^^^^^
-
-Cassandra caches have metrics to track the effectiveness of the caches, though the ``Table Metrics`` might be more useful.
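For a quick look at cache sizes and hit rates without attaching a JMX client, the summary printed by ``nodetool info`` covers the same caches; for example::

    # prints entries, capacity and recent hit rate for the key/row/counter/chunk caches
    nodetool info | grep -i cache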
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-Capacity Gauge Cache capacity in bytes.
-Entries Gauge Total number of cache entries.
-FifteenMinuteCacheHitRate Gauge 15m cache hit rate.
-FiveMinuteCacheHitRate Gauge 5m cache hit rate.
-OneMinuteCacheHitRate Gauge 1m cache hit rate.
-HitRate Gauge All time cache hit rate.
-Hits Meter Total number of cache hits.
-Misses Meter Total number of cache misses.
-MissLatency Timer Latency of misses.
-Requests Gauge Total number of cache requests.
-Size Gauge Total size of occupied cache, in bytes.
-========================== ============== ===========
-
-The following caches are covered:
-
-============================ ===========
-Name Description
-============================ ===========
-CounterCache Keeps hot counters in memory for performance.
-ChunkCache In-process uncompressed page cache.
-KeyCache Cache for partition to sstable offsets.
-RowCache Cache for rows kept in memory.
-============================ ===========
-
-.. NOTE::
- Misses and MissLatency are only defined for the ChunkCache
-
-CQL Metrics
-^^^^^^^^^^^
-
-Metrics specific to CQL prepared statement caching.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.CQL.<MetricName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=CQL name=<MetricName>``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-PreparedStatementsCount Gauge Number of cached prepared statements.
-PreparedStatementsEvicted Counter Number of prepared statements evicted from the prepared statement cache.
-PreparedStatementsExecuted Counter Number of prepared statements executed.
-RegularStatementsExecuted Counter Number of **non** prepared statements executed.
-PreparedStatementsRatio Gauge Percentage of statements that are prepared vs unprepared.
-========================== ============== ===========
-
-.. _dropped-metrics:
-
-DroppedMessage Metrics
-^^^^^^^^^^^^^^^^^^^^^^
-
-Metrics specific to tracking dropped messages for different types of requests.
-Dropped writes are stored and retried by ``Hinted Handoff``.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.DroppedMessage.<MetricName>.<Type>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=DroppedMessage scope=<Type> name=<MetricName>``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-CrossNodeDroppedLatency Timer The dropped latency across nodes.
-InternalDroppedLatency Timer The dropped latency within a node.
-Dropped Meter Number of dropped messages.
-========================== ============== ===========
-
-The different types of messages tracked are:
-
-============================ ===========
-Name Description
-============================ ===========
-BATCH_STORE Batchlog write
-BATCH_REMOVE Batchlog cleanup (after successfully applied)
-COUNTER_MUTATION Counter writes
-HINT Hint replay
-MUTATION Regular writes
-READ Regular reads
-READ_REPAIR Read repair
-PAGED_SLICE Paged read
-RANGE_SLICE Token range read
-REQUEST_RESPONSE RPC Callbacks
-_TRACE Tracing writes
-============================ ===========
-
-Streaming Metrics
-^^^^^^^^^^^^^^^^^
-
-Metrics reported during ``Streaming`` operations, such as repair, bootstrap, and rebuild.
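While a stream is in flight, the same information is often easiest to read from the command line; for example::

    # show active streaming sessions and bytes sent/received per peer
    nodetool netstats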
-
-These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-IncomingBytes Counter Number of bytes streamed to this node from the peer.
-OutgoingBytes Counter Number of bytes streamed to the peer endpoint from this node.
-========================== ============== ===========
-
-
-Compaction Metrics
-^^^^^^^^^^^^^^^^^^
-
-Metrics specific to ``Compaction`` work.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.Compaction.<MetricName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=Compaction name=<MetricName>``
-
-========================== ======================================== ===============================================
-Name Type Description
-========================== ======================================== ===============================================
-BytesCompacted Counter Total number of bytes compacted since server [re]start.
-PendingTasks Gauge Estimated number of compactions remaining to perform.
-CompletedTasks Gauge Number of completed compactions since server [re]start.
-TotalCompactionsCompleted Meter Throughput of completed compactions since server [re]start.
-PendingTasksByTableName Gauge<Map<String, Map<String, Integer>>> Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in ``Table Metrics``.
-========================== ======================================== ===============================================
-
-CommitLog Metrics
-^^^^^^^^^^^^^^^^^
-
-Metrics specific to the ``CommitLog``.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.CommitLog.<MetricName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=CommitLog name=<MetricName>``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-CompletedTasks Gauge Total number of commit log messages written since [re]start.
-PendingTasks Gauge Number of commit log messages written but yet to be fsync'd.
-TotalCommitLogSize Gauge Current size, in bytes, used by all the commit log segments.
-WaitingOnSegmentAllocation Timer Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
-WaitingOnCommit Timer The time spent waiting on CL fsync; for Periodic this only occurs when the sync is lagging its sync interval.
-========================== ============== ===========
-
-Storage Metrics
-^^^^^^^^^^^^^^^
-
-Metrics specific to the storage engine.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.Storage.<MetricName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=Storage name=<MetricName>``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-Exceptions Counter Number of internal exceptions caught. Under normal conditions this should be zero.
-Load Counter Size, in bytes, of the on-disk data this node manages.
-TotalHints Counter Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
-TotalHintsInProgress Counter Number of hints currently attempting to be sent.
-========================== ============== ===========
-
-HintedHandoff Metrics
-^^^^^^^^^^^^^^^^^^^^^
-
-Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in ``Storage Metrics``.
-
-These metrics include the peer endpoint **in the metric name**.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>``
-
-=========================== ============== ===========
-Name Type Description
-=========================== ============== ===========
-Hints_created-<PeerIP> Counter Number of hints on disk for this peer.
-Hints_not_stored-<PeerIP> Counter Number of hints not stored for this peer, due to being down past the configured hint window.
-=========================== ============== ===========
-
-HintsService Metrics
-^^^^^^^^^^^^^^^^^^^^^
-
-Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in ``Storage Metrics``.
-
-These metrics include the peer endpoint **in the metric name**.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.HintsService.<MetricName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=HintsService name=<MetricName>``
-
-=========================== ============== ===========
-Name Type Description
-=========================== ============== ===========
-HintsSucceeded Meter A meter of the hints successfully delivered
-HintsFailed Meter A meter of the hints that failed delivery
-HintsTimedOut Meter A meter of the hints that timed out
-Hint_delays Histogram Histogram of hint delivery delays (in milliseconds)
-Hint_delays-<PeerIP> Histogram Histogram of hint delivery delays (in milliseconds) per peer
-=========================== ============== ===========
-
-SSTable Index Metrics
-^^^^^^^^^^^^^^^^^^^^^
-
-Metrics specific to the SSTable index metadata.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>``
-
-=========================== ============== ===========
-Name Type Description
-=========================== ============== ===========
-IndexedEntrySize Histogram Histogram of the on-heap size, in bytes, of the index across all SSTables.
-IndexInfoCount Histogram Histogram of the number of on-heap index entries managed across all SSTables.
-IndexInfoGets Histogram Histogram of the number of index seeks performed per SSTable.
-=========================== ============== ===========
-
-BufferPool Metrics
-^^^^^^^^^^^^^^^^^^
-
-Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC
-lower by recycling on and off heap buffers.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.BufferPool.<MetricName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=BufferPool name=<MetricName>``
-
-=========================== ============== ===========
-Name Type Description
-=========================== ============== ===========
-Size Gauge Size, in bytes, of the managed buffer pool
-Misses Meter The rate of misses in the pool. The higher this is the more allocations incurred.
-=========================== ============== ===========
-
-
-Client Metrics
-^^^^^^^^^^^^^^
-
-Metrics specific to client management.
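On 4.0 and later the same connection information is also exposed through ``nodetool``; for example (exact availability depends on the version in use)::

    # summarise connected native-protocol clients
    nodetool clientstats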
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.Client.<MetricName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=Client name=<MetricName>``
-
-============================== =============================== ===========
-Name Type Description
-============================== =============================== ===========
-connectedNativeClients Gauge Number of clients connected to this node's native protocol server
-connections Gauge<List<Map<String, String>>> List of all connections and their state information
-connectedNativeClientsByUser Gauge Number of connected native clients by username
-============================== =============================== ===========
-
-
-Batch Metrics
-^^^^^^^^^^^^^
-
-Metrics specific to batch statements.
-
-Reported name format:
-
-**Metric Name**
- ``org.apache.cassandra.metrics.Batch.<MetricName>``
-
-**JMX MBean**
- ``org.apache.cassandra.metrics:type=Batch name=<MetricName>``
-
-=========================== ============== ===========
-Name Type Description
-=========================== ============== ===========
-PartitionsPerCounterBatch Histogram Distribution of the number of partitions processed per counter batch
-PartitionsPerLoggedBatch Histogram Distribution of the number of partitions processed per logged batch
-PartitionsPerUnloggedBatch Histogram Distribution of the number of partitions processed per unlogged batch
-=========================== ============== ===========
-
-
-JVM Metrics
-^^^^^^^^^^^
-
-JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using `Metric Reporters`_.
-
-BufferPool
-++++++++++
-
-**Metric Name**
- ``jvm.buffers.<direct|mapped>.<MetricName>``
-
-**JMX MBean**
- ``java.nio:type=BufferPool name=<direct|mapped>``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-Capacity Gauge Estimated total capacity of the buffers in this pool
-Count Gauge Estimated number of buffers in the pool
-Used Gauge Estimated memory that the Java virtual machine is using for this buffer pool
-========================== ============== ===========
-
-FileDescriptorRatio
-+++++++++++++++++++
-
-**Metric Name**
- ``jvm.fd.<MetricName>``
-
-**JMX MBean**
- ``java.lang:type=OperatingSystem name=``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-Usage Ratio Ratio of used to total file descriptors
-========================== ============== ===========
-
-GarbageCollector
-++++++++++++++++
-
-**Metric Name**
- ``jvm.gc.<gc_type>.<MetricName>``
-
-**JMX MBean**
- ``java.lang:type=GarbageCollector name=<gc_type>``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-Count Gauge Total number of collections that have occurred
-Time Gauge Approximate accumulated collection elapsed time in milliseconds
-========================== ============== ===========
-
-Memory
-++++++
-
-**Metric Name**
- ``jvm.memory.<heap|non-heap|total>.<MetricName>``
-
-**JMX MBean**
- ``java.lang:type=Memory``
-
-========================== ============== ===========
-Name Type Description
-========================== ============== ===========
-Committed Gauge Amount of memory in bytes that is committed for the JVM to use
-Init Gauge Amount of memory in bytes that the JVM initially requests from the OS
-Max Gauge Maximum amount of memory in bytes that can be used for memory management
-Usage Ratio Ratio of used to maximum memory
-Used Gauge Amount of used memory in bytes
-========================== ============== ===========
-
-MemoryPool
-++++++++++
-
-**Metric Name**
-
``jvm.memory.pools..`` - -**JMX MBean** - ``java.lang:type=MemoryPool name=`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -JMX -^^^ - -Any JMX based client can access metrics from cassandra. - -If you wish to access JMX metrics over http it's possible to download `Mx4jTool `__ and -place ``mx4j-tools.jar`` into the classpath. On startup you will see in the log:: - - HttpAdaptor version 3.0.2 started on port 8081 - -To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -``conf/cassandra-env.sh`` and uncomment:: - - #MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0" - - #MX4J_PORT="-Dmx4jport=8081" - - -Metric Reporters -^^^^^^^^^^^^^^^^ - -As mentioned at the top of this section on monitoring the Cassandra metrics can be exported to a number of monitoring -system a number of `built in `__ and `third party -`__ reporter plugins. - -The configuration of these plugins is managed by the `metrics reporter config project -`__. There is a sample configuration file located at -``conf/metrics-reporter-config-sample.yaml``. - -Once configured, you simply start cassandra with the flag -``-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml``. The specified .yaml file plus any 3rd party -reporter jars must all be in Cassandra's classpath. diff --git a/src/doc/4.0-alpha2/_sources/operating/read_repair.rst.txt b/src/doc/4.0-alpha2/_sources/operating/read_repair.rst.txt deleted file mode 100644 index 0e52bf523..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/read_repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Read repair ------------ - -.. todo:: todo diff --git a/src/doc/4.0-alpha2/_sources/operating/repair.rst.txt b/src/doc/4.0-alpha2/_sources/operating/repair.rst.txt deleted file mode 100644 index 97115dc66..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/repair.rst.txt +++ /dev/null @@ -1,107 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _repair: - -Repair ------- - -Cassandra is designed to remain available if one of it's nodes is down or unreachable. However, when a node is down or -unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but -are a best effort, and aren't guaranteed to inform a node of 100% of the writes it missed. These inconsistencies can -eventually result in data loss as nodes are replaced or tombstones expire. - -These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their -respective datasets for their common token ranges, and streaming the differences for any out of sync sections between -the nodes. It compares the data with merkle trees, which are a hierarchy of hashes. - -Incremental and Full Repairs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that's been written since the previous incremental repair. - -Incremental repairs are the default repair type, and if run regularly, can significantly reduce the time and io cost of -performing a repair. However, it's important to understand that once an incremental repair marks data as repaired, it won't -try to repair it again. This is fine for syncing up missed writes, but it doesn't protect against things like disk corruption, -data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally. - -Usage and Best Practices -^^^^^^^^^^^^^^^^^^^^^^^^ - -Since repair can result in a lot of disk and network io, it's not run automatically by Cassandra. It is run by the operator -via nodetool. - -Incremental repair is the default and is run with the following command: - -:: - - nodetool repair - -A full repair can be run with the following command: - -:: - - nodetool repair --full - -Additionally, repair can be run on a single keyspace: - -:: - - nodetool repair [options] - -Or even on specific tables: - -:: - - nodetool repair [options] - - -The repair command only repairs token ranges on the node being repaired, it doesn't repair the whole cluster. By default, repair -will operate on all token ranges replicated by the node you're running repair on, which will cause duplicate work if you run it -on every node. The ``-pr`` flag will only repair the "primary" ranges on a node, so you can repair your entire cluster by running -``nodetool repair -pr`` on each node in a single datacenter. - -The specific frequency of repair that's right for your cluster, of course, depends on several factors. However, if you're -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don't want to run incremental repairs, a full repair every 5 days is a good place -to start. - -At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. 
Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays. - -Other Options -^^^^^^^^^^^^^ - -``-pr, --partitioner-range`` - Restricts repair to the 'primary' token ranges of the node being repaired. A primary range is just a token range for - which a node is the first replica in the ring. - -``-prv, --preview`` - Estimates the amount of streaming that would occur for the given repair command. This builds the merkle trees, and prints - the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated, - add the ``--full`` flag to estimate a full repair. - -``-vd, --validate`` - Verifies that the repaired data is the same across all nodes. Similiar to ``--preview``, this builds and compares merkle - trees of repaired data, but doesn't do any streaming. This is useful for troubleshooting. If this shows that the repaired - data is out of sync, a full repair should be run. - -.. seealso:: - :ref:`nodetool repair docs ` diff --git a/src/doc/4.0-alpha2/_sources/operating/security.rst.txt b/src/doc/4.0-alpha2/_sources/operating/security.rst.txt deleted file mode 100644 index c2d8b79b0..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/security.rst.txt +++ /dev/null @@ -1,441 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Security --------- -There are three main components to the security features provided by Cassandra: - -- TLS/SSL encryption for client and inter-node communication -- Client authentication -- Authorization - -By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still: - -- Craft internode messages to insert users into authentication schema -- Craft internode messages to truncate or drop schema -- Use tools such as ``sstableloader`` to overwrite ``system_auth`` tables -- Attach to the cluster directly to capture write traffic - -Correct configuration of all three security components should negate theses vectors. Therefore, understanding Cassandra's -security features is crucial to configuring your cluster to meet your security needs. - - -TLS/SSL Encryption -^^^^^^^^^^^^^^^^^^ -Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. 
Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently. - -In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can -be overidden using the settings in ``cassandra.yaml``, but this is not recommended unless there are policies in place -which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be -updated. - -FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See `the java document on FIPS `__ -for more details. - -For information on generating the keystore and truststore files used in SSL communications, see the -`java documentation on creating keystores `__ - -SSL Certificate Hot Reloading -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes. - -Certificate Hot reloading may also be triggered using the ``nodetool reloadssl`` command. Use this if you want to Cassandra to -immediately notice the changed certificates. - -Inter-node Encryption -~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing inter-node encryption are found in ``cassandra.yaml`` in the ``server_encryption_options`` -section. To enable inter-node encryption, change the ``internode_encryption`` setting from its default value of ``none`` -to one value from: ``rack``, ``dc`` or ``all``. - -Client to Node Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing client to node encryption are found in ``cassandra.yaml`` in the ``client_encryption_options`` -section. There are two primary toggles here for enabling encryption, ``enabled`` and ``optional``. - -- If neither is set to ``true``, client connections are entirely unencrypted. -- If ``enabled`` is set to ``true`` and ``optional`` is set to ``false``, all client connections must be secured. -- If both options are set to ``true``, both encrypted and unencrypted connections are supported using the same port. - Client connections using encryption with this configuration will be automatically detected and handled by the server. - -As an alternative to the ``optional`` setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set ``optional`` to false and use the ``native_transport_port_ssl`` -setting in ``cassandra.yaml`` to specify the port to be used for secure client communication. - -.. _operation-roles: - -Roles -^^^^^ - -Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -``role_manager`` setting in ``cassandra.yaml``. The default setting uses ``CassandraRoleManager``, an implementation -which stores role information in the tables of the ``system_auth`` keyspace. 
- -See also the :ref:`CQL documentation on roles `. - -Authentication -^^^^^^^^^^^^^^ - -Authentication is pluggable in Cassandra and is configured using the ``authenticator`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthenticator`` which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions. - -The default distribution also includes ``PasswordAuthenticator``, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication. - -.. _password-authentication: - -Enabling Password Authentication -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster. - -Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps: - -1. Open a ``cqlsh`` session and change the replication factor of the ``system_auth`` keyspace. By default, this keyspace - uses ``SimpleReplicationStrategy`` and a ``replication_factor`` of 1. It is recommended to change this for any - non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to - configure a replication factor of 3 to 5 per-DC. - -:: - - ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3}; - -2. Edit ``cassandra.yaml`` to change the ``authenticator`` option like so: - -:: - - authenticator: PasswordAuthenticator - -3. Restart the node. - -4. Open a new ``cqlsh`` session using the credentials of the default superuser: - -:: - - cqlsh -u cassandra -p cassandra - -5. During login, the credentials for the default superuser are read with a consistency level of ``QUORUM``, whereas - those for all other users (including superusers) are read at ``LOCAL_ONE``. In the interests of performance and - availability, as well as security, operators should create another superuser and disable the default one. This step - is optional, but highly recommended. While logged in as the default superuser, create another superuser role which - can be used to bootstrap further configuration. - -:: - - # create a new superuser - CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super'; - -6. Start a new cqlsh session, this time logging in as the new_superuser and disable the default superuser. - -:: - - ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false; - -7. 
Finally, set up the roles and credentials for your application users with :ref:`CREATE ROLE ` - statements. - -At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster. - -Note that using ``PasswordAuthenticator`` also requires the use of :ref:`CassandraRoleManager `. - -See also: :ref:`setting-credentials-for-internal-authentication`, :ref:`CREATE ROLE `, -:ref:`ALTER ROLE `, :ref:`ALTER KEYSPACE ` and :ref:`GRANT PERMISSION -`, - -.. _authorization: - -Authorization -^^^^^^^^^^^^^ - -Authorization is pluggable in Cassandra and is configured using the ``authorizer`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthorizer`` which performs no checking and so effectively grants all -permissions to all roles. This must be used if ``AllowAllAuthenticator`` is the configured authenticator. - -The default distribution also includes ``CassandraAuthorizer``, which does implement full permissions management -functionality and stores its data in Cassandra system tables. - -Enabling Internal Authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests. - -The following assumes that authentication has already been enabled via the process outlined in -:ref:`password-authentication`. Perform these steps to enable internal authorization across the cluster: - -1. On the selected node, edit ``cassandra.yaml`` to change the ``authorizer`` option like so: - -:: - - authorizer: CassandraAuthorizer - -2. Restart the node. - -3. Open a new ``cqlsh`` session using the credentials of a role with superuser credentials: - -:: - - cqlsh -u dba -p super - -4. Configure the appropriate access privileges for your clients using `GRANT PERMISSION `_ - statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so - disruption to clients is avoided. - -:: - - GRANT SELECT ON ks.t1 TO db_user; - -5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node - restarts and clients reconnect, the enforcement of the granted permissions will begin. - -See also: :ref:`GRANT PERMISSION `, `GRANT ALL ` and :ref:`REVOKE PERMISSION -` - -.. _auth-caching: - -Caching -^^^^^^^ - -Enabling authentication and authorization places additional load on the cluster by frequently reading from the -``system_auth`` tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from ``cassandra.yaml`` -or using a JMX client. 
The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from ``cassandra.yaml`` when the node is restarted. - -Each cache has 3 options which can be set: - -Validity Period - Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache. -Refresh Rate - Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these - async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a - shorter time than the validity period. -Max Entries - Controls the upper bound on cache size. - -The naming for these options in ``cassandra.yaml`` follows the convention: - -* ``_validity_in_ms`` -* ``_update_interval_in_ms`` -* ``_cache_max_entries`` - -Where ```` is one of ``credentials``, ``permissions``, or ``roles``. - -As mentioned, these are also exposed via JMX in the mbeans under the ``org.apache.cassandra.auth`` domain. - -JMX access -^^^^^^^^^^ - -Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra's own auth subsystem. - -The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to change the ``LOCAL_JMX`` setting to ``yes``. Under the -standard configuration, when remote JMX connections are enabled, :ref:`standard JMX authentication ` -is also switched on. - -Note that by default, local-only connections are not subject to authentication, but this can be enabled. - -If enabling remote connections, it is recommended to also use :ref:`SSL ` connections. - -Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as :ref:`nodetool `, are -correctly configured and working as expected. - -.. _standard-jmx-auth: - -Standard JMX Auth -~~~~~~~~~~~~~~~~~ - -Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -``cassandra-env.sh`` by the line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -Edit the password file to add username/password pairs: - -:: - - jmx_user jmx_password - -Secure the credentials file so that only the user running the Cassandra process can read it : - -:: - - $ chown cassandra:cassandra /etc/cassandra/jmxremote.password - $ chmod 400 /etc/cassandra/jmxremote.password - -Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in ``cassandra-env.sh``: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -Then edit the access file to grant your JMX user readwrite permission: - -:: - - jmx_user readwrite - -Cassandra must be restarted to pick up the new settings. - -See also : `Using File-Based Password Authentication In JMX -`__ - - -Cassandra Integrated Auth -~~~~~~~~~~~~~~~~~~~~~~~~~ - -An alternative to the out-of-the-box JMX auth is to useeCassandra's own authentication and/or authorization providers -for JMX clients. 
This is potentially more flexible and secure but it come with one major caveat. Namely that it is not -available until `after` a node has joined the ring, because the auth subsystem is not fully configured until that point -However, it is often critical for monitoring purposes to have JMX access particularly during bootstrap. So it is -recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, -to switch to integrated auth once the node has joined the ring and initial setup is complete. - -With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates -can be managed centrally using just ``cqlsh``. Furthermore, fine grained control over exactly which operations are -permitted on particular MBeans can be acheived via :ref:`GRANT PERMISSION `. - -To enable integrated authentication, edit ``cassandra-env.sh`` to uncomment these lines: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" - #JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" - -And disable the JMX standard auth by commenting this line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -To enable integrated authorization, uncomment this line: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" - -Check standard access control is off by ensuring this line is commented out: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as: - -:: - - CREATE ROLE jmx WITH LOGIN = false; - GRANT SELECT ON ALL MBEANS TO jmx; - GRANT DESCRIBE ON ALL MBEANS TO jmx; - GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx; - GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx; - - # Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx; - - # Grant the jmx role to one with login permissions so that it can access the JMX tooling - CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false; - GRANT jmx TO ks_user; - -Fine grained access control to individual MBeans is also supported: - -:: - - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner; - -This permits the ``ks_user`` role to invoke methods on the MBean representing a single table in ``test_keyspace``, while -granting the same permission for all table level MBeans in that keyspace to the ``ks_owner`` role. - -Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered. - -See also: :ref:`Permissions `. - -.. 
_jmx-with-ssl: - -JMX With SSL -~~~~~~~~~~~~ - -JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit -the relevant lines in ``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to uncomment and set the values of these -properties as required: - -``com.sun.management.jmxremote.ssl`` - set to true to enable SSL -``com.sun.management.jmxremote.ssl.need.client.auth`` - set to true to enable validation of client certificates -``com.sun.management.jmxremote.registry.ssl`` - enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub -``com.sun.management.jmxremote.ssl.enabled.protocols`` - by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is - not usually necessary and using the defaults is the preferred option. -``com.sun.management.jmxremote.ssl.enabled.cipher.suites`` - by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that - this is not usually necessary and using the defaults is the preferred option. -``javax.net.ssl.keyStore`` - set the path on the local filesystem of the keystore containing server private keys and public certificates -``javax.net.ssl.keyStorePassword`` - set the password of the keystore file -``javax.net.ssl.trustStore`` - if validation of client certificates is required, use this property to specify the path of the truststore containing - the public certificates of trusted clients -``javax.net.ssl.trustStorePassword`` - set the password of the truststore file - -See also: `Oracle Java7 Docs `__, -`Monitor Java with JMX `__ diff --git a/src/doc/4.0-alpha2/_sources/operating/snitch.rst.txt b/src/doc/4.0-alpha2/_sources/operating/snitch.rst.txt deleted file mode 100644 index 5f6760a41..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/snitch.rst.txt +++ /dev/null @@ -1,78 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Snitch ------- - -In cassandra, the snitch has two functions: - -- it teaches Cassandra enough about your network topology to route requests efficiently. -- it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping - machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same - "rack" (which may not actually be a physical location). - -Dynamic snitching -^^^^^^^^^^^^^^^^^ - -The dynamic snitch monitor read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is -configured with the following properties on ``cassandra.yaml``: - -- ``dynamic_snitch``: whether the dynamic snitch should be enabled or disabled. 
-- ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the more expensive part of host score - calculation. -- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero, this will allow 'pinning' of replicas to hosts - in order to increase cache capacity. -- ``dynamic_snitch_badness_threshold:``: The badness threshold will control how much worse the pinned host has to be - before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a - percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned - host was 20% worse than the fastest. - -Snitch classes -^^^^^^^^^^^^^^ - -The ``endpoint_snitch`` parameter in ``cassandra.yaml`` should be set to the class that implements -``IEndPointSnitch`` which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the snitch implementations: - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via gossip. If ``cassandra-topology.properties`` exists, - it is used as a fallback, allowing migration from the PropertyFileSnitch. - -SimpleSnitch - Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -PropertyFileSnitch - Proximity is determined by rack and data center, which are explicitly configured in - ``cassandra-topology.properties``. - -Ec2Snitch - Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. - The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this - will not work across multiple regions. - -Ec2MultiRegionSnitch - Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the - public IP as well). You will need to open the ``storage_port`` or ``ssl_storage_port`` on the public IP firewall - (For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection). - -RackInferringSnitch - Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each - node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an - example of writing a custom Snitch class and is provided in that spirit. diff --git a/src/doc/4.0-alpha2/_sources/operating/topo_changes.rst.txt b/src/doc/4.0-alpha2/_sources/operating/topo_changes.rst.txt deleted file mode 100644 index 6c8f8ecdf..000000000 --- a/src/doc/4.0-alpha2/_sources/operating/topo_changes.rst.txt +++ /dev/null @@ -1,129 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _topology-changes: - -Adding, replacing, moving and removing nodes --------------------------------------------- - -Bootstrap -^^^^^^^^^ - -Adding new nodes is called "bootstrapping". The ``num_tokens`` parameter will define the amount of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for. - -Token allocation -~~~~~~~~~~~~~~~~ - -With the default token allocation algorithm the new node will pick ``num_tokens`` random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher amount of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead. - -On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option ``-Dcassandra.allocate_tokens_for_keyspace=``, where -```` is the keyspace from which the algorithm can find the load information to optimize token assignment for. - -Manual token assignment -""""""""""""""""""""""" - -You may specify a comma-separated list of tokens manually with the ``initial_token`` ``cassandra.yaml`` parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens. - -Range streaming -~~~~~~~~~~~~~~~~ - -After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state. - -In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag ``-Dcassandra.consistent.rangemovement=false``. - -Resuming failed/hanged bootstrap -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On 2.2+, if the bootstrap process fails, it's possible to resume bootstrap from the previous saved state by calling -``nodetool bootstrap resume``. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag -``-Dcassandra.reset_bootstrap_progress=true``. - -On lower versions, when the bootstrap proces fails it is recommended to wipe the node (remove all the data), and restart -the bootstrap process again. - -Manual bootstrapping -~~~~~~~~~~~~~~~~~~~~ - -It's possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -``auto_bootstrap: false``. This may be useful when restoring a node from a backup or creating a new data-center. - -Removing nodes -^^^^^^^^^^^^^^ - -You can take a node out of the cluster with ``nodetool decommission`` to a live node, or ``nodetool removenode`` (to any -other machine) to remove a dead one. 
This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas. - -No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually. - -Moving nodes -^^^^^^^^^^^^ - -When ``num_tokens: 1`` it's possible to move the node position in the ring with ``nodetool move``. Moving is both a -convenience over and more efficient than decommission + bootstrap. After moving a node, ``nodetool cleanup`` should be -run to remove any unnecessary data. - -Replacing a dead node -^^^^^^^^^^^^^^^^^^^^^ - -In order to replace a dead node, start cassandra with the JVM startup flag -``-Dcassandra.replace_address_first_boot=``. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node to be DOWN (DN), however this node will see itself as UP -(UN). Accurate replacement state can be found in ``nodetool netstats``. - -The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will -only receive writes during the bootstrapping phase if it has a different ip address to the node that is being replaced. -(See CASSANDRA-8523 and CASSANDRA-12344) - -Once the bootstrapping is complete the node will be marked "UP". - -.. Note:: If any of the following cases apply, you **MUST** run repair to make the replaced node consistent again, since - it missed ongoing writes during/prior to bootstrapping. The *replacement* timeframe refers to the period from when the - node initially dies to when a new node completes the replacement process. - - 1. The node is down for longer than ``max_hint_window_in_ms`` before being replaced. - 2. You are replacing using the same IP address as the dead node **and** replacement takes longer than ``max_hint_window_in_ms``. - -Monitoring progress -^^^^^^^^^^^^^^^^^^^ - -Bootstrap, replace, move and remove progress can be monitored using ``nodetool netstats`` which will show the progress -of the streaming operations. - -Cleanup data after range movements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As a safety measure, Cassandra does not automatically remove data from nodes that "lose" part of their token range due -to a range movement operation (bootstrap, move, replace). Run ``nodetool cleanup`` on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node. diff --git a/src/doc/4.0-alpha2/_sources/plugins/index.rst.txt b/src/doc/4.0-alpha2/_sources/plugins/index.rst.txt deleted file mode 100644 index 4073a92cb..000000000 --- a/src/doc/4.0-alpha2/_sources/plugins/index.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Third-Party Plugins -=================== - -Available third-party plugins for Apache Cassandra - -CAPI-Rowcache -------------- - -The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments. - -The official page for the `CAPI-Rowcache plugin `__ contains further details how to build/run/download the plugin. - - -Stratio’s Cassandra Lucene Index --------------------------------- - -Stratio’s Lucene index is a Cassandra secondary index implementation based on `Apache Lucene `__. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities such as with ElasticSearch or `Apache Solr `__, including full text search capabilities, free multivariable, geospatial and bitemporal search, relevance queries and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability is guaranteed. - -The official Github repository `Cassandra Lucene Index `__ contains everything you need to build/run/configure the plugin. \ No newline at end of file diff --git a/src/doc/4.0-alpha2/_sources/tools/cassandra_stress.rst.txt b/src/doc/4.0-alpha2/_sources/tools/cassandra_stress.rst.txt deleted file mode 100644 index bcac54ec1..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/cassandra_stress.rst.txt +++ /dev/null @@ -1,269 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: yaml - -.. _cassandra_stress: - -Cassandra Stress ----------------- - -cassandra-stress is a tool for benchmarking and load testing a Cassandra -cluster. cassandra-stress supports testing arbitrary CQL tables and queries -to allow users to benchmark their data model. - -This documentation focuses on user mode as this allows the testing of your -actual schema. 
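For orientation, a minimal run of the built-in (non-user) workloads against a single local node might look like the following; the node address and operation counts here are only placeholders::

    cassandra-stress write n=100000 -rate threads=50 -node 127.0.0.1
    cassandra-stress read n=100000 -rate threads=50 -node 127.0.0.1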
- -Usage -^^^^^ -There are several operation types: - - * write-only, read-only, and mixed workloads of standard data - * write-only and read-only workloads for counter columns - * user configured workloads, running custom queries on custom schemas - -The syntax is `cassandra-stress [options]`. If you want more information on a given command -or options, just run `cassandra-stress help `. - -Commands: - read: - Multiple concurrent reads - the cluster must first be populated by a write test - write: - Multiple concurrent writes against the cluster - mixed: - Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test - counter_write: - Multiple concurrent updates of counters. - counter_read: - Multiple concurrent reads of counters. The cluster must first be populated by a counterwrite test. - user: - Interleaving of user provided queries, with configurable ratio and distribution. - help: - Print help for a command or option - print: - Inspect the output of a distribution definition - legacy: - Legacy support mode - -Primary Options: - -pop: - Population distribution and intra-partition visit order - -insert: - Insert specific options relating to various methods for batching and splitting partition updates - -col: - Column details such as size and count distribution, data generator, names, comparator and if super columns should be used - -rate: - Thread count, rate limit or automatic mode (default is auto) - -mode: - Thrift or CQL with options - -errors: - How to handle errors when encountered during stress - -sample: - Specify the number of samples to collect for measuring latency - -schema: - Replication settings, compression, compaction, etc. - -node: - Nodes to connect to - -log: - Where to log progress to, and the interval at which to do it - -transport: - Custom transport factories - -port: - The port to connect to cassandra nodes on - -sendto: - Specify a stress server to send this command to - -graph: - Graph recorded metrics - -tokenrange: - Token range settings - - -Suboptions: - Every command and primary option has its own collection of suboptions. These are too numerous to list here. - For information on the suboptions for each command or option, please use the help command, - `cassandra-stress help `. - -User mode -^^^^^^^^^ - -User mode allows you to use your stress your own schemas. This can save time in -the long run rather than building an application and then realising your schema -doesn't scale. - -Profile -+++++++ - -User mode requires a profile defined in YAML. -Multiple YAML files may be specified in which case operations in the ops argument are referenced as specname.opname. - -An identifier for the profile:: - - specname: staff_activities - -The keyspace for the test:: - - keyspace: staff - -CQL for the keyspace. Optional if the keyspace already exists:: - - keyspace_definition: | - CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -The table to be stressed:: - - table: staff_activities - -CQL for the table. Optional if the table already exists:: - - table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when, what) - ) - - -Optional meta information on the generated columns in the above table. -The min and max only apply to text and blob types. 
-The distribution field represents the total unique population -distribution of that column across rows:: - - columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -Supported types are: - -An exponential distribution over the range [min..max]:: - - EXP(min..max) - -An extreme value (Weibull) distribution over the range [min..max]:: - - EXTREME(min..max,shape) - -A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:: - - GAUSSIAN(min..max,stdvrng) - -A gaussian/normal distribution, with explicitly defined mean and stdev:: - - GAUSSIAN(min..max,mean,stdev) - -A uniform distribution over the range [min, max]:: - - UNIFORM(min..max) - -A fixed distribution, always returning the same value:: - - FIXED(val) - -If preceded by ~, the distribution is inverted - -Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1) - -Insert distributions:: - - insert: - # How many partition to insert per batch - partitions: fixed(1) - # How many rows to update per partition - select: fixed(1)/500 - # UNLOGGED or LOGGED batch for insert - batchtype: UNLOGGED - - -Currently all inserts are done inside batches. - -Read statements to use during the test:: - - queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? LIMIT 1 - fields: samerow - -Running a user mode test:: - - cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once - -This will create the schema then run tests for 1 minute with an equal number of inserts, latest_event queries and events -queries. Additionally the table will be truncated once before the test. - -The full example can be found here :download:`yaml <./stress-example.yaml>` - -Running a user mode test with multiple yaml files:: - cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once - -This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table - although care must be taken that the table definition is identical (data generation specs can be different). - -Lightweight transaction support -+++++++++++++++++++++++++++++++ - -cassandra-stress supports lightweight transactions. In this it will first read current data from Cassandra and then uses read value(s) -to fulfill lightweight transaction condition(s). - -Lightweight transaction update query:: - - queries: - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? - fields: samerow - -The full example can be found here :download:`yaml <./stress-lwt-example.yaml>` - -Graphing -^^^^^^^^ - -Graphs can be generated for each run of stress. - -.. 
image:: example-stress-graph.png - -To create a new graph:: - - cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" - -To add a new run to an existing graph point to an existing file and add a revision name:: - - cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run" - -FAQ -^^^^ - -**How do you use NetworkTopologyStrategy for the keyspace?** - -Use the schema option making sure to either escape the parenthesis or enclose in quotes:: - - cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)" - -**How do you use SSL?** - -Use the transport option:: - - cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra" \ No newline at end of file diff --git a/src/doc/4.0-alpha2/_sources/tools/cqlsh.rst.txt b/src/doc/4.0-alpha2/_sources/tools/cqlsh.rst.txt deleted file mode 100644 index 45e2db8fc..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/cqlsh.rst.txt +++ /dev/null @@ -1,455 +0,0 @@ -.. highlight:: none - -.. _cqlsh: - -cqlsh: the CQL shell --------------------- - -cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line. - - -Compatibility -^^^^^^^^^^^^^ - -cqlsh is compatible with Python 2.7. - -In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh make work with older or newer versions of Cassandra, but this is not officially supported. - - -Optional Dependencies -^^^^^^^^^^^^^^^^^^^^^ - -cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh. - -pytz -~~~~ - -By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the `pytz `__ library must be installed. See the ``timezone`` option in cqlshrc_ for -specifying a timezone to use. - -cython -~~~~~~ - -The performance of cqlsh's ``COPY`` operations can be improved by installing `cython `__. This will -compile the python modules that are central to the performance of ``COPY``. - -cqlshrc -^^^^^^^ - -The ``cqlshrc`` file holds configuration options for cqlsh. By default this is in the user's home directory at -``~/.cassandra/cqlsh``, but a custom location can be specified with the ``--cqlshrc`` option. - -Example config values and documentation can be found in the ``conf/cqlshrc.sample`` file of a tarball installation. You -can also view the latest version of `cqlshrc online `__. - - -Command Line Options -^^^^^^^^^^^^^^^^^^^^ - -Usage: - -``cqlsh [options] [host [port]]`` - -Options: - -``-C`` ``--color`` - Force color output - -``--no-color`` - Disable color output - -``--browser`` - Specify the browser to use for displaying cqlsh help. This can be one of the `supported browser names - `__ (e.g. ``firefox``) or a browser path followed by ``%s`` (e.g. - ``/usr/bin/google-chrome-stable %s``). 
- -``--ssl`` - Use SSL when connecting to Cassandra - -``-u`` ``--user`` - Username to authenticate against Cassandra with - -``-p`` ``--password`` - Password to authenticate against Cassandra with, should - be used in conjunction with ``--user`` - -``-k`` ``--keyspace`` - Keyspace to authenticate to, should be used in conjunction - with ``--user`` - -``-f`` ``--file`` - Execute commands from the given file, then exit - -``--debug`` - Print additional debugging information - -``--encoding`` - Specify a non-default encoding for output (defaults to UTF-8) - -``--cqlshrc`` - Specify a non-default location for the ``cqlshrc`` file - -``-e`` ``--execute`` - Execute the given statement, then exit - -``--connect-timeout`` - Specify the connection timeout in seconds (defaults to 2s) - -``--request-timeout`` - Specify the request timeout in seconds (defaults to 10s) - -``-t`` ``--tty`` - Force tty mode (command prompt) - - -Special Commands -^^^^^^^^^^^^^^^^ - -In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below. - -``CONSISTENCY`` -~~~~~~~~~~~~~~~ - -`Usage`: ``CONSISTENCY `` - -Sets the consistency level for operations to follow. Valid arguments include: - -- ``ANY`` -- ``ONE`` -- ``TWO`` -- ``THREE`` -- ``QUORUM`` -- ``ALL`` -- ``LOCAL_QUORUM`` -- ``LOCAL_ONE`` -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -``SERIAL CONSISTENCY`` -~~~~~~~~~~~~~~~~~~~~~~ - -`Usage`: ``SERIAL CONSISTENCY `` - -Sets the serial consistency level for operations to follow. Valid arguments include: - -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of ``QUORUM`` (and -is successful), then a ``QUORUM`` read is guaranteed to see that write. But if the regular consistency level of that -write is ``ANY``, then only a read with a consistency level of ``SERIAL`` is guaranteed to see it (even a read with -consistency ``ALL`` is not guaranteed to be enough). - -``SHOW VERSION`` -~~~~~~~~~~~~~~~~ -Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:: - - cqlsh> SHOW VERSION - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - -``SHOW HOST`` -~~~~~~~~~~~~~ - -Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:: - - cqlsh> SHOW HOST - Connected to Prod_Cluster at 192.0.0.1:9042. - -``SHOW SESSION`` -~~~~~~~~~~~~~~~~ - -Pretty prints a specific tracing session. - -`Usage`: ``SHOW SESSION `` - -Example usage:: - - cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8 - - Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8 - - activity | timestamp | source | source_elapsed | client - -----------------------------------------------------------+----------------------------+-----------+----------------+----------- - Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 | 0 | 127.0.0.1 - Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 | 3843 | 127.0.0.1 - ... 
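The session UUID passed to ``SHOW SESSION`` can be taken from the trace that cqlsh prints after each query once tracing is enabled; a short sketch (reusing the session UUID from the example above)::

    cqlsh> TRACING ON
    cqlsh> SELECT * FROM system.local;
    cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8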
- - -``SOURCE`` -~~~~~~~~~~ - -Reads the contents of a file and executes each line as a CQL statement or special cqlsh command. - -`Usage`: ``SOURCE `` - -Example usage:: - - cqlsh> SOURCE '/home/thobbs/commands.cql' - -``CAPTURE`` -~~~~~~~~~~~ - -Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured. - -`Usage`:: - - CAPTURE ''; - CAPTURE OFF; - CAPTURE; - -That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation (``'~/mydir'``) is supported for referring to ``$HOME``. - -Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session. - -To stop capturing output and show it in the cqlsh session again, use ``CAPTURE OFF``. - -To inspect the current capture configuration, use ``CAPTURE`` with no arguments. - -``HELP`` -~~~~~~~~ - -Gives information about cqlsh commands. To see available topics, enter ``HELP`` without any arguments. To see help on a -topic, use ``HELP ``. Also see the ``--browser`` argument for controlling what browser is used to display help. - -``TRACING`` -~~~~~~~~~~~ - -Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed. - -`Usage`:: - - TRACING ON - TRACING OFF - -``PAGING`` -~~~~~~~~~~ - -Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it's a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once. - -`Usage`:: - - PAGING ON - PAGING OFF - PAGING - -``EXPAND`` -~~~~~~~~~~ - -Enables or disables vertical printing of rows. Enabling ``EXPAND`` is useful when many columns are fetched, or the -contents of a single column are large. - -`Usage`:: - - EXPAND ON - EXPAND OFF - -``LOGIN`` -~~~~~~~~~ - -Authenticate as a specified Cassandra user for the current session. - -`Usage`:: - - LOGIN [] - -``EXIT`` -~~~~~~~~~ - -Ends the current session and terminates the cqlsh process. - -`Usage`:: - - EXIT - QUIT - -``CLEAR`` -~~~~~~~~~ - -Clears the console. - -`Usage`:: - - CLEAR - CLS - -``DESCRIBE`` -~~~~~~~~~~~~ - -Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema. - -`Usage`:: - - DESCRIBE CLUSTER - DESCRIBE SCHEMA - DESCRIBE KEYSPACES - DESCRIBE KEYSPACE - DESCRIBE TABLES - DESCRIBE TABLE
- DESCRIBE INDEX - DESCRIBE MATERIALIZED VIEW - DESCRIBE TYPES - DESCRIBE TYPE - DESCRIBE FUNCTIONS - DESCRIBE FUNCTION - DESCRIBE AGGREGATES - DESCRIBE AGGREGATE - -In any of the commands, ``DESC`` may be used in place of ``DESCRIBE``. - -The ``DESCRIBE CLUSTER`` command prints the cluster name and partitioner:: - - cqlsh> DESCRIBE CLUSTER - - Cluster: Test Cluster - Partitioner: Murmur3Partitioner - -The ``DESCRIBE SCHEMA`` command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup. - -``COPY TO`` -~~~~~~~~~~~ - -Copies data from a table to a CSV file. - -`Usage`:: - - COPY
[(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...] - -If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name. - - -The ``<file name>`` should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value ``STDOUT`` (without single quotes) to print the CSV to stdout. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``MAXREQUESTS`` - The maximum number of token ranges to fetch simultaneously. Defaults to 6. - -``PAGESIZE`` - The number of rows to fetch in a single page. Defaults to 1000. - -``PAGETIMEOUT`` - By default the page timeout is 10 seconds per 1000 entries - in the page size or 10 seconds if pagesize is smaller. - -``BEGINTOKEN``, ``ENDTOKEN`` - Token range to export. Defaults to exporting the full ring. - -``MAXOUTPUTSIZE`` - The maximum size of the output file measured in number of lines; - beyond this maximum the output file will be split into segments. -1 means unlimited, and is the default. - -``ENCODING`` - The encoding used for characters. Defaults to ``utf8``. - -``COPY FROM`` -~~~~~~~~~~~~~ -Copies data from a CSV file to a table. - -`Usage`:: - - COPY <table name>
[(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...] - -If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name. - -The ``<file name>`` should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value ``STDIN`` (without single quotes) to read the -CSV data from stdin. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY FROM`` -````````````````````````` - -``INGESTRATE`` - The maximum number of rows to process per second. Defaults to 100000. - -``MAXROWS`` - The maximum number of rows to import. -1 means unlimited, and is the default. - -``SKIPROWS`` - A number of initial rows to skip. Defaults to 0. - -``SKIPCOLS`` - A comma-separated list of column names to ignore. By default, no columns are skipped. - -``MAXPARSEERRORS`` - The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default. - -``MAXINSERTERRORS`` - The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000. - -``ERRFILE`` - A file to store all rows that could not be imported, by default this is ``import__
.err`` where ```` is - your keyspace and ``
`` is your table name. - -``MAXBATCHSIZE`` - The max number of rows inserted in a single batch. Defaults to 20. - -``MINBATCHSIZE`` - The min number of rows inserted in a single batch. Defaults to 2. - -``CHUNKSIZE`` - The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000. - -.. _shared-copy-options: - -Shared COPY Options -``````````````````` - -Options that are common to both ``COPY TO`` and ``COPY FROM``. - -``NULLVAL`` - The string placeholder for null values. Defaults to ``null``. - -``HEADER`` - For ``COPY TO``, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, - specifies whether the first line in the CSV input file contains column names. Defaults to ``false``. - -``DECIMALSEP`` - The character that is used as the decimal point separator. Defaults to ``.``. - -``THOUSANDSSEP`` - The character that is used to separate thousands. Defaults to the empty string. - -``BOOLSTYlE`` - The string literal format for boolean values. Defaults to ``True,False``. - -``NUMPROCESSES`` - The number of child worker processes to create for ``COPY`` tasks. Defaults to a max of 4 for ``COPY FROM`` and 16 - for ``COPY TO``. However, at most (num_cores - 1) processes will be created. - -``MAXATTEMPTS`` - The maximum number of failed attempts to fetch a range of data (when using ``COPY TO``) or insert a chunk of data - (when using ``COPY FROM``) before giving up. Defaults to 5. - -``REPORTFREQUENCY`` - How often status updates are refreshed, in seconds. Defaults to 0.25. - -``RATEFILE`` - An optional file to output rate statistics to. By default, statistics are not output to a file. diff --git a/src/doc/4.0-alpha2/_sources/tools/index.rst.txt b/src/doc/4.0-alpha2/_sources/tools/index.rst.txt deleted file mode 100644 index d28929c84..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Tools -=============== - -This section describes the command line tools provided with Apache Cassandra. - -.. toctree:: - :maxdepth: 3 - - cqlsh - nodetool/nodetool - sstable/index - cassandra_stress diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/assassinate.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/assassinate.rst.txt deleted file mode 100644 index 0ec5dc8f4..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/assassinate.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_assassinate: - -assassinate ------------ - -Usage ---------- - -.. 
include:: assassinate.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/bootstrap.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/bootstrap.rst.txt deleted file mode 100644 index e280fee01..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/bootstrap.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_bootstrap: - -bootstrap ---------- - -Usage ---------- - -.. include:: bootstrap.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/cleanup.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/cleanup.rst.txt deleted file mode 100644 index 70ba8f9f7..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/cleanup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_cleanup: - -cleanup -------- - -Usage ---------- - -.. include:: cleanup.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/clearsnapshot.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/clearsnapshot.rst.txt deleted file mode 100644 index 878147511..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/clearsnapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clearsnapshot: - -clearsnapshot -------------- - -Usage ---------- - -.. include:: clearsnapshot.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/clientstats.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/clientstats.rst.txt deleted file mode 100644 index 7f5e47ab7..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/clientstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clientstats: - -clientstats ------------ - -Usage ---------- - -.. include:: clientstats.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/compact.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/compact.rst.txt deleted file mode 100644 index a26347c57..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/compact.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compact: - -compact -------- - -Usage ---------- - -.. include:: compact.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/compactionhistory.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/compactionhistory.rst.txt deleted file mode 100644 index 896433e86..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/compactionhistory.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionhistory: - -compactionhistory ------------------ - -Usage ---------- - -.. include:: compactionhistory.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/compactionstats.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/compactionstats.rst.txt deleted file mode 100644 index 612822535..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/compactionstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionstats: - -compactionstats ---------------- - -Usage ---------- - -.. include:: compactionstats.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/decommission.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/decommission.rst.txt deleted file mode 100644 index 8b00ff40c..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/decommission.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_decommission: - -decommission ------------- - -Usage ---------- - -.. 
include:: decommission.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/describecluster.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/describecluster.rst.txt deleted file mode 100644 index 55df135de..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/describecluster.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describecluster: - -describecluster ---------------- - -Usage ---------- - -.. include:: describecluster.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/describering.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/describering.rst.txt deleted file mode 100644 index 3a964e8ee..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/describering.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describering: - -describering ------------- - -Usage ---------- - -.. include:: describering.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/disableauditlog.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/disableauditlog.rst.txt deleted file mode 100644 index 4b20b0a9b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/disableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableauditlog: - -disableauditlog ---------------- - -Usage ---------- - -.. include:: disableauditlog.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/disableautocompaction.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/disableautocompaction.rst.txt deleted file mode 100644 index 16549f202..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/disableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableautocompaction: - -disableautocompaction ---------------------- - -Usage ---------- - -.. include:: disableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablebackup.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/disablebackup.rst.txt deleted file mode 100644 index c9537e011..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebackup: - -disablebackup -------------- - -Usage ---------- - -.. include:: disablebackup.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablebinary.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/disablebinary.rst.txt deleted file mode 100644 index 86e981f6e..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebinary: - -disablebinary -------------- - -Usage ---------- - -.. include:: disablebinary.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablefullquerylog.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/disablefullquerylog.rst.txt deleted file mode 100644 index d68cd492e..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablefullquerylog: - -disablefullquerylog -------------------- - -Usage ---------- - -.. include:: disablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablegossip.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/disablegossip.rst.txt deleted file mode 100644 index 76f3d064b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablegossip: - -disablegossip -------------- - -Usage ---------- - -.. 
include:: disablegossip.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablehandoff.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/disablehandoff.rst.txt deleted file mode 100644 index 35f11334b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehandoff: - -disablehandoff --------------- - -Usage ---------- - -.. include:: disablehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablehintsfordc.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/disablehintsfordc.rst.txt deleted file mode 100644 index 892c31ba5..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/disablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehintsfordc: - -disablehintsfordc ------------------ - -Usage ---------- - -.. include:: disablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/disableoldprotocolversions.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/disableoldprotocolversions.rst.txt deleted file mode 100644 index 09d8a150b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/disableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableoldprotocolversions: - -disableoldprotocolversions --------------------------- - -Usage ---------- - -.. include:: disableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/drain.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/drain.rst.txt deleted file mode 100644 index 03093ac7c..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/drain.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_drain: - -drain ------ - -Usage ---------- - -.. include:: drain.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/enableauditlog.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/enableauditlog.rst.txt deleted file mode 100644 index 7936c7a86..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/enableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableauditlog: - -enableauditlog --------------- - -Usage ---------- - -.. include:: enableauditlog.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/enableautocompaction.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/enableautocompaction.rst.txt deleted file mode 100644 index 2ddf60f8e..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/enableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableautocompaction: - -enableautocompaction --------------------- - -Usage ---------- - -.. include:: enableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablebackup.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/enablebackup.rst.txt deleted file mode 100644 index 9a06c6669..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebackup: - -enablebackup ------------- - -Usage ---------- - -.. include:: enablebackup.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablebinary.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/enablebinary.rst.txt deleted file mode 100644 index 5b1c6da72..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebinary: - -enablebinary ------------- - -Usage ---------- - -.. 
include:: enablebinary.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablefullquerylog.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/enablefullquerylog.rst.txt deleted file mode 100644 index ec871c283..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablefullquerylog: - -enablefullquerylog ------------------- - -Usage ---------- - -.. include:: enablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablegossip.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/enablegossip.rst.txt deleted file mode 100644 index ae66186ca..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablegossip: - -enablegossip ------------- - -Usage ---------- - -.. include:: enablegossip.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablehandoff.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/enablehandoff.rst.txt deleted file mode 100644 index d764c3a9a..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehandoff: - -enablehandoff -------------- - -Usage ---------- - -.. include:: enablehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablehintsfordc.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/enablehintsfordc.rst.txt deleted file mode 100644 index 6c42087c3..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/enablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehintsfordc: - -enablehintsfordc ----------------- - -Usage ---------- - -.. include:: enablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/enableoldprotocolversions.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/enableoldprotocolversions.rst.txt deleted file mode 100644 index e44dc377a..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/enableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableoldprotocolversions: - -enableoldprotocolversions -------------------------- - -Usage ---------- - -.. include:: enableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/failuredetector.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/failuredetector.rst.txt deleted file mode 100644 index 25d02b04a..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/failuredetector.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_failuredetector: - -failuredetector ---------------- - -Usage ---------- - -.. include:: failuredetector.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/flush.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/flush.rst.txt deleted file mode 100644 index 45e9b2c5e..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/flush.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_flush: - -flush ------ - -Usage ---------- - -.. include:: flush.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/garbagecollect.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/garbagecollect.rst.txt deleted file mode 100644 index 3af605aad..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/garbagecollect.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_garbagecollect: - -garbagecollect --------------- - -Usage ---------- - -.. 
include:: garbagecollect.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/gcstats.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/gcstats.rst.txt deleted file mode 100644 index 3e4b45930..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/gcstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gcstats: - -gcstats -------- - -Usage ---------- - -.. include:: gcstats.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 6f56f7d6d..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getbatchlogreplaythrottle: - -getbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: getbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getcompactionthreshold.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getcompactionthreshold.rst.txt deleted file mode 100644 index e17f4b2e4..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthreshold: - -getcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: getcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getcompactionthroughput.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getcompactionthroughput.rst.txt deleted file mode 100644 index ef41115ee..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthroughput: - -getcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: getcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getconcurrency.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getconcurrency.rst.txt deleted file mode 100644 index 9b0373480..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrency: - -getconcurrency --------------- - -Usage ---------- - -.. include:: getconcurrency.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getconcurrentcompactors.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getconcurrentcompactors.rst.txt deleted file mode 100644 index 8e137e0ed..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentcompactors: - -getconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: getconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt deleted file mode 100644 index e113d74c5..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentviewbuilders: - -getconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. 
include:: getconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getendpoints.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getendpoints.rst.txt deleted file mode 100644 index ac0d43c7a..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getendpoints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getendpoints: - -getendpoints ------------- - -Usage ---------- - -.. include:: getendpoints.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt deleted file mode 100644 index abdd7e7f0..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getinterdcstreamthroughput: - -getinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: getinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getlogginglevels.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getlogginglevels.rst.txt deleted file mode 100644 index 9936e8d45..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getlogginglevels.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getlogginglevels: - -getlogginglevels ----------------- - -Usage ---------- - -.. include:: getlogginglevels.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getmaxhintwindow.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getmaxhintwindow.rst.txt deleted file mode 100644 index 1a0fe017b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getmaxhintwindow: - -getmaxhintwindow ----------------- - -Usage ---------- - -.. include:: getmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getreplicas.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getreplicas.rst.txt deleted file mode 100644 index 342e72b57..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getreplicas.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getreplicas: - -getreplicas ------------ - -Usage ---------- - -.. include:: getreplicas.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getseeds.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getseeds.rst.txt deleted file mode 100644 index e3156300e..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getseeds: - -getseeds --------- - -Usage ---------- - -.. include:: getseeds.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getsstables.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getsstables.rst.txt deleted file mode 100644 index 1a866ccec..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getsstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getsstables: - -getsstables ------------ - -Usage ---------- - -.. include:: getsstables.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/getstreamthroughput.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/getstreamthroughput.rst.txt deleted file mode 100644 index 6d7dbc422..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/getstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getstreamthroughput: - -getstreamthroughput -------------------- - -Usage ---------- - -.. 
include:: getstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/gettimeout.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/gettimeout.rst.txt deleted file mode 100644 index 9d2e9edd8..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/gettimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettimeout: - -gettimeout ----------- - -Usage ---------- - -.. include:: gettimeout.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/gettraceprobability.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/gettraceprobability.rst.txt deleted file mode 100644 index 3f5783fd0..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/gettraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettraceprobability: - -gettraceprobability -------------------- - -Usage ---------- - -.. include:: gettraceprobability.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/gossipinfo.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/gossipinfo.rst.txt deleted file mode 100644 index cd3513a89..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/gossipinfo.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gossipinfo: - -gossipinfo ----------- - -Usage ---------- - -.. include:: gossipinfo.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/handoffwindow.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/handoffwindow.rst.txt deleted file mode 100644 index 87fe61dc2..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/handoffwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_handoffwindow: - -handoffwindow -------------- - -Usage ---------- - -.. include:: handoffwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/help.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/help.rst.txt deleted file mode 100644 index 8cab88f74..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/help.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_help: - -help ----- - -Usage ---------- - -.. include:: help.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/import.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/import.rst.txt deleted file mode 100644 index 532ba2dcd..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/import.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_import: - -import ------- - -Usage ---------- - -.. include:: import.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/info.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/info.rst.txt deleted file mode 100644 index 74012e93f..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/info.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_info: - -info ----- - -Usage ---------- - -.. include:: info.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/invalidatecountercache.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/invalidatecountercache.rst.txt deleted file mode 100644 index ce1a94d1e..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/invalidatecountercache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatecountercache: - -invalidatecountercache ----------------------- - -Usage ---------- - -.. 
include:: invalidatecountercache.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/invalidatekeycache.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/invalidatekeycache.rst.txt deleted file mode 100644 index 3e80511a6..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/invalidatekeycache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatekeycache: - -invalidatekeycache ------------------- - -Usage ---------- - -.. include:: invalidatekeycache.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/invalidaterowcache.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/invalidaterowcache.rst.txt deleted file mode 100644 index fd84f1d5a..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/invalidaterowcache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidaterowcache: - -invalidaterowcache ------------------- - -Usage ---------- - -.. include:: invalidaterowcache.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/join.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/join.rst.txt deleted file mode 100644 index a2819eb6a..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/join.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_join: - -join ----- - -Usage ---------- - -.. include:: join.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/listsnapshots.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/listsnapshots.rst.txt deleted file mode 100644 index d897cfa2b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/listsnapshots.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_listsnapshots: - -listsnapshots -------------- - -Usage ---------- - -.. include:: listsnapshots.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/move.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/move.rst.txt deleted file mode 100644 index 04b3bdba1..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/move.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_move: - -move ----- - -Usage ---------- - -.. include:: move.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/netstats.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/netstats.rst.txt deleted file mode 100644 index b94a09e7d..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/netstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_netstats: - -netstats --------- - -Usage ---------- - -.. include:: netstats.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/nodetool.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/nodetool.rst.txt deleted file mode 100644 index 468e29ec4..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/nodetool.rst.txt +++ /dev/null @@ -1,250 +0,0 @@ -.. _nodetool - -Nodetool --------- - -Usage ---------- - -usage: nodetool [(-p | --port )] [(-pp | --print-port)] - [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] [(-h | --host )] - [] - -The most commonly used nodetool commands are: - - :doc:`assassinate` - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode - - :doc:`bootstrap` - Monitor/manage node's bootstrap process - - :doc:`cleanup` - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces - - :doc:`clearsnapshot` - Remove the snapshot with the given name from the given keyspaces. 
If no snapshotName is specified we will remove all snapshots - - :doc:`clientstats` - Print information about connected clients - - :doc:`compact` - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables - - :doc:`compactionhistory` - Print history of compaction - - :doc:`compactionstats` - Print statistics on compactions - - :doc:`decommission` - Decommission the *node I am connecting to* - - :doc:`describecluster` - Print the name, snitch, partitioner and schema version of a cluster - - :doc:`describering` - Shows the token ranges info of a given keyspace - - :doc:`disableauditlog` - Disable the audit log - - :doc:`disableautocompaction` - Disable autocompaction for the given keyspace and table - - :doc:`disablebackup` - Disable incremental backup - - :doc:`disablebinary` - Disable native transport (binary protocol) - - :doc:`disablefullquerylog` - Disable the full query log - - :doc:`disablegossip` - Disable gossip (effectively marking the node down) - - :doc:`disablehandoff` - Disable storing hinted handoffs - - :doc:`disablehintsfordc` - Disable hints for a data center - - :doc:`disableoldprotocolversions` - Disable old protocol versions - - :doc:`drain` - Drain the node (stop accepting writes and flush all tables) - - :doc:`enableauditlog` - Enable the audit log - - :doc:`enableautocompaction` - Enable autocompaction for the given keyspace and table - - :doc:`enablebackup` - Enable incremental backup - - :doc:`enablebinary` - Reenable native transport (binary protocol) - - :doc:`enablefullquerylog` - Enable full query logging, defaults for the options are configured in cassandra.yaml - - :doc:`enablegossip` - Reenable gossip - - :doc:`enablehandoff` - Reenable future hints storing on the current node - - :doc:`enablehintsfordc` - Enable hints for a data center that was previsouly disabled - - :doc:`enableoldprotocolversions` - Enable old protocol versions - - :doc:`failuredetector` - Shows the failure detector information for the cluster - - :doc:`flush` - Flush one or more tables - - :doc:`garbagecollect` - Remove deleted data from one or more tables - - :doc:`gcstats` - Print GC Statistics - - :doc:`getbatchlogreplaythrottle` - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster. - - :doc:`getcompactionthreshold` - Print min and max compaction thresholds for a given table - - :doc:`getcompactionthroughput` - Print the MB/s throughput cap for compaction in the system - - :doc:`getconcurrentcompactors` - Get the number of concurrent compactors in the system. 
- - :doc:`getconcurrentviewbuilders` - Get the number of concurrent view builders in the system - - :doc:`getendpoints` - Print the end points that owns the key - - :doc:`getinterdcstreamthroughput` - Print the Mb/s throughput cap for inter-datacenter streaming in the system - - :doc:`getlogginglevels` - Get the runtime logging levels - - :doc:`getmaxhintwindow` - Print the max hint window in ms - - :doc:`getreplicas` - Print replicas for a given key - - :doc:`getseeds` - Get the currently in use seed node IP list excluding the node IP - - :doc:`getsstables` - Print the sstable filenames that own the key - - :doc:`getstreamthroughput` - Print the Mb/s throughput cap for streaming in the system - - :doc:`gettimeout` - Print the timeout of the given type in ms - - :doc:`gettraceprobability` - Print the current trace probability value - - :doc:`gossipinfo` - Shows the gossip information for the cluster - - :doc:`handoffwindow` - Print current hinted handoff window - - :doc:`help` - Display help information - - :doc:`import` - Import new SSTables to the system - - :doc:`info` - Print node information (uptime, load, ...) - - :doc:`invalidatecountercache` - Invalidate the counter cache - - :doc:`invalidatekeycache` - Invalidate the key cache - - :doc:`invalidaterowcache` - Invalidate the row cache - - :doc:`join` - Join the ring - - :doc:`listsnapshots` - Lists all the snapshots along with the size on disk and true size. - - :doc:`move` - Move node on the token ring to a new token - - :doc:`netstats` - Print network information on provided host (connecting node by default) - - :doc:`pausehandoff` - Pause hints delivery process - - :doc:`profileload` - Low footprint profiling of activity for a period of time - - :doc:`proxyhistograms` - Print statistic histograms for network operations - - :doc:`rangekeysample` - Shows the sampled keys held across all keyspaces - - :doc:`rebuild` - Rebuild data by streaming from other nodes (similarly to bootstrap) - - :doc:`rebuild_index` - A full rebuild of native secondary indexes for a given table - - :doc:`refresh` - Load newly placed SSTables to the system without restart - - :doc:`refreshsizeestimates` - Refresh system.size_estimates - - :doc:`reloadlocalschema` - Reload local node schema from system tables - - :doc:`reloadseeds` - Reload the seed node list from the seed node provider - - :doc:`reloadssl` - Signals Cassandra to reload SSL certificates - - :doc:`reloadtriggers` - Reload trigger classes - - :doc:`relocatesstables` - Relocates sstables to the correct disk - - :doc:`removenode` - Show status of current node removal, force completion of pending removal or remove provided ID - - :doc:`repair` - Repair one or more tables - - :doc:`repair_admin` - - :doc:`list` - and fail incremental repair sessions - - :doc:`replaybatchlog` - Kick off batchlog replay and wait for finish - - :doc:`resetfullquerylog` - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX - - :doc:`resetlocalschema` - Reset node's local schema and resync - - :doc:`resumehandoff` - Resume hints delivery process - - :doc:`ring` - Print information about the token ring - - :doc:`scrub` - Scrub (rebuild sstables for) one or more tables - - :doc:`setbatchlogreplaythrottle` - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster. 
- - :doc:`setcachecapacity` - Set global key, row, and counter cache capacities (in MB units) - - :doc:`setcachekeystosave` - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable - - :doc:`setcompactionthreshold` - Set min and max compaction thresholds for a given table - - :doc:`setcompactionthroughput` - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling - - :doc:`setconcurrentcompactors` - Set number of concurrent compactors in the system. - - :doc:`setconcurrentviewbuilders` - Set the number of concurrent view builders in the system - - :doc:`sethintedhandoffthrottlekb` - Set hinted handoff throttle in kb per second, per delivery thread. - - :doc:`setinterdcstreamthroughput` - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling - - :doc:`setlogginglevel` - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters. - - :doc:`setmaxhintwindow` - Set the specified max hint window in ms - - :doc:`setstreamthroughput` - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling - - :doc:`settimeout` - Set the specified timeout in ms, or 0 to disable timeout - - :doc:`settraceprobability` - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default - - :doc:`snapshot` - Take a snapshot of specified keyspaces or a snapshot of the specified table - - :doc:`status` - Print cluster information (state, load, IDs, ...) - - :doc:`statusautocompaction` - Status of autocompaction of the given keyspace and table - - :doc:`statusbackup` - Status of incremental backup - - :doc:`statusbinary` - Status of native transport (binary protocol) - - :doc:`statusgossip` - Status of gossip - - :doc:`statushandoff` - Status of storing future hints on the current node - - :doc:`stop` - Stop compaction - - :doc:`stopdaemon` - Stop cassandra daemon - - :doc:`tablehistograms` - Print statistic histograms for a given table - - :doc:`tablestats` - Print statistics on tables - - :doc:`toppartitions` - Sample and print the most active partitions - - :doc:`tpstats` - Print usage statistics of thread pools - - :doc:`truncatehints` - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified. - - :doc:`upgradesstables` - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version) - - :doc:`verify` - Verify (check data checksum for) one or more tables - - :doc:`version` - Print cassandra version - - :doc:`viewbuildstatus` - Show progress of a materialized view build - -See 'nodetool help ' for more information on a specific command. - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/pausehandoff.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/pausehandoff.rst.txt deleted file mode 100644 index 85ea996f9..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/pausehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_pausehandoff: - -pausehandoff ------------- - -Usage ---------- - -.. include:: pausehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/profileload.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/profileload.rst.txt deleted file mode 100644 index aff289f9f..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/profileload.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. 
_nodetool_profileload: - -profileload ------------ - -Usage ---------- - -.. include:: profileload.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/proxyhistograms.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/proxyhistograms.rst.txt deleted file mode 100644 index c4f333fb7..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/proxyhistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_proxyhistograms: - -proxyhistograms ---------------- - -Usage ---------- - -.. include:: proxyhistograms.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/rangekeysample.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/rangekeysample.rst.txt deleted file mode 100644 index 983ce93d0..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/rangekeysample.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rangekeysample: - -rangekeysample --------------- - -Usage ---------- - -.. include:: rangekeysample.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/rebuild.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/rebuild.rst.txt deleted file mode 100644 index 7a94ce4ed..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/rebuild.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild: - -rebuild -------- - -Usage ---------- - -.. include:: rebuild.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/rebuild_index.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/rebuild_index.rst.txt deleted file mode 100644 index a1ea4f5a2..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/rebuild_index.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild_index: - -rebuild_index -------------- - -Usage ---------- - -.. include:: rebuild_index.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/refresh.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/refresh.rst.txt deleted file mode 100644 index f68f040cd..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/refresh.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refresh: - -refresh -------- - -Usage ---------- - -.. include:: refresh.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/refreshsizeestimates.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/refreshsizeestimates.rst.txt deleted file mode 100644 index 2f3610afe..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/refreshsizeestimates.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refreshsizeestimates: - -refreshsizeestimates --------------------- - -Usage ---------- - -.. include:: refreshsizeestimates.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadlocalschema.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadlocalschema.rst.txt deleted file mode 100644 index 7ccc0c5e3..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadlocalschema: - -reloadlocalschema ------------------ - -Usage ---------- - -.. include:: reloadlocalschema.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadseeds.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadseeds.rst.txt deleted file mode 100644 index 5c6751d77..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadseeds: - -reloadseeds ------------ - -Usage ---------- - -.. 
include:: reloadseeds.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadssl.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadssl.rst.txt deleted file mode 100644 index 9781b295b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadssl.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadssl: - -reloadssl ---------- - -Usage ---------- - -.. include:: reloadssl.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadtriggers.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadtriggers.rst.txt deleted file mode 100644 index 2f7959d8c..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/reloadtriggers.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadtriggers: - -reloadtriggers --------------- - -Usage ---------- - -.. include:: reloadtriggers.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/relocatesstables.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/relocatesstables.rst.txt deleted file mode 100644 index 9951d3398..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/relocatesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_relocatesstables: - -relocatesstables ----------------- - -Usage ---------- - -.. include:: relocatesstables.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/removenode.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/removenode.rst.txt deleted file mode 100644 index fe0a041d1..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/removenode.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_removenode: - -removenode ----------- - -Usage ---------- - -.. include:: removenode.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/repair.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/repair.rst.txt deleted file mode 100644 index b43baba71..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/repair.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair: - -repair ------- - -Usage ---------- - -.. include:: repair.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/repair_admin.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/repair_admin.rst.txt deleted file mode 100644 index 1212c399d..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/repair_admin.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair_admin: - -repair_admin ------------- - -Usage ---------- - -.. include:: repair_admin.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/replaybatchlog.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/replaybatchlog.rst.txt deleted file mode 100644 index 073f091db..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/replaybatchlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_replaybatchlog: - -replaybatchlog --------------- - -Usage ---------- - -.. include:: replaybatchlog.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/resetfullquerylog.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/resetfullquerylog.rst.txt deleted file mode 100644 index a7661ec98..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/resetfullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetfullquerylog: - -resetfullquerylog ------------------ - -Usage ---------- - -.. 
include:: resetfullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/resetlocalschema.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/resetlocalschema.rst.txt deleted file mode 100644 index cd1b75d33..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/resetlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetlocalschema: - -resetlocalschema ----------------- - -Usage ---------- - -.. include:: resetlocalschema.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/resumehandoff.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/resumehandoff.rst.txt deleted file mode 100644 index 48a0451a3..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/resumehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resumehandoff: - -resumehandoff -------------- - -Usage ---------- - -.. include:: resumehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/ring.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/ring.rst.txt deleted file mode 100644 index 7b3c195bd..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/ring.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_ring: - -ring ----- - -Usage ---------- - -.. include:: ring.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/scrub.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/scrub.rst.txt deleted file mode 100644 index fc926eb14..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/scrub.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_scrub: - -scrub ------ - -Usage ---------- - -.. include:: scrub.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 2ae628a35..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setbatchlogreplaythrottle: - -setbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: setbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setcachecapacity.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setcachecapacity.rst.txt deleted file mode 100644 index 92c7d6389..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setcachecapacity.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachecapacity: - -setcachecapacity ----------------- - -Usage ---------- - -.. include:: setcachecapacity.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setcachekeystosave.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setcachekeystosave.rst.txt deleted file mode 100644 index 639179f99..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setcachekeystosave.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachekeystosave: - -setcachekeystosave ------------------- - -Usage ---------- - -.. include:: setcachekeystosave.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setcompactionthreshold.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setcompactionthreshold.rst.txt deleted file mode 100644 index 3a3e88b08..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthreshold: - -setcompactionthreshold ----------------------- - -Usage ---------- - -.. 
include:: setcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setcompactionthroughput.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setcompactionthroughput.rst.txt deleted file mode 100644 index 27185da30..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthroughput: - -setcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: setcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setconcurrency.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setconcurrency.rst.txt deleted file mode 100644 index 75b09531f..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrency: - -setconcurrency --------------- - -Usage ---------- - -.. include:: setconcurrency.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setconcurrentcompactors.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setconcurrentcompactors.rst.txt deleted file mode 100644 index 75969de4e..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentcompactors: - -setconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: setconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt deleted file mode 100644 index 26f53a171..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentviewbuilders: - -setconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: setconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt deleted file mode 100644 index 9986ca29a..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sethintedhandoffthrottlekb: - -sethintedhandoffthrottlekb --------------------------- - -Usage ---------- - -.. include:: sethintedhandoffthrottlekb.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt deleted file mode 100644 index ed406a7bc..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setinterdcstreamthroughput: - -setinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: setinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setlogginglevel.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setlogginglevel.rst.txt deleted file mode 100644 index eaa4030c3..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setlogginglevel.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setlogginglevel: - -setlogginglevel ---------------- - -Usage ---------- - -.. 
include:: setlogginglevel.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setmaxhintwindow.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setmaxhintwindow.rst.txt deleted file mode 100644 index 0c62c3289..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setmaxhintwindow: - -setmaxhintwindow ----------------- - -Usage ---------- - -.. include:: setmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/setstreamthroughput.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/setstreamthroughput.rst.txt deleted file mode 100644 index 76447f112..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/setstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setstreamthroughput: - -setstreamthroughput -------------------- - -Usage ---------- - -.. include:: setstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/settimeout.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/settimeout.rst.txt deleted file mode 100644 index 4ec9a6e4d..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/settimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settimeout: - -settimeout ----------- - -Usage ---------- - -.. include:: settimeout.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/settraceprobability.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/settraceprobability.rst.txt deleted file mode 100644 index a95b48560..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/settraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settraceprobability: - -settraceprobability -------------------- - -Usage ---------- - -.. include:: settraceprobability.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/sjk.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/sjk.rst.txt deleted file mode 100644 index 19bf1d605..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/sjk.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sjk: - -sjk ---- - -Usage ---------- - -.. include:: sjk.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/snapshot.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/snapshot.rst.txt deleted file mode 100644 index 097a655b2..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/snapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_snapshot: - -snapshot --------- - -Usage ---------- - -.. include:: snapshot.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/status.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/status.rst.txt deleted file mode 100644 index 4d3050ea1..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/status.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_status: - -status ------- - -Usage ---------- - -.. include:: status.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/statusautocompaction.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/statusautocompaction.rst.txt deleted file mode 100644 index 3748e0e4e..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/statusautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusautocompaction: - -statusautocompaction --------------------- - -Usage ---------- - -.. 
include:: statusautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/statusbackup.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/statusbackup.rst.txt deleted file mode 100644 index 6546ec07f..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/statusbackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbackup: - -statusbackup ------------- - -Usage ---------- - -.. include:: statusbackup.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/statusbinary.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/statusbinary.rst.txt deleted file mode 100644 index 0bb5011c3..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/statusbinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbinary: - -statusbinary ------------- - -Usage ---------- - -.. include:: statusbinary.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/statusgossip.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/statusgossip.rst.txt deleted file mode 100644 index 7dc57eda7..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/statusgossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusgossip: - -statusgossip ------------- - -Usage ---------- - -.. include:: statusgossip.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/statushandoff.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/statushandoff.rst.txt deleted file mode 100644 index aa1c4eb6b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/statushandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statushandoff: - -statushandoff -------------- - -Usage ---------- - -.. include:: statushandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/stop.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/stop.rst.txt deleted file mode 100644 index 1e44dbe79..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/stop.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stop: - -stop ----- - -Usage ---------- - -.. include:: stop.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/stopdaemon.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/stopdaemon.rst.txt deleted file mode 100644 index 4ae951098..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/stopdaemon.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stopdaemon: - -stopdaemon ----------- - -Usage ---------- - -.. include:: stopdaemon.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/tablehistograms.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/tablehistograms.rst.txt deleted file mode 100644 index 79d2b4ccb..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/tablehistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablehistograms: - -tablehistograms ---------------- - -Usage ---------- - -.. include:: tablehistograms.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/tablestats.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/tablestats.rst.txt deleted file mode 100644 index 5b2c02d98..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/tablestats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablestats: - -tablestats ----------- - -Usage ---------- - -.. 
include:: tablestats.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/toppartitions.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/toppartitions.rst.txt deleted file mode 100644 index 711816313..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/toppartitions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_toppartitions: - -toppartitions -------------- - -Usage ---------- - -.. include:: toppartitions.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/tpstats.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/tpstats.rst.txt deleted file mode 100644 index c6b662012..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/tpstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tpstats: - -tpstats -------- - -Usage ---------- - -.. include:: tpstats.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/truncatehints.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/truncatehints.rst.txt deleted file mode 100644 index 4b75391a6..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/truncatehints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_truncatehints: - -truncatehints -------------- - -Usage ---------- - -.. include:: truncatehints.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/upgradesstables.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/upgradesstables.rst.txt deleted file mode 100644 index 505cc148a..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/upgradesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_upgradesstables: - -upgradesstables ---------------- - -Usage ---------- - -.. include:: upgradesstables.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/verify.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/verify.rst.txt deleted file mode 100644 index dbd152cfb..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/verify.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_verify: - -verify ------- - -Usage ---------- - -.. include:: verify.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/version.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/version.rst.txt deleted file mode 100644 index fca4e3f44..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/version.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_version: - -version -------- - -Usage ---------- - -.. include:: version.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/nodetool/viewbuildstatus.rst.txt b/src/doc/4.0-alpha2/_sources/tools/nodetool/viewbuildstatus.rst.txt deleted file mode 100644 index 758fe502b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/nodetool/viewbuildstatus.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_viewbuildstatus: - -viewbuildstatus ---------------- - -Usage ---------- - -.. include:: viewbuildstatus.txt - :literal: - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/index.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/index.rst.txt deleted file mode 100644 index b9e483f45..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -SSTable Tools -============= - -This section describes the functionality of the various sstable tools. - -Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped. - -.. toctree:: - :maxdepth: 2 - - sstabledump - sstableexpiredblockers - sstablelevelreset - sstableloader - sstablemetadata - sstableofflinerelevel - sstablerepairedset - sstablescrub - sstablesplit - sstableupgrade - sstableutil - sstableverify - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstabledump.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstabledump.rst.txt deleted file mode 100644 index 8f38afa09..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstabledump.rst.txt +++ /dev/null @@ -1,294 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstabledump ------------ - -Dump contents of a given SSTable to standard output in JSON format. - -You must supply exactly one sstable. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstabledump - -=================================== ================================================================================ --d CQL row per line internal representation --e Enumerate partition keys only --k Partition key --x Excluded partition key(s) --t Print raw timestamps instead of iso8601 date strings --l Output each row as a separate JSON object -=================================== ================================================================================ - -If necessary, use sstableutil first to find out the sstables used by a table. - -Dump entire table -^^^^^^^^^^^^^^^^^ - -Dump the entire table without any options. 
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26 - - cat eventlog_dump_2018Jul26 - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - ] - -Dump table in a more manageable format -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. ref: https://issues.apache.org/jira/browse/CASSANDRA-13848 - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines - - cat eventlog_dump_2018Jul26_justlines - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Dump only keys -^^^^^^^^^^^^^^ - -Dump only the keys by using the -e option. 
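A keys-only dump is also a cheap way to sanity-check which partitions a file covers. As a minimal sketch, assuming the same hypothetical file path used throughout these examples, the enumerated keys can be piped through grep to look for a partition of interest::

    # enumerate partition keys only and search for one key prefix (placeholder value)
    sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e | grep 3578d7de

The captured example below shows the full keys-only output.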
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys - - cat eventlog_dump_2018Jul26b - [ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ] - -Dump row for a single key -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a single key using the -k option. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey - - cat eventlog_dump_2018Jul26_singlekey - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Exclude a key or keys in dump of rows -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a table except for the rows excluded with the -x option. Multiple keys can be used. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e > eventlog_dump_2018Jul26_excludekeys - - cat eventlog_dump_2018Jul26_excludekeys - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Display raw timestamps -^^^^^^^^^^^^^^^^^^^^^^ - -By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times - - cat eventlog_dump_2018Jul26_times - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "1532118147028809" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - - -Display internal structure in output -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump the table in a format that reflects the internal structure. 
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d - - cat eventlog_dump_2018Jul26_d - [3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]: | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711] - [d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]: | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522] - [cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]: | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809] - - - - - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableexpiredblockers.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstableexpiredblockers.rst.txt deleted file mode 100644 index ec837944c..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableexpiredblockers.rst.txt +++ /dev/null @@ -1,48 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableexpiredblockers ----------------------- - -During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable. - -This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-10015 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ - -sstableexpiredblockers
- -Output blocked sstables -^^^^^^^^^^^^^^^^^^^^^^^ - -If the sstables exist for the table, but no tables have older data than the newest tombstone in an expired sstable, the script will return nothing. - -Otherwise, the script will return ` blocks <#> expired sstables from getting dropped` followed by a list of the blocked sstables. - -Example:: - - sstableexpiredblockers keyspace1 standard1 - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablelevelreset.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstablelevelreset.rst.txt deleted file mode 100644 index 7069094dd..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablelevelreset.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablelevelreset ------------------ - -If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration. - -See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5271 - -Usage -^^^^^ - -sstablelevelreset --really-reset
- -The really-reset flag is required, to ensure this intrusive command is not run accidentally. - -Table not found -^^^^^^^^^^^^^^^ - -If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error. - -Example:: - - ColumnFamily not found: keyspace/evenlog. - -Table has no sstables -^^^^^^^^^^^^^^^^^^^^^ - -Example:: - - Found no sstables, did you give the correct keyspace/table? - - -Table already at level 0 -^^^^^^^^^^^^^^^^^^^^^^^^ - -The script will not set the level if it is already set to 0. - -Example:: - - Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0 - -Table levels reduced to 0 -^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the level is not already 0, then this will reset it to 0. - -Example:: - - sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level - SSTable Level: 1 - - sstablelevelreset --really-reset keyspace eventlog - Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - - sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level - SSTable Level: 0 - - - - - - - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableloader.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstableloader.rst.txt deleted file mode 100644 index a9b37342c..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableloader.rst.txt +++ /dev/null @@ -1,273 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableloader ---------------- - -Bulk-load the sstables found in the directory to the configured cluster. The parent directories of are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files. - -Several of the options listed below don't work quite as intended, and in those cases, workarounds are mentioned for specific use cases. - -To avoid having the sstable files to be loaded compacted while reading them, place the files in an alternate keyspace/table path than the data directory. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-1278 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
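Before the usage table below, a minimal staging sketch, assuming the hypothetical paths and node address used in the examples on this page (copy every component file of the sstable, not just Data.db)::

    # the last two directory levels name the target keyspace and table
    mkdir -p /path/to/keyspace1/standard1/
    cp ma-1-big-* /path/to/keyspace1/standard1/
    sstableloader --nodes 172.17.0.2 /path/to/keyspace1/standard1/

Staging the files outside the live data directory is what keeps them from being compacted while they are read, as noted above.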
- -Usage -^^^^^ - -sstableloader - -=================================================== ================================================================================ --d, --nodes Required. Try to connect to these hosts (comma-separated) - initially for ring information --u, --username username for Cassandra authentication --pw, --password password for Cassandra authentication --p, --port port used for native connection (default 9042) --sp, --storage-port port used for internode communication (default 7000) --ssp, --ssl-storage-port port used for TLS internode communication (default 7001) ---no-progress don't display progress --t, --throttle throttle speed in Mbits (default unlimited) --idct, --inter-dc-throttle inter-datacenter throttle speed in Mbits (default unlimited) --cph, --connections-per-host number of concurrent connections-per-host --i, --ignore don't stream to this (comma separated) list of nodes --alg, --ssl-alg Client SSL: algorithm (default: SunX509) --ciphers, --ssl-ciphers Client SSL: comma-separated list of encryption suites to use --ks, --keystore Client SSL: full path to keystore --kspw, --keystore-password Client SSL: password of the keystore --st, --store-type Client SSL: type of store --ts, --truststore Client SSL: full path to truststore --tspw, --truststore-password Client SSL: password of the truststore --prtcl, --ssl-protocol Client SSL: connections protocol to use (default: TLS) --ap, --auth-provider custom AuthProvider class name for cassandra authentication --f, --conf-path cassandra.yaml file path for streaming throughput and client/server SSL --v, --verbose verbose output --h, --help display this help message -=================================================== ================================================================================ - -You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options. - -Load sstables from a Snapshot -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Copy the snapshot sstables into an accessible directory and use sstableloader to restore them. - -Example:: - - cp snapshots/1535397029191/* /path/to/keyspace1/standard1/ - - sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4700000 - Total duration (ms): : 4390 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -The -d or --nodes option is required, or the script will not run. 
- -Example:: - - sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Initial hosts must be specified (-d) - -Use a Config File for SSL Clusters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool to the cassandra.yaml with the relevant server_encryption_options (e.g., truststore location, algorithm). This will work better than passing individual ssl options shown above to sstableloader on the command line. - -Example:: - - sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 9.165KiB/s (avg: 9.165KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 5.147MiB/s (avg: 18.299KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 9.751MiB/s (avg: 27.423KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 8.203MiB/s (avg: 36.524KiB/s) - ... - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 9356 ms - Average transfer rate : 480.105KiB/s - Peak transfer rate : 586.410KiB/s - -Hide Progress Output -^^^^^^^^^^^^^^^^^^^^ - -To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option. - -Example:: - - sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2] - -Get More Detail -^^^^^^^^^^^^^^^ - -Using the --verbose option will provide much more progress output. 
- -Example:: - - sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 12.056KiB/s (avg: 12.056KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 9.092MiB/s (avg: 24.081KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 18.832MiB/s (avg: 36.099KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 2.253MiB/s (avg: 47.882KiB/s) - progress: [/172.17.0.2]0:0/1 7 % total: 7% 6.388MiB/s (avg: 59.743KiB/s) - progress: [/172.17.0.2]0:0/1 8 % total: 8% 14.606MiB/s (avg: 71.635KiB/s) - progress: [/172.17.0.2]0:0/1 9 % total: 9% 8.880MiB/s (avg: 83.465KiB/s) - progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s) - progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s) - progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s) - progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s) - progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s) - progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s) - progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s) - progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s) - progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s) - progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s) - progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s) - progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s) - progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s) - progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s) - progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s) - progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s) - progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s) - progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s) - progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s) - progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s) - progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s) - progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s) - progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s) - progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s) - progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s) - progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s) - progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s) - progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s) - progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s) - progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s) - progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s) - progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s) - progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s) - progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s 
(avg: 462.545KiB/s) - progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s) - progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s) - progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s) - progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s) - progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s) - progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s) - progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s) - progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s) - progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s) - progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s) - progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s) - progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s) - progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s) - progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s) - progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s) - progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s) - progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s) - progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s) - progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s) - progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s) - progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s) - progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s) - progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s) - progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s) - progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s) - progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s) - progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s) - progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s) - progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 6706 ms - Average transfer rate : 669.835KiB/s - Peak transfer rate : 767.802KiB/s - - -Throttling Load -^^^^^^^^^^^^^^^ - -To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below. 
- -Example:: - - sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 0 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 37634 - Average transfer rate (MB/s): : 0 - Peak transfer rate (MB/s): : 0 - -Speeding up Load -^^^^^^^^^^^^^^^^ - -To speed up the load process, the number of connections per host can be increased. - -Example:: - - sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 100 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 3486 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -This small data set doesn't benefit much from the increase in connections per host, but note that the total duration has decreased in this example. - - - - - - - - - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablemetadata.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstablemetadata.rst.txt deleted file mode 100644 index 0a7a42211..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablemetadata.rst.txt +++ /dev/null @@ -1,300 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablemetadata ---------------- - -Print information about an sstable from the related Statistics.db and Summary.db files to standard output. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
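All of the offline sstable tools covered in these pages share that caveat: they must be run with Cassandra stopped, and none of them check this for you. A pre-flight check along the following lines can help; this is only a sketch, and the process pattern is an assumption that may differ depending on how Cassandra was started::

    # Refuse to run offline sstable tools if a Cassandra JVM still appears to be running on this host.
    if pgrep -f CassandraDaemon > /dev/null; then
        echo "Cassandra appears to be running; stop it before using offline sstable tools" >&2
        exit 1
    fi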
- -Usage -^^^^^ - -sstablemetadata - -========================= ================================================================================ ---gc_grace_seconds The gc_grace_seconds to use when calculating droppable tombstones -========================= ================================================================================ - -Print all the metadata -^^^^^^^^^^^^^^^^^^^^^^ - -Run sstablemetadata against the *Data.db file(s) related to a table. If necessary, find the *Data.db file(s) using sstableutil. - -Example:: - - sstableutil keyspace1 standard1 | grep Data - /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - SSTable: /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big - Partitioner: org.apache.cassandra.dht.Murmur3Partitioner - Bloom Filter FP chance: 0.010000 - Minimum timestamp: 1535025576141000 - Maximum timestamp: 1535025604309000 - SSTable min local deletion time: 2147483647 - SSTable max local deletion time: 2147483647 - Compressor: org.apache.cassandra.io.compress.LZ4Compressor - TTL min: 86400 - TTL max: 86400 - First token: -9223004712949498654 (key=39373333373831303130) - Last token: 9222554117157811897 (key=4f3438394e39374d3730) - Estimated droppable tombstones: 0.9188263888888889 - SSTable Level: 0 - Repaired at: 0 - Replay positions covered: {CommitLogPosition(segmentId=1535025390651, position=226400)=CommitLogPosition(segmentId=1535025390651, position=6849139)} - totalColumnsSet: 100000 - totalRows: 20000 - Estimated tombstone drop times: - 1535039100: 80390 - 1535039160: 5645 - 1535039220: 13965 - Count Row Size Cell Count - 1 0 0 - 2 0 0 - 3 0 0 - 4 0 0 - 5 0 20000 - 6 0 0 - 7 0 0 - 8 0 0 - 10 0 0 - 12 0 0 - 14 0 0 - 17 0 0 - 20 0 0 - 24 0 0 - 29 0 0 - 35 0 0 - 42 0 0 - 50 0 0 - 60 0 0 - 72 0 0 - 86 0 0 - 103 0 0 - 124 0 0 - 149 0 0 - 179 0 0 - 215 0 0 - 258 20000 0 - 310 0 0 - 372 0 0 - 446 0 0 - 535 0 0 - 642 0 0 - 770 0 0 - 924 0 0 - 1109 0 0 - 1331 0 0 - 1597 0 0 - 1916 0 0 - 2299 0 0 - 2759 0 0 - 3311 0 0 - 3973 0 0 - 4768 0 0 - 5722 0 0 - 6866 0 0 - 8239 0 0 - 9887 0 0 - 11864 0 0 - 14237 0 0 - 17084 0 0 - 20501 0 0 - 24601 0 0 - 29521 0 0 - 35425 0 0 - 42510 0 0 - 51012 0 0 - 61214 0 0 - 73457 0 0 - 88148 0 0 - 105778 0 0 - 126934 0 0 - 152321 0 0 - 182785 0 0 - 219342 0 0 - 263210 0 0 - 315852 0 0 - 379022 0 0 - 454826 0 0 - 545791 0 0 - 654949 0 0 - 785939 0 0 - 943127 0 0 - 1131752 0 0 - 1358102 0 0 - 1629722 0 0 - 1955666 0 0 - 2346799 0 0 - 2816159 0 0 - 3379391 0 0 - 4055269 0 0 - 4866323 0 0 - 5839588 0 0 - 7007506 0 0 - 8409007 0 0 - 10090808 0 0 - 12108970 0 0 - 14530764 0 0 - 17436917 0 0 - 20924300 0 0 - 25109160 0 0 - 30130992 0 0 - 36157190 0 0 - 43388628 0 0 - 52066354 0 0 - 62479625 0 0 - 74975550 0 0 - 89970660 0 0 - 107964792 0 0 - 129557750 0 0 - 155469300 0 0 - 186563160 0 0 - 223875792 0 0 - 268650950 0 0 - 322381140 0 0 - 386857368 0 0 - 464228842 0 0 - 557074610 0 0 - 668489532 0 0 - 802187438 0 0 - 962624926 0 0 - 1155149911 0 0 - 1386179893 0 0 - 1663415872 0 0 - 1996099046 0 0 - 2395318855 0 0 - 2874382626 0 - 3449259151 0 - 4139110981 0 - 4966933177 0 - 5960319812 0 - 7152383774 0 - 8582860529 0 - 10299432635 0 - 12359319162 0 - 14831182994 0 - 17797419593 0 - 21356903512 0 - 25628284214 0 - 30753941057 0 - 36904729268 0 - 44285675122 0 - 53142810146 0 - 63771372175 0 - 76525646610 0 - 91830775932 0 - 110196931118 0 - 132236317342 0 
- 158683580810 0 - 190420296972 0 - 228504356366 0 - 274205227639 0 - 329046273167 0 - 394855527800 0 - 473826633360 0 - 568591960032 0 - 682310352038 0 - 818772422446 0 - 982526906935 0 - 1179032288322 0 - 1414838745986 0 - Estimated cardinality: 20196 - EncodingStats minTTL: 0 - EncodingStats minLocalDeletionTime: 1442880000 - EncodingStats minTimestamp: 1535025565275000 - KeyType: org.apache.cassandra.db.marshal.BytesType - ClusteringTypes: [org.apache.cassandra.db.marshal.UTF8Type] - StaticColumns: {C3:org.apache.cassandra.db.marshal.BytesType, C4:org.apache.cassandra.db.marshal.BytesType, C0:org.apache.cassandra.db.marshal.BytesType, C1:org.apache.cassandra.db.marshal.BytesType, C2:org.apache.cassandra.db.marshal.BytesType} - RegularColumns: {} - -Specify gc grace seconds -^^^^^^^^^^^^^^^^^^^^^^^^ - -To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn't access the schema directly, this is a way to more accurately estimate droppable tombstones -- for example, if you pass in gc_grace_seconds matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the current machine time (in seconds). - -ref: https://issues.apache.org/jira/browse/CASSANDRA-12208 - -Example:: - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4 - Estimated tombstone drop times: - 1536599100: 1 - 1536599640: 1 - 1536599700: 2 - - echo $(date +%s) - 1536602005 - - # if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 4.0E-5 - - # if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 9.61111111111111E-6 - - # if gc_grace_seconds was configured at 5000, none of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 0.0 - -Explanation of each value printed above -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -=================================== ================================================================================ - Value Explanation -=================================== ================================================================================ -SSTable prefix of the sstable filenames related to this sstable -Partitioner partitioner type used to distribute data across nodes; defined in cassandra.yaml -Bloom Filter FP precision of Bloom filter used in reads; defined in the table definition -Minimum timestamp minimum timestamp of any entry in this sstable, in epoch microseconds -Maximum timestamp maximum timestamp of any entry in this sstable, in epoch microseconds -SSTable min local deletion time minimum timestamp of deletion date, based on TTL, in epoch seconds -SSTable max local deletion time maximum timestamp of deletion date, based on TTL, in epoch seconds -Compressor blank (-) by
default; if not blank, indicates type of compression enabled on the table -TTL min time-to-live in seconds; default 0 unless defined in the table definition -TTL max time-to-live in seconds; default 0 unless defined in the table definition -First token lowest token and related key found in the sstable summary -Last token highest token and related key found in the sstable summary -Estimated droppable tombstones ratio of tombstones to columns, using configured gc grace seconds if relevant -SSTable level compaction level of this sstable, if leveled compaction (LCS) is used -Repaired at the timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds -Replay positions covered the interval of time and commitlog positions related to this sstable -totalColumnsSet number of cells in the table -totalRows number of rows in the table -Estimated tombstone drop times approximate number of rows that will expire, ordered by epoch seconds -Count Row Size Cell Count two histograms in two columns; one represents distribution of Row Size - and the other represents distribution of Cell Count -Estimated cardinality an estimate of unique values, used for compaction -EncodingStats* minTTL in epoch milliseconds -EncodingStats* minLocalDeletionTime in epoch seconds -EncodingStats* minTimestamp in epoch microseconds -KeyType the type of partition key, useful in reading and writing data - from/to storage; defined in the table definition -ClusteringTypes the type of clustering key, useful in reading and writing data - from/to storage; defined in the table definition -StaticColumns a list of the shared columns in the table -RegularColumns a list of non-static, non-key columns in the table -=================================== ================================================================================ -* For the encoding stats values, the delta of this and the current epoch time is used when encoding and storing data in the most optimal way. - - - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableofflinerelevel.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstableofflinerelevel.rst.txt deleted file mode 100644 index c031d2987..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableofflinerelevel.rst.txt +++ /dev/null @@ -1,95 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableofflinerelevel ---------------------- - -When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-8301 - -The way this is done is: sstables are storted by their last token. 
Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size):: - - L3 [][][][][][][][][][][] - L2 [ ][ ][ ][ ] - L1 [ ][ ] - L0 [ ] - -It will look like this after the sstables are dropped to L0 and sorted by last token (and, to illustrate overlap, the overlapping ones are put on a new line):: - - [][][] - [ ][][][] - [ ] - [ ] - ... - -Then, we start iterating from the smallest last token and add all sstables that do not cause an overlap to a level. We will reconstruct the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below. - -If we end up with more levels than expected, we put the sstables in any levels exceeding the expected count in L0. For example, the original L0 files will most likely be put in a level of their own, since they most often overlap many other sstables. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ - -sstableofflinerelevel [--dry-run] <keyspace> <table>
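Because the level assignment lives in each sstable's metadata, you can spot-check it before and after releveling with sstablemetadata (a minimal sketch; the data directory path is a placeholder and will differ on your system)::

    # Print each sstable's name and its currently recorded compaction level.
    sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-*/mc-*-big-Data.db | grep -E 'SSTable:|SSTable Level'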
- -Doing a dry run -^^^^^^^^^^^^^^^ - -Use the --dry-run option to see the current level distribution and predicted level after the change. - -Example:: - - sstableofflinerelevel --dry-run keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - Potential leveling: - L0=1 - L1=1 - -Running a relevel -^^^^^^^^^^^^^^^^^ - -Example:: - - sstableofflinerelevel keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - New leveling: - L0=1 - L1=1 - -Keyspace or table not found -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If an invalid keyspace and/or table is provided, an exception will be thrown. - -Example:: - - sstableofflinerelevel --dry-run keyspace evenlog - - Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog - at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96) - - - - - - - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablerepairedset.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstablerepairedset.rst.txt deleted file mode 100644 index ebacef335..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablerepairedset.rst.txt +++ /dev/null @@ -1,79 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablerepairedset ------------------- - -Repairs can take a very long time in some environments, for large sizes of data. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can be run on only un-repaired sstables if desired. - -Note that running a repair (e.g., via nodetool repair) doesn't set the status of this metadata. Only setting the status of this metadata via this tool does. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5351 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablerepairedset --really-set [-f | ] - -=================================== ================================================================================ ---really-set required if you want to really set the status ---is-repaired set the repairedAt status to the last modified time ---is-unrepaired set the repairedAt status to 0 --f use a file containing a list of sstables as the input -=================================== ================================================================================ - -Set a lot of sstables to unrepaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are many ways to do this programmatically. This way would likely include variables for the keyspace and table. 
- -Example:: - - find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired % - -Set one to many sstables to repaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice. - -Example:: - - nodetool repair keyspace1 standard1 - find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired % - -Print metadata showing repaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -sstablemetadata can be used to view the status set or unset using this command. - -Example: - - sstablerepairedset --really-set --is-repaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at" - Repaired at: 1534443974000 - - sstablerepairedset --really-set --is-unrepaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at" - Repaired at: 0 - -Using command in a script -^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you know you ran repair 2 weeks ago, you can do something like the following:: - - sstablerepairset --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14) - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablescrub.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstablescrub.rst.txt deleted file mode 100644 index 0bbda9f32..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablescrub.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablescrub ------------- - -Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-4321 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablescrub
- -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --m,--manifest-check only check and repair the leveled manifest, without actually scrubbing the sstables --n,--no-validate do not validate columns using column validator --r,--reinsert-overflowed-ttl Rewrites rows with overflowed expiration date affected by CASSANDRA-14092 - with the maximum supported expiration date of 2038-01-19T03:14:06+00:00. The rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows. --s,--skip-corrupted skip corrupt rows in counter tables --v,--verbose verbose output -=================================== ================================================================================ - -Basic Scrub -^^^^^^^^^^^ - -The scrub without options will do a snapshot first, then write all non-corrupted files to a new sstable. - -Example:: - - sstablescrub keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped - Checking leveled manifest - -Scrub without Validation -^^^^^^^^^^^^^^^^^^^^^^^^ -ref: https://issues.apache.org/jira/browse/CASSANDRA-9406 - -Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but not corrupt. This data usually doesn not present any errors to the client. - -Example:: - - sstablescrub --no-validate keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned - -Skip Corrupted Counter Tables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5930 - -If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+. - -Example:: - - sstablescrub --skip-corrupted keyspace1 counter1 - -Dealing with Overflow Dates -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-14092 - -Using the option --reinsert-overflowed-ttl allows a rewriting of rows that had a max TTL going over the maximum (causing an overflow). - -Example:: - - sstablescrub --reinsert-overflowed-ttl keyspace1 counter1 - -Manifest Check -^^^^^^^^^^^^^^ - -As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata. - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablesplit.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstablesplit.rst.txt deleted file mode 100644 index 5386fa48b..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstablesplit.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -.. 
Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablesplit ------------- - -Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-4766 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablesplit - -=================================== ================================================================================ ---debug display stack traces --h, --help display this help message ---no-snapshot don't snapshot the sstables before splitting --s, --size maximum size in MB for the output sstables (default: 50) -=================================== ================================================================================ - -This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped. - -Split a File -^^^^^^^^^^^^ - -Split a large sstable into smaller sstables. By default, unless the option --no-snapshot is added, a snapshot will be done of the original sstable and placed in the snapshots folder. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - - Pre-split sstables snapshotted into snapshot pre-split-1533144514795 - -Split Multiple Files -^^^^^^^^^^^^^^^^^^^^ - -Wildcards can be used in the filename portion of the command to split multiple files. - -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1* - -Attempt to Split a Small File -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the file is already smaller than the split size provided, the sstable will not be split. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB) - No sstables needed splitting. - -Split a File into Specified Size -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB). Specify only the number, not the units. For example --size 50 is correct, but --size 50MB is not. 
- -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db - Pre-split sstables snapshotted into snapshot pre-split-1533144996008 - - -Split Without Snapshot -^^^^^^^^^^^^^^^^^^^^^^ - -By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it. - -Example:: - - sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db - -Note: There is no output, but you can see the results in your file system. - - - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableupgrade.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstableupgrade.rst.txt deleted file mode 100644 index 66386aca1..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableupgrade.rst.txt +++ /dev/null @@ -1,137 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableupgrade --------------- - -Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version. - -The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableupgrade
[snapshot_name] - -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --k,--keep-source do not delete the source sstables -=================================== ================================================================================ - -Rewrite tables to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Start with a set of sstables in one version of Cassandra:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables:: - - sstableupgrade keyspace1 standard1 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 13:48 backups - -rw-r--r-- 1 user wheel 292 Aug 22 13:48 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4599475 Aug 22 13:48 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:48 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 13:48 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330807 Aug 22 13:48 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 13:48 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 13:48 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 13:48 mc-2-big-TOC.txt - -Rewrite tables to the current Cassandra version, and keep tables in old version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Again, starting with a set of sstables in one version:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:: - - sstableupgrade keyspace1 standard1 -k - Found 1 sstables that need upgrading. 
- Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 14:00 backups - -rw-r--r--@ 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r--@ 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r--@ 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r--@ 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r--@ 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r--@ 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r--@ 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r--@ 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -rw-r--r-- 1 user wheel 292 Aug 22 14:01 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4596370 Aug 22 14:01 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 14:01 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 14:01 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330801 Aug 22 14:01 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 14:01 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 14:01 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 14:01 mc-2-big-TOC.txt - - -Rewrite a snapshot to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Find the snapshot name:: - - nodetool listsnapshots - - Snapshot Details: - Snapshot name Keyspace name Column family name True size Size on disk - ... - 1534962986979 keyspace1 standard1 5.85 MB 5.85 MB - -Then rewrite the snapshot:: - - sstableupgrade keyspace1 standard1 1534962986979 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete. - - - - - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableutil.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstableutil.rst.txt deleted file mode 100644 index 30becd0e0..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableutil.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -sstableutil ----------- - -List sstable files for the provided table. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-7066 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableutil <keyspace> <table>
- -=================================== ================================================================================ --c, --cleanup clean up any outstanding transactions --d, --debug display stack traces --h, --help display this help message --o, --oplog include operation logs --t, --type all (list all files, final or temporary), tmp (list temporary files only), - final (list final files only), --v, --verbose verbose output -=================================== ================================================================================ - -List all sstables -^^^^^^^^^^^^^^^^^ - -The basic command lists the sstables associated with a given keyspace/table. - -Example:: - - sstableutil keyspace eventlog - Listing files... - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt - -List only temporary sstables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `tmp` will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra. - -List only final sstables -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `final` will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option. - -Include transaction logs -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -o option will include transaction logs in the listing, in the format above. - -Clean up sstables -^^^^^^^^^^^^^^^^^ - -Using the -c option removes any transactions left over from incomplete writes or compactions. - -From the 3.0 upgrade notes: - -New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. 
Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix "add:" or "remove:". They also contain a special line "commit", only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the "add" prefix) and delete the old sstables (those with the "remove" prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first. - - - diff --git a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableverify.rst.txt b/src/doc/4.0-alpha2/_sources/tools/sstable/sstableverify.rst.txt deleted file mode 100644 index dad3f4487..000000000 --- a/src/doc/4.0-alpha2/_sources/tools/sstable/sstableverify.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableverify -------------- - -Check sstable(s) for errors or corruption, for the provided table. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5791 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableverify
- -=================================== ================================================================================ ---debug display stack traces --e, --extended extended verification --h, --help display this help message --v, --verbose verbose output -=================================== ================================================================================ - -Basic Verification -^^^^^^^^^^^^^^^^^^ - -This is the basic verification. It is not a very quick process, and uses memory. You might need to increase your memory settings if you have many sstables. - -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - -Extended Verification -^^^^^^^^^^^^^^^^^^^^^ - -During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time. - -Example:: - - root@DC1C1:/# sstableverify -e keyspace eventlog - WARN 14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully - -Corrupted File -^^^^^^^^^^^^^^ - -Corrupted files are listed if they are detected by the script. 
- -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db - -A similar (but less verbose) tool will show the suggested actions:: - - nodetool verify keyspace eventlog - error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair - - - diff --git a/src/doc/4.0-alpha2/_sources/troubleshooting/finding_nodes.rst.txt b/src/doc/4.0-alpha2/_sources/troubleshooting/finding_nodes.rst.txt deleted file mode 100644 index df5e16c93..000000000 --- a/src/doc/4.0-alpha2/_sources/troubleshooting/finding_nodes.rst.txt +++ /dev/null @@ -1,149 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Find The Misbehaving Nodes -========================== - -The first step to troubleshooting a Cassandra issue is to use error messages, -metrics and monitoring information to identify if the issue lies with the -clients or the server and if it does lie with the server find the problematic -nodes in the Cassandra cluster. The goal is to determine if this is a systemic -issue (e.g. a query pattern that affects the entire cluster) or isolated to a -subset of nodes (e.g. neighbors holding a shared token range or even a single -node with bad hardware). - -There are many sources of information that help determine where the problem -lies. Some of the most common are mentioned below. - -Client Logs and Errors ----------------------- -Clients of the cluster often leave the best breadcrumbs to follow. Perhaps -client latencies or error rates have increased in a particular datacenter -(likely eliminating other datacenter's nodes), or clients are receiving a -particular kind of error code indicating a particular kind of problem. -Troubleshooters can often rule out many failure modes just by reading the error -messages. In fact, many Cassandra error messages include the last coordinator -contacted to help operators find nodes to start with. - -Some common errors (likely culprit in parenthesis) assuming the client has -similar error names as the Datastax :ref:`drivers `: - -* ``SyntaxError`` (**client**). 
This and other ``QueryValidationException`` - indicate that the client sent a malformed request. These are rarely server - issues and usually indicate bad queries. -* ``UnavailableException`` (**server**): This means that the Cassandra - coordinator node has rejected the query as it believes that insufficient - replica nodes are available. If many coordinators are throwing this error it - likely means that there really are (typically) multiple nodes down in the - cluster and you can identify them using :ref:`nodetool status - `. If only a single coordinator is throwing this error it may - mean that node has been partitioned from the rest. -* ``OperationTimedOutException`` (**server**): This is the most frequent - timeout message raised when clients set timeouts and means that the query - took longer than the supplied timeout. This is a *client side* timeout, - meaning that the query took longer than the client-specified timeout. The error - message will include the coordinator node that was last tried, which is - usually a good starting point. This error usually indicates either - aggressive client timeout values or latent server coordinators/replicas. -* ``ReadTimeoutException`` or ``WriteTimeoutException`` (**server**): These - are raised when clients do not specify lower timeouts and there are - *coordinator* timeouts based on the values supplied in the ``cassandra.yaml`` - configuration file. They usually indicate a serious server side problem as - the default values are usually multiple seconds. - -Metrics ------- - -If you have Cassandra :ref:`metrics ` reporting to a -centralized location such as `Graphite `_ or -`Grafana `_, you can typically use those to narrow down -the problem. At this stage narrowing down the issue to a particular -datacenter, rack, or even group of nodes is the main goal. Some helpful metrics -to look at are: - -Errors -^^^^^^ -Cassandra refers to internode messaging errors as "drops", and provides a -number of :ref:`Dropped Message Metrics ` to help narrow -down errors. If particular nodes are dropping messages actively, they are -likely related to the issue. - -Latency -^^^^^^^ -For timeouts or latency related issues you can start with :ref:`Table -Metrics ` by comparing Coordinator level metrics e.g. -``CoordinatorReadLatency`` or ``CoordinatorWriteLatency`` with their associated -replica metrics e.g. ``ReadLatency`` or ``WriteLatency``. Issues usually show -up on the ``99th`` percentile before they show up on the ``50th`` percentile or -the ``mean``. While ``maximum`` coordinator latencies are not typically very -helpful due to the exponentially decaying reservoir used internally to produce -metrics, ``maximum`` replica latencies that correlate with increased ``99th`` -percentiles on coordinators can help narrow down the problem. - -There are usually three main possibilities: - -1. Coordinator latencies are high on all nodes, but only a few nodes' local - read latencies are high. This points to slow replica nodes, and the - coordinator latencies are just side effects. This usually happens when clients are - not token aware. -2. Coordinator latencies and replica latencies increase at the - same time on a few nodes. If clients are token aware this is almost - always what happens and points to slow replicas of a subset of token - ranges (only part of the ring). -3. Coordinator and local latencies are high on many nodes. This usually - indicates either a tipping point in the cluster capacity (too many writes or - reads per second), or a new query pattern.
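If these metrics are not available in a dashboard, a rough node-by-node comparison can also be made with nodetool, which exposes the same latency data locally; this is only a sketch, and the keyspace and table names are placeholders::

    # Coordinator-level read/write latency percentiles on this node.
    nodetool proxyhistograms

    # Replica-level (local) read/write latency percentiles for one table on this node.
    nodetool tablehistograms keyspace1 standard1

Running both on a suspect node and on a known-healthy node makes it easier to tell which of the three cases above you are in.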
- -It's important to remember that depending on the client's load balancing -behavior and consistency levels coordinator and replica metrics may or may -not correlate. In particular if you use ``TokenAware`` policies the same -node's coordinator and replica latencies will often increase together, but if -you just use normal ``DCAwareRoundRobin`` coordinator latencies can increase -with unrelated replica node's latencies. For example: - -* ``TokenAware`` + ``LOCAL_ONE``: should always have coordinator and replica - latencies on the same node rise together -* ``TokenAware`` + ``LOCAL_QUORUM``: should always have coordinator and - multiple replica latencies rise together in the same datacenter. -* ``TokenAware`` + ``QUORUM``: replica latencies in other datacenters can - affect coordinator latencies. -* ``DCAwareRoundRobin`` + ``LOCAL_ONE``: coordinator latencies and unrelated - replica node's latencies will rise together. -* ``DCAwareRoundRobin`` + ``LOCAL_QUORUM``: different coordinator and replica - latencies will rise together with little correlation. - -Query Rates -^^^^^^^^^^^ -Sometimes the :ref:`Table ` query rate metrics can help -narrow down load issues as "small" increase in coordinator queries per second -(QPS) may correlate with a very large increase in replica level QPS. This most -often happens with ``BATCH`` writes, where a client may send a single ``BATCH`` -query that might contain 50 statements in it, which if you have 9 copies (RF=3, -three datacenters) means that every coordinator ``BATCH`` write turns into 450 -replica writes! This is why keeping ``BATCH``'s to the same partition is so -critical, otherwise you can exhaust significant CPU capacitity with a "single" -query. - - -Next Step: Investigate the Node(s) ----------------------------------- - -Once you have narrowed down the problem as much as possible (datacenter, rack -, node), login to one of the nodes using SSH and proceed to debug using -:ref:`logs `, :ref:`nodetool `, and -:ref:`os tools `. If you are not able to login you may still -have access to :ref:`logs ` and :ref:`nodetool ` -remotely. diff --git a/src/doc/4.0-alpha2/_sources/troubleshooting/index.rst.txt b/src/doc/4.0-alpha2/_sources/troubleshooting/index.rst.txt deleted file mode 100644 index 79b46d636..000000000 --- a/src/doc/4.0-alpha2/_sources/troubleshooting/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Troubleshooting -=============== - -As any distributed database does, sometimes Cassandra breaks and you will have -to troubleshoot what is going on. 
Generally speaking you can debug Cassandra -like any other distributed Java program, meaning that you have to find which -machines in your cluster are misbehaving and then isolate the problem using -logs and tools. Luckily Cassandra had a great set of instrospection tools to -help you. - -These pages include a number of command examples demonstrating various -debugging and analysis techniques, mostly for Linux/Unix systems. If you don't -have access to the machines running Cassandra, or are running on Windows or -another operating system you may not be able to use the exact commands but -there are likely equivalent tools you can use. - -.. toctree:: - :maxdepth: 2 - - finding_nodes - reading_logs - use_nodetool - use_tools diff --git a/src/doc/4.0-alpha2/_sources/troubleshooting/reading_logs.rst.txt b/src/doc/4.0-alpha2/_sources/troubleshooting/reading_logs.rst.txt deleted file mode 100644 index 08f7d4da6..000000000 --- a/src/doc/4.0-alpha2/_sources/troubleshooting/reading_logs.rst.txt +++ /dev/null @@ -1,267 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _reading-logs: - -Cassandra Logs -============== -Cassandra has rich support for logging and attempts to give operators maximum -insight into the database while at the same time limiting noise to the logs. - -Common Log Files ----------------- -Cassandra has three main logs, the ``system.log``, ``debug.log`` and -``gc.log`` which hold general logging messages, debugging logging messages, and -java garbage collection logs respectively. - -These logs by default live in ``${CASSANDRA_HOME}/logs``, but most Linux -distributions relocate logs to ``/var/log/cassandra``. Operators can tune -this location as well as what levels are logged using the provided -``logback.xml`` file. - -``system.log`` -^^^^^^^^^^^^^^ -This log is the default Cassandra log and is a good place to start any -investigation. Some examples of activities logged to this log: - -* Uncaught exceptions. These can be very useful for debugging errors. -* ``GCInspector`` messages indicating long garbage collector pauses. When long - pauses happen Cassandra will print how long and also what was the state of - the system (thread state) at the time of that pause. This can help narrow - down a capacity issue (either not enough heap or not enough spare CPU). -* Information about nodes joining and leaving the cluster as well as token - metadata (data ownersip) changes. This is useful for debugging network - partitions, data movements, and more. -* Keyspace/Table creation, modification, deletion. -* ``StartupChecks`` that ensure optimal configuration of the operating system - to run Cassandra -* Information about some background operational tasks (e.g. Index - Redistribution). 
- -As with any application, looking for ``ERROR`` or ``WARN`` lines can be a -great first step:: - - $ # Search for warnings or errors in the latest system.log - $ grep 'WARN\|ERROR' system.log | tail - ... - - $ # Search for warnings or errors in all rotated system.log - $ zgrep 'WARN\|ERROR' system.log.* | less - ... - -``debug.log`` -^^^^^^^^^^^^^^ -This log contains additional debugging information that may be useful when -troubleshooting but may be much noiser than the normal ``system.log``. Some -examples of activities logged to this log: - -* Information about compactions, including when they start, which sstables - they contain, and when they finish. -* Information about memtable flushes to disk, including when they happened, - how large the flushes were, and which commitlog segments the flush impacted. - -This log can be *very* noisy, so it is highly recommended to use ``grep`` and -other log analysis tools to dive deep. For example:: - - $ # Search for messages involving a CompactionTask with 5 lines of context - $ grep CompactionTask debug.log -C 5 - ... - - $ # Look at the distribution of flush tasks per keyspace - $ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c - 6 compaction_history: - 1 test_keyspace: - 2 local: - 17 size_estimates: - 17 sstable_activity: - - -``gc.log`` -^^^^^^^^^^^^^^ -The gc log is a standard Java GC log. With the default ``jvm.options`` -settings you get a lot of valuable information in this log such as -application pause times, and why pauses happened. This may help narrow -down throughput or latency issues to a mistuned JVM. For example you can -view the last few pauses:: - - $ grep stopped gc.log.0.current | tail - 2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds - 2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds - 2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds - 2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds - 2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds - 2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds - 2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds - 2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds - 2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds - 2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds - - -This shows a lot of valuable information including how long the application -was paused (meaning zero user queries were being serviced during the e.g. 33ms -JVM pause) as well as how long it took to enter the safepoint. 
You can use this -raw data to e.g. get the longest pauses:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current | sort -k 1 - 2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds - 2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds - 2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds - 2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds - 2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds - 2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds - 2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds - 2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds - 2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds - 2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds - -In this case any client waiting on a query would have experienced a `56ms` -latency at 17:13:41. - -Note that GC pauses are not _only_ garbage collection, although -generally speaking high pauses with fast safepoints indicate a lack of JVM heap -or mistuned JVM GC algorithm. High pauses with slow safepoints typically -indicate that the JVM is having trouble entering a safepoint which usually -indicates slow disk drives (Cassandra makes heavy use of memory mapped reads -which the JVM doesn't know could have disk latency, so the JVM safepoint logic -doesn't handle a blocking memory mapped read particularly well). - -Using these logs you can even get a pause distribution with something like -`histogram.py `_:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py - # NumSamples = 410293; Min = 0.00; Max = 11.49 - # Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498 - # each ∎ represents a count of 5470 - 0.0001 - 1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎ - 1.1496 - 2.2991 [ 15]: - 2.2991 - 3.4486 [ 5]: - 3.4486 - 4.5981 [ 1]: - 4.5981 - 5.7475 [ 5]: - 5.7475 - 6.8970 [ 9]: - 6.8970 - 8.0465 [ 1]: - 8.0465 - 9.1960 [ 0]: - 9.1960 - 10.3455 [ 0]: - 10.3455 - 11.4949 [ 2]: - -We can see in this case while we have very good average performance something -is causing multi second JVM pauses ... 
In this case it was mostly safepoint -pauses caused by slow disks:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current| sort -k 1 - 2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds - 2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds - 2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds - 2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds - 2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds - 2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds - 2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds - 2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds - 2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds - 2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds - -Sometimes reading and understanding java GC logs is hard, but you can take the -raw GC files and visualize them using tools such as `GCViewer -`_ which take the Cassandra GC log as -input and show you detailed visual information on your garbage collection -performance. This includes pause analysis as well as throughput information. -For a stable Cassandra JVM you probably want to aim for pauses less than -`200ms` and GC throughput greater than `99%` (ymmv). - -Java GC pauses are one of the leading causes of tail latency in Cassandra -(along with drive latency) so sometimes this information can be crucial -while debugging tail latency issues. - - -Getting More Information ------------------------- - -If the default logging levels are insuficient, ``nodetool`` can set higher -or lower logging levels for various packages and classes using the -``nodetool setlogginglevel`` command. Start by viewing the current levels:: - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - -Perhaps the ``Gossiper`` is acting up and we wish to enable it at ``TRACE`` -level for even more insight:: - - - $ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - org.apache.cassandra.gms.Gossiper TRACE - - $ grep TRACE debug.log | tail -2 - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating - heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ... - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local - heartbeat version 2341 greater than 2340 for 127.0.0.1:7000 - - -Note that any changes made this way are reverted on next Cassandra process -restart. 
To make the changes permanent add the appropriate rule to -``logback.xml``. - -.. code-block:: diff - - diff --git a/conf/logback.xml b/conf/logback.xml - index b2c5b10..71b0a49 100644 - --- a/conf/logback.xml - +++ b/conf/logback.xml - @@ -98,4 +98,5 @@ appender reference in the root level section below. - - - - + - - -Full Query Logger -^^^^^^^^^^^^^^^^^ - -Cassandra 4.0 additionally ships with support for full query logging. This -is a highly performant binary logging tool which captures Cassandra queries -in real time, writes them (if possible) to a log file, and ensures the total -size of the capture does not exceed a particular limit. FQL is enabled with -``nodetool`` and the logs are read with the provided ``bin/fqltool`` utility:: - - $ mkdir /var/tmp/fql_logs - $ nodetool enablefullquerylog --path /var/tmp/fql_logs - - # ... do some querying - - $ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail - Query time: 1530750927224 - Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name = - 'system_views' AND table_name = 'sstable_tasks'; - Values: - - Type: single - Protocol version: 4 - Query time: 1530750934072 - Query: select * from keyspace1.standard1 ; - Values: - - $ nodetool disablefullquerylog - -Note that if you want more information than this tool provides, there are other -live capture options available such as :ref:`packet capture `. diff --git a/src/doc/4.0-alpha2/_sources/troubleshooting/use_nodetool.rst.txt b/src/doc/4.0-alpha2/_sources/troubleshooting/use_nodetool.rst.txt deleted file mode 100644 index 5072f85d1..000000000 --- a/src/doc/4.0-alpha2/_sources/troubleshooting/use_nodetool.rst.txt +++ /dev/null @@ -1,245 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-nodetool: - -Use Nodetool -============ - -Cassandra's ``nodetool`` allows you to narrow problems from the cluster down -to a particular node and gives a lot of insight into the state of the Cassandra -process itself. There are dozens of useful commands (see ``nodetool help`` -for all the commands), but briefly some of the most useful for troubleshooting: - -.. _nodetool-status: - -Cluster Status --------------- - -You can use ``nodetool status`` to assess status of the cluster:: - - $ nodetool status - - Datacenter: dc1 - ======================= - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - UN 127.0.1.1 4.69 GiB 1 100.0% 35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e r1 - UN 127.0.1.2 4.71 GiB 1 100.0% 752e278f-b7c5-4f58-974b-9328455af73f r2 - UN 127.0.1.3 4.69 GiB 1 100.0% 9dc1a293-2cc0-40fa-a6fd-9e6054da04a7 r3 - -In this case we can see that we have three nodes in one datacenter with about -4.6GB of data each and they are all "up". 
The up/down status of a node is -independently determined by every node in the cluster, so you may have to run -``nodetool status`` on multiple nodes in a cluster to see the full view. - -You can use ``nodetool status`` plus a little grep to see which nodes are -down:: - - $ nodetool status | grep -v '^UN' - Datacenter: dc1 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - Datacenter: dc2 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - DN 127.0.0.5 105.73 KiB 1 33.3% df303ac7-61de-46e9-ac79-6e630115fd75 r1 - -In this case there are two datacenters and there is one node down in datacenter -``dc2`` and rack ``r1``. This may indicate an issue on ``127.0.0.5`` -warranting investigation. - -.. _nodetool-proxyhistograms: - -Coordinator Query Latency -------------------------- -You can view latency distributions of coordinator read and write latency -to help narrow down latency issues using ``nodetool proxyhistograms``:: - - $ nodetool proxyhistograms - Percentile Read Latency Write Latency Range Latency CAS Read Latency CAS Write Latency View Write Latency - (micros) (micros) (micros) (micros) (micros) (micros) - 50% 454.83 219.34 0.00 0.00 0.00 0.00 - 75% 545.79 263.21 0.00 0.00 0.00 0.00 - 95% 654.95 315.85 0.00 0.00 0.00 0.00 - 98% 785.94 379.02 0.00 0.00 0.00 0.00 - 99% 3379.39 2346.80 0.00 0.00 0.00 0.00 - Min 42.51 105.78 0.00 0.00 0.00 0.00 - Max 25109.16 43388.63 0.00 0.00 0.00 0.00 - -Here you can see the full latency distribution of reads, writes, range requests -(e.g. ``select * from keyspace.table``), CAS read (compare phase of CAS) and -CAS write (set phase of compare and set). These can be useful for narrowing -down high level latency problems, for example in this case if a client had a -20 millisecond timeout on their reads they might experience the occasional -timeout from this node but less than 1% (since the 99% read latency is 3.3 -milliseconds < 20 milliseconds). - -.. _nodetool-tablehistograms: - -Local Query Latency -------------------- - -If you know which table is having latency/error issues, you can use -``nodetool tablehistograms`` to get a better idea of what is happening -locally on a node:: - - $ nodetool tablehistograms keyspace table - Percentile SSTables Write Latency Read Latency Partition Size Cell Count - (micros) (micros) (bytes) - 50% 0.00 73.46 182.79 17084 103 - 75% 1.00 88.15 315.85 17084 103 - 95% 2.00 126.93 545.79 17084 103 - 98% 2.00 152.32 654.95 17084 103 - 99% 2.00 182.79 785.94 17084 103 - Min 0.00 42.51 24.60 14238 87 - Max 2.00 12108.97 17436.92 17084 103 - -This shows you percentile breakdowns particularly critical metrics. - -The first column contains how many sstables were read per logical read. A very -high number here indicates that you may have chosen the wrong compaction -strategy, e.g. ``SizeTieredCompactionStrategy`` typically has many more reads -per read than ``LeveledCompactionStrategy`` does for update heavy workloads. - -The second column shows you a latency breakdown of *local* write latency. In -this case we see that while the p50 is quite good at 73 microseconds, the -maximum latency is quite slow at 12 milliseconds. High write max latencies -often indicate a slow commitlog volume (slow to fsync) or large writes -that quickly saturate commitlog segments. - -The third column shows you a latency breakdown of *local* read latency. 
We can -see that local Cassandra reads are (as expected) slower than local writes, and -the read speed correlates highly with the number of sstables read per read. - -The fourth and fifth columns show distributions of partition size and column -count per partition. These are useful for determining if the table has on -average skinny or wide partitions and can help you isolate bad data patterns. -For example if you have a single cell that is 2 megabytes, that is probably -going to cause some heap pressure when it's read. - -.. _nodetool-tpstats: - -Threadpool State ----------------- - -You can use ``nodetool tpstats`` to view the current outstanding requests on -a particular node. This is useful for trying to find out which resource -(read threads, write threads, compaction, request response threads) the -Cassandra process lacks. For example:: - - $ nodetool tpstats - Pool Name Active Pending Completed Blocked All time blocked - ReadStage 2 0 12 0 0 - MiscStage 0 0 0 0 0 - CompactionExecutor 0 0 1940 0 0 - MutationStage 0 0 0 0 0 - GossipStage 0 0 10293 0 0 - Repair-Task 0 0 0 0 0 - RequestResponseStage 0 0 16 0 0 - ReadRepairStage 0 0 0 0 0 - CounterMutationStage 0 0 0 0 0 - MemtablePostFlush 0 0 83 0 0 - ValidationExecutor 0 0 0 0 0 - MemtableFlushWriter 0 0 30 0 0 - ViewMutationStage 0 0 0 0 0 - CacheCleanupExecutor 0 0 0 0 0 - MemtableReclaimMemory 0 0 30 0 0 - PendingRangeCalculator 0 0 11 0 0 - SecondaryIndexManagement 0 0 0 0 0 - HintsDispatcher 0 0 0 0 0 - Native-Transport-Requests 0 0 192 0 0 - MigrationStage 0 0 14 0 0 - PerDiskMemtableFlushWriter_0 0 0 30 0 0 - Sampler 0 0 0 0 0 - ViewBuildExecutor 0 0 0 0 0 - InternalResponseStage 0 0 0 0 0 - AntiEntropyStage 0 0 0 0 0 - - Message type Dropped Latency waiting in queue (micros) - 50% 95% 99% Max - READ 0 N/A N/A N/A N/A - RANGE_SLICE 0 0.00 0.00 0.00 0.00 - _TRACE 0 N/A N/A N/A N/A - HINT 0 N/A N/A N/A N/A - MUTATION 0 N/A N/A N/A N/A - COUNTER_MUTATION 0 N/A N/A N/A N/A - BATCH_STORE 0 N/A N/A N/A N/A - BATCH_REMOVE 0 N/A N/A N/A N/A - REQUEST_RESPONSE 0 0.00 0.00 0.00 0.00 - PAGED_RANGE 0 N/A N/A N/A N/A - READ_REPAIR 0 N/A N/A N/A N/A - -This command shows you all kinds of interesting statistics. The first section -shows a detailed breakdown of threadpools for each Cassandra stage, including -how many threads are current executing (Active) and how many are waiting to -run (Pending). Typically if you see pending executions in a particular -threadpool that indicates a problem localized to that type of operation. For -example if the ``RequestResponseState`` queue is backing up, that means -that the coordinators are waiting on a lot of downstream replica requests and -may indicate a lack of token awareness, or very high consistency levels being -used on read requests (for example reading at ``ALL`` ties up RF -``RequestResponseState`` threads whereas ``LOCAL_ONE`` only uses a single -thread in the ``ReadStage`` threadpool). On the other hand if you see a lot of -pending compactions that may indicate that your compaction threads cannot keep -up with the volume of writes and you may need to tune either the compaction -strategy or the ``concurrent_compactors`` or ``compaction_throughput`` options. - -The second section shows drops (errors) and latency distributions for all the -major request types. Drops are cumulative since process start, but if you -have any that indicate a serious problem as the default timeouts to qualify as -a drop are quite high (~5-10 seconds). Dropped messages often warrants further -investigation. - -.. 
_nodetool-compactionstats: - -Compaction State ----------------- - -As Cassandra is a LSM datastore, Cassandra sometimes has to compact sstables -together, which can have adverse effects on performance. In particular, -compaction uses a reasonable quantity of CPU resources, invalidates large -quantities of the OS `page cache `_, -and can put a lot of load on your disk drives. There are great -:ref:`os tools ` to determine if this is the case, but often it's a -good idea to check if compactions are even running using -``nodetool compactionstats``:: - - $ nodetool compactionstats - pending tasks: 2 - - keyspace.table: 2 - - id compaction type keyspace table completed total unit progress - 2062b290-7f3a-11e8-9358-cd941b956e60 Compaction keyspace table 21848273 97867583 bytes 22.32% - Active compaction remaining time : 0h00m04s - -In this case there is a single compaction running on the ``keyspace.table`` -table, has completed 21.8 megabytes of 97 and Cassandra estimates (based on -the configured compaction throughput) that this will take 4 seconds. You can -also pass ``-H`` to get the units in a human readable format. - -Generally each running compaction can consume a single core, but the more -you do in parallel the faster data compacts. Compaction is crucial to ensuring -good read performance so having the right balance of concurrent compactions -such that compactions complete quickly but don't take too many resources -away from query threads is very important for performance. If you notice -compaction unable to keep up, try tuning Cassandra's ``concurrent_compactors`` -or ``compaction_throughput`` options. diff --git a/src/doc/4.0-alpha2/_sources/troubleshooting/use_tools.rst.txt b/src/doc/4.0-alpha2/_sources/troubleshooting/use_tools.rst.txt deleted file mode 100644 index b1347cc6d..000000000 --- a/src/doc/4.0-alpha2/_sources/troubleshooting/use_tools.rst.txt +++ /dev/null @@ -1,542 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-os-tools: - -Diving Deep, Use External Tools -=============================== - -Machine access allows operators to dive even deeper than logs and ``nodetool`` -allow. While every Cassandra operator may have their personal favorite -toolsets for troubleshooting issues, this page contains some of the most common -operator techniques and examples of those tools. Many of these commands work -only on Linux, but if you are deploying on a different operating system you may -have access to other substantially similar tools that assess similar OS level -metrics and processes. - -JVM Tooling ------------ -The JVM ships with a number of useful tools. Some of them are useful for -debugging Cassandra issues, especially related to heap and execution stacks. 
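Most of these tools take the Cassandra process id (PID) as an argument. A quick sketch for finding it, assuming Cassandra was started via the standard ``CassandraDaemon`` main class (``jps`` ships with the JDK; the PID shown is just an example)::

    $ # Using the JDK's own process listing
    $ jps -l | grep CassandraDaemon
    12345 org.apache.cassandra.service.CassandraDaemon

    $ # Or with standard OS tooling
    $ pgrep -f CassandraDaemon
    12345

That PID is what you pass to ``jstat``, ``jstack`` and similar tools below.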
- -**NOTE**: There are two common gotchas with JVM tooling and Cassandra: - -1. By default Cassandra ships with ``-XX:+PerfDisableSharedMem`` set to prevent - long pauses (see ``CASSANDRA-9242`` and ``CASSANDRA-9483`` for details). If - you want to use JVM tooling you can instead have ``/tmp`` mounted on an in - memory ``tmpfs`` which also effectively works around ``CASSANDRA-9242``. -2. Make sure you run the tools as the same user as Cassandra is running as, - e.g. if the database is running as ``cassandra`` the tool also has to be - run as ``cassandra``, e.g. via ``sudo -u cassandra ``. - -Garbage Collection State (jstat) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you suspect heap pressure you can use ``jstat`` to dive deep into the -garbage collection state of a Cassandra process. This command is always -safe to run and yields detailed heap information including eden heap usage (E), -old generation heap usage (O), count of eden collections (YGC), time spend in -eden collections (YGCT), old/mixed generation collections (FGC) and time spent -in old/mixed generation collections (FGCT):: - - - jstat -gcutil 500ms - S0 S1 E O M CCS YGC YGCT FGC FGCT GCT - 0.00 0.00 81.53 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.94 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - -In this case we see we have a relatively healthy heap profile, with 31.16% -old generation heap usage and 83% eden. If the old generation routinely is -above 75% then you probably need more heap (assuming CMS with a 75% occupancy -threshold). If you do have such persistently high old gen that often means you -either have under-provisioned the old generation heap, or that there is too -much live data on heap for Cassandra to collect (e.g. because of memtables). -Another thing to watch for is time between young garbage collections (YGC), -which indicate how frequently the eden heap is collected. Each young gc pause -is about 20-50ms, so if you have a lot of them your clients will notice in -their high percentile latencies. - -Thread Information (jstack) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To get a point in time snapshot of exactly what Cassandra is doing, run -``jstack`` against the Cassandra PID. **Note** that this does pause the JVM for -a very brief period (<20ms).:: - - $ jstack > threaddump - - # display the threaddump - $ cat threaddump - ... 
- - # look at runnable threads - $grep RUNNABLE threaddump -B 1 - "Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000] - java.lang.Thread.State: RUNNABLE - -- - "Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - ... - - # Note that the nid is the Linux thread id - -Some of the most important information in the threaddumps are waiting/blocking -threads, including what locks or monitors the thread is blocking/waiting on. - -Basic OS Tooling ----------------- -A great place to start when debugging a Cassandra issue is understanding how -Cassandra is interacting with system resources. The following are all -resources that Cassandra makes heavy uses of: - -* CPU cores. For executing concurrent user queries -* CPU processing time. For query activity (data decompression, row merging, - etc...) -* CPU processing time (low priority). For background tasks (compaction, - streaming, etc ...) -* RAM for Java Heap. Used to hold internal data-structures and by default the - Cassandra memtables. Heap space is a crucial component of write performance - as well as generally. -* RAM for OS disk cache. Used to cache frequently accessed SSTable blocks. OS - disk cache is a crucial component of read performance. -* Disks. Cassandra cares a lot about disk read latency, disk write throughput, - and of course disk space. -* Network latency. Cassandra makes many internode requests, so network latency - between nodes can directly impact performance. -* Network throughput. Cassandra (as other databases) frequently have the - so called "incast" problem where a small request (e.g. ``SELECT * from - foo.bar``) returns a massively large result set (e.g. the entire dataset). - In such situations outgoing bandwidth is crucial. - -Often troubleshooting Cassandra comes down to troubleshooting what resource -the machine or cluster is running out of. Then you create more of that resource -or change the query pattern to make less use of that resource. - -High Level Resource Usage (top/htop) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra makes signifiant use of system resources, and often the very first -useful action is to run ``top`` or ``htop`` (`website -`_)to see the state of the machine. - -Useful things to look at: - -* System load levels. While these numbers can be confusing, generally speaking - if the load average is greater than the number of CPU cores, Cassandra - probably won't have very good (sub 100 millisecond) latencies. See - `Linux Load Averages `_ - for more information. -* CPU utilization. ``htop`` in particular can help break down CPU utilization - into ``user`` (low and normal priority), ``system`` (kernel), and ``io-wait`` - . Cassandra query threads execute as normal priority ``user`` threads, while - compaction threads execute as low priority ``user`` threads. 
High ``system`` - time could indicate problems like thread contention, and high ``io-wait`` - may indicate slow disk drives. This can help you understand what Cassandra - is spending processing resources doing. -* Memory usage. Look for which programs have the most resident memory, it is - probably Cassandra. The number for Cassandra is likely inaccurately high due - to how Linux (as of 2018) accounts for memory mapped file memory. - -.. _os-iostat: - -IO Usage (iostat) -^^^^^^^^^^^^^^^^^ -Use iostat to determine how data drives are faring, including latency -distributions, throughput, and utilization:: - - $ sudo iostat -xdm 2 - Linux 4.13.0-13-generic (hostname) 07/03/2018 _x86_64_ (8 CPU) - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.28 0.32 5.42 0.01 0.13 48.55 0.01 2.21 0.26 2.32 0.64 0.37 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 79.34 0.00 0.20 0.20 0.00 0.16 0.00 - sdc 0.34 0.27 0.76 0.36 0.01 0.02 47.56 0.03 26.90 2.98 77.73 9.21 1.03 - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.00 2.00 32.00 0.01 4.04 244.24 0.54 16.00 0.00 17.00 1.06 3.60 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 - sdc 0.00 24.50 0.00 114.00 0.00 11.62 208.70 5.56 48.79 0.00 48.79 1.12 12.80 - - -In this case we can see that ``/dev/sdc1`` is a very slow drive, having an -``await`` close to 50 milliseconds and an ``avgqu-sz`` close to 5 ios. The -drive is not particularly saturated (utilization is only 12.8%), but we should -still be concerned about how this would affect our p99 latency since 50ms is -quite long for typical Cassandra operations. That being said, in this case -most of the latency is present in writes (typically writes are more latent -than reads), which due to the LSM nature of Cassandra is often hidden from -the user. - -Important metrics to assess using iostat: - -* Reads and writes per second. These numbers will change with the workload, - but generally speaking the more reads Cassandra has to do from disk the - slower Cassandra read latencies are. Large numbers of reads per second - can be a dead giveaway that the cluster has insufficient memory for OS - page caching. -* Write throughput. Cassandra's LSM model defers user writes and batches them - together, which means that throughput to the underlying medium is the most - important write metric for Cassandra. -* Read latency (``r_await``). When Cassandra missed the OS page cache and reads - from SSTables, the read latency directly determines how fast Cassandra can - respond with the data. -* Write latency. Cassandra is less sensitive to write latency except when it - syncs the commit log. This typically enters into the very high percentiles of - write latency. - -Note that to get detailed latency breakdowns you will need a more advanced -tool such as :ref:`bcc-tools `. - -OS page Cache Usage -^^^^^^^^^^^^^^^^^^^ -As Cassandra makes heavy use of memory mapped files, the health of the -operating system's `Page Cache `_ is -crucial to performance. Start by finding how much available cache is in the -system:: - - $ free -g - total used free shared buff/cache available - Mem: 15 9 2 0 3 5 - Swap: 0 0 0 - -In this case 9GB of memory is used by user processes (Cassandra heap) and 8GB -is available for OS page cache. Of that, 3GB is actually used to cache files. -If most memory is used and unavailable to the page cache, Cassandra performance -can suffer significantly. 
This is why Cassandra starts with a reasonably small -amount of memory reserved for the heap. - -If you suspect that you are missing the OS page cache frequently you can use -advanced tools like :ref:`cachestat ` or -:ref:`vmtouch ` to dive deeper. - -Network Latency and Reliability -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Whenever Cassandra does writes or reads that involve other replicas, -``LOCAL_QUORUM`` reads for example, one of the dominant effects on latency is -network latency. When trying to debug issues with multi machine operations, -the network can be an important resource to investigate. You can determine -internode latency using tools like ``ping`` and ``traceroute`` or most -effectively ``mtr``:: - - $ mtr -nr www.google.com - Start: Sun Jul 22 13:10:28 2018 - HOST: hostname Loss% Snt Last Avg Best Wrst StDev - 1.|-- 192.168.1.1 0.0% 10 2.0 1.9 1.1 3.7 0.7 - 2.|-- 96.123.29.15 0.0% 10 11.4 11.0 9.0 16.4 1.9 - 3.|-- 68.86.249.21 0.0% 10 10.6 10.7 9.0 13.7 1.1 - 4.|-- 162.141.78.129 0.0% 10 11.5 10.6 9.6 12.4 0.7 - 5.|-- 162.151.78.253 0.0% 10 10.9 12.1 10.4 20.2 2.8 - 6.|-- 68.86.143.93 0.0% 10 12.4 12.6 9.9 23.1 3.8 - 7.|-- 96.112.146.18 0.0% 10 11.9 12.4 10.6 15.5 1.6 - 9.|-- 209.85.252.250 0.0% 10 13.7 13.2 12.5 13.9 0.0 - 10.|-- 108.170.242.238 0.0% 10 12.7 12.4 11.1 13.0 0.5 - 11.|-- 74.125.253.149 0.0% 10 13.4 13.7 11.8 19.2 2.1 - 12.|-- 216.239.62.40 0.0% 10 13.4 14.7 11.5 26.9 4.6 - 13.|-- 108.170.242.81 0.0% 10 14.4 13.2 10.9 16.0 1.7 - 14.|-- 72.14.239.43 0.0% 10 12.2 16.1 11.0 32.8 7.1 - 15.|-- 216.58.195.68 0.0% 10 25.1 15.3 11.1 25.1 4.8 - -In this example of ``mtr``, we can rapidly assess the path that your packets -are taking, as well as what their typical loss and latency are. Packet loss -typically leads to between ``200ms`` and ``3s`` of additional latency, so that -can be a common cause of latency issues. - -Network Throughput -^^^^^^^^^^^^^^^^^^ -As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is -useful to determine if network throughput is limited. One handy tool to do -this is `iftop `_ which -shows both bandwidth usage as well as connection information at a glance. 
An -example showing traffic during a stress run against a local ``ccm`` cluster:: - - $ # remove the -t for ncurses instead of pure text - $ sudo iftop -nNtP -i lo - interface: lo - IP address is: 127.0.0.1 - MAC address is: 00:00:00:00:00:00 - Listening on lo - # Host name (port/service if enabled) last 2s last 10s last 40s cumulative - -------------------------------------------------------------------------------------------- - 1 127.0.0.1:58946 => 869Kb 869Kb 869Kb 217KB - 127.0.0.3:9042 <= 0b 0b 0b 0B - 2 127.0.0.1:54654 => 736Kb 736Kb 736Kb 184KB - 127.0.0.1:9042 <= 0b 0b 0b 0B - 3 127.0.0.1:51186 => 669Kb 669Kb 669Kb 167KB - 127.0.0.2:9042 <= 0b 0b 0b 0B - 4 127.0.0.3:9042 => 3.30Kb 3.30Kb 3.30Kb 845B - 127.0.0.1:58946 <= 0b 0b 0b 0B - 5 127.0.0.1:9042 => 2.79Kb 2.79Kb 2.79Kb 715B - 127.0.0.1:54654 <= 0b 0b 0b 0B - 6 127.0.0.2:9042 => 2.54Kb 2.54Kb 2.54Kb 650B - 127.0.0.1:51186 <= 0b 0b 0b 0B - 7 127.0.0.1:36894 => 1.65Kb 1.65Kb 1.65Kb 423B - 127.0.0.5:7000 <= 0b 0b 0b 0B - 8 127.0.0.1:38034 => 1.50Kb 1.50Kb 1.50Kb 385B - 127.0.0.2:7000 <= 0b 0b 0b 0B - 9 127.0.0.1:56324 => 1.50Kb 1.50Kb 1.50Kb 383B - 127.0.0.1:7000 <= 0b 0b 0b 0B - 10 127.0.0.1:53044 => 1.43Kb 1.43Kb 1.43Kb 366B - 127.0.0.4:7000 <= 0b 0b 0b 0B - -------------------------------------------------------------------------------------------- - Total send rate: 2.25Mb 2.25Mb 2.25Mb - Total receive rate: 0b 0b 0b - Total send and receive rate: 2.25Mb 2.25Mb 2.25Mb - -------------------------------------------------------------------------------------------- - Peak rate (sent/received/total): 2.25Mb 0b 2.25Mb - Cumulative (sent/received/total): 576KB 0B 576KB - ============================================================================================ - -In this case we can see that bandwidth is fairly shared between many peers, -but if the total was getting close to the rated capacity of the NIC or was focussed -on a single client, that may indicate a clue as to what issue is occurring. - -Advanced tools --------------- -Sometimes as an operator you may need to really dive deep. This is where -advanced OS tooling can come in handy. - -.. _use-bcc-tools: - -bcc-tools -^^^^^^^^^ -Most modern Linux distributions (kernels newer than ``4.1``) support `bcc-tools -`_ for diving deep into performance problems. -First install ``bcc-tools``, e.g. via ``apt`` on Debian:: - - $ apt install bcc-tools - -Then you can use all the tools that ``bcc-tools`` contains. One of the most -useful tools is ``cachestat`` -(`cachestat examples `_) -which allows you to determine exactly how many OS page cache hits and misses -are happening:: - - $ sudo /usr/share/bcc/tools/cachestat -T 1 - TIME TOTAL MISSES HITS DIRTIES BUFFERS_MB CACHED_MB - 18:44:08 66 66 0 64 88 4427 - 18:44:09 40 40 0 75 88 4427 - 18:44:10 4353 45 4308 203 88 4427 - 18:44:11 84 77 7 13 88 4428 - 18:44:12 2511 14 2497 14 88 4428 - 18:44:13 101 98 3 18 88 4428 - 18:44:14 16741 0 16741 58 88 4428 - 18:44:15 1935 36 1899 18 88 4428 - 18:44:16 89 34 55 18 88 4428 - -In this case there are not too many page cache ``MISSES`` which indicates a -reasonably sized cache. These metrics are the most direct measurement of your -Cassandra node's "hot" dataset. If you don't have enough cache, ``MISSES`` will -be high and performance will be slow. If you have enough cache, ``MISSES`` will -be low and performance will be fast (as almost all reads are being served out -of memory). 
- -You can also measure disk latency distributions using ``biolatency`` -(`biolatency examples `_) -to get an idea of how slow Cassandra will be when reads miss the OS page Cache -and have to hit disks:: - - $ sudo /usr/share/bcc/tools/biolatency -D 10 - Tracing block device I/O... Hit Ctrl-C to end. - - - disk = 'sda' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 12 |****************************************| - 32 -> 63 : 9 |****************************** | - 64 -> 127 : 1 |*** | - 128 -> 255 : 3 |********** | - 256 -> 511 : 7 |*********************** | - 512 -> 1023 : 2 |****** | - - disk = 'sdc' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 0 | | - 32 -> 63 : 0 | | - 64 -> 127 : 41 |************ | - 128 -> 255 : 17 |***** | - 256 -> 511 : 13 |*** | - 512 -> 1023 : 2 | | - 1024 -> 2047 : 0 | | - 2048 -> 4095 : 0 | | - 4096 -> 8191 : 56 |***************** | - 8192 -> 16383 : 131 |****************************************| - 16384 -> 32767 : 9 |** | - -In this case most ios on the data drive (``sdc``) are fast, but many take -between 8 and 16 milliseconds. - -Finally ``biosnoop`` (`examples `_) -can be used to dive even deeper and see per IO latencies:: - - $ sudo /usr/share/bcc/tools/biosnoop | grep java | head - 0.000000000 java 17427 sdc R 3972458600 4096 13.58 - 0.000818000 java 17427 sdc R 3972459408 4096 0.35 - 0.007098000 java 17416 sdc R 3972401824 4096 5.81 - 0.007896000 java 17416 sdc R 3972489960 4096 0.34 - 0.008920000 java 17416 sdc R 3972489896 4096 0.34 - 0.009487000 java 17427 sdc R 3972401880 4096 0.32 - 0.010238000 java 17416 sdc R 3972488368 4096 0.37 - 0.010596000 java 17427 sdc R 3972488376 4096 0.34 - 0.011236000 java 17410 sdc R 3972488424 4096 0.32 - 0.011825000 java 17427 sdc R 3972488576 16384 0.65 - ... time passes - 8.032687000 java 18279 sdc R 10899712 122880 3.01 - 8.033175000 java 18279 sdc R 10899952 8192 0.46 - 8.073295000 java 18279 sdc R 23384320 122880 3.01 - 8.073768000 java 18279 sdc R 23384560 8192 0.46 - - -With ``biosnoop`` you see every single IO and how long they take. This data -can be used to construct the latency distributions in ``biolatency`` but can -also be used to better understand how disk latency affects performance. For -example this particular drive takes ~3ms to service a memory mapped read due to -the large default value (``128kb``) of ``read_ahead_kb``. To improve point read -performance you may may want to decrease ``read_ahead_kb`` on fast data volumes -such as SSDs while keeping the a higher value like ``128kb`` value is probably -right for HDs. There are tradeoffs involved, see `queue-sysfs -`_ docs for more -information, but regardless ``biosnoop`` is useful for understanding *how* -Cassandra uses drives. - -.. _use-vmtouch: - -vmtouch -^^^^^^^ -Sometimes it's useful to know how much of the Cassandra data files are being -cached by the OS. A great tool for answering this question is -`vmtouch `_. - -First install it:: - - $ git clone https://github.com/hoytech/vmtouch.git - $ cd vmtouch - $ make - -Then run it on the Cassandra data directory:: - - $ ./vmtouch /var/lib/cassandra/data/ - Files: 312 - Directories: 92 - Resident Pages: 62503/64308 244M/251M 97.2% - Elapsed: 0.005657 seconds - -In this case almost the entire dataset is hot in OS page Cache. Generally -speaking the percentage doesn't really matter unless reads are missing the -cache (per e.g. 
:ref:`cachestat `), in which case having -additional memory may help read performance. - -CPU Flamegraphs -^^^^^^^^^^^^^^^ -Cassandra often uses a lot of CPU, but telling *what* it is doing can prove -difficult. One of the best ways to analyze Cassandra on CPU time is to use -`CPU Flamegraphs `_ -which display in a useful way which areas of Cassandra code are using CPU. This -may help narrow down a compaction problem to a "compaction problem dropping -tombstones" or just generally help you narrow down what Cassandra is doing -while it is having an issue. To get CPU flamegraphs follow the instructions for -`Java Flamegraphs -`_. - -Generally: - -1. Enable the ``-XX:+PreserveFramePointer`` option in Cassandra's - ``jvm.options`` configuation file. This has a negligible performance impact - but allows you actually see what Cassandra is doing. -2. Run ``perf`` to get some data. -3. Send that data through the relevant scripts in the FlameGraph toolset and - convert the data into a pretty flamegraph. View the resulting SVG image in - a browser or other image browser. - -For example just cloning straight off github we first install the -``perf-map-agent`` to the location of our JVMs (assumed to be -``/usr/lib/jvm``):: - - $ sudo bash - $ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/ - $ cd /usr/lib/jvm - $ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent - $ cd perf-map-agent - $ cmake . - $ make - -Now to get a flamegraph:: - - $ git clone --depth=1 https://github.com/brendangregg/FlameGraph - $ sudo bash - $ cd FlameGraph - $ # Record traces of Cassandra and map symbols for all java processes - $ perf record -F 49 -a -g -p -- sleep 30; ./jmaps - $ # Translate the data - $ perf script > cassandra_stacks - $ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \ - ./flamegraph.pl --color=java --hash > cassandra_flames.svg - - -The resulting SVG is searchable, zoomable, and generally easy to introspect -using a browser. - -.. _packet-capture: - -Packet Capture -^^^^^^^^^^^^^^ -Sometimes you have to understand what queries a Cassandra node is performing -*right now* to troubleshoot an issue. For these times trusty packet capture -tools like ``tcpdump`` and `Wireshark -`_ can be very helpful to dissect packet captures. -Wireshark even has native `CQL support -`_ although it sometimes has -compatibility issues with newer Cassandra protocol releases. - -To get a packet capture first capture some packets:: - - $ sudo tcpdump -U -s0 -i -w cassandra.pcap -n "tcp port 9042" - -Now open it up with wireshark:: - - $ wireshark cassandra.pcap - -If you don't see CQL like statements try telling to decode as CQL by right -clicking on a packet going to 9042 -> ``Decode as`` -> select CQL from the -dropdown for port 9042. - -If you don't want to do this manually or use a GUI, you can also use something -like `cqltrace `_ to ease obtaining and -parsing CQL packet captures. diff --git a/src/doc/4.0-alpha2/_static/ajax-loader.gif b/src/doc/4.0-alpha2/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab..000000000 Binary files a/src/doc/4.0-alpha2/_static/ajax-loader.gif and /dev/null differ diff --git a/src/doc/4.0-alpha2/_static/basic.css b/src/doc/4.0-alpha2/_static/basic.css deleted file mode 100644 index 0807176ec..000000000 --- a/src/doc/4.0-alpha2/_static/basic.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. 
- * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 
450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: 
manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - 
div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/src/doc/4.0-alpha2/_static/comment-bright.png b/src/doc/4.0-alpha2/_static/comment-bright.png deleted file mode 100644 index 15e27edb1..000000000 Binary files a/src/doc/4.0-alpha2/_static/comment-bright.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_static/comment-close.png b/src/doc/4.0-alpha2/_static/comment-close.png deleted file mode 100644 index 4d91bcf57..000000000 Binary files a/src/doc/4.0-alpha2/_static/comment-close.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_static/comment.png b/src/doc/4.0-alpha2/_static/comment.png deleted file mode 100644 index dfbc0cbd5..000000000 Binary files a/src/doc/4.0-alpha2/_static/comment.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_static/doctools.js b/src/doc/4.0-alpha2/_static/doctools.js deleted file mode 100644 index 344db17dd..000000000 --- a/src/doc/4.0-alpha2/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. 
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var bbox = span.getBBox(); - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - var parentOfText = node.parentNode.parentNode; - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? 
singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git 
a/src/doc/4.0-alpha2/_static/documentation_options.js b/src/doc/4.0-alpha2/_static/documentation_options.js deleted file mode 100644 index d28647eb8..000000000 --- a/src/doc/4.0-alpha2/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, -}; \ No newline at end of file diff --git a/src/doc/4.0-alpha2/_static/down-pressed.png b/src/doc/4.0-alpha2/_static/down-pressed.png deleted file mode 100644 index 5756c8cad..000000000 Binary files a/src/doc/4.0-alpha2/_static/down-pressed.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_static/down.png b/src/doc/4.0-alpha2/_static/down.png deleted file mode 100644 index 1b3bdad2c..000000000 Binary files a/src/doc/4.0-alpha2/_static/down.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_static/extra.css b/src/doc/4.0-alpha2/_static/extra.css deleted file mode 100644 index 715e2a850..000000000 --- a/src/doc/4.0-alpha2/_static/extra.css +++ /dev/null @@ -1,59 +0,0 @@ -div:not(.highlight) > pre { - background: #fff; - border: 1px solid #e1e4e5; - color: #404040; - margin: 1px 0 24px 0; - overflow-x: auto; - padding: 12px 12px; - font-size: 12px; -} - -a.reference.internal code.literal { - border: none; - font-size: 12px; - color: #2980B9; - padding: 0; - background: none; -} - -a.reference.internal:visited code.literal { - color: #9B59B6; - padding: 0; - background: none; -} - - -/* override table width restrictions */ -.wy-table-responsive table td, .wy-table-responsive table th { - white-space: normal; -} - -.wy-table-responsive { - margin-bottom: 24px; - max-width: 100%; - overflow: visible; -} - -table.contentstable { - margin: 0; -} - -td.rightcolumn { - padding-left: 30px; -} - -div#wipwarning { - font-size: 14px; - border: 1px solid #ecc; - color: #f66; - background: #ffe8e8; - padding: 10px 30px; - margin-bottom: 30px; -} -.content-container{ - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; - width:100%; -} diff --git a/src/doc/4.0-alpha2/_static/file.png b/src/doc/4.0-alpha2/_static/file.png deleted file mode 100644 index a858a410e..000000000 Binary files a/src/doc/4.0-alpha2/_static/file.png and /dev/null differ diff --git a/src/doc/4.0-alpha2/_static/jquery-3.2.1.js b/src/doc/4.0-alpha2/_static/jquery-3.2.1.js deleted file mode 100644 index d2d8ca479..000000000 --- a/src/doc/4.0-alpha2/_static/jquery-3.2.1.js +++ /dev/null @@ -1,10253 +0,0 @@ -/*! - * jQuery JavaScript Library v3.2.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2017-03-20T18:59Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? 
- factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.2.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. 
- each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && Array.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? 
- [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
-function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. 
- if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - disabledAncestor( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. - } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( el ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push("~="); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push(".#.+[+~]"); - } - }); - - assert(function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement("input"); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - 
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - if ( support.matchesSelector && documentIsHTML && - !compilerCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) {} - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? 
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -var risSimple = /^.[^:#\[\.,]*$/; - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Simple selector that can be filtered directly, removing non-Elements - if ( risSimple.test( qualifier ) ) { - return jQuery.filter( qualifier, elements, not ); - } - - // Complex selector, compare the two sets, removing non-Elements - qualifier = jQuery.filter( qualifier, elements ); - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; - } ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? - jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( nodeName( elem, "iframe" ) ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( jQuery.isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( jQuery.isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? 
- [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. 
- // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. - } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "
", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps 
(WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( ">tbody", elem )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( isFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl ) { - jQuery._evalUrl( node.src ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = jQuery.contains( elem.ownerDocument, elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rmargin = ( /^margin/ ); - -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - div.style.cssText = - "box-sizing:border-box;" + - "position:relative;display:block;" + - "margin:auto;border:1px;padding:1px;" + - "top:1%;width:50%"; - div.innerHTML = ""; - documentElement.appendChild( container ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = divStyle.marginLeft === "2px"; - boxSizingReliableVal = divStyle.width === "4px"; - - // Support: Android 4.0 - 4.3 only - // Some styles come back with percentage values, even though they shouldn't - div.style.marginRight = "50%"; - pixelMarginRightVal = divStyle.marginRight === "4px"; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + - "padding:0;margin-top:1px;position:absolute"; - container.appendChild( div ); - - jQuery.extend( support, { - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelMarginRight: function() { - computeStyleTests(); - return pixelMarginRightVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }, - - cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style; - -// Return a css property mapped to a potentially vendor prefixed property -function vendorPropName( name ) { - - // Shortcut for names that are not vendor prefixed - if ( name in emptyStyle ) { - return name; - } - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a property mapped along what jQuery.cssProps suggests or to -// a vendor prefixed property. -function finalPropName( name ) { - var ret = jQuery.cssProps[ name ]; - if ( !ret ) { - ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; - } - return ret; -} - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i, - val = 0; - - // If we already have the right measurement, avoid augmentation - if ( extra === ( isBorderBox ? "border" : "content" ) ) { - i = 4; - - // Otherwise initialize for horizontal or vertical properties - } else { - i = name === "width" ? 
1 : 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // At this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - - // At this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // At this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with computed style - var valueIsBorderBox, - styles = getStyles( elem ), - val = curCSS( elem, name, styles ), - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test( val ) ) { - return val; - } - - // Check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && - ( support.boxSizingReliable() || val === elem.style[ name ] ); - - // Fall back to offsetWidth/Height when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - if ( val === "auto" ) { - val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; - } - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - - // Use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - "float": "cssFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - if ( type === "number" ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
- swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - } ) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = extra && getStyles( elem ), - subtract = extra && augmentWidthOrHeight( - elem, - name, - extra, - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ); - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ name ] = value; - value = jQuery.css( elem, name ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && - ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || - jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = jQuery.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 13 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( 
restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( jQuery.isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - jQuery.proxy( result.stop, result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( type === "string" ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = value.match( rnothtmlwhite ) || []; - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, isFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 
2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; -} ); - -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - - - - -support.focusin = "onfocusin" in window; - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = jQuery.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport 
); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - 
// If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - 
contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -' - ---- -
-
- -
-
-
- -
-

Dynamo

-
-

Gossip

-
-

Todo

-

todo

-
-
-
-

Failure Detection

-
-

Todo

-

todo

-
-
-
-

Token Ring/Ranges

-
-

Todo

-

todo

-
-
-
-

Replication

-

The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are SimpleStrategy and NetworkTopologyStrategy.

-
-

SimpleStrategy

-

SimpleStrategy allows a single integer replication_factor to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if replication_factor is 3, then three different nodes should store -a copy of each row.

-

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until replication_factor distinct nodes have been added to the set of replicas.
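The ring walk described above can be sketched in a few lines of Python. This is an illustration only, not Cassandra's implementation; the tokens and node names are made up.

```python
# Illustrative sketch (not Cassandra's code) of the SimpleStrategy ring walk:
# walk the ring clockwise from the token range of interest, adding each owning
# node that is not yet a replica until replication_factor nodes are collected.
ring = [(0, "node1"), (25, "node2"), (50, "node3"), (75, "node1"), (90, "node2")]
replication_factor = 3

def simple_strategy_replicas(start_index, ring, replication_factor):
    distinct_nodes = {node for _, node in ring}
    target = min(replication_factor, len(distinct_nodes))
    replicas = []
    i = start_index
    while len(replicas) < target:
        node = ring[i % len(ring)][1]
        if node not in replicas:
            replicas.append(node)
        i += 1
    return replicas

print(simple_strategy_replicas(0, ring, replication_factor))
# ['node1', 'node2', 'node3']
```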

-
-
-

NetworkTopologyStrategy

-

NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your -cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier -to add new physical or virtual datacenters to the cluster later.

-

In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose -replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication -factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one -replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially surprising -implications. For example, if the racks do not contain an equal number of -nodes, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to -configure all nodes on a single “rack”.
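As a concrete illustration of per-datacenter replication factors, the sketch below creates a keyspace with the DataStax Python driver. The contact point, keyspace name, and datacenter names ("dc1", "dc2") are placeholders, not values taken from this documentation.

```python
# Sketch: create a NetworkTopologyStrategy keyspace with a per-datacenter
# replication factor. Keyspace and datacenter names are placeholders.
from cassandra.cluster import Cluster

cluster = Cluster(["127.0.0.1"])
session = cluster.connect()
session.execute("""
    CREATE KEYSPACE IF NOT EXISTS example_ks
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3}
""")
cluster.shutdown()
```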

-
-
-

Transient Replication

-

Transient replication allows you to configure a subset of replicas to only replicate data that hasn’t been incrementally -repaired. This allows you to decouple data redundancy from availability. For instance, if you have a keyspace replicated -at rf 3, and alter it to rf 5 with 2 transient replicas, you go from being able to tolerate one failed replica to being -able to tolerate two, without a corresponding increase in storage usage. This is because 3 nodes will replicate all the data -for a given token range, and the other 2 will only replicate data that hasn’t been incrementally repaired.

-

To use transient replication, you first need to enable it in cassandra.yaml. Once enabled, both SimpleStrategy and -NetworkTopologyStrategy can be configured to transiently replicate data. You configure it by specifying replication factor -as <total_replicas>/<transient_replicas>. Both SimpleStrategy and NetworkTopologyStrategy support configuring transient -replication.
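The sketch below is one way the <total_replicas>/<transient_replicas> format described above might be used, again via the DataStax Python driver: five replicas in one datacenter, two of them transient. The contact point, keyspace, and datacenter names are placeholders, and transient replication must already be enabled in cassandra.yaml.

```python
# Sketch: 5 total replicas in 'dc1', 2 of which are transient, using the
# <total_replicas>/<transient_replicas> format. Names are placeholders.
from cassandra.cluster import Cluster

session = Cluster(["127.0.0.1"]).connect()
session.execute("""
    CREATE KEYSPACE IF NOT EXISTS transient_ks
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': '5/2'}
""")
```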

-

Transiently replicated keyspaces only support tables created with read_repair set to NONE and monotonic reads are not currently supported. -You also can’t use LWT, logged batches, and counters in 4.0. You will possibly never be able to use materialized views -with transiently replicated keyspaces and probably never be able to use 2i with them.

-

Transient replication is an experimental feature that may not be ready for production use. The expected audience is experienced -users of Cassandra capable of fully validating a deployment of their particular application. That means being able to check -that operations like reads, writes, decommission, remove, rebuild, repair, and replace all work with your queries, data, -configuration, operational practices, and availability requirements.

-

It is anticipated that 4.next will support monotonic reads with transient replication as well as LWT, logged batches, and -counters.

-
-
-
-

Tunable Consistency

-

Cassandra supports a per-operation tradeoff between consistency and availability through Consistency Levels. -Essentially, an operation’s consistency level specifies how many of the replicas need to respond to the coordinator in -order to consider the operation a success.

-

The following consistency levels are available:

-
-
ONE
-
Only a single replica must respond.
-
TWO
-
Two replicas must respond.
-
THREE
-
Three replicas must respond.
-
QUORUM
-
A majority (n/2 + 1) of the replicas must respond.
-
ALL
-
All of the replicas must respond.
-
LOCAL_QUORUM
-
A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
EACH_QUORUM
-
A majority of the replicas in each datacenter must respond.
-
LOCAL_ONE
-
Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not -sent to replicas in a remote datacenter.
-
ANY
-
A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later -attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for -write operations.
-
-

Write operations are always sent to all replicas, regardless of consistency level. The consistency level simply -controls how many responses the coordinator waits for before responding to the client.

-

For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level, with one exception. Speculative retry may issue a redundant read request to an extra replica if the other replicas -have not responded within a specified time window.
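Consistency levels are chosen per operation by the client. A minimal sketch using the DataStax Python driver follows (one possible client library; the contact point, keyspace, and table names are placeholders): the read is issued at QUORUM and the write at ONE.

```python
# Sketch: setting the consistency level per operation with the Python driver.
# Keyspace, table, and contact point are placeholders.
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

session = Cluster(["127.0.0.1"]).connect("example_ks")

read = SimpleStatement("SELECT name FROM users WHERE id = %s",
                       consistency_level=ConsistencyLevel.QUORUM)
rows = session.execute(read, (42,))

write = SimpleStatement("INSERT INTO users (id, name) VALUES (%s, %s)",
                        consistency_level=ConsistencyLevel.ONE)
session.execute(write, (42, "alice"))
```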

-
-

Picking Consistency Levels

-

It is common to pick read and write consistency levels that are high enough to overlap, resulting in “strong” -consistency. This is typically expressed as W + R > RF, where W is the write consistency level, R is the -read consistency level, and RF is the replication factor. For example, if RF = 3, a QUORUM request will -require responses from at least two of the three replicas. If QUORUM is used for both writes and reads, at least -one of the replicas is guaranteed to participate in both the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, LOCAL_QUORUM can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter.
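The overlap rule above is simple arithmetic; the following pure-Python sketch makes it explicit for RF = 3.

```python
# Sketch of the W + R > RF rule: with RF = 3, QUORUM is floor(3/2) + 1 = 2,
# so QUORUM writes plus QUORUM reads always overlap on at least one replica.
def quorum(rf):
    return rf // 2 + 1

def overlaps(write_cl, read_cl, rf):
    return write_cl + read_cl > rf

rf = 3
print(quorum(rf))                            # 2
print(overlaps(quorum(rf), quorum(rf), rf))  # True  -> "strong" consistency
print(overlaps(1, 1, rf))                    # False -> ONE + ONE may miss the latest write
```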

-

If this type of strong consistency isn’t required, lower consistency levels like ONE may be used to improve -throughput, latency, and availability.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/architecture/guarantees.html b/src/doc/4.0-alpha2/architecture/guarantees.html deleted file mode 100644 index 9cff7c1ad..000000000 --- a/src/doc/4.0-alpha2/architecture/guarantees.html +++ /dev/null @@ -1,114 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Guarantees" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
- -
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/architecture/index.html b/src/doc/4.0-alpha2/architecture/index.html deleted file mode 100644 index f05af8c4d..000000000 --- a/src/doc/4.0-alpha2/architecture/index.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Architecture" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha2/architecture/overview.html b/src/doc/4.0-alpha2/architecture/overview.html deleted file mode 100644 index 2a2b88ec7..000000000 --- a/src/doc/4.0-alpha2/architecture/overview.html +++ /dev/null @@ -1,114 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Overview" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha2/architecture/storage_engine.html b/src/doc/4.0-alpha2/architecture/storage_engine.html deleted file mode 100644 index 37535e399..000000000 --- a/src/doc/4.0-alpha2/architecture/storage_engine.html +++ /dev/null @@ -1,293 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Storage Engine" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Storage Engine

-
-

CommitLog

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables.
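A minimal sketch (plain Python, not Cassandra code) of the write path described above: append to the commit log first, then apply to the memtable, and replay the log on startup.

```python
# Sketch of the durability ordering described above: log first, memtable second,
# replay on startup. Real commitlog segments are on-disk files, not a list.
commitlog = []   # stand-in for the append-only commitlog segments
memtable = {}    # stand-in for the in-memory memtable

def write(key, value):
    commitlog.append((key, value))  # durability first
    memtable[key] = value           # then the in-memory structure

def replay_on_startup():
    memtable.clear()
    for key, value in commitlog:
        memtable[key] = value

write("k1", "v1")
write("k2", "v2")
memtable.clear()       # simulate losing the memtable in an unexpected shutdown
replay_on_startup()
print(memtable)        # {'k1': 'v1', 'k2': 'v2'}
```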

-

All mutations are write-optimized by being stored in commitlog segments, reducing the number of seeks needed to write to disk. Commitlog segments are limited by the “commitlog_segment_size_in_mb” option; once the size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all of their data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running “nodetool drain” before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup.

-
    -
  • commitlog_segment_size_in_mb: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via the max_mutation_size_in_kb setting in cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024.
  • -
-

*NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*
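The two sizing rules above can be checked with a little arithmetic; the sketch below uses the default segment size of 32 MB (the values are examples, not recommendations).

```python
# Sketch of the sizing rules described above, using the default 32 MB segment.
commitlog_segment_size_in_mb = 32

# Default max mutation size: half a segment, expressed in KB.
default_max_mutation_size_in_kb = commitlog_segment_size_in_mb * 1024 // 2
print(default_max_mutation_size_in_kb)  # 16384

# If max_mutation_size_in_kb is set explicitly, the segment must be at least
# twice that size (converted back to MB).
def segment_large_enough(segment_size_in_mb, max_mutation_size_in_kb):
    return segment_size_in_mb >= 2 * max_mutation_size_in_kb / 1024

print(segment_large_enough(32, 16384))  # True
print(segment_large_enough(8, 16384))   # False
```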

-

Default Value: 32

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied.

-
    -
  • commitlog_sync: may be either “periodic” or “batch.”

    -
      -
    • batch: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait “commitlog_sync_batch_window_in_ms” milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason.

      -
        -
      • commitlog_sync_batch_window_in_ms: Time to wait between “batch” fsyncs
      • -
      -

      Default Value: 2

      -
    • -
    • periodic: In periodic mode, writes are immediately ack’ed, and the CommitLog is simply synced every “commitlog_sync_period_in_ms” milliseconds.

      -
        -
      • commitlog_sync_period_in_ms: Time to wait between “periodic” fsyncs
      • -
      -

      Default Value: 10000

      -
    • -
    -
  • -
-

Default Value: batch

-

* NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period or more if the sync is delayed. If using “batch” mode, it is recommended to store commitlogs in a separate, dedicated device.

-
    -
  • commitlog_directory: This option is commented out by default. When running on magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
  • -
-

Default Value: /var/lib/cassandra/commitlog

-
    -
  • commitlog_compression: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported.
  • -
-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
    -
  • commitlog_total_space_in_mb: Total space to use for commit logs on disk.
  • -
-

If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume.

-

Default Value: 8192

-
-
-

Memtables

-

Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable SSTables. This can be triggered in several -ways:

-
    -
  • The memory usage of the memtables exceeds the configured threshold (see memtable_cleanup_threshold)
  • -
  • The CommitLog approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to -be freed
  • -
-

Memtables may be stored entirely on-heap or partially off-heap, depending on memtable_allocation_type.

-
-
-

SSTables

-

SSTables are the immutable data files that Cassandra uses for persisting data on disk.

-

As SSTables are flushed to disk from Memtables or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.
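A toy sketch (plain Python) of what a compaction does with the key/value view of two SSTables: merge the immutable inputs, keep the newest value per key, and write one new sorted output. Real compactions also consider timestamps, tombstones, and the configured compaction strategy.

```python
# Toy compaction sketch: merge immutable inputs, newest value wins, output sorted.
sstable_older = {"a": "1", "b": "2", "c": "3"}
sstable_newer = {"b": "20", "d": "40"}

def compact(*sstables_oldest_first):
    merged = {}
    for sstable in sstables_oldest_first:
        merged.update(sstable)           # later (newer) tables overwrite older values
    return dict(sorted(merged.items()))  # output is kept in sorted order

print(compact(sstable_older, sstable_newer))
# {'a': '1', 'b': '20', 'c': '3', 'd': '40'}
```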

-

Each SSTable is comprised of multiple components stored in separate files:

-
-
Data.db
-
The actual data, i.e. the contents of rows.
-
Index.db
-
An index from partition keys to positions in the Data.db file. For wide partitions, this may also include an -index to rows within a partition.
-
Summary.db
-
A sampling of (by default) every 128th entry in the Index.db file.
-
Filter.db
-
A Bloom Filter of the partition keys in the SSTable.
-
CompressionInfo.db
-
Metadata about the offsets and lengths of compression chunks in the Data.db file.
-
Statistics.db
-
Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, -repair, compression, TTLs, and more.
-
Digest.crc32
-
A CRC-32 digest of the Data.db file.
-
TOC.txt
-
A plain text list of the component files for the SSTable.
-
-

Within the Data.db file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, Murmur3Partitioner, is used). Within a partition, rows are -stored in the order of their clustering keys.

-

SSTables can be optionally compressed using block-based compression.

-
-
-

SSTable Versions

-

This section was created using the following -gist -which utilized this original -source.

-

The version numbers, to date are:

-
-

Version 0

-
    -
  • b (0.7.0): added version to sstable filenames
  • -
  • c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings
  • -
  • d (0.7.0): row size in data component becomes a long instead of int
  • -
  • e (0.7.0): stores undecorated keys in data and index components
  • -
  • f (0.7.0): switched bloom filter implementations in data component
  • -
  • g (0.8): tracks flushed-at context in metadata component
  • -
-
-
-

Version 1

-
    -
  • h (1.0): tracks max client timestamp in metadata component
  • -
  • hb (1.0.3): records compression ratio in metadata component
  • -
  • hc (1.0.4): records partitioner in metadata component
  • -
  • hd (1.0.10): includes row tombstones in maxtimestamp
  • -
  • he (1.1.3): includes ancestors generation in metadata component
  • -
  • hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782)
  • -
  • ia (1.2.0):
      -
    • column indexes are promoted to the index file
    • -
    • records estimated histogram of deletion times in tombstones
    • -
    • bloom filter (keys and columns) upgraded to Murmur3
    • -
    -
  • -
  • ib (1.2.1): tracks min client timestamp in metadata component
  • -
  • ic (1.2.5): omits per-row bloom filter of column names
  • -
-
-
-

Version 2

-
    -
  • ja (2.0.0):
      -
    • super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. We do need a major version bump however, because we should not allow streaming of super columns into this new format)
    • -
    • tracks max local deletiontime in sstable metadata
    • -
    • records bloom_filter_fp_chance in metadata component
    • -
    • remove data size and column count from data file (CASSANDRA-4180)
    • -
    • tracks max/min column values (according to comparator)
    • -
    -
  • -
  • jb (2.0.1):
      -
    • switch from crc32 to adler32 for compression checksums
    • -
    • checksum the compressed data
    • -
    -
  • -
  • ka (2.1.0):
      -
    • new Statistics.db file format
    • -
    • index summaries can be downsampled and the sampling level is persisted
    • -
    • switch uncompressed checksums to adler32
    • -
    • tracks presence of legacy (local and remote) counter shards
    • -
    -
  • -
  • la (2.2.0): new file name format
  • -
  • lb (2.2.7): commit log lower bound included
  • -
-
-
-

Version 3

-
    -
  • ma (3.0.0):
      -
    • swap bf hash order
    • -
    • store rows natively
    • -
    -
  • -
  • mb (3.0.7, 3.7): commit log lower bound included
  • -
  • mc (3.0.8, 3.9): commit log intervals included
  • -
-
-
-

Example Code

-

The following example is useful for finding all sstables that do not match the “ib” SSTable version:

-
find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots"
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/bugs.html b/src/doc/4.0-alpha2/bugs.html deleted file mode 100644 index c67aac28e..000000000 --- a/src/doc/4.0-alpha2/bugs.html +++ /dev/null @@ -1,109 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Reporting Bugs" -doc-header-links: ' - - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Reporting Bugs

-

If you encounter a problem with Cassandra, the first places to ask for help are the user mailing list and the cassandra Slack room.

-

If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the Apache Cassandra JIRA. Please provide as much -detail as you can on your problem, and don’t forget to indicate which version of Cassandra you are running and in which -environment.

-

Further details on how to contribute can be found at our Contributing to Cassandra section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/configuration/cassandra_config_file.html b/src/doc/4.0-alpha2/configuration/cassandra_config_file.html deleted file mode 100644 index 273006343..000000000 --- a/src/doc/4.0-alpha2/configuration/cassandra_config_file.html +++ /dev/null @@ -1,1957 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Configuring Cassandra" - -doc-title: "Cassandra Configuration File" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Cassandra Configuration File

-
-

cluster_name

-

The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another.

-

Default Value: ‘Test Cluster’

-
-
-

num_tokens

-

This defines the number of tokens randomly assigned to this node on the ring. -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens, assuming they have equal hardware capability.

-

If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below.

-

Specifying initial_token will override this setting on the node’s initial start; -on subsequent starts, this setting will apply even if initial_token is set.

-

If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations

-

Default Value: 256

-
-
-

allocate_tokens_for_keyspace

-

This option is commented out by default.

-

Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replica factor.

-

The load assigned to each node will be close to proportional to its number of -vnodes.

-

Only supported with the Murmur3Partitioner.

-

Replica factor is determined via the replication strategy used by the specified -keyspace.

-

Default Value: KEYSPACE

-
-
-

allocate_tokens_for_local_replication_factor

-

This option is commented out by default.

-

Replica factor is explicitly set, regardless of keyspace or datacenter. -This is the replica factor within the datacenter, like NTS.

-

Default Value: 3

-
-
-

initial_token

-

This option is commented out by default.

-

initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) – in which case you should provide a -comma-separated list – it’s primarily used when adding nodes to legacy clusters -that do not have vnodes enabled.

-
-
-

hinted_handoff_enabled

-

See http://wiki.apache.org/cassandra/HintedHandoff -May either be “true” or “false” to enable globally

-

Default Value: true

-
-
-

hinted_handoff_disabled_datacenters

-

This option is commented out by default.

-

When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff

-

Default Value (complex option):

-
#    - DC1
-#    - DC2
-
-
-
-
-

max_hint_window_in_ms

-

This defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again.

-

Default Value: 10800000 # 3 hours

-
-
-

hinted_handoff_throttle_in_kb

-

Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.)
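The proportional reduction described above works out to dividing the configured rate by the number of nodes expected to be delivering hints at once; a small illustrative sketch:

```python
# Sketch of the proportional throttle: with N nodes, N - 1 of them may be
# delivering hints simultaneously, so each delivery thread gets an equal share.
def per_thread_throttle_kb(configured_kb_per_sec, cluster_size):
    senders = max(cluster_size - 1, 1)
    return configured_kb_per_sec / senders

print(per_thread_throttle_kb(1024, 2))  # 1024.0 -> full rate
print(per_thread_throttle_kb(1024, 3))  # 512.0  -> half the maximum
```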

-

Default Value: 1024

-
-
-

max_hints_delivery_threads

-

Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower

-

Default Value: 2

-
-
-

hints_directory

-

This option is commented out by default.

-

Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints.

-

Default Value: /var/lib/cassandra/hints

-
-
-

hints_flush_period_in_ms

-

How often hints should be flushed from the internal buffers to disk. -Will not trigger fsync.

-

Default Value: 10000

-
-
-

max_hints_file_size_in_mb

-

Maximum size for a single hints file, in megabytes.

-

Default Value: 128

-
-
-

hints_compression

-

This option is commented out by default.

-

Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

batchlog_replay_throttle_in_kb

-

Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster.

-

Default Value: 1024

-
-
-

authenticator

-

Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}.

-
    -
  • AllowAllAuthenticator performs no checks - set it to disable authentication.
  • -
  • PasswordAuthenticator relies on username/password pairs to authenticate -users. It keeps usernames and hashed passwords in system_auth.roles table. -Please increase system_auth keyspace replication factor if you use this authenticator. -If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
  • -
-

Default Value: AllowAllAuthenticator

-
-
-

authorizer

-

Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}.

-
    -
  • AllowAllAuthorizer allows any action to any user - set it to disable authorization.
  • -
  • CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllAuthorizer

-
-
-

role_manager

-

Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable.

-
    -
  • CassandraRoleManager stores role data in the system_auth keyspace. Please -increase system_auth keyspace replication factor if you use this role manager.
  • -
-

Default Value: CassandraRoleManager

-
-
-

network_authorizer

-

Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}.

-
    -
  • AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization.
  • -
  • CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllNetworkAuthorizer

-
-
-

roles_validity_in_ms

-

Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator.

-

Default Value: 2000

-
-
-

roles_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -non-zero as well. -Defaults to the same value as roles_validity_in_ms.

-

Default Value: 2000

-
-
-

permissions_validity_in_ms

-

Validity period for permissions cache (fetching permissions can be an expensive operation depending on the authorizer; CassandraAuthorizer is one example). Defaults to 2000; set to 0 to disable. Will be disabled automatically for AllowAllAuthorizer.

-

Default Value: 2000

-
-
-

permissions_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for the permissions cache (if enabled). After this interval, cache entries become eligible for refresh. Upon next access, an async reload is scheduled and the old value is returned until it completes. If permissions_validity_in_ms is non-zero, then this must be non-zero as well. Defaults to the same value as permissions_validity_in_ms.

-

Default Value: 2000

-
-
-

credentials_validity_in_ms

-

Validity period for the credentials cache. This cache is tightly coupled to the provided PasswordAuthenticator implementation of IAuthenticator. If another IAuthenticator implementation is configured, this cache will not be automatically used and so the following settings will have no effect. Please note, credentials are cached in their encrypted form, so while activating this cache may reduce the number of queries made to the underlying table, it may not bring a significant reduction in the latency of individual authentication attempts. Defaults to 2000; set to 0 to disable credentials caching.

-

Default Value: 2000

-
-
-

credentials_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for the credentials cache (if enabled). After this interval, cache entries become eligible for refresh. Upon next access, an async reload is scheduled and the old value is returned until it completes. If credentials_validity_in_ms is non-zero, then this must be non-zero as well. Defaults to the same value as credentials_validity_in_ms.

-

Default Value: 2000
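
For illustration, the validity and refresh settings described above are usually tuned together; the values below are arbitrary examples, not recommendations.

```yaml
# Arbitrary example values. A *_update_interval_in_ms must be non-zero
# whenever its matching *_validity_in_ms is non-zero.
roles_validity_in_ms: 5000
roles_update_interval_in_ms: 2000
permissions_validity_in_ms: 5000
permissions_update_interval_in_ms: 2000
credentials_validity_in_ms: 5000
credentials_update_interval_in_ms: 2000
```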

-
-
-

partitioner

-

The partitioner is responsible for distributing groups of rows (by partition key) across nodes in the cluster. The partitioner can NOT be changed without reloading all data. If you are adding nodes or upgrading, you should set this to the same partitioner that you are currently using.

-

The default partitioner is the Murmur3Partitioner. Older partitioners such as the RandomPartitioner, ByteOrderedPartitioner, and OrderPreservingPartitioner have been included for backward compatibility only. For new clusters, you should NOT change this value.

-

Default Value: org.apache.cassandra.dht.Murmur3Partitioner

-
-
-

data_file_directories

-

This option is commented out by default.

-

Directories where Cassandra should store data on disk. If multiple directories are specified, Cassandra will spread data evenly across them by partitioning the token ranges. If not set, the default directory is $CASSANDRA_HOME/data/data.

-

Default Value (complex option):

-
#     - /var/lib/cassandra/data
-
-
-
-
-

commitlog_directory

-

This option is commented out by default. Directory where Cassandra should store the commit log. When running on magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.

-

Default Value: /var/lib/cassandra/commitlog
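
A sketch of an explicit on-disk layout using the two options above; the mount points are placeholders for whatever disks your nodes actually have.

```yaml
# Placeholder paths - substitute your own mount points.
data_file_directories:
    - /mnt/disk1/cassandra/data
    - /mnt/disk2/cassandra/data
# On magnetic disks, keep the commit log on its own spindle.
commitlog_directory: /mnt/commitlog/cassandra/commitlog
```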

-
-
-

cdc_enabled

-

Enable / disable CDC functionality on a per-node basis. This modifies the logic used for write path allocation rejection (standard: never reject; cdc: reject a Mutation containing a CDC-enabled table if at the space limit in cdc_raw_directory).

-

Default Value: false

-
-
-

cdc_raw_directory

-

This option is commented out by default.

-

CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the segment contains mutations for a CDC-enabled table. This should be placed on a separate spindle than the data directories. If not set, the default directory is $CASSANDRA_HOME/data/cdc_raw.

-

Default Value: /var/lib/cassandra/cdc_raw

-
-
-

disk_failure_policy

-

Policy for data disk failures:

-
-
die
-
shut down gossip and client transports and kill the JVM for any fs errors or single-sstable errors, so the node can be replaced.
-
stop_paranoid
-
shut down gossip and client transports even for single-sstable errors, kill the JVM for errors during startup.
-
stop
-
shut down gossip and client transports, leaving the node effectively dead, but it can still be inspected via JMX; kill the JVM for errors during startup.
-
best_effort
-
stop using the failed disk and respond to requests based on the remaining available sstables. This means you WILL see obsolete data at CL.ONE!
-
ignore
-
ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-
-

Default Value: stop

-
-
-

commit_failure_policy

-

Policy for commit disk failures:

-
-
die
-
shut down the node and kill the JVM, so the node can be replaced.
-
stop
-
shut down the node, leaving the node effectively dead, but it can still be inspected via JMX.
-
stop_commit
-
shut down the commit log, letting writes collect but continuing to service reads, as in pre-2.0.5 Cassandra
-
ignore
-
ignore fatal errors and let the batches fail
-
-

Default Value: stop

-
-
-

prepared_statements_cache_size_mb

-

Maximum size of the native protocol prepared statement cache

-

Valid values are either “auto” (omitting the value) or a value greater than 0.

-

Note that specifying too large a value will result in long running GCs and possibly out-of-memory errors. Keep the value at a small fraction of the heap.

-

If you constantly see “prepared statements discarded in the last minute because cache limit reached” messages, the first step is to investigate the root cause of these messages and check whether prepared statements are used correctly - i.e. use bind markers for variable parts.

-

Only change the default value if you really have more prepared statements than fit in the cache. In most cases it is not necessary to change this value. Constantly re-preparing statements is a performance penalty.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

key_cache_size_in_mb

-

Maximum size of the key cache in memory.

-

Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the minimum, sometimes more. The key cache is fairly tiny for the amount of time it saves, so it’s worthwhile to use it at large numbers. The row cache saves even more time, but must contain the entire row, so it is extremely space-intensive. It’s best to only use the row cache if you have hot rows or static rows.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
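
As a worked example of the “auto” rule above: assuming an 8192 MB heap (the heap size is only an assumption for the arithmetic), 5% of the heap is roughly 410 MB, so the cache would be capped at the 100 MB limit. Setting the option explicitly overrides that calculation, as in the sketch below.

```yaml
# Explicit override of the auto-calculated min(5% of heap, 100MB) value.
key_cache_size_in_mb: 100
```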

-
-
-

key_cache_save_period

-

Duration in seconds after which Cassandra should save the key cache. Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and saving is relatively cheap in terms of I/O for the key cache. Row cache saving is much more expensive and has limited use.

-

Default is 14400 or 4 hours.

-

Default Value: 14400

-
-
-

key_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

row_cache_class_name

-

This option is commented out by default.

-

Row cache implementation class name. Available implementations:

-
-
org.apache.cassandra.cache.OHCProvider
-
Fully off-heap row cache implementation (default).
-
org.apache.cassandra.cache.SerializingCacheProvider
-
This is the row cache implementation available in previous releases of Cassandra.
-
-

Default Value: org.apache.cassandra.cache.OHCProvider

-
-
-

row_cache_size_in_mb

-

Maximum size of the row cache in memory. Please note that the OHC cache implementation requires some additional off-heap memory to manage the map structures and some in-flight memory during operations before/after cache entries can be accounted against the cache capacity. This overhead is usually small compared to the whole capacity. Do not specify more memory than the system can afford in the worst usual situation, and leave some headroom for the OS block level cache. Never allow your system to swap.

-

Default value is 0, to disable row caching.

-

Default Value: 0

-
-
-

row_cache_save_period

-

Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 0 to disable saving the row cache.

-

Default Value: 0

-
-
-

row_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved

-

Default Value: 100

-
-
-

counter_cache_size_in_mb

-

Maximum size of the counter cache in memory.

-

Counter cache helps to reduce counter locks’ contention for hot counter cells. In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before write entirely. With RF > 1 a counter cache hit will still help to reduce the duration of the lock hold, helping with hot counter cell updates, but will not allow skipping the read entirely. Only the local (clock, count) tuple of a counter cell is kept in memory, not the whole counter, so it’s relatively cheap.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.

-
-
-

counter_cache_save_period

-

Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Default is 7200 or 2 hours.

-

Default Value: 7200

-
-
-

counter_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

saved_caches_directory

-

This option is commented out by default.

-

saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.

-

Default Value: /var/lib/cassandra/saved_caches

-
-
-

commitlog_sync_batch_window_in_ms

-

This option is commented out by default.

-

commitlog_sync may be either “periodic”, “group”, or “batch.”

-

When in batch mode, Cassandra won’t ack writes until the commit log has been flushed to disk. Each incoming write will trigger the flush task. commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had almost no value, and is being removed.

-

Default Value: 2

-
-
-

commitlog_sync_group_window_in_ms

-

This option is commented out by default.

-

group mode is similar to batch mode, where Cassandra will not ack writes until the commit log has been flushed to disk. The difference is group mode will wait up to commitlog_sync_group_window_in_ms between flushes.

-

Default Value: 1000

-
-
-

commitlog_sync

-

the default option is “periodic” where writes may be acked immediately and the CommitLog is simply synced every commitlog_sync_period_in_ms milliseconds.

-

Default Value: periodic

-
-
-

commitlog_sync_period_in_ms

-

Default Value: 10000
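
A sketch contrasting the three sync modes described above; exactly one mode applies at a time, and the window and period values here are illustrative.

```yaml
# Periodic (the default): writes are acked immediately, fsync every 10s.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000

# Group: writes are not acked until fsynced, fsyncs batched into a window.
# commitlog_sync: group
# commitlog_sync_group_window_in_ms: 1000

# Batch: writes are not acked until fsynced; each write triggers the flush task.
# commitlog_sync: batch
```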

-
-
-

periodic_commitlog_sync_lag_block_in_ms

-

This option is commented out by default.

-

When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete.

-
-
-

commitlog_segment_size_in_mb

-

The size of the individual commitlog file segments. A commitlog segment may be archived, deleted, or recycled once all the data in it (potentially from each columnfamily in the system) has been flushed to sstables.

-

The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via the max_mutation_size_in_kb setting in cassandra.yaml. The default is half the segment size, i.e. commitlog_segment_size_in_mb * 1024 / 2. This should be positive and less than 2048.

-

NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024.

-

Default Value: 32
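
To make the relationship above concrete, here is the arithmetic with the default segment size (a sketch; the explicit max_mutation_size_in_kb line is shown only as an assumption).

```yaml
commitlog_segment_size_in_mb: 32
# Implied default: max_mutation_size_in_kb = 32 * 1024 / 2 = 16384 KB.
# If set explicitly, the segment size must be at least twice
# max_mutation_size_in_kb / 1024:
# max_mutation_size_in_kb: 16384   # requires commitlog_segment_size_in_mb >= 32
```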

-
-
-

commitlog_compression

-

This option is commented out by default.

-

Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-
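
If you do want a compressed commit log, a minimal sketch using one of the supported compressors listed above would be:

```yaml
commitlog_compression:
    - class_name: LZ4Compressor
```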

seed_provider

-

any class that implements the SeedProvider interface and has a -constructor that takes a Map<String, String> of parameters will do.

-

Default Value (complex option):

-
# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring.  You must change this if you are running
-# multiple nodes!
-- class_name: org.apache.cassandra.locator.SimpleSeedProvider
-  parameters:
-      # seeds is actually a comma-delimited list of addresses.
-      # Ex: "<ip1>,<ip2>,<ip3>"
-      - seeds: "127.0.0.1:7000"
-
-
-
-
-
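
For a multi-node cluster the provider is usually left as-is and only the seed list changes; the addresses below are placeholders.

```yaml
seed_provider:
    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
      parameters:
          # Placeholder addresses - list a few stable nodes, ideally per DC.
          - seeds: "10.0.0.1:7000,10.0.0.2:7000,10.0.1.1:7000"
```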

concurrent_reads

-

For workloads with more data than can fit in memory, Cassandra’s bottleneck will be reads that need to fetch data from disk. “concurrent_reads” should be set to (16 * number_of_drives) in order to allow the operations to enqueue low enough in the stack that the OS and drives can reorder them. Same applies to “concurrent_counter_writes”, since counter writes read the current values before incrementing and writing them back.

-

On the other hand, since writes are almost never IO bound, the ideal number of “concurrent_writes” is dependent on the number of cores in your system; (8 * number_of_cores) is a good rule of thumb.

-

Default Value: 32
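
Applying the rules of thumb above to a hypothetical node with 4 data drives and 16 cores (both numbers are assumptions for the example) would give:

```yaml
concurrent_reads: 64            # 16 * 4 drives
concurrent_counter_writes: 64   # same rule as reads
concurrent_writes: 128          # 8 * 16 cores
```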

-
-
-

concurrent_writes

-

Default Value: 32

-
-
-

concurrent_counter_writes

-

Default Value: 32

-
-
-

concurrent_materialized_view_writes

-

For materialized view writes, as there is a read involved, this should be limited by the lesser of concurrent reads or concurrent writes.

-

Default Value: 32

-
-
-

file_cache_size_in_mb

-

This option is commented out by default.

-

Maximum memory to use for sstable chunk cache and buffer pooling. 32MB of this is reserved for pooling buffers, the rest is used as a cache that holds uncompressed sstable chunks. Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, so it is in addition to the memory allocated for heap. The cache also has on-heap overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size if the default 64k chunk size is used). Memory is only allocated when needed.

-

Default Value: 512

-
-
-

buffer_pool_use_heap_if_exhausted

-

This option is commented out by default.

-

Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

-

Default Value: true

-
-
-

disk_optimization_strategy

-

This option is commented out by default.

-

The strategy for optimizing disk reads. Possible values are: ssd (for solid state disks, the default) and spinning (for spinning disks).

-

Default Value: ssd

-
-
-

memtable_heap_space_in_mb

-

This option is commented out by default.

-

Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap.

-

Default Value: 2048

-
-
-

memtable_offheap_space_in_mb

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

memtable_cleanup_threshold

-

This option is commented out by default.

-

memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information.

-

Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load.

-

memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)

-

Default Value: 0.11

-
-
-

memtable_allocation_type

-

Specify the way Cassandra allocates and manages memtable memory. -Options are:

-
-
heap_buffers
-
on heap nio buffers
-
offheap_buffers
-
off heap (direct) nio buffers
-
offheap_objects
-
off heap objects
-
-

Default Value: heap_buffers

-
-
-

repair_session_space_in_mb

-

This option is commented out by default.

-

Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair.

-

For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.

-
-
-

commitlog_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for commit logs on disk.

-

If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume.

-

Default Value: 8192

-
-
-

memtable_flush_writers

-

This option is commented out by default.

-

This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound.

-

Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time.

-

You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory.

-

memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers.

-

Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead.

-

There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory.

-

Default Value: 2

-
-
-

cdc_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for change-data-capture logs on disk.

-

If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed.

-

The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides.

-

Default Value: 4096

-
-
-

cdc_free_space_check_interval_ms

-

This option is commented out by default.

-

When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms

-

Default Value: 250

-
-
-

index_summary_capacity_in_mb

-

A fixed memory pool size in MB for SSTable index summaries. If left empty, this will default to 5% of the heap size. If the memory usage of all index summaries exceeds this limit, SSTables with low read rates will shrink their index summaries in order to meet this limit. However, this is a best-effort process. In extreme conditions Cassandra may need to use more than this amount of memory.

-
-
-

index_summary_resize_interval_in_minutes

-

How frequently index summaries should be resampled. This is done periodically to redistribute memory from the fixed-size pool to sstables proportional to their recent read rates. Setting to -1 will disable this process, leaving existing index summaries at their current sampling level.

-

Default Value: 60

-
-
-

trickle_fsync

-

Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters.

-

Default Value: false

-
-
-

trickle_fsync_interval_in_kb

-

Default Value: 10240

-
-
-

storage_port

-

TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7000

-
-
-

ssl_storage_port

-

SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7001

-
-
-

listen_address

-

Address or interface to bind to and tell other Cassandra nodes to connect to. You _must_ change this if you want multiple nodes to be able to communicate!

-

Set listen_address OR listen_interface, not both.

-

Leaving it blank leaves it up to InetAddress.getLocalHost(). This will always do the Right Thing _if_ the node is properly configured (hostname, name resolution, etc), and the Right Thing is to use the address associated with the hostname (it might not be).

-

Setting listen_address to 0.0.0.0 is always wrong.

-

Default Value: localhost
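
A per-node sketch; the address is a placeholder and must be one the other nodes can reach (never 0.0.0.0).

```yaml
# Set exactly one of listen_address / listen_interface.
listen_address: 10.0.0.1   # placeholder - this node's private IP
# listen_interface: eth0
```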

-
-
-

listen_interface

-

This option is commented out by default.

-

Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth0

-
-
-

listen_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_address

-

This option is commented out by default.

-

Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address

-

Default Value: 1.2.3.4

-
-
-

listen_on_broadcast_address

-

This option is commented out by default.

-

When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2.

-

Default Value: false

-
-
-

internode_authenticator

-

This option is commented out by default.

-

Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes.

-

Default Value: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

-
-
-

start_native_transport

-

Whether to start the native transport server. -The address on which the native transport is bound is defined by rpc_address.

-

Default Value: true

-
-
-

native_transport_port

-

port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 9042

-
-
-

native_transport_port_ssl

-

This option is commented out by default. Enabling native transport encryption in client_encryption_options allows you to either use encryption for the standard port or to use a dedicated, additional port along with the unencrypted standard native_transport_port. Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption for native_transport_port. Setting native_transport_port_ssl to a different value from native_transport_port will use encryption for native_transport_port_ssl while keeping native_transport_port unencrypted.

-

Default Value: 9142
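
A sketch of the dedicated-SSL-port arrangement described above; it only takes effect once client encryption is enabled in client_encryption_options further down.

```yaml
# Unencrypted clients keep using 9042; TLS clients connect to 9142.
native_transport_port: 9042
native_transport_port_ssl: 9142
```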

-
-
-

native_transport_max_threads

-

This option is commented out by default. The maximum threads for handling requests (note that idle threads are stopped after 30 seconds so there is no corresponding minimum setting).

-

Default Value: 128

-
-
-

native_transport_max_frame_size_in_mb

-

This option is commented out by default.

-

The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you’re changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

native_transport_frame_block_size_in_kb

-

This option is commented out by default.

-

If checksumming is enabled as a protocol option, denotes the size of the chunks into which frame bodies will be broken and checksummed.

-

Default Value: 32

-
-
-

native_transport_max_concurrent_connections

-

This option is commented out by default.

-

The maximum number of concurrent client connections. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_max_concurrent_connections_per_ip

-

This option is commented out by default.

-

The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_allow_older_protocols

-

Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored.

-

Default Value: true

-
-
-

native_transport_idle_timeout_in_ms

-

This option is commented out by default.

-

Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period.

-

Clients may implement heartbeats by sending OPTIONS native protocol message after a timeout, which -will reset idle timeout timer on the server side. To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side.

-

Idle connection timeouts are disabled by default.

-

Default Value: 60000

-
-
-

rpc_address

-

The address or interface to bind the native transport server to.

-

Set rpc_address OR rpc_interface, not both.

-

Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node).

-

Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0.

-

For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: localhost

-
-
-

rpc_interface

-

This option is commented out by default.

-

Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth1

-
-
-

rpc_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_rpc_address

-

This option is commented out by default.

-

RPC address to broadcast to drivers and other Cassandra nodes. This cannot be set to 0.0.0.0. If left blank, this will be set to the value of rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must be set.

-

Default Value: 1.2.3.4
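
A sketch of the wildcard-bind case described above; the broadcast address is a placeholder for the node's routable IP.

```yaml
# Binding the native transport to all interfaces is allowed, but then a
# concrete broadcast_rpc_address becomes mandatory.
rpc_address: 0.0.0.0
broadcast_rpc_address: 10.0.0.1   # placeholder
```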

-
-
-

rpc_keepalive

-

enable or disable keepalive on rpc/native connections

-

Default Value: true

-
-
-

internode_send_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and ‘man tcp’

-
-
-

internode_recv_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem

-
-
-

incremental_backups

-

Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator’s -responsibility.

-

Default Value: false

-
-
-

snapshot_before_compaction

-

Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won’t clean up the -snapshots for you. Mostly useful if you’re paranoid when there -is a data format change.

-

Default Value: false

-
-
-

auto_snapshot

-

Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop.

-

Default Value: true

-
-
-

column_index_size_in_kb

-

Granularity of the collation index of rows within a partition. Increase if your rows are large, or if you have a very large number of rows per partition. The competing goals are these:

-
  • a smaller granularity means more index entries are generated and looking up rows within the partition by collation column is faster
  • but, Cassandra will keep the collation index in memory for hot rows (as part of the key cache), so a larger granularity means you can cache more hot rows
-

Default Value: 64

-
-
-

column_index_cache_size_in_kb

-

Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk.

-

Note that this size refers to the size of the -serialized index information and not the size of the partition.

-

Default Value: 2

-
-
-

concurrent_compactors

-

This option is commented out by default.

-

Number of simultaneous compactions to allow, NOT including validation “compactions” for anti-entropy repair. Simultaneous compactions can help preserve read performance in a mixed read/write workload, by mitigating the tendency of small sstables to accumulate during a single long running compaction. The default is usually fine and if you experience problems with compaction running too slowly or too fast, you should look at compaction_throughput_mb_per_sec first.

-

concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8.

-

If your data directories are backed by SSD, you should increase this -to the number of cores.

-

Default Value: 1

-
-
-

concurrent_validations

-

This option is commented out by default.

-

Number of simultaneous repair validations to allow. Values less than one are interpreted as unbounded (the default).

-

Default Value: 0

-
-
-

concurrent_materialized_view_builders

-

Number of simultaneous materialized view builder tasks to allow.

-

Default Value: 1

-
-
-

compaction_throughput_mb_per_sec

-

Throttles compaction to the given total throughput across the entire system. The faster you insert data, the faster you need to compact in order to keep the sstable count down, but in general, setting this to 16 to 32 times the rate you are inserting data is more than sufficient. Setting this to 0 disables throttling. Note that this accounts for all types of compaction, including validation compaction.

-

Default Value: 16

-
-
-

sstable_preemptive_open_interval_in_mb

-

When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot

-

Default Value: 50

-
-
-

stream_entire_sstables

-

This option is commented out by default.

-

When enabled, permits Cassandra to zero-copy stream entire eligible -SSTables between nodes, including every component. -This speeds up the network transfer significantly subject to -throttling specified by stream_throughput_outbound_megabits_per_sec. -Enabling this will reduce the GC pressure on sending and receiving node. -When unset, the default is enabled. While this feature tries to keep the -disks balanced, it cannot guarantee it. This feature will be automatically -disabled if internode encryption is enabled. Currently this can be used with -Leveled Compaction. Once CASSANDRA-14586 is fixed other compaction strategies -will benefit as well when used in combination with CASSANDRA-6696.

-

Default Value: true

-
-
-

stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

inter_dc_stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s

-

Default Value: 200

-
-
-

read_request_timeout_in_ms

-

How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

range_request_timeout_in_ms

-

How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 10000

-
-
-

write_request_timeout_in_ms

-

How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 2000

-
-
-

counter_write_request_timeout_in_ms

-

How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

cas_contention_timeout_in_ms

-

How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms.

-

Default Value: 1000

-
-
-

truncate_request_timeout_in_ms

-

How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms.

-

Default Value: 60000

-
-
-

request_timeout_in_ms

-

The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms.

-

Default Value: 10000
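
For reference, the coordinator timeouts above can be tuned as a family; the values below simply restate the defaults quoted in this section.

```yaml
read_request_timeout_in_ms: 5000
range_request_timeout_in_ms: 10000
write_request_timeout_in_ms: 2000
counter_write_request_timeout_in_ms: 5000
cas_contention_timeout_in_ms: 1000
truncate_request_timeout_in_ms: 60000
request_timeout_in_ms: 10000
```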

-
-
-

internode_application_send_queue_capacity_in_bytes

-

This option is commented out by default.

-

Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details.

-

The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000

-

The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000

-

The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000

-

Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received.

-

The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. So any given node may have a maximum of -N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes) -messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens -nodes should need to communicate with significant bandwidth.

-

The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, -on all links to or from a single node in the cluster. -The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, -on all links to or from any node in the cluster.

-

Default Value: 4194304 #4MiB

-
-
-

internode_application_send_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_send_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

internode_application_receive_queue_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 4194304 #4MiB

-
-
-

internode_application_receive_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_receive_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

slow_query_log_timeout_in_ms

-

How long before a node logs slow queries. Select queries that take longer than -this timeout to execute, will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging.

-

Default Value: 500

-
-
-

cross_node_timeout

-

Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests.

-

Warning: before enabling this property make sure that ntp is installed and the times are synchronized between the nodes.

-

Default Value: false

-
-
-

streaming_keep_alive_period_in_secs

-

This option is commented out by default.

-

Set keep-alive period for streaming. This node will send a keep-alive message periodically with this period. If the node does not receive a keep-alive message from the peer for 2 keep-alive cycles the stream session times out and fails. Default value is 300s (5 minutes), which means a stalled stream times out in 10 minutes by default.

-

Default Value: 300

-
-
-

streaming_connections_per_host

-

This option is commented out by default.

-

Limit number of connections per host for streaming. Increase this when you notice that joins are CPU-bound rather than network bound (for example a few nodes with big files).

-

Default Value: 1

-
-
-

phi_convict_threshold

-

This option is commented out by default.

-

phi value that must be reached for a host to be marked down. -most users should never need to adjust this.

-

Default Value: 8

-
-
-

endpoint_snitch

-

endpoint_snitch – Set this to a class that implements IEndpointSnitch. The snitch has two functions:

-
  • it teaches Cassandra enough about your network topology to route requests efficiently
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping machines into “datacenters” and “racks.” Cassandra will do its best not to have more than one replica on the same “rack” (which may not actually be a physical location)
-

CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. This means that if you start with the default SimpleSnitch, which locates every node on “rack1” in “datacenter1”, your only options if you need to add another datacenter are GossipingPropertyFileSnitch (and the older PFS). From there, if you want to migrate to an incompatible snitch like Ec2Snitch you can do it by adding new nodes under Ec2Snitch (which will locate them in a new “datacenter”) and decommissioning the old ones.

-

Out of the box, Cassandra provides:

-
-
SimpleSnitch:
-
Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for single-datacenter deployments.
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in cassandra-rackdc.properties and propagated to other nodes via gossip. If cassandra-topology.properties exists, it is used as a fallback, allowing migration from the PropertyFileSnitch.
-
PropertyFileSnitch:
-
Proximity is determined by rack and data center, which are explicitly configured in cassandra-topology.properties.
-
Ec2Snitch:
-
Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this will not work across multiple Regions.
-
Ec2MultiRegionSnitch:
-
Uses public IPs as broadcast_address to allow cross-region connectivity. (Thus, you should set seed addresses to the public IP as well.) You will need to open the storage_port or ssl_storage_port on the public IP firewall. (For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection.)
-
RackInferringSnitch:
-
Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each node’s IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an example of writing a custom Snitch class and is provided in that spirit.
-
-

You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath.

-

Default Value: SimpleSnitch
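
A sketch of the recommended production setup described above: select GossipingPropertyFileSnitch in cassandra.yaml and declare each node's location in cassandra-rackdc.properties (the dc and rack names below are placeholders).

```yaml
endpoint_snitch: GossipingPropertyFileSnitch
# cassandra-rackdc.properties on each node would then contain, for example:
#   dc=dc1
#   rack=rack1
```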

-
-
-

dynamic_snitch_update_interval_in_ms

-

controls how often to perform the more expensive part of host score -calculation

-

Default Value: 100

-
-
-

dynamic_snitch_reset_interval_in_ms

-

controls how often to reset all host scores, allowing a bad host to -possibly recover

-

Default Value: 600000

-
-
-

dynamic_snitch_badness_threshold

-

if set greater than zero, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest.

-

Default Value: 0.1

-
-
-

server_encryption_options

-

Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html

-

NOTE No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks

-

The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore

-

Default Value (complex option):

-
# set to true for allowing secure incoming connections
-enabled: false
-# If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port
-optional: false
-# if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used
-# during upgrade to 4.0; otherwise, set to false.
-enable_legacy_ssl_storage_port: false
-# on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true.
-internode_encryption: none
-keystore: conf/.keystore
-keystore_password: cassandra
-truststore: conf/.truststore
-truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-# require_client_auth: false
-# require_endpoint_verification: false
-
-
-
-
-

client_encryption_options

-

enable or disable client-to-server encryption.

-

Default Value (complex option):

-
enabled: false
-# If enabled and optional is set to true encrypted and unencrypted connections are handled.
-optional: false
-keystore: conf/.keystore
-keystore_password: cassandra
-# require_client_auth: false
-# Set trustore and truststore_password if require_client_auth is true
-# truststore: conf/.truststore
-# truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-
-
-
-

internode_compression

-

internode_compression controls whether traffic between nodes is -compressed. -Can be:

-
-
all
-
all traffic is compressed
-
dc
-
traffic between different datacenters is compressed
-
none
-
nothing is compressed.
-
-

Default Value: dc

-
-
-

inter_dc_tcp_nodelay

-

Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses.

-

Default Value: false

-
-
-

tracetype_query_ttl

-

TTL for different trace types used during logging of the repair process.

-

Default Value: 86400

-
-
-

tracetype_repair_ttl

-

Default Value: 604800

-
-
-

enable_user_defined_functions

-

If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at INFO level. UDFs (user defined functions) are disabled by default. As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.

-

Default Value: false

-
-
-

enable_scripted_user_defined_functions

-

Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with “language javascript” or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false.

-

Default Value: false

-
-
-

windows_timer_interval

-

The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals ‘clockres’ tool can confirm your system’s default -setting.

-

Default Value: 1

-
-
-

transparent_data_encryption_options

-

Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by the “key_alias” is the only key that will be used for encrypt operations; previously used keys can still (and should!) be in the keystore and will be used on decrypt operations (to handle the case of key rotation).

-

It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)

-

Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints

-

Default Value (complex option):

-
enabled: false
-chunk_length_kb: 64
-cipher: AES/CBC/PKCS5Padding
-key_alias: testing:1
-# CBC IV length for AES needs to be 16 bytes (which is also the default size)
-# iv_length: 16
-key_provider:
-  - class_name: org.apache.cassandra.security.JKSKeyProvider
-    parameters:
-      - keystore: conf/.keystore
-        keystore_password: cassandra
-        store_type: JCEKS
-        key_password: cassandra
-
-
-
-
-

tombstone_warn_threshold

-
-

SAFETY THRESHOLDS #

-

When executing a scan, within or across a partition, we need to keep the tombstones seen in memory so we can return them to the coordinator, which will use them to make sure other replicas also know about the deleted rows. With workloads that generate a lot of tombstones, this can cause performance problems and even exhaust the server heap. (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) Adjust the thresholds here if you understand the dangers and want to scan more tombstones anyway. These thresholds may also be adjusted at runtime using the StorageService mbean.

-

Default Value: 1000

-
-
-
-

tombstone_failure_threshold

-

Default Value: 100000

-
-
-

batch_size_warn_threshold_in_kb

-

Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability.

-

Default Value: 5

-
-
-

batch_size_fail_threshold_in_kb

-

Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.

-

Default Value: 50

-
-
-

unlogged_batch_across_partitions_warn_threshold

-

Log WARN on any batches not of type LOGGED that span across more partitions than this limit.

-

Default Value: 10

-
-
-

compaction_large_partition_warning_threshold_mb

-

Log a warning when compacting partitions larger than this value

-

Default Value: 100

-
-
-

gc_log_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than 200 ms will be logged at INFO level -This threshold can be adjusted to minimize logging if necessary

-

Default Value: 200

-
-
-

gc_warn_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement. Setting to 0 -will deactivate the feature.

-

Default Value: 1000

-
-
-

max_value_size_in_mb

-

This option is commented out by default.

-

Maximum size of any value in SSTables. Safety measure to detect SSTable corruption early. Any value size larger than this threshold will result in marking an SSTable as corrupted. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

back_pressure_enabled

-

Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas.

-

Default Value: false

-
-
-

back_pressure_strategy

-

The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it’s rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map<String, Object>.

-
-
-

otc_coalescing_strategy

-

This option is commented out by default.

-

Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won’t notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It’s not that bare metal -doesn’t benefit from coalescing messages, it’s that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details.

-

Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.

-

Default Value: DISABLED

-
-
-

otc_coalescing_window_us

-

This option is commented out by default.

-

How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled.

-

Default Value: 200

-
-
-

otc_coalescing_enough_coalesced_messages

-

This option is commented out by default.

-

Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.

-

Default Value: 8

-
-
-

otc_backlog_expiration_interval_ms

-

This option is commented out by default.

-

How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.

-

Default Value: 200

-
-
-

ideal_consistency_level

-

This option is commented out by default.

-

Track a metric per keyspace indicating whether replication achieved the ideal consistency -level for writes without timing out. This is different from the consistency level requested by -each write which may be lower in order to facilitate availability.

-

Default Value: EACH_QUORUM

-
-
-

full_query_log_dir

-

This option is commented out by default.

-

Path to write full query log data to when the full query log is enabled. The full query log will recursively delete the contents of this path at times. Don't place links in this directory to other parts of the filesystem.

-

Default Value: /tmp/cassandrafullquerylog

-
-
-

automatic_sstable_upgrade

-

This option is commented out by default.

-

Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the -oldest non-upgraded sstable will get upgraded to the latest version

-

Default Value: false

-
-
-

max_concurrent_automatic_sstable_upgrades

-

This option is commented out by default. Limits the number of concurrent sstable upgrades.

-

Default Value: 1

-
-
-

audit_logging_options

-

Audit logging - Logs every incoming CQL command request and authentication attempt to a node. See the docs on audit_logging for full details about the various configuration options.

-
-
-

full_query_logging_options

-

This option is commented out by default.

-

Default options for full query logging - these can be overridden from the command line when executing nodetool enablefullquerylog.

-
-
-

corrupted_tombstone_strategy

-

This option is commented out by default.

-

Validate tombstones on reads and compaction. Can be either “disabled”, “warn” or “exception”.

-

Default Value: disabled

-
-
-

diagnostic_events_enabled

-

Diagnostic Events. If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details on internal state and temporal relationships across events, accessible by clients via JMX.

-

Default Value: false

-
-
-

native_transport_flush_in_batches_legacy

-

This option is commented out by default.

-

Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in particular you run an old kernel or have very few client connections, this option might be worth evaluating.

-

Default Value: false

-
-
-

repaired_data_tracking_for_range_reads_enabled

-

Enable tracking of repaired state of data during reads and comparison between replicas -Mismatches between the repaired sets of replicas can be characterized as either confirmed -or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair -sessions, unrepaired partition tombstones, or some other condition means that the disparity -cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation -as they may be indicative of corruption or data loss. -There are separate flags for range vs partition reads as single partition reads are only tracked -when CL > 1 and a digest mismatch occurs. Currently, range queries don’t use digests so if -enabled for range reads, all range reads will include repaired data tracking. As this adds -some overhead, operators may wish to disable it whilst still enabling it for partition reads

-

Default Value: false

-
-
-

repaired_data_tracking_for_partition_reads_enabled

-

Default Value: false

-
-
-

report_unconfirmed_repaired_data_mismatches

-

If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed mismatches will also be recorded. This is to avoid potential signal-to-noise issues, as unconfirmed mismatches are less actionable than confirmed ones.

-

Default Value: false

-
-
-

enable_materialized_views

-
-

EXPERIMENTAL FEATURES #

-

Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use.

-

Default Value: false

-
-
-
-

enable_sasi_indexes

-

Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use.

-

Default Value: false

-
-
-

enable_transient_replication

-

Enables creation of transiently replicated keyspaces on this node. -Transient replication is experimental and is not recommended for production use.

-

Default Value: false

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/configuration/index.html b/src/doc/4.0-alpha2/configuration/index.html deleted file mode 100644 index 5e1d1c618..000000000 --- a/src/doc/4.0-alpha2/configuration/index.html +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

This section describes how to configure Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/contactus.html b/src/doc/4.0-alpha2/contactus.html deleted file mode 100644 index b023cb6dd..000000000 --- a/src/doc/4.0-alpha2/contactus.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contact us" -doc-header-links: ' - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contact us

-

You can get in touch with the Cassandra community either via the mailing lists or Slack rooms.

-
-

Mailing lists

-

The following mailing lists are available:

- -

Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe.

-
-
-

Slack

-

To chat with developers or users in real-time, join our rooms on ASF Slack:

-
    -
  • cassandra - for user questions and general discussions.
  • -
  • cassandra-dev - strictly for questions or discussions related to Cassandra development.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/appendices.html b/src/doc/4.0-alpha2/cql/appendices.html deleted file mode 100644 index 93992428d..000000000 --- a/src/doc/4.0-alpha2/cql/appendices.html +++ /dev/null @@ -1,567 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Appendices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Appendices

-
-

Appendix A: CQL Keywords

-

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (but one can enclose a reserved keyword in double-quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts and can be used as identifiers otherwise. The only raison d'être of these non-reserved keywords is convenience: a keyword is non-reserved when it is always easy for the parser to decide whether it is being used as a keyword or not.
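For example, assuming a hypothetical keyspace ks, the reserved keyword ADD can still be used as a column name by double-quoting it:

CREATE TABLE ks.example ("add" int PRIMARY KEY, value text);
SELECT "add", value FROM ks.example;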

Reserved keywords: ADD, ALLOW, ALTER, AND, APPLY, ASC, AUTHORIZE, BATCH, BEGIN, BY, COLUMNFAMILY, CREATE, DELETE, DESC, DESCRIBE, DROP, ENTRIES, EXECUTE, FROM, FULL, GRANT, IF, IN, INDEX, INFINITY, INSERT, INTO, KEYSPACE, LIMIT, MODIFY, NAN, NORECURSIVE, NOT, NULL, OF, ON, OR, ORDER, PRIMARY, RENAME, REPLACE, REVOKE, SCHEMA, SELECT, SET, TABLE, TO, TOKEN, TRUNCATE, UNLOGGED, UPDATE, USE, USING, WHERE, WITH.

Non-reserved keywords: AGGREGATE, ALL, AS, ASCII, BIGINT, BLOB, BOOLEAN, CALLED, CLUSTERING, COMPACT, CONTAINS, COUNT, COUNTER, CUSTOM, DATE, DECIMAL, DISTINCT, DOUBLE, EXISTS, FILTERING, FINALFUNC, FLOAT, FROZEN, FUNCTION, FUNCTIONS, INET, INITCOND, INPUT, INT, JSON, KEY, KEYS, KEYSPACES, LANGUAGE, LIST, LOGIN, MAP, NOLOGIN, NOSUPERUSER, OPTIONS, PASSWORD, PERMISSION, PERMISSIONS, RETURNS, ROLE, ROLES, SFUNC, SMALLINT, STATIC, STORAGE, STYPE, SUPERUSER, TEXT, TIME, TIMESTAMP, TIMEUUID, TINYINT, TRIGGER, TTL, TUPLE, TYPE, USER, USERS, UUID, VALUES, VARCHAR, VARINT, WRITETIME.
-
-
-

Appendix B: CQL Reserved Types

-

The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name.

- --- - - - - - - - - - - - - - - - - - - -
type
bitstring
byte
complex
enum
interval
macaddr
-
-
-

Appendix C: Dropping Compact Storage

-

Starting with version 4.0, Thrift and COMPACT STORAGE are no longer supported.

-

The ‘ALTER … DROP COMPACT STORAGE’ statement makes Compact Tables CQL-compatible, exposing the internal structure of Thrift/Compact Tables (see the example after the list below):

-
    -
  • CQL-created Compact Tables that have no clustering columns, will expose an -additional clustering column column1 with UTF8Type.
  • -
  • CQL-created Compact Tables that had no regular columns, will expose a -regular column value with BytesType.
  • -
  • For CQL-created Compact Tables, all columns originally defined as regular will become static.
  • -
  • CQL-created Compact Tables that have clustering but have no regular -columns will have an empty value column (of EmptyType)
  • -
  • SuperColumn Tables (can only be created through Thrift) will expose -a compact value map with an empty name.
  • -
  • Thrift-created Compact Tables will have types corresponding to their -Thrift definition.
  • -
-
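For example, a hypothetical legacy table can be converted with:

ALTER TABLE my_keyspace.legacy_events DROP COMPACT STORAGE;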
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/changes.html b/src/doc/4.0-alpha2/cql/changes.html deleted file mode 100644 index cf21101e3..000000000 --- a/src/doc/4.0-alpha2/cql/changes.html +++ /dev/null @@ -1,363 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Changes

-

The following describes the changes in each version of CQL.

-
-

3.4.5

- -
-
-

3.4.4

-
    -
  • ALTER TABLE ALTER has been removed; a column’s type may not be changed after creation (CASSANDRA-12443).
  • -
  • ALTER TYPE ALTER has been removed; a field’s type may not be changed after creation (CASSANDRA-12443).
  • -
-
-
-

3.4.3

- -
-
-

3.4.2

-
    -
  • If a table has a non zero default_time_to_live, then explicitly specifying a TTL of 0 in an INSERT or -UPDATE statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels -the default_time_to_live). This wasn’t the case before and the default_time_to_live was applied even though a -TTL had been explicitly set.
  • -
  • ALTER TABLE ADD and DROP now allow multiple columns to be added/removed.
  • -
  • New PER PARTITION LIMIT option for SELECT statements (see CASSANDRA-7017).
  • -
  • User-defined functions can now instantiate UDTValue and TupleValue instances via the new UDFContext interface (see CASSANDRA-10818).
  • -
  • User-defined types may now be stored in a non-frozen form, allowing individual fields to be updated and -deleted in UPDATE statements and DELETE statements, respectively. (CASSANDRA-7423).
  • -
-
-
-

3.4.1

-
    -
  • Adds CAST functions.
  • -
-
-
-

3.4.0

-
    -
  • Support for materialized views.
  • -
  • DELETE support for inequality expressions and IN restrictions on any primary key columns.
  • -
  • UPDATE support for IN restrictions on any primary key columns.
  • -
-
-
-

3.3.1

-
    -
  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X.
  • -
-
-
-

3.3.0

-
    -
  • User-defined functions and aggregates are now supported.
  • -
  • Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings.
  • -
  • Introduces Roles to supersede user based authentication and access control
  • -
  • New date, time, tinyint and smallint data types have been added.
  • -
  • JSON support has been added
  • -
  • Adds new time conversion functions and deprecate dateOf and unixTimestampOf.
  • -
-
-
-

3.2.0

-
    -
  • User-defined types supported.
  • -
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the -keys() function
  • -
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • -
  • Tuple types were added to hold fixed-length sets of typed positional fields.
  • -
  • DROP INDEX now supports optionally specifying a keyspace.
  • -
-
-
-

3.1.7

-
    -
  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations -of clustering columns.
  • -
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, -respectively.
  • -
-
-
-

3.1.6

-
    -
  • A new uuid() method has been added.
  • -
  • Support for DELETE ... IF EXISTS syntax.
  • -
-
-
-

3.1.5

-
    -
  • It is now possible to group clustering columns in a relation, see WHERE clauses.
  • -
  • Added support for static columns.
  • -
-
-
-

3.1.4

-
    -
  • CREATE INDEX now allows specifying options when creating CUSTOM indexes.
  • -
-
-
-

3.1.3

-
    -
  • Millisecond precision formats have been added to the timestamp parser.
  • -
-
-
-

3.1.2

-
    -
  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or keyspace/table one), you will now need to double quote them.
  • -
-
-
-

3.1.1

-
    -
  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • -
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable -will be a list of whatever type c is.
  • -
  • It is now possible to use named bind variables (using :name instead of ?).
  • -
-
-
-

3.1.0

-
    -
  • ALTER TABLE DROP option added.
  • -
  • SELECT statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
  • -
  • CREATE statements for KEYSPACE, TABLE and INDEX now supports an IF NOT EXISTS condition. -Similarly, DROP statements support a IF EXISTS condition.
  • -
  • INSERT statements optionally supports a IF NOT EXISTS condition and UPDATE supports IF conditions.
  • -
-
-
-

3.0.5

-
    -
  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626).
  • -
-
-
-

3.0.4

-
    -
  • Updated the syntax for custom secondary indexes.
  • -
  • Non-equal conditions on the partition key are now never supported, even for the ordering partitioner, as this was not correct (the order was not the one of the type of the partition key). Instead, the token method should always be used for range queries on the partition key (see WHERE clauses).
  • -
-
-
-

3.0.3

- -
-
-

3.0.2

-
    -
  • Type validation for the constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now stricter. See the data types section for details on which constants are allowed for which type.
  • -
  • The type validation fix of the previous point has led to the introduction of blob constants to allow the input of blobs. Do note that while the input of blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated and will be removed by a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • -
  • A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is -now also allowed in select clauses. See the section on functions for details.
  • -
-
-
-

3.0.1

-
    -
  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense that date strings are not valid timeuuid values, and it was thus resulting in confusing behavior. However, the following new methods have been added to help working with timeuuid: now, minTimeuuid, maxTimeuuid, dateOf and unixTimestampOf.
  • -
  • Float constants now support the exponent notation. In other words, 4.2E10 is now a valid floating point value.
  • -
-
-
-

Versioning

-

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version.

version | description
Major   | The major version must be bumped when backward incompatible changes are introduced. This should rarely occur.
Minor   | Minor version increments occur when new, but backward compatible, functionality is introduced.
Patch   | The patch version is incremented when bugs are fixed.
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/ddl.html b/src/doc/4.0-alpha2/cql/ddl.html deleted file mode 100644 index a558fa70c..000000000 --- a/src/doc/4.0-alpha2/cql/ddl.html +++ /dev/null @@ -1,856 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Definition" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Definition

-

CQL stores data in tables, whose schema defines the layout of the data in the table, and those tables are grouped in keyspaces. A keyspace defines a number of options that apply to all the tables it contains, the most prominent of which is the replication strategy used by the keyspace. It is generally encouraged to use one keyspace per application, and thus many clusters may define only one keyspace.

-

This section describes the statements used to create, modify, and remove those keyspaces and tables.

-
-

Common definitions

-

The names of the keyspaces and tables are defined by the following grammar:

-
-keyspace_name ::=  name
-table_name    ::=  [ keyspace_name '.' ] name
-name          ::=  unquoted_name | quoted_name
-unquoted_name ::=  re('[a-zA-Z_0-9]{1, 48}')
-quoted_name   ::=  '"' unquoted_name '"'
-
-

Both keyspace and table names should consist only of alphanumeric characters, cannot be empty and are limited in size to 48 characters (that limit exists mostly to keep filenames, which may include the keyspace and table name, under the limits of certain file systems). By default, keyspace and table names are case insensitive (myTable is equivalent to mytable) but case sensitivity can be forced by using double-quotes ("myTable" is different from mytable).

-

Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is part of. If it is not fully-qualified, the table is assumed to be in the current keyspace (see USE statement).

-

Further, the valid names for columns are simply defined as:

-
-column_name ::=  identifier
-
-

We also define the notion of statement options for use in the following section:

-
-options ::=  option ( AND option )*
-option  ::=  identifier '=' ( identifier | constant | map_literal )
-
-
-
-

CREATE KEYSPACE

-

A keyspace is created using a CREATE KEYSPACE statement:

-
-create_keyspace_statement ::=  CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options
-
-

For instance:

-
CREATE KEYSPACE excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-    AND durable_writes = false;
-
-
-

Attempting to create a keyspace that already exists will return an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the keyspace already exists.

-

The supported options are:

name           | kind   | mandatory | default | description
replication    | map    | yes       |         | The replication strategy and options to use for the keyspace (see details below).
durable_writes | simple | no        | true    | Whether to use the commit log for updates on this keyspace (disable this option at your own risk!).
-

The replication property is mandatory and must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on which replication strategy is used. By default, Cassandra supports the following 'class' values:

-
-

SimpleStrategy

-

A simple strategy that defines a replication factor for data to be spread -across the entire cluster. This is generally not a wise choice for production -because it does not respect datacenter layouts and can lead to wildly varying -query latency. For a production ready strategy, see -NetworkTopologyStrategy. SimpleStrategy supports a single mandatory argument:

sub-option           | type | since | description
'replication_factor' | int  | all   | The number of replicas to store per range
-
-
-

NetworkTopologyStrategy

-

A production-ready replication strategy that allows you to set the replication factor independently for each data-center. The rest of the sub-options are key-value pairs where a key is a data-center name and its value is the associated replication factor. Options:

sub-option           | type | since | description
'<datacenter>'       | int  | all   | The number of replicas to store per range in the provided datacenter.
'replication_factor' | int  | 4.0   | The number of replicas to use as a default per datacenter if not specifically provided. Note that this always defers to existing definitions or explicit datacenter settings. For example, to have three replicas per datacenter, supply this with a value of 3.
-

Note that when ALTERing keyspaces and supplying replication_factor, auto-expansion will only add new datacenters for safety; it will not alter existing datacenters or remove any, even if they are no longer in the cluster. If you want to remove datacenters while still supplying replication_factor, explicitly zero out the datacenter you want to have zero replicas.

-

An example of auto-expanding datacenters with two datacenters: DC1 and DC2:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true;
-
-
-

An example of auto-expanding and overriding a datacenter:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true;
-
-
-

An example that excludes a datacenter while using replication_factor:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0} ;
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true;
-
-
-

If transient replication has been enabled, transient replicas can be configured for both -SimpleStrategy and NetworkTopologyStrategy by defining replication factors in the format '<total_replicas>/<transient_replicas>'

-

For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:

-
CREATE KEYSPACE some_keyspace
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1', 'DC2' : '5/2'};
-
-
-
-
-
-

USE

-

The USE statement allows you to change the current keyspace (for the connection on which it is executed). A number of objects in CQL are bound to a keyspace (tables, user-defined types, functions, …) and the current keyspace is the default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a keyspace name). A USE statement simply takes the keyspace to use as current as its argument:

-
-use_statement ::=  USE keyspace_name
-
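For instance, to make the excelsior keyspace from the earlier examples the current keyspace:

USE excelsior;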
-
-
-

ALTER KEYSPACE

-

An ALTER KEYSPACE statement allows you to modify the options of a keyspace:

-
-alter_keyspace_statement ::=  ALTER KEYSPACE keyspace_name WITH options
-
-

For instance:

-
ALTER KEYSPACE Excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-
-

The supported options are the same as for creating a keyspace.

-
-
-

DROP KEYSPACE

-

Dropping a keyspace can be done using the DROP KEYSPACE statement:

-
-drop_keyspace_statement ::=  DROP KEYSPACE [ IF EXISTS ] keyspace_name
-
-

For instance:

-
DROP KEYSPACE Excelsior;
-
-
-

Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UDTs and functions in it, and all the data contained in those tables.

-

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-

CREATE TABLE

-

Creating a new table uses the CREATE TABLE statement:

-
-create_table_statement ::=  CREATE TABLE [ IF NOT EXISTS ] table_name
-                            '('
-                                column_definition
-                                ( ',' column_definition )*
-                                [ ',' PRIMARY KEY '(' primary_key ')' ]
-                            ')' [ WITH table_options ]
-column_definition      ::=  column_name cql_type [ STATIC ] [ PRIMARY KEY]
-primary_key            ::=  partition_key [ ',' clustering_columns ]
-partition_key          ::=  column_name
-                            | '(' column_name ( ',' column_name )* ')'
-clustering_columns     ::=  column_name ( ',' column_name )*
-table_options          ::=  COMPACT STORAGE [ AND table_options ]
-                            | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ]
-                            | options
-clustering_order       ::=  column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )*
-
-

For instance:

-
CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records';
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-CREATE TABLE loads (
-    machine inet,
-    cpu int,
-    mtime timeuuid,
-    load float,
-    PRIMARY KEY ((machine, cpu), mtime)
-) WITH CLUSTERING ORDER BY (mtime DESC);
-
-
-

A CQL table has a name and is composed of a set of rows. Creating a table amounts to defining which columns the rows will be composed of, which of those columns make up the primary key, as well as optional options for the table.

-

Attempting to create an already existing table will return an error unless the IF NOT EXISTS directive is used. If -it is used, the statement will be a no-op if the table already exists.

-

Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later using an alter statement).

-

A column_definition is primarily comprised of the name of the column defined and its type, which restricts which values are accepted for that column. Additionally, a column definition can have the following modifiers:

-
-
STATIC
-
it declares the column as being a static column.
-
PRIMARY KEY
-
it declares the column as being the sole component of the primary key of the table.
-
-

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same partition key). For instance:

-
CREATE TABLE t (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-
-INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-SELECT * FROM t;
-   pk | t | v      | s
-  ----+---+--------+-----------
-   0  | 0 | 'val0' | 'static1'
-   0  | 1 | 'val1' | 'static1'
-
-
-

As can be seen, the s value is the same (static1) for both of the rows in the partition (the partition key in that example being pk, both rows are in that same partition): the 2nd insertion has overridden the value for s.

-

The use of static columns has the following restrictions:

-
    -
  • tables with the COMPACT STORAGE option (see below) cannot use them.
  • -
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition -has only one row, and so every column is inherently static).
  • -
  • only non PRIMARY KEY columns can be static.
  • -
-

Within a table, a row is uniquely identified by its PRIMARY KEY, and hence all tables must define a PRIMARY KEY (and only one). A PRIMARY KEY definition is composed of one or more of the columns defined in the table. Syntactically, the primary key is defined by the keywords PRIMARY KEY followed by a comma-separated list of the column names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that column definition by the PRIMARY KEY keywords. The order of the columns in the primary key definition matters.

-

A CQL primary key is composed of 2 parts:

-
    -
  • the partition key part. It is the first component of the primary key definition. It can be a single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key; the smallest possible table definition is:

    -
    CREATE TABLE t (k text PRIMARY KEY);
    -
    -
    -
  • -
  • the clustering columns. Those are the columns after the first component of the primary key -definition, and the order of those columns define the clustering order.

    -
  • -
-

Some example of primary key definition are:

-
    -
  • PRIMARY KEY (a): a is the partition key and there is no clustering columns.
  • -
  • PRIMARY KEY (a, b, c) : a is the partition key and b and c are the clustering columns.
  • -
  • PRIMARY KEY ((a, b), c) : a and b compose the partition key (this is often called a composite partition -key) and c is the clustering column.
  • -
-

Within a table, CQL defines the notion of a partition. A partition is simply the set of rows that share the same value for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same partition only if they have the same values for all those partition key columns. So for instance, given the following table definition and content:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    d int,
-    PRIMARY KEY ((a, b), c, d)
-);
-
-SELECT * FROM t;
-   a | b | c | d
-  ---+---+---+---
-   0 | 0 | 0 | 0    // row 1
-   0 | 0 | 1 | 1    // row 2
-   0 | 1 | 2 | 2    // row 3
-   0 | 1 | 3 | 3    // row 4
-   1 | 1 | 4 | 4    // row 5
-
-
-

row 1 and row 2 are in the same partition, row 3 and row 4 are also in the same partition (but a -different one) and row 5 is in yet another partition.

-

Note that a table always has a partition key, and that if the table has no clustering columns, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns).

-

The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored on the same set of replica nodes. In other words, the partition key of a table defines which rows will be localized together in the cluster, and it is thus important to choose your partition key wisely so that rows that need to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of nodes).

-

Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot.

-

Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done atomically and in isolation, which is not the case across partitions.

-

The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they are.

-

The clustering columns of a table define the clustering order for the partitions of that table. For a given partition, all the rows are physically ordered inside Cassandra by that clustering order. For instance, given:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    PRIMARY KEY (a, b, c)
-);
-
-SELECT * FROM t;
-   a | b | c
-  ---+---+---
-   0 | 0 | 4     // row 1
-   0 | 1 | 9     // row 2
-   0 | 2 | 2     // row 3
-   0 | 3 | 3     // row 4
-
-
-

then the rows (which all belong to the same partition) are all stored internally in the order of the values of their b column (the order they are displayed above). So where the partition key of the table allows grouping rows on the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the retrieval of a range of rows within a partition (for instance, in the example above, SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3) to be very efficient.

-

A CQL table has a number of options that can be set at creation (and, for most of them, altered later). These options are specified after the WITH keyword.

-

Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the COMPACT STORAGE option and the CLUSTERING ORDER option. Those, as well as the other -options of a table are described in the following sections.

-
-

Warning

-

Since Cassandra 3.0, compact tables have the exact same layout internally as non-compact ones (for the same schema obviously), and declaring a table compact only creates artificial limitations on the table definition and usage. It only exists for historical reasons and is preserved for backward compatibility. And as COMPACT STORAGE cannot, as of Cassandra 4.0-alpha2, be removed, creating new tables with the COMPACT STORAGE option is strongly discouraged.

-
-

A compact table is one defined with the COMPACT STORAGE option. This option is only maintained for backward compatibility for definitions created before CQL version 3 and shouldn't be used for new tables. Declaring a table with this option creates limitations for the table which are largely arbitrary (and exist for historical reasons). Amongst those limitations:

-
    -
  • a compact table cannot use collections nor static columns.
  • -
  • if a compact table has at least one clustering column, then it must have exactly one column outside of the primary key ones. This implies, in particular, that you cannot add or remove columns after creation.
  • -
  • a compact table is limited in the indexes it can create, and no materialized view can be created on it.
  • -
-

The clustering order of a table is defined by the clustering columns of that table. By default, that ordering is based on the natural order of those clustering columns, but the CLUSTERING ORDER option allows changing that clustering order to use the reverse natural order for some (potentially all) of the columns.

-

The CLUSTERING ORDER option takes the comma-separated list of the clustering columns, each with an ASC (for ascending, i.e. the natural order) or DESC (for descending, i.e. the reverse natural order) modifier. Note in particular that the default (if the CLUSTERING ORDER option is not used) is strictly equivalent to using the option with all clustering columns using the ASC modifier.

-

Note that this option is basically a hint for the storage engine to change the order in which it stores the rows, but it has three visible consequences:

-
-
1. It limits which ORDER BY clauses are allowed for selects on that table. You can only order results by the clustering order or the reverse clustering order. Meaning that if a table has two clustering columns a and b and you defined WITH CLUSTERING ORDER BY (a DESC, b ASC), then in queries you will be allowed to use ORDER BY (a DESC, b ASC) and (reverse clustering order) ORDER BY (a ASC, b DESC), but not ORDER BY (a ASC, b ASC) (nor ORDER BY (a DESC, b DESC)); see the sketch after this list.
2. It also changes the default order of results when queried (if no ORDER BY is provided): results are always returned in clustering order (within a partition).
3. It has a small performance impact on some queries, as queries in reverse clustering order are slower than the ones in forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of your columns (which is common with time series, for instance, where you often want data from the newest to the oldest), it is an optimization to declare a descending clustering order.
-
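A short sketch of the first consequence, using a hypothetical table:

CREATE TABLE events (
    k int,
    a int,
    b int,
    v text,
    PRIMARY KEY (k, a, b)
) WITH CLUSTERING ORDER BY (a DESC, b ASC);

-- Allowed: the declared clustering order, or its exact reverse
SELECT * FROM events WHERE k = 0 ORDER BY a DESC, b ASC;
SELECT * FROM events WHERE k = 0 ORDER BY a ASC, b DESC;
-- Any other combination, e.g. ORDER BY a ASC, b ASC, is rejected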
-
-

Todo

-

review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance)

-
-

A table supports the following options:

option                      | kind   | default      | description
comment                     | simple | none         | A free-form, human-readable comment.
speculative_retry           | simple | 99PERCENTILE | Speculative retry options.
additional_write_policy     | simple | 99PERCENTILE | Speculative retry options.
gc_grace_seconds            | simple | 864000       | Time to wait before garbage collecting tombstones (deletion markers).
bloom_filter_fp_chance      | simple | 0.00075      | The target probability of false positive of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impacts the size of bloom filters in-memory and on-disk).
default_time_to_live        | simple | 0            | The default expiration time (“TTL”) in seconds for a table.
compaction                  | map    | see below    | Compaction options.
compression                 | map    | see below    | Compression options.
caching                     | map    | see below    | Caching options.
memtable_flush_period_in_ms | simple | 0            | Time (in ms) before Cassandra flushes memtables to disk.
read_repair                 | simple | BLOCKING     | Sets read repair behavior (see below).
-

By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ONE, a quorum for QUORUM, and so on. -speculative_retry determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. additional_write_policy specifies the threshold at which -a cheap quorum write will be upgraded to include transient replicas. The following are legal values (case-insensitive):

-

This setting does not affect reads with consistency level ALL because they already query all replicas.

-

Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default 99PERCENTILE.

-
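For instance, speculative retry can be tuned on the monkeySpecies table defined earlier with a statement along these lines (the value shown is simply the documented default):

ALTER TABLE monkeySpecies WITH speculative_retry = '99PERCENTILE';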

The compaction options must at least define the 'class' sub-option, which defines the compaction strategy class to use. The default supported classes are 'SizeTieredCompactionStrategy' (STCS), 'LeveledCompactionStrategy' (LCS) and 'TimeWindowCompactionStrategy' (TWCS) (the 'DateTieredCompactionStrategy' is also supported but is deprecated and 'TimeWindowCompactionStrategy' should be preferred instead). A custom strategy can be provided by specifying the full class name as a string constant.

-

All default strategies support a number of common options, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: STCS, LCS and TWCS).

-

The compression options define if and how the sstables of the table are compressed. The following sub-options are -available:

Option             | Default       | Description
class              | LZ4Compressor | The compression algorithm to use. Default compressors are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
enabled            | true          | Enable/disable sstable compression.
chunk_length_in_kb | 64            | On disk SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increase the minimum size of data to be read from disk for a read.
crc_check_chance   | 1.0           | When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replicas. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking and to 0.5, for instance, to check them every other read.
-

For instance, to create a table with LZ4Compressor and a chunk_length_in_kb of 4 KB:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
-
-
-

The caching options allow configuring both the key cache and the row cache for the table. The following sub-options are available:

Option             | Default | Description
keys               | ALL     | Whether to cache keys (“key cache”) for this table. Valid values are: ALL and NONE.
rows_per_partition | NONE    | The amount of rows to cache per partition (“row cache”). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.
-

For instance, to create a table with both a key cache and 10 rows per partition:

-
CREATE TABLE simple (
-id int,
-key text,
-value text,
-PRIMARY KEY (key, value)
-) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10};
-
-
-

The read_repair option configures the read repair behavior, allowing tuning for various performance and consistency behaviors. Two consistency properties are affected by read repair behavior.

-
    -
  • Monotonic Quorum Reads: Provided by BLOCKING. Monotonic quorum reads prevents reads from appearing to go back -in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of -replicas, it may be visible in one read, and then disappear in a subsequent read.
  • -
  • Write Atomicity: Provided by NONE. Write atomicity prevents reads from returning partially applied writes. -Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement -is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it -is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a -batch, but then select a single row by specifying the clustering column in a SELECT statement.
  • -
-

The available read repair settings are:

-

The default setting. When read_repair is set to BLOCKING, and a read repair is triggered, the read will block -on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition -level write atomicity

-

When read_repair is set to NONE, the coordinator will reconcile any differences between replicas, but will not -attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads.

-
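As a sketch, a table that favours write atomicity over monotonic quorum reads could opt out of blocking read repair at creation time (the table and columns here are hypothetical):

CREATE TABLE user_batches (
    user_id uuid,
    batch_id timeuuid,
    payload text,
    PRIMARY KEY (user_id, batch_id)
) WITH read_repair = 'NONE';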
    -
  • Adding new columns (see ALTER TABLE below) is a constant time operation. There is thus no need to try to -anticipate future usage when creating a table.
  • -
-
-
-

ALTER TABLE

-

Altering an existing table uses the ALTER TABLE statement:

-
-alter_table_statement   ::=  ALTER TABLE table_name alter_table_instruction
-alter_table_instruction ::=  ADD column_name cql_type ( ',' column_name cql_type )*
-                             | DROP column_name ( column_name )*
-                             | WITH options
-
-

For instance:

-
ALTER TABLE addamsFamily ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table';
-
-
-

The ALTER TABLE statement can:

-
    -
  • Add new column(s) to the table (through the ADD instruction). Note that the primary key of a table cannot be -changed and thus newly added column will, by extension, never be part of the primary key. Also note that compact -tables have restrictions regarding column addition. Note that this is constant (in the amount of -data the cluster contains) time operation.
  • -
  • Remove column(s) from the table. This drops both the column and all its content, but note that while the column -becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings -below. Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the -cluster) time operation.
  • -
  • Change some of the table options (through the WITH instruction). The supported options are the same as when creating a table (with the exception of COMPACT STORAGE and CLUSTERING ORDER, which cannot be changed after creation). Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.
  • -
-
-

Warning

-

Dropping a column assumes that the timestamps used for the value of this column are “real” timestamps in microseconds. Using “real” timestamps in microseconds is the default and is strongly recommended, but as Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another convention. Please be aware that if you do so, dropping a column will not work correctly.

-
-
-

Warning

-

Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one, unless the type of the dropped column was a (non-frozen) collection (due to an internal technical limitation).

-
-
-
-

DROP TABLE

-

Dropping a table uses the DROP TABLE statement:

-
-drop_table_statement ::=  DROP TABLE [ IF EXISTS ] table_name
-
-
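For instance, to drop the timeline table created earlier:

DROP TABLE IF EXISTS timeline;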

Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.

-

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the -operation is a no-op.

-
-
-

TRUNCATE

-

A table can be truncated using the TRUNCATE statement:

-
-truncate_statement ::=  TRUNCATE [ TABLE ] table_name
-
-
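For instance, to remove all data from the monkeySpecies table created earlier:

TRUNCATE monkeySpecies;
TRUNCATE TABLE monkeySpecies;   -- equivalent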

Note that TRUNCATE TABLE foo is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the TABLE keyword can be omitted.

-

Truncating a table permanently removes all existing data from the table, but without removing the table itself.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/definitions.html b/src/doc/4.0-alpha2/cql/definitions.html deleted file mode 100644 index 08bce8f64..000000000 --- a/src/doc/4.0-alpha2/cql/definitions.html +++ /dev/null @@ -1,316 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Definitions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Definitions

-
-

Conventions

-

To aid in specifying the CQL syntax, we will use the following conventions in this document:

-
    -
  • Language rules will be given in an informal BNF variant notation. In particular, we’ll use square brackets ([ item ]) for optional items, * and + for repeated items (where + implies at least one).
  • -
  • The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to -their definition) while terminal keywords will be provided “all caps”. Note however that keywords are -Identifiers and keywords and are thus case insensitive in practice. We will also define some early construction using -regexp, which we’ll indicate with re(<some regular expression>).
  • -
  • The grammar is provided for documentation purposes and leave some minor details out. For instance, the comma on the -last column definition in a CREATE TABLE statement is optional but supported if present even though the grammar in -this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
  • -
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.
  • -
-
-
-

Identifiers and keywords

-

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token -matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

-

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in Appendix A: CQL Keywords.

-

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

-

There is a second kind of identifier called a quoted identifier, defined by enclosing an arbitrary (non-empty) sequence of characters in double-quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column (note that using this is particularly advised), while select would raise a parsing error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is however equivalent to the unquoted identifier obtained by removing the double-quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.
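A few hypothetical statements illustrating how quoting affects identifiers:

CREATE TABLE ids ("myId" int PRIMARY KEY, "another id" text);
SELECT "myId", "another id" FROM ids;   -- quoted identifiers are case sensitive
SELECT "myid" FROM ids;                 -- error: equivalent to the unquoted myid, which does not exist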

-
-

Note

-

quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional update, the server will respond with a result-set containing a special result named "[applied]". If you’ve declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred, but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by square brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

-
-

More formally, we have:

-
-identifier          ::=  unquoted_identifier | quoted_identifier
-unquoted_identifier ::=  re('[a-zA-Z][a-zA-Z0-9_]*')
-quoted_identifier   ::=  '"' (any character where " can appear if doubled)+ '"'
-
-
-
-

Constants

-

CQL defines the following kind of constants:

-
-constant ::=  string | integer | float | boolean | uuid | blob | NULL
-string   ::=  '\'' (any character where ' can appear if doubled)+ '\''
-              '$$' (any character other than '$$') '$$'
-integer  ::=  re('-?[0-9]+')
-float    ::=  re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY
-boolean  ::=  TRUE | FALSE
-uuid     ::=  hex{8}-hex{4}-hex{4}-hex{4}-hex{12}
-hex      ::=  re("[0-9a-fA-F]")
-blob     ::=  '0' ('x' | 'X') hex+
-
-

In other words:

-
    -
  • A string constant is an arbitrary sequence of characters enclosed by single-quote('). A single-quote -can be included by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted -Identifiers and keywords that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence -of characters by two dollar characters, in which case single-quote can be used without escaping ($$It's raining -today$$). That latter form is often used when defining user-defined functions to avoid having to -escape single-quote characters in function body (as they are more likely to occur than $$).
  • -
  • Integer, float and boolean constants are defined as expected. Note however that float allows the special NaN and Infinity constants.
  • -
  • CQL supports UUID constants.
  • -
  • Blobs content are provided in hexadecimal and prefixed by 0x.
  • -
  • The special NULL constant denotes the absence of value.
  • -
-

For how these constants are typed, see the Data Types section.
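For instance, a hypothetical table and insert touching several of these constant kinds:

CREATE TABLE readings (
    id uuid PRIMARY KEY,
    note text,
    payload blob,
    ok boolean,
    score float
);

INSERT INTO readings (id, note, payload, ok, score)
VALUES (123e4567-e89b-12d3-a456-426614174000,   -- uuid constant
        $$It's raining today$$,                 -- dollar-quoted string
        0xCAFE,                                 -- blob constant
        true,                                   -- boolean constant
        -4.2E10);                               -- float with exponent notation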

-
-
-

Terms

-

CQL has the notion of a term, which denotes the kind of values that CQL support. Terms are defined by:

-
-term                 ::=  constant | literal | function_call | arithmetic_operation | type_hint | bind_marker
-literal              ::=  collection_literal | udt_literal | tuple_literal
-function_call        ::=  identifier '(' [ term (',' term)* ] ')'
-arithmetic_operation ::=  '-' term | term ('+' | '-' | '*' | '/' | '%') term
-type_hint            ::=  '(' cql_type `)` term
-bind_marker          ::=  '?' | ':' identifier
-
-

A term is thus one of:

- -
-
-

Comments

-

A comment in CQL is a line beginning with either a double dash (--) or a double slash (//).

-

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-
-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-
-
-
-
-

Statements

-

CQL consists of statements that can be divided into the following categories:

- -

All the statements are listed below and are described in the rest of this documentation:

-
-cql_statement                ::=  statement [ ';' ]
-statement                    ::=  ddl_statement
-                                  | dml_statement
-                                  | secondary_index_statement
-                                  | materialized_view_statement
-                                  | role_or_permission_statement
-                                  | udf_statement
-                                  | udt_statement
-                                  | trigger_statement
-ddl_statement                ::=  use_statement
-                                  | create_keyspace_statement
-                                  | alter_keyspace_statement
-                                  | drop_keyspace_statement
-                                  | create_table_statement
-                                  | alter_table_statement
-                                  | drop_table_statement
-                                  | truncate_statement
-dml_statement                ::=  select_statement
-                                  | insert_statement
-                                  | update_statement
-                                  | delete_statement
-                                  | batch_statement
-secondary_index_statement    ::=  create_index_statement
-                                  | drop_index_statement
-materialized_view_statement  ::=  create_materialized_view_statement
-                                  | drop_materialized_view_statement
-role_or_permission_statement ::=  create_role_statement
-                                  | alter_role_statement
-                                  | drop_role_statement
-                                  | grant_role_statement
-                                  | revoke_role_statement
-                                  | list_roles_statement
-                                  | grant_permission_statement
-                                  | revoke_permission_statement
-                                  | list_permissions_statement
-                                  | create_user_statement
-                                  | alter_user_statement
-                                  | drop_user_statement
-                                  | list_users_statement
-udf_statement                ::=  create_function_statement
-                                  | drop_function_statement
-                                  | create_aggregate_statement
-                                  | drop_aggregate_statement
-udt_statement                ::=  create_type_statement
-                                  | alter_type_statement
-                                  | drop_type_statement
-trigger_statement            ::=  create_trigger_statement
-                                  | drop_trigger_statement
-
-
-
-

Prepared Statements

-

CQL supports prepared statements. Prepared statements are an optimization that allows a query to be parsed only once and executed multiple times with different concrete values.

-

Any statement that uses at least one bind marker (see bind_marker) will need to be prepared, after which the statement can be executed by providing concrete values for each of its markers. The exact details of how a statement is prepared and then executed depend on the CQL driver used; refer to your driver's documentation.
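As an illustration, the statements below use a positional and a named bind marker respectively (they reuse the users table from the SELECT examples later in this document); the actual prepare and execute calls are driver-specific:

SELECT name, occupation FROM users WHERE userid = ?;
UPDATE users SET occupation = :occupation WHERE userid = :userid;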

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/dml.html b/src/doc/4.0-alpha2/cql/dml.html deleted file mode 100644 index 55617ec29..000000000 --- a/src/doc/4.0-alpha2/cql/dml.html +++ /dev/null @@ -1,560 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Manipulation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Manipulation

-

This section describes the statements supported by CQL to insert, update, delete and query data.

-
-

SELECT

-

Querying data from a table is done using a SELECT statement:

-
-select_statement ::=  SELECT [ JSON | DISTINCT ] ( select_clause | '*' )
-                      FROM table_name
-                      [ WHERE where_clause ]
-                      [ GROUP BY group_by_clause ]
-                      [ ORDER BY ordering_clause ]
-                      [ PER PARTITION LIMIT (integer | bind_marker) ]
-                      [ LIMIT (integer | bind_marker) ]
-                      [ ALLOW FILTERING ]
-select_clause    ::=  selector [ AS identifier ] ( ',' selector [ AS identifier ] )
-selector         ::=  column_name
-                      | term
-                      | CAST '(' selector AS cql_type ')'
-                      | function_name '(' [ selector ( ',' selector )* ] ')'
-                      | COUNT '(' '*' ')'
-where_clause     ::=  relation ( AND relation )*
-relation         ::=  column_name operator term
-                      '(' column_name ( ',' column_name )* ')' operator tuple_literal
-                      TOKEN '(' column_name ( ',' column_name )* ')' operator term
-operator         ::=  '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY
-group_by_clause  ::=  column_name ( ',' column_name )*
-ordering_clause  ::=  column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )*
-
-

For instance:

-
SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT (*) AS user_count FROM users;
-
-
-

The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of the rows matching the request, where each row contains the values for the selection corresponding to the query. Additionally, functions, including aggregation ones, can be applied to the result.

-

A SELECT statement contains at least a selection clause and the name of the table on which the selection is made (note that CQL does not support joins or sub-queries, and thus a SELECT statement only applies to a single table). In most cases, a SELECT will also have a where clause, and it can optionally have additional clauses to order or limit the results. Lastly, queries that require filtering can be allowed if the ALLOW FILTERING flag is provided.

-
-

Selection clause

-

The select_clause determines which columns need to be queried and returned in the result-set, as well as any transformations to apply to this result before returning. It consists of a comma-separated list of selectors or, alternatively, of the wildcard character (*) to select all the columns defined in the table.

-
-

Selectors

-

A selector can be one of:

-
  • A column name of the table selected, to retrieve the values for that column.
  • A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the corresponding column of the result-set will simply have the value of this term for every row returned).
  • A casting, which allows converting a nested selector to a (compatible) type.
  • A function call, where the arguments are selectors themselves. See the section on functions for more details.
  • The special call COUNT(*) to the COUNT function, which counts all non-null results.
-
-
-

Aliases

-

Every top-level selector can also be aliased (using AS). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:

-
// Without alias
-SELECT intAsBlob(4) FROM t;
-
-//  intAsBlob(4)
-// --------------
-//  0x00000004
-
-// With alias
-SELECT intAsBlob(4) AS four FROM t;
-
-//  four
-// ------------
-//  0x00000004
-
-
-
-

Note

-

Currently, aliases aren’t recognized anywhere else in the statement where they are used (not in the WHERE clause, not in the ORDER BY clause, …). You must use the original column name instead.

-
-
-
-

WRITETIME and TTL function

-

Selection supports two special functions (that aren’t allowed anywhere else): WRITETIME and TTL. Both functions take only one argument, and that argument must be a column name (so for instance TTL(3) is invalid).

-

Those functions allow retrieving meta-information that is stored internally for each column, namely:

-
  • the timestamp of the value of the column for WRITETIME.
  • the remaining time to live (in seconds) for the value of the column if it is set to expire (and null otherwise).
-
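For instance, reusing the users table from the earlier SELECT examples (a sketch, assuming occupation is a regular, non-primary-key column):

SELECT WRITETIME (occupation), TTL (occupation) FROM users WHERE userid = 199;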
-
-
-

The WHERE clause

-

The WHERE clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the PRIMARY KEY and/or have a secondary index defined on them.

-

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered as an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations on them are restricted to those that allow selecting a contiguous (for the ordering) set of rows. For instance, given:

-
CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-
-
-

The following query is allowed:

-
SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND blog_title='John''s Blog'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):

-
// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, -rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -token(-1) > token(0) in particular). Example:

-
SELECT * FROM posts
- WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-
-
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full -primary key.

-

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-
-
-

will request all rows that sort after the one having “John’s Blog” as blog_title and ‘2012-01-01’ for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which would not be the case for:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND blog_title > 'John''s Blog'
-   AND posted_at > '2012-01-01'
-
-
-

The tuple notation may also be used for IN clauses on clustering columns:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-
-
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the -map keys.

-
-
-

Grouping results

-

The GROUP BY option allows condensing into a single row all selected rows that share the same values for a set of columns.

-

Using the GROUP BY option, it is only possible to group rows at the partition key level or at a clustering column level. Consequently, the GROUP BY option only accepts primary key column names as arguments, in primary key order. If a primary key column is restricted by an equality restriction, it is not required to be present in the GROUP BY clause.

-

Aggregate functions will produce a separate value for each group. If no GROUP BY clause is specified, aggregate functions will produce a single value for all the rows.

-

If a column is selected without an aggregate function in a statement with a GROUP BY, the first value encountered in each group will be returned.
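For instance, reusing the posts table defined earlier (a sketch: it groups at the first clustering column level, naming primary key columns in primary key order):

SELECT userid, blog_title, count(*)
  FROM posts
 GROUP BY userid, blog_title;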

-
-
-

Ordering results

-

The ORDER BY clause allows selecting the order of the returned results. It takes as argument a list of column names along with the order for the column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited by the clustering order defined on the table:

-
  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • otherwise, the allowed orderings are the order of the CLUSTERING ORDER option and the reversed one.
-
-
-

Limiting results

-

The LIMIT option to a SELECT statement limits the number of rows returned by a query, while the PER PARTITION LIMIT option limits the number of rows returned for a given partition by the query. Note that both types of limit can be used in the same statement.
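For instance, reusing the posts table from above (a sketch: at most 2 rows per partition and at most 10 rows overall):

SELECT * FROM posts PER PARTITION LIMIT 2 LIMIT 10;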

-
-
-

Allowing filtering

-

By default, CQL only allows select queries that don’t involve “filtering” server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

-

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (for the definition above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

-

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:

-
CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-
-
-

Then the following queries are valid:

-
SELECT * FROM users;
-SELECT * FROM users WHERE birth_year = 1981;
-
-
-

because in both cases, Cassandra guarantees that the performance of these queries will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored. Nevertheless, the number of nodes will always be multiple orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

-

However, the following query will be rejected:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-
-

because Cassandra cannot guarantee that it won’t have to scan a large amount of data even if the result of the query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you “know what you are doing”, you can force the execution of this query by using ALLOW FILTERING, and so the following query is valid:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-
-
-
-
-

INSERT

-

Inserting data for a row is done using an INSERT statement:

-
-insert_statement ::=  INSERT INTO table_name ( names_values | json_clause )
-                      [ IF NOT EXISTS ]
-                      [ USING update_parameter ( AND update_parameter )* ]
-names_values     ::=  names VALUES tuple_literal
-json_clause      ::=  JSON string [ DEFAULT ( NULL | UNSET ) ]
-names            ::=  '(' column_name ( ',' column_name )* ')'
-
-

For instance:

-
INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-      USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                              "director": "Joss Whedon",
-                              "year": 2005}';
-
-
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by -its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the -section on JSON support for more detail.

-

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means to know whether a creation or an update happened.

-

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the insertion. But please note that using IF NOT EXISTS will incur a non-negligible performance cost (internally, Paxos will be used), so this should be used sparingly.

-

All updates for an INSERT are applied atomically and in isolation.

-

Please refer to the UPDATE section for information on the update_parameter.

-

Also note that INSERT does not support counters, while UPDATE does.

-
-
-

UPDATE

-

Updating a row is done using an UPDATE statement:

-
-update_statement ::=  UPDATE table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      SET assignment ( ',' assignment )*
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-update_parameter ::=  ( TIMESTAMP | TTL ) ( integer | bind_marker )
-assignment       ::=  simple_selection '=' term
-                     | column_name '=' column_name ( '+' | '-' ) term
-                     | column_name '=' list_literal '+' column_name
-simple_selection ::=  column_name
-                     | column_name '[' term ']'
-                     | column_name '.' `field_name
-condition        ::=  simple_selection operator term
-
-

For instance:

-
UPDATE NerdMovies USING TTL 400
-   SET director   = 'Joss Whedon',
-       main_actor = 'Nathan Fillion',
-       year       = 2005
- WHERE movie = 'Serenity';
-
-UPDATE UserActions
-   SET total = total + 2
-   WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14
-     AND action = 'click';
-
-
-

The UPDATE statement writes one or more columns for a given row in a table. The where_clause is used to -select the row to update and must include all columns composing the PRIMARY KEY. Non primary key columns are then -set using the SET keyword.

-

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through IF, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred.

-

It is however possible to use conditions on some columns through IF, in which case the row will not be updated unless the conditions are met. But please note that using IF conditions will incur a non-negligible performance cost (internally, Paxos will be used), so this should be used sparingly.

-

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

-

Regarding the assignment:

-
  • c = c + 3 is used to increment/decrement counters. The column name after the ‘=’ sign must be the same as the one before the ‘=’ sign. Note that increment/decrement is only allowed on counters, and these are the only update operations allowed on counters. See the section on counters for details.
  • id = id + <some-collection> and id[value1] = value2 are for collections; see the relevant section for details.
  • id.field = 3 is for setting the value of a field on a non-frozen user-defined type; see the relevant section for details.
-
-

Update parameters

-

The UPDATE, INSERT (and DELETE and BATCH for the TIMESTAMP) statements support the following -parameters:

-
    -
  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in -microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • -
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are -automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not -the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL -is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a -default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of null is equivalent -to inserting with a TTL of 0.
  • -
-
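For instance, reusing the NerdMovies table from the example above (a sketch combining both parameters; the TTL and timestamp values are arbitrary, the timestamp is in microseconds):

UPDATE NerdMovies USING TTL 86400 AND TIMESTAMP 1240003134
   SET main_actor = 'Nathan Fillion'
 WHERE movie = 'Serenity';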
-
-
-

DELETE

-

Deleting rows or parts of rows uses the DELETE statement:

-
-delete_statement ::=  DELETE [ simple_selection ( ',' simple_selection ) ]
-                      FROM table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-
-

For instance:

-
DELETE FROM NerdMovies USING TIMESTAMP 1240003134
- WHERE movie = 'Serenity';
-
-DELETE phone FROM Users
- WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-
-
-

The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, -only those columns are deleted from the row indicated by the WHERE clause. Otherwise, whole rows are removed.

-

The WHERE clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -IN operator. A range of rows may be deleted using an inequality operator (such as >=).

-

DELETE supports the TIMESTAMP option with the same semantics as in updates.

-

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

-

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT -statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly.

-
-
-

BATCH

-

Multiple INSERT, UPDATE and DELETE can be executed in a single statement by grouping them through a -BATCH statement:

-
-batch_statement        ::=  BEGIN [ UNLOGGED | COUNTER ] BATCH
-                            [ USING update_parameter ( AND update_parameter )* ]
-                            modification_statement ( ';' modification_statement )*
-                            APPLY BATCH
-modification_statement ::=  insert_statement | update_statement | delete_statement
-
-

For instance:

-
BEGIN BATCH
-   INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-   DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-
-
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

-
    -
  • It saves network round-trips between the client and the server (and sometimes between the server coordinator and the -replicas) when batching multiple updates.
  • -
  • All updates in a BATCH belonging to a given partition key are performed in isolation.
  • -
  • By default, all operations in the batch are performed as logged, to ensure all mutations eventually complete (or -none will). See the notes on UNLOGGED batches for more details.
  • -
-

Note that:

-
    -
  • BATCH statements may only contain UPDATE, INSERT and DELETE statements (not other batches for instance).
  • -
  • Batches are not a full analogue for SQL transactions.
  • -
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp -(either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra’s conflict -resolution procedure in the case of timestamp ties, operations may -be applied in an order that is different from the order they are listed in the BATCH statement. To force a -particular operation ordering, you must specify per-operation timestamps.
  • -
  • A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
  • -
-
-

UNLOGGED batches

-

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition).

-

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.
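For instance (a sketch reusing the users table from the BATCH example above; the userid values are illustrative):

BEGIN UNLOGGED BATCH
   INSERT INTO users (userid, password) VALUES ('user5', 'ch@ngem3d');
   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user6';
APPLY BATCH;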

-
-
-

COUNTER batches

-

Use the COUNTER option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent.
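For instance (a sketch reusing the UserActions counter table from the UPDATE example above):

BEGIN COUNTER BATCH
   UPDATE UserActions SET total = total + 2 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';
   UPDATE UserActions SET total = total + 1 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'view';
APPLY BATCH;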

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/functions.html b/src/doc/4.0-alpha2/cql/functions.html deleted file mode 100644 index 3f12b487f..000000000 --- a/src/doc/4.0-alpha2/cql/functions.html +++ /dev/null @@ -1,705 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Functions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Functions

-

CQL supports 2 main categories of functions:

-
  • the scalar functions, which simply take a number of values and produce an output from them.
  • the aggregate functions, which are used to aggregate multiple rows of results from a SELECT statement.
-

In both cases, CQL provides a number of native “hard-coded” functions as well as the ability to create new user-defined -functions.

-
-

Note

-

The use of user-defined functions is disabled by default for security concerns (even when enabled, the execution of user-defined functions is sandboxed and a “rogue” function should not be allowed to do evil, but no sandbox is perfect, so using user-defined functions is opt-in). See the enable_user_defined_functions setting in cassandra.yaml to enable them.

-
-

A function is identified by its name:

-
-function_name ::=  [ keyspace_name '.' ] name
-
-
-

Scalar functions

-
-

Native functions

-
-

Cast

-

The cast function can be used to convert one native datatype to another.

-

The following table describes the conversions supported by the cast function. Cassandra will silently ignore any -cast converting a datatype into its own datatype.

 From      | To
 ----------+------------------------------------------------------------------------------
 ascii     | text, varchar
 bigint    | tinyint, smallint, int, float, double, decimal, varint, text, varchar
 boolean   | text, varchar
 counter   | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
 date      | timestamp
 decimal   | tinyint, smallint, int, bigint, float, double, varint, text, varchar
 double    | tinyint, smallint, int, bigint, float, decimal, varint, text, varchar
 float     | tinyint, smallint, int, bigint, double, decimal, varint, text, varchar
 inet      | text, varchar
 int       | tinyint, smallint, bigint, float, double, decimal, varint, text, varchar
 smallint  | tinyint, int, bigint, float, double, decimal, varint, text, varchar
 time      | text, varchar
 timestamp | date, text, varchar
 timeuuid  | timestamp, date, text, varchar
 tinyint   | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
 uuid      | text, varchar
 varint    | tinyint, smallint, int, bigint, float, double, decimal, text, varchar
-

The conversions rely strictly on Java’s semantics. For example, the double value 1 will be converted to the text value -‘1.0’. For instance:

-
SELECT avg(cast(count as double)) FROM myTable
-
-
-
-
-

Token

-

The token function computes the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

-

The types of the arguments of token depend on the types of the partition key columns. The return type depends on the partitioner in use:

-
    -
  • For Murmur3Partitioner, the return type is bigint.
  • -
  • For RandomPartitioner, the return type is varint.
  • -
  • For ByteOrderedPartitioner, the return type is blob.
  • -
-

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:

-
CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-)
-
-
-

then the token function will take a single argument of type text (in that case, the partition key is userid; there are no clustering columns, so the partition key is the same as the primary key), and the return type will be bigint.

-
-
-

Uuid

-

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or -UPDATE statements.
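For instance (a sketch; the audit_log table and its columns are hypothetical and only serve to illustrate the call):

INSERT INTO audit_log (id, message) VALUES (uuid(), 'login');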

-
-
-

Timeuuid functions

-
-
now
-

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the -time the function is invoked. Note that this method is useful for insertion but is largely non-sensical in -WHERE clauses. For instance, a query of the form:

-
SELECT * FROM myTable WHERE t = now()
-
-
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

-

currentTimeUUID is an alias of now.

-
-
-
minTimeuuid and maxTimeuuid
-

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having timestamp t. So for instance:

-
SELECT * FROM myTable
- WHERE t > maxTimeuuid('2013-01-01 00:05+0000')
-   AND t < minTimeuuid('2013-02-02 10:00+0000')
-
-
-

will select all rows where the timeuuid column t is strictly older than '2013-01-01 00:05+0000' but strictly -younger than '2013-02-02 10:00+0000'. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still -not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > -maxTimeuuid('2013-01-01 00:05+0000').

-
-

Note

-

We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the Time-Based UUID generation process specified by RFC 4122. In particular, the values returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

-
-
-
-
-

Datetime functions

-
-
Retrieving the current date/time
-

The following functions can be used to retrieve the date/time at the time where the function is invoked:

 Function name    | Output type
 -----------------+------------
 currentTimestamp | timestamp
 currentDate      | date
 currentTime      | time
 currentTimeUUID  | timeUUID
-

For example the last 2 days of data can be retrieved using:

-
SELECT * FROM myTable WHERE date >= currentDate() - 2d
-
-
-
-
-
Time conversion functions
-

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native -type.

 Function name   | Input type | Description
 ----------------+------------+---------------------------------------------------------
 toDate          | timeuuid   | Converts the timeuuid argument into a date type
 toDate          | timestamp  | Converts the timestamp argument into a date type
 toTimestamp     | timeuuid   | Converts the timeuuid argument into a timestamp type
 toTimestamp     | date       | Converts the date argument into a timestamp type
 toUnixTimestamp | timeuuid   | Converts the timeuuid argument into a bigInt raw value
 toUnixTimestamp | timestamp  | Converts the timestamp argument into a bigInt raw value
 toUnixTimestamp | date       | Converts the date argument into a bigInt raw value
 dateOf          | timeuuid   | Similar to toTimestamp(timeuuid) (DEPRECATED)
 unixTimestampOf | timeuuid   | Similar to toUnixTimestamp(timeuuid) (DEPRECATED)
-
-
-
-

Blob conversion functions

-

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL (a notable exception being blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a blob argument and converts it back to a value of type type. So for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.

-
-
-
-

User-defined functions

-

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath.

-

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

-

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

-
CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-
-
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing.

-

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types.

-

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

-

Note that you can use the double dollar-sign ($$) string syntax to enclose the UDF source code. For example:

-
CREATE FUNCTION some_function ( arg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS int
-    LANGUAGE java
-    AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-
-CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen<custom_type> )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$ return udtarg.getString("txt"); $$;
-
-
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

-

The implicitly available udfContext field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:

-
CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( somearg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS custom_type
-    LANGUAGE java
-    AS $$
-        UDTValue udt = udfContext.newReturnUDTValue();
-        udt.setString("txt", "some string");
-        udt.setInt("i", 42);
-        return udt;
-    $$;
-
-
-

The definition of the UDFContext interface can be found in the Apache Cassandra source code for -org.apache.cassandra.cql3.functions.UDFContext.

-
public interface UDFContext
-{
-    UDTValue newArgUDTValue(String argName);
-    UDTValue newArgUDTValue(int argNum);
-    UDTValue newReturnUDTValue();
-    UDTValue newUDTValue(String udtName);
-    TupleValue newArgTupleValue(String argName);
-    TupleValue newArgTupleValue(int argNum);
-    TupleValue newReturnTupleValue();
-    TupleValue newTupleValue(String cqlDefinition);
-}
-
-
-

Java UDFs already have some imports for common interfaces and classes defined. These imports are:

-
import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.cassandra.cql3.functions.UDFContext;
-import com.datastax.driver.core.TypeCodec;
-import com.datastax.driver.core.TupleValue;
-import com.datastax.driver.core.UDTValue;
-
-
-

Please note, that these convenience imports are not available for script UDFs.

-
-

CREATE FUNCTION

-

Creating a new user-defined function uses the CREATE FUNCTION statement:

-
-create_function_statement ::=  CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS]
-                                   function_name '(' arguments_declaration ')'
-                                   [ CALLED | RETURNS NULL ] ON NULL INPUT
-                                   RETURNS cql_type
-                                   LANGUAGE identifier
-                                   AS string
-arguments_declaration     ::=  identifier cql_type ( ',' identifier cql_type )*
-
-

For instance:

-
CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list<bigint>)
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int)
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-
-

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with -the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already -exists.

-

If the optional IF NOT EXISTS keywords are used, the function will -only be created if another function with the same signature does not -exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

Behavior on invocation with null values must be defined for each -function. There are two options:

-
    -
  1. RETURNS NULL ON NULL INPUT declares that the function will always -return null if any of the input arguments is null.
  2. -
  3. CALLED ON NULL INPUT declares that the function will always be -executed.
  4. -
-
-
Function Signature
-

Signatures are used to distinguish individual functions. The signature consists of:

-
    -
  1. The fully qualified function name - i.e keyspace plus function-name
  2. -
  3. The concatenated list of all argument types
  4. -
-

Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules.

-

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. -the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the -system keyspaces.

-
-
-
-

DROP FUNCTION

-

Dropping a function uses the DROP FUNCTION statement:

-
-drop_function_statement ::=  DROP FUNCTION [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-arguments_signature     ::=  cql_type ( ',' cql_type )*
-
-

For instance:

-
DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-
-
-

You must specify the argument types (arguments_signature) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions).

-

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists, but does not throw an error if -it doesn’t

-
-
-
-
-

Aggregate functions

-

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.

-

If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query.

-
-

Native aggregates

-
-

Count

-

The count function can be used to count the rows returned by a query. Example:

-
SELECT COUNT (*) FROM plays;
-SELECT COUNT (1) FROM plays;
-
-
-

It can also be used to count the non-null values of a given column:

-
SELECT COUNT (scores) FROM plays;
-
-
-
-
-

Max and Min

-

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:

-
SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake';
-
-
-
-
-

Sum

-

The sum function can be used to sum up all the values returned by a query for a given column. For instance:

-
SELECT SUM (players) FROM plays;
-
-
-
-
-

Avg

-

The avg function can be used to compute the average of all the values returned by a query for a given column. For -instance:

-
SELECT AVG (players) FROM plays;
-
-
-
-
-
-

User-Defined Aggregates

-

User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -count, min, and max.

-

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first argument of the state function must have type STYPE. The remaining arguments of the state function must match the types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with the last state value as its argument.

-

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate).

-

User-defined aggregates can be used in SELECT statements.

-

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE -statement):

-
CREATE OR REPLACE FUNCTION averageState(state tuple<int,bigint>, val int)
-    CALLED ON NULL INPUT
-    RETURNS tuple<int, bigint>
-    LANGUAGE java
-    AS $$
-        if (val != null) {
-            state.setInt(0, state.getInt(0)+1);
-            state.setLong(1, state.getLong(1)+val.intValue());
-        }
-        return state;
-    $$;
-
-CREATE OR REPLACE FUNCTION averageFinal (state tuple<int,bigint>)
-    CALLED ON NULL INPUT
-    RETURNS double
-    LANGUAGE java
-    AS $$
-        double r = 0;
-        if (state.getInt(0) == 0) return null;
-        r = state.getLong(1);
-        r /= state.getInt(0);
-        return Double.valueOf(r);
-    $$;
-
-CREATE OR REPLACE AGGREGATE average(int)
-    SFUNC averageState
-    STYPE tuple<int, bigint>
-    FINALFUNC averageFinal
-    INITCOND (0, 0);
-
-CREATE TABLE atable (
-    pk int PRIMARY KEY,
-    val int
-);
-
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-
-SELECT average(val) FROM atable;
-
-
-
-

CREATE AGGREGATE

-

Creating (or replacing) a user-defined aggregate function uses the CREATE AGGREGATE statement:

-
-create_aggregate_statement ::=  CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ]
-                                    function_name '(' arguments_signature ')'
-                                    SFUNC function_name
-                                    STYPE cql_type
-                                    [ FINALFUNC function_name ]
-                                    [ INITCOND term ]
-
-

See above for a complete example.

-

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one -with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature -already exists.

-

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

STYPE defines the type of the state value and must be specified.

-

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null -INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

-

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the -state function must match STYPE. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called -with null.

-

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with -type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS -NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

-

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is -defined, it is the return type of that function.

-
-
-

DROP AGGREGATE

-

Dropping a user-defined aggregate function uses the DROP AGGREGATE statement:

-
-drop_aggregate_statement ::=  DROP AGGREGATE [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-
-

For instance:

-
DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-
-
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates).

-

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist.

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/index.html b/src/doc/4.0-alpha2/cql/index.html deleted file mode 100644 index 4b2f1fcf7..000000000 --- a/src/doc/4.0-alpha2/cql/index.html +++ /dev/null @@ -1,246 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "The Cassandra Query Language (CQL)" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

The Cassandra Query Language (CQL)

-

This document describes the Cassandra Query Language (CQL) [1]. Note that this document describes the last version of the language. However, the changes section provides the diff between the different versions of CQL.

-

CQL offers a model close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL.

[1] Technically, this document describes CQL version 3, which is not backward compatible with CQL versions 1 and 2 (which have been deprecated and removed) and differs from them in numerous ways.
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/indexes.html b/src/doc/4.0-alpha2/cql/indexes.html deleted file mode 100644 index 40c4450ac..000000000 --- a/src/doc/4.0-alpha2/cql/indexes.html +++ /dev/null @@ -1,170 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Secondary Indexes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Secondary Indexes

-

CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by:

-
-index_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE INDEX

-

Creating a secondary index on a table uses the CREATE INDEX statement:

-
-create_index_statement ::=  CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ index_name ]
-                                ON table_name '(' index_identifier ')'
-                                [ USING string [ WITH OPTIONS = map_literal ] ]
-index_identifier       ::=  column_name
-                           | ( KEYS | VALUES | ENTRIES | FULL ) '(' column_name ')'
-
-

For instance:

-
CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-
-
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time.

-

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it -is used, the statement will be a no-op if the index already exists.

-
-

Indexes on Map Keys

-

When creating an index on a map, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.
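For instance, with the CREATE INDEX ON users (keys(favs)) example above (a sketch, assuming favs is a map column of the users table):

SELECT * FROM users WHERE favs CONTAINS KEY 'movies';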

-
-
-
-

DROP INDEX

-

Dropping a secondary index uses the DROP INDEX statement:

-
-drop_index_statement ::=  DROP INDEX [ IF EXISTS ] index_name
-
-

The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index.

-

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/json.html b/src/doc/4.0-alpha2/cql/json.html deleted file mode 100644 index 9138635ae..000000000 --- a/src/doc/4.0-alpha2/cql/json.html +++ /dev/null @@ -1,317 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "JSON Support" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

JSON Support

-

Cassandra 2.2 introduces JSON support to SELECT and INSERT -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents.

-
-

SELECT JSON

-

With SELECT statements, the JSON keyword can be used to return each row as a single JSON encoded map. The -remainder of the SELECT statement behavior is the same.

-

The result map keys are the same as the column names in a normal result set. For example, a statement like SELECT JSON a, ttl(b) FROM ... would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, SELECT JSON myColumn FROM ... would result in a map key "\"myColumn\"" (note the escaped quotes).

-

The map values will be JSON-encoded representations (as described below) of the result set values.
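As a sketch (assuming the mytable table, with a case-sensitive "myKey" column and a value column, used in the INSERT JSON example below), each row comes back as a single JSON-encoded column:

SELECT JSON * FROM mytable;

//  [json]
// -----------------------------
//  {"\"myKey\"": 0, "value": 0}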

-
-
-

INSERT JSON

-

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single -row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named “myKey” and “value”, you would do the following:

-
INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-
-

By default (or if DEFAULT NULL is explicitly used), a column omitted from the JSON map will be set to NULL, -meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). -Alternatively, if the DEFAULT UNSET directive is used after the value, omitted column values will be left unset, -meaning that pre-existing values for those column will be preserved.

-
-
-

JSON Encoding of Cassandra Data Types

-

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string -representation of the collection.

-

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() -arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and -fromJson()):

 Type      | Formats accepted       | Return format | Notes
 ----------+------------------------+---------------+----------------------------------------------------------------
 ascii     | string                 | string        | Uses JSON’s \u character escape
 bigint    | integer, string        | integer       | String must be valid 64 bit integer
 blob      | string                 | string        | String should be 0x followed by an even number of hex digits
 boolean   | boolean, string        | boolean       | String must be “true” or “false”
 date      | string                 | string        | Date in format YYYY-MM-DD, timezone UTC
 decimal   | integer, float, string | float         | May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder
 double    | integer, float, string | float         | String must be valid integer or float
 float     | integer, float, string | float         | String must be valid integer or float
 inet      | string                 | string        | IPv4 or IPv6 address
 int       | integer, string        | integer       | String must be valid 32 bit integer
 list      | list, string           | list          | Uses JSON’s native list representation
 map       | map, string            | map           | Uses JSON’s native map representation
 smallint  | integer, string        | integer       | String must be valid 16 bit integer
 set       | list, string           | list          | Uses JSON’s native list representation
 text      | string                 | string        | Uses JSON’s \u character escape
 time      | string                 | string        | Time of day in format HH-MM-SS[.fffffffff]
 timestamp | integer, string        | string        | A timestamp. String constants allow inputting timestamps as dates. Timestamps with format YYYY-MM-DD HH:MM:SS.SSS are returned.
 timeuuid  | string                 | string        | Type 1 UUID. See constant for the UUID format
 tinyint   | integer, string        | integer       | String must be valid 8 bit integer
 tuple     | list, string           | list          | Uses JSON’s native list representation
 UDT       | map, string            | map           | Uses JSON’s native map representation with field names as keys
 uuid      | string                 | string        | See constant for the UUID format
 varchar   | string                 | string        | Uses JSON’s \u character escape
 varint    | integer, string        | integer       | Variable length; may overflow 32 or 64 bit integers in client-side decoder
-
-
-

The fromJson() Function

-

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used -in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or -SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.
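For instance (a sketch reusing the mytable example from above):

INSERT INTO mytable ("myKey", value) VALUES (0, fromJson('42'));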

-
-
-

The toJson() Function

-

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used -in the selection clause of a SELECT statement.
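A minimal sketch, again assuming the mytable example above:

SELECT "myKey", toJson(value) FROM mytable;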

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/mvs.html b/src/doc/4.0-alpha2/cql/mvs.html deleted file mode 100644 index a08324175..000000000 --- a/src/doc/4.0-alpha2/cql/mvs.html +++ /dev/null @@ -1,260 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Materialized Views" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Materialized Views

-

Materialized view names are defined by:

-
-view_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE MATERIALIZED VIEW

-

You can create a materialized view on a table using a CREATE MATERIALIZED VIEW statement:

-
-create_materialized_view_statement ::=  CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] view_name AS
-                                            select_statement
-                                            PRIMARY KEY '(' primary_key ')'
-                                            WITH table_options
-
-

For instance:

-
CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT * FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-
-
-

The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which -corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view.

-

Creating a materialized view has 3 main parts:

- -

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is -used. If it is used, the statement will be a no-op if the materialized view already exists.

-
-

Note

-

By default, materialized views are built in a single thread. The initial build can be parallelized by -increasing the number of threads specified by the property concurrent_materialized_view_builders in -cassandra.yaml. This property can also be manipulated at runtime through both JMX and the -setconcurrentviewbuilders and getconcurrentviewbuilders nodetool commands.

-
-
-

MV select statement

-

The select statement of a materialized view creation defines which rows of the base table are included in the view. That statement is limited in a number of ways:

-
    -
  • the selection is limited to columns of the base table: you can’t use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can however use * as a shortcut for selecting all columns. Further, static columns cannot be included in a materialized view (which means SELECT * isn’t allowed if the base table has static columns).
  • -
  • the WHERE clause has the following restrictions:
      -
    • it cannot include any bind_marker.
    • -
    • the columns that are not part of the base table primary key can only be restricted by an IS NOT NULL -restriction. No other restriction is allowed.
    • -
    • as the columns that are part of the view primary key cannot be null, they must always be at least restricted by a -IS NOT NULL restriction (or any other restriction, but they must have one).
    • -
    -
  • -
  • it cannot have an ordering clause, a LIMIT, or ALLOW FILTERING.
  • -
-
-
-

MV primary key

-

A view must have a primary key and that primary key must conform to the following restrictions:

-
    -
  • it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to exactly one row of the base table.
  • -
  • it can only contain a single column that is not a primary key column in the base table.
  • -
-

So for instance, given the following base table definition:

-
CREATE TABLE t (
-    k int,
-    c1 int,
-    c2 int,
-    v1 int,
-    v2 int,
-    PRIMARY KEY (k, c1, c2)
-)
-
-
-

then the following view definitions are allowed:

-
CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, k, c2)
-
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (v1, k, c1, c2)
-
-
-

but the following ones are not allowed:

-
// Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-    PRIMARY KEY (v1, v2, k, c1, c2)
-
-// Error: must include k in the primary as it's a base table primary key column
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, c2)
-
-
-
-
-

MV options

-

A materialized view is internally implemented by a table and as such, creating a MV allows the same options as creating a table.

-
-
-
-

ALTER MATERIALIZED VIEW

-

After creation, you can alter the options of a materialized view using the ALTER MATERIALIZED VIEW statement:

-
-alter_materialized_view_statement ::=  ALTER MATERIALIZED VIEW view_name WITH table_options
-
-

The options that can be updated are the same as at creation time and thus the same as for tables.

-
-
-

DROP MATERIALIZED VIEW

-

Dropping a materialized view uses the DROP MATERIALIZED VIEW statement:

-
-drop_materialized_view_statement ::=  DROP MATERIALIZED VIEW [ IF EXISTS ] view_name;
-
-

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.
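For instance, a sketch reusing the view created earlier:

DROP MATERIALIZED VIEW IF EXISTS monkeySpecies_by_population;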

-
-

MV Limitations

-
-

Note

-

Removal of columns not selected in the Materialized View (via UPDATE base SET unselected_column = null or -DELETE unselected_column FROM base) may shadow missed updates to other columns received by hints or repair. -For this reason, we advise against doing deletions on base columns not selected in views until this is -fixed on CASSANDRA-13826.

-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/operators.html b/src/doc/4.0-alpha2/cql/operators.html deleted file mode 100644 index 583b4de62..000000000 --- a/src/doc/4.0-alpha2/cql/operators.html +++ /dev/null @@ -1,300 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Arithmetic Operators" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Arithmetic Operators

-

CQL supports the following operators:

Operator | Description
- (unary) | Negates operand
+ | Addition
- | Subtraction
* | Multiplication
/ | Division
% | Returns the remainder of a division
-
-

Number Arithmetic

-

All arithmetic operations are supported on numeric types or counters.

-

The return type of the operation will be based on the operand types:

left/right | tinyint | smallint | int | bigint | counter | float | double | varint | decimal
tinyint | tinyint | smallint | int | bigint | bigint | float | double | varint | decimal
smallint | smallint | smallint | int | bigint | bigint | float | double | varint | decimal
int | int | int | int | bigint | bigint | float | double | varint | decimal
bigint | bigint | bigint | bigint | bigint | bigint | double | double | varint | decimal
counter | bigint | bigint | bigint | bigint | bigint | double | double | varint | decimal
float | float | float | float | double | double | float | double | decimal | decimal
double | double | double | double | double | double | double | double | decimal | decimal
varint | varint | varint | varint | decimal | decimal | decimal | decimal | decimal | decimal
decimal | decimal | decimal | decimal | decimal | decimal | decimal | decimal | decimal | decimal
-

The *, / and % operators have a higher precedence level than the + and - operators. Consequently, they will be evaluated first. If two operators in an expression have the same precedence level, they will be evaluated left to right based on their position in the expression.
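As a sketch (the table some_table and its int columns a, b, c and partition key k are hypothetical), the following selection evaluates b * c first and then adds a:

SELECT a + b * c FROM some_table WHERE k = 0;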

-
-
-

Datetime Arithmetic

-

A duration can be added (+) to or subtracted (-) from a timestamp or a date to create a new timestamp or date. So for instance:

-
SELECT * FROM myTable WHERE t = '2017-01-01' - 2d
-
-
-

will select all the records with a value of t which is in the last 2 days of 2016.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/security.html b/src/doc/4.0-alpha2/cql/security.html deleted file mode 100644 index bb042ccb1..000000000 --- a/src/doc/4.0-alpha2/cql/security.html +++ /dev/null @@ -1,742 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-
-

Database Roles

-

CQL uses database roles to represent users and groups of users. Syntactically, a role is defined by:

-
-role_name ::=  identifier | string
-
-
-

CREATE ROLE

-

Creating a role uses the CREATE ROLE statement:

-
-create_role_statement ::=  CREATE ROLE [ IF NOT EXISTS ] role_name
-                               [ WITH role_options ]
-role_options          ::=  role_option ( AND role_option )*
-role_option           ::=  PASSWORD '=' string
-                          | LOGIN '=' boolean
-                          | SUPERUSER '=' boolean
-                          | OPTIONS '=' map_literal
-                          | ACCESS TO DATACENTERS set_literal
-                          | ACCESS TO ALL DATACENTERS
-
-

For instance:

-
CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'};
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS;
-
-
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

-

Permissions on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is -not.

-

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role.

-

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

-

Role names should be quoted if they contain non-alphanumeric characters.
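For instance, a hedged sketch with a hypothetical role name containing non-alphanumeric characters:

CREATE ROLE 'read-only-user' WITH PASSWORD = 'password_r' AND LOGIN = true;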

-
-

Setting credentials for internal authentication

-

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single -quotation marks.

-

If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD -clause is not necessary.

-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. Not specifying datacenters implicitly grants access to all datacenters. The clause ACCESS TO ALL DATACENTERS can be used for explicitness, but there’s no functional difference.

-
-
-

Creating a role conditionally

-

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. -If the option is used and the role exists, the statement is a no-op:

-
CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-
-
-
-
-
-

ALTER ROLE

-

Altering the options of a role uses the ALTER ROLE statement:

-
-alter_role_statement ::=  ALTER ROLE role_name WITH role_options
-
-

For instance:

-
ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-
-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the -ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ACCESS TO ALL DATACENTERS clause.
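For instance, a sketch reusing the alice role from the earlier examples:

ALTER ROLE alice WITH ACCESS TO DATACENTERS {'DC1'};
ALTER ROLE alice WITH ACCESS TO ALL DATACENTERS;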

-

Conditions on executing ALTER ROLE statements:

-
    -
  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • -
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • -
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • -
  • To modify properties of a role, the client must be granted ALTER permission on that role
  • -
-
-
-
-

DROP ROLE

-

Dropping a role uses the DROP ROLE statement:

-
-drop_role_statement ::=  DROP ROLE [ IF EXISTS ] role_name
-
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.

-

Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is -used. If the option is used and the role does not exist the statement is a no-op.
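For instance, a sketch reusing the report_writer role from the examples above:

DROP ROLE IF EXISTS report_writer;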

-
-

Note

-

DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain -connected and will retain the ability to perform any database actions which do not require authorization. -However, if authorization is enabled, permissions of the dropped role are also revoked, -subject to the caching options configured in cassandra.yaml. -Should a dropped role be subsequently recreated and have new permissions or -roles granted to it, any client sessions still connected will acquire the newly granted -permissions and roles.

-
-
-
-

GRANT ROLE

-

Granting a role to another uses the GRANT ROLE statement:

-
-grant_role_statement ::=  GRANT role_name TO role_name
-
-

For instance:

-
GRANT report_writer TO alice;
-
-
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also -acquired by alice.

-

Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:

-
GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
-GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-
-
-
-
-

REVOKE ROLE

-

Revoking a role uses the REVOKE ROLE statement:

-
-revoke_role_statement ::=  REVOKE role_name FROM role_name
-
-

For instance:

-
REVOKE report_writer FROM alice;
-
-
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the -report_writer role are also revoked.

-
-
-

LIST ROLES

-

All the known roles (in the system or granted to specific role) can be listed using the LIST ROLES statement:

-
-list_roles_statement ::=  LIST ROLES [ OF role_name ] [ NORECURSIVE ]
-
-

For instance:

-
LIST ROLES;
-
-
-

returns all known roles in the system, this requires DESCRIBE permission on the database roles resource. And:

-
LIST ROLES OF alice;
-
-
-

enumerates all roles granted to alice, including those transitively acquired. But:

-
LIST ROLES OF bob NORECURSIVE
-
-
-

lists all roles directly granted to bob without including any of the transitively acquired ones.

-
-
-
-

Users

-

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -USER. For backward compatibility, the legacy syntax has been preserved with USER centric statements becoming -synonyms for the ROLE based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role.

-
-

CREATE USER

-

Creating a user uses the CREATE USER statement:

-
-create_user_statement ::=  CREATE USER [ IF NOT EXISTS ] role_name [ WITH PASSWORD string ] [ user_option ]
-user_option           ::=  SUPERUSER | NOSUPERUSER
-
-

For instance:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-
-
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of -statements are equivalent:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-
-
-
-

ALTER USER

-

Altering the options of a user uses the ALTER USER statement:

-
-alter_user_statement ::=  ALTER USER role_name [ WITH PASSWORD string ] [ user_option ]
-
-

For instance:

-
ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-
-
-
-
-

DROP USER

-

Dropping a user uses the DROP USER statement:

-
-drop_user_statement ::=  DROP USER [ IF EXISTS ] role_name
-
-
-
-

LIST USERS

-

Existing users can be listed using the LIST USERS statement:

-
-list_users_statement ::=  LIST USERS
-
-

Note that this statement is equivalent to:

-
LIST ROLES;
-
-
-

but only roles with the LOGIN privilege are included in the output.

-
-
-
-

Data Control

-
-

Permissions

-

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically:

-
    -
  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> -TABLE.
  • -
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • -
  • Resources representing roles have the structure ALL ROLES -> ROLE
  • -
  • Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ALL MBEANS -> -MBEAN
  • -
-

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting -a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace.

-

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes.

-

The full set of available permissions is:

-
    -
  • CREATE
  • -
  • ALTER
  • -
  • DROP
  • -
  • SELECT
  • -
  • MODIFY
  • -
  • AUTHORIZE
  • -
  • DESCRIBE
  • -
  • EXECUTE
  • -
-

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context of functions or mbeans; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT a permission on a resource to which it cannot be applied results in an error response. The following illustrates which permissions can be granted on which types of resource, and which statements are enabled by that permission.

Permission | Resource | Operations
CREATE | ALL KEYSPACES | CREATE KEYSPACE and CREATE TABLE in any keyspace
CREATE | KEYSPACE | CREATE TABLE in specified keyspace
CREATE | ALL FUNCTIONS | CREATE FUNCTION in any keyspace and CREATE AGGREGATE in any keyspace
CREATE | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE in specified keyspace
CREATE | ALL ROLES | CREATE ROLE
ALTER | ALL KEYSPACES | ALTER KEYSPACE and ALTER TABLE in any keyspace
ALTER | KEYSPACE | ALTER KEYSPACE and ALTER TABLE in specified keyspace
ALTER | TABLE | ALTER TABLE
ALTER | ALL FUNCTIONS | CREATE FUNCTION and CREATE AGGREGATE: replacing any existing
ALTER | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE: replacing existing in specified keyspace
ALTER | FUNCTION | CREATE FUNCTION and CREATE AGGREGATE: replacing existing
ALTER | ALL ROLES | ALTER ROLE on any role
ALTER | ROLE | ALTER ROLE
DROP | ALL KEYSPACES | DROP KEYSPACE and DROP TABLE in any keyspace
DROP | KEYSPACE | DROP TABLE in specified keyspace
DROP | TABLE | DROP TABLE
DROP | ALL FUNCTIONS | DROP FUNCTION and DROP AGGREGATE in any keyspace
DROP | ALL FUNCTIONS IN KEYSPACE | DROP FUNCTION and DROP AGGREGATE in specified keyspace
DROP | FUNCTION | DROP FUNCTION
DROP | ALL ROLES | DROP ROLE on any role
DROP | ROLE | DROP ROLE
SELECT | ALL KEYSPACES | SELECT on any table
SELECT | KEYSPACE | SELECT on any table in specified keyspace
SELECT | TABLE | SELECT on specified table
SELECT | ALL MBEANS | Call getter methods on any mbean
SELECT | MBEANS | Call getter methods on any mbean matching a wildcard pattern
SELECT | MBEAN | Call getter methods on named mbean
MODIFY | ALL KEYSPACES | INSERT, UPDATE, DELETE and TRUNCATE on any table
MODIFY | KEYSPACE | INSERT, UPDATE, DELETE and TRUNCATE on any table in specified keyspace
MODIFY | TABLE | INSERT, UPDATE, DELETE and TRUNCATE on specified table
MODIFY | ALL MBEANS | Call setter methods on any mbean
MODIFY | MBEANS | Call setter methods on any mbean matching a wildcard pattern
MODIFY | MBEAN | Call setter methods on named mbean
AUTHORIZE | ALL KEYSPACES | GRANT PERMISSION and REVOKE PERMISSION on any table
AUTHORIZE | KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION on any table in specified keyspace
AUTHORIZE | TABLE | GRANT PERMISSION and REVOKE PERMISSION on specified table
AUTHORIZE | ALL FUNCTIONS | GRANT PERMISSION and REVOKE PERMISSION on any function
AUTHORIZE | ALL FUNCTIONS IN KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION in specified keyspace
AUTHORIZE | FUNCTION | GRANT PERMISSION and REVOKE PERMISSION on specified function
AUTHORIZE | ALL MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean
AUTHORIZE | MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean matching a wildcard pattern
AUTHORIZE | MBEAN | GRANT PERMISSION and REVOKE PERMISSION on named mbean
AUTHORIZE | ALL ROLES | GRANT ROLE and REVOKE ROLE on any role
AUTHORIZE | ROLES | GRANT ROLE and REVOKE ROLE on specified roles
DESCRIBE | ALL ROLES | LIST ROLES on all roles or only roles granted to another, specified role
DESCRIBE | ALL MBEANS | Retrieve metadata about any mbean from the platform’s MBeanServer
DESCRIBE | MBEANS | Retrieve metadata about any mbean matching a wildcard pattern from the platform’s MBeanServer
DESCRIBE | MBEAN | Retrieve metadata about a named mbean from the platform’s MBeanServer
EXECUTE | ALL FUNCTIONS | SELECT, INSERT and UPDATE using any function, and use of any function in CREATE AGGREGATE
EXECUTE | ALL FUNCTIONS IN KEYSPACE | SELECT, INSERT and UPDATE using any function in specified keyspace and use of any function in keyspace in CREATE AGGREGATE
EXECUTE | FUNCTION | SELECT, INSERT and UPDATE using specified function and use of the function in CREATE AGGREGATE
EXECUTE | ALL MBEANS | Execute operations on any mbean
EXECUTE | MBEANS | Execute operations on any mbean matching a wildcard pattern
EXECUTE | MBEAN | Execute operations on named mbean
-
-
-

GRANT PERMISSION

-

Granting a permission uses the GRANT PERMISSION statement:

-
-grant_permission_statement ::=  GRANT permissions ON resource TO role_name
-permissions                ::=  ALL [ PERMISSIONS ] | permission [ PERMISSION ]
-permission                 ::=  CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-resource                   ::=  ALL KEYSPACES
-                               | KEYSPACE keyspace_name
-                               | [ TABLE ] table_name
-                               | ALL ROLES
-                               | ROLE role_name
-                               | ALL FUNCTIONS [ IN KEYSPACE keyspace_name ]
-                               | FUNCTION function_name '(' [ cql_type ( ',' cql_type )* ] ')'
-                               | ALL MBEANS
-                               | ( MBEAN | MBEANS ) string
-
-

For instance:

-
GRANT SELECT ON ALL KEYSPACES TO data_reader;
-
-
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all -keyspaces:

-
GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-
-
-

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace:

-
GRANT DROP ON keyspace1.table1 TO schema_owner;
-
-
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1:

-
GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-
-
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries -which use the function keyspace1.user_function( int ):

-
GRANT DESCRIBE ON ALL ROLES TO role_admin;
-
-
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST -ROLES statement

-
-

GRANT ALL

-

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target -resource.
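As an illustrative sketch (ks_admin is a hypothetical role), granting ALL on a keyspace grants every permission applicable to keyspaces:

GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO ks_admin;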

-
-
-

Automatic Granting

-

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or CREATE ROLE statement, the creator (the role the database user who issues the statement is identified as) is automatically granted all applicable permissions on the new resource.

-
-
-
-

REVOKE PERMISSION

-

Revoking a permission from a role uses the REVOKE PERMISSION statement:

-
-revoke_permission_statement ::=  REVOKE permissions ON resource FROM role_name
-
-

For instance:

-
REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-
-
-

Because of their function in normal driver operations, certain tables cannot have their SELECT permissions -revoked. The following tables will be available to all authorized users regardless of their assigned role:

-
* `system_schema.keyspaces`
-* `system_schema.columns`
-* `system_schema.tables`
-* `system.local`
-* `system.peers`
-
-
-
-
-

LIST PERMISSIONS

-

Listing granted permissions uses the LIST PERMISSIONS statement:

-
-list_permissions_statement ::=  LIST permissions [ ON resource ] [ OF role_name [ NORECURSIVE ] ]
-
-

For instance:

-
LIST ALL PERMISSIONS OF alice;
-
-
-

Show all permissions granted to alice, including those acquired transitively from any other roles:

-
LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-
-
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. -For example, should bob have ALTER permission on keyspace1, that would be included in the results of this -query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to -bob or one of bob’s roles:

-
LIST SELECT PERMISSIONS OF carlos;
-
-
-

Show any permissions granted to carlos or any of carlos’s roles, limited to SELECT permissions on any -resource.
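A hedged sketch of the NORECURSIVE variant described above, limited to permissions granted directly to bob:

LIST ALL PERMISSIONS ON keyspace1.table1 OF bob NORECURSIVE;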

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/triggers.html b/src/doc/4.0-alpha2/cql/triggers.html deleted file mode 100644 index 13c1713f5..000000000 --- a/src/doc/4.0-alpha2/cql/triggers.html +++ /dev/null @@ -1,155 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Triggers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Triggers

-

Triggers are identified by a name defined by:

-
-trigger_name ::=  identifier
-
-
-

CREATE TRIGGER

-

Creating a new trigger uses the CREATE TRIGGER statement:

-
-create_trigger_statement ::=  CREATE TRIGGER [ IF NOT EXISTS ] trigger_name
-                                  ON table_name
-                                  USING string
-
-

For instance:

-
CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-
-
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction.

-
-
-

DROP TRIGGER

-

Dropping a trigger uses the DROP TRIGGER statement:

-
-drop_trigger_statement ::=  DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name
-
-

For instance:

-
DROP TRIGGER myTrigger ON myTable;
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/cql/types.html b/src/doc/4.0-alpha2/cql/types.html deleted file mode 100644 index 4022f4d1d..000000000 --- a/src/doc/4.0-alpha2/cql/types.html +++ /dev/null @@ -1,699 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Types" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Types

-

CQL is a typed language and supports a rich set of data types, including native types, -collection types, user-defined types, tuple types and custom -types:

-
-cql_type ::=  native_type | collection_type | user_defined_type | tuple_type | custom_type
-
-
-

Native Types

-

The native types supported by CQL are:

-
-native_type ::=  ASCII
-                 | BIGINT
-                 | BLOB
-                 | BOOLEAN
-                 | COUNTER
-                 | DATE
-                 | DECIMAL
-                 | DOUBLE
-                 | DURATION
-                 | FLOAT
-                 | INET
-                 | INT
-                 | SMALLINT
-                 | TEXT
-                 | TIME
-                 | TIMESTAMP
-                 | TIMEUUID
-                 | TINYINT
-                 | UUID
-                 | VARCHAR
-                 | VARINT
-
-

The following table gives additional information on the native data types, and on which kinds of constants each type supports:

type | constants supported | description
ascii | string | ASCII character string
bigint | integer | 64-bit signed long
blob | blob | Arbitrary bytes (no validation)
boolean | boolean | Either true or false
counter | integer | Counter column (64-bit signed value). See Counters for details
date | integer, string | A date (with no corresponding time value). See Working with dates below for details
decimal | integer, float | Variable-precision decimal
double | integer, float | 64-bit IEEE-754 floating point
duration | duration | A duration with nanosecond precision. See Working with durations below for details
float | integer, float | 32-bit IEEE-754 floating point
inet | string | An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that there is no inet constant; IP addresses should be input as strings
int | integer | 32-bit signed int
smallint | integer | 16-bit signed int
text | string | UTF8 encoded string
time | integer, string | A time (with no corresponding date value) with nanosecond precision. See Working with times below for details
timestamp | integer, string | A timestamp (date and time) with millisecond precision. See Working with timestamps below for details
timeuuid | uuid | Version 1 UUID, generally used as a “conflict-free” timestamp. Also see Timeuuid functions
tinyint | integer | 8-bit signed int
uuid | uuid | A UUID (of any version)
varchar | string | UTF8 encoded string
varint | integer | Arbitrary-precision integer
-
-

Counters

-

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the UPDATE statement for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0.
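A minimal sketch, using a hypothetical page_views table:

CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter
);

UPDATE page_views SET views = views + 1 WHERE page = '/home';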

-

Counters have a number of important limitations:

-
    -
  • They cannot be used for columns part of the PRIMARY KEY of a table.
  • -
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside -the PRIMARY KEY have the counter type, or none of them have it.
  • -
  • Counters do not support expiration.
  • -
  • The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other -words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
  • -
  • Counter updates are, by nature, not idempotent. An important consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), the client has no way to know if the update has been applied or not. In particular, replaying the update may or may not lead to an over count.
  • -
-
-
-
-

Working with timestamps

-

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as the epoch: January 1 1970 at 00:00:00 GMT.

-

Timestamps can be input in CQL either using their value as an integer, or using a string that -represents an ISO 8601 date. For instance, all of the values below are -valid timestamp values for Mar 2, 2011, at 04:05:00 AM, GMT:

-
    -
  • 1299038700000
  • -
  • '2011-02-03 04:05+0000'
  • -
  • '2011-02-03 04:05:00+0000'
  • -
  • '2011-02-03 04:05:00.000+0000'
  • -
  • '2011-02-03T04:05+0000'
  • -
  • '2011-02-03T04:05:00+0000'
  • -
  • '2011-02-03T04:05:00.000+0000'
  • -
-

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is --0800. The time zone may be omitted if desired ('2011-02-03 04:05:00'), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible.

-

The time of day may also be omitted ('2011-02-03' or '2011-02-03+0000'), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the date type.

-
-
-

Working with dates

-

Values of the date type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at the center of the range (2^31). The epoch is January 1st, 1970.

-

As for timestamp, a date can be input either as an integer or using a date string. In the latter case, the format should be yyyy-mm-dd (so '2011-02-03' for instance).
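As an illustrative sketch (the events table with an int id and a date column event_date is hypothetical):

INSERT INTO events (id, event_date) VALUES (1, '2011-02-03');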

-
-
-

Working with times

-

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

-

As for timestamp, a time can be input either as an integer or using a string representing the time. In the latter case, the format should be hh:mm:ss[.fffffffff] (where the sub-second precision is optional and, if provided, can have less than nanosecond precision). So for instance, the following are valid inputs for a time:

-
    -
  • '08:12:54'
  • -
  • '08:12:54.123'
  • -
  • '08:12:54.123456'
  • -
  • '08:12:54.123456789'
  • -
-
-
-

Working with durations

-

Values of the duration type are encoded as 3 signed integers of variable length. The first integer represents the number of months, the second the number of days and the third the number of nanoseconds. This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving time. Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded as a 64-bit integer.

-

A duration can be input as:

-
-
    -
  1. (quantity unit)+ like 12h30m where the unit can be:

    -
    -
      -
    • y: years (12 months)
    • -
    • mo: months (1 month)
    • -
    • w: weeks (7 days)
    • -
    • d: days (1 day)
    • -
    • h: hours (3,600,000,000,000 nanoseconds)
    • -
    • m: minutes (60,000,000,000 nanoseconds)
    • -
    • s: seconds (1,000,000,000 nanoseconds)
    • -
    • ms: milliseconds (1,000,000 nanoseconds)
    • -
    • us or µs : microseconds (1000 nanoseconds)
    • -
    • ns: nanoseconds (1 nanosecond)
    • -
    -
    -
  2. -
  3. ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W

    -
  4. -
  5. ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]

    -
  6. -
-
-

For example:

-
INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-
-

Duration columns cannot be used in a table’s PRIMARY KEY. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if 1mo is greater than 29d without a date -context.

-

A 1d duration is not equal to a 24h one as the duration type has been created to be able to support daylight saving time.

-
-
-

Collections

-

CQL supports 3 kinds of collections: Maps, Sets and Lists. The types of those collections are defined by:

-
-collection_type ::=  MAP '<' cql_type ',' cql_type '>'
-                     | SET '<' cql_type '>'
-                     | LIST '<' cql_type '>'
-
-

and their values can be input using collection literals:

-
-collection_literal ::=  map_literal | set_literal | list_literal
-map_literal        ::=  '{' [ term ':' term (',' term : term)* ] '}'
-set_literal        ::=  '{' [ term (',' term)* ] '}'
-list_literal       ::=  '[' [ term (',' term)* ] ']'
-
-

Note however that neither bind_marker nor NULL are supported inside collection literals.

-
-

Noteworthy characteristics

-

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all messages sent by a user”, “events registered by a sensor”…), then collections are not appropriate and a specific table (with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy characteristics and limitations:

-
    -
  • Individual collections are not indexed internally. This means that even to access a single element of a collection, the whole collection has to be read (and reading one is not paged internally).
  • -
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. -Further, some lists operations are not idempotent by nature (see the section on lists below for -details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when -possible.
  • -
-

Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an anti-pattern to use a (single) collection to store large amounts of data.

-
-
-

Maps

-

A map is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:

-
CREATE TABLE users (
-    id text PRIMARY KEY,
-    name text,
-    favs map<text, text> // A map of text keys, and text values
-);
-
-INSERT INTO users (id, name, favs)
-           VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-// Replace the existing map entirely.
-UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-
-

Further, maps support:

-
    -
  • Updating or inserting one or more elements:

    -
    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    -UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
    -
    -
    -
  • -
  • Removing one or more element (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    -UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
    -
    -
    -

    Note that for removing multiple elements in a map, you remove from it a set of keys.

    -
  • -
-

Lastly, TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated elements. In other words:

-
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

-
-
-

Sets

-

A set is a (sorted) collection of unique values. You can define and insert a set with:

-
CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    tags set<text> // A set of text values
-);
-
-INSERT INTO images (name, owner, tags)
-            VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-// Replace the existing set entirely
-UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-
-

Further, sets support:

-
    -
  • Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):

    -
    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
  • Removing one or multiple elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
-

Lastly, as for maps, TTLs if used only apply to the newly inserted values.

-
-
-

Lists

-
-

Note

-

As mentioned above and further discussed at the end of this section, lists have limitations and specific -performance considerations that you should take into account before using them. In general, if you can use a -set instead of list, always prefer a set.

-
-

A list is a (sorted) collection of non-unique values where elements are ordered by their position in the list. You can define and insert a list with:

-
CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int> // A list of integers
-)
-
-INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-// Replace the existing list entirely
-UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-
-

Further, lists support:

-
    -
  • Appending and prepending values to a list:

    -
    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    -UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
    -
    -
    -
  • -
  • Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small:

    -
    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
    -
    -
    -
  • -
  • Removing an element by its position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted:

    -
    DELETE scores[1] FROM plays WHERE id = '123-afde';
    -
    -
    -
  • -
  • Deleting all the occurrences of particular values in the list (if a particular element doesn’t occur at all in the -list, it is simply ignored and no error is thrown):

    -
    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
    -
    -
    -
  • -
-
-

Warning

-

The append and prepend operations are not idempotent by nature. So in particular, if one of these operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value twice.

-
-
-

Warning

-

Setting and removing an element by position and removing occurrences of particular values incur an internal read-before-write. They will thus run more slowly and take more resources than usual updates (with the exclusion of conditional writes that have their own cost).

-
-

Lastly, as for maps, TTLs when used only apply to the newly inserted values.

-
-
-
-

User-Defined Types

-

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using the create_type_statement, alter_type_statement and drop_type_statement described below. But once created, a UDT is simply referred to by its name:

-
-user_defined_type ::=  udt_name
-udt_name          ::=  [ keyspace_name '.' ] identifier
-
-
-

Creating a UDT

-

Creating a new user-defined type is done using a CREATE TYPE statement defined by:

-
-create_type_statement ::=  CREATE TYPE [ IF NOT EXISTS ] udt_name
-                               '(' field_definition ( ',' field_definition )* ')'
-field_definition      ::=  identifier cql_type
-
-

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any type, including collections or other UDTs. For instance:

-
CREATE TYPE phone (
-    country_code int,
-    number text,
-)
-
-CREATE TYPE address (
-    street text,
-    city text,
-    zip text,
-    phones map<text, phone>
-)
-
-CREATE TABLE user (
-    name text PRIMARY KEY,
-    addresses map<text, frozen<address>>
-)
-
-
-

Note that:

-
    -
  • Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the type already exists.
  • -
  • A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At -creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in -the current keyspace.
  • -
  • As of Cassandra 4.0-alpha2, UDT have to be frozen in most cases, hence the frozen<address> in the table definition -above. Please see the section on frozen for more details.
  • -
-
-
-

UDT literals

-

Once a user-defined type has been created, values can be input using a UDT literal:

-
-udt_literal ::=  '{' identifier ':' term ( ',' identifier ':' term )* '}'
-
-

In other words, a UDT literal is like a map literal but its keys are the names of the fields of the type. For instance, one could insert into the table defined in the previous section using:

-
INSERT INTO user (name, addresses)
-          VALUES ('z3 Pr3z1den7', {
-              'home' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                            'landline' : { country_code: 1, number: '...' } }
-              },
-              'work' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'fax' : { country_code: 1, number: '...' } }
-              }
-          })
-
-
-

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some field -(in which case those will be null).

-
-
-

Altering a UDT

-

An existing user-defined type can be modified using an ALTER TYPE statement:

-
-alter_type_statement    ::=  ALTER TYPE udt_name alter_type_modification
-alter_type_modification ::=  ADD field_definition
-                             | RENAME identifier TO identifier ( identifier TO identifier )*
-
-

You can:

-
    -
  • add a new field to the type (ALTER TYPE address ADD country text). That new field will be null for any values -of the type created before the addition.
  • -
  • rename the fields of the type (ALTER TYPE address RENAME zip TO zipcode).
  • -
-
-
-

Dropping a UDT

-

You can drop an existing user-defined type using a DROP TYPE statement:

-
-drop_type_statement ::=  DROP TYPE [ IF EXISTS ] udt_name
-
-

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error.

-

If the type dropped does not exist, an error will be returned unless IF EXISTS is used, in which case the operation -is a no-op.
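For instance, a sketch using a hypothetical, no longer referenced phone_old type (a type still in use by another type or table cannot be dropped):

DROP TYPE IF EXISTS phone_old;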

-
-
-
-

Tuples

-

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

-
-tuple_type    ::=  TUPLE '<' cql_type ( ',' cql_type )* '>'
-tuple_literal ::=  '(' term ( ',' term )* ')'
-
-

and can be used thusly:

-
CREATE TABLE durations (
-    event text,
-    duration tuple<int, text>,
-)
-
-INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-
-

Unlike other “composed” types (collections and UDTs), a tuple is always frozen (without the need of the frozen keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of those values can be null but they need to be explicitly declared as such).

-
-
-

Custom Types

-
-

Note

-

Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is complex, not user friendly and the other provided types, particularly user-defined types, should almost always be enough.

-
-

A custom type is defined by:

-
-custom_type ::=  string
-
-

A custom type is a string that contains the name of a Java class that extends the server side AbstractType class and that can be loaded by Cassandra (it should thus be in the CLASSPATH of every node running Cassandra). That class will define what values are valid for the type and how the type sorts when used for a clustering column. For any other purpose, a value of a custom type is the same as that of a blob, and can in particular be input using the blob literal syntax.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/data_modeling/index.html b/src/doc/4.0-alpha2/data_modeling/index.html deleted file mode 100644 index bfd525810..000000000 --- a/src/doc/4.0-alpha2/data_modeling/index.html +++ /dev/null @@ -1,105 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Data Modeling" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Modeling

-
-

Todo

-

TODO

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/ci.html b/src/doc/4.0-alpha2/development/ci.html deleted file mode 100644 index 49d58c9da..000000000 --- a/src/doc/4.0-alpha2/development/ci.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Jenkins CI Environment" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Jenkins CI Environment

-
-

About CI testing and Apache Cassandra

-

Cassandra can be automatically tested using various test suites, which are implemented either on top of JUnit or as dtest scripts written in Python. As outlined in Testing, each kind of test suite addresses a different way of testing Cassandra. But in the end, all of them will be executed together on our CI platform at builds.apache.org, running Jenkins.

-
-
-

Setting up your own Jenkins server

-

Jenkins is an open source solution that can be installed on a large number of platforms. Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution.

-

Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment.

-
-

Required plugins

-

The following plugins need to be installed in addition to the standard plugins (git, ant, ..).

-

You can install any missing plugins through the install manager.

-

Go to Manage Jenkins -> Manage Plugins -> Available and install the following plugins and respective dependencies:

-
    -
  • Job DSL
  • -
  • Javadoc Plugin
  • -
  • description setter plugin
  • -
  • Throttle Concurrent Builds Plug-in
  • -
  • Test stability history
  • -
  • Hudson Post build task
  • -
-
-
-

Setup seed job

-

Config New Item

-
    -
  • Name it Cassandra-Job-DSL
  • -
  • Select Freestyle project
  • -
-

Under Source Code Management select Git using the repository: https://github.com/apache/cassandra-builds

-

Under Build, confirm Add build step -> Process Job DSLs and enter at Look on Filesystem: jenkins-dsl/cassandra_job_dsl_seed.groovy

-

Generated jobs will be created based on the Groovy script’s default settings. You may want to override settings by checking This project is parameterized and adding a String Parameter for each of the variables found at the top of the script. This will allow you to set up jobs for your own repository and branches (e.g. working branches).

-

When done, confirm “Save”

-

You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message “Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use”. Go to Manage Jenkins -> In-process Script Approval to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates.

-

Jobs are triggered by either changes in Git or are scheduled to execute periodically, e.g. on daily basis. Jenkins will use any available executor with the label “cassandra”, once the job is to be run. Please make sure to make any executors available by selecting Build Executor Status -> Configure -> Add “cassandra” as label and save.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/code_style.html b/src/doc/4.0-alpha2/development/code_style.html deleted file mode 100644 index 09312d30e..000000000 --- a/src/doc/4.0-alpha2/development/code_style.html +++ /dev/null @@ -1,214 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Code Style" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Code Style

-
-

General Code Conventions

-
-
-
-
-
-

Exception handling

-
-
    -
  • Never ever write catch (...) {} or catch (...) { logger.error() } merely to satisfy Java’s compile-time exception checking. Always propagate the exception up or throw RuntimeException (or, if it “can’t happen,” AssertionError). This makes the exceptions visible to automated tests.
  • -
  • Avoid propagating up checked exceptions that no caller handles. Rethrow as RuntimeException (or IOError, if that is more applicable).
  • -
  • Similarly, logger.warn() is often a cop-out: is this an error or not? If it is, don’t hide it behind a warn; if it isn’t, there is no need for the warning.
  • -
  • If you genuinely know an exception indicates an expected condition, it’s okay to ignore it BUT this must be explicitly explained in a comment.
  • -
-
-
-
-

Boilerplate

-
-
    -
  • Avoid redundant @Override annotations when implementing abstract or interface methods.
  • -
  • Do not implement equals or hashcode methods unless they are actually needed.
  • -
  • Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in “real” methods to either.)
  • -
  • Prefer requiring initialization in the constructor to setters.
  • -
  • Avoid redundant this references to member fields or methods.
  • -
  • Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it.
  • -
  • Always include braces for nested levels of conditionals and loops. Only avoid braces for single level.
  • -
-
-
-
-

Multiline statements

-
-
    -
  • Try to keep lines under 120 characters, but use good judgement – it’s better to exceed 120 by a little, than split a line that has no natural splitting points.
  • -
  • When splitting inside a method call, use one line per parameter and align them, like this:
  • -
-
SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(),
-                                         columnFamilies.size(),
-                                         StorageService.getPartitioner());
-
-
-
    -
  • When splitting a ternary, use one line per clause, carry the operator, and align like this:
  • -
-
var = bar == null
-    ? doFoo()
-    : doBar();
-
-
-
-
-
-

Whitespace

-
-
    -
  • Please make sure to use 4 spaces instead of the tab character for all your indentation.
  • -
  • Many lines in many files have a bunch of trailing whitespace… Please either clean these up in a separate patch, or leave them alone, so that reviewers now, and anyone reading code history later, don’t have to pay attention to whitespace diffs.
  • -
-
-
-
-

Imports

-

Please observe the following order for your imports:

-
java
-[blank line]
-com.google.common
-org.apache.commons
-org.junit
-org.slf4j
-[blank line]
-everything else alphabetically
-
-
-
-
-

Format files for IDEs

-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/dependencies.html b/src/doc/4.0-alpha2/development/dependencies.html deleted file mode 100644 index 147d569e0..000000000 --- a/src/doc/4.0-alpha2/development/dependencies.html +++ /dev/null @@ -1,155 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Dependency Management" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Dependency Management

-

Managing libraries for Cassandra is a bit less straightforward compared to other projects, as the build process is based on ant, maven and manually managed jars. Make sure to follow the steps below carefully, and pay attention to any emerging issues in the Jenkins CI environment and to related issues reported on Jira or the mailing list whenever project dependencies change.

-

As Cassandra is an Apache product, all included libraries must follow Apache’s software license requirements.

-
-

Required steps to add or update libraries

-
  • Add or replace jar file in lib directory
  • Add or update lib/license files
  • Update dependencies in build.xml (see the command sketch below)
    • Add to parent-pom with correct version
    • Add to all-pom if simple Cassandra dependency (see below)
-
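As a rough command-level sketch of those steps (the library, jar file and licence file names are purely hypothetical, and the exact build.xml edits depend on the dependency being added):

```bash
# copy the new jar and its licence text into the source tree (placeholder names)
cp ~/Downloads/somelib-1.2.3.jar lib/
cp ~/Downloads/somelib-1.2.3.LICENSE.txt lib/licenses/   # assumed licence file location

# edit build.xml: declare the artifact with its version in the parent-pom section,
# and add it to all-pom if it is a plain dependency of cassandra-all (see below)

# rebuild from scratch and refresh the locally installed maven artifacts
ant realclean
ant
ant mvn-install
```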
-
-

POM file types

-
    -
  • parent-pom - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here.
  • -
  • build-deps-pom(-sources) + coverage-deps-pom - used by the ant build compile target. Listed dependencies will be resolved and copied to build/lib/{jar,sources} by executing the maven-ant-tasks-retrieve-build target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution.
  • -
  • test-deps-pom - referred to by maven-ant-tasks-retrieve-test to retrieve and save dependencies to build/test/lib. Exclusively used during JUnit test execution.
  • -
  • all-pom - pom for cassandra-all.jar that can be installed or deployed to public maven repos via ant publish
  • -
  • dist-pom - pom for tarball distribution (cassandra-{bin,src}.tar.gz) created by ant artifacts. Should be left as is, but needed for installing or deploying releases.
  • -
-
-
-

Troubleshooting and conflict resolution

-

Here are some useful commands that may help you out resolving conflicts.

-
    -
  • ant realclean - gets rid of the build directory, including build artifacts.
  • -
  • mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ant mvn-install.
  • -
  • rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/ - removes cached local Cassandra maven artifacts
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/documentation.html b/src/doc/4.0-alpha2/development/documentation.html deleted file mode 100644 index d513346e1..000000000 --- a/src/doc/4.0-alpha2/development/documentation.html +++ /dev/null @@ -1,192 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Working on Documentation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Working on Documentation

-
-

How Cassandra is documented

-

The official Cassandra documentation lives in the project’s git repository. We use a static site generator, Sphinx, to create pages hosted at cassandra.apache.org. You’ll also find developer centric content about Cassandra internals in our retired wiki (not covered by this guide).

-

Using a static site generator often means using a markup language instead of a visual editor (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses reStructuredText for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at existing documents to get a better idea of how we use reStructuredText to write our documents.

-

So how do you actually start making contributions?

-
-
-

GitHub based work flow

-

Recommended for shorter documents and minor changes to existing content (e.g. fixing typos or updating descriptions)

-

Follow these steps to contribute using GitHub. It’s assumed that you’re logged in with an existing account.

-
  1. Fork the GitHub mirror of the Cassandra repository

../_images/docs_fork.png

  2. Create a new branch that you can use to make your edits. It’s recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later, when you decide you’re ready to contribute your work.

../_images/docs_create_branch.png

  3. Navigate to document sources doc/source to find the .rst file to edit. The URL of the document should correspond to the directory structure. New files can be created using the “Create new file” button:

../_images/docs_create_file.png

  4. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and add some content. Have a look at other existing .rst files to get a better idea what format elements to use.

../_images/docs_editor.png

Make sure to preview added content before committing any changes.

../_images/docs_preview.png

  5. Commit your work when you’re done. Make sure to add a short description of all your edits since the last time you committed before.

../_images/docs_commit.png

  6. Finally, if you decide that you’re done working on your branch, it’s time to create a pull request!

../_images/docs_pr.png

Afterwards the GitHub Cassandra mirror will list your pull request and you’re done. Congratulations! Please give us some time to look at your suggested changes before we get back to you.

-
-
-

Jira based work flow

-

Recommended for major changes

-

Significant changes to the documentation are best managed through our Jira issue tracker. Please follow the same contribution guides as for regular code contributions. Creating high quality content takes a lot of effort. It’s therefore always a good idea to create a ticket before you start and explain what you’re planning to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed.

-
-
-

Working on documents locally using Sphinx

-

Recommended for advanced editing

-

Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefore it’s a good idea to set up Sphinx locally for any serious editing. Please follow the instructions in the Cassandra source directory at doc/README.md. Setup is very easy (at least on OSX and Linux).

-
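A minimal local Sphinx setup might look like this (doc/README.md in the Cassandra source tree is authoritative; the make target used here is an assumption):

```bash
cd cassandra/doc
# install the documentation toolchain
pip install sphinx sphinx_rtd_theme
# build the HTML docs locally (see doc/README.md for the exact targets)
make html
```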
-
-

Notes for committers

-

Please feel free to get involved and merge pull requests created on the GitHub mirror if you’re a committer. As this is a read-only repository, you won’t be able to merge a PR directly on GitHub. You’ll have to commit the changes against the Apache repository with a comment that will close the PR when the commit syncs with GitHub.

-

You may use a git work flow like this:

-
git remote add github https://github.com/apache/cassandra.git
-git fetch github pull/<PR-ID>/head:<PR-ID>
-git checkout <PR-ID>
-
-
-

Now either rebase or squash the commit, e.g. for squashing:

-
git reset --soft origin/trunk
-git commit --author <PR Author>
-
-
-

Make sure to add a proper commit message including a “Closes #<PR-ID>” text to automatically close the PR.

-
-

Publishing

-

Details for building and publishing of the site at cassandra.apache.org can be found here.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/gettingstarted.html b/src/doc/4.0-alpha2/development/gettingstarted.html deleted file mode 100644 index 776e4c7fb..000000000 --- a/src/doc/4.0-alpha2/development/gettingstarted.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Getting Started" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Getting Started

-
-

Initial Contributions

-
-
Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). So, before firing up your IDE to create that new feature, we’d suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work.
-
    -
  • Add to or update the documentation
  • -
  • Answer questions on the user list
  • -
  • Review and test a submitted patch
  • -
  • Investigate and fix a reported bug
  • -
  • Create unit tests and d-tests
  • -
-
-
-
-
-

Updating documentation

-

The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (Contributing Code Changes).

-
-
-

Answering questions on the user list

-

Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! See the community page for details on how to subscribe to the mailing list.

-
-
-

Reviewing and testing a submitted patch

-

Reviewing patches is not the sole domain of committers; if others have reviewed a patch it can reduce the load on the committers, allowing them to write more great features or review more patches. Follow the instructions in _development_how_to_review or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, “I tested this performance enhancement on our application’s standard production load test and found a 3% improvement.”)

-
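For instance, a minimal local test run for a patch attached to a ticket might look like this (the patch file name and base branch are placeholders; see Contributing Code Changes and Testing for the full workflow):

```bash
# start from the branch the patch was written against
git checkout -b 12345-review origin/cassandra-3.11

# apply the format-patch file attached to the JIRA ticket (downloaded beforehand)
git am 12345-3.11.patch

# build, then run the tests most relevant to the change
ant
ant test -Dtest.name=SimpleQueryTest
```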
-
-

Investigate and/or fix a reported bug

-

Often, the hardest work in fixing a bug is reproducing it. Even if you don’t have the knowledge to produce a fix, figuring out a way to reliably reproduce an issue can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (Contributing Code Changes).

-
-
-

Create unit tests and Dtests

-

Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your change introducing new issues. See Testing and Contributing Code Changes for more detail.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/how_to_commit.html b/src/doc/4.0-alpha2/development/how_to_commit.html deleted file mode 100644 index 24856881d..000000000 --- a/src/doc/4.0-alpha2/development/how_to_commit.html +++ /dev/null @@ -1,186 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "How-to Commit" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

How-to Commit

-

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself.

-

Here is how committing and merging will usually look for tickets that follow the convention (if patch-based):

-

Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).

-
-
On cassandra-3.0:

  1. git am -3 12345-3.0.patch (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)

On cassandra-3.3:

  1. git merge cassandra-3.0 -s ours
  2. git apply -3 12345-3.3.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  3. git commit --amend

On trunk:

  1. git merge cassandra-3.3 -s ours
  2. git apply -3 12345-trunk.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  3. git commit --amend

On any branch:

  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
-
-
-

Same scenario, but a branch-based contribution:

-
-
On cassandra-3.0:

  1. git cherry-pick <sha-of-3.0-commit> (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)

On cassandra-3.3:

  1. git merge cassandra-3.0 -s ours
  2. git format-patch -1 <sha-of-3.3-commit>
  3. git apply -3 <sha-of-3.3-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. git commit --amend

On trunk:

  1. git merge cassandra-3.3 -s ours
  2. git format-patch -1 <sha-of-trunk-commit>
  3. git apply -3 <sha-of-trunk-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. git commit --amend

On any branch:

  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
-
-
-
-

Tip

-

Notes on git flags: the -3 flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply.

-

The --atomic flag to git push does the obvious thing: it pushes all or nothing. Without the flag, the command is equivalent to running git push once per branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue.

-
-
-

Tip

-

The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/how_to_review.html b/src/doc/4.0-alpha2/development/how_to_review.html deleted file mode 100644 index 80bc6dcf2..000000000 --- a/src/doc/4.0-alpha2/development/how_to_review.html +++ /dev/null @@ -1,178 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Review Checklist" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Review Checklist

-

When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process:

-

General

-
-
    -
  • Does it conform to the Code Style guidelines?
  • -
  • Is there any redundant or duplicate code?
  • -
  • Is the code as modular as possible?
  • -
  • Can any singletons be avoided?
  • -
  • Can any of the code be replaced with library functions?
  • -
  • Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem?
  • -
-
-

Error-Handling

-
-
    -
  • Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded?
  • -
  • Where third-party utilities are used, are returning errors being caught?
  • -
  • Are invalid parameter values handled?
  • -
  • Are any Throwable/Exceptions passed to the JVMStabilityInspector?
  • -
  • Are errors well-documented? Does the error message tell the user how to proceed?
  • -
  • Do exceptions propagate to the appropriate level in the code?
  • -
-
-

Documentation

-
-
    -
  • Do comments exist and describe the intent of the code (the “why”, not the “how”)?
  • -
  • Are javadocs added where appropriate?
  • -
  • Is any unusual behavior or edge-case handling described?
  • -
  • Are data structures and units of measurement explained?
  • -
  • Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’?
  • -
  • Does the code self-document via clear naming, abstractions, and flow control?
  • -
  • Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed?
  • -
  • Is the ticket tagged with “client-impacting” and “doc-impacting”, where appropriate?
  • -
  • Has lib/licences been updated for third-party libs? Are they Apache License compatible?
  • -
  • Is the Component on the JIRA ticket set appropriately?
  • -
-
-

Testing

-
-
    -
  • Is the code testable? i.e. it doesn’t add too many or hidden dependencies, objects can be initialized easily, test frameworks can invoke its methods, etc.
  • -
  • Do tests exist and are they comprehensive?
  • -
  • Do unit tests actually test that the code is performing the intended functionality?
  • -
  • Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse?
  • -
  • If the code may be affected by multi-node clusters, are there dtests?
  • -
  • If the code may take a long time to test properly, are there CVH tests?
  • -
  • Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions?
  • -
  • If patch affects read/write path, did we test for performance regressions w/multiple workloads?
  • -
  • If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature?
  • -
-
-

Logging

-
-
    -
  • Are logging statements logged at the correct level?
  • -
  • Are there logs in the critical path that could affect performance?
  • -
  • Is there any log that could be added to communicate status or troubleshoot potential problems in this feature?
  • -
  • Can any unnecessary logging statement be removed?
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/ide.html b/src/doc/4.0-alpha2/development/ide.html deleted file mode 100644 index 4b3c236f2..000000000 --- a/src/doc/4.0-alpha2/development/ide.html +++ /dev/null @@ -1,267 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Building and IDE Integration" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Building and IDE Integration

-
-

Building From Source

-

Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using Java 8, Git and Ant.

-

The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:

-
git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk
-
-
-

Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:

-
git checkout cassandra-3.0
-
-
-

You can get a list of available branches with git branch.

-

Finally build Cassandra using ant:

-
ant
-
-
-

This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled.

-
-

Hint

-

You can set up multiple working trees for different Cassandra versions from the same repository using git-worktree.

-
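For example, assuming the cassandra-trunk clone from above, a second working tree for the cassandra-3.11 branch can be added next to it (the directory names are just an illustration):

```bash
cd cassandra-trunk
# create a sibling working tree with a local cassandra-3.11 branch checked out
git worktree add -b cassandra-3.11 ../cassandra-3.11 origin/cassandra-3.11
# list all working trees attached to this repository
git worktree list
```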
-
-

-
-
-
-

Setting up Cassandra in IntelliJ IDEA

-

IntelliJ IDEA by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra.

-
-

Setup Cassandra as a Project (C* 2.1 and newer)

-

Since 2.1.5, there is a new ant target: generate-idea-files. Please see our wiki for instructions for older Cassandra versions.

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Once Cassandra is built, generate the IDEA files using ant:
  2. -
-
ant generate-idea-files
-
-
-
    -
  1. Start IDEA
  2. -
  3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA’s File menu
  4. -
-

The project generated by the ant task generate-idea-files contains nearly everything you need to debug Cassandra and execute unit tests.

-
-
    -
  • Run/debug defaults for JUnit
  • -
  • Run/debug configuration for Cassandra daemon
  • -
  • License header for Java source files
  • -
  • Cassandra code style
  • -
  • Inspections
  • -
-
-
-

-
-
-
-
-

Opening Cassandra in Apache NetBeans

-

Apache NetBeans is the oldest of the open-source Java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans.

-
-

Open Cassandra as a Project (C* 4.0 and newer)

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Start Apache NetBeans
  2. -
  3. Open the NetBeans project from the ide/ folder of the checked out Cassandra directory using the menu item “Open Project…” in NetBeans’ File menu
  4. -
-

The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant build.xml script.

-
-
    -
  • Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu.
  • -
  • Profile Project is available via the Profile menu. In the opened Profiler tab, click the green “Profile” button.
  • -
  • Cassandra’s code style is honored in ide/nbproject/project.properties
  • -
-
-

The JAVA8_HOME system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute.

-
-

-
-
-
-
-

Setting up Cassandra in Eclipse

-

Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the download page. The following guide was created with “Eclipse IDE for Java Developers”.

-

These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x.

-
-

Project Settings

-

It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.

-
-
    -
  • Clone and build Cassandra as described above.
  • -
  • Run ant generate-eclipse-files to create the Eclipse settings.
  • -
  • Start Eclipse.
  • -
  • Select File->Import->Existing Projects into Workspace->Select git directory.
  • -
  • Make sure “cassandra-trunk” is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above).
  • -
  • Confirm “Finish” to have your project imported.
  • -
-
-

You should now be able to find the project as part of the “Package Explorer” or “Project Explorer” without having Eclipse complain about any errors after building the project automatically.

-
-
-

Unit Tests

-

Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting Run As->JUnit Test. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting Debug As->JUnit Test.

-

Alternatively all unit tests can be run from the command line as described in Testing

-
-
-

Debugging Cassandra Using Eclipse

-

There are two ways to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would, using the ./bin/cassandra script, and connect to the JVM remotely from Eclipse, or start Cassandra from Eclipse right away.

-
-

Starting Cassandra From Command Line

-
-
    -
  • Set environment variable to define remote debugging options for the JVM: export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"
  • -
  • Start Cassandra by executing the ./bin/cassandra
  • -
-
-

Afterwards you should be able to connect to the running Cassandra process through the following steps:

-

From the menu, select Run->Debug Configurations..

-../_images/eclipse_debug0.png -

Create new remote application

-../_images/eclipse_debug1.png -

Configure connection settings by specifying a name and port 1414

-../_images/eclipse_debug2.png -

Afterwards confirm “Debug” to connect to the JVM and start debugging Cassandra!

-
-
-

Starting Cassandra From Eclipse

-

Cassandra can also be started directly from Eclipse if you don’t want to use the command line.

-

From the menu, select Run->Run Configurations..

-../_images/eclipse_debug3.png -

Create new application

-../_images/eclipse_debug4.png -

Specify name, project and main class org.apache.cassandra.service.CassandraDaemon

-../_images/eclipse_debug5.png -

Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed.

-
-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true
-
-
-../_images/eclipse_debug6.png -

Now just confirm “Debug” and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging!

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/index.html b/src/doc/4.0-alpha2/development/index.html deleted file mode 100644 index dcef417ea..000000000 --- a/src/doc/4.0-alpha2/development/index.html +++ /dev/null @@ -1,184 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contributing to Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- - -
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/patches.html b/src/doc/4.0-alpha2/development/patches.html deleted file mode 100644 index 88534d8e6..000000000 --- a/src/doc/4.0-alpha2/development/patches.html +++ /dev/null @@ -1,273 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Contributing Code Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contributing Code Changes

-
-

Choosing What to Work on

-

Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you’re addressing.

-
-
As a general rule of thumb:
-
    -
  • Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the developer community
  • -
  • Bug fixes take higher priority compared to features
  • -
  • The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
  • -
  • Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately
  • -
-
-
-
-

Hint

-

Not sure what to work on? Just pick an issue marked as Low Hanging Fruit Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

-
-
-
-

Before You Start Coding

-

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it’s generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or Slack.

-
-
You should also
-
    -
  • Avoid redundant work by searching for already reported issues in JIRA
  • -
  • Create a new issue early in the process describing what you’re working on - not just after finishing your patch
  • -
  • Link related JIRA issues with your own ticket to provide a better context
  • -
  • Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
  • -
  • Ping people on JIRA, by mentioning them, when you would like to ask them for advice
  • -
-
-
There are also some fixed rules that you need to be aware of:
-
    -
  • Patches will only be applied to branches by following the release model
  • -
  • Code must be testable
  • -
  • Code must follow the Code Style convention
  • -
  • Changes must not break compatibility between different Cassandra versions
  • -
  • Contributions must be covered by the Apache License
  • -
-
-
-
-

Choosing the Right Branches to Work on

-

There are currently multiple Cassandra versions maintained in individual branches:

- ---- - - - - - - - - - - - - - - - - - - - - - - -
VersionPolicy
4.0Code freeze (see below)
3.11Critical bug fixes only
3.0Critical bug fixes only
2.2Critical bug fixes only
2.1Critical bug fixes only
-

Corresponding branches in git are easy to recognize as they are named cassandra-<release> (e.g. cassandra-3.0). The trunk branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

-
-

4.0 Code Freeze

-

Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. During that time, only the following patches will be considered for acceptance:

-
-
    -
  • Bug fixes
  • -
  • Measurable performance improvements
  • -
  • Changes not distributed as part of the release such as:
  • -
  • Testing related improvements and fixes
  • -
  • Build and infrastructure related changes
  • -
  • Documentation
  • -
-
-
-
-

Bug Fixes

-

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be cassandra-2.1 -> cassandra-2.2 -> cassandra-3.0 -> cassandra-3.x -> trunk. But don’t worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn’t very common. As a contributor, you’re also not expected to provide a single patch for each version. What you need to do however is:

-
-
    -
  • Be clear about which versions you could verify to be affected by the bug
  • -
  • For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on a case-by-case basis
  • -
  • If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0)
  • -
  • Test if the patch can be merged cleanly across branches in the direction listed above
  • -
  • Be clear which branches may need attention by the committer or even create custom patches for those if you can
  • -
-
-
-
-
-
-

Creating a Patch

-

So you’ve finished coding and the great moment arrives: it’s time to submit your patch!

-
-
    -
  1. Create a branch for your changes if you haven’t done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. git checkout -b 12345-3.0
  2. -
  3. Verify that you follow Cassandra’s Code Style
  4. -
  5. Make sure all tests (including yours) pass using ant as described in Testing. If you suspect a test failure is unrelated to your change, it may be useful to check the test’s status by searching the issue tracker or looking at CI results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites.
  6. -
  7. Consider going through the Review Checklist for your code. This will help you to understand how others will consider your change for inclusion.
  8. -
  9. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either:
  10. -
-
-
    -
  1. Attach a patch to JIRA with a single squashed commit in it (per branch), or
  2. -
  3. Squash the commits in-place in your branches into one
  4. -
-
-
    -
  1. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below.

    -
    <One sentence description, usually Jira title and CHANGES.txt summary>
    -<Optional lengthier description>
    -patch by <Authors>; reviewed by <Reviewers> for CASSANDRA-#####
    -
    -
    -
  2. -
  3. When you’re happy with the result, create a patch:

    -
  4. -
-
-
git add <any new or modified file>
-git commit -m '<message>'
-git format-patch HEAD~1
-mv <patch-file> <ticket-branchname.txt> (e.g. 12345-trunk.txt, 12345-3.0.txt)
-
-
-

Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch:

-
git push --set-upstream origin 12345-3.0
-
-
-
-
    -
  1. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless.
  2. -
  3. Attach the newly generated patch to the ticket/add a link to your branch and click “Submit Patch” at the top of the ticket. This will move the ticket into “Patch Available” status, indicating that your submission is ready for review.
  4. -
  5. Wait for other developers or committers to review it and hopefully +1 the ticket (see Review Checklist). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable.
  6. -
  7. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into “Patch Available” once again.
  8. -
-
-

Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/release_process.html b/src/doc/4.0-alpha2/development/release_process.html deleted file mode 100644 index 2945866ea..000000000 --- a/src/doc/4.0-alpha2/development/release_process.html +++ /dev/null @@ -1,389 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Release Process" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Release Process

- -
-

-

-
-
-

Attention

-
-
WORK IN PROGRESS
-
    -
  • A number of these steps still need to be finalised/tested.
  • -
  • The use of people.apache.org needs to be replaced with svnpubsub and dist.apache.org
  • -
-
-
-
-

The steps for Release Managers to create, vote and publish releases for Apache Cassandra.

-

While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC member can complete the process of publishing and announcing the release.

-
-

Prerequisites

-
-
Background docs
-
-
-
-

A debian based linux OS is required to run the release steps from. Debian-based distros provide the required RPM, dpkg and repository management tools.

-
-

Create and publish your GPG key

-

To create a GPG key, follow the guidelines. Include your public key in:

-
https://dist.apache.org/repos/dist/release/cassandra/KEYS
-
-
-

Publish your GPG key in a PGP key server, such as MIT Keyserver.

-
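A rough sketch of the corresponding commands (the email address and key id are placeholders, and the choice of keyserver is up to you):

```bash
# generate a new key pair, following the interactive prompts
gpg --gen-key

# append your public key, in ASCII-armored form, to your checkout of the KEYS file
gpg --armor --export you@apache.org >> KEYS

# publish the public key to a public keyserver
gpg --keyserver pgp.mit.edu --send-keys <your-key-id>
```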
-
-
-

Create Release Artifacts

-

Any committer can perform the following steps to create and call a vote on a proposed release.

-

Check that no open jira tickets are urgent and currently being worked on. Also check with a PMC member whether there are any security vulnerabilities currently being worked on in private.

-
-

Perform the Release

-

Run the following commands to generate and upload release artifacts, to a nexus staging repository and distribution location:

-
cd ~/git
-git clone https://github.com/apache/cassandra-builds.git
-# Edit the variables at the top of `cassandra-builds/cassandra-release/prepare_release.sh`
-
-# After cloning cassandra-builds repo, the prepare_release.sh is run from the actual cassandra git checkout,
-# on the branch/commit that we wish to tag for the tentative release along with version number to tag.
-# For example here <version-branch> might be `3.11` and <version> `3.11.3`
-cd ~/git/cassandra/
-git checkout cassandra-<version-branch>
-../cassandra-builds/cassandra-release/prepare_release.sh -v <version>
-
-
-

If successful, take note of the email text output which can be used in the next section “Call for a Vote”.

-

The prepare_release.sh script does not yet generate and upload the rpm distribution packages. To generate and upload them, do:

-
cd ~/git/cassandra-builds
-docker build -f docker/centos7-image.docker docker/
-docker run --rm -v `pwd`/dist:/dist `docker images -f label=org.cassandra.buildenv=centos -q` /home/build/build-rpms.sh <version>-tentative
-rpmsign --addsign dist/*.rpm
-
-
-

For more information on the above steps see the cassandra-builds documentation. The next step is to copy and commit these binaries to staging svnpubsub:

-
# FIXME the following commands is wrong while people.apache.org is still used instead of svnpubsub and dist.apache.org
-cd ~/git
-svn co https://dist.apache.org/repos/dist/dev/cassandra cassandra-dist-dev
-mkdir cassandra-dist-dev/<version>
-cp cassandra-builds/dist/*.rpm cassandra-dist-dev/<version>/
-
-svn add cassandra-dist-dev/<version>
-svn ci cassandra-dist-dev/<version>
-
-
-

After committing the binaries to staging, increment the version number in Cassandra on the cassandra-<version-branch>

-
-
cd ~/git/cassandra/
git checkout cassandra-<version-branch>
edit build.xml          # update `<property name="base.version" value="…"/>`
edit debian/changelog   # add entry for new version
edit CHANGES.txt        # add entry for new version
git commit -m "Update version to <next-version>" build.xml debian/changelog CHANGES.txt
git push
-
-
-
-

Call for a Vote

-

Fill out the following email template and send to the dev mailing list:

-
I propose the following artifacts for release as <version>.
-
-sha1: <git-sha>
-
-Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/<version>-tentative
-
-Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/org/apache/cassandra/apache-cassandra/<version>/
-
-Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/
-
-The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/
-
-The vote will be open for 72 hours (longer if needed).
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>-tentative
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>-tentative
-
-
-
-
-

Post-vote operations

-

Any PMC can perform the following steps to formalize and publish a successfully voted release.

-
-

Publish Artifacts

-

Run the following commands to publish the voted release artifacts:

-
cd ~/git
-git clone https://github.com/apache/cassandra-builds.git
-# edit the variables at the top of `finish_release.sh`
-
-# After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout,
-# on the tentative release tag that we wish to tag for the final release version number tag.
-cd ~/git/cassandra/
-git checkout <version>-tentative
-../cassandra-builds/cassandra-release/finish_release.sh -v <version> <staging_number>
-
-
-

If successful, take note of the email text output which can be used in the next section “Send Release Announcement”. The output will also list the next steps that are required. The first of these is to commit changes made to your https://dist.apache.org/repos/dist/release/cassandra/ checkout.

-
-
-

Promote Nexus Repository

-
-
    -
  • Login to Nexus repository again.
  • -
  • Click on “Staging” and then on the repository with id “cassandra-staging”.
  • -
  • Find your closed staging repository, right click on it and choose “Promote”.
  • -
  • Select the “Releases” repository and click “Promote”.
  • -
  • Next click on “Repositories”, select the “Releases” repository and validate that your artifacts exist as you expect them.
  • -
-
-
-
-

Sign and Upload Distribution Packages to Bintray

-

Run the following command:

-
cd ~/git
-# FIXME the next command is wrong while people.apache.org is used instead of svnpubsub and dist.apache.org
-svn mv https://dist.apache.org/repos/dist/dev/cassandra/<version> https://dist.apache.org/repos/dist/release/cassandra/
-
-# Create the yum metadata, sign the metadata, and sign some files within the signed repo metadata that the ASF sig tool errors out on
-svn co https://dist.apache.org/repos/dist/release/cassandra/redhat/ cassandra-dist-redhat
-cd cassandra-dist-redhat/<abbreviated-version>x/
-createrepo .
-gpg --detach-sign --armor repodata/repomd.xml
-for f in `find repodata/ -name *.bz2`; do
-  gpg --detach-sign --armor $f;
-done
-
-svn co https://dist.apache.org/repos/dist/release/cassandra/<version> cassandra-dist-<version>
-cd cassandra-dist-<version>
-cassandra-build/cassandra-release/upload_bintray.sh cassandra-dist-<version>
-
-
-
-
-

Update and Publish Website

-

See the docs at https://svn.apache.org/repos/asf/cassandra/site/src/README for building and publishing the website. Also update the CQL doc if appropriate.

-
-
-

Release version in JIRA

-

Release the JIRA version.

-
-
    -
  • In JIRA go to the version that you want to release and release it.
  • -
  • Create a new version, if it has not been done before.
  • -
-
-
-
-

Update to Next Development Version

-

Edit and commit build.xml so the base.version property points to the next version.

-
- -
-

Send Release Announcement

-

Fill out the following email template and send to both user and dev mailing lists:

-
The Cassandra team is pleased to announce the release of Apache Cassandra version <version>.
-
-Apache Cassandra is a fully distributed database. It is the right choice
-when you need scalability and high availability without compromising
-performance.
-
- http://cassandra.apache.org/
-
-Downloads of source and binary distributions are listed in our download
-section:
-
- http://cassandra.apache.org/download/
-
-This version is <the first|a bug fix> release[1] on the <version-base> series. As always,
-please pay attention to the release notes[2] and let us know[3] if you
-were to encounter any problem.
-
-Enjoy!
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>
-[3]: https://issues.apache.org/jira/browse/CASSANDRA
-
-
-
-
-

Update Slack Cassandra topic

-
-
Update topic in cassandra Slack room
-
/topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don’t ask to ask
-
-
-
-

Tweet from @Cassandra

-

Tweet the new release, from the @Cassandra account

-
-
-

Delete Old Releases

-

As described in When to Archive. Also check people.apache.org as previous release scripts used it.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/development/testing.html b/src/doc/4.0-alpha2/development/testing.html deleted file mode 100644 index 9e6c28467..000000000 --- a/src/doc/4.0-alpha2/development/testing.html +++ /dev/null @@ -1,184 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Testing" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Testing

-

Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you’re working on.

-
-

Unit Testing

-

The simplest way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the test/unit directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible, as Cassandra doesn’t have a very mock-friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods, as in the following example.

-
@Test
-public void testBatchAndList() throws Throwable
-{
-   createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
-   execute("BEGIN BATCH " +
-           "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
-           "APPLY BATCH");
-
-   assertRows(execute("SELECT l FROM %s WHERE k = 0"),
-              row(list(1, 2, 3)));
-}
-
-
-

Unit tests can be run from the command line using the ant test command, ant test -Dtest.name=<simple_classname> to execute a test suite or ant testsome -Dtest.name=<FQCN> -Dtest.methods=<testmethod1>[,testmethod2] for individual tests. For example, to run all test methods in the org.apache.cassandra.cql3.SimpleQueryTest class, you would run:

-
ant test -Dtest.name=SimpleQueryTest
-
-
-

To run only the testStaticCompactTables() test method from that class, you would run:

-
ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables
-
-
-

If you see an error like this:

-
Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found:
-org/apache/tools/ant/taskdefs/optional/junit/JUnitTask  using the classloader
-AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar]
-
-
-

You will need to install the ant-optional package since it contains the JUnitTask class.

-
-

Long running tests

-

Tests that consume a significant amount of time during execution can be found in the test/long directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will only execute tests under test/long when using the ant long-test target.

-
-
-
-

DTests

-

One way of doing integration or system testing at larger scale is by using dtest, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ccmlib from the ccm project. Dtests will setup clusters using this library just as you do running ad-hoc ccm commands on your local machine. Afterwards dtests will use the Python driver to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes.

-

Using dtests helps us to prevent regression bugs by continually executing tests on the CI server against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration here.

-

The best way to learn how to write dtests is probably by reading the introduction “How to Write a Dtest” and by looking at existing, recently updated tests in the project. New tests must follow certain style conventions that are checked before contributions are accepted. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR.

-

Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you.

-
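As a rough sketch of running dtests locally (assuming a compiled Cassandra source tree; the test module is just an example and option names may vary between dtest versions):

```bash
git clone https://github.com/apache/cassandra-dtest.git
cd cassandra-dtest
# install the Python dependencies, ideally inside a virtualenv
pip install -r requirements.txt

# point the tests at a locally built Cassandra checkout and run one module
pytest --cassandra-dir=/path/to/cassandra bootstrap_test.py
```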
-
-

Performance Testing

-

Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable.

-
-

Cassandra Stress Tool

-

See Cassandra Stress

-
-
-

cstar_perf

-

Another tool available on github is cstar_perf that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it.

-
-
-

CircleCI

-

Cassandra ships with a default CircleCI configuration. To enable running tests on your branches, you need to go to the CircleCI website, click “Login” and log in with your github account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click “Projects”, then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ant eclipse-warnings and ant test will be run. If you up the parallelism to 4, it also runs ant long-test, ant test-compression and ant stress-test.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/faq/index.html b/src/doc/4.0-alpha2/faq/index.html deleted file mode 100644 index 10cde9acf..000000000 --- a/src/doc/4.0-alpha2/faq/index.html +++ /dev/null @@ -1,317 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Frequently Asked Questions" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
- -
-
-
-
- -
-

Frequently Asked Questions

- -
-

Why can’t I set listen_address to listen on 0.0.0.0 (all my addresses)?

-

Cassandra is a gossip-based distributed system and listen_address is the address a node tells other nodes to reach it at. Telling other nodes “contact me on any of my addresses” is a bad idea; if different nodes in the cluster pick different addresses for you, Bad Things happen.

-

If you don’t want to manually specify an IP to listen_address for each node in your cluster (understandable!), leave it blank and Cassandra will use InetAddress.getLocalHost() to pick an address. Then it’s up to you or your ops team to make things resolve correctly (/etc/hosts/, dns, etc).

-

One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769).

-

See CASSANDRA-256 and CASSANDRA-43 for more gory details.

-
-
-

What ports does Cassandra use?

-

By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, and 7199 for JMX. The internode communication and native protocol ports are configurable in the Cassandra Configuration File. The JMX port is configurable in cassandra-env.sh (through JVM options). All ports are TCP.

-
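If you want to double-check which of these ports a node is actually listening on, one quick sanity check (a sketch assuming a Linux host with the ss utility and a running Cassandra process; the port list simply repeats the defaults above) is:

$ # list TCP listeners and filter for the default Cassandra ports
$ sudo ss -ltnp | grep -E '7000|7001|9042|7199'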
-
-

What happens to existing data in my cluster when I add new nodes?

-

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See Adding, replacing, moving and removing nodes.

-
-
-

I delete data from Cassandra, but disk usage stays the same. What gives?

-

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can’t actually be removed when you perform a delete; instead, a marker (also called a “tombstone”) is written to indicate the value’s new status. Never fear though: on the first compaction that occurs between the data and the tombstone, the data will be expunged completely and the corresponding disk space recovered. See Compaction for more detail.

-
-
-

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?

-

This happens when you have the same token assigned to each node. Don’t do that.

-

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

-

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart.

-
-
-

Can I change the replication factor (of a keyspace) on a live cluster?

-

Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data (see the example after the list below):

-
    -
  • Alter the replication factor for desired keyspace (using cqlsh for instance).
  • -
  • If you’re reducing the replication factor, run nodetool cleanup on the cluster to remove surplus replicated data. -Cleanup runs on a per-node basis.
  • -
  • If you’re increasing the replication factor, run nodetool repair -full to ensure data is replicated according to the new -configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster -performance. It’s highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will -most likely swamp it. Note that you will need to run a full repair (-full) to make sure that already repaired -sstables are not skipped.
  • -
-
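As a rough sketch of the sequence for increasing the replication factor (the keyspace name my_ks, datacenter name dc1 and replica count below are placeholders, not values from this documentation):

$ # 1. alter the keyspace, for example via cqlsh
$ cqlsh -e "ALTER KEYSPACE my_ks WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};"
$ # 2. run a full repair for that keyspace, one node at a time (rolling repair)
$ nodetool repair -full my_ks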
-
-

Can I Store (large) BLOBs in Cassandra?

-

Cassandra isn’t optimized for large file or BLOB storage and a single blob value is always read and sent to the client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

-

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the max_mutation_size_in_kb configuration of the Cassandra Configuration File (which defaults to half of commitlog_segment_size_in_mb, which itself defaults to 32MB).

-
-
-

Nodetool says “Connection refused to host: 127.0.1.1” for any remote host. What gives?

-

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

-

If you are not using DNS, then make sure that your /etc/hosts files are accurate on both ends. If that fails, try -setting the -Djava.rmi.server.hostname=<public name> JVM option near the bottom of cassandra-env.sh to an -interface that you can reach from the remote machine.

-
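For example, assuming the node is reachable from your workstation at 10.0.0.12 (a purely illustrative address), the option can be appended near the bottom of cassandra-env.sh:

# cassandra-env.sh - example only, substitute an address reachable from the remote machine
JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=10.0.0.12"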
-
-

Will batching my operations speed up my bulk load?

-

No. Using batches to load data will generally just add “spikes” of latency. Use asynchronous INSERTs instead, or use -true Bulk Loading.

-

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch stays reasonable). But never ever blindly batch everything!

-
-
-

On RHEL nodes are unable to join the ring

-

Check if SELinux is on; if it is, turn it off.

-
-
-

How do I unsubscribe from the email list?

-

Send an email to user-unsubscribe@cassandra.apache.org.

-
-
-

Why does top report that Cassandra is using a lot more memory than the Java heap max?

-

Cassandra uses Memory Mapped Files (mmap) internally. That is, we -use the operating system’s virtual memory system to map a number of on-disk files into the Cassandra process’ address -space. This will “use” virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that.

-

What matters from the perspective of “memory use” in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap’d /dev/zero, which represent real memory used. The key issue is that for a mmap’d file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write.

-

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don’t -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail here.

-
-
-

What are seeds?

-

Seeds are used during startup to discover the cluster.

-

If you configure your nodes to refer to some node as a seed, nodes in your ring tend to send Gossip messages to seeds more often (also see the section on gossip) than to non-seeds. In other words, seeds work as hubs of the Gossip network. With seeds, each node can detect status changes of other nodes quickly.

-

Seeds are also referred to by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node joins the ring, it learns about the other nodes, so it doesn’t need a seed on subsequent boots.

-

You can make a node a seed at any time. There is nothing special about seed nodes. If you list the node in the seed list it is a seed.

-

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to the seed list later. If you have no data (a new install) you do not have to worry about bootstrap at all.

-

Recommended usage of seeds (see the example after this list):

-
    -
  • pick two (or more) nodes per data center as seed nodes.
  • -
  • sync the seed list to all your nodes
  • -
-
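As a hedged illustration of what the synced seed list could look like in each node's cassandra.yaml (the addresses are placeholders, and the output line is what such a grep might print):

$ grep -F -- "- seeds:" conf/cassandra.yaml
          - seeds: "10.0.1.1,10.0.2.1"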
-
-

Does single seed mean single point of failure?

-

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

-
-
-

Why can’t I call jmx method X on jconsole?

-

Some JMX operations use array arguments and, as jconsole doesn’t support array arguments, those operations can’t be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.

-
-
-

Why do I see “… messages dropped …” in the logs?

-

This is a symptom of load shedding – Cassandra defending itself against more requests than it can handle.

-

Internode messages which are received by a node, but do not get processed within their proper timeout (see read_request_timeout, write_request_timeout, … in the Cassandra Configuration File), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

-

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

-

For reads, this means a read request may not have completed.

-

Load shedding is part of the Cassandra architecture; if this is a persistent issue it is generally a sign of an overloaded node or cluster.

-
-
-

Cassandra dies with java.lang.OutOfMemoryError: Map failed

-

If Cassandra is dying specifically with the “Map failed” message, it means the OS is denying java the ability to lock more memory. In Linux, this typically means memlock is limited. Check /proc/<pid of cassandra>/limits to verify this and raise it (e.g., via ulimit in bash). You may also need to increase vm.max_map_count. Note that the Debian package handles this for you automatically.

-
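A minimal troubleshooting sketch, assuming a running Cassandra process and root access (the vm.max_map_count value is just an example):

$ # inspect the memlock limit of the running Cassandra process
$ grep "locked memory" /proc/$(pgrep -f CassandraDaemon)/limits
$ # raise the maximum number of memory map areas
$ sudo sysctl -w vm.max_map_count=1048575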
-
-

What happens if two updates are made with the same timestamp?

-

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected.

-
-
-

Why does bootstrapping a new node fail with a “Stream failed” error?

-

Two main possibilities:

-
    -
  1. the GC may be creating long pauses disrupting the streaming process
  2. compactions happening in the background hold streaming long enough that the TCP connection fails
-

In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (the default is very high on Linux). Try just running the following:

-
$ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-
-

To make those settings permanent, add them to your /etc/sysctl.conf file.

-
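For example, appending the same keys to /etc/sysctl.conf and reloading (values as above):

$ sudo tee -a /etc/sysctl.conf <<'EOF'
net.ipv4.tcp_keepalive_time=60
net.ipv4.tcp_keepalive_intvl=60
net.ipv4.tcp_keepalive_probes=5
EOF
$ sudo sysctl -p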

Note: GCE’s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/genindex.html b/src/doc/4.0-alpha2/genindex.html deleted file mode 100644 index 893e3c509..000000000 --- a/src/doc/4.0-alpha2/genindex.html +++ /dev/null @@ -1,94 +0,0 @@ - ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Index" -doc-header-links: ' - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha2/getting_started/configuring.html b/src/doc/4.0-alpha2/getting_started/configuring.html deleted file mode 100644 index 03fb3dd15..000000000 --- a/src/doc/4.0-alpha2/getting_started/configuring.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

For running Cassandra on a single node, the default configuration file present at ./conf/cassandra.yaml is enough; you shouldn’t need to change any configuration. However, when you deploy a cluster of nodes, or use clients that are not on the same host, then there are some parameters that must be changed.

-

The Cassandra configuration files can be found in the conf directory of tarballs. For packages, the configuration -files will be located in /etc/cassandra.

-
-

Main runtime properties

-

Most of the configuration in Cassandra is done via yaml properties that can be set in cassandra.yaml. At a minimum you should consider setting the following properties (a minimal example follows the list):

-
    -
  • cluster_name: the name of your cluster.
  • -
  • seeds: a comma separated list of the IP addresses of your cluster seeds.
  • -
  • storage_port: you don’t necessarily need to change this but make sure that there are no firewalls blocking this -port.
  • -
  • listen_address: the IP address of your node. This is what allows other nodes to communicate with this node, so it is important that you change it. Alternatively, you can set listen_interface to tell Cassandra which interface to use, and consequently which address to use. Set only one, not both.
  • -
  • native_transport_port: as for storage_port, make sure this port is not blocked by firewalls as clients will -communicate with Cassandra on this port.
  • -
-
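As a minimal illustration (cluster name, addresses and seed IPs below are placeholders, and the output shown is simply what such a grep might print on an already configured node):

$ grep -E "cluster_name:|- seeds:|storage_port:|listen_address:|native_transport_port:" conf/cassandra.yaml
cluster_name: 'MyCluster'
          - seeds: "10.0.0.1,10.0.0.2"
storage_port: 7000
listen_address: 10.0.0.3
native_transport_port: 9042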
-
-

Changing the location of directories

-

The following yaml properties control the location of directories:

-
    -
  • data_file_directories: one or more directories where data files are located.
  • -
  • commitlog_directory: the directory where commitlog files are located.
  • -
  • saved_caches_directory: the directory where saved caches are located.
  • -
  • hints_directory: the directory where hints are located.
  • -
-

For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks.

-
-
-

Environment variables

-

JVM-level settings such as heap size can be set in cassandra-env.sh. You can add any additional JVM command line -argument to the JVM_OPTS environment variable; when Cassandra starts these arguments will be passed to the JVM.

-
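For instance (the property below only illustrates the mechanism, it is not a recommended setting), an extra argument can be appended at the end of cassandra-env.sh:

# cassandra-env.sh
JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true"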
-
-

Logging

-

The logger in use is logback. You can change logging properties by editing logback.xml. By default it will log at -INFO level into a file called system.log and at debug level into a file called debug.log. When running in the -foreground, it will also log at INFO level to the console.

-
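Log levels can also be adjusted on a running node without editing logback.xml; a hedged example (the logger name is only an illustration):

$ nodetool setlogginglevel org.apache.cassandra.transport TRACE
$ # calling it with no arguments resets levels to the logback.xml configuration
$ nodetool setlogginglevel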
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/getting_started/drivers.html b/src/doc/4.0-alpha2/getting_started/drivers.html deleted file mode 100644 index 2208d85fd..000000000 --- a/src/doc/4.0-alpha2/getting_started/drivers.html +++ /dev/null @@ -1,246 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Client drivers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Client drivers

-

Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver.

- -
-

Python

- -
-

Clojure

- -
-
-

Erlang

- -
-
-

Go

- -
-
-

Haskell

- -
-
-

Rust

- -
- -
-

Elixir

- -
-
-

Dart

- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/getting_started/index.html b/src/doc/4.0-alpha2/getting_started/index.html deleted file mode 100644 index f88dd1db5..000000000 --- a/src/doc/4.0-alpha2/getting_started/index.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Getting Started" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha2/getting_started/installing.html b/src/doc/4.0-alpha2/getting_started/installing.html deleted file mode 100644 index 641d580a0..000000000 --- a/src/doc/4.0-alpha2/getting_started/installing.html +++ /dev/null @@ -1,197 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Installing Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Installing Cassandra

-
-

Prerequisites

-
    -
  • The latest version of Java 8, either the Oracle Java Standard Edition 8 or OpenJDK 8. To -verify that you have the correct version of java installed, type java -version.
  • -
  • For using cqlsh, the latest version of Python 2.7. To verify that you have -the correct version of Python installed, type python --version.
  • -
-
-
-

Installation from binary tarball files

- -
tar -xzvf apache-cassandra-3.6-bin.tar.gz
-
-
-

The files will be extracted into apache-cassandra-3.6; you need to substitute 3.6 with the release number that you have downloaded.

-
    -
  • Optionally add apache-cassandra-3.6/bin to your path.
  • -
  • Start Cassandra in the foreground by invoking bin/cassandra -f from the command line. Press “Control-C” to stop -Cassandra. Start Cassandra in the background by invoking bin/cassandra from the command line. Invoke kill pid -or pkill -f CassandraDaemon to stop Cassandra, where pid is the Cassandra process id, which you can find for -example by invoking pgrep -f CassandraDaemon.
  • -
  • Verify that Cassandra is running by invoking bin/nodetool status from the command line.
  • -
  • Configuration files are located in the conf sub-directory.
  • -
  • Since Cassandra 2.1, log and data directories are located in the logs and data sub-directories respectively. -Older versions defaulted to /var/log/cassandra and /var/lib/cassandra. Due to this, it is necessary to either -start Cassandra with root privileges or change conf/cassandra.yaml to use directories owned by the current user, -as explained below in the section on changing the location of directories.
  • -
-
-
-

Installation from Debian packages

-
    -
  • Add the Apache repository of Cassandra to /etc/apt/sources.list.d/cassandra.sources.list, for example for version -3.6:
  • -
-
echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-
-
-
    -
  • Add the Apache Cassandra repository keys:
  • -
-
curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
-
-
-
    -
  • Update the repositories:
  • -
-
sudo apt-get update
-
-
-
    -
  • If you encounter this error:
  • -
-
GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA
-
-
-

Then add the public key A278B781FE4B2BDA as follows:

-
sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA
-
-
-

and repeat sudo apt-get update. The actual key may be different; you get it from the error message itself. For a full list of Apache contributors' public keys, you can refer to this link.

-
    -
  • Install Cassandra:
  • -
-
sudo apt-get install cassandra
-
-
-
    -
  • You can start Cassandra with sudo service cassandra start and stop it with sudo service cassandra stop. -However, normally the service will start automatically. For this reason be sure to stop it if you need to make any -configuration changes.
  • -
  • Verify that Cassandra is running by invoking nodetool status from the command line.
  • -
  • The default location of configuration files is /etc/cassandra.
  • -
  • The default location of log and data directories is /var/log/cassandra/ and /var/lib/cassandra.
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/getting_started/querying.html b/src/doc/4.0-alpha2/getting_started/querying.html deleted file mode 100644 index 6a8c70be3..000000000 --- a/src/doc/4.0-alpha2/getting_started/querying.html +++ /dev/null @@ -1,145 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Inserting and querying" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Inserting and querying

-

The API to Cassandra is CQL, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done:

-
    -
  • either using cqlsh,
  • -
  • or through a client driver for Cassandra.
  • -
-
-

CQLSH

-

cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:

-
$ bin/cqlsh localhost
-Connected to Test Cluster at localhost:9042.
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-Use HELP for help.
-cqlsh> SELECT cluster_name, listen_address FROM system.local;
-
- cluster_name | listen_address
---------------+----------------
- Test Cluster |      127.0.0.1
-
-(1 rows)
-cqlsh>
-
-
-

See the cqlsh section for full documentation.

-
-
-

Client drivers

-

A lot of client drivers are provided by the community and a list of known drivers is provided in the next section. You should refer to the documentation of each driver for more information on how to use them.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/index.html b/src/doc/4.0-alpha2/index.html deleted file mode 100644 index c93bb9811..000000000 --- a/src/doc/4.0-alpha2/index.html +++ /dev/null @@ -1,86 +0,0 @@ ---- -layout: doclandingpage -title: "Documentation" -is_homepage: false -is_sphinx_doc: false ---- - -

Apache Cassandra Documentation v4.0-alpha2

- -
This documentation is currently a work-in-progress and contains a number of TODO sections. - Contributions are welcome.
- -

Main documentation

- -
-
-

Meta information

- - - - diff --git a/src/doc/4.0-alpha2/objects.inv b/src/doc/4.0-alpha2/objects.inv deleted file mode 100644 index c10e4da94..000000000 Binary files a/src/doc/4.0-alpha2/objects.inv and /dev/null differ diff --git a/src/doc/4.0-alpha2/operating/audit_logging.html b/src/doc/4.0-alpha2/operating/audit_logging.html deleted file mode 100644 index 0f8fd27c0..000000000 --- a/src/doc/4.0-alpha2/operating/audit_logging.html +++ /dev/null @@ -1,280 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Audit Logging" -doc-header-links: ' - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Audit Logging

-

Audit logging in Cassandra logs every incoming CQL command request, as well as authentication (successful and unsuccessful logins), to a C* node. Currently, two implementations are provided; a custom logger can also be implemented and injected with the class name as a parameter in cassandra.yaml.

-
    -
  • BinAuditLogger An efficient way to log events to file in a binary format.
  • -
  • FileAuditLogger Logs events to audit/audit.log file using slf4j logger.
  • -
-

Recommendation: BinAuditLogger is the community-recommended logger, considering its performance.

-
-

What does it capture

-

Audit logging captures the following events:

-
    -
  • Successful as well as unsuccessful login attempts.
  • -
  • All database commands executed via Native protocol (CQL) attempted or successfully executed.
  • -
-
-
-

Limitations

-

Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution time stamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log.

-
-
-

What does it log

-

Each audit log implementation has access to the following attributes, and for the default text based logger these fields are concatenated with | characters to yield the final message.

-
-
    -
  • user: User name(if available)
  • -
  • host: Host IP, where the command is being executed
  • -
  • source ip address: Source IP address from where the request initiated
  • -
  • source port: Source port number from where the request initiated
  • -
  • timestamp: unix time stamp
  • -
  • type: Type of the request (SELECT, INSERT, etc.,)
  • -
  • category - Category of the request (DDL, DML, etc.,)
  • -
  • keyspace - Keyspace(If applicable) on which request is targeted to be executed
  • -
  • scope - Table/Aggregate name/ function name/ trigger name etc., as applicable
  • -
  • operation - CQL command being executed
  • -
-
-
-
-

How to configure

-

Auditlog can be configured using cassandra.yaml. If you want to try Auditlog on one node, it can also be enabled and configured using nodetool.

-
-

cassandra.yaml configurations for AuditLog

-
-
    -
  • enabled: This option enables/ disables audit log
  • -
  • logger: Class name of the logger/ custom logger.
  • -
  • audit_logs_dir: Auditlogs directory location, if not set, default to cassandra.logdir.audit or cassandra.logdir + /audit/
  • -
  • included_keyspaces: Comma separated list of keyspaces to be included in audit log, default - includes all keyspaces
  • -
  • excluded_keyspaces: Comma separated list of keyspaces to be excluded from audit log, default - excludes no keyspace except system, system_schema and system_virtual_schema
  • -
  • included_categories: Comma separated list of Audit Log Categories to be included in audit log, default - includes all categories
  • -
  • excluded_categories: Comma separated list of Audit Log Categories to be excluded from audit log, default - excludes no category
  • -
  • included_users: Comma separated list of users to be included in audit log, default - includes all users
  • -
  • excluded_users: Comma separated list of users to be excluded from audit log, default - excludes no user
  • -
-
-

List of available categories are: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE

-
-
-

NodeTool command to enable AuditLog

-

enableauditlog: Enables AuditLog with the yaml defaults. The yaml configuration can be overridden using options via the nodetool command (an example combining options follows the options list below).

-
nodetool enableauditlog
-
-
-
-

Options

-
-
--excluded-categories
-
Comma separated list of Audit Log Categories to be excluded for -audit log. If not set the value from cassandra.yaml will be used
-
--excluded-keyspaces
-
Comma separated list of keyspaces to be excluded from the audit log. If not set, the value from cassandra.yaml will be used. Please remember that system, system_schema and system_virtual_schema are excluded by default; if you are overriding this option via nodetool, remember to add these keyspaces back if you don't want them in the audit logs
-
--excluded-users
-
Comma separated list of users to be excluded for audit log. If not -set the value from cassandra.yaml will be used
-
--included-categories
-
Comma separated list of Audit Log Categories to be included for -audit log. If not set the value from cassandra.yaml will be used
-
--included-keyspaces
-
Comma separated list of keyspaces to be included for audit log. If -not set the value from cassandra.yaml will be used
-
--included-users
-
Comma separated list of users to be included for audit log. If not -set the value from cassandra.yaml will be used
-
--logger
-
Logger name to be used for AuditLogging. Default BinAuditLogger. If -not set the value from cassandra.yaml will be used
-
-
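For instance, several of these options can be combined in a single invocation (the keyspace name app_ks is a placeholder):

nodetool enableauditlog --logger BinAuditLogger --included-keyspaces app_ks --excluded-categories QUERY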
-
-
-

NodeTool command to disable AuditLog

-

disableauditlog: Disables AuditLog.

-
nodetool disableuditlog
-
-
-
-
-

NodeTool command to reload AuditLog filters

-

enableauditlog: The NodeTool enableauditlog command can be used to reload auditlog filters when called with the default or previous logger name and updated filters.

-

E.g.,

-
nodetool enableauditlog --loggername <Default/ existing loggerName> --included-keyspaces <New Filter values>
-
-
-
-
-
-

View the contents of AuditLog Files

-

auditlogviewer is the tool introduced to help view the contents of the binlog file in a human-readable text format.

-
auditlogviewer <path1> [<path2>...<pathN>] [options]
-
-
-
-

Options

-
-
-f,--follow
-
-
Upon reaching the end of the log, continue indefinitely waiting for more records
-
-
-
-r,--roll_cycle
-
-
How often the log file was rolled. May be necessary for Chronicle to correctly parse file names (MINUTELY, HOURLY, DAILY). Default HOURLY.
-
-
-
-h,--help
-
display this help message
-
-

For example, to dump the contents of audit log files on the console

-
auditlogviewer /logs/cassandra/audit
-
-
-
-
-

Sample output

-
LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1"
-
-
-
-
-
-

Configuring BinAuditLogger

-

To use BinAuditLogger as the logger in AuditLogging, set the logger to BinAuditLogger in cassandra.yaml under the audit_logging_options section. BinAuditLogger can be further configured using its advanced options in cassandra.yaml.

-
-

Advanced Options for BinAuditLogger

-
-
block
-
Indicates if the AuditLog should block if it falls behind, or should drop audit log records. Default is set to true so that AuditLog records won't be lost
-
max_queue_weight
-
Maximum weight of in memory queue for records waiting to be written to the audit log file before blocking or dropping the log records. Default is set to 256 * 1024 * 1024
-
max_log_size
-
Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is set to 16L * 1024L * 1024L * 1024L
-
roll_cycle
-
How often to roll Audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY. For more options, refer to net.openhft.chronicle.queue.RollCycles. Default is set to "HOURLY"
-
-
-
-
-

Configuring FileAuditLogger

-

To use FileAuditLogger as the logger in AuditLogging, apart from setting the class name in cassandra.yaml, the following configuration is needed to have the audit log events flow into a separate log file instead of system.log:

-
<!-- Audit Logging (FileAuditLogger) rolling file appender to audit.log -->
-<appender name="AUDIT" class="ch.qos.logback.core.rolling.RollingFileAppender">
-  <file>${cassandra.logdir}/audit/audit.log</file>
-  <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
-    <!-- rollover daily -->
-    <fileNamePattern>${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
-    <!-- each file should be at most 50MB, keep 30 days worth of history, but at most 5GB -->
-    <maxFileSize>50MB</maxFileSize>
-    <maxHistory>30</maxHistory>
-    <totalSizeCap>5GB</totalSizeCap>
-  </rollingPolicy>
-  <encoder>
-    <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern>
-  </encoder>
-</appender>
-
-<!-- Audit Logging additivity to redirect audt logging events to audit/audit.log -->
-<logger name="org.apache.cassandra.audit" additivity="false" level="INFO">
-        <appender-ref ref="AUDIT"/>
-</logger>
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/backups.html b/src/doc/4.0-alpha2/operating/backups.html deleted file mode 100644 index 93f9df39b..000000000 --- a/src/doc/4.0-alpha2/operating/backups.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Backups" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/bloom_filters.html b/src/doc/4.0-alpha2/operating/bloom_filters.html deleted file mode 100644 index 4f4d8cdd2..000000000 --- a/src/doc/4.0-alpha2/operating/bloom_filters.html +++ /dev/null @@ -1,161 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bloom Filters" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bloom Filters

-

In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter.

-

Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file.

-

While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting bloom_filter_fp_chance to a float between 0 and 1.

-

The default value for bloom_filter_fp_chance is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases.

-

Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the bloom_filter_fp_chance gets closer to 0), memory usage -increases non-linearly - the bloom filter for bloom_filter_fp_chance = 0.01 will require about three times as much -memory as the same table with bloom_filter_fp_chance = 0.1.

-

Typical values for bloom_filter_fp_chance are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case:

-
    -
  • Users with more RAM and slower disks may benefit from setting the bloom_filter_fp_chance to a numerically lower -number (such as 0.01) to avoid excess IO operations
  • -
  • Users with less RAM, more dense nodes, or very fast disks may tolerate a higher bloom_filter_fp_chance in order to -save RAM at the expense of excess IO operations
  • -
  • In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics -workloads), setting the bloom_filter_fp_chance to a much higher number is acceptable.
  • -
-
-

Changing

-

The bloom filter false positive chance is visible in the DESCRIBE TABLE output as the field -bloom_filter_fp_chance. Operators can change the value with an ALTER TABLE statement:

-
ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01
-
-
-

Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ALTER TABLE statement, new files on disk will be written with the new bloom_filter_fp_chance, but existing sstables will not be modified until they are compacted - if an operator needs a change to bloom_filter_fp_chance to take effect, they can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the sstables on disk, regenerating the bloom filters in the process.

-
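A sketch of that full sequence (keyspace and table names are placeholders):

$ cqlsh -e "ALTER TABLE my_ks.my_table WITH bloom_filter_fp_chance = 0.01;"
$ # rewrite the existing sstables so the new filter takes effect without waiting for compaction
$ nodetool upgradesstables -a my_ks my_table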
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/bulk_loading.html b/src/doc/4.0-alpha2/operating/bulk_loading.html deleted file mode 100644 index 2079b8008..000000000 --- a/src/doc/4.0-alpha2/operating/bulk_loading.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bulk Loading" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/cdc.html b/src/doc/4.0-alpha2/operating/cdc.html deleted file mode 100644 index 55e2a3bda..000000000 --- a/src/doc/4.0-alpha2/operating/cdc.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Change Data Capture" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Change Data Capture

-
-

Overview

-

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the -table property cdc=true (either when creating the table or -altering it). Upon CommitLogSegment creation, a hard-link to the segment is created in the -directory specified in cassandra.yaml. On segment fsync to disk, if CDC data is present anywhere in the segment a -<segment_name>_cdc.idx file is also created with the integer offset of how much data in the original segment is persisted -to disk. Upon final segment flush, a second line with the human-readable word “COMPLETED” will be added to the _cdc.idx -file indicating that Cassandra has completed all processing on the file.

-

We use an index file rather than just encouraging clients to parse the log in real time off a memory-mapped handle, as data can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx file will ensure that you only parse CDC data that is durable.

-

A threshold of total disk space allowed is specified in the yaml; once it is reached, newly allocated CommitLogSegments will not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory.

-
-
-

Configuration

-
-

Enabling or disabling CDC on a table

-

CDC is enabled or disabled through the cdc table property, for instance:

-
CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=false;
-
-
-
-
-

cassandra.yaml parameters

-

The following cassandra.yaml options are available for CDC (a quick way to inspect them on a node is shown after the list):

-
-
cdc_enabled (default: false)
-
Enable or disable CDC operations node-wide.
-
cdc_raw_directory (default: $CASSANDRA_HOME/data/cdc_raw)
-
Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
-
cdc_free_space_in_mb: (default: min of 4096 and 1/8th volume space)
-
Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in -cdc_raw_directory.
-
cdc_free_space_check_interval_ms (default: 250)
-
When at capacity, we limit the frequency with which we re-calculate the space taken up by cdc_raw_directory to -prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-
-
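A quick, hedged way to inspect the effective CDC settings and the raw segment directory on a node (paths depend on how Cassandra was installed; the ones below are typical package defaults):

$ grep -E "^cdc_" /etc/cassandra/cassandra.yaml
$ # flushed CDC segments waiting for a consumer
$ ls /var/lib/cassandra/cdc_raw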
-
-

Reading CommitLogSegments

-

Use a CommitLogReader.java. Usage is fairly straightforward, with a variety of signatures available for use. In order to handle mutations read from disk, implement CommitLogReadHandler.

-
-
-

Warnings

-

Do not enable CDC without some kind of consumption process in-place.

-

If CDC is enabled on a node and then on a table, the cdc_free_space_in_mb will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place.

-
-
-

Further Reading

- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/compaction.html b/src/doc/4.0-alpha2/operating/compaction.html deleted file mode 100644 index cb84050d1..000000000 --- a/src/doc/4.0-alpha2/operating/compaction.html +++ /dev/null @@ -1,520 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compaction" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compaction

-
-

Types of compaction

-

The concept of compaction is used for different kinds of operations in Cassandra; the common thing about these operations is that they take one or more sstables and output new sstables. The types of compaction are:

-
-
Minor compaction
-
triggered automatically in Cassandra.
-
Major compaction
-
a user executes a compaction over all sstables on the node.
-
User defined compaction
-
a user triggers a compaction on a given set of sstables.
-
Scrub
-
try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you -will need to run a full repair on the node.
-
Upgradesstables
-
upgrade sstables to the latest version. Run this after upgrading to a new major version.
-
Cleanup
-
remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been -bootstrapped since that node will take ownership of some ranges from those nodes.
-
Secondary index rebuild
-
rebuild the secondary indexes on the node.
-
Anticompaction
-
after repair the ranges that were actually repaired are split out of the sstables that existed when repair started.
-
Sub range compaction
-
It is possible to only compact a given sub range - this could be useful if you know a token that has been -misbehaving - either gathering many updates or many deletes. (nodetool compact -st x -et y) will pick -all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will -most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS -the resulting sstable will end up in L0.
-
-
-
-

When is a minor compaction triggered?

-

  • When an sstable is added to the node through flushing/streaming etc.
  • When autocompaction is enabled after being disabled (nodetool enableautocompaction).
  • When compaction adds new sstables.
  • A check for new minor compactions runs every 5 minutes.

-
-
-

Merging sstables

-

Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently.

-
-
-

Tombstones and Garbage Collection (GC) Grace

-
-

Why Tombstones

-

When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra.

-
-
-

Deletes without tombstones

-

Imagine a three node cluster which has the value [A] replicated to every node:

-
[A], [A], [A]
-
-
-

If one of the nodes fails and our delete operation only removes existing values, we can end up with a cluster that looks like:

-
[], [], [A]
-
-
-

Then a repair operation would replace the value of [A] back onto the two nodes which are missing the value:

-
[A], [A], [A]
-
-
-

This would cause our data to be resurrected even though it had been -deleted.

-
-
-

Deletes with Tombstones

-

Starting again with a three node cluster which has the value [A] replicated to every node:

-
[A], [A], [A]
-
-
-

If instead of removing data we add a tombstone record, our single node failure situation will look like this:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A]
-
-
-

Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being resurrected:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]]
-
-
-

Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as gc_grace_seconds for every table in Cassandra.

-
-
-

The gc_grace_seconds parameter and Tombstone Removal

-

The table level gc_grace_seconds parameter controls how long Cassandra will retain tombstones through compaction events before finally removing them. This duration should directly reflect the amount of time a user expects to allow before recovering a failed node. After gc_grace_seconds has expired the tombstone may be removed (meaning there will no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to be able to drop an actual tombstone the following needs to be true:

-
    -
  • The tombstone must be older than gc_grace_seconds
  • -
  • If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older -than the tombstone containing X must be included in the same compaction. We don’t need to care if the partition is in -an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older -than the data it cannot shadow that data.
  • -
  • If the option only_purge_repaired_tombstones is enabled, tombstones are only removed if the data has also been -repaired.
  • -
-

If a node remains down or disconnected for longer than gc_grace_seconds, its deleted data will be repaired back to the other nodes and re-appear in the cluster. This is basically the same as in the “Deletes without Tombstones” section. Note that tombstones will not be removed until a compaction event, even if gc_grace_seconds has elapsed.

-

The default value for gc_grace_seconds is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using WITH gc_grace_seconds.

-
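For example (the table name and value are purely illustrative), lowering the grace period to 5 days on an existing table:

$ cqlsh -e "ALTER TABLE my_ks.my_table WITH gc_grace_seconds = 432000;"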
-
-
-

TTL

-

Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least gc_grace_seconds. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once.

-
-
-

Fully expired sstables

-

If an sstable contains only tombstones and it is guaranteed that the sstable is not shadowing data in any other sstable, compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other sstables contain older data. There is a tool called sstableexpiredblockers that will list which sstables are droppable and which are blocking them from being dropped. This is especially useful for time series compaction with TimeWindowCompactionStrategy (and the deprecated DateTieredCompactionStrategy). With TimeWindowCompactionStrategy it is possible to remove the guarantee (not check for shadowing data) by enabling unsafe_aggressive_sstable_expiration.

-
-
-

Repaired/unrepaired data

-

With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables.

-
-
-

Data directories

-

Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making data live, tombstones and actual data are always kept in the same data directory. This way, if a disk is lost, all versions of a partition are lost and no data can get undeleted. To achieve this, a compaction strategy instance per data directory is run in addition to the compaction strategy instances containing repaired/unrepaired data; this means that if you have 4 data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding data getting undeleted:

-
    -
  • It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings -and each one can run compactions independently from the others.
  • -
  • Users can backup and restore a single data directory.
  • -
  • Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk backing two data directories, the big one will be limited by the small one. One workaround for this is to create more data directories backed by the big disk.
  • -
-
-
-

Single sstable tombstone compaction

-

When an sstable is written, a histogram of the tombstone expiry times is created, and this is used to find sstables with very many tombstones and run single sstable compaction on them in the hope of being able to drop tombstones. Before starting this, it is also checked how likely it is that any tombstones will actually be able to be dropped, and how much this sstable overlaps with other sstables. To avoid most of these checks the compaction option unchecked_tombstone_compaction can be enabled.

-
-
-

Common options

-

There are a number of common options for all the compaction strategies (an example of setting them follows the list):

-
-
enabled (default: true)
-
Whether minor compactions should run. Note that you can have ‘enabled’: true as a compaction option and then do -‘nodetool enableautocompaction’ to start running compactions.
-
tombstone_threshold (default: 0.2)
-
How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-
tombstone_compaction_interval (default: 86400s (1 day))
-
Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure -that one sstable is not constantly getting recompacted - this option states how often we should try for a given -sstable.
-
log_all (default: false)
-
New detailed compaction logging, see below.
-
unchecked_tombstone_compaction (default: false)
-
The single sstable compaction has quite strict checks for whether it should be started, this option disables those -checks and for some usecases this might be needed. Note that this does not change anything for the actual -compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able -to drop any tombstones.
-
only_purge_repaired_tombstone (default: false)
-
Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-
min_threshold (default: 4)
-
Lower limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
max_threshold (default: 32)
-
Upper limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
-

Further, see the section on each strategy for specific additional options.

-
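These options are set per table inside the compaction map; a hedged example using STCS (table name and values are placeholders):

$ cqlsh -e "ALTER TABLE my_ks.my_table WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'min_threshold': 6, 'tombstone_threshold': 0.3};"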
-
-

Compaction nodetool commands

-

The nodetool utility provides a number of commands related to compaction (an example follows the list):

-
-
enableautocompaction
-
Enable compaction.
-
disableautocompaction
-
Disable compaction.
-
setcompactionthroughput
-
How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this -throughput.
-
compactionstats
-
Statistics about current and pending compactions.
-
compactionhistory
-
List details about the last compactions.
-
setcompactionthreshold
-
Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
-
-
-
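For example (the throughput value, keyspace and table names are placeholders):

$ nodetool setcompactionthroughput 64
$ nodetool compactionstats
$ nodetool setcompactionthreshold my_ks my_table 4 32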
-

Switching the compaction strategy and options using JMX

-

It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:

-
org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>
-
-
-

and the attribute to change is CompactionParameters or CompactionParametersJson if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an ALTER TABLE statement - -for example:

-
{ 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}
-
-
-

The setting is kept until someone executes an ALTER TABLE that touches the compaction -settings or restarts the node.

-
-
-

More detailed compaction logging

-

Enable with the compaction option log_all and a more detailed compaction log file will be produced in your log -directory.

-
-
-

Size Tiered Compaction Strategy

-

The basic idea of SizeTieredCompactionStrategy (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within bucket_low and bucket_high of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket’s sstables takes the most reads.

-
-

Major compaction

-

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%… of the total size.

-
-
-

STCS options

-
-
min_sstable_size (default: 50MB)
-
Sstables smaller than this are put in the same bucket.
-
bucket_low (default: 0.5)
-
How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if bucket_low * avg_bucket_size < sstable_size (and the bucket_high condition holds, see below), then -the sstable is added to the bucket.
-
bucket_high (default: 1.5)
-
How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if sstable_size < bucket_high * avg_bucket_size (and the bucket_low condition holds, see above), then -the sstable is added to the bucket.
-
-
-
-

Defragmentation

-

Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster.

-
-
-
-

Leveled Compaction Strategy

-

The idea of LeveledCompactionStrategy (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here.

-

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that we won't create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables cover the full range. We also can’t compact all L0 sstables with all L1 sstables in a single compaction since that can use too much memory.

-

When deciding which level to compact LCS checks the higher levels first (with LCS, a “higher” level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level.

-
-

Major compaction

-

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, it continues with L2 etc. This is suboptimal and will change to create all the sstables in a high level instead; see CASSANDRA-11817.

-
-
-

Bootstrapping

-

During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

-
-
-

STCS in L0

-

If LCS gets very many L0 sstables, reads are going to hit all (or most) of the L0 sstables since they are likely to be overlapping. To remedy this more quickly, LCS does STCS compactions in L0 if there are more than 32 sstables there. This should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better.

-
-
-

Starved sstables

-

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted, they might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is only enough data to actually get an L4 on the node, the leftover sstables in L6 will get starved and not compacted. This can happen if a user changes sstable_size_in_mb from 5MB to 160MB, for example. To avoid this, LCS tries to include those starved high level sstables in other compactions if there have been 25 compaction rounds where the highest level has not been involved.

-
-
-

LCS options

-
-
sstable_size_in_mb (default: 160MB)
-
The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very -large partitions on the node.
-
fanout_size (default: 10)
-
The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning -this option.
-
-

LCS also supports the cassandra.disable_stcs_in_l0 startup option (-Dcassandra.disable_stcs_in_l0=true) to avoid doing STCS in L0.
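A sketch of configuring LCS with the options described above, assuming a keyspace ks and table tbl (placeholder names):

```sql
-- ks.tbl are placeholder names
ALTER TABLE ks.tbl WITH compaction = {
  'class': 'LeveledCompactionStrategy',
  'sstable_size_in_mb': 160,
  'fanout_size': 10
};
```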

-
-
-
-

Time Window Compaction Strategy

-

TimeWindowCompactionStrategy (TWCS) is designed specifically for workloads where it’s beneficial to have data on disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using SizeTieredCompactionStrategy or LeveledCompactionStrategy. The basic concept is that TimeWindowCompactionStrategy will create one sstable per time window, where a window is simply calculated as the combination of two primary options:

-
-
compaction_window_unit (default: DAYS)
-
A Java TimeUnit (MINUTES, HOURS, or DAYS).
-
compaction_window_size (default: 1)
-
The number of units that make up a window.
-
unsafe_aggressive_sstable_expiration (default: false)
-
Expired sstables will be dropped without checking whether their data shadows data in other sstables. This is a potentially risky option that can lead to data loss or deleted data re-appearing, going beyond what unchecked_tombstone_compaction does for single sstable compaction. Due to the risk, the jvm must also be started with -Dcassandra.unsafe_aggressive_sstable_expiration=true.
-
-

Taken together, the operator can specify windows of virtually any size, and TimeWindowCompactionStrategy will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using SizeTieredCompactionStrategy.

-

Ideally, operators should select a compaction_window_unit and compaction_window_size pair that produces approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 day window would be a reasonable choice ('compaction_window_unit':'DAYS','compaction_window_size':3).
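For instance, a sketch of a table created with that configuration (keyspace, table and column names are placeholders):

```sql
-- ks.events is a placeholder name; the schema is illustrative only
CREATE TABLE ks.events (
  id uuid,
  ts timestamp,
  payload text,
  PRIMARY KEY (id, ts)
) WITH compaction = {
  'class': 'TimeWindowCompactionStrategy',
  'compaction_window_unit': 'DAYS',
  'compaction_window_size': 3
};
```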

-
-

TimeWindowCompactionStrategy Operational Concerns

-

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

-
    -
  • If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables -and flushed into the same SSTable, where it will remain comingled.
  • -
  • If the user’s read requests for old data cause read repairs that pull old data into the current memtable, that data -will be comingled and flushed into the same SSTable.
  • -
-

While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, users should avoid queries that explicitly set the timestamp via CQL USING TIMESTAMP. Additionally, users should run frequent repairs (which stream data in such a way that it does not become comingled).

-
-
-

Changing TimeWindowCompactionStrategy Options

-

Operators wishing to enable TimeWindowCompactionStrategy on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected.
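A hedged sketch of that sequence, with ks and tbl as placeholder names:

```bash
# 1. Switch the table to TWCS via cqlsh, e.g.:
#    ALTER TABLE ks.tbl WITH compaction = {'class': 'TimeWindowCompactionStrategy',
#                                          'compaction_window_unit': 'DAYS', 'compaction_window_size': 3};
# 2. Run a major compaction so the existing data lands in a single (old) window:
nodetool compact ks tbl
```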

-

Operators wishing to change compaction_window_unit or compaction_window_size can do so, but may trigger additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple windows.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/compression.html b/src/doc/4.0-alpha2/operating/compression.html deleted file mode 100644 index 922c4d4d1..000000000 --- a/src/doc/4.0-alpha2/operating/compression.html +++ /dev/null @@ -1,194 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compression" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compression

-

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of data on disk by compressing the SSTable in user-configurable compression chunks (chunk_length_in_kb). Because Cassandra SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so on).

-
-

Configuring Compression

-

Compression is configured on a per-table basis as an optional argument to CREATE TABLE or ALTER TABLE. The following options are relevant:

-
    -
  • class specifies the compression class - Cassandra provides four classes (LZ4Compressor, -SnappyCompressor, DeflateCompressor and ZstdCompressor). The default is LZ4Compressor.
  • -
  • chunk_length_in_kb specifies the number of kilobytes of data per compression chunk. The default is 64KB.
  • -
  • crc_check_chance determines how likely Cassandra is to verify the checksum on each compression chunk during -reads. The default is 1.0.
  • -
  • -
    compression_level is only applicable for ZstdCompressor and accepts values between -131072 and 22.
    -
    The lower the level, the faster the speed (at the cost of compression). Values from 20 to 22 are called -“ultra levels” and should be used with caution, as they require more memory. The default is 3.
    -
    -
  • -
-

Users can set compression using the following syntax:

-
CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-
-

Or

-
ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};
-
-
-
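As a further sketch, ZstdCompressor can be configured with an explicit compression_level (keyspace and table names are placeholders, the value shown is the default):

```sql
ALTER TABLE keyspace.table WITH compression = {'class': 'ZstdCompressor', 'compression_level': 3};
```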

Once enabled, compression can be disabled with ALTER TABLE setting enabled to false:

-
ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-
-

Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ALTER TABLE, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the SSTables on disk, -re-compressing the data in the process.
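For example, a minimal sketch (keyspace and table names are placeholders):

```bash
# Rewrite all sstables of a table, re-compressing them with the current table options
nodetool upgradesstables -a ks tbl
```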

-
-
-

Benefits and Uses

-

Compression’s primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save on storage requirements, it often increases read and write throughput, as the CPU cost of compressing data is typically lower than the cost of reading or writing the larger volume of uncompressed data from disk.

-

Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well.

-
-
-

Operational Impact

-
    -
  • Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per -terabyte of data on disk, though the exact usage varies with chunk_length_in_kb and compression ratios.
  • -
  • Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as -non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.
  • -
  • The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a -way to ensure correctness of data on disk, compressed tables allow the user to set crc_check_chance (a float from -0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.
  • -
-
-
-

Advanced Use

-

Advanced users can provide their own compression class by implementing the interface at -org.apache.cassandra.io.compress.ICompressor.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/hardware.html b/src/doc/4.0-alpha2/operating/hardware.html deleted file mode 100644 index 032ef463e..000000000 --- a/src/doc/4.0-alpha2/operating/hardware.html +++ /dev/null @@ -1,190 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hardware Choices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hardware Choices

-

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM.

-
-

CPU

-

Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes.

-
-
-

Memory

-

Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java’s Xmx system parameter). In addition to the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and counter caches, and an in-process page cache. Finally, Cassandra will take advantage of the operating system’s page cache, storing recently accessed portions of files in RAM for rapid re-use.

-

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest:

-
    -
  • ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
  • -
  • The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM
  • -
  • Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
  • -
  • Heaps larger than 12GB should consider G1GC
  • -
-
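A minimal sketch of pinning the heap size, assuming cassandra-env.sh is used for JVM settings; both values are illustrative only and should be tuned to the node's RAM:

```bash
# in conf/cassandra-env.sh
MAX_HEAP_SIZE="8G"
HEAP_NEWSIZE="2G"
```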
-
-

Disks

-

Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables.

-

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files.

-

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra’s sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it’s important that the commitlog -(commitlog_directory) be on one physical disk (not simply a partition, but a physical disk), and the data files -(data_file_directories) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk.
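A sketch of the corresponding cassandra.yaml settings, assuming the commitlog and data disks are mounted at /mnt/commitlog-disk and /mnt/data-disk (placeholder paths):

```yaml
commitlog_directory: /mnt/commitlog-disk/cassandra/commitlog
data_file_directories:
    - /mnt/data-disk/cassandra/data
```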

-

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it’s typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5.

-
-
-

Common Cloud Choices

-

Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include:

-
    -
  • m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate -workloads
  • -
  • i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs
  • -
  • m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) -storage
  • -
-

Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/hints.html b/src/doc/4.0-alpha2/operating/hints.html deleted file mode 100644 index c5413dc59..000000000 --- a/src/doc/4.0-alpha2/operating/hints.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hints" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/index.html b/src/doc/4.0-alpha2/operating/index.html deleted file mode 100644 index afccbbda2..000000000 --- a/src/doc/4.0-alpha2/operating/index.html +++ /dev/null @@ -1,224 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Operating Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Operating Cassandra

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/metrics.html b/src/doc/4.0-alpha2/operating/metrics.html deleted file mode 100644 index fbcede49e..000000000 --- a/src/doc/4.0-alpha2/operating/metrics.html +++ /dev/null @@ -1,1800 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Monitoring" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Monitoring

-

Metrics in Cassandra are managed using the Dropwizard Metrics library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of built in and third party reporter plugins.

-

Metrics are collected for a single node. It’s up to the operator to use an external monitoring system to aggregate them.

-
-

Metric Types

-

All metrics reported by cassandra fit into one of the following types.

-
-
Gauge
-
An instantaneous measurement of a value.
-
Counter
-
A gauge for an AtomicLong instance. Typically this is consumed by monitoring the change since the last call to -see if there is a large increase compared to the norm.
-
Histogram
-

Measures the statistical distribution of values in a stream of data.

-

In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th -percentiles.

-
-
Timer
-
Measures both the rate that a particular piece of code is called and the histogram of its duration.
-
Latency
-
Special type that tracks latency (in microseconds) with a Timer plus a Counter that tracks the total latency -accrued since starting. The former is useful if you track the change in total latency since the last check. Each -metric name of this type will have ‘Latency’ and ‘TotalLatency’ appended to it.
-
Meter
-
A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving -average throughputs.
-
-
-
-

Table Metrics

-

Each table in Cassandra has metrics responsible for tracking its state and performance.

-

The metric names are all appended with the specific Keyspace and Table name.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table> name=<MetricName>
-
-
-

Note

-

There is a special table called ‘all’ without a keyspace. This represents the aggregation of metrics across -all tables and keyspaces on the node.

-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
MemtableOnHeapSizeGauge<Long>Total amount of data stored in the memtable that resides on-heap, including column related overhead and partitions overwritten.
MemtableOffHeapSizeGauge<Long>Total amount of data stored in the memtable that resides off-heap, including column related overhead and partitions overwritten.
MemtableLiveDataSizeGauge<Long>Total amount of live data stored in the memtable, excluding any data structure overhead.
AllMemtablesOnHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides on-heap.
AllMemtablesOffHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides off-heap.
AllMemtablesLiveDataSizeGauge<Long>Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead.
MemtableColumnsCountGauge<Long>Total number of columns present in the memtable.
MemtableSwitchCountCounterNumber of times flush has resulted in the memtable being switched out.
CompressionRatioGauge<Double>Current compression ratio for all SSTables.
EstimatedPartitionSizeHistogramGauge<long[]>Histogram of estimated partition size (in bytes).
EstimatedPartitionCountGauge<Long>Approximate number of keys in table.
EstimatedColumnCountHistogramGauge<long[]>Histogram of estimated number of columns.
SSTablesPerReadHistogramHistogramHistogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into account.
ReadLatencyLatencyLocal read latency for this table.
RangeLatencyLatencyLocal range scan latency for this table.
WriteLatencyLatencyLocal write latency for this table.
CoordinatorReadLatencyTimerCoordinator read latency for this table.
CoordinatorWriteLatencyTimerCoordinator write latency for this table.
CoordinatorScanLatencyTimerCoordinator range scan latency for this table.
PendingFlushesCounterEstimated number of flush tasks pending for this table.
BytesFlushedCounterTotal number of bytes flushed since server [re]start.
CompactionBytesWrittenCounterTotal number of bytes written by compaction since server [re]start.
PendingCompactionsGauge<Integer>Estimate of number of pending compactions for this table.
LiveSSTableCountGauge<Integer>Number of SSTables on disk for this table.
LiveDiskSpaceUsedCounterDisk space used by SSTables belonging to this table (in bytes).
TotalDiskSpaceUsedCounterTotal disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC’d.
MinPartitionSizeGauge<Long>Size of the smallest compacted partition (in bytes).
MaxPartitionSizeGauge<Long>Size of the largest compacted partition (in bytes).
MeanPartitionSizeGauge<Long>Size of the average compacted partition (in bytes).
BloomFilterFalsePositivesGauge<Long>Number of false positives on table’s bloom filter.
BloomFilterFalseRatioGauge<Double>False positive ratio of table’s bloom filter.
BloomFilterDiskSpaceUsedGauge<Long>Disk space used by bloom filter (in bytes).
BloomFilterOffHeapMemoryUsedGauge<Long>Off-heap memory used by bloom filter.
IndexSummaryOffHeapMemoryUsedGauge<Long>Off-heap memory used by index summary.
CompressionMetadataOffHeapMemoryUsedGauge<Long>Off-heap memory used by compression meta data.
KeyCacheHitRateGauge<Double>Key cache hit rate for this table.
TombstoneScannedHistogramHistogramHistogram of tombstones scanned in queries on this table.
LiveScannedHistogramHistogramHistogram of live cells scanned in queries on this table.
ColUpdateTimeDeltaHistogramHistogramHistogram of column update time delta on this table.
ViewLockAcquireTimeTimerTime taken acquiring a partition lock for materialized view updates on this table.
ViewReadTimeTimerTime taken during the local read of a materialized view update.
TrueSnapshotsSizeGauge<Long>Disk space used by snapshots of this table including all SSTable components.
RowCacheHitOutOfRangeCounterNumber of table row cache hits that do not satisfy the query filter, thus went to disk.
RowCacheHitCounterNumber of table row cache hits.
RowCacheMissCounterNumber of table row cache misses.
CasPrepareLatencyLatency of paxos prepare round.
CasProposeLatencyLatency of paxos propose round.
CasCommitLatencyLatency of paxos commit round.
PercentRepairedGauge<Double>Percent of table data that is repaired on disk.
BytesRepairedGauge<Long>Size of table data repaired on disk
BytesUnrepairedGauge<Long>Size of table data unrepaired on disk
BytesPendingRepairGauge<Long>Size of table data isolated for an ongoing incremental repair
SpeculativeRetriesCounterNumber of times speculative retries were sent for this table.
SpeculativeFailedRetriesCounterNumber of speculative retries that failed to prevent a timeout
SpeculativeInsufficientReplicasCounterNumber of speculative retries that couldn’t be attempted due to lack of replicas
SpeculativeSampleLatencyNanosGauge<Long>Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency.
WaitingOnFreeMemtableSpaceHistogramHistogram of time spent waiting for free memtable space, either on- or off-heap.
DroppedMutationsCounterNumber of dropped mutations on this table.
AnticompactionTimeTimerTime spent anticompacting before a consistent repair.
ValidationTimeTimerTime spent doing validation compaction during repair.
SyncTimeTimerTime spent doing streaming during repair.
BytesValidatedHistogramHistogram over the amount of bytes read during validation.
PartitionsValidatedHistogramHistogram over the number of partitions read during validation.
BytesAnticompactedCounterHow many bytes we anticompacted.
BytesMutatedAnticompactionCounterHow many bytes we avoided anticompacting because the sstable was fully contained in the repaired range.
MutatedAnticompactionGaugeGauge<Double>Ratio of bytes mutated vs total bytes repaired.
-
-
-

Keyspace Metrics

-

Each keyspace in Cassandra has metrics responsible for tracking its state and performance.

-

Most of these metrics are the same as the Table Metrics above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.keyspace.<MetricName>.<Keyspace>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Keyspace scope=<Keyspace> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
WriteFailedIdeaCLCounterNumber of writes that failed to achieve the configured ideal consistency level or 0 if none is configured
IdealCLWriteLatencyLatencyCoordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured
RepairTimeTimerTotal time spent as repair coordinator.
RepairPrepareTimeTimerTotal time spent preparing for repair.
-
-
-

ThreadPool Metrics

-

Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It’s important to monitor the state of these thread pools since they can tell you how saturated a -node is.

-

The metric names are all appended with the specific ThreadPool name. The thread pools are also categorized under a -specific type.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ThreadPools.<MetricName>.<Path>.<ThreadPoolName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ThreadPools path=<Path> scope=<ThreadPoolName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ActiveTasksGauge<Integer>Number of tasks being actively worked on by this pool.
PendingTasksGauge<Integer>Number of tasks queued up on this pool.
CompletedTasksCounterNumber of tasks completed.
TotalBlockedTasksCounterNumber of tasks that were blocked due to queue saturation.
CurrentlyBlockedTaskCounterNumber of tasks that are currently blocked due to queue saturation but on retry will become unblocked.
MaxPoolSizeGauge<Integer>The maximum number of threads in this pool.
MaxTasksQueuedGauge<Integer>The maximum number of tasks queued before a task gets blocked.
-

The following thread pools can be monitored.

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Native-Transport-RequeststransportHandles client CQL requests
CounterMutationStagerequestResponsible for counter writes
ViewMutationStagerequestResponsible for materialized view writes
MutationStagerequestResponsible for all other writes
ReadRepairStagerequestReadRepair happens on this thread pool
ReadStagerequestLocal reads run on this thread pool
RequestResponseStagerequestCoordinator requests to the cluster run on this thread pool
AntiEntropyStageinternalBuilds merkle tree for repairs
CacheCleanupExecutorinternalCache maintenance performed on this thread pool
CompactionExecutorinternalCompactions are run on these threads
GossipStageinternalHandles gossip requests
HintsDispatcherinternalPerforms hinted handoff
InternalResponseStageinternalResponsible for intra-cluster callbacks
MemtableFlushWriterinternalWrites memtables to disk
MemtablePostFlushinternalCleans up commit log after memtable is written to disk
MemtableReclaimMemoryinternalMemtable recycling
MigrationStageinternalRuns schema migrations
MiscStageinternalMiscellaneous tasks run here
PendingRangeCalculatorinternalCalculates token range
PerDiskMemtableFlushWriter_0internalResponsible for flushing memtables to a specific disk (there is one of these per disk 0-N)
SamplerinternalResponsible for re-sampling the index summaries of SStables
SecondaryIndexManagementinternalPerforms updates to secondary indexes
ValidationExecutorinternalPerforms validation compaction or scrubbing
ViewBuildExecutorinternalPerforms materialized views initial build
-
-
-

Client Request Metrics

-

Client requests have their own set of metrics that encapsulate the work happening at coordinator level.

-

Different types of client requests are broken down by RequestType.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ClientRequest.<MetricName>.<RequestType>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ClientRequest scope=<RequestType> name=<MetricName>
-
- --- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
RequestType:

CASRead

-
Description:

Metrics related to transactional read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction read latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
UnfinishedCommitCounterNumber of transactions that were committed on read.
ConditionNotMetCounterNumber of transaction preconditions that did not match current values.
ContentionHistogramHistogramHow many contended reads were encountered
-
RequestType:

CASWrite

-
Description:

Metrics related to transactional write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction write latency.
UnfinishedCommitCounterNumber of transactions that were committed on write.
ConditionNotMetCounterNumber of transaction preconditions that did not match current values.
ContentionHistogramHistogramHow many contended writes were encountered
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

Read

-
Description:

Metrics related to standard read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of read failures encountered.
 LatencyRead latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

RangeSlice

-
Description:

Metrics related to token range read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of range query failures encountered.
 LatencyRange query latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

Write

-
Description:

Metrics related to regular write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of write failures encountered.
 LatencyWrite latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

ViewWrite

-
Description:

Metrics related to materialized view writes.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
UnavailablesCounterNumber of unavailable exceptions encountered.
ViewReplicasAttemptedCounterTotal number of attempted view replica writes.
ViewReplicasSuccessCounterTotal number of successful view replica writes.
ViewPendingMutationsGauge<Long>ViewReplicasAttempted - ViewReplicasSuccess.
ViewWriteLatencyTimerTime between when mutation is applied to base table and when CL.ONE is achieved on view.
-
-
-
-

Cache Metrics

-

Cassandra caches have metrics to track the effectiveness of the caches, though the Table Metrics might be more useful.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Cache capacity in bytes.
EntriesGauge<Integer>Total number of cache entries.
FifteenMinuteCacheHitRateGauge<Double>15m cache hit rate.
FiveMinuteCacheHitRateGauge<Double>5m cache hit rate.
OneMinuteCacheHitRateGauge<Double>1m cache hit rate.
HitRateGauge<Double>All time cache hit rate.
HitsMeterTotal number of cache hits.
MissesMeterTotal number of cache misses.
MissLatencyTimerLatency of misses.
RequestsGauge<Long>Total number of cache requests.
SizeGauge<Long>Total size of occupied cache, in bytes.
-

The following caches are covered:

- ---- - - - - - - - - - - - - - - - - - - - -
NameDescription
CounterCacheKeeps hot counters in memory for performance.
ChunkCacheIn process uncompressed page cache.
KeyCacheCache for partition to sstable offsets.
RowCacheCache for rows kept in memory.
-
-

Note

-

Misses and MissLatency are only defined for the ChunkCache

-
-
-
-

CQL Metrics

-

Metrics specific to CQL prepared statement caching.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CQL.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CQL name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PreparedStatementsCountGauge<Integer>Number of cached prepared statements.
PreparedStatementsEvictedCounterNumber of prepared statements evicted from the prepared statement cache
PreparedStatementsExecutedCounterNumber of prepared statements executed.
RegularStatementsExecutedCounterNumber of non prepared statements executed.
PreparedStatementsRatioGauge<Double>Percentage of statements that are prepared vs unprepared.
-
-
-

DroppedMessage Metrics

-

Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by Hinted Handoff

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.DroppedMessage.<MetricName>.<Type>
-
JMX MBean
-
org.apache.cassandra.metrics:type=DroppedMessage scope=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CrossNodeDroppedLatencyTimerThe dropped latency across nodes.
InternalDroppedLatencyTimerThe dropped latency within node.
DroppedMeterNumber of dropped messages.
-

The different types of messages tracked are:

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameDescription
BATCH_STOREBatchlog write
BATCH_REMOVEBatchlog cleanup (after successfully applied)
COUNTER_MUTATIONCounter writes
HINTHint replay
MUTATIONRegular writes
READRegular reads
READ_REPAIRRead repair
PAGED_SLICEPaged read
RANGE_SLICEToken range read
REQUEST_RESPONSERPC Callbacks
_TRACETracing writes
-
-
-

Streaming Metrics

-

Metrics reported during Streaming operations, such as repair, bootstrap, rebuild.

-

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
IncomingBytesCounterNumber of bytes streamed to this node from the peer.
OutgoingBytesCounterNumber of bytes streamed to the peer endpoint from this node.
-
-
-

Compaction Metrics

-

Metrics specific to Compaction work.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Compaction.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Compaction name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
BytesCompactedCounterTotal number of bytes compacted since server [re]start.
PendingTasksGauge<Integer>Estimated number of compactions remaining to perform.
CompletedTasksGauge<Long>Number of completed compactions since server [re]start.
TotalCompactionsCompletedMeterThroughput of completed compactions since server [re]start.
PendingTasksByTableNameGauge<Map<String, Map<String, Integer>>>Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in Table Metrics.
-
-
-

CommitLog Metrics

-

Metrics specific to the CommitLog

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CommitLog.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CommitLog name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CompletedTasksGauge<Long>Total number of commit log messages written since [re]start.
PendingTasksGauge<Long>Number of commit log messages written but yet to be fsync’d.
TotalCommitLogSizeGauge<Long>Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocationTimerTime spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommitTimerThe time spent waiting on CL fsync; for Periodic this only occurs when the sync is lagging its sync interval.
-
-
-

Storage Metrics

-

Metrics specific to the storage engine.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Storage.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Storage name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ExceptionsCounterNumber of internal exceptions caught. Under normal conditions this should be zero.
LoadCounterSize, in bytes, of the on disk data size this node manages.
TotalHintsCounterNumber of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgressCounterNumber of hints attempting to be sent currently.
-
-
-

HintedHandoff Metrics

-

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
Hints_created-<PeerIP>CounterNumber of hints on disk for this peer.
Hints_not_stored-<PeerIP>CounterNumber of hints not stored for this peer, due to being down past the configured hint window.
-
-
-

HintsService Metrics

-

Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintsService.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintsService name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
HintsSucceededMeterA meter of the hints successfully delivered
HintsFailedMeterA meter of the hints that failed to deliver
HintsTimedOutMeterA meter of the hints that timed out
Hint_delaysHistogramHistogram of hint delivery delays (in milliseconds)
Hint_delays-<PeerIP>HistogramHistogram of hint delivery delays (in milliseconds) per peer
-
-
-

SSTable Index Metrics

-

Metrics specific to the SSTable index metadata.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry
-
JMX MBean
-
org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
IndexedEntrySizeHistogramHistogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCountHistogramHistogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGetsHistogramHistogram of the number of index seeks performed per SSTable.
-
-
-

BufferPool Metrics

-

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.BufferPool.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=BufferPool name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
SizeGauge<Long>Size, in bytes, of the managed buffer pool
MissesMeterThe rate of misses in the pool. The higher this is the more allocations incurred.
-
-
-

Client Metrics

-

Metrics specific to client management.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Client.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Client name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
connectedNativeClientsGauge<Integer>Number of clients connected to this node’s native protocol server
connectionsGauge<List<Map<String, String>>List of all connections and their state information
connectedNativeClientsByUserGauge<Map<String, Int>Number of connected native clients by username
-
-
-

Batch Metrics

-

Metrics specific to batch statements.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Batch.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Batch name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PartitionsPerCounterBatchHistogramDistribution of the number of partitions processed per counter batch
PartitionsPerLoggedBatchHistogramDistribution of the number of partitions processed per logged batch
PartitionsPerUnloggedBatchHistogramDistribution of the number of partitions processed per unlogged batch
-
-
-

JVM Metrics

-

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using Metric Reporters.

-
-

BufferPool

-
-
Metric Name
-
jvm.buffers.<direct|mapped>.<MetricName>
-
JMX MBean
-
java.nio:type=BufferPool name=<direct|mapped>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Estimated total capacity of the buffers in this pool
CountGauge<Long>Estimated number of buffers in the pool
UsedGauge<Long>Estimated memory that the Java virtual machine is using for this buffer pool
-
-
-

FileDescriptorRatio

-
-
Metric Name
-
jvm.fd.<MetricName>
-
JMX MBean
-
java.lang:type=OperatingSystem name=<OpenFileDescriptorCount|MaxFileDescriptorCount>
-
- ----- - - - - - - - - - - - - -
NameTypeDescription
UsageRatioRatio of used to total file descriptors
-
-
-

GarbageCollector

-
-
Metric Name
-
jvm.gc.<gc_type>.<MetricName>
-
JMX MBean
-
java.lang:type=GarbageCollector name=<gc_type>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
CountGauge<Long>Total number of collections that have occurred
TimeGauge<Long>Approximate accumulated collection elapsed time in milliseconds
-
-
-

Memory

-
-
Metric Name
-
jvm.memory.<heap/non-heap/total>.<MetricName>
-
JMX MBean
-
java.lang:type=Memory
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-

MemoryPool

-
-
Metric Name
-
jvm.memory.pools.<memory_pool>.<MetricName>
-
JMX MBean
-
java.lang:type=MemoryPool name=<memory_pool>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-
-

JMX

-

Any JMX based client can access metrics from cassandra.

-

If you wish to access JMX metrics over http it’s possible to download Mx4jTool and -place mx4j-tools.jar into the classpath. On startup you will see in the log:

-
HttpAdaptor version 3.0.2 started on port 8081
-
-
-

To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -conf/cassandra-env.sh and uncomment:

-
#MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0"
-
-#MX4J_PORT="-Dmx4jport=8081"
-
-
-
-
-

Metric Reporters

-

As mentioned at the top of this section on monitoring, the Cassandra metrics can be exported to a number of monitoring systems using built-in and third party reporter plugins.

-

The configuration of these plugins is managed by the metrics reporter config project. There is a sample configuration file located at -conf/metrics-reporter-config-sample.yaml.

-

Once configured, you simply start cassandra with the flag -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml. The specified .yaml file plus any 3rd party reporter jars must all be in Cassandra’s classpath.
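A hedged sketch of wiring that flag in through cassandra-env.sh (file name and location are the defaults; adjust to your deployment):

```bash
# append to conf/cassandra-env.sh
JVM_OPTS="$JVM_OPTS -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml"
```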

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/read_repair.html b/src/doc/4.0-alpha2/operating/read_repair.html deleted file mode 100644 index 706c5ed3f..000000000 --- a/src/doc/4.0-alpha2/operating/read_repair.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Read repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/repair.html b/src/doc/4.0-alpha2/operating/repair.html deleted file mode 100644 index cb3cf272e..000000000 --- a/src/doc/4.0-alpha2/operating/repair.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Repair

-

Cassandra is designed to remain available if one of its nodes is down or unreachable. However, when a node is down or unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but are a best effort, and aren’t guaranteed to inform a node of 100% of the writes it missed. These inconsistencies can eventually result in data loss as nodes are replaced or tombstones expire.

-

These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their -respective datasets for their common token ranges, and streaming the differences for any out of sync sections between -the nodes. It compares the data with merkle trees, which are a hierarchy of hashes.

-
-

Incremental and Full Repairs

-

There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that’s been written since the previous incremental repair.

-

Incremental repairs are the default repair type, and if run regularly, can significantly reduce the time and io cost of -performing a repair. However, it’s important to understand that once an incremental repair marks data as repaired, it won’t -try to repair it again. This is fine for syncing up missed writes, but it doesn’t protect against things like disk corruption, -data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally.

-
-
-

Usage and Best Practices

-

Since repair can result in a lot of disk and network io, it’s not run automatically by Cassandra. It is run by the operator -via nodetool.

-

Incremental repair is the default and is run with the following command:

-
nodetool repair
-
-
-

A full repair can be run with the following command:

-
nodetool repair --full
-
-
-

Additionally, repair can be run on a single keyspace:

-
nodetool repair [options] <keyspace_name>
-
-
-

Or even on specific tables:

-
nodetool repair [options] <keyspace_name> <table1> <table2>
-
-
-

The repair command only repairs token ranges on the node being repaired; it doesn’t repair the whole cluster. By default, repair will operate on all token ranges replicated by the node you’re running repair on, which will cause duplicate work if you run it on every node. The -pr flag will only repair the “primary” ranges on a node, so you can repair your entire cluster by running nodetool repair -pr on each node in a single datacenter.
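For example:

```bash
# Repair only the primary token ranges owned by this node
nodetool repair -pr
```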

-

The specific frequency of repair that’s right for your cluster, of course, depends on several factors. However, if you’re -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don’t want to run incremental repairs, a full repair every 5 days is a good place -to start.

-

At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays.

-
-
-

Other Options

-
-
-pr, --partitioner-range
-
Restricts repair to the ‘primary’ token ranges of the node being repaired. A primary range is just a token range for -which a node is the first replica in the ring.
-
-prv, --preview
-
Estimates the amount of streaming that would occur for the given repair command. This builds the merkle trees, and prints -the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated, -add the --full flag to estimate a full repair.
-
-vd, --validate
-
Verifies that the repaired data is the same across all nodes. Similar to --preview, this builds and compares merkle trees of repaired data, but doesn’t do any streaming. This is useful for troubleshooting. If this shows that the repaired data is out of sync, a full repair should be run.
-
-
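A sketch of how these options combine with the commands shown earlier:

```bash
# Estimate the streaming a full repair would perform, without streaming anything
nodetool repair --preview --full

# Check whether already-repaired data is in sync across replicas
nodetool repair --validate
```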
-

See also

-

nodetool repair docs

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/security.html b/src/doc/4.0-alpha2/operating/security.html deleted file mode 100644 index d3071652a..000000000 --- a/src/doc/4.0-alpha2/operating/security.html +++ /dev/null @@ -1,473 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-

There are three main components to the security features provided by Cassandra:

-
    -
  • TLS/SSL encryption for client and inter-node communication
  • -
  • Client authentication
  • -
  • Authorization
  • -
-

By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still:

-
    -
  • Craft internode messages to insert users into authentication schema
  • -
  • Craft internode messages to truncate or drop schema
  • -
  • Use tools such as sstableloader to overwrite system_auth tables
  • -
  • Attach to the cluster directly to capture write traffic
  • -
-

Correct configuration of all three security components should negate these vectors. Therefore, understanding Cassandra’s security features is crucial to configuring your cluster to meet your security needs.

-
-

TLS/SSL Encryption

-

Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently.

-

In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can be overridden using the settings in cassandra.yaml, but this is not recommended unless there are policies in place which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be updated.

-

FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See the java document on FIPS -for more details.

-

For information on generating the keystore and truststore files used in SSL communications, see the -java documentation on creating keystores

-
-
-

SSL Certificate Hot Reloading

-

Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes.

-

Certificate hot reloading may also be triggered using the nodetool reloadssl command. Use this if you want Cassandra to immediately notice the changed certificates.

-
-

Inter-node Encryption

-

The settings for managing inter-node encryption are found in cassandra.yaml in the server_encryption_options -section. To enable inter-node encryption, change the internode_encryption setting from its default value of none -to one value from: rack, dc or all.
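A hedged sketch of that section of cassandra.yaml (keystore paths and passwords are placeholders):

```yaml
server_encryption_options:
    internode_encryption: dc
    keystore: conf/.keystore
    keystore_password: myKeyPass
    truststore: conf/.truststore
    truststore_password: myTrustPass
```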

-
-
-

Client to Node Encryption

-

The settings for managing client to node encryption are found in cassandra.yaml in the client_encryption_options -section. There are two primary toggles here for enabling encryption, enabled and optional.

-
    -
  • If neither is set to true, client connections are entirely unencrypted.
  • -
  • If enabled is set to true and optional is set to false, all client connections must be secured.
  • -
  • If both options are set to true, both encrypted and unencrypted connections are supported using the same port. -Client connections using encryption with this configuration will be automatically detected and handled by the server.
  • -
-

As an alternative to the optional setting, separate ports can also be configured for secure and unsecured connections where operational requirements demand it. To do so, set optional to false and use the native_transport_port_ssl setting in cassandra.yaml to specify the port to be used for secure client communication.
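For illustration, a client_encryption_options section that requires encryption on every client connection might look like this (the keystore path and password are placeholders):

client_encryption_options:
    enabled: true
    optional: false
    keystore: conf/.keystore
    keystore_password: myKeyPass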

-
-
-
-

Roles

-

Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -role_manager setting in cassandra.yaml. The default setting uses CassandraRoleManager, an implementation -which stores role information in the tables of the system_auth keyspace.

-

See also the CQL documentation on roles.
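As a small illustration (the role names are hypothetical), a non-login role can model a group and then be granted to individual login roles:

CREATE ROLE developers WITH LOGIN = false;
CREATE ROLE alice WITH PASSWORD = 'alice_password' AND LOGIN = true;
GRANT developers TO alice;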

-
-
-

Authentication

-

Authentication is pluggable in Cassandra and is configured using the authenticator setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthenticator which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra’s permissions subsystem, so if authentication is disabled, effectively so are permissions.

-

The default distribution also includes PasswordAuthenticator, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication.

-
-

Enabling Password Authentication

-

Before enabling client authentication on the cluster, client applications should be pre-configured with their intended credentials. When a connection is initiated, the server will only ask for credentials once authentication is enabled, so setting up the client-side config in advance is safe. In contrast, as soon as a server has authentication enabled, any connection attempt without proper credentials will be rejected, which may cause availability problems for client applications. Once clients are set up and ready for authentication to be enabled, follow this procedure to enable it on the cluster.

-

Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps:

-
    -
  1. Open a cqlsh session and change the replication factor of the system_auth keyspace. By default, this keyspace uses SimpleStrategy and a replication_factor of 1. It is recommended to change this for any non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to configure a replication factor of 3 to 5 per-DC.
  2. -
-
ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3};
-
-
-
    -
  2. Edit cassandra.yaml to change the authenticator option like so:
  2. -
-
authenticator: PasswordAuthenticator
-
-
-
    -
  3. Restart the node.
  2. -
  4. Open a new cqlsh session using the credentials of the default superuser:
  4. -
-
cqlsh -u cassandra -p cassandra
-
-
-
    -
  5. During login, the credentials for the default superuser are read with a consistency level of QUORUM, whereas those for all other users (including superusers) are read at LOCAL_ONE. In the interests of performance and availability, as well as security, operators should create another superuser and disable the default one. This step is optional, but highly recommended. While logged in as the default superuser, create another superuser role which can be used to bootstrap further configuration.
  2. -
-
# create a new superuser
-CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super';
-
-
-
    -
  6. Start a new cqlsh session, this time logging in as the newly created superuser, and disable the default superuser:
  2. -
-
ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false;
-
-
-
    -
  7. Finally, set up the roles and credentials for your application users with CREATE ROLE statements.
  2. -
-

At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster.

-

Note that using PasswordAuthenticator also requires the use of CassandraRoleManager.

-

See also: Setting credentials for internal authentication, CREATE ROLE, ALTER ROLE, ALTER KEYSPACE and GRANT PERMISSION.

-
-
-
-

Authorization

-

Authorization is pluggable in Cassandra and is configured using the authorizer setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthorizer which performs no checking and so effectively grants all -permissions to all roles. This must be used if AllowAllAuthenticator is the configured authenticator.

-

The default distribution also includes CassandraAuthorizer, which does implement full permissions management -functionality and stores its data in Cassandra system tables.

-
-

Enabling Internal Authorization

-

Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests.

-

The following assumes that authentication has already been enabled via the process outlined in -Enabling Password Authentication. Perform these steps to enable internal authorization across the cluster:

-
    -
  1. On the selected node, edit cassandra.yaml to change the authorizer option like so:
  2. -
-
authorizer: CassandraAuthorizer
-
-
-
    -
  2. Restart the node.
  2. -
  3. Open a new cqlsh session using the credentials of a role with superuser privileges:
  4. -
-
cqlsh -u dba -p super
-
-
-
    -
  4. Configure the appropriate access privileges for your clients using GRANT PERMISSION statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so disruption to clients is avoided.
  2. -
-
GRANT SELECT ON ks.t1 TO db_user;
-
-
-
    -
  5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node restarts and clients reconnect, the enforcement of the granted permissions will begin.
  2. -
-

See also: GRANT PERMISSION, GRANT ALL and REVOKE PERMISSION.

-
-
-
-

Caching

-

Enabling authentication and authorization places additional load on the cluster by frequently reading from the system_auth tables. Furthermore, these reads are in the critical paths of many client operations, and so have the potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role details are cached for a configurable period. The caching can be configured (and even disabled) from cassandra.yaml or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX are not persistent and will be re-read from cassandra.yaml when the node is restarted.

-

Each cache has 3 options which can be set:

-
-
Validity Period
-
Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache.
-
Refresh Rate
-
Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these -async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a -shorter time than the validity period.
-
Max Entries
-
Controls the upper bound on cache size.
-
-

The naming for these options in cassandra.yaml follows the convention:

-
    -
  • <type>_validity_in_ms
  • -
  • <type>_update_interval_in_ms
  • -
  • <type>_cache_max_entries
  • -
-

Where <type> is one of credentials, permissions, or roles.
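For example, the roles cache could be tuned in cassandra.yaml as follows; the values shown are purely illustrative:

roles_validity_in_ms: 120000
roles_update_interval_in_ms: 30000
roles_cache_max_entries: 1000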

-

As mentioned, these are also exposed via JMX in the mbeans under the org.apache.cassandra.auth domain.

-
-
-

JMX access

-

Access control for JMX clients is configured separately from that for CQL. For both authentication and authorization, two providers are available: the first is based on standard JMX security and the second integrates more closely with Cassandra’s own auth subsystem.

-

The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit cassandra-env.sh (or cassandra-env.ps1 on Windows) to change the LOCAL_JMX setting to no. Under the standard configuration, when remote JMX connections are enabled, standard JMX authentication is also switched on.

-

Note that by default, local-only connections are not subject to authentication, but this can be enabled.

-

If enabling remote connections, it is recommended to also use SSL connections.

-

Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as nodetool, are -correctly configured and working as expected.

-
-

Standard JMX Auth

-

Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -cassandra-env.sh by the line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

Edit the password file to add username/password pairs:

-
jmx_user jmx_password
-
-
-

Secure the credentials file so that only the user running the Cassandra process can read it:

-
$ chown cassandra:cassandra /etc/cassandra/jmxremote.password
-$ chmod 400 /etc/cassandra/jmxremote.password
-
-
-

Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in cassandra-env.sh:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

Then edit the access file to grant your JMX user readwrite permission:

-
jmx_user readwrite
-
-
-

Cassandra must be restarted to pick up the new settings.
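Once restarted, JMX clients must supply these credentials. For example, nodetool accepts them via its -u and -pw options (using the placeholder username and password added to the password file above):

$ nodetool -u jmx_user -pw jmx_password status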

-

See also: Using File-Based Password Authentication In JMX

-
-
-

Cassandra Integrated Auth

-

An alternative to the out-of-the-box JMX auth is to use Cassandra’s own authentication and/or authorization providers for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not available until after a node has joined the ring, because the auth subsystem is not fully configured until that point. However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is recommended, where possible, to use local-only JMX auth during bootstrap and then, if remote connectivity is required, to switch to integrated auth once the node has joined the ring and initial setup is complete.

-

With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates can be managed centrally using just cqlsh. Furthermore, fine grained control over exactly which operations are permitted on particular MBeans can be achieved via GRANT PERMISSION.

-

To enable integrated authentication, edit cassandra-env.sh to uncomment these lines:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
-#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
-
-
-

And disable the JMX standard auth by commenting this line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

To enable integrated authorization, uncomment this line:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
-
-
-

Check standard access control is off by ensuring this line is commented out:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as:

-
CREATE ROLE jmx WITH LOGIN = false;
-GRANT SELECT ON ALL MBEANS TO jmx;
-GRANT DESCRIBE ON ALL MBEANS TO jmx;
-GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx;
-GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx;
-
-# Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx;
-
-# Grant the jmx role to one with login permissions so that it can access the JMX tooling
-CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false;
-GRANT jmx TO ks_user;
-
-
-

Fine grained access control to individual MBeans is also supported:

-
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner;
-
-
-

This permits the ks_user role to invoke methods on the MBean representing a single table in test_keyspace, while -granting the same permission for all table level MBeans in that keyspace to the ks_owner role.

-

Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered.

-

See also: Permissions.

-
-
-

JMX With SSL

-

JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit the relevant lines in cassandra-env.sh (or cassandra-env.ps1 on Windows) to uncomment and set the values of these properties as required (an example snippet follows the list below):

-
-
com.sun.management.jmxremote.ssl
-
set to true to enable SSL
-
com.sun.management.jmxremote.ssl.need.client.auth
-
set to true to enable validation of client certificates
-
com.sun.management.jmxremote.registry.ssl
-
enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub
-
com.sun.management.jmxremote.ssl.enabled.protocols
-
by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is -not usually necessary and using the defaults is the preferred option.
-
com.sun.management.jmxremote.ssl.enabled.cipher.suites
-
by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that -this is not usually necessary and using the defaults is the preferred option.
-
javax.net.ssl.keyStore
-
set the path on the local filesystem of the keystore containing server private keys and public certificates
-
javax.net.ssl.keyStorePassword
-
set the password of the keystore file
-
javax.net.ssl.trustStore
-
if validation of client certificates is required, use this property to specify the path of the truststore containing -the public certificates of trusted clients
-
javax.net.ssl.trustStorePassword
-
set the password of the truststore file
-
-
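As an illustrative sketch, a minimal SSL-only setup without client certificate validation could set the following in cassandra-env.sh; the keystore path and password are placeholders:

JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=true"
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.registry.ssl=true"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/path/to/keystore.jks"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=myKeyPass"

Tools such as nodetool can then connect using their --ssl option, with the client-side truststore supplied through the standard javax.net.ssl system properties.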

See also: Oracle Java7 Docs, -Monitor Java with JMX

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/snitch.html b/src/doc/4.0-alpha2/operating/snitch.html deleted file mode 100644 index 375cfffe2..000000000 --- a/src/doc/4.0-alpha2/operating/snitch.html +++ /dev/null @@ -1,177 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Snitch" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Snitch

-

In Cassandra, the snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route requests efficiently.
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping -machines into “datacenters” and “racks.” Cassandra will do its best not to have more than one replica on the same -“rack” (which may not actually be a physical location).
  • -
-
-

Dynamic snitching

-

The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is configured with the following properties in cassandra.yaml (an example configuration is shown after the list):

-
    -
  • dynamic_snitch: whether the dynamic snitch should be enabled or disabled.
  • -
  • dynamic_snitch_update_interval_in_ms: controls how often to perform the more expensive part of host score -calculation.
  • -
  • dynamic_snitch_reset_interval_in_ms: if set greater than zero, this will allow ‘pinning’ of replicas to hosts -in order to increase cache capacity.
  • -
  • dynamic_snitch_badness_threshold: The badness threshold controls how much worse the pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned host was 20% worse than the fastest.
  • -
-
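As an example, the relevant cassandra.yaml entries might look like the following; the values are illustrative and close to the shipped defaults:

dynamic_snitch: true
dynamic_snitch_update_interval_in_ms: 100
dynamic_snitch_reset_interval_in_ms: 600000
dynamic_snitch_badness_threshold: 0.1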
-
-

Snitch classes

-

The endpoint_snitch parameter in cassandra.yaml should be set to a class that implements IEndpointSnitch, which will be wrapped by the dynamic snitch and decides whether two endpoints are in the same datacenter or on the same rack. Out of the box, Cassandra provides the following snitch implementations (a configuration example follows the list):

-
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via gossip. If cassandra-topology.properties exists, -it is used as a fallback, allowing migration from the PropertyFileSnitch.
-
SimpleSnitch
-
Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
PropertyFileSnitch
-
Proximity is determined by rack and data center, which are explicitly configured in -cassandra-topology.properties.
-
Ec2Snitch
-
Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. -The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this -will not work across multiple regions.
-
Ec2MultiRegionSnitch
-
Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the -public IP as well). You will need to open the storage_port or ssl_storage_port on the public IP firewall -(For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection).
-
RackInferringSnitch
-
Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each -node’s IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an -example of writing a custom Snitch class and is provided in that spirit.
-
-
-
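For example, to use GossipingPropertyFileSnitch, set the snitch in cassandra.yaml and declare the local node's location in cassandra-rackdc.properties; the datacenter and rack names below are placeholders:

# cassandra.yaml
endpoint_snitch: GossipingPropertyFileSnitch

# cassandra-rackdc.properties
dc=DC1
rack=RACK1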
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/operating/topo_changes.html b/src/doc/4.0-alpha2/operating/topo_changes.html deleted file mode 100644 index ba2476aa7..000000000 --- a/src/doc/4.0-alpha2/operating/topo_changes.html +++ /dev/null @@ -1,221 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Adding, replacing, moving and removing nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Adding, replacing, moving and removing nodes

-
-

Bootstrap

-

Adding new nodes is called “bootstrapping”. The num_tokens parameter defines the number of virtual nodes (tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) the node will become responsible for.

-
-

Token allocation

-

With the default token allocation algorithm the new node will pick num_tokens random tokens to become responsible for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but it also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with acceptable overhead.

-

On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option -Dcassandra.allocate_tokens_for_keyspace=<keyspace>, where -<keyspace> is the keyspace from which the algorithm can find the load information to optimize token assignment for.
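For example, a joining node could be configured with a lower token count and the allocation algorithm pointed at a hypothetical keyspace named my_keyspace:

# cassandra.yaml on the joining node
num_tokens: 16

# JVM option, e.g. appended in cassandra-env.sh
JVM_OPTS="$JVM_OPTS -Dcassandra.allocate_tokens_for_keyspace=my_keyspace"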

-
-

Manual token assignment

-

You may specify a comma-separated list of tokens manually with the initial_token cassandra.yaml parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens.
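For example, a node being restored with its previous tokens might be configured as follows; the token values are illustrative only:

num_tokens: 4
initial_token: '-9223372036854775808,-4611686018427387904,0,4611686018427387904'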

-
-
-
-

Range streaming

-

After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state.

-

In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag -Dcassandra.consistent.rangemovement=false.

-
-
-

Resuming a failed or hung bootstrap

-

On 2.2+, if the bootstrap process fails, it’s possible to resume bootstrap from the previous saved state by calling -nodetool bootstrap resume. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag --Dcassandra.reset_bootstrap_progress=true.
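For example, after a streaming failure the joining node can be told to continue from its saved state, or the saved state can be discarded at the next startup via the JVM flag mentioned above:

$ nodetool bootstrap resume

# to start bootstrap from scratch on the next start
JVM_OPTS="$JVM_OPTS -Dcassandra.reset_bootstrap_progress=true"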

-

On earlier versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart the bootstrap process.

-
-
-

Manual bootstrapping

-

It’s possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -auto_bootstrap: false. This may be useful when restoring a node from a backup or creating a new data-center.

-
-
-
-

Removing nodes

-

You can take a live node out of the cluster by running nodetool decommission on that node, or remove a dead node by running nodetool removenode from any other node. This will assign the ranges the old node was responsible for to other nodes, and replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If removenode is used, the data will stream from the remaining replicas.
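For example (the host ID placeholder comes from nodetool status output):

# on the live node being removed
$ nodetool decommission

# from any other node, to remove a dead node by its host ID
$ nodetool removenode <host-id>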

-

No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually.

-
-
-

Moving nodes

-

When num_tokens: 1 it’s possible to move the node position in the ring with nodetool move. Moving is both more convenient and more efficient than a decommission followed by a bootstrap. After moving a node, nodetool cleanup should be run to remove any unnecessary data.
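For example (the target token value is a placeholder):

$ nodetool move 4611686018427387904
$ nodetool cleanup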

-
-
-

Replacing a dead node

-

In order to replace a dead node, start Cassandra with the JVM startup flag -Dcassandra.replace_address_first_boot=<dead_node_ip>. Once this property is enabled the node starts in a hibernate state, during which all the other nodes will see this node as DOWN (DN); however, this node will see itself as UP (UN). Accurate replacement state can be found in nodetool netstats.
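For example, on the replacement node the flag could be appended in cassandra-env.sh (the IP address below is a placeholder for the dead node's address), and progress then monitored with nodetool netstats:

JVM_OPTS="$JVM_OPTS -Dcassandra.replace_address_first_boot=10.0.0.5"

$ nodetool netstats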

-

The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will only receive writes during the bootstrapping phase if it has a different IP address from the node that is being replaced (see CASSANDRA-8523 and CASSANDRA-12344).

-

Once the bootstrapping is complete the node will be marked “UP”.

-
-

Note

-

If any of the following cases apply, you MUST run repair to make the replaced node consistent again, since -it missed ongoing writes during/prior to bootstrapping. The replacement timeframe refers to the period from when the -node initially dies to when a new node completes the replacement process.

-
    -
  1. The node is down for longer than max_hint_window_in_ms before being replaced.
  2. -
  2. You are replacing using the same IP address as the dead node and replacement takes longer than max_hint_window_in_ms.
  4. -
-
-
-
-

Monitoring progress

-

Bootstrap, replace, move and remove progress can be monitored using nodetool netstats which will show the progress -of the streaming operations.

-
-
-

Cleanup data after range movements

-

As a safety measure, Cassandra does not automatically remove data from nodes that “lose” part of their token range due -to a range movement operation (bootstrap, move, replace). Run nodetool cleanup on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/plugins/index.html b/src/doc/4.0-alpha2/plugins/index.html deleted file mode 100644 index d550b81a1..000000000 --- a/src/doc/4.0-alpha2/plugins/index.html +++ /dev/null @@ -1,116 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Third-Party Plugins" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Third-Party Plugins

-

Available third-party plugins for Apache Cassandra

-
-

CAPI-Rowcache

-

The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments.

-

The official page for the CAPI-Rowcache plugin contains further details on how to build, run, and download the plugin.

-
-
-

Stratio’s Cassandra Lucene Index

-

Stratio’s Lucene index is a Cassandra secondary index implementation based on Apache Lucene. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities, similar to those of Elasticsearch or Apache Solr, including full-text search, free multivariable, geospatial and bitemporal search, relevance queries, and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability are guaranteed.

-

The official Github repository Cassandra Lucene Index contains everything you need to build/run/configure the plugin.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/search.html b/src/doc/4.0-alpha2/search.html deleted file mode 100644 index f2ade0d0a..000000000 --- a/src/doc/4.0-alpha2/search.html +++ /dev/null @@ -1,104 +0,0 @@ ---- -layout: docpage - -title: "Search" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "" -doc-header-links: ' - -' -doc-search-path: "#" - -extra-footer: ' - - - - -' - ---- -
-
- -
-
-
- - - - -
- -
- - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/searchindex.js b/src/doc/4.0-alpha2/searchindex.js deleted file mode 100644 index 8a6b348f9..000000000 --- a/src/doc/4.0-alpha2/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["architecture/dynamo","architecture/guarantees","architecture/index","architecture/overview","architecture/storage_engine","bugs","configuration/cassandra_config_file","configuration/index","contactus","cql/appendices","cql/changes","cql/ddl","cql/definitions","cql/dml","cql/functions","cql/index","cql/indexes","cql/json","cql/mvs","cql/operators","cql/security","cql/triggers","cql/types","data_modeling/index","development/ci","development/code_style","development/dependencies","development/documentation","development/gettingstarted","development/how_to_commit","development/how_to_review","development/ide","development/index","development/patches","development/release_process","development/testing","faq/index","getting_started/configuring","getting_started/drivers","getting_started/index","getting_started/installing","getting_started/querying","index","operating/audit_logging","operating/backups","operating/bloom_filters","operating/bulk_loading","operating/cdc","operating/compaction","operating/compression","operating/hardware","operating/hints","operating/index","operating/metrics","operating/read_repair","operating/repair","operating/security","operating/snitch","operating/topo_changes","plugins/index","tools/cassandra_stress","tools/cqlsh","tools/index","tools/nodetool/assassinate","tools/nodetool/bootstrap","tools/nodetool/cleanup","tools/nodetool/clearsnapshot","tools/nodetool/clientstats","tools/nodetool/compact","tools/nodetool/compactionhistory","tools/nodetool/compactionstats","tools/nodetool/decommission","tools/nodetool/describecluster","tools/nodetool/describering","tools/nodetool/disableauditlog","tools/nodetool/disableautocompaction","tools/nodetool/disablebackup","tools/nodetool/disablebinary","tools/nodetool/disablefullquerylog","tools/nodetool/disablegossip","tools/nodetool/disablehandoff","tools/nodetool/disablehintsfordc","tools/nodetool/disableoldprotocolversions","tools/nodetool/drain","tools/nodetool/enableauditlog","tools/nodetool/enableautocompaction","tools/nodetool/enablebackup","tools/nodetool/enablebinary","tools/nodetool/enablefullquerylog","tools/nodetool/enablegossip","tools/nodetool/enablehandoff","tools/nodetool/enablehintsfordc","tools/nodetool/enableoldprotocolversions","tools/nodetool/failuredetector","tools/nodetool/flush","tools/nodetool/garbagecollect","tools/nodetool/gcstats","tools/nodetool/getbatchlogreplaythrottle","tools/nodetool/getcompactionthreshold","tools/nodetool/getcompactionthroughput","tools/nodetool/getconcurrency","tools/nodetool/getconcurrentcompactors","tools/nodetool/getconcurrentviewbuilders","tools/nodetool/getendpoints","tools/nodetool/getinterdcstreamthroughput","tools/nodetool/getlogginglevels","tools/nodetool/getmaxhintwindow","tools/nodetool/getreplicas","tools/nodetool/getseeds","tools/nodetool/getsstables","tools/nodetool/getstreamthroughput","tools/nodetool/gettimeout","tools/nodetool/gettraceprobability","tools/nodetool/gossipinfo","tools/nodetool/handoffwindow","tools/nodetool/help","tools/nodetool/import","tools/nodetool/info","tools/nodetool/invalidatecountercache","tools/nodetool/invalidatekeycache","tools/nodetool/invalidaterowcache","tools/nodetool/join","tools/nodetool/listsnapshots","tools/nodetool/move","tools/nodetool/netstats","tools/nodetool/nodetool","too
ls/nodetool/pausehandoff","tools/nodetool/profileload","tools/nodetool/proxyhistograms","tools/nodetool/rangekeysample","tools/nodetool/rebuild","tools/nodetool/rebuild_index","tools/nodetool/refresh","tools/nodetool/refreshsizeestimates","tools/nodetool/reloadlocalschema","tools/nodetool/reloadseeds","tools/nodetool/reloadssl","tools/nodetool/reloadtriggers","tools/nodetool/relocatesstables","tools/nodetool/removenode","tools/nodetool/repair","tools/nodetool/repair_admin","tools/nodetool/replaybatchlog","tools/nodetool/resetfullquerylog","tools/nodetool/resetlocalschema","tools/nodetool/resumehandoff","tools/nodetool/ring","tools/nodetool/scrub","tools/nodetool/setbatchlogreplaythrottle","tools/nodetool/setcachecapacity","tools/nodetool/setcachekeystosave","tools/nodetool/setcompactionthreshold","tools/nodetool/setcompactionthroughput","tools/nodetool/setconcurrency","tools/nodetool/setconcurrentcompactors","tools/nodetool/setconcurrentviewbuilders","tools/nodetool/sethintedhandoffthrottlekb","tools/nodetool/setinterdcstreamthroughput","tools/nodetool/setlogginglevel","tools/nodetool/setmaxhintwindow","tools/nodetool/setstreamthroughput","tools/nodetool/settimeout","tools/nodetool/settraceprobability","tools/nodetool/sjk","tools/nodetool/snapshot","tools/nodetool/status","tools/nodetool/statusautocompaction","tools/nodetool/statusbackup","tools/nodetool/statusbinary","tools/nodetool/statusgossip","tools/nodetool/statushandoff","tools/nodetool/stop","tools/nodetool/stopdaemon","tools/nodetool/tablehistograms","tools/nodetool/tablestats","tools/nodetool/toppartitions","tools/nodetool/tpstats","tools/nodetool/truncatehints","tools/nodetool/upgradesstables","tools/nodetool/verify","tools/nodetool/version","tools/nodetool/viewbuildstatus","tools/sstable/index","tools/sstable/sstabledump","tools/sstable/sstableexpiredblockers","tools/sstable/sstablelevelreset","tools/sstable/sstableloader","tools/sstable/sstablemetadata","tools/sstable/sstableofflinerelevel","tools/sstable/sstablerepairedset","tools/sstable/sstablescrub","tools/sstable/sstablesplit","tools/sstable/sstableupgrade","tools/sstable/sstableutil","tools/sstable/sstableverify","troubleshooting/finding_nodes","troubleshooting/index","troubleshooting/reading_logs","troubleshooting/use_nodetool","troubleshooting/use_tools"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.todo":1,sphinx:54},filenames:["architecture/dynamo.rst","architecture/guarantees.rst","architecture/index.rst","architecture/overview.rst","architecture/storage_engine.rst","bugs.rst","configuration/cassandra_config_file.rst","configuration/index.rst","contactus.rst","cql/appendices.rst","cql/changes.rst","cql/ddl.rst","cql/definitions.rst","cql/dml.rst","cql/functions.rst","cql/index.rst","cql/indexes.rst","cql/json.rst","cql/mvs.rst","cql/operators.rst","cql/security.rst","cql/triggers.rst","cql/types.rst","data_modeling/index.rst","development/ci.rst","development/code_style.rst","development/dependencies.rst","development/documentation.rst","development/gettingstarted.rst","development/how_to_commit.rst","development/how_to_review.rst","development/ide.rst","development/index.rst","development/patches.rst","development/release_process.rst","development/testing.rst","faq/index.rst","getting_started/configuring.rst","getting_started/drivers.rst","getting_started/index.rst","getting_started/installi
ng.rst","getting_started/querying.rst","index.rst","operating/audit_logging.rst","operating/backups.rst","operating/bloom_filters.rst","operating/bulk_loading.rst","operating/cdc.rst","operating/compaction.rst","operating/compression.rst","operating/hardware.rst","operating/hints.rst","operating/index.rst","operating/metrics.rst","operating/read_repair.rst","operating/repair.rst","operating/security.rst","operating/snitch.rst","operating/topo_changes.rst","plugins/index.rst","tools/cassandra_stress.rst","tools/cqlsh.rst","tools/index.rst","tools/nodetool/assassinate.rst","tools/nodetool/bootstrap.rst","tools/nodetool/cleanup.rst","tools/nodetool/clearsnapshot.rst","tools/nodetool/clientstats.rst","tools/nodetool/compact.rst","tools/nodetool/compactionhistory.rst","tools/nodetool/compactionstats.rst","tools/nodetool/decommission.rst","tools/nodetool/describecluster.rst","tools/nodetool/describering.rst","tools/nodetool/disableauditlog.rst","tools/nodetool/disableautocompaction.rst","tools/nodetool/disablebackup.rst","tools/nodetool/disablebinary.rst","tools/nodetool/disablefullquerylog.rst","tools/nodetool/disablegossip.rst","tools/nodetool/disablehandoff.rst","tools/nodetool/disablehintsfordc.rst","tools/nodetool/disableoldprotocolversions.rst","tools/nodetool/drain.rst","tools/nodetool/enableauditlog.rst","tools/nodetool/enableautocompaction.rst","tools/nodetool/enablebackup.rst","tools/nodetool/enablebinary.rst","tools/nodetool/enablefullquerylog.rst","tools/nodetool/enablegossip.rst","tools/nodetool/enablehandoff.rst","tools/nodetool/enablehintsfordc.rst","tools/nodetool/enableoldprotocolversions.rst","tools/nodetool/failuredetector.rst","tools/nodetool/flush.rst","tools/nodetool/garbagecollect.rst","tools/nodetool/gcstats.rst","tools/nodetool/getbatchlogreplaythrottle.rst","tools/nodetool/getcompactionthreshold.rst","tools/nodetool/getcompactionthroughput.rst","tools/nodetool/getconcurrency.rst","tools/nodetool/getconcurrentcompactors.rst","tools/nodetool/getconcurrentviewbuilders.rst","tools/nodetool/getendpoints.rst","tools/nodetool/getinterdcstreamthroughput.rst","tools/nodetool/getlogginglevels.rst","tools/nodetool/getmaxhintwindow.rst","tools/nodetool/getreplicas.rst","tools/nodetool/getseeds.rst","tools/nodetool/getsstables.rst","tools/nodetool/getstreamthroughput.rst","tools/nodetool/gettimeout.rst","tools/nodetool/gettraceprobability.rst","tools/nodetool/gossipinfo.rst","tools/nodetool/handoffwindow.rst","tools/nodetool/help.rst","tools/nodetool/import.rst","tools/nodetool/info.rst","tools/nodetool/invalidatecountercache.rst","tools/nodetool/invalidatekeycache.rst","tools/nodetool/invalidaterowcache.rst","tools/nodetool/join.rst","tools/nodetool/listsnapshots.rst","tools/nodetool/move.rst","tools/nodetool/netstats.rst","tools/nodetool/nodetool.rst","tools/nodetool/pausehandoff.rst","tools/nodetool/profileload.rst","tools/nodetool/proxyhistograms.rst","tools/nodetool/rangekeysample.rst","tools/nodetool/rebuild.rst","tools/nodetool/rebuild_index.rst","tools/nodetool/refresh.rst","tools/nodetool/refreshsizeestimates.rst","tools/nodetool/reloadlocalschema.rst","tools/nodetool/reloadseeds.rst","tools/nodetool/reloadssl.rst","tools/nodetool/reloadtriggers.rst","tools/nodetool/relocatesstables.rst","tools/nodetool/removenode.rst","tools/nodetool/repair.rst","tools/nodetool/repair_admin.rst","tools/nodetool/replaybatchlog.rst","tools/nodetool/resetfullquerylog.rst","tools/nodetool/resetlocalschema.rst","tools/nodetool/resumehandoff.rst","tools/nodetool/ring.rst","tools/nodetool/scrub.rs
t","tools/nodetool/setbatchlogreplaythrottle.rst","tools/nodetool/setcachecapacity.rst","tools/nodetool/setcachekeystosave.rst","tools/nodetool/setcompactionthreshold.rst","tools/nodetool/setcompactionthroughput.rst","tools/nodetool/setconcurrency.rst","tools/nodetool/setconcurrentcompactors.rst","tools/nodetool/setconcurrentviewbuilders.rst","tools/nodetool/sethintedhandoffthrottlekb.rst","tools/nodetool/setinterdcstreamthroughput.rst","tools/nodetool/setlogginglevel.rst","tools/nodetool/setmaxhintwindow.rst","tools/nodetool/setstreamthroughput.rst","tools/nodetool/settimeout.rst","tools/nodetool/settraceprobability.rst","tools/nodetool/sjk.rst","tools/nodetool/snapshot.rst","tools/nodetool/status.rst","tools/nodetool/statusautocompaction.rst","tools/nodetool/statusbackup.rst","tools/nodetool/statusbinary.rst","tools/nodetool/statusgossip.rst","tools/nodetool/statushandoff.rst","tools/nodetool/stop.rst","tools/nodetool/stopdaemon.rst","tools/nodetool/tablehistograms.rst","tools/nodetool/tablestats.rst","tools/nodetool/toppartitions.rst","tools/nodetool/tpstats.rst","tools/nodetool/truncatehints.rst","tools/nodetool/upgradesstables.rst","tools/nodetool/verify.rst","tools/nodetool/version.rst","tools/nodetool/viewbuildstatus.rst","tools/sstable/index.rst","tools/sstable/sstabledump.rst","tools/sstable/sstableexpiredblockers.rst","tools/sstable/sstablelevelreset.rst","tools/sstable/sstableloader.rst","tools/sstable/sstablemetadata.rst","tools/sstable/sstableofflinerelevel.rst","tools/sstable/sstablerepairedset.rst","tools/sstable/sstablescrub.rst","tools/sstable/sstablesplit.rst","tools/sstable/sstableupgrade.rst","tools/sstable/sstableutil.rst","tools/sstable/sstableverify.rst","troubleshooting/finding_nodes.rst","troubleshooting/index.rst","troubleshooting/reading_logs.rst","troubleshooting/use_nodetool.rst","troubleshooting/use_tools.rst"],objects:{},objnames:{},objtypes:{},terms:{"000kib":186,"00t89":22,"011mib":186,"018kib":186,"01t02":197,"021kib":186,"028809z":183,"031mib":186,"03t04":22,"054mib":186,"055z":183,"056kib":186,"061kib":186,"062mib":186,"063kib":186,"064kib":186,"0665ae80b2d711e886c66d2c86545d91":184,"06t22":197,"077mib":186,"078kib":186,"081kib":186,"082kib":186,"090kib":186,"092mib":186,"096gib":194,"0974e5a0aa5811e8a0a06d2c86545d91":186,"099kib":186,"0ee8b91fdd0":198,"0h00m04":198,"0x0000000000000000":199,"0x0000000000000003":14,"0x00000004":13,"0x00007f829c001000":199,"0x00007f82d0856000":199,"0x00007f82e800e000":199,"0x00007f82e80cc000":199,"0x00007f82e80d7000":199,"0x00007f82e84d0800":199,"0x2a19":199,"0x2a29":199,"0x2a2a":199,"0x2a2c":199,"0x3a74":199,"100b":60,"100k":60,"100mb":6,"1024l":43,"105kib":186,"10mb":6,"10s":[61,199],"10x":[6,48],"115mib":186,"11e6":61,"11e8":198,"122kib":186,"128kb":199,"128mib":6,"128th":4,"12gb":50,"12h30m":22,"130mib":186,"142mib":190,"147mib":186,"14t00":197,"150kib":186,"155kib":186,"15m":53,"160mb":48,"162kib":186,"165kib":186,"167kb":199,"16l":43,"16mb":[36,48],"16th":6,"173kib":186,"176kib":186,"17t06":197,"184kb":199,"19t03":[147,190],"1mo":22,"1st":22,"200m":[197,199],"203mib":186,"2062b290":198,"20m":199,"20t20":183,"217kb":199,"217mib":186,"22z":183,"232mib":186,"23t06":197,"23z":183,"244m":199,"245mib":186,"247mib":186,"24h":22,"25005289beb2":183,"250m":6,"251m":199,"253mib":186,"256mb":6,"256th":6,"258mib":186,"25mb":199,"265kib":186,"270mib":186,"27t04":197,"280mib":186,"28t17":197,"295kib":186,"299kib":186,"29d":22,"29t00":197,"2cc0":198,"2e10":10,"2gb":50,"2nd":[6,11,57],"2xlarg":50,"300mib":186,"300s":6,"307kib":186,"30
kb":199,"30s":6,"30t23":197,"314kib":186,"322kib":186,"325kib":186,"327e":61,"32gb":50,"32mb":[6,36],"331mib":186,"333kib":186,"33m":197,"348mib":186,"353mib":194,"3578d7de":183,"35ea8c9f":198,"361kib":186,"366b":199,"36x":40,"370mib":186,"378711z":183,"383b":199,"384z":183,"385b":199,"386kib":186,"387mib":186,"388mib":186,"392kib":186,"392mib":186,"394kib":186,"3f22a07b2bc6":183,"3ff3e5109f22":13,"3gb":[49,199],"3ms":199,"3rd":[6,53,57],"401mib":186,"406mib":186,"40a7":198,"40f3":13,"40fa":198,"40s":199,"410kib":186,"412kib":186,"416mib":194,"41b52700b4ed11e896476d2c86545d91":187,"423b":199,"423kib":186,"4248dc9d790e":183,"431kib":186,"43kb":199,"440kib":186,"443kib":186,"449mib":186,"452kib":186,"457mib":186,"458mib":186,"461mib":186,"465kib":186,"46e9":198,"476mib":186,"481mib":186,"482mib":190,"48d6":183,"4ae3":13,"4d40":183,"4f3438394e39374d3730":187,"4f58":198,"4kb":11,"4mib":6,"4xlarg":50,"500m":199,"501mib":186,"50kb":[6,199],"50m":199,"50mb":[6,43,48,191],"50th":195,"512mb":6,"512mib":6,"513kib":186,"521kib":186,"524kib":186,"536kib":186,"543mib":186,"545kib":186,"54kb":199,"550mib":186,"5573e5b09f14":13,"559kib":186,"561mib":186,"563kib":186,"563mib":186,"56m":197,"571kib":186,"576kb":199,"5850e9f0a63711e8a5c5091830ac5256":192,"591mib":186,"592kib":186,"5gb":43,"5kb":6,"5level":43,"5mb":48,"603kib":186,"606mib":186,"61111111111111e":187,"613mib":186,"619kib":186,"61de":198,"635kib":186,"6365332094dd11e88f324f9c503e4753":[185,188,190,191,193,194],"638mib":186,"640kib":186,"646mib":186,"64k":6,"64kb":49,"650b":199,"65c429e08c5a11e8939edf4f403979ef":[183,185],"65kb":199,"663kib":186,"665kib":186,"669kb":199,"684mib":186,"688kib":186,"690mib":186,"6e630115fd75":198,"6gb":198,"6ms":6,"6tb":50,"701mib":186,"715b":199,"718mib":186,"71b0a49":197,"725mib":186,"730kib":186,"732mib":186,"734mib":186,"736kb":199,"7374e9b5ab08c1f1e612bf72293ea14c959b0c3c":29,"737mib":186,"738mib":186,"743kib":186,"744mib":186,"751mib":186,"752e278f":198,"75th":53,"771mib":186,"775mib":194,"780mib":186,"782kib":186,"783522z":183,"789z":183,"791mib":186,"793kib":186,"798mib":186,"79kb":199,"7f3a":198,"802kib":186,"812mib":186,"813kib":186,"814kib":186,"832mib":186,"835kib":186,"840kib":186,"843mib":186,"845b":199,"846kib":186,"848kib":186,"84fc":183,"861mib":186,"86400s":48,"869kb":199,"872kib":186,"877mib":186,"880mib":186,"882kib":186,"889mib":186,"892kib":186,"894mib":186,"89h4m48":22,"8gb":[50,199],"8th":[6,47],"903mib":186,"90th":53,"911kib":186,"920kib":186,"920mib":186,"9328455af73f":198,"938kib":186,"954kib":186,"957mib":186,"95ac6470":61,"95th":53,"965kib":186,"9695b790a63211e8a6fb091830ac5256":192,"974b":198,"975kib":186,"983kib":186,"98th":53,"993mib":186,"996kib":186,"99percentil":11,"99th":[53,195],"9dc1a293":198,"9e6054da04a7":198,"9gb":199,"9th":53,"\u00eatre":9,"abstract":[25,30],"boolean":[9,12,14,17,20,22,61],"break":[11,33,48,192,196,199],"byte":[4,6,9,13,22,53,70,88,124,174,186,198],"case":[4,6,10,11,12,13,14,16,17,18,22,26,29,30,33,35,36,45,50,56,58,60,61,186,197,198,199],"catch":[25,188],"class":[6,11,14,22,25,31,35,43,48,49,52,56,60,125,137,158,186,197],"default":[4,6,10,11,13,14,17,18,20,22,24,31,35,36,37,40,43,45,47,48,49,53,55,56,58,60,61,65,84,88,95,124,125,127,130,140,141,147,162,164,175,183,186,187,191,195,197,198,199],"enum":9,"export":[31,53,61,199],"final":[14,20,25,27,31,34,43,47,48,50,56,62,141,182,199],"float":[9,10,11,12,14,17,19,22,45,49],"function":[6,9,10,11,12,15,16,18,20,22,30,38,42,43,56,57,59,61,182],"goto":24,"import":[11,14,22,31,32,35,37,48,50,53,55,61,125,195,198,19
9],"int":[4,9,10,11,13,14,17,18,19,20,22,35,47,49,53],"long":[4,6,13,22,29,30,36,48,53,60,189,190,197,199],"new":[0,4,6,10,11,14,16,17,18,19,20,21,22,24,25,27,28,30,31,33,34,35,39,42,43,45,48,50,56,58,60,116,123,125,185,186,188,190,193,195],"null":[9,10,12,13,14,17,18,22,25,61],"public":[6,14,25,26,34,35,36,40,56,57],"return":[6,9,11,13,14,16,17,18,19,20,22,30,60,140,184,185,199],"short":[4,22,27],"static":[6,9,10,11,18,27,53,57,187],"super":[4,56,59,60],"switch":[4,6,10,20,31,36,52,53,56,57],"throw":[6,14,25,35,195],"transient":[6,11],"true":[6,11,12,17,20,22,31,36,43,47,48,56,58,61,122,125,192],"try":[6,11,25,26,31,33,36,43,48,55,140,186,198,199],"var":[4,6,25,40,183,184,185,186,187,188,189,190,191,192,193,194,197,199],"void":35,"while":[4,6,10,11,12,13,22,29,33,34,45,48,49,50,55,56,61,186,195,197,198,199],AES:6,AND:[9,11,13,14,18,20,56,60,61,197],AWS:50,Added:10,Adding:[6,11,20,22,36,42,52,56],And:[11,14,20,56],Are:30,Ave:22,BUT:25,But:[13,20,22,24,25,33,36,61],CAS:[6,198],CCS:199,CFs:[140,147],CLS:61,CMS:199,DCs:6,DNS:36,Doing:[10,62,182],EBS:50,For:[0,4,6,9,10,11,12,13,14,15,16,17,18,20,21,22,28,33,34,35,36,37,40,41,43,48,50,55,56,57,60,61,186,187,188,191,195,197,198,199],GCs:6,HDs:199,Has:30,IDE:[28,32,42],IDEs:[31,32],IDs:[125,165],INTO:[6,9,11,13,14,17,22],IPs:[6,57,146,165],Ids:171,JKS:6,JPS:199,KBs:6,LCS:[11,187],NFS:50,NOT:[6,9,10,11,13,14,16,18,20,21,22],NTS:6,Not:[13,20,33,48,49],ONE:[0,6,11,53,60,61],One:[33,35,36,48,199],PFS:6,Pis:50,QPS:195,Such:22,THE:6,TLS:[6,52,186],That:[0,11,12,18,22,33,36,48,61,199],The:[0,4,6,8,9,10,11,12,14,16,18,19,20,21,22,24,25,27,28,29,31,33,34,35,36,37,40,41,42,45,47,49,50,53,55,56,57,58,59,60,61,65,68,73,75,81,85,91,94,95,98,103,107,109,111,116,123,125,127,131,132,138,140,147,150,151,158,164,165,166,173,175,178,179,181,185,186,187,188,190,191,192,193,196,197,198,199],Their:22,Then:[13,35,36,40,48,56,188,192,199],There:[6,10,11,12,13,14,22,31,33,35,36,48,53,55,56,60,189,191,195,198,199],These:[4,6,11,14,31,53,55,56,60,61,193,195,196,197,198,199],USE:[9,14,15,43],USING:[9,13,16,21,22,48],Use:[6,11,13,20,36,41,42,47,52,56,60,61,62,63,68,125,130,140,171,178,182,183,188,189,190,193,196],Used:[53,199],Useful:199,Uses:[6,17,52,57],Using:[11,13,27,35,36,56,62,182,183,186,190,193,197],WILL:6,WITH:[9,11,12,16,18,20,45,47,48,49,56,60,61],Will:[6,42,88,125,158,188],With:[6,13,17,36,48,55,58,64,197,199],Yes:36,_cache_max_entri:56,_cdc:47,_development_how_to_review:28,_if_:6,_must_:6,_only_:197,_trace:[53,198],_udt:14,_update_interval_in_m:56,_use:14,_validity_in_m:56,_x86_64_:199,a278b781fe4b2bda:40,a6fd:198,abbrevi:34,abil:[14,20,36,49],abilityid:16,abl:[0,6,14,22,24,27,31,35,36,48,56,195,196],abort:24,about:[4,6,20,27,31,32,33,35,36,45,48,57,61,67,125,146,187,197,198,199],abov:[4,6,8,11,12,13,14,22,26,31,33,34,36,48,53,60,62,182,186,193,199],absenc:12,abstracttyp:22,ac79:198,acceler:59,accept:[0,6,10,11,12,13,17,33,35,45,49,58,83,125],access:[6,10,20,22,31,33,43,50,52,53,59,186,187,195,196,199],accident:185,accompani:6,accord:[4,6,36],accordingli:[6,14,36],account:[6,22,27,34,35,199],accru:[48,53],accumul:[6,48,53],accur:[6,36,45,58,146,187],accuraci:[45,127,175],acheiv:56,achiev:[6,48,53],achil:38,ack:[4,6],acoount:53,acquir:[20,53],across:[6,11,20,33,53,55,56,57,60,125,129,187,194],act:197,action:[6,13,20,31,194,199],activ:[4,6,28,33,47,53,55,61,125,127,175,195,197,198,199],activetask:53,actor:56,actual:[4,6,13,21,25,27,30,34,36,40,43,48,55,57,60,140,190,199],acycl:20,add:[0,6,9,10,11,22,24,27,28,29,30,32,33,34,37,40,42,43,48,55,56,60,188,193,197],addam
sfamili:11,added:[0,4,6,10,11,14,19,27,30,47,48,191],adding:[6,13,14,30,50,61,188,194],addit:[0,6,9,11,13,19,20,22,31,33,37,43,48,50,53,56,61,197,199],addition:[11,13,24,48,55,60,197],additional_write_polici:11,address:[6,8,17,22,24,31,33,37,42,43,53,57,58,59,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,198,199],addsign:34,adher:10,adjac:48,adjust:[6,45],adler32:4,adv:40,advanc:[6,27,43,52,56,196],advantag:50,advers:[36,198],advic:[33,36],advis:[6,12,18,22,36],aefb:183,af08:13,afd:22,affect:[11,30,33,36,48,147,190,195,199],afford:6,after:[5,6,10,11,12,13,14,16,17,18,31,33,34,36,47,48,50,52,53,56,57,61,188,189,192],afterward:[24,27,31,35],afunct:14,again:[6,33,34,48,55,58,61,189,192],against:[6,11,14,18,27,33,35,36,50,55,58,60,61,140,187,199],agent:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,199],agentlib:31,aggreg:[6,9,10,13,15,18,20,43,53,61],aggress:195,ago:189,aid:12,aim:[6,197],akeyspac:14,alg:186,algorithm:[6,11,58,186,197],alia:[10,13,14,38],alias:[6,10,18],alic:20,align:25,aliv:6,all:[0,4,6,9,11,12,13,14,17,18,19,22,24,25,26,27,29,30,31,33,35,42,43,45,47,48,53,55,56,58,60,61,62,65,66,67,83,95,100,116,117,122,125,127,129,138,141,147,162,164,166,175,177,178,179,182,184,188,190,194,195,197,198,199],allmemtableslivedatas:53,allmemtablesoffheaps:53,allmemtablesonheaps:53,alloc:[6,36,47,50,53],allocate_tokens_for_keyspac:58,allow:[0,4,6,9,10,11,12,14,16,17,18,22,24,27,28,37,45,47,48,49,50,55,57,60,190,198,199],allowallauthent:[6,56],allowallauthor:[6,56],allowallinternodeauthent:6,allowallnetworkauthor:6,almost:[4,6,14,22,48,195,199],alon:25,along:[6,13,28,34,43,122,125,197],alongsid:[41,61],alpha2:[11,22,42],alphabet:25,alphanumer:[11,20],alreadi:[6,11,14,16,18,22,33,36,48,56,60,62,178,182,191],also:[0,4,6,10,11,12,13,14,17,18,20,22,24,27,31,33,34,35,36,37,43,47,48,50,53,56,58,61,95,179,192,193,197,198,199],alter:[0,9,10,15,17,36,45,47,48,49,56],alter_keyspace_stat:12,alter_role_stat:12,alter_table_instruct:11,alter_table_stat:12,alter_type_modif:22,alter_type_stat:[12,22],alter_user_stat:12,altern:[10,11,12,13,17,22,31,33,37,50,56,186],although:[6,33,60,197,199],alwai:[0,4,6,9,10,11,13,14,18,22,25,27,33,34,35,36,48,50,60,195,199],amend:29,amongst:11,amount:[6,11,13,22,31,33,35,36,48,49,50,53,55,58,61,140,199],amplif:[48,50],anaggreg:14,analogu:13,analysi:[196,197],analyt:45,analyz:[35,199],ancestor:[4,193],ani:[0,4,6,10,11,12,13,14,17,18,20,21,22,24,26,27,29,30,31,33,34,35,37,40,42,48,50,53,55,56,58,60,61,63,116,125,130,147,162,183,187,190,192,193,196,197,198],annot:25,anonym:[12,22,43],anoth:[6,11,14,20,22,35,48,56,61,184,191,196,199],anotherarg:14,answer:[32,199],ant:[24,26,31,33,35],antclassload:35,anti:[6,22],anticip:[0,11],anticompact:[48,53,191],anticompactiontim:53,antientropystag:[53,198],antipattern:50,anymor:[29,48]
,anyon:25,anyth:48,anywai:6,anywher:[13,47],apach:[2,5,6,7,14,21,25,26,27,29,30,32,33,34,35,36,39,40,43,48,49,53,56,59,62,183,184,185,186,187,188,189,190,191,193,194,197],apart:43,api:[6,8,17,41,57],appear:[6,11,12,14,48,61],append:[4,22,29,43,50,53,61,197],appendic:[15,42],appendix:[12,15],appl:22,appli:[4,6,9,10,11,12,13,20,22,29,33,35,36,53,58,60,61],applic:[0,6,11,20,25,28,30,31,43,49,56,60,197],appreci:33,approach:[4,48,58],appropri:[6,11,20,22,30,33,34,56,57,58,197],approv:24,approxim:[48,53,187],apt:[40,199],arbitrari:[11,12,22,60],architectur:[36,42,59],archiv:[4,6,34,47,88],archive_command:88,archive_retri:88,area:[28,199],aren:[13,55],arg:[14,125,163,183,187,193],argnam:14,argnum:14,argument:[6,11,13,14,16,17,36,37,49,60,61,63,64,65,66,68,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181],arguments_declar:14,arguments_signatur:14,arithmet:[10,12,15,42],arithmetic_oper:12,armor:34,around:[6,20,48,50,57,199],arrai:[6,36],arriv:[6,33,36],artifact:[26,31,32],artifici:11,asap:10,asc:[9,11,13],ascend:[11,13],ascii:[9,14,17,22],asdf:183,asf:[8,31,34],ask:[5,33,34,35,42,56],aspect:11,assassin:125,assertionerror:25,assertrow:35,assess:[198,199],assign:[6,13,20,36],associ:[6,11,59,193,195],assum:[6,11,14,27,31,56,57,195,199],assumpt:56,astyanax:38,async:[6,56],asynchron:[16,36,50],asynchroni:53,atabl:14,atom:[11,13,21,29],atomiclong:53,attach:[28,33,56,59,199],attack:56,attemp:53,attempt:[0,6,11,16,18,20,22,36,43,48,53,55,56,61,62,141,182,192,197],attent:[25,26,33,34],attribut:[43,48],audienc:0,audit:[6,74,84,125],audit_log:6,audit_logging_opt:43,audit_logs_dir:43,auditlog:84,auditlogview:43,audt:43,aug:192,auth:[6,43,186],authent:[10,43,52,61,186],authenticatedus:6,author:[9,20,22,27,33,52,60],authorizationproxi:56,authprovid:186,auto:[6,11,36,60,166],auto_bootstrap:58,autocompact:[48,75,85,125,166],autom:[25,28],automat:[6,13,14,16,24,27,31,35,36,40,48,55,56,58,60],avail:[0,6,8,11,14,20,24,31,33,34,35,40,43,47,55,56,57,59,61,65,95,138,147,158,178,195,197,199],availabil:6,averag:[6,14,48,53,186,197,198,199],average_live_cells_per_slice_last_five_minut:174,average_s:11,average_tombstones_per_slice_last_five_minut:174,averagefin:14,averagest:14,avg:[186,199],avg_bucket_s:48,avgqu:199,avgrq:199,avoid:[6,11,12,25,30,33,45,48,50,53,56,57,61,179,186],awai:[31,58,61,198],await:199,awar:[0,6,11,33,45,49,146,195,198],awesom:60,azur:50,b124:13,b2c5b10:197,b70de1d0:13,b7a2:198,b7c5:198,b957:183,b9c5:198,back:[6,11,27,43,48,53,58,198],backend:6,background:[34,36,40,56,197,199],backlog:6,backpressur:6,backpressurestrategi:6,backup:[6,42,48,52,58,61,76,86,125,167,192,193],backward:[6,10,11,15,20,22],bad:[6,14,36,56,57,195,198],balanc:[6,58,195,198],banana:22,band:22,bandwidth:[6,59,199],bar:[12,25,199],bardet:22,bare:6,base:[4,6,10,11,13,14,18,19,20,22,24,26,28,29,32,33,34,35,36,43,48,50,53,56,58,59,187,195,198],bash:[36,199],basi:[6,24,36,49],basic:[6,11,48,50,60,62,182,193,196],batch:[0,4,6,9,11,15,35,42,52,60,61,195,199],batch_remov:[53,198],batch_stat:12,batch_stor:[53,198],batchlog:[13,53,97,125,142,148],batchtyp:60,bc9cf530b1da11e886c66d2c86545d91:190,be34:13,beatl:22,beca:61,becaus:[0,4,6,11,13,14,20,40,48,49,53,56,187,190,199],becom:[4,6,11,14,20,33,48,53,56,58],been:[0,4,6,10,11,13,14,15,20,22,30,33,34,48,50,55,56,147,190,193,195],befor:[0,4,6,10,11,13,14,16,19,21,22,24,27,28,31,32,34,35,38,43,48,53,56,57,58,60,61,88,164,182,183,184,185,186,187,18
8,189,190,191,192,193,194,195],begin:[9,12,13,35,56,61],beginn:33,begintoken:61,behav:6,behavior:[0,6,10,11,14,17,22,25,30,45,48,58,141,195],behind:[6,25,35,36,43,48],being:[0,4,6,11,13,17,22,30,34,35,36,43,45,48,53,55,58,188,197,198,199],believ:195,belong:[11,13,14,53,65,125],below:[6,11,12,13,17,20,22,26,33,40,43,48,53,61,71,186,188,195,197],benchmark:[50,60],benefici:48,benefit:[6,28,45,48,50,52,186],best:[6,27,34,35,48,52,56,57,195,199],best_effort:6,better:[6,25,27,28,33,48,50,186,198,199],between:[0,4,6,9,10,11,12,13,15,33,36,45,48,49,53,55,56,59,60,140,162,199],beyond:[6,48,61,179],big:[6,48,68,183,184,185,186,187,189,190,191,192,193,194],bigger:[11,48],biggest:14,bigint:[9,14,17,19,22],bigintasblob:14,bigtableread:[184,190,192,194],bin:[26,31,40,41,61,197],binari:[14,34,39,43,56,77,87,125,168,197],binauditlogg:84,bind:[6,10,12,14,36],bind_mark:[12,13,18,22],binlog:43,biolat:199,biolog:11,biosnoop:199,birth:13,birth_year:13,bit:[14,17,22,26,33,36,49,50],bite:36,bitempor:59,bitrot:11,bitstr:9,black:6,blank:[6,25,36,187],blindli:36,blob:[9,10,12,17,22,42,49,60],blob_plain:34,blobasbigint:14,blobastyp:14,block:[4,6,11,29,37,43,48,50,53,56,62,88,182,197,198,199],blockedonalloc:6,blog:[6,13],blog_til:13,blog_titl:13,blogpost:60,bloom:[4,11,42,50,52,53,187],bloom_filter_false_posit:174,bloom_filter_false_ratio:174,bloom_filter_fp_ch:[4,11,45],bloom_filter_off_heap_memory_us:174,bloom_filter_space_us:174,bloomfilterdiskspaceus:53,bloomfilterfalseposit:53,bloomfilterfalseratio:53,bloomfilteroffheapmemoryus:53,blunt:56,bnf:12,bob:[13,20],bodi:[6,11,12,60],boilerpl:32,boolstyl:61,boost:6,boot:36,bootstrap:[0,6,42,49,52,53,56,125,130,158,188],born:13,both:[0,6,11,13,14,18,22,28,29,30,33,34,36,37,45,48,49,50,53,56,58,60,61,193,199],bottleneck:6,bottom:36,bound:[4,6,11,12,22,43,50,56],boundari:188,box:[6,56,57],brace:25,bracket:12,braket:12,branch:[24,27,29,30,31,34,35],branchnam:33,breadcrumb:195,breakdown:[198,199],breakpoint:31,breed:35,brendangregg:199,brief:199,briefli:198,bring:6,brk:36,broadcast:6,broadcast_address:57,broken:[6,48,53,190],brows:[6,34,183,184,185,186,187,188,189,190,191,193,194],browser:[61,199],bucket:48,bucket_high:48,bucket_low:48,buff:199,buffer:[4,6,47,53],bufferpool:52,buffers_mb:199,bug:[10,29,32,34,35,36,42,55],build:[18,24,26,27,28,32,33,34,35,42,53,55,59,60,125,181],buildenv:34,builder:[6,102,125,155],built:[18,31,53],bulk:[42,52,186],bump:[4,10,188],bunch:25,burn:47,button:[27,31,36],bytebuff:14,byteorderedpartition:[6,14],bytesanticompact:53,bytescompact:53,bytesflush:53,bytesmutatedanticompact:53,bytespendingrepair:53,bytesrepair:53,bytestyp:[9,187],bytesunrepair:53,bytesvalid:53,bz2:34,c60d:183,c73de1d3:13,cach:[6,11,20,26,36,37,50,52,57,116,118,119,120,125,149,150,198],cachecleanupexecutor:[53,198],cached_mb:199,cachenam:53,cachestat:199,calcul:[6,45,47,48,53,57,186,187],call:[9,11,12,13,14,20,25,27,32,37,42,43,48,49,50,53,58,125,158,199],callback:53,caller:25,can:[0,4,5,6,8,9,10,11,12,13,14,16,17,18,19,20,21,22,24,25,26,27,28,29,30,31,33,34,35,37,40,41,42,43,45,47,48,49,50,53,55,56,57,58,60,61,63,65,66,68,73,75,81,85,88,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,183,184,185,186,188,189,190,191,192,195,196,197,198,199],cancel:[10,141],candid:48,cannot:[6,9,11,13,14,17,18,20,22,35,48,56,63,125,198],cap:[12,99,104,110,125,152,157,160],capabl:[0,6,36,59,61],capac:[6,47,53,57,59,125,127,149,175,194,195,197,199],capacit:1
95,capi:42,captur:[6,42,52,56,62,197],cardin:187,care:[6,48,60,140,199],carefulli:26,carlo:20,carri:[25,140],cascommit:53,cascontent:[111,161],casprepar:53,caspropos:53,casread:53,cassablanca:22,cassafort:38,cassandra:[0,2,4,5,8,10,11,13,14,18,20,21,22,25,26,28,29,33,38,39,41,45,48,49,50,53,55,57,58,61,84,88,125,136,140,143,147,172,180,182,183,184,185,186,187,188,189,190,191,193,194,195,196,198,199],cassandra_flam:199,cassandra_hom:[4,6,47,56,197],cassandra_job_dsl_se:24,cassandra_stack:199,cassandraauthor:[6,56],cassandradaemon:[31,40],cassandrafullquerylog:6,cassandralogin:56,cassandranetworkauthor:6,cassandrarolemanag:[6,56],casser:38,cassi:38,cast:[10,13,18],caswrit:53,cat:[22,183,199],categor:53,categori:[11,12,13,14,43,84],caught:[30,53],caus:[4,6,18,36,48,55,56,188,190,197,198,199],caution:[6,49],caveat:56,cbc:6,ccm:[30,35,199],ccmlib:35,cd941b956e60:198,cdc:[6,11],cdc_enabl:47,cdc_free_space_check_interval_m:47,cdc_free_space_in_mb:47,cdc_raw:[6,47],cdc_raw_directori:47,cdccompactor:6,cell:[6,22,53,95,179,183,187,198],center:[6,11,20,22,36,57,58,81,91,125,140],cento:34,centos7:34,central:[31,56,61,195],centric:[20,27],certain:[4,6,9,11,20,27,35,48,56,184],certainli:14,certif:[52,125,136],cf188983:183,cfname:[109,127,175],cfs:25,chain:20,challeng:[28,59],chanc:[28,45,187],chang:[4,6,11,12,15,20,22,24,26,27,28,29,31,32,34,39,40,42,49,52,53,56,158,185,188,197,199],changelog:34,charact:[11,12,13,17,20,22,25,60,61],character:6,chat:8,cheap:[6,11],check:[0,6,11,13,24,25,30,31,33,34,35,36,45,47,48,53,56,62,116,125,140,179,182,194,198],checklist:[32,33,42],checkout:[27,31,33,34],checksum:[4,6,11,49,125,179,193],cherri:29,chess:13,child:61,chmod:56,choic:[6,11,34,42,48,52,189],choos:[0,6,11,32,34,38,50,53],chosen:[0,6,11,14,198],chown:56,christoph:22,chrome:61,chronicl:43,chunk:[4,6,36,49,61],chunk_length_in_kb:[11,49],chunk_length_kb:6,chunk_lenth_in_kb:11,chunkcach:53,chunksiz:61,churn:6,cipher:[6,56,186],cipher_suit:6,circular:20,circumst:11,citi:22,clash:12,class_nam:[4,6],classload:35,classpath:[6,14,22,53],claus:[10,11,14,16,17,18,20,25],clean:[6,25,53,62,65,125,143,182,186],cleanli:33,cleanup:[36,48,52,53,95,125,171,193],clear:[30,33,62,67,116],clearsnapshot:125,click:[13,31,33,34,35,199],client:[0,4,6,8,10,11,13,17,20,22,30,36,37,39,42,43,47,50,52,61,67,125,186,190,196,197,198,199],client_encryption_opt:[56,186],clientrequest:53,clientstat:125,clock:6,clockr:6,clojur:39,clone:[31,34,36,61,199],close:[6,15,27,34,56,199],closer:45,cloud:52,clue:199,cluster:[0,4,6,9,10,11,13,14,21,22,30,35,37,41,42,48,50,53,55,56,57,58,60,61,62,72,93,97,113,125,148,165,182,187,194,195,196,197,199],cluster_nam:[37,41],clustering_column:11,clustering_ord:11,clusteringtyp:187,cmake:199,cmd:199,cmsparallelremarken:31,coalesc:6,coalescingstrategi:6,code:[6,10,12,14,21,24,27,28,29,30,31,32,35,42,49,53,195,199],codestyl:25,coher:59,col:[14,60],cold:6,collat:6,collect:[6,10,11,12,13,14,15,17,50,52,53,60,95,197],collection_liter:12,collection_typ:22,collector:197,color:[22,61,199],column1:9,column:[4,6,9,10,11,12,13,14,15,16,17,18,20,22,49,53,59,60,61,109,127,147,164,175,187,190,192,197,198],column_definit:11,column_nam:[11,13,16],columnfamili:[4,6,9,25,48,185,188],columnspec:60,colupdatetimedeltahistogram:53,com:[6,14,24,25,27,29,34,56,199],combin:[4,6,10,48],come:[6,9,56,199],comingl:48,comma:[6,11,12,13,37,43,56,58,61,84,127,130,175,186],command:[0,6,18,26,29,34,35,36,37,40,41,49,52,55,56,60,62,63,64,65,66,68,73,75,81,85,88,91,94,95,98,100,103,107,109,111,115,116,123,125,127,130,131,132,138,139,140,146,147,14
8,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,182,185,186,191,192,193,196,197,198,199],comment:[4,6,11,15,18,25,27,28,30,56],commit:[4,6,8,11,27,32,33,34,42,53,193,199],commitlog:[2,6,36,37,50,52,187,197,198],commitlog_archiv:[4,6],commitlog_compress:4,commitlog_directori:[4,37,50],commitlog_segment_size_in_mb:[4,36],commitlog_sync:4,commitlog_sync_batch_window_in_m:4,commitlog_sync_period_in_m:4,commitlog_total_space_in_mb:4,commitlogposit:187,commitlogread:47,commitlogreadhandl:47,commitlogseg:[6,52,53],committ:[28,29,32,33,34,35],common:[0,14,15,25,27,30,33,52,55,61,195,196,199],common_nam:11,commonli:125,commun:[6,8,28,30,31,33,36,37,41,43,56,186],commut:36,compact:[4,6,11,15,36,42,45,49,50,52,60,65,69,70,95,98,99,125,138,147,151,152,158,166,171,178,184,185,186,187,188,190,193,196,197,199],compacted_partition_maximum_byt:174,compacted_partition_mean_byt:174,compacted_partition_minimum_byt:174,compaction_:171,compaction_histori:197,compaction_throughput:198,compaction_window_s:48,compaction_window_unit:48,compactionbyteswritten:53,compactionexecutor:[53,198],compactionhistori:[48,125],compactionid:171,compactionparamet:48,compactionparametersjson:48,compactions_in_progress:193,compactionstat:[48,125,198],compactionstrategi:52,compactiontask:197,compactor:[101,125,154],compar:[4,6,26,33,48,53,55,60,195,198],comparison:6,compat:[6,9,10,11,13,15,20,30,33,62,199],compatilibi:22,compet:6,compil:[25,26,31,61],compilerthread3:199,complain:31,complet:[6,13,14,33,34,36,47,48,53,56,58,61,125,139,141,190,192,193,198],completedtask:53,complex:[4,6,9,14,22,33],complexarg:14,compliant:[6,14,56],complic:33,compon:[4,6,11,30,45,53,56,125,158,199],compos:[11,13,22],composit:[4,11],compound:17,comprehens:30,compress:[4,6,11,35,42,48,50,52,53,60,187],compression_level:49,compression_metadata_off_heap_memory_us:174,compressioninfo:4,compressionmetadataoffheapmemoryus:53,compressionratio:53,compressor:[4,6,11,187],compris:[4,11,49],compromis:[34,56],comput:[4,6,14,194],concaten:[14,43],concept:[20,48],concern:[13,14,199],conclus:6,concret:[12,22],concurr:[6,24,50,60,100,101,102,125,140,153,154,155,186,198,199],concurrent_compactor:198,concurrent_materialized_view_build:18,concurrent_writ:4,concurrentmarksweep:50,condens:13,condit:[6,10,12,13,20,22,25,29,48,53,56,60,61,199],conditionnotmet:53,conf:[6,36,37,40,53,56,61,186,197],config:[24,53,56,61,62,182],configu:[43,199],configur:[0,4,11,20,22,24,31,35,36,39,40,42,52,53,56,57,59,60,61,71,88,125,143,158,185,186,187,195,197,198],confirm:[6,8,24,30,31],conflict:[13,22,29,32],conform:[18,30],confus:[10,12,36,199],congratul:27,conjunct:61,connect:[6,11,22,31,41,42,53,56,57,60,61,67,71,124,125,186,199],connectednativecli:53,connectednativeclientsbyus:53,connectionsperhost:186,connector:[36,38,56],connnect:53,consecut:37,consequ:[11,13,19,22,50],conserv:6,consid:[0,6,13,22,28,33,37,43,45,48,50,194],consider:[13,22],consist:[2,6,11,12,13,14,30,53,56,58,62,195,198],consol:[31,37,43,61],constant:[10,11,15,17,22],constantli:[6,48],construct:[12,199],constructor:[6,25],consum:[6,35,45,47,53,198],consumpt:47,contact:[6,11,36,42,195],contain:[0,6,8,9,10,11,12,13,15,16,18,20,22,26,31,33,35,48,49,53,56,59,61,164,184,189,193,195,197,198,199],contend:[6,53],content:[4,6,11,12,13,27,42,48,61,88,183,199],contentionhistogram:53,context:[4,6,9,20,22,31,33,36,56,197],contigu:13,continu:[0,6,25,35,43,48,56,57],contrarili:12,contrast:[35,56],contribut:[5,24,27,29,35,42],contributor:[27,29,33,40],control:[0,6,10,11,13,15,30,3
7,40,48,56,57,61],conveni:[9,12,14,17,35,58],convent:[6,11,14,15,27,29,32,33,35,56,57],convers:10,convert:[10,13,14,48,199],coordin:[0,6,11,13,14,22,36,53,141,195,196],coordinatorreadlat:[53,195],coordinatorscanlat:53,coordinatorwritelat:[53,195],cop:25,copi:[0,6,26,34,36,48,62,186,195],core:[6,14,43,50,59,153,198,199],correct:[10,26,30,40,48,49,56,125,138,185,191],correctli:[6,11,27,36,43,48,56],correl:[6,10,57,195,198],correspond:[0,4,6,9,11,13,14,18,22,27,33,35,36,47,57,186],corrupt:[6,11,48,49,50,55,62,147,179,182],cost:[6,13,22,49,55],could:[6,12,22,28,30,33,48,55,61,197,199],couldn:[40,53],count:[4,6,9,13,22,36,48,53,58,60,187,197,198,199],counter1:190,counter:[0,4,6,9,14,19,50,53,60,62,118,125,147,149,150,182],counter_mut:[53,198],counter_read:60,counter_writ:60,countercach:53,countermutationstag:[53,198],counterwrit:[60,111,161],countri:[13,22],country_cod:22,coupl:6,cours:[13,55,194,199],cover:[11,27,30,33,35,36,39,48,53,187],coverag:[26,28],cph:186,cpu:[6,11,47,49,52,195,197,198],cpu_idl:199,cq4:197,cqerl:38,cqex:38,cql3:[14,30,35,61],cql:[6,10,11,12,13,14,16,17,19,20,22,34,35,38,41,42,43,48,52,56,60,62,158,183,199],cql_type:[11,12,13,14,20,22],cqlc:38,cqldefinit:14,cqlsh:[36,39,40,42,56,62],cqlshrc:62,cqltester:[30,35],cqltrace:199,craft:56,crash:50,crc32:[4,192,193],crc:[4,192,193],crc_check_chanc:[11,49],creat:[0,4,6,9,10,12,13,15,17,19,24,26,27,31,32,35,36,47,48,49,56,58,60,61,68,186,191,199],create_aggregate_stat:12,create_function_stat:12,create_index_stat:12,create_keyspace_stat:12,create_materialized_view_stat:12,create_role_stat:12,create_table_stat:12,create_trigger_stat:12,create_type_stat:[12,22],create_user_stat:12,createkeystor:6,createrepo:34,createt:35,creation:[6,10,11,13,14,18,22,47,197],creator:20,credenti:[6,56],critic:[30,33,56,195,198],cross:[6,36,57],crossnodedroppedlat:53,crucial:[56,197,198,199],cryptographi:6,csv:61,ctrl:199,cuddli:22,culprit:195,cumul:[198,199],curent:187,curl:[29,40],current:[0,6,9,11,13,20,22,31,33,34,40,43,48,53,58,60,61,62,90,108,112,114,116,125,139,170,178,182,187,188,193,197,198],currentd:[10,14],currentlyblockedtask:53,currenttim:[10,14],currenttimestamp:[10,14],currenttimeuuid:[10,14],custom:[6,9,10,11,14,15,16,20,24,33,43,57,60,61,186],custom_option1:20,custom_option2:20,custom_typ:[14,22],cut:197,cute:22,cvh:30,cycl:[6,47,88],cython:62,d18250c0:183,d85b:183,d936bd20a17c11e8bc92a55ed562cd82:189,daemon:[31,125,172,199],dai:[14,17,19,22,43,48,55],daili:[24,43,88],danger:6,dart:39,dart_cassandra_cql:38,dash:12,data:[0,4,6,10,12,14,15,16,18,30,37,40,42,45,49,50,52,53,55,56,57,59,60,61,63,68,81,88,91,95,116,125,130,140,164,179,183,184,185,186,187,188,189,190,191,192,193,194,197,198,199],data_file_directori:[37,50],data_read:20,data_writ:20,databas:[12,13,15,21,34,43,48,50,56,196,197,199],datacent:[0,6,11,55,57,81,91,104,125,140,157,186,195,198],datacenter1:[6,60],dataset:[6,55,199],datastax:[6,14,38,195],datastor:198,datatyp:14,date:[4,9,10,15,17,19,43,62,147,182,183,187],dateof:[10,14],datestamp:17,datetieredcompactionstrategi:[11,48],datetim:15,daylight:22,db532690a63411e8b4ae091830ac5256:192,db_user:56,dba:56,dbd:38,dc1:[6,11,20,56,198],dc1c1:194,dc2:[6,11,56,198],dc3:20,dcassandra:[48,53,56,58],dcawareroundrobin:195,dcl:43,dcom:56,dcpar:140,ddl:[11,43,61],ddl_statement:12,deactiv:6,dead:[6,52,63,125,199],dead_node_ip:58,deal:[62,182],deb:40,debian:[34,36,39,199],debug:[37,61,190,191,192,193,194,195,196,199],decai:195,decid:[9,27,48,57],decim:[9,14,17,19,22,61],decimalsep:61,declar:[11,12,14,22],decod:[17,22,199],decommiss:[0,6,58
,125],decompress:[49,199],decoupl:0,decreas:[6,48,186,199],decrement:[13,22],decrypt:6,dedic:[4,6],deem:6,deep:[42,196,197],deeper:[33,199],default_time_to_l:[10,11,13],defend:36,defens:6,defer:[11,199],defin:[0,6,9,10,11,12,13,15,16,17,18,20,21,31,48,53,56,57,58,60,61,68,125,187],definit:[9,13,14,15,18,22,42,45,60,187],deflat:[4,6],deflatecompressor:[11,49],degrad:6,delai:[4,53,55],deleg:31,delet:[4,6,9,10,11,12,15,17,18,20,22,33,42,43,55,61,88,95,125,177,187,192,193,197],delete_stat:[12,13],deletiontim:4,delimit:6,deliv:[0,6,53],deliveri:[6,53,125,126,145,156],delta:[53,187],demand:56,demonstr:196,deni:36,denorm:22,denot:[6,12],dens:45,dep:26,depend:[4,6,11,12,13,14,22,24,27,30,31,32,33,35,42,48,55,62,195],dependenic:26,deploi:[26,36,37,199],deploy:[0,6,56,57,59],deprec:[6,10,11,14,15,48],depth:199,desc:[9,11,13,61],descend:[11,13],describ:[2,6,7,9,10,11,12,13,14,15,17,20,22,30,31,33,34,45,56,62,125,182],describeclust:125,descript:[10,11,14,19,22,24,27,33,53,61],descriptor:[53,193],deseri:194,design:[14,48,50,55],desir:[16,22,24,36,189],destin:[47,61],destroyjavavm:199,detach:34,detail:[5,6,10,11,12,13,14,22,24,27,28,36,52,56,59,60,61,62,182,192,197,198,199],detect:[2,6,11,29,36,56,194],detector:[93,125],determin:[0,6,11,13,20,45,49,57,140,195,198,199],determinist:36,dev1:43,dev:[6,8,34,36,199],develop:[8,27,28,31,33,35,50],devic:[4,59,199],df303ac7:198,dfb660d92ad8:61,dfp:179,dht:[6,187],diagnost:6,dictat:[6,56],did:[30,53,185],die:6,dies:[42,58],diff:[15,25,197],differ:[0,6,11,12,13,14,15,20,22,24,29,31,33,35,36,37,40,48,49,50,53,55,58,60,195,199],difficult:[6,35,199],difficulti:22,digest:[4,6,192,193],digit:[17,22,36],diminish:22,dinclud:26,dir_path:186,direct:[6,11,17,20,33,53,199],directli:[13,18,20,27,31,48,56,187,199],director:13,directori:[4,6,21,26,27,31,35,36,39,40,41,43,47,50,52,61,116,125,143,186,199],dirti:[4,6,199],disabl:[6,11,14,48,49,56,57,61,74,75,76,77,78,79,80,81,82,91,125,148,150,152,157,160,161,162],disable_stcs_in_l0:48,disableauditlog:[43,125],disableautocompact:[48,125],disablebackup:125,disablebinari:125,disablefullquerylog:[125,197],disablegossip:125,disablehandoff:125,disablehintsfordc:125,disableoldprotocolvers:125,disablesnapshot:147,disableuditlog:43,disallow:6,disambigu:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],disappear:11,discard:6,disconnect:48,discourag:[11,22,33],discov:[36,55],discuss:[8,22,33],disk:[4,6,11,37,42,43,45,47,48,49,52,53,55,88,122,125,138,179,184,188,191,192,197,198,199],dispar:6,displai:[11,43,61,62,64,70,100,115,117,124,125,174,182,186,190,191,192,193,194,199],disrupt:[36,56],dissect:199,dist:[26,34,40],distanc:59,distinct:[0,9,10,13],distinguish:[9,14],distribut:[6,26,33,35,36,48,53,56,58,59,60,187,188,196,197,198,199],distro:34,dive:[42,196,197],divid:12,divis:19,djava:[31,36,56],dml:[21,43],dml_statement:12,dmx4jaddress:53,dmx4jport:53,dns:36,dobar:25,doc:[6,26,27,30,34,55,56,185,199],docker:34,document:[5,12,14,15,17,24,30,32,33,34,41,56,60,61],doe:[6,11,13,14,16,17,18,20,22,29,30,33,34,42,45,48,49,55,56,57,58,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199],doesn:[6,14,22,25,35,36,55,60,186,187,189,190,197,199],dofoo:25,doing:[6
,13,18,35,36,48,53,58,199],dollar:[10,12],domain:[28,56,60,146,165],domin:199,don:[5,6,13,25,28,29,30,31,33,34,36,37,48,55,116,140,186,191,196,198,199],done:[6,11,13,22,24,27,28,33,34,35,37,41,48,60,188,191,192],dont:43,doubl:[6,9,10,11,12,14,17,19,22,31,53,57],doubt:11,down:[6,20,48,53,55,57,58,79,125,140,188,195,197,198,199],downgrad:192,download:[6,24,31,34,40,53,59],downsampl:4,downstream:198,downward:20,dozen:198,dpkg:34,drain:[4,125],drive:[6,48,50,197,198,199],driver:[6,12,14,20,35,39,42,61,195],drop:[6,10,15,42,43,48,53,56,88,184,187,188,190,195,198,199],drop_aggregate_stat:12,drop_function_stat:12,drop_index_stat:12,drop_keyspace_stat:12,drop_materialized_view_stat:12,drop_role_stat:12,drop_table_stat:12,drop_trigger_stat:12,drop_type_stat:[12,22],drop_user_stat:12,dropdown:199,droppabl:[6,48,187],dropped_mut:174,droppedmessag:52,droppedmut:53,dropwizard:53,drwxr:192,dry:[62,182],dsl:24,dt_socket:31,dtest:[24,30,32],due:[11,13,22,26,36,40,48,53,58,195,199],dump:[43,61,62,182,197],duplic:[30,55,193],durabl:[4,47],durable_writ:11,durat:[6,10,15,19,20,48,53,60,127,175,186],dure:[6,11,14,21,26,33,35,36,48,49,53,56,58,60,61,147,184,190,194,197,199],dverbos:26,dying:36,dynam:[6,52,56],dynamic_snitch:57,dynamic_snitch_badness_threshold:57,dynamic_snitch_reset_interval_in_m:57,dynamic_snitch_update_interval_in_m:57,dynamo:[2,42],each:[0,4,6,10,11,12,13,14,17,18,20,22,24,27,29,33,41,42,43,48,49,50,53,55,56,57,58,59,60,61,62,125,150,166,179,182,183,197,198,199],each_quorum:[0,6],earli:[6,12,33],earlier:33,eas:199,easi:[9,27,33,199],easier:[0,27,33,183],easiest:36,easili:56,ec2:[6,50,57],ec2multiregionsnitch:[6,57],ec2snitch:[6,57],ecc:50,echo:[40,187],eclips:[25,32,35],ecosystem:30,eden:199,edg:30,edit:[27,31,34,37,40,53,56,193],editor:27,effect:[6,11,22,33,36,45,49,56,79,125,195,198,199],effectiv:53,effici:[6,11,43,48,57,58],effort:[6,27,33,55],either:[4,6,8,12,13,14,16,22,24,25,27,29,31,33,36,40,41,47,48,53,56,60,177,195,197,198,199],elaps:[48,53,199],elasticsearch:59,elder:31,element:[22,27,61],elig:6,elimin:195,elixir:39,els:[11,13,25,33],email:[8,16,22,34,42],embed:35,emerg:26,emit:6,emploi:45,empti:[6,9,10,11,12,61,190],emptytyp:9,enabl:[0,6,11,14,17,20,35,36,48,49,57,58,61,84,85,86,88,91,92,125,162,186,187,197,199],enable_legacy_ssl_storage_port:6,enable_user_defined_funct:14,enableauditlog:[43,125],enableautocompact:[48,125],enablebackup:125,enablebinari:125,enablefullquerylog:[6,125,197],enablegossip:125,enablehandoff:125,enablehintsfordc:125,enableoldprotocolvers:125,encapsul:[25,53],enclos:[9,10,12,14,20,60],enclosur:12,encod:[15,22,30,43,61,187],encodingstat:187,encount:[5,13,34,40,53,60],encourag:[11,47],encrypt:[6,52,186],end:[22,24,36,43,48,56,61,68,103,125,140,188,193,199],end_token:[68,140],end_token_1:130,end_token_2:130,end_token_n:130,endpoint:[6,53,57,63,103,125,140,177],endpoint_snitch:57,endpointsnitchinfo:56,endtoken:61,enforc:[17,56],engin:[2,11,33,42,53,59],enhac:28,enhanc:[28,50],enjoi:34,enough:[0,6,22,36,37,48,55,57,61,197,199],enqueu:[6,197],ensur:[13,18,21,36,47,49,56,185,197,198],entail:36,enter:[24,36,61,197,199],entir:[0,4,6,11,14,22,36,45,48,55,56,58,61,62,182,184,195,199],entri:[4,6,9,13,16,24,33,34,42,53,56,61,187],entropi:6,entry_titl:13,enumer:[20,183],env:[36,37,53,56],environ:[0,5,6,26,31,32,35,36,39,42,50,189],ephemer:50,epoch:[22,187],epol:6,equal:[0,6,10,11,13,22,25,48,60],equival:[10,11,12,13,14,20,29,48,196],eras:11,erlang:39,erlcass:38,err:61,errfil:61,error:[6,11,12,14,16,18,20,22,24,25,30,31,34,35,40,42,43,55,60,61,141,185,190,194,196,197,198]
,escap:[12,17,60],especi:[33,36,48,61,199],essenti:[0,14,36,61],establish:[6,20,57,186],estim:[4,53,55,187,198],estimatedcolumncounthistogram:53,estimatedpartitioncount:53,estimatedpartitionsizehistogram:53,etc:[6,18,22,25,30,36,37,40,43,48,53,56,60,186,199],eth0:6,eth1:6,ev1:22,evalu:[6,19],even:[0,6,10,11,12,13,14,17,22,28,33,42,48,55,56,61,71,147,178,195,197,198,199],evenli:6,evenlog:[185,188],event:[4,6,13,22,43,48,60,61,140,183],event_typ:13,eventlog:[183,185,188,191,193,194],eventlog_dump_2018jul26:183,eventlog_dump_2018jul26_d:183,eventlog_dump_2018jul26_excludekei:183,eventlog_dump_2018jul26_justkei:183,eventlog_dump_2018jul26_justlin:183,eventlog_dump_2018jul26_singlekei:183,eventlog_dump_2018jul26_tim:183,eventlog_dump_2018jul26b:183,eventu:[4,13,27,55],ever:[25,35,36,50],everi:[4,6,11,13,14,18,20,21,22,41,43,45,48,50,55,60,61,195,198,199],everyth:[4,12,25,31,36,59],evict:53,evil:[6,14],ex1:60,ex2:60,exact:[11,12,14,49,196],exactli:[11,14,18,56,183,199],exampl:[0,6,11,13,14,17,20,22,28,34,35,40,41,43,48,56,57,60,61,183,184,185,186,187,188,189,190,191,193,194,195,196,197,198,199],example2:60,exaust:6,excalibur:11,exce:[4,6,17,25,197],exceed:[6,50,188],excel:11,excelsior:11,except:[0,6,13,14,17,30,32,33,35,36,43,53,183,188,197,199],excess:45,exchang:[6,36],exclud:[11,43,53,62,84,108,125,182],excluded_categori:[43,84],excluded_keyspac:[43,84],excluded_us:[43,84],exclus:[22,26,35],execut:[6,9,11,12,13,14,20,24,26,31,35,41,43,48,53,56,61,182,183,184,185,186,187,188,189,190,191,192,193,194,198,199],executor:24,exhaust:[6,195],exhibit:13,exist:[6,9,10,11,12,13,14,16,17,18,20,21,22,27,28,30,31,34,35,42,43,45,48,49,57,58,60,184],exit:[62,193],exp:60,expand:[11,62],expans:11,expect:[0,4,6,10,12,22,25,30,33,34,48,55,56,188,198],expens:[6,45,57],experi:[6,48,198],experienc:[0,6,197],experiment:[0,140],expir:[6,10,11,13,22,52,55,56,147,184,187,190],expiri:48,explain:[25,27,30,33,40],explan:[62,182],explicit:[10,11,20],explicitli:[4,6,10,11,13,17,22,25,48,57,60],explor:31,expon:10,exponenti:[53,60,195],expos:[6,9,56],express:[0,6,10,12,19,57],expung:36,extend:[22,33,35,59,62,116,179,182],extens:[6,11,56],extern:[42,53,58,59,196],extra:[0,4,6,48],extract:[25,40],extrem:[6,13,60],f6845640a6cb11e8b6836d2c86545d91:187,f8a4fa30aa2a11e8af27091830ac5256:186,facilit:6,fact:[22,28,35,36,195],factor:[0,6,11,42,49,55,56],factori:60,fail:[0,6,11,13,14,22,24,26,42,48,53,61,125,141],failur:[2,6,33,42,48,50,53,57,93,125,179,195],failuredetector:125,fairli:[6,47,56,199],fake:14,fall:[6,43],fallback:[6,57],fals:[6,11,12,17,20,22,43,45,47,48,49,53,56,58,61,147],famili:[6,50,109,127,164,175,192],fanout_s:48,faq:62,far:[27,28],fare:199,fast:[6,45,48,59,197,199],faster:[6,33,49,50,125,150,198],fastest:[6,29,57],fatal:6,fault:36,fav:[16,22],favorit:199,fax:22,fct:14,fct_using_udt:14,fear:36,feasibl:22,featur:[0,28,30,31,33,56],fed:6,feedback:33,feel:[27,29],fetch:[6,11,27,61],few:[6,48,50,195,197],fewer:[6,33],fffffffff:[17,22],fgc:199,fgct:199,field:[10,13,14,17,22,25,43,45,60,190],field_definit:22,field_nam:13,fifteen:53,fifteenminutecachehitr:53,fifth:198,figur:[28,48,184],file:[4,7,11,27,31,32,33,34,35,36,37,39,42,45,47,48,50,53,56,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167
,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,187,188,189,190,192,193,195,196,199],filenam:[4,11,61,109,125,187,191],filenamepattern:43,filesystem:[6,24,56],fill:[34,47,48],fillion:13,filter:[4,9,11,18,42,50,52,53,140,187,192,193],finalfunc:[9,14],finalis:34,find:[4,6,24,27,31,34,35,40,42,45,48,56,58,103,107,183,187,189,192,196,198,199],fine:[4,6,33,55,56],finer:[4,6],finish:[31,33,125,142,197],finish_releas:34,fip:[6,56],fire:[21,28],firefox:61,firewal:[6,36,37,57],first:[0,4,5,6,11,13,14,22,33,34,36,39,48,50,55,56,60,61,140,147,183,187,190,193,195,197,198,199],firstnam:13,fit:[6,48,53],five:53,fiveminutecachehitr:53,fix:[6,10,12,18,24,27,29,32,34,36,48,50,55,60,190],fixm:34,flag:[6,13,29,30,33,47,53,55,58,185],flash:59,flexibl:56,flight:[6,56],flip:11,floor:6,flow:[6,20,30,32,43],fluent:38,flush:[4,6,11,47,48,50,53,83,125,164,193,197],fname:14,focu:[24,33],focus:60,focuss:199,folder:[31,171,191],follow:[0,4,5,6,8,9,10,11,12,13,14,17,18,19,20,22,24,25,26,27,28,29,30,31,33,34,35,36,37,40,42,43,47,48,49,53,55,56,57,58,61,65,68,75,85,94,95,131,140,147,161,166,178,179,184,189,190,193,195,199],font:12,foo:[11,12,47,199],footprint:[125,127],forc:[4,6,11,13,61,68,71,125,139,140,141,194],forcefulli:[63,125],foreground:[37,40],forev:48,forget:5,fork:[27,33],form:[6,10,11,12,14,20,70,124,174],formal:[12,27,34],format:[4,6,10,11,17,22,27,29,30,32,33,43,53,61,62,69,88,109,130,174,176,182,193,198],former:[6,53],forward:[6,11,26],found:[5,6,12,14,24,27,28,33,35,37,41,56,58,60,61,62,171,179,182,186,187,192,193],four:[13,49],fourth:198,fqcn:35,fql:197,fql_log:197,fqltool:197,fraction:6,frame:6,framework:[30,35],franc:[13,22],free:[6,11,22,27,29,31,53,59,194,199],freed:4,freestyl:24,frequenc:[6,47,55],frequent:[6,11,42,48,56,195,199],fresh:58,friendli:[6,22,35],from:[0,4,6,9,11,12,13,14,15,17,18,19,20,22,28,29,32,33,35,39,41,42,43,45,47,48,49,50,53,56,57,58,60,62,63,65,66,68,73,75,81,84,85,91,94,95,98,100,103,107,109,111,115,116,123,125,127,130,131,132,134,135,138,139,140,141,143,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,182,184,185,187,188,190,193,195,197,198,199],fromjson:15,froom:22,frozen:[9,10,11,13,14,22],fruit:[22,33],fsync:[4,6,47,53,198],fulfil:60,full:[6,9,11,13,16,20,33,36,40,41,48,49,52,56,59,60,61,78,88,125,131,140,143,186,188,193,198],full_nam:174,fulli:[0,6,11,12,14,34,52,53,56],function_cal:12,function_nam:[13,14,20],fundament:17,further:[5,11,18,22,48,52,56,59,198],furthermor:[10,13,56],futher:43,futur:[6,9,10,11,22,33,90,125,170],g1gc:50,game:[14,22],garbag:[11,50,52,53,95,197],garbagecollect:125,gather:48,gaug:53,gaurante:0,gaussian:60,gc_grace_second:[11,187],gc_type:53,gce:[36,50],gcg:6,gcinspector:197,gcstat:125,gct:199,gcutil:199,gcviewer:197,gen:199,gener:[0,2,4,6,8,11,12,13,14,17,22,24,27,30,31,32,33,34,36,50,56,59,60,61,111,147,161,190,196,197,198,199],genuin:25,geospati:59,get:[4,6,8,24,26,27,29,31,32,33,36,40,42,45,48,53,62,100,101,102,105,108,125,182,184,188,196,198,199],getbatchlogreplaythrottl:125,getcompactionthreshold:125,getcompactionthroughput:125,getconcurrentcompactor:125,getconcurrentviewbuild:[18,125],getendpoint:125,getint:14,getinterdcstreamthroughput:125,getlocalhost:[6,36],getlogginglevel:[125,197],getlong:14,getmaxhintwindow:125,getpartition:25,getreplica:125,getse:125,getsstabl:125,getstr:14,getstreamthroughput:125,gettempsstablepath:25,getter:[20,25],gettimeout:125,gettraceprob:125,gib:[70,124,174,198],gist:[4,25],git1:34,git:[5,24,27,29,31,33,34,197,199],gitbox:[31,34]
,github:[24,25,29,32,33,34,35,59,199],give:[18,20,22,27,33,35,42,61,185,197,198],giveawai:199,given:[0,6,11,12,13,14,16,22,24,33,45,48,55,56,58,60,61,66,68,73,75,85,98,107,111,125,131,151,158,162,166,173,183,185,187,188,189,192,193],glanc:199,global:[6,61,125,149],gms:197,gmt:22,goal:[6,48,195],gocassa:38,gocql:38,going:[6,33,48,190,196,198,199],gone:6,good:[6,25,27,33,35,36,55,61,189,195,197,198,199],googl:[25,61,199],gori:36,gossip:[2,6,36,53,57,79,89,113,125,169,197],gossipinfo:125,gossipingpropertyfilesnitch:[6,57],gossipstag:[53,197,198],got:6,gotcha:199,gp2:50,gpg:40,grace:[52,55,62,182],grafana:195,grai:22,grain:56,grammar:[11,12,26],grant:[6,9,56],grant_permission_stat:12,grant_role_stat:12,granular:[4,6,11,95],graph:[20,62],graphit:195,gravesit:11,great:[28,33,48,196,197,198,199],greater:[0,6,22,36,57,154,155,197,199],greatli:6,green:[22,31],grep:[4,185,187,189,197,198,199],groovi:24,group:[6,10,11,20,48,53,56,57,195],group_by_claus:13,grow:[22,59],guarante:[0,2,6,11,13,14,22,33,42,45,48,55,58,59,61,184],gui:199,guid:[6,27,31],guidelin:[10,30,34,50],had:[6,9,10,48,190,196,198],half:[4,6,29,36],hand:[6,13,50,198],handi:199,handl:[6,14,30,32,33,36,47,50,53,56,60,88,197],handoff:[6,53,80,114,125,156],handoffwindow:125,hang:33,happen:[6,13,25,29,33,42,48,53,57,195,197,198,199],happi:33,happili:50,hard:[6,14,47,48,50,192,197],harder:6,hardest:28,hardwar:[6,24,42,52,195],has:[0,4,6,10,11,12,13,14,18,20,22,25,33,34,36,43,47,48,50,53,56,57,58,60,61,62,182,186,195,197,198,199],hash:[4,6,48,55,194,199],hashcod:25,haskel:39,hasn:[0,88],have:[0,5,6,9,10,11,12,13,14,15,18,19,20,22,24,25,27,28,29,30,31,33,34,35,36,37,40,43,45,48,49,50,53,56,57,88,147,184,186,188,190,193,194,195,196,197,198,199],haven:33,hayt:38,hdd:[4,6,50],head:[27,33,199],header:[31,61],headroom:6,health:199,healthi:199,heap:[4,6,31,37,42,45,49,50,53,197,198,199],heap_buff:6,heartbeat:[6,197],heavi:[6,197,198,199],heavili:50,held:[6,50,125,129],help:[5,6,10,26,28,33,35,41,43,60,62,64,125,163,186,190,191,192,193,194,195,196,197,198,199],helper:35,henc:[5,6,11,22],here:[6,26,27,29,34,35,36,38,48,53,56,60,198],hex:[12,17,109],hexadecim:[10,12,109],hibern:58,hidden:[58,199],hide:[25,30,62,182],hierarch:20,hierarchi:[20,55],high:[0,6,27,34,36,48,50,59,195,197,198],higher:[0,19,20,33,45,48,53,58,127,175,197,199],highest:[48,187,188],highli:[33,36,50,56,197,198],hint:[0,6,11,12,18,36,37,42,52,53,55,80,81,90,91,106,114,125,126,145,156,159,170,177,198],hint_delai:53,hintedhandoff:[6,52],hintedhandoffmanag:53,hints_creat:53,hints_directori:37,hints_not_stor:53,hintsdispatch:[53,198],hintsfail:53,hintsservic:52,hintssucceed:53,hintstimedout:53,histogram:[4,48,53,125,128,173,187,197],histor:[11,33],histori:[24,25,43,67,69,125],hit:[6,48,53,199],hitrat:53,hoc:35,hold:[0,6,10,13,20,36,48,61,195,197,199],home:[22,34,60,61],honor:[6,31],hope:48,hopefulli:33,host:[6,27,37,42,43,53,57,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186,198,199],hostnam:[6,36,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,
126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,199],hot:[6,52,53,199],hotspot:11,hotspotdiagnost:56,hottest:6,hour:[6,22,33,34,48],hourli:[43,88],how:[0,5,6,7,8,11,12,22,24,28,30,31,32,33,35,39,41,42,47,48,49,53,57,59,60,61,88,185,197,198,199],howev:[4,6,9,10,11,12,13,15,17,18,20,22,24,33,35,36,37,40,45,49,50,55,56,58,61],hoytech:199,html:[6,60,185],http:[6,24,25,27,29,31,34,40,53,183,184,185,186,187,188,189,190,191,193,194,199],httpadaptor:53,hub:36,hudson:24,huge_daili:43,human:[11,43,47,70,124,174,198],hurt:11,hypothet:29,iauthent:6,iauthor:6,ibm:59,icompressor:49,idct:186,ide:31,idea:[6,14,27,32,33,35,36,48,61,198,199],ideal:[6,35,48,53,56],idealclwritelat:53,idempot:[13,22],idemptot:22,ident:[0,60],identifi:[6,9,10,11,13,14,15,16,20,21,22,60,195],idiomat:8,idl:6,idx:47,ieee:[17,22],iendpointsnitch:[6,57],iftop:199,ignor:[0,6,10,14,22,25,61,174,186],iinternodeauthent:6,illeg:14,illegalargumentexcept:188,illustr:[20,188],imag:[22,27,34,199],imagin:48,immedi:[4,6,11,22,33,45,49,56,65,125],immut:[4,36,49,50],impact:[6,11,30,48,52,56,197,199],implement:[4,6,10,13,14,18,20,24,25,35,36,43,47,49,56,57,59],implementor:6,impli:[11,12,22],implic:[0,56],implicitli:[14,20],import_:61,impos:6,imposs:48,improv:[0,6,11,22,28,33,35,45,48,50,57,58,61,199],inaccur:199,inact:36,inam:189,incast:199,includ:[4,6,10,11,12,13,18,20,22,24,25,26,27,33,34,43,48,50,53,56,59,61,62,84,141,178,182,189,195,196,197,198,199],included_categori:[43,84],included_keyspac:[43,84],included_us:[43,84],inclus:33,incom:[6,43],incomingbyt:53,incompat:[6,10],incomplet:[30,193],inconsist:[36,55],incorrect:36,increas:[0,4,6,11,18,36,45,48,49,50,53,57,58,140,186,194,195],increment:[0,6,10,13,22,33,34,48,52,53,76,86,125,141,147,167,190,193],incur:[13,22,53],indefinit:43,indent:25,independ:[11,48,50,56,198],index:[4,6,9,10,11,12,13,15,22,42,47,48,52,61,125,131,186,192,193,197],index_build:171,index_identifi:16,index_nam:16,index_summary_off_heap_memory_us:174,indexclass:16,indexedentrys:53,indexinfocount:53,indexinfoget:53,indexnam:131,indexsummaryoffheapmemoryus:53,indic:[5,6,12,13,25,33,36,43,47,140,187,188,195,197,198,199],indirectli:13,individu:[6,10,14,22,33,35,50,56,186,194],induc:13,inequ:[10,13],inet:[9,11,14,17,22],inetaddress:[6,36],inetworkauthor:6,inexpens:50,infin:[9,10,12],influenc:11,info:[6,37,43,53,73,125,183,197],inform:[4,6,12,13,22,34,41,53,55,56,57,58,60,61,64,67,93,113,115,116,117,124,125,146,163,165,185,186,187,195,196],infrastructur:[33,59],ing:11,ingest:6,ingestr:61,inher:[11,22],inherit:20,init:53,initcond:[9,14],initi:[6,14,18,25,30,32,34,43,53,56,58,61,125,158,186],initial_token:58,inject:43,innov:59,input:[9,10,14,17,22,30,61,189,197],inputd:22,inreleas:40,insecur:6,insensit:[11,12],insert:[6,9,10,11,12,14,15,16,20,22,36,39,42,43,50,56,60,61,193],insert_stat:[12,13],insertedtimestamp:183,insid:[6,11,12,13,22,25,60,61],insight:[197,198],inspect:[6,31,60,61,194],instabl:6,instal:[6,21,24,26,35,36,39,42,56,61,192,199],instanc:[0,10,11,12,13,14,16,18,19,20,21,22,24,31,35,36,47,48,50,53],instantan:53,instanti:10,instantli:6,instead:[4,10,11,13,18,22,25,27,34,36,43,48,146,165,183,199],instrospect:196,instruct:[6,8,11,27,28,29,31,42,199],instrument:[26,56],insuffic:195,insuffici:199,insufici:197,intasblob:13,integ:[0,10,11,12,13,17,22,47,53,190],integr:[32,35,42,59],intellij:[25,32],intend:[30,56,186],intens:[6,35,36],intent:30,intention:20
,inter:[6,104,125,157,186],interact:[35,41,61,199],interest:[0,48,56,198],interfac:[6,10,14,25,27,36,37,49,56,59,199],interleav:60,intern:[6,9,11,13,18,22,27,30,36,50,53,62,182,195,199],internaldroppedlat:53,internalresponsestag:[53,198],internet:6,internod:[6,36,56,186,195,199],internode_application_timeout_in_m:6,internode_encrypt:[6,56],internode_tcp_connect_timeout_in_m:6,internode_tcp_user_timeout_in_m:6,internodeconnect:[111,161],internodeus:[111,161],interpret:[6,10,22,61],interrupt:36,interv:[4,6,9,53,56,60,187],intra:[6,53,57,60],intrins:22,introduc:[6,10,17,28,43,58,193],introduct:[10,20,35],introspect:199,intrus:185,intvalu:14,invalid:[6,13,20,30,56,116,118,119,120,125,188,194,198],invalidatecountercach:125,invalidatekeycach:125,invalidaterowcach:125,invert:60,invertedindex:21,investig:[6,32,196,197,198,199],invoc:14,invok:[14,29,40,56,179],involv:[6,13,27,48,49,56,193,197,199],ioerror:25,ios:199,ip1:6,ip2:6,ip3:6,ip_address:63,ipv4:[6,17,22,36],ipv6:[6,17,22],irolemanag:6,irrevers:[11,22],isn:[0,18,25,33,36],iso8601:[43,183],iso:22,isol:[6,11,13,53,195,196,198],issu:[0,6,20,24,26,27,28,29,33,34,35,36,45,48,49,140,183,184,185,186,187,188,189,190,191,193,194,195,197,198,199],item:[12,22,24,30,31],iter:[0,6,188],its:[4,6,11,12,13,14,22,31,36,43,48,53,56,57,58,59,60,184,188],itself:[6,11,16,36,40,58,198],iv_length:6,jaa:56,jacki:29,jamm:31,januari:22,jar:[14,25,26,31,35,53],java7:56,java8_hom:31,java:[6,14,21,22,25,31,33,39,40,42,47,48,50,53,56,163,188,196,197,199],java_hom:199,javaag:31,javadoc:[24,25,30],javas:6,javascript:[6,14],javax:56,jbod:50,jce8:6,jce:6,jcek:6,jconsol:[42,48,56],jdk:6,jdwp:31,jenkin:[26,32,42],jetbrain:31,jira:[5,6,26,28,30,32,33,35,47,183,184,185,186,187,188,189,190,191,193,194],jks:60,jkskeyprovid:6,jmap:199,jmc:[48,56],jmx:[6,18,20,42,52,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],jmx_password:56,jmx_user:56,jmxremot:56,job:[33,65,95,138,140,147,178],job_thread:140,john:[13,22],join:[6,8,13,42,48,56,58,125,197,198],joss:13,jpg:22,jsmith:22,json:[9,10,13,15,42,48,49,69,174,176,183],json_claus:13,jsr:[6,14],jsse:6,jsserefguid:6,jstackjunit:35,jstackjunittask:35,judgement:25,jul:199,junit:[24,25,26,31,35],junittask:35,jurisdict:6,just:[6,14,20,28,31,33,35,36,47,48,55,56,60,195,199],jvm:[6,21,31,36,37,48,52,56,58,196,197],jvm_extra_opt:31,jvm_opt:[37,56],jvmstabilityinspector:30,keep:[6,8,11,25,28,33,36,43,48,53,62,116,182,193,195,198,199],keepal:[6,36],kei:[4,6,9,10,11,13,14,17,22,35,36,40,47,48,49,50,53,56,59,60,62,65,103,107,109,119,125,129,149,150,174,182,187],kept:[4,6,48,53,193],kernel:[6,36,47,199],key_alia:6,key_password:6,key_provid:6,keycach:53,keycachehitr:53,keyserv:[34,40],keyspac:[0,6,9,10,12,14,15,16,20,22,42,43,45,48,49,52,55,56,58,60,61,62,65,66,68,73,75,84,85,94,95,98,103,107,109,116,125,127,129,130,131,132,138,140,146,147,151,164,165,166,173,174,175,178,179,181,182,183,184,185,186,189,190,191,192,193,194,197,198],keyspace1:[20,184,186,187,188,189,190,192,197],keyspace_definit:60,keyspace_nam:[11,14,20,22,48,55,197],keystor:[6,56,186],keystore_password:6,keystorepassword:56,keytyp:187,keyword:[10,11,13,14,15,16,17,22],kib:[70,124,174,198],kick:[125,142],kil
l:[6,40],kilobyt:49,kind:[11,12,22,24,33,47,48,195,198],kitten:22,knife:163,know:[4,6,13,22,25,28,34,48,189,197,198,199],knowledg:28,known:[20,22,38,41,45,48],krumma:35,ks_owner:56,ks_user:56,kspw:186,ktlist:164,kundera:38,label:[22,24,34],lack:[53,197,198],lag:53,land:49,landlin:22,lang:[42,53,56,188,199],languag:[6,9,10,12,14,21,22,27,38,41,42,61],larg:[6,11,13,14,22,24,35,42,48,50,53,56,59,61,183,189,191,195,197,198,199],large_daili:43,larger:[6,35,36,48,49,50],largest:[6,53],last:[6,12,13,14,15,19,27,48,53,63,125,187,188,189,195,197,199],lastli:[13,22],lastnam:13,latenc:[0,6,11,36,53,57,59,60,196,197],latent:[195,199],later:[0,11,22,25,27,33,36],latest:[0,6,34,40,48,61,179,185,197],latest_ev:60,latter:12,layer:50,layout:[11,27],lazi:11,lazili:11,lead:[6,10,11,22,48,197,199],learn:[6,35,36,61],least:[0,4,6,11,12,13,18,27,36,48,50,55],leav:[6,12,13,25,35,36,61,195,197,198],left:[6,17,19,26,48,193],legaci:[4,6,20,60],legal:[10,11],length:[4,6,10,17,22,30,48],lengthier:33,less:[4,6,22,26,33,36,45,50,191,194,197,198,199],let:[6,28,34,48],letter:17,level:[4,6,10,11,13,19,20,25,30,37,43,49,50,52,53,56,61,62,105,116,125,158,182,187,188,190,195,197,198],leveledcompactionstrategi:[11,45,48,185,188,198],lexic:36,lib:[4,6,21,26,30,31,35,40,183,184,185,186,187,188,189,190,191,192,193,194,199],libqtcassandra:38,librari:[8,30,32,35,38,53,61],licenc:30,licens:[26,30,31,33],lie:195,lies:195,life:33,lifespan:50,lightweight:62,like:[0,6,12,13,14,17,22,25,27,29,30,33,35,36,42,48,49,50,55,56,188,189,190,195,196,197,199],likewis:20,limit:[4,6,9,10,11,20,22,36,47,48,49,56,60,197,199],line:[6,12,25,33,35,37,40,41,47,56,62,63,65,66,68,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,183,186,188,193,197],linear:50,linearli:45,link:[6,8,11,12,33,35,40,47,192],linux:[6,27,34,36,196,197,199],list:[4,5,6,9,10,11,12,13,14,17,24,26,27,31,32,33,34,35,37,40,41,42,43,47,48,53,56,58,60,61,62,63,65,66,67,68,73,75,81,84,85,91,94,95,98,100,103,107,108,109,111,115,116,122,123,125,127,130,131,132,135,138,139,140,141,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,182,184,186,187,188,189,194],list_liter:[13,22],list_permissions_stat:12,list_roles_stat:12,list_users_stat:12,listarg:14,listen:[6,42,53,199],listen_address:[37,41,42],listen_interfac:37,listsnapshot:[125,192],liter:[10,12,14,17,20,61],littl:[25,195,198],live:[13,27,42,48,53,58,187,192,193,197,199],livediskspaceus:53,liveness_info:183,livescannedhistogram:53,livesstablecount:53,load:[0,6,11,21,22,28,42,52,53,56,57,58,60,62,117,125,132,140,165,182,195,198,199],loader:186,loadm:186,local:[0,4,6,11,20,26,31,32,33,35,41,50,53,56,57,61,125,134,140,144,177,187,195,196,197,199],local_jmx:56,local_on:[0,56,61,195,198],local_quorum:[0,61,195,199],local_read_count:174,local_read_latency_m:174,local_seri:61,local_write_latency_m:174,localhost:[6,41,43,56],locat:[6,34,39,40,43,49,53,56,57,61,171,186,195,197,199],lock:[6,36,53,199],log:[0,4,6,11,13,27,30,35,39,40,42,47,52,53,56,60,62,74,78,84,88,105,125,140,143,158,171,182,196,199],log_al:48,logback:[37,43,197],logdir:43,logger:[25,37,43,84],loggernam:43,logic:[6,21,197,198],login:[6,9,20,34,35,43,56,62,195],logmessag:43,lol:22,longer:[6,9,10,11,34,36,48,58,65,125,190,193,195],longest:197,look:[6,12,24,27,28,29,33,35,48,50,55,188,190,195,197,199],lookup:53,loop:25,lose:[4,6,48,58,193],loss:[6,22,48,55,199],lost:[43
,48,58,190],lot:[6,27,41,42,55,62,182,191,197,198,199],low:[6,33,59,125,127,199],lower:[0,4,6,11,12,13,20,36,45,48,49,53,58,195,197],lowercas:12,lowest:[6,33,48,187],lsm:[198,199],lucen:42,luckili:196,lwt:0,lz4:[4,6],lz4compressor:[4,6,11,49,187],mac:199,macaddr:9,machin:[6,11,35,36,53,56,57,58,187,196,199],made:[6,22,34,42,45,50,56,197],magnet:[4,6],magnitud:13,mai:[0,4,6,9,10,11,13,14,16,17,18,20,22,24,26,27,30,31,33,35,36,40,43,45,48,53,56,57,58,60,61,147,188,190,195,196,197,198,199],mail:[5,28,33,34,42],main:[0,6,14,18,31,36,39,40,56,61,188,195,197],main_actor:13,mainli:6,maintain:[6,11,28,33],mainten:53,major:[0,4,10,27,33,56,68,125,192,198],make:[0,6,8,9,21,22,24,25,26,27,28,31,33,35,36,37,40,48,56,58,60,61,183,197,199],malform:195,malici:56,man:6,manag:[6,20,24,27,31,32,34,35,42,53,56,58,62,64,125,182],mandatori:[11,14],mani:[0,6,11,25,30,33,48,49,50,53,56,60,61,62,65,68,75,85,88,94,95,140,147,166,178,179,182,188,194,195,198,199],manifest:[62,182],manipul:[12,15,18,35,42,183],manual:[6,26,29,36,193,199],map:[6,9,10,11,13,14,17,20,42,47,53,197,199],map_liter:[11,16,20,22],mar:22,mark:[6,20,33,48,55,58,79,125,187,189,193],marker:[4,6,11,12,30,36,193],markup:27,marshal:187,massiv:[28,199],match:[4,6,12,13,14,17,20,53,57,187,192],materi:[0,6,10,11,12,15,42,53,61,125,181],materialized_view_stat:12,matter:[11,36,199],maven:26,max:[4,6,42,48,53,56,60,61,88,98,106,125,140,151,159,187,190,197,198],max_hint_window_in_m:58,max_log_s:[43,88],max_map_count:36,max_mutation_size_in_kb:[4,6,36],max_queue_weight:[43,88],max_threshold:48,maxattempt:61,maxbatchs:61,maxfiledescriptorcount:53,maxfiles:43,maxhintwindow:159,maxhistori:43,maxim:50,maximum:[4,6,14,43,45,53,61,88,100,147,153,187,190,191,195,197,198],maximum_live_cells_per_slice_last_five_minut:174,maximum_tombstones_per_slice_last_five_minut:174,maxinserterror:61,maxldt:184,maxoutputs:61,maxparseerror:61,maxpartitions:53,maxpools:53,maxrequest:61,maxrow:61,maxt:184,maxtasksqueu:53,maxthreshold:151,maxtimestamp:4,maxtimeuuid:10,mayb:13,mbean:[6,20,48,53,56],mbeanserv:20,mbit:186,mbp:6,mct:6,mean:[0,6,9,11,12,13,14,17,18,22,42,48,53,57,60,61,140,195,196,197,198,199],meaning:13,meanpartitions:53,meant:[22,36,53],measur:[6,30,33,35,53,58,60,61,199],mechan:47,median:[53,197],medium:199,meet:[6,30,56],megabit:186,megabyt:[6,191,198],mem:199,member:[25,56,60],membership:6,memlock:36,memori:[4,6,11,42,43,45,47,48,49,52,59,194,197,199],memory_pool:53,memtabl:[2,6,11,45,47,48,49,50,53,164,197,199],memtable_allocation_typ:4,memtable_cell_count:174,memtable_cleanup_threshold:4,memtable_data_s:174,memtable_flush_period_in_m:11,memtable_off_heap_memory_us:174,memtable_switch_count:174,memtablecolumnscount:53,memtableflushwrit:[53,198],memtablelivedatas:53,memtableoffheaps:53,memtableonheaps:53,memtablepool:6,memtablepostflush:[53,198],memtablereclaimmemori:[53,198],memtableswitchcount:53,mention:[6,22,33,53,56,186,195],menu:31,mere:25,merg:[27,29,33,45,49,50,52,199],mergetool:29,merkl:[6,53,55],mess:[33,35],messag:[6,22,24,27,30,33,40,42,43,53,56,186,190,191,192,193,194,195,197,198],met:13,meta:[13,53,60],metadata:[4,20,34,49,50,53,62,182,190,193,194,197],metal:6,meter:53,method:[10,13,14,20,25,28,30,31,35,42,56,60],metric:[6,52,60,196,198,199],metricnam:53,metricsreporterconfigfil:53,mib:[70,124,174],micro:198,microsecond:[6,11,13,22,53,187,198],midnight:22,might:[6,13,34,48,53,63,65,66,68,73,75,81,85,88,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,1
66,171,173,174,175,177,178,179,181,194,195,198],migrat:[6,53,57],migrationstag:[53,198],milli:4,millisecond:[4,6,10,22,53,127,147,175,187,190,198,199],min:[4,6,36,47,48,53,60,61,98,125,151,187,197,198],min_sstable_s:48,min_threshold:48,minbatchs:61,minim:[6,48,50],minimum:[6,11,14,37,53,55,185,187],minlocaldeletiontim:187,minor:[10,12,27,52],minpartitions:53,mint:184,minthreshold:151,mintimestamp:187,mintimeuuid:10,minttl:187,minut:[6,22,43,48,53,56,60,88],mirror:27,misbehav:[42,48,196],misc:[111,161],miscelen:53,miscellan:6,miscstag:[53,198],mismatch:6,misrepres:190,miss:[11,18,24,26,48,53,55,58,193,199],misslat:53,misspel:185,mistaken:[63,65,66,68,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181],mistun:197,mit:34,mitig:[6,56],mix:[6,48,60,199],mkdir:[34,197],mmap:36,mnt:16,mock:35,mode:[4,6,56,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,195],model:[11,15,20,33,42,56,60,199],moder:50,modern:[50,199],modif:[13,20,197],modifi:[6,9,10,11,14,20,22,33,45,48,49,189],modification_stat:13,modul:61,modular:30,moment:[6,33],monitor:[36,42,52,56,57,64,125,195,199],monkeyspeci:[11,18],monkeyspecies_by_popul:18,monoton:[0,11],month:22,more:[0,4,6,10,11,12,13,22,25,27,28,33,34,35,37,41,42,43,45,49,50,52,53,56,57,58,60,62,68,94,95,125,127,140,147,163,175,179,182,187,188,194,196,198,199],moreov:13,most:[6,11,12,13,22,27,28,31,33,35,36,37,43,48,49,50,53,56,61,67,125,175,187,188,195,197,198,199],mostli:[4,6,11,22,196,197],motiv:[35,48],mount:[6,199],move:[6,33,36,42,47,52,53,125,190,193,198],movement:[52,197],movi:[13,22],movingaverag:6,msg:43,mtime:[11,189],mtr:199,much:[0,5,6,11,45,47,48,57,186,195,197,199],multi:[0,6,12,30,197,199],multilin:32,multipl:[4,6,10,11,12,13,14,19,22,25,30,31,33,36,37,48,50,57,60,62,130,182,183,195,198],multipli:48,multivari:59,murmur3:4,murmur3partit:4,murmur3partition:[6,14,61,187],must:[0,4,6,10,11,13,14,17,18,20,25,26,31,33,35,36,37,48,53,56,58,60,61,164,182,183,184,185,186,187,188,189,190,191,192,193,194],mutant:16,mutat:[0,4,6,13,36,47,53,179,198],mutatedanticompactiongaug:53,mutationsizehistogram:53,mutationstag:[53,198],mv1:18,mvn:26,mx4j:53,mx4j_address:53,mx4j_port:53,mx4jtool:53,mxbean:20,myaggreg:14,mycolumn:17,mydir:61,myevent:13,myfunct:14,myid:12,mykei:17,mykeyspac:14,mytabl:[11,14,17,19,21],mytrigg:21,nairo:22,name:[4,6,9,10,11,12,13,14,16,17,18,20,21,22,24,27,30,31,33,34,35,36,37,43,53,56,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,183,185,186,189,192,195,197,198,199],names_valu:13,nan:[9,10,12],nanosecond:[22,53],narrow:[195,197,198,199],nathan:13,nativ:[4,6,10,12,15,17,30,36,41,43,53,61,77,87,125,131,168,186,198,199],native_transpo
rt_port:37,native_transport_port_ssl:56,native_typ:22,natur:[11,22,25,48,49,199],navig:27,nbproject:31,ncurs:199,nearli:31,neccessari:6,necessari:[6,11,14,20,33,40,43,49,56,183,187,190],necessarili:[6,12,37],need:[0,4,6,10,11,12,13,20,22,24,25,26,30,31,33,34,35,36,37,40,41,43,45,48,49,50,55,56,57,59,61,103,107,186,191,192,194,198,199],neg:6,negat:[19,56],neglig:[13,199],neighbor:195,neighbour:48,neither:[6,18,22,56],neon:31,nerdmovi:[13,16],nest:[12,13,25],net:[6,31,36,39,40,43,56],netbean:32,netstat:[58,125],netti:6,network:[6,13,36,50,55,56,57,124,125,128,197],network_author:20,network_permiss:6,networktopologystrategi:[56,60],never:[0,6,10,11,12,13,14,22,25,36,48,55,188],nevertheless:13,new_rol:20,new_superus:56,newargtuplevalu:14,newargudtvalu:14,newer:[48,50,61,95,184,199],newest:[11,48,184],newli:[11,20,22,33,47,125,132],newreturntuplevalu:14,newreturnudtvalu:14,newtuplevalu:14,newudtvalu:14,next:[0,6,36,41,48,61,196,197],ngem3b:13,ngem3c:13,nic:199,nid:199,nifti:29,nio:[6,14,53],nntp:199,no_pubkei:40,node:[0,4,6,11,13,14,21,22,30,35,37,38,41,42,43,45,47,48,50,52,53,55,57,59,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186,187,188,196,197,198,199],nodej:39,nodetool:[4,6,18,40,42,45,49,52,55,56,58,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,189,192,194,195,196,197,199],nois:[6,197],noiser:197,noisi:197,nologin:9,non:[6,9,10,11,12,13,14,20,22,36,45,49,53,56,61,187,190],none:[0,6,11,13,22,53,56,187],nonsens:20,nor:[6,11,18,22],norecurs:[9,20],norm:53,normal:[14,17,20,31,36,40,53,60,61,195,197,198,199],nosql:59,nosuperus:[9,20],notabl:[14,17],notat:[10,12,13,61],note:[0,4,5,6,10,11,12,13,14,15,17,20,22,29,32,33,34,36,48,56,182,183,184,185,186,187,188,189,190,191,192,193,194,197,199],noth:[6,11,14,29,35,36,184],notic:[6,56,198,199],notif:8,notion:[11,12],now:[10,24,25,27,31,48,58,199],ntp:6,nullval:61,num_cor:61,num_token:58,number:[0,4,6,10,11,12,13,14,15,17,18,22,24,31,33,34,35,36,40,43,45,48,49,53,56,58,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186,187,191,195,196,198,199],number_of_cor:6,number_of_dr:6,number_of_partitions_estim:174,numer:[15,19,24,45,60],numprocess:61,numsampl:197,object:[6,11,12,30,183],objectnam:20,observ:25,obsolet:[6,50,53,194],obtain:[12,56,199],obviou:[14,29],obvious:11,occasion:[55,198],occup:[13,199],occupi:[6,53],occur:[6,10,12,13,21,22,36,48,50,53,55,182,183,184,185,186,187,188,189,190,191,19
2,193,194,199],occurr:22,octet:[6,57],oddli:6,off:[4,6,36,47,49,53,56,61,125,142,199],off_heap_memory_used_tot:174,offer:[15,35,49],offheap:[45,50],offheap_buff:6,offheap_object:6,offici:[27,33,42,59,61],offset:[4,47,53],often:[6,11,12,25,27,28,33,35,36,43,48,49,50,55,56,57,61,88,188,195,198,199],ohc:6,ohcprovid:6,okai:25,old:[4,6,48,58,62,82,92,125,182,193,199],older:[4,6,14,31,40,48,50,61,184,192],oldest:[4,6,11,43,184],omit:[4,6,10,11,13,17,22,158],onc:[0,4,6,11,12,14,22,24,29,31,33,35,36,47,48,49,50,53,55,56,58,60,61,188,195],one:[0,4,6,9,10,11,12,13,14,17,18,20,22,25,28,31,33,35,37,42,43,45,48,50,53,55,56,57,58,61,62,65,68,75,85,94,95,111,125,140,147,161,164,166,178,179,182,183,187,190,192,193,195,197,198,199],oneminutecachehitr:53,ones:[6,11,12,13,14,18,20,53,188],ongo:[28,48,53,58],onli:[0,4,6,9,11,12,13,14,17,18,20,22,25,27,33,34,35,37,42,45,47,48,49,50,53,55,56,57,58,60,61,62,140,164,174,182,184,186,189,190,191,192,194,195,198,199],onlin:61,only_purge_repaired_tombston:48,onto:[4,48],open:[5,6,20,24,28,32,34,56,57,59,186,199],openfiledescriptorcount:53,openhft:43,openjdk:40,oper:[0,6,10,11,12,13,15,16,18,20,22,25,32,42,43,45,47,50,53,55,56,58,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,185,192,193,195,196,197,198,199],operand:19,operatingsystem:53,operationtimedoutexcept:195,opertaion:6,oplog:193,opnam:60,opportun:[27,45],ops:[36,60],opt:14,optim:[4,6,11,12,13,36,48,50,58,187,197],optimis:140,option1_valu:20,option:[4,6,9,10,11,12,13,14,16,20,22,27,31,33,35,36,40,49,50,52,56,58,60,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,183,186,187,188,189,190,191,192,193,194,197,198,199],oracl:[6,40,56,199],order:[0,4,6,9,10,11,14,18,22,25,33,36,45,47,48,57,58,60,61,187],ordering_claus:13,orderpreservingpartition:6,ordinari:6,org:[6,14,21,24,25,26,27,31,34,35,36,40,43,48,49,53,56,183,184,185,186,187,188,189,190,191,193,194,197],organ:[4,24,31,38],orgapachecassandra:34,origin:[4,9,27,29,33,47,147,188,190,191,192],orign:13,os_prio:199,osx:27,other:[0,4,6,10,11,12,13,14,18,20,22,26,27,28,29,31,33,37,42,43,45,48,50,52,53,56,57,58,125,130,141,184,187,188,193,195,196,197,198,199],other_rol:20,otherwis:[0,6,9,12,13,16,22,55,100,184,195],our:[5,6,8,24,27,28,29,31,34,48,199],ourselv:29,out:[4,6,11,12,25,26,28,31,33,34,48,53,55,56,57,58,59,140,183,184,195,198,199],outbound:6,outboundtcpconnect:6,outgo:[6,199],outgoingbyt:53,outlin:[24,56],outofmemoryerror:42,output:[14,20,30,31,34,45,48,60,61,62,68,69,174,176,182,187,190,191,193,194],outsid:[11,21,22],outstand:[193,198],over:[0,4,6,11,22,36,48,53,55,56,57,58,60,188,190,193],overal:14,overflow:[17,62,147,182],overhead:[6,36,49,53,58],overidden:56,overlap:[0,48,188],overload:[6,14,36,186],overrid:[6,11,24,25,56,58,147,186,190],overridden:[6,11,43],overview:[2,42,52],overwrit:
[43,49,50,56],overwritten:[53,95],own:[0,11,12,14,22,28,32,33,36,40,48,49,53,56,59,60,103,109,116,125,179,188,198],owner:22,ownership:[48,146],ownersip:197,p0000:22,p50:198,p99:199,pacif:22,packag:[31,35,36,37,39,41,61,197],packet:[6,197],page:[6,22,24,27,28,31,35,36,50,53,59,62,196,198],paged_rang:198,paged_slic:53,pages:61,pagetimeout:61,pai:[25,26,34],pair:[6,11,20,22,48,56],parallel:[18,35,48,140,198],paramet:[4,6,14,24,25,30,31,37,43,45,50,57,58,125,158],parameter:24,paranoid:6,parent:[26,186],parenthesi:[11,60,61,195],parnew:50,pars:[6,12,43,47,61,199],parser:[9,10],part:[5,6,11,13,14,18,22,26,30,31,33,35,36,56,57,58,61,186,195],parti:[30,42,53,183],partial:[4,11,193],particip:[0,21],particular:[0,6,11,12,13,14,17,20,22,36,50,53,56,195,197,198,199],particularli:[12,22,56,197,198,199],partit:[4,6,10,11,13,14,36,45,48,50,53,60,95,103,107,125,147,175,183,187,195,197,198],partition:[4,10,13,14,55,61,72,125,140,187],partition_kei:[11,13],partitionspercounterbatch:53,partitionsperloggedbatch:53,partitionsperunloggedbatch:53,partitionsvalid:53,partli:13,pass:[30,33,37,61,163,186,187,198,199],password:[6,9,13,20,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186],password_a:20,password_b:20,passwordauthent:[6,56],passwordfilepath:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],past:53,patch:[10,13,24,25,27,29,30,32,35,42],path1:43,path2:43,path:[5,6,16,30,40,45,48,49,50,53,56,59,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,183,184,186,190,192,194,197,199],pathn:43,patter:20,pattern:[6,20,22,43,195,198,199],paus:[6,36,125,126,197,199],pausehandoff:125,paxo:[13,53,61],pcap:199,peak:[186,199],peer:[6,20,53,199],peerip:53,penalti:[6,13],pend:[6,48,53,125,139,198],pending_flush:174,pendingcompact:53,pendingflush:53,pendingrangecalcul:[53,198],pendingtask:53,pendingtasksbytablenam:53,pennsylvania:22,peopl:[27,33,34,36],per:[0,4,6,10,11,13,25,29,33,36,45,47,48,49,53,56,60,61,125,148,156,183,186,193,195,197,198,199],percent:53,percent_repair:174,percentag:[6,53,57,199],percentil:[53,195,198,199],percentrepair:53,perdiskmemtableflushwriter_0:[53,198],perf:199,perfdisablesharedmem:199,perfect:14,perform:[6,11,13,20,22,28,29,30,32,33,36,37,43,45,48,50,53,55,56,57,61,140,197,198,199],perhap:[195,197],period:[4,6,24,50,53,55,56,58,125,127,199],perl:39,perman:[11,36,48,50,197],permiss:[6,9,12,35,56],permit:[6,20,47,56],persist:[4,36,45,47,50,56,199],person:
199,perspect:36,pet:22,pgp:34,pgrep:40,phantom:38,phase:[58,61,198],phi:6,phone:[13,22],php:39,physic:[0,6,11,36,50,57],pick:[6,29,33,36,48,56,58,60,130],pid:[36,40,199],piec:[12,48,53],pile:6,pin:[6,57],ping:[33,199],pkcs5pad:6,pkill:40,place:[5,6,16,21,25,29,33,47,48,53,55,56,61,125,132,186,191,197,199],placehold:[14,61],plai:[14,22],plain:4,plan:[11,29,33],plane:27,platform:[20,24,59],platter:[6,50],player:[14,22],playorm:38,pleas:[5,6,11,13,14,22,24,25,27,31,34,35,36,43,56,60,194],plu:[14,48,53,198],plug:[6,24],pluggabl:[20,56],plugin:[42,53],pmc:34,poe:22,point:[4,6,10,17,22,25,27,31,34,42,56,60,61,103,125,186,195,199],pointer:14,polici:[6,33,34,56,179,195],poll:56,pom:32,pool:[6,40,53,125,153,176,198,199],pop:60,popul:[11,18,60],popular:[31,50],port:[6,31,37,42,43,53,56,60,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186,199],portion:[50,61,191],posit:[4,6,10,11,19,22,45,53,58,183,187],possbili:6,possess:20,possibl:[0,6,10,11,13,14,17,20,22,24,30,33,35,36,45,48,50,53,56,58,60,188,195,197],post:[13,24,32,125,150],post_at:13,posted_at:13,posted_bi:11,posted_month:11,posted_tim:11,potenti:[0,6,9,11,12,14,30,43,48,50,56,58,147,188,190],power8:59,power:[6,59],pr3z1den7:22,practic:[0,6,11,12,13,34,52,56],pre:[6,17,22,50,56,190,191,193],preced:[19,36,60],precis:[10,17,22,48,187],precondit:53,predefin:11,predict:[13,188],prefer:[0,6,11,12,22,25,33,56,57],preferipv4stack:31,prefix:[11,12,22,187,193],prepar:[6,14,15,43,53],prepare_releas:34,preparedstatementscount:53,preparedstatementsevict:53,preparedstatementsexecut:53,preparedstatementsratio:53,prepend:22,prerequisit:[32,39],presenc:6,presens:4,present:[12,13,18,37,47,53,56,190,199],preserv:[6,11,17,20],preserveframepoint:199,press:40,pressur:[6,53,198,199],pretti:[61,199],prevent:[6,11,35,47,53,55,186,190,199],preview:[27,55,140],previou:[6,10,11,22,34,43,48,55,58,192],previous:[6,193],previsouli:[91,125],primari:[9,10,11,13,14,22,35,47,48,49,55,56,58,60],primarili:[6,11],primary_kei:[11,18],print0:189,print:[55,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,197],prio:199,prior:[6,13,20,22,58],prioriti:[33,199],privat:[6,25,34,56,57],privileg:[20,40,56],probabilist:[45,49],probabl:[0,4,6,11,35,45,48,55,112,125,162,197,198,199],problem:[5,6,14,29,30,34,36,56,195,196,198,199],problemat:[22,195],proc:[6,36],proce:[30,49,58,195],procedur:[13,56],proceed:194,process:[0,6,14,24,26,28,29,30,31,32,33,35,36,40,42,47,49,50,53,55,56,58,59,61,64,100,125,126,145,153,185,186,190,192,193,194,197,198,199],prod_clust:61,produc:[13,14,28,48,88,195],product:[0,6,11,26,28,33,36,50,57],profil:[13,31,62,125,127,199],profileload:125,program:[14,35,196,199],programmat:189,progress:[25,29,33,34,45,52,60,62,125,181,182,193,198],project:[24,25,26,27,28,35,53],promin:11,promot:4,prompt
:61,propag:[6,11,14,25,30,57],proper:[11,22,27,36,56],properli:[6,30],properti:[4,6,11,18,20,31,34,39,47,48,56,57,58],propertyfilesnitch:[6,57],proport:[6,13],proportion:[6,97,125,148],propos:[6,34,53],protect:[6,50,55,56,193],protocol:[6,30,36,41,43,53,56,61,67,77,82,87,92,125,168,186,197,199],prove:199,provid:[0,4,5,6,11,12,13,14,15,17,22,31,33,34,41,43,47,48,49,50,53,55,56,57,58,59,60,62,124,125,135,139,186,187,188,191,193,194,195,197],provis:199,proxim:[6,57],proxyhistogram:[125,198],prtcl:186,prv:[55,140],ps1:56,ps22dhd:13,pt89h8m53:22,publish:26,published_d:60,pull:[27,35,48,53,140],pure:199,purg:50,purpos:[11,12,13,22,50,56],push:[29,33,34,53],put:[15,33,37,48,58,116,140,188,198],pwd:34,pwf:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],python:[14,24,33,35,39,40,61],pytz:62,qos:43,quak:[14,22],qualifi:[11,14,33,198],qualiti:[27,56],quantiti:[22,198],queri:[0,6,10,11,12,13,14,16,18,20,39,42,43,48,53,59,60,61,78,88,125,143,196,199],queryvalidationexcept:195,question:[8,20,32,42,199],queu:[6,53],queue:[6,43,53,88,198,199],quick:[116,179,194],quickli:[36,48,198],quill:38,quintana:22,quit:[48,61,186,198,199],quorum:[0,11,56,61,195],quot:[9,10,11,12,14,17,20,60,61],quotat:20,quoted_identifi:12,quoted_nam:11,r_await:199,race:[22,29],rack1:6,rack:[0,6,56,57,195,198],rackdc:[6,57],rackinferringsnitch:[6,57],raid0:50,raid1:50,raid5:50,rain:12,rais:[6,12,36,195],raison:9,ram:[45,49,50,199],ran:189,random:[11,14,36,58],randomli:[6,58],randompartition:[6,13,14],rang:[2,6,10,11,13,22,30,48,52,53,55,60,61,68,73,111,125,130,140,161,195,198],range_slic:[53,198],rangekeysampl:125,rangelat:53,rangemov:58,rangeslic:53,rapid:50,rapidli:199,rare:[10,45,195],raspberri:50,rate:[6,11,53,56,60,61,186,199],ratebasedbackpressur:6,ratefil:61,rather:[6,13,36,47,48,50,60],ratio:[6,49,50,53,60,187],ration:4,raw:[4,6,14,62,182,197],reacah:43,reach:[4,6,11,33,36,47,48,188],read:[0,6,11,13,22,25,27,30,35,36,39,42,45,48,49,50,52,53,56,57,60,61,111,161,174,179,186,187,194,195,197,198,199],read_ahead_kb:199,read_lat:174,read_repair:[0,11,53,198],read_request_timeout:36,readabl:[11,43,47,70,124,174,198],readi:[0,11,27,33,56],readlat:[53,195],readm:[27,34],readrepair:53,readrepairstag:[53,198],readstag:[53,198],readtimeoutexcept:195,readwrit:56,real:[4,8,11,25,36,59,197],realclean:26,realis:60,realiz:48,realli:[6,35,185,189,195,199],realtim:47,reappear:55,reason:[0,4,6,11,13,14,15,18,36,37,40,48,50,55,56,58,198,199],rebas:27,rebuild:[0,45,48,49,53,125,131,147],rebuild_index:125,receiv:[6,14,18,33,36,48,50,58,195,199],recent:[6,33,35,50,67,188,193],reclaim:[43,48],recogn:[13,31,33],recommend:[4,6,11,22,27,36,43,50,56,58,197],recompact:48,recompress:49,reconcil:11,reconnect:56,reconstruct:188,record:[4,6,11,13,19,22,33,43,48,53,60,199],recov:[6,36,48],recoveri:6,recreat:[20,61],recrus:6,recurs:88,recv:40,recycl:[4,6,53],redhat:34,redirect:43,redistribut:[6,197],redo:33,reduc:[4,6,28,36,48,49,55,62,71,97,125,140,148,182],reduct:6,redund:[0,25,30,33,50],reenabl:[87,89,90,125],ref:[34,43,183,184,185,186,187,188,189,190,191,193,194],refer:[6,11,12,13,14,22,24,25,26,35,36,40,41,43,58,60,61,195,197],referenc:[6,60],reflect:[47,48,183],refresh:[6,56
,61,125,133],refreshsizeestim:125,refus:42,regard:[11,13],regardless:[0,6,20,33,199],regener:45,regexp:12,region:[6,57],regist:22,registri:56,regress:[30,35],regular:[9,12,27,31,35,36,53,61],regularcolumn:187,regularli:55,regularstatementsexecut:53,regularupd:60,reinsert:[147,190],reject:[6,13,36,47,56,195],rel:[6,22,61,199],relat:[8,10,12,13,26,31,33,48,53,60,187,195,199],relationship:6,releas:[6,10,26,32,33,40,42,61,199],relev:[13,20,22,33,49,56,59,186,187,190,199],relevel:[62,182],reli:[6,14,22,36],reliabl:[28,48],reload:[6,52,125,134,135,136,137],reloadlocalschema:125,reloadse:125,reloadssl:[56,125],reloadtrigg:125,reloc:[125,138,197],relocatesst:125,remain:[6,13,14,20,22,29,48,53,55,58,174,198],remaind:[17,19,49],remeb:43,remedi:48,rememb:[43,195],remot:[0,4,27,29,31,42,48,56,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,195],remov:[0,4,6,10,11,12,13,14,15,17,18,20,22,26,30,36,42,47,52,56,63,66,95,125,139,188,193,194,199],removenod:[58,63,125],renam:[9,22],render:27,reorder:6,repair:[0,4,6,11,18,36,42,49,52,53,57,58,62,116,125,141,158,179,182,187,190,194,198],repair_admin:125,repairedat:189,repairpreparetim:53,repairtim:53,repeat:[12,40,49,56],replac:[0,6,9,14,20,22,26,30,34,36,42,48,52,55,88,192,193],replace_address_first_boot:58,replai:[0,4,22,50,53,97,125,142,148,187],replaybatchlog:125,repli:28,replic:[2,6,11,42,48,50,55,56,58,60,63,125],replica:[0,6,11,13,36,48,53,55,57,58,71,107,125,195,198,199],replication_factor:[0,11,56,60],repo:[26,29,31,34],repodata:34,repomd:34,report:[6,26,32,33,42,52,195],report_writ:20,reportfrequ:61,repositori:[5,8,24,26,27,28,31,33,35,40,59],repres:[6,10,17,20,22,36,48,53,56,57,60,61,187,197],represent:[10,17,183],reproduc:28,reproduct:28,request:[0,6,13,20,21,27,35,36,43,45,48,50,52,56,57,61,125,162,178,194,195,198,199],request_respons:[53,198],requestresponsest:198,requestresponsestag:[53,198],requesttyp:53,requir:[0,6,11,13,14,20,25,27,29,30,31,32,33,34,36,45,49,50,56,60,185,186,189,192],require_client_auth:6,require_endpoint_verif:6,resampl:6,reserv:[6,10,12,15,199],reservoir:195,reset:[6,13,27,125,144,158,185],reset_bootstrap_progress:58,resetfullquerylog:125,resetlocalschema:125,resid:[6,13,36,53,199],resolut:[6,13,32,36],resolv:[26,29,36,146,165],resort:[63,125],resourc:[20,56,186,198],resp:14,respect:[6,10,11,14,24,26,40,55,57,88,197],respond:[0,6,12,199],respons:[0,6,20,36,53,58,198],ressourc:22,rest:[6,11,12,22,24,30,58,195],restart:[36,48,56,58,125,132,150,185,197],restor:[48,58,61,186,192,193],restrict:[6,10,11,13,18,55],restructuredtext:27,result:[0,6,10,11,12,14,17,20,22,28,33,36,48,53,55,61,182,183,184,185,186,187,188,189,190,191,192,193,194,199],resum:[64,125,145],resumehandoff:125,resurrect:48,resync:[125,144],retain:[20,36,43,48,190,192],rethrow:25,retir:27,retri:[0,6,11,22,53,88],retriev:[11,13,20,26],reus:30,revers:[11,13],revert:197,review:[11,25,27,32,33,35,42],revis:60,revok:[9,56],revoke_permission_stat:12,revoke_role_stat:12,rewrit:[45,48,49,62,125,147,178,182,190],rewritten:[50,147,190],rfc:[14,22],rhel:42,rich:[22,197],rid:26,rider:22,riderresult:22,right:[6,19,31,34,36,55,61,198,199],ring:[2,6,42,55,56,58,61,121,123,125,158,186,195],rise:195,risk:
[11,48],riski:48,rmb:199,rmem_max:6,rmi:[36,56],rogu:14,role:[6,9,10,12,15,52],role_a:20,role_admin:20,role_b:20,role_c:20,role_manag:56,role_nam:20,role_opt:20,role_or_permission_stat:12,role_permiss:6,roll:[36,43,56,88],roll_cycl:[43,88],rollcycl:43,rollingfileappend:43,rollingpolici:43,rollov:43,romain:22,room:[5,8,34],root:[6,29,33,40,194,197],rotat:[6,197],roughli:6,round:[13,48,53],rout:[6,57],routin:199,row:[0,4,6,10,11,13,14,15,17,18,35,41,45,49,50,53,60,61,62,95,116,120,125,147,149,150,182,187,190,194,199],rowcach:[42,53],rowcachehit:53,rowcachehitoutofrang:53,rowcachemiss:53,rowindexentri:53,rows_per_partit:11,rpc:[6,53],rpc_timeout_in_m:[111,161],rpm:34,rpmsign:34,rrqm:199,rsc:179,rst:27,rubi:[14,39],rule:[6,12,14,33,36,195,197],run:[4,5,6,12,22,24,26,29,31,33,34,36,37,40,48,50,53,55,56,58,59,60,62,116,140,163,182,185,186,187,189,191,192,196,197,198,199],runnabl:199,runtim:[6,18,39,105,125],runtimeexcept:25,rust:39,safe:[14,22,48,56,199],safeguard:50,safepoint:197,safeti:[11,48,58],sai:42,said:[11,33,36,125,178,199],same:[0,4,5,6,11,12,13,14,15,17,18,19,20,22,27,29,31,33,37,42,45,48,53,55,56,57,58,60,140,188,193,195,197,199],samerow:60,sampl:[4,6,12,14,53,60,61,88,125,127,129,175],sampler:[53,127,175,198],san:50,sandbox:[6,14],sasi:6,satisfi:[0,11,25,50,53,58],satur:[6,53,198,199],save:[6,13,22,24,26,36,37,45,49,50,58,60,125,150],saved_cach:6,saved_caches_directori:37,sbin:36,scala:[14,39],scalabl:[34,59],scalar:15,scale:[35,49,59,60],scan:[6,13,45,53],scenario:29,scene:36,schedul:[6,24],schema:[9,11,14,17,53,56,60,61,72,125,134,144,185,187],schema_own:20,scope:[20,43,53,56],score:[6,14,22,57],script:[6,14,24,31,34,35,62,88,182,183,184,185,186,187,188,190,191,192,193,194,199],scrub:[45,48,49,53,62,125,171,182],sda:199,sdb:199,sdc1:199,sdc:199,search:[33,59,197],searchabl:199,second:[6,11,12,13,22,36,47,50,56,60,61,62,125,148,156,182,195,197,198,199],secondari:[10,12,13,15,42,48,53,59,125,131],secondary_index_stat:12,secondaryindexmanag:[53,198],section:[2,4,5,7,10,11,12,13,15,20,22,34,36,39,40,41,43,48,53,55,56,58,62,182,186,197,198],secur:[6,14,15,34,42,52],see:[0,4,6,10,11,12,13,14,17,20,22,26,28,31,33,34,35,41,42,43,48,53,56,58,61,95,125,140,185,187,188,191,197,198,199],seed:[6,37,42,57,108,125,135],seedprovid:6,seek:[4,6,50,53],seen:[6,11],segment:[4,6,43,47,53,61,88,197,198],segment_nam:47,segmentid:187,select:[6,9,10,11,12,14,15,19,20,24,31,34,35,36,41,43,45,48,56,60,61,130,197,198,199],select_claus:13,select_stat:[12,18],self:30,selinux:36,semant:[10,13,14],semi:36,send:[6,8,36,60,195,199],sendto:60,sens:[10,13,15,36],sensic:14,sensit:[11,12,14,17,199],sensor:22,sent:[0,6,11,22,36,53,195,199],sentenc:33,separ:[4,6,11,13,25,27,33,37,43,48,50,56,58,61,63,65,66,68,73,75,81,84,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,183,186,190],septemb:33,seq:[6,140],sequenc:12,sequenti:[6,50,140],seren:13,seri:[11,34,48,61],serial:[4,6,62],serializingcacheprovid:6,seriou:[27,195,198],serv:[13,50,56,199],server:[6,12,13,22,31,32,34,35,36,50,53,56,59,60,186,195],server_encryption_opt:[56,186],servic:[6,31,40,53,56,58,197,199],session:[6,20,56,62,125,141],set:[0,4,6,9,10,11,12,13,14,17,18,27,30,32,33,35,37,42,43,45,47,48,49,50,53,56,57,58,60,61,62,65,84,95,125,138,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,178,182,183,185,186,192,194,195,196,197,198,199],set_liter:[20,22],setbatchlogreplaythrottl:125,setcachecapac:125,setcach
ekeystosav:125,setcompactionthreshold:[48,125],setcompactionthroughput:[48,125],setconcurrentcompactor:125,setconcurrentviewbuild:[18,125],sethintedhandoffthrottlekb:125,setint:14,setinterdcstreamthroughput:125,setlogginglevel:[125,197],setlong:14,setmaxhintwindow:125,setstr:14,setstreamthroughput:125,setter:[20,24,25],settimeout:125,settraceprob:125,setup:[27,33,35,56],sever:[4,13,20,48,55,56,60,186],sfunc:[9,14],sha1:[34,192],sha:[29,34],shadow:[18,48],shape:60,shard:4,share:[11,13,31,187,195,199],sharedpool:61,sharp:38,shed:36,shell:[41,42,62],shift:22,ship:[26,35,41,56,61,197,199],shortcut:18,shorter:[27,56],shorthand:61,shortlog:34,should:[0,4,5,6,10,11,12,13,14,17,20,22,24,26,27,30,31,33,35,36,37,38,39,41,43,45,48,49,50,53,55,56,57,58,60,61,130,140,161,191,193,195,199],shouldn:[11,37],show:[20,26,42,43,55,58,62,73,93,113,125,129,139,146,165,166,174,181,182,194,195,197,198,199],shown:[12,61,174,186],shrink:6,shut:6,shutdown:[4,6,50],side:[6,11,13,17,22,56,195],sig:34,sign:[13,22,36],signal:[6,125,136],signatur:[40,47],signifi:199,signific:[6,27,31,33,35,50,195],significantli:[6,55,199],silent:14,similar:[6,13,14,49,50,194,195,199],similarli:[0,10,17,25,50,125,130],similiar:55,simpl:[6,11,26,28,31,35,56],simple_classnam:35,simple_select:13,simplequerytest:35,simplereplicationstrategi:56,simpleseedprovid:6,simplesnitch:[6,57],simplestrategi:60,simpli:[0,4,6,11,13,14,17,22,31,35,48,50,53,58,179],simul:35,simultan:[6,50,61,65,95,138,147,178],sinc:[6,11,13,14,22,27,31,35,36,40,48,53,55,58,185,188,190,198,199],singl:[0,6,10,11,12,13,14,17,18,20,22,25,33,37,41,42,52,53,55,56,57,61,62,68,182,195,197,198,199],singleton:30,site:[27,34],situat:[6,35,48,199],size:[4,6,11,22,25,36,37,43,45,47,49,50,52,53,56,60,61,62,88,122,125,182,185,187,188,189,192,197,198,199],size_estim:[125,133,197],sizeandtimebasedrollingpolici:43,sizetieredcompactionstrategi:[11,48,198],skinni:198,skip:[6,13,36,53,58,61,62,147,164,182,185,191],skipcol:61,skiprow:61,sks:40,sla:30,slack:[5,33,42,55],slash:12,slave:24,sleep:199,slf4j:[25,26,43],slightli:6,slow:[6,11,57,195,197,198,199],slower:[6,11,45,198,199],slowest:6,slowli:[6,22],small:[4,6,11,13,22,36,48,50,62,182,186,195,199],smaller:[4,6,36,48,50,61,191],smallest:[0,11,14,53,188],smallint:[9,10,14,17,19,22],smith:22,smoother:10,smoothli:6,snappi:[4,6],snappycompressor:[11,49],snapshot:[4,6,26,53,62,66,122,125,147,182,190,194,199],snapshot_nam:[66,192],snapshotnam:[66,125],snitch:[6,42,52,72,125],snt:199,socket:[6,56,161],soft:27,softwar:26,sole:[11,28],solid:[6,50],solr:59,solut:24,some:[0,6,9,11,12,13,14,22,26,27,28,31,33,34,35,36,37,47,48,49,53,56,58,61,187,189,195,197,198,199],some_funct:14,some_keysopac:11,some_nam:12,someaggreg:14,somearg:14,somefunct:14,someon:[29,48],someth:[6,189,197,199],sometim:[6,12,13,195,196,197,198,199],someudt:14,somewher:[40,55],soon:56,sooner:6,sort:[4,11,13,22,48,50,59,174,188,197],sort_kei:174,sourc:[4,5,6,8,14,24,26,27,28,32,34,40,43,53,62,130,183,192,195],source_elaps:61,space:[4,6,25,36,47,48,50,53,191,199],space_used_by_snapshots_tot:174,space_used_l:174,space_used_tot:174,span:[6,13,48],spare:[24,197],sparingli:13,spark:38,speak:[196,197,199],spec:[30,41,53,60,61],speci:[11,18],special:[12,13,35,36,48,53,62,193],specif:[9,11,12,13,22,27,31,33,36,38,47,48,53,55,56,60,61,125,130,140,186],specifc:53,specifi:[0,6,10,11,12,13,14,16,18,20,22,26,31,36,41,47,48,49,53,56,58,60,61,62,66,68,109,125,130,140,146,159,161,164,171,174,177,182,186,192,195],specific_dc:140,specific_host:140,specific_keyspac:130,specific_sourc:130,specific_token:130
,specifii:20,specnam:60,specul:[0,11,53],speculative_retri:11,speculativefailedretri:53,speculativeinsufficientreplica:53,speculativeretri:53,speculativesamplelatencynano:53,speed:[6,42,49,62,182,198],spend:199,spent:[53,199],sphinx:32,spike:36,spin:[6,50],spindl:[4,6],spirit:[6,57],split:[25,36,48,53,60,61,62,68,182],spread:[6,11,57],sql:[13,15],squar:12,squash:[27,33],src:[26,34,130],ssd:[6,16,50,199],ssh:195,ssl:[6,36,52,60,61,62,125,136,182],ssl_storage_port:57,ssp:186,sss:17,sstabl:[2,6,11,36,42,45,49,50,52,62,65,68,95,109,116,125,132,138,147,178,179,183,187,188,190,191,192,194,197,198,199],sstable_act:197,sstable_compression_ratio:174,sstable_count:174,sstable_s:48,sstable_size_in_mb:48,sstable_task:197,sstabledump:[62,182],sstableexpiredblock:[48,62,182],sstablelevelreset:[62,182],sstableload:[56,62,182],sstablemetadata:[62,182,185,189],sstableofflinerelevel:[62,182],sstablerepairedset:[62,182,187],sstablerepairset:189,sstablescrub:[62,182],sstablesperreadhistogram:53,sstablesplit:[62,182],sstableupgrad:[62,182],sstableutil:[62,182,183,187],sstableverifi:[62,182],sstablewrit:25,stabil:[24,33],stabl:[40,61,197],stack:[6,190,191,192,193,194,199],stackcollaps:199,staff:60,staff_act:60,stage:[33,34,100,153,195,198],staging_numb:34,stai:[42,48],stale:56,stall:[6,58],stamp:43,stand:35,standalon:35,standard1:[184,186,187,189,190,192,197],standard:[6,22,24,28,36,40,53,60,183,187,197],start:[0,6,9,13,27,32,36,37,40,42,48,50,53,55,56,58,68,140,171,188,192,195,197,198,199],start_token:[68,140],start_token_1:130,start_token_2:130,start_token_n:130,starter:33,startup:[4,6,21,31,36,48,53,58,193],startupcheck:197,starvat:6,stat:187,state:[6,14,45,48,50,53,58,125,165,196,197],statement:[6,9,10,11,13,14,15,16,17,20,21,22,30,32,43,45,48,53,56,60,61,195,199],static0:11,static1:11,staticcolumn:187,statist:[4,48,53,61,70,96,125,128,173,174,176,186,187,192,193,198],statu:[20,24,30,33,36,40,56,61,62,125,139,166,167,168,169,170,179,182,195,196],statusautocompact:125,statusbackup:125,statusbinari:125,statusgossip:125,statushandoff:125,stc:11,stdev:[60,199],stdin:61,stdout:61,stdvrng:60,step:[6,24,27,31,32,34,56,196,197],still:[0,6,10,11,13,14,17,20,22,24,25,34,55,56,58,61,184,195,199],stop:[4,6,40,61,83,125,143,172,182,183,184,185,186,187,188,189,190,191,192,193,194,197],stop_commit:6,stop_paranoid:6,stopdaemon:125,storag:[0,2,11,15,16,33,36,42,49,50,52,59,186,187],storage_port:[37,57],storageservic:[6,25,56],store:[0,4,6,10,11,12,13,22,42,45,48,49,50,53,56,59,61,80,88,90,125,170,186,187,190],store_typ:6,stort:188,straight:[26,58,199],straightforward:47,strategi:[0,6,11,52,57,60,185,198],stratio:42,stream:[4,6,42,48,49,52,55,64,104,110,125,130,140,157,158,160,161,186,193,199],stream_throughput_outbound_megabits_per_sec:186,street:22,strength:6,stress:[42,62,199],stresscql:60,strict:[10,48],strictli:[8,11,14],string:[4,6,10,11,12,13,14,16,17,20,21,22,24,53,61,109,183],strong:0,strongli:[6,11,12,56],structur:[4,6,9,20,27,30,45,53,62,182,199],stub:56,stuck:188,style:[6,30,31,32,33,35,42],stype:[9,14],sub:[11,13,22,40,48,199],subclass:6,subdirectori:[6,21],subject:[6,14,20,56],submiss:[6,33],submit:[32,33,35,42,68],subopt:60,subrang:6,subscrib:[8,28],subscript:8,subsequ:[6,11,13,20,36,48,49,56],subset:[0,20,48,61,195],substanti:199,substitut:40,substract:19,subsystem:56,subtract:187,subvert:48,succed:53,succeed:194,succesfulli:53,success:[0,34,43,61],successfulli:[34,43,53,194],sudden:6,sudo:[36,40,199],suffer:199,suffici:[6,50,56],suggest:[12,27,28,33,50,194],suit:[6,24,33,35,56,186],suitabl:[13,14,30,33],sum:4
7,summari:[4,6,33,53,186,187,192,193],sun:[25,56,199],sunx509:186,supercolumn:9,supersed:[10,147,190],superus:[9,20,56],suppli:[11,13,29,183,195],support:[0,4,6,9,10,11,12,13,14,15,16,18,19,20,22,28,31,33,35,36,38,42,48,56,61,62,147,171,190,197,199],suppos:13,sure:[6,8,24,25,26,27,28,31,33,35,36,37,40,48,60,199],surfac:56,surplu:36,surpris:0,surprisingli:6,surround:[17,61],suscept:14,suspect:[5,33,199],suspend:31,svctm:199,svg:199,svn:34,svnpubsub:34,swamp:36,swap:[4,6,199],swiss:163,symbol:199,symmetri:17,symptom:36,sync:[4,6,27,36,53,55,140,199],synchron:[6,55],synctim:53,synonym:20,synopsi:[63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181],syntact:[11,20],syntax:[10,12,13,14,20,22,27,48,49,60],syntaxerror:195,sys:6,sysctl:[6,36],sysf:199,sysintern:6,system:[6,11,14,20,31,35,36,37,41,43,48,50,53,56,59,61,99,101,102,104,110,116,125,132,133,134,152,154,155,157,160,186,191,193,195,196,199],system_auth:[6,56],system_schema:[20,43],system_trac:140,system_view:197,system_virtual_schema:[43,197],tab:[25,31],tabl:[0,4,6,9,10,12,13,14,15,16,17,18,20,21,22,35,43,45,48,49,52,55,56,60,61,62,65,68,75,83,85,94,95,98,103,107,116,125,131,132,134,138,140,147,151,164,166,171,173,174,178,179,182,184,186,187,189,193,194,195,197,198],table1:[20,55],table2:55,table_definit:60,table_nam:[11,13,16,20,21,48,174,197],table_opt:[11,18],tablehistogram:[125,198],tablestat:125,tag:[22,30,34,164],tail:197,take:[6,10,11,13,14,22,27,30,31,33,34,36,45,48,49,50,58,125,164,189,191,194,197,198,199],taken:[6,47,48,53,60,192],tar:[26,40],tarbal:[26,37,39,61],target:[11,20,26,31,35,43,48,186],task:[6,24,26,28,31,33,53,61,197,198,199],taskdef:35,tcp:[6,36,199],tcp_keepalive_intvl:36,tcp_keepalive_prob:36,tcp_keepalive_tim:36,tcp_nodelai:6,tcp_retries2:6,tcp_wmem:6,tcpdump:199,teach:[6,57],team:[34,36],technetwork:6,technic:[11,15],techniqu:[196,199],technot:6,tee:40,tell:[6,13,30,36,37,53,199],templat:[24,34],tempor:6,temporari:[56,62,182],temporarili:6,tend:[4,6,36,50],tendenc:6,tent:34,terabyt:49,term:[6,13,14,15,18,22,59],termin:[12,20,61],ternari:25,test:[6,25,26,30,32,33,34,41,42,50,60,61],test_keyspac:[56,197],testabl:[30,33],testbatchandlist:35,testmethod1:35,testmethod2:35,testsom:35,teststaticcompactt:35,text:[4,9,11,12,13,14,17,22,27,34,43,47,49,56,59,60,199],than:[0,4,6,11,12,13,14,15,18,19,22,25,33,42,47,48,49,50,56,57,58,60,141,154,155,184,186,188,191,192,195,197,198,199],thei:[6,9,10,11,12,13,14,15,18,19,20,22,25,30,33,35,42,43,45,48,49,50,53,56,184,188,193,194,195,197,198,199],them:[0,6,10,11,13,14,22,24,25,28,33,34,35,36,41,43,45,48,53,56,125,178,186,193,195,197,199],themselv:[13,20],theoret:11,therefor:[27,33,35,56,185,193],theses:56,thi:[0,2,4,5,6,7,10,11,12,13,14,15,17,18,20,22,24,25,26,27,28,29,30,31,33,34,35,36,37,39,40,42,43,45,48,49,50,53,55,56,57,58,60,61,62,63,65,66,68,71,73,75,81,85,91,94,95,97,98,100,103,107,109,111,115,116,123,125,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,197,198,199],thing:[6,22,28,29,33,36,39,48,55,199],think:6,third:[22,30,42,53,198],thobb:61,those
:[11,12,13,14,16,17,18,20,22,33,36,47,48,56,61,178,186,190,191,193,195,199],though:[10,12,22,42,48,49,53],thought:191,thousand:61,thousandssep:61,thread:[4,6,18,43,50,53,56,60,65,95,125,138,140,147,156,176,178,188,197,198],threaddump:199,threadpool:[52,196],threadpoolnam:53,threadprioritypolici:31,three:[0,6,11,45,48,49,56,61,195,197,198],threshold:[4,11,47,50,57,98,125,151,158,199],thrift:[9,60],throttl:[6,24,62,97,125,148,152,156,157,160,182],through:[0,5,9,10,11,12,13,18,24,27,31,33,36,41,43,47,48,61,199],throughout:56,throughput:[0,6,48,49,50,53,99,104,110,125,152,157,160,186,197,198],throwabl:[30,35],thrown:[22,188],thu:[6,10,11,12,13,18,22,36,53,57,58,125,178],thumb:[6,33],thusli:22,tib:[70,124,174],tick:33,ticket:[5,27,28,29,30,33,34,35,47],tid:199,tie:36,tier:52,ties:[13,198],tighter:6,tightli:6,tild:61,time:[0,4,6,8,9,10,11,12,13,15,16,17,18,25,27,30,31,33,35,36,43,45,47,49,52,53,55,56,59,60,61,125,127,187,189,194,195,197,198,199],timefram:58,timehorizon:6,timelin:11,timeout:[6,22,36,53,61,111,125,161,195,198],timeout_in_m:161,timeout_typ:[111,161],timer:[6,53],timestamp:[4,9,10,11,13,14,15,17,19,42,43,48,61,62,147,182,184,187,190],timeunit:48,timeuuid:[9,10,11,17,22,60],timewindowcompactionstrategi:11,timezon:[17,61],tini:[6,48],tinyint:[9,10,14,17,19,22],tip:195,titl:[33,60],tjake:25,tls_dhe_rsa_with_aes_128_cbc_sha:6,tls_dhe_rsa_with_aes_256_cbc_sha:6,tls_ecdhe_rsa_with_aes_128_cbc_sha:6,tls_ecdhe_rsa_with_aes_256_cbc_sha:6,tls_rsa_with_aes_128_cbc_sha:6,tls_rsa_with_aes_256_cbc_sha:6,tmp:[6,192,193,197,199],tmpf:199,tmplink:193,toc:[4,192,193],tock:33,todai:12,todat:14,todo:30,togeth:[6,11,13,14,24,48,195,198,199],toggl:56,tojson:15,token:[2,4,6,9,10,12,13,36,48,53,55,60,61,68,73,116,117,123,125,130,140,146,179,187,188,195,197,198],tokenawar:195,tokenrang:60,toler:[0,45],tom:13,tombston:[4,6,11,17,36,52,53,55,95,147,184,187,190,199],tombstone_compaction_interv:48,tombstone_threshold:48,tombstonescannedhistogram:53,ton:35,too:[6,11,12,14,22,30,48,60,195,198,199],took:[195,197],tool:[6,12,26,27,33,34,36,42,43,48,53,56,58,60,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198],toolset:199,top:[13,22,24,33,34,42,53,127,174,175,188],topcount:[127,175],topic:61,topolog:[6,57,146],toppartit:125,total:[4,6,13,47,48,53,60,186,197,198,199],total_replica:[0,11],totalblockedtask:53,totalcolumnsset:187,totalcommitlogs:53,totalcompactionscomplet:53,totaldiskspaceus:53,totalhint:53,totalhintsinprogress:53,totallat:53,totalrow:187,totalsizecap:43,totimestamp:14,touch:[8,36,48],tough:35,tounixtimestamp:14,tour:22,tpstat:[125,198],trace:[6,53,62,112,125,140,162,190,191,192,193,194,197,199],tracerout:199,track:[4,6,48,53],tracker:[27,33],tradeoff:[0,6,199],tradit:[48,49],traffic:[6,56,57,199],trail:25,transact:[13,21,53,62,171,182],transfer:[6,36,56,186],transform:13,transient_replica:[0,11],transit:[10,20,26],translat:199,transpar:[6,36],transport:[6,31,53,60,77,87,125,168,186,198],treat:[0,6,10,36,57],tree:[6,26,31,53,55],tri:[6,48,195],trigger:[4,6,9,11,12,15,24,42,43,45,49,52,56,65,125,137],trigger_nam:21,trigger_stat:12,trip:[6,13],trivial:56,troubl:197,troubleshoot:[6,30,32,42,55,195,197,198,199],truesnapshotss:53,truli:9,truncat:[4,6,9,10,15,20,56,60,111,125,161,177],truncate_stat:12,truncatehint:125,trunk:[27,29,30,31,33,35],trust:56,trusti:199,trustor:6,truststor:[6,56,60,186],truststore_password:6,truststorepassword:56,tspw:186,tstamp:183,ttl:[4,6,9,10,11,14,17,22,52,147,187,190],tty:61,tunabl:2,tune:[11,36,45,48,50,197,198],tupl:[6,9,10,12,13,14,15,17],tuple_liter:[12,13],t
uple_typ:22,tuplevalu:[10,14],turn:[0,6,33,36,56,195],twc:[11,48],twice:[4,6,22],two:[0,6,11,12,13,14,17,19,31,42,43,45,48,50,56,57,61,187,198,199],txt:[4,14,29,30,33,34,192,193],type:[0,4,6,10,11,12,13,14,15,19,20,30,32,40,42,43,50,52,55,56,60,61,111,125,161,171,183,186,187,191,193,197,198],type_hint:12,typeasblob:14,typecodec:14,typic:[0,6,13,36,45,48,50,53,56,59,61,192,195,197,198,199],typo:27,ubuntu:31,udf:[6,14],udf_stat:12,udfcontext:[10,14],udt:[14,17],udt_liter:12,udt_nam:22,udt_stat:12,udtarg:14,udtnam:14,udtvalu:[10,14],ulimit:36,ultra:49,unabl:[4,30,42,198],unacknowledg:6,unaffect:22,unari:19,unavail:[6,11,53,56,58,199],unavailableexcept:195,unblock:53,unbound:[6,22],uncaught:197,unchecked_tombstone_compact:48,uncom:[6,53,56],uncommon:33,uncompress:[4,6,49,53],unconfirm:6,undecor:4,undelet:48,under:[6,22,24,25,35,43,53,56,199],underli:[6,18,48,56,199],understand:[6,33,36,55,56,197,199],unencrypt:[6,56],unexpect:[4,182,183,184,185,186,187,188,189,190,191,192,193,194],unexpectedli:22,unfinishedcommit:53,unflush:164,unfortun:35,uniform:60,uniq:197,uniqu:[11,14,22,60,187],unit:[22,30,32,48,125,149,186,191,198],unix:[43,196],unixtimestampof:[10,14],unknown:188,unless:[6,11,13,16,18,20,22,25,47,56,57,187,191,199],unlik:[6,10,13,22],unlimit:[6,36,61,186],unlog:[9,53,60],unnecessari:[30,58],unnecessarili:47,unpredict:13,unprepar:53,unquot:12,unquoted_identifi:12,unquoted_nam:11,unreach:55,unrel:[33,195],unrepair:[6,52,53,55,62,182],unrespons:11,unsafe_aggressive_sstable_expir:48,unsecur:56,unselected_column:18,unset:[6,10,13,17,189],unsign:22,unspecifi:6,unsubscrib:[8,42],unsuccess:43,untar:40,until:[0,4,6,11,18,22,45,47,48,49,56,57],unus:6,unusu:30,unwrit:6,updat:[6,9,10,11,12,14,15,17,18,20,22,27,30,32,33,35,40,42,43,48,49,53,56,60,61,197,198],update_paramet:13,update_stat:[12,13],updatewithlwt:60,upgrad:[4,6,11,48,125,178,192,193],upgradesst:[45,48,49,125],upload:33,upload_bintrai:34,upon:[6,22,43,45,47,49],upper:[12,17,48,56],ups:50,upstream:33,uptim:[117,125],urgent:[6,34],url:[27,29,60],usag:[0,4,6,11,22,42,45,47,49,52,53,61,62,182],use:[0,4,6,9,10,11,12,13,14,16,17,18,20,22,24,25,27,30,31,33,34,35,37,40,41,42,43,45,47,48,50,53,56,57,58,60,61,65,95,108,125,127,138,147,175,178,183,186,187,189,190,191,193,195,196,197,198,199],use_k:43,use_stat:12,usec:199,usecas:48,useconcmarksweepgc:31,usecondcardmark:31,used:[0,4,6,9,10,11,12,13,14,15,16,17,18,20,22,26,30,31,33,34,35,36,43,48,49,50,53,56,57,58,60,61,63,65,66,68,73,75,81,84,85,91,94,95,98,100,103,107,109,111,115,116,123,125,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,183,184,185,186,187,188,189,191,192,193,195,198,199],useecassandra:56,useful:[0,4,6,11,14,26,33,48,49,53,55,58,61,63,65,66,68,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,185,187,197,198,199],useparnewgc:31,user1:13,user2:13,user3:13,user4:13,user:[0,5,6,8,9,10,11,12,13,15,16,17,18,24,30,32,33,34,36,40,43,45,48,49,50,56,61,62,68,84,125,184,192,197,199],user_count:13,user_defined_typ:22,user_funct:20,user_nam:13,user_occup:13,user_opt:20,useract:13,userid:[11,13,14],userindex:16,usernam:[6,13,14,53,56,61,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,116,117,118,119,120,121,122,123,124,125,126,127,1
28,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,186],uses:[0,4,6,11,12,13,14,16,20,21,27,35,36,56,60,194,198,199],usethreadprior:31,using:[4,6,10,11,12,13,14,18,20,22,24,31,32,33,35,39,40,41,42,43,45,49,50,52,53,56,58,61,68,130,147,164,183,185,187,188,189,190,193,195,196,197,198,199],usr:[61,199],usual:[6,13,22,29,33,35,45,56,140,190,195,197],utc:[17,61],utd:11,utf8:[22,61],utf8typ:[9,187],utf:61,util:[4,14,30,48,61,197,199],uuid:[9,10,11,12,17,22],val0:11,val1:11,val:[14,60],valid:[0,6,10,11,12,13,14,17,22,34,36,48,49,53,55,56,61,62,140,147,171,182,194],validationexecutor:[53,198],validationtim:53,valu:[4,6,9,10,11,12,13,14,16,17,19,22,30,31,34,36,43,45,48,49,53,56,57,59,60,61,62,84,112,116,125,148,152,154,155,156,157,159,160,161,162,182,183,194,195,197,199],valuabl:197,value1:13,value2:13,value_in_kb_per_sec:[148,156],value_in_m:159,value_in_mb:[152,157,160],valueof:14,varchar:[9,11,14,17,22],vari:[11,49],variabl:[6,10,12,17,22,24,31,34,39,189],varianc:197,variant:12,varieti:47,varint:[9,11,14,17,19,22],variou:[6,11,24,31,35,50,56,60,182,196,197],vector:56,verbos:[186,190,193,194],veri:[6,11,13,27,33,35,36,45,48,49,50,189,194,195,197,198,199],verif:[62,182],verifi:[33,36,38,40,49,55,116,125,171,182,183,184,185,186,187,188,189,190,191,192,193,194],versa:193,version:[2,5,6,9,11,14,15,22,26,31,33,38,40,48,53,58,62,67,72,82,92,125,178,179,182,190,193,197],vertic:61,via:[4,6,8,10,18,20,26,30,31,36,37,43,48,49,50,53,55,56,57,187,189,199],vice:193,view:[0,6,10,11,12,15,20,42,53,61,102,125,155,181,189,197,198,199],view_nam:18,viewbuildexecutor:[53,198],viewbuildstatu:125,viewlockacquiretim:53,viewmutationstag:[53,198],viewpendingmut:53,viewreadtim:53,viewreplicasattempt:53,viewreplicassuccess:53,viewwrit:53,viewwritelat:53,virtual:[0,6,36,48,53,58],visibl:[11,20,25,45],visit:60,visual:[27,197],vnode:[6,49],volum:[4,6,47,49,194,198,199],vote:32,vulner:[6,34,56],w_await:199,wai:[4,6,12,15,17,18,22,24,28,29,31,35,36,43,48,49,140,187,188,189,190,197,199],wait:[0,4,6,11,33,36,43,53,125,142,197,198,199],waitingoncommit:53,waitingonfreememtablespac:53,waitingonsegmentalloc:53,want:[4,6,11,13,24,31,33,34,35,36,43,55,56,58,60,185,186,189,197,199],warmup:[60,125,150],warn:[6,11,25,35,52,140,194,197],warrant:198,washington:22,wasn:10,wast:6,watch:[35,199],weaker:0,web:27,websit:[35,40,199],week:[22,55,189],weibul:60,weight:[43,53,88],welcom:8,well:[0,6,11,13,14,17,22,30,31,43,47,49,50,56,57,125,143,192,197,199],went:53,were:[6,9,10,20,30,31,34,48,53,190,193,197,198],west:34,what:[11,13,22,27,28,32,35,37,42,48,50,56,60,61,187,195,196,197,198,199],whatev:[10,13,36],whedon:13,wheel:192,when:[4,6,9,10,11,12,13,14,15,16,17,20,22,24,25,27,30,33,34,35,37,42,43,45,47,49,50,52,53,55,56,57,58,60,61,63,65,66,68,71,73,75,81,85,91,94,95,98,100,103,107,109,111,115,116,123,127,130,131,132,138,139,140,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,171,173,174,175,177,178,179,181,187,188,193,195,197,198,199],whenev:[188,199],where:[0,4,6,9,10,11,12,14,16,17,18,19,20,22,30,35,37,40,43,45,48,49,56,58,60,61,88,140,195,197,199],where_claus:13,wherea:[22,56,198],whether:[0,6,9,11,13,31,48,57,61,88],which:[0,4,5,6,10,11,12,13,14,15,16,18,19,20,21,22,27,33,34,35,36,37,40,41,43,45,47,48,49,50,53,55,56,57,58,60,68,103,107,116,130,140,193,195,196,197,198,199],whichev:[0,6],whilst:6,whitelist:56,whitespac:32,who:[20,24,33,36],whole:[6
,13,14,22,48,55],whose:[11,22,171],why:[30,33,42,184,195,197,199],wide:[4,47,198],width:12,wiki:[6,27,31],wildcard:[13,20,191],wildli:11,window:[0,4,6,52,53,56,106,114,125,159,196],winner:36,wip:33,wipe:[36,58],wire:[6,36],wireshark:199,wise:11,wish:[6,34,48,53,197],within:[0,4,6,11,12,13,16,31,33,34,36,48,50,53,56],withing:6,without:[0,6,11,12,13,14,20,22,29,31,33,34,35,36,47,50,53,56,61,62,63,116,125,132,182,183],wmb:199,wmem_max:6,won:[4,6,13,27,29,55,199],wont:[43,48],word:[10,11,12,18,20,22,36,47,56],work:[0,4,6,10,11,14,15,17,24,25,28,29,31,32,34,35,36,42,48,50,53,55,56,57,58,61,186,199],workaround:[186,190],worker:61,workload:[6,28,30,45,48,50,60,198,199],workspac:31,worktre:31,worri:[33,36],wors:[6,57],worst:[6,33],worth:[6,43],worthwhil:6,would:[6,12,13,14,17,20,27,31,33,35,42,48,49,50,55,56,57,187,189,193,197,199],wrap:57,write:[0,4,6,10,11,13,22,25,27,28,30,35,36,47,48,49,50,53,55,56,57,58,60,61,83,111,125,161,174,187,190,193,195,197,198,199],write_lat:174,write_request_timeout:36,writefailedideacl:53,writelat:[53,195],writer:[4,6,25],writetim:[9,14],writetimeoutexcept:[6,195],written:[4,6,11,21,24,36,43,45,48,49,53,55],wrong:[6,34,198],wrqm:199,wrst:199,wrte:53,www:[6,34,40,199],x86:59,xandra:38,xarg:[189,197],xdm:199,xlarg:50,xlarge_daili:43,xml:[26,31,34,35,37,197],xmn220m:31,xms1024m:31,xmx1024m:31,xmx:50,xss256k:31,xzvf:40,yaml:[0,4,6,14,18,20,37,40,53,56,57,58,60,69,84,88,125,143,174,176,186,187,195],year:[13,22],yes:[9,11,56],yet:[6,11,24,28,34,47,53,193],ygc:199,ygct:199,yield:[13,43,58,199],ymmv:197,you:[0,4,5,6,8,10,11,12,13,14,16,17,18,20,21,22,24,25,26,27,28,29,31,32,34,35,36,37,38,39,40,41,42,43,47,48,53,55,56,57,58,59,60,61,63,125,164,183,185,186,187,189,190,191,193,194,195,196,197,198,199],young:199,younger:14,your:[0,5,6,8,10,11,12,25,27,28,31,32,33,35,36,37,40,42,48,50,55,56,57,60,61,186,191,194,196,197,198,199],yourself:[28,29,35],yum:34,yyyi:[17,22,43],z_0:[11,16,18],zero:[6,10,11,36,53,57,197],zgrep:197,zip:[22,43],zipcod:22,zone:[6,22,57],zoomabl:199,zstd:4,zstdcompressor:49},titles:["Dynamo","Guarantees","Architecture","Overview","Storage Engine","Reporting Bugs","Cassandra Configuration File","Configuring Cassandra","Contact us","Appendices","Changes","Data Definition","Definitions","Data Manipulation","Functions","The Cassandra Query Language (CQL)","Secondary Indexes","JSON Support","Materialized Views","Arithmetic Operators","Security","Triggers","Data Types","Data Modeling","Jenkins CI Environment","Code Style","Dependency Management","Working on Documentation","Getting Started","How-to Commit","Review Checklist","Building and IDE Integration","Contributing to Cassandra","Contributing Code Changes","Release Process","Testing","Frequently Asked Questions","Configuring Cassandra","Client drivers","Getting Started","Installing Cassandra","Inserting and querying","Welcome to Apache Cassandra\u2019s documentation!","Audit Logging","Backups","Bloom Filters","Bulk Loading","Change Data Capture","Compaction","Compression","Hardware Choices","Hints","Operating Cassandra","Monitoring","Read repair","Repair","Security","Snitch","Adding, replacing, moving and removing nodes","Third-Party Plugins","Cassandra Stress","cqlsh: the CQL shell","Cassandra 
Tools","assassinate","bootstrap","cleanup","clearsnapshot","clientstats","compact","compactionhistory","compactionstats","decommission","describecluster","describering","disableauditlog","disableautocompaction","disablebackup","disablebinary","disablefullquerylog","disablegossip","disablehandoff","disablehintsfordc","disableoldprotocolversions","drain","enableauditlog","enableautocompaction","enablebackup","enablebinary","enablefullquerylog","enablegossip","enablehandoff","enablehintsfordc","enableoldprotocolversions","failuredetector","flush","garbagecollect","gcstats","getbatchlogreplaythrottle","getcompactionthreshold","getcompactionthroughput","getconcurrency","getconcurrentcompactors","getconcurrentviewbuilders","getendpoints","getinterdcstreamthroughput","getlogginglevels","getmaxhintwindow","getreplicas","getseeds","getsstables","getstreamthroughput","gettimeout","gettraceprobability","gossipinfo","handoffwindow","help","import","info","invalidatecountercache","invalidatekeycache","invalidaterowcache","join","listsnapshots","move","netstats","Nodetool","pausehandoff","profileload","proxyhistograms","rangekeysample","rebuild","rebuild_index","refresh","refreshsizeestimates","reloadlocalschema","reloadseeds","reloadssl","reloadtriggers","relocatesstables","removenode","repair","repair_admin","replaybatchlog","resetfullquerylog","resetlocalschema","resumehandoff","ring","scrub","setbatchlogreplaythrottle","setcachecapacity","setcachekeystosave","setcompactionthreshold","setcompactionthroughput","setconcurrency","setconcurrentcompactors","setconcurrentviewbuilders","sethintedhandoffthrottlekb","setinterdcstreamthroughput","setlogginglevel","setmaxhintwindow","setstreamthroughput","settimeout","settraceprobability","sjk","snapshot","status","statusautocompaction","statusbackup","statusbinary","statusgossip","statushandoff","stop","stopdaemon","tablehistograms","tablestats","toppartitions","tpstats","truncatehints","upgradesstables","verify","version","viewbuildstatus","SSTable Tools","sstabledump","sstableexpiredblockers","sstablelevelreset","sstableloader","sstablemetadata","sstableofflinerelevel","sstablerepairedset","sstablescrub","sstablesplit","sstableupgrade","sstableutil","sstableverify","Find The Misbehaving Nodes","Troubleshooting","Cassandra Logs","Use Nodetool","Diving Deep, Use External 
Tools"],titleterms:{"class":57,"final":193,"function":[13,14,17],"import":[25,116],"long":35,"new":36,"switch":48,"transient":0,Adding:58,Doing:188,IDE:31,IDEs:25,LCS:48,TLS:56,The:[13,15,17,48,195],USE:11,Use:[49,186,198,199],Uses:49,Using:[31,189],Will:36,With:56,about:24,abov:187,access:56,adcanc:43,add:[26,36],address:36,advanc:[49,199],after:58,aggreg:14,alias:13,all:[20,36,187,193],alloc:58,allocate_tokens_for_keyspac:6,allocate_tokens_for_local_replication_factor:6,allow:13,alreadi:185,alter:[11,18,20,22],ani:36,announc:34,answer:28,apach:[24,31,42],appendic:9,appendix:9,architectur:2,arithmet:19,artifact:34,ask:36,assassin:63,assign:58,attempt:191,audit:43,audit_logging_opt:6,auditlog:43,auth:56,authent:[6,20,56],author:[6,56],auto_snapshot:6,automat:20,automatic_sstable_upgrad:6,avg:14,back_pressure_en:6,back_pressure_strategi:6,backup:44,base:27,basic:[190,194,199],batch:[13,36,53],batch_size_fail_threshold_in_kb:6,batch_size_warn_threshold_in_kb:6,batchlog_replay_throttle_in_kb:6,bcc:199,befor:33,benefit:49,best:55,binari:40,binauditlogg:43,bintrai:34,blob:[14,36],block:184,bloom:45,boilerpl:25,bootstrap:[36,48,58,64],branch:33,broadcast_address:6,broadcast_rpc_address:6,buffer_pool_use_heap_if_exhaust:6,bufferpool:53,bug:[5,28,33],build:31,bulk:[36,46],cach:[53,56,199],call:[34,36],can:36,capi:59,captur:[43,47,61,199],cas_contention_timeout_in_m:6,cassandra:[6,7,15,17,24,27,31,32,34,35,36,37,40,42,43,47,52,56,59,60,62,192,197],cast:14,cdc:47,cdc_enabl:6,cdc_free_space_check_interval_m:6,cdc_raw_directori:6,cdc_total_space_in_mb:6,certif:56,chang:[10,33,36,37,45,47,48],characterist:22,check:190,checklist:30,choic:50,choos:33,circleci:35,claus:13,clean:193,cleanup:[58,65],clear:61,clearsnapshot:66,client:[38,41,53,56,195],client_encryption_opt:6,clientstat:67,clojur:38,cloud:50,cluster:[36,186,198],cluster_nam:6,code:[4,25,33],collect:[22,48,199],column_index_cache_size_in_kb:6,column_index_size_in_kb:6,command:[31,43,48,61,189],comment:12,commit:29,commit_failure_polici:6,commitlog:[4,53],commitlog_compress:6,commitlog_directori:6,commitlog_segment_size_in_mb:6,commitlog_sync:6,commitlog_sync_batch_window_in_m:6,commitlog_sync_group_window_in_m:6,commitlog_sync_period_in_m:6,commitlog_total_space_in_mb:6,commitlogseg:47,committ:27,common:[11,48,50,197],compact:[9,48,53,68,198],compaction_large_partition_warning_threshold_mb:6,compaction_throughput_mb_per_sec:6,compactionhistori:69,compactionstat:70,compactionstrategi:48,compat:61,compress:49,concern:48,concurrent_compactor:6,concurrent_counter_writ:6,concurrent_materialized_view_build:6,concurrent_materialized_view_writ:6,concurrent_read:6,concurrent_valid:6,concurrent_writ:6,condition:20,config:186,configur:[6,7,37,43,47,49],conflict:26,connect:[20,36],consist:[0,61],constant:12,contact:8,content:[34,43],contribut:[28,32,33],control:20,convent:[12,25],convers:14,coordin:198,copi:61,corrupt:[190,194],corrupted_tombstone_strategi:6,count:14,counter:[13,22,190],counter_cache_keys_to_sav:6,counter_cache_save_period:6,counter_cache_size_in_mb:6,counter_write_request_timeout_in_m:6,cpu:[50,199],cql:[9,15,53,61],cqlsh:[41,61],cqlshrc:61,creat:[11,14,16,18,20,21,22,28,33,34],credenti:20,credentials_update_interval_in_m:6,credentials_validity_in_m:6,cross_node_timeout:6,cstar_perf:35,current:[14,192],custom:22,cython:61,dart:38,data:[11,13,17,20,22,23,36,47,48,58],data_file_directori:6,databas:20,datacent:20,date:[14,22,190],datetim:[14,19],dead:58,deal:190,debian:40,debug:[31,197],decommiss:71,deep:199,defin:[14,22],definit:[11,12],defr
agment:48,delet:[13,34,36,48],depend:[26,61],describ:[61,73],describeclust:72,detail:[48,186],detect:0,develop:34,diagnostic_events_en:6,dies:36,directori:[37,48],disabl:[43,47],disableauditlog:74,disableautocompact:75,disablebackup:76,disablebinari:77,disablefullquerylog:78,disablegossip:79,disablehandoff:80,disablehintsfordc:81,disableoldprotocolvers:82,disk:[36,50],disk_failure_polici:6,disk_optimization_strategi:6,displai:183,distribut:34,dive:199,document:[27,28,42],doe:[36,43],drain:83,driver:[38,41],drop:[9,11,14,16,18,20,21,22,36],droppedmessag:53,dry:188,dtest:[28,35],dump:183,durat:22,dynam:57,dynamic_snitch_badness_threshold:6,dynamic_snitch_reset_interval_in_m:6,dynamic_snitch_update_interval_in_m:6,dynamo:0,each:[36,187],eclips:31,elixir:38,email:36,enabl:[43,47,56],enable_materialized_view:6,enable_sasi_index:6,enable_scripted_user_defined_funct:6,enable_transient_repl:6,enable_user_defined_funct:6,enableauditlog:84,enableautocompact:85,enablebackup:86,enablebinari:87,enablefullquerylog:88,enablegossip:89,enablehandoff:90,enablehintsfordc:91,enableoldprotocolvers:92,encod:17,encrypt:56,endpoint_snitch:6,engin:4,entir:183,entri:36,environ:[24,37],erlang:38,error:[36,195],even:36,exampl:4,except:25,exclud:183,exist:36,exit:61,expand:61,experiment:6,expir:48,explan:187,extend:194,extern:199,factor:36,fail:[36,58],failur:[0,36],failuredetector:93,faq:60,featur:6,file:[6,25,26,40,43,186,191,194,197],file_cache_size_in_mb:6,fileauditlogg:43,filedescriptorratio:53,filter:[13,43,45],find:195,fix:[28,33],flamegraph:199,flow:27,flush:94,format:[25,183],found:[185,188],freez:33,frequent:36,from:[31,34,36,40,61,186],fromjson:17,full:[55,197],full_query_log_dir:6,full_query_logging_opt:6,fulli:48,further:47,garbag:[48,199],garbagecollect:95,garbagecollector:53,gc_grace_second:48,gc_log_threshold_in_m:6,gc_warn_threshold_in_m:6,gcstat:96,gener:25,get:[28,39,186,197],getbatchlogreplaythrottl:97,getcompactionthreshold:98,getcompactionthroughput:99,getconcurr:100,getconcurrentcompactor:101,getconcurrentviewbuild:102,getendpoint:103,getinterdcstreamthroughput:104,getlogginglevel:105,getmaxhintwindow:106,getreplica:107,getse:108,getsstabl:109,getstreamthroughput:110,gettimeout:111,gettraceprob:112,github:27,give:36,gossip:0,gossipinfo:113,gpg:34,grace:[48,187],grant:20,graph:60,group:13,guarante:1,handl:25,handoffwindow:114,hang:58,happen:36,hardwar:50,has:185,haskel:38,heap:36,help:[61,115],hide:186,high:199,hint:51,hinted_handoff_disabled_datacent:6,hinted_handoff_en:6,hinted_handoff_throttle_in_kb:6,hintedhandoff:53,hints_compress:6,hints_directori:6,hints_flush_period_in_m:6,hintsservic:53,host:[36,61],hot:56,how:[27,29,36,43],htop:199,idea:31,ideal_consistency_level:6,identifi:12,impact:49,includ:193,increment:55,incremental_backup:6,index:[16,53,59],index_summary_capacity_in_mb:6,index_summary_resize_interval_in_minut:6,info:117,inform:[197,199],initi:28,initial_token:6,insert:[13,17,41],instal:40,integr:[31,56],intellij:31,inter:56,inter_dc_stream_throughput_outbound_megabits_per_sec:6,inter_dc_tcp_nodelai:6,intern:[20,56,183],internode_application_receive_queue_capacity_in_byt:6,internode_application_receive_queue_reserve_endpoint_capacity_in_byt:6,internode_application_receive_queue_reserve_global_capacity_in_byt:6,internode_application_send_queue_capacity_in_byt:6,internode_application_send_queue_reserve_endpoint_capacity_in_byt:6,internode_application_send_queue_reserve_global_capacity_in_byt:6,internode_authent:6,internode_compress:6,internode_recv_buff_size_in_byt:6,internode_send_b
uff_size_in_byt:6,invalidatecountercach:118,invalidatekeycach:119,invalidaterowcach:120,investig:[28,195],iostat:199,java:[36,38],jconsol:36,jenkin:24,jira:[27,34],jmx:[36,48,53,56],job:24,join:[36,121],json:17,jstack:199,jstat:199,jvm:[53,199],keep:192,kei:[16,18,34,183],key_cache_keys_to_sav:6,key_cache_save_period:6,key_cache_size_in_mb:6,keyspac:[11,36,53,188],keyword:[9,12],lang:36,languag:15,larg:36,latenc:[195,198,199],level:[0,48,185,199],librari:26,lightweight:60,limit:[13,18,43],line:[31,61],list:[8,20,22,28,36,193],listen:36,listen_address:[6,36],listen_interfac:6,listen_interface_prefer_ipv6:6,listen_on_broadcast_address:6,listsnapshot:122,liter:22,live:36,load:[36,46,186],local:[27,198],locat:37,log:[36,37,43,48,193,195,197],logger:197,login:61,lot:[36,189],lucen:59,made:36,mail:8,main:37,major:48,manag:[26,183],mani:189,manifest:190,manipul:13,manual:58,map:[16,22,36],materi:18,max:[14,36],max_concurrent_automatic_sstable_upgrad:6,max_hint_window_in_m:6,max_hints_delivery_thread:6,max_hints_file_size_in_mb:6,max_value_size_in_mb:6,maxtimeuuid:14,mean:36,memori:[36,50,53],memorypool:53,memtabl:4,memtable_allocation_typ:6,memtable_cleanup_threshold:6,memtable_flush_writ:6,memtable_heap_space_in_mb:6,memtable_offheap_space_in_mb:6,merg:48,messag:36,metadata:[187,189],method:36,metric:[53,195],min:14,minor:48,mintimeuuid:14,misbehav:195,mode:60,model:23,monitor:[53,58],more:[36,48,183,186,197],move:[58,123],movement:58,multilin:25,multipl:191,nativ:[14,22],native_transport_allow_older_protocol:6,native_transport_flush_in_batches_legaci:6,native_transport_frame_block_size_in_kb:6,native_transport_idle_timeout_in_m:6,native_transport_max_concurrent_connect:6,native_transport_max_concurrent_connections_per_ip:6,native_transport_max_frame_size_in_mb:6,native_transport_max_thread:6,native_transport_port:6,native_transport_port_ssl:6,net:38,netbean:31,netstat:124,network:199,network_author:6,networktopologystrategi:[0,11],newer:31,next:[34,195],nexu:34,node:[36,56,58,195],nodej:38,nodetool:[36,43,48,125,198],note:27,noteworthi:22,now:14,num_token:6,number:19,old:[34,192],one:[36,189],onli:[36,183,193],open:31,oper:[19,34,36,48,49,52],option:[18,43,48,55,61],order:13,otc_backlog_expiration_interval_m:6,otc_coalescing_enough_coalesced_messag:6,otc_coalescing_strategi:6,otc_coalescing_window_u:6,other:[36,55],outofmemoryerror:36,output:[43,183,184,186],overflow:190,overview:[3,47],own:24,packag:[34,40],packet:199,page:[61,199],paramet:[13,47,48],parti:59,partition:6,password:56,patch:[28,33],pausehandoff:126,perform:[34,35],periodic_commitlog_sync_lag_block_in_m:6,perl:38,permiss:20,permissions_update_interval_in_m:6,permissions_validity_in_m:6,phi_convict_threshold:6,php:38,pick:0,plugin:[24,59],point:36,pom:26,port:36,post:34,practic:55,prepar:12,prepared_statements_cache_size_mb:6,prerequisit:[34,40],primari:18,print:[187,189],process:34,profil:60,profileload:127,progress:[58,186],project:31,promot:34,properti:37,proxyhistogram:128,publish:[27,34],python:38,pytz:61,queri:[15,41,195,197,198],question:[28,36],rang:[0,58],range_request_timeout_in_m:6,rangekeysampl:129,rate:195,raw:183,read:[47,54],read_request_timeout_in_m:6,rebuild:130,rebuild_index:131,reduc:185,refresh:132,refreshsizeestim:133,refus:36,releas:34,relevel:188,reliabl:199,reload:[43,56],reloadlocalschema:134,reloadse:135,reloadssl:136,reloadtrigg:137,relocatesst:138,remot:36,remov:[48,58],removenod:139,repair:[48,54,55,140,189],repair_admin:141,repair_session_space_in_mb:6,repaired_data_tracking_for_partition_reads_en:6,re
paired_data_tracking_for_range_reads_en:6,replac:58,replaybatchlog:142,replic:[0,36],report:[5,28,36,53],report_unconfirmed_repaired_data_mismatch:6,repositori:34,request:53,request_timeout_in_m:6,requir:[24,26],reserv:9,resetfullquerylog:143,resetlocalschema:144,resolut:26,resourc:199,restrict:20,result:13,resum:58,resumehandoff:145,retriev:14,review:[28,30],revok:20,rewrit:192,rhel:36,right:33,ring:[0,36,146],role:[20,56],role_manag:6,roles_update_interval_in_m:6,roles_validity_in_m:6,row:183,row_cache_class_nam:6,row_cache_keys_to_sav:6,row_cache_save_period:6,row_cache_size_in_mb:6,rowcach:59,rpc_address:6,rpc_interfac:6,rpc_interface_prefer_ipv6:6,rpc_keepal:6,rubi:38,run:[35,188],runtim:37,rust:38,safeti:6,sai:36,same:36,sampl:43,saved_caches_directori:6,scala:38,scalar:14,script:189,scrub:[147,190],second:187,secondari:16,secur:[20,56],see:36,seed:[24,36],seed_provid:6,select:[13,17,18],selector:13,send:34,serial:61,server:24,server_encryption_opt:6,session:61,set:[20,22,24,31,36,189],setbatchlogreplaythrottl:148,setcachecapac:149,setcachekeystosav:150,setcompactionthreshold:151,setcompactionthroughput:152,setconcurr:153,setconcurrentcompactor:154,setconcurrentviewbuild:155,sethintedhandoffthrottlekb:156,setinterdcstreamthroughput:157,setlogginglevel:158,setmaxhintwindow:159,setstreamthroughput:160,settimeout:161,settraceprob:162,setup:[24,31],share:61,shell:61,show:[36,61,189],sign:34,signatur:14,simplestrategi:[0,11],singl:[36,48,183],size:[48,191],sjk:163,skip:190,slack:[8,34],slow_query_log_timeout_in_m:6,small:191,snapshot:[164,186,191,192],snapshot_before_compact:6,snitch:57,sourc:[31,61],special:61,specif:20,specifi:[187,191],speed:[36,186],sphinx:27,split:191,ssl:[56,186],ssl_storage_port:6,sstabl:[4,48,53,182,184,185,186,189,193],sstable_preemptive_open_interval_in_mb:6,sstabledump:183,sstableexpiredblock:184,sstablelevelreset:185,sstableload:186,sstablemetadata:187,sstableofflinerelevel:188,sstablerepairedset:189,sstablescrub:190,sstablesplit:191,sstableupgrad:192,sstableutil:193,sstableverifi:194,stai:36,standard:56,start:[28,31,33,39],start_native_transport:6,starv:48,state:[198,199],statement:[12,18,25],statu:[165,189,198],statusautocompact:166,statusbackup:167,statusbinari:168,statusgossip:169,statushandoff:170,stc:48,step:[26,195],stop:171,stopdaemon:172,storag:[4,9,53],storage_port:6,store:36,strategi:48,stratio:59,stream:[36,53,58],stream_entire_sst:6,stream_throughput_outbound_megabits_per_sec:6,streaming_connections_per_host:6,streaming_keep_alive_period_in_sec:6,stress:[35,60],structur:183,style:25,submit:28,sum:14,support:[17,60],sync:34,system:197,tabl:[11,47,53,183,185,188,190,192],tablehistogram:173,tablestat:174,tarbal:40,temporari:193,term:12,test:[24,28,31,35],than:36,thei:36,third:59,though:36,thread:199,threadpool:[53,198],threshold:6,throttl:186,throughput:199,tier:48,time:[14,22,48],timestamp:[22,36,183],timeuuid:14,timewindowcompactionstrategi:48,todo:[0,1,3,11,23,44,46,51,54],tojson:17,token:[0,14,58],tombston:48,tombstone_failure_threshold:6,tombstone_warn_threshold:6,tool:[35,62,182,199],top:[36,199],topic:34,toppartit:175,tpstat:176,trace:61,tracetype_query_ttl:6,tracetype_repair_ttl:6,transact:[60,193],transparent_data_encryption_opt:6,trickle_fsync:6,trickle_fsync_interval_in_kb:6,trigger:[21,48],troubleshoot:[26,196],truncat:11,truncate_request_timeout_in_m:6,truncatehint:177,ttl:[13,48],tunabl:0,tupl:22,tweet:34,two:36,type:[9,17,22,26,48,53],udt:22,unabl:36,unit:[28,31,35],unlog:13,unlogged_batch_across_partitions_warn_threshold:6,unrepair:[48
,189],unsubscrib:36,updat:[13,26,28,34,36],upgradesst:178,upload:34,usag:[36,55,60,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,183,184,185,186,187,188,189,190,191,192,193,194,199],use:36,user:[14,20,22,28,60],using:[27,36,48],uuid:14,valid:190,valu:187,variabl:37,verif:194,verifi:179,version:[4,10,34,61,180,192],view:[18,43],viewbuildstatu:181,vmtouch:199,vote:34,wait:34,warn:47,websit:34,welcom:42,what:[33,36,43],when:[36,48],where:13,whitespac:25,why:[36,48],window:48,windows_timer_interv:6,without:[48,190,191],work:[22,27,33],write_request_timeout_in_m:6,writetim:13,yaml:[43,47],you:33,your:[24,34]}}) \ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/cassandra_stress.html b/src/doc/4.0-alpha2/tools/cassandra_stress.html deleted file mode 100644 index 15cd83900..000000000 --- a/src/doc/4.0-alpha2/tools/cassandra_stress.html +++ /dev/null @@ -1,352 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Cassandra Stress" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Stress

-

cassandra-stress is a tool for benchmarking and load testing a Cassandra -cluster. cassandra-stress supports testing arbitrary CQL tables and queries -to allow users to benchmark their data model.

-

This documentation focuses on user mode as this allows the testing of your -actual schema.

-
-

Usage

-

There are several operation types:

-
-
    -
  • write-only, read-only, and mixed workloads of standard data
  • -
  • write-only and read-only workloads for counter columns
  • -
  • user configured workloads, running custom queries on custom schemas
  • -
-
-

The syntax is cassandra-stress <command> [options]. If you want more information on a given command or option, just run cassandra-stress help <command|option>.

-
-
Commands:
-
-
read:
-
Multiple concurrent reads - the cluster must first be populated by a write test
-
write:
-
Multiple concurrent writes against the cluster
-
mixed:
-
Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test
-
counter_write:
-
Multiple concurrent updates of counters.
-
counter_read:
-
Multiple concurrent reads of counters. The cluster must first be populated by a counter_write test.
-
user:
-
Interleaving of user provided queries, with configurable ratio and distribution.
-
help:
-
Print help for a command or option
-
print:
-
Inspect the output of a distribution definition
-
legacy:
-
Legacy support mode
-
-
-
Primary Options:
-
-
-pop:
-
Population distribution and intra-partition visit order
-
-insert:
-
Insert specific options relating to various methods for batching and splitting partition updates
-
-col:
-
Column details such as size and count distribution, data generator, names, comparator and if super columns should be used
-
-rate:
-
Thread count, rate limit or automatic mode (default is auto)
-
-mode:
-
Thrift or CQL with options
-
-errors:
-
How to handle errors when encountered during stress
-
-sample:
-
Specify the number of samples to collect for measuring latency
-
-schema:
-
Replication settings, compression, compaction, etc.
-
-node:
-
Nodes to connect to
-
-log:
-
Where to log progress to, and the interval at which to do it
-
-transport:
-
Custom transport factories
-
-port:
-
The port to connect to cassandra nodes on
-
-sendto:
-
Specify a stress server to send this command to
-
-graph:
-
Graph recorded metrics
-
-tokenrange:
-
Token range settings
-
-
-
Suboptions:
-
Every command and primary option has its own collection of suboptions. These are too numerous to list here. -For information on the suboptions for each command or option, please use the help command, -cassandra-stress help <command|option>.
-
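As a concrete illustration of the command and option structure above, the following sequence populates a cluster and then reads the same data back; the node address, operation count and thread count are placeholders rather than recommendations:

cassandra-stress write n=1000000 -node 192.0.2.10 -rate threads=50
cassandra-stress read n=1000000 -node 192.0.2.10 -rate threads=50
cassandra-stress help -rate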
-
-
-

User mode

-

User mode allows you to stress your own schemas. This can save time in the long run compared with building an application and then realising that the schema doesn’t scale.

-
-

Profile

-

User mode requires a profile defined in YAML. Multiple YAML files may be specified, in which case operations in the ops argument are referenced as specname.opname.

-

An identifier for the profile:

-
specname: staff_activities
-
-
-

The keyspace for the test:

-
keyspace: staff
-
-
-

CQL for the keyspace. Optional if the keyspace already exists:

-
keyspace_definition: |
- CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
-
-
-

The table to be stressed:

-
table: staff_activities
-
-
-

CQL for the table. Optional if the table already exists:

-
table_definition: |
-  CREATE TABLE staff_activities (
-      name text,
-      when timeuuid,
-      what text,
-      PRIMARY KEY(name, when, what)
-  )
-
-
-

Optional meta information on the generated columns in the above table. -The min and max only apply to text and blob types. -The distribution field represents the total unique population -distribution of that column across rows:

-
columnspec:
-  - name: name
-    size: uniform(5..10) # The names of the staff members are between 5-10 characters
-    population: uniform(1..10) # 10 possible staff members to pick from
-  - name: when
-    cluster: uniform(20..500) # Staff members do between 20 and 500 events
-  - name: what
-    size: normal(10..100,50)
-
-
-

Supported types are:

-

An exponential distribution over the range [min..max]:

-
EXP(min..max)
-
-
-

An extreme value (Weibull) distribution over the range [min..max]:

-
EXTREME(min..max,shape)
-
-
-

A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:

-
GAUSSIAN(min..max,stdvrng)
-
-
-

A gaussian/normal distribution, with explicitly defined mean and stdev:

-
GAUSSIAN(min..max,mean,stdev)
-
-
-

A uniform distribution over the range [min, max]:

-
UNIFORM(min..max)
-
-
-

A fixed distribution, always returning the same value:

-
FIXED(val)
-
-
-

If preceded by ~, the distribution is inverted

-

Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)

-
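As a sketch of the ~ inversion described above applied in a columnspec (the column name and range here are illustrative only):

columnspec:
  - name: what
    size: ~exp(10..200)   # inverted form of EXP(10..200)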

Insert distributions:

-
insert:
-  # How many partition to insert per batch
-  partitions: fixed(1)
-  # How many rows to update per partition
-  select: fixed(1)/500
-  # UNLOGGED or LOGGED batch for insert
-  batchtype: UNLOGGED
-
-
-

Currently all inserts are done inside batches.

-

Read statements to use during the test:

-
queries:
-   events:
-      cql: select *  from staff_activities where name = ?
-      fields: samerow
-   latest_event:
-      cql: select * from staff_activities where name = ?  LIMIT 1
-      fields: samerow
-
-
-

Running a user mode test:

-
cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once
-
-
-

This will create the schema and then run tests for 1 minute with an equal number of inserts, latest_event queries and events queries. Additionally, the table will be truncated once before the test.

-

The full example profile can be found in the example YAML file shipped with cassandra-stress.

-
-
Running a user mode test with multiple yaml files:
-
cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once
-
This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table, although care must be taken that the table definition is identical (data generation specs can be different).
-
-
-
-

Lightweight transaction support

-

cassandra-stress supports lightweight transactions. To do this it first reads the current data from Cassandra and then uses the read value(s) to fulfil the lightweight transaction condition(s).

-

Lightweight transaction update query:

-
queries:
-  regularupdate:
-      cql: update blogposts set author = ? where domain = ? and published_date = ?
-      fields: samerow
-  updatewithlwt:
-      cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ?
-      fields: samerow
-
-
-

The full example profile can be found in the lightweight transaction example YAML file shipped with cassandra-stress.

-
-
-
-

Graphing

-

Graphs can be generated for each run of stress.

(Image: example-stress-graph.png, a sample graph produced by the -graph option.)

To create a new graph:

-
cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph"
-
-
-

To add a new run to an existing graph point to an existing file and add a revision name:

-
cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run"
-
-
-
-
-

FAQ

-

How do you use NetworkTopologyStrategy for the keyspace?

-

Use the schema option making sure to either escape the parenthesis or enclose in quotes:

-
cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)"
-
-
-

How do you use SSL?

-

Use the transport option:

-
cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra"
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/cqlsh.html b/src/doc/4.0-alpha2/tools/cqlsh.html deleted file mode 100644 index 9fffe0c5c..000000000 --- a/src/doc/4.0-alpha2/tools/cqlsh.html +++ /dev/null @@ -1,485 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "cqlsh: the CQL shell" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cqlsh: the CQL shell

-

cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line.

-
-

Compatibility

-

cqlsh is compatible with Python 2.7.

-

In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.

-
-
-

Optional Dependencies

-

cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.

-
-

pytz

-

By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the pytz library must be installed. See the timezone option in cqlshrc for -specifying a timezone to use.

-
-
-

cython

-

The performance of cqlsh’s COPY operations can be improved by installing cython. This will -compile the python modules that are central to the performance of COPY.

-
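Both optional dependencies can typically be installed with pip, assuming a Python 2.7 environment matching this cqlsh release (package names as published on PyPI):

pip install pytz cython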
-
-
-

cqlshrc

-

The cqlshrc file holds configuration options for cqlsh. By default this is in the user’s home directory at ~/.cassandra/cqlshrc, but a custom location can be specified with the --cqlshrc option.

-

Example config values and documentation can be found in the conf/cqlshrc.sample file of a tarball installation. You -can also view the latest version of cqlshrc online.

-
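For example, to start cqlsh with a cqlshrc kept outside the default location (the path, host and port below are placeholders):

cqlsh --cqlshrc /path/to/cqlshrc 127.0.0.1 9042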
-
-

Command Line Options

-

Usage:

-

cqlsh [options] [host [port]]

-

Options:

-
-
-C --color
-
Force color output
-
--no-color
-
Disable color output
-
--browser
-
Specify the browser to use for displaying cqlsh help. This can be one of the supported browser names (e.g. firefox) or a browser path followed by %s (e.g. -/usr/bin/google-chrome-stable %s).
-
--ssl
-
Use SSL when connecting to Cassandra
-
-u --user
-
Username to authenticate against Cassandra with
-
-p --password
-
Password to authenticate against Cassandra with, should -be used in conjunction with --user
-
-k --keyspace
-
Keyspace to authenticate to, should be used in conjunction -with --user
-
-f --file
-
Execute commands from the given file, then exit
-
--debug
-
Print additional debugging information
-
--encoding
-
Specify a non-default encoding for output (defaults to UTF-8)
-
--cqlshrc
-
Specify a non-default location for the cqlshrc file
-
-e --execute
-
Execute the given statement, then exit
-
--connect-timeout
-
Specify the connection timeout in seconds (defaults to 2s)
-
--request-timeout
-
Specify the request timeout in seconds (defaults to 10s)
-
-t --tty
-
Force tty mode (command prompt)
-
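Putting several of these options together, a non-interactive invocation might look like the following; the credentials, host, port and statement are placeholders:

cqlsh -u cassandra -p cassandra --request-timeout 20 -e "DESCRIBE KEYSPACES" 192.0.2.10 9042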
-
-
-

Special Commands

-

In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below.

-
-

CONSISTENCY

-

Usage: CONSISTENCY <consistency level>

-

Sets the consistency level for operations to follow. Valid arguments include:

-
    -
  • ANY
  • -
  • ONE
  • -
  • TWO
  • -
  • THREE
  • -
  • QUORUM
  • -
  • ALL
  • -
  • LOCAL_QUORUM
  • -
  • LOCAL_ONE
  • -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-
-
-

SERIAL CONSISTENCY

-

Usage: SERIAL CONSISTENCY <consistency level>

-

Sets the serial consistency level for operations to follow. Valid arguments include:

-
    -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-

The serial consistency level is only used by conditional updates (INSERT, UPDATE and DELETE with an IF -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of QUORUM (and -is successful), then a QUORUM read is guaranteed to see that write. But if the regular consistency level of that -write is ANY, then only a read with a consistency level of SERIAL is guaranteed to see it (even a read with -consistency ALL is not guaranteed to be enough).

-
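For example, within a cqlsh session the two levels can be set independently:

cqlsh> CONSISTENCY QUORUM
cqlsh> SERIAL CONSISTENCY LOCAL_SERIAL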
-
-

SHOW VERSION

-

Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:

-
cqlsh> SHOW VERSION
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-
-
-
-
-

SHOW HOST

-

Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:

-
cqlsh> SHOW HOST
-Connected to Prod_Cluster at 192.0.0.1:9042.
-
-
-
-
-

SHOW SESSION

-

Pretty prints a specific tracing session.

-

Usage: SHOW SESSION <session id>

-

Example usage:

-
cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8
-
-Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8
-
- activity                                                  | timestamp                  | source    | source_elapsed | client
------------------------------------------------------------+----------------------------+-----------+----------------+-----------
-                                        Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 |              0 | 127.0.0.1
- Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 |           3843 | 127.0.0.1
-...
-
-
-
-
-

SOURCE

-

Reads the contents of a file and executes each line as a CQL statement or special cqlsh command.

-

Usage: SOURCE <string filename>

-

Example usage:

-
cqlsh> SOURCE '/home/thobbs/commands.cql'
-
-
-
-
-

CAPTURE

-

Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured.

-

Usage:

-
CAPTURE '<file>';
-CAPTURE OFF;
-CAPTURE;
-
-
-

That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME.

-

Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session.

-

To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF.

-

To inspect the current capture configuration, use CAPTURE with no arguments.

-
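A short capture session might look like the following; the file name is a placeholder:

cqlsh> CAPTURE '~/query-output.txt';
cqlsh> SELECT * FROM system.local;
cqlsh> CAPTURE OFF;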
-
-

HELP

-

Gives information about cqlsh commands. To see available topics, enter HELP without any arguments. To see help on a -topic, use HELP <topic>. Also see the --browser argument for controlling what browser is used to display help.

-
-
-

TRACING

-

Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed.

-

Usage:

-
TRACING ON
-TRACING OFF
-
-
-
-
-

PAGING

-

Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it’s a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once.

-

Usage:

-
PAGING ON
-PAGING OFF
-PAGING <page size in rows>
-
-
-
-
-

EXPAND

-

Enables or disables vertical printing of rows. Enabling EXPAND is useful when many columns are fetched, or the -contents of a single column are large.

-

Usage:

-
EXPAND ON
-EXPAND OFF
-
-
-
-
-

LOGIN

-

Authenticate as a specified Cassandra user for the current session.

-

Usage:

-
LOGIN <username> [<password>]
-
-
-
-
-

EXIT

-

Ends the current session and terminates the cqlsh process.

-

Usage:

-
EXIT
-QUIT
-
-
-
-
-

CLEAR

-

Clears the console.

-

Usage:

-
CLEAR
-CLS
-
-
-
-
-

DESCRIBE

-

Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema.

-

Usage:

-
DESCRIBE CLUSTER
-DESCRIBE SCHEMA
-DESCRIBE KEYSPACES
-DESCRIBE KEYSPACE <keyspace name>
-DESCRIBE TABLES
-DESCRIBE TABLE <table name>
-DESCRIBE INDEX <index name>
-DESCRIBE MATERIALIZED VIEW <view name>
-DESCRIBE TYPES
-DESCRIBE TYPE <type name>
-DESCRIBE FUNCTIONS
-DESCRIBE FUNCTION <function name>
-DESCRIBE AGGREGATES
-DESCRIBE AGGREGATE <aggregate function name>
-
-
-

In any of the commands, DESC may be used in place of DESCRIBE.

-

The DESCRIBE CLUSTER command prints the cluster name and partitioner:

-
cqlsh> DESCRIBE CLUSTER
-
-Cluster: Test Cluster
-Partitioner: Murmur3Partitioner
-
-
-

The DESCRIBE SCHEMA command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup.

-
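For instance, the schema can be dumped to a file from the shell and later replayed with SOURCE; the file name is a placeholder:

cqlsh -e "DESCRIBE SCHEMA" > schema.cql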
-
-

COPY TO

-

Copies data from a table to a CSV file.

-

Usage:

-
COPY <table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the destination file. This can also be the special value STDOUT (without single quotes) to print the CSV to stdout.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY TO

-
-
MAXREQUESTS
-
The maximum number of token ranges to fetch simultaneously. Defaults to 6.
-
PAGESIZE
-
The number of rows to fetch in a single page. Defaults to 1000.
-
PAGETIMEOUT
-
By default the page timeout is 10 seconds per 1000 entries -in the page size or 10 seconds if pagesize is smaller.
-
BEGINTOKEN, ENDTOKEN
-
Token range to export. Defaults to exporting the full ring.
-
MAXOUTPUTSIZE
-
The maximum size of the output file measured in number of lines; beyond this maximum the output file will be split into segments. -1 means unlimited, and is the default.
-
ENCODING
-
The encoding used for characters. Defaults to utf8.
-
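A sketch of an export using a couple of these options; the keyspace, table, columns and file name are placeholders:

cqlsh> COPY myks.users (id, name, email) TO 'users.csv' WITH HEADER = true AND PAGESIZE = 2000;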
-
-
-
-

COPY FROM

-

Copies data from a CSV file to table.

-

Usage:

-
COPY <table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the CSV file will be copied to the table. A subset of columns to copy may be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the source file. This can also be the special value STDIN (without single quotes) to read the CSV data from stdin.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY FROM

-
-
INGESTRATE
-
The maximum number of rows to process per second. Defaults to 100000.
-
MAXROWS
-
The maximum number of rows to import. -1 means unlimited, and is the default.
-
SKIPROWS
-
A number of initial rows to skip. Defaults to 0.
-
SKIPCOLS
-
A comma-separated list of column names to ignore. By default, no columns are skipped.
-
MAXPARSEERRORS
-
The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default.
-
MAXINSERTERRORS
-
The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000.
-
ERRFILE
-
A file to store all rows that could not be imported, by default this is import_<ks>_<table>.err where <ks> is -your keyspace and <table> is your table name.
-
MAXBATCHSIZE
-
The max number of rows inserted in a single batch. Defaults to 20.
-
MINBATCHSIZE
-
The min number of rows inserted in a single batch. Defaults to 2.
-
CHUNKSIZE
-
The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000.
-
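A matching import sketch, again with placeholder names:

cqlsh> COPY myks.users (id, name, email) FROM 'users.csv' WITH HEADER = true AND CHUNKSIZE = 500;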
-
-
-

Shared COPY Options

-

Options that are common to both COPY TO and COPY FROM.

-
-
NULLVAL
-
The string placeholder for null values. Defaults to null.
-
HEADER
-
For COPY TO, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, -specifies whether the first line in the CSV input file contains column names. Defaults to false.
-
DECIMALSEP
-
The character that is used as the decimal point separator. Defaults to ..
-
THOUSANDSSEP
-
The character that is used to separate thousands. Defaults to the empty string.
-
BOOLSTYLE
-
The string literal format for boolean values. Defaults to True,False.
-
NUMPROCESSES
-
The number of child worker processes to create for COPY tasks. Defaults to a max of 4 for COPY FROM and 16 -for COPY TO. However, at most (num_cores - 1) processes will be created.
-
MAXATTEMPTS
-
The maximum number of failed attempts to fetch a range of data (when using COPY TO) or insert a chunk of data -(when using COPY FROM) before giving up. Defaults to 5.
-
REPORTFREQUENCY
-
How often status updates are refreshed, in seconds. Defaults to 0.25.
-
RATEFILE
-
An optional file to output rate statistics to. By default, statistics are not output to a file.
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/index.html b/src/doc/4.0-alpha2/tools/index.html deleted file mode 100644 index ab5f4cc0f..000000000 --- a/src/doc/4.0-alpha2/tools/index.html +++ /dev/null @@ -1,257 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Tools" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Tools

-

This section describes the command line tools provided with Apache Cassandra.

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/assassinate.html b/src/doc/4.0-alpha2/tools/nodetool/assassinate.html deleted file mode 100644 index c3dff1a42..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/assassinate.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "assassinate" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

assassinate

-
-
-

Usage

-
NAME
-        nodetool assassinate - Forcefully remove a dead node without
-        re-replicating any data. Use as a last resort if you cannot removenode
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] assassinate [--] <ip_address>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <ip_address>
-            IP address of the endpoint to assassinate
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/bootstrap.html b/src/doc/4.0-alpha2/tools/nodetool/bootstrap.html deleted file mode 100644 index 4509e7c33..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/bootstrap.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "bootstrap" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

bootstrap

-
-
-

Usage

-
NAME
-        nodetool bootstrap - Monitor/manage node's bootstrap process
-
-SYNOPSIS
-        nodetool bootstrap
-        nodetool [(-p <port> | --port <port>)] [(-pp | --print-port)]
-                [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] [(-h <host> | --host <host>)]
-                bootstrap resume
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-COMMANDS
-        With no arguments, Display help information
-
-        resume
-            Resume bootstrap streaming
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/cleanup.html b/src/doc/4.0-alpha2/tools/nodetool/cleanup.html deleted file mode 100644 index 7f6a0950c..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/cleanup.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "cleanup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cleanup

-
-
-

Usage

-
NAME
-        nodetool cleanup - Triggers the immediate cleanup of keys no longer
-        belonging to a node. By default, clean all keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] cleanup
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to cleanup simultanously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/clearsnapshot.html b/src/doc/4.0-alpha2/tools/nodetool/clearsnapshot.html deleted file mode 100644 index c1d3d4ae1..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/clearsnapshot.html +++ /dev/null @@ -1,141 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clearsnapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clearsnapshot

-
-
-

Usage

-
NAME
-        nodetool clearsnapshot - Remove the snapshot with the given name from
-        the given keyspaces. If no snapshotName is specified we will remove all
-        snapshots
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clearsnapshot [--all]
-                [-t <snapshot_name>] [--] [<keyspaces>...]
-
-OPTIONS
-        --all
-            Removes all snapshots
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -t <snapshot_name>
-            Remove the snapshot with a given name
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces>...]
-            Remove snapshots from the given keyspaces
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/clientstats.html b/src/doc/4.0-alpha2/tools/nodetool/clientstats.html deleted file mode 100644 index 71240cd80..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/clientstats.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clientstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clientstats

-
-
-

Usage

-
NAME
-        nodetool clientstats - Print information about connected clients
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clientstats [--all]
-                [--by-protocol] [--clear-history]
-
-OPTIONS
-        --all
-            Lists all connections
-
-        --by-protocol
-            Lists most recent client connections by protocol version
-
-        --clear-history
-            Clear the history of connected clients
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/compact.html b/src/doc/4.0-alpha2/tools/nodetool/compact.html deleted file mode 100644 index 6a0eddc52..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/compact.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compact" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compact

-
-
-

Usage

-
NAME
-        nodetool compact - Force a (major) compaction on one or more tables or
-        user-defined compaction on given SSTables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compact
-                [(-et <end_token> | --end-token <end_token>)] [(-s | --split-output)]
-                [(-st <start_token> | --start-token <start_token>)] [--user-defined]
-                [--] [<keyspace> <tables>...] or <SSTable file>...
-
-OPTIONS
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which compaction range ends
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s, --split-output
-            Use -s to not create a single big file
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the compaction range starts
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --user-defined
-            Use --user-defined to submit listed files for user-defined
-            compaction
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...] or <SSTable file>...
-            The keyspace followed by one or many tables or list of SSTable data
-            files when using --user-defined
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/compactionhistory.html b/src/doc/4.0-alpha2/tools/nodetool/compactionhistory.html deleted file mode 100644 index cdfd70afa..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/compactionhistory.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionhistory" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionhistory

-
-
-

Usage

-
NAME
-        nodetool compactionhistory - Print history of compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionhistory
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/compactionstats.html b/src/doc/4.0-alpha2/tools/nodetool/compactionstats.html deleted file mode 100644 index e0a63501e..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/compactionstats.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionstats

-
-
-

Usage

-
NAME
-        nodetool compactionstats - Print statistics on compactions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/decommission.html b/src/doc/4.0-alpha2/tools/nodetool/decommission.html deleted file mode 100644 index 3468bf586..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/decommission.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "decommission" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

decommission

-
-
-

Usage

-
NAME
-        nodetool decommission - Decommission the *node I am connecting to*
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] decommission [(-f | --force)]
-
-OPTIONS
-        -f, --force
-            Force decommission of this node even when it reduces the number of
-            replicas to below configured RF
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/describecluster.html b/src/doc/4.0-alpha2/tools/nodetool/describecluster.html deleted file mode 100644 index 04b35f9f2..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/describecluster.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describecluster" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describecluster

-
-
-

Usage

-
NAME
-        nodetool describecluster - Print the name, snitch, partitioner and
-        schema version of a cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describecluster
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/describering.html b/src/doc/4.0-alpha2/tools/nodetool/describering.html deleted file mode 100644 index 82c796538..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/describering.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describering" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describering

-
-
-

Usage

-
NAME
-        nodetool describering - Shows the token ranges info of a given keyspace
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describering [--] <keyspace>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            The keyspace name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/disableauditlog.html b/src/doc/4.0-alpha2/tools/nodetool/disableauditlog.html deleted file mode 100644 index b99973e90..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/disableauditlog.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableauditlog

-
-
-

Usage

-
NAME
-        nodetool disableauditlog - Disable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableauditlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/disableautocompaction.html b/src/doc/4.0-alpha2/tools/nodetool/disableautocompaction.html deleted file mode 100644 index b0a2bed92..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/disableautocompaction.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableautocompaction

-
-
-

Usage

-
NAME
-        nodetool disableautocompaction - Disable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/disablebackup.html b/src/doc/4.0-alpha2/tools/nodetool/disablebackup.html deleted file mode 100644 index c9df215fe..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/disablebackup.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebackup

-
-
-

Usage

-
NAME
-        nodetool disablebackup - Disable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/disablebinary.html b/src/doc/4.0-alpha2/tools/nodetool/disablebinary.html deleted file mode 100644 index 670bb1ee8..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/disablebinary.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebinary

-
-
-

Usage

-
NAME
-        nodetool disablebinary - Disable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/disablefullquerylog.html b/src/doc/4.0-alpha2/tools/nodetool/disablefullquerylog.html deleted file mode 100644 index df9348629..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/disablefullquerylog.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool disablefullquerylog - Disable the full query log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablefullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/disablegossip.html b/src/doc/4.0-alpha2/tools/nodetool/disablegossip.html deleted file mode 100644 index bf5e8026e..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/disablegossip.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablegossip

-
-
-

Usage

-
NAME
-        nodetool disablegossip - Disable gossip (effectively marking the node
-        down)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/disablehandoff.html b/src/doc/4.0-alpha2/tools/nodetool/disablehandoff.html deleted file mode 100644 index f2da82139..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/disablehandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehandoff

-
-
-

Usage

-
NAME
-        nodetool disablehandoff - Disable storing hinted handoffs
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/disablehintsfordc.html b/src/doc/4.0-alpha2/tools/nodetool/disablehintsfordc.html deleted file mode 100644 index a7986fe28..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/disablehintsfordc.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool disablehintsfordc - Disable hints for a data center
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to disable
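For illustration, a hedged example of disabling hints for a single data center; the `--` separator usage follows the synopsis above, but the data center name `DC2` is hypothetical:

```bash
# Stop storing hints destined for the (hypothetical) data center DC2
nodetool disablehintsfordc -- DC2
```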
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/disableoldprotocolversions.html b/src/doc/4.0-alpha2/tools/nodetool/disableoldprotocolversions.html deleted file mode 100644 index 01a5f0cae..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/disableoldprotocolversions.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool disableoldprotocolversions - Disable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/drain.html b/src/doc/4.0-alpha2/tools/nodetool/drain.html deleted file mode 100644 index 623144317..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/drain.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "drain" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

drain

-
-
-

Usage

-
NAME
-        nodetool drain - Drain the node (stop accepting writes and flush all
-        tables)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] drain
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/enableauditlog.html b/src/doc/4.0-alpha2/tools/nodetool/enableauditlog.html deleted file mode 100644 index 6ed51e522..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/enableauditlog.html +++ /dev/null @@ -1,158 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableauditlog

-
-
-

Usage

-
NAME
-        nodetool enableauditlog - Enable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableauditlog
-                [--excluded-categories <excluded_categories>]
-                [--excluded-keyspaces <excluded_keyspaces>]
-                [--excluded-users <excluded_users>]
-                [--included-categories <included_categories>]
-                [--included-keyspaces <included_keyspaces>]
-                [--included-users <included_users>] [--logger <logger>]
-
-OPTIONS
-        --excluded-categories <excluded_categories>
-            Comma separated list of Audit Log Categories to be excluded for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --excluded-keyspaces <excluded_keyspaces>
-            Comma separated list of keyspaces to be excluded for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --excluded-users <excluded_users>
-            Comma separated list of users to be excluded for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --included-categories <included_categories>
-            Comma separated list of Audit Log Categories to be included for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --included-keyspaces <included_keyspaces>
-            Comma separated list of keyspaces to be included for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --included-users <included_users>
-            Comma separated list of users to be included for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        --logger <logger>
-            Logger name to be used for AuditLogging. Default BinAuditLogger. If
-            not set the value from cassandra.yaml will be used
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/enableautocompaction.html b/src/doc/4.0-alpha2/tools/nodetool/enableautocompaction.html deleted file mode 100644 index c2b37db22..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/enableautocompaction.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableautocompaction

-
-
-

Usage

-
NAME
-        nodetool enableautocompaction - Enable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
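A hedged example that re-enables autocompaction for two tables in one keyspace (the keyspace and table names are hypothetical):

```bash
nodetool enableautocompaction -- my_keyspace table_a table_b
```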
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/enablebackup.html b/src/doc/4.0-alpha2/tools/nodetool/enablebackup.html deleted file mode 100644 index d2689ec21..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/enablebackup.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebackup

-
-
-

Usage

-
NAME
-        nodetool enablebackup - Enable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/enablebinary.html b/src/doc/4.0-alpha2/tools/nodetool/enablebinary.html deleted file mode 100644 index a5b812161..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/enablebinary.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebinary

-
-
-

Usage

-
NAME
-        nodetool enablebinary - Reenable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/enablefullquerylog.html b/src/doc/4.0-alpha2/tools/nodetool/enablefullquerylog.html deleted file mode 100644 index fc27a6080..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/enablefullquerylog.html +++ /dev/null @@ -1,155 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool enablefullquerylog - Enable full query logging, defaults for
-        the options are configured in cassandra.yaml
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablefullquerylog
-                [--archive-command <archive_command>] [--blocking]
-                [--max-archive-retries <archive_retries>]
-                [--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-                [--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-        --archive-command <archive_command>
-            Command that will handle archiving rolled full query log files.
-            Format is "/path/to/script.sh %path" where %path will be replaced
-            with the file to archive
-
-        --blocking
-            Whether to block producers or drop samples when the queue is full.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --max-archive-retries <archive_retries>
-            Max number of archive retries.
-
-        --max-log-size <max_log_size>
-            How many bytes of log data to store before dropping segments. Might
-            not be respected if a log file hasn't rolled so it can be deleted.
-
-        --max-queue-weight <max_queue_weight>
-            Maximum number of bytes of query data to queue to disk before
-            blocking or dropping samples.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        --path <path>
-            Path to store the full query log at. Will have its contents
-            recursively deleted.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        --roll-cycle <roll_cycle>
-            How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-        -u <username>, --username <username>
-            Remote jmx agent username
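For illustration, a hedged invocation enabling the full query log with an explicit path and roll cycle; the path and sizes shown are assumptions, and unset options are taken from cassandra.yaml:

```bash
nodetool enablefullquerylog \
    --path /var/lib/cassandra/fql \
    --roll-cycle HOURLY \
    --max-log-size 17179869184 \
    --blocking
```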
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/enablegossip.html b/src/doc/4.0-alpha2/tools/nodetool/enablegossip.html deleted file mode 100644 index 458724acc..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/enablegossip.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablegossip

-
-
-

Usage

-
NAME
-        nodetool enablegossip - Reenable gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/enablehandoff.html b/src/doc/4.0-alpha2/tools/nodetool/enablehandoff.html deleted file mode 100644 index f65da8193..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/enablehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehandoff

-
-
-

Usage

-
NAME
-        nodetool enablehandoff - Reenable storing of future hints on the
-        current node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/enablehintsfordc.html b/src/doc/4.0-alpha2/tools/nodetool/enablehintsfordc.html deleted file mode 100644 index 952277f1d..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/enablehintsfordc.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool enablehintsfordc - Enable hints for a data center that was
-        previously disabled
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to enable
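A hedged counterpart to disablehintsfordc above, re-enabling hints for a hypothetical data center name:

```bash
nodetool enablehintsfordc -- DC2
```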
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/enableoldprotocolversions.html b/src/doc/4.0-alpha2/tools/nodetool/enableoldprotocolversions.html deleted file mode 100644 index 74e103977..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/enableoldprotocolversions.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool enableoldprotocolversions - Enable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/failuredetector.html b/src/doc/4.0-alpha2/tools/nodetool/failuredetector.html deleted file mode 100644 index 885e4a4d7..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/failuredetector.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "failuredetector" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

failuredetector

-
-
-

Usage

-
NAME
-        nodetool failuredetector - Shows the failure detector information for
-        the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] failuredetector
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/flush.html b/src/doc/4.0-alpha2/tools/nodetool/flush.html deleted file mode 100644 index 00154b862..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/flush.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "flush" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

flush

-
-
-

Usage

-
NAME
-        nodetool flush - Flush one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] flush [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
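Two hedged examples, flushing a single table and then every table in a keyspace (names are hypothetical):

```bash
# Flush one table
nodetool flush -- my_keyspace my_table

# Flush all tables in the keyspace
nodetool flush -- my_keyspace
```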
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/garbagecollect.html b/src/doc/4.0-alpha2/tools/nodetool/garbagecollect.html deleted file mode 100644 index 2e0c2260a..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/garbagecollect.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "garbagecollect" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

garbagecollect

-
-
-

Usage

-
NAME
-        nodetool garbagecollect - Remove deleted data from one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] garbagecollect
-                [(-g <granularity> | --granularity <granularity>)]
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -g <granularity>, --granularity <granularity>
-            Granularity of garbage removal. ROW (default) removes deleted
-            partitions and rows, CELL also removes overwritten or deleted cells.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to clean up simultaneously; set to 0 to use all
-            available compaction threads. Defaults to 1 so that collections of
-            newer tables can see that the data is deleted and also remove
-            tombstones.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
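A hedged sketch that removes deleted data at cell granularity using two concurrent jobs (keyspace and table names are hypothetical):

```bash
nodetool garbagecollect -g CELL -j 2 -- my_keyspace my_table
```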
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/gcstats.html b/src/doc/4.0-alpha2/tools/nodetool/gcstats.html deleted file mode 100644 index 336d1169e..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/gcstats.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gcstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gcstats

-
-
-

Usage

-
NAME
-        nodetool gcstats - Print GC Statistics
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gcstats
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getbatchlogreplaythrottle.html b/src/doc/4.0-alpha2/tools/nodetool/getbatchlogreplaythrottle.html deleted file mode 100644 index f961ca396..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getbatchlogreplaythrottle.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool getbatchlogreplaythrottle - Print batchlog replay throttle in
-        KB/s. This is reduced proportionally to the number of nodes in the
-        cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getbatchlogreplaythrottle
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getcompactionthreshold.html b/src/doc/4.0-alpha2/tools/nodetool/getcompactionthreshold.html deleted file mode 100644 index b1a994e88..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getcompactionthreshold.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool getcompactionthreshold - Print min and max compaction
-        thresholds for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthreshold [--]
-                <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace with a table
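A hedged example reading the min and max compaction thresholds of one table (names are hypothetical):

```bash
nodetool getcompactionthreshold -- my_keyspace my_table
```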
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getcompactionthroughput.html b/src/doc/4.0-alpha2/tools/nodetool/getcompactionthroughput.html deleted file mode 100644 index c1c64bdf7..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getcompactionthroughput.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool getcompactionthroughput - Print the MB/s throughput cap for
-        compaction in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getconcurrency.html b/src/doc/4.0-alpha2/tools/nodetool/getconcurrency.html deleted file mode 100644 index e80d8bf93..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getconcurrency.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrency

-
-
-

Usage

-
NAME
-        nodetool getconcurrency - Get maximum concurrency for processing stages
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrency [--]
-                [stage-names]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [stage-names]
-            optional list of stage names, otherwise display all stages
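A hedged example; running without arguments lists every stage, and the stage names passed in the second command are typical Cassandra stages but should be checked against that full listing:

```bash
# All stages
nodetool getconcurrency

# Only selected stages (stage names assumed, verify against the full output)
nodetool getconcurrency -- MutationStage ReadStage
```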
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getconcurrentcompactors.html b/src/doc/4.0-alpha2/tools/nodetool/getconcurrentcompactors.html deleted file mode 100644 index a3cd7e338..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getconcurrentcompactors.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool getconcurrentcompactors - Get the number of concurrent
-        compactors in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentcompactors
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getconcurrentviewbuilders.html b/src/doc/4.0-alpha2/tools/nodetool/getconcurrentviewbuilders.html deleted file mode 100644 index 0cc40444b..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getconcurrentviewbuilders.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool getconcurrentviewbuilders - Get the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentviewbuilders
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getendpoints.html b/src/doc/4.0-alpha2/tools/nodetool/getendpoints.html deleted file mode 100644 index 2b0e8a318..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getendpoints.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getendpoints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getendpoints

-
-
-

Usage

-
NAME
-        nodetool getendpoints - Print the endpoints that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getendpoints [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find the endpoint
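A hedged example locating the endpoints that own one partition key (keyspace, table, and key are hypothetical):

```bash
nodetool getendpoints -- my_keyspace my_table some_partition_key
```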
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getinterdcstreamthroughput.html b/src/doc/4.0-alpha2/tools/nodetool/getinterdcstreamthroughput.html deleted file mode 100644 index 04c39bc06..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getinterdcstreamthroughput.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getinterdcstreamthroughput - Print the Mb/s throughput cap for
-        inter-datacenter streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getinterdcstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getlogginglevels.html b/src/doc/4.0-alpha2/tools/nodetool/getlogginglevels.html deleted file mode 100644 index 5d8005d07..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getlogginglevels.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getlogginglevels" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getlogginglevels

-
-
-

Usage

-
NAME
-        nodetool getlogginglevels - Get the runtime logging levels
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getlogginglevels
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getmaxhintwindow.html b/src/doc/4.0-alpha2/tools/nodetool/getmaxhintwindow.html deleted file mode 100644 index ae006caab..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getmaxhintwindow.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool getmaxhintwindow - Print the max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getmaxhintwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getreplicas.html b/src/doc/4.0-alpha2/tools/nodetool/getreplicas.html deleted file mode 100644 index dde41f8d5..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getreplicas.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getreplicas" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getreplicas

-
-
-

Usage

-
NAME
-        nodetool getreplicas - Print replicas for a given key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getreplicas [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find replicas
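A hedged example, analogous to getendpoints above (names and key are hypothetical):

```bash
nodetool getreplicas -- my_keyspace my_table some_partition_key
```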
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getseeds.html b/src/doc/4.0-alpha2/tools/nodetool/getseeds.html deleted file mode 100644 index b6950393f..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getseeds.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getseeds

-
-
-

Usage

-
NAME
-        nodetool getseeds - Get the list of seed node IPs currently in use,
-        excluding this node's own IP
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getsstables.html b/src/doc/4.0-alpha2/tools/nodetool/getsstables.html deleted file mode 100644 index dd01028b0..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getsstables.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getsstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getsstables

-
-
-

Usage

-
NAME
-        nodetool getsstables - Print the sstable filenames that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getsstables
-                [(-hf | --hex-format)] [--] <keyspace> <cfname> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hf, --hex-format
-            Specify the key in hexadecimal string format
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <key>
-            The keyspace, the column family, and the key
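A hedged example listing the sstables that hold a key, first with a plain-text key and then with a hex-encoded one (all names and the key bytes are hypothetical):

```bash
nodetool getsstables -- my_keyspace my_table some_partition_key
nodetool getsstables --hex-format -- my_keyspace my_table 0013001a
```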
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/getstreamthroughput.html b/src/doc/4.0-alpha2/tools/nodetool/getstreamthroughput.html deleted file mode 100644 index 3ab48c92d..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/getstreamthroughput.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getstreamthroughput - Print the Mb/s throughput cap for
-        streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/gettimeout.html b/src/doc/4.0-alpha2/tools/nodetool/gettimeout.html deleted file mode 100644 index 7ddc28ed7..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/gettimeout.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettimeout

-
-
-

Usage

-
NAME
-        nodetool gettimeout - Print the timeout of the given type in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettimeout [--] <timeout_type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type>
-            The timeout type, one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
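A hedged example printing the read timeout; the type must be one of the values listed above:

```bash
nodetool gettimeout -- read
```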
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/gettraceprobability.html b/src/doc/4.0-alpha2/tools/nodetool/gettraceprobability.html deleted file mode 100644 index 12600416d..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/gettraceprobability.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettraceprobability

-
-
-

Usage

-
NAME
-        nodetool gettraceprobability - Print the current trace probability value
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettraceprobability
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/gossipinfo.html b/src/doc/4.0-alpha2/tools/nodetool/gossipinfo.html deleted file mode 100644 index 2f648ff41..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/gossipinfo.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gossipinfo" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gossipinfo

-
-
-

Usage

-
NAME
-        nodetool gossipinfo - Shows the gossip information for the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gossipinfo
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/handoffwindow.html b/src/doc/4.0-alpha2/tools/nodetool/handoffwindow.html deleted file mode 100644 index a137e7213..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/handoffwindow.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "handoffwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

handoffwindow

-
-
-

Usage

-
NAME
-        nodetool handoffwindow - Print current hinted handoff window
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] handoffwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/help.html b/src/doc/4.0-alpha2/tools/nodetool/help.html deleted file mode 100644 index 7254793d0..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/help.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "help" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

help

-
-
-

Usage

-
NAME
-        nodetool help - Display help information
-
-SYNOPSIS
-        nodetool help [--] [<command>...]
-
-OPTIONS
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <command>
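For example, detailed help for a single subcommand can be printed with (the subcommand name is illustrative):

        nodetool help repair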
-
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/import.html b/src/doc/4.0-alpha2/tools/nodetool/import.html deleted file mode 100644 index d98ca3257..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/import.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "import" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

import

-
-
-

Usage

-
NAME
-        nodetool import - Import new SSTables to the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] import
-                [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-                [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-                [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-                <directory> ...
-
-OPTIONS
-        -c, --no-invalidate-caches
-            Don't invalidate the row cache when importing
-
-        -e, --extended-verify
-            Run an extended verify, verifying all values in the new sstables
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --keep-level
-            Keep the level on the new sstables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick import without verifying sstables, clearing row cache or
-            checking in which data directory to put the file
-
-        -r, --keep-repaired
-            Keep any repaired information from the sstables
-
-        -t, --no-tokens
-            Don't verify that all tokens in the new sstable are owned by the
-            current node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -v, --no-verify
-            Don't verify new sstables
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <directory> ...
-            The keyspace, table name and directories to import sstables from
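For example, a hypothetical import of sstables copied into a staging directory, with extended verification enabled (keyspace, table and path are illustrative):

        nodetool import -e -- my_keyspace my_table /tmp/sstables_to_load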
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/info.html b/src/doc/4.0-alpha2/tools/nodetool/info.html deleted file mode 100644 index 6c94b683e..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/info.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "info" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

info

-
-
-

Usage

-
NAME
-        nodetool info - Print node information (uptime, load, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] info [(-T | --tokens)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -T, --tokens
-            Display all tokens
-
-        -u <username>, --username <username>
-            Remote jmx agent username
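For example, to include the node's tokens in the output:

        nodetool info -T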
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/invalidatecountercache.html b/src/doc/4.0-alpha2/tools/nodetool/invalidatecountercache.html deleted file mode 100644 index 417b069f8..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/invalidatecountercache.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatecountercache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatecountercache

-
-
-

Usage

-
NAME
-        nodetool invalidatecountercache - Invalidate the counter cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatecountercache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/invalidatekeycache.html b/src/doc/4.0-alpha2/tools/nodetool/invalidatekeycache.html deleted file mode 100644 index 5a528a5ea..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/invalidatekeycache.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatekeycache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatekeycache

-
-
-

Usage

-
NAME
-        nodetool invalidatekeycache - Invalidate the key cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatekeycache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/invalidaterowcache.html b/src/doc/4.0-alpha2/tools/nodetool/invalidaterowcache.html deleted file mode 100644 index 4ccff4cb0..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/invalidaterowcache.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidaterowcache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidaterowcache

-
-
-

Usage

-
NAME
-        nodetool invalidaterowcache - Invalidate the row cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidaterowcache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/join.html b/src/doc/4.0-alpha2/tools/nodetool/join.html deleted file mode 100644 index 98aecea15..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/join.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "join" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

join

-
-
-

Usage

-
NAME
-        nodetool join - Join the ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] join
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/listsnapshots.html b/src/doc/4.0-alpha2/tools/nodetool/listsnapshots.html deleted file mode 100644 index ffb49a2d5..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/listsnapshots.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "listsnapshots" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

listsnapshots

-
-
-

Usage

-
NAME
-        nodetool listsnapshots - Lists all the snapshots along with the size on
-        disk and true size.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] listsnapshots
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/move.html b/src/doc/4.0-alpha2/tools/nodetool/move.html deleted file mode 100644 index 6274e2fa8..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/move.html +++ /dev/null @@ -1,132 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "move" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

move

-
-
-

Usage

-
NAME
-        nodetool move - Move node on the token ring to a new token
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] move [--] <new token>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <new token>
-            The new token.
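For example, moving the node to a new token; the leading "--" keeps a negative token value from being read as an option (the token value is illustrative):

        nodetool move -- -9223372036854775808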
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/netstats.html b/src/doc/4.0-alpha2/tools/nodetool/netstats.html deleted file mode 100644 index 0674b13a0..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/netstats.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "netstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

netstats

-
-
-

Usage

-
NAME
-        nodetool netstats - Print network information on provided host
-        (connecting node by default)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] netstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
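For example, to print streaming and connection information in human-readable units:

        nodetool netstats -H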
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/nodetool.html b/src/doc/4.0-alpha2/tools/nodetool/nodetool.html deleted file mode 100644 index f7673b75e..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/nodetool.html +++ /dev/null @@ -1,242 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-
-

Usage

-
-
usage: nodetool [(-p <port> | --port <port>)] [(-pp | --print-port)]
        [(-pw <password> | --password <password>)]
        [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
        [(-u <username> | --username <username>)] [(-h <host> | --host <host>)]
        <command> [<args>]
-
-

The most commonly used nodetool commands are:

-
-

assassinate - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode

-

bootstrap - Monitor/manage node’s bootstrap process

-

cleanup - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces

-

clearsnapshot - Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots

-

clientstats - Print information about connected clients

-

compact - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables

-

compactionhistory - Print history of compaction

-

compactionstats - Print statistics on compactions

-

decommission - Decommission the node I am connecting to

-

describecluster - Print the name, snitch, partitioner and schema version of a cluster

-

describering - Shows the token ranges info of a given keyspace

-

disableauditlog - Disable the audit log

-

disableautocompaction - Disable autocompaction for the given keyspace and table

-

disablebackup - Disable incremental backup

-

disablebinary - Disable native transport (binary protocol)

-

disablefullquerylog - Disable the full query log

-

disablegossip - Disable gossip (effectively marking the node down)

-

disablehandoff - Disable storing hinted handoffs

-

disablehintsfordc - Disable hints for a data center

-

disableoldprotocolversions - Disable old protocol versions

-

drain - Drain the node (stop accepting writes and flush all tables)

-

enableauditlog - Enable the audit log

-

enableautocompaction - Enable autocompaction for the given keyspace and table

-

enablebackup - Enable incremental backup

-

enablebinary - Reenable native transport (binary protocol)

-

enablefullquerylog - Enable full query logging, defaults for the options are configured in cassandra.yaml

-

enablegossip - Reenable gossip

-

enablehandoff - Reenable future hints storing on the current node

-

enablehintsfordc - Enable hints for a data center that was previously disabled

-

enableoldprotocolversions - Enable old protocol versions

-

failuredetector - Shows the failure detector information for the cluster

-

flush - Flush one or more tables

-

garbagecollect - Remove deleted data from one or more tables

-

gcstats - Print GC Statistics

-

getbatchlogreplaythrottle - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster.

-

getcompactionthreshold - Print min and max compaction thresholds for a given table

-

getcompactionthroughput - Print the MB/s throughput cap for compaction in the system

-

getconcurrentcompactors - Get the number of concurrent compactors in the system.

-

getconcurrentviewbuilders - Get the number of concurrent view builders in the system

-

getendpoints - Print the end points that owns the key

-

getinterdcstreamthroughput - Print the Mb/s throughput cap for inter-datacenter streaming in the system

-

getlogginglevels - Get the runtime logging levels

-

getmaxhintwindow - Print the max hint window in ms

-

getreplicas - Print replicas for a given key

-

getseeds - Get the currently in use seed node IP list excluding the node IP

-

getsstables - Print the sstable filenames that own the key

-

getstreamthroughput - Print the Mb/s throughput cap for streaming in the system

-

gettimeout - Print the timeout of the given type in ms

-

gettraceprobability - Print the current trace probability value

-

gossipinfo - Shows the gossip information for the cluster

-

handoffwindow - Print current hinted handoff window

-

help - Display help information

-

import - Import new SSTables to the system

-

info - Print node information (uptime, load, …)

-

invalidatecountercache - Invalidate the counter cache

-

invalidatekeycache - Invalidate the key cache

-

invalidaterowcache - Invalidate the row cache

-

join - Join the ring

-

listsnapshots - Lists all the snapshots along with the size on disk and true size.

-

move - Move node on the token ring to a new token

-

netstats - Print network information on provided host (connecting node by default)

-

pausehandoff - Pause hints delivery process

-

profileload - Low footprint profiling of activity for a period of time

-

proxyhistograms - Print statistic histograms for network operations

-

rangekeysample - Shows the sampled keys held across all keyspaces

-

rebuild - Rebuild data by streaming from other nodes (similarly to bootstrap)

-

rebuild_index - A full rebuild of native secondary indexes for a given table

-

refresh - Load newly placed SSTables to the system without restart

-

refreshsizeestimates - Refresh system.size_estimates

-

reloadlocalschema - Reload local node schema from system tables

-

reloadseeds - Reload the seed node list from the seed node provider

-

reloadssl - Signals Cassandra to reload SSL certificates

-

reloadtriggers - Reload trigger classes

-

relocatesstables - Relocates sstables to the correct disk

-

removenode - Show status of current node removal, force completion of pending removal or remove provided ID

-

repair - Repair one or more tables

-

repair_admin - list and fail incremental repair sessions

-

replaybatchlog - Kick off batchlog replay and wait for finish

-

resetfullquerylog - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX

-

resetlocalschema - Reset node’s local schema and resync

-

resumehandoff - Resume hints delivery process

-

ring - Print information about the token ring

-

scrub - Scrub (rebuild sstables for) one or more tables

-

setbatchlogreplaythrottle - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster.

-

setcachecapacity - Set global key, row, and counter cache capacities (in MB units)

-

setcachekeystosave - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable

-

setcompactionthreshold - Set min and max compaction thresholds for a given table

-

setcompactionthroughput - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling

-

setconcurrentcompactors - Set number of concurrent compactors in the system.

-

setconcurrentviewbuilders - Set the number of concurrent view builders in the system

-

sethintedhandoffthrottlekb - Set hinted handoff throttle in kb per second, per delivery thread.

-

setinterdcstreamthroughput - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling

-

setlogginglevel - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters.

-

setmaxhintwindow - Set the specified max hint window in ms

-

setstreamthroughput - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling

-

settimeout - Set the specified timeout in ms, or 0 to disable timeout

-

settraceprobability - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default

-

snapshot - Take a snapshot of specified keyspaces or a snapshot of the specified table

-

status - Print cluster information (state, load, IDs, …)

-

statusautocompaction - status of autocompaction of the given keyspace and table

-

statusbackup - Status of incremental backup

-

statusbinary - Status of native transport (binary protocol)

-

statusgossip - Status of gossip

-

statushandoff - Status of storing future hints on the current node

-

stop - Stop compaction

-

stopdaemon - Stop cassandra daemon

-

tablehistograms - Print statistic histograms for a given table

-

tablestats - Print statistics on tables

-

toppartitions - Sample and print the most active partitions

-

tpstats - Print usage statistics of thread pools

-

truncatehints - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified.

-

upgradesstables - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version)

-

verify - Verify (check data checksum for) one or more tables

-

version - Print cassandra version

-

viewbuildstatus - Show progress of a materialized view build

-
-

See ‘nodetool help <command>’ for more information on a specific command.

-
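For example, a complete invocation that combines the global connection options with a subcommand might look like this (host, port and credentials are illustrative):

        nodetool -h 10.0.0.1 -p 7199 -u cassandra -pw cassandra status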
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/pausehandoff.html b/src/doc/4.0-alpha2/tools/nodetool/pausehandoff.html deleted file mode 100644 index 31378e73e..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/pausehandoff.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "pausehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

pausehandoff

-
-
-

Usage

-
NAME
-        nodetool pausehandoff - Pause hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] pausehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/profileload.html b/src/doc/4.0-alpha2/tools/nodetool/profileload.html deleted file mode 100644 index efcc3198b..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/profileload.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "profileload" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

profileload

-
-
-

Usage

-
NAME
-        nodetool profileload - Low footprint profiling of activity for a period
-        of time
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] profileload [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
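For example, a hypothetical ten-second sampling run against a single table (keyspace and table names are illustrative; the duration is given in milliseconds):

        nodetool profileload -k 20 -s 512 -- my_keyspace my_table 10000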
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/proxyhistograms.html b/src/doc/4.0-alpha2/tools/nodetool/proxyhistograms.html deleted file mode 100644 index b724ed79f..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/proxyhistograms.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "proxyhistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

proxyhistograms

-
-
-

Usage

-
NAME
-        nodetool proxyhistograms - Print statistic histograms for network
-        operations
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] proxyhistograms
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/rangekeysample.html b/src/doc/4.0-alpha2/tools/nodetool/rangekeysample.html deleted file mode 100644 index 64fc707f9..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/rangekeysample.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rangekeysample" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rangekeysample

-
-
-

Usage

-
NAME
-        nodetool rangekeysample - Shows the sampled keys held across all
-        keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rangekeysample
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/rebuild.html b/src/doc/4.0-alpha2/tools/nodetool/rebuild.html deleted file mode 100644 index c3aa1170a..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/rebuild.html +++ /dev/null @@ -1,149 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild

-
-
-

Usage

-
NAME
-        nodetool rebuild - Rebuild data by streaming from other nodes (similarly
-        to bootstrap)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild
-                [(-ks <specific_keyspace> | --keyspace <specific_keyspace>)]
-                [(-s <specific_sources> | --sources <specific_sources>)]
-                [(-ts <specific_tokens> | --tokens <specific_tokens>)] [--]
-                <src-dc-name>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -ks <specific_keyspace>, --keyspace <specific_keyspace>
-            Use -ks to rebuild specific keyspace.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <specific_sources>, --sources <specific_sources>
-            Use -s to specify hosts that this node should stream from when -ts
-            is used. Multiple hosts should be separated using commas (e.g.
-            127.0.0.1,127.0.0.2,...)
-
-        -ts <specific_tokens>, --tokens <specific_tokens>
-            Use -ts to rebuild specific token ranges, in the format of "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]".
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <src-dc-name>
-            Name of DC from which to select sources for streaming. By default,
-            pick any DC
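For example, a hypothetical rebuild of one keyspace streamed from a named source datacenter (keyspace and DC names are illustrative):

        nodetool rebuild -ks my_keyspace -- DC1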
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/rebuild_index.html b/src/doc/4.0-alpha2/tools/nodetool/rebuild_index.html deleted file mode 100644 index ecbb31236..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/rebuild_index.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild_index" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild_index

-
-
-

Usage

-
NAME
-        nodetool rebuild_index - A full rebuild of native secondary indexes for
-        a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild_index [--] <keyspace>
-                <table> <indexName...>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <indexName...>
-            The keyspace and table name followed by a list of index names
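For example, rebuilding two secondary indexes on one table (all names are illustrative):

        nodetool rebuild_index -- my_keyspace my_table my_index_a my_index_b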
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/refresh.html b/src/doc/4.0-alpha2/tools/nodetool/refresh.html deleted file mode 100644 index 3024c418f..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/refresh.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refresh" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refresh

-
-
-

Usage

-
NAME
-        nodetool refresh - Load newly placed SSTables to the system without
-        restart
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refresh [--] <keyspace>
-                <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table>
-            The keyspace and table name
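For example, loading newly placed sstables for a single table without restarting the node (names are illustrative):

        nodetool refresh -- my_keyspace my_table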
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/refreshsizeestimates.html b/src/doc/4.0-alpha2/tools/nodetool/refreshsizeestimates.html deleted file mode 100644 index 9daf0dca6..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/refreshsizeestimates.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refreshsizeestimates" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refreshsizeestimates

-
-
-

Usage

-
NAME
-        nodetool refreshsizeestimates - Refresh system.size_estimates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refreshsizeestimates
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/reloadlocalschema.html b/src/doc/4.0-alpha2/tools/nodetool/reloadlocalschema.html deleted file mode 100644 index aee3ab533..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/reloadlocalschema.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadlocalschema

-
-
-

Usage

-
NAME
-        nodetool reloadlocalschema - Reload local node schema from system tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/reloadseeds.html b/src/doc/4.0-alpha2/tools/nodetool/reloadseeds.html deleted file mode 100644 index 840dfe172..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/reloadseeds.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadseeds

-
-
-

Usage

-
NAME
-        nodetool reloadseeds - Reload the seed node list from the seed node
-        provider
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/reloadssl.html b/src/doc/4.0-alpha2/tools/nodetool/reloadssl.html deleted file mode 100644 index c9a705c4a..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/reloadssl.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadssl" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadssl

-
-
-

Usage

-
NAME
-        nodetool reloadssl - Signals Cassandra to reload SSL certificates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadssl
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/reloadtriggers.html b/src/doc/4.0-alpha2/tools/nodetool/reloadtriggers.html deleted file mode 100644 index 6dd7aa557..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/reloadtriggers.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadtriggers" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadtriggers

-
-
-

Usage

-
NAME
-        nodetool reloadtriggers - Reload trigger classes
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadtriggers
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/relocatesstables.html b/src/doc/4.0-alpha2/tools/nodetool/relocatesstables.html deleted file mode 100644 index fb309c24a..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/relocatesstables.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "relocatesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

relocatesstables

-
-
-

Usage

-
NAME
-        nodetool relocatesstables - Relocates sstables to the correct disk
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] relocatesstables
-                [(-j <jobs> | --jobs <jobs>)] [--] <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to relocate simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table>
-            The keyspace and table name
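For example, relocating the sstables of one table using two parallel jobs (names are illustrative):

        nodetool relocatesstables -j 2 -- my_keyspace my_table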
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/removenode.html b/src/doc/4.0-alpha2/tools/nodetool/removenode.html deleted file mode 100644 index 52562a736..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/removenode.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "removenode" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

removenode

-
-
-

Usage

-
NAME
-        nodetool removenode - Show status of current node removal, force
-        completion of pending removal or remove provided ID
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] removenode [--]
-                <status>|<force>|<ID>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <status>|<force>|<ID>
-            Show status of current node removal, force completion of pending
-            removal, or remove provided ID
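For example, the current removal status can be checked and a stuck removal forced to completion with:

        nodetool removenode status
        nodetool removenode force

A specific dead node is removed by passing its host ID (as reported by 'nodetool status') in place of 'status' or 'force'.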
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/repair.html b/src/doc/4.0-alpha2/tools/nodetool/repair.html deleted file mode 100644 index 69ea017ca..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/repair.html +++ /dev/null @@ -1,197 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair

-
-
-

Usage

-
NAME
-        nodetool repair - Repair one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair
-                [(-dc <specific_dc> | --in-dc <specific_dc>)...]
-                [(-dcpar | --dc-parallel)] [(-et <end_token> | --end-token <end_token>)]
-                [(-force | --force)] [(-full | --full)]
-                [(-hosts <specific_host> | --in-hosts <specific_host>)...]
-                [(-j <job_threads> | --job-threads <job_threads>)]
-                [(-local | --in-local-dc)] [(-os | --optimise-streams)] [(-pl | --pull)]
-                [(-pr | --partitioner-range)] [(-prv | --preview)]
-                [(-seq | --sequential)]
-                [(-st <start_token> | --start-token <start_token>)] [(-tr | --trace)]
-                [(-vd | --validate)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -dc <specific_dc>, --in-dc <specific_dc>
-            Use -dc to repair specific datacenters
-
-        -dcpar, --dc-parallel
-            Use -dcpar to repair data centers in parallel.
-
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which repair range ends
-
-        -force, --force
-            Use -force to filter out down endpoints
-
-        -full, --full
-            Use -full to issue a full repair.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hosts <specific_host>, --in-hosts <specific_host>
-            Use -hosts to repair specific hosts
-
-        -j <job_threads>, --job-threads <job_threads>
-            Number of threads to run repair jobs. Usually this means number of
-            CFs to repair concurrently. WARNING: increasing this puts more load
-            on repairing nodes, so be careful. (default: 1, max: 4)
-
-        -local, --in-local-dc
-            Use -local to only repair against nodes in the same datacenter
-
-        -os, --optimise-streams
-            Use --optimise-streams to try to reduce the number of streams we do
-            (EXPERIMENTAL, see CASSANDRA-3200).
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pl, --pull
-            Use --pull to perform a one way repair where data is only streamed
-            from a remote node to this node.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pr, --partitioner-range
-            Use -pr to repair only the first range returned by the partitioner
-
-        -prv, --preview
-            Determine ranges and amount of data to be streamed, but don't
-            actually perform repair
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -seq, --sequential
-            Use -seq to carry out a sequential repair
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the repair range starts
-
-        -tr, --trace
-            Use -tr to trace the repair. Traces are logged to
-            system_traces.events.
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -vd, --validate
-            Checks that repaired data is in sync between nodes. Out of sync
-            repaired data indicates a full repair should be run.
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
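As a rough usage sketch for the repair synopsis above (not part of the removed page; the host, port, and keyspace name are placeholder values):

```bash
# Illustrative only: run a full repair limited to this node's primary token ranges.
nodetool -h 127.0.0.1 -p 7199 repair -full -pr my_keyspace
```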
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/repair_admin.html b/src/doc/4.0-alpha2/tools/nodetool/repair_admin.html
deleted file mode 100644
index 57c2957a1..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/repair_admin.html
+++ /dev/null
@@ -1,138 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "repair_admin"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

repair_admin

-
-
-

Usage

-
NAME
-        nodetool repair_admin - list and fail incremental repair sessions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair_admin [(-a | --all)]
-                [(-f | --force)] [(-l | --list)] [(-x <cancel> | --cancel <cancel>)]
-
-OPTIONS
-        -a, --all
-            include completed and failed sessions
-
-        -f, --force
-            cancel repair session from a node other than the repair coordinator.
-            Attempting to cancel FINALIZED or FAILED sessions is an error.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --list
-            list repair sessions (default behavior)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -x <cancel>, --cancel <cancel>
-            cancel an incremental repair session
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/replaybatchlog.html b/src/doc/4.0-alpha2/tools/nodetool/replaybatchlog.html
deleted file mode 100644
index 2149675af..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/replaybatchlog.html
+++ /dev/null
@@ -1,124 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "replaybatchlog"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

replaybatchlog

-
-
-

Usage

-
NAME
-        nodetool replaybatchlog - Kick off batchlog replay and wait for finish
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] replaybatchlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/resetfullquerylog.html b/src/doc/4.0-alpha2/tools/nodetool/resetfullquerylog.html
deleted file mode 100644
index 86ffcfd2a..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/resetfullquerylog.html
+++ /dev/null
@@ -1,126 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "resetfullquerylog"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

resetfullquerylog

-
-
-

Usage

-
NAME
-        nodetool resetfullquerylog - Stop the full query log and clean files in
-        the configured full query log directory from cassandra.yaml as well as
-        JMX
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetfullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/resetlocalschema.html b/src/doc/4.0-alpha2/tools/nodetool/resetlocalschema.html
deleted file mode 100644
index 2345fd190..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/resetlocalschema.html
+++ /dev/null
@@ -1,124 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "resetlocalschema"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

resetlocalschema

-
-
-

Usage

-
NAME
-        nodetool resetlocalschema - Reset node's local schema and resync
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/resumehandoff.html b/src/doc/4.0-alpha2/tools/nodetool/resumehandoff.html
deleted file mode 100644
index 8355c5b36..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/resumehandoff.html
+++ /dev/null
@@ -1,124 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "resumehandoff"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

resumehandoff

-
-
-

Usage

-
NAME
-        nodetool resumehandoff - Resume hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resumehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/ring.html b/src/doc/4.0-alpha2/tools/nodetool/ring.html
deleted file mode 100644
index 476504c6e..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/ring.html
+++ /dev/null
@@ -1,137 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "ring"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

ring

-
-
-

Usage

-
NAME
-        nodetool ring - Print information about the token ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] ring [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            Specify a keyspace for accurate ownership information (topology
-            awareness)
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/scrub.html b/src/doc/4.0-alpha2/tools/nodetool/scrub.html
deleted file mode 100644
index 8f59b064b..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/scrub.html
+++ /dev/null
@@ -1,158 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "scrub"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

scrub

-
-
-

Usage

-
NAME
-        nodetool scrub - Scrub (rebuild sstables for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] scrub
-                [(-j <jobs> | --jobs <jobs>)] [(-n | --no-validate)]
-                [(-ns | --no-snapshot)] [(-r | --reinsert-overflowed-ttl)]
-                [(-s | --skip-corrupted)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to scrub simultanously, set to 0 to use all
-            available compaction threads
-
-        -n, --no-validate
-            Do not validate columns using column validator
-
-        -ns, --no-snapshot
-            Scrubbed CFs will be snapshotted first, if disableSnapshot is false.
-            (default false)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --reinsert-overflowed-ttl
-            Rewrites rows with overflowed expiration date affected by
-            CASSANDRA-14092 with the maximum supported expiration date of
-            2038-01-19T03:14:06+00:00. The rows are rewritten with the original
-            timestamp incremented by one millisecond to override/supersede any
-            potential tombstone that may have been generated during compaction
-            of the affected rows.
-
-        -s, --skip-corrupted
-            Skip corrupted partitions even when scrubbing counter tables.
-            (default false)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
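As a hedged illustration of the scrub synopsis above (not part of the removed page; the keyspace and table names are placeholders):

```bash
# Illustrative only: scrub a single table, skipping corrupted partitions.
nodetool scrub --skip-corrupted -- my_keyspace my_table
```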
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setbatchlogreplaythrottle.html b/src/doc/4.0-alpha2/tools/nodetool/setbatchlogreplaythrottle.html
deleted file mode 100644
index 836c7a710..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setbatchlogreplaythrottle.html
+++ /dev/null
@@ -1,135 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setbatchlogreplaythrottle"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool setbatchlogreplaythrottle - Set batchlog replay throttle in KB
-        per second, or 0 to disable throttling. This will be reduced
-        proportionally to the number of nodes in the cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setbatchlogreplaythrottle [--]
-                <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_kb_per_sec>
-            Value in KB per second, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setcachecapacity.html b/src/doc/4.0-alpha2/tools/nodetool/setcachecapacity.html
deleted file mode 100644
index eed0cc8a2..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setcachecapacity.html
+++ /dev/null
@@ -1,134 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setcachecapacity"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setcachecapacity

-
-
-

Usage

-
NAME
-        nodetool setcachecapacity - Set global key, row, and counter cache
-        capacities (in MB units)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachecapacity [--]
-                <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-            Key cache, row cache, and counter cache (in MB)
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setcachekeystosave.html b/src/doc/4.0-alpha2/tools/nodetool/setcachekeystosave.html
deleted file mode 100644
index 67ab7178b..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setcachekeystosave.html
+++ /dev/null
@@ -1,136 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setcachekeystosave"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setcachekeystosave

-
-
-

Usage

-
NAME
-        nodetool setcachekeystosave - Set number of keys saved by each cache for
-        faster post-restart warmup. 0 to disable
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachekeystosave [--]
-                <key-cache-keys-to-save> <row-cache-keys-to-save>
-                <counter-cache-keys-to-save>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <key-cache-keys-to-save> <row-cache-keys-to-save>
-        <counter-cache-keys-to-save>
-            The number of keys saved by each cache. 0 to disable
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setcompactionthreshold.html b/src/doc/4.0-alpha2/tools/nodetool/setcompactionthreshold.html
deleted file mode 100644
index 6ced27323..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setcompactionthreshold.html
+++ /dev/null
@@ -1,134 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setcompactionthreshold"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool setcompactionthreshold - Set min and max compaction thresholds
-        for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthreshold [--]
-                <keyspace> <table> <minthreshold> <maxthreshold>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <minthreshold> <maxthreshold>
-            The keyspace, the table, min and max threshold
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setcompactionthroughput.html b/src/doc/4.0-alpha2/tools/nodetool/setcompactionthroughput.html
deleted file mode 100644
index 566a07124..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setcompactionthroughput.html
+++ /dev/null
@@ -1,134 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setcompactionthroughput"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool setcompactionthroughput - Set the MB/s throughput cap for
-        compaction in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in MB, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
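As a hedged illustration of the setcompactionthroughput synopsis above (not part of the removed page):

```bash
# Illustrative only: cap compaction throughput at 16 MB/s, then remove the cap again.
nodetool setcompactionthroughput 16
nodetool setcompactionthroughput 0
```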
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setconcurrency.html b/src/doc/4.0-alpha2/tools/nodetool/setconcurrency.html
deleted file mode 100644
index 76213b6a5..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setconcurrency.html
+++ /dev/null
@@ -1,135 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setconcurrency"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setconcurrency

-
-
-

Usage

-
NAME
-        nodetool setconcurrency - Set maximum concurrency for processing stage
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrency [--]
-                <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-                <maximum-concurrency>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-        <maximum-concurrency>
-            Set concurrency for processing stage
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setconcurrentcompactors.html b/src/doc/4.0-alpha2/tools/nodetool/setconcurrentcompactors.html
deleted file mode 100644
index 5c1ee5460..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setconcurrentcompactors.html
+++ /dev/null
@@ -1,134 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setconcurrentcompactors"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool setconcurrentcompactors - Set number of concurrent compactors
-        in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentcompactors [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Number of concurrent compactors, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setconcurrentviewbuilders.html b/src/doc/4.0-alpha2/tools/nodetool/setconcurrentviewbuilders.html
deleted file mode 100644
index b48850c8f..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setconcurrentviewbuilders.html
+++ /dev/null
@@ -1,134 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setconcurrentviewbuilders"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool setconcurrentviewbuilders - Set the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentviewbuilders [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Number of concurrent view builders, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/sethintedhandoffthrottlekb.html b/src/doc/4.0-alpha2/tools/nodetool/sethintedhandoffthrottlekb.html
deleted file mode 100644
index f96cc8506..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/sethintedhandoffthrottlekb.html
+++ /dev/null
@@ -1,134 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "sethintedhandoffthrottlekb"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

sethintedhandoffthrottlekb

-
-
-

Usage

-
NAME
-        nodetool sethintedhandoffthrottlekb - Set hinted handoff throttle in kb
-        per second, per delivery thread.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sethintedhandoffthrottlekb
-                [--] <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_kb_per_sec>
-            Value in KB per second
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setinterdcstreamthroughput.html b/src/doc/4.0-alpha2/tools/nodetool/setinterdcstreamthroughput.html
deleted file mode 100644
index aa7427d5b..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setinterdcstreamthroughput.html
+++ /dev/null
@@ -1,134 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setinterdcstreamthroughput"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setinterdcstreamthroughput - Set the Mb/s throughput cap for
-        inter-datacenter streaming in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setinterdcstreamthroughput
-                [--] <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setlogginglevel.html b/src/doc/4.0-alpha2/tools/nodetool/setlogginglevel.html
deleted file mode 100644
index fcf45cc88..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setlogginglevel.html
+++ /dev/null
@@ -1,137 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setlogginglevel"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setlogginglevel

-
-
-

Usage

-
NAME
-        nodetool setlogginglevel - Set the log level threshold for a given
-        component or class. Will reset to the initial configuration if called
-        with no parameters.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setlogginglevel [--]
-                <component|class> <level>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <component|class> <level>
-            The component or class to change the level for and the log level
-            threshold to set. Will reset to initial level if omitted. Available
-            components: bootstrap, compaction, repair, streaming, cql, ring
-
-
-
-
-
- - - - -
-
-
-
-
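As a hedged illustration of the setlogginglevel synopsis above (not part of the removed page; the package name is only an example):

```bash
# Illustrative only: raise logging for one package, then reset to the configured levels.
nodetool setlogginglevel org.apache.cassandra.db.compaction DEBUG
nodetool setlogginglevel
```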
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setmaxhintwindow.html b/src/doc/4.0-alpha2/tools/nodetool/setmaxhintwindow.html
deleted file mode 100644
index d1957c051..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setmaxhintwindow.html
+++ /dev/null
@@ -1,133 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setmaxhintwindow"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool setmaxhintwindow - Set the specified max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setmaxhintwindow [--]
-                <value_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_ms>
-            Value of maxhintwindow in ms
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/setstreamthroughput.html b/src/doc/4.0-alpha2/tools/nodetool/setstreamthroughput.html
deleted file mode 100644
index b14547880..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/setstreamthroughput.html
+++ /dev/null
@@ -1,134 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "setstreamthroughput"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

setstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setstreamthroughput - Set the Mb/s throughput cap for streaming
-        in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setstreamthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/settimeout.html b/src/doc/4.0-alpha2/tools/nodetool/settimeout.html
deleted file mode 100644
index 4fc7f0324..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/settimeout.html
+++ /dev/null
@@ -1,137 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "settimeout"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

settimeout

-
-
-

Usage

-
NAME
-        nodetool settimeout - Set the specified timeout in ms, or 0 to disable
-        timeout
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settimeout [--] <timeout_type>
-                <timeout_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <timeout_type> <timeout_in_ms>
-            Timeout type followed by value in ms (0 disables socket streaming
-            timeout). Type should be one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/settraceprobability.html b/src/doc/4.0-alpha2/tools/nodetool/settraceprobability.html
deleted file mode 100644
index 04c21b2b8..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/settraceprobability.html
+++ /dev/null
@@ -1,135 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "settraceprobability"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

settraceprobability

-
-
-

Usage

-
NAME
-        nodetool settraceprobability - Sets the probability for tracing any
-        given request to value. 0 disables, 1 enables for all requests, 0 is the
-        default
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settraceprobability [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Trace probability between 0 and 1 (ex: 0.2)
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/sjk.html b/src/doc/4.0-alpha2/tools/nodetool/sjk.html
deleted file mode 100644
index 6bc80e53c..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/sjk.html
+++ /dev/null
@@ -1,133 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "sjk"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

sjk

-
-
-

Usage

-
NAME
-        nodetool sjk - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk
-        --help' for more information.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sjk [--] [<args>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <args>
-            Arguments passed as is to 'Swiss Java Knife'.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/snapshot.html b/src/doc/4.0-alpha2/tools/nodetool/snapshot.html
deleted file mode 100644
index d2c1f70a7..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/snapshot.html
+++ /dev/null
@@ -1,151 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "snapshot"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

snapshot

-
-
-

Usage

-
NAME
-        nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-        of the specified table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] snapshot
-                [(-cf <table> | --column-family <table> | --table <table>)]
-                [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-                [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-        -cf <table>, --column-family <table>, --table <table>
-            The table name (you must specify one and only one keyspace for using
-            this option)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-            The list of Keyspace.table to take snapshot.(you must not specify
-            only keyspace)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -sf, --skip-flush
-            Do not flush memtables before snapshotting (snapshot will not
-            contain unflushed data)
-
-        -t <tag>, --tag <tag>
-            The name of the snapshot
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces...>]
-            List of keyspaces. By default, all keyspaces
-
-
-
-
-
- - - - -
-
-
-
-
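As a hedged illustration of the snapshot synopsis above (not part of the removed page; the tag and keyspace names are placeholder values):

```bash
# Illustrative only: snapshot one keyspace under a named tag.
nodetool snapshot -t pre_upgrade -- my_keyspace
```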
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/status.html b/src/doc/4.0-alpha2/tools/nodetool/status.html
deleted file mode 100644
index 363de1d20..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/status.html
+++ /dev/null
@@ -1,136 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "status"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

status

-
-
-

Usage

-
NAME
-        nodetool status - Print cluster information (state, load, IDs, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] status [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace>]
-            The keyspace name
-
-
-
-
-
- - - - -
-
-
-
-
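As a hedged illustration of the status synopsis above (not part of the removed page; the keyspace name is a placeholder):

```bash
# Illustrative only: show cluster state with hostnames resolved, scoped to one keyspace
# so that ownership figures are accurate.
nodetool status -r -- my_keyspace
```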
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/statusautocompaction.html b/src/doc/4.0-alpha2/tools/nodetool/statusautocompaction.html
deleted file mode 100644
index 69f4ea738..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/statusautocompaction.html
+++ /dev/null
@@ -1,137 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "statusautocompaction"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

statusautocompaction

-
-
-

Usage

-
NAME
-        nodetool statusautocompaction - status of autocompaction of the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusautocompaction
-                [(-a | --all)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --all
-            Show auto compaction status for each keyspace/table
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/statusbackup.html b/src/doc/4.0-alpha2/tools/nodetool/statusbackup.html
deleted file mode 100644
index 9ea628777..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/statusbackup.html
+++ /dev/null
@@ -1,124 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "statusbackup"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

statusbackup

-
-
-

Usage

-
NAME
-        nodetool statusbackup - Status of incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha2/tools/nodetool/statusbinary.html b/src/doc/4.0-alpha2/tools/nodetool/statusbinary.html
deleted file mode 100644
index 70bbe4f0e..000000000
--- a/src/doc/4.0-alpha2/tools/nodetool/statusbinary.html
+++ /dev/null
@@ -1,124 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "statusbinary"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

statusbinary

-
-
-

Usage

-
NAME
-        nodetool statusbinary - Status of native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
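For illustration only, a sketch of how this command is typically paired with `disablebinary`/`enablebinary`, assuming default JMX connection settings:

```bash
# Stop accepting CQL client connections, verify, then re-enable
nodetool disablebinary
nodetool statusbinary    # should report that the native transport is not running
nodetool enablebinary
nodetool statusbinary
```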
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/statusgossip.html b/src/doc/4.0-alpha2/tools/nodetool/statusgossip.html deleted file mode 100644 index 93e0519ba..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/statusgossip.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusgossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusgossip

-
-
-

Usage

-
NAME
-        nodetool statusgossip - Status of gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusgossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/statushandoff.html b/src/doc/4.0-alpha2/tools/nodetool/statushandoff.html deleted file mode 100644 index 81c8f3be7..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/statushandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statushandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statushandoff

-
-
-

Usage

-
NAME
-        nodetool statushandoff - Status of storing future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statushandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/stop.html b/src/doc/4.0-alpha2/tools/nodetool/stop.html deleted file mode 100644 index a6ec711d7..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/stop.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stop" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stop

-
-
-

Usage

-
NAME
-        nodetool stop - Stop compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stop
-                [(-id <compactionId> | --compaction-id <compactionId>)] [--] <compaction
-                type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -id <compactionId>, --compaction-id <compactionId>
-            Use -id to stop a compaction by the specified id. Ids can be found
-            in the transaction log files whose name starts with compaction_,
-            located in the table transactions folder.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <compaction type>
-            Supported types are COMPACTION, VALIDATION, CLEANUP, SCRUB, VERIFY,
-            INDEX_BUILD
-
-
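Two hedged usage sketches; the compaction id below is a placeholder to be taken from the table's transaction log, as described above:

```bash
# Stop all running validation compactions on the node
nodetool stop VALIDATION

# Stop one specific compaction by id (placeholder value)
nodetool stop -id <compactionId>
```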
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/stopdaemon.html b/src/doc/4.0-alpha2/tools/nodetool/stopdaemon.html deleted file mode 100644 index a17655d99..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/stopdaemon.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stopdaemon" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stopdaemon

-
-
-

Usage

-
NAME
-        nodetool stopdaemon - Stop cassandra daemon
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stopdaemon
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/tablehistograms.html b/src/doc/4.0-alpha2/tools/nodetool/tablehistograms.html deleted file mode 100644 index 7e2bc796b..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/tablehistograms.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablehistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablehistograms

-
-
-

Usage

-
NAME
-        nodetool tablehistograms - Print statistic histograms for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablehistograms [--]
-                [<keyspace> <table> | <keyspace.table>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <table> | <keyspace.table>]
-            The keyspace and table name
-
-
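A usage sketch with a hypothetical `keyspace1.standard1` table; both argument forms shown in the synopsis are accepted:

```bash
nodetool tablehistograms keyspace1 standard1
nodetool tablehistograms keyspace1.standard1
```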
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/tablestats.html b/src/doc/4.0-alpha2/tools/nodetool/tablestats.html deleted file mode 100644 index e6e50b9e3..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/tablestats.html +++ /dev/null @@ -1,168 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablestats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablestats

-
-
-

Usage

-
NAME
-        nodetool tablestats - Print statistics on tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablestats
-                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]
-                [(-s <sort_key> | --sort <sort_key>)] [(-t <top> | --top <top>)] [--]
-                [<keyspace.table>...]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -i
-            Ignore the list of tables and display the remaining tables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <sort_key>, --sort <sort_key>
-            Sort tables by specified sort key
-            (average_live_cells_per_slice_last_five_minutes,
-            average_tombstones_per_slice_last_five_minutes,
-            bloom_filter_false_positives, bloom_filter_false_ratio,
-            bloom_filter_off_heap_memory_used, bloom_filter_space_used,
-            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,
-            compacted_partition_minimum_bytes,
-            compression_metadata_off_heap_memory_used, dropped_mutations,
-            full_name, index_summary_off_heap_memory_used, local_read_count,
-            local_read_latency_ms, local_write_latency_ms,
-            maximum_live_cells_per_slice_last_five_minutes,
-            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,
-            memtable_data_size, memtable_off_heap_memory_used,
-            memtable_switch_count, number_of_partitions_estimate,
-            off_heap_memory_used_total, pending_flushes, percent_repaired,
-            read_latency, reads, space_used_by_snapshots_total, space_used_live,
-            space_used_total, sstable_compression_ratio, sstable_count,
-            table_name, write_latency, writes)
-
-        -t <top>, --top <top>
-            Show only the top K tables for the sort key (specify the number K of
-            tables to be shown)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace.table>...]
-            List of tables (or keyspace) names
-
-
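A couple of hedged examples, assuming a hypothetical `keyspace1.standard1` table exists on the node:

```bash
# Human-readable sizes for a single table
nodetool tablestats -H keyspace1.standard1

# Top 5 tables by read latency, emitted as JSON
nodetool tablestats -F json -s read_latency -t 5
```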
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/toppartitions.html b/src/doc/4.0-alpha2/tools/nodetool/toppartitions.html deleted file mode 100644 index ea64821bc..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/toppartitions.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "toppartitions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

toppartitions

-
-
-

Usage

-
NAME
-        nodetool toppartitions - Sample and print the most active partitions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] toppartitions [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
-
-
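An illustrative invocation, assuming a hypothetical `keyspace1.standard1` table; the trailing argument is the sampling duration in milliseconds:

```bash
# Sample keyspace1.standard1 for 10 seconds and list the 20 most active partitions
nodetool toppartitions -k 20 keyspace1 standard1 10000
```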
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/tpstats.html b/src/doc/4.0-alpha2/tools/nodetool/tpstats.html deleted file mode 100644 index 26a96d7ba..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/tpstats.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tpstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tpstats

-
-
-

Usage

-
NAME
-        nodetool tpstats - Print usage statistics of thread pools
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tpstats
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
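Example invocations; no arguments beyond the optional output format are required:

```bash
# Plain-text thread pool statistics
nodetool tpstats

# Same data as JSON, e.g. for scripted collection
nodetool tpstats -F json
```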
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/truncatehints.html b/src/doc/4.0-alpha2/tools/nodetool/truncatehints.html deleted file mode 100644 index 60f4f97fb..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/truncatehints.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "truncatehints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

truncatehints

-
-
-

Usage

-
NAME
-        nodetool truncatehints - Truncate all hints on the local node, or
-        truncate hints for the endpoint(s) specified.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] truncatehints [--] [endpoint
-                ... ]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [endpoint ... ]
-            Endpoint address(es) to delete hints for, either ip address
-            ("127.0.0.1") or hostname
-
-
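A hedged sketch; the endpoint address below is a placeholder for a real node in your cluster:

```bash
# Drop every hint stored on the local node
nodetool truncatehints

# Drop only the hints queued for one endpoint
nodetool truncatehints 192.168.1.10
```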
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/upgradesstables.html b/src/doc/4.0-alpha2/tools/nodetool/upgradesstables.html deleted file mode 100644 index 153960ba2..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/upgradesstables.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "upgradesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

upgradesstables

-
-
-

Usage

-
NAME
-        nodetool upgradesstables - Rewrite sstables (for the requested tables)
-        that are not on the current version (thus upgrading them to said current
-        version)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] upgradesstables
-                [(-a | --include-all-sstables)] [(-j <jobs> | --jobs <jobs>)] [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --include-all-sstables
-            Use -a to include all sstables, even those already on the current
-            version
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to upgrade simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
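Usage sketches, assuming a hypothetical `keyspace1.standard1` table; this is typically run after a major upgrade so old-format sstables get rewritten:

```bash
# Rewrite old-format sstables for one table, two at a time
nodetool upgradesstables -j 2 keyspace1 standard1

# Rewrite every sstable, even those already on the current version
nodetool upgradesstables --include-all-sstables
```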
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/verify.html b/src/doc/4.0-alpha2/tools/nodetool/verify.html deleted file mode 100644 index a25774623..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/verify.html +++ /dev/null @@ -1,153 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "verify" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

verify

-
-
-

Usage

-
NAME
-        nodetool verify - Verify (check data checksum for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] verify
-                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]
-                [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)] [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -c, --check-version
-            Also check that all sstables are the latest version
-
-        -d, --dfp
-            Invoke the disk failure policy if a corrupt sstable is found
-
-        -e, --extended-verify
-            Verify each cell data, beyond simply checking sstable checksums
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick check - avoid reading all data to verify checksums
-
-        -r, --rsc
-            Mutate the repair status on corrupt sstables
-
-        -t, --check-tokens
-            Verify that all tokens in sstables are owned by this node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
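Two hedged examples against a hypothetical `keyspace1.standard1` table:

```bash
# Quick checksum verification
nodetool verify keyspace1 standard1

# Extended verification that checks every cell, not just the sstable checksums
nodetool verify --extended-verify keyspace1 standard1
```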
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/version.html b/src/doc/4.0-alpha2/tools/nodetool/version.html deleted file mode 100644 index 911bee678..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/version.html +++ /dev/null @@ -1,124 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "version" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

version

-
-
-

Usage

-
NAME
-        nodetool version - Print cassandra version
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] version
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/nodetool/viewbuildstatus.html b/src/doc/4.0-alpha2/tools/nodetool/viewbuildstatus.html deleted file mode 100644 index 3aaf981bb..000000000 --- a/src/doc/4.0-alpha2/tools/nodetool/viewbuildstatus.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "viewbuildstatus" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

viewbuildstatus

-
-
-

Usage

-
NAME
-        nodetool viewbuildstatus - Show progress of a materialized view build
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] viewbuildstatus [--]
-                <keyspace> <view> | <keyspace.view>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <view> | <keyspace.view>
-            The keyspace and view name
-
-
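An illustrative call with a hypothetical keyspace and materialized view name; both argument forms from the synopsis are accepted:

```bash
nodetool viewbuildstatus keyspace1 user_by_email
nodetool viewbuildstatus keyspace1.user_by_email
```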
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/index.html b/src/doc/4.0-alpha2/tools/sstable/index.html deleted file mode 100644 index 0da4d7269..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/index.html +++ /dev/null @@ -1,228 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "SSTable Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

SSTable Tools

-

This section describes the functionality of the various sstable tools.

-

Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped.

-
- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstabledump.html b/src/doc/4.0-alpha2/tools/sstable/sstabledump.html deleted file mode 100644 index 2c7b6ae58..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstabledump.html +++ /dev/null @@ -1,403 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstabledump" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstabledump

-

Dump contents of a given SSTable to standard output in JSON format.

-

You must supply exactly one sstable.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstabledump <options> <sstable file path>

-d          CQL row per line internal representation
-e          Enumerate partition keys only
-k <arg>    Partition key
-x <arg>    Excluded partition key(s)
-t          Print raw timestamps instead of iso8601 date strings
-l          Output each row as a separate JSON object
-

If necessary, use sstableutil first to find out the sstables used by a table.

-
-
-

Dump entire table

-

Dump the entire table without any options.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26
-
-cat eventlog_dump_2018Jul26
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-]
-
-
-
-
-

Dump table in a more manageable format

-

Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. ref: https://issues.apache.org/jira/browse/CASSANDRA-13848

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines
-
-cat eventlog_dump_2018Jul26_justlines
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Dump only keys

-

Dump only the keys by using the -e option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys
-
-cat eventlog_dump_2018Jul26_justkeys
-[ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ]
-
-
-
-
-

Dump row for a single key

-

Dump a single key using the -k option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey
-
-cat eventlog_dump_2018Jul26_singlekey
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Exclude a key or keys in dump of rows

-

Dump a table except for the rows excluded with the -x option. Multiple keys can be used.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e  > eventlog_dump_2018Jul26_excludekeys
-
-cat eventlog_dump_2018Jul26_excludekeys
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display raw timestamps

-

By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times
-
-cat eventlog_dump_2018Jul26_times
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "1532118147028809" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display internal structure in output

-

Dump the table in a format that reflects the internal structure.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d
-
-cat eventlog_dump_2018Jul26_d
-[3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]:  | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711]
-[d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]:  | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522]
-[cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]:  | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809]
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstableexpiredblockers.html b/src/doc/4.0-alpha2/tools/sstable/sstableexpiredblockers.html deleted file mode 100644 index 4ffcc7502..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstableexpiredblockers.html +++ /dev/null @@ -1,148 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableexpiredblockers" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableexpiredblockers

-

During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable.

-

This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-10015

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableexpiredblockers <keyspace> <table>

-
-
-

Output blocked sstables

-

If sstables exist for the table, but no sstables have older data than the newest tombstone in an expired sstable, the script will return nothing.

-

Otherwise, the script will return <sstable> blocks <#> expired sstables from getting dropped followed by a list of the blocked sstables.

-

Example:

-
sstableexpiredblockers keyspace1 standard1
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstablelevelreset.html b/src/doc/4.0-alpha2/tools/sstable/sstablelevelreset.html deleted file mode 100644 index 91d39cd97..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstablelevelreset.html +++ /dev/null @@ -1,174 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablelevelreset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablelevelreset

-

If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration.

-

See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5271

-
-

Usage

-

sstablelevelreset --really-reset <keyspace> <table>

-

The --really-reset flag is required to ensure this intrusive command is not run accidentally.

-
-
-

Table not found

-

If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error.

-

Example:

-
ColumnFamily not found: keyspace/evenlog.
-
-
-
-
-

Table has no sstables

-

Example:

-
Found no sstables, did you give the correct keyspace/table?
-
-
-
-
-

Table already at level 0

-

The script will not set the level if it is already set to 0.

-

Example:

-
Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0
-
-
-
-
-

Table levels reduced to 0

-

If the level is not already 0, then this will reset it to 0.

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 1
-
-sstablelevelreset --really-reset keyspace eventlog
-Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 0
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstableloader.html b/src/doc/4.0-alpha2/tools/sstable/sstableloader.html deleted file mode 100644 index 5ebfaae8e..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstableloader.html +++ /dev/null @@ -1,408 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableloader" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableloader

-

Bulk-load the sstables found in the directory <dir_path> to the configured cluster. The parent directories of <dir_path> are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files.

-

Several of the options listed below don’t work quite as intended, and in those cases, workarounds are mentioned for specific use cases.

-

To avoid having the sstable files that are being loaded compacted while they are read, place the files in an alternate keyspace/table path rather than in the data directory.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-1278

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableloader <options> <dir_path>

-d, --nodes <initial hosts>                          Required. Try to connect to these hosts (comma-separated) initially for ring information
-u, --username <username>                            username for Cassandra authentication
-pw, --password <password>                           password for Cassandra authentication
-p, --port <native transport port>                   port used for native connection (default 9042)
-sp, --storage-port <storage port>                   port used for internode communication (default 7000)
-ssp, --ssl-storage-port <ssl storage port>          port used for TLS internode communication (default 7001)
--no-progress                                        don't display progress
-t, --throttle <throttle>                            throttle speed in Mbits (default unlimited)
-idct, --inter-dc-throttle <inter-dc-throttle>       inter-datacenter throttle speed in Mbits (default unlimited)
-cph, --connections-per-host <connectionsPerHost>    number of concurrent connections-per-host
-i, --ignore <NODES>                                 don't stream to this (comma separated) list of nodes
-alg, --ssl-alg <ALGORITHM>                          Client SSL: algorithm (default: SunX509)
-ciphers, --ssl-ciphers <CIPHER-SUITES>              Client SSL: comma-separated list of encryption suites to use
-ks, --keystore <KEYSTORE>                           Client SSL: full path to keystore
-kspw, --keystore-password <KEYSTORE-PASSWORD>       Client SSL: password of the keystore
-st, --store-type <STORE-TYPE>                       Client SSL: type of store
-ts, --truststore <TRUSTSTORE>                       Client SSL: full path to truststore
-tspw, --truststore-password <TRUSTSTORE-PASSWORD>   Client SSL: password of the truststore
-prtcl, --ssl-protocol <PROTOCOL>                    Client SSL: connections protocol to use (default: TLS)
-ap, --auth-provider <auth provider>                 custom AuthProvider class name for cassandra authentication
-f, --conf-path <path to config file>                cassandra.yaml file path for streaming throughput and client/server SSL
-v, --verbose                                        verbose output
-h, --help                                           display this help message
-

You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options.

-
-
-

Load sstables from a Snapshot

-

Copy the snapshot sstables into an accessible directory and use sstableloader to restore them.

-

Example:

-
cp snapshots/1535397029191/* /path/to/keyspace1/standard1/
-
-sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4700000
-   Total duration (ms):          : 4390
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

The -d or --nodes option is required, or the script will not run.

-

Example:

-
sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Initial hosts must be specified (-d)
-
-
-
-
-

Use a Config File for SSL Clusters

-

If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool to the cassandra.yaml with the relevant server_encryption_options (e.g., truststore location, algorithm). This will work better than passing individual SSL options shown above to sstableloader on the command line.

-

Example:

-
sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 9.165KiB/s (avg: 9.165KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 5.147MiB/s (avg: 18.299KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 9.751MiB/s (avg: 27.423KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 8.203MiB/s (avg: 36.524KiB/s)
-...
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 9356 ms
-   Average transfer rate   : 480.105KiB/s
-   Peak transfer rate      : 586.410KiB/s
-
-
-
-
-

Hide Progress Output

-

To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option.

-

Example:

-
sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2]
-
-
-
-
-

Get More Detail

-

Using the --verbose option will provide much more progress output.

-

Example:

-
sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 12.056KiB/s (avg: 12.056KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 9.092MiB/s (avg: 24.081KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 18.832MiB/s (avg: 36.099KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 2.253MiB/s (avg: 47.882KiB/s)
-progress: [/172.17.0.2]0:0/1 7  % total: 7% 6.388MiB/s (avg: 59.743KiB/s)
-progress: [/172.17.0.2]0:0/1 8  % total: 8% 14.606MiB/s (avg: 71.635KiB/s)
-progress: [/172.17.0.2]0:0/1 9  % total: 9% 8.880MiB/s (avg: 83.465KiB/s)
-progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s)
-progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s)
-progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s)
-progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s)
-progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s)
-progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s)
-progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s)
-progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s)
-progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s)
-progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s)
-progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s)
-progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s)
-progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s)
-progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s)
-progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s)
-progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s)
-progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s)
-progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s)
-progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s)
-progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s)
-progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s)
-progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s)
-progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s)
-progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s)
-progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s)
-progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s)
-progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s)
-progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s)
-progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s)
-progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s)
-progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s)
-progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s)
-progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s)
-progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s (avg: 462.545KiB/s)
-progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s)
-progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s)
-progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s)
-progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s)
-progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s)
-progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s)
-progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s)
-progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s)
-progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s)
-progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s)
-progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s)
-progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s)
-progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s)
-progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s)
-progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s)
-progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s)
-progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s)
-progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s)
-progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s)
-progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s)
-progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s)
-progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s)
-progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s)
-progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s)
-progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s)
-progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s)
-progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s)
-progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s)
-progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 6706 ms
-   Average transfer rate   : 669.835KiB/s
-   Peak transfer rate      : 767.802KiB/s
-
-
-
-
-

Throttling Load

-

To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below.

-

Example:

-
sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 0 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 37634
-   Average transfer rate (MB/s): : 0
-   Peak transfer rate (MB/s):    : 0
-
-
-
-
-

Speeding up Load

-

To speed up the load process, the number of connections per host can be increased.

-

Example:

-
sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 100
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 3486
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

This small data set doesn’t benefit much from the increase in connections per host, but note that the total duration has decreased in this example.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstablemetadata.html b/src/doc/4.0-alpha2/tools/sstable/sstablemetadata.html deleted file mode 100644 index 8e788598f..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstablemetadata.html +++ /dev/null @@ -1,472 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablemetadata" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablemetadata

-

Print information about an sstable from the related Statistics.db and Summary.db files to standard output.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.
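One way to make sure the node really is down before running any of the offline sstable tools is a sketch like the following; it assumes a systemd-managed package install, so adjust the service command for your environment:

nodetool drain              # flush memtables and stop accepting new writes
sudo systemctl stop cassandra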

-
-

Usage

-

sstablemetadata <options> <sstable filename(s)>

--gc_grace_seconds <arg>    The gc_grace_seconds to use when calculating droppable tombstones
-
- -
-

Specify gc grace seconds

-

To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn't access the schema directly, this is a way to more accurately estimate droppable tombstones; for example, pass in a gc_grace_seconds value matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the current machine time (in seconds).

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-12208

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4
-Estimated tombstone drop times:
-1536599100:         1
-1536599640:         1
-1536599700:         2
-
-echo $(date +%s)
-1536602005
-
-# if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 4.0E-5
-
-# if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 9.61111111111111E-6
-
-# if gc_grace_seconds was configured at 5000, none of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 0.0
-
-
-
-
-

Explanation of each value printed above

Value                                  Explanation
SSTable                                prefix of the sstable filenames related to this sstable
Partitioner                            partitioner type used to distribute data across nodes; defined in cassandra.yaml
Bloom Filter FP                        precision of Bloom filter used in reads; defined in the table definition
Minimum timestamp                      minimum timestamp of any entry in this sstable, in epoch microseconds
Maximum timestamp                      maximum timestamp of any entry in this sstable, in epoch microseconds
SSTable min local deletion time        minimum timestamp of deletion date, based on TTL, in epoch seconds
SSTable max local deletion time        maximum timestamp of deletion date, based on TTL, in epoch seconds
Compressor                             blank (-) by default; if not blank, indicates type of compression enabled on the table
TTL min                                time-to-live in seconds; default 0 unless defined in the table definition
TTL max                                time-to-live in seconds; default 0 unless defined in the table definition
First token                            lowest token and related key found in the sstable summary
Last token                             highest token and related key found in the sstable summary
Estimated droppable tombstones         ratio of tombstones to columns, using configured gc grace seconds if relevant
SSTable level                          compaction level of this sstable, if leveled compaction (LCS) is used
Repaired at                            the timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds
Replay positions covered               the interval of time and commitlog positions related to this sstable
totalColumnsSet                        number of cells in the table
totalRows                              number of rows in the table
Estimated tombstone drop times         approximate number of rows that will expire, ordered by epoch seconds
Count Row Size Cell Count              two histograms in two columns; one represents distribution of Row Size and the other represents distribution of Cell Count
Estimated cardinality                  an estimate of unique values, used for compaction
EncodingStats* minTTL                  in epoch milliseconds
EncodingStats* minLocalDeletionTime    in epoch seconds
EncodingStats* minTimestamp            in epoch microseconds
KeyType                                the type of partition key, useful in reading and writing data from/to storage; defined in the table definition
ClusteringTypes                        the type of clustering key, useful in reading and writing data from/to storage; defined in the table definition
StaticColumns                          a list of the shared columns in the table
RegularColumns                         a list of non-static, non-key columns in the table
-
  • For the encoding stats values, the delta between this value and the current epoch time is used when encoding and storing data in the most optimal way.
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstableofflinerelevel.html b/src/doc/4.0-alpha2/tools/sstable/sstableofflinerelevel.html deleted file mode 100644 index 51727e535..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstableofflinerelevel.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableofflinerelevel" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableofflinerelevel

-

When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-8301

-

This is done as follows: sstables are sorted by their last token. Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size):

-
L3 [][][][][][][][][][][]
-L2 [    ][    ][    ][  ]
-L1 [          ][        ]
-L0 [                    ]
-
-
-

After being dropped to L0 and sorted by last token, the sstables will look like this (to illustrate overlap, the overlapping ones are put on a new line):

-
[][][]
-[    ][][][]
-    [    ]
-[          ]
-...
-
-
-

Then, we start iterating from the smallest last-token and adding all sstables that do not cause an overlap to a level. We will reconstruct the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below.

-

If we end up with more levels than expected, we put all levels exceeding the expected count in L0. For example, the original L0 files will most likely be put in a level of their own, since they most often overlap many other sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableofflinerelevel [--dry-run] <keyspace> <table>

-
-
-

Doing a dry run

-

Use the --dry-run option to see the current level distribution and the predicted leveling after the change.

-

Example:

-
sstableofflinerelevel --dry-run keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-Potential leveling:
-L0=1
-L1=1
-
-
-
-
-

Running a relevel

-

Example:

-
sstableofflinerelevel keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-New leveling:
-L0=1
-L1=1
-
-
-
-
-

Keyspace or table not found

-

If an invalid keyspace and/or table is provided, an exception will be thrown.

-

Example:

-
sstableofflinerelevel --dry-run keyspace evenlog
-
-Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog
-    at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstablerepairedset.html b/src/doc/4.0-alpha2/tools/sstable/sstablerepairedset.html deleted file mode 100644 index 294865e52..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstablerepairedset.html +++ /dev/null @@ -1,192 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablerepairedset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablerepairedset

-

In some environments, repairs can take a very long time for large amounts of data. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can be run on only un-repaired sstables if desired.

-

Note that running a repair (e.g., via nodetool repair) doesn’t set the status of this metadata. Only setting the status of this metadata via this tool does.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5351

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablerepairedset --really-set <options> [-f <sstable-list> | <sstables>]

--really-set      required if you want to really set the status
--is-repaired     set the repairedAt status to the last modified time
--is-unrepaired   set the repairedAt status to 0
-f                use a file containing a list of sstables as the input
-
-
-

Set a lot of sstables to unrepaired status

-

There are many ways to do this programmatically; in practice, the keyspace and table names would likely be variables.

-

Example:

-
find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %
-
-
-
-
-

Set one to many sstables to repaired status

-

Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice.

-

Example:

-
nodetool repair keyspace1 standard1
-find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired %
-
-
-
- -
-

Using command in a script

-

If you know you ran repair 2 weeks ago, you can do something like the following:

-
sstablerepairedset --really-set --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstablescrub.html b/src/doc/4.0-alpha2/tools/sstable/sstablescrub.html deleted file mode 100644 index 3c574047a..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstablescrub.html +++ /dev/null @@ -1,210 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablescrub" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablescrub

-

Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4321

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablescrub <options> <keyspace> <table>

--debug                         display stack traces
-h,--help                       display this help message
-m,--manifest-check             only check and repair the leveled manifest, without actually scrubbing the sstables
-n,--no-validate                do not validate columns using column validator
-r,--reinsert-overflowed-ttl    rewrite rows with an overflowed expiration date affected by CASSANDRA-14092, using the maximum supported expiration date of 2038-01-19T03:14:06+00:00; the rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows
-s,--skip-corrupted             skip corrupt rows in counter tables
-v,--verbose                    verbose output
-
-
-

Basic Scrub

-

The scrub without options will do a snapshot first, then write all non-corrupted files to a new sstable.

-

Example:

-
sstablescrub keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped
-Checking leveled manifest
-
-
-
-
-

Scrub without Validation

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-9406

-

Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but not corrupt. This data usually does not present any errors to the client.

-

Example:

-
sstablescrub --no-validate keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned
-
-
-
-
-

Skip Corrupted Counter Tables

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5930

-

If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+.

-

Example:

-
sstablescrub --skip-corrupted keyspace1 counter1
-
-
-
-
-

Dealing with Overflow Dates

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-14092

-

Using the --reinsert-overflowed-ttl option rewrites rows whose TTL pushed the expiration date over the maximum supported value (causing an overflow).

-

Example:

-
sstablescrub --reinsert-overflowed-ttl keyspace1 counter1
-
-
-
-
-

Manifest Check

-

As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstablesplit.html b/src/doc/4.0-alpha2/tools/sstable/sstablesplit.html deleted file mode 100644 index 8c69ef623..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstablesplit.html +++ /dev/null @@ -1,201 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablesplit" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablesplit

-

Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4766

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablesplit <options> <filename>

--debug              display stack traces
-h, --help           display this help message
--no-snapshot        don't snapshot the sstables before splitting
-s, --size <size>    maximum size in MB for the output sstables (default: 50)
-

This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped.

-
-
-

Split a File

-

Split a large sstable into smaller sstables. By default, unless the --no-snapshot option is added, a snapshot of the original sstable will be taken and placed in the snapshots folder.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-Pre-split sstables snapshotted into snapshot pre-split-1533144514795
-
-
-
-
-

Split Multiple Files

-

Wildcards can be used in the filename portion of the command to split multiple files.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1*
-
-
-
-
-

Attempt to Split a Small File

-

If the file is already smaller than the split size provided, the sstable will not be split.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB)
-No sstables needed splitting.
-
-
-
-
-

Split a File into Specified Size

-

The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB); specify only the number, not the units. For example, --size 50 is correct, but --size 50MB is not.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db
-Pre-split sstables snapshotted into snapshot pre-split-1533144996008
-
-
-
-
-

Split Without Snapshot

-

By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it.

-

Example:

-
sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db
-
-
-

Note: There is no output, but you can see the results in your file system.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstableupgrade.html b/src/doc/4.0-alpha2/tools/sstable/sstableupgrade.html deleted file mode 100644 index 36417c6fc..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstableupgrade.html +++ /dev/null @@ -1,248 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableupgrade" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableupgrade

-

Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version.

-

The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableupgrade <options> <keyspace> <table> [snapshot_name]

--debug             display stack traces
-h,--help           display this help message
-k,--keep-source    do not delete the source sstables
-
-
-

Rewrite tables to the current Cassandra version

-

Start with a set of sstables in one version of Cassandra:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables:

-
sstableupgrade keyspace1 standard1
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 13:48 backups
--rw-r--r--   1 user  wheel      292 Aug 22 13:48 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4599475 Aug 22 13:48 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:48 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 13:48 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330807 Aug 22 13:48 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 13:48 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 13:48 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 13:48 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite tables to the current Cassandra version, and keep tables in old version

-

Again, starting with a set of sstables in one version:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:

-
sstableupgrade keyspace1 standard1 -k
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 14:00 backups
--rw-r--r--@  1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--@  1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--@  1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--@  1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--@  1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--@  1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--@  1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--@  1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
--rw-r--r--   1 user  wheel      292 Aug 22 14:01 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4596370 Aug 22 14:01 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 14:01 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 14:01 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330801 Aug 22 14:01 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 14:01 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 14:01 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 14:01 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite a snapshot to the current Cassandra version

-

Find the snapshot name:

-
nodetool listsnapshots
-
-Snapshot Details:
-Snapshot name       Keyspace name                Column family name           True size          Size on disk
-...
-1534962986979       keyspace1                    standard1                    5.85 MB            5.85 MB
-
-
-

Then rewrite the snapshot:

-
sstableupgrade keyspace1 standard1 1534962986979
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete.
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstableutil.html b/src/doc/4.0-alpha2/tools/sstable/sstableutil.html deleted file mode 100644 index 71c0f4e4c..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstableutil.html +++ /dev/null @@ -1,204 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableutil" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableutil

-

List sstable files for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7066

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableutil <options> <keyspace> <table>

-c, --cleanup       clean up any outstanding transactions
-d, --debug         display stack traces
-h, --help          display this help message
-o, --oplog         include operation logs
-t, --type <arg>    all (list all files, final or temporary), tmp (list temporary files only), final (list final files only)
-v, --verbose       verbose output
-
-
-

List all sstables

-

The basic command lists the sstables associated with a given keyspace/table.

-

Example:

-
sstableutil keyspace eventlog
-Listing files...
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt
-
-
-
-
-

List only temporary sstables

-

Using the -t option followed by tmp will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra.
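For example, reusing the placeholder keyspace and table names from the listing above:

sstableutil --type tmp keyspace eventlog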

-
-
-

List only final sstables

-

Using the -t option followed by final will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option.
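For example, again with placeholder names:

sstableutil --type final keyspace eventlog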

-
-
-

Include transaction logs

-

Using the -o option will include transaction logs in the listing, in the format above.
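For example, with placeholder names:

sstableutil --oplog keyspace eventlog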

-
-
-

Clean up sstables

-

Using the -c option removes any transactions left over from incomplete writes or compactions.
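For example, with placeholder names:

sstableutil --cleanup keyspace eventlog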

-

From the 3.0 upgrade notes:

-

New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix “add:” or “remove:”. They also contain a special line “commit”, only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the “add” prefix) and delete the old sstables (those with the “remove” prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/tools/sstable/sstableverify.html b/src/doc/4.0-alpha2/tools/sstable/sstableverify.html deleted file mode 100644 index f6a9bc3da..000000000 --- a/src/doc/4.0-alpha2/tools/sstable/sstableverify.html +++ /dev/null @@ -1,204 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableverify" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableverify

-

Check sstable(s) for errors or corruption, for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5791

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableverify <options> <keyspace> <table>

--debug           display stack traces
-e, --extended    extended verification
-h, --help        display this help message
-v, --verbose     verbose output
-
-
-

Basic Verification

-

This is the basic verification. It is not a very quick process, and uses memory. You might need to increase your memory settings if you have many sstables.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-
-
-
-
-

Extended Verification

-

During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time.

-

Example:

-
root@DC1C1:/# sstableverify -e keyspace eventlog
-WARN  14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully
-
-
-
-
-

Corrupted File

-

Corrupted files are listed if they are detected by the script.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db
-
-
-

A similar (but less verbose) tool will show the suggested actions:

-
nodetool verify keyspace eventlog
-error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/troubleshooting/finding_nodes.html b/src/doc/4.0-alpha2/troubleshooting/finding_nodes.html deleted file mode 100644 index 7ba3085d0..000000000 --- a/src/doc/4.0-alpha2/troubleshooting/finding_nodes.html +++ /dev/null @@ -1,240 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Find The Misbehaving Nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Find The Misbehaving Nodes

-

The first step to troubleshooting a Cassandra issue is to use error messages, metrics and monitoring information to identify whether the issue lies with the clients or the server, and if it lies with the server, to find the problematic nodes in the Cassandra cluster. The goal is to determine if this is a systemic issue (e.g. a query pattern that affects the entire cluster) or isolated to a subset of nodes (e.g. neighbors holding a shared token range or even a single node with bad hardware).

-

There are many sources of information that help determine where the problem lies. Some of the most common are mentioned below.

-
-

Client Logs and Errors

-

Clients of the cluster often leave the best breadcrumbs to follow. Perhaps client latencies or error rates have increased in a particular datacenter (likely eliminating other datacenters' nodes), or clients are receiving a particular kind of error code indicating a particular kind of problem. Troubleshooters can often rule out many failure modes just by reading the error messages. In fact, many Cassandra error messages include the last coordinator contacted to help operators find nodes to start with.

-

Some common errors (likely culprit in parentheses), assuming the client has error names similar to those of the DataStax drivers:

-
  • SyntaxError (client). This and other QueryValidationException errors indicate that the client sent a malformed request. These are rarely server issues and usually indicate bad queries.
  • UnavailableException (server): This means that the Cassandra coordinator node has rejected the query because it believes that insufficient replica nodes are available. If many coordinators are throwing this error it likely means that there really are (typically) multiple nodes down in the cluster, and you can identify them using nodetool status. If only a single coordinator is throwing this error it may mean that node has been partitioned from the rest.
  • OperationTimedOutException (server): This is the most frequent timeout message raised when clients set timeouts, and means that the query took longer than the client-supplied timeout. The error message will include the coordinator node that was last tried, which is usually a good starting point. This error usually indicates either aggressive client timeout values or latent server coordinators/replicas.
  • ReadTimeoutException or WriteTimeoutException (server): These are raised when clients do not specify lower timeouts and the coordinator times out based on the values supplied in the cassandra.yaml configuration file (see the example check after this list). They usually indicate a serious server-side problem, as the default values are usually multiple seconds.
-
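If you suspect coordinator timeouts, it helps to confirm what is actually configured on the server side. A minimal check, assuming a typical package-install path and the pre-4.0 option names (adjust the path and names for your install):

grep 'request_timeout_in_ms' /etc/cassandra/cassandra.yaml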
-
-

Metrics

-

If you have Cassandra metrics reporting to a centralized location such as Graphite or Grafana, you can typically use those to narrow down the problem. At this stage narrowing down the issue to a particular datacenter, rack, or even group of nodes is the main goal. Some helpful metrics to look at are:

-
-

Errors

-

Cassandra refers to internode messaging errors as "drops", and provides a number of Dropped Message Metrics to help narrow down errors. If particular nodes are dropping messages actively, they are likely related to the issue.
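A quick way to check for drops directly on a suspect node is nodetool tpstats, which reports dropped counts per message type at the end of its output:

nodetool tpstats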

-
-
-

Latency

-

For timeouts or latency related issues you can start with Table Metrics by comparing coordinator-level metrics, e.g. CoordinatorReadLatency or CoordinatorWriteLatency, with their associated replica metrics, e.g. ReadLatency or WriteLatency. Issues usually show up on the 99th percentile before they show up on the 50th percentile or the mean. While maximum coordinator latencies are not typically very helpful due to the exponentially decaying reservoir used internally to produce metrics, maximum replica latencies that correlate with increased 99th percentiles on coordinators can help narrow down the problem.
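If you do not have a metrics pipeline in place, the following nodetool commands give a rough on-node view of coordinator versus local replica latencies; the keyspace and table names here are placeholders:

nodetool proxyhistograms
nodetool tablehistograms keyspace1 standard1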

-

There are usually three main possibilities:

-
  1. Coordinator latencies are high on all nodes, but only a few nodes' local read latencies are high. This points to slow replica nodes, and the coordinators are just showing side-effects. This usually happens when clients are not token aware.
  2. Coordinator latencies and replica latencies increase at the same time on a few nodes. If clients are token aware this is almost always what happens, and it points to slow replicas of a subset of token ranges (only part of the ring).
  3. Coordinator and local latencies are high on many nodes. This usually indicates either a tipping point in the cluster capacity (too many writes or reads per second), or a new query pattern.
-

It's important to remember that depending on the client's load balancing behavior and consistency levels, coordinator and replica metrics may or may not correlate. In particular, if you use TokenAware policies the same node's coordinator and replica latencies will often increase together, but if you just use normal DCAwareRoundRobin, coordinator latencies can increase with unrelated replica nodes' latencies. For example:

-
  • TokenAware + LOCAL_ONE: should always have coordinator and replica latencies on the same node rise together.
  • TokenAware + LOCAL_QUORUM: should always have coordinator and multiple replica latencies rise together in the same datacenter.
  • TokenAware + QUORUM: replica latencies in other datacenters can affect coordinator latencies.
  • DCAwareRoundRobin + LOCAL_ONE: coordinator latencies and unrelated replica nodes' latencies will rise together.
  • DCAwareRoundRobin + LOCAL_QUORUM: different coordinator and replica latencies will rise together with little correlation.
-
-
-

Query Rates

-

Sometimes the Table query rate metrics can help narrow down load issues, as a "small" increase in coordinator queries per second (QPS) may correlate with a very large increase in replica-level QPS. This most often happens with BATCH writes, where a client may send a single BATCH query that might contain 50 statements in it, which if you have 9 copies (RF=3, three datacenters) means that every coordinator BATCH write turns into 450 replica writes! This is why keeping BATCHes to the same partition is so critical, otherwise you can exhaust significant CPU capacity with a "single" query.

-
-
-
-

Next Step: Investigate the Node(s)

-

Once you have narrowed down the problem as much as possible (datacenter, rack, node), log in to one of the nodes using SSH and proceed to debug using logs, nodetool, and OS tools. If you are not able to log in you may still have access to logs and nodetool remotely.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/troubleshooting/index.html b/src/doc/4.0-alpha2/troubleshooting/index.html deleted file mode 100644 index a39806939..000000000 --- a/src/doc/4.0-alpha2/troubleshooting/index.html +++ /dev/null @@ -1,147 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Troubleshooting" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Troubleshooting

-

As with any distributed database, Cassandra sometimes breaks and you will have to troubleshoot what is going on. Generally speaking you can debug Cassandra like any other distributed Java program, meaning that you have to find which machines in your cluster are misbehaving and then isolate the problem using logs and tools. Luckily Cassandra has a great set of introspection tools to help you.

-

These pages include a number of command examples demonstrating various debugging and analysis techniques, mostly for Linux/Unix systems. If you don't have access to the machines running Cassandra, or are running on Windows or another operating system, you may not be able to use the exact commands, but there are likely equivalent tools you can use.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/troubleshooting/reading_logs.html b/src/doc/4.0-alpha2/troubleshooting/reading_logs.html deleted file mode 100644 index 30a6c8aa7..000000000 --- a/src/doc/4.0-alpha2/troubleshooting/reading_logs.html +++ /dev/null @@ -1,350 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Cassandra Logs" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Logs

-

Cassandra has rich support for logging and attempts to give operators maximum insight into the database while at the same time limiting noise in the logs.

-
-

Common Log Files

-

Cassandra has three main logs: system.log, debug.log and gc.log, which hold general logging messages, debugging messages, and Java garbage collection logs respectively.

-

These logs by default live in ${CASSANDRA_HOME}/logs, but most Linux distributions relocate logs to /var/log/cassandra. Operators can tune this location as well as what levels are logged using the provided logback.xml file.

-
-

system.log

-

This log is the default Cassandra log and is a good place to start any investigation. Some examples of activities logged to this log:

-
  • Uncaught exceptions. These can be very useful for debugging errors.
  • GCInspector messages indicating long garbage collector pauses. When long pauses happen Cassandra will print how long the pause was and also the state of the system (thread state) at the time of the pause. This can help narrow down a capacity issue (either not enough heap or not enough spare CPU).
  • Information about nodes joining and leaving the cluster as well as token metadata (data ownership) changes. This is useful for debugging network partitions, data movements, and more.
  • Keyspace/Table creation, modification, deletion.
  • StartupChecks that ensure optimal configuration of the operating system to run Cassandra.
  • Information about some background operational tasks (e.g. Index Redistribution).
-

As with any application, looking for ERROR or WARN lines can be a great first step:

-
$ # Search for warnings or errors in the latest system.log
-$ grep 'WARN\|ERROR' system.log | tail
-...
-
-$ # Search for warnings or errors in all rotated system.log
-$ zgrep 'WARN\|ERROR' system.log.* | less
-...
-
-
-
-
-

debug.log

-

This log contains additional debugging information that may be useful when troubleshooting but may be much noisier than the normal system.log. Some examples of activities logged to this log:

-
  • Information about compactions, including when they start, which sstables they contain, and when they finish.
  • Information about memtable flushes to disk, including when they happened, how large the flushes were, and which commitlog segments the flush impacted.
-

This log can be very noisy, so it is highly recommended to use grep and other log analysis tools to dive deep. For example:

-
$ # Search for messages involving a CompactionTask with 5 lines of context
-$ grep CompactionTask debug.log -C 5
-...
-
-$ # Look at the distribution of flush tasks per keyspace
-$ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c
-    6 compaction_history:
-    1 test_keyspace:
-    2 local:
-    17 size_estimates:
-    17 sstable_activity:
-
-
-
-
-

gc.log

-

The gc log is a standard Java GC log. With the default jvm.options settings you get a lot of valuable information in this log, such as application pause times and why the pauses happened. This may help narrow down throughput or latency issues to a mistuned JVM. For example, you can view the last few pauses:

-
$ grep stopped gc.log.0.current | tail
-2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds
-2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds
-2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds
-2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds
-2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds
-2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds
-2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds
-2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds
-2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds
-2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds
-
-
-

This shows a lot of valuable information, including how long the application was paused (meaning zero user queries were being serviced during, e.g., the 33ms JVM pause) as well as how long it took to enter the safepoint. You can use this raw data to, for example, get the longest pauses:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n  | tail | xargs -IX grep X gc.log.0.current | sort -k 1
-2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds
-2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds
-2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds
-2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds
-2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds
-2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds
-2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds
-2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds
-2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds
-2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds
-
-
-

In this case any client waiting on a query would have experienced a 56ms latency at 17:13:41.

-

Note that GC pauses are not _only_ garbage collection, although generally speaking high pauses with fast safepoints indicate a lack of JVM heap or a mistuned JVM GC algorithm. High pauses with slow safepoints typically indicate that the JVM is having trouble entering a safepoint, which usually indicates slow disk drives (Cassandra makes heavy use of memory mapped reads, which the JVM doesn't know could have disk latency, so the JVM safepoint logic doesn't handle a blocking memory mapped read particularly well).

-

Using these logs you can even get a pause distribution with something like histogram.py:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py
-# NumSamples = 410293; Min = 0.00; Max = 11.49
-# Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498
-# each ∎ represents a count of 5470
-    0.0001 -     1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
-    1.1496 -     2.2991 [    15]:
-    2.2991 -     3.4486 [     5]:
-    3.4486 -     4.5981 [     1]:
-    4.5981 -     5.7475 [     5]:
-    5.7475 -     6.8970 [     9]:
-    6.8970 -     8.0465 [     1]:
-    8.0465 -     9.1960 [     0]:
-    9.1960 -    10.3455 [     0]:
-   10.3455 -    11.4949 [     2]:
-
-
-

We can see that while average performance is very good, something is causing multi-second JVM pauses. In this case it was mostly safepoint pauses caused by slow disks:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X  gc.log.0.current| sort -k 1
-2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds
-2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds
-2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds
-2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds
-2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds
-2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds
-2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds
-2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds
-2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds
-2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds
-
-
-

Sometimes reading and understanding Java GC logs is hard, but you can take the raw GC files and visualize them using tools such as GCViewer, which take the Cassandra GC log as input and show you detailed visual information on your garbage collection performance. This includes pause analysis as well as throughput information. For a stable Cassandra JVM you probably want to aim for pauses of less than 200ms and GC throughput greater than 99% (ymmv).

-

Java GC pauses are one of the leading causes of tail latency in Cassandra (along with drive latency) so sometimes this information can be crucial while debugging tail latency issues.

-
-
-
-

Getting More Information

-

If the default logging levels are insufficient, nodetool can set higher or lower logging levels for various packages and classes using the nodetool setlogginglevel command. Start by viewing the current levels:

-
$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-
-
-

Perhaps the Gossiper is acting up and we wish to enable it at TRACE level for even more insight:

-
$ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE
-
-$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-org.apache.cassandra.gms.Gossiper                      TRACE
-
-$ grep TRACE debug.log | tail -2
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating
-heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ...
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local
-heartbeat version 2341 greater than 2340 for 127.0.0.1:7000
-
-
-

Note that any changes made this way are reverted on the next Cassandra process restart. To make the changes permanent, add the appropriate rule to logback.xml.

-
diff --git a/conf/logback.xml b/conf/logback.xml
-index b2c5b10..71b0a49 100644
---- a/conf/logback.xml
-+++ b/conf/logback.xml
-@@ -98,4 +98,5 @@ appender reference in the root level section below.
-   </root>
-
-   <logger name="org.apache.cassandra" level="DEBUG"/>
-+  <logger name="org.apache.cassandra.gms.Gossiper" level="TRACE"/>
- </configuration>
-
-
-
-

Full Query Logger

-

Cassandra 4.0 additionally ships with support for full query logging. This is a highly performant binary logging tool which captures Cassandra queries in real time, writes them (if possible) to a log file, and ensures the total size of the capture does not exceed a particular limit. FQL is enabled with nodetool and the logs are read with the provided bin/fqltool utility:

-
$ mkdir /var/tmp/fql_logs
-$ nodetool enablefullquerylog --path /var/tmp/fql_logs
-
-# ... do some querying
-
-$ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail
-Query time: 1530750927224
-Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name =
-'system_views' AND table_name = 'sstable_tasks';
-Values:
-
-Type: single
-Protocol version: 4
-Query time: 1530750934072
-Query: select * from keyspace1.standard1 ;
-Values:
-
-$ nodetool disablefullquerylog
-
-
-

Note that if you want more information than this tool provides, there are other live capture options available, such as packet capture.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/troubleshooting/use_nodetool.html b/src/doc/4.0-alpha2/troubleshooting/use_nodetool.html deleted file mode 100644 index 6c7289bd2..000000000 --- a/src/doc/4.0-alpha2/troubleshooting/use_nodetool.html +++ /dev/null @@ -1,320 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Use Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Use Nodetool

-

Cassandra’s nodetool allows you to narrow problems from the cluster down to a particular node and gives a lot of insight into the state of the Cassandra process itself. There are dozens of useful commands (see nodetool help for all the commands), but briefly, these are some of the most useful for troubleshooting:

-
-

Cluster Status

-

You can use nodetool status to assess the status of the cluster:

-
$ nodetool status <optional keyspace>
-
-Datacenter: dc1
-=======================
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-UN  127.0.1.1  4.69 GiB   1            100.0%            35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e  r1
-UN  127.0.1.2  4.71 GiB   1            100.0%            752e278f-b7c5-4f58-974b-9328455af73f  r2
-UN  127.0.1.3  4.69 GiB   1            100.0%            9dc1a293-2cc0-40fa-a6fd-9e6054da04a7  r3
-
-
-

In this case we can see that we have three nodes in one datacenter with about 4.6GB of data each and they are all “up”. The up/down status of a node is independently determined by every node in the cluster, so you may have to run nodetool status on multiple nodes in a cluster to see the full view.

-

You can use nodetool status plus a little grep to see which nodes are down:

-
$ nodetool status | grep -v '^UN'
-Datacenter: dc1
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-Datacenter: dc2
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-DN  127.0.0.5  105.73 KiB  1            33.3%             df303ac7-61de-46e9-ac79-6e630115fd75  r1
-
-
-

In this case there are two datacenters and there is one node down in datacenter dc2 and rack r1. This may indicate an issue on 127.0.0.5 warranting investigation.

-
-
-

Coordinator Query Latency

-

You can view distributions of coordinator read and write latency to help narrow down latency issues using nodetool proxyhistograms:

-
$ nodetool proxyhistograms
-Percentile       Read Latency      Write Latency      Range Latency   CAS Read Latency  CAS Write Latency View Write Latency
-                     (micros)           (micros)           (micros)           (micros)           (micros)           (micros)
-50%                    454.83             219.34               0.00               0.00               0.00               0.00
-75%                    545.79             263.21               0.00               0.00               0.00               0.00
-95%                    654.95             315.85               0.00               0.00               0.00               0.00
-98%                    785.94             379.02               0.00               0.00               0.00               0.00
-99%                   3379.39            2346.80               0.00               0.00               0.00               0.00
-Min                     42.51             105.78               0.00               0.00               0.00               0.00
-Max                  25109.16           43388.63               0.00               0.00               0.00               0.00
-
-
-

Here you can see the full latency distribution of reads, writes, range requests (e.g. select * from keyspace.table), CAS reads (the compare phase of CAS) and CAS writes (the set phase of compare and set). These can be useful for narrowing down high level latency problems, for example in this case if a client had a 20 millisecond timeout on their reads they might experience the occasional timeout from this node, but less than 1% of the time (since the 99% read latency is 3.3 milliseconds < 20 milliseconds).

-
-
-

Local Query Latency

-

If you know which table is having latency/error issues, you can use nodetool tablehistograms to get a better idea of what is happening locally on a node:

-
$ nodetool tablehistograms keyspace table
-Percentile  SSTables     Write Latency      Read Latency    Partition Size        Cell Count
-                              (micros)          (micros)           (bytes)
-50%             0.00             73.46            182.79             17084               103
-75%             1.00             88.15            315.85             17084               103
-95%             2.00            126.93            545.79             17084               103
-98%             2.00            152.32            654.95             17084               103
-99%             2.00            182.79            785.94             17084               103
-Min             0.00             42.51             24.60             14238                87
-Max             2.00          12108.97          17436.92             17084               103
-
-
-

This shows you percentile breakdowns of several particularly critical metrics.

-

The first column contains how many sstables were read per logical read. A very high number here indicates that you may have chosen the wrong compaction strategy, e.g. SizeTieredCompactionStrategy typically reads many more sstables per logical read than LeveledCompactionStrategy does for update heavy workloads.

-

The second column shows you a latency breakdown of local write latency. In this case we see that while the p50 is quite good at 73 microseconds, the maximum latency is quite slow at 12 milliseconds. High write max latencies often indicate a slow commitlog volume (slow to fsync) or large writes that quickly saturate commitlog segments.

-

The third column shows you a latency breakdown of local read latency. We can see that local Cassandra reads are (as expected) slower than local writes, and the read speed correlates highly with the number of sstables read per read.

-

The fourth and fifth columns show distributions of partition size and column count per partition. These are useful for determining if the table has on average skinny or wide partitions and can help you isolate bad data patterns. For example if you have a single cell that is 2 megabytes, that is probably going to cause some heap pressure when it’s read.

-
-
-

Threadpool State

-

You can use nodetool tpstats to view the current outstanding requests on a particular node. This is useful for trying to find out which resource (read threads, write threads, compaction, request response threads) the Cassandra process lacks. For example:

-
$ nodetool tpstats
-Pool Name                         Active   Pending      Completed   Blocked  All time blocked
-ReadStage                              2         0             12         0                 0
-MiscStage                              0         0              0         0                 0
-CompactionExecutor                     0         0           1940         0                 0
-MutationStage                          0         0              0         0                 0
-GossipStage                            0         0          10293         0                 0
-Repair-Task                            0         0              0         0                 0
-RequestResponseStage                   0         0             16         0                 0
-ReadRepairStage                        0         0              0         0                 0
-CounterMutationStage                   0         0              0         0                 0
-MemtablePostFlush                      0         0             83         0                 0
-ValidationExecutor                     0         0              0         0                 0
-MemtableFlushWriter                    0         0             30         0                 0
-ViewMutationStage                      0         0              0         0                 0
-CacheCleanupExecutor                   0         0              0         0                 0
-MemtableReclaimMemory                  0         0             30         0                 0
-PendingRangeCalculator                 0         0             11         0                 0
-SecondaryIndexManagement               0         0              0         0                 0
-HintsDispatcher                        0         0              0         0                 0
-Native-Transport-Requests              0         0            192         0                 0
-MigrationStage                         0         0             14         0                 0
-PerDiskMemtableFlushWriter_0           0         0             30         0                 0
-Sampler                                0         0              0         0                 0
-ViewBuildExecutor                      0         0              0         0                 0
-InternalResponseStage                  0         0              0         0                 0
-AntiEntropyStage                       0         0              0         0                 0
-
-Message type           Dropped                  Latency waiting in queue (micros)
-                                             50%               95%               99%               Max
-READ                         0               N/A               N/A               N/A               N/A
-RANGE_SLICE                  0              0.00              0.00              0.00              0.00
-_TRACE                       0               N/A               N/A               N/A               N/A
-HINT                         0               N/A               N/A               N/A               N/A
-MUTATION                     0               N/A               N/A               N/A               N/A
-COUNTER_MUTATION             0               N/A               N/A               N/A               N/A
-BATCH_STORE                  0               N/A               N/A               N/A               N/A
-BATCH_REMOVE                 0               N/A               N/A               N/A               N/A
-REQUEST_RESPONSE             0              0.00              0.00              0.00              0.00
-PAGED_RANGE                  0               N/A               N/A               N/A               N/A
-READ_REPAIR                  0               N/A               N/A               N/A               N/A
-
-
-

This command shows you all kinds of interesting statistics. The first section shows a detailed breakdown of threadpools for each Cassandra stage, including how many threads are currently executing (Active) and how many are waiting to run (Pending). Typically if you see pending executions in a particular threadpool that indicates a problem localized to that type of operation. For example if the RequestResponseStage queue is backing up, that means that the coordinators are waiting on a lot of downstream replica requests and may indicate a lack of token awareness, or very high consistency levels being used on read requests (for example reading at ALL ties up RF RequestResponseStage threads whereas LOCAL_ONE only uses a single thread in the ReadStage threadpool). On the other hand if you see a lot of pending compactions that may indicate that your compaction threads cannot keep up with the volume of writes and you may need to tune either the compaction strategy or the concurrent_compactors or compaction_throughput options.
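For example, a quick way to experiment with the consistency level your reads use is from cqlsh (the keyspace and table names here are just placeholders):

```bash
$ cqlsh
cqlsh> CONSISTENCY LOCAL_ONE;
cqlsh> SELECT * FROM keyspace1.standard1 LIMIT 1;
```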

-

The second section shows drops (errors) and latency distributions for all the major request types. Drops are cumulative since process start, and any drops can indicate a serious problem, as the default timeouts to qualify as a drop are quite high (~5-10 seconds). Dropped messages often warrant further investigation.

-
-
-

Compaction State

-

As Cassandra is an LSM datastore, Cassandra sometimes has to compact sstables together, which can have adverse effects on performance. In particular, compaction uses a reasonable quantity of CPU resources, invalidates large quantities of the OS page cache, and can put a lot of load on your disk drives. There are great OS tools to determine if this is the case, but often it’s a good idea to check if compactions are even running using nodetool compactionstats:

-
$ nodetool compactionstats
-pending tasks: 2
-- keyspace.table: 2
-
-id                                   compaction type keyspace table completed total    unit  progress
-2062b290-7f3a-11e8-9358-cd941b956e60 Compaction      keyspace table 21848273  97867583 bytes 22.32%
-Active compaction remaining time :   0h00m04s
-
-
-

In this case there is a single compaction running on the keyspace.table table; it has completed 21.8 megabytes of 97, and Cassandra estimates (based on the configured compaction throughput) that this will take 4 seconds. You can also pass -H to get the units in a human readable format.

-

Generally each running compaction can consume a single core, but the more you do in parallel the faster data compacts. Compaction is crucial to ensuring good read performance, so having the right balance of concurrent compactions such that compactions complete quickly but don’t take too many resources away from query threads is very important for performance. If you notice compaction unable to keep up, try tuning Cassandra’s concurrent_compactors or compaction_throughput options.
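For example, a minimal sketch of adjusting these at runtime with nodetool (the values are illustrative, the changes are not persisted across restarts, and setconcurrentcompactors is only available on newer versions):

```bash
# Illustrative values only; make the matching cassandra.yaml change to persist them
$ nodetool setcompactionthroughput 128   # MB/s; 0 disables throttling
$ nodetool setconcurrentcompactors 4     # newer Cassandra versions only
```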

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha2/troubleshooting/use_tools.html b/src/doc/4.0-alpha2/troubleshooting/use_tools.html deleted file mode 100644 index 294054832..000000000 --- a/src/doc/4.0-alpha2/troubleshooting/use_tools.html +++ /dev/null @@ -1,608 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Diving Deep, Use External Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Diving Deep, Use External Tools

-

Machine access allows operators to dive even deeper than logs and nodetool allow. While every Cassandra operator may have their personal favorite toolsets for troubleshooting issues, this page contains some of the most common operator techniques and examples of those tools. Many of these commands work only on Linux, but if you are deploying on a different operating system you may have access to other substantially similar tools that assess similar OS level metrics and processes.

-
-

JVM Tooling

-

The JVM ships with a number of useful tools. Some of them are useful for debugging Cassandra issues, especially related to heap and execution stacks.

-

NOTE: There are two common gotchas with JVM tooling and Cassandra:

-
  1. By default Cassandra ships with -XX:+PerfDisableSharedMem set to prevent long pauses (see CASSANDRA-9242 and CASSANDRA-9483 for details). If you want to use JVM tooling you can instead have /tmp mounted on an in-memory tmpfs, which also effectively works around CASSANDRA-9242 (see the sketch after this list).
  2. Make sure you run the tools as the same user as Cassandra is running as, e.g. if the database is running as cassandra the tool also has to be run as cassandra, e.g. via sudo -u cassandra <cmd>.
-
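A minimal sketch of the tmpfs approach (the size is an example; add a matching /etc/fstab entry if you want it to survive reboots):

```bash
# Mount /tmp as an in-memory tmpfs so the JVM's perf data never touches disk
$ sudo mount -t tmpfs -o size=1G tmpfs /tmp
```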

Garbage Collection State (jstat)

-

If you suspect heap pressure you can use jstat to dive deep into the garbage collection state of a Cassandra process. This command is always safe to run and yields detailed heap information including eden heap usage (E), old generation heap usage (O), count of eden collections (YGC), time spent in eden collections (YGCT), old/mixed generation collections (FGC) and time spent in old/mixed generation collections (FGCT):

-
jstat -gcutil <cassandra pid> 500ms
- S0     S1     E      O      M     CCS    YGC     YGCT    FGC    FGCT     GCT
- 0.00   0.00  81.53  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.94  31.16  93.07  88.20     12    0.151     3    0.257    0.408
-
-
-

In this case we see we have a relatively healthy heap profile, with 31.16% old generation heap usage and 83% eden. If the old generation is routinely above 75% then you probably need more heap (assuming CMS with a 75% occupancy threshold). If you do have such persistently high old gen, that often means you either have under-provisioned the old generation heap, or that there is too much live data on heap for Cassandra to collect (e.g. because of memtables). Another thing to watch for is the time between young garbage collections (YGC), which indicates how frequently the eden heap is collected. Each young gc pause is about 20-50ms, so if you have a lot of them your clients will notice in their high percentile latencies.

-
-
-

Thread Information (jstack)

-

To get a point-in-time snapshot of exactly what Cassandra is doing, run jstack against the Cassandra PID. Note that this does pause the JVM for a very brief period (<20ms):

-
$ jstack <cassandra pid> > threaddump
-
-# display the threaddump
-$ cat threaddump
-...
-
-# look at runnable threads
-$ grep RUNNABLE threaddump -B 1
-"Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000]
-   java.lang.Thread.State: RUNNABLE
---
-"Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-...
-
-# Note that the nid is the Linux thread id
-
-
-

Some of the most important information in the threaddumps is the waiting/blocking threads, including what locks or monitors each thread is blocking/waiting on.

-
-
-
-

Basic OS Tooling

-

A great place to start when debugging a Cassandra issue is understanding how Cassandra is interacting with system resources. The following are all resources that Cassandra makes heavy use of:

-
  • CPU cores. For executing concurrent user queries.
  • CPU processing time. For query activity (data decompression, row merging, etc.).
  • CPU processing time (low priority). For background tasks (compaction, streaming, etc.).
  • RAM for the Java heap. Used to hold internal data structures and, by default, the Cassandra memtables. Heap space is a crucial component of write performance as well as performance in general.
  • RAM for the OS disk cache. Used to cache frequently accessed SSTable blocks. The OS disk cache is a crucial component of read performance.
  • Disks. Cassandra cares a lot about disk read latency, disk write throughput, and of course disk space.
  • Network latency. Cassandra makes many internode requests, so network latency between nodes can directly impact performance.
  • Network throughput. Cassandra (like other databases) frequently has the so-called “incast” problem, where a small request (e.g. SELECT * from foo.bar) returns a massively large result set (e.g. the entire dataset). In such situations outgoing bandwidth is crucial.

Often troubleshooting Cassandra comes down to troubleshooting what resource the machine or cluster is running out of. Then you create more of that resource or change the query pattern to make less use of that resource.

-
-

High Level Resource Usage (top/htop)

-

Cassandra makes significant use of system resources, and often the very first useful action is to run top or htop (website) to see the state of the machine.
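For example, a quick hypothetical invocation that watches the Cassandra JVM's individual threads (the pgrep pattern assumes the default daemon class name):

```bash
# Per-thread CPU view of the Cassandra process
$ top -H -p $(pgrep -f CassandraDaemon)
```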

-

Useful things to look at:

-
  • System load levels. While these numbers can be confusing, generally speaking if the load average is greater than the number of CPU cores, Cassandra probably won’t have very good (sub 100 millisecond) latencies. See Linux Load Averages for more information.
  • CPU utilization. htop in particular can help break down CPU utilization into user (low and normal priority), system (kernel), and io-wait. Cassandra query threads execute as normal priority user threads, while compaction threads execute as low priority user threads. High system time could indicate problems like thread contention, and high io-wait may indicate slow disk drives. This can help you understand what Cassandra is spending processing resources doing.
  • Memory usage. Look for which programs have the most resident memory; it is probably Cassandra. The number for Cassandra is likely inaccurately high due to how Linux (as of 2018) accounts for memory mapped file memory.
-
-

IO Usage (iostat)

-

Use iostat to determine how data drives are faring, including latency distributions, throughput, and utilization:

-
$ sudo iostat -xdm 2
-Linux 4.13.0-13-generic (hostname)     07/03/2018     _x86_64_    (8 CPU)
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.28    0.32    5.42     0.01     0.13    48.55     0.01    2.21    0.26    2.32   0.64   0.37
-sdb               0.00     0.00    0.00    0.00     0.00     0.00    79.34     0.00    0.20    0.20    0.00   0.16   0.00
-sdc               0.34     0.27    0.76    0.36     0.01     0.02    47.56     0.03   26.90    2.98   77.73   9.21   1.03
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.00    2.00   32.00     0.01     4.04   244.24     0.54   16.00    0.00   17.00   1.06   3.60
-sdb               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00    0.00    0.00   0.00   0.00
-sdc               0.00    24.50    0.00  114.00     0.00    11.62   208.70     5.56   48.79    0.00   48.79   1.12  12.80
-
-
-

In this case we can see that /dev/sdc is a very slow drive, having an await close to 50 milliseconds and an avgqu-sz close to 5 ios. The drive is not particularly saturated (utilization is only 12.8%), but we should still be concerned about how this would affect our p99 latency since 50ms is quite long for typical Cassandra operations. That being said, in this case most of the latency is present in writes (typically writes have higher latency than reads), which due to the LSM nature of Cassandra is often hidden from the user.

-

Important metrics to assess using iostat:

-
  • Reads and writes per second. These numbers will change with the workload, but generally speaking the more reads Cassandra has to do from disk the slower Cassandra read latencies are. Large numbers of reads per second can be a dead giveaway that the cluster has insufficient memory for OS page caching.
  • Write throughput. Cassandra’s LSM model defers user writes and batches them together, which means that throughput to the underlying medium is the most important write metric for Cassandra.
  • Read latency (r_await). When Cassandra misses the OS page cache and reads from SSTables, the read latency directly determines how fast Cassandra can respond with the data.
  • Write latency. Cassandra is less sensitive to write latency except when it syncs the commit log. This typically enters into the very high percentiles of write latency.

Note that to get detailed latency breakdowns you will need a more advanced tool such as bcc-tools.

-
-
-

OS page Cache Usage

-

As Cassandra makes heavy use of memory mapped files, the health of the operating system’s Page Cache is crucial to performance. Start by finding how much available cache is in the system:

-
$ free -g
-              total        used        free      shared  buff/cache   available
-Mem:             15           9           2           0           3           5
-Swap:             0           0           0
-
-
-

In this case 9GB of memory is used by user processes (Cassandra heap) and 8GB is available for OS page cache. Of that, 3GB is actually used to cache files. If most memory is used and unavailable to the page cache, Cassandra performance can suffer significantly. This is why Cassandra starts with a reasonably small amount of memory reserved for the heap.

-

If you suspect that you are missing the OS page cache frequently you can use advanced tools like cachestat or vmtouch to dive deeper.

-
-
-

Network Latency and Reliability

-

Whenever Cassandra does writes or reads that involve other replicas, LOCAL_QUORUM reads for example, one of the dominant effects on latency is network latency. When trying to debug issues with multi-machine operations, the network can be an important resource to investigate. You can determine internode latency using tools like ping and traceroute or most effectively mtr:

-
$ mtr -nr www.google.com
-Start: Sun Jul 22 13:10:28 2018
-HOST: hostname                     Loss%   Snt   Last   Avg  Best  Wrst StDev
-  1.|-- 192.168.1.1                0.0%    10    2.0   1.9   1.1   3.7   0.7
-  2.|-- 96.123.29.15               0.0%    10   11.4  11.0   9.0  16.4   1.9
-  3.|-- 68.86.249.21               0.0%    10   10.6  10.7   9.0  13.7   1.1
-  4.|-- 162.141.78.129             0.0%    10   11.5  10.6   9.6  12.4   0.7
-  5.|-- 162.151.78.253             0.0%    10   10.9  12.1  10.4  20.2   2.8
-  6.|-- 68.86.143.93               0.0%    10   12.4  12.6   9.9  23.1   3.8
-  7.|-- 96.112.146.18              0.0%    10   11.9  12.4  10.6  15.5   1.6
-  9.|-- 209.85.252.250             0.0%    10   13.7  13.2  12.5  13.9   0.0
- 10.|-- 108.170.242.238            0.0%    10   12.7  12.4  11.1  13.0   0.5
- 11.|-- 74.125.253.149             0.0%    10   13.4  13.7  11.8  19.2   2.1
- 12.|-- 216.239.62.40              0.0%    10   13.4  14.7  11.5  26.9   4.6
- 13.|-- 108.170.242.81             0.0%    10   14.4  13.2  10.9  16.0   1.7
- 14.|-- 72.14.239.43               0.0%    10   12.2  16.1  11.0  32.8   7.1
- 15.|-- 216.58.195.68              0.0%    10   25.1  15.3  11.1  25.1   4.8
-
-
-

In this example of mtr, we can rapidly assess the path that your packets are taking, as well as what their typical loss and latency are. Packet loss typically leads to between 200ms and 3s of additional latency, so that can be a common cause of latency issues.

-
-
-

Network Throughput

-

As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is useful to determine if network throughput is limited. One handy tool to do this is iftop which shows both bandwidth usage as well as connection information at a glance. An example showing traffic during a stress run against a local ccm cluster:

-
$ # remove the -t for ncurses instead of pure text
-$ sudo iftop -nNtP -i lo
-interface: lo
-IP address is: 127.0.0.1
-MAC address is: 00:00:00:00:00:00
-Listening on lo
-   # Host name (port/service if enabled)            last 2s   last 10s   last 40s cumulative
---------------------------------------------------------------------------------------------
-   1 127.0.0.1:58946                          =>      869Kb      869Kb      869Kb      217KB
-     127.0.0.3:9042                           <=         0b         0b         0b         0B
-   2 127.0.0.1:54654                          =>      736Kb      736Kb      736Kb      184KB
-     127.0.0.1:9042                           <=         0b         0b         0b         0B
-   3 127.0.0.1:51186                          =>      669Kb      669Kb      669Kb      167KB
-     127.0.0.2:9042                           <=         0b         0b         0b         0B
-   4 127.0.0.3:9042                           =>     3.30Kb     3.30Kb     3.30Kb       845B
-     127.0.0.1:58946                          <=         0b         0b         0b         0B
-   5 127.0.0.1:9042                           =>     2.79Kb     2.79Kb     2.79Kb       715B
-     127.0.0.1:54654                          <=         0b         0b         0b         0B
-   6 127.0.0.2:9042                           =>     2.54Kb     2.54Kb     2.54Kb       650B
-     127.0.0.1:51186                          <=         0b         0b         0b         0B
-   7 127.0.0.1:36894                          =>     1.65Kb     1.65Kb     1.65Kb       423B
-     127.0.0.5:7000                           <=         0b         0b         0b         0B
-   8 127.0.0.1:38034                          =>     1.50Kb     1.50Kb     1.50Kb       385B
-     127.0.0.2:7000                           <=         0b         0b         0b         0B
-   9 127.0.0.1:56324                          =>     1.50Kb     1.50Kb     1.50Kb       383B
-     127.0.0.1:7000                           <=         0b         0b         0b         0B
-  10 127.0.0.1:53044                          =>     1.43Kb     1.43Kb     1.43Kb       366B
-     127.0.0.4:7000                           <=         0b         0b         0b         0B
---------------------------------------------------------------------------------------------
-Total send rate:                                     2.25Mb     2.25Mb     2.25Mb
-Total receive rate:                                      0b         0b         0b
-Total send and receive rate:                         2.25Mb     2.25Mb     2.25Mb
---------------------------------------------------------------------------------------------
-Peak rate (sent/received/total):                     2.25Mb         0b     2.25Mb
-Cumulative (sent/received/total):                     576KB         0B      576KB
-============================================================================================
-
-
-

In this case we can see that bandwidth is fairly shared between many peers, but if the total were getting close to the rated capacity of the NIC, or were focused on a single client, that may provide a clue as to what issue is occurring.

-
-
-
-

Advanced tools

-

Sometimes as an operator you may need to really dive deep. This is where advanced OS tooling can come in handy.

-
-

bcc-tools

-

Most modern Linux distributions (kernels newer than 4.1) support bcc-tools for diving deep into performance problems. First install bcc-tools, e.g. via apt on Debian:

-
$ apt install bcc-tools
-
-
-

Then you can use all the tools that bcc-tools contains. One of the most useful tools is cachestat (cachestat examples) which allows you to determine exactly how many OS page cache hits and misses are happening:

-
$ sudo /usr/share/bcc/tools/cachestat -T 1
-TIME        TOTAL   MISSES     HITS  DIRTIES   BUFFERS_MB  CACHED_MB
-18:44:08       66       66        0       64           88       4427
-18:44:09       40       40        0       75           88       4427
-18:44:10     4353       45     4308      203           88       4427
-18:44:11       84       77        7       13           88       4428
-18:44:12     2511       14     2497       14           88       4428
-18:44:13      101       98        3       18           88       4428
-18:44:14    16741        0    16741       58           88       4428
-18:44:15     1935       36     1899       18           88       4428
-18:44:16       89       34       55       18           88       4428
-
-
-

In this case there are not too many page cache MISSES which indicates a reasonably sized cache. These metrics are the most direct measurement of your Cassandra node’s “hot” dataset. If you don’t have enough cache, MISSES will be high and performance will be slow. If you have enough cache, MISSES will be low and performance will be fast (as almost all reads are being served out of memory).

-

You can also measure disk latency distributions using biolatency (biolatency examples) to get an idea of how slow Cassandra will be when reads miss the OS page cache and have to hit disks:

-
$ sudo /usr/share/bcc/tools/biolatency -D 10
-Tracing block device I/O... Hit Ctrl-C to end.
-
-
-disk = 'sda'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 12       |****************************************|
-        32 -> 63         : 9        |******************************          |
-        64 -> 127        : 1        |***                                     |
-       128 -> 255        : 3        |**********                              |
-       256 -> 511        : 7        |***********************                 |
-       512 -> 1023       : 2        |******                                  |
-
-disk = 'sdc'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 0        |                                        |
-        32 -> 63         : 0        |                                        |
-        64 -> 127        : 41       |************                            |
-       128 -> 255        : 17       |*****                                   |
-       256 -> 511        : 13       |***                                     |
-       512 -> 1023       : 2        |                                        |
-      1024 -> 2047       : 0        |                                        |
-      2048 -> 4095       : 0        |                                        |
-      4096 -> 8191       : 56       |*****************                       |
-      8192 -> 16383      : 131      |****************************************|
-     16384 -> 32767      : 9        |**                                      |
-
-
-

In this case most ios on the data drive (sdc) are fast, but many take between 8 and 16 milliseconds.

-

Finally biosnoop (examples) can be used to dive even deeper and see per-IO latencies:

-
$ sudo /usr/share/bcc/tools/biosnoop | grep java | head
-0.000000000    java           17427  sdc     R  3972458600 4096      13.58
-0.000818000    java           17427  sdc     R  3972459408 4096       0.35
-0.007098000    java           17416  sdc     R  3972401824 4096       5.81
-0.007896000    java           17416  sdc     R  3972489960 4096       0.34
-0.008920000    java           17416  sdc     R  3972489896 4096       0.34
-0.009487000    java           17427  sdc     R  3972401880 4096       0.32
-0.010238000    java           17416  sdc     R  3972488368 4096       0.37
-0.010596000    java           17427  sdc     R  3972488376 4096       0.34
-0.011236000    java           17410  sdc     R  3972488424 4096       0.32
-0.011825000    java           17427  sdc     R  3972488576 16384      0.65
-... time passes
-8.032687000    java           18279  sdc     R  10899712  122880     3.01
-8.033175000    java           18279  sdc     R  10899952  8192       0.46
-8.073295000    java           18279  sdc     R  23384320  122880     3.01
-8.073768000    java           18279  sdc     R  23384560  8192       0.46
-
-
-

With biosnoop you see every single IO and how long it takes. This data can be used to construct the latency distributions in biolatency, but can also be used to better understand how disk latency affects performance. For example, this particular drive takes ~3ms to service a memory mapped read due to the large default value (128kb) of read_ahead_kb. To improve point read performance you may want to decrease read_ahead_kb on fast data volumes such as SSDs, while a higher value like 128kb is probably right for HDs. There are tradeoffs involved (see the queue-sysfs docs for more information), but regardless biosnoop is useful for understanding how Cassandra uses drives.
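A minimal sketch of inspecting and lowering readahead on an SSD data volume (the device name is an example; the sysfs change does not persist across reboots):

```bash
# Check the current readahead (blockdev reports 512-byte sectors)
$ sudo blockdev --getra /dev/sdc
# Lower it to 16kb for point-read heavy SSD workloads
$ echo 16 | sudo tee /sys/block/sdc/queue/read_ahead_kb
```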

-
-
-

vmtouch

-

Sometimes it’s useful to know how much of the Cassandra data files are being cached by the OS. A great tool for answering this question is vmtouch.

-

First install it:

-
$ git clone https://github.com/hoytech/vmtouch.git
-$ cd vmtouch
-$ make
-
-
-

Then run it on the Cassandra data directory:

-
$ ./vmtouch /var/lib/cassandra/data/
-           Files: 312
-     Directories: 92
-  Resident Pages: 62503/64308  244M/251M  97.2%
-         Elapsed: 0.005657 seconds
-
-
-

In this case almost the entire dataset is hot in OS page cache. Generally speaking the percentage doesn’t really matter unless reads are missing the cache (per e.g. cachestat), in which case having additional memory may help read performance.

-
-
-

CPU Flamegraphs

-

Cassandra often uses a lot of CPU, but telling what it is doing can prove difficult. One of the best ways to analyze Cassandra on CPU time is to use CPU Flamegraphs which display in a useful way which areas of Cassandra code are using CPU. This may help narrow down a compaction problem to a “compaction problem dropping tombstones” or just generally help you narrow down what Cassandra is doing while it is having an issue. To get CPU flamegraphs follow the instructions for Java Flamegraphs.

-

Generally:

-
  1. Enable the -XX:+PreserveFramePointer option in Cassandra’s jvm.options configuration file. This has a negligible performance impact but allows you to actually see what Cassandra is doing.
  2. Run perf to get some data.
  3. Send that data through the relevant scripts in the FlameGraph toolset and convert the data into a pretty flamegraph. View the resulting SVG image in a browser or other image viewer.

For example, just cloning straight off github, we first install the perf-map-agent to the location of our JVMs (assumed to be /usr/lib/jvm):

-
$ sudo bash
-$ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/
-$ cd /usr/lib/jvm
-$ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent
-$ cd perf-map-agent
-$ cmake .
-$ make
-
-
-

Now to get a flamegraph:

-
$ git clone --depth=1 https://github.com/brendangregg/FlameGraph
-$ sudo bash
-$ cd FlameGraph
-$ # Record traces of Cassandra and map symbols for all java processes
-$ perf record -F 49 -a -g -p <CASSANDRA PID> -- sleep 30; ./jmaps
-$ # Translate the data
-$ perf script > cassandra_stacks
-$ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \
-    ./flamegraph.pl --color=java --hash > cassandra_flames.svg
-
-
-

The resulting SVG is searchable, zoomable, and generally easy to introspect using a browser.

-
-
-

Packet Capture

-

Sometimes you have to understand what queries a Cassandra node is performing right now to troubleshoot an issue. For these times, trusty packet capture tools like tcpdump and Wireshark can be very helpful for dissecting packet captures. Wireshark even has native CQL support, although it sometimes has compatibility issues with newer Cassandra protocol releases.

-

To get a packet capture first capture some packets:

-
$ sudo tcpdump -U -s0 -i <INTERFACE> -w cassandra.pcap -n "tcp port 9042"
-
-
-

Now open it up with wireshark:

-
$ wireshark cassandra.pcap
-
-
-

If you don’t see CQL-like statements, try telling Wireshark to decode as CQL by right-clicking on a packet going to 9042 -> Decode as -> select CQL from the dropdown for port 9042.
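If you prefer the command line, a rough equivalent using tshark (assuming your Wireshark build ships the CQL dissector):

```bash
# Force the CQL dissector for traffic on port 9042 and print the decoded frames
$ tshark -r cassandra.pcap -d tcp.port==9042,cql
```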

-

If you don’t want to do this manually or use a GUI, you can also use something like cqltrace to ease obtaining and parsing CQL packet captures.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/.buildinfo b/src/doc/4.0-alpha3/.buildinfo deleted file mode 100644 index 109c50469..000000000 --- a/src/doc/4.0-alpha3/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 803b7af322ffb14196a3d6199245799f -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/src/doc/4.0-alpha3/_downloads/54214fb4f99d59730d4cda283a1e9a75/stress-example.yaml b/src/doc/4.0-alpha3/_downloads/54214fb4f99d59730d4cda283a1e9a75/stress-example.yaml deleted file mode 100644 index 17161af27..000000000 --- a/src/doc/4.0-alpha3/_downloads/54214fb4f99d59730d4cda283a1e9a75/stress-example.yaml +++ /dev/null @@ -1,44 +0,0 @@ -spacenam: example # idenitifier for this spec if running with multiple yaml files -keyspace: example - -# Would almost always be network topology unless running something locally -keyspace_definition: | - CREATE KEYSPACE example WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -table: staff_activities - -# The table under test. Start with a partition per staff member -# Is this a good idea? -table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when) - ) - -columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -insert: - # we only update a single partition in any given insert - partitions: fixed(1) - # we want to insert a single row per partition and we have between 20 and 500 - # rows per partition - select: fixed(1)/500 - batchtype: UNLOGGED # Single partition unlogged batches are essentially noops - -queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? 
LIMIT 1 - fields: samerow - diff --git a/src/doc/4.0-alpha3/_downloads/87a3a4aa5557ff2196758c8fe2aecc51/stress-lwt-example.yaml b/src/doc/4.0-alpha3/_downloads/87a3a4aa5557ff2196758c8fe2aecc51/stress-lwt-example.yaml deleted file mode 100644 index fc5db0814..000000000 --- a/src/doc/4.0-alpha3/_downloads/87a3a4aa5557ff2196758c8fe2aecc51/stress-lwt-example.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# Keyspace Name -keyspace: stresscql - -# The CQL for creating a keyspace (optional if it already exists) -# Would almost always be network topology unless running something locall -keyspace_definition: | - CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; - -# Table name -table: blogposts - -# The CQL for creating a table you wish to stress (optional if it already exists) -table_definition: | - CREATE TABLE blogposts ( - domain text, - published_date timeuuid, - url text, - author text, - title text, - body text, - PRIMARY KEY(domain, published_date) - ) WITH CLUSTERING ORDER BY (published_date DESC) - AND compaction = { 'class':'LeveledCompactionStrategy' } - AND comment='A table to hold blog posts' - -### Column Distribution Specifications ### - -columnspec: - - name: domain - size: gaussian(5..100) #domain names are relatively short - population: uniform(1..10M) #10M possible domains to pick from - - - name: published_date - cluster: fixed(1000) #under each domain we will have max 1000 posts - - - name: url - size: uniform(30..300) - - - name: title #titles shouldn't go beyond 200 chars - size: gaussian(10..200) - - - name: author - size: uniform(5..20) #author names should be short - - - name: body - size: gaussian(100..5000) #the body of the blog post can be long - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # Our partition key is the domain so only insert one per batch - - select: fixed(1)/1000 # We have 1000 posts per domain so 1/1000 will allow 1 post per batch - - batchtype: UNLOGGED # Unlogged batches - - -# -# A list of queries you wish to run against the schema -# -queries: - singlepost: - cql: select * from blogposts where domain = ? LIMIT 1 - fields: samerow - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? 
- fields: samerow diff --git a/src/doc/4.0-alpha3/_images/data_modeling_chebotko_logical.png b/src/doc/4.0-alpha3/_images/data_modeling_chebotko_logical.png deleted file mode 100644 index e54b5f274..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_chebotko_logical.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/data_modeling_chebotko_physical.png b/src/doc/4.0-alpha3/_images/data_modeling_chebotko_physical.png deleted file mode 100644 index bfdaec552..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_chebotko_physical.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/data_modeling_hotel_bucketing.png b/src/doc/4.0-alpha3/_images/data_modeling_hotel_bucketing.png deleted file mode 100644 index 8b53e38f9..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_hotel_bucketing.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/data_modeling_hotel_erd.png b/src/doc/4.0-alpha3/_images/data_modeling_hotel_erd.png deleted file mode 100644 index e86fe68f3..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_hotel_erd.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/data_modeling_hotel_logical.png b/src/doc/4.0-alpha3/_images/data_modeling_hotel_logical.png deleted file mode 100644 index e920f1248..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_hotel_logical.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/data_modeling_hotel_physical.png b/src/doc/4.0-alpha3/_images/data_modeling_hotel_physical.png deleted file mode 100644 index 2d20a6ddb..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_hotel_physical.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/data_modeling_hotel_queries.png b/src/doc/4.0-alpha3/_images/data_modeling_hotel_queries.png deleted file mode 100644 index 2434db39d..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_hotel_queries.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/data_modeling_hotel_relational.png b/src/doc/4.0-alpha3/_images/data_modeling_hotel_relational.png deleted file mode 100644 index 43e784eea..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_hotel_relational.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/data_modeling_reservation_logical.png b/src/doc/4.0-alpha3/_images/data_modeling_reservation_logical.png deleted file mode 100644 index 0460633b6..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_reservation_logical.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/data_modeling_reservation_physical.png b/src/doc/4.0-alpha3/_images/data_modeling_reservation_physical.png deleted file mode 100644 index 1e6e76c16..000000000 Binary files a/src/doc/4.0-alpha3/_images/data_modeling_reservation_physical.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/docs_commit.png b/src/doc/4.0-alpha3/_images/docs_commit.png deleted file mode 100644 index d90d96a88..000000000 Binary files a/src/doc/4.0-alpha3/_images/docs_commit.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/docs_create_branch.png b/src/doc/4.0-alpha3/_images/docs_create_branch.png deleted file mode 100644 index a04cb54f3..000000000 Binary files a/src/doc/4.0-alpha3/_images/docs_create_branch.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/docs_create_file.png b/src/doc/4.0-alpha3/_images/docs_create_file.png deleted file mode 100644 index b51e37035..000000000 Binary files 
a/src/doc/4.0-alpha3/_images/docs_create_file.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/docs_editor.png b/src/doc/4.0-alpha3/_images/docs_editor.png deleted file mode 100644 index 5b9997bcc..000000000 Binary files a/src/doc/4.0-alpha3/_images/docs_editor.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/docs_fork.png b/src/doc/4.0-alpha3/_images/docs_fork.png deleted file mode 100644 index 20a592a98..000000000 Binary files a/src/doc/4.0-alpha3/_images/docs_fork.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/docs_pr.png b/src/doc/4.0-alpha3/_images/docs_pr.png deleted file mode 100644 index 211eb25ef..000000000 Binary files a/src/doc/4.0-alpha3/_images/docs_pr.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/docs_preview.png b/src/doc/4.0-alpha3/_images/docs_preview.png deleted file mode 100644 index 207f0ac43..000000000 Binary files a/src/doc/4.0-alpha3/_images/docs_preview.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/eclipse_debug0.png b/src/doc/4.0-alpha3/_images/eclipse_debug0.png deleted file mode 100644 index 79fc5fd5b..000000000 Binary files a/src/doc/4.0-alpha3/_images/eclipse_debug0.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/eclipse_debug1.png b/src/doc/4.0-alpha3/_images/eclipse_debug1.png deleted file mode 100644 index 87b8756a3..000000000 Binary files a/src/doc/4.0-alpha3/_images/eclipse_debug1.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/eclipse_debug2.png b/src/doc/4.0-alpha3/_images/eclipse_debug2.png deleted file mode 100644 index df4eddbd7..000000000 Binary files a/src/doc/4.0-alpha3/_images/eclipse_debug2.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/eclipse_debug3.png b/src/doc/4.0-alpha3/_images/eclipse_debug3.png deleted file mode 100644 index 23178142c..000000000 Binary files a/src/doc/4.0-alpha3/_images/eclipse_debug3.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/eclipse_debug4.png b/src/doc/4.0-alpha3/_images/eclipse_debug4.png deleted file mode 100644 index 5063d4891..000000000 Binary files a/src/doc/4.0-alpha3/_images/eclipse_debug4.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/eclipse_debug5.png b/src/doc/4.0-alpha3/_images/eclipse_debug5.png deleted file mode 100644 index ab68e68a3..000000000 Binary files a/src/doc/4.0-alpha3/_images/eclipse_debug5.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/eclipse_debug6.png b/src/doc/4.0-alpha3/_images/eclipse_debug6.png deleted file mode 100644 index 61ef30bfe..000000000 Binary files a/src/doc/4.0-alpha3/_images/eclipse_debug6.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_images/example-stress-graph.png b/src/doc/4.0-alpha3/_images/example-stress-graph.png deleted file mode 100644 index a65b08b16..000000000 Binary files a/src/doc/4.0-alpha3/_images/example-stress-graph.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_sources/architecture/dynamo.rst.txt b/src/doc/4.0-alpha3/_sources/architecture/dynamo.rst.txt deleted file mode 100644 index 12c586e2c..000000000 --- a/src/doc/4.0-alpha3/_sources/architecture/dynamo.rst.txt +++ /dev/null @@ -1,164 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dynamo ------- - -.. _gossip: - -Gossip -^^^^^^ - -.. todo:: todo - -Failure Detection -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -Token Ring/Ranges -^^^^^^^^^^^^^^^^^ - -.. todo:: todo - -.. _replication-strategy: - -Replication -^^^^^^^^^^^ - -The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are :ref:`simple-strategy` and :ref:`network-topology-strategy`. - -.. _simple-strategy: - -SimpleStrategy -~~~~~~~~~~~~~~ - -SimpleStrategy allows a single integer ``replication_factor`` to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if ``replication_factor`` is 3, then three different nodes should store -a copy of each row. - -SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until ``replication_factor`` distinct nodes have been added to the set of replicas. - -.. _network-topology-strategy: - -NetworkTopologyStrategy -~~~~~~~~~~~~~~~~~~~~~~~ - -NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your -cluster only uses a single datacenter, NetworkTopologyStrategy should be prefered over SimpleStrategy to make it easier -to add new physical or virtual datacenters to the cluster later. - -In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose -replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication -factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one -replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially `surprising -implications `_. For example, if there are not an even number of -nodes in each rack, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to -configure all nodes on a single "rack". - -.. _transient-replication: - -Transient Replication -~~~~~~~~~~~~~~~~~~~~~ - -Transient replication allows you to configure a subset of replicas to only replicate data that hasn't been incrementally -repaired. This allows you to decouple data redundancy from availability. For instance, if you have a keyspace replicated -at rf 3, and alter it to rf 5 with 2 transient replicas, you go from being able to tolerate one failed replica to being -able to tolerate two, without corresponding increase in storage usage. 
This is because 3 nodes will replicate all the data -for a given token range, and the other 2 will only replicate data that hasn't been incrementally repaired. - -To use transient replication, you first need to enable it in ``cassandra.yaml``. Once enabled, both SimpleStrategy and -NetworkTopologyStrategy can be configured to transiently replicate data. You configure it by specifying the replication factor -as ``<total_replicas>/<transient_replicas>``. - -Tunable Consistency ^^^^^^^^^^^^^^^^^^^ - -Strong consistency is provided as long as ``W + R > RF``, where ``W`` is the write consistency level, ``R`` is the -read consistency level, and ``RF`` is the replication factor. For example, if ``RF = 3``, a ``QUORUM`` request will -require responses from at least two of the three replicas. If ``QUORUM`` is used for both writes and reads, at least -one of the replicas is guaranteed to participate in *both* the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, ``LOCAL_QUORUM`` can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter. - -If this type of strong consistency isn't required, lower consistency levels like ``ONE`` may be used to improve -throughput, latency, and availability. diff --git a/src/doc/4.0-alpha3/_sources/architecture/guarantees.rst.txt b/src/doc/4.0-alpha3/_sources/architecture/guarantees.rst.txt deleted file mode 100644 index c0b58d880..000000000 --- a/src/doc/4.0-alpha3/_sources/architecture/guarantees.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Guarantees ---------- - -.. todo:: todo diff --git a/src/doc/4.0-alpha3/_sources/architecture/index.rst.txt b/src/doc/4.0-alpha3/_sources/architecture/index.rst.txt deleted file mode 100644 index 58eda1377..000000000 --- a/src/doc/4.0-alpha3/_sources/architecture/index.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Architecture -============ - -This section describes the general architecture of Apache Cassandra. - -.. 
toctree:: - :maxdepth: 2 - - overview - dynamo - storage_engine - guarantees - diff --git a/src/doc/4.0-alpha3/_sources/architecture/overview.rst.txt b/src/doc/4.0-alpha3/_sources/architecture/overview.rst.txt deleted file mode 100644 index 005b15b94..000000000 --- a/src/doc/4.0-alpha3/_sources/architecture/overview.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Overview --------- - -.. todo:: todo diff --git a/src/doc/4.0-alpha3/_sources/architecture/storage_engine.rst.txt b/src/doc/4.0-alpha3/_sources/architecture/storage_engine.rst.txt deleted file mode 100644 index 23b738de7..000000000 --- a/src/doc/4.0-alpha3/_sources/architecture/storage_engine.rst.txt +++ /dev/null @@ -1,208 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Storage Engine --------------- - -.. _commit-log: - -CommitLog -^^^^^^^^^ - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables. - -All mutations write optimized by storing in commitlog segments, reducing the number of seeks needed to write to disk. Commitlog Segments are limited by the "commitlog_segment_size_in_mb" option, once the size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all its data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running "nodetool drain" before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup. 
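As a quick sketch of how the commit log settings above appear in ``cassandra.yaml`` (the individual options are detailed in the list that follows; values shown are the defaults quoted in this document):

.. code-block:: yaml

    # A new segment is created whenever this size is reached; 8 or 16 MB
    # gives finer granularity if segments are being archived.
    commitlog_segment_size_in_mb: 32

    # On magnetic disks this should be a separate spindle from the data
    # directories; defaults to $CASSANDRA_HOME/data/commitlog if unset.
    commitlog_directory: /var/lib/cassandra/commitlog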
- -- ``commitlog_segment_size_in_mb``: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via max_mutation_size_in_kb setting in cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. - -***NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*** - -*Default Value:* 32 - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied. - -- ``commitlog_sync``: may be either “periodic” or “batch.” - - - ``batch``: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait "commitlog_sync_batch_window_in_ms" milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason. - - - ``commitlog_sync_batch_window_in_ms``: Time to wait between "batch" fsyncs - *Default Value:* 2 - - - ``periodic``: In periodic mode, writes are immediately ack'ed, and the CommitLog is simply synced every "commitlog_sync_period_in_ms" milliseconds. - - - ``commitlog_sync_period_in_ms``: Time to wait between "periodic" fsyncs - *Default Value:* 10000 - -*Default Value:* batch - -*** NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period or more if the sync is delayed. If using "batch" mode, it is recommended to store commitlogs in a separate, dedicated device.** - - -- ``commitlog_directory``: This option is commented out by default When running on magnetic HDD, this should be a separate spindle than the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -- ``commitlog_compression``: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported. - -(Default Value: (complex option):: - - # - class_name: LZ4Compressor - # parameters: - # - - -- ``commitlog_total_space_in_mb``: Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume. - -*Default Value:* 8192 - -.. _memtables: - -Memtables -^^^^^^^^^ - -Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable `SSTables`_. This can be triggered in several -ways: - -- The memory usage of the memtables exceeds the configured threshold (see ``memtable_cleanup_threshold``) -- The :ref:`commit-log` approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to - be freed - -Memtables may be stored entirely on-heap or partially off-heap, depending on ``memtable_allocation_type``. 
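A minimal ``cassandra.yaml`` sketch of the memtable knobs referenced above, using the option values and defaults that appear later in this document:

.. code-block:: yaml

    # Where memtable memory lives: heap_buffers, offheap_buffers, or
    # offheap_objects.
    memtable_allocation_type: heap_buffers

    # Ratio of occupied non-flushing memtable size to the permitted total
    # that triggers a flush of the largest memtable; the default
    # calculation is 1 / (memtable_flush_writers + 1).
    # memtable_cleanup_threshold: 0.11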
- -SSTables -^^^^^^^^ - -SSTables are the immutable data files that Cassandra uses for persisting data on disk. - -As SSTables are flushed to disk from :ref:`memtables` or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed. - -Each SSTable is comprised of multiple components stored in separate files: - -``Data.db`` - The actual data, i.e. the contents of rows. - -``Index.db`` - An index from partition keys to positions in the ``Data.db`` file. For wide partitions, this may also include an - index to rows within a partition. - -``Summary.db`` - A sampling of (by default) every 128th entry in the ``Index.db`` file. - -``Filter.db`` - A Bloom Filter of the partition keys in the SSTable. - -``CompressionInfo.db`` - Metadata about the offsets and lengths of compression chunks in the ``Data.db`` file. - -``Statistics.db`` - Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, - repair, compression, TTLs, and more. - -``Digest.crc32`` - A CRC-32 digest of the ``Data.db`` file. - -``TOC.txt`` - A plain text list of the component files for the SSTable. - -Within the ``Data.db`` file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, ``Murmur3Partition``, is used). Within a partition, rows are -stored in the order of their clustering keys. - -SSTables can be optionally compressed using block-based compression. - -SSTable Versions -^^^^^^^^^^^^^^^^ - -This section was created using the following -`gist `_ -which utilized this original -`source `_. - -The version numbers, to date are: - -Version 0 -~~~~~~~~~ - -* b (0.7.0): added version to sstable filenames -* c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings -* d (0.7.0): row size in data component becomes a long instead of int -* e (0.7.0): stores undecorated keys in data and index components -* f (0.7.0): switched bloom filter implementations in data component -* g (0.8): tracks flushed-at context in metadata component - -Version 1 -~~~~~~~~~ - -* h (1.0): tracks max client timestamp in metadata component -* hb (1.0.3): records compression ration in metadata component -* hc (1.0.4): records partitioner in metadata component -* hd (1.0.10): includes row tombstones in maxtimestamp -* he (1.1.3): includes ancestors generation in metadata component -* hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782) -* ia (1.2.0): - - * column indexes are promoted to the index file - * records estimated histogram of deletion times in tombstones - * bloom filter (keys and columns) upgraded to Murmur3 -* ib (1.2.1): tracks min client timestamp in metadata component -* ic (1.2.5): omits per-row bloom filter of column names - -Version 2 -~~~~~~~~~ - -* ja (2.0.0): - - * super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. 
We do need a major version bump however, because we should not allow streaming of super columns into this new format) - * tracks max local deletiontime in sstable metadata - * records bloom_filter_fp_chance in metadata component - * remove data size and column count from data file (CASSANDRA-4180) - * tracks max/min column values (according to comparator) -* jb (2.0.1): - - * switch from crc32 to adler32 for compression checksums - * checksum the compressed data -* ka (2.1.0): - - * new Statistics.db file format - * index summaries can be downsampled and the sampling level is persisted - * switch uncompressed checksums to adler32 - * tracks presense of legacy (local and remote) counter shards -* la (2.2.0): new file name format -* lb (2.2.7): commit log lower bound included - -Version 3 -~~~~~~~~~ - -* ma (3.0.0): - - * swap bf hash order - * store rows natively -* mb (3.0.7, 3.7): commit log lower bound included -* mc (3.0.8, 3.9): commit log intervals included - -Example Code -~~~~~~~~~~~~ - -The following example is useful for finding all sstables that do not match the "ib" SSTable version - -.. code-block:: bash - - find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots" diff --git a/src/doc/4.0-alpha3/_sources/bugs.rst.txt b/src/doc/4.0-alpha3/_sources/bugs.rst.txt deleted file mode 100644 index 32d676f9d..000000000 --- a/src/doc/4.0-alpha3/_sources/bugs.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Reporting Bugs -============== - -If you encounter a problem with Cassandra, the first places to ask for help are the :ref:`user mailing list -` and the ``cassandra`` :ref:`Slack room `. - -If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the `Apache Cassandra JIRA `__. Please provide as much -details as you can on your problem, and don't forget to indicate which version of Cassandra you are running and on which -environment. - -Further details on how to contribute can be found at our :doc:`development/index` section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path. diff --git a/src/doc/4.0-alpha3/_sources/configuration/cassandra_config_file.rst.txt b/src/doc/4.0-alpha3/_sources/configuration/cassandra_config_file.rst.txt deleted file mode 100644 index 77a8c9978..000000000 --- a/src/doc/4.0-alpha3/_sources/configuration/cassandra_config_file.rst.txt +++ /dev/null @@ -1,2049 +0,0 @@ -.. _cassandra-yaml: - -Cassandra Configuration File -============================ - -``cluster_name`` ----------------- -The name of the cluster. 
This is mainly used to prevent machines in -one logical cluster from joining another. - -*Default Value:* 'Test Cluster' - -``num_tokens`` --------------- - -This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability. - -If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below. - -Specifying initial_token will override this setting on the node's initial start, -on subsequent starts, this setting will apply even if initial token is set. - -If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations - -*Default Value:* 256 - -``allocate_tokens_for_keyspace`` --------------------------------- -*This option is commented out by default.* - -Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replica factor. - -The load assigned to each node will be close to proportional to its number of -vnodes. - -Only supported with the Murmur3Partitioner. - -Replica factor is determined via the replication strategy used by the specified -keyspace. - -*Default Value:* KEYSPACE - -``allocate_tokens_for_local_replication_factor`` ------------------------------------------------- -*This option is commented out by default.* - -Replica factor is explicitly set, regardless of keyspace or datacenter. -This is the replica factor within the datacenter, like NTS. - -*Default Value:* 3 - -``initial_token`` ------------------ -*This option is commented out by default.* - -initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) -- in which case you should provide a -comma-separated list -- it's primarily used when adding nodes to legacy clusters -that do not have vnodes enabled. - -``hinted_handoff_enabled`` --------------------------- - -See http://wiki.apache.org/cassandra/HintedHandoff -May either be "true" or "false" to enable globally - -*Default Value:* true - -``hinted_handoff_disabled_datacenters`` ---------------------------------------- -*This option is commented out by default.* - -When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff - -*Default Value (complex option)*:: - - # - DC1 - # - DC2 - -``max_hint_window_in_ms`` -------------------------- -this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again. - -*Default Value:* 10800000 # 3 hours - -``hinted_handoff_throttle_in_kb`` ---------------------------------- - -Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.) 
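As a sketch, the hinted handoff options just described map onto ``cassandra.yaml`` as follows (values are the defaults given in this file):

.. code-block:: yaml

    hinted_handoff_enabled: true
    # Stop generating hints for a node once it has been down this long (3 hours).
    max_hint_window_in_ms: 10800000
    # Per-delivery-thread throttle, reduced proportionally to cluster size.
    hinted_handoff_throttle_in_kb: 1024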
- -*Default Value:* 1024 - -``max_hints_delivery_threads`` ------------------------------- - -Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower - -*Default Value:* 2 - -``hints_directory`` -------------------- -*This option is commented out by default.* - -Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints. - -*Default Value:* /var/lib/cassandra/hints - -``hints_flush_period_in_ms`` ----------------------------- - -How often hints should be flushed from the internal buffers to disk. -Will *not* trigger fsync. - -*Default Value:* 10000 - -``max_hints_file_size_in_mb`` ------------------------------ - -Maximum size for a single hints file, in megabytes. - -*Default Value:* 128 - -``hints_compression`` ---------------------- -*This option is commented out by default.* - -Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``batchlog_replay_throttle_in_kb`` ----------------------------------- -Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster. - -*Default Value:* 1024 - -``authenticator`` ------------------ - -Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}. - -- AllowAllAuthenticator performs no checks - set it to disable authentication. -- PasswordAuthenticator relies on username/password pairs to authenticate - users. It keeps usernames and hashed passwords in system_auth.roles table. - Please increase system_auth keyspace replication factor if you use this authenticator. - If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) - -*Default Value:* AllowAllAuthenticator - -``authorizer`` --------------- - -Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}. - -- AllowAllAuthorizer allows any action to any user - set it to disable authorization. -- CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllAuthorizer - -``role_manager`` ----------------- - -Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable. - -- CassandraRoleManager stores role data in the system_auth keyspace. Please - increase system_auth keyspace replication factor if you use this role manager. 
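The authentication and authorization backends above are configured as plain class names in ``cassandra.yaml``. A minimal sketch of switching from the permissive defaults to the password-based stack described here (remember to raise the ``system_auth`` keyspace replication factor when doing so):

.. code-block:: yaml

    # Defaults are AllowAllAuthenticator, AllowAllAuthorizer and
    # CassandraRoleManager.
    authenticator: PasswordAuthenticator
    authorizer: CassandraAuthorizer
    role_manager: CassandraRoleManager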
- -*Default Value:* CassandraRoleManager - -``network_authorizer`` ----------------------- - -Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}. - -- AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization. -- CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllNetworkAuthorizer - -``roles_validity_in_ms`` ------------------------- - -Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator. - -*Default Value:* 2000 - -``roles_update_interval_in_ms`` -------------------------------- -*This option is commented out by default.* - -Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms. - -*Default Value:* 2000 - -``permissions_validity_in_ms`` ------------------------------- - -Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer. - -*Default Value:* 2000 - -``permissions_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms. - -*Default Value:* 2000 - -``credentials_validity_in_ms`` ------------------------------- - -Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching. - -*Default Value:* 2000 - -``credentials_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. 
If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms. - -*Default Value:* 2000 - -``partitioner`` ---------------- - -The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. The partitioner can NOT be -changed without reloading all data. If you are adding nodes or upgrading, -you should set this to the same partitioner that you are currently using. - -The default partitioner is the Murmur3Partitioner. Older partitioners -such as the RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner have been included for backward compatibility only. -For new clusters, you should NOT change this value. - - -*Default Value:* org.apache.cassandra.dht.Murmur3Partitioner - -``data_file_directories`` -------------------------- -*This option is commented out by default.* - -Directories where Cassandra should store data on disk. If multiple -directories are specified, Cassandra will spread data evenly across -them by partitioning the token ranges. -If not set, the default directory is $CASSANDRA_HOME/data/data. - -*Default Value (complex option)*:: - - # - /var/lib/cassandra/data - -``commitlog_directory`` ------------------------ -*This option is commented out by default.* -commit log. when running on magnetic HDD, this should be a -separate spindle than the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -``cdc_enabled`` ---------------- - -Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory). - -*Default Value:* false - -``cdc_raw_directory`` ---------------------- -*This option is commented out by default.* - -CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw. - -*Default Value:* /var/lib/cassandra/cdc_raw - -``disk_failure_policy`` ------------------------ - -Policy for data disk failures: - -die - shut down gossip and client transports and kill the JVM for any fs errors or - single-sstable errors, so the node can be replaced. - -stop_paranoid - shut down gossip and client transports even for single-sstable errors, - kill the JVM for errors during startup. - -stop - shut down gossip and client transports, leaving the node effectively dead, but - can still be inspected via JMX, kill the JVM for errors during startup. - -best_effort - stop using the failed disk and respond to requests based on - remaining available sstables. This means you WILL see obsolete - data at CL.ONE! - -ignore - ignore fatal errors and let requests fail, as in pre-1.2 Cassandra - -*Default Value:* stop - -``commit_failure_policy`` -------------------------- - -Policy for commit disk failures: - -die - shut down the node and kill the JVM, so the node can be replaced. - -stop - shut down the node, leaving the node effectively dead, but - can still be inspected via JMX. 
- -stop_commit - shutdown the commit log, letting writes collect but - continuing to service reads, as in pre-2.0.5 Cassandra - -ignore - ignore fatal errors and let the batches fail - -*Default Value:* stop - -``prepared_statements_cache_size_mb`` -------------------------------------- - -Maximum size of the native protocol prepared statement cache - -Valid values are either "auto" (omitting the value) or a value greater 0. - -Note that specifying a too large value will result in long running GCs and possbily -out-of-memory errors. Keep the value at a small fraction of the heap. - -If you constantly see "prepared statements discarded in the last minute because -cache limit reached" messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts. - -Do only change the default value, if you really have more prepared statements than -fit in the cache. In most cases it is not neccessary to change this value. -Constantly re-preparing statements is a performance penalty. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``key_cache_size_in_mb`` ------------------------- - -Maximum size of the key cache in memory. - -Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it's worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It's best to only use the -row cache if you have hot rows or static rows. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. - -``key_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 14400 or 4 hours. - -*Default Value:* 14400 - -``key_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``row_cache_class_name`` ------------------------- -*This option is commented out by default.* - -Row cache implementation class name. Available implementations: - -org.apache.cassandra.cache.OHCProvider - Fully off-heap row cache implementation (default). - -org.apache.cassandra.cache.SerializingCacheProvider - This is the row cache implementation availabile - in previous releases of Cassandra. - -*Default Value:* org.apache.cassandra.cache.OHCProvider - -``row_cache_size_in_mb`` ------------------------- - -Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory that the system can afford in the worst usual situation and leave some -headroom for OS block level cache. 
Do never allow your system to swap. - -Default value is 0, to disable row caching. - -*Default Value:* 0 - -``row_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 0 to disable saving the row cache. - -*Default Value:* 0 - -``row_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved - -*Default Value:* 100 - -``counter_cache_size_in_mb`` ----------------------------- - -Maximum size of the counter cache in memory. - -Counter cache helps to reduce counter locks' contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it's relatively cheap. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. - -``counter_cache_save_period`` ------------------------------ - -Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file. - -Default is 7200 or 2 hours. - -*Default Value:* 7200 - -``counter_cache_keys_to_save`` ------------------------------- -*This option is commented out by default.* - -Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``saved_caches_directory`` --------------------------- -*This option is commented out by default.* - -saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. - -*Default Value:* /var/lib/cassandra/saved_caches - -``commitlog_sync_batch_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -commitlog_sync may be either "periodic", "group", or "batch." - -When in batch mode, Cassandra won't ack writes until the commit log -has been flushed to disk. Each incoming write will trigger the flush task. -commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had -almost no value, and is being removed. - - -*Default Value:* 2 - -``commitlog_sync_group_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -group mode is similar to batch mode, where Cassandra will not ack writes -until the commit log has been flushed to disk. The difference is group -mode will wait up to commitlog_sync_group_window_in_ms between flushes. - - -*Default Value:* 1000 - -``commitlog_sync`` ------------------- - -the default option is "periodic" where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds. 
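A brief ``cassandra.yaml`` sketch of the commit log sync modes described above, using the defaults quoted in this file:

.. code-block:: yaml

    # Default mode: ack writes immediately and fsync the commit log on a timer.
    commitlog_sync: periodic
    commitlog_sync_period_in_ms: 10000

    # Alternatives described above: "batch" (ack only after fsync) or "group"
    # (like batch, but waits up to commitlog_sync_group_window_in_ms between
    # flushes).
    # commitlog_sync: batch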
- -*Default Value:* periodic - -``commitlog_sync_period_in_ms`` -------------------------------- - -*Default Value:* 10000 - -``periodic_commitlog_sync_lag_block_in_ms`` -------------------------------------------- -*This option is commented out by default.* - -When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete. - -``commitlog_segment_size_in_mb`` --------------------------------- - -The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables. - -The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048. - -NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024 - - -*Default Value:* 32 - -``commitlog_compression`` -------------------------- -*This option is commented out by default.* - -Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``seed_provider`` ------------------ -any class that implements the SeedProvider interface and has a -constructor that takes a Map of parameters will do. - -*Default Value (complex option)*:: - - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: ",," - - seeds: "127.0.0.1:7000" - -``concurrent_reads`` --------------------- -For workloads with more data than can fit in memory, Cassandra's -bottleneck will be reads that need to fetch data from -disk. "concurrent_reads" should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -"concurrent_counter_writes", since counter writes read the current -values before incrementing and writing them back. - -On the other hand, since writes are almost never IO bound, the ideal -number of "concurrent_writes" is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb. - -*Default Value:* 32 - -``concurrent_writes`` ---------------------- - -*Default Value:* 32 - -``concurrent_counter_writes`` ------------------------------ - -*Default Value:* 32 - -``concurrent_materialized_view_writes`` ---------------------------------------- - -For materialized view writes, as there is a read involved, so this should -be limited by the less of concurrent reads or concurrent writes. - -*Default Value:* 32 - -``file_cache_size_in_mb`` -------------------------- -*This option is commented out by default.* - -Maximum memory to use for sstable chunk cache and buffer pooling. 
-32MB of this are reserved for pooling buffers, the rest is used as an -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed. - -*Default Value:* 512 - -``buffer_pool_use_heap_if_exhausted`` -------------------------------------- -*This option is commented out by default.* - -Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. - - -*Default Value:* true - -``disk_optimization_strategy`` ------------------------------- -*This option is commented out by default.* - -The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks) - -*Default Value:* ssd - -``memtable_heap_space_in_mb`` ------------------------------ -*This option is commented out by default.* - -Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap. - -*Default Value:* 2048 - -``memtable_offheap_space_in_mb`` --------------------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``memtable_cleanup_threshold`` ------------------------------- -*This option is commented out by default.* - -memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information. - -Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load. - -memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) - -*Default Value:* 0.11 - -``memtable_allocation_type`` ----------------------------- - -Specify the way Cassandra allocates and manages memtable memory. -Options are: - -heap_buffers - on heap nio buffers - -offheap_buffers - off heap (direct) nio buffers - -offheap_objects - off heap objects - -*Default Value:* heap_buffers - -``repair_session_space_in_mb`` ------------------------------- -*This option is commented out by default.* - -Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair. - -For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096. - - -``commitlog_total_space_in_mb`` -------------------------------- -*This option is commented out by default.* - -Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. 
So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume. - - -*Default Value:* 8192 - -``memtable_flush_writers`` --------------------------- -*This option is commented out by default.* - -This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound. - -Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time. - -You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory. - -memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers. - -Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead. - -There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory. - - -*Default Value:* 2 - -``cdc_total_space_in_mb`` -------------------------- -*This option is commented out by default.* - -Total space to use for change-data-capture logs on disk. - -If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed. - -The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides. - -*Default Value:* 4096 - -``cdc_free_space_check_interval_ms`` ------------------------------------- -*This option is commented out by default.* - -When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms - -*Default Value:* 250 - -``index_summary_capacity_in_mb`` --------------------------------- - -A fixed memory pool size in MB for for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory. - -``index_summary_resize_interval_in_minutes`` --------------------------------------------- - -How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional their recent read rates. 
Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level. - -*Default Value:* 60 - -``trickle_fsync`` ------------------ - -Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters. - -*Default Value:* false - -``trickle_fsync_interval_in_kb`` --------------------------------- - -*Default Value:* 10240 - -``storage_port`` ----------------- - -TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7000 - -``ssl_storage_port`` --------------------- - -SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7001 - -``listen_address`` ------------------- - -Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate! - -Set listen_address OR listen_interface, not both. - -Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be). - -Setting listen_address to 0.0.0.0 is always wrong. - - -*Default Value:* localhost - -``listen_interface`` --------------------- -*This option is commented out by default.* - -Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth0 - -``listen_interface_prefer_ipv6`` --------------------------------- -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_address`` ---------------------- -*This option is commented out by default.* - -Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address - -*Default Value:* 1.2.3.4 - -``listen_on_broadcast_address`` -------------------------------- -*This option is commented out by default.* - -When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2. - -*Default Value:* false - -``internode_authenticator`` ---------------------------- -*This option is commented out by default.* - -Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes. 
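A minimal sketch of the addressing and port options above as they appear in ``cassandra.yaml`` (defaults taken from this file; none of these ports should be exposed to the internet):

.. code-block:: yaml

    # Set listen_address OR listen_interface, never both; 0.0.0.0 is always
    # wrong here.
    listen_address: localhost
    # Internode command and data port.
    storage_port: 7000
    # Legacy encrypted internode port, deprecated in 4.0 where a single port
    # can serve both secure and insecure connections.
    ssl_storage_port: 7001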
- -*Default Value:* org.apache.cassandra.auth.AllowAllInternodeAuthenticator - -``start_native_transport`` --------------------------- - -Whether to start the native transport server. -The address on which the native transport is bound is defined by rpc_address. - -*Default Value:* true - -``native_transport_port`` -------------------------- -port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 9042 - -``native_transport_port_ssl`` ------------------------------ -*This option is commented out by default.* -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted. - -*Default Value:* 9142 - -``native_transport_max_threads`` --------------------------------- -*This option is commented out by default.* -The maximum threads for handling requests (note that idle threads are stopped -after 30 seconds so there is not corresponding minimum setting). - -*Default Value:* 128 - -``native_transport_max_frame_size_in_mb`` ------------------------------------------ -*This option is commented out by default.* - -The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you're changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048. - -*Default Value:* 256 - -``native_transport_frame_block_size_in_kb`` -------------------------------------------- -*This option is commented out by default.* - -If checksumming is enabled as a protocol option, denotes the size of the chunks into which frame -are bodies will be broken and checksummed. - -*Default Value:* 32 - -``native_transport_max_concurrent_connections`` ------------------------------------------------ -*This option is commented out by default.* - -The maximum number of concurrent client connections. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_max_concurrent_connections_per_ip`` ------------------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_allow_older_protocols`` ------------------------------------------- - -Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored. - -*Default Value:* true - -``native_transport_idle_timeout_in_ms`` ---------------------------------------- -*This option is commented out by default.* - -Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period. - -Clients may implement heartbeats by sending OPTIONS native protocol message after a timeout, which -will reset idle timeout timer on the server side. 
To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side. - -Idle connection timeouts are disabled by default. - -*Default Value:* 60000 - -``rpc_address`` ---------------- - -The address or interface to bind the native transport server to. - -Set rpc_address OR rpc_interface, not both. - -Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node). - -Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0. - -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* localhost - -``rpc_interface`` ------------------ -*This option is commented out by default.* - -Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth1 - -``rpc_interface_prefer_ipv6`` ------------------------------ -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_rpc_address`` -------------------------- -*This option is commented out by default.* - -RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set. - -*Default Value:* 1.2.3.4 - -``rpc_keepalive`` ------------------ - -enable or disable keepalive on rpc/native connections - -*Default Value:* true - -``internode_send_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and 'man tcp' - -``internode_recv_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem - -``incremental_backups`` ------------------------ - -Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator's -responsibility. - -*Default Value:* false - -``snapshot_before_compaction`` ------------------------------- - -Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won't clean up the -snapshots for you. Mostly useful if you're paranoid when there -is a data format change. - -*Default Value:* false - -``auto_snapshot`` ------------------ - -Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. 
The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop. - -*Default Value:* true - -``column_index_size_in_kb`` ---------------------------- - -Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these: - -- a smaller granularity means more index entries are generated - and looking up rows withing the partition by collation column - is faster -- but, Cassandra will keep the collation index in memory for hot - rows (as part of the key cache), so a larger granularity means - you can cache more hot rows - -*Default Value:* 64 - -``column_index_cache_size_in_kb`` ---------------------------------- - -Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk. - -Note that this size refers to the size of the -serialized index information and not the size of the partition. - -*Default Value:* 2 - -``concurrent_compactors`` -------------------------- -*This option is commented out by default.* - -Number of simultaneous compactions to allow, NOT including -validation "compactions" for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long running compactions. The default is usually -fine and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first. - -concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8. - -If your data directories are backed by SSD, you should increase this -to the number of cores. - -*Default Value:* 1 - -``concurrent_validations`` --------------------------- -*This option is commented out by default.* - -Number of simultaneous repair validations to allow. Default is unbounded -Values less than one are interpreted as unbounded (the default) - -*Default Value:* 0 - -``concurrent_materialized_view_builders`` ------------------------------------------ - -Number of simultaneous materialized view builder tasks to allow. - -*Default Value:* 1 - -``compaction_throughput_mb_per_sec`` ------------------------------------- - -Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this account for all types -of compaction, including validation compaction. - -*Default Value:* 16 - -``sstable_preemptive_open_interval_in_mb`` ------------------------------------------- - -When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. 
This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot - -*Default Value:* 50 - -``stream_entire_sstables`` --------------------------- -*This option is commented out by default.* - -When enabled, permits Cassandra to zero-copy stream entire eligible -SSTables between nodes, including every component. -This speeds up the network transfer significantly subject to -throttling specified by stream_throughput_outbound_megabits_per_sec. -Enabling this will reduce the GC pressure on sending and receiving node. -When unset, the default is enabled. While this feature tries to keep the -disks balanced, it cannot guarantee it. This feature will be automatically -disabled if internode encryption is enabled. Currently this can be used with -Leveled Compaction. Once CASSANDRA-14586 is fixed other compaction strategies -will benefit as well when used in combination with CASSANDRA-6696. - -*Default Value:* true - -``stream_throughput_outbound_megabits_per_sec`` ------------------------------------------------ -*This option is commented out by default.* - -Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s. - -*Default Value:* 200 - -``inter_dc_stream_throughput_outbound_megabits_per_sec`` --------------------------------------------------------- -*This option is commented out by default.* - -Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s - -*Default Value:* 200 - -``read_request_timeout_in_ms`` ------------------------------- - -How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``range_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 10000 - -``write_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 2000 - -``counter_write_request_timeout_in_ms`` ---------------------------------------- -How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``cas_contention_timeout_in_ms`` --------------------------------- -How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms. - -*Default Value:* 1000 - -``truncate_request_timeout_in_ms`` ----------------------------------- -How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms. - -*Default Value:* 60000 - -``request_timeout_in_ms`` -------------------------- -The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms. 
- -*Default Value:* 10000 - -``internode_application_send_queue_capacity_in_bytes`` ------------------------------------------------------- -*This option is commented out by default.* - -Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details. - -The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000 - -The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000 - -The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000 - -Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received. - -The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. So any given node may have a maximum of -N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes) -messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens -nodes should need to communicate with significant bandwidth. - -The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, -on all links to or from a single node in the cluster. -The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, -on all links to or from any node in the cluster. - - -*Default Value:* 4194304 #4MiB - -``internode_application_send_queue_reserve_endpoint_capacity_in_bytes`` ------------------------------------------------------------------------ -*This option is commented out by default.* - -*Default Value:* 134217728 #128MiB - -``internode_application_send_queue_reserve_global_capacity_in_bytes`` ---------------------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 536870912 #512MiB - -``internode_application_receive_queue_capacity_in_bytes`` ---------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 4194304 #4MiB - -``internode_application_receive_queue_reserve_endpoint_capacity_in_bytes`` --------------------------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 134217728 #128MiB - -``internode_application_receive_queue_reserve_global_capacity_in_bytes`` ------------------------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 536870912 #512MiB - -``slow_query_log_timeout_in_ms`` --------------------------------- - - -How long before a node logs slow queries. Select queries that take longer than -this timeout to execute, will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging. 
- -*Default Value:* 500 - -``cross_node_timeout`` ----------------------- - -Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests. - -Warning: before enabling this property make sure to ntp is installed -and the times are synchronized between the nodes. - -*Default Value:* false - -``streaming_keep_alive_period_in_secs`` ---------------------------------------- -*This option is commented out by default.* - -Set keep-alive period for streaming -This node will send a keep-alive message periodically with this period. -If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles the stream session times out and fail -Default value is 300s (5 minutes), which means stalled stream -times out in 10 minutes by default - -*Default Value:* 300 - -``streaming_connections_per_host`` ----------------------------------- -*This option is commented out by default.* - -Limit number of connections per host for streaming -Increase this when you notice that joins are CPU-bound rather that network -bound (for example a few nodes with big files). - -*Default Value:* 1 - -``phi_convict_threshold`` -------------------------- -*This option is commented out by default.* - - -phi value that must be reached for a host to be marked down. -most users should never need to adjust this. - -*Default Value:* 8 - -``endpoint_snitch`` -------------------- - -endpoint_snitch -- Set this to a class that implements -IEndpointSnitch. The snitch has two functions: - -- it teaches Cassandra enough about your network topology to route - requests efficiently -- it allows Cassandra to spread replicas around your cluster to avoid - correlated failures. It does this by grouping machines into - "datacenters" and "racks." Cassandra will do its best not to have - more than one replica on the same "rack" (which may not actually - be a physical location) - -CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on "rack1" in "datacenter1", your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new "datacenter") and -decommissioning the old ones. - -Out of the box, Cassandra provides: - -SimpleSnitch: - Treats Strategy order as proximity. This can improve cache - locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack - and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via - gossip. If cassandra-topology.properties exists, it is used as a - fallback, allowing migration from the PropertyFileSnitch. - -PropertyFileSnitch: - Proximity is determined by rack and data center, which are - explicitly configured in cassandra-topology.properties. - -Ec2Snitch: - Appropriate for EC2 deployments in a single Region. Loads Region - and Availability Zone information from the EC2 API. 
The Region is - treated as the datacenter, and the Availability Zone as the rack. - Only private IPs are used, so this will not work across multiple - Regions. - -Ec2MultiRegionSnitch: - Uses public IPs as broadcast_address to allow cross-region - connectivity. (Thus, you should set seed addresses to the public - IP as well.) You will need to open the storage_port or - ssl_storage_port on the public IP firewall. (For intra-Region - traffic, Cassandra will switch to the private IP after - establishing a connection.) - -RackInferringSnitch: - Proximity is determined by rack and data center, which are - assumed to correspond to the 3rd and 2nd octet of each node's IP - address, respectively. Unless this happens to match your - deployment conventions, this is best used as an example of - writing a custom Snitch class and is provided in that spirit. - -You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath. - -*Default Value:* SimpleSnitch - -``dynamic_snitch_update_interval_in_ms`` ----------------------------------------- - -controls how often to perform the more expensive part of host score -calculation - -*Default Value:* 100 - -``dynamic_snitch_reset_interval_in_ms`` ---------------------------------------- -controls how often to reset all host scores, allowing a bad host to -possibly recover - -*Default Value:* 600000 - -``dynamic_snitch_badness_threshold`` ------------------------------------- -if set greater than zero, this will allow -'pinning' of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest. - -*Default Value:* 0.1 - -``server_encryption_options`` ------------------------------ - -Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html - -*NOTE* No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks - -The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore - - -*Default Value (complex option)*:: - - # set to true for allowing secure incoming connections - enabled: false - # If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port - optional: false - # if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used - # during upgrade to 4.0; otherwise, set to false. 
- enable_legacy_ssl_storage_port: false - # on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true. - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -``client_encryption_options`` ------------------------------ -enable or disable client-to-server encryption. - -*Default Value (complex option)*:: - - enabled: false - # If enabled and optional is set to true encrypted and unencrypted connections are handled. - optional: false - keystore: conf/.keystore - keystore_password: cassandra - # require_client_auth: false - # Set trustore and truststore_password if require_client_auth is true - # truststore: conf/.truststore - # truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - -``internode_compression`` -------------------------- -internode_compression controls whether traffic between nodes is -compressed. -Can be: - -all - all traffic is compressed - -dc - traffic between different datacenters is compressed - -none - nothing is compressed. - -*Default Value:* dc - -``inter_dc_tcp_nodelay`` ------------------------- - -Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses. - -*Default Value:* false - -``tracetype_query_ttl`` ------------------------ - -TTL for different trace types used during logging of the repair process. - -*Default Value:* 86400 - -``tracetype_repair_ttl`` ------------------------- - -*Default Value:* 604800 - -``enable_user_defined_functions`` ---------------------------------- - -If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. - -*Default Value:* false - -``enable_scripted_user_defined_functions`` ------------------------------------------- - -Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false. - -*Default Value:* false - -``windows_timer_interval`` --------------------------- - -The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals 'clockres' tool can confirm your system's default -setting. 
- -*Default Value:* 1 - -``transparent_data_encryption_options`` ---------------------------------------- - - -Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from -a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by -the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys -can still (and should!) be in the keystore and will be used on decrypt operations -(to handle the case of key rotation). - -It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) - -Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints - -*Default Value (complex option)*:: - - enabled: false - chunk_length_kb: 64 - cipher: AES/CBC/PKCS5Padding - key_alias: testing:1 - # CBC IV length for AES needs to be 16 bytes (which is also the default size) - # iv_length: 16 - key_provider: - - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: - - keystore: conf/.keystore - keystore_password: cassandra - store_type: JCEKS - key_password: cassandra - -``tombstone_warn_threshold`` ----------------------------- - -#################### -SAFETY THRESHOLDS # -#################### - -When executing a scan, within or across a partition, we need to keep the -tombstones seen in memory so we can return them to the coordinator, which -will use them to make sure other replicas also know about the deleted rows. -With workloads that generate a lot of tombstones, this can cause performance -problems and even exaust the server heap. -(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -Adjust the thresholds here if you understand the dangers and want to -scan more tombstones anyway. These thresholds may also be adjusted at runtime -using the StorageService mbean. - -*Default Value:* 1000 - -``tombstone_failure_threshold`` -------------------------------- - -*Default Value:* 100000 - -``batch_size_warn_threshold_in_kb`` ------------------------------------ - -Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability. - -*Default Value:* 5 - -``batch_size_fail_threshold_in_kb`` ------------------------------------ - -Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default. 
- -*Default Value:* 50 - -``unlogged_batch_across_partitions_warn_threshold`` ---------------------------------------------------- - -Log WARN on any batches not of type LOGGED than span across more partitions than this limit - -*Default Value:* 10 - -``compaction_large_partition_warning_threshold_mb`` ---------------------------------------------------- - -Log a warning when compacting partitions larger than this value - -*Default Value:* 100 - -``gc_log_threshold_in_ms`` --------------------------- -*This option is commented out by default.* - -GC Pauses greater than 200 ms will be logged at INFO level -This threshold can be adjusted to minimize logging if necessary - -*Default Value:* 200 - -``gc_warn_threshold_in_ms`` ---------------------------- -*This option is commented out by default.* - -GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement. Setting to 0 -will deactivate the feature. - -*Default Value:* 1000 - -``max_value_size_in_mb`` ------------------------- -*This option is commented out by default.* - -Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048. - -*Default Value:* 256 - -``back_pressure_enabled`` -------------------------- - -Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas. - -*Default Value:* false - -``back_pressure_strategy`` --------------------------- -The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map. - -``otc_coalescing_strategy`` ---------------------------- -*This option is commented out by default.* - -Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal -doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. 
-There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details. - -Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name. - -*Default Value:* DISABLED - -``otc_coalescing_window_us`` ----------------------------- -*This option is commented out by default.* - -How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled. - -*Default Value:* 200 - -``otc_coalescing_enough_coalesced_messages`` --------------------------------------------- -*This option is commented out by default.* - -Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128. - -*Default Value:* 8 - -``otc_backlog_expiration_interval_ms`` --------------------------------------- -*This option is commented out by default.* - -How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions. - - -*Default Value:* 200 - -``ideal_consistency_level`` ---------------------------- -*This option is commented out by default.* - -Track a metric per keyspace indicating whether replication achieved the ideal consistency -level for writes without timing out. This is different from the consistency level requested by -each write which may be lower in order to facilitate availability. - -*Default Value:* EACH_QUORUM - -``automatic_sstable_upgrade`` ------------------------------ -*This option is commented out by default.* - -Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the -oldest non-upgraded sstable will get upgraded to the latest version - -*Default Value:* false - -``max_concurrent_automatic_sstable_upgrades`` ---------------------------------------------- -*This option is commented out by default.* -Limit the number of concurrent sstable upgrades - -*Default Value:* 1 - -``audit_logging_options`` -------------------------- - -Audit logging - Logs every incoming CQL command request, authentication to a node. See the docs -on audit_logging for full details about the various configuration options. 
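The stretch of the `cassandra.yaml` reference above describes several commented-out tuning options in prose only. As a minimal sketch (not part of the original file), the excerpt below shows those options uncommented with the defaults documented above; the `enabled` sub-option under `audit_logging_options` is an assumption here, and the values are illustrative rather than recommendations.

```yaml
# Hypothetical cassandra.yaml excerpt assembled from the options described above.
otc_coalescing_strategy: DISABLED             # fixed | movingaverage | timehorizon | disabled
otc_coalescing_window_us: 200                 # microseconds to wait before sending coalesced messages
otc_coalescing_enough_coalesced_messages: 8   # stop coalescing once this many messages have been gathered
otc_backlog_expiration_interval_ms: 200       # how often to expire droppable messages from the backlog
ideal_consistency_level: EACH_QUORUM          # per-keyspace metric: did writes meet this CL without timing out
automatic_sstable_upgrade: false              # upgrade old sstables when no ordinary compaction is pending
max_concurrent_automatic_sstable_upgrades: 1
audit_logging_options:
    enabled: false                            # assumed sub-option; see the audit_logging docs for the full set
```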
- -``full_query_logging_options`` ------------------------------- -*This option is commented out by default.* - - -default options for full query logging - these can be overridden from command line when executing -nodetool enablefullquerylog - -``corrupted_tombstone_strategy`` --------------------------------- -*This option is commented out by default.* - -validate tombstones on reads and compaction -can be either "disabled", "warn" or "exception" - -*Default Value:* disabled - -``diagnostic_events_enabled`` ------------------------------ - -Diagnostic Events # -If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details -on internal state and temporal relationships across events, accessible by clients via JMX. - -*Default Value:* false - -``native_transport_flush_in_batches_legacy`` --------------------------------------------- -*This option is commented out by default.* - -Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in -particular you run an old kernel or have very fewer client connections, this option might be worth evaluating. - -*Default Value:* false - -``repaired_data_tracking_for_range_reads_enabled`` --------------------------------------------------- - -Enable tracking of repaired state of data during reads and comparison between replicas -Mismatches between the repaired sets of replicas can be characterized as either confirmed -or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair -sessions, unrepaired partition tombstones, or some other condition means that the disparity -cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation -as they may be indicative of corruption or data loss. -There are separate flags for range vs partition reads as single partition reads are only tracked -when CL > 1 and a digest mismatch occurs. Currently, range queries don't use digests so if -enabled for range reads, all range reads will include repaired data tracking. As this adds -some overhead, operators may wish to disable it whilst still enabling it for partition reads - -*Default Value:* false - -``repaired_data_tracking_for_partition_reads_enabled`` ------------------------------------------------------- - -*Default Value:* false - -``report_unconfirmed_repaired_data_mismatches`` ------------------------------------------------ -If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed -mismatches will also be recorded. This is to avoid potential signal:noise issues are unconfirmed -mismatches are less actionable than confirmed ones. - -*Default Value:* false - -``enable_materialized_views`` ------------------------------ - -######################## -EXPERIMENTAL FEATURES # -######################## - -Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use. - -*Default Value:* false - -``enable_sasi_indexes`` ------------------------ - -Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use. - -*Default Value:* false - -``enable_transient_replication`` --------------------------------- - -Enables creation of transiently replicated keyspaces on this node. -Transient replication is experimental and is not recommended for production use. 
- -*Default Value:* false diff --git a/src/doc/4.0-alpha3/_sources/configuration/index.rst.txt b/src/doc/4.0-alpha3/_sources/configuration/index.rst.txt deleted file mode 100644 index f774fdad6..000000000 --- a/src/doc/4.0-alpha3/_sources/configuration/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra -===================== - -This section describes how to configure Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cassandra_config_file diff --git a/src/doc/4.0-alpha3/_sources/contactus.rst.txt b/src/doc/4.0-alpha3/_sources/contactus.rst.txt deleted file mode 100644 index 3ed9004dd..000000000 --- a/src/doc/4.0-alpha3/_sources/contactus.rst.txt +++ /dev/null @@ -1,50 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contact us -========== - -You can get in touch with the Cassandra community either via the mailing lists or :ref:`Slack rooms `. - -.. _mailing-lists: - -Mailing lists -------------- - -The following mailing lists are available: - -- `Users `__ – General discussion list for users - `Subscribe - `__ -- `Developers `__ – Development related discussion - `Subscribe - `__ -- `Commits `__ – Commit notification source repository - - `Subscribe `__ -- `Client Libraries `__ – Discussion related to the - development of idiomatic client APIs - `Subscribe `__ - -Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe. - -.. _slack: - -Slack ------ -To chat with developers or users in real-time, join our rooms on `ASF Slack `__: - -- ``cassandra`` - for user questions and general discussions. -- ``cassandra-dev`` - strictly for questions or discussions related to Cassandra development. 
- diff --git a/src/doc/4.0-alpha3/_sources/cql/appendices.rst.txt b/src/doc/4.0-alpha3/_sources/cql/appendices.rst.txt deleted file mode 100644 index 480b78ea2..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/appendices.rst.txt +++ /dev/null @@ -1,330 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Appendices ----------- - -.. _appendix-A: - -Appendix A: CQL Keywords -~~~~~~~~~~~~~~~~~~~~~~~~ - -CQL distinguishes between *reserved* and *non-reserved* keywords. -Reserved keywords cannot be used as identifier, they are truly reserved -for the language (but one can enclose a reserved keyword by -double-quotes to use it as an identifier). Non-reserved keywords however -only have a specific meaning in certain context but can used as -identifier otherwise. The only *raison d’être* of these non-reserved -keywords is convenience: some keyword are non-reserved when it was -always easy for the parser to decide whether they were used as keywords -or not. - -+--------------------+-------------+ -| Keyword | Reserved? 
| -+====================+=============+ -| ``ADD`` | yes | -+--------------------+-------------+ -| ``AGGREGATE`` | no | -+--------------------+-------------+ -| ``ALL`` | no | -+--------------------+-------------+ -| ``ALLOW`` | yes | -+--------------------+-------------+ -| ``ALTER`` | yes | -+--------------------+-------------+ -| ``AND`` | yes | -+--------------------+-------------+ -| ``APPLY`` | yes | -+--------------------+-------------+ -| ``AS`` | no | -+--------------------+-------------+ -| ``ASC`` | yes | -+--------------------+-------------+ -| ``ASCII`` | no | -+--------------------+-------------+ -| ``AUTHORIZE`` | yes | -+--------------------+-------------+ -| ``BATCH`` | yes | -+--------------------+-------------+ -| ``BEGIN`` | yes | -+--------------------+-------------+ -| ``BIGINT`` | no | -+--------------------+-------------+ -| ``BLOB`` | no | -+--------------------+-------------+ -| ``BOOLEAN`` | no | -+--------------------+-------------+ -| ``BY`` | yes | -+--------------------+-------------+ -| ``CALLED`` | no | -+--------------------+-------------+ -| ``CLUSTERING`` | no | -+--------------------+-------------+ -| ``COLUMNFAMILY`` | yes | -+--------------------+-------------+ -| ``COMPACT`` | no | -+--------------------+-------------+ -| ``CONTAINS`` | no | -+--------------------+-------------+ -| ``COUNT`` | no | -+--------------------+-------------+ -| ``COUNTER`` | no | -+--------------------+-------------+ -| ``CREATE`` | yes | -+--------------------+-------------+ -| ``CUSTOM`` | no | -+--------------------+-------------+ -| ``DATE`` | no | -+--------------------+-------------+ -| ``DECIMAL`` | no | -+--------------------+-------------+ -| ``DELETE`` | yes | -+--------------------+-------------+ -| ``DESC`` | yes | -+--------------------+-------------+ -| ``DESCRIBE`` | yes | -+--------------------+-------------+ -| ``DISTINCT`` | no | -+--------------------+-------------+ -| ``DOUBLE`` | no | -+--------------------+-------------+ -| ``DROP`` | yes | -+--------------------+-------------+ -| ``ENTRIES`` | yes | -+--------------------+-------------+ -| ``EXECUTE`` | yes | -+--------------------+-------------+ -| ``EXISTS`` | no | -+--------------------+-------------+ -| ``FILTERING`` | no | -+--------------------+-------------+ -| ``FINALFUNC`` | no | -+--------------------+-------------+ -| ``FLOAT`` | no | -+--------------------+-------------+ -| ``FROM`` | yes | -+--------------------+-------------+ -| ``FROZEN`` | no | -+--------------------+-------------+ -| ``FULL`` | yes | -+--------------------+-------------+ -| ``FUNCTION`` | no | -+--------------------+-------------+ -| ``FUNCTIONS`` | no | -+--------------------+-------------+ -| ``GRANT`` | yes | -+--------------------+-------------+ -| ``IF`` | yes | -+--------------------+-------------+ -| ``IN`` | yes | -+--------------------+-------------+ -| ``INDEX`` | yes | -+--------------------+-------------+ -| ``INET`` | no | -+--------------------+-------------+ -| ``INFINITY`` | yes | -+--------------------+-------------+ -| ``INITCOND`` | no | -+--------------------+-------------+ -| ``INPUT`` | no | -+--------------------+-------------+ -| ``INSERT`` | yes | -+--------------------+-------------+ -| ``INT`` | no | -+--------------------+-------------+ -| ``INTO`` | yes | -+--------------------+-------------+ -| ``JSON`` | no | -+--------------------+-------------+ -| ``KEY`` | no | -+--------------------+-------------+ -| ``KEYS`` | no | -+--------------------+-------------+ -| ``KEYSPACE`` | yes | 
-+--------------------+-------------+ -| ``KEYSPACES`` | no | -+--------------------+-------------+ -| ``LANGUAGE`` | no | -+--------------------+-------------+ -| ``LIMIT`` | yes | -+--------------------+-------------+ -| ``LIST`` | no | -+--------------------+-------------+ -| ``LOGIN`` | no | -+--------------------+-------------+ -| ``MAP`` | no | -+--------------------+-------------+ -| ``MODIFY`` | yes | -+--------------------+-------------+ -| ``NAN`` | yes | -+--------------------+-------------+ -| ``NOLOGIN`` | no | -+--------------------+-------------+ -| ``NORECURSIVE`` | yes | -+--------------------+-------------+ -| ``NOSUPERUSER`` | no | -+--------------------+-------------+ -| ``NOT`` | yes | -+--------------------+-------------+ -| ``NULL`` | yes | -+--------------------+-------------+ -| ``OF`` | yes | -+--------------------+-------------+ -| ``ON`` | yes | -+--------------------+-------------+ -| ``OPTIONS`` | no | -+--------------------+-------------+ -| ``OR`` | yes | -+--------------------+-------------+ -| ``ORDER`` | yes | -+--------------------+-------------+ -| ``PASSWORD`` | no | -+--------------------+-------------+ -| ``PERMISSION`` | no | -+--------------------+-------------+ -| ``PERMISSIONS`` | no | -+--------------------+-------------+ -| ``PRIMARY`` | yes | -+--------------------+-------------+ -| ``RENAME`` | yes | -+--------------------+-------------+ -| ``REPLACE`` | yes | -+--------------------+-------------+ -| ``RETURNS`` | no | -+--------------------+-------------+ -| ``REVOKE`` | yes | -+--------------------+-------------+ -| ``ROLE`` | no | -+--------------------+-------------+ -| ``ROLES`` | no | -+--------------------+-------------+ -| ``SCHEMA`` | yes | -+--------------------+-------------+ -| ``SELECT`` | yes | -+--------------------+-------------+ -| ``SET`` | yes | -+--------------------+-------------+ -| ``SFUNC`` | no | -+--------------------+-------------+ -| ``SMALLINT`` | no | -+--------------------+-------------+ -| ``STATIC`` | no | -+--------------------+-------------+ -| ``STORAGE`` | no | -+--------------------+-------------+ -| ``STYPE`` | no | -+--------------------+-------------+ -| ``SUPERUSER`` | no | -+--------------------+-------------+ -| ``TABLE`` | yes | -+--------------------+-------------+ -| ``TEXT`` | no | -+--------------------+-------------+ -| ``TIME`` | no | -+--------------------+-------------+ -| ``TIMESTAMP`` | no | -+--------------------+-------------+ -| ``TIMEUUID`` | no | -+--------------------+-------------+ -| ``TINYINT`` | no | -+--------------------+-------------+ -| ``TO`` | yes | -+--------------------+-------------+ -| ``TOKEN`` | yes | -+--------------------+-------------+ -| ``TRIGGER`` | no | -+--------------------+-------------+ -| ``TRUNCATE`` | yes | -+--------------------+-------------+ -| ``TTL`` | no | -+--------------------+-------------+ -| ``TUPLE`` | no | -+--------------------+-------------+ -| ``TYPE`` | no | -+--------------------+-------------+ -| ``UNLOGGED`` | yes | -+--------------------+-------------+ -| ``UPDATE`` | yes | -+--------------------+-------------+ -| ``USE`` | yes | -+--------------------+-------------+ -| ``USER`` | no | -+--------------------+-------------+ -| ``USERS`` | no | -+--------------------+-------------+ -| ``USING`` | yes | -+--------------------+-------------+ -| ``UUID`` | no | -+--------------------+-------------+ -| ``VALUES`` | no | -+--------------------+-------------+ -| ``VARCHAR`` | no | -+--------------------+-------------+ -| ``VARINT`` | no | 
-+--------------------+-------------+ -| ``WHERE`` | yes | -+--------------------+-------------+ -| ``WITH`` | yes | -+--------------------+-------------+ -| ``WRITETIME`` | no | -+--------------------+-------------+ - -Appendix B: CQL Reserved Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name. - -+-----------------+ -| type | -+=================+ -| ``bitstring`` | -+-----------------+ -| ``byte`` | -+-----------------+ -| ``complex`` | -+-----------------+ -| ``enum`` | -+-----------------+ -| ``interval`` | -+-----------------+ -| ``macaddr`` | -+-----------------+ - - -Appendix C: Dropping Compact Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting version 4.0, Thrift and COMPACT STORAGE is no longer supported. - -'ALTER ... DROP COMPACT STORAGE' statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables: - -- CQL-created Compact Tables that have no clustering columns, will expose an - additional clustering column ``column1`` with ``UTF8Type``. -- CQL-created Compact Tables that had no regular columns, will expose a - regular column ``value`` with ``BytesType``. -- For CQL-Created Compact Tables, all columns originally defined as - ``regular`` will be come ``static`` -- CQL-created Compact Tables that have clustering but have no regular - columns will have an empty value column (of ``EmptyType``) -- SuperColumn Tables (can only be created through Thrift) will expose - a compact value map with an empty name. -- Thrift-created Compact Tables will have types corresponding to their - Thrift definition. diff --git a/src/doc/4.0-alpha3/_sources/cql/changes.rst.txt b/src/doc/4.0-alpha3/_sources/cql/changes.rst.txt deleted file mode 100644 index 6691f156a..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/changes.rst.txt +++ /dev/null @@ -1,211 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Changes -------- - -The following describes the changes in each version of CQL. - -3.4.5 -^^^^^ - -- Adds support for arithmetic operators (:jira:`11935`) -- Adds support for ``+`` and ``-`` operations on dates (:jira:`11936`) -- Adds ``currentTimestamp``, ``currentDate``, ``currentTime`` and ``currentTimeUUID`` functions (:jira:`13132`) - - -3.4.4 -^^^^^ - -- ``ALTER TABLE`` ``ALTER`` has been removed; a column's type may not be changed after creation (:jira:`12443`). -- ``ALTER TYPE`` ``ALTER`` has been removed; a field's type may not be changed after creation (:jira:`12443`). - -3.4.3 -^^^^^ - -- Adds a new ``duration `` :ref:`data types ` (:jira:`11873`). -- Support for ``GROUP BY`` (:jira:`10707`). 
-- Adds a ``DEFAULT UNSET`` option for ``INSERT JSON`` to ignore omitted columns (:jira:`11424`). -- Allows ``null`` as a legal value for TTL on insert and update. It will be treated as equivalent to inserting a 0 (:jira:`12216`). - -3.4.2 -^^^^^ - -- If a table has a non zero ``default_time_to_live``, then explicitly specifying a TTL of 0 in an ``INSERT`` or - ``UPDATE`` statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels - the ``default_time_to_live``). This wasn't the case before and the ``default_time_to_live`` was applied even though a - TTL had been explicitly set. -- ``ALTER TABLE`` ``ADD`` and ``DROP`` now allow multiple columns to be added/removed. -- New ``PER PARTITION LIMIT`` option for ``SELECT`` statements (see `CASSANDRA-7017 - `__. -- :ref:`User-defined functions ` can now instantiate ``UDTValue`` and ``TupleValue`` instances via the - new ``UDFContext`` interface (see `CASSANDRA-10818 `__. -- :ref:`User-defined types ` may now be stored in a non-frozen form, allowing individual fields to be updated and - deleted in ``UPDATE`` statements and ``DELETE`` statements, respectively. (`CASSANDRA-7423 - `__). - -3.4.1 -^^^^^ - -- Adds ``CAST`` functions. - -3.4.0 -^^^^^ - -- Support for :ref:`materialized views `. -- ``DELETE`` support for inequality expressions and ``IN`` restrictions on any primary key columns. -- ``UPDATE`` support for ``IN`` restrictions on any primary key columns. - -3.3.1 -^^^^^ - -- The syntax ``TRUNCATE TABLE X`` is now accepted as an alias for ``TRUNCATE X``. - -3.3.0 -^^^^^ - -- :ref:`User-defined functions and aggregates ` are now supported. -- Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings. -- Introduces Roles to supersede user based authentication and access control -- New ``date``, ``time``, ``tinyint`` and ``smallint`` :ref:`data types ` have been added. -- :ref:`JSON support ` has been added -- Adds new time conversion functions and deprecate ``dateOf`` and ``unixTimestampOf``. - -3.2.0 -^^^^^ - -- :ref:`User-defined types ` supported. -- ``CREATE INDEX`` now supports indexing collection columns, including indexing the keys of map collections through the - ``keys()`` function -- Indexes on collections may be queried using the new ``CONTAINS`` and ``CONTAINS KEY`` operators -- :ref:`Tuple types ` were added to hold fixed-length sets of typed positional fields. -- ``DROP INDEX`` now supports optionally specifying a keyspace. - -3.1.7 -^^^^^ - -- ``SELECT`` statements now support selecting multiple rows in a single partition using an ``IN`` clause on combinations - of clustering columns. -- ``IF NOT EXISTS`` and ``IF EXISTS`` syntax is now supported by ``CREATE USER`` and ``DROP USER`` statements, - respectively. - -3.1.6 -^^^^^ - -- A new ``uuid()`` method has been added. -- Support for ``DELETE ... IF EXISTS`` syntax. - -3.1.5 -^^^^^ - -- It is now possible to group clustering columns in a relation, see :ref:`WHERE ` clauses. -- Added support for :ref:`static columns `. - -3.1.4 -^^^^^ - -- ``CREATE INDEX`` now allows specifying options when creating CUSTOM indexes. - -3.1.3 -^^^^^ - -- Millisecond precision formats have been added to the :ref:`timestamp ` parser. - -3.1.2 -^^^^^ - -- ``NaN`` and ``Infinity`` has been added as valid float constants. They are now reserved keywords. In the unlikely case - you we using them as a column identifier (or keyspace/table one), you will now need to double quote them. 
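The changelog entries above describe each feature in prose only. The snippet below is an illustrative CQL fragment (not taken from the changelog) that exercises a few of them against a hypothetical `sensor_readings` table, with the originating CQL version noted per statement.

```sql
-- Hypothetical table and queries, illustrating changelog entries listed above.
SELECT * FROM sensor_readings
WHERE sensor_id = 42 AND hour IN (10, 11)      -- 3.1.7: IN on combinations of clustering columns
PER PARTITION LIMIT 3;                         -- 3.4.2: PER PARTITION LIMIT option

DELETE FROM sensor_readings
WHERE sensor_id = 42 AND hour = 10 AND ts = '2020-01-01 10:00:00+0000'
IF EXISTS;                                     -- 3.1.6: DELETE ... IF EXISTS

TRUNCATE TABLE sensor_readings;                -- 3.3.1: alias for TRUNCATE sensor_readings
```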
- -3.1.1 -^^^^^ - -- ``SELECT`` statement now allows listing the partition keys (using the ``DISTINCT`` modifier). See `CASSANDRA-4536 - `__. -- The syntax ``c IN ?`` is now supported in ``WHERE`` clauses. In that case, the value expected for the bind variable - will be a list of whatever type ``c`` is. -- It is now possible to use named bind variables (using ``:name`` instead of ``?``). - -3.1.0 -^^^^^ - -- ``ALTER TABLE`` ``DROP`` option added. -- ``SELECT`` statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported. -- ``CREATE`` statements for ``KEYSPACE``, ``TABLE`` and ``INDEX`` now supports an ``IF NOT EXISTS`` condition. - Similarly, ``DROP`` statements support a ``IF EXISTS`` condition. -- ``INSERT`` statements optionally supports a ``IF NOT EXISTS`` condition and ``UPDATE`` supports ``IF`` conditions. - -3.0.5 -^^^^^ - -- ``SELECT``, ``UPDATE``, and ``DELETE`` statements now allow empty ``IN`` relations (see `CASSANDRA-5626 - `__. - -3.0.4 -^^^^^ - -- Updated the syntax for custom :ref:`secondary indexes `. -- Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not - correct (the order was **not** the one of the type of the partition key). Instead, the ``token`` method should always - be used for range queries on the partition key (see :ref:`WHERE clauses `). - -3.0.3 -^^^^^ - -- Support for custom :ref:`secondary indexes ` has been added. - -3.0.2 -^^^^^ - -- Type validation for the :ref:`constants ` has been fixed. For instance, the implementation used to allow - ``'2'`` as a valid value for an ``int`` column (interpreting it has the equivalent of ``2``), or ``42`` as a valid - ``blob`` value (in which case ``42`` was interpreted as an hexadecimal representation of the blob). This is no longer - the case, type validation of constants is now more strict. See the :ref:`data types ` section for details - on which constant is allowed for which type. -- The type validation fixed of the previous point has lead to the introduction of blobs constants to allow the input of - blobs. Do note that while the input of blobs as strings constant is still supported by this version (to allow smoother - transition to blob constant), it is now deprecated and will be removed by a future version. If you were using strings - as blobs, you should thus update your client code ASAP to switch blob constants. -- A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is - now also allowed in select clauses. See the :ref:`section on functions ` for details. - -3.0.1 -^^^^^ - -- Date strings (and timestamps) are no longer accepted as valid ``timeuuid`` values. Doing so was a bug in the sense - that date string are not valid ``timeuuid``, and it was thus resulting in `confusing behaviors - `__. However, the following new methods have been added to help - working with ``timeuuid``: ``now``, ``minTimeuuid``, ``maxTimeuuid`` , - ``dateOf`` and ``unixTimestampOf``. -- Float constants now support the exponent notation. In other words, ``4.2E10`` is now a valid floating point value. - -Versioning -^^^^^^^^^^ - -Versioning of the CQL language adheres to the `Semantic Versioning `__ guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version. 
- -========= ============================================================================================================= - version description -========= ============================================================================================================= - Major The major version *must* be bumped when backward incompatible changes are introduced. This should rarely - occur. - Minor Minor version increments occur when new, but backward compatible, functionality is introduced. - Patch The patch version is incremented when bugs are fixed. -========= ============================================================================================================= diff --git a/src/doc/4.0-alpha3/_sources/cql/ddl.rst.txt b/src/doc/4.0-alpha3/_sources/cql/ddl.rst.txt deleted file mode 100644 index afb130e48..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/ddl.rst.txt +++ /dev/null @@ -1,788 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-definition: - -Data Definition ---------------- - -CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in -*keyspaces*. A keyspace defines a number of options that applies to all the tables it contains, most prominently of -which is the :ref:`replication strategy ` used by the keyspace. It is generally encouraged to use -one keyspace by *application*, and thus many cluster may define only one keyspace. - -This section describes the statements used to create, modify, and remove those keyspace and tables. - -Common definitions -^^^^^^^^^^^^^^^^^^ - -The names of the keyspaces and tables are defined by the following grammar: - -.. productionlist:: - keyspace_name: `name` - table_name: [ `keyspace_name` '.' ] `name` - name: `unquoted_name` | `quoted_name` - unquoted_name: re('[a-zA-Z_0-9]{1, 48}') - quoted_name: '"' `unquoted_name` '"' - -Both keyspace and table name should be comprised of only alphanumeric characters, cannot be empty and are limited in -size to 48 characters (that limit exists mostly to avoid filenames (which may include the keyspace and table name) to go -over the limits of certain file systems). By default, keyspace and table names are case insensitive (``myTable`` is -equivalent to ``mytable``) but case sensitivity can be forced by using double-quotes (``"myTable"`` is different from -``mytable``). - -Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is -part of. If is is not fully-qualified, the table is assumed to be in the *current* keyspace (see :ref:`USE statement -`). - -Further, the valid names for columns is simply defined as: - -.. 
productionlist:: - column_name: `identifier` - -We also define the notion of statement options for use in the following section: - -.. productionlist:: - options: `option` ( AND `option` )* - option: `identifier` '=' ( `identifier` | `constant` | `map_literal` ) - -.. _create-keyspace-statement: - -CREATE KEYSPACE -^^^^^^^^^^^^^^^ - -A keyspace is created using a ``CREATE KEYSPACE`` statement: - -.. productionlist:: - create_keyspace_statement: CREATE KEYSPACE [ IF NOT EXISTS ] `keyspace_name` WITH `options` - -For instance:: - - CREATE KEYSPACE excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3} - AND durable_writes = false; - -Attempting to create a keyspace that already exists will return an error unless the ``IF NOT EXISTS`` option is used. If -it is used, the statement will be a no-op if the keyspace already exists. - -The supported ``options`` are: - -=================== ========== =========== ========= =================================================================== -name kind mandatory default description -=================== ========== =========== ========= =================================================================== -``replication`` *map* yes The replication strategy and options to use for the keyspace (see - details below). -``durable_writes`` *simple* no true Whether to use the commit log for updates on this keyspace - (disable this option at your own risk!). -=================== ========== =========== ========= =================================================================== - -The ``replication`` property is mandatory and must at least contains the ``'class'`` sub-option which defines the -:ref:`replication strategy ` class to use. The rest of the sub-options depends on what replication -strategy is used. By default, Cassandra support the following ``'class'``: - -``SimpleStrategy`` -"""""""""""""""""" - -A simple strategy that defines a replication factor for data to be spread -across the entire cluster. This is generally not a wise choice for production -because it does not respect datacenter layouts and can lead to wildly varying -query latency. For a production ready strategy, see -``NetworkTopologyStrategy``. ``SimpleStrategy`` supports a single mandatory argument: - -========================= ====== ======= ============================================= -sub-option type since description -========================= ====== ======= ============================================= -``'replication_factor'`` int all The number of replicas to store per range -========================= ====== ======= ============================================= - -``NetworkTopologyStrategy`` -""""""""""""""""""""""""""" - -A production ready replication strategy that allows to set the replication -factor independently for each data-center. The rest of the sub-options are -key-value pairs where a key is a data-center name and its value is the -associated replication factor. Options: - -===================================== ====== ====== ============================================= -sub-option type since description -===================================== ====== ====== ============================================= -``''`` int all The number of replicas to store per range in - the provided datacenter. -``'replication_factor'`` int 4.0 The number of replicas to use as a default - per datacenter if not specifically provided. 
-                                                    Note that this always defers to existing
-                                                    definitions or explicit datacenter settings.
-                                                    For example, to have three replicas per
-                                                    datacenter, supply this with a value of 3.
-===================================== ====== ====== =============================================
-
-Note that when ``ALTER`` ing keyspaces and supplying ``replication_factor``,
-auto-expansion will only *add* new datacenters for safety; it will not alter
-existing datacenters or remove any, even if they are no longer in the cluster.
-If you want to remove datacenters while still supplying ``replication_factor``,
-explicitly zero out the datacenters you want to have zero replicas.
-
-An example of auto-expanding datacenters with two datacenters: ``DC1`` and ``DC2``::
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3}
-
-    DESCRIBE KEYSPACE excalibur
-        CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true;
-
-
-An example of auto-expanding and overriding a datacenter::
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2}
-
-    DESCRIBE KEYSPACE excalibur
-        CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true;
-
-An example that excludes a datacenter while using ``replication_factor``::
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0};
-
-    DESCRIBE KEYSPACE excalibur
-        CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true;
-
-If :ref:`transient replication ` has been enabled, transient replicas can be configured for both
-SimpleStrategy and NetworkTopologyStrategy by defining replication factors in the format
-``'<total_replicas>/<transient_replicas>'``.
-
-For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient::
-
-    CREATE KEYSPACE some_keyspace
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1', 'DC2' : '5/2'};
-
-.. _use-statement:
-
-USE
-^^^
-
-The ``USE`` statement allows you to change the *current* keyspace (for the *connection* on which it is executed). A number
-of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the
-default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a
-keyspace name). A ``USE`` statement simply takes the keyspace to use as the current one as its argument:
-
-.. productionlist::
-   use_statement: USE `keyspace_name`
-
-.. _alter-keyspace-statement:
-
-ALTER KEYSPACE
-^^^^^^^^^^^^^^
-
-An ``ALTER KEYSPACE`` statement allows you to modify the options of a keyspace:
-
-.. productionlist::
-   alter_keyspace_statement: ALTER KEYSPACE `keyspace_name` WITH `options`
-
-For instance::
-
-    ALTER KEYSPACE Excelsior
-        WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-The supported options are the same as for :ref:`creating a keyspace `.
-
-.. _drop-keyspace-statement:
-
-DROP KEYSPACE
-^^^^^^^^^^^^^
-
-Dropping a keyspace can be done using the ``DROP KEYSPACE`` statement:
-
-..
productionlist:: - drop_keyspace_statement: DROP KEYSPACE [ IF EXISTS ] `keyspace_name` - -For instance:: - - DROP KEYSPACE Excelsior; - -Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UTD and -functions in it, and all the data contained in those tables. - -If the keyspace does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _create-table-statement: - -CREATE TABLE -^^^^^^^^^^^^ - -Creating a new table uses the ``CREATE TABLE`` statement: - -.. productionlist:: - create_table_statement: CREATE TABLE [ IF NOT EXISTS ] `table_name` - : '(' - : `column_definition` - : ( ',' `column_definition` )* - : [ ',' PRIMARY KEY '(' `primary_key` ')' ] - : ')' [ WITH `table_options` ] - column_definition: `column_name` `cql_type` [ STATIC ] [ PRIMARY KEY] - primary_key: `partition_key` [ ',' `clustering_columns` ] - partition_key: `column_name` - : | '(' `column_name` ( ',' `column_name` )* ')' - clustering_columns: `column_name` ( ',' `column_name` )* - table_options: COMPACT STORAGE [ AND `table_options` ] - : | CLUSTERING ORDER BY '(' `clustering_order` ')' [ AND `table_options` ] - : | `options` - clustering_order: `column_name` (ASC | DESC) ( ',' `column_name` (ASC | DESC) )* - -For instance:: - - CREATE TABLE monkeySpecies ( - species text PRIMARY KEY, - common_name text, - population varint, - average_size int - ) WITH comment='Important biological records'; - - CREATE TABLE timeline ( - userid uuid, - posted_month int, - posted_time uuid, - body text, - posted_by text, - PRIMARY KEY (userid, posted_month, posted_time) - ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }; - - CREATE TABLE loads ( - machine inet, - cpu int, - mtime timeuuid, - load float, - PRIMARY KEY ((machine, cpu), mtime) - ) WITH CLUSTERING ORDER BY (mtime DESC); - -A CQL table has a name and is composed of a set of *rows*. Creating a table amounts to defining which :ref:`columns -` the rows will be composed, which of those columns compose the :ref:`primary key `, as -well as optional :ref:`options ` for the table. - -Attempting to create an already existing table will return an error unless the ``IF NOT EXISTS`` directive is used. If -it is used, the statement will be a no-op if the table already exists. - - -.. _column-definition: - -Column definitions -~~~~~~~~~~~~~~~~~~ - -Every rows in a CQL table has a set of predefined columns defined at the time of the table creation (or added later -using an :ref:`alter statement`). - -A :token:`column_definition` is primarily comprised of the name of the column defined and it's :ref:`type `, -which restrict which values are accepted for that column. Additionally, a column definition can have the following -modifiers: - -``STATIC`` - it declares the column as being a :ref:`static column `. - -``PRIMARY KEY`` - it declares the column as being the sole component of the :ref:`primary key ` of the table. - -.. _static-columns: - -Static columns -`````````````` -Some columns can be declared as ``STATIC`` in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same :ref:`partition key `). 
For instance::
-
-    CREATE TABLE t (
-        pk int,
-        t int,
-        v text,
-        s text static,
-        PRIMARY KEY (pk, t)
-    );
-
-    INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-    INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-    SELECT * FROM t;
-       pk | t | v      | s
-      ----+---+--------+-----------
-       0  | 0 | 'val0' | 'static1'
-       0  | 1 | 'val1' | 'static1'
-
-As can be seen, the ``s`` value is the same (``static1``) for both of the rows in the partition (the partition key in
-that example being ``pk``, both rows are in that same partition): the 2nd insertion has overridden the value for ``s``.
-
-The use of static columns has the following restrictions:
-
-- tables with the ``COMPACT STORAGE`` option (see below) cannot use them.
-- a table without clustering columns cannot have static columns (in a table without clustering columns, every partition
-  has only one row, and so every column is inherently static).
-- only non ``PRIMARY KEY`` columns can be static.
-
-.. _primary-key:
-
-The Primary key
-~~~~~~~~~~~~~~~
-
-Within a table, a row is uniquely identified by its ``PRIMARY KEY``, and hence every table **must** define a PRIMARY KEY
-(and only one). A ``PRIMARY KEY`` definition is composed of one or more of the columns defined in the table.
-Syntactically, the primary key is defined by the keywords ``PRIMARY KEY`` followed by a comma-separated list of the column
-names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that
-column definition by the ``PRIMARY KEY`` keywords. The order of the columns in the primary key definition matters.
-
-A CQL primary key is composed of 2 parts:
-
-- the :ref:`partition key ` part. It is the first component of the primary key definition. It can be a
-  single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key;
-  the smallest possible table definition is::
-
-      CREATE TABLE t (k text PRIMARY KEY);
-
-- the :ref:`clustering columns `. Those are the columns after the first component of the primary key
-  definition, and the order of those columns defines the *clustering order*.
-
-Some examples of primary key definitions are:
-
-- ``PRIMARY KEY (a)``: ``a`` is the partition key and there are no clustering columns.
-- ``PRIMARY KEY (a, b, c)`` : ``a`` is the partition key and ``b`` and ``c`` are the clustering columns.
-- ``PRIMARY KEY ((a, b), c)`` : ``a`` and ``b`` compose the partition key (this is often called a *composite* partition
-  key) and ``c`` is the clustering column.
-
-
-.. _partition-key:
-
-The partition key
-`````````````````
-
-Within a table, CQL defines the notion of a *partition*. A partition is simply the set of rows that share the same value
-for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same
-partition only if they have the same values for all those partition key columns. So for instance, given the following
-table definition and content::
-
-    CREATE TABLE t (
-        a int,
-        b int,
-        c int,
-        d int,
-        PRIMARY KEY ((a, b), c, d)
-    );
-
-    SELECT * FROM t;
-       a | b | c | d
-      ---+---+---+---
-       0 | 0 | 0 | 0    // row 1
-       0 | 0 | 1 | 1    // row 2
-       0 | 1 | 2 | 2    // row 3
-       0 | 1 | 3 | 3    // row 4
-       1 | 1 | 4 | 4    // row 5
-
-``row 1`` and ``row 2`` are in the same partition, ``row 3`` and ``row 4`` are also in the same partition (but a
-different one) and ``row 5`` is in yet another partition.
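As a minimal sketch (reusing the ``t`` table above; this reflects standard CQL semantics rather than text from the original page), a query must restrict every column of a composite partition key by equality in order to address a single partition::

    -- rows 3 and 4 live in the partition identified by (a=0, b=1)
    SELECT * FROM t WHERE a = 0 AND b = 1;

    -- restricting only part of the partition key is rejected
    -- unless ALLOW FILTERING is explicitly requested
    SELECT * FROM t WHERE a = 0;
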
-
-Note that a table always has a partition key, and that if the table has no :ref:`clustering columns
-`, then every partition of that table is only comprised of a single row (since the primary key
-uniquely identifies rows and the primary key is equal to the partition key if there are no clustering columns).
-
-The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be
-stored on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be
-located together in the cluster, and it is thus important to choose your partition key wisely so that rows that need
-to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of
-nodes).
-
-Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to
-be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot.
-
-Another useful property of a partition is that when writing data, all the updates belonging to a single partition are
-done *atomically* and in *isolation*, which is not the case across partitions.
-
-The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects
-of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they run.
-
-
-.. _clustering-columns:
-
-The clustering columns
-``````````````````````
-
-The clustering columns of a table define the clustering order for the partitions of that table. For a given
-:ref:`partition `, all the rows are physically ordered inside Cassandra by that clustering order. For
-instance, given::
-
-    CREATE TABLE t (
-        a int,
-        b int,
-        c int,
-        PRIMARY KEY (a, b, c)
-    );
-
-    SELECT * FROM t;
-       a | b | c
-      ---+---+---
-       0 | 0 | 4     // row 1
-       0 | 1 | 9     // row 2
-       0 | 2 | 2     // row 3
-       0 | 3 | 3     // row 4
-
-then the rows (which all belong to the same partition) are all stored internally in the order of the values of their
-``b`` column (the order they are displayed above). So where the partition key of the table allows grouping rows on the
-same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the
-retrieval of a range of rows within a partition (for instance, in the example above,
-``SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3``) to be very efficient.
-
-
-.. _create-table-options:
-
-Table options
-~~~~~~~~~~~~~
-
-A CQL table has a number of options that can be set at creation (and, for most of them, :ref:`altered
-` later). These options are specified after the ``WITH`` keyword.
-
-Amongst those options, two important ones cannot be changed after creation and influence which queries can be done
-against the table: the ``COMPACT STORAGE`` option and the ``CLUSTERING ORDER`` option. Those, as well as the other
-options of a table, are described in the following sections.
-
-.. _compact-tables:
-
-Compact tables
-``````````````
-
-.. warning:: Since Cassandra 3.0, compact tables have the exact same layout internally as non-compact ones (for the
-    same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition
-    and usage.
It only exists for historical reason and is preserved for backward compatibility And as ``COMPACT - STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new table with the - ``COMPACT STORAGE`` option. - -A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is only maintained for backward -compatibility for definitions created before CQL version 3 and shouldn't be used for new tables. Declaring a -table with this option creates limitations for the table which are largely arbitrary (and exists for historical -reasons). Amongst those limitation: - -- a compact table cannot use collections nor static columns. -- if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary - key ones. This imply you cannot add or remove columns after creation in particular. -- a compact table is limited in the indexes it can create, and no materialized view can be created on it. - -.. _clustering-order: - -Reversing the clustering order -`````````````````````````````` - -The clustering order of a table is defined by the :ref:`clustering columns ` of that table. By -default, that ordering is based on natural order of those clustering order, but the ``CLUSTERING ORDER`` allows to -change that clustering order to use the *reverse* natural order for some (potentially all) of the columns. - -The ``CLUSTERING ORDER`` option takes the comma-separated list of the clustering column, each with a ``ASC`` (for -*ascendant*, e.g. the natural order) or ``DESC`` (for *descendant*, e.g. the reverse natural order). Note in particular -that the default (if the ``CLUSTERING ORDER`` option is not used) is strictly equivalent to using the option with all -clustering columns using the ``ASC`` modifier. - -Note that this option is basically a hint for the storage engine to change the order in which it stores the row but it -has 3 visible consequences: - -# it limits which ``ORDER BY`` clause are allowed for :ref:`selects ` on that table. You can only - order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering column - ``a`` and ``b`` and you defined ``WITH CLUSTERING ORDER (a DESC, b ASC)``, then in queries you will be allowed to use - ``ORDER BY (a DESC, b ASC)`` and (reverse clustering order) ``ORDER BY (a ASC, b DESC)`` but **not** ``ORDER BY (a - ASC, b ASC)`` (nor ``ORDER BY (a DESC, b DESC)``). -# it also change the default order of results when queried (if no ``ORDER BY`` is provided). Results are always returned - in clustering order (within a partition). -# it has a small performance impact on some queries as queries in reverse clustering order are slower than the one in - forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of - your columns (which is common with time series for instance where you often want data from the newest to the oldest), - it is an optimization to declare a descending clustering order. - -.. _create-table-general-options: - -Other table options -``````````````````` - -.. 
todo:: review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance) - -A table supports the following options: - -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| option | kind | default | description | -+================================+==========+=============+===========================================================+ -| ``comment`` | *simple* | none | A free-form, human-readable comment. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``speculative_retry`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``additional_write_policy`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``gc_grace_seconds`` | *simple* | 864000 | Time to wait before garbage collecting tombstones | -| | | | (deletion markers). | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``bloom_filter_fp_chance`` | *simple* | 0.00075 | The target probability of false positive of the sstable | -| | | | bloom filters. Said bloom filters will be sized to provide| -| | | | the provided probability (thus lowering this value impact | -| | | | the size of bloom filters in-memory and on-disk) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``default_time_to_live`` | *simple* | 0 | The default expiration time (“TTL”) in seconds for a | -| | | | table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compaction`` | *map* | *see below* | :ref:`Compaction options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compression`` | *map* | *see below* | :ref:`Compression options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``caching`` | *map* | *see below* | :ref:`Caching options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``memtable_flush_period_in_ms``| *simple* | 0 | Time (in ms) before Cassandra flushes memtables to disk. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``read_repair`` | *simple* | BLOCKING | Sets read repair behavior (see below) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ - -.. _speculative-retry-options: - -Speculative retry options -######################### - -By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ``ONE``, a quorum for ``QUORUM``, and so on. -``speculative_retry`` determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. 
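As a hedged sketch (the table name ``events`` is hypothetical; the threshold value reuses the hybrid example from the table of legal values below), ``speculative_retry`` is set like any other table option::

    ALTER TABLE events WITH speculative_retry = 'MIN(99PERCENTILE,35MS)';
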
``additional_write_policy`` specifies the threshold at which -a cheap quorum write will be upgraded to include transient replicas. The following are legal values (case-insensitive): - -============================ ======================== ============================================================================= - Format Example Description -============================ ======================== ============================================================================= - ``XPERCENTILE`` 90.5PERCENTILE Coordinators record average per-table response times for all replicas. - If a replica takes longer than ``X`` percent of this table's average - response time, the coordinator queries an additional replica. - ``X`` must be between 0 and 100. - ``XP`` 90.5P Synonym for ``XPERCENTILE`` - ``Yms`` 25ms If a replica takes more than ``Y`` milliseconds to respond, - the coordinator queries an additional replica. - ``MIN(XPERCENTILE,YMS)`` MIN(99PERCENTILE,35MS) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is lower at the time of calculation. - Parameters are ``XPERCENTILE``, ``XP``, or ``Yms``. - This is helpful to help protect against a single slow instance; in the - happy case the 99th percentile is normally lower than the specified - fixed value however, a slow host may skew the percentile very high - meaning the slower the cluster gets, the higher the value of the percentile, - and the higher the calculated time used to determine if we should - speculate or not. This allows us to set an upper limit that we want to - speculate at, but avoid skewing the tail latencies by speculating at the - lower value when the percentile is less than the specified fixed upper bound. - ``MAX(XPERCENTILE,YMS)`` MAX(90.5P,25ms) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is higher at the time of calculation. - ``ALWAYS`` Coordinators always query all replicas. - ``NEVER`` Coordinators never query additional replicas. -============================ =================== ============================================================================= - -This setting does not affect reads with consistency level ``ALL`` because they already query all replicas. - -Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default ``99PERCENTILE``. - -.. _cql-compaction-options: - -Compaction options -################## - -The ``compaction`` options must at least define the ``'class'`` sub-option, that defines the compaction strategy class -to use. The default supported class are ``'SizeTieredCompactionStrategy'`` (:ref:`STCS `), -``'LeveledCompactionStrategy'`` (:ref:`LCS `) and ``'TimeWindowCompactionStrategy'`` (:ref:`TWCS `) (the -``'DateTieredCompactionStrategy'`` is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be -preferred instead). Custom strategy can be provided by specifying the full class name as a :ref:`string constant -`. - -All default strategies support a number of :ref:`common options `, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS `, :ref:`LCS -` and :ref:`TWCS `). - -.. _cql-compression-options: - -Compression options -################### - -The ``compression`` options define if and how the sstables of the table are compressed. 
The following sub-options are -available: - -========================= =============== ============================================================================= - Option Default Description -========================= =============== ============================================================================= - ``class`` LZ4Compressor The compression algorithm to use. Default compressor are: LZ4Compressor, - SnappyCompressor and DeflateCompressor. Use ``'enabled' : false`` to disable - compression. Custom compressor can be provided by specifying the full class - name as a “string constant”:#constants. - ``enabled`` true Enable/disable sstable compression. - ``chunk_length_in_kb`` 64 On disk SSTables are compressed by block (to allow random reads). This - defines the size (in KB) of said block. Bigger values may improve the - compression rate, but increases the minimum size of data to be read from disk - for a read - ``crc_check_chance`` 1.0 When compression is enabled, each compressed block includes a checksum of - that block for the purpose of detecting disk bitrot and avoiding the - propagation of corruption to other replica. This option defines the - probability with which those checksums are checked during read. By default - they are always checked. Set to 0 to disable checksum checking and to 0.5 for - instance to check them every other read | -========================= =============== ============================================================================= - - -For instance, to create a table with LZ4Compressor and a chunk_lenth_in_kb of 4KB:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4}; - - -.. _cql-caching-options: - -Caching options -############### - -The ``caching`` options allows to configure both the *key cache* and the *row cache* for the table. The following -sub-options are available: - -======================== ========= ==================================================================================== - Option Default Description -======================== ========= ==================================================================================== - ``keys`` ALL Whether to cache keys (“key cache”) for this table. Valid values are: ``ALL`` and - ``NONE``. - ``rows_per_partition`` NONE The amount of rows to cache per partition (“row cache”). If an integer ``n`` is - specified, the first ``n`` queried rows of a partition will be cached. Other - possible options are ``ALL``, to cache all rows of a queried partition, or ``NONE`` - to disable row caching. -======================== ========= ==================================================================================== - - -For instance, to create a table with both a key cache and 10 rows per partition:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10}; - - -Read Repair options -################### - -The ``read_repair`` options configures the read repair behavior to allow tuning for various performance and -consistency behaviors. Two consistency properties are affected by read repair behavior. - -- Monotonic Quorum Reads: Provided by ``BLOCKING``. Monotonic quorum reads prevents reads from appearing to go back - in time in some circumstances. 
When monotonic quorum reads are not provided and a write fails to reach a quorum of - replicas, it may be visible in one read, and then disappear in a subsequent read. -- Write Atomicity: Provided by ``NONE``. Write atomicity prevents reads from returning partially applied writes. - Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement - is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it - is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a - batch, but then select a single row by specifying the clustering column in a SELECT statement. - -The available read repair settings are: - -Blocking -```````` -The default setting. When ``read_repair`` is set to ``BLOCKING``, and a read repair is triggered, the read will block -on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition -level write atomicity - -None -```` - -When ``read_repair`` is set to ``NONE``, the coordinator will reconcile any differences between replicas, but will not -attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads. - - -Other considerations: -##################### - -- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. There is thus no need to try to - anticipate future usage when creating a table. - -.. _alter-table-statement: - -ALTER TABLE -^^^^^^^^^^^ - -Altering an existing table uses the ``ALTER TABLE`` statement: - -.. productionlist:: - alter_table_statement: ALTER TABLE `table_name` `alter_table_instruction` - alter_table_instruction: ADD `column_name` `cql_type` ( ',' `column_name` `cql_type` )* - : | DROP `column_name` ( `column_name` )* - : | WITH `options` - -For instance:: - - ALTER TABLE addamsFamily ADD gravesite varchar; - - ALTER TABLE addamsFamily - WITH comment = 'A most excellent and useful table'; - -The ``ALTER TABLE`` statement can: - -- Add new column(s) to the table (through the ``ADD`` instruction). Note that the primary key of a table cannot be - changed and thus newly added column will, by extension, never be part of the primary key. Also note that :ref:`compact - tables ` have restrictions regarding column addition. Note that this is constant (in the amount of - data the cluster contains) time operation. -- Remove column(s) from the table. This drops both the column and all its content, but note that while the column - becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings - below. Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the - cluster) time operation. -- Change some of the table options (through the ``WITH`` instruction). The :ref:`supported options - ` are the same that when creating a table (outside of ``COMPACT STORAGE`` and ``CLUSTERING - ORDER`` that cannot be changed after creation). Note that setting any ``compaction`` sub-options has the effect of - erasing all previous ``compaction`` options, so you need to re-specify all the sub-options if you want to keep them. - The same note applies to the set of ``compression`` sub-options. - -.. warning:: Dropping a column assumes that the timestamps used for the value of this column are "real" timestamp in - microseconds. 
Using "real" timestamps in microseconds is the default is and is **strongly** recommended but as - Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another - convention. Please be aware that if you do so, dropping a column will not work correctly. - -.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name than the dropped one - **unless** the type of the dropped column was a (non-frozen) column (due to an internal technical limitation). - - -.. _drop-table-statement: - -DROP TABLE -^^^^^^^^^^ - -Dropping a table uses the ``DROP TABLE`` statement: - -.. productionlist:: - drop_table_statement: DROP TABLE [ IF EXISTS ] `table_name` - -Dropping a table results in the immediate, irreversible removal of the table, including all data it contains. - -If the table does not exist, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _truncate-statement: - -TRUNCATE -^^^^^^^^ - -A table can be truncated using the ``TRUNCATE`` statement: - -.. productionlist:: - truncate_statement: TRUNCATE [ TABLE ] `table_name` - -Note that ``TRUNCATE TABLE foo`` is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the ``TABLE`` keyword can be omitted. - -Truncating a table permanently removes all existing data from the table, but without removing the table itself. diff --git a/src/doc/4.0-alpha3/_sources/cql/definitions.rst.txt b/src/doc/4.0-alpha3/_sources/cql/definitions.rst.txt deleted file mode 100644 index 3df6f2099..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/definitions.rst.txt +++ /dev/null @@ -1,234 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. highlight:: cql - -Definitions ------------ - -.. _conventions: - -Conventions -^^^^^^^^^^^ - -To aid in specifying the CQL syntax, we will use the following conventions in this document: - -- Language rules will be given in an informal `BNF variant - `_ notation. In particular, we'll use square brakets - (``[ item ]``) for optional items, ``*`` and ``+`` for repeated items (where ``+`` imply at least one). -- The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to - their definition) while terminal keywords will be provided "all caps". Note however that keywords are - :ref:`identifiers` and are thus case insensitive in practice. We will also define some early construction using - regexp, which we'll indicate with ``re()``. -- The grammar is provided for documentation purposes and leave some minor details out. 
For instance, the comma on the - last column definition in a ``CREATE TABLE`` statement is optional but supported if present even though the grammar in - this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL. -- References to keywords or pieces of CQL code in running text will be shown in a ``fixed-width font``. - - -.. _identifiers: - -Identifiers and keywords -^^^^^^^^^^^^^^^^^^^^^^^^ - -The CQL language uses *identifiers* (or *names*) to identify tables, columns and other objects. An identifier is a token -matching the regular expression ``[a-zA-Z][a-zA-Z0-9_]*``. - -A number of such identifiers, like ``SELECT`` or ``WITH``, are *keywords*. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in :ref:`appendix-A`. - -Identifiers and (unquoted) keywords are case insensitive. Thus ``SELECT`` is the same than ``select`` or ``sElEcT``, and -``myId`` is the same than ``myid`` or ``MYID``. A convention often used (in particular by the samples of this -documentation) is to use upper case for keywords and lower case for other identifiers. - -There is a second kind of identifiers called *quoted identifiers* defined by enclosing an arbitrary sequence of -characters (non empty) in double-quotes(``"``). Quoted identifiers are never keywords. Thus ``"select"`` is not a -reserved keyword and can be used to refer to a column (note that using this is particularly advised), while ``select`` -would raise a parsing error. Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case -sensitive (``"My Quoted Id"`` is *different* from ``"my quoted id"``). A fully lowercase quoted identifier that matches -``[a-zA-Z][a-zA-Z0-9_]*`` is however *equivalent* to the unquoted identifier obtained by removing the double-quote (so -``"myid"`` is equivalent to ``myid`` and to ``myId`` but different from ``"myId"``). Inside a quoted identifier, the -double-quote character can be repeated to escape it, so ``"foo "" bar"`` is a valid identifier. - -.. note:: *quoted identifiers* allows to declare columns with arbitrary names, and those can sometime clash with - specific names used by the server. For instance, when using conditional update, the server will respond with a - result-set containing a special result named ``"[applied]"``. If you’ve declared a column with such a name, this - could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but - if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like - ``"[applied]"``) and any name that looks like a function call (like ``"f(x)"``). - -More formally, we have: - -.. productionlist:: - identifier: `unquoted_identifier` | `quoted_identifier` - unquoted_identifier: re('[a-zA-Z][a-zA-Z0-9_]*') - quoted_identifier: '"' (any character where " can appear if doubled)+ '"' - -.. _constants: - -Constants -^^^^^^^^^ - -CQL defines the following kind of *constants*: - -.. 
productionlist:: - constant: `string` | `integer` | `float` | `boolean` | `uuid` | `blob` | NULL - string: '\'' (any character where ' can appear if doubled)+ '\'' - : '$$' (any character other than '$$') '$$' - integer: re('-?[0-9]+') - float: re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY - boolean: TRUE | FALSE - uuid: `hex`{8}-`hex`{4}-`hex`{4}-`hex`{4}-`hex`{12} - hex: re("[0-9a-fA-F]") - blob: '0' ('x' | 'X') `hex`+ - -In other words: - -- A string constant is an arbitrary sequence of characters enclosed by single-quote(``'``). A single-quote - can be included by repeating it, e.g. ``'It''s raining today'``. Those are not to be confused with quoted - :ref:`identifiers` that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence - of characters by two dollar characters, in which case single-quote can be used without escaping (``$$It's raining - today$$``). That latter form is often used when defining :ref:`user-defined functions ` to avoid having to - escape single-quote characters in function body (as they are more likely to occur than ``$$``). -- Integer, float and boolean constant are defined as expected. Note however than float allows the special ``NaN`` and - ``Infinity`` constants. -- CQL supports UUID_ constants. -- Blobs content are provided in hexadecimal and prefixed by ``0x``. -- The special ``NULL`` constant denotes the absence of value. - -For how these constants are typed, see the :ref:`data-types` section. - -Terms -^^^^^ - -CQL has the notion of a *term*, which denotes the kind of values that CQL support. Terms are defined by: - -.. productionlist:: - term: `constant` | `literal` | `function_call` | `arithmetic_operation` | `type_hint` | `bind_marker` - literal: `collection_literal` | `udt_literal` | `tuple_literal` - function_call: `identifier` '(' [ `term` (',' `term`)* ] ')' - arithmetic_operation: '-' `term` | `term` ('+' | '-' | '*' | '/' | '%') `term` - type_hint: '(' `cql_type` `)` term - bind_marker: '?' | ':' `identifier` - -A term is thus one of: - -- A :ref:`constant `. -- A literal for either :ref:`a collection `, :ref:`a user-defined type ` or :ref:`a tuple ` - (see the linked sections for details). -- A function call: see :ref:`the section on functions ` for details on which :ref:`native function - ` exists and how to define your own :ref:`user-defined ones `. -- An arithmetic operation between terms. see :ref:`the section on arithmetic operations ` -- A *type hint*: see the :ref:`related section ` for details. -- A bind marker, which denotes a variable to be bound at execution time. See the section on :ref:`prepared-statements` - for details. A bind marker can be either anonymous (``?``) or named (``:some_name``). The latter form provides a more - convenient way to refer to the variable for binding it and should generally be preferred. - - -Comments -^^^^^^^^ - -A comment in CQL is a line beginning by either double dashes (``--``) or double slash (``//``). - -Multi-line comments are also supported through enclosure within ``/*`` and ``*/`` (but nesting is not supported). - -:: - - -- This is a comment - // This is a comment too - /* This is - a multi-line comment */ - -Statements -^^^^^^^^^^ - -CQL consists of statements that can be divided in the following categories: - -- :ref:`data-definition` statements, to define and change how the data is stored (keyspaces and tables). -- :ref:`data-manipulation` statements, for selecting, inserting and deleting data. -- :ref:`secondary-indexes` statements. 
-- :ref:`materialized-views` statements. -- :ref:`cql-roles` statements. -- :ref:`cql-permissions` statements. -- :ref:`User-Defined Functions ` statements. -- :ref:`udts` statements. -- :ref:`cql-triggers` statements. - -All the statements are listed below and are described in the rest of this documentation (see links above): - -.. productionlist:: - cql_statement: `statement` [ ';' ] - statement: `ddl_statement` - : | `dml_statement` - : | `secondary_index_statement` - : | `materialized_view_statement` - : | `role_or_permission_statement` - : | `udf_statement` - : | `udt_statement` - : | `trigger_statement` - ddl_statement: `use_statement` - : | `create_keyspace_statement` - : | `alter_keyspace_statement` - : | `drop_keyspace_statement` - : | `create_table_statement` - : | `alter_table_statement` - : | `drop_table_statement` - : | `truncate_statement` - dml_statement: `select_statement` - : | `insert_statement` - : | `update_statement` - : | `delete_statement` - : | `batch_statement` - secondary_index_statement: `create_index_statement` - : | `drop_index_statement` - materialized_view_statement: `create_materialized_view_statement` - : | `drop_materialized_view_statement` - role_or_permission_statement: `create_role_statement` - : | `alter_role_statement` - : | `drop_role_statement` - : | `grant_role_statement` - : | `revoke_role_statement` - : | `list_roles_statement` - : | `grant_permission_statement` - : | `revoke_permission_statement` - : | `list_permissions_statement` - : | `create_user_statement` - : | `alter_user_statement` - : | `drop_user_statement` - : | `list_users_statement` - udf_statement: `create_function_statement` - : | `drop_function_statement` - : | `create_aggregate_statement` - : | `drop_aggregate_statement` - udt_statement: `create_type_statement` - : | `alter_type_statement` - : | `drop_type_statement` - trigger_statement: `create_trigger_statement` - : | `drop_trigger_statement` - -.. _prepared-statements: - -Prepared Statements -^^^^^^^^^^^^^^^^^^^ - -CQL supports *prepared statements*. Prepared statements are an optimization that allows to parse a query only once but -execute it multiple times with different concrete values. - -Any statement that uses at least one bind marker (see :token:`bind_marker`) will need to be *prepared*. After which the statement -can be *executed* by provided concrete values for each of its marker. The exact details of how a statement is prepared -and then executed depends on the CQL driver used and you should refer to your driver documentation. diff --git a/src/doc/4.0-alpha3/_sources/cql/dml.rst.txt b/src/doc/4.0-alpha3/_sources/cql/dml.rst.txt deleted file mode 100644 index 1308de57e..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/dml.rst.txt +++ /dev/null @@ -1,522 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-manipulation: - -Data Manipulation ------------------ - -This section describes the statements supported by CQL to insert, update, delete and query data. - -.. _select-statement: - -SELECT -^^^^^^ - -Querying data from data is done using a ``SELECT`` statement: - -.. productionlist:: - select_statement: SELECT [ JSON | DISTINCT ] ( `select_clause` | '*' ) - : FROM `table_name` - : [ WHERE `where_clause` ] - : [ GROUP BY `group_by_clause` ] - : [ ORDER BY `ordering_clause` ] - : [ PER PARTITION LIMIT (`integer` | `bind_marker`) ] - : [ LIMIT (`integer` | `bind_marker`) ] - : [ ALLOW FILTERING ] - select_clause: `selector` [ AS `identifier` ] ( ',' `selector` [ AS `identifier` ] ) - selector: `column_name` - : | `term` - : | CAST '(' `selector` AS `cql_type` ')' - : | `function_name` '(' [ `selector` ( ',' `selector` )* ] ')' - : | COUNT '(' '*' ')' - where_clause: `relation` ( AND `relation` )* - relation: `column_name` `operator` `term` - : '(' `column_name` ( ',' `column_name` )* ')' `operator` `tuple_literal` - : TOKEN '(' `column_name` ( ',' `column_name` )* ')' `operator` `term` - operator: '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY - group_by_clause: `column_name` ( ',' `column_name` )* - ordering_clause: `column_name` [ ASC | DESC ] ( ',' `column_name` [ ASC | DESC ] )* - -For instance:: - - SELECT name, occupation FROM users WHERE userid IN (199, 200, 207); - SELECT JSON name, occupation FROM users WHERE userid = 199; - SELECT name AS user_name, occupation AS user_occupation FROM users; - - SELECT time, value - FROM events - WHERE event_type = 'myEvent' - AND time > '2011-02-03' - AND time <= '2012-01-01' - - SELECT COUNT (*) AS user_count FROM users; - -The ``SELECT`` statements reads one or more columns for one or more rows in a table. It returns a result-set of the rows -matching the request, where each row contains the values for the selection corresponding to the query. Additionally, -:ref:`functions ` including :ref:`aggregation ` ones can be applied to the result. - -A ``SELECT`` statement contains at least a :ref:`selection clause ` and the name of the table on which -the selection is on (note that CQL does **not** joins or sub-queries and thus a select statement only apply to a single -table). In most case, a select will also have a :ref:`where clause ` and it can optionally have additional -clauses to :ref:`order ` or :ref:`limit ` the results. Lastly, :ref:`queries that require -filtering ` can be allowed if the ``ALLOW FILTERING`` flag is provided. - -.. _selection-clause: - -Selection clause -~~~~~~~~~~~~~~~~ - -The :token:`select_clause` determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. It consists of a comma-separated list of *selectors* or, -alternatively, of the wildcard character (``*``) to select all the columns defined in the table. - -Selectors -````````` - -A :token:`selector` can be one of: - -- A column name of the table selected, to retrieve the values for that column. -- A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the - corresponding column of the result-set will simply have the value of this term for every row returned). -- A casting, which allows to convert a nested selector to a (compatible) type. 
-- A function call, where the arguments are selector themselves. See the section on :ref:`functions ` for - more details. -- The special call ``COUNT(*)`` to the :ref:`COUNT function `, which counts all non-null results. - -Aliases -``````` - -Every *top-level* selector can also be aliased (using `AS`). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:: - - // Without alias - SELECT intAsBlob(4) FROM t; - - // intAsBlob(4) - // -------------- - // 0x00000004 - - // With alias - SELECT intAsBlob(4) AS four FROM t; - - // four - // ------------ - // 0x00000004 - -.. note:: Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the ``WHERE`` - clause, not in the ``ORDER BY`` clause, ...). You must use the orignal column name instead. - - -``WRITETIME`` and ``TTL`` function -``````````````````````````````````` - -Selection supports two special functions (that aren't allowed anywhere else): ``WRITETIME`` and ``TTL``. Both function -take only one argument and that argument *must* be a column name (so for instance ``TTL(3)`` is invalid). - -Those functions allow to retrieve meta-information that are stored internally for each column, namely: - -- the timestamp of the value of the column for ``WRITETIME``. -- the remaining time to live (in seconds) for the value of the column if it set to expire (and ``null`` otherwise). - -.. _where-clause: - -The ``WHERE`` clause -~~~~~~~~~~~~~~~~~~~~ - -The ``WHERE`` clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the ``PRIMARY KEY`` and/or have a `secondary index <#createIndexStmt>`__ defined on them. - -Not all relations are allowed in a query. For instance, non-equal relations (where ``IN`` is considered as an equal -relation) on a partition key are not supported (but see the use of the ``TOKEN`` method below to do non-equal queries on -the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations -on them is restricted to the relations that allow to select a **contiguous** (for the ordering) set of rows. For -instance, given:: - - CREATE TABLE posts ( - userid text, - blog_title text, - posted_at timestamp, - entry_title text, - content text, - category int, - PRIMARY KEY (userid, blog_title, posted_at) - ) - -The following query is allowed:: - - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND blog_title='John''s Blog' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):: - - // Needs a blog_title to be set to select ranges of posted_at - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -When specifying relations, the ``TOKEN`` function can be used on the ``PARTITION KEY`` column to query. In that case, -rows will be selected based on the token of their ``PARTITION_KEY`` rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -``token(-1) > token(0)`` in particular). 
Example:: - - SELECT * FROM posts - WHERE token(userid) > token('tom') AND token(userid) < token('bob') - -Moreover, the ``IN`` relation is only allowed on the last column of the partition key and on the last column of the full -primary key. - -It is also possible to “group” ``CLUSTERING COLUMNS`` together in a relation using the tuple notation. For instance:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01') - -will request all rows that sorts after the one having “John's Blog” as ``blog_tile`` and '2012-01-01' for ``posted_at`` -in the clustering order. In particular, rows having a ``post_at <= '2012-01-01'`` will be returned as long as their -``blog_title > 'John''s Blog'``, which would not be the case for:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND blog_title > 'John''s Blog' - AND posted_at > '2012-01-01' - -The tuple notation may also be used for ``IN`` clauses on clustering columns:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01')) - -The ``CONTAINS`` operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -``CONTAINS`` applies to the map values. The ``CONTAINS KEY`` operator may only be used on map columns and applies to the -map keys. - -.. _group-by-clause: - -Grouping results -~~~~~~~~~~~~~~~~ - -The ``GROUP BY`` option allows to condense into a single row all selected rows that share the same values for a set -of columns. - -Using the ``GROUP BY`` option, it is only possible to group rows at the partition key level or at a clustering column -level. By consequence, the ``GROUP BY`` option only accept as arguments primary key column names in the primary key -order. If a primary key column is restricted by an equality restriction it is not required to be present in the -``GROUP BY`` clause. - -Aggregate functions will produce a separate value for each group. If no ``GROUP BY`` clause is specified, -aggregates functions will produce a single value for all the rows. - -If a column is selected without an aggregate function, in a statement with a ``GROUP BY``, the first value encounter -in each group will be returned. - -.. _ordering-clause: - -Ordering results -~~~~~~~~~~~~~~~~ - -The ``ORDER BY`` clause allows to select the order of the returned results. It takes as argument a list of column names -along with the order for the column (``ASC`` for ascendant and ``DESC`` for descendant, omitting the order being -equivalent to ``ASC``). Currently the possible orderings are limited by the :ref:`clustering order ` -defined on the table: - -- if the table has been defined without any specific ``CLUSTERING ORDER``, then then allowed orderings are the order - induced by the clustering columns and the reverse of that one. -- otherwise, the orderings allowed are the order of the ``CLUSTERING ORDER`` option and the reversed one. - -.. _limit-clause: - -Limiting results -~~~~~~~~~~~~~~~~ - -The ``LIMIT`` option to a ``SELECT`` statement limits the number of rows returned by a query, while the ``PER PARTITION -LIMIT`` option limits the number of rows returned for a given partition by the query. Note that both type of limit can -used in the same statement. - -.. _allow-filtering: - -Allowing filtering -~~~~~~~~~~~~~~~~~~ - -By default, CQL only allows select queries that don't involve “filtering” server side, i.e. 
queries where we know that -all (live) record read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” -queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of -data **returned** by the query (which can be controlled through ``LIMIT``). - -The ``ALLOW FILTERING`` option allows to explicitly allow (some) queries that require filtering. Please note that a -query using ``ALLOW FILTERING`` may thus have unpredictable performance (for the definition above), i.e. even a query -that selects a handful of records **may** exhibit performance that depends on the total amount of data stored in the -cluster. - -For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:: - - CREATE TABLE users ( - username text PRIMARY KEY, - firstname text, - lastname text, - birth_year int, - country text - ) - - CREATE INDEX ON users(birth_year); - -Then the following queries are valid:: - - SELECT * FROM users; - SELECT * FROM users WHERE birth_year = 1981; - -because in both case, Cassandra guarantees that these queries performance will be proportional to the amount of data -returned. In particular, if no users are born in 1981, then the second query performance will not depend of the number -of user profile stored in the database (not directly at least: due to secondary index implementation consideration, this -query may still depend on the number of node in the cluster, which indirectly depends on the amount of data stored. -Nevertheless, the number of nodes will always be multiple number of magnitude lower than the number of user profile -stored). Of course, both query may return very large result set in practice, but the amount of data returned can always -be controlled by adding a ``LIMIT``. - -However, the following query will be rejected:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR'; - -because Cassandra cannot guarantee that it won't have to scan large amount of data even if the result to those query is -small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from -France. However, if you “know what you are doing”, you can force the execution of this query by using ``ALLOW -FILTERING`` and so the following query is valid:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING; - -.. _insert-statement: - -INSERT -^^^^^^ - -Inserting data for a row is done using an ``INSERT`` statement: - -.. productionlist:: - insert_statement: INSERT INTO `table_name` ( `names_values` | `json_clause` ) - : [ IF NOT EXISTS ] - : [ USING `update_parameter` ( AND `update_parameter` )* ] - names_values: `names` VALUES `tuple_literal` - json_clause: JSON `string` [ DEFAULT ( NULL | UNSET ) ] - names: '(' `column_name` ( ',' `column_name` )* ')' - -For instance:: - - INSERT INTO NerdMovies (movie, director, main_actor, year) - VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005) - USING TTL 86400; - - INSERT INTO NerdMovies JSON '{"movie": "Serenity", - "director": "Joss Whedon", - "year": 2005}'; - -The ``INSERT`` statement writes one or more columns for a given row in a table. Note that since a row is identified by -its ``PRIMARY KEY``, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the ``VALUES`` syntax. 
When using the ``JSON`` syntax, they are optional. See the -section on :ref:`JSON support ` for more detail. - -Note that unlike in SQL, ``INSERT`` does not check the prior existence of the row by default: the row is created if none -existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened. - -It is however possible to use the ``IF NOT EXISTS`` condition to only insert if the row does not exist prior to the -insertion. But please note that using ``IF NOT EXISTS`` will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly. - -All updates for an ``INSERT`` are applied atomically and in isolation. - -Please refer to the :ref:`UPDATE ` section for informations on the :token:`update_parameter`. - -Also note that ``INSERT`` does not support counters, while ``UPDATE`` does. - -.. _update-statement: - -UPDATE -^^^^^^ - -Updating a row is done using an ``UPDATE`` statement: - -.. productionlist:: - update_statement: UPDATE `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : SET `assignment` ( ',' `assignment` )* - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - update_parameter: ( TIMESTAMP | TTL ) ( `integer` | `bind_marker` ) - assignment: `simple_selection` '=' `term` - :| `column_name` '=' `column_name` ( '+' | '-' ) `term` - :| `column_name` '=' `list_literal` '+' `column_name` - simple_selection: `column_name` - :| `column_name` '[' `term` ']' - :| `column_name` '.' `field_name - condition: `simple_selection` `operator` `term` - -For instance:: - - UPDATE NerdMovies USING TTL 400 - SET director = 'Joss Whedon', - main_actor = 'Nathan Fillion', - year = 2005 - WHERE movie = 'Serenity'; - - UPDATE UserActions - SET total = total + 2 - WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 - AND action = 'click'; - -The ``UPDATE`` statement writes one or more columns for a given row in a table. The :token:`where_clause` is used to -select the row to update and must include all columns composing the ``PRIMARY KEY``. Non primary key columns are then -set using the ``SET`` keyword. - -Note that unlike in SQL, ``UPDATE`` does not check the prior existence of the row by default (except through ``IF``, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred. - -It is however possible to use the conditions on some columns through ``IF``, in which case the row will not be updated -unless the conditions are met. But, please note that using ``IF`` conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly. - -In an ``UPDATE`` statement, all updates within the same partition key are applied atomically and in isolation. - -Regarding the :token:`assignment`: - -- ``c = c + 3`` is used to increment/decrement counters. The column name after the '=' sign **must** be the same than - the one before the '=' sign. Note that increment/decrement is only allowed on counters, and are the *only* update - operations allowed on counters. See the section on :ref:`counters ` for details. -- ``id = id + `` and ``id[value1] = value2`` are for collections, see the :ref:`relevant section - ` for details. -- ``id.field = 3`` is for setting the value of a field on a non-frozen user-defined types. see the :ref:`relevant section - ` for details. - -.. 
_update-parameters: - -Update parameters -~~~~~~~~~~~~~~~~~ - -The ``UPDATE``, ``INSERT`` (and ``DELETE`` and ``BATCH`` for the ``TIMESTAMP``) statements support the following -parameters: - -- ``TIMESTAMP``: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in - microseconds) at the start of statement execution as the timestamp. This is usually a suitable default. -- ``TTL``: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are - automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not - the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL - is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a - default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of ``null`` is equivalent - to inserting with a TTL of 0. - -.. _delete_statement: - -DELETE -^^^^^^ - -Deleting rows or parts of rows uses the ``DELETE`` statement: - -.. productionlist:: - delete_statement: DELETE [ `simple_selection` ( ',' `simple_selection` ) ] - : FROM `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - -For instance:: - - DELETE FROM NerdMovies USING TIMESTAMP 1240003134 - WHERE movie = 'Serenity'; - - DELETE phone FROM Users - WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); - -The ``DELETE`` statement deletes columns and rows. If column names are provided directly after the ``DELETE`` keyword, -only those columns are deleted from the row indicated by the ``WHERE`` clause. Otherwise, whole rows are removed. - -The ``WHERE`` clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -``IN`` operator. A range of rows may be deleted using an inequality operator (such as ``>=``). - -``DELETE`` supports the ``TIMESTAMP`` option with the same semantics as in :ref:`updates `. - -In a ``DELETE`` statement, all deletions within the same partition key are applied atomically and in isolation. - -A ``DELETE`` operation can be conditional through the use of an ``IF`` clause, similar to ``UPDATE`` and ``INSERT`` -statements. However, as with ``INSERT`` and ``UPDATE`` statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly. - -.. _batch_statement: - -BATCH -^^^^^ - -Multiple ``INSERT``, ``UPDATE`` and ``DELETE`` can be executed in a single statement by grouping them through a -``BATCH`` statement: - -.. productionlist:: - batch_statement: BEGIN [ UNLOGGED | COUNTER ] BATCH - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : `modification_statement` ( ';' `modification_statement` )* - : APPLY BATCH - modification_statement: `insert_statement` | `update_statement` | `delete_statement` - -For instance:: - - BEGIN BATCH - INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user'); - UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3'; - INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c'); - DELETE name FROM users WHERE userid = 'user1'; - APPLY BATCH; - -The ``BATCH`` statement group multiple modification statements (insertions/updates and deletions) into a single -statement. 
It serves several purposes: - -- It saves network round-trips between the client and the server (and sometimes between the server coordinator and the - replicas) when batching multiple updates. -- All updates in a ``BATCH`` belonging to a given partition key are performed in isolation. -- By default, all operations in the batch are performed as *logged*, to ensure all mutations eventually complete (or - none will). See the notes on :ref:`UNLOGGED batches ` for more details. - -Note that: - -- ``BATCH`` statements may only contain ``UPDATE``, ``INSERT`` and ``DELETE`` statements (not other batches for instance). -- Batches are *not* a full analogue for SQL transactions. -- If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp - (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra's conflict - resolution procedure in the case of `timestamp ties `__, operations may - be applied in an order that is different from the order they are listed in the ``BATCH`` statement. To force a - particular operation ordering, you must specify per-operation timestamps. -- A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization. - -.. _unlogged-batches: - -``UNLOGGED`` batches -~~~~~~~~~~~~~~~~~~~~ - -By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition). - -There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur -this penalty, you can tell Cassandra to skip the batchlog with the ``UNLOGGED`` option. If the ``UNLOGGED`` option is -used, a failed batch might leave the patch only partly applied. - -``COUNTER`` batches -~~~~~~~~~~~~~~~~~~~ - -Use the ``COUNTER`` option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent. diff --git a/src/doc/4.0-alpha3/_sources/cql/functions.rst.txt b/src/doc/4.0-alpha3/_sources/cql/functions.rst.txt deleted file mode 100644 index 965125a79..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/functions.rst.txt +++ /dev/null @@ -1,581 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-functions: - -.. Need some intro for UDF and native functions in general and point those to it. -.. _udfs: -.. _native-functions: - -Functions ---------- - -CQL supports 2 main categories of functions: - -- the :ref:`scalar functions `, which simply take a number of values and produce an output with it. -- the :ref:`aggregate functions `, which are used to aggregate multiple rows results from a - ``SELECT`` statement. 
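A quick illustration of the difference between the two categories (the ``plays`` table and its ``players`` column are only assumed here, mirroring the aggregate examples later in this section)::

    // scalar: evaluated once per selected row
    SELECT intAsBlob(players) FROM plays;

    // aggregate: evaluated once over all selected rows
    SELECT max(players) FROM plays;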
- -In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined -functions. - -.. note:: By default, the use of user-defined functions is disabled by default for security concerns (even when - enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do - evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions`` - in ``cassandra.yaml`` to enable them. - -A function is identifier by its name: - -.. productionlist:: - function_name: [ `keyspace_name` '.' ] `name` - -.. _scalar-functions: - -Scalar functions -^^^^^^^^^^^^^^^^ - -.. _scalar-native-functions: - -Native functions -~~~~~~~~~~~~~~~~ - -Cast -```` - -The ``cast`` function can be used to converts one native datatype to another. - -The following table describes the conversions supported by the ``cast`` function. Cassandra will silently ignore any -cast converting a datatype into its own datatype. - -=============== ======================================================================================================= - From To -=============== ======================================================================================================= - ``ascii`` ``text``, ``varchar`` - ``bigint`` ``tinyint``, ``smallint``, ``int``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``boolean`` ``text``, ``varchar`` - ``counter`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``date`` ``timestamp`` - ``decimal`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``varint``, ``text``, - ``varchar`` - ``double`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``float`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``inet`` ``text``, ``varchar`` - ``int`` ``tinyint``, ``smallint``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``smallint`` ``tinyint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``time`` ``text``, ``varchar`` - ``timestamp`` ``date``, ``text``, ``varchar`` - ``timeuuid`` ``timestamp``, ``date``, ``text``, ``varchar`` - ``tinyint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``uuid`` ``text``, ``varchar`` - ``varint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``text``, - ``varchar`` -=============== ======================================================================================================= - -The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value -'1.0'. For instance:: - - SELECT avg(cast(count as double)) FROM myTable - -Token -````` - -The ``token`` function allows to compute the token for a given partition key. The exact signature of the token function -depends on the table concerned and of the partitioner used by the cluster. - -The type of the arguments of the ``token`` depend on the type of the partition key columns. The return type depend on -the partitioner in use: - -- For Murmur3Partitioner, the return type is ``bigint``. -- For RandomPartitioner, the return type is ``varint``. 
-- For ByteOrderedPartitioner, the return type is ``blob``. - -For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:: - - CREATE TABLE users ( - userid text PRIMARY KEY, - username text, - ) - -then the ``token`` function will take a single argument of type ``text`` (in that case, the partition key is ``userid`` -(there is no clustering columns so the partition key is the same than the primary key)), and the return type will be -``bigint``. - -Uuid -```` -The ``uuid`` function takes no parameters and generates a random type 4 uuid suitable for use in ``INSERT`` or -``UPDATE`` statements. - -.. _timeuuid-functions: - -Timeuuid functions -`````````````````` - -``now`` -####### - -The ``now`` function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the -time the function is invoked. Note that this method is useful for insertion but is largely non-sensical in -``WHERE`` clauses. For instance, a query of the form:: - - SELECT * FROM myTable WHERE t = now() - -will never return any result by design, since the value returned by ``now()`` is guaranteed to be unique. - -``currentTimeUUID`` is an alias of ``now``. - -``minTimeuuid`` and ``maxTimeuuid`` -################################### - -The ``minTimeuuid`` (resp. ``maxTimeuuid``) function takes a ``timestamp`` value ``t`` (which can be `either a timestamp -or a date string `) and return a *fake* ``timeuuid`` corresponding to the *smallest* (resp. *biggest*) -possible ``timeuuid`` having for timestamp ``t``. So for instance:: - - SELECT * FROM myTable - WHERE t > maxTimeuuid('2013-01-01 00:05+0000') - AND t < minTimeuuid('2013-02-02 10:00+0000') - -will select all rows where the ``timeuuid`` column ``t`` is strictly older than ``'2013-01-01 00:05+0000'`` but strictly -younger than ``'2013-02-02 10:00+0000'``. Please note that ``t >= maxTimeuuid('2013-01-01 00:05+0000')`` would still -*not* select a ``timeuuid`` generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to ``t > -maxTimeuuid('2013-01-01 00:05+0000')``. - -.. note:: We called the values generated by ``minTimeuuid`` and ``maxTimeuuid`` *fake* UUID because they do no respect - the Time-Based UUID generation process specified by the `RFC 4122 `__. In - particular, the value returned by these 2 methods will not be unique. This means you should only use those methods - for querying (as in the example above). Inserting the result of those methods is almost certainly *a bad idea*. - -Datetime functions -`````````````````` - -Retrieving the current date/time -################################ - -The following functions can be used to retrieve the date/time at the time where the function is invoked: - -===================== =============== - Function name Output type -===================== =============== - ``currentTimestamp`` ``timestamp`` - ``currentDate`` ``date`` - ``currentTime`` ``time`` - ``currentTimeUUID`` ``timeUUID`` -===================== =============== - -For example the last 2 days of data can be retrieved using:: - - SELECT * FROM myTable WHERE date >= currentDate() - 2d - -Time conversion functions -######################### - -A number of functions are provided to “convert” a ``timeuuid``, a ``timestamp`` or a ``date`` into another ``native`` -type. 
- -===================== =============== =================================================================== - Function name Input type Description -===================== =============== =================================================================== - ``toDate`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``date`` type - ``toDate`` ``timestamp`` Converts the ``timestamp`` argument into a ``date`` type - ``toTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``timestamp`` type - ``toTimestamp`` ``date`` Converts the ``date`` argument into a ``timestamp`` type - ``toUnixTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``timestamp`` Converts the ``timestamp`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``date`` Converts the ``date`` argument into a ``bigInt`` raw value - ``dateOf`` ``timeuuid`` Similar to ``toTimestamp(timeuuid)`` (DEPRECATED) - ``unixTimestampOf`` ``timeuuid`` Similar to ``toUnixTimestamp(timeuuid)`` (DEPRECATED) -===================== =============== =================================================================== - -Blob conversion functions -````````````````````````` -A number of functions are provided to “convert” the native types into binary data (``blob``). For every -```` ``type`` supported by CQL (a notable exceptions is ``blob``, for obvious reasons), the function -``typeAsBlob`` takes a argument of type ``type`` and return it as a ``blob``. Conversely, the function ``blobAsType`` -takes a 64-bit ``blob`` argument and convert it to a ``bigint`` value. And so for instance, ``bigintAsBlob(3)`` is -``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``. - -.. _user-defined-scalar-functions: - -User-defined functions -~~~~~~~~~~~~~~~~~~~~~~ - -User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in *Java* and *JavaScript*. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath. - -UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster. - -UDFs can be *overloaded* - i.e. multiple UDFs with different argument types but the same function name. Example:: - - CREATE FUNCTION sample ( arg int ) ...; - CREATE FUNCTION sample ( arg text ) ...; - -User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing. - -It is valid to use *complex* types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types. - -Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too. - -Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:: - - CREATE FUNCTION some_function ( arg int ) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE java - AS $$ return arg; $$; - - SELECT some_function(column) FROM atable ...; - UPDATE atable SET col = some_function(?) 
...; - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct_using_udt ( udtarg frozen ) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ return udtarg.getString("txt"); $$; - -User-defined functions can be used in ``SELECT``, ``INSERT`` and ``UPDATE`` statements. - -The implicitly available ``udfContext`` field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:: - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct\_using\_udt ( somearg int ) - RETURNS NULL ON NULL INPUT - RETURNS custom_type - LANGUAGE java - AS $$ - UDTValue udt = udfContext.newReturnUDTValue(); - udt.setString("txt", "some string"); - udt.setInt("i", 42); - return udt; - $$; - -The definition of the ``UDFContext`` interface can be found in the Apache Cassandra source code for -``org.apache.cassandra.cql3.functions.UDFContext``. - -.. code-block:: java - - public interface UDFContext - { - UDTValue newArgUDTValue(String argName); - UDTValue newArgUDTValue(int argNum); - UDTValue newReturnUDTValue(); - UDTValue newUDTValue(String udtName); - TupleValue newArgTupleValue(String argName); - TupleValue newArgTupleValue(int argNum); - TupleValue newReturnTupleValue(); - TupleValue newTupleValue(String cqlDefinition); - } - -Java UDFs already have some imports for common interfaces and classes defined. These imports are: - -.. code-block:: java - - import java.nio.ByteBuffer; - import java.util.List; - import java.util.Map; - import java.util.Set; - import org.apache.cassandra.cql3.functions.UDFContext; - import com.datastax.driver.core.TypeCodec; - import com.datastax.driver.core.TupleValue; - import com.datastax.driver.core.UDTValue; - -Please note, that these convenience imports are not available for script UDFs. - -.. _create-function-statement: - -CREATE FUNCTION -``````````````` - -Creating a new user-defined function uses the ``CREATE FUNCTION`` statement: - -.. productionlist:: - create_function_statement: CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS] - : `function_name` '(' `arguments_declaration` ')' - : [ CALLED | RETURNS NULL ] ON NULL INPUT - : RETURNS `cql_type` - : LANGUAGE `identifier` - : AS `string` - arguments_declaration: `identifier` `cql_type` ( ',' `identifier` `cql_type` )* - -For instance:: - - CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen, listarg list) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - - CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int) - CALLED ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - -``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords either creates a function or replaces an existing one with -the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE`` fails if a function with the same signature already -exists. - -If the optional ``IF NOT EXISTS`` keywords are used, the function will -only be created if another function with the same signature does not -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -Behavior on invocation with ``null`` values must be defined for each -function. There are two options: - -#. ``RETURNS NULL ON NULL INPUT`` declares that the function will always - return ``null`` if any of the input arguments is ``null``. -#. ``CALLED ON NULL INPUT`` declares that the function will always be - executed. 
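A minimal sketch of the difference between the two options; the function names and Java bodies below are purely illustrative::

    // RETURNS NULL ON NULL INPUT: the body is never invoked with a null argument,
    // the function simply returns null
    CREATE FUNCTION IF NOT EXISTS len_or_null ( arg text )
        RETURNS NULL ON NULL INPUT
        RETURNS int
        LANGUAGE java
        AS $$ return arg.length(); $$;

    // CALLED ON NULL INPUT: the body is always invoked and must handle null itself
    CREATE FUNCTION IF NOT EXISTS len_or_zero ( arg text )
        CALLED ON NULL INPUT
        RETURNS int
        LANGUAGE java
        AS $$ return arg == null ? 0 : arg.length(); $$;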
- -Function Signature -################## - -Signatures are used to distinguish individual functions. The signature consists of: - -#. The fully qualified function name - i.e *keyspace* plus *function-name* -#. The concatenated list of all argument types - -Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules. - -Functions belong to a keyspace. If no keyspace is specified in ````, the current keyspace is used (i.e. -the keyspace specified using the ``USE`` statement). It is not possible to create a user-defined function in one of the -system keyspaces. - -.. _drop-function-statement: - -DROP FUNCTION -````````````` - -Dropping a function uses the ``DROP FUNCTION`` statement: - -.. productionlist:: - drop_function_statement: DROP FUNCTION [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - arguments_signature: `cql_type` ( ',' `cql_type` )* - -For instance:: - - DROP FUNCTION myfunction; - DROP FUNCTION mykeyspace.afunction; - DROP FUNCTION afunction ( int ); - DROP FUNCTION afunction ( text ); - -You must specify the argument types (:token:`arguments_signature`) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions). - -``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if -it doesn't - -.. _aggregate-functions: - -Aggregate functions -^^^^^^^^^^^^^^^^^^^ - -Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set. - -If ``normal`` columns, ``scalar functions``, ``UDT`` fields, ``writetime`` or ``ttl`` are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query. - -Native aggregates -~~~~~~~~~~~~~~~~~ - -.. _count-function: - -Count -````` - -The ``count`` function can be used to count the rows returned by a query. Example:: - - SELECT COUNT (*) FROM plays; - SELECT COUNT (1) FROM plays; - -It also can be used to count the non null value of a given column:: - - SELECT COUNT (scores) FROM plays; - -Max and Min -``````````` - -The ``max`` and ``min`` functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:: - - SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake'; - -Sum -``` - -The ``sum`` function can be used to sum up all the values returned by a query for a given column. For instance:: - - SELECT SUM (players) FROM plays; - -Avg -``` - -The ``avg`` function can be used to compute the average of all the values returned by a query for a given column. For -instance:: - - SELECT AVG (players) FROM plays; - -.. _user-defined-aggregates-functions: - -User-Defined Aggregates -~~~~~~~~~~~~~~~~~~~~~~~ - -User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -*count*, *min*, and *max*. - -Each aggregate requires an *initial state* (``INITCOND``, which defaults to ``null``) of type ``STYPE``. The first -argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional ``FINALFUNC`` is executed with last -state value as its argument. 
- -``STYPE`` is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate). - -User-defined aggregates can be used in ``SELECT`` statement. - -A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` -statement):: - - CREATE OR REPLACE FUNCTION averageState(state tuple, val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS $$ - if (val != null) { - state.setInt(0, state.getInt(0)+1); - state.setLong(1, state.getLong(1)+val.intValue()); - } - return state; - $$; - - CREATE OR REPLACE FUNCTION averageFinal (state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS $$ - double r = 0; - if (state.getInt(0) == 0) return null; - r = state.getLong(1); - r /= state.getInt(0); - return Double.valueOf(r); - $$; - - CREATE OR REPLACE AGGREGATE average(int) - SFUNC averageState - STYPE tuple - FINALFUNC averageFinal - INITCOND (0, 0); - - CREATE TABLE atable ( - pk int PRIMARY KEY, - val int - ); - - INSERT INTO atable (pk, val) VALUES (1,1); - INSERT INTO atable (pk, val) VALUES (2,2); - INSERT INTO atable (pk, val) VALUES (3,3); - INSERT INTO atable (pk, val) VALUES (4,4); - - SELECT average(val) FROM atable; - -.. _create-aggregate-statement: - -CREATE AGGREGATE -```````````````` - -Creating (or replacing) a user-defined aggregate function uses the ``CREATE AGGREGATE`` statement: - -.. productionlist:: - create_aggregate_statement: CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ] - : `function_name` '(' `arguments_signature` ')' - : SFUNC `function_name` - : STYPE `cql_type` - : [ FINALFUNC `function_name` ] - : [ INITCOND `term` ] - -See above for a complete example. - -``CREATE AGGREGATE`` with the optional ``OR REPLACE`` keywords either creates an aggregate or replaces an existing one -with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature -already exists. - -``CREATE AGGREGATE`` with the optional ``IF NOT EXISTS`` keywords either creates an aggregate if it does not already -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -``STYPE`` defines the type of the state value and must be specified. - -The optional ``INITCOND`` defines the initial state value for the aggregate. It defaults to ``null``. A non-\ ``null`` -``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``. - -``SFUNC`` references an existing function to be used as the state modifying function. The type of first argument of the -state function must match ``STYPE``. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called -with ``null``. - -The optional ``FINALFUNC`` is called just before the aggregate result is returned. It must take only one argument with -type ``STYPE``. The return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS -NULL ON NULL INPUT`` means that the aggregate's return value will be ``null``, if the last state is ``null``. - -If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is -defined, it is the return type of that function. - -.. 
_drop-aggregate-statement: - -DROP AGGREGATE -`````````````` - -Dropping an user-defined aggregate function uses the ``DROP AGGREGATE`` statement: - -.. productionlist:: - drop_aggregate_statement: DROP AGGREGATE [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - -For instance:: - - DROP AGGREGATE myAggregate; - DROP AGGREGATE myKeyspace.anAggregate; - DROP AGGREGATE someAggregate ( int ); - DROP AGGREGATE someAggregate ( text ); - -The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates). - -``DROP AGGREGATE`` with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist. diff --git a/src/doc/4.0-alpha3/_sources/cql/index.rst.txt b/src/doc/4.0-alpha3/_sources/cql/index.rst.txt deleted file mode 100644 index b4c21cf6c..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/index.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _cql: - -The Cassandra Query Language (CQL) -================================== - -This document describes the Cassandra Query Language (CQL) [#]_. Note that this document describes the last version of -the languages. However, the `changes <#changes>`_ section provides the diff between the different versions of CQL. - -CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For -that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have -in SQL. - -.. toctree:: - :maxdepth: 2 - - definitions - types - ddl - dml - indexes - mvs - security - functions - operators - json - triggers - appendices - changes - -.. [#] Technically, this document CQL version 3, which is not backward compatible with CQL version 1 and 2 (which have - been deprecated and remove) and differs from it in numerous ways. diff --git a/src/doc/4.0-alpha3/_sources/cql/indexes.rst.txt b/src/doc/4.0-alpha3/_sources/cql/indexes.rst.txt deleted file mode 100644 index 81fe429d0..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/indexes.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. 
http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _secondary-indexes: - -Secondary Indexes ------------------ - -CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by: - -.. productionlist:: - index_name: re('[a-zA-Z_0-9]+') - - - -.. _create-index-statement: - -CREATE INDEX -^^^^^^^^^^^^ - -Creating a secondary index on a table uses the ``CREATE INDEX`` statement: - -.. productionlist:: - create_index_statement: CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ `index_name` ] - : ON `table_name` '(' `index_identifier` ')' - : [ USING `string` [ WITH OPTIONS = `map_literal` ] ] - index_identifier: `column_name` - :| ( KEYS | VALUES | ENTRIES | FULL ) '(' `column_name` ')' - -For instance:: - - CREATE INDEX userIndex ON NerdMovies (user); - CREATE INDEX ON Mutants (abilityId); - CREATE INDEX ON users (keys(favs)); - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass'; - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'}; - -The ``CREATE INDEX`` statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ``ON`` keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time. - -Attempting to create an already existing index will return an error unless the ``IF NOT EXISTS`` option is used. If it -is used, the statement will be a no-op if the index already exists. - -Indexes on Map Keys -~~~~~~~~~~~~~~~~~~~ - -When creating an index on a :ref:`maps `, you may index either the keys or the values. If the column identifier is -placed within the ``keys()`` function, the index will be on the map keys, allowing you to use ``CONTAINS KEY`` in -``WHERE`` clauses. Otherwise, the index will be on the map values. - -.. _drop-index-statement: - -DROP INDEX -^^^^^^^^^^ - -Dropping a secondary index uses the ``DROP INDEX`` statement: - -.. productionlist:: - drop_index_statement: DROP INDEX [ IF EXISTS ] `index_name` - -The ``DROP INDEX`` statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index. - -If the index does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. diff --git a/src/doc/4.0-alpha3/_sources/cql/json.rst.txt b/src/doc/4.0-alpha3/_sources/cql/json.rst.txt deleted file mode 100644 index 539180aed..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/json.rst.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-json: - -JSON Support ------------- - -Cassandra 2.2 introduces JSON support to :ref:`SELECT ` and :ref:`INSERT ` -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents. - -SELECT JSON -^^^^^^^^^^^ - -With ``SELECT`` statements, the ``JSON`` keyword can be used to return each row as a single ``JSON`` encoded map. The -remainder of the ``SELECT`` statement behavior is the same. - -The result map keys are the same as the column names in a normal result set. For example, a statement like ``SELECT JSON -a, ttl(b) FROM ...`` would result in a map with keys ``"a"`` and ``"ttl(b)"``. However, this is one notable exception: -for symmetry with ``INSERT JSON`` behavior, case-sensitive column names with upper-case letters will be surrounded with -double quotes. For example, ``SELECT JSON myColumn FROM ...`` would result in a map key ``"\"myColumn\""`` (note the -escaped quotes). - -The map values will ``JSON``-encoded representations (as described below) of the result set values. - -INSERT JSON -^^^^^^^^^^^ - -With ``INSERT`` statements, the new ``JSON`` keyword can be used to enable inserting a ``JSON`` encoded map as a single -row. The format of the ``JSON`` map should generally match that returned by a ``SELECT JSON`` statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named "myKey" and "value", you would do the following:: - - INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}' - -By default (or if ``DEFAULT NULL`` is explicitly used), a column omitted from the ``JSON`` map will be set to ``NULL``, -meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). -Alternatively, if the ``DEFAULT UNSET`` directive is used after the value, omitted column values will be left unset, -meaning that pre-existing values for those column will be preserved. - - -JSON Encoding of Cassandra Data Types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Where possible, Cassandra will represent and accept data types in their native ``JSON`` representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native ``JSON`` collections (maps and lists) or a JSON-encoded string -representation of the collection. 
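As an illustration, assuming a hypothetical table defined as ``CREATE TABLE mytable2 (id int PRIMARY KEY, tags set<text>)``, both of the following forms are accepted::

    // the set supplied as a native JSON list ...
    INSERT INTO mytable2 JSON '{"id": 1, "tags": ["cql", "json"]}';

    // ... or as a JSON-encoded string representation of that list
    INSERT INTO mytable2 JSON '{"id": 2, "tags": "[\"cql\", \"json\"]"}';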
- -The following table describes the encodings that Cassandra will accept in ``INSERT JSON`` values (and ``fromJson()`` -arguments) as well as the format Cassandra will use when returning data for ``SELECT JSON`` statements (and -``fromJson()``): - -=============== ======================== =============== ============================================================== - Type Formats accepted Return format Notes -=============== ======================== =============== ============================================================== - ``ascii`` string string Uses JSON's ``\u`` character escape - ``bigint`` integer, string integer String must be valid 64 bit integer - ``blob`` string string String should be 0x followed by an even number of hex digits - ``boolean`` boolean, string boolean String must be "true" or "false" - ``date`` string string Date in format ``YYYY-MM-DD``, timezone UTC - ``decimal`` integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in - client-side decoder - ``double`` integer, float, string float String must be valid integer or float - ``float`` integer, float, string float String must be valid integer or float - ``inet`` string string IPv4 or IPv6 address - ``int`` integer, string integer String must be valid 32 bit integer - ``list`` list, string list Uses JSON's native list representation - ``map`` map, string map Uses JSON's native map representation - ``smallint`` integer, string integer String must be valid 16 bit integer - ``set`` list, string list Uses JSON's native list representation - ``text`` string string Uses JSON's ``\u`` character escape - ``time`` string string Time of day in format ``HH-MM-SS[.fffffffff]`` - ``timestamp`` integer, string string A timestamp. Strings constant allows to input :ref:`timestamps - as dates `. Datestamps with format ``YYYY-MM-DD - HH:MM:SS.SSS`` are returned. - ``timeuuid`` string string Type 1 UUID. See :token:`constant` for the UUID format - ``tinyint`` integer, string integer String must be valid 8 bit integer - ``tuple`` list, string list Uses JSON's native list representation - ``UDT`` map, string map Uses JSON's native map representation with field names as keys - ``uuid`` string string See :token:`constant` for the UUID format - ``varchar`` string string Uses JSON's ``\u`` character escape - ``varint`` integer, string integer Variable length; may overflow 32 or 64 bit integers in - client-side decoder -=============== ======================== =============== ============================================================== - -The fromJson() Function -^^^^^^^^^^^^^^^^^^^^^^^ - -The ``fromJson()`` function may be used similarly to ``INSERT JSON``, but for a single column value. It may only be used -in the ``VALUES`` clause of an ``INSERT`` statement or as one of the column values in an ``UPDATE``, ``DELETE``, or -``SELECT`` statement. For example, it cannot be used in the selection clause of a ``SELECT`` statement. - -The toJson() Function -^^^^^^^^^^^^^^^^^^^^^ - -The ``toJson()`` function may be used similarly to ``SELECT JSON``, but for a single column value. It may only be used -in the selection clause of a ``SELECT`` statement. diff --git a/src/doc/4.0-alpha3/_sources/cql/mvs.rst.txt b/src/doc/4.0-alpha3/_sources/cql/mvs.rst.txt deleted file mode 100644 index 200090a60..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/mvs.rst.txt +++ /dev/null @@ -1,179 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. 
See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _materialized-views: - -Materialized Views ------------------- - -Materialized views names are defined by: - -.. productionlist:: - view_name: re('[a-zA-Z_0-9]+') - - -.. _create-materialized-view-statement: - -CREATE MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^^ - -You can create a materialized view on a table using a ``CREATE MATERIALIZED VIEW`` statement: - -.. productionlist:: - create_materialized_view_statement: CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] `view_name` AS - : `select_statement` - : PRIMARY KEY '(' `primary_key` ')' - : WITH `table_options` - -For instance:: - - CREATE MATERIALIZED VIEW monkeySpecies_by_population AS - SELECT * FROM monkeySpecies - WHERE population IS NOT NULL AND species IS NOT NULL - PRIMARY KEY (population, species) - WITH comment='Allow query by population instead of species'; - -The ``CREATE MATERIALIZED VIEW`` statement creates a new materialized view. Each such view is a set of *rows* which -corresponds to rows which are present in the underlying, or base, table specified in the ``SELECT`` statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view. - -Creating a materialized view has 3 main parts: - -- The :ref:`select statement ` that restrict the data included in the view. -- The :ref:`primary key ` definition for the view. -- The :ref:`options ` for the view. - -Attempting to create an already existing materialized view will return an error unless the ``IF NOT EXISTS`` option is -used. If it is used, the statement will be a no-op if the materialized view already exists. - -.. note:: By default, materialized views are built in a single thread. The initial build can be parallelized by - increasing the number of threads specified by the property ``concurrent_materialized_view_builders`` in - ``cassandra.yaml``. This property can also be manipulated at runtime through both JMX and the - ``setconcurrentviewbuilders`` and ``getconcurrentviewbuilders`` nodetool commands. - -.. _mv-select: - -MV select statement -``````````````````` - -The select statement of a materialized view creation defines which of the base table is included in the view. That -statement is limited in a number of ways: - -- the :ref:`selection ` is limited to those that only select columns of the base table. In other - words, you can't use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can - however use `*` as a shortcut of selecting all columns. Further, :ref:`static columns ` cannot be - included in a materialized view (which means ``SELECT *`` isn't allowed if the base table has static columns). -- the ``WHERE`` clause have the following restrictions: - - - it cannot include any :token:`bind_marker`. 
- - the columns that are not part of the *base table* primary key can only be restricted by an ``IS NOT NULL`` - restriction. No other restriction is allowed. - - as the columns that are part of the *view* primary key cannot be null, they must always be at least restricted by a - ``IS NOT NULL`` restriction (or any other restriction, but they must have one). - -- it cannot have neither an :ref:`ordering clause `, nor a :ref:`limit `, nor :ref:`ALLOW - FILTERING `. - -.. _mv-primary-key: - -MV primary key -`````````````` - -A view must have a primary key and that primary key must conform to the following restrictions: - -- it must contain all the primary key columns of the base table. This ensures that every row of the view correspond to - exactly one row of the base table. -- it can only contain a single column that is not a primary key column in the base table. - -So for instance, give the following base table definition:: - - CREATE TABLE t ( - k int, - c1 int, - c2 int, - v1 int, - v2 int, - PRIMARY KEY (k, c1, c2) - ) - -then the following view definitions are allowed:: - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, k, c2) - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (v1, k, c1, c2) - -but the following ones are **not** allowed:: - - // Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL - PRIMARY KEY (v1, v2, k, c1, c2) - - // Error: must include k in the primary as it's a base table primary key column - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, c2) - - -.. _mv-options: - -MV options -`````````` - -A materialized view is internally implemented by a table and as such, creating a MV allows the :ref:`same options than -creating a table `. - - -.. _alter-materialized-view-statement: - -ALTER MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^ - -After creation, you can alter the options of a materialized view using the ``ALTER MATERIALIZED VIEW`` statement: - -.. productionlist:: - alter_materialized_view_statement: ALTER MATERIALIZED VIEW `view_name` WITH `table_options` - -The options that can be updated are the same than at creation time and thus the :ref:`same than for tables -`. - -.. _drop-materialized-view-statement: - -DROP MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^ - -Dropping a materialized view users the ``DROP MATERIALIZED VIEW`` statement: - -.. productionlist:: - drop_materialized_view_statement: DROP MATERIALIZED VIEW [ IF EXISTS ] `view_name`; - -If the materialized view does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case -the operation is a no-op. - -MV Limitations -``````````````` - -.. Note:: Removal of columns not selected in the Materialized View (via ``UPDATE base SET unselected_column = null`` or - ``DELETE unselected_column FROM base``) may shadow missed updates to other columns received by hints or repair. - For this reason, we advise against doing deletions on base columns not selected in views until this is - fixed on CASSANDRA-13826. 
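As a hypothetical illustration of the statements that note advises against, assuming a base table ``base`` with partition key ``k`` and a column ``unselected_column`` that is not selected in any of its views::

    // avoid these shapes of statement until CASSANDRA-13826 is fixed
    UPDATE base SET unselected_column = null WHERE k = 0;
    DELETE unselected_column FROM base WHERE k = 0;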
diff --git a/src/doc/4.0-alpha3/_sources/cql/operators.rst.txt b/src/doc/4.0-alpha3/_sources/cql/operators.rst.txt deleted file mode 100644 index 1faf0d045..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/operators.rst.txt +++ /dev/null @@ -1,74 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _arithmetic_operators: - -Arithmetic Operators --------------------- - -CQL supports the following operators: - -=============== ======================================================================================================= - Operator Description -=============== ======================================================================================================= - \- (unary) Negates operand - \+ Addition - \- Substraction - \* Multiplication - / Division - % Returns the remainder of a division -=============== ======================================================================================================= - -.. _number-arithmetic: - -Number Arithmetic -^^^^^^^^^^^^^^^^^ - -All arithmetic operations are supported on numeric types or counters. - -The return type of the operation will be based on the operand types: - -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - left/right tinyint smallint int bigint counter float double varint decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - **tinyint** tinyint smallint int bigint bigint float double varint decimal - **smallint** smallint smallint int bigint bigint float double varint decimal - **int** int int int bigint bigint float double varint decimal - **bigint** bigint bigint bigint bigint bigint double double varint decimal - **counter** bigint bigint bigint bigint bigint double double varint decimal - **float** float float float double double float double decimal decimal - **double** double double double double double double double decimal decimal - **varint** varint varint varint decimal decimal decimal decimal decimal decimal - **decimal** decimal decimal decimal decimal decimal decimal decimal decimal decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - -``*``, ``/`` and ``%`` operators have a higher precedence level than ``+`` and ``-`` operator. By consequence, -they will be evaluated before. If two operator in an expression have the same precedence level, they will be evaluated -left to right based on their position in the expression. - -.. _datetime--arithmetic: - -Datetime Arithmetic -^^^^^^^^^^^^^^^^^^^ - -A ``duration`` can be added (+) or substracted (-) from a ``timestamp`` or a ``date`` to create a new -``timestamp`` or ``date``. 
So for instance:: - - SELECT * FROM myTable WHERE t = '2017-01-01' - 2d - -will select all the records with a value of ``t`` which is in the last 2 days of 2016. diff --git a/src/doc/4.0-alpha3/_sources/cql/security.rst.txt b/src/doc/4.0-alpha3/_sources/cql/security.rst.txt deleted file mode 100644 index 429a1ef0d..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/security.rst.txt +++ /dev/null @@ -1,538 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-security: - -Security --------- - -.. _cql-roles: - -Database Roles -^^^^^^^^^^^^^^ - -CQL uses database roles to represent users and group of users. Syntactically, a role is defined by: - -.. productionlist:: - role_name: `identifier` | `string` - -.. _create-role-statement: - -CREATE ROLE -~~~~~~~~~~~ - -Creating a role uses the ``CREATE ROLE`` statement: - -.. productionlist:: - create_role_statement: CREATE ROLE [ IF NOT EXISTS ] `role_name` - : [ WITH `role_options` ] - role_options: `role_option` ( AND `role_option` )* - role_option: PASSWORD '=' `string` - :| LOGIN '=' `boolean` - :| SUPERUSER '=' `boolean` - :| OPTIONS '=' `map_literal` - :| ACCESS TO DATACENTERS `set_literal` - :| ACCESS TO ALL DATACENTERS - -For instance:: - - CREATE ROLE new_role; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true; - CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 }; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'}; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS; - -By default roles do not possess ``LOGIN`` privileges or ``SUPERUSER`` status. - -:ref:`Permissions ` on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and ``SUPERUSER`` status are inherited, but the ``LOGIN`` privilege is -not. - -If a role has the ``LOGIN`` privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role. - -Only a client with with the ``CREATE`` permission on the database roles resource may issue ``CREATE ROLE`` requests (see -the :ref:`relevant section ` below), unless the client is a ``SUPERUSER``. Role management in Cassandra -is pluggable and custom implementations may support only a subset of the listed options. - -Role names should be quoted if they contain non-alphanumeric characters. - -.. 
_setting-credentials-for-internal-authentication: - -Setting credentials for internal authentication -``````````````````````````````````````````````` - -Use the ``WITH PASSWORD`` clause to set a password for internal authentication, enclosing the password in single -quotation marks. - -If internal authentication has not been set up or the role does not have ``LOGIN`` privileges, the ``WITH PASSWORD`` -clause is not necessary. - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. Not specifiying -datacenters implicitly grants access to all datacenters. The clause ``ACCESS TO ALL DATACENTERS`` can be used for -explicitness, but there's no functional difference. - -Creating a role conditionally -````````````````````````````` - -Attempting to create an existing role results in an invalid query condition unless the ``IF NOT EXISTS`` option is used. -If the option is used and the role exists, the statement is a no-op:: - - CREATE ROLE other_role; - CREATE ROLE IF NOT EXISTS other_role; - - -.. _alter-role-statement: - -ALTER ROLE -~~~~~~~~~~ - -Altering a role options uses the ``ALTER ROLE`` statement: - -.. productionlist:: - alter_role_statement: ALTER ROLE `role_name` WITH `role_options` - -For instance:: - - ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false; - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ``ACCESS TO ALL DATACENTERS`` clause. - -Conditions on executing ``ALTER ROLE`` statements: - -- A client must have ``SUPERUSER`` status to alter the ``SUPERUSER`` status of another role -- A client cannot alter the ``SUPERUSER`` status of any role it currently holds -- A client can only modify certain properties of the role with which it identified at login (e.g. ``PASSWORD``) -- To modify properties of a role, the client must be granted ``ALTER`` :ref:`permission ` on that role - -.. _drop-role-statement: - -DROP ROLE -~~~~~~~~~ - -Dropping a role uses the ``DROP ROLE`` statement: - -.. productionlist:: - drop_role_statement: DROP ROLE [ IF EXISTS ] `role_name` - -``DROP ROLE`` requires the client to have ``DROP`` :ref:`permission ` on the role in question. In -addition, client may not ``DROP`` the role with which it identified at login. Finally, only a client with ``SUPERUSER`` -status may ``DROP`` another ``SUPERUSER`` role. - -Attempting to drop a role which does not exist results in an invalid query condition unless the ``IF EXISTS`` option is -used. If the option is used and the role does not exist the statement is a no-op. - -.. note:: DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain - connected and will retain the ability to perform any database actions which do not require :ref:`authorization`. - However, if authorization is enabled, :ref:`permissions` of the dropped role are also revoked, - subject to the :ref:`caching options` configured in :ref:`cassandra.yaml`. 
- Should a dropped role be subsequently recreated and have new :ref:`permissions` or - :ref:`roles` granted to it, any client sessions still connected will acquire the newly granted - permissions and roles. - -.. _grant-role-statement: - -GRANT ROLE -~~~~~~~~~~ - -Granting a role to another uses the ``GRANT ROLE`` statement: - -.. productionlist:: - grant_role_statement: GRANT `role_name` TO `role_name` - -For instance:: - - GRANT report_writer TO alice; - -This statement grants the ``report_writer`` role to ``alice``. Any permissions granted to ``report_writer`` are also -acquired by ``alice``. - -Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:: - - GRANT role_a TO role_b; - GRANT role_b TO role_a; - - GRANT role_a TO role_b; - GRANT role_b TO role_c; - GRANT role_c TO role_a; - -.. _revoke-role-statement: - -REVOKE ROLE -~~~~~~~~~~~ - -Revoking a role uses the ``REVOKE ROLE`` statement: - -.. productionlist:: - revoke_role_statement: REVOKE `role_name` FROM `role_name` - -For instance:: - - REVOKE report_writer FROM alice; - -This statement revokes the ``report_writer`` role from ``alice``. Any permissions that ``alice`` has acquired via the -``report_writer`` role are also revoked. - -.. _list-roles-statement: - -LIST ROLES -~~~~~~~~~~ - -All the known roles (in the system or granted to specific role) can be listed using the ``LIST ROLES`` statement: - -.. productionlist:: - list_roles_statement: LIST ROLES [ OF `role_name` ] [ NORECURSIVE ] - -For instance:: - - LIST ROLES; - -returns all known roles in the system, this requires ``DESCRIBE`` permission on the database roles resource. And:: - - LIST ROLES OF alice; - -enumerates all roles granted to ``alice``, including those transitively acquired. But:: - - LIST ROLES OF bob NORECURSIVE - -lists all roles directly granted to ``bob`` without including any of the transitively acquired ones. - -Users -^^^^^ - -Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -``USER``. For backward compatibility, the legacy syntax has been preserved with ``USER`` centric statements becoming -synonyms for the ``ROLE`` based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role. - -.. _create-user-statement: - -CREATE USER -~~~~~~~~~~~ - -Creating a user uses the ``CREATE USER`` statement: - -.. productionlist:: - create_user_statement: CREATE USER [ IF NOT EXISTS ] `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - user_option: SUPERUSER | NOSUPERUSER - -For instance:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER; - -``CREATE USER`` is equivalent to ``CREATE ROLE`` where the ``LOGIN`` option is ``true``. 
So, the following pairs of -statements are equivalent:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - - CREATE USER alice WITH PASSWORD 'password_a'; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - -.. _alter-user-statement: - -ALTER USER -~~~~~~~~~~ - -Altering the options of a user uses the ``ALTER USER`` statement: - -.. productionlist:: - alter_user_statement: ALTER USER `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - -For instance:: - - ALTER USER alice WITH PASSWORD 'PASSWORD_A'; - ALTER USER bob SUPERUSER; - -.. _drop-user-statement: - -DROP USER -~~~~~~~~~ - -Dropping a user uses the ``DROP USER`` statement: - -.. productionlist:: - drop_user_statement: DROP USER [ IF EXISTS ] `role_name` - -.. _list-users-statement: - -LIST USERS -~~~~~~~~~~ - -Existing users can be listed using the ``LIST USERS`` statement: - -.. productionlist:: - list_users_statement: LIST USERS - -Note that this statement is equivalent to:: - - LIST ROLES; - -but only roles with the ``LOGIN`` privilege are included in the output. - -Data Control -^^^^^^^^^^^^ - -.. _cql-permissions: - -Permissions -~~~~~~~~~~~ - -Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically: - -- The hierarchy of Data resources, Keyspaces and Tables has the structure ``ALL KEYSPACES`` -> ``KEYSPACE`` -> - ``TABLE``. -- Function resources have the structure ``ALL FUNCTIONS`` -> ``KEYSPACE`` -> ``FUNCTION`` -- Resources representing roles have the structure ``ALL ROLES`` -> ``ROLE`` -- Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ``ALL MBEANS`` -> - ``MBEAN`` - -Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting ``SELECT`` on a ``KEYSPACE`` automatically grants it on all ``TABLES`` in that ``KEYSPACE``. Likewise, granting -a permission on ``ALL FUNCTIONS`` grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace. - -Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes. - -The full set of available permissions is: - -- ``CREATE`` -- ``ALTER`` -- ``DROP`` -- ``SELECT`` -- ``MODIFY`` -- ``AUTHORIZE`` -- ``DESCRIBE`` -- ``EXECUTE`` - -Not all permissions are applicable to every type of resource. For instance, ``EXECUTE`` is only relevant in the context -of functions or mbeans; granting ``EXECUTE`` on a resource representing a table is nonsensical. Attempting to ``GRANT`` -a permission on resource to which it cannot be applied results in an error response. 
The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission. - -=============== =============================== ======================================================================= - Permission Resource Operations -=============== =============================== ======================================================================= - ``CREATE`` ``ALL KEYSPACES`` ``CREATE KEYSPACE`` and ``CREATE TABLE`` in any keyspace - ``CREATE`` ``KEYSPACE`` ``CREATE TABLE`` in specified keyspace - ``CREATE`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` in any keyspace and ``CREATE AGGREGATE`` in any - keyspace - ``CREATE`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE`` in specified keyspace - ``CREATE`` ``ALL ROLES`` ``CREATE ROLE`` - ``ALTER`` ``ALL KEYSPACES`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in any keyspace - ``ALTER`` ``KEYSPACE`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in specified keyspace - ``ALTER`` ``TABLE`` ``ALTER TABLE`` - ``ALTER`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing any existing - ``ALTER`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing in - specified keyspace - ``ALTER`` ``FUNCTION`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing - ``ALTER`` ``ALL ROLES`` ``ALTER ROLE`` on any role - ``ALTER`` ``ROLE`` ``ALTER ROLE`` - ``DROP`` ``ALL KEYSPACES`` ``DROP KEYSPACE`` and ``DROP TABLE`` in any keyspace - ``DROP`` ``KEYSPACE`` ``DROP TABLE`` in specified keyspace - ``DROP`` ``TABLE`` ``DROP TABLE`` - ``DROP`` ``ALL FUNCTIONS`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in any keyspace - ``DROP`` ``ALL FUNCTIONS IN KEYSPACE`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in specified keyspace - ``DROP`` ``FUNCTION`` ``DROP FUNCTION`` - ``DROP`` ``ALL ROLES`` ``DROP ROLE`` on any role - ``DROP`` ``ROLE`` ``DROP ROLE`` - ``SELECT`` ``ALL KEYSPACES`` ``SELECT`` on any table - ``SELECT`` ``KEYSPACE`` ``SELECT`` on any table in specified keyspace - ``SELECT`` ``TABLE`` ``SELECT`` on specified table - ``SELECT`` ``ALL MBEANS`` Call getter methods on any mbean - ``SELECT`` ``MBEANS`` Call getter methods on any mbean matching a wildcard pattern - ``SELECT`` ``MBEAN`` Call getter methods on named mbean - ``MODIFY`` ``ALL KEYSPACES`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table - ``MODIFY`` ``KEYSPACE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table in - specified keyspace - ``MODIFY`` ``TABLE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on specified table - ``MODIFY`` ``ALL MBEANS`` Call setter methods on any mbean - ``MODIFY`` ``MBEANS`` Call setter methods on any mbean matching a wildcard pattern - ``MODIFY`` ``MBEAN`` Call setter methods on named mbean - ``AUTHORIZE`` ``ALL KEYSPACES`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table - ``AUTHORIZE`` ``KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table in - specified keyspace - ``AUTHORIZE`` ``TABLE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified table - ``AUTHORIZE`` ``ALL FUNCTIONS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any function - ``AUTHORIZE`` ``ALL FUNCTIONS IN KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` in specified keyspace - ``AUTHORIZE`` ``FUNCTION`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified function - ``AUTHORIZE`` ``ALL MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean - 
``AUTHORIZE`` ``MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean matching - a wildcard pattern - ``AUTHORIZE`` ``MBEAN`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on named mbean - ``AUTHORIZE`` ``ALL ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on any role - ``AUTHORIZE`` ``ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on specified roles - ``DESCRIBE`` ``ALL ROLES`` ``LIST ROLES`` on all roles or only roles granted to another, - specified role - ``DESCRIBE`` ``ALL MBEANS`` Retrieve metadata about any mbean from the platform's MBeanServer - ``DESCRIBE`` ``MBEANS`` Retrieve metadata about any mbean matching a wildcard patter from the - platform's MBeanServer - ``DESCRIBE`` ``MBEAN`` Retrieve metadata about a named mbean from the platform's MBeanServer - ``EXECUTE`` ``ALL FUNCTIONS`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function, and use of - any function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL FUNCTIONS IN KEYSPACE`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function in specified - keyspace and use of any function in keyspace in ``CREATE AGGREGATE`` - ``EXECUTE`` ``FUNCTION`` ``SELECT``, ``INSERT`` and ``UPDATE`` using specified function and use - of the function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL MBEANS`` Execute operations on any mbean - ``EXECUTE`` ``MBEANS`` Execute operations on any mbean matching a wildcard pattern - ``EXECUTE`` ``MBEAN`` Execute operations on named mbean -=============== =============================== ======================================================================= - -.. _grant-permission-statement: - -GRANT PERMISSION -~~~~~~~~~~~~~~~~ - -Granting a permission uses the ``GRANT PERMISSION`` statement: - -.. productionlist:: - grant_permission_statement: GRANT `permissions` ON `resource` TO `role_name` - permissions: ALL [ PERMISSIONS ] | `permission` [ PERMISSION ] - permission: CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE - resource: ALL KEYSPACES - :| KEYSPACE `keyspace_name` - :| [ TABLE ] `table_name` - :| ALL ROLES - :| ROLE `role_name` - :| ALL FUNCTIONS [ IN KEYSPACE `keyspace_name` ] - :| FUNCTION `function_name` '(' [ `cql_type` ( ',' `cql_type` )* ] ')' - :| ALL MBEANS - :| ( MBEAN | MBEANS ) `string` - -For instance:: - - GRANT SELECT ON ALL KEYSPACES TO data_reader; - -This gives any user with the role ``data_reader`` permission to execute ``SELECT`` statements on any table across all -keyspaces:: - - GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer; - -This give any user with the role ``data_writer`` permission to perform ``UPDATE``, ``INSERT``, ``UPDATE``, ``DELETE`` -and ``TRUNCATE`` queries on all tables in the ``keyspace1`` keyspace:: - - GRANT DROP ON keyspace1.table1 TO schema_owner; - -This gives any user with the ``schema_owner`` role permissions to ``DROP`` ``keyspace1.table1``:: - - GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer; - -This grants any user with the ``report_writer`` role permission to execute ``SELECT``, ``INSERT`` and ``UPDATE`` queries -which use the function ``keyspace1.user_function( int )``:: - - GRANT DESCRIBE ON ALL ROLES TO role_admin; - -This grants any user with the ``role_admin`` role permission to view any and all roles in the system with a ``LIST -ROLES`` statement - -.. _grant-all: - -GRANT ALL -````````` - -When the ``GRANT ALL`` form is used, the appropriate set of permissions is determined automatically based on the target -resource. 
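As a sketch only (the role and keyspace names here are hypothetical, not taken from the examples above), a ``GRANT ALL``
statement uses the same ``resource`` grammar as the other ``GRANT PERMISSION`` examples::

    GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO ops_admin;

This would give any user with the ``ops_admin`` role every permission that is applicable to ``keyspace1`` and the tables
it contains, following the resource hierarchy described earlier.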
- -Automatic Granting -`````````````````` - -When a resource is created, via a ``CREATE KEYSPACE``, ``CREATE TABLE``, ``CREATE FUNCTION``, ``CREATE AGGREGATE`` or -``CREATE ROLE`` statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource. - -.. _revoke-permission-statement: - -REVOKE PERMISSION -~~~~~~~~~~~~~~~~~ - -Revoking a permission from a role uses the ``REVOKE PERMISSION`` statement: - -.. productionlist:: - revoke_permission_statement: REVOKE `permissions` ON `resource` FROM `role_name` - -For instance:: - - REVOKE SELECT ON ALL KEYSPACES FROM data_reader; - REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer; - REVOKE DROP ON keyspace1.table1 FROM schema_owner; - REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer; - REVOKE DESCRIBE ON ALL ROLES FROM role_admin; - -Because of their function in normal driver operations, certain tables cannot have their `SELECT` permissions -revoked. The following tables will be available to all authorized users regardless of their assigned role:: - -* `system_schema.keyspaces` -* `system_schema.columns` -* `system_schema.tables` -* `system.local` -* `system.peers` - -.. _list-permissions-statement: - -LIST PERMISSIONS -~~~~~~~~~~~~~~~~ - -Listing granted permissions uses the ``LIST PERMISSIONS`` statement: - -.. productionlist:: - list_permissions_statement: LIST `permissions` [ ON `resource` ] [ OF `role_name` [ NORECURSIVE ] ] - -For instance:: - - LIST ALL PERMISSIONS OF alice; - -Show all permissions granted to ``alice``, including those acquired transitively from any other roles:: - - LIST ALL PERMISSIONS ON keyspace1.table1 OF bob; - -Show all permissions on ``keyspace1.table1`` granted to ``bob``, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to ``keyspace1.table1``. -For example, should ``bob`` have ``ALTER`` permission on ``keyspace1``, that would be included in the results of this -query. Adding the ``NORECURSIVE`` switch restricts the results to only those permissions which were directly granted to -``bob`` or one of ``bob``'s roles:: - - LIST SELECT PERMISSIONS OF carlos; - -Show any permissions granted to ``carlos`` or any of ``carlos``'s roles, limited to ``SELECT`` permissions on any -resource. diff --git a/src/doc/4.0-alpha3/_sources/cql/triggers.rst.txt b/src/doc/4.0-alpha3/_sources/cql/triggers.rst.txt deleted file mode 100644 index db3f53e38..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/triggers.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. 
_cql-triggers: - -Triggers --------- - -Triggers are identified by a name defined by: - -.. productionlist:: - trigger_name: `identifier` - - -.. _create-trigger-statement: - -CREATE TRIGGER -^^^^^^^^^^^^^^ - -Creating a new trigger uses the ``CREATE TRIGGER`` statement: - -.. productionlist:: - create_trigger_statement: CREATE TRIGGER [ IF NOT EXISTS ] `trigger_name` - : ON `table_name` - : USING `string` - -For instance:: - - CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex'; - -The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a ``lib/triggers`` subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction. - -.. _drop-trigger-statement: - -DROP TRIGGER -^^^^^^^^^^^^ - -Dropping a trigger uses the ``DROP TRIGGER`` statement: - -.. productionlist:: - drop_trigger_statement: DROP TRIGGER [ IF EXISTS ] `trigger_name` ON `table_name` - -For instance:: - - DROP TRIGGER myTrigger ON myTable; diff --git a/src/doc/4.0-alpha3/_sources/cql/types.rst.txt b/src/doc/4.0-alpha3/_sources/cql/types.rst.txt deleted file mode 100644 index 509a7565e..000000000 --- a/src/doc/4.0-alpha3/_sources/cql/types.rst.txt +++ /dev/null @@ -1,559 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. _data-types: - -Data Types ----------- - -CQL is a typed language and supports a rich set of data types, including :ref:`native types `, -:ref:`collection types `, :ref:`user-defined types `, :ref:`tuple types ` and :ref:`custom -types `: - -.. productionlist:: - cql_type: `native_type` | `collection_type` | `user_defined_type` | `tuple_type` | `custom_type` - - -.. _native-types: - -Native Types -^^^^^^^^^^^^ - -The native types supported by CQL are: - -.. 
productionlist:: - native_type: ASCII - : | BIGINT - : | BLOB - : | BOOLEAN - : | COUNTER - : | DATE - : | DECIMAL - : | DOUBLE - : | DURATION - : | FLOAT - : | INET - : | INT - : | SMALLINT - : | TEXT - : | TIME - : | TIMESTAMP - : | TIMEUUID - : | TINYINT - : | UUID - : | VARCHAR - : | VARINT - -The following table gives additional informations on the native data types, and on which kind of :ref:`constants -` each type supports: - -=============== ===================== ================================================================================== - type constants supported description -=============== ===================== ================================================================================== - ``ascii`` :token:`string` ASCII character string - ``bigint`` :token:`integer` 64-bit signed long - ``blob`` :token:`blob` Arbitrary bytes (no validation) - ``boolean`` :token:`boolean` Either ``true`` or ``false`` - ``counter`` :token:`integer` Counter column (64-bit signed value). See :ref:`counters` for details - ``date`` :token:`integer`, A date (with no corresponding time value). See :ref:`dates` below for details - :token:`string` - ``decimal`` :token:`integer`, Variable-precision decimal - :token:`float` - ``double`` :token:`integer` 64-bit IEEE-754 floating point - :token:`float` - ``duration`` :token:`duration`, A duration with nanosecond precision. See :ref:`durations` below for details - ``float`` :token:`integer`, 32-bit IEEE-754 floating point - :token:`float` - ``inet`` :token:`string` An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that - there is no ``inet`` constant, IP address should be input as strings - ``int`` :token:`integer` 32-bit signed int - ``smallint`` :token:`integer` 16-bit signed int - ``text`` :token:`string` UTF8 encoded string - ``time`` :token:`integer`, A time (with no corresponding date value) with nanosecond precision. See - :token:`string` :ref:`times` below for details - ``timestamp`` :token:`integer`, A timestamp (date and time) with millisecond precision. See :ref:`timestamps` - :token:`string` below for details - ``timeuuid`` :token:`uuid` Version 1 UUID_, generally used as a “conflict-free” timestamp. Also see - :ref:`timeuuid-functions` - ``tinyint`` :token:`integer` 8-bit signed int - ``uuid`` :token:`uuid` A UUID_ (of any version) - ``varchar`` :token:`string` UTF8 encoded string - ``varint`` :token:`integer` Arbitrary-precision integer -=============== ===================== ================================================================================== - -.. _counters: - -Counters -~~~~~~~~ - -The ``counter`` type is used to define *counter columns*. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the :ref:`UPDATE statement -` for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0. - -.. _counter-limitations: - -Counters have a number of important limitations: - -- They cannot be used for columns part of the ``PRIMARY KEY`` of a table. -- A table that contains a counter can only contain counters. In other words, either all the columns of a table outside - the ``PRIMARY KEY`` have the ``counter`` type, or none of them have it. -- Counters do not support :ref:`expiration `. 
-- The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other - words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed). -- Counter updates are, by nature, not `idemptotent `__. An important - consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), - the client has no way to know if the update has been applied or not. In particular, replaying the update may or may - not lead to an over count. - -.. _timestamps: - -Working with timestamps -^^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``timestamp`` type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as `the epoch `__: January 1 1970 at 00:00:00 GMT. - -Timestamps can be input in CQL either using their value as an :token:`integer`, or using a :token:`string` that -represents an `ISO 8601 `__ date. For instance, all of the values below are -valid ``timestamp`` values for Mar 2, 2011, at 04:05:00 AM, GMT: - -- ``1299038700000`` -- ``'2011-02-03 04:05+0000'`` -- ``'2011-02-03 04:05:00+0000'`` -- ``'2011-02-03 04:05:00.000+0000'`` -- ``'2011-02-03T04:05+0000'`` -- ``'2011-02-03T04:05:00+0000'`` -- ``'2011-02-03T04:05:00.000+0000'`` - -The ``+0000`` above is an RFC 822 4-digit time zone specification; ``+0000`` refers to GMT. US Pacific Standard Time is -``-0800``. The time zone may be omitted if desired (``'2011-02-03 04:05:00'``), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible. - -The time of day may also be omitted (``'2011-02-03'`` or ``'2011-02-03+0000'``), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the :ref:`date ` type. - -.. _dates: - -Working with dates -^^^^^^^^^^^^^^^^^^ - -Values of the ``date`` type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at -the center of the range (2^31). Epoch is January 1st, 1970 - -As for :ref:`timestamp `, a date can be input either as an :token:`integer` or using a date -:token:`string`. In the later case, the format should be ``yyyy-mm-dd`` (so ``'2011-02-03'`` for instance). - -.. _times: - -Working with times -^^^^^^^^^^^^^^^^^^ - -Values of the ``time`` type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight. - -As for :ref:`timestamp `, a time can be input either as an :token:`integer` or using a :token:`string` -representing the time. In the later case, the format should be ``hh:mm:ss[.fffffffff]`` (where the sub-second precision -is optional and if provided, can be less than the nanosecond). So for instance, the following are valid inputs for a -time: - -- ``'08:12:54'`` -- ``'08:12:54.123'`` -- ``'08:12:54.123456'`` -- ``'08:12:54.123456789'`` - -.. _durations: - -Working with durations -^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``duration`` type are encoded as 3 signed integer of variable lengths. The first integer represents the -number of months, the second the number of days and the third the number of nanoseconds. 
This is due to the fact that -the number of days in a month can change, and a day can have 23 or 25 hours depending on the daylight saving. -Internally, the number of months and days are decoded as 32 bits integers whereas the number of nanoseconds is decoded -as a 64 bits integer. - -A duration can be input as: - - #. ``(quantity unit)+`` like ``12h30m`` where the unit can be: - - * ``y``: years (12 months) - * ``mo``: months (1 month) - * ``w``: weeks (7 days) - * ``d``: days (1 day) - * ``h``: hours (3,600,000,000,000 nanoseconds) - * ``m``: minutes (60,000,000,000 nanoseconds) - * ``s``: seconds (1,000,000,000 nanoseconds) - * ``ms``: milliseconds (1,000,000 nanoseconds) - * ``us`` or ``µs`` : microseconds (1000 nanoseconds) - * ``ns``: nanoseconds (1 nanosecond) - #. ISO 8601 format: ``P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W`` - #. ISO 8601 alternative format: ``P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]`` - -For example:: - - INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s); - INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S); - INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09); - -.. _duration-limitation: - -Duration columns cannot be used in a table's ``PRIMARY KEY``. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if ``1mo`` is greater than ``29d`` without a date -context. - -A ``1d`` duration is not equals to a ``24h`` one as the duration type has been created to be able to support daylight -saving. - -.. _collections: - -Collections -^^^^^^^^^^^ - -CQL supports 3 kind of collections: :ref:`maps`, :ref:`sets` and :ref:`lists`. The types of those collections is defined -by: - -.. productionlist:: - collection_type: MAP '<' `cql_type` ',' `cql_type` '>' - : | SET '<' `cql_type` '>' - : | LIST '<' `cql_type` '>' - -and their values can be inputd using collection literals: - -.. productionlist:: - collection_literal: `map_literal` | `set_literal` | `list_literal` - map_literal: '{' [ `term` ':' `term` (',' `term` : `term`)* ] '}' - set_literal: '{' [ `term` (',' `term`)* ] '}' - list_literal: '[' [ `term` (',' `term`)* ] ']' - -Note however that neither :token:`bind_marker` nor ``NULL`` are supported inside collection literals. - -Noteworthy characteristics -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Collections are meant for storing/denormalizing relatively small amount of data. They work well for things like “the -phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all -messages sent by a user”, “events registered by a sensor”...), then collections are not appropriate and a specific table -(with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy -characteristics and limitations: - -- Individual collections are not indexed internally. Which means that even to access a single element of a collection, - the while collection has to be read (and reading one is not paged internally). -- While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. - Further, some lists operations are not idempotent by nature (see the section on :ref:`lists ` below for - details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when - possible. 
-
-Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an
-anti-pattern to use a (single) collection to store large amounts of data.
-
-.. _maps:
-
-Maps
-~~~~
-
-A ``map`` is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define
-and insert a map with::
-
-    CREATE TABLE users (
-        id text PRIMARY KEY,
-        name text,
-        favs map<text, text>   // A map of text keys, and text values
-    );
-
-    INSERT INTO users (id, name, favs)
-    VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-    // Replace the existing map entirely.
-    UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-Further, maps support:
-
-- Updating or inserting one or more elements::
-
-    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
-    UPDATE users SET favs = favs + { 'movie' : 'Casablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
-
-- Removing one or more elements (if an element doesn't exist, removing it is a no-op but no error is thrown)::
-
-    DELETE favs['author'] FROM users WHERE id = 'jsmith';
-    UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
-
-  Note that for removing multiple elements in a ``map``, you remove from it a ``set`` of keys.
-
-Lastly, TTLs are allowed for both ``INSERT`` and ``UPDATE``, but in both cases the TTL set only applies to the newly
-inserted/updated elements. In other words::
-
-    UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-will only apply the TTL to the ``{ 'color' : 'green' }`` record, the rest of the map remaining unaffected.
-
-
-.. _sets:
-
-Sets
-~~~~
-
-A ``set`` is a (sorted) collection of unique values. You can define and insert a set with::
-
-    CREATE TABLE images (
-        name text PRIMARY KEY,
-        owner text,
-        tags set<text>   // A set of text values
-    );
-
-    INSERT INTO images (name, owner, tags)
-    VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-    // Replace the existing set entirely
-    UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-Further, sets support:
-
-- Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op)::
-
-    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
-
-- Removing one or multiple elements (if an element doesn't exist, removing it is a no-op but no error is thrown)::
-
-    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
-
-Lastly, as for :ref:`maps `, TTLs, if used, only apply to the newly inserted values.
-
-.. _lists:
-
-Lists
-~~~~~
-
-.. note:: As mentioned above and further discussed at the end of this section, lists have limitations and specific
-   performance considerations that you should take into account before using them. In general, if you can use a
-   :ref:`set ` instead of a list, always prefer a set.
-
-A ``list`` is a (sorted) collection of non-unique values where elements are ordered by their position in the list.
You -can define and insert a list with:: - - CREATE TABLE plays ( - id text PRIMARY KEY, - game text, - players int, - scores list // A list of integers - ) - - INSERT INTO plays (id, game, players, scores) - VALUES ('123-afde', 'quake', 3, [17, 4, 2]); - - // Replace the existing list entirely - UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde'; - -Further, lists support: - -- Appending and prepending values to a list:: - - UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde'; - UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde'; - -- Setting the value at a particular position in the list. This imply that the list has a pre-existing element for that - position or an error will be thrown that the list is too small:: - - UPDATE plays SET scores[1] = 7 WHERE id = '123-afde'; - -- Removing an element by its position in the list. This imply that the list has a pre-existing element for that position - or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the - list size will be diminished by 1, shifting the position of all the elements following the one deleted:: - - DELETE scores[1] FROM plays WHERE id = '123-afde'; - -- Deleting *all* the occurrences of particular values in the list (if a particular element doesn't occur at all in the - list, it is simply ignored and no error is thrown):: - - UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; - -.. warning:: The append and prepend operations are not idempotent by nature. So in particular, if one of these operation - timeout, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value - twice. - -.. warning:: Setting and removing an element by position and removing occurences of particular values incur an internal - *read-before-write*. They will thus run more slowly and take more ressources than usual updates (with the exclusion - of conditional write that have their own cost). - -Lastly, as for :ref:`maps `, TTLs when used only apply to the newly inserted values. - -.. _udts: - -User-Defined Types -^^^^^^^^^^^^^^^^^^ - -CQL support the definition of user-defined types (UDT for short). Such a type can be created, modified and removed using -the :token:`create_type_statement`, :token:`alter_type_statement` and :token:`drop_type_statement` described below. But -once created, a UDT is simply referred to by its name: - -.. productionlist:: - user_defined_type: `udt_name` - udt_name: [ `keyspace_name` '.' ] `identifier` - - -Creating a UDT -~~~~~~~~~~~~~~ - -Creating a new user-defined type is done using a ``CREATE TYPE`` statement defined by: - -.. productionlist:: - create_type_statement: CREATE TYPE [ IF NOT EXISTS ] `udt_name` - : '(' `field_definition` ( ',' `field_definition` )* ')' - field_definition: `identifier` `cql_type` - -A UDT has a name (used to declared columns of that type) and is a set of named and typed fields. Fields name can be any -type, including collections or other UDT. For instance:: - - CREATE TYPE phone ( - country_code int, - number text, - ) - - CREATE TYPE address ( - street text, - city text, - zip text, - phones map - ) - - CREATE TABLE user ( - name text PRIMARY KEY, - addresses map> - ) - -Note that: - -- Attempting to create an already existing type will result in an error unless the ``IF NOT EXISTS`` option is used. If - it is used, the statement will be a no-op if the type already exists. 
-- A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At
-  creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in
-  the current keyspace.
-- As of Cassandra |version|, UDTs have to be frozen in most cases, hence the ``frozen
`` in the table definition - above. Please see the section on :ref:`frozen ` for more details. - -UDT literals -~~~~~~~~~~~~ - -Once a used-defined type has been created, value can be input using a UDT literal: - -.. productionlist:: - udt_literal: '{' `identifier` ':' `term` ( ',' `identifier` ':' `term` )* '}' - -In other words, a UDT literal is like a :ref:`map ` literal but its keys are the names of the fields of the type. -For instance, one could insert into the table define in the previous section using:: - - INSERT INTO user (name, addresses) - VALUES ('z3 Pr3z1den7', { - 'home' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'cell' : { country_code: 1, number: '202 456-1111' }, - 'landline' : { country_code: 1, number: '...' } } - }, - 'work' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'fax' : { country_code: 1, number: '...' } } - } - }) - -To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some field -(in which case those will be ``null``). - -Altering a UDT -~~~~~~~~~~~~~~ - -An existing user-defined type can be modified using an ``ALTER TYPE`` statement: - -.. productionlist:: - alter_type_statement: ALTER TYPE `udt_name` `alter_type_modification` - alter_type_modification: ADD `field_definition` - : | RENAME `identifier` TO `identifier` ( `identifier` TO `identifier` )* - -You can: - -- add a new field to the type (``ALTER TYPE address ADD country text``). That new field will be ``null`` for any values - of the type created before the addition. -- rename the fields of the type (``ALTER TYPE address RENAME zip TO zipcode``). - -Dropping a UDT -~~~~~~~~~~~~~~ - -You can drop an existing user-defined type using a ``DROP TYPE`` statement: - -.. productionlist:: - drop_type_statement: DROP TYPE [ IF EXISTS ] `udt_name` - -Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error. - -If the type dropped does not exist, an error will be returned unless ``IF EXISTS`` is used, in which case the operation -is a no-op. - -.. _tuples: - -Tuples -^^^^^^ - -CQL also support tuples and tuple types (where the elements can be of different types). Functionally, tuples can be -though as anonymous UDT with anonymous fields. Tuple types and tuple literals are defined by: - -.. productionlist:: - tuple_type: TUPLE '<' `cql_type` ( ',' `cql_type` )* '>' - tuple_literal: '(' `term` ( ',' `term` )* ')' - -and can be used thusly:: - - CREATE TABLE durations ( - event text, - duration tuple, - ) - - INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours')); - -Unlike other "composed" types (collections and UDT), a tuple is always :ref:`frozen ` (without the need of the -`frozen` keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). -Also, a tuple literal should always have the same number of value than declared in the type it is a tuple of (some of -those values can be null but they need to be explicitly declared as so). - -.. _custom-types: - -Custom Types -^^^^^^^^^^^^ - -.. note:: Custom types exists mostly for backward compatiliby purposes and their usage is discouraged. Their usage is - complex, not user friendly and the other provided types, particularly :ref:`user-defined types `, should almost - always be enough. - -A custom type is defined by: - -.. 
productionlist:: - custom_type: `string` - -A custom type is a :token:`string` that contains the name of Java class that extends the server side ``AbstractType`` -class and that can be loaded by Cassandra (it should thus be in the ``CLASSPATH`` of every node running Cassandra). That -class will define what values are valid for the type and how the time sorts when used for a clustering column. For any -other purpose, a value of a custom type is the same than that of a ``blob``, and can in particular be input using the -:token:`blob` literal syntax. diff --git a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_conceptual.rst.txt b/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_conceptual.rst.txt deleted file mode 100644 index 8749b799e..000000000 --- a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_conceptual.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. conceptual_data_modeling - -Conceptual Data Modeling -^^^^^^^^^^^^^^^^^^^^^^^^ - -First, let’s create a simple domain model that is easy to understand in -the relational world, and then see how you might map it from a relational -to a distributed hashtable model in Cassandra. - -Let's use an example that is complex enough -to show the various data structures and design patterns, but not -something that will bog you down with details. Also, a domain that’s -familiar to everyone will allow you to concentrate on how to work with -Cassandra, not on what the application domain is all about. - -For example, let's use a domain that is easily understood and that -everyone can relate to: making hotel reservations. - -The conceptual domain includes hotels, guests that stay in the hotels, a -collection of rooms for each hotel, the rates and availability of those -rooms, and a record of reservations booked for guests. Hotels typically -also maintain a collection of “points of interest,” which are parks, -museums, shopping galleries, monuments, or other places near the hotel -that guests might want to visit during their stay. Both hotels and -points of interest need to maintain geolocation data so that they can be -found on maps for mashups, and to calculate distances. - -The conceptual domain is depicted below using the entity–relationship -model popularized by Peter Chen. This simple diagram represents the -entities in the domain with rectangles, and attributes of those entities -with ovals. Attributes that represent unique identifiers for items are -underlined. Relationships between entities are represented as diamonds, -and the connectors between the relationship and each entity show the -multiplicity of the connection. - -.. 
image:: images/data_modeling_hotel_erd.png - -Obviously, in the real world, there would be many more considerations -and much more complexity. For example, hotel rates are notoriously -dynamic, and calculating them involves a wide array of factors. Here -you’re defining something complex enough to be interesting and touch on -the important points, but simple enough to maintain the focus on -learning Cassandra. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_logical.rst.txt b/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_logical.rst.txt deleted file mode 100644 index 27fa4beb7..000000000 --- a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_logical.rst.txt +++ /dev/null @@ -1,219 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Logical Data Modeling -===================== - -Now that you have defined your queries, you’re ready to begin designing -Cassandra tables. First, create a logical model containing a table -for each query, capturing entities and relationships from the conceptual -model. - -To name each table, you’ll identify the primary entity type for which you -are querying and use that to start the entity name. If you are querying -by attributes of other related entities, append those to the table -name, separated with ``_by_``. For example, ``hotels_by_poi``. - -Next, you identify the primary key for the table, adding partition key -columns based on the required query attributes, and clustering columns -in order to guarantee uniqueness and support desired sort ordering. - -The design of the primary key is extremely important, as it will -determine how much data will be stored in each partition and how that -data is organized on disk, which in turn will affect how quickly -Cassandra processes reads. - -Complete each table by adding any additional attributes identified by -the query. If any of these additional attributes are the same for every -instance of the partition key, mark the column as static. - -Now that was a pretty quick description of a fairly involved process, so -it will be worthwhile to work through a detailed example. First, -let’s introduce a notation that you can use to represent logical -models. - -Several individuals within the Cassandra community have proposed -notations for capturing data models in diagrammatic form. This document -uses a notation popularized by Artem Chebotko which provides a simple, -informative way to visualize the relationships between queries and -tables in your designs. This figure shows the Chebotko notation for a -logical data model. 
- -.. image:: images/data_modeling_chebotko_logical.png - -Each table is shown with its title and a list of columns. Primary key -columns are identified via symbols such as **K** for partition key -columns and **C**\ ↑ or **C**\ ↓ to represent clustering columns. Lines -are shown entering tables or between tables to indicate the queries that -each table is designed to support. - -Hotel Logical Data Model ------------------------- - -The figure below shows a Chebotko logical data model for the queries -involving hotels, points of interest, rooms, and amenities. One thing you'll -notice immediately is that the Cassandra design doesn’t include dedicated -tables for rooms or amenities, as you had in the relational design. This -is because the workflow didn’t identify any queries requiring this -direct access. - -.. image:: images/data_modeling_hotel_logical.png - -Let’s explore the details of each of these tables. - -The first query Q1 is to find hotels near a point of interest, so you’ll -call this table ``hotels_by_poi``. Searching by a named point of -interest is a clue that the point of interest should be a part -of the primary key. Let’s reference the point of interest by name, -because according to the workflow that is how users will start their -search. - -You’ll note that you certainly could have more than one hotel near a -given point of interest, so you’ll need another component in the primary -key in order to make sure you have a unique partition for each hotel. So -you add the hotel key as a clustering column. - -An important consideration in designing your table’s primary key is -making sure that it defines a unique data element. Otherwise you run the -risk of accidentally overwriting data. - -Now for the second query (Q2), you’ll need a table to get information -about a specific hotel. One approach would have been to put all of the -attributes of a hotel in the ``hotels_by_poi`` table, but you added -only those attributes that were required by the application workflow. - -From the workflow diagram, you know that the ``hotels_by_poi`` table is -used to display a list of hotels with basic information on each hotel, -and the application knows the unique identifiers of the hotels returned. -When the user selects a hotel to view details, you can then use Q2, which -is used to obtain details about the hotel. Because you already have the -``hotel_id`` from Q1, you use that as a reference to the hotel you’re -looking for. Therefore the second table is just called ``hotels``. - -Another option would have been to store a set of ``poi_names`` in the -hotels table. This is an equally valid approach. You’ll learn through -experience which approach is best for your application. - -Q3 is just a reverse of Q1—looking for points of interest near a hotel, -rather than hotels near a point of interest. This time, however, you need -to access the details of each point of interest, as represented by the -``pois_by_hotel`` table. As previously, you add the point of -interest name as a clustering key to guarantee uniqueness. - -At this point, let’s now consider how to support query Q4 to help the -user find available rooms at a selected hotel for the nights they are -interested in staying. Note that this query involves both a start date -and an end date. Because you’re querying over a range instead of a single -date, you know that you’ll need to use the date as a clustering key. 
-Use the ``hotel_id`` as a primary key to group room data for each hotel -on a single partition, which should help searches be super fast. Let’s -call this the ``available_rooms_by_hotel_date`` table. - -To support searching over a range, use :ref:`clustering columns -` to store -attributes that you need to access in a range query. Remember that the -order of the clustering columns is important. - -The design of the ``available_rooms_by_hotel_date`` table is an instance -of the **wide partition** pattern. This -pattern is sometimes called the **wide row** pattern when discussing -databases that support similar models, but wide partition is a more -accurate description from a Cassandra perspective. The essence of the -pattern is to group multiple related rows in a partition in order to -support fast access to multiple rows within the partition in a single -query. - -In order to round out the shopping portion of the data model, add the -``amenities_by_room`` table to support Q5. This will allow users to -view the amenities of one of the rooms that is available for the desired -stay dates. - -Reservation Logical Data Model ------------------------------- - -Now let's switch gears to look at the reservation queries. The figure -shows a logical data model for reservations. You’ll notice that these -tables represent a denormalized design; the same data appears in -multiple tables, with differing keys. - -.. image:: images/data_modeling_reservation_logical.png - -In order to satisfy Q6, the ``reservations_by_guest`` table can be used -to look up the reservation by guest name. You could envision query Q7 -being used on behalf of a guest on a self-serve website or a call center -agent trying to assist the guest. Because the guest name might not be -unique, you include the guest ID here as a clustering column as well. - -Q8 and Q9 in particular help to remind you to create queries -that support various stakeholders of the application, not just customers -but staff as well, and perhaps even the analytics team, suppliers, and so -on. - -The hotel staff might wish to see a record of upcoming reservations by -date in order to get insight into how the hotel is performing, such as -what dates the hotel is sold out or undersold. Q8 supports the retrieval -of reservations for a given hotel by date. - -Finally, you create a ``guests`` table. This provides a single -location that used to store guest information. In this case, you specify a -separate unique identifier for guest records, as it is not uncommon -for guests to have the same name. In many organizations, a customer -database such as the ``guests`` table would be part of a separate -customer management application, which is why other guest -access patterns were omitted from the example. - - -Patterns and Anti-Patterns --------------------------- - -As with other types of software design, there are some well-known -patterns and anti-patterns for data modeling in Cassandra. You’ve already -used one of the most common patterns in this hotel model—the wide -partition pattern. - -The **time series** pattern is an extension of the wide partition -pattern. In this pattern, a series of measurements at specific time -intervals are stored in a wide partition, where the measurement time is -used as part of the partition key. This pattern is frequently used in -domains including business analysis, sensor data management, and -scientific experiments. - -The time series pattern is also useful for data other than measurements. 
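As a rough, hypothetical sketch (not part of the hotel data model), a time-series-style table for sensor readings might look something like the following, with the sensor identifier and a day bucket forming the partition key and the measurement time as a clustering column::

    -- hypothetical example; names and bucketing scheme are assumptions
    CREATE TABLE sensor_readings (
        sensor_id text,
        day date,
        reading_time timestamp,
        value double,
        PRIMARY KEY ((sensor_id, day), reading_time)
    ) WITH CLUSTERING ORDER BY (reading_time DESC);

Each partition then holds one day of readings for one sensor, and reading the most recent measurements touches only the head of a single partition.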
-Consider the example of a banking application. You could store each -customer’s balance in a row, but that might lead to a lot of read and -write contention as various customers check their balance or make -transactions. You’d probably be tempted to wrap a transaction around -writes just to protect the balance from being updated in error. In -contrast, a time series–style design would store each transaction as a -timestamped row and leave the work of calculating the current balance to -the application. - -One design trap that many new users fall into is attempting to use -Cassandra as a queue. Each item in the queue is stored with a timestamp -in a wide partition. Items are appended to the end of the queue and read -from the front, being deleted after they are read. This is a design that -seems attractive, especially given its apparent similarity to the time -series pattern. The problem with this approach is that the deleted items -are now :ref:`tombstones ` that Cassandra must scan past -in order to read from the front of the queue. Over time, a growing number -of tombstones begins to degrade read performance. - -The queue anti-pattern serves as a reminder that any design that relies -on the deletion of data is potentially a poorly performing design. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_physical.rst.txt b/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_physical.rst.txt deleted file mode 100644 index 758400496..000000000 --- a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_physical.rst.txt +++ /dev/null @@ -1,117 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Physical Data Modeling -====================== - -Once you have a logical data model defined, creating the physical model -is a relatively simple process. - -You walk through each of the logical model tables, assigning types to -each item. You can use any valid :ref:`CQL data type `, -including the basic types, collections, and user-defined types. You may -identify additional user-defined types that can be created to simplify -your design. - -After you’ve assigned data types, you analyze the model by performing -size calculations and testing out how the model works. You may make some -adjustments based on your findings. Once again let's cover the data -modeling process in more detail by working through an example. - -Before getting started, let’s look at a few additions to the Chebotko -notation for physical data models. To draw physical models, you need to -be able to add the typing information for each column. 
This figure -shows the addition of a type for each column in a sample table. - -.. image:: images/data_modeling_chebotko_physical.png - -The figure includes a designation of the keyspace containing each table -and visual cues for columns represented using collections and -user-defined types. Note the designation of static columns and -secondary index columns. There is no restriction on assigning these as -part of a logical model, but they are typically more of a physical data -modeling concern. - -Hotel Physical Data Model -------------------------- - -Now let’s get to work on the physical model. First, you need keyspaces -to contain the tables. To keep the design relatively simple, create a -``hotel`` keyspace to contain tables for hotel and availability -data, and a ``reservation`` keyspace to contain tables for reservation -and guest data. In a real system, you might divide the tables across even -more keyspaces in order to separate concerns. - -For the ``hotels`` table, use Cassandra’s ``text`` type to -represent the hotel’s ``id``. For the address, create an -``address`` user-defined type. Use the ``text`` type to represent the -phone number, as there is considerable variance in the formatting of -numbers between countries. - -While it would make sense to use the ``uuid`` type for attributes such -as the ``hotel_id``, this document uses mostly ``text`` attributes as -identifiers, to keep the samples simple and readable. For example, a -common convention in the hospitality industry is to reference properties -by short codes like "AZ123" or "NY229". This example uses these values -for ``hotel_ids``, while acknowledging they are not necessarily globally -unique. - -You’ll find that it’s often helpful to use unique IDs to uniquely -reference elements, and to use these ``uuids`` as references in tables -representing other entities. This helps to minimize coupling between -different entity types. This may prove especially effective if you are -using a microservice architectural style for your application, in which -there are separate services responsible for each entity type. - -As you work to create physical representations of various tables in the -logical hotel data model, you use the same approach. The resulting design -is shown in this figure: - -.. image:: images/data_modeling_hotel_physical.png - -Note that the ``address`` type is also included in the design. It -is designated with an asterisk to denote that it is a user-defined type, -and has no primary key columns identified. This type is used in -the ``hotels`` and ``hotels_by_poi`` tables. - -User-defined types are frequently used to help reduce duplication of -non-primary key columns, as was done with the ``address`` -user-defined type. This can reduce complexity in the design. - -Remember that the scope of a UDT is the keyspace in which it is defined. -To use ``address`` in the ``reservation`` keyspace defined below, -you’ll have to declare it again. This is just one of the many -trade-offs you have to make in data model design. - -Reservation Physical Data Model ------------------------------- - -Now, let’s examine reservation tables in the design. -Remember that the logical model contained three denormalized tables to -support queries for reservations by confirmation number, guest, and -hotel and date. For the first iteration of your physical data model -design, assume you're going to manage this denormalization -manually.
Note that this design could be revised to use Cassandra’s -(experimental) materialized view feature. - -.. image:: images/data_modeling_reservation_physical.png - -Note that the ``address`` type is reproduced in this keyspace and -``guest_id`` is modeled as a ``uuid`` type in all of the tables. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_queries.rst.txt b/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_queries.rst.txt deleted file mode 100644 index d0119944f..000000000 --- a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_queries.rst.txt +++ /dev/null @@ -1,85 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Defining Application Queries -============================ - -Let’s try the query-first approach to start designing the data model for -a hotel application. The user interface design for the application is -often a great artifact to use to begin identifying queries. Let’s assume -that you’ve talked with the project stakeholders and your UX designers -have produced user interface designs or wireframes for the key use -cases. You’ll likely have a list of shopping queries like the following: - -- Q1. Find hotels near a given point of interest. - -- Q2. Find information about a given hotel, such as its name and - location. - -- Q3. Find points of interest near a given hotel. - -- Q4. Find an available room in a given date range. - -- Q5. Find the rate and amenities for a room. - -It is often helpful to be able to refer -to queries by a shorthand number rather than explaining them in full. -The queries listed here are numbered Q1, Q2, and so on, which is how they -are referenced in diagrams throughout the example. - -Now if the application is to be a success, you’ll certainly want -customers to be able to book reservations at hotels. This includes -steps such as selecting an available room and entering their guest -information. So clearly you will also need some queries that address the -reservation and guest entities from the conceptual data model. Even -here, however, you’ll want to think not only from the customer -perspective in terms of how the data is written, but also in terms of -how the data will be queried by downstream use cases. - -Your natural tendency might be to focus first on -designing the tables to store reservation and guest records, and only -then start thinking about the queries that would access them.
You may -have felt a similar tension already when discussing the -shopping queries before, thinking “but where did the hotel and point of -interest data come from?” Don’t worry, you will see soon enough. -Here are some queries that describe how users will access -reservations: - -- Q6. Lookup a reservation by confirmation number. - -- Q7. Lookup a reservation by hotel, date, and guest name. - -- Q8. Lookup all reservations by guest name. - -- Q9. View guest details. - -All of the queries are shown in the context of the workflow of the -application in the figure below. Each box on the diagram represents a -step in the application workflow, with arrows indicating the flows -between steps and the associated query. If you’ve modeled the application -well, each step of the workflow accomplishes a task that “unlocks” -subsequent steps. For example, the “View hotels near POI” task helps -the application learn about several hotels, including their unique keys. -The key for a selected hotel may be used as part of Q2, in order to -obtain detailed description of the hotel. The act of booking a room -creates a reservation record that may be accessed by the guest and -hotel staff at a later time through various additional queries. - -.. image:: images/data_modeling_hotel_queries.png - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_rdbms.rst.txt b/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_rdbms.rst.txt deleted file mode 100644 index 7d67d69fc..000000000 --- a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_rdbms.rst.txt +++ /dev/null @@ -1,171 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -RDBMS Design -============ - -When you set out to build a new data-driven application that will use a -relational database, you might start by modeling the domain as a set of -properly normalized tables and use foreign keys to reference related -data in other tables. - -The figure below shows how you might represent the data storage for your application -using a relational database model. The relational model includes a -couple of “join” tables in order to realize the many-to-many -relationships from the conceptual model of hotels-to-points of interest, -rooms-to-amenities, rooms-to-availability, and guests-to-rooms (via a -reservation). - -.. image:: images/data_modeling_hotel_relational.png - -.. 
design_differences_between_rdbms_and_cassandra - -Design Differences Between RDBMS and Cassandra -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Let’s take a minute to highlight some of the key differences in doing -data modeling for Cassandra versus a relational database. - -No joins -~~~~~~~~ - -You cannot perform joins in Cassandra. If you have designed a data model -and find that you need something like a join, you’ll have to either do -the work on the client side, or create a denormalized second table that -represents the join results for you. This latter option is preferred in -Cassandra data modeling. Performing joins on the client should be a very -rare case; you really want to duplicate (denormalize) the data instead. - -No referential integrity -~~~~~~~~~~~~~~~~~~~~~~~~ - -Although Cassandra supports features such as lightweight transactions -and batches, Cassandra itself has no concept of referential integrity -across tables. In a relational database, you could specify foreign keys -in a table to reference the primary key of a record in another table. -But Cassandra does not enforce this. It is still a common design -requirement to store IDs related to other entities in your tables, but -operations such as cascading deletes are not available. - -Denormalization -~~~~~~~~~~~~~~~ - -In relational database design, you are often taught the importance of -normalization. This is not an advantage when working with Cassandra -because it performs best when the data model is denormalized. It is -often the case that companies end up denormalizing data in relational -databases as well. There are two common reasons for this. One is -performance. Companies simply can’t get the performance they need when -they have to do so many joins on years’ worth of data, so they -denormalize along the lines of known queries. This ends up working, but -goes against the grain of how relational databases are intended to be -designed, and ultimately makes one question whether using a relational -database is the best approach in these circumstances. - -A second reason that relational databases get denormalized on purpose is -a business document structure that requires retention. That is, you have -an enclosing table that refers to a lot of external tables whose data -could change over time, but you need to preserve the enclosing document -as a snapshot in history. The common example here is with invoices. You -already have customer and product tables, and you’d think that you could -just make an invoice that refers to those tables. But this should never -be done in practice. Customer or price information could change, and -then you would lose the integrity of the invoice document as it was on -the invoice date, which could violate audits, reports, or laws, and -cause other problems. - -In the relational world, denormalization violates Codd’s normal forms, -and you try to avoid it. But in Cassandra, denormalization is, well, -perfectly normal. It’s not required if your data model is simple. But -don’t be afraid of it. - -Historically, denormalization in Cassandra has required designing and -managing multiple tables using techniques described in this documentation. -Beginning with the 3.0 release, Cassandra provides a feature known -as :ref:`materialized views ` -which allows you to create multiple denormalized -views of data based on a base table design. Cassandra manages -materialized views on the server, including the work of keeping the -views in sync with the table.
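For illustration only, a materialized view over the ``hotels`` table defined later in this documentation might look roughly like this; the view name and key choice here are assumptions rather than part of the example schema::

    -- hypothetical view; assumes the hotel.hotels base table
    CREATE MATERIALIZED VIEW hotel.hotels_by_phone AS
        SELECT * FROM hotel.hotels
        WHERE phone IS NOT NULL AND id IS NOT NULL
        PRIMARY KEY (phone, id);

Cassandra keeps such a denormalized view in sync with its base table, at the cost of additional write work on the server.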
- -Query-first design -~~~~~~~~~~~~~~~~~~ - -Relational modeling, in simple terms, means that you start from the -conceptual domain and then represent the nouns in the domain in tables. -You then assign primary keys and foreign keys to model relationships. -When you have a many-to-many relationship, you create the join tables -that represent just those keys. The join tables don’t exist in the real -world, and are a necessary side effect of the way relational models -work. After you have all your tables laid out, you can start writing -queries that pull together disparate data using the relationships -defined by the keys. The queries in the relational world are very much -secondary. It is assumed that you can always get the data you want as -long as you have your tables modeled properly. Even if you have to use -several complex subqueries or join statements, this is usually true. - -By contrast, in Cassandra you don’t start with the data model; you start -with the query model. Instead of modeling the data first and then -writing queries, with Cassandra you model the queries and let the data -be organized around them. Think of the most common query paths your -application will use, and then create the tables that you need to -support them. - -Detractors have suggested that designing the queries first is overly -constraining on application design, not to mention database modeling. -But it is perfectly reasonable to expect that you should think hard -about the queries in your application, just as you would, presumably, -think hard about your relational domain. You may get it wrong, and then -you’ll have problems in either world. Or your query needs might change -over time, and then you’ll have to work to update your data set. But -this is no different from defining the wrong tables, or needing -additional tables, in an RDBMS. - -Designing for optimal storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In a relational database, it is frequently transparent to the user how -tables are stored on disk, and it is rare to hear of recommendations -about data modeling based on how the RDBMS might store tables on disk. -However, that is an important consideration in Cassandra. Because -Cassandra tables are each stored in separate files on disk, it’s -important to keep related columns defined together in the same table. - -A key goal that you will see as you begin creating data models in -Cassandra is to minimize the number of partitions that must be searched -in order to satisfy a given query. Because the partition is a unit of -storage that does not get divided across nodes, a query that searches a -single partition will typically yield the best performance. - -Sorting is a design decision -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In an RDBMS, you can easily change the order in which records are -returned to you by using ``ORDER BY`` in your query. The default sort -order is not configurable; by default, records are returned in the order -in which they are written. If you want to change the order, you just -modify your query, and you can sort by any list of columns. - -In Cassandra, however, sorting is treated differently; it is a design -decision. The sort order available on queries is fixed, and is -determined entirely by the selection of clustering columns you supply in -the ``CREATE TABLE`` command. The CQL ``SELECT`` statement does support -``ORDER BY`` semantics, but only in the order specified by the -clustering columns. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. 
Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_refining.rst.txt b/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_refining.rst.txt deleted file mode 100644 index 13a276ed7..000000000 --- a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_refining.rst.txt +++ /dev/null @@ -1,218 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. role:: raw-latex(raw) - :format: latex -.. - -Evaluating and Refining Data Models -=================================== - -Once you’ve created a physical model, there are some steps you’ll want -to take to evaluate and refine table designs to help ensure optimal -performance. - -Calculating Partition Size -------------------------- - -The first thing that you want to look for is whether your tables will have -partitions that are overly large, or to put it another way, too -wide. Partition size is measured by the number of cells (values) that -are stored in the partition. Cassandra’s hard limit is 2 billion cells -per partition, but you’ll likely run into performance issues before -reaching that limit. - -In order to calculate the size of partitions, use the following -formula: - -.. math:: N_v = N_r (N_c - N_{pk} - N_s) + N_s - -The number of values (or cells) in the partition (N\ :sub:`v`) is equal to -the number of static columns (N\ :sub:`s`) plus the product of the number -of rows (N\ :sub:`r`) and the number of values per row. The number of -values per row is defined as the number of columns (N\ :sub:`c`) minus the -number of primary key columns (N\ :sub:`pk`) and static columns -(N\ :sub:`s`). - -The number of columns tends to be relatively static, although it -is possible to alter tables at runtime. For this reason, a -primary driver of partition size is the number of rows in the partition. -This is a key factor that you must consider in determining whether a -partition has the potential to get too large. Two billion values sounds -like a lot, but in a sensor system where tens or hundreds of values are -measured every millisecond, the number of values starts to add up pretty -fast. - -Let’s take a look at one of the tables to analyze the partition size. -Because it has a wide partition design with one partition per hotel, -look at the ``available_rooms_by_hotel_date`` table. The table has -four columns total (N\ :sub:`c` = 4), including three primary key columns -(N\ :sub:`pk` = 3) and no static columns (N\ :sub:`s` = 0). Plugging these -values into the formula, the result is: - -.. math:: N_v = N_r (4 - 3 - 0) + 0 = 1N_r - -Therefore the number of values for this table is equal to the number of -rows.
You still need to determine the number of rows. To do this, make -estimates based on the application design. The table is -storing a record for each room, in each hotel, for every night. -Let's assume the system will be used to store two years of -inventory at a time, and there are 5,000 hotels in the system, with an -average of 100 rooms in each hotel. - -Since there is a partition for each hotel, the estimated number of rows -per partition is as follows: - -.. math:: N_r = 100 rooms/hotel \times 730 days = 73,000 rows - -This relatively small number of rows per partition is not going to get -you in too much trouble, but if you start storing more dates of inventory, -or don’t manage the size of the inventory well using TTL, you could start -having issues. You still might want to look at breaking up this large -partition, which you'll see how to do shortly. - -When performing sizing calculations, it is tempting to assume the -nominal or average case for variables such as the number of rows. -Consider calculating the worst case as well, as these sorts of -predictions have a way of coming true in successful systems. - -Calculating Size on Disk ------------------------ - -In addition to calculating the size of a partition, it is also an -excellent idea to estimate the amount of disk space that will be -required for each table you plan to store in the cluster. In order to -determine the size S\ :sub:`t` of a partition, use the following formula: - -.. math:: S_t = \displaystyle\sum_i sizeOf\big(c_{k_i}\big) + \displaystyle\sum_j sizeOf\big(c_{s_j}\big) + N_r\times \bigg(\displaystyle\sum_k sizeOf\big(c_{r_k}\big) + \displaystyle\sum_l sizeOf\big(c_{c_l}\big)\bigg) + - -.. math:: N_v\times sizeOf\big(t_{avg}\big) - -This is a bit more complex than the previous formula, but let's break it -down a bit at a time. Let’s take a look at the notation first: - -- In this formula, c\ :sub:`k` refers to partition key columns, - c\ :sub:`s` to static columns, c\ :sub:`r` to regular columns, and - c\ :sub:`c` to clustering columns. - -- The term t\ :sub:`avg` refers to the average number of bytes of - metadata stored per cell, such as timestamps. It is typical to use an - estimate of 8 bytes for this value. - -- You'll recognize the number of rows N\ :sub:`r` and number of values - N\ :sub:`v` from previous calculations. - -- The **sizeOf()** function refers to the size in bytes of the CQL data - type of each referenced column. - -The first term asks you to sum the size of the partition key columns. For -this example, the ``available_rooms_by_hotel_date`` table has a single -partition key column, the ``hotel_id``, which is of type -``text``. Assuming that hotel identifiers are simple 5-character codes, -you have a 5-byte value, so the sum of the partition key column sizes is -5 bytes. - -The second term asks you to sum the size of the static columns. This table -has no static columns, so the size is 0 bytes. - -The third term is the most involved, and for good reason—it is -calculating the size of the cells in the partition. Sum the size of -the clustering columns and regular columns. The two clustering columns -are the ``date``, which is 4 bytes, and the ``room_number``, -which is a 2-byte short integer, giving a sum of 6 bytes. -There is only a single regular column, the boolean ``is_available``, -which is 1 byte in size. Summing the regular column size -(1 byte) plus the clustering column size (6 bytes) gives a total of 7 -bytes.
To finish up the term, multiply this value by the number of -rows (73,000), giving a result of 511,000 bytes (0.51 MB). - -The fourth term is simply counting the metadata that Cassandra -stores for each cell. In the storage format used by Cassandra 3.0 and -later, the amount of metadata for a given cell varies based on the type -of data being stored, and whether or not custom timestamp or TTL values -are specified for individual cells. For this table, reuse the number -of values from the previous calculation (73,000) and multiply by 8, -which gives 0.58 MB. - -Adding these terms together, you get a final estimate: - -.. math:: Partition size = 5 bytes + 0 bytes + 0.51 MB + 0.58 MB = 1.1 MB - -This formula is an approximation of the actual size of a partition on -disk, but is accurate enough to be quite useful. Remembering that the -partition must be able to fit on a single node, it looks like the table -design will not put a lot of strain on disk storage. - -Cassandra’s storage engine was re-implemented for the 3.0 release, -including a new format for SSTable files. The previous format stored a -separate copy of the clustering columns as part of the record for each -cell. The newer format eliminates this duplication, which reduces the -size of stored data and simplifies the formula for computing that size. - -Keep in mind also that this estimate only counts a single replica of -data. You will need to multiply the value obtained here by the number of -partitions and the number of replicas specified by the keyspace’s -replication strategy in order to determine the total required -capacity for each table. This will come in handy when you -plan your cluster. - -Breaking Up Large Partitions ---------------------------- - -As discussed previously, the goal is to design tables that can provide -the data you need with queries that touch a single partition, or failing -that, the minimum possible number of partitions. However, as shown in -the examples, it is quite possible to design wide -partition-style tables that approach Cassandra’s built-in limits. -Performing sizing analysis on tables may reveal partitions that are -potentially too large, either in number of values, size on disk, or -both. - -The technique for splitting a large partition is straightforward: add an -additional column to the partition key. In most cases, moving one of the -existing columns into the partition key will be sufficient. Another -option is to introduce an additional column to the table to act as a -sharding key, but this requires additional application logic. - -Continuing to examine the available rooms example, if you add the ``date`` -column to the partition key for the ``available_rooms_by_hotel_date`` -table, each partition would then represent the availability of rooms -at a specific hotel on a specific date. This will certainly yield -partitions that are significantly smaller, perhaps too small, as the -data for consecutive days will likely be on separate nodes. - -Another technique known as **bucketing** is often used to break the data -into moderate-size partitions. For example, you could bucketize the -``available_rooms_by_hotel_date`` table by adding a ``month`` column to -the partition key, perhaps represented as an integer. The comparison -with the original design is shown in the figure below. While the -``month`` column is partially duplicative of the ``date``, it provides -a nice way of grouping related data in a partition that will not get -too large.
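As a sketch of how the bucketed design shown in the figure below might be expressed, assuming a hypothetical table name and an integer year-month encoding for the bucket::

    CREATE TABLE hotel.available_rooms_by_hotel_month (
        hotel_id text,
        month int,             -- e.g. 202012 for December 2020; assumed encoding
        date date,
        room_number smallint,
        is_available boolean,
        PRIMARY KEY ((hotel_id, month), date, room_number)
    );

Each partition now covers a single hotel for a single month rather than the hotel's entire inventory.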
image:: images/data_modeling_hotel_bucketing.png - -If you really felt strongly about preserving a wide partition design, you -could instead add the ``room_id`` to the partition key, so that each -partition would represent the availability of the room across all -dates. Because there was no query identified that involves searching -availability of a specific room, the first or second design approach -is most suitable to the application needs. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_schema.rst.txt b/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_schema.rst.txt deleted file mode 100644 index 1876ec3fa..000000000 --- a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_schema.rst.txt +++ /dev/null @@ -1,144 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Defining Database Schema -======================== - -Once you have finished evaluating and refining the physical model, you’re -ready to implement the schema in CQL. Here is the schema for the -``hotel`` keyspace, using CQL’s comment feature to document the query -pattern supported by each table:: - - CREATE KEYSPACE hotel WITH replication = - {‘class’: ‘SimpleStrategy’, ‘replication_factor’ : 3}; - - CREATE TYPE hotel.address ( - street text, - city text, - state_or_province text, - postal_code text, - country text ); - - CREATE TABLE hotel.hotels_by_poi ( - poi_name text, - hotel_id text, - name text, - phone text, - address frozen
, - PRIMARY KEY ((poi_name), hotel_id) ) - WITH comment = ‘Q1. Find hotels near given poi’ - AND CLUSTERING ORDER BY (hotel_id ASC) ; - - CREATE TABLE hotel.hotels ( - id text PRIMARY KEY, - name text, - phone text, - address frozen
, - pois set ) - WITH comment = ‘Q2. Find information about a hotel’; - - CREATE TABLE hotel.pois_by_hotel ( - poi_name text, - hotel_id text, - description text, - PRIMARY KEY ((hotel_id), poi_name) ) - WITH comment = Q3. Find pois near a hotel’; - - CREATE TABLE hotel.available_rooms_by_hotel_date ( - hotel_id text, - date date, - room_number smallint, - is_available boolean, - PRIMARY KEY ((hotel_id), date, room_number) ) - WITH comment = ‘Q4. Find available rooms by hotel date’; - - CREATE TABLE hotel.amenities_by_room ( - hotel_id text, - room_number smallint, - amenity_name text, - description text, - PRIMARY KEY ((hotel_id, room_number), amenity_name) ) - WITH comment = ‘Q5. Find amenities for a room’; - - -Notice that the elements of the partition key are surrounded -with parentheses, even though the partition key consists -of the single column ``poi_name``. This is a best practice that makes -the selection of partition key more explicit to others reading your CQL. - -Similarly, here is the schema for the ``reservation`` keyspace:: - - CREATE KEYSPACE reservation WITH replication = {‘class’: - ‘SimpleStrategy’, ‘replication_factor’ : 3}; - - CREATE TYPE reservation.address ( - street text, - city text, - state_or_province text, - postal_code text, - country text ); - - CREATE TABLE reservation.reservations_by_confirmation ( - confirm_number text, - hotel_id text, - start_date date, - end_date date, - room_number smallint, - guest_id uuid, - PRIMARY KEY (confirm_number) ) - WITH comment = ‘Q6. Find reservations by confirmation number’; - - CREATE TABLE reservation.reservations_by_hotel_date ( - hotel_id text, - start_date date, - end_date date, - room_number smallint, - confirm_number text, - guest_id uuid, - PRIMARY KEY ((hotel_id, start_date), room_number) ) - WITH comment = ‘Q7. Find reservations by hotel and date’; - - CREATE TABLE reservation.reservations_by_guest ( - guest_last_name text, - hotel_id text, - start_date date, - end_date date, - room_number smallint, - confirm_number text, - guest_id uuid, - PRIMARY KEY ((guest_last_name), hotel_id) ) - WITH comment = ‘Q8. Find reservations by guest name’; - - CREATE TABLE reservation.guests ( - guest_id uuid PRIMARY KEY, - first_name text, - last_name text, - title text, - emails set, - phone_numbers list, - addresses map, - confirm_number text ) - WITH comment = ‘Q9. Find guest by ID’; - -You now have a complete Cassandra schema for storing data for a hotel -application. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_tools.rst.txt b/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_tools.rst.txt deleted file mode 100644 index 46fad3346..000000000 --- a/src/doc/4.0-alpha3/_sources/data_modeling/data_modeling_tools.rst.txt +++ /dev/null @@ -1,64 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Data Modeling Tools -============================= - -There are several tools available to help you design and -manage your Cassandra schema and build queries. - -* `Hackolade `_ - is a data modeling tool that supports schema design for Cassandra and - many other NoSQL databases. Hackolade supports the unique concepts of - CQL such as partition keys and clustering columns, as well as data types - including collections and UDTs. It also provides the ability to create - Chebotko diagrams. - -* `Kashlev Data Modeler `_ is a Cassandra - data modeling tool that automates the data modeling methodology - described in this documentation, including identifying - access patterns, conceptual, logical, and physical data modeling, and - schema generation. It also includes model patterns that you can - optionally leverage as a starting point for your designs. - -* DataStax DevCenter is a tool for managing - schema, executing queries and viewing results. While the tool is no - longer actively supported, it is still popular with many developers and - is available as a `free download `_. - DevCenter features syntax highlighting for CQL commands, types, and name - literals. DevCenter provides command completion as you type out CQL - commands and interprets the commands you type, highlighting any errors - you make. The tool provides panes for managing multiple CQL scripts and - connections to multiple clusters. The connections are used to run CQL - commands against live clusters and view the results. The tool also has a - query trace feature that is useful for gaining insight into the - performance of your queries. - -* IDE Plugins - There are CQL plugins available for several Integrated - Development Environments (IDEs), such as IntelliJ IDEA and Apache - NetBeans. These plugins typically provide features such as schema - management and query execution. - -Some IDEs and tools that claim to support Cassandra do not actually support -CQL natively, but instead access Cassandra using a JDBC/ODBC driver and -interact with Cassandra as if it were a relational database with SQL -support. When selecting tools for working with Cassandra, you’ll want to -make sure they support CQL and reinforce Cassandra best practices for -data modeling as presented in this documentation. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/data_modeling/index.rst.txt b/src/doc/4.0-alpha3/_sources/data_modeling/index.rst.txt deleted file mode 100644 index f01c92cb2..000000000 --- a/src/doc/4.0-alpha3/_sources/data_modeling/index.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -..
http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Data Modeling -************* - -.. toctree:: - :maxdepth: 2 - - data_modeling_conceptual - data_modeling_rdbms - data_modeling_queries - data_modeling_logical - data_modeling_physical - data_modeling_refining - data_modeling_schema - data_modeling_tools - - - - - diff --git a/src/doc/4.0-alpha3/_sources/development/ci.rst.txt b/src/doc/4.0-alpha3/_sources/development/ci.rst.txt deleted file mode 100644 index 77360aea9..000000000 --- a/src/doc/4.0-alpha3/_sources/development/ci.rst.txt +++ /dev/null @@ -1,72 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Jenkins CI Environment -********************** - -About CI testing and Apache Cassandra -===================================== - -Cassandra can be automatically tested using various test suites, that are either implemented based on JUnit or the `dtest `_ scripts written in Python. As outlined in :doc:`testing`, each kind of test suite addresses a different way how to test Cassandra. But in the end, all of them will be executed together on our CI platform at `builds.apache.org `_, running `Jenkins `_. - - - -Setting up your own Jenkins server -================================== - -Jenkins is an open source solution that can be installed on a large number of platforms. Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution. - -Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment. - -Required plugins ----------------- - -The following plugins need to be installed additionally to the standard plugins (git, ant, ..). - -You can install any missing plugins through the install manager. 
- -Go to ``Manage Jenkins -> Manage Plugins -> Available`` and install the following plugins and respective dependencies: - -* Job DSL -* Javadoc Plugin -* description setter plugin -* Throttle Concurrent Builds Plug-in -* Test stability history -* Hudson Post build task - - -Setup seed job --------------- - -Config ``New Item`` - -* Name it ``Cassandra-Job-DSL`` -* Select ``Freestyle project`` - -Under ``Source Code Management`` select Git using the repository: ``https://github.com/apache/cassandra-builds`` - -Under ``Build``, confirm ``Add build step`` -> ``Process Job DSLs`` and enter at ``Look on Filesystem``: ``jenkins-dsl/cassandra_job_dsl_seed.groovy`` - -Generated jobs will be created based on the Groovy script's default settings. You may want to override settings by checking ``This project is parameterized`` and add ``String Parameter`` for on the variables that can be found in the top of the script. This will allow you to setup jobs for your own repository and branches (e.g. working branches). - -**When done, confirm "Save"** - -You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message `"Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use"`. Goto ``Manage Jenkins`` -> ``In-process Script Approval`` to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates. - -Jobs are triggered by either changes in Git or are scheduled to execute periodically, e.g. on daily basis. Jenkins will use any available executor with the label "cassandra", once the job is to be run. Please make sure to make any executors available by selecting ``Build Executor Status`` -> ``Configure`` -> Add "``cassandra``" as label and save. - -Executors need to have "JDK 1.8 (latest)" installed. This is done under ``Manage Jenkins -> Global Tool Configuration -> JDK Installations…``. Executors also need to have the virtualenv package installed on their system. - diff --git a/src/doc/4.0-alpha3/_sources/development/code_style.rst.txt b/src/doc/4.0-alpha3/_sources/development/code_style.rst.txt deleted file mode 100644 index 5a486a4a3..000000000 --- a/src/doc/4.0-alpha3/_sources/development/code_style.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Code Style -========== - -General Code Conventions ------------------------- - - - The Cassandra project follows `Sun's Java coding conventions `_ with an important exception: ``{`` and ``}`` are always placed on a new line - -Exception handling ------------------- - - - Never ever write ``catch (...) {}`` or ``catch (...) 
{ logger.error() }`` merely to satisfy Java's compile-time exception checking. Always propagate the exception up or throw ``RuntimeException`` (or, if it "can't happen," ``AssertionError``). This makes the exceptions visible to automated tests. - - Avoid propagating up checked exceptions that no caller handles. Rethrow as ``RuntimeException`` (or ``IOError``, if that is more applicable). - - Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don't hide it behind a warn; if it isn't, no need for the warning. - - If you genuinely know an exception indicates an expected condition, it's okay to ignore it BUT this must be explicitly explained in a comment. - -Boilerplate ------------ - - - Avoid redundant ``@Override`` annotations when implementing abstract or interface methods. - - Do not implement equals or hashcode methods unless they are actually needed. - - Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in "real" methods to either.) - - Prefer requiring initialization in the constructor to setters. - - Avoid redundant ``this`` references to member fields or methods. - - Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it. - - Always include braces for nested levels of conditionals and loops. Only avoid braces for single level. - -Multiline statements --------------------- - - - Try to keep lines under 120 characters, but use good judgement -- it's better to exceed 120 by a little, than split a line that has no natural splitting points. - - When splitting inside a method call, use one line per parameter and align them, like this: - - :: - - SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), - columnFamilies.size(), - StorageService.getPartitioner()); - - - When splitting a ternary, use one line per clause, carry the operator, and align like this: - - :: - - var = bar == null - ? doFoo() - : doBar(); - -Whitespace ----------- - - - Please make sure to use 4 spaces instead of the tab character for all your indentation. - - Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn't have to pay attention to whitespace diffs. - -Imports -------- - -Please observe the following order for your imports:: - - java - [blank line] - com.google.common - org.apache.commons - org.junit - org.slf4j - [blank line] - everything else alphabetically - -Format files for IDEs ---------------------- - - - IntelliJ: `intellij-codestyle.jar `_ - - IntelliJ 13: `gist for IntelliJ 13 `_ (this is a work in progress, still working on javadoc, ternary style, line continuations, etc) - - Eclipse (https://github.com/tjake/cassandra-style-eclipse) - - - diff --git a/src/doc/4.0-alpha3/_sources/development/dependencies.rst.txt b/src/doc/4.0-alpha3/_sources/development/dependencies.rst.txt deleted file mode 100644 index 7d230d3ae..000000000 --- a/src/doc/4.0-alpha3/_sources/development/dependencies.rst.txt +++ /dev/null @@ -1,54 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dependency Management -********************* - -Managing libraries for Cassandra is a bit less straightforward compared to other projects, as the build process is based on ant, maven and manually managed jars. Make sure to follow the steps below carefully and pay attention to any emerging issues in the :doc:`ci` and any related issues reported on Jira/ML, in case of any project dependency changes. - -As Cassandra is an Apache product, all included libraries must follow Apache's `software license requirements `_. - -Required steps to add or update libraries -========================================= - -* Add or replace jar file in ``lib`` directory -* Add or update ``lib/license`` files -* Update dependencies in ``build.xml`` - - * Add to ``parent-pom`` with correct version - * Add to ``all-pom`` if simple Cassandra dependency (see below) - - -POM file types -============== - -* **parent-pom** - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here. -* **build-deps-pom(-sources)** + **coverage-deps-pom** - used by ``ant build`` compile target. Listed dependencies will be resolved and copied to ``build/lib/{jar,sources}`` by executing the ``maven-ant-tasks-retrieve-build`` target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution. -* **test-deps-pom** - referred to by ``maven-ant-tasks-retrieve-test`` to retrieve and save dependencies to ``build/test/lib``. Exclusively used during JUnit test execution. -* **all-pom** - pom for `cassandra-all.jar `_ that can be installed or deployed to public maven repos via ``ant publish`` -* **dist-pom** - pom for tarball distribution (cassandra-{bin,src}.tar.gz) created by ``ant artifacts``. Should be left as is, but needed for installing or deploying releases. - - -Troubleshooting and conflict resolution -======================================= - -Here are some useful commands that may help you resolve conflicts. - -* ``ant realclean`` - gets rid of the build directory, including build artifacts. -* ``mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j`` - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ``ant mvn-install``. -* ``rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/`` - removes cached local Cassandra maven artifacts - - diff --git a/src/doc/4.0-alpha3/_sources/development/documentation.rst.txt b/src/doc/4.0-alpha3/_sources/development/documentation.rst.txt deleted file mode 100644 index c623d54b9..000000000 --- a/src/doc/4.0-alpha3/_sources/development/documentation.rst.txt +++ /dev/null @@ -1,104 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -..
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - -Working on Documentation -************************* - -How Cassandra is documented -=========================== - -The official Cassandra documentation lives in the project's git repository. We use a static site generator, `Sphinx `_, to create pages hosted at `cassandra.apache.org `_. You'll also find developer centric content about Cassandra internals in our retired `wiki `_ (not covered by this guide). - -Using a static site generator often requires to use a markup language instead of visual editors (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses `reStructuredText `_ for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at `existing documents <..>`_ to get a better idea how we use reStructuredText to write our documents. - -So how do you actually start making contributions? - -GitHub based work flow -====================== - -*Recommended for shorter documents and minor changes on existing content (e.g. fixing typos or updating descriptions)* - -Follow these steps to contribute using GitHub. It's assumed that you're logged in with an existing account. - -1. Fork the GitHub mirror of the `Cassandra repository `_ - -.. image:: images/docs_fork.png - -2. Create a new branch that you can use to make your edits. It's recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later to when you decide you’re ready to contribute your work. - -.. image:: images/docs_create_branch.png - -3. Navigate to document sources ``doc/source`` to find the ``.rst`` file to edit. The URL of the document should correspond to the directory structure. New files can be created using the "Create new file" button: - -.. image:: images/docs_create_file.png - -4. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and add some content. Have a look at other existing ``.rst`` files to get a better idea what format elements to use. - -.. image:: images/docs_editor.png - -Make sure to preview added content before committing any changes. - -.. image:: images/docs_preview.png - -5. Commit your work when you're done. Make sure to add a short description of all your edits since the last time you committed before. - -.. image:: images/docs_commit.png - -6. Finally if you decide that you're done working on your branch, it's time to create a pull request! - -.. image:: images/docs_pr.png - -Afterwards the GitHub Cassandra mirror will list your pull request and you're done. Congratulations! Please give us some time to look at your suggested changes before we get back to you. - - -Jira based work flow -==================== - -*Recommended for major changes* - -Significant changes to the documentation are best managed through our Jira issue tracker. 
Please follow the same `contribution guides `_ as for regular code contributions. Creating high quality content takes a lot of effort. It’s therefor always a good idea to create a ticket before you start and explain what you’re planing to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed. - -Working on documents locally using Sphinx -========================================= - -*Recommended for advanced editing* - -Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefor it’s a good idea to setup Sphinx locally for any serious editing. Please follow the instructions in the Cassandra source directory at ``doc/README.md``. Setup is very easy (at least on OSX and Linux). - -Notes for committers -==================== - -Please feel free to get involved and merge pull requests created on the GitHub mirror if you're a committer. As this is a read-only repository, you won't be able to merge a PR directly on GitHub. You'll have to commit the changes against the Apache repository with a comment that will close the PR when the committ syncs with GitHub. - -You may use a git work flow like this:: - - git remote add github https://github.com/apache/cassandra.git - git fetch github pull//head: - git checkout - -Now either rebase or squash the commit, e.g. for squashing:: - - git reset --soft origin/trunk - git commit --author - -Make sure to add a proper commit message including a "Closes #" text to automatically close the PR. - -Publishing ----------- - -Details for building and publishing of the site at cassandra.apache.org can be found `here `_. - diff --git a/src/doc/4.0-alpha3/_sources/development/gettingstarted.rst.txt b/src/doc/4.0-alpha3/_sources/development/gettingstarted.rst.txt deleted file mode 100644 index c2f5ef36e..000000000 --- a/src/doc/4.0-alpha3/_sources/development/gettingstarted.rst.txt +++ /dev/null @@ -1,60 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _gettingstarted: - -Getting Started -************************* - -Initial Contributions -======================== - -Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). 
So, before firing up your IDE to create that new feature, we'd suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work. - * Add to or update the documentation - * Answer questions on the user list - * Review and test a submitted patch - * Investigate and fix a reported bug - * Create unit tests and d-tests - -Updating documentation -======================== - -The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (:ref:`patches`). - -Answering questions on the user list -==================================== - -Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! -See the `community `_ page for details on how to subscribe to the mailing list. - -Reviewing and testing a submitted patch -======================================= - -Reviewing patches is not the sole domain of committers; if others have reviewed a patch it can reduce the load on the committers, allowing them to write more great features or review more patches. Follow the instructions in :ref:`how_to_review` or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, "I tested this performance enhancement on our application's standard production load test and found a 3% improvement.") - -Investigate and/or fix a reported bug -===================================== - -Often, the hardest work in fixing a bug is reproducing it. Even if you don't have the knowledge to produce a fix, figuring out a way to reliably reproduce an issue can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (:ref:`patches`). - -Create unit tests and Dtests -============================ - -Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your change introducing new issues. See :ref:`testing` and :ref:`patches` for more detail. - - - diff --git a/src/doc/4.0-alpha3/_sources/development/how_to_commit.rst.txt b/src/doc/4.0-alpha3/_sources/development/how_to_commit.rst.txt deleted file mode 100644 index dff39832d..000000000 --- a/src/doc/4.0-alpha3/_sources/development/how_to_commit.rst.txt +++ /dev/null @@ -1,75 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. 
distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -How-to Commit -============= - -If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself. - -Here is how committing, merging, and pushing will usually look for tickets that follow the convention (if patch-based): - -The hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch). - -On cassandra-3.0: - #. ``git am -3 12345-3.0.patch`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git apply -3 12345-3.3.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit --amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git apply -3 12345-trunk.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit --amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk --atomic`` - -Same scenario, but a branch-based contribution: - -On cassandra-3.0: - #. ``git cherry-pick `` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit --amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit --amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk --atomic`` - -.. tip:: - - Notes on git flags: - ``-3`` flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply. - - ``--atomic`` flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue. - -.. tip:: - - The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. - curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch - diff --git a/src/doc/4.0-alpha3/_sources/development/how_to_review.rst.txt b/src/doc/4.0-alpha3/_sources/development/how_to_review.rst.txt deleted file mode 100644 index 4778b6946..000000000 --- a/src/doc/4.0-alpha3/_sources/development/how_to_review.rst.txt +++ /dev/null @@ -1,73 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _how_to_review: - -Review Checklist -**************** - -When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process: - -**General** - - * Does it conform to the :doc:`code_style` guidelines? - * Is there any redundant or duplicate code? - * Is the code as modular as possible? - * Can any singletons be avoided? - * Can any of the code be replaced with library functions? - * Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem? - -**Error-Handling** - - * Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded? - * Where third-party utilities are used, are returning errors being caught? - * Are invalid parameter values handled? - * Are any Throwable/Exceptions passed to the JVMStabilityInspector? - * Are errors well-documented? Does the error message tell the user how to proceed? - * Do exceptions propagate to the appropriate level in the code? - -**Documentation** - - * Do comments exist and describe the intent of the code (the "why", not the "how")? - * Are javadocs added where appropriate? - * Is any unusual behavior or edge-case handling described? - * Are data structures and units of measurement explained? - * Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’? - * Does the code self-document via clear naming, abstractions, and flow control? - * Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed? - * Is the ticket tagged with "client-impacting" and "doc-impacting", where appropriate? - * Has lib/licences been updated for third-party libs? Are they Apache License compatible? - * Is the Component on the JIRA ticket set appropriately? - -**Testing** - - * Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc. - * Do tests exist and are they comprehensive? - * Do unit tests actually test that the code is performing the intended functionality? - * Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse? - * If the code may be affected by multi-node clusters, are there dtests? - * If the code may take a long time to test properly, are there CVH tests? - * Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions? - * If patch affects read/write path, did we test for performance regressions w/multiple workloads? - * If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature? - -**Logging** - - * Are logging statements logged at the correct level? - * Are there logs in the critical path that could affect performance? 
- * Is there any log that could be added to communicate status or troubleshoot potential problems in this feature? - * Can any unnecessary logging statement be removed? - diff --git a/src/doc/4.0-alpha3/_sources/development/ide.rst.txt b/src/doc/4.0-alpha3/_sources/development/ide.rst.txt deleted file mode 100644 index 97c73ae61..000000000 --- a/src/doc/4.0-alpha3/_sources/development/ide.rst.txt +++ /dev/null @@ -1,185 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Building and IDE Integration -**************************** - -Building From Source -==================== - -Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using `Java 8 `_, `Git `_ and `Ant `_. - -The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:: - - git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk - -Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:: - - git checkout cassandra-3.0 - -You can get a list of available branches with ``git branch``. - -Finally build Cassandra using ant:: - - ant - -This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled. - -.. hint:: - - You can setup multiple working trees for different Cassandra versions from the same repository using `git-worktree `_. - -| - -Setting up Cassandra in IntelliJ IDEA -===================================== - -`IntelliJ IDEA `_ by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra. - -Setup Cassandra as a Project (C* 2.1 and newer) ------------------------------------------------ - -Since 2.1.5, there is a new ant target: ``generate-idea-files``. Please see our `wiki `_ for instructions for older Cassandra versions. - -Please clone and build Cassandra as described above and execute the following steps: - -1. Once Cassandra is built, generate the IDEA files using ant: - -:: - - ant generate-idea-files - -2. Start IDEA - -3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA's File menu - -The project generated by the ant task ``generate-idea-files`` contains nearly everything you need to debug Cassandra and execute unit tests. 
- - * Run/debug defaults for JUnit - * Run/debug configuration for Cassandra daemon - * License header for Java source files - * Cassandra code style - * Inspections - -| - -Opening Cassandra in Apache NetBeans -======================================= - -`Apache NetBeans `_ is the elder of the open sourced java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans. - -Open Cassandra as a Project (C* 4.0 and newer) ------------------------------------------------ - -Please clone and build Cassandra as described above and execute the following steps: - -1. Start Apache NetBeans - -2. Open the NetBeans project from the `ide/` folder of the checked out Cassandra directory using the menu item "Open Project…" in NetBeans' File menu - -The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant `build.xml` script. - - * Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu. - * Profile Project is available via the Profile menu. In the opened Profiler tab, click the green "Profile" button. - * Cassandra's code style is honored in `ide/nbproject/project.properties` - -The `JAVA8_HOME` system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute. - -| - -Setting up Cassandra in Eclipse -=============================== - -Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the `download page `_. The following guide was created with "Eclipse IDE for Java Developers". - -These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x. - -Project Settings ----------------- - -**It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.** - - * Clone and build Cassandra as described above. - * Run ``ant generate-eclipse-files`` to create the Eclipse settings. - * Start Eclipse. - * Select ``File->Import->Existing Projects into Workspace->Select git directory``. - * Make sure "cassandra-trunk" is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above). - * Confirm "Finish" to have your project imported. - -You should now be able to find the project as part of the "Package Explorer" or "Project Explorer" without having Eclipse complain about any errors after building the project automatically. - -Unit Tests ----------- - -Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting ``Run As->JUnit Test``. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting ``Debug As->JUnit Test``. - -Alternatively all unit tests can be run from the command line as described in :doc:`testing` - -Debugging Cassandra Using Eclipse ---------------------------------- - -There are two ways how to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ``./bin/cassandra`` script and connect to the JVM through `remotely `_ from Eclipse or start Cassandra from Eclipse right away. 
- -Starting Cassandra From Command Line -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * Set environment variable to define remote debugging options for the JVM: - ``export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`` - * Start Cassandra by executing the ``./bin/cassandra`` - -Afterwards you should be able to connect to the running Cassandra process through the following steps: - -From the menu, select ``Run->Debug Configurations..`` - -.. image:: images/eclipse_debug0.png - -Create new remote application - -.. image:: images/eclipse_debug1.png - -Configure connection settings by specifying a name and port 1414 - -.. image:: images/eclipse_debug2.png - -Afterwards confirm "Debug" to connect to the JVM and start debugging Cassandra! - -Starting Cassandra From Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cassandra can also be started directly from Eclipse if you don't want to use the command line. - -From the menu, select ``Run->Run Configurations..`` - -.. image:: images/eclipse_debug3.png - -Create new application - -.. image:: images/eclipse_debug4.png - -Specify name, project and main class ``org.apache.cassandra.service.CassandraDaemon`` - -.. image:: images/eclipse_debug5.png - -Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed. - -:: - - -Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true - -.. image:: images/eclipse_debug6.png - -Now just confirm "Debug" and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging! - diff --git a/src/doc/4.0-alpha3/_sources/development/index.rst.txt b/src/doc/4.0-alpha3/_sources/development/index.rst.txt deleted file mode 100644 index ffa7134dd..000000000 --- a/src/doc/4.0-alpha3/_sources/development/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contributing to Cassandra -************************* - -.. toctree:: - :maxdepth: 2 - - gettingstarted - ide - testing - patches - code_style - how_to_review - how_to_commit - documentation - ci - dependencies - release_process diff --git a/src/doc/4.0-alpha3/_sources/development/patches.rst.txt b/src/doc/4.0-alpha3/_sources/development/patches.rst.txt deleted file mode 100644 index f3a2cca0f..000000000 --- a/src/doc/4.0-alpha3/_sources/development/patches.rst.txt +++ /dev/null @@ -1,141 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. 
or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _patches: - -Contributing Code Changes -************************* - -Choosing What to Work on -======================== - -Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java and Python), documentation, testing or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing. - -As a general rule of thumb: - * Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the `developer community `_ - * Bug fixes take higher priority compared to features - * The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes. - * Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately - -.. hint:: - - Not sure what to work on? Just pick an issue marked as `Low Hanging Fruit `_ Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners. - -Before You Start Coding -======================= - -Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or :ref:`Slack `. 
- -You should also - * Avoid redundant work by searching for already reported issues in `JIRA `_ - * Create a new issue early in the process describing what you're working on - not just after finishing your patch - * Link related JIRA issues with your own ticket to provide a better context - * Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code - * Ping people who you actively like to ask for advice on JIRA by `mentioning users `_ - -There are also some fixed rules that you need to be aware: - * Patches will only be applied to branches by following the release model - * Code must be testable - * Code must follow the :doc:`code_style` convention - * Changes must not break compatibility between different Cassandra versions - * Contributions must be covered by the Apache License - -Choosing the Right Branches to Work on -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are currently multiple Cassandra versions maintained in individual branches: - -======= ====== -Version Policy -======= ====== -4.0 Code freeze (see below) -3.11 Critical bug fixes only -3.0 Critical bug fixes only -2.2 Critical bug fixes only -2.1 Critical bug fixes only -======= ====== - -Corresponding branches in git are easy to recognize as they are named ``cassandra-`` (e.g. ``cassandra-3.0``). The ``trunk`` branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases. - -4.0 Code Freeze -""""""""""""""" - -Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. During that time, only the following patches will be considered for acceptance: - - * Bug fixes - * Measurable performance improvements - * Changes not distributed as part of the release such as: - * Testing related improvements and fixes - * Build and infrastructure related changes - * Documentation - -Bug Fixes -""""""""" - -Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be ``cassandra-2.1`` -> ``cassandra-2.2`` -> ``cassandra-3.0`` -> ``cassandra-3.x`` -> ``trunk``. But don't worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn't very common. As a contributor, you're also not expected to provide a single patch for each version. What you need to do however is: - - * Be clear about which versions you could verify to be affected by the bug - * For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on case by case bases - * If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0) - * Test if the patch can be merged cleanly across branches in the direction listed above - * Be clear which branches may need attention by the committer or even create custom patches for those if you can - -Creating a Patch -================ - -So you've finished coding and the great moment arrives: it's time to submit your patch! - - 1. Create a branch for your changes if you haven't done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. ``git checkout -b 12345-3.0`` - 2. 
Verify that you follow Cassandra's :doc:`code_style` - 3. Make sure all tests (including yours) pass using ant as described in :doc:`testing`. If you suspect a test failure is unrelated to your change, it may be useful to check the test's status by searching the issue tracker or looking at `CI `_ results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites. - 4. Consider going through the :doc:`how_to_review` for your code. This will help you to understand how others will consider your change for inclusion. - 5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either: - - a. Attach a patch to JIRA with a single squashed commit in it (per branch), or - b. Squash the commits in-place in your branches into one - - 6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below. - - :: - - - - patch by ; reviewed by for CASSANDRA-##### - - 7. When you're happy with the result, create a patch: - - :: - - git add - git commit -m '' - git format-patch HEAD~1 - mv (e.g. 12345-trunk.txt, 12345-3.0.txt) - - Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch: - - :: - - git push --set-upstream origin 12345-3.0 - - 8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless. - 9. Attach the newly generated patch to the ticket/add a link to your branch and click "Submit Patch" at the top of the ticket. This will move the ticket into "Patch Available" status, indicating that your submission is ready for review. - 10. Wait for other developers or committers to review it and hopefully +1 the ticket (see :doc:`how_to_review`). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable. - 11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into "Patch Available" once again. - -Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work. - - diff --git a/src/doc/4.0-alpha3/_sources/development/release_process.rst.txt b/src/doc/4.0-alpha3/_sources/development/release_process.rst.txt deleted file mode 100644 index 0ab6dff1a..000000000 --- a/src/doc/4.0-alpha3/_sources/development/release_process.rst.txt +++ /dev/null @@ -1,268 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. release_process: - -Release Process -*************** - -.. contents:: :depth: 3 - -|  -| - -.. attention:: - - WORK IN PROGRESS - * A number of these steps still have been finalised/tested. - * The use of people.apache.org needs to be replaced with svnpubsub and dist.apache.org - - -The steps for Release Managers to create, vote and publish releases for Apache Cassandra. - -While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC can complete the process of publishing and announcing the release. - - -Prerequisites -============= - -Background docs - * `ASF Release Policy `_ - * `ASF Release Distribution Policy `_ - * `ASF Release Best Practices `_ - - -A debian based linux OS is required to run the release steps from. Debian-based distros provide the required RPM, dpkg and repository management tools. - - -Create and publish your GPG key -------------------------------- - -To create a GPG key, follow the `guidelines `_. -Include your public key in:: - - https://dist.apache.org/repos/dist/release/cassandra/KEYS - - -Publish your GPG key in a PGP key server, such as `MIT Keyserver `_. - - -Create Release Artifacts -======================== - -Any committer can perform the following steps to create and call a vote on a proposed release. - -Check that no open jira tickets are urgent and currently being worked on. -Also check with a PMC that there's security vulnerabilities currently being worked on in private. - -Perform the Release -------------------- - -Run the following commands to generate and upload release artifacts, to a nexus staging repository and distribution location:: - - - cd ~/git - git clone https://github.com/apache/cassandra-builds.git - # Edit the variables at the top of `cassandra-builds/cassandra-release/prepare_release.sh` - - # After cloning cassandra-builds repo, the prepare_release.sh is run from the actual cassandra git checkout, - # on the branch/commit that we wish to tag for the tentative release along with version number to tag. - # For example here might be `3.11` and `3.11.3` - cd ~/git/cassandra/ - git checkout cassandra- - ../cassandra-builds/cassandra-release/prepare_release.sh -v - -If successful, take note of the email text output which can be used in the next section "Call for a Vote". - -The ``prepare_release.sh`` script does not yet generate and upload the rpm distribution packages. -To generate and upload them do:: - - cd ~/git/cassandra-build - docker build -f docker/centos7-image.docker docker/ - docker run --rm -v `pwd`/dist:/dist `docker images -f label=org.cassandra.buildenv=centos -q` /home/build/build-rpms.sh -tentative - rpmsign --addsign dist/*.rpm - -For more information on the above steps see the `cassandra-builds documentation `_. 
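As an optional sanity check before uploading (a suggestion only, assuming the ``rpm`` tooling is available on the build host), the signatures on the freshly signed packages can be verified locally::

    # verify digests and GPG signatures on the packages produced above
    rpm --checksig dist/*.rpm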
-The next step is to copy and commit these binaries to staging svnpubsub:: - - # FIXME the following commands is wrong while people.apache.org is still used instead of svnpubsub and dist.apache.org - cd ~/git - svn co https://dist.apache.org/repos/dist/dev/cassandra cassandra-dist-dev - mkdir cassandra-dist-dev/ - cp cassandra-build/dist/*.rpm cassandra-dist-dev// - - svn add cassandra-dist-dev/ - svn ci cassandra-dist-dev/ - -After committing the binaries to staging, increment the version number in Cassandra on the `cassandra-` - - cd ~/git/cassandra/ - git checkout cassandra- - edit build.xml # update ` ` - edit debian/changelog # add entry for new version - edit CHANGES.txt # add entry for new version - git commit -m "Update version to " build.xml debian/changelog CHANGES.txt - git push - -Call for a Vote -=============== - -Fill out the following email template and send to the dev mailing list:: - - I propose the following artifacts for release as . - - sha1: - - Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/-tentative - - Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-/org/apache/cassandra/apache-cassandra// - - Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-/ - - The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/ - - The vote will be open for 72 hours (longer if needed). - - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=-tentative - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=-tentative - - - -Post-vote operations -==================== - -Any PMC can perform the following steps to formalize and publish a successfully voted release. - -Publish Artifacts ------------------ - -Run the following commands to publish the voted release artifacts:: - - cd ~/git - git clone https://github.com/apache/cassandra-builds.git - # edit the variables at the top of `finish_release.sh` - - # After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout, - # on the tentative release tag that we wish to tag for the final release version number tag. - cd ~/git/cassandra/ - git checkout -tentative - ../cassandra-builds/cassandra-release/finish_release.sh -v - -If successful, take note of the email text output which can be used in the next section "Send Release Announcement". -The output will also list the next steps that are required. The first of these is to commit changes made to your https://dist.apache.org/repos/dist/release/cassandra/ checkout. - - -Promote Nexus Repository ------------------------- - - * Login to `Nexus repository `_ again. - * Click on "Staging" and then on the repository with id "cassandra-staging". - * Find your closed staging repository, right click on it and choose "Promote". - * Select the "Releases" repository and click "Promote". - * Next click on "Repositories", select the "Releases" repository and validate that your artifacts exist as you expect them. 
- -Sign and Upload Distribution Packages to Bintray ---------------------------------------- - -Run the following command:: - - cd ~/git - # FIXME the next command is wrong while people.apache.org is used instead of svnpubsub and dist.apache.org - svn mv https://dist.apache.org/repos/dist/dev/cassandra/ https://dist.apache.org/repos/dist/release/cassandra/ - - # Create the yum metadata, sign the metadata, and sign some files within the signed repo metadata that the ASF sig tool errors out on - svn co https://dist.apache.org/repos/dist/release/cassandra/redhat/ cassandra-dist-redhat - cd cassandra-dist-redhat/x/ - createrepo . - gpg --detach-sign --armor repodata/repomd.xml - for f in `find repodata/ -name *.bz2`; do - gpg --detach-sign --armor $f; - done - - svn co https://dist.apache.org/repos/dist/release/cassandra/ cassandra-dist- - cd cassandra-dist- - cassandra-build/cassandra-release/upload_bintray.sh cassandra-dist- - - -Update and Publish Website --------------------------- - -See `docs https://svn.apache.org/repos/asf/cassandra/site/src/README`_ for building and publishing the website. -Also update the CQL doc if appropriate. - -Release version in JIRA ------------------------ - -Release the JIRA version. - - * In JIRA go to the version that you want to release and release it. - * Create a new version, if it has not been done before. - -Update to Next Development Version ----------------------------------- - -Edit and commit ``build.xml`` so the base.version property points to the next version. - -Wait for Artifacts to Sync --------------------------- - -Wait for the artifacts to sync at http://www.apache.org/dist/cassandra/ - -Send Release Announcement -------------------------- - -Fill out the following email template and send to both user and dev mailing lists:: - - The Cassandra team is pleased to announce the release of Apache Cassandra version . - - Apache Cassandra is a fully distributed database. It is the right choice - when you need scalability and high availability without compromising - performance. - - http://cassandra.apache.org/ - - Downloads of source and binary distributions are listed in our download - section: - - http://cassandra.apache.org/download/ - - This version is release[1] on the series. As always, - please pay attention to the release notes[2] and let us know[3] if you - were to encounter any problem. - - Enjoy! - - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb= - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb= - [3]: https://issues.apache.org/jira/browse/CASSANDRA - -Update Slack Cassandra topic ---------------------------- - -Update topic in ``cassandra`` :ref:`Slack room ` - /topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don't ask to ask - -Tweet from @Cassandra ---------------------- - -Tweet the new release, from the @Cassandra account - -Delete Old Releases -------------------- - -As described in `When to Archive `_. -Also check people.apache.org as previous release scripts used it. diff --git a/src/doc/4.0-alpha3/_sources/development/testing.rst.txt b/src/doc/4.0-alpha3/_sources/development/testing.rst.txt deleted file mode 100644 index 7f38fe590..000000000 --- a/src/doc/4.0-alpha3/_sources/development/testing.rst.txt +++ /dev/null @@ -1,98 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _testing: - -Testing -******* - -Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you're working on. - - -Unit Testing -============ - -The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the ``test/unit`` directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example. - -.. code-block:: java - - @Test - public void testBatchAndList() throws Throwable - { - createTable("CREATE TABLE %s (k int PRIMARY KEY, l list)"); - execute("BEGIN BATCH " + - "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " + - "APPLY BATCH"); - - assertRows(execute("SELECT l FROM %s WHERE k = 0"), - row(list(1, 2, 3))); - } - -Unit tests can be run from the command line using the ``ant test`` command, ``ant test -Dtest.name=`` to execute a test suite or ``ant testsome -Dtest.name= -Dtest.methods=[,testmethod2]`` for individual tests. For example, to run all test methods in the ``org.apache.cassandra.cql3.SimpleQueryTest`` class, you would run:: - - ant test -Dtest.name=SimpleQueryTest - -To run only the ``testStaticCompactTables()`` test method from that class, you would run:: - - ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables - -If you see an error like this:: - - Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found: - org/apache/tools/ant/taskdefs/optional/junit/JUnitTask using the classloader - AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar] - -You will need to install the ant-optional package since it contains the ``JUnitTask`` class. - -Long running tests ------------------- - -Test that consume a significant amount of time during execution can be found in the ``test/long`` directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under ``test/long`` only when using the ``ant long-test`` target. 
- -DTests -====== - -One way of doing integration or system testing at larger scale is by using `dtest `_, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ``ccmlib`` from the `ccm `_ project. Dtests will setup clusters using this library just as you do running ad-hoc ``ccm`` commands on your local machine. Afterwards dtests will use the `Python driver `_ to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes. - -Using dtests helps us to prevent regression bugs by continually executing tests on the `CI server `_ against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration `here `_. - -The best way to learn how to write dtests is probably by reading the introduction "`How to Write a Dtest `_" and by looking at existing, recently updated tests in the project. New tests must follow certain `style conventions `_ that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefor you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR. - -Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will able to support you, and in some cases they may offer to write a dtest for you. - -Performance Testing -=================== - -Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable. - -Cassandra Stress Tool ---------------------- - -See :ref:`cassandra_stress` - -cstar_perf ----------- - -Another tool available on github is `cstar_perf `_ that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it. - -CircleCI --------- -Cassandra ships with a default `CircleCI `_ configuration, to enable running tests on your branches, you need to go the CircleCI website, click "Login" and log in with your github account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ``ant eclipse-warnings`` and ``ant test`` will be run. If you up the parallelism to 4, it also runs ``ant long-test``, ``ant test-compression`` and ``ant stress-test`` - - diff --git a/src/doc/4.0-alpha3/_sources/faq/index.rst.txt b/src/doc/4.0-alpha3/_sources/faq/index.rst.txt deleted file mode 100644 index acb7538d6..000000000 --- a/src/doc/4.0-alpha3/_sources/faq/index.rst.txt +++ /dev/null @@ -1,299 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Frequently Asked Questions -========================== - -- :ref:`why-cant-list-all` -- :ref:`what-ports` -- :ref:`what-happens-on-joins` -- :ref:`asynch-deletes` -- :ref:`one-entry-ring` -- :ref:`can-large-blob` -- :ref:`nodetool-connection-refused` -- :ref:`to-batch-or-not-to-batch` -- :ref:`selinux` -- :ref:`how-to-unsubscribe` -- :ref:`cassandra-eats-all-my-memory` -- :ref:`what-are-seeds` -- :ref:`are-seeds-SPOF` -- :ref:`why-message-dropped` -- :ref:`oom-map-failed` -- :ref:`what-on-same-timestamp-update` -- :ref:`why-bootstrapping-stream-error` - -.. _why-cant-list-all: - -Why can't I set ``listen_address`` to listen on 0.0.0.0 (all my addresses)? ---------------------------------------------------------------------------- - -Cassandra is a gossip-based distributed system and ``listen_address`` is the address a node tells other nodes to reach -it at. Telling other nodes "contact me on any of my addresses" is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen. - -If you don't want to manually specify an IP to ``listen_address`` for each node in your cluster (understandable!), leave -it blank and Cassandra will use ``InetAddress.getLocalHost()`` to pick an address. Then it's up to you or your ops team -to make things resolve correctly (``/etc/hosts/``, dns, etc). - -One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769). - -See :jira:`256` and :jira:`43` for more gory details. - -.. _what-ports: - -What ports does Cassandra use? ------------------------------- - -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX. The internode communication and native protocol ports -are configurable in the :ref:`cassandra-yaml`. The JMX port is configurable in ``cassandra-env.sh`` (through JVM -options). All ports are TCP. - -.. _what-happens-on-joins: - -What happens to existing data in my cluster when I add new nodes? ------------------------------------------------------------------ - -When a new nodes joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data -to itself. See :ref:`topology-changes`. - -.. _asynch-deletes: - -I delete data from Cassandra, but disk usage stays the same. What gives? ------------------------------------------------------------------------- - -Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can't actually be removed -when you perform a delete, instead, a marker (also called a "tombstone") is written to indicate the value's new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See :ref:`compaction` for more detail. 
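To illustrate the point above with a small sketch (hypothetical keyspace and table names), a CQL ``DELETE`` only writes a tombstone; the space occupied by the shadowed data is reclaimed later, once a compaction processes the data and the tombstone together::

    -- hypothetical table: the row is not removed in place, a tombstone is written instead
    DELETE FROM my_keyspace.my_table WHERE k = 42;
    -- the shadowed SSTable data is expunged, and its disk space recovered,
    -- only when a subsequent compaction includes both the data and the tombstone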
.. _one-entry-ring:

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?
-------------------------------------------------------------------------------------------------------------------

This happens when you have the same token assigned to each node. Don't do that.

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random token on the next restart.

.. _change-replication-factor:

Can I change the replication factor (of a keyspace) on a live cluster?
-----------------------------------------------------------------------

Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data:

- :ref:`Alter ` the replication factor for the desired keyspace (using cqlsh for instance).
- If you're reducing the replication factor, run ``nodetool cleanup`` on the cluster to remove surplus replicated data. Cleanup runs on a per-node basis.
- If you're increasing the replication factor, run ``nodetool repair -full`` to ensure data is replicated according to the new configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster performance. It's highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will most likely swamp it. Note that you will need to run a full repair (``-full``) to make sure that already repaired sstables are not skipped.

.. _can-large-blob:

Can I Store (large) BLOBs in Cassandra?
---------------------------------------

Cassandra isn't optimized for large file or BLOB storage and a single ``blob`` value is always read and sent to the client in its entirety. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the ``max_mutation_size_in_kb`` configuration of the :ref:`cassandra-yaml` file (which defaults to half of ``commitlog_segment_size_in_mb``, which itself defaults to 32MB).

.. _nodetool-connection-refused:

Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives?
---------------------------------------------------------------------------------------

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

If you are not using DNS, then make sure that your ``/etc/hosts`` files are accurate on both ends. If that fails, try setting the ``-Djava.rmi.server.hostname=`` JVM option near the bottom of ``cassandra-env.sh`` to an interface that you can reach from the remote machine.

.. _to-batch-or-not-to-batch:

Will batching my operations speed up my bulk load?
--------------------------------------------------

No. Using batches to load data will generally just add "spikes" of latency.
Use asynchronous INSERTs instead, or use true :ref:`bulk-loading`.

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch stays reasonable). But never ever blindly batch everything!

.. _selinux:

On RHEL, nodes are unable to join the ring
------------------------------------------

Check if `SELinux `__ is on; if it is, turn it off.

.. _how-to-unsubscribe:

How do I unsubscribe from the email list?
-----------------------------------------

Send an email to ``user-unsubscribe@cassandra.apache.org``.

.. _cassandra-eats-all-my-memory:

Why does top report that Cassandra is using a lot more memory than the Java heap max?
--------------------------------------------------------------------------------------

Cassandra uses `Memory Mapped Files `__ (mmap) internally. That is, we use the operating system's virtual memory system to map a number of on-disk files into the Cassandra process' address space. This will "use" virtual memory (i.e. address space) and will be reported by tools like top accordingly, but on 64 bit systems virtual address space is effectively unlimited, so you should not worry about that.

What matters from the perspective of "memory use" in the sense it is normally meant, is the amount of data allocated on brk() or mmap'd /dev/zero, which represents real memory used. The key issue is that for a mmap'd file, there is never a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that you read/write.

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don't even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more detail `here `__.

.. _what-are-seeds:

What are seeds?
---------------

Seeds are used during startup to discover the cluster.

If you configure your nodes to use some node as a seed, nodes in your ring tend to send gossip messages to seeds more often (also see the :ref:`section on gossip `) than to non-seeds. In other words, seeds act as hubs of the gossip network. With seeds, each node can detect status changes of other nodes quickly.

Seeds are also referred to by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node joins the ring, it learns about the other nodes, so it doesn't need a seed on subsequent boots.

You can make a node a seed at any time. There is nothing special about seed nodes. If you list a node in the seed list, it is a seed.

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to the seed list later. If you have no data (new install) you do not have to worry about bootstrap at all.

Recommended usage of seeds:

- pick two (or more) nodes per data center as seed nodes.
- sync the seed list to all your nodes.

.. _are-seeds-SPOF:

Does single seed mean single point of failure?
----------------------------------------------

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

.. _cant-call-jmx-method:

Why can't I call JMX method X on jconsole?
------------------------------------------

Some JMX operations take array arguments, and since jconsole doesn't support array arguments, those operations can't be called from jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.

.. _why-message-dropped:

Why do I see "... messages dropped ..." in the logs?
----------------------------------------------------

This is a symptom of load shedding -- Cassandra defending itself against more requests than it can handle.

Internode messages which are received by a node but are not processed within their proper timeout (see ``read_request_timeout``, ``write_request_timeout``, ... in the :ref:`cassandra-yaml`) are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

For reads, this means a read request may not have completed.

Load shedding is part of the Cassandra architecture; if this is a persistent issue, it is generally a sign of an overloaded node or cluster.

.. _oom-map-failed:

Cassandra dies with ``java.lang.OutOfMemoryError: Map failed``
--------------------------------------------------------------

If Cassandra is dying **specifically** with the "Map failed" message, it means the OS is denying Java the ability to lock more memory. In Linux, this typically means memlock is limited. Check ``/proc//limits`` to verify this and raise it (e.g., via ulimit in bash). You may also need to increase ``vm.max_map_count``. Note that the Debian package handles this for you automatically.

.. _what-on-same-timestamp-update:

What happens if two updates are made with the same timestamp?
--------------------------------------------------------------

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger value is selected.

.. _why-bootstrapping-stream-error:

Why does bootstrapping a new node fail with a "Stream failed" error?
---------------------------------------------------------------------

Two main possibilities:

#. the GC may be creating long pauses disrupting the streaming process
#. compactions happening in the background hold up streaming long enough that the TCP connection fails

In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (default is very high on Linux).
Try to just run the following:: - - $ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5 - -To make those settings permanent, add them to your ``/etc/sysctl.conf`` file. - -Note: `GCE `__'s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment. - - - - - - - - - - - diff --git a/src/doc/4.0-alpha3/_sources/getting_started/configuring.rst.txt b/src/doc/4.0-alpha3/_sources/getting_started/configuring.rst.txt deleted file mode 100644 index e71eeedbe..000000000 --- a/src/doc/4.0-alpha3/_sources/getting_started/configuring.rst.txt +++ /dev/null @@ -1,67 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra ---------------------- - -For running Cassandra on a single node, the default configuration file present at ``./conf/cassandra.yaml`` is enough, -you shouldn't need to change any configuration. However, when you deploy a cluster of nodes, or use clients that -are not on the same host, then there are some parameters that must be changed. - -The Cassandra configuration files can be found in the ``conf`` directory of tarballs. For packages, the configuration -files will be located in ``/etc/cassandra``. - -Main runtime properties -^^^^^^^^^^^^^^^^^^^^^^^ - -Most of configuration in Cassandra is done via yaml properties that can be set in ``cassandra.yaml``. At a minimum you -should consider setting the following properties: - -- ``cluster_name``: the name of your cluster. -- ``seeds``: a comma separated list of the IP addresses of your cluster seeds. -- ``storage_port``: you don't necessarily need to change this but make sure that there are no firewalls blocking this - port. -- ``listen_address``: the IP address of your node, this is what allows other nodes to communicate with this node so it - is important that you change it. Alternatively, you can set ``listen_interface`` to tell Cassandra which interface to - use, and consecutively which address to use. Set only one, not both. -- ``native_transport_port``: as for storage\_port, make sure this port is not blocked by firewalls as clients will - communicate with Cassandra on this port. - -Changing the location of directories -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following yaml properties control the location of directories: - -- ``data_file_directories``: one or more directories where data files are located. -- ``commitlog_directory``: the directory where commitlog files are located. -- ``saved_caches_directory``: the directory where saved caches are located. -- ``hints_directory``: the directory where hints are located. 
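Putting the properties above together, a minimal ``cassandra.yaml`` fragment for one node of a small cluster might look like the following sketch (the cluster name, addresses and paths are placeholders; note that in the shipped file the seed list is nested under ``seed_provider``)::

    cluster_name: 'My Cluster'
    listen_address: 192.168.1.10
    native_transport_port: 9042
    storage_port: 7000
    seed_provider:
      - class_name: org.apache.cassandra.locator.SimpleSeedProvider
        parameters:
          - seeds: "192.168.1.10,192.168.1.11"
    data_file_directories:
      - /var/lib/cassandra/data
    commitlog_directory: /var/lib/cassandra/commitlog
    saved_caches_directory: /var/lib/cassandra/saved_caches
    hints_directory: /var/lib/cassandra/hints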
- -For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks. - -Environment variables -^^^^^^^^^^^^^^^^^^^^^ - -JVM-level settings such as heap size can be set in ``cassandra-env.sh``. You can add any additional JVM command line -argument to the ``JVM_OPTS`` environment variable; when Cassandra starts these arguments will be passed to the JVM. - -Logging -^^^^^^^ - -The logger in use is logback. You can change logging properties by editing ``logback.xml``. By default it will log at -INFO level into a file called ``system.log`` and at debug level into a file called ``debug.log``. When running in the -foreground, it will also log at INFO level to the console. - diff --git a/src/doc/4.0-alpha3/_sources/getting_started/drivers.rst.txt b/src/doc/4.0-alpha3/_sources/getting_started/drivers.rst.txt deleted file mode 100644 index 9a2c1567a..000000000 --- a/src/doc/4.0-alpha3/_sources/getting_started/drivers.rst.txt +++ /dev/null @@ -1,123 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _client-drivers: - -Client drivers --------------- - -Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver. - -Java -^^^^ - -- `Achilles `__ -- `Astyanax `__ -- `Casser `__ -- `Datastax Java driver `__ -- `Kundera `__ -- `PlayORM `__ - -Python -^^^^^^ - -- `Datastax Python driver `__ - -Ruby -^^^^ - -- `Datastax Ruby driver `__ - -C# / .NET -^^^^^^^^^ - -- `Cassandra Sharp `__ -- `Datastax C# driver `__ -- `Fluent Cassandra `__ - -Nodejs -^^^^^^ - -- `Datastax Nodejs driver `__ -- `Node-Cassandra-CQL `__ - -PHP -^^^ - -- `CQL \| PHP `__ -- `Datastax PHP driver `__ -- `PHP-Cassandra `__ -- `PHP Library for Cassandra `__ - -C++ -^^^ - -- `Datastax C++ driver `__ -- `libQTCassandra `__ - -Scala -^^^^^ - -- `Datastax Spark connector `__ -- `Phantom `__ -- `Quill `__ - -Clojure -^^^^^^^ - -- `Alia `__ -- `Cassaforte `__ -- `Hayt `__ - -Erlang -^^^^^^ - -- `CQerl `__ -- `Erlcass `__ - -Go -^^ - -- `CQLc `__ -- `Gocassa `__ -- `GoCQL `__ - -Haskell -^^^^^^^ - -- `Cassy `__ - -Rust -^^^^ - -- `Rust CQL `__ - -Perl -^^^^ - -- `Cassandra::Client and DBD::Cassandra `__ - -Elixir -^^^^^^ - -- `Xandra `__ -- `CQEx `__ - -Dart -^^^^ - -- `dart_cassandra_cql `__ diff --git a/src/doc/4.0-alpha3/_sources/getting_started/index.rst.txt b/src/doc/4.0-alpha3/_sources/getting_started/index.rst.txt deleted file mode 100644 index 4ca9c4d40..000000000 --- a/src/doc/4.0-alpha3/_sources/getting_started/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Getting Started -=============== - -This section covers how to get started using Apache Cassandra and should be the first thing to read if you are new to -Cassandra. - -.. toctree:: - :maxdepth: 2 - - installing - configuring - querying - drivers - - diff --git a/src/doc/4.0-alpha3/_sources/getting_started/installing.rst.txt b/src/doc/4.0-alpha3/_sources/getting_started/installing.rst.txt deleted file mode 100644 index fb8a0463f..000000000 --- a/src/doc/4.0-alpha3/_sources/getting_started/installing.rst.txt +++ /dev/null @@ -1,106 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Installing Cassandra --------------------- - -Prerequisites -^^^^^^^^^^^^^ - -- The latest version of Java 8, either the `Oracle Java Standard Edition 8 - `__ or `OpenJDK 8 `__. To - verify that you have the correct version of java installed, type ``java -version``. - -- For using cqlsh, the latest version of `Python 2.7 `__. To verify that you have - the correct version of Python installed, type ``python --version``. - -Installation from binary tarball files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Download the latest stable release from the `Apache Cassandra downloads website `__. - -- Untar the file somewhere, for example: - -:: - - tar -xzvf apache-cassandra-3.6-bin.tar.gz - -The files will be extracted into ``apache-cassandra-3.6``, you need to substitute 3.6 with the release number that you -have downloaded. - -- Optionally add ``apache-cassandra-3.6\bin`` to your path. -- Start Cassandra in the foreground by invoking ``bin/cassandra -f`` from the command line. Press "Control-C" to stop - Cassandra. Start Cassandra in the background by invoking ``bin/cassandra`` from the command line. Invoke ``kill pid`` - or ``pkill -f CassandraDaemon`` to stop Cassandra, where pid is the Cassandra process id, which you can find for - example by invoking ``pgrep -f CassandraDaemon``. -- Verify that Cassandra is running by invoking ``bin/nodetool status`` from the command line. -- Configuration files are located in the ``conf`` sub-directory. 
-- Since Cassandra 2.1, log and data directories are located in the ``logs`` and ``data`` sub-directories respectively. - Older versions defaulted to ``/var/log/cassandra`` and ``/var/lib/cassandra``. Due to this, it is necessary to either - start Cassandra with root privileges or change ``conf/cassandra.yaml`` to use directories owned by the current user, - as explained below in the section on changing the location of directories. - -Installation from Debian packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Add the Apache repository of Cassandra to ``/etc/apt/sources.list.d/cassandra.sources.list``, for example for version - 3.6: - -:: - - echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list - -- Add the Apache Cassandra repository keys: - -:: - - curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add - - -- Update the repositories: - -:: - - sudo apt-get update - -- If you encounter this error: - -:: - - GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA - -Then add the public key A278B781FE4B2BDA as follows: - -:: - - sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA - -and repeat ``sudo apt-get update``. The actual key may be different, you get it from the error message itself. For a -full list of Apache contributors public keys, you can refer to `this link `__. - -- Install Cassandra: - -:: - - sudo apt-get install cassandra - -- You can start Cassandra with ``sudo service cassandra start`` and stop it with ``sudo service cassandra stop``. - However, normally the service will start automatically. For this reason be sure to stop it if you need to make any - configuration changes. -- Verify that Cassandra is running by invoking ``nodetool status`` from the command line. -- The default location of configuration files is ``/etc/cassandra``. -- The default location of log and data directories is ``/var/log/cassandra/`` and ``/var/lib/cassandra``. diff --git a/src/doc/4.0-alpha3/_sources/getting_started/querying.rst.txt b/src/doc/4.0-alpha3/_sources/getting_started/querying.rst.txt deleted file mode 100644 index 55b162bb4..000000000 --- a/src/doc/4.0-alpha3/_sources/getting_started/querying.rst.txt +++ /dev/null @@ -1,52 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Inserting and querying ----------------------- - -The API to Cassandra is :ref:`CQL `, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done: - -- either using cqlsh, -- or through a client driver for Cassandra. - -CQLSH -^^^^^ - -cqlsh is a command line shell for interacting with Cassandra through CQL. 
It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:: - - $ bin/cqlsh localhost - Connected to Test Cluster at localhost:9042. - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - Use HELP for help. - cqlsh> SELECT cluster_name, listen_address FROM system.local; - - cluster_name | listen_address - --------------+---------------- - Test Cluster | 127.0.0.1 - - (1 rows) - cqlsh> - -See the :ref:`cqlsh section ` for full documentation. - -Client drivers -^^^^^^^^^^^^^^ - -A lot of client drivers are provided by the Community and a list of known drivers is provided in :ref:`the next section -`. You should refer to the documentation of each drivers for more information on how to use them. diff --git a/src/doc/4.0-alpha3/_sources/index.rst.txt b/src/doc/4.0-alpha3/_sources/index.rst.txt deleted file mode 100644 index 302f8e7fa..000000000 --- a/src/doc/4.0-alpha3/_sources/index.rst.txt +++ /dev/null @@ -1,43 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Welcome to Apache Cassandra's documentation! -============================================ - -This is the official documentation for `Apache Cassandra `__ |version|. If you would like -to contribute to this documentation, you are welcome to do so by submitting your contribution like any other patch -following `these instructions `__. - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting_started/index - new/index - architecture/index - cql/index - data_modeling/index - configuration/index - operating/index - tools/index - troubleshooting/index - development/index - faq/index - plugins/index - - bugs - contactus diff --git a/src/doc/4.0-alpha3/_sources/new/index.rst.txt b/src/doc/4.0-alpha3/_sources/new/index.rst.txt deleted file mode 100644 index 18a4b5b3b..000000000 --- a/src/doc/4.0-alpha3/_sources/new/index.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. 
limitations under the License. - -New Features in Apache Cassandra 4.0 -==================================== - -This section covers the new features in Apache Cassandra 4.0. - -.. toctree:: - :maxdepth: 2 - - java11 - virtualtables - auditlogging - fqllogging - transientreplication - diff --git a/src/doc/4.0-alpha3/_sources/new/java11.rst.txt b/src/doc/4.0-alpha3/_sources/new/java11.rst.txt deleted file mode 100644 index df906d409..000000000 --- a/src/doc/4.0-alpha3/_sources/new/java11.rst.txt +++ /dev/null @@ -1,274 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Support for Java 11 -------------------- - -In the new Java release cadence a new Java version is made available every six months. The more frequent release cycle -is favored as it brings new Java features to the developers as and when they are developed without the wait that the -earlier 3 year release model incurred. Not every Java version is a Long Term Support (LTS) version. After Java 8 the -next LTS version is Java 11. Java 9, 10, 12 and 13 are all non-LTS versions. - -One of the objectives of the Apache Cassandra 4.0 version is to support the recent LTS Java versions 8 and 11 (`CASSANDRA-9608 -`_). Java 8 and -Java 11 may be used to build and run Apache Cassandra 4.0. - -**Note**: Support for JDK 11 in Apache Cassandra 4.0 is an experimental feature, and not recommended for production use. - -Support Matrix -^^^^^^^^^^^^^^ - -The support matrix for the Java versions for compiling and running Apache Cassandra 4.0 is detailed in Table 1. The -build version is along the vertical axis and the run version is along the horizontal axis. - -Table 1 : Support Matrix for Java - -+---------------+--------------+-----------------+ -| | Java 8 (Run) | Java 11 (Run) | -+---------------+--------------+-----------------+ -| Java 8 (Build)|Supported |Supported | -+---------------+--------------+-----------------+ -| Java 11(Build)| Not Supported|Supported | -+---------------+--------------+-----------------+ - -Essentially Apache 4.0 source code built with Java 11 cannot be run with Java 8. Next, we shall discuss using each of Java 8 and 11 to build and run Apache Cassandra 4.0. - -Using Java 8 to Build -^^^^^^^^^^^^^^^^^^^^^ - -To start with, install Java 8. As an example, for installing Java 8 on RedHat Linux the command is as follows: - -:: - -$ sudo yum install java-1.8.0-openjdk-devel - -Set ``JAVA_HOME`` and ``JRE_HOME`` environment variables in the shell bash script. First, open the bash script: - -:: - -$ sudo vi ~/.bashrc - -Set the environment variables including the ``PATH``. 
- -:: - - $ export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk - $ export JRE_HOME=/usr/lib/jvm/java-1.8.0-openjdk/jre - $ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin - -Download and install Apache Cassandra 4.0 source code from the Git along with the dependencies. - -:: - - $ git clone https://github.com/apache/cassandra.git - -If Cassandra is already running stop Cassandra with the following command. - -:: - - [ec2-user@ip-172-30-3-146 bin]$ ./nodetool stopdaemon - -Build the source code from the ``cassandra`` directory, which has the ``build.xml`` build script. The Apache Ant uses the Java version set in the ``JAVA_HOME`` environment variable. - -:: - - $ cd ~/cassandra - $ ant - -Apache Cassandra 4.0 gets built with Java 8. Set the environment variable for ``CASSANDRA_HOME`` in the bash script. Also add the ``CASSANDRA_HOME/bin`` to the ``PATH`` variable. - -:: - - $ export CASSANDRA_HOME=~/cassandra - $ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$CASSANDRA_HOME/bin - -To run Apache Cassandra 4.0 with either of Java 8 or Java 11 run the Cassandra application in the ``CASSANDRA_HOME/bin`` directory, which is in the ``PATH`` env variable. - -:: - - $ cassandra - -The Java version used to run Cassandra gets output as Cassandra is getting started. As an example if Java 11 is used, the run output should include similar to the following output snippet: - -:: - - INFO [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:480 - Hostname: ip-172-30-3- - 146.ec2.internal:7000:7001 - INFO [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:487 - JVM vendor/version: OpenJDK - 64-Bit Server VM/11.0.3 - INFO [main] 2019-07-31 21:18:16,863 CassandraDaemon.java:488 - Heap size: - 1004.000MiB/1004.000MiB - -The following output indicates a single node Cassandra 4.0 cluster has started. - -:: - - INFO [main] 2019-07-31 21:18:19,687 InboundConnectionInitiator.java:130 - Listening on - address: (127.0.0.1:7000), nic: lo, encryption: enabled (openssl) - ... - ... - INFO [main] 2019-07-31 21:18:19,850 StorageService.java:512 - Unable to gossip with any - peers but continuing anyway since node is in its own seed list - INFO [main] 2019-07-31 21:18:19,864 StorageService.java:695 - Loading persisted ring state - INFO [main] 2019-07-31 21:18:19,865 StorageService.java:814 - Starting up server gossip - INFO [main] 2019-07-31 21:18:20,088 BufferPool.java:216 - Global buffer pool is enabled, - when pool is exhausted (max is 251.000MiB) it will allocate on heap - INFO [main] 2019-07-31 21:18:20,110 StorageService.java:875 - This node will not auto - bootstrap because it is configured to be a seed node. - ... - ... - INFO [main] 2019-07-31 21:18:20,809 StorageService.java:1507 - JOINING: Finish joining ring - INFO [main] 2019-07-31 21:18:20,921 StorageService.java:2508 - Node 127.0.0.1:7000 state - jump to NORMAL - -Using Java 11 to Build -^^^^^^^^^^^^^^^^^^^^^^ -If Java 11 is used to build Apache Cassandra 4.0, first Java 11 must be installed and the environment variables set. As an example, to download and install Java 11 on RedHat Linux run the following command. - -:: - - $ yum install java-11-openjdk-devel - -Set the environment variables in the bash script for Java 11. The first command is to open the bash script. - -:: - - $ sudo vi ~/.bashrc - $ export JAVA_HOME=/usr/lib/jvm/java-11-openjdk - $ export JRE_HOME=/usr/lib/jvm/java-11-openjdk/jre - $ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin - -To build source code with Java 11 one of the following two options must be used. - - 1. 
Include Apache Ant command-line option ``-Duse.jdk=11`` as follows: - :: - - $ ant -Duse.jdk=11 - - 2. Set environment variable ``CASSANDRA_USE_JDK11`` to ``true``: - :: - - $ export CASSANDRA_USE_JDK11=true - -As an example, set the environment variable ``CASSANDRA_USE_JDK11`` to ``true``. - -:: - - [ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true - [ec2-user@ip-172-30-3-146 cassandra]$ ant - Buildfile: /home/ec2-user/cassandra/build.xml - -Or, set the command-line option. - -:: - - [ec2-user@ip-172-30-3-146 cassandra]$ ant -Duse.jdk11=true - -The build output should include the following. - -:: - - _build_java: - [echo] Compiling for Java 11 - ... - ... - build: - - _main-jar: - [copy] Copying 1 file to /home/ec2-user/cassandra/build/classes/main/META-INF - [jar] Building jar: /home/ec2-user/cassandra/build/apache-cassandra-4.0-SNAPSHOT.jar - ... - ... - _build-test: - [javac] Compiling 739 source files to /home/ec2-user/cassandra/build/test/classes - [copy] Copying 25 files to /home/ec2-user/cassandra/build/test/classes - ... - ... - jar: - [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/stress/META-INF - [mkdir] Created dir: /home/ec2-user/cassandra/build/tools/lib - [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/stress.jar - [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/fqltool/META-INF - [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/fqltool.jar - - BUILD SUCCESSFUL - Total time: 1 minute 3 seconds - [ec2-user@ip-172-30-3-146 cassandra]$ - -Common Issues -^^^^^^^^^^^^^^ -One of the two options mentioned must be used to compile with JDK 11 or the build fails and the following error message is output. - -:: - - [ec2-user@ip-172-30-3-146 cassandra]$ ant - Buildfile: /home/ec2-user/cassandra/build.xml - validate-build-conf: - - BUILD FAILED - /home/ec2-user/cassandra/build.xml:293: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true must - be set when building from java 11 - Total time: 1 second - [ec2-user@ip-172-30-3-146 cassandra]$ - -The Java 11 built Apache Cassandra 4.0 source code may be run with Java 11 only. If a Java 11 built code is run with Java 8 the following error message gets output. - -:: - - [root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com - Last login: Wed Jul 31 20:47:26 2019 from 75.155.255.51 - [ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME - /usr/lib/jvm/java-1.8.0-openjdk - [ec2-user@ip-172-30-3-146 ~]$ cassandra - ... - ... - Error: A JNI error has occurred, please check your installation and try again - Exception in thread "main" java.lang.UnsupportedClassVersionError: - org/apache/cassandra/service/CassandraDaemon has been compiled by a more recent version of - the Java Runtime (class file version 55.0), this version of the Java Runtime only recognizes - class file versions up to 52.0 - at java.lang.ClassLoader.defineClass1(Native Method) - at java.lang.ClassLoader.defineClass(ClassLoader.java:763) - at ... - ... - -The ``CASSANDRA_USE_JDK11`` variable or the command-line option ``-Duse.jdk11`` cannot be used to build with Java 8. To demonstrate set ``JAVA_HOME`` to version 8. - -:: - - [root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com - Last login: Wed Jul 31 21:41:50 2019 from 75.155.255.51 - [ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME - /usr/lib/jvm/java-1.8.0-openjdk - -Set the ``CASSANDRA_USE_JDK11=true`` or command-line option ``-Duse.jdk11=true``. Subsequently, run Apache Ant to start the build. 
The build fails with error message listed. - -:: - - [ec2-user@ip-172-30-3-146 ~]$ cd - cassandra - [ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true - [ec2-user@ip-172-30-3-146 cassandra]$ ant - Buildfile: /home/ec2-user/cassandra/build.xml - - validate-build-conf: - - BUILD FAILED - /home/ec2-user/cassandra/build.xml:285: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true cannot - be set when building from java 8 - - Total time: 0 seconds - diff --git a/src/doc/4.0-alpha3/_sources/operating/audit_logging.rst.txt b/src/doc/4.0-alpha3/_sources/operating/audit_logging.rst.txt deleted file mode 100644 index 068209ee8..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/audit_logging.rst.txt +++ /dev/null @@ -1,236 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - - - -Audit Logging ------------------- - -Audit logging in Cassandra logs every incoming CQL command request, Authentication (successful as well as unsuccessful login) -to C* node. Currently, there are two implementations provided, the custom logger can be implemented and injected with the -class name as a parameter in cassandra.yaml. - -- ``BinAuditLogger`` An efficient way to log events to file in a binary format. -- ``FileAuditLogger`` Logs events to ``audit/audit.log`` file using slf4j logger. - -*Recommendation* ``BinAuditLogger`` is a community recommended logger considering the performance - -What does it capture -^^^^^^^^^^^^^^^^^^^^^^^ - -Audit logging captures following events - -- Successful as well as unsuccessful login attempts. - -- All database commands executed via Native protocol (CQL) attempted or successfully executed. - -Limitations -^^^^^^^^^^^ - -Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution time stamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log. - -What does it log -^^^^^^^^^^^^^^^^^^^ -Each audit log implementation has access to the following attributes, and for the default text based logger these fields are concatenated with `|` s to yield the final message. 
- - - ``user``: User name(if available) - - ``host``: Host IP, where the command is being executed - - ``source ip address``: Source IP address from where the request initiated - - ``source port``: Source port number from where the request initiated - - ``timestamp``: unix time stamp - - ``type``: Type of the request (SELECT, INSERT, etc.,) - - ``category`` - Category of the request (DDL, DML, etc.,) - - ``keyspace`` - Keyspace(If applicable) on which request is targeted to be executed - - ``scope`` - Table/Aggregate name/ function name/ trigger name etc., as applicable - - ``operation`` - CQL command being executed - -How to configure -^^^^^^^^^^^^^^^^^^ -Auditlog can be configured using cassandra.yaml. If you want to try Auditlog on one node, it can also be enabled and configured using ``nodetool``. - -cassandra.yaml configurations for AuditLog -""""""""""""""""""""""""""""""""""""""""""""" - - ``enabled``: This option enables/ disables audit log - - ``logger``: Class name of the logger/ custom logger. - - ``audit_logs_dir``: Auditlogs directory location, if not set, default to `cassandra.logdir.audit` or `cassandra.logdir` + /audit/ - - ``included_keyspaces``: Comma separated list of keyspaces to be included in audit log, default - includes all keyspaces - - ``excluded_keyspaces``: Comma separated list of keyspaces to be excluded from audit log, default - excludes no keyspace except `system`, `system_schema` and `system_virtual_schema` - - ``included_categories``: Comma separated list of Audit Log Categories to be included in audit log, default - includes all categories - - ``excluded_categories``: Comma separated list of Audit Log Categories to be excluded from audit log, default - excludes no category - - ``included_users``: Comma separated list of users to be included in audit log, default - includes all users - - ``excluded_users``: Comma separated list of users to be excluded from audit log, default - excludes no user - - -List of available categories are: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE - -NodeTool command to enable AuditLog -""""""""""""""""""""""""""""""""""""" -``enableauditlog``: Enables AuditLog with yaml defaults. yaml configurations can be overridden using options via nodetool command. - -:: - - nodetool enableauditlog - -Options -********** - - -``--excluded-categories`` - Comma separated list of Audit Log Categories to be excluded for - audit log. If not set the value from cassandra.yaml will be used - -``--excluded-keyspaces`` - Comma separated list of keyspaces to be excluded for audit log. If - not set the value from cassandra.yaml will be used. - Please remeber that `system`, `system_schema` and `system_virtual_schema` are excluded by default, - if you are overwriting this option via nodetool, - remember to add these keyspaces back if you dont want them in audit logs - -``--excluded-users`` - Comma separated list of users to be excluded for audit log. If not - set the value from cassandra.yaml will be used - -``--included-categories`` - Comma separated list of Audit Log Categories to be included for - audit log. If not set the value from cassandra.yaml will be used - -``--included-keyspaces`` - Comma separated list of keyspaces to be included for audit log. If - not set the value from cassandra.yaml will be used - -``--included-users`` - Comma separated list of users to be included for audit log. If not - set the value from cassandra.yaml will be used - -``--logger`` - Logger name to be used for AuditLogging. Default BinAuditLogger. 
If not set the value from cassandra.yaml will be used.

NodeTool command to disable AuditLog
"""""""""""""""""""""""""""""""""""""""

``disableauditlog``: Disables AuditLog.

::

    nodetool disableauditlog

NodeTool command to reload AuditLog filters
"""""""""""""""""""""""""""""""""""""""""""""

``enableauditlog``: The NodeTool enableauditlog command can be used to reload auditlog filters when called with the default or previous ``loggername`` and updated filters.

E.g.,

::

    nodetool enableauditlog --loggername --included-keyspaces

View the contents of AuditLog Files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``auditlogviewer`` is the new tool introduced to help view the contents of the binlog file in a human-readable text format.

::

    auditlogviewer [...] [options]

Options
""""""""

``-f,--follow``
    Upon reaching the end of the log continue indefinitely waiting for more records

``-r,--roll_cycle``
    How often the log file was rolled. May be necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY, DAILY). Default HOURLY.

``-h,--help``
    Display this help message

For example, to dump the contents of audit log files on the console:

::

    auditlogviewer /logs/cassandra/audit

Sample output
"""""""""""""

::

    LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1"

Configuring BinAuditLogger
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use ``BinAuditLogger`` as a logger in AuditLogging, set the logger to ``BinAuditLogger`` in cassandra.yaml under the ``audit_logging_options`` section. ``BinAuditLogger`` can be further configured using its advanced options in cassandra.yaml.

Advanced Options for BinAuditLogger
""""""""""""""""""""""""""""""""""""""

``block``
    Indicates if the AuditLog should block if it falls behind or should drop audit log records. Default is set to ``true`` so that AuditLog records won't be lost.

``max_queue_weight``
    Maximum weight of the in-memory queue for records waiting to be written to the audit log file before blocking or dropping the log records. Default is set to ``256 * 1024 * 1024``.

``max_log_size``
    Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is set to ``16L * 1024L * 1024L * 1024L``.

``roll_cycle``
    How often to roll Audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY. For more options, refer: net.openhft.chronicle.queue.RollCycles. Default is set to ``"HOURLY"``.

Configuring FileAuditLogger
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use ``FileAuditLogger`` as a logger in AuditLogging, apart from setting the class name in cassandra.yaml, the following configuration is needed to have the audit log events flow through a separate log file instead of system.log:

.. code-block:: xml

    <appender name="AUDIT" class="ch.qos.logback.core.rolling.RollingFileAppender">
      <file>${cassandra.logdir}/audit/audit.log</file>
      <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
        <fileNamePattern>${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
        <maxFileSize>50MB</maxFileSize>
        <maxHistory>30</maxHistory>
        <totalSizeCap>5GB</totalSizeCap>
      </rollingPolicy>
      <encoder>
        <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern>
      </encoder>
    </appender>

    <logger name="org.apache.cassandra.audit" additivity="false" level="INFO">
      <appender-ref ref="AUDIT" />
    </logger>

diff --git a/src/doc/4.0-alpha3/_sources/operating/backups.rst.txt b/src/doc/4.0-alpha3/_sources/operating/backups.rst.txt deleted file mode 100644 index c071e83b5..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/backups.rst.txt +++ /dev/null @@ -1,22 +0,0 @@
.. Licensed to the Apache Software Foundation (ASF) under one
.. or more contributor license agreements.
See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Backups -======= - -.. todo:: TODO diff --git a/src/doc/4.0-alpha3/_sources/operating/bloom_filters.rst.txt b/src/doc/4.0-alpha3/_sources/operating/bloom_filters.rst.txt deleted file mode 100644 index 0b37c18da..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/bloom_filters.rst.txt +++ /dev/null @@ -1,65 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Bloom Filters -------------- - -In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter. - -Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file. - -While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the -the ``bloom_filter_fp_chance`` to a float between 0 and 1. - -The default value for ``bloom_filter_fp_chance`` is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases. - -Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the ``bloom_filter_fp_chance`` gets closer to 0), memory usage -increases non-linearly - the bloom filter for ``bloom_filter_fp_chance = 0.01`` will require about three times as much -memory as the same table with ``bloom_filter_fp_chance = 0.1``. - -Typical values for ``bloom_filter_fp_chance`` are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. 
The parameter should be tuned -by use case: - -- Users with more RAM and slower disks may benefit from setting the ``bloom_filter_fp_chance`` to a numerically lower - number (such as 0.01) to avoid excess IO operations -- Users with less RAM, more dense nodes, or very fast disks may tolerate a higher ``bloom_filter_fp_chance`` in order to - save RAM at the expense of excess IO operations -- In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics - workloads), setting the ``bloom_filter_fp_chance`` to a much higher number is acceptable. - -Changing -^^^^^^^^ - -The bloom filter false positive chance is visible in the ``DESCRIBE TABLE`` output as the field -``bloom_filter_fp_chance``. Operators can change the value with an ``ALTER TABLE`` statement: -:: - - ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01 - -Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ``ALTER TABLE`` statement, new -files on disk will be written with the new ``bloom_filter_fp_chance``, but existing sstables will not be modified until -they are compacted - if an operator needs a change to ``bloom_filter_fp_chance`` to take effect, they can trigger an -SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the progress. diff --git a/src/doc/4.0-alpha3/_sources/operating/bulk_loading.rst.txt b/src/doc/4.0-alpha3/_sources/operating/bulk_loading.rst.txt deleted file mode 100644 index c8224d5cb..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/bulk_loading.rst.txt +++ /dev/null @@ -1,24 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _bulk-loading: - -Bulk Loading ------------- - -.. todo:: TODO diff --git a/src/doc/4.0-alpha3/_sources/operating/cdc.rst.txt b/src/doc/4.0-alpha3/_sources/operating/cdc.rst.txt deleted file mode 100644 index a7177b544..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/cdc.rst.txt +++ /dev/null @@ -1,96 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. 
distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.

.. highlight:: none

Change Data Capture
-------------------

Overview
^^^^^^^^

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the table property ``cdc=true`` (either when :ref:`creating the table ` or :ref:`altering it `). Upon CommitLogSegment creation, a hard-link to the segment is created in the directory specified in ``cassandra.yaml``. On segment fsync to disk, if CDC data is present anywhere in the segment a _cdc.idx file is also created with the integer offset of how much data in the original segment is persisted to disk. Upon final segment flush, a second line with the human-readable word "COMPLETED" will be added to the _cdc.idx file indicating that Cassandra has completed all processing on the file.

We use an index file rather than just encouraging clients to parse the log in real time off a memory-mapped handle because data can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx file will ensure that you only parse CDC data for data that is durable.

A threshold of total disk space allowed is specified in the yaml; once it is reached, newly allocated CommitLogSegments will not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory.

Configuration
^^^^^^^^^^^^^

Enabling or disabling CDC on a table
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

CDC is enabled or disabled through the `cdc` table property, for instance::

    CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;

    ALTER TABLE foo WITH cdc=true;

    ALTER TABLE foo WITH cdc=false;

cassandra.yaml parameters
~~~~~~~~~~~~~~~~~~~~~~~~~

The following `cassandra.yaml` options are available for CDC:

``cdc_enabled`` (default: false)
    Enable or disable CDC operations node-wide.
``cdc_raw_directory`` (default: ``$CASSANDRA_HOME/data/cdc_raw``)
    Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
``cdc_free_space_in_mb`` (default: min of 4096 and 1/8th volume space)
    Calculated as the sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in ``cdc_raw_directory``.
``cdc_free_space_check_interval_ms`` (default: 250)
    When at capacity, we limit the frequency with which we re-calculate the space taken up by ``cdc_raw_directory`` to prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.

.. _reading-commitlogsegments:

Reading CommitLogSegments
^^^^^^^^^^^^^^^^^^^^^^^^^
Use a `CommitLogReader.java `__. Usage is `fairly straightforward `__ with a `variety of signatures `__ available for use. In order to handle mutations read from disk, implement `CommitLogReadHandler `__.

Warnings
^^^^^^^^

**Do not enable CDC without some kind of consumption process in place.**

If CDC is enabled on a node and then on a table, ``cdc_free_space_in_mb`` will fill up and writes to CDC-enabled tables will be rejected unless some consumption process is in place.
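For reference, the node-wide settings described above might be set in ``cassandra.yaml`` roughly as follows (the directory path is a placeholder; per-table enabling is still done with the ``cdc`` table property shown earlier)::

    cdc_enabled: true
    # Flushed CommitLogSegments containing CDC data are hard-linked here; the
    # consumer must delete files it has parsed, or writes to CDC-enabled tables
    # will eventually be rejected once the space threshold is reached.
    cdc_raw_directory: /var/lib/cassandra/cdc_raw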
- -Further Reading -^^^^^^^^^^^^^^^ - -- `JIRA ticket `__ -- `JIRA ticket `__ diff --git a/src/doc/4.0-alpha3/_sources/operating/compaction.rst.txt b/src/doc/4.0-alpha3/_sources/operating/compaction.rst.txt deleted file mode 100644 index ace9aa9e4..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/compaction.rst.txt +++ /dev/null @@ -1,447 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _compaction: - -Compaction ---------- - -Types of compaction -^^^^^^^^^^^^^^^^^^^ - -The concept of compaction is used for different kinds of operations in Cassandra; what these operations have in common -is that they take one or more sstables and output new sstables. The types of compaction are: - -Minor compaction - triggered automatically in Cassandra. -Major compaction - a user executes a compaction over all sstables on the node. -User defined compaction - a user triggers a compaction on a given set of sstables. -Scrub - try to fix any broken sstables. This can actually remove valid data if that data is corrupted; if that happens you - will need to run a full repair on the node. -Upgradesstables - upgrade sstables to the latest version. Run this after upgrading to a new major version. -Cleanup - remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been - bootstrapped since that node will take ownership of some ranges from those nodes. -Secondary index rebuild - rebuild the secondary indexes on the node. -Anticompaction - after repair the ranges that were actually repaired are split out of the sstables that existed when repair started. -Sub range compaction - It is possible to only compact a given sub range - this could be useful if you know a token that has been - misbehaving - either gathering many updates or many deletes. (``nodetool compact -st x -et y``) will pick - all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will - most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS - the resulting sstable will end up in L0. - -When is a minor compaction triggered? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -# When an sstable is added to the node through flushing/streaming etc. -# When autocompaction is enabled after being disabled (``nodetool enableautocompaction``) -# When compaction adds new sstables. -# A check for new minor compactions every 5 minutes. - -Merging sstables -^^^^^^^^^^^^^^^^ - -Compaction is about merging sstables; since partitions in sstables are sorted based on the hash of the partition key, it -is possible to efficiently merge separate sstables. The content of each partition is also sorted, so each partition can be -merged efficiently.
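The compaction behaviour described in this section is configured per table through the ``compaction`` option. As a sketch (the keyspace, table, and strategy choice are illustrative only), a strategy can be chosen when the table is created, or changed later with ``ALTER TABLE``::

    CREATE TABLE ks.events (id int PRIMARY KEY, payload text)
        WITH compaction = {'class': 'SizeTieredCompactionStrategy'};

    ALTER TABLE ks.events
        WITH compaction = {'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 160};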
- -Tombstones and Garbage Collection (GC) Grace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Why Tombstones -~~~~~~~~~~~~~~ - -When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra. - -Deletes without tombstones -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Imagine a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If one of the nodes fails and our delete operation only removes existing values, we can end up with a cluster that -looks like:: - - [], [], [A] - -Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:: - - [A], [A], [A] - -This would cause our data to be resurrected even though it had been -deleted. - -Deletes with Tombstones -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting again with a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If instead of removing data we add a tombstone record, our single node failure situation will look like this.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A] - -Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]] - -Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently consume disk space. To avoid -keeping tombstones forever we have a parameter known as ``gc_grace_seconds`` for every table in Cassandra. - -The gc_grace_seconds parameter and Tombstone Removal -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The table level ``gc_grace_seconds`` parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After ``gc_grace_seconds`` has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to -be able to drop an actual tombstone the following needs to be true: - -- The tombstone must be older than ``gc_grace_seconds``. -- If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older - than the tombstone containing X must be included in the same compaction. We don't need to care if the partition is in - an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older - than the data it cannot shadow that data. -- If the option ``only_purge_repaired_tombstones`` is enabled, tombstones are only removed if the data has also been - repaired. - -If a node remains down or disconnected for longer than ``gc_grace_seconds`` its deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the "Deletes without Tombstones" section.
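Because a node that stays down longer than the grace period can resurrect deleted data in this way, the grace period should be sized against how quickly failed nodes are replaced or repaired in your environment. As a sketch (the keyspace, table, and values are hypothetical), ``gc_grace_seconds`` is set per table::

    CREATE TABLE ks.events (id int PRIMARY KEY, payload text)
        WITH gc_grace_seconds = 864000;   -- 10 days, the default

    ALTER TABLE ks.events WITH gc_grace_seconds = 432000;  -- 5 days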
-Note that tombstones will not be removed until a compaction event even if ``gc_grace_seconds`` has elapsed. - -The default value for ``gc_grace_seconds`` is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using ``WITH gc_grace_seconds``. - -TTL -^^^ - -Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least ``gc_grace_seconds``. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once. - -Fully expired sstables -^^^^^^^^^^^^^^^^^^^^^^ - -If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called ``sstableexpiredblockers`` that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -``TimeWindowCompactionStrategy`` (and the deprecated ``DateTieredCompactionStrategy``). With ``TimeWindowCompactionStrategy`` -it is possible to remove the guarantee (not check for shadowing data) by enabling ``unsafe_aggressive_sstable_expiration``. - -Repaired/unrepaired data -^^^^^^^^^^^^^^^^^^^^^^^^ - -With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables. - -Data directories -^^^^^^^^^^^^^^^^ - -Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding -data getting undeleted: - -- It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings - and each one can run compactions independently from the others. -- Users can backup and restore a single data directory. 
-- Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk - backing two data directories, the big one will be limited by the small one. One workaround for this is to create - more data directories backed by the big disk. - -Single sstable tombstone compaction -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When an sstable is written a histogram with the tombstone expiry times is created and this is used to try to find -sstables with very many tombstones and run single sstable compaction on that sstable in the hope of being able to drop -tombstones in that sstable. Before starting this, Cassandra also checks how likely it is that any tombstones will actually -be able to be dropped, based on how much this sstable overlaps with other sstables. To avoid most of these checks the -compaction option ``unchecked_tombstone_compaction`` can be enabled. - -.. _compaction-options: - -Common options -^^^^^^^^^^^^^^ - -There are a number of common options for all the compaction strategies: - -``enabled`` (default: true) - Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do - 'nodetool enableautocompaction' to start running compactions. -``tombstone_threshold`` (default: 0.2) - How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable. -``tombstone_compaction_interval`` (default: 86400s (1 day)) - Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure - that one sstable is not constantly getting recompacted - this option states how often we should try for a given - sstable. -``log_all`` (default: false) - New detailed compaction logging, see :ref:`below `. -``unchecked_tombstone_compaction`` (default: false) - The single sstable compaction has quite strict checks for whether it should be started; this option disables those - checks, and for some use cases this might be needed. Note that this does not change anything for the actual - compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able - to drop any tombstones. -``only_purge_repaired_tombstone`` (default: false) - Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired. -``min_threshold`` (default: 4) - Lower limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. -``max_threshold`` (default: 32) - Upper limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. - -Further, see the section on each strategy for specific additional options. - -Compaction nodetool commands -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :ref:`nodetool ` utility provides a number of commands related to compaction: - -``enableautocompaction`` - Enable compaction. -``disableautocompaction`` - Disable compaction. -``setcompactionthroughput`` - How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this - throughput. -``compactionstats`` - Statistics about current and pending compactions. -``compactionhistory`` - List details about the last compactions. -``setcompactionthreshold`` - Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
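These common options live in the same ``compaction`` map as the strategy class, so they can be adjusted with a normal schema change. A hedged example (the table is hypothetical and the values are chosen only to illustrate the syntax)::

    ALTER TABLE ks.events
        WITH compaction = {
            'class': 'SizeTieredCompactionStrategy',
            'min_threshold': 8,
            'tombstone_threshold': 0.3,
            'unchecked_tombstone_compaction': 'true'
        };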
- -Switching the compaction strategy and options using JMX -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:: - - org.apache.cassandra.db:type=ColumnFamilies,keyspace=,columnfamily= - -and the attribute to change is ``CompactionParameters`` or ``CompactionParametersJson`` if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an :ref:`ALTER TABLE ` statement - -for example:: - - { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10} - -The setting is kept until someone executes an :ref:`ALTER TABLE ` that touches the compaction -settings or restarts the node. - -.. _detailed-compaction-logging: - -More detailed compaction logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Enable with the compaction option ``log_all`` and a more detailed compaction log file will be produced in your log -directory. - -.. _STCS: - -Size Tiered Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The basic idea of ``SizeTieredCompactionStrategy`` (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within ``bucket_low`` and ``bucket_high`` of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket's sstables takes the most reads. - -Major compaction -~~~~~~~~~~~~~~~~ - -When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size. - -.. _stcs-options: - -STCS options -~~~~~~~~~~~~ - -``min_sstable_size`` (default: 50MB) - Sstables smaller than this are put in the same bucket. -``bucket_low`` (default: 0.5) - How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``bucket_low * avg_bucket_size < sstable_size`` (and the ``bucket_high`` condition holds, see below), then - the sstable is added to the bucket. -``bucket_high`` (default: 1.5) - How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``sstable_size < bucket_high * avg_bucket_size`` (and the ``bucket_low`` condition holds, see above), then - the sstable is added to the bucket. - -Defragmentation -~~~~~~~~~~~~~~~ - -Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster. - -.. _LCS: - -Leveled Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The idea of ``LeveledCompactionStrategy`` (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. 
This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here. - -When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory. - -When deciding which level to compact LCS checks the higher levels first (with LCS, a "higher" level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level. - -Major compaction -~~~~~~~~~~~~~~~~ - -It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817. - -Bootstrapping -~~~~~~~~~~~~~ - -During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done. - -STCS in L0 -~~~~~~~~~~ - -If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better. - -Starved sstables -~~~~~~~~~~~~~~~~ - -If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable\_size\_in\_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved. - -.. _lcs-options: - -LCS options -~~~~~~~~~~~ - -``sstable_size_in_mb`` (default: 160MB) - The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very - large partitions on the node. - -``fanout_size`` (default: 10) - The target size of levels increases by this fanout_size multiplier. 
You can reduce the space amplification by tuning - this option. - -LCS also supports the ``cassandra.disable_stcs_in_l0`` startup option (``-Dcassandra.disable_stcs_in_l0=true``) to avoid -doing STCS in L0. - -.. _TWCS: - -Time Window CompactionStrategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``TimeWindowCompactionStrategy`` (TWCS) is designed specifically for workloads where it's beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -``SizeTieredCompactionStrategy`` or ``LeveledCompactionStrategy``. The basic concept is that -``TimeWindowCompactionStrategy`` will create one sstable per time window, where a window is simply calculated -as the combination of two primary options: - -``compaction_window_unit`` (default: DAYS) - A Java TimeUnit (MINUTES, HOURS, or DAYS). -``compaction_window_size`` (default: 1) - The number of units that make up a window. -``unsafe_aggressive_sstable_expiration`` (default: false) - Expired sstables will be dropped without checking whether their data shadows data in other sstables. This is a potentially - risky option that can lead to data loss or deleted data re-appearing, going beyond what - `unchecked_tombstone_compaction` does for single sstable compaction. Due to the risk the JVM must also be - started with `-Dcassandra.unsafe_aggressive_sstable_expiration=true`. - -Taken together, the operator can specify windows of virtually any size, and `TimeWindowCompactionStrategy` will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using `SizeTieredCompactionStrategy`. - -Ideally, operators should select a ``compaction_window_unit`` and ``compaction_window_size`` pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -(``'compaction_window_unit':'DAYS','compaction_window_size':3``). - -TimeWindowCompactionStrategy Operational Concerns -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways: - -- If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables - and flushed into the same SSTable, where it will remain comingled. -- If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data - will be comingled and flushed into the same SSTable. - -While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL ``USING TIMESTAMP``. Additionally, users should run -frequent repairs (which stream data in such a way that it does not become comingled).
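Putting the options above together, a minimal sketch of a TWCS time-series table for the 90 day TTL / 3 day window example (the schema and keyspace are hypothetical)::

    CREATE TABLE ks.sensor_readings (
        sensor_id int,
        reading_time timestamp,
        value double,
        PRIMARY KEY (sensor_id, reading_time)
    ) WITH compaction = {
            'class': 'TimeWindowCompactionStrategy',
            'compaction_window_unit': 'DAYS',
            'compaction_window_size': 3
        }
        AND default_time_to_live = 7776000;  -- 90 days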
- -Changing TimeWindowCompactionStrategy Options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Operators wishing to enable ``TimeWindowCompactionStrategy`` on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected. - -Operators wishing to change ``compaction_window_unit`` or ``compaction_window_size`` can do so, but may trigger -additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 -hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple -windows. diff --git a/src/doc/4.0-alpha3/_sources/operating/compression.rst.txt b/src/doc/4.0-alpha3/_sources/operating/compression.rst.txt deleted file mode 100644 index b4308b31a..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/compression.rst.txt +++ /dev/null @@ -1,97 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Compression ----------- - -Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of -data on disk by compressing the SSTable in user-configurable chunks of ``chunk_length_in_kb`` kilobytes. Because Cassandra -SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates -to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when -UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full -chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so -on). - -Configuring Compression -^^^^^^^^^^^^^^^^^^^^^^^ - -Compression is configured on a per-table basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. By -default, the following options are relevant: - -- ``class`` specifies the compression class - Cassandra provides four classes (``LZ4Compressor``, - ``SnappyCompressor``, ``DeflateCompressor`` and ``ZstdCompressor``). The default is ``LZ4Compressor``. -- ``chunk_length_in_kb`` specifies the number of kilobytes of data per compression chunk. The default is 64KB. -- ``crc_check_chance`` determines how likely Cassandra is to verify the checksum on each compression chunk during - reads. The default is 1.0. -- ``compression_level`` is only applicable for ``ZstdCompressor`` and accepts values between ``-131072`` and ``22``. - The lower the level, the faster the speed (at the cost of compression). Values from 20 to 22 are called - "ultra levels" and should be used with caution, as they require more memory.
The default is 3. - -Users can set compression using the following syntax: - -:: - - CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'}; - -Or - -:: - - ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5}; - -Once enabled, compression can be disabled with ``ALTER TABLE`` setting ``enabled`` to ``false``: - -:: - - ALTER TABLE keyspace.table WITH compression = {'enabled':'false'}; - -Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ``ALTER TABLE``, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the SSTables on disk, -re-compressing the data in the process. - -Benefits and Uses -^^^^^^^^^^^^^^^^^ - -Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk. - -Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well. - -Operational Impact -^^^^^^^^^^^^^^^^^^ - -- Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per - terabyte of data on disk, though the exact usage varies with ``chunk_length_in_kb`` and compression ratios. - -- Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as - non-vnode bootstrap), the CPU overhead of compression can be a limiting factor. - -- The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a - way to ensure correctness of data on disk, compressed tables allow the user to set ``crc_check_chance`` (a float from - 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt. - -Advanced Use -^^^^^^^^^^^^ - -Advanced users can provide their own compression class by implementing the interface at -``org.apache.cassandra.io.compress.ICompressor``. diff --git a/src/doc/4.0-alpha3/_sources/operating/hardware.rst.txt b/src/doc/4.0-alpha3/_sources/operating/hardware.rst.txt deleted file mode 100644 index ad3aa8d21..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/hardware.rst.txt +++ /dev/null @@ -1,87 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Hardware Choices ----------------- - -Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM. - -CPU -^^^ -Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes. - -Memory -^^^^^^ -Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java's Xmx system parameter). In addition to -the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and -counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system's page -cache, storing recently accessed portions files in RAM for rapid re-use. - -For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest: - -- ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption -- The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM -- Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection -- Heaps larger than 12GB should consider G1GC - -Disks -^^^^^ -Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables. - -Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files. - -Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra's sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it's important that the commitlog -(``commitlog_directory``) be on one physical disk (not simply a partition, but a physical disk), and the data files -(``data_file_directories``) be set to a separate physical disk. 
By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk. - -In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it's typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5. - -Common Cloud Choices -^^^^^^^^^^^^^^^^^^^^ - -Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include: - -- m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate - workloads -- i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs -- m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) - storage - -Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives. diff --git a/src/doc/4.0-alpha3/_sources/operating/hints.rst.txt b/src/doc/4.0-alpha3/_sources/operating/hints.rst.txt deleted file mode 100644 index f79f18ab7..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/hints.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Hints ------ - -.. todo:: todo diff --git a/src/doc/4.0-alpha3/_sources/operating/index.rst.txt b/src/doc/4.0-alpha3/_sources/operating/index.rst.txt deleted file mode 100644 index e2cead255..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Operating Cassandra -=================== - -.. toctree:: - :maxdepth: 2 - - snitch - topo_changes - repair - read_repair - hints - compaction - bloom_filters - compression - cdc - backups - bulk_loading - metrics - security - hardware - diff --git a/src/doc/4.0-alpha3/_sources/operating/metrics.rst.txt b/src/doc/4.0-alpha3/_sources/operating/metrics.rst.txt deleted file mode 100644 index e87bd5ac1..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/metrics.rst.txt +++ /dev/null @@ -1,789 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _monitoring-metrics: - -Monitoring ----------- - -Metrics in Cassandra are managed using the `Dropwizard Metrics `__ library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of `built in -`__ and `third party -`__ reporter plugins. - -Metrics are collected for a single node. It's up to the operator to use an external monitoring system to aggregate them. - -Metric Types -^^^^^^^^^^^^ -All metrics reported by cassandra fit into one of the following types. - -``Gauge`` - An instantaneous measurement of a value. - -``Counter`` - A gauge for an ``AtomicLong`` instance. Typically this is consumed by monitoring the change since the last call to - see if there is a large increase compared to the norm. - -``Histogram`` - Measures the statistical distribution of values in a stream of data. - - In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th - percentiles. - -``Timer`` - Measures both the rate that a particular piece of code is called and the histogram of its duration. - -``Latency`` - Special type that tracks latency (in microseconds) with a ``Timer`` plus a ``Counter`` that tracks the total latency - accrued since starting. The former is useful if you track the change in total latency since the last check. Each - metric name of this type will have 'Latency' and 'TotalLatency' appended to it. - -``Meter`` - A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving - average throughputs. - -.. _table-metrics: - -Table Metrics -^^^^^^^^^^^^^ - -Each table in Cassandra has metrics responsible for tracking its state and performance. - -The metric names are all appended with the specific ``Keyspace`` and ``Table`` name. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Table...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Table keyspace= scope=
name=`` - -.. NOTE:: - There is a special table called '``all``' without a keyspace. This represents the aggregation of metrics across - **all** tables and keyspaces on the node. - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -MemtableOnHeapSize Gauge Total amount of data stored in the memtable that resides **on**-heap, including column related overhead and partitions overwritten. -MemtableOffHeapSize Gauge Total amount of data stored in the memtable that resides **off**-heap, including column related overhead and partitions overwritten. -MemtableLiveDataSize Gauge Total amount of live data stored in the memtable, excluding any data structure overhead. -AllMemtablesOnHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **on**-heap. -AllMemtablesOffHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **off**-heap. -AllMemtablesLiveDataSize Gauge Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead. -MemtableColumnsCount Gauge Total number of columns present in the memtable. -MemtableSwitchCount Counter Number of times flush has resulted in the memtable being switched out. -CompressionRatio Gauge Current compression ratio for all SSTables. -EstimatedPartitionSizeHistogram Gauge Histogram of estimated partition size (in bytes). -EstimatedPartitionCount Gauge Approximate number of keys in table. -EstimatedColumnCountHistogram Gauge Histogram of estimated number of columns. -SSTablesPerReadHistogram Histogram Histogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into acoount. -ReadLatency Latency Local read latency for this table. -RangeLatency Latency Local range scan latency for this table. -WriteLatency Latency Local write latency for this table. -CoordinatorReadLatency Timer Coordinator read latency for this table. -CoordinatorWriteLatency Timer Coordinator write latency for this table. -CoordinatorScanLatency Timer Coordinator range scan latency for this table. -PendingFlushes Counter Estimated number of flush tasks pending for this table. -BytesFlushed Counter Total number of bytes flushed since server [re]start. -CompactionBytesWritten Counter Total number of bytes written by compaction since server [re]start. -PendingCompactions Gauge Estimate of number of pending compactions for this table. -LiveSSTableCount Gauge Number of SSTables on disk for this table. -LiveDiskSpaceUsed Counter Disk space used by SSTables belonging to this table (in bytes). -TotalDiskSpaceUsed Counter Total disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC'd. -MinPartitionSize Gauge Size of the smallest compacted partition (in bytes). -MaxPartitionSize Gauge Size of the largest compacted partition (in bytes). -MeanPartitionSize Gauge Size of the average compacted partition (in bytes). -BloomFilterFalsePositives Gauge Number of false positives on table's bloom filter. -BloomFilterFalseRatio Gauge False positive ratio of table's bloom filter. -BloomFilterDiskSpaceUsed Gauge Disk space used by bloom filter (in bytes). -BloomFilterOffHeapMemoryUsed Gauge Off-heap memory used by bloom filter. 
-IndexSummaryOffHeapMemoryUsed Gauge Off-heap memory used by index summary. -CompressionMetadataOffHeapMemoryUsed Gauge Off-heap memory used by compression meta data. -KeyCacheHitRate Gauge Key cache hit rate for this table. -TombstoneScannedHistogram Histogram Histogram of tombstones scanned in queries on this table. -LiveScannedHistogram Histogram Histogram of live cells scanned in queries on this table. -ColUpdateTimeDeltaHistogram Histogram Histogram of column update time delta on this table. -ViewLockAcquireTime Timer Time taken acquiring a partition lock for materialized view updates on this table. -ViewReadTime Timer Time taken during the local read of a materialized view update. -TrueSnapshotsSize Gauge Disk space used by snapshots of this table including all SSTable components. -RowCacheHitOutOfRange Counter Number of table row cache hits that do not satisfy the query filter, thus went to disk. -RowCacheHit Counter Number of table row cache hits. -RowCacheMiss Counter Number of table row cache misses. -CasPrepare Latency Latency of paxos prepare round. -CasPropose Latency Latency of paxos propose round. -CasCommit Latency Latency of paxos commit round. -PercentRepaired Gauge Percent of table data that is repaired on disk. -BytesRepaired Gauge Size of table data repaired on disk -BytesUnrepaired Gauge Size of table data unrepaired on disk -BytesPendingRepair Gauge Size of table data isolated for an ongoing incremental repair -SpeculativeRetries Counter Number of times speculative retries were sent for this table. -SpeculativeFailedRetries Counter Number of speculative retries that failed to prevent a timeout -SpeculativeInsufficientReplicas Counter Number of speculative retries that couldn't be attempted due to lack of replicas -SpeculativeSampleLatencyNanos Gauge Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency. -WaitingOnFreeMemtableSpace Histogram Histogram of time spent waiting for free memtable space, either on- or off-heap. -DroppedMutations Counter Number of dropped mutations on this table. -AnticompactionTime Timer Time spent anticompacting before a consistent repair. -ValidationTime Timer Time spent doing validation compaction during repair. -SyncTime Timer Time spent doing streaming during repair. -BytesValidated Histogram Histogram over the amount of bytes read during validation. -PartitionsValidated Histogram Histogram over the number of partitions read during validation. -BytesAnticompacted Counter How many bytes we anticompacted. -BytesMutatedAnticompaction Counter How many bytes we avoided anticompacting because the sstable was fully contained in the repaired range. -MutatedAnticompactionGauge Gauge Ratio of bytes mutated vs total bytes repaired. -======================================= ============== =========== - -Keyspace Metrics -^^^^^^^^^^^^^^^^ -Each keyspace in Cassandra has metrics responsible for tracking its state and performance. - -Most of these metrics are the same as the ``Table Metrics`` above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.keyspace..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Keyspace scope= name=`` - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -WriteFailedIdeaCL Counter Number of writes that failed to achieve the configured ideal consistency level or 0 if none is configured -IdealCLWriteLatency Latency Coordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured -RepairTime Timer Total time spent as repair coordinator. -RepairPrepareTime Timer Total time spent preparing for repair. -======================================= ============== =========== - -ThreadPool Metrics -^^^^^^^^^^^^^^^^^^ - -Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It's important to monitor the state of these thread pools since they can tell you how saturated a -node is. - -The metric names are all appended with the specific ``ThreadPool`` name. The thread pools are also categorized under a -specific type. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ThreadPools...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ThreadPools path= scope= name=`` - -===================== ============== =========== -Name Type Description -===================== ============== =========== -ActiveTasks Gauge Number of tasks being actively worked on by this pool. -PendingTasks Gauge Number of queued tasks queued up on this pool. -CompletedTasks Counter Number of tasks completed. -TotalBlockedTasks Counter Number of tasks that were blocked due to queue saturation. -CurrentlyBlockedTask Counter Number of tasks that are currently blocked due to queue saturation but on retry will become unblocked. -MaxPoolSize Gauge The maximum number of threads in this pool. -MaxTasksQueued Gauge The maximum number of tasks queued before a task get blocked. -===================== ============== =========== - -The following thread pools can be monitored. 
- -============================ ============== =========== -Name Type Description -============================ ============== =========== -Native-Transport-Requests transport Handles client CQL requests -CounterMutationStage request Responsible for counter writes -ViewMutationStage request Responsible for materialized view writes -MutationStage request Responsible for all other writes -ReadRepairStage request ReadRepair happens on this thread pool -ReadStage request Local reads run on this thread pool -RequestResponseStage request Coordinator requests to the cluster run on this thread pool -AntiEntropyStage internal Builds merkle tree for repairs -CacheCleanupExecutor internal Cache maintenance performed on this thread pool -CompactionExecutor internal Compactions are run on these threads -GossipStage internal Handles gossip requests -HintsDispatcher internal Performs hinted handoff -InternalResponseStage internal Responsible for intra-cluster callbacks -MemtableFlushWriter internal Writes memtables to disk -MemtablePostFlush internal Cleans up commit log after memtable is written to disk -MemtableReclaimMemory internal Memtable recycling -MigrationStage internal Runs schema migrations -MiscStage internal Misceleneous tasks run here -PendingRangeCalculator internal Calculates token range -PerDiskMemtableFlushWriter_0 internal Responsible for writing a spec (there is one of these per disk 0-N) -Sampler internal Responsible for re-sampling the index summaries of SStables -SecondaryIndexManagement internal Performs updates to secondary indexes -ValidationExecutor internal Performs validation compaction or scrubbing -ViewBuildExecutor internal Performs materialized views initial build -============================ ============== =========== - -.. |nbsp| unicode:: 0xA0 .. nonbreaking space - -Client Request Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Client requests have their own set of metrics that encapsulate the work happening at coordinator level. - -Different types of client requests are broken down by ``RequestType``. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ClientRequest..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ClientRequest scope= name=`` - - -:RequestType: CASRead -:Description: Metrics related to transactional read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction read latency. - Unavailables Counter Number of unavailable exceptions encountered. - UnfinishedCommit Counter Number of transactions that were committed on read. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended reads were encountered - ===================== ============== ============================================================= - -:RequestType: CASWrite -:Description: Metrics related to transactional write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. 
- Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction write latency. - UnfinishedCommit Counter Number of transactions that were committed on write. - ConditionNotMet Counter Number of transaction preconditions that did not match current values. - ContentionHistogram Histogram How many contended writes were encountered. - MutationSizeHistogram Histogram Total size in bytes of the request's mutations. - ===================== ============== ============================================================= - - -:RequestType: Read -:Description: Metrics related to standard read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of read failures encountered. - |nbsp| Latency Read latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: RangeSlice -:Description: Metrics related to token range read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of range query failures encountered. - |nbsp| Latency Range query latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: Write -:Description: Metrics related to regular write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of write failures encountered. - |nbsp| Latency Write latency. - Unavailables Counter Number of unavailable exceptions encountered. - MutationSizeHistogram Histogram Total size in bytes of the request's mutations. - ===================== ============== ============================================================= - - -:RequestType: ViewWrite -:Description: Metrics related to materialized view write requests. -:Metrics: - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - Unavailables Counter Number of unavailable exceptions encountered. - ViewReplicasAttempted Counter Total number of attempted view replica writes. - ViewReplicasSuccess Counter Total number of succeeded view replica writes. - ViewPendingMutations Gauge ViewReplicasAttempted - ViewReplicasSuccess. - ViewWriteLatency Timer Time between when mutation is applied to base table and when CL.ONE is achieved on view. - ===================== ============== ============================================================= - -Cache Metrics -^^^^^^^^^^^^^ - -Cassandra caches have metrics to track the effectiveness of the caches, though the ``Table Metrics`` might be more useful.
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Cache..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Cache scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Cache capacity in bytes. -Entries Gauge Total number of cache entries. -FifteenMinuteCacheHitRate Gauge 15m cache hit rate. -FiveMinuteCacheHitRate Gauge 5m cache hit rate. -OneMinuteCacheHitRate Gauge 1m cache hit rate. -HitRate Gauge All time cache hit rate. -Hits Meter Total number of cache hits. -Misses Meter Total number of cache misses. -MissLatency Timer Latency of misses. -Requests Gauge Total number of cache requests. -Size Gauge Total size of occupied cache, in bytes. -========================== ============== =========== - -The following caches are covered: - -============================ =========== -Name Description -============================ =========== -CounterCache Keeps hot counters in memory for performance. -ChunkCache In process uncompressed page cache. -KeyCache Cache for partition to sstable offsets. -RowCache Cache for rows kept in memory. -============================ =========== - -.. NOTE:: - Misses and MissLatency are only defined for the ChunkCache - -CQL Metrics -^^^^^^^^^^^ - -Metrics specific to CQL prepared statement caching. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CQL.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CQL name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -PreparedStatementsCount Gauge Number of cached prepared statements. -PreparedStatementsEvicted Counter Number of prepared statements evicted from the prepared statement cache -PreparedStatementsExecuted Counter Number of prepared statements executed. -RegularStatementsExecuted Counter Number of **non** prepared statements executed. -PreparedStatementsRatio Gauge Percentage of statements that are prepared vs unprepared. -========================== ============== =========== - -.. _dropped-metrics: - -DroppedMessage Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by ``Hinted Handoff`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.DroppedMessage..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=DroppedMessage scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CrossNodeDroppedLatency Timer The dropped latency across nodes. -InternalDroppedLatency Timer The dropped latency within node. -Dropped Meter Number of dropped messages. -========================== ============== =========== - -The different types of messages tracked are: - -============================ =========== -Name Description -============================ =========== -BATCH_STORE Batchlog write -BATCH_REMOVE Batchlog cleanup (after succesfully applied) -COUNTER_MUTATION Counter writes -HINT Hint replay -MUTATION Regular writes -READ Regular reads -READ_REPAIR Read repair -PAGED_SLICE Paged read -RANGE_SLICE Token range read -REQUEST_RESPONSE RPC Callbacks -_TRACE Tracing writes -============================ =========== - -Streaming Metrics -^^^^^^^^^^^^^^^^^ - -Metrics reported during ``Streaming`` operations, such as repair, bootstrap, rebuild. 
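While a repair, bootstrap or rebuild is in flight, the per-peer transfer progress can also be watched from the command line with ``nodetool netstats``; a minimal sketch::

    # Show active streaming sessions and per-peer bytes sent/received
    nodetool netstats

    # Poll every few seconds during a long-running operation
    watch -n 5 nodetool netstats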
-The streaming metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Streaming..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Streaming scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -IncomingBytes Counter Number of bytes streamed to this node from the peer. -OutgoingBytes Counter Number of bytes streamed to the peer endpoint from this node. -========================== ============== =========== - - -Compaction Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to ``Compaction`` work. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Compaction.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Compaction name=`` - -========================== ======================================== =============================================== -Name Type Description -========================== ======================================== =============================================== -BytesCompacted Counter Total number of bytes compacted since server [re]start. -PendingTasks Gauge Estimated number of compactions remaining to perform. -CompletedTasks Gauge Number of completed compactions since server [re]start. -TotalCompactionsCompleted Meter Throughput of completed compactions since server [re]start. -PendingTasksByTableName Gauge>> Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in ``Table Metrics``. -========================== ======================================== =============================================== - -CommitLog Metrics -^^^^^^^^^^^^^^^^^ - -Metrics specific to the ``CommitLog``. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CommitLog.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CommitLog name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CompletedTasks Gauge Total number of commit log messages written since [re]start. -PendingTasks Gauge Number of commit log messages written but yet to be fsync'd. -TotalCommitLogSize Gauge Current size, in bytes, used by all the commit log segments. -WaitingOnSegmentAllocation Timer Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero. -WaitingOnCommit Timer The time spent waiting on CL fsync; for Periodic this only occurs when the sync is lagging its sync interval. -========================== ============== =========== - -Storage Metrics -^^^^^^^^^^^^^^^ - -Metrics specific to the storage engine. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Storage.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Storage name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Exceptions Counter Number of internal exceptions caught. Under normal conditions this should be zero. -Load Counter Size, in bytes, of the on disk data this node manages. -TotalHints Counter Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint. -TotalHintsInProgress Counter Number of hints currently attempting to be sent.
-========================== ============== =========== - -HintedHandoff Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in ``Storage Metrics``. - -These metrics include the peer endpoint **in the metric name**. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintedHandOffManager.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintedHandOffManager name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Hints_created- Counter Number of hints on disk for this peer. -Hints_not_stored- Counter Number of hints not stored for this peer, due to being down past the configured hint window. -=========================== ============== =========== - -HintsService Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in ``Storage Metrics``. - -These metrics include the peer endpoint **in the metric name**. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintsService.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintsService name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -HintsSucceeded Meter A meter of the hints successfully delivered. -HintsFailed Meter A meter of the hints that failed to be delivered. -HintsTimedOut Meter A meter of the hints that timed out. -Hint_delays Histogram Histogram of hint delivery delays (in milliseconds). -Hint_delays- Histogram Histogram of hint delivery delays (in milliseconds) per peer. -=========================== ============== =========== - -SSTable Index Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the SSTable index metadata. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Index..RowIndexEntry`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -IndexedEntrySize Histogram Histogram of the on-heap size, in bytes, of the index across all SSTables. -IndexInfoCount Histogram Histogram of the number of on-heap index entries managed across all SSTables. -IndexInfoGets Histogram Histogram of the number of index seeks performed per SSTable. -=========================== ============== =========== - -BufferPool Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.BufferPool.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=BufferPool name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Size Gauge Size, in bytes, of the managed buffer pool. -Misses Meter The rate of misses in the pool. The higher this is, the more allocations are incurred. -=========================== ============== =========== - - -Client Metrics -^^^^^^^^^^^^^^ - -Metrics specific to client management.
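From Cassandra 4.0 much of this information is also surfaced on the command line by ``nodetool clientstats``; a minimal sketch (flag availability may vary between versions, so treat the option shown as an assumption)::

    # List currently connected native-protocol clients
    nodetool clientstats

    # Include per-connection details such as user, protocol version and SSL status
    nodetool clientstats --all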
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Client.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Client name=`` - -============================== =============================== =========== -Name Type Description -============================== =============================== =========== -connectedNativeClients Gauge Number of clients connected to this node's native protocol server -connections Gauge> List of all connections and their state information -connectedNativeClientsByUser Gauge Number of connected native clients by username -============================== =============================== =========== - - -Batch Metrics -^^^^^^^^^^^^^ - -Metrics specific to batch statements. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Batch.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Batch name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -PartitionsPerCounterBatch Histogram Distribution of the number of partitions processed per counter batch -PartitionsPerLoggedBatch Histogram Distribution of the number of partitions processed per logged batch -PartitionsPerUnloggedBatch Histogram Distribution of the number of partitions processed per unlogged batch -=========================== ============== =========== - - -JVM Metrics -^^^^^^^^^^^ - -JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using `Metric Reporters`_. - -BufferPool -++++++++++ - -**Metric Name** - ``jvm.buffers..`` - -**JMX MBean** - ``java.nio:type=BufferPool name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Estimated total capacity of the buffers in this pool -Count Gauge Estimated number of buffers in the pool -Used Gauge Estimated memory that the Java virtual machine is using for this buffer pool -========================== ============== =========== - -FileDescriptorRatio -+++++++++++++++++++ - -**Metric Name** - ``jvm.fd.`` - -**JMX MBean** - ``java.lang:type=OperatingSystem name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Usage Ratio Ratio of used to total file descriptors -========================== ============== =========== - -GarbageCollector -++++++++++++++++ - -**Metric Name** - ``jvm.gc..`` - -**JMX MBean** - ``java.lang:type=GarbageCollector name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Count Gauge Total number of collections that have occurred -Time Gauge Approximate accumulated collection elapsed time in milliseconds -========================== ============== =========== - -Memory -++++++ - -**Metric Name** - ``jvm.memory..`` - -**JMX MBean** - ``java.lang:type=Memory`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -MemoryPool -++++++++++ - -**Metric Name** - 
``jvm.memory.pools..`` - -**JMX MBean** - ``java.lang:type=MemoryPool name=`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -JMX -^^^ - -Any JMX based client can access metrics from cassandra. - -If you wish to access JMX metrics over http it's possible to download `Mx4jTool `__ and -place ``mx4j-tools.jar`` into the classpath. On startup you will see in the log:: - - HttpAdaptor version 3.0.2 started on port 8081 - -To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -``conf/cassandra-env.sh`` and uncomment:: - - #MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0" - - #MX4J_PORT="-Dmx4jport=8081" - - -Metric Reporters -^^^^^^^^^^^^^^^^ - -As mentioned at the top of this section on monitoring the Cassandra metrics can be exported to a number of monitoring -system a number of `built in `__ and `third party -`__ reporter plugins. - -The configuration of these plugins is managed by the `metrics reporter config project -`__. There is a sample configuration file located at -``conf/metrics-reporter-config-sample.yaml``. - -Once configured, you simply start cassandra with the flag -``-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml``. The specified .yaml file plus any 3rd party -reporter jars must all be in Cassandra's classpath. diff --git a/src/doc/4.0-alpha3/_sources/operating/read_repair.rst.txt b/src/doc/4.0-alpha3/_sources/operating/read_repair.rst.txt deleted file mode 100644 index 0e52bf523..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/read_repair.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Read repair ------------ - -.. todo:: todo diff --git a/src/doc/4.0-alpha3/_sources/operating/repair.rst.txt b/src/doc/4.0-alpha3/_sources/operating/repair.rst.txt deleted file mode 100644 index 97115dc66..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/repair.rst.txt +++ /dev/null @@ -1,107 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _repair: - -Repair ------- - -Cassandra is designed to remain available if one of it's nodes is down or unreachable. However, when a node is down or -unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but -are a best effort, and aren't guaranteed to inform a node of 100% of the writes it missed. These inconsistencies can -eventually result in data loss as nodes are replaced or tombstones expire. - -These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their -respective datasets for their common token ranges, and streaming the differences for any out of sync sections between -the nodes. It compares the data with merkle trees, which are a hierarchy of hashes. - -Incremental and Full Repairs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that's been written since the previous incremental repair. - -Incremental repairs are the default repair type, and if run regularly, can significantly reduce the time and io cost of -performing a repair. However, it's important to understand that once an incremental repair marks data as repaired, it won't -try to repair it again. This is fine for syncing up missed writes, but it doesn't protect against things like disk corruption, -data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally. - -Usage and Best Practices -^^^^^^^^^^^^^^^^^^^^^^^^ - -Since repair can result in a lot of disk and network io, it's not run automatically by Cassandra. It is run by the operator -via nodetool. - -Incremental repair is the default and is run with the following command: - -:: - - nodetool repair - -A full repair can be run with the following command: - -:: - - nodetool repair --full - -Additionally, repair can be run on a single keyspace: - -:: - - nodetool repair [options] - -Or even on specific tables: - -:: - - nodetool repair [options] - - -The repair command only repairs token ranges on the node being repaired, it doesn't repair the whole cluster. By default, repair -will operate on all token ranges replicated by the node you're running repair on, which will cause duplicate work if you run it -on every node. The ``-pr`` flag will only repair the "primary" ranges on a node, so you can repair your entire cluster by running -``nodetool repair -pr`` on each node in a single datacenter. - -The specific frequency of repair that's right for your cluster, of course, depends on several factors. However, if you're -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don't want to run incremental repairs, a full repair every 5 days is a good place -to start. - -At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. 
Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays. - -Other Options -^^^^^^^^^^^^^ - -``-pr, --partitioner-range`` - Restricts repair to the 'primary' token ranges of the node being repaired. A primary range is just a token range for - which a node is the first replica in the ring. - -``-prv, --preview`` - Estimates the amount of streaming that would occur for the given repair command. This builds the merkle trees, and prints - the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated, - add the ``--full`` flag to estimate a full repair. - -``-vd, --validate`` - Verifies that the repaired data is the same across all nodes. Similiar to ``--preview``, this builds and compares merkle - trees of repaired data, but doesn't do any streaming. This is useful for troubleshooting. If this shows that the repaired - data is out of sync, a full repair should be run. - -.. seealso:: - :ref:`nodetool repair docs ` diff --git a/src/doc/4.0-alpha3/_sources/operating/security.rst.txt b/src/doc/4.0-alpha3/_sources/operating/security.rst.txt deleted file mode 100644 index c2d8b79b0..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/security.rst.txt +++ /dev/null @@ -1,441 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Security --------- -There are three main components to the security features provided by Cassandra: - -- TLS/SSL encryption for client and inter-node communication -- Client authentication -- Authorization - -By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still: - -- Craft internode messages to insert users into authentication schema -- Craft internode messages to truncate or drop schema -- Use tools such as ``sstableloader`` to overwrite ``system_auth`` tables -- Attach to the cluster directly to capture write traffic - -Correct configuration of all three security components should negate theses vectors. Therefore, understanding Cassandra's -security features is crucial to configuring your cluster to meet your security needs. - - -TLS/SSL Encryption -^^^^^^^^^^^^^^^^^^ -Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. 
Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently. - -In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can -be overidden using the settings in ``cassandra.yaml``, but this is not recommended unless there are policies in place -which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be -updated. - -FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See `the java document on FIPS `__ -for more details. - -For information on generating the keystore and truststore files used in SSL communications, see the -`java documentation on creating keystores `__ - -SSL Certificate Hot Reloading -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes. - -Certificate Hot reloading may also be triggered using the ``nodetool reloadssl`` command. Use this if you want to Cassandra to -immediately notice the changed certificates. - -Inter-node Encryption -~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing inter-node encryption are found in ``cassandra.yaml`` in the ``server_encryption_options`` -section. To enable inter-node encryption, change the ``internode_encryption`` setting from its default value of ``none`` -to one value from: ``rack``, ``dc`` or ``all``. - -Client to Node Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing client to node encryption are found in ``cassandra.yaml`` in the ``client_encryption_options`` -section. There are two primary toggles here for enabling encryption, ``enabled`` and ``optional``. - -- If neither is set to ``true``, client connections are entirely unencrypted. -- If ``enabled`` is set to ``true`` and ``optional`` is set to ``false``, all client connections must be secured. -- If both options are set to ``true``, both encrypted and unencrypted connections are supported using the same port. - Client connections using encryption with this configuration will be automatically detected and handled by the server. - -As an alternative to the ``optional`` setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set ``optional`` to false and use the ``native_transport_port_ssl`` -setting in ``cassandra.yaml`` to specify the port to be used for secure client communication. - -.. _operation-roles: - -Roles -^^^^^ - -Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -``role_manager`` setting in ``cassandra.yaml``. The default setting uses ``CassandraRoleManager``, an implementation -which stores role information in the tables of the ``system_auth`` keyspace. 
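For reference, the default entry in ``cassandra.yaml`` is shown in the sketch below; swapping in a custom ``IRoleManager`` implementation is done by replacing the class name on this line::

    # Pluggable role management; the default stores roles in the system_auth keyspace
    role_manager: CassandraRoleManager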
- -See also the :ref:`CQL documentation on roles `. - -Authentication -^^^^^^^^^^^^^^ - -Authentication is pluggable in Cassandra and is configured using the ``authenticator`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthenticator`` which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions. - -The default distribution also includes ``PasswordAuthenticator``, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication. - -.. _password-authentication: - -Enabling Password Authentication -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster. - -Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps: - -1. Open a ``cqlsh`` session and change the replication factor of the ``system_auth`` keyspace. By default, this keyspace - uses ``SimpleReplicationStrategy`` and a ``replication_factor`` of 1. It is recommended to change this for any - non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to - configure a replication factor of 3 to 5 per-DC. - -:: - - ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3}; - -2. Edit ``cassandra.yaml`` to change the ``authenticator`` option like so: - -:: - - authenticator: PasswordAuthenticator - -3. Restart the node. - -4. Open a new ``cqlsh`` session using the credentials of the default superuser: - -:: - - cqlsh -u cassandra -p cassandra - -5. During login, the credentials for the default superuser are read with a consistency level of ``QUORUM``, whereas - those for all other users (including superusers) are read at ``LOCAL_ONE``. In the interests of performance and - availability, as well as security, operators should create another superuser and disable the default one. This step - is optional, but highly recommended. While logged in as the default superuser, create another superuser role which - can be used to bootstrap further configuration. - -:: - - # create a new superuser - CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super'; - -6. Start a new cqlsh session, this time logging in as the new_superuser and disable the default superuser. - -:: - - ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false; - -7. 
Finally, set up the roles and credentials for your application users with :ref:`CREATE ROLE ` - statements. - -At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster. - -Note that using ``PasswordAuthenticator`` also requires the use of :ref:`CassandraRoleManager `. - -See also: :ref:`setting-credentials-for-internal-authentication`, :ref:`CREATE ROLE `, -:ref:`ALTER ROLE `, :ref:`ALTER KEYSPACE ` and :ref:`GRANT PERMISSION -`, - -.. _authorization: - -Authorization -^^^^^^^^^^^^^ - -Authorization is pluggable in Cassandra and is configured using the ``authorizer`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthorizer`` which performs no checking and so effectively grants all -permissions to all roles. This must be used if ``AllowAllAuthenticator`` is the configured authenticator. - -The default distribution also includes ``CassandraAuthorizer``, which does implement full permissions management -functionality and stores its data in Cassandra system tables. - -Enabling Internal Authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests. - -The following assumes that authentication has already been enabled via the process outlined in -:ref:`password-authentication`. Perform these steps to enable internal authorization across the cluster: - -1. On the selected node, edit ``cassandra.yaml`` to change the ``authorizer`` option like so: - -:: - - authorizer: CassandraAuthorizer - -2. Restart the node. - -3. Open a new ``cqlsh`` session using the credentials of a role with superuser credentials: - -:: - - cqlsh -u dba -p super - -4. Configure the appropriate access privileges for your clients using `GRANT PERMISSION `_ - statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so - disruption to clients is avoided. - -:: - - GRANT SELECT ON ks.t1 TO db_user; - -5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node - restarts and clients reconnect, the enforcement of the granted permissions will begin. - -See also: :ref:`GRANT PERMISSION `, `GRANT ALL ` and :ref:`REVOKE PERMISSION -` - -.. _auth-caching: - -Caching -^^^^^^^ - -Enabling authentication and authorization places additional load on the cluster by frequently reading from the -``system_auth`` tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from ``cassandra.yaml`` -or using a JMX client. 
The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from ``cassandra.yaml`` when the node is restarted. - -Each cache has 3 options which can be set: - -Validity Period - Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache. -Refresh Rate - Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these - async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a - shorter time than the validity period. -Max Entries - Controls the upper bound on cache size. - -The naming for these options in ``cassandra.yaml`` follows the convention: - -* ``_validity_in_ms`` -* ``_update_interval_in_ms`` -* ``_cache_max_entries`` - -Where ```` is one of ``credentials``, ``permissions``, or ``roles``. - -As mentioned, these are also exposed via JMX in the mbeans under the ``org.apache.cassandra.auth`` domain. - -JMX access -^^^^^^^^^^ - -Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra's own auth subsystem. - -The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to change the ``LOCAL_JMX`` setting to ``yes``. Under the -standard configuration, when remote JMX connections are enabled, :ref:`standard JMX authentication ` -is also switched on. - -Note that by default, local-only connections are not subject to authentication, but this can be enabled. - -If enabling remote connections, it is recommended to also use :ref:`SSL ` connections. - -Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as :ref:`nodetool `, are -correctly configured and working as expected. - -.. _standard-jmx-auth: - -Standard JMX Auth -~~~~~~~~~~~~~~~~~ - -Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -``cassandra-env.sh`` by the line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -Edit the password file to add username/password pairs: - -:: - - jmx_user jmx_password - -Secure the credentials file so that only the user running the Cassandra process can read it : - -:: - - $ chown cassandra:cassandra /etc/cassandra/jmxremote.password - $ chmod 400 /etc/cassandra/jmxremote.password - -Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in ``cassandra-env.sh``: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -Then edit the access file to grant your JMX user readwrite permission: - -:: - - jmx_user readwrite - -Cassandra must be restarted to pick up the new settings. - -See also : `Using File-Based Password Authentication In JMX -`__ - - -Cassandra Integrated Auth -~~~~~~~~~~~~~~~~~~~~~~~~~ - -An alternative to the out-of-the-box JMX auth is to useeCassandra's own authentication and/or authorization providers -for JMX clients. 
This is potentially more flexible and secure but it come with one major caveat. Namely that it is not -available until `after` a node has joined the ring, because the auth subsystem is not fully configured until that point -However, it is often critical for monitoring purposes to have JMX access particularly during bootstrap. So it is -recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, -to switch to integrated auth once the node has joined the ring and initial setup is complete. - -With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates -can be managed centrally using just ``cqlsh``. Furthermore, fine grained control over exactly which operations are -permitted on particular MBeans can be acheived via :ref:`GRANT PERMISSION `. - -To enable integrated authentication, edit ``cassandra-env.sh`` to uncomment these lines: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" - #JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" - -And disable the JMX standard auth by commenting this line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -To enable integrated authorization, uncomment this line: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" - -Check standard access control is off by ensuring this line is commented out: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as: - -:: - - CREATE ROLE jmx WITH LOGIN = false; - GRANT SELECT ON ALL MBEANS TO jmx; - GRANT DESCRIBE ON ALL MBEANS TO jmx; - GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx; - GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx; - - # Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx; - - # Grant the jmx role to one with login permissions so that it can access the JMX tooling - CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false; - GRANT jmx TO ks_user; - -Fine grained access control to individual MBeans is also supported: - -:: - - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner; - -This permits the ``ks_user`` role to invoke methods on the MBean representing a single table in ``test_keyspace``, while -granting the same permission for all table level MBeans in that keyspace to the ``ks_owner`` role. - -Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered. - -See also: :ref:`Permissions `. - -.. 
_jmx-with-ssl: - -JMX With SSL -~~~~~~~~~~~~ - -JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit -the relevant lines in ``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to uncomment and set the values of these -properties as required: - -``com.sun.management.jmxremote.ssl`` - set to true to enable SSL -``com.sun.management.jmxremote.ssl.need.client.auth`` - set to true to enable validation of client certificates -``com.sun.management.jmxremote.registry.ssl`` - enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub -``com.sun.management.jmxremote.ssl.enabled.protocols`` - by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is - not usually necessary and using the defaults is the preferred option. -``com.sun.management.jmxremote.ssl.enabled.cipher.suites`` - by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that - this is not usually necessary and using the defaults is the preferred option. -``javax.net.ssl.keyStore`` - set the path on the local filesystem of the keystore containing server private keys and public certificates -``javax.net.ssl.keyStorePassword`` - set the password of the keystore file -``javax.net.ssl.trustStore`` - if validation of client certificates is required, use this property to specify the path of the truststore containing - the public certificates of trusted clients -``javax.net.ssl.trustStorePassword`` - set the password of the truststore file - -See also: `Oracle Java7 Docs `__, -`Monitor Java with JMX `__ diff --git a/src/doc/4.0-alpha3/_sources/operating/snitch.rst.txt b/src/doc/4.0-alpha3/_sources/operating/snitch.rst.txt deleted file mode 100644 index 5f6760a41..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/snitch.rst.txt +++ /dev/null @@ -1,78 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Snitch ------- - -In cassandra, the snitch has two functions: - -- it teaches Cassandra enough about your network topology to route requests efficiently. -- it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping - machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same - "rack" (which may not actually be a physical location). - -Dynamic snitching -^^^^^^^^^^^^^^^^^ - -The dynamic snitch monitor read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is -configured with the following properties on ``cassandra.yaml``: - -- ``dynamic_snitch``: whether the dynamic snitch should be enabled or disabled. 
-- ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the more expensive part of host score - calculation. -- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero, this will allow 'pinning' of replicas to hosts - in order to increase cache capacity. -- ``dynamic_snitch_badness_threshold:``: The badness threshold will control how much worse the pinned host has to be - before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a - percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned - host was 20% worse than the fastest. - -Snitch classes -^^^^^^^^^^^^^^ - -The ``endpoint_snitch`` parameter in ``cassandra.yaml`` should be set to the class that implements -``IEndPointSnitch`` which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the snitch implementations: - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via gossip. If ``cassandra-topology.properties`` exists, - it is used as a fallback, allowing migration from the PropertyFileSnitch. - -SimpleSnitch - Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -PropertyFileSnitch - Proximity is determined by rack and data center, which are explicitly configured in - ``cassandra-topology.properties``. - -Ec2Snitch - Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. - The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this - will not work across multiple regions. - -Ec2MultiRegionSnitch - Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the - public IP as well). You will need to open the ``storage_port`` or ``ssl_storage_port`` on the public IP firewall - (For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection). - -RackInferringSnitch - Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each - node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an - example of writing a custom Snitch class and is provided in that spirit. diff --git a/src/doc/4.0-alpha3/_sources/operating/topo_changes.rst.txt b/src/doc/4.0-alpha3/_sources/operating/topo_changes.rst.txt deleted file mode 100644 index 6c8f8ecdf..000000000 --- a/src/doc/4.0-alpha3/_sources/operating/topo_changes.rst.txt +++ /dev/null @@ -1,129 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _topology-changes: - -Adding, replacing, moving and removing nodes --------------------------------------------- - -Bootstrap -^^^^^^^^^ - -Adding new nodes is called "bootstrapping". The ``num_tokens`` parameter will define the amount of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for. - -Token allocation -~~~~~~~~~~~~~~~~ - -With the default token allocation algorithm the new node will pick ``num_tokens`` random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher amount of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead. - -On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option ``-Dcassandra.allocate_tokens_for_keyspace=``, where -```` is the keyspace from which the algorithm can find the load information to optimize token assignment for. - -Manual token assignment -""""""""""""""""""""""" - -You may specify a comma-separated list of tokens manually with the ``initial_token`` ``cassandra.yaml`` parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens. - -Range streaming -~~~~~~~~~~~~~~~~ - -After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state. - -In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag ``-Dcassandra.consistent.rangemovement=false``. - -Resuming failed/hanged bootstrap -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On 2.2+, if the bootstrap process fails, it's possible to resume bootstrap from the previous saved state by calling -``nodetool bootstrap resume``. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag -``-Dcassandra.reset_bootstrap_progress=true``. - -On lower versions, when the bootstrap proces fails it is recommended to wipe the node (remove all the data), and restart -the bootstrap process again. - -Manual bootstrapping -~~~~~~~~~~~~~~~~~~~~ - -It's possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -``auto_bootstrap: false``. This may be useful when restoring a node from a backup or creating a new data-center. - -Removing nodes -^^^^^^^^^^^^^^ - -You can take a node out of the cluster with ``nodetool decommission`` to a live node, or ``nodetool removenode`` (to any -other machine) to remove a dead one. 
This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas. - -No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually. - -Moving nodes -^^^^^^^^^^^^ - -When ``num_tokens: 1`` it's possible to move the node position in the ring with ``nodetool move``. Moving is both a -convenience over and more efficient than decommission + bootstrap. After moving a node, ``nodetool cleanup`` should be -run to remove any unnecessary data. - -Replacing a dead node -^^^^^^^^^^^^^^^^^^^^^ - -In order to replace a dead node, start cassandra with the JVM startup flag -``-Dcassandra.replace_address_first_boot=``. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node to be DOWN (DN), however this node will see itself as UP -(UN). Accurate replacement state can be found in ``nodetool netstats``. - -The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will -only receive writes during the bootstrapping phase if it has a different ip address to the node that is being replaced. -(See CASSANDRA-8523 and CASSANDRA-12344) - -Once the bootstrapping is complete the node will be marked "UP". - -.. Note:: If any of the following cases apply, you **MUST** run repair to make the replaced node consistent again, since - it missed ongoing writes during/prior to bootstrapping. The *replacement* timeframe refers to the period from when the - node initially dies to when a new node completes the replacement process. - - 1. The node is down for longer than ``max_hint_window_in_ms`` before being replaced. - 2. You are replacing using the same IP address as the dead node **and** replacement takes longer than ``max_hint_window_in_ms``. - -Monitoring progress -^^^^^^^^^^^^^^^^^^^ - -Bootstrap, replace, move and remove progress can be monitored using ``nodetool netstats`` which will show the progress -of the streaming operations. - -Cleanup data after range movements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As a safety measure, Cassandra does not automatically remove data from nodes that "lose" part of their token range due -to a range movement operation (bootstrap, move, replace). Run ``nodetool cleanup`` on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node. diff --git a/src/doc/4.0-alpha3/_sources/plugins/index.rst.txt b/src/doc/4.0-alpha3/_sources/plugins/index.rst.txt deleted file mode 100644 index 4073a92cb..000000000 --- a/src/doc/4.0-alpha3/_sources/plugins/index.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Third-Party Plugins -=================== - -Available third-party plugins for Apache Cassandra - -CAPI-Rowcache -------------- - -The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments. - -The official page for the `CAPI-Rowcache plugin `__ contains further details how to build/run/download the plugin. - - -Stratio’s Cassandra Lucene Index --------------------------------- - -Stratio’s Lucene index is a Cassandra secondary index implementation based on `Apache Lucene `__. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities such as with ElasticSearch or `Apache Solr `__, including full text search capabilities, free multivariable, geospatial and bitemporal search, relevance queries and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability is guaranteed. - -The official Github repository `Cassandra Lucene Index `__ contains everything you need to build/run/configure the plugin. \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/tools/cassandra_stress.rst.txt b/src/doc/4.0-alpha3/_sources/tools/cassandra_stress.rst.txt deleted file mode 100644 index bcac54ec1..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/cassandra_stress.rst.txt +++ /dev/null @@ -1,269 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: yaml - -.. _cassandra_stress: - -Cassandra Stress ----------------- - -cassandra-stress is a tool for benchmarking and load testing a Cassandra -cluster. cassandra-stress supports testing arbitrary CQL tables and queries -to allow users to benchmark their data model. - -This documentation focuses on user mode as this allows the testing of your -actual schema. 
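Before diving into user mode, it can be useful to confirm that stress can reach your cluster at all using one of the predefined workloads. A minimal sketch (the node address and operation counts are placeholders)::

    # Populate the canned keyspace with one million rows using 50 client threads
    cassandra-stress write n=1000000 -rate threads=50 -node 10.0.0.1

    # Read part of it back to get a baseline for read latencies
    cassandra-stress read n=200000 -node 10.0.0.1

User mode, described below, replaces this canned schema with your own tables and queries.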
- -Usage -^^^^^ -There are several operation types: - - * write-only, read-only, and mixed workloads of standard data - * write-only and read-only workloads for counter columns - * user configured workloads, running custom queries on custom schemas - -The syntax is `cassandra-stress [options]`. If you want more information on a given command -or options, just run `cassandra-stress help `. - -Commands: - read: - Multiple concurrent reads - the cluster must first be populated by a write test - write: - Multiple concurrent writes against the cluster - mixed: - Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test - counter_write: - Multiple concurrent updates of counters. - counter_read: - Multiple concurrent reads of counters. The cluster must first be populated by a counterwrite test. - user: - Interleaving of user provided queries, with configurable ratio and distribution. - help: - Print help for a command or option - print: - Inspect the output of a distribution definition - legacy: - Legacy support mode - -Primary Options: - -pop: - Population distribution and intra-partition visit order - -insert: - Insert specific options relating to various methods for batching and splitting partition updates - -col: - Column details such as size and count distribution, data generator, names, comparator and if super columns should be used - -rate: - Thread count, rate limit or automatic mode (default is auto) - -mode: - Thrift or CQL with options - -errors: - How to handle errors when encountered during stress - -sample: - Specify the number of samples to collect for measuring latency - -schema: - Replication settings, compression, compaction, etc. - -node: - Nodes to connect to - -log: - Where to log progress to, and the interval at which to do it - -transport: - Custom transport factories - -port: - The port to connect to cassandra nodes on - -sendto: - Specify a stress server to send this command to - -graph: - Graph recorded metrics - -tokenrange: - Token range settings - - -Suboptions: - Every command and primary option has its own collection of suboptions. These are too numerous to list here. - For information on the suboptions for each command or option, please use the help command, - `cassandra-stress help `. - -User mode -^^^^^^^^^ - -User mode allows you to use your stress your own schemas. This can save time in -the long run rather than building an application and then realising your schema -doesn't scale. - -Profile -+++++++ - -User mode requires a profile defined in YAML. -Multiple YAML files may be specified in which case operations in the ops argument are referenced as specname.opname. - -An identifier for the profile:: - - specname: staff_activities - -The keyspace for the test:: - - keyspace: staff - -CQL for the keyspace. Optional if the keyspace already exists:: - - keyspace_definition: | - CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -The table to be stressed:: - - table: staff_activities - -CQL for the table. Optional if the table already exists:: - - table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when, what) - ) - - -Optional meta information on the generated columns in the above table. -The min and max only apply to text and blob types. 
-The distribution field represents the total unique population -distribution of that column across rows:: - - columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -Supported types are: - -An exponential distribution over the range [min..max]:: - - EXP(min..max) - -An extreme value (Weibull) distribution over the range [min..max]:: - - EXTREME(min..max,shape) - -A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:: - - GAUSSIAN(min..max,stdvrng) - -A gaussian/normal distribution, with explicitly defined mean and stdev:: - - GAUSSIAN(min..max,mean,stdev) - -A uniform distribution over the range [min, max]:: - - UNIFORM(min..max) - -A fixed distribution, always returning the same value:: - - FIXED(val) - -If preceded by ~, the distribution is inverted - -Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1) - -Insert distributions:: - - insert: - # How many partition to insert per batch - partitions: fixed(1) - # How many rows to update per partition - select: fixed(1)/500 - # UNLOGGED or LOGGED batch for insert - batchtype: UNLOGGED - - -Currently all inserts are done inside batches. - -Read statements to use during the test:: - - queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? LIMIT 1 - fields: samerow - -Running a user mode test:: - - cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once - -This will create the schema then run tests for 1 minute with an equal number of inserts, latest_event queries and events -queries. Additionally the table will be truncated once before the test. - -The full example can be found here :download:`yaml <./stress-example.yaml>` - -Running a user mode test with multiple yaml files:: - cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once - -This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table - although care must be taken that the table definition is identical (data generation specs can be different). - -Lightweight transaction support -+++++++++++++++++++++++++++++++ - -cassandra-stress supports lightweight transactions. In this it will first read current data from Cassandra and then uses read value(s) -to fulfill lightweight transaction condition(s). - -Lightweight transaction update query:: - - queries: - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? - fields: samerow - -The full example can be found here :download:`yaml <./stress-lwt-example.yaml>` - -Graphing -^^^^^^^^ - -Graphs can be generated for each run of stress. - -.. 
image:: example-stress-graph.png - -To create a new graph:: - - cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" - -To add a new run to an existing graph point to an existing file and add a revision name:: - - cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run" - -FAQ -^^^^ - -**How do you use NetworkTopologyStrategy for the keyspace?** - -Use the schema option making sure to either escape the parenthesis or enclose in quotes:: - - cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)" - -**How do you use SSL?** - -Use the transport option:: - - cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra" \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_sources/tools/cqlsh.rst.txt b/src/doc/4.0-alpha3/_sources/tools/cqlsh.rst.txt deleted file mode 100644 index 45e2db8fc..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/cqlsh.rst.txt +++ /dev/null @@ -1,455 +0,0 @@ -.. highlight:: none - -.. _cqlsh: - -cqlsh: the CQL shell --------------------- - -cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line. - - -Compatibility -^^^^^^^^^^^^^ - -cqlsh is compatible with Python 2.7. - -In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh make work with older or newer versions of Cassandra, but this is not officially supported. - - -Optional Dependencies -^^^^^^^^^^^^^^^^^^^^^ - -cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh. - -pytz -~~~~ - -By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the `pytz `__ library must be installed. See the ``timezone`` option in cqlshrc_ for -specifying a timezone to use. - -cython -~~~~~~ - -The performance of cqlsh's ``COPY`` operations can be improved by installing `cython `__. This will -compile the python modules that are central to the performance of ``COPY``. - -cqlshrc -^^^^^^^ - -The ``cqlshrc`` file holds configuration options for cqlsh. By default this is in the user's home directory at -``~/.cassandra/cqlsh``, but a custom location can be specified with the ``--cqlshrc`` option. - -Example config values and documentation can be found in the ``conf/cqlshrc.sample`` file of a tarball installation. You -can also view the latest version of `cqlshrc online `__. - - -Command Line Options -^^^^^^^^^^^^^^^^^^^^ - -Usage: - -``cqlsh [options] [host [port]]`` - -Options: - -``-C`` ``--color`` - Force color output - -``--no-color`` - Disable color output - -``--browser`` - Specify the browser to use for displaying cqlsh help. This can be one of the `supported browser names - `__ (e.g. ``firefox``) or a browser path followed by ``%s`` (e.g. - ``/usr/bin/google-chrome-stable %s``). 
- -``--ssl`` - Use SSL when connecting to Cassandra - -``-u`` ``--user`` - Username to authenticate against Cassandra with - -``-p`` ``--password`` - Password to authenticate against Cassandra with, should - be used in conjunction with ``--user`` - -``-k`` ``--keyspace`` - Keyspace to authenticate to, should be used in conjunction - with ``--user`` - -``-f`` ``--file`` - Execute commands from the given file, then exit - -``--debug`` - Print additional debugging information - -``--encoding`` - Specify a non-default encoding for output (defaults to UTF-8) - -``--cqlshrc`` - Specify a non-default location for the ``cqlshrc`` file - -``-e`` ``--execute`` - Execute the given statement, then exit - -``--connect-timeout`` - Specify the connection timeout in seconds (defaults to 2s) - -``--request-timeout`` - Specify the request timeout in seconds (defaults to 10s) - -``-t`` ``--tty`` - Force tty mode (command prompt) - - -Special Commands -^^^^^^^^^^^^^^^^ - -In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below. - -``CONSISTENCY`` -~~~~~~~~~~~~~~~ - -`Usage`: ``CONSISTENCY `` - -Sets the consistency level for operations to follow. Valid arguments include: - -- ``ANY`` -- ``ONE`` -- ``TWO`` -- ``THREE`` -- ``QUORUM`` -- ``ALL`` -- ``LOCAL_QUORUM`` -- ``LOCAL_ONE`` -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -``SERIAL CONSISTENCY`` -~~~~~~~~~~~~~~~~~~~~~~ - -`Usage`: ``SERIAL CONSISTENCY `` - -Sets the serial consistency level for operations to follow. Valid arguments include: - -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of ``QUORUM`` (and -is successful), then a ``QUORUM`` read is guaranteed to see that write. But if the regular consistency level of that -write is ``ANY``, then only a read with a consistency level of ``SERIAL`` is guaranteed to see it (even a read with -consistency ``ALL`` is not guaranteed to be enough). - -``SHOW VERSION`` -~~~~~~~~~~~~~~~~ -Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:: - - cqlsh> SHOW VERSION - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - -``SHOW HOST`` -~~~~~~~~~~~~~ - -Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:: - - cqlsh> SHOW HOST - Connected to Prod_Cluster at 192.0.0.1:9042. - -``SHOW SESSION`` -~~~~~~~~~~~~~~~~ - -Pretty prints a specific tracing session. - -`Usage`: ``SHOW SESSION `` - -Example usage:: - - cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8 - - Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8 - - activity | timestamp | source | source_elapsed | client - -----------------------------------------------------------+----------------------------+-----------+----------------+----------- - Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 | 0 | 127.0.0.1 - Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 | 3843 | 127.0.0.1 - ... 
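
To see the two consistency commands working together, here is a hedged sketch of a short session; the ``shop.accounts`` table, its columns, and the values are invented purely for illustration.

```
cqlsh> CONSISTENCY QUORUM
cqlsh> SERIAL CONSISTENCY LOCAL_SERIAL
cqlsh> UPDATE shop.accounts SET email = 'new@example.com' WHERE id = 42 IF email = 'old@example.com';
```

With these settings the Paxos phase of the ``IF`` condition runs at ``LOCAL_SERIAL``, and once the conditional update succeeds a subsequent read at ``QUORUM`` is guaranteed to observe it.
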
- - -``SOURCE`` -~~~~~~~~~~ - -Reads the contents of a file and executes each line as a CQL statement or special cqlsh command. - -`Usage`: ``SOURCE `` - -Example usage:: - - cqlsh> SOURCE '/home/thobbs/commands.cql' - -``CAPTURE`` -~~~~~~~~~~~ - -Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured. - -`Usage`:: - - CAPTURE ''; - CAPTURE OFF; - CAPTURE; - -That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation (``'~/mydir'``) is supported for referring to ``$HOME``. - -Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session. - -To stop capturing output and show it in the cqlsh session again, use ``CAPTURE OFF``. - -To inspect the current capture configuration, use ``CAPTURE`` with no arguments. - -``HELP`` -~~~~~~~~ - -Gives information about cqlsh commands. To see available topics, enter ``HELP`` without any arguments. To see help on a -topic, use ``HELP ``. Also see the ``--browser`` argument for controlling what browser is used to display help. - -``TRACING`` -~~~~~~~~~~~ - -Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed. - -`Usage`:: - - TRACING ON - TRACING OFF - -``PAGING`` -~~~~~~~~~~ - -Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it's a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once. - -`Usage`:: - - PAGING ON - PAGING OFF - PAGING - -``EXPAND`` -~~~~~~~~~~ - -Enables or disables vertical printing of rows. Enabling ``EXPAND`` is useful when many columns are fetched, or the -contents of a single column are large. - -`Usage`:: - - EXPAND ON - EXPAND OFF - -``LOGIN`` -~~~~~~~~~ - -Authenticate as a specified Cassandra user for the current session. - -`Usage`:: - - LOGIN [] - -``EXIT`` -~~~~~~~~~ - -Ends the current session and terminates the cqlsh process. - -`Usage`:: - - EXIT - QUIT - -``CLEAR`` -~~~~~~~~~ - -Clears the console. - -`Usage`:: - - CLEAR - CLS - -``DESCRIBE`` -~~~~~~~~~~~~ - -Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema. - -`Usage`:: - - DESCRIBE CLUSTER - DESCRIBE SCHEMA - DESCRIBE KEYSPACES - DESCRIBE KEYSPACE - DESCRIBE TABLES - DESCRIBE TABLE
- DESCRIBE INDEX - DESCRIBE MATERIALIZED VIEW - DESCRIBE TYPES - DESCRIBE TYPE - DESCRIBE FUNCTIONS - DESCRIBE FUNCTION - DESCRIBE AGGREGATES - DESCRIBE AGGREGATE - -In any of the commands, ``DESC`` may be used in place of ``DESCRIBE``. - -The ``DESCRIBE CLUSTER`` command prints the cluster name and partitioner:: - - cqlsh> DESCRIBE CLUSTER - - Cluster: Test Cluster - Partitioner: Murmur3Partitioner - -The ``DESCRIBE SCHEMA`` command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup. - -``COPY TO`` -~~~~~~~~~~~ - -Copies data from a table to a CSV file. - -`Usage`:: - - COPY
[(, ...)] TO WITH [AND ...] - -If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parenthesis after the table name. - - -The ```` should be a string literal (with single quotes) representing a path to the destination file. This -can also the special value ``STDOUT`` (without single quotes) to print the CSV to stdout. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``MAXREQUESTS`` - The maximum number token ranges to fetch simultaneously. Defaults to 6. - -``PAGESIZE`` - The number of rows to fetch in a single page. Defaults to 1000. - -``PAGETIMEOUT`` - By default the page timeout is 10 seconds per 1000 entries - in the page size or 10 seconds if pagesize is smaller. - -``BEGINTOKEN``, ``ENDTOKEN`` - Token range to export. Defaults to exporting the full ring. - -``MAXOUTPUTSIZE`` - The maximum size of the output file measured in number of lines; - beyond this maximum the output file will be split into segments. - -1 means unlimited, and is the default. - -``ENCODING`` - The encoding used for characters. Defaults to ``utf8``. - -``COPY FROM`` -~~~~~~~~~~~~~ -Copies data from a CSV file to table. - -`Usage`:: - - COPY
[(, ...)] FROM WITH [AND ...] - -If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parenthesis after the table name. - -The ```` should be a string literal (with single quotes) representing a path to the -source file. This can also the special value ``STDIN`` (without single quotes) to read the -CSV data from stdin. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``INGESTRATE`` - The maximum number of rows to process per second. Defaults to 100000. - -``MAXROWS`` - The maximum number of rows to import. -1 means unlimited, and is the default. - -``SKIPROWS`` - A number of initial rows to skip. Defaults to 0. - -``SKIPCOLS`` - A comma-separated list of column names to ignore. By default, no columns are skipped. - -``MAXPARSEERRORS`` - The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default. - -``MAXINSERTERRORS`` - The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000. - -``ERRFILE`` = - A file to store all rows that could not be imported, by default this is ``import__
.err`` where ``<ks>`` is - your keyspace and ``<table>
`` is your table name. - -``MAXBATCHSIZE`` - The max number of rows inserted in a single batch. Defaults to 20. - -``MINBATCHSIZE`` - The min number of rows inserted in a single batch. Defaults to 2. - -``CHUNKSIZE`` - The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000. - -.. _shared-copy-options: - -Shared COPY Options -``````````````````` - -Options that are common to both ``COPY TO`` and ``COPY FROM``. - -``NULLVAL`` - The string placeholder for null values. Defaults to ``null``. - -``HEADER`` - For ``COPY TO``, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, - specifies whether the first line in the CSV input file contains column names. Defaults to ``false``. - -``DECIMALSEP`` - The character that is used as the decimal point separator. Defaults to ``.``. - -``THOUSANDSSEP`` - The character that is used to separate thousands. Defaults to the empty string. - -``BOOLSTYlE`` - The string literal format for boolean values. Defaults to ``True,False``. - -``NUMPROCESSES`` - The number of child worker processes to create for ``COPY`` tasks. Defaults to a max of 4 for ``COPY FROM`` and 16 - for ``COPY TO``. However, at most (num_cores - 1) processes will be created. - -``MAXATTEMPTS`` - The maximum number of failed attempts to fetch a range of data (when using ``COPY TO``) or insert a chunk of data - (when using ``COPY FROM``) before giving up. Defaults to 5. - -``REPORTFREQUENCY`` - How often status updates are refreshed, in seconds. Defaults to 0.25. - -``RATEFILE`` - An optional file to output rate statistics to. By default, statistics are not output to a file. diff --git a/src/doc/4.0-alpha3/_sources/tools/index.rst.txt b/src/doc/4.0-alpha3/_sources/tools/index.rst.txt deleted file mode 100644 index d28929c84..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Tools -=============== - -This section describes the command line tools provided with Apache Cassandra. - -.. toctree:: - :maxdepth: 3 - - cqlsh - nodetool/nodetool - sstable/index - cassandra_stress diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/assassinate.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/assassinate.rst.txt deleted file mode 100644 index 0ec5dc8f4..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/assassinate.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_assassinate: - -assassinate ------------ - -Usage ---------- - -.. 
include:: assassinate.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/bootstrap.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/bootstrap.rst.txt deleted file mode 100644 index e280fee01..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/bootstrap.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_bootstrap: - -bootstrap ---------- - -Usage ---------- - -.. include:: bootstrap.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/cleanup.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/cleanup.rst.txt deleted file mode 100644 index 70ba8f9f7..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/cleanup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_cleanup: - -cleanup -------- - -Usage ---------- - -.. include:: cleanup.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/clearsnapshot.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/clearsnapshot.rst.txt deleted file mode 100644 index 878147511..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/clearsnapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clearsnapshot: - -clearsnapshot -------------- - -Usage ---------- - -.. include:: clearsnapshot.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/clientstats.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/clientstats.rst.txt deleted file mode 100644 index 7f5e47ab7..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/clientstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clientstats: - -clientstats ------------ - -Usage ---------- - -.. include:: clientstats.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/compact.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/compact.rst.txt deleted file mode 100644 index a26347c57..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/compact.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compact: - -compact -------- - -Usage ---------- - -.. include:: compact.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/compactionhistory.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/compactionhistory.rst.txt deleted file mode 100644 index 896433e86..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/compactionhistory.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionhistory: - -compactionhistory ------------------ - -Usage ---------- - -.. include:: compactionhistory.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/compactionstats.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/compactionstats.rst.txt deleted file mode 100644 index 612822535..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/compactionstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionstats: - -compactionstats ---------------- - -Usage ---------- - -.. include:: compactionstats.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/decommission.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/decommission.rst.txt deleted file mode 100644 index 8b00ff40c..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/decommission.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_decommission: - -decommission ------------- - -Usage ---------- - -.. 
include:: decommission.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/describecluster.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/describecluster.rst.txt deleted file mode 100644 index 55df135de..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/describecluster.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describecluster: - -describecluster ---------------- - -Usage ---------- - -.. include:: describecluster.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/describering.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/describering.rst.txt deleted file mode 100644 index 3a964e8ee..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/describering.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describering: - -describering ------------- - -Usage ---------- - -.. include:: describering.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/disableauditlog.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/disableauditlog.rst.txt deleted file mode 100644 index 4b20b0a9b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/disableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableauditlog: - -disableauditlog ---------------- - -Usage ---------- - -.. include:: disableauditlog.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/disableautocompaction.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/disableautocompaction.rst.txt deleted file mode 100644 index 16549f202..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/disableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableautocompaction: - -disableautocompaction ---------------------- - -Usage ---------- - -.. include:: disableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablebackup.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/disablebackup.rst.txt deleted file mode 100644 index c9537e011..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebackup: - -disablebackup -------------- - -Usage ---------- - -.. include:: disablebackup.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablebinary.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/disablebinary.rst.txt deleted file mode 100644 index 86e981f6e..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebinary: - -disablebinary -------------- - -Usage ---------- - -.. include:: disablebinary.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablefullquerylog.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/disablefullquerylog.rst.txt deleted file mode 100644 index d68cd492e..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablefullquerylog: - -disablefullquerylog -------------------- - -Usage ---------- - -.. include:: disablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablegossip.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/disablegossip.rst.txt deleted file mode 100644 index 76f3d064b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablegossip: - -disablegossip -------------- - -Usage ---------- - -.. 
include:: disablegossip.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablehandoff.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/disablehandoff.rst.txt deleted file mode 100644 index 35f11334b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehandoff: - -disablehandoff --------------- - -Usage ---------- - -.. include:: disablehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablehintsfordc.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/disablehintsfordc.rst.txt deleted file mode 100644 index 892c31ba5..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/disablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehintsfordc: - -disablehintsfordc ------------------ - -Usage ---------- - -.. include:: disablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/disableoldprotocolversions.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/disableoldprotocolversions.rst.txt deleted file mode 100644 index 09d8a150b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/disableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableoldprotocolversions: - -disableoldprotocolversions --------------------------- - -Usage ---------- - -.. include:: disableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/drain.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/drain.rst.txt deleted file mode 100644 index 03093ac7c..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/drain.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_drain: - -drain ------ - -Usage ---------- - -.. include:: drain.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/enableauditlog.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/enableauditlog.rst.txt deleted file mode 100644 index 7936c7a86..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/enableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableauditlog: - -enableauditlog --------------- - -Usage ---------- - -.. include:: enableauditlog.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/enableautocompaction.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/enableautocompaction.rst.txt deleted file mode 100644 index 2ddf60f8e..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/enableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableautocompaction: - -enableautocompaction --------------------- - -Usage ---------- - -.. include:: enableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablebackup.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/enablebackup.rst.txt deleted file mode 100644 index 9a06c6669..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebackup: - -enablebackup ------------- - -Usage ---------- - -.. include:: enablebackup.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablebinary.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/enablebinary.rst.txt deleted file mode 100644 index 5b1c6da72..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebinary: - -enablebinary ------------- - -Usage ---------- - -.. 
include:: enablebinary.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablefullquerylog.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/enablefullquerylog.rst.txt deleted file mode 100644 index ec871c283..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablefullquerylog: - -enablefullquerylog ------------------- - -Usage ---------- - -.. include:: enablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablegossip.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/enablegossip.rst.txt deleted file mode 100644 index ae66186ca..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablegossip: - -enablegossip ------------- - -Usage ---------- - -.. include:: enablegossip.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablehandoff.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/enablehandoff.rst.txt deleted file mode 100644 index d764c3a9a..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehandoff: - -enablehandoff -------------- - -Usage ---------- - -.. include:: enablehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablehintsfordc.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/enablehintsfordc.rst.txt deleted file mode 100644 index 6c42087c3..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/enablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehintsfordc: - -enablehintsfordc ----------------- - -Usage ---------- - -.. include:: enablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/enableoldprotocolversions.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/enableoldprotocolversions.rst.txt deleted file mode 100644 index e44dc377a..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/enableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableoldprotocolversions: - -enableoldprotocolversions -------------------------- - -Usage ---------- - -.. include:: enableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/failuredetector.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/failuredetector.rst.txt deleted file mode 100644 index 25d02b04a..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/failuredetector.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_failuredetector: - -failuredetector ---------------- - -Usage ---------- - -.. include:: failuredetector.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/flush.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/flush.rst.txt deleted file mode 100644 index 45e9b2c5e..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/flush.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_flush: - -flush ------ - -Usage ---------- - -.. include:: flush.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/garbagecollect.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/garbagecollect.rst.txt deleted file mode 100644 index 3af605aad..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/garbagecollect.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_garbagecollect: - -garbagecollect --------------- - -Usage ---------- - -.. 
include:: garbagecollect.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/gcstats.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/gcstats.rst.txt deleted file mode 100644 index 3e4b45930..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/gcstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gcstats: - -gcstats -------- - -Usage ---------- - -.. include:: gcstats.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 6f56f7d6d..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getbatchlogreplaythrottle: - -getbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: getbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getcompactionthreshold.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getcompactionthreshold.rst.txt deleted file mode 100644 index e17f4b2e4..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthreshold: - -getcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: getcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getcompactionthroughput.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getcompactionthroughput.rst.txt deleted file mode 100644 index ef41115ee..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthroughput: - -getcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: getcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getconcurrency.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getconcurrency.rst.txt deleted file mode 100644 index 9b0373480..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrency: - -getconcurrency --------------- - -Usage ---------- - -.. include:: getconcurrency.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getconcurrentcompactors.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getconcurrentcompactors.rst.txt deleted file mode 100644 index 8e137e0ed..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentcompactors: - -getconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: getconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt deleted file mode 100644 index e113d74c5..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentviewbuilders: - -getconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. 
include:: getconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getendpoints.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getendpoints.rst.txt deleted file mode 100644 index ac0d43c7a..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getendpoints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getendpoints: - -getendpoints ------------- - -Usage ---------- - -.. include:: getendpoints.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt deleted file mode 100644 index abdd7e7f0..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getinterdcstreamthroughput: - -getinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: getinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getlogginglevels.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getlogginglevels.rst.txt deleted file mode 100644 index 9936e8d45..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getlogginglevels.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getlogginglevels: - -getlogginglevels ----------------- - -Usage ---------- - -.. include:: getlogginglevels.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getmaxhintwindow.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getmaxhintwindow.rst.txt deleted file mode 100644 index 1a0fe017b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getmaxhintwindow: - -getmaxhintwindow ----------------- - -Usage ---------- - -.. include:: getmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getreplicas.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getreplicas.rst.txt deleted file mode 100644 index 342e72b57..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getreplicas.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getreplicas: - -getreplicas ------------ - -Usage ---------- - -.. include:: getreplicas.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getseeds.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getseeds.rst.txt deleted file mode 100644 index e3156300e..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getseeds: - -getseeds --------- - -Usage ---------- - -.. include:: getseeds.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getsstables.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getsstables.rst.txt deleted file mode 100644 index 1a866ccec..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getsstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getsstables: - -getsstables ------------ - -Usage ---------- - -.. include:: getsstables.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/getstreamthroughput.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/getstreamthroughput.rst.txt deleted file mode 100644 index 6d7dbc422..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/getstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getstreamthroughput: - -getstreamthroughput -------------------- - -Usage ---------- - -.. 
include:: getstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/gettimeout.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/gettimeout.rst.txt deleted file mode 100644 index 9d2e9edd8..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/gettimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettimeout: - -gettimeout ----------- - -Usage ---------- - -.. include:: gettimeout.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/gettraceprobability.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/gettraceprobability.rst.txt deleted file mode 100644 index 3f5783fd0..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/gettraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettraceprobability: - -gettraceprobability -------------------- - -Usage ---------- - -.. include:: gettraceprobability.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/gossipinfo.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/gossipinfo.rst.txt deleted file mode 100644 index cd3513a89..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/gossipinfo.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gossipinfo: - -gossipinfo ----------- - -Usage ---------- - -.. include:: gossipinfo.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/handoffwindow.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/handoffwindow.rst.txt deleted file mode 100644 index 87fe61dc2..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/handoffwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_handoffwindow: - -handoffwindow -------------- - -Usage ---------- - -.. include:: handoffwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/help.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/help.rst.txt deleted file mode 100644 index 8cab88f74..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/help.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_help: - -help ----- - -Usage ---------- - -.. include:: help.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/import.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/import.rst.txt deleted file mode 100644 index 532ba2dcd..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/import.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_import: - -import ------- - -Usage ---------- - -.. include:: import.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/info.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/info.rst.txt deleted file mode 100644 index 74012e93f..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/info.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_info: - -info ----- - -Usage ---------- - -.. include:: info.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/invalidatecountercache.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/invalidatecountercache.rst.txt deleted file mode 100644 index ce1a94d1e..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/invalidatecountercache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatecountercache: - -invalidatecountercache ----------------------- - -Usage ---------- - -.. 
include:: invalidatecountercache.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/invalidatekeycache.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/invalidatekeycache.rst.txt deleted file mode 100644 index 3e80511a6..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/invalidatekeycache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatekeycache: - -invalidatekeycache ------------------- - -Usage ---------- - -.. include:: invalidatekeycache.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/invalidaterowcache.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/invalidaterowcache.rst.txt deleted file mode 100644 index fd84f1d5a..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/invalidaterowcache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidaterowcache: - -invalidaterowcache ------------------- - -Usage ---------- - -.. include:: invalidaterowcache.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/join.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/join.rst.txt deleted file mode 100644 index a2819eb6a..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/join.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_join: - -join ----- - -Usage ---------- - -.. include:: join.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/listsnapshots.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/listsnapshots.rst.txt deleted file mode 100644 index d897cfa2b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/listsnapshots.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_listsnapshots: - -listsnapshots -------------- - -Usage ---------- - -.. include:: listsnapshots.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/move.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/move.rst.txt deleted file mode 100644 index 04b3bdba1..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/move.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_move: - -move ----- - -Usage ---------- - -.. include:: move.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/netstats.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/netstats.rst.txt deleted file mode 100644 index b94a09e7d..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/netstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_netstats: - -netstats --------- - -Usage ---------- - -.. include:: netstats.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/nodetool.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/nodetool.rst.txt deleted file mode 100644 index c914c86e9..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/nodetool.rst.txt +++ /dev/null @@ -1,253 +0,0 @@ -.. _nodetool - -Nodetool --------- - -Usage ---------- - -usage: nodetool [(-pwf | --password-file )] - [(-u | --username )] [(-h | --host )] - [(-pw | --password )] [(-pp | --print-port)] - [(-p | --port )] [] - -The most commonly used nodetool commands are: - - :doc:`assassinate` - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode - - :doc:`bootstrap` - Monitor/manage node's bootstrap process - - :doc:`cleanup` - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces - - :doc:`clearsnapshot` - Remove the snapshot with the given name from the given keyspaces. 
If no snapshotName is specified we will remove all snapshots - - :doc:`clientstats` - Print information about connected clients - - :doc:`compact` - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables - - :doc:`compactionhistory` - Print history of compaction - - :doc:`compactionstats` - Print statistics on compactions - - :doc:`decommission` - Decommission the *node I am connecting to* - - :doc:`describecluster` - Print the name, snitch, partitioner and schema version of a cluster - - :doc:`describering` - Shows the token ranges info of a given keyspace - - :doc:`disableauditlog` - Disable the audit log - - :doc:`disableautocompaction` - Disable autocompaction for the given keyspace and table - - :doc:`disablebackup` - Disable incremental backup - - :doc:`disablebinary` - Disable native transport (binary protocol) - - :doc:`disablefullquerylog` - Disable the full query log - - :doc:`disablegossip` - Disable gossip (effectively marking the node down) - - :doc:`disablehandoff` - Disable storing hinted handoffs - - :doc:`disablehintsfordc` - Disable hints for a data center - - :doc:`disableoldprotocolversions` - Disable old protocol versions - - :doc:`drain` - Drain the node (stop accepting writes and flush all tables) - - :doc:`enableauditlog` - Enable the audit log - - :doc:`enableautocompaction` - Enable autocompaction for the given keyspace and table - - :doc:`enablebackup` - Enable incremental backup - - :doc:`enablebinary` - Reenable native transport (binary protocol) - - :doc:`enablefullquerylog` - Enable full query logging, defaults for the options are configured in cassandra.yaml - - :doc:`enablegossip` - Reenable gossip - - :doc:`enablehandoff` - Reenable future hints storing on the current node - - :doc:`enablehintsfordc` - Enable hints for a data center that was previsouly disabled - - :doc:`enableoldprotocolversions` - Enable old protocol versions - - :doc:`failuredetector` - Shows the failure detector information for the cluster - - :doc:`flush` - Flush one or more tables - - :doc:`garbagecollect` - Remove deleted data from one or more tables - - :doc:`gcstats` - Print GC Statistics - - :doc:`getbatchlogreplaythrottle` - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster. - - :doc:`getcompactionthreshold` - Print min and max compaction thresholds for a given table - - :doc:`getcompactionthroughput` - Print the MB/s throughput cap for compaction in the system - - :doc:`getconcurrency` - Get maximum concurrency for processing stages - - :doc:`getconcurrentcompactors` - Get the number of concurrent compactors in the system. 
- - :doc:`getconcurrentviewbuilders` - Get the number of concurrent view builders in the system - - :doc:`getendpoints` - Print the end points that owns the key - - :doc:`getinterdcstreamthroughput` - Print the Mb/s throughput cap for inter-datacenter streaming in the system - - :doc:`getlogginglevels` - Get the runtime logging levels - - :doc:`getmaxhintwindow` - Print the max hint window in ms - - :doc:`getreplicas` - Print replicas for a given key - - :doc:`getseeds` - Get the currently in use seed node IP list excluding the node IP - - :doc:`getsstables` - Print the sstable filenames that own the key - - :doc:`getstreamthroughput` - Print the Mb/s throughput cap for streaming in the system - - :doc:`gettimeout` - Print the timeout of the given type in ms - - :doc:`gettraceprobability` - Print the current trace probability value - - :doc:`gossipinfo` - Shows the gossip information for the cluster - - :doc:`handoffwindow` - Print current hinted handoff window - - :doc:`help` - Display help information - - :doc:`import` - Import new SSTables to the system - - :doc:`info` - Print node information (uptime, load, ...) - - :doc:`invalidatecountercache` - Invalidate the counter cache - - :doc:`invalidatekeycache` - Invalidate the key cache - - :doc:`invalidaterowcache` - Invalidate the row cache - - :doc:`join` - Join the ring - - :doc:`listsnapshots` - Lists all the snapshots along with the size on disk and true size. - - :doc:`move` - Move node on the token ring to a new token - - :doc:`netstats` - Print network information on provided host (connecting node by default) - - :doc:`pausehandoff` - Pause hints delivery process - - :doc:`profileload` - Low footprint profiling of activity for a period of time - - :doc:`proxyhistograms` - Print statistic histograms for network operations - - :doc:`rangekeysample` - Shows the sampled keys held across all keyspaces - - :doc:`rebuild` - Rebuild data by streaming from other nodes (similarly to bootstrap) - - :doc:`rebuild_index` - A full rebuild of native secondary indexes for a given table - - :doc:`refresh` - Load newly placed SSTables to the system without restart - - :doc:`refreshsizeestimates` - Refresh system.size_estimates - - :doc:`reloadlocalschema` - Reload local node schema from system tables - - :doc:`reloadseeds` - Reload the seed node list from the seed node provider - - :doc:`reloadssl` - Signals Cassandra to reload SSL certificates - - :doc:`reloadtriggers` - Reload trigger classes - - :doc:`relocatesstables` - Relocates sstables to the correct disk - - :doc:`removenode` - Show status of current node removal, force completion of pending removal or remove provided ID - - :doc:`repair` - Repair one or more tables - - :doc:`repair_admin` - - :doc:`list` - and fail incremental repair sessions - - :doc:`replaybatchlog` - Kick off batchlog replay and wait for finish - - :doc:`resetfullquerylog` - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX - - :doc:`resetlocalschema` - Reset node's local schema and resync - - :doc:`resumehandoff` - Resume hints delivery process - - :doc:`ring` - Print information about the token ring - - :doc:`scrub` - Scrub (rebuild sstables for) one or more tables - - :doc:`setbatchlogreplaythrottle` - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster. 
- - :doc:`setcachecapacity` - Set global key, row, and counter cache capacities (in MB units) - - :doc:`setcachekeystosave` - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable - - :doc:`setcompactionthreshold` - Set min and max compaction thresholds for a given table - - :doc:`setcompactionthroughput` - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling - - :doc:`setconcurrency` - Set maximum concurrency for processing stage - - :doc:`setconcurrentcompactors` - Set number of concurrent compactors in the system. - - :doc:`setconcurrentviewbuilders` - Set the number of concurrent view builders in the system - - :doc:`sethintedhandoffthrottlekb` - Set hinted handoff throttle in kb per second, per delivery thread. - - :doc:`setinterdcstreamthroughput` - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling - - :doc:`setlogginglevel` - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters. - - :doc:`setmaxhintwindow` - Set the specified max hint window in ms - - :doc:`setstreamthroughput` - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling - - :doc:`settimeout` - Set the specified timeout in ms, or 0 to disable timeout - - :doc:`settraceprobability` - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default - - :doc:`snapshot` - Take a snapshot of specified keyspaces or a snapshot of the specified table - - :doc:`status` - Print cluster information (state, load, IDs, ...) - - :doc:`statusautocompaction` - - :doc:`status` - of autocompaction of the given keyspace and table - - :doc:`statusbackup` - Status of incremental backup - - :doc:`statusbinary` - Status of native transport (binary protocol) - - :doc:`statusgossip` - Status of gossip - - :doc:`statushandoff` - Status of storing future hints on the current node - - :doc:`stop` - Stop compaction - - :doc:`stopdaemon` - Stop cassandra daemon - - :doc:`tablehistograms` - Print statistic histograms for a given table - - :doc:`tablestats` - Print statistics on tables - - :doc:`toppartitions` - Sample and print the most active partitions - - :doc:`tpstats` - Print usage statistics of thread pools - - :doc:`truncatehints` - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified. - - :doc:`upgradesstables` - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version) - - :doc:`verify` - Verify (check data checksum for) one or more tables - - :doc:`version` - Print cassandra version - - :doc:`viewbuildstatus` - Show progress of a materialized view build - -See 'nodetool help ' for more information on a specific command. - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/pausehandoff.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/pausehandoff.rst.txt deleted file mode 100644 index 85ea996f9..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/pausehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_pausehandoff: - -pausehandoff ------------- - -Usage ---------- - -.. 
include:: pausehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/profileload.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/profileload.rst.txt deleted file mode 100644 index aff289f9f..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/profileload.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_profileload: - -profileload ------------ - -Usage ---------- - -.. include:: profileload.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/proxyhistograms.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/proxyhistograms.rst.txt deleted file mode 100644 index c4f333fb7..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/proxyhistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_proxyhistograms: - -proxyhistograms ---------------- - -Usage ---------- - -.. include:: proxyhistograms.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/rangekeysample.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/rangekeysample.rst.txt deleted file mode 100644 index 983ce93d0..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/rangekeysample.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rangekeysample: - -rangekeysample --------------- - -Usage ---------- - -.. include:: rangekeysample.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/rebuild.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/rebuild.rst.txt deleted file mode 100644 index 7a94ce4ed..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/rebuild.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild: - -rebuild -------- - -Usage ---------- - -.. include:: rebuild.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/rebuild_index.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/rebuild_index.rst.txt deleted file mode 100644 index a1ea4f5a2..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/rebuild_index.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild_index: - -rebuild_index -------------- - -Usage ---------- - -.. include:: rebuild_index.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/refresh.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/refresh.rst.txt deleted file mode 100644 index f68f040cd..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/refresh.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refresh: - -refresh -------- - -Usage ---------- - -.. include:: refresh.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/refreshsizeestimates.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/refreshsizeestimates.rst.txt deleted file mode 100644 index 2f3610afe..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/refreshsizeestimates.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refreshsizeestimates: - -refreshsizeestimates --------------------- - -Usage ---------- - -.. include:: refreshsizeestimates.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadlocalschema.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadlocalschema.rst.txt deleted file mode 100644 index 7ccc0c5e3..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadlocalschema: - -reloadlocalschema ------------------ - -Usage ---------- - -.. 
include:: reloadlocalschema.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadseeds.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadseeds.rst.txt deleted file mode 100644 index 5c6751d77..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadseeds: - -reloadseeds ------------ - -Usage ---------- - -.. include:: reloadseeds.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadssl.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadssl.rst.txt deleted file mode 100644 index 9781b295b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadssl.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadssl: - -reloadssl ---------- - -Usage ---------- - -.. include:: reloadssl.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadtriggers.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadtriggers.rst.txt deleted file mode 100644 index 2f7959d8c..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/reloadtriggers.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadtriggers: - -reloadtriggers --------------- - -Usage ---------- - -.. include:: reloadtriggers.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/relocatesstables.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/relocatesstables.rst.txt deleted file mode 100644 index 9951d3398..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/relocatesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_relocatesstables: - -relocatesstables ----------------- - -Usage ---------- - -.. include:: relocatesstables.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/removenode.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/removenode.rst.txt deleted file mode 100644 index fe0a041d1..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/removenode.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_removenode: - -removenode ----------- - -Usage ---------- - -.. include:: removenode.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/repair.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/repair.rst.txt deleted file mode 100644 index b43baba71..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/repair.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair: - -repair ------- - -Usage ---------- - -.. include:: repair.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/repair_admin.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/repair_admin.rst.txt deleted file mode 100644 index 1212c399d..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/repair_admin.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair_admin: - -repair_admin ------------- - -Usage ---------- - -.. include:: repair_admin.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/replaybatchlog.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/replaybatchlog.rst.txt deleted file mode 100644 index 073f091db..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/replaybatchlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_replaybatchlog: - -replaybatchlog --------------- - -Usage ---------- - -.. 
include:: replaybatchlog.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/resetfullquerylog.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/resetfullquerylog.rst.txt deleted file mode 100644 index a7661ec98..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/resetfullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetfullquerylog: - -resetfullquerylog ------------------ - -Usage ---------- - -.. include:: resetfullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/resetlocalschema.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/resetlocalschema.rst.txt deleted file mode 100644 index cd1b75d33..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/resetlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetlocalschema: - -resetlocalschema ----------------- - -Usage ---------- - -.. include:: resetlocalschema.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/resumehandoff.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/resumehandoff.rst.txt deleted file mode 100644 index 48a0451a3..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/resumehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resumehandoff: - -resumehandoff -------------- - -Usage ---------- - -.. include:: resumehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/ring.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/ring.rst.txt deleted file mode 100644 index 7b3c195bd..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/ring.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_ring: - -ring ----- - -Usage ---------- - -.. include:: ring.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/scrub.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/scrub.rst.txt deleted file mode 100644 index fc926eb14..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/scrub.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_scrub: - -scrub ------ - -Usage ---------- - -.. include:: scrub.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 2ae628a35..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setbatchlogreplaythrottle: - -setbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: setbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setcachecapacity.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setcachecapacity.rst.txt deleted file mode 100644 index 92c7d6389..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setcachecapacity.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachecapacity: - -setcachecapacity ----------------- - -Usage ---------- - -.. include:: setcachecapacity.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setcachekeystosave.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setcachekeystosave.rst.txt deleted file mode 100644 index 639179f99..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setcachekeystosave.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachekeystosave: - -setcachekeystosave ------------------- - -Usage ---------- - -.. 
include:: setcachekeystosave.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setcompactionthreshold.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setcompactionthreshold.rst.txt deleted file mode 100644 index 3a3e88b08..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthreshold: - -setcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: setcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setcompactionthroughput.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setcompactionthroughput.rst.txt deleted file mode 100644 index 27185da30..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthroughput: - -setcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: setcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setconcurrency.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setconcurrency.rst.txt deleted file mode 100644 index 75b09531f..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrency: - -setconcurrency --------------- - -Usage ---------- - -.. include:: setconcurrency.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setconcurrentcompactors.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setconcurrentcompactors.rst.txt deleted file mode 100644 index 75969de4e..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentcompactors: - -setconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: setconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt deleted file mode 100644 index 26f53a171..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentviewbuilders: - -setconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: setconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt deleted file mode 100644 index 9986ca29a..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sethintedhandoffthrottlekb: - -sethintedhandoffthrottlekb --------------------------- - -Usage ---------- - -.. include:: sethintedhandoffthrottlekb.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt deleted file mode 100644 index ed406a7bc..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setinterdcstreamthroughput: - -setinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. 
include:: setinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setlogginglevel.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setlogginglevel.rst.txt deleted file mode 100644 index eaa4030c3..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setlogginglevel.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setlogginglevel: - -setlogginglevel ---------------- - -Usage ---------- - -.. include:: setlogginglevel.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setmaxhintwindow.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setmaxhintwindow.rst.txt deleted file mode 100644 index 0c62c3289..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setmaxhintwindow: - -setmaxhintwindow ----------------- - -Usage ---------- - -.. include:: setmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/setstreamthroughput.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/setstreamthroughput.rst.txt deleted file mode 100644 index 76447f112..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/setstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setstreamthroughput: - -setstreamthroughput -------------------- - -Usage ---------- - -.. include:: setstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/settimeout.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/settimeout.rst.txt deleted file mode 100644 index 4ec9a6e4d..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/settimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settimeout: - -settimeout ----------- - -Usage ---------- - -.. include:: settimeout.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/settraceprobability.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/settraceprobability.rst.txt deleted file mode 100644 index a95b48560..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/settraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settraceprobability: - -settraceprobability -------------------- - -Usage ---------- - -.. include:: settraceprobability.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/snapshot.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/snapshot.rst.txt deleted file mode 100644 index 097a655b2..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/snapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_snapshot: - -snapshot --------- - -Usage ---------- - -.. include:: snapshot.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/status.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/status.rst.txt deleted file mode 100644 index 4d3050ea1..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/status.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_status: - -status ------- - -Usage ---------- - -.. include:: status.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/statusautocompaction.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/statusautocompaction.rst.txt deleted file mode 100644 index 3748e0e4e..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/statusautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusautocompaction: - -statusautocompaction --------------------- - -Usage ---------- - -.. 
include:: statusautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/statusbackup.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/statusbackup.rst.txt deleted file mode 100644 index 6546ec07f..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/statusbackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbackup: - -statusbackup ------------- - -Usage ---------- - -.. include:: statusbackup.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/statusbinary.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/statusbinary.rst.txt deleted file mode 100644 index 0bb5011c3..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/statusbinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbinary: - -statusbinary ------------- - -Usage ---------- - -.. include:: statusbinary.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/statusgossip.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/statusgossip.rst.txt deleted file mode 100644 index 7dc57eda7..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/statusgossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusgossip: - -statusgossip ------------- - -Usage ---------- - -.. include:: statusgossip.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/statushandoff.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/statushandoff.rst.txt deleted file mode 100644 index aa1c4eb6b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/statushandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statushandoff: - -statushandoff -------------- - -Usage ---------- - -.. include:: statushandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/stop.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/stop.rst.txt deleted file mode 100644 index 1e44dbe79..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/stop.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stop: - -stop ----- - -Usage ---------- - -.. include:: stop.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/stopdaemon.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/stopdaemon.rst.txt deleted file mode 100644 index 4ae951098..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/stopdaemon.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stopdaemon: - -stopdaemon ----------- - -Usage ---------- - -.. include:: stopdaemon.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/tablehistograms.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/tablehistograms.rst.txt deleted file mode 100644 index 79d2b4ccb..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/tablehistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablehistograms: - -tablehistograms ---------------- - -Usage ---------- - -.. include:: tablehistograms.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/tablestats.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/tablestats.rst.txt deleted file mode 100644 index 5b2c02d98..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/tablestats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablestats: - -tablestats ----------- - -Usage ---------- - -.. 
include:: tablestats.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/toppartitions.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/toppartitions.rst.txt deleted file mode 100644 index 711816313..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/toppartitions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_toppartitions: - -toppartitions -------------- - -Usage ---------- - -.. include:: toppartitions.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/tpstats.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/tpstats.rst.txt deleted file mode 100644 index c6b662012..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/tpstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tpstats: - -tpstats -------- - -Usage ---------- - -.. include:: tpstats.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/truncatehints.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/truncatehints.rst.txt deleted file mode 100644 index 4b75391a6..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/truncatehints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_truncatehints: - -truncatehints -------------- - -Usage ---------- - -.. include:: truncatehints.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/upgradesstables.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/upgradesstables.rst.txt deleted file mode 100644 index 505cc148a..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/upgradesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_upgradesstables: - -upgradesstables ---------------- - -Usage ---------- - -.. include:: upgradesstables.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/verify.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/verify.rst.txt deleted file mode 100644 index dbd152cfb..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/verify.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_verify: - -verify ------- - -Usage ---------- - -.. include:: verify.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/version.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/version.rst.txt deleted file mode 100644 index fca4e3f44..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/version.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_version: - -version -------- - -Usage ---------- - -.. include:: version.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/nodetool/viewbuildstatus.rst.txt b/src/doc/4.0-alpha3/_sources/tools/nodetool/viewbuildstatus.rst.txt deleted file mode 100644 index 758fe502b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/nodetool/viewbuildstatus.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_viewbuildstatus: - -viewbuildstatus ---------------- - -Usage ---------- - -.. include:: viewbuildstatus.txt - :literal: - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/index.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/index.rst.txt deleted file mode 100644 index b9e483f45..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -SSTable Tools -============= - -This section describes the functionality of the various sstable tools. - -Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped. - -.. toctree:: - :maxdepth: 2 - - sstabledump - sstableexpiredblockers - sstablelevelreset - sstableloader - sstablemetadata - sstableofflinerelevel - sstablerepairedset - sstablescrub - sstablesplit - sstableupgrade - sstableutil - sstableverify - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstabledump.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstabledump.rst.txt deleted file mode 100644 index 8f38afa09..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstabledump.rst.txt +++ /dev/null @@ -1,294 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstabledump ------------ - -Dump contents of a given SSTable to standard output in JSON format. - -You must supply exactly one sstable. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstabledump - -=================================== ================================================================================ --d CQL row per line internal representation --e Enumerate partition keys only --k Partition key --x Excluded partition key(s) --t Print raw timestamps instead of iso8601 date strings --l Output each row as a separate JSON object -=================================== ================================================================================ - -If necessary, use sstableutil first to find out the sstables used by a table. - -Dump entire table -^^^^^^^^^^^^^^^^^ - -Dump the entire table without any options. 
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26 - - cat eventlog_dump_2018Jul26 - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - ] - -Dump table in a more manageable format -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. ref: https://issues.apache.org/jira/browse/CASSANDRA-13848 - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines - - cat eventlog_dump_2018Jul26_justlines - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Dump only keys -^^^^^^^^^^^^^^ - -Dump only the keys by using the -e option. 
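If a general-purpose JSON processor such as jq happens to be installed (it is not part of Cassandra), the same key list can also be pulled out of a full JSON dump; a rough sketch, reusing the illustrative path from the examples above::

    sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db | jq '[.[].partition.key]'

The -e option shown below is likely the cheaper choice for large sstables, since it enumerates partition keys without dumping the row data.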
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys - - cat eventlog_dump_2018Jul26b - [ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ] - -Dump row for a single key -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a single key using the -k option. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey - - cat eventlog_dump_2018Jul26_singlekey - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Exclude a key or keys in dump of rows -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a table except for the rows excluded with the -x option. Multiple keys can be used. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e > eventlog_dump_2018Jul26_excludekeys - - cat eventlog_dump_2018Jul26_excludekeys - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Display raw timestamps -^^^^^^^^^^^^^^^^^^^^^^ - -By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times - - cat eventlog_dump_2018Jul26_times - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "1532118147028809" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - - -Display internal structure in output -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump the table in a format that reflects the internal structure. 
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d - - cat eventlog_dump_2018Jul26_d - [3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]: | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711] - [d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]: | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522] - [cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]: | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809] - - - - - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableexpiredblockers.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstableexpiredblockers.rst.txt deleted file mode 100644 index ec837944c..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableexpiredblockers.rst.txt +++ /dev/null @@ -1,48 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableexpiredblockers ----------------------- - -During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable. - -This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-10015 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ - -sstableexpiredblockers
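The keyspace and table to inspect are given as the two positional arguments (the same invocation appears in the example below)::

    sstableexpiredblockers keyspace1 standard1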
- -Output blocked sstables -^^^^^^^^^^^^^^^^^^^^^^^ - -If the sstables exist for the table, but no tables have older data than the newest tombstone in an expired sstable, the script will return nothing. - -Otherwise, the script will return ` blocks <#> expired sstables from getting dropped` followed by a list of the blocked sstables. - -Example:: - - sstableexpiredblockers keyspace1 standard1 - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablelevelreset.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstablelevelreset.rst.txt deleted file mode 100644 index 7069094dd..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablelevelreset.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablelevelreset ------------------ - -If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration. - -See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5271 - -Usage -^^^^^ - -sstablelevelreset --really-reset
- -The really-reset flag is required, to ensure this intrusive command is not run accidentally. - -Table not found -^^^^^^^^^^^^^^^ - -If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error. - -Example:: - - ColumnFamily not found: keyspace/evenlog. - -Table has no sstables -^^^^^^^^^^^^^^^^^^^^^ - -Example:: - - Found no sstables, did you give the correct keyspace/table? - - -Table already at level 0 -^^^^^^^^^^^^^^^^^^^^^^^^ - -The script will not set the level if it is already set to 0. - -Example:: - - Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0 - -Table levels reduced to 0 -^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the level is not already 0, then this will reset it to 0. - -Example:: - - sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level - SSTable Level: 1 - - sstablelevelreset --really-reset keyspace eventlog - Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - - sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level - SSTable Level: 0 - - - - - - - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableloader.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstableloader.rst.txt deleted file mode 100644 index a9b37342c..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableloader.rst.txt +++ /dev/null @@ -1,273 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableloader ---------------- - -Bulk-load the sstables found in the directory to the configured cluster. The parent directories of are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files. - -Several of the options listed below don't work quite as intended, and in those cases, workarounds are mentioned for specific use cases. - -To avoid having the sstable files to be loaded compacted while reading them, place the files in an alternate keyspace/table path than the data directory. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-1278 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
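Since the script does not perform this check for you, a quick manual test on the node can help. One possible sketch, assuming the daemon was started with the usual CassandraDaemon main class (adjust the pattern to your installation)::

    pgrep -f CassandraDaemon > /dev/null && echo "Cassandra appears to be running" || echo "no Cassandra process found"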
- -Usage -^^^^^ - -sstableloader - -=================================================== ================================================================================ --d, --nodes Required. Try to connect to these hosts (comma-separated) - initially for ring information --u, --username username for Cassandra authentication --pw, --password password for Cassandra authentication --p, --port port used for native connection (default 9042) --sp, --storage-port port used for internode communication (default 7000) --ssp, --ssl-storage-port port used for TLS internode communication (default 7001) ---no-progress don't display progress --t, --throttle throttle speed in Mbits (default unlimited) --idct, --inter-dc-throttle inter-datacenter throttle speed in Mbits (default unlimited) --cph, --connections-per-host number of concurrent connections-per-host --i, --ignore don't stream to this (comma separated) list of nodes --alg, --ssl-alg Client SSL: algorithm (default: SunX509) --ciphers, --ssl-ciphers Client SSL: comma-separated list of encryption suites to use --ks, --keystore Client SSL: full path to keystore --kspw, --keystore-password Client SSL: password of the keystore --st, --store-type Client SSL: type of store --ts, --truststore Client SSL: full path to truststore --tspw, --truststore-password Client SSL: password of the truststore --prtcl, --ssl-protocol Client SSL: connections protocol to use (default: TLS) --ap, --auth-provider custom AuthProvider class name for cassandra authentication --f, --conf-path cassandra.yaml file path for streaming throughput and client/server SSL --v, --verbose verbose output --h, --help display this help message -=================================================== ================================================================================ - -You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options. - -Load sstables from a Snapshot -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Copy the snapshot sstables into an accessible directory and use sstableloader to restore them. - -Example:: - - cp snapshots/1535397029191/* /path/to/keyspace1/standard1/ - - sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4700000 - Total duration (ms): : 4390 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -The -d or --nodes option is required, or the script will not run. 
- -Example:: - - sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Initial hosts must be specified (-d) - -Use a Config File for SSL Clusters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool to the cassandra.yaml with the relevant server_encryption_options (e.g., truststore location, algorithm). This will work better than passing individual ssl options shown above to sstableloader on the command line. - -Example:: - - sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 9.165KiB/s (avg: 9.165KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 5.147MiB/s (avg: 18.299KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 9.751MiB/s (avg: 27.423KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 8.203MiB/s (avg: 36.524KiB/s) - ... - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 9356 ms - Average transfer rate : 480.105KiB/s - Peak transfer rate : 586.410KiB/s - -Hide Progress Output -^^^^^^^^^^^^^^^^^^^^ - -To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option. - -Example:: - - sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2] - -Get More Detail -^^^^^^^^^^^^^^^ - -Using the --verbose option will provide much more progress output. 
- -Example:: - - sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 12.056KiB/s (avg: 12.056KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 9.092MiB/s (avg: 24.081KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 18.832MiB/s (avg: 36.099KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 2.253MiB/s (avg: 47.882KiB/s) - progress: [/172.17.0.2]0:0/1 7 % total: 7% 6.388MiB/s (avg: 59.743KiB/s) - progress: [/172.17.0.2]0:0/1 8 % total: 8% 14.606MiB/s (avg: 71.635KiB/s) - progress: [/172.17.0.2]0:0/1 9 % total: 9% 8.880MiB/s (avg: 83.465KiB/s) - progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s) - progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s) - progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s) - progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s) - progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s) - progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s) - progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s) - progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s) - progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s) - progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s) - progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s) - progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s) - progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s) - progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s) - progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s) - progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s) - progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s) - progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s) - progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s) - progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s) - progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s) - progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s) - progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s) - progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s) - progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s) - progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s) - progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s) - progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s) - progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s) - progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s) - progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s) - progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s) - progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s) - progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s 
(avg: 462.545KiB/s) - progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s) - progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s) - progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s) - progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s) - progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s) - progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s) - progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s) - progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s) - progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s) - progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s) - progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s) - progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s) - progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s) - progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s) - progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s) - progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s) - progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s) - progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s) - progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s) - progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s) - progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s) - progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s) - progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s) - progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s) - progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s) - progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s) - progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s) - progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s) - progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 6706 ms - Average transfer rate : 669.835KiB/s - Peak transfer rate : 767.802KiB/s - - -Throttling Load -^^^^^^^^^^^^^^^ - -To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below. 
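The longer duration is roughly what the throttle predicts: the example below transfers 4595705 bytes at --throttle 1, i.e. about one megabit per second, which works out to a little under 37 seconds. A back-of-the-envelope check, treating one megabit as 1,000,000 bits::

    echo $(( 4595705 * 8 / 1000000 ))   # bits transferred / throttle rate; prints 36 (seconds, approximately)

This is close to the total duration of 37634 ms reported below.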
- -Example:: - - sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 0 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 37634 - Average transfer rate (MB/s): : 0 - Peak transfer rate (MB/s): : 0 - -Speeding up Load -^^^^^^^^^^^^^^^^ - -To speed up the load process, the number of connections per host can be increased. - -Example:: - - sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 100 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 3486 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -This small data set doesn't benefit much from the increase in connections per host, but note that the total duration has decreased in this example. - - - - - - - - - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablemetadata.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstablemetadata.rst.txt deleted file mode 100644 index 0a7a42211..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablemetadata.rst.txt +++ /dev/null @@ -1,300 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablemetadata ---------------- - -Print information about an sstable from the related Statistics.db and Summary.db files to standard output. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
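A table will often have more than one sstable on disk; the metadata for all of them can be printed in one pass by feeding the sstableutil listing into sstablemetadata. A rough sketch, reusing the keyspace and table names from the example further down (illustrative only)::

    sstableutil keyspace1 standard1 | grep Data | while read -r f; do sstablemetadata "$f"; done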
- -Usage -^^^^^ - -sstablemetadata - -========================= ================================================================================ ---gc_grace_seconds The gc_grace_seconds to use when calculating droppable tombstones -========================= ================================================================================ - -Print all the metadata -^^^^^^^^^^^^^^^^^^^^^^ - -Run sstablemetadata against the *Data.db file(s) related to a table. If necessary, find the *Data.db file(s) using sstableutil. - -Example:: - - sstableutil keyspace1 standard1 | grep Data - /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - SSTable: /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big - Partitioner: org.apache.cassandra.dht.Murmur3Partitioner - Bloom Filter FP chance: 0.010000 - Minimum timestamp: 1535025576141000 - Maximum timestamp: 1535025604309000 - SSTable min local deletion time: 2147483647 - SSTable max local deletion time: 2147483647 - Compressor: org.apache.cassandra.io.compress.LZ4Compressor - TTL min: 86400 - TTL max: 86400 - First token: -9223004712949498654 (key=39373333373831303130) - Last token: 9222554117157811897 (key=4f3438394e39374d3730) - Estimated droppable tombstones: 0.9188263888888889 - SSTable Level: 0 - Repaired at: 0 - Replay positions covered: {CommitLogPosition(segmentId=1535025390651, position=226400)=CommitLogPosition(segmentId=1535025390651, position=6849139)} - totalColumnsSet: 100000 - totalRows: 20000 - Estimated tombstone drop times: - 1535039100: 80390 - 1535039160: 5645 - 1535039220: 13965 - Count Row Size Cell Count - 1 0 0 - 2 0 0 - 3 0 0 - 4 0 0 - 5 0 20000 - 6 0 0 - 7 0 0 - 8 0 0 - 10 0 0 - 12 0 0 - 14 0 0 - 17 0 0 - 20 0 0 - 24 0 0 - 29 0 0 - 35 0 0 - 42 0 0 - 50 0 0 - 60 0 0 - 72 0 0 - 86 0 0 - 103 0 0 - 124 0 0 - 149 0 0 - 179 0 0 - 215 0 0 - 258 20000 0 - 310 0 0 - 372 0 0 - 446 0 0 - 535 0 0 - 642 0 0 - 770 0 0 - 924 0 0 - 1109 0 0 - 1331 0 0 - 1597 0 0 - 1916 0 0 - 2299 0 0 - 2759 0 0 - 3311 0 0 - 3973 0 0 - 4768 0 0 - 5722 0 0 - 6866 0 0 - 8239 0 0 - 9887 0 0 - 11864 0 0 - 14237 0 0 - 17084 0 0 - 20501 0 0 - 24601 0 0 - 29521 0 0 - 35425 0 0 - 42510 0 0 - 51012 0 0 - 61214 0 0 - 73457 0 0 - 88148 0 0 - 105778 0 0 - 126934 0 0 - 152321 0 0 - 182785 0 0 - 219342 0 0 - 263210 0 0 - 315852 0 0 - 379022 0 0 - 454826 0 0 - 545791 0 0 - 654949 0 0 - 785939 0 0 - 943127 0 0 - 1131752 0 0 - 1358102 0 0 - 1629722 0 0 - 1955666 0 0 - 2346799 0 0 - 2816159 0 0 - 3379391 0 0 - 4055269 0 0 - 4866323 0 0 - 5839588 0 0 - 7007506 0 0 - 8409007 0 0 - 10090808 0 0 - 12108970 0 0 - 14530764 0 0 - 17436917 0 0 - 20924300 0 0 - 25109160 0 0 - 30130992 0 0 - 36157190 0 0 - 43388628 0 0 - 52066354 0 0 - 62479625 0 0 - 74975550 0 0 - 89970660 0 0 - 107964792 0 0 - 129557750 0 0 - 155469300 0 0 - 186563160 0 0 - 223875792 0 0 - 268650950 0 0 - 322381140 0 0 - 386857368 0 0 - 464228842 0 0 - 557074610 0 0 - 668489532 0 0 - 802187438 0 0 - 962624926 0 0 - 1155149911 0 0 - 1386179893 0 0 - 1663415872 0 0 - 1996099046 0 0 - 2395318855 0 0 - 2874382626 0 - 3449259151 0 - 4139110981 0 - 4966933177 0 - 5960319812 0 - 7152383774 0 - 8582860529 0 - 10299432635 0 - 12359319162 0 - 14831182994 0 - 17797419593 0 - 21356903512 0 - 25628284214 0 - 30753941057 0 - 36904729268 0 - 44285675122 0 - 53142810146 0 - 63771372175 0 - 76525646610 0 - 91830775932 0 - 110196931118 0 - 132236317342 0 
- 158683580810 0 - 190420296972 0 - 228504356366 0 - 274205227639 0 - 329046273167 0 - 394855527800 0 - 473826633360 0 - 568591960032 0 - 682310352038 0 - 818772422446 0 - 982526906935 0 - 1179032288322 0 - 1414838745986 0 - Estimated cardinality: 20196 - EncodingStats minTTL: 0 - EncodingStats minLocalDeletionTime: 1442880000 - EncodingStats minTimestamp: 1535025565275000 - KeyType: org.apache.cassandra.db.marshal.BytesType - ClusteringTypes: [org.apache.cassandra.db.marshal.UTF8Type] - StaticColumns: {C3:org.apache.cassandra.db.marshal.BytesType, C4:org.apache.cassandra.db.marshal.BytesType, C0:org.apache.cassandra.db.marshal.BytesType, C1:org.apache.cassandra.db.marshal.BytesType, C2:org.apache.cassandra.db.marshal.BytesType} - RegularColumns: {} - -Specify gc grace seconds -^^^^^^^^^^^^^^^^^^^^^^^^ - -To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn't access the schema directly, this is a way to more accurately estimate droppable tombstones -- for example, if you pass in gc_grace_seconds matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the curent machine time (in seconds). - -ref: https://issues.apache.org/jira/browse/CASSANDRA-12208 - -Example:: - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4 - Estimated tombstone drop times: - 1536599100: 1 - 1536599640: 1 - 1536599700: 2 - - echo $(date +%s) - 1536602005 - - # if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 4.0E-5 - - # if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 9.61111111111111E-6 - - # if gc_grace_seconds was configured at 100, none of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 0.0 - -Explanation of each value printed above -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -=================================== ================================================================================ - Value Explanation -=================================== ================================================================================ -SSTable prefix of the sstable filenames related to this sstable -Partitioner partitioner type used to distribute data across nodes; defined in cassandra.yaml -Bloom Filter FP precision of Bloom filter used in reads; defined in the table definition -Minimum timestamp minimum timestamp of any entry in this sstable, in epoch microseconds -Maximum timestamp maximum timestamp of any entry in this sstable, in epoch microseconds -SSTable min local deletion time minimum timestamp of deletion date, based on TTL, in epoch seconds -SSTable max local deletion time maximum timestamp of deletion date, based on TTL, in epoch seconds -Compressor blank (-) by 
default; if not blank, indicates type of compression enabled on the table -TTL min time-to-live in seconds; default 0 unless defined in the table definition -TTL max time-to-live in seconds; default 0 unless defined in the table definition -First token lowest token and related key found in the sstable summary -Last token highest token and related key found in the sstable summary -Estimated droppable tombstones ratio of tombstones to columns, using configured gc grace seconds if relevant -SSTable level compaction level of this sstable, if leveled compaction (LCS) is used -Repaired at the timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds -Replay positions covered the interval of time and commitlog positions related to this sstable -totalColumnsSet number of cells in the table -totalRows number of rows in the table -Estimated tombstone drop times approximate number of rows that will expire, ordered by epoch seconds -Count Row Size Cell Count two histograms in two columns; one represents distribution of Row Size - and the other represents distribution of Cell Count -Estimated cardinality an estimate of unique values, used for compaction -EncodingStats* minTTL in epoch milliseconds -EncodingStats* minLocalDeletionTime in epoch seconds -EncodingStats* minTimestamp in epoch microseconds -KeyType the type of partition key, useful in reading and writing data - from/to storage; defined in the table definition -ClusteringTypes the type of clustering key, useful in reading and writing data - from/to storage; defined in the table definition -StaticColumns a list of the shared columns in the table -RegularColumns a list of non-static, non-key columns in the table -=================================== ================================================================================ -* For the encoding stats values, the delta of this and the current epoch time is used when encoding and storing data in the most optimal way. - - - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableofflinerelevel.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstableofflinerelevel.rst.txt deleted file mode 100644 index c031d2987..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableofflinerelevel.rst.txt +++ /dev/null @@ -1,95 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableofflinerelevel ---------------------- - -When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-8301 - -The way this is done is: sstables are storted by their last token. 
Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size):: - - L3 [][][][][][][][][][][] - L2 [ ][ ][ ][ ] - L1 [ ][ ] - L0 [ ] - -It will look like this after being dropped to L0 and sorted by last token (and, to illustrate overlap, the overlapping ones are put on a new line):: - - [][][] - [ ][][][] - [ ] - [ ] - ... - -Then we start iterating from the smallest last token and add all sstables that do not cause an overlap to a level, reconstructing the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below. - -If we end up with more levels than expected, we put all sstables above the expected levels in L0; for example, the original L0 files will most likely end up in a level of their own, since they most often overlap many other sstables. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ - -sstableofflinerelevel [--dry-run] <keyspace> <table>
- -Doing a dry run -^^^^^^^^^^^^^^^ - -Use the --dry-run option to see the current level distribution and predicted level after the change. - -Example:: - - sstableofflinerelevel --dry-run keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - Potential leveling: - L0=1 - L1=1 - -Running a relevel -^^^^^^^^^^^^^^^^^ - -Example:: - - sstableofflinerelevel keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - New leveling: - L0=1 - L1=1 - -Keyspace or table not found -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If an invalid keyspace and/or table is provided, an exception will be thrown. - -Example:: - - sstableofflinerelevel --dry-run keyspace evenlog - - Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog - at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96) - - - - - - - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablerepairedset.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstablerepairedset.rst.txt deleted file mode 100644 index ebacef335..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablerepairedset.rst.txt +++ /dev/null @@ -1,79 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablerepairedset ------------------- - -Repairs can take a very long time in some environments, for large sizes of data. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can be run on only un-repaired sstables if desired. - -Note that running a repair (e.g., via nodetool repair) doesn't set the status of this metadata. Only setting the status of this metadata via this tool does. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5351 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablerepairedset --really-set [-f | ] - -=================================== ================================================================================ ---really-set required if you want to really set the status ---is-repaired set the repairedAt status to the last modified time ---is-unrepaired set the repairedAt status to 0 --f use a file containing a list of sstables as the input -=================================== ================================================================================ - -Set a lot of sstables to unrepaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are many ways to do this programmatically. This way would likely include variables for the keyspace and table. 
- -Example:: - - find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired % - -Set one to many sstables to repaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice. - -Example:: - - nodetool repair keyspace1 standard1 - find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired % - -Print metadata showing repaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -sstablemetadata can be used to view the status set or unset using this command. - -Example: - - sstablerepairedset --really-set --is-repaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at" - Repaired at: 1534443974000 - - sstablerepairedset --really-set --is-unrepaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at" - Repaired at: 0 - -Using command in a script -^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you know you ran repair 2 weeks ago, you can do something like the following:: - - sstablerepairset --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14) - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablescrub.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstablescrub.rst.txt deleted file mode 100644 index 0bbda9f32..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablescrub.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablescrub ------------- - -Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-4321 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablescrub
- -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --m,--manifest-check only check and repair the leveled manifest, without actually scrubbing the sstables --n,--no-validate do not validate columns using column validator --r,--reinsert-overflowed-ttl Rewrites rows with overflowed expiration date affected by CASSANDRA-14092 - with the maximum supported expiration date of 2038-01-19T03:14:06+00:00. The rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows. --s,--skip-corrupted skip corrupt rows in counter tables --v,--verbose verbose output -=================================== ================================================================================ - -Basic Scrub -^^^^^^^^^^^ - -The scrub without options will do a snapshot first, then write all non-corrupted files to a new sstable. - -Example:: - - sstablescrub keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped - Checking leveled manifest - -Scrub without Validation -^^^^^^^^^^^^^^^^^^^^^^^^ -ref: https://issues.apache.org/jira/browse/CASSANDRA-9406 - -Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but not corrupt. This data usually doesn not present any errors to the client. - -Example:: - - sstablescrub --no-validate keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned - -Skip Corrupted Counter Tables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5930 - -If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+. - -Example:: - - sstablescrub --skip-corrupted keyspace1 counter1 - -Dealing with Overflow Dates -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-14092 - -Using the option --reinsert-overflowed-ttl allows a rewriting of rows that had a max TTL going over the maximum (causing an overflow). - -Example:: - - sstablescrub --reinsert-overflowed-ttl keyspace1 counter1 - -Manifest Check -^^^^^^^^^^^^^^ - -As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata. - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablesplit.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstablesplit.rst.txt deleted file mode 100644 index 5386fa48b..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstablesplit.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -.. 
Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablesplit ------------- - -Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-4766 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablesplit - -=================================== ================================================================================ ---debug display stack traces --h, --help display this help message ---no-snapshot don't snapshot the sstables before splitting --s, --size maximum size in MB for the output sstables (default: 50) -=================================== ================================================================================ - -This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped. - -Split a File -^^^^^^^^^^^^ - -Split a large sstable into smaller sstables. By default, unless the option --no-snapshot is added, a snapshot will be done of the original sstable and placed in the snapshots folder. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - - Pre-split sstables snapshotted into snapshot pre-split-1533144514795 - -Split Multiple Files -^^^^^^^^^^^^^^^^^^^^ - -Wildcards can be used in the filename portion of the command to split multiple files. - -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1* - -Attempt to Split a Small File -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the file is already smaller than the split size provided, the sstable will not be split. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB) - No sstables needed splitting. - -Split a File into Specified Size -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB). Specify only the number, not the units. For example --size 50 is correct, but --size 50MB is not. 
- -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db - Pre-split sstables snapshotted into snapshot pre-split-1533144996008 - - -Split Without Snapshot -^^^^^^^^^^^^^^^^^^^^^^ - -By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it. - -Example:: - - sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db - -Note: There is no output, but you can see the results in your file system. - - - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableupgrade.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstableupgrade.rst.txt deleted file mode 100644 index 66386aca1..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableupgrade.rst.txt +++ /dev/null @@ -1,137 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableupgrade --------------- - -Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version. - -The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableupgrade
[snapshot_name] - -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --k,--keep-source do not delete the source sstables -=================================== ================================================================================ - -Rewrite tables to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Start with a set of sstables in one version of Cassandra:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables:: - - sstableupgrade keyspace1 standard1 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 13:48 backups - -rw-r--r-- 1 user wheel 292 Aug 22 13:48 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4599475 Aug 22 13:48 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:48 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 13:48 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330807 Aug 22 13:48 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 13:48 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 13:48 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 13:48 mc-2-big-TOC.txt - -Rewrite tables to the current Cassandra version, and keep tables in old version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Again, starting with a set of sstables in one version:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:: - - sstableupgrade keyspace1 standard1 -k - Found 1 sstables that need upgrading. 
- Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 14:00 backups - -rw-r--r--@ 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r--@ 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r--@ 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r--@ 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r--@ 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r--@ 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r--@ 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r--@ 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -rw-r--r-- 1 user wheel 292 Aug 22 14:01 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4596370 Aug 22 14:01 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 14:01 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 14:01 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330801 Aug 22 14:01 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 14:01 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 14:01 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 14:01 mc-2-big-TOC.txt - - -Rewrite a snapshot to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Find the snapshot name:: - - nodetool listsnapshots - - Snapshot Details: - Snapshot name Keyspace name Column family name True size Size on disk - ... - 1534962986979 keyspace1 standard1 5.85 MB 5.85 MB - -Then rewrite the snapshot:: - - sstableupgrade keyspace1 standard1 1534962986979 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete. - - - - - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableutil.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstableutil.rst.txt deleted file mode 100644 index 30becd0e0..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableutil.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -sstableutil ----------- - -List sstable files for the provided table. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-7066 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableutil <keyspace> <table>
- -=================================== ================================================================================ --c, --cleanup clean up any outstanding transactions --d, --debug display stack traces --h, --help display this help message --o, --oplog include operation logs --t, --type all (list all files, final or temporary), tmp (list temporary files only), - final (list final files only), --v, --verbose verbose output -=================================== ================================================================================ - -List all sstables -^^^^^^^^^^^^^^^^^ - -The basic command lists the sstables associated with a given keyspace/table. - -Example:: - - sstableutil keyspace eventlog - Listing files... - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt - -List only temporary sstables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `tmp` will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra. - -List only final sstables -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `final` will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option. - -Include transaction logs -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -o option will include transaction logs in the listing, in the format above. - -Clean up sstables -^^^^^^^^^^^^^^^^^ - -Using the -c option removes any transactions left over from incomplete writes or compactions. - -From the 3.0 upgrade notes: - -New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. 
Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix "add:" or "remove:". They also contain a special line "commit", only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the "add" prefix) and delete the old sstables (those with the "remove" prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first. - - - diff --git a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableverify.rst.txt b/src/doc/4.0-alpha3/_sources/tools/sstable/sstableverify.rst.txt deleted file mode 100644 index dad3f4487..000000000 --- a/src/doc/4.0-alpha3/_sources/tools/sstable/sstableverify.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableverify -------------- - -Check sstable(s) for errors or corruption, for the provided table. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5791 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableverify
- -=================================== ================================================================================ ---debug display stack traces --e, --extended extended verification --h, --help display this help message --v, --verbose verbose output -=================================== ================================================================================ - -Basic Verification -^^^^^^^^^^^^^^^^^^ - -This is the basic verification. It is not a very quick process, and uses memory. You might need to increase your memory settings if you have many sstables. - -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - -Extended Verification -^^^^^^^^^^^^^^^^^^^^^ - -During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time. - -Example:: - - root@DC1C1:/# sstableverify -e keyspace eventlog - WARN 14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully - -Corrupted File -^^^^^^^^^^^^^^ - -Corrupted files are listed if they are detected by the script. 
- -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db - -A similar (but less verbose) tool will show the suggested actions:: - - nodetool verify keyspace eventlog - error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair - - - diff --git a/src/doc/4.0-alpha3/_sources/troubleshooting/finding_nodes.rst.txt b/src/doc/4.0-alpha3/_sources/troubleshooting/finding_nodes.rst.txt deleted file mode 100644 index df5e16c93..000000000 --- a/src/doc/4.0-alpha3/_sources/troubleshooting/finding_nodes.rst.txt +++ /dev/null @@ -1,149 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Find The Misbehaving Nodes -========================== - -The first step to troubleshooting a Cassandra issue is to use error messages, -metrics and monitoring information to identify if the issue lies with the -clients or the server and if it does lie with the server find the problematic -nodes in the Cassandra cluster. The goal is to determine if this is a systemic -issue (e.g. a query pattern that affects the entire cluster) or isolated to a -subset of nodes (e.g. neighbors holding a shared token range or even a single -node with bad hardware). - -There are many sources of information that help determine where the problem -lies. Some of the most common are mentioned below. - -Client Logs and Errors ----------------------- -Clients of the cluster often leave the best breadcrumbs to follow. Perhaps -client latencies or error rates have increased in a particular datacenter -(likely eliminating other datacenter's nodes), or clients are receiving a -particular kind of error code indicating a particular kind of problem. -Troubleshooters can often rule out many failure modes just by reading the error -messages. In fact, many Cassandra error messages include the last coordinator -contacted to help operators find nodes to start with. - -Some common errors (likely culprit in parenthesis) assuming the client has -similar error names as the Datastax :ref:`drivers `: - -* ``SyntaxError`` (**client**). 
This and other ``QueryValidationException`` - indicate that the client sent a malformed request. These are rarely server - issues and usually indicate bad queries. -* ``UnavailableException`` (**server**): This means that the Cassandra - coordinator node has rejected the query as it believes that insufficent - replica nodes are available. If many coordinators are throwing this error it - likely means that there really are (typically) multiple nodes down in the - cluster and you can identify them using :ref:`nodetool status - ` If only a single coordinator is throwing this error it may - mean that node has been partitioned from the rest. -* ``OperationTimedOutException`` (**server**): This is the most frequent - timeout message raised when clients set timeouts and means that the query - took longer than the supplied timeout. This is a *client side* timeout - meaning that it took longer than the client specified timeout. The error - message will include the coordinator node that was last tried which is - usually a good starting point. This error usually indicates either - aggressive client timeout values or latent server coordinators/replicas. -* ``ReadTimeoutException`` or ``WriteTimeoutException`` (**server**): These - are raised when clients do not specify lower timeouts and there is a - *coordinator* timeouts based on the values supplied in the ``cassandra.yaml`` - configuration file. They usually indicate a serious server side problem as - the default values are usually multiple seconds. - -Metrics -------- - -If you have Cassandra :ref:`metrics ` reporting to a -centralized location such as `Graphite `_ or -`Grafana `_ you can typically use those to narrow down -the problem. At this stage narrowing down the issue to a particular -datacenter, rack, or even group of nodes is the main goal. Some helpful metrics -to look at are: - -Errors -^^^^^^ -Cassandra refers to internode messaging errors as "drops", and provided a -number of :ref:`Dropped Message Metrics ` to help narrow -down errors. If particular nodes are dropping messages actively, they are -likely related to the issue. - -Latency -^^^^^^^ -For timeouts or latency related issues you can start with :ref:`Table -Metrics ` by comparing Coordinator level metrics e.g. -``CoordinatorReadLatency`` or ``CoordinatorWriteLatency`` with their associated -replica metrics e.g. ``ReadLatency`` or ``WriteLatency``. Issues usually show -up on the ``99th`` percentile before they show up on the ``50th`` percentile or -the ``mean``. While ``maximum`` coordinator latencies are not typically very -helpful due to the exponentially decaying reservoir used internally to produce -metrics, ``maximum`` replica latencies that correlate with increased ``99th`` -percentiles on coordinators can help narrow down the problem. - -There are usually three main possibilities: - -1. Coordinator latencies are high on all nodes, but only a few node's local - read latencies are high. This points to slow replica nodes and the - coordinator's are just side-effects. This usually happens when clients are - not token aware. -2. Coordinator latencies and replica latencies increase at the - same time on the a few nodes. If clients are token aware this is almost - always what happens and points to slow replicas of a subset of token - ranges (only part of the ring). -3. Coordinator and local latencies are high on many nodes. This usually - indicates either a tipping point in the cluster capacity (too many writes or - reads per second), or a new query pattern. 
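One way to tell these cases apart (a sketch only; ``node1``/``node2``/``node3`` and ``keyspace1``/``standard1`` are placeholders, and SSH access to each node is assumed) is to pull the local latency histograms from a few replicas and compare them against the coordinator latencies you already graph::

    for host in node1 node2 node3; do
        echo "== $host =="
        ssh "$host" nodetool tablehistograms keyspace1 standard1
    done

If only one or two replicas show elevated read latency percentiles you are likely looking at case 1 or 2; if most replicas are elevated, case 3 is more likely.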
- -It's important to remember that depending on the client's load balancing -behavior and consistency levels coordinator and replica metrics may or may -not correlate. In particular if you use ``TokenAware`` policies the same -node's coordinator and replica latencies will often increase together, but if -you just use normal ``DCAwareRoundRobin`` coordinator latencies can increase -with unrelated replica node's latencies. For example: - -* ``TokenAware`` + ``LOCAL_ONE``: should always have coordinator and replica - latencies on the same node rise together -* ``TokenAware`` + ``LOCAL_QUORUM``: should always have coordinator and - multiple replica latencies rise together in the same datacenter. -* ``TokenAware`` + ``QUORUM``: replica latencies in other datacenters can - affect coordinator latencies. -* ``DCAwareRoundRobin`` + ``LOCAL_ONE``: coordinator latencies and unrelated - replica node's latencies will rise together. -* ``DCAwareRoundRobin`` + ``LOCAL_QUORUM``: different coordinator and replica - latencies will rise together with little correlation. - -Query Rates -^^^^^^^^^^^ -Sometimes the :ref:`Table ` query rate metrics can help -narrow down load issues as "small" increase in coordinator queries per second -(QPS) may correlate with a very large increase in replica level QPS. This most -often happens with ``BATCH`` writes, where a client may send a single ``BATCH`` -query that might contain 50 statements in it, which if you have 9 copies (RF=3, -three datacenters) means that every coordinator ``BATCH`` write turns into 450 -replica writes! This is why keeping ``BATCH``'s to the same partition is so -critical, otherwise you can exhaust significant CPU capacitity with a "single" -query. - - -Next Step: Investigate the Node(s) ----------------------------------- - -Once you have narrowed down the problem as much as possible (datacenter, rack -, node), login to one of the nodes using SSH and proceed to debug using -:ref:`logs `, :ref:`nodetool `, and -:ref:`os tools `. If you are not able to login you may still -have access to :ref:`logs ` and :ref:`nodetool ` -remotely. diff --git a/src/doc/4.0-alpha3/_sources/troubleshooting/index.rst.txt b/src/doc/4.0-alpha3/_sources/troubleshooting/index.rst.txt deleted file mode 100644 index 79b46d636..000000000 --- a/src/doc/4.0-alpha3/_sources/troubleshooting/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Troubleshooting -=============== - -As any distributed database does, sometimes Cassandra breaks and you will have -to troubleshoot what is going on. 
Generally speaking you can debug Cassandra -like any other distributed Java program, meaning that you have to find which -machines in your cluster are misbehaving and then isolate the problem using -logs and tools. Luckily Cassandra has a great set of introspection tools to -help you. - -These pages include a number of command examples demonstrating various -debugging and analysis techniques, mostly for Linux/Unix systems. If you don't -have access to the machines running Cassandra, or are running on Windows or -another operating system you may not be able to use the exact commands but -there are likely equivalent tools you can use. - -.. toctree:: - :maxdepth: 2 - - finding_nodes - reading_logs - use_nodetool - use_tools diff --git a/src/doc/4.0-alpha3/_sources/troubleshooting/reading_logs.rst.txt b/src/doc/4.0-alpha3/_sources/troubleshooting/reading_logs.rst.txt deleted file mode 100644 index 08f7d4da6..000000000 --- a/src/doc/4.0-alpha3/_sources/troubleshooting/reading_logs.rst.txt +++ /dev/null @@ -1,267 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _reading-logs: - -Cassandra Logs -============== -Cassandra has rich support for logging and attempts to give operators maximum -insight into the database while at the same time limiting noise to the logs. - -Common Log Files ----------------- -Cassandra has three main logs, the ``system.log``, ``debug.log`` and -``gc.log`` which hold general logging messages, debugging logging messages, and -Java garbage collection logs respectively. - -These logs by default live in ``${CASSANDRA_HOME}/logs``, but most Linux -distributions relocate logs to ``/var/log/cassandra``. Operators can tune -this location as well as what levels are logged using the provided -``logback.xml`` file. - -``system.log`` -^^^^^^^^^^^^^^ -This log is the default Cassandra log and is a good place to start any -investigation. Some examples of activities logged to this log: - -* Uncaught exceptions. These can be very useful for debugging errors. -* ``GCInspector`` messages indicating long garbage collector pauses. When long - pauses happen Cassandra will print how long and also what was the state of - the system (thread state) at the time of that pause. This can help narrow - down a capacity issue (either not enough heap or not enough spare CPU). -* Information about nodes joining and leaving the cluster as well as token - metadata (data ownership) changes. This is useful for debugging network - partitions, data movements, and more. -* Keyspace/Table creation, modification, deletion. -* ``StartupChecks`` that ensure optimal configuration of the operating system - to run Cassandra. -* Information about some background operational tasks (e.g. Index - Redistribution).
- -As with any application, looking for ``ERROR`` or ``WARN`` lines can be a -great first step:: - - $ # Search for warnings or errors in the latest system.log - $ grep 'WARN\|ERROR' system.log | tail - ... - - $ # Search for warnings or errors in all rotated system.log - $ zgrep 'WARN\|ERROR' system.log.* | less - ... - -``debug.log`` -^^^^^^^^^^^^^^ -This log contains additional debugging information that may be useful when -troubleshooting but may be much noiser than the normal ``system.log``. Some -examples of activities logged to this log: - -* Information about compactions, including when they start, which sstables - they contain, and when they finish. -* Information about memtable flushes to disk, including when they happened, - how large the flushes were, and which commitlog segments the flush impacted. - -This log can be *very* noisy, so it is highly recommended to use ``grep`` and -other log analysis tools to dive deep. For example:: - - $ # Search for messages involving a CompactionTask with 5 lines of context - $ grep CompactionTask debug.log -C 5 - ... - - $ # Look at the distribution of flush tasks per keyspace - $ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c - 6 compaction_history: - 1 test_keyspace: - 2 local: - 17 size_estimates: - 17 sstable_activity: - - -``gc.log`` -^^^^^^^^^^^^^^ -The gc log is a standard Java GC log. With the default ``jvm.options`` -settings you get a lot of valuable information in this log such as -application pause times, and why pauses happened. This may help narrow -down throughput or latency issues to a mistuned JVM. For example you can -view the last few pauses:: - - $ grep stopped gc.log.0.current | tail - 2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds - 2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds - 2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds - 2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds - 2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds - 2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds - 2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds - 2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds - 2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds - 2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds - - -This shows a lot of valuable information including how long the application -was paused (meaning zero user queries were being serviced during the e.g. 33ms -JVM pause) as well as how long it took to enter the safepoint. 
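To get a rough sense of how much total wall-clock time is being lost to safepoint pauses, the same field can be summed across the log (a small sketch that reuses the ``cut`` field position from the pipelines in this section; the column number depends on your GC log format, so adjust as needed)::

    $ # Sum all recorded pause times and count the safepoints
    $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | \
        awk '{ total += $1 } END { printf "%.2f seconds paused across %d safepoints\n", total, NR }'
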
You can use this -raw data to e.g. get the longest pauses:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current | sort -k 1 - 2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds - 2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds - 2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds - 2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds - 2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds - 2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds - 2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds - 2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds - 2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds - 2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds - -In this case any client waiting on a query would have experienced a `56ms` -latency at 17:13:41. - -Note that GC pauses are not _only_ garbage collection, although -generally speaking high pauses with fast safepoints indicate a lack of JVM heap -or mistuned JVM GC algorithm. High pauses with slow safepoints typically -indicate that the JVM is having trouble entering a safepoint which usually -indicates slow disk drives (Cassandra makes heavy use of memory mapped reads -which the JVM doesn't know could have disk latency, so the JVM safepoint logic -doesn't handle a blocking memory mapped read particularly well). - -Using these logs you can even get a pause distribution with something like -`histogram.py `_:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py - # NumSamples = 410293; Min = 0.00; Max = 11.49 - # Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498 - # each ∎ represents a count of 5470 - 0.0001 - 1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎ - 1.1496 - 2.2991 [ 15]: - 2.2991 - 3.4486 [ 5]: - 3.4486 - 4.5981 [ 1]: - 4.5981 - 5.7475 [ 5]: - 5.7475 - 6.8970 [ 9]: - 6.8970 - 8.0465 [ 1]: - 8.0465 - 9.1960 [ 0]: - 9.1960 - 10.3455 [ 0]: - 10.3455 - 11.4949 [ 2]: - -We can see in this case while we have very good average performance something -is causing multi second JVM pauses ... 
In this case it was mostly safepoint -pauses caused by slow disks:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current| sort -k 1 - 2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds - 2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds - 2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds - 2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds - 2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds - 2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds - 2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds - 2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds - 2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds - 2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds - -Sometimes reading and understanding java GC logs is hard, but you can take the -raw GC files and visualize them using tools such as `GCViewer -`_ which take the Cassandra GC log as -input and show you detailed visual information on your garbage collection -performance. This includes pause analysis as well as throughput information. -For a stable Cassandra JVM you probably want to aim for pauses less than -`200ms` and GC throughput greater than `99%` (ymmv). - -Java GC pauses are one of the leading causes of tail latency in Cassandra -(along with drive latency) so sometimes this information can be crucial -while debugging tail latency issues. - - -Getting More Information ------------------------- - -If the default logging levels are insuficient, ``nodetool`` can set higher -or lower logging levels for various packages and classes using the -``nodetool setlogginglevel`` command. Start by viewing the current levels:: - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - -Perhaps the ``Gossiper`` is acting up and we wish to enable it at ``TRACE`` -level for even more insight:: - - - $ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - org.apache.cassandra.gms.Gossiper TRACE - - $ grep TRACE debug.log | tail -2 - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating - heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ... - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local - heartbeat version 2341 greater than 2340 for 127.0.0.1:7000 - - -Note that any changes made this way are reverted on next Cassandra process -restart. 
To make the changes permanent add the appropriate rule to -``logback.xml``. - -.. code-block:: diff - - diff --git a/conf/logback.xml b/conf/logback.xml - index b2c5b10..71b0a49 100644 - --- a/conf/logback.xml - +++ b/conf/logback.xml - @@ -98,4 +98,5 @@ appender reference in the root level section below. - - - - + - - -Full Query Logger -^^^^^^^^^^^^^^^^^ - -Cassandra 4.0 additionally ships with support for full query logging. This -is a highly performant binary logging tool which captures Cassandra queries -in real time, writes them (if possible) to a log file, and ensures the total -size of the capture does not exceed a particular limit. FQL is enabled with -``nodetool`` and the logs are read with the provided ``bin/fqltool`` utility:: - - $ mkdir /var/tmp/fql_logs - $ nodetool enablefullquerylog --path /var/tmp/fql_logs - - # ... do some querying - - $ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail - Query time: 1530750927224 - Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name = - 'system_views' AND table_name = 'sstable_tasks'; - Values: - - Type: single - Protocol version: 4 - Query time: 1530750934072 - Query: select * from keyspace1.standard1 ; - Values: - - $ nodetool disablefullquerylog - -Note that if you want more information than this tool provides, there are other -live capture options available such as :ref:`packet capture `. diff --git a/src/doc/4.0-alpha3/_sources/troubleshooting/use_nodetool.rst.txt b/src/doc/4.0-alpha3/_sources/troubleshooting/use_nodetool.rst.txt deleted file mode 100644 index 5072f85d1..000000000 --- a/src/doc/4.0-alpha3/_sources/troubleshooting/use_nodetool.rst.txt +++ /dev/null @@ -1,245 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-nodetool: - -Use Nodetool -============ - -Cassandra's ``nodetool`` allows you to narrow problems from the cluster down -to a particular node and gives a lot of insight into the state of the Cassandra -process itself. There are dozens of useful commands (see ``nodetool help`` -for all the commands), but briefly some of the most useful for troubleshooting: - -.. _nodetool-status: - -Cluster Status --------------- - -You can use ``nodetool status`` to assess status of the cluster:: - - $ nodetool status - - Datacenter: dc1 - ======================= - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - UN 127.0.1.1 4.69 GiB 1 100.0% 35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e r1 - UN 127.0.1.2 4.71 GiB 1 100.0% 752e278f-b7c5-4f58-974b-9328455af73f r2 - UN 127.0.1.3 4.69 GiB 1 100.0% 9dc1a293-2cc0-40fa-a6fd-9e6054da04a7 r3 - -In this case we can see that we have three nodes in one datacenter with about -4.6GB of data each and they are all "up". 
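Note that the effective ownership numbers are only really meaningful relative to a particular replication configuration, so if ownership looks surprising it can help to run the command against a specific keyspace (the keyspace name below is just a placeholder)::

    $ nodetool status my_keyspace
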
The up/down status of a node is -independently determined by every node in the cluster, so you may have to run -``nodetool status`` on multiple nodes in a cluster to see the full view. - -You can use ``nodetool status`` plus a little grep to see which nodes are -down:: - - $ nodetool status | grep -v '^UN' - Datacenter: dc1 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - Datacenter: dc2 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - DN 127.0.0.5 105.73 KiB 1 33.3% df303ac7-61de-46e9-ac79-6e630115fd75 r1 - -In this case there are two datacenters and there is one node down in datacenter -``dc2`` and rack ``r1``. This may indicate an issue on ``127.0.0.5`` -warranting investigation. - -.. _nodetool-proxyhistograms: - -Coordinator Query Latency -------------------------- -You can view latency distributions of coordinator read and write latency -to help narrow down latency issues using ``nodetool proxyhistograms``:: - - $ nodetool proxyhistograms - Percentile Read Latency Write Latency Range Latency CAS Read Latency CAS Write Latency View Write Latency - (micros) (micros) (micros) (micros) (micros) (micros) - 50% 454.83 219.34 0.00 0.00 0.00 0.00 - 75% 545.79 263.21 0.00 0.00 0.00 0.00 - 95% 654.95 315.85 0.00 0.00 0.00 0.00 - 98% 785.94 379.02 0.00 0.00 0.00 0.00 - 99% 3379.39 2346.80 0.00 0.00 0.00 0.00 - Min 42.51 105.78 0.00 0.00 0.00 0.00 - Max 25109.16 43388.63 0.00 0.00 0.00 0.00 - -Here you can see the full latency distribution of reads, writes, range requests -(e.g. ``select * from keyspace.table``), CAS read (compare phase of CAS) and -CAS write (set phase of compare and set). These can be useful for narrowing -down high level latency problems, for example in this case if a client had a -20 millisecond timeout on their reads they might experience the occasional -timeout from this node but less than 1% (since the 99% read latency is 3.3 -milliseconds < 20 milliseconds). - -.. _nodetool-tablehistograms: - -Local Query Latency -------------------- - -If you know which table is having latency/error issues, you can use -``nodetool tablehistograms`` to get a better idea of what is happening -locally on a node:: - - $ nodetool tablehistograms keyspace table - Percentile SSTables Write Latency Read Latency Partition Size Cell Count - (micros) (micros) (bytes) - 50% 0.00 73.46 182.79 17084 103 - 75% 1.00 88.15 315.85 17084 103 - 95% 2.00 126.93 545.79 17084 103 - 98% 2.00 152.32 654.95 17084 103 - 99% 2.00 182.79 785.94 17084 103 - Min 0.00 42.51 24.60 14238 87 - Max 2.00 12108.97 17436.92 17084 103 - -This shows you percentile breakdowns particularly critical metrics. - -The first column contains how many sstables were read per logical read. A very -high number here indicates that you may have chosen the wrong compaction -strategy, e.g. ``SizeTieredCompactionStrategy`` typically has many more reads -per read than ``LeveledCompactionStrategy`` does for update heavy workloads. - -The second column shows you a latency breakdown of *local* write latency. In -this case we see that while the p50 is quite good at 73 microseconds, the -maximum latency is quite slow at 12 milliseconds. High write max latencies -often indicate a slow commitlog volume (slow to fsync) or large writes -that quickly saturate commitlog segments. - -The third column shows you a latency breakdown of *local* read latency. 
We can -see that local Cassandra reads are (as expected) slower than local writes, and -the read speed correlates highly with the number of sstables read per read. - -The fourth and fifth columns show distributions of partition size and column -count per partition. These are useful for determining if the table has on -average skinny or wide partitions and can help you isolate bad data patterns. -For example if you have a single cell that is 2 megabytes, that is probably -going to cause some heap pressure when it's read. - -.. _nodetool-tpstats: - -Threadpool State ----------------- - -You can use ``nodetool tpstats`` to view the current outstanding requests on -a particular node. This is useful for trying to find out which resource -(read threads, write threads, compaction, request response threads) the -Cassandra process lacks. For example:: - - $ nodetool tpstats - Pool Name Active Pending Completed Blocked All time blocked - ReadStage 2 0 12 0 0 - MiscStage 0 0 0 0 0 - CompactionExecutor 0 0 1940 0 0 - MutationStage 0 0 0 0 0 - GossipStage 0 0 10293 0 0 - Repair-Task 0 0 0 0 0 - RequestResponseStage 0 0 16 0 0 - ReadRepairStage 0 0 0 0 0 - CounterMutationStage 0 0 0 0 0 - MemtablePostFlush 0 0 83 0 0 - ValidationExecutor 0 0 0 0 0 - MemtableFlushWriter 0 0 30 0 0 - ViewMutationStage 0 0 0 0 0 - CacheCleanupExecutor 0 0 0 0 0 - MemtableReclaimMemory 0 0 30 0 0 - PendingRangeCalculator 0 0 11 0 0 - SecondaryIndexManagement 0 0 0 0 0 - HintsDispatcher 0 0 0 0 0 - Native-Transport-Requests 0 0 192 0 0 - MigrationStage 0 0 14 0 0 - PerDiskMemtableFlushWriter_0 0 0 30 0 0 - Sampler 0 0 0 0 0 - ViewBuildExecutor 0 0 0 0 0 - InternalResponseStage 0 0 0 0 0 - AntiEntropyStage 0 0 0 0 0 - - Message type Dropped Latency waiting in queue (micros) - 50% 95% 99% Max - READ 0 N/A N/A N/A N/A - RANGE_SLICE 0 0.00 0.00 0.00 0.00 - _TRACE 0 N/A N/A N/A N/A - HINT 0 N/A N/A N/A N/A - MUTATION 0 N/A N/A N/A N/A - COUNTER_MUTATION 0 N/A N/A N/A N/A - BATCH_STORE 0 N/A N/A N/A N/A - BATCH_REMOVE 0 N/A N/A N/A N/A - REQUEST_RESPONSE 0 0.00 0.00 0.00 0.00 - PAGED_RANGE 0 N/A N/A N/A N/A - READ_REPAIR 0 N/A N/A N/A N/A - -This command shows you all kinds of interesting statistics. The first section -shows a detailed breakdown of threadpools for each Cassandra stage, including -how many threads are current executing (Active) and how many are waiting to -run (Pending). Typically if you see pending executions in a particular -threadpool that indicates a problem localized to that type of operation. For -example if the ``RequestResponseState`` queue is backing up, that means -that the coordinators are waiting on a lot of downstream replica requests and -may indicate a lack of token awareness, or very high consistency levels being -used on read requests (for example reading at ``ALL`` ties up RF -``RequestResponseState`` threads whereas ``LOCAL_ONE`` only uses a single -thread in the ``ReadStage`` threadpool). On the other hand if you see a lot of -pending compactions that may indicate that your compaction threads cannot keep -up with the volume of writes and you may need to tune either the compaction -strategy or the ``concurrent_compactors`` or ``compaction_throughput`` options. - -The second section shows drops (errors) and latency distributions for all the -major request types. Drops are cumulative since process start, but if you -have any that indicate a serious problem as the default timeouts to qualify as -a drop are quite high (~5-10 seconds). Dropped messages often warrants further -investigation. - -.. 
_nodetool-compactionstats: - -Compaction State ----------------- - -As Cassandra is a LSM datastore, Cassandra sometimes has to compact sstables -together, which can have adverse effects on performance. In particular, -compaction uses a reasonable quantity of CPU resources, invalidates large -quantities of the OS `page cache `_, -and can put a lot of load on your disk drives. There are great -:ref:`os tools ` to determine if this is the case, but often it's a -good idea to check if compactions are even running using -``nodetool compactionstats``:: - - $ nodetool compactionstats - pending tasks: 2 - - keyspace.table: 2 - - id compaction type keyspace table completed total unit progress - 2062b290-7f3a-11e8-9358-cd941b956e60 Compaction keyspace table 21848273 97867583 bytes 22.32% - Active compaction remaining time : 0h00m04s - -In this case there is a single compaction running on the ``keyspace.table`` -table, has completed 21.8 megabytes of 97 and Cassandra estimates (based on -the configured compaction throughput) that this will take 4 seconds. You can -also pass ``-H`` to get the units in a human readable format. - -Generally each running compaction can consume a single core, but the more -you do in parallel the faster data compacts. Compaction is crucial to ensuring -good read performance so having the right balance of concurrent compactions -such that compactions complete quickly but don't take too many resources -away from query threads is very important for performance. If you notice -compaction unable to keep up, try tuning Cassandra's ``concurrent_compactors`` -or ``compaction_throughput`` options. diff --git a/src/doc/4.0-alpha3/_sources/troubleshooting/use_tools.rst.txt b/src/doc/4.0-alpha3/_sources/troubleshooting/use_tools.rst.txt deleted file mode 100644 index b1347cc6d..000000000 --- a/src/doc/4.0-alpha3/_sources/troubleshooting/use_tools.rst.txt +++ /dev/null @@ -1,542 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-os-tools: - -Diving Deep, Use External Tools -=============================== - -Machine access allows operators to dive even deeper than logs and ``nodetool`` -allow. While every Cassandra operator may have their personal favorite -toolsets for troubleshooting issues, this page contains some of the most common -operator techniques and examples of those tools. Many of these commands work -only on Linux, but if you are deploying on a different operating system you may -have access to other substantially similar tools that assess similar OS level -metrics and processes. - -JVM Tooling ------------ -The JVM ships with a number of useful tools. Some of them are useful for -debugging Cassandra issues, especially related to heap and execution stacks. 
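Most of the JVM commands below operate on the Cassandra process id. One quick way to find it, assuming the default ``CassandraDaemon`` main class and a Linux system with ``pgrep`` available, is::

    $ # Print the pid of the running Cassandra daemon
    $ pgrep -f CassandraDaemon
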
- -**NOTE**: There are two common gotchas with JVM tooling and Cassandra: - -1. By default Cassandra ships with ``-XX:+PerfDisableSharedMem`` set to prevent - long pauses (see ``CASSANDRA-9242`` and ``CASSANDRA-9483`` for details). If - you want to use JVM tooling you can instead have ``/tmp`` mounted on an in - memory ``tmpfs`` which also effectively works around ``CASSANDRA-9242``. -2. Make sure you run the tools as the same user as Cassandra is running as, - e.g. if the database is running as ``cassandra`` the tool also has to be - run as ``cassandra``, e.g. via ``sudo -u cassandra ``. - -Garbage Collection State (jstat) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you suspect heap pressure you can use ``jstat`` to dive deep into the -garbage collection state of a Cassandra process. This command is always -safe to run and yields detailed heap information including eden heap usage (E), -old generation heap usage (O), count of eden collections (YGC), time spend in -eden collections (YGCT), old/mixed generation collections (FGC) and time spent -in old/mixed generation collections (FGCT):: - - - jstat -gcutil 500ms - S0 S1 E O M CCS YGC YGCT FGC FGCT GCT - 0.00 0.00 81.53 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.94 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - -In this case we see we have a relatively healthy heap profile, with 31.16% -old generation heap usage and 83% eden. If the old generation routinely is -above 75% then you probably need more heap (assuming CMS with a 75% occupancy -threshold). If you do have such persistently high old gen that often means you -either have under-provisioned the old generation heap, or that there is too -much live data on heap for Cassandra to collect (e.g. because of memtables). -Another thing to watch for is time between young garbage collections (YGC), -which indicate how frequently the eden heap is collected. Each young gc pause -is about 20-50ms, so if you have a lot of them your clients will notice in -their high percentile latencies. - -Thread Information (jstack) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To get a point in time snapshot of exactly what Cassandra is doing, run -``jstack`` against the Cassandra PID. **Note** that this does pause the JVM for -a very brief period (<20ms).:: - - $ jstack > threaddump - - # display the threaddump - $ cat threaddump - ... 
- - # look at runnable threads - $grep RUNNABLE threaddump -B 1 - "Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000] - java.lang.Thread.State: RUNNABLE - -- - "Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - ... - - # Note that the nid is the Linux thread id - -Some of the most important information in the threaddumps are waiting/blocking -threads, including what locks or monitors the thread is blocking/waiting on. - -Basic OS Tooling ----------------- -A great place to start when debugging a Cassandra issue is understanding how -Cassandra is interacting with system resources. The following are all -resources that Cassandra makes heavy uses of: - -* CPU cores. For executing concurrent user queries -* CPU processing time. For query activity (data decompression, row merging, - etc...) -* CPU processing time (low priority). For background tasks (compaction, - streaming, etc ...) -* RAM for Java Heap. Used to hold internal data-structures and by default the - Cassandra memtables. Heap space is a crucial component of write performance - as well as generally. -* RAM for OS disk cache. Used to cache frequently accessed SSTable blocks. OS - disk cache is a crucial component of read performance. -* Disks. Cassandra cares a lot about disk read latency, disk write throughput, - and of course disk space. -* Network latency. Cassandra makes many internode requests, so network latency - between nodes can directly impact performance. -* Network throughput. Cassandra (as other databases) frequently have the - so called "incast" problem where a small request (e.g. ``SELECT * from - foo.bar``) returns a massively large result set (e.g. the entire dataset). - In such situations outgoing bandwidth is crucial. - -Often troubleshooting Cassandra comes down to troubleshooting what resource -the machine or cluster is running out of. Then you create more of that resource -or change the query pattern to make less use of that resource. - -High Level Resource Usage (top/htop) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra makes signifiant use of system resources, and often the very first -useful action is to run ``top`` or ``htop`` (`website -`_)to see the state of the machine. - -Useful things to look at: - -* System load levels. While these numbers can be confusing, generally speaking - if the load average is greater than the number of CPU cores, Cassandra - probably won't have very good (sub 100 millisecond) latencies. See - `Linux Load Averages `_ - for more information. -* CPU utilization. ``htop`` in particular can help break down CPU utilization - into ``user`` (low and normal priority), ``system`` (kernel), and ``io-wait`` - . Cassandra query threads execute as normal priority ``user`` threads, while - compaction threads execute as low priority ``user`` threads. 
High ``system`` - time could indicate problems like thread contention, and high ``io-wait`` - may indicate slow disk drives. This can help you understand what Cassandra - is spending processing resources doing. -* Memory usage. Look for which programs have the most resident memory, it is - probably Cassandra. The number for Cassandra is likely inaccurately high due - to how Linux (as of 2018) accounts for memory mapped file memory. - -.. _os-iostat: - -IO Usage (iostat) -^^^^^^^^^^^^^^^^^ -Use iostat to determine how data drives are faring, including latency -distributions, throughput, and utilization:: - - $ sudo iostat -xdm 2 - Linux 4.13.0-13-generic (hostname) 07/03/2018 _x86_64_ (8 CPU) - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.28 0.32 5.42 0.01 0.13 48.55 0.01 2.21 0.26 2.32 0.64 0.37 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 79.34 0.00 0.20 0.20 0.00 0.16 0.00 - sdc 0.34 0.27 0.76 0.36 0.01 0.02 47.56 0.03 26.90 2.98 77.73 9.21 1.03 - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.00 2.00 32.00 0.01 4.04 244.24 0.54 16.00 0.00 17.00 1.06 3.60 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 - sdc 0.00 24.50 0.00 114.00 0.00 11.62 208.70 5.56 48.79 0.00 48.79 1.12 12.80 - - -In this case we can see that ``/dev/sdc1`` is a very slow drive, having an -``await`` close to 50 milliseconds and an ``avgqu-sz`` close to 5 ios. The -drive is not particularly saturated (utilization is only 12.8%), but we should -still be concerned about how this would affect our p99 latency since 50ms is -quite long for typical Cassandra operations. That being said, in this case -most of the latency is present in writes (typically writes are more latent -than reads), which due to the LSM nature of Cassandra is often hidden from -the user. - -Important metrics to assess using iostat: - -* Reads and writes per second. These numbers will change with the workload, - but generally speaking the more reads Cassandra has to do from disk the - slower Cassandra read latencies are. Large numbers of reads per second - can be a dead giveaway that the cluster has insufficient memory for OS - page caching. -* Write throughput. Cassandra's LSM model defers user writes and batches them - together, which means that throughput to the underlying medium is the most - important write metric for Cassandra. -* Read latency (``r_await``). When Cassandra missed the OS page cache and reads - from SSTables, the read latency directly determines how fast Cassandra can - respond with the data. -* Write latency. Cassandra is less sensitive to write latency except when it - syncs the commit log. This typically enters into the very high percentiles of - write latency. - -Note that to get detailed latency breakdowns you will need a more advanced -tool such as :ref:`bcc-tools `. - -OS page Cache Usage -^^^^^^^^^^^^^^^^^^^ -As Cassandra makes heavy use of memory mapped files, the health of the -operating system's `Page Cache `_ is -crucial to performance. Start by finding how much available cache is in the -system:: - - $ free -g - total used free shared buff/cache available - Mem: 15 9 2 0 3 5 - Swap: 0 0 0 - -In this case 9GB of memory is used by user processes (Cassandra heap) and 8GB -is available for OS page cache. Of that, 3GB is actually used to cache files. -If most memory is used and unavailable to the page cache, Cassandra performance -can suffer significantly. 
This is why Cassandra starts with a reasonably small -amount of memory reserved for the heap. - -If you suspect that you are missing the OS page cache frequently you can use -advanced tools like :ref:`cachestat ` or -:ref:`vmtouch ` to dive deeper. - -Network Latency and Reliability -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Whenever Cassandra does writes or reads that involve other replicas, -``LOCAL_QUORUM`` reads for example, one of the dominant effects on latency is -network latency. When trying to debug issues with multi machine operations, -the network can be an important resource to investigate. You can determine -internode latency using tools like ``ping`` and ``traceroute`` or most -effectively ``mtr``:: - - $ mtr -nr www.google.com - Start: Sun Jul 22 13:10:28 2018 - HOST: hostname Loss% Snt Last Avg Best Wrst StDev - 1.|-- 192.168.1.1 0.0% 10 2.0 1.9 1.1 3.7 0.7 - 2.|-- 96.123.29.15 0.0% 10 11.4 11.0 9.0 16.4 1.9 - 3.|-- 68.86.249.21 0.0% 10 10.6 10.7 9.0 13.7 1.1 - 4.|-- 162.141.78.129 0.0% 10 11.5 10.6 9.6 12.4 0.7 - 5.|-- 162.151.78.253 0.0% 10 10.9 12.1 10.4 20.2 2.8 - 6.|-- 68.86.143.93 0.0% 10 12.4 12.6 9.9 23.1 3.8 - 7.|-- 96.112.146.18 0.0% 10 11.9 12.4 10.6 15.5 1.6 - 9.|-- 209.85.252.250 0.0% 10 13.7 13.2 12.5 13.9 0.0 - 10.|-- 108.170.242.238 0.0% 10 12.7 12.4 11.1 13.0 0.5 - 11.|-- 74.125.253.149 0.0% 10 13.4 13.7 11.8 19.2 2.1 - 12.|-- 216.239.62.40 0.0% 10 13.4 14.7 11.5 26.9 4.6 - 13.|-- 108.170.242.81 0.0% 10 14.4 13.2 10.9 16.0 1.7 - 14.|-- 72.14.239.43 0.0% 10 12.2 16.1 11.0 32.8 7.1 - 15.|-- 216.58.195.68 0.0% 10 25.1 15.3 11.1 25.1 4.8 - -In this example of ``mtr``, we can rapidly assess the path that your packets -are taking, as well as what their typical loss and latency are. Packet loss -typically leads to between ``200ms`` and ``3s`` of additional latency, so that -can be a common cause of latency issues. - -Network Throughput -^^^^^^^^^^^^^^^^^^ -As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is -useful to determine if network throughput is limited. One handy tool to do -this is `iftop `_ which -shows both bandwidth usage as well as connection information at a glance. 
An -example showing traffic during a stress run against a local ``ccm`` cluster:: - - $ # remove the -t for ncurses instead of pure text - $ sudo iftop -nNtP -i lo - interface: lo - IP address is: 127.0.0.1 - MAC address is: 00:00:00:00:00:00 - Listening on lo - # Host name (port/service if enabled) last 2s last 10s last 40s cumulative - -------------------------------------------------------------------------------------------- - 1 127.0.0.1:58946 => 869Kb 869Kb 869Kb 217KB - 127.0.0.3:9042 <= 0b 0b 0b 0B - 2 127.0.0.1:54654 => 736Kb 736Kb 736Kb 184KB - 127.0.0.1:9042 <= 0b 0b 0b 0B - 3 127.0.0.1:51186 => 669Kb 669Kb 669Kb 167KB - 127.0.0.2:9042 <= 0b 0b 0b 0B - 4 127.0.0.3:9042 => 3.30Kb 3.30Kb 3.30Kb 845B - 127.0.0.1:58946 <= 0b 0b 0b 0B - 5 127.0.0.1:9042 => 2.79Kb 2.79Kb 2.79Kb 715B - 127.0.0.1:54654 <= 0b 0b 0b 0B - 6 127.0.0.2:9042 => 2.54Kb 2.54Kb 2.54Kb 650B - 127.0.0.1:51186 <= 0b 0b 0b 0B - 7 127.0.0.1:36894 => 1.65Kb 1.65Kb 1.65Kb 423B - 127.0.0.5:7000 <= 0b 0b 0b 0B - 8 127.0.0.1:38034 => 1.50Kb 1.50Kb 1.50Kb 385B - 127.0.0.2:7000 <= 0b 0b 0b 0B - 9 127.0.0.1:56324 => 1.50Kb 1.50Kb 1.50Kb 383B - 127.0.0.1:7000 <= 0b 0b 0b 0B - 10 127.0.0.1:53044 => 1.43Kb 1.43Kb 1.43Kb 366B - 127.0.0.4:7000 <= 0b 0b 0b 0B - -------------------------------------------------------------------------------------------- - Total send rate: 2.25Mb 2.25Mb 2.25Mb - Total receive rate: 0b 0b 0b - Total send and receive rate: 2.25Mb 2.25Mb 2.25Mb - -------------------------------------------------------------------------------------------- - Peak rate (sent/received/total): 2.25Mb 0b 2.25Mb - Cumulative (sent/received/total): 576KB 0B 576KB - ============================================================================================ - -In this case we can see that bandwidth is fairly shared between many peers, -but if the total was getting close to the rated capacity of the NIC or was focussed -on a single client, that may indicate a clue as to what issue is occurring. - -Advanced tools --------------- -Sometimes as an operator you may need to really dive deep. This is where -advanced OS tooling can come in handy. - -.. _use-bcc-tools: - -bcc-tools -^^^^^^^^^ -Most modern Linux distributions (kernels newer than ``4.1``) support `bcc-tools -`_ for diving deep into performance problems. -First install ``bcc-tools``, e.g. via ``apt`` on Debian:: - - $ apt install bcc-tools - -Then you can use all the tools that ``bcc-tools`` contains. One of the most -useful tools is ``cachestat`` -(`cachestat examples `_) -which allows you to determine exactly how many OS page cache hits and misses -are happening:: - - $ sudo /usr/share/bcc/tools/cachestat -T 1 - TIME TOTAL MISSES HITS DIRTIES BUFFERS_MB CACHED_MB - 18:44:08 66 66 0 64 88 4427 - 18:44:09 40 40 0 75 88 4427 - 18:44:10 4353 45 4308 203 88 4427 - 18:44:11 84 77 7 13 88 4428 - 18:44:12 2511 14 2497 14 88 4428 - 18:44:13 101 98 3 18 88 4428 - 18:44:14 16741 0 16741 58 88 4428 - 18:44:15 1935 36 1899 18 88 4428 - 18:44:16 89 34 55 18 88 4428 - -In this case there are not too many page cache ``MISSES`` which indicates a -reasonably sized cache. These metrics are the most direct measurement of your -Cassandra node's "hot" dataset. If you don't have enough cache, ``MISSES`` will -be high and performance will be slow. If you have enough cache, ``MISSES`` will -be low and performance will be fast (as almost all reads are being served out -of memory). 
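If you prefer a single hit-ratio number over eyeballing the columns, the same output can be post-processed, for example (a rough sketch; the column positions assume the ``-T`` output format shown above and may differ between bcc versions)::

    $ sudo /usr/share/bcc/tools/cachestat -T 1 | \
        awk 'NR > 1 && ($3 + $4) > 0 { printf "%s hit ratio: %.1f%%\n", $1, 100 * $4 / ($3 + $4) }'
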
- -You can also measure disk latency distributions using ``biolatency`` -(`biolatency examples `_) -to get an idea of how slow Cassandra will be when reads miss the OS page Cache -and have to hit disks:: - - $ sudo /usr/share/bcc/tools/biolatency -D 10 - Tracing block device I/O... Hit Ctrl-C to end. - - - disk = 'sda' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 12 |****************************************| - 32 -> 63 : 9 |****************************** | - 64 -> 127 : 1 |*** | - 128 -> 255 : 3 |********** | - 256 -> 511 : 7 |*********************** | - 512 -> 1023 : 2 |****** | - - disk = 'sdc' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 0 | | - 32 -> 63 : 0 | | - 64 -> 127 : 41 |************ | - 128 -> 255 : 17 |***** | - 256 -> 511 : 13 |*** | - 512 -> 1023 : 2 | | - 1024 -> 2047 : 0 | | - 2048 -> 4095 : 0 | | - 4096 -> 8191 : 56 |***************** | - 8192 -> 16383 : 131 |****************************************| - 16384 -> 32767 : 9 |** | - -In this case most ios on the data drive (``sdc``) are fast, but many take -between 8 and 16 milliseconds. - -Finally ``biosnoop`` (`examples `_) -can be used to dive even deeper and see per IO latencies:: - - $ sudo /usr/share/bcc/tools/biosnoop | grep java | head - 0.000000000 java 17427 sdc R 3972458600 4096 13.58 - 0.000818000 java 17427 sdc R 3972459408 4096 0.35 - 0.007098000 java 17416 sdc R 3972401824 4096 5.81 - 0.007896000 java 17416 sdc R 3972489960 4096 0.34 - 0.008920000 java 17416 sdc R 3972489896 4096 0.34 - 0.009487000 java 17427 sdc R 3972401880 4096 0.32 - 0.010238000 java 17416 sdc R 3972488368 4096 0.37 - 0.010596000 java 17427 sdc R 3972488376 4096 0.34 - 0.011236000 java 17410 sdc R 3972488424 4096 0.32 - 0.011825000 java 17427 sdc R 3972488576 16384 0.65 - ... time passes - 8.032687000 java 18279 sdc R 10899712 122880 3.01 - 8.033175000 java 18279 sdc R 10899952 8192 0.46 - 8.073295000 java 18279 sdc R 23384320 122880 3.01 - 8.073768000 java 18279 sdc R 23384560 8192 0.46 - - -With ``biosnoop`` you see every single IO and how long they take. This data -can be used to construct the latency distributions in ``biolatency`` but can -also be used to better understand how disk latency affects performance. For -example this particular drive takes ~3ms to service a memory mapped read due to -the large default value (``128kb``) of ``read_ahead_kb``. To improve point read -performance you may may want to decrease ``read_ahead_kb`` on fast data volumes -such as SSDs while keeping the a higher value like ``128kb`` value is probably -right for HDs. There are tradeoffs involved, see `queue-sysfs -`_ docs for more -information, but regardless ``biosnoop`` is useful for understanding *how* -Cassandra uses drives. - -.. _use-vmtouch: - -vmtouch -^^^^^^^ -Sometimes it's useful to know how much of the Cassandra data files are being -cached by the OS. A great tool for answering this question is -`vmtouch `_. - -First install it:: - - $ git clone https://github.com/hoytech/vmtouch.git - $ cd vmtouch - $ make - -Then run it on the Cassandra data directory:: - - $ ./vmtouch /var/lib/cassandra/data/ - Files: 312 - Directories: 92 - Resident Pages: 62503/64308 244M/251M 97.2% - Elapsed: 0.005657 seconds - -In this case almost the entire dataset is hot in OS page Cache. Generally -speaking the percentage doesn't really matter unless reads are missing the -cache (per e.g. 
:ref:`cachestat `), in which case having -additional memory may help read performance. - -CPU Flamegraphs -^^^^^^^^^^^^^^^ -Cassandra often uses a lot of CPU, but telling *what* it is doing can prove -difficult. One of the best ways to analyze Cassandra on CPU time is to use -`CPU Flamegraphs `_ -which display in a useful way which areas of Cassandra code are using CPU. This -may help narrow down a compaction problem to a "compaction problem dropping -tombstones" or just generally help you narrow down what Cassandra is doing -while it is having an issue. To get CPU flamegraphs follow the instructions for -`Java Flamegraphs -`_. - -Generally: - -1. Enable the ``-XX:+PreserveFramePointer`` option in Cassandra's - ``jvm.options`` configuation file. This has a negligible performance impact - but allows you actually see what Cassandra is doing. -2. Run ``perf`` to get some data. -3. Send that data through the relevant scripts in the FlameGraph toolset and - convert the data into a pretty flamegraph. View the resulting SVG image in - a browser or other image browser. - -For example just cloning straight off github we first install the -``perf-map-agent`` to the location of our JVMs (assumed to be -``/usr/lib/jvm``):: - - $ sudo bash - $ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/ - $ cd /usr/lib/jvm - $ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent - $ cd perf-map-agent - $ cmake . - $ make - -Now to get a flamegraph:: - - $ git clone --depth=1 https://github.com/brendangregg/FlameGraph - $ sudo bash - $ cd FlameGraph - $ # Record traces of Cassandra and map symbols for all java processes - $ perf record -F 49 -a -g -p -- sleep 30; ./jmaps - $ # Translate the data - $ perf script > cassandra_stacks - $ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \ - ./flamegraph.pl --color=java --hash > cassandra_flames.svg - - -The resulting SVG is searchable, zoomable, and generally easy to introspect -using a browser. - -.. _packet-capture: - -Packet Capture -^^^^^^^^^^^^^^ -Sometimes you have to understand what queries a Cassandra node is performing -*right now* to troubleshoot an issue. For these times trusty packet capture -tools like ``tcpdump`` and `Wireshark -`_ can be very helpful to dissect packet captures. -Wireshark even has native `CQL support -`_ although it sometimes has -compatibility issues with newer Cassandra protocol releases. - -To get a packet capture first capture some packets:: - - $ sudo tcpdump -U -s0 -i -w cassandra.pcap -n "tcp port 9042" - -Now open it up with wireshark:: - - $ wireshark cassandra.pcap - -If you don't see CQL like statements try telling to decode as CQL by right -clicking on a packet going to 9042 -> ``Decode as`` -> select CQL from the -dropdown for port 9042. - -If you don't want to do this manually or use a GUI, you can also use something -like `cqltrace `_ to ease obtaining and -parsing CQL packet captures. diff --git a/src/doc/4.0-alpha3/_static/ajax-loader.gif b/src/doc/4.0-alpha3/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab..000000000 Binary files a/src/doc/4.0-alpha3/_static/ajax-loader.gif and /dev/null differ diff --git a/src/doc/4.0-alpha3/_static/basic.css b/src/doc/4.0-alpha3/_static/basic.css deleted file mode 100644 index 0807176ec..000000000 --- a/src/doc/4.0-alpha3/_static/basic.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. 
- * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 
450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: 
manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - 
div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_static/comment-bright.png b/src/doc/4.0-alpha3/_static/comment-bright.png deleted file mode 100644 index 15e27edb1..000000000 Binary files a/src/doc/4.0-alpha3/_static/comment-bright.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_static/comment-close.png b/src/doc/4.0-alpha3/_static/comment-close.png deleted file mode 100644 index 4d91bcf57..000000000 Binary files a/src/doc/4.0-alpha3/_static/comment-close.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_static/comment.png b/src/doc/4.0-alpha3/_static/comment.png deleted file mode 100644 index dfbc0cbd5..000000000 Binary files a/src/doc/4.0-alpha3/_static/comment.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_static/doctools.js b/src/doc/4.0-alpha3/_static/doctools.js deleted file mode 100644 index 344db17dd..000000000 --- a/src/doc/4.0-alpha3/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. 
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var bbox = span.getBBox(); - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - var parentOfText = node.parentNode.parentNode; - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? 
singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git 
a/src/doc/4.0-alpha3/_static/documentation_options.js b/src/doc/4.0-alpha3/_static/documentation_options.js deleted file mode 100644 index d28647eb8..000000000 --- a/src/doc/4.0-alpha3/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, -}; \ No newline at end of file diff --git a/src/doc/4.0-alpha3/_static/down-pressed.png b/src/doc/4.0-alpha3/_static/down-pressed.png deleted file mode 100644 index 5756c8cad..000000000 Binary files a/src/doc/4.0-alpha3/_static/down-pressed.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_static/down.png b/src/doc/4.0-alpha3/_static/down.png deleted file mode 100644 index 1b3bdad2c..000000000 Binary files a/src/doc/4.0-alpha3/_static/down.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_static/extra.css b/src/doc/4.0-alpha3/_static/extra.css deleted file mode 100644 index 715e2a850..000000000 --- a/src/doc/4.0-alpha3/_static/extra.css +++ /dev/null @@ -1,59 +0,0 @@ -div:not(.highlight) > pre { - background: #fff; - border: 1px solid #e1e4e5; - color: #404040; - margin: 1px 0 24px 0; - overflow-x: auto; - padding: 12px 12px; - font-size: 12px; -} - -a.reference.internal code.literal { - border: none; - font-size: 12px; - color: #2980B9; - padding: 0; - background: none; -} - -a.reference.internal:visited code.literal { - color: #9B59B6; - padding: 0; - background: none; -} - - -/* override table width restrictions */ -.wy-table-responsive table td, .wy-table-responsive table th { - white-space: normal; -} - -.wy-table-responsive { - margin-bottom: 24px; - max-width: 100%; - overflow: visible; -} - -table.contentstable { - margin: 0; -} - -td.rightcolumn { - padding-left: 30px; -} - -div#wipwarning { - font-size: 14px; - border: 1px solid #ecc; - color: #f66; - background: #ffe8e8; - padding: 10px 30px; - margin-bottom: 30px; -} -.content-container{ - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; - width:100%; -} diff --git a/src/doc/4.0-alpha3/_static/file.png b/src/doc/4.0-alpha3/_static/file.png deleted file mode 100644 index a858a410e..000000000 Binary files a/src/doc/4.0-alpha3/_static/file.png and /dev/null differ diff --git a/src/doc/4.0-alpha3/_static/jquery-3.2.1.js b/src/doc/4.0-alpha3/_static/jquery-3.2.1.js deleted file mode 100644 index d2d8ca479..000000000 --- a/src/doc/4.0-alpha3/_static/jquery-3.2.1.js +++ /dev/null @@ -1,10253 +0,0 @@ -/*! - * jQuery JavaScript Library v3.2.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2017-03-20T18:59Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? 
- factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.2.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. 
- each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && Array.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? 
- [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
-function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. 
- if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - disabledAncestor( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. - } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( el ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push("~="); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push(".#.+[+~]"); - } - }); - - assert(function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement("input"); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - 
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - if ( support.matchesSelector && documentIsHTML && - !compilerCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) {} - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? 
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -var risSimple = /^.[^:#\[\.,]*$/; - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Simple selector that can be filtered directly, removing non-Elements - if ( risSimple.test( qualifier ) ) { - return jQuery.filter( qualifier, elements, not ); - } - - // Complex selector, compare the two sets, removing non-Elements - qualifier = jQuery.filter( qualifier, elements ); - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; - } ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? - jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( nodeName( elem, "iframe" ) ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( jQuery.isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( jQuery.isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? 
- [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. 
- // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. - } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "
", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps 
(WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( ">tbody", elem )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( isFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl ) { - jQuery._evalUrl( node.src ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = jQuery.contains( elem.ownerDocument, elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rmargin = ( /^margin/ ); - -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - div.style.cssText = - "box-sizing:border-box;" + - "position:relative;display:block;" + - "margin:auto;border:1px;padding:1px;" + - "top:1%;width:50%"; - div.innerHTML = ""; - documentElement.appendChild( container ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = divStyle.marginLeft === "2px"; - boxSizingReliableVal = divStyle.width === "4px"; - - // Support: Android 4.0 - 4.3 only - // Some styles come back with percentage values, even though they shouldn't - div.style.marginRight = "50%"; - pixelMarginRightVal = divStyle.marginRight === "4px"; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + - "padding:0;margin-top:1px;position:absolute"; - container.appendChild( div ); - - jQuery.extend( support, { - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelMarginRight: function() { - computeStyleTests(); - return pixelMarginRightVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }, - - cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style; - -// Return a css property mapped to a potentially vendor prefixed property -function vendorPropName( name ) { - - // Shortcut for names that are not vendor prefixed - if ( name in emptyStyle ) { - return name; - } - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a property mapped along what jQuery.cssProps suggests or to -// a vendor prefixed property. -function finalPropName( name ) { - var ret = jQuery.cssProps[ name ]; - if ( !ret ) { - ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; - } - return ret; -} - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i, - val = 0; - - // If we already have the right measurement, avoid augmentation - if ( extra === ( isBorderBox ? "border" : "content" ) ) { - i = 4; - - // Otherwise initialize for horizontal or vertical properties - } else { - i = name === "width" ? 
1 : 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // At this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - - // At this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // At this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with computed style - var valueIsBorderBox, - styles = getStyles( elem ), - val = curCSS( elem, name, styles ), - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test( val ) ) { - return val; - } - - // Check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && - ( support.boxSizingReliable() || val === elem.style[ name ] ); - - // Fall back to offsetWidth/Height when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - if ( val === "auto" ) { - val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; - } - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - - // Use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - "float": "cssFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - if ( type === "number" ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
- swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - } ) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = extra && getStyles( elem ), - subtract = extra && augmentWidthOrHeight( - elem, - name, - extra, - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ); - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ name ] = value; - value = jQuery.css( elem, name ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && - ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || - jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = jQuery.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 13 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( 
restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( jQuery.isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - jQuery.proxy( result.stop, result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( type === "string" ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = value.match( rnothtmlwhite ) || []; - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, isFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 
2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; -} ); - -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - - - - -support.focusin = "onfocusin" in window; - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = jQuery.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport 
); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - 
// If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - 
contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -' - ---- -
-
- -
-
-
- -
-

Dynamo

-
-

Gossip

-
-

Todo

-

todo

-
-
-
-

Failure Detection

-
-

Todo

-

todo

-
-
-
-

Token Ring/Ranges

-
-

Todo

-

todo

-
-
-
-

Replication

-

The replication strategy of a keyspace determines which nodes are replicas for a given token range. The two main -replication strategies are SimpleStrategy and NetworkTopologyStrategy.

-
-

SimpleStrategy

-

SimpleStrategy allows a single integer replication_factor to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if replication_factor is 3, then three different nodes should store -a copy of each row.

-

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until replication_factor distinct nodes have been added to the set of replicas.

-
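As a rough illustration of the above, a keyspace using SimpleStrategy with three copies of each row could be declared as follows (the keyspace name is hypothetical, not taken from these docs):

```
CREATE KEYSPACE example_simple
    WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
```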
-
-

NetworkTopologyStrategy

-

NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your -cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier -to add new physical or virtual datacenters to the cluster later.

-

In addition to allowing the replication factor to be specified per-DC, NetworkTopologyStrategy also attempts to choose -replicas within a datacenter from different racks. If the number of racks is greater than or equal to the replication -factor for the DC, each replica will be chosen from a different rack. Otherwise, each rack will hold at least one -replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially surprising -implications. For example, if the racks do not each hold the same number of -nodes, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to -configure all nodes on a single “rack”.

-
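A minimal CQL sketch of the per-datacenter configuration described above; the keyspace and datacenter names (dc1, dc2) are hypothetical and must match the names reported by the cluster's snitch:

```
CREATE KEYSPACE example_nts
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3};
```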
-
-

Transient Replication

-

Transient replication allows you to configure a subset of replicas to only replicate data that hasn’t been incrementally -repaired. This allows you to decouple data redundancy from availability. For instance, if you have a keyspace replicated -at rf 3, and alter it to rf 5 with 2 transient replicas, you go from being able to tolerate one failed replica to being -able to tolerate two, without a corresponding increase in storage usage. This is because 3 nodes will replicate all the data -for a given token range, and the other 2 will only replicate data that hasn’t been incrementally repaired.

-

To use transient replication, you first need to enable it in cassandra.yaml. Once enabled, both SimpleStrategy and -NetworkTopologyStrategy can be configured to transiently replicate data. You configure it by specifying the replication factor -as <total_replicas>/<transient_replicas>.

-
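A hedged sketch of the rf 3 to rf 5 example above, assuming transient replication has already been enabled in cassandra.yaml; the keyspace and datacenter names are hypothetical:

```
-- 5 total replicas per datacenter, 2 of which are transient
ALTER KEYSPACE example_keyspace
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': '5/2'};
```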

Transiently replicated keyspaces only support tables created with read_repair set to NONE, and monotonic reads are not currently supported. -You also can’t use LWT, logged batches, or counters in 4.0. You will possibly never be able to use materialized views -with transiently replicated keyspaces, and probably never be able to use secondary indexes (2i) with them.

-
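For example, a table in a transiently replicated keyspace would need read repair disabled; a minimal sketch (keyspace, table, and column names are hypothetical):

```
CREATE TABLE example_keyspace.events (
    id uuid PRIMARY KEY,
    payload text
) WITH read_repair = 'NONE';
```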

Transient replication is an experimental feature that may not be ready for production use. The expected audience is experienced -users of Cassandra capable of fully validating a deployment of their particular application. That means being able to check -that operations like reads, writes, decommission, remove, rebuild, repair, and replace all work with your queries, data, -configuration, operational practices, and availability requirements.

-

It is anticipated that 4.next will support monotonic reads with transient replication as well as LWT, logged batches, and -counters.

-
-
-
-

Tunable Consistency

-

Cassandra supports a per-operation tradeoff between consistency and availability through Consistency Levels. -Essentially, an operation’s consistency level specifies how many of the replicas need to respond to the coordinator in -order to consider the operation a success.

-

The following consistency levels are available:

-
-
ONE
-
Only a single replica must respond.
-
TWO
-
Two replicas must respond.
-
THREE
-
Three replicas must respond.
-
QUORUM
-
A majority (n/2 + 1) of the replicas must respond.
-
ALL
-
All of the replicas must respond.
-
LOCAL_QUORUM
-
A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
EACH_QUORUM
-
A majority of the replicas in each datacenter must respond.
-
LOCAL_ONE
-
Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not -sent to replicas in a remote datacenter.
-
ANY
-
A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later -attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for -write operations.
-
-

Write operations are always sent to all replicas, regardless of consistency level. The consistency level simply -controls how many responses the coordinator waits for before responding to the client.

-

For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency -level, with one exception. Speculative retry may issue a redundant read request to an extra replica if the other replicas -have not responded within a specified time window.

-
-

Picking Consistency Levels

-

It is common to pick read and write consistency levels that are high enough to overlap, resulting in “strong” -consistency. This is typically expressed as W + R > RF, where W is the write consistency level, R is the -read consistency level, and RF is the replication factor. For example, if RF = 3, a QUORUM request will -require responses from at least two of the three replicas. If QUORUM is used for both writes and reads, at least -one of the replicas is guaranteed to participate in both the write and the read request, which in turn guarantees that -the latest write will be read. In a multi-datacenter environment, LOCAL_QUORUM can be used to provide a weaker but -still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter.

-

If this type of strong consistency isn’t required, lower consistency levels like ONE may be used to improve -throughput, latency, and availability.

-
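As an illustration of W + R > RF, the following cqlsh session writes and then reads at QUORUM against a hypothetical RF = 3 keyspace, so at least one replica participates in both operations (keyspace, table, and values are made up for the example):

```
CONSISTENCY QUORUM;
INSERT INTO example_keyspace.users (id, name) VALUES (42, 'alice');
SELECT name FROM example_keyspace.users WHERE id = 42;
```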
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/architecture/guarantees.html b/src/doc/4.0-alpha3/architecture/guarantees.html deleted file mode 100644 index cce28e137..000000000 --- a/src/doc/4.0-alpha3/architecture/guarantees.html +++ /dev/null @@ -1,115 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Guarantees" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/architecture/index.html b/src/doc/4.0-alpha3/architecture/index.html deleted file mode 100644 index fb48af7bd..000000000 --- a/src/doc/4.0-alpha3/architecture/index.html +++ /dev/null @@ -1,130 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Architecture" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/architecture/overview.html b/src/doc/4.0-alpha3/architecture/overview.html deleted file mode 100644 index 36dd2e13d..000000000 --- a/src/doc/4.0-alpha3/architecture/overview.html +++ /dev/null @@ -1,115 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Overview" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/architecture/storage_engine.html b/src/doc/4.0-alpha3/architecture/storage_engine.html deleted file mode 100644 index 8bb7fbcec..000000000 --- a/src/doc/4.0-alpha3/architecture/storage_engine.html +++ /dev/null @@ -1,294 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Storage Engine" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Storage Engine

-
-

CommitLog

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables.

-

All mutations are write-optimized by being stored in commitlog segments, reducing the number of seeks needed to write to disk. Commitlog segments are limited by the “commitlog_segment_size_in_mb” option; once the size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all their data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running “nodetool drain” before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup.

-
    -
  • commitlog_segment_size_in_mb: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via the max_mutation_size_in_kb setting in cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024.
  • -
-

*NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*

-

Default Value: 32

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied.

-
    -
  • commitlog_sync: may be either “periodic” or “batch.”

    -
      -
    • batch: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait “commitlog_sync_batch_window_in_ms” milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason.

      -
        -
      • commitlog_sync_batch_window_in_ms: Time to wait between “batch” fsyncs
      • -
      -

      Default Value: 2

      -
    • -
    • periodic: In periodic mode, writes are immediately ack’ed, and the CommitLog is simply synced every “commitlog_sync_period_in_ms” milliseconds.

      -
        -
      • commitlog_sync_period_in_ms: Time to wait between “periodic” fsyncs
      • -
      -

      Default Value: 10000

      -
    • -
    -
  • -
-

Default Value: batch

-

* NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period’s worth of writes, or more if the sync is delayed. If using “batch” mode, it is recommended to store commitlogs on a separate, dedicated device.

-
    -
  • commitlog_directory: This option is commented out by default. When running on magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
  • -
-

Default Value: /var/lib/cassandra/commitlog

-
    -
  • commitlog_compression: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported.
  • -
-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
    -
  • commitlog_total_space_in_mb: Total space to use for commit logs on disk.
  • -
-

If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume.

-

Default Value: 8192

-
-
-

Memtables

-

Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable SSTables. This can be triggered in several -ways:

-
    -
  • The memory usage of the memtables exceeds the configured threshold (see memtable_cleanup_threshold)
  • -
  • The CommitLog approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to -be freed
  • -
-

Memtables may be stored entirely on-heap or partially off-heap, depending on memtable_allocation_type.

-
-
-

SSTables

-

SSTables are the immutable data files that Cassandra uses for persisting data on disk.

-

As SSTables are flushed to disk from Memtables or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.

-

Each SSTable is comprised of multiple components stored in separate files:

-
-
Data.db
-
The actual data, i.e. the contents of rows.
-
Index.db
-
An index from partition keys to positions in the Data.db file. For wide partitions, this may also include an -index to rows within a partition.
-
Summary.db
-
A sampling of (by default) every 128th entry in the Index.db file.
-
Filter.db
-
A Bloom Filter of the partition keys in the SSTable.
-
CompressionInfo.db
-
Metadata about the offsets and lengths of compression chunks in the Data.db file.
-
Statistics.db
-
Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, -repair, compression, TTLs, and more.
-
Digest.crc32
-
A CRC-32 digest of the Data.db file.
-
TOC.txt
-
A plain text list of the component files for the SSTable.
-
-

Within the Data.db file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, Murmur3Partitioner, is used). Within a partition, rows are -stored in the order of their clustering keys.

-

SSTables can be optionally compressed using block-based compression.

-
-
-

SSTable Versions

-

This section was created using the following -gist -which utilized this original -source.

-

The version numbers, to date, are:

-
-

Version 0

-
    -
  • b (0.7.0): added version to sstable filenames
  • -
  • c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings
  • -
  • d (0.7.0): row size in data component becomes a long instead of int
  • -
  • e (0.7.0): stores undecorated keys in data and index components
  • -
  • f (0.7.0): switched bloom filter implementations in data component
  • -
  • g (0.8): tracks flushed-at context in metadata component
  • -
-
-
-

Version 1

-
    -
  • h (1.0): tracks max client timestamp in metadata component
  • -
  • hb (1.0.3): records compression ratio in metadata component
  • -
  • hc (1.0.4): records partitioner in metadata component
  • -
  • hd (1.0.10): includes row tombstones in maxtimestamp
  • -
  • he (1.1.3): includes ancestors generation in metadata component
  • -
  • hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782)
  • -
  • ia (1.2.0):
      -
    • column indexes are promoted to the index file
    • -
    • records estimated histogram of deletion times in tombstones
    • -
    • bloom filter (keys and columns) upgraded to Murmur3
    • -
    -
  • -
  • ib (1.2.1): tracks min client timestamp in metadata component
  • -
  • ic (1.2.5): omits per-row bloom filter of column names
  • -
-
-
-

Version 2

-
    -
  • ja (2.0.0):
      -
    • super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. We do need a major version bump however, because we should not allow streaming of super columns into this new format)
    • -
    • tracks max local deletiontime in sstable metadata
    • -
    • records bloom_filter_fp_chance in metadata component
    • -
    • remove data size and column count from data file (CASSANDRA-4180)
    • -
    • tracks max/min column values (according to comparator)
    • -
    -
  • -
  • jb (2.0.1):
      -
    • switch from crc32 to adler32 for compression checksums
    • -
    • checksum the compressed data
    • -
    -
  • -
  • ka (2.1.0):
      -
    • new Statistics.db file format
    • -
    • index summaries can be downsampled and the sampling level is persisted
    • -
    • switch uncompressed checksums to adler32
    • -
    • tracks presence of legacy (local and remote) counter shards
    • -
    -
  • -
  • la (2.2.0): new file name format
  • -
  • lb (2.2.7): commit log lower bound included
  • -
-
-
-

Version 3

-
    -
  • ma (3.0.0):
      -
    • swap bf hash order
    • -
    • store rows natively
    • -
    -
  • -
  • mb (3.0.7, 3.7): commit log lower bound included
  • -
  • mc (3.0.8, 3.9): commit log intervals included
  • -
-
-
-

Example Code

-

The following example is useful for finding all sstables that do not match the “ib” SSTable version

-
find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots"
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/bugs.html b/src/doc/4.0-alpha3/bugs.html deleted file mode 100644 index 96297107d..000000000 --- a/src/doc/4.0-alpha3/bugs.html +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Reporting Bugs" -doc-header-links: ' - - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Reporting Bugs

-

If you encounter a problem with Cassandra, the first places to ask for help are the user mailing list and the cassandra Slack room.

-

If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the Apache Cassandra JIRA. Please provide as much -detail as you can about your problem, and don’t forget to indicate which version of Cassandra you are running and in which -environment.

-

Further details on how to contribute can be found at our Contributing to Cassandra section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/configuration/cassandra_config_file.html b/src/doc/4.0-alpha3/configuration/cassandra_config_file.html deleted file mode 100644 index a39bfc87e..000000000 --- a/src/doc/4.0-alpha3/configuration/cassandra_config_file.html +++ /dev/null @@ -1,1949 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Configuring Cassandra" - -doc-title: "Cassandra Configuration File" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Cassandra Configuration File

-
-

cluster_name

-

The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another.

-

Default Value: ‘Test Cluster’

-
-
-

num_tokens

-

This defines the number of tokens randomly assigned to this node on the ring. -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens, assuming they have equal hardware capability.

-

If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below.

-

Specifying initial_token will override this setting on the node’s initial start; -on subsequent starts, this setting will apply even if initial_token is set.

-

If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations

-

Default Value: 256

-
-
-

allocate_tokens_for_keyspace

-

This option is commented out by default.

-

Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replica factor.

-

The load assigned to each node will be close to proportional to its number of -vnodes.

-

Only supported with the Murmur3Partitioner.

-

Replica factor is determined via the replication strategy used by the specified -keyspace.

-

Default Value: KEYSPACE

-
-
-

allocate_tokens_for_local_replication_factor

-

This option is commented out by default.

-

Replica factor is explicitly set, regardless of keyspace or datacenter. -This is the replica factor within the datacenter, like NTS.

-

Default Value: 3

-
-
-

initial_token

-

This option is commented out by default.

-

initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) – in which case you should provide a -comma-separated list – it’s primarily used when adding nodes to legacy clusters -that do not have vnodes enabled.

-
-
-

hinted_handoff_enabled

-

See http://wiki.apache.org/cassandra/HintedHandoff -May either be “true” or “false” to enable globally

-

Default Value: true

-
-
-

hinted_handoff_disabled_datacenters

-

This option is commented out by default.

-

When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff

-

Default Value (complex option):

-
#    - DC1
-#    - DC2
-
-
-
-
-

max_hint_window_in_ms

-

This defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again.

-

Default Value: 10800000 # 3 hours

-
-
-

hinted_handoff_throttle_in_kb

-

Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.)

-

Default Value: 1024

-
-
-

max_hints_delivery_threads

-

Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower

-

Default Value: 2

-
-
-

hints_directory

-

This option is commented out by default.

-

Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints.

-

Default Value: /var/lib/cassandra/hints

-
-
-

hints_flush_period_in_ms

-

How often hints should be flushed from the internal buffers to disk. -Will not trigger fsync.

-

Default Value: 10000

-
-
-

max_hints_file_size_in_mb

-

Maximum size for a single hints file, in megabytes.

-

Default Value: 128

-
-
-

hints_compression

-

This option is commented out by default.

-

Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

batchlog_replay_throttle_in_kb

-

Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster.

-

Default Value: 1024

-
-
-

authenticator

-

Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}.

-
    -
  • AllowAllAuthenticator performs no checks - set it to disable authentication.
  • -
  • PasswordAuthenticator relies on username/password pairs to authenticate -users. It keeps usernames and hashed passwords in system_auth.roles table. -Please increase system_auth keyspace replication factor if you use this authenticator. -If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
  • -
-

Default Value: AllowAllAuthenticator

-
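As suggested above, when switching to PasswordAuthenticator the system_auth keyspace replication factor should be raised; a hedged CQL sketch (the datacenter name dc1 is hypothetical):

```
ALTER KEYSPACE system_auth
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};
```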
-
-

authorizer

-

Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}.

-
    -
  • AllowAllAuthorizer allows any action to any user - set it to disable authorization.
  • -
  • CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllAuthorizer

-
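With CassandraAuthorizer enabled, permissions granted as follows end up in the system_auth.role_permissions table; the keyspace and role names are hypothetical:

```
GRANT SELECT ON KEYSPACE example_keyspace TO analyst;
GRANT MODIFY ON KEYSPACE example_keyspace TO app_writer;
```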
-
-

role_manager

-

Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable.

-
    -
  • CassandraRoleManager stores role data in the system_auth keyspace. Please -increase system_auth keyspace replication factor if you use this role manager.
  • -
-

Default Value: CassandraRoleManager

-
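A brief sketch of the kind of grants and memberships that CassandraRoleManager tracks (role names and password are placeholders):

```
CREATE ROLE app_writer;
CREATE ROLE app_login WITH PASSWORD = 'change_me' AND LOGIN = true;
GRANT app_writer TO app_login;
```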
-
-

network_authorizer

-

Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}.

-
    -
  • AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization.
  • -
  • CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllNetworkAuthorizer

-
-
-

roles_validity_in_ms

-

Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator.

-

Default Value: 2000

-
-
-

roles_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be non-zero as well. -Defaults to the same value as roles_validity_in_ms.

-

Default Value: 2000

-
-
-

permissions_validity_in_ms

-

Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer.

-

Default Value: 2000

-
-
-

permissions_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be non-zero as well. -Defaults to the same value as permissions_validity_in_ms.

-

Default Value: 2000

-
-
-

credentials_validity_in_ms

-

Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching.

-

Default Value: 2000

-
-
-

credentials_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be non-zero as well. -Defaults to the same value as credentials_validity_in_ms.

-

Default Value: 2000

-
-
-

partitioner

-

The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. The partitioner can NOT be -changed without reloading all data. If you are adding nodes or upgrading, -you should set this to the same partitioner that you are currently using.

-

The default partitioner is the Murmur3Partitioner. Older partitioners -such as the RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner have been included for backward compatibility only. -For new clusters, you should NOT change this value.

-

Default Value: org.apache.cassandra.dht.Murmur3Partitioner

-
-
-

data_file_directories

-

This option is commented out by default.

-

Directories where Cassandra should store data on disk. If multiple -directories are specified, Cassandra will spread data evenly across -them by partitioning the token ranges. -If not set, the default directory is $CASSANDRA_HOME/data/data.

-

Default Value (complex option):

-
#     - /var/lib/cassandra/data
-
-
-
-
-

commitlog_directory

-

This option is commented out by default. -The directory where Cassandra should store the commit log. When running on magnetic HDD, this should be a -separate spindle from the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog.

-

Default Value: /var/lib/cassandra/commitlog

-
-
-

cdc_enabled

-

Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory).

-

Default Value: false

-
-
-

cdc_raw_directory

-

This option is commented out by default.

-

CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw.

-

Default Value: /var/lib/cassandra/cdc_raw
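A minimal sketch of enabling CDC on a node while keeping the default raw directory (values are illustrative):

    cdc_enabled: true
    cdc_raw_directory: /var/lib/cassandra/cdc_raw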

-
-
-

disk_failure_policy

-

Policy for data disk failures:

-
-
die
-
shut down gossip and client transports and kill the JVM for any fs errors or -single-sstable errors, so the node can be replaced.
-
stop_paranoid
-
shut down gossip and client transports even for single-sstable errors, -kill the JVM for errors during startup.
-
stop
-
shut down gossip and client transports, leaving the node effectively dead, but -can still be inspected via JMX, kill the JVM for errors during startup.
-
best_effort
-
stop using the failed disk and respond to requests based on -remaining available sstables. This means you WILL see obsolete -data at CL.ONE!
-
ignore
-
ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-
-

Default Value: stop

-
-
-

commit_failure_policy

-

Policy for commit disk failures:

-
-
die
-
shut down the node and kill the JVM, so the node can be replaced.
-
stop
-
shut down the node, leaving the node effectively dead, but -can still be inspected via JMX.
-
stop_commit
-
shutdown the commit log, letting writes collect but -continuing to service reads, as in pre-2.0.5 Cassandra
-
ignore
-
ignore fatal errors and let the batches fail
-
-

Default Value: stop

-
-
-

prepared_statements_cache_size_mb

-

Maximum size of the native protocol prepared statement cache

-

Valid values are either “auto” (omitting the value) or a value greater than 0.

-

Note that specifying a too large value will result in long running GCs and possibly -out-of-memory errors. Keep the value at a small fraction of the heap.

-

If you constantly see “prepared statements discarded in the last minute because -cache limit reached” messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts.

-

Only change the default value if you really have more prepared statements than -fit in the cache. In most cases it is not necessary to change this value. -Constantly re-preparing statements is a performance penalty.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater
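As a sketch, leave the value empty for “auto”, or pin an explicit size in MB (the figure below is purely an example, not a recommendation):

    prepared_statements_cache_size_mb:        # empty = auto
    # prepared_statements_cache_size_mb: 100  # explicit 100 MB cap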

-
-
-

key_cache_size_in_mb

-

Maximum size of the key cache in memory.

-

Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it’s worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It’s best to only use the -row cache if you have hot rows or static rows.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.

-
-
-

key_cache_save_period

-

Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 14400 or 4 hours.

-

Default Value: 14400

-
-
-

key_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

row_cache_class_name

-

This option is commented out by default.

-

Row cache implementation class name. Available implementations:

-
-
org.apache.cassandra.cache.OHCProvider
-
Fully off-heap row cache implementation (default).
-
org.apache.cassandra.cache.SerializingCacheProvider
-
This is the row cache implementation available -in previous releases of Cassandra.
-
-

Default Value: org.apache.cassandra.cache.OHCProvider

-
-
-

row_cache_size_in_mb

-

Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory than the system can afford in the worst usual situation and leave some -headroom for OS block level cache. Never allow your system to swap.

-

Default value is 0, to disable row caching.

-

Default Value: 0

-
-
-

row_cache_save_period

-

Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 0 to disable saving the row cache.

-

Default Value: 0

-
-
-

row_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved

-

Default Value: 100

-
-
-

counter_cache_size_in_mb

-

Maximum size of the counter cache in memory.

-

Counter cache helps to reduce counter locks’ contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it’s relatively cheap.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.

-
-
-

counter_cache_save_period

-

Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Default is 7200 or 2 hours.

-

Default Value: 7200

-
-
-

counter_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

saved_caches_directory

-

This option is commented out by default.

-

saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.

-

Default Value: /var/lib/cassandra/saved_caches

-
-
-

commitlog_sync_batch_window_in_ms

-

This option is commented out by default.

-

commitlog_sync may be either “periodic”, “group”, or “batch.”

-

When in batch mode, Cassandra won’t ack writes until the commit log -has been flushed to disk. Each incoming write will trigger the flush task. -commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had -almost no value, and is being removed.

-

Default Value: 2

-
-
-

commitlog_sync_group_window_in_ms

-

This option is commented out by default.

-

group mode is similar to batch mode, where Cassandra will not ack writes -until the commit log has been flushed to disk. The difference is group -mode will wait up to commitlog_sync_group_window_in_ms between flushes.

-

Default Value: 1000

-
-
-

commitlog_sync

-

the default option is “periodic” where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds.

-

Default Value: periodic

-
-
-

commitlog_sync_period_in_ms

-

Default Value: 10000

-
-
-

periodic_commitlog_sync_lag_block_in_ms

-

This option is commented out by default.

-

When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete.

-
-
-

commitlog_segment_size_in_mb

-

The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables.

-

The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size of commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048.

-

NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024

-

Default Value: 32
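A hedged example of keeping the two settings consistent if you choose to set max_mutation_size_in_kb explicitly (numbers are illustrative):

    commitlog_segment_size_in_mb: 32
    max_mutation_size_in_kb: 16384    # half of 32 MB * 1024, per the rule above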

-
-
-

commitlog_compression

-

This option is commented out by default.

-

Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

seed_provider

-

any class that implements the SeedProvider interface and has a -constructor that takes a Map<String, String> of parameters will do.

-

Default Value (complex option):

-
# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring.  You must change this if you are running
-# multiple nodes!
-- class_name: org.apache.cassandra.locator.SimpleSeedProvider
-  parameters:
-      # seeds is actually a comma-delimited list of addresses.
-      # Ex: "<ip1>,<ip2>,<ip3>"
-      - seeds: "127.0.0.1:7000"
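For a three-node cluster the seed list might look like the following sketch (addresses are placeholders):

    seed_provider:
        - class_name: org.apache.cassandra.locator.SimpleSeedProvider
          parameters:
              - seeds: "10.0.0.1:7000,10.0.0.2:7000,10.0.0.3:7000"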
-
-
-
-
-

concurrent_reads

-

For workloads with more data than can fit in memory, Cassandra’s -bottleneck will be reads that need to fetch data from -disk. “concurrent_reads” should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -“concurrent_counter_writes”, since counter writes read the current -values before incrementing and writing them back.

-

On the other hand, since writes are almost never IO bound, the ideal -number of “concurrent_writes” is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb.

-

Default Value: 32
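Applying the rules of thumb above to a hypothetical node with 4 data drives and 16 cores would give roughly (illustrative values, not a recommendation):

    concurrent_reads: 64            # 16 * number_of_drives
    concurrent_writes: 128          # 8 * number_of_cores
    concurrent_counter_writes: 64   # counter writes follow the read rule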

-
-
-

concurrent_writes

-

Default Value: 32

-
-
-

concurrent_counter_writes

-

Default Value: 32

-
-
-

concurrent_materialized_view_writes

-

For materialized view writes, as there is a read involved, this should -be limited by the lesser of concurrent reads or concurrent writes.

-

Default Value: 32

-
-
-

file_cache_size_in_mb

-

This option is commented out by default.

-

Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as a -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed.

-

Default Value: 512

-
-
-

buffer_pool_use_heap_if_exhausted

-

This option is commented out by default.

-

Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

-

Default Value: true

-
-
-

disk_optimization_strategy

-

This option is commented out by default.

-

The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks)

-

Default Value: ssd

-
-
-

memtable_heap_space_in_mb

-

This option is commented out by default.

-

Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap.

-

Default Value: 2048

-
-
-

memtable_offheap_space_in_mb

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

memtable_cleanup_threshold

-

This option is commented out by default.

-

memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information.

-

Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load.

-

memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)

-

Default Value: 0.11

-
-
-

memtable_allocation_type

-

Specify the way Cassandra allocates and manages memtable memory. -Options are:

-
-
heap_buffers
-
on heap nio buffers
-
offheap_buffers
-
off heap (direct) nio buffers
-
offheap_objects
-
off heap objects
-
-

Default Value: heap_buffers

-
-
-

repair_session_space_in_mb

-

This option is commented out by default.

-

Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair.

-

For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.

-
-
-

commitlog_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for commit logs on disk.

-

If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume.

-

Default Value: 8192

-
-
-

memtable_flush_writers

-

This option is commented out by default.

-

This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound.

-

Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time.

-

You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory.

-

memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers.

-

Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead.

-

There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory.

-

Default Value: 2

-
-
-

cdc_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for change-data-capture logs on disk.

-

If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed.

-

The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides.

-

Default Value: 4096

-
-
-

cdc_free_space_check_interval_ms

-

This option is commented out by default.

-

When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Defaults to 250ms.

-

Default Value: 250

-
-
-

index_summary_capacity_in_mb

-

A fixed memory pool size in MB for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory.

-
-
-

index_summary_resize_interval_in_minutes

-

How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional to their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level.

-

Default Value: 60

-
-
-

trickle_fsync

-

Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters.

-

Default Value: false

-
-
-

trickle_fsync_interval_in_kb

-

Default Value: 10240
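On SSD-backed nodes the two settings are typically enabled together; a minimal sketch:

    trickle_fsync: true
    trickle_fsync_interval_in_kb: 10240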

-
-
-

storage_port

-

TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7000

-
-
-

ssl_storage_port

-

SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7001

-
-
-

listen_address

-

Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate!

-

Set listen_address OR listen_interface, not both.

-

Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be).

-

Setting listen_address to 0.0.0.0 is always wrong.

-

Default Value: localhost
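A minimal sketch; set exactly one of the two options (the address and interface below are placeholders):

    listen_address: 10.0.0.5
    # listen_interface: eth0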

-
-
-

listen_interface

-

This option is commented out by default.

-

Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth0

-
-
-

listen_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_address

-

This option is commented out by default.

-

Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address

-

Default Value: 1.2.3.4

-
-
-

listen_on_broadcast_address

-

This option is commented out by default.

-

When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2.

-

Default Value: false

-
-
-

internode_authenticator

-

This option is commented out by default.

-

Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes.

-

Default Value: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

-
-
-

start_native_transport

-

Whether to start the native transport server. -The address on which the native transport is bound is defined by rpc_address.

-

Default Value: true

-
-
-

native_transport_port

-

port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 9042

-
-
-

native_transport_port_ssl

-

This option is commented out by default. -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted.

-

Default Value: 9142
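A sketch of running encrypted and unencrypted native transport side by side; this only takes effect when client encryption is enabled in client_encryption_options:

    native_transport_port: 9042
    native_transport_port_ssl: 9142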

-
-
-

native_transport_max_threads

-

This option is commented out by default. -The maximum threads for handling requests (note that idle threads are stopped -after 30 seconds so there is no corresponding minimum setting).

-

Default Value: 128

-
-
-

native_transport_max_frame_size_in_mb

-

This option is commented out by default.

-

The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you’re changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

native_transport_frame_block_size_in_kb

-

This option is commented out by default.

-

If checksumming is enabled as a protocol option, denotes the size of the chunks into which frame -bodies will be broken and checksummed.

-

Default Value: 32

-
-
-

native_transport_max_concurrent_connections

-

This option is commented out by default.

-

The maximum number of concurrent client connections. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_max_concurrent_connections_per_ip

-

This option is commented out by default.

-

The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_allow_older_protocols

-

Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored.

-

Default Value: true

-
-
-

native_transport_idle_timeout_in_ms

-

This option is commented out by default.

-

Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period.

-

Clients may implement heartbeats by sending OPTIONS native protocol message after a timeout, which -will reset idle timeout timer on the server side. To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side.

-

Idle connection timeouts are disabled by default.

-

Default Value: 60000

-
-
-

rpc_address

-

The address or interface to bind the native transport server to.

-

Set rpc_address OR rpc_interface, not both.

-

Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node).

-

Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0.

-

For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: localhost

-
-
-

rpc_interface

-

This option is commented out by default.

-

Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth1

-
-
-

rpc_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_rpc_address

-

This option is commented out by default.

-

RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set.

-

Default Value: 1.2.3.4
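If the native transport is bound to all interfaces, a broadcast address must be provided; for example (placeholder address):

    rpc_address: 0.0.0.0
    broadcast_rpc_address: 10.0.0.5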

-
-
-

rpc_keepalive

-

enable or disable keepalive on rpc/native connections

-

Default Value: true

-
-
-

internode_send_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_rmem -and ‘man tcp’

-
-
-

internode_recv_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem

-
-
-

incremental_backups

-

Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator’s -responsibility.

-

Default Value: false

-
-
-

snapshot_before_compaction

-

Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won’t clean up the -snapshots for you. Mostly useful if you’re paranoid when there -is a data format change.

-

Default Value: false

-
-
-

auto_snapshot

-

Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop.

-

Default Value: true

-
-
-

column_index_size_in_kb

-

Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these:

-
    -
  • a smaller granularity means more index entries are generated -and looking up rows within the partition by collation column -is faster
  • -
  • but, Cassandra will keep the collation index in memory for hot -rows (as part of the key cache), so a larger granularity means -you can cache more hot rows
  • -
-

Default Value: 64

-
-
-

column_index_cache_size_in_kb

-

Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk.

-

Note that this size refers to the size of the -serialized index information and not the size of the partition.

-

Default Value: 2

-
-
-

concurrent_compactors

-

This option is commented out by default.

-

Number of simultaneous compactions to allow, NOT including -validation “compactions” for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long running compaction. The default is usually -fine and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first.

-

concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8.

-

If your data directories are backed by SSD, you should increase this -to the number of cores.

-

Default Value: 1

-
-
-

concurrent_validations

-

This option is commented out by default.

-

Number of simultaneous repair validations to allow. Default is unbounded -Values less than one are interpreted as unbounded (the default)

-

Default Value: 0

-
-
-

concurrent_materialized_view_builders

-

Number of simultaneous materialized view builder tasks to allow.

-

Default Value: 1

-
-
-

compaction_throughput_mb_per_sec

-

Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this accounts for all types -of compaction, including validation compaction.

-

Default Value: 16

-
-
-

sstable_preemptive_open_interval_in_mb

-

When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot

-

Default Value: 50

-
-
-

stream_entire_sstables

-

This option is commented out by default.

-

When enabled, permits Cassandra to zero-copy stream entire eligible -SSTables between nodes, including every component. -This speeds up the network transfer significantly subject to -throttling specified by stream_throughput_outbound_megabits_per_sec. -Enabling this will reduce the GC pressure on the sending and receiving nodes. -When unset, the default is enabled. While this feature tries to keep the -disks balanced, it cannot guarantee it. This feature will be automatically -disabled if internode encryption is enabled. Currently this can be used with -Leveled Compaction. Once CASSANDRA-14586 is fixed other compaction strategies -will benefit as well when used in combination with CASSANDRA-6696.

-

Default Value: true

-
-
-

stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

inter_dc_stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s

-

Default Value: 200

-
-
-

read_request_timeout_in_ms

-

How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

range_request_timeout_in_ms

-

How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 10000

-
-
-

write_request_timeout_in_ms

-

How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 2000

-
-
-

counter_write_request_timeout_in_ms

-

How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

cas_contention_timeout_in_ms

-

How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms.

-

Default Value: 1000

-
-
-

truncate_request_timeout_in_ms

-

How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms.

-

Default Value: 60000

-
-
-

request_timeout_in_ms

-

The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms.

-

Default Value: 10000

-
-
-

internode_application_send_queue_capacity_in_bytes

-

This option is commented out by default.

-

Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details.

-

The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000

-

The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000

-

The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000
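Taken together, the connection-level timeouts mentioned above could be uncommented as in the following sketch (values are the ones quoted in the text):

    internode_tcp_connect_timeout_in_ms: 2000
    internode_tcp_user_timeout_in_ms: 30000
    internode_application_timeout_in_ms: 30000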

-

Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received.

-

The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. So any given node may have a maximum of -N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes) -messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens -nodes should need to communicate with significant bandwidth.

-

The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, -on all links to or from a single node in the cluster. -The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, -on all links to or from any node in the cluster.

-

Default Value: 4194304 #4MiB

-
-
-

internode_application_send_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_send_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

internode_application_receive_queue_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 4194304 #4MiB

-
-
-

internode_application_receive_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_receive_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

slow_query_log_timeout_in_ms

-

How long before a node logs slow queries. Select queries that take longer than -this timeout to execute will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging.

-

Default Value: 500

-
-
-

cross_node_timeout

-

Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests.

-

Warning: before enabling this property make sure NTP is installed -and the times are synchronized between the nodes.

-

Default Value: false

-
-
-

streaming_keep_alive_period_in_secs

-

This option is commented out by default.

-

Set keep-alive period for streaming -This node will send a keep-alive message periodically with this period. -If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles the stream session times out and fails -Default value is 300s (5 minutes), which means a stalled stream -times out in 10 minutes by default

-

Default Value: 300

-
-
-

streaming_connections_per_host

-

This option is commented out by default.

-

Limit number of connections per host for streaming -Increase this when you notice that joins are CPU-bound rather than network -bound (for example a few nodes with big files).

-

Default Value: 1

-
-
-

phi_convict_threshold

-

This option is commented out by default.

-

phi value that must be reached for a host to be marked down. -most users should never need to adjust this.

-

Default Value: 8

-
-
-

endpoint_snitch

-

endpoint_snitch – Set this to a class that implements -IEndpointSnitch. The snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route -requests efficiently
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid -correlated failures. It does this by grouping machines into -“datacenters” and “racks.” Cassandra will do its best not to have -more than one replica on the same “rack” (which may not actually -be a physical location)
  • -
-

CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on “rack1” in “datacenter1”, your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new “datacenter”) and -decommissioning the old ones.

-

Out of the box, Cassandra provides:

-
-
SimpleSnitch:
-
Treats Strategy order as proximity. This can improve cache -locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack -and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via -gossip. If cassandra-topology.properties exists, it is used as a -fallback, allowing migration from the PropertyFileSnitch.
-
PropertyFileSnitch:
-
Proximity is determined by rack and data center, which are -explicitly configured in cassandra-topology.properties.
-
Ec2Snitch:
-
Appropriate for EC2 deployments in a single Region. Loads Region -and Availability Zone information from the EC2 API. The Region is -treated as the datacenter, and the Availability Zone as the rack. -Only private IPs are used, so this will not work across multiple -Regions.
-
Ec2MultiRegionSnitch:
-
Uses public IPs as broadcast_address to allow cross-region -connectivity. (Thus, you should set seed addresses to the public -IP as well.) You will need to open the storage_port or -ssl_storage_port on the public IP firewall. (For intra-Region -traffic, Cassandra will switch to the private IP after -establishing a connection.)
-
RackInferringSnitch:
-
Proximity is determined by rack and data center, which are -assumed to correspond to the 3rd and 2nd octet of each node’s IP -address, respectively. Unless this happens to match your -deployment conventions, this is best used as an example of -writing a custom Snitch class and is provided in that spirit.
-
-

You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath.

-

Default Value: SimpleSnitch
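For a production cluster the usual sketch is to switch to GossipingPropertyFileSnitch and define the local node's rack and datacenter in cassandra-rackdc.properties:

    endpoint_snitch: GossipingPropertyFileSnitch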

-
-
-

dynamic_snitch_update_interval_in_ms

-

controls how often to perform the more expensive part of host score -calculation

-

Default Value: 100

-
-
-

dynamic_snitch_reset_interval_in_ms

-

controls how often to reset all host scores, allowing a bad host to -possibly recover

-

Default Value: 600000

-
-
-

dynamic_snitch_badness_threshold

-

if set greater than zero, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest.

-

Default Value: 0.1

-
-
-

server_encryption_options

-

Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html

-

NOTE No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks

-

The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore

-

Default Value (complex option):

-
# set to true for allowing secure incoming connections
-enabled: false
-# If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port
-optional: false
-# if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used
-# during upgrade to 4.0; otherwise, set to false.
-enable_legacy_ssl_storage_port: false
-# on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true.
-internode_encryption: none
-keystore: conf/.keystore
-keystore_password: cassandra
-truststore: conf/.truststore
-truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-# require_client_auth: false
-# require_endpoint_verification: false
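A hedged sketch of enabling internode encryption between datacenters only, reusing the keystore/truststore paths from the default above (passwords are placeholders):

    server_encryption_options:
        internode_encryption: dc
        keystore: conf/.keystore
        keystore_password: cassandra
        truststore: conf/.truststore
        truststore_password: cassandra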
-
-
-
-
-

client_encryption_options

-

enable or disable client-to-server encryption.

-

Default Value (complex option):

-
enabled: false
-# If enabled and optional is set to true encrypted and unencrypted connections are handled.
-optional: false
-keystore: conf/.keystore
-keystore_password: cassandra
-# require_client_auth: false
-# Set trustore and truststore_password if require_client_auth is true
-# truststore: conf/.truststore
-# truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-
-
-
-

internode_compression

-

internode_compression controls whether traffic between nodes is -compressed. -Can be:

-
-
all
-
all traffic is compressed
-
dc
-
traffic between different datacenters is compressed
-
none
-
nothing is compressed.
-
-

Default Value: dc

-
-
-

inter_dc_tcp_nodelay

-

Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses.

-

Default Value: false

-
-
-

tracetype_query_ttl

-

TTL for different trace types used during logging of the repair process.

-

Default Value: 86400

-
-
-

tracetype_repair_ttl

-

Default Value: 604800

-
-
-

enable_user_defined_functions

-

If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.

-

Default Value: false

-
-
-

enable_scripted_user_defined_functions

-

Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with “language javascript” or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false.

-

Default Value: false

-
-
-

windows_timer_interval

-

The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals ‘clockres’ tool can confirm your system’s default -setting.

-

Default Value: 1

-
-
-

transparent_data_encryption_options

-

Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from -a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by -the “key_alias” is the only key that will be used for encrypt operations; previously used keys -can still (and should!) be in the keystore and will be used on decrypt operations -(to handle the case of key rotation).

-

It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)

-

Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints

-

Default Value (complex option):

-
enabled: false
-chunk_length_kb: 64
-cipher: AES/CBC/PKCS5Padding
-key_alias: testing:1
-# CBC IV length for AES needs to be 16 bytes (which is also the default size)
-# iv_length: 16
-key_provider:
-  - class_name: org.apache.cassandra.security.JKSKeyProvider
-    parameters:
-      - keystore: conf/.keystore
-        keystore_password: cassandra
-        store_type: JCEKS
-        key_password: cassandra
-
-
-
-
-

tombstone_warn_threshold

-
-

SAFETY THRESHOLDS #

-

When executing a scan, within or across a partition, we need to keep the -tombstones seen in memory so we can return them to the coordinator, which -will use them to make sure other replicas also know about the deleted rows. -With workloads that generate a lot of tombstones, this can cause performance -problems and even exhaust the server heap. -(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -Adjust the thresholds here if you understand the dangers and want to -scan more tombstones anyway. These thresholds may also be adjusted at runtime -using the StorageService mbean.

-

Default Value: 1000

-
-
-
-

tombstone_failure_threshold

-

Default Value: 100000

-
-
-

batch_size_warn_threshold_in_kb

-

Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability.

-

Default Value: 5

-
-
-

batch_size_fail_threshold_in_kb

-

Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.

-

Default Value: 50

-
-
-

unlogged_batch_across_partitions_warn_threshold

-

Log WARN on any batches not of type LOGGED that span across more partitions than this limit

-

Default Value: 10

-
-
-

compaction_large_partition_warning_threshold_mb

-

Log a warning when compacting partitions larger than this value

-

Default Value: 100

-
-
-

gc_log_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than 200 ms will be logged at INFO level -This threshold can be adjusted to minimize logging if necessary

-

Default Value: 200

-
-
-

gc_warn_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement. Setting to 0 -will deactivate the feature.

-

Default Value: 1000

-
-
-

max_value_size_in_mb

-

This option is commented out by default.

-

Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

back_pressure_enabled

-

Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas.

-

Default Value: false

-
-
-

back_pressure_strategy

-

The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it’s rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map<String, Object>.
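A sketch of configuring RateBasedBackPressure with the parameters described above; the fully qualified class name and parameter keys follow the shipped default, and the values are illustrative rather than tuning advice:

    back_pressure_strategy:
        - class_name: org.apache.cassandra.net.RateBasedBackPressure
          parameters:
            - high_ratio: 0.90
              factor: 5
              flow: FAST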

-
-
-

otc_coalescing_strategy

-

This option is commented out by default.

-

Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won’t notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It’s not that bare metal -doesn’t benefit from coalescing messages, it’s that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details.

-

Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.

-

Default Value: DISABLED

-
-
-

otc_coalescing_window_us

-

This option is commented out by default.

-

How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled.

-

Default Value: 200

-
-
-

otc_coalescing_enough_coalesced_messages

-

This option is commented out by default.

-

Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.

-

Default Value: 8

-
-
-

otc_backlog_expiration_interval_ms

-

This option is commented out by default.

-

How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.

-

Default Value: 200

-
-
-

ideal_consistency_level

-

This option is commented out by default.

-

Track a metric per keyspace indicating whether replication achieved the ideal consistency -level for writes without timing out. This is different from the consistency level requested by -each write which may be lower in order to facilitate availability.

-

Default Value: EACH_QUORUM

-
-
-

automatic_sstable_upgrade

-

This option is commented out by default.

-

Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the -oldest non-upgraded sstable will get upgraded to the latest version

-

Default Value: false

-
-
-

max_concurrent_automatic_sstable_upgrades

-

This option is commented out by default. -Limit the number of concurrent sstable upgrades

-

Default Value: 1

-
-
-

audit_logging_options

-

Audit logging - Logs every incoming CQL command request and authentication to a node. See the docs -on audit_logging for full details about the various configuration options.

-
-
-

full_query_logging_options

-

This option is commented out by default.

-

default options for full query logging - these can be overridden from command line when executing -nodetool enablefullquerylog

-
-
-

corrupted_tombstone_strategy

-

This option is commented out by default.

-

validate tombstones on reads and compaction -can be either “disabled”, “warn” or “exception”

-

Default Value: disabled

-
-
-

diagnostic_events_enabled

-

Diagnostic Events # -If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details -on internal state and temporal relationships across events, accessible by clients via JMX.

-

Default Value: false

-
-
-

native_transport_flush_in_batches_legacy

-

This option is commented out by default.

-

Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in -particular if you run an old kernel or have very few client connections, this option might be worth evaluating.

-

Default Value: false

-
-
-

repaired_data_tracking_for_range_reads_enabled

-

Enable tracking of repaired state of data during reads and comparison between replicas -Mismatches between the repaired sets of replicas can be characterized as either confirmed -or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair -sessions, unrepaired partition tombstones, or some other condition means that the disparity -cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation -as they may be indicative of corruption or data loss. -There are separate flags for range vs partition reads as single partition reads are only tracked -when CL > 1 and a digest mismatch occurs. Currently, range queries don’t use digests so if -enabled for range reads, all range reads will include repaired data tracking. As this adds -some overhead, operators may wish to disable it whilst still enabling it for partition reads

-

Default Value: false

-
-
-

repaired_data_tracking_for_partition_reads_enabled

-

Default Value: false

-
-
-

report_unconfirmed_repaired_data_mismatches

-
If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed mismatches will also be recorded. This is to avoid potential signal:noise issues, as unconfirmed mismatches are less actionable than confirmed ones.

-

Default Value: false

-
-
-

enable_materialized_views

-
-
EXPERIMENTAL FEATURES

-

Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use.

-

Default Value: false

-
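For illustration only: once this flag is set to true, a materialized view can be created with a statement along the following lines (the ks.users table here is a hypothetical example, not part of this reference):
CREATE MATERIALIZED VIEW ks.users_by_email AS
    SELECT * FROM ks.users
    WHERE email IS NOT NULL AND id IS NOT NULL
    PRIMARY KEY (email, id);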
-
-
-

enable_sasi_indexes

-

Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use.

-

Default Value: false

-
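As a sketch, with this option enabled a SASI index could be created as follows (hypothetical table ks.users with a text column name):
CREATE CUSTOM INDEX users_name_sasi ON ks.users (name)
    USING 'org.apache.cassandra.index.sasi.SASIIndex';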
-
-

enable_transient_replication

-

Enables creation of transiently replicated keyspaces on this node. -Transient replication is experimental and is not recommended for production use.

-

Default Value: false

-
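For example, once enabled, a keyspace with one transient replica out of three could be declared using the '<total_replicas>/<transient_replicas>' format (illustrative only):
CREATE KEYSPACE transient_ks
    WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3/1'};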
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/configuration/index.html b/src/doc/4.0-alpha3/configuration/index.html deleted file mode 100644 index aa95a611b..000000000 --- a/src/doc/4.0-alpha3/configuration/index.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

This section describes how to configure Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/contactus.html b/src/doc/4.0-alpha3/contactus.html deleted file mode 100644 index 14c103281..000000000 --- a/src/doc/4.0-alpha3/contactus.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contact us" -doc-header-links: ' - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contact us

-

You can get in touch with the Cassandra community either via the mailing lists or Slack rooms.

-
-

Mailing lists

-

The following mailing lists are available:

- -

Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe.

-
-
-

Slack

-

To chat with developers or users in real-time, join our rooms on ASF Slack:

-
    -
  • cassandra - for user questions and general discussions.
  • -
  • cassandra-dev - strictly for questions or discussions related to Cassandra development.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/appendices.html b/src/doc/4.0-alpha3/cql/appendices.html deleted file mode 100644 index 9837b781b..000000000 --- a/src/doc/4.0-alpha3/cql/appendices.html +++ /dev/null @@ -1,568 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Appendices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Appendices

-
-

Appendix A: CQL Keywords

-
CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (although a reserved keyword can be enclosed in double-quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d'être of these non-reserved keywords is convenience: a keyword is non-reserved when it was always easy for the parser to decide whether it was being used as a keyword or not.

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
KeywordReserved?
ADDyes
AGGREGATEno
ALLno
ALLOWyes
ALTERyes
ANDyes
APPLYyes
ASno
ASCyes
ASCIIno
AUTHORIZEyes
BATCHyes
BEGINyes
BIGINTno
BLOBno
BOOLEANno
BYyes
CALLEDno
CLUSTERINGno
COLUMNFAMILYyes
COMPACTno
CONTAINSno
COUNTno
COUNTERno
CREATEyes
CUSTOMno
DATEno
DECIMALno
DELETEyes
DESCyes
DESCRIBEyes
DISTINCTno
DOUBLEno
DROPyes
ENTRIESyes
EXECUTEyes
EXISTSno
FILTERINGno
FINALFUNCno
FLOATno
FROMyes
FROZENno
FULLyes
FUNCTIONno
FUNCTIONSno
GRANTyes
IFyes
INyes
INDEXyes
INETno
INFINITYyes
INITCONDno
INPUTno
INSERTyes
INTno
INTOyes
JSONno
KEYno
KEYSno
KEYSPACEyes
KEYSPACESno
LANGUAGEno
LIMITyes
LISTno
LOGINno
MAPno
MODIFYyes
NANyes
NOLOGINno
NORECURSIVEyes
NOSUPERUSERno
NOTyes
NULLyes
OFyes
ONyes
OPTIONSno
ORyes
ORDERyes
PASSWORDno
PERMISSIONno
PERMISSIONSno
PRIMARYyes
RENAMEyes
REPLACEyes
RETURNSno
REVOKEyes
ROLEno
ROLESno
SCHEMAyes
SELECTyes
SETyes
SFUNCno
SMALLINTno
STATICno
STORAGEno
STYPEno
SUPERUSERno
TABLEyes
TEXTno
TIMEno
TIMESTAMPno
TIMEUUIDno
TINYINTno
TOyes
TOKENyes
TRIGGERno
TRUNCATEyes
TTLno
TUPLEno
TYPEno
UNLOGGEDyes
UPDATEyes
USEyes
USERno
USERSno
USINGyes
UUIDno
VALUESno
VARCHARno
VARINTno
WHEREyes
WITHyes
WRITETIMEno
-
-
-

Appendix B: CQL Reserved Types

-

The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name.

- --- - - - - - - - - - - - - - - - - - - -
type
bitstring
byte
complex
enum
interval
macaddr
-
-
-

Appendix C: Dropping Compact Storage

-
Starting with version 4.0, Thrift and COMPACT STORAGE are no longer supported.

-
The 'ALTER … DROP COMPACT STORAGE' statement makes compact tables CQL-compatible, exposing the internal structure of Thrift/compact tables (see the example after the list below):

-
    -
  • CQL-created Compact Tables that have no clustering columns will expose an additional clustering column column1 with UTF8Type.
  • -
  • CQL-created Compact Tables that had no regular columns, will expose a -regular column value with BytesType.
  • -
  • For CQL-created Compact Tables, all columns originally defined as regular will become static.
  • -
  • CQL-created Compact Tables that have clustering but have no regular -columns will have an empty value column (of EmptyType)
  • -
  • SuperColumn Tables (can only be created through Thrift) will expose -a compact value map with an empty name.
  • -
  • Thrift-created Compact Tables will have types corresponding to their -Thrift definition.
  • -
-
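A minimal sketch of the statement itself, assuming a legacy table legacy_ks.legacy_table that was originally created WITH COMPACT STORAGE (the names are hypothetical):
ALTER TABLE legacy_ks.legacy_table DROP COMPACT STORAGE;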
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/changes.html b/src/doc/4.0-alpha3/cql/changes.html deleted file mode 100644 index 67b7c36b2..000000000 --- a/src/doc/4.0-alpha3/cql/changes.html +++ /dev/null @@ -1,364 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Changes

-

The following describes the changes in each version of CQL.

-
-

3.4.5

- -
-
-

3.4.4

-
    -
  • ALTER TABLE ALTER has been removed; a column’s type may not be changed after creation (CASSANDRA-12443).
  • -
  • ALTER TYPE ALTER has been removed; a field’s type may not be changed after creation (CASSANDRA-12443).
  • -
-
-
-

3.4.3

- -
-
-

3.4.2

-
    -
  • If a table has a non zero default_time_to_live, then explicitly specifying a TTL of 0 in an INSERT or -UPDATE statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels -the default_time_to_live). This wasn’t the case before and the default_time_to_live was applied even though a -TTL had been explicitly set.
  • -
  • ALTER TABLE ADD and DROP now allow multiple columns to be added/removed.
  • -
  • New PER PARTITION LIMIT option for SELECT statements (see CASSANDRA-7017).
  • -
  • User-defined functions can now instantiate UDTValue and TupleValue instances via the new UDFContext interface (see CASSANDRA-10818).
  • -
  • User-defined types may now be stored in a non-frozen form, allowing individual fields to be updated and -deleted in UPDATE statements and DELETE statements, respectively. (CASSANDRA-7423).
  • -
-
-
-

3.4.1

-
    -
  • Adds CAST functions.
  • -
-
-
-

3.4.0

-
    -
  • Support for materialized views.
  • -
  • DELETE support for inequality expressions and IN restrictions on any primary key columns.
  • -
  • UPDATE support for IN restrictions on any primary key columns.
  • -
-
-
-

3.3.1

-
    -
  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X.
  • -
-
-
-

3.3.0

-
    -
  • User-defined functions and aggregates are now supported.
  • -
  • Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings.
  • -
  • Introduces roles to supersede user-based authentication and access control.
  • -
  • New date, time, tinyint and smallint data types have been added.
  • -
  • JSON support has been added
  • -
  • Adds new time conversion functions and deprecates dateOf and unixTimestampOf.
  • -
-
-
-

3.2.0

-
    -
  • User-defined types supported.
  • -
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the -keys() function
  • -
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • -
  • Tuple types were added to hold fixed-length sets of typed positional fields.
  • -
  • DROP INDEX now supports optionally specifying a keyspace.
  • -
-
-
-

3.1.7

-
    -
  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations -of clustering columns.
  • -
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, -respectively.
  • -
-
-
-

3.1.6

-
    -
  • A new uuid() method has been added.
  • -
  • Support for DELETE ... IF EXISTS syntax.
  • -
-
-
-

3.1.5

-
    -
  • It is now possible to group clustering columns in a relation, see WHERE clauses.
  • -
  • Added support for static columns.
  • -
-
-
-

3.1.4

-
    -
  • CREATE INDEX now allows specifying options when creating CUSTOM indexes.
  • -
-
-
-

3.1.3

-
    -
  • Millisecond precision formats have been added to the timestamp parser.
  • -
-
-
-

3.1.2

-
    -
  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or keyspace/table one), you will now need to double-quote them.
  • -
-
-
-

3.1.1

-
    -
  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • -
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable -will be a list of whatever type c is.
  • -
  • It is now possible to use named bind variables (using :name instead of ?).
  • -
-
-
-

3.1.0

-
    -
  • ALTER TABLE DROP option added.
  • -
  • SELECT statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
  • -
  • CREATE statements for KEYSPACE, TABLE and INDEX now support an IF NOT EXISTS condition. Similarly, DROP statements support an IF EXISTS condition.
  • -
  • INSERT statements optionally support an IF NOT EXISTS condition and UPDATE supports IF conditions.
  • -
-
-
-

3.0.5

-
    -
  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626).
  • -
-
-
-

3.0.4

-
    -
  • Updated the syntax for custom secondary indexes.
  • -
  • Non-equality conditions on the partition key are now never supported, even for ordered partitioners, as this was not correct (the order was not that of the partition key type). Instead, the token method should always be used for range queries on the partition key (see WHERE clauses).
  • -
-
-
-

3.0.3

- -
-
-

3.0.2

-
    -
  • Type validation for the constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now more strict. See the data types section for details on which constants are allowed for which types.
  • -
  • The type validation fix of the previous point has led to the introduction of blob constants to allow the input of blobs. Do note that while the input of blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated and will be removed by a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • -
  • A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is -now also allowed in select clauses. See the section on functions for details.
  • -
-
-
-

3.0.1

-
    -
  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense that date strings are not valid timeuuid values, and it thus resulted in confusing behavior. However, the following new methods have been added to help working with timeuuid: now, minTimeuuid, maxTimeuuid, dateOf and unixTimestampOf.
  • -
  • Float constants now support the exponent notation. In other words, 4.2E10 is now a valid floating point value.
  • -
-
-
-

Versioning

-

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version.

- ---- - - - - - - - - - - - - - - - - -
versiondescription
MajorThe major version must be bumped when backward incompatible changes are introduced. This should rarely -occur.
MinorMinor version increments occur when new, but backward compatible, functionality is introduced.
PatchThe patch version is incremented when bugs are fixed.
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/ddl.html b/src/doc/4.0-alpha3/cql/ddl.html deleted file mode 100644 index 0ef8c7198..000000000 --- a/src/doc/4.0-alpha3/cql/ddl.html +++ /dev/null @@ -1,857 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Definition" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Definition

-
CQL stores data in tables, whose schema defines the layout of the data in the table, and those tables are grouped in keyspaces. A keyspace defines a number of options that apply to all the tables it contains, the most prominent of which is the replication strategy used by the keyspace. It is generally encouraged to use one keyspace per application, and thus many clusters may define only one keyspace.

-
This section describes the statements used to create, modify, and remove those keyspaces and tables.

-
-

Common definitions

-

The names of the keyspaces and tables are defined by the following grammar:

-
-keyspace_name ::=  name
-table_name    ::=  [ keyspace_name '.' ] name
-name          ::=  unquoted_name | quoted_name
-unquoted_name ::=  re('[a-zA-Z_0-9]{1, 48}')
-quoted_name   ::=  '"' unquoted_name '"'
-
-
Both keyspace and table names should be comprised of only alphanumeric characters, cannot be empty, and are limited in size to 48 characters (that limit exists mostly to avoid filenames, which may include the keyspace and table name, going over the limits of certain file systems). By default, keyspace and table names are case-insensitive (myTable is equivalent to mytable) but case sensitivity can be forced by using double-quotes ("myTable" is different from mytable).

-
Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is part of. If it is not fully-qualified, the table is assumed to be in the current keyspace (see USE statement).

-
Further, the valid names for columns are simply defined as:

-
-column_name ::=  identifier
-
-

We also define the notion of statement options for use in the following section:

-
-options ::=  option ( AND option )*
-option  ::=  identifier '=' ( identifier | constant | map_literal )
-
-
-
-

CREATE KEYSPACE

-

A keyspace is created using a CREATE KEYSPACE statement:

-
-create_keyspace_statement ::=  CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options
-
-

For instance:

-
CREATE KEYSPACE excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-    AND durable_writes = false;
-
-
-

Attempting to create a keyspace that already exists will return an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the keyspace already exists.

-

The supported options are:

- ------- - - - - - - - - - - - - - - - - - - - - - - -
namekindmandatorydefaultdescription
replicationmapyes The replication strategy and options to use for the keyspace (see -details below).
durable_writessimplenotrueWhether to use the commit log for updates on this keyspace -(disable this option at your own risk!).
-
The replication property is mandatory and must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on which replication strategy is used. By default, Cassandra supports the following 'class' values:

-
-

SimpleStrategy

-

A simple strategy that defines a replication factor for data to be spread -across the entire cluster. This is generally not a wise choice for production -because it does not respect datacenter layouts and can lead to wildly varying -query latency. For a production ready strategy, see -NetworkTopologyStrategy. SimpleStrategy supports a single mandatory argument:

- ------ - - - - - - - - - - - - - - -
sub-optiontypesincedescription
'replication_factor'intallThe number of replicas to store per range
-
-
-

NetworkTopologyStrategy

-

A production ready replication strategy that allows to set the replication -factor independently for each data-center. The rest of the sub-options are -key-value pairs where a key is a data-center name and its value is the -associated replication factor. Options:

- ------ - - - - - - - - - - - - - - - - - - - -
sub-optiontypesincedescription
'<datacenter>'intallThe number of replicas to store per range in -the provided datacenter.
'replication_factor'int4.0The number of replicas to use as a default -per datacenter if not specifically provided. -Note that this always defers to existing -definitions or explicit datacenter settings. -For example, to have three replicas per -datacenter, supply this with a value of 3.
-
Note that when ALTERing keyspaces and supplying replication_factor, auto-expansion will only add new datacenters for safety; it will not alter existing datacenters or remove any, even if they are no longer in the cluster. If you want to remove datacenters while still supplying replication_factor, explicitly zero out the datacenter you want to have zero replicas.

-

An example of auto-expanding datacenters with two datacenters: DC1 and DC2:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true;
-
-
-

An example of auto-expanding and overriding a datacenter:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true;
-
-
-

An example that excludes a datacenter while using replication_factor:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0} ;
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true;
-
-
-

If transient replication has been enabled, transient replicas can be configured for both -SimpleStrategy and NetworkTopologyStrategy by defining replication factors in the format '<total_replicas>/<transient_replicas>'

-

For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:

-
CREATE KEYSPACE some_keyspace
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1', 'DC2' : '5/2'};
-
-
-
-
-
-

USE

-
The USE statement allows you to change the current keyspace (for the connection on which it is executed). A number of objects in CQL are bound to a keyspace (tables, user-defined types, functions, …) and the current keyspace is the default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a keyspace name). A USE statement simply takes the keyspace to use as current as its argument:

-
-use_statement ::=  USE keyspace_name
-
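For instance, to make the excelsior keyspace created above the current keyspace:
USE excelsior;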
-
-
-

ALTER KEYSPACE

-

An ALTER KEYSPACE statement allows to modify the options of a keyspace:

-
-alter_keyspace_statement ::=  ALTER KEYSPACE keyspace_name WITH options
-
-

For instance:

-
ALTER KEYSPACE Excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-
-
The supported options are the same as for creating a keyspace.

-
-
-

DROP KEYSPACE

-

Dropping a keyspace can be done using the DROP KEYSPACE statement:

-
-drop_keyspace_statement ::=  DROP KEYSPACE [ IF EXISTS ] keyspace_name
-
-

For instance:

-
DROP KEYSPACE Excelsior;
-
-
-
Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, user-defined types and functions in it, and all the data contained in those tables.
-
If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-

CREATE TABLE

-

Creating a new table uses the CREATE TABLE statement:

-
-create_table_statement ::=  CREATE TABLE [ IF NOT EXISTS ] table_name
-                            '('
-                                column_definition
-                                ( ',' column_definition )*
-                                [ ',' PRIMARY KEY '(' primary_key ')' ]
-                            ')' [ WITH table_options ]
-column_definition      ::=  column_name cql_type [ STATIC ] [ PRIMARY KEY]
-primary_key            ::=  partition_key [ ',' clustering_columns ]
-partition_key          ::=  column_name
-                            | '(' column_name ( ',' column_name )* ')'
-clustering_columns     ::=  column_name ( ',' column_name )*
-table_options          ::=  COMPACT STORAGE [ AND table_options ]
-                            | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ]
-                            | options
-clustering_order       ::=  column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )*
-
-

For instance:

-
CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records';
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-CREATE TABLE loads (
-    machine inet,
-    cpu int,
-    mtime timeuuid,
-    load float,
-    PRIMARY KEY ((machine, cpu), mtime)
-) WITH CLUSTERING ORDER BY (mtime DESC);
-
-
-
A CQL table has a name and is composed of a set of rows. Creating a table amounts to defining which columns the rows will be composed of, which of those columns make up the primary key, as well as defining options for the table.

-

Attempting to create an already existing table will return an error unless the IF NOT EXISTS directive is used. If -it is used, the statement will be a no-op if the table already exists.

-
Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later using an alter statement).

-
A column_definition is primarily comprised of the name of the column defined and its type, which restricts which values are accepted for that column. Additionally, a column definition can have the following modifiers:

-
-
STATIC
-
it declares the column as being a static column.
-
PRIMARY KEY
-
it declares the column as being the sole component of the primary key of the table.
-
-

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same partition key). For instance:

-
CREATE TABLE t (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-
-INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-SELECT * FROM t;
-   pk | t | v      | s
-  ----+---+--------+-----------
-   0  | 0 | 'val0' | 'static1'
-   0  | 1 | 'val1' | 'static1'
-
-
-
As can be seen, the s value is the same (static1) for both of the rows in the partition (the partition key in that example being pk; both rows are in that same partition): the 2nd insertion has overridden the value for s.

-
The use of static columns has the following restrictions:

-
    -
  • tables with the COMPACT STORAGE option (see below) cannot use them.
  • -
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition -has only one row, and so every column is inherently static).
  • -
  • only non PRIMARY KEY columns can be static.
  • -
-
Within a table, a row is uniquely identified by its PRIMARY KEY, and hence all tables must define a PRIMARY KEY (and only one). A PRIMARY KEY definition is composed of one or more of the columns defined in the table. Syntactically, the primary key is defined by the keywords PRIMARY KEY followed by a comma-separated list of the column names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that column definition by the PRIMARY KEY keywords. The order of the columns in the primary key definition matters.

-

A CQL primary key is composed of 2 parts:

-
    -
  • the partition key part. It is the first component of the primary key definition. It can be a single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key; the smallest possible table definition is:

    -
    CREATE TABLE t (k text PRIMARY KEY);
    -
    -
    -
  • -
  • the clustering columns. Those are the columns after the first component of the primary key -definition, and the order of those columns define the clustering order.

    -
  • -
-

Some example of primary key definition are:

-
    -
  • PRIMARY KEY (a): a is the partition key and there is no clustering columns.
  • -
  • PRIMARY KEY (a, b, c) : a is the partition key and b and c are the clustering columns.
  • -
  • PRIMARY KEY ((a, b), c) : a and b compose the partition key (this is often called a composite partition -key) and c is the clustering column.
  • -
-
Within a table, CQL defines the notion of a partition. A partition is simply the set of rows that share the same value for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same partition only if they have the same values for all those partition key columns. So for instance, given the following table definition and content:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    d int,
-    PRIMARY KEY ((a, b), c, d)
-);
-
-SELECT * FROM t;
-   a | b | c | d
-  ---+---+---+---
-   0 | 0 | 0 | 0    // row 1
-   0 | 0 | 1 | 1    // row 2
-   0 | 1 | 2 | 2    // row 3
-   0 | 1 | 3 | 3    // row 4
-   1 | 1 | 4 | 4    // row 5
-
-
-

row 1 and row 2 are in the same partition, row 3 and row 4 are also in the same partition (but a -different one) and row 5 is in yet another partition.

-

Note that a table always has a partition key, and that if the table has no clustering columns, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns).

-
The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be localized together in the cluster, and it is thus important to choose your partition key wisely so that rows that need to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of nodes).
-
Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot.

-

Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done atomically and in isolation, which is not the case across partitions.

-
The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they are.

-
The clustering columns of a table define the clustering order for the partitions of that table. For a given partition, all the rows are physically ordered inside Cassandra by that clustering order. For instance, given:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    PRIMARY KEY (a, b, c)
-);
-
-SELECT * FROM t;
-   a | b | c
-  ---+---+---
-   0 | 0 | 4     // row 1
-   0 | 1 | 9     // row 2
-   0 | 2 | 2     // row 3
-   0 | 3 | 3     // row 4
-
-
-
then the rows (which all belong to the same partition) are all stored internally in the order of the values of their b column (the order they are displayed above). So where the partition key of the table allows grouping rows on the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the retrieval of a range of rows within a partition (for instance, in the example above, SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3) to be very efficient.

-

A CQL table has a number of options that can be set at creation (and, for most of them, altered later). These options are specified after the WITH keyword.

-

Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the COMPACT STORAGE option and the CLUSTERING ORDER option. Those, as well as the other -options of a table are described in the following sections.

-
-

Warning

-
Since Cassandra 3.0, compact tables have the exact same internal layout as non-compact ones (for the same schema, obviously), and declaring a table compact only creates artificial limitations on the table definition and usage. It only exists for historical reasons and is preserved for backward compatibility. As COMPACT STORAGE cannot, as of Cassandra 4.0-alpha3, be removed, it is strongly discouraged to create new tables with the COMPACT STORAGE option.

-
-
A compact table is one defined with the COMPACT STORAGE option. This option is only maintained for backward compatibility for definitions created before CQL version 3 and shouldn't be used for new tables. Declaring a table with this option creates limitations for the table which are largely arbitrary (and exist for historical reasons). Amongst those limitations:

-
    -
  • a compact table cannot use collections nor static columns.
  • -
  • if a compact table has at least one clustering column, then it must have exactly one column outside of the primary key ones. This implies, in particular, that you cannot add or remove columns after creation.
  • -
  • a compact table is limited in the indexes it can create, and no materialized view can be created on it.
  • -
-
The clustering order of a table is defined by the clustering columns of that table. By default, that ordering is based on the natural order of those clustering columns, but the CLUSTERING ORDER option allows changing that clustering order to use the reverse natural order for some (potentially all) of the columns.

-
The CLUSTERING ORDER option takes a comma-separated list of the clustering columns, each with an ASC (for ascending, i.e. the natural order) or DESC (for descending, i.e. the reverse natural order) modifier. Note in particular that the default (if the CLUSTERING ORDER option is not used) is strictly equivalent to using the option with all clustering columns using the ASC modifier.

-
Note that this option is basically a hint for the storage engine to change the order in which it stores the rows, but it has three visible consequences (a small sketch follows this list):
-
1. It limits which ORDER BY clauses are allowed for SELECTs on that table. You can only order results by the clustering order or the reverse clustering order. This means that if a table has two clustering columns a and b and you defined WITH CLUSTERING ORDER (a DESC, b ASC), then in queries you will be allowed to use ORDER BY (a DESC, b ASC) and (reverse clustering order) ORDER BY (a ASC, b DESC), but not ORDER BY (a ASC, b ASC) (nor ORDER BY (a DESC, b DESC)).
2. It also changes the default order of results when queried (if no ORDER BY is provided). Results are always returned in clustering order (within a partition).
3. It has a small performance impact on some queries, as queries in reverse clustering order are slower than the ones in forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of your columns (which is common with time series, for instance, where you often want data from the newest to the oldest), it is an optimization to declare a descending clustering order.
-
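A sketch of the consequences above, using a hypothetical events_by_day table (not one of the tables defined earlier):
CREATE TABLE events_by_day (
    day date,
    ts timestamp,
    payload text,
    PRIMARY KEY (day, ts)
) WITH CLUSTERING ORDER BY (ts DESC);

SELECT * FROM events_by_day WHERE day = '2020-01-01' ORDER BY ts DESC;  -- declared clustering order
SELECT * FROM events_by_day WHERE day = '2020-01-01' ORDER BY ts ASC;   -- exact reverse, also allowed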
-
-

Todo

-

review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance)

-
-

A table supports the following options:

- ------ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
optionkinddefaultdescription
commentsimplenoneA free-form, human-readable comment.
speculative_retrysimple99PERCENTILESpeculative retry options.
additional_write_policysimple99PERCENTILESpeculative retry options.
gc_grace_secondssimple864000Time to wait before garbage collecting tombstones -(deletion markers).
bloom_filter_fp_chancesimple0.00075The target probability of false positive of the sstable -bloom filters. Said bloom filters will be sized to provide -the provided probability (thus lowering this value impact -the size of bloom filters in-memory and on-disk)
default_time_to_livesimple0The default expiration time (“TTL”) in seconds for a -table.
compactionmapsee belowCompaction options.
compressionmapsee belowCompression options.
cachingmapsee belowCaching options.
memtable_flush_period_in_mssimple0Time (in ms) before Cassandra flushes memtables to disk.
read_repairsimpleBLOCKINGSets read repair behavior (see below)
-

By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ONE, a quorum for QUORUM, and so on. -speculative_retry determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. additional_write_policy specifies the threshold at which -a cheap quorum write will be upgraded to include transient replicas. The following are legal values (case-insensitive):

-

This setting does not affect reads with consistency level ALL because they already query all replicas.

-

Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default 99PERCENTILE.

-
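As an illustration (hypothetical table ks.t), both thresholds can be adjusted per table:
ALTER TABLE ks.t WITH speculative_retry = '99PERCENTILE' AND additional_write_policy = '99PERCENTILE';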

The compaction options must at least define the 'class' sub-option, that defines the compaction strategy class -to use. The default supported class are 'SizeTieredCompactionStrategy' (STCS), -'LeveledCompactionStrategy' (LCS) and 'TimeWindowCompactionStrategy' (TWCS) (the -'DateTieredCompactionStrategy' is also supported but is deprecated and 'TimeWindowCompactionStrategy' should be -preferred instead). Custom strategy can be provided by specifying the full class name as a string constant.

-

All default strategies support a number of common options, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: STCS, LCS and TWCS).

-

The compression options define if and how the sstables of the table are compressed. The following sub-options are -available:

- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
OptionDefaultDescription
classLZ4CompressorThe compression algorithm to use. Default compressors are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
enabledtrueEnable/disable sstable compression.
chunk_length_in_kb64On disk SSTables are compressed by block (to allow random reads). This -defines the size (in KB) of said block. Bigger values may improve the -compression rate, but increases the minimum size of data to be read from disk -for a read
crc_check_chance1.0When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replicas. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking, or to 0.5, for instance, to check them every other read.
-
For instance, to create a table with LZ4Compressor and a chunk_length_in_kb of 4KB:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
-
-
-
The caching options allow configuring both the key cache and the row cache for the table. The following sub-options are available:

- ----- - - - - - - - - - - - - - - - - -
OptionDefaultDescription
keysALLWhether to cache keys (“key cache”) for this table. Valid values are: ALL and -NONE.
rows_per_partitionNONEThe amount of rows to cache per partition (“row cache”). If an integer n is -specified, the first n queried rows of a partition will be cached. Other -possible options are ALL, to cache all rows of a queried partition, or NONE -to disable row caching.
-

For instance, to create a table with both a key cache and 10 rows per partition:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10};
-
-
-
The read_repair option configures the read repair behavior, allowing tuning for various performance and consistency behaviors. Two consistency properties are affected by read repair behavior.

-
    -
  • Monotonic Quorum Reads: Provided by BLOCKING. Monotonic quorum reads prevents reads from appearing to go back -in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of -replicas, it may be visible in one read, and then disappear in a subsequent read.
  • -
  • Write Atomicity: Provided by NONE. Write atomicity prevents reads from returning partially applied writes. -Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement -is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it -is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a -batch, but then select a single row by specifying the clustering column in a SELECT statement.
  • -
-

The available read repair settings are:

-

The default setting. When read_repair is set to BLOCKING, and a read repair is triggered, the read will block -on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition -level write atomicity

-

When read_repair is set to NONE, the coordinator will reconcile any differences between replicas, but will not -attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads.

-
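For example (hypothetical table ks.t), read repair blocking can be turned off in favor of write atomicity:
ALTER TABLE ks.t WITH read_repair = 'NONE';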
    -
  • Adding new columns (see ALTER TABLE below) is a constant time operation. There is thus no need to try to -anticipate future usage when creating a table.
  • -
-
-
-

ALTER TABLE

-

Altering an existing table uses the ALTER TABLE statement:

-
-alter_table_statement   ::=  ALTER TABLE table_name alter_table_instruction
-alter_table_instruction ::=  ADD column_name cql_type ( ',' column_name cql_type )*
-                             | DROP column_name ( column_name )*
-                             | WITH options
-
-

For instance:

-
ALTER TABLE addamsFamily ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table';
-
-
-

The ALTER TABLE statement can:

-
    -
  • Add new column(s) to the table (through the ADD instruction). Note that the primary key of a table cannot be -changed and thus newly added column will, by extension, never be part of the primary key. Also note that compact -tables have restrictions regarding column addition. Note that this is constant (in the amount of -data the cluster contains) time operation.
  • -
  • Remove column(s) from the table. This drops both the column and all its content, but note that while the column -becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings -below. Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the -cluster) time operation.
  • -
  • Change some of the table options (through the WITH instruction). The supported options are the same as when creating a table (except for COMPACT STORAGE and CLUSTERING ORDER, which cannot be changed after creation). Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.
  • -
-
-

Warning

-
Dropping a column assumes that the timestamps used for the value of this column are "real" timestamps in microseconds. Using "real" timestamps in microseconds is the default and is strongly recommended, but as Cassandra allows the client to provide any timestamp on any table, it is theoretically possible to use another convention. Please be aware that if you do so, dropping a column will not work correctly.

-
-
-

Warning

-
Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one unless the type of the dropped column was a (non-frozen) collection (due to an internal technical limitation).

-
-
-
-

DROP TABLE

-

Dropping a table uses the DROP TABLE statement:

-
-drop_table_statement ::=  DROP TABLE [ IF EXISTS ] table_name
-
-

Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.

-

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the -operation is a no-op.

-
-
-

TRUNCATE

-

A table can be truncated using the TRUNCATE statement:

-
-truncate_statement ::=  TRUNCATE [ TABLE ] table_name
-
-

Note that TRUNCATE TABLE foo is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the TABLE keyword can be omitted.

-

Truncating a table permanently removes all existing data from the table, but without removing the table itself.

-
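For instance, using the monkeySpecies table created earlier:
TRUNCATE monkeySpecies;
TRUNCATE TABLE monkeySpecies;   -- equivalent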
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/definitions.html b/src/doc/4.0-alpha3/cql/definitions.html deleted file mode 100644 index b4191153b..000000000 --- a/src/doc/4.0-alpha3/cql/definitions.html +++ /dev/null @@ -1,317 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Definitions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Definitions

-
-

Conventions

-

To aid in specifying the CQL syntax, we will use the following conventions in this document:

-
    -
  • Language rules will be given in an informal BNF variant notation. In particular, we'll use square brackets ([ item ]) for optional items, * and + for repeated items (where + implies at least one).
  • -
  • The grammar will also use the following convention for convenience: non-terminal terms will be lowercase (and link to their definition) while terminal keywords will be provided in all caps. Note however that keywords are identifiers and are thus case insensitive in practice. We will also define some early constructions using regexp, which we'll indicate with re(<some regular expression>).
  • -
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the comma on the last column definition in a CREATE TABLE statement is optional but supported if present even though the grammar in this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
  • -
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.
  • -
-
-
-

Identifiers and keywords

-

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token -matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

-

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in Appendix A: CQL Keywords.

-
Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

-
There is a second kind of identifiers called quoted identifiers, defined by enclosing an arbitrary sequence of characters (non-empty) in double-quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column (note that doing so is not particularly advisable), while select would raise a parsing error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is however equivalent to the unquoted identifier obtained by removing the double-quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.

-
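A short illustration of both behaviors, using a hypothetical table (not one defined elsewhere in this documentation):
CREATE TABLE ks.quoted_demo (
    id int PRIMARY KEY,
    "select" text,   -- a reserved keyword, usable as a name only when double-quoted
    "myId" int       -- case sensitive: distinct from myid and "myid"
);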
-

Note

-
quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional update, the server will respond with a result-set containing a special result named "[applied]". If you've declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

-
-

More formally, we have:

-
-identifier          ::=  unquoted_identifier | quoted_identifier
-unquoted_identifier ::=  re('[a-zA-Z][a-zA-Z0-9_]*')
-quoted_identifier   ::=  '"' (any character where " can appear if doubled)+ '"'
-
-
-
-

Constants

-

CQL defines the following kind of constants:

-
-constant ::=  string | integer | float | boolean | uuid | blob | NULL
-string   ::=  '\'' (any character where ' can appear if doubled)+ '\''
-              '$$' (any character other than '$$') '$$'
-integer  ::=  re('-?[0-9]+')
-float    ::=  re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY
-boolean  ::=  TRUE | FALSE
-uuid     ::=  hex{8}-hex{4}-hex{4}-hex{4}-hex{12}
-hex      ::=  re("[0-9a-fA-F]")
-blob     ::=  '0' ('x' | 'X') hex+
-
-

In other words:

-
    -
  • A string constant is an arbitrary sequence of characters enclosed by single-quote('). A single-quote -can be included by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted -Identifiers and keywords that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence -of characters by two dollar characters, in which case single-quote can be used without escaping ($$It's raining -today$$). That latter form is often used when defining user-defined functions to avoid having to -escape single-quote characters in function body (as they are more likely to occur than $$).
  • -
  • Integer, float and boolean constants are defined as expected. Note however that float allows the special NaN and Infinity constants.
  • -
  • CQL supports UUID constants.
  • -
  • Blobs content are provided in hexadecimal and prefixed by 0x.
  • -
  • The special NULL constant denotes the absence of value.
  • -
-

For how these constants are typed, see the Data Types section.

-
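A sketch combining the different constant kinds in one statement, assuming a hypothetical table ks.demo with uuid, text, blob and boolean columns:
INSERT INTO ks.demo (id, label, payload, active)
VALUES (123e4567-e89b-12d3-a456-426655440000, 'It''s a constant', 0xCAFE, true);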
-
-

Terms

-

CQL has the notion of a term, which denotes the kind of values that CQL support. Terms are defined by:

-
-term                 ::=  constant | literal | function_call | arithmetic_operation | type_hint | bind_marker
-literal              ::=  collection_literal | udt_literal | tuple_literal
-function_call        ::=  identifier '(' [ term (',' term)* ] ')'
-arithmetic_operation ::=  '-' term | term ('+' | '-' | '*' | '/' | '%') term
-type_hint            ::=  '(' cql_type `)` term
-bind_marker          ::=  '?' | ':' identifier
-
-

A term is thus one of:

- -
-
-

Comments

-
A comment in CQL is a line beginning with either a double dash (--) or a double slash (//).

-

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-
-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-
-
-
-
-

Statements

-

CQL consists of statements that can be divided in the following categories:

- -

All the statements are listed below and are described in the rest of this documentation (see links above):

-
-cql_statement                ::=  statement [ ';' ]
-statement                    ::=  ddl_statement
-                                  | dml_statement
-                                  | secondary_index_statement
-                                  | materialized_view_statement
-                                  | role_or_permission_statement
-                                  | udf_statement
-                                  | udt_statement
-                                  | trigger_statement
-ddl_statement                ::=  use_statement
-                                  | create_keyspace_statement
-                                  | alter_keyspace_statement
-                                  | drop_keyspace_statement
-                                  | create_table_statement
-                                  | alter_table_statement
-                                  | drop_table_statement
-                                  | truncate_statement
-dml_statement                ::=  select_statement
-                                  | insert_statement
-                                  | update_statement
-                                  | delete_statement
-                                  | batch_statement
-secondary_index_statement    ::=  create_index_statement
-                                  | drop_index_statement
-materialized_view_statement  ::=  create_materialized_view_statement
-                                  | drop_materialized_view_statement
-role_or_permission_statement ::=  create_role_statement
-                                  | alter_role_statement
-                                  | drop_role_statement
-                                  | grant_role_statement
-                                  | revoke_role_statement
-                                  | list_roles_statement
-                                  | grant_permission_statement
-                                  | revoke_permission_statement
-                                  | list_permissions_statement
-                                  | create_user_statement
-                                  | alter_user_statement
-                                  | drop_user_statement
-                                  | list_users_statement
-udf_statement                ::=  create_function_statement
-                                  | drop_function_statement
-                                  | create_aggregate_statement
-                                  | drop_aggregate_statement
-udt_statement                ::=  create_type_statement
-                                  | alter_type_statement
-                                  | drop_type_statement
-trigger_statement            ::=  create_trigger_statement
-                                  | drop_trigger_statement
-
-
-
-

Prepared Statements

-
CQL supports prepared statements. Prepared statements are an optimization that allows parsing a query only once but executing it multiple times with different concrete values.

-
Any statement that uses at least one bind marker (see bind_marker) will need to be prepared, after which the statement can be executed by providing concrete values for each of its markers. The exact details of how a statement is prepared and then executed depend on the CQL driver used, and you should refer to your driver documentation.

-
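For example, either of the following could be prepared once and then executed many times with different values bound to the marker (the users table is the one used in the data manipulation examples):
SELECT name, occupation FROM users WHERE userid = ?;
SELECT name, occupation FROM users WHERE userid = :id;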
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/dml.html b/src/doc/4.0-alpha3/cql/dml.html deleted file mode 100644 index 49b3bec6e..000000000 --- a/src/doc/4.0-alpha3/cql/dml.html +++ /dev/null @@ -1,561 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Manipulation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Manipulation

-

This section describes the statements supported by CQL to insert, update, delete and query data.

-
-

SELECT

-
Querying data from tables is done using a SELECT statement:

-
-select_statement ::=  SELECT [ JSON | DISTINCT ] ( select_clause | '*' )
-                      FROM table_name
-                      [ WHERE where_clause ]
-                      [ GROUP BY group_by_clause ]
-                      [ ORDER BY ordering_clause ]
-                      [ PER PARTITION LIMIT (integer | bind_marker) ]
-                      [ LIMIT (integer | bind_marker) ]
-                      [ ALLOW FILTERING ]
-select_clause    ::=  selector [ AS identifier ] ( ',' selector [ AS identifier ] )
-selector         ::=  column_name
-                      | term
-                      | CAST '(' selector AS cql_type ')'
-                      | function_name '(' [ selector ( ',' selector )* ] ')'
-                      | COUNT '(' '*' ')'
-where_clause     ::=  relation ( AND relation )*
-relation         ::=  column_name operator term
-                      '(' column_name ( ',' column_name )* ')' operator tuple_literal
-                      TOKEN '(' column_name ( ',' column_name )* ')' operator term
-operator         ::=  '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY
-group_by_clause  ::=  column_name ( ',' column_name )*
-ordering_clause  ::=  column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )*
-
-

For instance:

-
SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT (*) AS user_count FROM users;
-
-
-

The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of the rows matching the request, where each row contains the values for the selection corresponding to the query. Additionally, functions, including aggregate functions, can be applied to the result.

-

A SELECT statement contains at least a selection clause and the name of the table on which the selection is made (note that CQL does not support joins or sub-queries, and thus a select statement only applies to a single table). In most cases, a select will also have a where clause, and it can optionally have additional clauses to order or limit the results. Lastly, queries that require filtering can be allowed if the ALLOW FILTERING flag is provided.

-
-

Selection clause

-

The select_clause determines which columns need to be queried and returned in the result-set, as well as any transformation to apply to this result before returning. It consists of a comma-separated list of selectors or, alternatively, the wildcard character (*) to select all the columns defined in the table.

-
-

Selectors

-

A selector can be one of:

-
    -
  • A column name of the table selected, to retrieve the values for that column.
  • -
  • A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the -corresponding column of the result-set will simply have the value of this term for every row returned).
  • -
  • A cast, which converts a nested selector to a (compatible) type.
  • -
  • A function call, where the arguments are selectors themselves. See the section on functions for more details.
  • -
  • The special call COUNT(*) to the COUNT function, which counts all non-null results.
  • -
-
-
-

Aliases

-

Every top-level selector can also be aliased (using AS). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:

-
// Without alias
-SELECT intAsBlob(4) FROM t;
-
-//  intAsBlob(4)
-// --------------
-//  0x00000004
-
-// With alias
-SELECT intAsBlob(4) AS four FROM t;
-
-//  four
-// ------------
-//  0x00000004
-
-
-
-

Note

-

Currently, aliases aren’t recognized anywhere else in the statement where they are used (not in the WHERE clause, not in the ORDER BY clause, …). You must use the original column name instead.

-
-
-
-

WRITETIME and TTL function

-

Selection supports two special functions (that aren’t allowed anywhere else): WRITETIME and TTL. Both functions take only one argument, and that argument must be a column name (so for instance TTL(3) is invalid).

-

These functions retrieve meta-information that is stored internally for each column (see the example after the list below), namely:

-
    -
  • the timestamp of the value of the column for WRITETIME.
  • -
  • the remaining time to live (in seconds) for the value of the column if it is set to expire (and null otherwise).
  • -
-
-
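A minimal sketch, assuming the users table from the SELECT examples and that its occupation column was written with a TTL:

// Retrieve the write timestamp and the remaining TTL of a column value
SELECT WRITETIME (occupation), TTL (occupation) FROM users WHERE userid = 199;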
-
-

The WHERE clause

-

The WHERE clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the PRIMARY KEY and/or have a secondary index defined on them.

-

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations on them are restricted to those that select a contiguous (for the ordering) set of rows. For instance, given:

-
CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-
-
-

The following query is allowed:

-
SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND blog_title='John''s Blog'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):

-
// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY columns. In that case, rows will be selected based on the token of their PARTITION KEY rather than on the value. Note that the token of a key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, token(-1) > token(0) in particular). Example:

-
SELECT * FROM posts
- WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-
-
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full -primary key.

-

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-
-
-

will request all rows that sort after the one having “John’s Blog” as blog_title and ‘2012-01-01’ for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which would not be the case for:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND blog_title > 'John''s Blog'
-   AND posted_at > '2012-01-01'
-
-
-

The tuple notation may also be used for IN clauses on clustering columns:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-
-
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the -map keys.
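As an illustrative sketch, assuming a hypothetical products table with a set column tags and a map column attrs, each backed by an appropriate index (or queried with ALLOW FILTERING):

// CONTAINS matches elements of lists and sets, and values of maps
SELECT * FROM products WHERE tags CONTAINS 'sale';
// CONTAINS KEY matches keys of maps
SELECT * FROM products WHERE attrs CONTAINS KEY 'colour';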

-
-
-

Grouping results

-

The GROUP BY option condenses into a single row all selected rows that share the same values for a set of columns.

-

Using the GROUP BY option, it is only possible to group rows at the partition key level or at a clustering column level. Consequently, the GROUP BY option only accepts primary key column names, in the primary key order, as arguments. If a primary key column is restricted by an equality restriction, it is not required to be present in the GROUP BY clause.

-

Aggregate functions will produce a separate value for each group. If no GROUP BY clause is specified, aggregate functions will produce a single value for all the rows.

-

If a column is selected without an aggregate function, in a statement with a GROUP BY, the first value encountered in each group will be returned.
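A minimal sketch, assuming (for illustration only) that game is the partition key of the plays table used in the aggregate examples later in this document:

// One result row per group, with the aggregate computed per group
SELECT game, max(players) AS max_players FROM plays GROUP BY game;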

-
-
-

Ordering results

-

The ORDER BY clause selects the order of the returned results. It takes as argument a list of column names along with the order for each column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited by the clustering order defined on the table:

-
    -
  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • -
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
  • -
-
-
-

Limiting results

-

The LIMIT option to a SELECT statement limits the number of rows returned by a query, while the PER PARTITION LIMIT option limits the number of rows returned for a given partition by the query. Note that both types of limit can be used in the same statement.
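For instance, reusing the posts table defined above (an illustrative sketch):

// At most 2 rows per partition, and at most 10 rows overall
SELECT * FROM posts PER PARTITION LIMIT 2 LIMIT 10;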

-
-
-

Allowing filtering

-

By default, CQL only allows select queries that don’t involve “filtering” server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non-filtering” queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

-

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (in the sense defined above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

-

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:

-
CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-
-
-

Then the following queries are valid:

-
SELECT * FROM users;
-SELECT * FROM users WHERE birth_year = 1981;
-
-
-

because in both cases, Cassandra guarantees that the queries’ performance will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query’s performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored. Nevertheless, the number of nodes will always be many orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

-

However, the following query will be rejected:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-
-

because Cassandra cannot guarantee that it won’t have to scan a large amount of data even if the result of the query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you “know what you are doing”, you can force the execution of this query by using ALLOW FILTERING, and so the following query is valid:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-
-
-
-
-

INSERT

-

Inserting data for a row is done using an INSERT statement:

-
-insert_statement ::=  INSERT INTO table_name ( names_values | json_clause )
-                      [ IF NOT EXISTS ]
-                      [ USING update_parameter ( AND update_parameter )* ]
-names_values     ::=  names VALUES tuple_literal
-json_clause      ::=  JSON string [ DEFAULT ( NULL | UNSET ) ]
-names            ::=  '(' column_name ( ',' column_name )* ')'
-
-

For instance:

-
INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-      USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                              "director": "Joss Whedon",
-                              "year": 2005}';
-
-
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by -its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the -section on JSON support for more detail.

-

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means of knowing whether a creation or an update happened.

-

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the insertion. But please note that using IF NOT EXISTS will incur a non-negligible performance cost (internally, Paxos will be used), so this should be used sparingly.

-

All updates for an INSERT are applied atomically and in isolation.

-

Please refer to the UPDATE section for information on the update_parameter.

-

Also note that INSERT does not support counters, while UPDATE does.

-
-
-

UPDATE

-

Updating a row is done using an UPDATE statement:

-
-update_statement ::=  UPDATE table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      SET assignment ( ',' assignment )*
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-update_parameter ::=  ( TIMESTAMP | TTL ) ( integer | bind_marker )
-assignment       ::=  simple_selection '=' term
-                     | column_name '=' column_name ( '+' | '-' ) term
-                     | column_name '=' list_literal '+' column_name
-simple_selection ::=  column_name
-                     | column_name '[' term ']'
-                     | column_name '.' `field_name
-condition        ::=  simple_selection operator term
-
-

For instance:

-
UPDATE NerdMovies USING TTL 400
-   SET director   = 'Joss Whedon',
-       main_actor = 'Nathan Fillion',
-       year       = 2005
- WHERE movie = 'Serenity';
-
-UPDATE UserActions
-   SET total = total + 2
-   WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14
-     AND action = 'click';
-
-
-

The UPDATE statement writes one or more columns for a given row in a table. The where_clause is used to -select the row to update and must include all columns composing the PRIMARY KEY. Non primary key columns are then -set using the SET keyword.

-

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through IF, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred.

-

It is however possible to use the conditions on some columns through IF, in which case the row will not be updated -unless the conditions are met. But, please note that using IF conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly.

-

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

-

Regarding the assignment:

-
    -
  • c = c + 3 is used to increment/decrement counters. The column name after the ‘=’ sign must be the same as the one before the ‘=’ sign. Note that increment/decrement is only allowed on counters, and it is the only update operation allowed on counters. See the section on counters for details.
  • -
  • id = id + <some-collection> and id[value1] = value2 are for collections, see the relevant section for details.
  • -
  • id.field = 3 is for setting the value of a field on a non-frozen user-defined type. See the relevant section for details.
  • -
-
-

Update parameters

-

The UPDATE, INSERT (and DELETE and BATCH for the TIMESTAMP) statements support the following -parameters:

-
    -
  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in -microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • -
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are -automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not -the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL -is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a -default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of null is equivalent -to inserting with a TTL of 0.
  • -
-
-
-
-

DELETE

-

Deleting rows or parts of rows uses the DELETE statement:

-
-delete_statement ::=  DELETE [ simple_selection ( ',' simple_selection ) ]
-                      FROM table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-
-

For instance:

-
DELETE FROM NerdMovies USING TIMESTAMP 1240003134
- WHERE movie = 'Serenity';
-
-DELETE phone FROM Users
- WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-
-
-

The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, -only those columns are deleted from the row indicated by the WHERE clause. Otherwise, whole rows are removed.

-

The WHERE clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -IN operator. A range of rows may be deleted using an inequality operator (such as >=).
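A minimal sketch, reusing the posts table from the SELECT section (a range deletion over the posted_at clustering column):

// Delete a contiguous range of rows within a single partition
DELETE FROM posts
 WHERE userid = 'john doe'
   AND blog_title = 'John''s Blog'
   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31';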

-

DELETE supports the TIMESTAMP option with the same semantics as in updates.

-

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

-

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT -statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly.

-
-
-

BATCH

-

Multiple INSERT, UPDATE and DELETE statements can be executed in a single statement by grouping them through a BATCH statement:

-
-batch_statement        ::=  BEGIN [ UNLOGGED | COUNTER ] BATCH
-                            [ USING update_parameter ( AND update_parameter )* ]
-                            modification_statement ( ';' modification_statement )*
-                            APPLY BATCH
-modification_statement ::=  insert_statement | update_statement | delete_statement
-
-

For instance:

-
BEGIN BATCH
-   INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-   DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-
-
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

-
    -
  • It saves network round-trips between the client and the server (and sometimes between the server coordinator and the -replicas) when batching multiple updates.
  • -
  • All updates in a BATCH belonging to a given partition key are performed in isolation.
  • -
  • By default, all operations in the batch are performed as logged, to ensure all mutations eventually complete (or -none will). See the notes on UNLOGGED batches for more details.
  • -
-

Note that:

-
    -
  • BATCH statements may only contain UPDATE, INSERT and DELETE statements (not other batches for instance).
  • -
  • Batches are not a full analogue for SQL transactions.
  • -
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp -(either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra’s conflict -resolution procedure in the case of timestamp ties, operations may -be applied in an order that is different from the order they are listed in the BATCH statement. To force a -particular operation ordering, you must specify per-operation timestamps.
  • -
  • A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
  • -
-
-

UNLOGGED batches

-

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition).

-

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.
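As an illustrative sketch (the users table and columns are taken from the BATCH example above; the values are hypothetical):

// Skip the batchlog; atomicity across partitions is no longer guaranteed
BEGIN UNLOGGED BATCH
   INSERT INTO users (userid, password) VALUES ('user5', 'ch@ngem3d');
   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user6';
APPLY BATCH;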

-
-
-

COUNTER batches

-

Use the COUNTER option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent.
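A minimal sketch, reusing the UserActions counter table from the UPDATE examples (the 'view' action value is hypothetical):

// Counter updates must be grouped in a COUNTER batch
BEGIN COUNTER BATCH
   UPDATE UserActions SET total = total + 2
    WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';
   UPDATE UserActions SET total = total + 1
    WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'view';
APPLY BATCH;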

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/functions.html b/src/doc/4.0-alpha3/cql/functions.html deleted file mode 100644 index 534ef0a65..000000000 --- a/src/doc/4.0-alpha3/cql/functions.html +++ /dev/null @@ -1,706 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Functions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Functions

-

CQL supports two main categories of functions:

-
    -
  • the scalar functions, which simply take a number of values and produce an output from them.
  • -
  • the aggregate functions, which are used to aggregate the results of multiple rows from a SELECT statement.
  • -
-

In both cases, CQL provides a number of native “hard-coded” functions as well as the ability to create new user-defined -functions.

-
-

Note

-

The use of user-defined functions is disabled by default for security reasons (even when enabled, the execution of user-defined functions is sandboxed and a “rogue” function should not be allowed to do evil, but no sandbox is perfect, so using user-defined functions is opt-in). See the enable_user_defined_functions setting in cassandra.yaml to enable them.

-
-

A function is identified by its name:

-
-function_name ::=  [ keyspace_name '.' ] name
-
-
-

Scalar functions

-
-

Native functions

-
-

Cast

-

The cast function can be used to convert one native datatype to another.

-

The following table describes the conversions supported by the cast function. Cassandra will silently ignore any -cast converting a datatype into its own datatype.

From         To
ascii        text, varchar
bigint       tinyint, smallint, int, float, double, decimal, varint, text, varchar
boolean      text, varchar
counter      tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
date         timestamp
decimal      tinyint, smallint, int, bigint, float, double, varint, text, varchar
double       tinyint, smallint, int, bigint, float, decimal, varint, text, varchar
float        tinyint, smallint, int, bigint, double, decimal, varint, text, varchar
inet         text, varchar
int          tinyint, smallint, bigint, float, double, decimal, varint, text, varchar
smallint     tinyint, int, bigint, float, double, decimal, varint, text, varchar
time         text, varchar
timestamp    date, text, varchar
timeuuid     timestamp, date, text, varchar
tinyint      tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
uuid         text, varchar
varint       tinyint, smallint, int, bigint, float, double, decimal, text, varchar

The conversions rely strictly on Java’s semantics. For example, the double value 1 will be converted to the text value -‘1.0’. For instance:

-
SELECT avg(cast(count as double)) FROM myTable
-
-
-
-
-

Token

-

The token function computes the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

-

The types of the arguments of token depend on the types of the partition key columns. The return type depends on the partitioner in use:

-
    -
  • For Murmur3Partitioner, the return type is bigint.
  • -
  • For RandomPartitioner, the return type is varint.
  • -
  • For ByteOrderedPartitioner, the return type is blob.
  • -
-

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:

-
CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-)
-
-
-

then the token function will take a single argument of type text (in that case, the partition key is userid; there are no clustering columns so the partition key is the same as the primary key), and the return type will be bigint.

-
-
-

Uuid

-

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or -UPDATE statements.
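A minimal sketch, assuming a hypothetical tracks table whose id column is of type uuid:

// uuid() is evaluated server side when the row is written
INSERT INTO tracks (id, title) VALUES (uuid(), 'some title');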

-
-
-

Timeuuid functions

-
-
now
-

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the time the function is invoked. Note that this method is useful for insertion but is largely nonsensical in WHERE clauses. For instance, a query of the form:

-
SELECT * FROM myTable WHERE t = now()
-
-
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

-

currentTimeUUID is an alias of now.

-
-
-
minTimeuuid and maxTimeuuid
-

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string <timestamps>) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having timestamp t. So for instance:

-
SELECT * FROM myTable
- WHERE t > maxTimeuuid('2013-01-01 00:05+0000')
-   AND t < minTimeuuid('2013-02-02 10:00+0000')
-
-
-

will select all rows where the timeuuid column t is strictly older than '2013-01-01 00:05+0000' but strictly -younger than '2013-02-02 10:00+0000'. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still -not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > -maxTimeuuid('2013-01-01 00:05+0000').

-
-

Note

-

We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the time-based UUID generation process specified by RFC 4122. In particular, the values returned by these two methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

-
-
-
-
-

Datetime functions

-
-
Retrieving the current date/time
-

The following functions can be used to retrieve the date/time at the time where the function is invoked:

Function name       Output type
currentTimestamp    timestamp
currentDate         date
currentTime         time
currentTimeUUID     timeUUID

For example the last 2 days of data can be retrieved using:

-
SELECT * FROM myTable WHERE date >= currentDate() - 2d
-
-
-
-
-
Time conversion functions
-

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native -type.

Function name       Input type     Description
toDate              timeuuid       Converts the timeuuid argument into a date type
toDate              timestamp      Converts the timestamp argument into a date type
toTimestamp         timeuuid       Converts the timeuuid argument into a timestamp type
toTimestamp         date           Converts the date argument into a timestamp type
toUnixTimestamp     timeuuid       Converts the timeuuid argument into a bigint raw value
toUnixTimestamp     timestamp      Converts the timestamp argument into a bigint raw value
toUnixTimestamp     date           Converts the date argument into a bigint raw value
dateOf              timeuuid       Similar to toTimestamp(timeuuid) (DEPRECATED)
unixTimestampOf     timeuuid       Similar to toUnixTimestamp(timeuuid) (DEPRECATED)
-
-
-

Blob conversion functions

-

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL (a notable exception is blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a 64-bit blob argument and converts it to a bigint value. So, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.
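As an illustrative sketch, assuming a hypothetical table t with an int primary key k and a blob column b:

// Store an int as a blob, then read it back as an int
INSERT INTO t (k, b) VALUES (0, intAsBlob(42));
SELECT blobAsInt(b) FROM t WHERE k = 0;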

-
-
-
-

User-defined functions

-

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath.

-

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

-

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

-
CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-
-
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing.

-

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types.

-

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

-

Note that you can use the dollar-quoted string syntax ($$ ... $$) to enclose the UDF source code. For example:

-
CREATE FUNCTION some_function ( arg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS int
-    LANGUAGE java
-    AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-
-CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$ return udtarg.getString("txt"); $$;
-
-
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

-

The implicitly available udfContext field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:

-
CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct\_using\_udt ( somearg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS custom_type
-    LANGUAGE java
-    AS $$
-        UDTValue udt = udfContext.newReturnUDTValue();
-        udt.setString("txt", "some string");
-        udt.setInt("i", 42);
-        return udt;
-    $$;
-
-
-

The definition of the UDFContext interface can be found in the Apache Cassandra source code for -org.apache.cassandra.cql3.functions.UDFContext.

-
public interface UDFContext
-{
-    UDTValue newArgUDTValue(String argName);
-    UDTValue newArgUDTValue(int argNum);
-    UDTValue newReturnUDTValue();
-    UDTValue newUDTValue(String udtName);
-    TupleValue newArgTupleValue(String argName);
-    TupleValue newArgTupleValue(int argNum);
-    TupleValue newReturnTupleValue();
-    TupleValue newTupleValue(String cqlDefinition);
-}
-
-
-

Java UDFs already have some imports for common interfaces and classes defined. These imports are:

-
import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.cassandra.cql3.functions.UDFContext;
-import com.datastax.driver.core.TypeCodec;
-import com.datastax.driver.core.TupleValue;
-import com.datastax.driver.core.UDTValue;
-
-
-

Please note that these convenience imports are not available for script UDFs.

-
-

CREATE FUNCTION

-

Creating a new user-defined function uses the CREATE FUNCTION statement:

-
-create_function_statement ::=  CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS]
-                                   function_name '(' arguments_declaration ')'
-                                   [ CALLED | RETURNS NULL ] ON NULL INPUT
-                                   RETURNS cql_type
-                                   LANGUAGE identifier
-                                   AS string
-arguments_declaration     ::=  identifier cql_type ( ',' identifier cql_type )*
-
-

For instance:

-
CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list)
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int)
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-
-

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with -the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already -exists.

-

If the optional IF NOT EXISTS keywords are used, the function will -only be created if another function with the same signature does not -exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

Behavior on invocation with null values must be defined for each -function. There are two options:

-
    -
  1. RETURNS NULL ON NULL INPUT declares that the function will always -return null if any of the input arguments is null.
  2. -
  3. CALLED ON NULL INPUT declares that the function will always be -executed.
  4. -
-
-
Function Signature
-

Signatures are used to distinguish individual functions. The signature consists of:

-
    -
  1. The fully qualified function name - i.e keyspace plus function-name
  2. -
  3. The concatenated list of all argument types
  4. -
-

Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules.

-

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. -the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the -system keyspaces.

-
-
-
-

DROP FUNCTION

-

Dropping a function uses the DROP FUNCTION statement:

-
-drop_function_statement ::=  DROP FUNCTION [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-arguments_signature     ::=  cql_type ( ',' cql_type )*
-
-

For instance:

-
DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-
-
-

You must specify the argument types (arguments_signature) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions).

-

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists, but does not throw an error if it doesn’t.

-
-
-
-
-

Aggregate functions

-

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.

-

If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query.

-
-

Native aggregates

-
-

Count

-

The count function can be used to count the rows returned by a query. Example:

-
SELECT COUNT (*) FROM plays;
-SELECT COUNT (1) FROM plays;
-
-
-

It can also be used to count the non-null values of a given column:

-
SELECT COUNT (scores) FROM plays;
-
-
-
-
-

Max and Min

-

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:

-
SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake';
-
-
-
-
-

Sum

-

The sum function can be used to sum up all the values returned by a query for a given column. For instance:

-
SELECT SUM (players) FROM plays;
-
-
-
-
-

Avg

-

The avg function can be used to compute the average of all the values returned by a query for a given column. For -instance:

-
SELECT AVG (players) FROM plays;
-
-
-
-
-
-

User-Defined Aggregates

-

User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -count, min, and max.

-

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first -argument of the state function must have type STYPE. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with last -state value as its argument.

-

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate).

-

User-defined aggregates can be used in SELECT statement.

-

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE -statement):

-
CREATE OR REPLACE FUNCTION averageState(state tuple<int,bigint>, val int)
-    CALLED ON NULL INPUT
-    RETURNS tuple
-    LANGUAGE java
-    AS $$
-        if (val != null) {
-            state.setInt(0, state.getInt(0)+1);
-            state.setLong(1, state.getLong(1)+val.intValue());
-        }
-        return state;
-    $$;
-
-CREATE OR REPLACE FUNCTION averageFinal (state tuple<int,bigint>)
-    CALLED ON NULL INPUT
-    RETURNS double
-    LANGUAGE java
-    AS $$
-        double r = 0;
-        if (state.getInt(0) == 0) return null;
-        r = state.getLong(1);
-        r /= state.getInt(0);
-        return Double.valueOf(r);
-    $$;
-
-CREATE OR REPLACE AGGREGATE average(int)
-    SFUNC averageState
-    STYPE tuple
-    FINALFUNC averageFinal
-    INITCOND (0, 0);
-
-CREATE TABLE atable (
-    pk int PRIMARY KEY,
-    val int
-);
-
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-
-SELECT average(val) FROM atable;
-
-
-
-

CREATE AGGREGATE

-

Creating (or replacing) a user-defined aggregate function uses the CREATE AGGREGATE statement:

-
-create_aggregate_statement ::=  CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ]
-                                    function_name '(' arguments_signature ')'
-                                    SFUNC function_name
-                                    STYPE cql_type
-                                    [ FINALFUNC function_name ]
-                                    [ INITCOND term ]
-
-

See above for a complete example.

-

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one -with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature -already exists.

-

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

STYPE defines the type of the state value and must be specified.

-

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null -INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

-

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the -state function must match STYPE. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called -with null.

-

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with -type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS -NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

-

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is -defined, it is the return type of that function.

-
-
-

DROP AGGREGATE

-

Dropping a user-defined aggregate function uses the DROP AGGREGATE statement:

-
-drop_aggregate_statement ::=  DROP AGGREGATE [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-
-

For instance:

-
DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-
-
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates).

-

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist.

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/index.html b/src/doc/4.0-alpha3/cql/index.html deleted file mode 100644 index 33693d353..000000000 --- a/src/doc/4.0-alpha3/cql/index.html +++ /dev/null @@ -1,247 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "The Cassandra Query Language (CQL)" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

The Cassandra Query Language (CQL)

-

This document describes the Cassandra Query Language (CQL) [1]. Note that this document describes the latest version of the language. However, the changes section provides the diff between the different versions of CQL.

-

CQL offers a model close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL.

- - - - - - -
[1] Technically, this document describes CQL version 3, which is not backward compatible with CQL versions 1 and 2 (which have been deprecated and removed) and differs from them in numerous ways.
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/indexes.html b/src/doc/4.0-alpha3/cql/indexes.html deleted file mode 100644 index 28413daee..000000000 --- a/src/doc/4.0-alpha3/cql/indexes.html +++ /dev/null @@ -1,171 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Secondary Indexes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Secondary Indexes

-

CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by:

-
-index_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE INDEX

-

Creating a secondary index on a table uses the CREATE INDEX statement:

-
-create_index_statement ::=  CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ index_name ]
-                                ON table_name '(' index_identifier ')'
-                                [ USING string [ WITH OPTIONS = map_literal ] ]
-index_identifier       ::=  column_name
-                           | ( KEYS | VALUES | ENTRIES | FULL ) '(' column_name ')'
-
-

For instance:

-
CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-
-
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time.

-

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it -is used, the statement will be a no-op if the index already exists.

-
-

Indexes on Map Keys

-

When creating an index on a map, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.

-
-
-
-

DROP INDEX

-

Dropping a secondary index uses the DROP INDEX statement:

-
-drop_index_statement ::=  DROP INDEX [ IF EXISTS ] index_name
-
-

The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index.

-

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/json.html b/src/doc/4.0-alpha3/cql/json.html deleted file mode 100644 index 0841872b8..000000000 --- a/src/doc/4.0-alpha3/cql/json.html +++ /dev/null @@ -1,318 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "JSON Support" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

JSON Support

-

Cassandra 2.2 introduces JSON support to SELECT and INSERT -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents.

-
-

SELECT JSON

-

With SELECT statements, the JSON keyword can be used to return each row as a single JSON encoded map. The -remainder of the SELECT statement behavior is the same.

-

The result map keys are the same as the column names in a normal result set. For example, a statement like SELECT JSON a, ttl(b) FROM ... would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, SELECT JSON myColumn FROM ... would result in a map key "\"myColumn\"" (note the escaped quotes).

-

The map values will be JSON-encoded representations (as described below) of the result set values.

-
-
-

INSERT JSON

-

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single -row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named “myKey” and “value”, you would do the following:

-
INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-
-

By default (or if DEFAULT NULL is explicitly used), a column omitted from the JSON map will be set to NULL, meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). Alternatively, if the DEFAULT UNSET directive is used after the value, omitted column values will be left unset, meaning that pre-existing values for those columns will be preserved.

-
-
-

JSON Encoding of Cassandra Data Types

-

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string -representation of the collection.

-

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() -arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and -fromJson()):

Type        Formats accepted          Return format    Notes
ascii       string                    string           Uses JSON’s \u character escape
bigint      integer, string           integer          String must be valid 64 bit integer
blob        string                    string           String should be 0x followed by an even number of hex digits
boolean     boolean, string           boolean          String must be “true” or “false”
date        string                    string           Date in format YYYY-MM-DD, timezone UTC
decimal     integer, float, string    float            May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder
double      integer, float, string    float            String must be valid integer or float
float       integer, float, string    float            String must be valid integer or float
inet        string                    string           IPv4 or IPv6 address
int         integer, string           integer          String must be valid 32 bit integer
list        list, string              list             Uses JSON’s native list representation
map         map, string               map              Uses JSON’s native map representation
smallint    integer, string           integer          String must be valid 16 bit integer
set         list, string              list             Uses JSON’s native list representation
text        string                    string           Uses JSON’s \u character escape
time        string                    string           Time of day in format HH-MM-SS[.fffffffff]
timestamp   integer, string           string           A timestamp. String constants allow inputting timestamps as dates. Datestamps with format YYYY-MM-DD HH:MM:SS.SSS are returned.
timeuuid    string                    string           Type 1 UUID. See constant for the UUID format
tinyint     integer, string           integer          String must be valid 8 bit integer
tuple       list, string              list             Uses JSON’s native list representation
UDT         map, string               map              Uses JSON’s native map representation with field names as keys
uuid        string                    string           See constant for the UUID format
varchar     string                    string           Uses JSON’s \u character escape
varint      integer, string           integer          Variable length; may overflow 32 or 64 bit integers in client-side decoder
-
-

The fromJson() Function

-

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used -in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or -SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.
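A minimal sketch, reusing the mytable example above and assuming its value column is an int:

// Set a single column from a JSON-encoded value
UPDATE mytable SET value = fromJson('42') WHERE "myKey" = 0;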

-
-
-

The toJson() Function

-

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used -in the selection clause of a SELECT statement.
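A minimal sketch, again using the mytable example above:

// Return a single column value encoded as JSON
SELECT "myKey", toJson(value) FROM mytable;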

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/mvs.html b/src/doc/4.0-alpha3/cql/mvs.html deleted file mode 100644 index f990a8660..000000000 --- a/src/doc/4.0-alpha3/cql/mvs.html +++ /dev/null @@ -1,261 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Materialized Views" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Materialized Views

-

Materialized view names are defined by:

-
-view_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE MATERIALIZED VIEW

-

You can create a materialized view on a table using a CREATE MATERIALIZED VIEW statement:

-
-create_materialized_view_statement ::=  CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] view_name AS
-                                            select_statement
-                                            PRIMARY KEY '(' primary_key ')'
-                                            WITH table_options
-
-

For instance:

-
CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT * FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-
-
-

The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which -corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view.

-

Creating a materialized view has 3 main parts:

- -

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is -used. If it is used, the statement will be a no-op if the materialized view already exists.

-
-

Note

-

By default, materialized views are built in a single thread. The initial build can be parallelized by -increasing the number of threads specified by the property concurrent_materialized_view_builders in -cassandra.yaml. This property can also be manipulated at runtime through both JMX and the -setconcurrentviewbuilders and getconcurrentviewbuilders nodetool commands.

-
-
-

MV select statement

-

The select statement of a materialized view creation defines which part of the base table is included in the view. That statement is limited in a number of ways:

-
    -
  • the selection is limited to selectors that only select columns of the base table. In other words, you can’t use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can however use * as a shortcut for selecting all columns. Further, static columns cannot be included in a materialized view (which means SELECT * isn’t allowed if the base table has static columns).
  • -
  • the WHERE clause has the following restrictions:
      -
    • it cannot include any bind_marker.
    • -
    • the columns that are not part of the base table primary key can only be restricted by an IS NOT NULL -restriction. No other restriction is allowed.
    • -
    • as the columns that are part of the view primary key cannot be null, they must always be at least restricted by a -IS NOT NULL restriction (or any other restriction, but they must have one).
    • -
    -
  • -
  • it cannot have an ordering clause, a limit, or ALLOW FILTERING.
  • -
-
-
-

MV primary key

-

A view must have a primary key and that primary key must conform to the following restrictions:

-
    -
  • it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to exactly one row of the base table.
  • -
  • it can only contain a single column that is not a primary key column in the base table.
  • -
-

So for instance, given the following base table definition:

-
CREATE TABLE t (
-    k int,
-    c1 int,
-    c2 int,
-    v1 int,
-    v2 int,
-    PRIMARY KEY (k, c1, c2)
-)
-
-
-

then the following view definitions are allowed:

-
CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, k, c2)
-
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (v1, k, c1, c2)
-
-
-

but the following ones are not allowed:

-
// Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-    PRIMARY KEY (v1, v2, k, c1, c2)
-
-// Error: must include k in the primary key as it's a base table primary key column
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, c2)
-
-
-
-
-

MV options

-

A materialized view is internally implemented by a table and as such, creating a MV allows the same options as creating a table.
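For instance, a minimal sketch reusing the monkeySpecies_by_population view from above (the option values are illustrative only):

CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
    SELECT * FROM monkeySpecies
    WHERE population IS NOT NULL AND species IS NOT NULL
    PRIMARY KEY (population, species)
    WITH comment = 'Allow query by population instead of species'
     AND gc_grace_seconds = 864000;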

-
-
-
-

ALTER MATERIALIZED VIEW

-

After creation, you can alter the options of a materialized view using the ALTER MATERIALIZED VIEW statement:

-
-alter_materialized_view_statement ::=  ALTER MATERIALIZED VIEW view_name WITH table_options
-
-

The options that can be updated are the same as at creation time and thus the same as for tables.
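For instance, a sketch updating the options of the view created earlier (the caching values are illustrative only):

ALTER MATERIALIZED VIEW monkeySpecies_by_population
    WITH comment = 'Queries by population'
     AND caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' };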

-
-
-

DROP MATERIALIZED VIEW

-

Dropping a materialized view uses the DROP MATERIALIZED VIEW statement:

-
-drop_materialized_view_statement ::=  DROP MATERIALIZED VIEW [ IF EXISTS ] view_name;
-
-

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.
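For instance, a sketch dropping the view created earlier only if it exists:

DROP MATERIALIZED VIEW IF EXISTS monkeySpecies_by_population;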

-
-

MV Limitations

-
-

Note

-

Removal of columns not selected in the Materialized View (via UPDATE base SET unselected_column = null or DELETE unselected_column FROM base) may shadow missed updates to other columns received by hints or repair. For this reason, we advise against doing deletions on base columns not selected in views until this is fixed in CASSANDRA-13826.

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/operators.html b/src/doc/4.0-alpha3/cql/operators.html deleted file mode 100644 index 21abc9235..000000000 --- a/src/doc/4.0-alpha3/cql/operators.html +++ /dev/null @@ -1,301 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Arithmetic Operators" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Arithmetic Operators

-

CQL supports the following operators:

| Operator  | Description                         |
|-----------|-------------------------------------|
| - (unary) | Negates operand                     |
| +         | Addition                            |
| -         | Subtraction                         |
| *         | Multiplication                      |
| /         | Division                            |
| %         | Returns the remainder of a division |
-
-

Number Arithmetic

-

All arithmetic operations are supported on numeric types or counters.

-

The return type of the operation will be based on the operand types:

| left/right | tinyint  | smallint | int     | bigint  | counter | float   | double  | varint  | decimal |
|------------|----------|----------|---------|---------|---------|---------|---------|---------|---------|
| tinyint    | tinyint  | smallint | int     | bigint  | bigint  | float   | double  | varint  | decimal |
| smallint   | smallint | smallint | int     | bigint  | bigint  | float   | double  | varint  | decimal |
| int        | int      | int      | int     | bigint  | bigint  | float   | double  | varint  | decimal |
| bigint     | bigint   | bigint   | bigint  | bigint  | bigint  | double  | double  | varint  | decimal |
| counter    | bigint   | bigint   | bigint  | bigint  | bigint  | double  | double  | varint  | decimal |
| float      | float    | float    | float   | double  | double  | float   | double  | decimal | decimal |
| double     | double   | double   | double  | double  | double  | double  | double  | decimal | decimal |
| varint     | varint   | varint   | varint  | decimal | decimal | decimal | decimal | decimal | decimal |
| decimal    | decimal  | decimal  | decimal | decimal | decimal | decimal | decimal | decimal | decimal |
-

The *, / and % operators have a higher precedence level than the + and - operators. Consequently, they will be evaluated first. If two operators in an expression have the same precedence level, they will be evaluated left to right based on their position in the expression.
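For instance, assuming a hypothetical table scores with int columns a, b and c, the two selections below are equivalent because * binds more tightly than +:

SELECT a + b * c FROM scores;
SELECT a + (b * c) FROM scores;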

-
-
-

Datetime Arithmetic

-

A duration can be added (+) or subtracted (-) from a timestamp or a date to create a new timestamp or date. So for instance:

-
SELECT * FROM myTable WHERE t > '2017-01-01' - 2d
-
-
-

will select all the records with a value of t which is in the last 2 days of 2016.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/security.html b/src/doc/4.0-alpha3/cql/security.html deleted file mode 100644 index dd92d75f5..000000000 --- a/src/doc/4.0-alpha3/cql/security.html +++ /dev/null @@ -1,743 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-
-

Database Roles

-

CQL uses database roles to represent users and groups of users. Syntactically, a role is defined by:

-
-role_name ::=  identifier | string
-
-
-

CREATE ROLE

-

Creating a role uses the CREATE ROLE statement:

-
-create_role_statement ::=  CREATE ROLE [ IF NOT EXISTS ] role_name
-                               [ WITH role_options ]
-role_options          ::=  role_option ( AND role_option )*
-role_option           ::=  PASSWORD '=' string
-                          | LOGIN '=' boolean
-                          | SUPERUSER '=' boolean
-                          | OPTIONS '=' map_literal
-                          | ACCESS TO DATACENTERS set_literal
-                          | ACCESS TO ALL DATACENTERS
-
-

For instance:

-
CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'};
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS;
-
-
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

-

Permissions on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is -not.

-

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role.

-

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

-

Role names should be quoted if they contain non-alphanumeric characters.
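For instance, a sketch with a hypothetical role name containing a dash, which therefore must be quoted:

CREATE ROLE "reporting-ro" WITH LOGIN = true;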

-
-

Setting credentials for internal authentication

-

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single -quotation marks.

-

If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD -clause is not necessary.

-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. Not specifying datacenters implicitly grants access to all datacenters. The clause ACCESS TO ALL DATACENTERS can be used for explicitness, but there’s no functional difference.

-
-
-

Creating a role conditionally

-

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. -If the option is used and the role exists, the statement is a no-op:

-
CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-
-
-
-
-
-

ALTER ROLE

-

Altering the options of a role uses the ALTER ROLE statement:

-
-alter_role_statement ::=  ALTER ROLE role_name WITH role_options
-
-

For instance:

-
ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-
-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the -ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ACCESS TO ALL DATACENTERS clause.

-

Conditions on executing ALTER ROLE statements:

-
    -
  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • -
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • -
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • -
  • To modify properties of a role, the client must be granted ALTER permission on that role
  • -
-
-
-
-

DROP ROLE

-

Dropping a role uses the DROP ROLE statement:

-
-drop_role_statement ::=  DROP ROLE [ IF EXISTS ] role_name
-
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.

-

Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is -used. If the option is used and the role does not exist the statement is a no-op.
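For instance, reusing the other_role created earlier (the second statement is a no-op once the role is gone):

DROP ROLE other_role;
DROP ROLE IF EXISTS other_role;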

-
-

Note

-

DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain -connected and will retain the ability to perform any database actions which do not require authorization. -However, if authorization is enabled, permissions of the dropped role are also revoked, -subject to the caching options configured in cassandra.yaml. -Should a dropped role be subsequently recreated and have new permissions or -roles granted to it, any client sessions still connected will acquire the newly granted -permissions and roles.

-
-
-
-

GRANT ROLE

-

Granting a role to another uses the GRANT ROLE statement:

-
-grant_role_statement ::=  GRANT role_name TO role_name
-
-

For instance:

-
GRANT report_writer TO alice;
-
-
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also -acquired by alice.

-

Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:

-
GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
-GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-
-
-
-
-

REVOKE ROLE

-

Revoking a role uses the REVOKE ROLE statement:

-
-revoke_role_statement ::=  REVOKE role_name FROM role_name
-
-

For instance:

-
REVOKE report_writer FROM alice;
-
-
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the -report_writer role are also revoked.

-
-
-

LIST ROLES

-

All the known roles (in the system or granted to a specific role) can be listed using the LIST ROLES statement:

-
-list_roles_statement ::=  LIST ROLES [ OF role_name ] [ NORECURSIVE ]
-
-

For instance:

-
LIST ROLES;
-
-
-

returns all known roles in the system; this requires DESCRIBE permission on the database roles resource. And:

-
LIST ROLES OF alice;
-
-
-

enumerates all roles granted to alice, including those transitively acquired. But:

-
LIST ROLES OF bob NORECURSIVE
-
-
-

lists all roles directly granted to bob without including any of the transitively acquired ones.

-
-
-
-

Users

-

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -USER. For backward compatibility, the legacy syntax has been preserved with USER centric statements becoming -synonyms for the ROLE based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role.

-
-

CREATE USER

-

Creating a user uses the CREATE USER statement:

-
-create_user_statement ::=  CREATE USER [ IF NOT EXISTS ] role_name [ WITH PASSWORD string ] [ user_option ]
-user_option           ::=  SUPERUSER | NOSUPERUSER
-
-

For instance:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-
-
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of -statements are equivalent:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-
-
-
-

ALTER USER

-

Altering the options of a user uses the ALTER USER statement:

-
-alter_user_statement ::=  ALTER USER role_name [ WITH PASSWORD string ] [ user_option ]
-
-

For instance:

-
ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-
-
-
-
-

DROP USER

-

Dropping a user uses the DROP USER statement:

-
-drop_user_statement ::=  DROP USER [ IF EXISTS ] role_name
-
-
-
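For instance, dropping the bob user created above, only if it exists:

DROP USER IF EXISTS bob;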
-

LIST USERS

-

Existing users can be listed using the LIST USERS statement:

-
-list_users_statement ::=  LIST USERS
-
-

Note that this statement is equivalent to:

-
LIST ROLES;
-
-
-

but only roles with the LOGIN privilege are included in the output.

-
-
-
-

Data Control

-
-

Permissions

-

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically:

-
    -
  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> -TABLE.
  • -
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • -
  • Resources representing roles have the structure ALL ROLES -> ROLE
  • -
  • Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ALL MBEANS -> -MBEAN
  • -
-

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting -a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace.
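For instance, a sketch (role and keyspace names hypothetical) where a single grant at the keyspace level covers every table in that keyspace:

GRANT SELECT ON KEYSPACE keyspace1 TO data_reader;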

-

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes.

-

The full set of available permissions is:

-
    -
  • CREATE
  • -
  • ALTER
  • -
  • DROP
  • -
  • SELECT
  • -
  • MODIFY
  • -
  • AUTHORIZE
  • -
  • DESCRIBE
  • -
  • EXECUTE
  • -
-

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context -of functions or mbeans; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT -a permission on resource to which it cannot be applied results in an error response. The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission.

| Permission | Resource | Operations |
|------------|----------|------------|
| CREATE | ALL KEYSPACES | CREATE KEYSPACE and CREATE TABLE in any keyspace |
| CREATE | KEYSPACE | CREATE TABLE in specified keyspace |
| CREATE | ALL FUNCTIONS | CREATE FUNCTION in any keyspace and CREATE AGGREGATE in any keyspace |
| CREATE | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE in specified keyspace |
| CREATE | ALL ROLES | CREATE ROLE |
| ALTER | ALL KEYSPACES | ALTER KEYSPACE and ALTER TABLE in any keyspace |
| ALTER | KEYSPACE | ALTER KEYSPACE and ALTER TABLE in specified keyspace |
| ALTER | TABLE | ALTER TABLE |
| ALTER | ALL FUNCTIONS | CREATE FUNCTION and CREATE AGGREGATE: replacing any existing |
| ALTER | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE: replacing existing in specified keyspace |
| ALTER | FUNCTION | CREATE FUNCTION and CREATE AGGREGATE: replacing existing |
| ALTER | ALL ROLES | ALTER ROLE on any role |
| ALTER | ROLE | ALTER ROLE |
| DROP | ALL KEYSPACES | DROP KEYSPACE and DROP TABLE in any keyspace |
| DROP | KEYSPACE | DROP TABLE in specified keyspace |
| DROP | TABLE | DROP TABLE |
| DROP | ALL FUNCTIONS | DROP FUNCTION and DROP AGGREGATE in any keyspace |
| DROP | ALL FUNCTIONS IN KEYSPACE | DROP FUNCTION and DROP AGGREGATE in specified keyspace |
| DROP | FUNCTION | DROP FUNCTION |
| DROP | ALL ROLES | DROP ROLE on any role |
| DROP | ROLE | DROP ROLE |
| SELECT | ALL KEYSPACES | SELECT on any table |
| SELECT | KEYSPACE | SELECT on any table in specified keyspace |
| SELECT | TABLE | SELECT on specified table |
| SELECT | ALL MBEANS | Call getter methods on any mbean |
| SELECT | MBEANS | Call getter methods on any mbean matching a wildcard pattern |
| SELECT | MBEAN | Call getter methods on named mbean |
| MODIFY | ALL KEYSPACES | INSERT, UPDATE, DELETE and TRUNCATE on any table |
| MODIFY | KEYSPACE | INSERT, UPDATE, DELETE and TRUNCATE on any table in specified keyspace |
| MODIFY | TABLE | INSERT, UPDATE, DELETE and TRUNCATE on specified table |
| MODIFY | ALL MBEANS | Call setter methods on any mbean |
| MODIFY | MBEANS | Call setter methods on any mbean matching a wildcard pattern |
| MODIFY | MBEAN | Call setter methods on named mbean |
| AUTHORIZE | ALL KEYSPACES | GRANT PERMISSION and REVOKE PERMISSION on any table |
| AUTHORIZE | KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION on any table in specified keyspace |
| AUTHORIZE | TABLE | GRANT PERMISSION and REVOKE PERMISSION on specified table |
| AUTHORIZE | ALL FUNCTIONS | GRANT PERMISSION and REVOKE PERMISSION on any function |
| AUTHORIZE | ALL FUNCTIONS IN KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION in specified keyspace |
| AUTHORIZE | FUNCTION | GRANT PERMISSION and REVOKE PERMISSION on specified function |
| AUTHORIZE | ALL MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean |
| AUTHORIZE | MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean matching a wildcard pattern |
| AUTHORIZE | MBEAN | GRANT PERMISSION and REVOKE PERMISSION on named mbean |
| AUTHORIZE | ALL ROLES | GRANT ROLE and REVOKE ROLE on any role |
| AUTHORIZE | ROLES | GRANT ROLE and REVOKE ROLE on specified roles |
| DESCRIBE | ALL ROLES | LIST ROLES on all roles or only roles granted to another, specified role |
| DESCRIBE | ALL MBEANS | Retrieve metadata about any mbean from the platform’s MBeanServer |
| DESCRIBE | MBEANS | Retrieve metadata about any mbean matching a wildcard pattern from the platform’s MBeanServer |
| DESCRIBE | MBEAN | Retrieve metadata about a named mbean from the platform’s MBeanServer |
| EXECUTE | ALL FUNCTIONS | SELECT, INSERT and UPDATE using any function, and use of any function in CREATE AGGREGATE |
| EXECUTE | ALL FUNCTIONS IN KEYSPACE | SELECT, INSERT and UPDATE using any function in specified keyspace and use of any function in keyspace in CREATE AGGREGATE |
| EXECUTE | FUNCTION | SELECT, INSERT and UPDATE using specified function and use of the function in CREATE AGGREGATE |
| EXECUTE | ALL MBEANS | Execute operations on any mbean |
| EXECUTE | MBEANS | Execute operations on any mbean matching a wildcard pattern |
| EXECUTE | MBEAN | Execute operations on named mbean |
-
-
-

GRANT PERMISSION

-

Granting a permission uses the GRANT PERMISSION statement:

-
-grant_permission_statement ::=  GRANT permissions ON resource TO role_name
-permissions                ::=  ALL [ PERMISSIONS ] | permission [ PERMISSION ]
-permission                 ::=  CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-resource                   ::=  ALL KEYSPACES
-                               | KEYSPACE keyspace_name
-                               | [ TABLE ] table_name
-                               | ALL ROLES
-                               | ROLE role_name
-                               | ALL FUNCTIONS [ IN KEYSPACE keyspace_name ]
-                               | FUNCTION function_name '(' [ cql_type ( ',' cql_type )* ] ')'
-                               | ALL MBEANS
-                               | ( MBEAN | MBEANS ) string
-
-

For instance:

-
GRANT SELECT ON ALL KEYSPACES TO data_reader;
-
-
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all -keyspaces:

-
GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-
-
-

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace:

-
GRANT DROP ON keyspace1.table1 TO schema_owner;
-
-
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1:

-
GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-
-
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries -which use the function keyspace1.user_function( int ):

-
GRANT DESCRIBE ON ALL ROLES TO role_admin;
-
-
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST ROLES statement.

-
-

GRANT ALL

-

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target -resource.
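For instance, a sketch (role name hypothetical) granting every applicable permission on a keyspace:

GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO keyspace1_admin;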

-
-
-

Automatic Granting

-

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or CREATE ROLE statement, the creator (the role the database user who issues the statement is identified as) is automatically granted all applicable permissions on the new resource.

-
-
-
-

REVOKE PERMISSION

-

Revoking a permission from a role uses the REVOKE PERMISSION statement:

-
-revoke_permission_statement ::=  REVOKE permissions ON resource FROM role_name
-
-

For instance:

-
REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-
-
-

Because of their function in normal driver operations, certain tables cannot have their SELECT permissions -revoked. The following tables will be available to all authorized users regardless of their assigned role:

-
* `system_schema.keyspaces`
-* `system_schema.columns`
-* `system_schema.tables`
-* `system.local`
-* `system.peers`
-
-
-
-
-

LIST PERMISSIONS

-

Listing granted permissions uses the LIST PERMISSIONS statement:

-
-list_permissions_statement ::=  LIST permissions [ ON resource ] [ OF role_name [ NORECURSIVE ] ]
-
-

For instance:

-
LIST ALL PERMISSIONS OF alice;
-
-
-

Show all permissions granted to alice, including those acquired transitively from any other roles:

-
LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-
-
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. -For example, should bob have ALTER permission on keyspace1, that would be included in the results of this -query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to -bob or one of bob’s roles:

-
LIST SELECT PERMISSIONS OF carlos;
-
-
-

Show any permissions granted to carlos or any of carlos’s roles, limited to SELECT permissions on any -resource.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/triggers.html b/src/doc/4.0-alpha3/cql/triggers.html deleted file mode 100644 index b9091c738..000000000 --- a/src/doc/4.0-alpha3/cql/triggers.html +++ /dev/null @@ -1,156 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Triggers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Triggers

-

Triggers are identified by a name defined by:

-
-trigger_name ::=  identifier
-
-
-

CREATE TRIGGER

-

Creating a new trigger uses the CREATE TRIGGER statement:

-
-create_trigger_statement ::=  CREATE TRIGGER [ IF NOT EXISTS ] trigger_name
-                                  ON table_name
-                                  USING string
-
-

For instance:

-
CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-
-
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. You place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it loads during cluster startup and must exist on every node that participates in the cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

-
-
-

DROP TRIGGER

-

Dropping a trigger uses the DROP TRIGGER statement:

-
-drop_trigger_statement ::=  DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name
-
-

For instance:

-
DROP TRIGGER myTrigger ON myTable;
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/cql/types.html b/src/doc/4.0-alpha3/cql/types.html deleted file mode 100644 index 576817e7f..000000000 --- a/src/doc/4.0-alpha3/cql/types.html +++ /dev/null @@ -1,700 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Types" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Types

-

CQL is a typed language and supports a rich set of data types, including native types, -collection types, user-defined types, tuple types and custom -types:

-
-cql_type ::=  native_type | collection_type | user_defined_type | tuple_type | custom_type
-
-
-

Native Types

-

The native types supported by CQL are:

-
-native_type ::=  ASCII
-                 | BIGINT
-                 | BLOB
-                 | BOOLEAN
-                 | COUNTER
-                 | DATE
-                 | DECIMAL
-                 | DOUBLE
-                 | DURATION
-                 | FLOAT
-                 | INET
-                 | INT
-                 | SMALLINT
-                 | TEXT
-                 | TIME
-                 | TIMESTAMP
-                 | TIMEUUID
-                 | TINYINT
-                 | UUID
-                 | VARCHAR
-                 | VARINT
-
-

The following table gives additional information on the native data types, and on which kinds of constants each type supports:

| type | constants supported | description |
|------|---------------------|-------------|
| ascii | string | ASCII character string |
| bigint | integer | 64-bit signed long |
| blob | blob | Arbitrary bytes (no validation) |
| boolean | boolean | Either true or false |
| counter | integer | Counter column (64-bit signed value). See Counters for details |
| date | integer, string | A date (with no corresponding time value). See Working with dates below for details |
| decimal | integer, float | Variable-precision decimal |
| double | integer, float | 64-bit IEEE-754 floating point |
| duration | duration | A duration with nanosecond precision. See Working with durations below for details |
| float | integer, float | 32-bit IEEE-754 floating point |
| inet | string | An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that there is no inet constant; IP addresses should be input as strings |
| int | integer | 32-bit signed int |
| smallint | integer | 16-bit signed int |
| text | string | UTF8 encoded string |
| time | integer, string | A time (with no corresponding date value) with nanosecond precision. See Working with times below for details |
| timestamp | integer, string | A timestamp (date and time) with millisecond precision. See Working with timestamps below for details |
| timeuuid | uuid | Version 1 UUID, generally used as a “conflict-free” timestamp. Also see Timeuuid functions |
| tinyint | integer | 8-bit signed int |
| uuid | uuid | A UUID (of any version) |
| varchar | string | UTF8 encoded string |
| varint | integer | Arbitrary-precision integer |
-
-

Counters

-

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the UPDATE statement for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0.
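For instance, a minimal sketch with a hypothetical page_views table:

CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter
);

UPDATE page_views SET views = views + 1 WHERE page = '/home';
UPDATE page_views SET views = views - 1 WHERE page = '/home';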

-

Counters have a number of important limitations:

-
    -
  • They cannot be used for columns part of the PRIMARY KEY of a table.
  • -
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside -the PRIMARY KEY have the counter type, or none of them have it.
  • -
  • Counters do not support expiration.
  • -
  • The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other -words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
  • -
  • Counter updates are, by nature, not idempotent. An important consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), the client has no way to know if the update has been applied or not. In particular, replaying the update may or may not lead to an over count.
  • -
-
-
-
-

Working with timestamps

-

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as the epoch: January 1 1970 at 00:00:00 GMT.

-

Timestamps can be input in CQL either using their value as an integer, or using a string that -represents an ISO 8601 date. For instance, all of the values below are -valid timestamp values for Mar 2, 2011, at 04:05:00 AM, GMT:

-
    -
  • 1299038700000
  • -
  • '2011-02-03 04:05+0000'
  • -
  • '2011-02-03 04:05:00+0000'
  • -
  • '2011-02-03 04:05:00.000+0000'
  • -
  • '2011-02-03T04:05+0000'
  • -
  • '2011-02-03T04:05:00+0000'
  • -
  • '2011-02-03T04:05:00.000+0000'
  • -
-

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is --0800. The time zone may be omitted if desired ('2011-02-03 04:05:00'), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible.
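For instance, a sketch inserting a timestamp with an explicit time zone into a hypothetical events table:

CREATE TABLE events (
    id int PRIMARY KEY,
    at timestamp
);

INSERT INTO events (id, at) VALUES (1, '2011-02-03 04:05:00.000+0000');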

-

The time of day may also be omitted ('2011-02-03' or '2011-02-03+0000'), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the date type.

-
-
-

Working with dates

-

Values of the date type are encoded as 32-bit unsigned integers representing a number of days, with “the epoch” at the center of the range (2^31). The epoch is January 1st, 1970.

-

As for timestamp, a date can be input either as an integer or using a date string. In the latter case, the format should be yyyy-mm-dd (so '2011-02-03' for instance).

-
-
-

Working with times

-

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

-

As for timestamp, a time can be input either as an integer or using a string representing the time. In the latter case, the format should be hh:mm:ss[.fffffffff] (where the sub-second precision is optional and, if provided, can be coarser than nanosecond precision). So for instance, the following are valid inputs for a time:

-
    -
  • '08:12:54'
  • -
  • '08:12:54.123'
  • -
  • '08:12:54.123456'
  • -
  • '08:12:54.123456789'
  • -
-
-
-

Working with durations

-

Values of the duration type are encoded as 3 signed integers of variable length. The first integer represents the number of months, the second the number of days and the third the number of nanoseconds. This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving time. Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded as a 64-bit integer.

-

A duration can be input as:

-
  1. (quantity unit)+ like 12h30m where the unit can be:
     • y: years (12 months)
     • mo: months (1 month)
     • w: weeks (7 days)
     • d: days (1 day)
     • h: hours (3,600,000,000,000 nanoseconds)
     • m: minutes (60,000,000,000 nanoseconds)
     • s: seconds (1,000,000,000 nanoseconds)
     • ms: milliseconds (1,000,000 nanoseconds)
     • us or µs: microseconds (1,000 nanoseconds)
     • ns: nanoseconds (1 nanosecond)
  2. ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W
  3. ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]
-
-

For example:

-
INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-
-

Duration columns cannot be used in a table’s PRIMARY KEY. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if 1mo is greater than 29d without a date -context.

-

A 1d duration is not equal to a 24h one, as the duration type has been created to support daylight saving time.

-
-
-

Collections

-

CQL supports 3 kinds of collections: Maps, Sets and Lists. The types of those collections are defined by:

-
-collection_type ::=  MAP '<' cql_type ',' cql_type '>'
-                     | SET '<' cql_type '>'
-                     | LIST '<' cql_type '>'
-
-

and their values can be input using collection literals:

-
-collection_literal ::=  map_literal | set_literal | list_literal
-map_literal        ::=  '{' [ term ':' term (',' term : term)* ] '}'
-set_literal        ::=  '{' [ term (',' term)* ] '}'
-list_literal       ::=  '[' [ term (',' term)* ] ']'
-
-

Note however that neither bind_marker nor NULL are supported inside collection literals.

-
-

Noteworthy characteristics

-

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all messages sent by a user”, “events registered by a sensor”…), then collections are not appropriate and a specific table (with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy characteristics and limitations:

-
    -
  • Individual collections are not indexed internally, which means that even to access a single element of a collection, the whole collection has to be read (and reading one is not paged internally).
  • -
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. Further, some list operations are not idempotent by nature (see the section on lists below for details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when possible.
  • -
-

Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an anti-pattern to use a (single) collection to store large amounts of data.

-
-
-

Maps

-

A map is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:

-
CREATE TABLE users (
-    id text PRIMARY KEY,
-    name text,
-    favs map<text, text> // A map of text keys, and text values
-);
-
-INSERT INTO users (id, name, favs)
-           VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-// Replace the existing map entirely.
-UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-
-

Further, maps support:

-
    -
  • Updating or inserting one or more elements:

    -
    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    -UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
    -
    -
    -
  • -
  • Removing one or more element (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    -UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
    -
    -
    -

    Note that for removing multiple elements in a map, you remove from it a set of keys.

    -
  • -
-

Lastly, TTLs are allowed for both INSERT and UPDATE, but in both case the TTL set only apply to the newly -inserted/updated elements. In other words:

-
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

-
-
-

Sets

-

A set is a (sorted) collection of unique values. You can define and insert a set with:

-
CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    tags set<text> // A set of text values
-);
-
-INSERT INTO images (name, owner, tags)
-            VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-// Replace the existing set entirely
-UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-
-

Further, sets support:

-
    -
  • Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):

    -
    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
  • Removing one or multiple elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
-

Lastly, as for maps, TTLs if used only apply to the newly inserted values.

-
-
-

Lists

-
-

Note

-

As mentioned above and further discussed at the end of this section, lists have limitations and specific -performance considerations that you should take into account before using them. In general, if you can use a -set instead of list, always prefer a set.

-
-

A list is a (sorted) collection of non-unique values where elements are ordered by their position in the list. You can define and insert a list with:

-
CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int> // A list of integers
-)
-
-INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-// Replace the existing list entirely
-UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-
-

Further, lists support:

-
    -
  • Appending and prepending values to a list:

    -
    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    -UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
    -
    -
    -
  • -
  • Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small:

    -
    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
    -
    -
    -
  • -
  • Removing an element by its position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted:

    -
    DELETE scores[1] FROM plays WHERE id = '123-afde';
    -
    -
    -
  • -
  • Deleting all the occurrences of particular values in the list (if a particular element doesn’t occur at all in the -list, it is simply ignored and no error is thrown):

    -
    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
    -
    -
    -
  • -
-
-

Warning

-

The append and prepend operations are not idempotent by nature. So in particular, if one of these operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value twice.

-
-
-

Warning

-

Setting and removing an element by position and removing occurrences of particular values incur an internal read-before-write. They will thus run more slowly and take more resources than usual updates (with the exception of conditional writes that have their own cost).

-
-

Lastly, as for maps, TTLs when used only apply to the newly inserted values.

-
-
-
-

User-Defined Types

-

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using the create_type_statement, alter_type_statement and drop_type_statement described below. But once created, a UDT is simply referred to by its name:

-
-user_defined_type ::=  udt_name
-udt_name          ::=  [ keyspace_name '.' ] identifier
-
-
-

Creating a UDT

-

Creating a new user-defined type is done using a CREATE TYPE statement defined by:

-
-create_type_statement ::=  CREATE TYPE [ IF NOT EXISTS ] udt_name
-                               '(' field_definition ( ',' field_definition )* ')'
-field_definition      ::=  identifier cql_type
-
-

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any type, including collections or other UDTs. For instance:

-
CREATE TYPE phone (
-    country_code int,
-    number text,
-)
-
-CREATE TYPE address (
-    street text,
-    city text,
-    zip text,
-    phones map<text, phone>
-)
-
-CREATE TABLE user (
-    name text PRIMARY KEY,
-    addresses map<text, frozen<address>>
-)
-
-
-

Note that:

-
    -
  • Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the type already exists.
  • -
  • A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At -creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in -the current keyspace.
  • -
  • As of Cassandra 4.0-alpha3, UDTs have to be frozen in most cases, hence the frozen<address> in the table definition above. Please see the section on frozen for more details.
  • -
-
-
-

UDT literals

-

Once a user-defined type has been created, values can be input using a UDT literal:

-
-udt_literal ::=  '{' identifier ':' term ( ',' identifier ':' term )* '}'
-
-

In other words, a UDT literal is like a map literal but its keys are the names of the fields of the type. For instance, one could insert into the table defined in the previous section using:

-
INSERT INTO user (name, addresses)
-          VALUES ('z3 Pr3z1den7', {
-              'home' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                            'landline' : { country_code: 1, number: '...' } }
-              },
-              'work' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'fax' : { country_code: 1, number: '...' } }
-              }
-          })
-
-
-

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields (in which case those will be null).
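For instance, a sketch reusing the user table and address type above, omitting every address field except street (the omitted fields will be null):

INSERT INTO user (name, addresses)
          VALUES ('jdoe', { 'home' : { street: '123 Main St' } });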

-
-
-

Altering a UDT

-

An existing user-defined type can be modified using an ALTER TYPE statement:

-
-alter_type_statement    ::=  ALTER TYPE udt_name alter_type_modification
-alter_type_modification ::=  ADD field_definition
-                             | RENAME identifier TO identifier ( identifier TO identifier )*
-
-

You can:

-
    -
  • add a new field to the type (ALTER TYPE address ADD country text). That new field will be null for any values -of the type created before the addition.
  • -
  • rename the fields of the type (ALTER TYPE address RENAME zip TO zipcode).
  • -
-
-
-

Dropping a UDT

-

You can drop an existing user-defined type using a DROP TYPE statement:

-
-drop_type_statement ::=  DROP TYPE [ IF EXISTS ] udt_name
-
-

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error.

-

If the type dropped does not exist, an error will be returned unless IF EXISTS is used, in which case the operation -is a no-op.
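For instance, a sketch dropping the address type defined earlier only if it exists (note that this would fail while the user table above still uses it):

DROP TYPE IF EXISTS address;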

-
-
-
-

Tuples

-

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

-
-tuple_type    ::=  TUPLE '<' cql_type ( ',' cql_type )* '>'
-tuple_literal ::=  '(' term ( ',' term )* ')'
-
-

and can be used thusly:

-
CREATE TABLE durations (
-    event text,
-    duration tuple<int, text>,
-)
-
-INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-
-

Unlike other “composed” types (collections and UDTs), a tuple is always frozen (without the need of the frozen keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of those values can be null but they need to be explicitly declared as such).

-
-
-

Custom Types

-
-

Note

-

Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is complex, not user friendly, and the other provided types, particularly user-defined types, should almost always be enough.

-
-

A custom type is defined by:

-
-custom_type ::=  string
-
-

A custom type is a string that contains the name of a Java class that extends the server-side AbstractType class and that can be loaded by Cassandra (it should thus be in the CLASSPATH of every node running Cassandra). That class will define what values are valid for the type and how the type sorts when used for a clustering column. For any other purpose, a value of a custom type is the same as that of a blob, and can in particular be input using the blob literal syntax.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/data_modeling/data_modeling_conceptual.html b/src/doc/4.0-alpha3/data_modeling/data_modeling_conceptual.html deleted file mode 100644 index dd77b3777..000000000 --- a/src/doc/4.0-alpha3/data_modeling/data_modeling_conceptual.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Conceptual Data Modeling" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Conceptual Data Modeling

-

First, let’s create a simple domain model that is easy to understand in -the relational world, and then see how you might map it from a relational -to a distributed hashtable model in Cassandra.

-

Let’s use an example that is complex enough -to show the various data structures and design patterns, but not -something that will bog you down with details. Also, a domain that’s -familiar to everyone will allow you to concentrate on how to work with -Cassandra, not on what the application domain is all about.

-

For example, let’s use a domain that is easily understood and that -everyone can relate to: making hotel reservations.

-

The conceptual domain includes hotels, guests that stay in the hotels, a -collection of rooms for each hotel, the rates and availability of those -rooms, and a record of reservations booked for guests. Hotels typically -also maintain a collection of “points of interest,” which are parks, -museums, shopping galleries, monuments, or other places near the hotel -that guests might want to visit during their stay. Both hotels and -points of interest need to maintain geolocation data so that they can be -found on maps for mashups, and to calculate distances.

-

The conceptual domain is depicted below using the entity–relationship -model popularized by Peter Chen. This simple diagram represents the -entities in the domain with rectangles, and attributes of those entities -with ovals. Attributes that represent unique identifiers for items are -underlined. Relationships between entities are represented as diamonds, -and the connectors between the relationship and each entity show the -multiplicity of the connection.

[Figure: hotel domain entity–relationship diagram (data_modeling_hotel_erd.png)]

Obviously, in the real world, there would be many more considerations -and much more complexity. For example, hotel rates are notoriously -dynamic, and calculating them involves a wide array of factors. Here -you’re defining something complex enough to be interesting and touch on -the important points, but simple enough to maintain the focus on -learning Cassandra.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/data_modeling/data_modeling_logical.html b/src/doc/4.0-alpha3/data_modeling/data_modeling_logical.html deleted file mode 100644 index 4ac54efdb..000000000 --- a/src/doc/4.0-alpha3/data_modeling/data_modeling_logical.html +++ /dev/null @@ -1,284 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Logical Data Modeling" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Logical Data Modeling

-

Now that you have defined your queries, you’re ready to begin designing -Cassandra tables. First, create a logical model containing a table -for each query, capturing entities and relationships from the conceptual -model.

-

To name each table, you’ll identify the primary entity type for which you -are querying and use that to start the entity name. If you are querying -by attributes of other related entities, append those to the table -name, separated with _by_. For example, hotels_by_poi.

-

Next, you identify the primary key for the table, adding partition key -columns based on the required query attributes, and clustering columns -in order to guarantee uniqueness and support desired sort ordering.

-

The design of the primary key is extremely important, as it will -determine how much data will be stored in each partition and how that -data is organized on disk, which in turn will affect how quickly -Cassandra processes reads.

-

Complete each table by adding any additional attributes identified by -the query. If any of these additional attributes are the same for every -instance of the partition key, mark the column as static.

-

Now that was a pretty quick description of a fairly involved process, so -it will be worthwhile to work through a detailed example. First, -let’s introduce a notation that you can use to represent logical -models.

-

Several individuals within the Cassandra community have proposed -notations for capturing data models in diagrammatic form. This document -uses a notation popularized by Artem Chebotko which provides a simple, -informative way to visualize the relationships between queries and -tables in your designs. This figure shows the Chebotko notation for a -logical data model.

[Figure: Chebotko notation for a logical data model (data_modeling_chebotko_logical.png)]

Each table is shown with its title and a list of columns. Primary key -columns are identified via symbols such as K for partition key -columns and C↑ or C↓ to represent clustering columns. Lines -are shown entering tables or between tables to indicate the queries that -each table is designed to support.

-
-

Hotel Logical Data Model

-

The figure below shows a Chebotko logical data model for the queries -involving hotels, points of interest, rooms, and amenities. One thing you’ll -notice immediately is that the Cassandra design doesn’t include dedicated -tables for rooms or amenities, as you had in the relational design. This -is because the workflow didn’t identify any queries requiring this -direct access.

[Figure: hotel logical data model (data_modeling_hotel_logical.png)]

Let’s explore the details of each of these tables.

-

The first query Q1 is to find hotels near a point of interest, so you’ll -call this table hotels_by_poi. Searching by a named point of -interest is a clue that the point of interest should be a part -of the primary key. Let’s reference the point of interest by name, -because according to the workflow that is how users will start their -search.

-

You’ll note that you certainly could have more than one hotel near a -given point of interest, so you’ll need another component in the primary -key in order to make sure you have a unique partition for each hotel. So -you add the hotel key as a clustering column.

-

An important consideration in designing your table’s primary key is -making sure that it defines a unique data element. Otherwise you run the -risk of accidentally overwriting data.

-

Now for the second query (Q2), you’ll need a table to get information -about a specific hotel. One approach would have been to put all of the -attributes of a hotel in the hotels_by_poi table, but you added -only those attributes that were required by the application workflow.

-

From the workflow diagram, you know that the hotels_by_poi table is -used to display a list of hotels with basic information on each hotel, -and the application knows the unique identifiers of the hotels returned. -When the user selects a hotel to view details, you can then use Q2, which -is used to obtain details about the hotel. Because you already have the -hotel_id from Q1, you use that as a reference to the hotel you’re -looking for. Therefore the second table is just called hotels.

-

Another option would have been to store a set of poi_names in the -hotels table. This is an equally valid approach. You’ll learn through -experience which approach is best for your application.

-

Q3 is just a reverse of Q1—looking for points of interest near a hotel, -rather than hotels near a point of interest. This time, however, you need -to access the details of each point of interest, as represented by the -pois_by_hotel table. As previously, you add the point of -interest name as a clustering key to guarantee uniqueness.

-

At this point, let’s now consider how to support query Q4 to help the -user find available rooms at a selected hotel for the nights they are -interested in staying. Note that this query involves both a start date -and an end date. Because you’re querying over a range instead of a single -date, you know that you’ll need to use the date as a clustering key. -Use the hotel_id as a primary key to group room data for each hotel -on a single partition, which should help searches be super fast. Let’s -call this the available_rooms_by_hotel_date table.

-

To support searching over a range, use clustering columns to store -attributes that you need to access in a range query. Remember that the -order of the clustering columns is important.

-

The design of the available_rooms_by_hotel_date table is an instance -of the wide partition pattern. This -pattern is sometimes called the wide row pattern when discussing -databases that support similar models, but wide partition is a more -accurate description from a Cassandra perspective. The essence of the -pattern is to group multiple related rows in a partition in order to -support fast access to multiple rows within the partition in a single -query.

-

In order to round out the shopping portion of the data model, add the -amenities_by_room table to support Q5. This will allow users to -view the amenities of one of the rooms that is available for the desired -stay dates.

-
-
-

Reservation Logical Data Model

-

Now let’s switch gears to look at the reservation queries. The figure -shows a logical data model for reservations. You’ll notice that these -tables represent a denormalized design; the same data appears in -multiple tables, with differing keys.

-../_images/data_modeling_reservation_logical.png -

In order to satisfy Q6, the reservations_by_guest table can be used -to look up the reservation by guest name. You could envision query Q7 -being used on behalf of a guest on a self-serve website or a call center -agent trying to assist the guest. Because the guest name might not be -unique, you include the guest ID here as a clustering column as well.

-

Q8 and Q9 in particular help to remind you to create queries -that support various stakeholders of the application, not just customers -but staff as well, and perhaps even the analytics team, suppliers, and so -on.

-

The hotel staff might wish to see a record of upcoming reservations by -date in order to get insight into how the hotel is performing, such as -what dates the hotel is sold out or undersold. Q8 supports the retrieval -of reservations for a given hotel by date.

-

Finally, you create a guests table. This provides a single -location used to store guest information. In this case, you specify a -separate unique identifier for guest records, as it is not uncommon -for guests to have the same name. In many organizations, a customer -database such as the guests table would be part of a separate -customer management application, which is why other guest -access patterns were omitted from the example.

-
-
-

Patterns and Anti-Patterns

-

As with other types of software design, there are some well-known -patterns and anti-patterns for data modeling in Cassandra. You’ve already -used one of the most common patterns in this hotel model—the wide -partition pattern.

-

The time series pattern is an extension of the wide partition -pattern. In this pattern, a series of measurements at specific time -intervals are stored in a wide partition, where the measurement time is -used as part of the partition key. This pattern is frequently used in -domains including business analysis, sensor data management, and -scientific experiments.

-

The time series pattern is also useful for data other than measurements. -Consider the example of a banking application. You could store each -customer’s balance in a row, but that might lead to a lot of read and -write contention as various customers check their balance or make -transactions. You’d probably be tempted to wrap a transaction around -writes just to protect the balance from being updated in error. In -contrast, a time series–style design would store each transaction as a -timestamped row and leave the work of calculating the current balance to -the application.
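As an illustration only (the table and column names below are hypothetical and not part of the hotel example), such a ledger might look like this, with the application deriving the balance from the rows rather than storing it:

```cql
-- One row per transaction; the current balance is computed by the
-- application, avoiding read/write contention on a single balance cell.
CREATE TABLE transactions_by_account (
  account_id text,
  transaction_time timestamp,
  amount decimal,
  description text,
  PRIMARY KEY ((account_id), transaction_time)
) WITH CLUSTERING ORDER BY (transaction_time DESC);
```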

-

One design trap that many new users fall into is attempting to use -Cassandra as a queue. Each item in the queue is stored with a timestamp -in a wide partition. Items are appended to the end of the queue and read -from the front, being deleted after they are read. This is a design that -seems attractive, especially given its apparent similarity to the time -series pattern. The problem with this approach is that the deleted items -are now tombstones that Cassandra must scan past -in order to read from the front of the queue. Over time, a growing number -of tombstones begins to degrade read performance.

-

The queue anti-pattern serves as a reminder that any design that relies -on the deletion of data is potentially a poorly performing design.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/data_modeling/data_modeling_physical.html b/src/doc/4.0-alpha3/data_modeling/data_modeling_physical.html deleted file mode 100644 index a7d21de84..000000000 --- a/src/doc/4.0-alpha3/data_modeling/data_modeling_physical.html +++ /dev/null @@ -1,199 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Physical Data Modeling" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Physical Data Modeling

-

Once you have a logical data model defined, creating the physical model -is a relatively simple process.

-

You walk through each of the logical model tables, assigning types to -each item. You can use any valid CQL data type, -including the basic types, collections, and user-defined types. You may -identify additional user-defined types that can be created to simplify -your design.

-

After you’ve assigned data types, you analyze the model by performing -size calculations and testing out how the model works. You may make some -adjustments based on your findings. Once again let’s cover the data -modeling process in more detail by working through an example.

-

Before getting started, let’s look at a few additions to the Chebotko -notation for physical data models. To draw physical models, you need to -be able to add the typing information for each column. This figure -shows the addition of a type for each column in a sample table.

-../_images/data_modeling_chebotko_physical.png -

The figure includes a designation of the keyspace containing each table -and visual cues for columns represented using collections and -user-defined types. Note the designation of static columns and -secondary index columns. There is no restriction on assigning these as -part of a logical model, but they are typically more of a physical data -modeling concern.

-
-

Hotel Physical Data Model

-

Now let’s get to work on the physical model. First, you need keyspaces -to contain the tables. To keep the design relatively simple, create a -hotel keyspace to contain tables for hotel and availability -data, and a reservation keyspace to contain tables for reservation -and guest data. In a real system, you might divide the tables across even -more keyspaces in order to separate concerns.

-

For the hotels table, use Cassandra’s text type to -represent the hotel’s id. For the address, create an -address user defined type. Use the text type to represent the -phone number, as there is considerable variance in the formatting of -numbers between countries.

-

While it would make sense to use the uuid type for attributes such -as the hotel_id, this document uses mostly text attributes as -identifiers, to keep the samples simple and readable. For example, a -common convention in the hospitality industry is to reference properties -by short codes like “AZ123” or “NY229”. This example uses these values -for hotel_ids, while acknowledging they are not necessarily globally -unique.

-

You’ll find that it’s often helpful to use unique IDs to uniquely -reference elements, and to use these uuids as references in tables -representing other entities. This helps to minimize coupling between -different entity types. This may prove especially effective if you are -using a microservice architectural style for your application, in which -there are separate services responsible for each entity type.

-

As you work to create physical representations of various tables in the -logical hotel data model, you use the same approach. The resulting design -is shown in this figure:

-../_images/data_modeling_hotel_physical.png -

Note that the address type is also included in the design. It -is designated with an asterisk to denote that it is a user-defined type, -and has no primary key columns identified. This type is used in -the hotels and hotels_by_poi tables.

-

User-defined types are frequently used to help reduce duplication of -non-primary key columns, as was done with the address -user-defined type. This can reduce complexity in the design.

-

Remember that the scope of a UDT is the keyspace in which it is defined. -To use address in the reservation keyspace defined below, -you’ll have to declare it again. This is just one of the many -trade-offs you have to make in data model design.
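For example (an abbreviated sketch; the full type definitions appear in the schema section later in this guide), the same type ends up being declared once per keyspace:

```cql
-- A UDT is scoped to its keyspace, so it must be declared in each
-- keyspace that uses it (field list abbreviated for illustration).
CREATE TYPE hotel.address (street text, city text, country text);
CREATE TYPE reservation.address (street text, city text, country text);
```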

-
-
-

Reservation Physical Data Model

-

Now, let’s examine reservation tables in the design. -Remember that the logical model contained three denormalized tables to -support queries for reservations by confirmation number, guest, and -hotel and date. For the first iteration of your physical data model -design, assume you’re going to manage this denormalization -manually. Note that this design could be revised to use Cassandra’s -(experimental) materialized view feature.

-../_images/data_modeling_reservation_physical.png -

Note that the address type is reproduced in this keyspace and -guest_id is modeled as a uuid type in all of the tables.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/data_modeling/data_modeling_queries.html b/src/doc/4.0-alpha3/data_modeling/data_modeling_queries.html deleted file mode 100644 index 7fbd43edd..000000000 --- a/src/doc/4.0-alpha3/data_modeling/data_modeling_queries.html +++ /dev/null @@ -1,170 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Defining Application Queries" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Defining Application Queries

-

Let’s try the query-first approach to start designing the data model for -a hotel application. The user interface design for the application is -often a great artifact to use to begin identifying queries. Let’s assume -that you’ve talked with the project stakeholders and your UX designers -have produced user interface designs or wireframes for the key use -cases. You’ll likely have a list of shopping queries like the following:

-
    -
  • Q1. Find hotels near a given point of interest.
  • -
  • Q2. Find information about a given hotel, such as its name and -location.
  • -
  • Q3. Find points of interest near a given hotel.
  • -
  • Q4. Find an available room in a given date range.
  • -
  • Q5. Find the rate and amenities for a room.
  • -
-

It is often helpful to be able to refer -to queries by a shorthand number rather than explaining them in full. -The queries listed here are numbered Q1, Q2, and so on, which is how they -are referenced in diagrams throughout the example.

-

Now if the application is to be a success, you’ll certainly want -customers to be able to book reservations at hotels. This includes -steps such as selecting an available room and entering their guest -information. So clearly you will also need some queries that address the -reservation and guest entities from the conceptual data model. Even -here, however, you’ll want to think not only from the customer -perspective in terms of how the data is written, but also in terms of -how the data will be queried by downstream use cases.

-

Your natural tendency might be to focus first on -designing the tables to store reservation and guest records, and only -then start thinking about the queries that would access them. You may -have felt a similar tension already when discussing the -shopping queries before, thinking “but where did the hotel and point of -interest data come from?” Don’t worry, you will see soon enough. -Here are some queries that describe how users will access -reservations:

-
    -
  • Q6. Lookup a reservation by confirmation number.
  • -
  • Q7. Lookup a reservation by hotel, date, and guest name.
  • -
  • Q8. Lookup all reservations by guest name.
  • -
  • Q9. View guest details.
  • -
-

All of the queries are shown in the context of the workflow of the -application in the figure below. Each box on the diagram represents a -step in the application workflow, with arrows indicating the flows -between steps and the associated query. If you’ve modeled the application -well, each step of the workflow accomplishes a task that “unlocks” -subsequent steps. For example, the “View hotels near POI” task helps -the application learn about several hotels, including their unique keys. -The key for a selected hotel may be used as part of Q2, in order to -obtain detailed description of the hotel. The act of booking a room -creates a reservation record that may be accessed by the guest and -hotel staff at a later time through various additional queries.

-../_images/data_modeling_hotel_queries.png -

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/data_modeling/data_modeling_rdbms.html b/src/doc/4.0-alpha3/data_modeling/data_modeling_rdbms.html deleted file mode 100644 index 2a6b612d9..000000000 --- a/src/doc/4.0-alpha3/data_modeling/data_modeling_rdbms.html +++ /dev/null @@ -1,251 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "RDBMS Design" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

RDBMS Design

-

When you set out to build a new data-driven application that will use a -relational database, you might start by modeling the domain as a set of -properly normalized tables and use foreign keys to reference related -data in other tables.

-

The figure below shows how you might represent the data storage for your application -using a relational database model. The relational model includes a -couple of “join” tables in order to realize the many-to-many -relationships from the conceptual model of hotels-to-points of interest, -rooms-to-amenities, rooms-to-availability, and guests-to-rooms (via a -reservation).

-../_images/data_modeling_hotel_relational.png -
-

Design Differences Between RDBMS and Cassandra

-

Let’s take a minute to highlight some of the key differences in doing -data modeling for Cassandra versus a relational database.

-
-

No joins

-

You cannot perform joins in Cassandra. If you have designed a data model -and find that you need something like a join, you’ll have to either do -the work on the client side, or create a denormalized second table that -represents the join results for you. This latter option is preferred in -Cassandra data modeling. Performing joins on the client should be a very -rare case; you really want to duplicate (denormalize) the data instead.

-
-
-

No referential integrity

-

Although Cassandra supports features such as lightweight transactions -and batches, Cassandra itself has no concept of referential integrity -across tables. In a relational database, you could specify foreign keys -in a table to reference the primary key of a record in another table. -But Cassandra does not enforce this. It is still a common design -requirement to store IDs related to other entities in your tables, but -operations such as cascading deletes are not available.

-
-
-

Denormalization

-

In relational database design, you are often taught the importance of -normalization. This is not an advantage when working with Cassandra -because it performs best when the data model is denormalized. It is -often the case that companies end up denormalizing data in relational -databases as well. There are two common reasons for this. One is -performance. Companies simply can’t get the performance they need when -they have to do so many joins on years’ worth of data, so they -denormalize along the lines of known queries. This ends up working, but -goes against the grain of how relational databases are intended to be -designed, and ultimately makes one question whether using a relational -database is the best approach in these circumstances.

-

A second reason that relational databases get denormalized on purpose is -a business document structure that requires retention. That is, you have -an enclosing table that refers to a lot of external tables whose data -could change over time, but you need to preserve the enclosing document -as a snapshot in history. The common example here is with invoices. You -already have customer and product tables, and you’d think that you could -just make an invoice that refers to those tables. But this should never -be done in practice. Customer or price information could change, and -then you would lose the integrity of the invoice document as it was on -the invoice date, which could violate audits, reports, or laws, and -cause other problems.

-

In the relational world, denormalization violates Codd’s normal forms, -and you try to avoid it. But in Cassandra, denormalization is, well, -perfectly normal. It’s not required if your data model is simple. But -don’t be afraid of it.

-

Historically, denormalization in Cassandra has required designing and -managing multiple tables using techniques described in this documentation. -Beginning with the 3.0 release, Cassandra provides a feature known -as materialized views -which allows you to create multiple denormalized -views of data based on a base table design. Cassandra manages -materialized views on the server, including the work of keeping the -views in sync with the table.
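As a rough sketch of what that could look like against the hotels table defined later in this guide (the view name and the choice of name as the lookup key are illustrative assumptions, and the feature remains experimental):

```cql
-- Cassandra keeps the view in sync with the base table on the server.
CREATE MATERIALIZED VIEW hotels_by_name AS
  SELECT * FROM hotels
  WHERE name IS NOT NULL AND id IS NOT NULL
  PRIMARY KEY (name, id);
```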

-
-
-

Query-first design

-

Relational modeling, in simple terms, means that you start from the -conceptual domain and then represent the nouns in the domain in tables. -You then assign primary keys and foreign keys to model relationships. -When you have a many-to-many relationship, you create the join tables -that represent just those keys. The join tables don’t exist in the real -world, and are a necessary side effect of the way relational models -work. After you have all your tables laid out, you can start writing -queries that pull together disparate data using the relationships -defined by the keys. The queries in the relational world are very much -secondary. It is assumed that you can always get the data you want as -long as you have your tables modeled properly. Even if you have to use -several complex subqueries or join statements, this is usually true.

-

By contrast, in Cassandra you don’t start with the data model; you start -with the query model. Instead of modeling the data first and then -writing queries, with Cassandra you model the queries and let the data -be organized around them. Think of the most common query paths your -application will use, and then create the tables that you need to -support them.

-

Detractors have suggested that designing the queries first is overly -constraining on application design, not to mention database modeling. -But it is perfectly reasonable to expect that you should think hard -about the queries in your application, just as you would, presumably, -think hard about your relational domain. You may get it wrong, and then -you’ll have problems in either world. Or your query needs might change -over time, and then you’ll have to work to update your data set. But -this is no different from defining the wrong tables, or needing -additional tables, in an RDBMS.

-
-
-

Designing for optimal storage

-

In a relational database, it is frequently transparent to the user how -tables are stored on disk, and it is rare to hear of recommendations -about data modeling based on how the RDBMS might store tables on disk. -However, that is an important consideration in Cassandra. Because -Cassandra tables are each stored in separate files on disk, it’s -important to keep related columns defined together in the same table.

-

A key goal that you will see as you begin creating data models in -Cassandra is to minimize the number of partitions that must be searched -in order to satisfy a given query. Because the partition is a unit of -storage that does not get divided across nodes, a query that searches a -single partition will typically yield the best performance.

-
-
-

Sorting is a design decision

-

In an RDBMS, you can easily change the order in which records are -returned to you by using ORDER BY in your query. The default sort -order is not configurable; by default, records are returned in the order -in which they are written. If you want to change the order, you just -modify your query, and you can sort by any list of columns.

-

In Cassandra, however, sorting is treated differently; it is a design -decision. The sort order available on queries is fixed, and is -determined entirely by the selection of clustering columns you supply in -the CREATE TABLE command. The CQL SELECT statement does support -ORDER BY semantics, but only in the order specified by the -clustering columns.
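A brief sketch using the available_rooms_by_hotel_date table defined later in this guide (the hotel id is an illustrative value): ORDER BY can only follow the clustering order declared in CREATE TABLE.

```cql
-- Allowed: date is the first clustering column of this table.
SELECT date, room_number, is_available
  FROM available_rooms_by_hotel_date
 WHERE hotel_id = 'AZ123'
 ORDER BY date DESC;
```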

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/data_modeling/data_modeling_refining.html b/src/doc/4.0-alpha3/data_modeling/data_modeling_refining.html deleted file mode 100644 index 4f61e26b8..000000000 --- a/src/doc/4.0-alpha3/data_modeling/data_modeling_refining.html +++ /dev/null @@ -1,287 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Evaluating and Refining Data Models" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Evaluating and Refining Data Models

-

Once you’ve created a physical model, there are some steps you’ll want -to take to evaluate and refine table designs to help ensure optimal -performance.

-
-

Calculating Partition Size

-

The first thing that you want to look for is whether your tables will have -partitions that will be overly large, or to put it another way, too -wide. Partition size is measured by the number of cells (values) that -are stored in the partition. Cassandra’s hard limit is 2 billion cells -per partition, but you’ll likely run into performance issues before -reaching that limit.

-

In order to calculate the size of partitions, use the following -formula:

-
-\[N_v = N_r (N_c - N_{pk} - N_s) + N_s\]
-

The number of values (or cells) in the partition (Nv) is equal to -the number of static columns (Ns) plus the product of the number -of rows (Nr) and the number of values per row. The number of -values per row is defined as the number of columns (Nc) minus the -number of primary key columns (Npk) and static columns -(Ns).

-

The number of columns tends to be relatively static, although it -is possible to alter tables at runtime. For this reason, a -primary driver of partition size is the number of rows in the partition. -This is a key factor that you must consider in determining whether a -partition has the potential to get too large. Two billion values sounds -like a lot, but in a sensor system where tens or hundreds of values are -measured every millisecond, the number of values starts to add up pretty -fast.

-

Let’s take a look at one of the tables to analyze the partition size. -Because it has a wide partition design with one partition per hotel, -look at the available_rooms_by_hotel_date table. The table has -four columns total (Nc = 4), including three primary key columns -(Npk = 3) and no static columns (Ns = 0). Plugging these -values into the formula, the result is:

-
-\[N_v = N_r (4 - 3 - 0) + 0 = 1N_r\]
-

Therefore the number of values for this table is equal to the number of -rows. You still need to determine the number of rows. To do this, make -estimates based on the application design. The table is -storing a record for each room, in each hotel, for every night. -Let’s assume the system will be used to store two years of -inventory at a time, and there are 5,000 hotels in the system, with an -average of 100 rooms in each hotel.

-

Since there is a partition for each hotel, the estimated number of rows -per partition is as follows:

-
-\[N_r = 100 rooms/hotel \times 730 days = 73,000 rows\]
-

This relatively small number of rows per partition is not going to get -you in too much trouble, but if you start storing more dates of inventory, -or don’t manage the size of the inventory well using TTL, you could start -having issues. You still might want to look at breaking up this large -partition, which you’ll see how to do shortly.

-

When performing sizing calculations, it is tempting to assume the -nominal or average case for variables such as the number of rows. -Consider calculating the worst case as well, as these sorts of -predictions have a way of coming true in successful systems.

-
-
-

Calculating Size on Disk

-

In addition to calculating the size of a partition, it is also an -excellent idea to estimate the amount of disk space that will be -required for each table you plan to store in the cluster. In order to -determine the size, use the following formula to determine the size -St of a partition:

-
-\[S_t = \displaystyle\sum_i sizeOf\big (c_{k_i}\big) + \displaystyle\sum_j sizeOf\big(c_{s_j}\big) + N_r\times \bigg(\displaystyle\sum_k sizeOf\big(c_{r_k}\big) + \displaystyle\sum_l sizeOf\big(c_{c_l}\big)\bigg) +\]
-
-\[N_v\times sizeOf\big(t_{avg}\big)\]
-

This is a bit more complex than the previous formula, but let’s break it -down a bit at a time. Let’s take a look at the notation first:

-
    -
  • In this formula, ck refers to partition key columns, -cs to static columns, cr to regular columns, and -cc to clustering columns.
  • -
  • The term tavg refers to the average number of bytes of -metadata stored per cell, such as timestamps. It is typical to use an -estimate of 8 bytes for this value.
  • -
  • You’ll recognize the number of rows Nr and number of values -Nv from previous calculations.
  • -
  • The sizeOf() function refers to the size in bytes of the CQL data -type of each referenced column.
  • -
-

The first term asks you to sum the size of the partition key columns. For -this example, the available_rooms_by_hotel_date table has a single -partition key column, the hotel_id, which is of type -text. Assuming that hotel identifiers are simple 5-character codes, -you have a 5-byte value, so the sum of the partition key column sizes is -5 bytes.

-

The second term asks you to sum the size of the static columns. This table -has no static columns, so the size is 0 bytes.

-

The third term is the most involved, and for good reason—it is -calculating the size of the cells in the partition. Sum the size of -the clustering columns and regular columns. The two clustering columns -are the date, which is 4 bytes, and the room_number, -which is a 2-byte short integer, giving a sum of 6 bytes. -There is only a single regular column, the boolean is_available, -which is 1 byte in size. Summing the regular column size -(1 byte) plus the clustering column size (6 bytes) gives a total of 7 -bytes. To finish up the term, multiply this value by the number of -rows (73,000), giving a result of 511,000 bytes (0.51 MB).
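Written out as arithmetic, the third term for this example is:

\[73,000 \times (6 bytes + 1 byte) = 511,000 bytes \approx 0.51 MB\]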

-

The fourth term is simply counting the metadata that Cassandra -stores for each cell. In the storage format used by Cassandra 3.0 and -later, the amount of metadata for a given cell varies based on the type -of data being stored, and whether or not custom timestamp or TTL values -are specified for individual cells. For this table, reuse the number -of values from the previous calculation (73,000) and multiply by 8, -which gives 0.58 MB.
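And the fourth term:

\[73,000 \times 8 bytes = 584,000 bytes \approx 0.58 MB\]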

-

Adding these terms together, you get a final estimate:

-
-\[Partition size = 5 bytes + 0 bytes + 0.51 MB + 0.58 MB = 1.1 MB\]
-

This formula is an approximation of the actual size of a partition on -disk, but is accurate enough to be quite useful. Remembering that the -partition must be able to fit on a single node, it looks like the table -design will not put a lot of strain on disk storage.

-

Cassandra’s storage engine was re-implemented for the 3.0 release, -including a new format for SSTable files. The previous format stored a -separate copy of the clustering columns as part of the record for each -cell. The newer format eliminates this duplication, which reduces the -size of stored data and simplifies the formula for computing that size.

-

Keep in mind also that this estimate only counts a single replica of -data. You will need to multiply the value obtained here by the number of -partitions and the number of replicas specified by the keyspace’s -replication strategy in order to determine the total required -capacity for each table. This will come in handy when you -plan your cluster.

-
-
-

Breaking Up Large Partitions

-

As discussed previously, the goal is to design tables that can provide -the data you need with queries that touch a single partition, or failing -that, the minimum possible number of partitions. However, as shown in -the examples, it is quite possible to design wide -partition-style tables that approach Cassandra’s built-in limits. -Performing sizing analysis on tables may reveal partitions that are -potentially too large, either in number of values, size on disk, or -both.

-

The technique for splitting a large partition is straightforward: add an -additional column to the partition key. In most cases, moving one of the -existing columns into the partition key will be sufficient. Another -option is to introduce an additional column to the table to act as a -sharding key, but this requires additional application logic.

-

Continuing to examine the available rooms example, if you add the date -column to the partition key for the available_rooms_by_hotel_date -table, each partition would then represent the availability of rooms -at a specific hotel on a specific date. This will certainly yield -partitions that are significantly smaller, perhaps too small, as the -data for consecutive days will likely be on separate nodes.

-

Another technique known as bucketing is often used to break the data -into moderate-size partitions. For example, you could bucketize the -available_rooms_by_hotel_date table by adding a month column to -the partition key, perhaps represented as an integer. The comparison -with the original design is shown in the figure below. While the -month column is partially duplicative of the date, it provides -a nice way of grouping related data in a partition that will not get -too large.

-../_images/data_modeling_hotel_bucketing.png -
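A sketch of what the bucketed variant might look like (representing month as an int follows the text above and is an assumption; the other columns are unchanged from the schema shown later in this guide):

```cql
CREATE TABLE available_rooms_by_hotel_date (
  hotel_id text,
  month int,
  date date,
  room_number smallint,
  is_available boolean,
  PRIMARY KEY ((hotel_id, month), date, room_number)
);
```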

If you really felt strongly about preserving a wide partition design, you -could instead add the room_id to the partition key, so that each -partition would represent the availability of the room across all -dates. Because there was no query identified that involves searching -availability of a specific room, the first or second design approach -is most suitable to the application needs.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/data_modeling/data_modeling_schema.html b/src/doc/4.0-alpha3/data_modeling/data_modeling_schema.html deleted file mode 100644 index abd0b581f..000000000 --- a/src/doc/4.0-alpha3/data_modeling/data_modeling_schema.html +++ /dev/null @@ -1,235 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Defining Database Schema" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Defining Database Schema

-

Once you have finished evaluating and refining the physical model, you’re -ready to implement the schema in CQL. Here is the schema for the -hotel keyspace, using CQL’s comment feature to document the query -pattern supported by each table:

-
CREATE KEYSPACE hotel WITH replication =
-  {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE TYPE hotel.address (
-  street text,
-  city text,
-  state_or_province text,
-  postal_code text,
-  country text );
-
-CREATE TABLE hotel.hotels_by_poi (
-  poi_name text,
-  hotel_id text,
-  name text,
-  phone text,
-  address frozen<address>,
-  PRIMARY KEY ((poi_name), hotel_id) )
-  WITH comment = 'Q1. Find hotels near given poi'
-  AND CLUSTERING ORDER BY (hotel_id ASC) ;
-
-CREATE TABLE hotel.hotels (
-  id text PRIMARY KEY,
-  name text,
-  phone text,
-  address frozen<address>,
-  pois set<text> )
-  WITH comment = 'Q2. Find information about a hotel';
-
-CREATE TABLE hotel.pois_by_hotel (
-  poi_name text,
-  hotel_id text,
-  description text,
-  PRIMARY KEY ((hotel_id), poi_name) )
-  WITH comment = 'Q3. Find pois near a hotel';
-
-CREATE TABLE hotel.available_rooms_by_hotel_date (
-  hotel_id text,
-  date date,
-  room_number smallint,
-  is_available boolean,
-  PRIMARY KEY ((hotel_id), date, room_number) )
-  WITH comment = 'Q4. Find available rooms by hotel date';
-
-CREATE TABLE hotel.amenities_by_room (
-  hotel_id text,
-  room_number smallint,
-  amenity_name text,
-  description text,
-  PRIMARY KEY ((hotel_id, room_number), amenity_name) )
-  WITH comment = 'Q5. Find amenities for a room';
-
-
-

Notice that the elements of the partition key are surrounded -with parentheses, even though the partition key consists -of the single column poi_name. This is a best practice that makes -the selection of the partition key more explicit to others reading your CQL.

-

Similarly, here is the schema for the reservation keyspace:

-
CREATE KEYSPACE reservation WITH replication = {'class':
-  'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE TYPE reservation.address (
-  street text,
-  city text,
-  state_or_province text,
-  postal_code text,
-  country text );
-
-CREATE TABLE reservation.reservations_by_confirmation (
-  confirm_number text,
-  hotel_id text,
-  start_date date,
-  end_date date,
-  room_number smallint,
-  guest_id uuid,
-  PRIMARY KEY (confirm_number) )
-  WITH comment = 'Q6. Find reservations by confirmation number';
-
-CREATE TABLE reservation.reservations_by_hotel_date (
-  hotel_id text,
-  start_date date,
-  end_date date,
-  room_number smallint,
-  confirm_number text,
-  guest_id uuid,
-  PRIMARY KEY ((hotel_id, start_date), room_number) )
-  WITH comment = 'Q7. Find reservations by hotel and date';
-
-CREATE TABLE reservation.reservations_by_guest (
-  guest_last_name text,
-  hotel_id text,
-  start_date date,
-  end_date date,
-  room_number smallint,
-  confirm_number text,
-  guest_id uuid,
-  PRIMARY KEY ((guest_last_name), hotel_id) )
-  WITH comment = 'Q8. Find reservations by guest name';
-
-CREATE TABLE reservation.guests (
-  guest_id uuid PRIMARY KEY,
-  first_name text,
-  last_name text,
-  title text,
-  emails set<text>,
-  phone_numbers list<text>,
-  addresses map<text,
-  frozen<address>>,
-  confirm_number text )
-  WITH comment = 'Q9. Find guest by ID';
-
-
-

You now have a complete Cassandra schema for storing data for a hotel -application.
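As a quick sanity check of the shopping tables, a hypothetical insert and the query backing Q1 might look like this (all values are illustrative):

```cql
INSERT INTO hotel.hotels_by_poi (poi_name, hotel_id, name, phone)
VALUES ('Long Beach Aquarium', 'AZ123', 'Seaside Suites', '+1 555 0100');

-- Q1. Find hotels near a given point of interest.
SELECT hotel_id, name, phone
  FROM hotel.hotels_by_poi
 WHERE poi_name = 'Long Beach Aquarium';
```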

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/data_modeling/data_modeling_tools.html b/src/doc/4.0-alpha3/data_modeling/data_modeling_tools.html deleted file mode 100644 index 93988c251..000000000 --- a/src/doc/4.0-alpha3/data_modeling/data_modeling_tools.html +++ /dev/null @@ -1,156 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Cassandra Data Modeling Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Data Modeling Tools

-

There are several tools available to help you design and -manage your Cassandra schema and build queries.

-
    -
  • Hackolade -is a data modeling tool that supports schema design for Cassandra and -many other NoSQL databases. Hackolade supports the unique concepts of -CQL such as partition keys and clustering columns, as well as data types -including collections and UDTs. It also provides the ability to create -Chebotko diagrams.
  • -
  • Kashlev Data Modeler is a Cassandra -data modeling tool that automates the data modeling methodology -described in this documentation, including identifying -access patterns, conceptual, logical, and physical data modeling, and -schema generation. It also includes model patterns that you can -optionally leverage as a starting point for your designs.
  • -
  • DataStax DevCenter is a tool for managing -schema, executing queries and viewing results. While the tool is no -longer actively supported, it is still popular with many developers and -is available as a free download. -DevCenter features syntax highlighting for CQL commands, types, and name -literals. DevCenter provides command completion as you type out CQL -commands and interprets the commands you type, highlighting any errors -you make. The tool provides panes for managing multiple CQL scripts and -connections to multiple clusters. The connections are used to run CQL -commands against live clusters and view the results. The tool also has a -query trace feature that is useful for gaining insight into the -performance of your queries.
  • -
  • IDE Plugins - There are CQL plugins available for several Integrated -Development Environments (IDEs), such as IntelliJ IDEA and Apache -NetBeans. These plugins typically provide features such as schema -management and query execution.
  • -
-

Some IDEs and tools that claim to support Cassandra do not actually support -CQL natively, but instead access Cassandra using a JDBC/ODBC driver and -interact with Cassandra as if it were a relational database with SQL -support. When selecting tools for working with Cassandra, you’ll want to -make sure they support CQL and reinforce Cassandra best practices for -data modeling as presented in this documentation.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/data_modeling/index.html b/src/doc/4.0-alpha3/data_modeling/index.html deleted file mode 100644 index 3d56fd636..000000000 --- a/src/doc/4.0-alpha3/data_modeling/index.html +++ /dev/null @@ -1,141 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Data Modeling" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/ci.html b/src/doc/4.0-alpha3/development/ci.html deleted file mode 100644 index 7056d33d4..000000000 --- a/src/doc/4.0-alpha3/development/ci.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Jenkins CI Environment" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Jenkins CI Environment

-
-

About CI testing and Apache Cassandra

-

Cassandra can be automatically tested using various test suites that are implemented either with JUnit or as dtest scripts written in Python. As outlined in Testing, each kind of test suite addresses a different way of testing Cassandra. In the end, all of them are executed together on our CI platform at builds.apache.org, running Jenkins.

-
-
-

Setting up your own Jenkins server

-

Jenkins is an open source solution that can be installed on a large number of platforms. Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution.

-

Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment.

-
-

Required plugins

-

The following plugins need to be installed in addition to the standard plugins (git, ant, etc.).

-

You can install any missing plugins through the install manager.

-

Go to Manage Jenkins -> Manage Plugins -> Available and install the following plugins and respective dependencies:

-
    -
  • Job DSL
  • -
  • Javadoc Plugin
  • -
  • description setter plugin
  • -
  • Throttle Concurrent Builds Plug-in
  • -
  • Test stability history
  • -
  • Hudson Post build task
  • -
-
-
-

Setup seed job

-

Config New Item

-
    -
  • Name it Cassandra-Job-DSL
  • -
  • Select Freestyle project
  • -
-

Under Source Code Management select Git using the repository: https://github.com/apache/cassandra-builds

-

Under Build, confirm Add build step -> Process Job DSLs and enter at Look on Filesystem: jenkins-dsl/cassandra_job_dsl_seed.groovy

-

Generated jobs will be created based on the Groovy script’s default settings. You may want to override settings by checking This project is parameterized and adding a String Parameter for each of the variables found at the top of the script. This will allow you to set up jobs for your own repository and branches (e.g. working branches).

-

When done, confirm “Save”

-

You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message “Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use”. Go to Manage Jenkins -> In-process Script Approval to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates.

-

Jobs are triggered either by changes in Git or are scheduled to execute periodically, e.g. on a daily basis. Jenkins will use any available executor with the label “cassandra” once the job is to be run. Please make sure executors are available by selecting Build Executor Status -> Configure -> Add “cassandra” as label and save.

-

Executors need to have “JDK 1.8 (latest)” installed. This is done under Manage Jenkins -> Global Tool Configuration -> JDK Installations…. Executors also need to have the virtualenv package installed on their system.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/code_style.html b/src/doc/4.0-alpha3/development/code_style.html deleted file mode 100644 index 1ffc52f9e..000000000 --- a/src/doc/4.0-alpha3/development/code_style.html +++ /dev/null @@ -1,215 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Code Style" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Code Style

-
-

General Code Conventions

-
-
-
-
-
-

Exception handling

-
-
    -
  • Never ever write catch (...) {} or catch (...) { logger.error() } merely to satisfy Java’s compile-time exception checking. Always propagate the exception up or throw RuntimeException (or, if it “can’t happen,” AssertionError). This makes the exceptions visible to automated tests.
  • -
  • Avoid propagating up checked exceptions that no caller handles. Rethrow as RuntimeException (or IOError, if that is more applicable).
  • -
  • Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don’t hide it behind a warn; if it isn’t, no need for the warning.
  • -
  • If you genuinely know an exception indicates an expected condition, it’s okay to ignore it BUT this must be explicitly explained in a comment.
  • -
-
-
-
-

Boilerplate

-
-
    -
  • Avoid redundant @Override annotations when implementing abstract or interface methods.
  • -
  • Do not implement equals or hashcode methods unless they are actually needed.
  • -
  • Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in “real” methods to either.)
  • -
  • Prefer requiring initialization in the constructor to setters.
  • -
  • Avoid redundant this references to member fields or methods.
  • -
  • Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it.
  • -
  • Always include braces for nested levels of conditionals and loops. Only avoid braces for single level.
  • -
-
-
-
-

Multiline statements

-
-
    -
  • Try to keep lines under 120 characters, but use good judgement – it’s better to exceed 120 by a little, than split a line that has no natural splitting points.
  • -
  • When splitting inside a method call, use one line per parameter and align them, like this:
  • -
-
SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(),
-                                         columnFamilies.size(),
-                                         StorageService.getPartitioner());
-
-
-
    -
  • When splitting a ternary, use one line per clause, carry the operator, and align like this:
  • -
-
var = bar == null
-    ? doFoo()
-    : doBar();
-
-
-
-
-
-

Whitespace

-
-
    -
  • Please make sure to use 4 spaces instead of the tab character for all your indentation.
  • -
  • Many lines in many files have a bunch of trailing whitespace… Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later don’t have to pay attention to whitespace diffs.
  • -
-
-
-
-

Imports

-

Please observe the following order for your imports:

-
java
-[blank line]
-com.google.common
-org.apache.commons
-org.junit
-org.slf4j
-[blank line]
-everything else alphabetically
-
-
-
-
-

Format files for IDEs

-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/dependencies.html b/src/doc/4.0-alpha3/development/dependencies.html deleted file mode 100644 index 742bf9295..000000000 --- a/src/doc/4.0-alpha3/development/dependencies.html +++ /dev/null @@ -1,156 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Dependency Management" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Dependency Management

-

Managing libraries for Cassandra is a bit less straightforward than in other projects, as the build process is based on ant, maven and manually managed jars. Make sure to follow the steps below carefully, and in case of any project dependency changes, pay attention to any emerging issues in the Jenkins CI Environment and related issues reported on Jira/ML.

-

As Cassandra is an Apache product, all included libraries must follow Apache’s software license requirements.

-
-

Required steps to add or update libraries

-
    -
  • Add or replace jar file in lib directory
  • -
  • Add or update lib/license files
  • -
  • Update dependencies in build.xml
      -
    • Add to parent-pom with correct version
    • -
    • Add to all-pom if simple Cassandra dependency (see below)
    • -
    -
  • -
-
-
-

POM file types

-
    -
  • parent-pom - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here.
  • -
  • build-deps-pom(-sources) + coverage-deps-pom - used by ant build compile target. Listed dependencies will be resolved and copied to build/lib/{jar,sources} by executing the maven-ant-tasks-retrieve-build target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution.
  • -
  • test-deps-pom - referred to by maven-ant-tasks-retrieve-test to retrieve and save dependencies to build/test/lib. Exclusively used during JUnit test execution.
  • -
  • all-pom - pom for cassandra-all.jar that can be installed or deployed to public maven repos via ant publish
  • -
  • dist-pom - pom for tarball distribution (cassandra-{bin,src}.tar.gz) created by ant artifacts. Should be left as is, but needed for installing or deploying releases.
  • -
-
-
-

Troubleshooting and conflict resolution

-

Here are some useful commands that may help you out resolving conflicts.

-
    -
  • ant realclean - gets rid of the build directory, including build artifacts.
  • -
  • mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ant mvn-install.
  • -
  • rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/ - removes cached local Cassandra maven artifacts
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/documentation.html b/src/doc/4.0-alpha3/development/documentation.html deleted file mode 100644 index 587b40719..000000000 --- a/src/doc/4.0-alpha3/development/documentation.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Working on Documentation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Working on Documentation

-
-

How Cassandra is documented

-

The official Cassandra documentation lives in the project’s git repository. We use a static site generator, Sphinx, to create pages hosted at cassandra.apache.org. You’ll also find developer centric content about Cassandra internals in our retired wiki (not covered by this guide).

-

Using a static site generator often requires using a markup language instead of visual editors (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses reStructuredText for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at existing documents to get a better idea of how we use reStructuredText to write our documents.

-

So how do you actually start making contributions?

-
-
-

GitHub based work flow

-

Recommended for shorter documents and minor changes on existing content (e.g. fixing typos or updating descriptions)

-

Follow these steps to contribute using GitHub. It’s assumed that you’re logged in with an existing account.

-
    -
  1. Fork the GitHub mirror of the Cassandra repository
  2. -
-../_images/docs_fork.png -
    -
  1. Create a new branch that you can use to make your edits. It’s recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later when you decide you’re ready to contribute your work.
  2. -
-../_images/docs_create_branch.png -
    -
  1. Navigate to document sources doc/source to find the .rst file to edit. The URL of the document should correspond to the directory structure. New files can be created using the “Create new file” button:
  2. -
-../_images/docs_create_file.png -
    -
  1. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and add some content. Have a look at other existing .rst files to get a better idea what format elements to use.
  2. -
-../_images/docs_editor.png -

Make sure to preview added content before committing any changes.

-../_images/docs_preview.png -
    -
  1. Commit your work when you’re done. Make sure to add a short description of all your edits since the last time you committed before.
  2. -
-../_images/docs_commit.png -
    -
  1. Finally if you decide that you’re done working on your branch, it’s time to create a pull request!
  2. -
-../_images/docs_pr.png -

Afterwards the GitHub Cassandra mirror will list your pull request and you’re done. Congratulations! Please give us some time to look at your suggested changes before we get back to you.

-
-
-

Jira based work flow

-

Recommended for major changes

-

Significant changes to the documentation are best managed through our Jira issue tracker. Please follow the same contribution guides as for regular code contributions. Creating high-quality content takes a lot of effort. It’s therefore always a good idea to create a ticket before you start and explain what you’re planning to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed.

-
-
-

Working on documents locally using Sphinx

-

Recommended for advanced editing

-

Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefore it’s a good idea to set up Sphinx locally for any serious editing. Please follow the instructions in the Cassandra source directory at doc/README.md. Setup is very easy (at least on OSX and Linux).

-
-
-

Notes for committers

-

Please feel free to get involved and merge pull requests created on the GitHub mirror if you’re a committer. As this is a read-only repository, you won’t be able to merge a PR directly on GitHub. You’ll have to commit the changes against the Apache repository with a comment that will close the PR when the commit syncs with GitHub.

-

You may use a git work flow like this:

-
git remote add github https://github.com/apache/cassandra.git
-git fetch github pull/<PR-ID>/head:<PR-ID>
-git checkout <PR-ID>
-
-
-

Now either rebase or squash the commit, e.g. for squashing:

-
git reset --soft origin/trunk
-git commit --author <PR Author>
-
-
-

Make sure to add a proper commit message including a “Closes #<PR-ID>” text to automatically close the PR.

-
-

Publishing

-

Details for building and publishing of the site at cassandra.apache.org can be found here.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/gettingstarted.html b/src/doc/4.0-alpha3/development/gettingstarted.html deleted file mode 100644 index acbc28290..000000000 --- a/src/doc/4.0-alpha3/development/gettingstarted.html +++ /dev/null @@ -1,161 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Getting Started" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Getting Started

-
-

Initial Contributions

-
-
Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). So, before firing up your IDE to create that new feature, we’d suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work.
-
    -
  • Add to or update the documentation
  • -
  • Answer questions on the user list
  • -
  • Review and test a submitted patch
  • -
  • Investigate and fix a reported bug
  • -
  • Create unit tests and d-tests
  • -
-
-
-
-
-

Updating documentation

-

The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (Contributing Code Changes).

-
-
-

Answering questions on the user list

-

Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! -See the community page for details on how to subscribe to the mailing list.

-
-
-

Reviewing and testing a submitted patch

-

Reviewing patches is not the sole domain of committers; if others have reviewed a patch it can reduce the load on the committers, allowing them to write more great features or review more patches. Follow the instructions in the Review Checklist or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, "I tested this performance enhancement on our application's standard production load test and found a 3% improvement.")

-
-
-

Investigate and/or fix a reported bug

-

Often, the hardest work in fixing a bug is reproducing it. Even if you don't have the knowledge to produce a fix, figuring out a way to reliably reproduce an issue can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (Contributing Code Changes).

-
-
-

Create unit tests and Dtests

-

Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your change introducing new issues. See Testing and Contributing Code Changes for more detail.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/how_to_commit.html b/src/doc/4.0-alpha3/development/how_to_commit.html deleted file mode 100644 index 5f0e0f7c7..000000000 --- a/src/doc/4.0-alpha3/development/how_to_commit.html +++ /dev/null @@ -1,187 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "How-to Commit" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

How-to Commit

-

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself.

-

Here is how committing and merging will usually look for merging and pushing for tickets that follow the convention (if patch-based):

-

Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).

-
-
On cassandra-3.0:
-
    -
  1. git am -3 12345-3.0.patch (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git apply -3 12345-3.3.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git apply -3 12345-trunk.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-

Same scenario, but a branch-based contribution:

-
-
On cassandra-3.0:
-
    -
  1. git cherry-pick <sha-of-3.0-commit> (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git format-patch -1 <sha-of-3.3-commit>
  4. -
  5. git apply -3 <sha-of-3.3-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git format-patch -1 <sha-of-trunk-commit>
  4. -
  5. git apply -3 <sha-of-trunk-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-
-

Tip

-

Notes on git flags: the -3 flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply.

-

The --atomic flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per branch. This is nifty in case a race condition happens - you won't push half the branches, blocking other committers' progress while you are resolving the issue.

-
-
-

Tip

-

The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. -curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/how_to_review.html b/src/doc/4.0-alpha3/development/how_to_review.html deleted file mode 100644 index 9e07beef5..000000000 --- a/src/doc/4.0-alpha3/development/how_to_review.html +++ /dev/null @@ -1,179 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Review Checklist" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Review Checklist

-

When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process:

-

General

-
-
    -
  • Does it conform to the Code Style guidelines?
  • -
  • Is there any redundant or duplicate code?
  • -
  • Is the code as modular as possible?
  • -
  • Can any singletons be avoided?
  • -
  • Can any of the code be replaced with library functions?
  • -
  • Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem?
  • -
-
-

Error-Handling

-
-
    -
  • Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded?
  • -
  • Where third-party utilities are used, are returning errors being caught?
  • -
  • Are invalid parameter values handled?
  • -
  • Are any Throwable/Exceptions passed to the JVMStabilityInspector?
  • -
  • Are errors well-documented? Does the error message tell the user how to proceed?
  • -
  • Do exceptions propagate to the appropriate level in the code?
  • -
-
-

Documentation

-
-
    -
  • Do comments exist and describe the intent of the code (the “why”, not the “how”)?
  • -
  • Are javadocs added where appropriate?
  • -
  • Is any unusual behavior or edge-case handling described?
  • -
  • Are data structures and units of measurement explained?
  • -
  • Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’?
  • -
  • Does the code self-document via clear naming, abstractions, and flow control?
  • -
  • Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed?
  • -
  • Is the ticket tagged with “client-impacting” and “doc-impacting”, where appropriate?
  • -
  • Has lib/licences been updated for third-party libs? Are they Apache License compatible?
  • -
  • Is the Component on the JIRA ticket set appropriately?
  • -
-
-

Testing

-
-
    -
  • Is the code testable? For example, it shouldn't add or hide too many dependencies, objects should be easy to initialize, and test frameworks should be able to call its methods.
  • -
  • Do tests exist and are they comprehensive?
  • -
  • Do unit tests actually test that the code is performing the intended functionality?
  • -
  • Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse?
  • -
  • If the code may be affected by multi-node clusters, are there dtests?
  • -
  • If the code may take a long time to test properly, are there CVH tests?
  • -
  • Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions?
  • -
  • If patch affects read/write path, did we test for performance regressions w/multiple workloads?
  • -
  • If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature?
  • -
-
-

Logging

-
-
    -
  • Are logging statements logged at the correct level?
  • -
  • Are there logs in the critical path that could affect performance?
  • -
  • Is there any log that could be added to communicate status or troubleshoot potential problems in this feature?
  • -
  • Can any unnecessary logging statement be removed?
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/ide.html b/src/doc/4.0-alpha3/development/ide.html deleted file mode 100644 index b1f6095c1..000000000 --- a/src/doc/4.0-alpha3/development/ide.html +++ /dev/null @@ -1,268 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Building and IDE Integration" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Building and IDE Integration

-
-

Building From Source

-

Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using Java 8, Git and Ant.

-

The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:

-
git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk
-
-
-

Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:

-
git checkout cassandra-3.0
-
-
-

You can get a list of available branches with git branch.

-

Finally build Cassandra using ant:

-
ant
-
-
-

This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled.

-
-

Hint

-

You can set up multiple working trees for different Cassandra versions from the same repository using git-worktree; see the sketch below.
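For example, adding a second working tree for an older branch next to a trunk checkout (the paths are illustrative):

cd cassandra-trunk
git worktree add ../cassandra-3.11 cassandra-3.11   # creates ../cassandra-3.11 checked out at that branch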

-
-
-

-
-
-
-

Setting up Cassandra in IntelliJ IDEA

-

IntelliJ IDEA by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra.

-
-

Setup Cassandra as a Project (C* 2.1 and newer)

-

Since 2.1.5, there is a new ant target: generate-idea-files. Please see our wiki for instructions for older Cassandra versions.

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Once Cassandra is built, generate the IDEA files using ant:
  2. -
-
ant generate-idea-files
-
-
-
    -
  1. Start IDEA
  2. -
  3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA’s File menu
  4. -
-

The project generated by the ant task generate-idea-files contains nearly everything you need to debug Cassandra and execute unit tests.

-
-
    -
  • Run/debug defaults for JUnit
  • -
  • Run/debug configuration for Cassandra daemon
  • -
  • License header for Java source files
  • -
  • Cassandra code style
  • -
  • Inspections
  • -
-
-
-

-
-
-
-
-

Opening Cassandra in Apache NetBeans

-

Apache NetBeans is the oldest of the open source Java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans.

-
-

Open Cassandra as a Project (C* 4.0 and newer)

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Start Apache NetBeans
  2. -
  3. Open the NetBeans project from the ide/ folder of the checked out Cassandra directory using the menu item “Open Project…” in NetBeans’ File menu
  4. -
-

The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant build.xml script.

-
-
    -
  • Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu.
  • -
  • Profile Project is available via the Profile menu. In the opened Profiler tab, click the green “Profile” button.
  • -
  • Cassandra’s code style is honored in ide/nbproject/project.properties
  • -
-
-

The JAVA8_HOME system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute.
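For example, on a Debian/Ubuntu machine with OpenJDK 8 the variable might be exported before launching NetBeans (the JDK path and launcher name are assumptions; adjust them for your system):

export JAVA8_HOME=/usr/lib/jvm/java-8-openjdk-amd64   # example path only
netbeans &                                            # start NetBeans from this shell so it inherits the variable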

-
-

-
-
-
-
-

Setting up Cassandra in Eclipse

-

Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the download page. The following guide was created with “Eclipse IDE for Java Developers”.

-

These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x.

-
-

Project Settings

-

It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.

-
-
    -
  • Clone and build Cassandra as described above.
  • -
  • Run ant generate-eclipse-files to create the Eclipse settings.
  • -
  • Start Eclipse.
  • -
  • Select File->Import->Existing Projects into Workspace->Select git directory.
  • -
  • Make sure “cassandra-trunk” is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above).
  • -
  • Confirm “Finish” to have your project imported.
  • -
-
-

You should now be able to find the project as part of the “Package Explorer” or “Project Explorer” without having Eclipse complain about any errors after building the project automatically.

-
-
-

Unit Tests

-

Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting Run As->JUnit Test. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting Debug As->JUnit Test.

-

Alternatively all unit tests can be run from the command line as described in Testing

-
-
-

Debugging Cassandra Using Eclipse

-

There are two ways to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would, by using the ./bin/cassandra script, and connect to the JVM remotely from Eclipse, or start Cassandra from Eclipse right away.

-
-

Starting Cassandra From Command Line

-
-
    -
  • Set environment variable to define remote debugging options for the JVM: -export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"
  • -
  • Start Cassandra by executing the ./bin/cassandra script
  • -
-
-

Afterwards you should be able to connect to the running Cassandra process through the following steps:

-

From the menu, select Run->Debug Configurations..

-../_images/eclipse_debug0.png -

Create new remote application

-../_images/eclipse_debug1.png -

Configure connection settings by specifying a name and port 1414

-../_images/eclipse_debug2.png -

Afterwards confirm “Debug” to connect to the JVM and start debugging Cassandra!

-
-
-

Starting Cassandra From Eclipse

-

Cassandra can also be started directly from Eclipse if you don’t want to use the command line.

-

From the menu, select Run->Run Configurations..

-../_images/eclipse_debug3.png -

Create new application

-../_images/eclipse_debug4.png -

Specify name, project and main class org.apache.cassandra.service.CassandraDaemon

-../_images/eclipse_debug5.png -

Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed.

-
-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true
-
-
-../_images/eclipse_debug6.png -

Now just confirm “Debug” and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging!

-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/index.html b/src/doc/4.0-alpha3/development/index.html deleted file mode 100644 index 27a0238eb..000000000 --- a/src/doc/4.0-alpha3/development/index.html +++ /dev/null @@ -1,185 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contributing to Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- - -
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/patches.html b/src/doc/4.0-alpha3/development/patches.html deleted file mode 100644 index 10da3350d..000000000 --- a/src/doc/4.0-alpha3/development/patches.html +++ /dev/null @@ -1,274 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Contributing Code Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contributing Code Changes

-
-

Choosing What to Work on

-

Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java and Python), documentation, testing or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing.

-
-
As a general rule of thumb:
-
    -
  • Major new features and significant changes to the code base will likely not be accepted without deeper discussion within the developer community
  • -
  • Bug fixes take higher priority compared to features
  • -
  • The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
  • -
  • Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately
  • -
-
-
-
-

Hint

-

Not sure what to work on? Just pick an issue marked as Low Hanging Fruit Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

-
-
-
-

Before You Start Coding

-

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it’s generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or Slack.

-
-
You should also
-
    -
  • Avoid redundant work by searching for already reported issues in JIRA
  • -
  • Create a new issue early in the process describing what you’re working on - not just after finishing your patch
  • -
  • Link related JIRA issues with your own ticket to provide a better context
  • -
  • Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
  • -
  • Ping people whom you would like to ask for advice on JIRA by mentioning them
  • -
-
-
There are also some fixed rules that you need to be aware of:
-
    -
  • Patches will only be applied to branches by following the release model
  • -
  • Code must be testable
  • -
  • Code must follow the Code Style convention
  • -
  • Changes must not break compatibility between different Cassandra versions
  • -
  • Contributions must be covered by the Apache License
  • -
-
-
-
-

Choosing the Right Branches to Work on

-

There are currently multiple Cassandra versions maintained in individual branches:

Version   Policy
4.0       Code freeze (see below)
3.11      Critical bug fixes only
3.0       Critical bug fixes only
2.2       Critical bug fixes only
2.1       Critical bug fixes only
-

Corresponding branches in git are easy to recognize as they are named cassandra-<release> (e.g. cassandra-3.0). The trunk branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

-
-

4.0 Code Freeze

-

Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. During that time, only the following patches will be considered for acceptance:

-
-
    -
  • Bug fixes
  • -
  • Measurable performance improvements
  • -
  • Changes not distributed as part of the release such as:
  • -
  • Testing related improvements and fixes
  • -
  • Build and infrastructure related changes
  • -
  • Documentation
  • -
-
-
-
-

Bug Fixes

-

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be cassandra-2.1 -> cassandra-2.2 -> cassandra-3.0 -> cassandra-3.x -> trunk. But don’t worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn’t very common. As a contributor, you’re also not expected to provide a single patch for each version. What you need to do however is:

-
-
    -
  • Be clear about which versions you could verify to be affected by the bug
  • -
  • For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on a case-by-case basis
  • -
  • If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0)
  • -
  • Test if the patch can be merged cleanly across branches in the direction listed above
  • -
  • Be clear which branches may need attention by the committer or even create custom patches for those if you can
  • -
-
-
-
-
-
-

Creating a Patch

-

So you’ve finished coding and the great moment arrives: it’s time to submit your patch!

-
-
    -
  1. Create a branch for your changes if you haven’t done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. git checkout -b 12345-3.0
  2. -
  3. Verify that you follow Cassandra’s Code Style
  4. -
  5. Make sure all tests (including yours) pass using ant as described in Testing. If you suspect a test failure is unrelated to your change, it may be useful to check the test’s status by searching the issue tracker or looking at CI results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites.
  6. -
  7. Consider going through the Review Checklist for your code. This will help you to understand how others will consider your change for inclusion.
  8. -
  9. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either:
  10. -
-
-
    -
  1. Attach a patch to JIRA with a single squashed commit in it (per branch), or
  2. -
  3. Squash the commits in-place in your branches into one
  4. -
-
-
    -
  1. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below.

    -
    <One sentence description, usually Jira title and CHANGES.txt summary>
    -<Optional lengthier description>
    -patch by <Authors>; reviewed by <Reviewers> for CASSANDRA-#####
    -
    -
    -
  2. -
  3. When you’re happy with the result, create a patch:

    -
  4. -
-
-
git add <any new or modified file>
-git commit -m '<message>'
-git format-patch HEAD~1
-mv <patch-file> <ticket-branchname.txt> (e.g. 12345-trunk.txt, 12345-3.0.txt)
-
-
-

Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch:

-
git push --set-upstream origin 12345-3.0
-
-
-
-
    -
  1. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless.
  2. -
  3. Attach the newly generated patch to the ticket/add a link to your branch and click “Submit Patch” at the top of the ticket. This will move the ticket into “Patch Available” status, indicating that your submission is ready for review.
  4. -
  5. Wait for other developers or committers to review it and hopefully +1 the ticket (see Review Checklist). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable.
  6. -
  7. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into “Patch Available” once again.
  8. -
-
-

Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/release_process.html b/src/doc/4.0-alpha3/development/release_process.html deleted file mode 100644 index e6e08e60a..000000000 --- a/src/doc/4.0-alpha3/development/release_process.html +++ /dev/null @@ -1,390 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Release Process" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Release Process

- -
-

-

-
-
-

Attention

-
-
WORK IN PROGRESS
-
    -
  • A number of these steps have still not been finalised/tested.
  • -
  • The use of people.apache.org needs to be replaced with svnpubsub and dist.apache.org
  • -
-
-
-
-

The steps for Release Managers to create, vote and publish releases for Apache Cassandra.

-

While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC can complete the process of publishing and announcing the release.

-
-

Prerequisites

-
-
Background docs
-
-
-
-

A Debian-based Linux OS is required to run the release steps from, as Debian-based distros provide the required RPM, dpkg and repository management tools.

-
-

Create and publish your GPG key

-

To create a GPG key, follow the guidelines. -Include your public key in:

-
https://dist.apache.org/repos/dist/release/cassandra/KEYS
-
-
-

Publish your GPG key in a PGP key server, such as MIT Keyserver.
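A rough sketch of these steps; the keyserver and e-mail address below are examples only, and the linked ASF guidelines remain the authoritative procedure:

gpg --full-generate-key                                                  # create a new key pair
gpg --armor --export you@apache.org >> KEYS                              # append the public key to the KEYS file
gpg --keyserver hkps://keyserver.ubuntu.com --send-keys <your-key-id>    # publish to a keyserver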

-
-
-
-

Create Release Artifacts

-

Any committer can perform the following steps to create and call a vote on a proposed release.

-

Check that no urgent open jira tickets are currently being worked on. Also check with a PMC member whether there are any security vulnerabilities currently being worked on in private.

-
-

Perform the Release

-

Run the following commands to generate and upload release artifacts, to a nexus staging repository and distribution location:

-
cd ~/git
-git clone https://github.com/apache/cassandra-builds.git
-# Edit the variables at the top of `cassandra-builds/cassandra-release/prepare_release.sh`
-
-# After cloning cassandra-builds repo, the prepare_release.sh is run from the actual cassandra git checkout,
-# on the branch/commit that we wish to tag for the tentative release along with version number to tag.
-# For example here <version-branch> might be `3.11` and <version> `3.11.3`
-cd ~/git/cassandra/
-git checkout cassandra-<version-branch>
-../cassandra-builds/cassandra-release/prepare_release.sh -v <version>
-
-
-

If successful, take note of the email text output which can be used in the next section “Call for a Vote”.

-

The prepare_release.sh script does not yet generate and upload the rpm distribution packages. -To generate and upload them do:

-
cd ~/git/cassandra-builds
-docker build -f docker/centos7-image.docker docker/
-docker run --rm -v `pwd`/dist:/dist `docker images -f label=org.cassandra.buildenv=centos -q` /home/build/build-rpms.sh <version>-tentative
-rpmsign --addsign dist/*.rpm
-
-
-

For more information on the above steps see the cassandra-builds documentation. -The next step is to copy and commit these binaries to staging svnpubsub:

-
# FIXME the following commands is wrong while people.apache.org is still used instead of svnpubsub and dist.apache.org
-cd ~/git
-svn co https://dist.apache.org/repos/dist/dev/cassandra cassandra-dist-dev
-mkdir cassandra-dist-dev/<version>
-cp cassandra-build/dist/*.rpm cassandra-dist-dev/<version>/
-
-svn add cassandra-dist-dev/<version>
-svn ci cassandra-dist-dev/<version>
-
-
-

After committing the binaries to staging, increment the version number in Cassandra on the cassandra-<version-branch> branch:

-
-
cd ~/git/cassandra/
git checkout cassandra-<version-branch>
edit build.xml          # update `<property name="base.version" value="…"/>`
edit debian/changelog   # add entry for new version
edit CHANGES.txt        # add entry for new version
git commit -m "Update version to <next-version>" build.xml debian/changelog CHANGES.txt
git push
-
-
-
-

Call for a Vote

-

Fill out the following email template and send to the dev mailing list:

-
I propose the following artifacts for release as <version>.
-
-sha1: <git-sha>
-
-Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/<version>-tentative
-
-Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/org/apache/cassandra/apache-cassandra/<version>/
-
-Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/
-
-The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/
-
-The vote will be open for 72 hours (longer if needed).
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>-tentative
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>-tentative
-
-
-
-
-

Post-vote operations

-

Any PMC can perform the following steps to formalize and publish a successfully voted release.

-
-

Publish Artifacts

-

Run the following commands to publish the voted release artifacts:

-
cd ~/git
-git clone https://github.com/apache/cassandra-builds.git
-# edit the variables at the top of `finish_release.sh`
-
-# After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout,
-# on the tentative release tag that we wish to tag for the final release version number tag.
-cd ~/git/cassandra/
-git checkout <version>-tentative
-../cassandra-builds/cassandra-release/finish_release.sh -v <version> <staging_number>
-
-
-

If successful, take note of the email text output which can be used in the next section “Send Release Announcement”. -The output will also list the next steps that are required. The first of these is to commit changes made to your https://dist.apache.org/repos/dist/release/cassandra/ checkout.

-
-
-

Promote Nexus Repository

-
-
    -
  • Login to Nexus repository again.
  • -
  • Click on “Staging” and then on the repository with id “cassandra-staging”.
  • -
  • Find your closed staging repository, right click on it and choose “Promote”.
  • -
  • Select the “Releases” repository and click “Promote”.
  • -
  • Next click on “Repositories”, select the “Releases” repository and validate that your artifacts exist as you expect them.
  • -
-
-
-
-

Sign and Upload Distribution Packages to Bintray

-

Run the following command:

-
cd ~/git
-# FIXME the next command is wrong while people.apache.org is used instead of svnpubsub and dist.apache.org
-svn mv https://dist.apache.org/repos/dist/dev/cassandra/<version> https://dist.apache.org/repos/dist/release/cassandra/
-
-# Create the yum metadata, sign the metadata, and sign some files within the signed repo metadata that the ASF sig tool errors out on
-svn co https://dist.apache.org/repos/dist/release/cassandra/redhat/ cassandra-dist-redhat
-cd cassandra-dist-redhat/<abbreviated-version>x/
-createrepo .
-gpg --detach-sign --armor repodata/repomd.xml
-for f in `find repodata/ -name *.bz2`; do
-  gpg --detach-sign --armor $f;
-done
-
-svn co https://dist.apache.org/repos/dist/release/cassandra/<version> cassandra-dist-<version>
-cd cassandra-dist-<version>
-cassandra-build/cassandra-release/upload_bintray.sh cassandra-dist-<version>
-
-
-
-
-

Update and Publish Website

-

See the docs at https://svn.apache.org/repos/asf/cassandra/site/src/README for building and publishing the website. Also update the CQL doc if appropriate.

-
-
-

Release version in JIRA

-

Release the JIRA version.

-
-
    -
  • In JIRA go to the version that you want to release and release it.
  • -
  • Create a new version, if it has not been done before.
  • -
-
-
-
-

Update to Next Development Version

-

Edit and commit build.xml so the base.version property points to the next version.

-
- -
-

Send Release Announcement

-

Fill out the following email template and send to both user and dev mailing lists:

-
The Cassandra team is pleased to announce the release of Apache Cassandra version <version>.
-
-Apache Cassandra is a fully distributed database. It is the right choice
-when you need scalability and high availability without compromising
-performance.
-
- http://cassandra.apache.org/
-
-Downloads of source and binary distributions are listed in our download
-section:
-
- http://cassandra.apache.org/download/
-
-This version is <the first|a bug fix> release[1] on the <version-base> series. As always,
-please pay attention to the release notes[2] and let us know[3] if you
-were to encounter any problem.
-
-Enjoy!
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>
-[3]: https://issues.apache.org/jira/browse/CASSANDRA
-
-
-
-
-

Update Slack Cassandra topic

-
-
Update topic in cassandra Slack room
-
/topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don’t ask to ask
-
-
-
-

Tweet from @Cassandra

-

Tweet the new release, from the @Cassandra account

-
-
-

Delete Old Releases

-

As described in When to Archive. -Also check people.apache.org as previous release scripts used it.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/development/testing.html b/src/doc/4.0-alpha3/development/testing.html deleted file mode 100644 index 0e962c1e1..000000000 --- a/src/doc/4.0-alpha3/development/testing.html +++ /dev/null @@ -1,185 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Testing" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Testing

-

Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you’re working on.

-
-

Unit Testing

-

The simplest way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the test/unit directory. Ideally you'd be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible, as Cassandra doesn't have a very mock friendly code base. Often you'll find yourself in a situation where you have to make use of an embedded Cassandra instance that you'll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods, as in the following example.

-
@Test
-public void testBatchAndList() throws Throwable
-{
-   createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
-   execute("BEGIN BATCH " +
-           "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
-           "APPLY BATCH");
-
-   assertRows(execute("SELECT l FROM %s WHERE k = 0"),
-              row(list(1, 2, 3)));
-}
-
-
-

Unit tests can be run from the command line using the ant test command, ant test -Dtest.name=<simple_classname> to execute a test suite or ant testsome -Dtest.name=<FQCN> -Dtest.methods=<testmethod1>[,testmethod2] for individual tests. For example, to run all test methods in the org.apache.cassandra.cql3.SimpleQueryTest class, you would run:

-
ant test -Dtest.name=SimpleQueryTest
-
-
-

To run only the testStaticCompactTables() test method from that class, you would run:

-
ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables
-
-
-

If you see an error like this:

-
Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found:
-org/apache/tools/ant/taskdefs/optional/junit/JUnitTask  using the classloader
-AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar]
-
-
-

You will need to install the ant-optional package since it contains the JUnitTask class.
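On a Debian/Ubuntu system this is typically done with the following command (the package name may differ on other distributions):

sudo apt-get install ant-optional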

-
-

Long running tests

-

Tests that consume a significant amount of time during execution can be found in the test/long directory and executed as a regular JUnit test or standalone program. Except for the execution time, there's nothing really special about them. However, ant will only execute tests under test/long when using the ant long-test target.

-
-
-
-

DTests

-

One way of doing integration or system testing at larger scale is by using dtest, which stands for "Cassandra Distributed Tests". The idea is to automatically set up Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ccmlib from the ccm project. Dtests will set up clusters using this library just as you do when running ad-hoc ccm commands on your local machine; see the sketch below. Afterwards dtests will use the Python driver to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes.
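As an illustration, an ad-hoc ccm session for a small local cluster might look like this (the cluster name and version number are placeholders; ccm itself can be installed with pip install ccm):

ccm create test-cluster -v 3.11.4 -n 3 -s   # create and start a local 3-node cluster
ccm status                                  # check that all nodes are up
ccm node1 showlog                           # inspect the log of an individual node
ccm remove                                  # tear the cluster down again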

-

Using dtests helps us to prevent regression bugs by continually executing tests on the CI server against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration here.

-

The best way to learn how to write dtests is probably by reading the introduction "How to Write a Dtest" and by looking at existing, recently updated tests in the project. New tests must follow certain style conventions that are checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on GitHub, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR.

-

Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you.

-
-
-

Performance Testing

-

Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable.

-
-

Cassandra Stress Tool

-

See Cassandra Stress

-
-
-

cstar_perf

-

Another tool available on github is cstar_perf that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it.

-
-
-

CircleCI

-

Cassandra ships with a default CircleCI configuration. To enable running tests on your branches, you need to go to the CircleCI website, click "Login" and log in with your GitHub account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your GitHub account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ant eclipse-warnings and ant test will be run. If you up the parallelism to 4, it also runs ant long-test, ant test-compression and ant stress-test.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/faq/index.html b/src/doc/4.0-alpha3/faq/index.html deleted file mode 100644 index 7621efdd3..000000000 --- a/src/doc/4.0-alpha3/faq/index.html +++ /dev/null @@ -1,318 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Frequently Asked Questions" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Frequently Asked Questions

- -
-

Why can’t I set listen_address to listen on 0.0.0.0 (all my addresses)?

-

Cassandra is a gossip-based distributed system and listen_address is the address a node tells other nodes to reach -it at. Telling other nodes “contact me on any of my addresses” is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen.

-

If you don’t want to manually specify an IP to listen_address for each node in your cluster (understandable!), leave -it blank and Cassandra will use InetAddress.getLocalHost() to pick an address. Then it’s up to you or your ops team -to make things resolve correctly (/etc/hosts/, dns, etc).

-

One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769).

-

See CASSANDRA-256 and CASSANDRA-43 for more gory details.

-
-
-

What ports does Cassandra use?

-

By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX. The internode communication and native protocol ports -are configurable in the Cassandra Configuration File. The JMX port is configurable in cassandra-env.sh (through JVM -options). All ports are TCP.
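As an illustration, opening those ports with firewalld might look like the following (this assumes firewalld is in use; in practice, restrict the rules to your cluster hosts rather than opening the ports globally):

sudo firewall-cmd --permanent --add-port=7000/tcp --add-port=7001/tcp \
                  --add-port=7199/tcp --add-port=9042/tcp
sudo firewall-cmd --reload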

-
-
-

What happens to existing data in my cluster when I add new nodes?

-

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See Adding, replacing, moving and removing nodes.

-
-
-

I delete data from Cassandra, but disk usage stays the same. What gives?

-

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can’t actually be removed -when you perform a delete, instead, a marker (also called a “tombstone”) is written to indicate the value’s new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See Compaction for more detail.

-
-
-

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?

-

This happens when you have the same token assigned to each node. Don’t do that.

-

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

-

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart.

-
-
-

Can I change the replication factor (of a keyspace) on a live cluster?

-

Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data:

-
    -
  • Alter the replication factor for the desired keyspace (using cqlsh for instance; see the sketch after this list).
  • -
  • If you’re reducing the replication factor, run nodetool cleanup on the cluster to remove surplus replicated data. -Cleanup runs on a per-node basis.
  • -
  • If you’re increasing the replication factor, run nodetool repair -full to ensure data is replicated according to the new -configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster -performance. It’s highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will -most likely swamp it. Note that you will need to run a full repair (-full) to make sure that already repaired -sstables are not skipped.
  • -
-
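A sketch of the steps above for a hypothetical keyspace; the keyspace name, datacenter name and replication factor are placeholders only:

cqlsh -e "ALTER KEYSPACE my_keyspace WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 3};"

# after increasing the replication factor:
nodetool repair -full my_keyspace

# after decreasing the replication factor, on every node:
nodetool cleanup my_keyspace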
-
-

Can I Store (large) BLOBs in Cassandra?

-

Cassandra isn't optimized for large file or BLOB storage and a single blob value is always read and sent to the client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

-

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the max_mutation_size_in_kb configuration in the Cassandra Configuration File (which defaults to half of commitlog_segment_size_in_mb, which itself defaults to 32MB).

-
-
-

Nodetool says “Connection refused to host: 127.0.1.1” for any remote host. What gives?

-

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

-

If you are not using DNS, then make sure that your /etc/hosts files are accurate on both ends. If that fails, try -setting the -Djava.rmi.server.hostname=<public name> JVM option near the bottom of cassandra-env.sh to an -interface that you can reach from the remote machine.
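For example, near the bottom of cassandra-env.sh (the address below is a placeholder):

JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=203.0.113.15"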

-
-
-

Will batching my operations speed up my bulk load?

-

No. Using batches to load data will generally just add “spikes” of latency. Use asynchronous INSERTs instead, or use -true Bulk Loading.

-

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch stays reasonable). But never ever blindly batch everything!

-
-
-

On RHEL nodes are unable to join the ring

-

Check if SELinux is on; if it is, turn it off.

-
-
-

How do I unsubscribe from the email list?

-

Send an email to user-unsubscribe@cassandra.apache.org.

-
-
-

Why does top report that Cassandra is using a lot more memory than the Java heap max?

-

Cassandra uses Memory Mapped Files (mmap) internally. That is, we -use the operating system’s virtual memory system to map a number of on-disk files into the Cassandra process’ address -space. This will “use” virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that.

-

What matters from the perspective of “memory use” in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap’d /dev/zero, which represent real memory used. The key issue is that for a mmap’d file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write.

-

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don’t -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail here.

-
-
-

What are seeds?

-

Seeds are used during startup to discover the cluster.

-

If you configure your nodes to refer to some node as a seed, nodes in your ring tend to send Gossip messages to seeds more often (also see the section on gossip) than to non-seeds. In other words, seeds work as hubs of the Gossip network. With seeds, each node can detect status changes of other nodes quickly.

-

Seeds are also used by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node joins the ring, it learns about the other nodes, so it doesn't need a seed on subsequent boots.

-

You can make a node a seed at any time. There is nothing special about seed nodes: if you list the node in the seed list, it is a seed.

-

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to the seed list later. If you have no data (new install) you do not have to worry about bootstrap at all.

-

Recommended usage of seeds:

-
    -
  • pick two (or more) nodes per data center as seed nodes.
  • -
  • sync the seed list to all your nodes
  • -
-
-
-

Does single seed mean single point of failure?

-

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

-
-
-

Why can’t I call jmx method X on jconsole?

-

Some JMX operations use array arguments and, as jconsole doesn't support array arguments, those operations can't be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.

-
-
-

Why do I see “… messages dropped …” in the logs?

-

This is a symptom of load shedding – Cassandra defending itself against more requests than it can handle.

-

Internode messages which are received by a node, but do not get processed within their proper timeout (see read_request_timeout, write_request_timeout, … in the Cassandra Configuration File), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

-

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

-

For reads, this means a read request may not have completed.

-

Load shedding is part of the Cassandra architecture, if this is a persistent issue it is generally a sign of an -overloaded node or cluster.

-
-
-

Cassandra dies with java.lang.OutOfMemoryError: Map failed

-

If Cassandra is dying specifically with the "Map failed" message, it means the OS is denying Java the ability to lock more memory. On Linux, this typically means memlock is limited. Check /proc/<pid of cassandra>/limits to verify this and raise it (e.g. via ulimit in bash). You may also need to increase vm.max_map_count. Note that the Debian package handles this for you automatically.
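A sketch of the checks and fixes described above; the pid lookup and the max_map_count value are illustrative:

grep "locked memory" /proc/$(pgrep -f CassandraDaemon)/limits   # inspect the current memlock limit
ulimit -l unlimited                                             # raise memlock for the current shell
sudo sysctl -w vm.max_map_count=1048575                         # raise the mmap count system-wide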

-
-
-

What happens if two updates are made with the same timestamp?

-

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected.

-
-
-

Why bootstrapping a new node fails with a “Stream failed” error?

-

Two main possibilities:

-
    -
  1. the GC may be creating long pauses disrupting the streaming process
  2. -
  3. compactions happening in the background hold streaming long enough that the TCP connection fails
  4. -
-

In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (the default is very high on Linux). Try just running the following:

-
$ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-
-

To make those settings permanent, add them to your /etc/sysctl.conf file.
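For example, the equivalent /etc/sysctl.conf entries (values taken from the command above), followed by a reload:

# /etc/sysctl.conf
net.ipv4.tcp_keepalive_time=60
net.ipv4.tcp_keepalive_intvl=60
net.ipv4.tcp_keepalive_probes=5

# reload the settings without a reboot
sudo sysctl -p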

-

Note: GCE’s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/genindex.html b/src/doc/4.0-alpha3/genindex.html deleted file mode 100644 index a6c8aeadc..000000000 --- a/src/doc/4.0-alpha3/genindex.html +++ /dev/null @@ -1,95 +0,0 @@ - ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Index" -doc-header-links: ' - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/getting_started/configuring.html b/src/doc/4.0-alpha3/getting_started/configuring.html deleted file mode 100644 index babd4fa4b..000000000 --- a/src/doc/4.0-alpha3/getting_started/configuring.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

For running Cassandra on a single node, the default configuration file present at ./conf/cassandra.yaml is enough, -you shouldn’t need to change any configuration. However, when you deploy a cluster of nodes, or use clients that -are not on the same host, then there are some parameters that must be changed.

-

The Cassandra configuration files can be found in the conf directory of tarballs. For packages, the configuration -files will be located in /etc/cassandra.

-
-

Main runtime properties

-

Most configuration in Cassandra is done via yaml properties that can be set in cassandra.yaml. At a minimum you should consider setting the following properties (see the example snippet after this list):

-
    -
  • cluster_name: the name of your cluster.
  • -
  • seeds: a comma separated list of the IP addresses of your cluster seeds.
  • -
  • storage_port: you don’t necessarily need to change this but make sure that there are no firewalls blocking this -port.
  • -
  • listen_address: the IP address of your node, this is what allows other nodes to communicate with this node so it -is important that you change it. Alternatively, you can set listen_interface to tell Cassandra which interface to -use, and consecutively which address to use. Set only one, not both.
  • -
  • native_transport_port: as for storage_port, make sure this port is not blocked by firewalls as clients will -communicate with Cassandra on this port.
  • -
-
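A quick way to check the current values of these properties, assuming a tarball layout with the configuration under conf/ (the seeds list is nested under seed_provider, hence the "- seeds:" pattern):

$ grep -nE 'cluster_name:|- seeds:|storage_port:|listen_address:|native_transport_port:' conf/cassandra.yaml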
-
-

Changing the location of directories

-

The following yaml properties control the location of directories:

-
    -
  • data_file_directories: one or more directories where data files are located.
  • -
  • commitlog_directory: the directory where commitlog files are located.
  • -
  • saved_caches_directory: the directory where saved caches are located.
  • -
  • hints_directory: the directory where hints are located.
  • -
-

For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks.

-
-
-

Environment variables

-

JVM-level settings such as heap size can be set in cassandra-env.sh. You can add any additional JVM command line -argument to the JVM_OPTS environment variable; when Cassandra starts these arguments will be passed to the JVM.

-
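A minimal sketch of adding JVM arguments by appending to conf/cassandra-env.sh (the heap sizes here are illustrative placeholders, not recommendations):

$ echo 'JVM_OPTS="$JVM_OPTS -Xms4G -Xmx4G"' >> conf/cassandra-env.sh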
-
-

Logging

-

The logger in use is logback. You can change logging properties by editing logback.xml. By default it will log at -INFO level into a file called system.log and at DEBUG level into a file called debug.log. When running in the -foreground, it will also log at INFO level to the console.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/getting_started/drivers.html b/src/doc/4.0-alpha3/getting_started/drivers.html deleted file mode 100644 index 3da2f8f2f..000000000 --- a/src/doc/4.0-alpha3/getting_started/drivers.html +++ /dev/null @@ -1,247 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Client drivers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Client drivers

-

Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver.

- -
-

Python

- -
- - - - - - -
-

Clojure

- -
-
-

Erlang

- -
-
-

Go

- -
-
-

Haskell

- -
-
-

Rust

- -
- -
-

Elixir

- -
-
-

Dart

- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/getting_started/index.html b/src/doc/4.0-alpha3/getting_started/index.html deleted file mode 100644 index 2bd2c2b3f..000000000 --- a/src/doc/4.0-alpha3/getting_started/index.html +++ /dev/null @@ -1,151 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Getting Started" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/getting_started/installing.html b/src/doc/4.0-alpha3/getting_started/installing.html deleted file mode 100644 index 7994af523..000000000 --- a/src/doc/4.0-alpha3/getting_started/installing.html +++ /dev/null @@ -1,198 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Installing Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Installing Cassandra

-
-

Prerequisites

-
    -
  • The latest version of Java 8, either the Oracle Java Standard Edition 8 or OpenJDK 8. To -verify that you have the correct version of java installed, type java -version.
  • -
  • For using cqlsh, the latest version of Python 2.7. To verify that you have -the correct version of Python installed, type python --version.
  • -
-
-
-

Installation from binary tarball files

- -
tar -xzvf apache-cassandra-3.6-bin.tar.gz
-
-
-

The files will be extracted into apache-cassandra-3.6; substitute 3.6 with the release number that you -have downloaded.

-
    -
  • Optionally add apache-cassandra-3.6/bin to your path.
  • -
  • Start Cassandra in the foreground by invoking bin/cassandra -f from the command line. Press “Control-C” to stop -Cassandra. Start Cassandra in the background by invoking bin/cassandra from the command line. Invoke kill pid -or pkill -f CassandraDaemon to stop Cassandra, where pid is the Cassandra process id, which you can find for -example by invoking pgrep -f CassandraDaemon.
  • -
  • Verify that Cassandra is running by invoking bin/nodetool status from the command line.
  • -
  • Configuration files are located in the conf sub-directory.
  • -
  • Since Cassandra 2.1, log and data directories are located in the logs and data sub-directories respectively. -Older versions defaulted to /var/log/cassandra and /var/lib/cassandra. Due to this, it is necessary to either -start Cassandra with root privileges or change conf/cassandra.yaml to use directories owned by the current user, -as explained below in the section on changing the location of directories.
  • -
-
-
-

Installation from Debian packages

-
    -
  • Add the Apache repository of Cassandra to /etc/apt/sources.list.d/cassandra.sources.list, for example for version -3.6:
  • -
-
echo "deb http://www.apache.org/dist/cassandra/debian 36x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-
-
-
    -
  • Add the Apache Cassandra repository keys:
  • -
-
curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
-
-
-
    -
  • Update the repositories:
  • -
-
sudo apt-get update
-
-
-
    -
  • If you encounter this error:
  • -
-
GPG error: http://www.apache.org 36x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA
-
-
-

Then add the public key A278B781FE4B2BDA as follows:

-
sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA
-
-
-

and repeat sudo apt-get update. The actual key may be different; you get it from the error message itself. For a -full list of Apache contributors public keys, you can refer to this link.

-
    -
  • Install Cassandra:
  • -
-
sudo apt-get install cassandra
-
-
-
    -
  • You can start Cassandra with sudo service cassandra start and stop it with sudo service cassandra stop. -However, normally the service will start automatically. For this reason be sure to stop it if you need to make any -configuration changes.
  • -
  • Verify that Cassandra is running by invoking nodetool status from the command line.
  • -
  • The default location of configuration files is /etc/cassandra.
  • -
  • The default location of log and data directories is /var/log/cassandra/ and /var/lib/cassandra.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/getting_started/querying.html b/src/doc/4.0-alpha3/getting_started/querying.html deleted file mode 100644 index 1eb52cfb5..000000000 --- a/src/doc/4.0-alpha3/getting_started/querying.html +++ /dev/null @@ -1,146 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Inserting and querying" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Inserting and querying

-

The API to Cassandra is CQL, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done:

-
    -
  • either using cqlsh,
  • -
  • or through a client driver for Cassandra.
  • -
-
-

CQLSH

-

cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:

-
$ bin/cqlsh localhost
-Connected to Test Cluster at localhost:9042.
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-Use HELP for help.
-cqlsh> SELECT cluster_name, listen_address FROM system.local;
-
- cluster_name | listen_address
---------------+----------------
- Test Cluster |      127.0.0.1
-
-(1 rows)
-cqlsh>
-
-
-

See the cqlsh section for full documentation.

-
-
-

Client drivers

-

A lot of client drivers are provided by the community, and a list of known drivers is provided in the next section. You should refer to the documentation of each driver for more information on how to use it.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/index.html b/src/doc/4.0-alpha3/index.html deleted file mode 100644 index 64a5ae761..000000000 --- a/src/doc/4.0-alpha3/index.html +++ /dev/null @@ -1,86 +0,0 @@ ---- -layout: doclandingpage -title: "Documentation" -is_homepage: false -is_sphinx_doc: false ---- - -

Apache Cassandra Documentation v4.0-alpha3

- -
This documentation is currently a work-in-progress and contains a number of TODO sections. - Contributions are welcome.
- -

Main documentation

- -
-
-
- - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - -
- - - -
- - - -
- - - -
- -

Meta information

- - - - diff --git a/src/doc/4.0-alpha3/new/index.html b/src/doc/4.0-alpha3/new/index.html deleted file mode 100644 index 67335985b..000000000 --- a/src/doc/4.0-alpha3/new/index.html +++ /dev/null @@ -1,117 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "New Features in Apache Cassandra 4.0" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

New Features in Apache Cassandra 4.0

-

This section covers the new features in Apache Cassandra 4.0.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/new/java11.html b/src/doc/4.0-alpha3/new/java11.html deleted file mode 100644 index d87a7d240..000000000 --- a/src/doc/4.0-alpha3/new/java11.html +++ /dev/null @@ -1,348 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Support for Java 11" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Support for Java 11

-

In the new Java release cadence a new Java version is made available every six months. The more frequent release cycle -is favored as it brings new Java features to the developers as and when they are developed without the wait that the -earlier 3 year release model incurred. Not every Java version is a Long Term Support (LTS) version. After Java 8 the -next LTS version is Java 11. Java 9, 10, 12 and 13 are all non-LTS versions.

-

One of the objectives of the Apache Cassandra 4.0 version is to support the recent LTS Java versions 8 and 11 (CASSANDRA-9608). Java 8 and -Java 11 may be used to build and run Apache Cassandra 4.0.

-

Note: Support for JDK 11 in Apache Cassandra 4.0 is an experimental feature, and not recommended for production use.

-
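Before building or running, it can help to confirm which JDK the shell will actually pick up, so that the build/run combination matches a supported cell in the matrix below (an illustrative check only):

$ java -version
$ echo $JAVA_HOME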
-

Support Matrix

-

The support matrix for the Java versions for compiling and running Apache Cassandra 4.0 is detailed in Table 1. The -build version is along the vertical axis and the run version is along the horizontal axis.

-

Table 1 : Support Matrix for Java

                  Java 8 (Run)     Java 11 (Run)
Java 8 (Build)    Supported        Supported
Java 11 (Build)   Not Supported    Supported
-

Essentially, Apache Cassandra 4.0 source code built with Java 11 cannot be run with Java 8. Next, we shall discuss using each of Java 8 and 11 to build and run Apache Cassandra 4.0.

-
-
-

Using Java 8 to Build

-

To start with, install Java 8. As an example, for installing Java 8 on RedHat Linux the command is as follows:

-
$ sudo yum install java-1.8.0-openjdk-devel
-
-
-

Set JAVA_HOME and JRE_HOME environment variables in the shell bash script. First, open the bash script:

-
$ sudo vi ~/.bashrc
-
-
-

Set the environment variables including the PATH.

-
$ export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk
-$ export JRE_HOME=/usr/lib/jvm/java-1.8.0-openjdk/jre
-$ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
-
-
-

Download and install the Apache Cassandra 4.0 source code from the Git repository along with the dependencies.

-
$ git clone https://github.com/apache/cassandra.git
-
-
-

If Cassandra is already running stop Cassandra with the following command.

-
[ec2-user@ip-172-30-3-146 bin]$ ./nodetool stopdaemon
-
-
-

Build the source code from the cassandra directory, which has the build.xml build script. Apache Ant uses the Java version set in the JAVA_HOME environment variable.

-
$ cd ~/cassandra
-$ ant
-
-
-

Apache Cassandra 4.0 gets built with Java 8. Set the environment variable for CASSANDRA_HOME in the bash script. Also add the CASSANDRA_HOME/bin to the PATH variable.

-
$ export CASSANDRA_HOME=~/cassandra
-$ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$CASSANDRA_HOME/bin
-
-
-

To run Apache Cassandra 4.0 with either of Java 8 or Java 11 run the Cassandra application in the CASSANDRA_HOME/bin directory, which is in the PATH env variable.

-
$ cassandra
-
-
-

The Java version used to run Cassandra gets output as Cassandra is starting. As an example, if Java 11 is used, the run output should include something similar to the following snippet:

-
INFO  [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:480 - Hostname: ip-172-30-3-
-146.ec2.internal:7000:7001
-INFO  [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:487 - JVM vendor/version: OpenJDK
-64-Bit Server VM/11.0.3
-INFO  [main] 2019-07-31 21:18:16,863 CassandraDaemon.java:488 - Heap size:
-1004.000MiB/1004.000MiB
-
-
-

The following output indicates a single node Cassandra 4.0 cluster has started.

-
INFO  [main] 2019-07-31 21:18:19,687 InboundConnectionInitiator.java:130 - Listening on
-address: (127.0.0.1:7000), nic: lo, encryption: enabled (openssl)
-...
-...
-INFO  [main] 2019-07-31 21:18:19,850 StorageService.java:512 - Unable to gossip with any
-peers but continuing anyway since node is in its own seed list
-INFO  [main] 2019-07-31 21:18:19,864 StorageService.java:695 - Loading persisted ring state
-INFO  [main] 2019-07-31 21:18:19,865 StorageService.java:814 - Starting up server gossip
-INFO  [main] 2019-07-31 21:18:20,088 BufferPool.java:216 - Global buffer pool is enabled,
-when pool is exhausted (max is 251.000MiB) it will allocate on heap
-INFO  [main] 2019-07-31 21:18:20,110 StorageService.java:875 - This node will not auto
-bootstrap because it is configured to be a seed node.
-...
-...
-INFO  [main] 2019-07-31 21:18:20,809 StorageService.java:1507 - JOINING: Finish joining ring
-INFO  [main] 2019-07-31 21:18:20,921 StorageService.java:2508 - Node 127.0.0.1:7000 state
-jump to NORMAL
-
-
-
-
-

Using Java 11 to Build

-

If Java 11 is used to build Apache Cassandra 4.0, first Java 11 must be installed and the environment variables set. As an example, to download and install Java 11 on RedHat Linux run the following command.

-
$ yum install java-11-openjdk-devel
-
-
-

Set the environment variables in the bash script for Java 11. The first command is to open the bash script.

-
$ sudo vi ~/.bashrc
-$ export JAVA_HOME=/usr/lib/jvm/java-11-openjdk
-$ export JRE_HOME=/usr/lib/jvm/java-11-openjdk/jre
-$ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
-
-
-

To build source code with Java 11 one of the following two options must be used.

-
-
  1. Include the Apache Ant command-line option -Duse.jdk11=true as follows:

     $ ant -Duse.jdk11=true

  2. Set the environment variable CASSANDRA_USE_JDK11 to true:

     $ export CASSANDRA_USE_JDK11=true
-
-

As an example, set the environment variable CASSANDRA_USE_JDK11 to true.

-
[ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true
-[ec2-user@ip-172-30-3-146 cassandra]$ ant
-Buildfile: /home/ec2-user/cassandra/build.xml
-
-
-

Or, set the command-line option.

-
[ec2-user@ip-172-30-3-146 cassandra]$ ant -Duse.jdk11=true
-
-
-

The build output should include the following.

-
_build_java:
-    [echo] Compiling for Java 11
-...
-...
-build:
-
-_main-jar:
-         [copy] Copying 1 file to /home/ec2-user/cassandra/build/classes/main/META-INF
-     [jar] Building jar: /home/ec2-user/cassandra/build/apache-cassandra-4.0-SNAPSHOT.jar
-...
-...
-_build-test:
-   [javac] Compiling 739 source files to /home/ec2-user/cassandra/build/test/classes
-    [copy] Copying 25 files to /home/ec2-user/cassandra/build/test/classes
-...
-...
-jar:
-   [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/stress/META-INF
-   [mkdir] Created dir: /home/ec2-user/cassandra/build/tools/lib
-     [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/stress.jar
-   [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/fqltool/META-INF
-     [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/fqltool.jar
-
-BUILD SUCCESSFUL
-Total time: 1 minute 3 seconds
-[ec2-user@ip-172-30-3-146 cassandra]$
-
-
-
-
-

Common Issues

-

One of the two options mentioned must be used to compile with JDK 11; otherwise the build fails and the following error message is output.

-
[ec2-user@ip-172-30-3-146 cassandra]$ ant
-Buildfile: /home/ec2-user/cassandra/build.xml
-validate-build-conf:
-
-BUILD FAILED
-/home/ec2-user/cassandra/build.xml:293: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true must
-be set when building from java 11
-Total time: 1 second
-[ec2-user@ip-172-30-3-146 cassandra]$
-
-
-

Apache Cassandra 4.0 source code built with Java 11 may be run with Java 11 only. If code built with Java 11 is run with Java 8, the following error message gets output.

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com
-Last login: Wed Jul 31 20:47:26 2019 from 75.155.255.51
-[ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME
-/usr/lib/jvm/java-1.8.0-openjdk
-[ec2-user@ip-172-30-3-146 ~]$ cassandra
-...
-...
-Error: A JNI error has occurred, please check your installation and try again
-Exception in thread "main" java.lang.UnsupportedClassVersionError:
-org/apache/cassandra/service/CassandraDaemon has been compiled by a more recent version of
-the Java Runtime (class file version 55.0), this version of the Java Runtime only recognizes
-class file versions up to 52.0
-  at java.lang.ClassLoader.defineClass1(Native Method)
-  at java.lang.ClassLoader.defineClass(ClassLoader.java:763)
-  at ...
-...
-
-
-

The CASSANDRA_USE_JDK11 variable or the command-line option -Duse.jdk11 cannot be used to build with Java 8. To demonstrate, set JAVA_HOME to version 8.

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com
-Last login: Wed Jul 31 21:41:50 2019 from 75.155.255.51
-[ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME
-/usr/lib/jvm/java-1.8.0-openjdk
-
-
-

Set CASSANDRA_USE_JDK11=true or the command-line option -Duse.jdk11=true. Subsequently, run Apache Ant to start the build. The build fails with the error message listed below.

-
[ec2-user@ip-172-30-3-146 ~]$ cd
-cassandra
-[ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true
-[ec2-user@ip-172-30-3-146 cassandra]$ ant
-Buildfile: /home/ec2-user/cassandra/build.xml
-
-validate-build-conf:
-
-BUILD FAILED
-/home/ec2-user/cassandra/build.xml:285: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true cannot
-be set when building from java 8
-
-Total time: 0 seconds
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/objects.inv b/src/doc/4.0-alpha3/objects.inv deleted file mode 100644 index cac97f56f..000000000 Binary files a/src/doc/4.0-alpha3/objects.inv and /dev/null differ diff --git a/src/doc/4.0-alpha3/operating/audit_logging.html b/src/doc/4.0-alpha3/operating/audit_logging.html deleted file mode 100644 index 87463ca6c..000000000 --- a/src/doc/4.0-alpha3/operating/audit_logging.html +++ /dev/null @@ -1,281 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Audit Logging" -doc-header-links: ' - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Audit Logging

-

Audit logging in Cassandra logs every incoming CQL command request and authentication attempt (successful as well as unsuccessful login) -to a Cassandra node. Currently, there are two implementations provided; a custom logger can also be implemented and injected with the -class name as a parameter in cassandra.yaml.

-
    -
  • BinAuditLogger An efficient way to log events to file in a binary format.
  • -
  • FileAuditLogger Logs events to audit/audit.log file using slf4j logger.
  • -
-

Recommendation: BinAuditLogger is the community-recommended logger, considering its performance.

-
-

What does it capture

-

Audit logging captures the following events:

-
    -
  • Successful as well as unsuccessful login attempts.
  • -
  • All database commands executed via Native protocol (CQL) attempted or successfully executed.
  • -
-
-
-

Limitations

-

Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution time stamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log.

-
-
-

What does it log

-

Each audit log implementation has access to the following attributes, and for the default text-based logger these fields are concatenated with | characters to yield the final message.

-
-
    -
  • user: User name(if available)
  • -
  • host: Host IP, where the command is being executed
  • -
  • source ip address: Source IP address from where the request initiated
  • -
  • source port: Source port number from where the request initiated
  • -
  • timestamp: unix time stamp
  • -
  • type: Type of the request (SELECT, INSERT, etc.,)
  • -
  • category - Category of the request (DDL, DML, etc.,)
  • -
  • keyspace - Keyspace(If applicable) on which request is targeted to be executed
  • -
  • scope - Table/Aggregate name/ function name/ trigger name etc., as applicable
  • -
  • operation - CQL command being executed
  • -
-
-
-
-

How to configure

-

Auditlog can be configured using cassandra.yaml. If you want to try Auditlog on one node, it can also be enabled and configured using nodetool.

-
-

cassandra.yaml configurations for AuditLog

-
-
    -
  • enabled: This option enables/ disables audit log
  • -
  • logger: Class name of the logger/ custom logger.
  • -
  • audit_logs_dir: Auditlogs directory location, if not set, default to cassandra.logdir.audit or cassandra.logdir + /audit/
  • -
  • included_keyspaces: Comma separated list of keyspaces to be included in audit log, default - includes all keyspaces
  • -
  • excluded_keyspaces: Comma separated list of keyspaces to be excluded from audit log, default - excludes no keyspace except system, system_schema and system_virtual_schema
  • -
  • included_categories: Comma separated list of Audit Log Categories to be included in audit log, default - includes all categories
  • -
  • excluded_categories: Comma separated list of Audit Log Categories to be excluded from audit log, default - excludes no category
  • -
  • included_users: Comma separated list of users to be included in audit log, default - includes all users
  • -
  • excluded_users: Comma separated list of users to be excluded from audit log, default - excludes no user
  • -
-
-

List of available categories are: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE

-
-
-

NodeTool command to enable AuditLog

-

enableauditlog: Enables AuditLog with the cassandra.yaml defaults. The yaml configuration can be overridden using options on the nodetool command; a combined example is shown after the options list below.

-
nodetool enableauditlog
-
-
-
-

Options

-
-
--excluded-categories
-
Comma separated list of Audit Log Categories to be excluded for -audit log. If not set the value from cassandra.yaml will be used
-
--excluded-keyspaces
-
Comma separated list of keyspaces to be excluded for audit log. If -not set the value from cassandra.yaml will be used. -Please remember that system, system_schema and system_virtual_schema are excluded by default; -if you are overriding this option via nodetool, -remember to add these keyspaces back if you don’t want them in audit logs
-
--excluded-users
-
Comma separated list of users to be excluded for audit log. If not -set the value from cassandra.yaml will be used
-
--included-categories
-
Comma separated list of Audit Log Categories to be included for -audit log. If not set the value from cassandra.yaml will be used
-
--included-keyspaces
-
Comma separated list of keyspaces to be included for audit log. If -not set the value from cassandra.yaml will be used
-
--included-users
-
Comma separated list of users to be included for audit log. If not -set the value from cassandra.yaml will be used
-
--logger
-
Logger name to be used for AuditLogging. Default BinAuditLogger. If -not set the value from cassandra.yaml will be used
-
-
-
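A combined example of the options above (the user name is a placeholder, not a recommendation):

nodetool enableauditlog --excluded-keyspaces system,system_schema,system_virtual_schema --included-users audit_user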
-
-

NodeTool command to disable AuditLog

-

disableauditlog: Disables AuditLog.

-
nodetool disableauditlog
-
-
-
-
-

NodeTool command to reload AuditLog filters

-

enableauditlog: The nodetool enableauditlog command can be used to reload audit log filters when called with the default or previous logger name and updated filters.

-

E.g.,

-
nodetool enableauditlog --loggername <Default/ existing loggerName> --included-keyspaces <New Filter values>
-
-
-
-
-
-

View the contents of AuditLog Files

-

auditlogviewer is the new tool introduced to help view the contents of the binary log file in human-readable text format.

-
auditlogviewer <path1> [<path2>...<pathN>] [options]
-
-
-
-

Options

-
-
-f,--follow
-
-
Upon reaching the end of the log continue indefinitely
-
waiting for more records
-
-
-
-r,--roll_cycle
-
-
How often the log file was rolled. May be
-
necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY, -DAILY). Default HOURLY.
-
-
-
-h,--help
-
display this help message
-
-

For example, to dump the contents of audit log files on the console

-
auditlogviewer /logs/cassandra/audit
-
-
-
-
-

Sample output

-
LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1"
-
-
-
-
-
-

Configuring BinAuditLogger

-

To use BinAuditLogger as a logger in AuditLogging, set the logger to BinAuditLogger in cassandra.yaml under the audit_logging_options section. BinAuditLogger can be further configured using its advanced options in cassandra.yaml.

-
-

Advanced Options for BinAuditLogger

-
-
block
-
Indicates if the AuditLog should block if it falls behind or should drop audit log records. Default is set to true so that AuditLog records won’t be lost
-
max_queue_weight
-
Maximum weight of in memory queue for records waiting to be written to the audit log file before blocking or dropping the log records. Default is set to 256 * 1024 * 1024
-
max_log_size
-
Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is set to 16L * 1024L * 1024L * 1024L
-
roll_cycle
-
How often to roll Audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY. For more options, refer to net.openhft.chronicle.queue.RollCycles. Default is set to "HOURLY"
-
-
-
-
-

Configuring FileAuditLogger

-

To use FileAuditLogger as a logger in AuditLogging, apart from setting the class name in cassandra.yaml, the following configuration is needed to have the audit log events flow through a separate log file instead of system.log

-
<!-- Audit Logging (FileAuditLogger) rolling file appender to audit.log -->
-<appender name="AUDIT" class="ch.qos.logback.core.rolling.RollingFileAppender">
-  <file>${cassandra.logdir}/audit/audit.log</file>
-  <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
-    <!-- rollover daily -->
-    <fileNamePattern>${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
-    <!-- each file should be at most 50MB, keep 30 days worth of history, but at most 5GB -->
-    <maxFileSize>50MB</maxFileSize>
-    <maxHistory>30</maxHistory>
-    <totalSizeCap>5GB</totalSizeCap>
-  </rollingPolicy>
-  <encoder>
-    <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern>
-  </encoder>
-</appender>
-
-<!-- Audit Logging additivity to redirect audit logging events to audit/audit.log -->
-<logger name="org.apache.cassandra.audit" additivity="false" level="INFO">
-        <appender-ref ref="AUDIT"/>
-</logger>
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/backups.html b/src/doc/4.0-alpha3/operating/backups.html deleted file mode 100644 index bdc878ed8..000000000 --- a/src/doc/4.0-alpha3/operating/backups.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Backups" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/bloom_filters.html b/src/doc/4.0-alpha3/operating/bloom_filters.html deleted file mode 100644 index b7b2ac547..000000000 --- a/src/doc/4.0-alpha3/operating/bloom_filters.html +++ /dev/null @@ -1,162 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bloom Filters" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bloom Filters

-

In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter.

-

Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file.

-

While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting -the bloom_filter_fp_chance to a float between 0 and 1.

-

The default value for bloom_filter_fp_chance is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases.

-

Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the bloom_filter_fp_chance gets closer to 0), memory usage -increases non-linearly - the bloom filter for bloom_filter_fp_chance = 0.01 will require about three times as much -memory as the same table with bloom_filter_fp_chance = 0.1.

-

Typical values for bloom_filter_fp_chance are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case:

-
    -
  • Users with more RAM and slower disks may benefit from setting the bloom_filter_fp_chance to a numerically lower -number (such as 0.01) to avoid excess IO operations
  • -
  • Users with less RAM, more dense nodes, or very fast disks may tolerate a higher bloom_filter_fp_chance in order to -save RAM at the expense of excess IO operations
  • -
  • In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics -workloads), setting the bloom_filter_fp_chance to a much higher number is acceptable.
  • -
-
-

Changing

-

The bloom filter false positive chance is visible in the DESCRIBE TABLE output as the field -bloom_filter_fp_chance. Operators can change the value with an ALTER TABLE statement:

-
ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01
-
-
-

Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ALTER TABLE statement, new -files on disk will be written with the new bloom_filter_fp_chance, but existing sstables will not be modified until -they are compacted - if an operator needs a change to bloom_filter_fp_chance to take effect, they can trigger an -SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the process.

-
-
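For example, a hedged sketch of forcing such a rewrite for a single table so the new value takes effect (keyspace and table are placeholders):

$ nodetool upgradesstables -a keyspace table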
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/bulk_loading.html b/src/doc/4.0-alpha3/operating/bulk_loading.html deleted file mode 100644 index 177a27ec6..000000000 --- a/src/doc/4.0-alpha3/operating/bulk_loading.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bulk Loading" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/cdc.html b/src/doc/4.0-alpha3/operating/cdc.html deleted file mode 100644 index b26b7f904..000000000 --- a/src/doc/4.0-alpha3/operating/cdc.html +++ /dev/null @@ -1,194 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Change Data Capture" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Change Data Capture

-
-

Overview

-

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the -table property cdc=true (either when creating the table or -altering it). Upon CommitLogSegment creation, a hard-link to the segment is created in the -directory specified in cassandra.yaml. On segment fsync to disk, if CDC data is present anywhere in the segment a -<segment_name>_cdc.idx file is also created with the integer offset of how much data in the original segment is persisted -to disk. Upon final segment flush, a second line with the human-readable word “COMPLETED” will be added to the _cdc.idx -file indicating that Cassandra has completed all processing on the file.

-

We use an index file rather than just encouraging clients to parse the log in real time off a memory-mapped handle, as data -can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx -file will ensure that you only parse CDC data for data that is durable.

-

A threshold of total disk space allowed is specified in the yaml; once it is reached, newly allocated CommitLogSegments will -not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory. A minimal consumer sketch is shown below.

-
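A minimal, hedged sketch of such a consumer. It assumes the default cdc_raw location and that a segment named <name>.log has a matching <name>_cdc.idx index file, following the description above; the actual parsing step is left as a placeholder comment:

for idx in /var/lib/cassandra/cdc_raw/*_cdc.idx; do
  # Only touch segments Cassandra has finished writing
  if grep -q COMPLETED "$idx"; then
    segment="${idx%_cdc.idx}.log"
    # ... parse "$segment" with your CDC consumer here ...
    rm -f "$segment" "$idx"   # free space so new CDC writes are accepted
  fi
done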
-
-

Configuration

-
-

Enabling or disabling CDC on a table

-

CDC is enabled or disabled through the cdc table property, for instance:

-
CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=false;
-
-
-
-
-

cassandra.yaml parameters

-

The following cassandra.yaml options are available for CDC:

-
-
cdc_enabled (default: false)
-
Enable or disable CDC operations node-wide.
-
cdc_raw_directory (default: $CASSANDRA_HOME/data/cdc_raw)
-
Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
-
cdc_free_space_in_mb: (default: min of 4096 and 1/8th volume space)
-
Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in -cdc_raw_directory.
-
cdc_free_space_check_interval_ms (default: 250)
-
When at capacity, we limit the frequency with which we re-calculate the space taken up by cdc_raw_directory to -prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-
-
-
-

Reading CommitLogSegments

-

Use a CommitLogReader.java. -Usage is fairly straightforward -with a variety of signatures -available for use. In order to handle mutations read from disk, implement CommitLogReadHandler.

-
-
-

Warnings

-

Do not enable CDC without some kind of consumption process in-place.

-

If CDC is enabled on a node and then on a table, the cdc_free_space_in_mb will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place.

-
-
-

Further Reading

- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/compaction.html b/src/doc/4.0-alpha3/operating/compaction.html deleted file mode 100644 index c856a4ea1..000000000 --- a/src/doc/4.0-alpha3/operating/compaction.html +++ /dev/null @@ -1,521 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compaction" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compaction

-
-

Types of compaction

-

The concept of compaction is used for different kinds of operations in Cassandra; the common thing about these -operations is that they take one or more sstables and output new sstables. The types of compaction are:

-
-
Minor compaction
-
triggered automatically in Cassandra.
-
Major compaction
-
a user executes a compaction over all sstables on the node.
-
User defined compaction
-
a user triggers a compaction on a given set of sstables.
-
Scrub
-
try to fix any broken sstables. This can actually remove valid data if that data is corrupted; if that happens you -will need to run a full repair on the node.
-
Upgradesstables
-
upgrade sstables to the latest version. Run this after upgrading to a new major version.
-
Cleanup
-
remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been -bootstrapped since that node will take ownership of some ranges from those nodes.
-
Secondary index rebuild
-
rebuild the secondary indexes on the node.
-
Anticompaction
-
after repair the ranges that were actually repaired are split out of the sstables that existed when repair started.
-
Sub range compaction
-
It is possible to only compact a given sub range - this could be useful if you know a token that has been -misbehaving - either gathering many updates or many deletes. (nodetool compact -st x -et y) will pick -all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will -most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS -the resulting sstable will end up in L0.
-
-
-
-

When is a minor compaction triggered?

-

  1. When an sstable is added to the node through flushing/streaming etc.
  2. When autocompaction is enabled after being disabled (nodetool enableautocompaction).
  3. When compaction adds new sstables.
  4. A check for new minor compactions every 5 minutes.

-
-
-

Merging sstables

-

Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently.

-
-
-

Tombstones and Garbage Collection (GC) Grace

-
-

Why Tombstones

-

When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra.

-
-
-

Deletes without tombstones

-

Imagine a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If one of the nodes fails and our delete operation only removes existing values, we can end up with a cluster that -looks like:

-
[], [], [A]
-
-
-

Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:

-
[A], [A], [A]
-
-
-

This would cause our data to be resurrected even though it had been -deleted.

-
-
-

Deletes with Tombstones

-

Starting again with a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If instead of removing data we add a tombstone record, our single node failure situation will look like this.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A]
-
-
-

Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]]
-
-
-

Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as gc_grace_seconds for every table in Cassandra.

-
-
-

The gc_grace_seconds parameter and Tombstone Removal

-

The table level gc_grace_seconds parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After gc_grace_seconds has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to -be able to drop an actual tombstone the following needs to be true:

-
    -
  • The tombstone must be older than gc_grace_seconds
  • -
  • If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older -than the tombstone containing X must be included in the same compaction. We don’t need to care if the partition is in -an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older -than the data it cannot shadow that data.
  • -
  • If the option only_purge_repaired_tombstones is enabled, tombstones are only removed if the data has also been -repaired.
  • -
-

If a node remains down or disconnected for longer than gc_grace_seconds its deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the “Deletes without Tombstones” section. -Note that tombstones will not be removed until a compaction event even if gc_grace_seconds has elapsed.

-

The default value for gc_grace_seconds is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using WITH gc_grace_seconds.

-
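For example, a hedged cqlsh invocation (ks.events is a placeholder table) setting the grace period to 5 days:

$ cqlsh -e "ALTER TABLE ks.events WITH gc_grace_seconds = 432000;"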
-
-
-

TTL

-

Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least gc_grace_seconds. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once.

-
-
-

Fully expired sstables

-

If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called sstableexpiredblockers that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -TimeWindowCompactionStrategy (and the deprecated DateTieredCompactionStrategy). With TimeWindowCompactionStrategy -it is possible to remove the guarantee (not check for shadowing data) by enabling unsafe_aggressive_sstable_expiration.

-
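For example, a hedged invocation of the tool (keyspace and table are placeholders):

$ sstableexpiredblockers keyspace table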
-
-

Repaired/unrepaired data

-

With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables.

-
-
-

Data directories

-

Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding -data getting undeleted:

-
    -
  • It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings -and each one can run compactions independently from the others.
  • -
  • Users can backup and restore a single data directory.
  • -
  • Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk -backing two data directories, the big one will be limited by the small one. One workaround is to create -more data directories backed by the big disk.
  • -
-
-
-

Single sstable tombstone compaction

-

When an sstable is written a histogram with the tombstone expiry times is created and this is used to try to find -sstables with very many tombstones and run single sstable compaction on that sstable in hope of being able to drop -tombstones in that sstable. Before starting this it is also checked how likely it is that any tombstones will actually -be able to be dropped, based on how much this sstable overlaps with other sstables. To avoid most of these checks the -compaction option unchecked_tombstone_compaction can be enabled.

-
-
-

Common options

-

There is a number of common options for all the compaction strategies;

-
-
enabled (default: true)
-
Whether minor compactions should run. Note that you can have ‘enabled’: true as a compaction option and then do -‘nodetool enableautocompaction’ to start running compactions.
-
tombstone_threshold (default: 0.2)
-
How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-
tombstone_compaction_interval (default: 86400s (1 day))
-
Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure -that one sstable is not constantly getting recompacted - this option states how often we should try for a given -sstable.
-
log_all (default: false)
-
New detailed compaction logging, see below.
-
unchecked_tombstone_compaction (default: false)
-
The single sstable compaction has quite strict checks for whether it should be started, this option disables those -checks and for some usecases this might be needed. Note that this does not change anything for the actual -compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able -to drop any tombstones.
-
only_purge_repaired_tombstone (default: false)
-
Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-
min_threshold (default: 4)
-
Lower limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
max_threshold (default: 32)
-
Upper limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
-

Further, see the section on each strategy for specific additional options.

-
-
-

Compaction nodetool commands

-

The nodetool utility provides a number of commands related to compaction:

-
-
enableautocompaction
-
Enable compaction.
-
disableautocompaction
-
Disable compaction.
-
setcompactionthroughput
-
How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this -throughput.
-
compactionstats
-
Statistics about current and pending compactions.
-
compactionhistory
-
List details about the last compactions.
-
setcompactionthreshold
-
Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
-
-
-
-

Switching the compaction strategy and options using JMX

-

It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:

-
org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>
-
-
-

and the attribute to change is CompactionParameters or CompactionParametersJson if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an ALTER TABLE statement - -for example:

-
{ 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}
-
-
-

The setting is kept until someone executes an ALTER TABLE that touches the compaction -settings or restarts the node.

-
-
-

More detailed compaction logging

-

Enable with the compaction option log_all and a more detailed compaction log file will be produced in your log -directory.

-
-
-

Size Tiered Compaction Strategy

-

The basic idea of SizeTieredCompactionStrategy (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within bucket_low and bucket_high of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket’s sstables takes the most reads.

-
-

Major compaction

-

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%… of the total size.

-
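For example, a hedged invocation of a major compaction with split output (keyspace and table are placeholders):

$ nodetool compact -s keyspace table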
-
-

STCS options

-
-
min_sstable_size (default: 50MB)
-
Sstables smaller than this are put in the same bucket.
-
bucket_low (default: 0.5)
-
How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if bucket_low * avg_bucket_size < sstable_size (and the bucket_high condition holds, see below), then -the sstable is added to the bucket.
-
bucket_high (default: 1.5)
-
How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if sstable_size < bucket_high * avg_bucket_size (and the bucket_low condition holds, see above), then -the sstable is added to the bucket.
-
-
-
-

Defragmentation

-

Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster.

-
-
-
-

Leveled Compaction Strategy

-

The idea of LeveledCompactionStrategy (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here.

-

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can’t compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory.

-

When deciding which level to compact LCS checks the higher levels first (with LCS, a “higher” level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level.

-
-

Major compaction

-

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817.

-
-
-

Bootstrapping

-

During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

-
-
-

STCS in L0

-

If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better.

-
-
-

Starved sstables

-

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable_size_in_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved.

-
-
-

LCS options

-
-
sstable_size_in_mb (default: 160MB)
-
The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very -large partitions on the node.
-
fanout_size (default: 10)
-
The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning -this option.
-
-

LCS also supports the cassandra.disable_stcs_in_l0 startup option (-Dcassandra.disable_stcs_in_l0=true) to avoid -doing STCS in L0.

-
-
-
-

Time Window CompactionStrategy

-

TimeWindowCompactionStrategy (TWCS) is designed specifically for workloads where it’s beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -SizeTieredCompactionStrategy or LeveledCompactionStrategy. The basic concept is that -TimeWindowCompactionStrategy will create one sstable per time window, where a window is simply calculated -as the combination of two primary options:

-
-
compaction_window_unit (default: DAYS)
-
A Java TimeUnit (MINUTES, HOURS, or DAYS).
-
compaction_window_size (default: 1)
-
The number of units that make up a window.
-
unsafe_aggressive_sstable_expiration (default: false)
-
Expired sstables will be dropped without checking whether their data shadows data in other sstables. This is a potentially risky option that can lead to data loss or deleted data re-appearing, going beyond what unchecked_tombstone_compaction does for single sstable compaction. Due to the risk, the JVM must also be started with -Dcassandra.unsafe_aggressive_sstable_expiration=true.
-
-

Taken together, the operator can specify windows of virtually any size, and TimeWindowCompactionStrategy will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using SizeTieredCompactionStrategy.

-

Ideally, operators should select a compaction_window_unit and compaction_window_size pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -('compaction_window_unit':'DAYS','compaction_window_size':3).
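As an illustrative sketch (the keyspace, table and schema are placeholders), the 90 day TTL example above could be expressed as:

CREATE TABLE keyspace.table (id int, time timestamp, value text, PRIMARY KEY (id, time))
    WITH compaction = {'class': 'TimeWindowCompactionStrategy', 'compaction_window_unit': 'DAYS', 'compaction_window_size': 3}
    AND default_time_to_live = 7776000;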

-
-

TimeWindowCompactionStrategy Operational Concerns

-

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

-
    -
  • If the user mixes old data and new data in the traditional write path, the data will be commingled in the memtables and flushed into the same SSTable, where it will remain commingled.
  • -
  • If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data will be commingled and flushed into the same SSTable.
  • -
-

While TWCS tries to minimize the impact of commingled data, users should attempt to avoid this behavior. Specifically, users should avoid queries that explicitly set the timestamp via the CQL USING TIMESTAMP clause. Additionally, users should run frequent repairs (which stream data in such a way that it does not become commingled).

-
-
-

Changing TimeWindowCompactionStrategy Options

-

Operators wishing to enable TimeWindowCompactionStrategy on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected.

-

Operators wishing to change compaction_window_unit or compaction_window_size can do so, but may trigger additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 hours to 12 hours), the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple windows.
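For example (placeholder keyspace and table names), decreasing the window from 24 hours to 12 hours would be expressed as:

ALTER TABLE keyspace.table WITH compaction = {'class': 'TimeWindowCompactionStrategy', 'compaction_window_unit': 'HOURS', 'compaction_window_size': 12};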

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/compression.html b/src/doc/4.0-alpha3/operating/compression.html deleted file mode 100644 index 0750bb25e..000000000 --- a/src/doc/4.0-alpha3/operating/compression.html +++ /dev/null @@ -1,195 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compression" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compression

-

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of data on disk by compressing each SSTable in chunks of the user-configurable size chunk_length_in_kb. Because Cassandra SSTables are immutable, the CPU cost of compressing is only incurred when the SSTable is written - subsequent updates to data will land in different SSTables, so Cassandra does not need to decompress, overwrite, and recompress data when UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so on).

-
-

Configuring Compression

-

Compression is configured on a per-table basis as an optional argument to CREATE TABLE or ALTER TABLE. The following options are relevant:

-
    -
  • class specifies the compression class - Cassandra provides four classes (LZ4Compressor, -SnappyCompressor, DeflateCompressor and ZstdCompressor). The default is LZ4Compressor.
  • -
  • chunk_length_in_kb specifies the number of kilobytes of data per compression chunk. The default is 64KB.
  • -
  • crc_check_chance determines how likely Cassandra is to verify the checksum on each compression chunk during -reads. The default is 1.0.
  • -
  • -
    compression_level is only applicable for ZstdCompressor and accepts values between -131072 and 22.
    -
The lower the level, the faster the compression speed (at the cost of compression ratio). Values from 20 to 22 are called “ultra levels” and should be used with caution, as they require more memory. The default is 3.
    -
    -
  • -
-

Users can set compression using the following syntax:

-
CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-
-

Or

-
ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};
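As an additional illustration (placeholder keyspace and table names), ZstdCompressor can be combined with the compression_level option described above:

ALTER TABLE keyspace.table WITH compression = {'class': 'ZstdCompressor', 'compression_level': 3};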
-
-
-

Once enabled, compression can be disabled with ALTER TABLE setting enabled to false:

-
ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-
-

Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ALTER TABLE, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the SSTables on disk, -re-compressing the data in the process.
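For example (placeholder keyspace and table names), the rewrite can be limited to the affected table:

nodetool upgradesstables -a keyspace table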

-
-
-

Benefits and Uses

-

Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save in storage requirements, it often increases read and write throughput, as the CPU cost of compressing data is typically lower than the cost of reading or writing the larger volume of uncompressed data from disk.

-

Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well.

-
-
-

Operational Impact

-
    -
  • Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per -terabyte of data on disk, though the exact usage varies with chunk_length_in_kb and compression ratios.
  • -
  • Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as -non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.
  • -
  • The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a -way to ensure correctness of data on disk, compressed tables allow the user to set crc_check_chance (a float from -0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.
  • -
-
-
-

Advanced Use

-

Advanced users can provide their own compression class by implementing the interface at -org.apache.cassandra.io.compress.ICompressor.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/hardware.html b/src/doc/4.0-alpha3/operating/hardware.html deleted file mode 100644 index 965b05dbb..000000000 --- a/src/doc/4.0-alpha3/operating/hardware.html +++ /dev/null @@ -1,191 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hardware Choices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hardware Choices

-

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM.

-
-

CPU

-

Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes.

-
-
-

Memory

-

Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java's Xmx system parameter). In addition to the heap, Cassandra will use significant amounts of RAM off-heap for compression metadata, bloom filters, row, key, and counter caches, and an in-process page cache. Finally, Cassandra will take advantage of the operating system's page cache, storing recently accessed portions of files in RAM for rapid re-use.

-

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest:

-
    -
  • ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
  • -
  • The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM
  • -
  • Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
  • -
  • Heaps larger than 12GB should consider G1GC
  • -
-
-
-

Disks

-

Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables.

-

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files.

-

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra’s sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it’s important that the commitlog -(commitlog_directory) be on one physical disk (not simply a partition, but a physical disk), and the data files -(data_file_directories) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk.

-

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it’s typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5.

-
-
-

Common Cloud Choices

-

Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include:

-
    -
  • m1.xlarge instances, which provide 1.6TB of local ephemeral spinning storage and sufficient RAM to run moderate -workloads
  • -
  • i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs
  • -
  • m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) -storage
  • -
-

Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/hints.html b/src/doc/4.0-alpha3/operating/hints.html deleted file mode 100644 index 8d638e92a..000000000 --- a/src/doc/4.0-alpha3/operating/hints.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hints" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/index.html b/src/doc/4.0-alpha3/operating/index.html deleted file mode 100644 index 48c16eabb..000000000 --- a/src/doc/4.0-alpha3/operating/index.html +++ /dev/null @@ -1,225 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Operating Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Operating Cassandra

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/metrics.html b/src/doc/4.0-alpha3/operating/metrics.html deleted file mode 100644 index c249775e0..000000000 --- a/src/doc/4.0-alpha3/operating/metrics.html +++ /dev/null @@ -1,1801 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Monitoring" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Monitoring

-

Metrics in Cassandra are managed using the Dropwizard Metrics library. These metrics can be queried via JMX or pushed to external monitoring systems using a number of built-in and third party reporter plugins.

-

Metrics are collected for a single node. It’s up to the operator to use an external monitoring system to aggregate them.

-
-

Metric Types

-

All metrics reported by cassandra fit into one of the following types.

-
-
Gauge
-
An instantaneous measurement of a value.
-
Counter
-
A gauge for an AtomicLong instance. Typically this is consumed by monitoring the change since the last call to -see if there is a large increase compared to the norm.
-
Histogram
-

Measures the statistical distribution of values in a stream of data.

-

In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th -percentiles.

-
-
Timer
-
Measures both the rate that a particular piece of code is called and the histogram of its duration.
-
Latency
-
Special type that tracks latency (in microseconds) with a Timer plus a Counter that tracks the total latency -accrued since starting. The former is useful if you track the change in total latency since the last check. Each -metric name of this type will have ‘Latency’ and ‘TotalLatency’ appended to it.
-
Meter
-
A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving -average throughputs.
-
-
-
-

Table Metrics

-

Each table in Cassandra has metrics responsible for tracking its state and performance.

-

The metric names are all appended with the specific Keyspace and Table name.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table> name=<MetricName>
-
-
-

Note

-

There is a special table called ‘all’ without a keyspace. This represents the aggregation of metrics across -all tables and keyspaces on the node.

-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
MemtableOnHeapSizeGauge<Long>Total amount of data stored in the memtable that resides on-heap, including column related overhead and partitions overwritten.
MemtableOffHeapSizeGauge<Long>Total amount of data stored in the memtable that resides off-heap, including column related overhead and partitions overwritten.
MemtableLiveDataSizeGauge<Long>Total amount of live data stored in the memtable, excluding any data structure overhead.
AllMemtablesOnHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides on-heap.
AllMemtablesOffHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides off-heap.
AllMemtablesLiveDataSizeGauge<Long>Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead.
MemtableColumnsCountGauge<Long>Total number of columns present in the memtable.
MemtableSwitchCountCounterNumber of times flush has resulted in the memtable being switched out.
CompressionRatioGauge<Double>Current compression ratio for all SSTables.
EstimatedPartitionSizeHistogramGauge<long[]>Histogram of estimated partition size (in bytes).
EstimatedPartitionCountGauge<Long>Approximate number of keys in table.
EstimatedColumnCountHistogramGauge<long[]>Histogram of estimated number of columns.
SSTablesPerReadHistogramHistogramHistogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into account.
ReadLatencyLatencyLocal read latency for this table.
RangeLatencyLatencyLocal range scan latency for this table.
WriteLatencyLatencyLocal write latency for this table.
CoordinatorReadLatencyTimerCoordinator read latency for this table.
CoordinatorWriteLatencyTimerCoordinator write latency for this table.
CoordinatorScanLatencyTimerCoordinator range scan latency for this table.
PendingFlushesCounterEstimated number of flush tasks pending for this table.
BytesFlushedCounterTotal number of bytes flushed since server [re]start.
CompactionBytesWrittenCounterTotal number of bytes written by compaction since server [re]start.
PendingCompactionsGauge<Integer>Estimate of number of pending compactions for this table.
LiveSSTableCountGauge<Integer>Number of SSTables on disk for this table.
LiveDiskSpaceUsedCounterDisk space used by SSTables belonging to this table (in bytes).
TotalDiskSpaceUsedCounterTotal disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC’d.
MinPartitionSizeGauge<Long>Size of the smallest compacted partition (in bytes).
MaxPartitionSizeGauge<Long>Size of the largest compacted partition (in bytes).
MeanPartitionSizeGauge<Long>Size of the average compacted partition (in bytes).
BloomFilterFalsePositivesGauge<Long>Number of false positives on table’s bloom filter.
BloomFilterFalseRatioGauge<Double>False positive ratio of table’s bloom filter.
BloomFilterDiskSpaceUsedGauge<Long>Disk space used by bloom filter (in bytes).
BloomFilterOffHeapMemoryUsedGauge<Long>Off-heap memory used by bloom filter.
IndexSummaryOffHeapMemoryUsedGauge<Long>Off-heap memory used by index summary.
CompressionMetadataOffHeapMemoryUsedGauge<Long>Off-heap memory used by compression meta data.
KeyCacheHitRateGauge<Double>Key cache hit rate for this table.
TombstoneScannedHistogramHistogramHistogram of tombstones scanned in queries on this table.
LiveScannedHistogramHistogramHistogram of live cells scanned in queries on this table.
ColUpdateTimeDeltaHistogramHistogramHistogram of column update time delta on this table.
ViewLockAcquireTimeTimerTime taken acquiring a partition lock for materialized view updates on this table.
ViewReadTimeTimerTime taken during the local read of a materialized view update.
TrueSnapshotsSizeGauge<Long>Disk space used by snapshots of this table including all SSTable components.
RowCacheHitOutOfRangeCounterNumber of table row cache hits that do not satisfy the query filter, thus went to disk.
RowCacheHitCounterNumber of table row cache hits.
RowCacheMissCounterNumber of table row cache misses.
CasPrepareLatencyLatency of paxos prepare round.
CasProposeLatencyLatency of paxos propose round.
CasCommitLatencyLatency of paxos commit round.
PercentRepairedGauge<Double>Percent of table data that is repaired on disk.
BytesRepairedGauge<Long>Size of table data repaired on disk
BytesUnrepairedGauge<Long>Size of table data unrepaired on disk
BytesPendingRepairGauge<Long>Size of table data isolated for an ongoing incremental repair
SpeculativeRetriesCounterNumber of times speculative retries were sent for this table.
SpeculativeFailedRetriesCounterNumber of speculative retries that failed to prevent a timeout
SpeculativeInsufficientReplicasCounterNumber of speculative retries that couldn’t be attempted due to lack of replicas
SpeculativeSampleLatencyNanosGauge<Long>Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency.
WaitingOnFreeMemtableSpaceHistogramHistogram of time spent waiting for free memtable space, either on- or off-heap.
DroppedMutationsCounterNumber of dropped mutations on this table.
AnticompactionTimeTimerTime spent anticompacting before a consistent repair.
ValidationTimeTimerTime spent doing validation compaction during repair.
SyncTimeTimerTime spent doing streaming during repair.
BytesValidatedHistogramHistogram over the amount of bytes read during validation.
PartitionsValidatedHistogramHistogram over the number of partitions read during validation.
BytesAnticompactedCounterHow many bytes we anticompacted.
BytesMutatedAnticompactionCounterHow many bytes we avoided anticompacting because the sstable was fully contained in the repaired range.
MutatedAnticompactionGaugeGauge<Double>Ratio of bytes mutated vs total bytes repaired.
-
-
-

Keyspace Metrics

-

Each keyspace in Cassandra has metrics responsible for tracking its state and performance.

-

Most of these metrics are the same as the Table Metrics above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.keyspace.<MetricName>.<Keyspace>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Keyspace scope=<Keyspace> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
WriteFailedIdealCLCounterNumber of writes that failed to achieve the configured ideal consistency level or 0 if none is configured
IdealCLWriteLatencyLatencyCoordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured
RepairTimeTimerTotal time spent as repair coordinator.
RepairPrepareTimeTimerTotal time spent preparing for repair.
-
-
-

ThreadPool Metrics

-

Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It’s important to monitor the state of these thread pools since they can tell you how saturated a -node is.

-

The metric names are all appended with the specific ThreadPool name. The thread pools are also categorized under a -specific type.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ThreadPools.<MetricName>.<Path>.<ThreadPoolName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ThreadPools path=<Path> scope=<ThreadPoolName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ActiveTasksGauge<Integer>Number of tasks being actively worked on by this pool.
PendingTasksGauge<Integer>Number of queued tasks queued up on this pool.
CompletedTasksCounterNumber of tasks completed.
TotalBlockedTasksCounterNumber of tasks that were blocked due to queue saturation.
CurrentlyBlockedTaskCounterNumber of tasks that are currently blocked due to queue saturation but on retry will become unblocked.
MaxPoolSizeGauge<Integer>The maximum number of threads in this pool.
MaxTasksQueuedGauge<Integer>The maximum number of tasks queued before a task gets blocked.
-

The following thread pools can be monitored.

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Native-Transport-RequeststransportHandles client CQL requests
CounterMutationStagerequestResponsible for counter writes
ViewMutationStagerequestResponsible for materialized view writes
MutationStagerequestResponsible for all other writes
ReadRepairStagerequestReadRepair happens on this thread pool
ReadStagerequestLocal reads run on this thread pool
RequestResponseStagerequestCoordinator requests to the cluster run on this thread pool
AntiEntropyStageinternalBuilds merkle tree for repairs
CacheCleanupExecutorinternalCache maintenance performed on this thread pool
CompactionExecutorinternalCompactions are run on these threads
GossipStageinternalHandles gossip requests
HintsDispatcherinternalPerforms hinted handoff
InternalResponseStageinternalResponsible for intra-cluster callbacks
MemtableFlushWriterinternalWrites memtables to disk
MemtablePostFlushinternalCleans up commit log after memtable is written to disk
MemtableReclaimMemoryinternalMemtable recycling
MigrationStageinternalRuns schema migrations
MiscStageinternalMiscellaneous tasks run here
PendingRangeCalculatorinternalCalculates token range
PerDiskMemtableFlushWriter_0internalResponsible for writing a spec (there is one of these per disk 0-N)
SamplerinternalResponsible for re-sampling the index summaries of SStables
SecondaryIndexManagementinternalPerforms updates to secondary indexes
ValidationExecutorinternalPerforms validation compaction or scrubbing
ViewBuildExecutorinternalPerforms materialized views initial build
-
-
-

Client Request Metrics

-

Client requests have their own set of metrics that encapsulate the work happening at coordinator level.

-

Different types of client requests are broken down by RequestType.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ClientRequest.<MetricName>.<RequestType>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ClientRequest scope=<RequestType> name=<MetricName>
-
- --- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
RequestType:

CASRead

-
Description:

Metrics related to transactional read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction read latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
UnfinishedCommitCounterNumber of transactions that were committed on read.
ConditionNotMetCounterNumber of transactions whose preconditions did not match current values.
ContentionHistogramHistogramHow many contended reads were encountered
-
RequestType:

CASWrite

-
Description:

Metrics related to transactional write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction write latency.
UnfinishedCommitCounterNumber of transactions that were committed on write.
ConditionNotMetCounterNumber of transactions whose preconditions did not match current values.
ContentionHistogramHistogramHow many contended writes were encountered
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

Read

-
Description:

Metrics related to standard read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of read failures encountered.
 LatencyRead latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

RangeSlice

-
Description:

Metrics related to token range read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of range query failures encountered.
 LatencyRange query latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

Write

-
Description:

Metrics related to regular write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of write failures encountered.
 LatencyWrite latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

ViewWrite

-
Description:

Metrics related to materialized view writes.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
UnavailablesCounterNumber of unavailable exceptions encountered.
ViewReplicasAttemptedCounterTotal number of attempted view replica writes.
ViewReplicasSuccessCounterTotal number of succeeded view replica writes.
ViewPendingMutationsGauge<Long>ViewReplicasAttempted - ViewReplicasSuccess.
ViewWriteLatencyTimerTime between when mutation is applied to base table and when CL.ONE is achieved on view.
-
-
-
-

Cache Metrics

-

Cassandra caches have metrics to track the effectiveness of the caches, though the Table Metrics might be more useful.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Cache capacity in bytes.
EntriesGauge<Integer>Total number of cache entries.
FifteenMinuteCacheHitRateGauge<Double>15m cache hit rate.
FiveMinuteCacheHitRateGauge<Double>5m cache hit rate.
OneMinuteCacheHitRateGauge<Double>1m cache hit rate.
HitRateGauge<Double>All time cache hit rate.
HitsMeterTotal number of cache hits.
MissesMeterTotal number of cache misses.
MissLatencyTimerLatency of misses.
RequestsGauge<Long>Total number of cache requests.
SizeGauge<Long>Total size of occupied cache, in bytes.
-

The following caches are covered:

- ---- - - - - - - - - - - - - - - - - - - - -
NameDescription
CounterCacheKeeps hot counters in memory for performance.
ChunkCacheIn process uncompressed page cache.
KeyCacheCache for partition to sstable offsets.
RowCacheCache for rows kept in memory.
-
-

Note

-

Misses and MissLatency are only defined for the ChunkCache

-
-
-
-

CQL Metrics

-

Metrics specific to CQL prepared statement caching.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CQL.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CQL name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PreparedStatementsCountGauge<Integer>Number of cached prepared statements.
PreparedStatementsEvictedCounterNumber of prepared statements evicted from the prepared statement cache
PreparedStatementsExecutedCounterNumber of prepared statements executed.
RegularStatementsExecutedCounterNumber of non prepared statements executed.
PreparedStatementsRatioGauge<Double>Percentage of statements that are prepared vs unprepared.
-
-
-

DroppedMessage Metrics

-

Metrics specific to tracking dropped messages for different types of requests. Dropped writes are stored and retried by Hinted Handoff.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.DroppedMessage.<MetricName>.<Type>
-
JMX MBean
-
org.apache.cassandra.metrics:type=DroppedMessage scope=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CrossNodeDroppedLatencyTimerThe dropped latency across nodes.
InternalDroppedLatencyTimerThe dropped latency within node.
DroppedMeterNumber of dropped messages.
-

The different types of messages tracked are:

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameDescription
BATCH_STOREBatchlog write
BATCH_REMOVEBatchlog cleanup (after being successfully applied)
COUNTER_MUTATIONCounter writes
HINTHint replay
MUTATIONRegular writes
READRegular reads
READ_REPAIRRead repair
PAGED_SLICEPaged read
RANGE_SLICEToken range read
REQUEST_RESPONSERPC Callbacks
_TRACETracing writes
-
-
-

Streaming Metrics

-

Metrics reported during Streaming operations, such as repair, bootstrap, rebuild.

-

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
IncomingBytesCounterNumber of bytes streamed to this node from the peer.
OutgoingBytesCounterNumber of bytes streamed to the peer endpoint from this node.
-
-
-

Compaction Metrics

-

Metrics specific to Compaction work.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Compaction.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Compaction name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
BytesCompactedCounterTotal number of bytes compacted since server [re]start.
PendingTasksGauge<Integer>Estimated number of compactions remaining to perform.
CompletedTasksGauge<Long>Number of completed compactions since server [re]start.
TotalCompactionsCompletedMeterThroughput of completed compactions since server [re]start.
PendingTasksByTableNameGauge<Map<String, Map<String, Integer>>>Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in Table Metrics.
-
-
-

CommitLog Metrics

-

Metrics specific to the CommitLog

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CommitLog.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CommitLog name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CompletedTasksGauge<Long>Total number of commit log messages written since [re]start.
PendingTasksGauge<Long>Number of commit log messages written but yet to be fsync’d.
TotalCommitLogSizeGauge<Long>Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocationTimerTime spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommitTimerThe time spent waiting on CL fsync; for Periodic this only occurs when the sync is lagging its sync interval.
-
-
-

Storage Metrics

-

Metrics specific to the storage engine.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Storage.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Storage name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ExceptionsCounterNumber of internal exceptions caught. Under normal conditions this should be zero.
LoadCounterSize, in bytes, of the on disk data size this node manages.
TotalHintsCounterNumber of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgressCounterNumber of hints currently attempting to be sent.
-
-
-

HintedHandoff Metrics

-

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
Hints_created-<PeerIP>CounterNumber of hints on disk for this peer.
Hints_not_stored-<PeerIP>CounterNumber of hints not stored for this peer, due to being down past the configured hint window.
-
-
-

HintsService Metrics

-

Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintsService.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintsService name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
HintsSucceededMeterA meter of the hints successfully delivered
HintsFailedMeterA meter of the hints that failed to be delivered
HintsTimedOutMeterA meter of the hints that timed out
Hint_delaysHistogramHistogram of hint delivery delays (in milliseconds)
Hint_delays-<PeerIP>HistogramHistogram of hint delivery delays (in milliseconds) per peer
-
-
-

SSTable Index Metrics

-

Metrics specific to the SSTable index metadata.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry
-
JMX MBean
-
org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
IndexedEntrySizeHistogramHistogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCountHistogramHistogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGetsHistogramHistogram of the number index seeks performed per SSTable.
-
-
-

BufferPool Metrics

-

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.BufferPool.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=BufferPool name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
SizeGauge<Long>Size, in bytes, of the managed buffer pool
MissesMeterThe rate of misses in the pool. The higher this is the more allocations incurred.
-
-
-

Client Metrics

-

Metrics specific to client management.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Client.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Client name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
connectedNativeClientsGauge<Integer>Number of clients connected to this node's native protocol server
connectionsGauge<List<Map<String, String>>List of all connections and their state information
connectedNativeClientsByUserGauge<Map<String, Int>Number of connected native clients by username
-
-
-

Batch Metrics

-

Metrics specific to batch statements.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Batch.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Batch name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PartitionsPerCounterBatchHistogramDistribution of the number of partitions processed per counter batch
PartitionsPerLoggedBatchHistogramDistribution of the number of partitions processed per logged batch
PartitionsPerUnloggedBatchHistogramDistribution of the number of partitions processed per unlogged batch
-
-
-

JVM Metrics

-

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using Metric Reporters.

-
-

BufferPool

-
-
Metric Name
-
jvm.buffers.<direct|mapped>.<MetricName>
-
JMX MBean
-
java.nio:type=BufferPool name=<direct|mapped>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Estimated total capacity of the buffers in this pool
CountGauge<Long>Estimated number of buffers in the pool
UsedGauge<Long>Estimated memory that the Java virtual machine is using for this buffer pool
-
-
-

FileDescriptorRatio

-
-
Metric Name
-
jvm.fd.<MetricName>
-
JMX MBean
-
java.lang:type=OperatingSystem name=<OpenFileDescriptorCount|MaxFileDescriptorCount>
-
- ----- - - - - - - - - - - - - -
NameTypeDescription
UsageRatioRatio of used to total file descriptors
-
-
-

GarbageCollector

-
-
Metric Name
-
jvm.gc.<gc_type>.<MetricName>
-
JMX MBean
-
java.lang:type=GarbageCollector name=<gc_type>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
CountGauge<Long>Total number of collections that have occurred
TimeGauge<Long>Approximate accumulated collection elapsed time in milliseconds
-
-
-

Memory

-
-
Metric Name
-
jvm.memory.<heap/non-heap/total>.<MetricName>
-
JMX MBean
-
java.lang:type=Memory
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-

MemoryPool

-
-
Metric Name
-
jvm.memory.pools.<memory_pool>.<MetricName>
-
JMX MBean
-
java.lang:type=MemoryPool name=<memory_pool>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-
-

JMX

-

Any JMX based client can access metrics from cassandra.
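Beyond raw JMX access, some of the same information can be spot-checked with nodetool (placeholder keyspace/table names; output format varies by version):

nodetool tpstats
nodetool tablestats keyspace.table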

-

If you wish to access JMX metrics over http it’s possible to download Mx4jTool and -place mx4j-tools.jar into the classpath. On startup you will see in the log:

-
HttpAdaptor version 3.0.2 started on port 8081
-
-
-

To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -conf/cassandra-env.sh and uncomment:

-
#MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0"
-
-#MX4J_PORT="-Dmx4jport=8081"
-
-
-
-
-

Metric Reporters

-

As mentioned at the top of this section on monitoring, the Cassandra metrics can be exported to a number of monitoring systems using a number of built-in and third party reporter plugins.

-

The configuration of these plugins is managed by the metrics reporter config project. There is a sample configuration file located at -conf/metrics-reporter-config-sample.yaml.

-

Once configured, you simply start cassandra with the flag -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml. The specified .yaml file plus any 3rd party reporter jars must all be in Cassandra's classpath.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/read_repair.html b/src/doc/4.0-alpha3/operating/read_repair.html deleted file mode 100644 index e41a1b064..000000000 --- a/src/doc/4.0-alpha3/operating/read_repair.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Read repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/repair.html b/src/doc/4.0-alpha3/operating/repair.html deleted file mode 100644 index 5586bb500..000000000 --- a/src/doc/4.0-alpha3/operating/repair.html +++ /dev/null @@ -1,194 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Repair

-

Cassandra is designed to remain available if one of its nodes is down or unreachable. However, when a node is down or unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but they are a best effort and aren't guaranteed to inform a node of 100% of the writes it missed. These inconsistencies can eventually result in data loss as nodes are replaced or tombstones expire.

-

These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their -respective datasets for their common token ranges, and streaming the differences for any out of sync sections between -the nodes. It compares the data with merkle trees, which are a hierarchy of hashes.

-
-

Incremental and Full Repairs

-

There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that’s been written since the previous incremental repair.

-

Incremental repairs are the default repair type, and if run regularly, can significantly reduce the time and I/O cost of performing a repair. However, it's important to understand that once an incremental repair marks data as repaired, it won't try to repair it again. This is fine for syncing up missed writes, but it doesn't protect against things like disk corruption, data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally.

-
-
-

Usage and Best Practices

-

Since repair can result in a lot of disk and network I/O, it's not run automatically by Cassandra. It is run by the operator via nodetool.

-

Incremental repair is the default and is run with the following command:

-
nodetool repair
-
-
-

A full repair can be run with the following command:

-
nodetool repair --full
-
-
-

Additionally, repair can be run on a single keyspace:

-
nodetool repair [options] <keyspace_name>
-
-
-

Or even on specific tables:

-
nodetool repair [options] <keyspace_name> <table1> <table2>
-
-
-

The repair command only repairs token ranges on the node being repaired; it doesn't repair the whole cluster. By default, repair will operate on all token ranges replicated by the node you're running repair on, which will cause duplicate work if you run it on every node. The -pr flag will only repair the “primary” ranges on a node, so you can repair your entire cluster by running nodetool repair -pr on each node in a single datacenter.

-

The specific frequency of repair that’s right for your cluster, of course, depends on several factors. However, if you’re -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don’t want to run incremental repairs, a full repair every 5 days is a good place -to start.
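As an illustrative sketch only (the schedule, times, and use of cron are assumptions, not project recommendations), such a cadence could be driven from each node's crontab:

# hypothetical schedule; adjust per cluster
0 2 * * 1,3,5 nodetool repair -pr
0 4 * * 0 nodetool repair -pr --full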

-

At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays.

-
-
-

Other Options

-
-
-pr, --partitioner-range
-
Restricts repair to the ‘primary’ token ranges of the node being repaired. A primary range is just a token range for -which a node is the first replica in the ring.
-
-prv, --preview
-
Estimates the amount of streaming that would occur for the given repair command. This builds the merkle trees, and prints -the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated, -add the --full flag to estimate a full repair.
-
-vd, --validate
-
Verifies that the repaired data is the same across all nodes. Similar to --preview, this builds and compares merkle trees of repaired data, but doesn't do any streaming. This is useful for troubleshooting. If this shows that the repaired data is out of sync, a full repair should be run.
-
-
-

See also

-

nodetool repair docs

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/security.html b/src/doc/4.0-alpha3/operating/security.html deleted file mode 100644 index f16d41268..000000000 --- a/src/doc/4.0-alpha3/operating/security.html +++ /dev/null @@ -1,474 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-

There are three main components to the security features provided by Cassandra:

-
    -
  • TLS/SSL encryption for client and inter-node communication
  • -
  • Client authentication
  • -
  • Authorization
  • -
-

By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still:

-
    -
  • Craft internode messages to insert users into authentication schema
  • -
  • Craft internode messages to truncate or drop schema
  • -
  • Use tools such as sstableloader to overwrite system_auth tables
  • -
  • Attach to the cluster directly to capture write traffic
  • -
-

Correct configuration of all three security components should negate these vectors. Therefore, understanding Cassandra's security features is crucial to configuring your cluster to meet your security needs.

-
-

TLS/SSL Encryption

-

Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently.

-

In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can be overridden using the settings in cassandra.yaml, but this is not recommended unless there are policies in place which dictate certain settings, or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be updated.

-

FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See the java document on FIPS -for more details.

-

For information on generating the keystore and truststore files used in SSL communications, see the -java documentation on creating keystores

-
-
-

SSL Certificate Hot Reloading

-

Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes.

-

Certificate hot reloading may also be triggered using the nodetool reloadssl command. Use this if you want Cassandra to immediately notice the changed certificates.

-
-

Inter-node Encryption

-

The settings for managing inter-node encryption are found in cassandra.yaml in the server_encryption_options -section. To enable inter-node encryption, change the internode_encryption setting from its default value of none -to one value from: rack, dc or all.
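As a minimal sketch (the keystore paths and passwords are placeholders, and other server_encryption_options settings are omitted), the relevant cassandra.yaml fragment might look like:

server_encryption_options:
    internode_encryption: all
    keystore: conf/.keystore
    keystore_password: <keystore-password>
    truststore: conf/.truststore
    truststore_password: <truststore-password>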

-
-
-

Client to Node Encryption

-

The settings for managing client to node encryption are found in cassandra.yaml in the client_encryption_options -section. There are two primary toggles here for enabling encryption, enabled and optional.

-
    -
  • If neither is set to true, client connections are entirely unencrypted.
  • -
  • If enabled is set to true and optional is set to false, all client connections must be secured.
  • -
  • If both options are set to true, both encrypted and unencrypted connections are supported using the same port. -Client connections using encryption with this configuration will be automatically detected and handled by the server.
  • -
-

As an alternative to the optional setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set optional to false and use the native_transport_port_ssl -setting in cassandra.yaml to specify the port to be used for secure client communication.
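As a minimal sketch (placeholder keystore path and password), enabling encryption while still accepting unencrypted connections on the same port might look like:

client_encryption_options:
    enabled: true
    optional: true
    keystore: conf/.keystore
    keystore_password: <keystore-password>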

-
-
-
-

Roles

-

Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -role_manager setting in cassandra.yaml. The default setting uses CassandraRoleManager, an implementation -which stores role information in the tables of the system_auth keyspace.

-

See also the CQL documentation on roles.

-
-
-

Authentication

-

Authentication is pluggable in Cassandra and is configured using the authenticator setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthenticator which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra’s permissions subsystem, so if authentication is disabled, effectively so are permissions.

-

The default distribution also includes PasswordAuthenticator, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication.

-
-

Enabling Password Authentication

-

Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster.

-

Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps:

-
  1. Open a cqlsh session and change the replication factor of the system_auth keyspace. By default, this keyspace uses SimpleStrategy and a replication_factor of 1. It is recommended to change this for any non-trivial deployment to ensure that, should nodes become unavailable, login is still possible. Best practice is to configure a replication factor of 3 to 5 per DC.
-
ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3};
-
-
-
  2. Edit cassandra.yaml to change the authenticator option like so:
-
authenticator: PasswordAuthenticator
-
-
-
  3. Restart the node.
  4. Open a new cqlsh session using the credentials of the default superuser:
-
cqlsh -u cassandra -p cassandra
-
-
-
  5. During login, the credentials for the default superuser are read with a consistency level of QUORUM, whereas those for all other users (including superusers) are read at LOCAL_ONE. In the interests of performance and availability, as well as security, operators should create another superuser and disable the default one. This step is optional, but highly recommended. While logged in as the default superuser, create another superuser role which can be used to bootstrap further configuration.
-
# create a new superuser
CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super';
-
-
-
  6. Start a new cqlsh session, this time logging in as the new superuser, and disable the default superuser.
-
ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false;
-
-
-
  7. Finally, set up the roles and credentials for your application users with CREATE ROLE statements.
-

At the end of these steps, the one node is configured to use password authentication. To roll that out across the cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be fully enabled throughout the cluster.

-

Note that using PasswordAuthenticator also requires the use of CassandraRoleManager.

-

See also: Setting credentials for internal authentication, CREATE ROLE, ALTER ROLE, ALTER KEYSPACE and GRANT PERMISSION.

-
-
-
-

Authorization

-

Authorization is pluggable in Cassandra and is configured using the authorizer setting in cassandra.yaml. Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthorizer which performs no checking and so effectively grants all permissions to all roles. This must be used if AllowAllAuthenticator is the configured authenticator.

-

The default distribution also includes CassandraAuthorizer, which implements full permissions management functionality and stores its data in Cassandra system tables.

-
-

Enabling Internal Authorization

-

Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on a node which is not processing client requests.

-

The following assumes that authentication has already been enabled via the process outlined in Enabling Password Authentication. Perform these steps to enable internal authorization across the cluster:

-
  1. On the selected node, edit cassandra.yaml to change the authorizer option like so:
-
authorizer: CassandraAuthorizer
-
-
-
  2. Restart the node.
  3. Open a new cqlsh session using the credentials of a role with superuser privileges:
-
cqlsh -u dba -p super
-
-
-
  4. Configure the appropriate access privileges for your clients using GRANT PERMISSION statements. On the other nodes this will have no effect until the configuration is updated and the node restarted, so disruption to clients is avoided.
-
GRANT SELECT ON ks.t1 TO db_user;
-
-
-
  5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node restarts and clients reconnect, the enforcement of the granted permissions will begin.
-

See also: GRANT PERMISSION, GRANT ALL <grant-all> and REVOKE PERMISSION

-
-
-
-

Caching

-

Enabling authentication and authorization places additional load on the cluster by frequently reading from the system_auth tables. Furthermore, these reads are in the critical paths of many client operations, and so have the potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role details are cached for a configurable period. The caching can be configured (and even disabled) from cassandra.yaml or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX are not persistent and will be re-read from cassandra.yaml when the node is restarted.

-

Each cache has 3 options which can be set:

-
-
Validity Period
Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache.
Refresh Rate
Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a shorter time than the validity period.
Max Entries
Controls the upper bound on cache size.
-
-

The naming for these options in cassandra.yaml follows the convention:

-
  • <type>_validity_in_ms
  • <type>_update_interval_in_ms
  • <type>_cache_max_entries
-

Where <type> is one of credentials, permissions, or roles.
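For example, the roles cache would be tuned with entries such as the following (the values shown are purely illustrative, not recommendations):

roles_validity_in_ms: 2000
roles_update_interval_in_ms: 1000
roles_cache_max_entries: 1000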

-

As mentioned, these are also exposed via JMX in the mbeans under the org.apache.cassandra.auth domain.

-
-
-

JMX access

-

Access control for JMX clients is configured separately from that for CQL. For both authentication and authorization, two providers are available; the first based on standard JMX security and the second integrating more closely with Cassandra’s own auth subsystem.

-

The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit cassandra-env.sh (or cassandra-env.ps1 on Windows) to change the LOCAL_JMX setting to no. Under the standard configuration, when remote JMX connections are enabled, standard JMX authentication is also switched on.
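For example, in cassandra-env.sh:

# Allow remote JMX connections instead of localhost-only access
LOCAL_JMX=no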

-

Note that by default, local-only connections are not subject to authentication, but this can be enabled.

-

If enabling remote connections, it is recommended to also use SSL connections.

-

Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as nodetool, are correctly configured and working as expected.

-
-

Standard JMX Auth

-

Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in cassandra-env.sh by the line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

Edit the password file to add username/password pairs:

-
jmx_user jmx_password
-
-
-

Secure the credentials file so that only the user running the Cassandra process can read it:

-
$ chown cassandra:cassandra /etc/cassandra/jmxremote.password
$ chmod 400 /etc/cassandra/jmxremote.password
-
-
-

Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a simple access file, uncomment this line in cassandra-env.sh:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

Then edit the access file to grant your JMX user readwrite permission:

-
jmx_user readwrite
-
-
-

Cassandra must be restarted to pick up the new settings.

-

See also: Using File-Based Password Authentication In JMX

-
-
-

Cassandra Integrated Auth

-

An alternative to the out-of-the-box JMX auth is to use Cassandra’s own authentication and/or authorization providers for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not available until after a node has joined the ring, because the auth subsystem is not fully configured until that point. However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is recommended, where possible, to use local-only JMX auth during bootstrap and then, if remote connectivity is required, to switch to integrated auth once the node has joined the ring and initial setup is complete.

-

With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates can be managed centrally using just cqlsh. Furthermore, fine grained control over exactly which operations are permitted on particular MBeans can be achieved via GRANT PERMISSION.

-

To enable integrated authentication, edit cassandra-env.sh to uncomment these lines:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
-
-
-

And disable standard JMX auth by commenting out this line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

To enable integrated authorization, uncomment this line:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
-
-
-

Check standard access control is off by ensuring this line is commented out:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

With integrated authentication and authorization enabled, operators can define specific roles and grant them access to the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as jconsole or jmc in read-only mode would be defined as:

-
CREATE ROLE jmx WITH LOGIN = false;
GRANT SELECT ON ALL MBEANS TO jmx;
GRANT DESCRIBE ON ALL MBEANS TO jmx;
GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx;
GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx;

# Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx;
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx;

# Grant the jmx role to one with login permissions so that it can access the JMX tooling
CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false;
GRANT jmx TO ks_user;
-
-
-

Fine grained access control to individual MBeans is also supported:

-
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user;
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner;
-
-
-

This permits the ks_user role to invoke methods on the MBean representing a single table in test_keyspace, while granting the same permission for all table level MBeans in that keyspace to the ks_owner role.

-

Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so no further restarts are required if permissions are altered.

-

See also: Permissions.

-
-
-

JMX With SSL

-

JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit the relevant lines in cassandra-env.sh (or cassandra-env.ps1 on Windows) to uncomment and set the values of these properties as required (an illustrative fragment follows the list):

-
-
com.sun.management.jmxremote.ssl
set to true to enable SSL
com.sun.management.jmxremote.ssl.need.client.auth
set to true to enable validation of client certificates
com.sun.management.jmxremote.registry.ssl
enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub
com.sun.management.jmxremote.ssl.enabled.protocols
by default, the protocols supported by the JVM will be used; override with a comma-separated list. Note that this is not usually necessary and using the defaults is the preferred option.
com.sun.management.jmxremote.ssl.enabled.cipher.suites
by default, the cipher suites supported by the JVM will be used; override with a comma-separated list. Note that this is not usually necessary and using the defaults is the preferred option.
javax.net.ssl.keyStore
set the path on the local filesystem of the keystore containing server private keys and public certificates
javax.net.ssl.keyStorePassword
set the password of the keystore file
javax.net.ssl.trustStore
if validation of client certificates is required, use this property to specify the path of the truststore containing the public certificates of trusted clients
javax.net.ssl.trustStorePassword
set the password of the truststore file
-
-
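An illustrative cassandra-env.sh fragment enabling SSL for JMX; the keystore path and password are placeholders to be replaced with your own values:

JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=true"
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.registry.ssl=true"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/path/to/keystore"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=<keystore-password>"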

See also: Oracle Java7 Docs, Monitor Java with JMX

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/snitch.html b/src/doc/4.0-alpha3/operating/snitch.html deleted file mode 100644 index 3e5a6737c..000000000 --- a/src/doc/4.0-alpha3/operating/snitch.html +++ /dev/null @@ -1,178 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Snitch" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Snitch

-

In Cassandra, the snitch has two functions:

-
  • it teaches Cassandra enough about your network topology to route requests efficiently.
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping machines into “datacenters” and “racks.” Cassandra will do its best not to have more than one replica on the same “rack” (which may not actually be a physical location).
-
-

Dynamic snitching

-

The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is configured with the following properties in cassandra.yaml (an illustrative fragment follows the list):

-
  • dynamic_snitch: whether the dynamic snitch should be enabled or disabled.
  • dynamic_snitch_update_interval_in_ms: controls how often to perform the more expensive part of host score calculation.
  • dynamic_snitch_reset_interval_in_ms: if set greater than zero, this will allow ‘pinning’ of replicas to hosts in order to increase cache capacity.
  • dynamic_snitch_badness_threshold: the badness threshold controls how much worse the pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned host was 20% worse than the fastest.
-
-
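An illustrative cassandra.yaml fragment; the values shown match the defaults shipped at the time of writing, and should be tuned to your workload:

dynamic_snitch: true
dynamic_snitch_update_interval_in_ms: 100
dynamic_snitch_reset_interval_in_ms: 600000
dynamic_snitch_badness_threshold: 0.1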
-

Snitch classes

-

The endpoint_snitch parameter in cassandra.yaml should be set to the class that implements IEndpointSnitch, which will be wrapped by the dynamic snitch and decides whether two endpoints are in the same data center or on the same rack. Out of the box, Cassandra provides the following snitch implementations (an example configuration follows the list):

-
-
GossipingPropertyFileSnitch
This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in cassandra-rackdc.properties and propagated to other nodes via gossip. If cassandra-topology.properties exists, it is used as a fallback, allowing migration from the PropertyFileSnitch.
SimpleSnitch
Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for single-datacenter deployments.
PropertyFileSnitch
Proximity is determined by rack and data center, which are explicitly configured in cassandra-topology.properties.
Ec2Snitch
Appropriate for EC2 deployments in a single Region. Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this will not work across multiple regions.
Ec2MultiRegionSnitch
Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the public IP as well). You will need to open the storage_port or ssl_storage_port on the public IP firewall (for intra-Region traffic, Cassandra will switch to the private IP after establishing a connection).
RackInferringSnitch
Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each node’s IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an example of writing a custom Snitch class and is provided in that spirit.
-
-
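For illustration, a typical production configuration might look like the following; the datacenter and rack names are placeholders chosen for this example:

# cassandra.yaml
endpoint_snitch: GossipingPropertyFileSnitch

# cassandra-rackdc.properties on each node
dc=DC1
rack=RAC1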
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/operating/topo_changes.html b/src/doc/4.0-alpha3/operating/topo_changes.html deleted file mode 100644 index defc93f66..000000000 --- a/src/doc/4.0-alpha3/operating/topo_changes.html +++ /dev/null @@ -1,222 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Adding, replacing, moving and removing nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Adding, replacing, moving and removing nodes

-
-

Bootstrap

-

Adding new nodes is called “bootstrapping”. The num_tokens parameter defines the number of virtual nodes (tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) the node will become responsible for.

-
-

Token allocation

-

With the default token allocation algorithm the new node will pick num_tokens random tokens to become responsible for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but it also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with acceptable overhead.

-

On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, the new node must be started with the JVM option -Dcassandra.allocate_tokens_for_keyspace=<keyspace>, where <keyspace> is the keyspace from which the algorithm can find the load information to optimize token assignment for.
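For example, assuming a stock cassandra-env.sh (which appends the JVM_EXTRA_OPTS environment variable to the JVM options) and a hypothetical keyspace named my_keyspace:

$ JVM_EXTRA_OPTS="-Dcassandra.allocate_tokens_for_keyspace=my_keyspace" cassandra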

-
-

Manual token assignment

-

You may specify a comma-separated list of tokens manually with the initial_token cassandra.yaml parameter, and if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment with an external tool or when restoring a node with its previous tokens.
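A minimal cassandra.yaml sketch for a single manually assigned token; the token value here is a placeholder:

num_tokens: 1
initial_token: -9223372036854775808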

-
-
-
-

Range streaming

-

After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become responsible for to stream data from. By default it will stream from the primary replica of each token range in order to guarantee data in the new node will be consistent with the current state.

-

In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and potentially miss data from an unavailable replica, set the JVM flag -Dcassandra.consistent.rangemovement=false.

-
-
-

Resuming a failed or hung bootstrap

-

On 2.2+, if the bootstrap process fails, it’s possible to resume bootstrap from the previous saved state by calling nodetool bootstrap resume. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply restarting the node. In order to clean up bootstrap state and start fresh, you may set the JVM startup flag -Dcassandra.reset_bootstrap_progress=true.

-

On lower versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart the bootstrap process again.

-
-
-

Manual bootstrapping

-

It’s possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter auto_bootstrap: false. This may be useful when restoring a node from a backup or creating a new data-center.
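A minimal cassandra.yaml sketch; use with care, since the node will serve reads for its ranges without having streamed any data:

# Join the ring immediately without streaming data (restores and new datacenters only)
auto_bootstrap: false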

-
-
-
-

Removing nodes

-

You can take a node out of the cluster by running nodetool decommission on a live node, or nodetool removenode (from any other machine) to remove a dead one. This will assign the ranges the old node was responsible for to other nodes, and replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If removenode is used, the data will stream from the remaining replicas.
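For example (the host ID placeholder is taken from the output of nodetool status):

$ nodetool decommission            # run on the live node being removed
$ nodetool removenode <host-id>    # run from any other node to remove a dead node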

-

No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at a different token on the ring, it should be removed manually.

-
-
-

Moving nodes

-

When num_tokens: 1 it’s possible to move the node position in the ring with nodetool move. Moving is both a convenience over and more efficient than decommission + bootstrap. After moving a node, nodetool cleanup should be run to remove any unnecessary data.
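For example (the token value is a placeholder for the node's new position on the ring):

$ nodetool move 4611686018427387904
$ nodetool cleanup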

-
-
-

Replacing a dead node

-

In order to replace a dead node, start cassandra with the JVM startup flag -Dcassandra.replace_address_first_boot=<dead_node_ip>. Once this property is enabled the node starts in a hibernate state, during which all the other nodes will see this node as DOWN (DN); however, this node will see itself as UP (UN). Accurate replacement state can be found in nodetool netstats.

-

The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will only receive writes during the bootstrapping phase if it has a different IP address from the node that is being replaced. (See CASSANDRA-8523 and CASSANDRA-12344.)

-

Once the bootstrapping is complete the node will be marked “UP”.

-
-

Note

-

If any of the following cases apply, you MUST run repair to make the replaced node consistent again, since it missed ongoing writes during/prior to bootstrapping. The replacement timeframe refers to the period from when the node initially dies to when a new node completes the replacement process.

-
  1. The node is down for longer than max_hint_window_in_ms before being replaced.
  2. You are replacing using the same IP address as the dead node and replacement takes longer than max_hint_window_in_ms.
-
-
-
-

Monitoring progress

-

Bootstrap, replace, move and remove progress can be monitored using nodetool netstats, which will show the progress of the streaming operations.

-
-
-

Cleanup data after range movements

-

As a safety measure, Cassandra does not automatically remove data from nodes that “lose” part of their token range due to a range movement operation (bootstrap, move, replace). Run nodetool cleanup on the nodes that lost ranges to the joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be counted against the load on that node.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/plugins/index.html b/src/doc/4.0-alpha3/plugins/index.html deleted file mode 100644 index 32824a321..000000000 --- a/src/doc/4.0-alpha3/plugins/index.html +++ /dev/null @@ -1,117 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Third-Party Plugins" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Third-Party Plugins

-

Available third-party plugins for Apache Cassandra

-
-

CAPI-Rowcache

-

The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments.

-

The official page for the CAPI-Rowcache plugin contains further details on how to build, run, and download the plugin.

-
-
-

Stratio’s Cassandra Lucene Index

-

Stratio’s Lucene index is a Cassandra secondary index implementation based on Apache Lucene. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities, such as those of Elasticsearch or Apache Solr, including full text search, free multivariable, geospatial and bitemporal search, relevance queries, and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability are guaranteed.

-

The official Github repository Cassandra Lucene Index contains everything you need to build/run/configure the plugin.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/search.html b/src/doc/4.0-alpha3/search.html deleted file mode 100644 index 56789334a..000000000 --- a/src/doc/4.0-alpha3/search.html +++ /dev/null @@ -1,105 +0,0 @@ ---- -layout: docpage - -title: "Search" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "" -doc-header-links: ' - -' -doc-search-path: "#" - -extra-footer: ' - - - - -' - ---- -
-
- -
-
-
- - - - -
- -
- - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/searchindex.js b/src/doc/4.0-alpha3/searchindex.js deleted file mode 100644 index 1e24808c0..000000000 --- a/src/doc/4.0-alpha3/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["architecture/dynamo","architecture/guarantees","architecture/index","architecture/overview","architecture/storage_engine","bugs","configuration/cassandra_config_file","configuration/index","contactus","cql/appendices","cql/changes","cql/ddl","cql/definitions","cql/dml","cql/functions","cql/index","cql/indexes","cql/json","cql/mvs","cql/operators","cql/security","cql/triggers","cql/types","data_modeling/data_modeling_conceptual","data_modeling/data_modeling_logical","data_modeling/data_modeling_physical","data_modeling/data_modeling_queries","data_modeling/data_modeling_rdbms","data_modeling/data_modeling_refining","data_modeling/data_modeling_schema","data_modeling/data_modeling_tools","data_modeling/index","development/ci","development/code_style","development/dependencies","development/documentation","development/gettingstarted","development/how_to_commit","development/how_to_review","development/ide","development/index","development/patches","development/release_process","development/testing","faq/index","getting_started/configuring","getting_started/drivers","getting_started/index","getting_started/installing","getting_started/querying","index","new/index","new/java11","operating/audit_logging","operating/backups","operating/bloom_filters","operating/bulk_loading","operating/cdc","operating/compaction","operating/compression","operating/hardware","operating/hints","operating/index","operating/metrics","operating/read_repair","operating/repair","operating/security","operating/snitch","operating/topo_changes","plugins/index","tools/cassandra_stress","tools/cqlsh","tools/index","tools/nodetool/assassinate","tools/nodetool/bootstrap","tools/nodetool/cleanup","tools/nodetool/clearsnapshot","tools/nodetool/clientstats","tools/nodetool/compact","tools/nodetool/compactionhistory","tools/nodetool/compactionstats","tools/nodetool/decommission","tools/nodetool/describecluster","tools/nodetool/describering","tools/nodetool/disableauditlog","tools/nodetool/disableautocompaction","tools/nodetool/disablebackup","tools/nodetool/disablebinary","tools/nodetool/disablefullquerylog","tools/nodetool/disablegossip","tools/nodetool/disablehandoff","tools/nodetool/disablehintsfordc","tools/nodetool/disableoldprotocolversions","tools/nodetool/drain","tools/nodetool/enableauditlog","tools/nodetool/enableautocompaction","tools/nodetool/enablebackup","tools/nodetool/enablebinary","tools/nodetool/enablefullquerylog","tools/nodetool/enablegossip","tools/nodetool/enablehandoff","tools/nodetool/enablehintsfordc","tools/nodetool/enableoldprotocolversions","tools/nodetool/failuredetector","tools/nodetool/flush","tools/nodetool/garbagecollect","tools/nodetool/gcstats","tools/nodetool/getbatchlogreplaythrottle","tools/nodetool/getcompactionthreshold","tools/nodetool/getcompactionthroughput","tools/nodetool/getconcurrency","tools/nodetool/getconcurrentcompactors","tools/nodetool/getconcurrentviewbuilders","tools/nodetool/getendpoints","tools/nodetool/getinterdcstreamthroughput","tools/nodetool/getlogginglevels","tools/nodetool/getmaxhintwindow","tools/nodetool/getreplicas","tools/nodetool/getseeds","tools/nodetool/getsstables","tools/nodetool/getstreamthroughput","tools/nodetool/gettimeout","tools/nodetool/gettraceprobability","tools/nodetool/gossipinfo","tools/nodeto
ol/handoffwindow","tools/nodetool/help","tools/nodetool/import","tools/nodetool/info","tools/nodetool/invalidatecountercache","tools/nodetool/invalidatekeycache","tools/nodetool/invalidaterowcache","tools/nodetool/join","tools/nodetool/listsnapshots","tools/nodetool/move","tools/nodetool/netstats","tools/nodetool/nodetool","tools/nodetool/pausehandoff","tools/nodetool/profileload","tools/nodetool/proxyhistograms","tools/nodetool/rangekeysample","tools/nodetool/rebuild","tools/nodetool/rebuild_index","tools/nodetool/refresh","tools/nodetool/refreshsizeestimates","tools/nodetool/reloadlocalschema","tools/nodetool/reloadseeds","tools/nodetool/reloadssl","tools/nodetool/reloadtriggers","tools/nodetool/relocatesstables","tools/nodetool/removenode","tools/nodetool/repair","tools/nodetool/repair_admin","tools/nodetool/replaybatchlog","tools/nodetool/resetfullquerylog","tools/nodetool/resetlocalschema","tools/nodetool/resumehandoff","tools/nodetool/ring","tools/nodetool/scrub","tools/nodetool/setbatchlogreplaythrottle","tools/nodetool/setcachecapacity","tools/nodetool/setcachekeystosave","tools/nodetool/setcompactionthreshold","tools/nodetool/setcompactionthroughput","tools/nodetool/setconcurrency","tools/nodetool/setconcurrentcompactors","tools/nodetool/setconcurrentviewbuilders","tools/nodetool/sethintedhandoffthrottlekb","tools/nodetool/setinterdcstreamthroughput","tools/nodetool/setlogginglevel","tools/nodetool/setmaxhintwindow","tools/nodetool/setstreamthroughput","tools/nodetool/settimeout","tools/nodetool/settraceprobability","tools/nodetool/snapshot","tools/nodetool/status","tools/nodetool/statusautocompaction","tools/nodetool/statusbackup","tools/nodetool/statusbinary","tools/nodetool/statusgossip","tools/nodetool/statushandoff","tools/nodetool/stop","tools/nodetool/stopdaemon","tools/nodetool/tablehistograms","tools/nodetool/tablestats","tools/nodetool/toppartitions","tools/nodetool/tpstats","tools/nodetool/truncatehints","tools/nodetool/upgradesstables","tools/nodetool/verify","tools/nodetool/version","tools/nodetool/viewbuildstatus","tools/sstable/index","tools/sstable/sstabledump","tools/sstable/sstableexpiredblockers","tools/sstable/sstablelevelreset","tools/sstable/sstableloader","tools/sstable/sstablemetadata","tools/sstable/sstableofflinerelevel","tools/sstable/sstablerepairedset","tools/sstable/sstablescrub","tools/sstable/sstablesplit","tools/sstable/sstableupgrade","tools/sstable/sstableutil","tools/sstable/sstableverify","troubleshooting/finding_nodes","troubleshooting/index","troubleshooting/reading_logs","troubleshooting/use_nodetool","troubleshooting/use_tools"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.todo":1,sphinx:54},filenames:["architecture/dynamo.rst","architecture/guarantees.rst","architecture/index.rst","architecture/overview.rst","architecture/storage_engine.rst","bugs.rst","configuration/cassandra_config_file.rst","configuration/index.rst","contactus.rst","cql/appendices.rst","cql/changes.rst","cql/ddl.rst","cql/definitions.rst","cql/dml.rst","cql/functions.rst","cql/index.rst","cql/indexes.rst","cql/json.rst","cql/mvs.rst","cql/operators.rst","cql/security.rst","cql/triggers.rst","cql/types.rst","data_modeling/data_modeling_conceptual.rst","data_modeling/data_modeling_logical.rst","data_modeling/data_modeling_physical.rst","data_modeling/data_modeling_queries.rst","data_modeling/data_m
odeling_rdbms.rst","data_modeling/data_modeling_refining.rst","data_modeling/data_modeling_schema.rst","data_modeling/data_modeling_tools.rst","data_modeling/index.rst","development/ci.rst","development/code_style.rst","development/dependencies.rst","development/documentation.rst","development/gettingstarted.rst","development/how_to_commit.rst","development/how_to_review.rst","development/ide.rst","development/index.rst","development/patches.rst","development/release_process.rst","development/testing.rst","faq/index.rst","getting_started/configuring.rst","getting_started/drivers.rst","getting_started/index.rst","getting_started/installing.rst","getting_started/querying.rst","index.rst","new/index.rst","new/java11.rst","operating/audit_logging.rst","operating/backups.rst","operating/bloom_filters.rst","operating/bulk_loading.rst","operating/cdc.rst","operating/compaction.rst","operating/compression.rst","operating/hardware.rst","operating/hints.rst","operating/index.rst","operating/metrics.rst","operating/read_repair.rst","operating/repair.rst","operating/security.rst","operating/snitch.rst","operating/topo_changes.rst","plugins/index.rst","tools/cassandra_stress.rst","tools/cqlsh.rst","tools/index.rst","tools/nodetool/assassinate.rst","tools/nodetool/bootstrap.rst","tools/nodetool/cleanup.rst","tools/nodetool/clearsnapshot.rst","tools/nodetool/clientstats.rst","tools/nodetool/compact.rst","tools/nodetool/compactionhistory.rst","tools/nodetool/compactionstats.rst","tools/nodetool/decommission.rst","tools/nodetool/describecluster.rst","tools/nodetool/describering.rst","tools/nodetool/disableauditlog.rst","tools/nodetool/disableautocompaction.rst","tools/nodetool/disablebackup.rst","tools/nodetool/disablebinary.rst","tools/nodetool/disablefullquerylog.rst","tools/nodetool/disablegossip.rst","tools/nodetool/disablehandoff.rst","tools/nodetool/disablehintsfordc.rst","tools/nodetool/disableoldprotocolversions.rst","tools/nodetool/drain.rst","tools/nodetool/enableauditlog.rst","tools/nodetool/enableautocompaction.rst","tools/nodetool/enablebackup.rst","tools/nodetool/enablebinary.rst","tools/nodetool/enablefullquerylog.rst","tools/nodetool/enablegossip.rst","tools/nodetool/enablehandoff.rst","tools/nodetool/enablehintsfordc.rst","tools/nodetool/enableoldprotocolversions.rst","tools/nodetool/failuredetector.rst","tools/nodetool/flush.rst","tools/nodetool/garbagecollect.rst","tools/nodetool/gcstats.rst","tools/nodetool/getbatchlogreplaythrottle.rst","tools/nodetool/getcompactionthreshold.rst","tools/nodetool/getcompactionthroughput.rst","tools/nodetool/getconcurrency.rst","tools/nodetool/getconcurrentcompactors.rst","tools/nodetool/getconcurrentviewbuilders.rst","tools/nodetool/getendpoints.rst","tools/nodetool/getinterdcstreamthroughput.rst","tools/nodetool/getlogginglevels.rst","tools/nodetool/getmaxhintwindow.rst","tools/nodetool/getreplicas.rst","tools/nodetool/getseeds.rst","tools/nodetool/getsstables.rst","tools/nodetool/getstreamthroughput.rst","tools/nodetool/gettimeout.rst","tools/nodetool/gettraceprobability.rst","tools/nodetool/gossipinfo.rst","tools/nodetool/handoffwindow.rst","tools/nodetool/help.rst","tools/nodetool/import.rst","tools/nodetool/info.rst","tools/nodetool/invalidatecountercache.rst","tools/nodetool/invalidatekeycache.rst","tools/nodetool/invalidaterowcache.rst","tools/nodetool/join.rst","tools/nodetool/listsnapshots.rst","tools/nodetool/move.rst","tools/nodetool/netstats.rst","tools/nodetool/nodetool.rst","tools/nodetool/pausehandoff.rst","tools/nodetool/profileload.rst",
"tools/nodetool/proxyhistograms.rst","tools/nodetool/rangekeysample.rst","tools/nodetool/rebuild.rst","tools/nodetool/rebuild_index.rst","tools/nodetool/refresh.rst","tools/nodetool/refreshsizeestimates.rst","tools/nodetool/reloadlocalschema.rst","tools/nodetool/reloadseeds.rst","tools/nodetool/reloadssl.rst","tools/nodetool/reloadtriggers.rst","tools/nodetool/relocatesstables.rst","tools/nodetool/removenode.rst","tools/nodetool/repair.rst","tools/nodetool/repair_admin.rst","tools/nodetool/replaybatchlog.rst","tools/nodetool/resetfullquerylog.rst","tools/nodetool/resetlocalschema.rst","tools/nodetool/resumehandoff.rst","tools/nodetool/ring.rst","tools/nodetool/scrub.rst","tools/nodetool/setbatchlogreplaythrottle.rst","tools/nodetool/setcachecapacity.rst","tools/nodetool/setcachekeystosave.rst","tools/nodetool/setcompactionthreshold.rst","tools/nodetool/setcompactionthroughput.rst","tools/nodetool/setconcurrency.rst","tools/nodetool/setconcurrentcompactors.rst","tools/nodetool/setconcurrentviewbuilders.rst","tools/nodetool/sethintedhandoffthrottlekb.rst","tools/nodetool/setinterdcstreamthroughput.rst","tools/nodetool/setlogginglevel.rst","tools/nodetool/setmaxhintwindow.rst","tools/nodetool/setstreamthroughput.rst","tools/nodetool/settimeout.rst","tools/nodetool/settraceprobability.rst","tools/nodetool/snapshot.rst","tools/nodetool/status.rst","tools/nodetool/statusautocompaction.rst","tools/nodetool/statusbackup.rst","tools/nodetool/statusbinary.rst","tools/nodetool/statusgossip.rst","tools/nodetool/statushandoff.rst","tools/nodetool/stop.rst","tools/nodetool/stopdaemon.rst","tools/nodetool/tablehistograms.rst","tools/nodetool/tablestats.rst","tools/nodetool/toppartitions.rst","tools/nodetool/tpstats.rst","tools/nodetool/truncatehints.rst","tools/nodetool/upgradesstables.rst","tools/nodetool/verify.rst","tools/nodetool/version.rst","tools/nodetool/viewbuildstatus.rst","tools/sstable/index.rst","tools/sstable/sstabledump.rst","tools/sstable/sstableexpiredblockers.rst","tools/sstable/sstablelevelreset.rst","tools/sstable/sstableloader.rst","tools/sstable/sstablemetadata.rst","tools/sstable/sstableofflinerelevel.rst","tools/sstable/sstablerepairedset.rst","tools/sstable/sstablescrub.rst","tools/sstable/sstablesplit.rst","tools/sstable/sstableupgrade.rst","tools/sstable/sstableutil.rst","tools/sstable/sstableverify.rst","troubleshooting/finding_nodes.rst","troubleshooting/index.rst","troubleshooting/reading_logs.rst","troubleshooting/use_nodetool.rst","troubleshooting/use_tools.rst"],objects:{},objnames:{},objtypes:{},terms:{"000kib":195,"000mib":52,"00t89":22,"011mib":195,"018kib":195,"01t02":206,"021kib":195,"028809z":192,"031mib":195,"03t04":22,"054mib":195,"055z":192,"056kib":195,"061kib":195,"062mib":195,"063kib":195,"064kib":195,"0665ae80b2d711e886c66d2c86545d91":193,"06t22":206,"077mib":195,"078kib":195,"081kib":195,"082kib":195,"090kib":195,"092mib":195,"096gib":203,"0974e5a0aa5811e8a0a06d2c86545d91":195,"099kib":195,"0ee8b91fdd0":207,"0h00m04":207,"0x0000000000000000":208,"0x0000000000000003":14,"0x00000004":13,"0x00007f829c001000":208,"0x00007f82d0856000":208,"0x00007f82e800e000":208,"0x00007f82e80cc000":208,"0x00007f82e80d7000":208,"0x00007f82e84d0800":208,"0x2a19":208,"0x2a29":208,"0x2a2a":208,"0x2a2c":208,"0x3a74":208,"100b":70,"100k":70,"100mb":6,"1024l":53,"105kib":195,"10mb":6,"10s":[71,208],"10x":[6,58],"115mib":195,"11e6":71,"11e8":207,"122kib":195,"128kb":208,"128mib":6,"128th":4,"12gb":60,"12h30m":22,"130mib":195,"142mib":199,"147mib":195,"14t00":206,"150kib":195,"155kib":19
5,"15m":63,"160mb":58,"162kib":195,"165kib":195,"167kb":208,"16l":53,"16mb":[44,58],"16th":6,"173kib":195,"176kib":195,"17t06":206,"184kb":208,"19t03":[157,199],"1mo":22,"1n_r":28,"1st":22,"200m":[206,208],"203mib":195,"2062b290":207,"20m":208,"20t20":192,"217kb":208,"217mib":195,"22z":192,"232mib":195,"23t06":206,"23z":192,"244m":208,"245mib":195,"247mib":195,"24h":22,"25005289beb2":192,"250m":6,"251m":208,"253mib":195,"256mb":6,"256th":6,"258mib":195,"25mb":208,"265kib":195,"270mib":195,"27t04":206,"280mib":195,"28t17":206,"295kib":195,"299kib":195,"29d":22,"29t00":206,"2cc0":207,"2e10":10,"2gb":60,"2nd":[6,11,67],"2xlarg":60,"300mib":195,"300s":6,"307kib":195,"30kb":208,"30s":6,"30t23":206,"314kib":195,"322kib":195,"325kib":195,"327e":71,"32gb":60,"32mb":[6,44],"331mib":195,"333kib":195,"33m":206,"348mib":195,"353mib":203,"3578d7de":192,"35ea8c9f":207,"361kib":195,"366b":208,"36x":48,"370mib":195,"378711z":192,"383b":208,"384z":192,"385b":208,"386kib":195,"387mib":195,"388mib":195,"392kib":195,"392mib":195,"394kib":195,"3f22a07b2bc6":192,"3ff3e5109f22":13,"3gb":[59,208],"3ms":208,"3rd":[6,63,67],"401mib":195,"406mib":195,"40a7":207,"40f3":13,"40fa":207,"40s":208,"410kib":195,"412kib":195,"416mib":203,"41b52700b4ed11e896476d2c86545d91":196,"423b":208,"423kib":195,"4248dc9d790e":192,"431kib":195,"43kb":208,"440kib":195,"443kib":195,"449mib":195,"452kib":195,"457mib":195,"458mib":195,"461mib":195,"465kib":195,"46e9":207,"476mib":195,"481mib":195,"482mib":199,"48d6":192,"4ae3":13,"4d40":192,"4f3438394e39374d3730":196,"4f58":207,"4kb":11,"4mib":6,"4xlarg":60,"500m":208,"501mib":195,"50kb":[6,208],"50m":208,"50mb":[6,53,58,200],"50th":204,"512mb":6,"512mib":6,"513kib":195,"521kib":195,"524kib":195,"536kib":195,"543mib":195,"545kib":195,"54kb":208,"550mib":195,"5573e5b09f14":13,"559kib":195,"561mib":195,"563kib":195,"563mib":195,"56m":206,"571kib":195,"576kb":208,"5850e9f0a63711e8a5c5091830ac5256":201,"591mib":195,"592kib":195,"5gb":53,"5kb":6,"5level":53,"5mb":58,"603kib":195,"606mib":195,"61111111111111e":196,"613mib":195,"619kib":195,"61de":207,"635kib":195,"6365332094dd11e88f324f9c503e4753":[194,197,199,200,202,203],"638mib":195,"640kib":195,"646mib":195,"64k":6,"64kb":59,"650b":208,"65c429e08c5a11e8939edf4f403979ef":[192,194],"65kb":208,"663kib":195,"665kib":195,"669kb":208,"684mib":195,"688kib":195,"690mib":195,"6e630115fd75":207,"6gb":207,"6ms":6,"6tb":60,"701mib":195,"715b":208,"718mib":195,"71b0a49":206,"725mib":195,"730kib":195,"732mib":195,"734mib":195,"736kb":208,"7374e9b5ab08c1f1e612bf72293ea14c959b0c3c":37,"737mib":195,"738mib":195,"743kib":195,"744mib":195,"751mib":195,"752e278f":207,"75th":63,"771mib":195,"775mib":203,"780mib":195,"782kib":195,"783522z":192,"789z":192,"791mib":195,"793kib":195,"798mib":195,"79kb":208,"7f3a":207,"802kib":195,"812mib":195,"813kib":195,"814kib":195,"832mib":195,"835kib":195,"840kib":195,"843mib":195,"845b":208,"846kib":195,"848kib":195,"84fc":192,"861mib":195,"86400s":58,"869kb":208,"872kib":195,"877mib":195,"880mib":195,"882kib":195,"889mib":195,"892kib":195,"894mib":195,"89h4m48":22,"8gb":[60,208],"8th":[6,57],"903mib":195,"90th":63,"911kib":195,"920kib":195,"920mib":195,"9328455af73f":207,"938kib":195,"954kib":195,"957mib":195,"95ac6470":71,"95th":63,"965kib":195,"9695b790a63211e8a6fb091830ac5256":201,"974b":207,"975kib":195,"983kib":195,"98th":63,"993mib":195,"996kib":195,"99percentil":11,"99th":[63,204],"9dc1a293":207,"9e6054da04a7":207,"9gb":208,"9th":63,"\u00eatre":9,"abstract":[33,38],"boolean":[9,12,14,17,20,22,28,29,71],"break":[11,31,41,
58,201,205,208],"byte":[4,6,9,13,22,28,63,80,98,134,183,195,207],"case":[4,6,10,11,12,13,14,16,17,18,22,24,26,27,28,34,37,38,41,43,44,55,60,66,68,70,71,195,206,207,208],"catch":[33,197],"class":[6,11,14,22,29,33,39,43,52,53,58,59,62,66,70,135,147,168,195,206],"default":[4,6,10,11,13,14,17,18,20,22,27,32,39,43,44,45,48,53,55,57,58,59,63,65,66,68,70,71,75,94,98,105,134,135,137,140,150,151,157,172,173,184,192,195,196,200,204,206,207,208],"enum":9,"export":[39,52,63,71,208],"final":[14,20,24,28,33,35,39,42,53,57,58,60,66,72,151,191,208],"float":[9,10,11,12,14,17,19,22,55,59],"function":[6,9,10,11,12,15,16,18,20,22,28,38,46,50,53,66,67,69,71,191],"goto":32,"import":[11,14,22,23,24,27,39,40,43,45,58,60,63,65,71,135,204,207,208],"int":[4,9,10,11,13,14,17,18,19,20,22,43,57,59,63],"long":[4,6,13,22,27,37,38,44,52,58,63,70,198,199,206,208],"new":[0,4,6,10,11,14,16,17,18,19,20,21,22,24,27,28,32,33,35,36,38,39,41,42,43,47,50,52,53,55,58,60,66,68,70,126,133,135,194,195,197,199,202,204],"null":[9,10,12,13,14,17,18,22,33,71],"public":[6,14,33,34,42,43,44,48,66,67],"return":[6,9,11,13,14,16,17,18,19,20,22,24,27,38,70,150,193,194,208],"short":[4,22,25,28,35],"static":[6,9,10,11,18,24,25,28,35,63,67,196],"super":[4,24,66,69,70],"switch":[4,6,10,20,24,39,44,62,63,66,67],"throw":[6,14,33,43,204],"transient":[6,11],"true":[6,11,12,17,20,22,27,28,39,44,52,53,57,58,66,68,71,132,135,201],"try":[6,11,24,26,27,33,34,39,41,44,52,53,58,65,150,195,207,208],"var":[4,6,33,48,192,193,194,195,196,197,198,199,200,201,202,203,206,208],"void":43,"while":[4,6,10,11,12,13,22,25,28,30,37,41,42,55,58,59,60,65,66,71,195,204,206,207,208],AES:6,AND:[9,11,13,14,18,20,29,66,70,71,206],AWS:60,Added:10,Adding:[6,11,20,22,28,44,50,62,66],And:[11,14,20,66],Are:38,Ave:22,BUT:33,But:[13,20,22,27,32,33,41,44,71],CAS:[6,207],CCS:208,CFs:[150,157],CLS:71,CMS:208,DCs:6,DNS:44,Doing:[10,72,191],EBS:60,For:[0,4,6,9,10,11,12,13,14,15,16,17,18,20,21,22,23,24,25,26,28,36,41,42,43,44,45,48,49,53,58,60,65,66,67,70,71,195,196,197,200,204,206,207,208],GCs:6,HDs:208,Has:38,IDE:[30,36,40,50],IDEs:[30,39,40],IDs:[25,27,135,174],INTO:[6,9,11,13,14,17,22],IPs:[6,67,156,174],Ids:180,JKS:6,JPS:208,KBs:6,LCS:[11,196],LTS:52,NFS:60,NOT:[6,9,10,11,13,14,16,18,20,21,22],NTS:6,N_s:28,Not:[13,20,41,52,58,59],ONE:[0,6,11,63,70,71],One:[24,27,41,43,44,52,58,208],PFS:6,Pis:60,QPS:204,Such:22,THE:6,TLS:[6,62,195],That:[0,11,12,18,22,27,41,44,58,71,208],The:[0,4,6,8,9,10,11,12,14,16,18,19,20,21,22,23,24,25,26,27,28,29,30,32,33,35,36,37,39,41,42,43,44,45,48,49,50,52,55,57,59,60,63,65,66,67,68,69,70,71,75,78,83,85,91,95,101,104,105,108,113,117,119,121,126,133,135,137,141,142,148,150,157,160,161,168,173,174,175,182,184,187,188,190,194,195,196,197,199,200,201,202,205,206,207,208],Their:22,Then:[13,43,44,48,58,66,197,201,208],There:[6,10,11,12,13,14,22,25,27,28,30,39,41,43,44,58,63,65,66,70,198,200,204,207,208],These:[4,6,11,14,30,39,63,65,66,70,71,202,204,205,206,207,208],USE:[9,14,15,53],USING:[9,13,16,21,22,58],Use:[6,11,13,20,24,25,44,49,50,57,62,66,70,71,72,73,78,135,140,150,180,187,191,192,197,198,199,202,205],Used:[23,24,25,26,27,28,29,30,63,208],Useful:208,Uses:[6,17,62,67],Using:[11,13,35,43,44,51,66,72,191,192,195,199,202,206],WILL:6,WITH:[9,11,12,16,18,20,29,55,57,58,59,66,70,71],Will:[6,50,98,135,168,197],With:[6,13,17,44,58,65,68,74,206,208],Yes:44,_build:52,_build_java:52,_by_:24,_cache_max_entri:66,_cdc:57,_development_how_to_review:36,_if_:6,_main:52,_must_:6,_only_:206,_trace:[63,207],_udt:14,_update_interval_in_m:66,_use:14,_validity_in_m:66,_x86_64_:208,a27
8b781fe4b2bda:48,a6fd:207,abbrevi:42,abil:[14,20,30,44,59],abilityid:16,abl:[0,6,14,22,25,26,28,32,35,39,43,44,58,66,204,205],abort:32,about:[4,6,20,23,24,26,27,28,29,35,39,40,41,43,44,55,58,67,71,77,135,156,196,206,207,208],abov:[4,6,8,11,12,13,14,22,34,39,41,42,44,58,63,70,72,191,195,202,208],absenc:12,abstracttyp:22,ac79:207,acceler:69,accept:[0,6,10,11,12,13,17,41,43,55,59,68,93,135],access:[6,10,20,22,24,26,30,39,41,53,60,62,63,69,195,196,204,205,208],accident:[24,194],accompani:6,accomplish:26,accord:[4,6,24,44],accordingli:[6,14,44],account:[6,22,35,42,43,208],accru:[58,63],accumul:[6,58,63],accur:[6,24,28,44,55,68,156,196],accuraci:[55,137,184],acheiv:66,achiev:[6,58,63],achil:46,ack:[4,6],acknowledg:25,acoount:63,acquir:[20,63],across:[6,11,20,25,27,28,41,63,65,66,67,70,135,139,196,203],act:[26,28,206],action:[6,13,20,39,203,208],activ:[4,6,30,36,41,57,63,65,71,135,137,184,204,206,207,208],activetask:63,actor:66,actual:[4,6,13,21,28,30,33,35,38,42,44,48,53,58,65,67,70,150,199,208],acycl:20,adapt:[23,24,25,26,27,28,29,30],add:[0,6,9,10,11,22,24,25,28,32,35,36,37,38,40,41,42,45,48,50,52,53,58,65,66,70,197,202,206],addamsfamili:11,added:[0,4,6,10,11,14,19,24,35,38,57,58,200],adding:[6,13,14,24,28,38,60,71,197,203],addit:[0,6,9,11,13,19,20,22,24,25,26,27,28,39,41,45,53,58,60,63,66,71,206,208],addition:[11,13,32,58,65,70,206],additional_write_polici:11,address:[6,8,17,22,25,26,29,32,39,41,45,50,52,53,63,67,68,69,71,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,207,208],addsign:42,adher:10,adjac:58,adjust:[6,25,55],adler32:4,adv:48,advanc:[6,35,53,62,66,205],advantag:[27,60],advers:[44,207],advic:[41,44],advis:[6,12,18,22,44],aefb:192,af08:13,afd:22,affect:[11,24,38,41,44,58,157,199,204,208],afford:6,afraid:27,after:[5,6,10,11,12,13,14,16,17,18,24,25,27,39,41,42,44,52,57,58,60,62,63,66,67,71,197,198,201],afterward:[32,35,39,43],afunct:14,again:[6,25,41,42,52,58,65,68,71,198,201],against:[6,11,14,18,27,30,35,41,43,44,60,65,68,70,71,150,196,208],agent:[24,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,208],agentlib:39,aggreg:[6,9,10,13,15,18,20,53,63,71],aggress:204,ago:198,aid:12,aim:[6,206],akeyspac:14,alg:195,algorithm:[6,11,68,195,206],alia:[10,13,14,46],alias:[6,10,18],alic:20,align:33,aliv:6,all:[0,4,6,9,11,12,13,14,17,18,19,22,23,24,25,26,27,28,29,30,32,33,34,35,37,38,39,41,43,50,52,53,55,57,58,63,65,66,68,70,71,72,75,76,77,93,105,110,126,127,132,135,137,139,148,151,157,172,173,175,184,186,187,188,191,193,197,199,203,204,206,207,208],allmemtableslivedatas:63,allmemtablesoffheaps:63,allmemtablesonheaps:63,alloc:[6,44,52,57,60,63],allocate_tokens_for_keyspac:68,allow:[0,4,6,9,10,11,12,14,16,17,18,22,23,24,27,32,35,36,45,55,57,58,59,60,65,67,70,199,207,208],allowallauthent:[6,66],allowallauthor:[6,66],allowallinternodea
uthent:6,allowallnetworkauthor:6,almost:[4,6,14,22,58,204,208],alon:33,along:[6,13,27,36,42,52,53,132,135,206],alongsid:[49,71],alpha3:[11,22,50],alphabet:33,alphanumer:[11,20],alreadi:[6,11,14,16,18,22,24,26,27,41,44,52,58,66,70,72,187,191,200],also:[0,4,6,10,11,12,13,14,17,18,20,22,23,24,25,26,28,30,32,35,39,41,42,43,44,45,52,53,57,58,60,63,66,68,71,105,188,201,202,206,207,208],alter:[0,9,10,15,17,28,44,55,57,58,59,66],alter_keyspace_stat:12,alter_role_stat:12,alter_table_instruct:11,alter_table_stat:12,alter_type_modif:22,alter_type_stat:[12,22],alter_user_stat:12,altern:[10,11,12,13,17,22,39,41,45,60,66,195],although:[6,27,28,41,70,206,208],alwai:[0,4,6,9,10,11,13,14,18,22,27,33,35,41,42,43,44,58,60,70,204,208],amazonaw:52,amen:[24,26,27,29],amend:37,amenities_by_room:[24,29],amenity_nam:29,amongst:11,amount:[6,11,13,22,28,39,41,43,44,58,59,60,63,65,68,71,150,208],amplif:[58,60],anaggreg:14,analogu:13,analysi:[24,28,205,206],analyt:[24,55],analyz:[25,28,43,208],ancestor:[4,202],ani:[0,4,6,10,11,12,13,14,17,18,20,21,22,24,25,27,30,32,34,35,37,38,39,41,42,43,45,48,50,52,58,60,63,65,66,68,70,71,73,126,135,140,157,172,192,196,199,201,202,205,206,207],annot:33,anonym:[12,22,53],anoth:[6,11,14,20,22,24,27,28,43,58,66,71,193,200,205,208],anotherarg:14,answer:[40,208],ant:[32,34,39,41,43,52],antclassload:43,anti:[6,22,31],anticip:[0,11],anticompact:[58,63,180,200],anticompactiontim:63,antientropystag:[63,207],antipattern:60,anymor:[37,58],anyon:33,anyth:58,anywai:[6,52],anywher:[13,57],apach:[2,5,6,7,14,21,30,33,34,35,37,38,40,41,42,43,44,47,48,52,53,58,59,63,66,69,72,192,193,194,195,196,197,198,199,200,202,203,206],apart:53,api:[6,8,17,49,67],appar:24,appear:[6,11,12,14,24,58,71],append:[4,22,24,37,53,60,63,71,206],appendic:[15,50],appendix:[12,15],appl:22,appli:[4,6,9,10,11,12,13,20,22,37,41,43,44,63,68,70,71],applic:[0,6,11,20,23,24,25,27,28,29,31,33,36,38,39,50,52,53,59,66,70,206],appreci:41,approach:[4,24,25,26,27,28,58,68],appropri:[6,11,20,22,38,41,42,66,67,68,206],approv:32,approxim:[28,58,63,196],apt:[48,208],arbitrari:[11,12,22,70],architectur:[25,44,50,69],archiv:[4,6,42,57,98],archive_command:98,archive_retri:98,area:[36,208],aren:[13,65],arg:[14,135,192,196,202],argnam:14,argnum:14,argument:[6,11,13,14,16,17,44,45,59,70,71,73,74,75,76,78,83,85,91,95,101,104,105,108,110,113,117,119,121,125,126,133,137,140,141,142,148,149,150,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,180,182,183,184,186,187,188,190],arguments_declar:14,arguments_signatur:14,arithmet:[10,12,15,50],arithmetic_oper:12,armor:42,around:[6,20,24,27,58,60,67,208],arrai:[6,23,44],arriv:[6,41,44],arrow:26,artem:24,artifact:[26,34,39,40],artifici:11,asap:10,asc:[9,11,13,29],ascend:[11,13],ascii:[9,14,17,22],asdf:192,asf:[8,39,42],ask:[5,28,41,42,43,50,66],aspect:11,assassin:135,assertionerror:33,assertrow:43,assess:[207,208],assign:[6,13,20,25,27,44],assist:24,associ:[6,11,26,69,202,204],assum:[6,11,14,25,26,27,28,35,39,66,67,204,208],assumpt:66,asterisk:25,astyanax:46,async:[6,66],asynchron:[16,44,60],asynchroni:63,ata:27,atabl:14,atom:[11,13,21,37],atomiclong:63,attach:[36,41,66,69,208],attack:66,attemp:63,attempt:[0,6,11,16,18,20,22,24,44,53,58,63,65,66,71,72,151,191,201,206],attent:[33,34,41,42],attract:24,attribut:[23,24,25,53,58],audienc:0,audit:[6,27,84,94,135],audit_log:6,audit_logging_opt:53,audit_logs_dir:53,auditlog:94,auditlogview:53,audt:53,aug:201,auth:[6,53,195],authent:[10,53,62,71,195],authenticatedus:6,author:[9,20,22,35,41,62,70],authorizationproxi:66,authprovid:195,auto
,partition:[4,10,13,14,65,71,82,135,150,196],partition_kei:[11,13],partitionspercounterbatch:63,partitionsperloggedbatch:63,partitionsperunloggedbatch:63,partitionsvalid:63,partli:13,pass:[38,41,45,71,195,196,207,208],password:[6,9,13,20,70,71,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,195],password_a:20,password_b:20,passwordauthent:[6,66],passwordfilepath:[73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190],past:[24,63],patch:[10,13,32,33,35,37,38,40,43,50],path1:53,path2:53,path:[5,6,16,27,38,48,52,55,58,59,60,63,66,69,71,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,192,193,195,199,201,203,206,208],pathn:53,patter:20,pattern:[6,20,22,23,29,30,31,53,204,207,208],paus:[6,44,135,136,206,208],pausehandoff:135,paxo:[13,63,71],pcap:208,peak:[195,208],peer:[6,20,52,63,208],peerip:63,pem:52,penalti:[6,13],pend:[6,58,63,135,149,207],pending_flush:183,pendingcompact:63,pendingflush:63,pendingrangecalcul:[63,207],pendingtask:63,pendingtasksbytablenam:63,pennsylvania:22,peopl:[35,41,42,44],per:[0,4,6,10,11,13,28,33,37,41,44,55,57,58,59,63,66,70,71,135,158,166,192,195,202,204,206,207,208],percent:63,percent_repair:183,percentag:[6,63,67,208],percentil:[63,204,207,208],percentrepair:63,perdiskmemtableflushwriter_0:[63,207],perf:208,perfdisablesharedmem:208,perfect:14,perfectli:27,perform:[6,11,13,20,22,24,25,27,28,30,36,37,38,40,41,44,45,53,55,58,60,63,65,66,67,71,150,206,207,208],perhap:[24,28,204,206],period:[4,6,32,60,63,65,66,68,135,137,208],perl:47,perman:[11,44,58,60,206],permiss:[6,9,12,23,24,25,26,27,28,29,30,43,66],permit:[6,20,57,66],persist:[4,44,52,55,57,60,66,208],person:208,perspect:[24,26,44],pet:22,peter:23,pgp:42,pgrep:48,phantom:46,phase:[68,71,207],phi:6,phone:[13,22,25,29],phone_numb:29,php:47,physic:[0,6,11,28,29,30,31,44,50,60,67],pick:[6,37,41,44,58,66,68,70,140],pid:[44,48,208],piec:[12,58,63],pile:6,pin:[6,67],ping:[41,208],pkcs5pad:6,pkill:48,place:[5,6,16,21,23,33,37,41,57,58,63,65,66,71,135,142,195,200,206,208],placehold:[14,71],plai:[14,22],plain:4,plan:[11,28,37,41],plane:35,platform:[20,32,69],platter:[6,60],player:[14,22],playorm:46,pleas:[5,6,11,13,14,22,32,33,35,39,42,43,44,52,53,66,70,203],plu:[14,28,58,63,207],plug:[6,28,32],pluggabl:[20,66],plugin:[30,50,63],pmc:42,poe:22,poi:[26,29],poi_nam:[24,29],point:[4,6,10,17,22,23,24,26,27,30,33,35,39,42,50,66,70,71,113,135,195,204,208],pointer:14,pois_by_hotel:[24,2
9],polici:[6,41,42,66,188,204],poll:66,pom:40,pool:[6,48,52,63,135,163,185,207,208],poorli:24,pop:70,popul:[11,18,70],popular:[23,24,30,39,60],port:[6,39,45,50,53,63,66,70,71,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,195,208],portion:[24,60,71,200],posit:[4,6,10,11,19,22,55,63,68,192,196],possbili:6,possess:20,possibl:[0,6,10,11,13,14,17,20,22,28,32,38,41,43,44,55,58,60,63,66,68,70,197,204,206],post:[13,32,40,135,160],post_at:13,postal_cod:29,posted_at:13,posted_bi:11,posted_month:11,posted_tim:11,potenti:[0,6,9,11,12,14,24,28,38,53,58,60,66,68,157,197,199],power8:69,power:[6,69],pr3z1den7:22,practic:[0,6,11,12,13,27,29,30,42,62,66],pre:[6,17,22,60,66,199,200,202],preced:[19,44,70],precis:[10,17,22,58,196],precondit:63,predefin:11,predict:[13,28,197],prefer:[0,6,11,12,22,27,33,41,66,67],preferipv4stack:39,prefix:[11,12,22,196,202],prepar:[6,14,15,53,63],prepare_releas:42,preparedstatementscount:63,preparedstatementsevict:63,preparedstatementsexecut:63,preparedstatementsratio:63,prepend:22,prerequisit:[40,47],presenc:6,presens:4,present:[12,13,18,30,45,57,63,66,199,208],preserv:[6,11,17,20,27,28],preserveframepoint:208,press:48,pressur:[6,63,207,208],presum:27,pretti:[24,28,71,208],prevent:[6,11,43,57,63,65,195,199,208],preview:[35,65,150],previou:[6,10,11,22,28,42,53,58,65,68,201],previous:[6,24,28,202],previsouli:[101,135],price:27,primari:[9,10,11,13,14,22,24,25,27,28,29,43,57,58,59,65,66,68,70],primarili:[6,11],primary_kei:[11,18],print0:198,print:[65,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,206],prio:208,prior:[6,13,20,22,68],prioriti:[41,208],privat:[6,33,42,66,67],privileg:[20,48,66],probabilist:[55,59],probabl:[0,4,6,11,24,43,55,58,65,122,135,172,206,207,208],problem:[5,6,14,24,27,37,38,42,44,66,204,205,207,208],problemat:[22,204],proc:[6,44],proce:[38,59,68,204],procedur:[13,66],proceed:203,process:[0,6,14,24,25,32,34,36,37,38,39,40,41,43,44,48,50,57,59,60,63,65,66,68,69,71,74,110,135,136,155,163,194,195,199,201,202,203,206,207,208],prod_clust:71,produc:[13,14,26,36,58,98,204],product:[0,6,11,27,28,34,36,41,44,52,60,67],profil:[13,39,72,135,137,208],profileload:135,program:[14,43,205,208],programmat:198,progress:[33,37,41,42,55,62,70,72,135,190,191,202,207],project:[26,32,33,34,35,36,43,63],promin:11,promot:4,prompt:71,propag:[6,11,14,33,38,67],proper:[11,22,35,44,66],properli:[6,27,38],properti:[4,6,11,18,20,25,39,42,47,57,58,66,67,68],propertyfilesnitch:[6,67],proport:[6,13],proportion:[6,107,135,158],propos:[6,24,42,63],protect:[6,24,60,65,66,202],protocol:[6,38,44,49,53,63,66,71,77,87,92,97,102,135,177,195,206,208],prove:[25,208],provid:[0,4,5,6,11,12,13,14,15,17,22,24,27,28,30,39,41,42,49,53,57,58,59,60,63,65,66,67,68,69,70,72,134,135,145,149,195,196,197,200,202,203,204,206],pr
ovis:208,proxim:[6,67],proxyhistogram:[135,207],prtcl:195,prv:[65,150],ps1:66,ps22dhd:13,pt89h8m53:22,publish:[23,24,25,26,27,28,29,30,34],published_d:70,pull:[27,35,43,58,63,150],pure:208,purg:60,purpos:[11,12,13,22,27,60,66],push:[37,41,42,63],put:[15,24,28,41,45,58,68,126,150,197,207],pwd:42,pwf:[73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190],python:[14,32,41,43,47,48,71],pytz:72,qos:53,quak:[14,22],qualifi:[11,14,41,207],qualiti:[35,66],quantiti:[22,207],queri:[0,6,10,11,12,13,14,16,18,20,24,25,28,29,30,31,47,50,53,58,63,69,70,71,88,98,135,153,205,208],queryvalidationexcept:204,question:[8,20,27,40,50,208],queu:[6,63],queue:[6,24,53,63,98,207,208],quick:[24,126,188,203],quickli:[24,44,58,207],quill:46,quintana:22,quit:[28,58,71,195,207,208],quorum:[0,11,66,71,204],quot:[9,10,11,12,14,17,20,70,71],quotat:20,quoted_identifi:12,quoted_nam:11,r_await:208,r_k:28,race:[22,37],rack1:6,rack:[0,6,66,67,204,207],rackdc:[6,67],rackinferringsnitch:[6,67],raid0:60,raid1:60,raid5:60,rain:12,rais:[6,12,44,204],raison:9,ram:[55,59,60,208],ran:198,random:[11,14,44,68],randomli:[6,68],randompartition:[6,13,14],rang:[2,6,10,11,13,22,24,26,38,58,62,63,65,70,71,78,83,121,135,140,150,171,204,207],range_slic:[63,207],rangekeysampl:135,rangelat:63,rangemov:68,rangeslic:63,rapid:60,rapidli:208,rare:[10,27,55,204],raspberri:60,rate:[6,11,23,26,63,66,70,71,195,208],ratebasedbackpressur:6,ratefil:71,rather:[6,13,24,26,44,57,58,60,70],ratio:[6,59,60,63,70,196],ration:4,raw:[4,6,14,72,191,206],rdbm:[31,50],reacah:53,reach:[4,6,11,28,41,44,57,58,197],read:[0,6,11,13,22,24,29,33,35,38,43,44,47,50,55,58,59,60,62,63,66,67,70,71,121,171,183,188,195,196,203,204,206,207,208],read_ahead_kb:208,read_lat:183,read_repair:[0,11,63,207],read_request_timeout:44,readabl:[11,25,53,57,80,134,183,207],readi:[0,11,24,29,35,41,66],readlat:[63,204],readm:[35,42],readrepair:63,readrepairstag:[63,207],readstag:[63,207],readtimeoutexcept:204,readwrit:66,real:[4,8,11,23,25,27,33,44,69,206],realclean:34,realis:70,realiz:[27,58],realli:[6,27,28,43,194,198,204,208],realtim:57,reappear:65,reason:[0,4,6,11,13,14,15,18,27,28,44,45,48,58,60,65,66,68,207,208],rebas:35,rebuild:[0,55,58,59,63,135,141,157],rebuild_index:135,receiv:[6,14,18,41,44,58,60,68,204,208],recent:[6,41,43,52,60,77,197,202],reclaim:[53,58],recogn:[13,28,39,41,52],recommend:[4,6,11,22,27,35,44,52,53,60,66,68,206],recompact:58,recompress:59,reconcil:11,reconnect:66,reconstruct:197,record:[4,6,11,13,19,22,23,24,26,27,28,41,53,58,63,70,208],recov:[6,44,58],recoveri:6,recreat:[20,71],rectangl:23,recurs:98,recv:48,recycl:[4,6,63],redhat:[42,52],redirect:53,redistribut:[6,206],redo:41,reduc:[4,6,25,28,36,44,58,59,65,72,81,107,135,150,158,191],reduct:6,redund:[0,33,38,41,60],reenabl:[97,99,100,135],ref:[42,53,192,193,194,195,196,197,198,199,200,202,203],refer:[6,11,12,13,14,22,24,25,26,27,28,32,33,34,43,44,48,49,53,68,70,71,204,206],referenc:[6,26,28,70],refin:[29,31,50],reflect:[57,58,192],refresh:[6,66,71,135,143],refreshsizeestim:135,refus:50,regard:[11,13],regardless:[0,6,20,41,208],regener:55,regexp:12,region:[6,67],regist:22,registri:66,regress:[38,43],regular:[9,12,28,35,39,43,44,63,71],regul
arcolumn:196,regularli:65,regularstatementsexecut:63,regularupd:70,reilli:[23,24,25,26,27,28,29,30],reinforc:30,reinsert:[157,199],reject:[6,13,44,57,66,204],rel:[6,22,25,28,71,208],relat:[8,10,12,13,23,24,27,28,30,34,39,41,58,63,70,196,204,208],relationship:[6,23,24,27],releas:[6,10,27,28,34,40,41,48,50,52,71,208],relev:[13,20,22,41,59,66,69,195,196,199,208],relevel:[72,191],reli:[6,14,22,24,44],reliabl:[36,58],reload:[6,62,135,144,145,146,147],reloadlocalschema:135,reloadse:135,reloadssl:[66,135],reloadtrigg:135,reloc:[135,148,180,206],relocatesst:135,remain:[6,13,14,20,22,37,58,63,65,68,183,207],remaind:[17,19,59],remeb:53,remedi:58,rememb:[24,25,28,53,204],remind:24,remot:[0,4,35,37,39,50,58,66,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,204],remov:[0,4,6,10,11,12,13,14,15,17,18,20,22,34,38,44,50,57,62,66,73,76,105,135,149,197,202,203,208],removenod:[68,73,135],renam:[9,22],render:35,reorder:6,repair:[0,4,6,11,18,44,50,59,62,63,67,68,72,126,135,151,168,188,191,196,199,203,207],repair_admin:135,repairedat:198,repairpreparetim:63,repairtim:63,repeat:[12,48,59,66],replac:[0,6,9,14,20,22,34,38,42,44,50,58,62,65,98,201,202],replace_address_first_boot:68,replai:[0,4,22,60,63,107,135,152,158,196],replaybatchlog:135,repli:36,replic:[2,6,11,28,29,50,58,60,65,66,68,70,73,135],replica:[0,6,11,13,28,44,58,63,65,67,68,81,117,135,204,207,208],replication_factor:[0,11,29,66,70],repo:[34,37,39,42],repodata:42,repomd:42,report:[6,27,34,40,41,50,62,204],report_writ:20,reportfrequ:71,repositori:[5,8,32,34,35,36,39,41,43,48,69],repres:[6,10,17,20,22,23,24,25,26,27,28,44,58,63,66,67,70,71,196,206],represent:[10,17,25,192],reproduc:[25,36],reproduct:36,request:[0,6,13,20,21,35,43,44,53,55,58,60,62,66,67,71,135,172,187,203,204,207,208],request_respons:[63,207],requestresponsest:207,requestresponsestag:[63,207],requesttyp:63,requir:[0,6,11,13,14,20,24,27,28,33,35,37,38,39,40,41,42,44,55,59,60,66,70,194,195,198,201],require_client_auth:6,require_endpoint_verif:6,resampl:6,reserv:[6,10,12,15,23,26,27,28,29,30,31,208],reservations_by_confirm:29,reservations_by_guest:[24,29],reservations_by_hotel_d:29,reservoir:204,reset:[6,13,35,135,154,168,194],reset_bootstrap_progress:68,resetfullquerylog:135,resetlocalschema:135,resid:[6,13,44,63,208],resolut:[6,13,40,44],resolv:[34,37,44,156,174],resort:[73,135],resourc:[20,66,195,207],resp:14,respect:[6,10,11,14,32,34,48,65,67,98,206],respond:[0,6,12,208],respons:[0,6,20,25,44,63,68,207],ressourc:22,rest:[6,11,12,22,32,38,68,204],restart:[44,58,66,68,135,142,160,194,206],restor:[58,68,71,195,201,202],restrict:[6,10,11,13,18,25,65],restructuredtext:35,result:[0,6,10,11,12,14,17,20,22,25,27,28,30,36,41,44,58,63,65,71,191,192,193,194,195,196,197,198,199,200,201,202,203,208],resum:[74,135,155],resumehandoff:135,resurrect:58,resync:[135,154],retain:[20,44,53,58,199,201],retent:27,rethrow:33,retir:35,retri:[0,6,11,22,63,98],retriev:[11,13,20,24,34],reus:[28,38],reveal:28,revers:[11,13,24],revert:206,review:[11,33,35,40,41,43,50],revis:[25,70],revok:[9,66],revoke_permission_stat:12,revoke_role_stat:12,rewrit:[55,58,59,72,135,157,187,191,199],rewritten:[60,157,199],rfc:[14,22],rhel:50,rich:[22,206
],rid:34,rider:22,riderresult:22,right:[6,19,23,24,25,26,27,28,29,30,39,42,44,65,71,207,208],ring:[2,6,50,52,65,66,68,71,131,133,135,168,195,204],rise:204,risk:[11,24,58],riski:58,rmb:208,rmem_max:6,rmi:[44,66],rogu:14,role:[6,9,10,12,15,62],role_a:20,role_admin:20,role_b:20,role_c:20,role_manag:66,role_nam:20,role_opt:20,role_or_permission_stat:12,role_permiss:6,roll:[44,53,66,98],roll_cycl:[53,98],rollcycl:53,rollingfileappend:53,rollingpolici:53,rollov:53,romain:22,room:[5,8,23,24,26,27,28,29,42],room_id:28,room_numb:[28,29],root:[6,37,41,48,52,203,206],rotat:[6,206],roughli:6,round:[13,24,58,63],rout:[6,67],routin:208,row:[0,4,6,10,11,13,14,15,17,18,24,28,43,49,55,59,60,63,70,71,72,105,126,130,135,157,159,160,191,196,199,203,208],rowcach:[50,63],rowcachehit:63,rowcachehitoutofrang:63,rowcachemiss:63,rowindexentri:63,rows_per_partit:11,rpc:[6,63],rpc_timeout_in_m:[121,171],rpm:42,rpmsign:42,rrqm:208,rsc:188,rst:35,rubi:[14,47],rule:[6,12,14,41,44,204,206],run:[4,5,6,12,22,24,28,30,32,34,37,39,41,42,44,45,48,52,58,60,63,65,66,68,69,70,72,126,150,191,194,195,196,198,200,201,205,206,207,208],runnabl:208,runtim:[6,18,28,47,52,115,135],runtimeexcept:33,rust:47,s_j:28,s_t:28,safe:[14,22,58,66,208],safeguard:60,safepoint:206,safeti:[11,58,68],sai:50,said:[11,41,44,135,187,208],same:[0,4,5,6,11,12,13,14,15,17,18,19,20,22,24,25,27,35,37,39,41,45,50,55,58,63,65,66,67,68,70,150,197,202,204,206,208],samerow:70,sampl:[4,6,12,14,25,63,70,71,98,135,137,139,184],sampler:[63,137,184,207],san:60,sandbox:[6,14],sasi:6,satisfi:[0,11,24,27,33,60,63,68],satur:[6,63,207,208],save:[6,13,22,32,34,44,45,55,59,60,68,70,135,160],saved_cach:6,saved_caches_directori:45,sbin:44,scala:[14,47],scalabl:[42,69],scalar:15,scale:[43,59,69,70],scan:[6,13,24,55,63],scenario:37,scene:44,schedul:[6,32],schema:[9,11,14,17,30,31,50,63,66,70,71,82,135,144,154,194,196],schema_own:20,scientif:24,scope:[20,25,53,63,66],score:[6,14,22,67],script:[6,14,30,32,39,42,43,52,72,98,191,192,193,194,195,196,197,199,200,201,202,203,208],scrub:[55,58,59,63,72,135,180,191],sda:208,sdb:208,sdc1:208,sdc:208,search:[24,27,28,41,69,206],searchabl:208,second:[6,11,12,13,22,24,27,28,44,52,57,60,66,70,71,72,135,158,166,191,204,206,207,208],secondari:[10,12,13,15,25,27,50,58,63,69,135,141],secondary_index_stat:12,secondaryindexmanag:[63,207],section:[2,4,5,7,10,11,12,13,15,20,22,42,44,47,48,49,51,53,58,63,65,66,68,72,191,195,206,207],secur:[6,14,15,42,50,62],see:[0,4,6,10,11,12,13,14,17,20,22,23,24,26,27,28,34,36,39,41,42,43,49,50,53,58,63,66,68,71,105,135,150,194,196,197,200,206,207,208],seed:[6,45,50,52,67,118,135,145],seedprovid:6,seek:[4,6,60,63],seem:24,seen:[6,11],segment:[4,6,53,57,63,71,98,206,207],segment_nam:57,segmentid:196,select:[6,9,10,11,12,14,15,19,20,24,26,27,29,30,32,39,42,43,44,49,53,55,58,66,70,71,140,206,207,208],select_claus:13,select_stat:[12,18],self:[24,38],selinux:44,semant:[10,13,14,27],semi:44,send:[6,8,44,70,204,208],sendto:70,sens:[10,13,15,25,44],sensic:14,sensit:[11,12,14,17,208],sensor:[22,24,28],sent:[0,6,11,22,44,63,204,208],sentenc:41,separ:[4,6,11,13,24,25,27,28,33,35,41,45,53,58,60,66,68,71,73,75,76,78,83,85,91,94,95,101,104,105,108,110,113,117,119,121,125,126,133,137,140,141,142,148,149,150,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,180,182,183,184,186,187,188,190,192,195,199],septemb:41,seq:[6,150],sequenc:12,sequenti:[6,60,150],seren:13,seri:[11,24,42,58,71],serial:[4,6,72],serializingcacheprovid:6,seriou:[35,204,207],serv:[13,24,60,66,208],server:[6,12,13,22,27,39,40,42,43,44,
52,60,63,66,69,70,195,204],server_encryption_opt:[66,195],servic:[6,25,39,48,52,63,66,68,206,208],session:[6,20,66,72,135,151],set:[0,4,6,9,10,11,12,13,14,17,18,24,27,29,35,38,40,41,43,45,50,52,53,55,57,58,59,60,63,66,67,68,70,71,72,75,94,105,135,148,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,187,191,192,194,195,201,203,204,205,206,207,208],set_liter:[20,22],setbatchlogreplaythrottl:135,setcachecapac:135,setcachekeystosav:135,setcompactionthreshold:[58,135],setcompactionthroughput:[58,135],setconcurr:135,setconcurrentcompactor:135,setconcurrentviewbuild:[18,135],sethintedhandoffthrottlekb:135,setint:14,setinterdcstreamthroughput:135,setlogginglevel:[135,206],setlong:14,setmaxhintwindow:135,setstr:14,setstreamthroughput:135,setter:[20,32,33],settimeout:135,settraceprob:135,setup:[35,41,43,66],sever:[4,13,20,24,26,27,30,58,65,66,70,195],sfunc:[9,14],sha1:[42,201],sha:[37,42],shadow:[18,58],shall:52,shape:70,shard:[4,28],share:[11,13,39,196,204,208],sharedpool:71,sharp:46,shed:44,shell:[49,50,52,72],shift:22,ship:[34,43,49,66,71,206,208],shop:[23,24,26],shortcut:18,shorter:[35,66],shorthand:[26,71],shortli:28,shortlog:42,should:[0,4,5,6,10,11,12,13,14,17,20,22,24,27,32,34,35,38,39,41,43,44,45,46,47,49,52,53,55,58,59,60,63,65,66,67,68,70,71,140,150,171,200,202,204,208],shouldn:[11,45],show:[20,23,24,25,27,34,50,53,65,68,72,83,103,123,135,139,149,156,174,175,183,190,191,203,204,206,207,208],shown:[12,24,25,26,28,71,183,195],shrink:6,shut:6,shutdown:[4,6,60],side:[6,11,13,17,22,27,66,204],sig:42,sign:[13,22,44],signal:[6,135,146],signatur:[48,57],signifi:208,signific:[6,35,39,41,43,60,204],significantli:[6,28,65,208],silent:14,similar:[6,13,14,24,26,52,59,60,203,204,208],similarli:[0,10,17,29,33,60,135,140],similiar:65,simpl:[6,11,23,24,25,27,28,34,36,39,43,66],simple_classnam:43,simple_select:13,simplequerytest:43,simplereplicationstrategi:66,simpleseedprovid:6,simplesnitch:[6,67],simplestrategi:[29,70],simpli:[0,4,6,11,13,14,17,22,27,28,39,43,58,60,63,68,188],simplifi:[25,28],simul:43,simultan:[6,60,71,75,105,148,157,187],sinc:[6,11,13,14,22,28,35,39,43,44,48,52,58,63,65,68,194,197,199,207,208],singl:[0,6,10,11,12,13,14,17,18,20,22,24,27,28,29,33,41,45,49,50,52,62,63,65,66,67,71,72,78,191,204,206,207,208],singleton:38,site:[35,42],situat:[6,43,58,208],six:52,size:[4,6,11,22,25,31,33,44,45,52,53,55,57,59,60,62,63,66,70,71,72,98,132,135,191,194,196,197,198,201,206,207,208],size_estim:[135,143,206],sizeandtimebasedrollingpolici:53,sizeof:28,sizetieredcompactionstrategi:[11,58,207],skinni:207,skip:[6,13,44,63,68,71,72,157,173,191,194,200],skipcol:71,skiprow:71,sks:48,sla:38,slack:[5,41,50,65],slash:12,slave:32,sleep:208,slf4j:[33,34,53],slightli:6,slow:[6,11,67,204,206,207,208],slower:[6,11,55,207,208],slowest:6,slowli:[6,22],small:[4,6,11,13,22,28,44,58,60,72,191,195,204,208],smaller:[4,6,28,44,58,60,71,200],smallest:[0,11,14,63,197],smallint:[9,10,14,17,19,22,29],smith:22,smoother:10,smoothli:6,snappi:[4,6],snappycompressor:[11,59],snapshot:[4,6,27,34,52,63,72,76,132,135,157,191,199,203,208],snapshot_nam:[76,201],snapshotnam:[76,135],snippet:52,snitch:[6,50,62,82,135],snt:208,socket:[6,66,171],soft:35,softwar:[24,34],sold:24,sole:[11,36],solid:[6,60],solr:69,solut:32,some:[0,6,9,11,12,13,14,22,24,25,26,27,28,30,34,35,36,39,41,42,43,44,45,57,58,59,63,66,68,71,196,198,204,206,207,208],some_funct:14,some_keysopac:11,some_nam:12,someaggreg:14,somearg:14,somefunct:14,someon:[37,58],someth:[6,23,27,198,206,208],sometim:[6,12,13,24,204,205,206,207,208],someudt:14,somewher:[48,65],so
on:[26,66],sooner:6,sort:[4,11,13,22,24,28,58,60,69,183,197,206],sort_kei:183,sound:28,sourc:[4,5,6,8,14,32,34,35,36,40,42,48,52,53,63,72,140,192,201,204],source_elaps:71,space:[4,6,28,33,44,57,58,60,63,200,208],space_used_by_snapshots_tot:183,space_used_l:183,space_used_tot:183,span:[6,13,58],spare:[32,206],sparingli:13,spark:46,speak:[205,206,208],spec:[38,49,63,70,71],speci:[11,18],special:[12,13,43,44,58,63,72,202],specif:[9,11,12,13,22,24,28,35,39,41,44,46,57,58,63,65,66,70,71,135,140,150,195],specifc:63,specifi:[0,6,10,11,12,13,14,16,18,20,22,24,27,28,34,39,44,49,57,58,59,63,66,68,70,71,72,76,78,119,135,140,150,156,169,171,173,180,183,186,191,195,201,204],specific_dc:150,specific_host:150,specific_keyspac:140,specific_sourc:140,specific_token:140,specifii:20,specnam:70,specul:[0,11,63],speculative_retri:11,speculativefailedretri:63,speculativeinsufficientreplica:63,speculativeretri:63,speculativesamplelatencynano:63,speed:[6,50,59,72,191,207],spend:208,spent:[63,208],sphinx:40,spike:44,spin:[6,60],spindl:[4,6],spirit:[6,67],split:[28,33,44,58,63,70,71,72,78,191],spread:[6,11,67],sql:[13,15,30],squar:12,squash:[35,41],src:[34,42,140],ssd:[6,16,60,208],ssh:[52,204],ssl:[6,44,62,70,71,72,135,146,191],ssl_storage_port:67,ssp:195,sss:17,sstabl:[2,6,11,28,44,50,55,59,60,62,72,75,78,105,119,126,135,142,148,157,187,188,192,196,197,199,200,201,203,206,207,208],sstable_act:206,sstable_compression_ratio:183,sstable_count:183,sstable_s:58,sstable_size_in_mb:58,sstable_task:206,sstabledump:[72,191],sstableexpiredblock:[58,72,191],sstablelevelreset:[72,191],sstableload:[66,72,191],sstablemetadata:[72,191,194,198],sstableofflinerelevel:[72,191],sstablerepairedset:[72,191,196],sstablerepairset:198,sstablescrub:[72,191],sstablesperreadhistogram:63,sstablesplit:[72,191],sstableupgrad:[72,191],sstableutil:[72,191,192,196],sstableverifi:[72,191],sstablewrit:33,stabil:[32,41],stabl:[48,71,206],stack:[6,199,200,201,202,203,208],stackcollaps:208,staff:[24,26,70],staff_act:70,stage:[41,42,110,135,163,204,207],staging_numb:42,stai:[23,24,50,58],stakehold:[24,26],stale:66,stall:[6,68],stamp:53,stand:43,standalon:43,standard1:[193,195,196,198,199,201,206],standard:[6,22,32,36,44,48,63,70,192,196,206],start:[0,6,9,13,24,25,26,27,28,30,35,40,44,45,48,50,52,58,60,63,65,66,68,78,150,180,197,201,204,206,207,208],start_dat:29,start_token:[78,150],start_token_1:140,start_token_2:140,start_token_n:140,starter:41,startup:[4,6,21,39,44,58,63,68,202],startupcheck:206,starvat:6,stat:196,state:[6,14,52,55,58,60,63,68,135,174,205,206],state_or_provinc:29,statement:[6,9,10,11,13,14,15,16,17,20,21,22,27,38,40,53,55,58,63,66,70,71,204,208],static0:11,static1:11,staticcolumn:196,statist:[4,58,63,71,80,106,135,138,182,183,185,195,196,201,202,207],statu:[20,32,38,41,44,48,66,71,72,135,149,175,176,177,178,179,188,191,204,205],statusautocompact:135,statusbackup:135,statusbinari:135,statusgossip:135,statushandoff:135,stc:11,stdev:[70,208],stdin:71,stdout:71,stdvrng:70,step:[6,26,28,32,35,39,40,42,66,205,206],still:[0,6,10,11,13,14,17,20,22,27,28,30,32,33,42,65,66,68,71,193,204,208],stop:[4,6,48,52,71,93,135,153,181,191,192,193,194,195,196,197,198,199,200,201,202,203,206],stop_commit:6,stop_paranoid:6,stopdaemon:[52,135],storag:[0,2,11,15,16,28,41,44,50,59,60,62,69,195,196],storage_port:[45,67],storageservic:[6,33,52,66],store:[0,4,6,10,11,12,13,22,24,26,27,28,29,50,55,58,59,60,63,66,69,71,90,98,100,135,179,195,196,199],store_typ:6,stort:197,straight:[34,68,208],straightforward:[28,57],strain:28,strategi:[0,6,11,28,62,67,70,194,207],s
tratio:50,stream:[4,6,50,58,59,62,65,74,114,120,135,140,150,167,168,170,171,195,202,208],stream_throughput_outbound_megabits_per_sec:195,street:[22,29],strength:6,stress:[50,52,72,208],stresscql:70,strict:[10,58],strictli:[8,11,14],string:[4,6,10,11,12,13,14,16,17,20,21,22,32,63,71,119,192],strong:0,strongli:[6,11,12,28,66],structur:[4,6,9,20,23,27,35,38,55,63,72,191,208],stub:66,stuck:197,style:[6,24,25,28,38,39,40,41,43,50],stype:[9,14],sub:[11,13,22,48,58,208],subclass:6,subdirectori:[6,21],subject:[6,14,20,66],submiss:[6,41],submit:[40,41,43,50,78],subopt:70,subqueri:27,subrang:6,subscrib:[8,36],subscript:8,subsequ:[6,11,13,20,26,44,52,58,59,66],subset:[0,20,58,71,204],substanti:208,substitut:48,substract:19,subsystem:66,subtract:196,subvert:58,succed:63,succeed:203,succesfulli:63,success:[0,26,28,42,52,53,71],successfulli:[42,53,63,203],sudden:6,sudo:[44,48,52,208],suffer:208,suffici:[6,28,60,66],suggest:[12,27,35,36,41,60,203],suit:[6,32,41,43,66,195],suitabl:[13,14,28,38,41],sum:[28,57],sum_i:28,sum_j:28,sum_k:28,sum_l:28,summari:[4,6,41,63,195,196,201,202],sun:[33,66,208],sunx509:195,supercolumn:9,supersed:[10,157,199],superus:[9,20,66],suppli:[11,13,27,37,192,204],supplier:24,support:[0,4,6,9,10,11,12,13,14,15,16,18,19,20,22,24,25,27,29,30,36,39,41,43,44,46,50,51,58,66,71,72,157,180,199,206,208],suppos:13,sure:[6,8,24,30,32,33,34,35,36,39,41,43,44,45,48,58,70,208],surfac:66,surplu:44,surpris:0,surprisingli:6,surround:[17,29,71],suscept:14,suspect:[5,41,208],suspend:39,svctm:208,svg:208,svn:42,svnpubsub:42,swamp:44,swap:[4,6,208],symbol:[24,208],symmetri:17,symptom:44,sync:[4,6,27,35,44,63,65,150,208],synchron:[6,65],synctim:63,synonym:20,synopsi:[73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190],syntact:[11,20],syntax:[10,12,13,14,20,22,30,35,58,59,70],syntaxerror:204,sys:6,sysctl:[6,44],sysf:208,sysintern:6,system:[6,11,14,20,25,28,32,39,43,44,45,49,53,58,60,63,66,69,71,109,111,112,114,120,126,135,142,143,144,162,164,165,167,170,195,200,202,204,205,208],system_auth:[6,66],system_schema:[20,53],system_trac:150,system_view:206,system_virtual_schema:[53,206],tab:[33,39],tabl:[0,4,6,9,10,12,13,14,15,16,17,18,20,21,22,24,25,26,27,28,29,43,52,53,55,58,59,62,65,66,70,71,72,75,78,85,93,95,104,105,108,113,117,126,135,141,142,144,148,150,157,161,173,175,180,182,183,187,188,191,193,195,196,198,202,203,204,206,207],table1:[20,65],table2:65,table_definit:70,table_nam:[11,13,16,20,21,58,183,206],table_opt:[11,18],tablehistogram:[135,207],tablestat:135,tag:[22,38,42,173],tail:206,take:[6,10,11,13,14,22,27,28,35,38,39,41,42,44,55,58,59,60,68,135,173,198,200,203,206,207,208],taken:[6,57,58,63,70,201],talk:26,tar:[34,48],tarbal:[34,45,47,71],target:[11,20,34,39,43,53,58,195],task:[6,26,32,34,36,39,41,63,71,206,207,208],taskdef:43,taught:27,tcp:[6,44,208],tcp_keepalive_intvl:44,tcp_keepalive_prob:44,tcp_keepalive_tim:44,tcp_nodelai:6,tcp_retries2:6,tcp_wmem:6,tcpdump:208,teach:[6,67],team:[24,42,44],technetwork:6,technic:[11,15],techniqu:[27,28,205,208],technot:6,tee:48,tell:[6,13,38,44,45,63,208],templat:[32,42],tempor:6,temporari:[66,72,191],temporarili:6,tempt:[24,28],ten:28,tend:[4,6,28,44,60],tendenc:[6,26],te
nsion:26,tent:42,terabyt:59,term:[6,13,14,15,18,22,26,27,28,52,69],termin:[12,20,71],ternari:33,test:[6,25,33,34,38,40,41,42,49,50,52,60,70,71],test_keyspac:[66,206],testabl:[38,41],testbatchandlist:43,testmethod1:43,testmethod2:43,testsom:43,teststaticcompactt:43,text:[4,9,11,12,13,14,17,22,25,28,29,35,42,53,57,59,66,69,70,208],than:[0,4,6,11,12,13,14,15,18,19,22,24,28,33,41,50,57,58,59,60,66,67,68,70,151,164,165,193,195,197,200,201,204,206,207,208],thei:[6,9,10,11,12,13,14,15,18,19,20,22,23,24,25,26,27,30,33,38,41,43,50,52,53,55,58,59,60,63,66,193,197,202,203,204,206,207,208],them:[0,6,10,11,13,14,22,23,26,27,32,33,36,41,42,43,44,49,53,55,58,63,66,135,187,195,202,204,206,208],themselv:[13,20],theoret:11,therefor:[24,28,35,41,43,66,194,202],theses:66,thi:[0,2,4,5,6,7,10,11,12,13,14,15,17,18,20,22,23,24,25,26,27,28,29,30,32,33,34,35,36,37,38,39,41,42,43,44,45,47,48,50,51,52,53,55,58,59,60,63,65,66,67,68,70,71,72,73,75,76,78,81,83,85,91,95,101,104,105,107,108,110,113,117,119,121,125,126,133,135,137,140,141,142,148,149,150,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,180,182,183,184,186,187,188,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,206,207,208],thing:[6,22,24,28,36,37,41,44,47,58,65,208],think:[6,26,27],third:[22,28,38,50,63,207],thobb:71,those:[11,12,13,14,16,17,18,20,22,23,24,27,41,44,57,58,66,71,187,195,199,200,202,204,208],though:[10,12,22,29,50,58,59,63],thought:200,thousand:71,thousandssep:71,thread:[4,6,18,52,53,60,63,66,70,75,105,135,148,150,157,166,185,187,197,206,207],threaddump:208,threadpool:[62,205],threadpoolnam:63,threadprioritypolici:39,three:[0,6,11,25,28,55,58,59,66,71,204,206,207],threshold:[4,11,57,60,67,108,135,161,168,208],thrift:[9,70],throttl:[6,32,72,107,135,158,162,166,167,170,191],through:[0,5,9,10,11,12,13,18,24,25,26,32,35,39,41,44,49,53,57,58,71,208],throughout:[26,66],throughput:[0,6,58,59,60,63,109,114,120,135,162,167,170,195,206,207],throwabl:[38,43],thrown:[22,197],thu:[6,10,11,12,13,18,22,44,63,67,68,135,187],thumb:[6,41],thusli:22,tib:[80,134,183],tick:41,ticket:[5,35,36,37,38,41,42,43,57],tid:208,tie:44,tier:62,ties:[13,207],tighter:6,tightli:6,tild:71,time:[0,4,6,8,9,10,11,12,13,15,16,17,18,24,26,27,28,33,35,38,39,41,43,44,52,53,55,57,59,62,63,65,66,69,70,71,135,137,196,198,203,204,206,207,208],timefram:68,timehorizon:6,timelin:11,timeout:[6,22,44,63,71,121,135,171,204,207],timeout_in_m:171,timeout_typ:[121,171],timer:[6,63],timestamp:[4,9,10,11,13,14,15,17,19,24,28,50,53,58,71,72,157,191,193,196,199],timeunit:58,timeuuid:[9,10,11,17,22,70],timewindowcompactionstrategi:11,timezon:[17,71],tini:[6,58],tinyint:[9,10,14,17,19,22],tip:204,titl:[24,29,41,70],tjake:33,tls_dhe_rsa_with_aes_128_cbc_sha:6,tls_dhe_rsa_with_aes_256_cbc_sha:6,tls_ecdhe_rsa_with_aes_128_cbc_sha:6,tls_ecdhe_rsa_with_aes_256_cbc_sha:6,tls_rsa_with_aes_128_cbc_sha:6,tls_rsa_with_aes_256_cbc_sha:6,tmp:[201,202,206,208],tmpf:208,tmplink:202,toc:[4,201,202],tock:41,todai:12,todat:14,todo:38,togeth:[6,11,13,14,27,28,32,58,204,207,208],toggl:66,tojson:15,token:[2,4,6,9,10,12,13,44,58,63,65,70,71,78,83,126,127,133,135,140,150,156,188,196,197,204,206,207],tokenawar:204,tokenrang:70,toler:[0,55],tom:13,tombston:[4,6,11,17,24,44,62,63,65,105,157,193,196,199,208],tombstone_compact:180,tombstone_compaction_interv:58,tombstone_threshold:58,tombstonescannedhistogram:63,ton:43,too:[6,11,12,14,22,28,38,58,70,204,207,208],took:[204,206],tool:[6,12,31,32,34,35,41,42,44,50,52,53,58,63,66,68,70,192,193,194,195,196,197,198,199,200,201,202,203,204,205,2
06,207],toolset:208,top:[13,22,32,41,42,50,63,137,183,184,197],topcount:[137,184],topic:71,topolog:[6,67,156],toppartit:135,total:[4,6,13,28,52,57,58,63,70,195,206,207,208],total_replica:[0,11],totalblockedtask:63,totalcolumnsset:196,totalcommitlogs:63,totalcompactionscomplet:63,totaldiskspaceus:63,totalhint:63,totalhintsinprogress:63,totallat:63,totalrow:196,totalsizecap:53,totimestamp:14,touch:[8,23,28,44,58],tough:43,tounixtimestamp:14,tour:22,tpstat:[135,207],trace:[6,30,63,72,122,135,150,172,199,200,201,202,203,206,208],tracerout:208,track:[4,6,58,63],tracker:[35,41],trade:25,tradeoff:[0,6,208],tradit:[58,59],traffic:[6,66,67,208],trail:33,transact:[13,21,24,27,63,72,180,191],transfer:[6,44,66,195],transform:13,transient_replica:[0,11],transit:[10,20,34],translat:208,transpar:[6,27,44],transport:[6,39,63,70,87,97,135,177,195,207],trap:24,treat:[0,6,10,27,44,67],tree:[6,34,39,63,65],tri:[6,58,204],trigger:[4,6,9,11,12,15,32,50,53,55,59,62,66,75,135,147],trigger_nam:21,trigger_stat:12,trip:[6,13],trivial:66,troubl:[28,206],troubleshoot:[6,38,40,50,65,204,206,207,208],truesnapshotss:63,truli:9,truncat:[4,6,9,10,15,20,66,70,121,135,171,186],truncate_stat:12,truncatehint:135,trunk:[35,37,38,39,41,43],trust:66,trusti:208,trustor:6,truststor:[6,66,70,195],truststore_password:6,truststorepassword:66,tspw:195,tstamp:192,ttl:[4,6,9,10,11,14,17,22,28,62,157,196,199],tty:71,tunabl:2,tune:[11,44,55,58,60,206,207],tupl:[6,9,10,12,13,14,15,17],tuple_liter:[12,13],tuple_typ:22,tuplevalu:[10,14],turn:[0,6,24,41,44,66,204],twc:[11,58],twice:[4,6,22],two:[0,6,11,12,13,14,17,19,27,28,39,50,52,53,55,58,60,66,67,71,196,207,208],txt:[4,14,37,38,41,42,201,202],type:[0,4,6,10,11,12,13,14,15,19,20,24,25,28,29,30,38,40,48,50,53,60,62,65,66,70,71,121,135,171,180,192,195,196,200,202,206,207],type_hint:12,typeasblob:14,typecodec:14,typic:[0,6,13,23,25,27,28,30,44,55,58,60,63,66,69,71,201,204,206,207,208],typo:35,ubuntu:39,udf:[6,14],udf_stat:12,udfcontext:[10,14],udt:[14,17,25,30],udt_liter:12,udt_nam:22,udt_stat:12,udtarg:14,udtnam:14,udtvalu:[10,14],ulimit:44,ultim:27,ultra:59,unabl:[4,38,50,52,207],unacknowledg:6,unaffect:22,unari:19,unavail:[6,11,63,66,68,208],unavailableexcept:204,unblock:63,unbound:[6,22],uncaught:206,unchecked_tombstone_compact:58,uncom:[6,63,66],uncommon:[24,41],uncompress:[4,6,59,63],unconfirm:6,undecor:4,undelet:58,under:[6,22,32,33,43,53,63,66,208],underli:[6,18,58,66,208],underlin:23,undersold:24,understand:[6,23,41,44,65,66,206,208],understood:23,unencrypt:[6,66],unexpect:[4,191,192,193,194,195,196,197,198,199,200,201,202,203],unexpectedli:22,unfinishedcommit:63,unflush:173,unfortun:43,uniform:70,uniq:206,uniqu:[11,14,22,23,24,25,26,30,70,196],unit:[22,27,38,40,58,135,159,195,200,207],unix:[53,205],unixtimestampof:[10,14],unknown:197,unless:[6,11,13,16,18,20,22,33,57,66,67,196,200,208],unlik:[6,10,13,22],unlimit:[6,44,71,195],unlock:26,unlog:[9,63,70],unnecessari:[38,68],unnecessarili:57,unpredict:13,unprepar:63,unquot:12,unquoted_identifi:12,unquoted_nam:11,unreach:65,unrel:[41,204],unrepair:[6,62,63,65,72,191],unrespons:11,unsafe_aggressive_sstable_expir:58,unsecur:66,unselected_column:18,unset:[6,10,13,17,198],unsign:22,unspecifi:6,unsubscrib:[8,50],unsuccess:53,unsupportedclassversionerror:52,untar:48,until:[0,4,6,11,18,22,55,57,58,59,66,67],unus:6,unusu:38,unwrit:6,upcom:24,updat:[6,9,10,11,12,14,15,17,18,20,22,24,27,35,38,40,41,43,48,50,53,58,59,63,66,70,71,206,207],update_paramet:13,update_stat:[12,13],updatewithlwt:70,upgrad:[4,6,11,58,135,187,201,202],upgrade_sst:180,upgradess
t:[55,58,59,135],upload:41,upload_bintrai:42,upon:[6,22,53,55,57,59],upper:[12,17,58,66],ups:60,upstream:41,uptim:[127,135],urgent:[6,42],url:[35,37,70],usag:[0,4,6,11,22,50,55,57,59,62,63,71,72,191],use:[0,4,6,9,10,11,12,13,14,16,17,18,20,22,23,24,25,26,27,28,32,33,35,38,39,41,42,43,45,48,49,50,52,53,55,57,58,60,63,66,67,68,70,71,75,105,118,135,137,148,157,184,187,192,195,196,198,199,200,202,204,205,206,207,208],use_k:53,use_stat:12,usec:208,usecas:58,useconcmarksweepgc:39,usecondcardmark:39,used:[0,4,6,9,10,11,12,13,14,15,16,17,18,20,22,24,25,26,28,30,34,38,39,41,42,43,44,52,53,58,59,60,63,66,67,68,70,71,73,75,76,78,83,85,91,94,95,101,104,105,108,110,113,117,119,121,125,126,133,135,137,140,141,142,148,149,150,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,180,182,183,184,186,187,188,190,192,193,194,195,196,197,198,200,201,202,204,207,208],useecassandra:66,useful:[0,4,6,11,14,24,28,30,34,41,58,59,63,65,68,71,73,75,76,78,83,85,91,95,101,104,105,108,110,113,117,119,121,125,126,133,137,140,141,142,148,149,150,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,180,182,183,184,186,187,188,190,194,196,206,207,208],useparnewgc:39,user1:13,user2:13,user3:13,user4:13,user:[0,5,6,8,9,10,11,12,13,15,16,17,18,24,25,26,27,32,38,40,41,42,44,48,52,53,55,58,59,60,66,71,72,78,94,135,193,201,206,208],user_count:13,user_defined_typ:22,user_funct:20,user_nam:13,user_occup:13,user_opt:20,useract:13,userid:[11,13,14],userindex:16,usernam:[6,13,14,63,66,71,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,195],uses:[0,4,6,11,12,13,14,16,20,21,24,25,35,43,44,52,66,70,203,207,208],usethreadprior:39,using:[4,6,10,11,12,13,14,18,20,22,23,25,27,28,29,30,32,39,40,41,43,47,48,49,50,52,53,55,59,60,62,63,66,68,71,78,140,157,173,192,194,196,197,198,199,202,204,205,206,207,208],usr:[52,71,208],usual:[6,13,22,27,37,41,43,55,66,150,199,204,206],utc:[17,71],utd:11,utf8:[22,71],utf8typ:[9,196],utf:71,util:[4,14,38,58,71,206,208],uuid:[9,10,11,12,17,22,25,29],val0:11,val1:11,val:[14,70],valid:[0,6,10,11,12,13,14,17,22,24,25,42,44,52,58,59,63,65,66,71,72,150,157,180,191,203],validationexecutor:[63,207],validationtim:63,valu:[4,6,9,10,11,12,13,14,16,17,19,22,25,28,38,39,42,44,53,55,58,59,63,66,67,69,70,71,72,94,122,126,135,158,162,164,165,166,167,169,170,171,172,191,192,203,204,206,208],valuabl:206,value1:13,value2:13,value_in_kb_per_sec:[158,166],value_in_m:169,value_in_mb:[162,167,170],valueof:14,varchar:[9,11,14,17,22],vari:[11,28,59],variabl:[6,10,12,17,22,28,32,39,42,47,52,198],varianc:[25,206],variant:12,varieti:57,varint:[9,11,14,17,19,22],variou:[6,11,23,24,25,26,32,39,43,60,66,70,191,205,206],vector:66,vendor:52,verbos:[195,199,202,203],veri:[6,11,13,27,35,41,43,44,55,58,59,60,198,203,204,206,207,208],verif:[72,191],verifi:[41,44,46,48,59,65,126,135,180,191,192,193,194,195,196,197,198,199,200,201,202,203],versa:202,version:[2,5,6,9,11,14,15,22,34,39,41,46,48,52,58,63,68,72,77,82,92,102,135,187,188,191,199,202,206],versu:27,vertic:[52,71],via:[4,6,8,10,18,20,24,27,34,38,39,44,45,53,58,59,60,63,65,66,67,196,198,208],vice:202,view:[0,6,10,11,12,15,20,24,25,26,27,30,50,63,71,112,1
35,165,190,198,206,207,208],view_build:180,view_nam:18,viewbuildexecutor:[63,207],viewbuildstatu:135,viewlockacquiretim:63,viewmutationstag:[63,207],viewpendingmut:63,viewreadtim:63,viewreplicasattempt:63,viewreplicassuccess:63,viewwrit:63,viewwritelat:63,violat:27,virtual:[0,6,44,58,63,68],virtualenv:32,visibl:[11,20,33,55],visit:[23,70],visual:[24,25,35,206],vnode:[6,59],volum:[4,6,57,59,203,207,208],vote:40,vulner:[6,42,66],w_await:208,wai:[4,6,12,15,17,18,22,24,27,28,32,36,37,39,43,44,53,58,59,150,196,197,198,199,206,208],wait:[0,4,6,11,41,44,52,53,63,135,152,206,207,208],waitingoncommit:63,waitingonfreememtablespac:63,waitingonsegmentalloc:63,walk:25,want:[4,6,11,13,23,26,27,28,30,32,39,41,42,43,44,53,65,66,68,70,194,195,198,206,208],warmup:[70,135,160],warn:[6,11,33,43,62,150,203,206],warrant:207,washington:22,wasn:10,wast:6,watch:[43,208],weaker:0,web:35,websit:[24,43,48,208],wed:52,week:[22,65,198],weibul:70,weight:[53,63,98],welcom:8,well:[0,6,11,13,14,17,22,24,26,27,28,30,38,39,53,57,59,60,66,67,135,153,201,206,208],went:63,were:[6,9,10,20,24,30,38,39,42,58,63,199,202,206,207],west:42,what:[11,13,22,23,24,35,36,40,43,45,50,58,60,66,70,71,196,204,205,206,207,208],whatev:[10,13,44],whedon:13,wheel:201,when:[4,6,9,10,11,12,13,14,15,16,17,20,22,24,26,27,28,32,33,35,38,41,42,43,45,50,52,53,55,57,59,60,62,63,65,66,67,68,70,71,73,75,76,78,81,83,85,91,95,101,104,105,108,110,113,117,119,121,125,126,133,137,140,141,142,148,149,150,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,180,182,183,184,186,187,188,190,196,197,202,204,206,207,208],whenev:[197,208],where:[0,4,6,9,10,11,12,14,16,17,18,19,20,22,24,26,28,38,43,45,48,53,55,58,59,66,68,70,71,98,150,204,206,208],where_claus:13,wherea:[22,66,207],whether:[0,6,9,11,13,27,28,39,58,67,71,98],which:[0,4,5,6,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,35,41,42,43,44,45,48,49,52,53,55,57,58,59,60,63,65,66,67,68,70,78,113,117,126,140,150,202,204,205,206,207,208],whichev:[0,6],whilst:6,whitelist:66,whitespac:40,who:[20,32,41,44],whole:[6,13,14,22,58,65],whose:[11,22,27,180],why:[24,38,41,50,193,204,206,208],wide:[4,23,24,28,57,207],width:12,wiki:[6,35,39],wildcard:[13,20,200],wildli:11,window:[0,4,6,62,63,66,116,124,135,169,205],winner:44,wip:41,wipe:[44,68],wire:[6,44],wirefram:26,wireshark:208,wise:11,wish:[6,24,42,58,63,206],within:[0,4,6,11,12,13,16,24,39,41,42,44,58,60,63,66],withing:6,without:[0,6,11,12,13,14,20,22,37,39,41,42,43,44,52,57,60,63,66,71,72,73,126,135,142,191,192],wmb:208,wmem_max:6,wnen:30,won:[4,6,13,35,37,65,208],wont:[53,58],word:[10,11,12,18,20,22,44,57,66],work:[0,4,6,10,11,14,15,17,23,24,25,27,30,32,33,36,37,39,40,42,43,44,50,58,60,63,65,66,67,68,71,195,208],workaround:[195,199],worker:71,workflow:[24,26],workload:[6,36,38,55,58,60,70,207,208],workspac:39,worktre:39,world:[23,27],worri:[26,41,44],wors:[6,67],worst:[6,28,41],worth:[6,27,53],worthwhil:[6,24],would:[6,12,13,14,17,20,23,24,25,26,27,28,35,39,41,43,50,58,59,60,65,66,67,196,198,202,206,208],wrap:[24,67],write:[0,4,6,10,11,13,22,24,27,33,35,36,38,43,44,57,58,59,60,63,65,66,67,68,70,71,93,121,135,171,183,196,199,202,204,206,207,208],write_lat:183,write_request_timeout:44,writefailedideacl:63,writelat:[63,204],writer:[4,6,33],writetim:[9,14],writetimeoutexcept:[6,204],written:[4,6,11,21,26,27,32,44,53,55,58,59,63,65],wrong:[6,27,42,207],wrqm:208,wrst:208,wrte:63,www:[6,42,48,208],x86:69,xandra:46,xarg:[198,206],xdm:208,xlarg:60,xlarge_daili:53,xml:[34,39,42,43,45,52,206],xmn220m:39,xms1024m:39,xmx1024m:39,xmx:60,xss256k:39,xz
vf:48,yaml:[0,4,6,14,18,20,45,48,63,66,67,68,70,79,94,98,135,153,183,185,195,196,204],year:[13,22,27,28,52],yes:[9,11,66],yet:[6,11,32,36,42,57,63,202],ygc:208,ygct:208,yield:[13,27,28,53,68,208],ymmv:206,you:[0,4,5,6,8,10,11,12,13,14,16,17,18,20,21,22,23,24,25,26,27,28,29,30,32,33,34,35,36,37,39,40,42,43,44,45,46,47,48,49,50,53,57,58,63,65,66,67,68,69,70,71,73,135,173,192,194,195,196,198,199,200,202,203,204,205,206,207,208],young:208,younger:14,your:[0,5,6,8,10,11,12,24,25,26,27,28,29,30,33,35,36,39,40,41,43,44,45,48,50,52,58,60,65,66,67,70,71,195,200,203,205,206,207,208],yourself:[36,37,43],yum:[42,52],yyyi:[17,22,53],z_0:[11,16,18],zero:[6,10,11,44,63,67,206],zgrep:206,zip:[22,53],zipcod:22,zone:[6,22,67],zoomabl:208,zstd:4,zstdcompressor:59},titles:["Dynamo","Guarantees","Architecture","Overview","Storage Engine","Reporting Bugs","Cassandra Configuration File","Configuring Cassandra","Contact us","Appendices","Changes","Data Definition","Definitions","Data Manipulation","Functions","The Cassandra Query Language (CQL)","Secondary Indexes","JSON Support","Materialized Views","Arithmetic Operators","Security","Triggers","Data Types","Conceptual Data Modeling","Logical Data Modeling","Physical Data Modeling","Defining Application Queries","RDBMS Design","Evaluating and Refining Data Models","Defining Database Schema","Cassandra Data Modeling Tools","Data Modeling","Jenkins CI Environment","Code Style","Dependency Management","Working on Documentation","Getting Started","How-to Commit","Review Checklist","Building and IDE Integration","Contributing to Cassandra","Contributing Code Changes","Release Process","Testing","Frequently Asked Questions","Configuring Cassandra","Client drivers","Getting Started","Installing Cassandra","Inserting and querying","Welcome to Apache Cassandra\u2019s documentation!","New Features in Apache Cassandra 4.0","Support for Java 11","Audit Logging","Backups","Bloom Filters","Bulk Loading","Change Data Capture","Compaction","Compression","Hardware Choices","Hints","Operating Cassandra","Monitoring","Read repair","Repair","Security","Snitch","Adding, replacing, moving and removing nodes","Third-Party Plugins","Cassandra Stress","cqlsh: the CQL shell","Cassandra 
Tools","assassinate","bootstrap","cleanup","clearsnapshot","clientstats","compact","compactionhistory","compactionstats","decommission","describecluster","describering","disableauditlog","disableautocompaction","disablebackup","disablebinary","disablefullquerylog","disablegossip","disablehandoff","disablehintsfordc","disableoldprotocolversions","drain","enableauditlog","enableautocompaction","enablebackup","enablebinary","enablefullquerylog","enablegossip","enablehandoff","enablehintsfordc","enableoldprotocolversions","failuredetector","flush","garbagecollect","gcstats","getbatchlogreplaythrottle","getcompactionthreshold","getcompactionthroughput","getconcurrency","getconcurrentcompactors","getconcurrentviewbuilders","getendpoints","getinterdcstreamthroughput","getlogginglevels","getmaxhintwindow","getreplicas","getseeds","getsstables","getstreamthroughput","gettimeout","gettraceprobability","gossipinfo","handoffwindow","help","import","info","invalidatecountercache","invalidatekeycache","invalidaterowcache","join","listsnapshots","move","netstats","Nodetool","pausehandoff","profileload","proxyhistograms","rangekeysample","rebuild","rebuild_index","refresh","refreshsizeestimates","reloadlocalschema","reloadseeds","reloadssl","reloadtriggers","relocatesstables","removenode","repair","repair_admin","replaybatchlog","resetfullquerylog","resetlocalschema","resumehandoff","ring","scrub","setbatchlogreplaythrottle","setcachecapacity","setcachekeystosave","setcompactionthreshold","setcompactionthroughput","setconcurrency","setconcurrentcompactors","setconcurrentviewbuilders","sethintedhandoffthrottlekb","setinterdcstreamthroughput","setlogginglevel","setmaxhintwindow","setstreamthroughput","settimeout","settraceprobability","snapshot","status","statusautocompaction","statusbackup","statusbinary","statusgossip","statushandoff","stop","stopdaemon","tablehistograms","tablestats","toppartitions","tpstats","truncatehints","upgradesstables","verify","version","viewbuildstatus","SSTable Tools","sstabledump","sstableexpiredblockers","sstablelevelreset","sstableloader","sstablemetadata","sstableofflinerelevel","sstablerepairedset","sstablescrub","sstablesplit","sstableupgrade","sstableutil","sstableverify","Find The Misbehaving Nodes","Troubleshooting","Cassandra Logs","Use Nodetool","Diving Deep, Use External 
Tools"],titleterms:{"break":28,"class":67,"final":202,"function":[13,14,17],"import":[33,126],"long":43,"new":[44,51],"switch":58,"transient":0,Adding:68,Doing:197,IDE:39,IDEs:33,LCS:58,TLS:66,The:[13,15,17,58,204],USE:11,Use:[59,195,207,208],Uses:59,Using:[39,52,198],Will:44,With:66,about:32,abov:196,access:66,adcanc:53,add:[34,44],address:44,advanc:[59,208],after:68,aggreg:14,alias:13,all:[20,44,196,202],alloc:68,allocate_tokens_for_keyspac:6,allocate_tokens_for_local_replication_factor:6,allow:13,alreadi:194,alter:[11,18,20,22],ani:44,announc:42,answer:36,anti:24,apach:[32,39,50,51],appendic:9,appendix:9,applic:26,architectur:2,arithmet:19,artifact:42,ask:44,assassin:73,assign:68,attempt:200,audit:53,audit_logging_opt:6,auditlog:53,auth:66,authent:[6,20,66],author:[6,66],auto_snapshot:6,automat:20,automatic_sstable_upgrad:6,avg:14,back_pressure_en:6,back_pressure_strategi:6,backup:54,base:35,basic:[199,203,208],batch:[13,44,63],batch_size_fail_threshold_in_kb:6,batch_size_warn_threshold_in_kb:6,batchlog_replay_throttle_in_kb:6,bcc:208,befor:41,benefit:59,best:65,between:27,binari:48,binauditlogg:53,bintrai:42,blob:[14,44],block:193,bloom:55,boilerpl:33,bootstrap:[44,58,68,74],branch:41,broadcast_address:6,broadcast_rpc_address:6,buffer_pool_use_heap_if_exhaust:6,bufferpool:63,bug:[5,36,41],build:[39,52],bulk:[44,56],cach:[63,66,208],calcul:28,call:[42,44],can:44,capi:69,captur:[53,57,71,208],cas_contention_timeout_in_m:6,cassandra:[6,7,15,17,27,30,32,35,39,40,42,43,44,45,48,50,51,53,57,62,66,69,70,72,201,206],cast:14,cdc:57,cdc_enabl:6,cdc_free_space_check_interval_m:6,cdc_raw_directori:6,cdc_total_space_in_mb:6,certif:66,chang:[10,41,44,45,55,57,58],characterist:22,check:199,checklist:38,choic:60,choos:41,circleci:43,claus:13,clean:202,cleanup:[68,75],clear:71,clearsnapshot:76,client:[46,49,63,66,204],client_encryption_opt:6,clientstat:77,clojur:46,cloud:60,cluster:[44,195,207],cluster_nam:6,code:[4,33,41],collect:[22,58,208],column_index_cache_size_in_kb:6,column_index_size_in_kb:6,command:[39,53,58,71,198],comment:12,commit:37,commit_failure_polici:6,commitlog:[4,63],commitlog_compress:6,commitlog_directori:6,commitlog_segment_size_in_mb:6,commitlog_sync:6,commitlog_sync_batch_window_in_m:6,commitlog_sync_group_window_in_m:6,commitlog_sync_period_in_m:6,commitlog_total_space_in_mb:6,commitlogseg:57,committ:35,common:[11,52,58,60,206],compact:[9,58,63,78,207],compaction_large_partition_warning_threshold_mb:6,compaction_throughput_mb_per_sec:6,compactionhistori:79,compactionstat:80,compactionstrategi:58,compat:71,compress:59,conceptu:23,concern:58,concurrent_compactor:6,concurrent_counter_writ:6,concurrent_materialized_view_build:6,concurrent_materialized_view_writ:6,concurrent_read:6,concurrent_valid:6,concurrent_writ:6,condition:20,config:195,configur:[6,7,45,53,57,59],conflict:34,connect:[20,44],consist:[0,71],constant:12,contact:8,content:[42,53],contribut:[36,40,41],control:20,convent:[12,33],convers:14,coordin:207,copi:71,corrupt:[199,203],corrupted_tombstone_strategi:6,count:14,counter:[13,22,199],counter_cache_keys_to_sav:6,counter_cache_save_period:6,counter_cache_size_in_mb:6,counter_write_request_timeout_in_m:6,cpu:[60,208],cql:[9,15,63,71],cqlsh:[49,71],cqlshrc:71,creat:[11,14,16,18,20,21,22,36,41,42],credenti:20,credentials_update_interval_in_m:6,credentials_validity_in_m:6,cross_node_timeout:6,cstar_perf:43,current:[14,201],custom:22,cython:71,dart:46,data:[11,13,17,20,22,23,24,25,28,30,31,44,57,58,68],data_file_directori:6,databas:[20,29],datacent:20,date:[14,22,199],date
tim:[14,19],dead:68,deal:199,debian:48,debug:[39,206],decis:27,decommiss:81,deep:208,defin:[14,22,26,29],definit:[11,12],defragment:58,delet:[13,42,44,58],denorm:27,depend:[34,71],describ:[71,83],describeclust:82,design:27,detail:[58,195],detect:0,develop:42,diagnostic_events_en:6,dies:44,differ:27,directori:[45,58],disabl:[53,57],disableauditlog:84,disableautocompact:85,disablebackup:86,disablebinari:87,disablefullquerylog:88,disablegossip:89,disablehandoff:90,disablehintsfordc:91,disableoldprotocolvers:92,disk:[28,44,60],disk_failure_polici:6,disk_optimization_strategi:6,displai:192,distribut:42,dive:208,document:[35,36,50],doe:[44,53],drain:93,driver:[46,49],drop:[9,11,14,16,18,20,21,22,44],droppedmessag:63,dry:197,dtest:[36,43],dump:192,durat:22,dynam:67,dynamic_snitch_badness_threshold:6,dynamic_snitch_reset_interval_in_m:6,dynamic_snitch_update_interval_in_m:6,dynamo:0,each:[44,196],eclips:39,elixir:46,email:44,enabl:[53,57,66],enable_materialized_view:6,enable_sasi_index:6,enable_scripted_user_defined_funct:6,enable_transient_repl:6,enable_user_defined_funct:6,enableauditlog:94,enableautocompact:95,enablebackup:96,enablebinari:97,enablefullquerylog:98,enablegossip:99,enablehandoff:100,enablehintsfordc:101,enableoldprotocolvers:102,encod:17,encrypt:66,endpoint_snitch:6,engin:4,entir:192,entri:44,environ:[32,45],erlang:46,error:[44,204],evalu:28,even:44,exampl:4,except:33,exclud:192,exist:44,exit:71,expand:71,experiment:6,expir:58,explan:196,extend:203,extern:208,factor:44,fail:[44,68],failur:[0,44],failuredetector:103,faq:70,featur:[6,51],file:[6,33,34,48,53,195,200,203,206],file_cache_size_in_mb:6,fileauditlogg:53,filedescriptorratio:63,filter:[13,53,55],find:204,first:27,fix:[36,41],flamegraph:208,flow:35,flush:104,format:[33,192],found:[194,197],freez:41,frequent:44,from:[39,42,44,48,71,195],fromjson:17,full:[65,206],full_query_logging_opt:6,fulli:58,further:57,garbag:[58,208],garbagecollect:105,garbagecollector:63,gc_grace_second:58,gc_log_threshold_in_m:6,gc_warn_threshold_in_m:6,gcstat:106,gener:33,get:[36,47,195,206],getbatchlogreplaythrottl:107,getcompactionthreshold:108,getcompactionthroughput:109,getconcurr:110,getconcurrentcompactor:111,getconcurrentviewbuild:112,getendpoint:113,getinterdcstreamthroughput:114,getlogginglevel:115,getmaxhintwindow:116,getreplica:117,getse:118,getsstabl:119,getstreamthroughput:120,gettimeout:121,gettraceprob:122,github:35,give:44,gossip:0,gossipinfo:123,gpg:42,grace:[58,196],grant:20,graph:70,group:13,guarante:1,handl:33,handoffwindow:124,hang:68,happen:44,hardwar:60,has:194,haskel:46,heap:44,help:[71,125],hide:195,high:208,hint:61,hinted_handoff_disabled_datacent:6,hinted_handoff_en:6,hinted_handoff_throttle_in_kb:6,hintedhandoff:63,hints_compress:6,hints_directori:6,hints_flush_period_in_m:6,hintsservic:63,host:[44,71],hot:66,hotel:[24,25],how:[35,37,44,53],htop:208,idea:39,ideal_consistency_level:6,identifi:12,impact:59,includ:202,increment:65,incremental_backup:6,index:[16,63,69],index_summary_capacity_in_mb:6,index_summary_resize_interval_in_minut:6,info:127,inform:[206,208],initi:36,initial_token:6,insert:[13,17,49],instal:48,integr:[27,39,66],intellij:39,inter:66,inter_dc_stream_throughput_outbound_megabits_per_sec:6,inter_dc_tcp_nodelai:6,intern:[20,66,192],internode_application_receive_queue_capacity_in_byt:6,internode_application_receive_queue_reserve_endpoint_capacity_in_byt:6,internode_application_receive_queue_reserve_global_capacity_in_byt:6,internode_application_send_queue_capacity_in_byt:6,internode_application_send_queue_reser
ve_endpoint_capacity_in_byt:6,internode_application_send_queue_reserve_global_capacity_in_byt:6,internode_authent:6,internode_compress:6,internode_recv_buff_size_in_byt:6,internode_send_buff_size_in_byt:6,invalidatecountercach:128,invalidatekeycach:129,invalidaterowcach:130,investig:[36,204],iostat:208,issu:52,java:[44,46,52],jconsol:44,jenkin:32,jira:[35,42],jmx:[44,58,63,66],job:32,join:[27,44,131],json:17,jstack:208,jstat:208,jvm:[63,208],keep:201,kei:[16,18,42,192],key_cache_keys_to_sav:6,key_cache_save_period:6,key_cache_size_in_mb:6,keyspac:[11,44,63,197],keyword:[9,12],lang:44,languag:15,larg:[28,44],latenc:[204,207,208],level:[0,58,194,208],librari:34,lightweight:70,limit:[13,18,53],line:[39,71],list:[8,20,22,36,44,202],listen:44,listen_address:[6,44],listen_interfac:6,listen_interface_prefer_ipv6:6,listen_on_broadcast_address:6,listsnapshot:132,liter:22,live:44,load:[44,56,195],local:[35,207],locat:45,log:[44,45,53,58,202,204,206],logger:206,logic:24,login:71,lot:[44,198],lucen:69,made:44,mail:8,main:45,major:58,manag:[34,192],mani:198,manifest:199,manipul:13,manual:68,map:[16,22,44],materi:18,matrix:52,max:[14,44],max_concurrent_automatic_sstable_upgrad:6,max_hint_window_in_m:6,max_hints_delivery_thread:6,max_hints_file_size_in_mb:6,max_value_size_in_mb:6,maxtimeuuid:14,mean:44,memori:[44,60,63],memorypool:63,memtabl:4,memtable_allocation_typ:6,memtable_cleanup_threshold:6,memtable_flush_writ:6,memtable_heap_space_in_mb:6,memtable_offheap_space_in_mb:6,merg:58,messag:44,metadata:[196,198],method:44,metric:[63,204],min:14,minor:58,mintimeuuid:14,misbehav:204,mode:70,model:[23,24,25,28,30,31],monitor:[63,68],more:[44,58,192,195,206],move:[68,133],movement:68,multilin:33,multipl:200,nativ:[14,22],native_transport_allow_older_protocol:6,native_transport_flush_in_batches_legaci:6,native_transport_frame_block_size_in_kb:6,native_transport_idle_timeout_in_m:6,native_transport_max_concurrent_connect:6,native_transport_max_concurrent_connections_per_ip:6,native_transport_max_frame_size_in_mb:6,native_transport_max_thread:6,native_transport_port:6,native_transport_port_ssl:6,net:46,netbean:39,netstat:134,network:208,network_author:6,networktopologystrategi:[0,11],newer:39,next:[42,204],nexu:42,node:[44,66,68,204],nodej:46,nodetool:[44,53,58,135,207],note:35,noteworthi:22,now:14,num_token:6,number:19,old:[42,201],one:[44,198],onli:[44,192,202],open:39,oper:[19,42,44,58,59,62],optim:27,option:[18,53,58,65,71],order:13,otc_backlog_expiration_interval_m:6,otc_coalescing_enough_coalesced_messag:6,otc_coalescing_strategi:6,otc_coalescing_window_u:6,other:[44,65],outofmemoryerror:44,output:[53,192,193,195],overflow:199,overview:[3,57],own:32,packag:[42,48],packet:208,page:[71,208],paramet:[13,57,58],parti:69,partit:28,partition:6,password:66,patch:[36,41],pattern:24,pausehandoff:136,perform:[42,43],periodic_commitlog_sync_lag_block_in_m:6,perl:46,permiss:20,permissions_update_interval_in_m:6,permissions_validity_in_m:6,phi_convict_threshold:6,php:46,physic:25,pick:0,plugin:[32,69],point:44,pom:34,port:44,post:42,practic:65,prepar:12,prepared_statements_cache_size_mb:6,prerequisit:[42,48],primari:18,print:[196,198],process:42,profil:70,profileload:137,progress:[68,195],project:39,promot:42,properti:45,proxyhistogram:138,publish:[35,42],python:46,pytz:71,queri:[15,26,27,49,204,206,207],question:[36,44],rang:[0,68],range_request_timeout_in_m:6,rangekeysampl:139,rate:204,raw:192,rdbm:27,read:[57,64],read_request_timeout_in_m:6,rebuild:140,rebuild_index:141,reduc:194,referenti:27,refin:28,refresh:142,r
efreshsizeestim:143,refus:44,releas:42,relevel:197,reliabl:208,reload:[53,66],reloadlocalschema:144,reloadse:145,reloadssl:146,reloadtrigg:147,relocatesst:148,remot:44,remov:[58,68],removenod:149,repair:[58,64,65,150,198],repair_admin:151,repair_session_space_in_mb:6,repaired_data_tracking_for_partition_reads_en:6,repaired_data_tracking_for_range_reads_en:6,replac:68,replaybatchlog:152,replic:[0,44],report:[5,36,44,63],report_unconfirmed_repaired_data_mismatch:6,repositori:42,request:63,request_timeout_in_m:6,requir:[32,34],reserv:[9,24,25],resetfullquerylog:153,resetlocalschema:154,resolut:34,resourc:208,restrict:20,result:13,resum:68,resumehandoff:155,retriev:14,review:[36,38],revok:20,rewrit:201,rhel:44,right:41,ring:[0,44,156],role:[20,66],role_manag:6,roles_update_interval_in_m:6,roles_validity_in_m:6,row:192,row_cache_class_nam:6,row_cache_keys_to_sav:6,row_cache_save_period:6,row_cache_size_in_mb:6,rowcach:69,rpc_address:6,rpc_interfac:6,rpc_interface_prefer_ipv6:6,rpc_keepal:6,rubi:46,run:[43,197],runtim:45,rust:46,safeti:6,sai:44,same:44,sampl:53,saved_caches_directori:6,scala:46,scalar:14,schema:29,script:198,scrub:[157,199],second:196,secondari:16,secur:[20,66],see:44,seed:[32,44],seed_provid:6,select:[13,17,18],selector:13,send:42,serial:71,server:32,server_encryption_opt:6,session:71,set:[20,22,32,39,44,198],setbatchlogreplaythrottl:158,setcachecapac:159,setcachekeystosav:160,setcompactionthreshold:161,setcompactionthroughput:162,setconcurr:163,setconcurrentcompactor:164,setconcurrentviewbuild:165,sethintedhandoffthrottlekb:166,setinterdcstreamthroughput:167,setlogginglevel:168,setmaxhintwindow:169,setstreamthroughput:170,settimeout:171,settraceprob:172,setup:[32,39],share:71,shell:71,show:[44,71,198],sign:42,signatur:14,simplestrategi:[0,11],singl:[44,58,192],size:[28,58,200],skip:199,slack:[8,42],slow_query_log_timeout_in_m:6,small:200,snapshot:[173,195,200,201],snapshot_before_compact:6,snitch:67,sort:27,sourc:[39,71],special:71,specif:20,specifi:[196,200],speed:[44,195],sphinx:35,split:200,ssl:[66,195],ssl_storage_port:6,sstabl:[4,58,63,191,193,194,195,198,202],sstable_preemptive_open_interval_in_mb:6,sstabledump:192,sstableexpiredblock:193,sstablelevelreset:194,sstableload:195,sstablemetadata:196,sstableofflinerelevel:197,sstablerepairedset:198,sstablescrub:199,sstablesplit:200,sstableupgrad:201,sstableutil:202,sstableverifi:203,stai:44,standard:66,start:[36,39,41,47],start_native_transport:6,starv:58,state:[207,208],statement:[12,18,33],statu:[174,198,207],statusautocompact:175,statusbackup:176,statusbinari:177,statusgossip:178,statushandoff:179,stc:58,step:[34,204],stop:180,stopdaemon:181,storag:[4,9,27,63],storage_port:6,store:44,strategi:58,stratio:69,stream:[44,63,68],stream_entire_sst:6,stream_throughput_outbound_megabits_per_sec:6,streaming_connections_per_host:6,streaming_keep_alive_period_in_sec:6,stress:[43,70],structur:192,style:33,submit:36,sum:14,support:[17,52,70],sync:42,system:206,tabl:[11,57,63,192,194,197,199,201],tablehistogram:182,tablestat:183,tarbal:48,temporari:202,term:12,test:[32,36,39,43],than:44,thei:44,third:69,though:44,thread:208,threadpool:[63,207],threshold:6,throttl:195,throughput:208,tier:58,time:[14,22,58],timestamp:[22,44,192],timeuuid:14,timewindowcompactionstrategi:58,todo:[0,1,3,11,54,56,61,64],tojson:17,token:[0,14,68],tombston:58,tombstone_failure_threshold:6,tombstone_warn_threshold:6,tool:[30,43,72,191,208],top:[44,208],topic:42,toppartit:184,tpstat:185,trace:71,tracetype_query_ttl:6,tracetype_repair_ttl:6,transact:[70,202],transp
arent_data_encryption_opt:6,trickle_fsync:6,trickle_fsync_interval_in_kb:6,trigger:[21,58],troubleshoot:[34,205],truncat:11,truncate_request_timeout_in_m:6,truncatehint:186,ttl:[13,58],tunabl:0,tupl:22,tweet:42,two:44,type:[9,17,22,34,58,63],udt:22,unabl:44,unit:[36,39,43],unlog:13,unlogged_batch_across_partitions_warn_threshold:6,unrepair:[58,198],unsubscrib:44,updat:[13,34,36,42,44],upgradesst:187,upload:42,usag:[44,65,70,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,192,193,194,195,196,197,198,199,200,201,202,203,208],use:44,user:[14,20,22,36,70],using:[35,44,58],uuid:14,valid:199,valu:196,variabl:45,verif:203,verifi:188,version:[4,10,42,71,189,201],view:[18,53],viewbuildstatu:190,vmtouch:208,vote:42,wait:42,warn:57,websit:42,welcom:50,what:[41,44,53],when:[44,58],where:13,whitespac:33,why:[44,58],window:58,windows_timer_interv:6,without:[58,199,200],work:[22,35,41],write_request_timeout_in_m:6,writetim:13,yaml:[53,57],you:41,your:[32,42]}}) \ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/cassandra_stress.html b/src/doc/4.0-alpha3/tools/cassandra_stress.html deleted file mode 100644 index 2ef28a803..000000000 --- a/src/doc/4.0-alpha3/tools/cassandra_stress.html +++ /dev/null @@ -1,353 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Cassandra Stress" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Stress

-

cassandra-stress is a tool for benchmarking and load testing a Cassandra -cluster. cassandra-stress supports testing arbitrary CQL tables and queries -to allow users to benchmark their data model.

-

This documentation focuses on user mode as this allows the testing of your -actual schema.

-
-

Usage

-

There are several operation types:

-
-
    -
  • write-only, read-only, and mixed workloads of standard data
  • -
  • write-only and read-only workloads for counter columns
  • -
  • user configured workloads, running custom queries on custom schemas
  • -
-
-

The syntax is cassandra-stress <command> [options]. If you want more information on a given command -or options, just run cassandra-stress help <command|option>.

-
-
Commands:
-
-
read:
-
Multiple concurrent reads - the cluster must first be populated by a write test
-
write:
-
Multiple concurrent writes against the cluster
-
mixed:
-
Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test
-
counter_write:
-
Multiple concurrent updates of counters.
-
counter_read:
-
Multiple concurrent reads of counters. The cluster must first be populated by a counter_write test.
-
user:
-
Interleaving of user provided queries, with configurable ratio and distribution.
-
help:
-
Print help for a command or option
-
print:
-
Inspect the output of a distribution definition
-
legacy:
-
Legacy support mode
-
-
-
Primary Options:
-
-
-pop:
-
Population distribution and intra-partition visit order
-
-insert:
-
Insert specific options relating to various methods for batching and splitting partition updates
-
-col:
-
Column details such as size and count distribution, data generator, names, comparator and if super columns should be used
-
-rate:
-
Thread count, rate limit or automatic mode (default is auto)
-
-mode:
-
Thrift or CQL with options
-
-errors:
-
How to handle errors when encountered during stress
-
-sample:
-
Specify the number of samples to collect for measuring latency
-
-schema:
-
Replication settings, compression, compaction, etc.
-
-node:
-
Nodes to connect to
-
-log:
-
Where to log progress to, and the interval at which to do it
-
-transport:
-
Custom transport factories
-
-port:
-
The port to connect to cassandra nodes on
-
-sendto:
-
Specify a stress server to send this command to
-
-graph:
-
Graph recorded metrics
-
-tokenrange:
-
Token range settings
-
-
-
Suboptions:
-
Every command and primary option has its own collection of suboptions. These are too numerous to list here. -For information on the suboptions for each command or option, please use the help command, -cassandra-stress help <command|option>.
-
-
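As an illustrative starting point (row counts, duration and thread counts are placeholders to adjust for your cluster), the cluster is first populated with a write run and then exercised with a mixed workload:

cassandra-stress write n=1000000 -rate threads=50
cassandra-stress mixed ratio\(write=1,read=3\) duration=10m -rate threads=50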
-
-

User mode

-

User mode allows you to stress your own schemas. This can save time in -the long run rather than building an application and then realising your schema -doesn’t scale.

-
-

Profile

-

User mode requires a profile defined in YAML. -Multiple YAML files may be specified in which case operations in the ops argument are referenced as specname.opname.

-

An identifier for the profile:

-
specname: staff_activities
-
-
-

The keyspace for the test:

-
keyspace: staff
-
-
-

CQL for the keyspace. Optional if the keyspace already exists:

-
keyspace_definition: |
- CREATE KEYSPACE staff WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
-
-
-

The table to be stressed:

-
table: staff_activities
-
-
-

CQL for the table. Optional if the table already exists:

-
table_definition: |
-  CREATE TABLE staff_activities (
-      name text,
-      when timeuuid,
-      what text,
-      PRIMARY KEY(name, when, what)
-  )
-
-
-

Optional meta information on the generated columns in the above table. -The min and max only apply to text and blob types. -The distribution field represents the total unique population -distribution of that column across rows:

-
columnspec:
-  - name: name
-    size: uniform(5..10) # The names of the staff members are between 5-10 characters
-    population: uniform(1..10) # 10 possible staff members to pick from
-  - name: when
-    cluster: uniform(20..500) # Staff members do between 20 and 500 events
-  - name: what
-    size: normal(10..100,50)
-
-
-

Supported types are:

-

An exponential distribution over the range [min..max]:

-
EXP(min..max)
-
-
-

An extreme value (Weibull) distribution over the range [min..max]:

-
EXTREME(min..max,shape)
-
-
-

A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:

-
GAUSSIAN(min..max,stdvrng)
-
-
-

A gaussian/normal distribution, with explicitly defined mean and stdev:

-
GAUSSIAN(min..max,mean,stdev)
-
-
-

A uniform distribution over the range [min, max]:

-
UNIFORM(min..max)
-
-
-

A fixed distribution, always returning the same value:

-
FIXED(val)
-
-
-

If preceded by ~, the distribution is inverted

-

Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)

-

Insert distributions:

-
insert:
-  # How many partitions to insert per batch
-  partitions: fixed(1)
-  # How many rows to update per partition
-  select: fixed(1)/500
-  # UNLOGGED or LOGGED batch for insert
-  batchtype: UNLOGGED
-
-
-

Currently all inserts are done inside batches.

-

Read statements to use during the test:

-
queries:
-   events:
-      cql: select *  from staff_activities where name = ?
-      fields: samerow
-   latest_event:
-      cql: select * from staff_activities where name = ?  LIMIT 1
-      fields: samerow
-
-
-

Running a user mode test:

-
cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once
-
-
-

This will create the schema then run tests for 1 minute with an equal number of inserts, latest_event queries and events -queries. Additionally the table will be truncated once before the test.

-

The full example can be found in the accompanying yaml file.

-
-
Running a user mode test with multiple yaml files:
-
cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once
-
This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table
-
although care must be taken that the table definition is identical (data generation specs can be different).
-
-
-
-

Lightweight transaction support

-

cassandra-stress supports lightweight transactions. In this mode it will first read current data from Cassandra and then use the read value(s) -to fulfill the lightweight transaction condition(s).

-

Lightweight transaction update query:

-
queries:
-  regularupdate:
-      cql: update blogposts set author = ? where domain = ? and published_date = ?
-      fields: samerow
-  updatewithlwt:
-      cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ?
-      fields: samerow
-
-
-

The full example can be found in the accompanying yaml file.

-
-
-
-

Graphing

-

Graphs can be generated for each run of stress.

-../_images/example-stress-graph.png -

To create a new graph:

-
cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph"
-
-
-

To add a new run to an existing graph point to an existing file and add a revision name:

-
cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run"
-
-
-
-
-

FAQ

-

How do you use NetworkTopologyStrategy for the keyspace?

-

Use the schema option, making sure to either escape the parentheses or enclose the whole option in quotes:

-
cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)"
-
-
-

How do you use SSL?

-

Use the transport option:

-
cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra"
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/cqlsh.html b/src/doc/4.0-alpha3/tools/cqlsh.html deleted file mode 100644 index 8d4e802a5..000000000 --- a/src/doc/4.0-alpha3/tools/cqlsh.html +++ /dev/null @@ -1,486 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "cqlsh: the CQL shell" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cqlsh: the CQL shell

-

cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line.

-
-

Compatibility

-

cqlsh is compatible with Python 2.7.

-

In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.

-
-
-

Optional Dependencies

-

cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.

-
-

pytz

-

By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the pytz library must be installed. See the timezone option in cqlshrc for -specifying a timezone to use.

-
-
-

cython

-

The performance of cqlsh’s COPY operations can be improved by installing cython. This will -compile the python modules that are central to the performance of COPY.

-
-
-
-

cqlshrc

-

The cqlshrc file holds configuration options for cqlsh. By default this is in the user’s home directory at -~/.cassandra/cqlshrc, but a custom location can be specified with the --cqlshrc option.

-

Example config values and documentation can be found in the conf/cqlshrc.sample file of a tarball installation. You -can also view the latest version of cqlshrc online.

-
-
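As an illustration, a minimal cqlshrc fragment using option names from the sample file might look like the following (the hostname value is a placeholder):

[connection]
hostname = 192.0.2.10
port = 9042

[ui]
color = on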
-

Command Line Options

-

Usage:

-

cqlsh [options] [host [port]]

-

Options:

-
-
-C --color
-
Force color output
-
--no-color
-
Disable color output
-
--browser
-
Specify the browser to use for displaying cqlsh help. This can be one of the supported browser names (e.g. firefox) or a browser path followed by %s (e.g. -/usr/bin/google-chrome-stable %s).
-
--ssl
-
Use SSL when connecting to Cassandra
-
-u --user
-
Username to authenticate against Cassandra with
-
-p --password
-
Password to authenticate against Cassandra with, should -be used in conjunction with --user
-
-k --keyspace
-
Keyspace to authenticate to, should be used in conjunction -with --user
-
-f --file
-
Execute commands from the given file, then exit
-
--debug
-
Print additional debugging information
-
--encoding
-
Specify a non-default encoding for output (defaults to UTF-8)
-
--cqlshrc
-
Specify a non-default location for the cqlshrc file
-
-e --execute
-
Execute the given statement, then exit
-
--connect-timeout
-
Specify the connection timeout in seconds (defaults to 2s)
-
--request-timeout
-
Specify the request timeout in seconds (defaults to 10s)
-
-t --tty
-
Force tty mode (command prompt)
-
-
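For example, to open an authenticated, SSL-encrypted session against a specific node and keyspace (host, user and keyspace names are placeholders):

cqlsh --ssl -u cassandra -k my_keyspace 192.0.2.10 9042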
-
-

Special Commands

-

In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below.

-
-

CONSISTENCY

-

Usage: CONSISTENCY <consistency level>

-

Sets the consistency level for operations to follow. Valid arguments include:

-
    -
  • ANY
  • -
  • ONE
  • -
  • TWO
  • -
  • THREE
  • -
  • QUORUM
  • -
  • ALL
  • -
  • LOCAL_QUORUM
  • -
  • LOCAL_ONE
  • -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-
-
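For example, to require a quorum of replicas in the local datacenter for the queries that follow:

cqlsh> CONSISTENCY LOCAL_QUORUM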
-

SERIAL CONSISTENCY

-

Usage: SERIAL CONSISTENCY <consistency level>

-

Sets the serial consistency level for operations to follow. Valid arguments include:

-
    -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-

The serial consistency level is only used by conditional updates (INSERT, UPDATE and DELETE with an IF -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of QUORUM (and -is successful), then a QUORUM read is guaranteed to see that write. But if the regular consistency level of that -write is ANY, then only a read with a consistency level of SERIAL is guaranteed to see it (even a read with -consistency ALL is not guaranteed to be enough).

-
-
-

SHOW VERSION

-

Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:

-
cqlsh> SHOW VERSION
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-
-
-
-
-

SHOW HOST

-

Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:

-
cqlsh> SHOW HOST
-Connected to Prod_Cluster at 192.0.0.1:9042.
-
-
-
-
-

SHOW SESSION

-

Pretty prints a specific tracing session.

-

Usage: SHOW SESSION <session id>

-

Example usage:

-
cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8
-
-Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8
-
- activity                                                  | timestamp                  | source    | source_elapsed | client
------------------------------------------------------------+----------------------------+-----------+----------------+-----------
-                                        Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 |              0 | 127.0.0.1
- Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 |           3843 | 127.0.0.1
-...
-
-
-
-
-

SOURCE

-

Reads the contents of a file and executes each line as a CQL statement or special cqlsh command.

-

Usage: SOURCE <string filename>

-

Example usage:

-
cqlsh> SOURCE '/home/thobbs/commands.cql'
-
-
-
-
-

CAPTURE

-

Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured.

-

Usage:

-
CAPTURE '<file>';
-CAPTURE OFF;
-CAPTURE;
-
-
-

That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME.

-

Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session.

-

To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF.

-

To inspect the current capture configuration, use CAPTURE with no arguments.

-
-
-

HELP

-

Gives information about cqlsh commands. To see available topics, enter HELP without any arguments. To see help on a -topic, use HELP <topic>. Also see the --browser argument for controlling what browser is used to display help.

-
-
-

TRACING

-

Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed.

-

Usage:

-
TRACING ON
-TRACING OFF
-
-
-
-
-

PAGING

-

Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it’s a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once.

-

Usage:

-
PAGING ON
-PAGING OFF
-PAGING <page size in rows>
-
-
-
-
-

EXPAND

-

Enables or disables vertical printing of rows. Enabling EXPAND is useful when many columns are fetched, or the -contents of a single column are large.

-

Usage:

-
EXPAND ON
-EXPAND OFF
-
-
-
-
-

LOGIN

-

Authenticate as a specified Cassandra user for the current session.

-

Usage:

-
LOGIN <username> [<password>]
-
-
-
-
-

EXIT

-

Ends the current session and terminates the cqlsh process.

-

Usage:

-
EXIT
-QUIT
-
-
-
-
-

CLEAR

-

Clears the console.

-

Usage:

-
CLEAR
-CLS
-
-
-
-
-

DESCRIBE

-

Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema.

-

Usage:

-
DESCRIBE CLUSTER
-DESCRIBE SCHEMA
-DESCRIBE KEYSPACES
-DESCRIBE KEYSPACE <keyspace name>
-DESCRIBE TABLES
-DESCRIBE TABLE <table name>
-DESCRIBE INDEX <index name>
-DESCRIBE MATERIALIZED VIEW <view name>
-DESCRIBE TYPES
-DESCRIBE TYPE <type name>
-DESCRIBE FUNCTIONS
-DESCRIBE FUNCTION <function name>
-DESCRIBE AGGREGATES
-DESCRIBE AGGREGATE <aggregate function name>
-
-
-

In any of the commands, DESC may be used in place of DESCRIBE.

-

The DESCRIBE CLUSTER command prints the cluster name and partitioner:

-
cqlsh> DESCRIBE CLUSTER
-
-Cluster: Test Cluster
-Partitioner: Murmur3Partitioner
-
-
-

The DESCRIBE SCHEMA command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup.

-
-
-

COPY TO

-

Copies data from a table to a CSV file.

-

Usage:

-
COPY <table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value STDOUT (without single quotes) to print the CSV to stdout.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY TO

-
-
MAXREQUESTS
-
The maximum number of token ranges to fetch simultaneously. Defaults to 6.
-
PAGESIZE
-
The number of rows to fetch in a single page. Defaults to 1000.
-
PAGETIMEOUT
-
By default the page timeout is 10 seconds per 1000 entries -in the page size or 10 seconds if pagesize is smaller.
-
BEGINTOKEN, ENDTOKEN
-
Token range to export. Defaults to exporting the full ring.
-
MAXOUTPUTSIZE
-
The maximum size of the output file measured in number of lines; -beyond this maximum the output file will be split into segments. --1 means unlimited, and is the default.
-
ENCODING
-
The encoding used for characters. Defaults to utf8.
-
-
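For example, to export two columns of a table to a CSV file with a header row (keyspace, table and file names are placeholders):

cqlsh> COPY my_keyspace.users (id, name) TO 'users.csv' WITH HEADER=true AND PAGESIZE=2000;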
-
-
-

COPY FROM

-

Copies data from a CSV file to table.

-

Usage:

-
COPY <table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value STDIN (without single quotes) to read the -CSV data from stdin.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY FROM

-
-
INGESTRATE
-
The maximum number of rows to process per second. Defaults to 100000.
-
MAXROWS
-
The maximum number of rows to import. -1 means unlimited, and is the default.
-
SKIPROWS
-
A number of initial rows to skip. Defaults to 0.
-
SKIPCOLS
-
A comma-separated list of column names to ignore. By default, no columns are skipped.
-
MAXPARSEERRORS
-
The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default.
-
MAXINSERTERRORS
-
The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000.
-
ERRFILE =
-
A file to store all rows that could not be imported. By default this is import_<ks>_<table>.err where <ks> is -your keyspace and <table> is your table name.
-
MAXBATCHSIZE
-
The max number of rows inserted in a single batch. Defaults to 20.
-
MINBATCHSIZE
-
The min number of rows inserted in a single batch. Defaults to 2.
-
CHUNKSIZE
-
The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000.
-
-
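For example, to import a CSV file that contains a header row while capping insert errors (keyspace, table and file names are placeholders):

cqlsh> COPY my_keyspace.users (id, name) FROM 'users.csv' WITH HEADER=true AND MAXINSERTERRORS=100 AND CHUNKSIZE=500;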
-
-

Shared COPY Options

-

Options that are common to both COPY TO and COPY FROM.

-
-
NULLVAL
-
The string placeholder for null values. Defaults to null.
-
HEADER
-
For COPY TO, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, -specifies whether the first line in the CSV input file contains column names. Defaults to false.
-
DECIMALSEP
-
The character that is used as the decimal point separator. Defaults to ..
-
THOUSANDSSEP
-
The character that is used to separate thousands. Defaults to the empty string.
-
BOOLSTYLE
-
The string literal format for boolean values. Defaults to True,False.
-
NUMPROCESSES
-
The number of child worker processes to create for COPY tasks. Defaults to a max of 4 for COPY FROM and 16 -for COPY TO. However, at most (num_cores - 1) processes will be created.
-
MAXATTEMPTS
-
The maximum number of failed attempts to fetch a range of data (when using COPY TO) or insert a chunk of data -(when using COPY FROM) before giving up. Defaults to 5.
-
REPORTFREQUENCY
-
How often status updates are refreshed, in seconds. Defaults to 0.25.
-
RATEFILE
-
An optional file to output rate statistics to. By default, statistics are not output to a file.
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/index.html b/src/doc/4.0-alpha3/tools/index.html deleted file mode 100644 index 257ecd5a4..000000000 --- a/src/doc/4.0-alpha3/tools/index.html +++ /dev/null @@ -1,258 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Tools" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Tools

-

This section describes the command line tools provided with Apache Cassandra.

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/assassinate.html b/src/doc/4.0-alpha3/tools/nodetool/assassinate.html deleted file mode 100644 index 864da2149..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/assassinate.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "assassinate" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

assassinate

-
-
-

Usage

-
NAME
-        nodetool assassinate - Forcefully remove a dead node without
-        re-replicating any data. Use as a last resort if you cannot removenode
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] assassinate [--] <ip_address>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <ip_address>
-            IP address of the endpoint to assassinate
-
-
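For example, to forcefully remove a dead node by its IP address (the address is a placeholder):

nodetool assassinate 192.0.2.15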
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/bootstrap.html b/src/doc/4.0-alpha3/tools/nodetool/bootstrap.html deleted file mode 100644 index a278c8ce3..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/bootstrap.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "bootstrap" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

bootstrap

-
-
-

Usage

-
NAME
-        nodetool bootstrap - Monitor/manage node's bootstrap process
-
-SYNOPSIS
-        nodetool bootstrap
-        nodetool
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] [(-h <host> | --host <host>)]
-                [(-pw <password> | --password <password>)] [(-pp | --print-port)]
-                [(-p <port> | --port <port>)] bootstrap resume
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-COMMANDS
-        With no arguments, Display help information
-
-        resume
-            Resume bootstrap streaming
-
-
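For example, to resume an interrupted bootstrap on the local node:

nodetool bootstrap resume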
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/cleanup.html b/src/doc/4.0-alpha3/tools/nodetool/cleanup.html deleted file mode 100644 index 5cde18733..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/cleanup.html +++ /dev/null @@ -1,139 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "cleanup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cleanup

-
-
-

Usage

-
NAME
-        nodetool cleanup - Triggers the immediate cleanup of keys no longer
-        belonging to a node. By default, clean all keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] cleanup
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to cleanup simultanously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
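For example, to clean up a single keyspace using two concurrent jobs (the keyspace name is a placeholder):

nodetool cleanup -j 2 my_keyspace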
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/clearsnapshot.html b/src/doc/4.0-alpha3/tools/nodetool/clearsnapshot.html deleted file mode 100644 index 68c581e87..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/clearsnapshot.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clearsnapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clearsnapshot

-
-
-

Usage

-
NAME
-        nodetool clearsnapshot - Remove the snapshot with the given name from
-        the given keyspaces. If no snapshotName is specified we will remove all
-        snapshots
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clearsnapshot [--all]
-                [-t <snapshot_name>] [--] [<keyspaces>...]
-
-OPTIONS
-        --all
-            Removes all snapshots
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -t <snapshot_name>
-            Remove the snapshot with a given name
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces>...]
-            Remove snapshots from the given keyspaces
-
-
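For example, to remove a named snapshot from one keyspace (snapshot and keyspace names are placeholders):

nodetool clearsnapshot -t before_upgrade -- my_keyspace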
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/clientstats.html b/src/doc/4.0-alpha3/tools/nodetool/clientstats.html deleted file mode 100644 index 324565872..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/clientstats.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clientstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clientstats

-
-
-

Usage

-
NAME
-        nodetool clientstats - Print information about connected clients
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clientstats [--all]
-                [--by-protocol] [--clear-history]
-
-OPTIONS
-        --all
-            Lists all connections
-
-        --by-protocol
-            Lists most recent client connections by protocol version
-
-        --clear-history
-            Clear the history of connected clients
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
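For example, to list the most recent client connections grouped by protocol version:

nodetool clientstats --by-protocol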
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/compact.html b/src/doc/4.0-alpha3/tools/nodetool/compact.html deleted file mode 100644 index 95e26dde7..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/compact.html +++ /dev/null @@ -1,151 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compact" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compact

-
-
-

Usage

-
NAME
-        nodetool compact - Force a (major) compaction on one or more tables or
-        user-defined compaction on given SSTables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compact
-                [(-et <end_token> | --end-token <end_token>)] [(-s | --split-output)]
-                [(-st <start_token> | --start-token <start_token>)] [--user-defined]
-                [--] [<keyspace> <tables>...] or <SSTable file>...
-
-OPTIONS
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which compaction range ends
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s, --split-output
-            Use -s to not create a single big file
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the compaction range starts
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --user-defined
-            Use --user-defined to submit listed files for user-defined
-            compaction
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...] or <SSTable file>...
-            The keyspace followed by one or many tables or list of SSTable data
-            files when using --user-defined
-
-
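For example, to force a major compaction of a single table (keyspace and table names are placeholders):

nodetool compact my_keyspace my_table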
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/compactionhistory.html b/src/doc/4.0-alpha3/tools/nodetool/compactionhistory.html deleted file mode 100644 index 4b6288a90..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/compactionhistory.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionhistory" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionhistory

-
-
-

Usage

-
NAME
-        nodetool compactionhistory - Print history of compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionhistory
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
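For example, to print the compaction history in JSON format:

nodetool compactionhistory -F json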
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/compactionstats.html b/src/doc/4.0-alpha3/tools/nodetool/compactionstats.html deleted file mode 100644 index 09bb80318..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/compactionstats.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionstats

-
-
-

Usage

-
NAME
-        nodetool compactionstats - Print statistics on compactions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
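For example, to show compaction statistics with human readable sizes:

nodetool compactionstats -H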
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/decommission.html b/src/doc/4.0-alpha3/tools/nodetool/decommission.html deleted file mode 100644 index b0ac58cec..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/decommission.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "decommission" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

decommission

-
-
-

Usage

-
NAME
-        nodetool decommission - Decommission the *node I am connecting to*
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] decommission [(-f | --force)]
-
-OPTIONS
-        -f, --force
-            Force decommission of this node even when it reduces the number of
-            replicas to below configured RF
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/describecluster.html b/src/doc/4.0-alpha3/tools/nodetool/describecluster.html deleted file mode 100644 index 893c6258a..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/describecluster.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describecluster" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describecluster

-
-
-

Usage

-
NAME
-        nodetool describecluster - Print the name, snitch, partitioner and
-        schema version of a cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describecluster
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/describering.html b/src/doc/4.0-alpha3/tools/nodetool/describering.html deleted file mode 100644 index d76b40980..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/describering.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describering" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describering

-
-
-

Usage

-
NAME
-        nodetool describering - Shows the token ranges info of a given keyspace
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describering [--] <keyspace>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            The keyspace name
-
-
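For example, to show the token ranges of a single keyspace (the keyspace name is a placeholder):

nodetool describering my_keyspace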
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/disableauditlog.html b/src/doc/4.0-alpha3/tools/nodetool/disableauditlog.html deleted file mode 100644 index ccb696ef5..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/disableauditlog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableauditlog

-
-
-

Usage

-
NAME
-        nodetool disableauditlog - Disable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableauditlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/disableautocompaction.html b/src/doc/4.0-alpha3/tools/nodetool/disableautocompaction.html deleted file mode 100644 index ca23ca0fe..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/disableautocompaction.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableautocompaction

-
-
-

Usage

-
NAME
-        nodetool disableautocompaction - Disable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
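For example, to disable autocompaction for one table (keyspace and table names are placeholders):

nodetool disableautocompaction my_keyspace my_table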
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/disablebackup.html b/src/doc/4.0-alpha3/tools/nodetool/disablebackup.html deleted file mode 100644 index da953b896..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/disablebackup.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebackup

-
-
-

Usage

-
NAME
-        nodetool disablebackup - Disable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/disablebinary.html b/src/doc/4.0-alpha3/tools/nodetool/disablebinary.html deleted file mode 100644 index 024afd6c4..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/disablebinary.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebinary

-
-
-

Usage

-
NAME
-        nodetool disablebinary - Disable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/disablefullquerylog.html b/src/doc/4.0-alpha3/tools/nodetool/disablefullquerylog.html deleted file mode 100644 index c1fc12d8c..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/disablefullquerylog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool disablefullquerylog - Disable the full query log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablefullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/disablegossip.html b/src/doc/4.0-alpha3/tools/nodetool/disablegossip.html deleted file mode 100644 index dbbc00f63..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/disablegossip.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablegossip

-
-
-

Usage

-
NAME
-        nodetool disablegossip - Disable gossip (effectively marking the node
-        down)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/disablehandoff.html b/src/doc/4.0-alpha3/tools/nodetool/disablehandoff.html deleted file mode 100644 index 3cbf1ece6..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/disablehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehandoff

-
-
-

Usage

-
NAME
-        nodetool disablehandoff - Disable storing hinted handoffs
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/disablehintsfordc.html b/src/doc/4.0-alpha3/tools/nodetool/disablehintsfordc.html deleted file mode 100644 index 35ed978ad..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/disablehintsfordc.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool disablehintsfordc - Disable hints for a data center
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to disable
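A hypothetical invocation, assuming a second data center named dc2, could be:

```bash
# Stop storing hints destined for the (hypothetical) data center dc2
$ nodetool disablehintsfordc -- dc2
```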
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/disableoldprotocolversions.html b/src/doc/4.0-alpha3/tools/nodetool/disableoldprotocolversions.html deleted file mode 100644 index a7d041e74..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/disableoldprotocolversions.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool disableoldprotocolversions - Disable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/drain.html b/src/doc/4.0-alpha3/tools/nodetool/drain.html deleted file mode 100644 index e68670868..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/drain.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "drain" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

drain

-
-
-

Usage

-
NAME
-        nodetool drain - Drain the node (stop accepting writes and flush all
-        tables)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] drain
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/enableauditlog.html b/src/doc/4.0-alpha3/tools/nodetool/enableauditlog.html deleted file mode 100644 index a04104620..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/enableauditlog.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableauditlog

-
-
-

Usage

-
NAME
-        nodetool enableauditlog - Enable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableauditlog
-                [--excluded-categories <excluded_categories>]
-                [--excluded-keyspaces <excluded_keyspaces>]
-                [--excluded-users <excluded_users>]
-                [--included-categories <included_categories>]
-                [--included-keyspaces <included_keyspaces>]
-                [--included-users <included_users>] [--logger <logger>]
-
-OPTIONS
-        --excluded-categories <excluded_categories>
-            Comma separated list of Audit Log Categories to be excluded for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --excluded-keyspaces <excluded_keyspaces>
-            Comma separated list of keyspaces to be excluded for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --excluded-users <excluded_users>
-            Comma separated list of users to be excluded for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --included-categories <included_categories>
-            Comma separated list of Audit Log Categories to be included for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --included-keyspaces <included_keyspaces>
-            Comma separated list of keyspaces to be included for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --included-users <included_users>
-            Comma separated list of users to be included for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        --logger <logger>
-            Logger name to be used for AuditLogging. Default BinAuditLogger. If
-            not set the value from cassandra.yaml will be used
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
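For illustration only, a sketch of enabling the audit log with a few filters; the keyspace name and category list are assumptions, and defaults for anything omitted come from cassandra.yaml:

```bash
# Audit only DDL and DML statements against the (hypothetical) keyspace app,
# ignoring activity from the cassandra superuser
$ nodetool enableauditlog \
    --included-categories DDL,DML \
    --included-keyspaces app \
    --excluded-users cassandra
```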
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/enableautocompaction.html b/src/doc/4.0-alpha3/tools/nodetool/enableautocompaction.html deleted file mode 100644 index 9a22d21be..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/enableautocompaction.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableautocompaction

-
-
-

Usage

-
NAME
-        nodetool enableautocompaction - Enable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/enablebackup.html b/src/doc/4.0-alpha3/tools/nodetool/enablebackup.html deleted file mode 100644 index d0f4644e0..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/enablebackup.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebackup

-
-
-

Usage

-
NAME
-        nodetool enablebackup - Enable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/enablebinary.html b/src/doc/4.0-alpha3/tools/nodetool/enablebinary.html deleted file mode 100644 index 5559bdd7b..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/enablebinary.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebinary

-
-
-

Usage

-
NAME
-        nodetool enablebinary - Reenable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/enablefullquerylog.html b/src/doc/4.0-alpha3/tools/nodetool/enablefullquerylog.html deleted file mode 100644 index e92fa6b94..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/enablefullquerylog.html +++ /dev/null @@ -1,156 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool enablefullquerylog - Enable full query logging; defaults for
-        the options are configured in cassandra.yaml
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablefullquerylog
-                [--archive-command <archive_command>] [--blocking]
-                [--max-archive-retries <archive_retries>]
-                [--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-                [--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-        --archive-command <archive_command>
-            Command that will handle archiving rolled full query log files.
-            Format is "/path/to/script.sh %path" where %path will be replaced
-            with the file to archive
-
-        --blocking
-            Whether to block producers or drop samples if the queue is full.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --max-archive-retries <archive_retries>
-            Max number of archive retries.
-
-        --max-log-size <max_log_size>
-            How many bytes of log data to store before dropping segments. Might
-            not be respected if a log file hasn't rolled yet, since only rolled
-            files can be deleted.
-
-        --max-queue-weight <max_queue_weight>
-            Maximum number of bytes of query data to queue to disk before
-            blocking or dropping samples.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        --path <path>
-            Path to store the full query log at. Will have its contents
-            recursively deleted.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        --roll-cycle <roll_cycle>
-            How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-        -u <username>, --username <username>
-            Remote jmx agent username
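A minimal sketch, assuming a dedicated directory for the log (the path is illustrative and, as noted above, its contents will be recursively deleted):

```bash
# Enable full query logging into /var/lib/cassandra/fql, rolling the file hourly
$ nodetool enablefullquerylog --path /var/lib/cassandra/fql --roll-cycle HOURLY
```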
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/enablegossip.html b/src/doc/4.0-alpha3/tools/nodetool/enablegossip.html deleted file mode 100644 index 988960310..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/enablegossip.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablegossip

-
-
-

Usage

-
NAME
-        nodetool enablegossip - Reenable gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/enablehandoff.html b/src/doc/4.0-alpha3/tools/nodetool/enablehandoff.html deleted file mode 100644 index 08a389a8f..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/enablehandoff.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehandoff

-
-
-

Usage

-
NAME
-        nodetool enablehandoff - Reenable storing of future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/enablehintsfordc.html b/src/doc/4.0-alpha3/tools/nodetool/enablehintsfordc.html deleted file mode 100644 index 4fd9545ec..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/enablehintsfordc.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool enablehintsfordc - Enable hints for a data center that was
-        previously disabled
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to enable
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/enableoldprotocolversions.html b/src/doc/4.0-alpha3/tools/nodetool/enableoldprotocolversions.html deleted file mode 100644 index 466ab3c6b..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/enableoldprotocolversions.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool enableoldprotocolversions - Enable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/failuredetector.html b/src/doc/4.0-alpha3/tools/nodetool/failuredetector.html deleted file mode 100644 index be8cc46ff..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/failuredetector.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "failuredetector" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

failuredetector

-
-
-

Usage

-
NAME
-        nodetool failuredetector - Shows the failure detector information for
-        the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] failuredetector
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/flush.html b/src/doc/4.0-alpha3/tools/nodetool/flush.html deleted file mode 100644 index f2b3392f8..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/flush.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "flush" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

flush

-
-
-

Usage

-
NAME
-        nodetool flush - Flush one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] flush [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
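As a hypothetical example (keyspace and table names are assumptions):

```bash
# Flush the memtables of my_keyspace.users to disk as SSTables
$ nodetool flush -- my_keyspace users
```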
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/garbagecollect.html b/src/doc/4.0-alpha3/tools/nodetool/garbagecollect.html deleted file mode 100644 index faa5bafa9..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/garbagecollect.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "garbagecollect" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

garbagecollect

-
-
-

Usage

-
NAME
-        nodetool garbagecollect - Remove deleted data from one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] garbagecollect
-                [(-g <granularity> | --granularity <granularity>)]
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -g <granularity>, --granularity <granularity>
-            Granularity of garbage removal. ROW (default) removes deleted
-            partitions and rows; CELL also removes overwritten or deleted cells.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to clean up simultaneously, set to 0 to use all
-            available compaction threads. Defaults to 1 so that collections of
-            newer tables can see the data is deleted and also remove tombstones.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
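A hedged example, assuming a keyspace my_keyspace with a table users:

```bash
# Also remove overwritten/deleted cells, using two concurrent jobs
$ nodetool garbagecollect -g CELL -j 2 -- my_keyspace users
```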
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/gcstats.html b/src/doc/4.0-alpha3/tools/nodetool/gcstats.html deleted file mode 100644 index bfc8921fe..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/gcstats.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gcstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gcstats

-
-
-

Usage

-
NAME
-        nodetool gcstats - Print GC Statistics
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gcstats
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getbatchlogreplaythrottle.html b/src/doc/4.0-alpha3/tools/nodetool/getbatchlogreplaythrottle.html deleted file mode 100644 index 6b25c6ca3..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getbatchlogreplaythrottle.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool getbatchlogreplaythrottle - Print batchlog replay throttle in
-        KB/s. This is reduced proportionally to the number of nodes in the
-        cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getbatchlogreplaythrottle
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getcompactionthreshold.html b/src/doc/4.0-alpha3/tools/nodetool/getcompactionthreshold.html deleted file mode 100644 index f43131aec..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getcompactionthreshold.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool getcompactionthreshold - Print min and max compaction
-        thresholds for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthreshold [--]
-                <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace followed by a table
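For illustration, with assumed keyspace and table names:

```bash
# Show the min/max compaction thresholds configured for my_keyspace.users
$ nodetool getcompactionthreshold -- my_keyspace users
```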
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getcompactionthroughput.html b/src/doc/4.0-alpha3/tools/nodetool/getcompactionthroughput.html deleted file mode 100644 index 4218398fe..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getcompactionthroughput.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool getcompactionthroughput - Print the MB/s throughput cap for
-        compaction in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getconcurrency.html b/src/doc/4.0-alpha3/tools/nodetool/getconcurrency.html deleted file mode 100644 index a68555ce6..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getconcurrency.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrency

-
-
-

Usage

-
NAME
-        nodetool getconcurrency - Get maximum concurrency for processing stages
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrency [--]
-                [stage-names]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [stage-names]
-            Optional list of stage names; if omitted, all stages are displayed
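A sketch of both forms; the specific stage name is an assumption and may differ by Cassandra version:

```bash
# Show concurrency for all stages
$ nodetool getconcurrency

# Show concurrency for a single (hypothetical) stage name
$ nodetool getconcurrency -- MutationStage
```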
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getconcurrentcompactors.html b/src/doc/4.0-alpha3/tools/nodetool/getconcurrentcompactors.html deleted file mode 100644 index eaae59dd0..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getconcurrentcompactors.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool getconcurrentcompactors - Get the number of concurrent
-        compactors in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentcompactors
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getconcurrentviewbuilders.html b/src/doc/4.0-alpha3/tools/nodetool/getconcurrentviewbuilders.html deleted file mode 100644 index 5be14aa67..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getconcurrentviewbuilders.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool getconcurrentviewbuilders - Get the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentviewbuilders
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getendpoints.html b/src/doc/4.0-alpha3/tools/nodetool/getendpoints.html deleted file mode 100644 index b5ec2236e..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getendpoints.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getendpoints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getendpoints

-
-
-

Usage

-
NAME
-        nodetool getendpoints - Print the endpoints that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getendpoints [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find the endpoint
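A hypothetical lookup, where the keyspace, table, and partition key are all assumptions:

```bash
# List the replica endpoints that own partition key 'user42' in my_keyspace.users
$ nodetool getendpoints -- my_keyspace users user42
```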
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getinterdcstreamthroughput.html b/src/doc/4.0-alpha3/tools/nodetool/getinterdcstreamthroughput.html deleted file mode 100644 index f20c9f3e0..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getinterdcstreamthroughput.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getinterdcstreamthroughput - Print the Mb/s throughput cap for
-        inter-datacenter streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getinterdcstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getlogginglevels.html b/src/doc/4.0-alpha3/tools/nodetool/getlogginglevels.html deleted file mode 100644 index 8429b7863..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getlogginglevels.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getlogginglevels" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getlogginglevels

-
-
-

Usage

-
NAME
-        nodetool getlogginglevels - Get the runtime logging levels
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getlogginglevels
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getmaxhintwindow.html b/src/doc/4.0-alpha3/tools/nodetool/getmaxhintwindow.html deleted file mode 100644 index 525afe899..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getmaxhintwindow.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool getmaxhintwindow - Print the max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getmaxhintwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getreplicas.html b/src/doc/4.0-alpha3/tools/nodetool/getreplicas.html deleted file mode 100644 index 7e7b15fbc..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getreplicas.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getreplicas" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getreplicas

-
-
-

Usage

-
NAME
-        nodetool getreplicas - Print replicas for a given key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getreplicas [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find replicas
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/getseeds.html b/src/doc/4.0-alpha3/tools/nodetool/getseeds.html deleted file mode 100644 index 4b48e1085..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/getseeds.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getseeds

-
-
-

Usage

-
NAME
-        nodetool getseeds - Get the currently in-use seed node IP list,
-        excluding this node's own IP
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/getsstables.html b/src/doc/4.0-alpha3/tools/nodetool/getsstables.html
deleted file mode 100644
index 752a1477d..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/getsstables.html
+++ /dev/null
@@ -1,137 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "getsstables"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

getsstables

-
-
-

Usage

-
NAME
-        nodetool getsstables - Print the sstable filenames that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getsstables
-                [(-hf | --hex-format)] [--] <keyspace> <cfname> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hf, --hex-format
-            Specify the key in hexadecimal string format
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <cfname> <key>
-            The keyspace, the column family, and the key
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/getstreamthroughput.html b/src/doc/4.0-alpha3/tools/nodetool/getstreamthroughput.html
deleted file mode 100644
index 662ed3813..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/getstreamthroughput.html
+++ /dev/null
@@ -1,126 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "getstreamthroughput"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

getstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getstreamthroughput - Print the Mb/s throughput cap for
-        streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/gettimeout.html b/src/doc/4.0-alpha3/tools/nodetool/gettimeout.html
deleted file mode 100644
index a385be54b..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/gettimeout.html
+++ /dev/null
@@ -1,135 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "gettimeout"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

gettimeout

-
-
-

Usage

-
NAME
-        nodetool gettimeout - Print the timeout of the given type in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettimeout [--] <timeout_type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <timeout_type>
-            The timeout type, one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/gettraceprobability.html b/src/doc/4.0-alpha3/tools/nodetool/gettraceprobability.html
deleted file mode 100644
index a93e263b4..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/gettraceprobability.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "gettraceprobability"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

gettraceprobability

-
-
-

Usage

-
NAME
-        nodetool gettraceprobability - Print the current trace probability value
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettraceprobability
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/gossipinfo.html b/src/doc/4.0-alpha3/tools/nodetool/gossipinfo.html
deleted file mode 100644
index 7848c30ea..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/gossipinfo.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "gossipinfo"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

gossipinfo

-
-
-

Usage

-
NAME
-        nodetool gossipinfo - Shows the gossip information for the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gossipinfo
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/handoffwindow.html b/src/doc/4.0-alpha3/tools/nodetool/handoffwindow.html
deleted file mode 100644
index d059d72a2..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/handoffwindow.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "handoffwindow"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

handoffwindow

-
-
-

Usage

-
NAME
-        nodetool handoffwindow - Print current hinted handoff window
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] handoffwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/help.html b/src/doc/4.0-alpha3/tools/nodetool/help.html
deleted file mode 100644
index 1b7c1e000..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/help.html
+++ /dev/null
@@ -1,112 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "help"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

help

-
-
-

Usage

-
NAME
-        nodetool help - Display help information
-
-SYNOPSIS
-        nodetool help [--] [<command>...]
-
-OPTIONS
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <command>
-
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/import.html b/src/doc/4.0-alpha3/tools/nodetool/import.html
deleted file mode 100644
index 0c5e22b9d..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/import.html
+++ /dev/null
@@ -1,160 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "import"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

import

-
-
-

Usage

-
NAME
-        nodetool import - Import new SSTables to the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] import
-                [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-                [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-                [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-                <directory> ...
-
-OPTIONS
-        -c, --no-invalidate-caches
-            Don't invalidate the row cache when importing
-
-        -e, --extended-verify
-            Run an extended verify, verifying all values in the new sstables
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --keep-level
-            Keep the level on the new sstables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick import without verifying sstables, clearing row cache or
-            checking in which data directory to put the file
-
-        -r, --keep-repaired
-            Keep any repaired information from the sstables
-
-        -t, --no-tokens
-            Don't verify that all tokens in the new sstable are owned by the
-            current node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -v, --no-verify
-            Don't verify new sstables
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <directory> ...
-            The keyspace, table name and directories to import sstables from
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/info.html b/src/doc/4.0-alpha3/tools/nodetool/info.html
deleted file mode 100644
index 52d0d5741..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/info.html
+++ /dev/null
@@ -1,128 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "info"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

info

-
-
-

Usage

-
NAME
-        nodetool info - Print node information (uptime, load, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] info [(-T | --tokens)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -T, --tokens
-            Display all tokens
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/invalidatecountercache.html b/src/doc/4.0-alpha3/tools/nodetool/invalidatecountercache.html
deleted file mode 100644
index 04efd95e3..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/invalidatecountercache.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "invalidatecountercache"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

invalidatecountercache

-
-
-

Usage

-
NAME
-        nodetool invalidatecountercache - Invalidate the counter cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatecountercache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/invalidatekeycache.html b/src/doc/4.0-alpha3/tools/nodetool/invalidatekeycache.html
deleted file mode 100644
index 732c96364..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/invalidatekeycache.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "invalidatekeycache"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

invalidatekeycache

-
-
-

Usage

-
NAME
-        nodetool invalidatekeycache - Invalidate the key cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatekeycache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/invalidaterowcache.html b/src/doc/4.0-alpha3/tools/nodetool/invalidaterowcache.html
deleted file mode 100644
index e405d25e6..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/invalidaterowcache.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "invalidaterowcache"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

invalidaterowcache

-
-
-

Usage

-
NAME
-        nodetool invalidaterowcache - Invalidate the row cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidaterowcache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/join.html b/src/doc/4.0-alpha3/tools/nodetool/join.html
deleted file mode 100644
index 1e9e95287..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/join.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "join"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

join

-
-
-

Usage

-
NAME
-        nodetool join - Join the ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] join
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/listsnapshots.html b/src/doc/4.0-alpha3/tools/nodetool/listsnapshots.html
deleted file mode 100644
index 02137b7e9..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/listsnapshots.html
+++ /dev/null
@@ -1,126 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "listsnapshots"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

listsnapshots

-
-
-

Usage

-
NAME
-        nodetool listsnapshots - Lists all the snapshots along with the size on
-        disk and true size.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] listsnapshots
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/move.html b/src/doc/4.0-alpha3/tools/nodetool/move.html
deleted file mode 100644
index cd64a81b6..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/move.html
+++ /dev/null
@@ -1,133 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "move"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

move

-
-
-

Usage

-
NAME
-        nodetool move - Move node on the token ring to a new token
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] move [--] <new token>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <new token>
-            The new token.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/netstats.html b/src/doc/4.0-alpha3/tools/nodetool/netstats.html
deleted file mode 100644
index e7c3f6d91..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/netstats.html
+++ /dev/null
@@ -1,130 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "netstats"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

netstats

-
-
-

Usage

-
NAME
-        nodetool netstats - Print network information on provided host
-        (connecting node by default)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] netstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/nodetool.html b/src/doc/4.0-alpha3/tools/nodetool/nodetool.html
deleted file mode 100644
index 283c1ef67..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/nodetool.html
+++ /dev/null
@@ -1,244 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-parent: "Cassandra Tools"
-
-doc-title: "Nodetool"
-doc-header-links: '
-
-
-
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

Nodetool

-
-
-

Usage

-
-
usage: nodetool [(-pwf <passwordFilePath> | –password-file <passwordFilePath>)]
-
[(-u <username> | –username <username>)] [(-h <host> | –host <host>)]
-[(-pw <password> | –password <password>)] [(-pp | –print-port)]
-[(-p <port> | –port <port>)] <command> [<args>]
-
-

The most commonly used nodetool commands are:

-
-

assassinate - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode

-

bootstrap - Monitor/manage node’s bootstrap process

-

cleanup - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces

-

clearsnapshot - Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots

-

clientstats - Print information about connected clients

-

compact - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables

-

compactionhistory - Print history of compaction

-

compactionstats - Print statistics on compactions

-

decommission - Decommission the node I am connecting to

-

describecluster - Print the name, snitch, partitioner and schema version of a cluster

-

describering - Shows the token ranges info of a given keyspace

-

disableauditlog - Disable the audit log

-

disableautocompaction - Disable autocompaction for the given keyspace and table

-

disablebackup - Disable incremental backup

-

disablebinary - Disable native transport (binary protocol)

-

disablefullquerylog - Disable the full query log

-

disablegossip - Disable gossip (effectively marking the node down)

-

disablehandoff - Disable storing hinted handoffs

-

disablehintsfordc - Disable hints for a data center

-

disableoldprotocolversions - Disable old protocol versions

-

drain - Drain the node (stop accepting writes and flush all tables)

-

enableauditlog - Enable the audit log

-

enableautocompaction - Enable autocompaction for the given keyspace and table

-

enablebackup - Enable incremental backup

-

enablebinary - Reenable native transport (binary protocol)

-

enablefullquerylog - Enable full query logging, defaults for the options are configured in cassandra.yaml

-

enablegossip - Reenable gossip

-

enablehandoff - Reenable future hints storing on the current node

-

enablehintsfordc - Enable hints for a data center that was previsouly disabled

-

enableoldprotocolversions - Enable old protocol versions

-

failuredetector - Shows the failure detector information for the cluster

-

flush - Flush one or more tables

-

garbagecollect - Remove deleted data from one or more tables

-

gcstats - Print GC Statistics

-

getbatchlogreplaythrottle - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster.

-

getcompactionthreshold - Print min and max compaction thresholds for a given table

-

getcompactionthroughput - Print the MB/s throughput cap for compaction in the system

-

getconcurrency - Get maximum concurrency for processing stages

-

getconcurrentcompactors - Get the number of concurrent compactors in the system.

-

getconcurrentviewbuilders - Get the number of concurrent view builders in the system

-

getendpoints - Print the end points that owns the key

-

getinterdcstreamthroughput - Print the Mb/s throughput cap for inter-datacenter streaming in the system

-

getlogginglevels - Get the runtime logging levels

-

getmaxhintwindow - Print the max hint window in ms

-

getreplicas - Print replicas for a given key

-

getseeds - Get the currently in use seed node IP list excluding the node IP

-

getsstables - Print the sstable filenames that own the key

-

getstreamthroughput - Print the Mb/s throughput cap for streaming in the system

-

gettimeout - Print the timeout of the given type in ms

-

gettraceprobability - Print the current trace probability value

-

gossipinfo - Shows the gossip information for the cluster

-

handoffwindow - Print current hinted handoff window

-

help - Display help information

-

import - Import new SSTables to the system

-

info - Print node information (uptime, load, …)

-

invalidatecountercache - Invalidate the counter cache

-

invalidatekeycache - Invalidate the key cache

-

invalidaterowcache - Invalidate the row cache

-

join - Join the ring

-

listsnapshots - Lists all the snapshots along with the size on disk and true size.

-

move - Move node on the token ring to a new token

-

netstats - Print network information on provided host (connecting node by default)

-

pausehandoff - Pause hints delivery process

-

profileload - Low footprint profiling of activity for a period of time

-

proxyhistograms - Print statistic histograms for network operations

-

rangekeysample - Shows the sampled keys held across all keyspaces

-

rebuild - Rebuild data by streaming from other nodes (similarly to bootstrap)

-

rebuild_index - A full rebuild of native secondary indexes for a given table

-

refresh - Load newly placed SSTables to the system without restart

-

refreshsizeestimates - Refresh system.size_estimates

-

reloadlocalschema - Reload local node schema from system tables

-

reloadseeds - Reload the seed node list from the seed node provider

-

reloadssl - Signals Cassandra to reload SSL certificates

-

reloadtriggers - Reload trigger classes

-

relocatesstables - Relocates sstables to the correct disk

-

removenode - Show status of current node removal, force completion of pending removal or remove provided ID

-

repair - Repair one or more tables

-

repair_admin - list and fail incremental repair sessions

-

replaybatchlog - Kick off batchlog replay and wait for finish

-

resetfullquerylog - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX

-

resetlocalschema - Reset node’s local schema and resync

-

resumehandoff - Resume hints delivery process

-

ring - Print information about the token ring

-

scrub - Scrub (rebuild sstables for) one or more tables

-

setbatchlogreplaythrottle - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster.

-

setcachecapacity - Set global key, row, and counter cache capacities (in MB units)

-

setcachekeystosave - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable

-

setcompactionthreshold - Set min and max compaction thresholds for a given table

-

setcompactionthroughput - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling

-

setconcurrency - Set maximum concurrency for processing stage

-

setconcurrentcompactors - Set number of concurrent compactors in the system.

-

setconcurrentviewbuilders - Set the number of concurrent view builders in the system

-

sethintedhandoffthrottlekb - Set hinted handoff throttle in kb per second, per delivery thread.

-

setinterdcstreamthroughput - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling

-

setlogginglevel - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters.

-

setmaxhintwindow - Set the specified max hint window in ms

-

setstreamthroughput - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling

-

settimeout - Set the specified timeout in ms, or 0 to disable timeout

-

settraceprobability - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default

-

snapshot - Take a snapshot of specified keyspaces or a snapshot of the specified table

-

status - Print cluster information (state, load, IDs, …)

-

statusautocompaction - status of autocompaction of the given keyspace and table

-

statusbackup - Status of incremental backup

-

statusbinary - Status of native transport (binary protocol)

-

statusgossip - Status of gossip

-

statushandoff - Status of storing future hints on the current node

-

stop - Stop compaction

-

stopdaemon - Stop cassandra daemon

-

tablehistograms - Print statistic histograms for a given table

-

tablestats - Print statistics on tables

-

toppartitions - Sample and print the most active partitions

-

tpstats - Print usage statistics of thread pools

-

truncatehints - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified.

-

upgradesstables - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version)

-

verify - Verify (check data checksum for) one or more tables

-

version - Print cassandra version

-

viewbuildstatus - Show progress of a materialized view build

-
-

See ‘nodetool help <command>’ for more information on a specific command.

-
- - - - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/pausehandoff.html b/src/doc/4.0-alpha3/tools/nodetool/pausehandoff.html
deleted file mode 100644
index 3662cf20d..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/pausehandoff.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "pausehandoff"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

pausehandoff

-
-
-

Usage

-
NAME
-        nodetool pausehandoff - Pause hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] pausehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/profileload.html b/src/doc/4.0-alpha3/tools/nodetool/profileload.html
deleted file mode 100644
index ce70362f0..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/profileload.html
+++ /dev/null
@@ -1,144 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "profileload"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

profileload

-
-
-

Usage

-
NAME
-        nodetool profileload - Low footprint profiling of activity for a period
-        of time
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] profileload [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/proxyhistograms.html b/src/doc/4.0-alpha3/tools/nodetool/proxyhistograms.html
deleted file mode 100644
index c5a2c1d48..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/proxyhistograms.html
+++ /dev/null
@@ -1,126 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "proxyhistograms"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

proxyhistograms

-
-
-

Usage

-
NAME
-        nodetool proxyhistograms - Print statistic histograms for network
-        operations
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] proxyhistograms
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/rangekeysample.html b/src/doc/4.0-alpha3/tools/nodetool/rangekeysample.html
deleted file mode 100644
index c1d8fd0e5..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/rangekeysample.html
+++ /dev/null
@@ -1,126 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "rangekeysample"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

rangekeysample

-
-
-

Usage

-
NAME
-        nodetool rangekeysample - Shows the sampled keys held across all
-        keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rangekeysample
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/rebuild.html b/src/doc/4.0-alpha3/tools/nodetool/rebuild.html
deleted file mode 100644
index 68f38f059..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/rebuild.html
+++ /dev/null
@@ -1,150 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "rebuild"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

rebuild

-
-
-

Usage

-
NAME
-        nodetool rebuild - Rebuild data by streaming from other nodes (similarly
-        to bootstrap)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild
-                [(-ks <specific_keyspace> | --keyspace <specific_keyspace>)]
-                [(-s <specific_sources> | --sources <specific_sources>)]
-                [(-ts <specific_tokens> | --tokens <specific_tokens>)] [--]
-                <src-dc-name>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -ks <specific_keyspace>, --keyspace <specific_keyspace>
-            Use -ks to rebuild specific keyspace.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <specific_sources>, --sources <specific_sources>
-            Use -s to specify hosts that this node should stream from when -ts
-            is used. Multiple hosts should be separated using commas (e.g.
-            127.0.0.1,127.0.0.2,...)
-
-        -ts <specific_tokens>, --tokens <specific_tokens>
-            Use -ts to rebuild specific token ranges, in the format of "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]".
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <src-dc-name>
-            Name of DC from which to select sources for streaming. By default,
-            pick any DC
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/rebuild_index.html b/src/doc/4.0-alpha3/tools/nodetool/rebuild_index.html
deleted file mode 100644
index 4319ea0e8..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/rebuild_index.html
+++ /dev/null
@@ -1,135 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "rebuild_index"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

rebuild_index

-
-
-

Usage

-
NAME
-        nodetool rebuild_index - A full rebuild of native secondary indexes for
-        a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild_index [--] <keyspace>
-                <table> <indexName...>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <indexName...>
-            The keyspace and table name followed by a list of index names
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/refresh.html b/src/doc/4.0-alpha3/tools/nodetool/refresh.html
deleted file mode 100644
index a1aae25b3..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/refresh.html
+++ /dev/null
@@ -1,135 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "refresh"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

refresh

-
-
-

Usage

-
NAME
-        nodetool refresh - Load newly placed SSTables to the system without
-        restart
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refresh [--] <keyspace>
-                <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table>
-            The keyspace and table name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/refreshsizeestimates.html b/src/doc/4.0-alpha3/tools/nodetool/refreshsizeestimates.html
deleted file mode 100644
index 1670497c5..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/refreshsizeestimates.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "refreshsizeestimates"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

refreshsizeestimates

-
-
-

Usage

-
NAME
-        nodetool refreshsizeestimates - Refresh system.size_estimates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refreshsizeestimates
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/reloadlocalschema.html b/src/doc/4.0-alpha3/tools/nodetool/reloadlocalschema.html
deleted file mode 100644
index 22f58758a..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/reloadlocalschema.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "reloadlocalschema"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

reloadlocalschema

-
-
-

Usage

-
NAME
-        nodetool reloadlocalschema - Reload local node schema from system tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/reloadseeds.html b/src/doc/4.0-alpha3/tools/nodetool/reloadseeds.html
deleted file mode 100644
index 55609f7cb..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/reloadseeds.html
+++ /dev/null
@@ -1,126 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "reloadseeds"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

reloadseeds

-
-
-

Usage

-
NAME
-        nodetool reloadseeds - Reload the seed node list from the seed node
-        provider
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/reloadssl.html b/src/doc/4.0-alpha3/tools/nodetool/reloadssl.html
deleted file mode 100644
index d593ecac0..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/reloadssl.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "reloadssl"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

reloadssl

-
-
-

Usage

-
NAME
-        nodetool reloadssl - Signals Cassandra to reload SSL certificates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadssl
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file
diff --git a/src/doc/4.0-alpha3/tools/nodetool/reloadtriggers.html b/src/doc/4.0-alpha3/tools/nodetool/reloadtriggers.html
deleted file mode 100644
index ea5790293..000000000
--- a/src/doc/4.0-alpha3/tools/nodetool/reloadtriggers.html
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: docpage
-
-title: "Documentation"
-
-is_homepage: false
-is_sphinx_doc: true
-
-doc-title: "reloadtriggers"
-doc-header-links: '
-
-'
-doc-search-path: "../../search.html"
-
-extra-footer: '
-
-'
-
----
-
-
- -
-
-
- -
-

reloadtriggers

-
-
-

Usage

-
NAME
-        nodetool reloadtriggers - Reload trigger classes
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadtriggers
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
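A minimal example invocation, assuming default JMX connection settings:

```bash
# Re-scan the triggers directory and reload trigger classes
nodetool reloadtriggers
```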
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/relocatesstables.html b/src/doc/4.0-alpha3/tools/nodetool/relocatesstables.html deleted file mode 100644 index e711a1194..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/relocatesstables.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "relocatesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

relocatesstables

-
-
-

Usage

-
NAME
-        nodetool relocatesstables - Relocates sstables to the correct disk
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] relocatesstables
-                [(-j <jobs> | --jobs <jobs>)] [--] <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to relocate simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table>
-            The keyspace and table name
-
-
-
-
-
- - - - -
-
-
-
-
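A sketch of a possible invocation; the keyspace and table names are hypothetical:

```bash
# Move sstables of a hypothetical table back onto the correct data directories,
# running two relocation jobs in parallel
nodetool relocatesstables -j 2 -- my_keyspace my_table
```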
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/removenode.html b/src/doc/4.0-alpha3/tools/nodetool/removenode.html deleted file mode 100644 index 01894677d..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/removenode.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "removenode" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

removenode

-
-
-

Usage

-
NAME
-        nodetool removenode - Show status of current node removal, force
-        completion of pending removal or remove provided ID
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] removenode [--]
-                <status>|<force>|<ID>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <status>|<force>|<ID>
-            Show status of current node removal, force completion of pending
-            removal, or remove provided ID
-
-
-
-
-
- - - - -
-
-
-
-
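Two illustrative invocations; the host ID shown is a made-up UUID, not one taken from this page:

```bash
# Check progress of a removal that is already running
nodetool removenode status

# Remove a dead node by its host ID (a hypothetical UUID from 'nodetool status')
nodetool removenode 192d1f27-5bdd-4d47-89b5-7e0c38e53f91
```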
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/repair.html b/src/doc/4.0-alpha3/tools/nodetool/repair.html deleted file mode 100644 index cc6015a06..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/repair.html +++ /dev/null @@ -1,198 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair

-
-
-

Usage

-
NAME
-        nodetool repair - Repair one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair
-                [(-dc <specific_dc> | --in-dc <specific_dc>)...]
-                [(-dcpar | --dc-parallel)] [(-et <end_token> | --end-token <end_token>)]
-                [(-force | --force)] [(-full | --full)]
-                [(-hosts <specific_host> | --in-hosts <specific_host>)...]
-                [(-j <job_threads> | --job-threads <job_threads>)]
-                [(-local | --in-local-dc)] [(-os | --optimise-streams)] [(-pl | --pull)]
-                [(-pr | --partitioner-range)] [(-prv | --preview)]
-                [(-seq | --sequential)]
-                [(-st <start_token> | --start-token <start_token>)] [(-tr | --trace)]
-                [(-vd | --validate)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -dc <specific_dc>, --in-dc <specific_dc>
-            Use -dc to repair specific datacenters
-
-        -dcpar, --dc-parallel
-            Use -dcpar to repair data centers in parallel.
-
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which repair range ends
-
-        -force, --force
-            Use -force to filter out down endpoints
-
-        -full, --full
-            Use -full to issue a full repair.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hosts <specific_host>, --in-hosts <specific_host>
-            Use -hosts to repair specific hosts
-
-        -j <job_threads>, --job-threads <job_threads>
-            Number of threads to run repair jobs. Usually this means number of
-            CFs to repair concurrently. WARNING: increasing this puts more load
-            on repairing nodes, so be careful. (default: 1, max: 4)
-
-        -local, --in-local-dc
-            Use -local to only repair against nodes in the same datacenter
-
-        -os, --optimise-streams
-            Use --optimise-streams to try to reduce the number of streams we do
-            (EXPERIMENTAL, see CASSANDRA-3200).
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pl, --pull
-            Use --pull to perform a one way repair where data is only streamed
-            from a remote node to this node.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pr, --partitioner-range
-            Use -pr to repair only the first range returned by the partitioner
-
-        -prv, --preview
-            Determine ranges and amount of data to be streamed, but don't
-            actually perform repair
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -seq, --sequential
-            Use -seq to carry out a sequential repair
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the repair range starts
-
-        -tr, --trace
-            Use -tr to trace the repair. Traces are logged to
-            system_traces.events.
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -vd, --validate
-            Checks that repaired data is in sync between nodes. Out of sync
-            repaired data indicates a full repair should be run.
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
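Two illustrative invocations; `my_keyspace` is a hypothetical keyspace name:

```bash
# Full repair of a hypothetical keyspace
nodetool repair -full my_keyspace

# Repair only the primary token ranges owned by this node
nodetool repair -pr my_keyspace
```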
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/repair_admin.html b/src/doc/4.0-alpha3/tools/nodetool/repair_admin.html deleted file mode 100644 index 8e0dbdbf9..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/repair_admin.html +++ /dev/null @@ -1,139 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair_admin" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair_admin

-
-
-

Usage

-
NAME
-        nodetool repair_admin - list and fail incremental repair sessions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair_admin [(-a | --all)]
-                [(-f | --force)] [(-l | --list)] [(-x <cancel> | --cancel <cancel>)]
-
-OPTIONS
-        -a, --all
-            include completed and failed sessions
-
-        -f, --force
-            cancel repair session from a node other than the repair coordinator.
-            Attempting to cancel FINALIZED or FAILED sessions is an error.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --list
-            list repair sessions (default behavior)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -x <cancel>, --cancel <cancel>
-            cancel an incremental repair session
-
-
-
-
-
- - - - -
-
-
-
-
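An illustrative session listing and cancellation; the session ID is a made-up UUID:

```bash
# List active incremental repair sessions
nodetool repair_admin --list

# Cancel a specific session by its ID (hypothetical UUID)
nodetool repair_admin --cancel 5f1d9a80-1f6e-11ea-978f-2e728ce88125
```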
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/replaybatchlog.html b/src/doc/4.0-alpha3/tools/nodetool/replaybatchlog.html deleted file mode 100644 index 9bac241fc..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/replaybatchlog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "replaybatchlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

replaybatchlog

-
-
-

Usage

-
NAME
-        nodetool replaybatchlog - Kick off batchlog replay and wait for finish
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] replaybatchlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
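A minimal example invocation with default JMX settings:

```bash
# Trigger batchlog replay now and block until it finishes
nodetool replaybatchlog
```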
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/resetfullquerylog.html b/src/doc/4.0-alpha3/tools/nodetool/resetfullquerylog.html deleted file mode 100644 index 596a398d0..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/resetfullquerylog.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetfullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetfullquerylog

-
-
-

Usage

-
NAME
-        nodetool resetfullquerylog - Stop the full query log and clean files in
-        the configured full query log directory from cassandra.yaml as well as
-        JMX
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetfullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
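A minimal example invocation:

```bash
# Stop full query logging and delete the log files in the configured directory
nodetool resetfullquerylog
```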
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/resetlocalschema.html b/src/doc/4.0-alpha3/tools/nodetool/resetlocalschema.html deleted file mode 100644 index 4242a2a0f..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/resetlocalschema.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetlocalschema

-
-
-

Usage

-
NAME
-        nodetool resetlocalschema - Reset node's local schema and resync
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
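A minimal example invocation:

```bash
# Drop the node's local schema and resync it from the rest of the cluster
nodetool resetlocalschema
```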
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/resumehandoff.html b/src/doc/4.0-alpha3/tools/nodetool/resumehandoff.html deleted file mode 100644 index 46511648a..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/resumehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resumehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resumehandoff

-
-
-

Usage

-
NAME
-        nodetool resumehandoff - Resume hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resumehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
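A minimal example invocation:

```bash
# Resume hint delivery after a previous 'nodetool pausehandoff'
nodetool resumehandoff
```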
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/ring.html b/src/doc/4.0-alpha3/tools/nodetool/ring.html deleted file mode 100644 index 3025d881f..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/ring.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "ring" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

ring

-
-
-

Usage

-
NAME
-        nodetool ring - Print information about the token ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] ring [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            Specify a keyspace for accurate ownership information (topology
-            awareness)
-
-
-
-
-
- - - - -
-
-
-
-
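A sketch of a possible invocation; the keyspace name is hypothetical:

```bash
# Print the token ring with ownership calculated for a hypothetical keyspace,
# resolving node IPs to hostnames
nodetool ring -r -- my_keyspace
```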
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/scrub.html b/src/doc/4.0-alpha3/tools/nodetool/scrub.html deleted file mode 100644 index b2523f8af..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/scrub.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "scrub" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

scrub

-
-
-

Usage

-
NAME
-        nodetool scrub - Scrub (rebuild sstables for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] scrub
-                [(-j <jobs> | --jobs <jobs>)] [(-n | --no-validate)]
-                [(-ns | --no-snapshot)] [(-r | --reinsert-overflowed-ttl)]
-                [(-s | --skip-corrupted)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to scrub simultaneously, set to 0 to use all
-            available compaction threads
-
-        -n, --no-validate
-            Do not validate columns using column validator
-
-        -ns, --no-snapshot
-            Scrubbed CFs will be snapshotted first, if disableSnapshot is false.
-            (default false)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --reinsert-overflowed-ttl
-            Rewrites rows with overflowed expiration date affected by
-            CASSANDRA-14092 with the maximum supported expiration date of
-            2038-01-19T03:14:06+00:00. The rows are rewritten with the original
-            timestamp incremented by one millisecond to override/supersede any
-            potential tombstone that may have been generated during compaction
-            of the affected rows.
-
-        -s, --skip-corrupted
-            Skip corrupted partitions even when scrubbing counter tables.
-            (default false)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
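A sketch of a possible invocation; the keyspace and table names are hypothetical:

```bash
# Rebuild the sstables of a hypothetical table, skipping corrupted partitions
# and not taking a pre-scrub snapshot
nodetool scrub -s -ns -- my_keyspace my_table
```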
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setbatchlogreplaythrottle.html b/src/doc/4.0-alpha3/tools/nodetool/setbatchlogreplaythrottle.html deleted file mode 100644 index 05177e909..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setbatchlogreplaythrottle.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool setbatchlogreplaythrottle - Set batchlog replay throttle in KB
-        per second, or 0 to disable throttling. This will be reduced
-        proportionally to the number of nodes in the cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setbatchlogreplaythrottle [--]
-                <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_kb_per_sec>
-            Value in KB per second, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
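An illustrative invocation with an assumed throttle value:

```bash
# Throttle batchlog replay to 1024 KB/s (0 would disable throttling)
nodetool setbatchlogreplaythrottle 1024
```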
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setcachecapacity.html b/src/doc/4.0-alpha3/tools/nodetool/setcachecapacity.html deleted file mode 100644 index 45c6b8f12..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setcachecapacity.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachecapacity" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachecapacity

-
-
-

Usage

-
NAME
-        nodetool setcachecapacity - Set global key, row, and counter cache
-        capacities (in MB units)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachecapacity [--]
-                <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-            Key cache, row cache, and counter cache (in MB)
-
-
-
-
-
- - - - -
-
-
-
-
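An illustrative invocation with assumed capacities:

```bash
# Key cache 100 MB, row cache disabled (0), counter cache 50 MB
nodetool setcachecapacity 100 0 50
```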
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setcachekeystosave.html b/src/doc/4.0-alpha3/tools/nodetool/setcachekeystosave.html deleted file mode 100644 index f23c8358c..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setcachekeystosave.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachekeystosave" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachekeystosave

-
-
-

Usage

-
NAME
-        nodetool setcachekeystosave - Set number of keys saved by each cache for
-        faster post-restart warmup. 0 to disable
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachekeystosave [--]
-                <key-cache-keys-to-save> <row-cache-keys-to-save>
-                <counter-cache-keys-to-save>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <key-cache-keys-to-save> <row-cache-keys-to-save>
-        <counter-cache-keys-to-save>
-            The number of keys saved by each cache. 0 to disable
-
-
-
-
-
- - - - -
-
-
-
-
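An illustrative invocation with assumed values:

```bash
# Save 100 key-cache keys, no row-cache keys and 50 counter-cache keys
# for faster warmup after a restart
nodetool setcachekeystosave 100 0 50
```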
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setcompactionthreshold.html b/src/doc/4.0-alpha3/tools/nodetool/setcompactionthreshold.html deleted file mode 100644 index d36fb86f4..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setcompactionthreshold.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool setcompactionthreshold - Set min and max compaction thresholds
-        for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthreshold [--]
-                <keyspace> <table> <minthreshold> <maxthreshold>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <minthreshold> <maxthreshold>
-            The keyspace, the table, min and max threshold
-
-
-
-
-
- - - - -
-
-
-
-
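A sketch of a possible invocation; the keyspace, table and thresholds are hypothetical:

```bash
# Set the min/max compaction thresholds of a hypothetical table to 4 and 32
nodetool setcompactionthreshold my_keyspace my_table 4 32
```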
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setcompactionthroughput.html b/src/doc/4.0-alpha3/tools/nodetool/setcompactionthroughput.html deleted file mode 100644 index 852bbfd02..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setcompactionthroughput.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool setcompactionthroughput - Set the MB/s throughput cap for
-        compaction in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in MB, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
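An illustrative invocation with an assumed cap:

```bash
# Cap compaction throughput at 64 MB/s (0 disables throttling)
nodetool setcompactionthroughput 64
```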
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setconcurrency.html b/src/doc/4.0-alpha3/tools/nodetool/setconcurrency.html deleted file mode 100644 index 790fccb17..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setconcurrency.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrency

-
-
-

Usage

-
NAME
-        nodetool setconcurrency - Set maximum concurrency for processing stage
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrency [--]
-                <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-                <maximum-concurrency>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-        <maximum-concurrency>
-            Set concurrency for processing stage
-
-
-
-
-
- - - - -
-
-
-
-
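A sketch of a possible invocation; `MutationStage` is an assumed stage name, not one listed on this page:

```bash
# Raise the maximum concurrency of an assumed processing stage
nodetool setconcurrency MutationStage 64
```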
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setconcurrentcompactors.html b/src/doc/4.0-alpha3/tools/nodetool/setconcurrentcompactors.html deleted file mode 100644 index 205a394d7..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setconcurrentcompactors.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool setconcurrentcompactors - Set number of concurrent compactors
-        in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentcompactors [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Number of concurrent compactors, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
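An illustrative invocation:

```bash
# Allow four compactions to run concurrently
nodetool setconcurrentcompactors 4
```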
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setconcurrentviewbuilders.html b/src/doc/4.0-alpha3/tools/nodetool/setconcurrentviewbuilders.html deleted file mode 100644 index 1ccbc1dd1..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setconcurrentviewbuilders.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool setconcurrentviewbuilders - Set the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentviewbuilders [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Number of concurrent view builders, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
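An illustrative invocation:

```bash
# Allow two materialized-view builders to run concurrently
nodetool setconcurrentviewbuilders 2
```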
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/sethintedhandoffthrottlekb.html b/src/doc/4.0-alpha3/tools/nodetool/sethintedhandoffthrottlekb.html deleted file mode 100644 index 4ef2cfba5..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/sethintedhandoffthrottlekb.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sethintedhandoffthrottlekb" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sethintedhandoffthrottlekb

-
-
-

Usage

-
NAME
-        nodetool sethintedhandoffthrottlekb - Set hinted handoff throttle in kb
-        per second, per delivery thread.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sethintedhandoffthrottlekb
-                [--] <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_kb_per_sec>
-            Value in KB per second
-
-
-
-
-
- - - - -
-
-
-
-
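An illustrative invocation with an assumed throttle value:

```bash
# Throttle hinted handoff to 2048 KB/s per delivery thread
nodetool sethintedhandoffthrottlekb 2048
```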
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setinterdcstreamthroughput.html b/src/doc/4.0-alpha3/tools/nodetool/setinterdcstreamthroughput.html deleted file mode 100644 index bab7060ec..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setinterdcstreamthroughput.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setinterdcstreamthroughput - Set the Mb/s throughput cap for
-        inter-datacenter streaming in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setinterdcstreamthroughput
-                [--] <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
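An illustrative invocation with an assumed cap:

```bash
# Cap inter-datacenter streaming at 100 Mb/s (0 disables throttling)
nodetool setinterdcstreamthroughput 100
```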
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setlogginglevel.html b/src/doc/4.0-alpha3/tools/nodetool/setlogginglevel.html deleted file mode 100644 index c09ebcf8e..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setlogginglevel.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setlogginglevel" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setlogginglevel

-
-
-

Usage

-
NAME
-        nodetool setlogginglevel - Set the log level threshold for a given
-        component or class. Will reset to the initial configuration if called
-        with no parameters.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setlogginglevel [--]
-                <component|class> <level>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <component|class> <level>
-            The component or class to change the level for and the log level
-            threshold to set. Will reset to initial level if omitted. Available
-            components: bootstrap, compaction, repair, streaming, cql, ring
-
-
-
-
-
- - - - -
-
-
-
-
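An illustrative invocation using the `compaction` component listed above:

```bash
# Turn on DEBUG logging for the compaction component
nodetool setlogginglevel compaction DEBUG

# Reset all logging levels back to the initial configuration
nodetool setlogginglevel
```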
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setmaxhintwindow.html b/src/doc/4.0-alpha3/tools/nodetool/setmaxhintwindow.html deleted file mode 100644 index 51313009b..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setmaxhintwindow.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool setmaxhintwindow - Set the specified max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setmaxhintwindow [--]
-                <value_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_ms>
-            Value of maxhintwindow in ms
-
-
-
-
-
- - - - -
-
-
-
-
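An illustrative invocation with an assumed window:

```bash
# Keep generating hints for a down node for up to 3 hours (value in ms)
nodetool setmaxhintwindow 10800000
```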
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/setstreamthroughput.html b/src/doc/4.0-alpha3/tools/nodetool/setstreamthroughput.html deleted file mode 100644 index 4ad9ca4c9..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/setstreamthroughput.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setstreamthroughput - Set the Mb/s throughput cap for streaming
-        in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setstreamthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
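An illustrative invocation with an assumed cap:

```bash
# Cap streaming throughput at 200 Mb/s (0 disables throttling)
nodetool setstreamthroughput 200
```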
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/settimeout.html b/src/doc/4.0-alpha3/tools/nodetool/settimeout.html deleted file mode 100644 index acd9e970e..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/settimeout.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settimeout

-
-
-

Usage

-
NAME
-        nodetool settimeout - Set the specified timeout in ms, or 0 to disable
-        timeout
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settimeout [--] <timeout_type>
-                <timeout_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <timeout_type> <timeout_in_ms>
-            Timeout type followed by value in ms (0 disables socket streaming
-            timeout). Type should be one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
-
-
-
-
- - - - -
-
-
-
-
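An illustrative invocation using the `read` timeout type listed above:

```bash
# Set the read timeout to 5000 ms
nodetool settimeout read 5000
```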
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/settraceprobability.html b/src/doc/4.0-alpha3/tools/nodetool/settraceprobability.html deleted file mode 100644 index 46dd0ec2a..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/settraceprobability.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settraceprobability

-
-
-

Usage

-
NAME
-        nodetool settraceprobability - Sets the probability for tracing any
-        given request to value. 0 disables, 1 enables for all requests, 0 is the
-        default
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settraceprobability [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Trace probability between 0 and 1 (ex: 0.2)
-
-
-
-
-
- - - - -
-
-
-
-
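An illustrative invocation with an assumed probability:

```bash
# Trace roughly 1% of requests (0 disables tracing, 1 traces every request)
nodetool settraceprobability 0.01
```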
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/snapshot.html b/src/doc/4.0-alpha3/tools/nodetool/snapshot.html deleted file mode 100644 index 5d99bfa23..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/snapshot.html +++ /dev/null @@ -1,152 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "snapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

snapshot

-
-
-

Usage

-
NAME
-        nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-        of the specified table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] snapshot
-                [(-cf <table> | --column-family <table> | --table <table>)]
-                [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-                [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-        -cf <table>, --column-family <table>, --table <table>
-            The table name (you must specify one and only one keyspace for using
-            this option)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-            The list of Keyspace.table to take snapshot.(you must not specify
-            only keyspace)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -sf, --skip-flush
-            Do not flush memtables before snapshotting (snapshot will not
-            contain unflushed data)
-
-        -t <tag>, --tag <tag>
-            The name of the snapshot
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces...>]
-            List of keyspaces. By default, all keyspaces
-
-
-
-
-
- - - - -
-
-
-
-
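A sketch of a possible invocation; the tag and keyspace name are hypothetical:

```bash
# Snapshot a hypothetical keyspace under an explicit tag
nodetool snapshot -t pre_upgrade_backup -- my_keyspace
```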
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/status.html b/src/doc/4.0-alpha3/tools/nodetool/status.html deleted file mode 100644 index 633f3081b..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/status.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "status" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

status

-
-
-

Usage

-
NAME
-        nodetool status - Print cluster information (state, load, IDs, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] status [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace>]
-            The keyspace name
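A hedged usage sketch, not taken from the original page; the host and keyspace names are placeholders:

```bash
# Cluster state for the whole ring, showing hostnames instead of IPs
nodetool --host 127.0.0.1 status --resolve-ip

# Restrict the report to one hypothetical keyspace
nodetool status my_keyspace
```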
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/statusautocompaction.html b/src/doc/4.0-alpha3/tools/nodetool/statusautocompaction.html deleted file mode 100644 index 8f120d679..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/statusautocompaction.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusautocompaction

-
-
-

Usage

-
NAME
-        nodetool statusautocompaction - status of autocompaction of the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusautocompaction
-                [(-a | --all)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --all
-            Show auto compaction status for each keyspace/table
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
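A hedged sketch of typical invocations using only the flags documented above; keyspace and table names are placeholders:

```bash
# Auto-compaction status for every keyspace/table
nodetool statusautocompaction --all

# Status for one hypothetical keyspace and two of its tables
nodetool statusautocompaction -- my_keyspace table_a table_b
```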
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/statusbackup.html b/src/doc/4.0-alpha3/tools/nodetool/statusbackup.html deleted file mode 100644 index 1583e9871..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/statusbackup.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbackup

-
-
-

Usage

-
NAME
-        nodetool statusbackup - Status of incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/statusbinary.html b/src/doc/4.0-alpha3/tools/nodetool/statusbinary.html deleted file mode 100644 index 78dca80fe..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/statusbinary.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbinary

-
-
-

Usage

-
NAME
-        nodetool statusbinary - Status of native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/statusgossip.html b/src/doc/4.0-alpha3/tools/nodetool/statusgossip.html deleted file mode 100644 index d67c33b9e..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/statusgossip.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusgossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusgossip

-
-
-

Usage

-
NAME
-        nodetool statusgossip - Status of gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusgossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/statushandoff.html b/src/doc/4.0-alpha3/tools/nodetool/statushandoff.html deleted file mode 100644 index 22060194a..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/statushandoff.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statushandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statushandoff

-
-
-

Usage

-
NAME
-        nodetool statushandoff - Status of storing future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statushandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/stop.html b/src/doc/4.0-alpha3/tools/nodetool/stop.html deleted file mode 100644 index eed9bf20b..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/stop.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stop" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stop

-
-
-

Usage

-
NAME
-        nodetool stop - Stop compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stop
-                [(-id <compactionId> | --compaction-id <compactionId>)] [--] <compaction
-                type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -id <compactionId>, --compaction-id <compactionId>
-            Use -id to stop a compaction by the specified id. Ids can be found
-            in the transaction log files whose name starts with compaction_,
-            located in the table transactions folder.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <compaction type>
-            Supported types are COMPACTION, VALIDATION, CLEANUP, SCRUB,
-            UPGRADE_SSTABLES, INDEX_BUILD, TOMBSTONE_COMPACTION, ANTICOMPACTION,
-            VERIFY, VIEW_BUILD, INDEX_SUMMARY, RELOCATE, GARBAGE_COLLECT
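For illustration (not part of the original page), a hedged sketch; the compaction id below is a placeholder you would read from a compaction_* transaction log file as described above:

```bash
# Stop every running compaction of one of the supported types listed above
nodetool stop -- VALIDATION

# Stop a single compaction by its id (placeholder UUID)
nodetool stop --compaction-id 88f8a8d0-aa2a-11e8-af27-091830ac5256
```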
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/stopdaemon.html b/src/doc/4.0-alpha3/tools/nodetool/stopdaemon.html deleted file mode 100644 index e64d8946a..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/stopdaemon.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stopdaemon" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stopdaemon

-
-
-

Usage

-
NAME
-        nodetool stopdaemon - Stop cassandra daemon
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stopdaemon
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/tablehistograms.html b/src/doc/4.0-alpha3/tools/nodetool/tablehistograms.html deleted file mode 100644 index 58e344ff3..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/tablehistograms.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablehistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablehistograms

-
-
-

Usage

-
NAME
-        nodetool tablehistograms - Print statistic histograms for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablehistograms [--]
-                [<keyspace> <table> | <keyspace.table>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <table> | <keyspace.table>]
-            The keyspace and table name
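A hedged sketch showing both invocation forms from the synopsis; the names are placeholders:

```bash
# Keyspace and table given separately, or as a single keyspace.table argument
nodetool tablehistograms my_keyspace my_table
nodetool tablehistograms my_keyspace.my_table
```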
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/tablestats.html b/src/doc/4.0-alpha3/tools/nodetool/tablestats.html deleted file mode 100644 index 27012788a..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/tablestats.html +++ /dev/null @@ -1,169 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablestats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablestats

-
-
-

Usage

-
NAME
-        nodetool tablestats - Print statistics on tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablestats
-                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]
-                [(-s <sort_key> | --sort <sort_key>)] [(-t <top> | --top <top>)] [--]
-                [<keyspace.table>...]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -i
-            Ignore the list of tables and display the remaining tables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <sort_key>, --sort <sort_key>
-            Sort tables by specified sort key
-            (average_live_cells_per_slice_last_five_minutes,
-            average_tombstones_per_slice_last_five_minutes,
-            bloom_filter_false_positives, bloom_filter_false_ratio,
-            bloom_filter_off_heap_memory_used, bloom_filter_space_used,
-            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,
-            compacted_partition_minimum_bytes,
-            compression_metadata_off_heap_memory_used, dropped_mutations,
-            full_name, index_summary_off_heap_memory_used, local_read_count,
-            local_read_latency_ms, local_write_latency_ms,
-            maximum_live_cells_per_slice_last_five_minutes,
-            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,
-            memtable_data_size, memtable_off_heap_memory_used,
-            memtable_switch_count, number_of_partitions_estimate,
-            off_heap_memory_used_total, pending_flushes, percent_repaired,
-            read_latency, reads, space_used_by_snapshots_total, space_used_live,
-            space_used_total, sstable_compression_ratio, sstable_count,
-            table_name, write_latency, writes)
-
-        -t <top>, --top <top>
-            Show only the top K tables for the sort key (specify the number K of
-            tables to be shown)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace.table>...]
-            List of tables (or keyspace) names
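A hedged usage sketch using only flags and sort keys documented above; the table name is a placeholder:

```bash
# Human-readable statistics for a single hypothetical table
nodetool tablestats --human-readable my_keyspace.my_table

# Top 5 tables by live space used, emitted as JSON
nodetool tablestats --format json --sort space_used_live --top 5
```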
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/toppartitions.html b/src/doc/4.0-alpha3/tools/nodetool/toppartitions.html deleted file mode 100644 index ea9c7246e..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/toppartitions.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "toppartitions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

toppartitions

-
-
-

Usage

-
NAME
-        nodetool toppartitions - Sample and print the most active partitions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] toppartitions [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
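For illustration (not from the original page), a hedged sketch; keyspace and table names are placeholders and the duration is in milliseconds as noted above:

```bash
# Sample my_keyspace.my_table for 10 seconds and list the 20 hottest partitions
nodetool toppartitions -k 20 my_keyspace my_table 10000
```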
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/tpstats.html b/src/doc/4.0-alpha3/tools/nodetool/tpstats.html deleted file mode 100644 index 5fe972d09..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/tpstats.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tpstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tpstats

-
-
-

Usage

-
NAME
-        nodetool tpstats - Print usage statistics of thread pools
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tpstats
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
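A hedged sketch of the only non-connection option documented above:

```bash
# Thread pool statistics as YAML, e.g. for machine parsing
nodetool tpstats --format yaml
```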
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/truncatehints.html b/src/doc/4.0-alpha3/tools/nodetool/truncatehints.html deleted file mode 100644 index 43a6ade5f..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/truncatehints.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "truncatehints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

truncatehints

-
-
-

Usage

-
NAME
-        nodetool truncatehints - Truncate all hints on the local node, or
-        truncate hints for the endpoint(s) specified.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] truncatehints [--] [endpoint
-                ... ]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [endpoint ... ]
-            Endpoint address(es) to delete hints for, either ip address
-            ("127.0.0.1") or hostname
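A hedged usage sketch, not from the original page; the endpoint addresses are placeholders:

```bash
# Drop every hint held on the local node
nodetool truncatehints

# Drop only the hints destined for two placeholder endpoints
nodetool truncatehints -- 192.0.2.10 192.0.2.11
```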
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/upgradesstables.html b/src/doc/4.0-alpha3/tools/nodetool/upgradesstables.html deleted file mode 100644 index 0d8b188e1..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/upgradesstables.html +++ /dev/null @@ -1,145 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "upgradesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

upgradesstables

-
-
-

Usage

-
NAME
-        nodetool upgradesstables - Rewrite sstables (for the requested tables)
-        that are not on the current version (thus upgrading them to said current
-        version)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] upgradesstables
-                [(-a | --include-all-sstables)] [(-j <jobs> | --jobs <jobs>)] [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --include-all-sstables
-            Use -a to include all sstables, even those already on the current
-            version
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to upgrade simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
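For illustration (not part of the original page), a hedged sketch; the keyspace name is a placeholder:

```bash
# Upgrade old-format sstables for one hypothetical keyspace, two at a time
nodetool upgradesstables --jobs 2 -- my_keyspace

# Rewrite everything, including sstables already on the current version
nodetool upgradesstables --include-all-sstables
```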
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/verify.html b/src/doc/4.0-alpha3/tools/nodetool/verify.html deleted file mode 100644 index 43e476628..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/verify.html +++ /dev/null @@ -1,154 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "verify" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

verify

-
-
-

Usage

-
NAME
-        nodetool verify - Verify (check data checksum for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] verify
-                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]
-                [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)] [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -c, --check-version
-            Also check that all sstables are the latest version
-
-        -d, --dfp
-            Invoke the disk failure policy if a corrupt sstable is found
-
-        -e, --extended-verify
-            Verify each cell data, beyond simply checking sstable checksums
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick check - avoid reading all data to verify checksums
-
-        -r, --rsc
-            Mutate the repair status on corrupt sstables
-
-        -t, --check-tokens
-            Verify that all tokens in sstables are owned by this node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
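A hedged sketch combining flags documented above; keyspace and table names are placeholders:

```bash
# Quick checksum check of one hypothetical table
nodetool verify --quick -- my_keyspace my_table

# Deeper verification: every cell, sstable versions, and token ownership
nodetool verify --extended-verify --check-version --check-tokens -- my_keyspace my_table
```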
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/version.html b/src/doc/4.0-alpha3/tools/nodetool/version.html deleted file mode 100644 index 0963a2489..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/version.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "version" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

version

-
-
-

Usage

-
NAME
-        nodetool version - Print cassandra version
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] version
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/nodetool/viewbuildstatus.html b/src/doc/4.0-alpha3/tools/nodetool/viewbuildstatus.html deleted file mode 100644 index cb697259c..000000000 --- a/src/doc/4.0-alpha3/tools/nodetool/viewbuildstatus.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "viewbuildstatus" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

viewbuildstatus

-
-
-

Usage

-
NAME
-        nodetool viewbuildstatus - Show progress of a materialized view build
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] viewbuildstatus [--]
-                <keyspace> <view> | <keyspace.view>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <view> | <keyspace.view>
-            The keyspace and view name
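A hedged sketch showing both invocation forms from the synopsis; the names are placeholders:

```bash
# Keyspace and view given separately, or as a single keyspace.view argument
nodetool viewbuildstatus my_keyspace my_view
nodetool viewbuildstatus my_keyspace.my_view
```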
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/index.html b/src/doc/4.0-alpha3/tools/sstable/index.html deleted file mode 100644 index beb331727..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/index.html +++ /dev/null @@ -1,229 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "SSTable Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

SSTable Tools

-

This section describes the functionality of the various sstable tools.

-

Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped.

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstabledump.html b/src/doc/4.0-alpha3/tools/sstable/sstabledump.html deleted file mode 100644 index 461c38707..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstabledump.html +++ /dev/null @@ -1,404 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstabledump" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstabledump

-

Dump contents of a given SSTable to standard output in JSON format.

-

You must supply exactly one sstable.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstabledump <options> <sstable file path>

- ---- - - - - - - - - - - - - - - - - - - - - -
-d          CQL row per line internal representation
-e          Enumerate partition keys only
-k <arg>    Partition key
-x <arg>    Excluded partition key(s)
-t          Print raw timestamps instead of iso8601 date strings
-l          Output each row as a separate JSON object
-

If necessary, use sstableutil first to find out the sstables used by a table.

-
-
-

Dump entire table

-

Dump the entire table without any options.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26
-
-cat eventlog_dump_2018Jul26
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-]
-
-
-
-
-

Dump table in a more manageable format

-

Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. ref: https://issues.apache.org/jira/browse/CASSANDRA-13848

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines
-
-cat eventlog_dump_2018Jul26_justlines
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Dump only keys

-

Dump only the keys by using the -e option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys
-
-cat eventlog_dump_2018Jul26b
-[ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ]
-
-
-
-
-

Dump row for a single key

-

Dump a single key using the -k option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey
-
-cat eventlog_dump_2018Jul26_singlekey
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Exclude a key or keys in dump of rows

-

Dump a table except for the rows excluded with the -x option. Multiple keys can be used.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e  > eventlog_dump_2018Jul26_excludekeys
-
-cat eventlog_dump_2018Jul26_excludekeys
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display raw timestamps

-

By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times
-
-cat eventlog_dump_2018Jul26_times
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "1532118147028809" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display internal structure in output

-

Dump the table in a format that reflects the internal structure.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d
-
-cat eventlog_dump_2018Jul26_d
-[3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]:  | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711]
-[d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]:  | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522]
-[cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]:  | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809]
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstableexpiredblockers.html b/src/doc/4.0-alpha3/tools/sstable/sstableexpiredblockers.html deleted file mode 100644 index 401ed5b30..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstableexpiredblockers.html +++ /dev/null @@ -1,149 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableexpiredblockers" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableexpiredblockers

-

During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable.

-

This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-10015

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableexpiredblockers <keyspace> <table>

-
-
-

Output blocked sstables

-

If the sstables exist for the table, but no tables have older data than the newest tombstone in an expired sstable, the script will return nothing.

-

Otherwise, the script will return <sstable> blocks <#> expired sstables from getting dropped followed by a list of the blocked sstables.

-

Example:

-
sstableexpiredblockers keyspace1 standard1
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstablelevelreset.html b/src/doc/4.0-alpha3/tools/sstable/sstablelevelreset.html deleted file mode 100644 index d78a124f9..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstablelevelreset.html +++ /dev/null @@ -1,175 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablelevelreset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablelevelreset

-

If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration.

-

See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5271

-
-

Usage

-

sstablelevelreset --really-reset <keyspace> <table>

-

The --really-reset flag is required to ensure this intrusive command is not run accidentally.

-
-
-

Table not found

-

If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error.

-

Example:

-
ColumnFamily not found: keyspace/evenlog.
-
-
-
-
-

Table has no sstables

-

Example:

-
Found no sstables, did you give the correct keyspace/table?
-
-
-
-
-

Table already at level 0

-

The script will not set the level if it is already set to 0.

-

Example:

-
Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0
-
-
-
-
-

Table levels reduced to 0

-

If the level is not already 0, then this will reset it to 0.

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 1
-
-sstablelevelreset --really-reset keyspace eventlog
-Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 0
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstableloader.html b/src/doc/4.0-alpha3/tools/sstable/sstableloader.html deleted file mode 100644 index d249e2118..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstableloader.html +++ /dev/null @@ -1,409 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableloader" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableloader

-

Bulk-load the sstables found in the directory <dir_path> to the configured cluster. The parent directories of <dir_path> are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files.

-

Several of the options listed below don’t work quite as intended, and in those cases, workarounds are mentioned for specific use cases.

-

To avoid having the sstable files that are being loaded compacted while they are read, place the files in a keyspace/table path outside the data directory.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-1278

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableloader <options> <dir_path>

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-d, --nodes <initial hosts>
    Required. Try to connect to these hosts (comma-separated) initially for ring information
-u, --username <username>
    Username for Cassandra authentication
-pw, --password <password>
    Password for Cassandra authentication
-p, --port <native transport port>
    Port used for native connection (default 9042)
-sp, --storage-port <storage port>
    Port used for internode communication (default 7000)
-ssp, --ssl-storage-port <ssl storage port>
    Port used for TLS internode communication (default 7001)
--no-progress
    Don't display progress
-t, --throttle <throttle>
    Throttle speed in Mbits (default unlimited)
-idct, --inter-dc-throttle <inter-dc-throttle>
    Inter-datacenter throttle speed in Mbits (default unlimited)
-cph, --connections-per-host <connectionsPerHost>
    Number of concurrent connections-per-host
-i, --ignore <NODES>
    Don't stream to this (comma separated) list of nodes
-alg, --ssl-alg <ALGORITHM>
    Client SSL: algorithm (default: SunX509)
-ciphers, --ssl-ciphers <CIPHER-SUITES>
    Client SSL: comma-separated list of encryption suites to use
-ks, --keystore <KEYSTORE>
    Client SSL: full path to keystore
-kspw, --keystore-password <KEYSTORE-PASSWORD>
    Client SSL: password of the keystore
-st, --store-type <STORE-TYPE>
    Client SSL: type of store
-ts, --truststore <TRUSTSTORE>
    Client SSL: full path to truststore
-tspw, --truststore-password <TRUSTSTORE-PASSWORD>
    Client SSL: password of the truststore
-prtcl, --ssl-protocol <PROTOCOL>
    Client SSL: connections protocol to use (default: TLS)
-ap, --auth-provider <auth provider>
    Custom AuthProvider class name for Cassandra authentication
-f, --conf-path <path to config file>
    cassandra.yaml file path for streaming throughput and client/server SSL
-v, --verbose
    Verbose output
-h, --help
    Display this help message
-

You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options.

-
-
-

Load sstables from a Snapshot

-

Copy the snapshot sstables into an accessible directory and use sstableloader to restore them.

-

Example:

-
cp snapshots/1535397029191/* /path/to/keyspace1/standard1/
-
-sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4700000
-   Total duration (ms):          : 4390
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

The -d or --nodes option is required, or the script will not run.

-

Example:

-
sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Initial hosts must be specified (-d)
-
-
-
-
-

Use a Config File for SSL Clusters

-

If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool to the cassandra.yaml with the relevant server_encryption_options (e.g., truststore location, algorithm). This will work better than passing individual SSL options shown above to sstableloader on the command line.

-

Example:

-
sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 9.165KiB/s (avg: 9.165KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 5.147MiB/s (avg: 18.299KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 9.751MiB/s (avg: 27.423KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 8.203MiB/s (avg: 36.524KiB/s)
-...
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 9356 ms
-   Average transfer rate   : 480.105KiB/s
-   Peak transfer rate      : 586.410KiB/s
-
-
-
-
-

Hide Progress Output

-

To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option.

-

Example:

-
sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2]
-
-
-
-
-

Get More Detail

-

Using the --verbose option will provide much more progress output.

-

Example:

-
sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 12.056KiB/s (avg: 12.056KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 9.092MiB/s (avg: 24.081KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 18.832MiB/s (avg: 36.099KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 2.253MiB/s (avg: 47.882KiB/s)
-progress: [/172.17.0.2]0:0/1 7  % total: 7% 6.388MiB/s (avg: 59.743KiB/s)
-progress: [/172.17.0.2]0:0/1 8  % total: 8% 14.606MiB/s (avg: 71.635KiB/s)
-progress: [/172.17.0.2]0:0/1 9  % total: 9% 8.880MiB/s (avg: 83.465KiB/s)
-progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s)
-progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s)
-progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s)
-progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s)
-progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s)
-progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s)
-progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s)
-progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s)
-progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s)
-progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s)
-progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s)
-progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s)
-progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s)
-progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s)
-progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s)
-progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s)
-progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s)
-progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s)
-progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s)
-progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s)
-progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s)
-progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s)
-progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s)
-progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s)
-progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s)
-progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s)
-progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s)
-progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s)
-progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s)
-progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s)
-progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s)
-progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s)
-progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s)
-progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s (avg: 462.545KiB/s)
-progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s)
-progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s)
-progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s)
-progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s)
-progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s)
-progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s)
-progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s)
-progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s)
-progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s)
-progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s)
-progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s)
-progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s)
-progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s)
-progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s)
-progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s)
-progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s)
-progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s)
-progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s)
-progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s)
-progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s)
-progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s)
-progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s)
-progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s)
-progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s)
-progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s)
-progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s)
-progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s)
-progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s)
-progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 6706 ms
-   Average transfer rate   : 669.835KiB/s
-   Peak transfer rate      : 767.802KiB/s
-
-
-
-
-

Throttling Load

-

To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below.

-

Example:

-
sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 0 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 37634
-   Average transfer rate (MB/s): : 0
-   Peak transfer rate (MB/s):    : 0
-
-
-
-
-

Speeding up Load

-

To speed up the load process, the number of connections per host can be increased.

-

Example:

-
sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 100
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 3486
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

This small data set doesn’t benefit much from the increase in connections per host, but note that the total duration has decreased in this example.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstablemetadata.html b/src/doc/4.0-alpha3/tools/sstable/sstablemetadata.html deleted file mode 100644 index b86c03fc4..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstablemetadata.html +++ /dev/null @@ -1,473 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablemetadata" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablemetadata

-

Print information about an sstable from the related Statistics.db and Summary.db files to standard output.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablemetadata <options> <sstable filename(s)>

--gc_grace_seconds <arg>    The gc_grace_seconds to use when calculating droppable tombstones
-
- -
-

Specify gc grace seconds

-

To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn’t access the schema directly, this is a way to more accurately estimate droppable tombstones – for example, if you pass in gc_grace_seconds matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the current machine time (in seconds).

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-12208

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4
-Estimated tombstone drop times:
-1536599100:         1
-1536599640:         1
-1536599700:         2
-
-echo $(date +%s)
-1536602005
-
-# if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 4.0E-5
-
-# if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 9.61111111111111E-6
-
-# if gc_grace_seconds was configured at 5000, none of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 0.0
-
-
-
-
-

Explanation of each value printed above

Value    Explanation
SSTable    prefix of the sstable filenames related to this sstable
Partitioner    partitioner type used to distribute data across nodes; defined in cassandra.yaml
Bloom Filter FP    precision of Bloom filter used in reads; defined in the table definition
Minimum timestamp    minimum timestamp of any entry in this sstable, in epoch microseconds
Maximum timestamp    maximum timestamp of any entry in this sstable, in epoch microseconds
SSTable min local deletion time    minimum timestamp of deletion date, based on TTL, in epoch seconds
SSTable max local deletion time    maximum timestamp of deletion date, based on TTL, in epoch seconds
Compressor    blank (-) by default; if not blank, indicates type of compression enabled on the table
TTL min    time-to-live in seconds; default 0 unless defined in the table definition
TTL max    time-to-live in seconds; default 0 unless defined in the table definition
First token    lowest token and related key found in the sstable summary
Last token    highest token and related key found in the sstable summary
Estimated droppable tombstones    ratio of tombstones to columns, using configured gc grace seconds if relevant
SSTable level    compaction level of this sstable, if leveled compaction (LCS) is used
Repaired at    the timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds
Replay positions covered    the interval of time and commitlog positions related to this sstable
totalColumnsSet    number of cells in the table
totalRows    number of rows in the table
Estimated tombstone drop times    approximate number of rows that will expire, ordered by epoch seconds
Count Row Size Cell Count    two histograms in two columns; one represents distribution of Row Size and the other represents distribution of Cell Count
Estimated cardinality    an estimate of unique values, used for compaction
EncodingStats* minTTL    in epoch milliseconds
EncodingStats* minLocalDeletionTime    in epoch seconds
EncodingStats* minTimestamp    in epoch microseconds
KeyType    the type of partition key, useful in reading and writing data from/to storage; defined in the table definition
ClusteringTypes    the type of clustering key, useful in reading and writing data from/to storage; defined in the table definition
StaticColumns    a list of the shared columns in the table
RegularColumns    a list of non-static, non-key columns in the table
-
  • For the encoding stats values, the delta of this and the current epoch time is used when encoding and storing data in the most optimal way.
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstableofflinerelevel.html b/src/doc/4.0-alpha3/tools/sstable/sstableofflinerelevel.html deleted file mode 100644 index 4a4c7e39b..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstableofflinerelevel.html +++ /dev/null @@ -1,190 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableofflinerelevel" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableofflinerelevel

-

When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-8301

-

It works as follows: sstables are sorted by their last token. Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size):

-
L3 [][][][][][][][][][][]
-L2 [    ][    ][    ][  ]
-L1 [          ][        ]
-L0 [                    ]
-
-
-

After being dropped to L0 and sorted by last token, it will look like this (and, to illustrate overlap, the overlapping sstables are put on a new line):

-
[][][]
-[    ][][][]
-    [    ]
-[          ]
-...
-
-
-

Then, we start iterating from the smallest last-token and adding all sstables that do not cause an overlap to a level. We will reconstruct the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below.

-

If we end up with more levels than expected, we put all levels exceeding the expected count in L0. For example, the original L0 files will most likely end up in a level of their own, since they most often overlap many other sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableofflinerelevel [--dry-run] <keyspace> <table>

-
-
-

Doing a dry run

-

Use the --dry-run option to see the current level distribution and the predicted leveling after the change.

-

Example:

-
sstableofflinerelevel --dry-run keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-Potential leveling:
-L0=1
-L1=1
-
-
-
-
-

Running a relevel

-

Example:

-
sstableofflinerelevel keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-New leveling:
-L0=1
-L1=1
-
-
-
-
-

Keyspace or table not found

-

If an invalid keyspace and/or table is provided, an exception will be thrown.

-

Example:

-
sstableofflinerelevel --dry-run keyspace evenlog
-
-Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog
-    at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstablerepairedset.html b/src/doc/4.0-alpha3/tools/sstable/sstablerepairedset.html deleted file mode 100644 index cfb4c30a7..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstablerepairedset.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablerepairedset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablerepairedset

-

Repairs can take a very long time in some environments with large amounts of data. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can be run only on un-repaired sstables if desired.

-

Note that running a repair (e.g., via nodetool repair) doesn’t set the status of this metadata. Only setting the status of this metadata via this tool does.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5351

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablerepairedset --really-set <options> [-f <sstable-list> | <sstables>]

--really-set       required if you want to really set the status
--is-repaired      set the repairedAt status to the last modified time
--is-unrepaired    set the repairedAt status to 0
-f                 use a file containing a list of sstables as the input
-
-
-

Set a lot of sstables to unrepaired status

-

There are many ways to do this programmatically; the approach below would likely use variables for the keyspace and table names.

-

Example:

-
find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %
-
-
-
-
-

Set one to many sstables to repaired status

-

Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice.

-

Example:

-
nodetool repair keyspace1 standard1
-find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired %
-
-
-
- -
-

Using command in a script

-

If you know you ran repair 2 weeks ago, you can do something like the following:

-
sstablerepairedset --really-set --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstablescrub.html b/src/doc/4.0-alpha3/tools/sstable/sstablescrub.html deleted file mode 100644 index 83954bc83..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstablescrub.html +++ /dev/null @@ -1,211 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablescrub" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablescrub

-

Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4321

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablescrub <options> <keyspace> <table>

--debug                        display stack traces
-h,--help                      display this help message
-m,--manifest-check            only check and repair the leveled manifest, without actually scrubbing the sstables
-n,--no-validate               do not validate columns using column validator
-r,--reinsert-overflowed-ttl   rewrites rows with overflowed expiration date affected by CASSANDRA-14092 with the maximum supported expiration date of 2038-01-19T03:14:06+00:00. The rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows.
-s,--skip-corrupted            skip corrupt rows in counter tables
-v,--verbose                   verbose output
-
-
-

Basic Scrub

-

Running scrub without options will take a snapshot first, then write all non-corrupted rows to a new sstable.

-

Example:

-
sstablescrub keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped
-Checking leveled manifest
-
-
-
-
-

Scrub without Validation

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-9406

-

Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but is not corrupt. This data usually does not present any errors to the client.

-

Example:

-
sstablescrub --no-validate keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned
-
-
-
-
-

Skip Corrupted Counter Tables

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5930

-

If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+.

-

Example:

-
sstablescrub --skip-corrupted keyspace1 counter1
-
-
-
-
-

Dealing with Overflow Dates

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-14092

-

Using the --reinsert-overflowed-ttl option allows rewriting of rows whose TTL-based expiration date went over the maximum supported date (causing an overflow).

-

Example:

-
sstablescrub --reinsert-overflowed-ttl keyspace1 counter1
-
-
-
-
-

Manifest Check

-

As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstablesplit.html b/src/doc/4.0-alpha3/tools/sstable/sstablesplit.html deleted file mode 100644 index 55e383b47..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstablesplit.html +++ /dev/null @@ -1,202 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablesplit" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablesplit

-

Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4766

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablesplit <options> <filename>

--debug              display stack traces
-h, --help           display this help message
--no-snapshot        don’t snapshot the sstables before splitting
-s, --size <size>    maximum size in MB for the output sstables (default: 50)
-

This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped.

-
-
-

Split a File

-

Split a large sstable into smaller sstables. By default, unless the --no-snapshot option is added, a snapshot of the original sstable will be taken and placed in the snapshots folder.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-Pre-split sstables snapshotted into snapshot pre-split-1533144514795
-
-
-
-
-

Split Multiple Files

-

Wildcards can be used in the filename portion of the command to split multiple files.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1*
-
-
-
-
-

Attempt to Split a Small File

-

If the file is already smaller than the split size provided, the sstable will not be split.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB)
-No sstables needed splitting.
-
-
-
-
-

Split a File into Specified Size

-

The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB). Specify only the number, not the units. For example --size 50 is correct, but --size 50MB is not.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db
-Pre-split sstables snapshotted into snapshot pre-split-1533144996008
-
-
-
-
-

Split Without Snapshot

-

By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it.

-

Example:

-
sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db
-
-
-

Note: There is no output, but you can see the results in your file system.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstableupgrade.html b/src/doc/4.0-alpha3/tools/sstable/sstableupgrade.html deleted file mode 100644 index 7bb955dec..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstableupgrade.html +++ /dev/null @@ -1,249 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableupgrade" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableupgrade

-

Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version.

-

The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableupgrade <options> <keyspace> <table> [snapshot_name]

--debug             display stack traces
-h,--help           display this help message
-k,--keep-source    do not delete the source sstables
-
-
-

Rewrite tables to the current Cassandra version

-

Start with a set of sstables in one version of Cassandra:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables:

-
sstableupgrade keyspace1 standard1
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 13:48 backups
--rw-r--r--   1 user  wheel      292 Aug 22 13:48 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4599475 Aug 22 13:48 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:48 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 13:48 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330807 Aug 22 13:48 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 13:48 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 13:48 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 13:48 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite tables to the current Cassandra version, and keep tables in old version

-

Again, starting with a set of sstables in one version:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:

-
sstableupgrade keyspace1 standard1 -k
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 14:00 backups
--rw-r--r--@  1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--@  1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--@  1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--@  1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--@  1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--@  1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--@  1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--@  1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
--rw-r--r--   1 user  wheel      292 Aug 22 14:01 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4596370 Aug 22 14:01 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 14:01 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 14:01 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330801 Aug 22 14:01 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 14:01 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 14:01 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 14:01 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite a snapshot to the current Cassandra version

-

Find the snapshot name:

-
nodetool listsnapshots
-
-Snapshot Details:
-Snapshot name       Keyspace name                Column family name           True size          Size on disk
-...
-1534962986979       keyspace1                    standard1                    5.85 MB            5.85 MB
-
-
-

Then rewrite the snapshot:

-
sstableupgrade keyspace1 standard1 1534962986979
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete.
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstableutil.html b/src/doc/4.0-alpha3/tools/sstable/sstableutil.html deleted file mode 100644 index da49062de..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstableutil.html +++ /dev/null @@ -1,205 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableutil" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableutil

-

List sstable files for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7066

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableutil <options> <keyspace> <table>

-c, --cleanup       clean up any outstanding transactions
-d, --debug         display stack traces
-h, --help          display this help message
-o, --oplog         include operation logs
-t, --type <arg>    all (list all files, final or temporary), tmp (list temporary files only), final (list final files only)
-v, --verbose       verbose output
-
-
-

List all sstables

-

The basic command lists the sstables associated with a given keyspace/table.

-

Example:

-
sstableutil keyspace eventlog
-Listing files...
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt
-
-
-
-
-

List only temporary sstables

-

Using the -t option followed by tmp will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra.

-
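
For example, a hypothetical invocation reusing the keyspace and table from the listing above (the actual output depends on the sstables present on the node):

sstableutil -t tmp keyspace eventlog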
-
-

List only final sstables

-

Using the -t option followed by final will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option.

-
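
For example, with the same keyspace and table as above:

sstableutil -t final keyspace eventlog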
-
-

Include transaction logs

-

Using the -o option will include transaction logs in the listing, in the format above.

-
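
For example, with the same keyspace and table as above:

sstableutil -o keyspace eventlog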
-
-

Clean up sstables

-

Using the -c option removes any transactions left over from incomplete writes or compactions.

-
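
For example, with the same keyspace and table as above (note that this actually removes leftover transaction files, so only run it while Cassandra is stopped):

sstableutil -c keyspace eventlog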

From the 3.0 upgrade notes:

-

New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix “add:” or “remove:”. They also contain a special line “commit”, only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the “add” prefix) and delete the old sstables (those with the “remove” prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/tools/sstable/sstableverify.html b/src/doc/4.0-alpha3/tools/sstable/sstableverify.html deleted file mode 100644 index 39553781b..000000000 --- a/src/doc/4.0-alpha3/tools/sstable/sstableverify.html +++ /dev/null @@ -1,205 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableverify" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableverify

-

Check sstable(s) for errors or corruption, for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5791

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableverify <options> <keyspace> <table>

--debug           display stack traces
-e, --extended    extended verification
-h, --help        display this help message
-v, --verbose     verbose output
-
-
-

Basic Verification

-

This is the basic verification. It is not a very quick process, and it uses a fair amount of memory. You might need to increase your memory settings if you have many sstables.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-
-
-
-
-

Extended Verification

-

During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time.

-

Example:

-
root@DC1C1:/# sstableverify -e keyspace eventlog
-WARN  14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully
-
-
-
-
-

Corrupted File

-

Corrupted files are listed if they are detected by the script.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db
-
-
-

A similar (but less verbose) tool will show the suggested actions:

-
nodetool verify keyspace eventlog
-error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/troubleshooting/finding_nodes.html b/src/doc/4.0-alpha3/troubleshooting/finding_nodes.html deleted file mode 100644 index 56c6f3673..000000000 --- a/src/doc/4.0-alpha3/troubleshooting/finding_nodes.html +++ /dev/null @@ -1,241 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Find The Misbehaving Nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Find The Misbehaving Nodes

-

The first step to troubleshooting a Cassandra issue is to use error messages, metrics and monitoring information to identify whether the issue lies with the clients or the server and, if it lies with the server, to find the problematic nodes in the Cassandra cluster. The goal is to determine if this is a systemic issue (e.g. a query pattern that affects the entire cluster) or isolated to a subset of nodes (e.g. neighbors holding a shared token range or even a single node with bad hardware).

-

There are many sources of information that help determine where the problem lies. Some of the most common are mentioned below.

-
-

Client Logs and Errors

-

Clients of the cluster often leave the best breadcrumbs to follow. Perhaps client latencies or error rates have increased in a particular datacenter (likely eliminating other datacenters’ nodes), or clients are receiving a particular kind of error code indicating a particular kind of problem. Troubleshooters can often rule out many failure modes just by reading the error messages. In fact, many Cassandra error messages include the last coordinator contacted to help operators find nodes to start with.

-

Some common errors (likely culprit in parentheses), assuming the client has error names similar to the DataStax drivers:

-
  • SyntaxError (client): This and other QueryValidationExceptions indicate that the client sent a malformed request. These are rarely server issues and usually indicate bad queries.
  • UnavailableException (server): This means that the Cassandra coordinator node has rejected the query as it believes that insufficient replica nodes are available. If many coordinators are throwing this error it likely means that there really are (typically) multiple nodes down in the cluster, and you can identify them using nodetool status. If only a single coordinator is throwing this error it may mean that node has been partitioned from the rest.
  • OperationTimedOutException (server): This is the most frequent timeout message raised when clients set timeouts, and means that the query took longer than the client-specified timeout. The error message will include the coordinator node that was last tried, which is usually a good starting point. This error usually indicates either aggressive client timeout values or latent server coordinators/replicas.
  • ReadTimeoutException or WriteTimeoutException (server): These are raised when clients do not specify lower timeouts and the coordinator times out based on the values supplied in the cassandra.yaml configuration file. They usually indicate a serious server-side problem, as the default values are usually multiple seconds.
-
-
-

Metrics

-

If you have Cassandra metrics reporting to a centralized location such as Graphite or Grafana, you can typically use those to narrow down the problem. At this stage narrowing down the issue to a particular datacenter, rack, or even group of nodes is the main goal. Some helpful metrics to look at are:

-
-

Errors

-

Cassandra refers to internode messaging errors as “drops”, and provides a number of Dropped Message Metrics to help narrow down errors. If particular nodes are actively dropping messages, they are likely related to the issue.

-
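
If you have shell access to a node, one quick way to check for drops in addition to the metrics above is nodetool, which prints per-message-type dropped counts at the end of its thread pool statistics output:

nodetool tpstats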
-
-

Latency

-

For timeouts or latency related issues you can start with Table Metrics by comparing coordinator-level metrics, e.g. CoordinatorReadLatency or CoordinatorWriteLatency, with their associated replica metrics, e.g. ReadLatency or WriteLatency. Issues usually show up on the 99th percentile before they show up on the 50th percentile or the mean. While maximum coordinator latencies are not typically very helpful due to the exponentially decaying reservoir used internally to produce metrics, maximum replica latencies that correlate with increased 99th percentiles on coordinators can help narrow down the problem.

-
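
If you do not have dashboards handy, a rough on-node comparison is also possible with nodetool: proxyhistograms shows coordinator-level read/write latency percentiles, while tablehistograms shows replica-level latencies for a single table (substitute your own keyspace and table; keyspace1/standard1 is used here only as an illustration):

nodetool proxyhistograms
nodetool tablehistograms keyspace1 standard1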

There are usually three main possibilities:

-
  1. Coordinator latencies are high on all nodes, but only a few nodes’ local read latencies are high. This points to slow replica nodes; the coordinators are just side effects. This usually happens when clients are not token aware.
  2. Coordinator latencies and replica latencies increase at the same time on a few nodes. If clients are token aware this is almost always what happens, and it points to slow replicas of a subset of token ranges (only part of the ring).
  3. Coordinator and local latencies are high on many nodes. This usually indicates either a tipping point in the cluster capacity (too many writes or reads per second), or a new query pattern.
-

It’s important to remember that, depending on the client’s load balancing behavior and consistency levels, coordinator and replica metrics may or may not correlate. In particular, if you use TokenAware policies the same node’s coordinator and replica latencies will often increase together, but if you just use normal DCAwareRoundRobin, coordinator latencies can increase with unrelated replica nodes’ latencies. For example:

-
  • TokenAware + LOCAL_ONE: should always have coordinator and replica latencies on the same node rise together
  • TokenAware + LOCAL_QUORUM: should always have coordinator and multiple replica latencies rise together in the same datacenter.
  • TokenAware + QUORUM: replica latencies in other datacenters can affect coordinator latencies.
  • DCAwareRoundRobin + LOCAL_ONE: coordinator latencies and unrelated replica nodes’ latencies will rise together.
  • DCAwareRoundRobin + LOCAL_QUORUM: different coordinator and replica latencies will rise together with little correlation.
-
-
-

Query Rates

-

Sometimes the Table query rate metrics can help narrow down load issues, as a “small” increase in coordinator queries per second (QPS) may correlate with a very large increase in replica-level QPS. This most often happens with BATCH writes, where a client may send a single BATCH query that might contain 50 statements in it, which if you have 9 copies (RF=3, three datacenters) means that every coordinator BATCH write turns into 450 replica writes! This is why keeping BATCHes to the same partition is so critical, otherwise you can exhaust significant CPU capacity with a “single” query.

-
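
On a single node, the raw per-table read and write counts that feed these rate metrics can also be seen with nodetool (substitute your own keyspace and table; keyspace1.standard1 is used here only as an illustration):

nodetool tablestats keyspace1.standard1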
-
-
-

Next Step: Investigate the Node(s)

-

Once you have narrowed down the problem as much as possible (datacenter, rack, node), log in to one of the nodes using SSH and proceed to debug using logs, nodetool, and OS tools. If you are not able to log in you may still have access to logs and nodetool remotely.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/troubleshooting/index.html b/src/doc/4.0-alpha3/troubleshooting/index.html deleted file mode 100644 index fa6333395..000000000 --- a/src/doc/4.0-alpha3/troubleshooting/index.html +++ /dev/null @@ -1,148 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Troubleshooting" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Troubleshooting

-

As any distributed database does, sometimes Cassandra breaks and you will have to troubleshoot what is going on. Generally speaking you can debug Cassandra like any other distributed Java program, meaning that you have to find which machines in your cluster are misbehaving and then isolate the problem using logs and tools. Luckily Cassandra has a great set of introspection tools to help you.

-

These pages include a number of command examples demonstrating various debugging and analysis techniques, mostly for Linux/Unix systems. If you don’t have access to the machines running Cassandra, or are running on Windows or another operating system, you may not be able to use the exact commands, but there are likely equivalent tools you can use.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/troubleshooting/reading_logs.html b/src/doc/4.0-alpha3/troubleshooting/reading_logs.html deleted file mode 100644 index 31d32afcb..000000000 --- a/src/doc/4.0-alpha3/troubleshooting/reading_logs.html +++ /dev/null @@ -1,351 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Cassandra Logs" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Logs

-

Cassandra has rich support for logging and attempts to give operators maximum insight into the database while at the same time limiting noise to the logs.

-
-

Common Log Files

-

Cassandra has three main logs: the system.log, debug.log and gc.log, which hold general logging messages, debugging logging messages, and Java garbage collection logs respectively.

-

These logs by default live in ${CASSANDRA_HOME}/logs, but most Linux distributions relocate logs to /var/log/cassandra. Operators can tune this location as well as what levels are logged using the provided logback.xml file.

-
-

system.log

-

This log is the default Cassandra log and is a good place to start any investigation. Some examples of activities logged to this log:

-
  • Uncaught exceptions. These can be very useful for debugging errors.
  • GCInspector messages indicating long garbage collector pauses. When long pauses happen Cassandra will print how long and also what the state of the system (thread state) was at the time of that pause. This can help narrow down a capacity issue (either not enough heap or not enough spare CPU).
  • Information about nodes joining and leaving the cluster as well as token metadata (data ownership) changes. This is useful for debugging network partitions, data movements, and more.
  • Keyspace/Table creation, modification, deletion.
  • StartupChecks that ensure optimal configuration of the operating system to run Cassandra.
  • Information about some background operational tasks (e.g. Index Redistribution).
-

As with any application, looking for ERROR or WARN lines can be a great first step:

-
$ # Search for warnings or errors in the latest system.log
-$ grep 'WARN\|ERROR' system.log | tail
-...
-
-$ # Search for warnings or errors in all rotated system.log
-$ zgrep 'WARN\|ERROR' system.log.* | less
-...
-
-
-
-
-

debug.log

-

This log contains additional debugging information that may be useful when troubleshooting, but may be much noisier than the normal system.log. Some examples of activities logged to this log:

-
  • Information about compactions, including when they start, which sstables they contain, and when they finish.
  • Information about memtable flushes to disk, including when they happened, how large the flushes were, and which commitlog segments the flush impacted.
-

This log can be very noisy, so it is highly recommended to use grep and other log analysis tools to dive deep. For example:

-
$ # Search for messages involving a CompactionTask with 5 lines of context
-$ grep CompactionTask debug.log -C 5
-...
-
-$ # Look at the distribution of flush tasks per keyspace
-$ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c
-    6 compaction_history:
-    1 test_keyspace:
-    2 local:
-    17 size_estimates:
-    17 sstable_activity:
-
-
-
-
-

gc.log

-

The gc log is a standard Java GC log. With the default jvm.options settings you get a lot of valuable information in this log, such as application pause times and why pauses happened. This may help narrow down throughput or latency issues to a mistuned JVM. For example you can view the last few pauses:

-
$ grep stopped gc.log.0.current | tail
-2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds
-2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds
-2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds
-2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds
-2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds
-2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds
-2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds
-2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds
-2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds
-2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds
-
-
-

This shows a lot of valuable information, including how long the application was paused (meaning zero user queries were being serviced during the 33ms JVM pause in this example) as well as how long it took to enter the safepoint. You can use this raw data to, for example, find the longest pauses:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n  | tail | xargs -IX grep X gc.log.0.current | sort -k 1
-2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds
-2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds
-2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds
-2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds
-2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds
-2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds
-2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds
-2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds
-2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds
-2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds
-
-
-

In this case any client waiting on a query would have experienced a 56ms latency at 17:13:41.

-

Note that GC pauses are not _only_ garbage collection, although generally speaking high pauses with fast safepoints indicate a lack of JVM heap or a mistuned JVM GC algorithm. High pauses with slow safepoints typically indicate that the JVM is having trouble entering a safepoint, which usually indicates slow disk drives (Cassandra makes heavy use of memory mapped reads which the JVM doesn’t know could have disk latency, so the JVM safepoint logic doesn’t handle a blocking memory mapped read particularly well).

-

Using these logs you can even get a pause distribution with something like histogram.py:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py
-# NumSamples = 410293; Min = 0.00; Max = 11.49
-# Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498
-# each ∎ represents a count of 5470
-    0.0001 -     1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
-    1.1496 -     2.2991 [    15]:
-    2.2991 -     3.4486 [     5]:
-    3.4486 -     4.5981 [     1]:
-    4.5981 -     5.7475 [     5]:
-    5.7475 -     6.8970 [     9]:
-    6.8970 -     8.0465 [     1]:
-    8.0465 -     9.1960 [     0]:
-    9.1960 -    10.3455 [     0]:
-   10.3455 -    11.4949 [     2]:
-
-
-

We can see in this case that while we have very good average performance, something is causing multi-second JVM pauses. In this case it was mostly safepoint pauses caused by slow disks:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X  gc.log.0.current| sort -k 1
-2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds
-2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds
-2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds
-2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds
-2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds
-2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds
-2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds
-2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds
-2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds
-2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds
-
-
-

Sometimes reading and understanding Java GC logs is hard, but you can take the raw GC files and visualize them using tools such as GCViewer, which takes the Cassandra GC log as input and shows you detailed visual information on your garbage collection performance. This includes pause analysis as well as throughput information. For a stable Cassandra JVM you probably want to aim for pauses less than 200ms and GC throughput greater than 99% (ymmv).

-

Java GC pauses are one of the leading causes of tail latency in Cassandra (along with drive latency), so sometimes this information can be crucial while debugging tail latency issues.

-
-
-
-

Getting More Information

-

If the default logging levels are insufficient, nodetool can set higher or lower logging levels for various packages and classes using the nodetool setlogginglevel command. Start by viewing the current levels:

-
$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-
-
-

Perhaps the Gossiper is acting up and we wish to enable it at TRACE level for even more insight:

-
$ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE
-
-$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-org.apache.cassandra.gms.Gossiper                      TRACE
-
-$ grep TRACE debug.log | tail -2
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating
-heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ...
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local
-heartbeat version 2341 greater than 2340 for 127.0.0.1:7000
-
-
-

Note that any changes made this way are reverted on the next Cassandra process restart. To make the changes permanent add the appropriate rule to logback.xml.

-
diff --git a/conf/logback.xml b/conf/logback.xml
-index b2c5b10..71b0a49 100644
---- a/conf/logback.xml
-+++ b/conf/logback.xml
-@@ -98,4 +98,5 @@ appender reference in the root level section below.
-   </root>
-
-   <logger name="org.apache.cassandra" level="DEBUG"/>
-+  <logger name="org.apache.cassandra.gms.Gossiper" level="TRACE"/>
- </configuration>
-
-
-
-

Full Query Logger

-

Cassandra 4.0 additionally ships with support for full query logging. This is a highly performant binary logging tool which captures Cassandra queries in real time, writes them (if possible) to a log file, and ensures the total size of the capture does not exceed a particular limit. FQL is enabled with nodetool and the logs are read with the provided bin/fqltool utility:

-
$ mkdir /var/tmp/fql_logs
-$ nodetool enablefullquerylog --path /var/tmp/fql_logs
-
-# ... do some querying
-
-$ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail
-Query time: 1530750927224
-Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name =
-'system_views' AND table_name = 'sstable_tasks';
-Values:
-
-Type: single
-Protocol version: 4
-Query time: 1530750934072
-Query: select * from keyspace1.standard1 ;
-Values:
-
-$ nodetool disablefullquerylog
-
-
-

Note that if you want more information than this tool provides, there are other live capture options available such as packet capture.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/troubleshooting/use_nodetool.html b/src/doc/4.0-alpha3/troubleshooting/use_nodetool.html deleted file mode 100644 index 8e634ac6d..000000000 --- a/src/doc/4.0-alpha3/troubleshooting/use_nodetool.html +++ /dev/null @@ -1,321 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Use Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Use Nodetool

-

Cassandra’s nodetool allows you to narrow problems from the cluster down to a particular node and gives a lot of insight into the state of the Cassandra process itself. There are dozens of useful commands (see nodetool help for all the commands), but briefly some of the most useful for troubleshooting:

-
-

Cluster Status

-

You can use nodetool status to assess the status of the cluster:

-
$ nodetool status <optional keyspace>
-
-Datacenter: dc1
-=======================
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-UN  127.0.1.1  4.69 GiB   1            100.0%            35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e  r1
-UN  127.0.1.2  4.71 GiB   1            100.0%            752e278f-b7c5-4f58-974b-9328455af73f  r2
-UN  127.0.1.3  4.69 GiB   1            100.0%            9dc1a293-2cc0-40fa-a6fd-9e6054da04a7  r3
-
-
-

In this case we can see that we have three nodes in one datacenter with about 4.6GB of data each and they are all “up”. The up/down status of a node is independently determined by every node in the cluster, so you may have to run nodetool status on multiple nodes in a cluster to see the full view.

-
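If you want that view from every node at once, a small ssh loop is often enough. This is only a sketch: the hostnames below are placeholders for your own nodes.

$ # Hypothetical host names; substitute your own nodes or IPs
$ for host in cass-1 cass-2 cass-3; do echo "== ${host} =="; ssh "${host}" nodetool status; done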

You can use nodetool status plus a little grep to see which nodes are down:

-
$ nodetool status | grep -v '^UN'
-Datacenter: dc1
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-Datacenter: dc2
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-DN  127.0.0.5  105.73 KiB  1            33.3%             df303ac7-61de-46e9-ac79-6e630115fd75  r1
-
-
-

In this case there are two datacenters and there is one node down in datacenter dc2 and rack r1. This may indicate an issue on 127.0.0.5 warranting investigation.

-
-
-

Coordinator Query Latency

-

You can view latency distributions of coordinator read and write latency to help narrow down latency issues using nodetool proxyhistograms:

-
$ nodetool proxyhistograms
-Percentile       Read Latency      Write Latency      Range Latency   CAS Read Latency  CAS Write Latency View Write Latency
-                     (micros)           (micros)           (micros)           (micros)           (micros)           (micros)
-50%                    454.83             219.34               0.00               0.00               0.00               0.00
-75%                    545.79             263.21               0.00               0.00               0.00               0.00
-95%                    654.95             315.85               0.00               0.00               0.00               0.00
-98%                    785.94             379.02               0.00               0.00               0.00               0.00
-99%                   3379.39            2346.80               0.00               0.00               0.00               0.00
-Min                     42.51             105.78               0.00               0.00               0.00               0.00
-Max                  25109.16           43388.63               0.00               0.00               0.00               0.00
-
-
-

Here you can see the full latency distribution of reads, writes, range requests (e.g. select * from keyspace.table), CAS reads (compare phase of CAS) and CAS writes (set phase of compare and set). These can be useful for narrowing down high level latency problems, for example in this case if a client had a 20 millisecond timeout on their reads they might experience the occasional timeout from this node, but less than 1% of the time (since the 99% read latency is 3.3 milliseconds < 20 milliseconds).

-
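If you want to see how these coordinator latencies evolve while you investigate, wrapping the command in watch(1) is a simple option (assuming watch is installed on the node):

$ # Refresh the histograms every 10 seconds and highlight changes
$ watch -d -n 10 nodetool proxyhistograms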
-
-

Local Query Latency

-

If you know which table is having latency/error issues, you can use nodetool tablehistograms to get a better idea of what is happening locally on a node:

-
$ nodetool tablehistograms keyspace table
-Percentile  SSTables     Write Latency      Read Latency    Partition Size        Cell Count
-                              (micros)          (micros)           (bytes)
-50%             0.00             73.46            182.79             17084               103
-75%             1.00             88.15            315.85             17084               103
-95%             2.00            126.93            545.79             17084               103
-98%             2.00            152.32            654.95             17084               103
-99%             2.00            182.79            785.94             17084               103
-Min             0.00             42.51             24.60             14238                87
-Max             2.00          12108.97          17436.92             17084               103
-
-
-

This shows you percentile breakdowns of particularly critical metrics.

-

The first column contains how many sstables were read per logical read. A very high number here indicates that you may have chosen the wrong compaction strategy, e.g. SizeTieredCompactionStrategy typically has many more reads per read than LeveledCompactionStrategy does for update heavy workloads.

-
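If you are unsure which compaction strategy a table is actually using, you can check its schema. A sketch, using placeholder keyspace and table names:

$ # Show the compaction settings for a table (placeholder names)
$ cqlsh -e "DESCRIBE TABLE keyspace.table" | grep compaction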

The second column shows you a latency breakdown of local write latency. In this case we see that while the p50 is quite good at 73 microseconds, the maximum latency is quite slow at 12 milliseconds. High write max latencies often indicate a slow commitlog volume (slow to fsync) or large writes that quickly saturate commitlog segments.

-

The third column shows you a latency breakdown of local read latency. We can see that local Cassandra reads are (as expected) slower than local writes, and the read speed correlates highly with the number of sstables read per read.

-

The fourth and fifth columns show distributions of partition size and column count per partition. These are useful for determining if the table has on average skinny or wide partitions and can help you isolate bad data patterns. For example, if you have a single cell that is 2 megabytes, that is probably going to cause some heap pressure when it’s read.

-
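A related check is the largest compacted partition reported by nodetool tablestats (the exact field name may vary between versions):

$ # Largest compacted partition for a table (placeholder names)
$ nodetool tablestats keyspace.table | grep 'Compacted partition maximum bytes'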
-
-

Threadpool State

-

You can use nodetool tpstats to view the current outstanding requests on a particular node. This is useful for trying to find out which resource (read threads, write threads, compaction, request response threads) the Cassandra process lacks. For example:

-
$ nodetool tpstats
-Pool Name                         Active   Pending      Completed   Blocked  All time blocked
-ReadStage                              2         0             12         0                 0
-MiscStage                              0         0              0         0                 0
-CompactionExecutor                     0         0           1940         0                 0
-MutationStage                          0         0              0         0                 0
-GossipStage                            0         0          10293         0                 0
-Repair-Task                            0         0              0         0                 0
-RequestResponseStage                   0         0             16         0                 0
-ReadRepairStage                        0         0              0         0                 0
-CounterMutationStage                   0         0              0         0                 0
-MemtablePostFlush                      0         0             83         0                 0
-ValidationExecutor                     0         0              0         0                 0
-MemtableFlushWriter                    0         0             30         0                 0
-ViewMutationStage                      0         0              0         0                 0
-CacheCleanupExecutor                   0         0              0         0                 0
-MemtableReclaimMemory                  0         0             30         0                 0
-PendingRangeCalculator                 0         0             11         0                 0
-SecondaryIndexManagement               0         0              0         0                 0
-HintsDispatcher                        0         0              0         0                 0
-Native-Transport-Requests              0         0            192         0                 0
-MigrationStage                         0         0             14         0                 0
-PerDiskMemtableFlushWriter_0           0         0             30         0                 0
-Sampler                                0         0              0         0                 0
-ViewBuildExecutor                      0         0              0         0                 0
-InternalResponseStage                  0         0              0         0                 0
-AntiEntropyStage                       0         0              0         0                 0
-
-Message type           Dropped                  Latency waiting in queue (micros)
-                                             50%               95%               99%               Max
-READ                         0               N/A               N/A               N/A               N/A
-RANGE_SLICE                  0              0.00              0.00              0.00              0.00
-_TRACE                       0               N/A               N/A               N/A               N/A
-HINT                         0               N/A               N/A               N/A               N/A
-MUTATION                     0               N/A               N/A               N/A               N/A
-COUNTER_MUTATION             0               N/A               N/A               N/A               N/A
-BATCH_STORE                  0               N/A               N/A               N/A               N/A
-BATCH_REMOVE                 0               N/A               N/A               N/A               N/A
-REQUEST_RESPONSE             0              0.00              0.00              0.00              0.00
-PAGED_RANGE                  0               N/A               N/A               N/A               N/A
-READ_REPAIR                  0               N/A               N/A               N/A               N/A
-
-
-

This command shows you all kinds of interesting statistics. The first section shows a detailed breakdown of threadpools for each Cassandra stage, including how many threads are currently executing (Active) and how many are waiting to run (Pending). Typically if you see pending executions in a particular threadpool that indicates a problem localized to that type of operation. For example if the RequestResponseStage queue is backing up, that means that the coordinators are waiting on a lot of downstream replica requests and may indicate a lack of token awareness, or very high consistency levels being used on read requests (for example reading at ALL ties up RF RequestResponseStage threads whereas LOCAL_ONE only uses a single thread in the ReadStage threadpool). On the other hand if you see a lot of pending compactions that may indicate that your compaction threads cannot keep up with the volume of writes and you may need to tune either the compaction strategy or the concurrent_compactors or compaction_throughput options.

-
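To cut the output down to just the pools that currently have work, a small awk filter can help. This is only a sketch and assumes the Active and Pending values stay in the second and third columns:

$ # Show the header plus any pool with active or pending tasks
$ nodetool tpstats | awk 'NR==1 || $2+0 > 0 || $3+0 > 0'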

The second section shows drops (errors) and latency distributions for all the major request types. Drops are cumulative since process start, and having any can indicate a serious problem, as the default timeouts to qualify as a drop are quite high (~5-10 seconds). Dropped messages often warrant further investigation.

-
-
-

Compaction State

-

As Cassandra is an LSM datastore, Cassandra sometimes has to compact sstables together, which can have adverse effects on performance. In particular, compaction uses a reasonable quantity of CPU resources, invalidates large quantities of the OS page cache, and can put a lot of load on your disk drives. There are great OS tools to determine if this is the case, but often it’s a good idea to check if compactions are even running using nodetool compactionstats:

-
$ nodetool compactionstats
-pending tasks: 2
-- keyspace.table: 2
-
-id                                   compaction type keyspace table completed total    unit  progress
-2062b290-7f3a-11e8-9358-cd941b956e60 Compaction      keyspace table 21848273  97867583 bytes 22.32%
-Active compaction remaining time :   0h00m04s
-
-
-

In this case there is a single compaction running on the keyspace.table table, which has completed 21.8 of 97.9 megabytes, and Cassandra estimates (based on the configured compaction throughput) that this will take 4 seconds. You can also pass -H to get the units in a human readable format.

-

Generally each running compaction can consume a single core, but the more you do in parallel the faster data compacts. Compaction is crucial to ensuring good read performance, so having the right balance of concurrent compactions, such that compactions complete quickly but don’t take too many resources away from query threads, is very important for performance. If you notice compaction unable to keep up, try tuning Cassandra’s concurrent_compactors or compaction_throughput options.

-
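Both knobs can be adjusted at runtime with nodetool (the concurrent compactors commands are only present in recent versions); the values below are examples you would tune for your own hardware:

$ # Inspect and raise the compaction throughput cap (MB/s)
$ nodetool getcompactionthroughput
$ nodetool setcompactionthroughput 64
$ # Inspect and raise the number of concurrent compactors
$ nodetool getconcurrentcompactors
$ nodetool setconcurrentcompactors 4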
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha3/troubleshooting/use_tools.html b/src/doc/4.0-alpha3/troubleshooting/use_tools.html deleted file mode 100644 index 7d94eaeaa..000000000 --- a/src/doc/4.0-alpha3/troubleshooting/use_tools.html +++ /dev/null @@ -1,609 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Diving Deep, Use External Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Diving Deep, Use External Tools

-

Machine access allows operators to dive even deeper than logs and nodetool allow. While every Cassandra operator may have their personal favorite toolsets for troubleshooting issues, this page contains some of the most common operator techniques and examples of those tools. Many of these commands work only on Linux, but if you are deploying on a different operating system you may have access to other substantially similar tools that assess similar OS level metrics and processes.

-
-

JVM Tooling

-

The JVM ships with a number of useful tools. Some of them are useful for debugging Cassandra issues, especially related to heap and execution stacks.

-

NOTE: There are two common gotchas with JVM tooling and Cassandra:

-
  1. By default Cassandra ships with -XX:+PerfDisableSharedMem set to prevent long pauses (see CASSANDRA-9242 and CASSANDRA-9483 for details). If you want to use JVM tooling you can instead have /tmp mounted on an in-memory tmpfs, which also effectively works around CASSANDRA-9242.
  2. Make sure you run the tools as the same user as Cassandra is running as, e.g. if the database is running as cassandra the tool also has to be run as cassandra, e.g. via sudo -u cassandra <cmd> (see the sketch after this list).
-
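For example, a minimal sketch of running jstat as the cassandra user; the pgrep pattern is an assumption and may need adjusting for your install:

$ # Sample GC counters as the cassandra user (10 samples, 500ms apart)
$ sudo -u cassandra jstat -gcutil "$(pgrep -f CassandraDaemon)" 500ms 10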
-

Garbage Collection State (jstat)

-

If you suspect heap pressure you can use jstat to dive deep into the garbage collection state of a Cassandra process. This command is always safe to run and yields detailed heap information including eden heap usage (E), old generation heap usage (O), count of eden collections (YGC), time spent in eden collections (YGCT), old/mixed generation collections (FGC) and time spent in old/mixed generation collections (FGCT):

-
jstat -gcutil <cassandra pid> 500ms
- S0     S1     E      O      M     CCS    YGC     YGCT    FGC    FGCT     GCT
- 0.00   0.00  81.53  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.94  31.16  93.07  88.20     12    0.151     3    0.257    0.408
-
-
-

In this case we see we have a relatively healthy heap profile, with 31.16% old generation heap usage and 83% eden. If the old generation is routinely above 75% then you probably need more heap (assuming CMS with a 75% occupancy threshold). If you do have such persistently high old gen, that often means you either have under-provisioned the old generation heap, or that there is too much live data on heap for Cassandra to collect (e.g. because of memtables). Another thing to watch for is the time between young garbage collections (YGC), which indicates how frequently the eden heap is collected. Each young gc pause is about 20-50ms, so if you have a lot of them your clients will notice in their high percentile latencies.

-
-
-

Thread Information (jstack)

-

To get a point in time snapshot of exactly what Cassandra is doing, run jstack against the Cassandra PID. Note that this does pause the JVM for a very brief period (<20ms):

-
$ jstack <cassandra pid> > threaddump
-
-# display the threaddump
-$ cat threaddump
-...
-
-# look at runnable threads
-$ grep RUNNABLE threaddump -B 1
-"Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000]
-   java.lang.Thread.State: RUNNABLE
---
-"Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-...
-
-# Note that the nid is the Linux thread id
-
-
-

Some of the most important information in the threaddumps is the waiting/blocking threads, including what locks or monitors the thread is blocking/waiting on.

-
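For a quick overview you can also tally the thread states in the dump captured above:

$ # Count threads by state in the dump
$ grep -c 'java.lang.Thread.State: RUNNABLE' threaddump
$ grep -c 'java.lang.Thread.State: BLOCKED' threaddump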
-
-
-

Basic OS Tooling

-

A great place to start when debugging a Cassandra issue is understanding how Cassandra is interacting with system resources. The following are all resources that Cassandra makes heavy use of:

-
  • CPU cores. For executing concurrent user queries.
  • CPU processing time. For query activity (data decompression, row merging, etc.).
  • CPU processing time (low priority). For background tasks (compaction, streaming, etc.).
  • RAM for Java Heap. Used to hold internal data-structures and, by default, the Cassandra memtables. Heap space is a crucial component of write performance as well as generally.
  • RAM for OS disk cache. Used to cache frequently accessed SSTable blocks. OS disk cache is a crucial component of read performance.
  • Disks. Cassandra cares a lot about disk read latency, disk write throughput, and of course disk space.
  • Network latency. Cassandra makes many internode requests, so network latency between nodes can directly impact performance.
  • Network throughput. Cassandra (like other databases) frequently has the so called “incast” problem where a small request (e.g. SELECT * from foo.bar) returns a massively large result set (e.g. the entire dataset). In such situations outgoing bandwidth is crucial.
-

Often troubleshooting Cassandra comes down to troubleshooting what resource the machine or cluster is running out of. Then you create more of that resource or change the query pattern to make less use of that resource.

-
-

High Level Resource Usage (top/htop)

-

Cassandra makes significant use of system resources, and often the very first useful action is to run top or htop (website) to see the state of the machine.

-

Useful things to look at:

-
  • System load levels. While these numbers can be confusing, generally speaking if the load average is greater than the number of CPU cores, Cassandra probably won’t have very good (sub 100 millisecond) latencies. See Linux Load Averages for more information.
  • CPU utilization. htop in particular can help break down CPU utilization into user (low and normal priority), system (kernel), and io-wait. Cassandra query threads execute as normal priority user threads, while compaction threads execute as low priority user threads. High system time could indicate problems like thread contention, and high io-wait may indicate slow disk drives. This can help you understand what Cassandra is spending processing resources doing.
  • Memory usage. Look for which programs have the most resident memory; it is probably Cassandra (see the example after this list). The number for Cassandra is likely inaccurately high due to how Linux (as of 2018) accounts for memory mapped file memory.
-
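A quick way to list the biggest resident-memory consumers outside of top/htop, assuming a procps-style ps:

$ # Top five processes by resident memory
$ ps aux --sort=-rss | head -n 5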
-
-

IO Usage (iostat)

-

Use iostat to determine how data drives are faring, including latency distributions, throughput, and utilization:

-
$ sudo iostat -xdm 2
-Linux 4.13.0-13-generic (hostname)     07/03/2018     _x86_64_    (8 CPU)
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.28    0.32    5.42     0.01     0.13    48.55     0.01    2.21    0.26    2.32   0.64   0.37
-sdb               0.00     0.00    0.00    0.00     0.00     0.00    79.34     0.00    0.20    0.20    0.00   0.16   0.00
-sdc               0.34     0.27    0.76    0.36     0.01     0.02    47.56     0.03   26.90    2.98   77.73   9.21   1.03
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.00    2.00   32.00     0.01     4.04   244.24     0.54   16.00    0.00   17.00   1.06   3.60
-sdb               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00    0.00    0.00   0.00   0.00
-sdc               0.00    24.50    0.00  114.00     0.00    11.62   208.70     5.56   48.79    0.00   48.79   1.12  12.80
-
-
-

In this case we can see that /dev/sdc is a very slow drive, having an await close to 50 milliseconds and an avgqu-sz close to 5 ios. The drive is not particularly saturated (utilization is only 12.8%), but we should still be concerned about how this would affect our p99 latency since 50ms is quite long for typical Cassandra operations. That being said, in this case most of the latency is present in writes (typically writes are more latent than reads), which due to the LSM nature of Cassandra is often hidden from the user.

-

Important metrics to assess using iostat:

-
  • Reads and writes per second. These numbers will change with the workload, but generally speaking the more reads Cassandra has to do from disk the slower Cassandra read latencies are. Large numbers of reads per second can be a dead giveaway that the cluster has insufficient memory for OS page caching.
  • Write throughput. Cassandra’s LSM model defers user writes and batches them together, which means that throughput to the underlying medium is the most important write metric for Cassandra.
  • Read latency (r_await). When Cassandra misses the OS page cache and reads from SSTables, the read latency directly determines how fast Cassandra can respond with the data.
  • Write latency. Cassandra is less sensitive to write latency except when it syncs the commit log. This typically enters into the very high percentiles of write latency.
-

Note that to get detailed latency breakdowns you will need a more advanced tool such as bcc-tools.

-
-
-

OS page Cache Usage

-

As Cassandra makes heavy use of memory mapped files, the health of the operating system’s Page Cache is crucial to performance. Start by finding how much available cache is in the system:

-
$ free -g
-              total        used        free      shared  buff/cache   available
-Mem:             15           9           2           0           3           5
-Swap:             0           0           0
-
-
-

In this case 9GB of memory is used by user processes (Cassandra heap) and 8GB is available for OS page cache. Of that, 3GB is actually used to cache files. If most memory is used and unavailable to the page cache, Cassandra performance can suffer significantly. This is why Cassandra starts with a reasonably small amount of memory reserved for the heap.

-

If you suspect that you are missing the OS page cache frequently you can use advanced tools like cachestat or vmtouch to dive deeper.

-
-
-

Network Latency and Reliability

-

Whenever Cassandra does writes or reads that involve other replicas, LOCAL_QUORUM reads for example, one of the dominant effects on latency is network latency. When trying to debug issues with multi machine operations, the network can be an important resource to investigate. You can determine internode latency using tools like ping and traceroute, or most effectively mtr:

-
$ mtr -nr www.google.com
-Start: Sun Jul 22 13:10:28 2018
-HOST: hostname                     Loss%   Snt   Last   Avg  Best  Wrst StDev
-  1.|-- 192.168.1.1                0.0%    10    2.0   1.9   1.1   3.7   0.7
-  2.|-- 96.123.29.15               0.0%    10   11.4  11.0   9.0  16.4   1.9
-  3.|-- 68.86.249.21               0.0%    10   10.6  10.7   9.0  13.7   1.1
-  4.|-- 162.141.78.129             0.0%    10   11.5  10.6   9.6  12.4   0.7
-  5.|-- 162.151.78.253             0.0%    10   10.9  12.1  10.4  20.2   2.8
-  6.|-- 68.86.143.93               0.0%    10   12.4  12.6   9.9  23.1   3.8
-  7.|-- 96.112.146.18              0.0%    10   11.9  12.4  10.6  15.5   1.6
-  9.|-- 209.85.252.250             0.0%    10   13.7  13.2  12.5  13.9   0.0
- 10.|-- 108.170.242.238            0.0%    10   12.7  12.4  11.1  13.0   0.5
- 11.|-- 74.125.253.149             0.0%    10   13.4  13.7  11.8  19.2   2.1
- 12.|-- 216.239.62.40              0.0%    10   13.4  14.7  11.5  26.9   4.6
- 13.|-- 108.170.242.81             0.0%    10   14.4  13.2  10.9  16.0   1.7
- 14.|-- 72.14.239.43               0.0%    10   12.2  16.1  11.0  32.8   7.1
- 15.|-- 216.58.195.68              0.0%    10   25.1  15.3  11.1  25.1   4.8
-
-
-

In this example of mtr, we can rapidly assess the path that our packets are taking, as well as what their typical loss and latency are. Packet loss typically leads to between 200ms and 3s of additional latency, so that can be a common cause of latency issues.

-
-
-

Network Throughput

-

As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is useful to determine if network throughput is limited. One handy tool to do this is iftop, which shows both bandwidth usage as well as connection information at a glance. An example showing traffic during a stress run against a local ccm cluster:

-
$ # remove the -t for ncurses instead of pure text
-$ sudo iftop -nNtP -i lo
-interface: lo
-IP address is: 127.0.0.1
-MAC address is: 00:00:00:00:00:00
-Listening on lo
-   # Host name (port/service if enabled)            last 2s   last 10s   last 40s cumulative
---------------------------------------------------------------------------------------------
-   1 127.0.0.1:58946                          =>      869Kb      869Kb      869Kb      217KB
-     127.0.0.3:9042                           <=         0b         0b         0b         0B
-   2 127.0.0.1:54654                          =>      736Kb      736Kb      736Kb      184KB
-     127.0.0.1:9042                           <=         0b         0b         0b         0B
-   3 127.0.0.1:51186                          =>      669Kb      669Kb      669Kb      167KB
-     127.0.0.2:9042                           <=         0b         0b         0b         0B
-   4 127.0.0.3:9042                           =>     3.30Kb     3.30Kb     3.30Kb       845B
-     127.0.0.1:58946                          <=         0b         0b         0b         0B
-   5 127.0.0.1:9042                           =>     2.79Kb     2.79Kb     2.79Kb       715B
-     127.0.0.1:54654                          <=         0b         0b         0b         0B
-   6 127.0.0.2:9042                           =>     2.54Kb     2.54Kb     2.54Kb       650B
-     127.0.0.1:51186                          <=         0b         0b         0b         0B
-   7 127.0.0.1:36894                          =>     1.65Kb     1.65Kb     1.65Kb       423B
-     127.0.0.5:7000                           <=         0b         0b         0b         0B
-   8 127.0.0.1:38034                          =>     1.50Kb     1.50Kb     1.50Kb       385B
-     127.0.0.2:7000                           <=         0b         0b         0b         0B
-   9 127.0.0.1:56324                          =>     1.50Kb     1.50Kb     1.50Kb       383B
-     127.0.0.1:7000                           <=         0b         0b         0b         0B
-  10 127.0.0.1:53044                          =>     1.43Kb     1.43Kb     1.43Kb       366B
-     127.0.0.4:7000                           <=         0b         0b         0b         0B
---------------------------------------------------------------------------------------------
-Total send rate:                                     2.25Mb     2.25Mb     2.25Mb
-Total receive rate:                                      0b         0b         0b
-Total send and receive rate:                         2.25Mb     2.25Mb     2.25Mb
---------------------------------------------------------------------------------------------
-Peak rate (sent/received/total):                     2.25Mb         0b     2.25Mb
-Cumulative (sent/received/total):                     576KB         0B      576KB
-============================================================================================
-
-
-

In this case we can see that bandwidth is fairly shared between many peers, but if the total were getting close to the rated capacity of the NIC, or were focused on a single client, that may be a clue as to what issue is occurring.

-
-
-
-

Advanced tools

-

Sometimes as an operator you may need to really dive deep. This is where advanced OS tooling can come in handy.

-
-

bcc-tools

-

Most modern Linux distributions (kernels newer than 4.1) support bcc-tools for diving deep into performance problems. First install bcc-tools, e.g. via apt on Debian:

-
$ apt install bcc-tools
-
-
-

Then you can use all the tools that bcc-tools contains. One of the most useful tools is cachestat (cachestat examples), which allows you to determine exactly how many OS page cache hits and misses are happening:

-
$ sudo /usr/share/bcc/tools/cachestat -T 1
-TIME        TOTAL   MISSES     HITS  DIRTIES   BUFFERS_MB  CACHED_MB
-18:44:08       66       66        0       64           88       4427
-18:44:09       40       40        0       75           88       4427
-18:44:10     4353       45     4308      203           88       4427
-18:44:11       84       77        7       13           88       4428
-18:44:12     2511       14     2497       14           88       4428
-18:44:13      101       98        3       18           88       4428
-18:44:14    16741        0    16741       58           88       4428
-18:44:15     1935       36     1899       18           88       4428
-18:44:16       89       34       55       18           88       4428
-
-
-

In this case there are not too many page cache MISSES, which indicates a reasonably sized cache. These metrics are the most direct measurement of your Cassandra node’s “hot” dataset. If you don’t have enough cache, MISSES will be high and performance will be slow. If you have enough cache, MISSES will be low and performance will be fast (as almost all reads are being served out of memory).

-

You can also measure disk latency distributions using biolatency (biolatency examples) to get an idea of how slow Cassandra will be when reads miss the OS page cache and have to hit disks:

-
$ sudo /usr/share/bcc/tools/biolatency -D 10
-Tracing block device I/O... Hit Ctrl-C to end.
-
-
-disk = 'sda'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 12       |****************************************|
-        32 -> 63         : 9        |******************************          |
-        64 -> 127        : 1        |***                                     |
-       128 -> 255        : 3        |**********                              |
-       256 -> 511        : 7        |***********************                 |
-       512 -> 1023       : 2        |******                                  |
-
-disk = 'sdc'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 0        |                                        |
-        32 -> 63         : 0        |                                        |
-        64 -> 127        : 41       |************                            |
-       128 -> 255        : 17       |*****                                   |
-       256 -> 511        : 13       |***                                     |
-       512 -> 1023       : 2        |                                        |
-      1024 -> 2047       : 0        |                                        |
-      2048 -> 4095       : 0        |                                        |
-      4096 -> 8191       : 56       |*****************                       |
-      8192 -> 16383      : 131      |****************************************|
-     16384 -> 32767      : 9        |**                                      |
-
-
-

In this case most ios on the data drive (sdc) are fast, but many take between 8 and 16 milliseconds.

-

Finally biosnoop (examples) can be used to dive even deeper and see per IO latencies:

-
$ sudo /usr/share/bcc/tools/biosnoop | grep java | head
-0.000000000    java           17427  sdc     R  3972458600 4096      13.58
-0.000818000    java           17427  sdc     R  3972459408 4096       0.35
-0.007098000    java           17416  sdc     R  3972401824 4096       5.81
-0.007896000    java           17416  sdc     R  3972489960 4096       0.34
-0.008920000    java           17416  sdc     R  3972489896 4096       0.34
-0.009487000    java           17427  sdc     R  3972401880 4096       0.32
-0.010238000    java           17416  sdc     R  3972488368 4096       0.37
-0.010596000    java           17427  sdc     R  3972488376 4096       0.34
-0.011236000    java           17410  sdc     R  3972488424 4096       0.32
-0.011825000    java           17427  sdc     R  3972488576 16384      0.65
-... time passes
-8.032687000    java           18279  sdc     R  10899712  122880     3.01
-8.033175000    java           18279  sdc     R  10899952  8192       0.46
-8.073295000    java           18279  sdc     R  23384320  122880     3.01
-8.073768000    java           18279  sdc     R  23384560  8192       0.46
-
-
-

With biosnoop you see every single IO and how long it takes. This data can be used to construct the latency distributions in biolatency, but can also be used to better understand how disk latency affects performance. For example this particular drive takes ~3ms to service a memory mapped read due to the large default value (128kb) of read_ahead_kb. To improve point read performance you may want to decrease read_ahead_kb on fast data volumes such as SSDs, while keeping a higher value like 128kb for HDs. There are tradeoffs involved, see the queue-sysfs docs for more information, but regardless biosnoop is useful for understanding how Cassandra uses drives.

-
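The current readahead can be inspected and changed through sysfs. This is a sketch: the device name and the new value are assumptions you would adapt to your own hardware, and the change does not persist across reboots:

$ # Check the current readahead for the data drive (device name assumed)
$ cat /sys/block/sdc/queue/read_ahead_kb
$ # Lower it for an SSD-backed data volume (example value)
$ echo 8 | sudo tee /sys/block/sdc/queue/read_ahead_kb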
-
-

vmtouch

-

Sometimes it’s useful to know how much of the Cassandra data files are being cached by the OS. A great tool for answering this question is vmtouch.

-

First install it:

-
$ git clone https://github.com/hoytech/vmtouch.git
-$ cd vmtouch
-$ make
-
-
-

Then run it on the Cassandra data directory:

-
$ ./vmtouch /var/lib/cassandra/data/
-           Files: 312
-     Directories: 92
-  Resident Pages: 62503/64308  244M/251M  97.2%
-         Elapsed: 0.005657 seconds
-
-
-

In this case almost the entire dataset is hot in OS page cache. Generally speaking the percentage doesn’t really matter unless reads are missing the cache (per e.g. cachestat), in which case having additional memory may help read performance.

-
-
-

CPU Flamegraphs

-

Cassandra often uses a lot of CPU, but telling what it is doing can prove difficult. One of the best ways to analyze Cassandra on-CPU time is to use CPU Flamegraphs, which display in a useful way which areas of Cassandra code are using CPU. This may help narrow down a compaction problem to a “compaction problem dropping tombstones” or just generally help you narrow down what Cassandra is doing while it is having an issue. To get CPU flamegraphs follow the instructions for Java Flamegraphs.

-

Generally:

-
  1. Enable the -XX:+PreserveFramePointer option in Cassandra’s jvm.options configuration file (see the sketch after this list). This has a negligible performance impact but allows you to actually see what Cassandra is doing.
  2. Run perf to get some data.
  3. Send that data through the relevant scripts in the FlameGraph toolset and convert the data into a pretty flamegraph. View the resulting SVG image in a browser or other image browser.
-
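A minimal sketch of step 1; the configuration file path is an assumption and varies between packages and Cassandra versions:

$ # Append the option to the JVM options file (path assumed), then restart Cassandra
$ echo "-XX:+PreserveFramePointer" | sudo tee -a /etc/cassandra/jvm.options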

For example, cloning straight off github, we first install the perf-map-agent to the location of our JVMs (assumed to be /usr/lib/jvm):

-
$ sudo bash
-$ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/
-$ cd /usr/lib/jvm
-$ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent
-$ cd perf-map-agent
-$ cmake .
-$ make
-
-
-

Now to get a flamegraph:

-
$ git clone --depth=1 https://github.com/brendangregg/FlameGraph
-$ sudo bash
-$ cd FlameGraph
-$ # Record traces of Cassandra and map symbols for all java processes
-$ perf record -F 49 -a -g -p <CASSANDRA PID> -- sleep 30; ./jmaps
-$ # Translate the data
-$ perf script > cassandra_stacks
-$ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \
-    ./flamegraph.pl --color=java --hash > cassandra_flames.svg
-
-
-

The resulting SVG is searchable, zoomable, and generally easy to introspect using a browser.

-
-
-

Packet Capture

-

Sometimes you have to understand what queries a Cassandra node is performing right now to troubleshoot an issue. For these times trusty packet capture tools like tcpdump and Wireshark can be very helpful for capturing and dissecting traffic. Wireshark even has native CQL support, although it sometimes has compatibility issues with newer Cassandra protocol releases.

-

To get a packet capture first capture some packets:

-
$ sudo tcpdump -U -s0 -i <INTERFACE> -w cassandra.pcap -n "tcp port 9042"
-
-
-

Now open it up with wireshark:

-
$ wireshark cassandra.pcap
-
-
-

If you don’t see CQL-like statements, try telling Wireshark to decode as CQL by right clicking on a packet going to 9042 -> Decode as -> select CQL from the dropdown for port 9042.

-

If you don’t want to do this manually or use a GUI, you can also use something like cqltrace to ease obtaining and parsing CQL packet captures.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/.buildinfo b/src/doc/4.0-alpha4/.buildinfo deleted file mode 100644 index 85d42d7ad..000000000 --- a/src/doc/4.0-alpha4/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: d5f8c6bf7cfe3d297464374abc607379 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/src/doc/4.0-alpha4/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml b/src/doc/4.0-alpha4/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml deleted file mode 100644 index fc5db0814..000000000 --- a/src/doc/4.0-alpha4/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# Keyspace Name -keyspace: stresscql - -# The CQL for creating a keyspace (optional if it already exists) -# Would almost always be network topology unless running something locall -keyspace_definition: | - CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; - -# Table name -table: blogposts - -# The CQL for creating a table you wish to stress (optional if it already exists) -table_definition: | - CREATE TABLE blogposts ( - domain text, - published_date timeuuid, - url text, - author text, - title text, - body text, - PRIMARY KEY(domain, published_date) - ) WITH CLUSTERING ORDER BY (published_date DESC) - AND compaction = { 'class':'LeveledCompactionStrategy' } - AND comment='A table to hold blog posts' - -### Column Distribution Specifications ### - -columnspec: - - name: domain - size: gaussian(5..100) #domain names are relatively short - population: uniform(1..10M) #10M possible domains to pick from - - - name: published_date - cluster: fixed(1000) #under each domain we will have max 1000 posts - - - name: url - size: uniform(30..300) - - - name: title #titles shouldn't go beyond 200 chars - size: gaussian(10..200) - - - name: author - size: uniform(5..20) #author names should be short - - - name: body - size: gaussian(100..5000) #the body of the blog post can be long - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # Our partition key is the domain so only insert one per batch - - select: fixed(1)/1000 # We have 1000 posts per domain so 1/1000 will allow 1 post per batch - - batchtype: UNLOGGED # Unlogged batches - - -# -# A list of queries you wish to run against the schema -# -queries: - singlepost: - cql: select * from blogposts where domain = ? LIMIT 1 - fields: samerow - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? 
- fields: samerow diff --git a/src/doc/4.0-alpha4/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml b/src/doc/4.0-alpha4/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml deleted file mode 100644 index 17161af27..000000000 --- a/src/doc/4.0-alpha4/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml +++ /dev/null @@ -1,44 +0,0 @@ -spacenam: example # idenitifier for this spec if running with multiple yaml files -keyspace: example - -# Would almost always be network topology unless running something locally -keyspace_definition: | - CREATE KEYSPACE example WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -table: staff_activities - -# The table under test. Start with a partition per staff member -# Is this a good idea? -table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when) - ) - -columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -insert: - # we only update a single partition in any given insert - partitions: fixed(1) - # we want to insert a single row per partition and we have between 20 and 500 - # rows per partition - select: fixed(1)/500 - batchtype: UNLOGGED # Single partition unlogged batches are essentially noops - -queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? LIMIT 1 - fields: samerow - diff --git a/src/doc/4.0-alpha4/_images/Figure_1_backups.jpg b/src/doc/4.0-alpha4/_images/Figure_1_backups.jpg deleted file mode 100644 index 160013d76..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_1_backups.jpg and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/Figure_1_data_model.jpg b/src/doc/4.0-alpha4/_images/Figure_1_data_model.jpg deleted file mode 100644 index a3b330e7a..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_1_data_model.jpg and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/Figure_1_guarantees.jpg b/src/doc/4.0-alpha4/_images/Figure_1_guarantees.jpg deleted file mode 100644 index 859342da5..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_1_guarantees.jpg and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/Figure_1_read_repair.jpg b/src/doc/4.0-alpha4/_images/Figure_1_read_repair.jpg deleted file mode 100644 index d771550a4..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_1_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/Figure_2_data_model.jpg b/src/doc/4.0-alpha4/_images/Figure_2_data_model.jpg deleted file mode 100644 index 7acdeac02..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_2_data_model.jpg and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/Figure_2_read_repair.jpg b/src/doc/4.0-alpha4/_images/Figure_2_read_repair.jpg deleted file mode 100644 index 29a912b49..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_2_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/Figure_3_read_repair.jpg b/src/doc/4.0-alpha4/_images/Figure_3_read_repair.jpg deleted file mode 100644 index f5cc1897e..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_3_read_repair.jpg and /dev/null differ diff --git 
a/src/doc/4.0-alpha4/_images/Figure_4_read_repair.jpg b/src/doc/4.0-alpha4/_images/Figure_4_read_repair.jpg deleted file mode 100644 index 25bdb347d..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_4_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/Figure_5_read_repair.jpg b/src/doc/4.0-alpha4/_images/Figure_5_read_repair.jpg deleted file mode 100644 index d9c04857f..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_5_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/Figure_6_read_repair.jpg b/src/doc/4.0-alpha4/_images/Figure_6_read_repair.jpg deleted file mode 100644 index 6bb4d1e32..000000000 Binary files a/src/doc/4.0-alpha4/_images/Figure_6_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_chebotko_logical.png b/src/doc/4.0-alpha4/_images/data_modeling_chebotko_logical.png deleted file mode 100644 index e54b5f274..000000000 Binary files a/src/doc/4.0-alpha4/_images/data_modeling_chebotko_logical.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_chebotko_physical.png b/src/doc/4.0-alpha4/_images/data_modeling_chebotko_physical.png deleted file mode 100644 index bfdaec552..000000000 Binary files a/src/doc/4.0-alpha4/_images/data_modeling_chebotko_physical.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_hotel_bucketing.png b/src/doc/4.0-alpha4/_images/data_modeling_hotel_bucketing.png deleted file mode 100644 index 8b53e38f9..000000000 Binary files a/src/doc/4.0-alpha4/_images/data_modeling_hotel_bucketing.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_hotel_erd.png b/src/doc/4.0-alpha4/_images/data_modeling_hotel_erd.png deleted file mode 100644 index e86fe68f3..000000000 Binary files a/src/doc/4.0-alpha4/_images/data_modeling_hotel_erd.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_hotel_logical.png b/src/doc/4.0-alpha4/_images/data_modeling_hotel_logical.png deleted file mode 100644 index e920f1248..000000000 Binary files a/src/doc/4.0-alpha4/_images/data_modeling_hotel_logical.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_hotel_physical.png b/src/doc/4.0-alpha4/_images/data_modeling_hotel_physical.png deleted file mode 100644 index 2d20a6ddb..000000000 Binary files a/src/doc/4.0-alpha4/_images/data_modeling_hotel_physical.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_hotel_queries.png b/src/doc/4.0-alpha4/_images/data_modeling_hotel_queries.png deleted file mode 100644 index 2434db39d..000000000 Binary files a/src/doc/4.0-alpha4/_images/data_modeling_hotel_queries.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_hotel_relational.png b/src/doc/4.0-alpha4/_images/data_modeling_hotel_relational.png deleted file mode 100644 index 43e784eea..000000000 Binary files a/src/doc/4.0-alpha4/_images/data_modeling_hotel_relational.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_reservation_logical.png b/src/doc/4.0-alpha4/_images/data_modeling_reservation_logical.png deleted file mode 100644 index 0460633b6..000000000 Binary files a/src/doc/4.0-alpha4/_images/data_modeling_reservation_logical.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/data_modeling_reservation_physical.png b/src/doc/4.0-alpha4/_images/data_modeling_reservation_physical.png deleted file mode 100644 index 1e6e76c16..000000000 Binary files 
a/src/doc/4.0-alpha4/_images/data_modeling_reservation_physical.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/docs_commit.png b/src/doc/4.0-alpha4/_images/docs_commit.png deleted file mode 100644 index d90d96a88..000000000 Binary files a/src/doc/4.0-alpha4/_images/docs_commit.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/docs_create_branch.png b/src/doc/4.0-alpha4/_images/docs_create_branch.png deleted file mode 100644 index a04cb54f3..000000000 Binary files a/src/doc/4.0-alpha4/_images/docs_create_branch.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/docs_create_file.png b/src/doc/4.0-alpha4/_images/docs_create_file.png deleted file mode 100644 index b51e37035..000000000 Binary files a/src/doc/4.0-alpha4/_images/docs_create_file.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/docs_editor.png b/src/doc/4.0-alpha4/_images/docs_editor.png deleted file mode 100644 index 5b9997bcc..000000000 Binary files a/src/doc/4.0-alpha4/_images/docs_editor.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/docs_fork.png b/src/doc/4.0-alpha4/_images/docs_fork.png deleted file mode 100644 index 20a592a98..000000000 Binary files a/src/doc/4.0-alpha4/_images/docs_fork.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/docs_pr.png b/src/doc/4.0-alpha4/_images/docs_pr.png deleted file mode 100644 index 211eb25ef..000000000 Binary files a/src/doc/4.0-alpha4/_images/docs_pr.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/docs_preview.png b/src/doc/4.0-alpha4/_images/docs_preview.png deleted file mode 100644 index 207f0ac43..000000000 Binary files a/src/doc/4.0-alpha4/_images/docs_preview.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/eclipse_debug0.png b/src/doc/4.0-alpha4/_images/eclipse_debug0.png deleted file mode 100644 index 79fc5fd5b..000000000 Binary files a/src/doc/4.0-alpha4/_images/eclipse_debug0.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/eclipse_debug1.png b/src/doc/4.0-alpha4/_images/eclipse_debug1.png deleted file mode 100644 index 87b8756a3..000000000 Binary files a/src/doc/4.0-alpha4/_images/eclipse_debug1.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/eclipse_debug2.png b/src/doc/4.0-alpha4/_images/eclipse_debug2.png deleted file mode 100644 index df4eddbd7..000000000 Binary files a/src/doc/4.0-alpha4/_images/eclipse_debug2.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/eclipse_debug3.png b/src/doc/4.0-alpha4/_images/eclipse_debug3.png deleted file mode 100644 index 23178142c..000000000 Binary files a/src/doc/4.0-alpha4/_images/eclipse_debug3.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/eclipse_debug4.png b/src/doc/4.0-alpha4/_images/eclipse_debug4.png deleted file mode 100644 index 5063d4891..000000000 Binary files a/src/doc/4.0-alpha4/_images/eclipse_debug4.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/eclipse_debug5.png b/src/doc/4.0-alpha4/_images/eclipse_debug5.png deleted file mode 100644 index ab68e68a3..000000000 Binary files a/src/doc/4.0-alpha4/_images/eclipse_debug5.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/eclipse_debug6.png b/src/doc/4.0-alpha4/_images/eclipse_debug6.png deleted file mode 100644 index 61ef30bfe..000000000 Binary files a/src/doc/4.0-alpha4/_images/eclipse_debug6.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/example-stress-graph.png b/src/doc/4.0-alpha4/_images/example-stress-graph.png deleted file 
mode 100644 index a65b08b16..000000000 Binary files a/src/doc/4.0-alpha4/_images/example-stress-graph.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_images/hints.svg b/src/doc/4.0-alpha4/_images/hints.svg deleted file mode 100644 index 5e952e796..000000000 --- a/src/doc/4.0-alpha4/_images/hints.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - diff --git a/src/doc/4.0-alpha4/_images/ring.svg b/src/doc/4.0-alpha4/_images/ring.svg deleted file mode 100644 index d0db8c579..000000000 --- a/src/doc/4.0-alpha4/_images/ring.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - ... - diff --git a/src/doc/4.0-alpha4/_images/vnodes.svg b/src/doc/4.0-alpha4/_images/vnodes.svg deleted file mode 100644 index 71b4fa2d8..000000000 --- a/src/doc/4.0-alpha4/_images/vnodes.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - diff --git a/src/doc/4.0-alpha4/_sources/architecture/dynamo.rst.txt b/src/doc/4.0-alpha4/_sources/architecture/dynamo.rst.txt deleted file mode 100644 index 5b17d9a7c..000000000 --- a/src/doc/4.0-alpha4/_sources/architecture/dynamo.rst.txt +++ /dev/null @@ -1,537 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dynamo -====== - -Apache Cassandra relies on a number of techniques from Amazon's `Dynamo -`_ -distributed storage key-value system. Each node in the Dynamo system has three -main components: - -- Request coordination over a partitioned dataset -- Ring membership and failure detection -- A local persistence (storage) engine - -Cassandra primarily draws from the first two clustering components, -while using a storage engine based on a Log Structured Merge Tree -(`LSM `_). -In particular, Cassandra relies on Dynamo style: - -- Dataset partitioning using consistent hashing -- Multi-master replication using versioned data and tunable consistency -- Distributed cluster membership and failure detection via a gossip protocol -- Incremental scale-out on commodity hardware - -Cassandra was designed this way to meet large-scale (PiB+) business-critical -storage requirements. In particular, as applications demanded full global -replication of petabyte scale datasets along with always available low-latency -reads and writes, it became imperative to design a new kind of database model -as the relational database systems of the time struggled to meet the new -requirements of global scale applications. - -Dataset Partitioning: Consistent Hashing ----------------------------------------- - -Cassandra achieves horizontal scalability by -`partitioning `_ -all data stored in the system using a hash function. Each partition is replicated -to multiple physical nodes, often across failure domains such as racks and even -datacenters. As every replica can independently accept mutations to every key -that it owns, every key must be versioned. 
Unlike in the original Dynamo paper -where deterministic versions and vector clocks were used to reconcile concurrent -updates to a key, Cassandra uses a simpler last write wins model where every -mutation is timestamped (including deletes) and then the latest version of data -is the "winning" value. Formally speaking, Cassandra uses a Last-Write-Wins Element-Set -conflict-free replicated data type for each CQL row (a.k.a `LWW-Element-Set CRDT -`_) -to resolve conflicting mutations on replica sets. - - .. _consistent-hashing-token-ring: - -Consistent Hashing using a Token Ring -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra partitions data over storage nodes using a special form of hashing -called `consistent hashing `_. -In naive data hashing, you typically allocate keys to buckets by taking a hash -of the key modulo the number of buckets. For example, if you want to distribute -data to 100 nodes using naive hashing you might assign every node to a bucket -between 0 and 100, hash the input key modulo 100, and store the data on the -associated bucket. In this naive scheme, however, adding a single node might -invalidate almost all of the mappings. - -Cassandra instead maps every node to one or more tokens on a continuous hash -ring, and defines ownership by hashing a key onto the ring and then "walking" -the ring in one direction, similar to the `Chord -`_ -algorithm. The main difference of consistent hashing to naive data hashing is -that when the number of nodes (buckets) to hash into changes, consistent -hashing only has to move a small fraction of the keys. - -For example, if we have an eight node cluster with evenly spaced tokens, and -a replication factor (RF) of 3, then to find the owning nodes for a key we -first hash that key to generate a token (which is just the hash of the key), -and then we "walk" the ring in a clockwise fashion until we encounter three -distinct nodes, at which point we have found all the replicas of that key. -This example of an eight node cluster with `RF=3` can be visualized as follows: - -.. figure:: images/ring.svg - :scale: 75 % - :alt: Dynamo Ring - -You can see that in a Dynamo like system, ranges of keys, also known as **token -ranges**, map to the same physical set of nodes. In this example, all keys that -fall in the token range excluding token 1 and including token 2 (`range(t1, t2]`) -are stored on nodes 2, 3 and 4. - -Multiple Tokens per Physical Node (a.k.a. `vnodes`) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Simple single token consistent hashing works well if you have many physical -nodes to spread data over, but with evenly spaced tokens and a small number of -physical nodes, incremental scaling (adding just a few nodes of capacity) is -difficult because there are no token selections for new nodes that can leave -the ring balanced. Cassandra seeks to avoid token imbalance because uneven -token ranges lead to uneven request load. For example, in the previous example -there is no way to add a ninth token without causing imbalance; instead we -would have to insert ``8`` tokens in the midpoints of the existing ranges. - -The Dynamo paper advocates for the use of "virtual nodes" to solve this -imbalance problem. Virtual nodes solve the problem by assigning multiple -tokens in the token ring to each physical node. 
By allowing a single physical -node to take multiple positions in the ring, we can make small clusters look -larger and therefore even with a single physical node addition we can make it -look like we added many more nodes, effectively taking many smaller pieces of -data from more ring neighbors when we add even a single node. - -Cassandra introduces some nomenclature to handle these concepts: - -- **Token**: A single position on the `dynamo` style hash ring. -- **Endpoint**: A single physical IP and port on the network. -- **Host ID**: A unique identifier for a single "physical" node, usually - present at one `Endpoint` and containing one or more `Tokens`. -- **Virtual Node** (or **vnode**): A `Token` on the hash ring owned by the same - physical node, one with the same `Host ID`. - -The mapping of **Tokens** to **Endpoints** gives rise to the **Token Map** -where Cassandra keeps track of what ring positions map to which physical -endpoints. For example, in the following figure we can represent an eight node -cluster using only four physical nodes by assigning two tokens to every node: - -.. figure:: images/vnodes.svg - :scale: 75 % - :alt: Virtual Tokens Ring - - -Multiple tokens per physical node provide the following benefits: - -1. When a new node is added it accepts approximately equal amounts of data from - other nodes in the ring, resulting in equal distribution of data across the - cluster. -2. When a node is decommissioned, it loses data roughly equally to other members - of the ring, again keeping equal distribution of data across the cluster. -3. If a node becomes unavailable, query load (especially token aware query load), - is evenly distributed across many other nodes. - -Multiple tokens, however, can also have disadvantages: - -1. Every token introduces up to ``2 * (RF - 1)`` additional neighbors on the - token ring, which means that there are more combinations of node failures - where we lose availability for a portion of the token ring. The more tokens - you have, `the higher the probability of an outage - `_. -2. Cluster-wide maintenance operations are often slowed. For example, as the - number of tokens per node is increased, the number of discrete repair - operations the cluster must do also increases. -3. Performance of operations that span token ranges could be affected. - -Note that in Cassandra ``2.x``, the only token allocation algorithm available -was picking random tokens, which meant that to keep balance the default number -of tokens per node had to be quite high, at ``256``. This had the effect of -coupling many physical endpoints together, increasing the risk of -unavailability. That is why in ``3.x +`` the new deterministic token allocator -was added which intelligently picks tokens such that the ring is optimally -balanced while requiring a much lower number of tokens per physical node. - - -Multi-master Replication: Versioned Data and Tunable Consistency ----------------------------------------------------------------- - -Cassandra replicates every partition of data to many nodes across the cluster -to maintain high availability and durability. When a mutation occurs, the -coordinator hashes the partition key to determine the token range the data -belongs to and then replicates the mutation to the replicas of that data -according to the :ref:`Replication Strategy `. - -All replication strategies have the notion of a **replication factor** (``RF``), -which indicates to Cassandra how many copies of the partition should exist. 
-For example with a ``RF=3`` keyspace, the data will be written to three -distinct **replicas**. Replicas are always chosen such that they are distinct -physical nodes which is achieved by skipping virtual nodes if needed. -Replication strategies may also choose to skip nodes present in the same failure -domain such as racks or datacenters so that Cassandra clusters can tolerate -failures of whole racks and even datacenters of nodes. - -.. _replication-strategy: - -Replication Strategy -^^^^^^^^^^^^^^^^^^^^ - -Cassandra supports pluggable **replication strategies**, which determine which -physical nodes act as replicas for a given token range. Every keyspace of -data has its own replication strategy. All production deployments should use -the :ref:`network-topology-strategy` while the :ref:`simple-strategy` replication -strategy is useful only for testing clusters where you do not yet know the -datacenter layout of the cluster. - -.. _network-topology-strategy: - -``NetworkTopologyStrategy`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``NetworkTopologyStrategy`` allows a replication factor to be specified for each -datacenter in the cluster. Even if your cluster only uses a single datacenter, -``NetworkTopologyStrategy`` should be preferred over ``SimpleStrategy`` to make it -easier to add new physical or virtual datacenters to the cluster later. - -In addition to allowing the replication factor to be specified individually by -datacenter, ``NetworkTopologyStrategy`` also attempts to choose replicas within a -datacenter from different racks as specified by the :ref:`Snitch `. If -the number of racks is greater than or equal to the replication factor for the -datacenter, each replica is guaranteed to be chosen from a different rack. -Otherwise, each rack will hold at least one replica, but some racks may hold -more than one. Note that this rack-aware behavior has some potentially -`surprising implications -`_. For example, if -there are not an even number of nodes in each rack, the data load on the -smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a brand new rack, it will be considered a replica for the entire ring. -For this reason, many operators choose to configure all nodes in a single -availability zone or similar failure domain as a single "rack". - -.. _simple-strategy: - -``SimpleStrategy`` -~~~~~~~~~~~~~~~~~~ - -``SimpleStrategy`` allows a single integer ``replication_factor`` to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if ``replication_factor`` is 3, then three different nodes should store -a copy of each row. - -``SimpleStrategy`` treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until ``replication_factor`` distinct nodes have been added to the set of replicas. - -.. _transient-replication: - -Transient Replication -~~~~~~~~~~~~~~~~~~~~~ - -Transient replication is an experimental feature in Cassandra 4.0 not present -in the original Dynamo paper. It allows you to configure a subset of replicas -to only replicate data that hasn't been incrementally repaired. This allows you -to decouple data redundancy from availability. 
For instance, if you have a -keyspace replicated at rf 3, and alter it to rf 5 with 2 transient replicas, -you go from being able to tolerate one failed replica to being able to tolerate -two, without corresponding increase in storage usage. This is because 3 nodes -will replicate all the data for a given token range, and the other 2 will only -replicate data that hasn't been incrementally repaired. - -To use transient replication, you first need to enable it in -``cassandra.yaml``. Once enabled, both ``SimpleStrategy`` and -``NetworkTopologyStrategy`` can be configured to transiently replicate data. -You configure it by specifying replication factor as -``/` in the read path and -`Hinted handoff ` in the write path. - -These techniques are only best-effort, however, and to guarantee eventual -consistency Cassandra implements `anti-entropy repair ` where replicas -calculate hierarchical hash-trees over their datasets called `Merkle Trees -`_ that can then be compared across -replicas to identify mismatched data. Like the original Dynamo paper Cassandra -supports "full" repairs where replicas hash their entire dataset, create Merkle -trees, send them to each other and sync any ranges that don't match. - -Unlike the original Dynamo paper, Cassandra also implements sub-range repair -and incremental repair. Sub-range repair allows Cassandra to increase the -resolution of the hash trees (potentially down to the single partition level) -by creating a larger number of trees that span only a portion of the data -range. Incremental repair allows Cassandra to only repair the partitions that -have changed since the last repair. - -Tunable Consistency -^^^^^^^^^^^^^^^^^^^ - -Cassandra supports a per-operation tradeoff between consistency and -availability through **Consistency Levels**. Cassandra's consistency levels -are a version of Dynamo's ``R + W > N`` consistency mechanism where operators -could configure the number of nodes that must participate in reads (``R``) -and writes (``W``) to be larger than the replication factor (``N``). In -Cassandra, you instead choose from a menu of common consistency levels which -allow the operator to pick ``R`` and ``W`` behavior without knowing the -replication factor. Generally writes will be visible to subsequent reads when -the read consistency level contains enough nodes to guarantee a quorum intersection -with the write consistency level. - -The following consistency levels are available: - -``ONE`` - Only a single replica must respond. - -``TWO`` - Two replicas must respond. - -``THREE`` - Three replicas must respond. - -``QUORUM`` - A majority (n/2 + 1) of the replicas must respond. - -``ALL`` - All of the replicas must respond. - -``LOCAL_QUORUM`` - A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond. - -``EACH_QUORUM`` - A majority of the replicas in each datacenter must respond. - -``LOCAL_ONE`` - Only a single replica must respond. In a multi-datacenter cluster, this also gaurantees that read requests are not - sent to replicas in a remote datacenter. - -``ANY`` - A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later - attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for - write operations. - -Write operations **are always sent to all replicas**, regardless of consistency -level. 
The consistency level simply controls how many responses the coordinator -waits for before responding to the client. - -For read operations, the coordinator generally only issues read commands to -enough replicas to satisfy the consistency level. The one exception to this is -when speculative retry may issue a redundant read request to an extra replica -if the original replicas have not responded within a specified time window. - -Picking Consistency Levels -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is common to pick read and write consistency levels such that the replica -sets overlap, resulting in all acknowledged writes being visible to subsequent -reads. This is typically expressed in the same terms Dynamo does, in that ``W + -R > RF``, where ``W`` is the write consistency level, ``R`` is the read -consistency level, and ``RF`` is the replication factor. For example, if ``RF -= 3``, a ``QUORUM`` request will require responses from at least ``2/3`` -replicas. If ``QUORUM`` is used for both writes and reads, at least one of the -replicas is guaranteed to participate in *both* the write and the read request, -which in turn guarantees that the quorums will overlap and the write will be -visible to the read. - -In a multi-datacenter environment, ``LOCAL_QUORUM`` can be used to provide a -weaker but still useful guarantee: reads are guaranteed to see the latest write -from within the same datacenter. This is often sufficient as clients homed to -a single datacenter will read their own writes. - -If this type of strong consistency isn't required, lower consistency levels -like ``LOCAL_ONE`` or ``ONE`` may be used to improve throughput, latency, and -availability. With replication spanning multiple datacenters, ``LOCAL_ONE`` is -typically less available than ``ONE`` but is faster as a rule. Indeed ``ONE`` -will succeed if a single replica is available in any datacenter. - -Distributed Cluster Membership and Failure Detection ----------------------------------------------------- - -The replication protocols and dataset partitioning rely on knowing which nodes -are alive and dead in the cluster so that write and read operations can be -optimally routed. In Cassandra liveness information is shared in a distributed -fashion through a failure detection mechanism based on a gossip protocol. - -.. _gossip: - -Gossip -^^^^^^ - -Gossip is how Cassandra propagates basic cluster bootstrapping information such -as endpoint membership and internode network protocol versions. In Cassandra's -gossip system, nodes exchange state information not only about themselves but -also about other nodes they know about. This information is versioned with a -vector clock of ``(generation, version)`` tuples, where the generation is a -monotonic timestamp and version is a logical clock the increments roughly every -second. These logical clocks allow Cassandra gossip to ignore old versions of -cluster state just by inspecting the logical clocks presented with gossip -messages. - -Every node in the Cassandra cluster runs the gossip task independently and -periodically. Every second, every node in the cluster: - -1. Updates the local node's heartbeat state (the version) and constructs the - node's local view of the cluster gossip endpoint state. -2. Picks a random other node in the cluster to exchange gossip endpoint state - with. -3. Probabilistically attempts to gossip with any unreachable nodes (if one exists) -4. Gossips with a seed node if that didn't happen in step 2. 
- -When an operator first bootstraps a Cassandra cluster they designate certain -nodes as "seed" nodes. Any node can be a seed node and the only difference -between seed and non-seed nodes is seed nodes are allowed to bootstrap into the -ring without seeing any other seed nodes. Furthermore, once a cluster is -bootstrapped, seed nodes become "hotspots" for gossip due to step 4 above. - -As non-seed nodes must be able to contact at least one seed node in order to -bootstrap into the cluster, it is common to include multiple seed nodes, often -one for each rack or datacenter. Seed nodes are often chosen using existing -off-the-shelf service discovery mechanisms. - -.. note:: - Nodes do not have to agree on the seed nodes, and indeed once a cluster is - bootstrapped, newly launched nodes can be configured to use any existing - nodes as "seeds". The only advantage to picking the same nodes as seeds - is it increases their usefullness as gossip hotspots. - -Currently, gossip also propagates token metadata and schema *version* -information. This information forms the control plane for scheduling data -movements and schema pulls. For example, if a node sees a mismatch in schema -version in gossip state, it will schedule a schema sync task with the other -nodes. As token information propagates via gossip it is also the control plane -for teaching nodes which endpoints own what data. - -Ring Membership and Failure Detection -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Gossip forms the basis of ring membership, but the **failure detector** -ultimately makes decisions about if nodes are ``UP`` or ``DOWN``. Every node in -Cassandra runs a variant of the `Phi Accrual Failure Detector -`_, -in which every node is constantly making an independent decision of if their -peer nodes are available or not. This decision is primarily based on received -heartbeat state. For example, if a node does not see an increasing heartbeat -from a node for a certain amount of time, the failure detector "convicts" that -node, at which point Cassandra will stop routing reads to it (writes will -typically be written to hints). If/when the node starts heartbeating again, -Cassandra will try to reach out and connect, and if it can open communication -channels it will mark that node as available. - -.. note:: - UP and DOWN state are local node decisions and are not propagated with - gossip. Heartbeat state is propagated with gossip, but nodes will not - consider each other as "UP" until they can successfully message each other - over an actual network channel. - -Cassandra will never remove a node from gossip state without explicit -instruction from an operator via a decommission operation or a new node -bootstrapping with a ``replace_address_first_boot`` option. This choice is -intentional to allow Cassandra nodes to temporarily fail without causing data -to needlessly re-balance. This also helps to prevent simultaneous range -movements, where multiple replicas of a token range are moving at the same -time, which can violate monotonic consistency and can even cause data loss. - -Incremental Scale-out on Commodity Hardware --------------------------------------------- - -Cassandra scales-out to meet the requirements of growth in data size and -request rates. Scaling-out means adding additional nodes to the ring, and -every additional node brings linear improvements in compute and storage. In -contrast, scaling-up implies adding more capacity to the existing database -nodes. 
Cassandra is also capable of scale-up, and in certain environments it -may be preferable depending on the deployment. Cassandra gives operators the -flexibility to chose either scale-out or scale-up. - -One key aspect of Dynamo that Cassandra follows is to attempt to run on -commodity hardware, and many engineering choices are made under this -assumption. For example, Cassandra assumes nodes can fail at any time, -auto-tunes to make the best use of CPU and memory resources available and makes -heavy use of advanced compression and caching techniques to get the most -storage out of limited memory and storage capabilities. - -Simple Query Model -^^^^^^^^^^^^^^^^^^ - -Cassandra, like Dynamo, chooses not to provide cross-partition transactions -that are common in SQL Relational Database Management Systems (RDBMS). This -both gives the programmer a simpler read and write API, and allows Cassandra to -more easily scale horizontally since multi-partition transactions spanning -multiple nodes are notoriously difficult to implement and typically very -latent. - -Instead, Cassanda chooses to offer fast, consistent, latency at any scale for -single partition operations, allowing retrieval of entire partitions or only -subsets of partitions based on primary key filters. Furthermore, Cassandra does -support single partition compare and swap functionality via the lightweight -transaction CQL API. - -Simple Interface for Storing Records -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra, in a slight departure from Dynamo, chooses a storage interface that -is more sophisticated then "simple key value" stores but significantly less -complex than SQL relational data models. Cassandra presents a wide-column -store interface, where partitions of data contain multiple rows, each of which -contains a flexible set of individually typed columns. Every row is uniquely -identified by the partition key and one or more clustering keys, and every row -can have as many columns as needed. - -This allows users to flexibly add new columns to existing datasets as new -requirements surface. Schema changes involve only metadata changes and run -fully concurrently with live workloads. Therefore, users can safely add columns -to existing Cassandra databases while remaining confident that query -performance will not degrade. diff --git a/src/doc/4.0-alpha4/_sources/architecture/guarantees.rst.txt b/src/doc/4.0-alpha4/_sources/architecture/guarantees.rst.txt deleted file mode 100644 index 3cff808ec..000000000 --- a/src/doc/4.0-alpha4/_sources/architecture/guarantees.rst.txt +++ /dev/null @@ -1,76 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _guarantees: - -Guarantees -============== -Apache Cassandra is a highly scalable and reliable database. 
Cassandra is used in web based applications that serve large number of clients and the quantity of data processed is web-scale (Petabyte) large. Cassandra makes some guarantees about its scalability, availability and reliability. To fully understand the inherent limitations of a storage system in an environment in which a certain level of network partition failure is to be expected and taken into account when designing the system it is important to first briefly introduce the CAP theorem. - -What is CAP? -^^^^^^^^^^^^^ -According to the CAP theorem it is not possible for a distributed data store to provide more than two of the following guarantees simultaneously. - -- Consistency: Consistency implies that every read receives the most recent write or errors out -- Availability: Availability implies that every request receives a response. It is not guaranteed that the response contains the most recent write or data. -- Partition tolerance: Partition tolerance refers to the tolerance of a storage system to failure of a network partition. Even if some of the messages are dropped or delayed the system continues to operate. - -CAP theorem implies that when using a network partition, with the inherent risk of partition failure, one has to choose between consistency and availability and both cannot be guaranteed at the same time. CAP theorem is illustrated in Figure 1. - -.. figure:: Figure_1_guarantees.jpg - -Figure 1. CAP Theorem - -High availability is a priority in web based applications and to this objective Cassandra chooses Availability and Partition Tolerance from the CAP guarantees, compromising on data Consistency to some extent. - -Cassandra makes the following guarantees. - -- High Scalability -- High Availability -- Durability -- Eventual Consistency of writes to a single table -- Lightweight transactions with linearizable consistency -- Batched writes across multiple tables are guaranteed to succeed completely or not at all -- Secondary indexes are guaranteed to be consistent with their local replicas data - -High Scalability -^^^^^^^^^^^^^^^^^ -Cassandra is a highly scalable storage system in which nodes may be added/removed as needed. Using gossip-based protocol a unified and consistent membership list is kept at each node. - -High Availability -^^^^^^^^^^^^^^^^^^^ -Cassandra guarantees high availability of data by implementing a fault-tolerant storage system. Failure detection in a node is detected using a gossip-based protocol. - -Durability -^^^^^^^^^^^^ -Cassandra guarantees data durability by using replicas. Replicas are multiple copies of a data stored on different nodes in a cluster. In a multi-datacenter environment the replicas may be stored on different datacenters. If one replica is lost due to unrecoverable node/datacenter failure the data is not completely lost as replicas are still available. - -Eventual Consistency -^^^^^^^^^^^^^^^^^^^^^^ -Meeting the requirements of performance, reliability, scalability and high availability in production Cassandra is an eventually consistent storage system. Eventually consistent implies that all updates reach all replicas eventually. Divergent versions of the same data may exist temporarily but they are eventually reconciled to a consistent state. Eventual consistency is a tradeoff to achieve high availability and it involves some read and write latencies. - -Lightweight transactions with linearizable consistency -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Data must be read and written in a sequential order. 
Paxos consensus protocol is used to implement lightweight transactions. Paxos protocol implements lightweight transactions that are able to handle concurrent operations using linearizable consistency. Linearizable consistency is sequential consistency with real-time constraints and it ensures transaction isolation with compare and set (CAS) transaction. With CAS replica data is compared and data that is found to be out of date is set to the most consistent value. Reads with linearizable consistency allow reading the current state of the data, which may possibly be uncommitted, without making a new addition or update. - -Batched Writes -^^^^^^^^^^^^^^^ - -The guarantee for batched writes across multiple tables is that they will eventually succeed, or none will. Batch data is first written to batchlog system data, and when the batch data has been successfully stored in the cluster the batchlog data is removed. The batch is replicated to another node to ensure the full batch completes in the event the coordinator node fails. - -Secondary Indexes -^^^^^^^^^^^^^^^^^^ -A secondary index is an index on a column and is used to query a table that is normally not queryable. Secondary indexes when built are guaranteed to be consistent with their local replicas. diff --git a/src/doc/4.0-alpha4/_sources/architecture/index.rst.txt b/src/doc/4.0-alpha4/_sources/architecture/index.rst.txt deleted file mode 100644 index 58eda1377..000000000 --- a/src/doc/4.0-alpha4/_sources/architecture/index.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Architecture -============ - -This section describes the general architecture of Apache Cassandra. - -.. toctree:: - :maxdepth: 2 - - overview - dynamo - storage_engine - guarantees - diff --git a/src/doc/4.0-alpha4/_sources/architecture/overview.rst.txt b/src/doc/4.0-alpha4/_sources/architecture/overview.rst.txt deleted file mode 100644 index e5fcbe3b5..000000000 --- a/src/doc/4.0-alpha4/_sources/architecture/overview.rst.txt +++ /dev/null @@ -1,114 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. _overview: - -Overview -======== - -Apache Cassandra is an open source, distributed, NoSQL database. It presents -a partitioned wide column storage model with eventually consistent semantics. - -Apache Cassandra was initially designed at `Facebook -`_ -using a staged event-driven architecture (`SEDA -`_) to implement a combination of -Amazon’s `Dynamo -`_ -distributed storage and replication techniques combined with Google's `Bigtable -`_ -data and storage engine model. Dynamo and Bigtable were both developed to meet -emerging requirements for scalable, reliable and highly available storage -systems, but each had areas that could be improved. - -Cassandra was designed as a best in class combination of both systems to meet -emerging large scale, both in data footprint and query volume, storage -requirements. As applications began to require full global replication and -always available low-latency reads and writes, it became imperative to design a -new kind of database model as the relational database systems of the time -struggled to meet the new requirements of global scale applications. - -Systems like Cassandra are designed for these challenges and seek the -following design objectives: - -- Full multi-master database replication -- Global availability at low latency -- Scaling out on commodity hardware -- Linear throughput increase with each additional processor -- Online load balancing and cluster growth -- Partitioned key-oriented queries -- Flexible schema - -Features --------- - -Cassandra provides the Cassandra Query Language (CQL), an SQL-like language, -to create and update database schema and access data. CQL allows users to -organize data within a cluster of Cassandra nodes using: - -- **Keyspace**: defines how a dataset is replicated, for example in which - datacenters and how many copies. Keyspaces contain tables. -- **Table**: defines the typed schema for a collection of partitions. Cassandra - tables have flexible addition of new columns to tables with zero downtime. - Tables contain partitions, which contain partitions, which contain columns. -- **Partition**: defines the mandatory part of the primary key all rows in - Cassandra must have. All performant queries supply the partition key in - the query. -- **Row**: contains a collection of columns identified by a unique primary key - made up of the partition key and optionally additional clustering keys. -- **Column**: A single datum with a type which belong to a row. - -CQL supports numerous advanced features over a partitioned dataset such as: - -- Single partition lightweight transactions with atomic compare and set - semantics. -- User-defined types, functions and aggregates -- Collection types including sets, maps, and lists. -- Local secondary indices -- (Experimental) materialized views - -Cassandra explicitly chooses not to implement operations that require cross -partition coordination as they are typically slow and hard to provide highly -available global semantics. For example Cassandra does not support: - -- Cross partition transactions -- Distributed joins -- Foreign keys or referential integrity. - -Operating ---------- - -Apache Cassandra configuration settings are configured in the ``cassandra.yaml`` -file that can be edited by hand or with the aid of configuration management tools. 
-Some settings can be manipulated live using an online interface, but others -require a restart of the database to take effect. - -Cassandra provides tools for managing a cluster. The ``nodetool`` command -interacts with Cassandra's live control interface, allowing runtime manipulation -of many settings from ``cassandra.yaml``. The ``auditlogviewer`` is used -to view the audit logs. The ``fqltool`` is used to view, replay and compare -full query logs. The ``auditlogviewer`` and ``fqltool`` are new tools in -Apache Cassandra 4.0. - -In addition, Cassandra supports out of the box atomic snapshot functionality, -which presents a point in time snapshot of Cassandra's data for easy -integration with many backup tools. Cassandra also supports incremental backups -where data can be backed up as it is written. - -Apache Cassandra 4.0 has added several new features including virtual tables. -transient replication, audit logging, full query logging, and support for Java -11. Two of these features are experimental: transient replication and Java 11 -support. diff --git a/src/doc/4.0-alpha4/_sources/architecture/storage_engine.rst.txt b/src/doc/4.0-alpha4/_sources/architecture/storage_engine.rst.txt deleted file mode 100644 index 23b738de7..000000000 --- a/src/doc/4.0-alpha4/_sources/architecture/storage_engine.rst.txt +++ /dev/null @@ -1,208 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Storage Engine --------------- - -.. _commit-log: - -CommitLog -^^^^^^^^^ - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables. - -All mutations write optimized by storing in commitlog segments, reducing the number of seeks needed to write to disk. Commitlog Segments are limited by the "commitlog_segment_size_in_mb" option, once the size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all its data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running "nodetool drain" before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup. - -- ``commitlog_segment_size_in_mb``: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via max_mutation_size_in_kb setting in cassandra.yaml. 
The default is half the size commitlog_segment_size_in_mb * 1024. - -***NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*** - -*Default Value:* 32 - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied. - -- ``commitlog_sync``: may be either “periodic” or “batch.” - - - ``batch``: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait "commitlog_sync_batch_window_in_ms" milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason. - - - ``commitlog_sync_batch_window_in_ms``: Time to wait between "batch" fsyncs - *Default Value:* 2 - - - ``periodic``: In periodic mode, writes are immediately ack'ed, and the CommitLog is simply synced every "commitlog_sync_period_in_ms" milliseconds. - - - ``commitlog_sync_period_in_ms``: Time to wait between "periodic" fsyncs - *Default Value:* 10000 - -*Default Value:* batch - -*** NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period or more if the sync is delayed. If using "batch" mode, it is recommended to store commitlogs in a separate, dedicated device.** - - -- ``commitlog_directory``: This option is commented out by default When running on magnetic HDD, this should be a separate spindle than the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -- ``commitlog_compression``: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported. - -(Default Value: (complex option):: - - # - class_name: LZ4Compressor - # parameters: - # - - -- ``commitlog_total_space_in_mb``: Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume. - -*Default Value:* 8192 - -.. _memtables: - -Memtables -^^^^^^^^^ - -Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable `SSTables`_. This can be triggered in several -ways: - -- The memory usage of the memtables exceeds the configured threshold (see ``memtable_cleanup_threshold``) -- The :ref:`commit-log` approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to - be freed - -Memtables may be stored entirely on-heap or partially off-heap, depending on ``memtable_allocation_type``. - -SSTables -^^^^^^^^ - -SSTables are the immutable data files that Cassandra uses for persisting data on disk. - -As SSTables are flushed to disk from :ref:`memtables` or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed. 
- -Each SSTable is comprised of multiple components stored in separate files: - -``Data.db`` - The actual data, i.e. the contents of rows. - -``Index.db`` - An index from partition keys to positions in the ``Data.db`` file. For wide partitions, this may also include an - index to rows within a partition. - -``Summary.db`` - A sampling of (by default) every 128th entry in the ``Index.db`` file. - -``Filter.db`` - A Bloom Filter of the partition keys in the SSTable. - -``CompressionInfo.db`` - Metadata about the offsets and lengths of compression chunks in the ``Data.db`` file. - -``Statistics.db`` - Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, - repair, compression, TTLs, and more. - -``Digest.crc32`` - A CRC-32 digest of the ``Data.db`` file. - -``TOC.txt`` - A plain text list of the component files for the SSTable. - -Within the ``Data.db`` file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, ``Murmur3Partition``, is used). Within a partition, rows are -stored in the order of their clustering keys. - -SSTables can be optionally compressed using block-based compression. - -SSTable Versions -^^^^^^^^^^^^^^^^ - -This section was created using the following -`gist `_ -which utilized this original -`source `_. - -The version numbers, to date are: - -Version 0 -~~~~~~~~~ - -* b (0.7.0): added version to sstable filenames -* c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings -* d (0.7.0): row size in data component becomes a long instead of int -* e (0.7.0): stores undecorated keys in data and index components -* f (0.7.0): switched bloom filter implementations in data component -* g (0.8): tracks flushed-at context in metadata component - -Version 1 -~~~~~~~~~ - -* h (1.0): tracks max client timestamp in metadata component -* hb (1.0.3): records compression ration in metadata component -* hc (1.0.4): records partitioner in metadata component -* hd (1.0.10): includes row tombstones in maxtimestamp -* he (1.1.3): includes ancestors generation in metadata component -* hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782) -* ia (1.2.0): - - * column indexes are promoted to the index file - * records estimated histogram of deletion times in tombstones - * bloom filter (keys and columns) upgraded to Murmur3 -* ib (1.2.1): tracks min client timestamp in metadata component -* ic (1.2.5): omits per-row bloom filter of column names - -Version 2 -~~~~~~~~~ - -* ja (2.0.0): - - * super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. 
We do need a major version bump however, because we should not allow streaming of super columns into this new format) - * tracks max local deletiontime in sstable metadata - * records bloom_filter_fp_chance in metadata component - * remove data size and column count from data file (CASSANDRA-4180) - * tracks max/min column values (according to comparator) -* jb (2.0.1): - - * switch from crc32 to adler32 for compression checksums - * checksum the compressed data -* ka (2.1.0): - - * new Statistics.db file format - * index summaries can be downsampled and the sampling level is persisted - * switch uncompressed checksums to adler32 - * tracks presense of legacy (local and remote) counter shards -* la (2.2.0): new file name format -* lb (2.2.7): commit log lower bound included - -Version 3 -~~~~~~~~~ - -* ma (3.0.0): - - * swap bf hash order - * store rows natively -* mb (3.0.7, 3.7): commit log lower bound included -* mc (3.0.8, 3.9): commit log intervals included - -Example Code -~~~~~~~~~~~~ - -The following example is useful for finding all sstables that do not match the "ib" SSTable version - -.. code-block:: bash - - find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots" diff --git a/src/doc/4.0-alpha4/_sources/bugs.rst.txt b/src/doc/4.0-alpha4/_sources/bugs.rst.txt deleted file mode 100644 index 32d676f9d..000000000 --- a/src/doc/4.0-alpha4/_sources/bugs.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Reporting Bugs -============== - -If you encounter a problem with Cassandra, the first places to ask for help are the :ref:`user mailing list -` and the ``cassandra`` :ref:`Slack room `. - -If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the `Apache Cassandra JIRA `__. Please provide as much -details as you can on your problem, and don't forget to indicate which version of Cassandra you are running and on which -environment. - -Further details on how to contribute can be found at our :doc:`development/index` section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path. diff --git a/src/doc/4.0-alpha4/_sources/configuration/cassandra_config_file.rst.txt b/src/doc/4.0-alpha4/_sources/configuration/cassandra_config_file.rst.txt deleted file mode 100644 index 5ea6d3fe7..000000000 --- a/src/doc/4.0-alpha4/_sources/configuration/cassandra_config_file.rst.txt +++ /dev/null @@ -1,2050 +0,0 @@ -.. _cassandra-yaml: - -Cassandra Configuration File -============================ - -``cluster_name`` ----------------- -The name of the cluster. 
This is mainly used to prevent machines in -one logical cluster from joining another. - -*Default Value:* 'Test Cluster' - -``num_tokens`` --------------- - -This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability. - -If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below. - -Specifying initial_token will override this setting on the node's initial start, -on subsequent starts, this setting will apply even if initial token is set. - -If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations - -*Default Value:* 256 - -``allocate_tokens_for_keyspace`` --------------------------------- -*This option is commented out by default.* - -Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replica factor. - -The load assigned to each node will be close to proportional to its number of -vnodes. - -Only supported with the Murmur3Partitioner. - -Replica factor is determined via the replication strategy used by the specified -keyspace. - -*Default Value:* KEYSPACE - -``allocate_tokens_for_local_replication_factor`` ------------------------------------------------- -*This option is commented out by default.* - -Replica factor is explicitly set, regardless of keyspace or datacenter. -This is the replica factor within the datacenter, like NTS. - -*Default Value:* 3 - -``initial_token`` ------------------ -*This option is commented out by default.* - -initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) -- in which case you should provide a -comma-separated list -- it's primarily used when adding nodes to legacy clusters -that do not have vnodes enabled. - -``hinted_handoff_enabled`` --------------------------- - -See http://wiki.apache.org/cassandra/HintedHandoff -May either be "true" or "false" to enable globally - -*Default Value:* true - -``hinted_handoff_disabled_datacenters`` ---------------------------------------- -*This option is commented out by default.* - -When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff - -*Default Value (complex option)*:: - - # - DC1 - # - DC2 - -``max_hint_window_in_ms`` -------------------------- -this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again. - -*Default Value:* 10800000 # 3 hours - -``hinted_handoff_throttle_in_kb`` ---------------------------------- - -Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.) 
- -*Default Value:* 1024 - -``max_hints_delivery_threads`` ------------------------------- - -Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower - -*Default Value:* 2 - -``hints_directory`` -------------------- -*This option is commented out by default.* - -Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints. - -*Default Value:* /var/lib/cassandra/hints - -``hints_flush_period_in_ms`` ----------------------------- - -How often hints should be flushed from the internal buffers to disk. -Will *not* trigger fsync. - -*Default Value:* 10000 - -``max_hints_file_size_in_mb`` ------------------------------ - -Maximum size for a single hints file, in megabytes. - -*Default Value:* 128 - -``hints_compression`` ---------------------- -*This option is commented out by default.* - -Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``batchlog_replay_throttle_in_kb`` ----------------------------------- -Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster. - -*Default Value:* 1024 - -``authenticator`` ------------------ - -Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}. - -- AllowAllAuthenticator performs no checks - set it to disable authentication. -- PasswordAuthenticator relies on username/password pairs to authenticate - users. It keeps usernames and hashed passwords in system_auth.roles table. - Please increase system_auth keyspace replication factor if you use this authenticator. - If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) - -*Default Value:* AllowAllAuthenticator - -``authorizer`` --------------- - -Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}. - -- AllowAllAuthorizer allows any action to any user - set it to disable authorization. -- CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllAuthorizer - -``role_manager`` ----------------- - -Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable. - -- CassandraRoleManager stores role data in the system_auth keyspace. Please - increase system_auth keyspace replication factor if you use this role manager. 
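For illustration, a node that enables password-based authentication end to end might combine the three settings described above in cassandra.yaml roughly as follows (a minimal sketch, assuming the default CassandraRoleManager is kept and that the system_auth replication factor has been raised separately):

.. code-block:: yaml

    authenticator: PasswordAuthenticator
    authorizer: CassandraAuthorizer
    role_manager: CassandraRoleManager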
- -*Default Value:* CassandraRoleManager - -``network_authorizer`` ----------------------- - -Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}. - -- AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization. -- CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllNetworkAuthorizer - -``roles_validity_in_ms`` ------------------------- - -Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator. - -*Default Value:* 2000 - -``roles_update_interval_in_ms`` -------------------------------- -*This option is commented out by default.* - -Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms. - -*Default Value:* 2000 - -``permissions_validity_in_ms`` ------------------------------- - -Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer. - -*Default Value:* 2000 - -``permissions_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms. - -*Default Value:* 2000 - -``credentials_validity_in_ms`` ------------------------------- - -Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching. - -*Default Value:* 2000 - -``credentials_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. 
If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms. - -*Default Value:* 2000 - -``partitioner`` ---------------- - -The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. The partitioner can NOT be -changed without reloading all data. If you are adding nodes or upgrading, -you should set this to the same partitioner that you are currently using. - -The default partitioner is the Murmur3Partitioner. Older partitioners -such as the RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner have been included for backward compatibility only. -For new clusters, you should NOT change this value. - - -*Default Value:* org.apache.cassandra.dht.Murmur3Partitioner - -``data_file_directories`` -------------------------- -*This option is commented out by default.* - -Directories where Cassandra should store data on disk. If multiple -directories are specified, Cassandra will spread data evenly across -them by partitioning the token ranges. -If not set, the default directory is $CASSANDRA_HOME/data/data. - -*Default Value (complex option)*:: - - # - /var/lib/cassandra/data - -``commitlog_directory`` ------------------------ -*This option is commented out by default.* -commit log. when running on magnetic HDD, this should be a -separate spindle than the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -``cdc_enabled`` ---------------- - -Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory). - -*Default Value:* false - -``cdc_raw_directory`` ---------------------- -*This option is commented out by default.* - -CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw. - -*Default Value:* /var/lib/cassandra/cdc_raw - -``disk_failure_policy`` ------------------------ - -Policy for data disk failures: - -die - shut down gossip and client transports and kill the JVM for any fs errors or - single-sstable errors, so the node can be replaced. - -stop_paranoid - shut down gossip and client transports even for single-sstable errors, - kill the JVM for errors during startup. - -stop - shut down gossip and client transports, leaving the node effectively dead, but - can still be inspected via JMX, kill the JVM for errors during startup. - -best_effort - stop using the failed disk and respond to requests based on - remaining available sstables. This means you WILL see obsolete - data at CL.ONE! - -ignore - ignore fatal errors and let requests fail, as in pre-1.2 Cassandra - -*Default Value:* stop - -``commit_failure_policy`` -------------------------- - -Policy for commit disk failures: - -die - shut down the node and kill the JVM, so the node can be replaced. - -stop - shut down the node, leaving the node effectively dead, but - can still be inspected via JMX. 
- -stop_commit - shut down the commit log, letting writes collect but - continuing to service reads, as in pre-2.0.5 Cassandra - -ignore - ignore fatal errors and let the batches fail - -*Default Value:* stop - -``prepared_statements_cache_size_mb`` ------------------------------------- - -Maximum size of the native protocol prepared statement cache - -Valid values are either "auto" (omitting the value) or a value greater than 0. - -Note that specifying a too large value will result in long running GCs and possibly -out-of-memory errors. Keep the value at a small fraction of the heap. - -If you constantly see "prepared statements discarded in the last minute because -cache limit reached" messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts. - -Only change the default value if you really have more prepared statements than -fit in the cache. In most cases it is not necessary to change this value. -Constantly re-preparing statements is a performance penalty. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``key_cache_size_in_mb`` ------------------------ - -Maximum size of the key cache in memory. - -Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it's worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It's best to only use the -row cache if you have hot rows or static rows. - -NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. - -``key_cache_save_period`` ------------------------- - -Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 14400 or 4 hours. - -*Default Value:* 14400 - -``key_cache_keys_to_save`` -------------------------- -*This option is commented out by default.* - -Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``row_cache_class_name`` ------------------------ -*This option is commented out by default.* - -Row cache implementation class name. Available implementations: - -org.apache.cassandra.cache.OHCProvider - Fully off-heap row cache implementation (default). - -org.apache.cassandra.cache.SerializingCacheProvider - This is the row cache implementation available - in previous releases of Cassandra. - -*Default Value:* org.apache.cassandra.cache.OHCProvider - -``row_cache_size_in_mb`` ------------------------ - -Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory than the system can afford in the worst usual situation and leave some -headroom for OS block level cache. 
Do never allow your system to swap. - -Default value is 0, to disable row caching. - -*Default Value:* 0 - -``row_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 0 to disable saving the row cache. - -*Default Value:* 0 - -``row_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved - -*Default Value:* 100 - -``counter_cache_size_in_mb`` ----------------------------- - -Maximum size of the counter cache in memory. - -Counter cache helps to reduce counter locks' contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it's relatively cheap. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. - -``counter_cache_save_period`` ------------------------------ - -Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file. - -Default is 7200 or 2 hours. - -*Default Value:* 7200 - -``counter_cache_keys_to_save`` ------------------------------- -*This option is commented out by default.* - -Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``saved_caches_directory`` --------------------------- -*This option is commented out by default.* - -saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. - -*Default Value:* /var/lib/cassandra/saved_caches - -``commitlog_sync_batch_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -commitlog_sync may be either "periodic", "group", or "batch." - -When in batch mode, Cassandra won't ack writes until the commit log -has been flushed to disk. Each incoming write will trigger the flush task. -commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had -almost no value, and is being removed. - - -*Default Value:* 2 - -``commitlog_sync_group_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -group mode is similar to batch mode, where Cassandra will not ack writes -until the commit log has been flushed to disk. The difference is group -mode will wait up to commitlog_sync_group_window_in_ms between flushes. - - -*Default Value:* 1000 - -``commitlog_sync`` ------------------- - -the default option is "periodic" where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds. 
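For example, the periodic mode described above corresponds to a cassandra.yaml fragment along these lines (a sketch using the documented defaults):

.. code-block:: yaml

    commitlog_sync: periodic
    commitlog_sync_period_in_ms: 10000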
- -*Default Value:* periodic - -``commitlog_sync_period_in_ms`` -------------------------------- - -*Default Value:* 10000 - -``periodic_commitlog_sync_lag_block_in_ms`` -------------------------------------------- -*This option is commented out by default.* - -When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete. - -``commitlog_segment_size_in_mb`` --------------------------------- - -The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables. - -The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048. - -NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024 - - -*Default Value:* 32 - -``commitlog_compression`` -------------------------- -*This option is commented out by default.* - -Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``seed_provider`` ------------------ -any class that implements the SeedProvider interface and has a -constructor that takes a Map of parameters will do. - -*Default Value (complex option)*:: - - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: ",," - - seeds: "127.0.0.1:7000" - -``concurrent_reads`` --------------------- -For workloads with more data than can fit in memory, Cassandra's -bottleneck will be reads that need to fetch data from -disk. "concurrent_reads" should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -"concurrent_counter_writes", since counter writes read the current -values before incrementing and writing them back. - -On the other hand, since writes are almost never IO bound, the ideal -number of "concurrent_writes" is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb. - -*Default Value:* 32 - -``concurrent_writes`` ---------------------- - -*Default Value:* 32 - -``concurrent_counter_writes`` ------------------------------ - -*Default Value:* 32 - -``concurrent_materialized_view_writes`` ---------------------------------------- - -For materialized view writes, as there is a read involved, so this should -be limited by the less of concurrent reads or concurrent writes. - -*Default Value:* 32 - -``file_cache_size_in_mb`` -------------------------- -*This option is commented out by default.* - -Maximum memory to use for sstable chunk cache and buffer pooling. 
-32MB of this are reserved for pooling buffers, the rest is used as an -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed. - -*Default Value:* 512 - -``buffer_pool_use_heap_if_exhausted`` -------------------------------------- -*This option is commented out by default.* - -Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. - - -*Default Value:* true - -``disk_optimization_strategy`` ------------------------------- -*This option is commented out by default.* - -The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks) - -*Default Value:* ssd - -``memtable_heap_space_in_mb`` ------------------------------ -*This option is commented out by default.* - -Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap. - -*Default Value:* 2048 - -``memtable_offheap_space_in_mb`` --------------------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``memtable_cleanup_threshold`` ------------------------------- -*This option is commented out by default.* - -memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information. - -Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load. - -memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) - -*Default Value:* 0.11 - -``memtable_allocation_type`` ----------------------------- - -Specify the way Cassandra allocates and manages memtable memory. -Options are: - -heap_buffers - on heap nio buffers - -offheap_buffers - off heap (direct) nio buffers - -offheap_objects - off heap objects - -*Default Value:* heap_buffers - -``repair_session_space_in_mb`` ------------------------------- -*This option is commented out by default.* - -Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair. - -For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096. - - -``commitlog_total_space_in_mb`` -------------------------------- -*This option is commented out by default.* - -Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. 
So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume. - - -*Default Value:* 8192 - -``memtable_flush_writers`` --------------------------- -*This option is commented out by default.* - -This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound. - -Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time. - -You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory. - -memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers. - -Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead. - -There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory. - - -*Default Value:* 2 - -``cdc_total_space_in_mb`` -------------------------- -*This option is commented out by default.* - -Total space to use for change-data-capture logs on disk. - -If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed. - -The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides. - -*Default Value:* 4096 - -``cdc_free_space_check_interval_ms`` ------------------------------------- -*This option is commented out by default.* - -When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms - -*Default Value:* 250 - -``index_summary_capacity_in_mb`` --------------------------------- - -A fixed memory pool size in MB for for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory. - -``index_summary_resize_interval_in_minutes`` --------------------------------------------- - -How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional their recent read rates. 
Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level. - -*Default Value:* 60 - -``trickle_fsync`` ------------------ - -Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters. - -*Default Value:* false - -``trickle_fsync_interval_in_kb`` --------------------------------- - -*Default Value:* 10240 - -``storage_port`` ----------------- - -TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7000 - -``ssl_storage_port`` --------------------- - -SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7001 - -``listen_address`` ------------------- - -Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate! - -Set listen_address OR listen_interface, not both. - -Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be). - -Setting listen_address to 0.0.0.0 is always wrong. - - -*Default Value:* localhost - -``listen_interface`` --------------------- -*This option is commented out by default.* - -Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth0 - -``listen_interface_prefer_ipv6`` --------------------------------- -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_address`` ---------------------- -*This option is commented out by default.* - -Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address - -*Default Value:* 1.2.3.4 - -``listen_on_broadcast_address`` -------------------------------- -*This option is commented out by default.* - -When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2. - -*Default Value:* false - -``internode_authenticator`` ---------------------------- -*This option is commented out by default.* - -Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes. 
- -*Default Value:* org.apache.cassandra.auth.AllowAllInternodeAuthenticator - -``start_native_transport`` -------------------------- - -Whether to start the native transport server. -The address on which the native transport is bound is defined by rpc_address. - -*Default Value:* true - -``native_transport_port`` ------------------------ -port for the CQL native transport to listen for clients on. -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 9042 - -``native_transport_port_ssl`` ---------------------------- -*This option is commented out by default.* -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted. - -*Default Value:* 9142 - -``native_transport_max_threads`` ------------------------------- -*This option is commented out by default.* -The maximum threads for handling requests (note that idle threads are stopped -after 30 seconds so there is no corresponding minimum setting). - -*Default Value:* 128 - -``native_transport_max_frame_size_in_mb`` ---------------------------------------- -*This option is commented out by default.* - -The maximum size of an allowed frame. Frames (requests) larger than this will -be rejected as invalid. The default is 256MB. If you're changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048. - -*Default Value:* 256 - -``native_transport_frame_block_size_in_kb`` ------------------------------------------ -*This option is commented out by default.* - -If checksumming is enabled as a protocol option, denotes the size of the chunks into which frame -bodies will be broken and checksummed. - -*Default Value:* 32 - -``native_transport_max_concurrent_connections`` ---------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_max_concurrent_connections_per_ip`` ----------------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_allow_older_protocols`` ----------------------------------------- - -Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored. - -*Default Value:* true - -``native_transport_idle_timeout_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period. - -Clients may implement heartbeats by sending an OPTIONS native protocol message after a timeout, which -will reset the idle timeout timer on the server side. 
To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side. - -Idle connection timeouts are disabled by default. - -*Default Value:* 60000 - -``rpc_address`` ---------------- - -The address or interface to bind the native transport server to. - -Set rpc_address OR rpc_interface, not both. - -Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node). - -Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0. - -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* localhost - -``rpc_interface`` ------------------ -*This option is commented out by default.* - -Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth1 - -``rpc_interface_prefer_ipv6`` ------------------------------ -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_rpc_address`` -------------------------- -*This option is commented out by default.* - -RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set. - -*Default Value:* 1.2.3.4 - -``rpc_keepalive`` ------------------ - -enable or disable keepalive on rpc/native connections - -*Default Value:* true - -``internode_send_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and 'man tcp' - -``internode_recv_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem - -``incremental_backups`` ------------------------ - -Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator's -responsibility. - -*Default Value:* false - -``snapshot_before_compaction`` ------------------------------- - -Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won't clean up the -snapshots for you. Mostly useful if you're paranoid when there -is a data format change. - -*Default Value:* false - -``auto_snapshot`` ------------------ - -Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. 
The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop. - -*Default Value:* true - -``column_index_size_in_kb`` --------------------------- - -Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these: - -- a smaller granularity means more index entries are generated - and looking up rows within the partition by collation column - is faster -- but, Cassandra will keep the collation index in memory for hot - rows (as part of the key cache), so a larger granularity means - you can cache more hot rows - -*Default Value:* 64 - -``column_index_cache_size_in_kb`` -------------------------------- - -Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk. - -Note that this size refers to the size of the -serialized index information and not the size of the partition. - -*Default Value:* 2 - -``concurrent_compactors`` ------------------------ -*This option is commented out by default.* - -Number of simultaneous compactions to allow, NOT including -validation "compactions" for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long-running compaction. The default is usually -fine, and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first. - -concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8. - -If your data directories are backed by SSD, you should increase this -to the number of cores. - -*Default Value:* 1 - -``concurrent_validations`` ------------------------- -*This option is commented out by default.* - -Number of simultaneous repair validations to allow. Default is unbounded. -Values less than one are interpreted as unbounded (the default). - -*Default Value:* 0 - -``concurrent_materialized_view_builders`` ---------------------------------------- - -Number of simultaneous materialized view builder tasks to allow. - -*Default Value:* 1 - -``compaction_throughput_mb_per_sec`` ----------------------------------- - -Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this accounts for all types -of compaction, including validation compaction. - -*Default Value:* 16 - -``sstable_preemptive_open_interval_in_mb`` ----------------------------------------- - -When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. 
This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot - -*Default Value:* 50 - -``stream_entire_sstables`` --------------------------- -*This option is commented out by default.* - -When enabled, permits Cassandra to zero-copy stream entire eligible -SSTables between nodes, including every component. -This speeds up the network transfer significantly subject to -throttling specified by stream_throughput_outbound_megabits_per_sec. -Enabling this will reduce the GC pressure on sending and receiving node. -When unset, the default is enabled. While this feature tries to keep the -disks balanced, it cannot guarantee it. This feature will be automatically -disabled if internode encryption is enabled. Currently this can be used with -Leveled Compaction. Once CASSANDRA-14586 is fixed other compaction strategies -will benefit as well when used in combination with CASSANDRA-6696. - -*Default Value:* true - -``stream_throughput_outbound_megabits_per_sec`` ------------------------------------------------ -*This option is commented out by default.* - -Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s. - -*Default Value:* 200 - -``inter_dc_stream_throughput_outbound_megabits_per_sec`` --------------------------------------------------------- -*This option is commented out by default.* - -Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s - -*Default Value:* 200 - -``read_request_timeout_in_ms`` ------------------------------- - -How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``range_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 10000 - -``write_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 2000 - -``counter_write_request_timeout_in_ms`` ---------------------------------------- -How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``cas_contention_timeout_in_ms`` --------------------------------- -How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms. - -*Default Value:* 1000 - -``truncate_request_timeout_in_ms`` ----------------------------------- -How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms. - -*Default Value:* 60000 - -``request_timeout_in_ms`` -------------------------- -The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms. 
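Taken together, the coordinator timeouts described in the preceding sections would appear in cassandra.yaml roughly as follows (a sketch using the documented defaults):

.. code-block:: yaml

    read_request_timeout_in_ms: 5000
    range_request_timeout_in_ms: 10000
    write_request_timeout_in_ms: 2000
    counter_write_request_timeout_in_ms: 5000
    cas_contention_timeout_in_ms: 1000
    truncate_request_timeout_in_ms: 60000
    request_timeout_in_ms: 10000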
- -*Default Value:* 10000 - -``internode_application_send_queue_capacity_in_bytes`` ------------------------------------------------------- -*This option is commented out by default.* - -Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details. - -The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000 - -The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000 - -The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000 - -Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received. - -The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. So any given node may have a maximum of -N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes) -messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens -nodes should need to communicate with significant bandwidth. - -The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, -on all links to or from a single node in the cluster. -The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, -on all links to or from any node in the cluster. - - -*Default Value:* 4194304 #4MiB - -``internode_application_send_queue_reserve_endpoint_capacity_in_bytes`` ------------------------------------------------------------------------ -*This option is commented out by default.* - -*Default Value:* 134217728 #128MiB - -``internode_application_send_queue_reserve_global_capacity_in_bytes`` ---------------------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 536870912 #512MiB - -``internode_application_receive_queue_capacity_in_bytes`` ---------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 4194304 #4MiB - -``internode_application_receive_queue_reserve_endpoint_capacity_in_bytes`` --------------------------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 134217728 #128MiB - -``internode_application_receive_queue_reserve_global_capacity_in_bytes`` ------------------------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 536870912 #512MiB - -``slow_query_log_timeout_in_ms`` --------------------------------- - - -How long before a node logs slow queries. Select queries that take longer than -this timeout to execute, will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging. 
- -*Default Value:* 500 - -``cross_node_timeout`` ----------------------- -*This option is commented out by default.* - -Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests. - -Warning: It is generally assumed that users have setup NTP on their clusters, and that clocks are modestly in sync, -since this is a requirement for general correctness of last write wins. - -*Default Value:* true - -``streaming_keep_alive_period_in_secs`` ---------------------------------------- -*This option is commented out by default.* - -Set keep-alive period for streaming -This node will send a keep-alive message periodically with this period. -If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles the stream session times out and fail -Default value is 300s (5 minutes), which means stalled stream -times out in 10 minutes by default - -*Default Value:* 300 - -``streaming_connections_per_host`` ----------------------------------- -*This option is commented out by default.* - -Limit number of connections per host for streaming -Increase this when you notice that joins are CPU-bound rather that network -bound (for example a few nodes with big files). - -*Default Value:* 1 - -``phi_convict_threshold`` -------------------------- -*This option is commented out by default.* - - -phi value that must be reached for a host to be marked down. -most users should never need to adjust this. - -*Default Value:* 8 - -``endpoint_snitch`` -------------------- - -endpoint_snitch -- Set this to a class that implements -IEndpointSnitch. The snitch has two functions: - -- it teaches Cassandra enough about your network topology to route - requests efficiently -- it allows Cassandra to spread replicas around your cluster to avoid - correlated failures. It does this by grouping machines into - "datacenters" and "racks." Cassandra will do its best not to have - more than one replica on the same "rack" (which may not actually - be a physical location) - -CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on "rack1" in "datacenter1", your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new "datacenter") and -decommissioning the old ones. - -Out of the box, Cassandra provides: - -SimpleSnitch: - Treats Strategy order as proximity. This can improve cache - locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack - and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via - gossip. If cassandra-topology.properties exists, it is used as a - fallback, allowing migration from the PropertyFileSnitch. - -PropertyFileSnitch: - Proximity is determined by rack and data center, which are - explicitly configured in cassandra-topology.properties. 
- -Ec2Snitch: - Appropriate for EC2 deployments in a single Region. Loads Region - and Availability Zone information from the EC2 API. The Region is - treated as the datacenter, and the Availability Zone as the rack. - Only private IPs are used, so this will not work across multiple - Regions. - -Ec2MultiRegionSnitch: - Uses public IPs as broadcast_address to allow cross-region - connectivity. (Thus, you should set seed addresses to the public - IP as well.) You will need to open the storage_port or - ssl_storage_port on the public IP firewall. (For intra-Region - traffic, Cassandra will switch to the private IP after - establishing a connection.) - -RackInferringSnitch: - Proximity is determined by rack and data center, which are - assumed to correspond to the 3rd and 2nd octet of each node's IP - address, respectively. Unless this happens to match your - deployment conventions, this is best used as an example of - writing a custom Snitch class and is provided in that spirit. - -You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath. - -*Default Value:* SimpleSnitch - -``dynamic_snitch_update_interval_in_ms`` ----------------------------------------- - -controls how often to perform the more expensive part of host score -calculation - -*Default Value:* 100 - -``dynamic_snitch_reset_interval_in_ms`` ---------------------------------------- -controls how often to reset all host scores, allowing a bad host to -possibly recover - -*Default Value:* 600000 - -``dynamic_snitch_badness_threshold`` ------------------------------------- -if set greater than zero, this will allow -'pinning' of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest. - -*Default Value:* 0.1 - -``server_encryption_options`` ------------------------------ - -Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html - -*NOTE* No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks - -The passwords used in these options must match the passwords used when generating -the keystore and truststore. 
For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore - - -*Default Value (complex option)*:: - - # set to true for allowing secure incoming connections - enabled: false - # If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port - optional: false - # if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used - # during upgrade to 4.0; otherwise, set to false. - enable_legacy_ssl_storage_port: false - # on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true. - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -``client_encryption_options`` ------------------------------ -enable or disable client-to-server encryption. - -*Default Value (complex option)*:: - - enabled: false - # If enabled and optional is set to true encrypted and unencrypted connections are handled. - optional: false - keystore: conf/.keystore - keystore_password: cassandra - # require_client_auth: false - # Set trustore and truststore_password if require_client_auth is true - # truststore: conf/.truststore - # truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - -``internode_compression`` -------------------------- -internode_compression controls whether traffic between nodes is -compressed. -Can be: - -all - all traffic is compressed - -dc - traffic between different datacenters is compressed - -none - nothing is compressed. - -*Default Value:* dc - -``inter_dc_tcp_nodelay`` ------------------------- - -Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses. - -*Default Value:* false - -``tracetype_query_ttl`` ------------------------ - -TTL for different trace types used during logging of the repair process. - -*Default Value:* 86400 - -``tracetype_repair_ttl`` ------------------------- - -*Default Value:* 604800 - -``enable_user_defined_functions`` ---------------------------------- - -If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. - -*Default Value:* false - -``enable_scripted_user_defined_functions`` ------------------------------------------- - -Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. 
-This option has no effect if enable_user_defined_functions is false.
-
-*Default Value:* false
-
-``windows_timer_interval``
---------------------------
-
-The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
-Lowering this value on Windows can provide much tighter latency and better throughput; however,
-some virtualized environments may see a negative performance impact from changing this setting
-below their system default. The sysinternals 'clockres' tool can confirm your system's default
-setting.
-
-*Default Value:* 1
-
-``transparent_data_encryption_options``
----------------------------------------
-
-
-Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
-a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
-the "key_alias" is the only key that will be used for encrypt operations; previously used keys
-can still (and should!) be in the keystore and will be used on decrypt operations
-(to handle the case of key rotation).
-
-It is strongly recommended to download and install Java Cryptography Extension (JCE)
-Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
-(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
-
-Currently, only the following file types are supported for transparent data encryption, although
-more are coming in future Cassandra releases: commitlog, hints
-
-*Default Value (complex option)*::
-
-        enabled: false
-        chunk_length_kb: 64
-        cipher: AES/CBC/PKCS5Padding
-        key_alias: testing:1
-        # CBC IV length for AES needs to be 16 bytes (which is also the default size)
-        # iv_length: 16
-        key_provider:
-          - class_name: org.apache.cassandra.security.JKSKeyProvider
-            parameters:
-              - keystore: conf/.keystore
-                keystore_password: cassandra
-                store_type: JCEKS
-                key_password: cassandra
-
-``tombstone_warn_threshold``
-----------------------------
-
-####################
-SAFETY THRESHOLDS #
-####################
-
-When executing a scan, within or across a partition, we need to keep the
-tombstones seen in memory so we can return them to the coordinator, which
-will use them to make sure other replicas also know about the deleted rows.
-With workloads that generate a lot of tombstones, this can cause performance
-problems and even exhaust the server heap.
-(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
-Adjust the thresholds here if you understand the dangers and want to
-scan more tombstones anyway. These thresholds may also be adjusted at runtime
-using the StorageService mbean.
-
-*Default Value:* 1000
-
-``tombstone_failure_threshold``
--------------------------------
-
-*Default Value:* 100000
-
-``batch_size_warn_threshold_in_kb``
------------------------------------
-
-Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default.
-Caution should be taken when increasing the size of this threshold as it can lead to node instability.
-
-*Default Value:* 5
-
-``batch_size_fail_threshold_in_kb``
------------------------------------
-
-Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.
- -*Default Value:* 50 - -``unlogged_batch_across_partitions_warn_threshold`` ---------------------------------------------------- - -Log WARN on any batches not of type LOGGED than span across more partitions than this limit - -*Default Value:* 10 - -``compaction_large_partition_warning_threshold_mb`` ---------------------------------------------------- - -Log a warning when compacting partitions larger than this value - -*Default Value:* 100 - -``gc_log_threshold_in_ms`` --------------------------- -*This option is commented out by default.* - -GC Pauses greater than 200 ms will be logged at INFO level -This threshold can be adjusted to minimize logging if necessary - -*Default Value:* 200 - -``gc_warn_threshold_in_ms`` ---------------------------- -*This option is commented out by default.* - -GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement. Setting to 0 -will deactivate the feature. - -*Default Value:* 1000 - -``max_value_size_in_mb`` ------------------------- -*This option is commented out by default.* - -Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048. - -*Default Value:* 256 - -``back_pressure_enabled`` -------------------------- - -Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas. - -*Default Value:* false - -``back_pressure_strategy`` --------------------------- -The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map. - -``otc_coalescing_strategy`` ---------------------------- -*This option is commented out by default.* - -Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal -doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. 
-There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details. - -Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name. - -*Default Value:* DISABLED - -``otc_coalescing_window_us`` ----------------------------- -*This option is commented out by default.* - -How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled. - -*Default Value:* 200 - -``otc_coalescing_enough_coalesced_messages`` --------------------------------------------- -*This option is commented out by default.* - -Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128. - -*Default Value:* 8 - -``otc_backlog_expiration_interval_ms`` --------------------------------------- -*This option is commented out by default.* - -How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions. - - -*Default Value:* 200 - -``ideal_consistency_level`` ---------------------------- -*This option is commented out by default.* - -Track a metric per keyspace indicating whether replication achieved the ideal consistency -level for writes without timing out. This is different from the consistency level requested by -each write which may be lower in order to facilitate availability. - -*Default Value:* EACH_QUORUM - -``automatic_sstable_upgrade`` ------------------------------ -*This option is commented out by default.* - -Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the -oldest non-upgraded sstable will get upgraded to the latest version - -*Default Value:* false - -``max_concurrent_automatic_sstable_upgrades`` ---------------------------------------------- -*This option is commented out by default.* -Limit the number of concurrent sstable upgrades - -*Default Value:* 1 - -``audit_logging_options`` -------------------------- - -Audit logging - Logs every incoming CQL command request, authentication to a node. See the docs -on audit_logging for full details about the various configuration options. 
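As an illustrative sketch only (the sub-option names below follow the audit logging documentation referenced above, but the
exact set and shape of sub-options, in particular the ``logger`` entry, varies between versions; consult your cassandra.yaml
for the authoritative list), enabling a binary audit log for a single keyspace might look like::

    audit_logging_options:
        enabled: true
        logger: BinAuditLogger
        included_keyspaces: myapp_ks
        excluded_categories: QUERY

Here ``myapp_ks`` is a hypothetical application keyspace.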
-
-``full_query_logging_options``
-------------------------------
-*This option is commented out by default.*
-
-
-Default options for full query logging - these can be overridden from the command line when executing
-nodetool enablefullquerylog
-
-``corrupted_tombstone_strategy``
---------------------------------
-*This option is commented out by default.*
-
-Validate tombstones on reads and compaction;
-can be either "disabled", "warn" or "exception"
-
-*Default Value:* disabled
-
-``diagnostic_events_enabled``
------------------------------
-
-Diagnostic Events #
-If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details
-on internal state and temporal relationships across events, accessible by clients via JMX.
-
-*Default Value:* false
-
-``native_transport_flush_in_batches_legacy``
---------------------------------------------
-*This option is commented out by default.*
-
-Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in
-particular you run an old kernel or have very few client connections, this option might be worth evaluating.
-
-*Default Value:* false
-
-``repaired_data_tracking_for_range_reads_enabled``
---------------------------------------------------
-
-Enable tracking of repaired state of data during reads and comparison between replicas.
-Mismatches between the repaired sets of replicas can be characterized as either confirmed
-or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair
-sessions, unrepaired partition tombstones, or some other condition means that the disparity
-cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation
-as they may be indicative of corruption or data loss.
-There are separate flags for range vs partition reads as single partition reads are only tracked
-when CL > 1 and a digest mismatch occurs. Currently, range queries don't use digests so if
-enabled for range reads, all range reads will include repaired data tracking. As this adds
-some overhead, operators may wish to disable it whilst still enabling it for partition reads.
-
-*Default Value:* false
-
-``repaired_data_tracking_for_partition_reads_enabled``
-------------------------------------------------------
-
-*Default Value:* false
-
-``report_unconfirmed_repaired_data_mismatches``
------------------------------------------------
-If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed
-mismatches will also be recorded. This is to avoid potential signal:noise issues, as unconfirmed
-mismatches are less actionable than confirmed ones.
-
-*Default Value:* false
-
-``enable_materialized_views``
------------------------------
-
-########################
-EXPERIMENTAL FEATURES #
-########################
-
-Enables materialized view creation on this node.
-Materialized views are considered experimental and are not recommended for production use.
-
-*Default Value:* false
-
-``enable_sasi_indexes``
------------------------
-
-Enables SASI index creation on this node.
-SASI indexes are considered experimental and are not recommended for production use.
-
-*Default Value:* false
-
-``enable_transient_replication``
---------------------------------
-
-Enables creation of transiently replicated keyspaces on this node.
-Transient replication is experimental and is not recommended for production use.
- -*Default Value:* false diff --git a/src/doc/4.0-alpha4/_sources/configuration/index.rst.txt b/src/doc/4.0-alpha4/_sources/configuration/index.rst.txt deleted file mode 100644 index f774fdad6..000000000 --- a/src/doc/4.0-alpha4/_sources/configuration/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra -===================== - -This section describes how to configure Apache Cassandra. - -.. toctree:: - :maxdepth: 1 - - cassandra_config_file diff --git a/src/doc/4.0-alpha4/_sources/contactus.rst.txt b/src/doc/4.0-alpha4/_sources/contactus.rst.txt deleted file mode 100644 index 3ed9004dd..000000000 --- a/src/doc/4.0-alpha4/_sources/contactus.rst.txt +++ /dev/null @@ -1,50 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contact us -========== - -You can get in touch with the Cassandra community either via the mailing lists or :ref:`Slack rooms `. - -.. _mailing-lists: - -Mailing lists -------------- - -The following mailing lists are available: - -- `Users `__ – General discussion list for users - `Subscribe - `__ -- `Developers `__ – Development related discussion - `Subscribe - `__ -- `Commits `__ – Commit notification source repository - - `Subscribe `__ -- `Client Libraries `__ – Discussion related to the - development of idiomatic client APIs - `Subscribe `__ - -Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe. - -.. _slack: - -Slack ------ -To chat with developers or users in real-time, join our rooms on `ASF Slack `__: - -- ``cassandra`` - for user questions and general discussions. -- ``cassandra-dev`` - strictly for questions or discussions related to Cassandra development. 
-
diff --git a/src/doc/4.0-alpha4/_sources/cql/appendices.rst.txt b/src/doc/4.0-alpha4/_sources/cql/appendices.rst.txt
deleted file mode 100644
index 480b78ea2..000000000
--- a/src/doc/4.0-alpha4/_sources/cql/appendices.rst.txt
+++ /dev/null
@@ -1,330 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-..     http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-.. highlight:: cql
-
-Appendices
-----------
-
-.. _appendix-A:
-
-Appendix A: CQL Keywords
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-CQL distinguishes between *reserved* and *non-reserved* keywords.
-Reserved keywords cannot be used as identifiers; they are truly reserved
-for the language (but one can enclose a reserved keyword in
-double-quotes to use it as an identifier). Non-reserved keywords, however,
-only have a specific meaning in certain contexts but can be used as
-identifiers otherwise. The only *raison d'être* of these non-reserved
-keywords is convenience: some keywords are non-reserved only where it was
-always easy for the parser to decide whether they were used as keywords
-or not.
-
-+--------------------+-------------+
-| Keyword            | Reserved? 
| -+====================+=============+ -| ``ADD`` | yes | -+--------------------+-------------+ -| ``AGGREGATE`` | no | -+--------------------+-------------+ -| ``ALL`` | no | -+--------------------+-------------+ -| ``ALLOW`` | yes | -+--------------------+-------------+ -| ``ALTER`` | yes | -+--------------------+-------------+ -| ``AND`` | yes | -+--------------------+-------------+ -| ``APPLY`` | yes | -+--------------------+-------------+ -| ``AS`` | no | -+--------------------+-------------+ -| ``ASC`` | yes | -+--------------------+-------------+ -| ``ASCII`` | no | -+--------------------+-------------+ -| ``AUTHORIZE`` | yes | -+--------------------+-------------+ -| ``BATCH`` | yes | -+--------------------+-------------+ -| ``BEGIN`` | yes | -+--------------------+-------------+ -| ``BIGINT`` | no | -+--------------------+-------------+ -| ``BLOB`` | no | -+--------------------+-------------+ -| ``BOOLEAN`` | no | -+--------------------+-------------+ -| ``BY`` | yes | -+--------------------+-------------+ -| ``CALLED`` | no | -+--------------------+-------------+ -| ``CLUSTERING`` | no | -+--------------------+-------------+ -| ``COLUMNFAMILY`` | yes | -+--------------------+-------------+ -| ``COMPACT`` | no | -+--------------------+-------------+ -| ``CONTAINS`` | no | -+--------------------+-------------+ -| ``COUNT`` | no | -+--------------------+-------------+ -| ``COUNTER`` | no | -+--------------------+-------------+ -| ``CREATE`` | yes | -+--------------------+-------------+ -| ``CUSTOM`` | no | -+--------------------+-------------+ -| ``DATE`` | no | -+--------------------+-------------+ -| ``DECIMAL`` | no | -+--------------------+-------------+ -| ``DELETE`` | yes | -+--------------------+-------------+ -| ``DESC`` | yes | -+--------------------+-------------+ -| ``DESCRIBE`` | yes | -+--------------------+-------------+ -| ``DISTINCT`` | no | -+--------------------+-------------+ -| ``DOUBLE`` | no | -+--------------------+-------------+ -| ``DROP`` | yes | -+--------------------+-------------+ -| ``ENTRIES`` | yes | -+--------------------+-------------+ -| ``EXECUTE`` | yes | -+--------------------+-------------+ -| ``EXISTS`` | no | -+--------------------+-------------+ -| ``FILTERING`` | no | -+--------------------+-------------+ -| ``FINALFUNC`` | no | -+--------------------+-------------+ -| ``FLOAT`` | no | -+--------------------+-------------+ -| ``FROM`` | yes | -+--------------------+-------------+ -| ``FROZEN`` | no | -+--------------------+-------------+ -| ``FULL`` | yes | -+--------------------+-------------+ -| ``FUNCTION`` | no | -+--------------------+-------------+ -| ``FUNCTIONS`` | no | -+--------------------+-------------+ -| ``GRANT`` | yes | -+--------------------+-------------+ -| ``IF`` | yes | -+--------------------+-------------+ -| ``IN`` | yes | -+--------------------+-------------+ -| ``INDEX`` | yes | -+--------------------+-------------+ -| ``INET`` | no | -+--------------------+-------------+ -| ``INFINITY`` | yes | -+--------------------+-------------+ -| ``INITCOND`` | no | -+--------------------+-------------+ -| ``INPUT`` | no | -+--------------------+-------------+ -| ``INSERT`` | yes | -+--------------------+-------------+ -| ``INT`` | no | -+--------------------+-------------+ -| ``INTO`` | yes | -+--------------------+-------------+ -| ``JSON`` | no | -+--------------------+-------------+ -| ``KEY`` | no | -+--------------------+-------------+ -| ``KEYS`` | no | -+--------------------+-------------+ -| ``KEYSPACE`` | yes | 
-+--------------------+-------------+ -| ``KEYSPACES`` | no | -+--------------------+-------------+ -| ``LANGUAGE`` | no | -+--------------------+-------------+ -| ``LIMIT`` | yes | -+--------------------+-------------+ -| ``LIST`` | no | -+--------------------+-------------+ -| ``LOGIN`` | no | -+--------------------+-------------+ -| ``MAP`` | no | -+--------------------+-------------+ -| ``MODIFY`` | yes | -+--------------------+-------------+ -| ``NAN`` | yes | -+--------------------+-------------+ -| ``NOLOGIN`` | no | -+--------------------+-------------+ -| ``NORECURSIVE`` | yes | -+--------------------+-------------+ -| ``NOSUPERUSER`` | no | -+--------------------+-------------+ -| ``NOT`` | yes | -+--------------------+-------------+ -| ``NULL`` | yes | -+--------------------+-------------+ -| ``OF`` | yes | -+--------------------+-------------+ -| ``ON`` | yes | -+--------------------+-------------+ -| ``OPTIONS`` | no | -+--------------------+-------------+ -| ``OR`` | yes | -+--------------------+-------------+ -| ``ORDER`` | yes | -+--------------------+-------------+ -| ``PASSWORD`` | no | -+--------------------+-------------+ -| ``PERMISSION`` | no | -+--------------------+-------------+ -| ``PERMISSIONS`` | no | -+--------------------+-------------+ -| ``PRIMARY`` | yes | -+--------------------+-------------+ -| ``RENAME`` | yes | -+--------------------+-------------+ -| ``REPLACE`` | yes | -+--------------------+-------------+ -| ``RETURNS`` | no | -+--------------------+-------------+ -| ``REVOKE`` | yes | -+--------------------+-------------+ -| ``ROLE`` | no | -+--------------------+-------------+ -| ``ROLES`` | no | -+--------------------+-------------+ -| ``SCHEMA`` | yes | -+--------------------+-------------+ -| ``SELECT`` | yes | -+--------------------+-------------+ -| ``SET`` | yes | -+--------------------+-------------+ -| ``SFUNC`` | no | -+--------------------+-------------+ -| ``SMALLINT`` | no | -+--------------------+-------------+ -| ``STATIC`` | no | -+--------------------+-------------+ -| ``STORAGE`` | no | -+--------------------+-------------+ -| ``STYPE`` | no | -+--------------------+-------------+ -| ``SUPERUSER`` | no | -+--------------------+-------------+ -| ``TABLE`` | yes | -+--------------------+-------------+ -| ``TEXT`` | no | -+--------------------+-------------+ -| ``TIME`` | no | -+--------------------+-------------+ -| ``TIMESTAMP`` | no | -+--------------------+-------------+ -| ``TIMEUUID`` | no | -+--------------------+-------------+ -| ``TINYINT`` | no | -+--------------------+-------------+ -| ``TO`` | yes | -+--------------------+-------------+ -| ``TOKEN`` | yes | -+--------------------+-------------+ -| ``TRIGGER`` | no | -+--------------------+-------------+ -| ``TRUNCATE`` | yes | -+--------------------+-------------+ -| ``TTL`` | no | -+--------------------+-------------+ -| ``TUPLE`` | no | -+--------------------+-------------+ -| ``TYPE`` | no | -+--------------------+-------------+ -| ``UNLOGGED`` | yes | -+--------------------+-------------+ -| ``UPDATE`` | yes | -+--------------------+-------------+ -| ``USE`` | yes | -+--------------------+-------------+ -| ``USER`` | no | -+--------------------+-------------+ -| ``USERS`` | no | -+--------------------+-------------+ -| ``USING`` | yes | -+--------------------+-------------+ -| ``UUID`` | no | -+--------------------+-------------+ -| ``VALUES`` | no | -+--------------------+-------------+ -| ``VARCHAR`` | no | -+--------------------+-------------+ -| ``VARINT`` | no | 
-+--------------------+-------------+ -| ``WHERE`` | yes | -+--------------------+-------------+ -| ``WITH`` | yes | -+--------------------+-------------+ -| ``WRITETIME`` | no | -+--------------------+-------------+ - -Appendix B: CQL Reserved Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name. - -+-----------------+ -| type | -+=================+ -| ``bitstring`` | -+-----------------+ -| ``byte`` | -+-----------------+ -| ``complex`` | -+-----------------+ -| ``enum`` | -+-----------------+ -| ``interval`` | -+-----------------+ -| ``macaddr`` | -+-----------------+ - - -Appendix C: Dropping Compact Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting version 4.0, Thrift and COMPACT STORAGE is no longer supported. - -'ALTER ... DROP COMPACT STORAGE' statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables: - -- CQL-created Compact Tables that have no clustering columns, will expose an - additional clustering column ``column1`` with ``UTF8Type``. -- CQL-created Compact Tables that had no regular columns, will expose a - regular column ``value`` with ``BytesType``. -- For CQL-Created Compact Tables, all columns originally defined as - ``regular`` will be come ``static`` -- CQL-created Compact Tables that have clustering but have no regular - columns will have an empty value column (of ``EmptyType``) -- SuperColumn Tables (can only be created through Thrift) will expose - a compact value map with an empty name. -- Thrift-created Compact Tables will have types corresponding to their - Thrift definition. diff --git a/src/doc/4.0-alpha4/_sources/cql/changes.rst.txt b/src/doc/4.0-alpha4/_sources/cql/changes.rst.txt deleted file mode 100644 index 6691f156a..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/changes.rst.txt +++ /dev/null @@ -1,211 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Changes -------- - -The following describes the changes in each version of CQL. - -3.4.5 -^^^^^ - -- Adds support for arithmetic operators (:jira:`11935`) -- Adds support for ``+`` and ``-`` operations on dates (:jira:`11936`) -- Adds ``currentTimestamp``, ``currentDate``, ``currentTime`` and ``currentTimeUUID`` functions (:jira:`13132`) - - -3.4.4 -^^^^^ - -- ``ALTER TABLE`` ``ALTER`` has been removed; a column's type may not be changed after creation (:jira:`12443`). -- ``ALTER TYPE`` ``ALTER`` has been removed; a field's type may not be changed after creation (:jira:`12443`). - -3.4.3 -^^^^^ - -- Adds a new ``duration `` :ref:`data types ` (:jira:`11873`). -- Support for ``GROUP BY`` (:jira:`10707`). 
-- Adds a ``DEFAULT UNSET`` option for ``INSERT JSON`` to ignore omitted columns (:jira:`11424`). -- Allows ``null`` as a legal value for TTL on insert and update. It will be treated as equivalent to inserting a 0 (:jira:`12216`). - -3.4.2 -^^^^^ - -- If a table has a non zero ``default_time_to_live``, then explicitly specifying a TTL of 0 in an ``INSERT`` or - ``UPDATE`` statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels - the ``default_time_to_live``). This wasn't the case before and the ``default_time_to_live`` was applied even though a - TTL had been explicitly set. -- ``ALTER TABLE`` ``ADD`` and ``DROP`` now allow multiple columns to be added/removed. -- New ``PER PARTITION LIMIT`` option for ``SELECT`` statements (see `CASSANDRA-7017 - `__. -- :ref:`User-defined functions ` can now instantiate ``UDTValue`` and ``TupleValue`` instances via the - new ``UDFContext`` interface (see `CASSANDRA-10818 `__. -- :ref:`User-defined types ` may now be stored in a non-frozen form, allowing individual fields to be updated and - deleted in ``UPDATE`` statements and ``DELETE`` statements, respectively. (`CASSANDRA-7423 - `__). - -3.4.1 -^^^^^ - -- Adds ``CAST`` functions. - -3.4.0 -^^^^^ - -- Support for :ref:`materialized views `. -- ``DELETE`` support for inequality expressions and ``IN`` restrictions on any primary key columns. -- ``UPDATE`` support for ``IN`` restrictions on any primary key columns. - -3.3.1 -^^^^^ - -- The syntax ``TRUNCATE TABLE X`` is now accepted as an alias for ``TRUNCATE X``. - -3.3.0 -^^^^^ - -- :ref:`User-defined functions and aggregates ` are now supported. -- Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings. -- Introduces Roles to supersede user based authentication and access control -- New ``date``, ``time``, ``tinyint`` and ``smallint`` :ref:`data types ` have been added. -- :ref:`JSON support ` has been added -- Adds new time conversion functions and deprecate ``dateOf`` and ``unixTimestampOf``. - -3.2.0 -^^^^^ - -- :ref:`User-defined types ` supported. -- ``CREATE INDEX`` now supports indexing collection columns, including indexing the keys of map collections through the - ``keys()`` function -- Indexes on collections may be queried using the new ``CONTAINS`` and ``CONTAINS KEY`` operators -- :ref:`Tuple types ` were added to hold fixed-length sets of typed positional fields. -- ``DROP INDEX`` now supports optionally specifying a keyspace. - -3.1.7 -^^^^^ - -- ``SELECT`` statements now support selecting multiple rows in a single partition using an ``IN`` clause on combinations - of clustering columns. -- ``IF NOT EXISTS`` and ``IF EXISTS`` syntax is now supported by ``CREATE USER`` and ``DROP USER`` statements, - respectively. - -3.1.6 -^^^^^ - -- A new ``uuid()`` method has been added. -- Support for ``DELETE ... IF EXISTS`` syntax. - -3.1.5 -^^^^^ - -- It is now possible to group clustering columns in a relation, see :ref:`WHERE ` clauses. -- Added support for :ref:`static columns `. - -3.1.4 -^^^^^ - -- ``CREATE INDEX`` now allows specifying options when creating CUSTOM indexes. - -3.1.3 -^^^^^ - -- Millisecond precision formats have been added to the :ref:`timestamp ` parser. - -3.1.2 -^^^^^ - -- ``NaN`` and ``Infinity`` has been added as valid float constants. They are now reserved keywords. In the unlikely case - you we using them as a column identifier (or keyspace/table one), you will now need to double quote them. 
- -3.1.1 -^^^^^ - -- ``SELECT`` statement now allows listing the partition keys (using the ``DISTINCT`` modifier). See `CASSANDRA-4536 - `__. -- The syntax ``c IN ?`` is now supported in ``WHERE`` clauses. In that case, the value expected for the bind variable - will be a list of whatever type ``c`` is. -- It is now possible to use named bind variables (using ``:name`` instead of ``?``). - -3.1.0 -^^^^^ - -- ``ALTER TABLE`` ``DROP`` option added. -- ``SELECT`` statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported. -- ``CREATE`` statements for ``KEYSPACE``, ``TABLE`` and ``INDEX`` now supports an ``IF NOT EXISTS`` condition. - Similarly, ``DROP`` statements support a ``IF EXISTS`` condition. -- ``INSERT`` statements optionally supports a ``IF NOT EXISTS`` condition and ``UPDATE`` supports ``IF`` conditions. - -3.0.5 -^^^^^ - -- ``SELECT``, ``UPDATE``, and ``DELETE`` statements now allow empty ``IN`` relations (see `CASSANDRA-5626 - `__. - -3.0.4 -^^^^^ - -- Updated the syntax for custom :ref:`secondary indexes `. -- Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not - correct (the order was **not** the one of the type of the partition key). Instead, the ``token`` method should always - be used for range queries on the partition key (see :ref:`WHERE clauses `). - -3.0.3 -^^^^^ - -- Support for custom :ref:`secondary indexes ` has been added. - -3.0.2 -^^^^^ - -- Type validation for the :ref:`constants ` has been fixed. For instance, the implementation used to allow - ``'2'`` as a valid value for an ``int`` column (interpreting it has the equivalent of ``2``), or ``42`` as a valid - ``blob`` value (in which case ``42`` was interpreted as an hexadecimal representation of the blob). This is no longer - the case, type validation of constants is now more strict. See the :ref:`data types ` section for details - on which constant is allowed for which type. -- The type validation fixed of the previous point has lead to the introduction of blobs constants to allow the input of - blobs. Do note that while the input of blobs as strings constant is still supported by this version (to allow smoother - transition to blob constant), it is now deprecated and will be removed by a future version. If you were using strings - as blobs, you should thus update your client code ASAP to switch blob constants. -- A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is - now also allowed in select clauses. See the :ref:`section on functions ` for details. - -3.0.1 -^^^^^ - -- Date strings (and timestamps) are no longer accepted as valid ``timeuuid`` values. Doing so was a bug in the sense - that date string are not valid ``timeuuid``, and it was thus resulting in `confusing behaviors - `__. However, the following new methods have been added to help - working with ``timeuuid``: ``now``, ``minTimeuuid``, ``maxTimeuuid`` , - ``dateOf`` and ``unixTimestampOf``. -- Float constants now support the exponent notation. In other words, ``4.2E10`` is now a valid floating point value. - -Versioning -^^^^^^^^^^ - -Versioning of the CQL language adheres to the `Semantic Versioning `__ guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version. 
- -========= ============================================================================================================= - version description -========= ============================================================================================================= - Major The major version *must* be bumped when backward incompatible changes are introduced. This should rarely - occur. - Minor Minor version increments occur when new, but backward compatible, functionality is introduced. - Patch The patch version is incremented when bugs are fixed. -========= ============================================================================================================= diff --git a/src/doc/4.0-alpha4/_sources/cql/ddl.rst.txt b/src/doc/4.0-alpha4/_sources/cql/ddl.rst.txt deleted file mode 100644 index 88df05b4c..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/ddl.rst.txt +++ /dev/null @@ -1,852 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-definition: - -Data Definition ---------------- - -CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in -*keyspaces*. A keyspace defines a number of options that applies to all the tables it contains, most prominently of -which is the :ref:`replication strategy ` used by the keyspace. It is generally encouraged to use -one keyspace by *application*, and thus many cluster may define only one keyspace. - -This section describes the statements used to create, modify, and remove those keyspace and tables. - -Common definitions -^^^^^^^^^^^^^^^^^^ - -The names of the keyspaces and tables are defined by the following grammar: - -.. productionlist:: - keyspace_name: `name` - table_name: [ `keyspace_name` '.' ] `name` - name: `unquoted_name` | `quoted_name` - unquoted_name: re('[a-zA-Z_0-9]{1, 48}') - quoted_name: '"' `unquoted_name` '"' - -Both keyspace and table name should be comprised of only alphanumeric characters, cannot be empty and are limited in -size to 48 characters (that limit exists mostly to avoid filenames (which may include the keyspace and table name) to go -over the limits of certain file systems). By default, keyspace and table names are case insensitive (``myTable`` is -equivalent to ``mytable``) but case sensitivity can be forced by using double-quotes (``"myTable"`` is different from -``mytable``). - -Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is -part of. If is is not fully-qualified, the table is assumed to be in the *current* keyspace (see :ref:`USE statement -`). - -Further, the valid names for columns is simply defined as: - -.. 
productionlist:: - column_name: `identifier` - -We also define the notion of statement options for use in the following section: - -.. productionlist:: - options: `option` ( AND `option` )* - option: `identifier` '=' ( `identifier` | `constant` | `map_literal` ) - -.. _create-keyspace-statement: - -CREATE KEYSPACE -^^^^^^^^^^^^^^^ - -A keyspace is created using a ``CREATE KEYSPACE`` statement: - -.. productionlist:: - create_keyspace_statement: CREATE KEYSPACE [ IF NOT EXISTS ] `keyspace_name` WITH `options` - -For instance:: - - CREATE KEYSPACE excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3} - AND durable_writes = false; - -Attempting to create a keyspace that already exists will return an error unless the ``IF NOT EXISTS`` option is used. If -it is used, the statement will be a no-op if the keyspace already exists. - -The supported ``options`` are: - -=================== ========== =========== ========= =================================================================== -name kind mandatory default description -=================== ========== =========== ========= =================================================================== -``replication`` *map* yes The replication strategy and options to use for the keyspace (see - details below). -``durable_writes`` *simple* no true Whether to use the commit log for updates on this keyspace - (disable this option at your own risk!). -=================== ========== =========== ========= =================================================================== - -The ``replication`` property is mandatory and must at least contains the ``'class'`` sub-option which defines the -:ref:`replication strategy ` class to use. The rest of the sub-options depends on what replication -strategy is used. By default, Cassandra support the following ``'class'``: - -.. _replication-strategy: - -``SimpleStrategy`` -"""""""""""""""""" - -A simple strategy that defines a replication factor for data to be spread -across the entire cluster. This is generally not a wise choice for production -because it does not respect datacenter layouts and can lead to wildly varying -query latency. For a production ready strategy, see -``NetworkTopologyStrategy``. ``SimpleStrategy`` supports a single mandatory argument: - -========================= ====== ======= ============================================= -sub-option type since description -========================= ====== ======= ============================================= -``'replication_factor'`` int all The number of replicas to store per range -========================= ====== ======= ============================================= - -``NetworkTopologyStrategy`` -""""""""""""""""""""""""""" - -A production ready replication strategy that allows to set the replication -factor independently for each data-center. The rest of the sub-options are -key-value pairs where a key is a data-center name and its value is the -associated replication factor. Options: - -===================================== ====== ====== ============================================= -sub-option type since description -===================================== ====== ====== ============================================= -``''`` int all The number of replicas to store per range in - the provided datacenter. 
-``'replication_factor'``             int    4.0    The number of replicas to use as a default
-                                                    per datacenter if not specifically provided.
-                                                    Note that this always defers to existing
-                                                    definitions or explicit datacenter settings.
-                                                    For example, to have three replicas per
-                                                    datacenter, supply this with a value of 3.
-===================================== ====== ====== =============================================
-
-Note that when ``ALTER`` ing keyspaces and supplying ``replication_factor``,
-auto-expansion will only *add* new datacenters for safety; it will not alter
-existing datacenters or remove any, even if they are no longer in the cluster.
-If you want to remove datacenters while still supplying ``replication_factor``,
-explicitly zero out the datacenter you want to have zero replicas.
-
-An example of auto-expanding datacenters with two datacenters: ``DC1`` and ``DC2``::
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3}
-
-    DESCRIBE KEYSPACE excalibur
-        CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true;
-
-
-An example of auto-expanding and overriding a datacenter::
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2}
-
-    DESCRIBE KEYSPACE excalibur
-        CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true;
-
-An example that excludes a datacenter while using ``replication_factor``::
-
-    CREATE KEYSPACE excalibur
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0} ;
-
-    DESCRIBE KEYSPACE excalibur
-        CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true;
-
-If transient replication has been enabled, transient replicas can be configured for both
-``SimpleStrategy`` and ``NetworkTopologyStrategy`` by defining replication factors in the format
-``'<total_replicas>/<transient_replicas>'``.
-
-For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient::
-
-    CREATE KEYSPACE some_keyspace
-        WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1', 'DC2' : '5/2'};
-
-.. _use-statement:
-
-USE
-^^^
-
-The ``USE`` statement allows you to change the *current* keyspace (for the *connection* on which it is executed). A number
-of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the
-default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a
-keyspace name). A ``USE`` statement simply takes the keyspace to use as current as its argument:
-
-.. productionlist::
-   use_statement: USE `keyspace_name`
-
-.. _alter-keyspace-statement:
-
-ALTER KEYSPACE
-^^^^^^^^^^^^^^
-
-An ``ALTER KEYSPACE`` statement allows you to modify the options of a keyspace:
-
-.. productionlist::
-   alter_keyspace_statement: ALTER KEYSPACE `keyspace_name` WITH `options`
-
-For instance::
-
-    ALTER KEYSPACE Excelsior
-        WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-The supported options are the same as for :ref:`creating a keyspace `.
-
-.. _drop-keyspace-statement:
-
-DROP KEYSPACE
-^^^^^^^^^^^^^
-
-Dropping a keyspace can be done using the ``DROP KEYSPACE`` statement:
-
-.. productionlist::
-   drop_keyspace_statement: DROP KEYSPACE [ IF EXISTS ] `keyspace_name`
-
-For instance::
-
-    DROP KEYSPACE Excelsior;
-
-Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UDTs and
-functions in it, and all the data contained in those tables.
-
-If the keyspace does not exist, the statement will return an error, unless ``IF EXISTS`` is used, in which case the
-operation is a no-op.
-
-.. _create-table-statement:
-
-CREATE TABLE
-^^^^^^^^^^^^
-
-Creating a new table uses the ``CREATE TABLE`` statement:
-
-.. productionlist::
-   create_table_statement: CREATE TABLE [ IF NOT EXISTS ] `table_name`
-                         : '('
-                         :     `column_definition`
-                         :     ( ',' `column_definition` )*
-                         :     [ ',' PRIMARY KEY '(' `primary_key` ')' ]
-                         : ')' [ WITH `table_options` ]
-   column_definition: `column_name` `cql_type` [ STATIC ] [ PRIMARY KEY]
-   primary_key: `partition_key` [ ',' `clustering_columns` ]
-   partition_key: `column_name`
-                : | '(' `column_name` ( ',' `column_name` )* ')'
-   clustering_columns: `column_name` ( ',' `column_name` )*
-   table_options: COMPACT STORAGE [ AND `table_options` ]
-                : | CLUSTERING ORDER BY '(' `clustering_order` ')' [ AND `table_options` ]
-                : | `options`
-   clustering_order: `column_name` (ASC | DESC) ( ',' `column_name` (ASC | DESC) )*
-
-For instance::
-
-    CREATE TABLE monkeySpecies (
-        species text PRIMARY KEY,
-        common_name text,
-        population varint,
-        average_size int
-    ) WITH comment='Important biological records';
-
-    CREATE TABLE timeline (
-        userid uuid,
-        posted_month int,
-        posted_time uuid,
-        body text,
-        posted_by text,
-        PRIMARY KEY (userid, posted_month, posted_time)
-    ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-    CREATE TABLE loads (
-        machine inet,
-        cpu int,
-        mtime timeuuid,
-        load float,
-        PRIMARY KEY ((machine, cpu), mtime)
-    ) WITH CLUSTERING ORDER BY (mtime DESC);
-
-A CQL table has a name and is composed of a set of *rows*. Creating a table amounts to defining which :ref:`columns
-` the rows will be composed of, which of those columns compose the :ref:`primary key `, as
-well as optional :ref:`options ` for the table.
-
-Attempting to create an already existing table will return an error unless the ``IF NOT EXISTS`` directive is used. If
-it is used, the statement will be a no-op if the table already exists.
-
-
-.. _column-definition:
-
-Column definitions
-~~~~~~~~~~~~~~~~~~
-
-Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later
-using an :ref:`alter statement`).
-
-A :token:`column_definition` is primarily comprised of the name of the column defined and its :ref:`type `,
-which restricts which values are accepted for that column. Additionally, a column definition can have the following
-modifiers:
-
-``STATIC``
-    it declares the column as being a :ref:`static column `.
-
-``PRIMARY KEY``
-    it declares the column as being the sole component of the :ref:`primary key ` of the table.
-
-.. _static-columns:
-
-Static columns
-``````````````
-Some columns can be declared as ``STATIC`` in a table definition. A column that is static will be "shared" by all the
-rows belonging to the same partition (having the same :ref:`partition key `). 
For instance::
-
-    CREATE TABLE t (
-        pk int,
-        t int,
-        v text,
-        s text static,
-        PRIMARY KEY (pk, t)
-    );
-
-    INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-    INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-    SELECT * FROM t;
-       pk | t | v      | s
-      ----+---+--------+-----------
-       0  | 0 | 'val0' | 'static1'
-       0  | 1 | 'val1' | 'static1'
-
-As can be seen, the ``s`` value is the same (``static1``) for both of the rows in the partition (the partition key in
-that example being ``pk``, both rows are in that same partition): the 2nd insertion has overridden the value for ``s``.
-
-The use of static columns has the following restrictions:
-
-- tables with the ``COMPACT STORAGE`` option (see below) cannot use them.
-- a table without clustering columns cannot have static columns (in a table without clustering columns, every partition
-  has only one row, and so every column is inherently static).
-- only non ``PRIMARY KEY`` columns can be static.
-
-.. _primary-key:
-
-The Primary key
-~~~~~~~~~~~~~~~
-
-Within a table, a row is uniquely identified by its ``PRIMARY KEY``, and hence all tables **must** define a PRIMARY KEY
-(and only one). A ``PRIMARY KEY`` definition is composed of one or more of the columns defined in the table.
-Syntactically, the primary key is defined by the keywords ``PRIMARY KEY`` followed by a comma-separated list of the column
-names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that
-column definition with the ``PRIMARY KEY`` keywords. The order of the columns in the primary key definition matters.
-
-A CQL primary key is composed of 2 parts:
-
-- the :ref:`partition key ` part. It is the first component of the primary key definition. It can be a
-  single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key;
-  the smallest possible table definition is::
-
-      CREATE TABLE t (k text PRIMARY KEY);
-
-- the :ref:`clustering columns `. Those are the columns after the first component of the primary key
-  definition, and the order of those columns defines the *clustering order*.
-
-Some examples of primary key definitions are:
-
-- ``PRIMARY KEY (a)``: ``a`` is the partition key and there are no clustering columns.
-- ``PRIMARY KEY (a, b, c)`` : ``a`` is the partition key and ``b`` and ``c`` are the clustering columns.
-- ``PRIMARY KEY ((a, b), c)`` : ``a`` and ``b`` compose the partition key (this is often called a *composite* partition
-  key) and ``c`` is the clustering column.
-
-
-.. _partition-key:
-
-The partition key
-`````````````````
-
-Within a table, CQL defines the notion of a *partition*. A partition is simply the set of rows that share the same value
-for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same
-partition only if they have the same values for all those partition key columns. So for instance, given the following table
-definition and content::
-
-    CREATE TABLE t (
-        a int,
-        b int,
-        c int,
-        d int,
-        PRIMARY KEY ((a, b), c, d)
-    );
-
-    SELECT * FROM t;
-       a | b | c | d
-      ---+---+---+---
-       0 | 0 | 0 | 0    // row 1
-       0 | 0 | 1 | 1    // row 2
-       0 | 1 | 2 | 2    // row 3
-       0 | 1 | 3 | 3    // row 4
-       1 | 1 | 4 | 4    // row 5
-
-``row 1`` and ``row 2`` are in the same partition, ``row 3`` and ``row 4`` are also in the same partition (but a
-different one) and ``row 5`` is in yet another partition.
-
-Note that a table always has a partition key, and that if the table has no :ref:`clustering columns
-`, then every partition of that table is only comprised of a single row (since the primary key
-uniquely identifies rows and the primary key is equal to the partition key if there are no clustering columns).
-
-The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored
-on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be
-localized together in the cluster, and it is thus important to choose your partition key wisely so that rows that need
-to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of
-nodes).
-
-Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to
-be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot.
-
-Another useful property of a partition is that when writing data, all the updates belonging to a single partition are
-done *atomically* and in *isolation*, which is not the case across partitions.
-
-The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects
-of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficient they are.
-
-
-.. _clustering-columns:
-
-The clustering columns
-``````````````````````
-
-The clustering columns of a table define the clustering order for the partitions of that table. For a given
-:ref:`partition `, all the rows are physically ordered inside Cassandra by that clustering order. For
-instance, given::
-
-    CREATE TABLE t (
-        a int,
-        b int,
-        c int,
-        PRIMARY KEY (a, b, c)
-    );
-
-    SELECT * FROM t;
-       a | b | c
-      ---+---+---
-       0 | 0 | 4     // row 1
-       0 | 1 | 9     // row 2
-       0 | 2 | 2     // row 3
-       0 | 3 | 3     // row 4
-
-then the rows (which all belong to the same partition) are all stored internally in the order of the values of their
-``b`` column (the order they are displayed above). So where the partition key of the table allows grouping rows on the
-same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the
-retrieval of a range of rows within a partition (for instance, in the example above,
-``SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3``) to be very efficient.
-
-
-.. _create-table-options:
-
-Table options
-~~~~~~~~~~~~~
-
-A CQL table has a number of options that can be set at creation (and, for most of them, :ref:`altered
-` later). These options are specified after the ``WITH`` keyword.
-
-Amongst those options, two important ones cannot be changed after creation and influence which queries can be done
-against the table: the ``COMPACT STORAGE`` option and the ``CLUSTERING ORDER`` option. Those, as well as the other
-options of a table, are described in the following sections.
-
-.. _compact-tables:
-
-Compact tables
-``````````````
-
-.. warning:: Since Cassandra 3.0, compact tables have the exact same layout internally as non-compact ones (for the
-   same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition
-   and usage. 
It only exists for historical reasons and is preserved for backward compatibility. And as ``COMPACT
-   STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new tables with the
-   ``COMPACT STORAGE`` option.
-
-A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is only maintained for backward
-compatibility for definitions created before CQL version 3 and shouldn't be used for new tables. Declaring a
-table with this option creates limitations for the table which are largely arbitrary (and exist for historical
-reasons). Amongst those limitations:
-
-- a compact table cannot use collections or static columns.
-- if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary
-  key ones. This implies, in particular, that you cannot add or remove columns after creation.
-- a compact table is limited in the indexes it can create, and no materialized view can be created on it.
-
-.. _clustering-order:
-
-Reversing the clustering order
-``````````````````````````````
-
-The clustering order of a table is defined by the :ref:`clustering columns ` of that table. By
-default, that ordering is based on the natural order of those clustering columns, but the ``CLUSTERING ORDER`` option allows
-changing that clustering order to use the *reverse* natural order for some (potentially all) of the columns.
-
-The ``CLUSTERING ORDER`` option takes a comma-separated list of the clustering columns, each with an ``ASC`` (for
-*ascendant*, i.e. the natural order) or ``DESC`` (for *descendant*, i.e. the reverse natural order). Note in particular
-that the default (if the ``CLUSTERING ORDER`` option is not used) is strictly equivalent to using the option with all
-clustering columns using the ``ASC`` modifier.
-
-Note that this option is basically a hint for the storage engine to change the order in which it stores the rows, but it
-has 3 visible consequences:
-
-# it limits which ``ORDER BY`` clauses are allowed for :ref:`selects ` on that table. You can only
-  order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering columns
-  ``a`` and ``b`` and you defined ``WITH CLUSTERING ORDER (a DESC, b ASC)``, then in queries you will be allowed to use
-  ``ORDER BY (a DESC, b ASC)`` and (reverse clustering order) ``ORDER BY (a ASC, b DESC)`` but **not** ``ORDER BY (a
-  ASC, b ASC)`` (nor ``ORDER BY (a DESC, b DESC)``).
-# it also changes the default order of results when queried (if no ``ORDER BY`` is provided). Results are always returned
-  in clustering order (within a partition).
-# it has a small performance impact on some queries, as queries in reverse clustering order are slower than those in
-  forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of
-  your columns (which is common with time series for instance, where you often want data from the newest to the oldest),
-  it is an optimization to declare a descending clustering order.
-
-.. _create-table-general-options:
-
-Other table options
-```````````````````
-
-.. 
todo:: review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance) - -A table supports the following options: - -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| option | kind | default | description | -+================================+==========+=============+===========================================================+ -| ``comment`` | *simple* | none | A free-form, human-readable comment. | -| ``speculative_retry`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``cdc`` | *boolean*| false | Create a Change Data Capture (CDC) log on the table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``additional_write_policy`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``gc_grace_seconds`` | *simple* | 864000 | Time to wait before garbage collecting tombstones | -| | | | (deletion markers). | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``bloom_filter_fp_chance`` | *simple* | 0.00075 | The target probability of false positive of the sstable | -| | | | bloom filters. Said bloom filters will be sized to provide| -| | | | the provided probability (thus lowering this value impact | -| | | | the size of bloom filters in-memory and on-disk) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``default_time_to_live`` | *simple* | 0 | The default expiration time (“TTL”) in seconds for a | -| | | | table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compaction`` | *map* | *see below* | :ref:`Compaction options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compression`` | *map* | *see below* | :ref:`Compression options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``caching`` | *map* | *see below* | :ref:`Caching options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``memtable_flush_period_in_ms``| *simple* | 0 | Time (in ms) before Cassandra flushes memtables to disk. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``read_repair`` | *simple* | BLOCKING | Sets read repair behavior (see below) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ - -.. _speculative-retry-options: - -Speculative retry options -######################### - -By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ``ONE``, a quorum for ``QUORUM``, and so on. -``speculative_retry`` determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. 
Speculative retries are used to reduce the latency. The speculative_retry option may be -used to configure rapid read protection with which a coordinator sends more requests than needed to satisfy the Consistency level. - -Pre-4.0 speculative Retry Policy takes a single string as a parameter, this can be ``NONE``, ``ALWAYS``, ``99PERCENTILE`` (PERCENTILE), ``50MS`` (CUSTOM). - -Examples of setting speculative retry are: - -:: - - ALTER TABLE users WITH speculative_retry = '10ms'; - - -Or, - -:: - - ALTER TABLE users WITH speculative_retry = '99PERCENTILE'; - -The problem with these settings is when a single host goes into an unavailable state this drags up the percentiles. This means if we -are set to use ``p99`` alone, we might not speculate when we intended to to because the value at the specified percentile has gone so high. -As a fix 4.0 adds support for hybrid ``MIN()``, ``MAX()`` speculative retry policies (`CASSANDRA-14293 -`_). This means if the normal ``p99`` for the -table is <50ms, we will still speculate at this value and not drag the tail latencies up... but if the ``p99th`` goes above what we know we -should never exceed we use that instead. - -In 4.0 the values (case-insensitive) discussed in the following table are supported: - -============================ ======================== ============================================================================= - Format Example Description -============================ ======================== ============================================================================= - ``XPERCENTILE`` 90.5PERCENTILE Coordinators record average per-table response times for all replicas. - If a replica takes longer than ``X`` percent of this table's average - response time, the coordinator queries an additional replica. - ``X`` must be between 0 and 100. - ``XP`` 90.5P Synonym for ``XPERCENTILE`` - ``Yms`` 25ms If a replica takes more than ``Y`` milliseconds to respond, - the coordinator queries an additional replica. - ``MIN(XPERCENTILE,YMS)`` MIN(99PERCENTILE,35MS) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is lower at the time of calculation. - Parameters are ``XPERCENTILE``, ``XP``, or ``Yms``. - This is helpful to help protect against a single slow instance; in the - happy case the 99th percentile is normally lower than the specified - fixed value however, a slow host may skew the percentile very high - meaning the slower the cluster gets, the higher the value of the percentile, - and the higher the calculated time used to determine if we should - speculate or not. This allows us to set an upper limit that we want to - speculate at, but avoid skewing the tail latencies by speculating at the - lower value when the percentile is less than the specified fixed upper bound. - ``MAX(XPERCENTILE,YMS)`` MAX(90.5P,25ms) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is higher at the time of calculation. - ``ALWAYS`` Coordinators always query all replicas. - ``NEVER`` Coordinators never query additional replicas. -============================ =================== ============================================================================= - -As of version 4.0 speculative retry allows more friendly params (`CASSANDRA-13876 -`_). The ``speculative_retry`` is more flexible with case. As an example a -value does not have to be ``NONE``, and the following are supported alternatives. 
- -:: - - alter table users WITH speculative_retry = 'none'; - alter table users WITH speculative_retry = 'None'; - -The text component is case insensitive and for ``nPERCENTILE`` version 4.0 allows ``nP``, for instance ``99p``. -In a hybrid value for speculative retry, one of the two values must be a fixed millisecond value and the other a percentile value. - -Some examples: - -:: - - min(99percentile,50ms) - max(99p,50MS) - MAX(99P,50ms) - MIN(99.9PERCENTILE,50ms) - max(90percentile,100MS) - MAX(100.0PERCENTILE,60ms) - -Two values of the same kind cannot be specified such as ``min(90percentile,99percentile)`` as it wouldn’t be a hybrid value. -This setting does not affect reads with consistency level ``ALL`` because they already query all replicas. - -Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default ``99PERCENTILE``. - - -``additional_write_policy`` specifies the threshold at which a cheap quorum write will be upgraded to include transient replicas. - -.. _cql-compaction-options: - -Compaction options -################## - -The ``compaction`` options must at least define the ``'class'`` sub-option, that defines the compaction strategy class -to use. The supported class are ``'SizeTieredCompactionStrategy'`` (:ref:`STCS `), -``'LeveledCompactionStrategy'`` (:ref:`LCS `) and ``'TimeWindowCompactionStrategy'`` (:ref:`TWCS `) (the -``'DateTieredCompactionStrategy'`` is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be -preferred instead). The default is ``'SizeTieredCompactionStrategy'``. Custom strategy can be provided by specifying the full class name as a :ref:`string constant -`. - -All default strategies support a number of :ref:`common options `, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS `, :ref:`LCS -` and :ref:`TWCS `). - -.. _cql-compression-options: - -Compression options -################### - -The ``compression`` options define if and how the sstables of the table are compressed. Compression is configured on a per-table -basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. The following sub-options are -available: - -========================= =============== ============================================================================= - Option Default Description -========================= =============== ============================================================================= - ``class`` LZ4Compressor The compression algorithm to use. Default compressor are: LZ4Compressor, - SnappyCompressor, DeflateCompressor and ZstdCompressor. Use ``'enabled' : false`` to disable - compression. Custom compressor can be provided by specifying the full class - name as a “string constant”:#constants. - - ``enabled`` true Enable/disable sstable compression. If the ``enabled`` option is set to ``false`` no other - options must be specified. - - ``chunk_length_in_kb`` 64 On disk SSTables are compressed by block (to allow random reads). This - defines the size (in KB) of said block. Bigger values may improve the - compression rate, but increases the minimum size of data to be read from disk - for a read. The default value is an optimal value for compressing tables. Chunk length must - be a power of 2 because so is assumed so when computing the chunk number from an uncompressed - file offset. 
Block size may be adjusted based on read/write access patterns such as: - - - How much data is typically requested at once - - Average size of rows in the table - - ``crc_check_chance`` 1.0 Determines how likely Cassandra is to verify the checksum on each compression chunk during - reads. - - ``compression_level`` 3 Compression level. It is only applicable for ``ZstdCompressor`` and accepts values between - ``-131072`` and ``22``. -========================= =============== ============================================================================= - - -For instance, to create a table with LZ4Compressor and a chunk_lenth_in_kb of 4KB:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4}; - - -.. _cql-caching-options: - -Caching options -############### - -Caching optimizes the use of cache memory of a table. The cached data is weighed by size and access frequency. The ``caching`` -options allows to configure both the *key cache* and the *row cache* for the table. The following -sub-options are available: - -======================== ========= ==================================================================================== - Option Default Description -======================== ========= ==================================================================================== - ``keys`` ALL Whether to cache keys (“key cache”) for this table. Valid values are: ``ALL`` and - ``NONE``. - ``rows_per_partition`` NONE The amount of rows to cache per partition (“row cache”). If an integer ``n`` is - specified, the first ``n`` queried rows of a partition will be cached. Other - possible options are ``ALL``, to cache all rows of a queried partition, or ``NONE`` - to disable row caching. -======================== ========= ==================================================================================== - - -For instance, to create a table with both a key cache and 10 rows per partition:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10}; - - -Read Repair options -################### - -The ``read_repair`` options configures the read repair behavior to allow tuning for various performance and -consistency behaviors. Two consistency properties are affected by read repair behavior. - -- Monotonic Quorum Reads: Provided by ``BLOCKING``. Monotonic quorum reads prevents reads from appearing to go back - in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of - replicas, it may be visible in one read, and then disappear in a subsequent read. -- Write Atomicity: Provided by ``NONE``. Write atomicity prevents reads from returning partially applied writes. - Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement - is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it - is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a - batch, but then select a single row by specifying the clustering column in a SELECT statement. - -The available read repair settings are: - -Blocking -```````` -The default setting. 
When ``read_repair`` is set to ``BLOCKING``, and a read repair is triggered, the read will block
-on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not
-partition level write atomicity.
-
-None
-````
-
-When ``read_repair`` is set to ``NONE``, the coordinator will reconcile any differences between replicas, but will not
-attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads.
-
-
-Other considerations:
-#####################
-
-- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. There is thus no need to try to
-  anticipate future usage when creating a table.
-
-.. _alter-table-statement:
-
-ALTER TABLE
-^^^^^^^^^^^
-
-Altering an existing table uses the ``ALTER TABLE`` statement:
-
-.. productionlist::
-   alter_table_statement: ALTER TABLE `table_name` `alter_table_instruction`
-   alter_table_instruction: ADD `column_name` `cql_type` ( ',' `column_name` `cql_type` )*
-                          : | DROP `column_name` ( `column_name` )*
-                          : | WITH `options`
-
-For instance::
-
-    ALTER TABLE addamsFamily ADD gravesite varchar;
-
-    ALTER TABLE addamsFamily
-           WITH comment = 'A most excellent and useful table';
-
-The ``ALTER TABLE`` statement can:
-
-- Add new column(s) to the table (through the ``ADD`` instruction). Note that the primary key of a table cannot be
-  changed and thus newly added columns will, by extension, never be part of the primary key. Also note that
-  :ref:`compact tables ` have restrictions regarding column addition. Note that this is a constant time
-  operation (in the amount of data the cluster contains).
-- Remove column(s) from the table. This drops both the column and all its content, but note that while the column
-  becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings
-  below. Due to lazy removal, the alteration itself is a constant time operation (in the amount of data removed or
-  contained in the cluster).
-- Change some of the table options (through the ``WITH`` instruction). The :ref:`supported options
-  ` are the same as when creating a table (except for ``COMPACT STORAGE`` and ``CLUSTERING
-  ORDER``, which cannot be changed after creation). Note that setting any ``compaction`` sub-options has the effect of
-  erasing all previous ``compaction`` options, so you need to re-specify all the sub-options if you want to keep them.
-  The same note applies to the set of ``compression`` sub-options.
-
-.. warning:: Dropping a column assumes that the timestamps used for the values of this column are "real" timestamps in
-   microseconds. Using "real" timestamps in microseconds is the default and is **strongly** recommended, but as
-   Cassandra allows the client to provide any timestamp on any table, it is theoretically possible to use another
-   convention. Please be aware that if you do so, dropping a column will not work correctly.
-
-.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one
-   **unless** the type of the dropped column was a (non-frozen) column (due to an internal technical limitation).
-
-
-.. _drop-table-statement:
-
-DROP TABLE
-^^^^^^^^^^
-
-Dropping a table uses the ``DROP TABLE`` statement:
-
-.. productionlist::
-   drop_table_statement: DROP TABLE [ IF EXISTS ] `table_name`
-
-Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.
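-
-For instance (the table name is purely illustrative and assumes the ``addamsFamily`` table from the ``ALTER TABLE``
-examples above exists)::
-
-    DROP TABLE addamsFamily;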
- -If the table does not exist, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _truncate-statement: - -TRUNCATE -^^^^^^^^ - -A table can be truncated using the ``TRUNCATE`` statement: - -.. productionlist:: - truncate_statement: TRUNCATE [ TABLE ] `table_name` - -Note that ``TRUNCATE TABLE foo`` is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the ``TABLE`` keyword can be omitted. - -Truncating a table permanently removes all existing data from the table, but without removing the table itself. diff --git a/src/doc/4.0-alpha4/_sources/cql/definitions.rst.txt b/src/doc/4.0-alpha4/_sources/cql/definitions.rst.txt deleted file mode 100644 index 3df6f2099..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/definitions.rst.txt +++ /dev/null @@ -1,234 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. highlight:: cql - -Definitions ------------ - -.. _conventions: - -Conventions -^^^^^^^^^^^ - -To aid in specifying the CQL syntax, we will use the following conventions in this document: - -- Language rules will be given in an informal `BNF variant - `_ notation. In particular, we'll use square brakets - (``[ item ]``) for optional items, ``*`` and ``+`` for repeated items (where ``+`` imply at least one). -- The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to - their definition) while terminal keywords will be provided "all caps". Note however that keywords are - :ref:`identifiers` and are thus case insensitive in practice. We will also define some early construction using - regexp, which we'll indicate with ``re()``. -- The grammar is provided for documentation purposes and leave some minor details out. For instance, the comma on the - last column definition in a ``CREATE TABLE`` statement is optional but supported if present even though the grammar in - this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL. -- References to keywords or pieces of CQL code in running text will be shown in a ``fixed-width font``. - - -.. _identifiers: - -Identifiers and keywords -^^^^^^^^^^^^^^^^^^^^^^^^ - -The CQL language uses *identifiers* (or *names*) to identify tables, columns and other objects. An identifier is a token -matching the regular expression ``[a-zA-Z][a-zA-Z0-9_]*``. - -A number of such identifiers, like ``SELECT`` or ``WITH``, are *keywords*. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in :ref:`appendix-A`. 
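-
-For instance, in the following (purely illustrative) statement, ``monkeys`` and ``species`` are identifiers, while
-``CREATE``, ``TABLE``, ``PRIMARY`` and ``KEY`` are keywords::
-
-    CREATE TABLE monkeys (
-        species text PRIMARY KEY
-    );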
- -Identifiers and (unquoted) keywords are case insensitive. Thus ``SELECT`` is the same than ``select`` or ``sElEcT``, and -``myId`` is the same than ``myid`` or ``MYID``. A convention often used (in particular by the samples of this -documentation) is to use upper case for keywords and lower case for other identifiers. - -There is a second kind of identifiers called *quoted identifiers* defined by enclosing an arbitrary sequence of -characters (non empty) in double-quotes(``"``). Quoted identifiers are never keywords. Thus ``"select"`` is not a -reserved keyword and can be used to refer to a column (note that using this is particularly advised), while ``select`` -would raise a parsing error. Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case -sensitive (``"My Quoted Id"`` is *different* from ``"my quoted id"``). A fully lowercase quoted identifier that matches -``[a-zA-Z][a-zA-Z0-9_]*`` is however *equivalent* to the unquoted identifier obtained by removing the double-quote (so -``"myid"`` is equivalent to ``myid`` and to ``myId`` but different from ``"myId"``). Inside a quoted identifier, the -double-quote character can be repeated to escape it, so ``"foo "" bar"`` is a valid identifier. - -.. note:: *quoted identifiers* allows to declare columns with arbitrary names, and those can sometime clash with - specific names used by the server. For instance, when using conditional update, the server will respond with a - result-set containing a special result named ``"[applied]"``. If you’ve declared a column with such a name, this - could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but - if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like - ``"[applied]"``) and any name that looks like a function call (like ``"f(x)"``). - -More formally, we have: - -.. productionlist:: - identifier: `unquoted_identifier` | `quoted_identifier` - unquoted_identifier: re('[a-zA-Z][a-zA-Z0-9_]*') - quoted_identifier: '"' (any character where " can appear if doubled)+ '"' - -.. _constants: - -Constants -^^^^^^^^^ - -CQL defines the following kind of *constants*: - -.. productionlist:: - constant: `string` | `integer` | `float` | `boolean` | `uuid` | `blob` | NULL - string: '\'' (any character where ' can appear if doubled)+ '\'' - : '$$' (any character other than '$$') '$$' - integer: re('-?[0-9]+') - float: re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY - boolean: TRUE | FALSE - uuid: `hex`{8}-`hex`{4}-`hex`{4}-`hex`{4}-`hex`{12} - hex: re("[0-9a-fA-F]") - blob: '0' ('x' | 'X') `hex`+ - -In other words: - -- A string constant is an arbitrary sequence of characters enclosed by single-quote(``'``). A single-quote - can be included by repeating it, e.g. ``'It''s raining today'``. Those are not to be confused with quoted - :ref:`identifiers` that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence - of characters by two dollar characters, in which case single-quote can be used without escaping (``$$It's raining - today$$``). That latter form is often used when defining :ref:`user-defined functions ` to avoid having to - escape single-quote characters in function body (as they are more likely to occur than ``$$``). -- Integer, float and boolean constant are defined as expected. Note however than float allows the special ``NaN`` and - ``Infinity`` constants. -- CQL supports UUID_ constants. 
-- Blobs content are provided in hexadecimal and prefixed by ``0x``. -- The special ``NULL`` constant denotes the absence of value. - -For how these constants are typed, see the :ref:`data-types` section. - -Terms -^^^^^ - -CQL has the notion of a *term*, which denotes the kind of values that CQL support. Terms are defined by: - -.. productionlist:: - term: `constant` | `literal` | `function_call` | `arithmetic_operation` | `type_hint` | `bind_marker` - literal: `collection_literal` | `udt_literal` | `tuple_literal` - function_call: `identifier` '(' [ `term` (',' `term`)* ] ')' - arithmetic_operation: '-' `term` | `term` ('+' | '-' | '*' | '/' | '%') `term` - type_hint: '(' `cql_type` `)` term - bind_marker: '?' | ':' `identifier` - -A term is thus one of: - -- A :ref:`constant `. -- A literal for either :ref:`a collection `, :ref:`a user-defined type ` or :ref:`a tuple ` - (see the linked sections for details). -- A function call: see :ref:`the section on functions ` for details on which :ref:`native function - ` exists and how to define your own :ref:`user-defined ones `. -- An arithmetic operation between terms. see :ref:`the section on arithmetic operations ` -- A *type hint*: see the :ref:`related section ` for details. -- A bind marker, which denotes a variable to be bound at execution time. See the section on :ref:`prepared-statements` - for details. A bind marker can be either anonymous (``?``) or named (``:some_name``). The latter form provides a more - convenient way to refer to the variable for binding it and should generally be preferred. - - -Comments -^^^^^^^^ - -A comment in CQL is a line beginning by either double dashes (``--``) or double slash (``//``). - -Multi-line comments are also supported through enclosure within ``/*`` and ``*/`` (but nesting is not supported). - -:: - - -- This is a comment - // This is a comment too - /* This is - a multi-line comment */ - -Statements -^^^^^^^^^^ - -CQL consists of statements that can be divided in the following categories: - -- :ref:`data-definition` statements, to define and change how the data is stored (keyspaces and tables). -- :ref:`data-manipulation` statements, for selecting, inserting and deleting data. -- :ref:`secondary-indexes` statements. -- :ref:`materialized-views` statements. -- :ref:`cql-roles` statements. -- :ref:`cql-permissions` statements. -- :ref:`User-Defined Functions ` statements. -- :ref:`udts` statements. -- :ref:`cql-triggers` statements. - -All the statements are listed below and are described in the rest of this documentation (see links above): - -.. 
productionlist:: - cql_statement: `statement` [ ';' ] - statement: `ddl_statement` - : | `dml_statement` - : | `secondary_index_statement` - : | `materialized_view_statement` - : | `role_or_permission_statement` - : | `udf_statement` - : | `udt_statement` - : | `trigger_statement` - ddl_statement: `use_statement` - : | `create_keyspace_statement` - : | `alter_keyspace_statement` - : | `drop_keyspace_statement` - : | `create_table_statement` - : | `alter_table_statement` - : | `drop_table_statement` - : | `truncate_statement` - dml_statement: `select_statement` - : | `insert_statement` - : | `update_statement` - : | `delete_statement` - : | `batch_statement` - secondary_index_statement: `create_index_statement` - : | `drop_index_statement` - materialized_view_statement: `create_materialized_view_statement` - : | `drop_materialized_view_statement` - role_or_permission_statement: `create_role_statement` - : | `alter_role_statement` - : | `drop_role_statement` - : | `grant_role_statement` - : | `revoke_role_statement` - : | `list_roles_statement` - : | `grant_permission_statement` - : | `revoke_permission_statement` - : | `list_permissions_statement` - : | `create_user_statement` - : | `alter_user_statement` - : | `drop_user_statement` - : | `list_users_statement` - udf_statement: `create_function_statement` - : | `drop_function_statement` - : | `create_aggregate_statement` - : | `drop_aggregate_statement` - udt_statement: `create_type_statement` - : | `alter_type_statement` - : | `drop_type_statement` - trigger_statement: `create_trigger_statement` - : | `drop_trigger_statement` - -.. _prepared-statements: - -Prepared Statements -^^^^^^^^^^^^^^^^^^^ - -CQL supports *prepared statements*. Prepared statements are an optimization that allows to parse a query only once but -execute it multiple times with different concrete values. - -Any statement that uses at least one bind marker (see :token:`bind_marker`) will need to be *prepared*. After which the statement -can be *executed* by provided concrete values for each of its marker. The exact details of how a statement is prepared -and then executed depends on the CQL driver used and you should refer to your driver documentation. diff --git a/src/doc/4.0-alpha4/_sources/cql/dml.rst.txt b/src/doc/4.0-alpha4/_sources/cql/dml.rst.txt deleted file mode 100644 index 1308de57e..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/dml.rst.txt +++ /dev/null @@ -1,522 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-manipulation: - -Data Manipulation ------------------ - -This section describes the statements supported by CQL to insert, update, delete and query data. - -.. 
_select-statement: - -SELECT -^^^^^^ - -Querying data from data is done using a ``SELECT`` statement: - -.. productionlist:: - select_statement: SELECT [ JSON | DISTINCT ] ( `select_clause` | '*' ) - : FROM `table_name` - : [ WHERE `where_clause` ] - : [ GROUP BY `group_by_clause` ] - : [ ORDER BY `ordering_clause` ] - : [ PER PARTITION LIMIT (`integer` | `bind_marker`) ] - : [ LIMIT (`integer` | `bind_marker`) ] - : [ ALLOW FILTERING ] - select_clause: `selector` [ AS `identifier` ] ( ',' `selector` [ AS `identifier` ] ) - selector: `column_name` - : | `term` - : | CAST '(' `selector` AS `cql_type` ')' - : | `function_name` '(' [ `selector` ( ',' `selector` )* ] ')' - : | COUNT '(' '*' ')' - where_clause: `relation` ( AND `relation` )* - relation: `column_name` `operator` `term` - : '(' `column_name` ( ',' `column_name` )* ')' `operator` `tuple_literal` - : TOKEN '(' `column_name` ( ',' `column_name` )* ')' `operator` `term` - operator: '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY - group_by_clause: `column_name` ( ',' `column_name` )* - ordering_clause: `column_name` [ ASC | DESC ] ( ',' `column_name` [ ASC | DESC ] )* - -For instance:: - - SELECT name, occupation FROM users WHERE userid IN (199, 200, 207); - SELECT JSON name, occupation FROM users WHERE userid = 199; - SELECT name AS user_name, occupation AS user_occupation FROM users; - - SELECT time, value - FROM events - WHERE event_type = 'myEvent' - AND time > '2011-02-03' - AND time <= '2012-01-01' - - SELECT COUNT (*) AS user_count FROM users; - -The ``SELECT`` statements reads one or more columns for one or more rows in a table. It returns a result-set of the rows -matching the request, where each row contains the values for the selection corresponding to the query. Additionally, -:ref:`functions ` including :ref:`aggregation ` ones can be applied to the result. - -A ``SELECT`` statement contains at least a :ref:`selection clause ` and the name of the table on which -the selection is on (note that CQL does **not** joins or sub-queries and thus a select statement only apply to a single -table). In most case, a select will also have a :ref:`where clause ` and it can optionally have additional -clauses to :ref:`order ` or :ref:`limit ` the results. Lastly, :ref:`queries that require -filtering ` can be allowed if the ``ALLOW FILTERING`` flag is provided. - -.. _selection-clause: - -Selection clause -~~~~~~~~~~~~~~~~ - -The :token:`select_clause` determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. It consists of a comma-separated list of *selectors* or, -alternatively, of the wildcard character (``*``) to select all the columns defined in the table. - -Selectors -````````` - -A :token:`selector` can be one of: - -- A column name of the table selected, to retrieve the values for that column. -- A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the - corresponding column of the result-set will simply have the value of this term for every row returned). -- A casting, which allows to convert a nested selector to a (compatible) type. -- A function call, where the arguments are selector themselves. See the section on :ref:`functions ` for - more details. -- The special call ``COUNT(*)`` to the :ref:`COUNT function `, which counts all non-null results. - -Aliases -``````` - -Every *top-level* selector can also be aliased (using `AS`). 
If so, the name of the corresponding column in the result -set will be that of the alias. For instance:: - - // Without alias - SELECT intAsBlob(4) FROM t; - - // intAsBlob(4) - // -------------- - // 0x00000004 - - // With alias - SELECT intAsBlob(4) AS four FROM t; - - // four - // ------------ - // 0x00000004 - -.. note:: Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the ``WHERE`` - clause, not in the ``ORDER BY`` clause, ...). You must use the orignal column name instead. - - -``WRITETIME`` and ``TTL`` function -``````````````````````````````````` - -Selection supports two special functions (that aren't allowed anywhere else): ``WRITETIME`` and ``TTL``. Both function -take only one argument and that argument *must* be a column name (so for instance ``TTL(3)`` is invalid). - -Those functions allow to retrieve meta-information that are stored internally for each column, namely: - -- the timestamp of the value of the column for ``WRITETIME``. -- the remaining time to live (in seconds) for the value of the column if it set to expire (and ``null`` otherwise). - -.. _where-clause: - -The ``WHERE`` clause -~~~~~~~~~~~~~~~~~~~~ - -The ``WHERE`` clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the ``PRIMARY KEY`` and/or have a `secondary index <#createIndexStmt>`__ defined on them. - -Not all relations are allowed in a query. For instance, non-equal relations (where ``IN`` is considered as an equal -relation) on a partition key are not supported (but see the use of the ``TOKEN`` method below to do non-equal queries on -the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations -on them is restricted to the relations that allow to select a **contiguous** (for the ordering) set of rows. For -instance, given:: - - CREATE TABLE posts ( - userid text, - blog_title text, - posted_at timestamp, - entry_title text, - content text, - category int, - PRIMARY KEY (userid, blog_title, posted_at) - ) - -The following query is allowed:: - - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND blog_title='John''s Blog' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):: - - // Needs a blog_title to be set to select ranges of posted_at - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -When specifying relations, the ``TOKEN`` function can be used on the ``PARTITION KEY`` column to query. In that case, -rows will be selected based on the token of their ``PARTITION_KEY`` rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -``token(-1) > token(0)`` in particular). Example:: - - SELECT * FROM posts - WHERE token(userid) > token('tom') AND token(userid) < token('bob') - -Moreover, the ``IN`` relation is only allowed on the last column of the partition key and on the last column of the full -primary key. - -It is also possible to “group” ``CLUSTERING COLUMNS`` together in a relation using the tuple notation. 
For instance:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01') - -will request all rows that sorts after the one having “John's Blog” as ``blog_tile`` and '2012-01-01' for ``posted_at`` -in the clustering order. In particular, rows having a ``post_at <= '2012-01-01'`` will be returned as long as their -``blog_title > 'John''s Blog'``, which would not be the case for:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND blog_title > 'John''s Blog' - AND posted_at > '2012-01-01' - -The tuple notation may also be used for ``IN`` clauses on clustering columns:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01')) - -The ``CONTAINS`` operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -``CONTAINS`` applies to the map values. The ``CONTAINS KEY`` operator may only be used on map columns and applies to the -map keys. - -.. _group-by-clause: - -Grouping results -~~~~~~~~~~~~~~~~ - -The ``GROUP BY`` option allows to condense into a single row all selected rows that share the same values for a set -of columns. - -Using the ``GROUP BY`` option, it is only possible to group rows at the partition key level or at a clustering column -level. By consequence, the ``GROUP BY`` option only accept as arguments primary key column names in the primary key -order. If a primary key column is restricted by an equality restriction it is not required to be present in the -``GROUP BY`` clause. - -Aggregate functions will produce a separate value for each group. If no ``GROUP BY`` clause is specified, -aggregates functions will produce a single value for all the rows. - -If a column is selected without an aggregate function, in a statement with a ``GROUP BY``, the first value encounter -in each group will be returned. - -.. _ordering-clause: - -Ordering results -~~~~~~~~~~~~~~~~ - -The ``ORDER BY`` clause allows to select the order of the returned results. It takes as argument a list of column names -along with the order for the column (``ASC`` for ascendant and ``DESC`` for descendant, omitting the order being -equivalent to ``ASC``). Currently the possible orderings are limited by the :ref:`clustering order ` -defined on the table: - -- if the table has been defined without any specific ``CLUSTERING ORDER``, then then allowed orderings are the order - induced by the clustering columns and the reverse of that one. -- otherwise, the orderings allowed are the order of the ``CLUSTERING ORDER`` option and the reversed one. - -.. _limit-clause: - -Limiting results -~~~~~~~~~~~~~~~~ - -The ``LIMIT`` option to a ``SELECT`` statement limits the number of rows returned by a query, while the ``PER PARTITION -LIMIT`` option limits the number of rows returned for a given partition by the query. Note that both type of limit can -used in the same statement. - -.. _allow-filtering: - -Allowing filtering -~~~~~~~~~~~~~~~~~~ - -By default, CQL only allows select queries that don't involve “filtering” server side, i.e. queries where we know that -all (live) record read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” -queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of -data **returned** by the query (which can be controlled through ``LIMIT``). 
-
-The ``ALLOW FILTERING`` option explicitly allows (some) queries that require filtering. Please note that a
-query using ``ALLOW FILTERING`` may thus have unpredictable performance (for the definition above), i.e. even a query
-that selects a handful of records **may** exhibit performance that depends on the total amount of data stored in the
-cluster.
-
-For instance, considering the following table holding user profiles with their year of birth (with a secondary index on
-it) and country of residence::
-
-    CREATE TABLE users (
-        username text PRIMARY KEY,
-        firstname text,
-        lastname text,
-        birth_year int,
-        country text
-    )
-
-    CREATE INDEX ON users(birth_year);
-
-Then the following queries are valid::
-
-    SELECT * FROM users;
-    SELECT * FROM users WHERE birth_year = 1981;
-
-because in both cases, Cassandra guarantees that the performance of these queries will be proportional to the amount of
-data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the
-number of user profiles stored in the database (not directly at least: due to secondary index implementation
-considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the
-amount of data stored. Nevertheless, the number of nodes will always be multiple orders of magnitude lower than the
-number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount
-of data returned can always be controlled by adding a ``LIMIT``.
-
-However, the following query will be rejected::
-
-    SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-because Cassandra cannot guarantee that it won't have to scan a large amount of data even if the result of the query is
-small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from
-France. However, if you “know what you are doing”, you can force the execution of this query by using ``ALLOW
-FILTERING`` and so the following query is valid::
-
-    SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-.. _insert-statement:
-
-INSERT
-^^^^^^
-
-Inserting data for a row is done using an ``INSERT`` statement:
-
-.. productionlist::
-   insert_statement: INSERT INTO `table_name` ( `names_values` | `json_clause` )
-                   : [ IF NOT EXISTS ]
-                   : [ USING `update_parameter` ( AND `update_parameter` )* ]
-   names_values: `names` VALUES `tuple_literal`
-   json_clause: JSON `string` [ DEFAULT ( NULL | UNSET ) ]
-   names: '(' `column_name` ( ',' `column_name` )* ')'
-
-For instance::
-
-    INSERT INTO NerdMovies (movie, director, main_actor, year)
-    VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-    USING TTL 86400;
-
-    INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                                  "director": "Joss Whedon",
-                                  "year": 2005}';
-
-The ``INSERT`` statement writes one or more columns for a given row in a table. Note that since a row is identified by
-its ``PRIMARY KEY``, at least the columns composing it must be specified. The list of columns to insert must be
-supplied when using the ``VALUES`` syntax. When using the ``JSON`` syntax, they are optional. See the
-section on :ref:`JSON support ` for more detail.
-
-Note that unlike in SQL, ``INSERT`` does not check the prior existence of the row by default: the row is created if none
-existed before, and updated otherwise. Furthermore, there is no means of knowing whether a creation or an update
-happened.
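-
-For instance, assuming ``movie`` is the primary key of the illustrative ``NerdMovies`` table used above, both of the
-following statements are valid whether or not a row for ``'Serenity'`` already exists; the second simply overwrites
-the ``main_actor`` value written by the first (the values are made up for illustration)::
-
-    INSERT INTO NerdMovies (movie, director, main_actor, year)
-    VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005);
-
-    INSERT INTO NerdMovies (movie, main_actor)
-    VALUES ('Serenity', 'Summer Glau');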
- -It is however possible to use the ``IF NOT EXISTS`` condition to only insert if the row does not exist prior to the -insertion. But please note that using ``IF NOT EXISTS`` will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly. - -All updates for an ``INSERT`` are applied atomically and in isolation. - -Please refer to the :ref:`UPDATE ` section for informations on the :token:`update_parameter`. - -Also note that ``INSERT`` does not support counters, while ``UPDATE`` does. - -.. _update-statement: - -UPDATE -^^^^^^ - -Updating a row is done using an ``UPDATE`` statement: - -.. productionlist:: - update_statement: UPDATE `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : SET `assignment` ( ',' `assignment` )* - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - update_parameter: ( TIMESTAMP | TTL ) ( `integer` | `bind_marker` ) - assignment: `simple_selection` '=' `term` - :| `column_name` '=' `column_name` ( '+' | '-' ) `term` - :| `column_name` '=' `list_literal` '+' `column_name` - simple_selection: `column_name` - :| `column_name` '[' `term` ']' - :| `column_name` '.' `field_name - condition: `simple_selection` `operator` `term` - -For instance:: - - UPDATE NerdMovies USING TTL 400 - SET director = 'Joss Whedon', - main_actor = 'Nathan Fillion', - year = 2005 - WHERE movie = 'Serenity'; - - UPDATE UserActions - SET total = total + 2 - WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 - AND action = 'click'; - -The ``UPDATE`` statement writes one or more columns for a given row in a table. The :token:`where_clause` is used to -select the row to update and must include all columns composing the ``PRIMARY KEY``. Non primary key columns are then -set using the ``SET`` keyword. - -Note that unlike in SQL, ``UPDATE`` does not check the prior existence of the row by default (except through ``IF``, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred. - -It is however possible to use the conditions on some columns through ``IF``, in which case the row will not be updated -unless the conditions are met. But, please note that using ``IF`` conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly. - -In an ``UPDATE`` statement, all updates within the same partition key are applied atomically and in isolation. - -Regarding the :token:`assignment`: - -- ``c = c + 3`` is used to increment/decrement counters. The column name after the '=' sign **must** be the same than - the one before the '=' sign. Note that increment/decrement is only allowed on counters, and are the *only* update - operations allowed on counters. See the section on :ref:`counters ` for details. -- ``id = id + `` and ``id[value1] = value2`` are for collections, see the :ref:`relevant section - ` for details. -- ``id.field = 3`` is for setting the value of a field on a non-frozen user-defined types. see the :ref:`relevant section - ` for details. - -.. _update-parameters: - -Update parameters -~~~~~~~~~~~~~~~~~ - -The ``UPDATE``, ``INSERT`` (and ``DELETE`` and ``BATCH`` for the ``TIMESTAMP``) statements support the following -parameters: - -- ``TIMESTAMP``: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in - microseconds) at the start of statement execution as the timestamp. 
This is usually a suitable default. -- ``TTL``: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are - automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not - the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL - is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a - default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of ``null`` is equivalent - to inserting with a TTL of 0. - -.. _delete_statement: - -DELETE -^^^^^^ - -Deleting rows or parts of rows uses the ``DELETE`` statement: - -.. productionlist:: - delete_statement: DELETE [ `simple_selection` ( ',' `simple_selection` ) ] - : FROM `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - -For instance:: - - DELETE FROM NerdMovies USING TIMESTAMP 1240003134 - WHERE movie = 'Serenity'; - - DELETE phone FROM Users - WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); - -The ``DELETE`` statement deletes columns and rows. If column names are provided directly after the ``DELETE`` keyword, -only those columns are deleted from the row indicated by the ``WHERE`` clause. Otherwise, whole rows are removed. - -The ``WHERE`` clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -``IN`` operator. A range of rows may be deleted using an inequality operator (such as ``>=``). - -``DELETE`` supports the ``TIMESTAMP`` option with the same semantics as in :ref:`updates `. - -In a ``DELETE`` statement, all deletions within the same partition key are applied atomically and in isolation. - -A ``DELETE`` operation can be conditional through the use of an ``IF`` clause, similar to ``UPDATE`` and ``INSERT`` -statements. However, as with ``INSERT`` and ``UPDATE`` statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly. - -.. _batch_statement: - -BATCH -^^^^^ - -Multiple ``INSERT``, ``UPDATE`` and ``DELETE`` can be executed in a single statement by grouping them through a -``BATCH`` statement: - -.. productionlist:: - batch_statement: BEGIN [ UNLOGGED | COUNTER ] BATCH - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : `modification_statement` ( ';' `modification_statement` )* - : APPLY BATCH - modification_statement: `insert_statement` | `update_statement` | `delete_statement` - -For instance:: - - BEGIN BATCH - INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user'); - UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3'; - INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c'); - DELETE name FROM users WHERE userid = 'user1'; - APPLY BATCH; - -The ``BATCH`` statement group multiple modification statements (insertions/updates and deletions) into a single -statement. It serves several purposes: - -- It saves network round-trips between the client and the server (and sometimes between the server coordinator and the - replicas) when batching multiple updates. -- All updates in a ``BATCH`` belonging to a given partition key are performed in isolation. 
-- By default, all operations in the batch are performed as *logged*, to ensure all mutations eventually complete (or - none will). See the notes on :ref:`UNLOGGED batches ` for more details. - -Note that: - -- ``BATCH`` statements may only contain ``UPDATE``, ``INSERT`` and ``DELETE`` statements (not other batches for instance). -- Batches are *not* a full analogue for SQL transactions. -- If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp - (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra's conflict - resolution procedure in the case of `timestamp ties `__, operations may - be applied in an order that is different from the order they are listed in the ``BATCH`` statement. To force a - particular operation ordering, you must specify per-operation timestamps. -- A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization. - -.. _unlogged-batches: - -``UNLOGGED`` batches -~~~~~~~~~~~~~~~~~~~~ - -By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition). - -There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur -this penalty, you can tell Cassandra to skip the batchlog with the ``UNLOGGED`` option. If the ``UNLOGGED`` option is -used, a failed batch might leave the patch only partly applied. - -``COUNTER`` batches -~~~~~~~~~~~~~~~~~~~ - -Use the ``COUNTER`` option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent. diff --git a/src/doc/4.0-alpha4/_sources/cql/functions.rst.txt b/src/doc/4.0-alpha4/_sources/cql/functions.rst.txt deleted file mode 100644 index 965125a79..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/functions.rst.txt +++ /dev/null @@ -1,581 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-functions: - -.. Need some intro for UDF and native functions in general and point those to it. -.. _udfs: -.. _native-functions: - -Functions ---------- - -CQL supports 2 main categories of functions: - -- the :ref:`scalar functions `, which simply take a number of values and produce an output with it. -- the :ref:`aggregate functions `, which are used to aggregate multiple rows results from a - ``SELECT`` statement. - -In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined -functions. - -.. 
note:: By default, the use of user-defined functions is disabled by default for security concerns (even when - enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do - evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions`` - in ``cassandra.yaml`` to enable them. - -A function is identifier by its name: - -.. productionlist:: - function_name: [ `keyspace_name` '.' ] `name` - -.. _scalar-functions: - -Scalar functions -^^^^^^^^^^^^^^^^ - -.. _scalar-native-functions: - -Native functions -~~~~~~~~~~~~~~~~ - -Cast -```` - -The ``cast`` function can be used to converts one native datatype to another. - -The following table describes the conversions supported by the ``cast`` function. Cassandra will silently ignore any -cast converting a datatype into its own datatype. - -=============== ======================================================================================================= - From To -=============== ======================================================================================================= - ``ascii`` ``text``, ``varchar`` - ``bigint`` ``tinyint``, ``smallint``, ``int``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``boolean`` ``text``, ``varchar`` - ``counter`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``date`` ``timestamp`` - ``decimal`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``varint``, ``text``, - ``varchar`` - ``double`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``float`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``inet`` ``text``, ``varchar`` - ``int`` ``tinyint``, ``smallint``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``smallint`` ``tinyint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``time`` ``text``, ``varchar`` - ``timestamp`` ``date``, ``text``, ``varchar`` - ``timeuuid`` ``timestamp``, ``date``, ``text``, ``varchar`` - ``tinyint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``uuid`` ``text``, ``varchar`` - ``varint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``text``, - ``varchar`` -=============== ======================================================================================================= - -The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value -'1.0'. For instance:: - - SELECT avg(cast(count as double)) FROM myTable - -Token -````` - -The ``token`` function allows to compute the token for a given partition key. The exact signature of the token function -depends on the table concerned and of the partitioner used by the cluster. - -The type of the arguments of the ``token`` depend on the type of the partition key columns. The return type depend on -the partitioner in use: - -- For Murmur3Partitioner, the return type is ``bigint``. -- For RandomPartitioner, the return type is ``varint``. -- For ByteOrderedPartitioner, the return type is ``blob``. 
- -For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:: - - CREATE TABLE users ( - userid text PRIMARY KEY, - username text, - ) - -then the ``token`` function will take a single argument of type ``text`` (in that case, the partition key is ``userid`` -(there is no clustering columns so the partition key is the same than the primary key)), and the return type will be -``bigint``. - -Uuid -```` -The ``uuid`` function takes no parameters and generates a random type 4 uuid suitable for use in ``INSERT`` or -``UPDATE`` statements. - -.. _timeuuid-functions: - -Timeuuid functions -`````````````````` - -``now`` -####### - -The ``now`` function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the -time the function is invoked. Note that this method is useful for insertion but is largely non-sensical in -``WHERE`` clauses. For instance, a query of the form:: - - SELECT * FROM myTable WHERE t = now() - -will never return any result by design, since the value returned by ``now()`` is guaranteed to be unique. - -``currentTimeUUID`` is an alias of ``now``. - -``minTimeuuid`` and ``maxTimeuuid`` -################################### - -The ``minTimeuuid`` (resp. ``maxTimeuuid``) function takes a ``timestamp`` value ``t`` (which can be `either a timestamp -or a date string `) and return a *fake* ``timeuuid`` corresponding to the *smallest* (resp. *biggest*) -possible ``timeuuid`` having for timestamp ``t``. So for instance:: - - SELECT * FROM myTable - WHERE t > maxTimeuuid('2013-01-01 00:05+0000') - AND t < minTimeuuid('2013-02-02 10:00+0000') - -will select all rows where the ``timeuuid`` column ``t`` is strictly older than ``'2013-01-01 00:05+0000'`` but strictly -younger than ``'2013-02-02 10:00+0000'``. Please note that ``t >= maxTimeuuid('2013-01-01 00:05+0000')`` would still -*not* select a ``timeuuid`` generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to ``t > -maxTimeuuid('2013-01-01 00:05+0000')``. - -.. note:: We called the values generated by ``minTimeuuid`` and ``maxTimeuuid`` *fake* UUID because they do no respect - the Time-Based UUID generation process specified by the `RFC 4122 `__. In - particular, the value returned by these 2 methods will not be unique. This means you should only use those methods - for querying (as in the example above). Inserting the result of those methods is almost certainly *a bad idea*. - -Datetime functions -`````````````````` - -Retrieving the current date/time -################################ - -The following functions can be used to retrieve the date/time at the time where the function is invoked: - -===================== =============== - Function name Output type -===================== =============== - ``currentTimestamp`` ``timestamp`` - ``currentDate`` ``date`` - ``currentTime`` ``time`` - ``currentTimeUUID`` ``timeUUID`` -===================== =============== - -For example the last 2 days of data can be retrieved using:: - - SELECT * FROM myTable WHERE date >= currentDate() - 2d - -Time conversion functions -######################### - -A number of functions are provided to “convert” a ``timeuuid``, a ``timestamp`` or a ``date`` into another ``native`` -type. 
- -===================== =============== =================================================================== - Function name Input type Description -===================== =============== =================================================================== - ``toDate`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``date`` type - ``toDate`` ``timestamp`` Converts the ``timestamp`` argument into a ``date`` type - ``toTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``timestamp`` type - ``toTimestamp`` ``date`` Converts the ``date`` argument into a ``timestamp`` type - ``toUnixTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``timestamp`` Converts the ``timestamp`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``date`` Converts the ``date`` argument into a ``bigInt`` raw value - ``dateOf`` ``timeuuid`` Similar to ``toTimestamp(timeuuid)`` (DEPRECATED) - ``unixTimestampOf`` ``timeuuid`` Similar to ``toUnixTimestamp(timeuuid)`` (DEPRECATED) -===================== =============== =================================================================== - -Blob conversion functions -````````````````````````` -A number of functions are provided to “convert” the native types into binary data (``blob``). For every -```` ``type`` supported by CQL (a notable exceptions is ``blob``, for obvious reasons), the function -``typeAsBlob`` takes a argument of type ``type`` and return it as a ``blob``. Conversely, the function ``blobAsType`` -takes a 64-bit ``blob`` argument and convert it to a ``bigint`` value. And so for instance, ``bigintAsBlob(3)`` is -``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``. - -.. _user-defined-scalar-functions: - -User-defined functions -~~~~~~~~~~~~~~~~~~~~~~ - -User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in *Java* and *JavaScript*. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath. - -UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster. - -UDFs can be *overloaded* - i.e. multiple UDFs with different argument types but the same function name. Example:: - - CREATE FUNCTION sample ( arg int ) ...; - CREATE FUNCTION sample ( arg text ) ...; - -User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing. - -It is valid to use *complex* types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types. - -Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too. - -Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:: - - CREATE FUNCTION some_function ( arg int ) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE java - AS $$ return arg; $$; - - SELECT some_function(column) FROM atable ...; - UPDATE atable SET col = some_function(?) 
...; - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct_using_udt ( udtarg frozen ) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ return udtarg.getString("txt"); $$; - -User-defined functions can be used in ``SELECT``, ``INSERT`` and ``UPDATE`` statements. - -The implicitly available ``udfContext`` field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:: - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct\_using\_udt ( somearg int ) - RETURNS NULL ON NULL INPUT - RETURNS custom_type - LANGUAGE java - AS $$ - UDTValue udt = udfContext.newReturnUDTValue(); - udt.setString("txt", "some string"); - udt.setInt("i", 42); - return udt; - $$; - -The definition of the ``UDFContext`` interface can be found in the Apache Cassandra source code for -``org.apache.cassandra.cql3.functions.UDFContext``. - -.. code-block:: java - - public interface UDFContext - { - UDTValue newArgUDTValue(String argName); - UDTValue newArgUDTValue(int argNum); - UDTValue newReturnUDTValue(); - UDTValue newUDTValue(String udtName); - TupleValue newArgTupleValue(String argName); - TupleValue newArgTupleValue(int argNum); - TupleValue newReturnTupleValue(); - TupleValue newTupleValue(String cqlDefinition); - } - -Java UDFs already have some imports for common interfaces and classes defined. These imports are: - -.. code-block:: java - - import java.nio.ByteBuffer; - import java.util.List; - import java.util.Map; - import java.util.Set; - import org.apache.cassandra.cql3.functions.UDFContext; - import com.datastax.driver.core.TypeCodec; - import com.datastax.driver.core.TupleValue; - import com.datastax.driver.core.UDTValue; - -Please note, that these convenience imports are not available for script UDFs. - -.. _create-function-statement: - -CREATE FUNCTION -``````````````` - -Creating a new user-defined function uses the ``CREATE FUNCTION`` statement: - -.. productionlist:: - create_function_statement: CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS] - : `function_name` '(' `arguments_declaration` ')' - : [ CALLED | RETURNS NULL ] ON NULL INPUT - : RETURNS `cql_type` - : LANGUAGE `identifier` - : AS `string` - arguments_declaration: `identifier` `cql_type` ( ',' `identifier` `cql_type` )* - -For instance:: - - CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen, listarg list) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - - CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int) - CALLED ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - -``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords either creates a function or replaces an existing one with -the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE`` fails if a function with the same signature already -exists. - -If the optional ``IF NOT EXISTS`` keywords are used, the function will -only be created if another function with the same signature does not -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -Behavior on invocation with ``null`` values must be defined for each -function. There are two options: - -#. ``RETURNS NULL ON NULL INPUT`` declares that the function will always - return ``null`` if any of the input arguments is ``null``. -#. ``CALLED ON NULL INPUT`` declares that the function will always be - executed. 
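-
-For illustration, here is a minimal sketch contrasting the two options (the function names ``plus_one`` and
-``plus_one_or_zero`` are purely illustrative): the first body is never invoked with a ``null`` argument, while the
-second is always executed and must handle ``null`` itself::
-
-    CREATE FUNCTION plus_one ( arg int )
-        RETURNS NULL ON NULL INPUT
-        RETURNS int
-        LANGUAGE java
-        AS $$ return arg + 1; $$;
-
-    CREATE FUNCTION plus_one_or_zero ( arg int )
-        CALLED ON NULL INPUT
-        RETURNS int
-        LANGUAGE java
-        AS $$ return (arg == null) ? 0 : arg + 1; $$;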
- -Function Signature -################## - -Signatures are used to distinguish individual functions. The signature consists of: - -#. The fully qualified function name - i.e *keyspace* plus *function-name* -#. The concatenated list of all argument types - -Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules. - -Functions belong to a keyspace. If no keyspace is specified in ````, the current keyspace is used (i.e. -the keyspace specified using the ``USE`` statement). It is not possible to create a user-defined function in one of the -system keyspaces. - -.. _drop-function-statement: - -DROP FUNCTION -````````````` - -Dropping a function uses the ``DROP FUNCTION`` statement: - -.. productionlist:: - drop_function_statement: DROP FUNCTION [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - arguments_signature: `cql_type` ( ',' `cql_type` )* - -For instance:: - - DROP FUNCTION myfunction; - DROP FUNCTION mykeyspace.afunction; - DROP FUNCTION afunction ( int ); - DROP FUNCTION afunction ( text ); - -You must specify the argument types (:token:`arguments_signature`) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions). - -``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if -it doesn't - -.. _aggregate-functions: - -Aggregate functions -^^^^^^^^^^^^^^^^^^^ - -Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set. - -If ``normal`` columns, ``scalar functions``, ``UDT`` fields, ``writetime`` or ``ttl`` are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query. - -Native aggregates -~~~~~~~~~~~~~~~~~ - -.. _count-function: - -Count -````` - -The ``count`` function can be used to count the rows returned by a query. Example:: - - SELECT COUNT (*) FROM plays; - SELECT COUNT (1) FROM plays; - -It also can be used to count the non null value of a given column:: - - SELECT COUNT (scores) FROM plays; - -Max and Min -``````````` - -The ``max`` and ``min`` functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:: - - SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake'; - -Sum -``` - -The ``sum`` function can be used to sum up all the values returned by a query for a given column. For instance:: - - SELECT SUM (players) FROM plays; - -Avg -``` - -The ``avg`` function can be used to compute the average of all the values returned by a query for a given column. For -instance:: - - SELECT AVG (players) FROM plays; - -.. _user-defined-aggregates-functions: - -User-Defined Aggregates -~~~~~~~~~~~~~~~~~~~~~~~ - -User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -*count*, *min*, and *max*. - -Each aggregate requires an *initial state* (``INITCOND``, which defaults to ``null``) of type ``STYPE``. The first -argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional ``FINALFUNC`` is executed with last -state value as its argument. 
- -``STYPE`` is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate). - -User-defined aggregates can be used in ``SELECT`` statement. - -A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` -statement):: - - CREATE OR REPLACE FUNCTION averageState(state tuple, val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS $$ - if (val != null) { - state.setInt(0, state.getInt(0)+1); - state.setLong(1, state.getLong(1)+val.intValue()); - } - return state; - $$; - - CREATE OR REPLACE FUNCTION averageFinal (state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS $$ - double r = 0; - if (state.getInt(0) == 0) return null; - r = state.getLong(1); - r /= state.getInt(0); - return Double.valueOf(r); - $$; - - CREATE OR REPLACE AGGREGATE average(int) - SFUNC averageState - STYPE tuple - FINALFUNC averageFinal - INITCOND (0, 0); - - CREATE TABLE atable ( - pk int PRIMARY KEY, - val int - ); - - INSERT INTO atable (pk, val) VALUES (1,1); - INSERT INTO atable (pk, val) VALUES (2,2); - INSERT INTO atable (pk, val) VALUES (3,3); - INSERT INTO atable (pk, val) VALUES (4,4); - - SELECT average(val) FROM atable; - -.. _create-aggregate-statement: - -CREATE AGGREGATE -```````````````` - -Creating (or replacing) a user-defined aggregate function uses the ``CREATE AGGREGATE`` statement: - -.. productionlist:: - create_aggregate_statement: CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ] - : `function_name` '(' `arguments_signature` ')' - : SFUNC `function_name` - : STYPE `cql_type` - : [ FINALFUNC `function_name` ] - : [ INITCOND `term` ] - -See above for a complete example. - -``CREATE AGGREGATE`` with the optional ``OR REPLACE`` keywords either creates an aggregate or replaces an existing one -with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature -already exists. - -``CREATE AGGREGATE`` with the optional ``IF NOT EXISTS`` keywords either creates an aggregate if it does not already -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -``STYPE`` defines the type of the state value and must be specified. - -The optional ``INITCOND`` defines the initial state value for the aggregate. It defaults to ``null``. A non-\ ``null`` -``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``. - -``SFUNC`` references an existing function to be used as the state modifying function. The type of first argument of the -state function must match ``STYPE``. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called -with ``null``. - -The optional ``FINALFUNC`` is called just before the aggregate result is returned. It must take only one argument with -type ``STYPE``. The return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS -NULL ON NULL INPUT`` means that the aggregate's return value will be ``null``, if the last state is ``null``. - -If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is -defined, it is the return type of that function. - -.. 
_drop-aggregate-statement: - -DROP AGGREGATE -`````````````` - -Dropping an user-defined aggregate function uses the ``DROP AGGREGATE`` statement: - -.. productionlist:: - drop_aggregate_statement: DROP AGGREGATE [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - -For instance:: - - DROP AGGREGATE myAggregate; - DROP AGGREGATE myKeyspace.anAggregate; - DROP AGGREGATE someAggregate ( int ); - DROP AGGREGATE someAggregate ( text ); - -The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates). - -``DROP AGGREGATE`` with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist. diff --git a/src/doc/4.0-alpha4/_sources/cql/index.rst.txt b/src/doc/4.0-alpha4/_sources/cql/index.rst.txt deleted file mode 100644 index b4c21cf6c..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/index.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _cql: - -The Cassandra Query Language (CQL) -================================== - -This document describes the Cassandra Query Language (CQL) [#]_. Note that this document describes the last version of -the languages. However, the `changes <#changes>`_ section provides the diff between the different versions of CQL. - -CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For -that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have -in SQL. - -.. toctree:: - :maxdepth: 2 - - definitions - types - ddl - dml - indexes - mvs - security - functions - operators - json - triggers - appendices - changes - -.. [#] Technically, this document CQL version 3, which is not backward compatible with CQL version 1 and 2 (which have - been deprecated and remove) and differs from it in numerous ways. diff --git a/src/doc/4.0-alpha4/_sources/cql/indexes.rst.txt b/src/doc/4.0-alpha4/_sources/cql/indexes.rst.txt deleted file mode 100644 index 81fe429d0..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/indexes.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. 
http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _secondary-indexes: - -Secondary Indexes ------------------ - -CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by: - -.. productionlist:: - index_name: re('[a-zA-Z_0-9]+') - - - -.. _create-index-statement: - -CREATE INDEX -^^^^^^^^^^^^ - -Creating a secondary index on a table uses the ``CREATE INDEX`` statement: - -.. productionlist:: - create_index_statement: CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ `index_name` ] - : ON `table_name` '(' `index_identifier` ')' - : [ USING `string` [ WITH OPTIONS = `map_literal` ] ] - index_identifier: `column_name` - :| ( KEYS | VALUES | ENTRIES | FULL ) '(' `column_name` ')' - -For instance:: - - CREATE INDEX userIndex ON NerdMovies (user); - CREATE INDEX ON Mutants (abilityId); - CREATE INDEX ON users (keys(favs)); - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass'; - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'}; - -The ``CREATE INDEX`` statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ``ON`` keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time. - -Attempting to create an already existing index will return an error unless the ``IF NOT EXISTS`` option is used. If it -is used, the statement will be a no-op if the index already exists. - -Indexes on Map Keys -~~~~~~~~~~~~~~~~~~~ - -When creating an index on a :ref:`maps `, you may index either the keys or the values. If the column identifier is -placed within the ``keys()`` function, the index will be on the map keys, allowing you to use ``CONTAINS KEY`` in -``WHERE`` clauses. Otherwise, the index will be on the map values. - -.. _drop-index-statement: - -DROP INDEX -^^^^^^^^^^ - -Dropping a secondary index uses the ``DROP INDEX`` statement: - -.. productionlist:: - drop_index_statement: DROP INDEX [ IF EXISTS ] `index_name` - -The ``DROP INDEX`` statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index. - -If the index does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. diff --git a/src/doc/4.0-alpha4/_sources/cql/json.rst.txt b/src/doc/4.0-alpha4/_sources/cql/json.rst.txt deleted file mode 100644 index 539180aed..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/json.rst.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-.. highlight:: cql
-
-.. _cql-json:
-
-JSON Support
-------------
-
-Cassandra 2.2 introduces JSON support to :ref:`SELECT ` and :ref:`INSERT `
-statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced); it simply
-provides a convenient way to work with JSON documents.
-
-SELECT JSON
-^^^^^^^^^^^
-
-With ``SELECT`` statements, the ``JSON`` keyword can be used to return each row as a single ``JSON`` encoded map. The
-remainder of the ``SELECT`` statement behavior is the same.
-
-The result map keys are the same as the column names in a normal result set. For example, a statement like ``SELECT JSON
-a, ttl(b) FROM ...`` would result in a map with keys ``"a"`` and ``"ttl(b)"``. However, there is one notable exception:
-for symmetry with ``INSERT JSON`` behavior, case-sensitive column names with upper-case letters will be surrounded with
-double quotes. For example, ``SELECT JSON myColumn FROM ...`` would result in a map key ``"\"myColumn\""`` (note the
-escaped quotes).
-
-The map values will be ``JSON``-encoded representations (as described below) of the result set values.
-
-INSERT JSON
-^^^^^^^^^^^
-
-With ``INSERT`` statements, the new ``JSON`` keyword can be used to enable inserting a ``JSON`` encoded map as a single
-row. The format of the ``JSON`` map should generally match that returned by a ``SELECT JSON`` statement on the same
-table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a
-table with two columns named "myKey" and "value", you would do the following::
-
-    INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-By default (or if ``DEFAULT NULL`` is explicitly used), a column omitted from the ``JSON`` map will be set to ``NULL``,
-meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created).
-Alternatively, if the ``DEFAULT UNSET`` directive is used after the value, omitted column values will be left unset,
-meaning that pre-existing values for those columns will be preserved.
-
-
-JSON Encoding of Cassandra Data Types
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Where possible, Cassandra will represent and accept data types in their native ``JSON`` representation. Cassandra will
-also accept string representations matching the CQL literal format for all single-field types. For example, floats,
-ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples,
-and user-defined types must be represented by native ``JSON`` collections (maps and lists) or a JSON-encoded string
-representation of the collection.
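-
-As a brief sketch of this point (the table ``mytable2`` and its columns are illustrative, assuming an ``int`` column
-``id``, a ``set<text>`` column ``tags`` and a ``map<text, int>`` column ``scores``), a collection can be given as a
-native ``JSON`` list or map::
-
-    INSERT INTO mytable2 JSON '{ "id": 1, "tags": ["cql", "json"], "scores": { "q1": 10, "q2": 7 } }';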
- -The following table describes the encodings that Cassandra will accept in ``INSERT JSON`` values (and ``fromJson()`` -arguments) as well as the format Cassandra will use when returning data for ``SELECT JSON`` statements (and -``fromJson()``): - -=============== ======================== =============== ============================================================== - Type Formats accepted Return format Notes -=============== ======================== =============== ============================================================== - ``ascii`` string string Uses JSON's ``\u`` character escape - ``bigint`` integer, string integer String must be valid 64 bit integer - ``blob`` string string String should be 0x followed by an even number of hex digits - ``boolean`` boolean, string boolean String must be "true" or "false" - ``date`` string string Date in format ``YYYY-MM-DD``, timezone UTC - ``decimal`` integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in - client-side decoder - ``double`` integer, float, string float String must be valid integer or float - ``float`` integer, float, string float String must be valid integer or float - ``inet`` string string IPv4 or IPv6 address - ``int`` integer, string integer String must be valid 32 bit integer - ``list`` list, string list Uses JSON's native list representation - ``map`` map, string map Uses JSON's native map representation - ``smallint`` integer, string integer String must be valid 16 bit integer - ``set`` list, string list Uses JSON's native list representation - ``text`` string string Uses JSON's ``\u`` character escape - ``time`` string string Time of day in format ``HH-MM-SS[.fffffffff]`` - ``timestamp`` integer, string string A timestamp. Strings constant allows to input :ref:`timestamps - as dates `. Datestamps with format ``YYYY-MM-DD - HH:MM:SS.SSS`` are returned. - ``timeuuid`` string string Type 1 UUID. See :token:`constant` for the UUID format - ``tinyint`` integer, string integer String must be valid 8 bit integer - ``tuple`` list, string list Uses JSON's native list representation - ``UDT`` map, string map Uses JSON's native map representation with field names as keys - ``uuid`` string string See :token:`constant` for the UUID format - ``varchar`` string string Uses JSON's ``\u`` character escape - ``varint`` integer, string integer Variable length; may overflow 32 or 64 bit integers in - client-side decoder -=============== ======================== =============== ============================================================== - -The fromJson() Function -^^^^^^^^^^^^^^^^^^^^^^^ - -The ``fromJson()`` function may be used similarly to ``INSERT JSON``, but for a single column value. It may only be used -in the ``VALUES`` clause of an ``INSERT`` statement or as one of the column values in an ``UPDATE``, ``DELETE``, or -``SELECT`` statement. For example, it cannot be used in the selection clause of a ``SELECT`` statement. - -The toJson() Function -^^^^^^^^^^^^^^^^^^^^^ - -The ``toJson()`` function may be used similarly to ``SELECT JSON``, but for a single column value. It may only be used -in the selection clause of a ``SELECT`` statement. diff --git a/src/doc/4.0-alpha4/_sources/cql/mvs.rst.txt b/src/doc/4.0-alpha4/_sources/cql/mvs.rst.txt deleted file mode 100644 index 200090a60..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/mvs.rst.txt +++ /dev/null @@ -1,179 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. 
See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _materialized-views: - -Materialized Views ------------------- - -Materialized views names are defined by: - -.. productionlist:: - view_name: re('[a-zA-Z_0-9]+') - - -.. _create-materialized-view-statement: - -CREATE MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^^ - -You can create a materialized view on a table using a ``CREATE MATERIALIZED VIEW`` statement: - -.. productionlist:: - create_materialized_view_statement: CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] `view_name` AS - : `select_statement` - : PRIMARY KEY '(' `primary_key` ')' - : WITH `table_options` - -For instance:: - - CREATE MATERIALIZED VIEW monkeySpecies_by_population AS - SELECT * FROM monkeySpecies - WHERE population IS NOT NULL AND species IS NOT NULL - PRIMARY KEY (population, species) - WITH comment='Allow query by population instead of species'; - -The ``CREATE MATERIALIZED VIEW`` statement creates a new materialized view. Each such view is a set of *rows* which -corresponds to rows which are present in the underlying, or base, table specified in the ``SELECT`` statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view. - -Creating a materialized view has 3 main parts: - -- The :ref:`select statement ` that restrict the data included in the view. -- The :ref:`primary key ` definition for the view. -- The :ref:`options ` for the view. - -Attempting to create an already existing materialized view will return an error unless the ``IF NOT EXISTS`` option is -used. If it is used, the statement will be a no-op if the materialized view already exists. - -.. note:: By default, materialized views are built in a single thread. The initial build can be parallelized by - increasing the number of threads specified by the property ``concurrent_materialized_view_builders`` in - ``cassandra.yaml``. This property can also be manipulated at runtime through both JMX and the - ``setconcurrentviewbuilders`` and ``getconcurrentviewbuilders`` nodetool commands. - -.. _mv-select: - -MV select statement -``````````````````` - -The select statement of a materialized view creation defines which of the base table is included in the view. That -statement is limited in a number of ways: - -- the :ref:`selection ` is limited to those that only select columns of the base table. In other - words, you can't use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can - however use `*` as a shortcut of selecting all columns. Further, :ref:`static columns ` cannot be - included in a materialized view (which means ``SELECT *`` isn't allowed if the base table has static columns). -- the ``WHERE`` clause have the following restrictions: - - - it cannot include any :token:`bind_marker`. 
-  - the columns that are not part of the *base table* primary key can only be restricted by an ``IS NOT NULL``
-    restriction. No other restriction is allowed.
-  - as the columns that are part of the *view* primary key cannot be null, they must always be at least restricted by an
-    ``IS NOT NULL`` restriction (or any other restriction, but they must have one).
-
-- it cannot have an :ref:`ordering clause `, a :ref:`limit `, or :ref:`ALLOW
-  FILTERING `.
-
-.. _mv-primary-key:
-
-MV primary key
-``````````````
-
-A view must have a primary key and that primary key must conform to the following restrictions:
-
-- it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to
-  exactly one row of the base table.
-- it can only contain a single column that is not a primary key column in the base table.
-
-So for instance, given the following base table definition::
-
-    CREATE TABLE t (
-        k int,
-        c1 int,
-        c2 int,
-        v1 int,
-        v2 int,
-        PRIMARY KEY (k, c1, c2)
-    )
-
-then the following view definitions are allowed::
-
-    CREATE MATERIALIZED VIEW mv1 AS
-        SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-        PRIMARY KEY (c1, k, c2)
-
-    CREATE MATERIALIZED VIEW mv1 AS
-        SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-        PRIMARY KEY (v1, k, c1, c2)
-
-but the following ones are **not** allowed::
-
-    // Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-    CREATE MATERIALIZED VIEW mv1 AS
-        SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-        PRIMARY KEY (v1, v2, k, c1, c2)
-
-    // Error: must include k in the primary key as it's a base table primary key column
-    CREATE MATERIALIZED VIEW mv1 AS
-        SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-        PRIMARY KEY (c1, c2)
-
-
-.. _mv-options:
-
-MV options
-``````````
-
-A materialized view is internally implemented by a table and as such, creating an MV allows the :ref:`same options as
-creating a table `.
-
-
-.. _alter-materialized-view-statement:
-
-ALTER MATERIALIZED VIEW
-^^^^^^^^^^^^^^^^^^^^^^^
-
-After creation, you can alter the options of a materialized view using the ``ALTER MATERIALIZED VIEW`` statement:
-
-.. productionlist::
-   alter_materialized_view_statement: ALTER MATERIALIZED VIEW `view_name` WITH `table_options`
-
-The options that can be updated are the same as at creation time and thus the :ref:`same as for tables
-`.
-
-.. _drop-materialized-view-statement:
-
-DROP MATERIALIZED VIEW
-^^^^^^^^^^^^^^^^^^^^^^
-
-Dropping a materialized view uses the ``DROP MATERIALIZED VIEW`` statement:
-
-.. productionlist::
-   drop_materialized_view_statement: DROP MATERIALIZED VIEW [ IF EXISTS ] `view_name`;
-
-If the materialized view does not exist, the statement will return an error, unless ``IF EXISTS`` is used in which case
-the operation is a no-op.
-
-MV Limitations
-```````````````
-
-.. Note:: Removal of columns not selected in the Materialized View (via ``UPDATE base SET unselected_column = null`` or
-          ``DELETE unselected_column FROM base``) may shadow missed updates to other columns received by hints or repair.
-          For this reason, we advise against doing deletions on base columns not selected in views until this is
-          fixed on CASSANDRA-13826.
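-
-As a brief illustration of the ``ALTER MATERIALIZED VIEW`` and ``DROP MATERIALIZED VIEW`` statements above (a sketch
-reusing the ``monkeySpecies_by_population`` view from the earlier ``CREATE MATERIALIZED VIEW`` example)::
-
-    ALTER MATERIALIZED VIEW monkeySpecies_by_population WITH comment = 'Allow query by population instead of species';
-
-    DROP MATERIALIZED VIEW IF EXISTS monkeySpecies_by_population;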
diff --git a/src/doc/4.0-alpha4/_sources/cql/operators.rst.txt b/src/doc/4.0-alpha4/_sources/cql/operators.rst.txt deleted file mode 100644 index 1faf0d045..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/operators.rst.txt +++ /dev/null @@ -1,74 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _arithmetic_operators: - -Arithmetic Operators --------------------- - -CQL supports the following operators: - -=============== ======================================================================================================= - Operator Description -=============== ======================================================================================================= - \- (unary) Negates operand - \+ Addition - \- Substraction - \* Multiplication - / Division - % Returns the remainder of a division -=============== ======================================================================================================= - -.. _number-arithmetic: - -Number Arithmetic -^^^^^^^^^^^^^^^^^ - -All arithmetic operations are supported on numeric types or counters. - -The return type of the operation will be based on the operand types: - -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - left/right tinyint smallint int bigint counter float double varint decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - **tinyint** tinyint smallint int bigint bigint float double varint decimal - **smallint** smallint smallint int bigint bigint float double varint decimal - **int** int int int bigint bigint float double varint decimal - **bigint** bigint bigint bigint bigint bigint double double varint decimal - **counter** bigint bigint bigint bigint bigint double double varint decimal - **float** float float float double double float double decimal decimal - **double** double double double double double double double decimal decimal - **varint** varint varint varint decimal decimal decimal decimal decimal decimal - **decimal** decimal decimal decimal decimal decimal decimal decimal decimal decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - -``*``, ``/`` and ``%`` operators have a higher precedence level than ``+`` and ``-`` operator. By consequence, -they will be evaluated before. If two operator in an expression have the same precedence level, they will be evaluated -left to right based on their position in the expression. - -.. _datetime--arithmetic: - -Datetime Arithmetic -^^^^^^^^^^^^^^^^^^^ - -A ``duration`` can be added (+) or substracted (-) from a ``timestamp`` or a ``date`` to create a new -``timestamp`` or ``date``. 
So for instance:: - - SELECT * FROM myTable WHERE t = '2017-01-01' - 2d - -will select all the records with a value of ``t`` which is in the last 2 days of 2016. diff --git a/src/doc/4.0-alpha4/_sources/cql/security.rst.txt b/src/doc/4.0-alpha4/_sources/cql/security.rst.txt deleted file mode 100644 index 429a1ef0d..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/security.rst.txt +++ /dev/null @@ -1,538 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-security: - -Security --------- - -.. _cql-roles: - -Database Roles -^^^^^^^^^^^^^^ - -CQL uses database roles to represent users and group of users. Syntactically, a role is defined by: - -.. productionlist:: - role_name: `identifier` | `string` - -.. _create-role-statement: - -CREATE ROLE -~~~~~~~~~~~ - -Creating a role uses the ``CREATE ROLE`` statement: - -.. productionlist:: - create_role_statement: CREATE ROLE [ IF NOT EXISTS ] `role_name` - : [ WITH `role_options` ] - role_options: `role_option` ( AND `role_option` )* - role_option: PASSWORD '=' `string` - :| LOGIN '=' `boolean` - :| SUPERUSER '=' `boolean` - :| OPTIONS '=' `map_literal` - :| ACCESS TO DATACENTERS `set_literal` - :| ACCESS TO ALL DATACENTERS - -For instance:: - - CREATE ROLE new_role; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true; - CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 }; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'}; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS; - -By default roles do not possess ``LOGIN`` privileges or ``SUPERUSER`` status. - -:ref:`Permissions ` on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and ``SUPERUSER`` status are inherited, but the ``LOGIN`` privilege is -not. - -If a role has the ``LOGIN`` privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role. - -Only a client with with the ``CREATE`` permission on the database roles resource may issue ``CREATE ROLE`` requests (see -the :ref:`relevant section ` below), unless the client is a ``SUPERUSER``. Role management in Cassandra -is pluggable and custom implementations may support only a subset of the listed options. - -Role names should be quoted if they contain non-alphanumeric characters. - -.. 
_setting-credentials-for-internal-authentication: - -Setting credentials for internal authentication -``````````````````````````````````````````````` - -Use the ``WITH PASSWORD`` clause to set a password for internal authentication, enclosing the password in single -quotation marks. - -If internal authentication has not been set up or the role does not have ``LOGIN`` privileges, the ``WITH PASSWORD`` -clause is not necessary. - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. Not specifiying -datacenters implicitly grants access to all datacenters. The clause ``ACCESS TO ALL DATACENTERS`` can be used for -explicitness, but there's no functional difference. - -Creating a role conditionally -````````````````````````````` - -Attempting to create an existing role results in an invalid query condition unless the ``IF NOT EXISTS`` option is used. -If the option is used and the role exists, the statement is a no-op:: - - CREATE ROLE other_role; - CREATE ROLE IF NOT EXISTS other_role; - - -.. _alter-role-statement: - -ALTER ROLE -~~~~~~~~~~ - -Altering a role options uses the ``ALTER ROLE`` statement: - -.. productionlist:: - alter_role_statement: ALTER ROLE `role_name` WITH `role_options` - -For instance:: - - ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false; - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ``ACCESS TO ALL DATACENTERS`` clause. - -Conditions on executing ``ALTER ROLE`` statements: - -- A client must have ``SUPERUSER`` status to alter the ``SUPERUSER`` status of another role -- A client cannot alter the ``SUPERUSER`` status of any role it currently holds -- A client can only modify certain properties of the role with which it identified at login (e.g. ``PASSWORD``) -- To modify properties of a role, the client must be granted ``ALTER`` :ref:`permission ` on that role - -.. _drop-role-statement: - -DROP ROLE -~~~~~~~~~ - -Dropping a role uses the ``DROP ROLE`` statement: - -.. productionlist:: - drop_role_statement: DROP ROLE [ IF EXISTS ] `role_name` - -``DROP ROLE`` requires the client to have ``DROP`` :ref:`permission ` on the role in question. In -addition, client may not ``DROP`` the role with which it identified at login. Finally, only a client with ``SUPERUSER`` -status may ``DROP`` another ``SUPERUSER`` role. - -Attempting to drop a role which does not exist results in an invalid query condition unless the ``IF EXISTS`` option is -used. If the option is used and the role does not exist the statement is a no-op. - -.. note:: DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain - connected and will retain the ability to perform any database actions which do not require :ref:`authorization`. - However, if authorization is enabled, :ref:`permissions` of the dropped role are also revoked, - subject to the :ref:`caching options` configured in :ref:`cassandra.yaml`. 
- Should a dropped role be subsequently recreated and have new :ref:`permissions` or - :ref:`roles` granted to it, any client sessions still connected will acquire the newly granted - permissions and roles. - -.. _grant-role-statement: - -GRANT ROLE -~~~~~~~~~~ - -Granting a role to another uses the ``GRANT ROLE`` statement: - -.. productionlist:: - grant_role_statement: GRANT `role_name` TO `role_name` - -For instance:: - - GRANT report_writer TO alice; - -This statement grants the ``report_writer`` role to ``alice``. Any permissions granted to ``report_writer`` are also -acquired by ``alice``. - -Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:: - - GRANT role_a TO role_b; - GRANT role_b TO role_a; - - GRANT role_a TO role_b; - GRANT role_b TO role_c; - GRANT role_c TO role_a; - -.. _revoke-role-statement: - -REVOKE ROLE -~~~~~~~~~~~ - -Revoking a role uses the ``REVOKE ROLE`` statement: - -.. productionlist:: - revoke_role_statement: REVOKE `role_name` FROM `role_name` - -For instance:: - - REVOKE report_writer FROM alice; - -This statement revokes the ``report_writer`` role from ``alice``. Any permissions that ``alice`` has acquired via the -``report_writer`` role are also revoked. - -.. _list-roles-statement: - -LIST ROLES -~~~~~~~~~~ - -All the known roles (in the system or granted to specific role) can be listed using the ``LIST ROLES`` statement: - -.. productionlist:: - list_roles_statement: LIST ROLES [ OF `role_name` ] [ NORECURSIVE ] - -For instance:: - - LIST ROLES; - -returns all known roles in the system, this requires ``DESCRIBE`` permission on the database roles resource. And:: - - LIST ROLES OF alice; - -enumerates all roles granted to ``alice``, including those transitively acquired. But:: - - LIST ROLES OF bob NORECURSIVE - -lists all roles directly granted to ``bob`` without including any of the transitively acquired ones. - -Users -^^^^^ - -Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -``USER``. For backward compatibility, the legacy syntax has been preserved with ``USER`` centric statements becoming -synonyms for the ``ROLE`` based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role. - -.. _create-user-statement: - -CREATE USER -~~~~~~~~~~~ - -Creating a user uses the ``CREATE USER`` statement: - -.. productionlist:: - create_user_statement: CREATE USER [ IF NOT EXISTS ] `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - user_option: SUPERUSER | NOSUPERUSER - -For instance:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER; - -``CREATE USER`` is equivalent to ``CREATE ROLE`` where the ``LOGIN`` option is ``true``. 
So, the following pairs of -statements are equivalent:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - - CREATE USER alice WITH PASSWORD 'password_a'; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - -.. _alter-user-statement: - -ALTER USER -~~~~~~~~~~ - -Altering the options of a user uses the ``ALTER USER`` statement: - -.. productionlist:: - alter_user_statement: ALTER USER `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - -For instance:: - - ALTER USER alice WITH PASSWORD 'PASSWORD_A'; - ALTER USER bob SUPERUSER; - -.. _drop-user-statement: - -DROP USER -~~~~~~~~~ - -Dropping a user uses the ``DROP USER`` statement: - -.. productionlist:: - drop_user_statement: DROP USER [ IF EXISTS ] `role_name` - -.. _list-users-statement: - -LIST USERS -~~~~~~~~~~ - -Existing users can be listed using the ``LIST USERS`` statement: - -.. productionlist:: - list_users_statement: LIST USERS - -Note that this statement is equivalent to:: - - LIST ROLES; - -but only roles with the ``LOGIN`` privilege are included in the output. - -Data Control -^^^^^^^^^^^^ - -.. _cql-permissions: - -Permissions -~~~~~~~~~~~ - -Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically: - -- The hierarchy of Data resources, Keyspaces and Tables has the structure ``ALL KEYSPACES`` -> ``KEYSPACE`` -> - ``TABLE``. -- Function resources have the structure ``ALL FUNCTIONS`` -> ``KEYSPACE`` -> ``FUNCTION`` -- Resources representing roles have the structure ``ALL ROLES`` -> ``ROLE`` -- Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ``ALL MBEANS`` -> - ``MBEAN`` - -Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting ``SELECT`` on a ``KEYSPACE`` automatically grants it on all ``TABLES`` in that ``KEYSPACE``. Likewise, granting -a permission on ``ALL FUNCTIONS`` grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace. - -Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes. - -The full set of available permissions is: - -- ``CREATE`` -- ``ALTER`` -- ``DROP`` -- ``SELECT`` -- ``MODIFY`` -- ``AUTHORIZE`` -- ``DESCRIBE`` -- ``EXECUTE`` - -Not all permissions are applicable to every type of resource. For instance, ``EXECUTE`` is only relevant in the context -of functions or mbeans; granting ``EXECUTE`` on a resource representing a table is nonsensical. Attempting to ``GRANT`` -a permission on resource to which it cannot be applied results in an error response. 
The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission. - -=============== =============================== ======================================================================= - Permission Resource Operations -=============== =============================== ======================================================================= - ``CREATE`` ``ALL KEYSPACES`` ``CREATE KEYSPACE`` and ``CREATE TABLE`` in any keyspace - ``CREATE`` ``KEYSPACE`` ``CREATE TABLE`` in specified keyspace - ``CREATE`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` in any keyspace and ``CREATE AGGREGATE`` in any - keyspace - ``CREATE`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE`` in specified keyspace - ``CREATE`` ``ALL ROLES`` ``CREATE ROLE`` - ``ALTER`` ``ALL KEYSPACES`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in any keyspace - ``ALTER`` ``KEYSPACE`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in specified keyspace - ``ALTER`` ``TABLE`` ``ALTER TABLE`` - ``ALTER`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing any existing - ``ALTER`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing in - specified keyspace - ``ALTER`` ``FUNCTION`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing - ``ALTER`` ``ALL ROLES`` ``ALTER ROLE`` on any role - ``ALTER`` ``ROLE`` ``ALTER ROLE`` - ``DROP`` ``ALL KEYSPACES`` ``DROP KEYSPACE`` and ``DROP TABLE`` in any keyspace - ``DROP`` ``KEYSPACE`` ``DROP TABLE`` in specified keyspace - ``DROP`` ``TABLE`` ``DROP TABLE`` - ``DROP`` ``ALL FUNCTIONS`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in any keyspace - ``DROP`` ``ALL FUNCTIONS IN KEYSPACE`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in specified keyspace - ``DROP`` ``FUNCTION`` ``DROP FUNCTION`` - ``DROP`` ``ALL ROLES`` ``DROP ROLE`` on any role - ``DROP`` ``ROLE`` ``DROP ROLE`` - ``SELECT`` ``ALL KEYSPACES`` ``SELECT`` on any table - ``SELECT`` ``KEYSPACE`` ``SELECT`` on any table in specified keyspace - ``SELECT`` ``TABLE`` ``SELECT`` on specified table - ``SELECT`` ``ALL MBEANS`` Call getter methods on any mbean - ``SELECT`` ``MBEANS`` Call getter methods on any mbean matching a wildcard pattern - ``SELECT`` ``MBEAN`` Call getter methods on named mbean - ``MODIFY`` ``ALL KEYSPACES`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table - ``MODIFY`` ``KEYSPACE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table in - specified keyspace - ``MODIFY`` ``TABLE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on specified table - ``MODIFY`` ``ALL MBEANS`` Call setter methods on any mbean - ``MODIFY`` ``MBEANS`` Call setter methods on any mbean matching a wildcard pattern - ``MODIFY`` ``MBEAN`` Call setter methods on named mbean - ``AUTHORIZE`` ``ALL KEYSPACES`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table - ``AUTHORIZE`` ``KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table in - specified keyspace - ``AUTHORIZE`` ``TABLE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified table - ``AUTHORIZE`` ``ALL FUNCTIONS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any function - ``AUTHORIZE`` ``ALL FUNCTIONS IN KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` in specified keyspace - ``AUTHORIZE`` ``FUNCTION`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified function - ``AUTHORIZE`` ``ALL MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean - 
``AUTHORIZE`` ``MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean matching - a wildcard pattern - ``AUTHORIZE`` ``MBEAN`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on named mbean - ``AUTHORIZE`` ``ALL ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on any role - ``AUTHORIZE`` ``ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on specified roles - ``DESCRIBE`` ``ALL ROLES`` ``LIST ROLES`` on all roles or only roles granted to another, - specified role - ``DESCRIBE`` ``ALL MBEANS`` Retrieve metadata about any mbean from the platform's MBeanServer - ``DESCRIBE`` ``MBEANS`` Retrieve metadata about any mbean matching a wildcard patter from the - platform's MBeanServer - ``DESCRIBE`` ``MBEAN`` Retrieve metadata about a named mbean from the platform's MBeanServer - ``EXECUTE`` ``ALL FUNCTIONS`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function, and use of - any function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL FUNCTIONS IN KEYSPACE`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function in specified - keyspace and use of any function in keyspace in ``CREATE AGGREGATE`` - ``EXECUTE`` ``FUNCTION`` ``SELECT``, ``INSERT`` and ``UPDATE`` using specified function and use - of the function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL MBEANS`` Execute operations on any mbean - ``EXECUTE`` ``MBEANS`` Execute operations on any mbean matching a wildcard pattern - ``EXECUTE`` ``MBEAN`` Execute operations on named mbean -=============== =============================== ======================================================================= - -.. _grant-permission-statement: - -GRANT PERMISSION -~~~~~~~~~~~~~~~~ - -Granting a permission uses the ``GRANT PERMISSION`` statement: - -.. productionlist:: - grant_permission_statement: GRANT `permissions` ON `resource` TO `role_name` - permissions: ALL [ PERMISSIONS ] | `permission` [ PERMISSION ] - permission: CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE - resource: ALL KEYSPACES - :| KEYSPACE `keyspace_name` - :| [ TABLE ] `table_name` - :| ALL ROLES - :| ROLE `role_name` - :| ALL FUNCTIONS [ IN KEYSPACE `keyspace_name` ] - :| FUNCTION `function_name` '(' [ `cql_type` ( ',' `cql_type` )* ] ')' - :| ALL MBEANS - :| ( MBEAN | MBEANS ) `string` - -For instance:: - - GRANT SELECT ON ALL KEYSPACES TO data_reader; - -This gives any user with the role ``data_reader`` permission to execute ``SELECT`` statements on any table across all -keyspaces:: - - GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer; - -This give any user with the role ``data_writer`` permission to perform ``UPDATE``, ``INSERT``, ``UPDATE``, ``DELETE`` -and ``TRUNCATE`` queries on all tables in the ``keyspace1`` keyspace:: - - GRANT DROP ON keyspace1.table1 TO schema_owner; - -This gives any user with the ``schema_owner`` role permissions to ``DROP`` ``keyspace1.table1``:: - - GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer; - -This grants any user with the ``report_writer`` role permission to execute ``SELECT``, ``INSERT`` and ``UPDATE`` queries -which use the function ``keyspace1.user_function( int )``:: - - GRANT DESCRIBE ON ALL ROLES TO role_admin; - -This grants any user with the ``role_admin`` role permission to view any and all roles in the system with a ``LIST -ROLES`` statement - -.. _grant-all: - -GRANT ALL -````````` - -When the ``GRANT ALL`` form is used, the appropriate set of permissions is determined automatically based on the target -resource. 
- -Automatic Granting -`````````````````` - -When a resource is created, via a ``CREATE KEYSPACE``, ``CREATE TABLE``, ``CREATE FUNCTION``, ``CREATE AGGREGATE`` or -``CREATE ROLE`` statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource. - -.. _revoke-permission-statement: - -REVOKE PERMISSION -~~~~~~~~~~~~~~~~~ - -Revoking a permission from a role uses the ``REVOKE PERMISSION`` statement: - -.. productionlist:: - revoke_permission_statement: REVOKE `permissions` ON `resource` FROM `role_name` - -For instance:: - - REVOKE SELECT ON ALL KEYSPACES FROM data_reader; - REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer; - REVOKE DROP ON keyspace1.table1 FROM schema_owner; - REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer; - REVOKE DESCRIBE ON ALL ROLES FROM role_admin; - -Because of their function in normal driver operations, certain tables cannot have their `SELECT` permissions -revoked. The following tables will be available to all authorized users regardless of their assigned role:: - -* `system_schema.keyspaces` -* `system_schema.columns` -* `system_schema.tables` -* `system.local` -* `system.peers` - -.. _list-permissions-statement: - -LIST PERMISSIONS -~~~~~~~~~~~~~~~~ - -Listing granted permissions uses the ``LIST PERMISSIONS`` statement: - -.. productionlist:: - list_permissions_statement: LIST `permissions` [ ON `resource` ] [ OF `role_name` [ NORECURSIVE ] ] - -For instance:: - - LIST ALL PERMISSIONS OF alice; - -Show all permissions granted to ``alice``, including those acquired transitively from any other roles:: - - LIST ALL PERMISSIONS ON keyspace1.table1 OF bob; - -Show all permissions on ``keyspace1.table1`` granted to ``bob``, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to ``keyspace1.table1``. -For example, should ``bob`` have ``ALTER`` permission on ``keyspace1``, that would be included in the results of this -query. Adding the ``NORECURSIVE`` switch restricts the results to only those permissions which were directly granted to -``bob`` or one of ``bob``'s roles:: - - LIST SELECT PERMISSIONS OF carlos; - -Show any permissions granted to ``carlos`` or any of ``carlos``'s roles, limited to ``SELECT`` permissions on any -resource. diff --git a/src/doc/4.0-alpha4/_sources/cql/triggers.rst.txt b/src/doc/4.0-alpha4/_sources/cql/triggers.rst.txt deleted file mode 100644 index db3f53e38..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/triggers.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. 
_cql-triggers: - -Triggers --------- - -Triggers are identified by a name defined by: - -.. productionlist:: - trigger_name: `identifier` - - -.. _create-trigger-statement: - -CREATE TRIGGER -^^^^^^^^^^^^^^ - -Creating a new trigger uses the ``CREATE TRIGGER`` statement: - -.. productionlist:: - create_trigger_statement: CREATE TRIGGER [ IF NOT EXISTS ] `trigger_name` - : ON `table_name` - : USING `string` - -For instance:: - - CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex'; - -The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a ``lib/triggers`` subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction. - -.. _drop-trigger-statement: - -DROP TRIGGER -^^^^^^^^^^^^ - -Dropping a trigger uses the ``DROP TRIGGER`` statement: - -.. productionlist:: - drop_trigger_statement: DROP TRIGGER [ IF EXISTS ] `trigger_name` ON `table_name` - -For instance:: - - DROP TRIGGER myTrigger ON myTable; diff --git a/src/doc/4.0-alpha4/_sources/cql/types.rst.txt b/src/doc/4.0-alpha4/_sources/cql/types.rst.txt deleted file mode 100644 index 509a7565e..000000000 --- a/src/doc/4.0-alpha4/_sources/cql/types.rst.txt +++ /dev/null @@ -1,559 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. _data-types: - -Data Types ----------- - -CQL is a typed language and supports a rich set of data types, including :ref:`native types `, -:ref:`collection types `, :ref:`user-defined types `, :ref:`tuple types ` and :ref:`custom -types `: - -.. productionlist:: - cql_type: `native_type` | `collection_type` | `user_defined_type` | `tuple_type` | `custom_type` - - -.. _native-types: - -Native Types -^^^^^^^^^^^^ - -The native types supported by CQL are: - -.. 
productionlist:: - native_type: ASCII - : | BIGINT - : | BLOB - : | BOOLEAN - : | COUNTER - : | DATE - : | DECIMAL - : | DOUBLE - : | DURATION - : | FLOAT - : | INET - : | INT - : | SMALLINT - : | TEXT - : | TIME - : | TIMESTAMP - : | TIMEUUID - : | TINYINT - : | UUID - : | VARCHAR - : | VARINT - -The following table gives additional informations on the native data types, and on which kind of :ref:`constants -` each type supports: - -=============== ===================== ================================================================================== - type constants supported description -=============== ===================== ================================================================================== - ``ascii`` :token:`string` ASCII character string - ``bigint`` :token:`integer` 64-bit signed long - ``blob`` :token:`blob` Arbitrary bytes (no validation) - ``boolean`` :token:`boolean` Either ``true`` or ``false`` - ``counter`` :token:`integer` Counter column (64-bit signed value). See :ref:`counters` for details - ``date`` :token:`integer`, A date (with no corresponding time value). See :ref:`dates` below for details - :token:`string` - ``decimal`` :token:`integer`, Variable-precision decimal - :token:`float` - ``double`` :token:`integer` 64-bit IEEE-754 floating point - :token:`float` - ``duration`` :token:`duration`, A duration with nanosecond precision. See :ref:`durations` below for details - ``float`` :token:`integer`, 32-bit IEEE-754 floating point - :token:`float` - ``inet`` :token:`string` An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that - there is no ``inet`` constant, IP address should be input as strings - ``int`` :token:`integer` 32-bit signed int - ``smallint`` :token:`integer` 16-bit signed int - ``text`` :token:`string` UTF8 encoded string - ``time`` :token:`integer`, A time (with no corresponding date value) with nanosecond precision. See - :token:`string` :ref:`times` below for details - ``timestamp`` :token:`integer`, A timestamp (date and time) with millisecond precision. See :ref:`timestamps` - :token:`string` below for details - ``timeuuid`` :token:`uuid` Version 1 UUID_, generally used as a “conflict-free” timestamp. Also see - :ref:`timeuuid-functions` - ``tinyint`` :token:`integer` 8-bit signed int - ``uuid`` :token:`uuid` A UUID_ (of any version) - ``varchar`` :token:`string` UTF8 encoded string - ``varint`` :token:`integer` Arbitrary-precision integer -=============== ===================== ================================================================================== - -.. _counters: - -Counters -~~~~~~~~ - -The ``counter`` type is used to define *counter columns*. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the :ref:`UPDATE statement -` for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0. - -.. _counter-limitations: - -Counters have a number of important limitations: - -- They cannot be used for columns part of the ``PRIMARY KEY`` of a table. -- A table that contains a counter can only contain counters. In other words, either all the columns of a table outside - the ``PRIMARY KEY`` have the ``counter`` type, or none of them have it. -- Counters do not support :ref:`expiration `. 
-- The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other
-  words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
-- Counter updates are, by nature, not `idempotent `__. An important
-  consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node),
-  the client has no way to know if the update has been applied or not. In particular, replaying the update may or may
-  not lead to an over count.
-
-.. _timestamps:
-
-Working with timestamps
-^^^^^^^^^^^^^^^^^^^^^^^
-
-Values of the ``timestamp`` type are encoded as 64-bit signed integers representing a number of milliseconds since the
-standard base time known as `the epoch `__: January 1 1970 at 00:00:00 GMT.
-
-Timestamps can be input in CQL either using their value as an :token:`integer`, or using a :token:`string` that
-represents an `ISO 8601 `__ date. For instance, all of the values below are
-valid ``timestamp`` values for Mar 2, 2011, at 04:05:00 AM, GMT:
-
-- ``1299038700000``
-- ``'2011-03-02 04:05+0000'``
-- ``'2011-03-02 04:05:00+0000'``
-- ``'2011-03-02 04:05:00.000+0000'``
-- ``'2011-03-02T04:05+0000'``
-- ``'2011-03-02T04:05:00+0000'``
-- ``'2011-03-02T04:05:00.000+0000'``
-
-The ``+0000`` above is an RFC 822 4-digit time zone specification; ``+0000`` refers to GMT. US Pacific Standard Time is
-``-0800``. The time zone may be omitted if desired (``'2011-03-02 04:05:00'``), and if so, the date will be interpreted
-as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties
-inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be
-specified for timestamps when feasible.
-
-The time of day may also be omitted (``'2011-03-02'`` or ``'2011-03-02+0000'``), in which case the time of day will
-default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using
-the :ref:`date ` type.
-
-.. _dates:
-
-Working with dates
-^^^^^^^^^^^^^^^^^^
-
-Values of the ``date`` type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at
-the center of the range (2^31). The epoch is January 1st, 1970.
-
-As for :ref:`timestamp `, a date can be input either as an :token:`integer` or using a date
-:token:`string`. In the latter case, the format should be ``yyyy-mm-dd`` (so ``'2011-02-03'`` for instance).
-
-.. _times:
-
-Working with times
-^^^^^^^^^^^^^^^^^^
-
-Values of the ``time`` type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.
-
-As for :ref:`timestamp `, a time can be input either as an :token:`integer` or using a :token:`string`
-representing the time. In the latter case, the format should be ``hh:mm:ss[.fffffffff]`` (where the sub-second precision
-is optional and, if provided, can be less precise than nanoseconds). So for instance, the following are valid inputs for
-a time:
-
-- ``'08:12:54'``
-- ``'08:12:54.123'``
-- ``'08:12:54.123456'``
-- ``'08:12:54.123456789'``
-
-.. _durations:
-
-Working with durations
-^^^^^^^^^^^^^^^^^^^^^^
-
-Values of the ``duration`` type are encoded as 3 signed integers of variable lengths. The first integer represents the
-number of months, the second the number of days and the third the number of nanoseconds.
-This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on
-daylight saving time. Internally, the number of months and days are decoded as 32-bit integers whereas the number of
-nanoseconds is decoded as a 64-bit integer.
-
-A duration can be input as:
-
-  #. ``(quantity unit)+`` like ``12h30m`` where the unit can be:
-
-     * ``y``: years (12 months)
-     * ``mo``: months (1 month)
-     * ``w``: weeks (7 days)
-     * ``d``: days (1 day)
-     * ``h``: hours (3,600,000,000,000 nanoseconds)
-     * ``m``: minutes (60,000,000,000 nanoseconds)
-     * ``s``: seconds (1,000,000,000 nanoseconds)
-     * ``ms``: milliseconds (1,000,000 nanoseconds)
-     * ``us`` or ``µs`` : microseconds (1000 nanoseconds)
-     * ``ns``: nanoseconds (1 nanosecond)
-  #. ISO 8601 format: ``P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W``
-  #. ISO 8601 alternative format: ``P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]``
-
-For example::
-
-    INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-    INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-    INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-.. _duration-limitation:
-
-Duration columns cannot be used in a table's ``PRIMARY KEY``. This limitation is due to the fact that
-durations cannot be ordered. It is effectively not possible to know if ``1mo`` is greater than ``29d`` without a date
-context.
-
-A ``1d`` duration is not equal to a ``24h`` one as the duration type has been created to be able to support daylight
-saving time.
-
-.. _collections:
-
-Collections
-^^^^^^^^^^^
-
-CQL supports 3 kinds of collections: :ref:`maps`, :ref:`sets` and :ref:`lists`. The types of those collections are
-defined by:
-
-.. productionlist::
-   collection_type: MAP '<' `cql_type` ',' `cql_type` '>'
-                  : | SET '<' `cql_type` '>'
-                  : | LIST '<' `cql_type` '>'
-
-and their values can be input using collection literals:
-
-.. productionlist::
-   collection_literal: `map_literal` | `set_literal` | `list_literal`
-   map_literal: '{' [ `term` ':' `term` (',' `term` : `term`)* ] '}'
-   set_literal: '{' [ `term` (',' `term`)* ] '}'
-   list_literal: '[' [ `term` (',' `term`)* ] ']'
-
-Note however that neither :token:`bind_marker` nor ``NULL`` are supported inside collection literals.
-
-Noteworthy characteristics
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the
-phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all
-messages sent by a user”, “events registered by a sensor”...), then collections are not appropriate and a specific table
-(with clustering columns) should be used instead, as sketched after the list below. Concretely, (non-frozen) collections
-have the following noteworthy characteristics and limitations:
-
-- Individual collections are not indexed internally, which means that even to access a single element of a collection,
-  the whole collection has to be read (and reading one is not paged internally).
-- While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do.
-  Further, some list operations are not idempotent by nature (see the section on :ref:`lists ` below for
-  details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when
-  possible.
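-For instance, the “events registered by a sensor” case above is better served by a dedicated table keyed with clustering
-columns than by a collection. A minimal sketch of that alternative (the table and column names are illustrative only)::
-
-    CREATE TABLE sensor_events (
-        sensor_id text,
-        recorded_at timestamp,
-        value double,
-        PRIMARY KEY (sensor_id, recorded_at)
-    );
-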
-
-Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an
-anti-pattern to use a (single) collection to store large amounts of data.
-
-.. _maps:
-
-Maps
-~~~~
-
-A ``map`` is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define
-and insert a map with::
-
-    CREATE TABLE users (
-        id text PRIMARY KEY,
-        name text,
-        favs map<text, text>   // A map of text keys, and text values
-    );
-
-    INSERT INTO users (id, name, favs)
-        VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-    // Replace the existing map entirely.
-    UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-Further, maps support:
-
-- Updating or inserting one or more elements::
-
-    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
-    UPDATE users SET favs = favs + { 'movie' : 'Casablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
-
-- Removing one or more elements (if an element doesn't exist, removing it is a no-op but no error is thrown)::
-
-    DELETE favs['author'] FROM users WHERE id = 'jsmith';
-    UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
-
-  Note that for removing multiple elements in a ``map``, you remove from it a ``set`` of keys.
-
-Lastly, TTLs are allowed for both ``INSERT`` and ``UPDATE``, but in both cases the TTL set only applies to the newly
-inserted/updated elements. In other words::
-
-    UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-will only apply the TTL to the ``{ 'color' : 'green' }`` record, the rest of the map remaining unaffected.
-
-
-.. _sets:
-
-Sets
-~~~~
-
-A ``set`` is a (sorted) collection of unique values. You can define and insert a set with::
-
-    CREATE TABLE images (
-        name text PRIMARY KEY,
-        owner text,
-        tags set<text>  // A set of text values
-    );
-
-    INSERT INTO images (name, owner, tags)
-        VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-    // Replace the existing set entirely
-    UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-Further, sets support:
-
-- Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op)::
-
-    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
-
-- Removing one or multiple elements (if an element doesn't exist, removing it is a no-op but no error is thrown)::
-
-    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
-
-Lastly, as for :ref:`maps `, TTLs, if used, only apply to the newly inserted values.
-
-.. _lists:
-
-Lists
-~~~~~
-
-.. note:: As mentioned above and further discussed at the end of this section, lists have limitations and specific
-   performance considerations that you should take into account before using them. In general, if you can use a
-   :ref:`set ` instead of a list, always prefer a set.
-
-A ``list`` is a (sorted) collection of non-unique values where elements are ordered by their position in the list.
-You can define and insert a list with::
-
-    CREATE TABLE plays (
-        id text PRIMARY KEY,
-        game text,
-        players int,
-        scores list<int> // A list of integers
-    )
-
-    INSERT INTO plays (id, game, players, scores)
-        VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-    // Replace the existing list entirely
-    UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-Further, lists support:
-
-- Appending and prepending values to a list::
-
-    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
-    UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
-
-- Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that
-  position or an error will be thrown that the list is too small::
-
-    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
-
-- Removing an element by its position in the list. This implies that the list has a pre-existing element for that position
-  or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the
-  list size will be diminished by 1, shifting the position of all the elements following the one deleted::
-
-    DELETE scores[1] FROM plays WHERE id = '123-afde';
-
-- Deleting *all* the occurrences of particular values in the list (if a particular element doesn't occur at all in the
-  list, it is simply ignored and no error is thrown)::
-
-    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
-
-.. warning:: The append and prepend operations are not idempotent by nature. So in particular, if one of these operations
-   times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value
-   twice.
-
-.. warning:: Setting and removing an element by position and removing occurrences of particular values incur an internal
-   *read-before-write*. They will thus run more slowly and take more resources than usual updates (with the exclusion
-   of conditional writes that have their own cost).
-
-Lastly, as for :ref:`maps `, TTLs, when used, only apply to the newly inserted values.
-
-.. _udts:
-
-User-Defined Types
-^^^^^^^^^^^^^^^^^^
-
-CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using
-the :token:`create_type_statement`, :token:`alter_type_statement` and :token:`drop_type_statement` described below. But
-once created, a UDT is simply referred to by its name:
-
-.. productionlist::
-   user_defined_type: `udt_name`
-   udt_name: [ `keyspace_name` '.' ] `identifier`
-
-
-Creating a UDT
-~~~~~~~~~~~~~~
-
-Creating a new user-defined type is done using a ``CREATE TYPE`` statement defined by:
-
-.. productionlist::
-   create_type_statement: CREATE TYPE [ IF NOT EXISTS ] `udt_name`
-                        : '(' `field_definition` ( ',' `field_definition` )* ')'
-   field_definition: `identifier` `cql_type`
-
-A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any
-type, including collections or other UDTs. For instance::
-
-    CREATE TYPE phone (
-        country_code int,
-        number text,
-    )
-
-    CREATE TYPE address (
-        street text,
-        city text,
-        zip text,
-        phones map<text, phone>
-    )
-
-    CREATE TABLE user (
-        name text PRIMARY KEY,
-        addresses map<text, frozen<address>>
-    )
-
-Note that:
-
-- Attempting to create an already existing type will result in an error unless the ``IF NOT EXISTS`` option is used. If
-  it is used, the statement will be a no-op if the type already exists.
-- A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At
-  creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in
-  the current keyspace.
-- As of Cassandra |version|, UDTs have to be frozen in most cases, hence the ``frozen<address>``
-  in the table definition above. Please see the section on :ref:`frozen ` for more details.
-
-UDT literals
-~~~~~~~~~~~~
-
-Once a user-defined type has been created, values can be input using a UDT literal:
-
-.. productionlist::
-   udt_literal: '{' `identifier` ':' `term` ( ',' `identifier` ':' `term` )* '}'
-
-In other words, a UDT literal is like a :ref:`map ` literal but its keys are the names of the fields of the type.
-For instance, one could insert into the table defined in the previous section using::
-
-    INSERT INTO user (name, addresses)
-    VALUES ('z3 Pr3z1den7', {
-        'home' : {
-            street: '1600 Pennsylvania Ave NW',
-            city: 'Washington',
-            zip: '20500',
-            phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                      'landline' : { country_code: 1, number: '...' } }
-        },
-        'work' : {
-            street: '1600 Pennsylvania Ave NW',
-            city: 'Washington',
-            zip: '20500',
-            phones: { 'fax' : { country_code: 1, number: '...' } }
-        }
-    })
-
-To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields
-(in which case those will be ``null``).
-
-Altering a UDT
-~~~~~~~~~~~~~~
-
-An existing user-defined type can be modified using an ``ALTER TYPE`` statement:
-
-.. productionlist::
-   alter_type_statement: ALTER TYPE `udt_name` `alter_type_modification`
-   alter_type_modification: ADD `field_definition`
-                          : | RENAME `identifier` TO `identifier` ( `identifier` TO `identifier` )*
-
-You can:
-
-- add a new field to the type (``ALTER TYPE address ADD country text``). That new field will be ``null`` for any values
-  of the type created before the addition.
-- rename the fields of the type (``ALTER TYPE address RENAME zip TO zipcode``).
-
-Dropping a UDT
-~~~~~~~~~~~~~~
-
-You can drop an existing user-defined type using a ``DROP TYPE`` statement:
-
-.. productionlist::
-   drop_type_statement: DROP TYPE [ IF EXISTS ] `udt_name`
-
-Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is
-still in use by another type, table or function will result in an error.
-
-If the type dropped does not exist, an error will be returned unless ``IF EXISTS`` is used, in which case the operation
-is a no-op.
-
-.. _tuples:
-
-Tuples
-^^^^^^
-
-CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be
-thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:
-
-.. productionlist::
-   tuple_type: TUPLE '<' `cql_type` ( ',' `cql_type` )* '>'
-   tuple_literal: '(' `term` ( ',' `term` )* ')'
-
-and can be used thusly::
-
-    CREATE TABLE durations (
-        event text,
-        duration tuple<int, text>,
-    )
-
-    INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-Unlike other "composed" types (collections and UDTs), a tuple is always :ref:`frozen ` (without the need for the
-``frozen`` keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple).
-Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of
-those values can be null but they need to be explicitly declared as such).
-
-.. _custom-types:
-
-Custom Types
-^^^^^^^^^^^^
-
-.. note:: Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is
-   complex, not user friendly and the other provided types, particularly :ref:`user-defined types `, should almost
-   always be enough.
-
-A custom type is defined by:
-
-..
productionlist:: - custom_type: `string` - -A custom type is a :token:`string` that contains the name of Java class that extends the server side ``AbstractType`` -class and that can be loaded by Cassandra (it should thus be in the ``CLASSPATH`` of every node running Cassandra). That -class will define what values are valid for the type and how the time sorts when used for a clustering column. For any -other purpose, a value of a custom type is the same than that of a ``blob``, and can in particular be input using the -:token:`blob` literal syntax. diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_conceptual.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_conceptual.rst.txt deleted file mode 100644 index 8749b799e..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_conceptual.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. conceptual_data_modeling - -Conceptual Data Modeling -^^^^^^^^^^^^^^^^^^^^^^^^ - -First, let’s create a simple domain model that is easy to understand in -the relational world, and then see how you might map it from a relational -to a distributed hashtable model in Cassandra. - -Let's use an example that is complex enough -to show the various data structures and design patterns, but not -something that will bog you down with details. Also, a domain that’s -familiar to everyone will allow you to concentrate on how to work with -Cassandra, not on what the application domain is all about. - -For example, let's use a domain that is easily understood and that -everyone can relate to: making hotel reservations. - -The conceptual domain includes hotels, guests that stay in the hotels, a -collection of rooms for each hotel, the rates and availability of those -rooms, and a record of reservations booked for guests. Hotels typically -also maintain a collection of “points of interest,” which are parks, -museums, shopping galleries, monuments, or other places near the hotel -that guests might want to visit during their stay. Both hotels and -points of interest need to maintain geolocation data so that they can be -found on maps for mashups, and to calculate distances. - -The conceptual domain is depicted below using the entity–relationship -model popularized by Peter Chen. This simple diagram represents the -entities in the domain with rectangles, and attributes of those entities -with ovals. Attributes that represent unique identifiers for items are -underlined. Relationships between entities are represented as diamonds, -and the connectors between the relationship and each entity show the -multiplicity of the connection. - -.. 
image:: images/data_modeling_hotel_erd.png - -Obviously, in the real world, there would be many more considerations -and much more complexity. For example, hotel rates are notoriously -dynamic, and calculating them involves a wide array of factors. Here -you’re defining something complex enough to be interesting and touch on -the important points, but simple enough to maintain the focus on -learning Cassandra. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_logical.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_logical.rst.txt deleted file mode 100644 index 27fa4beb7..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_logical.rst.txt +++ /dev/null @@ -1,219 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Logical Data Modeling -===================== - -Now that you have defined your queries, you’re ready to begin designing -Cassandra tables. First, create a logical model containing a table -for each query, capturing entities and relationships from the conceptual -model. - -To name each table, you’ll identify the primary entity type for which you -are querying and use that to start the entity name. If you are querying -by attributes of other related entities, append those to the table -name, separated with ``_by_``. For example, ``hotels_by_poi``. - -Next, you identify the primary key for the table, adding partition key -columns based on the required query attributes, and clustering columns -in order to guarantee uniqueness and support desired sort ordering. - -The design of the primary key is extremely important, as it will -determine how much data will be stored in each partition and how that -data is organized on disk, which in turn will affect how quickly -Cassandra processes reads. - -Complete each table by adding any additional attributes identified by -the query. If any of these additional attributes are the same for every -instance of the partition key, mark the column as static. - -Now that was a pretty quick description of a fairly involved process, so -it will be worthwhile to work through a detailed example. First, -let’s introduce a notation that you can use to represent logical -models. - -Several individuals within the Cassandra community have proposed -notations for capturing data models in diagrammatic form. This document -uses a notation popularized by Artem Chebotko which provides a simple, -informative way to visualize the relationships between queries and -tables in your designs. This figure shows the Chebotko notation for a -logical data model. 
- -.. image:: images/data_modeling_chebotko_logical.png - -Each table is shown with its title and a list of columns. Primary key -columns are identified via symbols such as **K** for partition key -columns and **C**\ ↑ or **C**\ ↓ to represent clustering columns. Lines -are shown entering tables or between tables to indicate the queries that -each table is designed to support. - -Hotel Logical Data Model ------------------------- - -The figure below shows a Chebotko logical data model for the queries -involving hotels, points of interest, rooms, and amenities. One thing you'll -notice immediately is that the Cassandra design doesn’t include dedicated -tables for rooms or amenities, as you had in the relational design. This -is because the workflow didn’t identify any queries requiring this -direct access. - -.. image:: images/data_modeling_hotel_logical.png - -Let’s explore the details of each of these tables. - -The first query Q1 is to find hotels near a point of interest, so you’ll -call this table ``hotels_by_poi``. Searching by a named point of -interest is a clue that the point of interest should be a part -of the primary key. Let’s reference the point of interest by name, -because according to the workflow that is how users will start their -search. - -You’ll note that you certainly could have more than one hotel near a -given point of interest, so you’ll need another component in the primary -key in order to make sure you have a unique partition for each hotel. So -you add the hotel key as a clustering column. - -An important consideration in designing your table’s primary key is -making sure that it defines a unique data element. Otherwise you run the -risk of accidentally overwriting data. - -Now for the second query (Q2), you’ll need a table to get information -about a specific hotel. One approach would have been to put all of the -attributes of a hotel in the ``hotels_by_poi`` table, but you added -only those attributes that were required by the application workflow. - -From the workflow diagram, you know that the ``hotels_by_poi`` table is -used to display a list of hotels with basic information on each hotel, -and the application knows the unique identifiers of the hotels returned. -When the user selects a hotel to view details, you can then use Q2, which -is used to obtain details about the hotel. Because you already have the -``hotel_id`` from Q1, you use that as a reference to the hotel you’re -looking for. Therefore the second table is just called ``hotels``. - -Another option would have been to store a set of ``poi_names`` in the -hotels table. This is an equally valid approach. You’ll learn through -experience which approach is best for your application. - -Q3 is just a reverse of Q1—looking for points of interest near a hotel, -rather than hotels near a point of interest. This time, however, you need -to access the details of each point of interest, as represented by the -``pois_by_hotel`` table. As previously, you add the point of -interest name as a clustering key to guarantee uniqueness. - -At this point, let’s now consider how to support query Q4 to help the -user find available rooms at a selected hotel for the nights they are -interested in staying. Note that this query involves both a start date -and an end date. Because you’re querying over a range instead of a single -date, you know that you’ll need to use the date as a clustering key. 
-Use the ``hotel_id`` as a primary key to group room data for each hotel -on a single partition, which should help searches be super fast. Let’s -call this the ``available_rooms_by_hotel_date`` table. - -To support searching over a range, use :ref:`clustering columns -` to store -attributes that you need to access in a range query. Remember that the -order of the clustering columns is important. - -The design of the ``available_rooms_by_hotel_date`` table is an instance -of the **wide partition** pattern. This -pattern is sometimes called the **wide row** pattern when discussing -databases that support similar models, but wide partition is a more -accurate description from a Cassandra perspective. The essence of the -pattern is to group multiple related rows in a partition in order to -support fast access to multiple rows within the partition in a single -query. - -In order to round out the shopping portion of the data model, add the -``amenities_by_room`` table to support Q5. This will allow users to -view the amenities of one of the rooms that is available for the desired -stay dates. - -Reservation Logical Data Model ------------------------------- - -Now let's switch gears to look at the reservation queries. The figure -shows a logical data model for reservations. You’ll notice that these -tables represent a denormalized design; the same data appears in -multiple tables, with differing keys. - -.. image:: images/data_modeling_reservation_logical.png - -In order to satisfy Q6, the ``reservations_by_guest`` table can be used -to look up the reservation by guest name. You could envision query Q7 -being used on behalf of a guest on a self-serve website or a call center -agent trying to assist the guest. Because the guest name might not be -unique, you include the guest ID here as a clustering column as well. - -Q8 and Q9 in particular help to remind you to create queries -that support various stakeholders of the application, not just customers -but staff as well, and perhaps even the analytics team, suppliers, and so -on. - -The hotel staff might wish to see a record of upcoming reservations by -date in order to get insight into how the hotel is performing, such as -what dates the hotel is sold out or undersold. Q8 supports the retrieval -of reservations for a given hotel by date. - -Finally, you create a ``guests`` table. This provides a single -location that used to store guest information. In this case, you specify a -separate unique identifier for guest records, as it is not uncommon -for guests to have the same name. In many organizations, a customer -database such as the ``guests`` table would be part of a separate -customer management application, which is why other guest -access patterns were omitted from the example. - - -Patterns and Anti-Patterns --------------------------- - -As with other types of software design, there are some well-known -patterns and anti-patterns for data modeling in Cassandra. You’ve already -used one of the most common patterns in this hotel model—the wide -partition pattern. - -The **time series** pattern is an extension of the wide partition -pattern. In this pattern, a series of measurements at specific time -intervals are stored in a wide partition, where the measurement time is -used as part of the partition key. This pattern is frequently used in -domains including business analysis, sensor data management, and -scientific experiments. - -The time series pattern is also useful for data other than measurements. 
-Consider the example of a banking application. You could store each -customer’s balance in a row, but that might lead to a lot of read and -write contention as various customers check their balance or make -transactions. You’d probably be tempted to wrap a transaction around -writes just to protect the balance from being updated in error. In -contrast, a time series–style design would store each transaction as a -timestamped row and leave the work of calculating the current balance to -the application. - -One design trap that many new users fall into is attempting to use -Cassandra as a queue. Each item in the queue is stored with a timestamp -in a wide partition. Items are appended to the end of the queue and read -from the front, being deleted after they are read. This is a design that -seems attractive, especially given its apparent similarity to the time -series pattern. The problem with this approach is that the deleted items -are now :ref:`tombstones ` that Cassandra must scan past -in order to read from the front of the queue. Over time, a growing number -of tombstones begins to degrade read performance. - -The queue anti-pattern serves as a reminder that any design that relies -on the deletion of data is potentially a poorly performing design. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_physical.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_physical.rst.txt deleted file mode 100644 index 758400496..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_physical.rst.txt +++ /dev/null @@ -1,117 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Physical Data Modeling -====================== - -Once you have a logical data model defined, creating the physical model -is a relatively simple process. - -You walk through each of the logical model tables, assigning types to -each item. You can use any valid :ref:`CQL data type `, -including the basic types, collections, and user-defined types. You may -identify additional user-defined types that can be created to simplify -your design. - -After you’ve assigned data types, you analyze the model by performing -size calculations and testing out how the model works. You may make some -adjustments based on your findings. Once again let's cover the data -modeling process in more detail by working through an example. - -Before getting started, let’s look at a few additions to the Chebotko -notation for physical data models. To draw physical models, you need to -be able to add the typing information for each column. 
This figure -shows the addition of a type for each column in a sample table. - -.. image:: images/data_modeling_chebotko_physical.png - -The figure includes a designation of the keyspace containing each table -and visual cues for columns represented using collections and -user-defined types. Note the designation of static columns and -secondary index columns. There is no restriction on assigning these as -part of a logical model, but they are typically more of a physical data -modeling concern. - -Hotel Physical Data Model -------------------------- - -Now let’s get to work on the physical model. First, you need keyspaces -to contain the tables. To keep the design relatively simple, create a -``hotel`` keyspace to contain tables for hotel and availability -data, and a ``reservation`` keyspace to contain tables for reservation -and guest data. In a real system, you might divide the tables across even -more keyspaces in order to separate concerns. - -For the ``hotels`` table, use Cassandra’s ``text`` type to -represent the hotel’s ``id``. For the address, create an -``address`` user defined type. Use the ``text`` type to represent the -phone number, as there is considerable variance in the formatting of -numbers between countries. - -While it would make sense to use the ``uuid`` type for attributes such -as the ``hotel_id``, this document uses mostly ``text`` attributes as -identifiers, to keep the samples simple and readable. For example, a -common convention in the hospitality industry is to reference properties -by short codes like "AZ123" or "NY229". This example uses these values -for ``hotel_ids``, while acknowledging they are not necessarily globally -unique. - -You’ll find that it’s often helpful to use unique IDs to uniquely -reference elements, and to use these ``uuids`` as references in tables -representing other entities. This helps to minimize coupling between -different entity types. This may prove especially effective if you are -using a microservice architectural style for your application, in which -there are separate services responsible for each entity type. - -As you work to create physical representations of various tables in the -logical hotel data model, you use the same approach. The resulting design -is shown in this figure: - -.. image:: images/data_modeling_hotel_physical.png - -Note that the ``address`` type is also included in the design. It -is designated with an asterisk to denote that it is a user-defined type, -and has no primary key columns identified. This type is used in -the ``hotels`` and ``hotels_by_poi`` tables. - -User-defined types are frequently used to help reduce duplication of -non-primary key columns, as was done with the ``address`` -user-defined type. This can reduce complexity in the design. - -Remember that the scope of a UDT is the keyspace in which it is defined. -To use ``address`` in the ``reservation`` keyspace defined below -design, you’ll have to declare it again. This is just one of the many -trade-offs you have to make in data model design. - -Reservation Physical Data Model -------------------------------- - -Now, let’s examine reservation tables in the design. -Remember that the logical model contained three denormalized tables to -support queries for reservations by confirmation number, guest, and -hotel and date. For the first iteration of your physical data model -design, assume you're going to manage this denormalization -manually. 
Note that this design could be revised to use Cassandra’s -(experimental) materialized view feature. - -.. image:: images/data_modeling_reservation_physical.png - -Note that the ``address`` type is reproduced in this keyspace and -``guest_id`` is modeled as a ``uuid`` type in all of the tables. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_queries.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_queries.rst.txt deleted file mode 100644 index d0119944f..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_queries.rst.txt +++ /dev/null @@ -1,85 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Defining Application Queries -============================ - -Let’s try the query-first approach to start designing the data model for -a hotel application. The user interface design for the application is -often a great artifact to use to begin identifying queries. Let’s assume -that you’ve talked with the project stakeholders and your UX designers -have produced user interface designs or wireframes for the key use -cases. You’ll likely have a list of shopping queries like the following: - -- Q1. Find hotels near a given point of interest. - -- Q2. Find information about a given hotel, such as its name and - location. - -- Q3. Find points of interest near a given hotel. - -- Q4. Find an available room in a given date range. - -- Q5. Find the rate and amenities for a room. - -It is often helpful to be able to refer -to queries by a shorthand number rather that explaining them in full. -The queries listed here are numbered Q1, Q2, and so on, which is how they -are referenced in diagrams throughout the example. - -Now if the application is to be a success, you’ll certainly want -customers to be able to book reservations at hotels. This includes -steps such as selecting an available room and entering their guest -information. So clearly you will also need some queries that address the -reservation and guest entities from the conceptual data model. Even -here, however, you’ll want to think not only from the customer -perspective in terms of how the data is written, but also in terms of -how the data will be queried by downstream use cases. - -You natural tendency as might be to focus first on -designing the tables to store reservation and guest records, and only -then start thinking about the queries that would access them. 
You may -have felt a similar tension already when discussing the -shopping queries before, thinking “but where did the hotel and point of -interest data come from?” Don’t worry, you will see soon enough. -Here are some queries that describe how users will access -reservations: - -- Q6. Lookup a reservation by confirmation number. - -- Q7. Lookup a reservation by hotel, date, and guest name. - -- Q8. Lookup all reservations by guest name. - -- Q9. View guest details. - -All of the queries are shown in the context of the workflow of the -application in the figure below. Each box on the diagram represents a -step in the application workflow, with arrows indicating the flows -between steps and the associated query. If you’ve modeled the application -well, each step of the workflow accomplishes a task that “unlocks” -subsequent steps. For example, the “View hotels near POI” task helps -the application learn about several hotels, including their unique keys. -The key for a selected hotel may be used as part of Q2, in order to -obtain detailed description of the hotel. The act of booking a room -creates a reservation record that may be accessed by the guest and -hotel staff at a later time through various additional queries. - -.. image:: images/data_modeling_hotel_queries.png - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_rdbms.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_rdbms.rst.txt deleted file mode 100644 index 7d67d69fc..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_rdbms.rst.txt +++ /dev/null @@ -1,171 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -RDBMS Design -============ - -When you set out to build a new data-driven application that will use a -relational database, you might start by modeling the domain as a set of -properly normalized tables and use foreign keys to reference related -data in other tables. - -The figure below shows how you might represent the data storage for your application -using a relational database model. The relational model includes a -couple of “join” tables in order to realize the many-to-many -relationships from the conceptual model of hotels-to-points of interest, -rooms-to-amenities, rooms-to-availability, and guests-to-rooms (via a -reservation). - -.. image:: images/data_modeling_hotel_relational.png - -.. 
design_differences_between_rdbms_and_cassandra
-
-Design Differences Between RDBMS and Cassandra
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Let’s take a minute to highlight some of the key differences in doing
-data modeling for Cassandra versus a relational database.
-
-No joins
-~~~~~~~~
-
-You cannot perform joins in Cassandra. If you have designed a data model
-and find that you need something like a join, you’ll have to either do
-the work on the client side, or create a denormalized second table that
-represents the join results for you. This latter option is preferred in
-Cassandra data modeling. Performing joins on the client should be a very
-rare case; you really want to duplicate (denormalize) the data instead.
-
-No referential integrity
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Although Cassandra supports features such as lightweight transactions
-and batches, Cassandra itself has no concept of referential integrity
-across tables. In a relational database, you could specify foreign keys
-in a table to reference the primary key of a record in another table.
-But Cassandra does not enforce this. It is still a common design
-requirement to store IDs related to other entities in your tables, but
-operations such as cascading deletes are not available.
-
-Denormalization
-~~~~~~~~~~~~~~~
-
-In relational database design, you are often taught the importance of
-normalization. This is not an advantage when working with Cassandra
-because it performs best when the data model is denormalized. It is
-often the case that companies end up denormalizing data in relational
-databases as well. There are two common reasons for this. One is
-performance. Companies simply can’t get the performance they need when
-they have to do so many joins on years’ worth of data, so they
-denormalize along the lines of known queries. This ends up working, but
-goes against the grain of how relational databases are intended to be
-designed, and ultimately makes one question whether using a relational
-database is the best approach in these circumstances.
-
-A second reason that relational databases get denormalized on purpose is
-a business document structure that requires retention. That is, you have
-an enclosing table that refers to a lot of external tables whose data
-could change over time, but you need to preserve the enclosing document
-as a snapshot in history. The common example here is with invoices. You
-already have customer and product tables, and you’d think that you could
-just make an invoice that refers to those tables. But this should never
-be done in practice. Customer or price information could change, and
-then you would lose the integrity of the invoice document as it was on
-the invoice date, which could violate audits, reports, or laws, and
-cause other problems.
-
-In the relational world, denormalization violates Codd’s normal forms,
-and you try to avoid it. But in Cassandra, denormalization is, well,
-perfectly normal. It’s not required if your data model is simple. But
-don’t be afraid of it.
-
-Historically, denormalization in Cassandra has required designing and
-managing multiple tables using techniques described in this documentation.
-Beginning with the 3.0 release, Cassandra provides a feature known
-as :ref:`materialized views `
-which allows you to create multiple denormalized
-views of data based on a base table design. Cassandra manages
-materialized views on the server, including the work of keeping the
-views in sync with the table. A brief sketch of what this can look like
-is shown below.
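As a purely illustrative sketch (the view name and key choice here are hypothetical, and materialized views remain experimental in the 4.0 release), a denormalized view over the ``reservations_by_confirmation`` table defined later in this guide might look like::

    CREATE MATERIALIZED VIEW reservation.reservations_by_guest_id AS
        SELECT * FROM reservation.reservations_by_confirmation
        WHERE guest_id IS NOT NULL AND confirm_number IS NOT NULL
        PRIMARY KEY (guest_id, confirm_number);

Cassandra would then keep this view in sync with its base table on the server side, at the cost of additional write work.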
- -Query-first design -~~~~~~~~~~~~~~~~~~ - -Relational modeling, in simple terms, means that you start from the -conceptual domain and then represent the nouns in the domain in tables. -You then assign primary keys and foreign keys to model relationships. -When you have a many-to-many relationship, you create the join tables -that represent just those keys. The join tables don’t exist in the real -world, and are a necessary side effect of the way relational models -work. After you have all your tables laid out, you can start writing -queries that pull together disparate data using the relationships -defined by the keys. The queries in the relational world are very much -secondary. It is assumed that you can always get the data you want as -long as you have your tables modeled properly. Even if you have to use -several complex subqueries or join statements, this is usually true. - -By contrast, in Cassandra you don’t start with the data model; you start -with the query model. Instead of modeling the data first and then -writing queries, with Cassandra you model the queries and let the data -be organized around them. Think of the most common query paths your -application will use, and then create the tables that you need to -support them. - -Detractors have suggested that designing the queries first is overly -constraining on application design, not to mention database modeling. -But it is perfectly reasonable to expect that you should think hard -about the queries in your application, just as you would, presumably, -think hard about your relational domain. You may get it wrong, and then -you’ll have problems in either world. Or your query needs might change -over time, and then you’ll have to work to update your data set. But -this is no different from defining the wrong tables, or needing -additional tables, in an RDBMS. - -Designing for optimal storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In a relational database, it is frequently transparent to the user how -tables are stored on disk, and it is rare to hear of recommendations -about data modeling based on how the RDBMS might store tables on disk. -However, that is an important consideration in Cassandra. Because -Cassandra tables are each stored in separate files on disk, it’s -important to keep related columns defined together in the same table. - -A key goal that you will see as you begin creating data models in -Cassandra is to minimize the number of partitions that must be searched -in order to satisfy a given query. Because the partition is a unit of -storage that does not get divided across nodes, a query that searches a -single partition will typically yield the best performance. - -Sorting is a design decision -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In an RDBMS, you can easily change the order in which records are -returned to you by using ``ORDER BY`` in your query. The default sort -order is not configurable; by default, records are returned in the order -in which they are written. If you want to change the order, you just -modify your query, and you can sort by any list of columns. - -In Cassandra, however, sorting is treated differently; it is a design -decision. The sort order available on queries is fixed, and is -determined entirely by the selection of clustering columns you supply in -the ``CREATE TABLE`` command. The CQL ``SELECT`` statement does support -``ORDER BY`` semantics, but only in the order specified by the -clustering columns. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. 
Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_refining.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_refining.rst.txt deleted file mode 100644 index 13a276ed7..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_refining.rst.txt +++ /dev/null @@ -1,218 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. role:: raw-latex(raw) - :format: latex -.. - -Evaluating and Refining Data Models -=================================== - -Once you’ve created a physical model, there are some steps you’ll want -to take to evaluate and refine table designs to help ensure optimal -performance. - -Calculating Partition Size --------------------------- - -The first thing that you want to look for is whether your tables will have -partitions that will be overly large, or to put it another way, too -wide. Partition size is measured by the number of cells (values) that -are stored in the partition. Cassandra’s hard limit is 2 billion cells -per partition, but you’ll likely run into performance issues before -reaching that limit. - -In order to calculate the size of partitions, use the following -formula: - -.. math:: N_v = N_r (N_c - N_{pk} - N_s) + N_s - -The number of values (or cells) in the partition (N\ :sub:`v`) is equal to -the number of static columns (N\ :sub:`s`) plus the product of the number -of rows (N\ :sub:`r`) and the number of of values per row. The number of -values per row is defined as the number of columns (N\ :sub:`c`) minus the -number of primary key columns (N\ :sub:`pk`) and static columns -(N\ :sub:`s`). - -The number of columns tends to be relatively static, although it -is possible to alter tables at runtime. For this reason, a -primary driver of partition size is the number of rows in the partition. -This is a key factor that you must consider in determining whether a -partition has the potential to get too large. Two billion values sounds -like a lot, but in a sensor system where tens or hundreds of values are -measured every millisecond, the number of values starts to add up pretty -fast. - -Let’s take a look at one of the tables to analyze the partition size. -Because it has a wide partition design with one partition per hotel, -look at the ``available_rooms_by_hotel_date`` table. The table has -four columns total (N\ :sub:`c` = 4), including three primary key columns -(N\ :sub:`pk` = 3) and no static columns (N\ :sub:`s` = 0). Plugging these -values into the formula, the result is: - -.. math:: N_v = N_r (4 - 3 - 0) + 0 = 1N_r - -Therefore the number of values for this table is equal to the number of -rows. 
You still need to determine a number of rows. To do this, make -estimates based on the application design. The table is -storing a record for each room, in each of hotel, for every night. -Let's assume the system will be used to store two years of -inventory at a time, and there are 5,000 hotels in the system, with an -average of 100 rooms in each hotel. - -Since there is a partition for each hotel, the estimated number of rows -per partition is as follows: - -.. math:: N_r = 100 rooms/hotel \times 730 days = 73,000 rows - -This relatively small number of rows per partition is not going to get -you in too much trouble, but if you start storing more dates of inventory, -or don’t manage the size of the inventory well using TTL, you could start -having issues. You still might want to look at breaking up this large -partition, which you'll see how to do shortly. - -When performing sizing calculations, it is tempting to assume the -nominal or average case for variables such as the number of rows. -Consider calculating the worst case as well, as these sorts of -predictions have a way of coming true in successful systems. - -Calculating Size on Disk ------------------------- - -In addition to calculating the size of a partition, it is also an -excellent idea to estimate the amount of disk space that will be -required for each table you plan to store in the cluster. In order to -determine the size, use the following formula to determine the size -S\ :sub:`t` of a partition: - -.. math:: S_t = \displaystyle\sum_i sizeOf\big (c_{k_i}\big) + \displaystyle\sum_j sizeOf\big(c_{s_j}\big) + N_r\times \bigg(\displaystyle\sum_k sizeOf\big(c_{r_k}\big) + \displaystyle\sum_l sizeOf\big(c_{c_l}\big)\bigg) + - -.. math:: N_v\times sizeOf\big(t_{avg}\big) - -This is a bit more complex than the previous formula, but let's break it -down a bit at a time. Let’s take a look at the notation first: - -- In this formula, c\ :sub:`k` refers to partition key columns, - c\ :sub:`s` to static columns, c\ :sub:`r` to regular columns, and - c\ :sub:`c` to clustering columns. - -- The term t\ :sub:`avg` refers to the average number of bytes of - metadata stored per cell, such as timestamps. It is typical to use an - estimate of 8 bytes for this value. - -- You'll recognize the number of rows N\ :sub:`r` and number of values - N\ :sub:`v` from previous calculations. - -- The **sizeOf()** function refers to the size in bytes of the CQL data - type of each referenced column. - -The first term asks you to sum the size of the partition key columns. For -this example, the ``available_rooms_by_hotel_date`` table has a single -partition key column, the ``hotel_id``, which is of type -``text``. Assuming that hotel identifiers are simple 5-character codes, -you have a 5-byte value, so the sum of the partition key column sizes is -5 bytes. - -The second term asks you to sum the size of the static columns. This table -has no static columns, so the size is 0 bytes. - -The third term is the most involved, and for good reason—it is -calculating the size of the cells in the partition. Sum the size of -the clustering columns and regular columns. The two clustering columns -are the ``date``, which is 4 bytes, and the ``room_number``, -which is a 2-byte short integer, giving a sum of 6 bytes. -There is only a single regular column, the boolean ``is_available``, -which is 1 byte in size. Summing the regular column size -(1 byte) plus the clustering column size (6 bytes) gives a total of 7 -bytes. 
To finish up the term, multiply this value by the number of
-rows (73,000), giving a result of 511,000 bytes (0.51 MB).
-
-The fourth term is simply counting the metadata that Cassandra
-stores for each cell. In the storage format used by Cassandra 3.0 and
-later, the amount of metadata for a given cell varies based on the type
-of data being stored, and whether or not custom timestamp or TTL values
-are specified for individual cells. For this table, reuse the number
-of values from the previous calculation (73,000) and multiply by 8,
-which gives 0.58 MB.
-
-Adding these terms together, you get a final estimate:
-
-.. math:: Partition size = 5 bytes + 0 bytes + 0.51 MB + 0.58 MB = 1.1 MB
-
-This formula is an approximation of the actual size of a partition on
-disk, but is accurate enough to be quite useful. Remembering that the
-partition must be able to fit on a single node, it looks like the table
-design will not put a lot of strain on disk storage.
-
-Cassandra’s storage engine was re-implemented for the 3.0 release,
-including a new format for SSTable files. The previous format stored a
-separate copy of the clustering columns as part of the record for each
-cell. The newer format eliminates this duplication, which reduces the
-size of stored data and simplifies the formula for computing that size.
-
-Keep in mind also that this estimate only counts a single replica of
-data. You will need to multiply the value obtained here by the number of
-partitions and the number of replicas specified by the keyspace’s
-replication strategy in order to determine the total capacity
-required for each table. This will come in handy when you
-plan your cluster.
-
-Breaking Up Large Partitions
-----------------------------
-
-As discussed previously, the goal is to design tables that can provide
-the data you need with queries that touch a single partition, or failing
-that, the minimum possible number of partitions. However, as shown in
-the examples, it is quite possible to design wide
-partition-style tables that approach Cassandra’s built-in limits.
-Performing sizing analysis on tables may reveal partitions that are
-potentially too large, either in number of values, size on disk, or
-both.
-
-The technique for splitting a large partition is straightforward: add an
-additional column to the partition key. In most cases, moving one of the
-existing columns into the partition key will be sufficient. Another
-option is to introduce an additional column to the table to act as a
-sharding key, but this requires additional application logic.
-
-Continuing to examine the available rooms example, if you add the ``date``
-column to the partition key for the ``available_rooms_by_hotel_date``
-table, each partition would then represent the availability of rooms
-at a specific hotel on a specific date. This will certainly yield
-partitions that are significantly smaller, perhaps too small, as the
-data for consecutive days will likely be on separate nodes.
-
-Another technique known as **bucketing** is often used to break the data
-into moderate-size partitions. For example, you could bucketize the
-``available_rooms_by_hotel_date`` table by adding a ``month`` column to
-the partition key, perhaps represented as an integer. The comparison
-with the original design is shown in the figure below. While the
-``month`` column is partially duplicative of the ``date``, it provides
-a nice way of grouping related data in a partition that will not get
-too large.
-
-..
image:: images/data_modeling_hotel_bucketing.png - -If you really felt strongly about preserving a wide partition design, you -could instead add the ``room_id`` to the partition key, so that each -partition would represent the availability of the room across all -dates. Because there was no query identified that involves searching -availability of a specific room, the first or second design approach -is most suitable to the application needs. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_schema.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_schema.rst.txt deleted file mode 100644 index 1876ec3fa..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_schema.rst.txt +++ /dev/null @@ -1,144 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Defining Database Schema -======================== - -Once you have finished evaluating and refining the physical model, you’re -ready to implement the schema in CQL. Here is the schema for the -``hotel`` keyspace, using CQL’s comment feature to document the query -pattern supported by each table:: - - CREATE KEYSPACE hotel WITH replication = - {‘class’: ‘SimpleStrategy’, ‘replication_factor’ : 3}; - - CREATE TYPE hotel.address ( - street text, - city text, - state_or_province text, - postal_code text, - country text ); - - CREATE TABLE hotel.hotels_by_poi ( - poi_name text, - hotel_id text, - name text, - phone text, - address frozen
, - PRIMARY KEY ((poi_name), hotel_id) ) - WITH comment = ‘Q1. Find hotels near given poi’ - AND CLUSTERING ORDER BY (hotel_id ASC) ; - - CREATE TABLE hotel.hotels ( - id text PRIMARY KEY, - name text, - phone text, - address frozen
, - pois set ) - WITH comment = ‘Q2. Find information about a hotel’; - - CREATE TABLE hotel.pois_by_hotel ( - poi_name text, - hotel_id text, - description text, - PRIMARY KEY ((hotel_id), poi_name) ) - WITH comment = Q3. Find pois near a hotel’; - - CREATE TABLE hotel.available_rooms_by_hotel_date ( - hotel_id text, - date date, - room_number smallint, - is_available boolean, - PRIMARY KEY ((hotel_id), date, room_number) ) - WITH comment = ‘Q4. Find available rooms by hotel date’; - - CREATE TABLE hotel.amenities_by_room ( - hotel_id text, - room_number smallint, - amenity_name text, - description text, - PRIMARY KEY ((hotel_id, room_number), amenity_name) ) - WITH comment = ‘Q5. Find amenities for a room’; - - -Notice that the elements of the partition key are surrounded -with parentheses, even though the partition key consists -of the single column ``poi_name``. This is a best practice that makes -the selection of partition key more explicit to others reading your CQL. - -Similarly, here is the schema for the ``reservation`` keyspace:: - - CREATE KEYSPACE reservation WITH replication = {‘class’: - ‘SimpleStrategy’, ‘replication_factor’ : 3}; - - CREATE TYPE reservation.address ( - street text, - city text, - state_or_province text, - postal_code text, - country text ); - - CREATE TABLE reservation.reservations_by_confirmation ( - confirm_number text, - hotel_id text, - start_date date, - end_date date, - room_number smallint, - guest_id uuid, - PRIMARY KEY (confirm_number) ) - WITH comment = ‘Q6. Find reservations by confirmation number’; - - CREATE TABLE reservation.reservations_by_hotel_date ( - hotel_id text, - start_date date, - end_date date, - room_number smallint, - confirm_number text, - guest_id uuid, - PRIMARY KEY ((hotel_id, start_date), room_number) ) - WITH comment = ‘Q7. Find reservations by hotel and date’; - - CREATE TABLE reservation.reservations_by_guest ( - guest_last_name text, - hotel_id text, - start_date date, - end_date date, - room_number smallint, - confirm_number text, - guest_id uuid, - PRIMARY KEY ((guest_last_name), hotel_id) ) - WITH comment = ‘Q8. Find reservations by guest name’; - - CREATE TABLE reservation.guests ( - guest_id uuid PRIMARY KEY, - first_name text, - last_name text, - title text, - emails set, - phone_numbers list, - addresses map, - confirm_number text ) - WITH comment = ‘Q9. Find guest by ID’; - -You now have a complete Cassandra schema for storing data for a hotel -application. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_tools.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_tools.rst.txt deleted file mode 100644 index 46fad3346..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/data_modeling_tools.rst.txt +++ /dev/null @@ -1,64 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Data Modeling Tools -============================= - -There are several tools available to help you design and -manage your Cassandra schema and build queries. - -* `Hackolade `_ - is a data modeling tool that supports schema design for Cassandra and - many other NoSQL databases. Hackolade supports the unique concepts of - CQL such as partition keys and clustering columns, as well as data types - including collections and UDTs. It also provides the ability to create - Chebotko diagrams. - -* `Kashlev Data Modeler `_ is a Cassandra - data modeling tool that automates the data modeling methodology - described in this documentation, including identifying - access patterns, conceptual, logical, and physical data modeling, and - schema generation. It also includes model patterns that you can - optionally leverage as a starting point for your designs. - -* DataStax DevCenter is a tool for managing - schema, executing queries and viewing results. While the tool is no - longer actively supported, it is still popular with many developers and - is available as a `free download `_. - DevCenter features syntax highlighting for CQL commands, types, and name - literals. DevCenter provides command completion as you type out CQL - commands and interprets the commands you type, highlighting any errors - you make. The tool provides panes for managing multiple CQL scripts and - connections to multiple clusters. The connections are used to run CQL - commands against live clusters and view the results. The tool also has a - query trace feature that is useful for gaining insight into the - performance of your queries. - -* IDE Plugins - There are CQL plugins available for several Integrated - Development Environments (IDEs), such as IntelliJ IDEA and Apache - NetBeans. These plugins typically provide features such as schema - management and query execution. - -Some IDEs and tools that claim to support Cassandra do not actually support -CQL natively, but instead access Cassandra using a JDBC/ODBC driver and -interact with Cassandra as if it were a relational database with SQL -support. Wnen selecting tools for working with Cassandra you’ll want to -make sure they support CQL and reinforce Cassandra best practices for -data modeling as presented in this documentation. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/index.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/index.rst.txt deleted file mode 100644 index 2f799dc32..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/index.rst.txt +++ /dev/null @@ -1,36 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. 
http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Data Modeling -************* - -.. toctree:: - :maxdepth: 2 - - intro - data_modeling_conceptual - data_modeling_rdbms - data_modeling_queries - data_modeling_logical - data_modeling_physical - data_modeling_refining - data_modeling_schema - data_modeling_tools - - - - - diff --git a/src/doc/4.0-alpha4/_sources/data_modeling/intro.rst.txt b/src/doc/4.0-alpha4/_sources/data_modeling/intro.rst.txt deleted file mode 100644 index 630a7d1b5..000000000 --- a/src/doc/4.0-alpha4/_sources/data_modeling/intro.rst.txt +++ /dev/null @@ -1,146 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Introduction -============ - -Apache Cassandra stores data in tables, with each table consisting of rows and columns. CQL (Cassandra Query Language) is used to query the data stored in tables. Apache Cassandra data model is based around and optimized for querying. Cassandra does not support relational data modeling intended for relational databases. - -What is Data Modeling? -^^^^^^^^^^^^^^^^^^^^^^ - -Data modeling is the process of identifying entities and their relationships. In relational databases, data is placed in normalized tables with foreign keys used to reference related data in other tables. Queries that the application will make are driven by the structure of the tables and related data are queried as table joins. - -In Cassandra, data modeling is query-driven. The data access patterns and application queries determine the structure and organization of data which then used to design the database tables. - -Data is modeled around specific queries. Queries are best designed to access a single table, which implies that all entities involved in a query must be in the same table to make data access (reads) very fast. Data is modeled to best suit a query or a set of queries. A table could have one or more entities as best suits a query. As entities do typically have relationships among them and queries could involve entities with relationships among them, a single entity may be included in multiple tables. - -Query-driven modeling -^^^^^^^^^^^^^^^^^^^^^ - -Unlike a relational database model in which queries make use of table joins to get data from multiple tables, joins are not supported in Cassandra so all required fields (columns) must be grouped together in a single table. 
Since each query is backed by a table, data is duplicated across multiple tables in a process known as denormalization. Data duplication and a high write throughput are used to achieve a high read performance. - -Goals -^^^^^ - -The choice of the primary key and partition key is important to distribute data evenly across the cluster. Keeping the number of partitions read for a query to a minimum is also important because different partitions could be located on different nodes and the coordinator would need to send a request to each node adding to the request overhead and latency. Even if the different partitions involved in a query are on the same node, fewer partitions make for a more efficient query. - -Partitions -^^^^^^^^^^ - -Apache Cassandra is a distributed database that stores data across a cluster of nodes. A partition key is used to partition data among the nodes. Cassandra partitions data over the storage nodes using a variant of consistent hashing for data distribution. Hashing is a technique used to map data with which given a key, a hash function generates a hash value (or simply a hash) that is stored in a hash table. A partition key is generated from the first field of a primary key. Data partitioned into hash tables using partition keys provides for rapid lookup. Fewer the partitions used for a query faster is the response time for the query. - -As an example of partitioning, consider table ``t`` in which ``id`` is the only field in the primary key. - -:: - - CREATE TABLE t ( - id int, - k int, - v text, - PRIMARY KEY (id) - ); - -The partition key is generated from the primary key ``id`` for data distribution across the nodes in a cluster. - -Consider a variation of table ``t`` that has two fields constituting the primary key to make a composite or compound primary key. - -:: - - CREATE TABLE t ( - id int, - c text, - k int, - v text, - PRIMARY KEY (id,c) - ); - -For the table ``t`` with a composite primary key the first field ``id`` is used to generate the partition key and the second field ``c`` is the clustering key used for sorting within a partition. Using clustering keys to sort data makes retrieval of adjacent data more efficient. - -In general, the first field or component of a primary key is hashed to generate the partition key and the remaining fields or components are the clustering keys that are used to sort data within a partition. Partitioning data improves the efficiency of reads and writes. The other fields that are not primary key fields may be indexed separately to further improve query performance. - -The partition key could be generated from multiple fields if they are grouped as the first component of a primary key. As another variation of the table ``t``, consider a table with the first component of the primary key made of two fields grouped using parentheses. - -:: - - CREATE TABLE t ( - id1 int, - id2 int, - c1 text, - c2 text - k int, - v text, - PRIMARY KEY ((id1,id2),c1,c2) - ); - -For the preceding table ``t`` the first component of the primary key constituting fields ``id1`` and ``id2`` is used to generate the partition key and the rest of the fields ``c1`` and ``c2`` are the clustering keys used for sorting within a partition. - -Comparing with Relational Data Model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Relational databases store data in tables that have relations with other tables using foreign keys. A relational database’s approach to data modeling is table-centric. 
Queries must use table joins to get data from multiple tables that have a relation between them. Apache Cassandra does not have the concept of foreign keys or relational integrity. Apache Cassandra’s data model is based around designing efficient queries; queries that don’t involve multiple tables. Relational databases normalize data to avoid duplication. Apache Cassandra in contrast de-normalizes data by duplicating data in multiple tables for a query-centric data model. If a Cassandra data model cannot fully integrate the complexity of relationships between the different entities for a particular query, client-side joins in application code may be used. - -Examples of Data Modeling -^^^^^^^^^^^^^^^^^^^^^^^^^ - -As an example, a ``magazine`` data set consists of data for magazines with attributes such as magazine id, magazine name, publication frequency, publication date, and publisher. A basic query (Q1) for magazine data is to list all the magazine names including their publication frequency. As not all data attributes are needed for Q1 the data model would only consist of ``id`` ( for partition key), magazine name and publication frequency as shown in Figure 1. - -.. figure:: images/Figure_1_data_model.jpg - -Figure 1. Data Model for Q1 - -Another query (Q2) is to list all the magazine names by publisher. For Q2 the data model would consist of an additional attribute ``publisher`` for the partition key. The ``id`` would become the clustering key for sorting within a partition. Data model for Q2 is illustrated in Figure 2. - -.. figure:: images/Figure_2_data_model.jpg - -Figure 2. Data Model for Q2 - -Designing Schema -^^^^^^^^^^^^^^^^ - -After the conceptual data model has been created a schema may be designed for a query. For Q1 the following schema may be used. - -:: - - CREATE TABLE magazine_name (id int PRIMARY KEY, name text, publicationFrequency text) - -For Q2 the schema definition would include a clustering key for sorting. - -:: - - CREATE TABLE magazine_publisher (publisher text,id int,name text, publicationFrequency text, - PRIMARY KEY (publisher, id)) WITH CLUSTERING ORDER BY (id DESC) - -Data Model Analysis -^^^^^^^^^^^^^^^^^^^ - -The data model is a conceptual model that must be analyzed and optimized based on storage, capacity, redundancy and consistency. A data model may need to be modified as a result of the analysis. Considerations or limitations that are used in data model analysis include: - -- Partition Size -- Data Redundancy -- Disk space -- Lightweight Transactions (LWT) - -The two measures of partition size are the number of values in a partition and partition size on disk. Though requirements for these measures may vary based on the application a general guideline is to keep number of values per partition to below 100,000 and disk space per partition to below 100MB. - -Data redundancies as duplicate data in tables and multiple partition replicates are to be expected in the design of a data model , but nevertheless should be kept in consideration as a parameter to keep to the minimum. LWT transactions (compare-and-set, conditional update) could affect performance and queries using LWT should be kept to the minimum. - -Using Materialized Views -^^^^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: Materialized views (MVs) are experimental in the latest (4.0) release. - -Materialized views (MVs) could be used to implement multiple queries for a single table. 
A materialized view is a table built from data from another table, the base table, with new primary key and new properties. Changes to the base table data automatically add and update data in a MV. Different queries may be implemented using a materialized view as an MV's primary key differs from the base table. Queries are optimized by the primary key definition. diff --git a/src/doc/4.0-alpha4/_sources/development/ci.rst.txt b/src/doc/4.0-alpha4/_sources/development/ci.rst.txt deleted file mode 100644 index 77360aea9..000000000 --- a/src/doc/4.0-alpha4/_sources/development/ci.rst.txt +++ /dev/null @@ -1,72 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Jenkins CI Environment -********************** - -About CI testing and Apache Cassandra -===================================== - -Cassandra can be automatically tested using various test suites, that are either implemented based on JUnit or the `dtest `_ scripts written in Python. As outlined in :doc:`testing`, each kind of test suite addresses a different way how to test Cassandra. But in the end, all of them will be executed together on our CI platform at `builds.apache.org `_, running `Jenkins `_. - - - -Setting up your own Jenkins server -================================== - -Jenkins is an open source solution that can be installed on a large number of platforms. Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution. - -Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment. - -Required plugins ----------------- - -The following plugins need to be installed additionally to the standard plugins (git, ant, ..). - -You can install any missing plugins through the install manager. - -Go to ``Manage Jenkins -> Manage Plugins -> Available`` and install the following plugins and respective dependencies: - -* Job DSL -* Javadoc Plugin -* description setter plugin -* Throttle Concurrent Builds Plug-in -* Test stability history -* Hudson Post build task - - -Setup seed job --------------- - -Config ``New Item`` - -* Name it ``Cassandra-Job-DSL`` -* Select ``Freestyle project`` - -Under ``Source Code Management`` select Git using the repository: ``https://github.com/apache/cassandra-builds`` - -Under ``Build``, confirm ``Add build step`` -> ``Process Job DSLs`` and enter at ``Look on Filesystem``: ``jenkins-dsl/cassandra_job_dsl_seed.groovy`` - -Generated jobs will be created based on the Groovy script's default settings. 
You may want to override settings by checking ``This project is parameterized`` and adding a ``String Parameter`` for the variables that can be found at the top of the script. This will allow you to set up jobs for your own repository and branches (e.g. working branches).
-
-**When done, confirm "Save"**
-
-You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message `"Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use"`. Go to ``Manage Jenkins`` -> ``In-process Script Approval`` to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates.
-
-Jobs are either triggered by changes in Git or are scheduled to execute periodically, e.g. on a daily basis. Jenkins will use any available executor with the label "cassandra" once the job is to be run. Please make sure to make any executors available by selecting ``Build Executor Status`` -> ``Configure`` -> Add "``cassandra``" as label and save.
-
-Executors need to have "JDK 1.8 (latest)" installed. This is done under ``Manage Jenkins -> Global Tool Configuration -> JDK Installations…``. Executors also need to have the virtualenv package installed on their system.
-
diff --git a/src/doc/4.0-alpha4/_sources/development/code_style.rst.txt b/src/doc/4.0-alpha4/_sources/development/code_style.rst.txt
deleted file mode 100644
index 5a486a4a3..000000000
--- a/src/doc/4.0-alpha4/_sources/development/code_style.rst.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-.. highlight:: none
-
-Code Style
-==========
-
-General Code Conventions
-------------------------
-
- - The Cassandra project follows `Sun's Java coding conventions `_ with an important exception: ``{`` and ``}`` are always placed on a new line
-
-Exception handling
-------------------
-
- - Never ever write ``catch (...) {}`` or ``catch (...) { logger.error() }`` merely to satisfy Java's compile-time exception checking. Always propagate the exception up or throw ``RuntimeException`` (or, if it "can't happen," ``AssertionError``). This makes the exceptions visible to automated tests.
 - Avoid propagating up checked exceptions that no caller handles. Rethrow as ``RuntimeException`` (or ``IOError``, if that is more applicable); a brief sketch follows below.
 - Similarly, logger.warn() is often a cop-out: is this an error or not? If it is, don't hide it behind a warn; if it isn't, no need for the warning.
 - If you genuinely know an exception indicates an expected condition, it's okay to ignore it BUT this must be explicitly explained in a comment.
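As a purely illustrative sketch of the rethrow guidance above (the class and method are hypothetical, not Cassandra code), wrapping a checked exception that no caller handles might look like::

    import java.io.IOException;
    import java.nio.channels.FileChannel;

    public final class ChannelCloser
    {
        // Hypothetical helper: no caller handles IOException here, so rethrow it
        // unchecked rather than swallowing it or merely logging a warning.
        public static void close(FileChannel channel)
        {
            try
            {
                channel.close();
            }
            catch (IOException e)
            {
                // Keeps the failure visible to callers and to automated tests
                throw new RuntimeException(e);
            }
        }
    }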
- -Boilerplate ------------ - - - Avoid redundant ``@Override`` annotations when implementing abstract or interface methods. - - Do not implement equals or hashcode methods unless they are actually needed. - - Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in "real" methods to either.) - - Prefer requiring initialization in the constructor to setters. - - Avoid redundant ``this`` references to member fields or methods. - - Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it. - - Always include braces for nested levels of conditionals and loops. Only avoid braces for single level. - -Multiline statements --------------------- - - - Try to keep lines under 120 characters, but use good judgement -- it's better to exceed 120 by a little, than split a line that has no natural splitting points. - - When splitting inside a method call, use one line per parameter and align them, like this: - - :: - - SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), - columnFamilies.size(), - StorageService.getPartitioner()); - - - When splitting a ternary, use one line per clause, carry the operator, and align like this: - - :: - - var = bar == null - ? doFoo() - : doBar(); - -Whitespace ----------- - - - Please make sure to use 4 spaces instead of the tab character for all your indentation. - - Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn't have to pay attention to whitespace diffs. - -Imports -------- - -Please observe the following order for your imports:: - - java - [blank line] - com.google.common - org.apache.commons - org.junit - org.slf4j - [blank line] - everything else alphabetically - -Format files for IDEs ---------------------- - - - IntelliJ: `intellij-codestyle.jar `_ - - IntelliJ 13: `gist for IntelliJ 13 `_ (this is a work in progress, still working on javadoc, ternary style, line continuations, etc) - - Eclipse (https://github.com/tjake/cassandra-style-eclipse) - - - diff --git a/src/doc/4.0-alpha4/_sources/development/dependencies.rst.txt b/src/doc/4.0-alpha4/_sources/development/dependencies.rst.txt deleted file mode 100644 index 6dd1cc46b..000000000 --- a/src/doc/4.0-alpha4/_sources/development/dependencies.rst.txt +++ /dev/null @@ -1,53 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dependency Management -********************* - -Managing libraries for Cassandra is a bit less straight forward compared to other projects, as the build process is based on ant, maven and manually managed jars. 
Make sure to follow the steps below carefully and, in case of any project dependency changes, pay attention to any emerging issues in the :doc:`ci` and to related issues reported on Jira/ML.
-
-As Cassandra is an Apache product, all included libraries must follow Apache's `software license requirements `_.
-
-Required steps to add or update libraries
-=========================================
-
-* Add or replace jar file in ``lib`` directory
-* Add or update ``lib/license`` files
-* Update dependencies in ``build.xml``
-
- * Add to ``parent-pom`` with correct version
- * Add to ``all-pom`` if simple Cassandra dependency (see below)
-
-
-POM file types
-==============
-
-* **parent-pom** - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here.
-* **build-deps-pom(-sources)** + **coverage-deps-pom** - used by ``ant build`` compile target. Listed dependencies will be resolved and copied to ``build/lib/{jar,sources}`` by executing the ``maven-ant-tasks-retrieve-build`` target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution.
-* **test-deps-pom** - referred to by ``maven-ant-tasks-retrieve-test`` to retrieve and save dependencies to ``build/test/lib``. Exclusively used during JUnit test execution.
-* **all-pom** - pom for `cassandra-all.jar `_ that can be installed or deployed to public maven repos via ``ant publish``
-
-
-Troubleshooting and conflict resolution
-=======================================
-
-Here are some useful commands that may help you resolve conflicts.
-
-* ``ant realclean`` - gets rid of the build directory, including build artifacts.
-* ``mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j`` - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ``ant mvn-install``.
-* ``rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/`` - removes cached local Cassandra maven artifacts
-
-
diff --git a/src/doc/4.0-alpha4/_sources/development/documentation.rst.txt b/src/doc/4.0-alpha4/_sources/development/documentation.rst.txt
deleted file mode 100644
index c623d54b9..000000000
--- a/src/doc/4.0-alpha4/_sources/development/documentation.rst.txt
+++ /dev/null
@@ -1,104 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-..
-.. http://www.apache.org/licenses/LICENSE-2.0
-..
-.. Unless required by applicable law or agreed to in writing, software
-.. distributed under the License is distributed on an "AS IS" BASIS,
-.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-.. See the License for the specific language governing permissions and
-.. limitations under the License.
-
-
-Working on Documentation
-*************************
-
-How Cassandra is documented
-===========================
-
-The official Cassandra documentation lives in the project's git repository. We use a static site generator, `Sphinx `_, to create pages hosted at `cassandra.apache.org `_.
You'll also find developer-centric content about Cassandra internals in our retired `wiki `_ (not covered by this guide).
-
-Using a static site generator often requires using a markup language instead of visual editors (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses `reStructuredText `_ for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at `existing documents <..>`_ to get a better idea of how we use reStructuredText to write our documents.
-
-So how do you actually start making contributions?
-
-GitHub based work flow
-======================
-
-*Recommended for shorter documents and minor changes to existing content (e.g. fixing typos or updating descriptions)*
-
-Follow these steps to contribute using GitHub. It's assumed that you're logged in with an existing account.
-
-1. Fork the GitHub mirror of the `Cassandra repository `_
-
-.. image:: images/docs_fork.png
-
-2. Create a new branch that you can use to make your edits. It's recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later when you decide you’re ready to contribute your work.
-
-.. image:: images/docs_create_branch.png
-
-3. Navigate to document sources ``doc/source`` to find the ``.rst`` file to edit. The URL of the document should correspond to the directory structure. New files can be created using the "Create new file" button:
-
-.. image:: images/docs_create_file.png
-
-4. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and adding some content. Have a look at other existing ``.rst`` files to get a better idea of what format elements to use.
-
-.. image:: images/docs_editor.png
-
-Make sure to preview added content before committing any changes.
-
-.. image:: images/docs_preview.png
-
-5. Commit your work when you're done. Make sure to add a short description of all your edits since your last commit.
-
-.. image:: images/docs_commit.png
-
-6. Finally, if you decide that you're done working on your branch, it's time to create a pull request!
-
-.. image:: images/docs_pr.png
-
-Afterwards the GitHub Cassandra mirror will list your pull request and you're done. Congratulations! Please give us some time to look at your suggested changes before we get back to you.
-
-
-Jira based work flow
-====================
-
-*Recommended for major changes*
-
-Significant changes to the documentation are best managed through our Jira issue tracker. Please follow the same `contribution guides `_ as for regular code contributions. Creating high quality content takes a lot of effort. It’s therefore always a good idea to create a ticket before you start and explain what you’re planning to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed.
-
-Working on documents locally using Sphinx
-=========================================
-
-*Recommended for advanced editing*
-
-Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefore it’s a good idea to set up Sphinx locally for any serious editing.
Please follow the instructions in the Cassandra source directory at ``doc/README.md``. Setup is very easy (at least on OSX and Linux). - -Notes for committers -==================== - -Please feel free to get involved and merge pull requests created on the GitHub mirror if you're a committer. As this is a read-only repository, you won't be able to merge a PR directly on GitHub. You'll have to commit the changes against the Apache repository with a comment that will close the PR when the committ syncs with GitHub. - -You may use a git work flow like this:: - - git remote add github https://github.com/apache/cassandra.git - git fetch github pull//head: - git checkout - -Now either rebase or squash the commit, e.g. for squashing:: - - git reset --soft origin/trunk - git commit --author - -Make sure to add a proper commit message including a "Closes #" text to automatically close the PR. - -Publishing ----------- - -Details for building and publishing of the site at cassandra.apache.org can be found `here `_. - diff --git a/src/doc/4.0-alpha4/_sources/development/gettingstarted.rst.txt b/src/doc/4.0-alpha4/_sources/development/gettingstarted.rst.txt deleted file mode 100644 index c2f5ef36e..000000000 --- a/src/doc/4.0-alpha4/_sources/development/gettingstarted.rst.txt +++ /dev/null @@ -1,60 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _gettingstarted: - -Getting Started -************************* - -Initial Contributions -======================== - -Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). So, before firing up your IDE to create that new feature, we'd suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work. - * Add to or update the documentation - * Answer questions on the user list - * Review and test a submitted patch - * Investigate and fix a reported bug - * Create unit tests and d-tests - -Updating documentation -======================== - -The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (:ref:`patches`). - -Answering questions on the user list -==================================== - -Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! -See the `community `_ page for details on how to subscribe to the mailing list. 
- -Reviewing and testing a submitted patch -======================================= - -Reviewing patches is not the sole domain of committers; if others have reviewed a patch, it reduces the load on the committers, allowing them to write more great features or review more patches. Follow the instructions in :ref:`_development_how_to_review` or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, "I tested this performance enhancement on our application's standard production load test and found a 3% improvement.") - -Investigate and/or fix a reported bug -===================================== - -Often, the hardest work in fixing a bug is reproducing it. Even if you don't have the knowledge to produce a fix, figuring out a way to reliably reproduce an issue can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (:ref:`patches`). - -Create unit tests and Dtests -============================ - -Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your change introducing new issues. See :ref:`testing` and :ref:`patches` for more detail. - - - diff --git a/src/doc/4.0-alpha4/_sources/development/how_to_commit.rst.txt b/src/doc/4.0-alpha4/_sources/development/how_to_commit.rst.txt deleted file mode 100644 index dff39832d..000000000 --- a/src/doc/4.0-alpha4/_sources/development/how_to_commit.rst.txt +++ /dev/null @@ -1,75 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -How-to Commit -============= - -If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself. - -Here is how committing and merging will usually look when merging and pushing tickets that follow the convention (if patch-based): - -The hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch). - -On cassandra-3.0: - #. 
``git am -3 12345-3.0.patch`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git apply -3 12345-3.3.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git apply -3 12345-trunk.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk -atomic`` - -Same scenario, but a branch-based contribution: - -On cassandra-3.0: - #. ``git cherry-pick `` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk -atomic`` - -.. tip:: - - Notes on git flags: - ``-3`` flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply. - - ``-atomic`` flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per each branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue. - -.. tip:: - - The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. - curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch - diff --git a/src/doc/4.0-alpha4/_sources/development/how_to_review.rst.txt b/src/doc/4.0-alpha4/_sources/development/how_to_review.rst.txt deleted file mode 100644 index 4778b6946..000000000 --- a/src/doc/4.0-alpha4/_sources/development/how_to_review.rst.txt +++ /dev/null @@ -1,73 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. 
_how_to_review: - -Review Checklist -**************** - -When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process: - -**General** - - * Does it conform to the :doc:`code_style` guidelines? - * Is there any redundant or duplicate code? - * Is the code as modular as possible? - * Can any singletons be avoided? - * Can any of the code be replaced with library functions? - * Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem? - -**Error-Handling** - - * Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded? - * Where third-party utilities are used, are returning errors being caught? - * Are invalid parameter values handled? - * Are any Throwable/Exceptions passed to the JVMStabilityInspector? - * Are errors well-documented? Does the error message tell the user how to proceed? - * Do exceptions propagate to the appropriate level in the code? - -**Documentation** - - * Do comments exist and describe the intent of the code (the "why", not the "how")? - * Are javadocs added where appropriate? - * Is any unusual behavior or edge-case handling described? - * Are data structures and units of measurement explained? - * Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’? - * Does the code self-document via clear naming, abstractions, and flow control? - * Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed? - * Is the ticket tagged with "client-impacting" and "doc-impacting", where appropriate? - * Has lib/licences been updated for third-party libs? Are they Apache License compatible? - * Is the Component on the JIRA ticket set appropriately? - -**Testing** - - * Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc. - * Do tests exist and are they comprehensive? - * Do unit tests actually test that the code is performing the intended functionality? - * Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse? - * If the code may be affected by multi-node clusters, are there dtests? - * If the code may take a long time to test properly, are there CVH tests? - * Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions? - * If patch affects read/write path, did we test for performance regressions w/multiple workloads? - * If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature? - -**Logging** - - * Are logging statements logged at the correct level? - * Are there logs in the critical path that could affect performance? - * Is there any log that could be added to communicate status or troubleshoot potential problems in this feature? - * Can any unnecessary logging statement be removed? - diff --git a/src/doc/4.0-alpha4/_sources/development/ide.rst.txt b/src/doc/4.0-alpha4/_sources/development/ide.rst.txt deleted file mode 100644 index 97c73ae61..000000000 --- a/src/doc/4.0-alpha4/_sources/development/ide.rst.txt +++ /dev/null @@ -1,185 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. 
to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Building and IDE Integration -**************************** - -Building From Source -==================== - -Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using `Java 8 `_, `Git `_ and `Ant `_. - -The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:: - - git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk - -Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:: - - git checkout cassandra-3.0 - -You can get a list of available branches with ``git branch``. - -Finally build Cassandra using ant:: - - ant - -This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled. - -.. hint:: - - You can setup multiple working trees for different Cassandra versions from the same repository using `git-worktree `_. - -| - -Setting up Cassandra in IntelliJ IDEA -===================================== - -`IntelliJ IDEA `_ by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra. - -Setup Cassandra as a Project (C* 2.1 and newer) ------------------------------------------------ - -Since 2.1.5, there is a new ant target: ``generate-idea-files``. Please see our `wiki `_ for instructions for older Cassandra versions. - -Please clone and build Cassandra as described above and execute the following steps: - -1. Once Cassandra is built, generate the IDEA files using ant: - -:: - - ant generate-idea-files - -2. Start IDEA - -3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA's File menu - -The project generated by the ant task ``generate-idea-files`` contains nearly everything you need to debug Cassandra and execute unit tests. - - * Run/debug defaults for JUnit - * Run/debug configuration for Cassandra daemon - * License header for Java source files - * Cassandra code style - * Inspections - -| - -Opening Cassandra in Apache NetBeans -======================================= - -`Apache NetBeans `_ is the elder of the open sourced java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans. - -Open Cassandra as a Project (C* 4.0 and newer) ------------------------------------------------ - -Please clone and build Cassandra as described above and execute the following steps: - -1. Start Apache NetBeans - -2. 
Open the NetBeans project from the `ide/` folder of the checked out Cassandra directory using the menu item "Open Project…" in NetBeans' File menu - -The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant `build.xml` script. - - * Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu. - * Profile Project is available via the Profile menu. In the opened Profiler tab, click the green "Profile" button. - * Cassandra's code style is honored in `ide/nbproject/project.properties` - -The `JAVA8_HOME` system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute. - -| - -Setting up Cassandra in Eclipse -=============================== - -Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the `download page `_. The following guide was created with "Eclipse IDE for Java Developers". - -These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x. - -Project Settings ----------------- - -**It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.** - - * Clone and build Cassandra as described above. - * Run ``ant generate-eclipse-files`` to create the Eclipse settings. - * Start Eclipse. - * Select ``File->Import->Existing Projects into Workspace->Select git directory``. - * Make sure "cassandra-trunk" is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above). - * Confirm "Finish" to have your project imported. - -You should now be able to find the project as part of the "Package Explorer" or "Project Explorer" without having Eclipse complain about any errors after building the project automatically. - -Unit Tests ----------- - -Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting ``Run As->JUnit Test``. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting ``Debug As->JUnit Test``. - -Alternatively all unit tests can be run from the command line as described in :doc:`testing` - -Debugging Cassandra Using Eclipse ---------------------------------- - -There are two ways how to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ``./bin/cassandra`` script and connect to the JVM through `remotely `_ from Eclipse or start Cassandra from Eclipse right away. - -Starting Cassandra From Command Line -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * Set environment variable to define remote debugging options for the JVM: - ``export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`` - * Start Cassandra by executing the ``./bin/cassandra`` - -Afterwards you should be able to connect to the running Cassandra process through the following steps: - -From the menu, select ``Run->Debug Configurations..`` - -.. image:: images/eclipse_debug0.png - -Create new remote application - -.. image:: images/eclipse_debug1.png - -Configure connection settings by specifying a name and port 1414 - -.. image:: images/eclipse_debug2.png - -Afterwards confirm "Debug" to connect to the JVM and start debugging Cassandra! 
- -Starting Cassandra From Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cassandra can also be started directly from Eclipse if you don't want to use the command line. - -From the menu, select ``Run->Run Configurations..`` - -.. image:: images/eclipse_debug3.png - -Create new application - -.. image:: images/eclipse_debug4.png - -Specify name, project and main class ``org.apache.cassandra.service.CassandraDaemon`` - -.. image:: images/eclipse_debug5.png - -Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed. - -:: - - -Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true - -.. image:: images/eclipse_debug6.png - -Now just confirm "Debug" and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging! - diff --git a/src/doc/4.0-alpha4/_sources/development/index.rst.txt b/src/doc/4.0-alpha4/_sources/development/index.rst.txt deleted file mode 100644 index ffa7134dd..000000000 --- a/src/doc/4.0-alpha4/_sources/development/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contributing to Cassandra -************************* - -.. toctree:: - :maxdepth: 2 - - gettingstarted - ide - testing - patches - code_style - how_to_review - how_to_commit - documentation - ci - dependencies - release_process diff --git a/src/doc/4.0-alpha4/_sources/development/patches.rst.txt b/src/doc/4.0-alpha4/_sources/development/patches.rst.txt deleted file mode 100644 index 92c05531e..000000000 --- a/src/doc/4.0-alpha4/_sources/development/patches.rst.txt +++ /dev/null @@ -1,141 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. 
_patches: - -Contributing Code Changes -************************* - -Choosing What to Work on -======================== - -Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that requires changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing. - -As a general rule of thumb: - * Major new features and significant changes to the code based will likely not going to be accepted without deeper discussion within the `developer community `_ - * Bug fixes take higher priority compared to features - * The extend to which tests are required depend on how likely your changes will effect the stability of Cassandra in production. Tooling changes requires fewer tests than storage engine changes. - * Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately - -.. hint:: - - Not sure what to work? Just pick an issue marked as `Low Hanging Fruit `_ Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners. - -Before You Start Coding -======================= - -Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or :ref:`Slack `. - -You should also - * Avoid redundant work by searching for already reported issues in `JIRA `_ - * Create a new issue early in the process describing what you're working on - not just after finishing your patch - * Link related JIRA issues with your own ticket to provide a better context - * Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code - * Ping people who you actively like to ask for advice on JIRA by `mentioning users `_ - -There are also some fixed rules that you need to be aware: - * Patches will only be applied to branches by following the release model - * Code must be testable - * Code must follow the :doc:`code_style` convention - * Changes must not break compatibility between different Cassandra versions - * Contributions must be covered by the Apache License - -Choosing the Right Branches to Work on -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are currently multiple Cassandra versions maintained in individual branches: - -======= ====== -Version Policy -======= ====== -4.0 Code freeze (see below) -3.11 Critical bug fixes only -3.0 Critical bug fixes only -2.2 Critical bug fixes only -2.1 Critical bug fixes only -======= ====== - -Corresponding branches in git are easy to recognize as they are named ``cassandra-`` (e.g. ``cassandra-3.0``). The ``trunk`` branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases. - -4.0 Code Freeze -""""""""""""""" - -Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. 
During that time, only the following patches will be considered for acceptance: - - * Bug fixes - * Measurable performance improvements - * Changes not distributed as part of the release such as: - * Testing related improvements and fixes - * Build and infrastructure related changes - * Documentation - -Bug Fixes -""""""""" - -Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be ``cassandra-2.1`` -> ``cassandra-2.2`` -> ``cassandra-3.0`` -> ``cassandra-3.x`` -> ``trunk``. But don't worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn't very common. As a contributor, you're also not expected to provide a single patch for each version. What you need to do however is: - - * Be clear about which versions you could verify to be affected by the bug - * For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on case by case bases - * If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0) - * Test if the patch can be merged cleanly across branches in the direction listed above - * Be clear which branches may need attention by the committer or even create custom patches for those if you can - -Creating a Patch -================ - -So you've finished coding and the great moment arrives: it's time to submit your patch! - - 1. Create a branch for your changes if you haven't done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. ``git checkout -b 12345-3.0`` - 2. Verify that you follow Cassandra's :doc:`code_style` - 3. Make sure all tests (including yours) pass using ant as described in :doc:`testing`. If you suspect a test failure is unrelated to your change, it may be useful to check the test's status by searching the issue tracker or looking at `CI `_ results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites. - 4. Consider going through the :doc:`how_to_review` for your code. This will help you to understand how others will consider your change for inclusion. - 5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either: - - a. Attach a patch to JIRA with a single squashed commit in it (per branch), or - b. Squash the commits in-place in your branches into one - - 6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below. Please note that only user-impacting items `should `_ be listed in CHANGES.txt. If you fix a test that does not affect users and does not require changes in runtime code, then no CHANGES.txt entry is necessary. - - :: - - - - patch by ; reviewed by for CASSANDRA-##### - - 7. When you're happy with the result, create a patch: - - :: - - git add - git commit -m '' - git format-patch HEAD~1 - mv (e.g. 12345-trunk.txt, 12345-3.0.txt) - - Alternatively, many contributors prefer to make their branch available on GitHub. 
In this case, fork the Cassandra repository on GitHub and push your branch: - - :: - - git push --set-upstream origin 12345-3.0 - - 8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless. - 9. Attach the newly generated patch to the ticket/add a link to your branch and click "Submit Patch" at the top of the ticket. This will move the ticket into "Patch Available" status, indicating that your submission is ready for review. - 10. Wait for other developers or committers to review it and hopefully +1 the ticket (see :doc:`how_to_review`). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable. - 11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into "Patch Available" once again. - -Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work. - - diff --git a/src/doc/4.0-alpha4/_sources/development/release_process.rst.txt b/src/doc/4.0-alpha4/_sources/development/release_process.rst.txt deleted file mode 100644 index 8b06e81a7..000000000 --- a/src/doc/4.0-alpha4/_sources/development/release_process.rst.txt +++ /dev/null @@ -1,248 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. release_process: - -Release Process -*************** - -.. contents:: :depth: 3 - -|  -| - -.. attention:: - - WORK IN PROGRESS - * A number of these steps still have been finalised/tested. - * The use of people.apache.org needs to be replaced with svnpubsub and dist.apache.org - - -The steps for Release Managers to create, vote and publish releases for Apache Cassandra. - -While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC can complete the process of publishing and announcing the release. - - -Prerequisites -============= - -Background docs - * `ASF Release Policy `_ - * `ASF Release Distribution Policy `_ - * `ASF Release Best Practices `_ - - -A debian based linux OS is required to run the release steps from. 
Debian-based distros provide the required RPM, dpkg and repository management tools. - - -Create and publish your GPG key -------------------------------- - -To create a GPG key, follow the `guidelines `_. -Include your public key in:: - - https://dist.apache.org/repos/dist/release/cassandra/KEYS - - -Publish your GPG key in a PGP key server, such as `MIT Keyserver `_. - - -Create Release Artifacts -======================== - -Any committer can perform the following steps to create and call a vote on a proposed release. - -Check that there are no open urgent jira tickets currently being worked on. Also check with a PMC that there's security vulnerabilities currently being worked on in private.' -Current project habit is to check the timing for a new release on the dev mailing lists. - -Perform the Release -------------------- - -Run the following commands to generate and upload release artifacts, to a nexus staging repository and distribution location:: - - - cd ~/git - git clone https://github.com/apache/cassandra-builds.git - # Edit the variables at the top of `cassandra-builds/cassandra-release/prepare_release.sh` - - # After cloning cassandra-builds repo, the prepare_release.sh is run from the actual cassandra git checkout, - # on the branch/commit that we wish to tag for the tentative release along with version number to tag. - # For example here might be `3.11` and `3.11.3` - cd ~/git/cassandra/ - git checkout cassandra- - ../cassandra-builds/cassandra-release/prepare_release.sh -v - -If successful, take note of the email text output which can be used in the next section "Call for a Vote". - -After validating the uploaded artifacts in staging, increment the version number in Cassandra on the `cassandra-` - - cd ~/git/cassandra/ - git checkout cassandra- - edit build.xml # update ` ` - edit debian/changelog # add entry for new version - edit CHANGES.txt # add entry for new version - git commit -m "Update version to " build.xml debian/changelog CHANGES.txt - git push - -Call for a Vote -=============== - -Fill out the following email template and send to the dev mailing list:: - - I propose the following artifacts for release as . - - sha1: - - Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/-tentative - - Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-/org/apache/cassandra/apache-cassandra// - - Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-/ - - The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/ - - The vote will be open for 72 hours (longer if needed). - - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=-tentative - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=-tentative - - - -Post-vote operations -==================== - -Any PMC can perform the following steps to formalize and publish a successfully voted release. - -Publish Artifacts ------------------ - -Run the following commands to publish the voted release artifacts:: - - cd ~/git - git clone https://github.com/apache/cassandra-builds.git - # edit the variables at the top of `finish_release.sh` - - # After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout, - # on the tentative release tag that we wish to tag for the final release version number tag. 
- cd ~/git/cassandra/ - git checkout -tentative - ../cassandra-builds/cassandra-release/finish_release.sh -v - -If successful, take note of the email text output which can be used in the next section "Send Release Announcement". -The output will also list the next steps that are required. The first of these is to commit changes made to your https://dist.apache.org/repos/dist/release/cassandra/ checkout. - - -Promote Nexus Repository ------------------------- - - * Login to `Nexus repository `_ again. - * Click on "Staging" and then on the repository with id "cassandra-staging". - * Find your closed staging repository, right click on it and choose "Promote". - * Select the "Releases" repository and click "Promote". - * Next click on "Repositories", select the "Releases" repository and validate that your artifacts exist as you expect them. - -Sign and Upload Distribution Packages to Bintray ---------------------------------------- - -Run the following command:: - - cd ~/git - # FIXME the next command is wrong while people.apache.org is used instead of svnpubsub and dist.apache.org - svn mv https://dist.apache.org/repos/dist/dev/cassandra/ https://dist.apache.org/repos/dist/release/cassandra/ - - # Create the yum metadata, sign the metadata, and sign some files within the signed repo metadata that the ASF sig tool errors out on - svn co https://dist.apache.org/repos/dist/release/cassandra/redhat/ cassandra-dist-redhat - cd cassandra-dist-redhat/x/ - createrepo . - gpg --detach-sign --armor repodata/repomd.xml - for f in `find repodata/ -name *.bz2`; do - gpg --detach-sign --armor $f; - done - - svn co https://dist.apache.org/repos/dist/release/cassandra/ cassandra-dist- - cd cassandra-dist- - cassandra-build/cassandra-release/upload_bintray.sh cassandra-dist- - - -Update and Publish Website --------------------------- - -See `docs https://svn.apache.org/repos/asf/cassandra/site/src/README`_ for building and publishing the website. -Also update the CQL doc if appropriate. - -Release version in JIRA ------------------------ - -Release the JIRA version. - - * In JIRA go to the version that you want to release and release it. - * Create a new version, if it has not been done before. - -Update to Next Development Version ----------------------------------- - -Edit and commit ``build.xml`` so the base.version property points to the next version. - -Wait for Artifacts to Sync --------------------------- - -Wait for the artifacts to sync at https://downloads.apache.org/cassandra/ - -Send Release Announcement -------------------------- - -Fill out the following email template and send to both user and dev mailing lists:: - - The Cassandra team is pleased to announce the release of Apache Cassandra version . - - Apache Cassandra is a fully distributed database. It is the right choice - when you need scalability and high availability without compromising - performance. - - http://cassandra.apache.org/ - - Downloads of source and binary distributions are listed in our download - section: - - http://cassandra.apache.org/download/ - - This version is release[1] on the series. As always, - please pay attention to the release notes[2] and let us know[3] if you - were to encounter any problem. - - Enjoy! 
- - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb= - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb= - [3]: https://issues.apache.org/jira/browse/CASSANDRA - -Update Slack Cassandra topic ---------------------------- - -Update topic in ``cassandra`` :ref:`Slack room ` - /topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don't ask to ask - -Tweet from @Cassandra ---------------------- - -Tweet the new release, from the @Cassandra account - -Delete Old Releases -------------------- - -As described in `When to Archive `_. -Also check people.apache.org as previous release scripts used it. diff --git a/src/doc/4.0-alpha4/_sources/development/testing.rst.txt b/src/doc/4.0-alpha4/_sources/development/testing.rst.txt deleted file mode 100644 index 7f38fe590..000000000 --- a/src/doc/4.0-alpha4/_sources/development/testing.rst.txt +++ /dev/null @@ -1,98 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _testing: - -Testing -******* - -Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you're working on. - - -Unit Testing -============ - -The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the ``test/unit`` directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example. - -.. code-block:: java - - @Test - public void testBatchAndList() throws Throwable - { - createTable("CREATE TABLE %s (k int PRIMARY KEY, l list)"); - execute("BEGIN BATCH " + - "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " + - "APPLY BATCH"); - - assertRows(execute("SELECT l FROM %s WHERE k = 0"), - row(list(1, 2, 3))); - } - -Unit tests can be run from the command line using the ``ant test`` command, ``ant test -Dtest.name=`` to execute a test suite or ``ant testsome -Dtest.name= -Dtest.methods=[,testmethod2]`` for individual tests. 
For example, to run all test methods in the ``org.apache.cassandra.cql3.SimpleQueryTest`` class, you would run:: - - ant test -Dtest.name=SimpleQueryTest - -To run only the ``testStaticCompactTables()`` test method from that class, you would run:: - - ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables - -If you see an error like this:: - - Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found: - org/apache/tools/ant/taskdefs/optional/junit/JUnitTask using the classloader - AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar] - -You will need to install the ant-optional package since it contains the ``JUnitTask`` class. - -Long running tests ------------------- - -Test that consume a significant amount of time during execution can be found in the ``test/long`` directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under ``test/long`` only when using the ``ant long-test`` target. - -DTests -====== - -One way of doing integration or system testing at larger scale is by using `dtest `_, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ``ccmlib`` from the `ccm `_ project. Dtests will setup clusters using this library just as you do running ad-hoc ``ccm`` commands on your local machine. Afterwards dtests will use the `Python driver `_ to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes. - -Using dtests helps us to prevent regression bugs by continually executing tests on the `CI server `_ against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration `here `_. - -The best way to learn how to write dtests is probably by reading the introduction "`How to Write a Dtest `_" and by looking at existing, recently updated tests in the project. New tests must follow certain `style conventions `_ that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefor you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR. - -Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will able to support you, and in some cases they may offer to write a dtest for you. - -Performance Testing -=================== - -Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable. - -Cassandra Stress Tool ---------------------- - -See :ref:`cassandra_stress` - -cstar_perf ----------- - -Another tool available on github is `cstar_perf `_ that can be used for intensive performance testing in large clusters or locally. 
Please refer to the project page on how to set it up and how to use it. - -CircleCI --------- -Cassandra ships with a default `CircleCI `_ configuration, to enable running tests on your branches, you need to go the CircleCI website, click "Login" and log in with your github account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ``ant eclipse-warnings`` and ``ant test`` will be run. If you up the parallelism to 4, it also runs ``ant long-test``, ``ant test-compression`` and ``ant stress-test`` - - diff --git a/src/doc/4.0-alpha4/_sources/faq/index.rst.txt b/src/doc/4.0-alpha4/_sources/faq/index.rst.txt deleted file mode 100644 index acb7538d6..000000000 --- a/src/doc/4.0-alpha4/_sources/faq/index.rst.txt +++ /dev/null @@ -1,299 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Frequently Asked Questions -========================== - -- :ref:`why-cant-list-all` -- :ref:`what-ports` -- :ref:`what-happens-on-joins` -- :ref:`asynch-deletes` -- :ref:`one-entry-ring` -- :ref:`can-large-blob` -- :ref:`nodetool-connection-refused` -- :ref:`to-batch-or-not-to-batch` -- :ref:`selinux` -- :ref:`how-to-unsubscribe` -- :ref:`cassandra-eats-all-my-memory` -- :ref:`what-are-seeds` -- :ref:`are-seeds-SPOF` -- :ref:`why-message-dropped` -- :ref:`oom-map-failed` -- :ref:`what-on-same-timestamp-update` -- :ref:`why-bootstrapping-stream-error` - -.. _why-cant-list-all: - -Why can't I set ``listen_address`` to listen on 0.0.0.0 (all my addresses)? ---------------------------------------------------------------------------- - -Cassandra is a gossip-based distributed system and ``listen_address`` is the address a node tells other nodes to reach -it at. Telling other nodes "contact me on any of my addresses" is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen. - -If you don't want to manually specify an IP to ``listen_address`` for each node in your cluster (understandable!), leave -it blank and Cassandra will use ``InetAddress.getLocalHost()`` to pick an address. Then it's up to you or your ops team -to make things resolve correctly (``/etc/hosts/``, dns, etc). - -One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769). - -See :jira:`256` and :jira:`43` for more gory details. - -.. _what-ports: - -What ports does Cassandra use? ------------------------------- - -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX. 
The internode communication and native protocol ports -are configurable in the :ref:`cassandra-yaml`. The JMX port is configurable in ``cassandra-env.sh`` (through JVM -options). All ports are TCP. - -.. _what-happens-on-joins: - -What happens to existing data in my cluster when I add new nodes? ------------------------------------------------------------------ - -When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data -to itself. See :ref:`topology-changes`. - -.. _asynch-deletes: - -I delete data from Cassandra, but disk usage stays the same. What gives? ------------------------------------------------------------------------- - -Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can't actually be removed -when you perform a delete; instead, a marker (also called a "tombstone") is written to indicate the value's new status. -Never fear though: on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See :ref:`compaction` for more detail. - -.. _one-entry-ring: - -Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring? ------------------------------------------------------------------------------------------------------------------- - -This happens when you have the same token assigned to each node. Don't do that. - -Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes. - -The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart. - -.. _change-replication-factor: - -Can I change the replication factor (of a keyspace) on a live cluster? ---------------------------------------------------------------------- - -Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data: - -- :ref:`Alter ` the replication factor for the desired keyspace (using cqlsh for instance). -- If you're reducing the replication factor, run ``nodetool cleanup`` on the cluster to remove surplus replicated data. - Cleanup runs on a per-node basis. -- If you're increasing the replication factor, run ``nodetool repair -full`` to ensure data is replicated according to the new - configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster - performance. It's highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will - most likely swamp it. Note that you will need to run a full repair (``-full``) to make sure that already repaired - sstables are not skipped. - -.. _can-large-blob: - -Can I Store (large) BLOBs in Cassandra? ---------------------------------------- - -Cassandra isn't optimized for large file or BLOB storage and a single ``blob`` value is always read and sent to the -client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to -manually split large blobs into smaller chunks. 
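One common way to apply that advice is to split the payload client-side and store each piece as its own row under a shared identifier. The following is a minimal sketch only, assuming the DataStax Java driver 4.x and a hypothetical ``blob_chunks`` table; the keyspace, table name and chunk size are illustrative, not an official recipe.

.. code-block:: java

    // Illustrative sketch: split a large payload into ~1 MB chunks client-side.
    // Assumes a (hypothetical) table such as:
    //   CREATE TABLE ks.blob_chunks (blob_id uuid, chunk_idx int, data blob,
    //                                PRIMARY KEY (blob_id, chunk_idx));
    import com.datastax.oss.driver.api.core.CqlSession;
    import com.datastax.oss.driver.api.core.cql.PreparedStatement;
    import java.nio.ByteBuffer;
    import java.util.UUID;

    public class BlobChunkWriter
    {
        private static final int CHUNK_SIZE = 1 << 20; // 1 MB per chunk

        public static void write(CqlSession session, UUID blobId, byte[] payload)
        {
            PreparedStatement insert = session.prepare(
                "INSERT INTO ks.blob_chunks (blob_id, chunk_idx, data) VALUES (?, ?, ?)");

            for (int offset = 0, idx = 0; offset < payload.length; offset += CHUNK_SIZE, idx++)
            {
                int length = Math.min(CHUNK_SIZE, payload.length - offset);
                // Each chunk is written as its own row; rows stay well under the mutation size limit.
                session.execute(insert.bind(blobId, idx, ByteBuffer.wrap(payload, offset, length)));
            }
        }
    }

Reading the value back is simply the inverse: select all rows for the ``blob_id`` ordered by ``chunk_idx`` and concatenate the pieces.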
- -Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due the -``max_mutation_size_in_kb`` configuration of the :ref:`cassandra-yaml` file (which default to half of -``commitlog_segment_size_in_mb``, which itself default to 32MB). - -.. _nodetool-connection-refused: - -Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives? --------------------------------------------------------------------------------------- - -Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions. - -If you are not using DNS, then make sure that your ``/etc/hosts`` files are accurate on both ends. If that fails, try -setting the ``-Djava.rmi.server.hostname=`` JVM option near the bottom of ``cassandra-env.sh`` to an -interface that you can reach from the remote machine. - -.. _to-batch-or-not-to-batch: - -Will batching my operations speed up my bulk load? --------------------------------------------------- - -No. Using batches to load data will generally just add "spikes" of latency. Use asynchronous INSERTs instead, or use -true :ref:`bulk-loading`. - -An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch -stay reasonable). But never ever blindly batch everything! - -.. _selinux: - -On RHEL nodes are unable to join the ring ------------------------------------------ - -Check if `SELinux `__ is on; if it is, turn it off. - -.. _how-to-unsubscribe: - -How do I unsubscribe from the email list? ------------------------------------------ - -Send an email to ``user-unsubscribe@cassandra.apache.org``. - -.. _cassandra-eats-all-my-memory: - -Why does top report that Cassandra is using a lot more memory than the Java heap max? -------------------------------------------------------------------------------------- - -Cassandra uses `Memory Mapped Files `__ (mmap) internally. That is, we -use the operating system's virtual memory system to map a number of on-disk files into the Cassandra process' address -space. This will "use" virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that. - -What matters from the perspective of "memory use" in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap'd /dev/zero, which represent real memory used. The key issue is that for a mmap'd file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write. - -The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don't -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). 
This is covered in more -detail `here `__. - -.. _what-are-seeds: - -What are seeds? ---------------- - -Seeds are used during startup to discover the cluster. - -If you configure your nodes to refer some node as seed, nodes in your ring tend to send Gossip message to seeds more -often (also see the :ref:`section on gossip `) than to non-seeds. In other words, seeds are worked as hubs of -Gossip network. With seeds, each node can detect status changes of other nodes quickly. - -Seeds are also referred by new nodes on bootstrap to learn other nodes in ring. When you add a new node to ring, you -need to specify at least one live seed to contact. Once a node join the ring, it learns about the other nodes, so it -doesn't need seed on subsequent boot. - -You can make a seed a node at any time. There is nothing special about seed nodes. If you list the node in seed list it -is a seed - -Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself) -If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you -do not have to worry about bootstrap at all. - -Recommended usage of seeds: - -- pick two (or more) nodes per data center as seed nodes. -- sync the seed list to all your nodes - -.. _are-seeds-SPOF: - -Does single seed mean single point of failure? ----------------------------------------------- - -The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is -recommended to configure multiple seeds in production system. - -.. _cant-call-jmx-method: - -Why can't I call jmx method X on jconsole? ------------------------------------------- - -Some of JMX operations use array argument and as jconsole doesn't support array argument, those operations can't be -called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations or need -array-capable JMX monitoring tool. - -.. _why-message-dropped: - -Why do I see "... messages dropped ..." in the logs? ----------------------------------------------------- - -This is a symptom of load shedding -- Cassandra defending itself against more requests than it can handle. - -Internode messages which are received by a node, but do not get not to be processed within their proper timeout (see -``read_request_timeout``, ``write_request_timeout``, ... in the :ref:`cassandra-yaml`), are dropped rather than -processed (since the as the coordinator node will no longer be waiting for a response). - -For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be -repaired by read repair, hints or a manual repair. The write operation may also have timeouted as a result. - -For reads, this means a read request may not have completed. - -Load shedding is part of the Cassandra architecture, if this is a persistent issue it is generally a sign of an -overloaded node or cluster. - -.. _oom-map-failed: - -Cassandra dies with ``java.lang.OutOfMemoryError: Map failed`` --------------------------------------------------------------- - -If Cassandra is dying **specifically** with the "Map failed" message, it means the OS is denying java the ability to -lock more memory. In linux, this typically means memlock is limited. Check ``/proc//limits`` to verify -this and raise it (eg, via ulimit in bash). 
You may also need to increase ``vm.max_map_count.`` Note that the debian -package handles this for you automatically. - - -.. _what-on-same-timestamp-update: - -What happens if two updates are made with the same timestamp? -------------------------------------------------------------- - -Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected. - -.. _why-bootstrapping-stream-error: - -Why bootstrapping a new node fails with a "Stream failed" error? ----------------------------------------------------------------- - -Two main possibilities: - -#. the GC may be creating long pauses disrupting the streaming process -#. compactions happening in the background hold streaming long enough that the TCP connection fails - -In the first case, regular GC tuning advices apply. In the second case, you need to set TCP keepalive to a lower value -(default is very high on Linux). Try to just run the following:: - - $ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5 - -To make those settings permanent, add them to your ``/etc/sysctl.conf`` file. - -Note: `GCE `__'s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment. - - - - - - - - - - - diff --git a/src/doc/4.0-alpha4/_sources/getting_started/configuring.rst.txt b/src/doc/4.0-alpha4/_sources/getting_started/configuring.rst.txt deleted file mode 100644 index e71eeedbe..000000000 --- a/src/doc/4.0-alpha4/_sources/getting_started/configuring.rst.txt +++ /dev/null @@ -1,67 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra ---------------------- - -For running Cassandra on a single node, the default configuration file present at ``./conf/cassandra.yaml`` is enough, -you shouldn't need to change any configuration. However, when you deploy a cluster of nodes, or use clients that -are not on the same host, then there are some parameters that must be changed. - -The Cassandra configuration files can be found in the ``conf`` directory of tarballs. For packages, the configuration -files will be located in ``/etc/cassandra``. - -Main runtime properties -^^^^^^^^^^^^^^^^^^^^^^^ - -Most of configuration in Cassandra is done via yaml properties that can be set in ``cassandra.yaml``. 
At a minimum you -should consider setting the following properties: - -- ``cluster_name``: the name of your cluster. -- ``seeds``: a comma separated list of the IP addresses of your cluster seeds. -- ``storage_port``: you don't necessarily need to change this but make sure that there are no firewalls blocking this - port. -- ``listen_address``: the IP address of your node, this is what allows other nodes to communicate with this node so it - is important that you change it. Alternatively, you can set ``listen_interface`` to tell Cassandra which interface to - use, and consecutively which address to use. Set only one, not both. -- ``native_transport_port``: as for storage\_port, make sure this port is not blocked by firewalls as clients will - communicate with Cassandra on this port. - -Changing the location of directories -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following yaml properties control the location of directories: - -- ``data_file_directories``: one or more directories where data files are located. -- ``commitlog_directory``: the directory where commitlog files are located. -- ``saved_caches_directory``: the directory where saved caches are located. -- ``hints_directory``: the directory where hints are located. - -For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks. - -Environment variables -^^^^^^^^^^^^^^^^^^^^^ - -JVM-level settings such as heap size can be set in ``cassandra-env.sh``. You can add any additional JVM command line -argument to the ``JVM_OPTS`` environment variable; when Cassandra starts these arguments will be passed to the JVM. - -Logging -^^^^^^^ - -The logger in use is logback. You can change logging properties by editing ``logback.xml``. By default it will log at -INFO level into a file called ``system.log`` and at debug level into a file called ``debug.log``. When running in the -foreground, it will also log at INFO level to the console. - diff --git a/src/doc/4.0-alpha4/_sources/getting_started/drivers.rst.txt b/src/doc/4.0-alpha4/_sources/getting_started/drivers.rst.txt deleted file mode 100644 index 9a2c1567a..000000000 --- a/src/doc/4.0-alpha4/_sources/getting_started/drivers.rst.txt +++ /dev/null @@ -1,123 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _client-drivers: - -Client drivers --------------- - -Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver. 
- -Java -^^^^ - -- `Achilles `__ -- `Astyanax `__ -- `Casser `__ -- `Datastax Java driver `__ -- `Kundera `__ -- `PlayORM `__ - -Python -^^^^^^ - -- `Datastax Python driver `__ - -Ruby -^^^^ - -- `Datastax Ruby driver `__ - -C# / .NET -^^^^^^^^^ - -- `Cassandra Sharp `__ -- `Datastax C# driver `__ -- `Fluent Cassandra `__ - -Nodejs -^^^^^^ - -- `Datastax Nodejs driver `__ -- `Node-Cassandra-CQL `__ - -PHP -^^^ - -- `CQL \| PHP `__ -- `Datastax PHP driver `__ -- `PHP-Cassandra `__ -- `PHP Library for Cassandra `__ - -C++ -^^^ - -- `Datastax C++ driver `__ -- `libQTCassandra `__ - -Scala -^^^^^ - -- `Datastax Spark connector `__ -- `Phantom `__ -- `Quill `__ - -Clojure -^^^^^^^ - -- `Alia `__ -- `Cassaforte `__ -- `Hayt `__ - -Erlang -^^^^^^ - -- `CQerl `__ -- `Erlcass `__ - -Go -^^ - -- `CQLc `__ -- `Gocassa `__ -- `GoCQL `__ - -Haskell -^^^^^^^ - -- `Cassy `__ - -Rust -^^^^ - -- `Rust CQL `__ - -Perl -^^^^ - -- `Cassandra::Client and DBD::Cassandra `__ - -Elixir -^^^^^^ - -- `Xandra `__ -- `CQEx `__ - -Dart -^^^^ - -- `dart_cassandra_cql `__ diff --git a/src/doc/4.0-alpha4/_sources/getting_started/index.rst.txt b/src/doc/4.0-alpha4/_sources/getting_started/index.rst.txt deleted file mode 100644 index a699aee97..000000000 --- a/src/doc/4.0-alpha4/_sources/getting_started/index.rst.txt +++ /dev/null @@ -1,34 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Getting Started -=============== - -This section covers how to get started using Apache Cassandra and should be the first thing to read if you are new to -Cassandra. - -.. toctree:: - :maxdepth: 2 - - installing - configuring - querying - drivers - production - - diff --git a/src/doc/4.0-alpha4/_sources/getting_started/installing.rst.txt b/src/doc/4.0-alpha4/_sources/getting_started/installing.rst.txt deleted file mode 100644 index f3a22f21a..000000000 --- a/src/doc/4.0-alpha4/_sources/getting_started/installing.rst.txt +++ /dev/null @@ -1,324 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. 
highlight:: none - -Installing Cassandra --------------------- - -These are the instructions for deploying the supported releases of Apache Cassandra on Linux servers. - -Cassandra runs on a wide array of Linux distributions including (but not limited to): - -- Ubuntu, most notably LTS releases 16.04 to 18.04 -- CentOS & RedHat Enterprise Linux (RHEL) including 6.6 to 7.7 -- Amazon Linux AMIs including 2016.09 through to Linux 2 -- Debian versions 8 & 9 -- SUSE Enterprise Linux 12 - -This is not an exhaustive list of operating system platforms, nor is it prescriptive. However users will be -well-advised to conduct exhaustive tests of their own particularly for less-popular distributions of Linux. -Deploying on older versions is not recommended unless you have previous experience with the older distribution -in a production environment. - -Prerequisites -^^^^^^^^^^^^^ - -- Install the latest version of Java 8, either the `Oracle Java Standard Edition 8 - `__ or `OpenJDK 8 `__. To - verify that you have the correct version of java installed, type ``java -version``. -- **NOTE**: *Experimental* support for Java 11 was added in Cassandra 4.0 (`CASSANDRA-9608 `__). - Running Cassandra on Java 11 is *experimental*. Do so at your own risk. For more information, see - `NEWS.txt `__. -- For using cqlsh, the latest version of `Python 2.7 `__ or Python 3.6+. To verify that you have - the correct version of Python installed, type ``python --version``. - -Choosing an installation method -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For most users, installing the binary tarball is the simplest choice. The tarball unpacks all its contents -into a single location with binaries and configuration files located in their own subdirectories. The most -obvious attribute of the tarball installation is it does not require ``root`` permissions and can be -installed on any Linux distribution. - -Packaged installations require ``root`` permissions. Install the RPM build on CentOS and RHEL-based -distributions if you want to install Cassandra using YUM. Install the Debian build on Ubuntu and other -Debian-based distributions if you want to install Cassandra using APT. Note that both the YUM and APT -methods required ``root`` permissions and will install the binaries and configuration files as the -``cassandra`` OS user. - -Installing the binary tarball -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. Verify the version of Java installed. For example: - -:: - - $ java -version - openjdk version "1.8.0_222" - OpenJDK Runtime Environment (build 1.8.0_222-8u222-b10-1ubuntu1~16.04.1-b10) - OpenJDK 64-Bit Server VM (build 25.222-b10, mixed mode) - -2. Download the binary tarball from one of the mirrors on the `Apache Cassandra Download `__ - site. For example, to download 4.0: - -:: - - $ curl -OL http://apache.mirror.digitalpacific.com.au/cassandra/4.0.0/apache-cassandra-4.0.0-bin.tar.gz - -NOTE: The mirrors only host the latest versions of each major supported release. To download an earlier -version of Cassandra, visit the `Apache Archives `__. - -3. OPTIONAL: Verify the integrity of the downloaded tarball using one of the methods `here `__. 
- For example, to verify the hash of the downloaded file using GPG: - -:: - - $ gpg --print-md SHA256 apache-cassandra-4.0.0-bin.tar.gz - apache-cassandra-4.0.0-bin.tar.gz: 28757DDE 589F7041 0F9A6A95 C39EE7E6 - CDE63440 E2B06B91 AE6B2006 14FA364D - -Compare the signature with the SHA256 file from the Downloads site: - -:: - - $ curl -L https://downloads.apache.org/cassandra/4.0.0/apache-cassandra-4.0.0-bin.tar.gz.sha256 - 28757dde589f70410f9a6a95c39ee7e6cde63440e2b06b91ae6b200614fa364d - -4. Unpack the tarball: - -:: - - $ tar xzvf apache-cassandra-4.0.0-bin.tar.gz - -The files will be extracted to the ``apache-cassandra-4.0.0/`` directory. This is the tarball installation -location. - -5. Located in the tarball installation location are the directories for the scripts, binaries, utilities, configuration, data and log files: - -:: - - / - bin/ - conf/ - data/ - doc/ - interface/ - javadoc/ - lib/ - logs/ - pylib/ - tools/ - -For information on how to configure your installation, see -`Configuring Cassandra `__. - -6. Start Cassandra: - -:: - - $ cd apache-cassandra-4.0.0/ - $ bin/cassandra - -NOTE: This will run Cassandra as the authenticated Linux user. - -You can monitor the progress of the startup with: - -:: - - $ tail -f logs/system.log - -Cassandra is ready when you see an entry like this in the ``system.log``: - -:: - - INFO [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)... - -7. Check the status of Cassandra: - -:: - - $ bin/nodetool status - -The status column in the output should report UN which stands for "Up/Normal". - -Alternatively, connect to the database with: - -:: - - $ bin/cqlsh - -Installing the Debian packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. Verify the version of Java installed. For example: - -:: - - $ java -version - openjdk version "1.8.0_222" - OpenJDK Runtime Environment (build 1.8.0_222-8u222-b10-1ubuntu1~16.04.1-b10) - OpenJDK 64-Bit Server VM (build 25.222-b10, mixed mode) - -2. Add the Apache repository of Cassandra to the file ``cassandra.sources.list``. The latest major version - is 4.0 and the corresponding distribution name is ``40x`` (with an "x" as the suffix). - For older releases use ``311x`` for C* 3.11 series, ``30x`` for 3.0, ``22x`` for 2.2 and ``21x`` for 2.1. - For example, to add the repository for version 4.0 (``40x``): - -:: - - $ echo "deb http://www.apache.org/dist/cassandra/debian 40x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list - deb http://www.apache.org/dist/cassandra/debian 40x main - -3. Add the Apache Cassandra repository keys to the list of trusted keys on the server: - -:: - - $ curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add - - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed - 100 266k 100 266k 0 0 320k 0 --:--:-- --:--:-- --:--:-- 320k - OK - -4. Update the package index from sources: - -:: - - $ sudo apt-get update - -5. Install Cassandra with APT: - -:: - - $ sudo apt-get install cassandra - - -NOTE: A new Linux user ``cassandra`` will get created as part of the installation. The Cassandra service -will also be run as this user. - -6. The Cassandra service gets started automatically after installation. 
Monitor the progress of - the startup with: - -:: - - $ tail -f /var/log/cassandra/system.log - -Cassandra is ready when you see an entry like this in the ``system.log``: - -:: - - INFO [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)... - -NOTE: For information on how to configure your installation, see -`Configuring Cassandra `__. - -7. Check the status of Cassandra: - -:: - - $ nodetool status - -The status column in the output should report ``UN`` which stands for "Up/Normal". - -Alternatively, connect to the database with: - -:: - - $ cqlsh - -Installing the RPM packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. Verify the version of Java installed. For example: - -:: - - $ java -version - openjdk version "1.8.0_222" - OpenJDK Runtime Environment (build 1.8.0_232-b09) - OpenJDK 64-Bit Server VM (build 25.232-b09, mixed mode) - -2. Add the Apache repository of Cassandra to the file ``/etc/yum.repos.d/cassandra.repo`` (as the ``root`` - user). The latest major version is 4.0 and the corresponding distribution name is ``40x`` (with an "x" as the suffix). - For older releases use ``311x`` for C* 3.11 series, ``30x`` for 3.0, ``22x`` for 2.2 and ``21x`` for 2.1. - For example, to add the repository for version 4.0 (``40x``): - -:: - - [cassandra] - name=Apache Cassandra - baseurl=https://downloads.apache.org/cassandra/redhat/40x/ - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=https://downloads.apache.org/cassandra/KEYS - -3. Update the package index from sources: - -:: - - $ sudo yum update - -4. Install Cassandra with YUM: - -:: - - $ sudo yum install cassandra - - -NOTE: A new Linux user ``cassandra`` will get created as part of the installation. The Cassandra service -will also be run as this user. - -5. Start the Cassandra service: - -:: - - $ sudo service cassandra start - -6. Monitor the progress of the startup with: - -:: - - $ tail -f /var/log/cassandra/system.log - -Cassandra is ready when you see an entry like this in the ``system.log``: - -:: - - INFO [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)... - -NOTE: For information on how to configure your installation, see -`Configuring Cassandra `__. - -7. Check the status of Cassandra: - -:: - - $ nodetool status - -The status column in the output should report ``UN`` which stands for "Up/Normal". - -Alternatively, connect to the database with: - -:: - - $ cqlsh - -Further installation info -^^^^^^^^^^^^^^^^^^^^^^^^^ - -For help with installation issues, see the `Troubleshooting `__ section. - - diff --git a/src/doc/4.0-alpha4/_sources/getting_started/production.rst.txt b/src/doc/4.0-alpha4/_sources/getting_started/production.rst.txt deleted file mode 100644 index fe0c4a591..000000000 --- a/src/doc/4.0-alpha4/_sources/getting_started/production.rst.txt +++ /dev/null @@ -1,156 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. 
distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Production Recommendations ----------------------------- - -The ``cassandra.yaml`` and ``jvm.options`` files have a number of notes and recommendations for production usage. This page -expands on some of the notes in these files with additional information. - -Tokens -^^^^^^^ - -Using more than 1 token (referred to as vnodes) allows for more flexible expansion and more streaming peers when -bootstrapping new nodes into the cluster. This can limit the negative impact of streaming (I/O and CPU overhead) -as well as allow for incremental cluster expansion. - -As a tradeoff, more tokens will lead to sharing data with more peers, which can result in decreased availability. To learn more about this we -recommend reading `this paper `_. - -The number of tokens can be changed using the following setting: - -``num_tokens: 16`` - - -Here are the most common token counts with a brief explanation of when and why you would use each one. - -+-------------+---------------------------------------------------------------------------------------------------+ -| Token Count | Description | -+=============+===================================================================================================+ -| 1 | Maximum availablility, maximum cluster size, fewest peers, | -| | but inflexible expansion. Must always | -| | double size of cluster to expand and remain balanced. | -+-------------+---------------------------------------------------------------------------------------------------+ -| 4 | A healthy mix of elasticity and availability. Recommended for clusters which will eventually | -| | reach over 30 nodes. Requires adding approximately 20% more nodes to remain balanced. | -| | Shrinking a cluster may result in cluster imbalance. | -+-------------+---------------------------------------------------------------------------------------------------+ -| 16 | Best for heavily elastic clusters which expand and shrink regularly, but may have issues | -| | availability with larger clusters. Not recommended for clusters over 50 nodes. | -+-------------+---------------------------------------------------------------------------------------------------+ - - -In addition to setting the token count, it's extremely important that ``allocate_tokens_for_local_replication_factor`` be -set as well, to ensure even token allocation. - -.. _read-ahead: - -Read Ahead -^^^^^^^^^^^ - -Read ahead is an operating system feature that attempts to keep as much data loaded in the page cache as possible. The -goal is to decrease latency by using additional throughput on reads where the latency penalty is high due to seek times -on spinning disks. By leveraging read ahead, the OS can pull additional data into memory without the cost of additional -seeks. This works well when available RAM is greater than the size of the hot dataset, but can be problematic when the -hot dataset is much larger than available RAM. The benefit of read ahead decreases as the size of your hot dataset gets -bigger in proportion to available memory. 
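If you want to see what a device is currently configured to use before changing anything, the ``blockdev`` tool
(introduced below) can report the current read ahead value; note that the value is expressed in 512 byte sectors, and
the device name here is only illustrative::

    $ sudo blockdev --getra /dev/sda1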
- -With small partitions (usually tables with no partition key, but not limited to this case) and solid state drives, read -ahead can increase disk usage without any of the latency benefits, and in some cases can result in up to -a 5x latency and throughput performance penalty. Read heavy, key/value tables with small (under 1KB) rows are especially -prone to this problem. - -We recommend the following read ahead settings: - -+----------------+-------------------------+ -| Hardware | Initial Recommendation | -+================+=========================+ -|Spinning Disks | 64KB | -+----------------+-------------------------+ -|SSD | 4KB | -+----------------+-------------------------+ - -Read ahead can be adjusted on Linux systems by using the `blockdev` tool. - -For example, we can set read ahead of ``/dev/sda1` to 4KB by doing the following:: - - blockdev --setra 8 /dev/sda1 - -**Note**: blockdev accepts the number of 512 byte sectors to read ahead. The argument of 8 above is equivilent to 4KB. - -Since each system is different, use the above recommendations as a starting point and tuning based on your SLA and -throughput requirements. To understand how read ahead impacts disk resource usage we recommend carefully reading through the -:ref:`troubleshooting ` portion of the documentation. - - -Compression -^^^^^^^^^^^^ - -Compressed data is stored by compressing fixed size byte buffers and writing the data to disk. The buffer size is -determined by the ``chunk_length_in_kb`` element in the compression map of the schema settings. - -The default setting is 16KB starting with Cassandra 4.0. - -Since the entire compressed buffer must be read off disk, using too high of a compression chunk length can lead to -significant overhead when reading small records. Combined with the default read ahead setting this can result in massive -read amplification for certain workloads. - -LZ4Compressor is the default and recommended compression algorithm. - -There is additional information on this topic on `The Last Pickle Blog `_. - -Compaction -^^^^^^^^^^^^ - -There are different :ref:`compaction ` strategies available for different workloads. -We recommend reading up on the different strategies to understand which is the best for your environment. Different tables -may (and frequently do) use different compaction strategies on the same cluster. - -Encryption -^^^^^^^^^^^ - -It is significantly easier to set up peer to peer encryption and client server encryption when setting up your production -cluster as opposed to setting it up once the cluster is already serving production traffic. If you are planning on using network encryption -eventually (in any form), we recommend setting it up now. Changing these configurations down the line is not impossible, -but mistakes can result in downtime or data loss. - -Ensure Keyspaces are Created with NetworkTopologyStrategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Production clusters should never use SimpleStrategy. Production keyspaces should use the NetworkTopologyStrategy (NTS). - -For example:: - - create KEYSPACE mykeyspace WITH replication = - {'class': 'NetworkTopologyStrategy', 'datacenter1': 3}; - -NetworkTopologyStrategy allows Cassandra to take advantage of multiple racks and data centers. - -Configure Racks and Snitch -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -**Correctly configuring or changing racks after a cluster has been provisioned is an unsupported process**. 
Migrating from -a single rack to multiple racks is also unsupported and can result in data loss. - -Using ``GossipingPropertyFileSnitch`` is the most flexible solution for on premise or mixed cloud environments. ``Ec2Snitch`` -is reliable for AWS EC2 only environments. - - - - - - - diff --git a/src/doc/4.0-alpha4/_sources/getting_started/querying.rst.txt b/src/doc/4.0-alpha4/_sources/getting_started/querying.rst.txt deleted file mode 100644 index 55b162bb4..000000000 --- a/src/doc/4.0-alpha4/_sources/getting_started/querying.rst.txt +++ /dev/null @@ -1,52 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Inserting and querying ----------------------- - -The API to Cassandra is :ref:`CQL `, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done: - -- either using cqlsh, -- or through a client driver for Cassandra. - -CQLSH -^^^^^ - -cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:: - - $ bin/cqlsh localhost - Connected to Test Cluster at localhost:9042. - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - Use HELP for help. - cqlsh> SELECT cluster_name, listen_address FROM system.local; - - cluster_name | listen_address - --------------+---------------- - Test Cluster | 127.0.0.1 - - (1 rows) - cqlsh> - -See the :ref:`cqlsh section ` for full documentation. - -Client drivers -^^^^^^^^^^^^^^ - -A lot of client drivers are provided by the Community and a list of known drivers is provided in :ref:`the next section -`. You should refer to the documentation of each drivers for more information on how to use them. diff --git a/src/doc/4.0-alpha4/_sources/index.rst.txt b/src/doc/4.0-alpha4/_sources/index.rst.txt deleted file mode 100644 index 302f8e7fa..000000000 --- a/src/doc/4.0-alpha4/_sources/index.rst.txt +++ /dev/null @@ -1,43 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -Welcome to Apache Cassandra's documentation! -============================================ - -This is the official documentation for `Apache Cassandra `__ |version|. If you would like -to contribute to this documentation, you are welcome to do so by submitting your contribution like any other patch -following `these instructions `__. - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting_started/index - new/index - architecture/index - cql/index - data_modeling/index - configuration/index - operating/index - tools/index - troubleshooting/index - development/index - faq/index - plugins/index - - bugs - contactus diff --git a/src/doc/4.0-alpha4/_sources/new/auditlogging.rst.txt b/src/doc/4.0-alpha4/_sources/new/auditlogging.rst.txt deleted file mode 100644 index 0a15a9f6c..000000000 --- a/src/doc/4.0-alpha4/_sources/new/auditlogging.rst.txt +++ /dev/null @@ -1,440 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Audit Logging -------------- - -Audit Logging is a new feature in Apache Cassandra 4.0 (`CASSANDRA-12151 -`_). All database activity is logged to a directory in the local filesystem and the audit log files are rolled periodically. All database operations are monitored and recorded. Audit logs are stored in local directory files instead of the database itself as it provides several benefits, some of which are: - -- No additional database capacity is needed to store audit logs -- No query tool is required while storing the audit logs in the database would require a query tool -- Latency of database operations is not affected; no performance impact -- It is easier to implement file based logging than database based logging - -What does Audit Logging Log? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Audit logging logs: - -1. All authentication which includes successful and failed login attempts -2. All database command requests to CQL. Both failed and successful CQL is logged - -More specifically an audit log entry could be one of two types: - -a) CQL Audit Log Entry Type or -b) Common Audit Log Entry Type - -Each of these types comprises of several database operations. The CQL Audit Log Entry Type could be one of the following; the category of the CQL audit log entry type is listed in parentheses. - -1. SELECT(QUERY), -2. UPDATE(DML), -3. DELETE(DML), -4. TRUNCATE(DDL), -5. CREATE_KEYSPACE(DDL), -6. ALTER_KEYSPACE(DDL), -7. DROP_KEYSPACE(DDL), -8. CREATE_TABLE(DDL), -9. DROP_TABLE(DDL), -10. PREPARE_STATEMENT(PREPARE), -11. DROP_TRIGGER(DDL), -12. LIST_USERS(DCL), -13. CREATE_INDEX(DDL), -14. DROP_INDEX(DDL), -15. GRANT(DCL), -16. REVOKE(DCL), -17. CREATE_TYPE(DDL), -18. DROP_AGGREGATE(DDL), -19. ALTER_VIEW(DDL), -20. CREATE_VIEW(DDL), -21. 
DROP_ROLE(DCL), -22. CREATE_FUNCTION(DDL), -23. ALTER_TABLE(DDL), -24. BATCH(DML), -25. CREATE_AGGREGATE(DDL), -26. DROP_VIEW(DDL), -27. DROP_TYPE(DDL), -28. DROP_FUNCTION(DDL), -29. ALTER_ROLE(DCL), -30. CREATE_TRIGGER(DDL), -31. LIST_ROLES(DCL), -32. LIST_PERMISSIONS(DCL), -33. ALTER_TYPE(DDL), -34. CREATE_ROLE(DCL), -35. USE_KEYSPACE (OTHER). - -The Common Audit Log Entry Type could be one of the following; the category of the Common audit log entry type is listed in parentheses. - -1. REQUEST_FAILURE(ERROR), -2. LOGIN_ERROR(AUTH), -3. UNAUTHORIZED_ATTEMPT(AUTH), -4. LOGIN_SUCCESS (AUTH). - -What Audit Logging does not Log? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Audit logging does not log: - -1. Configuration changes made in ``cassandra.yaml`` -2. Nodetool Commands - -Audit Logging is Flexible and Configurable -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Audit logging is flexible and configurable in ``cassandra.yaml`` as follows: - -- Keyspaces and tables to be monitored and audited may be specified. -- Users to be included/excluded may be specified. By default all users are audit logged. -- Categories of operations to audit or exclude may be specified. -- The frequency at which to roll the log files may be specified. Default frequency is hourly. - -Configuring Audit Logging -^^^^^^^^^^^^^^^^^^^^^^^^^ -Audit Logging is configured on each node separately. Audit Logging is configured in ``cassandra.yaml`` in the ``audit_logging_options`` setting. -The settings may be same/different on each node. - -Enabling Audit Logging -********************** -Audit logging is enabled by setting the ``enabled`` option to ``true`` in the ``audit_logging_options`` setting. - -:: - - audit_logging_options: - enabled: true - -Setting the Logger -****************** -The audit logger is set with the ``logger`` option. - -:: - - logger: BinAuditLogger - -Two types of audit loggers are supported: ``FileAuditLogger`` and ``BinAuditLogger``. -``BinAuditLogger`` is the default setting. The ``BinAuditLogger`` is an efficient way to log events to file in a binary format. - -``FileAuditLogger`` is synchronous, file-based audit logger; just uses the standard logging mechanism. ``FileAuditLogger`` logs events to ``audit/audit.log`` file using ``slf4j`` logger. - -The ``NoOpAuditLogger`` is a No-Op implementation of the audit logger to be used as a default audit logger when audit logging is disabled. - -Setting the Audit Logs Directory -******************************** -The audit logs directory is set with the ``audit_logs_dir`` option. A new directory is not created automatically and an existing directory must be set. Audit Logs directory can be configured using ``cassandra.logdir.audit`` system property or default is set to ``cassandra.logdir + /audit/``. A user created directory may be set. As an example, create a directory for the audit logs and set its permissions. - -:: - - sudo mkdir –p /cassandra/audit/logs/hourly - sudo chmod -R 777 /cassandra/audit/logs/hourly - -Set the directory for the audit logs directory using the ``audit_logs_dir`` option. - -:: - - audit_logs_dir: "/cassandra/audit/logs/hourly" - - -Setting Keyspaces to Audit -************************** -Set the keyspaces to include with the ``included_keyspaces`` option and the keyspaces to exclude with the ``excluded_keyspaces`` option. By default all keyspaces are included. By default, ``system``, ``system_schema`` and ``system_virtual_schema`` are excluded. 
- -:: - - # included_keyspaces: - # excluded_keyspaces: system, system_schema, system_virtual_schema - -Setting Categories to Audit -*************************** - -The categories of database operations to be included are specified with the ``included_categories`` option as a comma separated list. By default all supported categories are included. The categories of database operations to be excluded are specified with ``excluded_categories`` option as a comma separated list. By default no category is excluded. - -:: - - # included_categories: - # excluded_categories: - -The supported categories for audit log are: - -1. QUERY -2. DML -3. DDL -4. DCL -5. OTHER -6. AUTH -7. ERROR -8. PREPARE - -Setting Users to Audit -********************** - -Users to audit log are set with the ``included_users`` and ``excluded_users`` options. The ``included_users`` option specifies a comma separated list of users to include explicitly and by default all users are included. The ``excluded_users`` option specifies a comma separated list of users to exclude explicitly and by default no user is excluded. - -:: - - # included_users: - # excluded_users: - -Setting the Roll Frequency -*************************** -The ``roll_cycle`` option sets the frequency at which the audit log file is rolled. Supported values are ``MINUTELY``, ``HOURLY``, and ``DAILY``. Default value is ``HOURLY``, which implies that after every hour a new audit log file is created. - -:: - - roll_cycle: HOURLY - -An audit log file could get rolled for other reasons as well such as a log file reaches the configured size threshold. - -Setting Archiving Options -************************* - -The archiving options are for archiving the rolled audit logs. The ``archive`` command to use is set with the ``archive_command`` option and the ``max_archive_retries`` sets the maximum # of tries of failed archive commands. - -:: - - # archive_command: - # max_archive_retries: 10 - -Default archive command is ``"/path/to/script.sh %path"`` where ``%path`` is replaced with the file being rolled: - -Other Settings -*************** - -The other audit logs settings are as follows. - -:: - - # block: true - # max_queue_weight: 268435456 # 256 MiB - # max_log_size: 17179869184 # 16 GiB - -The ``block`` option specifies whether the audit logging should block if the logging falls behind or should drop log records. - -The ``max_queue_weight`` option sets the maximum weight of in memory queue for records waiting to be written to the file before blocking or dropping. - -The ``max_log_size`` option sets the maximum size of the rolled files to retain on disk before deleting the oldest. - -Using Nodetool to Enable Audit Logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``nodetool enableauditlog`` command may be used to enable audit logs and it overrides the settings in ``cassandra.yaml``. The ``nodetool enableauditlog`` command syntax is as follows. - -:: - - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] enableauditlog - [--excluded-categories ] - [--excluded-keyspaces ] - [--excluded-users ] - [--included-categories ] - [--included-keyspaces ] - [--included-users ] [--logger ] - -OPTIONS - --excluded-categories - Comma separated list of Audit Log Categories to be excluded for - audit log. If not set the value from cassandra.yaml will be used - - --excluded-keyspaces - Comma separated list of keyspaces to be excluded for audit log. 
If - not set the value from cassandra.yaml will be used - - --excluded-users - Comma separated list of users to be excluded for audit log. If not - set the value from cassandra.yaml will be used - - -h , --host - Node hostname or ip address - - --included-categories - Comma separated list of Audit Log Categories to be included for - audit log. If not set the value from cassandra.yaml will be used - - --included-keyspaces - Comma separated list of keyspaces to be included for audit log. If - not set the value from cassandra.yaml will be used - - --included-users - Comma separated list of users to be included for audit log. If not - set the value from cassandra.yaml will be used - - --logger - Logger name to be used for AuditLogging. Default BinAuditLogger. If - not set the value from cassandra.yaml will be used - - -p , --port - Remote jmx agent port number - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - -u , --username - Remote jmx agent username - - -The ``nodetool disableauditlog`` command disables audit log. The command syntax is as follows. - -:: - - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] disableauditlog - -OPTIONS - -h , --host - Node hostname or ip address - - -p , --port - Remote jmx agent port number - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - -u , --username - Remote jmx agent username - -Viewing the Audit Logs -^^^^^^^^^^^^^^^^^^^^^^ -An audit log event comprises of a keyspace that is being audited, the operation that is being logged, the scope and the user. An audit log entry comprises of the following attributes concatenated with a "|". - -:: - - type (AuditLogEntryType): Type of request - source (InetAddressAndPort): Source IP Address from which request originated - user (String): User name - timestamp (long ): Timestamp of the request - batch (UUID): Batch of request - keyspace (String): Keyspace on which request is made - scope (String): Scope of request such as Table/Function/Aggregate name - operation (String): Database operation such as CQL command - options (QueryOptions): CQL Query options - state (QueryState): State related to a given query - -Some of these attributes may not be applicable to a given request and not all of these options must be set. - -An Audit Logging Demo -^^^^^^^^^^^^^^^^^^^^^^ -To demonstrate audit logging enable and configure audit logs with following settings. - -:: - - audit_logging_options: - enabled: true - logger: BinAuditLogger - audit_logs_dir: "/cassandra/audit/logs/hourly" - # included_keyspaces: - # excluded_keyspaces: system, system_schema, system_virtual_schema - # included_categories: - # excluded_categories: - # included_users: - # excluded_users: - roll_cycle: HOURLY - # block: true - # max_queue_weight: 268435456 # 256 MiB - # max_log_size: 17179869184 # 16 GiB - ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled: - # archive_command: - # max_archive_retries: 10 - -Create the audit log directory ``/cassandra/audit/logs/hourly`` and set its permissions as discussed earlier. Run some CQL commands such as create a keyspace, create a table and query a table. 
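For example, a minimal session could look like the following (a sketch that mirrors the demo keyspace and table used in
the Full Query Logging section later in this document)::

    cqlsh> CREATE KEYSPACE AuditLogKeyspace
       ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};
    cqlsh> USE AuditLogKeyspace;
    cqlsh:auditlogkeyspace> CREATE TABLE t (id int, k int, v text, PRIMARY KEY (id));
    cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
    cqlsh:auditlogkeyspace> SELECT * FROM t;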
Any supported CQL commands may be run as discussed in section **What does Audit Logging Log?**. Change directory (with ``cd`` command) to the audit logs directory. - -:: - - cd /cassandra/audit/logs/hourly - -List the files/directories and some ``.cq4`` files should get listed. These are the audit logs files. - -:: - - [ec2-user@ip-10-0-2-238 hourly]$ ls -l - total 28 - -rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug 2 03:01 20190802-02.cq4 - -rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug 2 03:01 20190802-03.cq4 - -rw-rw-r--. 1 ec2-user ec2-user 65536 Aug 2 03:01 directory-listing.cq4t - -The ``auditlogviewer`` tool is used to dump audit logs. Run the ``auditlogviewer`` tool. Audit log files directory path is a required argument. The output should be similar to the following output. - -:: - - [ec2-user@ip-10-0-2-238 hourly]$ auditlogviewer /cassandra/audit/logs/hourly - WARN 03:12:11,124 Using Pauser.sleepy() as not enough processors, have 2, needs 8+ - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711427328|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE AuditLogKeyspace; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711427329|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE "auditlogkeyspace" - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711446279|type :SELECT|category:QUERY|ks:auditlogkeyspace|scope:t|operation:SELECT * FROM t; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564713878834|type :DROP_TABLE|category:DDL|ks:auditlogkeyspace|scope:t|operation:DROP TABLE IF EXISTS - AuditLogKeyspace.t; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/3.91.56.164|port:42382|timestamp:1564714618360|ty - pe:REQUEST_FAILURE|category:ERROR|operation:CREATE KEYSPACE AuditLogKeyspace - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};; Cannot add - existing keyspace "auditlogkeyspace" - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564714690968|type :DROP_KEYSPACE|category:DDL|ks:auditlogkeyspace|operation:DROP KEYSPACE AuditLogKeyspace; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/3.91.56.164|port:42406|timestamp:1564714708329|ty pe:CREATE_KEYSPACE|category:DDL|ks:auditlogkeyspace|operation:CREATE KEYSPACE - AuditLogKeyspace - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564714870678|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE auditlogkeyspace; - [ec2-user@ip-10-0-2-238 hourly]$ - - -The ``auditlogviewer`` tool usage syntax is as follows. - -:: - - ./auditlogviewer - Audit log files directory path is a required argument. - usage: auditlogviewer [...] [options] - -- - View the audit log contents in human readable format - -- - Options are: - -f,--follow Upon reaching the end of the log continue indefinitely - waiting for more records - -h,--help display this help message - -r,--roll_cycle How often to roll the log file was rolled. May be - necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY, - DAILY). Default HOURLY. 
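For instance, to follow the audit log directory from the demo above as new records are written (an illustrative
invocation built from the options listed above)::

    $ auditlogviewer /cassandra/audit/logs/hourly --follow --roll_cycle HOURLY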
- -Diagnostic events for user audit logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Any native transport enabled client is able to subscribe to diagnostic events that are raised around authentication and CQL operations. These events can then be consumed and used by external tools to implement a Cassandra user auditing solution. - diff --git a/src/doc/4.0-alpha4/_sources/new/fqllogging.rst.txt b/src/doc/4.0-alpha4/_sources/new/fqllogging.rst.txt deleted file mode 100644 index 881f39fa8..000000000 --- a/src/doc/4.0-alpha4/_sources/new/fqllogging.rst.txt +++ /dev/null @@ -1,689 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Full Query Logging ------------------- - -Apache Cassandra 4.0 adds a new feature to support a means of logging all queries as they were invoked (`CASSANDRA-13983 -`_). For correctness testing it's useful to be able to capture production traffic so that it can be replayed against both the old and new versions of Cassandra while comparing the results. - -Cassandra 4.0 includes an implementation of a full query logging (FQL) that uses chronicle-queue to implement a rotating log of queries. Some of the features of FQL are: - -- Single thread asynchronously writes log entries to disk to reduce impact on query latency -- Heap memory usage bounded by a weighted queue with configurable maximum weight sitting in front of logging thread -- If the weighted queue is full producers can be blocked or samples can be dropped -- Disk utilization is bounded by deleting old log segments once a configurable size is reached -- The on disk serialization uses a flexible schema binary format (chronicle-wire) making it easy to skip unrecognized fields, add new ones, and omit old ones. -- Can be enabled and configured via JMX, disabled, and reset (delete on disk data), logging path is configurable via both JMX and YAML -- Introduce new ``fqltool`` in ``/bin`` that currently implements ``Dump`` which can dump in a readable format full query logs as well as follow active full query logs. FQL ``Replay`` and ``Compare`` are also available. - -Cassandra 4.0 has a binary full query log based on Chronicle Queue that can be controlled using ``nodetool enablefullquerylog``, ``disablefullquerylog``, and ``resetfullquerylog``. The log contains all queries invoked, approximate time they were invoked, any parameters necessary to bind wildcard values, and all query options. A readable version of the log can be dumped or tailed using the new ``bin/fqltool`` utility. The full query log is designed to be safe to use in production and limits utilization of heap memory and disk space with limits you can specify when enabling the log. - -Objective -^^^^^^^^^^ -Full Query Logging logs all requests to the CQL interface. 
The full query logs could be used for debugging, performance benchmarking, testing and auditing CQL queries. The audit logs also include CQL requests but full query logging is dedicated to CQL requests only with features such as FQL Replay and FQL Compare that are not available in audit logging. - -Full Query Logger -^^^^^^^^^^^^^^^^^^ -The Full Query Logger is a logger that logs entire query contents after the query finishes. FQL only logs the queries that successfully complete. The other queries (e.g. timed out, failed) are not to be logged. Queries are logged in one of two modes: single query or batch of queries. The log for an invocation of a batch of queries includes the following attributes: - -:: - - type - The type of the batch - queries - CQL text of the queries - values - Values to bind to as parameters for the queries - queryOptions - Options associated with the query invocation - queryState - Timestamp state associated with the query invocation - batchTimeMillis - Approximate time in milliseconds since the epoch since the batch was invoked - -The log for single CQL query includes the following attributes: - -:: - - query - CQL query text - queryOptions - Options associated with the query invocation - queryState - Timestamp state associated with the query invocation - queryTimeMillis - Approximate time in milliseconds since the epoch since the batch was invoked - -Full query logging is backed up by ``BinLog``. BinLog is a quick and dirty binary log. Its goal is good enough performance, predictable footprint, simplicity in terms of implementation and configuration and most importantly minimal impact on producers of log records. Performance safety is accomplished by feeding items to the binary log using a weighted queue and dropping records if the binary log falls sufficiently far behind. Simplicity and good enough performance is achieved by using a single log writing thread as well as Chronicle Queue to handle writing the log, making it available for readers, as well as log rolling. - -Weighted queue is a wrapper around any blocking queue that turns it into a blocking weighted queue. The queue will weigh each element being added and removed. Adding to the queue is blocked if adding would violate the weight bound. If an element weighs in at larger than the capacity of the queue then exactly one such element will be allowed into the queue at a time. If the weight of an object changes after it is added it could create issues. Checking weight should be cheap so memorize expensive to compute weights. If weight throws that can also result in leaked permits so it's always a good idea to memorize weight so it doesn't throw. In the interests of not writing unit tests for methods no one uses there is a lot of ``UnsupportedOperationException``. If you need them then add them and add proper unit tests to ``WeightedQueueTest``. "Good" tests. 100% coverage including exception paths and resource leaks. - - -The FQL tracks information about store files: - -- Store files as they are added and their storage impact. Delete them if over storage limit. -- The files in the chronicle queue that have already rolled -- The number of bytes in store files that have already rolled - -FQL logger sequence is as follows: - -1. Start the consumer thread that writes log records. Can only be done once. -2. Offer a record to the log. If the in memory queue is full the record will be dropped and offer will return false. -3. Put a record into the log. 
If the in memory queue is full the putting thread will be blocked until there is space or it is interrupted. -4. Clean up the buffers on thread exit, finalization will check again once this is no longer reachable ensuring there are no stragglers in the queue. -5. Stop the consumer thread that writes log records. Can be called multiple times. - -Next, we shall demonstrate full query logging with an example. - - -Configuring Full Query Logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Full Query Logger default options are configured on a per node basis in ``cassandra.yaml`` with following configuration property. - -:: - - full_query_logging_options: - -As an example setup create a three node Cassandra 4.0 cluster. The ``nodetool status`` command lists the nodes in the cluster. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool status - Datacenter: us-east-1 - ===================== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- AddressLoad Tokens Owns (effective) Host ID Rack - UN 10.0.1.115 442.42 KiB 25632.6% b64cb32a-b32a-46b4-9eeb-e123fa8fc287 us-east-1b - UN 10.0.3.206 559.52 KiB 25631.9% 74863177-684b-45f4-99f7-d1006625dc9e us-east-1d - UN 10.0.2.238 587.87 KiB 25635.5% 4dcdadd2-41f9-4f34-9892-1f20868b27c7 us-east-1c - - -In subsequent sub-sections we shall discuss enabling and configuring full query logging. - -Setting the FQL Directory -************************* - -A dedicated directory path must be provided to write full query log data to when the full query log is enabled. The directory for FQL must exist, and have permissions set. The full query log will recursively delete the contents of this path at times. It is recommended not to place links in this directory to other sections of the filesystem. The ``full_query_log_dir`` property in ``cassandra.yaml`` is pre-configured. - -:: - - full_query_log_dir: /tmp/cassandrafullquerylog - -The ``log_dir`` option may be used to configure the FQL directory if the ``full_query_log_dir`` is not set. - -:: - - full_query_logging_options: - # log_dir: - -Create the FQL directory if it does not exist and set its permissions. - -:: - - sudo mkdir -p /tmp/cassandrafullquerylog - sudo chmod -R 777 /tmp/cassandrafullquerylog - -Setting the Roll Cycle -********************** - -The ``roll_cycle`` option sets how often to roll FQL log segments so they can potentially be reclaimed. Supported values are ``MINUTELY``, ``HOURLY`` and ``DAILY``. Default setting is ``HOURLY``. - -:: - - roll_cycle: HOURLY - -Setting Other Options -********************* - -The ``block`` option specifies whether the FQL should block if the FQL falls behind or should drop log records. Default value of ``block`` is ``true``. The ``max_queue_weight`` option sets the maximum weight of in memory queue for records waiting to be written to the file before blocking or dropping. The ``max_log_size`` option sets the maximum size of the rolled files to retain on disk before deleting the oldest file. The ``archive_command`` option sets the archive command to execute on rolled log files. The ``max_archive_retries`` option sets the max number of retries of failed archive commands. - -:: - - # block: true - # max_queue_weight: 268435456 # 256 MiB - # max_log_size: 17179869184 # 16 GiB - ## archive command is "/path/to/script.sh %path" where %path is replaced with the file - being rolled: - # archive_command: - # max_archive_retries: 10 - -The ``max_queue_weight`` must be > 0. Similarly ``max_log_size`` must be > 0. An example full query logging options is as follows. 
- -:: - - full_query_log_dir: /tmp/cassandrafullquerylog - - # default options for full query logging - these can be overridden from command line when - executing - # nodetool enablefullquerylog - # nodetool enablefullquerylog - #full_query_logging_options: - # log_dir: - roll_cycle: HOURLY - # block: true - # max_queue_weight: 268435456 # 256 MiB - # max_log_size: 17179869184 # 16 GiB - ## archive command is "/path/to/script.sh %path" where %path is replaced with the file - being rolled: - # archive_command: - # max_archive_retries: 10 - -The ``full_query_log_dir`` setting is not within the ``full_query_logging_options`` but still is for full query logging. - -Enabling Full Query Logging -*************************** - -Full Query Logging is enabled on a per-node basis. . The ``nodetool enablefullquerylog`` command is used to enable full query logging. Defaults for the options are configured in ``cassandra.yaml`` and these can be overridden from command line. - -The syntax of the nodetool enablefullquerylog command is as follows: - -:: - - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] enablefullquerylog - [--archive-command ] [--blocking] - [--max-archive-retries ] - [--max-log-size ] [--max-queue-weight ] - [--path ] [--roll-cycle ] - - OPTIONS - --archive-command - Command that will handle archiving rolled full query log files. - Format is "/path/to/script.sh %path" where %path will be replaced - with the file to archive - - --blocking - If the queue is full whether to block producers or drop samples. - - -h , --host - Node hostname or ip address - - --max-archive-retries - Max number of archive retries. - - --max-log-size - How many bytes of log data to store before dropping segments. Might - not be respected if a log file hasn't rolled so it can be deleted. - - --max-queue-weight - Maximum number of bytes of query data to queue to disk before - blocking or dropping samples. - - -p , --port - Remote jmx agent port number - - --path - Path to store the full query log at. Will have it's contents - recursively deleted. - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - --roll-cycle - How often to roll the log file (MINUTELY, HOURLY, DAILY). - - -u , --username - Remote jmx agent username - -Run the following command on each node in the cluster. - -:: - - nodetool enablefullquerylog --path /tmp/cassandrafullquerylog - -After the full query logging has been enabled run some CQL statements to generate full query logs. - -Running CQL Statements -^^^^^^^^^^^^^^^^^^^^^^^ - -Start CQL interface with ``cqlsh`` command. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cqlsh - Connected to Cassandra Cluster at 127.0.0.1:9042. - [cqlsh 5.0.1 | Cassandra 4.0-SNAPSHOT | CQL spec 3.4.5 | Native protocol v4] - Use HELP for help. - cqlsh> - -Run some CQL statements. Create a keyspace. Create a table and add some data. Query the table. - -:: - - cqlsh> CREATE KEYSPACE AuditLogKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}; - cqlsh> USE AuditLogKeyspace; - cqlsh:auditlogkeyspace> CREATE TABLE t ( - ...id int, - ...k int, - ...v text, - ...PRIMARY KEY (id) - ... 
); - cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (0, 1, 'val1'); - cqlsh:auditlogkeyspace> SELECT * FROM t; - - id | k | v - ----+---+------ - 0 | 1 | val1 - - (1 rows) - cqlsh:auditlogkeyspace> - -Viewing the Full Query Logs -^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``fqltool`` is used to view the full query logs. The ``fqltool`` has the following usage syntax. - -:: - - fqltool [] - - The most commonly used fqltool commands are: - compare Compare result files generated by fqltool replay - dump Dump the contents of a full query log - help Display help information - replay Replay full query logs - - See 'fqltool help ' for more information on a specific command. - -The ``fqltool dump`` command is used to dump (list) the contents of a full query log. Run the ``fqltool dump`` command after some CQL statements have been run. - -The full query logs get listed. Truncated output is as follows: - -:: - - [ec2-user@ip-10-0-2-238 cassandrafullquerylog]$ fqltool dump ./ - WARN [main] 2019-08-02 03:07:53,635 Slf4jExceptionHandler.java:42 - Using Pauser.sleepy() as not enough processors, have 2, needs 8+ - Type: single-query - Query start time: 1564708322030 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system.peers - Values: - - Type: single-query - Query start time: 1564708322054 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system.local WHERE key='local' - Values: - - Type: single-query - Query start time: 1564708322109 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.keyspaces - Values: - - Type: single-query - Query start time: 1564708322116 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.tables - Values: - - Type: single-query - Query start time: 1564708322139 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.columns - Values: - - Type: single-query - Query start time: 1564708322142 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.functions - Values: - - Type: single-query - Query start time: 1564708322141 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.aggregates - Values: - - Type: single-query - Query start time: 1564708322143 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.types - Values: - - Type: single-query - Query start time: 1564708322144 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.indexes - Values: - - Type: single-query - Query start time: 1564708322142 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.triggers - Values: - - Type: single-query - Query start time: 1564708322145 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.views - Values: - - Type: single-query 
- Query start time: 1564708345408 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:-2147483648 - Query: CREATE KEYSPACE AuditLogKeyspace - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}; - Values: - - Type: single-query - Query start time: 1564708345675 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708345 - Query: SELECT peer, rpc_address, schema_version FROM system.peers - Values: - - Type: single-query - Query start time: 1564708345676 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708345 - Query: SELECT schema_version FROM system.local WHERE key='local' - Values: - - Type: single-query - Query start time: 1564708346323 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708346 - Query: SELECT * FROM system_schema.keyspaces WHERE keyspace_name = 'auditlogkeyspace' - Values: - - Type: single-query - Query start time: 1564708360873 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:-2147483648 - Query: USE AuditLogKeyspace; - Values: - - Type: single-query - Query start time: 1564708360874 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:-2147483648 - Query: USE "auditlogkeyspace" - Values: - - Type: single-query - Query start time: 1564708378837 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:-2147483648 - Query: CREATE TABLE t ( - id int, - k int, - v text, - PRIMARY KEY (id) - ); - Values: - - Type: single-query - Query start time: 1564708379247 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708379 - Query: SELECT * FROM system_schema.tables WHERE keyspace_name = 'auditlogkeyspace' AND table_name = 't' - Values: - - Type: single-query - Query start time: 1564708379255 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708379 - Query: SELECT * FROM system_schema.views WHERE keyspace_name = 'auditlogkeyspace' AND view_name = 't' - Values: - - Type: single-query - Query start time: 1564708397144 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708397 - Query: INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - Values: - - Type: single-query - Query start time: 1564708397167 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708397 - Query: INSERT INTO t (id, k, v) VALUES (0, 1, 'val1'); - Values: - - Type: single-query - Query start time: 1564708434782 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708434 - Query: SELECT * FROM t; - Values: - - [ec2-user@ip-10-0-2-238 cassandrafullquerylog]$ - - - -Full query logs are generated on each node. Enabling of full query logging on one node and the log files generated on the node are as follows: - -:: - - [root@localhost ~]# ssh -i cassandra.pem ec2-user@52.1.243.83 - Last login: Fri Aug 2 00:14:53 2019 from 75.155.255.51 - [ec2-user@ip-10-0-3-206 ~]$ sudo mkdir /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-3-206 ~]$ sudo chmod -R 777 /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-3-206 ~]$ nodetool enablefullquerylog --path /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-3-206 ~]$ cd /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-3-206 cassandrafullquerylog]$ ls -l - total 44 - -rw-rw-r--. 
1 ec2-user ec2-user 83886080 Aug 2 01:24 20190802-01.cq4
    -rw-rw-r--. 1 ec2-user ec2-user    65536 Aug 2 01:23 directory-listing.cq4t
    [ec2-user@ip-10-0-3-206 cassandrafullquerylog]$

Enabling full query logging on another node, and the log files generated on that node, are as follows:

::

    [root@localhost ~]# ssh -i cassandra.pem ec2-user@3.86.103.229
    Last login: Fri Aug  2 00:13:04 2019 from 75.155.255.51
    [ec2-user@ip-10-0-1-115 ~]$ sudo mkdir /tmp/cassandrafullquerylog
    [ec2-user@ip-10-0-1-115 ~]$ sudo chmod -R 777 /tmp/cassandrafullquerylog
    [ec2-user@ip-10-0-1-115 ~]$ nodetool enablefullquerylog --path /tmp/cassandrafullquerylog
    [ec2-user@ip-10-0-1-115 ~]$ cd /tmp/cassandrafullquerylog
    [ec2-user@ip-10-0-1-115 cassandrafullquerylog]$ ls -l
    total 44
    -rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug 2 01:24 20190802-01.cq4
    -rw-rw-r--. 1 ec2-user ec2-user    65536 Aug 2 01:23 directory-listing.cq4t
    [ec2-user@ip-10-0-1-115 cassandrafullquerylog]$

The ``nodetool resetfullquerylog`` command stops the full query logger if it is enabled and deletes the generated files, both in the last used full query log path and in the path configured in ``cassandra.yaml``.

Full Query Replay
^^^^^^^^^^^^^^^^^

The ``fqltool`` utility provides the ``replay`` command (`CASSANDRA-14618 <https://issues.apache.org/jira/browse/CASSANDRA-14618>`_) to replay the full query logs. FQL replay can be run on a different machine, or even a different cluster, for testing, debugging and performance benchmarking.

The main objectives of ``fqltool replay`` are:

- To be able to compare different runs of production traffic against different versions/configurations of Cassandra.
- Take FQL logs from several machines and replay them in "order" by the timestamps recorded.
- Record the results from each run to be able to compare different runs (against different clusters/versions/etc).
- If ``fqltool replay`` is run against two or more clusters, the results can be compared.

FQL replay can also be used on the same node on which the full query logs were generated, for example to recreate a dropped database object.

The syntax of ``fqltool replay`` is as follows:

::

    fqltool replay [--keyspace <keyspace>] [--results <results>]
            [--store-queries <store_queries>] --target <target>... [--]
            [<path1> <path2>...<pathN>]

    OPTIONS
            --keyspace <keyspace>
                Only replay queries against this keyspace and queries without
                keyspace set.

            --results <results>
                Where to store the results of the queries, this should be a
                directory. Leave this option out to avoid storing results.

            --store-queries <store_queries>
                Path to store the queries executed. Stores queries in the same order
                as the result sets are in the result files. Requires --results

            --target <target>
                Hosts to replay the logs to, can be repeated to replay to more
                hosts.

            --
                This option can be used to separate command-line options from the
                list of argument, (useful when arguments might be mistaken for
                command-line options

            [<path1> <path2>...<pathN>]
                Paths containing the full query logs to replay.

As an example of using ``fqltool replay``, drop a keyspace.

::

    cqlsh:auditlogkeyspace> DROP KEYSPACE AuditLogKeyspace;

Subsequently run ``fqltool replay``. The directory to store the results of the queries and the directory to store the queries that were run are specified on the command line, and these directories must be created, with permissions set, before running ``fqltool replay``. The ``--results`` and ``--store-queries`` directories are optional, but if ``--store-queries`` is set then ``--results`` must also be set.
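As a sketch, the results and queries directories used in the replay example that follows could be prepared in the same way as the FQL directory was earlier; the paths are illustrative and simply need to match the values passed to ``--results`` and ``--store-queries``.

::

    sudo mkdir -p /cassandra/fql/logs/results/replay
    sudo mkdir -p /cassandra/fql/logs/queries/replay
    sudo chmod -R 777 /cassandra/fql/logs/results/replay
    sudo chmod -R 777 /cassandra/fql/logs/queries/replay

With the directories in place, run ``fqltool replay``.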
::

    [ec2-user@ip-10-0-2-238 cassandra]$ fqltool replay --keyspace AuditLogKeyspace \
        --results /cassandra/fql/logs/results/replay \
        --store-queries /cassandra/fql/logs/queries/replay \
        --target 3.91.56.164 -- /tmp/cassandrafullquerylog

Describe the keyspaces after running ``fqltool replay`` and the keyspace that was dropped is listed again.

::

    cqlsh:auditlogkeyspace> DESC KEYSPACES;

    system_schema  system            system_distributed  system_virtual_schema
    system_auth    auditlogkeyspace  system_traces       system_views

    cqlsh:auditlogkeyspace>

Full Query Compare
^^^^^^^^^^^^^^^^^^

The ``fqltool compare`` command (`CASSANDRA-14619 <https://issues.apache.org/jira/browse/CASSANDRA-14619>`_) is used to compare result files generated by ``fqltool replay``. It takes the recorded runs from ``fqltool replay`` and compares them, outputting any differences and potentially all queries against the mismatching partition up until the mismatch.

As an example, ``fqltool compare`` can be used to compare result files generated by different versions of Cassandra, or by different Cassandra configurations. The command usage is as follows:

::

    [ec2-user@ip-10-0-2-238 ~]$ fqltool help compare
    NAME
            fqltool compare - Compare result files generated by fqltool replay

    SYNOPSIS
            fqltool compare --queries <queries> [--] [<path1> <path2>...<pathN>]

    OPTIONS
            --queries <queries>
                Directory to read the queries from. It is produced by the fqltool
                replay --store-queries option.

            --
                This option can be used to separate command-line options from the
                list of argument, (useful when arguments might be mistaken for
                command-line options

            [<path1> <path2>...<pathN>]
                Directories containing result files to compare.

``fqltool compare`` stores each row as a separate Chronicle document, to avoid having to read the entire result set into memory when comparing. The document formats are as follows.

To mark the start of a new result set:

::

    -------------------
    version: int16
    type: column_definitions
    column_count: int32;
    column_definition: text, text
    column_definition: text, text
    ....
    --------------------

To mark a failed query set:

::

    ---------------------
    version: int16
    type: query_failed
    message: text
    ---------------------

To mark a row set:

::

    --------------------
    version: int16
    type: row
    row_column_count: int32
    column: bytes
    ---------------------

To mark the end of a result set:

::

    -------------------
    version: int16
    type: end_resultset
    -------------------

Performance Overhead of FQL
^^^^^^^^^^^^^^^^^^^^^^^^^^^

In performance testing, FQL appears to have little or no overhead in ``WRITE``-only workloads and a minor overhead in ``MIXED`` workloads.

diff --git a/src/doc/4.0-alpha4/_sources/new/index.rst.txt b/src/doc/4.0-alpha4/_sources/new/index.rst.txt deleted file mode 100644 index 5ef867ba1..000000000 --- a/src/doc/4.0-alpha4/_sources/new/index.rst.txt +++ /dev/null @@ -1,32 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. 
distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -New Features in Apache Cassandra 4.0 -==================================== - -This section covers the new features in Apache Cassandra 4.0. - -.. toctree:: - :maxdepth: 2 - - java11 - virtualtables - auditlogging - fqllogging - messaging - streaming - transientreplication - diff --git a/src/doc/4.0-alpha4/_sources/new/java11.rst.txt b/src/doc/4.0-alpha4/_sources/new/java11.rst.txt deleted file mode 100644 index df906d409..000000000 --- a/src/doc/4.0-alpha4/_sources/new/java11.rst.txt +++ /dev/null @@ -1,274 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Support for Java 11 -------------------- - -In the new Java release cadence a new Java version is made available every six months. The more frequent release cycle -is favored as it brings new Java features to the developers as and when they are developed without the wait that the -earlier 3 year release model incurred. Not every Java version is a Long Term Support (LTS) version. After Java 8 the -next LTS version is Java 11. Java 9, 10, 12 and 13 are all non-LTS versions. - -One of the objectives of the Apache Cassandra 4.0 version is to support the recent LTS Java versions 8 and 11 (`CASSANDRA-9608 -`_). Java 8 and -Java 11 may be used to build and run Apache Cassandra 4.0. - -**Note**: Support for JDK 11 in Apache Cassandra 4.0 is an experimental feature, and not recommended for production use. - -Support Matrix -^^^^^^^^^^^^^^ - -The support matrix for the Java versions for compiling and running Apache Cassandra 4.0 is detailed in Table 1. The -build version is along the vertical axis and the run version is along the horizontal axis. - -Table 1 : Support Matrix for Java - -+---------------+--------------+-----------------+ -| | Java 8 (Run) | Java 11 (Run) | -+---------------+--------------+-----------------+ -| Java 8 (Build)|Supported |Supported | -+---------------+--------------+-----------------+ -| Java 11(Build)| Not Supported|Supported | -+---------------+--------------+-----------------+ - -Essentially Apache 4.0 source code built with Java 11 cannot be run with Java 8. Next, we shall discuss using each of Java 8 and 11 to build and run Apache Cassandra 4.0. - -Using Java 8 to Build -^^^^^^^^^^^^^^^^^^^^^ - -To start with, install Java 8. As an example, for installing Java 8 on RedHat Linux the command is as follows: - -:: - -$ sudo yum install java-1.8.0-openjdk-devel - -Set ``JAVA_HOME`` and ``JRE_HOME`` environment variables in the shell bash script. 
First, open the bash script: - -:: - -$ sudo vi ~/.bashrc - -Set the environment variables including the ``PATH``. - -:: - - $ export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk - $ export JRE_HOME=/usr/lib/jvm/java-1.8.0-openjdk/jre - $ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin - -Download and install Apache Cassandra 4.0 source code from the Git along with the dependencies. - -:: - - $ git clone https://github.com/apache/cassandra.git - -If Cassandra is already running stop Cassandra with the following command. - -:: - - [ec2-user@ip-172-30-3-146 bin]$ ./nodetool stopdaemon - -Build the source code from the ``cassandra`` directory, which has the ``build.xml`` build script. The Apache Ant uses the Java version set in the ``JAVA_HOME`` environment variable. - -:: - - $ cd ~/cassandra - $ ant - -Apache Cassandra 4.0 gets built with Java 8. Set the environment variable for ``CASSANDRA_HOME`` in the bash script. Also add the ``CASSANDRA_HOME/bin`` to the ``PATH`` variable. - -:: - - $ export CASSANDRA_HOME=~/cassandra - $ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$CASSANDRA_HOME/bin - -To run Apache Cassandra 4.0 with either of Java 8 or Java 11 run the Cassandra application in the ``CASSANDRA_HOME/bin`` directory, which is in the ``PATH`` env variable. - -:: - - $ cassandra - -The Java version used to run Cassandra gets output as Cassandra is getting started. As an example if Java 11 is used, the run output should include similar to the following output snippet: - -:: - - INFO [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:480 - Hostname: ip-172-30-3- - 146.ec2.internal:7000:7001 - INFO [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:487 - JVM vendor/version: OpenJDK - 64-Bit Server VM/11.0.3 - INFO [main] 2019-07-31 21:18:16,863 CassandraDaemon.java:488 - Heap size: - 1004.000MiB/1004.000MiB - -The following output indicates a single node Cassandra 4.0 cluster has started. - -:: - - INFO [main] 2019-07-31 21:18:19,687 InboundConnectionInitiator.java:130 - Listening on - address: (127.0.0.1:7000), nic: lo, encryption: enabled (openssl) - ... - ... - INFO [main] 2019-07-31 21:18:19,850 StorageService.java:512 - Unable to gossip with any - peers but continuing anyway since node is in its own seed list - INFO [main] 2019-07-31 21:18:19,864 StorageService.java:695 - Loading persisted ring state - INFO [main] 2019-07-31 21:18:19,865 StorageService.java:814 - Starting up server gossip - INFO [main] 2019-07-31 21:18:20,088 BufferPool.java:216 - Global buffer pool is enabled, - when pool is exhausted (max is 251.000MiB) it will allocate on heap - INFO [main] 2019-07-31 21:18:20,110 StorageService.java:875 - This node will not auto - bootstrap because it is configured to be a seed node. - ... - ... - INFO [main] 2019-07-31 21:18:20,809 StorageService.java:1507 - JOINING: Finish joining ring - INFO [main] 2019-07-31 21:18:20,921 StorageService.java:2508 - Node 127.0.0.1:7000 state - jump to NORMAL - -Using Java 11 to Build -^^^^^^^^^^^^^^^^^^^^^^ -If Java 11 is used to build Apache Cassandra 4.0, first Java 11 must be installed and the environment variables set. As an example, to download and install Java 11 on RedHat Linux run the following command. - -:: - - $ yum install java-11-openjdk-devel - -Set the environment variables in the bash script for Java 11. The first command is to open the bash script. 
::

    $ sudo vi ~/.bashrc
    $ export JAVA_HOME=/usr/lib/jvm/java-11-openjdk
    $ export JRE_HOME=/usr/lib/jvm/java-11-openjdk/jre
    $ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin

To build the source code with Java 11, one of the following two options must be used.

1. Include the Apache Ant command-line option ``-Duse.jdk11=true`` as follows:

   ::

       $ ant -Duse.jdk11=true

2. Set the environment variable ``CASSANDRA_USE_JDK11`` to ``true``:

   ::

       $ export CASSANDRA_USE_JDK11=true

As an example, set the environment variable ``CASSANDRA_USE_JDK11`` to ``true``.

::

    [ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true
    [ec2-user@ip-172-30-3-146 cassandra]$ ant
    Buildfile: /home/ec2-user/cassandra/build.xml

Or, set the command-line option.

::

    [ec2-user@ip-172-30-3-146 cassandra]$ ant -Duse.jdk11=true

The build output should include the following.

::

    _build_java:
        [echo] Compiling for Java 11
    ...
    ...
    build:

    _main-jar:
        [copy] Copying 1 file to /home/ec2-user/cassandra/build/classes/main/META-INF
        [jar] Building jar: /home/ec2-user/cassandra/build/apache-cassandra-4.0-SNAPSHOT.jar
    ...
    ...
    _build-test:
        [javac] Compiling 739 source files to /home/ec2-user/cassandra/build/test/classes
        [copy] Copying 25 files to /home/ec2-user/cassandra/build/test/classes
    ...
    ...
    jar:
        [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/stress/META-INF
        [mkdir] Created dir: /home/ec2-user/cassandra/build/tools/lib
        [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/stress.jar
        [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/fqltool/META-INF
        [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/fqltool.jar

    BUILD SUCCESSFUL
    Total time: 1 minute 3 seconds
    [ec2-user@ip-172-30-3-146 cassandra]$

Common Issues
^^^^^^^^^^^^^

One of the two options mentioned above must be used to compile with JDK 11, or the build fails and the following error message is output.

::

    [ec2-user@ip-172-30-3-146 cassandra]$ ant
    Buildfile: /home/ec2-user/cassandra/build.xml
    validate-build-conf:

    BUILD FAILED
    /home/ec2-user/cassandra/build.xml:293: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true must
    be set when building from java 11
    Total time: 1 second
    [ec2-user@ip-172-30-3-146 cassandra]$

Apache Cassandra 4.0 source code built with Java 11 may only be run with Java 11. If code built with Java 11 is run with Java 8, the following error message is output.

::

    [root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com
    Last login: Wed Jul 31 20:47:26 2019 from 75.155.255.51
    [ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME
    /usr/lib/jvm/java-1.8.0-openjdk
    [ec2-user@ip-172-30-3-146 ~]$ cassandra
    ...
    ...
    Error: A JNI error has occurred, please check your installation and try again
    Exception in thread "main" java.lang.UnsupportedClassVersionError:
    org/apache/cassandra/service/CassandraDaemon has been compiled by a more recent version of
    the Java Runtime (class file version 55.0), this version of the Java Runtime only recognizes
    class file versions up to 52.0
        at java.lang.ClassLoader.defineClass1(Native Method)
        at java.lang.ClassLoader.defineClass(ClassLoader.java:763)
        at ...
    ...

Conversely, the ``CASSANDRA_USE_JDK11`` variable or the command-line option ``-Duse.jdk11`` cannot be used to build with Java 8. To demonstrate, set ``JAVA_HOME`` to version 8.
- -:: - - [root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com - Last login: Wed Jul 31 21:41:50 2019 from 75.155.255.51 - [ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME - /usr/lib/jvm/java-1.8.0-openjdk - -Set the ``CASSANDRA_USE_JDK11=true`` or command-line option ``-Duse.jdk11=true``. Subsequently, run Apache Ant to start the build. The build fails with error message listed. - -:: - - [ec2-user@ip-172-30-3-146 ~]$ cd - cassandra - [ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true - [ec2-user@ip-172-30-3-146 cassandra]$ ant - Buildfile: /home/ec2-user/cassandra/build.xml - - validate-build-conf: - - BUILD FAILED - /home/ec2-user/cassandra/build.xml:285: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true cannot - be set when building from java 8 - - Total time: 0 seconds - diff --git a/src/doc/4.0-alpha4/_sources/new/messaging.rst.txt b/src/doc/4.0-alpha4/_sources/new/messaging.rst.txt deleted file mode 100644 index 755c9d106..000000000 --- a/src/doc/4.0-alpha4/_sources/new/messaging.rst.txt +++ /dev/null @@ -1,257 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Improved Internode Messaging ------------------------------- - - -Apache Cassandra 4.0 has added several new improvements to internode messaging. - -Optimized Internode Messaging Protocol -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The internode messaging protocol has been optimized (`CASSANDRA-14485 -`_). Previously the ``IPAddressAndPort`` of the sender was included with each message that was sent even though the ``IPAddressAndPort`` had already been sent once when the initial connection/session was established. In Cassandra 4.0 ``IPAddressAndPort`` has been removed from every separate message sent and only sent when connection/session is initiated. - -Another improvement is that at several instances (listed) a fixed 4-byte integer value has been replaced with ``vint`` as a ``vint`` is almost always less than 1 byte: - -- The ``paramSize`` (the number of parameters in the header) -- Each individual parameter value -- The ``payloadSize`` - - -NIO Messaging -^^^^^^^^^^^^^^^ -In Cassandra 4.0 peer-to-peer (internode) messaging has been switched to non-blocking I/O (NIO) via Netty (`CASSANDRA-8457 -`_). - -As serialization format, each message contains a header with several fixed fields, an optional key-value parameters section, and then the message payload itself. Note: the IP address in the header may be either IPv4 (4 bytes) or IPv6 (16 bytes). - - The diagram below shows the IPv4 address for brevity. 
- -:: - - 1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4 5 5 5 5 5 6 6 - 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | PROTOCOL MAGIC | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Message ID | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Timestamp | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Addr len | IP Address (IPv4) / - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - / | Verb / - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - / | Parameters size / - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - / | Parameter data / - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - / | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Payload size | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | / - / Payload / - / | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - -An individual parameter has a String key and a byte array value. The key is serialized with its length, encoded as two bytes, followed by the UTF-8 byte encoding of the string. The body is serialized with its length, encoded as four bytes, followed by the bytes of the value. - -Resource limits on Queued Messages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -System stability is improved by enforcing strict resource limits (`CASSANDRA-15066 -`_) on the number of outbound messages that are queued, measured by the ``serializedSize`` of the message. There are three separate limits imposed simultaneously to ensure that progress is always made without any reasonable combination of failures impacting a node’s stability. - -1. Global, per-endpoint and per-connection limits are imposed on messages queued for delivery to other nodes and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire size of the message being sent or received. -2. The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. Each node-pair has three links: urgent, small and large. So any given node may have a maximum of ``N*3 * (internode_application_send_queue_capacity_in_bytes + internode_application_receive_queue_capacity_in_bytes)`` messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens nodes should need to communicate with significant bandwidth. -3. The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, on all links to or from a single node in the cluster. The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, on all links to or from any node in the cluster. The following configuration settings have been added to ``cassandra.yaml`` for resource limits on queued messages. 
::

    internode_application_send_queue_capacity_in_bytes: 4194304 #4MiB
    internode_application_send_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB
    internode_application_send_queue_reserve_global_capacity_in_bytes: 536870912 #512MiB
    internode_application_receive_queue_capacity_in_bytes: 4194304 #4MiB
    internode_application_receive_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB
    internode_application_receive_queue_reserve_global_capacity_in_bytes: 536870912 #512MiB

Virtual Tables for Messaging Metrics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Metrics reporting is improved by keeping the metrics for inter-node inbound and outbound messaging in virtual tables (`CASSANDRA-15066 <https://issues.apache.org/jira/browse/CASSANDRA-15066>`_). For inbound messaging a virtual table (``internode_inbound``) has been added to keep metrics for:

- Bytes and count of messages that could not be serialized or flushed due to an error
- Bytes and count of messages scheduled
- Bytes and count of messages successfully processed
- Bytes and count of messages successfully received
- Nanos and count of messages throttled
- Bytes and count of messages expired
- Corrupt frames recovered and unrecovered

A separate virtual table (``internode_outbound``) has been added for outbound inter-node messaging. The outbound virtual table keeps metrics for:

- Bytes and count of messages pending
- Bytes and count of messages sent
- Bytes and count of messages expired
- Bytes and count of messages that could not be sent due to an error
- Bytes and count of messages overloaded
- Active connection count
- Connection attempts
- Successful connection attempts

Hint Messaging
^^^^^^^^^^^^^^

A specialized version of the hint message has been added that takes a hint already encoded in a ``ByteBuffer`` and sends it verbatim. It is an optimization for dispatching a hint file of the current messaging version to a node of the same messaging version, which is the most common case. It saves extra ``ByteBuffer`` allocations and one redundant hint deserialization-serialization cycle.

Internode Application Timeout
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A configuration setting has been added to ``cassandra.yaml`` for the maximum continuous period a connection may be unwritable in application space.

::

    # internode_application_timeout_in_ms = 30000

Other additions include logging the message size in the trace messages emitted when tracing a query.

Paxos prepare and propose stage for local requests optimized
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Pre-4.0, Paxos prepare and propose messages always go through the entire ``MessagingService`` stack in Cassandra even if the request is to be served locally. In 4.0, local requests are served without involving ``MessagingService``, as is already done elsewhere in Cassandra for local requests.

This is what it looks like pre-4.0 with tracing on when running a lightweight transaction:

::

    Sending PAXOS_PREPARE message to /A.B.C.D [MessagingService-Outgoing-/A.B.C.D] | 2017-09-11
    21:55:18.971000 | A.B.C.D | 15045
    … REQUEST_RESPONSE message received from /A.B.C.D [MessagingService-Incoming-/A.B.C.D] |
    2017-09-11 21:55:18.976000 | A.B.C.D | 20270
    … Processing response from /A.B.C.D [SharedPool-Worker-4] | 2017-09-11 21:55:18.976000 |
    A.B.C.D | 20372

The same applies to the propose stage.

In version 4.0 the Paxos prepare and propose stages for local requests are optimized (`CASSANDRA-13862 <https://issues.apache.org/jira/browse/CASSANDRA-13862>`_).
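As a sketch of how to exercise this path, assuming a table ``t (id int PRIMARY KEY, k int, v text)`` exists in the current keyspace, tracing can be enabled in ``cqlsh`` and a lightweight transaction run; with the 4.0 optimization the locally served prepare and propose steps no longer need to round-trip through ``MessagingService``, so they need not appear as separate outgoing/incoming messaging events in the resulting trace.

::

    cqlsh:auditlogkeyspace> TRACING ON;
    cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (2, 2, 'val2') IF NOT EXISTS;
    cqlsh:auditlogkeyspace> TRACING OFF;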
- -Quality Assurance -^^^^^^^^^^^^^^^^^ - -Several other quality assurance improvements have been made in version 4.0 (`CASSANDRA-15066 -`_). - -Framing -******* -Version 4.0 introduces framing to all internode messages, i.e. the grouping of messages into a single logical payload with headers and trailers; these frames are guaranteed to either contain at most one message, that is split into its own unique sequence of frames (for large messages), or that a frame contains only complete messages. - -Corruption prevention -********************* -Previously, intra-datacenter internode messages would be unprotected from corruption by default, as only LZ4 provided any integrity checks. All messages to post 4.0 nodes are written to explicit frames, which may be: - -- LZ4 encoded -- CRC protected - -The Unprotected option is still available. - -Resilience -********** -For resilience, all frames are written with a separate CRC protected header, of 8 and 6 bytes respectively. If corruption occurs in this header, the connection must be reset, as before. If corruption occurs anywhere outside of the header, the corrupt frame will be skipped, leaving the connection intact and avoiding the loss of any messages unnecessarily. - -Previously, any issue at any point in the stream would result in the connection being reset, with the loss of any in-flight messages. - -Efficiency -********** -The overall memory usage, and number of byte shuffles, on both inbound and outbound messages is reduced. - -Outbound the Netty LZ4 encoder maintains a chunk size buffer (64KiB), that is filled before any compressed frame can be produced. Our frame encoders avoid this redundant copy, as well as freeing 192KiB per endpoint. - -Inbound, frame decoders guarantee only to copy the number of bytes necessary to parse a frame, and to never store more bytes than necessary. This improvement applies twice to LZ4 connections, improving both the message decode and the LZ4 frame decode. - -Inbound Path -************ -Version 4.0 introduces several improvements to the inbound path. - -An appropriate message handler is used based on whether large or small messages are expected on a particular connection as set in a flag. ``NonblockingBufferHandler``, running on event loop, is used for small messages, and ``BlockingBufferHandler``, running off event loop, for large messages. The single implementation of ``InboundMessageHandler`` handles messages of any size effectively by deriving size of the incoming message from the byte stream. In addition to deriving size of the message from the stream, incoming message expiration time is proactively read, before attempting to deserialize the entire message. If it’s expired at the time when a message is encountered the message is just skipped in the byte stream altogether. -And if a message fails to be deserialized while still on the receiving side - say, because of table id or column being unknown - bytes are skipped, without dropping the entire connection and losing all the buffered messages. An immediately reply back is sent to the coordinator node with the failure reason, rather than waiting for the coordinator callback to expire. This logic is extended to a corrupted frame; a corrupted frame is safely skipped over without dropping the connection. - -Inbound path imposes strict limits on memory utilization. Specifically, the memory occupied by all parsed, but unprocessed messages is bound - on per-connection, per-endpoint, and global basis. 
Once a connection exceeds its local unprocessed capacity and cannot borrow any permits from per-endpoint and global reserve, it simply stops processing further messages, providing natural backpressure - until sufficient capacity is regained. - -Outbound Connections -******************** - -Opening a connection -++++++++++++++++++++ -A consistent approach is adopted for all kinds of failure to connect, including: refused by endpoint, incompatible versions, or unexpected exceptions; - -- Retry forever, until either success or no messages waiting to deliver. -- Wait incrementally longer periods before reconnecting, up to a maximum of 1s. -- While failing to connect, no reserve queue limits are acquired. - -Closing a connection -++++++++++++++++++++ -- Correctly drains outbound messages that are waiting to be delivered (unless disconnected and fail to reconnect). -- Messages written to a closing connection are either delivered or rejected, with a new connection being opened if the old is irrevocably closed. -- Unused connections are pruned eventually. - -Reconnecting -++++++++++++ - -We sometimes need to reconnect a perfectly valid connection, e.g. if the preferred IP address changes. We ensure that the underlying connection has no in-progress operations before closing it and reconnecting. - -Message Failure -++++++++++++++++ -Propagates to callbacks instantly, better preventing overload by reclaiming committed memory. - -Expiry -~~~~~~~~ -- No longer experiences head-of-line blocking (e.g. undroppable message preventing all droppable messages from being expired). -- While overloaded, expiry is attempted eagerly on enqueuing threads. -- While disconnected we schedule regular pruning, to handle the case where messages are no longer being sent, but we have a large backlog to expire. - -Overload -~~~~~~~~~ -- Tracked by bytes queued, as opposed to number of messages. - -Serialization Errors -~~~~~~~~~~~~~~~~~~~~~ -- Do not result in the connection being invalidated; the message is simply completed with failure, and then erased from the frame. -- Includes detected mismatch between calculated serialization size to actual. - -Failures to flush to network, perhaps because the connection has been reset are not currently notified to callback handlers, as the necessary information has been discarded, though it would be possible to do so in future if we decide it is worth our while. - -QoS -+++++ -"Gossip" connection has been replaced with a general purpose "Urgent" connection, for any small messages impacting system stability. - -Metrics -+++++++ -We track, and expose via Virtual Table and JMX, the number of messages and bytes that: we could not serialize or flush due to an error, we dropped due to overload or timeout, are pending, and have successfully sent. - -Added a Message size limit -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra pre-4.0 doesn't protect the server from allocating huge buffers for the inter-node Message objects. Adding a message size limit would be good to deal with issues such as a malfunctioning cluster participant. Version 4.0 introduced max message size config param, akin to max mutation size - set to endpoint reserve capacity by default. - -Recover from unknown table when deserializing internode messages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -As discussed in (`CASSANDRA-9289 -`_) it would be nice to gracefully recover from seeing an unknown table in a message from another node. 
Pre-4.0, we close the connection and reconnect, which can cause other concurrent queries to fail. -Version 4.0 fixes the issue by wrapping message in-stream with -``TrackedDataInputPlus``, catching -``UnknownCFException``, and skipping the remaining bytes in this message. TCP won't be closed and it will remain connected for other messages. diff --git a/src/doc/4.0-alpha4/_sources/new/streaming.rst.txt b/src/doc/4.0-alpha4/_sources/new/streaming.rst.txt deleted file mode 100644 index 1807eb402..000000000 --- a/src/doc/4.0-alpha4/_sources/new/streaming.rst.txt +++ /dev/null @@ -1,162 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Improved Streaming ---------------------- - -Apache Cassandra 4.0 has made several improvements to streaming. Streaming is the process used by nodes of a cluster to exchange data in the form of SSTables. Streaming of SSTables is performed for several operations, such as: - -- SSTable Repair -- Host Replacement -- Range movements -- Bootstrapping -- Rebuild -- Cluster expansion - -Streaming based on Netty -^^^^^^^^^^^^^^^^^^^^^^^^ - -Streaming in Cassandra 4.0 is based on Non-blocking Input/Output (NIO) with Netty (`CASSANDRA-12229 -`_). It replaces the single-threaded (or sequential), synchronous, blocking model of streaming messages and transfer of files. Netty supports non-blocking, asynchronous, multi-threaded streaming with which multiple connections are opened simultaneously. Non-blocking implies that threads are not blocked as they don’t wait for a response for a sent request. A response could be returned in a different thread. With asynchronous, connections and threads are decoupled and do not have a 1:1 relation. Several more connections than threads may be opened. - -Zero Copy Streaming -^^^^^^^^^^^^^^^^^^^^ - -Pre-4.0, during streaming Cassandra reifies the SSTables into objects. This creates unnecessary garbage and slows down the whole streaming process as some SSTables can be transferred as a whole file rather than individual partitions. Cassandra 4.0 has added support for streaming entire SSTables when possible (`CASSANDRA-14556 -`_) for faster Streaming using ZeroCopy APIs. If enabled, Cassandra will use ZeroCopy for eligible SSTables significantly speeding up transfers and increasing throughput. A zero-copy path avoids bringing data into user-space on both sending and receiving side. Any streaming related operations will notice corresponding improvement. Zero copy streaming is hardware bound; only limited by the hardware limitations (Network and Disk IO ). - -High Availability -***************** -In benchmark tests Zero Copy Streaming is 5x faster than partitions based streaming. Faster streaming provides the benefit of improved availability. 
A cluster’s recovery mainly depends on the streaming speed, Cassandra clusters with failed nodes will be able to recover much more quickly (5x faster). If a node fails, SSTables need to be streamed to a replacement node. During the replacement operation, the new Cassandra node streams SSTables from the neighboring nodes that hold copies of the data belonging to this new node’s token range. Depending on the amount of data stored, this process can require substantial network bandwidth, taking some time to complete. The longer these range movement operations take, the more the cluster availability is lost. Failure of multiple nodes would reduce high availability greatly. The faster the new node completes streaming its data, the faster it can serve traffic, increasing the availability of the cluster. - -Enabling Zero Copy Streaming -***************************** -Zero copy streaming is enabled by setting the following setting in ``cassandra.yaml``. - -:: - - stream_entire_sstables: true - -By default zero copy streaming is enabled. - -SSTables Eligible for Zero Copy Streaming -***************************************** -Zero copy streaming is used if all partitions within the SSTable need to be transmitted. This is common when using ``LeveledCompactionStrategy`` or when partitioning SSTables by token range has been enabled. All partition keys in the SSTables are iterated over to determine the eligibility for Zero Copy streaming. - -Benefits of Zero Copy Streaming -******************************** -When enabled, it permits Cassandra to zero-copy stream entire eligible SSTables between nodes, including every component. This speeds up the network transfer significantly subject to throttling specified by ``stream_throughput_outbound_megabits_per_sec``. - -Enabling this will reduce the GC pressure on sending and receiving node. While this feature tries to keep the disks balanced, it cannot guarantee it. This feature will be automatically disabled if internode encryption is enabled. Currently this can be used with Leveled Compaction. - -Configuring for Zero Copy Streaming -************************************ -Throttling would reduce the streaming speed. The ``stream_throughput_outbound_megabits_per_sec`` throttles all outbound streaming file transfers on a node to the given total throughput in Mbps. When unset, the default is 200 Mbps or 25 MB/s. - -:: - - stream_throughput_outbound_megabits_per_sec: 200 - -To run any Zero Copy streaming benchmark the ``stream_throughput_outbound_megabits_per_sec`` must be set to a really high value otherwise, throttling will be significant and the benchmark results will not be meaningful. - -The ``inter_dc_stream_throughput_outbound_megabits_per_sec`` throttles all streaming file transfer between the datacenters, this setting allows users to throttle inter dc stream throughput in addition to throttling all network stream traffic as configured with ``stream_throughput_outbound_megabits_per_sec``. When unset, the default is 200 Mbps or 25 MB/s. - -:: - - inter_dc_stream_throughput_outbound_megabits_per_sec: 200 - -SSTable Components Streamed with Zero Copy Streaming -***************************************************** -Zero Copy Streaming streams entire SSTables. SSTables are made up of multiple components in separate files. SSTable components streamed are listed in Table 1. - -Table 1. 
SSTable Components - -+------------------+---------------------------------------------------+ -|SSTable Component | Description | -+------------------+---------------------------------------------------+ -| Data.db |The base data for an SSTable: the remaining | -| |components can be regenerated based on the data | -| |component. | -+------------------+---------------------------------------------------+ -| Index.db |Index of the row keys with pointers to their | -| |positions in the data file. | -+------------------+---------------------------------------------------+ -| Filter.db |Serialized bloom filter for the row keys in the | -| |SSTable. | -+------------------+---------------------------------------------------+ -|CompressionInfo.db|File to hold information about uncompressed | -| |data length, chunk offsets etc. | -+------------------+---------------------------------------------------+ -| Statistics.db |Statistical metadata about the content of the | -| |SSTable. | -+------------------+---------------------------------------------------+ -| Digest.crc32 |Holds CRC32 checksum of the data file | -| |size_bytes. | -+------------------+---------------------------------------------------+ -| CRC.db |Holds the CRC32 for chunks in an uncompressed file.| -+------------------+---------------------------------------------------+ -| Summary.db |Holds SSTable Index Summary | -| |(sampling of Index component) | -+------------------+---------------------------------------------------+ -| TOC.txt |Table of contents, stores the list of all | -| |components for the SSTable. | -+------------------+---------------------------------------------------+ - -Custom component, used by e.g. custom compaction strategy may also be included. - -Repair Streaming Preview -^^^^^^^^^^^^^^^^^^^^^^^^ - -Repair with ``nodetool repair`` involves streaming of repaired SSTables and a repair preview has been added to provide an estimate of the amount of repair streaming that would need to be performed. Repair preview (`CASSANDRA-13257 -`_) is invoke with ``nodetool repair --preview`` using option: - -:: - --prv, --preview - -It determines ranges and amount of data to be streamed, but doesn't actually perform repair. - -Parallelizing of Streaming of Keyspaces -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The streaming of the different keyspaces for bootstrap and rebuild has been parallelized in Cassandra 4.0 (`CASSANDRA-4663 -`_). - -Unique nodes for Streaming in Multi-DC deployment -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Range Streamer picks unique nodes to stream data from when number of replicas in each DC is three or more (`CASSANDRA-4650 -`_). What the optimization does is to even out the streaming load across the cluster. Without the optimization, some node can be picked up to stream more data than others. This patch allows to select dedicated node to stream only one range. - -This will increase the performance of bootstrapping a node and will also put less pressure on nodes serving the data. This does not affect if N < 3 in each DC as then it streams data from only 2 nodes. - -Stream Operation Types -^^^^^^^^^^^^^ - -It is important to know the type or purpose of a certain stream. Version 4.0 (`CASSANDRA-13064 -`_) adds an ``enum`` to distinguish between the different types of streams. Stream types are available both in a stream request and a stream task. 
The different stream types are: - -- Restore replica count -- Unbootstrap -- Relocation -- Bootstrap -- Rebuild -- Bulk Load -- Repair - -Disallow Decommission when number of Replicas will drop below configured RF -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -`CASSANDRA-12510 -`_ guards against decommission that will drop # of replicas below configured replication factor (RF), and adds the ``--force`` option that allows decommission to continue if intentional; force decommission of this node even when it reduces the number of replicas to below configured RF. diff --git a/src/doc/4.0-alpha4/_sources/new/transientreplication.rst.txt b/src/doc/4.0-alpha4/_sources/new/transientreplication.rst.txt deleted file mode 100644 index 438f43797..000000000 --- a/src/doc/4.0-alpha4/_sources/new/transientreplication.rst.txt +++ /dev/null @@ -1,155 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Transient Replication ---------------------- - -**Note**: - -Transient Replication (`CASSANDRA-14404 -`_) is an experimental feature designed for expert Apache Cassandra users who are able to validate every aspect of the database for their application and deployment. -That means being able to check that operations like reads, writes, decommission, remove, rebuild, repair, and replace all work with your queries, data, configuration, operational practices, and availability requirements. -Apache Cassandra 4.0 has the initial implementation of transient replication. Future releases of Cassandra will make this feature suitable for a wider audience. -It is anticipated that a future version will support monotonic reads with transient replication as well as LWT, logged batches, and counters. Being experimental, Transient replication is **not** recommended for production use. - -Objective -^^^^^^^^^ - -The objective of transient replication is to decouple storage requirements from data redundancy (or consensus group size) using incremental repair, in order to reduce storage overhead. -Certain nodes act as full replicas (storing all the data for a given token range), and some nodes act as transient replicas, storing only unrepaired data for the same token ranges. - -The optimization that is made possible with transient replication is called "Cheap quorums", which implies that data redundancy is increased without corresponding increase in storage usage. - -Transient replication is useful when sufficient full replicas are unavailable to receive and store all the data. -Transient replication allows you to configure a subset of replicas to only replicate data that hasn't been incrementally repaired. -As an optimization, we can avoid writing data to a transient replica if we have successfully written data to the full replicas. 
- -After incremental repair, transient data stored on transient replicas can be discarded. - -Enabling Transient Replication -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Transient replication is not enabled by default. Transient replication must be enabled on each node in a cluster separately by setting the following configuration property in ``cassandra.yaml``. - -:: - - enable_transient_replication: true - -Transient replication may be configured with both ``SimpleStrategy`` and ``NetworkTopologyStrategy``. Transient replication is configured by setting replication factor as ``/``. - -As an example, create a keyspace with replication factor (RF) 3. - -:: - - CREATE KEYSPACE CassandraKeyspaceSimple WITH replication = {'class': 'SimpleStrategy', - 'replication_factor' : 4/1}; - - -As another example, ``some_keysopace keyspace`` will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient: - -:: - - CREATE KEYSPACE some_keysopace WITH replication = {'class': 'NetworkTopologyStrategy', - 'DC1' : '3/1'', 'DC2' : '5/2'}; - -Transiently replicated keyspaces only support tables with ``read_repair`` set to ``NONE``. - -Important Restrictions: - -- RF cannot be altered while some endpoints are not in a normal state (no range movements). -- You can't add full replicas if there are any transient replicas. You must first remove all transient replicas, then change the # of full replicas, then add back the transient replicas. -- You can only safely increase number of transients one at a time with incremental repair run in between each time. - - -Additionally, transient replication cannot be used for: - -- Monotonic Reads -- Lightweight Transactions (LWTs) -- Logged Batches -- Counters -- Keyspaces using materialized views -- Secondary indexes (2i) - -Cheap Quorums -^^^^^^^^^^^^^ - -Cheap quorums are a set of optimizations on the write path to avoid writing to transient replicas unless sufficient full replicas are not available to satisfy the requested consistency level. -Hints are never written for transient replicas. Optimizations on the read path prefer reading from transient replicas. -When writing at quorum to a table configured to use transient replication the quorum will always prefer available full -replicas over transient replicas so that transient replicas don't have to process writes. Tail latency is reduced by -rapid write protection (similar to rapid read protection) when full replicas are slow or unavailable by sending writes -to transient replicas. Transient replicas can serve reads faster as they don't have to do anything beyond bloom filter -checks if they have no data. With vnodes and large cluster sizes they will not have a large quantity of data -even for failure of one or more full replicas where transient replicas start to serve a steady amount of write traffic -for some of their transiently replicated ranges. - -Speculative Write Option -^^^^^^^^^^^^^^^^^^^^^^^^ -The ``CREATE TABLE`` adds an option ``speculative_write_threshold`` for use with transient replicas. The option is of type ``simple`` with default value as ``99PERCENTILE``. When replicas are slow or unresponsive ``speculative_write_threshold`` specifies the threshold at which a cheap quorum write will be upgraded to include transient replicas. - - -Pending Ranges and Transient Replicas -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Pending ranges refers to the movement of token ranges between transient replicas. 
When a transient range is moved, there
will be a period of time where both transient replicas would need to receive any write intended for the logical
transient replica so that after the movement takes effect a read quorum is able to return a response. Nodes are *not*
temporarily transient replicas during expansion. They stream data like a full replica for the transient range before they
can serve reads. A pending state is incurred similar to how there is a pending state for full replicas. Transient replicas
also always receive writes when they are pending. Pending transient ranges are sent a bit more data and reading from
them is avoided.


Read Repair and Transient Replicas
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Read repair never attempts to repair a transient replica. Reads will always include at least one full replica.
They should also prefer transient replicas where possible. Range scans ensure the entire scanned range performs
replica selection that satisfies the requirement that every range scanned includes one full replica. During incremental
and validation repair handling, anti-compaction at transient replicas does not output any data for transient ranges, as the
data will be dropped after repair, and transient replicas never have data streamed to them.


Transitioning between Full Replicas and Transient Replicas
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The additional state transitions that transient replication introduces require streaming and ``nodetool cleanup`` to
behave differently. When data is streamed it is ensured that it is streamed from a full replica and not a transient replica.

Transitioning from not replicated to transiently replicated means that a node must stay pending until the next incremental
repair completes, at which point the data for that range is known to be available at full replicas.

Transitioning from transiently replicated to fully replicated requires streaming from a full replica and is identical
to how data is streamed when transitioning from not replicated to replicated. The transition is managed so the transient
replica is not read from as a full replica until streaming completes. It can be used immediately for a write quorum.

Transitioning from fully replicated to transiently replicated requires cleanup to remove repaired data from the transiently
replicated range to reclaim space. It can be used immediately for a write quorum.

Transitioning from transiently replicated to not replicated requires cleanup to be run to remove the formerly transiently replicated data.

When transient replication is in use, ring changes are supported, including add/remove node, change RF, and add/remove DC.


Transient Replication supports EACH_QUORUM
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

(`CASSANDRA-14727`_) adds Transient Replication support for ``EACH_QUORUM``. Per (`CASSANDRA-14768`_), we ensure we write to at least a ``QUORUM`` of nodes in every DC,
regardless of how many responses we need to wait for and our requested consistency level. This is to minimally surprise
users with transient replication; with normal writes, we soft-ensure that we reach ``QUORUM`` in all DCs we are able to,
by writing to every node; even if we don't wait for ACK, we have in both cases sent sufficient messages.
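Putting the configuration steps above together, the following sketch (the keyspace and table names are hypothetical, and it assumes ``enable_transient_replication: true`` has already been set in ``cassandra.yaml`` on every node) creates a transiently replicated keyspace and a table that satisfies the ``read_repair`` restriction:

::

    CREATE KEYSPACE transient_example WITH replication = {'class': 'NetworkTopologyStrategy',
        'DC1' : '3/1'};

    CREATE TABLE transient_example.t (
        id int,
        v text,
        PRIMARY KEY (id)
    ) WITH read_repair = 'NONE';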
diff --git a/src/doc/4.0-alpha4/_sources/new/virtualtables.rst.txt b/src/doc/4.0-alpha4/_sources/new/virtualtables.rst.txt deleted file mode 100644 index 1a39dc678..000000000 --- a/src/doc/4.0-alpha4/_sources/new/virtualtables.rst.txt +++ /dev/null @@ -1,342 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Virtual Tables --------------- - -Apache Cassandra 4.0 implements virtual tables (`CASSANDRA-7622 -`_). - -Definition -^^^^^^^^^^ - -A virtual table is a table that is backed by an API instead of data explicitly managed and stored as SSTables. Apache Cassandra 4.0 implements a virtual keyspace interface for virtual tables. Virtual tables are specific to each node. - -Objective -^^^^^^^^^ - -A virtual table could have several uses including: - -- Expose metrics through CQL -- Expose YAML configuration information - -How are Virtual Tables different from regular tables? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Virtual tables and virtual keyspaces are quite different from regular tables and keyspaces respectively such as: - -- Virtual tables are read-only, but it is likely to change -- Virtual tables are not replicated -- Virtual tables are local only and non distributed -- Virtual tables have no associated SSTables -- Consistency level of the queries sent virtual tables are ignored -- Virtual tables are managed by Cassandra and a user cannot run DDL to create new virtual tables or DML to modify existing virtual tables -- Virtual tables are created in special keyspaces and not just any keyspace -- All existing virtual tables use ``LocalPartitioner``. Since a virtual table is not replicated the partitioner sorts in order of partition keys instead of by their hash. -- Making advanced queries with ``ALLOW FILTERING`` and aggregation functions may be used with virtual tables even though in normal tables we dont recommend it - -Virtual Keyspaces -^^^^^^^^^^^^^^^^^ - -Apache Cassandra 4.0 has added two new keyspaces for virtual tables: ``system_virtual_schema`` and ``system_views``. Run the following command to list the keyspaces: - -:: - - cqlsh> DESC KEYSPACES; - system_schema system system_distributed system_virtual_schema - system_auth system_traces system_views - -The ``system_virtual_schema keyspace`` contains schema information on virtual tables. The ``system_views`` keyspace contains the actual virtual tables. 
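As a quick illustration, the virtual tables defined on a node can also be listed by querying ``system_virtual_schema`` directly; the exact rows returned will vary by version:

::

    cqlsh> SELECT * FROM system_virtual_schema.tables;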
- -Virtual Table Limitations -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Virtual tables and virtual keyspaces have some limitations initially though some of these could change such as: - -- Cannot alter or drop virtual keyspaces or tables -- Cannot truncate virtual tables -- Expiring columns are not supported by virtual tables -- Conditional updates are not supported by virtual tables -- Cannot create tables in virtual keyspaces -- Cannot perform any operations against virtual keyspace -- Secondary indexes are not supported on virtual tables -- Cannot create functions in virtual keyspaces -- Cannot create types in virtual keyspaces -- Materialized views are not supported on virtual tables -- Virtual tables don't support ``DELETE`` statements -- Cannot ``CREATE TRIGGER`` against a virtual table -- Conditional ``BATCH`` statements cannot include mutations for virtual tables -- Cannot include a virtual table statement in a logged batch -- Mutations for virtual and regular tables cannot exist in the same batch -- Conditional ``BATCH`` statements cannot include mutations for virtual tables -- Cannot create aggregates in virtual keyspaces; but may run aggregate functions on select - -Listing and Describing Virtual Tables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Virtual tables in a virtual keyspace may be listed with ``DESC TABLES``. The ``system_views`` virtual keyspace tables include the following: - -:: - - cqlsh> USE system_views; - cqlsh:system_views> DESC TABLES; - coordinator_scans clients tombstones_scanned internode_inbound - disk_usage sstable_tasks live_scanned caches - local_writes max_partition_size local_reads - coordinator_writes internode_outbound thread_pools - local_scans coordinator_reads settings - -Some of the salient virtual tables in ``system_views`` virtual keyspace are described in Table 1. - -Table 1 : Virtual Tables in system_views - -+------------------+---------------------------------------------------+ -|Virtual Table | Description | -+------------------+---------------------------------------------------+ -| clients |Lists information about all connected clients. | -+------------------+---------------------------------------------------+ -| disk_usage |Disk usage including disk_space, keyspace_name, | -| |and table_name by system keyspaces. | -+------------------+---------------------------------------------------+ -| local_writes |A table metric for local writes | -| |including count, keyspace_name, | -| |max, median, per_second, and | -| |table_name. | -+------------------+---------------------------------------------------+ -| caches |Displays the general cache information including | -| |cache name, capacity_bytes, entry_count, hit_count,| -| |hit_ratio double, recent_hit_rate_per_second, | -| |recent_request_rate_per_second, request_count, and | -| |size_bytes. | -+------------------+---------------------------------------------------+ -| local_reads |A table metric for local reads information. | -+------------------+---------------------------------------------------+ -| sstable_tasks |Lists currently running tasks such as compactions | -| |and upgrades on SSTables. | -+------------------+---------------------------------------------------+ -|internode_inbound |Lists information about the inbound | -| |internode messaging. | -+------------------+---------------------------------------------------+ -| thread_pools |Lists metrics for each thread pool. 
| -+------------------+---------------------------------------------------+ -| settings |Displays configuration settings in cassandra.yaml. | -+------------------+---------------------------------------------------+ -|max_partition_size|A table metric for maximum partition size. | -+------------------+---------------------------------------------------+ -|internode_outbound|Information about the outbound internode messaging.| -| | | -+------------------+---------------------------------------------------+ - -We shall discuss some of the virtual tables in more detail next. - -Clients Virtual Table -********************* - -The ``clients`` virtual table lists all active connections (connected clients) including their ip address, port, connection stage, driver name, driver version, hostname, protocol version, request count, ssl enabled, ssl protocol and user name: - -:: - - cqlsh:system_views> select * from system_views.clients; - address | port | connection_stage | driver_name | driver_version | hostname | protocol_version | request_count | ssl_cipher_suite | ssl_enabled | ssl_protocol | username - -----------+-------+------------------+-------------+----------------+-----------+------------------+---------------+------------------+-------------+--------------+----------- - 127.0.0.1 | 50628 | ready | null | null | localhost | 4 | 55 | null | False | null | anonymous - 127.0.0.1 | 50630 | ready | null | null | localhost | 4 | 70 | null | False | null | anonymous - - (2 rows) - -Some examples of how ``clients`` can be used are: - -- To find applications using old incompatible versions of drivers before upgrading and with ``nodetool enableoldprotocolversions`` and ``nodetool disableoldprotocolversions`` during upgrades. -- To identify clients sending too many requests. -- To find if SSL is enabled during the migration to and from ssl. - - -The virtual tables may be described with ``DESCRIBE`` statement. The DDL listed however cannot be run to create a virtual table. As an example describe the ``system_views.clients`` virtual table: - -:: - - cqlsh:system_views> DESC TABLE system_views.clients; - CREATE TABLE system_views.clients ( - address inet, - connection_stage text, - driver_name text, - driver_version text, - hostname text, - port int, - protocol_version int, - request_count bigint, - ssl_cipher_suite text, - ssl_enabled boolean, - ssl_protocol text, - username text, - PRIMARY KEY (address, port)) WITH CLUSTERING ORDER BY (port ASC) - AND compaction = {'class': 'None'} - AND compression = {}; - -Caches Virtual Table -******************** -The ``caches`` virtual table lists information about the caches. The four caches presently created are chunks, counters, keys and rows. 
A query on the ``caches`` virtual table returns the following details: - -:: - - cqlsh:system_views> SELECT * FROM system_views.caches; - name | capacity_bytes | entry_count | hit_count | hit_ratio | recent_hit_rate_per_second | recent_request_rate_per_second | request_count | size_bytes - ---------+----------------+-------------+-----------+-----------+----------------------------+--------------------------------+---------------+------------ - chunks | 229638144 | 29 | 166 | 0.83 | 5 | 6 | 200 | 475136 - counters | 26214400 | 0 | 0 | NaN | 0 | 0 | 0 | 0 - keys | 52428800 | 14 | 124 | 0.873239 | 4 | 4 | 142 | 1248 - rows | 0 | 0 | 0 | NaN | 0 | 0 | 0 | 0 - - (4 rows) - -Settings Virtual Table -********************** -The ``settings`` table is rather useful and lists all the current configuration settings from the ``cassandra.yaml``. The encryption options are overridden to hide the sensitive truststore information or passwords. The configuration settings however cannot be set using DML on the virtual table presently: -:: - - cqlsh:system_views> SELECT * FROM system_views.settings; - - name | value - -------------------------------------+-------------------- - allocate_tokens_for_keyspace | null - audit_logging_options_enabled | false - auto_snapshot | true - automatic_sstable_upgrade | false - cluster_name | Test Cluster - enable_transient_replication | false - hinted_handoff_enabled | true - hints_directory | /home/ec2-user/cassandra/data/hints - incremental_backups | false - initial_token | null - ... - ... - ... - rpc_address | localhost - ssl_storage_port | 7001 - start_native_transport | true - storage_port | 7000 - stream_entire_sstables | true - (224 rows) - - -The ``settings`` table can be really useful if yaml file has been changed since startup and dont know running configuration, or to find if they have been modified via jmx/nodetool or virtual tables. - - -Thread Pools Virtual Table -************************** - -The ``thread_pools`` table lists information about all thread pools. Thread pool information includes active tasks, active tasks limit, blocked tasks, blocked tasks all time, completed tasks, and pending tasks. 
A query on the ``thread_pools`` returns following details: - -:: - - cqlsh:system_views> select * from system_views.thread_pools; - - name | active_tasks | active_tasks_limit | blocked_tasks | blocked_tasks_all_time | completed_tasks | pending_tasks - ------------------------------+--------------+--------------------+---------------+------------------------+-----------------+--------------- - AntiEntropyStage | 0 | 1 | 0 | 0 | 0 | 0 - CacheCleanupExecutor | 0 | 1 | 0 | 0 | 0 | 0 - CompactionExecutor | 0 | 2 | 0 | 0 | 881 | 0 - CounterMutationStage | 0 | 32 | 0 | 0 | 0 | 0 - GossipStage | 0 | 1 | 0 | 0 | 0 | 0 - HintsDispatcher | 0 | 2 | 0 | 0 | 0 | 0 - InternalResponseStage | 0 | 2 | 0 | 0 | 0 | 0 - MemtableFlushWriter | 0 | 2 | 0 | 0 | 1 | 0 - MemtablePostFlush | 0 | 1 | 0 | 0 | 2 | 0 - MemtableReclaimMemory | 0 | 1 | 0 | 0 | 1 | 0 - MigrationStage | 0 | 1 | 0 | 0 | 0 | 0 - MiscStage | 0 | 1 | 0 | 0 | 0 | 0 - MutationStage | 0 | 32 | 0 | 0 | 0 | 0 - Native-Transport-Requests | 1 | 128 | 0 | 0 | 130 | 0 - PendingRangeCalculator | 0 | 1 | 0 | 0 | 1 | 0 - PerDiskMemtableFlushWriter_0 | 0 | 2 | 0 | 0 | 1 | 0 - ReadStage | 0 | 32 | 0 | 0 | 13 | 0 - Repair-Task | 0 | 2147483647 | 0 | 0 | 0 | 0 - RequestResponseStage | 0 | 2 | 0 | 0 | 0 | 0 - Sampler | 0 | 1 | 0 | 0 | 0 | 0 - SecondaryIndexManagement | 0 | 1 | 0 | 0 | 0 | 0 - ValidationExecutor | 0 | 2147483647 | 0 | 0 | 0 | 0 - ViewBuildExecutor | 0 | 1 | 0 | 0 | 0 | 0 - ViewMutationStage | 0 | 32 | 0 | 0 | 0 | 0 - -(24 rows) - -Internode Inbound Messaging Virtual Table -***************************************** - -The ``internode_inbound`` virtual table is for the internode inbound messaging. Initially no internode inbound messaging may get listed. In addition to the address, port, datacenter and rack information includes corrupt frames recovered, corrupt frames unrecovered, error bytes, error count, expired bytes, expired count, processed bytes, processed count, received bytes, received count, scheduled bytes, scheduled count, throttled count, throttled nanos, using bytes, using reserve bytes. A query on the ``internode_inbound`` returns following details: - -:: - - cqlsh:system_views> SELECT * FROM system_views.internode_inbound; - address | port | dc | rack | corrupt_frames_recovered | corrupt_frames_unrecovered | - error_bytes | error_count | expired_bytes | expired_count | processed_bytes | - processed_count | received_bytes | received_count | scheduled_bytes | scheduled_count | throttled_count | throttled_nanos | using_bytes | using_reserve_bytes - ---------+------+----+------+--------------------------+----------------------------+- - ---------- - (0 rows) - -SSTables Tasks Virtual Table -**************************** - -The ``sstable_tasks`` could be used to get information about running tasks. 
It lists following columns: - -:: - - cqlsh:system_views> SELECT * FROM sstable_tasks; - keyspace_name | table_name | task_id | kind | progress | total | unit - ---------------+------------+--------------------------------------+------------+----------+----------+------- - basic | wide2 | c3909740-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction | 60418761 | 70882110 | bytes - basic | wide2 | c7556770-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction | 2995623 | 40314679 | bytes - - -As another example, to find how much time is remaining for SSTable tasks, use the following query: - -:: - - SELECT total - progress AS remaining - FROM system_views.sstable_tasks; - -Other Virtual Tables -******************** - -Some examples of using other virtual tables are as follows. - -Find tables with most disk usage: - -:: - - cqlsh> SELECT * FROM disk_usage WHERE mebibytes > 1 ALLOW FILTERING; - - keyspace_name | table_name | mebibytes - ---------------+------------+----------- - keyspace1 | standard1 | 288 - tlp_stress | keyvalue | 3211 - -Find queries on table/s with greatest read latency: - -:: - - cqlsh> SELECT * FROM local_read_latency WHERE per_second > 1 ALLOW FILTERING; - - keyspace_name | table_name | p50th_ms | p99th_ms | count | max_ms | per_second - ---------------+------------+----------+----------+----------+---------+------------ - tlp_stress | keyvalue | 0.043 | 0.152 | 49785158 | 186.563 | 11418.356 - - -The system_virtual_schema keyspace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The ``system_virtual_schema`` keyspace has three tables: ``keyspaces``, ``columns`` and ``tables`` for the virtual keyspace definitions, virtual table definitions, and virtual column definitions respectively. It is used by Cassandra internally and a user would not need to access it directly. diff --git a/src/doc/4.0-alpha4/_sources/operating/audit_logging.rst.txt b/src/doc/4.0-alpha4/_sources/operating/audit_logging.rst.txt deleted file mode 100644 index 068209ee8..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/audit_logging.rst.txt +++ /dev/null @@ -1,236 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - - - -Audit Logging ------------------- - -Audit logging in Cassandra logs every incoming CQL command request, Authentication (successful as well as unsuccessful login) -to C* node. Currently, there are two implementations provided, the custom logger can be implemented and injected with the -class name as a parameter in cassandra.yaml. - -- ``BinAuditLogger`` An efficient way to log events to file in a binary format. -- ``FileAuditLogger`` Logs events to ``audit/audit.log`` file using slf4j logger. 
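For example, a minimal ``audit_logging_options`` section in ``cassandra.yaml`` might look like the following sketch; the exact nesting of the ``logger`` option and the commented-out values (the directory and keyspace names are hypothetical) may differ between versions, so treat this as illustrative only:

::

    audit_logging_options:
        enabled: true
        logger: BinAuditLogger
        # audit_logs_dir: /var/log/cassandra/audit/
        # included_keyspaces: app_keyspace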
*Recommendation*: ``BinAuditLogger`` is the community-recommended logger, considering its performance.

What does it capture
^^^^^^^^^^^^^^^^^^^^^^^

Audit logging captures the following events:

- Successful as well as unsuccessful login attempts.

- All database commands executed via the Native protocol (CQL), whether attempted or successfully executed.

Limitations
^^^^^^^^^^^

Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution timestamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log.

What does it log
^^^^^^^^^^^^^^^^^^^
Each audit log implementation has access to the following attributes, and for the default text based logger these fields are concatenated with ``|`` to yield the final message.

 - ``user``: User name (if available)
 - ``host``: Host IP, where the command is being executed
 - ``source ip address``: Source IP address from which the request was initiated
 - ``source port``: Source port number from which the request was initiated
 - ``timestamp``: Unix timestamp
 - ``type``: Type of the request (SELECT, INSERT, etc.)
 - ``category``: Category of the request (DDL, DML, etc.)
 - ``keyspace``: Keyspace (if applicable) on which the request is targeted to be executed
 - ``scope``: Table, aggregate, function, or trigger name, as applicable
 - ``operation``: CQL command being executed

How to configure
^^^^^^^^^^^^^^^^^^
Audit logging can be configured using ``cassandra.yaml``. If you want to try audit logging on one node, it can also be enabled and configured using ``nodetool``.

cassandra.yaml configurations for AuditLog
"""""""""""""""""""""""""""""""""""""""""""""
 - ``enabled``: Enables or disables audit logging
 - ``logger``: Class name of the logger/custom logger.
 - ``audit_logs_dir``: Audit logs directory location; if not set, defaults to `cassandra.logdir.audit` or `cassandra.logdir` + /audit/
 - ``included_keyspaces``: Comma separated list of keyspaces to be included in the audit log; by default all keyspaces are included
 - ``excluded_keyspaces``: Comma separated list of keyspaces to be excluded from the audit log; by default no keyspace is excluded except `system`, `system_schema` and `system_virtual_schema`
 - ``included_categories``: Comma separated list of Audit Log Categories to be included in the audit log; by default all categories are included
 - ``excluded_categories``: Comma separated list of Audit Log Categories to be excluded from the audit log; by default no category is excluded
 - ``included_users``: Comma separated list of users to be included in the audit log; by default all users are included
 - ``excluded_users``: Comma separated list of users to be excluded from the audit log; by default no user is excluded


The list of available categories is: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE.

NodeTool command to enable AuditLog
"""""""""""""""""""""""""""""""""""""
``enableauditlog``: Enables AuditLog with the yaml defaults. The yaml configurations can be overridden using options via the nodetool command.

::

    nodetool enableauditlog

Options
**********

``--excluded-categories``
    Comma separated list of Audit Log Categories to be excluded from the
    audit log. If not set, the value from cassandra.yaml will be used.

``--excluded-keyspaces``
    Comma separated list of keyspaces to be excluded from the audit log. If
    not set, the value from cassandra.yaml will be used.
    Please remember that `system`, `system_schema` and `system_virtual_schema` are excluded by default;
    if you are overriding this option via nodetool,
    remember to add these keyspaces back if you don't want them in the audit logs.

``--excluded-users``
    Comma separated list of users to be excluded from the audit log. If not
    set, the value from cassandra.yaml will be used.

``--included-categories``
    Comma separated list of Audit Log Categories to be included in the
    audit log. If not set, the value from cassandra.yaml will be used.

``--included-keyspaces``
    Comma separated list of keyspaces to be included in the audit log. If
    not set, the value from cassandra.yaml will be used.

``--included-users``
    Comma separated list of users to be included in the audit log. If not
    set, the value from cassandra.yaml will be used.

``--logger``
    Logger name to be used for AuditLogging. Default: BinAuditLogger. If
    not set, the value from cassandra.yaml will be used.


NodeTool command to disable AuditLog
"""""""""""""""""""""""""""""""""""""""

``disableauditlog``: Disables AuditLog.

::

    nodetool disableauditlog


NodeTool command to reload AuditLog filters
"""""""""""""""""""""""""""""""""""""""""""""

``enableauditlog``: The nodetool enableauditlog command can be used to reload audit log filters when called with the default or previous ``loggername`` and updated filters.

E.g.,

::

    nodetool enableauditlog --loggername --included-keyspaces


View the contents of AuditLog Files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``auditlogviewer`` is a new tool introduced to help view the contents of the binlog file in a human-readable text format.

::

    auditlogviewer [...] [options]

Options
""""""""

``-f,--follow``
    Upon reaching the end of the log continue indefinitely
    waiting for more records
``-r,--roll_cycle``
    How often the log file was rolled. May be
    necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY,
    DAILY). Default HOURLY.

``-h,--help``
    Display this help message

For example, to dump the contents of audit log files on the console:

::

    auditlogviewer /logs/cassandra/audit

Sample output
"""""""""""""

::

    LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1"


Configuring BinAuditLogger
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use ``BinAuditLogger`` as a logger in AuditLogging, set the logger to ``BinAuditLogger`` in cassandra.yaml under the ``audit_logging_options`` section. ``BinAuditLogger`` can be further configured using its advanced options in cassandra.yaml.


Advanced Options for BinAuditLogger
""""""""""""""""""""""""""""""""""""""

``block``
    Indicates if the AuditLog should block if it falls behind or should drop audit log records. Default is set to ``true`` so that AuditLog records won't be lost.

``max_queue_weight``
    Maximum weight of the in-memory queue for records waiting to be written to the audit log file before blocking or dropping the log records. Default is set to ``256 * 1024 * 1024``.

``max_log_size``
    Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is set to ``16L * 1024L * 1024L * 1024L``.

``roll_cycle``
    How often to roll Audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY. For more options, refer to net.openhft.chronicle.queue.RollCycles.
Default is set to ``"HOURLY"`` - -Configuring FileAuditLogger -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To use ``FileAuditLogger`` as a logger in AuditLogging, apart from setting the class name in cassandra.yaml, following configuration is needed to have the audit log events to flow through separate log file instead of system.log - - -.. code-block:: xml - - - - ${cassandra.logdir}/audit/audit.log - - - ${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip - - 50MB - 30 - 5GB - - - %-5level [%thread] %date{ISO8601} %F:%L - %msg%n - - - - - - - diff --git a/src/doc/4.0-alpha4/_sources/operating/backups.rst.txt b/src/doc/4.0-alpha4/_sources/operating/backups.rst.txt deleted file mode 100644 index 01cb6c588..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/backups.rst.txt +++ /dev/null @@ -1,660 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. -.. highlight:: none - -Backups -------- - -Apache Cassandra stores data in immutable SSTable files. Backups in Apache Cassandra database are backup copies of the database data that is stored as SSTable files. Backups are used for several purposes including the following: - -- To store a data copy for durability -- To be able to restore a table if table data is lost due to node/partition/network failure -- To be able to transfer the SSTable files to a different machine; for portability - -Types of Backups -^^^^^^^^^^^^^^^^ -Apache Cassandra supports two kinds of backup strategies. - -- Snapshots -- Incremental Backups - -A *snapshot* is a copy of a table’s SSTable files at a given time, created via hard links. The DDL to create the table is stored as well. Snapshots may be created by a user or created automatically. -The setting (``snapshot_before_compaction``) in ``cassandra.yaml`` determines if snapshots are created before each compaction. -By default ``snapshot_before_compaction`` is set to false. -Snapshots may be created automatically before keyspace truncation or dropping of a table by setting ``auto_snapshot`` to true (default) in ``cassandra.yaml``. -Truncates could be delayed due to the auto snapshots and another setting in ``cassandra.yaml`` determines how long the coordinator should wait for truncates to complete. -By default Cassandra waits 60 seconds for auto snapshots to complete. - -An *incremental backup* is a copy of a table’s SSTable files created by a hard link when memtables are flushed to disk as SSTables. -Typically incremental backups are paired with snapshots to reduce the backup time as well as reduce disk space. -Incremental backups are not enabled by default and must be enabled explicitly in ``cassandra.yaml`` (with ``incremental_backups`` setting) or with the Nodetool. 
-Once enabled, Cassandra creates a hard link to each SSTable flushed or streamed locally in a ``backups/`` subdirectory of the keyspace data. Incremental backups of system tables are also created. - -Data Directory Structure -^^^^^^^^^^^^^^^^^^^^^^^^ -The directory structure of Cassandra data consists of different directories for keyspaces, and tables with the data files within the table directories. Directories backups and snapshots to store backups and snapshots respectively for a particular table are also stored within the table directory. The directory structure for Cassandra is illustrated in Figure 1. - -.. figure:: Figure_1_backups.jpg - -Figure 1. Directory Structure for Cassandra Data - - -Setting Up Example Tables for Backups and Snapshots -**************************************************** -In this section we shall create some example data that could be used to demonstrate incremental backups and snapshots. We have used a three node Cassandra cluster. -First, the keyspaces are created. Subsequently tables are created within a keyspace and table data is added. We have used two keyspaces ``CQLKeyspace`` and ``CatalogKeyspace`` with two tables within each. -Create ``CQLKeyspace``: - -:: - - cqlsh> CREATE KEYSPACE CQLKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - -Create table ``t`` in the ``CQLKeyspace`` keyspace. - -:: - - cqlsh> USE CQLKeyspace; - cqlsh:cqlkeyspace> CREATE TABLE t ( - ... id int, - ... k int, - ... v text, - ... PRIMARY KEY (id) - ... ); - - -Add data to table ``t``: - -:: - - cqlsh:cqlkeyspace> - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1'); - - -A table query lists the data: - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t; - - id | k | v - ----+---+------ - 1 | 1 | val1 - 0 | 0 | val0 - - (2 rows) - -Create another table ``t2``: - -:: - - cqlsh:cqlkeyspace> CREATE TABLE t2 ( - ... id int, - ... k int, - ... v text, - ... PRIMARY KEY (id) - ... ); - -Add data to table ``t2``: - -:: - - cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (0, 0, 'val0'); - cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (1, 1, 'val1'); - cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (2, 2, 'val2'); - - -A table query lists table data: - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t2; - - id | k | v - ----+---+------ - 1 | 1 | val1 - 0 | 0 | val0 - 2 | 2 | val2 - - (3 rows) - -Create a second keyspace ``CatalogKeyspace``: - -:: - - cqlsh:cqlkeyspace> CREATE KEYSPACE CatalogKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - -Create a table called ``journal`` in ``CatalogKeyspace``: - -:: - - cqlsh:cqlkeyspace> USE CatalogKeyspace; - cqlsh:catalogkeyspace> CREATE TABLE journal ( - ... id int, - ... name text, - ... publisher text, - ... PRIMARY KEY (id) - ... 
); - - -Add data to table ``journal``: - -:: - - cqlsh:catalogkeyspace> INSERT INTO journal (id, name, publisher) VALUES (0, 'Apache - Cassandra Magazine', 'Apache Cassandra'); - cqlsh:catalogkeyspace> INSERT INTO journal (id, name, publisher) VALUES (1, 'Couchbase - Magazine', 'Couchbase'); - -Query table ``journal`` to list its data: - -:: - - cqlsh:catalogkeyspace> SELECT * FROM journal; - - id | name | publisher - ----+---------------------------+------------------ - 1 | Couchbase Magazine | Couchbase - 0 | Apache Cassandra Magazine | Apache Cassandra - - (2 rows) - -Add another table called ``magazine``: - -:: - - cqlsh:catalogkeyspace> CREATE TABLE magazine ( - ... id int, - ... name text, - ... publisher text, - ... PRIMARY KEY (id) - ... ); - -Add table data to ``magazine``: - -:: - - cqlsh:catalogkeyspace> INSERT INTO magazine (id, name, publisher) VALUES (0, 'Apache - Cassandra Magazine', 'Apache Cassandra'); - cqlsh:catalogkeyspace> INSERT INTO magazine (id, name, publisher) VALUES (1, 'Couchbase - Magazine', 'Couchbase'); - -List table ``magazine``’s data: - -:: - - cqlsh:catalogkeyspace> SELECT * from magazine; - - id | name | publisher - ----+---------------------------+------------------ - 1 | Couchbase Magazine | Couchbase - 0 | Apache Cassandra Magazine | Apache Cassandra - - (2 rows) - -Snapshots -^^^^^^^^^ -In this section including sub-sections we shall demonstrate creating snapshots. The command used to create a snapshot is ``nodetool snapshot`` and its usage is as follows: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool help snapshot - NAME - nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot - of the specified table - - SYNOPSIS - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] snapshot - [(-cf | --column-family
| --table
)] - [(-kt | --kt-list | -kc | --kc.list )] - [(-sf | --skip-flush)] [(-t | --tag )] [--] [] - - OPTIONS - -cf
, --column-family
, --table
- The table name (you must specify one and only one keyspace for using - this option) - - -h , --host - Node hostname or ip address - - -kt , --kt-list , -kc , --kc.list - The list of Keyspace.table to take snapshot.(you must not specify - only keyspace) - - -p , --port - Remote jmx agent port number - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - -sf, --skip-flush - Do not flush memtables before snapshotting (snapshot will not - contain unflushed data) - - -t , --tag - The name of the snapshot - - -u , --username - Remote jmx agent username - - -- - This option can be used to separate command-line options from the - list of argument, (useful when arguments might be mistaken for - command-line options - - [] - List of keyspaces. By default, all keyspaces - -Configuring for Snapshots -*************************** -To demonstrate creating snapshots with Nodetool on the commandline we have set -``auto_snapshots`` setting to ``false`` in ``cassandra.yaml``: - -:: - - auto_snapshot: false - -Also set ``snapshot_before_compaction`` to ``false`` to disable creating snapshots automatically before compaction: - -:: - - snapshot_before_compaction: false - -Creating Snapshots -******************* -To demonstrate creating snapshots start with no snapshots. Search for snapshots and none get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name snapshots - -We shall be using the example keyspaces and tables to create snapshots. - -Taking Snapshots of all Tables in a Keyspace -+++++++++++++++++++++++++++++++++++++++++++++ - -To take snapshots of all tables in a keyspace and also optionally tag the snapshot the syntax becomes: - -:: - - nodetool snapshot --tag -- - -As an example create a snapshot called ``catalog-ks`` for all the tables in the ``catalogkeyspace`` keyspace: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --tag catalog-ks -- catalogkeyspace - Requested creating snapshot(s) for [catalogkeyspace] with snapshot name [catalog-ks] and - options {skipFlush=false} - Snapshot directory: catalog-ks - -Search for snapshots and ``snapshots`` directories for the tables ``journal`` and ``magazine``, which are in the ``catalogkeyspace`` keyspace should get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name snapshots - ./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots - ./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots - -Snapshots of all tables in multiple keyspaces may be created similarly, as an example: - -:: - - nodetool snapshot --tag catalog-cql-ks --catalogkeyspace,cqlkeyspace - -Taking Snapshots of Single Table in a Keyspace -++++++++++++++++++++++++++++++++++++++++++++++ -To take a snapshot of a single table the ``nodetool snapshot`` command syntax becomes as follows: - -:: - - nodetool snapshot --tag --table
-- - -As an example create a snapshot for table ``magazine`` in keyspace ``catalokeyspace``: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --tag magazine --table magazine -- - catalogkeyspace - Requested creating snapshot(s) for [catalogkeyspace] with snapshot name [magazine] and - options {skipFlush=false} - Snapshot directory: magazine - -Taking Snapshot of Multiple Tables from same Keyspace -++++++++++++++++++++++++++++++++++++++++++++++++++++++ -To take snapshots of multiple tables in a keyspace the list of *Keyspace.table* must be specified with option ``--kt-list``. As an example create snapshots for tables ``t`` and ``t2`` in the ``cqlkeyspace`` keyspace: - -:: - - nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag multi-table - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag - multi-table - Requested creating snapshot(s) for [cqlkeyspace.t,cqlkeyspace.t2] with snapshot name [multi- - table] and options {skipFlush=false} - Snapshot directory: multi-table - -Multiple snapshots of the same set of tables may be created and tagged with a different name. As an example, create another snapshot for the same set of tables ``t`` and ``t2`` in the ``cqlkeyspace`` keyspace and tag the snapshots differently: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag - multi-table-2 - Requested creating snapshot(s) for [cqlkeyspace.t,cqlkeyspace.t2] with snapshot name [multi- - table-2] and options {skipFlush=false} - Snapshot directory: multi-table-2 - -Taking Snapshot of Multiple Tables from Different Keyspaces -++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -To take snapshots of multiple tables that are in different keyspaces the command syntax is the same as when multiple tables are in the same keyspace. Each *keyspace.table* must be specified separately in the ``--kt-list`` option. As an example, create a snapshot for table ``t`` in the ``cqlkeyspace`` and table ``journal`` in the catalogkeyspace and tag the snapshot ``multi-ks``. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list - catalogkeyspace.journal,cqlkeyspace.t --tag multi-ks - Requested creating snapshot(s) for [catalogkeyspace.journal,cqlkeyspace.t] with snapshot - name [multi-ks] and options {skipFlush=false} - Snapshot directory: multi-ks - -Listing Snapshots -*************************** -To list snapshots use the ``nodetool listsnapshots`` command. 
All the snapshots that we created in the preceding examples get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool listsnapshots - Snapshot Details: - Snapshot name Keyspace name Column family name True size Size on disk - multi-table cqlkeyspace t2 4.86 KiB 5.67 KiB - multi-table cqlkeyspace t 4.89 KiB 5.7 KiB - multi-ks cqlkeyspace t 4.89 KiB 5.7 KiB - multi-ks catalogkeyspace journal 4.9 KiB 5.73 KiB - magazine catalogkeyspace magazine 4.9 KiB 5.73 KiB - multi-table-2 cqlkeyspace t2 4.86 KiB 5.67 KiB - multi-table-2 cqlkeyspace t 4.89 KiB 5.7 KiB - catalog-ks catalogkeyspace journal 4.9 KiB 5.73 KiB - catalog-ks catalogkeyspace magazine 4.9 KiB 5.73 KiB - - Total TrueDiskSpaceUsed: 44.02 KiB - -Finding Snapshots Directories -****************************** -The ``snapshots`` directories may be listed with ``find –name snapshots`` command: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name snapshots - ./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/snapshots - ./cassandra/data/data/cqlkeyspace/t2-d993a390c22911e9b1350d927649052c/snapshots - ./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots - ./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots - [ec2-user@ip-10-0-2-238 ~]$ - -To list the snapshots for a particular table first change directory ( with ``cd``) to the ``snapshots`` directory for the table. As an example, list the snapshots for the ``catalogkeyspace/journal`` table. Two snapshots get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/journal- - 296a2d30c22a11e9b1350d927649052c/snapshots - [ec2-user@ip-10-0-2-238 snapshots]$ ls -l - total 0 - drwxrwxr-x. 2 ec2-user ec2-user 265 Aug 19 02:44 catalog-ks - drwxrwxr-x. 2 ec2-user ec2-user 265 Aug 19 02:52 multi-ks - -A ``snapshots`` directory lists the SSTable files in the snapshot. ``Schema.cql`` file is also created in each snapshot for the schema definition DDL that may be run in CQL to create the table when restoring from a snapshot: - -:: - - [ec2-user@ip-10-0-2-238 snapshots]$ cd catalog-ks - [ec2-user@ip-10-0-2-238 catalog-ks]$ ls -l - total 44 - -rw-rw-r--. 1 ec2-user ec2-user 31 Aug 19 02:44 manifest.jsonZ - - -rw-rw-r--. 4 ec2-user ec2-user 47 Aug 19 02:38 na-1-big-CompressionInfo.db - -rw-rw-r--. 4 ec2-user ec2-user 97 Aug 19 02:38 na-1-big-Data.db - -rw-rw-r--. 4 ec2-user ec2-user 10 Aug 19 02:38 na-1-big-Digest.crc32 - -rw-rw-r--. 4 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Filter.db - -rw-rw-r--. 4 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Index.db - -rw-rw-r--. 4 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db - -rw-rw-r--. 4 ec2-user ec2-user 56 Aug 19 02:38 na-1-big-Summary.db - -rw-rw-r--. 4 ec2-user ec2-user 92 Aug 19 02:38 na-1-big-TOC.txt - -rw-rw-r--. 1 ec2-user ec2-user 814 Aug 19 02:44 schema.cql - -Clearing Snapshots -****************** -Snapshots may be cleared or deleted with the ``nodetool clearsnapshot`` command. Either a specific snapshot name must be specified or the ``–all`` option must be specified. -As an example delete a snapshot called ``magazine`` from keyspace ``cqlkeyspace``: - -:: - - nodetool clearsnapshot -t magazine – cqlkeyspace - Delete all snapshots from cqlkeyspace with the –all option. - nodetool clearsnapshot –all -- cqlkeyspace - - - -Incremental Backups -^^^^^^^^^^^^^^^^^^^ -In the following sub-sections we shall discuss configuring and creating incremental backups. 
- -Configuring for Incremental Backups -*********************************** - -To create incremental backups set ``incremental_backups`` to ``true`` in ``cassandra.yaml``. - -:: - - incremental_backups: true - -This is the only setting needed to create incremental backups. By default ``incremental_backups`` setting is set to ``false`` because a new set of SSTable files is created for each data flush and if several CQL statements are to be run the ``backups`` directory could fill up quickly and use up storage that is needed to store table data. -Incremental backups may also be enabled on the command line with the Nodetool command ``nodetool enablebackup``. Incremental backups may be disabled with ``nodetool disablebackup`` command. Status of incremental backups, whether they are enabled may be found with ``nodetool statusbackup``. - - - -Creating Incremental Backups -****************************** -After each table is created flush the table data with ``nodetool flush`` command. Incremental backups get created. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t2 - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush catalogkeyspace journal magazine - -Finding Incremental Backups -*************************** - -Incremental backups are created within the Cassandra’s ``data`` directory within a table directory. Backups may be found with following command. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name backups - - ./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups - ./cassandra/data/data/cqlkeyspace/t2-d993a390c22911e9b1350d927649052c/backups - ./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/backups - ./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/backups - -Creating an Incremental Backup -****************************** -This section discusses how incremental backups are created in more detail starting with when a new keyspace is created and a table is added. Create a keyspace called ``CQLKeyspace`` (arbitrary name). - -:: - - cqlsh> CREATE KEYSPACE CQLKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3} - -Create a table called ``t`` within the ``CQLKeyspace`` keyspace: - -:: - - cqlsh> USE CQLKeyspace; - cqlsh:cqlkeyspace> CREATE TABLE t ( - ... id int, - ... k int, - ... v text, - ... PRIMARY KEY (id) - ... ); - -Flush the keyspace and table: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t - -Search for backups and a ``backups`` directory should get listed even though we have added no table data yet. 
- -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name backups - - ./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups - -Change directory to the ``backups`` directory and list files and no files get listed as no table data has been added yet: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t- - d132e240c21711e9bbee19821dcea330/backups - [ec2-user@ip-10-0-2-238 backups]$ ls -l - total 0 - -Next, add a row of data to table ``t`` that we created: - -:: - - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - -Run the ``nodetool flush`` command to flush table data: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t - -List the files and directories in the ``backups`` directory and SSTable files for an incremental backup get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t- - d132e240c21711e9bbee19821dcea330/backups - [ec2-user@ip-10-0-2-238 backups]$ ls -l - total 36 - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 00:32 na-1-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 43 Aug 19 00:32 na-1-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 00:32 na-1-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 00:32 na-1-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 8 Aug 19 00:32 na-1-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:32 na-1-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 00:32 na-1-big-Summary.db - -rw-rw-r--. 2 ec2-user ec2-user 92 Aug 19 00:32 na-1-big-TOC.txt - -Add another row of data: - -:: - - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1'); - -Again, run the ``nodetool flush`` command: - -:: - - [ec2-user@ip-10-0-2-238 backups]$ nodetool flush cqlkeyspace t - -A new incremental backup gets created for the new data added. List the files in the ``backups`` directory for table ``t`` and two sets of SSTable files get listed, one for each incremental backup. The SSTable files are timestamped, which distinguishes the first incremental backup from the second: - -:: - - [ec2-user@ip-10-0-2-238 backups]$ ls -l - total 72 - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 00:32 na-1-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 43 Aug 19 00:32 na-1-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 00:32 na-1-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 00:32 na-1-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 8 Aug 19 00:32 na-1-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:32 na-1-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 00:32 na-1-big-Summary.db - -rw-rw-r--. 2 ec2-user ec2-user 92 Aug 19 00:32 na-1-big-TOC.txt - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 00:35 na-2-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 41 Aug 19 00:35 na-2-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 00:35 na-2-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 00:35 na-2-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 8 Aug 19 00:35 na-2-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:35 na-2-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 00:35 na-2-big-Summary.db - -rw-rw-r--. 
2 ec2-user ec2-user 92 Aug 19 00:35 na-2-big-TOC.txt - [ec2-user@ip-10-0-2-238 backups]$ - -The ``backups`` directory for table ``cqlkeyspace/t`` is created within the ``data`` directory for the table: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t- - d132e240c21711e9bbee19821dcea330 - [ec2-user@ip-10-0-2-238 t-d132e240c21711e9bbee19821dcea330]$ ls -l - total 36 - drwxrwxr-x. 2 ec2-user ec2-user 226 Aug 19 02:30 backups - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 02:30 na-1-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 79 Aug 19 02:30 na-1-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 02:30 na-1-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 02:30 na-1-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 02:30 na-1-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4696 Aug 19 02:30 na-1-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 02:30 na-1-big-Summary.db - -rw-rw-r--. 2 ec2-user ec2-user 92 Aug 19 02:30 na-1-big-TOC.txt - -The incremental backups for the other keyspaces/tables get created similarly. As an example the ``backups`` directory for table ``catalogkeyspace/magazine`` is created within the data directory: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/magazine- - 446eae30c22a11e9b1350d927649052c - [ec2-user@ip-10-0-2-238 magazine-446eae30c22a11e9b1350d927649052c]$ ls -l - total 36 - drwxrwxr-x. 2 ec2-user ec2-user 226 Aug 19 02:38 backups - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 02:38 na-1-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 97 Aug 19 02:38 na-1-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 02:38 na-1-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 02:38 na-1-big-Summary.db - -rw-rw-r--. 2 ec2-user ec2-user 92 Aug 19 02:38 na-1-big-TOC.txt - - - - - -Restoring from Incremental Backups and Snapshots -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The two main tools/commands for restoring a table after it has been dropped are: - -- sstableloader -- nodetool import - -A snapshot contains essentially the same set of SSTable files as an incremental backup does with a few additional files. A snapshot includes a ``schema.cql`` file for the schema DDL to create a table in CQL. A table backup does not include DDL which must be obtained from a snapshot when restoring from an incremental backup. - - diff --git a/src/doc/4.0-alpha4/_sources/operating/bloom_filters.rst.txt b/src/doc/4.0-alpha4/_sources/operating/bloom_filters.rst.txt deleted file mode 100644 index 0b37c18da..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/bloom_filters.rst.txt +++ /dev/null @@ -1,65 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Bloom Filters -------------- - -In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter. - -Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file. - -While bloom filters can not guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the -the ``bloom_filter_fp_chance`` to a float between 0 and 1. - -The default value for ``bloom_filter_fp_chance`` is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases. - -Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the ``bloom_filter_fp_chance`` gets closer to 0), memory usage -increases non-linearly - the bloom filter for ``bloom_filter_fp_chance = 0.01`` will require about three times as much -memory as the same table with ``bloom_filter_fp_chance = 0.1``. - -Typical values for ``bloom_filter_fp_chance`` are usually between 0.01 (1%) to 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case: - -- Users with more RAM and slower disks may benefit from setting the ``bloom_filter_fp_chance`` to a numerically lower - number (such as 0.01) to avoid excess IO operations -- Users with less RAM, more dense nodes, or very fast disks may tolerate a higher ``bloom_filter_fp_chance`` in order to - save RAM at the expense of excess IO operations -- In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics - workloads), setting the ``bloom_filter_fp_chance`` to a much higher number is acceptable. - -Changing -^^^^^^^^ - -The bloom filter false positive chance is visible in the ``DESCRIBE TABLE`` output as the field -``bloom_filter_fp_chance``. Operators can change the value with an ``ALTER TABLE`` statement: -:: - - ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01 - -Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ``ALTER TABLE`` statement, new -files on disk will be written with the new ``bloom_filter_fp_chance``, but existing sstables will not be modified until -they are compacted - if an operator needs a change to ``bloom_filter_fp_chance`` to take effect, they can trigger an -SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the progress. 
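As a hedged walkthrough of the change described above, using the example table ``cqlkeyspace.t`` from the Backups section purely for illustration: check the current value, lower it, and then force a rewrite of the existing SSTables so their bloom filters are regenerated.

::

    -- the current bloom_filter_fp_chance appears in the table properties
    cqlsh> DESCRIBE TABLE cqlkeyspace.t;

    -- only SSTables written after this statement pick up the new value
    cqlsh> ALTER TABLE cqlkeyspace.t WITH bloom_filter_fp_chance = 0.01;

::

    # rewrite existing SSTables so the new bloom filters take effect
    [ec2-user@ip-10-0-2-238 ~]$ nodetool upgradesstables -a cqlkeyspace t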
diff --git a/src/doc/4.0-alpha4/_sources/operating/bulk_loading.rst.txt b/src/doc/4.0-alpha4/_sources/operating/bulk_loading.rst.txt deleted file mode 100644 index 850260ac0..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/bulk_loading.rst.txt +++ /dev/null @@ -1,660 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. -.. highlight:: none - -.. _bulk-loading: - -Bulk Loading -============== - -Bulk loading of data in Apache Cassandra is supported by different tools. The data to be bulk loaded must be in the form of SSTables. Cassandra does not support loading data in any other format such as CSV, JSON, and XML directly. Bulk loading could be used to: - -- Restore incremental backups and snapshots. Backups and snapshots are already in the form of SSTables. -- Load existing SSTables into another cluster, which could have a different number of nodes or replication strategy. -- Load external data into a cluster - -**Note*: CSV Data can be loaded via the cqlsh COPY command but we do not recommend this for bulk loading, which typically requires many GB or TB of data. - -Tools for Bulk Loading -^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra provides two commands or tools for bulk loading data. These are: - -- Cassandra Bulk loader, also called ``sstableloader`` -- The ``nodetool import`` command - -The ``sstableloader`` and ``nodetool import`` are accessible if the Cassandra installation ``bin`` directory is in the ``PATH`` environment variable. Or these may be accessed directly from the ``bin`` directory. We shall discuss each of these next. We shall use the example or sample keyspaces and tables created in the Backups section. - -Using sstableloader -^^^^^^^^^^^^^^^^^^^ - -The ``sstableloader`` is the main tool for bulk uploading data. The ``sstableloader`` streams SSTable data files to a running cluster. The ``sstableloader`` loads data conforming to the replication strategy and replication factor. The table to upload data to does need not to be empty. - -The only requirements to run ``sstableloader`` are: - -1. One or more comma separated initial hosts to connect to and get ring information. -2. A directory path for the SSTables to load. - -Its usage is as follows. - -:: - - sstableloader [options] - -Sstableloader bulk loads the SSTables found in the directory ```` to the configured cluster. The ```` is used as the target *keyspace/table* name. As an example, to load an SSTable named -``Standard1-g-1-Data.db`` into ``Keyspace1/Standard1``, you will need to have the -files ``Standard1-g-1-Data.db`` and ``Standard1-g-1-Index.db`` in a directory ``/path/to/Keyspace1/Standard1/``. 
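A short sketch of what that layout and invocation might look like for the ``Keyspace1.Standard1`` example above; the node address is reused from the demo later in this section and only the ``--nodes`` option is required.

::

    # the last two path components name the target keyspace and table
    /path/to/Keyspace1/Standard1/Standard1-g-1-Data.db
    /path/to/Keyspace1/Standard1/Standard1-g-1-Index.db

    # stream the SSTables in that directory to the cluster
    $ sstableloader --nodes 10.0.2.238 /path/to/Keyspace1/Standard1/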
- -Sstableloader Option to accept Target keyspace name -**************************************************** -Often as part of a backup strategy some Cassandra DBAs store an entire data directory. When corruption in data is found then they would like to restore data in the same cluster (for large clusters 200 nodes) but with different keyspace name. - -Currently ``sstableloader`` derives keyspace name from the folder structure. As an option to specify target keyspace name as part of ``sstableloader``, version 4.0 adds support for the ``--target-keyspace`` option (`CASSANDRA-13884 -`_). - -The supported options are as follows from which only ``-d,--nodes `` is required. - -:: - - -alg,--ssl-alg Client SSL: algorithm - - -ap,--auth-provider Custom - AuthProvider class name for - cassandra authentication - -ciphers,--ssl-ciphers Client SSL: - comma-separated list of - encryption suites to use - -cph,--connections-per-host Number of - concurrent connections-per-host. - -d,--nodes Required. - Try to connect to these hosts (comma separated) initially for ring information - - -f,--conf-path cassandra.yaml file path for streaming throughput and client/server SSL. - - -h,--help Display this help message - - -i,--ignore Don't stream to this (comma separated) list of nodes - - -idct,--inter-dc-throttle Inter-datacenter throttle speed in Mbits (default unlimited) - - -k,--target-keyspace Target - keyspace name - -ks,--keystore Client SSL: - full path to keystore - -kspw,--keystore-password Client SSL: - password of the keystore - --no-progress Don't - display progress - -p,--port Port used - for native connection (default 9042) - -prtcl,--ssl-protocol Client SSL: - connections protocol to use (default: TLS) - -pw,--password Password for - cassandra authentication - -sp,--storage-port Port used - for internode communication (default 7000) - -spd,--server-port-discovery Use ports - published by server to decide how to connect. With SSL requires StartTLS - to be used. - -ssp,--ssl-storage-port Port used - for TLS internode communication (default 7001) - -st,--store-type Client SSL: - type of store - -t,--throttle Throttle - speed in Mbits (default unlimited) - -ts,--truststore Client SSL: - full path to truststore - -tspw,--truststore-password Client SSL: - Password of the truststore - -u,--username Username for - cassandra authentication - -v,--verbose verbose - output - -The ``cassandra.yaml`` file could be provided on the command-line with ``-f`` option to set up streaming throughput, client and server encryption options. Only ``stream_throughput_outbound_megabits_per_sec``, ``server_encryption_options`` and ``client_encryption_options`` are read from yaml. You can override options read from ``cassandra.yaml`` with corresponding command line options. - -A sstableloader Demo -******************** -We shall demonstrate using ``sstableloader`` by uploading incremental backup data for table ``catalogkeyspace.magazine``. We shall also use a snapshot of the same table to bulk upload in a different run of ``sstableloader``. The backups and snapshots for the ``catalogkeyspace.magazine`` table are listed as follows. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/magazine- - 446eae30c22a11e9b1350d927649052c - [ec2-user@ip-10-0-2-238 magazine-446eae30c22a11e9b1350d927649052c]$ ls -l - total 0 - drwxrwxr-x. 2 ec2-user ec2-user 226 Aug 19 02:38 backups - drwxrwxr-x. 
4 ec2-user ec2-user 40 Aug 19 02:45 snapshots - -The directory path structure of SSTables to be uploaded using ``sstableloader`` is used as the target keyspace/table. - -We could have directly uploaded from the ``backups`` and ``snapshots`` directories respectively if the directory structure were in the format used by ``sstableloader``. But the directory path of backups and snapshots for SSTables is ``/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/backups`` and ``/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots`` respectively, which cannot be used to upload SSTables to ``catalogkeyspace.magazine`` table. The directory path structure must be ``/catalogkeyspace/magazine/`` to use ``sstableloader``. We need to create a new directory structure to upload SSTables with ``sstableloader`` which is typical when using ``sstableloader``. Create a directory structure ``/catalogkeyspace/magazine`` and set its permissions. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo mkdir -p /catalogkeyspace/magazine - [ec2-user@ip-10-0-2-238 ~]$ sudo chmod -R 777 /catalogkeyspace/magazine - -Bulk Loading from an Incremental Backup -+++++++++++++++++++++++++++++++++++++++ -An incremental backup does not include the DDL for a table. The table must already exist. If the table was dropped it may be created using the ``schema.cql`` generated with every snapshot of a table. As we shall be using ``sstableloader`` to load SSTables to the ``magazine`` table, the table must exist prior to running ``sstableloader``. The table does not need to be empty but we have used an empty table as indicated by a CQL query: - -:: - - cqlsh:catalogkeyspace> SELECT * FROM magazine; - - id | name | publisher - ----+------+----------- - - (0 rows) - -After the table to upload has been created copy the SSTable files from the ``backups`` directory to the ``/catalogkeyspace/magazine/`` directory that we created. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo cp ./cassandra/data/data/catalogkeyspace/magazine- - 446eae30c22a11e9b1350d927649052c/backups/* /catalogkeyspace/magazine/ - -Run the ``sstableloader`` to upload SSTables from the ``/catalogkeyspace/magazine/`` directory. 
- -:: - - sstableloader --nodes 10.0.2.238 /catalogkeyspace/magazine/ - -The output from the ``sstableloader`` command should be similar to the listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sstableloader --nodes 10.0.2.238 /catalogkeyspace/magazine/ - Opening SSTables and calculating sections to stream - Streaming relevant part of /catalogkeyspace/magazine/na-1-big-Data.db - /catalogkeyspace/magazine/na-2-big-Data.db to [35.173.233.153:7000, 10.0.2.238:7000, - 54.158.45.75:7000] - progress: [35.173.233.153:7000]0:1/2 88 % total: 88% 0.018KiB/s (avg: 0.018KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% total: 176% 33.807KiB/s (avg: 0.036KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% total: 176% 0.000KiB/s (avg: 0.029KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:1/2 39 % total: 81% 0.115KiB/s - (avg: 0.024KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % total: 108% - 97.683KiB/s (avg: 0.033KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % - [54.158.45.75:7000]0:1/2 39 % total: 80% 0.233KiB/s (avg: 0.040KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % - [54.158.45.75:7000]0:2/2 78 % total: 96% 88.522KiB/s (avg: 0.049KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % - [54.158.45.75:7000]0:2/2 78 % total: 96% 0.000KiB/s (avg: 0.045KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % - [54.158.45.75:7000]0:2/2 78 % total: 96% 0.000KiB/s (avg: 0.044KiB/s) - -After the ``sstableloader`` has run query the ``magazine`` table and the loaded table should get listed when a query is run. - -:: - - cqlsh:catalogkeyspace> SELECT * FROM magazine; - - id | name | publisher - ----+---------------------------+------------------ - 1 | Couchbase Magazine | Couchbase - 0 | Apache Cassandra Magazine | Apache Cassandra - - (2 rows) - cqlsh:catalogkeyspace> - -Bulk Loading from a Snapshot -+++++++++++++++++++++++++++++ -In this section we shall demonstrate restoring a snapshot of the ``magazine`` table to the ``magazine`` table. As we used the same table to restore data from a backup the directory structure required by ``sstableloader`` should already exist. If the directory structure needed to load SSTables to ``catalogkeyspace.magazine`` does not exist create the directories and set their permissions. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo mkdir -p /catalogkeyspace/magazine - [ec2-user@ip-10-0-2-238 ~]$ sudo chmod -R 777 /catalogkeyspace/magazine - -As we shall be copying the snapshot files to the directory remove any files that may be in the directory. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo rm /catalogkeyspace/magazine/* - [ec2-user@ip-10-0-2-238 ~]$ cd /catalogkeyspace/magazine/ - [ec2-user@ip-10-0-2-238 magazine]$ ls -l - total 0 - - -Copy the snapshot files to the ``/catalogkeyspace/magazine`` directory. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo cp ./cassandra/data/data/catalogkeyspace/magazine- - 446eae30c22a11e9b1350d927649052c/snapshots/magazine/* /catalogkeyspace/magazine - -List the files in the ``/catalogkeyspace/magazine`` directory and a ``schema.cql`` should also get listed. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd /catalogkeyspace/magazine - [ec2-user@ip-10-0-2-238 magazine]$ ls -l - total 44 - -rw-r--r--. 1 root root 31 Aug 19 04:13 manifest.json - -rw-r--r--. 1 root root 47 Aug 19 04:13 na-1-big-CompressionInfo.db - -rw-r--r--. 1 root root 97 Aug 19 04:13 na-1-big-Data.db - -rw-r--r--. 
1 root root 10 Aug 19 04:13 na-1-big-Digest.crc32 - -rw-r--r--. 1 root root 16 Aug 19 04:13 na-1-big-Filter.db - -rw-r--r--. 1 root root 16 Aug 19 04:13 na-1-big-Index.db - -rw-r--r--. 1 root root 4687 Aug 19 04:13 na-1-big-Statistics.db - -rw-r--r--. 1 root root 56 Aug 19 04:13 na-1-big-Summary.db - -rw-r--r--. 1 root root 92 Aug 19 04:13 na-1-big-TOC.txt - -rw-r--r--. 1 root root 815 Aug 19 04:13 schema.cql - -Alternatively create symlinks to the snapshot folder instead of copying the data, something like: - -:: - - mkdir keyspace_name - ln -s _path_to_snapshot_folder keyspace_name/table_name - -If the ``magazine`` table was dropped run the DDL in the ``schema.cql`` to create the table. Run the ``sstableloader`` with the following command. - -:: - - sstableloader --nodes 10.0.2.238 /catalogkeyspace/magazine/ - -As the output from the command indicates SSTables get streamed to the cluster. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sstableloader --nodes 10.0.2.238 /catalogkeyspace/magazine/ - - Established connection to initial hosts - Opening SSTables and calculating sections to stream - Streaming relevant part of /catalogkeyspace/magazine/na-1-big-Data.db to - [35.173.233.153:7000, 10.0.2.238:7000, 54.158.45.75:7000] - progress: [35.173.233.153:7000]0:1/1 176% total: 176% 0.017KiB/s (avg: 0.017KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% total: 176% 0.000KiB/s (avg: 0.014KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % total: 108% 0.115KiB/s - (avg: 0.017KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % - [54.158.45.75:7000]0:1/1 78 % total: 96% 0.232KiB/s (avg: 0.024KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % - [54.158.45.75:7000]0:1/1 78 % total: 96% 0.000KiB/s (avg: 0.022KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % - [54.158.45.75:7000]0:1/1 78 % total: 96% 0.000KiB/s (avg: 0.021KiB/s) - -Some other requirements of ``sstableloader`` that should be kept into consideration are: - -- The SSTables to be loaded must be compatible with the Cassandra version being loaded into. -- Repairing tables that have been loaded into a different cluster does not repair the source tables. -- Sstableloader makes use of port 7000 for internode communication. -- Before restoring incremental backups run ``nodetool flush`` to backup any data in memtables - -Using nodetool import -^^^^^^^^^^^^^^^^^^^^^ -In this section we shall import SSTables into a table using the ``nodetool import`` command. The ``nodetool refresh`` command is deprecated, and it is recommended to use ``nodetool import`` instead. The ``nodetool refresh`` does not have an option to load new SSTables from a separate directory which the ``nodetool import`` does. - -The command usage is as follows. - -:: - - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] import - [(-c | --no-invalidate-caches)] [(-e | --extended-verify)] - [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)] - [(-t | --no-tokens)] [(-v | --no-verify)] [--]
- ... - -The arguments ``keyspace``, ``table`` name and ``directory`` to import SSTables from are required. - -The supported options are as follows. - -:: - - -c, --no-invalidate-caches - Don't invalidate the row cache when importing - - -e, --extended-verify - Run an extended verify, verifying all values in the new SSTables - - -h , --host - Node hostname or ip address - - -l, --keep-level - Keep the level on the new SSTables - - -p , --port - Remote jmx agent port number - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - -q, --quick - Do a quick import without verifying SSTables, clearing row cache or - checking in which data directory to put the file - - -r, --keep-repaired - Keep any repaired information from the SSTables - - -t, --no-tokens - Don't verify that all tokens in the new SSTable are owned by the - current node - - -u , --username - Remote jmx agent username - - -v, --no-verify - Don't verify new SSTables - - -- - This option can be used to separate command-line options from the - list of argument, (useful when arguments might be mistaken for - command-line options - -As the keyspace and table are specified on the command line ``nodetool import`` does not have the same requirement that ``sstableloader`` does, which is to have the SSTables in a specific directory path. When importing snapshots or incremental backups with ``nodetool import`` the SSTables don’t need to be copied to another directory. - -Importing Data from an Incremental Backup -***************************************** - -In this section we shall demonstrate using ``nodetool import`` to import SSTables from an incremental backup. We shall use the example table ``cqlkeyspace.t``. Drop table ``t`` as we are demonstrating to restore the table. - -:: - - cqlsh:cqlkeyspace> DROP table t; - -An incremental backup for a table does not include the schema definition for the table. If the schema definition is not kept as a separate backup, the ``schema.cql`` from a backup of the table may be used to create the table as follows. - -:: - - cqlsh:cqlkeyspace> CREATE TABLE IF NOT EXISTS cqlkeyspace.t ( - ... id int PRIMARY KEY, - ... k int, - ... v text) - ... WITH ID = d132e240-c217-11e9-bbee-19821dcea330 - ... AND bloom_filter_fp_chance = 0.01 - ... AND crc_check_chance = 1.0 - ... AND default_time_to_live = 0 - ... AND gc_grace_seconds = 864000 - ... AND min_index_interval = 128 - ... AND max_index_interval = 2048 - ... AND memtable_flush_period_in_ms = 0 - ... AND speculative_retry = '99p' - ... AND additional_write_policy = '99p' - ... AND comment = '' - ... AND caching = { 'keys': 'ALL', 'rows_per_partition': 'NONE' } - ... AND compaction = { 'max_threshold': '32', 'min_threshold': '4', - 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' } - ... AND compression = { 'chunk_length_in_kb': '16', 'class': - 'org.apache.cassandra.io.compress.LZ4Compressor' } - ... AND cdc = false - ... AND extensions = { }; - -Initially the table could be empty, but does not have to be. - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t; - - id | k | v - ----+---+--- - - (0 rows) - -Run the ``nodetool import`` command by providing the keyspace, table and the backups directory. We don’t need to copy the table backups to another directory to run ``nodetool import`` as we had to when using ``sstableloader``. 
- -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool import -- cqlkeyspace t - ./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups - [ec2-user@ip-10-0-2-238 ~]$ - -The SSTables get imported into the table. Run a query in cqlsh to list the data imported. - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t; - - id | k | v - ----+---+------ - 1 | 1 | val1 - 0 | 0 | val0 - - -Importing Data from a Snapshot -******************************** -Importing SSTables from a snapshot with the ``nodetool import`` command is similar to importing SSTables from an incremental backup. To demonstrate we shall import a snapshot for table ``catalogkeyspace.journal``. Drop the table as we are demonstrating to restore the table from a snapshot. - -:: - - cqlsh:cqlkeyspace> use CATALOGKEYSPACE; - cqlsh:catalogkeyspace> DROP TABLE journal; - -We shall use the ``catalog-ks`` snapshot for the ``journal`` table. List the files in the snapshot. The snapshot includes a ``schema.cql``, which is the schema definition for the ``journal`` table. - -:: - - [ec2-user@ip-10-0-2-238 catalog-ks]$ ls -l - total 44 - -rw-rw-r--. 1 ec2-user ec2-user 31 Aug 19 02:44 manifest.json - -rw-rw-r--. 3 ec2-user ec2-user 47 Aug 19 02:38 na-1-big-CompressionInfo.db - -rw-rw-r--. 3 ec2-user ec2-user 97 Aug 19 02:38 na-1-big-Data.db - -rw-rw-r--. 3 ec2-user ec2-user 10 Aug 19 02:38 na-1-big-Digest.crc32 - -rw-rw-r--. 3 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Filter.db - -rw-rw-r--. 3 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Index.db - -rw-rw-r--. 3 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db - -rw-rw-r--. 3 ec2-user ec2-user 56 Aug 19 02:38 na-1-big-Summary.db - -rw-rw-r--. 3 ec2-user ec2-user 92 Aug 19 02:38 na-1-big-TOC.txt - -rw-rw-r--. 1 ec2-user ec2-user 814 Aug 19 02:44 schema.cql - -Copy the DDL from the ``schema.cql`` and run in cqlsh to create the ``catalogkeyspace.journal`` table. - -:: - - cqlsh:catalogkeyspace> CREATE TABLE IF NOT EXISTS catalogkeyspace.journal ( - ... id int PRIMARY KEY, - ... name text, - ... publisher text) - ... WITH ID = 296a2d30-c22a-11e9-b135-0d927649052c - ... AND bloom_filter_fp_chance = 0.01 - ... AND crc_check_chance = 1.0 - ... AND default_time_to_live = 0 - ... AND gc_grace_seconds = 864000 - ... AND min_index_interval = 128 - ... AND max_index_interval = 2048 - ... AND memtable_flush_period_in_ms = 0 - ... AND speculative_retry = '99p' - ... AND additional_write_policy = '99p' - ... AND comment = '' - ... AND caching = { 'keys': 'ALL', 'rows_per_partition': 'NONE' } - ... AND compaction = { 'min_threshold': '4', 'max_threshold': - '32', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' } - ... AND compression = { 'chunk_length_in_kb': '16', 'class': - 'org.apache.cassandra.io.compress.LZ4Compressor' } - ... AND cdc = false - ... AND extensions = { }; - - -Run the ``nodetool import`` command to import the SSTables for the snapshot. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool import -- catalogkeyspace journal - ./cassandra/data/data/catalogkeyspace/journal- - 296a2d30c22a11e9b1350d927649052c/snapshots/catalog-ks/ - [ec2-user@ip-10-0-2-238 ~]$ - -Subsequently run a CQL query on the ``journal`` table and the data imported gets listed. 
- -:: - - cqlsh:catalogkeyspace> - cqlsh:catalogkeyspace> SELECT * FROM journal; - - id | name | publisher - ----+---------------------------+------------------ - 1 | Couchbase Magazine | Couchbase - 0 | Apache Cassandra Magazine | Apache Cassandra - - (2 rows) - cqlsh:catalogkeyspace> - - -Bulk Loading External Data -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Bulk loading external data directly is not supported by any of the tools we have discussed which include ``sstableloader`` and ``nodetool import``. The ``sstableloader`` and ``nodetool import`` require data to be in the form of SSTables. Apache Cassandra supports a Java API for generating SSTables from input data. Subsequently the ``sstableloader`` or ``nodetool import`` could be used to bulk load the SSTables. Next, we shall discuss the ``org.apache.cassandra.io.sstable.CQLSSTableWriter`` Java class for generating SSTables. - -Generating SSTables with CQLSSTableWriter Java API -*************************************************** -To generate SSTables using the ``CQLSSTableWriter`` class the following need to be supplied at the least. - -- An output directory to generate the SSTable in -- The schema for the SSTable -- A prepared insert statement -- A partitioner - -The output directory must already have been created. Create a directory (``/sstables`` as an example) and set its permissions. - -:: - - sudo mkdir /sstables - sudo chmod 777 -R /sstables - -Next, we shall discuss To use ``CQLSSTableWriter`` could be used in a Java application. Create a Java constant for the output directory. - -:: - - public static final String OUTPUT_DIR = "./sstables"; - -``CQLSSTableWriter`` Java API has the provision to create a user defined type. Create a new type to store ``int`` data: - -:: - - String type = "CREATE TYPE CQLKeyspace.intType (a int, b int)"; - // Define a String variable for the SSTable schema. - String schema = "CREATE TABLE CQLKeyspace.t (" - + " id int PRIMARY KEY," - + " k int," - + " v1 text," - + " v2 intType," - + ")"; - -Define a ``String`` variable for the prepared insert statement to use: - -:: - - String insertStmt = "INSERT INTO CQLKeyspace.t (id, k, v1, v2) VALUES (?, ?, ?, ?)"; - -The partitioner to use does not need to be set as the default partitioner ``Murmur3Partitioner`` is used. - -All these variables or settings are used by the builder class ``CQLSSTableWriter.Builder`` to create a ``CQLSSTableWriter`` object. - -Create a File object for the output directory. - -:: - - File outputDir = new File(OUTPUT_DIR + File.separator + "CQLKeyspace" + File.separator + "t"); - -Next, obtain a ``CQLSSTableWriter.Builder`` object using ``static`` method ``CQLSSTableWriter.builder()``. Set the output -directory ``File`` object, user defined type, SSTable schema, buffer size, prepared insert statement, and optionally any of the other builder options, and invoke the ``build()`` method to create a ``CQLSSTableWriter`` object: - -:: - - CQLSSTableWriter writer = CQLSSTableWriter.builder() - .inDirectory(outputDir) - .withType(type) - .forTable(schema) - .withBufferSizeInMB(256) - .using(insertStmt).build(); - -Next, set the SSTable data. If any user define types are used obtain a ``UserType`` object for these: - -:: - - UserType userType = writer.getUDType("intType"); - -Add data rows for the resulting SSTable. 
- -:: - - writer.addRow(0, 0, "val0", userType.newValue().setInt("a", 0).setInt("b", 0)); - writer.addRow(1, 1, "val1", userType.newValue().setInt("a", 1).setInt("b", 1)); - writer.addRow(2, 2, "val2", userType.newValue().setInt("a", 2).setInt("b", 2)); - -Close the writer, finalizing the SSTable. - -:: - - writer.close(); - -All the public methods the ``CQLSSTableWriter`` class provides including some other methods that are not discussed in the preceding example are as follows. - -===================================================================== ============ -Method Description -===================================================================== ============ -addRow(java.util.List values) Adds a new row to the writer. Returns a CQLSSTableWriter object. Each provided value type should correspond to the types of the CQL column the value is for. The correspondence between java type and CQL type is the same one than the one documented at www.datastax.com/drivers/java/2.0/apidocs/com/datastax/driver/core/DataType.Name.html#asJavaC lass(). -addRow(java.util.Map values) Adds a new row to the writer. Returns a CQLSSTableWriter object. This is equivalent to the other addRow methods, but takes a map whose keys are the names of the columns to add instead of taking a list of the values in the order of the insert statement used during construction of this SSTable writer. The column names in the map keys must be in lowercase unless the declared column name is a case-sensitive quoted identifier in which case the map key must use the exact case of the column. The values parameter is a map of column name to column values representing the new row to add. If a column is not included in the map, it's value will be null. If the map contains keys that do not correspond to one of the columns of the insert statement used when creating this SSTable writer, the corresponding value is ignored. -addRow(java.lang.Object... values) Adds a new row to the writer. Returns a CQLSSTableWriter object. -CQLSSTableWriter.builder() Returns a new builder for a CQLSSTableWriter. -close() Closes the writer. -rawAddRow(java.nio.ByteBuffer... values) Adds a new row to the writer given already serialized binary values. Returns a CQLSSTableWriter object. The row values must correspond to the bind variables of the insertion statement used when creating by this SSTable writer. -rawAddRow(java.util.List values) Adds a new row to the writer given already serialized binary values. Returns a CQLSSTableWriter object. The row values must correspond to the bind variables of the insertion statement used when creating by this SSTable writer. | -rawAddRow(java.util.Map values) Adds a new row to the writer given already serialized binary values. Returns a CQLSSTableWriter object. The row values must correspond to the bind variables of the insertion statement used when creating by this SSTable writer. | -getUDType(String dataType) Returns the User Defined type used in this SSTable Writer that can be used to create UDTValue instances. -===================================================================== ============ - - -All the public methods the ``CQLSSTableWriter.Builder`` class provides including some other methods that are not discussed in the preceding example are as follows. - -============================================ ============ -Method Description -============================================ ============ -inDirectory(String directory) The directory where to write the SSTables. This is a mandatory option. 
The directory to use should already exist and be writable. -inDirectory(File directory) The directory where to write the SSTables. This is a mandatory option. The directory to use should already exist and be writable. -forTable(String schema) The schema (CREATE TABLE statement) for the table for which SSTable is to be created. The - provided CREATE TABLE statement must use a fully-qualified table name, one that includes the - keyspace name. This is a mandatory option. - -withPartitioner(IPartitioner partitioner) The partitioner to use. By default, Murmur3Partitioner will be used. If this is not the - partitioner used by the cluster for which the SSTables are created, the correct partitioner - needs to be provided. - -using(String insert) The INSERT or UPDATE statement defining the order of the values to add for a given CQL row. - The provided INSERT statement must use a fully-qualified table name, one that includes the - keyspace name. Moreover, said statement must use bind variables since these variables will - be bound to values by the resulting SSTable writer. This is a mandatory option. - -withBufferSizeInMB(int size) The size of the buffer to use. This defines how much data will be buffered before being - written as a new SSTable. This corresponds roughly to the data size that will have the - created SSTable. The default is 128MB, which should be reasonable for a 1GB heap. If - OutOfMemory exception gets generated while using the SSTable writer, should lower this - value. - -sorted() Creates a CQLSSTableWriter that expects sorted inputs. If this option is used, the resulting - SSTable writer will expect rows to be added in SSTable sorted order (and an exception will - be thrown if that is not the case during row insertion). The SSTable sorted order means that - rows are added such that their partition keys respect the partitioner order. This option - should only be used if the rows can be provided in order, which is rarely the case. If the - rows can be provided in order however, using this sorted might be more efficient. If this - option is used, some option like withBufferSizeInMB will be ignored. - -build() Builds a CQLSSTableWriter object. - -============================================ ============ - diff --git a/src/doc/4.0-alpha4/_sources/operating/cdc.rst.txt b/src/doc/4.0-alpha4/_sources/operating/cdc.rst.txt deleted file mode 100644 index a7177b544..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/cdc.rst.txt +++ /dev/null @@ -1,96 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. 
highlight:: none - -Change Data Capture -------------------- - -Overview -^^^^^^^^ - -Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the -table property ``cdc=true`` (either when :ref:`creating the table ` or -:ref:`altering it `). Upon CommitLogSegment creation, a hard-link to the segment is created in the -directory specified in ``cassandra.yaml``. On segment fsync to disk, if CDC data is present anywhere in the segment a -_cdc.idx file is also created with the integer offset of how much data in the original segment is persisted -to disk. Upon final segment flush, a second line with the human-readable word "COMPLETED" will be added to the _cdc.idx -file indicating that Cassandra has completed all processing on the file. - -We we use an index file rather than just encouraging clients to parse the log realtime off a memory mapped handle as data -can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx -file will ensure that you only parse CDC data for data that is durable. - -A threshold of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will -not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory. - -Configuration -^^^^^^^^^^^^^ - -Enabling or disabling CDC on a table -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -CDC is enable or disable through the `cdc` table property, for instance:: - - CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true; - - ALTER TABLE foo WITH cdc=true; - - ALTER TABLE foo WITH cdc=false; - -cassandra.yaml parameters -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following `cassandra.yaml` are available for CDC: - -``cdc_enabled`` (default: false) - Enable or disable CDC operations node-wide. -``cdc_raw_directory`` (default: ``$CASSANDRA_HOME/data/cdc_raw``) - Destination for CommitLogSegments to be moved after all corresponding memtables are flushed. -``cdc_free_space_in_mb``: (default: min of 4096 and 1/8th volume space) - Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in - ``cdc_raw_directory``. -``cdc_free_space_check_interval_ms`` (default: 250) - When at capacity, we limit the frequency with which we re-calculate the space taken up by ``cdc_raw_directory`` to - prevent burning CPU cycles unnecessarily. Default is to check 4 times per second. - -.. _reading-commitlogsegments: - -Reading CommitLogSegments -^^^^^^^^^^^^^^^^^^^^^^^^^ -Use a `CommitLogReader.java -`__. -Usage is `fairly straightforward -`__ -with a `variety of signatures -`__ -available for use. In order to handle mutations read from disk, implement `CommitLogReadHandler -`__. - -Warnings -^^^^^^^^ - -**Do not enable CDC without some kind of consumption process in-place.** - -If CDC is enabled on a node and then on a table, the ``cdc_free_space_in_mb`` will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place. 
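The following is a minimal, hypothetical shell sketch of the consumer-side contract described above: read no further than the offset recorded on the first line of each ``_cdc.idx`` file, and treat a segment as finished once the second line reads COMPLETED. The directory path and the assumption that the matching segment shares the index file's prefix are illustrative only; a real consumer would hand the segment to a ``CommitLogReader`` implementation as outlined in Reading CommitLogSegments.

::

    CDC_RAW=/var/lib/cassandra/cdc_raw        # assumed cdc_raw_directory

    for idx in "$CDC_RAW"/*_cdc.idx; do
        offset=$(sed -n '1p' "$idx")          # durable bytes of the segment
        state=$(sed -n '2p' "$idx")           # "COMPLETED" once Cassandra is done with it
        segment="${idx%_cdc.idx}.log"         # assumed segment file naming
        echo "segment=$segment durable_bytes=$offset state=${state:-IN_PROGRESS}"
        # pass $segment and $offset to a CommitLogReadHandler-based consumer,
        # then remove the segment from cdc_raw once it has been processed
    done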
- -Further Reading -^^^^^^^^^^^^^^^ - -- `JIRA ticket `__ -- `JIRA ticket `__ diff --git a/src/doc/4.0-alpha4/_sources/operating/compaction/index.rst.txt b/src/doc/4.0-alpha4/_sources/operating/compaction/index.rst.txt deleted file mode 100644 index ea505dd47..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/compaction/index.rst.txt +++ /dev/null @@ -1,301 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _compaction: - -Compaction ----------- - -Strategies -^^^^^^^^^^ - -Picking the right compaction strategy for your workload will ensure the best performance for both querying and for compaction itself. - -:ref:`Size Tiered Compaction Strategy ` - The default compaction strategy. Useful as a fallback when other strategies don't fit the workload. Most useful for - non pure time series workloads with spinning disks, or when the I/O from :ref:`LCS ` is too high. - - -:ref:`Leveled Compaction Strategy ` - Leveled Compaction Strategy (LCS) is optimized for read heavy workloads, or workloads with lots of updates and deletes. It is not a good choice for immutable time series data. - - -:ref:`Time Window Compaction Strategy ` - Time Window Compaction Strategy is designed for TTL'ed, mostly immutable time series data. - - - -Types of compaction -^^^^^^^^^^^^^^^^^^^ - -The concept of compaction is used for different kinds of operations in Cassandra, the common thing about these -operations is that it takes one or more sstables and output new sstables. The types of compactions are; - -Minor compaction - triggered automatically in Cassandra. -Major compaction - a user executes a compaction over all sstables on the node. -User defined compaction - a user triggers a compaction on a given set of sstables. -Scrub - try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you - will need to run a full repair on the node. -Upgradesstables - upgrade sstables to the latest version. Run this after upgrading to a new major version. -Cleanup - remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been - bootstrapped since that node will take ownership of some ranges from those nodes. -Secondary index rebuild - rebuild the secondary indexes on the node. -Anticompaction - after repair the ranges that were actually repaired are split out of the sstables that existed when repair started. -Sub range compaction - It is possible to only compact a given sub range - this could be useful if you know a token that has been - misbehaving - either gathering many updates or many deletes. (``nodetool compact -st x -et y``) will pick - all sstables containing the range between x and y and issue a compaction for those sstables. 
For STCS this will - most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS - the resulting sstable will end up in L0. - -When is a minor compaction triggered? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -# When an sstable is added to the node through flushing/streaming etc. -# When autocompaction is enabled after being disabled (``nodetool enableautocompaction``) -# When compaction adds new sstables. -# A check for new minor compactions every 5 minutes. - -Merging sstables -^^^^^^^^^^^^^^^^ - -Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently. - -Tombstones and Garbage Collection (GC) Grace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Why Tombstones -~~~~~~~~~~~~~~ - -When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra. - -Deletes without tombstones -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Imagine a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If one of the nodes fails and and our delete operation only removes existing values we can end up with a cluster that -looks like:: - - [], [], [A] - -Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:: - - [A], [A], [A] - -This would cause our data to be resurrected even though it had been -deleted. - -Deletes with Tombstones -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting again with a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If instead of removing data we add a tombstone record, our single node failure situation will look like this.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A] - -Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]] - -Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as ``gc_grace_seconds`` for every table in Cassandra. - -The gc_grace_seconds parameter and Tombstone Removal -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The table level ``gc_grace_seconds`` parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After ``gc_grace_seconds`` has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstable for a tombstone to be removed. 
More precisely, to -be able to drop an actual tombstone the following needs to be true; - -- The tombstone must be older than ``gc_grace_seconds`` -- If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older - than the tombstone containing X must be included in the same compaction. We don't need to care if the partition is in - an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older - than the data it cannot shadow that data. -- If the option ``only_purge_repaired_tombstones`` is enabled, tombstones are only removed if the data has also been - repaired. - -If a node remains down or disconnected for longer than ``gc_grace_seconds`` it's deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the "Deletes without Tombstones" section. -Note that tombstones will not be removed until a compaction event even if ``gc_grace_seconds`` has elapsed. - -The default value for ``gc_grace_seconds`` is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using ``WITH gc_grace_seconds``. - -TTL -^^^ - -Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least ``gc_grace_seconds``. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once. - -Fully expired sstables -^^^^^^^^^^^^^^^^^^^^^^ - -If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called ``sstableexpiredblockers`` that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -``TimeWindowCompactionStrategy`` (and the deprecated ``DateTieredCompactionStrategy``). With ``TimeWindowCompactionStrategy`` -it is possible to remove the guarantee (not check for shadowing data) by enabling ``unsafe_aggressive_sstable_expiration``. - -Repaired/unrepaired data -^^^^^^^^^^^^^^^^^^^^^^^^ - -With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables. - -Data directories -^^^^^^^^^^^^^^^^ - -Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. 
To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding -data getting undeleted: - -- It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings - and each one can run compactions independently from the others. -- Users can backup and restore a single data directory. -- Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk - backing two data directories, the big one will be limited the by the small one. One work around to this is to create - more data directories backed by the big disk. - -Single sstable tombstone compaction -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When an sstable is written a histogram with the tombstone expiry times is created and this is used to try to find -sstables with very many tombstones and run single sstable compaction on that sstable in hope of being able to drop -tombstones in that sstable. Before starting this it is also checked how likely it is that any tombstones will actually -will be able to be dropped how much this sstable overlaps with other sstables. To avoid most of these checks the -compaction option ``unchecked_tombstone_compaction`` can be enabled. - -.. _compaction-options: - -Common options -^^^^^^^^^^^^^^ - -There is a number of common options for all the compaction strategies; - -``enabled`` (default: true) - Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do - 'nodetool enableautocompaction' to start running compactions. -``tombstone_threshold`` (default: 0.2) - How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable. -``tombstone_compaction_interval`` (default: 86400s (1 day)) - Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure - that one sstable is not constantly getting recompacted - this option states how often we should try for a given - sstable. -``log_all`` (default: false) - New detailed compaction logging, see :ref:`below `. -``unchecked_tombstone_compaction`` (default: false) - The single sstable compaction has quite strict checks for whether it should be started, this option disables those - checks and for some usecases this might be needed. Note that this does not change anything for the actual - compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able - to drop any tombstones. -``only_purge_repaired_tombstone`` (default: false) - Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired. -``min_threshold`` (default: 4) - Lower limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. -``max_threshold`` (default: 32) - Upper limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. - -Further, see the section on each strategy for specific additional options. 
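As a hedged illustration of how the common options above are supplied, they go into the ``compaction`` map of a table alongside the strategy class; the table name and values below are only examples.

::

    ALTER TABLE cqlkeyspace.t WITH compaction = {
        'class': 'SizeTieredCompactionStrategy',
        'min_threshold': '6',
        'max_threshold': '32',
        'tombstone_threshold': '0.2',
        'tombstone_compaction_interval': '86400'
    };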
- -Compaction nodetool commands -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :ref:`nodetool ` utility provides a number of commands related to compaction: - -``enableautocompaction`` - Enable compaction. -``disableautocompaction`` - Disable compaction. -``setcompactionthroughput`` - How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this - throughput. -``compactionstats`` - Statistics about current and pending compactions. -``compactionhistory`` - List details about the last compactions. -``setcompactionthreshold`` - Set the min/max sstable count for when to trigger compaction, defaults to 4/32. - -Switching the compaction strategy and options using JMX -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:: - - org.apache.cassandra.db:type=ColumnFamilies,keyspace=,columnfamily= - -and the attribute to change is ``CompactionParameters`` or ``CompactionParametersJson`` if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an :ref:`ALTER TABLE ` statement - -for example:: - - { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10} - -The setting is kept until someone executes an :ref:`ALTER TABLE ` that touches the compaction -settings or restarts the node. - -.. _detailed-compaction-logging: - -More detailed compaction logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Enable with the compaction option ``log_all`` and a more detailed compaction log file will be produced in your log -directory. - - - - - diff --git a/src/doc/4.0-alpha4/_sources/operating/compaction/lcs.rst.txt b/src/doc/4.0-alpha4/_sources/operating/compaction/lcs.rst.txt deleted file mode 100644 index 48c282eb7..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/compaction/lcs.rst.txt +++ /dev/null @@ -1,90 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - - -.. _LCS: - -Leveled Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The idea of ``LeveledCompactionStrategy`` (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here. 
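To put rough numbers on the sizing rule just described (assuming the defaults of 160MB sstables and a 10x growth factor per level), the approximate capacity of each level works out to:

::

    L0: newly flushed or streamed sstables, no size target and no overlap guarantee
    L1: ~10 sstables   x 160MB = about 1.6GB
    L2: ~100 sstables  x 160MB = about 16GB
    L3: ~1000 sstables x 160MB = about 160GB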
- -When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory. - -When deciding which level to compact LCS checks the higher levels first (with LCS, a "higher" level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level. - -Major compaction -~~~~~~~~~~~~~~~~ - -It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817. - -Bootstrapping -~~~~~~~~~~~~~ - -During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done. - -STCS in L0 -~~~~~~~~~~ - -If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better. - -Starved sstables -~~~~~~~~~~~~~~~~ - -If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable\_size\_in\_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved. - -.. _lcs-options: - -LCS options -~~~~~~~~~~~ - -``sstable_size_in_mb`` (default: 160MB) - The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very - large partitions on the node. - -``fanout_size`` (default: 10) - The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning - this option. - -LCS also support the ``cassandra.disable_stcs_in_l0`` startup option (``-Dcassandra.disable_stcs_in_l0=true``) to avoid -doing STCS in L0. 
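For example, both options could be set (with illustrative values) through the ``compaction`` map when creating or altering a table:

::

    ALTER TABLE catalogkeyspace.magazine WITH compaction = {
        'class': 'LeveledCompactionStrategy',
        'sstable_size_in_mb': '160',
        'fanout_size': '10'
    };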
- - diff --git a/src/doc/4.0-alpha4/_sources/operating/compaction/stcs.rst.txt b/src/doc/4.0-alpha4/_sources/operating/compaction/stcs.rst.txt deleted file mode 100644 index 658933757..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/compaction/stcs.rst.txt +++ /dev/null @@ -1,58 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - -.. _STCS: - -Leveled Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The basic idea of ``SizeTieredCompactionStrategy`` (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within ``bucket_low`` and ``bucket_high`` of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket's sstables takes the most reads. - -Major compaction -~~~~~~~~~~~~~~~~ - -When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size. - -.. _stcs-options: - -STCS options -~~~~~~~~~~~~ - -``min_sstable_size`` (default: 50MB) - Sstables smaller than this are put in the same bucket. -``bucket_low`` (default: 0.5) - How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``bucket_low * avg_bucket_size < sstable_size`` (and the ``bucket_high`` condition holds, see below), then - the sstable is added to the bucket. -``bucket_high`` (default: 1.5) - How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That - is, if ``sstable_size < bucket_high * avg_bucket_size`` (and the ``bucket_low`` condition holds, see above), then - the sstable is added to the bucket. - -Defragmentation -~~~~~~~~~~~~~~~ - -Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster. - - diff --git a/src/doc/4.0-alpha4/_sources/operating/compaction/twcs.rst.txt b/src/doc/4.0-alpha4/_sources/operating/compaction/twcs.rst.txt deleted file mode 100644 index 3641a5aab..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/compaction/twcs.rst.txt +++ /dev/null @@ -1,76 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - -.. _TWCS: - -Time Window CompactionStrategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``TimeWindowCompactionStrategy`` (TWCS) is designed specifically for workloads where it's beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -``SizeTieredCompactionStrategy`` or ``LeveledCompactionStrategy``. The basic concept is that -``TimeWindowCompactionStrategy`` will create 1 sstable per file for a given window, where a window is simply calculated -as the combination of two primary options: - -``compaction_window_unit`` (default: DAYS) - A Java TimeUnit (MINUTES, HOURS, or DAYS). -``compaction_window_size`` (default: 1) - The number of units that make up a window. -``unsafe_aggressive_sstable_expiration`` (default: false) - Expired sstables will be dropped without checking its data is shadowing other sstables. This is a potentially - risky option that can lead to data loss or deleted data re-appearing, going beyond what - `unchecked_tombstone_compaction` does for single sstable compaction. Due to the risk the jvm must also be - started with `-Dcassandra.unsafe_aggressive_sstable_expiration=true`. - -Taken together, the operator can specify windows of virtually any size, and `TimeWindowCompactionStrategy` will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using `SizeTieredCompactionStrategy`. - -Ideally, operators should select a ``compaction_window_unit`` and ``compaction_window_size`` pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -(``'compaction_window_unit':'DAYS','compaction_window_size':3``). - -TimeWindowCompactionStrategy Operational Concerns -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways: - -- If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables - and flushed into the same SSTable, where it will remain comingled. -- If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data - will be comingled and flushed into the same SSTable. 
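As a concrete illustration of the windowing options described above, a time-series table written with a 90 day TTL might be declared as follows (a sketch; the table definition and names are illustrative)::

    CREATE TABLE ks.sensor_data (
        sensor_id int,
        reading_time timestamp,
        value double,
        PRIMARY KEY (sensor_id, reading_time)
    ) WITH compaction = { 'class': 'TimeWindowCompactionStrategy',
                          'compaction_window_unit': 'DAYS',
                          'compaction_window_size': 3 }
      AND default_time_to_live = 7776000;  -- 90 days, in seconds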
- -While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL ``USING TIMESTAMP``. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled). - -Changing TimeWindowCompactionStrategy Options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Operators wishing to enable ``TimeWindowCompactionStrategy`` on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected. - -Operators wishing to change ``compaction_window_unit`` or ``compaction_window_size`` can do so, but may trigger -additional compactions as adjacent windows are joined together. If the window size is decrease d (for example, from 24 -hours to 12 hours), then the existing SSTables will not be modified - TWCS can not split existing SSTables into multiple -windows. - diff --git a/src/doc/4.0-alpha4/_sources/operating/compression.rst.txt b/src/doc/4.0-alpha4/_sources/operating/compression.rst.txt deleted file mode 100644 index b4308b31a..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/compression.rst.txt +++ /dev/null @@ -1,97 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Compression ------------ - -Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of -data on disk by compressing the SSTable in user-configurable compression ``chunk_length_in_kb``. Because Cassandra -SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates -to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when -UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full -chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so -on). - -Configuring Compression -^^^^^^^^^^^^^^^^^^^^^^^ - -Compression is configured on a per-table basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. By -default, three options are relevant: - -- ``class`` specifies the compression class - Cassandra provides four classes (``LZ4Compressor``, - ``SnappyCompressor``, ``DeflateCompressor`` and ``ZstdCompressor``). The default is ``LZ4Compressor``. -- ``chunk_length_in_kb`` specifies the number of kilobytes of data per compression chunk. The default is 64KB. 
-- ``crc_check_chance`` determines how likely Cassandra is to verify the checksum on each compression chunk during - reads. The default is 1.0. -- ``compression_level`` is only applicable for ``ZstdCompressor`` and accepts values between ``-131072`` and ``22``. - The lower the level, the faster the speed (at the cost of compression). Values from 20 to 22 are called - "ultra levels" and should be used with caution, as they require more memory. The default is 3. - -Users can set compression using the following syntax: - -:: - - CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'}; - -Or - -:: - - ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5}; - -Once enabled, compression can be disabled with ``ALTER TABLE`` setting ``enabled`` to ``false``: - -:: - - ALTER TABLE keyspace.table WITH compression = {'enabled':'false'}; - -Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ``ALTER TABLE``, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the SSTables on disk, -re-compressing the data in the process. - -Benefits and Uses -^^^^^^^^^^^^^^^^^ - -Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk. - -Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well. - -Operational Impact -^^^^^^^^^^^^^^^^^^ - -- Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per - terabyte of data on disk, though the exact usage varies with ``chunk_length_in_kb`` and compression ratios. - -- Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as - non-vnode bootstrap), the CPU overhead of compression can be a limiting factor. - -- The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a - way to ensure correctness of data on disk, compressed tables allow the user to set ``crc_check_chance`` (a float from - 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt. - -Advanced Use -^^^^^^^^^^^^ - -Advanced users can provide their own compression class by implementing the interface at -``org.apache.cassandra.io.compress.ICompressor``. diff --git a/src/doc/4.0-alpha4/_sources/operating/hardware.rst.txt b/src/doc/4.0-alpha4/_sources/operating/hardware.rst.txt deleted file mode 100644 index d90550c80..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/hardware.rst.txt +++ /dev/null @@ -1,85 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. 
See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Hardware Choices ----------------- - -Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM. - -CPU -^^^ -Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes. - -Memory -^^^^^^ -Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java's Xmx system parameter). In addition to -the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and -counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system's page -cache, storing recently accessed portions files in RAM for rapid re-use. - -For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest: - -- ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption -- The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM -- Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection -- Heaps larger than 12GB should consider G1GC - -Disks -^^^^^ -Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables. - -Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files. - -Cassandra performs very well on both spinning hard drives and solid state disks. 
In both cases, Cassandra's sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it's important that the commitlog -(``commitlog_directory``) be on one physical disk (not simply a partition, but a physical disk), and the data files -(``data_file_directories``) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk. - -In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it's typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5. - -Common Cloud Choices -^^^^^^^^^^^^^^^^^^^^ - -Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include: - -- i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs -- m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) - storage - -Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives. diff --git a/src/doc/4.0-alpha4/_sources/operating/hints.rst.txt b/src/doc/4.0-alpha4/_sources/operating/hints.rst.txt deleted file mode 100644 index 55c42a401..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/hints.rst.txt +++ /dev/null @@ -1,279 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _hints: - -Hints -===== - -Hinting is a data repair technique applied during write operations. When -replica nodes are unavailable to accept a mutation, either due to failure or -more commonly routine maintenance, coordinators attempting to write to those -replicas store temporary hints on their local filesystem for later application -to the unavailable replica. Hints are an important way to help reduce the -duration of data inconsistency. 
Coordinators replay hints quickly after -unavailable replica nodes return to the ring. Hints are best effort, however, -and do not guarantee eventual consistency like :ref:`anti-entropy repair -` does. - -Hints are useful because of how Apache Cassandra replicates data to provide -fault tolerance, high availability and durability. Cassandra :ref:`partitions -data across the cluster ` using consistent -hashing, and then replicates keys to multiple nodes along the hash ring. To -guarantee availability, all replicas of a key can accept mutations without -consensus, but this means it is possible for some replicas to accept a mutation -while others do not. When this happens an inconsistency is introduced. - -Hints are one of the three ways, in addition to read-repair and -full/incremental anti-entropy repair, that Cassandra implements the eventual -consistency guarantee that all updates are eventually received by all replicas. -Hints, like read-repair, are best effort and not an alternative to performing -full repair, but they do help reduce the duration of inconsistency between -replicas in practice. - -Hinted Handoff --------------- - -Hinted handoff is the process by which Cassandra applies hints to unavailable -nodes. - -For example, consider a mutation is to be made at ``Consistency Level`` -``LOCAL_QUORUM`` against a keyspace with ``Replication Factor`` of ``3``. -Normally the client sends the mutation to a single coordinator, who then sends -the mutation to all three replicas, and when two of the three replicas -acknowledge the mutation the coordinator responds successfully to the client. -If a replica node is unavailable, however, the coordinator stores a hint -locally to the filesystem for later application. New hints will be retained for -up to ``max_hint_window_in_ms`` of downtime (defaults to ``3 hours``). If the -unavailable replica does return to the cluster before the window expires, the -coordinator applies any pending hinted mutations against the replica to ensure -that eventual consistency is maintained. - -.. figure:: images/hints.svg - :alt: Hinted Handoff Example - - Hinted Handoff in Action - -* (``t0``): The write is sent by the client, and the coordinator sends it - to the three replicas. Unfortunately ``replica_2`` is restarting and cannot - receive the mutation. -* (``t1``): The client receives a quorum acknowledgement from the coordinator. - At this point the client believe the write to be durable and visible to reads - (which it is). -* (``t2``): After the write timeout (default ``2s``), the coordinator decides - that ``replica_2`` is unavailable and stores a hint to its local disk. -* (``t3``): Later, when ``replica_2`` starts back up it sends a gossip message - to all nodes, including the coordinator. -* (``t4``): The coordinator replays hints including the missed mutation - against ``replica_2``. - -If the node does not return in time, the destination replica will be -permanently out of sync until either read-repair or full/incremental -anti-entropy repair propagates the mutation. - -Application of Hints -^^^^^^^^^^^^^^^^^^^^ - -Hints are streamed in bulk, a segment at a time, to the target replica node and -the target node replays them locally. After the target node has replayed a -segment it deletes the segment and receives the next segment. This continues -until all hints are drained. - -Storage of Hints on Disk -^^^^^^^^^^^^^^^^^^^^^^^^ - -Hints are stored in flat files in the coordinator node’s -``$CASSANDRA_HOME/data/hints`` directory. 
A hint includes a hint id, the target -replica node on which the mutation is meant to be stored, the serialized -mutation (stored as a blob) that couldn't be delivered to the replica node, the -mutation timestamp, and the Cassandra version used to serialize the mutation. -By default hints are compressed using ``LZ4Compressor``. Multiple hints are -appended to the same hints file. - -Since hints contain the original unmodified mutation timestamp, hint application -is idempotent and cannot overwrite a future mutation. - -Hints for Timed Out Write Requests -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Hints are also stored for write requests that time out. The -``write_request_timeout_in_ms`` setting in ``cassandra.yaml`` configures the -timeout for write requests. - -:: - - write_request_timeout_in_ms: 2000 - -The coordinator waits for the configured amount of time for write requests to -complete, at which point it will time out and generate a hint for the timed out -request. The lowest acceptable value for ``write_request_timeout_in_ms`` is 10 ms. - - -Configuring Hints ------------------ - -Hints are enabled by default as they are critical for data consistency. The -``cassandra.yaml`` configuration file provides several settings for configuring -hints: - -Table 1. Settings for Hints - -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|Setting | Description |Default Value | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hinted_handoff_enabled`` |Enables/Disables hinted handoffs | ``true`` | -| | | | -| | | | -| | | | -| | | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hinted_handoff_disabled_datacenters`` |A list of data centers that do not perform | ``unset`` | -| |hinted handoffs even when handoff is | | -| |otherwise enabled. | | -| |Example: | | -| | | | -| | .. code-block:: yaml | | -| | | | -| | hinted_handoff_disabled_datacenters: | | -| | - DC1 | | -| | - DC2 | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``max_hint_window_in_ms`` |Defines the maximum amount of time (ms) | ``10800000`` # 3 hours | -| |a node shall have hints generated after it | | -| |has failed. | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hinted_handoff_throttle_in_kb`` |Maximum throttle in KBs per second, per | | -| |delivery thread. This will be reduced | ``1024`` | -| |proportionally to the number of nodes in | | -| |the cluster. | | -| |(If there are two nodes in the cluster, | | -| |each delivery thread will use the maximum | | -| |rate; if there are 3, each will throttle | | -| |to half of the maximum,since it is expected| | -| |for two nodes to be delivering hints | | -| |simultaneously.) 
| | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``max_hints_delivery_threads`` |Number of threads with which to deliver | ``2`` | -| |hints; Consider increasing this number when| | -| |you have multi-dc deployments, since | | -| |cross-dc handoff tends to be slower | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hints_directory`` |Directory where Cassandra stores hints. |``$CASSANDRA_HOME/data/hints`` | -| | | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hints_flush_period_in_ms`` |How often hints should be flushed from the | ``10000`` | -| |internal buffers to disk. Will *not* | | -| |trigger fsync. | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``max_hints_file_size_in_mb`` |Maximum size for a single hints file, in | ``128`` | -| |megabytes. | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hints_compression`` |Compression to apply to the hint files. | ``LZ4Compressor`` | -| |If omitted, hints files will be written | | -| |uncompressed. LZ4, Snappy, and Deflate | | -| |compressors are supported. | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ - -Configuring Hints at Runtime with ``nodetool`` ----------------------------------------------- - -``nodetool`` provides several commands for configuring hints or getting hints -related information. The nodetool commands override the corresponding -settings if any in ``cassandra.yaml`` for the node running the command. - -Table 2. Nodetool Commands for Hints - -+--------------------------------+-------------------------------------------+ -|Command | Description | -+--------------------------------+-------------------------------------------+ -|``nodetool disablehandoff`` |Disables storing and delivering hints | -+--------------------------------+-------------------------------------------+ -|``nodetool disablehintsfordc`` |Disables storing and delivering hints to a | -| |data center | -+--------------------------------+-------------------------------------------+ -|``nodetool enablehandoff`` |Re-enables future hints storing and | -| |delivery on the current node | -+--------------------------------+-------------------------------------------+ -|``nodetool enablehintsfordc`` |Enables hints for a data center that was | -| |previously disabled | -+--------------------------------+-------------------------------------------+ -|``nodetool getmaxhintwindow`` |Prints the max hint window in ms. New in | -| |Cassandra 4.0. 
| -+--------------------------------+-------------------------------------------+ -|``nodetool handoffwindow`` |Prints current hinted handoff window | -+--------------------------------+-------------------------------------------+ -|``nodetool pausehandoff`` |Pauses hints delivery process | -+--------------------------------+-------------------------------------------+ -|``nodetool resumehandoff`` |Resumes hints delivery process | -+--------------------------------+-------------------------------------------+ -|``nodetool |Sets hinted handoff throttle in kb | -|sethintedhandoffthrottlekb`` |per second, per delivery thread | -+--------------------------------+-------------------------------------------+ -|``nodetool setmaxhintwindow`` |Sets the specified max hint window in ms | -+--------------------------------+-------------------------------------------+ -|``nodetool statushandoff`` |Status of storing future hints on the | -| |current node | -+--------------------------------+-------------------------------------------+ -|``nodetool truncatehints`` |Truncates all hints on the local node, or | -| |truncates hints for the endpoint(s) | -| |specified. | -+--------------------------------+-------------------------------------------+ - -Make Hints Play Faster at Runtime -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The default of ``1024 kbps`` handoff throttle is conservative for most modern -networks, and it is entirely possible that in a simple node restart you may -accumulate many gigabytes hints that may take hours to play back. For example if -you are ingesting ``100 Mbps`` of data per node, a single 10 minute long -restart will create ``10 minutes * (100 megabit / second) ~= 7 GiB`` of data -which at ``(1024 KiB / second)`` would take ``7.5 GiB / (1024 KiB / second) = -2.03 hours`` to play back. The exact math depends on the load balancing strategy -(round robin is better than token aware), number of tokens per node (more -tokens is better than fewer), and naturally the cluster's write rate, but -regardless you may find yourself wanting to increase this throttle at runtime. - -If you find yourself in such a situation, you may consider raising -the ``hinted_handoff_throttle`` dynamically via the -``nodetool sethintedhandoffthrottlekb`` command. - -Allow a Node to be Down Longer at Runtime -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Sometimes a node may be down for more than the normal ``max_hint_window_in_ms``, -(default of three hours), but the hardware and data itself will still be -accessible. In such a case you may consider raising the -``max_hint_window_in_ms`` dynamically via the ``nodetool setmaxhintwindow`` -command added in Cassandra 4.0 (`CASSANDRA-11720 `_). -This will instruct Cassandra to continue holding hints for the down -endpoint for a longer amount of time. - -This command should be applied on all nodes in the cluster that may be holding -hints. If needed, the setting can be applied permanently by setting the -``max_hint_window_in_ms`` setting in ``cassandra.yaml`` followed by a rolling -restart. - -Monitoring Hint Delivery ------------------------- - -Cassandra 4.0 adds histograms available to understand how long it takes to deliver -hints which is useful for operators to better identify problems (`CASSANDRA-13234 -`_). - -There are also metrics available for tracking :ref:`Hinted Handoff ` -and :ref:`Hints Service ` metrics. 
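As a concrete illustration of the runtime tuning described above, both the handoff throttle and the hint window can be adjusted on a live node with ``nodetool`` (the values shown are illustrative)::

    # Raise the per-thread hint replay throttle from the default 1024 KiB/s
    nodetool sethintedhandoffthrottlekb 4096

    # Keep generating hints for a down node for 24 hours instead of the default 3 hours
    nodetool setmaxhintwindow 86400000

As noted above, ``nodetool setmaxhintwindow`` should be applied on every node in the cluster that may be holding hints; to make either change permanent, set the corresponding ``max_hint_window_in_ms`` or ``hinted_handoff_throttle_in_kb`` option in ``cassandra.yaml``.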
diff --git a/src/doc/4.0-alpha4/_sources/operating/index.rst.txt b/src/doc/4.0-alpha4/_sources/operating/index.rst.txt deleted file mode 100644 index 78c7eb6ea..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Operating Cassandra -=================== - -.. toctree:: - :maxdepth: 2 - - snitch - topo_changes - repair - read_repair - hints - compaction/index - bloom_filters - compression - cdc - backups - bulk_loading - metrics - security - hardware - diff --git a/src/doc/4.0-alpha4/_sources/operating/metrics.rst.txt b/src/doc/4.0-alpha4/_sources/operating/metrics.rst.txt deleted file mode 100644 index fc37440d3..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/metrics.rst.txt +++ /dev/null @@ -1,793 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _monitoring-metrics: - -Monitoring ----------- - -Metrics in Cassandra are managed using the `Dropwizard Metrics `__ library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of `built in -`__ and `third party -`__ reporter plugins. - -Metrics are collected for a single node. It's up to the operator to use an external monitoring system to aggregate them. - -Metric Types -^^^^^^^^^^^^ -All metrics reported by cassandra fit into one of the following types. - -``Gauge`` - An instantaneous measurement of a value. - -``Counter`` - A gauge for an ``AtomicLong`` instance. Typically this is consumed by monitoring the change since the last call to - see if there is a large increase compared to the norm. - -``Histogram`` - Measures the statistical distribution of values in a stream of data. - - In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th - percentiles. - -``Timer`` - Measures both the rate that a particular piece of code is called and the histogram of its duration. 
- -``Latency`` - Special type that tracks latency (in microseconds) with a ``Timer`` plus a ``Counter`` that tracks the total latency - accrued since starting. The former is useful if you track the change in total latency since the last check. Each - metric name of this type will have 'Latency' and 'TotalLatency' appended to it. - -``Meter`` - A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving - average throughputs. - -.. _table-metrics: - -Table Metrics -^^^^^^^^^^^^^ - -Each table in Cassandra has metrics responsible for tracking its state and performance. - -The metric names are all appended with the specific ``Keyspace`` and ``Table`` name. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Table...
`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Table keyspace= scope=
name=`` - -.. NOTE:: - There is a special table called '``all``' without a keyspace. This represents the aggregation of metrics across - **all** tables and keyspaces on the node. - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -MemtableOnHeapSize Gauge Total amount of data stored in the memtable that resides **on**-heap, including column related overhead and partitions overwritten. -MemtableOffHeapSize Gauge Total amount of data stored in the memtable that resides **off**-heap, including column related overhead and partitions overwritten. -MemtableLiveDataSize Gauge Total amount of live data stored in the memtable, excluding any data structure overhead. -AllMemtablesOnHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **on**-heap. -AllMemtablesOffHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **off**-heap. -AllMemtablesLiveDataSize Gauge Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead. -MemtableColumnsCount Gauge Total number of columns present in the memtable. -MemtableSwitchCount Counter Number of times flush has resulted in the memtable being switched out. -CompressionRatio Gauge Current compression ratio for all SSTables. -EstimatedPartitionSizeHistogram Gauge Histogram of estimated partition size (in bytes). -EstimatedPartitionCount Gauge Approximate number of keys in table. -EstimatedColumnCountHistogram Gauge Histogram of estimated number of columns. -SSTablesPerReadHistogram Histogram Histogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into acoount. -ReadLatency Latency Local read latency for this table. -RangeLatency Latency Local range scan latency for this table. -WriteLatency Latency Local write latency for this table. -CoordinatorReadLatency Timer Coordinator read latency for this table. -CoordinatorWriteLatency Timer Coordinator write latency for this table. -CoordinatorScanLatency Timer Coordinator range scan latency for this table. -PendingFlushes Counter Estimated number of flush tasks pending for this table. -BytesFlushed Counter Total number of bytes flushed since server [re]start. -CompactionBytesWritten Counter Total number of bytes written by compaction since server [re]start. -PendingCompactions Gauge Estimate of number of pending compactions for this table. -LiveSSTableCount Gauge Number of SSTables on disk for this table. -LiveDiskSpaceUsed Counter Disk space used by SSTables belonging to this table (in bytes). -TotalDiskSpaceUsed Counter Total disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC'd. -MinPartitionSize Gauge Size of the smallest compacted partition (in bytes). -MaxPartitionSize Gauge Size of the largest compacted partition (in bytes). -MeanPartitionSize Gauge Size of the average compacted partition (in bytes). -BloomFilterFalsePositives Gauge Number of false positives on table's bloom filter. -BloomFilterFalseRatio Gauge False positive ratio of table's bloom filter. -BloomFilterDiskSpaceUsed Gauge Disk space used by bloom filter (in bytes). -BloomFilterOffHeapMemoryUsed Gauge Off-heap memory used by bloom filter. 
-IndexSummaryOffHeapMemoryUsed Gauge Off-heap memory used by index summary. -CompressionMetadataOffHeapMemoryUsed Gauge Off-heap memory used by compression meta data. -KeyCacheHitRate Gauge Key cache hit rate for this table. -TombstoneScannedHistogram Histogram Histogram of tombstones scanned in queries on this table. -LiveScannedHistogram Histogram Histogram of live cells scanned in queries on this table. -ColUpdateTimeDeltaHistogram Histogram Histogram of column update time delta on this table. -ViewLockAcquireTime Timer Time taken acquiring a partition lock for materialized view updates on this table. -ViewReadTime Timer Time taken during the local read of a materialized view update. -TrueSnapshotsSize Gauge Disk space used by snapshots of this table including all SSTable components. -RowCacheHitOutOfRange Counter Number of table row cache hits that do not satisfy the query filter, thus went to disk. -RowCacheHit Counter Number of table row cache hits. -RowCacheMiss Counter Number of table row cache misses. -CasPrepare Latency Latency of paxos prepare round. -CasPropose Latency Latency of paxos propose round. -CasCommit Latency Latency of paxos commit round. -PercentRepaired Gauge Percent of table data that is repaired on disk. -BytesRepaired Gauge Size of table data repaired on disk -BytesUnrepaired Gauge Size of table data unrepaired on disk -BytesPendingRepair Gauge Size of table data isolated for an ongoing incremental repair -SpeculativeRetries Counter Number of times speculative retries were sent for this table. -SpeculativeFailedRetries Counter Number of speculative retries that failed to prevent a timeout -SpeculativeInsufficientReplicas Counter Number of speculative retries that couldn't be attempted due to lack of replicas -SpeculativeSampleLatencyNanos Gauge Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency. -WaitingOnFreeMemtableSpace Histogram Histogram of time spent waiting for free memtable space, either on- or off-heap. -DroppedMutations Counter Number of dropped mutations on this table. -AnticompactionTime Timer Time spent anticompacting before a consistent repair. -ValidationTime Timer Time spent doing validation compaction during repair. -SyncTime Timer Time spent doing streaming during repair. -BytesValidated Histogram Histogram over the amount of bytes read during validation. -PartitionsValidated Histogram Histogram over the number of partitions read during validation. -BytesAnticompacted Counter How many bytes we anticompacted. -BytesMutatedAnticompaction Counter How many bytes we avoided anticompacting because the sstable was fully contained in the repaired range. -MutatedAnticompactionGauge Gauge Ratio of bytes mutated vs total bytes repaired. -======================================= ============== =========== - -Keyspace Metrics -^^^^^^^^^^^^^^^^ -Each keyspace in Cassandra has metrics responsible for tracking its state and performance. - -Most of these metrics are the same as the ``Table Metrics`` above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.keyspace..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Keyspace scope= name=`` - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -WriteFailedIdeaCL Counter Number of writes that failed to achieve the configured ideal consistency level or 0 if none is configured -IdealCLWriteLatency Latency Coordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured -RepairTime Timer Total time spent as repair coordinator. -RepairPrepareTime Timer Total time spent preparing for repair. -======================================= ============== =========== - -ThreadPool Metrics -^^^^^^^^^^^^^^^^^^ - -Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It's important to monitor the state of these thread pools since they can tell you how saturated a -node is. - -The metric names are all appended with the specific ``ThreadPool`` name. The thread pools are also categorized under a -specific type. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ThreadPools...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ThreadPools path= scope= name=`` - -===================== ============== =========== -Name Type Description -===================== ============== =========== -ActiveTasks Gauge Number of tasks being actively worked on by this pool. -PendingTasks Gauge Number of queued tasks queued up on this pool. -CompletedTasks Counter Number of tasks completed. -TotalBlockedTasks Counter Number of tasks that were blocked due to queue saturation. -CurrentlyBlockedTask Counter Number of tasks that are currently blocked due to queue saturation but on retry will become unblocked. -MaxPoolSize Gauge The maximum number of threads in this pool. -MaxTasksQueued Gauge The maximum number of tasks queued before a task get blocked. -===================== ============== =========== - -The following thread pools can be monitored. 
- -============================ ============== =========== -Name Type Description -============================ ============== =========== -Native-Transport-Requests transport Handles client CQL requests -CounterMutationStage request Responsible for counter writes -ViewMutationStage request Responsible for materialized view writes -MutationStage request Responsible for all other writes -ReadRepairStage request ReadRepair happens on this thread pool -ReadStage request Local reads run on this thread pool -RequestResponseStage request Coordinator requests to the cluster run on this thread pool -AntiEntropyStage internal Builds merkle tree for repairs -CacheCleanupExecutor internal Cache maintenance performed on this thread pool -CompactionExecutor internal Compactions are run on these threads -GossipStage internal Handles gossip requests -HintsDispatcher internal Performs hinted handoff -InternalResponseStage internal Responsible for intra-cluster callbacks -MemtableFlushWriter internal Writes memtables to disk -MemtablePostFlush internal Cleans up commit log after memtable is written to disk -MemtableReclaimMemory internal Memtable recycling -MigrationStage internal Runs schema migrations -MiscStage internal Misceleneous tasks run here -PendingRangeCalculator internal Calculates token range -PerDiskMemtableFlushWriter_0 internal Responsible for writing a spec (there is one of these per disk 0-N) -Sampler internal Responsible for re-sampling the index summaries of SStables -SecondaryIndexManagement internal Performs updates to secondary indexes -ValidationExecutor internal Performs validation compaction or scrubbing -ViewBuildExecutor internal Performs materialized views initial build -============================ ============== =========== - -.. |nbsp| unicode:: 0xA0 .. nonbreaking space - -Client Request Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Client requests have their own set of metrics that encapsulate the work happening at coordinator level. - -Different types of client requests are broken down by ``RequestType``. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ClientRequest..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ClientRequest scope= name=`` - - -:RequestType: CASRead -:Description: Metrics related to transactional read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction read latency. - Unavailables Counter Number of unavailable exceptions encountered. - UnfinishedCommit Counter Number of transactions that were committed on read. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended reads were encountered - ===================== ============== ============================================================= - -:RequestType: CASWrite -:Description: Metrics related to transactional write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. 
- Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction write latency. - UnfinishedCommit Counter Number of transactions that were committed on write. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended writes were encountered - MutationSizeHistogram Histogram Total size in bytes of the requests mutations. - ===================== ============== ============================================================= - - -:RequestType: Read -:Description: Metrics related to standard read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of read failures encountered. - |nbsp| Latency Read latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: RangeSlice -:Description: Metrics related to token range read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of range query failures encountered. - |nbsp| Latency Range query latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: Write -:Description: Metrics related to regular write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of write failures encountered. - |nbsp| Latency Write latency. - Unavailables Counter Number of unavailable exceptions encountered. - MutationSizeHistogram Histogram Total size in bytes of the requests mutations. - ===================== ============== ============================================================= - - -:RequestType: ViewWrite -:Description: Metrics related to materialized view write wrtes. -:Metrics: - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - Unavailables Counter Number of unavailable exceptions encountered. - ViewReplicasAttempted Counter Total number of attempted view replica writes. - ViewReplicasSuccess Counter Total number of succeded view replica writes. - ViewPendingMutations Gauge ViewReplicasAttempted - ViewReplicasSuccess. - ViewWriteLatency Timer Time between when mutation is applied to base table and when CL.ONE is achieved on view. - ===================== ============== ============================================================= - -Cache Metrics -^^^^^^^^^^^^^ - -Cassandra caches have metrics to track the effectivness of the caches. Though the ``Table Metrics`` might be more useful. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Cache..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Cache scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Cache capacity in bytes. -Entries Gauge Total number of cache entries. -FifteenMinuteCacheHitRate Gauge 15m cache hit rate. -FiveMinuteCacheHitRate Gauge 5m cache hit rate. -OneMinuteCacheHitRate Gauge 1m cache hit rate. -HitRate Gauge All time cache hit rate. -Hits Meter Total number of cache hits. -Misses Meter Total number of cache misses. -MissLatency Timer Latency of misses. -Requests Gauge Total number of cache requests. -Size Gauge Total size of occupied cache, in bytes. -========================== ============== =========== - -The following caches are covered: - -============================ =========== -Name Description -============================ =========== -CounterCache Keeps hot counters in memory for performance. -ChunkCache In process uncompressed page cache. -KeyCache Cache for partition to sstable offsets. -RowCache Cache for rows kept in memory. -============================ =========== - -.. NOTE:: - Misses and MissLatency are only defined for the ChunkCache - -CQL Metrics -^^^^^^^^^^^ - -Metrics specific to CQL prepared statement caching. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CQL.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CQL name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -PreparedStatementsCount Gauge Number of cached prepared statements. -PreparedStatementsEvicted Counter Number of prepared statements evicted from the prepared statement cache -PreparedStatementsExecuted Counter Number of prepared statements executed. -RegularStatementsExecuted Counter Number of **non** prepared statements executed. -PreparedStatementsRatio Gauge Percentage of statements that are prepared vs unprepared. -========================== ============== =========== - -.. _dropped-metrics: - -DroppedMessage Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by ``Hinted Handoff`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.DroppedMessage..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=DroppedMessage scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CrossNodeDroppedLatency Timer The dropped latency across nodes. -InternalDroppedLatency Timer The dropped latency within node. -Dropped Meter Number of dropped messages. -========================== ============== =========== - -The different types of messages tracked are: - -============================ =========== -Name Description -============================ =========== -BATCH_STORE Batchlog write -BATCH_REMOVE Batchlog cleanup (after succesfully applied) -COUNTER_MUTATION Counter writes -HINT Hint replay -MUTATION Regular writes -READ Regular reads -READ_REPAIR Read repair -PAGED_SLICE Paged read -RANGE_SLICE Token range read -REQUEST_RESPONSE RPC Callbacks -_TRACE Tracing writes -============================ =========== - -Streaming Metrics -^^^^^^^^^^^^^^^^^ - -Metrics reported during ``Streaming`` operations, such as repair, bootstrap, rebuild. 
- -These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Streaming..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Streaming scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -IncomingBytes Counter Number of bytes streamed to this node from the peer. -OutgoingBytes Counter Number of bytes streamed to the peer endpoint from this node. -========================== ============== =========== - - -Compaction Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to ``Compaction`` work. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Compaction.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Compaction name=`` - -========================== ======================================== =============================================== -Name Type Description -========================== ======================================== =============================================== -BytesCompacted Counter Total number of bytes compacted since server [re]start. -PendingTasks Gauge Estimated number of compactions remaining to perform. -CompletedTasks Gauge Number of completed compactions since server [re]start. -TotalCompactionsCompleted Meter Throughput of completed compactions since server [re]start. -PendingTasksByTableName Gauge>> Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in ``Table Metrics``. -========================== ======================================== =============================================== - -CommitLog Metrics -^^^^^^^^^^^^^^^^^ - -Metrics specific to the ``CommitLog`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CommitLog.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CommitLog name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CompletedTasks Gauge Total number of commit log messages written since [re]start. -PendingTasks Gauge Number of commit log messages written but yet to be fsync'd. -TotalCommitLogSize Gauge Current size, in bytes, used by all the commit log segments. -WaitingOnSegmentAllocation Timer Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero. -WaitingOnCommit Timer The time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval. -========================== ============== =========== - -Storage Metrics -^^^^^^^^^^^^^^^ - -Metrics specific to the storage engine. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Storage.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Storage name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Exceptions Counter Number of internal exceptions caught. Under normal exceptions this should be zero. -Load Counter Size, in bytes, of the on disk data size this node manages. -TotalHints Counter Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint. -TotalHintsInProgress Counter Number of hints attemping to be sent currently. 
-========================== ============== =========== - -.. _handoff-metrics: - -HintedHandoff Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in ``Storage Metrics`` - -These metrics include the peer endpoint **in the metric name** - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintedHandOffManager.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintedHandOffManager name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Hints_created- Counter Number of hints on disk for this peer. -Hints_not_stored- Counter Number of hints not stored for this peer, due to being down past the configured hint window. -=========================== ============== =========== - -.. _hintsservice-metrics: - -HintsService Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in ``Storage Metrics`` - -These metrics include the peer endpoint **in the metric name** - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintsService.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintsService name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -HintsSucceeded Meter A meter of the hints successfully delivered -HintsFailed Meter A meter of the hints that failed deliver -HintsTimedOut Meter A meter of the hints that timed out -Hint_delays Histogram Histogram of hint delivery delays (in milliseconds) -Hint_delays- Histogram Histogram of hint delivery delays (in milliseconds) per peer -=========================== ============== =========== - -SSTable Index Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the SSTable index metadata. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Index..RowIndexEntry`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -IndexedEntrySize Histogram Histogram of the on-heap size, in bytes, of the index across all SSTables. -IndexInfoCount Histogram Histogram of the number of on-heap index entries managed across all SSTables. -IndexInfoGets Histogram Histogram of the number index seeks performed per SSTable. -=========================== ============== =========== - -BufferPool Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.BufferPool.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=BufferPool name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Size Gauge Size, in bytes, of the managed buffer pool -Misses Meter The rate of misses in the pool. The higher this is the more allocations incurred. -=========================== ============== =========== - - -Client Metrics -^^^^^^^^^^^^^^ - -Metrics specifc to client managment. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Client.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Client name=`` - -============================== =============================== =========== -Name Type Description -============================== =============================== =========== -connectedNativeClients Gauge Number of clients connected to this node's native protocol server -connections Gauge> List of all connections and their state information -connectedNativeClientsByUser Gauge Number of connected native clients by username -============================== =============================== =========== - - -Batch Metrics -^^^^^^^^^^^^^ - -Metrics specific to batch statements. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Batch.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Batch name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -PartitionsPerCounterBatch Histogram Distribution of the number of partitions processed per counter batch -PartitionsPerLoggedBatch Histogram Distribution of the number of partitions processed per logged batch -PartitionsPerUnloggedBatch Histogram Distribution of the number of partitions processed per unlogged batch -=========================== ============== =========== - - -JVM Metrics -^^^^^^^^^^^ - -JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using `Metric Reporters`_. - -BufferPool -++++++++++ - -**Metric Name** - ``jvm.buffers..`` - -**JMX MBean** - ``java.nio:type=BufferPool name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Estimated total capacity of the buffers in this pool -Count Gauge Estimated number of buffers in the pool -Used Gauge Estimated memory that the Java virtual machine is using for this buffer pool -========================== ============== =========== - -FileDescriptorRatio -+++++++++++++++++++ - -**Metric Name** - ``jvm.fd.`` - -**JMX MBean** - ``java.lang:type=OperatingSystem name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Usage Ratio Ratio of used to total file descriptors -========================== ============== =========== - -GarbageCollector -++++++++++++++++ - -**Metric Name** - ``jvm.gc..`` - -**JMX MBean** - ``java.lang:type=GarbageCollector name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Count Gauge Total number of collections that have occurred -Time Gauge Approximate accumulated collection elapsed time in milliseconds -========================== ============== =========== - -Memory -++++++ - -**Metric Name** - ``jvm.memory..`` - -**JMX MBean** - ``java.lang:type=Memory`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -MemoryPool -++++++++++ - -**Metric Name** - 
``jvm.memory.pools..`` - -**JMX MBean** - ``java.lang:type=MemoryPool name=`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -JMX -^^^ - -Any JMX based client can access metrics from cassandra. - -If you wish to access JMX metrics over http it's possible to download `Mx4jTool `__ and -place ``mx4j-tools.jar`` into the classpath. On startup you will see in the log:: - - HttpAdaptor version 3.0.2 started on port 8081 - -To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -``conf/cassandra-env.sh`` and uncomment:: - - #MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0" - - #MX4J_PORT="-Dmx4jport=8081" - - -Metric Reporters -^^^^^^^^^^^^^^^^ - -As mentioned at the top of this section on monitoring the Cassandra metrics can be exported to a number of monitoring -system a number of `built in `__ and `third party -`__ reporter plugins. - -The configuration of these plugins is managed by the `metrics reporter config project -`__. There is a sample configuration file located at -``conf/metrics-reporter-config-sample.yaml``. - -Once configured, you simply start cassandra with the flag -``-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml``. The specified .yaml file plus any 3rd party -reporter jars must all be in Cassandra's classpath. diff --git a/src/doc/4.0-alpha4/_sources/operating/read_repair.rst.txt b/src/doc/4.0-alpha4/_sources/operating/read_repair.rst.txt deleted file mode 100644 index d280162b8..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/read_repair.rst.txt +++ /dev/null @@ -1,169 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _read-repair: - -Read repair -============== -Read Repair is the process of repairing data replicas during a read request. If all replicas involved in a read request at the given read consistency level are consistent the data is returned to the client and no read repair is needed. But if the replicas involved in a read request at the given consistency level are not consistent a read repair is performed to make replicas involved in the read request consistent. The most up-to-date data is returned to the client. The read repair runs in the foreground and is blocking in that a response is not returned to the client until the read repair has completed and up-to-date data is constructed. 
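As a minimal illustration (reusing the ``ks.tbl`` schema shown later in this section), the following ``cqlsh`` session issues the kind of quorum read that can trigger a blocking read repair when replicas disagree::

    cqlsh> CONSISTENCY QUORUM;
    cqlsh> SELECT * FROM ks.tbl WHERE k = 0;

If the replicas contacted for this read return mismatching data, the coordinator repairs them before the response is returned, as described in the sections that follow.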
- -Expectation of Monotonic Quorum Reads -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Cassandra uses a blocking read repair to ensure the expectation of "monotonic quorum reads" i.e. that in 2 successive quorum reads, it’s guaranteed the 2nd one won't get something older than the 1st one, and this even if a failed quorum write made a write of the most up to date value only to a minority of replicas. "Quorum" means majority of nodes among replicas. - -Table level configuration of monotonic reads -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Cassandra 4.0 adds support for table level configuration of monotonic reads (`CASSANDRA-14635 -`_). The ``read_repair`` table option has been added to table schema, with the options ``blocking`` (default), and ``none``. - -The ``read_repair`` option configures the read repair behavior to allow tuning for various performance and consistency behaviors. Two consistency properties are affected by read repair behavior. - -- Monotonic Quorum Reads: Provided by ``BLOCKING``. Monotonic quorum reads prevents reads from appearing to go back in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of replicas, it may be visible in one read, and then disappear in a subsequent read. -- Write Atomicity: Provided by ``NONE``. Write atomicity prevents reads from returning partially applied writes. Cassandra attempts to provide partition level write atomicity, but since only the data covered by a ``SELECT`` statement is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a batch, but then select a single row by specifying the clustering column in a ``SELECT`` statement. - -The available read repair settings are: - -Blocking -********* -The default setting. When ``read_repair`` is set to ``BLOCKING``, and a read repair is started, the read will block on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition level write atomicity. - -None -********* -When ``read_repair`` is set to ``NONE``, the coordinator will reconcile any differences between replicas, but will not attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads. - -An example of using the ``NONE`` setting for the ``read_repair`` option is as follows: - -:: - - CREATE TABLE ks.tbl (k INT, c INT, v INT, PRIMARY KEY (k,c)) with read_repair='NONE'"); - -Read Repair Example -^^^^^^^^^^^^^^^^^^^^^^^^^^ -To illustrate read repair with an example, consider that a client sends a read request with read consistency level ``TWO`` to a 5-node cluster as illustrated in Figure 1. Read consistency level determines how many replica nodes must return a response before the read request is considered successful. - - -.. figure:: Figure_1_read_repair.jpg - - -Figure 1. Client sends read request to a 5-node Cluster - -Three nodes host replicas for the requested data as illustrated in Figure 2. With a read consistency level of ``TWO`` two replica nodes must return a response for the read request to be considered successful. If the node the client sends request to hosts a replica of the data requested only one other replica node needs to be sent a read request to. 
But if the receiving node does not host a replica for the requested data the node becomes a coordinator node and forwards the read request to a node that hosts a replica. A direct read request is forwarded to the fastest node (as determined by dynamic snitch) as shown in Figure 2. A direct read request is a full read and returns the requested data. - -.. figure:: Figure_2_read_repair.jpg - -Figure 2. Direct Read Request sent to Fastest Replica Node - -Next, the coordinator node sends the requisite number of additional requests to satisfy the consistency level, which is ``TWO``. The coordinator node needs to send one more read request for a total of two. All read requests additional to the first direct read request are digest read requests. A digest read request is not a full read and only returns the hash value of the data. Only a hash value is returned to reduce the network data traffic. In the example being discussed the coordinator node sends one digest read request to a node hosting a replica as illustrated in Figure 3. - -.. figure:: Figure_3_read_repair.jpg - -Figure 3. Coordinator Sends a Digest Read Request - -The coordinator node has received a full copy of data from one node and a hash value for the data from another node. To compare the data returned, a hash value is calculated for the full copy of data. The two hash values are compared. If the hash values are the same no read repair is needed and the full copy of requested data is returned to the client. The coordinator node only performed a total of two replica read requests because the read consistency level is ``TWO`` in the example. If the consistency level were higher such as ``THREE``, three replica nodes would need to respond to a read request and only if all digest or hash values were to match with the hash value of the full copy of data would the read request be considered successful and the data returned to the client. - -But, if the hash value/s from the digest read request/s are not the same as the hash value of the data from the full read request of the first replica node it implies that an inconsistency in the replicas exists. To fix the inconsistency a read repair is performed. - -For example, consider that the digest request returns a hash value that is not the same as the hash value of the data from the direct full read request. To make the replicas consistent, the coordinator node sends a direct (full) read request to the replica node that it sent a digest read request to earlier, as illustrated in Figure 4. - -.. figure:: Figure_4_read_repair.jpg - -Figure 4. Coordinator sends Direct Read Request to Replica Node it had sent Digest Read Request to - -After receiving the data from the second replica node the coordinator has data from two of the replica nodes. It only needs two replicas as the read consistency level is ``TWO`` in the example. Data from the two replicas is compared and based on the timestamps the most recent replica is selected. Data may need to be merged to construct an up-to-date copy of data if one replica has data for only some of the columns. In the example, if the data from the first direct read request is found to be outdated and the data from the second full read request to be the latest, a read repair needs to be performed on Replica 2. If new up-to-date data is constructed by merging the two replicas a read repair would be needed on both the replicas involved. For example, a read repair is performed on Replica 2 as illustrated in Figure 5. - -.. 
figure:: Figure_5_read_repair.jpg - -Figure 5. Coordinator performs Read Repair - - -The most up-to-date data is returned to the client as illustrated in Figure 6. From the three replicas Replica 1 is not even read and thus not repaired. Replica 2 is repaired. Replica 3 is the most up-to-date and returned to client. - -.. figure:: Figure_6_read_repair.jpg - -Figure 6. Most up-to-date Data returned to Client - -Read Consistency Level and Read Repair -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The read consistency is most significant in determining if a read repair needs to be performed. As discussed in Table 1 a read repair is not needed for all of the consistency levels. - -Table 1. Read Repair based on Read Consistency Level - -+----------------------+-------------------------------------------+ -|Read Consistency Level| Description | -+----------------------+-------------------------------------------+ -| ONE |Read repair is not performed as the | -| |data from the first direct read request | -| |satisfies the consistency level ONE. | -| |No digest read requests are involved | -| |for finding mismatches in data. | -+----------------------+-------------------------------------------+ -| TWO |Read repair is performed if inconsistencies| -| |in data are found as determined by the | -| |direct and digest read requests. | -+----------------------+-------------------------------------------+ -| THREE |Read repair is performed if inconsistencies| -| |in data are found as determined by the | -| |direct and digest read requests. | -+----------------------+-------------------------------------------+ -|LOCAL_ONE |Read repair is not performed as the data | -| |from the direct read request from the | -| |closest replica satisfies the consistency | -| |level LOCAL_ONE.No digest read requests are| -| |involved for finding mismatches in data. | -+----------------------+-------------------------------------------+ -|LOCAL_QUORUM |Read repair is performed if inconsistencies| -| |in data are found as determined by the | -| |direct and digest read requests. | -+----------------------+-------------------------------------------+ -|QUORUM |Read repair is performed if inconsistencies| -| |in data are found as determined by the | -| |direct and digest read requests. | -+----------------------+-------------------------------------------+ - -If read repair is performed it is made only on the replicas that are not up-to-date and that are involved in the read request. The number of replicas involved in a read request would be based on the read consistency level; in the example it is two. - -Improved Read Repair Blocking Behavior in Cassandra 4.0 -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra 4.0 makes two improvements to read repair blocking behavior (`CASSANDRA-10726 -`_). - -1. Speculative Retry of Full Data Read Requests. Cassandra 4.0 makes use of speculative retry in sending read requests (full, not digest) to replicas if a full data response is not received, whether in the initial full read request or a full data read request during read repair. With speculative retry if it looks like a response may not be received from the initial set of replicas Cassandra sent messages to, to satisfy the consistency level, it speculatively sends additional read request to un-contacted replica/s. 
Cassandra 4.0 will also speculatively send a repair mutation to a minority of nodes not involved in the read repair data read / write cycle with the combined contents of all un-acknowledged mutations if it looks like one may not respond. Cassandra accepts acks from them in lieu of acks from the initial mutations sent out, so long as it receives the same number of acks as repair mutations transmitted. - -2. Only blocks on Full Data Responses to satisfy the Consistency Level. Cassandra 4.0 only blocks for what is needed for resolving the digest mismatch and waits for enough full data responses to meet the consistency level, no matter whether it’s speculative retry or read repair chance. As an example, if it looks like Cassandra might not receive full data responses from everyone in time, it sends additional requests to additional replicas not contacted in the initial full data read. If the collection of nodes that end up responding in time end up agreeing on the data, the response from the disagreeing replica that started the read repair is not considered, and won't be included in the response to the client, preserving the expectation of monotonic quorum reads. - -Diagnostic Events for Read Repairs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra 4.0 adds diagnostic events for read repair (`CASSANDRA-14668 -`_) that can be used for exposing information such as: - -- Contacted endpoints -- Digest responses by endpoint -- Affected partition keys -- Speculated reads / writes -- Update oversized - -Background Read Repair -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Background read repair, which was configured using the ``read_repair_chance`` and ``dclocal_read_repair_chance`` settings in ``cassandra.yaml``, is removed in Cassandra 4.0 (`CASSANDRA-13910 -`_). - -Read repair is not an alternative for other kinds of repairs such as full repairs or replacing a node that keeps failing. The data returned even after a read repair has been performed may not be the most up-to-date data if the consistency level is other than one requiring a response from all replicas. diff --git a/src/doc/4.0-alpha4/_sources/operating/repair.rst.txt b/src/doc/4.0-alpha4/_sources/operating/repair.rst.txt deleted file mode 100644 index 94fdc1109..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/repair.rst.txt +++ /dev/null @@ -1,208 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _repair: - -Repair ------- - -Cassandra is designed to remain available if one of its nodes is down or -unreachable. However, when a node is down or -unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but -are a best effort, and aren't guaranteed to inform a node of 100% of the writes it missed. 
These inconsistencies can -eventually result in data loss as nodes are replaced or tombstones expire. - -These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their -respective datasets for their common token ranges, and streaming the differences for any out of sync sections between -the nodes. It compares the data with merkle trees, which are a hierarchy of hashes. - -Incremental and Full Repairs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that's been written since the previous incremental repair. - -Incremental repairs are the default repair type, and if run regularly, can significantly reduce the time and io cost of -performing a repair. However, it's important to understand that once an incremental repair marks data as repaired, it won't -try to repair it again. This is fine for syncing up missed writes, but it doesn't protect against things like disk corruption, -data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally. - -Usage and Best Practices -^^^^^^^^^^^^^^^^^^^^^^^^ - -Since repair can result in a lot of disk and network io, it's not run automatically by Cassandra. It is run by the operator -via nodetool. - -Incremental repair is the default and is run with the following command: - -:: - - nodetool repair - -A full repair can be run with the following command: - -:: - - nodetool repair --full - -Additionally, repair can be run on a single keyspace: - -:: - - nodetool repair [options] - -Or even on specific tables: - -:: - - nodetool repair [options] - - -The repair command only repairs token ranges on the node being repaired, it doesn't repair the whole cluster. By default, repair -will operate on all token ranges replicated by the node you're running repair on, which will cause duplicate work if you run it -on every node. The ``-pr`` flag will only repair the "primary" ranges on a node, so you can repair your entire cluster by running -``nodetool repair -pr`` on each node in a single datacenter. - -The specific frequency of repair that's right for your cluster, of course, depends on several factors. However, if you're -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don't want to run incremental repairs, a full repair every 5 days is a good place -to start. - -At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays. - -Other Options -^^^^^^^^^^^^^ - -``-pr, --partitioner-range`` - Restricts repair to the 'primary' token ranges of the node being repaired. A primary range is just a token range for - which a node is the first replica in the ring. - -``-prv, --preview`` - Estimates the amount of streaming that would occur for the given repair command. This builds the merkle trees, and prints - the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated, - add the ``--full`` flag to estimate a full repair. 
- -``-vd, --validate`` - Verifies that the repaired data is the same across all nodes. Similiar to ``--preview``, this builds and compares merkle - trees of repaired data, but doesn't do any streaming. This is useful for troubleshooting. If this shows that the repaired - data is out of sync, a full repair should be run. - -.. seealso:: - :ref:`nodetool repair docs ` - -Full Repair Example -^^^^^^^^^^^^^^^^^^^^ -Full repair is typically needed to redistribute data after increasing the replication factor of a keyspace or after adding a node to the cluster. Full repair involves streaming SSTables. To demonstrate full repair start with a three node cluster. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool status - Datacenter: us-east-1 - ===================== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns Host ID Rack - UN 10.0.1.115 547 KiB 256 ? b64cb32a-b32a-46b4-9eeb-e123fa8fc287 us-east-1b - UN 10.0.3.206 617.91 KiB 256 ? 74863177-684b-45f4-99f7-d1006625dc9e us-east-1d - UN 10.0.2.238 670.26 KiB 256 ? 4dcdadd2-41f9-4f34-9892-1f20868b27c7 us-east-1c - -Create a keyspace with replication factor 3: - -:: - - cqlsh> DROP KEYSPACE cqlkeyspace; - cqlsh> CREATE KEYSPACE CQLKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - -Add a table to the keyspace: - -:: - - cqlsh> use cqlkeyspace; - cqlsh:cqlkeyspace> CREATE TABLE t ( - ... id int, - ... k int, - ... v text, - ... PRIMARY KEY (id) - ... ); - -Add table data: - -:: - - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1'); - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (2, 2, 'val2'); - -A query lists the data added: - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t; - - id | k | v - ----+---+------ - 1 | 1 | val1 - 0 | 0 | val0 - 2 | 2 | val2 - (3 rows) - -Make the following changes to a three node cluster: - -1. Increase the replication factor from 3 to 4. -2. Add a 4th node to the cluster - -When the replication factor is increased the following message gets output indicating that a full repair is needed as per (`CASSANDRA-13079 -`_): - -:: - - cqlsh:cqlkeyspace> ALTER KEYSPACE CQLKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4}; - Warnings : - When increasing replication factor you need to run a full (-full) repair to distribute the - data. - -Perform a full repair on the keyspace ``cqlkeyspace`` table ``t`` with following command: - -:: - - nodetool repair -full cqlkeyspace t - -Full repair completes in about a second as indicated by the output: - -:: - -[ec2-user@ip-10-0-2-238 ~]$ nodetool repair -full cqlkeyspace t -[2019-08-17 03:06:21,445] Starting repair command #1 (fd576da0-c09b-11e9-b00c-1520e8c38f00), repairing keyspace cqlkeyspace with repair options (parallelism: parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [t], dataCenters: [], hosts: [], previewKind: NONE, # of ranges: 1024, pull repair: false, force repair: false, optimise streams: false) -[2019-08-17 03:06:23,059] Repair session fd8e5c20-c09b-11e9-b00c-1520e8c38f00 for range [(-8792657144775336505,-8786320730900698730], (-5454146041421260303,-5439402053041523135], (4288357893651763201,4324309707046452322], ... 
, (4350676211955643098,4351706629422088296]] finished (progress: 0%) -[2019-08-17 03:06:23,077] Repair completed successfully -[2019-08-17 03:06:23,077] Repair command #1 finished in 1 second -[ec2-user@ip-10-0-2-238 ~]$ - -The ``nodetool tpstats`` command should list a repair having been completed as ``Repair-Task`` > ``Completed`` column value of 1: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool tpstats - Pool Name Active Pending Completed Blocked All time blocked - ReadStage 0 0 99 0 0 - … - Repair-Task 0 0 1 0 0 - RequestResponseStage 0 0 2078 0 0 diff --git a/src/doc/4.0-alpha4/_sources/operating/security.rst.txt b/src/doc/4.0-alpha4/_sources/operating/security.rst.txt deleted file mode 100644 index 12f2d24c2..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/security.rst.txt +++ /dev/null @@ -1,441 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Security --------- -There are three main components to the security features provided by Cassandra: - -- TLS/SSL encryption for client and inter-node communication -- Client authentication -- Authorization - -By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still: - -- Craft internode messages to insert users into authentication schema -- Craft internode messages to truncate or drop schema -- Use tools such as ``sstableloader`` to overwrite ``system_auth`` tables -- Attach to the cluster directly to capture write traffic - -Correct configuration of all three security components should negate theses vectors. Therefore, understanding Cassandra's -security features is crucial to configuring your cluster to meet your security needs. - - -TLS/SSL Encryption -^^^^^^^^^^^^^^^^^^ -Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently. - -In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can -be overidden using the settings in ``cassandra.yaml``, but this is not recommended unless there are policies in place -which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be -updated. 
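For illustration, the relevant sections of ``cassandra.yaml`` look roughly like the following sketch (keystore paths and passwords are placeholders, and option names should be verified against the ``cassandra.yaml`` shipped with your Cassandra version)::

    server_encryption_options:
        # one of: none, rack, dc, all
        internode_encryption: all
        keystore: /path/to/keystore.jks
        keystore_password: <keystore_password>
        truststore: /path/to/truststore.jks
        truststore_password: <truststore_password>
        # protocol and cipher_suites may also be set here if policy requires it

    client_encryption_options:
        enabled: true
        optional: false
        keystore: /path/to/keystore.jks
        keystore_password: <keystore_password>

The individual options are described in the sections below.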
- -FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See `the java document on FIPS `__ -for more details. - -For information on generating the keystore and truststore files used in SSL communications, see the -`java documentation on creating keystores `__ - -SSL Certificate Hot Reloading -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes. - -Certificate Hot reloading may also be triggered using the ``nodetool reloadssl`` command. Use this if you want to Cassandra to -immediately notice the changed certificates. - -Inter-node Encryption -~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing inter-node encryption are found in ``cassandra.yaml`` in the ``server_encryption_options`` -section. To enable inter-node encryption, change the ``internode_encryption`` setting from its default value of ``none`` -to one value from: ``rack``, ``dc`` or ``all``. - -Client to Node Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing client to node encryption are found in ``cassandra.yaml`` in the ``client_encryption_options`` -section. There are two primary toggles here for enabling encryption, ``enabled`` and ``optional``. - -- If neither is set to ``true``, client connections are entirely unencrypted. -- If ``enabled`` is set to ``true`` and ``optional`` is set to ``false``, all client connections must be secured. -- If both options are set to ``true``, both encrypted and unencrypted connections are supported using the same port. - Client connections using encryption with this configuration will be automatically detected and handled by the server. - -As an alternative to the ``optional`` setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set ``optional`` to false and use the ``native_transport_port_ssl`` -setting in ``cassandra.yaml`` to specify the port to be used for secure client communication. - -.. _operation-roles: - -Roles -^^^^^ - -Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -``role_manager`` setting in ``cassandra.yaml``. The default setting uses ``CassandraRoleManager``, an implementation -which stores role information in the tables of the ``system_auth`` keyspace. - -See also the :ref:`CQL documentation on roles `. - -Authentication -^^^^^^^^^^^^^^ - -Authentication is pluggable in Cassandra and is configured using the ``authenticator`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthenticator`` which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions. 
- -The default distribution also includes ``PasswordAuthenticator``, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication. - -.. _password-authentication: - -Enabling Password Authentication -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster. - -Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps: - -1. Open a ``cqlsh`` session and change the replication factor of the ``system_auth`` keyspace. By default, this keyspace - uses ``SimpleReplicationStrategy`` and a ``replication_factor`` of 1. It is recommended to change this for any - non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to - configure a replication factor of 3 to 5 per-DC. - -:: - - ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3}; - -2. Edit ``cassandra.yaml`` to change the ``authenticator`` option like so: - -:: - - authenticator: PasswordAuthenticator - -3. Restart the node. - -4. Open a new ``cqlsh`` session using the credentials of the default superuser: - -:: - - cqlsh -u cassandra -p cassandra - -5. During login, the credentials for the default superuser are read with a consistency level of ``QUORUM``, whereas - those for all other users (including superusers) are read at ``LOCAL_ONE``. In the interests of performance and - availability, as well as security, operators should create another superuser and disable the default one. This step - is optional, but highly recommended. While logged in as the default superuser, create another superuser role which - can be used to bootstrap further configuration. - -:: - - # create a new superuser - CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super'; - -6. Start a new cqlsh session, this time logging in as the new_superuser and disable the default superuser. - -:: - - ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false; - -7. Finally, set up the roles and credentials for your application users with :ref:`CREATE ROLE ` - statements. - -At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster. - -Note that using ``PasswordAuthenticator`` also requires the use of :ref:`CassandraRoleManager `. - -See also: :ref:`setting-credentials-for-internal-authentication`, :ref:`CREATE ROLE `, -:ref:`ALTER ROLE `, :ref:`ALTER KEYSPACE ` and :ref:`GRANT PERMISSION -`, - -.. 
_authorization: - -Authorization -^^^^^^^^^^^^^ - -Authorization is pluggable in Cassandra and is configured using the ``authorizer`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthorizer`` which performs no checking and so effectively grants all -permissions to all roles. This must be used if ``AllowAllAuthenticator`` is the configured authenticator. - -The default distribution also includes ``CassandraAuthorizer``, which does implement full permissions management -functionality and stores its data in Cassandra system tables. - -Enabling Internal Authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests. - -The following assumes that authentication has already been enabled via the process outlined in -:ref:`password-authentication`. Perform these steps to enable internal authorization across the cluster: - -1. On the selected node, edit ``cassandra.yaml`` to change the ``authorizer`` option like so: - -:: - - authorizer: CassandraAuthorizer - -2. Restart the node. - -3. Open a new ``cqlsh`` session using the credentials of a role with superuser credentials: - -:: - - cqlsh -u dba -p super - -4. Configure the appropriate access privileges for your clients using `GRANT PERMISSION `_ - statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so - disruption to clients is avoided. - -:: - - GRANT SELECT ON ks.t1 TO db_user; - -5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node - restarts and clients reconnect, the enforcement of the granted permissions will begin. - -See also: :ref:`GRANT PERMISSION `, `GRANT ALL ` and :ref:`REVOKE PERMISSION -` - -.. _auth-caching: - -Caching -^^^^^^^ - -Enabling authentication and authorization places additional load on the cluster by frequently reading from the -``system_auth`` tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from ``cassandra.yaml`` -or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from ``cassandra.yaml`` when the node is restarted. - -Each cache has 3 options which can be set: - -Validity Period - Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache. -Refresh Rate - Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these - async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a - shorter time than the validity period. -Max Entries - Controls the upper bound on cache size. 
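As an example, the roles cache could be tuned in ``cassandra.yaml`` with settings such as the following (the values are purely illustrative; the option names follow the naming convention described next)::

    roles_validity_in_ms: 2000
    roles_update_interval_in_ms: 1000
    roles_cache_max_entries: 1000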
- -The naming for these options in ``cassandra.yaml`` follows the convention: - -* ``_validity_in_ms`` -* ``_update_interval_in_ms`` -* ``_cache_max_entries`` - -Where ```` is one of ``credentials``, ``permissions``, or ``roles``. - -As mentioned, these are also exposed via JMX in the mbeans under the ``org.apache.cassandra.auth`` domain. - -JMX access -^^^^^^^^^^ - -Access control for JMX clients is configured separately from that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra's own auth subsystem. - -The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to change the ``LOCAL_JMX`` setting to ``no``. Under the -standard configuration, when remote JMX connections are enabled, :ref:`standard JMX authentication ` -is also switched on. - -Note that by default, local-only connections are not subject to authentication, but this can be enabled. - -If enabling remote connections, it is recommended to also use :ref:`SSL ` connections. - -Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as :ref:`nodetool `, are -correctly configured and working as expected. - -.. _standard-jmx-auth: - -Standard JMX Auth -~~~~~~~~~~~~~~~~~ - -Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -``cassandra-env.sh`` by the line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -Edit the password file to add username/password pairs: - -:: - - jmx_user jmx_password - -Secure the credentials file so that only the user running the Cassandra process can read it: - -:: - - $ chown cassandra:cassandra /etc/cassandra/jmxremote.password - $ chmod 400 /etc/cassandra/jmxremote.password - -Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in ``cassandra-env.sh``: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -Then edit the access file to grant your JMX user readwrite permission: - -:: - - jmx_user readwrite - -Cassandra must be restarted to pick up the new settings. - -See also: `Using File-Based Password Authentication In JMX -`__ - - -Cassandra Integrated Auth -~~~~~~~~~~~~~~~~~~~~~~~~~ - -An alternative to the out-of-the-box JMX auth is to use Cassandra's own authentication and/or authorization providers -for JMX clients. This is potentially more flexible and secure but it comes with one major caveat: it is not -available until `after` a node has joined the ring, because the auth subsystem is not fully configured until that point. -However, it is often critical for monitoring purposes to have JMX access particularly during bootstrap. So it is -recommended, where possible, to use local only JMX auth during bootstrap and then, if remote connectivity is required, -to switch to integrated auth once the node has joined the ring and initial setup is complete. - -With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates -can be managed centrally using just ``cqlsh``. 
Furthermore, fine grained control over exactly which operations are -permitted on particular MBeans can be acheived via :ref:`GRANT PERMISSION `. - -To enable integrated authentication, edit ``cassandra-env.sh`` to uncomment these lines: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" - #JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" - -And disable the JMX standard auth by commenting this line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -To enable integrated authorization, uncomment this line: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" - -Check standard access control is off by ensuring this line is commented out: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as: - -:: - - CREATE ROLE jmx WITH LOGIN = false; - GRANT SELECT ON ALL MBEANS TO jmx; - GRANT DESCRIBE ON ALL MBEANS TO jmx; - GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx; - GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx; - - # Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx; - - # Grant the jmx role to one with login permissions so that it can access the JMX tooling - CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false; - GRANT jmx TO ks_user; - -Fine grained access control to individual MBeans is also supported: - -:: - - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner; - -This permits the ``ks_user`` role to invoke methods on the MBean representing a single table in ``test_keyspace``, while -granting the same permission for all table level MBeans in that keyspace to the ``ks_owner`` role. - -Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered. - -See also: :ref:`Permissions `. - -.. _jmx-with-ssl: - -JMX With SSL -~~~~~~~~~~~~ - -JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit -the relevant lines in ``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to uncomment and set the values of these -properties as required: - -``com.sun.management.jmxremote.ssl`` - set to true to enable SSL -``com.sun.management.jmxremote.ssl.need.client.auth`` - set to true to enable validation of client certificates -``com.sun.management.jmxremote.registry.ssl`` - enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub -``com.sun.management.jmxremote.ssl.enabled.protocols`` - by default, the protocols supported by the JVM will be used, override with a comma-separated list. 
Note that this is - not usually necessary and using the defaults is the preferred option. -``com.sun.management.jmxremote.ssl.enabled.cipher.suites`` - by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that - this is not usually necessary and using the defaults is the preferred option. -``javax.net.ssl.keyStore`` - set the path on the local filesystem of the keystore containing server private keys and public certificates -``javax.net.ssl.keyStorePassword`` - set the password of the keystore file -``javax.net.ssl.trustStore`` - if validation of client certificates is required, use this property to specify the path of the truststore containing - the public certificates of trusted clients -``javax.net.ssl.trustStorePassword`` - set the password of the truststore file - -See also: `Oracle Java7 Docs `__, -`Monitor Java with JMX `__ diff --git a/src/doc/4.0-alpha4/_sources/operating/snitch.rst.txt b/src/doc/4.0-alpha4/_sources/operating/snitch.rst.txt deleted file mode 100644 index b716e8290..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/snitch.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _snitch: - -Snitch ------- - -In cassandra, the snitch has two functions: - -- it teaches Cassandra enough about your network topology to route requests efficiently. -- it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping - machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same - "rack" (which may not actually be a physical location). - -Dynamic snitching -^^^^^^^^^^^^^^^^^ - -The dynamic snitch monitor read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is -configured with the following properties on ``cassandra.yaml``: - -- ``dynamic_snitch``: whether the dynamic snitch should be enabled or disabled. -- ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the more expensive part of host score - calculation. -- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero, this will allow 'pinning' of replicas to hosts - in order to increase cache capacity. -- ``dynamic_snitch_badness_threshold:``: The badness threshold will control how much worse the pinned host has to be - before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a - percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned - host was 20% worse than the fastest. 
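As an illustration, the dynamic snitch properties appear in ``cassandra.yaml`` roughly as follows (the values shown mirror commonly shipped defaults and are for illustration only)::

    dynamic_snitch: true
    dynamic_snitch_update_interval_in_ms: 100
    dynamic_snitch_reset_interval_in_ms: 600000
    dynamic_snitch_badness_threshold: 0.1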
- -Snitch classes -^^^^^^^^^^^^^^ - -The ``endpoint_snitch`` parameter in ``cassandra.yaml`` should be set to the class that implements -``IEndPointSnitch`` which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the snitch implementations: - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via gossip. If ``cassandra-topology.properties`` exists, - it is used as a fallback, allowing migration from the PropertyFileSnitch. - -SimpleSnitch - Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -PropertyFileSnitch - Proximity is determined by rack and data center, which are explicitly configured in - ``cassandra-topology.properties``. - -Ec2Snitch - Appropriate for EC2 deployments in a single Region, or in multiple regions with inter-region VPC enabled (available - since the end of 2017, see `AWS announcement `_). - Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter, and the - Availability Zone as the rack. Only private IPs are used, so this will work across multiple regions only if - inter-region VPC is enabled. - -Ec2MultiRegionSnitch - Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the - public IP as well). You will need to open the ``storage_port`` or ``ssl_storage_port`` on the public IP firewall - (For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection). - -RackInferringSnitch - Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each - node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an - example of writing a custom Snitch class and is provided in that spirit. diff --git a/src/doc/4.0-alpha4/_sources/operating/topo_changes.rst.txt b/src/doc/4.0-alpha4/_sources/operating/topo_changes.rst.txt deleted file mode 100644 index 6c8f8ecdf..000000000 --- a/src/doc/4.0-alpha4/_sources/operating/topo_changes.rst.txt +++ /dev/null @@ -1,129 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _topology-changes: - -Adding, replacing, moving and removing nodes --------------------------------------------- - -Bootstrap -^^^^^^^^^ - -Adding new nodes is called "bootstrapping". 
The ``num_tokens`` parameter will define the number of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for. - -Token allocation -~~~~~~~~~~~~~~~~ - -With the default token allocation algorithm the new node will pick ``num_tokens`` random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead. - -On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option ``-Dcassandra.allocate_tokens_for_keyspace=``, where -```` is the keyspace from which the algorithm can find the load information to optimize token assignment for. - -Manual token assignment -""""""""""""""""""""""" - -You may specify a comma-separated list of tokens manually with the ``initial_token`` ``cassandra.yaml`` parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens. - -Range streaming -~~~~~~~~~~~~~~~~ - -After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state. - -In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag ``-Dcassandra.consistent.rangemovement=false``. - -Resuming failed/hung bootstrap -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On 2.2+, if the bootstrap process fails, it's possible to resume bootstrap from the previously saved state by calling -``nodetool bootstrap resume``. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to clean up bootstrap state and start fresh, you may set the JVM startup flag -``-Dcassandra.reset_bootstrap_progress=true``. - -On lower versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart -the bootstrap process. - -Manual bootstrapping -~~~~~~~~~~~~~~~~~~~~ - -It's possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -``auto_bootstrap: false``. This may be useful when restoring a node from a backup or creating a new datacenter. - -Removing nodes -^^^^^^^^^^^^^^ - -You can take a live node out of the cluster with ``nodetool decommission`` (run on the node itself), or remove a dead one -with ``nodetool removenode`` (run from any other machine). This will assign the ranges the old node was responsible for to -other nodes, and replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas.
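As a minimal sketch of the two paths (the host ID passed to ``removenode`` is the one reported by ``nodetool status``)::

    # on the live node that is leaving the cluster
    nodetool decommission

    # from any other node, to remove a dead node by its host ID
    nodetool removenode <host-id>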
- -No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually. - -Moving nodes -^^^^^^^^^^^^ - -When ``num_tokens: 1`` it's possible to move the node position in the ring with ``nodetool move``. Moving is both a -convenience over and more efficient than decommission + bootstrap. After moving a node, ``nodetool cleanup`` should be -run to remove any unnecessary data. - -Replacing a dead node -^^^^^^^^^^^^^^^^^^^^^ - -In order to replace a dead node, start cassandra with the JVM startup flag -``-Dcassandra.replace_address_first_boot=``. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node to be DOWN (DN), however this node will see itself as UP -(UN). Accurate replacement state can be found in ``nodetool netstats``. - -The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will -only receive writes during the bootstrapping phase if it has a different ip address to the node that is being replaced. -(See CASSANDRA-8523 and CASSANDRA-12344) - -Once the bootstrapping is complete the node will be marked "UP". - -.. Note:: If any of the following cases apply, you **MUST** run repair to make the replaced node consistent again, since - it missed ongoing writes during/prior to bootstrapping. The *replacement* timeframe refers to the period from when the - node initially dies to when a new node completes the replacement process. - - 1. The node is down for longer than ``max_hint_window_in_ms`` before being replaced. - 2. You are replacing using the same IP address as the dead node **and** replacement takes longer than ``max_hint_window_in_ms``. - -Monitoring progress -^^^^^^^^^^^^^^^^^^^ - -Bootstrap, replace, move and remove progress can be monitored using ``nodetool netstats`` which will show the progress -of the streaming operations. - -Cleanup data after range movements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As a safety measure, Cassandra does not automatically remove data from nodes that "lose" part of their token range due -to a range movement operation (bootstrap, move, replace). Run ``nodetool cleanup`` on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node. diff --git a/src/doc/4.0-alpha4/_sources/plugins/index.rst.txt b/src/doc/4.0-alpha4/_sources/plugins/index.rst.txt deleted file mode 100644 index 4073a92cb..000000000 --- a/src/doc/4.0-alpha4/_sources/plugins/index.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -Third-Party Plugins -=================== - -Available third-party plugins for Apache Cassandra - -CAPI-Rowcache -------------- - -The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments. - -The official page for the `CAPI-Rowcache plugin `__ contains further details how to build/run/download the plugin. - - -Stratio’s Cassandra Lucene Index --------------------------------- - -Stratio’s Lucene index is a Cassandra secondary index implementation based on `Apache Lucene `__. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities such as with ElasticSearch or `Apache Solr `__, including full text search capabilities, free multivariable, geospatial and bitemporal search, relevance queries and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability is guaranteed. - -The official Github repository `Cassandra Lucene Index `__ contains everything you need to build/run/configure the plugin. \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/tools/cassandra_stress.rst.txt b/src/doc/4.0-alpha4/_sources/tools/cassandra_stress.rst.txt deleted file mode 100644 index bcac54ec1..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/cassandra_stress.rst.txt +++ /dev/null @@ -1,269 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: yaml - -.. _cassandra_stress: - -Cassandra Stress ----------------- - -cassandra-stress is a tool for benchmarking and load testing a Cassandra -cluster. cassandra-stress supports testing arbitrary CQL tables and queries -to allow users to benchmark their data model. - -This documentation focuses on user mode as this allows the testing of your -actual schema. - -Usage -^^^^^ -There are several operation types: - - * write-only, read-only, and mixed workloads of standard data - * write-only and read-only workloads for counter columns - * user configured workloads, running custom queries on custom schemas - -The syntax is `cassandra-stress [options]`. If you want more information on a given command -or options, just run `cassandra-stress help `. 
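For example, a minimal smoke test that writes and then reads back 100,000 rows could look like the following (a sketch only, not a tuned benchmark)::

    cassandra-stress write n=100000 -rate threads=50
    cassandra-stress read n=100000 -rate threads=50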
- -Commands: - read: - Multiple concurrent reads - the cluster must first be populated by a write test - write: - Multiple concurrent writes against the cluster - mixed: - Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test - counter_write: - Multiple concurrent updates of counters. - counter_read: - Multiple concurrent reads of counters. The cluster must first be populated by a counter_write test. - user: - Interleaving of user provided queries, with configurable ratio and distribution. - help: - Print help for a command or option - print: - Inspect the output of a distribution definition - legacy: - Legacy support mode - -Primary Options: - -pop: - Population distribution and intra-partition visit order - -insert: - Insert specific options relating to various methods for batching and splitting partition updates - -col: - Column details such as size and count distribution, data generator, names, comparator and if super columns should be used - -rate: - Thread count, rate limit or automatic mode (default is auto) - -mode: - Thrift or CQL with options - -errors: - How to handle errors when encountered during stress - -sample: - Specify the number of samples to collect for measuring latency - -schema: - Replication settings, compression, compaction, etc. - -node: - Nodes to connect to - -log: - Where to log progress to, and the interval at which to do it - -transport: - Custom transport factories - -port: - The port to connect to Cassandra nodes on - -sendto: - Specify a stress server to send this command to - -graph: - Graph recorded metrics - -tokenrange: - Token range settings - - -Suboptions: - Every command and primary option has its own collection of suboptions. These are too numerous to list here. - For information on the suboptions for each command or option, please use the help command, - `cassandra-stress help `. - -User mode -^^^^^^^^^ - -User mode allows you to stress your own schemas. This can save time in -the long run rather than building an application and then realising your schema -doesn't scale. - -Profile -+++++++ - -User mode requires a profile defined in YAML. -Multiple YAML files may be specified, in which case operations in the ops argument are referenced as specname.opname. - -An identifier for the profile:: - - specname: staff_activities - -The keyspace for the test:: - - keyspace: staff - -CQL for the keyspace. Optional if the keyspace already exists:: - - keyspace_definition: | - CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -The table to be stressed:: - - table: staff_activities - -CQL for the table. Optional if the table already exists:: - - table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when, what) - ) - - -Optional meta information on the generated columns in the above table. -The min and max only apply to text and blob types.
-The distribution field represents the total unique population -distribution of that column across rows:: - - columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -Supported types are: - -An exponential distribution over the range [min..max]:: - - EXP(min..max) - -An extreme value (Weibull) distribution over the range [min..max]:: - - EXTREME(min..max,shape) - -A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:: - - GAUSSIAN(min..max,stdvrng) - -A gaussian/normal distribution, with explicitly defined mean and stdev:: - - GAUSSIAN(min..max,mean,stdev) - -A uniform distribution over the range [min, max]:: - - UNIFORM(min..max) - -A fixed distribution, always returning the same value:: - - FIXED(val) - -If preceded by ~, the distribution is inverted - -Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1) - -Insert distributions:: - - insert: - # How many partition to insert per batch - partitions: fixed(1) - # How many rows to update per partition - select: fixed(1)/500 - # UNLOGGED or LOGGED batch for insert - batchtype: UNLOGGED - - -Currently all inserts are done inside batches. - -Read statements to use during the test:: - - queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? LIMIT 1 - fields: samerow - -Running a user mode test:: - - cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once - -This will create the schema then run tests for 1 minute with an equal number of inserts, latest_event queries and events -queries. Additionally the table will be truncated once before the test. - -The full example can be found here :download:`yaml <./stress-example.yaml>` - -Running a user mode test with multiple yaml files:: - cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once - -This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table - although care must be taken that the table definition is identical (data generation specs can be different). - -Lightweight transaction support -+++++++++++++++++++++++++++++++ - -cassandra-stress supports lightweight transactions. In this it will first read current data from Cassandra and then uses read value(s) -to fulfill lightweight transaction condition(s). - -Lightweight transaction update query:: - - queries: - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? - fields: samerow - -The full example can be found here :download:`yaml <./stress-lwt-example.yaml>` - -Graphing -^^^^^^^^ - -Graphs can be generated for each run of stress. - -.. 
image:: example-stress-graph.png - -To create a new graph:: - - cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" - -To add a new run to an existing graph, point to an existing file and add a revision name:: - - cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run" - -FAQ -^^^^ - -**How do you use NetworkTopologyStrategy for the keyspace?** - -Use the schema option, making sure to either escape the parentheses or enclose them in quotes:: - - cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)" - -**How do you use SSL?** - -Use the transport option:: - - cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra" \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_sources/tools/cqlsh.rst.txt b/src/doc/4.0-alpha4/_sources/tools/cqlsh.rst.txt deleted file mode 100644 index b800b88f4..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/cqlsh.rst.txt +++ /dev/null @@ -1,458 +0,0 @@ -.. highlight:: none - -.. _cqlsh: - -cqlsh: the CQL shell --------------------- - -cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line. - - -Compatibility -^^^^^^^^^^^^^ - -cqlsh is compatible with Python 2.7. - -In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported. - - -Optional Dependencies -^^^^^^^^^^^^^^^^^^^^^ - -cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh. - -pytz -~~~~ - -By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the `pytz `__ library must be installed. See the ``timezone`` option in cqlshrc_ for -specifying a timezone to use. - -cython -~~~~~~ - -The performance of cqlsh's ``COPY`` operations can be improved by installing `cython `__. This will -compile the python modules that are central to the performance of ``COPY``. - -cqlshrc -^^^^^^^ - -The ``cqlshrc`` file holds configuration options for cqlsh. By default this is in the user's home directory at -``~/.cassandra/cqlshrc``, but a custom location can be specified with the ``--cqlshrc`` option. - -Example config values and documentation can be found in the ``conf/cqlshrc.sample`` file of a tarball installation. You -can also view the latest version of `cqlshrc online `__. - - -Command Line Options -^^^^^^^^^^^^^^^^^^^^ - -Usage: - -``cqlsh [options] [host [port]]`` - -Options: - -``-C`` ``--color`` - Force color output - -``--no-color`` - Disable color output - -``--browser`` - Specify the browser to use for displaying cqlsh help. This can be one of the `supported browser names - `__ (e.g. ``firefox``) or a browser path followed by ``%s`` (e.g. - ``/usr/bin/google-chrome-stable %s``).
- -``--ssl`` - Use SSL when connecting to Cassandra - -``-u`` ``--user`` - Username to authenticate against Cassandra with - -``-p`` ``--password`` - Password to authenticate against Cassandra with, should - be used in conjunction with ``--user`` - -``-k`` ``--keyspace`` - Keyspace to authenticate to, should be used in conjunction - with ``--user`` - -``-f`` ``--file`` - Execute commands from the given file, then exit - -``--debug`` - Print additional debugging information - -``--encoding`` - Specify a non-default encoding for output (defaults to UTF-8) - -``--cqlshrc`` - Specify a non-default location for the ``cqlshrc`` file - -``-e`` ``--execute`` - Execute the given statement, then exit - -``--connect-timeout`` - Specify the connection timeout in seconds (defaults to 2s) - -``--python /path/to/python`` - Specify the full path to Python interpreter to override default on systems with multiple interpreters installed - -``--request-timeout`` - Specify the request timeout in seconds (defaults to 10s) - -``-t`` ``--tty`` - Force tty mode (command prompt) - - -Special Commands -^^^^^^^^^^^^^^^^ - -In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below. - -``CONSISTENCY`` -~~~~~~~~~~~~~~~ - -`Usage`: ``CONSISTENCY `` - -Sets the consistency level for operations to follow. Valid arguments include: - -- ``ANY`` -- ``ONE`` -- ``TWO`` -- ``THREE`` -- ``QUORUM`` -- ``ALL`` -- ``LOCAL_QUORUM`` -- ``LOCAL_ONE`` -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -``SERIAL CONSISTENCY`` -~~~~~~~~~~~~~~~~~~~~~~ - -`Usage`: ``SERIAL CONSISTENCY `` - -Sets the serial consistency level for operations to follow. Valid arguments include: - -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of ``QUORUM`` (and -is successful), then a ``QUORUM`` read is guaranteed to see that write. But if the regular consistency level of that -write is ``ANY``, then only a read with a consistency level of ``SERIAL`` is guaranteed to see it (even a read with -consistency ``ALL`` is not guaranteed to be enough). - -``SHOW VERSION`` -~~~~~~~~~~~~~~~~ -Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:: - - cqlsh> SHOW VERSION - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - -``SHOW HOST`` -~~~~~~~~~~~~~ - -Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:: - - cqlsh> SHOW HOST - Connected to Prod_Cluster at 192.0.0.1:9042. - -``SHOW SESSION`` -~~~~~~~~~~~~~~~~ - -Pretty prints a specific tracing session. 
- -`Usage`: ``SHOW SESSION `` - -Example usage:: - - cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8 - - Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8 - - activity | timestamp | source | source_elapsed | client - -----------------------------------------------------------+----------------------------+-----------+----------------+----------- - Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 | 0 | 127.0.0.1 - Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 | 3843 | 127.0.0.1 - ... - - -``SOURCE`` -~~~~~~~~~~ - -Reads the contents of a file and executes each line as a CQL statement or special cqlsh command. - -`Usage`: ``SOURCE `` - -Example usage:: - - cqlsh> SOURCE '/home/thobbs/commands.cql' - -``CAPTURE`` -~~~~~~~~~~~ - -Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured. - -`Usage`:: - - CAPTURE ''; - CAPTURE OFF; - CAPTURE; - -That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation (``'~/mydir'``) is supported for referring to ``$HOME``. - -Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session. - -To stop capturing output and show it in the cqlsh session again, use ``CAPTURE OFF``. - -To inspect the current capture configuration, use ``CAPTURE`` with no arguments. - -``HELP`` -~~~~~~~~ - -Gives information about cqlsh commands. To see available topics, enter ``HELP`` without any arguments. To see help on a -topic, use ``HELP ``. Also see the ``--browser`` argument for controlling what browser is used to display help. - -``TRACING`` -~~~~~~~~~~~ - -Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed. - -`Usage`:: - - TRACING ON - TRACING OFF - -``PAGING`` -~~~~~~~~~~ - -Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it's a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once. - -`Usage`:: - - PAGING ON - PAGING OFF - PAGING - -``EXPAND`` -~~~~~~~~~~ - -Enables or disables vertical printing of rows. Enabling ``EXPAND`` is useful when many columns are fetched, or the -contents of a single column are large. - -`Usage`:: - - EXPAND ON - EXPAND OFF - -``LOGIN`` -~~~~~~~~~ - -Authenticate as a specified Cassandra user for the current session. - -`Usage`:: - - LOGIN [] - -``EXIT`` -~~~~~~~~~ - -Ends the current session and terminates the cqlsh process. - -`Usage`:: - - EXIT - QUIT - -``CLEAR`` -~~~~~~~~~ - -Clears the console. - -`Usage`:: - - CLEAR - CLS - -``DESCRIBE`` -~~~~~~~~~~~~ - -Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema. - -`Usage`:: - - DESCRIBE CLUSTER - DESCRIBE SCHEMA - DESCRIBE KEYSPACES - DESCRIBE KEYSPACE - DESCRIBE TABLES - DESCRIBE TABLE
- DESCRIBE INDEX - DESCRIBE MATERIALIZED VIEW - DESCRIBE TYPES - DESCRIBE TYPE - DESCRIBE FUNCTIONS - DESCRIBE FUNCTION - DESCRIBE AGGREGATES - DESCRIBE AGGREGATE - -In any of the commands, ``DESC`` may be used in place of ``DESCRIBE``. - -The ``DESCRIBE CLUSTER`` command prints the cluster name and partitioner:: - - cqlsh> DESCRIBE CLUSTER - - Cluster: Test Cluster - Partitioner: Murmur3Partitioner - -The ``DESCRIBE SCHEMA`` command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup. - -``COPY TO`` -~~~~~~~~~~~ - -Copies data from a table to a CSV file. - -`Usage`:: - - COPY
[(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...] - -If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name. - - -The ``<file name>`` should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value ``STDOUT`` (without single quotes) to print the CSV to stdout. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``MAXREQUESTS`` - The maximum number of token ranges to fetch simultaneously. Defaults to 6. - -``PAGESIZE`` - The number of rows to fetch in a single page. Defaults to 1000. - -``PAGETIMEOUT`` - By default the page timeout is 10 seconds per 1000 entries - in the page size or 10 seconds if pagesize is smaller. - -``BEGINTOKEN``, ``ENDTOKEN`` - Token range to export. Defaults to exporting the full ring. - -``MAXOUTPUTSIZE`` - The maximum size of the output file measured in number of lines; - beyond this maximum the output file will be split into segments. - -1 means unlimited, and is the default. - -``ENCODING`` - The encoding used for characters. Defaults to ``utf8``. - -``COPY FROM`` -~~~~~~~~~~~~~ -Copies data from a CSV file to a table. - -`Usage`:: - - COPY <table name>
[(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...] - -If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name. - -The ``<file name>`` should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value ``STDIN`` (without single quotes) to read the -CSV data from stdin. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY FROM`` -````````````````````````` - -``INGESTRATE`` - The maximum number of rows to process per second. Defaults to 100000. - -``MAXROWS`` - The maximum number of rows to import. -1 means unlimited, and is the default. - -``SKIPROWS`` - A number of initial rows to skip. Defaults to 0. - -``SKIPCOLS`` - A comma-separated list of column names to ignore. By default, no columns are skipped. - -``MAXPARSEERRORS`` - The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default. - -``MAXINSERTERRORS`` - The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000. - -``ERRFILE`` = - A file to store all rows that could not be imported; by default this is ``import__
.err`` where ```` is - your keyspace and ``
`` is your table name. - -``MAXBATCHSIZE`` - The max number of rows inserted in a single batch. Defaults to 20. - -``MINBATCHSIZE`` - The min number of rows inserted in a single batch. Defaults to 2. - -``CHUNKSIZE`` - The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000. - -.. _shared-copy-options: - -Shared COPY Options -``````````````````` - -Options that are common to both ``COPY TO`` and ``COPY FROM``. - -``NULLVAL`` - The string placeholder for null values. Defaults to ``null``. - -``HEADER`` - For ``COPY TO``, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, - specifies whether the first line in the CSV input file contains column names. Defaults to ``false``. - -``DECIMALSEP`` - The character that is used as the decimal point separator. Defaults to ``.``. - -``THOUSANDSSEP`` - The character that is used to separate thousands. Defaults to the empty string. - -``BOOLSTYlE`` - The string literal format for boolean values. Defaults to ``True,False``. - -``NUMPROCESSES`` - The number of child worker processes to create for ``COPY`` tasks. Defaults to a max of 4 for ``COPY FROM`` and 16 - for ``COPY TO``. However, at most (num_cores - 1) processes will be created. - -``MAXATTEMPTS`` - The maximum number of failed attempts to fetch a range of data (when using ``COPY TO``) or insert a chunk of data - (when using ``COPY FROM``) before giving up. Defaults to 5. - -``REPORTFREQUENCY`` - How often status updates are refreshed, in seconds. Defaults to 0.25. - -``RATEFILE`` - An optional file to output rate statistics to. By default, statistics are not output to a file. diff --git a/src/doc/4.0-alpha4/_sources/tools/index.rst.txt b/src/doc/4.0-alpha4/_sources/tools/index.rst.txt deleted file mode 100644 index d28929c84..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Tools -=============== - -This section describes the command line tools provided with Apache Cassandra. - -.. toctree:: - :maxdepth: 3 - - cqlsh - nodetool/nodetool - sstable/index - cassandra_stress diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/assassinate.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/assassinate.rst.txt deleted file mode 100644 index 0ec5dc8f4..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/assassinate.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_assassinate: - -assassinate ------------ - -Usage ---------- - -.. 
include:: assassinate.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/bootstrap.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/bootstrap.rst.txt deleted file mode 100644 index e280fee01..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/bootstrap.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_bootstrap: - -bootstrap ---------- - -Usage ---------- - -.. include:: bootstrap.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/cleanup.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/cleanup.rst.txt deleted file mode 100644 index 70ba8f9f7..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/cleanup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_cleanup: - -cleanup -------- - -Usage ---------- - -.. include:: cleanup.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/clearsnapshot.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/clearsnapshot.rst.txt deleted file mode 100644 index 878147511..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/clearsnapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clearsnapshot: - -clearsnapshot -------------- - -Usage ---------- - -.. include:: clearsnapshot.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/clientstats.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/clientstats.rst.txt deleted file mode 100644 index 7f5e47ab7..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/clientstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clientstats: - -clientstats ------------ - -Usage ---------- - -.. include:: clientstats.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/compact.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/compact.rst.txt deleted file mode 100644 index a26347c57..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/compact.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compact: - -compact -------- - -Usage ---------- - -.. include:: compact.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/compactionhistory.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/compactionhistory.rst.txt deleted file mode 100644 index 896433e86..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/compactionhistory.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionhistory: - -compactionhistory ------------------ - -Usage ---------- - -.. include:: compactionhistory.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/compactionstats.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/compactionstats.rst.txt deleted file mode 100644 index 612822535..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/compactionstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionstats: - -compactionstats ---------------- - -Usage ---------- - -.. include:: compactionstats.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/decommission.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/decommission.rst.txt deleted file mode 100644 index 8b00ff40c..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/decommission.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_decommission: - -decommission ------------- - -Usage ---------- - -.. 
include:: decommission.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/describecluster.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/describecluster.rst.txt deleted file mode 100644 index 55df135de..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/describecluster.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describecluster: - -describecluster ---------------- - -Usage ---------- - -.. include:: describecluster.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/describering.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/describering.rst.txt deleted file mode 100644 index 3a964e8ee..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/describering.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describering: - -describering ------------- - -Usage ---------- - -.. include:: describering.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/disableauditlog.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/disableauditlog.rst.txt deleted file mode 100644 index 4b20b0a9b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/disableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableauditlog: - -disableauditlog ---------------- - -Usage ---------- - -.. include:: disableauditlog.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/disableautocompaction.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/disableautocompaction.rst.txt deleted file mode 100644 index 16549f202..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/disableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableautocompaction: - -disableautocompaction ---------------------- - -Usage ---------- - -.. include:: disableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablebackup.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/disablebackup.rst.txt deleted file mode 100644 index c9537e011..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebackup: - -disablebackup -------------- - -Usage ---------- - -.. include:: disablebackup.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablebinary.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/disablebinary.rst.txt deleted file mode 100644 index 86e981f6e..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebinary: - -disablebinary -------------- - -Usage ---------- - -.. include:: disablebinary.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablefullquerylog.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/disablefullquerylog.rst.txt deleted file mode 100644 index d68cd492e..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablefullquerylog: - -disablefullquerylog -------------------- - -Usage ---------- - -.. include:: disablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablegossip.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/disablegossip.rst.txt deleted file mode 100644 index 76f3d064b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablegossip: - -disablegossip -------------- - -Usage ---------- - -.. 
include:: disablegossip.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablehandoff.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/disablehandoff.rst.txt deleted file mode 100644 index 35f11334b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehandoff: - -disablehandoff --------------- - -Usage ---------- - -.. include:: disablehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablehintsfordc.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/disablehintsfordc.rst.txt deleted file mode 100644 index 892c31ba5..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/disablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehintsfordc: - -disablehintsfordc ------------------ - -Usage ---------- - -.. include:: disablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/disableoldprotocolversions.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/disableoldprotocolversions.rst.txt deleted file mode 100644 index 09d8a150b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/disableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableoldprotocolversions: - -disableoldprotocolversions --------------------------- - -Usage ---------- - -.. include:: disableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/drain.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/drain.rst.txt deleted file mode 100644 index 03093ac7c..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/drain.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_drain: - -drain ------ - -Usage ---------- - -.. include:: drain.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/enableauditlog.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/enableauditlog.rst.txt deleted file mode 100644 index 7936c7a86..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/enableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableauditlog: - -enableauditlog --------------- - -Usage ---------- - -.. include:: enableauditlog.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/enableautocompaction.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/enableautocompaction.rst.txt deleted file mode 100644 index 2ddf60f8e..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/enableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableautocompaction: - -enableautocompaction --------------------- - -Usage ---------- - -.. include:: enableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablebackup.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/enablebackup.rst.txt deleted file mode 100644 index 9a06c6669..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebackup: - -enablebackup ------------- - -Usage ---------- - -.. include:: enablebackup.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablebinary.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/enablebinary.rst.txt deleted file mode 100644 index 5b1c6da72..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebinary: - -enablebinary ------------- - -Usage ---------- - -.. 
include:: enablebinary.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablefullquerylog.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/enablefullquerylog.rst.txt deleted file mode 100644 index ec871c283..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablefullquerylog: - -enablefullquerylog ------------------- - -Usage ---------- - -.. include:: enablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablegossip.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/enablegossip.rst.txt deleted file mode 100644 index ae66186ca..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablegossip: - -enablegossip ------------- - -Usage ---------- - -.. include:: enablegossip.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablehandoff.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/enablehandoff.rst.txt deleted file mode 100644 index d764c3a9a..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehandoff: - -enablehandoff -------------- - -Usage ---------- - -.. include:: enablehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablehintsfordc.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/enablehintsfordc.rst.txt deleted file mode 100644 index 6c42087c3..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/enablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehintsfordc: - -enablehintsfordc ----------------- - -Usage ---------- - -.. include:: enablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/enableoldprotocolversions.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/enableoldprotocolversions.rst.txt deleted file mode 100644 index e44dc377a..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/enableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableoldprotocolversions: - -enableoldprotocolversions -------------------------- - -Usage ---------- - -.. include:: enableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/failuredetector.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/failuredetector.rst.txt deleted file mode 100644 index 25d02b04a..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/failuredetector.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_failuredetector: - -failuredetector ---------------- - -Usage ---------- - -.. include:: failuredetector.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/flush.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/flush.rst.txt deleted file mode 100644 index 45e9b2c5e..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/flush.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_flush: - -flush ------ - -Usage ---------- - -.. include:: flush.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/garbagecollect.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/garbagecollect.rst.txt deleted file mode 100644 index 3af605aad..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/garbagecollect.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_garbagecollect: - -garbagecollect --------------- - -Usage ---------- - -.. 
include:: garbagecollect.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/gcstats.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/gcstats.rst.txt deleted file mode 100644 index 3e4b45930..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/gcstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gcstats: - -gcstats -------- - -Usage ---------- - -.. include:: gcstats.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 6f56f7d6d..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getbatchlogreplaythrottle: - -getbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: getbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getcompactionthreshold.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getcompactionthreshold.rst.txt deleted file mode 100644 index e17f4b2e4..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthreshold: - -getcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: getcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getcompactionthroughput.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getcompactionthroughput.rst.txt deleted file mode 100644 index ef41115ee..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthroughput: - -getcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: getcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getconcurrency.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getconcurrency.rst.txt deleted file mode 100644 index 9b0373480..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrency: - -getconcurrency --------------- - -Usage ---------- - -.. include:: getconcurrency.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getconcurrentcompactors.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getconcurrentcompactors.rst.txt deleted file mode 100644 index 8e137e0ed..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentcompactors: - -getconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: getconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt deleted file mode 100644 index e113d74c5..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentviewbuilders: - -getconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. 
include:: getconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getendpoints.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getendpoints.rst.txt deleted file mode 100644 index ac0d43c7a..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getendpoints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getendpoints: - -getendpoints ------------- - -Usage ---------- - -.. include:: getendpoints.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt deleted file mode 100644 index abdd7e7f0..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getinterdcstreamthroughput: - -getinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: getinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getlogginglevels.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getlogginglevels.rst.txt deleted file mode 100644 index 9936e8d45..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getlogginglevels.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getlogginglevels: - -getlogginglevels ----------------- - -Usage ---------- - -.. include:: getlogginglevels.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getmaxhintwindow.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getmaxhintwindow.rst.txt deleted file mode 100644 index 1a0fe017b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getmaxhintwindow: - -getmaxhintwindow ----------------- - -Usage ---------- - -.. include:: getmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getreplicas.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getreplicas.rst.txt deleted file mode 100644 index 342e72b57..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getreplicas.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getreplicas: - -getreplicas ------------ - -Usage ---------- - -.. include:: getreplicas.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getseeds.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getseeds.rst.txt deleted file mode 100644 index e3156300e..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getseeds: - -getseeds --------- - -Usage ---------- - -.. include:: getseeds.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getsstables.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getsstables.rst.txt deleted file mode 100644 index 1a866ccec..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getsstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getsstables: - -getsstables ------------ - -Usage ---------- - -.. include:: getsstables.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/getstreamthroughput.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/getstreamthroughput.rst.txt deleted file mode 100644 index 6d7dbc422..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/getstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getstreamthroughput: - -getstreamthroughput -------------------- - -Usage ---------- - -.. 
include:: getstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/gettimeout.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/gettimeout.rst.txt deleted file mode 100644 index 9d2e9edd8..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/gettimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettimeout: - -gettimeout ----------- - -Usage ---------- - -.. include:: gettimeout.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/gettraceprobability.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/gettraceprobability.rst.txt deleted file mode 100644 index 3f5783fd0..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/gettraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettraceprobability: - -gettraceprobability -------------------- - -Usage ---------- - -.. include:: gettraceprobability.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/gossipinfo.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/gossipinfo.rst.txt deleted file mode 100644 index cd3513a89..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/gossipinfo.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gossipinfo: - -gossipinfo ----------- - -Usage ---------- - -.. include:: gossipinfo.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/handoffwindow.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/handoffwindow.rst.txt deleted file mode 100644 index 87fe61dc2..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/handoffwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_handoffwindow: - -handoffwindow -------------- - -Usage ---------- - -.. include:: handoffwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/help.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/help.rst.txt deleted file mode 100644 index 8cab88f74..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/help.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_help: - -help ----- - -Usage ---------- - -.. include:: help.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/import.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/import.rst.txt deleted file mode 100644 index 532ba2dcd..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/import.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_import: - -import ------- - -Usage ---------- - -.. include:: import.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/info.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/info.rst.txt deleted file mode 100644 index 74012e93f..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/info.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_info: - -info ----- - -Usage ---------- - -.. include:: info.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/invalidatecountercache.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/invalidatecountercache.rst.txt deleted file mode 100644 index ce1a94d1e..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/invalidatecountercache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatecountercache: - -invalidatecountercache ----------------------- - -Usage ---------- - -.. 
include:: invalidatecountercache.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/invalidatekeycache.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/invalidatekeycache.rst.txt deleted file mode 100644 index 3e80511a6..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/invalidatekeycache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatekeycache: - -invalidatekeycache ------------------- - -Usage ---------- - -.. include:: invalidatekeycache.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/invalidaterowcache.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/invalidaterowcache.rst.txt deleted file mode 100644 index fd84f1d5a..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/invalidaterowcache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidaterowcache: - -invalidaterowcache ------------------- - -Usage ---------- - -.. include:: invalidaterowcache.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/join.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/join.rst.txt deleted file mode 100644 index a2819eb6a..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/join.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_join: - -join ----- - -Usage ---------- - -.. include:: join.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/listsnapshots.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/listsnapshots.rst.txt deleted file mode 100644 index d897cfa2b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/listsnapshots.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_listsnapshots: - -listsnapshots -------------- - -Usage ---------- - -.. include:: listsnapshots.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/move.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/move.rst.txt deleted file mode 100644 index 04b3bdba1..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/move.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_move: - -move ----- - -Usage ---------- - -.. include:: move.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/netstats.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/netstats.rst.txt deleted file mode 100644 index b94a09e7d..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/netstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_netstats: - -netstats --------- - -Usage ---------- - -.. include:: netstats.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/nodetool.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/nodetool.rst.txt deleted file mode 100644 index c20d0ac21..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/nodetool.rst.txt +++ /dev/null @@ -1,256 +0,0 @@ -.. _nodetool - -Nodetool --------- - -Usage ---------- - -usage: nodetool [(-u | --username )] - [(-h | --host )] [(-p | --port )] - [(-pw | --password )] - [(-pwf | --password-file )] - [(-pp | --print-port)] [] - -The most commonly used nodetool commands are: - - :doc:`assassinate` - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode - - :doc:`bootstrap` - Monitor/manage node's bootstrap process - - :doc:`cleanup` - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces - - :doc:`clearsnapshot` - Remove the snapshot with the given name from the given keyspaces. 
If no snapshotName is specified we will remove all snapshots - - :doc:`clientstats` - Print information about connected clients - - :doc:`compact` - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables - - :doc:`compactionhistory` - Print history of compaction - - :doc:`compactionstats` - Print statistics on compactions - - :doc:`decommission` - Decommission the *node I am connecting to* - - :doc:`describecluster` - Print the name, snitch, partitioner and schema version of a cluster - - :doc:`describering` - Shows the token ranges info of a given keyspace - - :doc:`disableauditlog` - Disable the audit log - - :doc:`disableautocompaction` - Disable autocompaction for the given keyspace and table - - :doc:`disablebackup` - Disable incremental backup - - :doc:`disablebinary` - Disable native transport (binary protocol) - - :doc:`disablefullquerylog` - Disable the full query log - - :doc:`disablegossip` - Disable gossip (effectively marking the node down) - - :doc:`disablehandoff` - Disable storing hinted handoffs - - :doc:`disablehintsfordc` - Disable hints for a data center - - :doc:`disableoldprotocolversions` - Disable old protocol versions - - :doc:`drain` - Drain the node (stop accepting writes and flush all tables) - - :doc:`enableauditlog` - Enable the audit log - - :doc:`enableautocompaction` - Enable autocompaction for the given keyspace and table - - :doc:`enablebackup` - Enable incremental backup - - :doc:`enablebinary` - Reenable native transport (binary protocol) - - :doc:`enablefullquerylog` - Enable full query logging, defaults for the options are configured in cassandra.yaml - - :doc:`enablegossip` - Reenable gossip - - :doc:`enablehandoff` - Reenable future hints storing on the current node - - :doc:`enablehintsfordc` - Enable hints for a data center that was previously disabled - - :doc:`enableoldprotocolversions` - Enable old protocol versions - - :doc:`failuredetector` - Shows the failure detector information for the cluster - - :doc:`flush` - Flush one or more tables - - :doc:`garbagecollect` - Remove deleted data from one or more tables - - :doc:`gcstats` - Print GC Statistics - - :doc:`getbatchlogreplaythrottle` - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster. - - :doc:`getcompactionthreshold` - Print min and max compaction thresholds for a given table - - :doc:`getcompactionthroughput` - Print the MB/s throughput cap for compaction in the system - - :doc:`getconcurrency` - Get maximum concurrency for processing stages - - :doc:`getconcurrentcompactors` - Get the number of concurrent compactors in the system. 
- - :doc:`getconcurrentviewbuilders` - Get the number of concurrent view builders in the system - - :doc:`getendpoints` - Print the end points that owns the key - - :doc:`getinterdcstreamthroughput` - Print the Mb/s throughput cap for inter-datacenter streaming in the system - - :doc:`getlogginglevels` - Get the runtime logging levels - - :doc:`getmaxhintwindow` - Print the max hint window in ms - - :doc:`getreplicas` - Print replicas for a given key - - :doc:`getseeds` - Get the currently in use seed node IP list excluding the node IP - - :doc:`getsstables` - Print the sstable filenames that own the key - - :doc:`getstreamthroughput` - Print the Mb/s throughput cap for streaming in the system - - :doc:`gettimeout` - Print the timeout of the given type in ms - - :doc:`gettraceprobability` - Print the current trace probability value - - :doc:`gossipinfo` - Shows the gossip information for the cluster - - :doc:`handoffwindow` - Print current hinted handoff window - - :doc:`help` - Display help information - - :doc:`import` - Import new SSTables to the system - - :doc:`info` - Print node information (uptime, load, ...) - - :doc:`invalidatecountercache` - Invalidate the counter cache - - :doc:`invalidatekeycache` - Invalidate the key cache - - :doc:`invalidaterowcache` - Invalidate the row cache - - :doc:`join` - Join the ring - - :doc:`listsnapshots` - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is total size of the snapshot on disk. Total TrueDiskSpaceUsed does not make any SSTable deduplication. - - :doc:`move` - Move node on the token ring to a new token - - :doc:`netstats` - Print network information on provided host (connecting node by default) - - :doc:`pausehandoff` - Pause hints delivery process - - :doc:`profileload` - Low footprint profiling of activity for a period of time - - :doc:`proxyhistograms` - Print statistic histograms for network operations - - :doc:`rangekeysample` - Shows the sampled keys held across all keyspaces - - :doc:`rebuild` - Rebuild data by streaming from other nodes (similarly to bootstrap) - - :doc:`rebuild_index` - A full rebuild of native secondary indexes for a given table - - :doc:`refresh` - Load newly placed SSTables to the system without restart - - :doc:`refreshsizeestimates` - Refresh system.size_estimates - - :doc:`reloadlocalschema` - Reload local node schema from system tables - - :doc:`reloadseeds` - Reload the seed node list from the seed node provider - - :doc:`reloadssl` - Signals Cassandra to reload SSL certificates - - :doc:`reloadtriggers` - Reload trigger classes - - :doc:`relocatesstables` - Relocates sstables to the correct disk - - :doc:`removenode` - Show status of current node removal, force completion of pending removal or remove provided ID - - :doc:`repair` - Repair one or more tables - - :doc:`repair_admin` - - :doc:`list` - and fail incremental repair sessions - - :doc:`replaybatchlog` - Kick off batchlog replay and wait for finish - - :doc:`resetfullquerylog` - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX - - :doc:`resetlocalschema` - Reset node's local schema and resync - - :doc:`resumehandoff` - Resume hints delivery process - - :doc:`ring` - Print information about the token ring - - :doc:`scrub` - Scrub (rebuild sstables for) one or more tables - - :doc:`setbatchlogreplaythrottle` - Set batchlog replay throttle in KB per second, or 0 to 
disable throttling. This will be reduced proportionally to the number of nodes in the cluster. - - :doc:`setcachecapacity` - Set global key, row, and counter cache capacities (in MB units) - - :doc:`setcachekeystosave` - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable - - :doc:`setcompactionthreshold` - Set min and max compaction thresholds for a given table - - :doc:`setcompactionthroughput` - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling - - :doc:`setconcurrency` - Set maximum concurrency for processing stage - - :doc:`setconcurrentcompactors` - Set number of concurrent compactors in the system. - - :doc:`setconcurrentviewbuilders` - Set the number of concurrent view builders in the system - - :doc:`sethintedhandoffthrottlekb` - Set hinted handoff throttle in kb per second, per delivery thread. - - :doc:`setinterdcstreamthroughput` - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling - - :doc:`setlogginglevel` - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters. - - :doc:`setmaxhintwindow` - Set the specified max hint window in ms - - :doc:`setstreamthroughput` - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling - - :doc:`settimeout` - Set the specified timeout in ms, or 0 to disable timeout - - :doc:`settraceprobability` - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default - - :doc:`sjk` - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk --help' for more information. - - :doc:`snapshot` - Take a snapshot of specified keyspaces or a snapshot of the specified table - - :doc:`status` - Print cluster information (state, load, IDs, ...) - - :doc:`statusautocompaction` - - :doc:`status` - of autocompaction of the given keyspace and table - - :doc:`statusbackup` - Status of incremental backup - - :doc:`statusbinary` - Status of native transport (binary protocol) - - :doc:`statusgossip` - Status of gossip - - :doc:`statushandoff` - Status of storing future hints on the current node - - :doc:`stop` - Stop compaction - - :doc:`stopdaemon` - Stop cassandra daemon - - :doc:`tablehistograms` - Print statistic histograms for a given table - - :doc:`tablestats` - Print statistics on tables - - :doc:`toppartitions` - Sample and print the most active partitions - - :doc:`tpstats` - Print usage statistics of thread pools - - :doc:`truncatehints` - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified. - - :doc:`upgradesstables` - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version) - - :doc:`verify` - Verify (check data checksum for) one or more tables - - :doc:`version` - Print cassandra version - - :doc:`viewbuildstatus` - Show progress of a materialized view build - -See 'nodetool help ' for more information on a specific command. - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/pausehandoff.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/pausehandoff.rst.txt deleted file mode 100644 index 85ea996f9..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/pausehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_pausehandoff: - -pausehandoff ------------- - -Usage ---------- - -.. 
include:: pausehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/profileload.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/profileload.rst.txt deleted file mode 100644 index aff289f9f..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/profileload.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_profileload: - -profileload ------------ - -Usage ---------- - -.. include:: profileload.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/proxyhistograms.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/proxyhistograms.rst.txt deleted file mode 100644 index c4f333fb7..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/proxyhistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_proxyhistograms: - -proxyhistograms ---------------- - -Usage ---------- - -.. include:: proxyhistograms.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/rangekeysample.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/rangekeysample.rst.txt deleted file mode 100644 index 983ce93d0..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/rangekeysample.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rangekeysample: - -rangekeysample --------------- - -Usage ---------- - -.. include:: rangekeysample.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/rebuild.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/rebuild.rst.txt deleted file mode 100644 index 7a94ce4ed..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/rebuild.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild: - -rebuild -------- - -Usage ---------- - -.. include:: rebuild.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/rebuild_index.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/rebuild_index.rst.txt deleted file mode 100644 index a1ea4f5a2..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/rebuild_index.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild_index: - -rebuild_index -------------- - -Usage ---------- - -.. include:: rebuild_index.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/refresh.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/refresh.rst.txt deleted file mode 100644 index f68f040cd..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/refresh.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refresh: - -refresh -------- - -Usage ---------- - -.. include:: refresh.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/refreshsizeestimates.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/refreshsizeestimates.rst.txt deleted file mode 100644 index 2f3610afe..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/refreshsizeestimates.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refreshsizeestimates: - -refreshsizeestimates --------------------- - -Usage ---------- - -.. include:: refreshsizeestimates.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadlocalschema.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadlocalschema.rst.txt deleted file mode 100644 index 7ccc0c5e3..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadlocalschema: - -reloadlocalschema ------------------ - -Usage ---------- - -.. 
include:: reloadlocalschema.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadseeds.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadseeds.rst.txt deleted file mode 100644 index 5c6751d77..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadseeds: - -reloadseeds ------------ - -Usage ---------- - -.. include:: reloadseeds.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadssl.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadssl.rst.txt deleted file mode 100644 index 9781b295b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadssl.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadssl: - -reloadssl ---------- - -Usage ---------- - -.. include:: reloadssl.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadtriggers.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadtriggers.rst.txt deleted file mode 100644 index 2f7959d8c..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/reloadtriggers.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadtriggers: - -reloadtriggers --------------- - -Usage ---------- - -.. include:: reloadtriggers.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/relocatesstables.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/relocatesstables.rst.txt deleted file mode 100644 index 9951d3398..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/relocatesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_relocatesstables: - -relocatesstables ----------------- - -Usage ---------- - -.. include:: relocatesstables.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/removenode.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/removenode.rst.txt deleted file mode 100644 index fe0a041d1..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/removenode.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_removenode: - -removenode ----------- - -Usage ---------- - -.. include:: removenode.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/repair.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/repair.rst.txt deleted file mode 100644 index b43baba71..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/repair.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair: - -repair ------- - -Usage ---------- - -.. include:: repair.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/repair_admin.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/repair_admin.rst.txt deleted file mode 100644 index 1212c399d..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/repair_admin.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair_admin: - -repair_admin ------------- - -Usage ---------- - -.. include:: repair_admin.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/replaybatchlog.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/replaybatchlog.rst.txt deleted file mode 100644 index 073f091db..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/replaybatchlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_replaybatchlog: - -replaybatchlog --------------- - -Usage ---------- - -.. 
include:: replaybatchlog.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/resetfullquerylog.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/resetfullquerylog.rst.txt deleted file mode 100644 index a7661ec98..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/resetfullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetfullquerylog: - -resetfullquerylog ------------------ - -Usage ---------- - -.. include:: resetfullquerylog.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/resetlocalschema.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/resetlocalschema.rst.txt deleted file mode 100644 index cd1b75d33..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/resetlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetlocalschema: - -resetlocalschema ----------------- - -Usage ---------- - -.. include:: resetlocalschema.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/resumehandoff.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/resumehandoff.rst.txt deleted file mode 100644 index 48a0451a3..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/resumehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resumehandoff: - -resumehandoff -------------- - -Usage ---------- - -.. include:: resumehandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/ring.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/ring.rst.txt deleted file mode 100644 index 7b3c195bd..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/ring.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_ring: - -ring ----- - -Usage ---------- - -.. include:: ring.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/scrub.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/scrub.rst.txt deleted file mode 100644 index fc926eb14..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/scrub.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_scrub: - -scrub ------ - -Usage ---------- - -.. include:: scrub.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 2ae628a35..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setbatchlogreplaythrottle: - -setbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: setbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setcachecapacity.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setcachecapacity.rst.txt deleted file mode 100644 index 92c7d6389..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setcachecapacity.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachecapacity: - -setcachecapacity ----------------- - -Usage ---------- - -.. include:: setcachecapacity.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setcachekeystosave.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setcachekeystosave.rst.txt deleted file mode 100644 index 639179f99..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setcachekeystosave.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachekeystosave: - -setcachekeystosave ------------------- - -Usage ---------- - -.. 
include:: setcachekeystosave.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setcompactionthreshold.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setcompactionthreshold.rst.txt deleted file mode 100644 index 3a3e88b08..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthreshold: - -setcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: setcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setcompactionthroughput.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setcompactionthroughput.rst.txt deleted file mode 100644 index 27185da30..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthroughput: - -setcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: setcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setconcurrency.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setconcurrency.rst.txt deleted file mode 100644 index 75b09531f..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrency: - -setconcurrency --------------- - -Usage ---------- - -.. include:: setconcurrency.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setconcurrentcompactors.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setconcurrentcompactors.rst.txt deleted file mode 100644 index 75969de4e..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentcompactors: - -setconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: setconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt deleted file mode 100644 index 26f53a171..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentviewbuilders: - -setconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: setconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt deleted file mode 100644 index 9986ca29a..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sethintedhandoffthrottlekb: - -sethintedhandoffthrottlekb --------------------------- - -Usage ---------- - -.. include:: sethintedhandoffthrottlekb.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt deleted file mode 100644 index ed406a7bc..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setinterdcstreamthroughput: - -setinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. 
include:: setinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setlogginglevel.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setlogginglevel.rst.txt deleted file mode 100644 index eaa4030c3..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setlogginglevel.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setlogginglevel: - -setlogginglevel ---------------- - -Usage ---------- - -.. include:: setlogginglevel.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setmaxhintwindow.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setmaxhintwindow.rst.txt deleted file mode 100644 index 0c62c3289..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setmaxhintwindow: - -setmaxhintwindow ----------------- - -Usage ---------- - -.. include:: setmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/setstreamthroughput.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/setstreamthroughput.rst.txt deleted file mode 100644 index 76447f112..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/setstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setstreamthroughput: - -setstreamthroughput -------------------- - -Usage ---------- - -.. include:: setstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/settimeout.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/settimeout.rst.txt deleted file mode 100644 index 4ec9a6e4d..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/settimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settimeout: - -settimeout ----------- - -Usage ---------- - -.. include:: settimeout.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/settraceprobability.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/settraceprobability.rst.txt deleted file mode 100644 index a95b48560..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/settraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settraceprobability: - -settraceprobability -------------------- - -Usage ---------- - -.. include:: settraceprobability.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/sjk.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/sjk.rst.txt deleted file mode 100644 index 19bf1d605..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/sjk.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sjk: - -sjk ---- - -Usage ---------- - -.. include:: sjk.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/snapshot.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/snapshot.rst.txt deleted file mode 100644 index 097a655b2..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/snapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_snapshot: - -snapshot --------- - -Usage ---------- - -.. include:: snapshot.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/status.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/status.rst.txt deleted file mode 100644 index 4d3050ea1..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/status.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_status: - -status ------- - -Usage ---------- - -.. 
include:: status.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/statusautocompaction.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/statusautocompaction.rst.txt deleted file mode 100644 index 3748e0e4e..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/statusautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusautocompaction: - -statusautocompaction --------------------- - -Usage ---------- - -.. include:: statusautocompaction.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/statusbackup.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/statusbackup.rst.txt deleted file mode 100644 index 6546ec07f..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/statusbackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbackup: - -statusbackup ------------- - -Usage ---------- - -.. include:: statusbackup.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/statusbinary.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/statusbinary.rst.txt deleted file mode 100644 index 0bb5011c3..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/statusbinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbinary: - -statusbinary ------------- - -Usage ---------- - -.. include:: statusbinary.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/statusgossip.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/statusgossip.rst.txt deleted file mode 100644 index 7dc57eda7..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/statusgossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusgossip: - -statusgossip ------------- - -Usage ---------- - -.. include:: statusgossip.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/statushandoff.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/statushandoff.rst.txt deleted file mode 100644 index aa1c4eb6b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/statushandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statushandoff: - -statushandoff -------------- - -Usage ---------- - -.. include:: statushandoff.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/stop.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/stop.rst.txt deleted file mode 100644 index 1e44dbe79..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/stop.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stop: - -stop ----- - -Usage ---------- - -.. include:: stop.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/stopdaemon.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/stopdaemon.rst.txt deleted file mode 100644 index 4ae951098..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/stopdaemon.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stopdaemon: - -stopdaemon ----------- - -Usage ---------- - -.. include:: stopdaemon.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/tablehistograms.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/tablehistograms.rst.txt deleted file mode 100644 index 79d2b4ccb..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/tablehistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablehistograms: - -tablehistograms ---------------- - -Usage ---------- - -.. 
include:: tablehistograms.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/tablestats.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/tablestats.rst.txt deleted file mode 100644 index 5b2c02d98..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/tablestats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablestats: - -tablestats ----------- - -Usage ---------- - -.. include:: tablestats.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/toppartitions.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/toppartitions.rst.txt deleted file mode 100644 index 711816313..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/toppartitions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_toppartitions: - -toppartitions -------------- - -Usage ---------- - -.. include:: toppartitions.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/tpstats.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/tpstats.rst.txt deleted file mode 100644 index c6b662012..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/tpstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tpstats: - -tpstats -------- - -Usage ---------- - -.. include:: tpstats.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/truncatehints.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/truncatehints.rst.txt deleted file mode 100644 index 4b75391a6..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/truncatehints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_truncatehints: - -truncatehints -------------- - -Usage ---------- - -.. include:: truncatehints.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/upgradesstables.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/upgradesstables.rst.txt deleted file mode 100644 index 505cc148a..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/upgradesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_upgradesstables: - -upgradesstables ---------------- - -Usage ---------- - -.. include:: upgradesstables.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/verify.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/verify.rst.txt deleted file mode 100644 index dbd152cfb..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/verify.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_verify: - -verify ------- - -Usage ---------- - -.. include:: verify.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/version.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/version.rst.txt deleted file mode 100644 index fca4e3f44..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/version.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_version: - -version -------- - -Usage ---------- - -.. include:: version.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/nodetool/viewbuildstatus.rst.txt b/src/doc/4.0-alpha4/_sources/tools/nodetool/viewbuildstatus.rst.txt deleted file mode 100644 index 758fe502b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/nodetool/viewbuildstatus.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_viewbuildstatus: - -viewbuildstatus ---------------- - -Usage ---------- - -.. 
include:: viewbuildstatus.txt - :literal: - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/index.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/index.rst.txt deleted file mode 100644 index b9e483f45..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -SSTable Tools -============= - -This section describes the functionality of the various sstable tools. - -Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped. - -.. toctree:: - :maxdepth: 2 - - sstabledump - sstableexpiredblockers - sstablelevelreset - sstableloader - sstablemetadata - sstableofflinerelevel - sstablerepairedset - sstablescrub - sstablesplit - sstableupgrade - sstableutil - sstableverify - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstabledump.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstabledump.rst.txt deleted file mode 100644 index 8f38afa09..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstabledump.rst.txt +++ /dev/null @@ -1,294 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstabledump ------------ - -Dump contents of a given SSTable to standard output in JSON format. - -You must supply exactly one sstable. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
- -Usage -^^^^^ -sstabledump - -=================================== ================================================================================ --d CQL row per line internal representation --e Enumerate partition keys only --k Partition key --x Excluded partition key(s) --t Print raw timestamps instead of iso8601 date strings --l Output each row as a separate JSON object -=================================== ================================================================================ - -If necessary, use sstableutil first to find out the sstables used by a table. - -Dump entire table -^^^^^^^^^^^^^^^^^ - -Dump the entire table without any options. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26 - - cat eventlog_dump_2018Jul26 - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - ] - -Dump table in a more manageable format -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. 
ref: https://issues.apache.org/jira/browse/CASSANDRA-13848 - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines - - cat eventlog_dump_2018Jul26_justlines - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Dump only keys -^^^^^^^^^^^^^^ - -Dump only the keys by using the -e option. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys - - cat eventlog_dump_2018Jul26b - [ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ] - -Dump row for a single key -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a single key using the -k option. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey - - cat eventlog_dump_2018Jul26_singlekey - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Exclude a key or keys in dump of rows -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a table except for the rows excluded with the -x option. Multiple keys can be used. 
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e > eventlog_dump_2018Jul26_excludekeys - - cat eventlog_dump_2018Jul26_excludekeys - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Display raw timestamps -^^^^^^^^^^^^^^^^^^^^^^ - -By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times - - cat eventlog_dump_2018Jul26_times - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "1532118147028809" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - - -Display internal structure in output -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump the table in a format that reflects the internal structure. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d - - cat eventlog_dump_2018Jul26_d - [3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]: | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711] - [d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]: | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522] - [cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]: | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809] - - - - - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableexpiredblockers.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstableexpiredblockers.rst.txt deleted file mode 100644 index ec837944c..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableexpiredblockers.rst.txt +++ /dev/null @@ -1,48 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -sstableexpiredblockers ----------------------- - -During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable. - -This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-10015 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ - -sstableexpiredblockers
- -Output blocked sstables -^^^^^^^^^^^^^^^^^^^^^^^ - -If the sstables exist for the table, but no tables have older data than the newest tombstone in an expired sstable, the script will return nothing. - -Otherwise, the script will return ` blocks <#> expired sstables from getting dropped` followed by a list of the blocked sstables. - -Example:: - - sstableexpiredblockers keyspace1 standard1 - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablelevelreset.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstablelevelreset.rst.txt deleted file mode 100644 index 7069094dd..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablelevelreset.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablelevelreset ------------------ - -If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration. - -See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5271 - -Usage -^^^^^ - -sstablelevelreset --really-reset
- -The really-reset flag is required, to ensure this intrusive command is not run accidentally. - -Table not found -^^^^^^^^^^^^^^^ - -If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error. - -Example:: - - ColumnFamily not found: keyspace/evenlog. - -Table has no sstables -^^^^^^^^^^^^^^^^^^^^^ - -Example:: - - Found no sstables, did you give the correct keyspace/table? - - -Table already at level 0 -^^^^^^^^^^^^^^^^^^^^^^^^ - -The script will not set the level if it is already set to 0. - -Example:: - - Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0 - -Table levels reduced to 0 -^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the level is not already 0, then this will reset it to 0. - -Example:: - - sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level - SSTable Level: 1 - - sstablelevelreset --really-reset keyspace eventlog - Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - - sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level - SSTable Level: 0 - - - - - - - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableloader.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstableloader.rst.txt deleted file mode 100644 index a9b37342c..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableloader.rst.txt +++ /dev/null @@ -1,273 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableloader ---------------- - -Bulk-load the sstables found in the directory to the configured cluster. The parent directories of are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files. - -Several of the options listed below don't work quite as intended, and in those cases, workarounds are mentioned for specific use cases. - -To avoid having the sstable files to be loaded compacted while reading them, place the files in an alternate keyspace/table path than the data directory. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-1278 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
- -Usage -^^^^^ - -sstableloader - -=================================================== ================================================================================ --d, --nodes Required. Try to connect to these hosts (comma-separated) - initially for ring information --u, --username username for Cassandra authentication --pw, --password password for Cassandra authentication --p, --port port used for native connection (default 9042) --sp, --storage-port port used for internode communication (default 7000) --ssp, --ssl-storage-port port used for TLS internode communication (default 7001) ---no-progress don't display progress --t, --throttle throttle speed in Mbits (default unlimited) --idct, --inter-dc-throttle inter-datacenter throttle speed in Mbits (default unlimited) --cph, --connections-per-host number of concurrent connections-per-host --i, --ignore don't stream to this (comma separated) list of nodes --alg, --ssl-alg Client SSL: algorithm (default: SunX509) --ciphers, --ssl-ciphers Client SSL: comma-separated list of encryption suites to use --ks, --keystore Client SSL: full path to keystore --kspw, --keystore-password Client SSL: password of the keystore --st, --store-type Client SSL: type of store --ts, --truststore Client SSL: full path to truststore --tspw, --truststore-password Client SSL: password of the truststore --prtcl, --ssl-protocol Client SSL: connections protocol to use (default: TLS) --ap, --auth-provider custom AuthProvider class name for cassandra authentication --f, --conf-path cassandra.yaml file path for streaming throughput and client/server SSL --v, --verbose verbose output --h, --help display this help message -=================================================== ================================================================================ - -You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options. - -Load sstables from a Snapshot -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Copy the snapshot sstables into an accessible directory and use sstableloader to restore them. - -Example:: - - cp snapshots/1535397029191/* /path/to/keyspace1/standard1/ - - sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4700000 - Total duration (ms): : 4390 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -The -d or --nodes option is required, or the script will not run. 
- -Example:: - - sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Initial hosts must be specified (-d) - -Use a Config File for SSL Clusters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool to the cassandra.yaml with the relevant server_encryption_options (e.g., truststore location, algorithm). This will work better than passing individual ssl options shown above to sstableloader on the command line. - -Example:: - - sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 9.165KiB/s (avg: 9.165KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 5.147MiB/s (avg: 18.299KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 9.751MiB/s (avg: 27.423KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 8.203MiB/s (avg: 36.524KiB/s) - ... - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 9356 ms - Average transfer rate : 480.105KiB/s - Peak transfer rate : 586.410KiB/s - -Hide Progress Output -^^^^^^^^^^^^^^^^^^^^ - -To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option. - -Example:: - - sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2] - -Get More Detail -^^^^^^^^^^^^^^^ - -Using the --verbose option will provide much more progress output. 
- -Example:: - - sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 12.056KiB/s (avg: 12.056KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 9.092MiB/s (avg: 24.081KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 18.832MiB/s (avg: 36.099KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 2.253MiB/s (avg: 47.882KiB/s) - progress: [/172.17.0.2]0:0/1 7 % total: 7% 6.388MiB/s (avg: 59.743KiB/s) - progress: [/172.17.0.2]0:0/1 8 % total: 8% 14.606MiB/s (avg: 71.635KiB/s) - progress: [/172.17.0.2]0:0/1 9 % total: 9% 8.880MiB/s (avg: 83.465KiB/s) - progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s) - progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s) - progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s) - progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s) - progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s) - progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s) - progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s) - progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s) - progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s) - progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s) - progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s) - progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s) - progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s) - progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s) - progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s) - progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s) - progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s) - progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s) - progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s) - progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s) - progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s) - progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s) - progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s) - progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s) - progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s) - progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s) - progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s) - progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s) - progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s) - progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s) - progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s) - progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s) - progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s) - progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s 
(avg: 462.545KiB/s) - progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s) - progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s) - progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s) - progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s) - progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s) - progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s) - progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s) - progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s) - progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s) - progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s) - progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s) - progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s) - progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s) - progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s) - progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s) - progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s) - progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s) - progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s) - progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s) - progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s) - progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s) - progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s) - progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s) - progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s) - progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s) - progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s) - progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s) - progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s) - progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 6706 ms - Average transfer rate : 669.835KiB/s - Peak transfer rate : 767.802KiB/s - - -Throttling Load -^^^^^^^^^^^^^^^ - -To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below. 
- -Example:: - - sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 0 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 37634 - Average transfer rate (MB/s): : 0 - Peak transfer rate (MB/s): : 0 - -Speeding up Load -^^^^^^^^^^^^^^^^ - -To speed up the load process, the number of connections per host can be increased. - -Example:: - - sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 100 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 3486 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -This small data set doesn't benefit much from the increase in connections per host, but note that the total duration has decreased in this example. - - - - - - - - - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablemetadata.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstablemetadata.rst.txt deleted file mode 100644 index 0a7a42211..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablemetadata.rst.txt +++ /dev/null @@ -1,300 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablemetadata ---------------- - -Print information about an sstable from the related Statistics.db and Summary.db files to standard output. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
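For a quick survey of how a few of these fields vary across every sstable of a table, one illustrative loop is sketched below; the data path, table name and grepped field names are placeholders taken from the example output shown further down::

    for f in /var/lib/cassandra/data/keyspace1/standard1-*/mc-*-big-Data.db; do
        echo "== $f"
        sstablemetadata "$f" | grep -E "SSTable Level|Repaired at|Estimated droppable tombstones"
    done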
- -Usage -^^^^^ - -sstablemetadata - -========================= ================================================================================ ---gc_grace_seconds The gc_grace_seconds to use when calculating droppable tombstones -========================= ================================================================================ - -Print all the metadata -^^^^^^^^^^^^^^^^^^^^^^ - -Run sstablemetadata against the *Data.db file(s) related to a table. If necessary, find the *Data.db file(s) using sstableutil. - -Example:: - - sstableutil keyspace1 standard1 | grep Data - /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - SSTable: /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big - Partitioner: org.apache.cassandra.dht.Murmur3Partitioner - Bloom Filter FP chance: 0.010000 - Minimum timestamp: 1535025576141000 - Maximum timestamp: 1535025604309000 - SSTable min local deletion time: 2147483647 - SSTable max local deletion time: 2147483647 - Compressor: org.apache.cassandra.io.compress.LZ4Compressor - TTL min: 86400 - TTL max: 86400 - First token: -9223004712949498654 (key=39373333373831303130) - Last token: 9222554117157811897 (key=4f3438394e39374d3730) - Estimated droppable tombstones: 0.9188263888888889 - SSTable Level: 0 - Repaired at: 0 - Replay positions covered: {CommitLogPosition(segmentId=1535025390651, position=226400)=CommitLogPosition(segmentId=1535025390651, position=6849139)} - totalColumnsSet: 100000 - totalRows: 20000 - Estimated tombstone drop times: - 1535039100: 80390 - 1535039160: 5645 - 1535039220: 13965 - Count Row Size Cell Count - 1 0 0 - 2 0 0 - 3 0 0 - 4 0 0 - 5 0 20000 - 6 0 0 - 7 0 0 - 8 0 0 - 10 0 0 - 12 0 0 - 14 0 0 - 17 0 0 - 20 0 0 - 24 0 0 - 29 0 0 - 35 0 0 - 42 0 0 - 50 0 0 - 60 0 0 - 72 0 0 - 86 0 0 - 103 0 0 - 124 0 0 - 149 0 0 - 179 0 0 - 215 0 0 - 258 20000 0 - 310 0 0 - 372 0 0 - 446 0 0 - 535 0 0 - 642 0 0 - 770 0 0 - 924 0 0 - 1109 0 0 - 1331 0 0 - 1597 0 0 - 1916 0 0 - 2299 0 0 - 2759 0 0 - 3311 0 0 - 3973 0 0 - 4768 0 0 - 5722 0 0 - 6866 0 0 - 8239 0 0 - 9887 0 0 - 11864 0 0 - 14237 0 0 - 17084 0 0 - 20501 0 0 - 24601 0 0 - 29521 0 0 - 35425 0 0 - 42510 0 0 - 51012 0 0 - 61214 0 0 - 73457 0 0 - 88148 0 0 - 105778 0 0 - 126934 0 0 - 152321 0 0 - 182785 0 0 - 219342 0 0 - 263210 0 0 - 315852 0 0 - 379022 0 0 - 454826 0 0 - 545791 0 0 - 654949 0 0 - 785939 0 0 - 943127 0 0 - 1131752 0 0 - 1358102 0 0 - 1629722 0 0 - 1955666 0 0 - 2346799 0 0 - 2816159 0 0 - 3379391 0 0 - 4055269 0 0 - 4866323 0 0 - 5839588 0 0 - 7007506 0 0 - 8409007 0 0 - 10090808 0 0 - 12108970 0 0 - 14530764 0 0 - 17436917 0 0 - 20924300 0 0 - 25109160 0 0 - 30130992 0 0 - 36157190 0 0 - 43388628 0 0 - 52066354 0 0 - 62479625 0 0 - 74975550 0 0 - 89970660 0 0 - 107964792 0 0 - 129557750 0 0 - 155469300 0 0 - 186563160 0 0 - 223875792 0 0 - 268650950 0 0 - 322381140 0 0 - 386857368 0 0 - 464228842 0 0 - 557074610 0 0 - 668489532 0 0 - 802187438 0 0 - 962624926 0 0 - 1155149911 0 0 - 1386179893 0 0 - 1663415872 0 0 - 1996099046 0 0 - 2395318855 0 0 - 2874382626 0 - 3449259151 0 - 4139110981 0 - 4966933177 0 - 5960319812 0 - 7152383774 0 - 8582860529 0 - 10299432635 0 - 12359319162 0 - 14831182994 0 - 17797419593 0 - 21356903512 0 - 25628284214 0 - 30753941057 0 - 36904729268 0 - 44285675122 0 - 53142810146 0 - 63771372175 0 - 76525646610 0 - 91830775932 0 - 110196931118 0 - 132236317342 0 
- 158683580810 0 - 190420296972 0 - 228504356366 0 - 274205227639 0 - 329046273167 0 - 394855527800 0 - 473826633360 0 - 568591960032 0 - 682310352038 0 - 818772422446 0 - 982526906935 0 - 1179032288322 0 - 1414838745986 0 - Estimated cardinality: 20196 - EncodingStats minTTL: 0 - EncodingStats minLocalDeletionTime: 1442880000 - EncodingStats minTimestamp: 1535025565275000 - KeyType: org.apache.cassandra.db.marshal.BytesType - ClusteringTypes: [org.apache.cassandra.db.marshal.UTF8Type] - StaticColumns: {C3:org.apache.cassandra.db.marshal.BytesType, C4:org.apache.cassandra.db.marshal.BytesType, C0:org.apache.cassandra.db.marshal.BytesType, C1:org.apache.cassandra.db.marshal.BytesType, C2:org.apache.cassandra.db.marshal.BytesType} - RegularColumns: {} - -Specify gc grace seconds -^^^^^^^^^^^^^^^^^^^^^^^^ - -To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn't access the schema directly, this is a way to more accurately estimate droppable tombstones -- for example, if you pass in gc_grace_seconds matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the curent machine time (in seconds). - -ref: https://issues.apache.org/jira/browse/CASSANDRA-12208 - -Example:: - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4 - Estimated tombstone drop times: - 1536599100: 1 - 1536599640: 1 - 1536599700: 2 - - echo $(date +%s) - 1536602005 - - # if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 4.0E-5 - - # if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 9.61111111111111E-6 - - # if gc_grace_seconds was configured at 100, none of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 0.0 - -Explanation of each value printed above -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -=================================== ================================================================================ - Value Explanation -=================================== ================================================================================ -SSTable prefix of the sstable filenames related to this sstable -Partitioner partitioner type used to distribute data across nodes; defined in cassandra.yaml -Bloom Filter FP precision of Bloom filter used in reads; defined in the table definition -Minimum timestamp minimum timestamp of any entry in this sstable, in epoch microseconds -Maximum timestamp maximum timestamp of any entry in this sstable, in epoch microseconds -SSTable min local deletion time minimum timestamp of deletion date, based on TTL, in epoch seconds -SSTable max local deletion time maximum timestamp of deletion date, based on TTL, in epoch seconds -Compressor blank (-) by 
default; if not blank, indicates type of compression enabled on the table -TTL min time-to-live in seconds; default 0 unless defined in the table definition -TTL max time-to-live in seconds; default 0 unless defined in the table definition -First token lowest token and related key found in the sstable summary -Last token highest token and related key found in the sstable summary -Estimated droppable tombstones ratio of tombstones to columns, using configured gc grace seconds if relevant -SSTable level compaction level of this sstable, if leveled compaction (LCS) is used -Repaired at the timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds -Replay positions covered the interval of time and commitlog positions related to this sstable -totalColumnsSet number of cells in the table -totalRows number of rows in the table -Estimated tombstone drop times approximate number of rows that will expire, ordered by epoch seconds -Count Row Size Cell Count two histograms in two columns; one represents distribution of Row Size - and the other represents distribution of Cell Count -Estimated cardinality an estimate of unique values, used for compaction -EncodingStats* minTTL in epoch milliseconds -EncodingStats* minLocalDeletionTime in epoch seconds -EncodingStats* minTimestamp in epoch microseconds -KeyType the type of partition key, useful in reading and writing data - from/to storage; defined in the table definition -ClusteringTypes the type of clustering key, useful in reading and writing data - from/to storage; defined in the table definition -StaticColumns a list of the shared columns in the table -RegularColumns a list of non-static, non-key columns in the table -=================================== ================================================================================ -* For the encoding stats values, the delta of this and the current epoch time is used when encoding and storing data in the most optimal way. - - - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableofflinerelevel.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstableofflinerelevel.rst.txt deleted file mode 100644 index c031d2987..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableofflinerelevel.rst.txt +++ /dev/null @@ -1,95 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableofflinerelevel ---------------------- - -When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-8301 - -The way this is done is: sstables are storted by their last token. 
-Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size)::
-
-    L3 [][][][][][][][][][][]
-    L2 [ ][ ][ ][ ]
-    L1 [ ][ ]
-    L0 [ ]
-
-The sstables will look like this after being dropped to L0 and sorted by last token (and, to illustrate overlap, the overlapping ones are put on a new line)::
-
-    [][][]
-    [ ][][][]
-    [ ]
-    [ ]
-    ...
-
-Then, we start iterating from the smallest last token and add all sstables that do not cause an overlap to a level. We reconstruct the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below.
-
-If we end up with more levels than expected, we put all levels exceeding the expected count in L0; for example, the original L0 files will most likely be put in a level of their own since they most often overlap many other sstables.
-
-Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.
-
-Usage
-^^^^^
-
-sstableofflinerelevel [--dry-run] <keyspace> <table>
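As an illustrative (not built-in) way to survey a whole keyspace, the dry-run mode can be looped over every table directory. This sketch assumes the default data directory layout used in the examples below and table names without hyphens; the ``keyspace`` name is a placeholder::

    for dir in /var/lib/cassandra/data/keyspace/*/; do
        table=$(basename "$dir" | cut -d- -f1)
        sstableofflinerelevel --dry-run keyspace "$table"
    done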
- -Doing a dry run -^^^^^^^^^^^^^^^ - -Use the --dry-run option to see the current level distribution and predicted level after the change. - -Example:: - - sstableofflinerelevel --dry-run keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - Potential leveling: - L0=1 - L1=1 - -Running a relevel -^^^^^^^^^^^^^^^^^ - -Example:: - - sstableofflinerelevel keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - New leveling: - L0=1 - L1=1 - -Keyspace or table not found -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If an invalid keyspace and/or table is provided, an exception will be thrown. - -Example:: - - sstableofflinerelevel --dry-run keyspace evenlog - - Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog - at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96) - - - - - - - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablerepairedset.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstablerepairedset.rst.txt deleted file mode 100644 index ebacef335..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablerepairedset.rst.txt +++ /dev/null @@ -1,79 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablerepairedset ------------------- - -Repairs can take a very long time in some environments, for large sizes of data. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can be run on only un-repaired sstables if desired. - -Note that running a repair (e.g., via nodetool repair) doesn't set the status of this metadata. Only setting the status of this metadata via this tool does. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5351 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablerepairedset --really-set [-f | ] - -=================================== ================================================================================ ---really-set required if you want to really set the status ---is-repaired set the repairedAt status to the last modified time ---is-unrepaired set the repairedAt status to 0 --f use a file containing a list of sstables as the input -=================================== ================================================================================ - -Set a lot of sstables to unrepaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are many ways to do this programmatically. This way would likely include variables for the keyspace and table. 
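For instance, a hypothetical parameterised variant of the command shown in the following example, with the keyspace and table held in shell variables (the values are placeholders)::

    KEYSPACE=keyspace1
    TABLE=standard1
    # mark every sstable of the keyspace/table as unrepaired
    find /var/lib/cassandra/data/"$KEYSPACE"/"$TABLE"-*/ -name "*Data.db" -print0 |
        xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %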
- -Example:: - - find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired % - -Set one to many sstables to repaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice. - -Example:: - - nodetool repair keyspace1 standard1 - find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired % - -Print metadata showing repaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -sstablemetadata can be used to view the status set or unset using this command. - -Example: - - sstablerepairedset --really-set --is-repaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at" - Repaired at: 1534443974000 - - sstablerepairedset --really-set --is-unrepaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at" - Repaired at: 0 - -Using command in a script -^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you know you ran repair 2 weeks ago, you can do something like the following:: - - sstablerepairset --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14) - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablescrub.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstablescrub.rst.txt deleted file mode 100644 index 0bbda9f32..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablescrub.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablescrub ------------- - -Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-4321 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablescrub
- -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --m,--manifest-check only check and repair the leveled manifest, without actually scrubbing the sstables --n,--no-validate do not validate columns using column validator --r,--reinsert-overflowed-ttl Rewrites rows with overflowed expiration date affected by CASSANDRA-14092 - with the maximum supported expiration date of 2038-01-19T03:14:06+00:00. The rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows. --s,--skip-corrupted skip corrupt rows in counter tables --v,--verbose verbose output -=================================== ================================================================================ - -Basic Scrub -^^^^^^^^^^^ - -The scrub without options will do a snapshot first, then write all non-corrupted files to a new sstable. - -Example:: - - sstablescrub keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped - Checking leveled manifest - -Scrub without Validation -^^^^^^^^^^^^^^^^^^^^^^^^ -ref: https://issues.apache.org/jira/browse/CASSANDRA-9406 - -Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but not corrupt. This data usually doesn not present any errors to the client. - -Example:: - - sstablescrub --no-validate keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned - -Skip Corrupted Counter Tables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5930 - -If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+. - -Example:: - - sstablescrub --skip-corrupted keyspace1 counter1 - -Dealing with Overflow Dates -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-14092 - -Using the option --reinsert-overflowed-ttl allows a rewriting of rows that had a max TTL going over the maximum (causing an overflow). - -Example:: - - sstablescrub --reinsert-overflowed-ttl keyspace1 counter1 - -Manifest Check -^^^^^^^^^^^^^^ - -As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata. - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablesplit.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstablesplit.rst.txt deleted file mode 100644 index 5386fa48b..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstablesplit.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -.. 
Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablesplit ------------- - -Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-4766 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablesplit - -=================================== ================================================================================ ---debug display stack traces --h, --help display this help message ---no-snapshot don't snapshot the sstables before splitting --s, --size maximum size in MB for the output sstables (default: 50) -=================================== ================================================================================ - -This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped. - -Split a File -^^^^^^^^^^^^ - -Split a large sstable into smaller sstables. By default, unless the option --no-snapshot is added, a snapshot will be done of the original sstable and placed in the snapshots folder. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - - Pre-split sstables snapshotted into snapshot pre-split-1533144514795 - -Split Multiple Files -^^^^^^^^^^^^^^^^^^^^ - -Wildcards can be used in the filename portion of the command to split multiple files. - -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1* - -Attempt to Split a Small File -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the file is already smaller than the split size provided, the sstable will not be split. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB) - No sstables needed splitting. - -Split a File into Specified Size -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB). Specify only the number, not the units. For example --size 50 is correct, but --size 50MB is not. 
- -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db - Pre-split sstables snapshotted into snapshot pre-split-1533144996008 - - -Split Without Snapshot -^^^^^^^^^^^^^^^^^^^^^^ - -By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it. - -Example:: - - sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db - -Note: There is no output, but you can see the results in your file system. - - - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableupgrade.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstableupgrade.rst.txt deleted file mode 100644 index 66386aca1..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableupgrade.rst.txt +++ /dev/null @@ -1,137 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableupgrade --------------- - -Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version. - -The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableupgrade
[snapshot_name] - -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --k,--keep-source do not delete the source sstables -=================================== ================================================================================ - -Rewrite tables to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Start with a set of sstables in one version of Cassandra:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables:: - - sstableupgrade keyspace1 standard1 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 13:48 backups - -rw-r--r-- 1 user wheel 292 Aug 22 13:48 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4599475 Aug 22 13:48 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:48 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 13:48 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330807 Aug 22 13:48 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 13:48 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 13:48 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 13:48 mc-2-big-TOC.txt - -Rewrite tables to the current Cassandra version, and keep tables in old version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Again, starting with a set of sstables in one version:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:: - - sstableupgrade keyspace1 standard1 -k - Found 1 sstables that need upgrading. 
- Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 14:00 backups - -rw-r--r--@ 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r--@ 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r--@ 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r--@ 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r--@ 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r--@ 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r--@ 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r--@ 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -rw-r--r-- 1 user wheel 292 Aug 22 14:01 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4596370 Aug 22 14:01 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 14:01 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 14:01 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330801 Aug 22 14:01 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 14:01 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 14:01 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 14:01 mc-2-big-TOC.txt - - -Rewrite a snapshot to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Find the snapshot name:: - - nodetool listsnapshots - - Snapshot Details: - Snapshot name Keyspace name Column family name True size Size on disk - ... - 1534962986979 keyspace1 standard1 5.85 MB 5.85 MB - -Then rewrite the snapshot:: - - sstableupgrade keyspace1 standard1 1534962986979 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete. - - - - - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableutil.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstableutil.rst.txt deleted file mode 100644 index 30becd0e0..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableutil.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -sstableutil ------------ - -List sstable files for the provided table. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-7066 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableutil
- -=================================== ================================================================================ --c, --cleanup clean up any outstanding transactions --d, --debug display stack traces --h, --help display this help message --o, --oplog include operation logs --t, --type all (list all files, final or temporary), tmp (list temporary files only), - final (list final files only), --v, --verbose verbose output -=================================== ================================================================================ - -List all sstables -^^^^^^^^^^^^^^^^^ - -The basic command lists the sstables associated with a given keyspace/table. - -Example:: - - sstableutil keyspace eventlog - Listing files... - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt - -List only temporary sstables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `tmp` will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra. - -List only final sstables -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `final` will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option. - -Include transaction logs -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -o option will include transaction logs in the listing, in the format above. - -Clean up sstables -^^^^^^^^^^^^^^^^^ - -Using the -c option removes any transactions left over from incomplete writes or compactions. - -From the 3.0 upgrade notes: - -New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. 
Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix "add:" or "remove:". They also contain a special line "commit", only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the "add" prefix) and delete the old sstables (those with the "remove" prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first. - - - diff --git a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableverify.rst.txt b/src/doc/4.0-alpha4/_sources/tools/sstable/sstableverify.rst.txt deleted file mode 100644 index dad3f4487..000000000 --- a/src/doc/4.0-alpha4/_sources/tools/sstable/sstableverify.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableverify -------------- - -Check sstable(s) for errors or corruption, for the provided table. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5791 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableverify
- -=================================== ================================================================================ ---debug display stack traces --e, --extended extended verification --h, --help display this help message --v, --verbose verbose output -=================================== ================================================================================ - -Basic Verification -^^^^^^^^^^^^^^^^^^ - -This is the basic verification. It is not a very quick process, and uses memory. You might need to increase your memory settings if you have many sstables. - -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - -Extended Verification -^^^^^^^^^^^^^^^^^^^^^ - -During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time. - -Example:: - - root@DC1C1:/# sstableverify -e keyspace eventlog - WARN 14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully - -Corrupted File -^^^^^^^^^^^^^^ - -Corrupted files are listed if they are detected by the script. 
- -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db - -A similar (but less verbose) tool will show the suggested actions:: - - nodetool verify keyspace eventlog - error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair - - - diff --git a/src/doc/4.0-alpha4/_sources/troubleshooting/finding_nodes.rst.txt b/src/doc/4.0-alpha4/_sources/troubleshooting/finding_nodes.rst.txt deleted file mode 100644 index df5e16c93..000000000 --- a/src/doc/4.0-alpha4/_sources/troubleshooting/finding_nodes.rst.txt +++ /dev/null @@ -1,149 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Find The Misbehaving Nodes -========================== - -The first step to troubleshooting a Cassandra issue is to use error messages, -metrics and monitoring information to identify if the issue lies with the -clients or the server and if it does lie with the server find the problematic -nodes in the Cassandra cluster. The goal is to determine if this is a systemic -issue (e.g. a query pattern that affects the entire cluster) or isolated to a -subset of nodes (e.g. neighbors holding a shared token range or even a single -node with bad hardware). - -There are many sources of information that help determine where the problem -lies. Some of the most common are mentioned below. - -Client Logs and Errors ----------------------- -Clients of the cluster often leave the best breadcrumbs to follow. Perhaps -client latencies or error rates have increased in a particular datacenter -(likely eliminating other datacenter's nodes), or clients are receiving a -particular kind of error code indicating a particular kind of problem. -Troubleshooters can often rule out many failure modes just by reading the error -messages. In fact, many Cassandra error messages include the last coordinator -contacted to help operators find nodes to start with. - -Some common errors (likely culprit in parenthesis) assuming the client has -similar error names as the Datastax :ref:`drivers `: - -* ``SyntaxError`` (**client**). 
This and other ``QueryValidationException`` - indicate that the client sent a malformed request. These are rarely server - issues and usually indicate bad queries. -* ``UnavailableException`` (**server**): This means that the Cassandra - coordinator node has rejected the query as it believes that insufficent - replica nodes are available. If many coordinators are throwing this error it - likely means that there really are (typically) multiple nodes down in the - cluster and you can identify them using :ref:`nodetool status - ` If only a single coordinator is throwing this error it may - mean that node has been partitioned from the rest. -* ``OperationTimedOutException`` (**server**): This is the most frequent - timeout message raised when clients set timeouts and means that the query - took longer than the supplied timeout. This is a *client side* timeout - meaning that it took longer than the client specified timeout. The error - message will include the coordinator node that was last tried which is - usually a good starting point. This error usually indicates either - aggressive client timeout values or latent server coordinators/replicas. -* ``ReadTimeoutException`` or ``WriteTimeoutException`` (**server**): These - are raised when clients do not specify lower timeouts and there is a - *coordinator* timeouts based on the values supplied in the ``cassandra.yaml`` - configuration file. They usually indicate a serious server side problem as - the default values are usually multiple seconds. - -Metrics -------- - -If you have Cassandra :ref:`metrics ` reporting to a -centralized location such as `Graphite `_ or -`Grafana `_ you can typically use those to narrow down -the problem. At this stage narrowing down the issue to a particular -datacenter, rack, or even group of nodes is the main goal. Some helpful metrics -to look at are: - -Errors -^^^^^^ -Cassandra refers to internode messaging errors as "drops", and provided a -number of :ref:`Dropped Message Metrics ` to help narrow -down errors. If particular nodes are dropping messages actively, they are -likely related to the issue. - -Latency -^^^^^^^ -For timeouts or latency related issues you can start with :ref:`Table -Metrics ` by comparing Coordinator level metrics e.g. -``CoordinatorReadLatency`` or ``CoordinatorWriteLatency`` with their associated -replica metrics e.g. ``ReadLatency`` or ``WriteLatency``. Issues usually show -up on the ``99th`` percentile before they show up on the ``50th`` percentile or -the ``mean``. While ``maximum`` coordinator latencies are not typically very -helpful due to the exponentially decaying reservoir used internally to produce -metrics, ``maximum`` replica latencies that correlate with increased ``99th`` -percentiles on coordinators can help narrow down the problem. - -There are usually three main possibilities: - -1. Coordinator latencies are high on all nodes, but only a few node's local - read latencies are high. This points to slow replica nodes and the - coordinator's are just side-effects. This usually happens when clients are - not token aware. -2. Coordinator latencies and replica latencies increase at the - same time on the a few nodes. If clients are token aware this is almost - always what happens and points to slow replicas of a subset of token - ranges (only part of the ring). -3. Coordinator and local latencies are high on many nodes. This usually - indicates either a tipping point in the cluster capacity (too many writes or - reads per second), or a new query pattern. 
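If you have shell access to a suspect node, a quick way to tell these cases apart (a minimal sketch using the ``nodetool`` commands covered later in these docs) is to compare that node's coordinator percentiles against its local replica percentiles::

    $ # Coordinator-level read/write latency percentiles on this node
    $ nodetool proxyhistograms

    $ # Local (replica) latency percentiles for the suspect table
    $ nodetool tablehistograms keyspace table

If the coordinator percentiles are elevated while the local percentiles look healthy, the slow replicas are probably elsewhere in the ring; if both are elevated, this node itself is a likely culprit.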
- -It's important to remember that depending on the client's load balancing -behavior and consistency levels coordinator and replica metrics may or may -not correlate. In particular if you use ``TokenAware`` policies the same -node's coordinator and replica latencies will often increase together, but if -you just use normal ``DCAwareRoundRobin`` coordinator latencies can increase -with unrelated replica node's latencies. For example: - -* ``TokenAware`` + ``LOCAL_ONE``: should always have coordinator and replica - latencies on the same node rise together -* ``TokenAware`` + ``LOCAL_QUORUM``: should always have coordinator and - multiple replica latencies rise together in the same datacenter. -* ``TokenAware`` + ``QUORUM``: replica latencies in other datacenters can - affect coordinator latencies. -* ``DCAwareRoundRobin`` + ``LOCAL_ONE``: coordinator latencies and unrelated - replica node's latencies will rise together. -* ``DCAwareRoundRobin`` + ``LOCAL_QUORUM``: different coordinator and replica - latencies will rise together with little correlation. - -Query Rates -^^^^^^^^^^^ -Sometimes the :ref:`Table ` query rate metrics can help -narrow down load issues as "small" increase in coordinator queries per second -(QPS) may correlate with a very large increase in replica level QPS. This most -often happens with ``BATCH`` writes, where a client may send a single ``BATCH`` -query that might contain 50 statements in it, which if you have 9 copies (RF=3, -three datacenters) means that every coordinator ``BATCH`` write turns into 450 -replica writes! This is why keeping ``BATCH``'s to the same partition is so -critical, otherwise you can exhaust significant CPU capacitity with a "single" -query. - - -Next Step: Investigate the Node(s) ----------------------------------- - -Once you have narrowed down the problem as much as possible (datacenter, rack -, node), login to one of the nodes using SSH and proceed to debug using -:ref:`logs `, :ref:`nodetool `, and -:ref:`os tools `. If you are not able to login you may still -have access to :ref:`logs ` and :ref:`nodetool ` -remotely. diff --git a/src/doc/4.0-alpha4/_sources/troubleshooting/index.rst.txt b/src/doc/4.0-alpha4/_sources/troubleshooting/index.rst.txt deleted file mode 100644 index 79b46d636..000000000 --- a/src/doc/4.0-alpha4/_sources/troubleshooting/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Troubleshooting -=============== - -As any distributed database does, sometimes Cassandra breaks and you will have -to troubleshoot what is going on. 
Generally speaking you can debug Cassandra -like any other distributed Java program, meaning that you have to find which -machines in your cluster are misbehaving and then isolate the problem using -logs and tools. Luckily Cassandra had a great set of instrospection tools to -help you. - -These pages include a number of command examples demonstrating various -debugging and analysis techniques, mostly for Linux/Unix systems. If you don't -have access to the machines running Cassandra, or are running on Windows or -another operating system you may not be able to use the exact commands but -there are likely equivalent tools you can use. - -.. toctree:: - :maxdepth: 2 - - finding_nodes - reading_logs - use_nodetool - use_tools diff --git a/src/doc/4.0-alpha4/_sources/troubleshooting/reading_logs.rst.txt b/src/doc/4.0-alpha4/_sources/troubleshooting/reading_logs.rst.txt deleted file mode 100644 index 08f7d4da6..000000000 --- a/src/doc/4.0-alpha4/_sources/troubleshooting/reading_logs.rst.txt +++ /dev/null @@ -1,267 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _reading-logs: - -Cassandra Logs -============== -Cassandra has rich support for logging and attempts to give operators maximum -insight into the database while at the same time limiting noise to the logs. - -Common Log Files ----------------- -Cassandra has three main logs, the ``system.log``, ``debug.log`` and -``gc.log`` which hold general logging messages, debugging logging messages, and -java garbage collection logs respectively. - -These logs by default live in ``${CASSANDRA_HOME}/logs``, but most Linux -distributions relocate logs to ``/var/log/cassandra``. Operators can tune -this location as well as what levels are logged using the provided -``logback.xml`` file. - -``system.log`` -^^^^^^^^^^^^^^ -This log is the default Cassandra log and is a good place to start any -investigation. Some examples of activities logged to this log: - -* Uncaught exceptions. These can be very useful for debugging errors. -* ``GCInspector`` messages indicating long garbage collector pauses. When long - pauses happen Cassandra will print how long and also what was the state of - the system (thread state) at the time of that pause. This can help narrow - down a capacity issue (either not enough heap or not enough spare CPU). -* Information about nodes joining and leaving the cluster as well as token - metadata (data ownersip) changes. This is useful for debugging network - partitions, data movements, and more. -* Keyspace/Table creation, modification, deletion. -* ``StartupChecks`` that ensure optimal configuration of the operating system - to run Cassandra -* Information about some background operational tasks (e.g. Index - Redistribution). 
- -As with any application, looking for ``ERROR`` or ``WARN`` lines can be a -great first step:: - - $ # Search for warnings or errors in the latest system.log - $ grep 'WARN\|ERROR' system.log | tail - ... - - $ # Search for warnings or errors in all rotated system.log - $ zgrep 'WARN\|ERROR' system.log.* | less - ... - -``debug.log`` -^^^^^^^^^^^^^^ -This log contains additional debugging information that may be useful when -troubleshooting but may be much noiser than the normal ``system.log``. Some -examples of activities logged to this log: - -* Information about compactions, including when they start, which sstables - they contain, and when they finish. -* Information about memtable flushes to disk, including when they happened, - how large the flushes were, and which commitlog segments the flush impacted. - -This log can be *very* noisy, so it is highly recommended to use ``grep`` and -other log analysis tools to dive deep. For example:: - - $ # Search for messages involving a CompactionTask with 5 lines of context - $ grep CompactionTask debug.log -C 5 - ... - - $ # Look at the distribution of flush tasks per keyspace - $ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c - 6 compaction_history: - 1 test_keyspace: - 2 local: - 17 size_estimates: - 17 sstable_activity: - - -``gc.log`` -^^^^^^^^^^^^^^ -The gc log is a standard Java GC log. With the default ``jvm.options`` -settings you get a lot of valuable information in this log such as -application pause times, and why pauses happened. This may help narrow -down throughput or latency issues to a mistuned JVM. For example you can -view the last few pauses:: - - $ grep stopped gc.log.0.current | tail - 2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds - 2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds - 2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds - 2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds - 2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds - 2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds - 2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds - 2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds - 2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds - 2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds - - -This shows a lot of valuable information including how long the application -was paused (meaning zero user queries were being serviced during the e.g. 33ms -JVM pause) as well as how long it took to enter the safepoint. 
You can use this -raw data to e.g. get the longest pauses:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current | sort -k 1 - 2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds - 2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds - 2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds - 2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds - 2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds - 2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds - 2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds - 2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds - 2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds - 2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds - -In this case any client waiting on a query would have experienced a `56ms` -latency at 17:13:41. - -Note that GC pauses are not _only_ garbage collection, although -generally speaking high pauses with fast safepoints indicate a lack of JVM heap -or mistuned JVM GC algorithm. High pauses with slow safepoints typically -indicate that the JVM is having trouble entering a safepoint which usually -indicates slow disk drives (Cassandra makes heavy use of memory mapped reads -which the JVM doesn't know could have disk latency, so the JVM safepoint logic -doesn't handle a blocking memory mapped read particularly well). - -Using these logs you can even get a pause distribution with something like -`histogram.py `_:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py - # NumSamples = 410293; Min = 0.00; Max = 11.49 - # Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498 - # each ∎ represents a count of 5470 - 0.0001 - 1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎ - 1.1496 - 2.2991 [ 15]: - 2.2991 - 3.4486 [ 5]: - 3.4486 - 4.5981 [ 1]: - 4.5981 - 5.7475 [ 5]: - 5.7475 - 6.8970 [ 9]: - 6.8970 - 8.0465 [ 1]: - 8.0465 - 9.1960 [ 0]: - 9.1960 - 10.3455 [ 0]: - 10.3455 - 11.4949 [ 2]: - -We can see in this case while we have very good average performance something -is causing multi second JVM pauses ... 
In this case it was mostly safepoint -pauses caused by slow disks:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current| sort -k 1 - 2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds - 2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds - 2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds - 2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds - 2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds - 2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds - 2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds - 2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds - 2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds - 2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds - -Sometimes reading and understanding java GC logs is hard, but you can take the -raw GC files and visualize them using tools such as `GCViewer -`_ which take the Cassandra GC log as -input and show you detailed visual information on your garbage collection -performance. This includes pause analysis as well as throughput information. -For a stable Cassandra JVM you probably want to aim for pauses less than -`200ms` and GC throughput greater than `99%` (ymmv). - -Java GC pauses are one of the leading causes of tail latency in Cassandra -(along with drive latency) so sometimes this information can be crucial -while debugging tail latency issues. - - -Getting More Information ------------------------- - -If the default logging levels are insuficient, ``nodetool`` can set higher -or lower logging levels for various packages and classes using the -``nodetool setlogginglevel`` command. Start by viewing the current levels:: - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - -Perhaps the ``Gossiper`` is acting up and we wish to enable it at ``TRACE`` -level for even more insight:: - - - $ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - org.apache.cassandra.gms.Gossiper TRACE - - $ grep TRACE debug.log | tail -2 - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating - heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ... - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local - heartbeat version 2341 greater than 2340 for 127.0.0.1:7000 - - -Note that any changes made this way are reverted on next Cassandra process -restart. 
To make the changes permanent add the appropriate rule to -``logback.xml``. - -.. code-block:: diff - - diff --git a/conf/logback.xml b/conf/logback.xml - index b2c5b10..71b0a49 100644 - --- a/conf/logback.xml - +++ b/conf/logback.xml - @@ -98,4 +98,5 @@ appender reference in the root level section below. - - - - + - - -Full Query Logger -^^^^^^^^^^^^^^^^^ - -Cassandra 4.0 additionally ships with support for full query logging. This -is a highly performant binary logging tool which captures Cassandra queries -in real time, writes them (if possible) to a log file, and ensures the total -size of the capture does not exceed a particular limit. FQL is enabled with -``nodetool`` and the logs are read with the provided ``bin/fqltool`` utility:: - - $ mkdir /var/tmp/fql_logs - $ nodetool enablefullquerylog --path /var/tmp/fql_logs - - # ... do some querying - - $ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail - Query time: 1530750927224 - Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name = - 'system_views' AND table_name = 'sstable_tasks'; - Values: - - Type: single - Protocol version: 4 - Query time: 1530750934072 - Query: select * from keyspace1.standard1 ; - Values: - - $ nodetool disablefullquerylog - -Note that if you want more information than this tool provides, there are other -live capture options available such as :ref:`packet capture `. diff --git a/src/doc/4.0-alpha4/_sources/troubleshooting/use_nodetool.rst.txt b/src/doc/4.0-alpha4/_sources/troubleshooting/use_nodetool.rst.txt deleted file mode 100644 index 5072f85d1..000000000 --- a/src/doc/4.0-alpha4/_sources/troubleshooting/use_nodetool.rst.txt +++ /dev/null @@ -1,245 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-nodetool: - -Use Nodetool -============ - -Cassandra's ``nodetool`` allows you to narrow problems from the cluster down -to a particular node and gives a lot of insight into the state of the Cassandra -process itself. There are dozens of useful commands (see ``nodetool help`` -for all the commands), but briefly some of the most useful for troubleshooting: - -.. _nodetool-status: - -Cluster Status --------------- - -You can use ``nodetool status`` to assess status of the cluster:: - - $ nodetool status - - Datacenter: dc1 - ======================= - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - UN 127.0.1.1 4.69 GiB 1 100.0% 35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e r1 - UN 127.0.1.2 4.71 GiB 1 100.0% 752e278f-b7c5-4f58-974b-9328455af73f r2 - UN 127.0.1.3 4.69 GiB 1 100.0% 9dc1a293-2cc0-40fa-a6fd-9e6054da04a7 r3 - -In this case we can see that we have three nodes in one datacenter with about -4.6GB of data each and they are all "up". 
The up/down status of a node is -independently determined by every node in the cluster, so you may have to run -``nodetool status`` on multiple nodes in a cluster to see the full view. - -You can use ``nodetool status`` plus a little grep to see which nodes are -down:: - - $ nodetool status | grep -v '^UN' - Datacenter: dc1 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - Datacenter: dc2 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - DN 127.0.0.5 105.73 KiB 1 33.3% df303ac7-61de-46e9-ac79-6e630115fd75 r1 - -In this case there are two datacenters and there is one node down in datacenter -``dc2`` and rack ``r1``. This may indicate an issue on ``127.0.0.5`` -warranting investigation. - -.. _nodetool-proxyhistograms: - -Coordinator Query Latency -------------------------- -You can view latency distributions of coordinator read and write latency -to help narrow down latency issues using ``nodetool proxyhistograms``:: - - $ nodetool proxyhistograms - Percentile Read Latency Write Latency Range Latency CAS Read Latency CAS Write Latency View Write Latency - (micros) (micros) (micros) (micros) (micros) (micros) - 50% 454.83 219.34 0.00 0.00 0.00 0.00 - 75% 545.79 263.21 0.00 0.00 0.00 0.00 - 95% 654.95 315.85 0.00 0.00 0.00 0.00 - 98% 785.94 379.02 0.00 0.00 0.00 0.00 - 99% 3379.39 2346.80 0.00 0.00 0.00 0.00 - Min 42.51 105.78 0.00 0.00 0.00 0.00 - Max 25109.16 43388.63 0.00 0.00 0.00 0.00 - -Here you can see the full latency distribution of reads, writes, range requests -(e.g. ``select * from keyspace.table``), CAS read (compare phase of CAS) and -CAS write (set phase of compare and set). These can be useful for narrowing -down high level latency problems, for example in this case if a client had a -20 millisecond timeout on their reads they might experience the occasional -timeout from this node but less than 1% (since the 99% read latency is 3.3 -milliseconds < 20 milliseconds). - -.. _nodetool-tablehistograms: - -Local Query Latency -------------------- - -If you know which table is having latency/error issues, you can use -``nodetool tablehistograms`` to get a better idea of what is happening -locally on a node:: - - $ nodetool tablehistograms keyspace table - Percentile SSTables Write Latency Read Latency Partition Size Cell Count - (micros) (micros) (bytes) - 50% 0.00 73.46 182.79 17084 103 - 75% 1.00 88.15 315.85 17084 103 - 95% 2.00 126.93 545.79 17084 103 - 98% 2.00 152.32 654.95 17084 103 - 99% 2.00 182.79 785.94 17084 103 - Min 0.00 42.51 24.60 14238 87 - Max 2.00 12108.97 17436.92 17084 103 - -This shows you percentile breakdowns particularly critical metrics. - -The first column contains how many sstables were read per logical read. A very -high number here indicates that you may have chosen the wrong compaction -strategy, e.g. ``SizeTieredCompactionStrategy`` typically has many more reads -per read than ``LeveledCompactionStrategy`` does for update heavy workloads. - -The second column shows you a latency breakdown of *local* write latency. In -this case we see that while the p50 is quite good at 73 microseconds, the -maximum latency is quite slow at 12 milliseconds. High write max latencies -often indicate a slow commitlog volume (slow to fsync) or large writes -that quickly saturate commitlog segments. - -The third column shows you a latency breakdown of *local* read latency. 
We can -see that local Cassandra reads are (as expected) slower than local writes, and -the read speed correlates highly with the number of sstables read per read. - -The fourth and fifth columns show distributions of partition size and column -count per partition. These are useful for determining if the table has on -average skinny or wide partitions and can help you isolate bad data patterns. -For example if you have a single cell that is 2 megabytes, that is probably -going to cause some heap pressure when it's read. - -.. _nodetool-tpstats: - -Threadpool State ----------------- - -You can use ``nodetool tpstats`` to view the current outstanding requests on -a particular node. This is useful for trying to find out which resource -(read threads, write threads, compaction, request response threads) the -Cassandra process lacks. For example:: - - $ nodetool tpstats - Pool Name Active Pending Completed Blocked All time blocked - ReadStage 2 0 12 0 0 - MiscStage 0 0 0 0 0 - CompactionExecutor 0 0 1940 0 0 - MutationStage 0 0 0 0 0 - GossipStage 0 0 10293 0 0 - Repair-Task 0 0 0 0 0 - RequestResponseStage 0 0 16 0 0 - ReadRepairStage 0 0 0 0 0 - CounterMutationStage 0 0 0 0 0 - MemtablePostFlush 0 0 83 0 0 - ValidationExecutor 0 0 0 0 0 - MemtableFlushWriter 0 0 30 0 0 - ViewMutationStage 0 0 0 0 0 - CacheCleanupExecutor 0 0 0 0 0 - MemtableReclaimMemory 0 0 30 0 0 - PendingRangeCalculator 0 0 11 0 0 - SecondaryIndexManagement 0 0 0 0 0 - HintsDispatcher 0 0 0 0 0 - Native-Transport-Requests 0 0 192 0 0 - MigrationStage 0 0 14 0 0 - PerDiskMemtableFlushWriter_0 0 0 30 0 0 - Sampler 0 0 0 0 0 - ViewBuildExecutor 0 0 0 0 0 - InternalResponseStage 0 0 0 0 0 - AntiEntropyStage 0 0 0 0 0 - - Message type Dropped Latency waiting in queue (micros) - 50% 95% 99% Max - READ 0 N/A N/A N/A N/A - RANGE_SLICE 0 0.00 0.00 0.00 0.00 - _TRACE 0 N/A N/A N/A N/A - HINT 0 N/A N/A N/A N/A - MUTATION 0 N/A N/A N/A N/A - COUNTER_MUTATION 0 N/A N/A N/A N/A - BATCH_STORE 0 N/A N/A N/A N/A - BATCH_REMOVE 0 N/A N/A N/A N/A - REQUEST_RESPONSE 0 0.00 0.00 0.00 0.00 - PAGED_RANGE 0 N/A N/A N/A N/A - READ_REPAIR 0 N/A N/A N/A N/A - -This command shows you all kinds of interesting statistics. The first section -shows a detailed breakdown of threadpools for each Cassandra stage, including -how many threads are current executing (Active) and how many are waiting to -run (Pending). Typically if you see pending executions in a particular -threadpool that indicates a problem localized to that type of operation. For -example if the ``RequestResponseState`` queue is backing up, that means -that the coordinators are waiting on a lot of downstream replica requests and -may indicate a lack of token awareness, or very high consistency levels being -used on read requests (for example reading at ``ALL`` ties up RF -``RequestResponseState`` threads whereas ``LOCAL_ONE`` only uses a single -thread in the ``ReadStage`` threadpool). On the other hand if you see a lot of -pending compactions that may indicate that your compaction threads cannot keep -up with the volume of writes and you may need to tune either the compaction -strategy or the ``concurrent_compactors`` or ``compaction_throughput`` options. - -The second section shows drops (errors) and latency distributions for all the -major request types. Drops are cumulative since process start, but if you -have any that indicate a serious problem as the default timeouts to qualify as -a drop are quite high (~5-10 seconds). Dropped messages often warrants further -investigation. - -.. 
_nodetool-compactionstats: - -Compaction State ----------------- - -As Cassandra is a LSM datastore, Cassandra sometimes has to compact sstables -together, which can have adverse effects on performance. In particular, -compaction uses a reasonable quantity of CPU resources, invalidates large -quantities of the OS `page cache `_, -and can put a lot of load on your disk drives. There are great -:ref:`os tools ` to determine if this is the case, but often it's a -good idea to check if compactions are even running using -``nodetool compactionstats``:: - - $ nodetool compactionstats - pending tasks: 2 - - keyspace.table: 2 - - id compaction type keyspace table completed total unit progress - 2062b290-7f3a-11e8-9358-cd941b956e60 Compaction keyspace table 21848273 97867583 bytes 22.32% - Active compaction remaining time : 0h00m04s - -In this case there is a single compaction running on the ``keyspace.table`` -table, has completed 21.8 megabytes of 97 and Cassandra estimates (based on -the configured compaction throughput) that this will take 4 seconds. You can -also pass ``-H`` to get the units in a human readable format. - -Generally each running compaction can consume a single core, but the more -you do in parallel the faster data compacts. Compaction is crucial to ensuring -good read performance so having the right balance of concurrent compactions -such that compactions complete quickly but don't take too many resources -away from query threads is very important for performance. If you notice -compaction unable to keep up, try tuning Cassandra's ``concurrent_compactors`` -or ``compaction_throughput`` options. diff --git a/src/doc/4.0-alpha4/_sources/troubleshooting/use_tools.rst.txt b/src/doc/4.0-alpha4/_sources/troubleshooting/use_tools.rst.txt deleted file mode 100644 index b1347cc6d..000000000 --- a/src/doc/4.0-alpha4/_sources/troubleshooting/use_tools.rst.txt +++ /dev/null @@ -1,542 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-os-tools: - -Diving Deep, Use External Tools -=============================== - -Machine access allows operators to dive even deeper than logs and ``nodetool`` -allow. While every Cassandra operator may have their personal favorite -toolsets for troubleshooting issues, this page contains some of the most common -operator techniques and examples of those tools. Many of these commands work -only on Linux, but if you are deploying on a different operating system you may -have access to other substantially similar tools that assess similar OS level -metrics and processes. - -JVM Tooling ------------ -The JVM ships with a number of useful tools. Some of them are useful for -debugging Cassandra issues, especially related to heap and execution stacks. 
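Most of these tools take the Cassandra process id (PID) as an argument. A minimal sketch for finding it, assuming the JDK's ``jps`` utility is on the path and the database runs as the ``cassandra`` user (the PID shown is illustrative)::

    $ # List JVMs with their main class; Cassandra's is CassandraDaemon
    $ sudo -u cassandra jps -l | grep CassandraDaemon
    12345 org.apache.cassandra.service.CassandraDaemon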
- -**NOTE**: There are two common gotchas with JVM tooling and Cassandra: - -1. By default Cassandra ships with ``-XX:+PerfDisableSharedMem`` set to prevent - long pauses (see ``CASSANDRA-9242`` and ``CASSANDRA-9483`` for details). If - you want to use JVM tooling you can instead have ``/tmp`` mounted on an in - memory ``tmpfs`` which also effectively works around ``CASSANDRA-9242``. -2. Make sure you run the tools as the same user as Cassandra is running as, - e.g. if the database is running as ``cassandra`` the tool also has to be - run as ``cassandra``, e.g. via ``sudo -u cassandra ``. - -Garbage Collection State (jstat) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you suspect heap pressure you can use ``jstat`` to dive deep into the -garbage collection state of a Cassandra process. This command is always -safe to run and yields detailed heap information including eden heap usage (E), -old generation heap usage (O), count of eden collections (YGC), time spend in -eden collections (YGCT), old/mixed generation collections (FGC) and time spent -in old/mixed generation collections (FGCT):: - - - jstat -gcutil 500ms - S0 S1 E O M CCS YGC YGCT FGC FGCT GCT - 0.00 0.00 81.53 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.94 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - -In this case we see we have a relatively healthy heap profile, with 31.16% -old generation heap usage and 83% eden. If the old generation routinely is -above 75% then you probably need more heap (assuming CMS with a 75% occupancy -threshold). If you do have such persistently high old gen that often means you -either have under-provisioned the old generation heap, or that there is too -much live data on heap for Cassandra to collect (e.g. because of memtables). -Another thing to watch for is time between young garbage collections (YGC), -which indicate how frequently the eden heap is collected. Each young gc pause -is about 20-50ms, so if you have a lot of them your clients will notice in -their high percentile latencies. - -Thread Information (jstack) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To get a point in time snapshot of exactly what Cassandra is doing, run -``jstack`` against the Cassandra PID. **Note** that this does pause the JVM for -a very brief period (<20ms).:: - - $ jstack > threaddump - - # display the threaddump - $ cat threaddump - ... 
- - # look at runnable threads - $grep RUNNABLE threaddump -B 1 - "Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000] - java.lang.Thread.State: RUNNABLE - -- - "Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - ... - - # Note that the nid is the Linux thread id - -Some of the most important information in the threaddumps are waiting/blocking -threads, including what locks or monitors the thread is blocking/waiting on. - -Basic OS Tooling ----------------- -A great place to start when debugging a Cassandra issue is understanding how -Cassandra is interacting with system resources. The following are all -resources that Cassandra makes heavy uses of: - -* CPU cores. For executing concurrent user queries -* CPU processing time. For query activity (data decompression, row merging, - etc...) -* CPU processing time (low priority). For background tasks (compaction, - streaming, etc ...) -* RAM for Java Heap. Used to hold internal data-structures and by default the - Cassandra memtables. Heap space is a crucial component of write performance - as well as generally. -* RAM for OS disk cache. Used to cache frequently accessed SSTable blocks. OS - disk cache is a crucial component of read performance. -* Disks. Cassandra cares a lot about disk read latency, disk write throughput, - and of course disk space. -* Network latency. Cassandra makes many internode requests, so network latency - between nodes can directly impact performance. -* Network throughput. Cassandra (as other databases) frequently have the - so called "incast" problem where a small request (e.g. ``SELECT * from - foo.bar``) returns a massively large result set (e.g. the entire dataset). - In such situations outgoing bandwidth is crucial. - -Often troubleshooting Cassandra comes down to troubleshooting what resource -the machine or cluster is running out of. Then you create more of that resource -or change the query pattern to make less use of that resource. - -High Level Resource Usage (top/htop) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra makes signifiant use of system resources, and often the very first -useful action is to run ``top`` or ``htop`` (`website -`_)to see the state of the machine. - -Useful things to look at: - -* System load levels. While these numbers can be confusing, generally speaking - if the load average is greater than the number of CPU cores, Cassandra - probably won't have very good (sub 100 millisecond) latencies. See - `Linux Load Averages `_ - for more information. -* CPU utilization. ``htop`` in particular can help break down CPU utilization - into ``user`` (low and normal priority), ``system`` (kernel), and ``io-wait`` - . Cassandra query threads execute as normal priority ``user`` threads, while - compaction threads execute as low priority ``user`` threads. 
High ``system`` - time could indicate problems like thread contention, and high ``io-wait`` - may indicate slow disk drives. This can help you understand what Cassandra - is spending processing resources doing. -* Memory usage. Look for which programs have the most resident memory, it is - probably Cassandra. The number for Cassandra is likely inaccurately high due - to how Linux (as of 2018) accounts for memory mapped file memory. - -.. _os-iostat: - -IO Usage (iostat) -^^^^^^^^^^^^^^^^^ -Use iostat to determine how data drives are faring, including latency -distributions, throughput, and utilization:: - - $ sudo iostat -xdm 2 - Linux 4.13.0-13-generic (hostname) 07/03/2018 _x86_64_ (8 CPU) - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.28 0.32 5.42 0.01 0.13 48.55 0.01 2.21 0.26 2.32 0.64 0.37 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 79.34 0.00 0.20 0.20 0.00 0.16 0.00 - sdc 0.34 0.27 0.76 0.36 0.01 0.02 47.56 0.03 26.90 2.98 77.73 9.21 1.03 - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.00 2.00 32.00 0.01 4.04 244.24 0.54 16.00 0.00 17.00 1.06 3.60 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 - sdc 0.00 24.50 0.00 114.00 0.00 11.62 208.70 5.56 48.79 0.00 48.79 1.12 12.80 - - -In this case we can see that ``/dev/sdc1`` is a very slow drive, having an -``await`` close to 50 milliseconds and an ``avgqu-sz`` close to 5 ios. The -drive is not particularly saturated (utilization is only 12.8%), but we should -still be concerned about how this would affect our p99 latency since 50ms is -quite long for typical Cassandra operations. That being said, in this case -most of the latency is present in writes (typically writes are more latent -than reads), which due to the LSM nature of Cassandra is often hidden from -the user. - -Important metrics to assess using iostat: - -* Reads and writes per second. These numbers will change with the workload, - but generally speaking the more reads Cassandra has to do from disk the - slower Cassandra read latencies are. Large numbers of reads per second - can be a dead giveaway that the cluster has insufficient memory for OS - page caching. -* Write throughput. Cassandra's LSM model defers user writes and batches them - together, which means that throughput to the underlying medium is the most - important write metric for Cassandra. -* Read latency (``r_await``). When Cassandra missed the OS page cache and reads - from SSTables, the read latency directly determines how fast Cassandra can - respond with the data. -* Write latency. Cassandra is less sensitive to write latency except when it - syncs the commit log. This typically enters into the very high percentiles of - write latency. - -Note that to get detailed latency breakdowns you will need a more advanced -tool such as :ref:`bcc-tools `. - -OS page Cache Usage -^^^^^^^^^^^^^^^^^^^ -As Cassandra makes heavy use of memory mapped files, the health of the -operating system's `Page Cache `_ is -crucial to performance. Start by finding how much available cache is in the -system:: - - $ free -g - total used free shared buff/cache available - Mem: 15 9 2 0 3 5 - Swap: 0 0 0 - -In this case 9GB of memory is used by user processes (Cassandra heap) and 8GB -is available for OS page cache. Of that, 3GB is actually used to cache files. -If most memory is used and unavailable to the page cache, Cassandra performance -can suffer significantly. 
This is why Cassandra starts with a reasonably small -amount of memory reserved for the heap. - -If you suspect that you are missing the OS page cache frequently you can use -advanced tools like :ref:`cachestat ` or -:ref:`vmtouch ` to dive deeper. - -Network Latency and Reliability -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Whenever Cassandra does writes or reads that involve other replicas, -``LOCAL_QUORUM`` reads for example, one of the dominant effects on latency is -network latency. When trying to debug issues with multi machine operations, -the network can be an important resource to investigate. You can determine -internode latency using tools like ``ping`` and ``traceroute`` or most -effectively ``mtr``:: - - $ mtr -nr www.google.com - Start: Sun Jul 22 13:10:28 2018 - HOST: hostname Loss% Snt Last Avg Best Wrst StDev - 1.|-- 192.168.1.1 0.0% 10 2.0 1.9 1.1 3.7 0.7 - 2.|-- 96.123.29.15 0.0% 10 11.4 11.0 9.0 16.4 1.9 - 3.|-- 68.86.249.21 0.0% 10 10.6 10.7 9.0 13.7 1.1 - 4.|-- 162.141.78.129 0.0% 10 11.5 10.6 9.6 12.4 0.7 - 5.|-- 162.151.78.253 0.0% 10 10.9 12.1 10.4 20.2 2.8 - 6.|-- 68.86.143.93 0.0% 10 12.4 12.6 9.9 23.1 3.8 - 7.|-- 96.112.146.18 0.0% 10 11.9 12.4 10.6 15.5 1.6 - 9.|-- 209.85.252.250 0.0% 10 13.7 13.2 12.5 13.9 0.0 - 10.|-- 108.170.242.238 0.0% 10 12.7 12.4 11.1 13.0 0.5 - 11.|-- 74.125.253.149 0.0% 10 13.4 13.7 11.8 19.2 2.1 - 12.|-- 216.239.62.40 0.0% 10 13.4 14.7 11.5 26.9 4.6 - 13.|-- 108.170.242.81 0.0% 10 14.4 13.2 10.9 16.0 1.7 - 14.|-- 72.14.239.43 0.0% 10 12.2 16.1 11.0 32.8 7.1 - 15.|-- 216.58.195.68 0.0% 10 25.1 15.3 11.1 25.1 4.8 - -In this example of ``mtr``, we can rapidly assess the path that your packets -are taking, as well as what their typical loss and latency are. Packet loss -typically leads to between ``200ms`` and ``3s`` of additional latency, so that -can be a common cause of latency issues. - -Network Throughput -^^^^^^^^^^^^^^^^^^ -As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is -useful to determine if network throughput is limited. One handy tool to do -this is `iftop `_ which -shows both bandwidth usage as well as connection information at a glance. 
An -example showing traffic during a stress run against a local ``ccm`` cluster:: - - $ # remove the -t for ncurses instead of pure text - $ sudo iftop -nNtP -i lo - interface: lo - IP address is: 127.0.0.1 - MAC address is: 00:00:00:00:00:00 - Listening on lo - # Host name (port/service if enabled) last 2s last 10s last 40s cumulative - -------------------------------------------------------------------------------------------- - 1 127.0.0.1:58946 => 869Kb 869Kb 869Kb 217KB - 127.0.0.3:9042 <= 0b 0b 0b 0B - 2 127.0.0.1:54654 => 736Kb 736Kb 736Kb 184KB - 127.0.0.1:9042 <= 0b 0b 0b 0B - 3 127.0.0.1:51186 => 669Kb 669Kb 669Kb 167KB - 127.0.0.2:9042 <= 0b 0b 0b 0B - 4 127.0.0.3:9042 => 3.30Kb 3.30Kb 3.30Kb 845B - 127.0.0.1:58946 <= 0b 0b 0b 0B - 5 127.0.0.1:9042 => 2.79Kb 2.79Kb 2.79Kb 715B - 127.0.0.1:54654 <= 0b 0b 0b 0B - 6 127.0.0.2:9042 => 2.54Kb 2.54Kb 2.54Kb 650B - 127.0.0.1:51186 <= 0b 0b 0b 0B - 7 127.0.0.1:36894 => 1.65Kb 1.65Kb 1.65Kb 423B - 127.0.0.5:7000 <= 0b 0b 0b 0B - 8 127.0.0.1:38034 => 1.50Kb 1.50Kb 1.50Kb 385B - 127.0.0.2:7000 <= 0b 0b 0b 0B - 9 127.0.0.1:56324 => 1.50Kb 1.50Kb 1.50Kb 383B - 127.0.0.1:7000 <= 0b 0b 0b 0B - 10 127.0.0.1:53044 => 1.43Kb 1.43Kb 1.43Kb 366B - 127.0.0.4:7000 <= 0b 0b 0b 0B - -------------------------------------------------------------------------------------------- - Total send rate: 2.25Mb 2.25Mb 2.25Mb - Total receive rate: 0b 0b 0b - Total send and receive rate: 2.25Mb 2.25Mb 2.25Mb - -------------------------------------------------------------------------------------------- - Peak rate (sent/received/total): 2.25Mb 0b 2.25Mb - Cumulative (sent/received/total): 576KB 0B 576KB - ============================================================================================ - -In this case we can see that bandwidth is fairly shared between many peers, -but if the total was getting close to the rated capacity of the NIC or was focussed -on a single client, that may indicate a clue as to what issue is occurring. - -Advanced tools --------------- -Sometimes as an operator you may need to really dive deep. This is where -advanced OS tooling can come in handy. - -.. _use-bcc-tools: - -bcc-tools -^^^^^^^^^ -Most modern Linux distributions (kernels newer than ``4.1``) support `bcc-tools -`_ for diving deep into performance problems. -First install ``bcc-tools``, e.g. via ``apt`` on Debian:: - - $ apt install bcc-tools - -Then you can use all the tools that ``bcc-tools`` contains. One of the most -useful tools is ``cachestat`` -(`cachestat examples `_) -which allows you to determine exactly how many OS page cache hits and misses -are happening:: - - $ sudo /usr/share/bcc/tools/cachestat -T 1 - TIME TOTAL MISSES HITS DIRTIES BUFFERS_MB CACHED_MB - 18:44:08 66 66 0 64 88 4427 - 18:44:09 40 40 0 75 88 4427 - 18:44:10 4353 45 4308 203 88 4427 - 18:44:11 84 77 7 13 88 4428 - 18:44:12 2511 14 2497 14 88 4428 - 18:44:13 101 98 3 18 88 4428 - 18:44:14 16741 0 16741 58 88 4428 - 18:44:15 1935 36 1899 18 88 4428 - 18:44:16 89 34 55 18 88 4428 - -In this case there are not too many page cache ``MISSES`` which indicates a -reasonably sized cache. These metrics are the most direct measurement of your -Cassandra node's "hot" dataset. If you don't have enough cache, ``MISSES`` will -be high and performance will be slow. If you have enough cache, ``MISSES`` will -be low and performance will be fast (as almost all reads are being served out -of memory). 
- -You can also measure disk latency distributions using ``biolatency`` -(`biolatency examples `_) -to get an idea of how slow Cassandra will be when reads miss the OS page Cache -and have to hit disks:: - - $ sudo /usr/share/bcc/tools/biolatency -D 10 - Tracing block device I/O... Hit Ctrl-C to end. - - - disk = 'sda' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 12 |****************************************| - 32 -> 63 : 9 |****************************** | - 64 -> 127 : 1 |*** | - 128 -> 255 : 3 |********** | - 256 -> 511 : 7 |*********************** | - 512 -> 1023 : 2 |****** | - - disk = 'sdc' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 0 | | - 32 -> 63 : 0 | | - 64 -> 127 : 41 |************ | - 128 -> 255 : 17 |***** | - 256 -> 511 : 13 |*** | - 512 -> 1023 : 2 | | - 1024 -> 2047 : 0 | | - 2048 -> 4095 : 0 | | - 4096 -> 8191 : 56 |***************** | - 8192 -> 16383 : 131 |****************************************| - 16384 -> 32767 : 9 |** | - -In this case most ios on the data drive (``sdc``) are fast, but many take -between 8 and 16 milliseconds. - -Finally ``biosnoop`` (`examples `_) -can be used to dive even deeper and see per IO latencies:: - - $ sudo /usr/share/bcc/tools/biosnoop | grep java | head - 0.000000000 java 17427 sdc R 3972458600 4096 13.58 - 0.000818000 java 17427 sdc R 3972459408 4096 0.35 - 0.007098000 java 17416 sdc R 3972401824 4096 5.81 - 0.007896000 java 17416 sdc R 3972489960 4096 0.34 - 0.008920000 java 17416 sdc R 3972489896 4096 0.34 - 0.009487000 java 17427 sdc R 3972401880 4096 0.32 - 0.010238000 java 17416 sdc R 3972488368 4096 0.37 - 0.010596000 java 17427 sdc R 3972488376 4096 0.34 - 0.011236000 java 17410 sdc R 3972488424 4096 0.32 - 0.011825000 java 17427 sdc R 3972488576 16384 0.65 - ... time passes - 8.032687000 java 18279 sdc R 10899712 122880 3.01 - 8.033175000 java 18279 sdc R 10899952 8192 0.46 - 8.073295000 java 18279 sdc R 23384320 122880 3.01 - 8.073768000 java 18279 sdc R 23384560 8192 0.46 - - -With ``biosnoop`` you see every single IO and how long they take. This data -can be used to construct the latency distributions in ``biolatency`` but can -also be used to better understand how disk latency affects performance. For -example this particular drive takes ~3ms to service a memory mapped read due to -the large default value (``128kb``) of ``read_ahead_kb``. To improve point read -performance you may may want to decrease ``read_ahead_kb`` on fast data volumes -such as SSDs while keeping the a higher value like ``128kb`` value is probably -right for HDs. There are tradeoffs involved, see `queue-sysfs -`_ docs for more -information, but regardless ``biosnoop`` is useful for understanding *how* -Cassandra uses drives. - -.. _use-vmtouch: - -vmtouch -^^^^^^^ -Sometimes it's useful to know how much of the Cassandra data files are being -cached by the OS. A great tool for answering this question is -`vmtouch `_. - -First install it:: - - $ git clone https://github.com/hoytech/vmtouch.git - $ cd vmtouch - $ make - -Then run it on the Cassandra data directory:: - - $ ./vmtouch /var/lib/cassandra/data/ - Files: 312 - Directories: 92 - Resident Pages: 62503/64308 244M/251M 97.2% - Elapsed: 0.005657 seconds - -In this case almost the entire dataset is hot in OS page Cache. Generally -speaking the percentage doesn't really matter unless reads are missing the -cache (per e.g. 
:ref:`cachestat `), in which case having -additional memory may help read performance. - -CPU Flamegraphs -^^^^^^^^^^^^^^^ -Cassandra often uses a lot of CPU, but telling *what* it is doing can prove -difficult. One of the best ways to analyze where Cassandra is spending CPU time is to use -`CPU Flamegraphs `_ -which display in a useful way which areas of Cassandra code are using CPU. This -may help narrow down a compaction problem to a "compaction problem dropping -tombstones" or just generally help you narrow down what Cassandra is doing -while it is having an issue. To get CPU flamegraphs follow the instructions for -`Java Flamegraphs -`_. - -Generally: - -1. Enable the ``-XX:+PreserveFramePointer`` option in Cassandra's - ``jvm.options`` configuration file. This has a negligible performance impact - but allows you to actually see what Cassandra is doing. -2. Run ``perf`` to get some data. -3. Send that data through the relevant scripts in the FlameGraph toolset and - convert the data into a pretty flamegraph. View the resulting SVG image in - a browser or other image viewer. - -For example, cloning straight off GitHub, we first install the -``perf-map-agent`` to the location of our JVMs (assumed to be -``/usr/lib/jvm``):: - - $ sudo bash - $ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/ - $ cd /usr/lib/jvm - $ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent - $ cd perf-map-agent - $ cmake . - $ make - -Now to get a flamegraph:: - - $ git clone --depth=1 https://github.com/brendangregg/FlameGraph - $ sudo bash - $ cd FlameGraph - $ # Record traces of Cassandra and map symbols for all java processes - $ perf record -F 49 -a -g -p <pid of cassandra> -- sleep 30; ./jmaps - $ # Translate the data - $ perf script > cassandra_stacks - $ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \ - ./flamegraph.pl --color=java --hash > cassandra_flames.svg - - -The resulting SVG is searchable, zoomable, and generally easy to introspect -using a browser. - -.. _packet-capture: - -Packet Capture -^^^^^^^^^^^^^^ -Sometimes you have to understand what queries a Cassandra node is performing -*right now* to troubleshoot an issue. For these times trusty packet capture -tools like ``tcpdump`` and `Wireshark -`_ can be very helpful for dissecting packet captures. -Wireshark even has native `CQL support -`_ although it sometimes has -compatibility issues with newer Cassandra protocol releases. - -To get a packet capture first capture some packets:: - - $ sudo tcpdump -U -s0 -i <interface> -w cassandra.pcap -n "tcp port 9042" - -Now open it up with wireshark:: - - $ wireshark cassandra.pcap - -If you don't see CQL-like statements, try telling Wireshark to decode as CQL by right -clicking on a packet going to 9042 -> ``Decode as`` -> select CQL from the -dropdown for port 9042. - -If you don't want to do this manually or use a GUI, you can also use something -like `cqltrace `_ to ease obtaining and -parsing CQL packet captures. diff --git a/src/doc/4.0-alpha4/_static/ajax-loader.gif b/src/doc/4.0-alpha4/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab..000000000 Binary files a/src/doc/4.0-alpha4/_static/ajax-loader.gif and /dev/null differ diff --git a/src/doc/4.0-alpha4/_static/basic.css b/src/doc/4.0-alpha4/_static/basic.css deleted file mode 100644 index 0807176ec..000000000 --- a/src/doc/4.0-alpha4/_static/basic.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 
450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: 
manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - 
div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_static/comment-bright.png b/src/doc/4.0-alpha4/_static/comment-bright.png deleted file mode 100644 index 15e27edb1..000000000 Binary files a/src/doc/4.0-alpha4/_static/comment-bright.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_static/comment-close.png b/src/doc/4.0-alpha4/_static/comment-close.png deleted file mode 100644 index 4d91bcf57..000000000 Binary files a/src/doc/4.0-alpha4/_static/comment-close.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_static/comment.png b/src/doc/4.0-alpha4/_static/comment.png deleted file mode 100644 index dfbc0cbd5..000000000 Binary files a/src/doc/4.0-alpha4/_static/comment.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_static/doctools.js b/src/doc/4.0-alpha4/_static/doctools.js deleted file mode 100644 index 344db17dd..000000000 --- a/src/doc/4.0-alpha4/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. 
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var bbox = span.getBBox(); - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - var parentOfText = node.parentNode.parentNode; - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? 
singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git 
a/src/doc/4.0-alpha4/_static/documentation_options.js b/src/doc/4.0-alpha4/_static/documentation_options.js deleted file mode 100644 index d28647eb8..000000000 --- a/src/doc/4.0-alpha4/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, -}; \ No newline at end of file diff --git a/src/doc/4.0-alpha4/_static/down-pressed.png b/src/doc/4.0-alpha4/_static/down-pressed.png deleted file mode 100644 index 5756c8cad..000000000 Binary files a/src/doc/4.0-alpha4/_static/down-pressed.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_static/down.png b/src/doc/4.0-alpha4/_static/down.png deleted file mode 100644 index 1b3bdad2c..000000000 Binary files a/src/doc/4.0-alpha4/_static/down.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_static/extra.css b/src/doc/4.0-alpha4/_static/extra.css deleted file mode 100644 index 5e40dd7d2..000000000 --- a/src/doc/4.0-alpha4/_static/extra.css +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -div:not(.highlight) > pre { - background: #fff; - border: 1px solid #e1e4e5; - color: #404040; - margin: 1px 0 24px 0; - overflow-x: auto; - padding: 12px 12px; - font-size: 12px; -} - -a.reference.internal code.literal { - border: none; - font-size: 12px; - color: #2980B9; - padding: 0; - background: none; -} - -a.reference.internal:visited code.literal { - color: #9B59B6; - padding: 0; - background: none; -} - - -/* override table width restrictions */ -.wy-table-responsive table td, .wy-table-responsive table th { - white-space: normal; -} - -.wy-table-responsive { - margin-bottom: 24px; - max-width: 100%; - overflow: visible; -} - -table.contentstable { - margin: 0; -} - -td.rightcolumn { - padding-left: 30px; -} - -div#wipwarning { - font-size: 14px; - border: 1px solid #ecc; - color: #f66; - background: #ffe8e8; - padding: 10px 30px; - margin-bottom: 30px; -} -.content-container{ - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; - width:100%; -} diff --git a/src/doc/4.0-alpha4/_static/file.png b/src/doc/4.0-alpha4/_static/file.png deleted file mode 100644 index a858a410e..000000000 Binary files a/src/doc/4.0-alpha4/_static/file.png and /dev/null differ diff --git a/src/doc/4.0-alpha4/_static/jquery-3.2.1.js b/src/doc/4.0-alpha4/_static/jquery-3.2.1.js deleted file mode 100644 index d2d8ca479..000000000 --- a/src/doc/4.0-alpha4/_static/jquery-3.2.1.js +++ /dev/null @@ -1,10253 +0,0 @@ -/*! 
- * jQuery JavaScript Library v3.2.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2017-03-20T18:59Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.2.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from 
the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && Array.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? 
- [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
-function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. 
[... remainder of the deleted, bundled jQuery/Sizzle source (diff line structure lost during extraction): Sizzle's setDocument and feature detection, the Expr selector filters and pseudo-class definitions, tokenize/compile/select, the jQuery traversal methods (find, filter, closest, parents, siblings, children, contents), jQuery.Callbacks, and jQuery.Deferred ...]
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? 
- [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. 
- // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. - } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "
", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps 
(WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
[Collapsed hunk: deletion of the old site's bundled jQuery library source. The removed code spans jQuery's event delegation and on/off plumbing, DOM manipulation (domManip, clone, append/prepend/before/after, html, replaceWith), computed-style and cssHooks support, Tween/Animation and fx queueing, attribute/property/boolean-attr hooks, class and value helpers, and the event trigger/simulate and focusin shims. No project-specific changes appear in this span; the vendored library file is deleted verbatim.]
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport 
); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - 
// If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - 
contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -' - ---- -
-
- -
-
-
- -
-

Dynamo

-

Apache Cassandra relies on a number of techniques from Amazon’s Dynamo distributed storage key-value system. Each node in the Dynamo system has three main components:

-
    -
  • Request coordination over a partitioned dataset
  • Ring membership and failure detection
  • A local persistence (storage) engine
-

Cassandra primarily draws from the first two clustering components, while using a storage engine based on a Log Structured Merge Tree (LSM). In particular, Cassandra relies on the following Dynamo-style techniques:

-
    -
  • Dataset partitioning using consistent hashing
  • Multi-master replication using versioned data and tunable consistency
  • Distributed cluster membership and failure detection via a gossip protocol
  • Incremental scale-out on commodity hardware
-

Cassandra was designed this way to meet large-scale (PiB+) business-critical storage requirements. In particular, as applications demanded full global replication of petabyte scale datasets along with always available low-latency reads and writes, it became imperative to design a new kind of database model as the relational database systems of the time struggled to meet the new requirements of global scale applications.

-
-

Dataset Partitioning: Consistent Hashing

-

Cassandra achieves horizontal scalability by partitioning all data stored in the system using a hash function. Each partition is replicated to multiple physical nodes, often across failure domains such as racks and even datacenters. As every replica can independently accept mutations to every key that it owns, every key must be versioned. Unlike in the original Dynamo paper where deterministic versions and vector clocks were used to reconcile concurrent updates to a key, Cassandra uses a simpler last write wins model where every mutation is timestamped (including deletes) and then the latest version of data is the “winning” value. Formally speaking, Cassandra uses a Last-Write-Wins Element-Set conflict-free replicated data type for each CQL row (a.k.a. LWW-Element-Set CRDT) to resolve conflicting mutations on replica sets.

-
-
-
-

Consistent Hashing using a Token Ring

-

Cassandra partitions data over storage nodes using a special form of hashing called consistent hashing. In naive data hashing, you typically allocate keys to buckets by taking a hash of the key modulo the number of buckets. For example, if you want to distribute data to 100 nodes using naive hashing you might assign every node to a bucket between 0 and 100, hash the input key modulo 100, and store the data on the associated bucket. In this naive scheme, however, adding a single node might invalidate almost all of the mappings.
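The fragility of the naive scheme is easy to demonstrate. The following sketch is illustrative only; the node and key counts are arbitrary assumptions, not anything taken from Cassandra:

```python
import hashlib

def bucket(key: str, num_nodes: int) -> int:
    """Naive placement: hash the key and take it modulo the node count."""
    return int(hashlib.md5(key.encode()).hexdigest(), 16) % num_nodes

keys = [f"user:{i}" for i in range(10_000)]
before = {k: bucket(k, 100) for k in keys}   # 100 nodes
after = {k: bucket(k, 101) for k in keys}    # add a single node

moved = sum(1 for k in keys if before[k] != after[k])
print(f"{moved / len(keys):.0%} of keys changed nodes")  # typically ~99%
```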

-

Cassandra instead maps every node to one or more tokens on a continuous hash ring, and defines ownership by hashing a key onto the ring and then “walking” the ring in one direction, similar to the Chord algorithm. The main difference of consistent hashing to naive data hashing is that when the number of nodes (buckets) to hash into changes, consistent hashing only has to move a small fraction of the keys.

-

For example, if we have an eight node cluster with evenly spaced tokens, and a replication factor (RF) of 3, then to find the owning nodes for a key we first hash that key to generate a token (which is just the hash of the key), and then we “walk” the ring in a clockwise fashion until we encounter three distinct nodes, at which point we have found all the replicas of that key. This example of an eight node cluster with RF=3 can be visualized as follows:

-
[Figure: Dynamo Ring]
-

You can see that in a Dynamo-like system, ranges of keys, also known as token ranges, map to the same physical set of nodes. In this example, all keys that fall in the token range excluding token 1 and including token 2 (range(t1, t2]) are stored on nodes 2, 3 and 4.
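A minimal sketch of that clockwise walk, assuming eight invented nodes with evenly spaced tokens (an illustration of the idea, not Cassandra's implementation):

```python
import bisect
import hashlib

# Hypothetical ring: token -> node name. Evenly spaced tokens are assumed here
# purely for illustration; real tokens come from the configured partitioner.
RING = {i * (2**64 // 8): f"node{i + 1}" for i in range(8)}
TOKENS = sorted(RING)

def token_for(key: str) -> int:
    return int(hashlib.md5(key.encode()).hexdigest(), 16) % 2**64

def replicas(key: str, rf: int = 3) -> list:
    """Walk the ring clockwise from the key's token until rf distinct nodes are found."""
    start = bisect.bisect_right(TOKENS, token_for(key))
    found = []
    for i in range(len(TOKENS)):
        node = RING[TOKENS[(start + i) % len(TOKENS)]]
        if node not in found:
            found.append(node)
        if len(found) == rf:
            break
    return found

print(replicas("alice"))  # three consecutive distinct nodes clockwise from the key's token
```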

-
-
-

Multiple Tokens per Physical Node (a.k.a. vnodes)

-

Simple single token consistent hashing works well if you have many physical nodes to spread data over, but with evenly spaced tokens and a small number of physical nodes, incremental scaling (adding just a few nodes of capacity) is difficult because there are no token selections for new nodes that can leave the ring balanced. Cassandra seeks to avoid token imbalance because uneven token ranges lead to uneven request load. In the previous example, there is no way to add a ninth token without causing imbalance; instead we would have to insert 8 tokens in the midpoints of the existing ranges.

-

The Dynamo paper advocates for the use of “virtual nodes” to solve this imbalance problem. Virtual nodes solve the problem by assigning multiple tokens in the token ring to each physical node. By allowing a single physical node to take multiple positions in the ring, we can make small clusters look larger. Therefore, even when only a single physical node is added, it behaves as if we added many more nodes, taking many smaller pieces of data from more ring neighbors.

-

Cassandra introduces some nomenclature to handle these concepts:

-
    -
  • Token: A single position on the dynamo style hash ring.
  • Endpoint: A single physical IP and port on the network.
  • Host ID: A unique identifier for a single “physical” node, usually present at one Endpoint and containing one or more Tokens.
  • Virtual Node (or vnode): A Token on the hash ring owned by the same physical node, one with the same Host ID.
-

The mapping of Tokens to Endpoints gives rise to the Token Map where Cassandra keeps track of what ring positions map to which physical endpoints. For example, in the following figure we can represent an eight node cluster using only four physical nodes by assigning two tokens to every node:

-
[Figure: Virtual Tokens Ring]
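As a rough sketch of such a token map (the endpoint addresses and random token positions below are invented; in Cassandra the num_tokens setting controls how many tokens each node claims):

```python
import random

random.seed(42)

ENDPOINTS = ["10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4"]
TOKENS_PER_NODE = 2

# Each endpoint claims several positions on the ring, so four physical nodes
# can stand in for an eight position ring.
token_map = {}
for endpoint in ENDPOINTS:
    for _ in range(TOKENS_PER_NODE):
        token_map[random.randrange(2**64)] = endpoint

for token in sorted(token_map):
    print(f"{token:>20}  ->  {token_map[token]}")
```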
-

Multiple tokens per physical node provide the following benefits:

-
    -
  1. When a new node is added it accepts approximately equal amounts of data from other nodes in the ring, resulting in equal distribution of data across the cluster.
  2. When a node is decommissioned, it loses data roughly equally to other members of the ring, again keeping equal distribution of data across the cluster.
  3. If a node becomes unavailable, query load (especially token aware query load) is evenly distributed across many other nodes.
-

Multiple tokens, however, can also have disadvantages:

-
    -
  1. Every token introduces up to 2 * (RF - 1) additional neighbors on the token ring, which means that there are more combinations of node failures where we lose availability for a portion of the token ring. The more tokens you have, the higher the probability of an outage.
  2. Cluster-wide maintenance operations are often slowed. For example, as the number of tokens per node is increased, the number of discrete repair operations the cluster must do also increases.
  3. Performance of operations that span token ranges could be affected.
-

Note that in Cassandra 2.x, the only token allocation algorithm available was picking random tokens, which meant that to keep balance the default number of tokens per node had to be quite high, at 256. This had the effect of coupling many physical endpoints together, increasing the risk of unavailability. That is why in 3.x+ the new deterministic token allocator was added which intelligently picks tokens such that the ring is optimally balanced while requiring a much lower number of tokens per physical node.

-
-
-
-

Multi-master Replication: Versioned Data and Tunable Consistency

-

Cassandra replicates every partition of data to many nodes across the cluster to maintain high availability and durability. When a mutation occurs, the coordinator hashes the partition key to determine the token range the data belongs to and then replicates the mutation to the replicas of that data according to the Replication Strategy.

-

All replication strategies have the notion of a replication factor (RF), which indicates to Cassandra how many copies of the partition should exist. For example, with an RF=3 keyspace, the data will be written to three distinct replicas. Replicas are always chosen such that they are distinct physical nodes, which is achieved by skipping virtual nodes if needed. Replication strategies may also choose to skip nodes present in the same failure domain such as racks or datacenters so that Cassandra clusters can tolerate failures of whole racks and even datacenters of nodes.

-
-

Replication Strategy

-

Cassandra supports pluggable replication strategies, which determine which physical nodes act as replicas for a given token range. Every keyspace of data has its own replication strategy. All production deployments should use the NetworkTopologyStrategy while the SimpleStrategy replication strategy is useful only for testing clusters where you do not yet know the datacenter layout of the cluster.

-
-

NetworkTopologyStrategy

-

NetworkTopologyStrategy allows a replication factor to be specified for each datacenter in the cluster. Even if your cluster only uses a single datacenter, NetworkTopologyStrategy should be preferred over SimpleStrategy to make it easier to add new physical or virtual datacenters to the cluster later.

-

In addition to allowing the replication factor to be specified individually by datacenter, NetworkTopologyStrategy also attempts to choose replicas within a datacenter from different racks as specified by the Snitch. If the number of racks is greater than or equal to the replication factor for the datacenter, each replica is guaranteed to be chosen from a different rack. Otherwise, each rack will hold at least one replica, but some racks may hold more than one. Note that this rack-aware behavior has some potentially surprising implications. For example, if the racks do not hold equal numbers of nodes, the data load on the smallest rack may be much higher. Similarly, if a single node is bootstrapped into a brand new rack, it will be considered a replica for the entire ring. For this reason, many operators choose to configure all nodes in a single availability zone or similar failure domain as a single “rack”.
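As a rough sketch of that rack-spreading goal (this is not the actual NetworkTopologyStrategy code, and the topology below is invented):

```python
# Hypothetical topology for one datacenter: endpoint -> rack.
TOPOLOGY = {
    "10.0.1.1": "rack1", "10.0.1.2": "rack2", "10.0.1.3": "rack3",
    "10.0.1.4": "rack1", "10.0.1.5": "rack2", "10.0.1.6": "rack3",
}

def pick_replicas(ring_order: list, rf: int) -> list:
    """Walk endpoints in ring order, preferring racks that have not been used yet."""
    chosen, skipped, seen_racks = [], [], set()
    for endpoint in ring_order:
        rack = TOPOLOGY[endpoint]
        if rack not in seen_racks:
            chosen.append(endpoint)
            seen_racks.add(rack)
        else:
            skipped.append(endpoint)   # fall back to these if we run out of new racks
        if len(chosen) == rf:
            return chosen
    return (chosen + skipped)[:rf]

print(pick_replicas(["10.0.1.1", "10.0.1.4", "10.0.1.2", "10.0.1.6"], rf=3))
# ['10.0.1.1', '10.0.1.2', '10.0.1.6'] - one replica per rack where possible
```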

-
-
-

SimpleStrategy

-

SimpleStrategy allows a single integer replication_factor to be defined. This determines the number of nodes that should contain a copy of each row. For example, if replication_factor is 3, then three different nodes should store a copy of each row.

-

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to the set. This process continues until replication_factor distinct nodes have been added to the set of replicas.

-
-
-

Transient Replication

-

Transient replication is an experimental feature in Cassandra 4.0 not present in the original Dynamo paper. It allows you to configure a subset of replicas to only replicate data that hasn’t been incrementally repaired. This allows you to decouple data redundancy from availability. For instance, if you have a keyspace replicated at rf 3, and alter it to rf 5 with 2 transient replicas, you go from being able to tolerate one failed replica to being able to tolerate two, without a corresponding increase in storage usage. This is because 3 nodes will replicate all the data for a given token range, and the other 2 will only replicate data that hasn’t been incrementally repaired.

-

To use transient replication, you first need to enable it in cassandra.yaml. Once enabled, both SimpleStrategy and NetworkTopologyStrategy can be configured to transiently replicate data. You configure it by specifying the replication factor as <total_replicas>/<transient_replicas>.
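For example, a replication setting of 5/2 means five replicas in total, two of which are transient. A trivial, purely illustrative way to read that format:

```python
def parse_replication(spec: str):
    """Split a '<total_replicas>/<transient_replicas>' spec into (full, transient)."""
    total, transient = (int(part) for part in spec.split("/"))
    return total - transient, transient

full, transient = parse_replication("5/2")
print(full, transient)  # 3 full replicas, 2 transient replicas
```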

-

Transiently replicated keyspaces only support tables created with read_repair set to NONE, and monotonic reads are not currently supported. You also can’t use LWT, logged batches, or counters in 4.0. You will possibly never be able to use materialized views with transiently replicated keyspaces and probably never be able to use secondary indices with them.

-

Transient replication is an experimental feature that may not be ready for production use. The expected audience is experienced users of Cassandra capable of fully validating a deployment of their particular application. That means being able to check that operations like reads, writes, decommission, remove, rebuild, repair, and replace all work with your queries, data, configuration, operational practices, and availability requirements.

-

It is anticipated that 4.next will support monotonic reads with transient replication as well as LWT, logged batches, and counters.

-
-
-
-

Data Versioning

-

Cassandra uses mutation timestamp versioning to guarantee eventual consistency of data. Specifically, all mutations that enter the system do so with a timestamp provided either from a client clock or, absent a client-provided timestamp, from the coordinator node’s clock. Updates resolve according to the conflict resolution rule of last write wins. Cassandra’s correctness does depend on these clocks, so make sure a proper time synchronization process, such as NTP, is running.

-

Cassandra applies separate mutation timestamps to every column of every row within a CQL partition. Rows are guaranteed to be unique by primary key, and each column in a row resolves concurrent mutations according to last-write-wins conflict resolution. This means that updates to different primary keys within a partition can actually resolve without conflict! Furthermore, the CQL collection types such as maps and sets use this same conflict-free mechanism, meaning that concurrent updates to maps and sets are guaranteed to resolve as well.
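A toy illustration of this per-column, last-write-wins merge (the column names and timestamps are invented, and this is not Cassandra's internal representation):

```python
# Each replica's view of one row: column -> (value, write timestamp).
replica_a = {"name": ("alice", 100), "email": ("a@old.example", 100)}
replica_b = {"email": ("a@new.example", 200), "city": ("Paris", 150)}

def merge(*versions):
    """Resolve each column independently: the highest timestamp wins."""
    merged = {}
    for version in versions:
        for column, (value, ts) in version.items():
            if column not in merged or ts > merged[column][1]:
                merged[column] = (value, ts)
    return merged

print(merge(replica_a, replica_b))
# {'name': ('alice', 100), 'email': ('a@new.example', 200), 'city': ('Paris', 150)}
```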

-
-

Replica Synchronization

-

As replicas in Cassandra can accept mutations independently, it is possible for some replicas to have newer data than others. Cassandra has many best-effort techniques to drive convergence of replicas, including replica read repair in the read path and hinted handoff in the write path.

-

These techniques are only best-effort, however, and to guarantee eventual consistency Cassandra implements anti-entropy repair, where replicas calculate hierarchical hash-trees over their datasets, called Merkle Trees, that can then be compared across replicas to identify mismatched data. Like the original Dynamo paper, Cassandra supports “full” repairs where replicas hash their entire dataset, create Merkle trees, send them to each other and sync any ranges that don’t match.
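A minimal sketch of the Merkle tree comparison, assuming a token range split into four sub-ranges (the data and tree size are invented; real trees are far larger and are built from hashes of the partitions in each sub-range):

```python
import hashlib

def h(data: bytes) -> bytes:
    return hashlib.sha256(data).digest()

def merkle_root(leaves):
    """Hash pairs of nodes upward until a single root remains."""
    level = [h(leaf) for leaf in leaves]
    while len(level) > 1:
        if len(level) % 2:                    # duplicate the last node on odd levels
            level.append(level[-1])
        level = [h(level[i] + level[i + 1]) for i in range(0, len(level), 2)]
    return level[0]

# Two replicas' views of the same token range, split into four sub-ranges.
replica_a = [b"range0:v1", b"range1:v1", b"range2:v1", b"range3:v1"]
replica_b = [b"range0:v1", b"range1:v2", b"range2:v1", b"range3:v1"]  # one range drifted

if merkle_root(replica_a) != merkle_root(replica_b):
    mismatched = [i for i, (a, b) in enumerate(zip(replica_a, replica_b)) if h(a) != h(b)]
    print("sub-ranges needing sync:", mismatched)   # -> [1]
```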

-

Unlike the original Dynamo paper, Cassandra also implements sub-range repair and incremental repair. Sub-range repair allows Cassandra to increase the resolution of the hash trees (potentially down to the single partition level) by creating a larger number of trees that span only a portion of the data range. Incremental repair allows Cassandra to only repair the partitions that have changed since the last repair.

-
-
-
-

Tunable Consistency

-

Cassandra supports a per-operation tradeoff between consistency and availability through Consistency Levels. Cassandra’s consistency levels are a version of Dynamo’s R + W > N consistency mechanism where operators could configure the number of nodes that must participate in reads (R) and writes (W) to be larger than the replication factor (N). In Cassandra, you instead choose from a menu of common consistency levels which allow the operator to pick R and W behavior without knowing the replication factor. Generally, writes will be visible to subsequent reads when the read consistency level contains enough nodes to guarantee a quorum intersection with the write consistency level.

-

The following consistency levels are available:

-
-
ONE
-
Only a single replica must respond.
-
TWO
-
Two replicas must respond.
-
THREE
-
Three replicas must respond.
-
QUORUM
-
A majority (n/2 + 1) of the replicas must respond.
-
ALL
-
All of the replicas must respond.
-
LOCAL_QUORUM
-
A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
EACH_QUORUM
-
A majority of the replicas in each datacenter must respond.
-
LOCAL_ONE
-
Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not sent to replicas in a remote datacenter.
-
ANY
-
A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for write operations.
-
-

Write operations are always sent to all replicas, regardless of consistency level. The consistency level simply controls how many responses the coordinator waits for before responding to the client.

-

For read operations, the coordinator generally only issues read commands to enough replicas to satisfy the consistency level. The one exception to this is when speculative retry may issue a redundant read request to an extra replica if the original replicas have not responded within a specified time window.

-
-

Picking Consistency Levels

-

It is common to pick read and write consistency levels such that the replica sets overlap, resulting in all acknowledged writes being visible to subsequent reads. This is typically expressed in the same terms Dynamo does, in that W + R > RF, where W is the write consistency level, R is the read consistency level, and RF is the replication factor. For example, if RF = 3, a QUORUM request will require responses from at least 2 of the 3 replicas. If QUORUM is used for both writes and reads, at least one of the replicas is guaranteed to participate in both the write and the read request, which in turn guarantees that the quorums will overlap and the write will be visible to the read.
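The arithmetic behind that overlap guarantee is small enough to sketch directly (hypothetical helper functions, not a Cassandra API):

```python
def quorum(rf: int) -> int:
    """A quorum is a strict majority of the replicas: floor(rf / 2) + 1."""
    return rf // 2 + 1

def overlapping(write_replicas: int, read_replicas: int, rf: int) -> bool:
    """Writes are guaranteed to be visible to reads when W + R > RF."""
    return write_replicas + read_replicas > rf

rf = 3
w = r = quorum(rf)                 # QUORUM for both writes and reads
print(quorum(rf))                  # 2
print(overlapping(w, r, rf))       # True: 2 + 2 > 3, the replica sets must intersect
print(overlapping(1, 1, rf))       # False: ONE/ONE reads may miss an acknowledged write
```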

-

In a multi-datacenter environment, LOCAL_QUORUM can be used to provide a weaker but still useful guarantee: reads are guaranteed to see the latest write from within the same datacenter. This is often sufficient as clients homed to a single datacenter will read their own writes.

-

If this type of strong consistency isn’t required, lower consistency levels like LOCAL_ONE or ONE may be used to improve throughput, latency, and availability. With replication spanning multiple datacenters, LOCAL_ONE is typically less available than ONE but is faster as a rule. Indeed, ONE will succeed if a single replica is available in any datacenter.

-
-
-
-
-

Distributed Cluster Membership and Failure Detection

-

The replication protocols and dataset partitioning rely on knowing which nodes are alive and dead in the cluster so that write and read operations can be optimally routed. In Cassandra, liveness information is shared in a distributed fashion through a failure detection mechanism based on a gossip protocol.

-
-

Gossip

-

Gossip is how Cassandra propagates basic cluster bootstrapping information such as endpoint membership and internode network protocol versions. In Cassandra’s gossip system, nodes exchange state information not only about themselves but also about other nodes they know about. This information is versioned with a vector clock of (generation, version) tuples, where the generation is a monotonic timestamp and the version is a logical clock that increments roughly every second. These logical clocks allow Cassandra gossip to ignore old versions of cluster state just by inspecting the logical clocks presented with gossip messages.
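A minimal sketch of that comparison (the generation and version numbers are invented):

```python
def newer(a, b) -> bool:
    """Compare two (generation, version) pairs; generation wins, then the heartbeat version."""
    return a > b   # tuple comparison orders by generation first, then version

print(newer((1_700_000_200, 3), (1_700_000_100, 999)))  # True: a newer generation always wins
print(newer((1_700_000_200, 7), (1_700_000_200, 9)))    # False: same generation, older version
```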

-

Every node in the Cassandra cluster runs the gossip task independently and periodically. Every second, every node in the cluster:

-
    -
  1. Updates the local node’s heartbeat state (the version) and constructs the node’s local view of the cluster gossip endpoint state.
  2. Picks a random other node in the cluster to exchange gossip endpoint state with.
  3. Probabilistically attempts to gossip with any unreachable nodes (if any exist).
  4. Gossips with a seed node if that didn’t happen in step 2.
-

When an operator first bootstraps a Cassandra cluster they designate certain nodes as “seed” nodes. Any node can be a seed node, and the only difference between seed and non-seed nodes is that seed nodes are allowed to bootstrap into the ring without seeing any other seed nodes. Furthermore, once a cluster is bootstrapped, seed nodes become “hotspots” for gossip due to step 4 above.

-

As non-seed nodes must be able to contact at least one seed node in order to bootstrap into the cluster, it is common to include multiple seed nodes, often one for each rack or datacenter. Seed nodes are often chosen using existing off-the-shelf service discovery mechanisms.

-
-

Note

-

Nodes do not have to agree on the seed nodes, and indeed once a cluster is bootstrapped, newly launched nodes can be configured to use any existing nodes as “seeds”. The only advantage to picking the same nodes as seeds is that it increases their usefulness as gossip hotspots.

-
-

Currently, gossip also propagates token metadata and schema version information. This information forms the control plane for scheduling data movements and schema pulls. For example, if a node sees a mismatch in schema version in gossip state, it will schedule a schema sync task with the other nodes. As token information propagates via gossip it is also the control plane for teaching nodes which endpoints own what data.

-
-
-

Ring Membership and Failure Detection

-

Gossip forms the basis of ring membership, but the failure detector ultimately makes decisions about whether nodes are UP or DOWN. Every node in Cassandra runs a variant of the Phi Accrual Failure Detector, in which every node is constantly making an independent decision about whether its peer nodes are available or not. This decision is primarily based on received heartbeat state. For example, if a node does not see an increasing heartbeat from a node for a certain amount of time, the failure detector “convicts” that node, at which point Cassandra will stop routing reads to it (writes will typically be written to hints). If/when the node starts heartbeating again, Cassandra will try to reach out and connect, and if it can open communication channels it will mark that node as available.
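The local node’s view of its peers can be inspected from the command line (output omitted here); the first command reports each peer’s Up/Down and Normal/Leaving/Joining/Moving status, and the second dumps the raw gossip endpoint state, including heartbeat generation and version:

nodetool status
nodetool gossipinfo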

-
-

Note

-

UP and DOWN state are local node decisions and are not propagated with gossip. Heartbeat state is propagated with gossip, but nodes will not consider each other as “UP” until they can successfully message each other over an actual network channel.

-
-

Cassandra will never remove a node from gossip state without explicit instruction from an operator via a decommission operation or a new node bootstrapping with a replace_address_first_boot option. This choice is intentional to allow Cassandra nodes to temporarily fail without causing data to needlessly re-balance. This also helps to prevent simultaneous range movements, where multiple replicas of a token range are moving at the same time, which can violate monotonic consistency and can even cause data loss.

-
-
-
-

Incremental Scale-out on Commodity Hardware

-

Cassandra scales-out to meet the requirements of growth in data size and request rates. Scaling-out means adding additional nodes to the ring, and every additional node brings linear improvements in compute and storage. In contrast, scaling-up implies adding more capacity to the existing database nodes. Cassandra is also capable of scale-up, and in certain environments it may be preferable depending on the deployment. Cassandra gives operators the flexibility to choose either scale-out or scale-up.

-

One key aspect of Dynamo that Cassandra follows is to attempt to run on commodity hardware, and many engineering choices are made under this assumption. For example, Cassandra assumes nodes can fail at any time, auto-tunes to make the best use of CPU and memory resources available, and makes heavy use of advanced compression and caching techniques to get the most storage out of limited memory and storage capabilities.

-
-

Simple Query Model

-

Cassandra, like Dynamo, chooses not to provide cross-partition transactions that are common in SQL Relational Database Management Systems (RDBMS). This both gives the programmer a simpler read and write API, and allows Cassandra to more easily scale horizontally, since multi-partition transactions spanning multiple nodes are notoriously difficult to implement and typically incur high latency.

-

Instead, Cassandra chooses to offer fast, consistent latency at any scale for single partition operations, allowing retrieval of entire partitions or only subsets of partitions based on primary key filters. Furthermore, Cassandra does support single partition compare-and-swap functionality via the lightweight transaction CQL API.

-
-
-

Simple Interface for Storing Records

-

Cassandra, in a slight departure from Dynamo, chooses a storage interface that is more sophisticated than “simple key value” stores but significantly less complex than SQL relational data models. Cassandra presents a wide-column store interface, where partitions of data contain multiple rows, each of which contains a flexible set of individually typed columns. Every row is uniquely identified by the partition key and one or more clustering keys, and every row can have as many columns as needed.

-

This allows users to flexibly add new columns to existing datasets as new requirements surface. Schema changes involve only metadata changes and run fully concurrently with live workloads. Therefore, users can safely add columns to existing Cassandra databases while remaining confident that query performance will not degrade.
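As a sketch of this model (the table below is hypothetical), the partition key groups related rows, the clustering key orders rows within the partition, and new columns can be added later as a metadata-only change:

CREATE TABLE sensor_readings (
    sensor_id   uuid,        -- partition key: identifies the partition
    reported_at timestamp,   -- clustering key: orders rows within the partition
    temperature double,
    PRIMARY KEY (sensor_id, reported_at)
);

ALTER TABLE sensor_readings ADD humidity double;  -- runs concurrently with live workloads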

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/architecture/guarantees.html b/src/doc/4.0-alpha4/architecture/guarantees.html deleted file mode 100644 index d2576c19d..000000000 --- a/src/doc/4.0-alpha4/architecture/guarantees.html +++ /dev/null @@ -1,175 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Guarantees" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Guarantees

-

Apache Cassandra is a highly scalable and reliable database. Cassandra is used in web based applications that serve a large number of clients and process web-scale (petabyte) quantities of data. Cassandra makes some guarantees about its scalability, availability and reliability. To fully understand the inherent limitations of a storage system in an environment in which a certain level of network partition failure is to be expected, it is important to first briefly introduce the CAP theorem.

-
-

What is CAP?

-

According to the CAP theorem it is not possible for a distributed data store to provide more than two of the following guarantees simultaneously.

-
    -
  • Consistency: Consistency implies that every read receives the most recent write or errors out
  • -
  • Availability: Availability implies that every request receives a response. It is not guaranteed that the response contains the most recent write or data.
  • -
  • Partition tolerance: Partition tolerance refers to the tolerance of a storage system to a network partition. Even if some of the messages between nodes are dropped or delayed, the system continues to operate.
  • -
-

The CAP theorem implies that for a system operating across a network, with the inherent risk of network partitions, one has to choose between consistency and availability; both cannot be guaranteed at the same time. The CAP theorem is illustrated in Figure 1.

-
-../_images/Figure_1_guarantees.jpg -
-

Figure 1. CAP Theorem

-

High availability is a priority in web based applications and to this objective Cassandra chooses Availability and Partition Tolerance from the CAP guarantees, compromising on data Consistency to some extent.

-

Cassandra makes the following guarantees.

-
    -
  • High Scalability
  • -
  • High Availability
  • -
  • Durability
  • -
  • Eventual Consistency of writes to a single table
  • -
  • Lightweight transactions with linearizable consistency
  • -
  • Batched writes across multiple tables are guaranteed to succeed completely or not at all
  • -
  • Secondary indexes are guaranteed to be consistent with their local replicas data
  • -
-
-
-

High Scalability

-

Cassandra is a highly scalable storage system in which nodes may be added or removed as needed. Using a gossip-based protocol, a unified and consistent membership list is kept at each node.

-
-
-

High Availability

-

Cassandra guarantees high availability of data by implementing a fault-tolerant storage system. Node failures are detected using a gossip-based protocol.

-
-
-

Durability

-

Cassandra guarantees data durability by using replicas. Replicas are multiple copies of data stored on different nodes in a cluster. In a multi-datacenter environment the replicas may be stored in different datacenters. If one replica is lost due to an unrecoverable node or datacenter failure, the data is not completely lost as replicas are still available.
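Replication is declared per keyspace. For example, a hypothetical keyspace keeping three replicas in each of two datacenters (the keyspace and datacenter names are placeholders) survives the loss of individual nodes or even an entire datacenter:

CREATE KEYSPACE telemetry
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3};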

-
-
-

Eventual Consistency

-

To meet the requirements of performance, reliability, scalability and high availability in production, Cassandra is an eventually consistent storage system. Eventually consistent implies that all updates reach all replicas eventually. Divergent versions of the same data may exist temporarily, but they are eventually reconciled to a consistent state. Eventual consistency is a tradeoff made to achieve high availability, and it involves some read and write latency.

-
-
-

Lightweight transactions with linearizable consistency

-

Linearizability requires that data be read and written in a sequential order. The Paxos consensus protocol is used to implement lightweight transactions that can handle concurrent operations with linearizable consistency. Linearizable consistency is sequential consistency with real-time constraints, and it ensures transaction isolation with compare-and-set (CAS) operations: replica data is compared, and any data found to be out of date is set to the most consistent value. Reads with linearizable consistency allow reading the current state of the data, which may possibly be uncommitted, without making a new addition or update.
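In CQL, lightweight transactions are expressed as conditional statements. A minimal sketch (the users table is hypothetical):

-- The insert only applies if no row with this primary key exists.
INSERT INTO users (username, email) VALUES ('alice', 'alice@example.com') IF NOT EXISTS;

-- The update only applies if the current value matches the condition.
UPDATE users SET email = 'a.new@example.com' WHERE username = 'alice' IF email = 'alice@example.com';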

-
-
-

Batched Writes

-

The guarantee for batched writes across multiple tables is that they will eventually succeed, or none will. Batch data is first written to the batchlog system table, and when the batch data has been successfully stored in the cluster the batchlog data is removed. The batch is replicated to another node to ensure that the full batch completes even if the coordinator node fails.
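A logged batch in CQL illustrates this guarantee; in this hypothetical sketch, either both tables eventually reflect the change or neither does:

BEGIN BATCH
    UPDATE users          SET email = 'a.new@example.com' WHERE username = 'alice';
    UPDATE users_by_email SET username = 'alice'          WHERE email = 'a.new@example.com';
APPLY BATCH;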

-
-
-

Secondary Indexes

-

A secondary index is an index on a column, and is used to query a table by a column that is not normally queryable. Secondary indexes, when built, are guaranteed to be consistent with their local replicas.
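A minimal sketch (the table and column are hypothetical):

CREATE INDEX users_by_country ON users (country);

-- The indexed column can now be used in a WHERE clause.
SELECT username FROM users WHERE country = 'NZ';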

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/architecture/index.html b/src/doc/4.0-alpha4/architecture/index.html deleted file mode 100644 index 453fe6352..000000000 --- a/src/doc/4.0-alpha4/architecture/index.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Architecture" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha4/architecture/overview.html b/src/doc/4.0-alpha4/architecture/overview.html deleted file mode 100644 index 9522aa6aa..000000000 --- a/src/doc/4.0-alpha4/architecture/overview.html +++ /dev/null @@ -1,198 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Overview" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Overview

-

Apache Cassandra is an open source, distributed, NoSQL database. It presents -a partitioned wide column storage model with eventually consistent semantics.

-

Apache Cassandra was initially designed at Facebook -using a staged event-driven architecture (SEDA) to implement a combination of -Amazon’s Dynamo -distributed storage and replication techniques combined with Google’s Bigtable -data and storage engine model. Dynamo and Bigtable were both developed to meet -emerging requirements for scalable, reliable and highly available storage -systems, but each had areas that could be improved.

-

Cassandra was designed as a best-in-class combination of both systems to meet emerging large-scale storage requirements, in both data footprint and query volume. As applications began to require full global replication and always available low-latency reads and writes, it became imperative to design a new kind of database model, as the relational database systems of the time struggled to meet the new requirements of global scale applications.

-

Systems like Cassandra are designed for these challenges and seek the -following design objectives:

-
    -
  • Full multi-master database replication
  • -
  • Global availability at low latency
  • -
  • Scaling out on commodity hardware
  • -
  • Linear throughput increase with each additional processor
  • -
  • Online load balancing and cluster growth
  • -
  • Partitioned key-oriented queries
  • -
  • Flexible schema
  • -
-
-

Features

-

Cassandra provides the Cassandra Query Language (CQL), an SQL-like language, to create and update database schema and access data. CQL allows users to organize data within a cluster of Cassandra nodes using the following concepts (a short example follows the list):

-
    -
  • Keyspace: defines how a dataset is replicated, for example in which -datacenters and how many copies. Keyspaces contain tables.
  • -
  • Table: defines the typed schema for a collection of partitions. New columns can be added to Cassandra tables with zero downtime. Tables contain partitions, which contain rows, which contain columns.
  • -
  • Partition: defines the mandatory part of the primary key all rows in -Cassandra must have. All performant queries supply the partition key in -the query.
  • -
  • Row: contains a collection of columns identified by a unique primary key -made up of the partition key and optionally additional clustering keys.
  • -
  • Column: a single datum with a type which belongs to a row.
  • -
-
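The following hypothetical sketch ties these concepts together:

CREATE KEYSPACE shop
    WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};

CREATE TABLE shop.orders_by_customer (
    customer_id uuid,      -- partition key: determines which replicas own the data
    order_id    timeuuid,  -- clustering key: orders the rows within a partition
    total       decimal,   -- regular column
    PRIMARY KEY (customer_id, order_id)
);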

CQL supports numerous advanced features over a partitioned dataset such as:

-
    -
  • Single partition lightweight transactions with atomic compare and set -semantics.
  • -
  • User-defined types, functions and aggregates
  • -
  • Collection types including sets, maps, and lists.
  • -
  • Local secondary indices
  • -
  • (Experimental) materialized views
  • -
-

Cassandra explicitly chooses not to implement operations that require cross -partition coordination as they are typically slow and hard to provide highly -available global semantics. For example Cassandra does not support:

-
    -
  • Cross partition transactions
  • -
  • Distributed joins
  • -
  • Foreign keys or referential integrity.
  • -
-
-
-

Operating

-

Apache Cassandra is configured through settings in the cassandra.yaml file, which can be edited by hand or with the aid of configuration management tools. Some settings can be manipulated live using an online interface, but others require a restart of the database to take effect.

-

Cassandra provides tools for managing a cluster. The nodetool command interacts with Cassandra’s live control interface, allowing runtime manipulation of many settings from cassandra.yaml. The auditlogviewer is used to view the audit logs. The fqltool is used to view, replay and compare full query logs. The auditlogviewer and fqltool are new tools in Apache Cassandra 4.0.
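For example, compaction throughput is one of the settings from cassandra.yaml that can be inspected and changed at runtime with nodetool (a sketch; the value shown is arbitrary):

nodetool getcompactionthroughput
nodetool setcompactionthroughput 64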

-

In addition, Cassandra supports out of the box atomic snapshot functionality, -which presents a point in time snapshot of Cassandra’s data for easy -integration with many backup tools. Cassandra also supports incremental backups -where data can be backed up as it is written.

-

Apache Cassandra 4.0 has added several new features including virtual tables, transient replication, audit logging, full query logging, and support for Java 11. Two of these features are experimental: transient replication and Java 11 support.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/architecture/storage_engine.html b/src/doc/4.0-alpha4/architecture/storage_engine.html deleted file mode 100644 index 3259d0153..000000000 --- a/src/doc/4.0-alpha4/architecture/storage_engine.html +++ /dev/null @@ -1,294 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Storage Engine" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Storage Engine

-
-

CommitLog

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables.

-

All mutations are write-optimized by being stored in commitlog segments, reducing the number of seeks needed to write to disk. Commitlog segments are limited by the “commitlog_segment_size_in_mb” option; once the size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all of their data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running “nodetool drain” before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup.

-
    -
  • commitlog_segment_size_in_mb: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via the max_mutation_size_in_kb setting in cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024.
  • -
-

*NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*

-

Default Value: 32

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied.

-
    -
  • commitlog_sync: may be either “periodic” or “batch.”

    -
      -
    • batch: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait “commitlog_sync_batch_window_in_ms” milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason.

      -
        -
      • commitlog_sync_batch_window_in_ms: Time to wait between “batch” fsyncs
      • -
      -

      Default Value: 2

      -
    • -
    • periodic: In periodic mode, writes are immediately ack’ed, and the CommitLog is simply synced every “commitlog_sync_period_in_ms” milliseconds.

      -
        -
      • commitlog_sync_period_in_ms: Time to wait between “periodic” fsyncs
      • -
      -

      Default Value: 10000

      -
    • -
    -
  • -
-

Default Value: batch

-

* NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period or more if the sync is delayed. If using “batch” mode, it is recommended to store commitlogs in a separate, dedicated device.

-
    -
  • commitlog_directory: This option is commented out by default. When running on magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
  • -
-

Default Value: /var/lib/cassandra/commitlog

-
    -
  • commitlog_compression: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported.
  • -
-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
    -
  • commitlog_total_space_in_mb: Total space to use for commit logs on disk.
  • -
-

If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume.

-

Default Value: 8192
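As a sketch, the commitlog-related portion of cassandra.yaml brings these options together (values are illustrative and mirror those discussed above; the compression block is optional and commented out by default):

commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000
commitlog_segment_size_in_mb: 32
commitlog_total_space_in_mb: 8192
commitlog_directory: /var/lib/cassandra/commitlog
# commitlog_compression:
#   - class_name: LZ4Compressor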

-
-
-

Memtables

-

Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable SSTables. This can be triggered in several -ways:

-
    -
  • The memory usage of the memtables exceeds the configured threshold (see memtable_cleanup_threshold)
  • -
  • The CommitLog approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to -be freed
  • -
-

Memtables may be stored entirely on-heap or partially off-heap, depending on memtable_allocation_type.

-
-
-

SSTables

-

SSTables are the immutable data files that Cassandra uses for persisting data on disk.

-

As SSTables are flushed to disk from Memtables or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.

-

Each SSTable is comprised of multiple components stored in separate files:

-
-
Data.db
-
The actual data, i.e. the contents of rows.
-
Index.db
-
An index from partition keys to positions in the Data.db file. For wide partitions, this may also include an -index to rows within a partition.
-
Summary.db
-
A sampling of (by default) every 128th entry in the Index.db file.
-
Filter.db
-
A Bloom Filter of the partition keys in the SSTable.
-
CompressionInfo.db
-
Metadata about the offsets and lengths of compression chunks in the Data.db file.
-
Statistics.db
-
Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, -repair, compression, TTLs, and more.
-
Digest.crc32
-
A CRC-32 digest of the Data.db file.
-
TOC.txt
-
A plain text list of the component files for the SSTable.
-
-

Within the Data.db file, rows are organized by partition. These partitions are sorted in token order (i.e. by a hash of the partition key when the default partitioner, Murmur3Partitioner, is used). Within a partition, rows are stored in the order of their clustering keys.

-

SSTables can be optionally compressed using block-based compression.

-
-
-

SSTable Versions

-

This section was created using the following -gist -which utilized this original -source.

-

The version numbers, to date are:

-
-

Version 0

-
    -
  • b (0.7.0): added version to sstable filenames
  • -
  • c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings
  • -
  • d (0.7.0): row size in data component becomes a long instead of int
  • -
  • e (0.7.0): stores undecorated keys in data and index components
  • -
  • f (0.7.0): switched bloom filter implementations in data component
  • -
  • g (0.8): tracks flushed-at context in metadata component
  • -
-
-
-

Version 1

-
    -
  • h (1.0): tracks max client timestamp in metadata component
  • -
  • hb (1.0.3): records compression ratio in metadata component
  • -
  • hc (1.0.4): records partitioner in metadata component
  • -
  • hd (1.0.10): includes row tombstones in maxtimestamp
  • -
  • he (1.1.3): includes ancestors generation in metadata component
  • -
  • hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782)
  • -
  • ia (1.2.0):
      -
    • column indexes are promoted to the index file
    • -
    • records estimated histogram of deletion times in tombstones
    • -
    • bloom filter (keys and columns) upgraded to Murmur3
    • -
    -
  • -
  • ib (1.2.1): tracks min client timestamp in metadata component
  • -
  • ic (1.2.5): omits per-row bloom filter of column names
  • -
-
-
-

Version 2

-
    -
  • ja (2.0.0):
      -
    • super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. We do need a major version bump however, because we should not allow streaming of super columns into this new format)
    • -
    • tracks max local deletiontime in sstable metadata
    • -
    • records bloom_filter_fp_chance in metadata component
    • -
    • remove data size and column count from data file (CASSANDRA-4180)
    • -
    • tracks max/min column values (according to comparator)
    • -
    -
  • -
  • jb (2.0.1):
      -
    • switch from crc32 to adler32 for compression checksums
    • -
    • checksum the compressed data
    • -
    -
  • -
  • ka (2.1.0):
      -
    • new Statistics.db file format
    • -
    • index summaries can be downsampled and the sampling level is persisted
    • -
    • switch uncompressed checksums to adler32
    • -
    • tracks presence of legacy (local and remote) counter shards
    • -
    -
  • -
  • la (2.2.0): new file name format
  • -
  • lb (2.2.7): commit log lower bound included
  • -
-
-
-

Version 3

-
    -
  • ma (3.0.0):
      -
    • swap bf hash order
    • -
    • store rows natively
    • -
    -
  • -
  • mb (3.0.7, 3.7): commit log lower bound included
  • -
  • mc (3.0.8, 3.9): commit log intervals included
  • -
-
-
-

Example Code

-

The following example is useful for finding all sstables that do not match the “ib” SSTable version:

-
find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots"
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/bugs.html b/src/doc/4.0-alpha4/bugs.html deleted file mode 100644 index 65650fd9c..000000000 --- a/src/doc/4.0-alpha4/bugs.html +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Reporting Bugs" -doc-header-links: ' - - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Reporting Bugs

-

If you encounter a problem with Cassandra, the first places to ask for help are the user mailing list and the cassandra Slack room.

-

If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a ticket through the Apache Cassandra JIRA. Please provide as much detail as you can on your problem, and don’t forget to indicate which version of Cassandra you are running and on which environment.

-

Further details on how to contribute can be found at our Contributing to Cassandra section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/configuration/cassandra_config_file.html b/src/doc/4.0-alpha4/configuration/cassandra_config_file.html deleted file mode 100644 index 85cc3193d..000000000 --- a/src/doc/4.0-alpha4/configuration/cassandra_config_file.html +++ /dev/null @@ -1,1950 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Configuring Cassandra" - -doc-title: "Cassandra Configuration File" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Cassandra Configuration File

-
-

cluster_name

-

The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another.

-

Default Value: ‘Test Cluster’

-
-
-

num_tokens

-

This defines the number of tokens randomly assigned to this node on the ring. The more tokens, relative to other nodes, the larger the proportion of data that this node will store. You probably want all nodes to have the same number of tokens, assuming they have equal hardware capability.

-

If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below.

-

Specifying initial_token will override this setting on the node’s initial start; on subsequent starts, this setting will apply even if initial_token is set.

-

If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations

-

Default Value: 256

-
-
-

allocate_tokens_for_keyspace

-

This option is commented out by default.

-

Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replica factor.

-

The load assigned to each node will be close to proportional to its number of -vnodes.

-

Only supported with the Murmur3Partitioner.

-

Replica factor is determined via the replication strategy used by the specified -keyspace.

-

Default Value: KEYSPACE

-
-
-

allocate_tokens_for_local_replication_factor

-

This option is commented out by default.

-

Replica factor is explicitly set, regardless of keyspace or datacenter. -This is the replica factor within the datacenter, like NTS.

-

Default Value: 3

-
-
-

initial_token

-

This option is commented out by default.

-

initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) – in which case you should provide a -comma-separated list – it’s primarily used when adding nodes to legacy clusters -that do not have vnodes enabled.

-
-
-

hinted_handoff_enabled

-

See http://wiki.apache.org/cassandra/HintedHandoff. May either be “true” or “false” to enable globally.

-

Default Value: true

-
-
-

hinted_handoff_disabled_datacenters

-

This option is commented out by default.

-

When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff

-

Default Value (complex option):

-
#    - DC1
-#    - DC2
-
-
-
-
-

max_hint_window_in_ms

-

this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again.

-

Default Value: 10800000 # 3 hours

-
-
-

hinted_handoff_throttle_in_kb

-

Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.)

-

Default Value: 1024

-
-
-

max_hints_delivery_threads

-

Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower

-

Default Value: 2

-
-
-

hints_directory

-

This option is commented out by default.

-

Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints.

-

Default Value: /var/lib/cassandra/hints

-
-
-

hints_flush_period_in_ms

-

How often hints should be flushed from the internal buffers to disk. -Will not trigger fsync.

-

Default Value: 10000

-
-
-

max_hints_file_size_in_mb

-

Maximum size for a single hints file, in megabytes.

-

Default Value: 128

-
-
-

hints_compression

-

This option is commented out by default.

-

Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

batchlog_replay_throttle_in_kb

-

Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster.

-

Default Value: 1024

-
-
-

authenticator

-

Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}.

-
    -
  • AllowAllAuthenticator performs no checks - set it to disable authentication.
  • -
  • PasswordAuthenticator relies on username/password pairs to authenticate -users. It keeps usernames and hashed passwords in system_auth.roles table. -Please increase system_auth keyspace replication factor if you use this authenticator. -If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
  • -
-

Default Value: AllowAllAuthenticator

-
-
-

authorizer

-

Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}.

-
    -
  • AllowAllAuthorizer allows any action to any user - set it to disable authorization.
  • -
  • CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllAuthorizer

-
-
-

role_manager

-

Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable.

-
    -
  • CassandraRoleManager stores role data in the system_auth keyspace. Please -increase system_auth keyspace replication factor if you use this role manager.
  • -
-

Default Value: CassandraRoleManager
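For example, a cluster that requires password login and permission checks might set the following in cassandra.yaml (a sketch; remember to raise the system_auth keyspace replication factor as noted above):

authenticator: PasswordAuthenticator
authorizer: CassandraAuthorizer
role_manager: CassandraRoleManager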

-
-
-

network_authorizer

-

Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}.

-
    -
  • AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization.
  • -
  • CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllNetworkAuthorizer

-
-
-

roles_validity_in_ms

-

Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator.

-

Default Value: 2000

-
-
-

roles_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms.

-

Default Value: 2000

-
-
-

permissions_validity_in_ms

-

Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer.

-

Default Value: 2000

-
-
-

permissions_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms.

-

Default Value: 2000

-
-
-

credentials_validity_in_ms

-

Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching.

-

Default Value: 2000

-
-
-

credentials_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms.

-

Default Value: 2000

-
-
-

partitioner

-

The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. The partitioner can NOT be -changed without reloading all data. If you are adding nodes or upgrading, -you should set this to the same partitioner that you are currently using.

-

The default partitioner is the Murmur3Partitioner. Older partitioners -such as the RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner have been included for backward compatibility only. -For new clusters, you should NOT change this value.

-

Default Value: org.apache.cassandra.dht.Murmur3Partitioner

-
-
-

data_file_directories

-

This option is commented out by default.

-

Directories where Cassandra should store data on disk. If multiple -directories are specified, Cassandra will spread data evenly across -them by partitioning the token ranges. -If not set, the default directory is $CASSANDRA_HOME/data/data.

-

Default Value (complex option):

-
#     - /var/lib/cassandra/data
-
-
-
-
-

commitlog_directory

-

This option is commented out by default. Directory where Cassandra should store the commit log. When running on magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.

-

Default Value: /var/lib/cassandra/commitlog

-
-
-

cdc_enabled

-

Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory).

-

Default Value: false

-
-
-

cdc_raw_directory

-

This option is commented out by default.

-

CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw.

-

Default Value: /var/lib/cassandra/cdc_raw

-
-
-

disk_failure_policy

-

Policy for data disk failures:

-
-
die
-
shut down gossip and client transports and kill the JVM for any fs errors or -single-sstable errors, so the node can be replaced.
-
stop_paranoid
-
shut down gossip and client transports even for single-sstable errors, -kill the JVM for errors during startup.
-
stop
-
shut down gossip and client transports, leaving the node effectively dead, but -can still be inspected via JMX, kill the JVM for errors during startup.
-
best_effort
-
stop using the failed disk and respond to requests based on -remaining available sstables. This means you WILL see obsolete -data at CL.ONE!
-
ignore
-
ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-
-

Default Value: stop

-
-
-

commit_failure_policy

-

Policy for commit disk failures:

-
-
die
-
shut down the node and kill the JVM, so the node can be replaced.
-
stop
-
shut down the node, leaving the node effectively dead, but -can still be inspected via JMX.
-
stop_commit
-
shutdown the commit log, letting writes collect but -continuing to service reads, as in pre-2.0.5 Cassandra
-
ignore
-
ignore fatal errors and let the batches fail
-
-

Default Value: stop

-
-
-

prepared_statements_cache_size_mb

-

Maximum size of the native protocol prepared statement cache

-

Valid values are either “auto” (omitting the value) or a value greater than 0.

-

Note that specifying too large a value will result in long running GCs and possibly out-of-memory errors. Keep the value at a small fraction of the heap.

-

If you constantly see “prepared statements discarded in the last minute because -cache limit reached” messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts.

-

Only change the default value if you really have more prepared statements than fit in the cache. In most cases it is not necessary to change this value. Constantly re-preparing statements is a performance penalty.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

key_cache_size_in_mb

-

Maximum size of the key cache in memory.

-

Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it’s worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It’s best to only use the -row cache if you have hot rows or static rows.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.

-
-
-

key_cache_save_period

-

Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 14400 or 4 hours.

-

Default Value: 14400

-
-
-

key_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

row_cache_class_name

-

This option is commented out by default.

-

Row cache implementation class name. Available implementations:

-
-
org.apache.cassandra.cache.OHCProvider
-
Fully off-heap row cache implementation (default).
-
org.apache.cassandra.cache.SerializingCacheProvider
-
This is the row cache implementation available in previous releases of Cassandra.
-
-

Default Value: org.apache.cassandra.cache.OHCProvider

-
-
-

row_cache_size_in_mb

-

Maximum size of the row cache in memory. Please note that the OHC cache implementation requires some additional off-heap memory to manage the map structures and some in-flight memory during operations before/after cache entries can be accounted against the cache capacity. This overhead is usually small compared to the whole capacity. Do not specify more memory than the system can afford in the worst usual situation, and leave some headroom for the OS block level cache. Never allow your system to swap.

-

Default value is 0, to disable row caching.

-

Default Value: 0

-
-
-

row_cache_save_period

-

Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 0 to disable saving the row cache.

-

Default Value: 0

-
-
-

row_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved

-

Default Value: 100

-
-
-

counter_cache_size_in_mb

-

Maximum size of the counter cache in memory.

-

Counter cache helps to reduce counter locks’ contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it’s relatively cheap.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.

-
-
-

counter_cache_save_period

-

Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Default is 7200 or 2 hours.

-

Default Value: 7200

-
-
-

counter_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

saved_caches_directory

-

This option is commented out by default.

-

Directory where Cassandra should store saved caches. If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.

-

Default Value: /var/lib/cassandra/saved_caches

-
-
-

commitlog_sync_batch_window_in_ms

-

This option is commented out by default.

-

commitlog_sync may be either “periodic”, “group”, or “batch.”

-

When in batch mode, Cassandra won’t ack writes until the commit log -has been flushed to disk. Each incoming write will trigger the flush task. -commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had -almost no value, and is being removed.

-

Default Value: 2

-
-
-

commitlog_sync_group_window_in_ms

-

This option is commented out by default.

-

group mode is similar to batch mode, where Cassandra will not ack writes -until the commit log has been flushed to disk. The difference is group -mode will wait up to commitlog_sync_group_window_in_ms between flushes.

-

Default Value: 1000

-
-
-

commitlog_sync

-

the default option is “periodic” where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds.

-

Default Value: periodic

-
-
-

commitlog_sync_period_in_ms

-

Default Value: 10000

-
-
-

periodic_commitlog_sync_lag_block_in_ms

-

This option is commented out by default.

-

When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete.

-
-
-

commitlog_segment_size_in_mb

-

The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables.

-

The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via the max_mutation_size_in_kb setting in cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024. This should be positive and less than 2048.

-

NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024

-

Default Value: 32

-
-
-

commitlog_compression

-

This option is commented out by default.

-

Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

seed_provider

-

any class that implements the SeedProvider interface and has a -constructor that takes a Map<String, String> of parameters will do.

-

Default Value (complex option):

-
# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring.  You must change this if you are running
-# multiple nodes!
-- class_name: org.apache.cassandra.locator.SimpleSeedProvider
-  parameters:
-      # seeds is actually a comma-delimited list of addresses.
-      # Ex: "<ip1>,<ip2>,<ip3>"
-      - seeds: "127.0.0.1:7000"
-
-
-
-
-

concurrent_reads

-

For workloads with more data than can fit in memory, Cassandra’s -bottleneck will be reads that need to fetch data from -disk. “concurrent_reads” should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -“concurrent_counter_writes”, since counter writes read the current -values before incrementing and writing them back.

-

On the other hand, since writes are almost never IO bound, the ideal -number of “concurrent_writes” is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb.

-

Default Value: 32

-
-
-

concurrent_writes

-

Default Value: 32

-
-
-

concurrent_counter_writes

-

Default Value: 32

-
-
-

concurrent_materialized_view_writes

-

For materialized view writes, as there is a read involved, this should be limited by the lesser of concurrent reads or concurrent writes.

-

Default Value: 32

-
-
-

file_cache_size_in_mb

-

This option is commented out by default.

-

Maximum memory to use for sstable chunk cache and buffer pooling. 32MB of this are reserved for pooling buffers, the rest is used as a cache that holds uncompressed sstable chunks. Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, so it is in addition to the memory allocated for heap. The cache also has on-heap overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size if the default 64k chunk size is used). Memory is only allocated when needed.

-

Default Value: 512

-
-
-

buffer_pool_use_heap_if_exhausted

-

This option is commented out by default.

-

Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

-

Default Value: true

-
-
-

disk_optimization_strategy

-

This option is commented out by default.

-

The strategy for optimizing disk reads. Possible values are: ssd (for solid state disks, the default) or spinning (for spinning disks).

-

Default Value: ssd

-
-
-

memtable_heap_space_in_mb

-

This option is commented out by default.

-

Total permitted memory to use for memtables. Cassandra will stop accepting writes when the limit is exceeded until a flush completes, and will trigger a flush based on memtable_cleanup_threshold. If omitted, Cassandra will set both to 1/4 the size of the heap.

-

Default Value: 2048

-
-
-

memtable_offheap_space_in_mb

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

memtable_cleanup_threshold

-

This option is commented out by default.

-

memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information.

-

Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load.

-

memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)

-

Default Value: 0.11

-
-
-

memtable_allocation_type

-

Specify the way Cassandra allocates and manages memtable memory. -Options are:

-
-
heap_buffers
-
on heap nio buffers
-
offheap_buffers
-
off heap (direct) nio buffers
-
offheap_objects
-
off heap objects
-
-

Default Value: heap_buffers

-
-
-

repair_session_space_in_mb

-

This option is commented out by default.

-

Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair.

-

For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.

-
-
-

commitlog_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for commit logs on disk.

-

If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume.

-

Default Value: 8192

-
-
-

memtable_flush_writers

-

This option is commented out by default.

-

This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound.

-

Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time.

-

You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory.

-

memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers.

-

Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead.

-

There is a direct tradeoff between the number of memtables that can be flushed concurrently and flush size and frequency. More is not better; you just need enough flush writers to never stall waiting for flushing to free memory.

-

Default Value: 2

-
-
-

cdc_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for change-data-capture logs on disk.

-

If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed.

-

The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides.

-

Default Value: 4096

-
-
-

cdc_free_space_check_interval_ms

-

This option is commented out by default.

-

When we hit our cdc_raw limit and the CDCCompactor is either running behind or experiencing backpressure, we check at the following interval to see if any new space for cdc-tracked tables has been made available. Defaults to 250ms.

-

Default Value: 250

-
-
-

index_summary_capacity_in_mb

-

A fixed memory pool size in MB for SSTable index summaries. If left empty, this will default to 5% of the heap size. If the memory usage of all index summaries exceeds this limit, SSTables with low read rates will shrink their index summaries in order to meet this limit. However, this is a best-effort process. In extreme conditions Cassandra may need to use more than this amount of memory.

-
-
-

index_summary_resize_interval_in_minutes

-

How frequently index summaries should be resampled. This is done periodically to redistribute memory from the fixed-size pool to sstables in proportion to their recent read rates. Setting to -1 will disable this process, leaving existing index summaries at their current sampling level.

-

Default Value: 60

-
-
-

trickle_fsync

-

Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters.

-

Default Value: false

-
-
-

trickle_fsync_interval_in_kb

-

Default Value: 10240

-
-
-
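For SSD-backed data directories, a hedged sketch combining the two options above; the interval shown simply mirrors the documented default:

```yaml
# Sketch only: fsync every 10240 KB of sequential writes so dirty buffers are
# flushed gradually instead of in large bursts that hurt read latency.
trickle_fsync: true
trickle_fsync_interval_in_kb: 10240
```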

storage_port

-

TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7000

-
-
-

ssl_storage_port

-

SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7001

-
-
-

listen_address

-

Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate!

-

Set listen_address OR listen_interface, not both.

-

Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be).

-

Setting listen_address to 0.0.0.0 is always wrong.

-

Default Value: localhost

-
-
-
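A minimal multi-node sketch for the option above; the 10.0.1.11 address is a placeholder for the node's own routable IP, and the interface form is shown only as the mutually exclusive alternative:

```yaml
# Sketch only: bind to this node's own address so peers can connect to it.
# Never set listen_address to 0.0.0.0.
listen_address: 10.0.1.11
# listen_interface: eth0   # alternative; set the address OR the interface, not both
```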

listen_interface

-

This option is commented out by default.

-

Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth0

-
-
-

listen_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_address

-

This option is commented out by default.

-

Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address

-

Default Value: 1.2.3.4

-
-
-

listen_on_broadcast_address

-

This option is commented out by default.

-

When using multiple physical network interfaces, set this to true to listen on broadcast_address in addition to the listen_address, allowing nodes to communicate on both interfaces. Ignore this property if the network configuration automatically routes between the public and private networks, such as on EC2.

-

Default Value: false

-
-
-

internode_authenticator

-

This option is commented out by default.

-

Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes.

-

Default Value: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

-
-
-

start_native_transport

-

Whether to start the native transport server. -The address on which the native transport is bound is defined by rpc_address.

-

Default Value: true

-
-
-

native_transport_port

-

port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 9042

-
-
-

native_transport_port_ssl

-

This option is commented out by default. -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted.

-

Default Value: 9142

-
-
-
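A hedged sketch of running a dedicated TLS port alongside the plain native transport port, as described above; it assumes client_encryption_options has been enabled with a valid keystore (omitted here):

```yaml
# Sketch only: plain CQL clients stay on 9042 while TLS clients use 9142.
native_transport_port: 9042
native_transport_port_ssl: 9142
```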

native_transport_max_threads

-

This option is commented out by default. The maximum number of threads for handling requests (note that idle threads are stopped after 30 seconds so there is no corresponding minimum setting).

-

Default Value: 128

-
-
-

native_transport_max_frame_size_in_mb

-

This option is commented out by default.

-

The maximum size of an allowed frame. Frames (requests) larger than this will be rejected as invalid. The default is 256MB. If you’re changing this parameter, you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

native_transport_frame_block_size_in_kb

-

This option is commented out by default.

-

If checksumming is enabled as a protocol option, denotes the size of the chunks into which frame bodies will be broken and checksummed.

-

Default Value: 32

-
-
-

native_transport_max_concurrent_connections

-

This option is commented out by default.

-

The maximum number of concurrent client connections. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_max_concurrent_connections_per_ip

-

This option is commented out by default.

-

The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_allow_older_protocols

-

Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored.

-

Default Value: true

-
-
-

native_transport_idle_timeout_in_ms

-

This option is commented out by default.

-

Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period.

-

Clients may implement heartbeats by sending OPTIONS native protocol message after a timeout, which -will reset idle timeout timer on the server side. To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side.

-

Idle connection timeouts are disabled by default.

-

Default Value: 60000

-
-
-

rpc_address

-

The address or interface to bind the native transport server to.

-

Set rpc_address OR rpc_interface, not both.

-

Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node).

-

Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0.

-

For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: localhost

-
-
-

rpc_interface

-

This option is commented out by default.

-

Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth1

-
-
-

rpc_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_rpc_address

-

This option is commented out by default.

-

RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set.

-

Default Value: 1.2.3.4

-
-
-
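A minimal sketch of the pairing described above; the 10.0.1.11 address is a placeholder for the node's public or routable IP:

```yaml
# Sketch only: when the native transport binds to all interfaces, a concrete
# address must still be advertised to drivers and other nodes.
rpc_address: 0.0.0.0
broadcast_rpc_address: 10.0.1.11
```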

rpc_keepalive

-

enable or disable keepalive on rpc/native connections

-

Default Value: true

-
-
-

internode_send_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set the socket buffer size for internode communication. Note that when setting this, the buffer size is limited by net.core.wmem_max, and when not setting it, it is defined by net.ipv4.tcp_wmem. See also: /proc/sys/net/core/wmem_max, /proc/sys/net/core/rmem_max, /proc/sys/net/ipv4/tcp_wmem, /proc/sys/net/ipv4/tcp_rmem and ‘man tcp’.

-
-
-

internode_recv_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set the socket buffer size for internode communication. Note that when setting this, the receive buffer size is limited by net.core.rmem_max, and when not setting it, it is defined by net.ipv4.tcp_rmem.

-
-
-

incremental_backups

-

Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator’s -responsibility.

-

Default Value: false

-
-
-

snapshot_before_compaction

-

Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won’t clean up the -snapshots for you. Mostly useful if you’re paranoid when there -is a data format change.

-

Default Value: false

-
-
-

auto_snapshot

-

Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop.

-

Default Value: true

-
-
-

column_index_size_in_kb

-

Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these:

-
    -
  • a smaller granularity means more index entries are generated and looking up rows within the partition by collation column is faster
  • -
  • but, Cassandra will keep the collation index in memory for hot -rows (as part of the key cache), so a larger granularity means -you can cache more hot rows
  • -
-

Default Value: 64

-
-
-

column_index_cache_size_in_kb

-

Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk.

-

Note that this size refers to the size of the -serialized index information and not the size of the partition.

-

Default Value: 2

-
-
-

concurrent_compactors

-

This option is commented out by default.

-

Number of simultaneous compactions to allow, NOT including validation “compactions” for anti-entropy repair. Simultaneous compactions can help preserve read performance in a mixed read/write workload, by mitigating the tendency of small sstables to accumulate during a single long-running compaction. The default is usually fine and if you experience problems with compaction running too slowly or too fast, you should look at compaction_throughput_mb_per_sec first.

-

concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8.

-

If your data directories are backed by SSD, you should increase this -to the number of cores.

-

Default Value: 1

-
-
-

concurrent_validations

-

This option is commented out by default.

-

Number of simultaneous repair validations to allow. The default is unbounded; values less than one are interpreted as unbounded (the default).

-

Default Value: 0

-
-
-

concurrent_materialized_view_builders

-

Number of simultaneous materialized view builder tasks to allow.

-

Default Value: 1

-
-
-

compaction_throughput_mb_per_sec

-

Throttles compaction to the given total throughput across the entire system. The faster you insert data, the faster you need to compact in order to keep the sstable count down, but in general, setting this to 16 to 32 times the rate you are inserting data is more than sufficient. Setting this to 0 disables throttling. Note that this accounts for all types of compaction, including validation compaction.

-

Default Value: 16

-
-
-

sstable_preemptive_open_interval_in_mb

-

When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot

-

Default Value: 50

-
-
-

stream_entire_sstables

-

This option is commented out by default.

-

When enabled, permits Cassandra to zero-copy stream entire eligible SSTables between nodes, including every component. This speeds up the network transfer significantly, subject to throttling specified by stream_throughput_outbound_megabits_per_sec. Enabling this will reduce the GC pressure on the sending and receiving nodes. When unset, the default is enabled. While this feature tries to keep the disks balanced, it cannot guarantee it. This feature will be automatically disabled if internode encryption is enabled. Currently this can be used with Leveled Compaction. Once CASSANDRA-14586 is fixed, other compaction strategies will benefit as well when used in combination with CASSANDRA-6696.

-

Default Value: true

-
-
-

stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

inter_dc_stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all streaming file transfers between datacenters. This setting allows users to throttle inter-dc stream throughput in addition to throttling all network stream traffic as configured with stream_throughput_outbound_megabits_per_sec. When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

read_request_timeout_in_ms

-

How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

range_request_timeout_in_ms

-

How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 10000

-
-
-

write_request_timeout_in_ms

-

How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 2000

-
-
-

counter_write_request_timeout_in_ms

-

How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

cas_contention_timeout_in_ms

-

How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms.

-

Default Value: 1000

-
-
-

truncate_request_timeout_in_ms

-

How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms.

-

Default Value: 60000

-
-
-

request_timeout_in_ms

-

The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms.

-

Default Value: 10000

-
-
-

internode_application_send_queue_capacity_in_bytes

-

This option is commented out by default.

-

Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details.

-

The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000

-

The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000

-

The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000

-

Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received.

-

The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. So any given node may have a maximum of -N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes) -messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens -nodes should need to communicate with significant bandwidth.

-

The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, -on all links to or from a single node in the cluster. -The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, -on all links to or from any node in the cluster.

-

Default Value: 4194304 #4MiB

-
-
-

internode_application_send_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_send_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

internode_application_receive_queue_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 4194304 #4MiB

-
-
-

internode_application_receive_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_receive_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

slow_query_log_timeout_in_ms

-

How long before a node logs slow queries. Select queries that take longer than this timeout to execute will generate an aggregated log message, so that slow queries can be identified. Set this value to zero to disable slow query logging.

-

Default Value: 500

-
-
-

cross_node_timeout

-

This option is commented out by default.

-

Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests.

-

Warning: It is generally assumed that users have set up NTP on their clusters, and that clocks are modestly in sync, since this is a requirement for general correctness of last write wins.

-

Default Value: true

-
-
-

streaming_keep_alive_period_in_secs

-

This option is commented out by default.

-

Set the keep-alive period for streaming. This node will send a keep-alive message periodically with this period. If the node does not receive a keep-alive message from the peer for 2 keep-alive cycles, the stream session times out and fails. The default value is 300s (5 minutes), which means a stalled stream times out in 10 minutes by default.

-

Default Value: 300

-
-
-

streaming_connections_per_host

-

This option is commented out by default.

-

Limit the number of connections per host for streaming. Increase this when you notice that joins are CPU-bound rather than network-bound (for example a few nodes with big files).

-

Default Value: 1

-
-
-

phi_convict_threshold

-

This option is commented out by default.

-

The phi value that must be reached for a host to be marked down. Most users should never need to adjust this.

-

Default Value: 8

-
-
-

endpoint_snitch

-

endpoint_snitch – Set this to a class that implements -IEndpointSnitch. The snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route -requests efficiently
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid -correlated failures. It does this by grouping machines into -“datacenters” and “racks.” Cassandra will do its best not to have -more than one replica on the same “rack” (which may not actually -be a physical location)
  • -
-

CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on “rack1” in “datacenter1”, your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new “datacenter”) and -decommissioning the old ones.

-

Out of the box, Cassandra provides:

-
-
SimpleSnitch:
-
Treats Strategy order as proximity. This can improve cache -locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack -and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via -gossip. If cassandra-topology.properties exists, it is used as a -fallback, allowing migration from the PropertyFileSnitch.
-
PropertyFileSnitch:
-
Proximity is determined by rack and data center, which are -explicitly configured in cassandra-topology.properties.
-
Ec2Snitch:
-
Appropriate for EC2 deployments in a single Region. Loads Region -and Availability Zone information from the EC2 API. The Region is -treated as the datacenter, and the Availability Zone as the rack. -Only private IPs are used, so this will not work across multiple -Regions.
-
Ec2MultiRegionSnitch:
-
Uses public IPs as broadcast_address to allow cross-region -connectivity. (Thus, you should set seed addresses to the public -IP as well.) You will need to open the storage_port or -ssl_storage_port on the public IP firewall. (For intra-Region -traffic, Cassandra will switch to the private IP after -establishing a connection.)
-
RackInferringSnitch:
-
Proximity is determined by rack and data center, which are -assumed to correspond to the 3rd and 2nd octet of each node’s IP -address, respectively. Unless this happens to match your -deployment conventions, this is best used as an example of -writing a custom Snitch class and is provided in that spirit.
-
-

You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath.

-

Default Value: SimpleSnitch

-
-
-
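A minimal sketch of selecting the snitch generally recommended above for production; it assumes the node's datacenter and rack are defined in cassandra-rackdc.properties:

```yaml
# Sketch only: rack/DC topology comes from cassandra-rackdc.properties and is
# propagated via gossip.
endpoint_snitch: GossipingPropertyFileSnitch
```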

dynamic_snitch_update_interval_in_ms

-

controls how often to perform the more expensive part of host score -calculation

-

Default Value: 100

-
-
-

dynamic_snitch_reset_interval_in_ms

-

controls how often to reset all host scores, allowing a bad host to -possibly recover

-

Default Value: 600000

-
-
-

dynamic_snitch_badness_threshold

-

if set greater than zero, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest.

-

Default Value: 0.1

-
-
-

server_encryption_options

-

Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html

-

NOTE No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks

-

The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore

-

Default Value (complex option):

-
# set to true for allowing secure incoming connections
-enabled: false
-# If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port
-optional: false
-# if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used
-# during upgrade to 4.0; otherwise, set to false.
-enable_legacy_ssl_storage_port: false
-# on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true.
-internode_encryption: none
-keystore: conf/.keystore
-keystore_password: cassandra
-truststore: conf/.truststore
-truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-# require_client_auth: false
-# require_endpoint_verification: false
-
-
-
-
-

client_encryption_options

-

enable or disable client-to-server encryption.

-

Default Value (complex option):

-
enabled: false
-# If enabled and optional is set to true encrypted and unencrypted connections are handled.
-optional: false
-keystore: conf/.keystore
-keystore_password: cassandra
-# require_client_auth: false
-# Set trustore and truststore_password if require_client_auth is true
-# truststore: conf/.truststore
-# truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-
-
-
-

internode_compression

-

internode_compression controls whether traffic between nodes is -compressed. -Can be:

-
-
all
-
all traffic is compressed
-
dc
-
traffic between different datacenters is compressed
-
none
-
nothing is compressed.
-
-

Default Value: dc

-
-
-

inter_dc_tcp_nodelay

-

Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses.

-

Default Value: false

-
-
-

tracetype_query_ttl

-

TTL for different trace types used during logging of the repair process.

-

Default Value: 86400

-
-
-

tracetype_repair_ttl

-

Default Value: 604800

-
-
-

enable_user_defined_functions

-

If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.

-

Default Value: false

-
-
-

enable_scripted_user_defined_functions

-

Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with “language javascript” or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false.

-

Default Value: false

-
-
-

windows_timer_interval

-

The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals ‘clockres’ tool can confirm your system’s default -setting.

-

Default Value: 1

-
-
-

transparent_data_encryption_options

-

Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by the “key_alias” is the only key that will be used for encrypt operations; previously used keys can still (and should!) be in the keystore and will be used on decrypt operations (to handle the case of key rotation).

-

It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)

-

Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints

-

Default Value (complex option):

-
enabled: false
-chunk_length_kb: 64
-cipher: AES/CBC/PKCS5Padding
-key_alias: testing:1
-# CBC IV length for AES needs to be 16 bytes (which is also the default size)
-# iv_length: 16
-key_provider:
-  - class_name: org.apache.cassandra.security.JKSKeyProvider
-    parameters:
-      - keystore: conf/.keystore
-        keystore_password: cassandra
-        store_type: JCEKS
-        key_password: cassandra
-
-
-
-
-

tombstone_warn_threshold

-
-

SAFETY THRESHOLDS #

-

When executing a scan, within or across a partition, we need to keep the tombstones seen in memory so we can return them to the coordinator, which will use them to make sure other replicas also know about the deleted rows. With workloads that generate a lot of tombstones, this can cause performance problems and even exhaust the server heap. (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) Adjust the thresholds here if you understand the dangers and want to scan more tombstones anyway. These thresholds may also be adjusted at runtime using the StorageService mbean.

-

Default Value: 1000

-
-
-
-

tombstone_failure_threshold

-

Default Value: 100000

-
-
-

batch_size_warn_threshold_in_kb

-

Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability.

-

Default Value: 5

-
-
-

batch_size_fail_threshold_in_kb

-

Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.

-

Default Value: 50

-
-
-

unlogged_batch_across_partitions_warn_threshold

-

Log WARN on any batches not of type LOGGED that span across more partitions than this limit.

-

Default Value: 10

-
-
-

compaction_large_partition_warning_threshold_mb

-

Log a warning when compacting partitions larger than this value

-

Default Value: 100

-
-
-

gc_log_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than 200 ms will be logged at INFO level -This threshold can be adjusted to minimize logging if necessary

-

Default Value: 200

-
-
-

gc_warn_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement. Setting to 0 -will deactivate the feature.

-

Default Value: 1000

-
-
-

max_value_size_in_mb

-

This option is commented out by default.

-

Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

back_pressure_enabled

-

Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas.

-

Default Value: false

-
-
-

back_pressure_strategy

-

The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it’s rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map<String, Object>.

-
-
-
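As a hedged sketch of the strategy described above, the shipped cassandra.yaml typically expresses it as below; the exact class and parameter names are assumptions based on the usual defaults and should be verified against your own configuration file:

```yaml
# Sketch only: rate-based back-pressure with the commonly shipped defaults.
back_pressure_strategy:
    - class_name: org.apache.cassandra.net.RateBasedBackPressure
      parameters:
        - high_ratio: 0.90
          factor: 5
          flow: FAST
```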

otc_coalescing_strategy

-

This option is commented out by default.

-

Coalescing Strategies # Coalescing multiple messages turns out to significantly boost message processing throughput (think doubling or more). On bare metal, the floor for packet processing throughput is high enough that many applications won’t notice, but in virtualized environments, the point at which an application can be bound by network packet processing can be surprisingly low compared to the throughput of task processing that is possible inside a VM. It’s not that bare metal doesn’t benefit from coalescing messages, it’s that the number of packets a bare metal network interface can process is sufficient for many applications such that no load starvation is experienced even without coalescing. There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one trip to read from a socket, and all the task submission work can be done at the same time, reducing context switching and increasing cache friendliness of network message processing. See CASSANDRA-8692 for details.

-

Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.

-

Default Value: DISABLED

-
-
-

otc_coalescing_window_us

-

This option is commented out by default.

-

How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled.

-

Default Value: 200

-
-
-

otc_coalescing_enough_coalesced_messages

-

This option is commented out by default.

-

Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.

-

Default Value: 8

-
-
-

otc_backlog_expiration_interval_ms

-

This option is commented out by default.

-

How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.

-

Default Value: 200

-
-
-

ideal_consistency_level

-

This option is commented out by default.

-

Track a metric per keyspace indicating whether replication achieved the ideal consistency -level for writes without timing out. This is different from the consistency level requested by -each write which may be lower in order to facilitate availability.

-

Default Value: EACH_QUORUM

-
-
-

automatic_sstable_upgrade

-

This option is commented out by default.

-

Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the -oldest non-upgraded sstable will get upgraded to the latest version

-

Default Value: false

-
-
-

max_concurrent_automatic_sstable_upgrades

-

This option is commented out by default. -Limit the number of concurrent sstable upgrades

-

Default Value: 1

-
-
-

audit_logging_options

-

Audit logging - logs every incoming CQL command request, as well as authentication to a node. See the docs on audit_logging for full details about the various configuration options.

-
-
-
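A hedged sketch of what an audit logging block might look like; the logger name is an assumption based on the shipped defaults and should be checked against the audit_logging documentation referenced above:

```yaml
# Sketch only: audit logging disabled, with the binary logger selected for
# when it is switched on.
audit_logging_options:
    enabled: false
    logger: BinAuditLogger
```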

full_query_logging_options

-

This option is commented out by default.

-

default options for full query logging - these can be overridden from command line when executing -nodetool enablefullquerylog

-
-
-

corrupted_tombstone_strategy

-

This option is commented out by default.

-

validate tombstones on reads and compaction -can be either “disabled”, “warn” or “exception”

-

Default Value: disabled

-
-
-

diagnostic_events_enabled

-

Diagnostic Events # -If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details -on internal state and temporal relationships across events, accessible by clients via JMX.

-

Default Value: false

-
-
-

native_transport_flush_in_batches_legacy

-

This option is commented out by default.

-

Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in particular you run an old kernel or have very few client connections, this option might be worth evaluating.

-

Default Value: false

-
-
-

repaired_data_tracking_for_range_reads_enabled

-

Enable tracking of repaired state of data during reads and comparison between replicas -Mismatches between the repaired sets of replicas can be characterized as either confirmed -or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair -sessions, unrepaired partition tombstones, or some other condition means that the disparity -cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation -as they may be indicative of corruption or data loss. -There are separate flags for range vs partition reads as single partition reads are only tracked -when CL > 1 and a digest mismatch occurs. Currently, range queries don’t use digests so if -enabled for range reads, all range reads will include repaired data tracking. As this adds -some overhead, operators may wish to disable it whilst still enabling it for partition reads

-

Default Value: false

-
-
-

repaired_data_tracking_for_partition_reads_enabled

-

Default Value: false

-
-
-

report_unconfirmed_repaired_data_mismatches

-

If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed mismatches will also be recorded. This is to avoid potential signal-to-noise issues, as unconfirmed mismatches are less actionable than confirmed ones.

-

Default Value: false

-
-
-

enable_materialized_views

-
-

EXPERIMENTAL FEATURES #

-

Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use.

-

Default Value: false

-
-
-
-

enable_sasi_indexes

-

Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use.

-

Default Value: false

-
-
-

enable_transient_replication

-

Enables creation of transiently replicated keyspaces on this node. -Transient replication is experimental and is not recommended for production use.

-

Default Value: false

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/configuration/index.html b/src/doc/4.0-alpha4/configuration/index.html deleted file mode 100644 index 6e2e246db..000000000 --- a/src/doc/4.0-alpha4/configuration/index.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

This section describes how to configure Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/contactus.html b/src/doc/4.0-alpha4/contactus.html deleted file mode 100644 index aaa3ec137..000000000 --- a/src/doc/4.0-alpha4/contactus.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contact us" -doc-header-links: ' - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contact us

-

You can get in touch with the Cassandra community either via the mailing lists or Slack rooms.

-
-

Mailing lists

-

The following mailing lists are available:

- -

Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe.

-
-
-

Slack

-

To chat with developers or users in real-time, join our rooms on ASF Slack:

-
    -
  • cassandra - for user questions and general discussions.
  • -
  • cassandra-dev - strictly for questions or discussions related to Cassandra development.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/appendices.html b/src/doc/4.0-alpha4/cql/appendices.html deleted file mode 100644 index 0420a344d..000000000 --- a/src/doc/4.0-alpha4/cql/appendices.html +++ /dev/null @@ -1,568 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Appendices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Appendices

-
-

Appendix A: CQL Keywords

-

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (but one can enclose a reserved keyword in double-quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d’être of these non-reserved keywords is convenience: some keywords are non-reserved where it was always easy for the parser to decide whether they were used as keywords or not.

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Keyword | Reserved?
ADD | yes
AGGREGATE | no
ALL | no
ALLOW | yes
ALTER | yes
AND | yes
APPLY | yes
AS | no
ASC | yes
ASCII | no
AUTHORIZE | yes
BATCH | yes
BEGIN | yes
BIGINT | no
BLOB | no
BOOLEAN | no
BY | yes
CALLED | no
CLUSTERING | no
COLUMNFAMILY | yes
COMPACT | no
CONTAINS | no
COUNT | no
COUNTER | no
CREATE | yes
CUSTOM | no
DATE | no
DECIMAL | no
DELETE | yes
DESC | yes
DESCRIBE | yes
DISTINCT | no
DOUBLE | no
DROP | yes
ENTRIES | yes
EXECUTE | yes
EXISTS | no
FILTERING | no
FINALFUNC | no
FLOAT | no
FROM | yes
FROZEN | no
FULL | yes
FUNCTION | no
FUNCTIONS | no
GRANT | yes
IF | yes
IN | yes
INDEX | yes
INET | no
INFINITY | yes
INITCOND | no
INPUT | no
INSERT | yes
INT | no
INTO | yes
JSON | no
KEY | no
KEYS | no
KEYSPACE | yes
KEYSPACES | no
LANGUAGE | no
LIMIT | yes
LIST | no
LOGIN | no
MAP | no
MODIFY | yes
NAN | yes
NOLOGIN | no
NORECURSIVE | yes
NOSUPERUSER | no
NOT | yes
NULL | yes
OF | yes
ON | yes
OPTIONS | no
OR | yes
ORDER | yes
PASSWORD | no
PERMISSION | no
PERMISSIONS | no
PRIMARY | yes
RENAME | yes
REPLACE | yes
RETURNS | no
REVOKE | yes
ROLE | no
ROLES | no
SCHEMA | yes
SELECT | yes
SET | yes
SFUNC | no
SMALLINT | no
STATIC | no
STORAGE | no
STYPE | no
SUPERUSER | no
TABLE | yes
TEXT | no
TIME | no
TIMESTAMP | no
TIMEUUID | no
TINYINT | no
TO | yes
TOKEN | yes
TRIGGER | no
TRUNCATE | yes
TTL | no
TUPLE | no
TYPE | no
UNLOGGED | yes
UPDATE | yes
USE | yes
USER | no
USERS | no
USING | yes
UUID | no
VALUES | no
VARCHAR | no
VARINT | no
WHERE | yes
WITH | yes
WRITETIME | no
-
-
-

Appendix B: CQL Reserved Types

-

The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name.

- --- - - - - - - - - - - - - - - - - - - -
type
bitstring
byte
complex
enum
interval
macaddr
-
-
-

Appendix C: Dropping Compact Storage

-

Starting with version 4.0, Thrift and COMPACT STORAGE are no longer supported.

-

The ‘ALTER … DROP COMPACT STORAGE’ statement makes Compact Tables CQL-compatible, exposing the internal structure of Thrift/Compact Tables:

-
    -
  • CQL-created Compact Tables that have no clustering columns, will expose an -additional clustering column column1 with UTF8Type.
  • -
  • CQL-created Compact Tables that had no regular columns, will expose a -regular column value with BytesType.
  • -
  • For CQL-created Compact Tables, all columns originally defined as regular will become static
  • -
  • CQL-created Compact Tables that have clustering but have no regular -columns will have an empty value column (of EmptyType)
  • -
  • SuperColumn Tables (can only be created through Thrift) will expose -a compact value map with an empty name.
  • -
  • Thrift-created Compact Tables will have types corresponding to their -Thrift definition.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/changes.html b/src/doc/4.0-alpha4/cql/changes.html deleted file mode 100644 index 36982389a..000000000 --- a/src/doc/4.0-alpha4/cql/changes.html +++ /dev/null @@ -1,364 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Changes

-

The following describes the changes in each version of CQL.

-
-

3.4.5

- -
-
-

3.4.4

-
    -
  • ALTER TABLE ALTER has been removed; a column’s type may not be changed after creation (CASSANDRA-12443).
  • -
  • ALTER TYPE ALTER has been removed; a field’s type may not be changed after creation (CASSANDRA-12443).
  • -
-
-
-

3.4.3

- -
-
-

3.4.2

-
    -
  • If a table has a non zero default_time_to_live, then explicitly specifying a TTL of 0 in an INSERT or -UPDATE statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels -the default_time_to_live). This wasn’t the case before and the default_time_to_live was applied even though a -TTL had been explicitly set.
  • -
  • ALTER TABLE ADD and DROP now allow multiple columns to be added/removed.
  • -
  • New PER PARTITION LIMIT option for SELECT statements (see CASSANDRA-7017.
  • -
  • User-defined functions can now instantiate UDTValue and TupleValue instances via the -new UDFContext interface (see CASSANDRA-10818.
  • -
  • User-defined types may now be stored in a non-frozen form, allowing individual fields to be updated and -deleted in UPDATE statements and DELETE statements, respectively. (CASSANDRA-7423).
  • -
-
-
-

3.4.1

-
    -
  • Adds CAST functions.
  • -
-
-
-

3.4.0

-
    -
  • Support for materialized views.
  • -
  • DELETE support for inequality expressions and IN restrictions on any primary key columns.
  • -
  • UPDATE support for IN restrictions on any primary key columns.
  • -
-
-
-

3.3.1

-
    -
  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X.
  • -
-
-
-

3.3.0

-
    -
  • User-defined functions and aggregates are now supported.
  • -
  • Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings.
  • -
  • Introduces Roles to supersede user based authentication and access control
  • -
  • New date, time, tinyint and smallint data types have been added.
  • -
  • JSON support has been added
  • -
  • Adds new time conversion functions and deprecate dateOf and unixTimestampOf.
  • -
-
-
-

3.2.0

-
    -
  • User-defined types supported.
  • -
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the -keys() function
  • -
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • -
  • Tuple types were added to hold fixed-length sets of typed positional fields.
  • -
  • DROP INDEX now supports optionally specifying a keyspace.
  • -
-
-
-

3.1.7

-
    -
  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations -of clustering columns.
  • -
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, -respectively.
  • -
-
-
-

3.1.6

-
    -
  • A new uuid() method has been added.
  • -
  • Support for DELETE ... IF EXISTS syntax.
  • -
-
-
-

3.1.5

-
    -
  • It is now possible to group clustering columns in a relation, see WHERE clauses.
  • -
  • Added support for static columns.
  • -
-
-
-

3.1.4

-
    -
  • CREATE INDEX now allows specifying options when creating CUSTOM indexes.
  • -
-
-
-

3.1.3

-
    -
  • Millisecond precision formats have been added to the timestamp parser.
  • -
-
-
-

3.1.2

-
    -
  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or keyspace/table one), you will now need to double quote them.
  • -
-
-
-

3.1.1

-
    -
  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • -
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable -will be a list of whatever type c is.
  • -
  • It is now possible to use named bind variables (using :name instead of ?).
  • -
-
-
-

3.1.0

-
    -
  • ALTER TABLE DROP option added.
  • -
  • SELECT statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
  • -
  • CREATE statements for KEYSPACE, TABLE and INDEX now supports an IF NOT EXISTS condition. -Similarly, DROP statements support a IF EXISTS condition.
  • -
  • INSERT statements optionally supports a IF NOT EXISTS condition and UPDATE supports IF conditions.
  • -
-
-
-

3.0.5

-
    -
  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626.
  • -
-
-
-

3.0.4

-
    -
  • Updated the syntax for custom secondary indexes.
  • -
  • Non-equal conditions on the partition key are now never supported, even for the ordering partitioner, as this was not correct (the order was not that of the partition key type). Instead, the token method should always be used for range queries on the partition key (see WHERE clauses).
  • -
-
-
-

3.0.3

- -
-
-

3.0.2

-
    -
  • Type validation for constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now more strict. See the data types section for details on which constant is allowed for which type.
  • -
  • The type validation fix of the previous point has led to the introduction of blob constants to allow the input of blobs. Do note that while the input of blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated and will be removed by a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • -
  • A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is -now also allowed in select clauses. See the section on functions for details.
  • -
-
-
-

3.0.1

-
    -
  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense that date strings are not valid timeuuids, and it was thus resulting in confusing behaviors. However, the following new methods have been added to help working with timeuuid: now, minTimeuuid, maxTimeuuid, dateOf and unixTimestampOf.
  • -
  • Float constants now support the exponent notation. In other words, 4.2E10 is now a valid floating point value.
  • -
-
-
-

Versioning

-

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version.

- ---- - - - - - - - - - - - - - - - - -
version | description
Major | The major version must be bumped when backward incompatible changes are introduced. This should rarely occur.
Minor | Minor version increments occur when new, but backward compatible, functionality is introduced.
Patch | The patch version is incremented when bugs are fixed.
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/ddl.html b/src/doc/4.0-alpha4/cql/ddl.html deleted file mode 100644 index 1f40366ba..000000000 --- a/src/doc/4.0-alpha4/cql/ddl.html +++ /dev/null @@ -1,908 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Definition" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Definition

-

CQL stores data in tables, whose schema defines the layout of the data in the table, and those tables are grouped in keyspaces. A keyspace defines a number of options that apply to all the tables it contains, the most prominent of which is the replication strategy used by the keyspace. It is generally encouraged to use one keyspace per application, and thus many clusters may define only one keyspace.

-

This section describes the statements used to create, modify, and remove those keyspaces and tables.

-
-

Common definitions

-

The names of the keyspaces and tables are defined by the following grammar:

-
-keyspace_name ::=  name
-table_name    ::=  [ keyspace_name '.' ] name
-name          ::=  unquoted_name | quoted_name
-unquoted_name ::=  re('[a-zA-Z_0-9]{1, 48}')
-quoted_name   ::=  '"' unquoted_name '"'
-
-

Both keyspace and table names should be comprised of only alphanumeric characters, cannot be empty, and are limited in size to 48 characters (that limit exists mostly to avoid filenames, which may include the keyspace and table name, going over the limits of certain file systems). By default, keyspace and table names are case insensitive (myTable is equivalent to mytable) but case sensitivity can be forced by using double-quotes ("myTable" is different from mytable).

-

Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is part of. If it is not fully-qualified, the table is assumed to be in the current keyspace (see USE statement).

-

Further, the valid names for columns are simply defined as:

-
-column_name ::=  identifier
-
-

We also define the notion of statement options for use in the following section:

-
-options ::=  option ( AND option )*
-option  ::=  identifier '=' ( identifier | constant | map_literal )
-
-
-
-

CREATE KEYSPACE

-

A keyspace is created using a CREATE KEYSPACE statement:

-
-create_keyspace_statement ::=  CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options
-
-

For instance:

-
CREATE KEYSPACE excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-    AND durable_writes = false;
-
-
-

Attempting to create a keyspace that already exists will return an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the keyspace already exists.

-

The supported options are:

name           | kind   | mandatory | default | description
---------------|--------|-----------|---------|------------
replication    | map    | yes       |         | The replication strategy and options to use for the keyspace (see details below).
durable_writes | simple | no        | true    | Whether to use the commit log for updates on this keyspace (disable this option at your own risk!).
-

The replication property is mandatory and must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on which replication strategy is used. By default, Cassandra supports the following 'class' values:

-
-

SimpleStrategy

-

A simple strategy that defines a replication factor for data to be spread -across the entire cluster. This is generally not a wise choice for production -because it does not respect datacenter layouts and can lead to wildly varying -query latency. For a production ready strategy, see -NetworkTopologyStrategy. SimpleStrategy supports a single mandatory argument:

sub-option           | type | since | description
---------------------|------|-------|------------
'replication_factor' | int  | all   | The number of replicas to store per range.
-
-
-

NetworkTopologyStrategy

-

A production-ready replication strategy that allows setting the replication factor independently for each data-center. The rest of the sub-options are key-value pairs, where a key is a data-center name and its value is the associated replication factor. Options:

sub-option           | type | since | description
---------------------|------|-------|------------
'<datacenter>'       | int  | all   | The number of replicas to store per range in the provided datacenter.
'replication_factor' | int  | 4.0   | The number of replicas to use as a default per datacenter if not specifically provided. Note that this always defers to existing definitions or explicit datacenter settings. For example, to have three replicas per datacenter, supply this with a value of 3.
-

Note that when ALTERing keyspaces and supplying replication_factor, auto-expansion will only add new datacenters for safety; it will not alter existing datacenters or remove any, even if they are no longer in the cluster. If you want to remove datacenters while still supplying replication_factor, explicitly zero out the datacenter you want to have zero replicas.

-

An example of auto-expanding datacenters with two datacenters: DC1 and DC2:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true;
-
-
-

An example of auto-expanding and overriding a datacenter:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true;
-
-
-

An example that excludes a datacenter while using replication_factor:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0} ;
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true;
-
-
-

If transient replication has been enabled, transient replicas can be configured for both -SimpleStrategy and NetworkTopologyStrategy by defining replication factors in the format '<total_replicas>/<transient_replicas>'

-

For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:

-
CREATE KEYSPACE some_keyspace
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1', 'DC2' : '5/2'};
-
-
-
-
-
-

USE

-

The USE statement allows changing the current keyspace (for the connection on which it is executed). A number of objects in CQL are bound to a keyspace (tables, user-defined types, functions, …) and the current keyspace is the default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a keyspace name). A USE statement simply takes the keyspace to use as its argument:

-
-use_statement ::=  USE keyspace_name
-
-
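For example, to make the excelsior keyspace created above the current keyspace for the connection:

USE excelsior;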
-
-

ALTER KEYSPACE

-

An ALTER KEYSPACE statement allows modifying the options of a keyspace:

-
-alter_keyspace_statement ::=  ALTER KEYSPACE keyspace_name WITH options
-
-

For instance:

-
ALTER KEYSPACE Excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-
-

The supported options are the same as for creating a keyspace.

-
-
-

DROP KEYSPACE

-

Dropping a keyspace can be done using the DROP KEYSPACE statement:

-
-drop_keyspace_statement ::=  DROP KEYSPACE [ IF EXISTS ] keyspace_name
-
-

For instance:

-
DROP KEYSPACE Excelsior;
-
-
-

Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UDTs and functions in it, and all the data contained in those tables.

-

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-

CREATE TABLE

-

Creating a new table uses the CREATE TABLE statement:

-
-create_table_statement ::=  CREATE TABLE [ IF NOT EXISTS ] table_name
-                            '('
-                                column_definition
-                                ( ',' column_definition )*
-                                [ ',' PRIMARY KEY '(' primary_key ')' ]
-                            ')' [ WITH table_options ]
-column_definition      ::=  column_name cql_type [ STATIC ] [ PRIMARY KEY]
-primary_key            ::=  partition_key [ ',' clustering_columns ]
-partition_key          ::=  column_name
-                            | '(' column_name ( ',' column_name )* ')'
-clustering_columns     ::=  column_name ( ',' column_name )*
-table_options          ::=  COMPACT STORAGE [ AND table_options ]
-                            | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ]
-                            | options
-clustering_order       ::=  column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )*
-
-

For instance:

-
CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records';
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-CREATE TABLE loads (
-    machine inet,
-    cpu int,
-    mtime timeuuid,
-    load float,
-    PRIMARY KEY ((machine, cpu), mtime)
-) WITH CLUSTERING ORDER BY (mtime DESC);
-
-
-

A CQL table has a name and is composed of a set of rows. Creating a table amounts to defining which columns the rows will be composed of, which of those columns make up the primary key, as well as optional options for the table.

-

Attempting to create an already existing table will return an error unless the IF NOT EXISTS directive is used. If -it is used, the statement will be a no-op if the table already exists.

-

Every row in a CQL table has a set of predefined columns defined at table creation time (or added later using an alter statement).

-

A column_definition is primarily comprised of the name of the column defined and its type, which restricts which values are accepted for that column. Additionally, a column definition can have the following modifiers:

-
-
STATIC
-
it declares the column as being a static column.
-
PRIMARY KEY
-
it declares the column as being the sole component of the primary key of the table.
-
-

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same partition key). For instance:

-
CREATE TABLE t (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-
-INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-SELECT * FROM t;
-   pk | t | v      | s
-  ----+---+--------+-----------
-   0  | 0 | 'val0' | 'static1'
-   0  | 1 | 'val1' | 'static1'
-
-
-

As can be seen, the s value is the same (static1) for both of the rows in the partition (the partition key in that example being pk, both rows are in that same partition): the second insertion overrode the value for s.

-

The use of static columns has the following restrictions:

-
    -
  • tables with the COMPACT STORAGE option (see below) cannot use them.
  • -
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition -has only one row, and so every column is inherently static).
  • -
  • only non PRIMARY KEY columns can be static.
  • -
-

Within a table, a row is uniquely identified by its PRIMARY KEY, and hence every table must define a PRIMARY KEY (and only one). A PRIMARY KEY definition is composed of one or more of the columns defined in the table. Syntactically, the primary key is defined by the keywords PRIMARY KEY followed by a comma-separated list of the column names composing it within parentheses, but if the primary key has only one column, one can alternatively follow that column definition by the PRIMARY KEY keywords. The order of the columns in the primary key definition matters.

-

A CQL primary key is composed of 2 parts:

-
    -
  • the partition key part. It is the first component of the primary key definition. It can be a single column or, using additional parentheses, can be multiple columns. A table always has at least a partition key; the smallest possible table definition is:

    -
    CREATE TABLE t (k text PRIMARY KEY);
    -
    -
    -
  • -
  • the clustering columns. Those are the columns after the first component of the primary key definition, and the order of those columns defines the clustering order.

    -
  • -
-

Some examples of primary key definitions are:

-
    -
  • PRIMARY KEY (a): a is the partition key and there is no clustering columns.
  • -
  • PRIMARY KEY (a, b, c) : a is the partition key and b and c are the clustering columns.
  • -
  • PRIMARY KEY ((a, b), c) : a and b compose the partition key (this is often called a composite partition -key) and c is the clustering column.
  • -
-

Within a table, CQL defines the notion of a partition. A partition is simply the set of rows that share the same value for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same partition only if they have the same values for all those partition key columns. So for instance, given the following table definition and content:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    d int,
-    PRIMARY KEY ((a, b), c, d)
-);
-
-SELECT * FROM t;
-   a | b | c | d
-  ---+---+---+---
-   0 | 0 | 0 | 0    // row 1
-   0 | 0 | 1 | 1    // row 2
-   0 | 1 | 2 | 2    // row 3
-   0 | 1 | 3 | 3    // row 4
-   1 | 1 | 4 | 4    // row 5
-
-
-

row 1 and row 2 are in the same partition, row 3 and row 4 are also in the same partition (but a -different one) and row 5 is in yet another partition.

-

Note that a table always has a partition key, and that if the table has no clustering columns, then every partition of that table is only comprised of a single row (since the primary key uniquely identifies rows and the primary key is equal to the partition key if there are no clustering columns).

-

The most important property of partitions is that all the rows belonging to the same partition are guaranteed to be stored on the same set of replica nodes. In other words, the partition key of a table defines which rows will be localized together in the cluster, and it is thus important to choose your partition key wisely so that rows that need to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of nodes).

-

Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to be stored on the same set of replica nodes, a partition key that groups too much data can create a hotspot.

-

Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done atomically and in isolation, which is not the case across partitions.

-

The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they are.

-

The clustering columns of a table define the clustering order for the partitions of that table. For a given partition, all the rows are physically ordered inside Cassandra by that clustering order. For instance, given:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    PRIMARY KEY (a, b, c)
-);
-
-SELECT * FROM t;
-   a | b | c
-  ---+---+---
-   0 | 0 | 4     // row 1
-   0 | 1 | 9     // row 2
-   0 | 2 | 2     // row 3
-   0 | 3 | 3     // row 4
-
-
-

then the rows (which all belong to the same partition) are all stored internally in the order of the values of their b column (the order they are displayed above). So where the partition key of the table allows grouping rows on the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the retrieval of a range of rows within a partition (for instance, in the example above, SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3) to be very efficient.

-

A CQL table has a number of options that can be set at creation (and, for most of them, altered later). These options are specified after the WITH keyword.

-

Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the COMPACT STORAGE option and the CLUSTERING ORDER option. Those, as well as the other -options of a table are described in the following sections.

-
-

Warning

-

Since Cassandra 3.0, compact tables have the exact same internal layout as non-compact ones (for the same schema obviously), and declaring a table compact only creates artificial limitations on the table definition and usage. It only exists for historical reasons and is preserved for backward compatibility. And as COMPACT STORAGE cannot, as of Cassandra 4.0-alpha4, be removed, it is strongly discouraged to create new tables with the COMPACT STORAGE option.

-
-

A compact table is one defined with the COMPACT STORAGE option. This option is only maintained for backward compatibility for definitions created before CQL version 3 and shouldn’t be used for new tables. Declaring a table with this option creates limitations for the table which are largely arbitrary (and exist for historical reasons). Amongst those limitations:

-
    -
  • a compact table cannot use collections nor static columns.
  • -
  • if a compact table has at least one clustering column, then it must have exactly one column outside of the primary key ones. This implies, in particular, that you cannot add or remove columns after creation.
  • -
  • a compact table is limited in the indexes it can create, and no materialized view can be created on it.
  • -
-

The clustering order of a table is defined by the clustering columns of that table. By default, that ordering is based on the natural order of those clustering columns, but the CLUSTERING ORDER option allows changing that clustering order to use the reverse natural order for some (potentially all) of the columns.

-

The CLUSTERING ORDER option takes the comma-separated list of the clustering columns, each with an ASC (for ascending, i.e. the natural order) or DESC (for descending, i.e. the reverse natural order) modifier. Note in particular that the default (if the CLUSTERING ORDER option is not used) is strictly equivalent to using the option with all clustering columns using the ASC modifier.

-

Note that this option is basically a hint for the storage engine to change the order in which it stores the rows, but it has three visible consequences:

-
-
1. It limits which ORDER BY clauses are allowed for selects on that table. You can only order results by the clustering order or the reverse clustering order. Meaning that if a table has two clustering columns a and b and you defined WITH CLUSTERING ORDER (a DESC, b ASC), then in queries you will be allowed to use ORDER BY (a DESC, b ASC) and (reverse clustering order) ORDER BY (a ASC, b DESC), but not ORDER BY (a ASC, b ASC) (nor ORDER BY (a DESC, b DESC)). See the example after this list.
2. It also changes the default order of results when queried (if no ORDER BY is provided). Results are always returned in clustering order (within a partition).
3. It has a small performance impact on some queries, as queries in reverse clustering order are slower than those in forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of your columns (which is common with time series, for instance, where you often want data from the newest to the oldest), it is an optimization to declare a descending clustering order.
-
-
-
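As an illustrative sketch of the first consequence (the events_by_tag table and its columns are hypothetical):

CREATE TABLE events_by_tag (
    tag text,
    ts timestamp,
    seq int,
    payload text,
    PRIMARY KEY (tag, ts, seq)
) WITH CLUSTERING ORDER BY (ts DESC, seq ASC);

-- Allowed: the declared clustering order, or its exact reverse
SELECT * FROM events_by_tag WHERE tag = 'cql' ORDER BY ts DESC, seq ASC;
SELECT * FROM events_by_tag WHERE tag = 'cql' ORDER BY ts ASC, seq DESC;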

Todo

-

review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance)

-
-

A table supports the following options:

option                      | kind    | default      | description
----------------------------|---------|--------------|------------
comment                     | simple  | none         | A free-form, human-readable comment.
speculative_retry           | simple  | 99PERCENTILE | Speculative retry options.
cdc                         | boolean | false        | Create a Change Data Capture (CDC) log on the table.
additional_write_policy     | simple  | 99PERCENTILE | Speculative retry options.
gc_grace_seconds            | simple  | 864000       | Time to wait before garbage collecting tombstones (deletion markers).
bloom_filter_fp_chance      | simple  | 0.00075      | The target probability of false positives of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impacts the size of bloom filters in-memory and on-disk).
default_time_to_live        | simple  | 0            | The default expiration time (“TTL”) in seconds for a table.
compaction                  | map     | see below    | Compaction options.
compression                 | map     | see below    | Compression options.
caching                     | map     | see below    | Caching options.
memtable_flush_period_in_ms | simple  | 0            | Time (in ms) before Cassandra flushes memtables to disk.
read_repair                 | simple  | BLOCKING     | Sets read repair behavior (see below).
-

By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ONE, a quorum for QUORUM, and so on. -speculative_retry determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. Speculative retries are used to reduce the latency. The speculative_retry option may be -used to configure rapid read protection with which a coordinator sends more requests than needed to satisfy the Consistency level.

-

The pre-4.0 speculative retry policy takes a single string as a parameter; this can be NONE, ALWAYS, 99PERCENTILE (PERCENTILE), or 50MS (CUSTOM).

-

Examples of setting speculative retry are:

-
ALTER TABLE users WITH speculative_retry = '10ms';
-
-
-

Or,

-
ALTER TABLE users WITH speculative_retry = '99PERCENTILE';
-
-
-

The problem with these settings is that when a single host goes into an unavailable state, this drags up the percentiles. This means that if we are set to use p99 alone, we might not speculate when we intended to because the value at the specified percentile has gone so high. As a fix, 4.0 adds support for hybrid MIN(), MAX() speculative retry policies (CASSANDRA-14293). This means that if the normal p99 for the table is <50ms, we will still speculate at this value and not drag the tail latencies up… but if the p99th goes above what we know we should never exceed, we use that instead.

-

In 4.0 the values (case-insensitive) discussed in the following table are supported:

-

As of version 4.0, speculative retry allows more friendly parameters (CASSANDRA-13876). The speculative_retry value is more flexible with case. As an example, a value does not have to be NONE, and the following are supported alternatives.

-
alter table users WITH speculative_retry = 'none';
-alter table users WITH speculative_retry = 'None';
-
-
-

The text component is case insensitive and for nPERCENTILE version 4.0 allows nP, for instance 99p. -In a hybrid value for speculative retry, one of the two values must be a fixed millisecond value and the other a percentile value.

-

Some examples:

-
min(99percentile,50ms)
-max(99p,50MS)
-MAX(99P,50ms)
-MIN(99.9PERCENTILE,50ms)
-max(90percentile,100MS)
-MAX(100.0PERCENTILE,60ms)
-
-
-

Two values of the same kind cannot be specified such as min(90percentile,99percentile) as it wouldn’t be a hybrid value. -This setting does not affect reads with consistency level ALL because they already query all replicas.

-

Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default 99PERCENTILE.

-

additional_write_policy specifies the threshold at which a cheap quorum write will be upgraded to include transient replicas.

-
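A minimal sketch, assuming additional_write_policy accepts the same value format as speculative_retry (the users table is reused from the speculative_retry examples above):

ALTER TABLE users WITH additional_write_policy = '99p';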

The compaction options must at least define the 'class' sub-option, which defines the compaction strategy class to use. The supported classes are 'SizeTieredCompactionStrategy' (STCS), 'LeveledCompactionStrategy' (LCS) and 'TimeWindowCompactionStrategy' (TWCS) ('DateTieredCompactionStrategy' is also supported but is deprecated; 'TimeWindowCompactionStrategy' should be preferred instead). The default is 'SizeTieredCompactionStrategy'. A custom strategy can be provided by specifying the full class name as a string constant.

-

All default strategies support a number of common options, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: STCS, LCS and TWCS).

-
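For instance, a minimal sketch that switches the users table from the examples above to TWCS; strategy-specific sub-options would go in the same map:

ALTER TABLE users WITH compaction = {'class': 'TimeWindowCompactionStrategy'};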

The compression options define if and how the sstables of the table are compressed. Compression is configured on a per-table -basis as an optional argument to CREATE TABLE or ALTER TABLE. The following sub-options are -available:

Option             | Default       | Description
-------------------|---------------|------------
class              | LZ4Compressor | The compression algorithm to use. Default compressors are: LZ4Compressor, SnappyCompressor, DeflateCompressor and ZstdCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
enabled            | true          | Enable/disable sstable compression. If the enabled option is set to false, no other options must be specified.
chunk_length_in_kb | 64            | On disk, SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increase the minimum size of data to be read from disk for a read. The default value is an optimal value for compressing tables. Chunk length must be a power of 2 because this is assumed when computing the chunk number from an uncompressed file offset. Block size may be adjusted based on read/write access patterns such as how much data is typically requested at once and the average size of rows in the table.
crc_check_chance   | 1.0           | Determines how likely Cassandra is to verify the checksum on each compression chunk during reads.
compression_level  | 3             | Compression level. It is only applicable for ZstdCompressor and accepts values between -131072 and 22.
-

For instance, to create a table with LZ4Compressor and a chunk_length_in_kb of 4 KB:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
-
-
-

Caching optimizes the use of cache memory of a table. The cached data is weighed by size and access frequency. The caching options allow configuring both the key cache and the row cache for the table. The following sub-options are available:

Option             | Default | Description
-------------------|---------|------------
keys               | ALL     | Whether to cache keys (“key cache”) for this table. Valid values are: ALL and NONE.
rows_per_partition | NONE    | The amount of rows to cache per partition (“row cache”). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.
-

For instance, to create a table with both a key cache and 10 rows per partition:

-
CREATE TABLE simple (
-id int,
-key text,
-value text,
-PRIMARY KEY (key, value)
-) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10};
-
-
-

The read_repair option configures the read repair behavior, allowing tuning for various performance and consistency behaviors. Two consistency properties are affected by read repair behavior.

-
    -
  • Monotonic Quorum Reads: Provided by BLOCKING. Monotonic quorum reads prevent reads from appearing to go back in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of replicas, it may be visible in one read, and then disappear in a subsequent read.
  • -
  • Write Atomicity: Provided by NONE. Write atomicity prevents reads from returning partially applied writes. -Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement -is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it -is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a -batch, but then select a single row by specifying the clustering column in a SELECT statement.
  • -
-

The available read repair settings are:

-

The default setting. When read_repair is set to BLOCKING, and a read repair is triggered, the read will block on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition level write atomicity.

-

When read_repair is set to NONE, the coordinator will reconcile any differences between replicas, but will not -attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads.

-
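For instance, a minimal sketch selecting the NONE behavior for the users table from the examples above:

ALTER TABLE users WITH read_repair = 'NONE';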
    -
  • Adding new columns (see ALTER TABLE below) is a constant time operation. There is thus no need to try to -anticipate future usage when creating a table.
  • -
-
-
-

ALTER TABLE

-

Altering an existing table uses the ALTER TABLE statement:

-
-alter_table_statement   ::=  ALTER TABLE table_name alter_table_instruction
-alter_table_instruction ::=  ADD column_name cql_type ( ',' column_name cql_type )*
-                             | DROP column_name ( column_name )*
-                             | WITH options
-
-

For instance:

-
ALTER TABLE addamsFamily ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table';
-
-
-

The ALTER TABLE statement can:

-
    -
  • Add new column(s) to the table (through the ADD instruction). Note that the primary key of a table cannot be changed and thus newly added columns will, by extension, never be part of the primary key. Also note that compact tables have restrictions regarding column addition. Note that this is a constant-time operation (in the amount of data the cluster contains).
  • -
  • Remove column(s) from the table (through the DROP instruction; see the example below). This drops both the column and all its content, but note that while the column becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings below. Due to lazy removal, the altering itself is a constant-time operation (in the amount of data removed or contained in the cluster).
  • -
  • Change some of the table options (through the WITH instruction). The supported options are the same as when creating a table (apart from COMPACT STORAGE and CLUSTERING ORDER, which cannot be changed after creation). Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.
  • -
-
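For instance, dropping the column added in the example above:

ALTER TABLE addamsFamily DROP gravesite;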
-

Warning

-

Dropping a column assumes that the timestamps used for the values of this column are “real” timestamps in microseconds. Using “real” timestamps in microseconds is the default and is strongly recommended, but as Cassandra allows the client to provide any timestamp on any table, it is theoretically possible to use another convention. Please be aware that if you do so, dropping a column will not work correctly.

-
-
-

Warning

-

Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one, unless the type of the dropped column was a (non-frozen) collection (due to an internal technical limitation).

-
-
-
-

DROP TABLE

-

Dropping a table uses the DROP TABLE statement:

-
-drop_table_statement ::=  DROP TABLE [ IF EXISTS ] table_name
-
-
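For instance, dropping one of the tables created earlier (IF EXISTS makes the statement a no-op if the table has already been removed):

DROP TABLE IF EXISTS monkeySpecies;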

Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.

-

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the -operation is a no-op.

-
-
-

TRUNCATE

-

A table can be truncated using the TRUNCATE statement:

-
-truncate_statement ::=  TRUNCATE [ TABLE ] table_name
-
-

Note that TRUNCATE TABLE foo is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the TABLE keyword can be omitted.

-

Truncating a table permanently removes all existing data from the table, but without removing the table itself.

-
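For instance, either of the following empties the monkeySpecies table created earlier (the TABLE keyword being optional, as noted above):

TRUNCATE monkeySpecies;
TRUNCATE TABLE monkeySpecies;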
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/definitions.html b/src/doc/4.0-alpha4/cql/definitions.html deleted file mode 100644 index af1477c99..000000000 --- a/src/doc/4.0-alpha4/cql/definitions.html +++ /dev/null @@ -1,317 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Definitions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Definitions

-
-

Conventions

-

To aid in specifying the CQL syntax, we will use the following conventions in this document:

-
    -
  • Language rules will be given in an informal BNF variant notation. In particular, we’ll use square brackets ([ item ]) for optional items, * and + for repeated items (where + implies at least one).
  • -
  • The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to -their definition) while terminal keywords will be provided “all caps”. Note however that keywords are -Identifiers and keywords and are thus case insensitive in practice. We will also define some early construction using -regexp, which we’ll indicate with re(<some regular expression>).
  • -
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the comma on the last column definition in a CREATE TABLE statement is optional but supported if present, even though the grammar in this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
  • -
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.
  • -
-
-
-

Identifiers and keywords

-

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token -matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

-

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in Appendix A: CQL Keywords.

-

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

-

There is a second kind of identifier called quoted identifiers, defined by enclosing an arbitrary (non-empty) sequence of characters in double-quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column (note that using this is particularly not advised), while select would raise a parsing error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is however equivalent to the unquoted identifier obtained by removing the double-quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.

-
-

Note

-

Quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional updates, the server will respond with a result-set containing a special result named "[applied]". If you’ve declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred, but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by square brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

-
-

More formally, we have:

-
-identifier          ::=  unquoted_identifier | quoted_identifier
-unquoted_identifier ::=  re('[a-zA-Z][a-zA-Z0-9_]*')
-quoted_identifier   ::=  '"' (any character where " can appear if doubled)+ '"'
-
-
-
-

Constants

-

CQL defines the following kind of constants:

-
-constant ::=  string | integer | float | boolean | uuid | blob | NULL
-string   ::=  '\'' (any character where ' can appear if doubled)+ '\''
-              '$$' (any character other than '$$') '$$'
-integer  ::=  re('-?[0-9]+')
-float    ::=  re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY
-boolean  ::=  TRUE | FALSE
-uuid     ::=  hex{8}-hex{4}-hex{4}-hex{4}-hex{12}
-hex      ::=  re("[0-9a-fA-F]")
-blob     ::=  '0' ('x' | 'X') hex+
-
-

In other words:

-
    -
  • A string constant is an arbitrary sequence of characters enclosed by single-quote('). A single-quote -can be included by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted -Identifiers and keywords that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence -of characters by two dollar characters, in which case single-quote can be used without escaping ($$It's raining -today$$). That latter form is often used when defining user-defined functions to avoid having to -escape single-quote characters in function body (as they are more likely to occur than $$).
  • -
  • Integer, float and boolean constants are defined as expected. Note however that float allows the special NaN and Infinity constants.
  • -
  • CQL supports UUID constants.
  • -
  • Blob content is provided in hexadecimal and prefixed by 0x.
  • -
  • The special NULL constant denotes the absence of value.
  • -
-

For how these constants are typed, see the Data Types section.

-
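As an illustrative sketch (the messages table and its columns are hypothetical), an INSERT mixing several kinds of constants: a string with an escaped single-quote, an integer, a float in exponent notation, a boolean, a uuid and a blob:

INSERT INTO messages (id, body, views, score, pinned, attachment)
VALUES (123e4567-e89b-12d3-a456-426614174000,
        'It''s raining today', 42, 4.2E10, true, 0xCAFEBABE);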
-
-

Terms

-

CQL has the notion of a term, which denotes the kind of values that CQL support. Terms are defined by:

-
-term                 ::=  constant | literal | function_call | arithmetic_operation | type_hint | bind_marker
-literal              ::=  collection_literal | udt_literal | tuple_literal
-function_call        ::=  identifier '(' [ term (',' term)* ] ')'
-arithmetic_operation ::=  '-' term | term ('+' | '-' | '*' | '/' | '%') term
-type_hint            ::=  '(' cql_type `)` term
-bind_marker          ::=  '?' | ':' identifier
-
-

A term is thus one of:

- -
-
-

Comments

-

A comment in CQL is a line beginning with either a double dash (--) or a double slash (//).

-

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-
-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-
-
-
-
-

Statements

-

CQL consists of statements that can be divided in the following categories:

- -

All the statements are listed below and are described in the rest of this documentation (see links above):

-
-cql_statement                ::=  statement [ ';' ]
-statement                    ::=  ddl_statement
-                                  | dml_statement
-                                  | secondary_index_statement
-                                  | materialized_view_statement
-                                  | role_or_permission_statement
-                                  | udf_statement
-                                  | udt_statement
-                                  | trigger_statement
-ddl_statement                ::=  use_statement
-                                  | create_keyspace_statement
-                                  | alter_keyspace_statement
-                                  | drop_keyspace_statement
-                                  | create_table_statement
-                                  | alter_table_statement
-                                  | drop_table_statement
-                                  | truncate_statement
-dml_statement                ::=  select_statement
-                                  | insert_statement
-                                  | update_statement
-                                  | delete_statement
-                                  | batch_statement
-secondary_index_statement    ::=  create_index_statement
-                                  | drop_index_statement
-materialized_view_statement  ::=  create_materialized_view_statement
-                                  | drop_materialized_view_statement
-role_or_permission_statement ::=  create_role_statement
-                                  | alter_role_statement
-                                  | drop_role_statement
-                                  | grant_role_statement
-                                  | revoke_role_statement
-                                  | list_roles_statement
-                                  | grant_permission_statement
-                                  | revoke_permission_statement
-                                  | list_permissions_statement
-                                  | create_user_statement
-                                  | alter_user_statement
-                                  | drop_user_statement
-                                  | list_users_statement
-udf_statement                ::=  create_function_statement
-                                  | drop_function_statement
-                                  | create_aggregate_statement
-                                  | drop_aggregate_statement
-udt_statement                ::=  create_type_statement
-                                  | alter_type_statement
-                                  | drop_type_statement
-trigger_statement            ::=  create_trigger_statement
-                                  | drop_trigger_statement
-
-
-
-

Prepared Statements

-

CQL supports prepared statements. Prepared statements are an optimization that allows parsing a query only once but executing it multiple times with different concrete values.

-

Any statement that uses at least one bind marker (see bind_marker) will need to be prepared, after which the statement can be executed by providing concrete values for each of its markers. The exact details of how a statement is prepared and then executed depend on the CQL driver used, and you should refer to your driver documentation.

-
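For instance, the following sketches use an anonymous and a named bind marker respectively (the users table here is hypothetical); a driver would prepare each statement once and then execute it with concrete values:

SELECT name, occupation FROM users WHERE userid = ?;
UPDATE users SET occupation = :occupation WHERE userid = :id;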
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/dml.html b/src/doc/4.0-alpha4/cql/dml.html deleted file mode 100644 index 86ed902a4..000000000 --- a/src/doc/4.0-alpha4/cql/dml.html +++ /dev/null @@ -1,561 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Manipulation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Manipulation

-

This section describes the statements supported by CQL to insert, update, delete and query data.

-
-

SELECT

-

Querying data from tables is done using a SELECT statement:

-
-select_statement ::=  SELECT [ JSON | DISTINCT ] ( select_clause | '*' )
-                      FROM table_name
-                      [ WHERE where_clause ]
-                      [ GROUP BY group_by_clause ]
-                      [ ORDER BY ordering_clause ]
-                      [ PER PARTITION LIMIT (integer | bind_marker) ]
-                      [ LIMIT (integer | bind_marker) ]
-                      [ ALLOW FILTERING ]
-select_clause    ::=  selector [ AS identifier ] ( ',' selector [ AS identifier ] )
-selector         ::=  column_name
-                      | term
-                      | CAST '(' selector AS cql_type ')'
-                      | function_name '(' [ selector ( ',' selector )* ] ')'
-                      | COUNT '(' '*' ')'
-where_clause     ::=  relation ( AND relation )*
-relation         ::=  column_name operator term
-                      '(' column_name ( ',' column_name )* ')' operator tuple_literal
-                      TOKEN '(' column_name ( ',' column_name )* ')' operator term
-operator         ::=  '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY
-group_by_clause  ::=  column_name ( ',' column_name )*
-ordering_clause  ::=  column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )*
-
-

For instance:

-
SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT (*) AS user_count FROM users;
-
-
-

The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of the rows matching the request, where each row contains the values for the selection corresponding to the query. Additionally, functions including aggregation ones can be applied to the result.

-

A SELECT statement contains at least a selection clause and the name of the table on which the selection is made (note that CQL does not support joins or sub-queries and thus a select statement only applies to a single table). In most cases, a select will also have a where clause and it can optionally have additional clauses to order or limit the results. Lastly, queries that require filtering can be allowed if the ALLOW FILTERING flag is provided.

-
-

Selection clause

-

The select_clause determines which columns need to be queried and returned in the result-set, as well as any transformations to apply to this result before returning. It consists of a comma-separated list of selectors or, alternatively, of the wildcard character (*) to select all the columns defined in the table.

-
-

Selectors

-

A selector can be one of:

-
    -
  • A column name of the table selected, to retrieve the values for that column.
  • -
  • A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the -corresponding column of the result-set will simply have the value of this term for every row returned).
  • -
  • A casting, which allows converting a nested selector to a (compatible) type.
  • -
  • A function call, where the arguments are selector themselves. See the section on functions for -more details.
  • -
  • The special call COUNT(*) to the COUNT function, which counts all non-null results.
  • -
-
-
-

Aliases

-

Every top-level selector can also be aliased (using AS). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:

-
// Without alias
-SELECT intAsBlob(4) FROM t;
-
-//  intAsBlob(4)
-// --------------
-//  0x00000004
-
-// With alias
-SELECT intAsBlob(4) AS four FROM t;
-
-//  four
-// ------------
-//  0x00000004
-
-
-
-

Note

-

Currently, aliases aren’t recognized anywhere else in the statement where they are used (not in the WHERE clause, not in the ORDER BY clause, …). You must use the original column name instead.

-
-
-
-

WRITETIME and TTL function

-

Selection supports two special functions (that aren’t allowed anywhere else): WRITETIME and TTL. Both functions take only one argument and that argument must be a column name (so for instance TTL(3) is invalid).

-

Those functions allow retrieving meta-information that is stored internally for each column, namely (see the example after this list):

-
    -
  • the timestamp of the value of the column for WRITETIME.
  • -
  • the remaining time to live (in seconds) for the value of the column if it is set to expire (and null otherwise).
  • -
-
-
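For instance, reusing the users table from the SELECT examples above:

SELECT WRITETIME(occupation), TTL(occupation) FROM users WHERE userid = 199;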
-
-

The WHERE clause

-

The WHERE clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the PRIMARY KEY and/or have a secondary index defined on them.

-

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations on them are restricted to those that allow selecting a contiguous (for the ordering) set of rows. For instance, given:

-
CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-
-
-

The following query is allowed:

-
SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND blog_title='John''s Blog'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):

-
// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, -rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -token(-1) > token(0) in particular). Example:

-
SELECT * FROM posts
- WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-
-
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full -primary key.

-

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-
-
-

will request all rows that sort after the one having “John’s Blog” as blog_title and ‘2012-01-01’ for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which would not be the case for:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND blog_title > 'John''s Blog'
-   AND posted_at > '2012-01-01'
-
-
-

The tuple notation may also be used for IN clauses on clustering columns:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-
-
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the -map keys.

-
-
-

Grouping results

-

The GROUP BY option allows condensing all selected rows that share the same values for a set of columns into a single row.

-

Using the GROUP BY option, it is only possible to group rows at the partition key level or at a clustering column level. As a consequence, the GROUP BY option only accepts as arguments primary key column names in the primary key order. If a primary key column is restricted by an equality restriction, it is not required to be present in the GROUP BY clause.

-

Aggregate functions will produce a separate value for each group. If no GROUP BY clause is specified, aggregate functions will produce a single value for all the rows.

-

If a column is selected without an aggregate function, in a statement with a GROUP BY, the first value encountered in each group will be returned.

-
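For instance, a sketch counting the rows of each partition of the posts table defined above:

SELECT userid, COUNT(*) FROM posts GROUP BY userid;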
-
-

Ordering results

-

The ORDER BY clause allows selecting the order of the returned results. It takes as argument a list of column names along with the order for the column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited by the clustering order defined on the table (see the example after this list):

-
    -
  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • -
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
  • -
-
-
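For instance, reversing the default clustering order of the posts table defined above:

SELECT * FROM posts
 WHERE userid = 'john doe'
 ORDER BY blog_title DESC, posted_at DESC;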
-

Limiting results

-

The LIMIT option to a SELECT statement limits the number of rows returned by a query, while the PER PARTITION LIMIT option limits the number of rows returned for a given partition by the query. Note that both types of limit can be used in the same statement.

-
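For instance, a sketch returning at most two rows per partition and ten rows overall from the posts table above:

SELECT * FROM posts PER PARTITION LIMIT 2 LIMIT 10;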
-
-

Allowing filtering

-

By default, CQL only allows select queries that don’t involve “filtering” server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

-

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (for the definition above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

-

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:

-
CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-
-
-

Then the following queries are valid:

-
SELECT * FROM users;
-SELECT * FROM users WHERE birth_year = 1981;
-
-
-

because in both cases, Cassandra guarantees that these queries’ performance will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query’s performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored. Nevertheless, the number of nodes will always be multiple orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

-

However, the following query will be rejected:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-
-

because Cassandra cannot guarantee that it won’t have to scan a large amount of data even if the result of the query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you “know what you are doing”, you can force the execution of this query by using ALLOW FILTERING, and so the following query is valid:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-
-
-
-
-

INSERT

-

Inserting data for a row is done using an INSERT statement:

-
-insert_statement ::=  INSERT INTO table_name ( names_values | json_clause )
-                      [ IF NOT EXISTS ]
-                      [ USING update_parameter ( AND update_parameter )* ]
-names_values     ::=  names VALUES tuple_literal
-json_clause      ::=  JSON string [ DEFAULT ( NULL | UNSET ) ]
-names            ::=  '(' column_name ( ',' column_name )* ')'
-
-

For instance:

-
INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-      USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                              "director": "Joss Whedon",
-                              "year": 2005}';
-
-
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert must be supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the section on JSON support for more detail.

-

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means of knowing whether a creation or an update occurred.

-

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the insertion. But please note that using IF NOT EXISTS will incur a non-negligible performance cost (internally, Paxos will be used) so this should be used sparingly.

-

All updates for an INSERT are applied atomically and in isolation.

-

Please refer to the UPDATE section for information on the update_parameter.

-

Also note that INSERT does not support counters, while UPDATE does.

-
-
-

UPDATE

-

Updating a row is done using an UPDATE statement:

-
-update_statement ::=  UPDATE table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      SET assignment ( ',' assignment )*
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-update_parameter ::=  ( TIMESTAMP | TTL ) ( integer | bind_marker )
-assignment       ::=  simple_selection '=' term
-                     | column_name '=' column_name ( '+' | '-' ) term
-                     | column_name '=' list_literal '+' column_name
-simple_selection ::=  column_name
-                     | column_name '[' term ']'
-                     | column_name '.' `field_name
-condition        ::=  simple_selection operator term
-
-

For instance:

-
UPDATE NerdMovies USING TTL 400
-   SET director   = 'Joss Whedon',
-       main_actor = 'Nathan Fillion',
-       year       = 2005
- WHERE movie = 'Serenity';
-
-UPDATE UserActions
-   SET total = total + 2
-   WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14
-     AND action = 'click';
-
-
-

The UPDATE statement writes one or more columns for a given row in a table. The where_clause is used to -select the row to update and must include all columns composing the PRIMARY KEY. Non primary key columns are then -set using the SET keyword.

-

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through IF, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred.

-

It is however possible to use conditions on some columns through IF, in which case the row will not be updated unless the conditions are met. But please note that using IF conditions will incur a non-negligible performance cost (internally, Paxos will be used) so this should be used sparingly.

-

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

-

Regarding the assignment:

-
  • c = c + 3 is used to increment/decrement counters. The column name after the ‘=’ sign must be the same as the one before the ‘=’ sign. Note that increment/decrement is only allowed on counters, and these are the only update operations allowed on counters. See the section on counters for details. Sketches of all three assignment forms follow this list.
  • id = id + <some-collection> and id[value1] = value2 are for collections, see the relevant section for details.
  • id.field = 3 is for setting the value of a field on a non-frozen user-defined type, see the relevant section for details.
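As a sketch of the three assignment forms: the first statement reuses the UserActions counter table from the example above, while the events and users_by_id tables and their tags (list<text>), attrs (map<text, text>) and address (non-frozen UDT) columns are assumptions for illustration only.

UPDATE UserActions SET total = total + 3 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';  -- counter increment
UPDATE events SET tags = tags + ['urgent'] WHERE id = 1;            -- append to a list collection
UPDATE events SET attrs['color'] = 'red' WHERE id = 1;              -- set a single map entry
UPDATE users_by_id SET address.city = 'Paris' WHERE id = 42;        -- set a field of a non-frozen UDT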
-

Update parameters

-

The UPDATE, INSERT (and DELETE and BATCH for the TIMESTAMP) statements support the following -parameters:

-
    -
  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in -microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • -
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are -automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not -the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL -is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a -default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of null is equivalent -to inserting with a TTL of 0.
  • -
-
-
-
-

DELETE

-

Deleting rows or parts of rows uses the DELETE statement:

-
-delete_statement ::=  DELETE [ simple_selection ( ',' simple_selection ) ]
-                      FROM table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-
-

For instance:

-
DELETE FROM NerdMovies USING TIMESTAMP 1240003134
- WHERE movie = 'Serenity';
-
-DELETE phone FROM Users
- WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-
-
-

The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, -only those columns are deleted from the row indicated by the WHERE clause. Otherwise, whole rows are removed.

-

The WHERE clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -IN operator. A range of rows may be deleted using an inequality operator (such as >=).
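As a sketch of a range deletion (the events table, with PRIMARY KEY (day, seq), is an assumption for illustration and not defined elsewhere in this document):

DELETE FROM events WHERE day = '2020-01-01' AND seq >= 100;  -- removes all rows of that partition whose clustering column seq is >= 100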

-

DELETE supports the TIMESTAMP option with the same semantics as in updates.

-

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

-

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT -statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly.

-
-
-

BATCH

-

Multiple INSERT, UPDATE and DELETE can be executed in a single statement by grouping them through a -BATCH statement:

-
-batch_statement        ::=  BEGIN [ UNLOGGED | COUNTER ] BATCH
-                            [ USING update_parameter ( AND update_parameter )* ]
-                            modification_statement ( ';' modification_statement )*
-                            APPLY BATCH
-modification_statement ::=  insert_statement | update_statement | delete_statement
-
-

For instance:

-
BEGIN BATCH
-   INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-   DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-
-
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

-
    -
  • It saves network round-trips between the client and the server (and sometimes between the server coordinator and the -replicas) when batching multiple updates.
  • -
  • All updates in a BATCH belonging to a given partition key are performed in isolation.
  • -
  • By default, all operations in the batch are performed as logged, to ensure all mutations eventually complete (or -none will). See the notes on UNLOGGED batches for more details.
  • -
-

Note that:

-
    -
  • BATCH statements may only contain UPDATE, INSERT and DELETE statements (not other batches for instance).
  • -
  • Batches are not a full analogue for SQL transactions.
  • -
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp -(either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra’s conflict -resolution procedure in the case of timestamp ties, operations may -be applied in an order that is different from the order they are listed in the BATCH statement. To force a -particular operation ordering, you must specify per-operation timestamps.
  • -
  • A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
  • -
-
-

UNLOGGED batches

-

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition).

-

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might be only partially applied.
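For instance, a sketch of an unlogged batch, reusing the users table from the BATCH example above:

BEGIN UNLOGGED BATCH
   INSERT INTO users (userid, password) VALUES ('user5', 'ch@ngem3d');
   INSERT INTO users (userid, password) VALUES ('user6', 'ch@ngem3e');
APPLY BATCH;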

-
-
-

COUNTER batches

-

Use the COUNTER option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent.
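As a sketch (the page_views table, assumed to contain a counter column views, is an assumption for illustration only):

BEGIN COUNTER BATCH
   UPDATE page_views SET views = views + 1 WHERE page = '/home';
   UPDATE page_views SET views = views + 1 WHERE page = '/about';
APPLY BATCH;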

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/functions.html b/src/doc/4.0-alpha4/cql/functions.html deleted file mode 100644 index 2bb5bc72b..000000000 --- a/src/doc/4.0-alpha4/cql/functions.html +++ /dev/null @@ -1,706 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Functions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Functions

-

CQL supports 2 main categories of functions:

-
    -
  • the scalar functions, which simply take a number of values and produce an output with it.
  • -
  • the aggregate functions, which are used to aggregate multiple rows results from a -SELECT statement.
  • -
-

In both cases, CQL provides a number of native “hard-coded” functions as well as the ability to create new user-defined -functions.

-
-

Note

-

The use of user-defined functions is disabled by default for security reasons (even when enabled, the execution of user-defined functions is sandboxed and a “rogue” function should not be able to do harm, but no sandbox is perfect, so user-defined functions remain opt-in). See the enable_user_defined_functions setting in cassandra.yaml to enable them.

-
-

A function is identified by its name:

-
-function_name ::=  [ keyspace_name '.' ] name
-
-
-

Scalar functions

-
-

Native functions

-
-

Cast

-

The cast function can be used to convert one native datatype to another.

-

The following table describes the conversions supported by the cast function. Cassandra will silently ignore any -cast converting a datatype into its own datatype.

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FromTo
asciitext, varchar
biginttinyint, smallint, int, float, double, decimal, varint, text, -varchar
booleantext, varchar
countertinyint, smallint, int, bigint, float, double, decimal, varint, -text, varchar
datetimestamp
decimaltinyint, smallint, int, bigint, float, double, varint, text, -varchar
doubletinyint, smallint, int, bigint, float, decimal, varint, text, -varchar
floattinyint, smallint, int, bigint, double, decimal, varint, text, -varchar
inettext, varchar
inttinyint, smallint, bigint, float, double, decimal, varint, text, -varchar
smallinttinyint, int, bigint, float, double, decimal, varint, text, -varchar
timetext, varchar
timestampdate, text, varchar
timeuuidtimestamp, date, text, varchar
tinyinttinyint, smallint, int, bigint, float, double, decimal, varint, -text, varchar
uuidtext, varchar
varinttinyint, smallint, int, bigint, float, double, decimal, text, -varchar
-

The conversions rely strictly on Java’s semantics. For example, the double value 1 will be converted to the text value -‘1.0’. For instance:

-
SELECT avg(cast(count as double)) FROM myTable
-
-
-
-
-

Token

-

The token function computes the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

-

The types of the arguments of token depend on the types of the partition key columns. The return type depends on the partitioner in use:

-
    -
  • For Murmur3Partitioner, the return type is bigint.
  • -
  • For RandomPartitioner, the return type is varint.
  • -
  • For ByteOrderedPartitioner, the return type is blob.
  • -
-

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:

-
CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-)
-
-
-

then the token function will take a single argument of type text (in that case, the partition key is userid; there are no clustering columns so the partition key is the same as the primary key), and the return type will be bigint.

-
-
-

Uuid

-

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or -UPDATE statements.
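For instance, as a sketch (the events table with an id column of type uuid is an assumption for illustration):

INSERT INTO events (id, payload) VALUES (uuid(), 'some data');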

-
-
-

Timeuuid functions

-
-
now
-

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the -time the function is invoked. Note that this method is useful for insertion but is largely non-sensical in -WHERE clauses. For instance, a query of the form:

-
SELECT * FROM myTable WHERE t = now()
-
-
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

-

currentTimeUUID is an alias of now.

-
-
-
minTimeuuid and maxTimeuuid
-

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having t as its timestamp. So for instance:

-
SELECT * FROM myTable
- WHERE t > maxTimeuuid('2013-01-01 00:05+0000')
-   AND t < minTimeuuid('2013-02-02 10:00+0000')
-
-
-

will select all rows where the timeuuid column t is strictly older than '2013-01-01 00:05+0000' but strictly -younger than '2013-02-02 10:00+0000'. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still -not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > -maxTimeuuid('2013-01-01 00:05+0000').

-
-

Note

-

We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the time-based UUID generation process specified by RFC 4122. In particular, the values returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

-
-
-
-
-

Datetime functions

-
-
Retrieving the current date/time
-

The following functions can be used to retrieve the date/time at the time where the function is invoked:

- ---- - - - - - - - - - - - - - - - - - - - -
Function nameOutput type
currentTimestamptimestamp
currentDatedate
currentTimetime
currentTimeUUIDtimeUUID
-

For example the last 2 days of data can be retrieved using:

-
SELECT * FROM myTable WHERE date >= currentDate() - 2d
-
-
-
-
-
Time conversion functions
-

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native -type.

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Function nameInput typeDescription
toDatetimeuuidConverts the timeuuid argument into a date type
toDatetimestampConverts the timestamp argument into a date type
toTimestamptimeuuidConverts the timeuuid argument into a timestamp type
toTimestampdateConverts the date argument into a timestamp type
toUnixTimestamptimeuuidConverts the timeuuid argument into a bigInt raw value
toUnixTimestamptimestampConverts the timestamp argument into a bigInt raw value
toUnixTimestampdateConverts the date argument into a bigInt raw value
dateOftimeuuidSimilar to toTimestamp(timeuuid) (DEPRECATED)
unixTimestampOftimeuuidSimilar to toUnixTimestamp(timeuuid) (DEPRECATED)
-
-
-
-

Blob conversion functions

-

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL (with the notable exception of blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a blob argument and converts it back to a value of type type. For instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.
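As a sketch of round-tripping a value through a blob column (the bins table is an assumption for illustration only):

CREATE TABLE bins (pk int PRIMARY KEY, data blob);
INSERT INTO bins (pk, data) VALUES (0, bigintAsBlob(3));   -- stores 0x0000000000000003
SELECT blobAsBigint(data) FROM bins WHERE pk = 0;          -- returns 3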

-
-
-
-

User-defined functions

-

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath.

-

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

-

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

-
CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-
-
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing.

-

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types.

-

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

-

Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:

-
CREATE FUNCTION some_function ( arg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS int
-    LANGUAGE java
-    AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-
-CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$ return udtarg.getString("txt"); $$;
-
-
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

-

The implicitly available udfContext field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:

-
CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct\_using\_udt ( somearg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS custom_type
-    LANGUAGE java
-    AS $$
-        UDTValue udt = udfContext.newReturnUDTValue();
-        udt.setString("txt", "some string");
-        udt.setInt("i", 42);
-        return udt;
-    $$;
-
-
-

The definition of the UDFContext interface can be found in the Apache Cassandra source code for -org.apache.cassandra.cql3.functions.UDFContext.

-
public interface UDFContext
-{
-    UDTValue newArgUDTValue(String argName);
-    UDTValue newArgUDTValue(int argNum);
-    UDTValue newReturnUDTValue();
-    UDTValue newUDTValue(String udtName);
-    TupleValue newArgTupleValue(String argName);
-    TupleValue newArgTupleValue(int argNum);
-    TupleValue newReturnTupleValue();
-    TupleValue newTupleValue(String cqlDefinition);
-}
-
-
-

Java UDFs already have some imports for common interfaces and classes defined. These imports are:

-
import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.cassandra.cql3.functions.UDFContext;
-import com.datastax.driver.core.TypeCodec;
-import com.datastax.driver.core.TupleValue;
-import com.datastax.driver.core.UDTValue;
-
-
-

Please note, that these convenience imports are not available for script UDFs.

-
-

CREATE FUNCTION

-

Creating a new user-defined function uses the CREATE FUNCTION statement:

-
-create_function_statement ::=  CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS]
-                                   function_name '(' arguments_declaration ')'
-                                   [ CALLED | RETURNS NULL ] ON NULL INPUT
-                                   RETURNS cql_type
-                                   LANGUAGE identifier
-                                   AS string
-arguments_declaration     ::=  identifier cql_type ( ',' identifier cql_type )*
-
-

For instance:

-
CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list)
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int)
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-
-

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with -the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already -exists.

-

If the optional IF NOT EXISTS keywords are used, the function will -only be created if another function with the same signature does not -exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

Behavior on invocation with null values must be defined for each -function. There are two options:

-
    -
  1. RETURNS NULL ON NULL INPUT declares that the function will always -return null if any of the input arguments is null.
  2. -
  3. CALLED ON NULL INPUT declares that the function will always be -executed.
  4. -
-
-
Function Signature
-

Signatures are used to distinguish individual functions. The signature consists of:

-
    -
  1. The fully qualified function name - i.e keyspace plus function-name
  2. -
  3. The concatenated list of all argument types
  4. -
-

Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules.

-

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. -the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the -system keyspaces.

-
-
-
-

DROP FUNCTION

-

Dropping a function uses the DROP FUNCTION statement:

-
-drop_function_statement ::=  DROP FUNCTION [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-arguments_signature     ::=  cql_type ( ',' cql_type )*
-
-

For instance:

-
DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-
-
-

You must specify the argument types (arguments_signature) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions).

-

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists, but does not throw an error if -it doesn’t

-
-
-
-
-

Aggregate functions

-

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.

-

If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query.

-
-

Native aggregates

-
-

Count

-

The count function can be used to count the rows returned by a query. Example:

-
SELECT COUNT (*) FROM plays;
-SELECT COUNT (1) FROM plays;
-
-
-

It can also be used to count the non-null values of a given column:

-
SELECT COUNT (scores) FROM plays;
-
-
-
-
-

Max and Min

-

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:

-
SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake';
-
-
-
-
-

Sum

-

The sum function can be used to sum up all the values returned by a query for a given column. For instance:

-
SELECT SUM (players) FROM plays;
-
-
-
-
-

Avg

-

The avg function can be used to compute the average of all the values returned by a query for a given column. For -instance:

-
SELECT AVG (players) FROM plays;
-
-
-
-
-
-

User-Defined Aggregates

-

User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -count, min, and max.

-

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first -argument of the state function must have type STYPE. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with last -state value as its argument.

-

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate).

-

User-defined aggregates can be used in SELECT statements.

-

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE -statement):

-
CREATE OR REPLACE FUNCTION averageState(state tuple<int,bigint>, val int)
-    CALLED ON NULL INPUT
-    RETURNS tuple
-    LANGUAGE java
-    AS $$
-        if (val != null) {
-            state.setInt(0, state.getInt(0)+1);
-            state.setLong(1, state.getLong(1)+val.intValue());
-        }
-        return state;
-    $$;
-
-CREATE OR REPLACE FUNCTION averageFinal (state tuple<int,bigint>)
-    CALLED ON NULL INPUT
-    RETURNS double
-    LANGUAGE java
-    AS $$
-        double r = 0;
-        if (state.getInt(0) == 0) return null;
-        r = state.getLong(1);
-        r /= state.getInt(0);
-        return Double.valueOf(r);
-    $$;
-
-CREATE OR REPLACE AGGREGATE average(int)
-    SFUNC averageState
-    STYPE tuple
-    FINALFUNC averageFinal
-    INITCOND (0, 0);
-
-CREATE TABLE atable (
-    pk int PRIMARY KEY,
-    val int
-);
-
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-
-SELECT average(val) FROM atable;
-
-
-
-

CREATE AGGREGATE

-

Creating (or replacing) a user-defined aggregate function uses the CREATE AGGREGATE statement:

-
-create_aggregate_statement ::=  CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ]
-                                    function_name '(' arguments_signature ')'
-                                    SFUNC function_name
-                                    STYPE cql_type
-                                    [ FINALFUNC function_name ]
-                                    [ INITCOND term ]
-
-

See above for a complete example.

-

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one -with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature -already exists.

-

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

STYPE defines the type of the state value and must be specified.

-

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null -INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

-

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the -state function must match STYPE. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called -with null.

-

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with -type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS -NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

-

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is -defined, it is the return type of that function.

-
-
-

DROP AGGREGATE

-

Dropping a user-defined aggregate function uses the DROP AGGREGATE statement:

-
-drop_aggregate_statement ::=  DROP AGGREGATE [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-
-

For instance:

-
DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-
-
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates).

-

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if an aggregate with that signature does not exist.

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/index.html b/src/doc/4.0-alpha4/cql/index.html deleted file mode 100644 index 92c78c8d5..000000000 --- a/src/doc/4.0-alpha4/cql/index.html +++ /dev/null @@ -1,247 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "The Cassandra Query Language (CQL)" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

The Cassandra Query Language (CQL)

-

This document describes the Cassandra Query Language (CQL) [1]. Note that this document describes the latest version of the language. However, the changes section provides the differences between the versions of CQL.

-

CQL offers a model close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL.

- - - - - - -
[1] Technically, this document describes CQL version 3, which is not backward compatible with CQL versions 1 and 2 (which have been deprecated and removed) and differs from them in numerous ways.
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/indexes.html b/src/doc/4.0-alpha4/cql/indexes.html deleted file mode 100644 index f2f49c9d3..000000000 --- a/src/doc/4.0-alpha4/cql/indexes.html +++ /dev/null @@ -1,171 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Secondary Indexes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Secondary Indexes

-

CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by:

-
-index_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE INDEX

-

Creating a secondary index on a table uses the CREATE INDEX statement:

-
-create_index_statement ::=  CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ index_name ]
-                                ON table_name '(' index_identifier ')'
-                                [ USING string [ WITH OPTIONS = map_literal ] ]
-index_identifier       ::=  column_name
-                           | ( KEYS | VALUES | ENTRIES | FULL ) '(' column_name ')'
-
-

For instance:

-
CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-
-
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time.

-

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it -is used, the statement will be a no-op if the index already exists.
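For instance, as a sketch reusing the userIndex example above, the following statement is a no-op if that index already exists:

CREATE INDEX IF NOT EXISTS userIndex ON NerdMovies (user);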

-
-

Indexes on Map Keys

-

When creating an index on a map, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.
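For instance, assuming favs is a map column (as implied by the CREATE INDEX ON users (keys(favs)) example above), a sketch of a query that uses the keys index would be:

SELECT * FROM users WHERE favs CONTAINS KEY 'color';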

-
-
-
-

DROP INDEX

-

Dropping a secondary index uses the DROP INDEX statement:

-
-drop_index_statement ::=  DROP INDEX [ IF EXISTS ] index_name
-
-

The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index.

-

If the index does not exist, the statement will return an error, unless IF EXISTS is used in which case the operation is a no-op.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/json.html b/src/doc/4.0-alpha4/cql/json.html deleted file mode 100644 index 35eb62239..000000000 --- a/src/doc/4.0-alpha4/cql/json.html +++ /dev/null @@ -1,318 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "JSON Support" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

JSON Support

-

Cassandra 2.2 introduces JSON support to SELECT and INSERT -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents.

-
-

SELECT JSON

-

With SELECT statements, the JSON keyword can be used to return each row as a single JSON encoded map. The -remainder of the SELECT statement behavior is the same.

-

The result map keys are the same as the column names in a normal result set. For example, a statement like SELECT JSON a, ttl(b) FROM ... would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, SELECT JSON myColumn FROM ... would result in a map key "\"myColumn\"" (note the escaped quotes).

-

The map values will be JSON-encoded representations (as described below) of the result set values.
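As a small sketch (the username and country column names are assumptions for illustration), each row is returned as a single JSON-encoded map, e.g. {"username": "alice", "country": "FR"}:

SELECT JSON username, country FROM users;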

-
-
-

INSERT JSON

-

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single -row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named “myKey” and “value”, you would do the following:

-
INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-
-

By default (or if DEFAULT NULL is explicitly used), a column omitted from the JSON map will be set to NULL, meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). Alternatively, if the DEFAULT UNSET directive is used after the value, omitted column values will be left unset, meaning that pre-existing values for those columns will be preserved.
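For instance, a sketch reusing the mytable example above: the omitted "value" column is left unset rather than overwritten with NULL:

INSERT INTO mytable JSON '{"\"myKey\"": 0}' DEFAULT UNSET;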

-
-
-

JSON Encoding of Cassandra Data Types

-

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string -representation of the collection.

-

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() -arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and -fromJson()):

- ------ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TypeFormats acceptedReturn formatNotes
asciistringstringUses JSON’s \u character escape
bigintinteger, stringintegerString must be valid 64 bit integer
blobstringstringString should be 0x followed by an even number of hex digits
booleanboolean, stringbooleanString must be “true” or “false”
datestringstringDate in format YYYY-MM-DD, timezone UTC
decimalinteger, float, stringfloatMay exceed 32 or 64-bit IEEE-754 floating point precision in -client-side decoder
doubleinteger, float, stringfloatString must be valid integer or float
floatinteger, float, stringfloatString must be valid integer or float
inetstringstringIPv4 or IPv6 address
intinteger, stringintegerString must be valid 32 bit integer
listlist, stringlistUses JSON’s native list representation
mapmap, stringmapUses JSON’s native map representation
smallintinteger, stringintegerString must be valid 16 bit integer
setlist, stringlistUses JSON’s native list representation
textstringstringUses JSON’s \u character escape
timestringstringTime of day in format HH-MM-SS[.fffffffff]
timestampinteger, stringstringA timestamp. Strings constant allows to input timestamps -as dates. Datestamps with format YYYY-MM-DD -HH:MM:SS.SSS are returned.
timeuuidstringstringType 1 UUID. See constant for the UUID format
tinyintinteger, stringintegerString must be valid 8 bit integer
tuplelist, stringlistUses JSON’s native list representation
UDTmap, stringmapUses JSON’s native map representation with field names as keys
uuidstringstringSee constant for the UUID format
varcharstringstringUses JSON’s \u character escape
varintinteger, stringintegerVariable length; may overflow 32 or 64 bit integers in -client-side decoder
-
-
-

The fromJson() Function

-

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used -in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or -SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.
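As a sketch (the pk and v column names, with v assumed to be of type text, are assumptions for illustration):

INSERT INTO mytable (pk, v) VALUES (0, fromJson('"some text"'));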

-
-
-

The toJson() Function

-

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used -in the selection clause of a SELECT statement.
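As a sketch using the same assumed pk and v columns as above:

SELECT toJson(v) FROM mytable WHERE pk = 0;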

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/mvs.html b/src/doc/4.0-alpha4/cql/mvs.html deleted file mode 100644 index 3d7891f82..000000000 --- a/src/doc/4.0-alpha4/cql/mvs.html +++ /dev/null @@ -1,261 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Materialized Views" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Materialized Views

-

Materialized view names are defined by:

-
-view_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE MATERIALIZED VIEW

-

You can create a materialized view on a table using a CREATE MATERIALIZED VIEW statement:

-
-create_materialized_view_statement ::=  CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] view_name AS
-                                            select_statement
-                                            PRIMARY KEY '(' primary_key ')'
-                                            WITH table_options
-
-

For instance:

-
CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT * FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-
-
-

The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which -corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view.

-

Creating a materialized view has 3 main parts:
  • the select statement that restricts the data included in the view,
  • the primary key definition for the view,
  • the options of the view.

- -

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is -used. If it is used, the statement will be a no-op if the materialized view already exists.

-
-

Note

-

By default, materialized views are built in a single thread. The initial build can be parallelized by -increasing the number of threads specified by the property concurrent_materialized_view_builders in -cassandra.yaml. This property can also be manipulated at runtime through both JMX and the -setconcurrentviewbuilders and getconcurrentviewbuilders nodetool commands.

-
-
-

MV select statement

-

The select statement of a materialized view creation defines which rows of the base table are included in the view. That statement is limited in a number of ways:

-
    -
  • the selection may only include columns of the base table. In other words, you can’t use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can however use * as a shortcut for selecting all columns. Further, static columns cannot be included in a materialized view (which means SELECT * isn’t allowed if the base table has static columns).
  • -
  • the WHERE clause has the following restrictions:
      -
    • it cannot include any bind_marker.
    • -
    • the columns that are not part of the base table primary key can only be restricted by an IS NOT NULL -restriction. No other restriction is allowed.
    • -
    • as the columns that are part of the view primary key cannot be null, they must always be at least restricted by a -IS NOT NULL restriction (or any other restriction, but they must have one).
    • -
    -
  • -
  • it cannot have an ordering clause, a limit, or ALLOW FILTERING.
  • -
-
-
-

MV primary key

-

A view must have a primary key and that primary key must conform to the following restrictions:

-
    -
  • it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to exactly one row of the base table.
  • -
  • it can only contain a single column that is not a primary key column in the base table.
  • -
-

So for instance, given the following base table definition:

-
CREATE TABLE t (
-    k int,
-    c1 int,
-    c2 int,
-    v1 int,
-    v2 int,
-    PRIMARY KEY (k, c1, c2)
-)
-
-
-

then the following view definitions are allowed:

-
CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, k, c2)
-
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (v1, k, c1, c2)
-
-
-

but the following ones are not allowed:

-
// Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-    PRIMARY KEY (v1, v2, k, c1, c2)
-
-// Error: must include k in the primary as it's a base table primary key column
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, c2)
-
-
-
-
-

MV options

-

A materialized view is internally implemented by a table and as such, creating a MV allows the same options as creating a table.

-
-
-
-

ALTER MATERIALIZED VIEW

-

After creation, you can alter the options of a materialized view using the ALTER MATERIALIZED VIEW statement:

-
-alter_materialized_view_statement ::=  ALTER MATERIALIZED VIEW view_name WITH table_options
-
-

The options that can be updated are the same as at creation time and are thus the same as for tables.

-
-
-

DROP MATERIALIZED VIEW

-

Dropping a materialized view uses the DROP MATERIALIZED VIEW statement:

-
-drop_materialized_view_statement ::=  DROP MATERIALIZED VIEW [ IF EXISTS ] view_name;
-
-

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used in which case the operation is a no-op.

-
-

MV Limitations

-
-

Note

-

Removal of columns not selected in the Materialized View (via UPDATE base SET unselected_column = null or -DELETE unselected_column FROM base) may shadow missed updates to other columns received by hints or repair. -For this reason, we advise against doing deletions on base columns not selected in views until this is -fixed on CASSANDRA-13826.

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/operators.html b/src/doc/4.0-alpha4/cql/operators.html deleted file mode 100644 index 785801496..000000000 --- a/src/doc/4.0-alpha4/cql/operators.html +++ /dev/null @@ -1,301 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Arithmetic Operators" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Arithmetic Operators

-

CQL supports the following operators:

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - -
OperatorDescription
- (unary)Negates operand
+Addition
-Subtraction
*Multiplication
/Division
%Returns the remainder of a division
-
-

Number Arithmetic

-

All arithmetic operations are supported on numeric types or counters.

-

The return type of the operation will be based on the operand types:

- ------------ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
left/righttinyintsmallintintbigintcounterfloatdoublevarintdecimal
tinyinttinyintsmallintintbigintbigintfloatdoublevarintdecimal
smallintsmallintsmallintintbigintbigintfloatdoublevarintdecimal
intintintintbigintbigintfloatdoublevarintdecimal
bigintbigintbigintbigintbigintbigintdoubledoublevarintdecimal
counterbigintbigintbigintbigintbigintdoubledoublevarintdecimal
floatfloatfloatfloatdoubledoublefloatdoubledecimaldecimal
doubledoubledoubledoubledoubledoubledoubledoubledecimaldecimal
varintvarintvarintvarintdecimaldecimaldecimaldecimaldecimaldecimal
decimaldecimaldecimaldecimaldecimaldecimaldecimaldecimaldecimaldecimal
-

*, / and % operators have a higher precedence level than the + and - operators. Consequently, they will be evaluated first. If two operators in an expression have the same precedence level, they are evaluated left to right based on their position in the expression.
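As a sketch of precedence (the orders table and its numeric columns are assumptions for illustration): the multiplication is evaluated before the addition, so this computes base_price + (unit_price * quantity).

SELECT base_price + unit_price * quantity FROM orders WHERE order_id = 1;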

-
-
-

Datetime Arithmetic

-

A duration can be added (+) or subtracted (-) from a timestamp or a date to create a new timestamp or date. So for instance:

-
SELECT * FROM myTable WHERE t = '2017-01-01' - 2d
-
-
-

will select all the records with a value of t which is in the last 2 days of 2016.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/cql/security.html b/src/doc/4.0-alpha4/cql/security.html deleted file mode 100644 index ef2a35fe3..000000000 --- a/src/doc/4.0-alpha4/cql/security.html +++ /dev/null @@ -1,743 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-
-

Database Roles

-

CQL uses database roles to represent users and groups of users. Syntactically, a role is defined by:

-
-role_name ::=  identifier | string
-
-
-

CREATE ROLE

-

Creating a role uses the CREATE ROLE statement:

-
-create_role_statement ::=  CREATE ROLE [ IF NOT EXISTS ] role_name
-                               [ WITH role_options ]
-role_options          ::=  role_option ( AND role_option )*
-role_option           ::=  PASSWORD '=' string
-                          | LOGIN '=' boolean
-                          | SUPERUSER '=' boolean
-                          | OPTIONS '=' map_literal
-                          | ACCESS TO DATACENTERS set_literal
-                          | ACCESS TO ALL DATACENTERS
-
-

For instance:

-
CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'};
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS;
-
-
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

-

Permissions on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is -not.

-

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role.

-

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

-

Role names should be quoted if they contain non-alphanumeric characters.

-
-

Setting credentials for internal authentication

-

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single -quotation marks.

-

If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD -clause is not necessary.

-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. Not specifying datacenters implicitly grants access to all datacenters. The clause ACCESS TO ALL DATACENTERS can be used for explicitness, but there’s no functional difference.

-
-
-

Creating a role conditionally

-

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. -If the option is used and the role exists, the statement is a no-op:

-
CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-
-
-
-
-
-

ALTER ROLE

-

Altering the options of a role uses the ALTER ROLE statement:

-
-alter_role_statement ::=  ALTER ROLE role_name WITH role_options
-
-

For instance:

-
ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-
-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the -ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ACCESS TO ALL DATACENTERS clause.
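As a sketch, following the role_options grammar shown above and reusing the alice role from earlier examples:

ALTER ROLE alice WITH ACCESS TO DATACENTERS {'DC1'};
ALTER ROLE alice WITH ACCESS TO ALL DATACENTERS;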

-

Conditions on executing ALTER ROLE statements:

-
    -
  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • -
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • -
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • -
  • To modify properties of a role, the client must be granted ALTER permission on that role
  • -
-
-
-
-

DROP ROLE

-

Dropping a role uses the DROP ROLE statement:

-
-drop_role_statement ::=  DROP ROLE [ IF EXISTS ] role_name
-
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.

-

Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is -used. If the option is used and the role does not exist the statement is a no-op.

-
-

Note

-

DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain -connected and will retain the ability to perform any database actions which do not require authorization. -However, if authorization is enabled, permissions of the dropped role are also revoked, -subject to the caching options configured in cassandra.yaml. -Should a dropped role be subsequently recreated and have new permissions or -roles granted to it, any client sessions still connected will acquire the newly granted -permissions and roles.

-
-
-
-

GRANT ROLE

-

Granting a role to another uses the GRANT ROLE statement:

-
-grant_role_statement ::=  GRANT role_name TO role_name
-
-

For instance:

-
GRANT report_writer TO alice;
-
-
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also -acquired by alice.

-

Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:

-
GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
-GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-
-
-
-
-

REVOKE ROLE

-

Revoking a role uses the REVOKE ROLE statement:

-
-revoke_role_statement ::=  REVOKE role_name FROM role_name
-
-

For instance:

-
REVOKE report_writer FROM alice;
-
-
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the -report_writer role are also revoked.

-
-
-

LIST ROLES

-

All the known roles (in the system or granted to a specific role) can be listed using the LIST ROLES statement:

-
-list_roles_statement ::=  LIST ROLES [ OF role_name ] [ NORECURSIVE ]
-
-

For instance:

-
LIST ROLES;
-
-
-

returns all known roles in the system; this requires DESCRIBE permission on the database roles resource. And:

-
LIST ROLES OF alice;
-
-
-

enumerates all roles granted to alice, including those transitively acquired. But:

-
LIST ROLES OF bob NORECURSIVE
-
-
-

lists all roles directly granted to bob without including any of the transitively acquired ones.

-
-
-
-

Users

-

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -USER. For backward compatibility, the legacy syntax has been preserved with USER centric statements becoming -synonyms for the ROLE based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role.

-
-

CREATE USER

-

Creating a user uses the CREATE USER statement:

-
-create_user_statement ::=  CREATE USER [ IF NOT EXISTS ] role_name [ WITH PASSWORD string ] [ user_option ]
-user_option           ::=  SUPERUSER | NOSUPERUSER
-
-

For instance:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-
-
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of -statements are equivalent:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-
-
-
-

ALTER USER

-

Altering the options of a user uses the ALTER USER statement:

-
-alter_user_statement ::=  ALTER USER role_name [ WITH PASSWORD string ] [ user_option ]
-
-

For instance:

-
ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-
-
-
-
-

DROP USER

-

Dropping a user uses the DROP USER statement:

-
-drop_user_statement ::=  DROP USER [ IF EXISTS ] role_name
-
-
-
-

LIST USERS

-

Existing users can be listed using the LIST USERS statement:

-
-list_users_statement ::=  LIST USERS
-
-

Note that this statement is equivalent to:

-
LIST ROLES;
-
-
-

but only roles with the LOGIN privilege are included in the output.

-
-
-
-

Data Control

-
-

Permissions

-

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type is modelled hierarchically:

-
• The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> TABLE.
• Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION.
• Resources representing roles have the structure ALL ROLES -> ROLE.
• Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ALL MBEANS -> MBEAN.

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a resource higher up the chain automatically grants that same permission on all resources lower down. For example, granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It is also possible to grant permissions on all functions scoped to a particular keyspace.

-
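As a small sketch of that downward flow (keyspace, table and role names are illustrative only):

```
-- Granting at the keyspace level...
GRANT SELECT ON KEYSPACE keyspace1 TO data_reader;

-- ...already covers every table in keyspace1, so a per-table grant such as
-- GRANT SELECT ON keyspace1.table1 TO data_reader;
-- is not required.
```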

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established following permissions changes.

-

The full set of available permissions is:

-
• CREATE
• ALTER
• DROP
• SELECT
• MODIFY
• AUTHORIZE
• DESCRIBE
• EXECUTE

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context of functions or mbeans; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT a permission on a resource to which it cannot be applied results in an error response. The following table illustrates which permissions can be granted on which types of resource, and which statements are enabled by that permission.

| Permission | Resource | Operations |
|------------|----------|------------|
| CREATE | ALL KEYSPACES | CREATE KEYSPACE and CREATE TABLE in any keyspace |
| CREATE | KEYSPACE | CREATE TABLE in specified keyspace |
| CREATE | ALL FUNCTIONS | CREATE FUNCTION in any keyspace and CREATE AGGREGATE in any keyspace |
| CREATE | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE in specified keyspace |
| CREATE | ALL ROLES | CREATE ROLE |
| ALTER | ALL KEYSPACES | ALTER KEYSPACE and ALTER TABLE in any keyspace |
| ALTER | KEYSPACE | ALTER KEYSPACE and ALTER TABLE in specified keyspace |
| ALTER | TABLE | ALTER TABLE |
| ALTER | ALL FUNCTIONS | CREATE FUNCTION and CREATE AGGREGATE: replacing any existing |
| ALTER | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE: replacing existing in specified keyspace |
| ALTER | FUNCTION | CREATE FUNCTION and CREATE AGGREGATE: replacing existing |
| ALTER | ALL ROLES | ALTER ROLE on any role |
| ALTER | ROLE | ALTER ROLE |
| DROP | ALL KEYSPACES | DROP KEYSPACE and DROP TABLE in any keyspace |
| DROP | KEYSPACE | DROP TABLE in specified keyspace |
| DROP | TABLE | DROP TABLE |
| DROP | ALL FUNCTIONS | DROP FUNCTION and DROP AGGREGATE in any keyspace |
| DROP | ALL FUNCTIONS IN KEYSPACE | DROP FUNCTION and DROP AGGREGATE in specified keyspace |
| DROP | FUNCTION | DROP FUNCTION |
| DROP | ALL ROLES | DROP ROLE on any role |
| DROP | ROLE | DROP ROLE |
| SELECT | ALL KEYSPACES | SELECT on any table |
| SELECT | KEYSPACE | SELECT on any table in specified keyspace |
| SELECT | TABLE | SELECT on specified table |
| SELECT | ALL MBEANS | Call getter methods on any mbean |
| SELECT | MBEANS | Call getter methods on any mbean matching a wildcard pattern |
| SELECT | MBEAN | Call getter methods on named mbean |
| MODIFY | ALL KEYSPACES | INSERT, UPDATE, DELETE and TRUNCATE on any table |
| MODIFY | KEYSPACE | INSERT, UPDATE, DELETE and TRUNCATE on any table in specified keyspace |
| MODIFY | TABLE | INSERT, UPDATE, DELETE and TRUNCATE on specified table |
| MODIFY | ALL MBEANS | Call setter methods on any mbean |
| MODIFY | MBEANS | Call setter methods on any mbean matching a wildcard pattern |
| MODIFY | MBEAN | Call setter methods on named mbean |
| AUTHORIZE | ALL KEYSPACES | GRANT PERMISSION and REVOKE PERMISSION on any table |
| AUTHORIZE | KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION on any table in specified keyspace |
| AUTHORIZE | TABLE | GRANT PERMISSION and REVOKE PERMISSION on specified table |
| AUTHORIZE | ALL FUNCTIONS | GRANT PERMISSION and REVOKE PERMISSION on any function |
| AUTHORIZE | ALL FUNCTIONS IN KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION in specified keyspace |
| AUTHORIZE | FUNCTION | GRANT PERMISSION and REVOKE PERMISSION on specified function |
| AUTHORIZE | ALL MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean |
| AUTHORIZE | MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean matching a wildcard pattern |
| AUTHORIZE | MBEAN | GRANT PERMISSION and REVOKE PERMISSION on named mbean |
| AUTHORIZE | ALL ROLES | GRANT ROLE and REVOKE ROLE on any role |
| AUTHORIZE | ROLES | GRANT ROLE and REVOKE ROLE on specified roles |
| DESCRIBE | ALL ROLES | LIST ROLES on all roles or only roles granted to another, specified role |
| DESCRIBE | ALL MBEANS | Retrieve metadata about any mbean from the platform's MBeanServer |
| DESCRIBE | MBEANS | Retrieve metadata about any mbean matching a wildcard pattern from the platform's MBeanServer |
| DESCRIBE | MBEAN | Retrieve metadata about a named mbean from the platform's MBeanServer |
| EXECUTE | ALL FUNCTIONS | SELECT, INSERT and UPDATE using any function, and use of any function in CREATE AGGREGATE |
| EXECUTE | ALL FUNCTIONS IN KEYSPACE | SELECT, INSERT and UPDATE using any function in specified keyspace and use of any function in keyspace in CREATE AGGREGATE |
| EXECUTE | FUNCTION | SELECT, INSERT and UPDATE using specified function and use of the function in CREATE AGGREGATE |
| EXECUTE | ALL MBEANS | Execute operations on any mbean |
| EXECUTE | MBEANS | Execute operations on any mbean matching a wildcard pattern |
| EXECUTE | MBEAN | Execute operations on named mbean |
-

GRANT PERMISSION

-

Granting a permission uses the GRANT PERMISSION statement:

-
-grant_permission_statement ::=  GRANT permissions ON resource TO role_name
-permissions                ::=  ALL [ PERMISSIONS ] | permission [ PERMISSION ]
-permission                 ::=  CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-resource                   ::=  ALL KEYSPACES
-                               | KEYSPACE keyspace_name
-                               | [ TABLE ] table_name
-                               | ALL ROLES
-                               | ROLE role_name
-                               | ALL FUNCTIONS [ IN KEYSPACE keyspace_name ]
-                               | FUNCTION function_name '(' [ cql_type ( ',' cql_type )* ] ')'
-                               | ALL MBEANS
-                               | ( MBEAN | MBEANS ) string
-
-

For instance:

-
GRANT SELECT ON ALL KEYSPACES TO data_reader;
-
-
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all keyspaces:

-
GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-
-
-

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace:

-
GRANT DROP ON keyspace1.table1 TO schema_owner;
-
-
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1:

-
GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-
-
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries which use the function keyspace1.user_function( int ):

-
GRANT DESCRIBE ON ALL ROLES TO role_admin;
-
-
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST ROLES statement.

-
-

GRANT ALL

-

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target resource.

-
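For instance, a sketch with illustrative names:

```
-- Grants every permission applicable to a keyspace
-- (CREATE, ALTER, DROP, SELECT, MODIFY, AUTHORIZE) in one statement
GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO schema_owner;
```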
-
-

Automatic Granting

-

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or CREATE ROLE statement, the creator (the role the database user who issues the statement is identified as) is automatically granted all applicable permissions on the new resource.

-
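As an illustration (a sketch with hypothetical names), if the role alice issues:

```
CREATE TABLE keyspace1.events (
    id uuid PRIMARY KEY,
    payload text
);
```

then alice is automatically granted the permissions applicable to the new table (per the table above: ALTER, DROP, SELECT, MODIFY and AUTHORIZE) without any explicit GRANT statement.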
-
-
-

REVOKE PERMISSION

-

Revoking a permission from a role uses the REVOKE PERMISSION statement:

-
-revoke_permission_statement ::=  REVOKE permissions ON resource FROM role_name
-
-

For instance:

-
REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-
-
-

Because of their function in normal driver operations, certain tables cannot have their SELECT permissions revoked. The following tables will be available to all authorized users regardless of their assigned role:

-
* `system_schema.keyspaces`
* `system_schema.columns`
* `system_schema.tables`
* `system.local`
* `system.peers`
-
-
-
-
-

LIST PERMISSIONS

-

Listing granted permissions uses the LIST PERMISSIONS statement:

-
-list_permissions_statement ::=  LIST permissions [ ON resource ] [ OF role_name [ NORECURSIVE ] ]
-
-

For instance:

-
LIST ALL PERMISSIONS OF alice;
-
-
-

Show all permissions granted to alice, including those acquired transitively from any other roles:

-
LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-
-
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. For example, should bob have ALTER permission on keyspace1, that would be included in the results of this query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to bob or one of bob’s roles:

-
LIST SELECT PERMISSIONS OF carlos;
-
-
-

Show any permissions granted to carlos or any of carlos’s roles, limited to SELECT permissions on any resource.

-
-
-
diff --git a/src/doc/4.0-alpha4/cql/triggers.html b/src/doc/4.0-alpha4/cql/triggers.html
deleted file mode 100644

Triggers

-

Triggers are identified by a name defined by:

-
-trigger_name ::=  identifier
-
-
-

CREATE TRIGGER

-

Creating a new trigger uses the CREATE TRIGGER statement:

-
-create_trigger_statement ::=  CREATE TRIGGER [ IF NOT EXISTS ] trigger_name
-                                  ON table_name
-                                  USING string
-
-

For instance:

-
CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-
-
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. You place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it is loaded during cluster startup and must exist on every node that participates in the cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

-
-
-

DROP TRIGGER

-

Dropping a trigger uses the DROP TRIGGER statement:

-
-drop_trigger_statement ::=  DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name
-
-

For instance:

-
DROP TRIGGER myTrigger ON myTable;
-
-
-
-
diff --git a/src/doc/4.0-alpha4/cql/types.html b/src/doc/4.0-alpha4/cql/types.html
deleted file mode 100644

Data Types

-

CQL is a typed language and supports a rich set of data types, including native types, collection types, user-defined types, tuple types and custom types:

-
-cql_type ::=  native_type | collection_type | user_defined_type | tuple_type | custom_type
-
-
-

Native Types

-

The native types supported by CQL are:

-
-native_type ::=  ASCII
-                 | BIGINT
-                 | BLOB
-                 | BOOLEAN
-                 | COUNTER
-                 | DATE
-                 | DECIMAL
-                 | DOUBLE
-                 | DURATION
-                 | FLOAT
-                 | INET
-                 | INT
-                 | SMALLINT
-                 | TEXT
-                 | TIME
-                 | TIMESTAMP
-                 | TIMEUUID
-                 | TINYINT
-                 | UUID
-                 | VARCHAR
-                 | VARINT
-
-

The following table gives additional information on the native data types, and on which kinds of constants each type supports:

| type | constants supported | description |
|------|---------------------|-------------|
| ascii | string | ASCII character string |
| bigint | integer | 64-bit signed long |
| blob | blob | Arbitrary bytes (no validation) |
| boolean | boolean | Either true or false |
| counter | integer | Counter column (64-bit signed value). See Counters for details |
| date | integer, string | A date (with no corresponding time value). See Working with dates below for details |
| decimal | integer, float | Variable-precision decimal |
| double | integer, float | 64-bit IEEE-754 floating point |
| duration | duration | A duration with nanosecond precision. See Working with durations below for details |
| float | integer, float | 32-bit IEEE-754 floating point |
| inet | string | An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that there is no inet constant, IP addresses should be input as strings |
| int | integer | 32-bit signed int |
| smallint | integer | 16-bit signed int |
| text | string | UTF8 encoded string |
| time | integer, string | A time (with no corresponding date value) with nanosecond precision. See Working with times below for details |
| timestamp | integer, string | A timestamp (date and time) with millisecond precision. See Working with timestamps below for details |
| timeuuid | uuid | Version 1 UUID, generally used as a “conflict-free” timestamp. Also see Timeuuid functions |
| tinyint | integer | 8-bit signed int |
| uuid | uuid | A UUID (of any version) |
| varchar | string | UTF8 encoded string |
| varint | integer | Arbitrary-precision integer |

Counters

-

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed integer and on which 2 operations are supported: incrementing and decrementing (see the UPDATE statement for syntax). Note that the value of a counter cannot be set: a counter does not exist until first incremented/decremented, and that first increment/decrement is made as if the prior value was 0.

-
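A minimal sketch of a counter table and its updates (table and column names are illustrative):

```
CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter
);

-- Counters are only ever incremented or decremented, never set directly
UPDATE page_views SET views = views + 1 WHERE page = '/home';
UPDATE page_views SET views = views - 1 WHERE page = '/home';
```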

Counters have a number of important limitations:

-
• They cannot be used for columns that are part of the PRIMARY KEY of a table.
• A table that contains a counter can only contain counters. In other words, either all the columns of a table outside the PRIMARY KEY have the counter type, or none of them have it.
• Counters do not support expiration.
• The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
• Counter updates are, by nature, not idempotent. An important consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), the client has no way to know if the update has been applied or not. In particular, replaying the update may or may not lead to an over count.
-
-
-

Working with timestamps

-

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the standard base time known as the epoch: January 1 1970 at 00:00:00 GMT.

-

Timestamps can be input in CQL either using their value as an integer, or using a string that represents an ISO 8601 date. For instance, all of the values below are valid timestamp values for Mar 2, 2011, at 04:05:00 AM, GMT:

-
• 1299038700000
• '2011-02-03 04:05+0000'
• '2011-02-03 04:05:00+0000'
• '2011-02-03 04:05:00.000+0000'
• '2011-02-03T04:05+0000'
• '2011-02-03T04:05:00+0000'
• '2011-02-03T04:05:00.000+0000'

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is -0800. The time zone may be omitted if desired ('2011-02-03 04:05:00'), and if so, the date will be interpreted as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be specified for timestamps when feasible.

-

The time of day may also be omitted ('2011-02-03' or '2011-02-03+0000'), in which case the time of day will default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using the date type.

-
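For example, a sketch (the events table is hypothetical) contrasting an explicit time zone with relying on the coordinator's configured zone:

```
CREATE TABLE events (
    id int PRIMARY KEY,
    created_at timestamp
);

-- Explicit time zone: unambiguous on every node
INSERT INTO events (id, created_at) VALUES (1, '2011-02-03 04:05:00+0000');

-- No time zone: interpreted in the coordinating node's configured time zone
INSERT INTO events (id, created_at) VALUES (2, '2011-02-03 04:05:00');
```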
-
-

Working with dates

-

Values of the date type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at the center of the range (2^31). The epoch is January 1st, 1970.

-

As for timestamps, a date can be input either as an integer or using a date string. In the latter case, the format should be yyyy-mm-dd (so '2011-02-03' for instance).

-
-
-

Working with times

-

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

-

As for timestamps, a time can be input either as an integer or using a string representing the time. In the latter case, the format should be hh:mm:ss[.fffffffff] (where the sub-second precision is optional and, if provided, can be less than a nanosecond). So for instance, the following are valid inputs for a time:

-
• '08:12:54'
• '08:12:54.123'
• '08:12:54.123456'
• '08:12:54.123456789'
-
-

Working with durations

-

Values of the duration type are encoded as 3 signed integers of variable length. The first integer represents the number of months, the second the number of days and the third the number of nanoseconds. This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving. Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded as a 64-bit integer.

-

A duration can be input as:

-
-
  1. (quantity unit)+ like 12h30m where the unit can be:
     • y: years (12 months)
     • mo: months (1 month)
     • w: weeks (7 days)
     • d: days (1 day)
     • h: hours (3,600,000,000,000 nanoseconds)
     • m: minutes (60,000,000,000 nanoseconds)
     • s: seconds (1,000,000,000 nanoseconds)
     • ms: milliseconds (1,000,000 nanoseconds)
     • us or µs: microseconds (1000 nanoseconds)
     • ns: nanoseconds (1 nanosecond)
  2. ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W
  3. ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]
-
-

For example:

-
INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-
-

Duration columns cannot be used in a table’s PRIMARY KEY. This limitation is due to the fact that durations cannot be ordered. It is effectively not possible to know if 1mo is greater than 29d without a date context.

-

A 1d duration is not equal to a 24h one as the duration type has been created to be able to support daylight saving.

-
-
-

Collections

-

CQL supports 3 kinds of collections: Maps, Sets and Lists. The types of those collections are defined by:

-
-collection_type ::=  MAP '<' cql_type ',' cql_type '>'
-                     | SET '<' cql_type '>'
-                     | LIST '<' cql_type '>'
-
-

and their values can be input using collection literals:

-
-collection_literal ::=  map_literal | set_literal | list_literal
-map_literal        ::=  '{' [ term ':' term (',' term : term)* ] '}'
-set_literal        ::=  '{' [ term (',' term)* ] '}'
-list_literal       ::=  '[' [ term (',' term)* ] ']'
-
-

Note however that neither bind_marker nor NULL are supported inside collection literals.

-
-

Noteworthy characteristics

-

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all messages sent by a user”, “events registered by a sensor”…), then collections are not appropriate and a specific table (with clustering columns) should be used; see the sketch after the list below. Concretely, (non-frozen) collections have the following noteworthy characteristics and limitations:

-
• Individual collections are not indexed internally. This means that even to access a single element of a collection, the whole collection has to be read (and reading one is not paged internally).
• While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. Further, some list operations are not idempotent by nature (see the section on lists below for details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when possible.

Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an anti-pattern to use a (single) collection to store large amounts of data.

-
-
-

Maps

-

A map is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define and insert a map with:

-
CREATE TABLE users (
-    id text PRIMARY KEY,
-    name text,
-    favs map<text, text> // A map of text keys, and text values
-);
-
-INSERT INTO users (id, name, favs)
-           VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-// Replace the existing map entirely.
-UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-
-

Further, maps support:

-
    -
  • Updating or inserting one or more elements:

    -
    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    -UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
    -
    -
    -
  • -
  • Removing one or more elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    -UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
    -
    -
    -

    Note that for removing multiple elements in a map, you remove from it a set of keys.

    -
  • -
-

Lastly, TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated elements. In other words:

-
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

-
-
-

Sets

-

A set is a (sorted) collection of unique values. You can define and insert a set with:

-
CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    tags set<text> // A set of text values
-);
-
-INSERT INTO images (name, owner, tags)
-            VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-// Replace the existing set entirely
-UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-
-

Further, sets support:

-
    -
  • Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):

    -
    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
  • Removing one or multiple elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
-

Lastly, as for maps, TTLs if used only apply to the newly inserted values.

-
-
-

Lists

-
-

Note

-

As mentioned above and further discussed at the end of this section, lists have limitations and specific performance considerations that you should take into account before using them. In general, if you can use a set instead of a list, always prefer a set.

-
-

A list is a (sorted) collection of non-unique values where elements are ordered by their position in the list. You can define and insert a list with:

-
CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int> // A list of integers
-)
-
-INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-// Replace the existing list entirely
-UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-
-

Further, lists support:

-
    -
  • Appending and prepending values to a list:

    -
    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    -UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
    -
    -
    -
  • -
  • Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small:

    -
    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
    -
    -
    -
  • -
  • Removing an element by its position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted:

    -
    DELETE scores[1] FROM plays WHERE id = '123-afde';
    -
    -
    -
  • -
  • Deleting all the occurrences of particular values in the list (if a particular element doesn’t occur at all in the list, it is simply ignored and no error is thrown):

    -
    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
    -
    -
    -
  • -
-
-

Warning

-

The append and prepend operations are not idempotent by nature. In particular, if one of these operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value twice.

-
-
-

Warning

-

Setting and removing an element by position and removing occurrences of particular values incur an internal read-before-write. They will thus run more slowly and take more resources than usual updates (with the exception of conditional writes, which have their own cost).

-
-

Lastly, as for maps, TTLs when used only apply to the newly inserted values.

-
-
-
-

User-Defined Types

-

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using the create_type_statement, alter_type_statement and drop_type_statement described below. But once created, a UDT is simply referred to by its name:

-
-user_defined_type ::=  udt_name
-udt_name          ::=  [ keyspace_name '.' ] identifier
-
-
-

Creating a UDT

-

Creating a new user-defined type is done using a CREATE TYPE statement defined by:

-
-create_type_statement ::=  CREATE TYPE [ IF NOT EXISTS ] udt_name
-                               '(' field_definition ( ',' field_definition )* ')'
-field_definition      ::=  identifier cql_type
-
-

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any type, including collections or other UDTs. For instance:

-
CREATE TYPE phone (
-    country_code int,
-    number text,
-)
-
-CREATE TYPE address (
-    street text,
-    city text,
-    zip text,
-    phones map<text, phone>
-)
-
-CREATE TABLE user (
-    name text PRIMARY KEY,
-    addresses map<text, frozen<address>>
-)
-
-
-

Note that:

-
• Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the type already exists.
• A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in the current keyspace.
• As of Cassandra 4.0-alpha4, UDTs have to be frozen in most cases, hence the frozen<address> in the table definition above. Please see the section on frozen for more details.
-
-
-

UDT literals

-

Once a user-defined type has been created, values can be input using a UDT literal:

-
-udt_literal ::=  '{' identifier ':' term ( ',' identifier ':' term )* '}'
-
-

In other words, a UDT literal is like a map literal but its keys are the names of the fields of the type. For instance, one could insert into the table defined in the previous section using:

-
INSERT INTO user (name, addresses)
-          VALUES ('z3 Pr3z1den7', {
-              'home' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                            'landline' : { country_code: 1, number: '...' } }
-              },
-              'work' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'fax' : { country_code: 1, number: '...' } }
-              }
-          })
-
-
-

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields (in which case those will be null).

-
-
-

Altering a UDT

-

An existing user-defined type can be modified using an ALTER TYPE statement:

-
-alter_type_statement    ::=  ALTER TYPE udt_name alter_type_modification
-alter_type_modification ::=  ADD field_definition
-                             | RENAME identifier TO identifier ( identifier TO identifier )*
-
-

You can:

-
• add a new field to the type (ALTER TYPE address ADD country text). That new field will be null for any values of the type created before the addition.
• rename the fields of the type (ALTER TYPE address RENAME zip TO zipcode).
-
-
-

Dropping a UDT

-

You can drop an existing user-defined type using a DROP TYPE statement:

-
-drop_type_statement ::=  DROP TYPE [ IF EXISTS ] udt_name
-
-

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is still in use by another type, table or function will result in an error.

-

If the type dropped does not exist, an error will be returned unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-
-

Tuples

-

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

-
-tuple_type    ::=  TUPLE '<' cql_type ( ',' cql_type )* '>'
-tuple_literal ::=  '(' term ( ',' term )* ')'
-
-

and can be used thusly:

-
CREATE TABLE durations (
-    event text,
-    duration tuple<int, text>,
-)
-
-INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-
-

Unlike other “composed” types (collections and UDTs), a tuple is always frozen (without the need of the frozen keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of those values can be null, but they need to be explicitly declared as such).

-
-
-

Custom Types

-
-

Note

-

Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is complex, not user friendly, and the other provided types, particularly user-defined types, should almost always be enough.

-
-

A custom type is defined by:

-
-custom_type ::=  string
-
-

A custom type is a string that contains the name of a Java class that extends the server side AbstractType class and that can be loaded by Cassandra (it should thus be in the CLASSPATH of every node running Cassandra). That class will define what values are valid for the type and how values of that type sort when used for a clustering column. For any other purpose, a value of a custom type is the same as that of a blob, and can in particular be input using the blob literal syntax.

-
-
diff --git a/src/doc/4.0-alpha4/data_modeling/data_modeling_conceptual.html b/src/doc/4.0-alpha4/data_modeling/data_modeling_conceptual.html
deleted file mode 100644

Conceptual Data Modeling

-

First, let’s create a simple domain model that is easy to understand in the relational world, and then see how you might map it from a relational to a distributed hashtable model in Cassandra.

Let’s use an example that is complex enough to show the various data structures and design patterns, but not something that will bog you down with details. Also, a domain that’s familiar to everyone will allow you to concentrate on how to work with Cassandra, not on what the application domain is all about.

For example, let’s use a domain that is easily understood and that everyone can relate to: making hotel reservations.

The conceptual domain includes hotels, guests that stay in the hotels, a collection of rooms for each hotel, the rates and availability of those rooms, and a record of reservations booked for guests. Hotels typically also maintain a collection of “points of interest,” which are parks, museums, shopping galleries, monuments, or other places near the hotel that guests might want to visit during their stay. Both hotels and points of interest need to maintain geolocation data so that they can be found on maps for mashups, and to calculate distances.

The conceptual domain is depicted below using the entity–relationship model popularized by Peter Chen. This simple diagram represents the entities in the domain with rectangles, and attributes of those entities with ovals. Attributes that represent unique identifiers for items are underlined. Relationships between entities are represented as diamonds, and the connectors between the relationship and each entity show the multiplicity of the connection.

[Figure: ../_images/data_modeling_hotel_erd.png]

Obviously, in the real world, there would be many more considerations and much more complexity. For example, hotel rates are notoriously dynamic, and calculating them involves a wide array of factors. Here you’re defining something complex enough to be interesting and touch on the important points, but simple enough to maintain the focus on learning Cassandra.

Material adapted from Cassandra, The Definitive Guide. Published by O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. All rights reserved. Used with permission.

-
diff --git a/src/doc/4.0-alpha4/data_modeling/data_modeling_logical.html b/src/doc/4.0-alpha4/data_modeling/data_modeling_logical.html
deleted file mode 100644

Logical Data Modeling

-

Now that you have defined your queries, you’re ready to begin designing Cassandra tables. First, create a logical model containing a table for each query, capturing entities and relationships from the conceptual model.

To name each table, you’ll identify the primary entity type for which you are querying and use that to start the entity name. If you are querying by attributes of other related entities, append those to the table name, separated with _by_. For example, hotels_by_poi.

Next, you identify the primary key for the table, adding partition key columns based on the required query attributes, and clustering columns in order to guarantee uniqueness and support desired sort ordering.

The design of the primary key is extremely important, as it will determine how much data will be stored in each partition and how that data is organized on disk, which in turn will affect how quickly Cassandra processes reads.

Complete each table by adding any additional attributes identified by the query. If any of these additional attributes are the same for every instance of the partition key, mark the column as static.

Now that was a pretty quick description of a fairly involved process, so it will be worthwhile to work through a detailed example. First, let’s introduce a notation that you can use to represent logical models.

Several individuals within the Cassandra community have proposed notations for capturing data models in diagrammatic form. This document uses a notation popularized by Artem Chebotko which provides a simple, informative way to visualize the relationships between queries and tables in your designs. This figure shows the Chebotko notation for a logical data model.

[Figure: ../_images/data_modeling_chebotko_logical.png]

Each table is shown with its title and a list of columns. Primary key columns are identified via symbols such as K for partition key columns and C↑ or C↓ to represent clustering columns. Lines are shown entering tables or between tables to indicate the queries that each table is designed to support.

-
-

Hotel Logical Data Model

-

The figure below shows a Chebotko logical data model for the queries involving hotels, points of interest, rooms, and amenities. One thing you’ll notice immediately is that the Cassandra design doesn’t include dedicated tables for rooms or amenities, as you had in the relational design. This is because the workflow didn’t identify any queries requiring this direct access.

[Figure: ../_images/data_modeling_hotel_logical.png]

Let’s explore the details of each of these tables.

-

The first query Q1 is to find hotels near a point of interest, so you’ll call this table hotels_by_poi. Searching by a named point of interest is a clue that the point of interest should be a part of the primary key. Let’s reference the point of interest by name, because according to the workflow that is how users will start their search.

You’ll note that you certainly could have more than one hotel near a given point of interest, so you’ll need another component in the primary key in order to make sure you have a unique partition for each hotel. So you add the hotel key as a clustering column.

-
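A possible CQL rendering of this logical table (a sketch only; the non-key columns and their types are placeholders until the physical model):

```
CREATE TABLE hotels_by_poi (
    poi_name text,      -- partition key: how users start their search
    hotel_id text,      -- clustering column: one unique row per nearby hotel
    name text,
    phone text,
    PRIMARY KEY ((poi_name), hotel_id)
) WITH comment = 'Q1. Find hotels near given poi';
```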

An important consideration in designing your table’s primary key is making sure that it defines a unique data element. Otherwise you run the risk of accidentally overwriting data.

Now for the second query (Q2), you’ll need a table to get information about a specific hotel. One approach would have been to put all of the attributes of a hotel in the hotels_by_poi table, but you added only those attributes that were required by the application workflow.

From the workflow diagram, you know that the hotels_by_poi table is used to display a list of hotels with basic information on each hotel, and the application knows the unique identifiers of the hotels returned. When the user selects a hotel to view details, you can then use Q2, which is used to obtain details about the hotel. Because you already have the hotel_id from Q1, you use that as a reference to the hotel you’re looking for. Therefore the second table is just called hotels.

Another option would have been to store a set of poi_names in the hotels table. This is an equally valid approach. You’ll learn through experience which approach is best for your application.

Q3 is just a reverse of Q1—looking for points of interest near a hotel, rather than hotels near a point of interest. This time, however, you need to access the details of each point of interest, as represented by the pois_by_hotel table. As previously, you add the point of interest name as a clustering key to guarantee uniqueness.

At this point, let’s now consider how to support query Q4 to help the user find available rooms at a selected hotel for the nights they are interested in staying. Note that this query involves both a start date and an end date. Because you’re querying over a range instead of a single date, you know that you’ll need to use the date as a clustering key. Use the hotel_id as a primary key to group room data for each hotel on a single partition, which should help searches be super fast. Let’s call this the available_rooms_by_hotel_date table.

To support searching over a range, use clustering columns to store attributes that you need to access in a range query. Remember that the order of the clustering columns is important.

The design of the available_rooms_by_hotel_date table is an instance of the wide partition pattern. This pattern is sometimes called the wide row pattern when discussing databases that support similar models, but wide partition is a more accurate description from a Cassandra perspective. The essence of the pattern is to group multiple related rows in a partition in order to support fast access to multiple rows within the partition in a single query.

-
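A sketch of that table (illustrative; the exact columns and types belong to the physical model):

```
-- One partition per hotel groups all of its availability rows;
-- the date clustering column supports range queries over a stay
CREATE TABLE available_rooms_by_hotel_date (
    hotel_id text,
    date date,
    room_number smallint,
    is_available boolean,
    PRIMARY KEY ((hotel_id), date, room_number)
);
```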

In order to round out the shopping portion of the data model, add the amenities_by_room table to support Q5. This will allow users to view the amenities of one of the rooms that is available for the desired stay dates.

-
-
-

Reservation Logical Data Model

-

Now let’s switch gears to look at the reservation queries. The figure shows a logical data model for reservations. You’ll notice that these tables represent a denormalized design; the same data appears in multiple tables, with differing keys.

[Figure: ../_images/data_modeling_reservation_logical.png]

In order to satisfy Q6, the reservations_by_guest table can be used to look up the reservation by guest name. You could envision query Q7 being used on behalf of a guest on a self-serve website or a call center agent trying to assist the guest. Because the guest name might not be unique, you include the guest ID here as a clustering column as well.

Q8 and Q9 in particular help to remind you to create queries that support various stakeholders of the application, not just customers but staff as well, and perhaps even the analytics team, suppliers, and so on.

The hotel staff might wish to see a record of upcoming reservations by date in order to get insight into how the hotel is performing, such as what dates the hotel is sold out or undersold. Q8 supports the retrieval of reservations for a given hotel by date.

Finally, you create a guests table. This provides a single location used to store guest information. In this case, you specify a separate unique identifier for guest records, as it is not uncommon for guests to have the same name. In many organizations, a customer database such as the guests table would be part of a separate customer management application, which is why other guest access patterns were omitted from the example.

-
-
-

Patterns and Anti-Patterns

-

As with other types of software design, there are some well-known patterns and anti-patterns for data modeling in Cassandra. You’ve already used one of the most common patterns in this hotel model—the wide partition pattern.

The time series pattern is an extension of the wide partition pattern. In this pattern, a series of measurements at specific time intervals are stored in a wide partition, where the measurement time is used as part of the partition key. This pattern is frequently used in domains including business analysis, sensor data management, and scientific experiments.

The time series pattern is also useful for data other than measurements. Consider the example of a banking application. You could store each customer’s balance in a row, but that might lead to a lot of read and write contention as various customers check their balance or make transactions. You’d probably be tempted to wrap a transaction around writes just to protect the balance from being updated in error. In contrast, a time series–style design would store each transaction as a timestamped row and leave the work of calculating the current balance to the application.

-
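A minimal sketch of that time series style for the banking example (a hypothetical table):

```
-- Each transaction is its own timestamped row; the application computes
-- the current balance from the rows in the account's partition
CREATE TABLE transactions_by_account (
    account_id text,
    tx_time timeuuid,
    amount decimal,
    PRIMARY KEY ((account_id), tx_time)
) WITH CLUSTERING ORDER BY (tx_time DESC);
```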

One design trap that many new users fall into is attempting to use Cassandra as a queue. Each item in the queue is stored with a timestamp in a wide partition. Items are appended to the end of the queue and read from the front, being deleted after they are read. This is a design that seems attractive, especially given its apparent similarity to the time series pattern. The problem with this approach is that the deleted items are now tombstones that Cassandra must scan past in order to read from the front of the queue. Over time, a growing number of tombstones begins to degrade read performance.

The queue anti-pattern serves as a reminder that any design that relies on the deletion of data is potentially a poorly performing design.

Material adapted from Cassandra, The Definitive Guide. Published by O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. All rights reserved. Used with permission.

-
-
diff --git a/src/doc/4.0-alpha4/data_modeling/data_modeling_physical.html b/src/doc/4.0-alpha4/data_modeling/data_modeling_physical.html
deleted file mode 100644

Physical Data Modeling

-

Once you have a logical data model defined, creating the physical model is a relatively simple process.

You walk through each of the logical model tables, assigning types to each item. You can use any valid CQL data type, including the basic types, collections, and user-defined types. You may identify additional user-defined types that can be created to simplify your design.

After you’ve assigned data types, you analyze the model by performing size calculations and testing out how the model works. You may make some adjustments based on your findings. Once again let’s cover the data modeling process in more detail by working through an example.

Before getting started, let’s look at a few additions to the Chebotko notation for physical data models. To draw physical models, you need to be able to add the typing information for each column. This figure shows the addition of a type for each column in a sample table.

[Figure: ../_images/data_modeling_chebotko_physical.png]

The figure includes a designation of the keyspace containing each table and visual cues for columns represented using collections and user-defined types. Note the designation of static columns and secondary index columns. There is no restriction on assigning these as part of a logical model, but they are typically more of a physical data modeling concern.

-
-

Hotel Physical Data Model

-

Now let’s get to work on the physical model. First, you need keyspaces to contain the tables. To keep the design relatively simple, create a hotel keyspace to contain tables for hotel and availability data, and a reservation keyspace to contain tables for reservation and guest data. In a real system, you might divide the tables across even more keyspaces in order to separate concerns.

For the hotels table, use Cassandra’s text type to represent the hotel’s id. For the address, create an address user defined type. Use the text type to represent the phone number, as there is considerable variance in the formatting of numbers between countries.

-
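A hedged sketch of how those choices might look in CQL (the exact field list follows the figure and should be treated as illustrative):

```
CREATE TYPE hotel.address (
    street text,
    city text,
    state_or_province text,
    postal_code text,
    country text
);

CREATE TABLE hotel.hotels (
    id text PRIMARY KEY,
    name text,
    phone text,                 -- text rather than a numeric type: formats vary by country
    address frozen<address>
);
```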

While it would make sense to use the uuid type for attributes such as the hotel_id, this document uses mostly text attributes as identifiers, to keep the samples simple and readable. For example, a common convention in the hospitality industry is to reference properties by short codes like “AZ123” or “NY229”. This example uses these values for hotel_ids, while acknowledging they are not necessarily globally unique.

You’ll find that it’s often helpful to use unique IDs to uniquely reference elements, and to use these uuids as references in tables representing other entities. This helps to minimize coupling between different entity types. This may prove especially effective if you are using a microservice architectural style for your application, in which there are separate services responsible for each entity type.

As you work to create physical representations of various tables in the logical hotel data model, you use the same approach. The resulting design is shown in this figure:

[Figure: ../_images/data_modeling_hotel_physical.png]

Note that the address type is also included in the design. It is designated with an asterisk to denote that it is a user-defined type, and has no primary key columns identified. This type is used in the hotels and hotels_by_poi tables.

User-defined types are frequently used to help reduce duplication of non-primary key columns, as was done with the address user-defined type. This can reduce complexity in the design.

Remember that the scope of a UDT is the keyspace in which it is defined. To use address in the reservation keyspace defined below, you’ll have to declare it again. This is just one of the many trade-offs you have to make in data model design.

-
-
-

Reservation Physical Data Model

-

Now, let’s examine reservation tables in the design. Remember that the logical model contained three denormalized tables to support queries for reservations by confirmation number, guest, and hotel and date. For the first iteration of your physical data model design, assume you’re going to manage this denormalization manually. Note that this design could be revised to use Cassandra’s (experimental) materialized view feature.

[Figure: ../_images/data_modeling_reservation_physical.png]

Note that the address type is reproduced in this keyspace and guest_id is modeled as a uuid type in all of the tables.

Material adapted from Cassandra, The Definitive Guide. Published by O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. All rights reserved. Used with permission.

-
-
diff --git a/src/doc/4.0-alpha4/data_modeling/data_modeling_queries.html b/src/doc/4.0-alpha4/data_modeling/data_modeling_queries.html
deleted file mode 100644

Defining Application Queries

-

Let’s try the query-first approach to start designing the data model for a hotel application. The user interface design for the application is often a great artifact to use to begin identifying queries. Let’s assume that you’ve talked with the project stakeholders and your UX designers have produced user interface designs or wireframes for the key use cases. You’ll likely have a list of shopping queries like the following:

-
• Q1. Find hotels near a given point of interest.
• Q2. Find information about a given hotel, such as its name and location.
• Q3. Find points of interest near a given hotel.
• Q4. Find an available room in a given date range.
• Q5. Find the rate and amenities for a room.

It is often helpful to be able to refer to queries by a shorthand number rather than explaining them in full. The queries listed here are numbered Q1, Q2, and so on, which is how they are referenced in diagrams throughout the example.

Now if the application is to be a success, you’ll certainly want customers to be able to book reservations at hotels. This includes steps such as selecting an available room and entering their guest information. So clearly you will also need some queries that address the reservation and guest entities from the conceptual data model. Even here, however, you’ll want to think not only from the customer perspective in terms of how the data is written, but also in terms of how the data will be queried by downstream use cases.

Your natural tendency might be to focus first on designing the tables to store reservation and guest records, and only then start thinking about the queries that would access them. You may have felt a similar tension already when discussing the shopping queries before, thinking “but where did the hotel and point of interest data come from?” Don’t worry, you will see soon enough. Here are some queries that describe how users will access reservations:

-
• Q6. Lookup a reservation by confirmation number.
• Q7. Lookup a reservation by hotel, date, and guest name.
• Q8. Lookup all reservations by guest name.
• Q9. View guest details.

All of the queries are shown in the context of the workflow of the application in the figure below. Each box on the diagram represents a step in the application workflow, with arrows indicating the flows between steps and the associated query. If you’ve modeled the application well, each step of the workflow accomplishes a task that “unlocks” subsequent steps. For example, the “View hotels near POI” task helps the application learn about several hotels, including their unique keys. The key for a selected hotel may be used as part of Q2, in order to obtain a detailed description of the hotel. The act of booking a room creates a reservation record that may be accessed by the guest and hotel staff at a later time through various additional queries.

-../_images/data_modeling_hotel_queries.png -

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/data_modeling/data_modeling_rdbms.html b/src/doc/4.0-alpha4/data_modeling/data_modeling_rdbms.html deleted file mode 100644 index 7bcf47890..000000000 --- a/src/doc/4.0-alpha4/data_modeling/data_modeling_rdbms.html +++ /dev/null @@ -1,252 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "RDBMS Design" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

RDBMS Design

-

When you set out to build a new data-driven application that will use a -relational database, you might start by modeling the domain as a set of -properly normalized tables and use foreign keys to reference related -data in other tables.

-

The figure below shows how you might represent the data storage for your application -using a relational database model. The relational model includes a -couple of “join” tables in order to realize the many-to-many -relationships from the conceptual model of hotels-to-points of interest, -rooms-to-amenities, rooms-to-availability, and guests-to-rooms (via a -reservation).

-../_images/data_modeling_hotel_relational.png -
-

Design Differences Between RDBMS and Cassandra

-

Let’s take a minute to highlight some of the key differences in doing data modeling for Cassandra versus a relational database.

-
-

No joins

-

You cannot perform joins in Cassandra. If you have designed a data model -and find that you need something like a join, you’ll have to either do -the work on the client side, or create a denormalized second table that -represents the join results for you. This latter option is preferred in -Cassandra data modeling. Performing joins on the client should be a very -rare case; you really want to duplicate (denormalize) the data instead.
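As a rough CQL sketch of this approach (in the spirit of the hotels_by_poi table defined later in this guide; the column choices here are illustrative, not prescriptive), the “join result” of hotels and points of interest is simply precomputed into its own table:

-- Hotel details repeated under each point of interest, so the
-- "hotels near a POI" question is a single-partition read with no join.
CREATE TABLE hotels_by_poi (
    poi_name text,
    hotel_id text,
    name text,
    phone text,
    PRIMARY KEY ((poi_name), hotel_id)
);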

-
-
-

No referential integrity

-

Although Cassandra supports features such as lightweight transactions -and batches, Cassandra itself has no concept of referential integrity -across tables. In a relational database, you could specify foreign keys -in a table to reference the primary key of a record in another table. -But Cassandra does not enforce this. It is still a common design -requirement to store IDs related to other entities in your tables, but -operations such as cascading deletes are not available.

-
-
-

Denormalization

-

In relational database design, you are often taught the importance of -normalization. This is not an advantage when working with Cassandra -because it performs best when the data model is denormalized. It is -often the case that companies end up denormalizing data in relational -databases as well. There are two common reasons for this. One is -performance. Companies simply can’t get the performance they need when -they have to do so many joins on years’ worth of data, so they -denormalize along the lines of known queries. This ends up working, but -goes against the grain of how relational databases are intended to be -designed, and ultimately makes one question whether using a relational -database is the best approach in these circumstances.

-

A second reason that relational databases get denormalized on purpose is -a business document structure that requires retention. That is, you have -an enclosing table that refers to a lot of external tables whose data -could change over time, but you need to preserve the enclosing document -as a snapshot in history. The common example here is with invoices. You -already have customer and product tables, and you’d think that you could -just make an invoice that refers to those tables. But this should never -be done in practice. Customer or price information could change, and -then you would lose the integrity of the invoice document as it was on -the invoice date, which could violate audits, reports, or laws, and -cause other problems.

-

In the relational world, denormalization violates Codd’s normal forms, -and you try to avoid it. But in Cassandra, denormalization is, well, -perfectly normal. It’s not required if your data model is simple. But -don’t be afraid of it.

-

Historically, denormalization in Cassandra has required designing and -managing multiple tables using techniques described in this documentation. -Beginning with the 3.0 release, Cassandra provides a feature known -as materialized views -which allows you to create multiple denormalized -views of data based on a base table design. Cassandra manages -materialized views on the server, including the work of keeping the -views in sync with the table.
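As a hedged sketch of the feature (the view name and key choice below are illustrative, assuming a base hotels table keyed by id as in the schema shown later in this guide), a materialized view is declared against a base table and Cassandra maintains it automatically:

-- Server-maintained denormalized view of the base table, keyed for a
-- different query; every base-table primary key column must appear in
-- the view's primary key.
CREATE MATERIALIZED VIEW hotels_by_phone AS
    SELECT phone, id, name
    FROM hotels
    WHERE phone IS NOT NULL AND id IS NOT NULL
    PRIMARY KEY (phone, id);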

-
-
-

Query-first design

-

Relational modeling, in simple terms, means that you start from the -conceptual domain and then represent the nouns in the domain in tables. -You then assign primary keys and foreign keys to model relationships. -When you have a many-to-many relationship, you create the join tables -that represent just those keys. The join tables don’t exist in the real -world, and are a necessary side effect of the way relational models -work. After you have all your tables laid out, you can start writing -queries that pull together disparate data using the relationships -defined by the keys. The queries in the relational world are very much -secondary. It is assumed that you can always get the data you want as -long as you have your tables modeled properly. Even if you have to use -several complex subqueries or join statements, this is usually true.

-

By contrast, in Cassandra you don’t start with the data model; you start -with the query model. Instead of modeling the data first and then -writing queries, with Cassandra you model the queries and let the data -be organized around them. Think of the most common query paths your -application will use, and then create the tables that you need to -support them.

-

Detractors have suggested that designing the queries first is overly -constraining on application design, not to mention database modeling. -But it is perfectly reasonable to expect that you should think hard -about the queries in your application, just as you would, presumably, -think hard about your relational domain. You may get it wrong, and then -you’ll have problems in either world. Or your query needs might change -over time, and then you’ll have to work to update your data set. But -this is no different from defining the wrong tables, or needing -additional tables, in an RDBMS.

-
-
-

Designing for optimal storage

-

In a relational database, it is frequently transparent to the user how -tables are stored on disk, and it is rare to hear of recommendations -about data modeling based on how the RDBMS might store tables on disk. -However, that is an important consideration in Cassandra. Because -Cassandra tables are each stored in separate files on disk, it’s -important to keep related columns defined together in the same table.

-

A key goal that you will see as you begin creating data models in -Cassandra is to minimize the number of partitions that must be searched -in order to satisfy a given query. Because the partition is a unit of -storage that does not get divided across nodes, a query that searches a -single partition will typically yield the best performance.

-
-
-

Sorting is a design decision

-

In an RDBMS, you can easily change the order in which records are -returned to you by using ORDER BY in your query. The default sort -order is not configurable; by default, records are returned in the order -in which they are written. If you want to change the order, you just -modify your query, and you can sort by any list of columns.

-

In Cassandra, however, sorting is treated differently; it is a design -decision. The sort order available on queries is fixed, and is -determined entirely by the selection of clustering columns you supply in -the CREATE TABLE command. The CQL SELECT statement does support -ORDER BY semantics, but only in the order specified by the -clustering columns.
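For instance, a sketch using the available_rooms_by_hotel_date table defined later in this guide (the literal values are invented for illustration):

-- The clustering columns (date, room_number) fix the available sort order
-- at table-design time.
CREATE TABLE available_rooms_by_hotel_date (
    hotel_id text,
    date date,
    room_number smallint,
    is_available boolean,
    PRIMARY KEY ((hotel_id), date, room_number)
) WITH CLUSTERING ORDER BY (date ASC, room_number ASC);

-- ORDER BY may only follow the clustering order (or reverse it), e.g.:
SELECT * FROM available_rooms_by_hotel_date
    WHERE hotel_id = 'AZ123' AND date >= '2020-01-05' AND date <= '2020-01-12'
    ORDER BY date DESC;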

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/data_modeling/data_modeling_refining.html b/src/doc/4.0-alpha4/data_modeling/data_modeling_refining.html deleted file mode 100644 index 8529afdf2..000000000 --- a/src/doc/4.0-alpha4/data_modeling/data_modeling_refining.html +++ /dev/null @@ -1,288 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Evaluating and Refining Data Models" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Evaluating and Refining Data Models

-

Once you’ve created a physical model, there are some steps you’ll want -to take to evaluate and refine table designs to help ensure optimal -performance.

-
-

Calculating Partition Size

-

The first thing that you want to look for is whether your tables will have -partitions that will be overly large, or to put it another way, too -wide. Partition size is measured by the number of cells (values) that -are stored in the partition. Cassandra’s hard limit is 2 billion cells -per partition, but you’ll likely run into performance issues before -reaching that limit.

-

In order to calculate the size of partitions, use the following -formula:

-
-\[N_v = N_r (N_c - N_{pk} - N_s) + N_s\]
-

The number of values (or cells) in the partition (Nv) is equal to the number of static columns (Ns) plus the product of the number of rows (Nr) and the number of values per row. The number of values per row is defined as the number of columns (Nc) minus the number of primary key columns (Npk) and static columns (Ns).

-

The number of columns tends to be relatively static, although it -is possible to alter tables at runtime. For this reason, a -primary driver of partition size is the number of rows in the partition. -This is a key factor that you must consider in determining whether a -partition has the potential to get too large. Two billion values sounds -like a lot, but in a sensor system where tens or hundreds of values are -measured every millisecond, the number of values starts to add up pretty -fast.

-

Let’s take a look at one of the tables to analyze the partition size. -Because it has a wide partition design with one partition per hotel, -look at the available_rooms_by_hotel_date table. The table has -four columns total (Nc = 4), including three primary key columns -(Npk = 3) and no static columns (Ns = 0). Plugging these -values into the formula, the result is:

-
-\[N_v = N_r (4 - 3 - 0) + 0 = 1N_r\]
-

Therefore the number of values for this table is equal to the number of rows. You still need to determine a number of rows. To do this, make estimates based on the application design. The table is storing a record for each room, in each hotel, for every night. Let’s assume the system will be used to store two years of inventory at a time, and there are 5,000 hotels in the system, with an average of 100 rooms in each hotel.

-

Since there is a partition for each hotel, the estimated number of rows -per partition is as follows:

-
-\[N_r = 100 rooms/hotel \times 730 days = 73,000 rows\]
-

This relatively small number of rows per partition is not going to get -you in too much trouble, but if you start storing more dates of inventory, -or don’t manage the size of the inventory well using TTL, you could start -having issues. You still might want to look at breaking up this large -partition, which you’ll see how to do shortly.

-

When performing sizing calculations, it is tempting to assume the -nominal or average case for variables such as the number of rows. -Consider calculating the worst case as well, as these sorts of -predictions have a way of coming true in successful systems.
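For example, a hypothetical worst case for this design (assuming the system ends up retaining five years of inventory rather than the planned two) would be:

\[N_r = 100 rooms/hotel \times 1825 days = 182,500 rows\]

which is still comfortably small, but it illustrates how quickly the row count grows with retention.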

-
-
-

Calculating Size on Disk

-

In addition to calculating the size of a partition, it is also an -excellent idea to estimate the amount of disk space that will be -required for each table you plan to store in the cluster. In order to -determine the size, use the following formula to determine the size -St of a partition:

-
-\[S_t = \displaystyle\sum_i sizeOf\big(c_{k_i}\big) + \displaystyle\sum_j sizeOf\big(c_{s_j}\big) + N_r\times \bigg(\displaystyle\sum_k sizeOf\big(c_{r_k}\big) + \displaystyle\sum_l sizeOf\big(c_{c_l}\big)\bigg) + N_v\times sizeOf\big(t_{avg}\big)\]
-

This is a bit more complex than the previous formula, but let’s break it -down a bit at a time. Let’s take a look at the notation first:

-
    -
  • In this formula, ck refers to partition key columns, -cs to static columns, cr to regular columns, and -cc to clustering columns.
  • -
  • The term tavg refers to the average number of bytes of -metadata stored per cell, such as timestamps. It is typical to use an -estimate of 8 bytes for this value.
  • -
  • You’ll recognize the number of rows Nr and number of values -Nv from previous calculations.
  • -
  • The sizeOf() function refers to the size in bytes of the CQL data -type of each referenced column.
  • -
-

The first term asks you to sum the size of the partition key columns. For -this example, the available_rooms_by_hotel_date table has a single -partition key column, the hotel_id, which is of type -text. Assuming that hotel identifiers are simple 5-character codes, -you have a 5-byte value, so the sum of the partition key column sizes is -5 bytes.

-

The second term asks you to sum the size of the static columns. This table -has no static columns, so the size is 0 bytes.

-

The third term is the most involved, and for good reason—it is -calculating the size of the cells in the partition. Sum the size of -the clustering columns and regular columns. The two clustering columns -are the date, which is 4 bytes, and the room_number, -which is a 2-byte short integer, giving a sum of 6 bytes. -There is only a single regular column, the boolean is_available, -which is 1 byte in size. Summing the regular column size -(1 byte) plus the clustering column size (6 bytes) gives a total of 7 -bytes. To finish up the term, multiply this value by the number of -rows (73,000), giving a result of 511,000 bytes (0.51 MB).

-

The fourth term is simply counting the metadata that Cassandra stores for each cell. In the storage format used by Cassandra 3.0 and later, the amount of metadata for a given cell varies based on the type of data being stored, and whether or not custom timestamp or TTL values are specified for individual cells. For this table, reuse the number of values from the previous calculation (73,000) and multiply by 8, which gives 0.58 MB.

-

Adding these terms together, you get a final estimate:

-
-\[Partition size = 5 bytes + 0 bytes + 0.51 MB + 0.58 MB = 1.1 MB\]
-

This formula is an approximation of the actual size of a partition on -disk, but is accurate enough to be quite useful. Remembering that the -partition must be able to fit on a single node, it looks like the table -design will not put a lot of strain on disk storage.

-

Cassandra’s storage engine was re-implemented for the 3.0 release, -including a new format for SSTable files. The previous format stored a -separate copy of the clustering columns as part of the record for each -cell. The newer format eliminates this duplication, which reduces the -size of stored data and simplifies the formula for computing that size.

-

Keep in mind also that this estimate only counts a single replica of data. You will need to multiply the value obtained here by the number of partitions and the number of replicas specified by the keyspace’s replication strategy in order to determine the total required capacity for each table. This will come in handy when you plan your cluster.

-
-
-

Breaking Up Large Partitions

-

As discussed previously, the goal is to design tables that can provide -the data you need with queries that touch a single partition, or failing -that, the minimum possible number of partitions. However, as shown in -the examples, it is quite possible to design wide -partition-style tables that approach Cassandra’s built-in limits. -Performing sizing analysis on tables may reveal partitions that are -potentially too large, either in number of values, size on disk, or -both.

-

The technique for splitting a large partition is straightforward: add an -additional column to the partition key. In most cases, moving one of the -existing columns into the partition key will be sufficient. Another -option is to introduce an additional column to the table to act as a -sharding key, but this requires additional application logic.

-

Continuing to examine the available rooms example, if you add the date -column to the partition key for the available_rooms_by_hotel_date -table, each partition would then represent the availability of rooms -at a specific hotel on a specific date. This will certainly yield -partitions that are significantly smaller, perhaps too small, as the -data for consecutive days will likely be on separate nodes.

-

Another technique known as bucketing is often used to break the data into moderate-size partitions. For example, you could bucketize the available_rooms_by_hotel_date table by adding a month column to the partition key, perhaps represented as an integer. The comparison with the original design is shown in the figure below. While the month column is partially duplicative of the date, it provides a nice way of grouping related data in a partition that will not get too large.

-../_images/data_modeling_hotel_bucketing.png -
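A hedged sketch of that bucketed design in CQL (the table name suffix and the integer encoding of month, e.g. 202001 for January 2020, are illustrative assumptions):

-- Bucketing by month keeps each partition to roughly one month of
-- availability data for a single hotel.
CREATE TABLE available_rooms_by_hotel_date_bucketed (
    hotel_id text,
    month int,
    date date,
    room_number smallint,
    is_available boolean,
    PRIMARY KEY ((hotel_id, month), date, room_number)
);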

If you really felt strongly about preserving a wide partition design, you -could instead add the room_id to the partition key, so that each -partition would represent the availability of the room across all -dates. Because there was no query identified that involves searching -availability of a specific room, the first or second design approach -is most suitable to the application needs.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/data_modeling/data_modeling_schema.html b/src/doc/4.0-alpha4/data_modeling/data_modeling_schema.html deleted file mode 100644 index bfb847039..000000000 --- a/src/doc/4.0-alpha4/data_modeling/data_modeling_schema.html +++ /dev/null @@ -1,236 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Defining Database Schema" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Defining Database Schema

-

Once you have finished evaluating and refining the physical model, you’re -ready to implement the schema in CQL. Here is the schema for the -hotel keyspace, using CQL’s comment feature to document the query -pattern supported by each table:

-
CREATE KEYSPACE hotel WITH replication =
-  {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE TYPE hotel.address (
-  street text,
-  city text,
-  state_or_province text,
-  postal_code text,
-  country text );
-
-CREATE TABLE hotel.hotels_by_poi (
-  poi_name text,
-  hotel_id text,
-  name text,
-  phone text,
-  address frozen<address>,
-  PRIMARY KEY ((poi_name), hotel_id) )
-  WITH comment = 'Q1. Find hotels near given poi'
-  AND CLUSTERING ORDER BY (hotel_id ASC) ;
-
-CREATE TABLE hotel.hotels (
-  id text PRIMARY KEY,
-  name text,
-  phone text,
-  address frozen<address>,
-  pois set<text> )
-  WITH comment = 'Q2. Find information about a hotel';
-
-CREATE TABLE hotel.pois_by_hotel (
-  poi_name text,
-  hotel_id text,
-  description text,
-  PRIMARY KEY ((hotel_id), poi_name) )
-  WITH comment = 'Q3. Find pois near a hotel';
-
-CREATE TABLE hotel.available_rooms_by_hotel_date (
-  hotel_id text,
-  date date,
-  room_number smallint,
-  is_available boolean,
-  PRIMARY KEY ((hotel_id), date, room_number) )
-  WITH comment = 'Q4. Find available rooms by hotel date';
-
-CREATE TABLE hotel.amenities_by_room (
-  hotel_id text,
-  room_number smallint,
-  amenity_name text,
-  description text,
-  PRIMARY KEY ((hotel_id, room_number), amenity_name) )
-  WITH comment = 'Q5. Find amenities for a room';
-
-
-

Notice that the elements of the partition key are surrounded -with parentheses, even though the partition key consists -of the single column poi_name. This is a best practice that makes -the selection of partition key more explicit to others reading your CQL.

-

Similarly, here is the schema for the reservation keyspace:

-
CREATE KEYSPACE reservation WITH replication = {'class':
-  'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE TYPE reservation.address (
-  street text,
-  city text,
-  state_or_province text,
-  postal_code text,
-  country text );
-
-CREATE TABLE reservation.reservations_by_confirmation (
-  confirm_number text,
-  hotel_id text,
-  start_date date,
-  end_date date,
-  room_number smallint,
-  guest_id uuid,
-  PRIMARY KEY (confirm_number) )
-  WITH comment = 'Q6. Find reservations by confirmation number';
-
-CREATE TABLE reservation.reservations_by_hotel_date (
-  hotel_id text,
-  start_date date,
-  end_date date,
-  room_number smallint,
-  confirm_number text,
-  guest_id uuid,
-  PRIMARY KEY ((hotel_id, start_date), room_number) )
-  WITH comment = 'Q7. Find reservations by hotel and date';
-
-CREATE TABLE reservation.reservations_by_guest (
-  guest_last_name text,
-  hotel_id text,
-  start_date date,
-  end_date date,
-  room_number smallint,
-  confirm_number text,
-  guest_id uuid,
-  PRIMARY KEY ((guest_last_name), hotel_id) )
-  WITH comment = 'Q8. Find reservations by guest name';
-
-CREATE TABLE reservation.guests (
-  guest_id uuid PRIMARY KEY,
-  first_name text,
-  last_name text,
-  title text,
-  emails set<text>,
-  phone_numbers list<text>,
-  addresses map<text, frozen<address>>,
-  confirm_number text )
-  WITH comment = 'Q9. Find guest by ID';
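As a hedged illustration of how these tables are read (the literal values below are invented), each access pattern maps to a single-partition query:

-- Q1: find hotels near a given point of interest.
SELECT hotel_id, name, phone
FROM hotel.hotels_by_poi
WHERE poi_name = 'Eiffel Tower';

-- Q6: look up a reservation by confirmation number.
SELECT *
FROM reservation.reservations_by_confirmation
WHERE confirm_number = 'RS2G0Z';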
-
-
-

You now have a complete Cassandra schema for storing data for a hotel -application.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/data_modeling/data_modeling_tools.html b/src/doc/4.0-alpha4/data_modeling/data_modeling_tools.html deleted file mode 100644 index 553a677cd..000000000 --- a/src/doc/4.0-alpha4/data_modeling/data_modeling_tools.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Cassandra Data Modeling Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Data Modeling Tools

-

There are several tools available to help you design and -manage your Cassandra schema and build queries.

-
    -
  • Hackolade -is a data modeling tool that supports schema design for Cassandra and -many other NoSQL databases. Hackolade supports the unique concepts of -CQL such as partition keys and clustering columns, as well as data types -including collections and UDTs. It also provides the ability to create -Chebotko diagrams.
  • -
  • Kashlev Data Modeler is a Cassandra -data modeling tool that automates the data modeling methodology -described in this documentation, including identifying -access patterns, conceptual, logical, and physical data modeling, and -schema generation. It also includes model patterns that you can -optionally leverage as a starting point for your designs.
  • -
  • DataStax DevCenter is a tool for managing -schema, executing queries and viewing results. While the tool is no -longer actively supported, it is still popular with many developers and -is available as a free download. -DevCenter features syntax highlighting for CQL commands, types, and name -literals. DevCenter provides command completion as you type out CQL -commands and interprets the commands you type, highlighting any errors -you make. The tool provides panes for managing multiple CQL scripts and -connections to multiple clusters. The connections are used to run CQL -commands against live clusters and view the results. The tool also has a -query trace feature that is useful for gaining insight into the -performance of your queries.
  • -
  • IDE Plugins - There are CQL plugins available for several Integrated -Development Environments (IDEs), such as IntelliJ IDEA and Apache -NetBeans. These plugins typically provide features such as schema -management and query execution.
  • -
-

Some IDEs and tools that claim to support Cassandra do not actually support CQL natively, but instead access Cassandra using a JDBC/ODBC driver and interact with Cassandra as if it were a relational database with SQL support. When selecting tools for working with Cassandra you’ll want to make sure they support CQL and reinforce Cassandra best practices for data modeling as presented in this documentation.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/data_modeling/index.html b/src/doc/4.0-alpha4/data_modeling/index.html deleted file mode 100644 index 7b3281c99..000000000 --- a/src/doc/4.0-alpha4/data_modeling/index.html +++ /dev/null @@ -1,154 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Data Modeling" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha4/data_modeling/intro.html b/src/doc/4.0-alpha4/data_modeling/intro.html deleted file mode 100644 index b356fb328..000000000 --- a/src/doc/4.0-alpha4/data_modeling/intro.html +++ /dev/null @@ -1,230 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Introduction" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Introduction

-

Apache Cassandra stores data in tables, with each table consisting of rows and columns. CQL (Cassandra Query Language) is used to query the data stored in tables. The Apache Cassandra data model is designed around and optimized for querying. Cassandra does not support the relational data modeling intended for relational databases.

-
-

What is Data Modeling?

-

Data modeling is the process of identifying entities and their relationships. In relational databases, data is placed in normalized tables with foreign keys used to reference related data in other tables. Queries that the application will make are driven by the structure of the tables and related data are queried as table joins.

-

In Cassandra, data modeling is query-driven. The data access patterns and application queries determine the structure and organization of data, which is then used to design the database tables.

-

Data is modeled around specific queries. Queries are best designed to access a single table, which implies that all entities involved in a query must be in the same table to make data access (reads) very fast. Data is modeled to best suit a query or a set of queries. A table could have one or more entities as best suits a query. As entities do typically have relationships among them and queries could involve entities with relationships among them, a single entity may be included in multiple tables.

-
-
-

Query-driven modeling

-

Unlike a relational database model in which queries make use of table joins to get data from multiple tables, joins are not supported in Cassandra so all required fields (columns) must be grouped together in a single table. Since each query is backed by a table, data is duplicated across multiple tables in a process known as denormalization. Data duplication and a high write throughput are used to achieve a high read performance.

-
-
-

Goals

-

The choice of the primary key and partition key is important to distribute data evenly across the cluster. Keeping the number of partitions read for a query to a minimum is also important because different partitions could be located on different nodes and the coordinator would need to send a request to each node adding to the request overhead and latency. Even if the different partitions involved in a query are on the same node, fewer partitions make for a more efficient query.

-
-
-

Partitions

-

Apache Cassandra is a distributed database that stores data across a cluster of nodes. A partition key is used to partition data among the nodes. Cassandra partitions data over the storage nodes using a variant of consistent hashing for data distribution. Hashing is a technique used to map data: given a key, a hash function generates a hash value (or simply a hash) that is stored in a hash table. A partition key is generated from the first field of a primary key. Data partitioned into hash tables using partition keys provides for rapid lookup. The fewer partitions a query uses, the faster the response time for the query.

-

As an example of partitioning, consider table t in which id is the only field in the primary key.

-
CREATE TABLE t (
-   id int,
-   k int,
-   v text,
-   PRIMARY KEY (id)
-);
-
-
-

The partition key is generated from the primary key id for data distribution across the nodes in a cluster.

-

Consider a variation of table t that has two fields constituting the primary key to make a composite or compound primary key.

-
CREATE TABLE t (
-   id int,
-   c text,
-   k int,
-   v text,
-   PRIMARY KEY (id,c)
-);
-
-
-

For the table t with a composite primary key the first field id is used to generate the partition key and the second field c is the clustering key used for sorting within a partition. Using clustering keys to sort data makes retrieval of adjacent data more efficient.
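As an illustrative read against this table (the literal values are made up), all rows of a partition come back ordered by the clustering key c:

-- One partition, rows ordered by the clustering column c.
SELECT * FROM t WHERE id = 1;

-- A slice of the same partition, still a single-partition read.
SELECT * FROM t WHERE id = 1 AND c >= 'a' AND c < 'm';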

-

In general, the first field or component of a primary key is hashed to generate the partition key and the remaining fields or components are the clustering keys that are used to sort data within a partition. Partitioning data improves the efficiency of reads and writes. The other fields that are not primary key fields may be indexed separately to further improve query performance.

-

The partition key could be generated from multiple fields if they are grouped as the first component of a primary key. As another variation of the table t, consider a table with the first component of the primary key made of two fields grouped using parentheses.

-
CREATE TABLE t (
-   id1 int,
-   id2 int,
-   c1 text,
-   c2 text,
-   k int,
-   v text,
-   PRIMARY KEY ((id1,id2),c1,c2)
-);
-
-
-

For the preceding table t the first component of the primary key constituting fields id1 and id2 is used to generate the partition key and the rest of the fields c1 and c2 are the clustering keys used for sorting within a partition.
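An illustrative query against this table (values invented); both components of the composite partition key must be restricted to address a single partition:

SELECT * FROM t WHERE id1 = 1 AND id2 = 2;

-- Restricting only id1 does not identify a partition and would be
-- rejected unless ALLOW FILTERING is used.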

-
-
-

Comparing with Relational Data Model

-

Relational databases store data in tables that have relations with other tables using foreign keys. A relational database’s approach to data modeling is table-centric. Queries must use table joins to get data from multiple tables that have a relation between them. Apache Cassandra does not have the concept of foreign keys or relational integrity. Apache Cassandra’s data model is based around designing efficient queries; queries that don’t involve multiple tables. Relational databases normalize data to avoid duplication. Apache Cassandra in contrast de-normalizes data by duplicating data in multiple tables for a query-centric data model. If a Cassandra data model cannot fully integrate the complexity of relationships between the different entities for a particular query, client-side joins in application code may be used.

-
-
-

Examples of Data Modeling

-

As an example, a magazine data set consists of data for magazines with attributes such as magazine id, magazine name, publication frequency, publication date, and publisher. A basic query (Q1) for magazine data is to list all the magazine names including their publication frequency. As not all data attributes are needed for Q1 the data model would only consist of id ( for partition key), magazine name and publication frequency as shown in Figure 1.

-
-../_images/Figure_1_data_model.jpg -
-

Figure 1. Data Model for Q1

-

Another query (Q2) is to list all the magazine names by publisher. For Q2 the data model would consist of an additional attribute publisher for the partition key. The id would become the clustering key for sorting within a partition. Data model for Q2 is illustrated in Figure 2.

-
-../_images/Figure_2_data_model.jpg -
-

Figure 2. Data Model for Q2

-
-
-

Designing Schema

-

After the conceptual data model has been created a schema may be designed for a query. For Q1 the following schema may be used.

-
CREATE TABLE magazine_name (id int PRIMARY KEY, name text, publicationFrequency text);
-
-
-

For Q2 the schema definition would include a clustering key for sorting.

-
CREATE TABLE magazine_publisher (publisher text, id int, name text, publicationFrequency text,
-PRIMARY KEY (publisher, id)) WITH CLUSTERING ORDER BY (id DESC);
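Illustrative reads for the two queries (the publisher value is made up):

-- Q1: all magazine names with their publication frequency.
SELECT name, publicationFrequency FROM magazine_name;

-- Q2: magazine names for one publisher, newest id first because of
-- CLUSTERING ORDER BY (id DESC).
SELECT name, publicationFrequency FROM magazine_publisher
WHERE publisher = 'Example Media';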
-
-
-
-
-

Data Model Analysis

-

The data model is a conceptual model that must be analyzed and optimized based on storage, capacity, redundancy and consistency. A data model may need to be modified as a result of the analysis. Considerations or limitations that are used in data model analysis include:

-
    -
  • Partition Size
  • -
  • Data Redundancy
  • -
  • Disk space
  • -
  • Lightweight Transactions (LWT)
  • -
-

The two measures of partition size are the number of values in a partition and partition size on disk. Though requirements for these measures may vary based on the application, a general guideline is to keep the number of values per partition below 100,000 and the disk space per partition below 100MB.

-

Data redundancy, as duplicate data in tables and multiple partition replicas, is to be expected in the design of a data model, but it should nevertheless be kept in consideration and kept to a minimum. Lightweight transactions (compare-and-set, conditional updates) could affect performance, so queries using LWT should also be kept to a minimum.

-
-
-

Using Materialized Views

-
-

Warning

-

Materialized views (MVs) are experimental in the latest (4.0) release.

-
-

Materialized views (MVs) could be used to implement multiple queries for a single table. A materialized view is a table built from data from another table, the base table, with new primary key and new properties. Changes to the base table data automatically add and update data in a MV. Different queries may be implemented using a materialized view as an MV’s primary key differs from the base table. Queries are optimized by the primary key definition.
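A hedged sketch using the magazine example above (the view name and key choice are illustrative):

-- A different query over magazine_name, maintained by the server; the
-- base table's primary key (id) must be part of the view's primary key.
CREATE MATERIALIZED VIEW magazine_by_frequency AS
    SELECT publicationFrequency, id, name
    FROM magazine_name
    WHERE publicationFrequency IS NOT NULL AND id IS NOT NULL
    PRIMARY KEY (publicationFrequency, id);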

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/ci.html b/src/doc/4.0-alpha4/development/ci.html deleted file mode 100644 index 9c156c6dc..000000000 --- a/src/doc/4.0-alpha4/development/ci.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Jenkins CI Environment" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Jenkins CI Environment

-
-

About CI testing and Apache Cassandra

-

Cassandra can be automatically tested using various test suites that are implemented either with JUnit or as dtest scripts written in Python. As outlined in Testing, each kind of test suite addresses a different way to test Cassandra. But in the end, all of them will be executed together on our CI platform at builds.apache.org, running Jenkins.

-
-
-

Setting up your own Jenkins server

-

Jenkins is an open source solution that can be installed on a large number of platforms. Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution.

-

Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment.

-
-

Required plugins

-

The following plugins need to be installed in addition to the standard plugins (git, ant, ..).

-

You can install any missing plugins through the install manager.

-

Go to Manage Jenkins -> Manage Plugins -> Available and install the following plugins and respective dependencies:

-
    -
  • Job DSL
  • -
  • Javadoc Plugin
  • -
  • description setter plugin
  • -
  • Throttle Concurrent Builds Plug-in
  • -
  • Test stability history
  • -
  • Hudson Post build task
  • -
-
-
-

Setup seed job

-

Config New Item

-
    -
  • Name it Cassandra-Job-DSL
  • -
  • Select Freestyle project
  • -
-

Under Source Code Management select Git using the repository: https://github.com/apache/cassandra-builds

-

Under Build, confirm Add build step -> Process Job DSLs and enter at Look on Filesystem: jenkins-dsl/cassandra_job_dsl_seed.groovy

-

Generated jobs will be created based on the Groovy script’s default settings. You may want to override settings by checking This project is parameterized and adding a String Parameter for the variables that can be found at the top of the script. This will allow you to set up jobs for your own repository and branches (e.g. working branches).

-

When done, confirm “Save”

-

You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message “Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use”. Go to Manage Jenkins -> In-process Script Approval to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates.

-

Jobs are triggered by either changes in Git or are scheduled to execute periodically, e.g. on daily basis. Jenkins will use any available executor with the label “cassandra”, once the job is to be run. Please make sure to make any executors available by selecting Build Executor Status -> Configure -> Add “cassandra” as label and save.

-

Executors need to have “JDK 1.8 (latest)” installed. This is done under Manage Jenkins -> Global Tool Configuration -> JDK Installations…. Executors also need to have the virtualenv package installed on their system.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/code_style.html b/src/doc/4.0-alpha4/development/code_style.html deleted file mode 100644 index fafdf665e..000000000 --- a/src/doc/4.0-alpha4/development/code_style.html +++ /dev/null @@ -1,215 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Code Style" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Code Style

-
-

General Code Conventions

-
-
-
-
-
-

Exception handling

-
-
    -
  • Never ever write catch (...) {} or catch (...) { logger.error() } merely to satisfy Java’s compile-time exception checking. Always propagate the exception up or throw RuntimeException (or, if it “can’t happen,” AssertionError). This makes the exceptions visible to automated tests.
  • -
  • Avoid propagating up checked exceptions that no caller handles. Rethrow as RuntimeException (or IOError, if that is more applicable).
  • -
  • Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don’t hide it behind a warn; if it isn’t, no need for the warning.
  • -
  • If you genuinely know an exception indicates an expected condition, it’s okay to ignore it BUT this must be explicitly explained in a comment.
  • -
-
-
-
-

Boilerplate

-
-
    -
  • Avoid redundant @Override annotations when implementing abstract or interface methods.
  • -
  • Do not implement equals or hashcode methods unless they are actually needed.
  • -
  • Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in “real” methods to either.)
  • -
  • Prefer requiring initialization in the constructor to setters.
  • -
  • Avoid redundant this references to member fields or methods.
  • -
  • Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it.
  • -
  • Always include braces for nested levels of conditionals and loops. Only avoid braces for single level.
  • -
-
-
-
-

Multiline statements

-
-
    -
  • Try to keep lines under 120 characters, but use good judgement – it’s better to exceed 120 by a little, than split a line that has no natural splitting points.
  • -
  • When splitting inside a method call, use one line per parameter and align them, like this:
  • -
-
SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(),
-                                         columnFamilies.size(),
-                                         StorageService.getPartitioner());
-
-
-
    -
  • When splitting a ternary, use one line per clause, carry the operator, and align like this:
  • -
-
var = bar == null
-    ? doFoo()
-    : doBar();
-
-
-
-
-
-

Whitespace

-
-
    -
  • Please make sure to use 4 spaces instead of the tab character for all your indentation.
  • -
  • Many lines in many files have a bunch of trailing whitespace… Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn’t have to pay attention to whitespace diffs.
  • -
-
-
-
-

Imports

-

Please observe the following order for your imports:

-
java
-[blank line]
-com.google.common
-org.apache.commons
-org.junit
-org.slf4j
-[blank line]
-everything else alphabetically
-
-
-
-
-

Format files for IDEs

-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/dependencies.html b/src/doc/4.0-alpha4/development/dependencies.html deleted file mode 100644 index 0b0584e9e..000000000 --- a/src/doc/4.0-alpha4/development/dependencies.html +++ /dev/null @@ -1,155 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Dependency Management" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Dependency Management

-

Managing libraries for Cassandra is a bit less straightforward compared to other projects, as the build process is based on ant, maven and manually managed jars. Make sure to follow the steps below carefully and pay attention to any emerging issues in the Jenkins CI Environment and reported related issues on Jira/ML, in case of any project dependency changes.

-

As Cassandra is an Apache product, all included libraries must follow Apache’s software license requirements.

-
-

Required steps to add or update libraries

-
    -
  • Add or replace jar file in lib directory
  • -
  • Add or update lib/license files
  • -
  • Update dependencies in build.xml
      -
    • Add to parent-pom with correct version
    • -
    • Add to all-pom if simple Cassandra dependency (see below)
    • -
    -
  • -
-
-
-

POM file types

-
    -
  • parent-pom - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here.
  • -
  • build-deps-pom(-sources) + coverage-deps-pom - used by ant build compile target. Listed dependencies will be resolved and copied to build/lib/{jar,sources} by executing the maven-ant-tasks-retrieve-build target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution.
  • -
  • test-deps-pom - referred to by maven-ant-tasks-retrieve-test to retrieve and save dependencies to build/test/lib. Exclusively used during JUnit test execution.
  • -
  • all-pom - pom for cassandra-all.jar that can be installed or deployed to public maven repos via ant publish
  • -
-
-
-

Troubleshooting and conflict resolution

-

Here are some useful commands that may help you out resolving conflicts.

-
    -
  • ant realclean - gets rid of the build directory, including build artifacts.
  • -
  • mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ant mvn-install.
  • -
  • rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/ - removes cached local Cassandra maven artifacts
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/documentation.html b/src/doc/4.0-alpha4/development/documentation.html deleted file mode 100644 index 3d905535d..000000000 --- a/src/doc/4.0-alpha4/development/documentation.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Working on Documentation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Working on Documentation

-
-

How Cassandra is documented

-

The official Cassandra documentation lives in the project’s git repository. We use a static site generator, Sphinx, to create pages hosted at cassandra.apache.org. You’ll also find developer centric content about Cassandra internals in our retired wiki (not covered by this guide).

-

Using a static site generator often requires using a markup language instead of visual editors (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses reStructuredText for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at existing documents to get a better idea how we use reStructuredText to write our documents.

-

So how do you actually start making contributions?

-
-
-

GitHub based work flow

-

Recommended for shorter documents and minor changes on existing content (e.g. fixing typos or updating descriptions)

-

Follow these steps to contribute using GitHub. It’s assumed that you’re logged in with an existing account.

-
    -
  1. Fork the GitHub mirror of the Cassandra repository
  2. -
-../_images/docs_fork.png -
    -
  1. Create a new branch that you can use to make your edits. It’s recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later when you decide you’re ready to contribute your work.
  2. -
-../_images/docs_create_branch.png -
    -
  1. Navigate to document sources doc/source to find the .rst file to edit. The URL of the document should correspond to the directory structure. New files can be created using the “Create new file” button:
  2. -
-../_images/docs_create_file.png -
    -
  1. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and add some content. Have a look at other existing .rst files to get a better idea what format elements to use.
  2. -
-../_images/docs_editor.png -

Make sure to preview added content before committing any changes.

-../_images/docs_preview.png -
    -
  1. Commit your work when you’re done. Make sure to add a short description of all your edits since the last time you committed before.
  2. -
-../_images/docs_commit.png -
    -
  1. Finally if you decide that you’re done working on your branch, it’s time to create a pull request!
  2. -
-../_images/docs_pr.png -

Afterwards the GitHub Cassandra mirror will list your pull request and you’re done. Congratulations! Please give us some time to look at your suggested changes before we get back to you.

-
-
-

Jira based work flow

-

Recommended for major changes

-

Significant changes to the documentation are best managed through our Jira issue tracker. Please follow the same contribution guides as for regular code contributions. Creating high quality content takes a lot of effort. It’s therefore always a good idea to create a ticket before you start and explain what you’re planning to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed.

-
-
-

Working on documents locally using Sphinx

-

Recommended for advanced editing

-

Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefore it’s a good idea to set up Sphinx locally for any serious editing. Please follow the instructions in the Cassandra source directory at doc/README.md. Setup is very easy (at least on OSX and Linux).

-
-
-

Notes for committers

-

Please feel free to get involved and merge pull requests created on the GitHub mirror if you’re a committer. As this is a read-only repository, you won’t be able to merge a PR directly on GitHub. You’ll have to commit the changes against the Apache repository with a comment that will close the PR when the commit syncs with GitHub.

-

You may use a git work flow like this:

-
git remote add github https://github.com/apache/cassandra.git
-git fetch github pull/<PR-ID>/head:<PR-ID>
-git checkout <PR-ID>
-
-
-

Now either rebase or squash the commit, e.g. for squashing:

-
git reset --soft origin/trunk
-git commit --author <PR Author>
-
-
-

Make sure to add a proper commit message including a “Closes #<PR-ID>” text to automatically close the PR.

-
-

Publishing

-

Details for building and publishing of the site at cassandra.apache.org can be found here.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/gettingstarted.html b/src/doc/4.0-alpha4/development/gettingstarted.html deleted file mode 100644 index 9d7337852..000000000 --- a/src/doc/4.0-alpha4/development/gettingstarted.html +++ /dev/null @@ -1,161 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Getting Started" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Getting Started

-
-

Initial Contributions

-
-
Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). So, before firing up your IDE to create that new feature, we’d suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work.
-
    -
  • Add to or update the documentation
  • -
  • Answer questions on the user list
  • -
  • Review and test a submitted patch
  • -
  • Investigate and fix a reported bug
  • -
  • Create unit tests and d-tests
  • -
-
-
-
-
-

Updating documentation

-

The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (Contributing Code Changes).

-
-
-

Answering questions on the user list

-

Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! -See the community page for details on how to subscribe to the mailing list.

-
-
-

Reviewing and testing a submitted patch

-

Reviewing patches is not the sole domain of committers; if others have reviewed a patch it can reduce the load on the committers, allowing them to write more great features or review more patches. Follow the instructions in _development_how_to_review or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, “I tested this performance enhancement on our application’s standard production load test and found a 3% improvement.”)

-
-
-

Investigate and/or fix a reported bug

-

Often, the hardest work in fixing a bug is reproducing it. Even if you don’t have the knowledge to produce a fix, figuring out a way to reliably reproduce an issue can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (Contributing Code Changes).

-
-
-

Create unit tests and Dtests

-

Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your change introducing new issues. See Testing and Contributing Code Changes for more detail.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/how_to_commit.html b/src/doc/4.0-alpha4/development/how_to_commit.html deleted file mode 100644 index cd0deaee2..000000000 --- a/src/doc/4.0-alpha4/development/how_to_commit.html +++ /dev/null @@ -1,187 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "How-to Commit" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

How-to Commit

-

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself.

-

Here is how committing and merging will usually look when merging and pushing tickets that follow the convention (if patch-based):

-

Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).

-
-
On cassandra-3.0:
-
    -
  1. git am -3 12345-3.0.patch (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git apply -3 12345-3.3.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git apply -3 12345-trunk.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-

Same scenario, but a branch-based contribution:

-
-
On cassandra-3.0:
-
    -
  1. git cherry-pick <sha-of-3.0-commit> (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git format-patch -1 <sha-of-3.3-commit>
  4. -
  5. git apply -3 <sha-of-3.3-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git format-patch -1 <sha-of-trunk-commit>
  4. -
  5. git apply -3 <sha-of-trunk-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-
-

Tip

-

Notes on git flags: the -3 flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply.

-

The --atomic flag to git push does the obvious thing: it pushes all or nothing. Without the flag, the command is equivalent to running git push once per branch. The flag is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you resolve the issue.

-
-
-

Tip

-

The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. -curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/how_to_review.html b/src/doc/4.0-alpha4/development/how_to_review.html deleted file mode 100644 index 138920887..000000000 --- a/src/doc/4.0-alpha4/development/how_to_review.html +++ /dev/null @@ -1,179 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Review Checklist" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Review Checklist

-

When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process:

-

General

-
-
    -
  • Does it conform to the Code Style guidelines?
  • -
  • Is there any redundant or duplicate code?
  • -
  • Is the code as modular as possible?
  • -
  • Can any singletons be avoided?
  • -
  • Can any of the code be replaced with library functions?
  • -
  • Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem?
  • -
-
-

Error-Handling

-
-
    -
  • Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded?
  • -
  • Where third-party utilities are used, are returning errors being caught?
  • -
  • Are invalid parameter values handled?
  • -
  • Are any Throwable/Exceptions passed to the JVMStabilityInspector?
  • -
  • Are errors well-documented? Does the error message tell the user how to proceed?
  • -
  • Do exceptions propagate to the appropriate level in the code?
  • -
-
-

Documentation

-
-
    -
  • Do comments exist and describe the intent of the code (the “why”, not the “how”)?
  • -
  • Are javadocs added where appropriate?
  • -
  • Is any unusual behavior or edge-case handling described?
  • -
  • Are data structures and units of measurement explained?
  • -
  • Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’?
  • -
  • Does the code self-document via clear naming, abstractions, and flow control?
  • -
  • Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed?
  • -
  • Is the ticket tagged with “client-impacting” and “doc-impacting”, where appropriate?
  • -
  • Has lib/licences been updated for third-party libs? Are they Apache License compatible?
  • -
  • Is the Component on the JIRA ticket set appropriately?
  • -
-
-

Testing

-
-
    -
  • Is the code testable? i.e. it doesn’t add or hide too many dependencies, objects can be initialized, test frameworks can call its methods, etc.
  • -
  • Do tests exist and are they comprehensive?
  • -
  • Do unit tests actually test that the code is performing the intended functionality?
  • -
  • Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse?
  • -
  • If the code may be affected by multi-node clusters, are there dtests?
  • -
  • If the code may take a long time to test properly, are there CVH tests?
  • -
  • Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions?
  • -
  • If patch affects read/write path, did we test for performance regressions w/multiple workloads?
  • -
  • If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature?
  • -
-
-

Logging

-
-
    -
  • Are logging statements logged at the correct level?
  • -
  • Are there logs in the critical path that could affect performance?
  • -
  • Is there any log that could be added to communicate status or troubleshoot potential problems in this feature?
  • -
  • Can any unnecessary logging statement be removed?
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/ide.html b/src/doc/4.0-alpha4/development/ide.html deleted file mode 100644 index 746e326a6..000000000 --- a/src/doc/4.0-alpha4/development/ide.html +++ /dev/null @@ -1,268 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Building and IDE Integration" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Building and IDE Integration

-
-

Building From Source

-

Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using Java 8, Git and Ant.

-

The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:

-
git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk
-
-
-

Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:

-
git checkout cassandra-3.0
-
-
-

You can get a list of available branches with git branch.

-

Finally build Cassandra using ant:

-
ant
-
-
-

This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled.

-
-

Hint

-

You can setup multiple working trees for different Cassandra versions from the same repository using git-worktree.

-
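For example, a quick sketch of using git worktree to keep one checkout per release branch (paths and branch names here are purely illustrative):

cd cassandra-trunk
# create a sibling working tree that tracks the upstream cassandra-3.11 branch
git worktree add -b cassandra-3.11 ../cassandra-3.11 origin/cassandra-3.11
# list all working trees attached to this clone
git worktree list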
-
-

-
-
-
-

Setting up Cassandra in IntelliJ IDEA

-

IntelliJ IDEA by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra.

-
-

Setup Cassandra as a Project (C* 2.1 and newer)

-

Since 2.1.5, there is a new ant target: generate-idea-files. Please see our wiki for instructions for older Cassandra versions.

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Once Cassandra is built, generate the IDEA files using ant:
  2. -
-
ant generate-idea-files
-
-
-
    -
  1. Start IDEA
  2. -
  3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA’s File menu
  4. -
-

The project generated by the ant task generate-idea-files contains nearly everything you need to debug Cassandra and execute unit tests.

-
-
    -
  • Run/debug defaults for JUnit
  • -
  • Run/debug configuration for Cassandra daemon
  • -
  • License header for Java source files
  • -
  • Cassandra code style
  • -
  • Inspections
  • -
-
-
-

-
-
-
-
-

Opening Cassandra in Apache NetBeans

-

Apache NetBeans is the oldest of the open-source Java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans.

-
-

Open Cassandra as a Project (C* 4.0 and newer)

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Start Apache NetBeans
  2. -
  3. Open the NetBeans project from the ide/ folder of the checked out Cassandra directory using the menu item “Open Project…” in NetBeans’ File menu
  4. -
-

The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant build.xml script.

-
-
    -
  • Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu.
  • -
  • Profile Project is available via the Profile menu. In the opened Profiler tab, click the green “Profile” button.
  • -
  • Cassandra’s code style is honored in ide/nbproject/project.properties
  • -
-
-

The JAVA8_HOME system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute.

-
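For example (the JDK path below is illustrative; adjust it for your system):

# set before launching NetBeans, e.g. in ~/.profile
export JAVA8_HOME=/usr/lib/jvm/java-8-openjdk-amd64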
-

-
-
-
-
-

Setting up Cassandra in Eclipse

-

Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the download page. The following guide was created with “Eclipse IDE for Java Developers”.

-

These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x.

-
-

Project Settings

-

It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.

-
-
    -
  • Clone and build Cassandra as described above.
  • -
  • Run ant generate-eclipse-files to create the Eclipse settings.
  • -
  • Start Eclipse.
  • -
  • Select File->Import->Existing Projects into Workspace->Select git directory.
  • -
  • Make sure “cassandra-trunk” is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above).
  • -
  • Confirm “Finish” to have your project imported.
  • -
-
-

You should now be able to find the project as part of the “Package Explorer” or “Project Explorer” without having Eclipse complain about any errors after building the project automatically.

-
-
-

Unit Tests

-

Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting Run As->JUnit Test. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting Debug As->JUnit Test.

-

Alternatively all unit tests can be run from the command line as described in Testing

-
-
-

Debugging Cassandra Using Eclipse

-

There are two ways to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would, using the ./bin/cassandra script, and connect to the JVM remotely from Eclipse, or start Cassandra from Eclipse right away.

-
-

Starting Cassandra From Command Line

-
-
    -
  • Set environment variable to define remote debugging options for the JVM: -export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"
  • -
  • Start Cassandra by executing the ./bin/cassandra script
  • -
-
-

Afterwards you should be able to connect to the running Cassandra process through the following steps:

-

From the menu, select Run->Debug Configurations..

-../_images/eclipse_debug0.png -

Create new remote application

-../_images/eclipse_debug1.png -

Configure connection settings by specifying a name and port 1414

-../_images/eclipse_debug2.png -

Afterwards confirm “Debug” to connect to the JVM and start debugging Cassandra!

-
-
-

Starting Cassandra From Eclipse

-

Cassandra can also be started directly from Eclipse if you don’t want to use the command line.

-

From the menu, select Run->Run Configurations..

-../_images/eclipse_debug3.png -

Create new application

-../_images/eclipse_debug4.png -

Specify name, project and main class org.apache.cassandra.service.CassandraDaemon

-../_images/eclipse_debug5.png -

Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed.

-
-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true
-
-
-../_images/eclipse_debug6.png -

Now just confirm “Debug” and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging!

-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/index.html b/src/doc/4.0-alpha4/development/index.html deleted file mode 100644 index f2668b203..000000000 --- a/src/doc/4.0-alpha4/development/index.html +++ /dev/null @@ -1,185 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contributing to Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- - -
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/patches.html b/src/doc/4.0-alpha4/development/patches.html deleted file mode 100644 index 7af9371a3..000000000 --- a/src/doc/4.0-alpha4/development/patches.html +++ /dev/null @@ -1,274 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Contributing Code Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contributing Code Changes

-
-

Choosing What to Work on

-

Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java and Python), documentation, testing, or any other changes that require modifying the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you’re addressing.

-
-
As a general rule of thumb:
-
    -
  • Major new features and significant changes to the code base are unlikely to be accepted without deeper discussion within the developer community
  • -
  • Bug fixes take higher priority compared to features
  • -
  • The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
  • -
  • Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately
  • -
-
-
-
-

Hint

-

Not sure what to work on? Just pick an issue marked as Low Hanging Fruit Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

-
-
-
-

Before You Start Coding

-

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it’s generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or Slack.

-
-
You should also
-
    -
  • Avoid redundant work by searching for already reported issues in JIRA
  • -
  • Create a new issue early in the process describing what you’re working on - not just after finishing your patch
  • -
  • Link related JIRA issues with your own ticket to provide a better context
  • -
  • Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
  • -
  • Ping people whose advice you would like to ask on JIRA by mentioning them
  • -
-
-
There are also some fixed rules that you need to be aware of:
-
    -
  • Patches will only be applied to branches by following the release model
  • -
  • Code must be testable
  • -
  • Code must follow the Code Style convention
  • -
  • Changes must not break compatibility between different Cassandra versions
  • -
  • Contributions must be covered by the Apache License
  • -
-
-
-
-

Choosing the Right Branches to Work on

-

There are currently multiple Cassandra versions maintained in individual branches:

- ---- - - - - - - - - - - - - - - - - - - - - - - -
Version   Policy
4.0       Code freeze (see below)
3.11      Critical bug fixes only
3.0       Critical bug fixes only
2.2       Critical bug fixes only
2.1       Critical bug fixes only
-

Corresponding branches in git are easy to recognize as they are named cassandra-<release> (e.g. cassandra-3.0). The trunk branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

-
-

4.0 Code Freeze

-

Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. During that time, only the following patches will be considered for acceptance:

-
-
    -
  • Bug fixes
  • -
  • Measurable performance improvements
  • -
  • Changes not distributed as part of the release such as:
  • -
  • Testing related improvements and fixes
  • -
  • Build and infrastructure related changes
  • -
  • Documentation
  • -
-
-
-
-

Bug Fixes

-

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be cassandra-2.1 -> cassandra-2.2 -> cassandra-3.0 -> cassandra-3.x -> trunk. But don’t worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn’t very common. As a contributor, you’re also not expected to provide a single patch for each version. What you need to do however is:

-
-
    -
  • Be clear about which versions you could verify to be affected by the bug
  • -
  • For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on a case-by-case basis
  • -
  • If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0)
  • -
  • Test if the patch can be merged cleanly across branches in the direction listed above (see the sketch after this list)
  • -
  • Be clear which branches may need attention by the committer or even create custom patches for those if you can
  • -
-
-
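A quick, illustrative way to check whether your patch applies cleanly to a later branch before attaching it (the ticket number and branch below are made up):

git checkout cassandra-3.11
# --check reports conflicts without modifying the working tree
git apply --check 12345-3.0.patch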
-
-
-
-

Creating a Patch

-

So you’ve finished coding and the great moment arrives: it’s time to submit your patch!

-
-
    -
  1. Create a branch for your changes if you haven’t done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. git checkout -b 12345-3.0
  2. -
  3. Verify that you follow Cassandra’s Code Style
  4. -
  5. Make sure all tests (including yours) pass using ant as described in Testing. If you suspect a test failure is unrelated to your change, it may be useful to check the test’s status by searching the issue tracker or looking at CI results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites.
  6. -
  7. Consider going through the Review Checklist for your code. This will help you to understand how others will consider your change for inclusion.
  8. -
  9. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either:
  10. -
-
-
    -
  1. Attach a patch to JIRA with a single squashed commit in it (per branch), or
  2. -
  3. Squash the commits in-place in your branches into one
  4. -
-
-
    -
  1. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below. Please note that only user-impacting items should be listed in CHANGES.txt. If you fix a test that does not affect users and does not require changes in runtime code, then no CHANGES.txt entry is necessary.

    -
    <One sentence description, usually Jira title and CHANGES.txt summary>
    -<Optional lengthier description>
    -patch by <Authors>; reviewed by <Reviewers> for CASSANDRA-#####
    -
    -
    -
  2. -
  3. When you’re happy with the result, create a patch:

    -
  4. -
-
-
git add <any new or modified file>
-git commit -m '<message>'
-git format-patch HEAD~1
-mv <patch-file> <ticket-branchname.txt> (e.g. 12345-trunk.txt, 12345-3.0.txt)
-
-
-

Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch:

-
git push --set-upstream origin 12345-3.0
-
-
-
-
    -
  1. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless.
  2. -
  3. Attach the newly generated patch to the ticket/add a link to your branch and click “Submit Patch” at the top of the ticket. This will move the ticket into “Patch Available” status, indicating that your submission is ready for review.
  4. -
  5. Wait for other developers or committers to review it and hopefully +1 the ticket (see Review Checklist). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable.
  6. -
  7. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into “Patch Available” once again.
  8. -
-
-

Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/release_process.html b/src/doc/4.0-alpha4/development/release_process.html deleted file mode 100644 index 13aea177d..000000000 --- a/src/doc/4.0-alpha4/development/release_process.html +++ /dev/null @@ -1,370 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Release Process" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Release Process

- -
-

-

-
-
-

Attention

-
-
WORK IN PROGRESS
-
    -
  • A number of these steps still have to be finalised/tested.
  • -
  • The use of people.apache.org needs to be replaced with svnpubsub and dist.apache.org
  • -
-
-
-
-

The steps for Release Managers to create, vote and publish releases for Apache Cassandra.

-

While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC can complete the process of publishing and announcing the release.

-
-

Prerequisites

-
-
Background docs
-
-
-
-

A debian based linux OS is required to run the release steps from. Debian-based distros provide the required RPM, dpkg and repository management tools.

-
-

Create and publish your GPG key

-

To create a GPG key, follow the guidelines. -Include your public key in:

-
https://dist.apache.org/repos/dist/release/cassandra/KEYS
-
-
-

Publish your GPG key in a PGP key server, such as MIT Keyserver.

-
-
-
-

Create Release Artifacts

-

Any committer can perform the following steps to create and call a vote on a proposed release.

-

Check that there are no open urgent JIRA tickets currently being worked on. Also check with a PMC whether any security vulnerabilities are currently being worked on in private. Current project habit is to check the timing for a new release on the dev mailing lists.

-
-

Perform the Release

-

Run the following commands to generate and upload release artifacts, to a nexus staging repository and distribution location:

-
cd ~/git
-git clone https://github.com/apache/cassandra-builds.git
-# Edit the variables at the top of `cassandra-builds/cassandra-release/prepare_release.sh`
-
-# After cloning cassandra-builds repo, the prepare_release.sh is run from the actual cassandra git checkout,
-# on the branch/commit that we wish to tag for the tentative release along with version number to tag.
-# For example here <version-branch> might be `3.11` and <version> `3.11.3`
-cd ~/git/cassandra/
-git checkout cassandra-<version-branch>
-../cassandra-builds/cassandra-release/prepare_release.sh -v <version>
-
-
-

If successful, take note of the email text output which can be used in the next section “Call for a Vote”.

-

After validating the uploaded artifacts in staging, increment the version number in Cassandra on the cassandra-<version-branch>

-
-
cd ~/git/cassandra/
git checkout cassandra-<version-branch>
edit build.xml          # update `<property name="base.version" value="…"/>`
edit debian/changelog   # add entry for new version
edit CHANGES.txt        # add entry for new version
git commit -m "Update version to <next-version>" build.xml debian/changelog CHANGES.txt
git push
-
-
-
-

Call for a Vote

-

Fill out the following email template and send to the dev mailing list:

-
I propose the following artifacts for release as <version>.
-
-sha1: <git-sha>
-
-Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/<version>-tentative
-
-Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/org/apache/cassandra/apache-cassandra/<version>/
-
-Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/
-
-The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/
-
-The vote will be open for 72 hours (longer if needed).
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>-tentative
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>-tentative
-
-
-
-
-

Post-vote operations

-

Any PMC can perform the following steps to formalize and publish a successfully voted release.

-
-

Publish Artifacts

-

Run the following commands to publish the voted release artifacts:

-
cd ~/git
-git clone https://github.com/apache/cassandra-builds.git
-# edit the variables at the top of `finish_release.sh`
-
-# After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout,
-# on the tentative release tag that we wish to tag for the final release version number tag.
-cd ~/git/cassandra/
-git checkout <version>-tentative
-../cassandra-builds/cassandra-release/finish_release.sh -v <version> <staging_number>
-
-
-

If successful, take note of the email text output which can be used in the next section “Send Release Announcement”. -The output will also list the next steps that are required. The first of these is to commit changes made to your https://dist.apache.org/repos/dist/release/cassandra/ checkout.

-
-
-

Promote Nexus Repository

-
-
    -
  • Login to Nexus repository again.
  • -
  • Click on “Staging” and then on the repository with id “cassandra-staging”.
  • -
  • Find your closed staging repository, right click on it and choose “Promote”.
  • -
  • Select the “Releases” repository and click “Promote”.
  • -
  • Next click on “Repositories”, select the “Releases” repository and validate that your artifacts exist as you expect them.
  • -
-
-
-
-

Sign and Upload Distribution Packages to Bintray

-

Run the following command:

-
cd ~/git
-# FIXME the next command is wrong while people.apache.org is used instead of svnpubsub and dist.apache.org
-svn mv https://dist.apache.org/repos/dist/dev/cassandra/<version> https://dist.apache.org/repos/dist/release/cassandra/
-
-# Create the yum metadata, sign the metadata, and sign some files within the signed repo metadata that the ASF sig tool errors out on
-svn co https://dist.apache.org/repos/dist/release/cassandra/redhat/ cassandra-dist-redhat
-cd cassandra-dist-redhat/<abbreviated-version>x/
-createrepo .
-gpg --detach-sign --armor repodata/repomd.xml
-for f in `find repodata/ -name *.bz2`; do
-  gpg --detach-sign --armor $f;
-done
-
-svn co https://dist.apache.org/repos/dist/release/cassandra/<version> cassandra-dist-<version>
-cd cassandra-dist-<version>
-cassandra-build/cassandra-release/upload_bintray.sh cassandra-dist-<version>
-
-
-
-
-

Update and Publish Website

-

See the docs at https://svn.apache.org/repos/asf/cassandra/site/src/README for building and publishing the website. Also update the CQL doc if appropriate.

-
-
-

Release version in JIRA

-

Release the JIRA version.

-
-
    -
  • In JIRA go to the version that you want to release and release it.
  • -
  • Create a new version, if it has not been done before.
  • -
-
-
-
-

Update to Next Development Version

-

Edit and commit build.xml so the base.version property points to the next version.

-
- -
-

Send Release Announcement

-

Fill out the following email template and send to both user and dev mailing lists:

-
The Cassandra team is pleased to announce the release of Apache Cassandra version <version>.
-
-Apache Cassandra is a fully distributed database. It is the right choice
-when you need scalability and high availability without compromising
-performance.
-
- http://cassandra.apache.org/
-
-Downloads of source and binary distributions are listed in our download
-section:
-
- http://cassandra.apache.org/download/
-
-This version is <the first|a bug fix> release[1] on the <version-base> series. As always,
-please pay attention to the release notes[2] and let us know[3] if you
-were to encounter any problem.
-
-Enjoy!
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>
-[3]: https://issues.apache.org/jira/browse/CASSANDRA
-
-
-
-
-

Update Slack Cassandra topic

-
-
Update topic in cassandra Slack room
-
/topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don’t ask to ask
-
-
-
-

Tweet from @Cassandra

-

Tweet the new release, from the @Cassandra account

-
-
-

Delete Old Releases

-

As described in When to Archive. -Also check people.apache.org as previous release scripts used it.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/development/testing.html b/src/doc/4.0-alpha4/development/testing.html deleted file mode 100644 index 30d8cb3bb..000000000 --- a/src/doc/4.0-alpha4/development/testing.html +++ /dev/null @@ -1,185 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Testing" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Testing

-

Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you’re working on.

-
-

Unit Testing

-

The simplest way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the test/unit directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible, as Cassandra doesn’t have a very mock-friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of its convenient helper methods, as in the following example.

-
@Test
-public void testBatchAndList() throws Throwable
-{
-   createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
-   execute("BEGIN BATCH " +
-           "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
-           "APPLY BATCH");
-
-   assertRows(execute("SELECT l FROM %s WHERE k = 0"),
-              row(list(1, 2, 3)));
-}
-
-
-

Unit tests can be run from the command line using the ant test command, ant test -Dtest.name=<simple_classname> to execute a test suite or ant testsome -Dtest.name=<FQCN> -Dtest.methods=<testmethod1>[,testmethod2] for individual tests. For example, to run all test methods in the org.apache.cassandra.cql3.SimpleQueryTest class, you would run:

-
ant test -Dtest.name=SimpleQueryTest
-
-
-

To run only the testStaticCompactTables() test method from that class, you would run:

-
ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables
-
-
-

If you see an error like this:

-
Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found:
-org/apache/tools/ant/taskdefs/optional/junit/JUnitTask  using the classloader
-AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar]
-
-
-

You will need to install the ant-optional package since it contains the JUnitTask class.

-
-

Long running tests

-

Tests that consume a significant amount of time during execution can be found in the test/long directory and executed as regular JUnit tests or standalone programs. Except for the execution time, there’s nothing really special about them. However, ant will only execute tests under test/long when using the ant long-test target.

-
-
-
-

DTests

-

One way of doing integration or system testing at larger scale is by using dtest, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ccmlib from the ccm project. Dtests will setup clusters using this library just as you do running ad-hoc ccm commands on your local machine. Afterwards dtests will use the Python driver to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes.

-

Using dtests helps us to prevent regression bugs by continually executing tests on the CI server against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration here.

-

The best way to learn how to write dtests is probably by reading the introduction “How to Write a Dtest” and by looking at existing, recently updated tests in the project. New tests must follow certain style conventions that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR.

-

Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you.

-
-
-

Performance Testing

-

Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable.

-
-

Cassandra Stress Tool

-

See Cassandra Stress

-
-
-

cstar_perf

-

Another tool available on github is cstar_perf that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it.

-
-
-

CircleCI

-

Cassandra ships with a default CircleCI configuration. To enable running tests on your branches, go to the CircleCI website, click “Login” and log in with your github account. Then give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click “Projects”, then your github account, and then the settings for the project. If you leave the parallelism at 1 for Cassandra, only ant eclipse-warnings and ant test will be run. If you up the parallelism to 4, it also runs ant long-test, ant test-compression and ant stress-test.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/faq/index.html b/src/doc/4.0-alpha4/faq/index.html deleted file mode 100644 index af8203522..000000000 --- a/src/doc/4.0-alpha4/faq/index.html +++ /dev/null @@ -1,318 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Frequently Asked Questions" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Frequently Asked Questions

- -
-

Why can’t I set listen_address to listen on 0.0.0.0 (all my addresses)?

-

Cassandra is a gossip-based distributed system and listen_address is the address a node tells other nodes to reach -it at. Telling other nodes “contact me on any of my addresses” is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen.

-

If you don’t want to manually specify an IP to listen_address for each node in your cluster (understandable!), leave -it blank and Cassandra will use InetAddress.getLocalHost() to pick an address. Then it’s up to you or your ops team -to make things resolve correctly (/etc/hosts/, dns, etc).

-

One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769).

-

See CASSANDRA-256 and CASSANDRA-43 for more gory details.

-
-
-

What ports does Cassandra use?

-

By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX. The internode communication and native protocol ports -are configurable in the Cassandra Configuration File. The JMX port is configurable in cassandra-env.sh (through JVM -options). All ports are TCP.

-
-
-

What happens to existing data in my cluster when I add new nodes?

-

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See Adding, replacing, moving and removing nodes.

-
-
-

I delete data from Cassandra, but disk usage stays the same. What gives?

-

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can’t actually be removed when you perform a delete; instead, a marker (also called a “tombstone”) is written to indicate the value’s new status. Never fear though: on the first compaction that occurs between the data and the tombstone, the data will be expunged completely and the corresponding disk space recovered. See Compaction for more detail.

-
-
-

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?

-

This happens when you have the same token assigned to each node. Don’t do that.

-

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

-

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart.

-
-
-

Can I change the replication factor (of a keyspace) on a live cluster?

-

Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data:

-
    -
  • Alter the replication factor for desired keyspace (using cqlsh for instance).
  • -
  • If you’re reducing the replication factor, run nodetool cleanup on the cluster to remove surplus replicated data. -Cleanup runs on a per-node basis.
  • -
  • If you’re increasing the replication factor, run nodetool repair -full to ensure data is replicated according to the new -configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster -performance. It’s highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will -most likely swamp it. Note that you will need to run a full repair (-full) to make sure that already repaired -sstables are not skipped.
  • -
-
-
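For example, a sketch of increasing the replication factor of a hypothetical keyspace my_ks (the datacenter name is also made up) and then repairing:

$ bin/cqlsh -e "ALTER KEYSPACE my_ks WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};"
$ bin/nodetool repair -full my_ks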
-

Can I Store (large) BLOBs in Cassandra?

-

Cassandra isn’t optimized for large file or BLOB storage and a single blob value is always read and sent to the client entirely. As such, storing small blobs (less than single-digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

-

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the max_mutation_size_in_kb setting in the Cassandra Configuration File (which defaults to half of commitlog_segment_size_in_mb, which itself defaults to 32MB).

-
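One common, purely illustrative way to split blobs is to store each blob as several rows keyed by a chunk index (keyspace and table names are made up):

$ bin/cqlsh -e "CREATE TABLE my_ks.blob_chunks (
    blob_id uuid,
    chunk_index int,
    data blob,
    PRIMARY KEY (blob_id, chunk_index));"

The client then writes and reads the chunks individually and reassembles them, keeping each chunk comfortably below the mutation size limit.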
-
-

Nodetool says “Connection refused to host: 127.0.1.1” for any remote host. What gives?

-

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

-

If you are not using DNS, then make sure that your /etc/hosts files are accurate on both ends. If that fails, try -setting the -Djava.rmi.server.hostname=<public name> JVM option near the bottom of cassandra-env.sh to an -interface that you can reach from the remote machine.

-
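For example, near the bottom of cassandra-env.sh (the address is illustrative):

# make RMI advertise an address the remote machine can actually reach
JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=203.0.113.10"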
-
-

Will batching my operations speed up my bulk load?

-

No. Using batches to load data will generally just add “spikes” of latency. Use asynchronous INSERTs instead, or use -true Bulk Loading.

-

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch -stay reasonable). But never ever blindly batch everything!

-
-
-

On RHEL nodes are unable to join the ring

-

Check if SELinux is on; if it is, turn it off.

-
-
-

How do I unsubscribe from the email list?

-

Send an email to user-unsubscribe@cassandra.apache.org.

-
-
-

Why does top report that Cassandra is using a lot more memory than the Java heap max?

-

Cassandra uses Memory Mapped Files (mmap) internally. That is, we -use the operating system’s virtual memory system to map a number of on-disk files into the Cassandra process’ address -space. This will “use” virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that.

-

What matters from the perspective of “memory use” in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap’d /dev/zero, which represent real memory used. The key issue is that for a mmap’d file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write.

-

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don’t -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail here.

-
-
-

What are seeds?

-

Seeds are used during startup to discover the cluster.

-

If you configure your nodes to refer to some node as a seed, nodes in your ring tend to send Gossip messages to seeds more often (also see the section on gossip) than to non-seeds. In other words, seeds work as hubs of the Gossip network. With seeds, each node can detect status changes of other nodes quickly.

-

Seeds are also used by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node joins the ring, it learns about the other nodes, so it doesn’t need a seed on subsequent boots.

-

You can make a node a seed at any time. There is nothing special about seed nodes. If you list the node in the seed list, it is a seed.

-

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to the seed list later. If you have no data (new install) you do not have to worry about bootstrap at all.

-

Recommended usage of seeds:

-
    -
  • pick two (or more) nodes per data center as seed nodes.
  • -
  • sync the seed list to all your nodes
  • -
-
-
-

Does single seed mean single point of failure?

-

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in production systems.

-
-
-

Why can’t I call jmx method X on jconsole?

-

Some JMX operations take array arguments and, as jconsole doesn’t support array arguments, those operations can’t be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.

-
-
-

Why do I see “… messages dropped …” in the logs?

-

This is a symptom of load shedding – Cassandra defending itself against more requests than it can handle.

-

Internode messages which are received by a node, but do not get processed within their proper timeout (see read_request_timeout, write_request_timeout, … in the Cassandra Configuration File), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

-

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

-

For reads, this means a read request may not have completed.

-

Load shedding is part of the Cassandra architecture, if this is a persistent issue it is generally a sign of an -overloaded node or cluster.

-
-
-

Cassandra dies with java.lang.OutOfMemoryError: Map failed

-

If Cassandra is dying specifically with the “Map failed” message, it means the OS is denying java the ability to -lock more memory. In linux, this typically means memlock is limited. Check /proc/<pid of cassandra>/limits to verify -this and raise it (eg, via ulimit in bash). You may also need to increase vm.max_map_count. Note that the debian -package handles this for you automatically.

-
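A sketch of checking and raising these limits (values are illustrative; run with appropriate privileges):

$ grep 'Max locked memory' /proc/$(pgrep -f CassandraDaemon)/limits
$ ulimit -l unlimited              # in the shell that will start Cassandra
$ sudo sysctl -w vm.max_map_count=1048575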
-
-

What happens if two updates are made with the same timestamp?

-

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected.

-
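A small illustration of the tie-breaking rule using explicit client-supplied timestamps (keyspace and table are hypothetical and assumed to exist):

$ bin/cqlsh -e "
    UPDATE my_ks.t USING TIMESTAMP 1000 SET v = 'apple'  WHERE k = 1;
    UPDATE my_ks.t USING TIMESTAMP 1000 SET v = 'banana' WHERE k = 1;
    SELECT v FROM my_ks.t WHERE k = 1;  -- 'banana' wins, being the lexically larger value"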
-
-

Why bootstrapping a new node fails with a “Stream failed” error?

-

Two main possibilities:

-
    -
  1. the GC may be creating long pauses disrupting the streaming process
  2. -
  3. compactions happening in the background hold streaming long enough that the TCP connection fails
  4. -
-

In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (the default is very high on Linux). Try just running the following:

-
$ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-
-

To make those settings permanent, add them to your /etc/sysctl.conf file.

-
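For example, the equivalent /etc/sysctl.conf entries would be:

net.ipv4.tcp_keepalive_time=60
net.ipv4.tcp_keepalive_intvl=60
net.ipv4.tcp_keepalive_probes=5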

Note: GCE’s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/genindex.html b/src/doc/4.0-alpha4/genindex.html deleted file mode 100644 index b1861e687..000000000 --- a/src/doc/4.0-alpha4/genindex.html +++ /dev/null @@ -1,95 +0,0 @@ - ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Index" -doc-header-links: ' - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha4/getting_started/configuring.html b/src/doc/4.0-alpha4/getting_started/configuring.html deleted file mode 100644 index a72af56f0..000000000 --- a/src/doc/4.0-alpha4/getting_started/configuring.html +++ /dev/null @@ -1,161 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

For running Cassandra on a single node, the default configuration file present at ./conf/cassandra.yaml is enough, -you shouldn’t need to change any configuration. However, when you deploy a cluster of nodes, or use clients that -are not on the same host, then there are some parameters that must be changed.

-

The Cassandra configuration files can be found in the conf directory of tarballs. For packages, the configuration -files will be located in /etc/cassandra.

-
-

Main runtime properties

-

Most configuration in Cassandra is done via yaml properties that can be set in cassandra.yaml. At a minimum you should consider setting the following properties:

-
    -
  • cluster_name: the name of your cluster.
  • -
  • seeds: a comma separated list of the IP addresses of your cluster seeds.
  • -
  • storage_port: you don’t necessarily need to change this but make sure that there are no firewalls blocking this -port.
  • -
  • listen_address: the IP address of your node; this is what allows other nodes to communicate with this node, so it is important that you change it. Alternatively, you can set listen_interface to tell Cassandra which interface to use, and consequently which address to use. Set only one, not both.
  • -
  • native_transport_port: as for storage_port, make sure this port is not blocked by firewalls as clients will -communicate with Cassandra on this port.
  • -
-
-
-
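As an illustration only, a minimal set of overrides in cassandra.yaml for a small cluster might look like this excerpt (cluster name and addresses are made up):

cluster_name: 'Example Cluster'
listen_address: 10.0.0.1
storage_port: 7000
native_transport_port: 9042
seed_provider:
    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
      parameters:
          - seeds: "10.0.0.1,10.0.0.2"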

Changing the location of directories

-

The following yaml properties control the location of directories:

-
    -
  • data_file_directories: one or more directories where data files are located.
  • -
  • commitlog_directory: the directory where commitlog files are located.
  • -
  • saved_caches_directory: the directory where saved caches are located.
  • -
  • hints_directory: the directory where hints are located.
  • -
-

For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks.

-
-
-

Environment variables

-

JVM-level settings such as heap size can be set in cassandra-env.sh. You can add any additional JVM command line -argument to the JVM_OPTS environment variable; when Cassandra starts these arguments will be passed to the JVM.

-
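For example, an illustrative extra flag passed via the environment before starting Cassandra from a tarball installation:

$ export JVM_OPTS="$JVM_OPTS -Dcassandra.ring_delay_ms=15000"
$ bin/cassandra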
-
-

Logging

-

The logger in use is logback. You can change logging properties by editing logback.xml. By default it will log at -INFO level into a file called system.log and at debug level into a file called debug.log. When running in the -foreground, it will also log at INFO level to the console.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/getting_started/drivers.html b/src/doc/4.0-alpha4/getting_started/drivers.html deleted file mode 100644 index f06c7cd3e..000000000 --- a/src/doc/4.0-alpha4/getting_started/drivers.html +++ /dev/null @@ -1,248 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Client drivers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Client drivers

-

Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver.

- -
-

Python

- -
- - - - - - -
-

Clojure

- -
-
-

Erlang

- -
-
-

Go

- -
-
-

Haskell

- -
-
-

Rust

- -
- -
-

Elixir

- -
-
-

Dart

- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/getting_started/index.html b/src/doc/4.0-alpha4/getting_started/index.html deleted file mode 100644 index 011d43672..000000000 --- a/src/doc/4.0-alpha4/getting_started/index.html +++ /dev/null @@ -1,165 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Getting Started" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-alpha4/getting_started/installing.html b/src/doc/4.0-alpha4/getting_started/installing.html deleted file mode 100644 index ff9b1a9e5..000000000 --- a/src/doc/4.0-alpha4/getting_started/installing.html +++ /dev/null @@ -1,388 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Installing Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Installing Cassandra

-

These are the instructions for deploying the supported releases of Apache Cassandra on Linux servers.

-

Cassandra runs on a wide array of Linux distributions including (but not limited to):

-
    -
  • Ubuntu, most notably LTS releases 16.04 to 18.04
  • -
  • CentOS & RedHat Enterprise Linux (RHEL) including 6.6 to 7.7
  • -
  • Amazon Linux AMIs including 2016.09 through to Linux 2
  • -
  • Debian versions 8 & 9
  • -
  • SUSE Enterprise Linux 12
  • -
-

This is not an exhaustive list of operating system platforms, nor is it prescriptive. However, users are well-advised to conduct exhaustive tests of their own, particularly for less-popular distributions of Linux. Deploying on older versions is not recommended unless you have previous experience with the older distribution in a production environment.

-
-

Prerequisites

-
    -
  • Install the latest version of Java 8, either the Oracle Java Standard Edition 8 or OpenJDK 8. To -verify that you have the correct version of java installed, type java -version.
  • -
  • NOTE: Experimental support for Java 11 was added in Cassandra 4.0 (CASSANDRA-9608). -Running Cassandra on Java 11 is experimental. Do so at your own risk. For more information, see -NEWS.txt.
  • -
  • For using cqlsh, the latest version of Python 2.7 or Python 3.6+. To verify that you have -the correct version of Python installed, type python --version.
  • -
-
-
-

Choosing an installation method

-

For most users, installing the binary tarball is the simplest choice. The tarball unpacks all its contents -into a single location with binaries and configuration files located in their own subdirectories. The most -obvious attribute of the tarball installation is it does not require root permissions and can be -installed on any Linux distribution.

-

Packaged installations require root permissions. Install the RPM build on CentOS and RHEL-based distributions if you want to install Cassandra using YUM. Install the Debian build on Ubuntu and other Debian-based distributions if you want to install Cassandra using APT. Note that both the YUM and APT methods require root permissions and will install the binaries and configuration files as the cassandra OS user.&#13;

-
-
-

Installing the binary tarball

-
    -
  1. Verify the version of Java installed. For example:
  2. -
-
$ java -version
-openjdk version "1.8.0_222"
-OpenJDK Runtime Environment (build 1.8.0_222-8u222-b10-1ubuntu1~16.04.1-b10)
-OpenJDK 64-Bit Server VM (build 25.222-b10, mixed mode)
-
-
-
    -
  1. Download the binary tarball from one of the mirrors on the Apache Cassandra Download -site. For example, to download 4.0:
  2. -
-
$ curl -OL http://apache.mirror.digitalpacific.com.au/cassandra/4.0.0/apache-cassandra-4.0.0-bin.tar.gz
-
-
-

NOTE: The mirrors only host the latest versions of each major supported release. To download an earlier -version of Cassandra, visit the Apache Archives.

-
    -
  1. OPTIONAL: Verify the integrity of the downloaded tarball using one of the methods here. -For example, to verify the hash of the downloaded file using GPG:
  2. -
-
$ gpg --print-md SHA256 apache-cassandra-4.0.0-bin.tar.gz
-apache-cassandra-4.0.0-bin.tar.gz: 28757DDE 589F7041 0F9A6A95 C39EE7E6
-                                   CDE63440 E2B06B91 AE6B2006 14FA364D
-
-
-

Compare the signature with the SHA256 file from the Downloads site:

-
$ curl -L https://downloads.apache.org/cassandra/4.0.0/apache-cassandra-4.0.0-bin.tar.gz.sha256
-28757dde589f70410f9a6a95c39ee7e6cde63440e2b06b91ae6b200614fa364d
-
-
-
    -
  1. Unpack the tarball:
  2. -
-
$ tar xzvf apache-cassandra-4.0.0-bin.tar.gz
-
-
-

The files will be extracted to the apache-cassandra-4.0.0/ directory. This is the tarball installation -location.

-
    -
  1. Located in the tarball installation location are the directories for the scripts, binaries, utilities, configuration, data and log files:
  2. -
-
<tarball_installation>/
-    bin/
-    conf/
-    data/
-    doc/
-    interface/
-    javadoc/
-    lib/
-    logs/
-    pylib/
-    tools/
-
-
-

For information on how to configure your installation, see -Configuring Cassandra.

-
    -
  1. Start Cassandra:
  2. -
-
$ cd apache-cassandra-4.0.0/
-$ bin/cassandra
-
-
-

NOTE: This will run Cassandra as the authenticated Linux user.

-

You can monitor the progress of the startup with:

-
$ tail -f logs/system.log
-
-
-

Cassandra is ready when you see an entry like this in the system.log:

-
INFO  [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)...
-
-
-
    -
  1. Check the status of Cassandra:
  2. -
-
$ bin/nodetool status
-
-
-

The status column in the output should report UN which stands for “Up/Normal”.

-

Alternatively, connect to the database with:

-
$ bin/cqlsh
-
-
-
-
-

Installing the Debian packages

-
    -
  1. Verify the version of Java installed. For example:
  2. -
-
$ java -version
-openjdk version "1.8.0_222"
-OpenJDK Runtime Environment (build 1.8.0_222-8u222-b10-1ubuntu1~16.04.1-b10)
-OpenJDK 64-Bit Server VM (build 25.222-b10, mixed mode)
-
-
-
    -
  1. Add the Apache repository of Cassandra to the file cassandra.sources.list. The latest major version -is 4.0 and the corresponding distribution name is 40x (with an “x” as the suffix). -For older releases use 311x for C* 3.11 series, 30x for 3.0, 22x for 2.2 and 21x for 2.1. -For example, to add the repository for version 4.0 (40x):
  2. -
-
$ echo "deb http://www.apache.org/dist/cassandra/debian 40x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-deb http://www.apache.org/dist/cassandra/debian 40x main
-
-
-
    -
  1. Add the Apache Cassandra repository keys to the list of trusted keys on the server:
  2. -
-
$ curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
-  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
-                                 Dload  Upload   Total   Spent    Left  Speed
-100  266k  100  266k    0     0   320k      0 --:--:-- --:--:-- --:--:--  320k
-OK
-
-
-
    -
  1. Update the package index from sources:
  2. -
-
$ sudo apt-get update
-
-
-
    -
  1. Install Cassandra with APT:
  2. -
-
$ sudo apt-get install cassandra
-
-
-

NOTE: A new Linux user cassandra will get created as part of the installation. The Cassandra service -will also be run as this user.

-
    -
  1. The Cassandra service gets started automatically after installation. Monitor the progress of -the startup with:
  2. -
-
$ tail -f /var/log/cassandra/system.log
-
-
-

Cassandra is ready when you see an entry like this in the system.log:

-
INFO  [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)...
-
-
-

NOTE: For information on how to configure your installation, see -Configuring Cassandra.

-
    -
  1. Check the status of Cassandra:
  2. -
-
$ nodetool status
-
-
-

The status column in the output should report UN which stands for “Up/Normal”.

-

Alternatively, connect to the database with:

-
$ cqlsh
-
-
-
-
-

Installing the RPM packages

-
    -
  1. Verify the version of Java installed. For example:
  2. -
-
$ java -version
-openjdk version "1.8.0_222"
-OpenJDK Runtime Environment (build 1.8.0_232-b09)
-OpenJDK 64-Bit Server VM (build 25.232-b09, mixed mode)
-
-
-
    -
  1. Add the Apache repository of Cassandra to the file /etc/yum.repos.d/cassandra.repo (as the root -user). The latest major version is 4.0 and the corresponding distribution name is 40x (with an “x” as the suffix). -For older releases use 311x for C* 3.11 series, 30x for 3.0, 22x for 2.2 and 21x for 2.1. -For example, to add the repository for version 4.0 (40x):
  2. -
-
[cassandra]
-name=Apache Cassandra
-baseurl=https://downloads.apache.org/cassandra/redhat/40x/
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://downloads.apache.org/cassandra/KEYS
-
-
-
    -
  1. Update the package index from sources:
  2. -
-
$ sudo yum update
-
-
-
    -
  1. Install Cassandra with YUM:
  2. -
-
$ sudo yum install cassandra
-
-
-

NOTE: A new Linux user cassandra will get created as part of the installation. The Cassandra service -will also be run as this user.

-
    -
  1. Start the Cassandra service:
  2. -
-
$ sudo service cassandra start
-
-
-
    -
  1. Monitor the progress of the startup with:
  2. -
-
$ tail -f /var/log/cassandra/system.log
-
-
-

Cassandra is ready when you see an entry like this in the system.log:

-
INFO  [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)...
-
-
-

NOTE: For information on how to configure your installation, see -Configuring Cassandra.

-
    -
  1. Check the status of Cassandra:
  2. -
-
$ nodetool status
-
-
-

The status column in the output should report UN which stands for “Up/Normal”.

-

Alternatively, connect to the database with:

-
$ cqlsh
-
-
-
-
-

Further installation info

-

For help with installation issues, see the Troubleshooting section.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/getting_started/production.html b/src/doc/4.0-alpha4/getting_started/production.html deleted file mode 100644 index 782629951..000000000 --- a/src/doc/4.0-alpha4/getting_started/production.html +++ /dev/null @@ -1,246 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Production Recommendations" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Production Recommendations

-

The cassandra.yaml and jvm.options files have a number of notes and recommendations for production usage. This page -expands on some of the notes in these files with additional information.

-
-

Tokens

-

Using more than 1 token (referred to as vnodes) allows for more flexible expansion and more streaming peers when -bootstrapping new nodes into the cluster. This can limit the negative impact of streaming (I/O and CPU overhead) -as well as allow for incremental cluster expansion.

-

As a tradeoff, more tokens will lead to sharing data with more peers, which can result in decreased availability. To learn more about this we -recommend reading this paper.

-

The number of tokens can be changed using the following setting:

-

num_tokens: 16

-

Here are the most common token counts with a brief explanation of when and why you would use each one.

- ---- - - - - - - - - - - - - - - - - -
Token CountDescription
1Maximum availability, maximum cluster size, fewest peers, but inflexible expansion. Must always double the size of the cluster to expand and remain balanced.&#13;
4A healthy mix of elasticity and availability. Recommended for clusters which will eventually -reach over 30 nodes. Requires adding approximately 20% more nodes to remain balanced. -Shrinking a cluster may result in cluster imbalance.
16Best for heavily elastic clusters which expand and shrink regularly, but may have availability issues with larger clusters. Not recommended for clusters over 50 nodes.&#13;
-

In addition to setting the token count, it’s extremely important that allocate_tokens_for_local_replication_factor be -set as well, to ensure even token allocation.
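As a minimal sketch of the two settings together (the replication factor of 3 is an assumption for illustration, not part of the original guidance), the relevant cassandra.yaml lines for a new node might look like:

```yaml
# cassandra.yaml (sketch)
num_tokens: 16
# allocate tokens evenly for keyspaces using replication factor 3 in this datacenter
allocate_tokens_for_local_replication_factor: 3
```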

-
-
-

Read Ahead

-

Read ahead is an operating system feature that attempts to keep as much data loaded in the page cache as possible. The -goal is to decrease latency by using additional throughput on reads where the latency penalty is high due to seek times -on spinning disks. By leveraging read ahead, the OS can pull additional data into memory without the cost of additional -seeks. This works well when available RAM is greater than the size of the hot dataset, but can be problematic when the -hot dataset is much larger than available RAM. The benefit of read ahead decreases as the size of your hot dataset gets -bigger in proportion to available memory.

-

With small partitions (usually tables with no partition key, but not limited to this case) and solid state drives, read -ahead can increase disk usage without any of the latency benefits, and in some cases can result in up to -a 5x latency and throughput performance penalty. Read heavy, key/value tables with small (under 1KB) rows are especially -prone to this problem.

-

We recommend the following read ahead settings:

- ---- - - - - - - - - - - - - - -
HardwareInitial Recommendation
Spinning Disks64KB
SSD4KB
-

Read ahead can be adjusted on Linux systems by using the blockdev tool.

-

For example, we can set the read ahead of /dev/sda1 to 4KB by doing the following:&#13;

-
blockdev --setra 8 /dev/sda1
-
-
-

Note: blockdev accepts the number of 512-byte sectors to read ahead. The argument of 8 above is equivalent to 4KB.&#13;
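As a hypothetical check (the device name /dev/sda1 is an assumption), you can read the current value back before and after changing it:

```bash
# show the current read ahead value, in 512-byte sectors
blockdev --getra /dev/sda1
# set read ahead to 8 sectors (4KB), the SSD recommendation above
blockdev --setra 8 /dev/sda1
```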

-

Since each system is different, use the above recommendations as a starting point and tune based on your SLA and throughput requirements. To understand how read ahead impacts disk resource usage, we recommend carefully reading through the troubleshooting portion of the documentation.&#13;

-
-
-

Compression

-

Compressed data is stored by compressing fixed size byte buffers and writing the data to disk. The buffer size is -determined by the chunk_length_in_kb element in the compression map of the schema settings.

-

The default setting is 16KB starting with Cassandra 4.0.

-

Since the entire compressed buffer must be read off disk, using too high of a compression chunk length can lead to -significant overhead when reading small records. Combined with the default read ahead setting this can result in massive -read amplification for certain workloads.
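For read-heavy tables with small rows, a smaller chunk length can be set per table. A hypothetical example (the keyspace, table and 4KB value are illustrative assumptions, not a blanket recommendation):

```
ALTER TABLE mykeyspace.mytable
  WITH compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
```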

-

LZ4Compressor is the default and recommended compression algorithm.

-

There is additional information on this topic on The Last Pickle Blog.

-
-
-

Compaction

-

There are different compaction strategies available for different workloads. -We recommend reading up on the different strategies to understand which is the best for your environment. Different tables -may (and frequently do) use different compaction strategies on the same cluster.
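As an illustrative sketch only (the keyspace, table and choice of strategy are assumptions, not a recommendation), the compaction strategy is set per table with CQL:

```
ALTER TABLE mykeyspace.mytable
  WITH compaction = {'class': 'LeveledCompactionStrategy'};
```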

-
-
-

Encryption

-

It is significantly easier to set up peer to peer encryption and client server encryption when setting up your production -cluster as opposed to setting it up once the cluster is already serving production traffic. If you are planning on using network encryption -eventually (in any form), we recommend setting it up now. Changing these configurations down the line is not impossible, -but mistakes can result in downtime or data loss.
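A hedged sketch of the relevant cassandra.yaml sections is shown below; the keystore paths and passwords are placeholder assumptions, and cassandra.yaml documents the full set of options:

```yaml
server_encryption_options:
  internode_encryption: all        # encrypt all node-to-node traffic
  keystore: /etc/cassandra/conf/keystore.jks
  keystore_password: changeit
  truststore: /etc/cassandra/conf/truststore.jks
  truststore_password: changeit

client_encryption_options:
  enabled: true                    # encrypt client-to-node traffic
  keystore: /etc/cassandra/conf/keystore.jks
  keystore_password: changeit
```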

-
-
-

Ensure Keyspaces are Created with NetworkTopologyStrategy

-

Production clusters should never use SimpleStrategy. Production keyspaces should use the NetworkTopologyStrategy (NTS).

-

For example:

-
create KEYSPACE mykeyspace WITH replication =
-{'class': 'NetworkTopologyStrategy', 'datacenter1': 3};
-
-
-

NetworkTopologyStrategy allows Cassandra to take advantage of multiple racks and data centers.
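For instance, a keyspace replicated across two datacenters might be created as follows (the datacenter names and replication factors are assumptions for illustration):

```
CREATE KEYSPACE mykeyspace WITH replication =
  {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3};
```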

-
-
-

Configure Racks and Snitch

-

Correctly configuring or changing racks after a cluster has been provisioned is an unsupported process. Migrating from -a single rack to multiple racks is also unsupported and can result in data loss.

-

Using GossipingPropertyFileSnitch is the most flexible solution for on premise or mixed cloud environments. Ec2Snitch -is reliable for AWS EC2 only environments.
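A hypothetical sketch of the two settings involved (the datacenter and rack names are assumptions): the snitch is selected in cassandra.yaml and each node declares its location in conf/cassandra-rackdc.properties.

```
# cassandra.yaml
endpoint_snitch: GossipingPropertyFileSnitch

# conf/cassandra-rackdc.properties (set per node)
dc=dc1
rack=rack1
```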

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/getting_started/querying.html b/src/doc/4.0-alpha4/getting_started/querying.html deleted file mode 100644 index 4083b7f72..000000000 --- a/src/doc/4.0-alpha4/getting_started/querying.html +++ /dev/null @@ -1,147 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Inserting and querying" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Inserting and querying

-

The API to Cassandra is CQL, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done:

-
    -
  • either using cqlsh,
  • -
  • or through a client driver for Cassandra.
  • -
-
-

CQLSH

-

cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:

-
$ bin/cqlsh localhost
-Connected to Test Cluster at localhost:9042.
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-Use HELP for help.
-cqlsh> SELECT cluster_name, listen_address FROM system.local;
-
- cluster_name | listen_address
---------------+----------------
- Test Cluster |      127.0.0.1
-
-(1 rows)
-cqlsh>
-
-
-

See the cqlsh section for full documentation.

-
-
-

Client drivers

-

A lot of client drivers are provided by the Community and a list of known drivers is provided in the next section. You should refer to the documentation of each drivers for more information on how to use them.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/index.html b/src/doc/4.0-alpha4/index.html deleted file mode 100644 index d12078972..000000000 --- a/src/doc/4.0-alpha4/index.html +++ /dev/null @@ -1,86 +0,0 @@ ---- -layout: doclandingpage -title: "Documentation" -is_homepage: false -is_sphinx_doc: false ---- - -

Apache Cassandra Documentation v4.0-alpha4

- -
This documentation is a work-in-progress. - Contributions are welcome.
- -

Main documentation

- -
-
-
- - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - -
- - - -
- - - -
- - - -
- -

Meta information&#13;

- - - - diff --git a/src/doc/4.0-alpha4/new/auditlogging.html b/src/doc/4.0-alpha4/new/auditlogging.html deleted file mode 100644 index 1d0f81a0e..000000000 --- a/src/doc/4.0-alpha4/new/auditlogging.html +++ /dev/null @@ -1,549 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Audit Logging" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Audit Logging

-

Audit Logging is a new feature in Apache Cassandra 4.0 (CASSANDRA-12151). All database activity is logged to a directory in the local filesystem and the audit log files are rolled periodically. All database operations are monitored and recorded. Audit logs are stored as files in a local directory rather than in the database itself, which provides several benefits, some of which are:&#13;

-
    -
  • No additional database capacity is needed to store audit logs
  • -
  • No query tool is required to read them, whereas storing the audit logs in the database would require one&#13;
  • -
  • Latency of database operations is not affected; no performance impact
  • -
  • It is easier to implement file based logging than database based logging
  • -
-
-

What does Audit Logging Log?

-

Audit logging logs:

-
    -
  1. All authentication which includes successful and failed login attempts
  2. -
  3. All database command requests to CQL; both failed and successful CQL statements are logged&#13;
  4. -
-

More specifically an audit log entry could be one of two types:

-
    -
  1. CQL Audit Log Entry Type or
  2. -
  3. Common Audit Log Entry Type
  4. -
-

Each of these types comprises several database operations. The CQL Audit Log Entry Type could be one of the following; the category of each CQL audit log entry type is listed in parentheses.&#13;

-
    -
  1. SELECT(QUERY),
  2. -
  3. UPDATE(DML),
  4. -
  5. DELETE(DML),
  6. -
  7. TRUNCATE(DDL),
  8. -
  9. CREATE_KEYSPACE(DDL),
  10. -
  11. ALTER_KEYSPACE(DDL),
  12. -
  13. DROP_KEYSPACE(DDL),
  14. -
  15. CREATE_TABLE(DDL),
  16. -
  17. DROP_TABLE(DDL),
  18. -
  19. PREPARE_STATEMENT(PREPARE),
  20. -
  21. DROP_TRIGGER(DDL),
  22. -
  23. LIST_USERS(DCL),
  24. -
  25. CREATE_INDEX(DDL),
  26. -
  27. DROP_INDEX(DDL),
  28. -
  29. GRANT(DCL),
  30. -
  31. REVOKE(DCL),
  32. -
  33. CREATE_TYPE(DDL),
  34. -
  35. DROP_AGGREGATE(DDL),
  36. -
  37. ALTER_VIEW(DDL),
  38. -
  39. CREATE_VIEW(DDL),
  40. -
  41. DROP_ROLE(DCL),
  42. -
  43. CREATE_FUNCTION(DDL),
  44. -
  45. ALTER_TABLE(DDL),
  46. -
  47. BATCH(DML),
  48. -
  49. CREATE_AGGREGATE(DDL),
  50. -
  51. DROP_VIEW(DDL),
  52. -
  53. DROP_TYPE(DDL),
  54. -
  55. DROP_FUNCTION(DDL),
  56. -
  57. ALTER_ROLE(DCL),
  58. -
  59. CREATE_TRIGGER(DDL),
  60. -
  61. LIST_ROLES(DCL),
  62. -
  63. LIST_PERMISSIONS(DCL),
  64. -
  65. ALTER_TYPE(DDL),
  66. -
  67. CREATE_ROLE(DCL),
  68. -
  69. USE_KEYSPACE (OTHER).
  70. -
-

The Common Audit Log Entry Type could be one of the following; the category of the Common audit log entry type is listed in parentheses.

-
    -
  1. REQUEST_FAILURE(ERROR),
  2. -
  3. LOGIN_ERROR(AUTH),
  4. -
  5. UNAUTHORIZED_ATTEMPT(AUTH),
  6. -
  7. LOGIN_SUCCESS (AUTH).
  8. -
-
-
-

What Audit Logging does not Log?

-

Audit logging does not log:

-
    -
  1. Configuration changes made in cassandra.yaml
  2. -
  3. Nodetool Commands
  4. -
-
-
-

Audit Logging is Flexible and Configurable

-

Audit logging is flexible and configurable in cassandra.yaml as follows:

-
    -
  • Keyspaces and tables to be monitored and audited may be specified.
  • -
  • Users to be included/excluded may be specified. By default all users are audit logged.
  • -
  • Categories of operations to audit or exclude may be specified.
  • -
  • The frequency at which to roll the log files may be specified. Default frequency is hourly.
  • -
-
-
-

Configuring Audit Logging

-

Audit Logging is configured on each node separately, in the audit_logging_options setting in cassandra.yaml. The settings may be the same or different on each node.&#13;

-
-

Enabling Audit Logging

-

Audit logging is enabled by setting the enabled option to true in the audit_logging_options setting.

-
audit_logging_options:
-   enabled: true
-
-
-
-
-

Setting the Logger

-

The audit logger is set with the logger option.

-
logger: BinAuditLogger
-
-
-

Two types of audit loggers are supported: FileAuditLogger and BinAuditLogger. -BinAuditLogger is the default setting. The BinAuditLogger is an efficient way to log events to file in a binary format.

-

FileAuditLogger is a synchronous, file-based audit logger that uses the standard logging mechanism. It logs events to the audit/audit.log file using the slf4j logger.&#13;

-

The NoOpAuditLogger is a No-Op implementation of the audit logger to be used as a default audit logger when audit logging is disabled.

-
-
-

Setting the Audit Logs Directory

-

The audit logs directory is set with the audit_logs_dir option. A new directory is not created automatically; an existing directory must be specified. The audit logs directory can also be configured using the cassandra.logdir.audit system property; by default it is set to cassandra.logdir + /audit/. A user-created directory may be used instead. As an example, create a directory for the audit logs and set its permissions.&#13;

-
sudo mkdir –p  /cassandra/audit/logs/hourly
-sudo chmod -R 777 /cassandra/audit/logs/hourly
-
-
-

Set the directory for the audit logs directory using the audit_logs_dir option.

-
audit_logs_dir: "/cassandra/audit/logs/hourly"
-
-
-
-
-

Setting Keyspaces to Audit

-

Set the keyspaces to include with the included_keyspaces option and the keyspaces to exclude with the excluded_keyspaces option. By default all other keyspaces are included, while system, system_schema and system_virtual_schema are excluded.&#13;

-
# included_keyspaces:
-# excluded_keyspaces: system, system_schema, system_virtual_schema
-
-
-
-
-

Setting Categories to Audit

-

The categories of database operations to be included are specified with the included_categories option as a comma separated list. By default all supported categories are included. The categories of database operations to be excluded are specified with excluded_categories option as a comma separated list. By default no category is excluded.
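For instance, a node that should audit only schema and authorization activity might set the following (a hypothetical selection, not a recommendation):

```yaml
included_categories: DDL, DCL, AUTH
```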

-
# included_categories:
-# excluded_categories:
-
-
-

The supported categories for audit log are:

-
    -
  1. QUERY
  2. -
  3. DML
  4. -
  5. DDL
  6. -
  7. DCL
  8. -
  9. OTHER
  10. -
  11. AUTH
  12. -
  13. ERROR
  14. -
  15. PREPARE
  16. -
-
-
-

Setting Users to Audit

-

Users to audit log are set with the included_users and excluded_users options. The included_users option specifies a comma separated list of users to include explicitly and by default all users are included. The excluded_users option specifies a comma separated list of users to exclude explicitly and by default no user is excluded.

-
# included_users:
-# excluded_users:
-
-
-
-
-

Setting the Roll Frequency

-

The roll_cycle option sets the frequency at which the audit log file is rolled. Supported values are MINUTELY, HOURLY, and DAILY. Default value is HOURLY, which implies that after every hour a new audit log file is created.

-
roll_cycle: HOURLY
-
-
-

An audit log file could get rolled for other reasons as well such as a log file reaches the configured size threshold.

-
-
-

Setting Archiving Options

-

The archiving options control archiving of the rolled audit logs. The archive command to use is set with the archive_command option, and max_archive_retries sets the maximum number of retries for failed archive commands.&#13;

-
# archive_command:
-# max_archive_retries: 10
-
-
-

The archive command has the form "/path/to/script.sh %path", where %path is replaced with the file being rolled.&#13;
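A minimal hypothetical archive script (its name, location and the archive directory are assumptions) could simply move each rolled segment out of the live directory:

```bash
#!/bin/bash
# /usr/local/bin/archive_audit.sh - invoked by Cassandra as "/usr/local/bin/archive_audit.sh %path"
set -e
ARCHIVE_DIR=/backup/cassandra/audit
mkdir -p "${ARCHIVE_DIR}"
# $1 is the rolled audit log file passed in place of %path
mv "$1" "${ARCHIVE_DIR}/"
```

It would then be referenced from cassandra.yaml as archive_command: "/usr/local/bin/archive_audit.sh %path".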

-
-
-

Other Settings

-

The other audit logs settings are as follows.

-
# block: true
-# max_queue_weight: 268435456 # 256 MiB
-# max_log_size: 17179869184 # 16 GiB
-
-
-

The block option specifies whether the audit logging should block if the logging falls behind or should drop log records.

-

The max_queue_weight option sets the maximum weight of in memory queue for records waiting to be written to the file before blocking or dropping.

-

The max_log_size option sets the maximum size of the rolled files to retain on disk before deleting the oldest.

-
-
-
-

Using Nodetool to Enable Audit Logging

-

The nodetool enableauditlog command may be used to enable audit logging; it overrides the settings in cassandra.yaml. The nodetool enableauditlog command syntax is as follows.&#13;

-
nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-        [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-        [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-        [(-u <username> | --username <username>)] enableauditlog
-        [--excluded-categories <excluded_categories>]
-        [--excluded-keyspaces <excluded_keyspaces>]
-        [--excluded-users <excluded_users>]
-        [--included-categories <included_categories>]
-        [--included-keyspaces <included_keyspaces>]
-        [--included-users <included_users>] [--logger <logger>]
-
-
-
-
OPTIONS
-
--- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
---excluded-categories <excluded_categories>
 Comma separated list of Audit Log Categories to be excluded for -audit log. If not set the value from cassandra.yaml will be used
---excluded-keyspaces <excluded_keyspaces>
 Comma separated list of keyspaces to be excluded for audit log. If -not set the value from cassandra.yaml will be used
---excluded-users <excluded_users>
 Comma separated list of users to be excluded for audit log. If not -set the value from cassandra.yaml will be used
--h <host>, --host <host>
 Node hostname or ip address
---included-categories <included_categories>
 Comma separated list of Audit Log Categories to be included for -audit log. If not set the value from cassandra.yaml will be used
---included-keyspaces <included_keyspaces>
 Comma separated list of keyspaces to be included for audit log. If -not set the value from cassandra.yaml will be used
---included-users <included_users>
 Comma separated list of users to be included for audit log. If not -set the value from cassandra.yaml will be used
---logger <logger>
 Logger name to be used for AuditLogging. Default BinAuditLogger. If -not set the value from cassandra.yaml will be used
--p <port>, --port <port>
 Remote jmx agent port number
--pp, --print-port
 Operate in 4.0 mode with hosts disambiguated by port number
-
-
-pw <password>, –password <password>
-
Remote jmx agent password
-
-pwf <passwordFilePath>, –password-file <passwordFilePath>
-
Path to the JMX password file
-
- --- - - - - -
--u <username>, --username <username>
 Remote jmx agent username
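Putting a few of these options together, a hypothetical invocation (the excluded keyspaces shown are simply the defaults from cassandra.yaml) might look like:

```bash
nodetool enableauditlog --logger BinAuditLogger \
    --excluded-keyspaces system,system_schema,system_virtual_schema
```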
-
-
-

The nodetool disableauditlog command disables audit logging. The command syntax is as follows.&#13;

-
nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-        [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-        [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-        [(-u <username> | --username <username>)] disableauditlog
-
-
-
-
OPTIONS
-
--- - - - - - - - - - - -
--h <host>, --host <host>
 Node hostname or ip address
--p <port>, --port <port>
 Remote jmx agent port number
--pp, --print-port
 Operate in 4.0 mode with hosts disambiguated by port number
-
-
-pw <password>, –password <password>
-
Remote jmx agent password
-
-pwf <passwordFilePath>, –password-file <passwordFilePath>
-
Path to the JMX password file
-
- --- - - - - -
--u <username>, --username <username>
 Remote jmx agent username
-
-
-
-
-

Viewing the Audit Logs

-

An audit log event comprises the keyspace being audited, the operation being logged, the scope and the user. An audit log entry comprises the following attributes concatenated with a “|”.&#13;

-
type (AuditLogEntryType): Type of request
-source (InetAddressAndPort): Source IP Address from which request originated
-user (String): User name
-timestamp (long ): Timestamp of the request
-batch (UUID): Batch of request
-keyspace (String): Keyspace on which request is made
-scope (String): Scope of request such as Table/Function/Aggregate name
-operation (String): Database operation such as CQL command
-options (QueryOptions): CQL Query options
-state (QueryState): State related to a given query
-
-
-

Some of these attributes may not be applicable to a given request and not all of these options must be set.

-
-
-

An Audit Logging Demo

-

To demonstrate audit logging, enable and configure audit logs with the following settings.&#13;

-
audit_logging_options:
-   enabled: true
-   logger: BinAuditLogger
-   audit_logs_dir: "/cassandra/audit/logs/hourly"
-   # included_keyspaces:
-   # excluded_keyspaces: system, system_schema, system_virtual_schema
-   # included_categories:
-   # excluded_categories:
-   # included_users:
-   # excluded_users:
-   roll_cycle: HOURLY
-   # block: true
-   # max_queue_weight: 268435456 # 256 MiB
-   # max_log_size: 17179869184 # 16 GiB
-   ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled:
-   # archive_command:
-   # max_archive_retries: 10
-
-
-

Create the audit log directory /cassandra/audit/logs/hourly and set its permissions as discussed earlier. Run some CQL commands, such as creating a keyspace, creating a table and querying a table. Any supported CQL commands may be run, as discussed in the section What does Audit Logging Log?. Change directory (with the cd command) to the audit logs directory.&#13;

-
cd /cassandra/audit/logs/hourly
-
-
-

List the files and directories; some .cq4 files should be listed. These are the audit log files.&#13;

-
[ec2-user@ip-10-0-2-238 hourly]$ ls -l
-total 28
--rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug  2 03:01 20190802-02.cq4
--rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug  2 03:01 20190802-03.cq4
--rw-rw-r--. 1 ec2-user ec2-user    65536 Aug  2 03:01 directory-listing.cq4t
-
-
-

The auditlogviewer tool is used to dump audit logs. Run the auditlogviewer tool with the audit log files directory path, which is a required argument. The output should be similar to the following.&#13;

-
[ec2-user@ip-10-0-2-238 hourly]$ auditlogviewer /cassandra/audit/logs/hourly
-WARN  03:12:11,124 Using Pauser.sleepy() as not enough processors, have 2, needs 8+
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711427328|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE AuditLogKeyspace;
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711427329|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE "auditlogkeyspace"
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711446279|type :SELECT|category:QUERY|ks:auditlogkeyspace|scope:t|operation:SELECT * FROM t;
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564713878834|type :DROP_TABLE|category:DDL|ks:auditlogkeyspace|scope:t|operation:DROP TABLE IF EXISTS
-AuditLogKeyspace.t;
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/3.91.56.164|port:42382|timestamp:1564714618360|ty
-pe:REQUEST_FAILURE|category:ERROR|operation:CREATE KEYSPACE AuditLogKeyspace
-WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};; Cannot add
-existing keyspace "auditlogkeyspace"
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564714690968|type :DROP_KEYSPACE|category:DDL|ks:auditlogkeyspace|operation:DROP KEYSPACE AuditLogKeyspace;
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/3.91.56.164|port:42406|timestamp:1564714708329|ty pe:CREATE_KEYSPACE|category:DDL|ks:auditlogkeyspace|operation:CREATE KEYSPACE
-AuditLogKeyspace
-WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564714870678|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE auditlogkeyspace;
-[ec2-user@ip-10-0-2-238 hourly]$
-
-
-

The auditlogviewer tool usage syntax is as follows.

-
./auditlogviewer
-Audit log files directory path is a required argument.
-usage: auditlogviewer <path1> [<path2>...<pathN>] [options]
---
-View the audit log contents in human readable format
---
-Options are:
--f,--follow       Upon reaching the end of the log continue indefinitely
-                  waiting for more records
--h,--help         display this help message
--r,--roll_cycle   How often to roll the log file was rolled. May be
-                  necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY,
-                  DAILY). Default HOURLY.
-
-
-
-
-

Diagnostic events for user audit logging

-

Any native transport enabled client is able to subscribe to diagnostic events that are raised around authentication and CQL operations. These events can then be consumed and used by external tools to implement a Cassandra user auditing solution.
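If needed, diagnostic events must first be enabled on the node; a sketch of the relevant cassandra.yaml switch (to the best of our understanding of the 4.0 defaults, which leave it off) is:

```yaml
# cassandra.yaml
diagnostic_events_enabled: true
```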

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/new/fqllogging.html b/src/doc/4.0-alpha4/new/fqllogging.html deleted file mode 100644 index 4e344e009..000000000 --- a/src/doc/4.0-alpha4/new/fqllogging.html +++ /dev/null @@ -1,721 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Full Query Logging" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Full Query Logging

-

Apache Cassandra 4.0 adds a new feature for logging all queries as they are invoked (CASSANDRA-13983). For correctness testing it’s useful to be able to capture production traffic so that it can be replayed against both the old and new versions of Cassandra while comparing the results.&#13;

-

Cassandra 4.0 includes an implementation of full query logging (FQL) that uses chronicle-queue to implement a rotating log of queries. Some of the features of FQL are:&#13;

-
    -
  • Single thread asynchronously writes log entries to disk to reduce impact on query latency
  • -
  • Heap memory usage bounded by a weighted queue with configurable maximum weight sitting in front of logging thread
  • -
  • If the weighted queue is full producers can be blocked or samples can be dropped
  • -
  • Disk utilization is bounded by deleting old log segments once a configurable size is reached
  • -
  • The on disk serialization uses a flexible schema binary format (chronicle-wire) making it easy to skip unrecognized fields, add new ones, and omit old ones.
  • -
  • Can be enabled and configured via JMX, disabled, and reset (delete on disk data), logging path is configurable via both JMX and YAML
  • -
  • A new fqltool in /bin currently implements Dump, which can dump full query logs in a readable format as well as follow active full query logs. FQL Replay and Compare are also available.&#13;
  • -
-

Cassandra 4.0 has a binary full query log based on Chronicle Queue that can be controlled using nodetool enablefullquerylog, disablefullquerylog, and resetfullquerylog. The log contains all queries invoked, approximate time they were invoked, any parameters necessary to bind wildcard values, and all query options. A readable version of the log can be dumped or tailed using the new bin/fqltool utility. The full query log is designed to be safe to use in production and limits utilization of heap memory and disk space with limits you can specify when enabling the log.

-
-

Objective

-

Full Query Logging logs all requests to the CQL interface. The full query logs can be used for debugging, performance benchmarking, testing and auditing CQL queries. Audit logs also include CQL requests, but full query logging is dedicated to CQL requests only, with features such as FQL Replay and FQL Compare that are not available in audit logging.&#13;

-
-
-

Full Query Logger

-

The Full Query Logger is a logger that logs entire query contents after the query finishes. FQL only logs queries that successfully complete; other queries (e.g. timed out or failed) are not logged. Queries are logged in one of two modes: single query or batch of queries. The log for an invocation of a batch of queries includes the following attributes:&#13;

-
type - The type of the batch
-queries - CQL text of the queries
-values - Values to bind to as parameters for the queries
-queryOptions - Options associated with the query invocation
-queryState - Timestamp state associated with the query invocation
-batchTimeMillis - Approximate time in milliseconds since the epoch since the batch was invoked
-
-
-

The log for single CQL query includes the following attributes:

-
query - CQL query text
-queryOptions - Options associated with the query invocation
-queryState - Timestamp state associated with the query invocation
-queryTimeMillis - Approximate time in milliseconds since the epoch since the batch was invoked
-
-
-

Full query logging is backed by BinLog, a simple binary log. Its goals are good-enough performance, a predictable footprint, simplicity of implementation and configuration, and, most importantly, minimal impact on producers of log records. Performance safety is accomplished by feeding items to the binary log through a weighted queue and dropping records if the binary log falls sufficiently far behind. Simplicity and good-enough performance are achieved by using a single log-writing thread, with Chronicle Queue handling writing the log, making it available for readers, and log rolling.&#13;

-

The weighted queue is a wrapper around any blocking queue that turns it into a blocking weighted queue. The queue weighs each element being added and removed, and adding to the queue is blocked if adding would violate the weight bound. If an element weighs in at larger than the capacity of the queue, then exactly one such element is allowed into the queue at a time. If the weight of an object changes after it is added it could create issues, so checking weight should be cheap; expensive-to-compute weights should be memoized. A weight function that throws can also result in leaked permits, which is another reason to memoize the weight so it doesn’t throw. In the interest of not writing unit tests for methods no one uses, many methods throw UnsupportedOperationException; if you need them, add them along with proper unit tests (100% coverage, including exception paths and resource leaks) to WeightedQueueTest.&#13;

-

The FQL tracks information about store files:

-
    -
  • Store files as they are added and their storage impact. Delete them if over storage limit.
  • -
  • The files in the chronicle queue that have already rolled
  • -
  • The number of bytes in store files that have already rolled
  • -
-

FQL logger sequence is as follows:

-
    -
  1. Start the consumer thread that writes log records. Can only be done once.
  2. -
  3. Offer a record to the log. If the in memory queue is full the record will be dropped and offer will return false.
  4. -
  5. Put a record into the log. If the in memory queue is full the putting thread will be blocked until there is space or it is interrupted.
  6. -
  7. Clean up the buffers on thread exit, finalization will check again once this is no longer reachable ensuring there are no stragglers in the queue.
  8. -
  9. Stop the consumer thread that writes log records. Can be called multiple times.
  10. -
-

Next, we shall demonstrate full query logging with an example.

-
-
-

Configuring Full Query Logging

-

Full Query Logger default options are configured on a per node basis in cassandra.yaml with following configuration property.

-
full_query_logging_options:
-
-
-

As an example setup create a three node Cassandra 4.0 cluster. The nodetool status command lists the nodes in the cluster.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool status
-Datacenter: us-east-1
-=====================
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  AddressLoad   Tokens  Owns (effective)  Host ID Rack
-UN  10.0.1.115  442.42 KiB  25632.6%   b64cb32a-b32a-46b4-9eeb-e123fa8fc287  us-east-1b
-UN  10.0.3.206  559.52 KiB  25631.9%   74863177-684b-45f4-99f7-d1006625dc9e  us-east-1d
-UN  10.0.2.238  587.87 KiB  25635.5%   4dcdadd2-41f9-4f34-9892-1f20868b27c7  us-east-1c
-
-
-

In subsequent sub-sections we shall discuss enabling and configuring full query logging.

-
-

Setting the FQL Directory

-

A dedicated directory path must be provided to write full query log data to when the full query log is enabled. The directory for FQL must exist and have permissions set. The full query log will recursively delete the contents of this path at times, so it is recommended not to place links in this directory to other sections of the filesystem. The full_query_log_dir property in cassandra.yaml is pre-configured.&#13;

-
full_query_log_dir: /tmp/cassandrafullquerylog
-
-
-

The log_dir option may be used to configure the FQL directory if the full_query_log_dir is not set.

-
full_query_logging_options:
-   # log_dir:
-
-
-

Create the FQL directory if it does not exist and set its permissions.

-
sudo mkdir -p /tmp/cassandrafullquerylog
-sudo chmod -R 777 /tmp/cassandrafullquerylog
-
-
-
-
-

Setting the Roll Cycle

-

The roll_cycle option sets how often to roll FQL log segments so they can potentially be reclaimed. Supported values are MINUTELY, HOURLY and DAILY. Default setting is HOURLY.

-
roll_cycle: HOURLY
-
-
-
-
-

Setting Other Options

-

The block option specifies whether the FQL should block if the FQL falls behind or should drop log records. Default value of block is true. The max_queue_weight option sets the maximum weight of in memory queue for records waiting to be written to the file before blocking or dropping. The max_log_size option sets the maximum size of the rolled files to retain on disk before deleting the oldest file. The archive_command option sets the archive command to execute on rolled log files. The max_archive_retries option sets the max number of retries of failed archive commands.

-
# block: true
-   # max_queue_weight: 268435456 # 256 MiB
-   # max_log_size: 17179869184 # 16 GiB
-   ## archive command is "/path/to/script.sh %path" where %path is replaced with the file
-being rolled:
-   # archive_command:
-   # max_archive_retries: 10
-
-
-

The max_queue_weight must be > 0. Similarly, max_log_size must be > 0. An example of the full query logging options is as follows.&#13;

-
full_query_log_dir: /tmp/cassandrafullquerylog
-
-# default options for full query logging - these can be overridden from command line when
-executing
-# nodetool enablefullquerylog
-# nodetool enablefullquerylog
-#full_query_logging_options:
-   # log_dir:
-   roll_cycle: HOURLY
-   # block: true
-   # max_queue_weight: 268435456 # 256 MiB
-   # max_log_size: 17179869184 # 16 GiB
-   ## archive command is "/path/to/script.sh %path" where %path is replaced with the file
-being rolled:
-   # archive_command:
-   # max_archive_retries: 10
-
-
-

The full_query_log_dir setting is not within full_query_logging_options, but it is still used for full query logging.&#13;

-
-
-

Enabling Full Query Logging

-

Full Query Logging is enabled on a per-node basis. The nodetool enablefullquerylog command is used to enable full query logging. Defaults for the options are configured in cassandra.yaml and these can be overridden from the command line.&#13;

-

The syntax of the nodetool enablefullquerylog command is as follows:

-
 nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-[(-pp | --print-port)] [(-pw <password> | --password <password>)]
-[(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-[(-u <username> | --username <username>)] enablefullquerylog
-[--archive-command <archive_command>] [--blocking]
-[--max-archive-retries <archive_retries>]
-[--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-[--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-  --archive-command <archive_command>
- Command that will handle archiving rolled full query log files.
- Format is "/path/to/script.sh %path" where %path will be replaced
- with the file to archive
-
-  --blocking
- If the queue is full whether to block producers or drop samples.
-
-  -h <host>, --host <host>
- Node hostname or ip address
-
-  --max-archive-retries <archive_retries>
- Max number of archive retries.
-
-  --max-log-size <max_log_size>
- How many bytes of log data to store before dropping segments. Might
- not be respected if a log file hasn't rolled so it can be deleted.
-
-  --max-queue-weight <max_queue_weight>
- Maximum number of bytes of query data to queue to disk before
- blocking or dropping samples.
-
-  -p <port>, --port <port>
- Remote jmx agent port number
-
-  --path <path>
- Path to store the full query log at. Will have it's contents
- recursively deleted.
-
-  -pp, --print-port
- Operate in 4.0 mode with hosts disambiguated by port number
-
-  -pw <password>, --password <password>
- Remote jmx agent password
-
-  -pwf <passwordFilePath>, --password-file <passwordFilePath>
- Path to the JMX password file
-
-  --roll-cycle <roll_cycle>
- How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-  -u <username>, --username <username>
- Remote jmx agent username
-
-
-

Run the following command on each node in the cluster.

-
nodetool enablefullquerylog --path /tmp/cassandrafullquerylog
-
-
-

After full query logging has been enabled, run some CQL statements to generate full query logs.&#13;

-
-
-
-

Running CQL Statements

-

Start the CQL interface with the cqlsh command.&#13;

-
[ec2-user@ip-10-0-2-238 ~]$ cqlsh
-Connected to Cassandra Cluster at 127.0.0.1:9042.
-[cqlsh 5.0.1 | Cassandra 4.0-SNAPSHOT | CQL spec 3.4.5 | Native protocol v4]
-Use HELP for help.
-cqlsh>
-
-
-

Run some CQL statements. Create a keyspace. Create a table and add some data. Query the table.

-
cqlsh> CREATE KEYSPACE AuditLogKeyspace
-  ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};
-cqlsh> USE AuditLogKeyspace;
-cqlsh:auditlogkeyspace> CREATE TABLE t (
-...id int,
-...k int,
-...v text,
-...PRIMARY KEY (id)
-... );
-cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (0, 1, 'val1');
-cqlsh:auditlogkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+------
- 0 | 1 | val1
-
-(1 rows)
-cqlsh:auditlogkeyspace>
-
-
-
-
-

Viewing the Full Query Logs

-

The fqltool is used to view the full query logs. The fqltool has the following usage syntax.

-
fqltool <command> [<args>]
-
-The most commonly used fqltool commands are:
-   compare   Compare result files generated by fqltool replay
-   dump Dump the contents of a full query log
-   help Display help information
-   replay    Replay full query logs
-
-See 'fqltool help <command>' for more information on a specific command.
-
-
-

The fqltool dump command is used to dump (list) the contents of a full query log. Run the fqltool dump command after some CQL statements have been run.

-

The full query logs get listed. Truncated output is as follows:

-
[ec2-user@ip-10-0-2-238 cassandrafullquerylog]$ fqltool dump ./
-WARN  [main] 2019-08-02 03:07:53,635 Slf4jExceptionHandler.java:42 - Using Pauser.sleepy() as not enough processors, have 2, needs 8+
-Type: single-query
-Query start time: 1564708322030
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system.peers
-Values:
-
-Type: single-query
-Query start time: 1564708322054
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system.local WHERE key='local'
-Values:
-
-Type: single-query
-Query start time: 1564708322109
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.keyspaces
-Values:
-
-Type: single-query
-Query start time: 1564708322116
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.tables
-Values:
-
-Type: single-query
-Query start time: 1564708322139
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.columns
-Values:
-
-Type: single-query
-Query start time: 1564708322142
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.functions
-Values:
-
-Type: single-query
-Query start time: 1564708322141
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.aggregates
-Values:
-
-Type: single-query
-Query start time: 1564708322143
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.types
-Values:
-
-Type: single-query
-Query start time: 1564708322144
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.indexes
-Values:
-
-Type: single-query
-Query start time: 1564708322142
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.triggers
-Values:
-
-Type: single-query
-Query start time: 1564708322145
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.views
-Values:
-
-Type: single-query
-Query start time: 1564708345408
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:-2147483648
-Query: CREATE KEYSPACE AuditLogKeyspace
-WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};
-Values:
-
-Type: single-query
-Query start time: 1564708345675
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708345
-Query: SELECT peer, rpc_address, schema_version FROM system.peers
-Values:
-
-Type: single-query
-Query start time: 1564708345676
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708345
-Query: SELECT schema_version FROM system.local WHERE key='local'
-Values:
-
-Type: single-query
-Query start time: 1564708346323
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708346
-Query: SELECT * FROM system_schema.keyspaces WHERE keyspace_name = 'auditlogkeyspace'
-Values:
-
-Type: single-query
-Query start time: 1564708360873
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:-2147483648
-Query: USE AuditLogKeyspace;
-Values:
-
-Type: single-query
-Query start time: 1564708360874
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:-2147483648
-Query: USE "auditlogkeyspace"
-Values:
-
-Type: single-query
-Query start time: 1564708378837
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:-2147483648
-Query: CREATE TABLE t (
-    id int,
-    k int,
-    v text,
-    PRIMARY KEY (id)
-);
-Values:
-
-Type: single-query
-Query start time: 1564708379247
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708379
-Query: SELECT * FROM system_schema.tables WHERE keyspace_name = 'auditlogkeyspace' AND table_name = 't'
-Values:
-
-Type: single-query
-Query start time: 1564708379255
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708379
-Query: SELECT * FROM system_schema.views WHERE keyspace_name = 'auditlogkeyspace' AND view_name = 't'
-Values:
-
-Type: single-query
-Query start time: 1564708397144
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708397
-Query: INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-Values:
-
-Type: single-query
-Query start time: 1564708397167
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708397
-Query: INSERT INTO t (id, k, v) VALUES (0, 1, 'val1');
-Values:
-
-Type: single-query
-Query start time: 1564708434782
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708434
-Query: SELECT * FROM t;
-Values:
-
-[ec2-user@ip-10-0-2-238 cassandrafullquerylog]$
-
-
-

Full query logs are generated on each node. Enabling full query logging on one node and the log files generated on that node are as follows:&#13;

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@52.1.243.83
-Last login: Fri Aug  2 00:14:53 2019 from 75.155.255.51
-[ec2-user@ip-10-0-3-206 ~]$ sudo mkdir /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-3-206 ~]$ sudo chmod -R 777 /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-3-206 ~]$ nodetool enablefullquerylog --path /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-3-206 ~]$ cd /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-3-206 cassandrafullquerylog]$ ls -l
-total 44
--rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug  2 01:24 20190802-01.cq4
--rw-rw-r--. 1 ec2-user ec2-user    65536 Aug  2 01:23 directory-listing.cq4t
-[ec2-user@ip-10-0-3-206 cassandrafullquerylog]$
-
-
-

Enabling full query logging on another node and the log files generated on that node are as follows:&#13;

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@3.86.103.229
-Last login: Fri Aug  2 00:13:04 2019 from 75.155.255.51
-[ec2-user@ip-10-0-1-115 ~]$ sudo mkdir /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-1-115 ~]$ sudo chmod -R 777 /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-1-115 ~]$ nodetool enablefullquerylog --path /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-1-115 ~]$ cd /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-1-115 cassandrafullquerylog]$ ls -l
-total 44
--rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug  2 01:24 20190802-01.cq4
--rw-rw-r--. 1 ec2-user ec2-user    65536 Aug  2 01:23 directory-listing.cq4t
-[ec2-user@ip-10-0-1-115 cassandrafullquerylog]$
-
-
-

The nodetool resetfullquerylog command resets the full query logger if it is enabled, and also deletes any generated files in the last used full query log path as well as the one configured in cassandra.yaml. It stops the full query log and cleans files in the full query log directory configured in cassandra.yaml as well as the one set via JMX.&#13;

-
-
-

Full Query Replay

-

The fqltool provides the replay command (CASSANDRA-14618) to replay the full query logs. The FQL replay could be run on a different machine or even a different cluster for testing, debugging and performance benchmarking.

-

The main objectives of fqltool replay are:

-
    -
  • To be able to compare different runs of production traffic against different versions/configurations of Cassandra.
  • -
  • Take FQL logs from several machines and replay them in “order” by the timestamps recorded.
  • -
  • Record the results from each run to be able to compare different runs (against different clusters/versions/etc).
  • -
  • If fqltool replay is run against 2 or more clusters, the results could be compared.
  • -
-

FQL replay can also be used on the same node on which the full query logs were generated, for example to recreate a dropped database object.&#13;

-
-
The syntax of fqltool replay is as follows:
-
 fqltool replay [--keyspace <keyspace>] [--results <results>]
-[--store-queries <store_queries>] --target <target>... [--] <path1>
-[<path2>...<pathN>]
-
-OPTIONS
-  --keyspace <keyspace>
- Only replay queries against this keyspace and queries without
- keyspace set.
-
-  --results <results>
- Where to store the results of the queries, this should be a
- directory. Leave this option out to avoid storing results.
-
-  --store-queries <store_queries>
- Path to store the queries executed. Stores queries in the same order
- as the result sets are in the result files. Requires --results
-
-  --target <target>
- Hosts to replay the logs to, can be repeated to replay to more
- hosts.
-
-  --
- This option can be used to separate command-line options from the
- list of argument, (useful when arguments might be mistaken for
- command-line options
-
-  <path1> [<path2>...<pathN>]
- Paths containing the full query logs to replay.
-
-
-

As an example of using fqltool replay, drop a keyspace.

-
cqlsh:auditlogkeyspace> DROP KEYSPACE AuditLogKeyspace;
-
-
-

Subsequently run fqltool replay. The directory in which to store the query results and the directory in which to store the queries run are specified on the command line; these directories must be created, and their permissions set, before running fqltool replay. The --results and --store-queries options are optional, but if --store-queries is set then --results must also be set.
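A sketch of preparing those directories, using the same paths as the command below (adjust ownership and permissions to suit your environment):

$ mkdir -p /cassandra/fql/logs/results/replay
$ mkdir -p /cassandra/fql/logs/queries/replay
$ chmod -R 777 /cassandra/fql/logs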

-
[ec2-user@ip-10-0-2-238 cassandra]$ fqltool replay --keyspace AuditLogKeyspace --results
-/cassandra/fql/logs/results/replay --store-queries /cassandra/fql/logs/queries/replay --
-target 3.91.56.164 -- /tmp/cassandrafullquerylog
-
-
-

Describe the keyspaces after running fqltool replay; the keyspace that was dropped is listed again.

-
cqlsh:auditlogkeyspace> DESC KEYSPACES;
-
-system_schema  system  system_distributed  system_virtual_schema
-system_auth    auditlogkeyspace  system_traces  system_views
-
-cqlsh:auditlogkeyspace>
-
-
-
-
-

Full Query Compare

-

The fqltool compare command (CASSANDRA-14619) is used to compare result files generated by fqltool replay. It takes the recorded runs from fqltool replay and compares them, outputting any differences and, potentially, all queries against the mismatching partition up until the mismatch.

-

For example, fqltool compare can be used to compare result files generated by different versions of Cassandra, or by different Cassandra configurations. The command usage is as follows:

-
[ec2-user@ip-10-0-2-238 ~]$ fqltool help compare
-NAME
-  fqltool compare - Compare result files generated by fqltool replay
-
-SYNOPSIS
-  fqltool compare --queries <queries> [--] <path1> [<path2>...<pathN>]
-
-OPTIONS
-  --queries <queries>
- Directory to read the queries from. It is produced by the fqltool
- replay --store-queries option.
-
-  --
- This option can be used to separate command-line options from the
- list of argument, (useful when arguments might be mistaken for
- command-line options
-
-  <path1> [<path2>...<pathN>]
- Directories containing result files to compare.
-
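An illustrative invocation (the two result directories are hypothetical and would be produced by two separate fqltool replay runs using --results and --store-queries):

$ fqltool compare --queries /cassandra/fql/logs/queries/replay -- /cassandra/fql/logs/results/replay /cassandra/fql/logs/results/replay2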
-
-

The fqltool compare command stores each row as a separate chronicle document, to avoid reading the entire result set into memory when comparing. The document formats are:

-

To mark the start of a new result set:

-
-------------------
-version: int16
-type: column_definitions
-column_count: int32;
-column_definition: text, text
-column_definition: text, text
-....
---------------------
-
-
-

To mark a failed query set:

-
---------------------
-version: int16
-type: query_failed
-message: text
----------------------
-
-
-

To mark a row set:

-
--------------------
-version: int16
-type: row
-row_column_count: int32
-column: bytes
----------------------
-
-
-

To mark the end of a result set:

-
-------------------
-version: int16
-type: end_resultset
--------------------
-
-
-
-
-

Performance Overhead of FQL

-

In performance testing, FQL appears to have little or no overhead in WRITE-only workloads, and a minor overhead in MIXED workloads.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/new/index.html b/src/doc/4.0-alpha4/new/index.html deleted file mode 100644 index 973a641e7..000000000 --- a/src/doc/4.0-alpha4/new/index.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "New Features in Apache Cassandra 4.0" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

New Features in Apache Cassandra 4.0

-

This section covers the new features in Apache Cassandra 4.0.

-
- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/new/java11.html b/src/doc/4.0-alpha4/new/java11.html deleted file mode 100644 index dcf5fa865..000000000 --- a/src/doc/4.0-alpha4/new/java11.html +++ /dev/null @@ -1,354 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Support for Java 11" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Support for Java 11

-

In the new Java release cadence a new Java version is made available every six months. The more frequent release cycle is favored as it brings new Java features to developers as they are developed, without the wait that the earlier three-year release model incurred. Not every Java version is a Long Term Support (LTS) version. After Java 8 the next LTS version is Java 11. Java 9, 10, 12 and 13 are all non-LTS versions.

-

One of the objectives of the Apache Cassandra 4.0 version is to support the recent LTS Java versions 8 and 11 (CASSANDRA-9608). Java 8 and -Java 11 may be used to build and run Apache Cassandra 4.0.

-

Note: Support for JDK 11 in Apache Cassandra 4.0 is an experimental feature, and not recommended for production use.

-
-

Support Matrix

-

The support matrix for the Java versions for compiling and running Apache Cassandra 4.0 is detailed in Table 1. The -build version is along the vertical axis and the run version is along the horizontal axis.

-

Table 1 : Support Matrix for Java

                  | Java 8 (Run)  | Java 11 (Run)
  Java 8 (Build)  | Supported     | Supported
  Java 11 (Build) | Not Supported | Supported
-

Essentially, Apache Cassandra 4.0 source code built with Java 11 cannot be run with Java 8. Next, we shall discuss using each of Java 8 and Java 11 to build and run Apache Cassandra 4.0.

-
-
-

Using Java 8 to Build

-

To start with, install Java 8. As an example, for installing Java 8 on RedHat Linux the command is as follows:

-
$ sudo yum install java-1.8.0-openjdk-devel
-
-
-

Set the JAVA_HOME and JRE_HOME environment variables in the shell startup script. First, open the script:

-
$ sudo vi ~/.bashrc
-
-
-

Set the environment variables including the PATH.

-
$ export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk
-$ export JRE_HOME=/usr/lib/jvm/java-1.8.0-openjdk/jre
-$ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
-
-
-

Download the Apache Cassandra 4.0 source code from Git, along with its dependencies.

-
$ git clone https://github.com/apache/cassandra.git
-
-
-

If Cassandra is already running stop Cassandra with the following command.

-
[ec2-user@ip-172-30-3-146 bin]$ ./nodetool stopdaemon
-
-
-

Build the source code from the cassandra directory, which has the build.xml build script. The Apache Ant uses the Java version set in the JAVA_HOME environment variable.

-
$ cd ~/cassandra
-$ ant
-
-
-

Apache Cassandra 4.0 is now built with Java 8. Set the CASSANDRA_HOME environment variable in the bash script and add CASSANDRA_HOME/bin to the PATH variable.

-
$ export CASSANDRA_HOME=~/cassandra
-$ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$CASSANDRA_HOME/bin
-
-
-

To run Apache Cassandra 4.0 with either Java 8 or Java 11, run the cassandra script in the CASSANDRA_HOME/bin directory, which is on the PATH.

-
$ cassandra
-
-
-

The Java version used to run Cassandra is output as Cassandra starts up. As an example, if Java 11 is used, the startup output should include a snippet similar to the following:

-
INFO  [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:480 - Hostname: ip-172-30-3-
-146.ec2.internal:7000:7001
-INFO  [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:487 - JVM vendor/version: OpenJDK
-64-Bit Server VM/11.0.3
-INFO  [main] 2019-07-31 21:18:16,863 CassandraDaemon.java:488 - Heap size:
-1004.000MiB/1004.000MiB
-
-
-

The following output indicates a single node Cassandra 4.0 cluster has started.

-
INFO  [main] 2019-07-31 21:18:19,687 InboundConnectionInitiator.java:130 - Listening on
-address: (127.0.0.1:7000), nic: lo, encryption: enabled (openssl)
-...
-...
-INFO  [main] 2019-07-31 21:18:19,850 StorageService.java:512 - Unable to gossip with any
-peers but continuing anyway since node is in its own seed list
-INFO  [main] 2019-07-31 21:18:19,864 StorageService.java:695 - Loading persisted ring state
-INFO  [main] 2019-07-31 21:18:19,865 StorageService.java:814 - Starting up server gossip
-INFO  [main] 2019-07-31 21:18:20,088 BufferPool.java:216 - Global buffer pool is enabled,
-when pool is exhausted (max is 251.000MiB) it will allocate on heap
-INFO  [main] 2019-07-31 21:18:20,110 StorageService.java:875 - This node will not auto
-bootstrap because it is configured to be a seed node.
-...
-...
-INFO  [main] 2019-07-31 21:18:20,809 StorageService.java:1507 - JOINING: Finish joining ring
-INFO  [main] 2019-07-31 21:18:20,921 StorageService.java:2508 - Node 127.0.0.1:7000 state
-jump to NORMAL
-
-
-
-
-

Using Java 11 to Build

-

If Java 11 is used to build Apache Cassandra 4.0, first Java 11 must be installed and the environment variables set. As an example, to download and install Java 11 on RedHat Linux run the following command.

-
$ yum install java-11-openjdk-devel
-
-
-

Set the environment variables in the bash script for Java 11. The first command is to open the bash script.

-
$ sudo vi ~/.bashrc
-$ export JAVA_HOME=/usr/lib/jvm/java-11-openjdk
-$ export JRE_HOME=/usr/lib/jvm/java-11-openjdk/jre
-$ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
-
-
-

To build the source code with Java 11, one of the following two options must be used.

-
-
  1. Include the Apache Ant command-line option -Duse.jdk11=true as follows:

     $ ant -Duse.jdk11=true

  2. Set the environment variable CASSANDRA_USE_JDK11 to true:

     $ export CASSANDRA_USE_JDK11=true
-
-

As an example, set the environment variable CASSANDRA_USE_JDK11 to true.

-
[ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true
-[ec2-user@ip-172-30-3-146 cassandra]$ ant
-Buildfile: /home/ec2-user/cassandra/build.xml
-
-
-

Or, set the command-line option.

-
[ec2-user@ip-172-30-3-146 cassandra]$ ant -Duse.jdk11=true
-
-
-

The build output should include the following.

-
_build_java:
-    [echo] Compiling for Java 11
-...
-...
-build:
-
-_main-jar:
-         [copy] Copying 1 file to /home/ec2-user/cassandra/build/classes/main/META-INF
-     [jar] Building jar: /home/ec2-user/cassandra/build/apache-cassandra-4.0-SNAPSHOT.jar
-...
-...
-_build-test:
-   [javac] Compiling 739 source files to /home/ec2-user/cassandra/build/test/classes
-    [copy] Copying 25 files to /home/ec2-user/cassandra/build/test/classes
-...
-...
-jar:
-   [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/stress/META-INF
-   [mkdir] Created dir: /home/ec2-user/cassandra/build/tools/lib
-     [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/stress.jar
-   [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/fqltool/META-INF
-     [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/fqltool.jar
-
-BUILD SUCCESSFUL
-Total time: 1 minute 3 seconds
-[ec2-user@ip-172-30-3-146 cassandra]$
-
-
-
-
-

Common Issues

-

One of the two options mentioned above must be used to compile with JDK 11, or the build fails and the following error message is output.

-
[ec2-user@ip-172-30-3-146 cassandra]$ ant
-Buildfile: /home/ec2-user/cassandra/build.xml
-validate-build-conf:
-
-BUILD FAILED
-/home/ec2-user/cassandra/build.xml:293: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true must
-be set when building from java 11
-Total time: 1 second
-[ec2-user@ip-172-30-3-146 cassandra]$
-
-
-

Apache Cassandra 4.0 source code built with Java 11 may be run with Java 11 only. If code built with Java 11 is run with Java 8, the following error message is output.

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com
-Last login: Wed Jul 31 20:47:26 2019 from 75.155.255.51
-[ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME
-/usr/lib/jvm/java-1.8.0-openjdk
-[ec2-user@ip-172-30-3-146 ~]$ cassandra
-...
-...
-Error: A JNI error has occurred, please check your installation and try again
-Exception in thread "main" java.lang.UnsupportedClassVersionError:
-org/apache/cassandra/service/CassandraDaemon has been compiled by a more recent version of
-the Java Runtime (class file version 55.0), this version of the Java Runtime only recognizes
-class file versions up to 52.0
-  at java.lang.ClassLoader.defineClass1(Native Method)
-  at java.lang.ClassLoader.defineClass(ClassLoader.java:763)
-  at ...
-...
-
-
-

The CASSANDRA_USE_JDK11 variable or the command-line option -Duse.jdk11 cannot be used when building with Java 8. To demonstrate, set JAVA_HOME to a Java 8 JDK.

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com
-Last login: Wed Jul 31 21:41:50 2019 from 75.155.255.51
-[ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME
-/usr/lib/jvm/java-1.8.0-openjdk
-
-
-

Set CASSANDRA_USE_JDK11=true or the command-line option -Duse.jdk11=true, then run Apache Ant to start the build. The build fails with the error message listed below.

-
-[ec2-user@ip-172-30-3-146 ~]$ cd cassandra
-[ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true
-[ec2-user@ip-172-30-3-146 cassandra]$ ant
-Buildfile: /home/ec2-user/cassandra/build.xml
-
-validate-build-conf:
-
-BUILD FAILED
-/home/ec2-user/cassandra/build.xml:285: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true cannot
-be set when building from java 8
-
-Total time: 0 seconds
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/new/messaging.html b/src/doc/4.0-alpha4/new/messaging.html deleted file mode 100644 index bdc9fc371..000000000 --- a/src/doc/4.0-alpha4/new/messaging.html +++ /dev/null @@ -1,344 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Improved Internode Messaging" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Improved Internode Messaging

-

Apache Cassandra 4.0 has added several new improvements to internode messaging.

-
-

Optimized Internode Messaging Protocol

-

The internode messaging protocol has been optimized (CASSANDRA-14485). Previously, the IPAddressAndPort of the sender was included with each message that was sent, even though the IPAddressAndPort had already been sent once when the initial connection/session was established. In Cassandra 4.0 the IPAddressAndPort has been removed from every individual message and is only sent when the connection/session is initiated.

-

Another improvement is that in several instances (listed below) a fixed 4-byte integer value has been replaced with a vint, as a vint is almost always less than 1 byte:

-
  • The paramSize (the number of parameters in the header)
  • Each individual parameter value
  • The payloadSize
-
-

NIO Messaging

-

In Cassandra 4.0 peer-to-peer (internode) messaging has been switched to non-blocking I/O (NIO) via Netty (CASSANDRA-8457).

-

As serialization format, each message contains a header with several fixed fields, an optional key-value parameters section, and then the message payload itself. Note: the IP address in the header may be either IPv4 (4 bytes) or IPv6 (16 bytes).

-
-
The diagram below shows the IPv4 address for brevity.
-
           1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4 5 5 5 5 5 6 6
- 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                       PROTOCOL MAGIC                          |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                         Message ID                            |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                         Timestamp                             |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|  Addr len |           IP Address (IPv4)                       /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/           |                 Verb                              /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/           |            Parameters size                        /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/           |             Parameter data                        /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/                                                               |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                        Payload size                           |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                                                               /
-/                           Payload                             /
-/                                                               |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-
-

An individual parameter has a String key and a byte array value. The key is serialized with its length, encoded as two bytes, followed by the UTF-8 byte encoding of the string. The body is serialized with its length, encoded as four bytes, followed by the bytes of the value.

-
-
-

Resource limits on Queued Messages

-

System stability is improved by enforcing strict resource limits (CASSANDRA-15066) on the number of outbound messages that are queued, measured by the serializedSize of the message. There are three separate limits imposed simultaneously to ensure that progress is always made without any reasonable combination of failures impacting a node’s stability.

-
  1. Global, per-endpoint and per-connection limits are imposed on messages queued for delivery to other nodes and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire size of the message being sent or received.
  2. The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. Each node-pair has three links: urgent, small and large. So any given node may have a maximum of N*3 * (internode_application_send_queue_capacity_in_bytes + internode_application_receive_queue_capacity_in_bytes) messages queued without any coordination between them, although in practice, with token-aware routing, only RF*tokens nodes should need to communicate with significant bandwidth.
  3. The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, on all links to or from a single node in the cluster. The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, on all links to or from any node in the cluster.

The following configuration settings have been added to cassandra.yaml for resource limits on queued messages.
internode_application_send_queue_capacity_in_bytes: 4194304 #4MiB
-internode_application_send_queue_reserve_endpoint_capacity_in_bytes: 134217728  #128MiB
-internode_application_send_queue_reserve_global_capacity_in_bytes: 536870912    #512MiB
-internode_application_receive_queue_capacity_in_bytes: 4194304                  #4MiB
-internode_application_receive_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB
-internode_application_receive_queue_reserve_global_capacity_in_bytes: 536870912   #512MiB
-
-
-
-
-

Virtual Tables for Messaging Metrics

-

Metrics is improved by keeping metrics using virtual tables for inter-node inbound and outbound messaging (CASSANDRA-15066). For inbound messaging a virtual table (internode_inbound) has been added to keep metrics for:

-
  • Bytes and count of messages that could not be serialized or flushed due to an error
  • Bytes and count of messages scheduled
  • Bytes and count of messages successfully processed
  • Bytes and count of messages successfully received
  • Nanos and count of messages throttled
  • Bytes and count of messages expired
  • Corrupt frames recovered and unrecovered

A separate virtual table (internode_outbound) has been added for outbound inter-node messaging. The outbound virtual table keeps metrics for:

-
  • Bytes and count of messages pending
  • Bytes and count of messages sent
  • Bytes and count of messages expired
  • Bytes and count of messages that could not be sent due to an error
  • Bytes and count of messages overloaded
  • Active Connection Count
  • Connection Attempts
  • Successful Connection Attempts
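Both virtual tables live in the system_views keyspace and can be queried directly from cqlsh, for example:

cqlsh> SELECT * FROM system_views.internode_outbound;
cqlsh> SELECT * FROM system_views.internode_inbound;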
-
-

Hint Messaging

-

A specialized version of the hint message has been added that takes a hint already encoded in a ByteBuffer and sends it verbatim. It is an optimization for dispatching a hint file of the current messaging version to a node of the same messaging version, which is the most common case. It saves extra ByteBuffer allocations and one redundant hint deserialization-serialization cycle.

-
-
-

Internode Application Timeout

-

A configuration setting has been added to cassandra.yaml for the maximum continuous period a connection may be unwritable in application space.

-
# internode_application_timeout_in_ms = 30000
-
-
-

Other new features include logging of the message size in the trace message when tracing a query.

-
-
-

Paxos prepare and propose stage for local requests optimized

-

In pre-4.0, Paxos prepare and propose messages always go through the entire MessagingService stack in Cassandra, even if the request is to be served locally. In 4.0, local requests are served without involving the MessagingService, similar to how the MessagingService stage is skipped for local requests elsewhere in Cassandra.

-

This is what it looks like in pre 4.0 if we have tracing on and run a light-weight transaction:

-
Sending PAXOS_PREPARE message to /A.B.C.D [MessagingService-Outgoing-/A.B.C.D] | 2017-09-11
-21:55:18.971000 | A.B.C.D | 15045
-… REQUEST_RESPONSE message received from /A.B.C.D [MessagingService-Incoming-/A.B.C.D] |
-2017-09-11 21:55:18.976000 | A.B.C.D | 20270
-… Processing response from /A.B.C.D [SharedPool-Worker-4] | 2017-09-11 21:55:18.976000 |
-A.B.C.D | 20372
-
-
-

Same thing applies for Propose stage as well.

-

In version 4.0 Paxos prepare and propose stage for local requests are optimized (CASSANDRA-13862).

-
-
-

Quality Assurance

-

Several other quality assurance improvements have been made in version 4.0 (CASSANDRA-15066).

-
-

Framing

-

Version 4.0 introduces framing for all internode messages, i.e. the grouping of messages into a single logical payload with headers and trailers. Frames are guaranteed either to contain at most one message that is split into its own unique sequence of frames (for large messages), or to contain only complete messages.

-
-
-

Corruption prevention

-

Previously, intra-datacenter internode messages would be unprotected from corruption by default, as only LZ4 provided any integrity checks. All messages to post 4.0 nodes are written to explicit frames, which may be:

-
  • LZ4 encoded
  • CRC protected

The Unprotected option is still available.

-
-
-

Resilience

-

For resilience, all frames are written with a separate CRC protected header, of 8 and 6 bytes respectively. If corruption occurs in this header, the connection must be reset, as before. If corruption occurs anywhere outside of the header, the corrupt frame will be skipped, leaving the connection intact and avoiding the loss of any messages unnecessarily.

-

Previously, any issue at any point in the stream would result in the connection being reset, with the loss of any in-flight messages.

-
-
-

Efficiency

-

The overall memory usage, and number of byte shuffles, on both inbound and outbound messages is reduced.

-

On the outbound path, the Netty LZ4 encoder maintains a chunk-size buffer (64KiB) that is filled before any compressed frame can be produced. Our frame encoders avoid this redundant copy, as well as freeing 192KiB per endpoint.

-

Inbound, frame decoders guarantee only to copy the number of bytes necessary to parse a frame, and to never store more bytes than necessary. This improvement applies twice to LZ4 connections, improving both the message decode and the LZ4 frame decode.

-
-
-

Inbound Path

-

Version 4.0 introduces several improvements to the inbound path.

-

An appropriate message handler is used based on whether large or small messages are expected on a particular connection, as set in a flag. NonblockingBufferHandler, running on the event loop, is used for small messages, and BlockingBufferHandler, running off the event loop, for large messages. The single implementation of InboundMessageHandler handles messages of any size effectively by deriving the size of the incoming message from the byte stream. In addition to deriving the size of the message from the stream, the incoming message expiration time is proactively read before attempting to deserialize the entire message. If the message has already expired by the time it is encountered, it is simply skipped in the byte stream altogether. If a message fails to be deserialized while still on the receiving side - say, because a table id or column is unknown - the bytes are skipped, without dropping the entire connection and losing all the buffered messages. An immediate reply is sent back to the coordinator node with the failure reason, rather than waiting for the coordinator callback to expire. This logic is extended to a corrupted frame; a corrupted frame is safely skipped over without dropping the connection.

-

Inbound path imposes strict limits on memory utilization. Specifically, the memory occupied by all parsed, but unprocessed messages is bound - on per-connection, per-endpoint, and global basis. Once a connection exceeds its local unprocessed capacity and cannot borrow any permits from per-endpoint and global reserve, it simply stops processing further messages, providing natural backpressure - until sufficient capacity is regained.

-
-
-

Outbound Connections

-
-

Opening a connection

-

A consistent approach is adopted for all kinds of failure to connect, including: refused by endpoint, incompatible versions, or unexpected exceptions;

-
  • Retry forever, until either success or there are no messages waiting to deliver.
  • Wait incrementally longer periods before reconnecting, up to a maximum of 1s.
  • While failing to connect, no reserve queue limits are acquired.
-
-

Closing a connection

-
  • Correctly drains outbound messages that are waiting to be delivered (unless disconnected and failing to reconnect).
  • Messages written to a closing connection are either delivered or rejected, with a new connection being opened if the old one is irrevocably closed.
  • Unused connections are pruned eventually.
-
-

Reconnecting

-

We sometimes need to reconnect a perfectly valid connection, e.g. if the preferred IP address changes. We ensure that the underlying connection has no in-progress operations before closing it and reconnecting.

-
-
-

Message Failure

-

Message failure propagates to callbacks instantly, better preventing overload by reclaiming committed memory.

-
-
Expiry
-
  • No longer experiences head-of-line blocking (e.g. an undroppable message preventing all droppable messages from being expired).
  • While overloaded, expiry is attempted eagerly on enqueuing threads.
  • While disconnected, regular pruning is scheduled, to handle the case where messages are no longer being sent but there is a large backlog to expire.
-
-
Overload
-
  • Tracked by bytes queued, as opposed to number of messages.
-
-
Serialization Errors
-
  • Do not result in the connection being invalidated; the message is simply completed with failure, and then erased from the frame.
  • Includes detected mismatches between the calculated serialization size and the actual size.

Failures to flush to the network, perhaps because the connection has been reset, are not currently notified to callback handlers, as the necessary information has been discarded, though it would be possible to do so in the future if we decide it is worthwhile.

-
-
-
-

QoS

-

“Gossip” connection has been replaced with a general purpose “Urgent” connection, for any small messages impacting system stability.

-
-
-

Metrics

-

We track, and expose via Virtual Table and JMX, the number of messages and bytes that: we could not serialize or flush due to an error, we dropped due to overload or timeout, are pending, and have successfully sent.

-
-
-
-
-

Added a Message size limit

-

Cassandra pre-4.0 does not protect the server from allocating huge buffers for inter-node Message objects. A message size limit helps deal with issues such as a malfunctioning cluster participant. Version 4.0 introduces a maximum message size configuration parameter, akin to the maximum mutation size, set to the endpoint reserve capacity by default.

-
-
-

Recover from unknown table when deserializing internode messages

-

As discussed in CASSANDRA-9289, it is desirable to gracefully recover from seeing an unknown table in a message from another node. Pre-4.0, we close the connection and reconnect, which can cause other concurrent queries to fail. Version 4.0 fixes the issue by wrapping the message in-stream with TrackedDataInputPlus, catching UnknownCFException, and skipping the remaining bytes in the message. The TCP connection is not closed and remains usable for other messages.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/new/streaming.html b/src/doc/4.0-alpha4/new/streaming.html deleted file mode 100644 index 962929c43..000000000 --- a/src/doc/4.0-alpha4/new/streaming.html +++ /dev/null @@ -1,260 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Improved Streaming" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Improved Streaming

-

Apache Cassandra 4.0 has made several improvements to streaming. Streaming is the process used by nodes of a cluster to exchange data in the form of SSTables. Streaming of SSTables is performed for several operations, such as:

-
  • SSTable Repair
  • Host Replacement
  • Range movements
  • Bootstrapping
  • Rebuild
  • Cluster expansion
-

Streaming based on Netty

-

Streaming in Cassandra 4.0 is based on Non-blocking Input/Output (NIO) with Netty (CASSANDRA-12229). It replaces the single-threaded (or sequential), synchronous, blocking model of streaming messages and transfer of files. Netty supports non-blocking, asynchronous, multi-threaded streaming with which multiple connections are opened simultaneously. Non-blocking implies that threads are not blocked as they don’t wait for a response for a sent request. A response could be returned in a different thread. With asynchronous, connections and threads are decoupled and do not have a 1:1 relation. Several more connections than threads may be opened.

-
-
-

Zero Copy Streaming

-

Pre-4.0, during streaming Cassandra reifies the SSTables into objects. This creates unnecessary garbage and slows down the whole streaming process, as some SSTables can be transferred as a whole file rather than as individual partitions. Cassandra 4.0 has added support for streaming entire SSTables when possible (CASSANDRA-14556) for faster streaming using zero-copy APIs. If enabled, Cassandra will use zero-copy streaming for eligible SSTables, significantly speeding up transfers and increasing throughput. A zero-copy path avoids bringing data into user space on both the sending and the receiving side. Any streaming-related operations will notice a corresponding improvement. Zero copy streaming is hardware bound; it is only limited by the hardware (network and disk IO).

-
-

High Availability

-

In benchmark tests Zero Copy Streaming is 5x faster than partition-based streaming. Faster streaming provides the benefit of improved availability. A cluster's recovery mainly depends on streaming speed; Cassandra clusters with failed nodes will be able to recover much more quickly (5x faster). If a node fails, SSTables need to be streamed to a replacement node. During the replacement operation, the new Cassandra node streams SSTables from the neighboring nodes that hold copies of the data belonging to this new node's token range. Depending on the amount of data stored, this process can require substantial network bandwidth and take some time to complete. The longer these range movement operations take, the more cluster availability is reduced. Failure of multiple nodes would reduce high availability greatly. The faster the new node completes streaming its data, the faster it can serve traffic, increasing the availability of the cluster.

-
-
-

Enabling Zero Copy Streaming

-

Zero copy streaming is enabled by setting the following setting in cassandra.yaml.

-
stream_entire_sstables: true
-
-
-

By default zero copy streaming is enabled.

-
-
-

SSTables Eligible for Zero Copy Streaming

-

Zero copy streaming is used if all partitions within the SSTable need to be transmitted. This is common when using LeveledCompactionStrategy or when partitioning SSTables by token range has been enabled. All partition keys in the SSTables are iterated over to determine the eligibility for Zero Copy streaming.

-
-
-

Benefits of Zero Copy Streaming

-

When enabled, it permits Cassandra to zero-copy stream entire eligible SSTables between nodes, including every component. This speeds up the network transfer significantly subject to throttling specified by stream_throughput_outbound_megabits_per_sec.

-

Enabling this will reduce the GC pressure on sending and receiving node. While this feature tries to keep the disks balanced, it cannot guarantee it. This feature will be automatically disabled if internode encryption is enabled. Currently this can be used with Leveled Compaction.

-
-
-

Configuring for Zero Copy Streaming

-

Throttling would reduce the streaming speed. The stream_throughput_outbound_megabits_per_sec throttles all outbound streaming file transfers on a node to the given total throughput in Mbps. When unset, the default is 200 Mbps or 25 MB/s.

-
stream_throughput_outbound_megabits_per_sec: 200
-
-
-

To run any zero copy streaming benchmark, stream_throughput_outbound_megabits_per_sec must be set to a very high value; otherwise, throttling will be significant and the benchmark results will not be meaningful.
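For benchmarking, the throttle can be effectively lifted by setting it to an arbitrarily large value in cassandra.yaml (the figure below is only illustrative):

stream_throughput_outbound_megabits_per_sec: 10000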

-

The inter_dc_stream_throughput_outbound_megabits_per_sec setting throttles all streaming file transfers between datacenters. It allows users to throttle inter-DC stream throughput in addition to throttling all network stream traffic as configured with stream_throughput_outbound_megabits_per_sec. When unset, the default is 200 Mbps or 25 MB/s.

-
inter_dc_stream_throughput_outbound_megabits_per_sec: 200
-
-
-
-
-

SSTable Components Streamed with Zero Copy Streaming

-

Zero Copy Streaming streams entire SSTables. SSTables are made up of multiple components in separate files. SSTable components streamed are listed in Table 1.

-

Table 1. SSTable Components

SSTable Component    Description
Data.db              The base data for an SSTable: the remaining components can be regenerated based on the data component.
Index.db             Index of the row keys with pointers to their positions in the data file.
Filter.db            Serialized bloom filter for the row keys in the SSTable.
CompressionInfo.db   File to hold information about uncompressed data length, chunk offsets etc.
Statistics.db        Statistical metadata about the content of the SSTable.
Digest.crc32         Holds CRC32 checksum of the data file size_bytes.
CRC.db               Holds the CRC32 for chunks in an uncompressed file.
Summary.db           Holds SSTable Index Summary (sampling of Index component).
TOC.txt              Table of contents, stores the list of all components for the SSTable.
-

Custom components, used by e.g. a custom compaction strategy, may also be included.

-
-
-
-

Repair Streaming Preview

-

Repair with nodetool repair involves streaming of repaired SSTables, and a repair preview has been added to provide an estimate of the amount of repair streaming that would need to be performed. Repair preview (CASSANDRA-13257) is invoked with nodetool repair --preview, using the option:

-
-prv, --preview
-
-
-

It determines ranges and amount of data to be streamed, but doesn’t actually perform repair.
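For example, to estimate the repair streaming for the node without performing any repair:

$ nodetool repair --preview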

-
-
-

Parallelizing of Streaming of Keyspaces

-

The streaming of the different keyspaces for bootstrap and rebuild has been parallelized in Cassandra 4.0 (CASSANDRA-4663).

-
-
-

Unique nodes for Streaming in Multi-DC deployment

-

Range Streamer picks unique nodes to stream data from when the number of replicas in each DC is three or more (CASSANDRA-4650). The optimization evens out the streaming load across the cluster. Without it, some nodes can be picked to stream more data than others. This patch allows a dedicated node to be selected to stream only one range.

-

This increases the performance of bootstrapping a node and also puts less pressure on the nodes serving the data. It has no effect when the number of replicas in each DC is fewer than 3, as data is then streamed from only 2 nodes.

-
-
-

Stream Operation Types

-

It is important to know the type or purpose of a certain stream. Version 4.0 (CASSANDRA-13064) adds an enum to distinguish between the different types of streams. Stream types are available both in a stream request and a stream task. The different stream types are:

-
  • Restore replica count
  • Unbootstrap
  • Relocation
  • Bootstrap
  • Rebuild
  • Bulk Load
  • Repair
-
-

Disallow Decommission when number of Replicas will drop below configured RF

-

CASSANDRA-12510 guards against a decommission that would drop the number of replicas below the configured replication factor (RF), and adds a --force option that allows the decommission to continue if it is intentional, forcing decommission of the node even when it reduces the number of replicas to below the configured RF.
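For example, to intentionally decommission a node even though doing so would leave fewer replicas than the configured RF:

$ nodetool decommission --force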

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/new/transientreplication.html b/src/doc/4.0-alpha4/new/transientreplication.html deleted file mode 100644 index d204e627a..000000000 --- a/src/doc/4.0-alpha4/new/transientreplication.html +++ /dev/null @@ -1,228 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Transient Replication" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Transient Replication

-

Note:

-

Transient Replication (CASSANDRA-14404) is an experimental feature designed for expert Apache Cassandra users who are able to validate every aspect of the database for their application and deployment. -That means being able to check that operations like reads, writes, decommission, remove, rebuild, repair, and replace all work with your queries, data, configuration, operational practices, and availability requirements. -Apache Cassandra 4.0 has the initial implementation of transient replication. Future releases of Cassandra will make this feature suitable for a wider audience. -It is anticipated that a future version will support monotonic reads with transient replication as well as LWT, logged batches, and counters. Being experimental, Transient replication is not recommended for production use.

-
-

Objective

-

The objective of transient replication is to decouple storage requirements from data redundancy (or consensus group size) using incremental repair, in order to reduce storage overhead. -Certain nodes act as full replicas (storing all the data for a given token range), and some nodes act as transient replicas, storing only unrepaired data for the same token ranges.

-

The optimization that is made possible with transient replication is called “Cheap quorums”, which implies that data redundancy is increased without corresponding increase in storage usage.

-

Transient replication is useful when sufficient full replicas are unavailable to receive and store all the data. -Transient replication allows you to configure a subset of replicas to only replicate data that hasn’t been incrementally repaired. -As an optimization, we can avoid writing data to a transient replica if we have successfully written data to the full replicas.

-

After incremental repair, transient data stored on transient replicas can be discarded.

-
-
-

Enabling Transient Replication

-

Transient replication is not enabled by default. Transient replication must be enabled on each node in a cluster separately by setting the following configuration property in cassandra.yaml.

-
enable_transient_replication: true
-
-
-

Transient replication may be configured with both SimpleStrategy and NetworkTopologyStrategy. Transient replication is configured by setting replication factor as <total_replicas>/<transient_replicas>.

-

As an example, create a keyspace with a total of 4 replicas, 1 of which is transient.

-
CREATE KEYSPACE CassandraKeyspaceSimple WITH replication = {'class': 'SimpleStrategy',
-'replication_factor' : '4/1'};
-
-
-

As another example, the some_keyspace keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:

-
-CREATE KEYSPACE some_keyspace WITH replication = {'class': 'NetworkTopologyStrategy',
-'DC1' : '3/1', 'DC2' : '5/2'};
-
-
-

Transiently replicated keyspaces only support tables with read_repair set to NONE.

-

Important Restrictions:

-
  • RF cannot be altered while some endpoints are not in a normal state (no range movements).
  • You can't add full replicas if there are any transient replicas. You must first remove all transient replicas, then change the number of full replicas, then add back the transient replicas.
  • You can only safely increase the number of transient replicas one at a time, with incremental repair run in between each change.

Additionally, transient replication cannot be used for:

-
  • Monotonic Reads
  • Lightweight Transactions (LWTs)
  • Logged Batches
  • Counters
  • Keyspaces using materialized views
  • Secondary indexes (2i)
-
-

Cheap Quorums

-

Cheap quorums are a set of optimizations on the write path to avoid writing to transient replicas unless sufficient full replicas are not available to satisfy the requested consistency level. -Hints are never written for transient replicas. Optimizations on the read path prefer reading from transient replicas. -When writing at quorum to a table configured to use transient replication the quorum will always prefer available full -replicas over transient replicas so that transient replicas don’t have to process writes. Tail latency is reduced by -rapid write protection (similar to rapid read protection) when full replicas are slow or unavailable by sending writes -to transient replicas. Transient replicas can serve reads faster as they don’t have to do anything beyond bloom filter -checks if they have no data. With vnodes and large cluster sizes they will not have a large quantity of data -even for failure of one or more full replicas where transient replicas start to serve a steady amount of write traffic -for some of their transiently replicated ranges.

-
-
-

Speculative Write Option

-

The CREATE TABLE statement adds an option, speculative_write_threshold, for use with transient replicas. The option is of type simple with a default value of 99PERCENTILE. When replicas are slow or unresponsive, speculative_write_threshold specifies the threshold at which a cheap quorum write will be upgraded to include transient replicas.
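A hedged sketch of setting this option on an existing table (the keyspace and table names are illustrative, and the exact option syntax should be verified against the CQL reference for your version):

ALTER TABLE some_keyspace.some_table WITH speculative_write_threshold = '99PERCENTILE';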

-
-
-

Pending Ranges and Transient Replicas

-

Pending ranges refers to the movement of token ranges between transient replicas. When a transient range is moved, there -will be a period of time where both transient replicas would need to receive any write intended for the logical -transient replica so that after the movement takes effect a read quorum is able to return a response. Nodes are not -temporarily transient replicas during expansion. They stream data like a full replica for the transient range before they -can serve reads. A pending state is incurred similar to how there is a pending state for full replicas. Transient replicas -also always receive writes when they are pending. Pending transient ranges are sent a bit more data and reading from -them is avoided.

-
-
-

Read Repair and Transient Replicas

-

Read repair never attempts to repair a transient replica. Reads will always include at least one full replica. -They should also prefer transient replicas where possible. Range scans ensure the entire scanned range performs -replica selection that satisfies the requirement that every range scanned includes one full replica. During incremental -& validation repair handling, at transient replicas anti-compaction does not output any data for transient ranges as the -data will be dropped after repair, and transient replicas never have data streamed to them.

-
-
-

Transitioning between Full Replicas and Transient Replicas

-

The additional state transitions that transient replication introduces requires streaming and nodetool cleanup to -behave differently. When data is streamed it is ensured that it is streamed from a full replica and not a transient replica.

-

Transitioning from not replicated to transiently replicated means that a node must stay pending until the next incremental -repair completes at which point the data for that range is known to be available at full replicas.

-

Transitioning from transiently replicated to fully replicated requires streaming from a full replica and is identical -to how data is streamed when transitioning from not replicated to replicated. The transition is managed so the transient -replica is not read from as a full replica until streaming completes. It can be used immediately for a write quorum.

-

Transitioning from fully replicated to transiently replicated requires cleanup to remove repaired data from the transiently -replicated range to reclaim space. It can be used immediately for a write quorum.

-

Transitioning from transiently replicated to not replicated requires cleanup to be run to remove the formerly transiently replicated data.

-

When transient replication is in use ring changes are supported including add/remove node, change RF, add/remove DC.

-
-
-

Transient Replication supports EACH_QUORUM

-

(CASSANDRA-14727) adds support for Transient Replication support for EACH_QUORUM. Per (CASSANDRA-14768), we ensure we write to at least a QUORUM of nodes in every DC, -regardless of how many responses we need to wait for and our requested consistency level. This is to minimally surprise -users with transient replication; with normal writes, we soft-ensure that we reach QUORUM in all DCs we are able to, -by writing to every node; even if we don’t wait for ACK, we have in both cases sent sufficient messages.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/new/virtualtables.html b/src/doc/4.0-alpha4/new/virtualtables.html deleted file mode 100644 index 1036719cd..000000000 --- a/src/doc/4.0-alpha4/new/virtualtables.html +++ /dev/null @@ -1,427 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Virtual Tables" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Virtual Tables

-

Apache Cassandra 4.0 implements virtual tables (CASSANDRA-7622).

-
-

Definition

-

A virtual table is a table that is backed by an API instead of data explicitly managed and stored as SSTables. Apache Cassandra 4.0 implements a virtual keyspace interface for virtual tables. Virtual tables are specific to each node.

-
-
-

Objective

-

A virtual table could have several uses including:

-
  • Expose metrics through CQL
  • Expose YAML configuration information
-
-

How are Virtual Tables different from regular tables?

-

Virtual tables and virtual keyspaces differ from regular tables and keyspaces in several ways:

-
  • Virtual tables are read-only, but this is likely to change
  • Virtual tables are not replicated
  • Virtual tables are local only and not distributed
  • Virtual tables have no associated SSTables
  • The consistency level of queries sent to virtual tables is ignored
  • Virtual tables are managed by Cassandra; a user cannot run DDL to create new virtual tables or DML to modify existing virtual tables
  • Virtual tables are created in special keyspaces and not just any keyspace
  • All existing virtual tables use LocalPartitioner. Since a virtual table is not replicated, the partitioner sorts in order of partition keys instead of by their hash
  • Advanced queries using ALLOW FILTERING and aggregation functions may be used with virtual tables, even though for normal tables this is not recommended
-
-

Virtual Keyspaces

-

Apache Cassandra 4.0 has added two new keyspaces for virtual tables: system_virtual_schema and system_views. Run the following command to list the keyspaces:

-
cqlsh> DESC KEYSPACES;
-system_schema  system       system_distributed  system_virtual_schema
-system_auth      system_traces       system_views
-
-
-

The system_virtual_schema keyspace contains schema information on virtual tables. The system_views keyspace contains the actual virtual tables.

-
-
-

Virtual Table Limitations

-

Virtual tables and virtual keyspaces initially have some limitations, though some of these could change:

-
  • Cannot alter or drop virtual keyspaces or tables
  • Cannot truncate virtual tables
  • Expiring columns are not supported by virtual tables
  • Conditional updates are not supported by virtual tables
  • Cannot create tables in virtual keyspaces
  • Cannot perform any operations against virtual keyspaces
  • Secondary indexes are not supported on virtual tables
  • Cannot create functions in virtual keyspaces
  • Cannot create types in virtual keyspaces
  • Materialized views are not supported on virtual tables
  • Virtual tables don’t support DELETE statements
  • Cannot CREATE TRIGGER against a virtual table
  • Conditional BATCH statements cannot include mutations for virtual tables
  • Cannot include a virtual table statement in a logged batch
  • Mutations for virtual and regular tables cannot exist in the same batch
  • Cannot create aggregates in virtual keyspaces; but may run aggregate functions on select
-
-

Listing and Describing Virtual Tables

-

Virtual tables in a virtual keyspace may be listed with DESC TABLES. The system_views virtual keyspace tables include the following:

-
cqlsh> USE system_views;
-cqlsh:system_views> DESC TABLES;
-coordinator_scans   clients             tombstones_scanned  internode_inbound
-disk_usage          sstable_tasks       live_scanned        caches
-local_writes        max_partition_size  local_reads
-coordinator_writes  internode_outbound  thread_pools
-local_scans         coordinator_reads   settings
-
-
-

Some of the salient virtual tables in system_views virtual keyspace are described in Table 1.

-

Table 1 : Virtual Tables in system_views

Virtual Table        Description
clients              Lists information about all connected clients.
disk_usage           Disk usage including disk_space, keyspace_name, and table_name by system keyspaces.
local_writes         A table metric for local writes including count, keyspace_name, max, median, per_second, and table_name.
caches               Displays the general cache information including cache name, capacity_bytes, entry_count, hit_count, hit_ratio double, recent_hit_rate_per_second, recent_request_rate_per_second, request_count, and size_bytes.
local_reads          A table metric for local reads information.
sstable_tasks        Lists currently running tasks such as compactions and upgrades on SSTables.
internode_inbound    Lists information about the inbound internode messaging.
thread_pools         Lists metrics for each thread pool.
settings             Displays configuration settings in cassandra.yaml.
max_partition_size   A table metric for maximum partition size.
internode_outbound   Information about the outbound internode messaging.
-

We shall discuss some of the virtual tables in more detail next.

-
-

Clients Virtual Table

-

The clients virtual table lists all active connections (connected clients) including their ip address, port, connection stage, driver name, driver version, hostname, protocol version, request count, ssl enabled, ssl protocol and user name:

-
cqlsh:system_views> select * from system_views.clients;
- address   | port  | connection_stage | driver_name | driver_version | hostname  | protocol_version | request_count | ssl_cipher_suite | ssl_enabled | ssl_protocol | username
------------+-------+------------------+-------------+----------------+-----------+------------------+---------------+------------------+-------------+--------------+-----------
- 127.0.0.1 | 50628 |            ready |        null |           null | localhost |                4 |            55 |             null |       False |         null | anonymous
- 127.0.0.1 | 50630 |            ready |        null |           null | localhost |                4 |            70 |             null |       False |         null | anonymous
-
-(2 rows)
-
-
-

Some examples of how clients can be used are:

-
  • To find applications using old incompatible versions of drivers before upgrading, and with nodetool enableoldprotocolversions and nodetool disableoldprotocolversions during upgrades (see the example after this list).
  • To identify clients sending too many requests.
  • To find if SSL is enabled during the migration to and from ssl.
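For instance, during a rolling upgrade the older native protocol versions can be toggled per node using the commands named in the list above:

$ nodetool enableoldprotocolversions
$ nodetool disableoldprotocolversions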

The virtual tables may be described with the DESCRIBE statement. The DDL listed, however, cannot be run to create a virtual table. As an example, describe the system_views.clients virtual table:

-
 cqlsh:system_views> DESC TABLE system_views.clients;
-CREATE TABLE system_views.clients (
-   address inet,
-   connection_stage text,
-   driver_name text,
-   driver_version text,
-   hostname text,
-   port int,
-   protocol_version int,
-   request_count bigint,
-   ssl_cipher_suite text,
-   ssl_enabled boolean,
-   ssl_protocol text,
-   username text,
-   PRIMARY KEY (address, port)) WITH CLUSTERING ORDER BY (port ASC)
-   AND compaction = {'class': 'None'}
-   AND compression = {};
-
-
-
-
-

Caches Virtual Table

-

The caches virtual table lists information about the caches. The four caches presently created are chunks, counters, keys and rows. A query on the caches virtual table returns the following details:

-
cqlsh:system_views> SELECT * FROM system_views.caches;
-name     | capacity_bytes | entry_count | hit_count | hit_ratio | recent_hit_rate_per_second | recent_request_rate_per_second | request_count | size_bytes
----------+----------------+-------------+-----------+-----------+----------------------------+--------------------------------+---------------+------------
-  chunks |      229638144 |          29 |       166 |      0.83 |                          5 |                              6 |           200 |     475136
-counters |       26214400 |           0 |         0 |       NaN |                          0 |                              0 |             0 |          0
-    keys |       52428800 |          14 |       124 |  0.873239 |                          4 |                              4 |           142 |       1248
-    rows |              0 |           0 |         0 |       NaN |                          0 |                              0 |             0 |          0
-
-(4 rows)
-
-
-
-
-

Settings Virtual Table

-

The settings table is rather useful as it lists all the current configuration settings from cassandra.yaml. The encryption options are masked to hide sensitive truststore information and passwords. The configuration settings cannot presently be set using DML on the virtual table:

-
cqlsh:system_views> SELECT * FROM system_views.settings;
-
-name                                 | value
--------------------------------------+--------------------
-  allocate_tokens_for_keyspace       | null
-  audit_logging_options_enabled      | false
-  auto_snapshot                      | true
-  automatic_sstable_upgrade          | false
-  cluster_name                       | Test Cluster
-  enable_transient_replication       | false
-  hinted_handoff_enabled             | true
-  hints_directory                    | /home/ec2-user/cassandra/data/hints
-  incremental_backups                | false
-  initial_token                      | null
-                           ...
-                           ...
-                           ...
-  rpc_address                        | localhost
-  ssl_storage_port                   | 7001
-  start_native_transport             | true
-  storage_port                       | 7000
-  stream_entire_sstables             | true
-  (224 rows)
-
-
-

The settings table can be really useful if the cassandra.yaml file has been changed since startup and the running configuration is unknown, or to find whether settings have been modified via JMX, nodetool, or virtual tables.
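For example, assuming (as in Cassandra 4.0) that name is the partition key of the settings table, a single setting can be looked up directly rather than scanning all rows:

cqlsh:system_views> SELECT * FROM settings WHERE name = 'hinted_handoff_enabled';

This returns the single row shown in the earlier listing.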

-
-
-

Thread Pools Virtual Table

-

The thread_pools table lists information about all thread pools. Thread pool information includes active tasks, active tasks limit, blocked tasks, blocked tasks all time, completed tasks, and pending tasks. A query on the thread_pools table returns the following details:

-
cqlsh:system_views> select * from system_views.thread_pools;
-
-name                         | active_tasks | active_tasks_limit | blocked_tasks | blocked_tasks_all_time | completed_tasks | pending_tasks
-------------------------------+--------------+--------------------+---------------+------------------------+-----------------+---------------
-            AntiEntropyStage |            0 |                  1 |             0 |                      0 |               0 |             0
-        CacheCleanupExecutor |            0 |                  1 |             0 |                      0 |               0 |             0
-          CompactionExecutor |            0 |                  2 |             0 |                      0 |             881 |             0
-        CounterMutationStage |            0 |                 32 |             0 |                      0 |               0 |             0
-                 GossipStage |            0 |                  1 |             0 |                      0 |               0 |             0
-             HintsDispatcher |            0 |                  2 |             0 |                      0 |               0 |             0
-       InternalResponseStage |            0 |                  2 |             0 |                      0 |               0 |             0
-         MemtableFlushWriter |            0 |                  2 |             0 |                      0 |               1 |             0
-           MemtablePostFlush |            0 |                  1 |             0 |                      0 |               2 |             0
-       MemtableReclaimMemory |            0 |                  1 |             0 |                      0 |               1 |             0
-              MigrationStage |            0 |                  1 |             0 |                      0 |               0 |             0
-                   MiscStage |            0 |                  1 |             0 |                      0 |               0 |             0
-               MutationStage |            0 |                 32 |             0 |                      0 |               0 |             0
-   Native-Transport-Requests |            1 |                128 |             0 |                      0 |             130 |             0
-      PendingRangeCalculator |            0 |                  1 |             0 |                      0 |               1 |             0
-PerDiskMemtableFlushWriter_0 |            0 |                  2 |             0 |                      0 |               1 |             0
-                   ReadStage |            0 |                 32 |             0 |                      0 |              13 |             0
-                 Repair-Task |            0 |         2147483647 |             0 |                      0 |               0 |             0
-        RequestResponseStage |            0 |                  2 |             0 |                      0 |               0 |             0
-                     Sampler |            0 |                  1 |             0 |                      0 |               0 |             0
-    SecondaryIndexManagement |            0 |                  1 |             0 |                      0 |               0 |             0
-          ValidationExecutor |            0 |         2147483647 |             0 |                      0 |               0 |             0
-           ViewBuildExecutor |            0 |                  1 |             0 |                      0 |               0 |             0
-           ViewMutationStage |            0 |                 32 |             0 |                      0 |               0 |             0
-
-
-

(24 rows)

-
-
-

Internode Inbound Messaging Virtual Table

-

The internode_inbound virtual table lists the internode inbound messaging. Initially, no internode inbound messaging may be listed. In addition to the address, port, datacenter, and rack, the information includes corrupt frames recovered, corrupt frames unrecovered, error bytes, error count, expired bytes, expired count, processed bytes, processed count, received bytes, received count, scheduled bytes, scheduled count, throttled count, throttled nanos, using bytes, and using reserve bytes. A query on internode_inbound returns the following details:

-
cqlsh:system_views> SELECT * FROM system_views.internode_inbound;
-address | port | dc | rack | corrupt_frames_recovered | corrupt_frames_unrecovered |
-error_bytes | error_count | expired_bytes | expired_count | processed_bytes |
-processed_count | received_bytes | received_count | scheduled_bytes | scheduled_count | throttled_count | throttled_nanos | using_bytes | using_reserve_bytes
----------+------+----+------+--------------------------+----------------------------+-
-----------
-(0 rows)
-
-
-
-
-

SSTables Tasks Virtual Table

-

The sstable_tasks table can be used to get information about running tasks such as compactions. It lists the keyspace name, table name, task id, kind, progress, total, and unit columns:

-
cqlsh:system_views> SELECT * FROM sstable_tasks;
-keyspace_name | table_name | task_id                              | kind       | progress | total    | unit
----------------+------------+--------------------------------------+------------+----------+----------+-------
-       basic |      wide2 | c3909740-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction | 60418761 | 70882110 | bytes
-       basic |      wide2 | c7556770-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction |  2995623 | 40314679 | bytes
-
-
-

As another example, to find how much work remains for the SSTable tasks, use the following query:

-
SELECT total - progress AS remaining
-FROM system_views.sstable_tasks;
-
-
-
-
-

Other Virtual Tables

-

Some examples of using other virtual tables are as follows.

-

Find tables with most disk usage:

-
cqlsh> SELECT * FROM disk_usage WHERE mebibytes > 1 ALLOW FILTERING;
-
-keyspace_name | table_name | mebibytes
----------------+------------+-----------
-   keyspace1 |  standard1 |       288
-  tlp_stress |   keyvalue |      3211
-
-
-

Find the tables with the greatest read latency:

-
cqlsh> SELECT * FROM  local_read_latency WHERE per_second > 1 ALLOW FILTERING;
-
-keyspace_name | table_name | p50th_ms | p99th_ms | count    | max_ms  | per_second
----------------+------------+----------+----------+----------+---------+------------
-  tlp_stress |   keyvalue |    0.043 |    0.152 | 49785158 | 186.563 |  11418.356
-
-
-
-
-
-

The system_virtual_schema keyspace

-

The system_virtual_schema keyspace has three tables: keyspaces, tables and columns, which hold the virtual keyspace definitions, virtual table definitions, and virtual column definitions respectively. It is used by Cassandra internally and a user would not normally need to access it directly.
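Should you wish to see which virtual tables a node exposes, the definitions can nonetheless be read with an ordinary query; a small sketch (column names assumed to follow the schema-table convention):

cqlsh> SELECT keyspace_name, table_name FROM system_virtual_schema.tables;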

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/objects.inv b/src/doc/4.0-alpha4/objects.inv deleted file mode 100644 index 44c041f77..000000000 Binary files a/src/doc/4.0-alpha4/objects.inv and /dev/null differ diff --git a/src/doc/4.0-alpha4/operating/audit_logging.html b/src/doc/4.0-alpha4/operating/audit_logging.html deleted file mode 100644 index 1c25f459c..000000000 --- a/src/doc/4.0-alpha4/operating/audit_logging.html +++ /dev/null @@ -1,281 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Audit Logging" -doc-header-links: ' - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Audit Logging

-

Audit logging in Cassandra logs every incoming CQL command request, as well as authentication (successful as well as unsuccessful login), to a Cassandra node. Currently, two implementations are provided; a custom logger can also be implemented and injected with the class name as a parameter in cassandra.yaml.

-
  • BinAuditLogger: An efficient way to log events to a file in a binary format.
  • FileAuditLogger: Logs events to the audit/audit.log file using the slf4j logger.
-

Recommendation: BinAuditLogger is the community-recommended logger, considering its performance.

-
-

What does it capture

-

Audit logging captures the following events:

-
    -
  • Successful as well as unsuccessful login attempts.
  • -
  • All database commands executed via Native protocol (CQL) attempted or successfully executed.
  • -
-
-
-

Limitations

-

Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution time stamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log.

-
-
-

What does it log

-

Each audit log implementation has access to the following attributes, and for the default text-based logger these fields are concatenated with | characters to yield the final message.

-
-
  • user: User name (if available)
  • host: Host IP where the command is being executed
  • source ip address: Source IP address from which the request was initiated
  • source port: Source port number from which the request was initiated
  • timestamp: Unix time stamp
  • type: Type of the request (SELECT, INSERT, etc.)
  • category: Category of the request (DDL, DML, etc.)
  • keyspace: Keyspace (if applicable) on which the request is targeted
  • scope: Table/aggregate name/function name/trigger name, etc., as applicable
  • operation: The CQL command being executed
-
-
-
-

How to configure

-

The audit log can be configured using cassandra.yaml. If you want to try the audit log on a single node, it can also be enabled and configured using nodetool.

-
-

cassandra.yaml configurations for AuditLog

-
-
  • enabled: This option enables/disables the audit log.
  • logger: Class name of the logger/custom logger.
  • audit_logs_dir: Audit logs directory location; if not set, defaults to cassandra.logdir.audit or cassandra.logdir + /audit/.
  • included_keyspaces: Comma separated list of keyspaces to be included in the audit log; by default all keyspaces are included.
  • excluded_keyspaces: Comma separated list of keyspaces to be excluded from the audit log; by default no keyspace is excluded except system, system_schema and system_virtual_schema.
  • included_categories: Comma separated list of audit log categories to be included in the audit log; by default all categories are included.
  • excluded_categories: Comma separated list of audit log categories to be excluded from the audit log; by default no category is excluded.
  • included_users: Comma separated list of users to be included in the audit log; by default all users are included.
  • excluded_users: Comma separated list of users to be excluded from the audit log; by default no user is excluded.
-
-

The list of available categories is: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE.
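Putting these options together, a minimal audit_logging_options block in cassandra.yaml might look like the sketch below. The directory and keyspace in the commented lines are placeholders, and the exact layout (in particular how the logger class is declared) can vary between releases, so compare with the cassandra.yaml shipped with your version:

audit_logging_options:
    enabled: true
    logger: BinAuditLogger
    # audit_logs_dir: /var/log/cassandra/audit/
    # included_keyspaces: app_keyspace
    # excluded_categories: QUERY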

-
-
-

NodeTool command to enable AuditLog

-

enableauditlog: Enables the audit log with the cassandra.yaml defaults; yaml configuration values can be overridden using options via the nodetool command.

-
nodetool enableauditlog
-
-
-
-

Options

-
-
--excluded-categories
    Comma separated list of audit log categories to be excluded from the audit log. If not set, the value from cassandra.yaml will be used.

--excluded-keyspaces
    Comma separated list of keyspaces to be excluded from the audit log. If not set, the value from cassandra.yaml will be used. Please remember that system, system_schema and system_virtual_schema are excluded by default; if you are overriding this option via nodetool, remember to add these keyspaces back if you do not want them in the audit logs.

--excluded-users
    Comma separated list of users to be excluded from the audit log. If not set, the value from cassandra.yaml will be used.

--included-categories
    Comma separated list of audit log categories to be included in the audit log. If not set, the value from cassandra.yaml will be used.

--included-keyspaces
    Comma separated list of keyspaces to be included in the audit log. If not set, the value from cassandra.yaml will be used.

--included-users
    Comma separated list of users to be included in the audit log. If not set, the value from cassandra.yaml will be used.

--logger
    Logger name to be used for audit logging. Default: BinAuditLogger. If not set, the value from cassandra.yaml will be used.
-
-
-
-
-

NodeTool command to disable AuditLog

-

disableauditlog: Disables AuditLog.

-
nodetool disableauditlog
-
-
-
-
-

NodeTool command to reload AuditLog filters

-

enableauditlog: The nodetool enableauditlog command can be used to reload audit log filters when called with the default or previous logger name and updated filters.

-

E.g.,

-
nodetool enableauditlog --loggername <Default/ existing loggerName> --included-keyspaces <New Filter values>
-
-
-
-
-
-

View the contents of AuditLog Files

-

auditlogviewer is a new tool introduced to help view the contents of the binary log file in a human readable text format.

-
auditlogviewer <path1> [<path2>...<pathN>] [options]
-
-
-
-

Options

-
-
-f,--follow
    Upon reaching the end of the log, continue indefinitely waiting for more records.

-r,--roll_cycle
    How often the log file was rolled. May be necessary for Chronicle to correctly parse file names (MINUTELY, HOURLY, DAILY). Default: HOURLY.

-h,--help
    Display this help message.
-
-

For example, to dump the contents of audit log files to the console:

-
auditlogviewer /logs/cassandra/audit
-
-
-
-
-

Sample output

-
LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1"
-
-
-
-
-
-

Configuring BinAuditLogger

-

To use BinAuditLogger as the logger for audit logging, set the logger to BinAuditLogger in cassandra.yaml under the audit_logging_options section. BinAuditLogger can be further configured using its advanced options in cassandra.yaml.

-
-

Advanced Options for BinAuditLogger

-
-
block
    Indicates whether the audit log should block if it falls behind, or should instead drop audit log records. Default is set to true so that audit log records are not lost.

max_queue_weight
    Maximum weight of the in-memory queue for records waiting to be written to the audit log file before blocking or dropping the log records. Default is set to 256 * 1024 * 1024.

max_log_size
    Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is set to 16L * 1024L * 1024L * 1024L.

roll_cycle
    How often to roll audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY. For more options, refer to net.openhft.chronicle.queue.RollCycles. Default is set to "HOURLY".
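As a sketch, these advanced options sit under audit_logging_options in cassandra.yaml alongside the basic options; the values shown are the documented defaults, and the exact layout should be verified against your version's cassandra.yaml:

audit_logging_options:
    enabled: true
    logger: BinAuditLogger
    block: true
    max_queue_weight: 268435456      # 256 * 1024 * 1024
    max_log_size: 17179869184        # 16L * 1024L * 1024L * 1024L
    roll_cycle: HOURLY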
-
-
-
-
-

Configuring FileAuditLogger

-

To use FileAuditLogger as the logger for audit logging, apart from setting the class name in cassandra.yaml, the following logback configuration is needed to have the audit log events flow to a separate log file instead of system.log:

-
<!-- Audit Logging (FileAuditLogger) rolling file appender to audit.log -->
-<appender name="AUDIT" class="ch.qos.logback.core.rolling.RollingFileAppender">
-  <file>${cassandra.logdir}/audit/audit.log</file>
-  <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
-    <!-- rollover daily -->
-    <fileNamePattern>${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
-    <!-- each file should be at most 50MB, keep 30 days worth of history, but at most 5GB -->
-    <maxFileSize>50MB</maxFileSize>
-    <maxHistory>30</maxHistory>
-    <totalSizeCap>5GB</totalSizeCap>
-  </rollingPolicy>
-  <encoder>
-    <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern>
-  </encoder>
-</appender>
-
-<!-- Audit Logging additivity to redirect audit logging events to audit/audit.log -->
-<logger name="org.apache.cassandra.audit" additivity="false" level="INFO">
-        <appender-ref ref="AUDIT"/>
-</logger>
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/backups.html b/src/doc/4.0-alpha4/operating/backups.html deleted file mode 100644 index 53393fcf7..000000000 --- a/src/doc/4.0-alpha4/operating/backups.html +++ /dev/null @@ -1,666 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Backups" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Backups

-

Apache Cassandra stores data in immutable SSTable files. Backups in Apache Cassandra are copies of the database data stored as SSTable files. Backups are used for several purposes, including the following:

-
    -
  • To store a data copy for durability
  • -
  • To be able to restore a table if table data is lost due to node/partition/network failure
  • -
  • To be able to transfer the SSTable files to a different machine; for portability
  • -
-
-

Types of Backups

-

Apache Cassandra supports two kinds of backup strategies.

-
    -
  • Snapshots
  • -
  • Incremental Backups
  • -
-

A snapshot is a copy of a table's SSTable files at a given time, created via hard links. The DDL to create the table is stored as well. Snapshots may be created by a user or created automatically. The setting snapshot_before_compaction in cassandra.yaml determines if snapshots are created before each compaction. By default snapshot_before_compaction is set to false. Snapshots may be created automatically before keyspace truncation or dropping of a table by setting auto_snapshot to true (default) in cassandra.yaml. Truncates could be delayed due to the auto snapshots, and another setting in cassandra.yaml determines how long the coordinator should wait for truncates to complete. By default Cassandra waits 60 seconds for auto snapshots to complete.

-

An incremental backup is a copy of a table's SSTable files created by a hard link when memtables are flushed to disk as SSTables. Typically incremental backups are paired with snapshots to reduce the backup time as well as reduce disk space. Incremental backups are not enabled by default and must be enabled explicitly in cassandra.yaml (with the incremental_backups setting) or with nodetool. Once enabled, Cassandra creates a hard link to each SSTable flushed or streamed locally in a backups/ subdirectory of the keyspace data. Incremental backups of system tables are also created.

-
-
-

Data Directory Structure

-

The directory structure of Cassandra data consists of different directories for keyspaces, and tables with the data files within the table directories. Directories backups and snapshots to store backups and snapshots respectively for a particular table are also stored within the table directory. The directory structure for Cassandra is illustrated in Figure 1.

-
-../_images/Figure_1_backups.jpg -
-

Figure 1. Directory Structure for Cassandra Data

-
-

Setting Up Example Tables for Backups and Snapshots

-

In this section we shall create some example data that could be used to demonstrate incremental backups and snapshots. We have used a three node Cassandra cluster. First, the keyspaces are created. Subsequently tables are created within a keyspace and table data is added. We have used two keyspaces, CQLKeyspace and CatalogKeyspace, with two tables within each. Create CQLKeyspace:

-
cqlsh> CREATE KEYSPACE CQLKeyspace
-  ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-
-

Create table t in the CQLKeyspace keyspace.

-
cqlsh> USE CQLKeyspace;
-cqlsh:cqlkeyspace> CREATE TABLE t (
-              ...     id int,
-              ...     k int,
-              ...     v text,
-              ...     PRIMARY KEY (id)
-              ... );
-
-
-

Add data to table t:

-
cqlsh:cqlkeyspace>
-cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1');
-
-
-

A table query lists the data:

-
cqlsh:cqlkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+------
- 1 | 1 | val1
- 0 | 0 | val0
-
- (2 rows)
-
-
-

Create another table t2:

-
cqlsh:cqlkeyspace> CREATE TABLE t2 (
-              ...     id int,
-              ...     k int,
-              ...     v text,
-              ...     PRIMARY KEY (id)
-              ... );
-
-
-

Add data to table t2:

-
cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (0, 0, 'val0');
-cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (1, 1, 'val1');
-cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (2, 2, 'val2');
-
-
-

A table query lists table data:

-
cqlsh:cqlkeyspace> SELECT * FROM t2;
-
-id | k | v
-----+---+------
- 1 | 1 | val1
- 0 | 0 | val0
- 2 | 2 | val2
-
- (3 rows)
-
-
-

Create a second keyspace CatalogKeyspace:

-
cqlsh:cqlkeyspace> CREATE KEYSPACE CatalogKeyspace
-              ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-
-

Create a table called journal in CatalogKeyspace:

-
cqlsh:cqlkeyspace> USE CatalogKeyspace;
-cqlsh:catalogkeyspace> CREATE TABLE journal (
-                  ...     id int,
-                  ...     name text,
-                  ...     publisher text,
-                  ...     PRIMARY KEY (id)
-                  ... );
-
-
-

Add data to table journal:

-
cqlsh:catalogkeyspace> INSERT INTO journal (id, name, publisher) VALUES (0, 'Apache
-Cassandra Magazine', 'Apache Cassandra');
-cqlsh:catalogkeyspace> INSERT INTO journal (id, name, publisher) VALUES (1, 'Couchbase
-Magazine', 'Couchbase');
-
-
-

Query table journal to list its data:

-
cqlsh:catalogkeyspace> SELECT * FROM journal;
-
-id | name                      | publisher
-----+---------------------------+------------------
- 1 |        Couchbase Magazine |        Couchbase
- 0 | Apache Cassandra Magazine | Apache Cassandra
-
- (2 rows)
-
-
-

Add another table called magazine:

-
cqlsh:catalogkeyspace> CREATE TABLE magazine (
-                  ...     id int,
-                  ...     name text,
-                  ...     publisher text,
-                  ...     PRIMARY KEY (id)
-                  ... );
-
-
-

Add table data to magazine:

-
cqlsh:catalogkeyspace> INSERT INTO magazine (id, name, publisher) VALUES (0, 'Apache
-Cassandra Magazine', 'Apache Cassandra');
-cqlsh:catalogkeyspace> INSERT INTO magazine (id, name, publisher) VALUES (1, 'Couchbase
-Magazine', 'Couchbase');
-
-
-

List table magazine’s data:

-
cqlsh:catalogkeyspace> SELECT * from magazine;
-
-id | name                      | publisher
-----+---------------------------+------------------
- 1 |        Couchbase Magazine |        Couchbase
- 0 | Apache Cassandra Magazine | Apache Cassandra
-
- (2 rows)
-
-
-
-
-
-

Snapshots

-

In this section including sub-sections we shall demonstrate creating snapshots. The command used to create a snapshot is nodetool snapshot and its usage is as follows:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool help snapshot
-NAME
-       nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-       of the specified table
-
-SYNOPSIS
-       nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-               [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-               [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-               [(-u <username> | --username <username>)] snapshot
-               [(-cf <table> | --column-family <table> | --table <table>)]
-               [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-               [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-       -cf <table>, --column-family <table>, --table <table>
-           The table name (you must specify one and only one keyspace for using
-           this option)
-
-       -h <host>, --host <host>
-           Node hostname or ip address
-
-       -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-           The list of Keyspace.table to take snapshot.(you must not specify
-           only keyspace)
-
-       -p <port>, --port <port>
-           Remote jmx agent port number
-
-       -pp, --print-port
-           Operate in 4.0 mode with hosts disambiguated by port number
-
-       -pw <password>, --password <password>
-           Remote jmx agent password
-
-       -pwf <passwordFilePath>, --password-file <passwordFilePath>
-           Path to the JMX password file
-
-       -sf, --skip-flush
-           Do not flush memtables before snapshotting (snapshot will not
-           contain unflushed data)
-
-       -t <tag>, --tag <tag>
-           The name of the snapshot
-
-       -u <username>, --username <username>
-           Remote jmx agent username
-
-       --
-           This option can be used to separate command-line options from the
-           list of argument, (useful when arguments might be mistaken for
-           command-line options
-
-       [<keyspaces...>]
-           List of keyspaces. By default, all keyspaces
-
-
-
-

Configuring for Snapshots

-

To demonstrate creating snapshots with nodetool on the command line, we have set the auto_snapshot setting to false in cassandra.yaml:

-
auto_snapshot: false
-
-
-

Also set snapshot_before_compaction to false to disable creating snapshots automatically before compaction:

-
snapshot_before_compaction: false
-
-
-
-
-

Creating Snapshots

-

To demonstrate creating snapshots, start with no snapshots. Searching for snapshots lists none:

-
[ec2-user@ip-10-0-2-238 ~]$ find -name snapshots
-
-
-

We shall be using the example keyspaces and tables to create snapshots.

-
-

Taking Snapshots of all Tables in a Keyspace

-

To take snapshots of all tables in a keyspace, optionally tagging the snapshot, the syntax is:

-
nodetool snapshot --tag <tag> -- <keyspace>
-
-
-

As an example create a snapshot called catalog-ks for all the tables in the catalogkeyspace keyspace:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --tag catalog-ks -- catalogkeyspace
-Requested creating snapshot(s) for [catalogkeyspace] with snapshot name [catalog-ks] and
-options {skipFlush=false}
-Snapshot directory: catalog-ks
-
-
-

Search for snapshots, and the snapshots directories for the tables journal and magazine, which are in the catalogkeyspace keyspace, should get listed:

-
[ec2-user@ip-10-0-2-238 ~]$ find -name snapshots
-./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots
-./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots
-
-
-

Snapshots of all tables in multiple keyspaces may be created similarly, as an example:

-
nodetool snapshot --tag catalog-cql-ks -- catalogkeyspace,cqlkeyspace
-
-
-
-
-

Taking Snapshots of Single Table in a Keyspace

-

To take a snapshot of a single table, the nodetool snapshot command syntax is as follows:

-
nodetool snapshot --tag <tag> --table <table> -- <keyspace>
-
-
-

As an example, create a snapshot for table magazine in keyspace catalogkeyspace:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --tag magazine --table magazine  --
-catalogkeyspace
-Requested creating snapshot(s) for [catalogkeyspace] with snapshot name [magazine] and
-options {skipFlush=false}
-Snapshot directory: magazine
-
-
-
-
-

Taking Snapshot of Multiple Tables from same Keyspace

-

To take snapshots of multiple tables in a keyspace the list of Keyspace.table must be specified with option --kt-list. As an example create snapshots for tables t and t2 in the cqlkeyspace keyspace:

-
nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag multi-table
-[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag
-multi-table
-Requested creating snapshot(s) for [cqlkeyspace.t,cqlkeyspace.t2] with snapshot name [multi-
-table] and options {skipFlush=false}
-Snapshot directory: multi-table
-
-
-

Multiple snapshots of the same set of tables may be created and tagged with a different name. As an example, create another snapshot for the same set of tables t and t2 in the cqlkeyspace keyspace and tag the snapshots differently:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag
-multi-table-2
-Requested creating snapshot(s) for [cqlkeyspace.t,cqlkeyspace.t2] with snapshot name [multi-
-table-2] and options {skipFlush=false}
-Snapshot directory: multi-table-2
-
-
-
-
-

Taking Snapshot of Multiple Tables from Different Keyspaces

-

To take snapshots of multiple tables that are in different keyspaces the command syntax is the same as when multiple tables are in the same keyspace. Each keyspace.table must be specified separately in the --kt-list option. As an example, create a snapshot for table t in the cqlkeyspace and table journal in the catalogkeyspace and tag the snapshot multi-ks.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list
-catalogkeyspace.journal,cqlkeyspace.t --tag multi-ks
-Requested creating snapshot(s) for [catalogkeyspace.journal,cqlkeyspace.t] with snapshot
-name [multi-ks] and options {skipFlush=false}
-Snapshot directory: multi-ks
-
-
-
-
-
-

Listing Snapshots

-

To list snapshots use the nodetool listsnapshots command. All the snapshots that we created in the preceding examples get listed:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool listsnapshots
-Snapshot Details:
-Snapshot name Keyspace name   Column family name True size Size on disk
-multi-table   cqlkeyspace     t2                 4.86 KiB  5.67 KiB
-multi-table   cqlkeyspace     t                  4.89 KiB  5.7 KiB
-multi-ks      cqlkeyspace     t                  4.89 KiB  5.7 KiB
-multi-ks      catalogkeyspace journal            4.9 KiB   5.73 KiB
-magazine      catalogkeyspace magazine           4.9 KiB   5.73 KiB
-multi-table-2 cqlkeyspace     t2                 4.86 KiB  5.67 KiB
-multi-table-2 cqlkeyspace     t                  4.89 KiB  5.7 KiB
-catalog-ks    catalogkeyspace journal            4.9 KiB   5.73 KiB
-catalog-ks    catalogkeyspace magazine           4.9 KiB   5.73 KiB
-
-Total TrueDiskSpaceUsed: 44.02 KiB
-
-
-
-
-

Finding Snapshots Directories

-

The snapshots directories may be listed with the find -name snapshots command:

-
[ec2-user@ip-10-0-2-238 ~]$ find -name snapshots
-./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/snapshots
-./cassandra/data/data/cqlkeyspace/t2-d993a390c22911e9b1350d927649052c/snapshots
-./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots
-./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots
-[ec2-user@ip-10-0-2-238 ~]$
-
-
-

To list the snapshots for a particular table, first change directory (with cd) to the snapshots directory for the table. As an example, list the snapshots for the catalogkeyspace/journal table. Two snapshots get listed:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/journal-
-296a2d30c22a11e9b1350d927649052c/snapshots
-[ec2-user@ip-10-0-2-238 snapshots]$ ls -l
-total 0
-drwxrwxr-x. 2 ec2-user ec2-user 265 Aug 19 02:44 catalog-ks
-drwxrwxr-x. 2 ec2-user ec2-user 265 Aug 19 02:52 multi-ks
-
-
-

A snapshot directory lists the SSTable files in the snapshot. A schema.cql file is also created in each snapshot containing the schema definition DDL that may be run in CQL to re-create the table when restoring from a snapshot:

-
[ec2-user@ip-10-0-2-238 snapshots]$ cd catalog-ks
-[ec2-user@ip-10-0-2-238 catalog-ks]$ ls -l
-total 44
--rw-rw-r--. 1 ec2-user ec2-user   31 Aug 19 02:44 manifest.json
-
--rw-rw-r--. 4 ec2-user ec2-user   47 Aug 19 02:38 na-1-big-CompressionInfo.db
--rw-rw-r--. 4 ec2-user ec2-user   97 Aug 19 02:38 na-1-big-Data.db
--rw-rw-r--. 4 ec2-user ec2-user   10 Aug 19 02:38 na-1-big-Digest.crc32
--rw-rw-r--. 4 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Filter.db
--rw-rw-r--. 4 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Index.db
--rw-rw-r--. 4 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db
--rw-rw-r--. 4 ec2-user ec2-user   56 Aug 19 02:38 na-1-big-Summary.db
--rw-rw-r--. 4 ec2-user ec2-user   92 Aug 19 02:38 na-1-big-TOC.txt
--rw-rw-r--. 1 ec2-user ec2-user  814 Aug 19 02:44 schema.cql
-
-
-
-
-

Clearing Snapshots

-

Snapshots may be cleared or deleted with the nodetool clearsnapshot command. Either a specific snapshot name or the --all option must be specified. As an example, delete a snapshot called magazine from keyspace cqlkeyspace:

-
nodetool clearsnapshot -t magazine -- cqlkeyspace

Delete all snapshots from cqlkeyspace with the --all option:

nodetool clearsnapshot --all -- cqlkeyspace
-
-
-
-
-
-

Incremental Backups

-

In the following sub-sections we shall discuss configuring and creating incremental backups.

-
-

Configuring for Incremental Backups

-

To create incremental backups set incremental_backups to true in cassandra.yaml.

-
incremental_backups: true
-
-
-

This is the only setting needed to create incremental backups. By default the incremental_backups setting is set to false because a new set of SSTable files is created for each data flush, and if several CQL statements are run the backups directory could fill up quickly and use up storage that is needed to store table data. Incremental backups may also be enabled on the command line with the nodetool enablebackup command, and disabled with the nodetool disablebackup command. The status of incremental backups, that is whether they are enabled, may be found with nodetool statusbackup.
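For example, the following nodetool commands (named above) toggle and check incremental backups on a node without editing cassandra.yaml:

nodetool enablebackup      # turn incremental backups on
nodetool statusbackup      # report whether incremental backups are enabled
nodetool disablebackup     # turn incremental backups off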

-
-
-

Creating Incremental Backups

-

After each table is created, flush the table data with the nodetool flush command. Incremental backups get created:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t
-[ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t2
-[ec2-user@ip-10-0-2-238 ~]$ nodetool flush catalogkeyspace journal magazine
-
-
-
-
-

Finding Incremental Backups

-

Incremental backups are created within Cassandra's data directory, inside a table directory. Backups may be found with the following command:

-
[ec2-user@ip-10-0-2-238 ~]$ find -name backups
-
-./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups
-./cassandra/data/data/cqlkeyspace/t2-d993a390c22911e9b1350d927649052c/backups
-./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/backups
-./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/backups
-
-
-
-
-

Creating an Incremental Backup

-

This section discusses in more detail how incremental backups are created, starting from when a new keyspace is created and a table is added. Create a keyspace called CQLKeyspace (an arbitrary name):

-
cqlsh> CREATE KEYSPACE CQLKeyspace
-  ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-
-

Create a table called t within the CQLKeyspace keyspace:

-
cqlsh> USE CQLKeyspace;
-cqlsh:cqlkeyspace> CREATE TABLE t (
-              ...     id int,
-              ...     k int,
-              ...     v text,
-              ...     PRIMARY KEY (id)
-              ... );
-
-
-

Flush the keyspace and table:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t
-
-
-

Search for backups and a backups directory should get listed even though we have added no table data yet.

-
[ec2-user@ip-10-0-2-238 ~]$ find -name backups
-
-./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups
-
-
-

Change directory to the backups directory and list the files; no files get listed, as no table data has been added yet:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t-
-d132e240c21711e9bbee19821dcea330/backups
-[ec2-user@ip-10-0-2-238 backups]$ ls -l
-total 0
-
-
-

Next, add a row of data to table t that we created:

-
cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-
-
-

Run the nodetool flush command to flush table data:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t
-
-
-

List the files and directories in the backups directory and SSTable files for an incremental backup get listed:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t-
-d132e240c21711e9bbee19821dcea330/backups
-[ec2-user@ip-10-0-2-238 backups]$ ls -l
-total 36
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 00:32 na-1-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   43 Aug 19 00:32 na-1-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 00:32 na-1-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 00:32 na-1-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user    8 Aug 19 00:32 na-1-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:32 na-1-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 00:32 na-1-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 00:32 na-1-big-TOC.txt
-
-
-

Add another row of data:

-
cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1');
-
-
-

Again, run the nodetool flush command:

-
[ec2-user@ip-10-0-2-238 backups]$  nodetool flush cqlkeyspace t
-
-
-

A new incremental backup gets created for the new data added. List the files in the backups directory for table t and two sets of SSTable files get listed, one for each incremental backup. The SSTable files are timestamped, which distinguishes the first incremental backup from the second:

-
[ec2-user@ip-10-0-2-238 backups]$ ls -l
-total 72
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 00:32 na-1-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   43 Aug 19 00:32 na-1-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 00:32 na-1-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 00:32 na-1-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user    8 Aug 19 00:32 na-1-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:32 na-1-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 00:32 na-1-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 00:32 na-1-big-TOC.txt
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 00:35 na-2-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   41 Aug 19 00:35 na-2-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 00:35 na-2-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 00:35 na-2-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user    8 Aug 19 00:35 na-2-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:35 na-2-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 00:35 na-2-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 00:35 na-2-big-TOC.txt
-[ec2-user@ip-10-0-2-238 backups]$
-
-
-

The backups directory for table cqlkeyspace/t is created within the data directory for the table:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t-
-d132e240c21711e9bbee19821dcea330
-[ec2-user@ip-10-0-2-238 t-d132e240c21711e9bbee19821dcea330]$ ls -l
-total 36
-drwxrwxr-x. 2 ec2-user ec2-user  226 Aug 19 02:30 backups
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 02:30 na-1-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   79 Aug 19 02:30 na-1-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 02:30 na-1-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 02:30 na-1-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 02:30 na-1-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4696 Aug 19 02:30 na-1-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 02:30 na-1-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 02:30 na-1-big-TOC.txt
-
-
-

The incremental backups for the other keyspaces/tables get created similarly. As an example the backups directory for table catalogkeyspace/magazine is created within the data directory:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/magazine-
-446eae30c22a11e9b1350d927649052c
-[ec2-user@ip-10-0-2-238 magazine-446eae30c22a11e9b1350d927649052c]$ ls -l
-total 36
-drwxrwxr-x. 2 ec2-user ec2-user  226 Aug 19 02:38 backups
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 02:38 na-1-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   97 Aug 19 02:38 na-1-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 02:38 na-1-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 02:38 na-1-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 02:38 na-1-big-TOC.txt
-
-
-
-
-
-

Restoring from Incremental Backups and Snapshots

-

The two main tools/commands for restoring a table after it has been dropped are:

-
    -
  • sstableloader
  • -
  • nodetool import
  • -
-

A snapshot contains essentially the same set of SSTable files as an incremental backup does, with a few additional files. A snapshot includes a schema.cql file with the schema DDL to create the table in CQL. An incremental backup does not include the DDL, which must be obtained from a snapshot when restoring from an incremental backup.
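As a sketch of the nodetool import path in Cassandra 4.0 (assuming the table has first been re-created from the snapshot's schema.cql, and the SSTable files have been copied to a directory readable by the Cassandra process, here the staging directory used later in this document):

nodetool import catalogkeyspace magazine /catalogkeyspace/magazine/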

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/bloom_filters.html b/src/doc/4.0-alpha4/operating/bloom_filters.html deleted file mode 100644 index 7a933d73f..000000000 --- a/src/doc/4.0-alpha4/operating/bloom_filters.html +++ /dev/null @@ -1,162 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bloom Filters" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bloom Filters

-

In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter.

-

Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: the data definitely does not exist in the given file, or the data probably exists in the given file.

-

While bloom filters cannot guarantee that the data exists in a given SSTable, bloom filters can be made more accurate by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting bloom_filter_fp_chance to a float between 0 and 1.

-

The default value for bloom_filter_fp_chance is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all other cases.

-

Bloom filters are stored in RAM, but off-heap, so operators should not consider bloom filters when selecting the maximum heap size. As accuracy improves (as bloom_filter_fp_chance gets closer to 0), memory usage increases non-linearly; the bloom filter for bloom_filter_fp_chance = 0.01 will require about three times as much memory as the same table with bloom_filter_fp_chance = 0.1.

-

Typical values for bloom_filter_fp_chance are between 0.01 (1%) and 0.1 (10%) false-positive chance, where Cassandra may scan an SSTable for a row only to find that it does not exist on disk. The parameter should be tuned by use case:

-
  • Users with more RAM and slower disks may benefit from setting the bloom_filter_fp_chance to a numerically lower number (such as 0.01) to avoid excess IO operations.
  • Users with less RAM, more dense nodes, or very fast disks may tolerate a higher bloom_filter_fp_chance in order to save RAM at the expense of excess IO operations.
  • In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics workloads), setting the bloom_filter_fp_chance to a much higher number is acceptable.
-
-

Changing

-

The bloom filter false positive chance is visible in the DESCRIBE TABLE output as the field bloom_filter_fp_chance. Operators can change the value with an ALTER TABLE statement:

-
ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01
-
-
-

Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ALTER TABLE statement, new files on disk will be written with the new bloom_filter_fp_chance, but existing sstables will not be modified until they are compacted. If an operator needs a change to bloom_filter_fp_chance to take effect sooner, they can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the sstables on disk, regenerating the bloom filters in the process.
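For instance, using a hypothetical keyspace1.standard1 table, the change followed by an immediate rebuild might look like the following sketch (rewriting all SSTables can be expensive on large tables):

ALTER TABLE keyspace1.standard1 WITH bloom_filter_fp_chance = 0.01;
nodetool upgradesstables -a keyspace1 standard1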

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/bulk_loading.html b/src/doc/4.0-alpha4/operating/bulk_loading.html deleted file mode 100644 index f73b281a9..000000000 --- a/src/doc/4.0-alpha4/operating/bulk_loading.html +++ /dev/null @@ -1,680 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bulk Loading" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bulk Loading

-

Bulk loading of data in Apache Cassandra is supported by different tools. The data to be bulk loaded must be in the form of SSTables; Cassandra does not support directly loading data in any other format, such as CSV, JSON, or XML. Bulk loading could be used to:

-
    -
  • Restore incremental backups and snapshots. Backups and snapshots are already in the form of SSTables.
  • -
  • Load existing SSTables into another cluster, which could have a different number of nodes or replication strategy.
  • -
  • Load external data into a cluster
  • -
-

Note: CSV data can be loaded via the cqlsh COPY command, but we do not recommend this for bulk loading, which typically involves many GB or TB of data.
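For a small data set, a COPY sketch using the cqlkeyspace.t table from the Backups section might look like this (the CSV file name is a placeholder):

cqlsh> COPY cqlkeyspace.t (id, k, v) FROM 'table_t.csv' WITH HEADER = TRUE;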

-
-

Tools for Bulk Loading

-

Cassandra provides two commands or tools for bulk loading data. These are:

-
    -
  • Cassandra Bulk loader, also called sstableloader
  • -
  • The nodetool import command
  • -
-

The sstableloader and nodetool import commands are accessible if the Cassandra installation bin directory is in the PATH environment variable; otherwise they may be run directly from the bin directory. We shall discuss each of these next, using the example keyspaces and tables created in the Backups section.

-
-
-

Using sstableloader

-

The sstableloader is the main tool for bulk uploading data. The sstableloader streams SSTable data files to a running cluster, loading data in conformance with the replication strategy and replication factor. The table to upload data to does not need to be empty.

-

The only requirements to run sstableloader are:

-
  1. One or more comma separated initial hosts to connect to and get ring information from.
  2. A directory path for the SSTables to load.

Its usage is as follows.

-
sstableloader [options] <dir_path>
-
-
-

Sstableloader bulk loads the SSTables found in the directory <dir_path> to the configured cluster, where <dir_path> is used as the target keyspace/table name. As an example, to load an SSTable named Standard1-g-1-Data.db into Keyspace1/Standard1, you will need to have the files Standard1-g-1-Data.db and Standard1-g-1-Index.db in a directory /path/to/Keyspace1/Standard1/.

-
-

Sstableloader Option to accept Target keyspace name

-

Often, as part of a backup strategy, some Cassandra DBAs store an entire data directory. When data corruption is found, they would like to restore the data in the same cluster (which for large clusters may be 200 nodes) but with a different keyspace name.

-

Currently sstableloader derives the keyspace name from the folder structure. To allow specifying the target keyspace name explicitly, version 4.0 adds support for the --target-keyspace option (CASSANDRA-13884), as shown in the sketch below.
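A sketch of the option's use, assuming SSTables staged under /catalogkeyspace/magazine/ are to be restored into a keyspace named catalogkeyspace2 (a placeholder) rather than the one implied by the directory layout:

sstableloader --nodes 10.0.2.238 --target-keyspace catalogkeyspace2 /catalogkeyspace/magazine/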

-

The supported options are as follows; only -d,--nodes <initial hosts> is required.

-
-alg,--ssl-alg <ALGORITHM>                                   Client SSL: algorithm
-
--ap,--auth-provider <auth provider>                          Custom
-                                                             AuthProvider class name for
-                                                             cassandra authentication
--ciphers,--ssl-ciphers <CIPHER-SUITES>                       Client SSL:
-                                                             comma-separated list of
-                                                             encryption suites to use
--cph,--connections-per-host <connectionsPerHost>             Number of
-                                                             concurrent connections-per-host.
--d,--nodes <initial hosts>                                   Required.
-                                                             Try to connect to these hosts (comma separated) initially for ring information
-
--f,--conf-path <path to config file>                         cassandra.yaml file path for streaming throughput and client/server SSL.
-
--h,--help                                                    Display this help message
-
--i,--ignore <NODES>                                          Don't stream to this (comma separated) list of nodes
-
--idct,--inter-dc-throttle <inter-dc-throttle>                Inter-datacenter throttle speed in Mbits (default unlimited)
-
--k,--target-keyspace <target keyspace name>                  Target
-                                                             keyspace name
--ks,--keystore <KEYSTORE>                                    Client SSL:
-                                                             full path to keystore
--kspw,--keystore-password <KEYSTORE-PASSWORD>                Client SSL:
-                                                             password of the keystore
---no-progress                                                Don't
-                                                             display progress
--p,--port <native transport port>                            Port used
-                                                             for native connection (default 9042)
--prtcl,--ssl-protocol <PROTOCOL>                             Client SSL:
-                                                             connections protocol to use (default: TLS)
--pw,--password <password>                                    Password for
-                                                             cassandra authentication
--sp,--storage-port <storage port>                            Port used
-                                                             for internode communication (default 7000)
--spd,--server-port-discovery <allow server port discovery>   Use ports
-                                                             published by server to decide how to connect. With SSL requires StartTLS
-                                                             to be used.
--ssp,--ssl-storage-port <ssl storage port>                   Port used
-                                                             for TLS internode communication (default 7001)
--st,--store-type <STORE-TYPE>                                Client SSL:
-                                                             type of store
--t,--throttle <throttle>                                     Throttle
-                                                             speed in Mbits (default unlimited)
--ts,--truststore <TRUSTSTORE>                                Client SSL:
-                                                             full path to truststore
--tspw,--truststore-password <TRUSTSTORE-PASSWORD>            Client SSL:
-                                                             Password of the truststore
--u,--username <username>                                     Username for
-                                                             cassandra authentication
--v,--verbose                                                 verbose
-                                                             output
-
-
-

The cassandra.yaml file could be provided on the command line with the -f option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options and client_encryption_options are read from the yaml. You can override options read from cassandra.yaml with corresponding command line options.
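For example, a run that picks up streaming throughput and encryption settings from a yaml file might look like the following sketch (the yaml path is a placeholder for wherever your cassandra.yaml lives):

sstableloader --nodes 10.0.2.238 -f /etc/cassandra/cassandra.yaml /catalogkeyspace/magazine/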

-
-
-

A sstableloader Demo

-

We shall demonstrate using sstableloader by uploading incremental backup data for table catalogkeyspace.magazine. We shall also use a snapshot of the same table to bulk upload in a different run of sstableloader. The backups and snapshots for the catalogkeyspace.magazine table are listed as follows.

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/magazine-
-446eae30c22a11e9b1350d927649052c
-[ec2-user@ip-10-0-2-238 magazine-446eae30c22a11e9b1350d927649052c]$ ls -l
-total 0
-drwxrwxr-x. 2 ec2-user ec2-user 226 Aug 19 02:38 backups
-drwxrwxr-x. 4 ec2-user ec2-user  40 Aug 19 02:45 snapshots
-
-
-

The directory path structure of SSTables to be uploaded using sstableloader is used as the target keyspace/table.

-

We could have uploaded directly from the backups and snapshots directories if their directory structure were in the format expected by sstableloader. However, the directory paths of the backups and snapshots for the SSTables are /catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/backups and /catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots respectively, which cannot be used to upload SSTables to the catalogkeyspace.magazine table. The directory path structure must be /catalogkeyspace/magazine/ to use sstableloader, so we need to create a new directory structure to upload the SSTables, which is typical when using sstableloader. Create a directory structure /catalogkeyspace/magazine and set its permissions:

-
[ec2-user@ip-10-0-2-238 ~]$ sudo mkdir -p /catalogkeyspace/magazine
-[ec2-user@ip-10-0-2-238 ~]$ sudo chmod -R 777 /catalogkeyspace/magazine
-
-
-
-

Bulk Loading from an Incremental Backup

-

An incremental backup does not include the DDL for a table. The table must already exist. If the table was dropped it may be created using the schema.cql generated with every snapshot of a table. As we shall be using sstableloader to load SSTables to the magazine table, the table must exist prior to running sstableloader. The table does not need to be empty but we have used an empty table as indicated by a CQL query:

-
cqlsh:catalogkeyspace> SELECT * FROM magazine;
-
-id | name | publisher
-----+------+-----------
-
-(0 rows)
-
-
-

After the table to upload has been created copy the SSTable files from the backups directory to the /catalogkeyspace/magazine/ directory that we created.

-
[ec2-user@ip-10-0-2-238 ~]$ sudo cp ./cassandra/data/data/catalogkeyspace/magazine-
-446eae30c22a11e9b1350d927649052c/backups/* /catalogkeyspace/magazine/
-
-
-

Run the sstableloader to upload SSTables from the /catalogkeyspace/magazine/ directory.

-
sstableloader --nodes 10.0.2.238  /catalogkeyspace/magazine/
-
-
-

The output from the sstableloader command should be similar to the following:

-
[ec2-user@ip-10-0-2-238 ~]$ sstableloader --nodes 10.0.2.238  /catalogkeyspace/magazine/
-Opening SSTables and calculating sections to stream
-Streaming relevant part of /catalogkeyspace/magazine/na-1-big-Data.db
-/catalogkeyspace/magazine/na-2-big-Data.db  to [35.173.233.153:7000, 10.0.2.238:7000,
-54.158.45.75:7000]
-progress: [35.173.233.153:7000]0:1/2 88 % total: 88% 0.018KiB/s (avg: 0.018KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% total: 176% 33.807KiB/s (avg: 0.036KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% total: 176% 0.000KiB/s (avg: 0.029KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:1/2 39 % total: 81% 0.115KiB/s
-(avg: 0.024KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % total: 108%
-97.683KiB/s (avg: 0.033KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 %
-[54.158.45.75:7000]0:1/2 39 % total: 80% 0.233KiB/s (avg: 0.040KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 %
-[54.158.45.75:7000]0:2/2 78 % total: 96% 88.522KiB/s (avg: 0.049KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 %
-[54.158.45.75:7000]0:2/2 78 % total: 96% 0.000KiB/s (avg: 0.045KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 %
-[54.158.45.75:7000]0:2/2 78 % total: 96% 0.000KiB/s (avg: 0.044KiB/s)
-
-
-

After sstableloader has run, query the magazine table; the loaded data should be returned.

-
cqlsh:catalogkeyspace> SELECT * FROM magazine;
-
-id | name                      | publisher
-----+---------------------------+------------------
- 1 |        Couchbase Magazine |        Couchbase
- 0 | Apache Cassandra Magazine | Apache Cassandra
-
-(2 rows)
-cqlsh:catalogkeyspace>
-
-
-
-
-

Bulk Loading from a Snapshot

-

In this section we shall demonstrate restoring a snapshot of the magazine table into the same table. As we used this table to restore data from a backup, the directory structure required by sstableloader should already exist. If the directory structure needed to load SSTables into catalogkeyspace.magazine does not exist, create the directories and set their permissions.

-
[ec2-user@ip-10-0-2-238 ~]$ sudo mkdir -p /catalogkeyspace/magazine
-[ec2-user@ip-10-0-2-238 ~]$ sudo chmod -R 777 /catalogkeyspace/magazine
-
-
-

As we shall be copying the snapshot files into this directory, first remove any files that may already be in it.

-
[ec2-user@ip-10-0-2-238 ~]$ sudo rm /catalogkeyspace/magazine/*
-[ec2-user@ip-10-0-2-238 ~]$ cd /catalogkeyspace/magazine/
-[ec2-user@ip-10-0-2-238 magazine]$ ls -l
-total 0
-
-
-

Copy the snapshot files to the /catalogkeyspace/magazine directory.

-
[ec2-user@ip-10-0-2-238 ~]$ sudo cp ./cassandra/data/data/catalogkeyspace/magazine-
-446eae30c22a11e9b1350d927649052c/snapshots/magazine/* /catalogkeyspace/magazine
-
-
-

List the files in the /catalogkeyspace/magazine directory; a schema.cql should be among them.

-
[ec2-user@ip-10-0-2-238 ~]$ cd /catalogkeyspace/magazine
-[ec2-user@ip-10-0-2-238 magazine]$ ls -l
-total 44
--rw-r--r--. 1 root root   31 Aug 19 04:13 manifest.json
--rw-r--r--. 1 root root   47 Aug 19 04:13 na-1-big-CompressionInfo.db
--rw-r--r--. 1 root root   97 Aug 19 04:13 na-1-big-Data.db
--rw-r--r--. 1 root root   10 Aug 19 04:13 na-1-big-Digest.crc32
--rw-r--r--. 1 root root   16 Aug 19 04:13 na-1-big-Filter.db
--rw-r--r--. 1 root root   16 Aug 19 04:13 na-1-big-Index.db
--rw-r--r--. 1 root root 4687 Aug 19 04:13 na-1-big-Statistics.db
--rw-r--r--. 1 root root   56 Aug 19 04:13 na-1-big-Summary.db
--rw-r--r--. 1 root root   92 Aug 19 04:13 na-1-big-TOC.txt
--rw-r--r--. 1 root root  815 Aug 19 04:13 schema.cql
-
-
-

Alternatively, create symlinks to the snapshot folder instead of copying the data, something like:

-
mkdir keyspace_name
-ln -s _path_to_snapshot_folder keyspace_name/table_name
-
-
-

If the magazine table was dropped, recreate it by running the DDL in schema.cql (one way to do this is sketched below). Then run sstableloader with the following command.
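A minimal sketch of recreating the table from the snapshot's schema.cql using cqlsh, assuming cqlsh can reach the node used in this demonstration:

# schema.cql contains a fully-qualified CREATE TABLE IF NOT EXISTS statement,
# so it can be executed directly against the cluster.
cqlsh 10.0.2.238 -f /catalogkeyspace/magazine/schema.cql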

-
sstableloader --nodes 10.0.2.238  /catalogkeyspace/magazine/
-
-
-

As the output from the command indicates, the SSTables are streamed to the cluster.

-
[ec2-user@ip-10-0-2-238 ~]$ sstableloader --nodes 10.0.2.238  /catalogkeyspace/magazine/
-
-Established connection to initial hosts
-Opening SSTables and calculating sections to stream
-Streaming relevant part of /catalogkeyspace/magazine/na-1-big-Data.db  to
-[35.173.233.153:7000, 10.0.2.238:7000, 54.158.45.75:7000]
-progress: [35.173.233.153:7000]0:1/1 176% total: 176% 0.017KiB/s (avg: 0.017KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% total: 176% 0.000KiB/s (avg: 0.014KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % total: 108% 0.115KiB/s
-(avg: 0.017KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 %
-[54.158.45.75:7000]0:1/1 78 % total: 96% 0.232KiB/s (avg: 0.024KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 %
-[54.158.45.75:7000]0:1/1 78 % total: 96% 0.000KiB/s (avg: 0.022KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 %
-[54.158.45.75:7000]0:1/1 78 % total: 96% 0.000KiB/s (avg: 0.021KiB/s)
-
-
-

Some other sstableloader requirements to keep in mind are:

  • The SSTables to be loaded must be compatible with the Cassandra version they are being loaded into.
  • Repairing tables that have been loaded into a different cluster does not repair the source tables.
  • sstableloader uses port 7000 for internode communication.
  • Before restoring incremental backups, run nodetool flush to back up any data still in memtables (see the example after this list).
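For the last point, flushing the memtables is a single nodetool call; a sketch for the table used in this demonstration:

# Flush any data still in memtables for catalogkeyspace.magazine to SSTables
# so that it is captured by the incremental backup.
nodetool flush catalogkeyspace magazine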
-
-
-
-
-

Using nodetool import

-

In this section we shall import SSTables into a table using the nodetool import command. The nodetool refresh command is deprecated, and it is recommended to use nodetool import instead. Unlike nodetool import, nodetool refresh does not have an option to load new SSTables from a separate directory.

-

The command usage is as follows.

-
nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-       [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-       [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-       [(-u <username> | --username <username>)] import
-       [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-       [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-       [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-       <directory> ...
-
-
-

The keyspace, table name, and directory to import SSTables from are required arguments.

-

The supported options are as follows.

-
-c, --no-invalidate-caches
    Don't invalidate the row cache when importing

-e, --extended-verify
    Run an extended verify, verifying all values in the new SSTables

-h <host>, --host <host>
    Node hostname or ip address

-l, --keep-level
    Keep the level on the new SSTables

-p <port>, --port <port>
    Remote jmx agent port number

-pp, --print-port
    Operate in 4.0 mode with hosts disambiguated by port number

-pw <password>, --password <password>
    Remote jmx agent password

-pwf <passwordFilePath>, --password-file <passwordFilePath>
    Path to the JMX password file

-q, --quick
    Do a quick import without verifying SSTables, clearing row cache or
    checking in which data directory to put the file

-r, --keep-repaired
    Keep any repaired information from the SSTables

-t, --no-tokens
    Don't verify that all tokens in the new SSTable are owned by the
    current node

-u <username>, --username <username>
    Remote jmx agent username

-v, --no-verify
    Don't verify new SSTables

--
    This option can be used to separate command-line options from the
    list of arguments (useful when arguments might be mistaken for
    command-line options).
-
-
-

As the keyspace and table are specified on the command line, nodetool import does not share sstableloader's requirement that the SSTables sit in a specific directory path. When importing snapshots or incremental backups with nodetool import, the SSTables don't need to be copied to another directory.
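For instance, a hypothetical invocation that imports a snapshot directly from its snapshot directory, with extended verification and keeping the SSTable levels and repaired status, could look like this; the snapshot path is illustrative.

nodetool import --extended-verify --keep-level --keep-repaired -- \
    catalogkeyspace magazine \
    ./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots/magazine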

-
-

Importing Data from an Incremental Backup

-

In this section we shall demonstrate using nodetool import to import SSTables from an incremental backup. We shall use the example table cqlkeyspace.t. Drop table t, as we are demonstrating how to restore it.

-
cqlsh:cqlkeyspace> DROP table t;
-
-
-

An incremental backup for a table does not include the schema definition for the table. If the schema definition is not kept as a separate backup, the schema.cql from a backup of the table may be used to create the table as follows.

-
cqlsh:cqlkeyspace> CREATE TABLE IF NOT EXISTS cqlkeyspace.t (
-              ...         id int PRIMARY KEY,
-              ...         k int,
-              ...         v text)
-              ...         WITH ID = d132e240-c217-11e9-bbee-19821dcea330
-              ...         AND bloom_filter_fp_chance = 0.01
-              ...         AND crc_check_chance = 1.0
-              ...         AND default_time_to_live = 0
-              ...         AND gc_grace_seconds = 864000
-              ...         AND min_index_interval = 128
-              ...         AND max_index_interval = 2048
-              ...         AND memtable_flush_period_in_ms = 0
-              ...         AND speculative_retry = '99p'
-              ...         AND additional_write_policy = '99p'
-              ...         AND comment = ''
-              ...         AND caching = { 'keys': 'ALL', 'rows_per_partition': 'NONE' }
-              ...         AND compaction = { 'max_threshold': '32', 'min_threshold': '4',
-'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' }
-              ...         AND compression = { 'chunk_length_in_kb': '16', 'class':
-'org.apache.cassandra.io.compress.LZ4Compressor' }
-              ...         AND cdc = false
-              ...         AND extensions = {  };
-
-
-

Initially the table could be empty, but does not have to be.

-
cqlsh:cqlkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+---
-
-(0 rows)
-
-
-

Run the nodetool import command by providing the keyspace, table and the backups directory. We don’t need to copy the table backups to another directory to run nodetool import as we had to when using sstableloader.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool import -- cqlkeyspace t
-./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups
-[ec2-user@ip-10-0-2-238 ~]$
-
-
-

The SSTables get imported into the table. Run a query in cqlsh to list the data imported.

-
cqlsh:cqlkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+------
- 1 | 1 | val1
- 0 | 0 | val0
-
-
-
-
-

Importing Data from a Snapshot

-

Importing SSTables from a snapshot with the nodetool import command is similar to importing SSTables from an incremental backup. To demonstrate, we shall import a snapshot for the table catalogkeyspace.journal. Drop the table, as we are demonstrating how to restore it from a snapshot.

-
cqlsh:cqlkeyspace> use CATALOGKEYSPACE;
-cqlsh:catalogkeyspace> DROP TABLE journal;
-
-
-

We shall use the catalog-ks snapshot for the journal table. List the files in the snapshot. The snapshot includes a schema.cql, which is the schema definition for the journal table.

-
[ec2-user@ip-10-0-2-238 catalog-ks]$ ls -l
-total 44
--rw-rw-r--. 1 ec2-user ec2-user   31 Aug 19 02:44 manifest.json
--rw-rw-r--. 3 ec2-user ec2-user   47 Aug 19 02:38 na-1-big-CompressionInfo.db
--rw-rw-r--. 3 ec2-user ec2-user   97 Aug 19 02:38 na-1-big-Data.db
--rw-rw-r--. 3 ec2-user ec2-user   10 Aug 19 02:38 na-1-big-Digest.crc32
--rw-rw-r--. 3 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Filter.db
--rw-rw-r--. 3 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Index.db
--rw-rw-r--. 3 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db
--rw-rw-r--. 3 ec2-user ec2-user   56 Aug 19 02:38 na-1-big-Summary.db
--rw-rw-r--. 3 ec2-user ec2-user   92 Aug 19 02:38 na-1-big-TOC.txt
--rw-rw-r--. 1 ec2-user ec2-user  814 Aug 19 02:44 schema.cql
-
-
-

Copy the DDL from the schema.cql and run in cqlsh to create the catalogkeyspace.journal table.

-
cqlsh:catalogkeyspace> CREATE TABLE IF NOT EXISTS catalogkeyspace.journal (
-                  ...         id int PRIMARY KEY,
-                  ...         name text,
-                  ...         publisher text)
-                  ...         WITH ID = 296a2d30-c22a-11e9-b135-0d927649052c
-                  ...         AND bloom_filter_fp_chance = 0.01
-                  ...         AND crc_check_chance = 1.0
-                  ...         AND default_time_to_live = 0
-                  ...         AND gc_grace_seconds = 864000
-                  ...         AND min_index_interval = 128
-                  ...         AND max_index_interval = 2048
-                  ...         AND memtable_flush_period_in_ms = 0
-                  ...         AND speculative_retry = '99p'
-                  ...         AND additional_write_policy = '99p'
-                  ...         AND comment = ''
-                  ...         AND caching = { 'keys': 'ALL', 'rows_per_partition': 'NONE' }
-                  ...         AND compaction = { 'min_threshold': '4', 'max_threshold':
-'32', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' }
-                  ...         AND compression = { 'chunk_length_in_kb': '16', 'class':
-'org.apache.cassandra.io.compress.LZ4Compressor' }
-                  ...         AND cdc = false
-                  ...         AND extensions = {  };
-
-
-

Run the nodetool import command to import the SSTables for the snapshot.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool import -- catalogkeyspace journal
-./cassandra/data/data/catalogkeyspace/journal-
-296a2d30c22a11e9b1350d927649052c/snapshots/catalog-ks/
-[ec2-user@ip-10-0-2-238 ~]$
-
-
-

Subsequently, run a CQL query on the journal table; the imported data is listed.

-
cqlsh:catalogkeyspace>
-cqlsh:catalogkeyspace> SELECT * FROM journal;
-
-id | name                      | publisher
-----+---------------------------+------------------
- 1 |        Couchbase Magazine |        Couchbase
- 0 | Apache Cassandra Magazine | Apache Cassandra
-
-(2 rows)
-cqlsh:catalogkeyspace>
-
-
-
-
-
-

Bulk Loading External Data

-

Bulk loading external data directly is not supported by any of the tools we have discussed, which include sstableloader and nodetool import; both require data to be in the form of SSTables. Apache Cassandra provides a Java API for generating SSTables from input data, after which sstableloader or nodetool import can be used to bulk load them. Next, we shall discuss the org.apache.cassandra.io.sstable.CQLSSTableWriter Java class for generating SSTables.

-
-

Generating SSTables with CQLSSTableWriter Java API

-

To generate SSTables using the CQLSSTableWriter class, the following need to be supplied at a minimum:

  • An output directory to generate the SSTable in
  • The schema for the SSTable
  • A prepared insert statement
  • A partitioner

The output directory must already have been created. Create a directory (/sstables as an example) and set its permissions.

-
sudo mkdir /sstables
-sudo chmod  777 -R /sstables
-
-
-

Next, we shall discuss how CQLSSTableWriter could be used in a Java application. Create a Java constant for the output directory.

-
public static final String OUTPUT_DIR = "./sstables";
-
-
-

The CQLSSTableWriter Java API has provision for creating a user-defined type. Create a new type to store int data:

-
String type = "CREATE TYPE CQLKeyspace.intType (a int, b int)";
// Define a String variable for the SSTable schema.
String schema = "CREATE TABLE CQLKeyspace.t ("
                 + "  id int PRIMARY KEY,"
                 + "  k int,"
                 + "  v1 text,"
                 + "  v2 intType"
                 + ")";
-
-
-

Define a String variable for the prepared insert statement to use:

-
String insertStmt = "INSERT INTO CQLKeyspace.t (id, k, v1, v2) VALUES (?, ?, ?, ?)";
-
-
-

The partitioner to use does not need to be set as the default partitioner Murmur3Partitioner is used.

-

All these variables or settings are used by the builder class CQLSSTableWriter.Builder to create a CQLSSTableWriter object.

-

Create a File object for the output directory.

-
File outputDir = new File(OUTPUT_DIR + File.separator + "CQLKeyspace" + File.separator + "t");
-
-
-

Next, obtain a CQLSSTableWriter.Builder object using the static method CQLSSTableWriter.builder(). Set the output directory File object, user-defined type, SSTable schema, buffer size, prepared insert statement, and optionally any of the other builder options, and invoke the build() method to create a CQLSSTableWriter object:

-
CQLSSTableWriter writer = CQLSSTableWriter.builder()
-                                             .inDirectory(outputDir)
-                                             .withType(type)
-                                             .forTable(schema)
-                                             .withBufferSizeInMB(256)
-                                             .using(insertStmt).build();
-
-
-

Next, set the SSTable data. If any user-defined types are used, obtain a UserType object for them:

-
UserType userType = writer.getUDType("intType");
-
-
-

Add data rows for the resulting SSTable.

-
writer.addRow(0, 0, "val0", userType.newValue().setInt("a", 0).setInt("b", 0));
writer.addRow(1, 1, "val1", userType.newValue().setInt("a", 1).setInt("b", 1));
writer.addRow(2, 2, "val2", userType.newValue().setInt("a", 2).setInt("b", 2));
-
-
-

Close the writer, finalizing the SSTable.

-
writer.close();
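Once the writer has been closed, the generated SSTables can be bulk loaded into a running cluster with either of the tools discussed earlier; a sketch, assuming the output directory used above and the node address from the earlier examples:

# sstableloader infers the keyspace and table from the last two path components.
sstableloader --nodes 10.0.2.238 ./sstables/CQLKeyspace/t

# Alternatively, on the node itself, import the generated files directly.
nodetool import -- cqlkeyspace t ./sstables/CQLKeyspace/t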
-
-
-

The CQLSSTableWriter class provides further public methods beyond the addRow(), getUDType() and close() calls used in the preceding example. The public methods provided by the CQLSSTableWriter.Builder class, including some that are not used in the preceding example, are as follows.

inDirectory(String directory)
The directory where to write the SSTables. This is a mandatory option. The directory to use should already exist and be writable.

inDirectory(File directory)
The directory where to write the SSTables. This is a mandatory option. The directory to use should already exist and be writable.

forTable(String schema)
The schema (CREATE TABLE statement) for the table for which the SSTable is to be created. The provided CREATE TABLE statement must use a fully-qualified table name, one that includes the keyspace name. This is a mandatory option.

withPartitioner(IPartitioner partitioner)
The partitioner to use. By default, Murmur3Partitioner is used. If this is not the partitioner used by the cluster for which the SSTables are created, the correct partitioner needs to be provided.

using(String insert)
The INSERT or UPDATE statement defining the order of the values to add for a given CQL row. The provided statement must use a fully-qualified table name, one that includes the keyspace name. Moreover, the statement must use bind variables, since these variables will be bound to values by the resulting SSTable writer. This is a mandatory option.

withBufferSizeInMB(int size)
The size of the buffer to use. This defines how much data will be buffered before being written as a new SSTable and corresponds roughly to the size of the created SSTables. The default is 128MB, which should be reasonable for a 1GB heap. If an OutOfMemory exception is thrown while using the SSTable writer, lower this value.

sorted()
Creates a CQLSSTableWriter that expects sorted inputs. If this option is used, the resulting SSTable writer will expect rows to be added in SSTable sorted order (and an exception will be thrown if that is not the case during row insertion). SSTable sorted order means that rows are added such that their partition keys respect the partitioner order. This option should only be used if the rows can be provided in order, which is rarely the case. If the rows can be provided in order, however, using sorted() may be more efficient. If this option is used, some options such as withBufferSizeInMB will be ignored.

build()
Builds a CQLSSTableWriter object.
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/cdc.html b/src/doc/4.0-alpha4/operating/cdc.html deleted file mode 100644 index 9626d57a0..000000000 --- a/src/doc/4.0-alpha4/operating/cdc.html +++ /dev/null @@ -1,194 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Change Data Capture" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Change Data Capture

-
-

Overview

-

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the -table property cdc=true (either when creating the table or -altering it). Upon CommitLogSegment creation, a hard-link to the segment is created in the -directory specified in cassandra.yaml. On segment fsync to disk, if CDC data is present anywhere in the segment a -<segment_name>_cdc.idx file is also created with the integer offset of how much data in the original segment is persisted -to disk. Upon final segment flush, a second line with the human-readable word “COMPLETED” will be added to the _cdc.idx -file indicating that Cassandra has completed all processing on the file.

-

We use an index file, rather than just encouraging clients to parse the log in real time off a memory-mapped handle, because data can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx file ensures that you only parse CDC data that is durable.

-

A threshold of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will -not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory.

-
-
-

Configuration

-
-

Enabling or disabling CDC on a table

-

CDC is enabled or disabled through the cdc table property, for instance:

-
CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=false;
-
-
-
-
-

cassandra.yaml parameters

-

The following cassandra.yaml options are available for CDC:

-
-
cdc_enabled (default: false)
-
Enable or disable CDC operations node-wide.
-
cdc_raw_directory (default: $CASSANDRA_HOME/data/cdc_raw)
-
Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
-
cdc_free_space_in_mb (default: min of 4096 and 1/8th volume space)
-
Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in -cdc_raw_directory.
-
cdc_free_space_check_interval_ms (default: 250)
-
When at capacity, we limit the frequency with which we re-calculate the space taken up by cdc_raw_directory to -prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-
-
-
-

Reading CommitLogSegments

-

Use CommitLogReader.java. Usage is fairly straightforward, with a variety of signatures available for use. In order to handle mutations read from disk, implement CommitLogReadHandler.

-
-
-

Warnings

-

Do not enable CDC without some kind of consumption process in-place.

-

If CDC is enabled on a node and then on a table, the cdc_free_space_in_mb will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place.

-
-
-

Further Reading

- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/compaction/index.html b/src/doc/4.0-alpha4/operating/compaction/index.html deleted file mode 100644 index fdcfa4500..000000000 --- a/src/doc/4.0-alpha4/operating/compaction/index.html +++ /dev/null @@ -1,387 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compaction" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compaction

-
-

Strategies

-

Picking the right compaction strategy for your workload will ensure the best performance for both querying and for compaction itself.

-
-
Size Tiered Compaction Strategy
-
The default compaction strategy. Useful as a fallback when other strategies don’t fit the workload. Most useful for -non pure time series workloads with spinning disks, or when the I/O from LCS is too high.
-
Leveled Compaction Strategy
-
Leveled Compaction Strategy (LCS) is optimized for read heavy workloads, or workloads with lots of updates and deletes. It is not a good choice for immutable time series data.
-
Time Window Compaction Strategy
-
Time Window Compaction Strategy is designed for TTL’ed, mostly immutable time series data.
-
-
-
-

Types of compaction

-

The concept of compaction is used for different kinds of operations in Cassandra; the common thing about these operations is that they take one or more sstables and output new sstables. The types of compaction are:

-
-
Minor compaction
-
triggered automatically in Cassandra.
-
Major compaction
-
a user executes a compaction over all sstables on the node.
-
User defined compaction
-
a user triggers a compaction on a given set of sstables.
-
Scrub
-
try to fix any broken sstables. This can actually remove valid data if that data is corrupted; if that happens you will need to run a full repair on the node.
-
Upgradesstables
-
upgrade sstables to the latest version. Run this after upgrading to a new major version.
-
Cleanup
-
remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been -bootstrapped since that node will take ownership of some ranges from those nodes.
-
Secondary index rebuild
-
rebuild the secondary indexes on the node.
-
Anticompaction
-
after repair the ranges that were actually repaired are split out of the sstables that existed when repair started.
-
Sub range compaction
-
It is possible to only compact a given sub range - this could be useful if you know a token that has been -misbehaving - either gathering many updates or many deletes. (nodetool compact -st x -et y) will pick -all sstables containing the range between x and y and issue a compaction for those sstables. For STCS this will -most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS -the resulting sstable will end up in L0.
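As a concrete illustration of the last item, a sub range compaction for a single table could be issued as follows; the token values, keyspace and table names are illustrative.

# Compact only the sstables containing the given token range for one table.
nodetool compact --start-token -3074457345618258603 --end-token 3074457345618258602 -- catalogkeyspace magazine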
-
-
-
-

When is a minor compaction triggered?

-

1. When an sstable is added to the node through flushing/streaming, etc.
2. When autocompaction is enabled after being disabled (nodetool enableautocompaction).
3. When compaction adds new sstables.
4. A check for new minor compactions runs every 5 minutes.

-
-
-

Merging sstables

-

Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently.

-
-
-

Tombstones and Garbage Collection (GC) Grace

-
-

Why Tombstones

-

When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra.

-
-
-

Deletes without tombstones

-

Imagine a three node cluster which has the value [A] replicated to every node:

-
[A], [A], [A]
-
-
-

If one of the nodes fails and our delete operation only removes existing values, we can end up with a cluster that looks like:

-
[], [], [A]
-
-
-

Then a repair operation would replace the value of [A] back onto the two nodes which are missing the value:

-
[A], [A], [A]
-
-
-

This would cause our data to be resurrected even though it had been -deleted.

-
-
-

Deletes with Tombstones

-

Starting again with a three node cluster which has the value [A] replicated to every node:

-
[A], [A], [A]
-
-
-

If instead of removing data we add a tombstone record, our single node failure situation will look like this:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A]
-
-
-

Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being resurrected:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]]
-
-
-

Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as gc_grace_seconds for every table in Cassandra.

-
-
-

The gc_grace_seconds parameter and Tombstone Removal

-

The table level gc_grace_seconds parameter controls how long Cassandra will retain tombstones through compaction events before finally removing them. This duration should directly reflect the amount of time a user expects to allow before recovering a failed node. After gc_grace_seconds has expired the tombstone may be removed (meaning there will no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to be able to drop an actual tombstone the following need to be true:

-
  • The tombstone must be older than gc_grace_seconds.
  • If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older than the tombstone containing X must be included in the same compaction. We don't need to care if the partition is in an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older than the data it cannot shadow that data.
  • If the option only_purge_repaired_tombstones is enabled, tombstones are only removed if the data has also been repaired.
-

If a node remains down or disconnected for longer than gc_grace_seconds, its deleted data will be repaired back to the other nodes and re-appear in the cluster. This is basically the same as in the “Deletes without Tombstones” section. Note that tombstones will not be removed until a compaction event, even if gc_grace_seconds has elapsed.

-

The default value for gc_grace_seconds is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using WITH gc_grace_seconds.
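For example, lowering the grace period to five days on a table is a single ALTER TABLE statement; a sketch using cqlsh, with illustrative keyspace and table names:

cqlsh -e "ALTER TABLE catalogkeyspace.magazine WITH gc_grace_seconds = 432000;"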

-
-
-
-

TTL

-

Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least gc_grace_seconds. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once.

-
-
-

Fully expired sstables

-

If an sstable contains only tombstones and it is guaranteed that the sstable is not shadowing data in any other sstable, compaction can drop that sstable. If you see sstables with only tombstones (note that TTL'ed data is considered tombstones once the time to live has expired) but they are not being dropped by compaction, it is likely that other sstables contain older data. There is a tool called sstableexpiredblockers that will list which sstables are droppable and which are blocking them from being dropped. This is especially useful for time series compaction with TimeWindowCompactionStrategy (and the deprecated DateTieredCompactionStrategy). With TimeWindowCompactionStrategy it is possible to remove the guarantee (not check for shadowing data) by enabling unsafe_aggressive_sstable_expiration.
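A sketch of checking for such blockers; the tool ships with Cassandra and takes a keyspace and a table (names here are illustrative):

# Lists fully expired sstables that could be dropped and the sstables blocking them.
sstableexpiredblockers catalogkeyspace magazine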

-
-
-

Repaired/unrepaired data

-

With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables.

-
-
-

Data directories

-

Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making data live, tombstones and actual data are always kept in the same data directory. This way, if a disk is lost, all versions of a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is run in addition to the compaction strategy instances containing repaired/unrepaired data; this means that if you have 4 data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding data getting undeleted:

-
  • It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings and each one can run compactions independently from the others.
  • Users can back up and restore a single data directory.
  • Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk backing two data directories, the big one will be limited by the small one. One workaround is to create more data directories backed by the big disk.
-
-
-

Single sstable tombstone compaction

-

When an sstable is written, a histogram of the tombstone expiry times is created; this is used to find sstables with very many tombstones and run a single-sstable compaction on them in the hope of being able to drop those tombstones. Before starting such a compaction it is also checked how likely it is that any tombstones can actually be dropped, based on how much the sstable overlaps with other sstables. To avoid most of these checks the compaction option unchecked_tombstone_compaction can be enabled.

-
-
-

Common options

-

There are a number of options common to all the compaction strategies:

-
-
enabled (default: true)
-
Whether minor compactions should run. Note that you can have ‘enabled’: true as a compaction option and then do -‘nodetool enableautocompaction’ to start running compactions.
-
tombstone_threshold (default: 0.2)
-
How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-
tombstone_compaction_interval (default: 86400s (1 day))
-
Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure -that one sstable is not constantly getting recompacted - this option states how often we should try for a given -sstable.
-
log_all (default: false)
-
New detailed compaction logging, see below.
-
unchecked_tombstone_compaction (default: false)
-
The single sstable compaction has quite strict checks for whether it should be started, this option disables those -checks and for some usecases this might be needed. Note that this does not change anything for the actual -compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able -to drop any tombstones.
-
only_purge_repaired_tombstone (default: false)
-
Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-
min_threshold (default: 4)
-
Lower limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
max_threshold (default: 32)
-
Upper limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
-

Further, see the section on each strategy for specific additional options.
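Several of these options can be combined when setting a table's compaction configuration; a sketch using cqlsh, with illustrative keyspace, table and values:

cqlsh -e "ALTER TABLE catalogkeyspace.magazine WITH compaction = {
    'class': 'SizeTieredCompactionStrategy',
    'min_threshold': '6',
    'max_threshold': '32',
    'unchecked_tombstone_compaction': 'true' };"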

-
-
-

Compaction nodetool commands

-

The nodetool utility provides a number of commands related to compaction:

-
-
enableautocompaction
-
Enable compaction.
-
disableautocompaction
-
Disable compaction.
-
setcompactionthroughput
-
How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this -throughput.
-
compactionstats
-
Statistics about current and pending compactions.
-
compactionhistory
-
List details about the last compactions.
-
setcompactionthreshold
-
Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
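A few of these commands in use, as a sketch; keyspace and table names are illustrative:

nodetool setcompactionthroughput 32                               # throttle compaction to 32 MB/s (0 disables throttling)
nodetool compactionstats                                          # show current and pending compactions
nodetool compactionhistory                                        # show recently completed compactions
nodetool setcompactionthreshold catalogkeyspace magazine 4 32    # min/max sstable count for one table
nodetool disableautocompaction catalogkeyspace magazine           # stop minor compactions for one table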
-
-
-
-

Switching the compaction strategy and options using JMX

-

It is possible to switch the compaction strategy and its options on just a single node using JMX; this is a great way to experiment with settings without affecting the whole cluster. The mbean is:

-
org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>
-
-
-

and the attribute to change is CompactionParameters or CompactionParametersJson if you use jconsole or jmc. The syntax for the json version is the same as you would use in an ALTER TABLE statement - for example:

-
{ 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}
-
-
-

The setting is kept until someone executes an ALTER TABLE that touches the compaction -settings or restarts the node.

-
-
-

More detailed compaction logging

-

Enable with the compaction option log_all and a more detailed compaction log file will be produced in your log -directory.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/compaction/lcs.html b/src/doc/4.0-alpha4/operating/compaction/lcs.html deleted file mode 100644 index 8e81a566d..000000000 --- a/src/doc/4.0-alpha4/operating/compaction/lcs.html +++ /dev/null @@ -1,147 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Leveled Compaction Strategy" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Leveled Compaction Strategy

-

The idea of LeveledCompactionStrategy (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here.

-

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that we won't create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since that can use too much memory.

-

When deciding which level to compact, LCS checks the higher levels first (with LCS, a “higher” level is one with a higher number, L0 being the lowest one) and if the level is behind, a compaction will be started in that level.

-
-

Major compaction

-

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817.

-
-
-

Bootstrapping

-

During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

-
-
-

STCS in L0

-

If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better.

-
-
-

Starved sstables

-

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is only enough data to actually fill L4 on the node, the left over sstables in L6 will get starved and not compacted. This can happen if a user changes sstable_size_in_mb from 5MB to 160MB for example. To avoid this, LCS tries to include those starved high level sstables in other compactions if there have been 25 compaction rounds where the highest level has not been involved.

-
-
-

LCS options

-
-
sstable_size_in_mb (default: 160MB)
-
The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very -large partitions on the node.
-
fanout_size (default: 10)
-
The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning -this option.
-
-

LCS also supports the cassandra.disable_stcs_in_l0 startup option (-Dcassandra.disable_stcs_in_l0=true) to avoid doing STCS in L0.
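A sketch of switching a table to LCS and tuning these options with cqlsh; keyspace, table and values are illustrative:

cqlsh -e "ALTER TABLE catalogkeyspace.magazine WITH compaction = {
    'class': 'LeveledCompactionStrategy',
    'sstable_size_in_mb': '160',
    'fanout_size': '10' };"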

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/compaction/stcs.html b/src/doc/4.0-alpha4/operating/compaction/stcs.html deleted file mode 100644 index 1d294d812..000000000 --- a/src/doc/4.0-alpha4/operating/compaction/stcs.html +++ /dev/null @@ -1,122 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Leveled Compaction Strategy" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Size Tiered Compaction Strategy

-

The basic idea of SizeTieredCompactionStrategy (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within bucket_low and bucket_high of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket’s sstables takes the most reads.

-
-

Major compaction

-

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%… of the total size.
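For example, a major compaction whose output is split into several sstables can be triggered per table with nodetool; keyspace and table names are illustrative:

# -s / --split-output splits the result into sstables of roughly 50%, 25%, 12.5%, ... of the total size.
nodetool compact -s catalogkeyspace magazine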

-
-
-

STCS options

-
-
min_sstable_size (default: 50MB)
-
Sstables smaller than this are put in the same bucket.
-
bucket_low (default: 0.5)
-
How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if bucket_low * avg_bucket_size < sstable_size (and the bucket_high condition holds, see below), then -the sstable is added to the bucket.
-
bucket_high (default: 1.5)
-
How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if sstable_size < bucket_high * avg_bucket_size (and the bucket_low condition holds, see above), then -the sstable is added to the bucket.
-
-
-
-

Defragmentation

-

Defragmentation is done when many sstables are touched during a read. The result of the read is put into the memtable so that the next read will not have to touch as many sstables. This can cause writes on an otherwise read-only cluster.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/compaction/twcs.html b/src/doc/4.0-alpha4/operating/compaction/twcs.html deleted file mode 100644 index d92f6a6f1..000000000 --- a/src/doc/4.0-alpha4/operating/compaction/twcs.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Time Window CompactionStrategy" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Time Window Compaction Strategy

-

TimeWindowCompactionStrategy (TWCS) is designed specifically for workloads where it’s beneficial to have data on disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using SizeTieredCompactionStrategy or LeveledCompactionStrategy. The basic concept is that TimeWindowCompactionStrategy will create one sstable per window, where a window is simply calculated as the combination of two primary options:

-
-
compaction_window_unit (default: DAYS)
-
A Java TimeUnit (MINUTES, HOURS, or DAYS).
-
compaction_window_size (default: 1)
-
The number of units that make up a window.
-
unsafe_aggressive_sstable_expiration (default: false)
-
Expired sstables will be dropped without checking whether their data shadows data in other sstables. This is a potentially risky option that can lead to data loss or deleted data re-appearing, going beyond what unchecked_tombstone_compaction does for single sstable compaction. Due to the risk the JVM must also be started with -Dcassandra.unsafe_aggressive_sstable_expiration=true.
-
-

Taken together, the operator can specify windows of virtually any size, and TimeWindowCompactionStrategy will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using SizeTieredCompactionStrategy.

-

Ideally, operators should select a compaction_window_unit and compaction_window_size pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -('compaction_window_unit':'DAYS','compaction_window_size':3).
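Following that guideline, a table holding data written with a 90 day TTL could be switched to TWCS with 3 day windows as follows; a sketch using cqlsh with an illustrative table name:

cqlsh -e "ALTER TABLE metrics.readings WITH compaction = {
    'class': 'TimeWindowCompactionStrategy',
    'compaction_window_unit': 'DAYS',
    'compaction_window_size': '3' };"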

-
-

TimeWindowCompactionStrategy Operational Concerns

-

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

-
  • If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables and flushed into the same SSTable, where it will remain comingled.
  • If the user’s read requests for old data cause read repairs that pull old data into the current memtable, that data will be comingled and flushed into the same SSTable.

While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL USING TIMESTAMP. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled).

-
-
-

Changing TimeWindowCompactionStrategy Options

-

Operators wishing to enable TimeWindowCompactionStrategy on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected.

-

Operators wishing to change compaction_window_unit or compaction_window_size can do so, but may trigger additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple windows.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/compression.html b/src/doc/4.0-alpha4/operating/compression.html deleted file mode 100644 index b23af8197..000000000 --- a/src/doc/4.0-alpha4/operating/compression.html +++ /dev/null @@ -1,195 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compression" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compression

-

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of data on disk by compressing the SSTable in user-configurable compression chunks (chunk_length_in_kb). Because Cassandra SSTables are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so on).

-
-

Configuring Compression

-

Compression is configured on a per-table basis as an optional argument to CREATE TABLE or ALTER TABLE. By -default, three options are relevant:

-
  • class specifies the compression class - Cassandra provides four classes (LZ4Compressor, SnappyCompressor, DeflateCompressor and ZstdCompressor). The default is LZ4Compressor.
  • chunk_length_in_kb specifies the number of kilobytes of data per compression chunk. The default is 64KB.
  • crc_check_chance determines how likely Cassandra is to verify the checksum on each compression chunk during reads. The default is 1.0.
  • compression_level is only applicable for ZstdCompressor and accepts values between -131072 and 22. The lower the level, the faster the speed (at the cost of compression). Values from 20 to 22 are called “ultra levels” and should be used with caution, as they require more memory. The default is 3.
-

Users can set compression using the following syntax:

-
CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-
-

Or

-
ALTER TABLE keyspace.table WITH compression = {'class': 'SnappyCompressor', 'chunk_length_in_kb': 128, 'crc_check_chance': 0.5};
-
-
-

Once enabled, compression can be disabled with ALTER TABLE setting enabled to false:

-
ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-
-

Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ALTER TABLE, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the SSTables on disk, -re-compressing the data in the process.
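For instance, after changing the compression options, the existing SSTables of a single table can be rewritten (and re-compressed) immediately; keyspace and table names are illustrative:

# Rewrite every SSTable of the table, applying the current compression settings.
nodetool upgradesstables -a catalogkeyspace magazine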

-
-
-

Benefits and Uses

-

Compression’s primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk.

-

Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well.

-
-
-

Operational Impact

-
  • Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per terabyte of data on disk, though the exact usage varies with chunk_length_in_kb and compression ratios.
  • Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.
  • The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a way to ensure correctness of data on disk, compressed tables allow the user to set crc_check_chance (a float from 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.
-
-
-

Advanced Use

-

Advanced users can provide their own compression class by implementing the interface at -org.apache.cassandra.io.compress.ICompressor.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/hardware.html b/src/doc/4.0-alpha4/operating/hardware.html deleted file mode 100644 index 494f8407a..000000000 --- a/src/doc/4.0-alpha4/operating/hardware.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hardware Choices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hardware Choices

-

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM.

-
-

CPU

-

Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes.

-
-
-

Memory

-

Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java’s Xmx system parameter). In addition to the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and counter caches, and an in process page cache. Finally, Cassandra will take advantage of the operating system’s page cache, storing recently accessed portions of files in RAM for rapid re-use.

-

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, basic guidelines suggest the following (a sample heap configuration is sketched after this list):

-
    -
  • ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
  • -
  • The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM
  • -
  • Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
  • -
  • Heaps larger than 12GB should consider G1GC
  • -
-
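A minimal sketch of applying these guidelines, assuming a node with 32GB of RAM and that JVM flags are passed via the conf/jvm.options mechanism (the exact file name and option handling vary by Cassandra version):

# conf/jvm.options - fixed heap at half of system RAM, G1GC for a heap larger than 12GB
-Xms16G
-Xmx16G
-XX:+UseG1GC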
-
-

Disks

-

Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables.

-

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files.

-

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra’s sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it’s important that the commitlog -(commitlog_directory) be on one physical disk (not simply a partition, but a physical disk), and the data files -(data_file_directories) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk.
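A sketch of the corresponding cassandra.yaml settings, assuming the commitlog and data directories are mounted on separate physical disks (the paths are hypothetical):

commitlog_directory: /mnt/disk1/cassandra/commitlog
data_file_directories:
    - /mnt/disk2/cassandra/data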

-

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it’s typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5.

-
-
-

Common Cloud Choices

-

Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include:

-
    -
  • i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs
  • -
  • m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) -storage
  • -
-

Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/hints.html b/src/doc/4.0-alpha4/operating/hints.html deleted file mode 100644 index 652f44347..000000000 --- a/src/doc/4.0-alpha4/operating/hints.html +++ /dev/null @@ -1,402 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hints" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hints

-

Hinting is a data repair technique applied during write operations. When -replica nodes are unavailable to accept a mutation, either due to failure or -more commonly routine maintenance, coordinators attempting to write to those -replicas store temporary hints on their local filesystem for later application -to the unavailable replica. Hints are an important way to help reduce the -duration of data inconsistency. Coordinators replay hints quickly after -unavailable replica nodes return to the ring. Hints are best effort, however, -and do not guarantee eventual consistency like anti-entropy repair does.

-

Hints are useful because of how Apache Cassandra replicates data to provide -fault tolerance, high availability and durability. Cassandra partitions -data across the cluster using consistent -hashing, and then replicates keys to multiple nodes along the hash ring. To -guarantee availability, all replicas of a key can accept mutations without -consensus, but this means it is possible for some replicas to accept a mutation -while others do not. When this happens an inconsistency is introduced.

-

Hints are one of the three ways, in addition to read-repair and -full/incremental anti-entropy repair, that Cassandra implements the eventual -consistency guarantee that all updates are eventually received by all replicas. -Hints, like read-repair, are best effort and not an alternative to performing -full repair, but they do help reduce the duration of inconsistency between -replicas in practice.

-
-

Hinted Handoff

-

Hinted handoff is the process by which Cassandra applies hints to unavailable -nodes.

-

For example, consider a mutation is to be made at Consistency Level -LOCAL_QUORUM against a keyspace with Replication Factor of 3. -Normally the client sends the mutation to a single coordinator, who then sends -the mutation to all three replicas, and when two of the three replicas -acknowledge the mutation the coordinator responds successfully to the client. -If a replica node is unavailable, however, the coordinator stores a hint -locally to the filesystem for later application. New hints will be retained for -up to max_hint_window_in_ms of downtime (defaults to 3 hours). If the -unavailable replica does return to the cluster before the window expires, the -coordinator applies any pending hinted mutations against the replica to ensure -that eventual consistency is maintained.

-
-Hinted Handoff Example

Hinted Handoff in Action

-
-
    -
  • (t0): The write is sent by the client, and the coordinator sends it -to the three replicas. Unfortunately replica_2 is restarting and cannot -receive the mutation.
  • -
  • (t1): The client receives a quorum acknowledgement from the coordinator. At this point the client believes the write to be durable and visible to reads (which it is).
  • -
  • (t2): After the write timeout (default 2s), the coordinator decides -that replica_2 is unavailable and stores a hint to its local disk.
  • -
  • (t3): Later, when replica_2 starts back up it sends a gossip message -to all nodes, including the coordinator.
  • -
  • (t4): The coordinator replays hints including the missed mutation -against replica_2.
  • -
-

If the node does not return in time, the destination replica will be -permanently out of sync until either read-repair or full/incremental -anti-entropy repair propagates the mutation.

-
-

Application of Hints

-

Hints are streamed in bulk, a segment at a time, to the target replica node and -the target node replays them locally. After the target node has replayed a -segment it deletes the segment and receives the next segment. This continues -until all hints are drained.

-
-
-

Storage of Hints on Disk

-

Hints are stored in flat files in the coordinator node’s -$CASSANDRA_HOME/data/hints directory. A hint includes a hint id, the target -replica node on which the mutation is meant to be stored, the serialized -mutation (stored as a blob) that couldn’t be delivered to the replica node, the -mutation timestamp, and the Cassandra version used to serialize the mutation. -By default hints are compressed using LZ4Compressor. Multiple hints are -appended to the same hints file.

-

Since hints contain the original unmodified mutation timestamp, hint application -is idempotent and cannot overwrite a future mutation.

-
-
-

Hints for Timed Out Write Requests

-

Hints are also stored for write requests that time out. The -write_request_timeout_in_ms setting in cassandra.yaml configures the -timeout for write requests.

-
write_request_timeout_in_ms: 2000
-
-
-

The coordinator waits for the configured amount of time for write requests to -complete, at which point it will time out and generate a hint for the timed out -request. The lowest acceptable value for write_request_timeout_in_ms is 10 ms.

-
-
-
-

Configuring Hints

-

Hints are enabled by default as they are critical for data consistency. The -cassandra.yaml configuration file provides several settings for configuring -hints:

-

Table 1. Settings for Hints

Setting | Description | Default Value
hinted_handoff_enabled | Enables/Disables hinted handoffs | true
hinted_handoff_disabled_datacenters | A list of data centers that do not perform hinted handoffs even when handoff is otherwise enabled (for example: hinted_handoff_disabled_datacenters: [DC1, DC2]) | unset
max_hint_window_in_ms | Defines the maximum amount of time (ms) a node shall have hints generated after it has failed | 10800000 # 3 hours
hinted_handoff_throttle_in_kb | Maximum throttle in KBs per second, per delivery thread. This will be reduced proportionally to the number of nodes in the cluster. (If there are two nodes in the cluster, each delivery thread will use the maximum rate; if there are 3, each will throttle to half of the maximum, since it is expected for two nodes to be delivering hints simultaneously.) | 1024
max_hints_delivery_threads | Number of threads with which to deliver hints; consider increasing this number when you have multi-dc deployments, since cross-dc handoff tends to be slower | 2
hints_directory | Directory where Cassandra stores hints | $CASSANDRA_HOME/data/hints
hints_flush_period_in_ms | How often hints should be flushed from the internal buffers to disk. Will not trigger fsync | 10000
max_hints_file_size_in_mb | Maximum size for a single hints file, in megabytes | 128
hints_compression | Compression to apply to the hint files. If omitted, hints files will be written uncompressed. LZ4, Snappy, and Deflate compressors are supported | LZ4Compressor
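A sketch of how several of these settings might appear together in cassandra.yaml (the values shown are the defaults from the table above; the disabled datacenter name and the compression stanza layout are illustrative):

hinted_handoff_enabled: true
hinted_handoff_disabled_datacenters:
    - DC1
max_hint_window_in_ms: 10800000 # 3 hours
hinted_handoff_throttle_in_kb: 1024
max_hints_delivery_threads: 2
hints_flush_period_in_ms: 10000
max_hints_file_size_in_mb: 128
hints_compression:
    - class_name: LZ4Compressor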
-
-
-

Configuring Hints at Runtime with nodetool

-

nodetool provides several commands for configuring hints or getting hints -related information. The nodetool commands override the corresponding -settings if any in cassandra.yaml for the node running the command.

-

Table 2. Nodetool Commands for Hints

Command | Description
nodetool disablehandoff | Disables storing and delivering hints
nodetool disablehintsfordc | Disables storing and delivering hints to a data center
nodetool enablehandoff | Re-enables future hints storing and delivery on the current node
nodetool enablehintsfordc | Enables hints for a data center that was previously disabled
nodetool getmaxhintwindow | Prints the max hint window in ms. New in Cassandra 4.0.
nodetool handoffwindow | Prints current hinted handoff window
nodetool pausehandoff | Pauses hints delivery process
nodetool resumehandoff | Resumes hints delivery process
nodetool sethintedhandoffthrottlekb | Sets hinted handoff throttle in kb per second, per delivery thread
nodetool setmaxhintwindow | Sets the specified max hint window in ms
nodetool statushandoff | Status of storing future hints on the current node
nodetool truncatehints | Truncates all hints on the local node, or truncates hints for the endpoint(s) specified
-
-

Make Hints Play Faster at Runtime

-

The default handoff throttle of 1024 KiB per second is conservative for most modern networks, and it is entirely possible that in a simple node restart you may accumulate many gigabytes of hints that may take hours to play back. For example, if you are ingesting 100 Mbps of data per node, a single 10 minute long restart will create 10 minutes * (100 megabit / second) ~= 7 GiB of hints, which at 1024 KiB / second would take roughly 7 GiB / (1024 KiB / second) ~= 2 hours to play back. The exact math depends on the load balancing strategy (round robin is better than token aware), the number of tokens per node (more tokens is better than fewer), and naturally the cluster’s write rate, but regardless you may find yourself wanting to increase this throttle at runtime.

-

If you find yourself in such a situation, you may consider raising -the hinted_handoff_throttle dynamically via the -nodetool sethintedhandoffthrottlekb command.
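For example (the value is illustrative), to raise the throttle to 4 MiB per second, per delivery thread:

nodetool sethintedhandoffthrottlekb 4096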

-
-
-

Allow a Node to be Down Longer at Runtime

-

Sometimes a node may be down for more than the normal max_hint_window_in_ms, -(default of three hours), but the hardware and data itself will still be -accessible. In such a case you may consider raising the -max_hint_window_in_ms dynamically via the nodetool setmaxhintwindow -command added in Cassandra 4.0 (CASSANDRA-11720). -This will instruct Cassandra to continue holding hints for the down -endpoint for a longer amount of time.
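For example (the value is illustrative), to extend the hint window to 6 hours:

nodetool setmaxhintwindow 21600000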

-

This command should be applied on all nodes in the cluster that may be holding -hints. If needed, the setting can be applied permanently by setting the -max_hint_window_in_ms setting in cassandra.yaml followed by a rolling -restart.

-
-
-
-

Monitoring Hint Delivery

-

Cassandra 4.0 adds histograms that track how long it takes to deliver hints, which helps operators better identify delivery problems (CASSANDRA-13234).

-

There are also metrics available for tracking Hinted Handoff -and Hints Service metrics.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/index.html b/src/doc/4.0-alpha4/operating/index.html deleted file mode 100644 index 61c95f3ce..000000000 --- a/src/doc/4.0-alpha4/operating/index.html +++ /dev/null @@ -1,252 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Operating Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Operating Cassandra

-
- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/metrics.html b/src/doc/4.0-alpha4/operating/metrics.html deleted file mode 100644 index 0f119fb70..000000000 --- a/src/doc/4.0-alpha4/operating/metrics.html +++ /dev/null @@ -1,1801 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Monitoring" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Monitoring

-

Metrics in Cassandra are managed using the Dropwizard Metrics library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of built in and third party reporter plugins.

-

Metrics are collected for a single node. It’s up to the operator to use an external monitoring system to aggregate them.

-
-

Metric Types

-

All metrics reported by cassandra fit into one of the following types.

-
-
Gauge
-
An instantaneous measurement of a value.
-
Counter
-
A gauge for an AtomicLong instance. Typically this is consumed by monitoring the change since the last call to -see if there is a large increase compared to the norm.
-
Histogram
-

Measures the statistical distribution of values in a stream of data.

-

In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th -percentiles.

-
-
Timer
-
Measures both the rate that a particular piece of code is called and the histogram of its duration.
-
Latency
-
Special type that tracks latency (in microseconds) with a Timer plus a Counter that tracks the total latency -accrued since starting. The former is useful if you track the change in total latency since the last check. Each -metric name of this type will have ‘Latency’ and ‘TotalLatency’ appended to it.
-
Meter
-
A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving -average throughputs.
-
-
-
-

Table Metrics

-

Each table in Cassandra has metrics responsible for tracking its state and performance.

-

The metric names are all appended with the specific Keyspace and Table name.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table> name=<MetricName>
-
-
-

Note

-

There is a special table called ‘all’ without a keyspace. This represents the aggregation of metrics across -all tables and keyspaces on the node.

-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
MemtableOnHeapSizeGauge<Long>Total amount of data stored in the memtable that resides on-heap, including column related overhead and partitions overwritten.
MemtableOffHeapSizeGauge<Long>Total amount of data stored in the memtable that resides off-heap, including column related overhead and partitions overwritten.
MemtableLiveDataSizeGauge<Long>Total amount of live data stored in the memtable, excluding any data structure overhead.
AllMemtablesOnHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides on-heap.
AllMemtablesOffHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides off-heap.
AllMemtablesLiveDataSizeGauge<Long>Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead.
MemtableColumnsCountGauge<Long>Total number of columns present in the memtable.
MemtableSwitchCountCounterNumber of times flush has resulted in the memtable being switched out.
CompressionRatioGauge<Double>Current compression ratio for all SSTables.
EstimatedPartitionSizeHistogramGauge<long[]>Histogram of estimated partition size (in bytes).
EstimatedPartitionCountGauge<Long>Approximate number of keys in table.
EstimatedColumnCountHistogramGauge<long[]>Histogram of estimated number of columns.
SSTablesPerReadHistogramHistogramHistogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into account.
ReadLatencyLatencyLocal read latency for this table.
RangeLatencyLatencyLocal range scan latency for this table.
WriteLatencyLatencyLocal write latency for this table.
CoordinatorReadLatencyTimerCoordinator read latency for this table.
CoordinatorWriteLatencyTimerCoordinator write latency for this table.
CoordinatorScanLatencyTimerCoordinator range scan latency for this table.
PendingFlushesCounterEstimated number of flush tasks pending for this table.
BytesFlushedCounterTotal number of bytes flushed since server [re]start.
CompactionBytesWrittenCounterTotal number of bytes written by compaction since server [re]start.
PendingCompactionsGauge<Integer>Estimate of number of pending compactions for this table.
LiveSSTableCountGauge<Integer>Number of SSTables on disk for this table.
LiveDiskSpaceUsedCounterDisk space used by SSTables belonging to this table (in bytes).
TotalDiskSpaceUsedCounterTotal disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC’d.
MinPartitionSizeGauge<Long>Size of the smallest compacted partition (in bytes).
MaxPartitionSizeGauge<Long>Size of the largest compacted partition (in bytes).
MeanPartitionSizeGauge<Long>Size of the average compacted partition (in bytes).
BloomFilterFalsePositivesGauge<Long>Number of false positives on table’s bloom filter.
BloomFilterFalseRatioGauge<Double>False positive ratio of table’s bloom filter.
BloomFilterDiskSpaceUsedGauge<Long>Disk space used by bloom filter (in bytes).
BloomFilterOffHeapMemoryUsedGauge<Long>Off-heap memory used by bloom filter.
IndexSummaryOffHeapMemoryUsedGauge<Long>Off-heap memory used by index summary.
CompressionMetadataOffHeapMemoryUsedGauge<Long>Off-heap memory used by compression meta data.
KeyCacheHitRateGauge<Double>Key cache hit rate for this table.
TombstoneScannedHistogramHistogramHistogram of tombstones scanned in queries on this table.
LiveScannedHistogramHistogramHistogram of live cells scanned in queries on this table.
ColUpdateTimeDeltaHistogramHistogramHistogram of column update time delta on this table.
ViewLockAcquireTimeTimerTime taken acquiring a partition lock for materialized view updates on this table.
ViewReadTimeTimerTime taken during the local read of a materialized view update.
TrueSnapshotsSizeGauge<Long>Disk space used by snapshots of this table including all SSTable components.
RowCacheHitOutOfRangeCounterNumber of table row cache hits that do not satisfy the query filter, thus went to disk.
RowCacheHitCounterNumber of table row cache hits.
RowCacheMissCounterNumber of table row cache misses.
CasPrepareLatencyLatency of paxos prepare round.
CasProposeLatencyLatency of paxos propose round.
CasCommitLatencyLatency of paxos commit round.
PercentRepairedGauge<Double>Percent of table data that is repaired on disk.
BytesRepairedGauge<Long>Size of table data repaired on disk
BytesUnrepairedGauge<Long>Size of table data unrepaired on disk
BytesPendingRepairGauge<Long>Size of table data isolated for an ongoing incremental repair
SpeculativeRetriesCounterNumber of times speculative retries were sent for this table.
SpeculativeFailedRetriesCounterNumber of speculative retries that failed to prevent a timeout
SpeculativeInsufficientReplicasCounterNumber of speculative retries that couldn’t be attempted due to lack of replicas
SpeculativeSampleLatencyNanosGauge<Long>Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency.
WaitingOnFreeMemtableSpaceHistogramHistogram of time spent waiting for free memtable space, either on- or off-heap.
DroppedMutationsCounterNumber of dropped mutations on this table.
AnticompactionTimeTimerTime spent anticompacting before a consistent repair.
ValidationTimeTimerTime spent doing validation compaction during repair.
SyncTimeTimerTime spent doing streaming during repair.
BytesValidatedHistogramHistogram over the amount of bytes read during validation.
PartitionsValidatedHistogramHistogram over the number of partitions read during validation.
BytesAnticompactedCounterHow many bytes we anticompacted.
BytesMutatedAnticompactionCounterHow many bytes we avoided anticompacting because the sstable was fully contained in the repaired range.
MutatedAnticompactionGaugeGauge<Double>Ratio of bytes mutated vs total bytes repaired.
-
-
-

Keyspace Metrics

-

Each keyspace in Cassandra has metrics responsible for tracking its state and performance.

-

Most of these metrics are the same as the Table Metrics above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.keyspace.<MetricName>.<Keyspace>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Keyspace scope=<Keyspace> name=<MetricName>
-
Name | Type | Description
WriteFailedIdeaCL | Counter | Number of writes that failed to achieve the configured ideal consistency level or 0 if none is configured
IdealCLWriteLatency | Latency | Coordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured
RepairTime | Timer | Total time spent as repair coordinator.
RepairPrepareTime | Timer | Total time spent preparing for repair.
-
-
-

ThreadPool Metrics

-

Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It’s important to monitor the state of these thread pools since they can tell you how saturated a -node is.

-

The metric names are all appended with the specific ThreadPool name. The thread pools are also categorized under a -specific type.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ThreadPools.<MetricName>.<Path>.<ThreadPoolName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ThreadPools path=<Path> scope=<ThreadPoolName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ActiveTasksGauge<Integer>Number of tasks being actively worked on by this pool.
PendingTasksGauge<Integer>Number of queued tasks queued up on this pool.
CompletedTasksCounterNumber of tasks completed.
TotalBlockedTasksCounterNumber of tasks that were blocked due to queue saturation.
CurrentlyBlockedTaskCounterNumber of tasks that are currently blocked due to queue saturation but on retry will become unblocked.
MaxPoolSizeGauge<Integer>The maximum number of threads in this pool.
MaxTasksQueuedGauge<Integer>The maximum number of tasks queued before a task get blocked.
-

The following thread pools can be monitored.

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Native-Transport-RequeststransportHandles client CQL requests
CounterMutationStagerequestResponsible for counter writes
ViewMutationStagerequestResponsible for materialized view writes
MutationStagerequestResponsible for all other writes
ReadRepairStagerequestReadRepair happens on this thread pool
ReadStagerequestLocal reads run on this thread pool
RequestResponseStagerequestCoordinator requests to the cluster run on this thread pool
AntiEntropyStageinternalBuilds merkle tree for repairs
CacheCleanupExecutorinternalCache maintenance performed on this thread pool
CompactionExecutorinternalCompactions are run on these threads
GossipStageinternalHandles gossip requests
HintsDispatcherinternalPerforms hinted handoff
InternalResponseStageinternalResponsible for intra-cluster callbacks
MemtableFlushWriterinternalWrites memtables to disk
MemtablePostFlushinternalCleans up commit log after memtable is written to disk
MemtableReclaimMemoryinternalMemtable recycling
MigrationStageinternalRuns schema migrations
MiscStageinternalMiscellaneous tasks run here
PendingRangeCalculatorinternalCalculates token range
PerDiskMemtableFlushWriter_0internalResponsible for writing a spec (there is one of these per disk 0-N)
SamplerinternalResponsible for re-sampling the index summaries of SStables
SecondaryIndexManagementinternalPerforms updates to secondary indexes
ValidationExecutorinternalPerforms validation compaction or scrubbing
ViewBuildExecutorinternalPerforms materialized views initial build
-
-
-

Client Request Metrics

-

Client requests have their own set of metrics that encapsulate the work happening at coordinator level.

-

Different types of client requests are broken down by RequestType.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ClientRequest.<MetricName>.<RequestType>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ClientRequest scope=<RequestType> name=<MetricName>
-
- --- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
RequestType:

CASRead

-
Description:

Metrics related to transactional read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction read latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
UnfinishedCommitCounterNumber of transactions that were committed on read.
ConditionNotMetCounterNumber of transaction preconditions did not match current values.
ContentionHistogramHistogramHow many contended reads were encountered
-
RequestType:

CASWrite

-
Description:

Metrics related to transactional write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction write latency.
UnfinishedCommitCounterNumber of transactions that were committed on write.
ConditionNotMetCounterNumber of transaction preconditions did not match current values.
ContentionHistogramHistogramHow many contended writes were encountered
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

Read

-
Description:

Metrics related to standard read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of read failures encountered.
 LatencyRead latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

RangeSlice

-
Description:

Metrics related to token range read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of range query failures encountered.
 LatencyRange query latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

Write

-
Description:

Metrics related to regular write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of write failures encountered.
 LatencyWrite latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

ViewWrite

-
Description:

Metrics related to materialized view writes.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
UnavailablesCounterNumber of unavailable exceptions encountered.
ViewReplicasAttemptedCounterTotal number of attempted view replica writes.
ViewReplicasSuccessCounterTotal number of succeeded view replica writes.
ViewPendingMutationsGauge<Long>ViewReplicasAttempted - ViewReplicasSuccess.
ViewWriteLatencyTimerTime between when mutation is applied to base table and when CL.ONE is achieved on view.
-
-
-
-

Cache Metrics

-

Cassandra caches have metrics to track the effectiveness of the caches, though the Table Metrics might be more useful.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Cache capacity in bytes.
EntriesGauge<Integer>Total number of cache entries.
FifteenMinuteCacheHitRateGauge<Double>15m cache hit rate.
FiveMinuteCacheHitRateGauge<Double>5m cache hit rate.
OneMinuteCacheHitRateGauge<Double>1m cache hit rate.
HitRateGauge<Double>All time cache hit rate.
HitsMeterTotal number of cache hits.
MissesMeterTotal number of cache misses.
MissLatencyTimerLatency of misses.
RequestsGauge<Long>Total number of cache requests.
SizeGauge<Long>Total size of occupied cache, in bytes.
-

The following caches are covered:

Name | Description
CounterCache | Keeps hot counters in memory for performance.
ChunkCache | In process uncompressed page cache.
KeyCache | Cache for partition to sstable offsets.
RowCache | Cache for rows kept in memory.
-
-

Note

-

Misses and MissLatency are only defined for the ChunkCache

-
-
-
-

CQL Metrics

-

Metrics specific to CQL prepared statement caching.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CQL.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CQL name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PreparedStatementsCountGauge<Integer>Number of cached prepared statements.
PreparedStatementsEvictedCounterNumber of prepared statements evicted from the prepared statement cache
PreparedStatementsExecutedCounterNumber of prepared statements executed.
RegularStatementsExecutedCounterNumber of non prepared statements executed.
PreparedStatementsRatioGauge<Double>Percentage of statements that are prepared vs unprepared.
-
-
-

DroppedMessage Metrics

-

Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by Hinted Handoff

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.DroppedMessage.<MetricName>.<Type>
-
JMX MBean
-
org.apache.cassandra.metrics:type=DroppedMessage scope=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CrossNodeDroppedLatencyTimerThe dropped latency across nodes.
InternalDroppedLatencyTimerThe dropped latency within node.
DroppedMeterNumber of dropped messages.
-

The different types of messages tracked are:

Name | Description
BATCH_STORE | Batchlog write
BATCH_REMOVE | Batchlog cleanup (after being successfully applied)
COUNTER_MUTATION | Counter writes
HINT | Hint replay
MUTATION | Regular writes
READ | Regular reads
READ_REPAIR | Read repair
PAGED_SLICE | Paged read
RANGE_SLICE | Token range read
REQUEST_RESPONSE | RPC Callbacks
_TRACE | Tracing writes
-
-
-

Streaming Metrics

-

Metrics reported during Streaming operations, such as repair, bootstrap, rebuild.

-

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
IncomingBytesCounterNumber of bytes streamed to this node from the peer.
OutgoingBytesCounterNumber of bytes streamed to the peer endpoint from this node.
-
-
-

Compaction Metrics

-

Metrics specific to Compaction work.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Compaction.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Compaction name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
BytesCompactedCounterTotal number of bytes compacted since server [re]start.
PendingTasksGauge<Integer>Estimated number of compactions remaining to perform.
CompletedTasksGauge<Long>Number of completed compactions since server [re]start.
TotalCompactionsCompletedMeterThroughput of completed compactions since server [re]start.
PendingTasksByTableNameGauge<Map<String, Map<String, Integer>>>Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in Table Metrics.
-
-
-

CommitLog Metrics

-

Metrics specific to the CommitLog

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CommitLog.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CommitLog name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CompletedTasksGauge<Long>Total number of commit log messages written since [re]start.
PendingTasksGauge<Long>Number of commit log messages written but yet to be fsync’d.
TotalCommitLogSizeGauge<Long>Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocationTimerTime spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommitTimerThe time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval.
-
-
-

Storage Metrics

-

Metrics specific to the storage engine.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Storage.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Storage name=<MetricName>
-
Name | Type | Description
Exceptions | Counter | Number of internal exceptions caught. Under normal conditions this should be zero.
Load | Counter | Size, in bytes, of the on disk data this node manages.
TotalHints | Counter | Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgress | Counter | Number of hints attempting to be sent currently.
-
-
-

HintedHandoff Metrics

-

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>
-
Name | Type | Description
Hints_created-<PeerIP> | Counter | Number of hints on disk for this peer.
Hints_not_stored-<PeerIP> | Counter | Number of hints not stored for this peer, due to being down past the configured hint window.
-
-
-

HintsService Metrics

-

Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintsService.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintsService name=<MetricName>
-
Name | Type | Description
HintsSucceeded | Meter | A meter of the hints successfully delivered
HintsFailed | Meter | A meter of the hints that failed to be delivered
HintsTimedOut | Meter | A meter of the hints that timed out
Hint_delays | Histogram | Histogram of hint delivery delays (in milliseconds)
Hint_delays-<PeerIP> | Histogram | Histogram of hint delivery delays (in milliseconds) per peer
-
-
-

SSTable Index Metrics

-

Metrics specific to the SSTable index metadata.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry
-
JMX MBean
-
org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
IndexedEntrySizeHistogramHistogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCountHistogramHistogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGetsHistogramHistogram of the number index seeks performed per SSTable.
-
-
-

BufferPool Metrics

-

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.BufferPool.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=BufferPool name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
SizeGauge<Long>Size, in bytes, of the managed buffer pool
MissesMeterThe rate of misses in the pool. The higher this is the more allocations incurred.
-
-
-

Client Metrics

-

Metrics specific to client management.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Client.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Client name=<MetricName>
-
Name | Type | Description
connectedNativeClients | Gauge<Integer> | Number of clients connected to this node’s native protocol server
connections | Gauge<List<Map<String, String>>> | List of all connections and their state information
connectedNativeClientsByUser | Gauge<Map<String, Int>> | Number of connected native clients by username
-
-
-

Batch Metrics

-

Metrics specific to batch statements.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Batch.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Batch name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PartitionsPerCounterBatchHistogramDistribution of the number of partitions processed per counter batch
PartitionsPerLoggedBatchHistogramDistribution of the number of partitions processed per logged batch
PartitionsPerUnloggedBatchHistogramDistribution of the number of partitions processed per unlogged batch
-
-
-

JVM Metrics

-

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using Metric Reporters.

-
-

BufferPool

-
-
Metric Name
-
jvm.buffers.<direct|mapped>.<MetricName>
-
JMX MBean
-
java.nio:type=BufferPool name=<direct|mapped>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Estimated total capacity of the buffers in this pool
CountGauge<Long>Estimated number of buffers in the pool
UsedGauge<Long>Estimated memory that the Java virtual machine is using for this buffer pool
-
-
-

FileDescriptorRatio

-
-
Metric Name
-
jvm.fd.<MetricName>
-
JMX MBean
-
java.lang:type=OperatingSystem name=<OpenFileDescriptorCount|MaxFileDescriptorCount>
-
- ----- - - - - - - - - - - - - -
NameTypeDescription
UsageRatioRatio of used to total file descriptors
-
-
-

GarbageCollector

-
-
Metric Name
-
jvm.gc.<gc_type>.<MetricName>
-
JMX MBean
-
java.lang:type=GarbageCollector name=<gc_type>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
CountGauge<Long>Total number of collections that have occurred
TimeGauge<Long>Approximate accumulated collection elapsed time in milliseconds
-
-
-

Memory

-
-
Metric Name
-
jvm.memory.<heap/non-heap/total>.<MetricName>
-
JMX MBean
-
java.lang:type=Memory
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-

MemoryPool

-
-
Metric Name
-
jvm.memory.pools.<memory_pool>.<MetricName>
-
JMX MBean
-
java.lang:type=MemoryPool name=<memory_pool>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-
-

JMX

-

Any JMX based client can access metrics from cassandra.

-

If you wish to access JMX metrics over http it’s possible to download Mx4jTool and -place mx4j-tools.jar into the classpath. On startup you will see in the log:

-
HttpAdaptor version 3.0.2 started on port 8081
-
-
-

To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -conf/cassandra-env.sh and uncomment:

-
#MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0"
-
-#MX4J_PORT="-Dmx4jport=8081"
-
-
-
-
-

Metric Reporters

-

As mentioned at the top of this section on monitoring, the Cassandra metrics can be exported to a number of monitoring systems using a number of built in and third party reporter plugins.

-

The configuration of these plugins is managed by the metrics reporter config project. There is a sample configuration file located at -conf/metrics-reporter-config-sample.yaml.

-

Once configured, you simply start Cassandra with the flag -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml. The specified .yaml file plus any 3rd party reporter jars must all be in Cassandra’s classpath.
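A sketch of one way to pass the flag, assuming it is appended to JVM_OPTS in conf/cassandra-env.sh (the configuration file name matches the example above and must be resolvable on the classpath):

JVM_OPTS="$JVM_OPTS -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml"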

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/read_repair.html b/src/doc/4.0-alpha4/operating/read_repair.html deleted file mode 100644 index 6ba6a8866..000000000 --- a/src/doc/4.0-alpha4/operating/read_repair.html +++ /dev/null @@ -1,267 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Read repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Read repair

-

Read Repair is the process of repairing data replicas during a read request. If all replicas involved in a read request at the given read consistency level are consistent the data is returned to the client and no read repair is needed. But if the replicas involved in a read request at the given consistency level are not consistent a read repair is performed to make replicas involved in the read request consistent. The most up-to-date data is returned to the client. The read repair runs in the foreground and is blocking in that a response is not returned to the client until the read repair has completed and up-to-date data is constructed.

-
-

Expectation of Monotonic Quorum Reads

-

Cassandra uses a blocking read repair to ensure the expectation of “monotonic quorum reads” i.e. that in 2 successive quorum reads, it’s guaranteed the 2nd one won’t get something older than the 1st one, and this even if a failed quorum write made a write of the most up to date value only to a minority of replicas. “Quorum” means majority of nodes among replicas.

-
-
-

Table level configuration of monotonic reads

-

Cassandra 4.0 adds support for table level configuration of monotonic reads (CASSANDRA-14635). The read_repair table option has been added to table schema, with the options blocking (default), and none.

-

The read_repair option configures the read repair behavior to allow tuning for various performance and consistency behaviors. Two consistency properties are affected by read repair behavior.

-
    -
  • Monotonic Quorum Reads: Provided by BLOCKING. Monotonic quorum reads prevents reads from appearing to go back in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of replicas, it may be visible in one read, and then disappear in a subsequent read.
  • -
  • Write Atomicity: Provided by NONE. Write atomicity prevents reads from returning partially applied writes. Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a batch, but then select a single row by specifying the clustering column in a SELECT statement.
  • -
-

The available read repair settings are:

-
-

Blocking

-

The default setting. When read_repair is set to BLOCKING, and a read repair is started, the read will block on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition level write atomicity.
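As a sketch (the keyspace and table names are hypothetical), the default behavior can also be set explicitly:

ALTER TABLE ks.tbl WITH read_repair = 'BLOCKING';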

-
-
-

None

-

When read_repair is set to NONE, the coordinator will reconcile any differences between replicas, but will not attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads.

-

An example of using the NONE setting for the read_repair option is as follows:

-
CREATE TABLE ks.tbl (k INT, c INT, v INT, PRIMARY KEY (k, c)) WITH read_repair = 'NONE';
-
-
-
-
-
-

Read Repair Example

-

To illustrate read repair with an example, consider that a client sends a read request with read consistency level TWO to a 5-node cluster as illustrated in Figure 1. Read consistency level determines how many replica nodes must return a response before the read request is considered successful.

-
-../_images/Figure_1_read_repair.jpg -
-

Figure 1. Client sends read request to a 5-node Cluster

-

Three nodes host replicas for the requested data as illustrated in Figure 2. With a read consistency level of TWO, two replica nodes must return a response for the read request to be considered successful. If the node the client sends the request to hosts a replica of the requested data, only one other replica node needs to be sent a read request. But if the receiving node does not host a replica for the requested data, the node becomes a coordinator node and forwards the read request to a node that hosts a replica. A direct read request is forwarded to the fastest node (as determined by the dynamic snitch) as shown in Figure 2. A direct read request is a full read and returns the requested data.

-
-../_images/Figure_2_read_repair.jpg -
-

Figure 2. Direct Read Request sent to Fastest Replica Node

-

Next, the coordinator node sends the requisite number of additional requests to satisfy the consistency level, which is TWO. The coordinator node needs to send one more read request for a total of two. All read requests additional to the first direct read request are digest read requests. A digest read request is not a full read and only returns the hash value of the data. Only a hash value is returned to reduce the network data traffic. In the example being discussed the coordinator node sends one digest read request to a node hosting a replica as illustrated in Figure 3.

-
-../_images/Figure_3_read_repair.jpg -
-

Figure 3. Coordinator Sends a Digest Read Request

-

The coordinator node has received a full copy of data from one node and a hash value for the data from another node. To compare the data returned, a hash value is calculated for the full copy of data. The two hash values are compared. If the hash values are the same, no read repair is needed and the full copy of requested data is returned to the client. The coordinator node only performed a total of two replica read requests because the read consistency level is TWO in the example. If the consistency level were higher, such as THREE, three replica nodes would need to respond to a read request, and only if all digest or hash values were to match with the hash value of the full copy of data would the read request be considered successful and the data returned to the client.

-

But, if the hash value/s from the digest read request/s are not the same as the hash value of the data from the full read request of the first replica node it implies that an inconsistency in the replicas exists. To fix the inconsistency a read repair is performed.

-

For example, consider that the digest read request returns a hash value that is not the same as the hash value of the data from the direct full read request. We would need to make the replicas consistent, for which the coordinator node sends a direct (full) read request to the replica node that it sent a digest read request to earlier, as illustrated in Figure 4.

-
-../_images/Figure_4_read_repair.jpg -
-

Figure 4. Coordinator sends Direct Read Request to Replica Node it had sent Digest Read Request to

-

After receiving the data from the second replica node the coordinator has data from two of the replica nodes. It only needs two replicas as the read consistency level is TWO in the example. Data from the two replicas is compared and, based on the timestamps, the most recent replica is selected. Data may need to be merged to construct an up-to-date copy of data if one replica has data for only some of the columns. In the example, if the data from the first direct read request is found to be outdated and the data from the second full read request is found to be the latest, a read repair needs to be performed on Replica 2. If new up-to-date data is constructed by merging the two replicas, a read repair would be needed on both the replicas involved. For example, a read repair is performed on Replica 2 as illustrated in Figure 5.

-
-../_images/Figure_5_read_repair.jpg -
-

Figure 5. Coordinator performs Read Repair

-

The most up-to-date data is returned to the client as illustrated in Figure 6. From the three replicas Replica 1 is not even read and thus not repaired. Replica 2 is repaired. Replica 3 is the most up-to-date and returned to client.

-
-../_images/Figure_6_read_repair.jpg -
-

Figure 6. Most up-to-date Data returned to Client

-
-
-

Read Consistency Level and Read Repair

-

The read consistency is most significant in determining if a read repair needs to be performed. As discussed in Table 1 a read repair is not needed for all of the consistency levels.

-

Table 1. Read Repair based on Read Consistency Level

Read Consistency Level | Description
ONE | Read repair is not performed as the data from the first direct read request satisfies the consistency level ONE. No digest read requests are involved for finding mismatches in data.
TWO | Read repair is performed if inconsistencies in data are found as determined by the direct and digest read requests.
THREE | Read repair is performed if inconsistencies in data are found as determined by the direct and digest read requests.
LOCAL_ONE | Read repair is not performed as the data from the direct read request from the closest replica satisfies the consistency level LOCAL_ONE. No digest read requests are involved for finding mismatches in data.
LOCAL_QUORUM | Read repair is performed if inconsistencies in data are found as determined by the direct and digest read requests.
QUORUM | Read repair is performed if inconsistencies in data are found as determined by the direct and digest read requests.
-

If read repair is performed it is made only on the replicas that are not up-to-date and that are involved in the read request. The number of replicas involved in a read request would be based on the read consistency level; in the example it is two.

-
-
-

Improved Read Repair Blocking Behavior in Cassandra 4.0

-

Cassandra 4.0 makes two improvements to read repair blocking behavior (CASSANDRA-10726).

-
    -
  1. Speculative Retry of Full Data Read Requests. Cassandra 4.0 makes use of speculative retry in sending read requests (full, not digest) to replicas if a full data response is not received, whether in the initial full read request or a full data read request during read repair. With speculative retry if it looks like a response may not be received from the initial set of replicas Cassandra sent messages to, to satisfy the consistency level, it speculatively sends additional read request to un-contacted replica/s. Cassandra 4.0 will also speculatively send a repair mutation to a minority of nodes not involved in the read repair data read / write cycle with the combined contents of all un-acknowledged mutations if it looks like one may not respond. Cassandra accepts acks from them in lieu of acks from the initial mutations sent out, so long as it receives the same number of acks as repair mutations transmitted.
  2. -
  3. Only blocks on Full Data Responses to satisfy the Consistency Level. Cassandra 4.0 only blocks for what is needed for resolving the digest mismatch and wait for enough full data responses to meet the consistency level, no matter whether it’s speculative retry or read repair chance. As an example, if it looks like Cassandra might not receive full data requests from everyone in time, it sends additional requests to additional replicas not contacted in the initial full data read. If the collection of nodes that end up responding in time end up agreeing on the data, the response from the disagreeing replica that started the read repair is not considered, and won’t be included in the response to the client, preserving the expectation of monotonic quorum reads.
  4. -
-
-
-

Diagnostic Events for Read Repairs

-

Cassandra 4.0 adds diagnostic events for read repair (CASSANDRA-14668) that can be used for exposing information such as:

-
  • Contacted endpoints
  • Digest responses by endpoint
  • Affected partition keys
  • Speculated reads / writes
  • Update oversized
-
-
-
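Diagnostic events are disabled by default. A minimal cassandra.yaml sketch, assuming the diagnostic_events_enabled flag introduced in Cassandra 4.0:

# cassandra.yaml: enable the diagnostic event framework (off by default)
diagnostic_events_enabled: true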

Background Read Repair

-

Background read repair, which was configured using the read_repair_chance and dclocal_read_repair_chance settings in cassandra.yaml, is removed in Cassandra 4.0 (CASSANDRA-13910).

-

Read repair is not an alternative to other kinds of repair, such as full repairs or replacing a node that keeps failing. The data returned even after a read repair has been performed may not be the most up-to-date data if the consistency level is not one that requires a response from all replicas.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/repair.html b/src/doc/4.0-alpha4/operating/repair.html deleted file mode 100644 index a40c7edd3..000000000 --- a/src/doc/4.0-alpha4/operating/repair.html +++ /dev/null @@ -1,278 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Repair

-

Cassandra is designed to remain available if one of its nodes is down or unreachable. However, when a node is down or unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but are a best effort, and aren't guaranteed to inform a node of 100% of the writes it missed. These inconsistencies can eventually result in data loss as nodes are replaced or tombstones expire.

-

These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their -respective datasets for their common token ranges, and streaming the differences for any out of sync sections between -the nodes. It compares the data with merkle trees, which are a hierarchy of hashes.

-
-

Incremental and Full Repairs

-

There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that’s been written since the previous incremental repair.

-

Incremental repairs are the default repair type, and if run regularly, can significantly reduce the time and io cost of -performing a repair. However, it’s important to understand that once an incremental repair marks data as repaired, it won’t -try to repair it again. This is fine for syncing up missed writes, but it doesn’t protect against things like disk corruption, -data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally.

-
-
-

Usage and Best Practices

-

Since repair can result in a lot of disk and network io, it’s not run automatically by Cassandra. It is run by the operator -via nodetool.

-

Incremental repair is the default and is run with the following command:

-
nodetool repair
-
-
-

A full repair can be run with the following command:

-
nodetool repair --full
-
-
-

Additionally, repair can be run on a single keyspace:

-
nodetool repair [options] <keyspace_name>
-
-
-

Or even on specific tables:

-
nodetool repair [options] <keyspace_name> <table1> <table2>
-
-
-

The repair command only repairs token ranges on the node being repaired, it doesn’t repair the whole cluster. By default, repair -will operate on all token ranges replicated by the node you’re running repair on, which will cause duplicate work if you run it -on every node. The -pr flag will only repair the “primary” ranges on a node, so you can repair your entire cluster by running -nodetool repair -pr on each node in a single datacenter.
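As an illustrative sketch (the hostnames are hypothetical, and any orchestration tool could be used instead of ssh), a primary-range repair of every node in one datacenter might be driven from a single machine:

# run a primary-range repair on each node of the datacenter
for host in node1 node2 node3; do
    ssh "$host" nodetool repair -pr
done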

-

The specific frequency of repair that’s right for your cluster, of course, depends on several factors. However, if you’re -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don’t want to run incremental repairs, a full repair every 5 days is a good place -to start.

-

At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays.
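The grace period is the per-table gc_grace_seconds option, which defaults to 864000 seconds (10 days). It can be adjusted with CQL; for example (keyspace and table names are illustrative):

ALTER TABLE cqlkeyspace.t WITH gc_grace_seconds = 864000;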

-
-
-

Other Options

-
-
-pr, --partitioner-range
-
Restricts repair to the ‘primary’ token ranges of the node being repaired. A primary range is just a token range for -which a node is the first replica in the ring.
-
-prv, --preview
-
Estimates the amount of streaming that would occur for the given repair command. This builds the merkle trees, and prints -the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated, -add the --full flag to estimate a full repair.
-
-vd, --validate
-
Verifies that the repaired data is the same across all nodes. Similar to --preview, this builds and compares merkle trees of repaired data, but doesn't do any streaming. This is useful for troubleshooting. If this shows that the repaired data is out of sync, a full repair should be run.
-
-
-

See also

-

nodetool repair docs

-
-
-
-

Full Repair Example

-

Full repair is typically needed to redistribute data after increasing the replication factor of a keyspace or after adding a node to the cluster. Full repair involves streaming SSTables. To demonstrate full repair start with a three node cluster.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool status
-Datacenter: us-east-1
-=====================
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address   Load        Tokens  Owns  Host ID                              Rack
-UN  10.0.1.115  547 KiB     256    ?  b64cb32a-b32a-46b4-9eeb-e123fa8fc287  us-east-1b
-UN  10.0.3.206  617.91 KiB  256    ?  74863177-684b-45f4-99f7-d1006625dc9e  us-east-1d
-UN  10.0.2.238  670.26 KiB  256    ?  4dcdadd2-41f9-4f34-9892-1f20868b27c7  us-east-1c
-
-
-

Create a keyspace with replication factor 3:

-
cqlsh> DROP KEYSPACE cqlkeyspace;
-cqlsh> CREATE KEYSPACE CQLKeyspace
-  ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-
-

Add a table to the keyspace:

-
cqlsh> use cqlkeyspace;
-cqlsh:cqlkeyspace> CREATE TABLE t (
-           ...   id int,
-           ...   k int,
-           ...   v text,
-           ...   PRIMARY KEY (id)
-           ... );
-
-
-

Add table data:

-
cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1');
-cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (2, 2, 'val2');
-
-
-

A query lists the data added:

-
cqlsh:cqlkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+------
- 1 | 1 | val1
- 0 | 0 | val0
- 2 | 2 | val2
-(3 rows)
-
-
-

Make the following changes to a three node cluster:

-
  1. Increase the replication factor from 3 to 4.
  2. Add a 4th node to the cluster.
-

When the replication factor is increased, the following message is output, indicating that a full repair is needed (CASSANDRA-13079):

-
cqlsh:cqlkeyspace> ALTER KEYSPACE CQLKeyspace
-           ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-Warnings :
-When increasing replication factor you need to run a full (-full) repair to distribute the
-data.
-
-
-

Perform a full repair on the keyspace cqlkeyspace table t with the following command:

-
nodetool repair -full cqlkeyspace t
-
-
-

Full repair completes in about a second as indicated by the output:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool repair -full cqlkeyspace t
-[2019-08-17 03:06:21,445] Starting repair command #1 (fd576da0-c09b-11e9-b00c-1520e8c38f00), repairing keyspace cqlkeyspace with repair options (parallelism: parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [t], dataCenters: [], hosts: [], previewKind: NONE, # of ranges: 1024, pull repair: false, force repair: false, optimise streams: false)
-[2019-08-17 03:06:23,059] Repair session fd8e5c20-c09b-11e9-b00c-1520e8c38f00 for range [(-8792657144775336505,-8786320730900698730], (-5454146041421260303,-5439402053041523135], (4288357893651763201,4324309707046452322], ... , (4350676211955643098,4351706629422088296]] finished (progress: 0%)
-[2019-08-17 03:06:23,077] Repair completed successfully
-[2019-08-17 03:06:23,077] Repair command #1 finished in 1 second
-[ec2-user@ip-10-0-2-238 ~]$
-
-
-

The nodetool tpstats command should show the completed repair as a value of 1 in the Completed column for the Repair-Task pool:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool tpstats
-Pool Name Active   Pending Completed   Blocked  All time blocked
-ReadStage  0           0           99       0              0
-…
-Repair-Task 0       0           1        0              0
-RequestResponseStage                  0        0        2078        0               0
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/security.html b/src/doc/4.0-alpha4/operating/security.html deleted file mode 100644 index 526289ea8..000000000 --- a/src/doc/4.0-alpha4/operating/security.html +++ /dev/null @@ -1,474 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-

There are three main components to the security features provided by Cassandra:

-
  • TLS/SSL encryption for client and inter-node communication
  • Client authentication
  • Authorization
-

By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still:

-
  • Craft internode messages to insert users into authentication schema
  • Craft internode messages to truncate or drop schema
  • Use tools such as sstableloader to overwrite system_auth tables
  • Attach to the cluster directly to capture write traffic
-

Correct configuration of all three security components should negate these vectors. Therefore, understanding Cassandra's security features is crucial to configuring your cluster to meet your security needs.

-
-

TLS/SSL Encryption

-

Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently.

-

In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can be overridden using the settings in cassandra.yaml, but this is not recommended unless there are policies in place which dictate certain settings, or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be updated.

-

FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See the java document on FIPS -for more details.

-

For information on generating the keystore and truststore files used in SSL communications, see the -java documentation on creating keystores

-
-
-

SSL Certificate Hot Reloading

-

Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes.

-

Certificate hot reloading may also be triggered using the nodetool reloadssl command. Use this if you want Cassandra to immediately notice the changed certificates.

-
-

Inter-node Encryption

-

The settings for managing inter-node encryption are found in cassandra.yaml in the server_encryption_options -section. To enable inter-node encryption, change the internode_encryption setting from its default value of none -to one value from: rack, dc or all.
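A minimal illustrative cassandra.yaml fragment; the keystore/truststore paths and passwords are placeholders, and the full set of options is documented in cassandra.yaml itself:

# cassandra.yaml: encrypt all inter-node traffic
server_encryption_options:
    internode_encryption: all
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra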

-
-
-

Client to Node Encryption

-

The settings for managing client to node encryption are found in cassandra.yaml in the client_encryption_options -section. There are two primary toggles here for enabling encryption, enabled and optional.

-
  • If neither is set to true, client connections are entirely unencrypted.
  • If enabled is set to true and optional is set to false, all client connections must be secured.
  • If both options are set to true, both encrypted and unencrypted connections are supported using the same port. Client connections using encryption with this configuration will be automatically detected and handled by the server.
-

As an alternative to the optional setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set optional to false and use the native_transport_port_ssl -setting in cassandra.yaml to specify the port to be used for secure client communication.
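A minimal illustrative cassandra.yaml fragment that requires encryption on all client connections without a separate SSL port (keystore path and password are placeholders):

# cassandra.yaml: require TLS on all client connections
client_encryption_options:
    enabled: true
    optional: false
    keystore: conf/.keystore
    keystore_password: cassandra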

-
-
-
-

Roles

-

Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -role_manager setting in cassandra.yaml. The default setting uses CassandraRoleManager, an implementation -which stores role information in the tables of the system_auth keyspace.

-

See also the CQL documentation on roles.

-
-
-

Authentication

-

Authentication is pluggable in Cassandra and is configured using the authenticator setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthenticator which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra’s permissions subsystem, so if authentication is disabled, effectively so are permissions.

-

The default distribution also includes PasswordAuthenticator, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication.

-
-

Enabling Password Authentication

-

Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster.

-

Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps:

-
  1. Open a cqlsh session and change the replication factor of the system_auth keyspace. By default, this keyspace uses SimpleReplicationStrategy and a replication_factor of 1. It is recommended to change this for any non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to configure a replication factor of 3 to 5 per-DC.
-
ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3};
-
-
-
  2. Edit cassandra.yaml to change the authenticator option like so:
-
authenticator: PasswordAuthenticator
-
-
-
  3. Restart the node.
  4. Open a new cqlsh session using the credentials of the default superuser:
-
cqlsh -u cassandra -p cassandra
-
-
-
  5. During login, the credentials for the default superuser are read with a consistency level of QUORUM, whereas those for all other users (including superusers) are read at LOCAL_ONE. In the interests of performance and availability, as well as security, operators should create another superuser and disable the default one. This step is optional, but highly recommended. While logged in as the default superuser, create another superuser role which can be used to bootstrap further configuration.
-
# create a new superuser
-CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super';
-
-
-
  6. Start a new cqlsh session, this time logging in as the new superuser, and disable the default superuser.
-
ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false;
-
-
-
  7. Finally, set up the roles and credentials for your application users with CREATE ROLE statements.
-

At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster.

-

Note that using PasswordAuthenticator also requires the use of CassandraRoleManager.

-

See also: Setting credentials for internal authentication, CREATE ROLE, ALTER ROLE, ALTER KEYSPACE and GRANT PERMISSION.

-
-
-
-

Authorization

-

Authorization is pluggable in Cassandra and is configured using the authorizer setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthorizer which performs no checking and so effectively grants all -permissions to all roles. This must be used if AllowAllAuthenticator is the configured authenticator.

-

The default distribution also includes CassandraAuthorizer, which does implement full permissions management -functionality and stores its data in Cassandra system tables.

-
-

Enabling Internal Authorization

-

Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests.

-

The following assumes that authentication has already been enabled via the process outlined in -Enabling Password Authentication. Perform these steps to enable internal authorization across the cluster:

-
  1. On the selected node, edit cassandra.yaml to change the authorizer option like so:
-
authorizer: CassandraAuthorizer
-
-
-
  2. Restart the node.
  3. Open a new cqlsh session using the credentials of a role with superuser privileges:
-
cqlsh -u dba -p super
-
-
-
  4. Configure the appropriate access privileges for your clients using GRANT PERMISSION statements. On the other nodes, this has no effect until the configuration is updated and the node restarted, so disruption to clients is avoided.
-
GRANT SELECT ON ks.t1 TO db_user;
-
-
-
  5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node restarts and clients reconnect, the enforcement of the granted permissions will begin.
-

See also: GRANT PERMISSION, GRANT ALL and REVOKE PERMISSION.

-
-
-
-

Caching

-

Enabling authentication and authorization places additional load on the cluster by frequently reading from the system_auth tables. Furthermore, these reads are in the critical paths of many client operations, and so have the potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role details are cached for a configurable period. The caching can be configured (and even disabled) from cassandra.yaml or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX are not persistent and will be re-read from cassandra.yaml when the node is restarted.

-

Each cache has 3 options which can be set:

-
-
Validity Period: Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache.
Refresh Rate: Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a shorter time than the validity period.
Max Entries: Controls the upper bound on cache size.
-
-

The naming for these options in cassandra.yaml follows the convention:

-
  • <type>_validity_in_ms
  • <type>_update_interval_in_ms
  • <type>_cache_max_entries
-

Where <type> is one of credentials, permissions, or roles.
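For example, the auth caches could be tuned in cassandra.yaml as follows; the values are illustrative, not recommendations:

# cassandra.yaml: auth cache tuning (illustrative values)
roles_validity_in_ms: 2000
roles_update_interval_in_ms: 1000
permissions_validity_in_ms: 2000
permissions_update_interval_in_ms: 1000
credentials_validity_in_ms: 2000
credentials_update_interval_in_ms: 1000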

-

As mentioned, these are also exposed via JMX in the mbeans under the org.apache.cassandra.auth domain.

-
-
-

JMX access

-

Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra’s own auth subsystem.

-

The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -cassandra-env.sh (or cassandra-env.ps1 on Windows) to change the LOCAL_JMX setting to no. Under the -standard configuration, when remote JMX connections are enabled, standard JMX authentication -is also switched on.

-

Note that by default, local-only connections are not subject to authentication, but this can be enabled.

-

If enabling remote connections, it is recommended to also use SSL connections.

-

Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as nodetool, are -correctly configured and working as expected.

-
-

Standard JMX Auth

-

Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -cassandra-env.sh by the line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

Edit the password file to add username/password pairs:

-
jmx_user jmx_password
-
-
-

Secure the credentials file so that only the user running the Cassandra process can read it:

-
$ chown cassandra:cassandra /etc/cassandra/jmxremote.password
-$ chmod 400 /etc/cassandra/jmxremote.password
-
-
-

Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in cassandra-env.sh:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

Then edit the access file to grant your JMX user readwrite permission:

-
jmx_user readwrite
-
-
-

Cassandra must be restarted to pick up the new settings.

-

See also : Using File-Based Password Authentication In JMX

-
-
-

Cassandra Integrated Auth

-

An alternative to the out-of-the-box JMX auth is to use Cassandra's own authentication and/or authorization providers for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not available until after a node has joined the ring, because the auth subsystem is not fully configured until that point. However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is recommended, where possible, to use local-only JMX auth during bootstrap and then, if remote connectivity is required, to switch to integrated auth once the node has joined the ring and initial setup is complete.

-

With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates can be managed centrally using just cqlsh. Furthermore, fine grained control over exactly which operations are permitted on particular MBeans can be achieved via GRANT PERMISSION.

-

To enable integrated authentication, edit cassandra-env.sh to uncomment these lines:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
-#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
-
-
-

And disable the JMX standard auth by commenting this line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

To enable integrated authorization, uncomment this line:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
-
-
-

Check standard access control is off by ensuring this line is commented out:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as:

-
CREATE ROLE jmx WITH LOGIN = false;
-GRANT SELECT ON ALL MBEANS TO jmx;
-GRANT DESCRIBE ON ALL MBEANS TO jmx;
-GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx;
-GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx;
-
-# Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx;
-
-# Grant the jmx role to one with login permissions so that it can access the JMX tooling
-CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false;
-GRANT jmx TO ks_user;
-
-
-

Fine grained access control to individual MBeans is also supported:

-
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner;
-
-
-

This permits the ks_user role to invoke methods on the MBean representing a single table in test_keyspace, while -granting the same permission for all table level MBeans in that keyspace to the ks_owner role.

-

Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered.

-

See also: Permissions.

-
-
-

JMX With SSL

-

JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit -the relevant lines in cassandra-env.sh (or cassandra-env.ps1 on Windows) to uncomment and set the values of these -properties as required:

-
-
com.sun.management.jmxremote.ssl: set to true to enable SSL
com.sun.management.jmxremote.ssl.need.client.auth: set to true to enable validation of client certificates
com.sun.management.jmxremote.registry.ssl: enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub
com.sun.management.jmxremote.ssl.enabled.protocols: by default, the protocols supported by the JVM will be used; override with a comma-separated list. Note that this is not usually necessary and using the defaults is the preferred option.
com.sun.management.jmxremote.ssl.enabled.cipher.suites: by default, the cipher suites supported by the JVM will be used; override with a comma-separated list. Note that this is not usually necessary and using the defaults is the preferred option.
javax.net.ssl.keyStore: set the path on the local filesystem of the keystore containing server private keys and public certificates
javax.net.ssl.keyStorePassword: set the password of the keystore file
javax.net.ssl.trustStore: if validation of client certificates is required, use this property to specify the path of the truststore containing the public certificates of trusted clients
javax.net.ssl.trustStorePassword: set the password of the truststore file
-
-

See also: Oracle Java7 Docs, -Monitor Java with JMX

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/snitch.html b/src/doc/4.0-alpha4/operating/snitch.html deleted file mode 100644 index 1cd3b2884..000000000 --- a/src/doc/4.0-alpha4/operating/snitch.html +++ /dev/null @@ -1,180 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Snitch" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Snitch

-

In Cassandra, the snitch has two functions:

-
  • it teaches Cassandra enough about your network topology to route requests efficiently.
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same "rack" (which may not actually be a physical location).
-
-

Dynamic snitching

-

The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is configured with the following properties in cassandra.yaml (an example with typical values follows the list):

-
  • dynamic_snitch: whether the dynamic snitch should be enabled or disabled.
  • dynamic_snitch_update_interval_in_ms: controls how often to perform the more expensive part of host score calculation.
  • dynamic_snitch_reset_interval_in_ms: if set greater than zero, this will allow 'pinning' of replicas to hosts in order to increase cache capacity.
  • dynamic_snitch_badness_threshold: the badness threshold will control how much worse the pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned host was 20% worse than the fastest.
-
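An illustrative cassandra.yaml fragment; the values shown are, to the best of our knowledge, the shipped defaults:

# cassandra.yaml: dynamic snitch tuning
dynamic_snitch_update_interval_in_ms: 100
dynamic_snitch_reset_interval_in_ms: 600000
dynamic_snitch_badness_threshold: 0.1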
-
-

Snitch classes

-

The endpoint_snitch parameter in cassandra.yaml should be set to the class that implements IEndpointSnitch, which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center or on the same rack. Out of the box, Cassandra provides the following snitch implementations:

-
-
GossipingPropertyFileSnitch: This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in cassandra-rackdc.properties and propagated to other nodes via gossip. If cassandra-topology.properties exists, it is used as a fallback, allowing migration from the PropertyFileSnitch.
SimpleSnitch: Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for single-datacenter deployments.
PropertyFileSnitch: Proximity is determined by rack and data center, which are explicitly configured in cassandra-topology.properties.
Ec2Snitch: Appropriate for EC2 deployments in a single Region, or in multiple regions with inter-region VPC enabled (available since the end of 2017, see AWS announcement). Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter, and the Availability Zone as the rack. Only private IPs are used, so this will work across multiple regions only if inter-region VPC is enabled.
Ec2MultiRegionSnitch: Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the public IP as well). You will need to open the storage_port or ssl_storage_port on the public IP firewall (for intra-Region traffic, Cassandra will switch to the private IP after establishing a connection).
RackInferringSnitch: Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an example of writing a custom Snitch class and is provided in that spirit.
-
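For example, to use the GossipingPropertyFileSnitch, set the snitch class in cassandra.yaml and describe the local node's location in cassandra-rackdc.properties (the datacenter and rack names below are illustrative):

# cassandra.yaml
endpoint_snitch: GossipingPropertyFileSnitch

# cassandra-rackdc.properties
dc=DC1
rack=RAC1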
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/operating/topo_changes.html b/src/doc/4.0-alpha4/operating/topo_changes.html deleted file mode 100644 index 67aadb0c0..000000000 --- a/src/doc/4.0-alpha4/operating/topo_changes.html +++ /dev/null @@ -1,222 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Adding, replacing, moving and removing nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Adding, replacing, moving and removing nodes

-
-

Bootstrap

-

Adding new nodes is called “bootstrapping”. The num_tokens parameter will define the amount of virtual nodes -(tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) -the node will become responsible for.

-
-

Token allocation

-

With the default token allocation algorithm the new node will pick num_tokens random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher amount of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead.

-

On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option -Dcassandra.allocate_tokens_for_keyspace=<keyspace>, where -<keyspace> is the keyspace from which the algorithm can find the load information to optimize token assignment for.
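One way to pass the flag, assuming the JVM options are managed through cassandra-env.sh, is shown below; the keyspace name is a placeholder:

# cassandra-env.sh: allocate tokens based on the load of an existing keyspace
JVM_OPTS="$JVM_OPTS -Dcassandra.allocate_tokens_for_keyspace=my_keyspace"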

-
-

Manual token assignment

-

You may specify a comma-separated list of tokens manually with the initial_token cassandra.yaml parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens.

-
-
-
-

Range streaming

-

After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state.

-

In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag -Dcassandra.consistent.rangemovement=false.

-
-
-

Resuming failed/hung bootstrap

-

On 2.2+, if the bootstrap process fails, it’s possible to resume bootstrap from the previous saved state by calling -nodetool bootstrap resume. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag --Dcassandra.reset_bootstrap_progress=true.

-

On lower versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data), and restart the bootstrap process.

-
-
-

Manual bootstrapping

-

It’s possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -auto_bootstrap: false. This may be useful when restoring a node from a backup or creating a new data-center.

-
-
-
-

Removing nodes

-

You can take a node out of the cluster with nodetool decommission to a live node, or nodetool removenode (to any -other machine) to remove a dead one. This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas.
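For example (the host ID is the value reported for the dead node by nodetool status, as in the earlier repair example):

# on the node that is leaving the cluster
nodetool decommission

# from any live node, to remove a dead node by its host ID
nodetool removenode 4dcdadd2-41f9-4f34-9892-1f20868b27c7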

-

No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually.

-
-
-

Moving nodes

-

When num_tokens: 1 it’s possible to move the node position in the ring with nodetool move. Moving is both a -convenience over and more efficient than decommission + bootstrap. After moving a node, nodetool cleanup should be -run to remove any unnecessary data.

-
-
-

Replacing a dead node

-

In order to replace a dead node, start cassandra with the JVM startup flag --Dcassandra.replace_address_first_boot=<dead_node_ip>. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node to be DOWN (DN), however this node will see itself as UP -(UN). Accurate replacement state can be found in nodetool netstats.
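Assuming the JVM options are managed through cassandra-env.sh, the flag could be set like this; the IP address is a placeholder for the dead node's address:

# cassandra-env.sh: replace a dead node on first boot
JVM_OPTS="$JVM_OPTS -Dcassandra.replace_address_first_boot=10.0.1.115"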

-

The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will only receive writes during the bootstrapping phase if it has a different IP address from the node that is being replaced. (See CASSANDRA-8523 and CASSANDRA-12344.)

-

Once the bootstrapping is complete the node will be marked “UP”.

-
-

Note

-

If any of the following cases apply, you MUST run repair to make the replaced node consistent again, since -it missed ongoing writes during/prior to bootstrapping. The replacement timeframe refers to the period from when the -node initially dies to when a new node completes the replacement process.

-
  1. The node is down for longer than max_hint_window_in_ms before being replaced.
  2. You are replacing using the same IP address as the dead node and replacement takes longer than max_hint_window_in_ms.
-
-
-
-

Monitoring progress

-

Bootstrap, replace, move and remove progress can be monitored using nodetool netstats which will show the progress -of the streaming operations.

-
-
-

Cleanup data after range movements

-

As a safety measure, Cassandra does not automatically remove data from nodes that “lose” part of their token range due -to a range movement operation (bootstrap, move, replace). Run nodetool cleanup on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node.
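Cleanup can be limited to a single keyspace (and optionally specific tables); for example, using the keyspace from the repair walkthrough above:

nodetool cleanup cqlkeyspace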

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/plugins/index.html b/src/doc/4.0-alpha4/plugins/index.html deleted file mode 100644 index 3cd777cc4..000000000 --- a/src/doc/4.0-alpha4/plugins/index.html +++ /dev/null @@ -1,117 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Third-Party Plugins" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Third-Party Plugins

-

Available third-party plugins for Apache Cassandra

-
-

CAPI-Rowcache

-

The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments.

-

The official page for the CAPI-Rowcache plugin contains further details how to build/run/download the plugin.

-
-
-

Stratio’s Cassandra Lucene Index

-

Stratio’s Lucene index is a Cassandra secondary index implementation based on Apache Lucene. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities such as with ElasticSearch or Apache Solr, including full text search capabilities, free multivariable, geospatial and bitemporal search, relevance queries and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability is guaranteed.

-

The official Github repository Cassandra Lucene Index contains everything you need to build/run/configure the plugin.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/search.html b/src/doc/4.0-alpha4/search.html deleted file mode 100644 index 5c92655f2..000000000 --- a/src/doc/4.0-alpha4/search.html +++ /dev/null @@ -1,105 +0,0 @@ ---- -layout: docpage - -title: "Search" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "" -doc-header-links: ' - -' -doc-search-path: "#" - -extra-footer: ' - - - - -' - ---- -
-
- -
-
-
- - - - -
- -
- - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/searchindex.js b/src/doc/4.0-alpha4/searchindex.js deleted file mode 100644 index c52c4d290..000000000 --- a/src/doc/4.0-alpha4/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["architecture/dynamo","architecture/guarantees","architecture/index","architecture/overview","architecture/storage_engine","bugs","configuration/cassandra_config_file","configuration/index","contactus","cql/appendices","cql/changes","cql/ddl","cql/definitions","cql/dml","cql/functions","cql/index","cql/indexes","cql/json","cql/mvs","cql/operators","cql/security","cql/triggers","cql/types","data_modeling/data_modeling_conceptual","data_modeling/data_modeling_logical","data_modeling/data_modeling_physical","data_modeling/data_modeling_queries","data_modeling/data_modeling_rdbms","data_modeling/data_modeling_refining","data_modeling/data_modeling_schema","data_modeling/data_modeling_tools","data_modeling/index","data_modeling/intro","development/ci","development/code_style","development/dependencies","development/documentation","development/gettingstarted","development/how_to_commit","development/how_to_review","development/ide","development/index","development/patches","development/release_process","development/testing","faq/index","getting_started/configuring","getting_started/drivers","getting_started/index","getting_started/installing","getting_started/production","getting_started/querying","index","new/auditlogging","new/fqllogging","new/index","new/java11","new/messaging","new/streaming","new/transientreplication","new/virtualtables","operating/audit_logging","operating/backups","operating/bloom_filters","operating/bulk_loading","operating/cdc","operating/compaction/index","operating/compaction/lcs","operating/compaction/stcs","operating/compaction/twcs","operating/compression","operating/hardware","operating/hints","operating/index","operating/metrics","operating/read_repair","operating/repair","operating/security","operating/snitch","operating/topo_changes","plugins/index","tools/cassandra_stress","tools/cqlsh","tools/index","tools/nodetool/assassinate","tools/nodetool/bootstrap","tools/nodetool/cleanup","tools/nodetool/clearsnapshot","tools/nodetool/clientstats","tools/nodetool/compact","tools/nodetool/compactionhistory","tools/nodetool/compactionstats","tools/nodetool/decommission","tools/nodetool/describecluster","tools/nodetool/describering","tools/nodetool/disableauditlog","tools/nodetool/disableautocompaction","tools/nodetool/disablebackup","tools/nodetool/disablebinary","tools/nodetool/disablefullquerylog","tools/nodetool/disablegossip","tools/nodetool/disablehandoff","tools/nodetool/disablehintsfordc","tools/nodetool/disableoldprotocolversions","tools/nodetool/drain","tools/nodetool/enableauditlog","tools/nodetool/enableautocompaction","tools/nodetool/enablebackup","tools/nodetool/enablebinary","tools/nodetool/enablefullquerylog","tools/nodetool/enablegossip","tools/nodetool/enablehandoff","tools/nodetool/enablehintsfordc","tools/nodetool/enableoldprotocolversions","tools/nodetool/failuredetector","tools/nodetool/flush","tools/nodetool/garbagecollect","tools/nodetool/gcstats","tools/nodetool/getbatchlogreplaythrottle","tools/nodetool/getcompactionthreshold","tools/nodetool/getcompactionthroughput","tools/nodetool/getconcurrency","tools/nodetool/getconcurrentcompactors","tools/nodetool/getconcurrentviewbuilders","tools/nodetool/getendpoints","tools/nodetool/getinterdcstreamthroughput","tools/nodetool/getlogginglevels","tools
/nodetool/getmaxhintwindow","tools/nodetool/getreplicas","tools/nodetool/getseeds","tools/nodetool/getsstables","tools/nodetool/getstreamthroughput","tools/nodetool/gettimeout","tools/nodetool/gettraceprobability","tools/nodetool/gossipinfo","tools/nodetool/handoffwindow","tools/nodetool/help","tools/nodetool/import","tools/nodetool/info","tools/nodetool/invalidatecountercache","tools/nodetool/invalidatekeycache","tools/nodetool/invalidaterowcache","tools/nodetool/join","tools/nodetool/listsnapshots","tools/nodetool/move","tools/nodetool/netstats","tools/nodetool/nodetool","tools/nodetool/pausehandoff","tools/nodetool/profileload","tools/nodetool/proxyhistograms","tools/nodetool/rangekeysample","tools/nodetool/rebuild","tools/nodetool/rebuild_index","tools/nodetool/refresh","tools/nodetool/refreshsizeestimates","tools/nodetool/reloadlocalschema","tools/nodetool/reloadseeds","tools/nodetool/reloadssl","tools/nodetool/reloadtriggers","tools/nodetool/relocatesstables","tools/nodetool/removenode","tools/nodetool/repair","tools/nodetool/repair_admin","tools/nodetool/replaybatchlog","tools/nodetool/resetfullquerylog","tools/nodetool/resetlocalschema","tools/nodetool/resumehandoff","tools/nodetool/ring","tools/nodetool/scrub","tools/nodetool/setbatchlogreplaythrottle","tools/nodetool/setcachecapacity","tools/nodetool/setcachekeystosave","tools/nodetool/setcompactionthreshold","tools/nodetool/setcompactionthroughput","tools/nodetool/setconcurrency","tools/nodetool/setconcurrentcompactors","tools/nodetool/setconcurrentviewbuilders","tools/nodetool/sethintedhandoffthrottlekb","tools/nodetool/setinterdcstreamthroughput","tools/nodetool/setlogginglevel","tools/nodetool/setmaxhintwindow","tools/nodetool/setstreamthroughput","tools/nodetool/settimeout","tools/nodetool/settraceprobability","tools/nodetool/sjk","tools/nodetool/snapshot","tools/nodetool/status","tools/nodetool/statusautocompaction","tools/nodetool/statusbackup","tools/nodetool/statusbinary","tools/nodetool/statusgossip","tools/nodetool/statushandoff","tools/nodetool/stop","tools/nodetool/stopdaemon","tools/nodetool/tablehistograms","tools/nodetool/tablestats","tools/nodetool/toppartitions","tools/nodetool/tpstats","tools/nodetool/truncatehints","tools/nodetool/upgradesstables","tools/nodetool/verify","tools/nodetool/version","tools/nodetool/viewbuildstatus","tools/sstable/index","tools/sstable/sstabledump","tools/sstable/sstableexpiredblockers","tools/sstable/sstablelevelreset","tools/sstable/sstableloader","tools/sstable/sstablemetadata","tools/sstable/sstableofflinerelevel","tools/sstable/sstablerepairedset","tools/sstable/sstablescrub","tools/sstable/sstablesplit","tools/sstable/sstableupgrade","tools/sstable/sstableutil","tools/sstable/sstableverify","troubleshooting/finding_nodes","troubleshooting/index","troubleshooting/reading_logs","troubleshooting/use_nodetool","troubleshooting/use_tools"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.todo":1,sphinx:54},filenames:["architecture/dynamo.rst","architecture/guarantees.rst","architecture/index.rst","architecture/overview.rst","architecture/storage_engine.rst","bugs.rst","configuration/cassandra_config_file.rst","configuration/index.rst","contactus.rst","cql/appendices.rst","cql/changes.rst","cql/ddl.rst","cql/definitions.rst","cql/dml.rst","cql/functions.rst","cql/index.rst","cql/indexes.rst","cql/json.rst","cql/
mvs.rst","cql/operators.rst","cql/security.rst","cql/triggers.rst","cql/types.rst","data_modeling/data_modeling_conceptual.rst","data_modeling/data_modeling_logical.rst","data_modeling/data_modeling_physical.rst","data_modeling/data_modeling_queries.rst","data_modeling/data_modeling_rdbms.rst","data_modeling/data_modeling_refining.rst","data_modeling/data_modeling_schema.rst","data_modeling/data_modeling_tools.rst","data_modeling/index.rst","data_modeling/intro.rst","development/ci.rst","development/code_style.rst","development/dependencies.rst","development/documentation.rst","development/gettingstarted.rst","development/how_to_commit.rst","development/how_to_review.rst","development/ide.rst","development/index.rst","development/patches.rst","development/release_process.rst","development/testing.rst","faq/index.rst","getting_started/configuring.rst","getting_started/drivers.rst","getting_started/index.rst","getting_started/installing.rst","getting_started/production.rst","getting_started/querying.rst","index.rst","new/auditlogging.rst","new/fqllogging.rst","new/index.rst","new/java11.rst","new/messaging.rst","new/streaming.rst","new/transientreplication.rst","new/virtualtables.rst","operating/audit_logging.rst","operating/backups.rst","operating/bloom_filters.rst","operating/bulk_loading.rst","operating/cdc.rst","operating/compaction/index.rst","operating/compaction/lcs.rst","operating/compaction/stcs.rst","operating/compaction/twcs.rst","operating/compression.rst","operating/hardware.rst","operating/hints.rst","operating/index.rst","operating/metrics.rst","operating/read_repair.rst","operating/repair.rst","operating/security.rst","operating/snitch.rst","operating/topo_changes.rst","plugins/index.rst","tools/cassandra_stress.rst","tools/cqlsh.rst","tools/index.rst","tools/nodetool/assassinate.rst","tools/nodetool/bootstrap.rst","tools/nodetool/cleanup.rst","tools/nodetool/clearsnapshot.rst","tools/nodetool/clientstats.rst","tools/nodetool/compact.rst","tools/nodetool/compactionhistory.rst","tools/nodetool/compactionstats.rst","tools/nodetool/decommission.rst","tools/nodetool/describecluster.rst","tools/nodetool/describering.rst","tools/nodetool/disableauditlog.rst","tools/nodetool/disableautocompaction.rst","tools/nodetool/disablebackup.rst","tools/nodetool/disablebinary.rst","tools/nodetool/disablefullquerylog.rst","tools/nodetool/disablegossip.rst","tools/nodetool/disablehandoff.rst","tools/nodetool/disablehintsfordc.rst","tools/nodetool/disableoldprotocolversions.rst","tools/nodetool/drain.rst","tools/nodetool/enableauditlog.rst","tools/nodetool/enableautocompaction.rst","tools/nodetool/enablebackup.rst","tools/nodetool/enablebinary.rst","tools/nodetool/enablefullquerylog.rst","tools/nodetool/enablegossip.rst","tools/nodetool/enablehandoff.rst","tools/nodetool/enablehintsfordc.rst","tools/nodetool/enableoldprotocolversions.rst","tools/nodetool/failuredetector.rst","tools/nodetool/flush.rst","tools/nodetool/garbagecollect.rst","tools/nodetool/gcstats.rst","tools/nodetool/getbatchlogreplaythrottle.rst","tools/nodetool/getcompactionthreshold.rst","tools/nodetool/getcompactionthroughput.rst","tools/nodetool/getconcurrency.rst","tools/nodetool/getconcurrentcompactors.rst","tools/nodetool/getconcurrentviewbuilders.rst","tools/nodetool/getendpoints.rst","tools/nodetool/getinterdcstreamthroughput.rst","tools/nodetool/getlogginglevels.rst","tools/nodetool/getmaxhintwindow.rst","tools/nodetool/getreplicas.rst","tools/nodetool/getseeds.rst","tools/nodetool/getsstables.rst","tools/nodetool/getstrea
mthroughput.rst","tools/nodetool/gettimeout.rst","tools/nodetool/gettraceprobability.rst","tools/nodetool/gossipinfo.rst","tools/nodetool/handoffwindow.rst","tools/nodetool/help.rst","tools/nodetool/import.rst","tools/nodetool/info.rst","tools/nodetool/invalidatecountercache.rst","tools/nodetool/invalidatekeycache.rst","tools/nodetool/invalidaterowcache.rst","tools/nodetool/join.rst","tools/nodetool/listsnapshots.rst","tools/nodetool/move.rst","tools/nodetool/netstats.rst","tools/nodetool/nodetool.rst","tools/nodetool/pausehandoff.rst","tools/nodetool/profileload.rst","tools/nodetool/proxyhistograms.rst","tools/nodetool/rangekeysample.rst","tools/nodetool/rebuild.rst","tools/nodetool/rebuild_index.rst","tools/nodetool/refresh.rst","tools/nodetool/refreshsizeestimates.rst","tools/nodetool/reloadlocalschema.rst","tools/nodetool/reloadseeds.rst","tools/nodetool/reloadssl.rst","tools/nodetool/reloadtriggers.rst","tools/nodetool/relocatesstables.rst","tools/nodetool/removenode.rst","tools/nodetool/repair.rst","tools/nodetool/repair_admin.rst","tools/nodetool/replaybatchlog.rst","tools/nodetool/resetfullquerylog.rst","tools/nodetool/resetlocalschema.rst","tools/nodetool/resumehandoff.rst","tools/nodetool/ring.rst","tools/nodetool/scrub.rst","tools/nodetool/setbatchlogreplaythrottle.rst","tools/nodetool/setcachecapacity.rst","tools/nodetool/setcachekeystosave.rst","tools/nodetool/setcompactionthreshold.rst","tools/nodetool/setcompactionthroughput.rst","tools/nodetool/setconcurrency.rst","tools/nodetool/setconcurrentcompactors.rst","tools/nodetool/setconcurrentviewbuilders.rst","tools/nodetool/sethintedhandoffthrottlekb.rst","tools/nodetool/setinterdcstreamthroughput.rst","tools/nodetool/setlogginglevel.rst","tools/nodetool/setmaxhintwindow.rst","tools/nodetool/setstreamthroughput.rst","tools/nodetool/settimeout.rst","tools/nodetool/settraceprobability.rst","tools/nodetool/sjk.rst","tools/nodetool/snapshot.rst","tools/nodetool/status.rst","tools/nodetool/statusautocompaction.rst","tools/nodetool/statusbackup.rst","tools/nodetool/statusbinary.rst","tools/nodetool/statusgossip.rst","tools/nodetool/statushandoff.rst","tools/nodetool/stop.rst","tools/nodetool/stopdaemon.rst","tools/nodetool/tablehistograms.rst","tools/nodetool/tablestats.rst","tools/nodetool/toppartitions.rst","tools/nodetool/tpstats.rst","tools/nodetool/truncatehints.rst","tools/nodetool/upgradesstables.rst","tools/nodetool/verify.rst","tools/nodetool/version.rst","tools/nodetool/viewbuildstatus.rst","tools/sstable/index.rst","tools/sstable/sstabledump.rst","tools/sstable/sstableexpiredblockers.rst","tools/sstable/sstablelevelreset.rst","tools/sstable/sstableloader.rst","tools/sstable/sstablemetadata.rst","tools/sstable/sstableofflinerelevel.rst","tools/sstable/sstablerepairedset.rst","tools/sstable/sstablescrub.rst","tools/sstable/sstablesplit.rst","tools/sstable/sstableupgrade.rst","tools/sstable/sstableutil.rst","tools/sstable/sstableverify.rst","troubleshooting/finding_nodes.rst","troubleshooting/index.rst","troubleshooting/reading_logs.rst","troubleshooting/use_nodetool.rst","troubleshooting/use_tools.rst"],objects:{},objnames:{},objtypes:{},terms:{"000kib":[64,207],"000mib":56,"00t89":22,"011mib":207,"014kib":64,"017kib":64,"018kib":[64,207],"01t02":218,"021kib":[64,207],"022kib":64,"024kib":64,"028809z":204,"029kib":64,"031mib":207,"033kib":64,"036kib":64,"03t04":22,"040kib":64,"044kib":64,"045kib":64,"049kib":64,"054mib":207,"055z":204,"056kib":207,"061kib":207,"062mib":207,"063kib":207,"064kib":207,"0665ae80b2d711e886c66d2c86
545d91":205,"06t22":218,"077mib":207,"078kib":207,"081kib":207,"082kib":207,"090kib":207,"092mib":207,"096gib":215,"0974e5a0aa5811e8a0a06d2c86545d91":207,"099kib":207,"0_222":49,"0_232":49,"0d927649052c":64,"0ee8b91fdd0":219,"0f03de2d9ae1":60,"0f9a6a95":49,"0h00m04":219,"0percentil":11,"0x0000000000000000":220,"0x0000000000000003":14,"0x00000004":13,"0x00007f829c001000":220,"0x00007f82d0856000":220,"0x00007f82e800e000":220,"0x00007f82e80cc000":220,"0x00007f82e80d7000":220,"0x00007f82e84d0800":220,"0x2a19":220,"0x2a29":220,"0x2a2a":220,"0x2a2c":220,"0x3a74":220,"100b":81,"100k":81,"100m":11,"100mb":[6,32],"1024l":61,"105kib":207,"10m":11,"10mb":6,"10s":[82,220],"10x":[6,67],"115kib":64,"115mib":207,"11e6":82,"11e8":219,"11e9":[60,64,76],"122kib":207,"128kb":220,"128mb":64,"128mib":[6,57],"128th":4,"12gb":71,"12h30m":22,"130mib":207,"142mib":211,"147mib":207,"14fa364d":49,"14t00":218,"150kib":207,"1520e8c38f00":76,"155kib":207,"15m":74,"160mb":67,"162kib":207,"165kib":207,"167kb":220,"16kb":50,"16l":61,"16mb":[45,66],"16th":6,"173kib":207,"176kib":207,"17t06":218,"184kb":220,"192kib":57,"19821dcea330":64,"19t03":[168,211],"1f20868b27c7":[54,76],"1gb":64,"1kb":50,"1mo":22,"1n_r":28,"1st":[22,75],"1ubuntu1":49,"200m":[218,220],"203mib":207,"2062b290":219,"20m":220,"20t20":204,"217kb":220,"217mib":207,"21x":49,"22x":49,"22z":204,"232kib":64,"232mib":207,"233kib":64,"23t06":218,"23z":204,"244m":220,"245mib":207,"247mib":207,"24h":22,"25005289beb2":204,"250m":6,"251m":220,"253mib":207,"256mb":6,"256th":6,"258mib":207,"25mb":220,"265kib":207,"266k":49,"270mib":207,"27t04":218,"280mib":207,"28757dde":49,"28757dde589f70410f9a6a95c39ee7e6cde63440e2b06b91ae6b200614fa364d":49,"28t17":218,"295kib":207,"296a2d30":64,"296a2d30c22a11e9b1350d927649052c":[62,64],"299kib":207,"29d":22,"29t00":218,"2cc0":219,"2e10":10,"2gb":71,"2nd":[6,11,75,78],"2xlarg":71,"300mib":207,"300s":6,"307kib":207,"30kb":220,"30s":6,"30t23":218,"30x":49,"311x":49,"314kib":207,"320k":49,"322kib":207,"325kib":207,"327e":82,"32gb":71,"32mb":[6,45],"331mib":207,"333kib":207,"33m":218,"348mib":207,"353mib":215,"3578d7de":204,"35ea8c9f":219,"361kib":207,"366b":220,"370mib":207,"378711z":204,"383b":220,"384z":204,"385b":220,"386kib":207,"387mib":207,"388mib":207,"392kib":207,"392mib":207,"394kib":207,"3f22a07b2bc6":204,"3ff3e5109f22":13,"3gb":[70,220],"3ms":220,"3rd":[6,74,78],"401mib":207,"406mib":207,"40a7":219,"40f3":13,"40fa":219,"40s":220,"40x":49,"410kib":207,"412kib":207,"416mib":215,"41b52700b4ed11e896476d2c86545d91":208,"41f9":[54,76],"423b":220,"423kib":207,"4248dc9d790e":204,"431kib":207,"43kb":220,"440kib":207,"443kib":207,"446eae30c22a11e9b1350d927649052c":[62,64],"449mib":207,"452kib":207,"457mib":207,"458mib":207,"45f4":[54,76],"461mib":207,"465kib":207,"46b4":[54,76],"46e9":219,"476mib":207,"481mib":207,"482mib":211,"48d6":204,"4ae3":13,"4d40":204,"4dcdadd2":[54,76],"4f34":[54,76],"4f3438394e39374d3730":208,"4f58":219,"4kb":[11,50],"4mib":[6,57],"4th":76,"4xlarg":71,"500m":220,"501mib":207,"50kb":[6,220],"50m":[11,220],"50mb":[6,61,68,212],"50th":216,"512mb":6,"512mib":[6,57],"513kib":207,"521kib":207,"522kib":64,"524kib":207,"536kib":207,"543mib":207,"545kib":207,"54kb":220,"550mib":207,"5573e5b09f14":13,"559kib":207,"561mib":207,"563kib":207,"563mib":207,"56m":218,"571kib":207,"576kb":220,"5850e9f0a63711e8a5c5091830ac5256":213,"589f7041":49,"591mib":207,"592kib":207,"5gb":61,"5kb":6,"5level":61,"5mb":67,"603kib":207,"606mib":207,"60m":11,"61111111111111e":208,"613mib":207,"619kib":207,"61de":219,"635kib":207,"6365332094dd1
1e88f324f9c503e4753":[206,209,211,212,214,215],"638mib":207,"640kib":207,"646mib":207,"64k":6,"64kb":[50,70],"64kib":57,"650b":220,"65c429e08c5a11e8939edf4f403979ef":[204,206],"65kb":220,"663kib":207,"665kib":207,"669kb":220,"683kib":64,"684b":[54,76],"684mib":207,"688kib":207,"690mib":207,"6e630115fd75":219,"6gb":219,"6ms":6,"701mib":207,"715b":220,"718mib":207,"71b0a49":218,"725mib":207,"730kib":207,"732mib":207,"734mib":207,"736kb":220,"7374e9b5ab08c1f1e612bf72293ea14c959b0c3c":38,"737mib":207,"738mib":207,"743kib":207,"744mib":207,"751mib":207,"752e278f":219,"75th":74,"771mib":207,"775mib":215,"780mib":207,"782kib":207,"783522z":204,"789z":204,"791mib":207,"793kib":207,"798mib":207,"79kb":220,"7f3a":219,"802kib":207,"807kib":64,"812mib":207,"813kib":207,"814kib":207,"832mib":207,"835kib":207,"840kib":207,"843mib":207,"845b":220,"846kib":207,"848kib":207,"84fc":204,"861mib":207,"86400s":66,"869kb":220,"872kib":207,"877mib":207,"880mib":207,"882kib":207,"889mib":207,"892kib":207,"894mib":207,"89h4m48":22,"8gb":[71,220],"8th":[6,65],"8u222":49,"903mib":207,"90percentil":11,"90th":74,"911kib":207,"920kib":207,"920mib":207,"9328455af73f":219,"938kib":207,"954kib":207,"957mib":207,"95ac6470":82,"95th":74,"965kib":207,"9695b790a63211e8a6fb091830ac5256":213,"974b":219,"975kib":207,"983kib":207,"98th":74,"993mib":207,"996kib":207,"99f7":[54,76],"99p":[11,64],"99percentil":[11,59],"99th":[74,216],"9dc1a293":219,"9e6054da04a7":219,"9eeb":[54,76],"9gb":220,"9percentil":11,"9th":74,"\u00eatre":9,"abstract":[34,39],"boolean":[9,11,12,14,17,20,22,28,29,60,82],"break":[11,31,42,66,75,213,217,220],"byte":[4,6,9,13,22,28,50,54,57,60,74,91,109,145,195,207,219],"case":[4,6,10,11,12,13,14,16,17,18,22,24,26,27,28,35,38,39,42,44,45,50,57,59,63,64,71,72,77,79,81,82,207,218,219,220],"catch":[34,57,209],"class":[3,6,11,14,22,29,34,40,44,50,53,54,56,59,60,61,62,64,66,70,73,76,77,81,146,158,179,207,218],"default":[0,4,6,10,11,13,14,17,18,20,22,27,33,40,44,45,46,50,53,54,57,58,59,61,62,63,64,65,66,67,68,69,70,72,74,75,76,77,79,81,82,86,105,109,116,145,146,148,151,161,162,168,183,185,196,204,207,208,212,216,218,219,220],"enum":[9,58],"export":[40,56,74,82,220],"final":[14,20,24,28,34,36,40,43,54,61,64,65,66,71,77,83,162,203,220],"float":[9,10,11,12,14,17,19,22,63,70],"function":[0,3,6,9,10,11,12,15,16,18,20,22,28,32,39,47,52,53,54,60,61,77,78,80,82,203],"goto":33,"import":[1,11,14,22,23,24,27,32,40,41,44,46,50,58,59,62,66,71,72,73,74,76,82,146,216,219,220],"int":[4,9,10,11,13,14,17,18,19,20,22,32,44,54,60,62,64,65,70,74,75,76],"long":[4,6,13,22,27,38,39,45,53,56,62,66,72,74,75,81,210,211,218,220],"new":[0,1,3,4,6,10,11,14,16,17,18,19,20,21,22,24,27,28,32,33,34,36,37,39,40,42,43,44,48,49,50,52,53,54,56,57,58,60,61,62,63,64,66,67,69,71,72,75,77,79,81,137,144,146,206,207,209,211,214,216],"null":[9,10,12,13,14,17,18,22,34,60,82],"public":[6,14,32,34,35,43,44,45,64,77,78],"return":[6,9,11,13,14,16,17,18,19,20,22,24,27,39,54,58,59,60,72,75,81,161,205,206,220],"short":[4,22,25,28,36],"static":[6,9,10,11,18,24,25,28,36,64,74,78,208],"super":[4,24,77,80,81],"switch":[4,6,10,20,24,40,45,57,73,74,77,78],"throw":[6,14,34,44,54,216],"transient":[3,6,11,52,55],"true":[6,11,12,17,20,22,27,28,40,45,53,54,56,58,59,60,61,62,65,66,67,69,72,77,79,82,143,146,213],"try":[0,6,11,24,26,27,34,35,40,42,45,56,61,64,66,76,161,207,219,220],"var":[4,6,34,49,204,205,206,207,208,209,210,211,212,213,214,215,218,220],"void":44,"while":[0,4,6,10,11,12,13,22,25,28,30,38,42,43,53,54,57,58,59,63,64,67,69,70,71,72,76,77,82,207,216,218,219,220],AES:6,AND:
[9,11,13,14,18,20,29,54,60,64,77,81,82,218],AWS:[50,71,78],Added:[10,55],Adding:[6,11,20,22,28,45,52,54,57,73,77],And:[11,14,20,57,77],Are:39,Ave:22,BUT:34,Being:59,But:[13,20,22,27,33,34,42,45,64,75,82],CAS:[1,6,219],CCS:220,CFs:[161,168],CLS:82,CMS:220,DCs:[6,59],DNS:45,Doing:[10,83,203],EBS:71,For:[0,3,4,6,9,10,11,12,13,14,15,16,17,18,20,21,22,23,24,25,26,28,32,37,42,43,44,45,46,49,50,51,54,57,61,66,67,69,71,72,75,76,77,78,81,82,207,208,209,212,216,218,219,220],GCs:6,HDs:220,Has:39,IDE:[30,37,41,52],IDEs:[30,40,41],IDs:[25,27,146,186],INTO:[6,9,11,13,14,17,22,54,62,64,76],IPs:[6,78,167,186],Ids:192,Its:[54,64],JKS:6,JPS:220,KBs:[6,72],LCS:[11,66,208],LTS:[49,56],MVs:32,NFS:71,NOT:[6,9,10,11,13,14,16,18,20,21,22,64],NTS:[6,50],N_s:28,Not:[13,20,42,50,56,66,70],ONE:[0,6,11,74,75,81,82],One:[0,24,27,42,44,45,56,64,66,69,220],PFS:6,Pis:71,QPS:216,Such:22,THE:6,TLS:[6,64,73,207],That:[0,11,12,18,22,27,42,45,59,68,82,220],The:[0,1,3,4,6,8,9,10,11,12,14,16,18,19,20,21,22,23,24,25,26,27,28,29,30,32,33,34,36,37,38,40,42,43,44,45,46,49,50,51,52,53,54,55,56,57,58,59,62,63,64,65,67,68,69,70,71,72,74,75,76,77,78,79,80,81,82,86,89,94,96,102,106,112,115,116,119,124,128,130,132,137,144,146,148,152,153,159,161,168,171,172,179,185,186,187,194,196,199,200,202,206,207,208,209,211,212,213,214,217,218,219,220],Their:22,Then:[13,44,45,66,77,209,213,220],There:[6,10,11,12,13,14,22,25,27,28,30,40,42,44,45,50,57,66,68,72,74,76,77,81,210,212,216,219,220],These:[0,4,6,11,14,30,40,49,53,57,64,74,76,77,81,82,214,216,217,218,219,220],USE:[9,14,15,53,54,60,61,62],USING:[9,13,16,21,22,69],Use:[6,11,13,20,24,25,45,51,52,54,64,65,73,77,81,82,83,84,89,146,151,161,192,199,203,204,209,210,211,214,217],Used:[23,24,25,26,27,28,29,30,74,220],Useful:[66,220],Uses:[6,17,73,78],Using:[1,11,13,31,36,44,45,50,54,55,73,77,83,203,204,207,211,214,218],WILL:6,WITH:[9,11,12,16,18,20,29,32,50,53,54,59,60,62,63,64,65,66,70,76,77,81,82],Will:[6,52,54,72,109,146,179,209],With:[0,1,6,13,17,45,50,58,59,64,66,75,76,79,85,218,220],Yes:45,_build:56,_build_java:56,_by_:24,_cache_max_entri:77,_cdc:65,_development_how_to_review:37,_if_:6,_main:56,_must_:6,_only_:218,_path_to_snapshot_fold:64,_trace:[74,219],_udt:14,_update_interval_in_m:77,_use:14,_validity_in_m:77,_x86_64_:220,a6fd:219,a8ed:60,abbrevi:43,abil:[14,20,30,45,70],abilityid:16,abl:[0,1,6,14,22,25,26,28,33,36,40,44,45,53,54,58,59,62,66,77,216,217],abort:33,about:[0,1,4,6,20,23,24,26,27,28,29,36,40,41,42,44,45,50,54,58,60,63,66,76,78,82,88,146,167,208,218,219,220],abov:[0,4,6,8,11,12,13,14,22,35,40,42,45,50,68,74,81,83,203,207,214,220],absenc:12,absent:0,abstracttyp:22,ac79:219,acceler:80,accept:[0,6,10,11,12,13,17,42,44,50,63,70,72,75,79,104,146],access:[3,6,10,11,20,22,24,26,30,32,40,42,60,61,64,71,72,73,74,80,207,208,216,217,220],accident:[24,206],accompani:6,accomplish:[26,54],accord:[0,1,4,6,24,45],accordingli:[6,14,45],account:[1,6,22,36,43,44,220],accru:[66,74],accrual:0,accumul:[6,66,72,74],accur:[6,24,28,45,63,79,167,208],accuraci:[63,148,196],acheiv:77,achiev:[0,1,6,32,54,66,74],achil:47,ack:[4,6,59,75],acknowledg:[0,25,72,75],acoount:74,acquir:[20,57,74],across:[0,1,6,11,20,25,27,28,32,42,58,72,74,76,77,78,81,146,150,208,215],act:[0,26,28,59,218],action:[6,13,20,40,72,215,220],activ:[4,6,30,37,42,53,54,57,60,65,74,76,82,146,148,196,216,218,219,220],active_task:60,active_tasks_limit:60,activetask:74,actor:77,actual:[0,4,6,13,21,28,30,34,36,39,43,45,57,58,60,61,66,67,76,78,81,161,211,220],acycl:20,adapt:[23,24,25,26,27,28,29,30],add:[0,6,9,10,11,22,24,25,28,32,33,36,37,38,39,41,4
2,43,46,49,52,53,54,56,58,59,61,62,64,66,72,75,76,77,81,209,214,218],addamsfamili:11,added:[0,1,3,4,6,10,11,14,19,24,36,39,49,54,57,58,60,62,64,65,66,68,72,75,76,212],adding:[0,6,13,14,24,28,32,39,50,54,71,76,82,209,215],addit:[0,1,3,6,9,11,13,19,20,22,24,25,26,27,28,32,40,42,46,50,53,57,58,59,60,61,62,66,69,71,72,74,75,77,82,218,220],addition:[11,13,33,59,69,76,81,218],additional_write_polici:[11,64],addr:57,address:[6,8,17,22,25,26,29,33,40,42,46,52,53,54,56,57,60,61,62,64,74,76,78,79,80,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,219,220],addressload:54,addrow:64,adher:10,adjac:[32,69],adjust:[6,11,25,50,63],adler32:4,adopt:57,advanc:[0,3,6,36,60,61,73,77,217],advantag:[0,27,50,71],advers:[45,219],advic:[42,45],advis:[6,12,18,22,45,49],advoc:0,ae6b2006:49,aefb:204,af08:13,afd:22,affect:[0,11,24,32,39,42,45,53,58,66,75,168,211,216,220],afford:6,afraid:27,after:[5,6,10,11,12,13,14,16,17,18,24,25,27,32,40,42,43,45,49,50,53,54,56,59,62,64,65,66,67,71,72,73,74,75,76,77,78,82,209,210,213],afterward:[33,36,40,44],afunct:14,again:[0,6,25,42,43,54,56,62,66,76,79,82,210,213],against:[6,11,14,18,27,30,36,42,44,45,54,58,60,71,72,76,79,81,82,161,208,220],agent:[24,53,54,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,220],agentlib:40,aggreg:[3,6,9,10,13,15,18,20,53,54,60,61,74,82],aggress:216,ago:210,agre:[0,75],ahead:48,aid:[3,12],aim:[6,218],akeyspac:14,akin:57,alg:[64,207],algorithm:[0,6,11,50,64,79,207,218],alia:[10,13,14,47],alias:[6,10,18],alic:20,align:34,aliv:[0,6],all:[0,1,3,4,6,9,11,12,13,14,17,18,19,22,23,24,25,26,27,28,29,30,32,33,34,35,36,38,39,40,42,44,49,52,53,54,56,57,58,59,60,61,63,64,65,66,67,68,69,72,74,75,76,77,79,81,82,83,86,87,88,104,116,121,137,138,143,146,148,150,159,162,168,183,185,187,196,198,199,200,203,205,209,211,215,216,218,219,220],allmemtableslivedatas:74,allmemtablesoffheaps:74,allmemtablesonheaps:74,alloc:[0,6,45,50,56,57,65,71,74],allocate_tokens_for_keyspac:[60,79],allocate_tokens_for_local_replication_factor:50,allow:[0,1,3,4,6,9,10,11,12,14,16,17,18,22,23,24,27,33,36,37,46,50,54,58,59,60,63,64,65,66,69,70,71,75,76,78,81,211,219,220],allowallauthent:[6,77],allowallauthor:[6,77],allowallinternodeauthent:6,allowallnetworkauthor:6,almost:[0,4,6,14,22,57,67,216,220],alon:[11,34],along:[0,6,13,27,37,43,56,61,72,143,146,218],alongsid:[51,82],alpha4:[11,22,52],alphabet:34,alphanumer:[11,20],alreadi:[6,11,14,16,18,22,24,26,27,42,45,50,54,56,57,64,68,77,81,83,199,203,212],also:[0,3,4,6,10,11,12,13,14,17,18,20,22,23,24,25,26,28,30,32,33,36,40,42,43,44,45,46,49,50,54,56,58,59,61,62,64,65,66,67,68,69,71,72,74,75,77,79,82,116,200,213,214,218,219,220],alter:[0,9,10,15,17,28,45,59,60,63,65,66,70,76,77],alter_keyspac:53,alter_keyspace_stat:12,alter_rol:53,alter_role_stat:12,
alter_t:53,alter_table_instruct:11,alter_table_stat:12,alter_typ:53,alter_type_modif:22,alter_type_stat:[12,22],alter_user_stat:12,alter_view:53,altern:[10,11,12,13,17,22,40,42,46,49,64,71,72,75,77,207],although:[6,27,28,42,57,81,218,220],altogeth:57,alwai:[0,3,4,6,9,10,11,13,14,18,22,27,34,36,42,43,44,45,50,54,57,59,66,67,71,81,216,220],amazon:[0,3,49],amazonaw:56,amen:[24,26,27,29],amend:38,amenities_by_room:[24,29],amenity_nam:29,ami:49,among:[32,75],amongst:11,amount:[0,6,11,13,22,28,40,42,44,45,58,59,66,70,71,72,74,76,79,82,161,220],amplif:[50,67,71],anaggreg:14,analogu:13,analysi:[24,28,31,217,218],analyt:[24,63],analyz:[25,28,32,44,220],ancestor:[4,214],ani:[0,4,6,10,11,12,13,14,17,18,20,21,22,24,25,27,30,33,35,36,38,39,40,42,43,44,46,49,50,52,53,54,56,57,58,59,60,64,66,69,71,72,74,75,76,77,79,81,82,84,137,143,146,151,168,183,204,208,211,213,214,217,218,219],annot:34,announc:78,anonym:[12,22,53,60,61],anoth:[1,6,11,14,20,22,24,27,28,32,44,54,57,59,60,62,64,66,75,77,82,205,212,217,220],anotherarg:14,answer:[41,220],ant:[33,35,40,42,44,56],antclassload:44,anti:[0,6,22,31,59,72],anticip:[0,11,59],anticompact:[66,74,192,212],anticompactiontim:74,antientropystag:[60,74,219],antipattern:71,anymor:[38,66],anyon:34,anyth:[59,66],anywai:[6,56],anywher:[13,57,65],apach:[0,1,2,3,5,6,7,14,21,30,32,34,35,36,38,39,41,42,43,44,45,48,49,53,54,56,57,58,59,60,61,62,64,66,70,72,74,77,80,83,204,205,206,207,208,209,210,211,212,214,215,218],apart:61,api:[0,6,8,17,51,58,60,78],appar:24,appear:[6,11,12,14,24,54,66,69,75,82],append:[4,22,24,38,61,71,72,74,82,218],appendic:[15,52],appendix:[12,15],appl:22,appli:[0,4,6,9,10,11,12,13,20,22,38,42,44,45,57,72,74,75,79,81,82],applic:[0,1,3,6,11,20,23,24,25,27,28,29,31,32,34,37,39,40,52,53,55,56,59,60,61,64,70,77,81,218],appreci:42,approach:[4,24,25,26,27,28,32,57,66,79],appropri:[6,11,20,22,39,42,43,57,77,78,79,218],approv:33,approxim:[0,28,50,54,68,69,74,208],apt:[49,220],arbitrari:[11,12,22,62,81],architectur:[3,25,45,52,80],archiv:[4,6,43,49,54,65,109],archive_command:[53,54,109],archive_retri:[54,109],area:[3,37,220],aren:[13,76],arg:[14,54,146,184,204,208,214],argnam:14,argnum:14,argument:[6,11,13,14,16,17,45,46,50,53,54,62,64,70,81,82,84,85,86,87,89,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202],arguments_declar:14,arguments_signatur:14,arithmet:[10,12,15,52],arithmetic_oper:12,armor:43,around:[6,20,24,27,32,53,54,66,71,78,220],arrai:[6,23,45,49,57],arriv:[6,42,45,57],arrow:26,artem:24,artifact:[26,35,40,41],artifici:11,asap:10,asc:[9,11,13,29,60],ascend:[11,13],ascii:[9,14,17,22],asdf:204,asf:[8,40,43],ask:[5,28,42,43,44,52,77],aspect:[0,11,59],assassin:146,assertionerror:34,assertrow:44,assess:[219,220],assign:[0,6,13,20,25,27,45],assist:24,associ:[0,6,11,26,54,60,80,214,216],assum:[0,6,11,14,25,26,27,28,36,40,77,78,216,220],assumpt:[0,77],assur:55,asterisk:25,astyanax:47,async:[6,77],asynchron:[16,45,54,58,71],asynchroni:74,ata:27,atabl:14,atom:[3,11,13,21,38,75],atomiclong:74,attach:[37,42,77,80,220],attack:77,attemp:74,attempt:[0,6,11,16,18,20,22,24,45,50,53,57,59,61,69,72,74,75,76,77,82,83,162,203,213,218],attent:[34,35,42,43],attract:24,attribut:[23,24,25,32,49,53,54,61,66],audienc:[0,59],audit:[3,6,27,52,54,55,95,105,146],audit_log:6,audit_logging_opt:[53,61],audit_logging_options_en:60,audit_logs_dir:[53,61],auditlog:[53,105],auditlogentrytyp:53,auditlogkeyspac:[53,54],auditlogview:[3,53,61],
audt:61,aug:[53,54,62,64,213],auth:[6,53,61,64,207],authent:[10,49,53,61,64,73,82,207],authenticatedus:6,author:[9,20,22,36,42,73,81],authorizationproxi:77,authprovid:[64,207],auto:[0,6,11,45,56,62,81,187],auto_bootstrap:79,auto_snapshot:[60,62],autocompact:[66,96,106,146,187],autom:[30,34,37],automat:[6,13,14,16,32,33,36,40,44,45,49,53,58,62,66,76,77,79,81],automatic_sstable_upgrad:60,avail:[0,2,3,6,8,11,14,20,23,24,25,26,27,28,29,30,33,40,42,43,44,50,54,56,57,59,61,65,72,75,76,77,78,80,82,86,116,159,168,179,199,216,218,220],availabil:6,available_rooms_by_hotel_d:[24,28,29],availablil:50,averag:[6,11,14,28,49,68,74,207,218,219,220],average_live_cells_per_slice_last_five_minut:195,average_s:11,average_tombstones_per_slice_last_five_minut:195,averagefin:14,averagest:14,avg:[28,64,207,220],avg_bucket_s:68,avgqu:220,avgrq:220,avoid:[0,6,11,12,27,32,34,39,42,54,57,58,59,63,66,67,69,71,74,77,78,82,200,207],awai:[40,79,82,219],await:220,awar:[0,6,11,42,57,63,70,72,167,216,219],awesom:81,axi:56,az123:25,azur:71,b00c:76,b09:49,b10:49,b124:13,b135:64,b2c5b10:218,b32a:[54,76],b64cb32a:[54,76],b70de1d0:13,b7a2:219,b7c5:219,b957:204,b9c5:219,back:[3,6,11,32,36,54,57,59,60,61,66,72,74,75,79,143,146,219],backend:6,background:[43,45,73,77,218,220],backlog:[6,57],backpressur:[6,57],backpressurestrategi:6,backup:[3,6,52,66,73,79,82,97,107,146,188,213,214],backward:[6,10,11,15,20,22],bad:[6,14,45,77,78,216,219],balanc:[0,3,6,24,50,58,72,79,216,219],banana:22,band:22,bandwidth:[6,57,58,80,220],bank:24,bar:[12,34,220],bardet:22,bare:6,base:[0,1,4,6,10,11,13,14,18,19,20,22,24,25,27,28,32,33,35,37,38,41,42,43,44,45,49,50,53,54,55,57,61,66,71,74,75,77,79,80,208,216,219],baseurl:49,bash:[45,56,220],bashrc:56,basi:[0,6,11,33,45,54,57,70],basic:[0,6,11,24,25,32,57,60,66,68,69,71,81,83,203,214,217],batch:[0,2,4,6,9,11,15,27,44,52,53,54,59,60,73,75,81,82,216,220],batch_remov:[74,219],batch_stat:12,batch_stor:[74,219],batchlog:[1,13,74,118,146,163,169],batchtimemilli:54,batchtyp:81,bbee:64,bc9cf530b1da11e886c66d2c86545d91:211,be34:13,beatl:22,beca:82,becam:[0,3],becaus:[0,4,6,11,13,14,20,24,27,28,32,56,57,62,66,70,72,74,75,77,208,211,220],becom:[0,4,6,11,14,20,32,42,62,66,69,74,75,77,79],been:[0,1,4,6,10,11,13,14,15,20,22,24,32,39,42,43,50,54,56,57,58,59,60,62,64,66,67,71,75,76,77,168,211,214,216],befor:[0,4,6,10,11,13,14,16,19,21,22,25,26,28,33,36,37,40,41,43,44,47,53,54,57,59,60,61,62,64,66,68,72,74,75,77,78,79,81,82,109,185,203,204,205,206,207,208,209,210,211,212,213,214,215,216],began:3,begin:[9,12,13,24,26,27,44,77,82],beginn:42,begintoken:82,behalf:24,behav:[6,59],behavior:[0,6,10,11,14,17,22,34,39,63,69,73,79,162,216],behind:[6,34,44,45,53,54,61,67],being:[0,4,6,11,13,17,22,24,28,39,43,44,45,53,54,57,59,61,63,64,66,67,68,74,75,76,79,209,218,219,220],believ:[72,216],belong:[0,3,11,13,14,58,74,86,146],below:[6,11,12,13,17,20,22,23,24,25,26,27,28,32,35,42,55,57,61,66,68,74,82,92,207,209,216,218],benchmark:[54,58,71,81],benefici:69,benefit:[0,6,37,50,53,63,66,71,73,207],best:[0,3,6,24,27,29,30,32,36,43,44,50,66,67,72,73,77,78,216,220],best_effort:6,better:[6,34,36,37,42,57,67,71,72,207,219,220],between:[0,1,4,6,9,10,11,12,13,15,23,24,25,26,31,32,42,45,55,57,58,63,66,70,72,74,75,76,77,80,81,161,183,220],beyond:[6,59,69,82,200],big:[6,28,62,64,66,89,204,205,206,207,208,210,211,212,213,214,215],bigg:28,bigger:[11,50,68],biggest:14,bigint:[9,14,17,19,22,60],bigintasblob:14,bigtabl:3,bigtableread:[205,211,213,215],billion:28,bin:[40,49,51,54,56,64,82,218],binari:[14,43,48,53,54,61,77,98,108,146,189,218],binauditlogg:[53,
105],bind:[6,10,12,14,45,54,64],bind_mark:[12,13,18,22],binlog:[54,61],biolat:220,biolog:11,biosnoop:220,birth:13,birth_year:13,bit:[14,17,22,28,35,42,45,49,56,59,70,71],bite:45,bitempor:80,bitstr:9,black:6,blank:[6,34,45,208],blindli:45,blob:[9,10,12,17,22,52,70,72,81],blob_plain:43,blobasbigint:14,blobastyp:14,block:[4,6,11,38,46,53,54,57,58,60,61,66,67,71,73,74,76,77,83,109,203,218,219,220],blockdev:50,blocked_task:60,blocked_tasks_all_tim:60,blockedonalloc:6,blockingbufferhandl:57,blog:[6,13,50],blog_til:13,blog_titl:13,blogpost:81,bloom:[4,11,52,58,59,71,73,74,208],bloom_filter_false_posit:195,bloom_filter_false_ratio:195,bloom_filter_fp_ch:[4,11,63,64],bloom_filter_off_heap_memory_us:195,bloom_filter_space_us:195,bloomfilterdiskspaceus:74,bloomfilterfalseposit:74,bloomfilterfalseratio:74,bloomfilteroffheapmemoryus:74,blunt:77,bnf:12,bob:[13,20],bodi:[6,11,12,57,81],bog:23,boilerpl:41,book:[23,26],boolstyl:82,boost:6,boot:45,bootstrap:[0,6,50,52,56,58,66,70,73,74,77,146,151,179,209],born:13,borrow:57,both:[0,1,3,6,11,13,14,18,22,23,24,28,37,38,39,42,43,45,46,49,53,54,57,58,59,63,66,70,71,74,75,77,79,81,82,214,220],bottleneck:6,bottom:45,bound:[4,6,11,12,22,54,57,58,61,64,71,77],boundari:209,box:[3,6,26,77,78],brace:34,bracket:12,braket:12,branch:[33,36,38,39,40,43,44],branchnam:42,brand:0,breadcrumb:216,breakdown:[219,220],breakpoint:40,breed:44,brendangregg:220,breviti:57,brief:[50,220],briefli:[1,219],bring:[0,6,56,58],brk:45,broadcast:6,broadcast_address:78,broken:[6,66,74,211],brows:[6,43,204,205,206,207,208,209,210,211,212,214,215],browser:[82,220],bucket:[0,28,68],bucket_high:68,bucket_low:68,buff:220,buffer:[4,6,50,54,56,57,64,65,72,74],bufferpool:[56,73],buffers_mb:220,bug:[10,38,41,43,44,45,52,76],build:[18,27,30,33,35,36,37,41,42,43,44,49,52,55,64,74,76,80,81,146,202],builder:[6,64,123,146,176],buildfil:56,built:[1,18,28,32,40,56,74],bulk:[52,58,72,73,207],bump:[4,10,209],bunch:34,burn:65,busi:[0,24,27],button:[36,40,45],bytebuff:[14,57],byteorderedpartition:[6,14],bytesanticompact:74,bytescompact:74,bytesflush:74,bytesmutatedanticompact:74,bytespendingrepair:74,bytesrepair:74,bytestyp:[9,208],bytesunrepair:74,bytesvalid:74,bz2:43,c09b:76,c217:64,c22a:64,c3909740:60,c39ee7e6:49,c60d:204,c73de1d3:13,c7556770:60,c_l:28,cach:[0,6,11,20,35,45,46,50,64,71,73,78,137,139,140,141,146,170,171,219],cachecleanupexecutor:[60,74,219],cached_mb:220,cachenam:74,cachestat:220,cadenc:56,calcul:[0,6,23,24,25,31,57,63,64,65,69,74,75,78,207,208],call:[0,9,11,12,13,14,20,24,34,36,41,46,52,54,59,61,62,64,66,70,71,74,79,146,179,220],callback:[57,74],caller:34,can:[0,3,4,5,6,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24,25,27,28,30,33,34,35,36,37,38,39,40,42,43,44,46,49,50,51,52,53,54,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,79,81,82,84,86,87,89,94,96,102,106,109,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,204,205,206,207,209,210,211,212,213,216,217,218,219,220],cancel:[10,162],candid:67,cannot:[1,6,9,11,13,14,17,18,20,22,27,32,44,53,56,57,58,59,60,64,66,72,77,84,146,219],cap:[2,12,120,125,131,146,173,178,181],capabl:[0,6,45,80,82],capac:[0,6,28,32,53,54,57,65,74,78,80,146,148,170,196,215,216,218,220],capacit:216,capacity_byt:60,capi:52,captur:[6,11,24,52,54,73,77,83,218],cardin:208,care:[6,66,81,161,220],carefulli:[35,50],carlo:20,carpent:[23,24,25,26,27,28,29,30],carri:[34,161],cascad:27,cascommit:74,cascontent:[132,182],casprepar
:74,caspropos:74,casread:74,cassablanca:22,cassafort:47,cassanda:0,cassandra:[0,1,2,3,4,5,8,10,11,13,14,18,20,21,22,23,24,25,26,28,29,31,32,34,35,37,38,42,47,48,50,51,53,54,56,57,58,59,60,62,63,64,66,67,70,71,72,74,76,78,79,82,105,109,146,157,161,164,168,193,201,203,204,205,206,207,208,209,210,211,212,214,215,216,217,219,220],cassandra_flam:220,cassandra_hom:[4,6,56,65,72,77,218],cassandra_job_dsl_se:33,cassandra_stack:220,cassandra_use_jdk11:56,cassandraauthor:[6,77],cassandradaemon:[40,56],cassandrafullquerylog:54,cassandrakeyspacesimpl:59,cassandralogin:77,cassandranetworkauthor:6,cassandrarolemanag:[6,77],casser:47,cassi:47,cast:[10,13,18],caswrit:74,cat:[22,204,220],catalog:[62,64],catalogkeyspac:[62,64],catalokeyspac:62,categor:74,categori:[11,12,13,14,61,105],caught:[39,74],caus:[0,4,6,18,27,45,57,66,68,69,76,77,209,211,218,219,220],caution:[6,70],caveat:77,cbc:6,ccm:[39,44,220],ccmlib:44,cd941b956e60:219,cdc:[6,11,64],cdc_enabl:65,cdc_free_space_check_interval_m:65,cdc_free_space_in_mb:65,cdc_raw:[6,65],cdc_raw_directori:65,cdccompactor:6,cde63440:49,cdf7:60,cell:[6,22,28,74,116,200,204,208,219],center:[6,11,20,22,24,45,50,72,78,79,102,112,146,161],cento:49,central:[40,77,82,216],centric:[20,32,36],certain:[0,1,4,6,9,11,20,36,44,50,58,59,66,77,205],certainli:[14,24,26,28],certif:[73,146,157],cf188983:204,cfname:[130,148,196],cfs:34,chain:20,challeng:[3,37,80],chanc:[37,63,75,208],chang:[0,4,6,11,12,15,20,22,27,32,33,35,36,37,38,40,41,43,48,50,52,53,54,57,59,60,62,66,67,70,73,74,76,77,179,206,209,218,220],changelog:43,channel:0,charact:[11,12,13,17,20,22,28,34,81,82],character:6,chat:8,cheap:[6,11,54,55],chebotko:[24,25,30],check:[0,6,13,24,33,34,39,40,42,43,44,45,49,54,56,57,59,63,64,65,66,67,69,74,77,83,137,146,161,200,203,215,219],checklist:[41,42,52],checkout:[36,40,42,43],checksum:[4,6,11,58,70,146,200,214],chen:23,cherri:38,chess:13,child:82,chmod:[53,54,64,77],choic:[0,6,11,32,43,49,52,66,69,73,210],choos:[0,1,3,6,11,41,43,47,48,71,74],chord:0,chose:0,chosen:[0,6,11,14,219],chown:77,christoph:22,chrome:82,chronicl:[53,54,61],chunk:[4,6,11,45,50,57,58,60,70,82],chunk_length_in_kb:[11,50,64,70],chunk_length_kb:6,chunk_lenth_in_kb:11,chunkcach:74,chunksiz:82,churn:6,cipher:[6,64,77,207],cipher_suit:6,circular:20,circumst:[11,27,75],citi:[22,29],claim:30,clash:12,class_nam:[4,6],classload:[44,56],classpath:[6,14,22,74],claus:[10,11,14,16,17,18,20,34],clean:[6,34,54,74,83,86,146,164,203,207],cleanli:42,cleanup:[45,59,66,73,74,116,146,192,214],clear:[39,42,64,83,88,137],clearli:26,clearsnapshot:[62,146],click:[13,40,42,43,44,220],client:[0,1,4,6,8,10,11,13,17,20,22,27,32,39,45,46,48,49,50,52,53,61,64,65,71,72,73,75,82,88,146,207,211,217,218,219,220],client_encryption_opt:[64,77,207],clientrequest:74,clientstat:146,clock:[0,6],clockr:6,clockwis:0,clojur:48,clone:[40,43,45,56,82,220],close:[6,15,36,43,64,77,220],closer:63,closest:75,cloud:[50,73],clue:[24,220],cluster:[1,2,3,4,6,9,10,11,13,14,21,22,24,27,28,29,30,32,39,44,46,50,51,52,54,56,57,58,59,60,62,64,66,68,71,72,74,75,76,77,78,79,81,82,83,93,114,118,134,146,169,186,203,208,215,216,217,218,220],cluster_nam:[46,51,60],clustering_column:11,clustering_ord:11,clusteringtyp:208,cmake:220,cmd:220,cmsparallelremarken:40,coalesc:6,coalescingstrategi:6,codd:27,code:[6,10,12,14,21,25,28,32,33,36,37,38,39,40,41,44,52,56,70,74,216,220],codestyl:34,coher:80,col:[14,81],cold:6,collat:6,collect:[0,3,6,10,11,12,13,14,15,17,23,25,30,71,73,74,75,81,116,218],collection_liter:12,collection_typ:22,collector:218,color:[22,82,220],column1:9,column:[0,
1,3,4,6,9,10,11,12,13,14,15,16,17,18,20,22,24,25,27,28,29,30,32,49,54,57,60,62,70,74,75,76,80,81,82,130,148,168,185,196,208,211,213,218,219],column_count:54,column_definit:[11,54],column_nam:[11,13,16],columnfamili:[4,6,9,34,66,76,206,209],columnspec:81,colupdatetimedeltahistogram:74,com:[6,14,33,34,36,38,43,49,56,77,220],combin:[0,3,4,6,10,50,57,69,75],come:[6,9,26,28,77,220],comingl:69,comma:[6,11,12,13,46,53,61,64,77,79,82,105,148,151,196,207],command:[0,3,6,18,27,30,35,38,43,44,45,46,51,53,54,56,60,62,64,70,72,73,76,77,81,83,84,85,86,87,89,94,96,102,106,109,112,115,116,119,121,124,128,130,132,136,137,144,146,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,203,206,207,212,213,214,217,218,219,220],commandlin:62,comment:[4,6,11,15,18,29,34,36,37,39,64,77],commit:[4,6,8,11,36,41,42,43,52,57,74,214,220],commitlog:[2,6,45,46,71,73,208,218,219],commitlog_archiv:[4,6],commitlog_compress:4,commitlog_directori:[4,46,71],commitlog_segment_size_in_mb:[4,45],commitlog_sync:4,commitlog_sync_batch_window_in_m:4,commitlog_sync_period_in_m:4,commitlog_total_space_in_mb:4,commitlogposit:208,commitlogread:65,commitlogreadhandl:65,commitlogseg:[6,73,74],committ:[37,38,41,42,43,44],commod:[2,3],common:[0,14,15,24,25,27,34,36,39,42,50,53,55,57,58,69,73,76,82,216,217,220],common_nam:11,commonli:[54,72,146],commun:[0,6,8,24,37,39,40,42,45,46,51,57,61,64,77,207],commut:45,compact:[4,6,11,15,45,48,52,58,59,60,62,63,64,69,70,71,73,81,86,90,91,116,119,120,146,159,168,172,173,179,187,192,199,205,206,207,208,209,211,214,217,218,220],compacted_partition_maximum_byt:195,compacted_partition_mean_byt:195,compacted_partition_minimum_byt:195,compaction_:192,compaction_histori:218,compaction_throughput:219,compaction_window_s:69,compaction_window_unit:69,compactionbyteswritten:74,compactionexecutor:[60,74,219],compactionhistori:[66,146],compactionid:192,compactionparamet:66,compactionparametersjson:66,compactions_in_progress:214,compactionstat:[66,146,219],compactiontask:218,compactor:[122,146,175],compani:27,compar:[0,1,3,4,6,31,35,42,49,55,67,74,75,76,81,216,219],comparis:28,comparison:6,compat:[6,9,10,11,13,15,20,39,42,64,83,220],compatilibi:22,compet:6,compil:[34,35,40,56,82],compilerthread3:220,complain:40,complet:[1,6,13,14,24,29,30,42,43,45,54,57,58,59,60,62,65,69,72,74,75,76,77,79,82,146,160,162,211,213,214,219],completed_task:60,completedtask:74,complex:[0,4,6,9,14,22,23,25,27,28,32,42],complexarg:14,compliant:[6,14,77],complic:42,compon:[0,4,6,11,24,32,39,63,74,77,146,179,220],compos:[11,13,22],composit:[4,11,32],compound:[17,32],comprehens:39,compress:[0,4,6,11,44,48,52,57,60,64,67,71,72,73,74,81,208],compression_level:[11,70],compression_metadata_off_heap_memory_us:195,compressioninfo:[4,58,62,64],compressionmetadataoffheapmemoryus:74,compressionratio:74,compressor:[4,6,11,72,208],compris:[4,11,53,70],compromis:[1,43,77],comput:[0,4,6,11,14,28,54,56,215],concaten:[14,53,61],concentr:23,concept:[0,20,27,30,32,66,69],conceptu:[24,26,27,30,31,32,52],concern:[13,14,25,220],conclus:6,concret:[12,22],concurr:[0,1,6,33,57,64,71,81,121,122,123,146,161,174,175,176,207,219,220],concurrent_compactor:219,concurrent_materialized_view_build:18,concurrent_writ:4,concurrentmarksweep:71,condens:13,condit:[6,10,12,13,20,22,32,34,38,60,68,74,77,81,82,220],conditionnotmet:74,conduct:49,conf:[6,45,46,49,56,64,74,77,82,207,218],confid:0,config:[33,57,64,74,77,82,83,203],configu:[61,220],configur:[0,3,4,11,20,22,27,33,40,44,45,48,49,52,55,
56,57,59,60,64,73,74,77,78,80,81,82,92,109,146,164,179,206,207,208,216,218,219],confirm:[6,8,25,26,29,33,39,40],confirm_numb:29,conflict:[0,13,22,38,41],conform:[18,39,64],confus:[10,12,45,220],congratul:36,conjunct:82,connect:[0,6,11,22,23,30,40,49,51,52,54,58,60,64,74,77,78,81,82,88,92,145,146,207,220],connectednativecli:74,connectednativeclientsbyus:74,connection_stag:60,connectionsperhost:[64,207],connector:[23,45,47,77],connnect:74,consecut:[28,46],consensu:[1,59,72],consequ:[11,13,19,22,71],conserv:[6,72],consid:[0,6,13,22,24,28,32,37,42,46,61,63,66,69,71,72,75,215],consider:[13,22,23,24,25,27,32,64],consist:[2,3,6,11,12,13,14,29,32,39,57,59,60,62,72,73,74,77,79,83,216,219],consol:[40,46,61,82],constant:[10,11,15,17,22,64],constantli:[0,6,66],constitut:32,constrain:27,constraint:1,construct:[0,12,75,220],constructor:[6,34],consum:[6,44,53,54,57,63,65,74,219],consumpt:65,contact:[0,6,11,45,52,75,216],contain:[0,1,3,6,8,9,10,11,12,13,15,16,18,20,22,24,25,35,40,42,44,54,57,60,62,66,70,72,74,77,80,82,185,205,210,214,216,218,219,220],contend:[6,74],content:[4,6,11,12,13,24,36,49,52,53,54,58,66,69,75,82,109,204,220],contentionhistogram:74,context:[4,6,9,20,22,26,40,42,45,77,218],contigu:13,continu:[0,1,6,28,34,44,53,56,57,58,61,67,72,77,78],contrarili:12,contrast:[0,24,27,32,44,77],contribut:[5,33,36,38,44,52],contributor:[36,38,42],control:[0,3,6,10,11,13,15,39,46,54,66,77,78,82],conveni:[9,12,14,17,44,79],convent:[6,11,14,15,25,36,38,41,42,44,77,78],converg:0,convers:10,convert:[10,13,14,66,220],convict:0,coordin:[0,1,3,6,11,13,14,22,32,45,57,62,72,74,75,162,216,217],coordinator_read:60,coordinator_scan:60,coordinator_writ:60,coordinatorreadlat:[74,216],coordinatorscanlat:74,coordinatorwritelat:[74,216],cop:34,copi:[0,1,3,6,28,35,45,55,56,57,62,64,66,75,83,207,216],copyright:[23,24,25,26,27,28,29,30],core:[6,14,61,71,80,174,219,220],correct:[0,6,10,35,39,49,54,64,67,70,77,146,159,206,212],correctli:[6,11,36,45,50,53,57,61,66,77],correl:[6,10,78,216,219],correspond:[0,4,6,9,11,13,14,18,22,36,42,44,45,49,58,59,64,65,72,78,207],corrupt:[6,60,64,66,70,71,76,83,168,200,203],corrupt_frames_recov:60,corrupt_frames_unrecov:60,cost:[6,13,22,50,70,76],couchbas:[62,64],could:[0,3,6,12,22,24,25,27,28,32,37,39,42,53,54,57,58,60,62,64,66,67,76,82,218,220],couldn:[72,74],count:[4,6,9,13,22,28,45,50,57,58,60,66,74,79,81,208,218,219,220],counter1:211,counter:[0,4,6,9,14,19,59,60,71,74,81,83,139,146,168,170,171,203],counter_mut:[74,219],counter_read:81,counter_writ:81,countercach:74,countermutationstag:[60,74,219],counterwrit:[81,132,182],countri:[13,22,25,29],country_cod:22,coupl:[0,6,25,27],cours:[13,76,215,220],cover:[11,25,36,39,42,44,45,48,55,66,67,74,75,208],coverag:[35,37,54],cph:[64,207],cpu:[0,6,11,50,65,70,73,216,218,219],cpu_idl:220,cq4:[53,54,218],cq4t:[53,54],cqerl:47,cqex:47,cql3:[14,39,44,82],cql:[0,3,6,10,11,12,13,14,16,17,19,20,22,25,27,28,29,30,32,43,44,47,49,51,52,53,55,60,61,62,64,69,73,77,81,83,179,204,220],cql_type:[11,12,13,14,20,22],cqlc:47,cqldefinit:14,cqlkeyspac:[62,64,76],cqlsh:[45,48,49,52,54,60,62,64,76,77,83],cqlshrc:83,cqltester:[39,44],cqltrace:220,craft:77,crash:71,crc32:[4,58,62,64,213,214],crc:[4,57,58,213,214],crc_check_chanc:[11,64,70],crdt:0,creat:[0,3,4,6,9,10,12,13,15,17,19,23,24,25,26,27,28,29,30,32,33,36,40,41,44,45,48,49,53,54,56,58,59,60,64,65,66,67,68,69,70,72,75,76,77,79,81,82,89,207,212,220],create_aggreg:53,create_aggregate_stat:12,create_funct:53,create_function_stat:12,create_index:53,create_index_stat:12,create_keyspac:53,create_keyspace_stat:12,create_ma
terialized_view_stat:12,create_rol:53,create_role_stat:12,create_t:53,create_table_stat:12,create_trigg:53,create_trigger_stat:12,create_typ:53,create_type_stat:[12,22],create_user_stat:12,create_view:53,createkeystor:6,createrepo:43,createt:44,creation:[6,10,11,13,14,18,22,65,218],creator:20,credenti:[6,77],critic:[0,39,42,72,77,216,219],cross:[0,3,6,45,72,78],crossnodedroppedlat:74,crucial:[77,218,219,220],cryptographi:6,csv:[64,82],ctrl:220,cuddli:22,cue:25,culprit:216,cumul:[219,220],curent:208,curl:[38,49],current:[0,1,6,9,11,13,20,22,24,40,42,43,49,54,57,58,60,61,64,66,67,68,69,72,74,79,81,82,83,111,129,133,135,137,146,160,191,199,203,208,209,214,218,219],currentd:[10,14],currentlyblockedtask:74,currenttim:[10,14],currenttimestamp:[10,14],currenttimeuuid:[10,14],custom:[6,9,10,11,14,15,16,20,24,26,27,28,33,42,58,61,64,78,81,82,207],custom_option1:20,custom_option2:20,custom_typ:[14,22],cut:218,cute:22,cvh:39,cycl:[6,56,57,65,75,109],cython:83,d1006625dc9e:[54,76],d132e240:64,d132e240c21711e9bbee19821dcea330:[62,64],d18250c0:204,d85b:204,d936bd20a17c11e8bc92a55ed562cd82:210,d993a390c22911e9b1350d927649052c:62,daemon:[40,146,193,220],dai:[14,17,19,22,28,61,66,69,76],daili:[33,53,54,61,109],danger:6,dart:48,dart_cassandra_cql:47,dash:12,data:[1,2,3,4,6,10,12,14,15,16,18,26,27,29,39,46,49,50,52,54,57,58,59,60,63,67,68,69,70,71,72,73,74,75,76,77,78,80,81,82,84,89,102,109,112,116,137,146,151,161,185,200,204,205,206,207,208,209,210,211,212,213,214,215,218,219,220],data_file_directori:[46,71],data_read:20,data_writ:20,databas:[0,1,3,12,13,15,21,24,27,30,31,32,43,49,52,53,54,59,61,62,66,71,77,217,218,220],datacent:[0,1,3,6,11,54,57,58,60,64,76,78,102,112,125,146,161,178,207,216,219],datacenter1:[6,50,81],dataset:[2,3,6,50,76,220],datastax:[6,14,30,47,216],datastor:219,datatyp:14,date:[1,4,9,10,15,17,19,24,25,26,27,28,29,32,61,75,83,168,203,204,208],dateof:[10,14],datestamp:17,datetieredcompactionstrategi:[11,66],datetim:15,datum:3,daylight:22,db532690a63411e8b4ae091830ac5256:213,db_user:77,dba:[64,77],dbd:47,dc1:[6,11,20,59,72,77,219],dc1c1:215,dc2:[6,11,59,72,77,219],dc3:20,dcassandra:[67,69,74,77,79],dcawareroundrobin:216,dcl:[53,61],dclocal_read_repair_ch:75,dcom:77,dcpar:161,ddl:[11,53,60,61,62,64,82],ddl_statement:12,deactiv:6,dead:[0,6,73,84,146,220],dead_node_ip:79,deal:[57,83,203],deb:49,debian:[43,45,48,220],debug:[46,54,82,211,212,213,214,215,216,217,220],decai:216,decid:[9,36,57,64,67,68,72,78],decim:[9,14,17,19,22,82],decimalsep:82,decis:0,declar:[11,12,14,22,25],decod:[17,22,57,220],decommiss:[0,6,55,59,79,146],decompress:[70,220],decoupl:[0,58,59],decreas:[6,50,69,207,220],decrement:[13,22],decrypt:6,dedic:[4,6,24,54,58],dedupl:[143,146],deem:6,deep:[52,217,218],deeper:[42,220],default_time_to_l:[10,11,13,64],defend:45,defens:6,defer:[11,220],defin:[0,3,6,9,10,11,12,13,15,16,17,18,20,21,23,24,25,27,28,31,40,52,64,66,72,74,77,78,79,81,82,89,146,208],defineclass1:56,defineclass:56,definit:[9,13,14,15,18,22,23,24,25,26,27,28,29,30,32,52,55,62,63,64,81,208],deflat:[4,6,72],deflatecompressor:[11,70],degrad:[0,6,24],delai:[1,4,62,74,76],deleg:40,delet:[0,4,6,9,10,11,12,15,17,18,20,22,24,27,42,52,53,54,60,61,62,69,72,76,82,109,116,146,198,208,213,214,218],delete_stat:[12,13],deletiontim:4,delimit:6,deliv:[0,6,57,72,74],deliveri:[6,57,73,74,146,147,166,177],delta:[74,208],demand:[0,77],demo:55,demonstr:[53,54,56,62,64,76,217],deni:45,denorm:[22,24,25,32],denot:[6,12,25],dens:63,dep:35,departur:0,depend:[0,4,6,11,12,13,14,22,33,36,39,40,41,42,44,52,56,58,68,72,76,83,216],dependenic:35,dep
ict:23,deploi:[35,45,46,49,220],deploy:[0,6,55,59,72,77,78,80],deprec:[6,10,11,14,15,64,66],depth:220,deriv:[57,64],desc:[9,11,13,32,54,60,82],descend:[11,13],describ:[2,6,7,9,10,11,12,13,14,15,17,20,22,26,27,30,39,40,42,43,54,55,63,77,83,146,203],describeclust:146,descript:[10,11,14,19,22,24,26,29,33,36,42,50,58,60,64,72,74,75,82],descriptor:[74,214],deseri:[55,215],design:[0,1,3,14,23,24,25,26,28,30,31,52,54,59,66,69,71,76],desir:[16,22,24,33,45,210],destin:[65,72,82],destroyjavavm:220,detach:43,detail:[5,6,10,11,12,13,14,22,23,24,25,26,33,36,37,45,56,60,62,73,77,80,81,82,83,203,213,218,219,220],detect:[1,2,6,38,45,57,77,215],detector:[0,114,146],determin:[0,6,11,13,20,24,27,28,32,50,58,62,63,70,75,78,161,216,219,220],determinist:[0,45],detractor:27,dev1:61,dev:[6,8,43,45,50,220],devcent:30,devel:56,develop:[3,8,30,36,37,40,42,44,56,71],devic:[4,80,220],df303ac7:219,dfb660d92ad8:82,dfp:200,dht:[6,208],diagnost:[6,55,73],diagram:[23,24,26,30,57],diagrammat:24,diamond:23,dictat:[6,77],did:[26,39,74,206],didn:[0,24],die:6,dies:[52,79],diff:[15,34,218],differ:[0,1,6,11,12,13,14,15,20,22,24,25,31,32,33,38,40,42,44,45,46,50,53,54,55,58,59,64,66,67,68,70,71,74,75,76,79,81,216,220],difficult:[0,6,44,220],difficulti:22,digest:[4,6,58,62,64,75,213,214],digit:[17,22,45],digitalpacif:49,diminish:22,dinclud:35,dir:56,dir_path:[64,207],direct:[0,6,11,17,20,24,42,74,75,220],directli:[13,18,20,36,40,60,64,66,77,208,220],director:13,directori:[4,6,21,35,36,40,44,45,48,49,51,56,61,64,65,68,71,72,73,82,137,146,164,207,220],dirti:[4,6,54,220],disabl:[6,11,14,53,54,58,62,66,70,72,77,78,82,95,96,97,98,99,100,101,102,103,112,146,169,171,173,178,181,182,183],disable_stcs_in_l0:67,disableauditlog:[53,61,146],disableautocompact:[66,146],disablebackup:[62,146],disablebinari:146,disablefullquerylog:[54,146,218],disablegossip:146,disablehandoff:[72,146],disablehintsfordc:[72,146],disableoldprotocolvers:[60,146],disablesnapshot:168,disableuditlog:61,disadvantag:0,disagre:75,disallow:[6,55],disambigu:[53,54,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],disappear:[11,75],discard:[6,57,59],disconnect:[57,66],discourag:[11,22,42],discov:[45,76],discoveri:[0,64],discret:0,discuss:[8,11,22,24,26,28,42,53,54,56,57,60,62,64,75],disk:[4,6,11,24,27,31,32,46,50,52,53,54,58,60,61,62,63,65,66,69,70,73,74,76,109,143,146,159,200,205,209,212,213,218,219,220],disk_spac:60,disk_usag:60,dispar:[6,27],dispatch:57,displai:[11,24,53,54,60,61,64,82,83,85,91,121,136,138,145,146,195,203,207,211,212,213,214,215,220],displaystyl:28,disrupt:[45,77],dissect:220,dist:[43,49],distanc:[23,80],distinct:[0,9,10,13],distinguish:[9,14,58,62],distribut:[1,2,3,6,23,32,35,42,44,45,49,60,66,74,76,77,79,80,81,208,209,217,218,219,220],distro:43,dive:[52,217,218],diverg:1,divid:[12,25,27],divis:19,djava:[40,45,77],dload:49,dml:[21,53,60,61],dml_statement:12,dmx4jaddress:74,dmx4jport:74,dns:45,dobar:34,doc:[6,35,36,39,43,49,76,77,206,220],document:[5,12,14,15,17,24,25,27,29,30,33,39,41,42,50,51,54,77,81,82],doe:[0,3,6,11,13,14,16,17,18,20,22,27,32,38,39,42,49,52,54,55,58,59,62,63,64,66,67,69,70,72,75,76,77,78,79,143,146,204,205,206,207,208,209,210,211,212,
213,214,215,216,217,218,219,220],doesn:[6,14,22,24,34,44,45,54,57,58,76,81,207,208,210,211,218,220],dofoo:34,doing:[6,13,18,27,44,45,50,66,67,74,79,220],dollar:[10,12],domain:[0,23,24,27,37,77,81,167,186],domin:220,don:[0,5,6,13,26,27,28,32,34,37,38,39,40,42,43,45,46,58,59,60,64,66,76,137,161,207,212,217,219,220],done:[6,11,13,22,25,27,33,36,37,42,43,44,46,51,54,57,67,68,81,209,212,213],dont:[60,61],doubl:[6,9,10,11,12,14,17,19,22,40,50,60,74,78],doubt:11,down:[0,6,20,23,28,50,54,58,66,74,76,78,79,100,146,161,209,216,218,219,220],downgrad:213,download:[6,30,33,40,43,49,56,74,80],downsampl:4,downstream:[26,219],downtim:[3,50,72],downward:20,dozen:219,dpkg:43,drag:11,drain:[4,57,72,146],draw:[0,25],drive:[0,6,50,66,71,218,219,220],driven:[3,27,31],driver:[6,12,14,20,28,30,44,48,52,60,82,216],driver_nam:60,driver_vers:60,drop:[1,6,10,15,52,53,54,55,57,59,60,61,62,64,66,67,69,74,76,77,109,205,208,209,211,216,219,220],drop_aggreg:53,drop_aggregate_stat:12,drop_funct:53,drop_function_stat:12,drop_index:53,drop_index_stat:12,drop_keyspac:53,drop_keyspace_stat:12,drop_materialized_view_stat:12,drop_rol:53,drop_role_stat:12,drop_tabl:53,drop_table_stat:12,drop_trigg:53,drop_trigger_stat:12,drop_typ:53,drop_type_stat:[12,22],drop_user_stat:12,drop_view:53,dropdown:220,droppabl:[6,57,66,208],dropped_mut:195,droppedmessag:73,droppedmut:74,dropwizard:74,drwxr:213,drwxrwxr:[62,64],dry:[83,203],dsl:33,dt_socket:40,dtest:[33,39,41],due:[0,1,11,13,22,35,45,50,57,62,69,72,74,79,216,220],dump:[53,54,61,82,83,203,218],duplic:[25,27,28,32,39,76,214],durabl:[0,2,4,62,65,72],durable_writ:11,durat:[6,10,15,19,20,66,72,74,81,148,196,207],dure:[6,11,14,21,23,35,42,44,45,58,59,60,64,67,68,69,70,72,74,75,77,79,81,82,168,205,211,215,218,220],duse:56,dverbos:35,dying:45,dynam:[6,23,72,73,75,77],dynamic_snitch:78,dynamic_snitch_badness_threshold:78,dynamic_snitch_reset_interval_in_m:78,dynamic_snitch_update_interval_in_m:78,dynamo:[2,3,52],e123fa8fc287:[54,76],e2b06b91:49,each:[0,1,3,4,6,10,11,12,13,14,17,18,20,22,23,24,25,26,27,28,29,32,33,36,38,42,49,50,51,52,53,54,56,57,58,59,60,61,62,64,66,67,70,71,72,74,76,77,78,79,80,81,82,83,146,171,187,200,203,204,218,219,220],each_quorum:[0,6,55],eagerli:57,earli:[6,12,42],earlier:[42,49,53,56,75],eas:220,easi:[3,9,23,36,42,54,220],easier:[0,36,42,50,53,204],easiest:45,easili:[0,23,27,77],east:[54,76],eben:[23,24,25,26,27,28,29,30],ec2:[6,50,53,54,56,60,62,64,71,76,78],ec2multiregionsnitch:[6,78],ec2snitch:[6,50,78],ecc:71,echo:[49,56,208],eclips:[34,41,44],ecosystem:39,eden:220,edg:39,edit:[3,36,40,43,46,49,74,77,214],editor:36,effect:[0,3,6,11,22,25,27,42,45,54,57,59,63,70,77,100,146,216,219,220],effectiv:74,effici:[6,11,32,53,61,64,66,69,78,79],effort:[0,6,36,42,72,76],eight:0,either:[0,4,6,8,12,13,14,16,22,27,28,33,34,36,38,40,42,45,49,51,56,57,62,65,66,72,74,77,81,198,216,218,219,220],elaps:[66,74,220],elast:50,elasticsearch:80,elder:40,element:[0,22,24,25,29,36,50,54,82],elig:6,elimin:[28,216],elixir:48,els:[11,13,34,42],elsewher:57,email:[8,16,22,29,43,52],embed:44,emerg:[3,35],emit:6,emploi:63,empti:[6,9,10,11,12,64,82,211],emptytyp:9,enabl:[0,6,11,14,17,20,44,45,55,56,60,62,66,69,70,72,78,79,82,105,106,107,109,112,113,146,183,207,208,218,220],enable_legacy_ssl_storage_port:6,enable_transient_repl:[59,60],enable_user_defined_funct:14,enableauditlog:[53,61,146],enableautocompact:[66,146],enablebackup:[62,146],enablebinari:146,enablefullquerylog:[6,54,146,218],enablegossip:146,enablehandoff:[72,146],enablehintsfordc:[72,146],enableoldprotocolvers:[60,146],encapsul:[34,74],
enclos:[9,10,12,14,20,27,81],enclosur:12,encod:[15,22,39,57,61,82,208],encodingstat:208,encount:[0,5,13,43,57,74,81],encourag:[11,65],encrypt:[6,48,56,58,60,64,73,207],end:[22,24,27,33,45,53,54,61,66,67,68,75,77,78,82,89,124,146,161,209,214,220],end_dat:29,end_resultset:54,end_token:[89,161],end_token_1:151,end_token_2:151,end_token_n:151,endpoint:[0,6,57,59,72,74,75,78,84,124,146,161,198],endpoint_snitch:78,endpointsnitchinfo:77,endtoken:82,enforc:[17,27,57,77],engin:[0,2,3,11,28,42,52,74,80],enhac:37,enhanc:[37,57,71],enjoi:43,enough:[0,6,22,23,26,28,45,46,53,54,67,75,76,78,82,218,220],enqueu:[6,57,218],ensur:[1,13,18,21,28,45,48,54,57,59,65,66,70,72,75,77,206,218,219],entail:45,enter:[0,24,26,33,45,82,218,220],enterpris:49,entir:[0,4,6,11,14,22,27,45,50,54,57,58,59,63,64,69,72,76,77,79,82,83,203,205,216,220],entiti:[23,24,25,26,27,32],entri:[4,6,9,13,16,33,42,43,49,52,53,54,74,77,82,208],entropi:[0,6,72],entry_count:60,entry_titl:13,enumer:[20,204],env:[45,46,56,74,77],environ:[0,1,5,6,30,35,40,41,44,45,48,49,50,52,56,64,71,210],envis:24,ephemer:71,epoch:[22,54,208],epol:6,equal:[0,6,10,11,13,22,24,28,34,66,81],equival:[10,11,12,13,14,20,38,66,217],equivil:50,eras:[11,57],erlang:48,erlcass:47,err:82,errfil:82,error:[1,6,11,12,14,16,18,20,22,24,30,33,34,39,40,43,44,52,53,56,60,61,76,81,82,162,206,211,215,217,218,219],error_byt:60,error_count:60,escap:[12,17,81],especi:[0,24,25,42,45,50,66,82,220],essenc:24,essenti:[14,45,56,62,82],establish:[6,20,57,64,78,207],estim:[4,28,58,74,76,208,219],estimatedcolumncounthistogram:74,estimatedpartitioncount:74,estimatedpartitionsizehistogram:74,etc:[6,18,22,34,39,45,46,49,54,58,61,66,67,74,77,81,207,220],eth0:6,eth1:6,ev1:22,evalu:[6,19,29,31,52],even:[0,1,6,10,11,12,13,14,17,22,24,25,26,27,29,32,37,42,50,52,54,57,58,59,60,62,66,72,75,76,77,82,92,168,199,216,218,219,220],evenli:[0,6,32],evenlog:[206,209],event:[1,3,4,6,13,22,55,57,61,66,73,81,82,161,204],event_typ:13,eventlog:[204,206,209,212,214,215],eventlog_dump_2018jul26:204,eventlog_dump_2018jul26_d:204,eventlog_dump_2018jul26_excludekei:204,eventlog_dump_2018jul26_justkei:204,eventlog_dump_2018jul26_justlin:204,eventlog_dump_2018jul26_singlekei:204,eventlog_dump_2018jul26_tim:204,eventlog_dump_2018jul26b:204,eventu:[0,2,3,4,13,36,50,57,72,76],ever:[34,44,45,71],everi:[0,1,4,6,11,13,14,18,20,21,22,24,28,51,53,56,57,58,59,61,63,64,66,71,76,81,82,216,219,220],everyon:[23,75],everyth:[4,12,34,40,45,80],evict:74,evil:[6,14],ex1:81,ex2:81,exact:[11,12,14,70,72,217],exactli:[11,14,18,54,77,204,220],examin:[25,28],exampl:[0,3,6,11,13,14,17,20,22,23,24,25,26,27,28,31,37,43,44,49,50,51,53,54,56,59,60,61,64,66,67,69,72,73,77,78,81,82,204,205,206,207,208,209,210,211,212,214,215,216,217,218,219,220],example2:81,exaust:6,excalibur:11,exce:[4,6,11,17,34,57,218],exceed:[6,57,71,209],excel:[11,28],excelsior:11,except:[0,6,13,14,17,39,41,42,44,45,54,56,57,61,64,74,204,209,218,220],excess:63,exchang:[0,6,45,58],exclud:[0,11,53,61,74,83,105,129,146,203],excluded_categori:[53,61,105],excluded_keyspac:[53,61,105],excluded_us:[53,61,105],exclus:[22,35,44,161],execut:[6,9,11,12,13,14,20,30,33,35,40,44,51,54,61,66,74,77,82,203,204,205,206,207,208,209,210,211,212,213,214,215,219,220],executor:33,exhaust:[6,49,56,216],exhibit:13,exist:[0,1,6,9,10,11,12,13,14,16,17,18,20,21,22,27,28,36,37,39,40,43,44,52,53,54,60,61,63,64,66,69,70,75,78,79,81,205],exit:[54,83,214],exp:81,expand:[11,50,83],expans:[11,50,58,59],expect:[0,1,4,6,10,12,22,27,32,34,39,42,43,57,64,66,69,72,73,76,77,209,219],expens:[6,54,63,78],experi:[6,24,49,57,
66,219],experienc:[0,6,218],experiment:[0,3,25,32,49,56,59,161],expert:59,expir:[6,10,11,13,22,57,60,69,72,73,76,77,168,205,208,211],expired_byt:60,expired_count:60,expiri:66,explain:[26,34,36,39,42],explan:[50,83,203],explicit:[0,10,11,20,29,57],explicitli:[3,4,6,10,11,13,17,22,34,53,60,62,69,78,81],explor:[24,40],expon:10,exponenti:[74,81,216],expos:[6,9,57,60,75,77],express:[0,6,10,12,19,78],expung:45,extend:[22,42,44,57,64,80,83,137,200,203],extens:[6,11,24,64,77],extent:1,extern:[27,52,53,73,74,79,80,217],extra:[0,4,6,57,66],extract:[34,49],extrem:[6,13,24,50,81],f6845640a6cb11e8b6836d2c86545d91:208,f8a4fa30aa2a11e8af27091830ac5256:207,facebook:3,facilit:6,fact:[22,37,44,45,216],factor:[0,6,11,23,28,52,58,59,64,70,72,76,77],factori:81,fail:[0,1,6,11,13,14,22,28,33,35,52,53,54,56,57,58,66,72,74,75,82,146,162],failur:[1,2,6,42,52,58,59,62,66,71,72,74,78,114,146,200,216],failuredetector:146,fairli:[6,24,65,77,220],fake:14,fall:[0,6,24,53,54,61],fallback:[6,66,78],fals:[6,11,12,17,20,22,54,60,61,62,63,64,65,66,69,70,74,76,77,79,82,168],famili:[6,62,71,130,148,185,196,213],familiar:23,fanout_s:[66,67],faq:83,far:[36,37,54],fare:220,fashion:0,fast:[0,6,24,28,32,63,66,80,218,220],faster:[0,6,32,42,58,59,70,71,146,171,219],fastest:[6,38,75,78],fatal:6,fault:[1,45,72],fav:[16,22],favor:56,favorit:220,fax:22,fct:14,fct_using_udt:14,fd576da0:76,fd8e5c20:76,fear:45,feasibl:22,featur:[0,2,25,27,29,30,37,39,40,42,50,52,53,54,56,57,58,59,77],fed:6,feed:54,feedback:42,feel:[36,38],felt:[26,28],fetch:[6,11,36,82],few:[0,6,25,62,66,67,71,216,218],fewer:[6,32,42,72],fewest:50,fffffffff:[17,22],fgc:220,fgct:220,field:[10,13,14,17,22,32,34,54,57,61,63,81,211],field_definit:22,field_nam:13,fifteen:74,fifteenminutecachehitr:74,fifth:219,figur:[0,1,24,25,26,27,28,32,37,62,68,75,205],file:[3,4,7,11,27,28,36,40,41,42,43,44,45,46,49,50,52,53,54,56,57,58,60,62,63,64,65,66,69,71,72,74,77,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,208,209,210,211,213,214,216,217,220],fileauditlogg:53,filenam:[4,11,82,130,146,208,212],filenamepattern:61,filesystem:[33,53,54,72,77],fill:[43,57,62,65,67],fillion:13,filter:[0,4,9,11,18,52,58,59,60,62,64,71,73,74,161,208,213,214],finalfunc:[9,14],finalis:43,find:[0,4,6,24,25,26,27,29,33,36,40,43,44,52,60,63,66,72,75,77,79,124,128,204,208,210,213,217,219,220],fine:[4,6,42,76,77],finer:[4,6],finish:[28,29,40,42,54,56,76,146,163,218],finish_releas:43,fip:[6,77],fire:[21,37],firefox:82,firewal:[6,45,46,78],first:[0,1,4,5,6,11,13,14,22,23,24,25,26,28,32,42,43,45,48,56,59,62,67,69,71,75,76,77,81,82,161,168,204,208,211,214,216,218,219,220],first_nam:29,firstnam:13,fit:[6,28,66,67,74],five:74,fiveminutecachehitr:74,fix:[6,10,11,12,18,27,33,36,38,41,43,45,50,57,66,71,75,76,81,211],fixm:43,flag:[6,13,38,39,42,57,65,74,76,79,206],flash:80,flat:72,flexibl:[0,3,11,50,54,55,77],flight:[6,57,77],flip:11,floor:6,flow:[6,20,26,39,41,61],fluent:47,flush:[4,6,11,57,62,64,65,66,67,69,71,72,74,104,146,185,214,218],fname:14,focu:[23,26,33,42],focus:81,focuss:220,folder:[40,64,192,212],follow:[0,1,3,4,5,6,8,9,10,11,12,13,14,17,18,19,20,22,26,28,32,33,34,35,36,37,38,39,40,42,43,44,45,46,5
0,52,53,54,56,57,58,59,60,61,62,64,65,66,70,72,74,75,76,77,78,79,82,86,89,96,106,115,116,152,161,168,182,187,199,200,205,210,211,214,216,220],font:12,foo:[11,12,65,220],footprint:[3,54,146,148],forc:[4,6,11,13,58,76,82,89,92,146,160,161,162,215],forcefulli:[84,146],foreground:[46,75],foreign:[3,27,32],forev:[57,66],forget:5,fork:[36,42],form:[0,6,10,11,12,14,20,24,27,50,58,64,91,145,195],formal:[0,12,36,43],format:[4,6,10,11,17,22,25,28,36,38,39,41,42,53,54,57,61,64,74,82,83,90,109,130,151,195,197,203,214,219],former:[6,74],formerli:59,formula:28,fortabl:64,forward:[6,11,35,75],found:[0,1,5,6,12,14,23,33,36,37,42,44,46,51,62,64,75,77,79,81,82,83,192,200,203,207,208,213,214],four:[0,13,28,57,60,70],fourth:[28,219],fqcn:44,fql:[55,218],fql_log:218,fqltool:[3,54,56,218],fraction:[0,6],frame:[6,60],framework:[39,44],franc:[13,22],free:[0,6,11,22,30,36,38,40,57,74,80,215,220],freed:4,freestyl:33,frequenc:[6,11,32,65,76],frequent:[6,11,24,25,27,50,52,56,69,77,216,220],fresh:79,fri:54,friendli:[6,11,22,44],from:[0,1,3,4,6,9,11,12,13,14,15,17,18,19,20,22,23,24,25,26,27,28,29,30,32,37,38,41,42,44,49,50,51,52,53,54,55,56,58,59,61,63,65,66,67,69,70,71,72,73,74,75,76,77,78,79,81,83,84,86,87,89,94,96,102,105,106,112,115,116,119,121,124,128,130,132,136,137,144,146,148,151,152,153,155,156,159,160,161,162,164,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,203,205,206,208,209,211,214,216,218,219,220],fromjson:15,front:[24,54],froom:22,frozen:[9,10,11,13,14,22,29],fruit:[22,42],fsync:[4,6,65,72,74,219],fulfil:81,full:[0,1,3,6,9,11,13,16,20,26,42,45,51,52,55,64,66,67,70,72,73,75,77,80,81,82,99,109,146,152,161,164,207,209,214,219],full_nam:195,full_query_log_dir:54,full_query_logging_opt:54,fulli:[0,1,6,11,12,14,32,43,59,64,69,73,74,77],function_cal:12,function_nam:[13,14,20],fundament:17,further:[5,11,18,22,32,48,57,66,73,77,80,219],furthermor:[0,10,13,77],futher:61,futur:[6,9,10,11,22,42,57,59,72,111,146,191],g1gc:71,gain:30,galleri:23,game:[14,22],garbag:[11,58,71,73,74,116,218],garbage_collect:192,garbagecollect:146,gather:66,gaug:74,gaurante:0,gaussian:81,gc_grace_second:[11,64,208],gc_type:74,gce:[45,71],gcg:6,gcinspector:218,gcstat:146,gct:220,gcutil:220,gcviewer:218,gear:24,gen:220,gener:[0,2,4,6,8,11,12,13,14,17,22,30,32,33,36,39,40,41,42,43,45,54,57,60,71,72,77,80,81,82,132,168,182,211,217,218,219,220],genuin:34,geoloc:23,geospati:80,get:[0,4,6,8,24,25,27,28,32,33,35,36,38,40,41,42,45,49,50,52,53,54,56,60,62,63,64,66,67,72,74,75,76,83,121,122,123,126,129,146,203,205,209,217,219,220],getbatchlogreplaythrottl:146,getcompactionthreshold:146,getcompactionthroughput:146,getconcurr:146,getconcurrentcompactor:146,getconcurrentviewbuild:[18,146],getendpoint:146,getint:14,getinterdcstreamthroughput:146,getlocalhost:[6,45],getlogginglevel:[146,218],getlong:14,getmaxhintwindow:[72,146],getpartition:34,getreplica:146,getse:146,getsstabl:146,getstr:14,getstreamthroughput:146,gettempsstablepath:34,getter:[20,34],gettimeout:146,gettraceprob:146,getudtyp:64,gib:[53,54,72,91,145,195,219],gigabyt:72,gist:[4,34],git1:43,git:[5,33,36,38,40,42,43,56,218,220],gitbox:[40,43],github:[33,34,38,41,42,43,44,56,80,220],give:[0,18,20,22,28,36,42,44,52,82,206,218,219],giveawai:220,given:[0,6,11,12,13,14,16,22,24,26,27,28,29,32,33,42,53,57,58,59,62,63,64,66,67,69,75,76,77,79,81,82,87,89,94,96,106,119,128,132,146,152,172,179,183,187,194,204,206,208,209,210,213,214],glanc:220,global:[0,3,6,25,33,56,57,82,146,170],gms:218,gmt:22,goal:[6,27,28,31,50,54,69,216],gocass
a:47,gocql:47,goe:[11,27],going:[6,25,28,42,67,69,211,217,219,220],gone:[6,11],good:[6,28,34,36,42,44,45,54,57,66,76,82,210,216,218,219,220],googl:[3,34,82,220],gori:45,gossip:[1,6,45,56,57,72,74,78,100,110,134,146,190,218],gossipinfo:146,gossipingpropertyfilesnitch:[6,50,78],gossipstag:[60,74,218,219],got:6,gotcha:220,gp2:71,gpg:49,gpgcheck:49,gpgkei:49,grace:[73,76,83,203],gracefulli:57,grafana:216,grai:22,grain:[27,77],grammar:[11,12,35],grant:[6,9,53,77],grant_permission_stat:12,grant_role_stat:12,granular:[4,6,11,75,116],graph:[20,83],graphit:216,gravesit:11,great:[26,37,42,66,217,218,219,220],greater:[0,6,22,45,50,78,175,176,218,220],greatest:60,greatli:[6,58],green:[22,40],grep:[4,206,208,210,218,219,220],groovi:33,group:[6,10,11,20,24,28,32,57,59,69,74,77,78,216],group_by_claus:13,grow:[22,24,80],growth:[0,3],guarante:[0,2,6,11,13,14,22,24,42,52,57,58,63,66,67,72,75,76,79,80,82,205],guard:58,guest:[23,24,25,26,27,29],guest_id:[25,29],guest_last_nam:29,gui:220,guid:[6,23,24,25,26,27,28,29,30,36,40],guidelin:[10,32,39,43,71],habit:43,hackolad:30,had:[0,3,6,9,10,24,57,64,66,75,211,217,219],half:[4,6,38,45,72],hand:[3,6,13,71,219],handi:[28,220],handl:[0,1,6,14,39,41,42,45,54,57,59,65,71,74,77,81,109,218],handler:57,handoff:[0,6,73,74,101,135,146,177],handoffwindow:[72,146],hang:42,happen:[0,6,13,34,38,42,52,66,67,72,74,78,216,218,219,220],happi:42,happili:71,hard:[3,6,14,27,28,62,65,66,71,213,218],harder:6,hardest:37,hardwar:[2,3,6,33,50,52,58,72,73,216],has:[0,1,3,4,6,10,11,12,13,14,18,20,22,25,27,28,30,32,34,42,43,45,50,54,56,57,58,59,60,61,62,64,65,66,67,71,72,74,75,77,78,79,81,82,83,203,207,216,218,219,220],hash:[2,4,6,32,49,60,66,72,75,76,215,220],hashcod:34,hashtabl:23,haskel:48,hasn:[0,54,59,109],have:[0,3,5,6,9,10,11,12,13,14,15,18,19,20,22,24,25,26,27,28,29,32,33,34,36,37,38,39,40,42,43,44,45,46,49,50,53,54,57,58,59,60,61,62,63,64,66,67,68,69,70,71,72,74,76,77,78,109,168,205,207,209,211,214,215,216,217,218,219,220],haven:42,hayt:47,hdd:[4,6,71],head:[36,42,57,220],header:[40,57,82],headroom:6,health:220,healthi:[50,220],heap:[4,6,40,46,52,54,56,63,64,70,71,74,218,219,220],heap_buff:6,hear:27,heartbeat:[0,6,218],heavi:[0,6,50,66,218,219,220],heavili:[50,71],held:[6,71,146,150],help:[0,5,6,10,24,25,26,28,30,35,37,42,44,49,51,53,54,61,62,64,72,81,83,85,146,184,207,211,212,213,214,215,216,217,218,219,220],helper:44,henc:[5,6,11,22],here:[6,23,24,26,27,28,29,35,36,38,43,44,45,47,49,50,67,74,77,81,219],hewitt:[23,24,25,26,27,28,29,30],hex:[12,17,130],hexadecim:[10,12,130],hibern:79,hidden:[79,220],hide:[34,39,60,83,203],hierarch:[0,20],hierarchi:[20,76],high:[0,2,6,11,32,36,43,45,50,66,67,71,72,80,216,218,219],higher:[0,19,20,42,63,67,74,75,79,148,196,218,220],highest:[67,208,209],highli:[1,3,42,45,71,77,218,219],highlight:[27,30],hint:[0,6,11,12,18,45,46,52,55,59,60,73,74,76,101,102,111,112,127,135,146,147,166,177,180,191,198,219],hint_delai:74,hinted_handoff_disabled_datacent:72,hinted_handoff_en:[60,72],hinted_handoff_throttl:72,hinted_handoff_throttle_in_kb:72,hintedhandoff:[6,73],hintedhandoffmanag:74,hints_compress:72,hints_creat:74,hints_directori:[46,60,72],hints_flush_period_in_m:72,hints_not_stor:74,hintsdispatch:[60,74,219],hintsfail:74,hintsservic:73,hintssucceed:74,hintstimedout:74,histogram:[4,66,72,74,146,149,194,208,218],histor:[11,27,42],histori:[27,33,34,61,88,90,146],hit:[6,67,74,220],hit_count:60,hit_ratio:60,hitrat:74,hoc:44,hold:[0,6,10,13,20,45,58,68,72,82,216,218,220],home:[0,22,56,60,81,82],honor:[6,40],hope:66,hopefulli:42,horizont:[0,56],hospit:25,host:[0,
6,11,36,46,49,52,53,54,58,61,62,64,74,75,76,78,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207,219,220],hostnam:[6,45,53,54,56,60,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,220],hot:[6,50,73,74,220],hotel:[23,26,27,28,29,31],hotel_id:[24,25,28,29],hotels_by_poi:[24,25,29],hotspot:[0,11],hotspotdiagnost:77,hottest:6,hour:[6,22,42,43,53,69,72],hourli:[53,54,61,109],how:[0,3,5,6,7,8,11,12,22,23,24,25,26,27,28,33,37,39,40,41,42,44,48,49,50,51,52,53,54,55,59,62,64,65,66,68,70,72,74,75,78,80,81,82,109,206,218,219,220],howev:[0,4,6,9,10,11,12,13,15,17,18,20,22,24,26,27,28,33,42,44,45,46,49,60,63,64,70,71,72,76,77,79,82],hoytech:220,html:[6,81,206],http:[6,33,34,36,38,40,43,49,56,74,204,205,206,207,208,209,210,211,212,214,215,220],httpadaptor:74,hub:45,hudson:33,huge:57,huge_daili:61,human:[11,53,61,65,91,145,195,219],hundr:28,hurt:11,hybrid:11,hypothet:38,iauthent:6,iauthor:6,ibm:80,icompressor:70,id1:32,id2:32,idct:[64,207],ide:40,idea:[6,14,28,30,36,41,42,44,45,54,67,68,82,219,220],ideal:[6,44,69,74,77],idealclwritelat:74,idempot:[13,22,72],idemptot:22,ident:[0,59,81],identifi:[0,3,6,9,10,11,13,14,15,16,20,21,22,23,24,25,26,28,30,32,60,72,81,216],idiomat:8,idl:6,idx:65,ieee:[17,22],iendpointsnitch:[6,78],iftop:220,ignor:[0,6,10,14,22,34,60,64,82,195,207],iinternodeauthent:6,illeg:14,illegalargumentexcept:209,illustr:[1,20,32,62,75,209],imag:[22,36,220],imagin:66,imbal:[0,50],immedi:[4,6,11,22,24,42,57,59,63,70,77,86,146],immut:[4,45,62,66,70,71],impact:[6,11,39,42,50,53,54,57,69,73,77,218,220],imper:[0,3],implement:[0,1,3,4,6,10,13,14,18,20,28,29,32,33,34,44,45,53,54,57,59,60,61,65,70,72,77,78,80],implementor:6,impli:[0,1,11,12,22,32,53,58,59,75],implic:[0,77],implicitli:[14,20],import_:82,importantli:54,impos:[6,57],imposs:[50,67],improv:[0,3,6,11,22,32,37,42,44,52,55,63,67,71,73,78,79,82,220],inaccur:220,inact:45,inam:210,inboundconnectioniniti:56,inboundmessagehandl:57,inc:[23,24,25,26,27,28,29,30],incast:220,includ:[0,3,4,6,10,11,12,13,18,20,22,23,24,25,26,27,28,30,32,33,34,35,36,42,43,49,53,54,56,57,58,59,60,61,62,64,66,67,68,71,72,74,75,77,80,82,83,105,162,199,203,210,216,217,218,219,220],included_categori:[53,61,105],included_keyspac:[53,61,105],included_us:[53,61,105],inclus:[42,161],incom:[6,57,61],incomingbyt:74,incompat:[6,10,57,60],incomplet:[39,214],inconsist:[45,72,75,76],incorrect:45,increas:[0,3,4,6,11,18,45,50,58,59,63,67,70,71,72,74,76,78,79,161,207,215,216],increment:[2,3,6,10,13,22,42,43,50,57,59,66,72,73,74,97,107,146,162,168,188,211,214],incremental_backup:[60,62],incur:[13,22,56,59,74],inde:0,indefinit:[53,61],indent:34,independ:[0,11,66,71,77,219],index:[2,4,6,9,10,11,12,13,15,22,25,32,49,52,54,58,59,60,62,64,65,66,73,82,146,152,207,213,214,218],index_build:
192,index_identifi:16,index_nam:16,index_summari:192,index_summary_off_heap_memory_us:195,indexclass:16,indexedentrys:74,indexinfocount:74,indexinfoget:74,indexnam:152,indexsummaryoffheapmemoryus:74,indic:[0,3,5,6,12,13,24,26,34,42,45,56,61,64,65,76,161,208,209,216,218,219,220],indirectli:13,indirectori:64,individu:[0,6,10,14,22,24,28,42,44,57,58,71,77,207,215],induc:13,industri:25,inequ:[10,13],inet:[9,11,14,17,22,60],inetaddress:[6,45],inetaddressandport:53,inetworkauthor:6,inexpens:71,inf:56,infin:[9,10,12],inflex:50,influenc:11,info:[6,46,48,56,61,74,94,146,204,218],inform:[0,4,6,12,13,22,24,25,26,27,29,49,50,51,54,57,58,60,64,72,74,75,76,77,78,79,81,82,85,88,114,134,136,137,138,145,146,167,184,186,206,207,208,216,217],infrastructur:[42,80],ing:11,ingest:[6,72],ingestr:82,inher:[1,11,22],inherit:20,init:74,initcond:[9,14],initi:[3,6,14,18,34,39,41,43,50,57,59,60,61,64,74,75,77,79,82,146,179,207],initial_token:[60,79],inject:61,innov:80,input:[0,9,10,14,17,22,39,58,64,82,210,218],inputd:22,insecur:6,insensit:[11,12],insert:[0,6,9,10,11,12,14,15,16,20,22,45,48,52,54,61,62,64,71,76,77,81,82,214],insert_stat:[12,13],insertedtimestamp:204,insertstmt:64,insid:[6,11,12,13,22,34,81,82],insight:[24,30,218,219],inspect:[0,6,40,81,82,215],instabl:6,instal:[6,21,33,35,44,45,48,52,56,64,77,82,213,220],instanc:[0,10,11,12,13,14,16,18,19,20,21,22,24,33,40,44,45,57,65,66,71,74],instantan:74,instanti:10,instantli:[6,57],instead:[0,4,10,11,13,18,22,24,27,28,30,34,36,43,45,53,60,61,64,66,67,167,186,204,220],instrospect:217,instruct:[0,6,8,11,36,37,38,40,49,52,72,220],instrument:[35,77],insuffic:216,insuffici:220,insufici:218,int16:54,int32:54,intact:57,intasblob:13,integ:[0,10,11,12,13,17,22,28,57,65,74,211],integr:[3,30,32,41,44,49,52,57,80],intellig:0,intellij:[30,34,41],intend:[11,27,32,39,59,77,207],intens:[6,44,45],intent:[0,39,58],intention:20,inter:[6,57,58,64,78,125,146,178,207],inter_dc_stream_throughput_outbound_megabits_per_sec:58,interact:[3,30,44,51,82,220],interest:[0,23,24,26,27,54,68,77,219],interfac:[3,6,10,14,26,34,36,45,46,49,54,60,70,77,80,220],interleav:81,intern:[6,9,11,13,18,22,36,39,45,56,60,71,72,74,83,203,216,220],internaldroppedlat:74,internalresponsestag:[60,74,219],internet:6,internod:[0,6,45,52,55,58,64,77,207,216,220],internode_application_receive_queue_capacity_in_byt:57,internode_application_receive_queue_reserve_endpoint_capacity_in_byt:57,internode_application_receive_queue_reserve_global_capacity_in_byt:57,internode_application_send_queue_capacity_in_byt:57,internode_application_send_queue_reserve_endpoint_capacity_in_byt:57,internode_application_send_queue_reserve_global_capacity_in_byt:57,internode_application_timeout_in_m:[6,57],internode_encrypt:[6,77],internode_inbound:[57,60],internode_outbound:[57,60],internode_tcp_connect_timeout_in_m:6,internode_tcp_user_timeout_in_m:6,internodeconnect:[132,182],internodeus:[132,182],interpret:[6,10,22,30,82],interrupt:[45,54],intersect:0,interv:[4,6,9,24,74,77,81,208],intra:[6,57,74,78,81],intrins:22,introduc:[0,1,6,10,17,24,28,37,54,57,59,61,72,79,214],introduct:[10,20,31,44,52],introspect:220,intrus:206,inttyp:64,intvalu:14,invalid:[0,6,13,20,39,57,64,77,137,139,140,141,146,209,215,219],invalidatecountercach:146,invalidatekeycach:146,invalidaterowcach:146,inventori:28,invert:81,invertedindex:21,investig:[6,41,217,218,219,220],invoc:[14,54],invoic:27,invok:[14,38,54,58,64,77,200],involv:[0,1,6,13,23,24,28,32,36,57,58,67,70,75,76,77,214,218,220],ioerror:34,ios:220,ip1:6,ip2:6,ip3:6,ip_address:84,ipaddressandport:57,ipartition:
64,ipv4:[6,17,22,45,57],ipv6:[6,17,22,57],irolemanag:6,irrevers:[11,22],irrevoc:57,is_avail:[28,29],isn:[0,18,34,42,45],iso8601:[61,204],iso:22,isol:[1,6,11,13,57,74,216,217,219],issu:[0,6,20,28,33,35,36,37,38,42,43,44,45,49,50,54,55,57,63,66,70,161,204,205,206,207,208,209,210,211,212,214,215,216,218,219,220],item:[12,22,23,24,25,33,39,40,42,54],iter:[0,6,25,58,209],its:[0,1,4,6,11,12,13,14,22,24,26,40,45,49,53,54,56,57,58,61,62,64,66,67,69,72,74,77,78,79,80,81,205,209],itself:[6,11,16,27,45,53,57,66,72,79,219],iv_length:6,jaa:77,jacki:38,jamm:40,januari:22,jar:[14,34,35,40,44,56,74],java7:77,java8_hom:40,java:[3,6,14,21,22,34,40,42,48,49,52,54,55,65,69,71,74,77,146,184,209,217,218,220],java_hom:[56,220],javaag:40,javac:56,javadoc:[33,34,39,49],javas:6,javascript:[6,14],javax:77,jbod:71,jce8:6,jce:6,jcek:6,jconsol:[52,66,77],jdbc:30,jdk11:56,jdk:[6,33,56],jdwp:40,jeff:[23,24,25,26,27,28,29,30],jenkin:[35,41,52],jetbrain:40,jira:[5,6,35,37,39,41,42,44,65,204,205,206,207,208,209,210,211,212,214,215],jks:81,jkskeyprovid:6,jmap:220,jmc:[66,77],jmx:[6,18,20,52,53,54,57,60,62,64,73,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],jmx_password:77,jmx_user:77,jmxremot:77,jni:56,job:[42,76,86,116,159,161,168,199],job_thread:161,john:[13,22],join:[3,6,8,13,32,52,54,56,69,76,77,79,146,218,219],joss:13,journal:[62,64],jpg:22,jre:56,jre_hom:56,jsmith:22,json:[9,10,13,15,52,64,66,70,90,195,197,204],json_claus:13,jsonz:62,jsr:[6,14],jsse:6,jsserefguid:6,jstackjunit:44,jstackjunittask:44,judgement:34,jul:[56,220],jump:56,junit:[33,34,35,40,44],junittask:44,jurisdict:6,just:[0,6,14,20,24,25,27,37,40,42,44,45,53,57,60,65,66,76,77,81,216,220],jvm:[6,21,40,45,46,50,56,69,73,77,79,217,218],jvm_extra_opt:40,jvm_opt:[46,77],jvmstabilityinspector:39,k_i:28,kashlev:30,kbp:72,keep:[0,6,8,11,25,27,28,32,34,37,42,45,50,57,58,61,64,66,67,74,75,83,137,203,214,216,219,220],keepal:[6,45],kei:[0,3,4,6,9,10,11,13,14,17,22,24,25,26,27,28,29,30,32,44,45,49,50,54,57,58,60,62,64,65,66,67,70,71,72,74,75,76,77,80,81,83,86,124,128,130,140,146,150,170,171,195,203,208],kept:[1,4,6,32,64,66,67,74,214],kernel:[6,45,65,220],key_alia:6,key_password:6,key_provid:6,keycach:74,keycachehitr:74,keyserv:43,keyspac:[0,3,6,9,10,12,14,15,16,20,22,25,28,29,48,52,54,55,59,61,63,66,70,72,73,76,77,79,81,82,83,86,87,89,94,96,105,106,115,116,119,124,128,130,137,146,148,150,151,152,153,159,161,167,168,172,185,186,187,194,195,196,199,200,202,203,204,205,206,207,210,211,212,213,214,215,218,219],keyspace1:[20,60,64,205,207,208,209,210,211,213,218],keyspace_definit:81,keyspace_nam:[11,14,20,22,54,60,64,66,76,218],keystor:[6,64,77,207],keystore_password:6,keystorepassword:77,keytyp:208,keyvalu:60,keyword:[10,11,13,14,15,16,17,22],kib:[54,62,72,76,91,145,195,219],kick:[146,163],kill:6,kilobyt:70,kind:[0,3,11,12,22,33,42,57,60,62,65,66,75,216,219],kitten:22,knife:[146,184],know:[0,4,6,11,13,22,24,34,37,43,58,60,66,210,218,219,220],knowledg:37,known:[0,20,22,24,27,28,32,47,51,59,63,66],krumma:44,ks_owner:77,ks_user:77,kspw:[64,207],ktlist:[62,185],kundera:47,label:[22,33],lack:[74,218,219],lag:74,laid:27,land:70,landlin:22,lang:[52,56,74,77,209,220],languag:[3
,6,9,10,12,14,21,22,32,36,47,51,52,82],larg:[0,1,3,6,11,13,14,22,31,33,44,52,57,59,64,67,71,74,77,80,82,204,210,212,216,218,219,220],large_daili:61,larger:[0,6,44,45,50,54,67,70,71],largest:[6,74],last:[0,6,12,13,14,15,19,36,50,54,56,66,67,74,84,146,208,209,210,216,218,220],last_nam:29,lastli:[13,22],lastnam:13,latenc:[0,1,3,6,11,32,45,50,53,54,59,60,74,78,80,81,217,218],latent:[0,216,220],later:[0,11,22,26,28,34,36,42,45,72],latest:[0,6,32,33,43,49,66,75,82,200,206,218],latest_ev:81,latter:[12,27],launch:0,law:27,layer:71,layout:[0,11,36],lazi:11,lazili:11,lead:[0,6,10,11,22,24,50,66,69,218,220],leak:54,learn:[6,23,24,26,44,45,50,82],least:[0,4,6,11,12,13,18,36,45,59,64,66,71,76],leav:[0,6,12,13,24,34,44,45,54,57,76,82,216,218,219],left:[6,17,19,49,67,214],legaci:[4,6,20,81],legal:10,len:57,length:[4,6,10,11,17,22,39,50,57,58,66],lengthier:42,less:[0,4,6,22,35,42,45,49,57,58,63,71,212,215,218,219,220],let:[6,23,24,25,26,27,28,37,43,67],letter:17,level:[1,4,6,10,11,13,19,20,34,39,46,58,59,60,61,64,66,70,71,72,73,74,77,82,83,126,137,146,179,203,208,209,211,216,218,219],leveledcompactionstrategi:[11,58,63,66,67,69,206,209,219],leverag:[30,50],lexic:45,lib:[4,6,21,35,39,40,44,49,56,204,205,206,207,208,209,210,211,212,213,214,215,220],libqtcassandra:47,librari:[8,39,41,44,47,74,82],licenc:39,licens:[35,39,40,42],lie:216,lies:216,lieu:75,life:42,lifespan:71,light:57,lightweight:[0,2,3,27,32,59,83],like:[0,3,6,11,12,13,14,17,22,25,26,27,28,34,36,38,39,42,44,45,49,52,57,59,60,64,66,67,69,70,71,72,75,76,77,209,210,211,216,217,218,220],likewis:20,limit:[0,1,4,6,9,10,11,20,22,28,32,45,49,50,54,55,58,65,66,70,77,81,218,220],line:[6,12,24,27,34,42,44,46,50,51,54,56,57,62,64,65,77,83,84,86,87,89,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,204,207,209,214,218],linear:[0,3,71],lineariz:2,linearli:63,link:[6,8,11,12,42,44,54,57,62,65,213],linux:[6,36,43,45,49,50,56,217,218,220],list:[1,3,4,5,6,9,10,11,12,13,14,17,24,26,27,29,32,33,35,36,40,41,42,43,44,46,49,51,52,53,54,55,56,57,58,61,64,65,66,72,74,76,77,79,81,82,83,84,86,87,88,89,94,96,102,105,106,112,115,116,119,121,124,128,129,130,132,136,137,143,144,146,148,151,152,153,156,159,160,161,162,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,203,205,207,208,209,210,215],list_liter:[13,22],list_permiss:53,list_permissions_stat:12,list_rol:53,list_roles_stat:12,list_us:53,list_users_stat:12,listarg:14,listen:[6,49,52,56,74,220],listen_address:[46,51,52],listen_interfac:46,listsnapshot:[62,146,213],liter:[10,12,14,17,20,30,82],littl:[34,54,216,219],live:[0,3,13,30,36,52,66,74,79,208,213,214,218,220],live_scan:60,livediskspaceus:74,liveness_info:204,livescannedhistogram:74,livesstablecount:74,load:[0,3,6,11,21,22,37,50,52,56,58,72,73,74,76,77,78,79,81,83,138,146,153,161,186,203,216,219,220],loader:[64,207],loadm:207,local:[0,1,3,4,6,11,20,35,40,41,42,44,51,53,54,55,60,62,71,72,74,77,78,82,146,155,161,165,198,208,216,217,218,220],local_jmx:77,local_on:[0,75,77,82,216,219],local_quorum:[0,72,75,82,216,220],local_read:60,local_read_count:195,local_read_lat:60,local_read_latency_m:195,local_scan:60,local_seri:82,local_writ:60,local_write_latency_m:195,localhost:[6,49,51,54,56,60,61,77],localpartition:60,locat:[6,24,26,32,43,48,49,61,70,74,77,78,82,192,207,216,218,220],lock:[6,45,74,220],log:[0,3,4,6,11,13,36,39,44,48,49,52,55,57,59,60,65,73
,74,77,81,83,95,99,105,109,126,146,161,164,179,192,203,217,220],log_al:66,log_dir:54,logback:[46,61,218],logdir:[53,61],logger:[34,46,55,61,105],loggernam:61,logic:[0,6,21,25,28,30,31,52,57,59,218,219],login:[6,9,20,43,44,53,54,56,61,77,83,216],login_error:53,login_success:53,logmessag:[53,61],lol:22,longer:[6,9,10,11,30,43,45,54,57,58,66,79,86,146,211,214,216],longest:218,look:[0,6,12,24,25,28,33,36,37,38,42,44,57,66,67,71,75,76,209,211,216,218,220],lookup:[26,32,74],loop:[34,57],lose:[0,4,6,27,57,66,79,214],loss:[0,6,22,50,57,69,76,220],lost:[1,58,61,62,66,79,211],lot:[6,24,27,28,36,51,52,54,66,76,83,203,212,218,219,220],low:[0,3,6,42,80,146,148,220],lower:[0,4,6,11,12,13,20,45,63,64,66,67,70,74,79,216,218],lowercas:12,lowest:[6,42,67,72,208],lsm:[0,219,220],lucen:52,luckili:217,lwt:[0,32,59],lww:0,lz4:[4,6,57,72],lz4compressor:[4,6,11,50,64,70,72,208],mac:220,macaddr:9,machin:[6,11,44,45,54,62,74,77,78,79,208,217,220],made:[0,3,6,22,32,43,52,53,56,57,58,59,63,71,72,75,77,218],magazin:[32,62,64],magazine_nam:32,magazine_publish:32,magic:57,magnet:[4,6],magnitud:13,mai:[0,1,4,6,9,10,11,13,14,16,17,18,20,22,25,26,27,28,32,33,35,36,39,40,42,44,45,50,53,54,56,57,58,59,60,61,62,63,64,66,69,72,74,75,77,78,79,81,82,168,209,211,216,217,218,219,220],mail:[5,37,42,43,52],main:[0,6,14,18,40,45,48,49,54,56,62,64,77,82,209,216,218],main_actor:13,mainli:[6,58],maintain:[0,6,11,23,37,42,57,72],mainten:[0,72,74],major:[0,4,10,36,42,49,66,69,75,77,89,146,213,219],make:[0,1,6,8,9,21,22,23,24,25,27,28,29,30,32,33,34,35,36,37,40,42,44,45,46,54,57,59,60,64,66,67,69,75,76,77,79,81,82,143,146,204,218,220],malform:216,malfunct:57,malici:77,man:6,manag:[0,3,6,20,24,25,27,28,30,33,36,40,41,43,44,52,59,60,74,77,79,83,85,146,203],mandatori:[3,11,14,64],mani:[0,3,6,11,23,24,25,27,30,34,39,42,54,59,60,64,66,67,68,70,71,72,74,75,77,81,82,83,86,89,96,106,109,115,116,161,168,187,199,200,203,209,215,216,219,220],manifest:[62,64,83,203],manipul:[3,12,15,18,44,52,204],manual:[6,25,35,38,45,214,220],map:[0,3,6,9,10,11,13,14,17,20,23,29,32,50,52,65,74,218,220],map_liter:[11,16,20,22],mar:22,mark:[0,6,20,24,42,54,66,76,79,100,146,208,210,214],marker:[4,6,11,12,39,45,214],markup:36,marshal:208,mashup:23,massiv:[37,50,220],master:[2,3],match:[0,4,6,12,13,14,17,20,74,75,78,208,213],materi:[0,3,6,10,11,12,15,23,24,25,26,27,28,29,30,31,52,59,60,74,82,146,202],materialized_view_stat:12,math:72,matrix:55,matter:[11,45,75,220],maven:35,max:[4,6,11,52,54,56,57,60,66,72,74,77,81,82,109,119,127,146,161,172,180,208,211,218,219],max_archive_retri:[53,54],max_hint_window_in_m:[72,79],max_hints_delivery_thread:72,max_hints_file_size_in_mb:72,max_index_interv:64,max_log_s:[53,54,61,109],max_m:60,max_map_count:45,max_mutation_size_in_kb:[4,6,45],max_partition_s:60,max_queue_weight:[53,54,61,109],max_threshold:[64,66],maxattempt:82,maxbatchs:82,maxfiledescriptorcount:74,maxfiles:61,maxhintwindow:180,maxhistori:61,maxim:71,maximum:[4,6,14,50,53,54,57,60,61,63,72,74,82,109,121,146,168,174,208,211,212,216,218,219],maximum_live_cells_per_slice_last_five_minut:195,maximum_tombstones_per_slice_last_five_minut:195,maxinserterror:82,maxldt:205,maxoutputs:82,maxparseerror:82,maxpartitions:74,maxpools:74,maxrequest:82,maxrow:82,maxt:205,maxtasksqueu:74,maxthreshold:172,maxtimestamp:4,maxtimeuuid:10,mayb:13,mbean:[6,20,66,74,77],mbeanserv:20,mbit:[64,207],mbp:[6,58,72],mct:6,mean:[0,6,9,11,12,13,14,17,18,22,27,52,54,59,64,66,67,72,74,75,78,81,82,161,216,217,218,219,220],meaning:[13,58],meanpartitions:74,meant:[0,22,45,72,74],measur:[6,24,28,32,39,42,44,57
,74,79,81,82,220],mebibyt:60,mechan:[0,53,65],media:[23,24,25,26,27,28,29,30],median:[60,74,218],medium:220,meet:[0,1,3,6,39,75,77],megabit:[72,207],megabyt:[6,72,212,219],mem:220,member:[0,34,77,81],membership:[1,2,6],memlock:45,memor:54,memori:[0,4,6,11,50,52,53,54,57,61,63,65,67,70,73,80,215,218,220],memory_pool:74,memtabl:[2,6,11,62,63,64,65,68,69,70,71,74,185,218,220],memtable_allocation_typ:4,memtable_cell_count:195,memtable_cleanup_threshold:4,memtable_data_s:195,memtable_flush_period_in_m:[11,64],memtable_off_heap_memory_us:195,memtable_switch_count:195,memtablecolumnscount:74,memtableflushwrit:[60,74,219],memtablelivedatas:74,memtableoffheaps:74,memtableonheaps:74,memtablepool:6,memtablepostflush:[60,74,219],memtablereclaimmemori:[60,74,219],memtableswitchcount:74,mention:[6,22,27,42,56,74,77,207,216],menu:[0,40],mere:34,merg:[0,36,38,42,63,68,70,71,73,75,220],mergetool:38,merkl:[0,6,74,76],mess:[42,44],messag:[0,1,6,22,33,36,39,42,52,53,54,55,56,58,59,61,64,72,74,75,76,77,207,211,212,213,214,215,216,218,219],messagingservic:57,met:13,meta:[13,56,74,81],metadata:[0,4,20,28,43,58,70,71,74,83,203,211,214,215,218],metal:6,meter:74,method:[10,13,14,20,34,37,39,40,44,48,52,54,56,64,77,81],methodolog:30,metric:[6,55,60,72,73,81,217,219,220],metricnam:74,metricsreporterconfigfil:74,mib:[53,54,91,145,195],micro:219,microsecond:[6,11,13,22,74,208,219],microservic:25,midnight:22,midpoint:0,might:[0,6,11,13,23,24,25,26,27,28,43,54,62,64,66,67,74,75,84,86,87,89,94,96,102,106,109,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,215,216,219],migrat:[6,50,60,74,78],migrationstag:[60,74,219],milli:4,millisecond:[4,6,10,11,22,28,54,74,148,168,196,208,211,219,220],min:[4,6,11,45,65,66,74,81,82,119,146,172,208,218,219],min_index_interv:64,min_sstable_s:68,min_threshold:[64,66],minbatchs:82,mind:28,minim:[6,25,27,54,59,69,71],minimum:[6,11,14,28,32,46,74,76,206,208],minlocaldeletiontim:208,minor:[10,12,36,54,73,75],minpartitions:74,mint:205,minthreshold:172,mintimestamp:208,mintimeuuid:10,minttl:208,minu:28,minut:[6,22,27,53,54,56,61,66,69,72,74,77,81,109],mirror:[36,49],misbehav:[52,66,217],misc:[132,182],miscelen:74,miscellan:6,miscstag:[60,74,219],mismatch:[0,6,54,57,75],misrepres:211,miss:[11,18,33,35,66,72,74,76,79,214,220],misslat:74,misspel:206,mistak:50,mistaken:[54,62,64,84,86,87,89,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202],mistun:218,mit:43,mitig:[6,77],mix:[6,49,50,54,66,69,81,220],mkdir:[53,54,56,64,218],mmap:45,mnt:16,mock:44,mode:[4,6,49,53,54,62,64,77,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,216],model:[3,11,15,20,26,27,29,42,52,56,58,77,81,220],moder:28,modern:[71,72,220],modestli:6,modif:[13,20,218],modifi:[6,9,10,11,14,20,22,27,32,42,60,63,69,70,210],modification_stat:13,modul:82,modular:39,modulo:0,moment:[6,42],monitor:[45,49,52,53,73,77,78,85,146,216,220],monkeyspeci:[1
1,18],monkeyspecies_by_popul:18,monoton:[0,11,59,73],month:[22,28,56],monument:23,more:[0,1,4,6,10,11,12,13,22,23,24,25,28,29,32,34,36,37,42,44,46,49,50,51,52,53,54,56,57,58,59,60,61,62,63,64,67,69,70,71,72,73,74,75,77,78,79,81,83,89,115,116,146,148,161,168,184,196,200,203,208,209,215,217,219,220],moreov:[13,64],most:[0,1,6,11,12,13,22,24,27,28,36,37,40,42,44,45,46,49,50,54,57,60,61,66,67,68,70,71,72,74,75,77,82,88,146,196,208,209,216,218,219,220],mostli:[4,6,11,22,25,66,217,218],motiv:[44,69],mount:[6,220],move:[0,6,28,42,45,52,54,59,65,73,74,76,146,211,214,219],movement:[0,58,59,73,218],movi:[13,22],movingaverag:6,msg:61,mtime:[11,210],mtr:220,much:[0,5,6,11,23,24,27,28,50,58,60,63,64,65,66,67,68,69,78,207,216,218,220],multi:[1,2,3,6,12,39,55,62,72,218,220],multilin:41,multipl:[1,4,6,10,11,12,13,14,19,22,23,24,27,30,32,34,39,40,42,45,46,50,54,58,69,71,72,75,78,81,82,83,151,203,204,216,219],multipli:[28,67],multivari:80,murmur3:4,murmur3partit:4,murmur3partition:[6,14,64,82,208],museum:23,must:[0,1,3,4,6,10,11,13,14,17,18,20,24,27,28,32,34,35,40,42,44,45,46,50,53,54,56,57,58,59,62,64,66,69,74,75,77,79,81,82,185,203,204,205,206,207,208,209,210,211,212,213,214,215],mutant:16,mutat:[0,4,6,13,45,57,60,65,72,74,75,200,219],mutatedanticompactiongaug:74,mutationsizehistogram:74,mutationstag:[60,74,219],mv1:18,mvn:35,mx4j:74,mx4j_address:74,mx4j_port:74,mx4jtool:74,mxbean:20,myaggreg:14,mycolumn:17,mydir:82,myevent:13,myfunct:14,myid:12,mykei:17,mykeyspac:[14,50],mytabl:[11,14,17,19,21],mytrigg:21,n_c:28,n_r:28,n_v:28,nairo:22,naiv:0,name:[4,6,9,10,11,12,13,14,16,17,18,20,21,22,24,26,29,30,32,33,36,39,40,42,43,44,45,46,49,53,54,60,61,62,74,76,77,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,204,206,207,210,213,216,218,219,220],names_valu:13,nan:[9,10,12,60],nano:[57,60],nanosecond:[22,74],narrow:[216,218,219,220],nathan:13,nativ:[4,6,10,12,15,17,30,39,45,51,53,54,56,60,61,64,74,82,98,108,146,152,189,207,219,220],native_transport_port:46,native_transport_port_ssl:77,native_typ:22,natur:[11,22,26,34,57,66,69,70,72,220],navig:36,nbproject:40,ncurs:220,nearli:40,neccessari:6,necessari:[6,11,14,20,27,42,53,54,57,61,70,77,204,208,211],necessarili:[6,12,25,46],need:[0,1,4,6,10,11,12,13,20,22,23,24,25,26,27,28,32,33,34,39,40,42,43,44,45,46,51,53,54,57,58,59,60,61,62,63,64,66,67,70,71,72,75,76,77,78,80,82,124,128,207,212,213,215,219,220],needlessli:0,neg:[6,50],negat:[19,77],neglig:[13,220],neighbor:[0,58,216],neighbour:66,neither:[6,18,22,77],neon:40,nerdmovi:[13,16],nest:[12,13,34],net:[6,40,45,48,61,77],netbean:[30,41],netstat:[79,146],netti:[6,55,57],network:[0,1,6,13,45,50,57,58,62,71,72,75,76,77,78,145,146,149,218],network_author:20,network_permiss:6,networktopologystrategi:[48,59,77,81],never:[0,6,10,11,12,13,14,22,27,34,45,50,57,59,66,67,76,209],nevertheless:[13,32],new_rol:20,new_superus:77,newargtuplevalu:14,newargudtvalu:14,newer:[0,28,66,69,71,82,116,205,220],newest:[11,69,205],newli:[0,11,20,22,42,65,146,153],newreturntuplevalu:14,newreturnudtvalu:14,newtuplevalu:14,newudtvalu:14,newvalu:64,next:[0,6,24,45,51,54,56,59,60,62,64,67,68,72,75,82,217,218],ngem3b:13,ngem3c:13,nic:[56,220
],nice:[28,57],nid:220,nifti:38,night:[24,28],ninth:0,nio:[6,14,55,58,74],nntp:220,node:[1,3,4,6,11,13,14,21,22,27,28,32,39,44,46,47,50,51,52,53,54,55,56,57,59,60,61,62,63,64,65,66,67,71,73,74,75,76,78,80,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207,208,209,217,218,219,220],nodej:48,nodetool:[3,4,6,18,49,52,54,55,56,58,59,60,62,63,70,73,76,77,79,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,210,213,215,216,217,218,220],nois:[6,218],noiser:218,noisi:218,nologin:9,nomenclatur:0,nomin:28,non:[0,6,9,10,11,12,13,14,20,22,25,45,56,57,58,60,63,66,70,74,77,82,208,211],nonblockingbufferhandl:57,none:[0,1,6,11,13,22,59,60,62,64,74,76,77,208],nonsens:20,noopauditlogg:53,nor:[6,11,18,22,49],norecurs:[9,20],norm:74,normal:[1,11,14,17,20,27,32,40,45,49,54,56,59,60,72,74,76,81,82,216,218,219,220],nosql:[3,30,80],nosuperus:[9,20],notabl:[14,17,49],notat:[10,12,13,24,25,28,82],note:[0,4,5,6,10,11,12,13,14,15,17,20,22,24,25,38,41,42,43,45,49,50,56,57,59,64,66,77,203,204,205,206,207,208,209,210,211,212,213,214,215,218,220],noth:[6,11,14,38,44,45,205],notic:[6,24,29,58,77,219,220],notif:8,notifi:57,notion:[0,11,12],notori:[0,23],noun:27,now:[10,24,25,26,29,33,34,36,40,50,66,79,220],nowinsecond:54,npercentil:11,ntp:[0,6],nullval:82,num_cor:82,num_token:[50,79],number:[0,1,4,6,10,11,12,13,14,15,17,18,22,24,25,26,27,28,29,32,33,40,42,43,44,45,50,53,54,55,57,59,61,62,63,64,66,67,69,70,72,74,75,77,79,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207,208,212,216,217,219,220],number_of_cor:6,number_of_dr:6,number_of_partitions_estim:195,numer:[3,15,19,33,63,81],numprocess:82,numsampl:218,ny229:25,object:[1,3,6,11,12,39,55,56,57,58,64,204],objectnam:20,observ:34,obsolet:[6,71,74,215],obtain:[12,24,26,28,62,64,77,220],obviou:[14,38,49],obvious:[11,23],occasion:[76,219],occup:[13,220],occupi:[6,57,74],occur:[0,6,10,12,13,21,22,45,56,57,66,71,74,76,203,204,205,206,207,208,209,210,211,212,213,214,215,220],occurr:22,octet:[6,78],odbc:30,oddli:6,off:[0,4,6,25,45,50,57,65,70,74,77,82,146,163,220],off_heap_memory_used_tot:195,offer:[0,15,44,54,70],offheap:[63,71],offheap_buff:6,offheap_object:6,offici:[36,42,52,80,82],offset:[4,11,58,65,74],often:[0,6,11,12,25,26,27,28,34,36,37,42,44,45,53,54,61,64,66,70,71,72,76,77,78,82,109,209,216,219,220],ohc:6,ohcprovid:6,okai:34,old:[0,4,6,54,57,60,66,69,79,83,
103,113,146,203,214,220],older:[4,6,14,40,49,66,71,75,82,205,213],oldest:[4,6,11,53,54,61,205],omit:[4,6,10,11,13,17,22,24,54,72,179],onc:[0,4,6,11,12,14,22,25,28,29,33,38,40,42,44,45,50,54,57,62,65,66,67,70,71,74,76,77,79,81,82,209,216],one:[0,1,4,6,9,10,11,12,13,14,17,18,20,22,24,25,27,28,32,34,37,40,42,44,46,49,50,52,53,54,56,57,58,59,61,62,63,64,66,67,68,71,72,74,75,76,77,78,79,82,83,86,89,96,106,115,116,132,146,161,168,182,185,187,199,200,203,204,208,211,213,214,216,218,219,220],oneminutecachehitr:74,ones:[6,11,12,13,14,18,20,54,74,209],ongo:[37,67,74,79],onli:[0,4,6,9,11,12,13,14,17,18,20,22,24,26,27,28,32,34,36,42,43,44,46,49,50,52,54,56,57,58,59,60,62,63,64,65,66,67,68,70,71,74,75,76,77,78,79,81,82,83,161,185,195,203,205,207,210,211,212,213,215,216,219,220],onlin:[3,82],only_purge_repaired_tombston:66,onto:[0,4,66],open:[0,3,5,6,20,33,37,41,43,56,58,64,77,78,80,207,220],openfiledescriptorcount:74,openhft:61,openjdk:[49,56],openssl:56,oper:[0,1,2,6,10,11,12,13,15,16,18,20,22,27,34,41,49,50,52,53,54,55,57,59,60,61,62,63,64,65,66,71,72,74,76,77,79,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,206,213,214,216,217,218,219,220],operand:19,operatingsystem:74,operationtimedoutexcept:216,opertaion:6,oplog:214,opnam:81,opportun:[36,63],oppos:[50,57],ops:[45,81],opt:14,optim:[0,4,6,11,12,13,28,32,45,55,58,59,66,67,69,71,79,208,218],optimis:[76,161],option1_valu:20,option:[0,3,4,6,9,10,11,12,13,14,16,20,22,24,27,28,30,36,40,42,44,45,49,50,55,56,57,58,60,62,70,71,73,75,77,79,81,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,204,207,208,209,210,211,212,213,214,215,218,219,220],oracl:[6,49,77,220],order:[0,1,4,6,9,10,11,14,18,22,24,25,26,27,28,29,32,34,42,45,54,59,60,63,64,65,69,78,79,81,82,208],ordering_claus:13,orderpreservingpartition:6,ordinari:6,org:[6,14,21,33,34,35,36,40,43,44,45,49,56,61,64,66,70,74,77,204,205,206,207,208,209,210,211,212,214,215,218],organ:[3,4,24,27,32,33,40,47],orgapachecassandra:43,orient:3,origin:[0,4,9,28,36,38,42,53,65,72,168,209,211,212,213],orign:13,os_prio:220,osx:36,other:[0,3,4,6,10,11,12,13,14,18,20,22,23,24,25,27,29,30,32,35,36,37,38,40,42,46,49,52,57,58,61,62,63,64,66,67,69,71,72,73,74,75,77,78,79,146,151,162,205,208,209,214,216,217,218,219,220],other_rol:20,otherwis:[0,6,9,12,13,16,22,24,58,72,76,121,205,216],our:[5,6,8,33,36,37,38,40,43,57,59,66,220],ourselv:38,out:[1,2,3,4,6,11,12,24,25,27,30,34,35,37,40,42,43,54,58,66,67,68,69,74,75,76,77,78,79,80,161,204,205,216,219,220],outag:0,outbound:[6,58,60],outboundtcpconnect:6,outdat:75,outgo:[6,57,220],outgoingbyt:74,outlin:[33,77],outofmemori:64,outofmemoryerror:52,output:[14,20,39,40,43,49,53,54,56,58,59,63,64,66,68,76,81,82,83,89,90,195,197,203,208,211,212,214,215],output_dir:64,outputdir:64,o
utsid:[11,21,22,57],outstand:[214,219],oval:23,over:[0,3,4,6,11,22,24,27,32,45,50,54,57,58,59,66,67,74,76,77,78,79,81,209,211,214],overal:[14,57],overflow:[17,83,168,203],overhead:[6,32,45,50,55,59,70,74,79],overidden:77,overlap:[0,66,67,209],overli:[27,28],overload:[6,14,45,207],overrid:[6,11,33,34,53,64,72,77,79,82,168,207,211],overridden:[6,11,54,60,61],overs:75,overview:[2,52,73],overwrit:[24,61,70,71,72,77],overwritten:[74,116],own:[0,11,12,14,22,37,41,42,45,49,54,56,57,64,66,70,74,76,77,80,81,124,130,137,146,200,209,219],owner:22,ownership:[0,66,167],ownersip:218,p0000:22,p50:219,p50th_m:60,p99:[11,220],p99th:11,p99th_m:60,pacif:22,packag:[33,40,44,45,46,48,51,82,218],packet:[6,218],page:[6,22,33,36,37,40,44,45,50,71,74,80,83,217,219],paged_rang:219,paged_slic:74,pages:82,pagetimeout:82,pai:[34,35,43],pair:[6,11,20,22,57,62,69,77],pane:30,paper:[0,50],parallel:[18,44,55,66,67,76,161,219],param:[11,57],paramet:[4,6,11,14,32,33,34,39,40,46,54,57,61,63,71,78,79,146,179],parameter:33,params:57,paranoid:6,parent:[35,207],parenthes:[29,32,53],parenthesi:[11,81,82,216],park:23,parnew:71,pars:[6,12,53,57,61,65,82,220],parser:[9,10],part:[3,5,6,11,13,14,18,22,24,25,26,28,35,39,40,42,44,45,49,64,77,78,79,82,207,216],parti:[39,52,74,204],partial:[4,11,28,75,214],particip:[0,21,57],particular:[0,6,11,12,13,14,17,20,22,24,32,45,57,62,71,74,77,216,218,219,220],particularli:[12,22,49,77,218,219,220],partit:[1,2,3,4,6,10,11,13,14,24,27,29,30,31,45,50,54,58,60,62,63,64,66,67,71,72,74,75,81,116,124,128,146,168,196,204,208,216,218,219],partition:[4,10,13,14,60,64,76,82,93,146,161,208],partition_kei:[11,13],partitionspercounterbatch:74,partitionsperloggedbatch:74,partitionsperunloggedbatch:74,partitionsvalid:74,partli:13,pass:[39,42,46,82,184,207,208,219,220],password:[6,9,13,20,53,54,60,62,64,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207],password_a:20,password_b:20,passwordauthent:[6,77],passwordfilepath:[53,54,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],past:[24,74],patch:[10,13,33,34,36,38,39,41,44,52,58],path1:[53,54,61],path2:[53,54,61],path:[0,5,6,16,27,39,53,54,56,58,59,62,63,64,69,70,71,74,77,80,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,204,205,207,211,213,215,218,220],pathn:[53,54,61],patter:20,pattern:[6,11,20,22,23,29,30,31,32,61,216,219,220],paus:[6,45,72,146,147,
218,220],pausehandoff:[72,146],pauser:[53,54],paxo:[1,13,55,74,82],paxos_prepar:57,payload:57,payloads:57,pcap:220,peak:[207,220],peer:[0,6,20,50,54,56,57,74,220],peerip:74,pem:[54,56],penalti:[6,13,50],pend:[6,55,57,60,66,72,74,76,146,160,219],pending_flush:195,pending_task:60,pendingcompact:74,pendingflush:74,pendingrangecalcul:[60,74,219],pendingtask:74,pendingtasksbytablenam:74,pennsylvania:22,peopl:[36,42,43,45],per:[4,6,10,11,13,28,32,34,38,42,45,54,57,59,63,64,65,66,67,68,69,70,72,74,76,77,81,82,146,169,177,204,207,214,216,218,219,220],per_second:60,percent:74,percent_repair:195,percentag:[6,74,78,220],percentil:[11,74,216,219,220],percentrepair:74,perdiskmemtableflushwriter_0:[60,74,219],perf:220,perfdisablesharedmem:220,perfect:14,perfectli:[27,57],perform:[0,1,3,6,11,13,20,22,24,25,27,28,30,32,37,38,39,41,42,45,46,50,53,55,58,59,60,61,63,66,67,71,72,74,75,76,77,78,82,161,218,219,220],perhap:[24,28,57,216,218],period:[0,4,6,33,53,57,59,71,74,76,77,79,146,148,220],perl:48,perman:[11,45,66,71,72,218],permiss:[6,9,12,23,24,25,26,27,28,29,30,44,49,53,54,64,77],permit:[6,20,54,57,58,65,77],persist:[0,4,45,56,63,65,71,77,220],person:220,perspect:[24,26,45],pet:22,petabyt:[0,1],peter:23,pgp:43,phantom:47,phase:[79,82,219],phi:[0,6],phone:[13,22,25,29],phone_numb:29,php:48,physic:[6,11,28,29,30,31,45,52,71,78],pib:0,pick:[6,38,42,45,58,66,67,77,79,81,151],pickl:50,pid:[45,220],piec:[0,12,66,74],pile:6,pin:[6,78],ping:[42,220],pkcs5pad:6,place:[5,6,16,21,23,32,34,38,42,54,65,69,74,76,77,82,146,153,207,212,218,220],placehold:[14,82],plai:[14,22],plain:4,plan:[11,28,38,42,50],plane:[0,36],platform:[20,33,49,80],platter:[6,71],player:[14,22],playorm:47,pleas:[5,6,11,13,14,22,33,34,36,40,42,43,44,45,56,61,77,81,215],plu:[14,28,66,74,219],plug:[6,28,33],pluggabl:[0,20,77],plugin:[30,52,74],pmc:43,poe:22,poi:[26,29],poi_nam:[24,29],point:[0,3,4,6,10,17,22,23,24,26,27,30,34,36,40,43,50,52,57,59,72,77,81,82,124,146,207,216,220],pointer:[14,58],pois_by_hotel:[24,29],polici:[6,11,42,43,77,200,216],poll:77,pom:41,pool:[6,56,74,76,146,174,197,219,220],poorli:24,pop:81,popul:[11,18,81],popular:[23,24,30,40,49,71],port:[0,6,40,46,52,53,54,60,61,62,64,74,77,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207,220],portabl:62,portion:[0,24,50,71,82,212],posit:[0,4,6,10,11,19,22,58,63,74,79,204,208],possbili:6,possess:20,possibl:[0,1,6,10,11,13,14,17,20,22,28,33,39,42,44,45,50,57,58,59,63,66,67,71,72,74,77,79,81,209,216,218],post:[13,33,41,57,146,171],post_at:13,postal_cod:29,posted_at:13,posted_bi:11,posted_month:11,posted_tim:11,potenti:[0,6,9,11,12,14,24,28,39,54,61,69,71,77,79,168,209,211],power8:80,power:[6,11,80],pr3z1den7:22,practic:[0,6,11,12,13,27,29,30,43,57,59,72,73,77],pre:[6,11,17,22,54,57,58,71,77,211,212,214],preced:[19,32,45,62,64,81],precis:[10,17,22,66,208],precondit:74,predefin:11,predict:[13,28,54,209],prefer:[0,6,11,12,22,27,34,42,57,59,77,78],preferipv4stack:40,prefix:[11,12,22,208,214],premis:50,prepar:[6,14,15,53,55,61,64,74],prepare_releas:43,prepare_stat:53,preparedstatementscount:74,preparedstatementsevict:74,preparedstatementsexecut:74,preparedstatementsratio:74,prepend:22,p
rerequisit:[41,48],prescript:49,presenc:6,presens:4,present:[0,3,12,13,18,30,46,60,65,74,77,211,220],preserv:[6,11,17,20,27,28,75],preserveframepoint:220,pressur:[6,58,74,219,220],presum:27,pretti:[24,28,82,220],prevent:[0,6,11,44,65,74,75,76,207,211,220],preview:[36,55,76,161],previewkind:76,previou:[0,6,10,11,22,28,43,49,61,67,76,79,213],previous:[6,24,28,57,72,214],previsouli:[112,146],price:27,primari:[0,3,9,10,11,13,14,22,24,25,27,28,29,32,44,54,60,62,64,65,69,70,75,76,77,79,81],primarili:[0,6,11],primary_kei:[11,18],print0:210,print:[49,53,54,62,64,72,76,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,218],prio:220,prior:[6,13,20,22,64,79],prioriti:[1,42,220],privat:[6,34,43,77,78],privileg:[20,77],proactiv:57,probabilist:[0,63,70],probabl:[0,4,6,11,24,44,63,66,76,133,146,183,218,219,220],problem:[0,5,6,11,14,24,27,38,39,43,45,50,72,77,216,217,219,220],problemat:[22,50,216],proc:[6,45],proce:[39,70,79,216],procedur:[13,77],proceed:215,process:[0,1,6,14,24,25,32,33,35,37,38,39,40,41,42,44,45,50,52,57,58,59,60,65,70,71,72,74,75,76,77,79,80,82,85,121,146,147,166,174,206,207,211,213,214,215,218,219,220],processed_byt:60,processed_count:60,processor:[3,53,54],prod_clust:82,produc:[13,14,26,37,54,57,66,69,109,216],product:[0,1,6,11,27,28,35,37,42,45,48,49,52,54,56,59,71,78],profil:[13,40,83,146,148,220],profileload:146,program:[14,44,217,220],programm:0,programmat:210,progress:[34,38,42,43,49,57,60,63,64,73,76,81,83,146,202,203,214,219],project:[26,33,34,35,36,37,43,44,74],promin:11,promot:4,prompt:82,prone:50,propag:[0,6,14,34,39,57,72,78],proper:[0,11,22,36,45,54,77],properli:[6,27,39],properti:[4,6,11,18,20,25,32,40,43,48,53,54,59,65,66,75,77,78,79],propertyfilesnitch:[6,78],proport:[6,13,50],proportion:[6,72,118,146,169],propos:[6,24,43,55,74],protect:[6,11,24,57,59,71,76,77,214],protocol:[0,1,6,39,45,51,54,55,60,61,64,74,77,82,88,98,103,108,113,146,189,207,218,220],protocol_vers:60,prove:[25,220],provid:[0,1,3,4,5,6,11,12,13,14,15,17,22,24,27,28,30,32,40,42,43,51,53,54,57,58,61,64,65,66,70,71,72,74,75,76,77,78,79,80,81,83,145,146,156,160,207,208,209,212,214,215,216,218],provis:[50,64,220],proxim:[6,78],proxyhistogram:[146,219],prtcl:[64,207],prune:57,prv:[58,76,161],ps1:77,ps22dhd:13,pt89h8m53:22,publicationfrequ:32,publish:[23,24,25,26,27,28,29,30,32,35,62,64],published_d:81,pull:[0,27,36,44,50,69,74,76,161],pure:[66,220],purg:71,purpos:[12,13,22,27,57,58,62,71,77],push:[38,42,43,74],put:[15,24,28,42,46,54,58,64,66,67,68,79,137,161,209,219],pwf:[53,54,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],pylib:49,python:[14,33,42,44,48,49,82],pytz:83,qos:61,quak:[14,22],qualifi:[11,14,42,64,219],qualiti:[36,55,77],quantiti:[1,22,59,219],queri:[1,3,6,10,11,12,13,14,16
,18,20,24,25,28,29,30,31,48,52,53,55,57,59,60,61,62,64,66,69,74,76,80,81,82,99,109,146,164,217,220],query_fail:54,queryabl:1,queryopt:[53,54],queryst:[53,54],querytimemilli:54,queryvalidationexcept:216,question:[8,20,27,41,52,220],queu:[6,55,74],queue:[6,24,53,54,57,61,74,109,219,220],quick:[24,54,64,137,200,215],quickli:[24,45,58,62,67,72,219],quill:47,quintana:22,quit:[0,28,60,66,82,207,219,220],quorum:[0,11,55,72,73,77,82,216],quot:[9,10,11,12,14,17,20,81,82],quotat:20,quoted_identifi:12,quoted_nam:11,r_await:220,r_k:28,race:[22,38],rack1:6,rack:[0,6,48,54,60,76,77,78,216,219],rackdc:[6,78],rackinferringsnitch:[6,78],raid0:71,raid1:71,raid5:71,rain:12,rais:[6,12,45,53,72,216],raison:9,ram:[50,63,70,71,220],ran:210,random:[0,11,14,45,79],randomli:[6,79],randompartition:[6,13,14],rang:[0,6,10,11,13,22,24,26,39,55,58,66,67,73,74,76,81,82,89,94,132,146,151,161,182,216,219],range_slic:[74,219],rangekeysampl:146,rangelat:74,rangemov:79,rangeslic:74,rapid:[11,32,59,71],rapidli:220,rare:[10,27,63,64,216],raspberri:71,rate:[0,6,11,23,26,72,74,77,81,82,207,220],ratebasedbackpressur:6,ratefil:82,rather:[6,13,24,26,45,57,58,60,65,66,71,81],ratio:[6,70,71,74,81,208],ration:4,raw:[4,6,14,83,203,218],rdbm:[0,31,52],reacah:61,reach:[0,1,4,6,11,28,42,45,50,53,54,59,65,66,75,209],reachabl:54,read:[0,1,3,6,11,13,22,24,29,32,34,36,39,44,45,48,52,54,55,57,60,63,64,66,67,68,69,70,71,72,73,74,77,78,81,82,132,182,195,200,207,208,215,216,218,219,220],read_ahead_kb:220,read_lat:195,read_repair:[0,11,59,74,75,219],read_repair_ch:75,read_request_timeout:45,readabl:[11,25,53,54,61,65,91,145,195,219],reader:54,readi:[0,11,24,29,36,42,49,60,77],readlat:[74,216],readm:[36,43],readrepair:74,readrepairstag:[74,219],readstag:[60,74,76,219],readtimeoutexcept:216,readwrit:77,real:[1,4,8,11,23,25,27,34,45,80,218],realclean:35,realis:81,realiz:[27,66],realli:[6,27,28,44,58,60,206,210,216,220],realtim:65,reappear:76,reason:[0,4,6,11,13,14,15,18,27,28,45,46,53,57,64,69,71,76,77,79,219,220],rebas:36,rebuild:[0,58,59,63,66,70,74,146,152,168],rebuild_index:146,receiv:[0,1,6,14,18,42,45,49,57,58,59,60,66,71,72,75,79,216,220],received_byt:60,received_count:60,recent:[1,6,42,44,56,71,75,88,209,214],recent_hit_rate_per_second:60,recent_request_rate_per_second:60,reclaim:[54,57,59,61,69],recogn:[13,28,40,42,56],recommend:[4,6,11,22,27,36,45,48,49,52,54,56,59,60,61,64,71,77,79,218],recompact:66,recompress:70,reconcil:[0,1,11,75],reconnect:77,reconstruct:209,record:[4,6,11,13,19,22,23,24,26,27,28,42,50,53,54,61,66,74,81,220],recov:[6,45,55,58,60,66],recoveri:[6,58],recreat:[20,54,82],rectangl:23,recurs:[54,109],recycl:[4,6,74],redhat:[43,49,56],redirect:61,redistribut:[6,76,218],redo:42,reduc:[4,6,11,25,28,37,45,54,57,58,59,62,67,70,72,75,76,83,92,118,146,161,169,203],reduct:6,redund:[0,32,34,39,42,57,59,71],reenabl:[108,110,111,146],ref:[43,61,204,205,206,207,208,209,210,211,212,214,215],refer:[1,6,11,12,13,14,22,24,25,26,27,28,32,33,34,35,44,45,50,51,59,61,79,81,82,216,218],referenc:[6,26,28,81],referenti:3,refin:[29,31,52],reflect:[65,66,204],refresh:[6,64,77,82,146,154],refreshsizeestim:146,refus:[52,57],regain:57,regard:[11,13],regardless:[0,6,20,42,59,72,220],regener:[58,63],regexp:12,region:[6,78],regist:22,registri:77,regress:[39,44],regular:[9,12,28,36,40,44,45,55,57,74,82],regularcolumn:208,regularli:[50,76],regularstatementsexecut:74,regularupd:81,reifi:58,reilli:[23,24,25,26,27,28,29,30],reinforc:30,reinsert:[168,211],reject:[6,13,45,57,65,77,216],rel:[6,22,25,28,82,220],relat:[0,3,8,10,12,13,23,24,27,28,30,31,35,40,42,53,58,
66,72,74,81,208,216,220],relationship:[6,23,24,27,32],releas:[6,10,27,28,32,41,42,49,52,56,59,82,220],relev:[13,20,22,42,64,70,77,80,207,208,211,220],relevel:[83,203],reli:[0,6,14,22,24,45],reliabl:[1,3,37,50,69],reload:[6,73,146,155,156,157,158],reloadlocalschema:146,reloadse:146,reloadssl:[77,146],reloadtrigg:146,reloc:[58,146,159,192,218],relocatesst:146,remain:[0,6,13,14,20,22,32,38,50,57,58,60,66,69,74,76,79,195,219],remaind:[17,19,70],remeb:61,remedi:67,rememb:[24,25,28,61,216],remind:24,remot:[0,4,36,38,40,52,53,54,62,64,67,77,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,216],remov:[0,1,4,6,10,11,12,13,14,15,17,18,20,22,35,39,45,52,54,57,59,64,65,73,75,77,84,87,116,146,160,209,214,215,220],removenod:[79,84,146],renam:[9,22],render:36,reorder:6,repair:[0,4,6,11,18,45,52,55,60,64,68,69,70,72,73,74,78,79,83,137,146,162,179,200,203,208,211,215,219],repair_admin:146,repairedat:210,repairpreparetim:74,repairtim:74,repeat:[12,54,70,77],replac:[0,6,9,14,20,22,35,39,43,45,52,53,54,57,58,59,66,73,75,76,109,213,214],replace_address_first_boot:[0,79],replai:[0,3,4,22,55,71,72,74,118,146,163,169,208],replaybatchlog:146,repli:[37,57],replic:[1,2,3,6,11,28,29,32,50,52,53,54,55,58,60,62,64,66,71,72,76,77,79,81,84,146],replica:[1,6,11,13,28,45,55,66,72,74,75,76,78,79,92,128,146,216,219,220],replica_2:72,replication_factor:[0,11,29,53,54,59,62,76,77,81],repo:[35,38,40,43,49],repo_gpgcheck:49,repodata:43,repomd:43,report:[6,27,35,41,42,49,52,73,216],report_writ:20,reportfrequ:82,repositori:[5,8,33,35,36,37,40,42,44,49,80],repres:[0,6,10,17,20,22,23,24,25,26,27,28,45,66,74,77,78,81,82,208,218],represent:[10,17,25,204],reproduc:[25,37],reproduct:37,request:[0,1,6,11,13,20,21,32,36,44,45,53,54,55,58,59,60,61,62,63,66,69,71,73,75,77,78,82,146,183,199,215,216,219,220],request_count:60,request_failur:53,request_respons:[57,74,219],requestresponsest:219,requestresponsestag:[60,74,76,219],requesttyp:74,requir:[0,1,3,6,11,13,14,20,24,27,28,32,34,36,38,39,40,41,42,43,45,49,50,53,54,58,59,63,64,70,71,75,77,81,206,207,210,213],require_client_auth:6,require_endpoint_verif:6,requisit:75,resampl:6,reserv:[6,10,12,15,23,26,27,28,29,30,31,57,60,220],reservations_by_confirm:29,reservations_by_guest:[24,29],reservations_by_hotel_d:29,reservoir:216,reset:[6,13,36,54,57,146,165,179,206],reset_bootstrap_progress:79,resetfullquerylog:[54,146],resetlocalschema:146,resid:[6,13,45,74,220],resolut:[0,6,13,41,45],resolv:[0,35,38,45,75,167,186],resort:[84,146],resourc:[0,20,50,54,55,77,207,219],resp:14,respect:[6,10,11,14,33,35,54,57,60,62,64,76,78,109,218],respond:[0,6,12,72,75,220],respons:[0,1,6,20,25,32,45,57,58,59,74,75,79,219],ressourc:22,rest:[6,11,12,22,32,33,39,79,216],restart:[3,45,66,72,77,79,146,153,171,206,218],restor:[58,64,66,73,79,82,207,213,214],restrict:[6,10,11,13,18,25,59,76],restructuredtext:36,result:[0,6,10,11,12,14,17,20,22,25,27,28,30,32,37,42,45,50,54,57,58,64,66,68,74,76,82,203,204,205,206,207,208,209,210,211,212,213,214,215,220],resum:[72,85,146,166],resumehandoff:[72,146],resurrect:66,resync:[146,165],retain:[20,45,53,54,61,66,72,211,213],retent:27,rethrow:34,retir:36,retri:[0,6,11,22,54,57,74,75,109],r
etriev:[0,11,13,20,24,32,35],reus:[28,39],reveal:28,revers:[11,13,24],revert:218,review:[11,34,36,41,42,44,52],revis:[25,81],revok:[9,53,77],revoke_permission_stat:12,revoke_role_stat:12,rewrit:[63,66,70,83,146,168,199,203,211],rewritten:[71,168,211],rfc:[14,22],rhel:[49,52],rich:[22,218],rid:35,rider:22,riderresult:22,right:[6,19,23,24,25,26,27,28,29,30,40,43,45,66,76,82,219,220],ring:[6,52,56,59,64,72,76,77,79,82,142,144,146,179,207,216],rise:[0,216],risk:[0,1,11,24,49,69],riski:69,rmb:220,rmem_max:6,rmi:[45,77],robin:72,rogu:14,role:[6,9,10,12,15,73],role_a:20,role_admin:20,role_b:20,role_c:20,role_manag:77,role_nam:20,role_opt:20,role_or_permission_stat:12,role_permiss:6,roll:[45,61,72,77,109],roll_cycl:[53,54,61,109],rollcycl:61,rollingfileappend:61,rollingpolici:61,rollov:61,romain:22,room:[5,8,23,24,26,27,28,29,43],room_id:28,room_numb:[28,29],root:[6,38,42,49,54,56,64,215,218],rotat:[6,54,218],roughli:[0,6,64],round:[13,24,67,72,74],rout:[0,6,57,78],routin:[72,220],row:[0,3,4,6,10,11,13,14,15,17,18,24,28,32,44,50,51,54,58,60,62,63,64,70,71,74,75,76,81,82,83,116,137,141,146,168,170,171,203,208,211,215,220],row_column_count:54,rowcach:[52,74],rowcachehit:74,rowcachehitoutofrang:74,rowcachemiss:74,rowindexentri:74,rows_per_partit:[11,64],rpc:[6,74],rpc_address:[54,60],rpc_timeout_in_m:[132,182],rpm:[43,48],rrqm:220,rsc:200,rst:36,rubi:[14,48],rule:[0,6,12,14,42,45,216,218],run:[0,4,5,6,12,22,24,28,30,33,35,38,40,42,43,45,46,49,53,55,56,57,58,59,60,62,64,66,68,69,71,72,74,75,76,77,79,80,81,83,137,146,161,184,203,206,207,208,210,212,213,217,218,219,220],runnabl:220,runtim:[3,6,18,28,42,48,49,56,73,126,146],runtimeexcept:34,rust:48,s_j:28,s_t:28,safe:[0,14,22,54,57,59,66,77,220],safeguard:71,safepoint:218,safeti:[11,54,66,79],sai:[52,57],said:[11,42,45,64,146,199,220],salient:60,same:[0,1,4,5,6,11,12,13,14,15,17,18,19,20,22,24,25,27,32,36,38,40,42,46,50,52,53,54,57,59,60,63,64,66,67,68,69,72,74,75,76,77,78,79,81,161,209,214,216,218,220],samerow:81,sampl:[4,6,12,14,25,54,58,64,74,81,82,109,146,148,150,196],sampler:[60,74,148,196,219],san:71,sandbox:[6,14],sasi:6,satisfi:[0,11,24,27,34,59,71,74,75,79],satur:[6,74,219,220],save:[6,13,22,33,35,45,46,57,63,70,71,79,81,146,171],saved_cach:6,saved_caches_directori:46,sbin:45,scala:[14,48],scalabl:[0,2,3,43,80],scalar:15,scale:[1,2,3,44,70,80,81],scan:[6,13,24,59,63,74],scenario:38,scene:45,schedul:[0,6,33,57,60],scheduled_byt:60,scheduled_count:60,schema:[0,3,9,11,14,17,30,31,50,52,54,60,62,64,74,75,77,81,82,93,146,155,165,206,208],schema_own:20,schema_vers:54,scheme:0,scientif:24,scope:[20,25,53,61,74,77],score:[6,14,22,78],script:[6,14,30,33,40,43,44,49,53,54,56,83,109,203,204,205,206,207,208,209,211,212,213,214,215,220],scrub:[63,66,70,74,83,146,192,203],sda1:50,sda:220,sdb:220,sdc1:220,sdc:220,search:[24,27,28,42,62,80,218],searchabl:220,second:[0,6,11,12,13,22,24,27,28,32,45,56,62,65,71,72,75,76,77,81,82,83,146,169,177,203,216,218,219,220],secondari:[0,2,3,10,12,13,15,25,27,52,59,60,66,74,80,146,152],secondary_index_stat:12,secondaryindexmanag:[60,74,219],section:[2,4,5,7,10,11,12,13,15,20,22,43,45,48,49,51,53,54,55,57,61,62,64,66,74,76,77,79,83,203,207,218,219],sector:50,secur:[6,14,15,43,52,73],seda:3,see:[0,4,6,10,11,12,13,14,17,20,22,23,24,26,27,28,35,37,40,42,43,44,49,51,52,54,57,61,66,68,74,77,78,79,82,116,146,161,206,208,209,212,218,219,220],seed:[0,6,46,52,56,78,129,146,156],seedprovid:6,seek:[0,3,4,6,50,71,74],seem:24,seen:[6,11],segment:[4,6,54,61,65,72,74,82,109,218,219],segment_nam:65,segmentid:208,select:[0,6,9,10,11,12,14,15,1
9,20,24,26,27,29,30,33,40,43,44,45,51,53,54,58,59,60,61,62,63,64,67,69,75,76,77,81,82,151,218,219,220],select_claus:13,select_stat:[12,18],self:[24,39],selinux:45,semant:[3,10,13,14,27],semi:45,send:[0,6,8,11,32,45,57,58,59,60,72,75,81,216,220],sender:57,sendto:81,sens:[10,13,15,25,45],sensic:14,sensit:[11,12,14,17,60,220],sensor:[22,24,28],sent:[0,6,11,22,45,57,58,59,60,72,74,75,216,220],sentenc:42,separ:[0,4,6,11,13,24,25,27,28,32,34,36,42,46,53,54,57,58,59,61,62,64,66,69,71,77,79,82,84,86,87,89,94,96,102,105,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,204,207,211],septemb:42,seq:[6,161],sequenc:[12,54,57],sequenti:[1,6,58,71,161],seren:13,seri:[11,24,43,49,66,69,82],serial:[4,6,54,58,72,83],serializeds:57,serializingcacheprovid:6,seriou:[36,216,219],serv:[1,13,24,50,57,58,59,71,77,220],server:[6,12,13,22,27,40,41,43,44,45,49,50,56,57,64,71,74,77,80,81,207,216],server_encryption_opt:[64,77,207],servic:[0,6,25,40,49,56,72,74,77,79,218,220],session:[6,20,57,76,77,83,146,162],set:[0,1,3,4,6,9,10,11,12,13,14,17,18,24,27,29,32,36,39,41,42,44,46,50,52,56,57,58,59,61,63,64,65,66,69,70,71,72,74,75,77,78,79,81,82,83,86,105,116,146,159,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,199,203,204,206,207,213,215,216,217,218,219,220],set_liter:[20,22],setbatchlogreplaythrottl:146,setcachecapac:146,setcachekeystosav:146,setcompactionthreshold:[66,146],setcompactionthroughput:[66,146],setconcurr:146,setconcurrentcompactor:146,setconcurrentviewbuild:[18,146],sethintedhandoffthrottlekb:[72,146],setint:[14,64],setinterdcstreamthroughput:146,setlogginglevel:[146,218],setlong:14,setmaxhintwindow:[72,146],setra:50,setstr:14,setstreamthroughput:146,setter:[20,33,34],settimeout:146,settraceprob:146,setup:[6,36,42,44,54,77],sever:[3,4,13,20,24,26,27,30,53,54,57,58,60,62,66,68,72,76,77,81,207],sfunc:[9,14],sha1:[43,213],sha256:49,sha:[38,43],shadow:[18,66,69],shall:[54,56,60,62,64,72],shape:81,shard:[4,28],share:[0,11,13,40,50,208,216,220],sharedpool:[57,82],sharp:47,shed:45,shelf:0,shell:[51,52,56,83],shift:22,ship:[35,44,51,77,82,218,220],shop:[23,24,26],shortcut:18,shorter:[36,77],shorthand:[26,82],shortli:28,shortlog:43,should:[0,4,5,6,10,11,12,13,14,17,20,22,24,27,32,33,35,36,39,40,42,44,45,46,47,48,49,50,51,53,54,56,57,59,61,62,63,64,66,67,68,69,70,71,72,74,76,77,78,79,81,82,151,161,182,212,214,216,220],shouldn:[11,46],show:[20,23,24,25,27,35,52,57,61,76,79,83,94,114,134,146,150,160,167,186,187,195,202,203,215,216,218,219,220],shown:[12,24,25,26,28,32,75,82,195,207],shrink:[6,50],shuffl:57,shut:6,shutdown:[4,6,71],side:[6,11,13,17,22,27,32,57,58,77,216],sig:43,sign:[13,22,45],signal:[6,146,157],signatur:[49,65],signifi:220,signific:[6,36,40,42,44,50,57,58,71,75,216],significantli:[0,6,28,50,58,76,220],silent:14,similar:[0,6,13,14,24,26,53,56,57,59,64,70,71,215,216,220],similarli:[0,10,17,29,34,54,62,71,146,151],similiar:76,simpl:[6,11,23,24,25,27,28,35,37,40,44,59,72,77],simple_classnam:44,simple_select:13,simplequerytest:44,simpler:0,simplereplicationstrategi:77,simpleseedprovid:6,simplesnitch:[6,78],simplest:49,simplestrategi:[29,50,53,54,59,62,76,81],simpli:[0,4,6,11,13,14,17,22,27,28,32,40,44,57,69,71,74,79,200],simplic:54,simplifi:[25,28],simul:44,simultan:[0,1,6,57,58,71,72,82,86,116,159,168,199],sinc:[0,6,11,13,14,22,28,32,36,40,44,45,50,54,56,60,64,66,67,72,74,75,76,78,79,206,209,211,219,220],singl:[0,1,3,6,10,11,12,13,14,17,18,20,22,
24,27,28,29,32,34,42,46,49,50,51,52,54,56,57,58,67,69,72,73,74,75,76,77,78,82,83,89,203,216,218,219,220],singleton:39,sit:54,site:[36,43,49],situat:[6,44,66,72,220],six:56,size:[0,4,6,11,22,25,31,32,34,45,46,50,53,54,55,56,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,77,81,82,83,109,143,146,203,206,208,209,210,213,218,219,220],size_byt:[58,60],size_estim:[146,154,218],sizeandtimebasedrollingpolici:61,sizeof:28,sizetieredcompactionstrategi:[11,64,68,69,219],sjk:146,skinni:219,skip:[0,6,13,45,54,57,62,74,79,82,83,168,185,203,206,212],skipcol:82,skipflush:62,skiprow:82,sla:[39,50],slack:[5,42,52,76],slash:12,slave:33,sleep:220,sleepi:[53,54],slf4j:[34,35,53,61],slf4jexceptionhandl:54,slight:0,slightli:6,slow:[0,3,6,11,58,59,78,216,218,219,220],slower:[6,11,63,72,219,220],slowest:6,slowli:[6,22],small:[0,4,6,11,13,22,28,45,50,57,66,71,83,203,207,216,220],smaller:[0,4,6,28,45,68,71,82,212],smallest:[0,11,14,74,209],smallint:[9,10,14,17,19,22,29],smith:22,smoother:10,smoothli:6,snappi:[4,6,72],snappycompressor:[11,70],snapshot:[3,4,6,27,35,54,56,73,74,83,87,143,146,168,203,211,215,220],snapshot_before_compact:62,snapshot_nam:[87,213],snapshotnam:[87,146],snippet:56,snitch:[0,6,48,52,73,75,93,146],snt:220,socket:[6,77,182],soft:[36,59],softwar:[24,35],sold:24,sole:[11,37],solid:[6,50,71],solr:80,solut:[33,50,53],solv:0,some:[0,1,3,6,9,11,12,13,14,22,24,25,26,27,28,30,35,36,37,40,42,43,44,45,46,50,53,54,57,58,59,60,62,64,65,66,70,72,74,75,77,79,82,208,210,216,218,219,220],some_funct:14,some_keysopac:[11,59],some_nam:12,someaggreg:14,somearg:14,somefunct:14,someon:[38,66],someth:[6,23,27,64,75,210,218,220],sometim:[6,12,13,24,57,72,216,217,218,219,220],someudt:14,somewher:76,soon:[26,77],sooner:6,sophist:0,sort:[4,11,13,22,24,28,32,60,64,66,71,80,195,209,218],sort_kei:195,sound:28,sourc:[3,4,5,6,8,14,33,35,36,37,41,43,49,53,56,61,64,74,83,151,204,213,216],source_elaps:82,space:[0,4,6,28,32,34,45,54,57,58,59,62,65,66,67,69,71,74,212,220],space_used_by_snapshots_tot:195,space_used_l:195,space_used_tot:195,span:[0,6,13,66],spare:[33,218],sparingli:13,spark:47,spd:64,speak:[0,217,218,220],spec:[39,51,54,74,81,82],speci:[11,18],special:[0,12,13,44,45,57,60,66,74,83,214],specif:[0,9,11,12,13,22,24,28,32,36,40,42,45,47,53,54,57,60,62,64,65,66,69,74,76,77,81,82,146,151,161,207],specifc:74,specifi:[0,6,10,11,12,13,14,16,18,20,22,24,27,28,35,40,45,51,53,54,58,59,62,64,65,69,70,72,74,75,77,79,81,82,83,87,89,130,146,151,161,167,180,182,185,192,195,198,203,207,213,216],specific_dc:161,specific_host:161,specific_keyspac:151,specific_sourc:151,specific_token:151,specifii:20,specnam:81,specul:[0,11,55,74,75],speculative_retri:[11,64],speculative_write_threshold:59,speculativefailedretri:74,speculativeinsufficientreplica:74,speculativeretri:74,speculativesamplelatencynano:74,speed:[6,49,52,58,64,70,83,203,219],spend:220,spent:[49,74,220],sphinx:41,spike:45,spin:[6,50,66,71],spindl:[4,6],spirit:[6,78],split:[28,34,45,57,66,68,69,74,81,82,83,89,203],spread:[0,6,11,78],sql:[0,3,13,15,30],squar:12,squash:[36,42],src:[43,151],ssd:[6,16,50,71,220],ssh:[54,56,216],ssl:[6,45,60,64,73,81,82,83,146,157,203],ssl_cipher_suit:60,ssl_enabl:60,ssl_protocol:60,ssl_storage_port:[60,78],ssp:[64,207],sss:17,sstabl:[2,6,11,28,45,52,62,63,68,69,70,71,73,76,83,86,89,116,130,137,143,146,153,159,168,199,200,204,208,209,211,212,213,215,218,219,220],sstable_act:218,sstable_compression_ratio:195,sstable_count:195,sstable_s:68,sstable_size_in_mb:[66,67],sstable_task:[60,218],sstabledump:[83,203],sstableexpiredblock:[66,83,203],sstablelevelrese
t:[83,203],sstableload:[62,73,77,83,203],sstablemetadata:[83,203,206,210],sstableofflinerelevel:[83,203],sstablerepairedset:[83,203,208],sstablerepairset:210,sstablescrub:[83,203],sstablesperreadhistogram:74,sstablesplit:[83,203],sstableupgrad:[83,203],sstableutil:[83,203,204,208],sstableverifi:[83,203],sstablewrit:34,stabil:[33,42,57],stabl:[82,218],stack:[6,57,211,212,213,214,215,220],stackcollaps:220,staff:[24,26,81],staff_act:81,stage:[3,42,43,55,60,121,146,174,216,219],staging_numb:43,stai:[23,24,52,59,66],stakehold:[24,26],stale:77,stall:[6,79],stamp:61,stand:[44,49],standalon:44,standard1:[60,64,205,207,208,210,211,213,218],standard:[6,22,33,37,45,49,53,74,81,204,208,218],start:[0,6,9,13,24,25,26,27,28,30,36,41,45,46,49,50,52,54,56,59,62,66,67,69,71,72,74,75,76,77,79,89,161,192,209,213,216,218,219,220],start_dat:29,start_native_transport:60,start_token:[89,161],start_token_1:151,start_token_2:151,start_token_n:151,starter:42,starttl:64,startup:[4,6,21,40,45,49,60,67,74,79,214],startupcheck:218,starvat:6,stat:208,state:[0,1,6,11,14,50,53,54,56,59,63,66,71,74,76,79,146,186,217,218],state_or_provinc:29,statement:[6,9,10,11,13,14,15,16,17,20,21,22,27,39,41,55,60,61,62,63,64,66,74,75,77,81,82,216,220],static0:11,static1:11,staticcolumn:208,statist:[4,58,62,64,66,74,82,91,117,146,149,194,195,197,207,208,213,214,219],statu:[20,33,39,42,45,49,54,62,72,76,77,82,83,146,160,187,188,189,190,191,200,203,216,217],statusautocompact:146,statusbackup:[62,146],statusbinari:146,statusgossip:146,statushandoff:[72,146],stc:[11,66],stdev:[81,220],stdin:82,stdout:82,stdvrng:81,steadi:59,step:[0,6,26,28,33,36,40,41,43,77,217,218],still:[0,1,6,10,11,13,14,17,20,22,27,28,30,33,34,43,54,57,72,76,77,79,82,205,216,220],stop:[0,4,6,54,56,57,82,104,146,164,193,203,204,205,206,207,208,209,210,211,212,213,214,215,218],stop_commit:6,stop_paranoid:6,stopdaemon:[56,146],storag:[0,1,2,3,11,15,16,28,32,42,45,52,54,59,62,64,70,71,73,80,207,208],storage_port:[46,60,78],storageservic:[6,34,56,77],store:[1,4,6,10,11,12,13,22,24,26,27,28,29,32,50,52,53,54,57,58,59,60,62,63,64,66,70,71,72,74,77,80,82,101,109,111,146,191,207,208,211],store_queri:54,store_typ:6,stort:209,straggler:54,straight:[35,79,220],straightforward:[28,65],strain:28,strategi:[6,11,28,50,58,62,64,72,73,78,81,206,219],stratio:52,stream:[4,6,50,52,55,57,59,62,64,66,67,69,70,72,73,76,85,125,131,146,151,161,178,179,181,182,207,214,220],stream_entire_sst:[58,60],stream_throughput_outbound_megabits_per_sec:[58,64,207],streamer:58,street:[22,29],strength:6,stress:[52,56,83,220],stresscql:81,strict:[10,57,66],strictli:[8,11,14],string:[4,6,10,11,12,13,14,16,17,20,21,22,33,53,57,64,74,82,130,204],strong:0,strongli:[6,11,12,28,77],structur:[0,4,6,9,20,23,27,32,36,39,63,64,73,74,83,203,220],struggl:[0,3],stub:77,stuck:209,style:[0,6,24,25,28,39,40,41,42,44,52],stype:[9,14],sub:[0,11,13,22,54,62,66,67,220],subclass:6,subdirectori:[6,21,49,62],subject:[6,14,20,58,77],submiss:[6,42],submit:[41,42,44,52,89],subopt:81,subqueri:27,subrang:6,subscrib:[8,37,53],subscript:8,subsequ:[0,6,11,13,20,26,45,54,56,62,64,69,70,75,77],subset:[0,20,59,66,82,216],substanti:[58,220],substract:19,subsystem:77,subtract:208,subvert:69,succe:[0,1],succed:74,succeed:215,succesfulli:74,success:[26,28,43,53,56,57,61,75,82],successfulli:[0,1,43,54,57,59,61,72,74,76,215],sudden:6,sudo:[45,49,53,54,56,64,220],suffer:220,suffici:[0,6,28,54,57,59,77],suffix:49,suggest:[12,27,36,37,42,71,215],suit:[6,32,33,42,44,64,77,207],suitabl:[13,14,28,39,42,59],sum:[28,65],sum_i:28,sum_j:28,sum_k:28,sum_l:28,summ
ari:[4,6,42,58,62,64,74,207,208,213,214],sun:[34,77,220],sunx509:207,supercolumn:9,supersed:[10,168,211],superus:[9,20,77],suppli:[3,11,13,27,38,64,204,216],supplier:24,support:[0,3,4,6,9,10,11,12,13,14,15,16,18,19,20,22,24,25,27,29,30,32,37,40,42,44,45,47,49,52,53,54,55,58,60,62,64,67,72,75,77,82,83,168,192,211,218,220],suppos:13,sure:[0,6,8,24,30,33,34,35,36,37,40,42,44,45,46,66,67,81,220],surfac:[0,77],surplu:45,surpris:[0,59],surprisingli:6,surround:[17,29,82],suscept:14,suse:49,suspect:[5,42,220],suspend:40,svctm:220,svg:220,svn:43,svnpubsub:43,swamp:45,swap:[0,4,6,220],swiss:[146,184],symbol:[24,220],symlink:64,symmetri:17,symptom:45,sync:[0,4,6,27,36,45,72,74,76,161,220],synchron:[53,58,76],synctim:74,synonym:20,synopsi:[54,62,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],syntact:[11,20],syntax:[10,12,13,14,20,22,30,36,53,54,62,66,70,81],syntaxerror:216,sys:6,sysctl:[6,45],sysf:220,sysintern:6,system:[0,1,3,6,11,14,20,25,28,33,40,44,45,46,49,50,51,53,54,57,60,61,62,66,71,74,77,80,82,120,122,123,125,131,137,146,153,154,155,173,175,176,178,181,207,212,214,216,217,220],system_auth:[6,54,60,77],system_distribut:[54,60],system_schema:[20,53,54,60,61],system_trac:[54,60,161],system_view:[54,60,218],system_virtual_schema:[53,54,55,61,218],tab:[34,40],tabl:[0,1,3,4,6,9,10,12,13,14,15,16,17,18,20,21,22,24,25,26,27,28,29,32,44,50,52,53,54,55,56,58,59,61,63,64,66,70,72,73,76,77,81,82,83,86,89,96,104,106,115,116,119,124,128,137,146,152,153,155,159,161,168,172,185,187,192,194,195,199,200,203,205,207,208,210,214,215,216,218,219],table1:[20,76],table2:76,table_definit:81,table_nam:[11,13,16,20,21,54,60,64,66,195,218],table_opt:[11,18],tablehistogram:[146,219],tablestat:146,tag:[22,39,43,62,185],tail:[11,49,54,59,218],take:[0,3,6,10,11,13,14,22,27,28,36,39,40,42,43,45,50,54,57,58,59,63,66,67,68,70,71,72,79,146,185,210,212,215,218,219,220],taken:[1,6,65,69,74,81,213],talk:26,tar:49,tarbal:[46,48,82],tarball_instal:49,target:[11,20,35,40,44,54,61,67,72,207],task:[0,6,26,33,35,37,40,42,58,74,76,82,218,219,220],task_id:60,taskdef:44,taught:27,tbl:75,tcp:[6,45,57,220],tcp_keepalive_intvl:45,tcp_keepalive_prob:45,tcp_keepalive_tim:45,tcp_nodelai:6,tcp_retries2:6,tcp_wmem:6,tcpdump:220,teach:[0,6,78],team:[24,43,45],technetwork:6,technic:[11,15],techniqu:[0,3,27,28,32,72,217,220],technot:6,tee:49,tell:[6,13,39,45,46,74,220],templat:[33,43],tempor:6,temporari:[72,77,83,203],temporarili:[0,1,6,59],tempt:[24,28],ten:28,tend:[4,6,28,45,71,72],tendenc:[6,26],tension:26,tent:43,terabyt:70,term:[0,6,13,14,15,18,22,26,27,28,54,56,80],termin:[12,20,82],ternari:34,test:[0,6,25,34,35,39,41,42,43,49,51,52,54,56,58,60,71,81,82],test_keyspac:[77,218],testabl:[39,42],testbatchandlist:44,testmethod1:44,testmethod2:44,testsom:44,teststaticcompactt:44,text:[4,9,11,12,13,14,17,22,25,28,29,32,36,43,54,60,61,62,64,65,70,76,77,80,81,220],than:[0,1,4,6,11,12,13,14,15,18,19,22,24,28,34,42,50,52,53,54,57,58,65,66,67,68,69,70,71,72,75,77,78,79,81,162,175,176,205,207,209,212,213,216,218,219,220],thei:[0,1,3,6,9,10,11,12,13,14,15,18,19,20,22,23,24,25,26,27,30,32,34,39,42,44,52,54,56,58,59,60,61,62,63,64,67,70,71,72,74,77,205,209,2
14,215,216,218,219,220],them:[0,6,10,11,13,14,22,23,26,27,32,33,34,37,42,43,44,45,51,54,57,59,61,63,66,69,72,74,75,77,146,199,207,214,216,218,220],themselv:[0,13,20],theorem:1,theoret:11,therefor:[0,24,28,36,42,44,77,206,214],theses:77,thi:[0,1,2,4,5,6,7,10,11,12,13,14,15,17,18,20,22,23,24,25,26,27,28,29,30,33,34,35,36,37,38,39,40,42,43,44,45,46,48,49,50,52,53,54,55,56,57,58,59,61,62,63,64,66,67,68,69,70,71,72,74,75,76,77,78,79,81,82,83,84,86,87,89,92,94,96,102,106,112,115,116,118,119,121,124,128,130,132,136,137,144,146,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,218,219,220],thing:[6,22,24,28,37,38,42,45,48,57,66,76,220],think:[6,26,27],third:[22,28,39,52,74,219],thobb:82,those:[11,12,13,14,16,17,18,20,22,23,24,27,42,45,65,66,67,68,72,77,82,199,207,211,212,214,216,220],though:[10,12,22,29,32,52,57,60,62,66,70,74],thought:212,thousand:82,thousandssep:82,thread:[4,6,18,54,56,57,58,61,71,72,74,76,77,81,86,116,146,159,161,168,177,197,199,209,218,219],thread_pool:60,threaddump:220,threadpool:[73,217],threadpoolnam:74,threadprioritypolici:40,three:[0,6,11,25,28,54,57,58,60,62,63,66,70,72,75,76,77,82,216,218,219],threshold:[4,11,53,59,65,71,78,119,146,172,179,220],thrift:[9,81],throttl:[6,33,57,58,60,64,72,83,118,146,169,173,177,178,181,203],throttled_count:60,throttled_nano:60,through:[0,5,9,10,11,12,13,18,24,25,26,33,36,40,42,45,49,50,51,57,60,61,65,66,82,220],throughout:[26,77],throughput:[0,3,6,32,50,58,64,66,70,71,74,120,125,131,146,173,178,181,207,218,219],throwabl:[39,44],thrown:[22,64,209],thu:[6,10,11,12,13,18,22,45,74,75,78,79,146,199],thumb:[6,42],thusli:22,tib:[91,145,195],tick:42,ticket:[5,36,37,38,39,42,43,44,65],tid:220,tie:45,tier:66,ties:[13,219],tighter:6,tightli:6,tild:82,time:[0,1,3,4,6,8,9,10,11,12,13,15,16,17,18,24,26,27,28,32,34,36,39,40,42,43,44,45,49,50,54,56,57,58,59,60,61,62,63,65,66,70,74,75,76,77,80,81,82,146,148,208,210,215,216,218,219,220],timefram:79,timehorizon:6,timelin:11,timeout:[6,22,45,55,72,74,82,132,146,182,216,219],timeout_in_m:182,timeout_typ:[132,182],timer:[6,74],timestamp:[0,4,9,10,11,13,14,15,17,19,24,28,52,53,54,57,61,62,69,72,75,82,83,168,203,205,208,211],timeunit:69,timeuuid:[9,10,11,17,22,81],timewindowcompactionstrategi:[11,66],timezon:[17,82],tini:[6,66],tinyint:[9,10,14,17,19,22],tip:216,titl:[24,29,42,81],tjake:34,tlp_stress:60,tls_dhe_rsa_with_aes_128_cbc_sha:6,tls_dhe_rsa_with_aes_256_cbc_sha:6,tls_ecdhe_rsa_with_aes_128_cbc_sha:6,tls_ecdhe_rsa_with_aes_256_cbc_sha:6,tls_rsa_with_aes_128_cbc_sha:6,tls_rsa_with_aes_256_cbc_sha:6,tmp:[54,213,214,218,220],tmpf:220,tmplink:214,toc:[4,58,62,64,213,214],tock:42,todai:12,todat:14,todo:39,togeth:[0,6,11,13,14,27,28,32,33,69,216,219,220],toggl:77,tojson:15,token:[4,6,9,10,12,13,45,48,54,57,58,59,64,66,67,72,74,76,81,82,89,94,137,138,144,146,151,161,167,200,208,209,216,218,219],tokenawar:216,tokenrang:81,toler:[0,1,63,72],tom:13,tombston:[4,6,11,17,24,45,67,73,74,76,116,168,205,208,211,220],tombstone_compact:192,tombstone_compaction_interv:66,tombstone_threshold:66,tombstones_scan:60,tombstonescannedhistogram:74,ton:44,too:[6,11,12,14,22,28,39,50,60,66,67,81,216,219,220],took:[216,218],tool:[3,6,12,31,33,35,36,42,43,45,49,50,52,53,56,61,62,66,73,74,77,79,81,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219],toolset:220,top:[13,22,33,42,43,52,74,148,195,196,209],topcount:[148,196],topic:[50,82],topolog:[6,78,167],toppartit:146,total:[4
,6,13,28,49,53,54,56,58,60,62,64,65,66,68,74,75,81,143,146,207,218,219,220],total_replica:[0,11,59],totalblockedtask:74,totalcolumnsset:208,totalcommitlogs:74,totalcompactionscomplet:74,totaldiskspaceus:74,totalhint:74,totalhintsinprogress:74,totallat:74,totalrow:208,totalsizecap:61,totimestamp:14,touch:[8,23,28,45,66,68],tough:44,tounixtimestamp:14,tour:22,tpstat:[76,146,219],trace:[6,30,57,74,83,133,146,161,183,211,212,213,214,215,218,220],tracerout:220,track:[0,4,6,54,57,66,72,74],trackeddatainputplu:57,tracker:[36,42],trade:25,tradeoff:[0,1,6,50,220],tradit:[69,70],traffic:[6,50,54,58,59,75,77,78,220],trail:34,trailer:57,transact:[0,2,3,13,21,24,27,32,57,59,74,83,192,203],transfer:[6,45,58,62,77,207],transform:13,transient_replica:[0,11,59],transit:[10,20,35,55],translat:220,transmit:[58,75],transpar:[6,27,45],transport:[6,40,53,60,64,74,81,98,108,146,189,207,219],trap:24,treat:[0,6,10,27,45,78],tree:[0,6,35,40,74,76],tri:[6,53,58,67,69,216],trigger:[4,6,9,11,12,15,33,52,54,60,61,63,69,70,72,73,77,86,146,158],trigger_nam:21,trigger_stat:12,trip:[6,13],trivial:77,troubl:[28,218],troubleshoot:[6,39,41,49,50,52,76,216,218,219,220],truediskspaceus:[62,143,146],truesnapshotss:74,truli:9,truncat:[4,6,9,10,15,20,53,54,60,62,72,77,81,132,146,182,198],truncate_stat:12,truncatehint:[72,146],trunk:[36,38,39,40,42,44],trust:[49,77],trusti:220,trustor:6,truststor:[6,60,64,77,81,207],truststore_password:6,truststorepassword:77,tspw:[64,207],tstamp:204,ttl:[4,6,9,10,11,14,17,22,28,69,73,168,208,211],tty:82,tunabl:2,tune:[0,11,45,50,63,67,71,75,218,219],tupl:[0,6,9,10,12,13,14,15,17],tuple_liter:[12,13],tuple_typ:22,tuplevalu:[10,14],turn:[0,6,24,42,45,54,77,216],twc:[11,69],twice:[4,6,22,57],two:[0,1,3,6,11,12,13,14,17,19,27,28,32,40,52,53,54,56,57,60,61,62,63,64,66,68,69,71,72,75,77,78,82,208,219,220],txt:[4,14,38,39,42,43,49,58,62,64,213,214],type:[0,3,4,6,10,11,12,13,14,15,19,20,24,25,28,29,30,39,41,49,52,53,54,55,59,60,61,64,71,73,76,77,81,82,132,146,182,192,204,207,208,212,214,218,219],type_hint:12,typeasblob:14,typecodec:14,typic:[0,3,6,11,13,23,25,27,28,30,32,45,62,63,64,66,69,71,74,76,77,80,82,213,216,218,219,220],typo:36,ubuntu:[40,49],udf:[6,14],udf_stat:12,udfcontext:[10,14],udt:[14,17,25,30],udt_liter:12,udt_nam:22,udt_stat:12,udtarg:14,udtnam:14,udtvalu:[10,14],ulimit:45,ultim:[0,27],ultra:70,unabl:[4,39,52,56,219],unacknowledg:6,unaffect:22,unari:19,unauthorized_attempt:53,unavail:[0,6,11,59,72,74,77,79,220],unavailableexcept:216,unblock:74,unbootstrap:58,unbound:[6,22],uncaught:218,unchecked_tombstone_compact:[66,69],uncom:[6,74,77],uncommit:1,uncommon:[24,42],uncompress:[4,6,11,58,70,72,74],unconfirm:6,undecor:4,undelet:66,under:[0,6,22,33,34,44,50,61,74,77,220],underli:[6,18,57,66,77,220],underlin:23,undersold:24,understand:[1,6,23,42,45,50,72,76,77,218,220],understood:23,undropp:57,unencrypt:[6,49,77],uneven:0,unexpect:[4,57,203,204,205,206,207,208,209,210,211,212,213,214,215],unexpectedli:22,unfinishedcommit:74,unflush:[62,185],unfortun:[44,72],unifi:1,uniform:81,uniq:218,uniqu:[0,3,11,14,22,23,24,25,26,30,55,57,81,208],unit:[22,27,39,41,54,60,69,146,170,207,212,219],unix:[61,217],unixtimestampof:[10,14],unknown:[55,209],unknowncfexcept:57,unless:[6,11,13,16,18,20,22,34,49,57,59,65,77,78,208,212,220],unlik:[0,6,10,13,22,32],unlimit:[6,45,64,82,207],unlock:26,unlog:[9,74,81],unmodifi:72,unnecessari:[39,58,79],unnecessarili:[57,65],unpack:49,unpredict:13,unprepar:74,unprocess:57,unprotect:57,unquot:12,unquoted_identifi:12,unquoted_nam:11,unreach:[0,76],unrecogn:54,unrecov:[57,60],unre
cover:1,unrel:[42,216],unrepair:[6,59,68,73,74,76,83,203],unrespons:[11,59],unsafe_aggressive_sstable_expir:[66,69],unsecur:77,unselected_column:18,unset:[6,10,13,17,58,72,210],unsign:22,unspecifi:6,unsubscrib:[8,52],unsuccess:61,unsupport:50,unsupportedclassversionerror:56,unsupportedoperationexcept:54,until:[0,4,6,11,18,22,54,57,59,63,65,66,67,70,72,75,77,78],unus:[6,57],unusu:39,unwrit:[6,57],upcom:24,updat:[0,1,3,6,9,10,11,12,14,15,17,18,20,22,24,27,32,36,39,41,42,44,49,52,53,60,61,64,66,70,72,74,75,77,81,82,218,219],update_paramet:13,update_stat:[12,13],updatewithlwt:81,upgrad:[4,6,11,59,60,66,146,199,213,214],upgrade_sst:192,upgradesst:[63,66,70,146],upload:[42,49,64],upload_bintrai:43,upon:[6,22,53,61,63,65,70],upper:[12,17,66,77],ups:71,upstream:42,uptim:[138,146],urgent:[6,43,57],url:[36,38,81],usag:[0,4,6,11,22,50,52,53,54,57,59,60,62,63,64,65,70,73,74,82,83,203],use:[0,4,6,9,10,11,12,13,14,16,17,18,20,22,23,24,25,26,27,28,32,33,34,36,39,40,42,43,44,46,49,50,51,52,53,54,56,58,59,60,61,62,63,64,65,66,67,71,72,74,75,76,77,78,79,81,82,86,116,129,146,148,159,168,196,199,204,207,208,210,211,212,214,216,217,218,219,220],use_k:61,use_keyspac:53,use_stat:12,usec:220,usecas:66,useconcmarksweepgc:40,usecondcardmark:40,used:[0,1,3,4,6,9,10,11,12,13,14,15,16,17,18,20,22,24,25,26,28,30,32,35,39,40,42,43,44,45,53,54,56,57,58,59,60,61,62,64,66,70,71,72,74,75,77,78,79,81,82,84,86,87,89,94,96,102,105,106,112,115,116,119,121,124,128,130,132,136,137,144,146,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,204,205,206,207,208,209,210,212,213,214,216,219,220],useecassandra:77,useful:[0,4,6,11,14,24,28,30,35,42,54,59,60,62,64,66,70,72,74,76,79,82,84,86,87,89,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,206,208,218,219,220],useparnewgc:40,user1:13,user2:13,user3:13,user4:13,user:[0,3,5,6,8,9,10,11,12,13,15,16,17,18,24,25,26,27,33,39,41,42,43,45,49,54,55,56,58,59,60,61,62,63,64,66,67,69,70,71,76,77,82,83,89,105,146,205,213,218,220],user_count:13,user_defined_typ:22,user_funct:20,user_nam:13,user_occup:13,user_opt:20,useract:13,userid:[11,13,14],userindex:16,usernam:[6,13,14,53,54,60,62,64,74,77,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207],usertyp:64,uses:[0,4,6,11,12,13,14,16,20,21,24,25,36,44,45,53,54,56,60,75,77,81,215,219,220],usethreadprior:40,using:[1,3,4,6,10,11,12,13,14,18,20,22,23,25,27,28,29,30,32,33,40,41,42,44,48,49,50,51,52,53,54,56,57,58,59,60,61,62,63,64,67,69,70,71,72,73,74,75,77,79,82,89,151,168,185,204,206,208,209,210,211,214,216,217,218,219,220],using_byt:60,using_reserve_byt:60,usr:[56,82,220],usual:[0,6,13,22,27,38,42,44,50,63,77,161,211,216,218],utc:[17,82],utd:11,utf8:[22,82],utf8typ:[9,208],utf:[57,82],util:[4,14,39,49,54,57,66,82,218,220],uuid:[9,10,11,12,17,22,25,29,53],val0:[11,54,62,64,76],val1:[11,54,62,64,76],val2:[62,64,76],val:[14,81],valid:[0,6,10,11,12,13,14,17,22,24,25,43,45,56,57,59,66,70,74,76,77,82,83,161,
168,192,203,215],validationexecutor:[60,74,219],validationtim:74,valu:[0,1,4,6,9,10,11,12,13,14,16,17,19,22,25,28,32,39,40,43,45,50,53,54,57,58,59,60,61,62,63,64,66,70,72,74,75,76,77,78,80,81,82,83,105,133,137,146,169,173,175,176,177,178,180,181,182,183,203,204,215,216,218,220],valuabl:218,value1:13,value2:13,value_in_kb_per_sec:[169,177],value_in_m:180,value_in_mb:[173,178,181],valueof:14,varchar:[9,11,14,17,22],vari:[11,28,32,70],variabl:[6,10,12,17,22,28,33,40,43,48,56,64,210],varianc:[25,218],variant:[0,12,32],variat:32,varieti:65,varint:[9,11,14,17,19,22],variou:[6,11,23,24,25,26,33,40,44,71,75,77,81,203,217,218],vector:[0,77],vendor:56,verb:57,verbatim:57,verbos:[64,207,211,214,215],veri:[0,6,11,13,27,32,36,42,44,45,63,66,67,70,71,210,215,216,218,219,220],verif:[83,203],verifi:[11,42,45,47,49,64,70,76,137,146,192,203,204,205,206,207,208,209,210,211,212,213,214,215],versa:214,version:[1,2,5,6,9,11,14,15,22,35,40,42,47,49,54,56,57,58,59,60,64,66,72,74,79,83,88,93,103,113,146,199,200,203,211,214,218],versu:27,vertic:[56,82],via:[0,4,6,8,10,18,20,24,27,35,39,40,45,46,54,57,60,61,62,64,69,70,71,72,74,76,77,78,208,210,220],vice:214,view:[0,3,6,10,11,12,15,20,24,25,26,27,30,31,52,55,59,60,74,82,123,146,176,202,210,218,219,220],view_build:192,view_nam:[18,54],viewbuildexecutor:[60,74,219],viewbuildstatu:146,viewlockacquiretim:74,viewmutationstag:[60,74,219],viewpendingmut:74,viewreadtim:74,viewreplicasattempt:74,viewreplicassuccess:74,viewwrit:74,viewwritelat:74,vint:57,violat:[0,27,54],virtual:[0,3,6,45,52,55,69,74,79],virtualenv:33,visibl:[0,11,20,34,63,72,75],visit:[23,49,81],visual:[0,24,25,36,218],vnode:[6,50,59,70],volum:[3,4,6,65,70,215,219,220],vote:41,vpc:78,vulner:[6,43,77],w_await:220,wai:[0,4,6,12,15,17,18,22,24,27,28,33,37,38,40,44,45,53,61,66,69,70,72,161,208,209,210,211,218,220],wait:[0,4,6,11,42,45,53,54,56,57,58,59,61,62,72,74,75,146,163,218,219,220],waitingoncommit:74,waitingonfreememtablespac:74,waitingonsegmentalloc:74,walk:[0,25],want:[0,4,6,11,13,23,26,27,28,30,33,40,42,43,44,45,49,61,72,76,77,79,81,206,207,210,218,220],warmup:[81,146,171],warn:[6,11,34,44,53,54,73,76,161,215,218],warrant:219,washington:22,wasn:10,wast:6,watch:[44,220],weaker:0,web:[1,36],websit:[24,44,220],wed:56,week:[22,76,210],weibul:81,weigh:[11,54],weight:[53,54,57,61,74,109],weightedqueuetest:54,welcom:8,well:[0,6,11,13,14,17,22,24,26,27,28,30,39,40,49,50,53,54,57,59,61,62,65,70,71,77,78,146,164,213,218,220],went:74,were:[0,3,6,9,10,20,24,30,39,40,43,54,64,66,74,75,211,214,218,219],west:43,what:[0,2,11,13,22,23,24,31,36,37,41,44,46,52,55,57,58,66,69,71,75,77,81,82,208,216,217,218,219,220],whatev:[10,13,45],whedon:13,wheel:213,when:[0,1,4,6,9,10,11,12,13,14,15,16,17,20,22,24,26,27,28,33,34,36,39,42,43,44,46,49,50,52,53,54,55,56,59,61,62,63,64,65,67,68,69,70,71,72,73,74,75,76,77,78,79,81,82,84,86,87,89,92,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,208,209,214,216,218,219,220],whenev:[209,220],where:[0,3,4,6,9,10,11,12,14,16,17,18,19,20,22,24,26,28,39,44,46,50,53,54,57,59,60,61,63,64,67,69,70,72,77,79,81,82,109,161,216,218,220],where_claus:13,wherea:[22,77,219],whether:[0,6,9,11,13,27,28,40,53,54,57,62,66,75,78,82,109],which:[0,1,3,4,5,6,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,32,36,42,43,44,45,46,49,50,51,53,54,56,57,58,59,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,79,81,89,124,128,137,143,146,151,161,214,216,217,218,
219,220],whichev:[0,6],whilst:6,whitelist:77,whitespac:41,who:[20,33,42,45,59,72],whole:[0,6,13,14,22,58,66,76],whose:[11,22,27,192],why:[0,24,39,42,50,52,205,216,218,220],wide2:60,wide:[0,3,4,23,24,28,49,65,219],wider:59,width:12,wiki:[6,36,40],wildcard:[13,20,54,212],wildli:11,win:[0,6],window:[0,4,6,66,72,74,77,127,135,146,180,217],winner:45,wip:42,wipe:[45,79],wire:[6,45,54,57],wirefram:26,wireshark:220,wise:11,wish:[6,24,43,69,74,218],withbuffersizeinmb:64,within:[0,3,4,6,11,12,13,16,24,32,40,42,43,45,54,58,62,68,69,71,74,77],withing:6,without:[0,1,6,11,12,13,14,20,22,38,40,42,43,44,45,50,54,56,57,58,59,64,65,69,71,72,74,77,82,83,84,137,146,153,203,204],withpartition:64,withtyp:64,wmb:220,wmem_max:6,wnen:30,won:[4,6,13,36,38,57,75,76,220],wont:[61,67],word:[10,11,12,18,20,22,45,65,77],work:[0,4,6,10,11,14,15,17,23,24,25,27,30,33,34,37,38,40,41,43,44,45,50,52,59,66,67,69,71,74,76,77,78,79,82,207,220],workaround:[207,211],worker:[57,82],workflow:[24,26],workload:[0,6,37,39,50,54,63,66,67,69,71,81,219,220],workspac:40,worktre:40,world:[23,27],worri:[26,42,45],wors:[6,78],worst:[6,28,42],worth:[6,27,57,61],worthwhil:[6,24],would:[0,6,12,13,14,17,20,23,24,25,26,27,28,32,36,40,42,44,50,52,53,54,57,58,59,60,64,66,69,70,71,72,75,76,77,78,208,210,214,218,220],wouldn:11,wrap:[24,57,78],wrapper:54,writabl:64,write:[0,2,3,4,6,10,11,13,22,24,27,32,34,36,37,39,44,45,50,54,55,60,64,65,66,67,68,69,70,71,74,75,76,77,78,79,81,82,104,132,146,182,195,208,211,214,216,218,219,220],write_lat:195,write_request_timeout:45,write_request_timeout_in_m:72,writefailedideacl:74,writelat:[74,216],writer:[4,6,34,64],writetim:[9,14],writetimeoutexcept:[6,216],written:[0,1,3,4,6,11,21,26,27,33,45,53,54,57,59,61,63,64,66,69,70,72,74,75,76],wrong:[6,27,43,219],wrqm:220,wrst:220,wrte:74,www:[6,49,220],x86:80,xandra:47,xarg:[210,218],xdm:220,xferd:49,xlarge_daili:61,xml:[35,40,43,44,46,56,64,218],xmn220m:40,xms1024m:40,xmx1024m:40,xmx:71,xss256k:40,xzvf:49,yaml:[0,3,4,6,14,18,20,46,50,53,54,57,58,59,60,62,64,72,74,75,77,78,79,81,90,105,109,146,164,195,197,207,208,216],year:[13,22,27,28,56],yes:[9,11],yet:[0,6,11,33,37,62,65,74,214],ygc:220,ygct:220,yield:[13,27,28,61,79,220],ymmv:218,you:[0,4,5,6,8,10,11,12,13,14,16,17,18,20,21,22,23,24,25,26,27,28,29,30,33,34,35,36,37,38,40,41,43,44,45,46,47,48,49,50,51,52,54,59,61,62,64,65,66,67,68,72,74,75,76,77,78,79,80,81,82,84,146,185,204,206,207,208,210,211,212,214,215,216,217,218,219,220],young:220,younger:14,your:[0,5,6,8,10,11,12,24,25,26,27,28,29,30,34,36,37,40,41,42,44,45,46,49,50,52,56,59,66,67,71,76,77,78,81,82,207,212,215,217,218,219,220],yourself:[37,38,44,72],yum:[43,49,56],yyyi:[17,22,61],z_0:[11,16,18],zero:[3,6,10,11,45,55,74,78,218],zerocopi:58,zgrep:218,zip:[22,61],zipcod:22,zone:[0,6,22,78],zoomabl:220,zstd:4,zstdcompressor:[11,70]},titles:["Dynamo","Guarantees","Architecture","Overview","Storage Engine","Reporting Bugs","Cassandra Configuration File","Configuring Cassandra","Contact us","Appendices","Changes","Data Definition","Definitions","Data Manipulation","Functions","The Cassandra Query Language (CQL)","Secondary Indexes","JSON Support","Materialized Views","Arithmetic Operators","Security","Triggers","Data Types","Conceptual Data Modeling","Logical Data Modeling","Physical Data Modeling","Defining Application Queries","RDBMS Design","Evaluating and Refining Data Models","Defining Database Schema","Cassandra Data Modeling Tools","Data Modeling","Introduction","Jenkins CI Environment","Code Style","Dependency Management","Working on Documentation","Getting 
Started","How-to Commit","Review Checklist","Building and IDE Integration","Contributing to Cassandra","Contributing Code Changes","Release Process","Testing","Frequently Asked Questions","Configuring Cassandra","Client drivers","Getting Started","Installing Cassandra","Production Recommendations","Inserting and querying","Welcome to Apache Cassandra\u2019s documentation!","Audit Logging","Full Query Logging","New Features in Apache Cassandra 4.0","Support for Java 11","Improved Internode Messaging","Improved Streaming","Transient Replication","Virtual Tables","Audit Logging","Backups","Bloom Filters","Bulk Loading","Change Data Capture","Compaction","Leveled Compaction Strategy","Leveled Compaction Strategy","Time Window CompactionStrategy","Compression","Hardware Choices","Hints","Operating Cassandra","Monitoring","Read repair","Repair","Security","Snitch","Adding, replacing, moving and removing nodes","Third-Party Plugins","Cassandra Stress","cqlsh: the CQL shell","Cassandra Tools","assassinate","bootstrap","cleanup","clearsnapshot","clientstats","compact","compactionhistory","compactionstats","decommission","describecluster","describering","disableauditlog","disableautocompaction","disablebackup","disablebinary","disablefullquerylog","disablegossip","disablehandoff","disablehintsfordc","disableoldprotocolversions","drain","enableauditlog","enableautocompaction","enablebackup","enablebinary","enablefullquerylog","enablegossip","enablehandoff","enablehintsfordc","enableoldprotocolversions","failuredetector","flush","garbagecollect","gcstats","getbatchlogreplaythrottle","getcompactionthreshold","getcompactionthroughput","getconcurrency","getconcurrentcompactors","getconcurrentviewbuilders","getendpoints","getinterdcstreamthroughput","getlogginglevels","getmaxhintwindow","getreplicas","getseeds","getsstables","getstreamthroughput","gettimeout","gettraceprobability","gossipinfo","handoffwindow","help","import","info","invalidatecountercache","invalidatekeycache","invalidaterowcache","join","listsnapshots","move","netstats","Nodetool","pausehandoff","profileload","proxyhistograms","rangekeysample","rebuild","rebuild_index","refresh","refreshsizeestimates","reloadlocalschema","reloadseeds","reloadssl","reloadtriggers","relocatesstables","removenode","repair","repair_admin","replaybatchlog","resetfullquerylog","resetlocalschema","resumehandoff","ring","scrub","setbatchlogreplaythrottle","setcachecapacity","setcachekeystosave","setcompactionthreshold","setcompactionthroughput","setconcurrency","setconcurrentcompactors","setconcurrentviewbuilders","sethintedhandoffthrottlekb","setinterdcstreamthroughput","setlogginglevel","setmaxhintwindow","setstreamthroughput","settimeout","settraceprobability","sjk","snapshot","status","statusautocompaction","statusbackup","statusbinary","statusgossip","statushandoff","stop","stopdaemon","tablehistograms","tablestats","toppartitions","tpstats","truncatehints","upgradesstables","verify","version","viewbuildstatus","SSTable Tools","sstabledump","sstableexpiredblockers","sstablelevelreset","sstableloader","sstablemetadata","sstableofflinerelevel","sstablerepairedset","sstablescrub","sstablesplit","sstableupgrade","sstableutil","sstableverify","Find The Misbehaving Nodes","Troubleshooting","Cassandra Logs","Use Nodetool","Diving Deep, Use External 
Tools"],titleterms:{"break":28,"class":78,"final":214,"function":[13,14,17],"import":[34,64,137],"long":44,"new":[45,55],"switch":66,"transient":[0,59],Added:57,Adding:79,Doing:209,IDE:40,IDEs:34,LCS:67,QoS:57,TLS:77,The:[13,15,17,60,66,216],USE:11,Use:[70,207,219,220],Uses:70,Using:[32,40,53,56,64,210],Will:45,With:77,about:33,abov:208,accept:64,access:77,adcanc:61,add:[35,45],address:45,advanc:[70,220],after:79,aggreg:14,ahead:50,alias:13,all:[20,45,62,208,214],alloc:79,allocate_tokens_for_keyspac:6,allocate_tokens_for_local_replication_factor:6,allow:[13,72],alreadi:206,alter:[11,18,20,22],analysi:32,ani:45,announc:43,answer:37,anti:24,apach:[33,40,52,55],api:64,appendic:9,appendix:9,applic:[26,57,72],architectur:2,archiv:53,arithmet:19,artifact:43,ask:45,assassin:84,assign:79,assur:57,attempt:212,audit:[53,61],audit_logging_opt:6,auditlog:61,auth:77,authent:[6,20,77],author:[6,77],auto_snapshot:6,automat:20,automatic_sstable_upgrad:6,avail:[1,58],avg:14,back_pressure_en:6,back_pressure_strategi:6,background:75,backup:[62,64],base:[36,58],basic:[211,215,220],batch:[1,13,45,74],batch_size_fail_threshold_in_kb:6,batch_size_warn_threshold_in_kb:6,batchlog_replay_throttle_in_kb:6,bcc:220,befor:42,behavior:75,below:58,benefit:[58,70],best:76,between:[27,59],binari:49,binauditlogg:61,bintrai:43,blob:[14,45],block:[75,205],bloom:63,boilerpl:34,bootstrap:[45,67,79,85],branch:42,broadcast_address:6,broadcast_rpc_address:6,buffer_pool_use_heap_if_exhaust:6,bufferpool:74,bug:[5,37,42],build:[40,56],bulk:[45,64],cach:[60,74,77,220],calcul:28,call:[43,45],can:45,cap:1,capi:80,captur:[61,65,82,220],cas_contention_timeout_in_m:6,cassandra:[6,7,15,17,27,30,33,36,40,41,43,44,45,46,49,52,55,61,65,73,75,77,80,81,83,213,218],cast:14,categori:53,cdc:65,cdc_enabl:6,cdc_free_space_check_interval_m:6,cdc_raw_directori:6,cdc_total_space_in_mb:6,certif:77,chang:[10,42,45,46,63,65,69],characterist:22,cheap:59,check:211,checklist:39,choic:71,choos:[42,49],circleci:44,claus:13,clean:214,cleanup:[79,86],clear:[62,82],clearsnapshot:87,client:[47,51,60,74,77,216],client_encryption_opt:6,clientstat:88,clojur:47,close:57,cloud:71,cluster:[0,45,207,219],cluster_nam:6,code:[4,34,42],collect:[22,66,220],column_index_cache_size_in_kb:6,column_index_size_in_kb:6,command:[40,61,66,82,210],comment:12,commit:38,commit_failure_polici:6,commitlog:[4,74],commitlog_compress:6,commitlog_directori:6,commitlog_segment_size_in_mb:6,commitlog_sync:6,commitlog_sync_batch_window_in_m:6,commitlog_sync_group_window_in_m:6,commitlog_sync_period_in_m:6,commitlog_total_space_in_mb:6,commitlogseg:65,committ:36,commod:0,common:[11,56,66,71,218],compact:[9,50,66,67,68,74,89,219],compaction_large_partition_warning_threshold_mb:6,compaction_throughput_mb_per_sec:6,compactionhistori:90,compactionstat:91,compactionstrategi:69,compar:[32,54],compat:82,compon:58,compress:[50,70],conceptu:23,concern:69,concurrent_compactor:6,concurrent_counter_writ:6,concurrent_materialized_view_build:6,concurrent_materialized_view_writ:6,concurrent_read:6,concurrent_valid:6,concurrent_writ:6,condition:20,config:207,configur:[6,7,46,50,53,54,58,61,62,65,70,72,75],conflict:35,connect:[20,45,57],consist:[0,1,75,82],constant:12,contact:8,content:[43,61],contribut:[37,41,42],control:20,convent:[12,34],convers:14,coordin:219,copi:[58,82],corrupt:[57,211,215],corrupted_tombstone_strategi:6,count:14,counter:[13,22,211],counter_cache_keys_to_sav:6,counter_cache_save_period:6,counter_cache_size_in_mb:6,counter_write_request_timeout_in_m:6,cpu:[71,220],cql:[9,15,54,74,82],cqlsh:[5
1,82],cqlshrc:82,cqlsstablewrit:64,creat:[11,14,16,18,20,21,22,37,42,43,50,62],credenti:20,credentials_update_interval_in_m:6,credentials_validity_in_m:6,cross_node_timeout:6,cstar_perf:44,current:[14,213],custom:22,cycl:54,cython:82,dart:47,data:[0,11,13,17,20,22,23,24,25,28,30,31,32,45,62,64,65,66,79],data_file_directori:6,databas:[20,29],datacent:20,dataset:0,date:[14,22,211],datetim:[14,19],dead:79,deal:211,debian:49,debug:[40,218],decis:27,decommiss:[58,92],deep:220,defin:[14,22,26,29],definit:[11,12,60],defragment:68,delet:[13,43,45,66],deliveri:72,demo:[53,64],denorm:27,depend:[35,82],deploy:58,describ:[60,82,94],describeclust:93,deseri:57,design:[27,32],detail:[66,207],detect:0,develop:43,diagnost:[53,75],diagnostic_events_en:6,dies:45,differ:[27,60,62],directori:[46,53,54,62,66],disabl:[61,65],disableauditlog:95,disableautocompact:96,disablebackup:97,disablebinari:98,disablefullquerylog:99,disablegossip:100,disablehandoff:101,disablehintsfordc:102,disableoldprotocolvers:103,disallow:58,disk:[28,45,71,72],disk_failure_polici:6,disk_optimization_strategi:6,displai:204,distribut:[0,43],dive:220,document:[36,37,52],doe:[45,53,61],down:72,drain:104,driven:32,driver:[47,51],drop:[9,11,14,16,18,20,21,22,45,58],droppedmessag:74,dry:209,dtest:[37,44],dump:204,durabl:1,durat:22,dynam:78,dynamic_snitch_badness_threshold:6,dynamic_snitch_reset_interval_in_m:6,dynamic_snitch_update_interval_in_m:6,dynamo:0,each:[45,208],each_quorum:59,eclips:40,effici:57,elig:58,elixir:47,email:45,enabl:[53,54,58,59,61,65,77],enable_materialized_view:6,enable_sasi_index:6,enable_scripted_user_defined_funct:6,enable_transient_repl:6,enable_user_defined_funct:6,enableauditlog:105,enableautocompact:106,enablebackup:107,enablebinari:108,enablefullquerylog:109,enablegossip:110,enablehandoff:111,enablehintsfordc:112,enableoldprotocolvers:113,encod:17,encrypt:[50,77],endpoint_snitch:6,engin:4,ensur:50,entir:204,entri:45,environ:[33,46],erlang:47,error:[45,57,216],evalu:28,even:45,event:[53,75],eventu:1,exampl:[4,32,62,75,76],except:34,exclud:204,exist:45,exit:82,expand:82,expect:75,experiment:6,expir:66,expiri:57,explan:208,extend:215,extern:[64,220],factor:45,fail:[45,79],failur:[0,45,57],failuredetector:114,faq:81,faster:72,featur:[3,6,55],file:[6,34,35,61,207,212,215,218],file_cache_size_in_mb:6,fileauditlogg:61,filedescriptorratio:74,filter:[13,61,63],find:[62,216],first:27,fix:[37,42],flamegraph:220,flexibl:53,flow:36,flush:115,format:[34,204],found:[206,209],fql:54,frame:57,freez:42,frequenc:53,frequent:45,from:[40,43,45,57,60,62,64,82,207],fromjson:17,full:[54,59,76,218],full_query_logging_opt:6,fulli:66,further:[49,65],garbag:[66,220],garbagecollect:116,garbagecollector:74,gc_grace_second:66,gc_log_threshold_in_m:6,gc_warn_threshold_in_m:6,gcstat:117,gener:[34,64],get:[37,48,207,218],getbatchlogreplaythrottl:118,getcompactionthreshold:119,getcompactionthroughput:120,getconcurr:121,getconcurrentcompactor:122,getconcurrentviewbuild:123,getendpoint:124,getinterdcstreamthroughput:125,getlogginglevel:126,getmaxhintwindow:127,getreplica:128,getse:129,getsstabl:130,getstreamthroughput:131,gettimeout:132,gettraceprob:133,github:36,give:45,goal:32,gossip:0,gossipinfo:134,gpg:43,grace:[66,208],grant:20,graph:81,group:13,guarante:1,handl:34,handoff:72,handoffwindow:135,hang:79,happen:45,hardwar:[0,71],has:206,hash:0,haskel:47,heap:45,help:[82,136],hide:207,high:[1,58,220],hint:[57,72],hinted_handoff_disabled_datacent:6,hinted_handoff_en:6,hinted_handoff_throttle_in_kb:6,hintedhandoff:74,hints_compress:6,hints_directori:6
,hints_flush_period_in_m:6,hintsservic:74,host:[45,82],hot:77,hotel:[24,25],how:[36,38,45,60,61],htop:220,idea:40,ideal_consistency_level:6,identifi:12,impact:70,improv:[57,58,75],inbound:[57,60],includ:214,increment:[0,62,64,76],incremental_backup:6,index:[1,16,74,80],index_summary_capacity_in_mb:6,index_summary_resize_interval_in_minut:6,info:[49,138],inform:[218,220],initi:37,initial_token:6,insert:[13,17,51],instal:49,integr:[27,40,77],intellij:40,inter:77,inter_dc_stream_throughput_outbound_megabits_per_sec:6,inter_dc_tcp_nodelai:6,interfac:0,intern:[20,77,204],internod:[57,60],internode_application_receive_queue_capacity_in_byt:6,internode_application_receive_queue_reserve_endpoint_capacity_in_byt:6,internode_application_receive_queue_reserve_global_capacity_in_byt:6,internode_application_send_queue_capacity_in_byt:6,internode_application_send_queue_reserve_endpoint_capacity_in_byt:6,internode_application_send_queue_reserve_global_capacity_in_byt:6,internode_authent:6,internode_compress:6,internode_recv_buff_size_in_byt:6,internode_send_buff_size_in_byt:6,introduct:32,invalidatecountercach:139,invalidatekeycach:140,invalidaterowcach:141,investig:[37,216],iostat:220,issu:56,java:[45,47,56,64],jconsol:45,jenkin:33,jira:[36,43],jmx:[45,66,74,77],job:33,join:[27,45,142],json:17,jstack:220,jstat:220,jvm:[74,220],keep:213,kei:[16,18,43,204],key_cache_keys_to_sav:6,key_cache_save_period:6,key_cache_size_in_mb:6,keyspac:[11,45,50,53,58,60,62,64,74,209],keyword:[9,12],lang:45,languag:15,larg:[28,45],latenc:[216,219,220],level:[0,67,68,75,206,220],librari:35,lightweight:[1,81],limit:[13,18,57,60,61],line:[40,82],lineariz:1,list:[8,20,22,37,45,60,62,214],listen:45,listen_address:[6,45],listen_interfac:6,listen_interface_prefer_ipv6:6,listen_on_broadcast_address:6,listsnapshot:143,liter:22,live:45,load:[45,64,207],local:[36,57,219],locat:46,log:[45,46,53,54,61,66,214,216,218],logger:[53,54,218],logic:24,login:82,longer:72,lot:[45,210],lucen:80,made:45,mail:8,main:46,major:[67,68],make:72,manag:[35,204],mani:210,manifest:211,manipul:13,manual:79,map:[16,22,45],master:0,materi:[18,32],matrix:56,max:[14,45],max_concurrent_automatic_sstable_upgrad:6,max_hint_window_in_m:6,max_hints_delivery_thread:6,max_hints_file_size_in_mb:6,max_value_size_in_mb:6,maxtimeuuid:14,mean:45,membership:0,memori:[45,71,74],memorypool:74,memtabl:4,memtable_allocation_typ:6,memtable_cleanup_threshold:6,memtable_flush_writ:6,memtable_heap_space_in_mb:6,memtable_offheap_space_in_mb:6,merg:66,messag:[45,57,60],metadata:[208,210],method:[45,49],metric:[57,74,216],min:14,minor:66,mintimeuuid:14,misbehav:216,mode:81,model:[0,23,24,25,28,30,31,32],monitor:[72,74,79],monoton:75,more:[45,66,204,207,218],move:[79,144],movement:79,multi:[0,58],multilin:34,multipl:[0,62,212],name:64,nativ:[14,22],native_transport_allow_older_protocol:6,native_transport_flush_in_batches_legaci:6,native_transport_frame_block_size_in_kb:6,native_transport_idle_timeout_in_m:6,native_transport_max_concurrent_connect:6,native_transport_max_concurrent_connections_per_ip:6,native_transport_max_frame_size_in_mb:6,native_transport_max_thread:6,native_transport_port:6,native_transport_port_ssl:6,net:47,netbean:40,netstat:145,netti:58,network:220,network_author:6,networktopologystrategi:[0,11,50],newer:40,next:[43,216],nexu:43,nio:57,node:[0,45,58,72,77,79,216],nodej:47,nodetool:[45,53,61,64,66,72,146,219],none:75,note:36,noteworthi:22,now:14,num_token:6,number:[19,58],object:[54,59,60],old:[43,213],one:[45,210],onli:[45,204,214],open:[40,57],oper:[3,19,43,45,58,
69,70,73],optim:[27,57],option:[18,53,54,59,61,64,66,67,68,69,76,82],order:13,otc_backlog_expiration_interval_m:6,otc_coalescing_enough_coalesced_messag:6,otc_coalescing_strategi:6,otc_coalescing_window_u:6,other:[45,53,54,60,76],out:[0,72],outbound:57,outofmemoryerror:45,output:[61,204,205,207],overflow:211,overhead:54,overload:57,overview:[3,65],own:33,packag:[43,49],packet:220,page:[82,220],parallel:58,paramet:[13,65,66],parti:80,partit:[0,28,32],partition:6,password:77,patch:[37,42],path:57,pattern:24,pausehandoff:147,paxo:57,pend:59,per:0,perform:[43,44,54],periodic_commitlog_sync_lag_block_in_m:6,perl:47,permiss:20,permissions_update_interval_in_m:6,permissions_validity_in_m:6,phi_convict_threshold:6,php:47,physic:[0,25],pick:0,plai:72,plugin:[33,80],point:45,pom:35,pool:60,port:45,post:43,practic:76,prepar:[12,57],prepared_statements_cache_size_mb:6,prerequisit:[43,49],prevent:57,preview:58,primari:18,print:[208,210],process:43,product:50,profil:81,profileload:148,progress:[79,207],project:40,promot:43,properti:46,propos:57,protocol:57,proxyhistogram:149,publish:[36,43],python:47,pytz:82,qualiti:57,queri:[0,15,26,27,32,51,54,216,218,219],question:[37,45],queu:57,quorum:[59,75],rack:50,rang:[59,79],range_request_timeout_in_m:6,rangekeysampl:150,rate:216,raw:204,rdbm:27,read:[50,59,65,75],read_request_timeout_in_m:6,rebuild:151,rebuild_index:152,recommend:50,reconnect:57,record:0,recov:57,reduc:206,referenti:27,refin:28,refresh:153,refreshsizeestim:154,refus:45,regular:60,relat:32,releas:43,relevel:209,reliabl:220,reload:[61,77],reloadlocalschema:155,reloadse:156,reloadssl:157,reloadtrigg:158,relocatesst:159,remot:45,remov:[66,79],removenod:160,repair:[58,59,66,75,76,161,210],repair_admin:162,repair_session_space_in_mb:6,repaired_data_tracking_for_partition_reads_en:6,repaired_data_tracking_for_range_reads_en:6,replac:79,replai:54,replaybatchlog:163,replic:[0,45,59],replica:[0,58,59],report:[5,37,45,74],report_unconfirmed_repaired_data_mismatch:6,repositori:43,request:[57,72,74],request_timeout_in_m:6,requir:[33,35],reserv:[9,24,25],resetfullquerylog:164,resetlocalschema:165,resili:57,resolut:35,resourc:[57,220],restor:62,restrict:20,result:13,resum:79,resumehandoff:166,retriev:14,review:[37,39],revok:20,rewrit:213,rhel:45,right:42,ring:[0,45,167],role:[20,77],role_manag:6,roles_update_interval_in_m:6,roles_validity_in_m:6,roll:[53,54],row:204,row_cache_class_nam:6,row_cache_keys_to_sav:6,row_cache_save_period:6,row_cache_size_in_mb:6,rowcach:80,rpc_address:6,rpc_interfac:6,rpc_interface_prefer_ipv6:6,rpc_keepal:6,rpm:49,rubi:47,run:[44,54,209],runtim:[46,72],rust:47,safeti:6,sai:45,same:[45,62],sampl:61,saved_caches_directori:6,scala:47,scalabl:1,scalar:14,scale:0,schema:[29,32],script:210,scrub:[168,211],second:208,secondari:[1,16],secur:[20,77],see:45,seed:[33,45],seed_provid:6,select:[13,17,18],selector:13,send:43,serial:[57,82],server:33,server_encryption_opt:6,session:82,set:[20,22,33,40,45,53,54,60,62,210],setbatchlogreplaythrottl:169,setcachecapac:170,setcachekeystosav:171,setcompactionthreshold:172,setcompactionthroughput:173,setconcurr:174,setconcurrentcompactor:175,setconcurrentviewbuild:176,sethintedhandoffthrottlekb:177,setinterdcstreamthroughput:178,setlogginglevel:179,setmaxhintwindow:180,setstreamthroughput:181,settimeout:182,settraceprob:183,setup:[33,40],share:82,shell:82,show:[45,82,210],sign:43,signatur:14,simpl:0,simplestrategi:[0,11],singl:[45,62,66,204],size:[28,57,212],sjk:184,skip:211,slack:[8,43],slow_query_log_timeout_in_m:6,small:212,snapshot:[62,64,185,207,
212,213],snapshot_before_compact:6,snitch:[50,78],sort:27,sourc:[40,82],special:82,specif:20,specifi:[208,212],specul:59,speed:[45,207],sphinx:36,split:212,ssl:[77,207],ssl_storage_port:6,sstabl:[4,58,60,64,66,67,74,203,205,206,207,210,214],sstable_preemptive_open_interval_in_mb:6,sstabledump:204,sstableexpiredblock:205,sstablelevelreset:206,sstableload:[64,207],sstablemetadata:208,sstableofflinerelevel:209,sstablerepairedset:210,sstablescrub:211,sstablesplit:212,sstableupgrad:213,sstableutil:214,sstableverifi:215,stage:57,stai:45,standard:77,start:[37,40,42,48],start_native_transport:6,starv:67,state:[219,220],statement:[12,18,34,54],statu:[186,210,219],statusautocompact:187,statusbackup:188,statusbinari:189,statusgossip:190,statushandoff:191,stc:[67,68],step:[35,216],stop:192,stopdaemon:193,storag:[4,9,27,72,74],storage_port:6,store:[0,45],strategi:[0,66,67,68],stratio:80,stream:[45,58,74,79],stream_entire_sst:6,stream_throughput_outbound_megabits_per_sec:6,streaming_connections_per_host:6,streaming_keep_alive_period_in_sec:6,stress:[44,81],structur:[62,204],style:34,submit:37,sum:14,support:[17,56,59,81],sync:43,synchron:0,system:218,system_virtual_schema:60,tabl:[11,57,60,62,65,74,75,204,206,209,211,213],tablehistogram:194,tablestat:195,take:62,tarbal:49,target:64,task:60,temporari:214,term:12,test:[33,37,40,44],than:45,thei:45,third:80,though:45,thread:[60,220],threadpool:[74,219],threshold:6,throttl:207,throughput:220,time:[14,22,69,72],timeout:57,timestamp:[22,45,204],timeuuid:14,timewindowcompactionstrategi:69,todo:11,tojson:17,token:[0,14,50,79],tombston:66,tombstone_failure_threshold:6,tombstone_warn_threshold:6,tool:[30,44,64,83,203,220],top:[45,220],topic:43,toppartit:196,tpstat:197,trace:82,tracetype_query_ttl:6,tracetype_repair_ttl:6,transact:[1,81,214],transit:59,transparent_data_encryption_opt:6,trickle_fsync:6,trickle_fsync_interval_in_kb:6,trigger:[21,66],troubleshoot:[35,217],truncat:11,truncate_request_timeout_in_m:6,truncatehint:198,ttl:[13,66],tunabl:0,tupl:22,tweet:43,two:45,type:[9,17,22,35,58,62,66,74],udt:22,unabl:45,uniqu:58,unit:[37,40,44],unknown:57,unlog:13,unlogged_batch_across_partitions_warn_threshold:6,unrepair:[66,210],unsubscrib:45,updat:[13,35,37,43,45],upgradesst:199,upload:43,usag:[45,76,81,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,204,205,206,207,208,209,210,211,212,213,214,215,220],use:45,user:[14,20,22,37,53,81],using:[0,36,45,66],uuid:14,valid:211,valu:208,variabl:46,verif:215,verifi:200,version:[0,4,10,43,82,201,213],view:[18,32,53,54,61],viewbuildstatu:202,virtual:[57,60],vmtouch:220,vnode:0,vote:43,wait:43,warn:65,websit:43,welcom:52,what:[1,32,42,45,53,61],when:[45,57,58,66],where:13,whitespac:34,why:[45,66],window:69,windows_timer_interv:6,without:[66,211,212],work:[22,36,42],write:[1,59,72],write_request_timeout_in_m:6,writetim:13,yaml:[61,65],you:42,your:[33,43],zero:58}}) \ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/cassandra_stress.html b/src/doc/4.0-alpha4/tools/cassandra_stress.html deleted file mode 100644 index 2459a46e2..000000000 --- a/src/doc/4.0-alpha4/tools/cassandra_stress.html +++ /dev/null @@ 
-1,353 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Cassandra Stress" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Stress

-

cassandra-stress is a tool for benchmarking and load testing a Cassandra cluster. cassandra-stress supports testing arbitrary CQL tables and queries to allow users to benchmark their data model.

-

This documentation focuses on user mode as this allows the testing of your actual schema.

-
-

Usage

-

There are several operation types:

-
-
    -
  • write-only, read-only, and mixed workloads of standard data
  • -
  • write-only and read-only workloads for counter columns
  • -
  • user configured workloads, running custom queries on custom schemas
  • -
-
-

The syntax is cassandra-stress <command> [options]. If you want more information on a given command or option, just run cassandra-stress help <command|option>.
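For example, a simple first benchmark might populate a cluster and then read it back; the node address and operation counts below are illustrative placeholders only, not values from this page:
cassandra-stress write n=1000000 -rate threads=50 -node 192.168.1.10
cassandra-stress read n=200000 -node 192.168.1.10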

-
-
Commands:
-
-
read:
-
Multiple concurrent reads - the cluster must first be populated by a write test
-
write:
-
Multiple concurrent writes against the cluster
-
mixed:
-
Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test
-
counter_write:
-
Multiple concurrent updates of counters.
-
counter_read:
-
Multiple concurrent reads of counters. The cluster must first be populated by a counter_write test.
-
user:
-
Interleaving of user provided queries, with configurable ratio and distribution.
-
help:
-
Print help for a command or option
-
print:
-
Inspect the output of a distribution definition
-
legacy:
-
Legacy support mode
-
-
-
Primary Options:
-
-
-pop:
-
Population distribution and intra-partition visit order
-
-insert:
-
Insert specific options relating to various methods for batching and splitting partition updates
-
-col:
-
Column details such as size and count distribution, data generator, names, comparator and if super columns should be used
-
-rate:
-
Thread count, rate limit or automatic mode (default is auto)
-
-mode:
-
Thrift or CQL with options
-
-errors:
-
How to handle errors when encountered during stress
-
-sample:
-
Specify the number of samples to collect for measuring latency
-
-schema:
-
Replication settings, compression, compaction, etc.
-
-node:
-
Nodes to connect to
-
-log:
-
Where to log progress to, and the interval at which to do it
-
-transport:
-
Custom transport factories
-
-port:
-
The port to connect to cassandra nodes on
-
-sendto:
-
Specify a stress server to send this command to
-
-graph:
-
Graph recorded metrics
-
-tokenrange:
-
Token range settings
-
-
-
Suboptions:
-
Every command and primary option has its own collection of suboptions. These are too numerous to list here. For information on the suboptions for each command or option, please use the help command, cassandra-stress help <command|option>.
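For example, to see the suboptions of the write command or the -pop option (these two targets are simply illustrations of the help syntax above):
cassandra-stress help write
cassandra-stress help -pop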
-
-
-
-

User mode

-

User mode allows you to stress test your own schemas. This can save time in the long run rather than building an application and then realising your schema doesn’t scale.

-
-

Profile

-

User mode requires a profile defined in YAML. Multiple YAML files may be specified in which case operations in the ops argument are referenced as specname.opname.

-

An identifier for the profile:

-
specname: staff_activities
-
-
-

The keyspace for the test:

-
keyspace: staff
-
-
-

CQL for the keyspace. Optional if the keyspace already exists:

-
keyspace_definition: |
- CREATE KEYSPACE staff WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
-
-
-

The table to be stressed:

-
table: staff_activities
-
-
-

CQL for the table. Optional if the table already exists:

-
table_definition: |
-  CREATE TABLE staff_activities (
-      name text,
-      when timeuuid,
-      what text,
-      PRIMARY KEY(name, when, what)
-  )
-
-
-

Optional meta information on the generated columns in the above table. The min and max only apply to text and blob types. The distribution field represents the total unique population distribution of that column across rows:

-
columnspec:
-  - name: name
-    size: uniform(5..10) # The names of the staff members are between 5-10 characters
-    population: uniform(1..10) # 10 possible staff members to pick from
-  - name: when
-    cluster: uniform(20..500) # Staff members do between 20 and 500 events
-  - name: what
-    size: normal(10..100,50)
-
-
-

Supported types are:

-

An exponential distribution over the range [min..max]:

-
EXP(min..max)
-
-
-

An extreme value (Weibull) distribution over the range [min..max]:

-
EXTREME(min..max,shape)
-
-
-

A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:

-
GAUSSIAN(min..max,stdvrng)
-
-
-

A gaussian/normal distribution, with explicitly defined mean and stdev:

-
GAUSSIAN(min..max,mean,stdev)
-
-
-

A uniform distribution over the range [min, max]:

-
UNIFORM(min..max)
-
-
-

A fixed distribution, always returning the same value:

-
FIXED(val)
-
-
-

If preceded by ~, the distribution is inverted

-

Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)
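As a sketch (this entry is not part of the original example), a column whose population follows an inverted exponential distribution could be declared in a columnspec as:
  - name: what
    population: ~exp(1..100)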

-

Insert distributions:

-
insert:
-  # How many partitions to insert per batch
-  partitions: fixed(1)
-  # How many rows to update per partition
-  select: fixed(1)/500
-  # UNLOGGED or LOGGED batch for insert
-  batchtype: UNLOGGED
-
-
-

Currently all inserts are done inside batches.

-

Read statements to use during the test:

-
queries:
-   events:
-      cql: select *  from staff_activities where name = ?
-      fields: samerow
-   latest_event:
-      cql: select * from staff_activities where name = ?  LIMIT 1
-      fields: samerow
-
-
-

Running a user mode test:

-
cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once
-
-
-

This will create the schema then run tests for 1 minute with an equal number of inserts, latest_event queries and events queries. Additionally the table will be truncated once before the test.

-

The full example can be found in the accompanying stress-example.yaml profile.

-
-
Running a user mode test with multiple yaml files:
-
cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once
-
This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table, although care must be taken that the table definition is identical (data generation specs can be different).
-
-
-
-

Lightweight transaction support

-

cassandra-stress supports lightweight transactions. In this mode it will first read the current data from Cassandra and then use the read value(s) to fulfill the lightweight transaction condition(s).

-

Lightweight transaction update query:

-
queries:
-  regularupdate:
-      cql: update blogposts set author = ? where domain = ? and published_date = ?
-      fields: samerow
-  updatewithlwt:
-      cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ?
-      fields: samerow
-
-
-

The full example can be found in the accompanying lightweight transaction example yaml profile.

-
-
-
-

Graphing

-

Graphs can be generated for each run of stress.

-[Image: example stress graph (example-stress-graph.png)]

To create a new graph:

-
cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph"
-
-
-

To add a new run to an existing graph, point to an existing file and add a revision name:

-
cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run"
-
-
-
-
-

FAQ

-

How do you use NetworkTopologyStrategy for the keyspace?

-

Use the schema option making sure to either escape the parentheses or enclose in quotes:

-
cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)"
-
-
-

How do you use SSL?

-

Use the transport option:

-
cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra"
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/cqlsh.html b/src/doc/4.0-alpha4/tools/cqlsh.html deleted file mode 100644 index 5c194639e..000000000 --- a/src/doc/4.0-alpha4/tools/cqlsh.html +++ /dev/null @@ -1,488 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "cqlsh: the CQL shell" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cqlsh: the CQL shell

-

cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes the Python native protocol driver, and connects to the single node specified on the command line.
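For example, connecting to a specific node (host, port and credentials below are placeholders):

cqlsh 192.0.2.10 9042 -u myuser -p mypassword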

-
-

Compatibility

-

cqlsh is compatible with Python 2.7.

-

In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.

-
-
-

Optional Dependencies

-

cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to improve the capabilities of cqlsh.

-
-

pytz

-

By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, the pytz library must be installed. See the timezone option in cqlshrc for specifying a timezone to use.
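A minimal sketch of the two steps involved, assuming pip is available and using a placeholder timezone (check cqlshrc.sample for the exact option syntax in your version):

pip install pytz

# in ~/.cassandra/cqlshrc
[ui]
timezone = Australia/Melbourne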

-
-
-

cython

-

The performance of cqlsh’s COPY operations can be improved by installing cython. This will compile the python modules that are central to the performance of COPY.
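For example, assuming pip is available for the Python interpreter that cqlsh uses:

pip install cython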

-
-
-
-

cqlshrc

-

The cqlshrc file holds configuration options for cqlsh. By default this is in the user’s home directory at ~/.cassandra/cqlshrc, but a custom location can be specified with the --cqlshrc option.

-

Example config values and documentation can be found in the conf/cqlshrc.sample file of a tarball installation. You can also view the latest version of cqlshrc online.
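For example, pointing cqlsh at a custom config file (the path below is a placeholder):

cqlsh --cqlshrc=/etc/cassandra/cqlshrc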

-
-
-

Command Line Options

-

Usage:

-

cqlsh [options] [host [port]]

-

Options:

-
-
-C --color
-
Force color output
-
--no-color
-
Disable color output
-
--browser
-
Specify the browser to use for displaying cqlsh help. This can be one of the supported browser names (e.g. firefox) or a browser path followed by %s (e.g. /usr/bin/google-chrome-stable %s).
-
--ssl
-
Use SSL when connecting to Cassandra
-
-u --user
-
Username to authenticate against Cassandra with
-
-p --password
-
Password to authenticate against Cassandra with, should be used in conjunction with --user
-
-k --keyspace
-
Keyspace to authenticate to, should be used in conjunction with --user
-
-f --file
-
Execute commands from the given file, then exit
-
--debug
-
Print additional debugging information
-
--encoding
-
Specify a non-default encoding for output (defaults to UTF-8)
-
--cqlshrc
-
Specify a non-default location for the cqlshrc file
-
-e --execute
-
Execute the given statement, then exit
-
--connect-timeout
-
Specify the connection timeout in seconds (defaults to 2s)
-
--python /path/to/python
-
Specify the full path to Python interpreter to override default on systems with multiple interpreters installed
-
--request-timeout
-
Specify the request timeout in seconds (defaults to 10s)
-
-t --tty
-
Force tty mode (command prompt)
-
-
-
-

Special Commands

-

In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of CQL. These are detailed below.

-
-

CONSISTENCY

-

Usage: CONSISTENCY <consistency level>

-

Sets the consistency level for operations to follow. Valid arguments include:

-
  • ANY
  • ONE
  • TWO
  • THREE
  • QUORUM
  • ALL
  • LOCAL_QUORUM
  • LOCAL_ONE
  • SERIAL
  • LOCAL_SERIAL
-
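For example (the confirmation message may vary slightly between versions):

cqlsh> CONSISTENCY LOCAL_QUORUM
Consistency level set to LOCAL_QUORUM.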
-
-

SERIAL CONSISTENCY

-

Usage: SERIAL CONSISTENCY <consistency level>

-

Sets the serial consistency level for operations to follow. Valid arguments include:

-
  • SERIAL
  • LOCAL_SERIAL
-

The serial consistency level is only used by conditional updates (INSERT, UPDATE and DELETE with an IF condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be guaranteed to see the update right away. For example, if a conditional write has a consistency level of QUORUM (and is successful), then a QUORUM read is guaranteed to see that write. But if the regular consistency level of that write is ANY, then only a read with a consistency level of SERIAL is guaranteed to see it (even a read with consistency ALL is not guaranteed to be enough).
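An illustrative session that sets both levels before issuing a conditional update (keyspace, table and values are placeholders):

cqlsh> CONSISTENCY QUORUM
cqlsh> SERIAL CONSISTENCY LOCAL_SERIAL
cqlsh> UPDATE shopping.users SET email = 'new@example.com' WHERE id = 1 IF email = 'old@example.com';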

-
-
-

SHOW VERSION

-

Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:

-
cqlsh> SHOW VERSION
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-
-
-
-
-

SHOW HOST

-

Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. Example:

-
cqlsh> SHOW HOST
-Connected to Prod_Cluster at 192.0.0.1:9042.
-
-
-
-
-

SHOW SESSION

-

Pretty prints a specific tracing session.

-

Usage: SHOW SESSION <session id>

-

Example usage:

-
cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8
-
-Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8
-
- activity                                                  | timestamp                  | source    | source_elapsed | client
------------------------------------------------------------+----------------------------+-----------+----------------+-----------
-                                        Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 |              0 | 127.0.0.1
- Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 |           3843 | 127.0.0.1
-...
-
-
-
-
-

SOURCE

-

Reads the contents of a file and executes each line as a CQL statement or special cqlsh command.

-

Usage: SOURCE <string filename>

-

Example usage:

-
cqlsh> SOURCE '/home/thobbs/commands.cql'
-
-
-
-
-

CAPTURE

-

Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it is captured.

-

Usage:

-
CAPTURE '<file>';
-CAPTURE OFF;
-CAPTURE;
-
-
-

That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME.

-

Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh session.

-

To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF.

-

To inspect the current capture configuration, use CAPTURE with no arguments.
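An illustrative capture session (the file name is a placeholder):

cqlsh> CAPTURE '~/cqlsh_output.txt';
cqlsh> SELECT * FROM system.local;   -- result rows are appended to the file, not shown in the session
cqlsh> CAPTURE OFF;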

-
-
-

HELP

-

Gives information about cqlsh commands. To see available topics, enter HELP without any arguments. To see help on a topic, use HELP <topic>. Also see the --browser argument for controlling what browser is used to display help.

-
-
-

TRACING

-

Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during the query will be printed.

-

Usage:

-
TRACING ON
-TRACING OFF
-
-
-
-
-

PAGING

-

Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data will be fetched at a time and a prompt will appear to fetch the next page. Generally, it’s a good idea to leave paging enabled in an interactive session to avoid fetching and printing large amounts of data at once.
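For example, to keep paging enabled but fetch 50 rows at a time:

cqlsh> PAGING 50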

-

Usage:

-
PAGING ON
-PAGING OFF
-PAGING <page size in rows>
-
-
-
-
-

EXPAND

-

Enables or disables vertical printing of rows. Enabling EXPAND is useful when many columns are fetched, or the contents of a single column are large.

-

Usage:

-
EXPAND ON
-EXPAND OFF
-
-
-
-
-

LOGIN

-

Authenticate as a specified Cassandra user for the current session.

-

Usage:

-
LOGIN <username> [<password>]
-
-
-
-
-

EXIT

-

Ends the current session and terminates the cqlsh process.

-

Usage:

-
EXIT
-QUIT
-
-
-
-
-

CLEAR

-

Clears the console.

-

Usage:

-
CLEAR
-CLS
-
-
-
-
-

DESCRIBE

-

Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for dumping all or portions of the schema.

-

Usage:

-
DESCRIBE CLUSTER
-DESCRIBE SCHEMA
-DESCRIBE KEYSPACES
-DESCRIBE KEYSPACE <keyspace name>
-DESCRIBE TABLES
-DESCRIBE TABLE <table name>
-DESCRIBE INDEX <index name>
-DESCRIBE MATERIALIZED VIEW <view name>
-DESCRIBE TYPES
-DESCRIBE TYPE <type name>
-DESCRIBE FUNCTIONS
-DESCRIBE FUNCTION <function name>
-DESCRIBE AGGREGATES
-DESCRIBE AGGREGATE <aggregate function name>
-
-
-

In any of the commands, DESC may be used in place of DESCRIBE.

-

The DESCRIBE CLUSTER command prints the cluster name and partitioner:

-
cqlsh> DESCRIBE CLUSTER
-
-Cluster: Test Cluster
-Partitioner: Murmur3Partitioner
-
-
-

The DESCRIBE SCHEMA command prints the DDL statements needed to recreate the entire schema. This is especially useful for dumping the schema in order to clone a cluster or restore from a backup.

-
-
-

COPY TO

-

Copies data from a table to a CSV file.

-

Usage:

-
COPY <table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the destination file. This can also be the special value STDOUT (without single quotes) to print the CSV to stdout.
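An illustrative export (keyspace, table, columns and file name are placeholders):

cqlsh> COPY mykeyspace.users (id, name, email) TO 'users.csv' WITH HEADER = true;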

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY TO

-
-
MAXREQUESTS
-
The maximum number of token ranges to fetch simultaneously. Defaults to 6.
-
PAGESIZE
-
The number of rows to fetch in a single page. Defaults to 1000.
-
PAGETIMEOUT
-
By default the page timeout is 10 seconds per 1000 entries in the page size or 10 seconds if pagesize is smaller.
-
BEGINTOKEN, ENDTOKEN
-
Token range to export. Defaults to exporting the full ring.
-
MAXOUTPUTSIZE
-
The maximum size of the output file measured in number of lines; beyond this maximum the output file will be split into segments. -1 means unlimited, and is the default.
-
ENCODING
-
The encoding used for characters. Defaults to utf8.
-
-
-
-
-

COPY FROM

-

Copies data from a CSV file to a table.

-

Usage:

-
COPY <table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the CSV file will be copied to the table. A subset of columns to copy may be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the source file. This can also be the special value STDIN (without single quotes) to read the CSV data from stdin.
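An illustrative import (keyspace, table, columns and file name are placeholders):

cqlsh> COPY mykeyspace.users (id, name, email) FROM 'users.csv' WITH HEADER = true AND MAXROWS = 100000;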

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
-

Options for COPY FROM

-
-
INGESTRATE
-
The maximum number of rows to process per second. Defaults to 100000.
-
MAXROWS
-
The maximum number of rows to import. -1 means unlimited, and is the default.
-
SKIPROWS
-
A number of initial rows to skip. Defaults to 0.
-
SKIPCOLS
-
A comma-separated list of column names to ignore. By default, no columns are skipped.
-
MAXPARSEERRORS
-
The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default.
-
MAXINSERTERRORS
-
The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000.
-
ERRFILE
-
A file to store all rows that could not be imported, by default this is import_<ks>_<table>.err where <ks> is your keyspace and <table> is your table name.
-
MAXBATCHSIZE
-
The max number of rows inserted in a single batch. Defaults to 20.
-
MINBATCHSIZE
-
The min number of rows inserted in a single batch. Defaults to 2.
-
CHUNKSIZE
-
The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000.
-
-
-
-

Shared COPY Options

-

Options that are common to both COPY TO and COPY FROM.

-
-
NULLVAL
-
The string placeholder for null values. Defaults to null.
-
HEADER
-
For COPY TO, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, specifies whether the first line in the CSV input file contains column names. Defaults to false.
-
DECIMALSEP
-
The character that is used as the decimal point separator. Defaults to '.' (a period).
-
THOUSANDSSEP
-
The character that is used to separate thousands. Defaults to the empty string.
-
BOOLSTYLE
-
The string literal format for boolean values. Defaults to True,False.
-
NUMPROCESSES
-
The number of child worker processes to create for COPY tasks. Defaults to a max of 4 for COPY FROM and 16 for COPY TO. However, at most (num_cores - 1) processes will be created.
-
MAXATTEMPTS
-
The maximum number of failed attempts to fetch a range of data (when using COPY TO) or insert a chunk of data (when using COPY FROM) before giving up. Defaults to 5.
-
REPORTFREQUENCY
-
How often status updates are refreshed, in seconds. Defaults to 0.25.
-
RATEFILE
-
An optional file to output rate statistics to. By default, statistics are not output to a file.
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/index.html b/src/doc/4.0-alpha4/tools/index.html deleted file mode 100644 index f0cb2d940..000000000 --- a/src/doc/4.0-alpha4/tools/index.html +++ /dev/null @@ -1,258 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Tools" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Tools

-

This section describes the command line tools provided with Apache Cassandra.

-
- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/assassinate.html b/src/doc/4.0-alpha4/tools/nodetool/assassinate.html deleted file mode 100644 index 7970cf926..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/assassinate.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "assassinate" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

assassinate

-
-
-

Usage

-
NAME
-        nodetool assassinate - Forcefully remove a dead node without
-        re-replicating any data. Use as a last resort if you cannot removenode
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] assassinate [--] <ip_address>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <ip_address>
-            IP address of the endpoint to assassinate
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/bootstrap.html b/src/doc/4.0-alpha4/tools/nodetool/bootstrap.html deleted file mode 100644 index c3aba387f..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/bootstrap.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "bootstrap" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

bootstrap

-
-
-

Usage

-
NAME
-        nodetool bootstrap - Monitor/manage node's bootstrap process
-
-SYNOPSIS
-        nodetool bootstrap
-        nodetool [(-u <username> | --username <username>)]
-                [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-pp | --print-port)] bootstrap resume
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-COMMANDS
-        With no arguments, Display help information
-
-        resume
-            Resume bootstrap streaming
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/cleanup.html b/src/doc/4.0-alpha4/tools/nodetool/cleanup.html deleted file mode 100644 index e6e0bef48..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/cleanup.html +++ /dev/null @@ -1,139 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "cleanup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cleanup

-
-
-

Usage

-
NAME
-        nodetool cleanup - Triggers the immediate cleanup of keys no longer
-        belonging to a node. By default, clean all keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] cleanup
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to cleanup simultanously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
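An illustrative invocation, limiting cleanup to two concurrent jobs (keyspace and table names are placeholders):

nodetool cleanup -j 2 my_keyspace my_table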
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/clearsnapshot.html b/src/doc/4.0-alpha4/tools/nodetool/clearsnapshot.html deleted file mode 100644 index 5702c7c28..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/clearsnapshot.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clearsnapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clearsnapshot

-
-
-

Usage

-
NAME
-        nodetool clearsnapshot - Remove the snapshot with the given name from
-        the given keyspaces. If no snapshotName is specified we will remove all
-        snapshots
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clearsnapshot [--all]
-                [-t <snapshot_name>] [--] [<keyspaces>...]
-
-OPTIONS
-        --all
-            Removes all snapshots
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -t <snapshot_name>
-            Remove the snapshot with a given name
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces>...]
-            Remove snapshots from the given keyspaces
-
-
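An illustrative invocation removing a single named snapshot from one keyspace (names are placeholders):

nodetool clearsnapshot -t my_snapshot my_keyspace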
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/clientstats.html b/src/doc/4.0-alpha4/tools/nodetool/clientstats.html deleted file mode 100644 index e7516df3b..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/clientstats.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clientstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clientstats

-
-
-

Usage

-
NAME
-        nodetool clientstats - Print information about connected clients
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clientstats [--all]
-                [--by-protocol] [--clear-history]
-
-OPTIONS
-        --all
-            Lists all connections
-
-        --by-protocol
-            Lists most recent client connections by protocol version
-
-        --clear-history
-            Clear the history of connected clients
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/compact.html b/src/doc/4.0-alpha4/tools/nodetool/compact.html deleted file mode 100644 index adbe9d72a..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/compact.html +++ /dev/null @@ -1,151 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compact" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compact

-
-
-

Usage

-
NAME
-        nodetool compact - Force a (major) compaction on one or more tables or
-        user-defined compaction on given SSTables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compact
-                [(-et <end_token> | --end-token <end_token>)] [(-s | --split-output)]
-                [(-st <start_token> | --start-token <start_token>)] [--user-defined]
-                [--] [<keyspace> <tables>...] or <SSTable file>...
-
-OPTIONS
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which compaction range ends
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s, --split-output
-            Use -s to not create a single big file
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the compaction range starts
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --user-defined
-            Use --user-defined to submit listed files for user-defined
-            compaction
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...] or <SSTable file>...
-            The keyspace followed by one or many tables or list of SSTable data
-            files when using --user-defined
-
-
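An illustrative invocation forcing a major compaction of a single table (names are placeholders):

nodetool compact my_keyspace my_table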
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/compactionhistory.html b/src/doc/4.0-alpha4/tools/nodetool/compactionhistory.html deleted file mode 100644 index 721efd9f4..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/compactionhistory.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionhistory" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionhistory

-
-
-

Usage

-
NAME
-        nodetool compactionhistory - Print history of compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionhistory
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/compactionstats.html b/src/doc/4.0-alpha4/tools/nodetool/compactionstats.html deleted file mode 100644 index cc9283911..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/compactionstats.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionstats

-
-
-

Usage

-
NAME
-        nodetool compactionstats - Print statistics on compactions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/decommission.html b/src/doc/4.0-alpha4/tools/nodetool/decommission.html deleted file mode 100644 index 959fd87fc..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/decommission.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "decommission" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

decommission

-
-
-

Usage

-
NAME
-        nodetool decommission - Decommission the *node I am connecting to*
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] decommission [(-f | --force)]
-
-OPTIONS
-        -f, --force
-            Force decommission of this node even when it reduces the number of
-            replicas to below configured RF
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/describecluster.html b/src/doc/4.0-alpha4/tools/nodetool/describecluster.html deleted file mode 100644 index 28d391544..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/describecluster.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describecluster" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describecluster

-
-
-

Usage

-
NAME
-        nodetool describecluster - Print the name, snitch, partitioner and
-        schema version of a cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describecluster
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/describering.html b/src/doc/4.0-alpha4/tools/nodetool/describering.html deleted file mode 100644 index 3b2adde32..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/describering.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describering" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describering

-
-
-

Usage

-
NAME
-        nodetool describering - Shows the token ranges info of a given keyspace
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describering [--] <keyspace>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            The keyspace name
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/disableauditlog.html b/src/doc/4.0-alpha4/tools/nodetool/disableauditlog.html deleted file mode 100644 index d15cd25de..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/disableauditlog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableauditlog

-
-
-

Usage

-
NAME
-        nodetool disableauditlog - Disable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableauditlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/disableautocompaction.html b/src/doc/4.0-alpha4/tools/nodetool/disableautocompaction.html deleted file mode 100644 index 3d93080ba..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/disableautocompaction.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableautocompaction

-
-
-

Usage

-
NAME
-        nodetool disableautocompaction - Disable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/disablebackup.html b/src/doc/4.0-alpha4/tools/nodetool/disablebackup.html deleted file mode 100644 index 05976cf7d..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/disablebackup.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebackup

-
-
-

Usage

-
NAME
-        nodetool disablebackup - Disable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/disablebinary.html b/src/doc/4.0-alpha4/tools/nodetool/disablebinary.html deleted file mode 100644 index 29d452581..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/disablebinary.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebinary

-
-
-

Usage

-
NAME
-        nodetool disablebinary - Disable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/disablefullquerylog.html b/src/doc/4.0-alpha4/tools/nodetool/disablefullquerylog.html deleted file mode 100644 index f43159903..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/disablefullquerylog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool disablefullquerylog - Disable the full query log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablefullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/disablegossip.html b/src/doc/4.0-alpha4/tools/nodetool/disablegossip.html deleted file mode 100644 index 6a2520df9..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/disablegossip.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablegossip

-
-
-

Usage

-
NAME
-        nodetool disablegossip - Disable gossip (effectively marking the node
-        down)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/disablehandoff.html b/src/doc/4.0-alpha4/tools/nodetool/disablehandoff.html deleted file mode 100644 index 8c04a3ec5..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/disablehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehandoff

-
-
-

Usage

-
NAME
-        nodetool disablehandoff - Disable storing hinted handoffs
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/disablehintsfordc.html b/src/doc/4.0-alpha4/tools/nodetool/disablehintsfordc.html deleted file mode 100644 index 29966a33a..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/disablehintsfordc.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool disablehintsfordc - Disable hints for a data center
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <datacenter>
-            The data center to disable
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/disableoldprotocolversions.html b/src/doc/4.0-alpha4/tools/nodetool/disableoldprotocolversions.html deleted file mode 100644 index 21022b9a5..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/disableoldprotocolversions.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool disableoldprotocolversions - Disable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/drain.html b/src/doc/4.0-alpha4/tools/nodetool/drain.html deleted file mode 100644 index 5586f5647..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/drain.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "drain" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

drain

-
-
-

Usage

-
NAME
-        nodetool drain - Drain the node (stop accepting writes and flush all
-        tables)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] drain
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/enableauditlog.html b/src/doc/4.0-alpha4/tools/nodetool/enableauditlog.html deleted file mode 100644 index fc3863bd0..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/enableauditlog.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableauditlog

-
-
-

Usage

-
NAME
-        nodetool enableauditlog - Enable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableauditlog
-                [--excluded-categories <excluded_categories>]
-                [--excluded-keyspaces <excluded_keyspaces>]
-                [--excluded-users <excluded_users>]
-                [--included-categories <included_categories>]
-                [--included-keyspaces <included_keyspaces>]
-                [--included-users <included_users>] [--logger <logger>]
-
-OPTIONS
-        --excluded-categories <excluded_categories>
-            Comma separated list of Audit Log Categories to be excluded for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --excluded-keyspaces <excluded_keyspaces>
-            Comma separated list of keyspaces to be excluded for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --excluded-users <excluded_users>
-            Comma separated list of users to be excluded for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --included-categories <included_categories>
-            Comma separated list of Audit Log Categories to be included for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --included-keyspaces <included_keyspaces>
-            Comma separated list of keyspaces to be included for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --included-users <included_users>
-            Comma separated list of users to be included for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        --logger <logger>
-            Logger name to be used for AuditLogging. Default BinAuditLogger. If
-            not set the value from cassandra.yaml will be used
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/enableautocompaction.html b/src/doc/4.0-alpha4/tools/nodetool/enableautocompaction.html deleted file mode 100644 index 8776707d6..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/enableautocompaction.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableautocompaction

-
-
-

Usage

-
NAME
-        nodetool enableautocompaction - Enable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
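A hedged usage sketch (keyspace and table names are placeholders):

```bash
# Hypothetical example: re-enable autocompaction for two tables in one keyspace.
$ nodetool enableautocompaction -- my_keyspace table_a table_b
```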
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/enablebackup.html b/src/doc/4.0-alpha4/tools/nodetool/enablebackup.html deleted file mode 100644 index b2435c717..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/enablebackup.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebackup

-
-
-

Usage

-
NAME
-        nodetool enablebackup - Enable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/enablebinary.html b/src/doc/4.0-alpha4/tools/nodetool/enablebinary.html deleted file mode 100644 index 5d412e33c..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/enablebinary.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebinary

-
-
-

Usage

-
NAME
-        nodetool enablebinary - Reenable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/enablefullquerylog.html b/src/doc/4.0-alpha4/tools/nodetool/enablefullquerylog.html deleted file mode 100644 index 1fb35ca30..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/enablefullquerylog.html +++ /dev/null @@ -1,156 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool enablefullquerylog - Enable full query logging, defaults for
-        the options are configured in cassandra.yaml
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablefullquerylog
-                [--archive-command <archive_command>] [--blocking]
-                [--max-archive-retries <archive_retries>]
-                [--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-                [--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-        --archive-command <archive_command>
-            Command that will handle archiving rolled full query log files.
-            Format is "/path/to/script.sh %path" where %path will be replaced
-            with the file to archive
-
-        --blocking
-            Whether to block producers or drop samples when the queue is full.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --max-archive-retries <archive_retries>
-            Max number of archive retries.
-
-        --max-log-size <max_log_size>
-            How many bytes of log data to store before dropping segments. Might
-            not be respected if a log file hasn't rolled so it can be deleted.
-
-        --max-queue-weight <max_queue_weight>
-            Maximum number of bytes of query data to queue to disk before
-            blocking or dropping samples.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        --path <path>
-            Path to store the full query log at. Will have its contents
-            recursively deleted.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        --roll-cycle <roll_cycle>
-            How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-        -u <username>, --username <username>
-            Remote jmx agent username
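A hedged usage sketch (the log directory is a placeholder; HOURLY is one of the roll cycles listed above):

```bash
# Hypothetical example: log full queries to a dedicated directory, rolling hourly.
$ nodetool enablefullquerylog --path /var/lib/cassandra/fqllog --roll-cycle HOURLY
```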
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/enablegossip.html b/src/doc/4.0-alpha4/tools/nodetool/enablegossip.html deleted file mode 100644 index 716c17f3c..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/enablegossip.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablegossip

-
-
-

Usage

-
NAME
-        nodetool enablegossip - Reenable gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/enablehandoff.html b/src/doc/4.0-alpha4/tools/nodetool/enablehandoff.html deleted file mode 100644 index 683a7a5cb..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/enablehandoff.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehandoff

-
-
-

Usage

-
NAME
-        nodetool enablehandoff - Reenable storing of future hints on the
-        current node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/enablehintsfordc.html b/src/doc/4.0-alpha4/tools/nodetool/enablehintsfordc.html deleted file mode 100644 index 892492b6a..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/enablehintsfordc.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool enablehintsfordc - Enable hints for a data center that was
-        previously disabled
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to enable
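A hedged usage sketch (the datacenter name is a placeholder):

```bash
# Hypothetical example: resume hint delivery for datacenter 'dc2'.
$ nodetool enablehintsfordc -- dc2
```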
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/enableoldprotocolversions.html b/src/doc/4.0-alpha4/tools/nodetool/enableoldprotocolversions.html deleted file mode 100644 index 6bd4a6a6d..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/enableoldprotocolversions.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool enableoldprotocolversions - Enable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/failuredetector.html b/src/doc/4.0-alpha4/tools/nodetool/failuredetector.html deleted file mode 100644 index fa35ffb60..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/failuredetector.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "failuredetector" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

failuredetector

-
-
-

Usage

-
NAME
-        nodetool failuredetector - Shows the failure detector information for
-        the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] failuredetector
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/flush.html b/src/doc/4.0-alpha4/tools/nodetool/flush.html deleted file mode 100644 index a5209732a..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/flush.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "flush" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

flush

-
-
-

Usage

-
NAME
-        nodetool flush - Flush one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] flush [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
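A hedged usage sketch (keyspace and table names are placeholders):

```bash
# Hypothetical example: flush every table in a keyspace, then a single table.
$ nodetool flush my_keyspace
$ nodetool flush my_keyspace my_table
```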
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/garbagecollect.html b/src/doc/4.0-alpha4/tools/nodetool/garbagecollect.html deleted file mode 100644 index d2bd50e57..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/garbagecollect.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "garbagecollect" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

garbagecollect

-
-
-

Usage

-
NAME
-        nodetool garbagecollect - Remove deleted data from one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] garbagecollect
-                [(-g <granularity> | --granularity <granularity>)]
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -g <granularity>, --granularity <granularity>
-            Granularity of garbage removal. ROW (default) removes deleted
-            partitions and rows, CELL also removes overwritten or deleted cells.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to clean up simultaneously, set to 0 to use all
-            available compaction threads. Defaults to 1 so that collections of
-            newer tables can see the data is deleted and also remove tombstones.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
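A hedged usage sketch (keyspace and table names are placeholders):

```bash
# Hypothetical example: remove overwritten cells as well, using two parallel jobs.
$ nodetool garbagecollect -g CELL -j 2 -- my_keyspace my_table
```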
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/gcstats.html b/src/doc/4.0-alpha4/tools/nodetool/gcstats.html deleted file mode 100644 index c1b765320..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/gcstats.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gcstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gcstats

-
-
-

Usage

-
NAME
-        nodetool gcstats - Print GC Statistics
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gcstats
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getbatchlogreplaythrottle.html b/src/doc/4.0-alpha4/tools/nodetool/getbatchlogreplaythrottle.html deleted file mode 100644 index 658792fee..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getbatchlogreplaythrottle.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool getbatchlogreplaythrottle - Print batchlog replay throttle in
-        KB/s. This is reduced proportionally to the number of nodes in the
-        cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getbatchlogreplaythrottle
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getcompactionthreshold.html b/src/doc/4.0-alpha4/tools/nodetool/getcompactionthreshold.html deleted file mode 100644 index 3086346b6..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getcompactionthreshold.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool getcompactionthreshold - Print min and max compaction
-        thresholds for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthreshold [--]
-                <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace with a table
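A hedged usage sketch (keyspace and table names are placeholders):

```bash
# Hypothetical example: print min/max compaction thresholds for one table.
$ nodetool getcompactionthreshold my_keyspace my_table
```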
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getcompactionthroughput.html b/src/doc/4.0-alpha4/tools/nodetool/getcompactionthroughput.html deleted file mode 100644 index c51589bf1..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getcompactionthroughput.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool getcompactionthroughput - Print the MB/s throughput cap for
-        compaction in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getconcurrency.html b/src/doc/4.0-alpha4/tools/nodetool/getconcurrency.html deleted file mode 100644 index 766420f93..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getconcurrency.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrency

-
-
-

Usage

-
NAME
-        nodetool getconcurrency - Get maximum concurrency for processing stages
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrency [--]
-                [stage-names]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [stage-names]
-            optional list of stage names, otherwise display all stages
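A hedged usage sketch (the stage names are assumptions):

```bash
# Hypothetical example: show all stages, then only two named stages.
$ nodetool getconcurrency
$ nodetool getconcurrency -- ReadStage MutationStage
```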
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getconcurrentcompactors.html b/src/doc/4.0-alpha4/tools/nodetool/getconcurrentcompactors.html deleted file mode 100644 index 50d738bf5..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getconcurrentcompactors.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool getconcurrentcompactors - Get the number of concurrent
-        compactors in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentcompactors
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getconcurrentviewbuilders.html b/src/doc/4.0-alpha4/tools/nodetool/getconcurrentviewbuilders.html deleted file mode 100644 index 99e7e1075..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getconcurrentviewbuilders.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool getconcurrentviewbuilders - Get the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentviewbuilders
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getendpoints.html b/src/doc/4.0-alpha4/tools/nodetool/getendpoints.html deleted file mode 100644 index 684819359..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getendpoints.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getendpoints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getendpoints

-
-
-

Usage

-
NAME
-        nodetool getendpoints - Print the endpoints that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getendpoints [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find the endpoint
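A hedged usage sketch (keyspace, table and partition key are placeholders):

```bash
# Hypothetical example: list the nodes that own partition key 'user42'.
$ nodetool getendpoints my_keyspace my_table user42
```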
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getinterdcstreamthroughput.html b/src/doc/4.0-alpha4/tools/nodetool/getinterdcstreamthroughput.html deleted file mode 100644 index 199e26074..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getinterdcstreamthroughput.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getinterdcstreamthroughput - Print the Mb/s throughput cap for
-        inter-datacenter streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getinterdcstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getlogginglevels.html b/src/doc/4.0-alpha4/tools/nodetool/getlogginglevels.html deleted file mode 100644 index ce85fb790..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getlogginglevels.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getlogginglevels" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getlogginglevels

-
-
-

Usage

-
NAME
-        nodetool getlogginglevels - Get the runtime logging levels
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getlogginglevels
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getmaxhintwindow.html b/src/doc/4.0-alpha4/tools/nodetool/getmaxhintwindow.html deleted file mode 100644 index 63785c5e3..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getmaxhintwindow.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool getmaxhintwindow - Print the max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getmaxhintwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getreplicas.html b/src/doc/4.0-alpha4/tools/nodetool/getreplicas.html deleted file mode 100644 index 7e0b9a961..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getreplicas.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getreplicas" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getreplicas

-
-
-

Usage

-
NAME
-        nodetool getreplicas - Print replicas for a given key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getreplicas [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find replicas
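A hedged usage sketch (keyspace, table and partition key are placeholders):

```bash
# Hypothetical example: list the replicas for partition key 'user42'.
$ nodetool getreplicas my_keyspace my_table user42
```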
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getseeds.html b/src/doc/4.0-alpha4/tools/nodetool/getseeds.html deleted file mode 100644 index ec121e459..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getseeds.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getseeds

-
-
-

Usage

-
NAME
-        nodetool getseeds - Get the list of currently in-use seed node IPs,
-        excluding this node's own IP
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getsstables.html b/src/doc/4.0-alpha4/tools/nodetool/getsstables.html deleted file mode 100644 index 384e8bc75..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getsstables.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getsstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getsstables

-
-
-

Usage

-
NAME
-        nodetool getsstables - Print the sstable filenames that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getsstables
-                [(-hf | --hex-format)] [--] <keyspace> <cfname> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hf, --hex-format
-            Specify the key in hexadecimal string format
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <key>
-            The keyspace, the column family, and the key
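A hedged usage sketch (the key and its hex form are placeholders):

```bash
# Hypothetical example: plain-text key lookup, then the same key in hex format.
$ nodetool getsstables my_keyspace my_table user42
$ nodetool getsstables --hex-format my_keyspace my_table 757365723432
```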
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/getstreamthroughput.html b/src/doc/4.0-alpha4/tools/nodetool/getstreamthroughput.html deleted file mode 100644 index 9b43451d8..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/getstreamthroughput.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getstreamthroughput - Print the Mb/s throughput cap for
-        streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/gettimeout.html b/src/doc/4.0-alpha4/tools/nodetool/gettimeout.html deleted file mode 100644 index ea023a857..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/gettimeout.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettimeout

-
-
-

Usage

-
NAME
-        nodetool gettimeout - Print the timeout of the given type in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettimeout [--] <timeout_type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type>
-            The timeout type, one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
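A hedged usage sketch using two of the timeout types listed above:

```bash
# Hypothetical example: print the read and write timeouts in ms.
$ nodetool gettimeout read
$ nodetool gettimeout write
```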
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/gettraceprobability.html b/src/doc/4.0-alpha4/tools/nodetool/gettraceprobability.html deleted file mode 100644 index b505db4d4..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/gettraceprobability.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettraceprobability

-
-
-

Usage

-
NAME
-        nodetool gettraceprobability - Print the current trace probability value
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettraceprobability
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/gossipinfo.html b/src/doc/4.0-alpha4/tools/nodetool/gossipinfo.html deleted file mode 100644 index e15aad1d8..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/gossipinfo.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gossipinfo" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gossipinfo

-
-
-

Usage

-
NAME
-        nodetool gossipinfo - Shows the gossip information for the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gossipinfo
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/handoffwindow.html b/src/doc/4.0-alpha4/tools/nodetool/handoffwindow.html deleted file mode 100644 index 5bbea3e00..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/handoffwindow.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "handoffwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

handoffwindow

-
-
-

Usage

-
NAME
-        nodetool handoffwindow - Print current hinted handoff window
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] handoffwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/help.html b/src/doc/4.0-alpha4/tools/nodetool/help.html deleted file mode 100644 index 0f847477c..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/help.html +++ /dev/null @@ -1,112 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "help" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

help

-
-
-

Usage

-
NAME
-        nodetool help - Display help information
-
-SYNOPSIS
-        nodetool help [--] [<command>...]
-
-OPTIONS
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <command>
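A hedged usage sketch:

```bash
# Hypothetical example: show the help text for a single subcommand.
$ nodetool help import
```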
-
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/import.html b/src/doc/4.0-alpha4/tools/nodetool/import.html deleted file mode 100644 index 881a0e2c7..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/import.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "import" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

import

-
-
-

Usage

-
NAME
-        nodetool import - Import new SSTables to the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] import
-                [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-                [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-                [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-                <directory> ...
-
-OPTIONS
-        -c, --no-invalidate-caches
-            Don't invalidate the row cache when importing
-
-        -e, --extended-verify
-            Run an extended verify, verifying all values in the new sstables
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --keep-level
-            Keep the level on the new sstables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick import without verifying sstables, clearing row cache or
-            checking in which data directory to put the file
-
-        -r, --keep-repaired
-            Keep any repaired information from the sstables
-
-        -t, --no-tokens
-            Don't verify that all tokens in the new sstable are owned by the
-            current node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -v, --no-verify
-            Don't verify new sstables
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <directory> ...
-            The keyspace, table name and directories to import sstables from
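A hedged usage sketch (the staging directory and keyspace/table names are placeholders):

```bash
# Hypothetical example: import sstables while keeping their repaired status and levels.
$ nodetool import --keep-repaired --keep-level my_keyspace my_table /var/lib/cassandra/staging
```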
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/info.html b/src/doc/4.0-alpha4/tools/nodetool/info.html deleted file mode 100644 index 308cd27ac..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/info.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "info" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

info

-
-
-

Usage

-
NAME
-        nodetool info - Print node information (uptime, load, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] info [(-T | --tokens)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -T, --tokens
-            Display all tokens
-
-        -u <username>, --username <username>
-            Remote jmx agent username
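
        Example (illustrative only; the host is a placeholder and 7199 is the
        JMX port Cassandra uses by default):

            nodetool -h 127.0.0.1 -p 7199 info --tokens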
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/invalidatecountercache.html b/src/doc/4.0-alpha4/tools/nodetool/invalidatecountercache.html deleted file mode 100644 index 90930dc57..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/invalidatecountercache.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatecountercache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatecountercache

-
-
-

Usage

-
NAME
-        nodetool invalidatecountercache - Invalidate the counter cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatecountercache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/invalidatekeycache.html b/src/doc/4.0-alpha4/tools/nodetool/invalidatekeycache.html deleted file mode 100644 index 62ca268c6..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/invalidatekeycache.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatekeycache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatekeycache

-
-
-

Usage

-
NAME
-        nodetool invalidatekeycache - Invalidate the key cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatekeycache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/invalidaterowcache.html b/src/doc/4.0-alpha4/tools/nodetool/invalidaterowcache.html deleted file mode 100644 index d083b01ad..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/invalidaterowcache.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidaterowcache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidaterowcache

-
-
-

Usage

-
NAME
-        nodetool invalidaterowcache - Invalidate the row cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidaterowcache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/join.html b/src/doc/4.0-alpha4/tools/nodetool/join.html deleted file mode 100644 index 40400ae20..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/join.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "join" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

join

-
-
-

Usage

-
NAME
-        nodetool join - Join the ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] join
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/listsnapshots.html b/src/doc/4.0-alpha4/tools/nodetool/listsnapshots.html deleted file mode 100644 index b776a9793..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/listsnapshots.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "listsnapshots" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

listsnapshots

-
-
-

Usage

-
NAME
-        nodetool listsnapshots - Lists all the snapshots along with the size on
-        disk and true size. True size is the total size of all SSTables which
-        are not backed up to disk. Size on disk is the total size of the snapshot
-        on disk. Total TrueDiskSpaceUsed does not account for SSTable deduplication.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] listsnapshots
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/move.html b/src/doc/4.0-alpha4/tools/nodetool/move.html deleted file mode 100644 index 2a248e917..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/move.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "move" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

move

-
-
-

Usage

-
NAME
-        nodetool move - Move node on the token ring to a new token
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] move [--] <new token>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <new token>
-            The new token.
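
        Example (illustrative only; the token value is a placeholder). The --
        separator keeps a token with a leading minus from being read as an option:

            nodetool move -- -3074457345618258603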
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/netstats.html b/src/doc/4.0-alpha4/tools/nodetool/netstats.html deleted file mode 100644 index 64cf9d96a..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/netstats.html +++ /dev/null @@ -1,130 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "netstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

netstats

-
-
-

Usage

-
NAME
-        nodetool netstats - Print network information on provided host
-        (connecting node by default)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] netstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
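
        Example (illustrative only):

            nodetool netstats -H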
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/nodetool.html b/src/doc/4.0-alpha4/tools/nodetool/nodetool.html deleted file mode 100644 index 7a9758876..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/nodetool.html +++ /dev/null @@ -1,246 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-
-

Usage

-
-
usage: nodetool [(-u <username> | –username <username>)]
-
[(-h <host> | –host <host>)] [(-p <port> | –port <port>)] -[(-pw <password> | –password <password>)] -[(-pwf <passwordFilePath> | –password-file <passwordFilePath>)] -[(-pp | –print-port)] <command> [<args>]
-
-

The most commonly used nodetool commands are:

-
-

assassinate - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode

-

bootstrap - Monitor/manage node’s bootstrap process

-

cleanup - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces

-

clearsnapshot - Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots

-

clientstats - Print information about connected clients

-

compact - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables

-

compactionhistory - Print history of compaction

-

compactionstats - Print statistics on compactions

-

decommission - Decommission the node I am connecting to

-

describecluster - Print the name, snitch, partitioner and schema version of a cluster

-

describering - Shows the token ranges info of a given keyspace

-

disableauditlog - Disable the audit log

-

disableautocompaction - Disable autocompaction for the given keyspace and table

-

disablebackup - Disable incremental backup

-

disablebinary - Disable native transport (binary protocol)

-

disablefullquerylog - Disable the full query log

-

disablegossip - Disable gossip (effectively marking the node down)

-

disablehandoff - Disable storing hinted handoffs

-

disablehintsfordc - Disable hints for a data center

-

disableoldprotocolversions - Disable old protocol versions

-

drain - Drain the node (stop accepting writes and flush all tables)

-

enableauditlog - Enable the audit log

-

enableautocompaction - Enable autocompaction for the given keyspace and table

-

enablebackup - Enable incremental backup

-

enablebinary - Reenable native transport (binary protocol)

-

enablefullquerylog - Enable full query logging, defaults for the options are configured in cassandra.yaml

-

enablegossip - Reenable gossip

-

enablehandoff - Reenable future hints storing on the current node

-

enablehintsfordc - Enable hints for a data center that was previously disabled

-

enableoldprotocolversions - Enable old protocol versions

-

failuredetector - Shows the failure detector information for the cluster

-

flush - Flush one or more tables

-

garbagecollect - Remove deleted data from one or more tables

-

gcstats - Print GC Statistics

-

getbatchlogreplaythrottle - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster.

-

getcompactionthreshold - Print min and max compaction thresholds for a given table

-

getcompactionthroughput - Print the MB/s throughput cap for compaction in the system

-

getconcurrency - Get maximum concurrency for processing stages

-

getconcurrentcompactors - Get the number of concurrent compactors in the system.

-

getconcurrentviewbuilders - Get the number of concurrent view builders in the system

-

getendpoints - Print the endpoints that own the key

-

getinterdcstreamthroughput - Print the Mb/s throughput cap for inter-datacenter streaming in the system

-

getlogginglevels - Get the runtime logging levels

-

getmaxhintwindow - Print the max hint window in ms

-

getreplicas - Print replicas for a given key

-

getseeds - Get the currently in use seed node IP list excluding the node IP

-

getsstables - Print the sstable filenames that own the key

-

getstreamthroughput - Print the Mb/s throughput cap for streaming in the system

-

gettimeout - Print the timeout of the given type in ms

-

gettraceprobability - Print the current trace probability value

-

gossipinfo - Shows the gossip information for the cluster

-

handoffwindow - Print current hinted handoff window

-

help - Display help information

-

import - Import new SSTables to the system

-

info - Print node information (uptime, load, …)

-

invalidatecountercache - Invalidate the counter cache

-

invalidatekeycache - Invalidate the key cache

-

invalidaterowcache - Invalidate the row cache

-

join - Join the ring

-

listsnapshots - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is the total size of the snapshot on disk. Total TrueDiskSpaceUsed does not account for SSTable deduplication.

-

move - Move node on the token ring to a new token

-

netstats - Print network information on provided host (connecting node by default)

-

pausehandoff - Pause hints delivery process

-

profileload - Low footprint profiling of activity for a period of time

-

proxyhistograms - Print statistic histograms for network operations

-

rangekeysample - Shows the sampled keys held across all keyspaces

-

rebuild - Rebuild data by streaming from other nodes (similarly to bootstrap)

-

rebuild_index - A full rebuild of native secondary indexes for a given table

-

refresh - Load newly placed SSTables to the system without restart

-

refreshsizeestimates - Refresh system.size_estimates

-

reloadlocalschema - Reload local node schema from system tables

-

reloadseeds - Reload the seed node list from the seed node provider

-

reloadssl - Signals Cassandra to reload SSL certificates

-

reloadtriggers - Reload trigger classes

-

relocatesstables - Relocates sstables to the correct disk

-

removenode - Show status of current node removal, force completion of pending removal or remove provided ID

-

repair - Repair one or more tables

-

repair_admin - list and fail incremental repair sessions

-

replaybatchlog - Kick off batchlog replay and wait for finish

-

resetfullquerylog - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX

-

resetlocalschema - Reset node’s local schema and resync

-

resumehandoff - Resume hints delivery process

-

ring - Print information about the token ring

-

scrub - Scrub (rebuild sstables for) one or more tables

-

setbatchlogreplaythrottle - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster.

-

setcachecapacity - Set global key, row, and counter cache capacities (in MB units)

-

setcachekeystosave - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable

-

setcompactionthreshold - Set min and max compaction thresholds for a given table

-

setcompactionthroughput - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling

-

setconcurrency - Set maximum concurrency for processing stage

-

setconcurrentcompactors - Set number of concurrent compactors in the system.

-

setconcurrentviewbuilders - Set the number of concurrent view builders in the system

-

sethintedhandoffthrottlekb - Set hinted handoff throttle in kb per second, per delivery thread.

-

setinterdcstreamthroughput - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling

-

setlogginglevel - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters.

-

setmaxhintwindow - Set the specified max hint window in ms

-

setstreamthroughput - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling

-

settimeout - Set the specified timeout in ms, or 0 to disable timeout

-

settraceprobability - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default

-

sjk - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk --help' for more information.

-

snapshot - Take a snapshot of specified keyspaces or a snapshot of the specified table

-

status - Print cluster information (state, load, IDs, …)

-

statusautocompaction - status of autocompaction of the given keyspace and table

-

statusbackup - Status of incremental backup

-

statusbinary - Status of native transport (binary protocol)

-

statusgossip - Status of gossip

-

statushandoff - Status of storing future hints on the current node

-

stop - Stop compaction

-

stopdaemon - Stop cassandra daemon

-

tablehistograms - Print statistic histograms for a given table

-

tablestats - Print statistics on tables

-

toppartitions - Sample and print the most active partitions

-

tpstats - Print usage statistics of thread pools

-

truncatehints - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified.

-

upgradesstables - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version)

-

verify - Verify (check data checksum for) one or more tables

-

version - Print cassandra version

-

viewbuildstatus - Show progress of a materialized view build

-
-

See ‘nodetool help <command>’ for more information on a specific command.

-
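
        A complete invocation combining the global JMX options with a command
        might look like the following (illustrative only; the credentials,
        password file path, host, and chosen command are placeholders):

            nodetool -u cassandra -pwf /etc/cassandra/jmxremote.password -h 10.0.0.1 status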
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/pausehandoff.html b/src/doc/4.0-alpha4/tools/nodetool/pausehandoff.html deleted file mode 100644 index 97f84b6f5..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/pausehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "pausehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

pausehandoff

-
-
-

Usage

-
NAME
-        nodetool pausehandoff - Pause hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] pausehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/profileload.html b/src/doc/4.0-alpha4/tools/nodetool/profileload.html deleted file mode 100644 index 487e4922d..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/profileload.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "profileload" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

profileload

-
-
-

Usage

-
NAME
-        nodetool profileload - Low footprint profiling of activity for a period
-        of time
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] profileload [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
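
        Example (illustrative only; the keyspace, table, and duration are placeholders):

            nodetool profileload -k 5 -- my_keyspace my_table 10000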
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/proxyhistograms.html b/src/doc/4.0-alpha4/tools/nodetool/proxyhistograms.html deleted file mode 100644 index 56ced57bc..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/proxyhistograms.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "proxyhistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

proxyhistograms

-
-
-

Usage

-
NAME
-        nodetool proxyhistograms - Print statistic histograms for network
-        operations
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] proxyhistograms
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/rangekeysample.html b/src/doc/4.0-alpha4/tools/nodetool/rangekeysample.html deleted file mode 100644 index 840d37fe7..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/rangekeysample.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rangekeysample" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rangekeysample

-
-
-

Usage

-
NAME
-        nodetool rangekeysample - Shows the sampled keys held across all
-        keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rangekeysample
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/rebuild.html b/src/doc/4.0-alpha4/tools/nodetool/rebuild.html deleted file mode 100644 index eecab82e9..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/rebuild.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild

-
-
-

Usage

-
NAME
-        nodetool rebuild - Rebuild data by streaming from other nodes (similarly
-        to bootstrap)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild
-                [(-ks <specific_keyspace> | --keyspace <specific_keyspace>)]
-                [(-s <specific_sources> | --sources <specific_sources>)]
-                [(-ts <specific_tokens> | --tokens <specific_tokens>)] [--]
-                <src-dc-name>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -ks <specific_keyspace>, --keyspace <specific_keyspace>
-            Use -ks to rebuild specific keyspace.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <specific_sources>, --sources <specific_sources>
-            Use -s to specify hosts that this node should stream from when -ts
-            is used. Multiple hosts should be separated using commas (e.g.
-            127.0.0.1,127.0.0.2,...)
-
-        -ts <specific_tokens>, --tokens <specific_tokens>
-            Use -ts to rebuild specific token ranges, in the format of "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]".
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <src-dc-name>
-            Name of DC from which to select sources for streaming. By default,
-            pick any DC
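
        Example (illustrative only; the keyspace and datacenter names are placeholders):

            nodetool rebuild -ks my_keyspace -- DC1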
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/rebuild_index.html b/src/doc/4.0-alpha4/tools/nodetool/rebuild_index.html deleted file mode 100644 index 08e25c2d2..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/rebuild_index.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild_index" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild_index

-
-
-

Usage

-
NAME
-        nodetool rebuild_index - A full rebuild of native secondary indexes for
-        a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild_index [--] <keyspace>
-                <table> <indexName...>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <indexName...>
-            The keyspace and table name followed by a list of index names
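
        Example (illustrative only; the keyspace, table, and index names are placeholders):

            nodetool rebuild_index -- my_keyspace my_table my_index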
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/refresh.html b/src/doc/4.0-alpha4/tools/nodetool/refresh.html deleted file mode 100644 index e585b9f31..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/refresh.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refresh" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refresh

-
-
-

Usage

-
NAME
-        nodetool refresh - Load newly placed SSTables to the system without
-        restart
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refresh [--] <keyspace>
-                <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
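
        Example (illustrative only; the keyspace and table names are placeholders):

            nodetool refresh -- my_keyspace my_table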
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/refreshsizeestimates.html b/src/doc/4.0-alpha4/tools/nodetool/refreshsizeestimates.html deleted file mode 100644 index 8f705918b..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/refreshsizeestimates.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refreshsizeestimates" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refreshsizeestimates

-
-
-

Usage

-
NAME
-        nodetool refreshsizeestimates - Refresh system.size_estimates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refreshsizeestimates
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/reloadlocalschema.html b/src/doc/4.0-alpha4/tools/nodetool/reloadlocalschema.html deleted file mode 100644 index 16d673751..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/reloadlocalschema.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadlocalschema

-
-
-

Usage

-
NAME
-        nodetool reloadlocalschema - Reload local node schema from system tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/reloadseeds.html b/src/doc/4.0-alpha4/tools/nodetool/reloadseeds.html deleted file mode 100644 index a43911ec1..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/reloadseeds.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadseeds

-
-
-

Usage

-
NAME
-        nodetool reloadseeds - Reload the seed node list from the seed node
-        provider
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/reloadssl.html b/src/doc/4.0-alpha4/tools/nodetool/reloadssl.html deleted file mode 100644 index 5446f6470..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/reloadssl.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadssl" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadssl

-
-
-

Usage

-
NAME
-        nodetool reloadssl - Signals Cassandra to reload SSL certificates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadssl
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/reloadtriggers.html b/src/doc/4.0-alpha4/tools/nodetool/reloadtriggers.html deleted file mode 100644 index 101372978..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/reloadtriggers.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadtriggers" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadtriggers

-
-
-

Usage

-
NAME
-        nodetool reloadtriggers - Reload trigger classes
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadtriggers
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/relocatesstables.html b/src/doc/4.0-alpha4/tools/nodetool/relocatesstables.html deleted file mode 100644 index b9f34dc5c..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/relocatesstables.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "relocatesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

relocatesstables

-
-
-

Usage

-
NAME
-        nodetool relocatesstables - Relocates sstables to the correct disk
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] relocatesstables
-                [(-j <jobs> | --jobs <jobs>)] [--] <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to relocate simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace and table name
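
        Example (illustrative only; the keyspace and table names are placeholders):

            nodetool relocatesstables -j 2 -- my_keyspace my_table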
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/removenode.html b/src/doc/4.0-alpha4/tools/nodetool/removenode.html deleted file mode 100644 index aaaad5905..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/removenode.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "removenode" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

removenode

-
-
-

Usage

-
NAME
-        nodetool removenode - Show status of current node removal, force
-        completion of pending removal or remove provided ID
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] removenode [--]
-                <status>|<force>|<ID>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <status>|<force>|<ID>
-            Show status of current node removal, force completion of pending
-            removal, or remove provided ID
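
        Examples (illustrative only; <host-id> stands for the UUID of the node
        being removed, as reported by nodetool status):

            nodetool removenode status
            nodetool removenode <host-id>
            nodetool removenode force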
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/repair.html b/src/doc/4.0-alpha4/tools/nodetool/repair.html deleted file mode 100644 index babbc223c..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/repair.html +++ /dev/null @@ -1,199 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair

-
-
-

Usage

-
NAME
-        nodetool repair - Repair one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair
-                [(-dc <specific_dc> | --in-dc <specific_dc>)...]
-                [(-dcpar | --dc-parallel)] [(-et <end_token> | --end-token <end_token>)]
-                [(-force | --force)] [(-full | --full)]
-                [(-hosts <specific_host> | --in-hosts <specific_host>)...]
-                [(-j <job_threads> | --job-threads <job_threads>)]
-                [(-local | --in-local-dc)] [(-os | --optimise-streams)] [(-pl | --pull)]
-                [(-pr | --partitioner-range)] [(-prv | --preview)]
-                [(-seq | --sequential)]
-                [(-st <start_token> | --start-token <start_token>)] [(-tr | --trace)]
-                [(-vd | --validate)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -dc <specific_dc>, --in-dc <specific_dc>
-            Use -dc to repair specific datacenters
-
-        -dcpar, --dc-parallel
-            Use -dcpar to repair data centers in parallel.
-
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which repair range ends (inclusive)
-
-        -force, --force
-            Use -force to filter out down endpoints
-
-        -full, --full
-            Use -full to issue a full repair.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hosts <specific_host>, --in-hosts <specific_host>
-            Use -hosts to repair specific hosts
-
-        -j <job_threads>, --job-threads <job_threads>
-            Number of threads to run repair jobs. Usually this means number of
-            CFs to repair concurrently. WARNING: increasing this puts more load
-            on repairing nodes, so be careful. (default: 1, max: 4)
-
-        -local, --in-local-dc
-            Use -local to only repair against nodes in the same datacenter
-
-        -os, --optimise-streams
-            Use --optimise-streams to try to reduce the number of streams we do
-            (EXPERIMENTAL, see CASSANDRA-3200).
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pl, --pull
-            Use --pull to perform a one way repair where data is only streamed
-            from a remote node to this node.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pr, --partitioner-range
-            Use -pr to repair only the first range returned by the partitioner
-
-        -prv, --preview
-            Determine ranges and amount of data to be streamed, but don't
-            actually perform repair
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -seq, --sequential
-            Use -seq to carry out a sequential repair
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the repair range starts
-            (exclusive)
-
-        -tr, --trace
-            Use -tr to trace the repair. Traces are logged to
-            system_traces.events.
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -vd, --validate
-            Checks that repaired data is in sync between nodes. Out of sync
-            repaired data indicates a full repair should be run.
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
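
        Example (illustrative only; the keyspace and table names are placeholders):

            nodetool repair -pr -full -- my_keyspace my_table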
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/repair_admin.html b/src/doc/4.0-alpha4/tools/nodetool/repair_admin.html deleted file mode 100644 index 33aa4bbb9..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/repair_admin.html +++ /dev/null @@ -1,139 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair_admin" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair_admin

-
-
-

Usage

-
NAME
-        nodetool repair_admin - list and fail incremental repair sessions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair_admin [(-a | --all)]
-                [(-f | --force)] [(-l | --list)] [(-x <cancel> | --cancel <cancel>)]
-
-OPTIONS
-        -a, --all
-            include completed and failed sessions
-
-        -f, --force
-            cancel repair session from a node other than the repair coordinator.
-            Attempting to cancel FINALIZED or FAILED sessions is an error.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --list
-            list repair sessions (default behavior)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -x <cancel>, --cancel <cancel>
-            cancel an incremental repair session
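
        Examples (illustrative only; <session-id> stands for an incremental
        repair session id taken from the --list output):

            nodetool repair_admin --list
            nodetool repair_admin --cancel <session-id>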
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/replaybatchlog.html b/src/doc/4.0-alpha4/tools/nodetool/replaybatchlog.html deleted file mode 100644 index d8e5f4e8e..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/replaybatchlog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "replaybatchlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

replaybatchlog

-
-
-

Usage

-
NAME
-        nodetool replaybatchlog - Kick off batchlog replay and wait for finish
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] replaybatchlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/resetfullquerylog.html b/src/doc/4.0-alpha4/tools/nodetool/resetfullquerylog.html deleted file mode 100644 index 7f71db1d3..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/resetfullquerylog.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetfullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetfullquerylog

-
-
-

Usage

-
NAME
-        nodetool resetfullquerylog - Stop the full query log and clean files in
-        the configured full query log directory from cassandra.yaml as well as
-        JMX
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetfullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/resetlocalschema.html b/src/doc/4.0-alpha4/tools/nodetool/resetlocalschema.html deleted file mode 100644 index 161861f29..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/resetlocalschema.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-resetlocalschema
-
-Usage
-
-NAME
-        nodetool resetlocalschema - Reset node's local schema and resync
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/resumehandoff.html b/src/doc/4.0-alpha4/tools/nodetool/resumehandoff.html deleted file mode 100644 index 6df42ddfe..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/resumehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resumehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-resumehandoff
-
-Usage
-
-NAME
-        nodetool resumehandoff - Resume hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resumehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/ring.html b/src/doc/4.0-alpha4/tools/nodetool/ring.html deleted file mode 100644 index 28b43e9b0..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/ring.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "ring" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-ring
-
-Usage
-
-NAME
-        nodetool ring - Print information about the token ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] ring [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            Specify a keyspace for accurate ownership information (topology
-            awareness)
-
-
-
-
-
-
-
-
-
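For quick reference, a typical invocation of the command documented above might look like the following sketch; 127.0.0.1, 7199 (Cassandra's default JMX port) and the keyspace name are illustrative placeholders.

```bash
# Print the token ring for one keyspace, showing hostnames instead of IPs
nodetool -h 127.0.0.1 -p 7199 ring --resolve-ip -- my_keyspace
```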
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/scrub.html b/src/doc/4.0-alpha4/tools/nodetool/scrub.html deleted file mode 100644 index 7923543c8..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/scrub.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "scrub" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-scrub
-
-Usage
-
-NAME
-        nodetool scrub - Scrub (rebuild sstables for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] scrub
-                [(-j <jobs> | --jobs <jobs>)] [(-n | --no-validate)]
-                [(-ns | --no-snapshot)] [(-r | --reinsert-overflowed-ttl)]
-                [(-s | --skip-corrupted)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to scrub simultaneously, set to 0 to use all
-            available compaction threads
-
-        -n, --no-validate
-            Do not validate columns using column validator
-
-        -ns, --no-snapshot
-            Scrubbed CFs will be snapshotted first, if disableSnapshot is false.
-            (default false)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --reinsert-overflowed-ttl
-            Rewrites rows with overflowed expiration date affected by
-            CASSANDRA-14092 with the maximum supported expiration date of
-            2038-01-19T03:14:06+00:00. The rows are rewritten with the original
-            timestamp incremented by one millisecond to override/supersede any
-            potential tombstone that may have been generated during compaction
-            of the affected rows.
-
-        -s, --skip-corrupted
-            Skip corrupted partitions even when scrubbing counter tables.
-            (default false)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
-
-
-
-
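A usage sketch for the options above; host, port, keyspace and table names are placeholders, and only flags listed in the page are used.

```bash
# Rebuild SSTables for one table with two parallel jobs, skipping the pre-scrub snapshot
nodetool -h 127.0.0.1 -p 7199 scrub --jobs 2 --no-snapshot -- my_keyspace my_table
```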
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setbatchlogreplaythrottle.html b/src/doc/4.0-alpha4/tools/nodetool/setbatchlogreplaythrottle.html deleted file mode 100644 index 660c6a316..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setbatchlogreplaythrottle.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setbatchlogreplaythrottle
-
-Usage
-
-NAME
-        nodetool setbatchlogreplaythrottle - Set batchlog replay throttle in KB
-        per second, or 0 to disable throttling. This will be reduced
-        proportionally to the number of nodes in the cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setbatchlogreplaythrottle [--]
-                <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_kb_per_sec>
-            Value in KB per second, 0 to disable throttling
-
-
-
-
-
-
-
-
-
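An illustrative invocation; host, port and the throttle value are example placeholders.

```bash
# Throttle batchlog replay to 1024 KB/s cluster-wide; 0 would disable throttling
nodetool -h 127.0.0.1 -p 7199 setbatchlogreplaythrottle 1024
```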
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setcachecapacity.html b/src/doc/4.0-alpha4/tools/nodetool/setcachecapacity.html deleted file mode 100644 index 7914b67f2..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setcachecapacity.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachecapacity" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setcachecapacity
-
-Usage
-
-NAME
-        nodetool setcachecapacity - Set global key, row, and counter cache
-        capacities (in MB units)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachecapacity [--]
-                <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-            Key cache, row cache, and counter cache (in MB)
-
-
-
-
-
-
-
-
-
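An illustrative invocation; the three capacities are example values in MB, given in the documented order: key cache, row cache, counter cache.

```bash
# 100 MB key cache, row cache disabled, 50 MB counter cache
nodetool -h 127.0.0.1 -p 7199 setcachecapacity 100 0 50
```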
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setcachekeystosave.html b/src/doc/4.0-alpha4/tools/nodetool/setcachekeystosave.html deleted file mode 100644 index 7d310f7bb..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setcachekeystosave.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachekeystosave" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setcachekeystosave
-
-Usage
-
-NAME
-        nodetool setcachekeystosave - Set number of keys saved by each cache for
-        faster post-restart warmup. 0 to disable
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachekeystosave [--]
-                <key-cache-keys-to-save> <row-cache-keys-to-save>
-                <counter-cache-keys-to-save>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <key-cache-keys-to-save> <row-cache-keys-to-save>
-        <counter-cache-keys-to-save>
-            The number of keys saved by each cache. 0 to disable
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setcompactionthreshold.html b/src/doc/4.0-alpha4/tools/nodetool/setcompactionthreshold.html deleted file mode 100644 index 5a37756ce..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setcompactionthreshold.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setcompactionthreshold
-
-Usage
-
-NAME
-        nodetool setcompactionthreshold - Set min and max compaction thresholds
-        for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthreshold [--]
-                <keyspace> <table> <minthreshold> <maxthreshold>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <minthreshold> <maxthreshold>
-            The keyspace, the table, min and max threshold
-
-
-
-
-
-
-
-
-
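An illustrative invocation; keyspace, table and threshold values are examples only.

```bash
# Allow compaction to start at 4 SSTables and batch at most 32 at once for one table
nodetool -h 127.0.0.1 -p 7199 setcompactionthreshold my_keyspace my_table 4 32
```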
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setcompactionthroughput.html b/src/doc/4.0-alpha4/tools/nodetool/setcompactionthroughput.html deleted file mode 100644 index 45ebac88d..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setcompactionthroughput.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setcompactionthroughput
-
-Usage
-
-NAME
-        nodetool setcompactionthroughput - Set the MB/s throughput cap for
-        compaction in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in MB, 0 to disable throttling
-
-
-
-
-
-
-
-
-
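An illustrative invocation; host, port and the cap value are placeholders.

```bash
# Cap compaction throughput at 64 MB/s; 0 disables throttling
nodetool -h 127.0.0.1 -p 7199 setcompactionthroughput 64
```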
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setconcurrency.html b/src/doc/4.0-alpha4/tools/nodetool/setconcurrency.html deleted file mode 100644 index 71d524cb3..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setconcurrency.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setconcurrency
-
-Usage
-
-NAME
-        nodetool setconcurrency - Set maximum concurrency for processing stage
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrency [--]
-                <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-                <maximum-concurrency>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-        <maximum-concurrency>
-            Set concurrency for processing stage
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setconcurrentcompactors.html b/src/doc/4.0-alpha4/tools/nodetool/setconcurrentcompactors.html deleted file mode 100644 index cef60c81d..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setconcurrentcompactors.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setconcurrentcompactors
-
-Usage
-
-NAME
-        nodetool setconcurrentcompactors - Set number of concurrent compactors
-        in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentcompactors [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Number of concurrent compactors, greater than 0.
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setconcurrentviewbuilders.html b/src/doc/4.0-alpha4/tools/nodetool/setconcurrentviewbuilders.html deleted file mode 100644 index 7c0a9e33b..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setconcurrentviewbuilders.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setconcurrentviewbuilders
-
-Usage
-
-NAME
-        nodetool setconcurrentviewbuilders - Set the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentviewbuilders [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Number of concurrent view builders, greater than 0.
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/sethintedhandoffthrottlekb.html b/src/doc/4.0-alpha4/tools/nodetool/sethintedhandoffthrottlekb.html deleted file mode 100644 index a3b429cf6..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/sethintedhandoffthrottlekb.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sethintedhandoffthrottlekb" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-sethintedhandoffthrottlekb
-
-Usage
-
-NAME
-        nodetool sethintedhandoffthrottlekb - Set hinted handoff throttle in kb
-        per second, per delivery thread.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sethintedhandoffthrottlekb
-                [--] <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_kb_per_sec>
-            Value in KB per second
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setinterdcstreamthroughput.html b/src/doc/4.0-alpha4/tools/nodetool/setinterdcstreamthroughput.html deleted file mode 100644 index 2e5b9db07..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setinterdcstreamthroughput.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setinterdcstreamthroughput
-
-Usage
-
-NAME
-        nodetool setinterdcstreamthroughput - Set the Mb/s throughput cap for
-        inter-datacenter streaming in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setinterdcstreamthroughput
-                [--] <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setlogginglevel.html b/src/doc/4.0-alpha4/tools/nodetool/setlogginglevel.html deleted file mode 100644 index be33f9683..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setlogginglevel.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setlogginglevel" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setlogginglevel
-
-Usage
-
-NAME
-        nodetool setlogginglevel - Set the log level threshold for a given
-        component or class. Will reset to the initial configuration if called
-        with no parameters.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setlogginglevel [--]
-                <component|class> <level>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <component|class> <level>
-            The component or class to change the level for and the log level
-            threshold to set. Will reset to initial level if omitted. Available
-            components: bootstrap, compaction, repair, streaming, cql, ring
-
-
-
-
-
-
-
-
-
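Two illustrative invocations; the component name and level are examples, and the second form relies on the reset behaviour described above.

```bash
# Raise the repair component to DEBUG
nodetool -h 127.0.0.1 -p 7199 setlogginglevel repair DEBUG
# Calling with no arguments resets logging to the configured levels
nodetool -h 127.0.0.1 -p 7199 setlogginglevel
```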
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setmaxhintwindow.html b/src/doc/4.0-alpha4/tools/nodetool/setmaxhintwindow.html deleted file mode 100644 index 8d7d6a148..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setmaxhintwindow.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setmaxhintwindow
-
-Usage
-
-NAME
-        nodetool setmaxhintwindow - Set the specified max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setmaxhintwindow [--]
-                <value_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_ms>
-            Value of maxhintwindow in ms
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/setstreamthroughput.html b/src/doc/4.0-alpha4/tools/nodetool/setstreamthroughput.html deleted file mode 100644 index d1650d0ff..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/setstreamthroughput.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-setstreamthroughput
-
-Usage
-
-NAME
-        nodetool setstreamthroughput - Set the Mb/s throughput cap for streaming
-        in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setstreamthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
-
-
-
-
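An illustrative invocation; host, port and the cap value are placeholders.

```bash
# Cap streaming throughput at 200 Mb/s; 0 disables throttling
nodetool -h 127.0.0.1 -p 7199 setstreamthroughput 200
```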
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/settimeout.html b/src/doc/4.0-alpha4/tools/nodetool/settimeout.html deleted file mode 100644 index 0099211b4..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/settimeout.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-settimeout
-
-Usage
-
-NAME
-        nodetool settimeout - Set the specified timeout in ms, or 0 to disable
-        timeout
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settimeout [--] <timeout_type>
-                <timeout_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <timeout_type> <timeout_in_ms>
-            Timeout type followed by value in ms (0 disables socket streaming
-            timeout). Type should be one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
-
-
-
-
-
-
-
-
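An illustrative invocation; the timeout type must be one of the types listed above, and the value is an example.

```bash
# Set the read timeout to 10000 ms
nodetool -h 127.0.0.1 -p 7199 settimeout read 10000
```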
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/settraceprobability.html b/src/doc/4.0-alpha4/tools/nodetool/settraceprobability.html deleted file mode 100644 index 57ead552b..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/settraceprobability.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-settraceprobability
-
-Usage
-
-NAME
-        nodetool settraceprobability - Sets the probability for tracing any
-        given request to value. 0 disables, 1 enables for all requests, 0 is the
-        default
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settraceprobability [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <value>
-            Trace probability between 0 and 1 (ex: 0.2)
-
-
-
-
-
-
-
-
-
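An illustrative pair of invocations; host, port and the probability are placeholders.

```bash
# Trace roughly 1 in 1000 requests
nodetool -h 127.0.0.1 -p 7199 settraceprobability 0.001
# Turn tracing off again
nodetool -h 127.0.0.1 -p 7199 settraceprobability 0
```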
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/sjk.html b/src/doc/4.0-alpha4/tools/nodetool/sjk.html deleted file mode 100644 index 26b0246bf..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/sjk.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sjk" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-sjk
-
-Usage
-
-NAME
-        nodetool sjk - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk
-        --help' for more information.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sjk [--] [<args>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <args>
-            Arguments passed as is to 'Swiss Java Knife'.
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/snapshot.html b/src/doc/4.0-alpha4/tools/nodetool/snapshot.html deleted file mode 100644 index 90638cae7..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/snapshot.html +++ /dev/null @@ -1,152 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "snapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-snapshot
-
-Usage
-
-NAME
-        nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-        of the specified table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] snapshot
-                [(-cf <table> | --column-family <table> | --table <table>)]
-                [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-                [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-        -cf <table>, --column-family <table>, --table <table>
-            The table name (you must specify one and only one keyspace for using
-            this option)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-            The list of Keyspace.table to take snapshot.(you must not specify
-            only keyspace)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -sf, --skip-flush
-            Do not flush memtables before snapshotting (snapshot will not
-            contain unflushed data)
-
-        -t <tag>, --tag <tag>
-            The name of the snapshot
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces...>]
-            List of keyspaces. By default, all keyspaces
-
-
-
-
-
-
-
-
-
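Two usage sketches built only from the flags listed above; keyspace, table and tag names are placeholders.

```bash
# Snapshot two keyspaces under a named tag
nodetool -h 127.0.0.1 -p 7199 snapshot --tag pre_upgrade -- ks1 ks2
# Snapshot a single table, skipping the memtable flush
nodetool -h 127.0.0.1 -p 7199 snapshot --table my_table --skip-flush -- my_keyspace
```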
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/status.html b/src/doc/4.0-alpha4/tools/nodetool/status.html deleted file mode 100644 index 4d14c70a0..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/status.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "status" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-status
-
-Usage
-
-NAME
-        nodetool status - Print cluster information (state, load, IDs, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] status [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace>]
-            The keyspace name
-
-
-
-
-
-
-
-
-
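An illustrative invocation; host, port and keyspace name are placeholders.

```bash
# Cluster state and load, with ownership for one keyspace and hostnames instead of IPs
nodetool -h 127.0.0.1 -p 7199 status --resolve-ip -- my_keyspace
```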
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/statusautocompaction.html b/src/doc/4.0-alpha4/tools/nodetool/statusautocompaction.html deleted file mode 100644 index 790ebb814..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/statusautocompaction.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-statusautocompaction
-
-Usage
-
-NAME
-        nodetool statusautocompaction - status of autocompaction of the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusautocompaction
-                [(-a | --all)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --all
-            Show auto compaction status for each keyspace/table
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/statusbackup.html b/src/doc/4.0-alpha4/tools/nodetool/statusbackup.html deleted file mode 100644 index 21bd706c0..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/statusbackup.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-statusbackup
-
-Usage
-
-NAME
-        nodetool statusbackup - Status of incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/statusbinary.html b/src/doc/4.0-alpha4/tools/nodetool/statusbinary.html deleted file mode 100644 index ef07a7984..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/statusbinary.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-statusbinary
-
-Usage
-
-NAME
-        nodetool statusbinary - Status of native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/statusgossip.html b/src/doc/4.0-alpha4/tools/nodetool/statusgossip.html deleted file mode 100644 index 07d2ae034..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/statusgossip.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusgossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-statusgossip
-
-Usage
-
-NAME
-        nodetool statusgossip - Status of gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusgossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/statushandoff.html b/src/doc/4.0-alpha4/tools/nodetool/statushandoff.html deleted file mode 100644 index 3ebcc1e3d..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/statushandoff.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statushandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-statushandoff
-
-Usage
-
-NAME
-        nodetool statushandoff - Status of storing future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statushandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/stop.html b/src/doc/4.0-alpha4/tools/nodetool/stop.html deleted file mode 100644 index ec56a595a..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/stop.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stop" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-stop
-
-Usage
-
-NAME
-        nodetool stop - Stop compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stop
-                [(-id <compactionId> | --compaction-id <compactionId>)] [--] <compaction
-                type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -id <compactionId>, --compaction-id <compactionId>
-            Use -id to stop a compaction by the specified id. Ids can be found
-            in the transaction log files whose name starts with compaction_,
-            located in the table transactions folder.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <compaction type>
-            Supported types are COMPACTION, VALIDATION, CLEANUP, SCRUB,
-            UPGRADE_SSTABLES, INDEX_BUILD, TOMBSTONE_COMPACTION, ANTICOMPACTION,
-            VERIFY, VIEW_BUILD, INDEX_SUMMARY, RELOCATE, GARBAGE_COLLECT
-
-
-
-
-
-
-
-
-
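An illustrative invocation using one of the compaction types listed above; host and port are placeholders.

```bash
# Stop all running validation compactions on the node
nodetool -h 127.0.0.1 -p 7199 stop -- VALIDATION
```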
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/stopdaemon.html b/src/doc/4.0-alpha4/tools/nodetool/stopdaemon.html deleted file mode 100644 index 75b226d63..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/stopdaemon.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stopdaemon" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
-stopdaemon
-
-Usage
-
-NAME
-        nodetool stopdaemon - Stop cassandra daemon
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stopdaemon
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/tablehistograms.html b/src/doc/4.0-alpha4/tools/nodetool/tablehistograms.html deleted file mode 100644 index 7a7f90184..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/tablehistograms.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablehistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablehistograms

-
-
-

Usage

-
NAME
-        nodetool tablehistograms - Print statistic histograms for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablehistograms [--]
-                [<keyspace> <table> | <keyspace.table>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        [<keyspace> <table> | <keyspace.table>]
-            The keyspace and table name
-
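A hedged example using hypothetical keyspace and table names; both argument forms from the
synopsis are equivalent:

        nodetool tablehistograms keyspace1 standard1
        nodetool tablehistograms keyspace1.standard1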
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/tablestats.html b/src/doc/4.0-alpha4/tools/nodetool/tablestats.html deleted file mode 100644 index 0b957afc2..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/tablestats.html +++ /dev/null @@ -1,169 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablestats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablestats

-
-
-

Usage

-
NAME
-        nodetool tablestats - Print statistics on tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablestats
-                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]
-                [(-s <sort_key> | --sort <sort_key>)] [(-t <top> | --top <top>)] [--]
-                [<keyspace.table>...]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -i
-            Ignore the list of tables and display the remaining tables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <sort_key>, --sort <sort_key>
-            Sort tables by specified sort key
-            (average_live_cells_per_slice_last_five_minutes,
-            average_tombstones_per_slice_last_five_minutes,
-            bloom_filter_false_positives, bloom_filter_false_ratio,
-            bloom_filter_off_heap_memory_used, bloom_filter_space_used,
-            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,
-            compacted_partition_minimum_bytes,
-            compression_metadata_off_heap_memory_used, dropped_mutations,
-            full_name, index_summary_off_heap_memory_used, local_read_count,
-            local_read_latency_ms, local_write_latency_ms,
-            maximum_live_cells_per_slice_last_five_minutes,
-            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,
-            memtable_data_size, memtable_off_heap_memory_used,
-            memtable_switch_count, number_of_partitions_estimate,
-            off_heap_memory_used_total, pending_flushes, percent_repaired,
-            read_latency, reads, space_used_by_snapshots_total, space_used_live,
-            space_used_total, sstable_compression_ratio, sstable_count,
-            table_name, write_latency, writes)
-
-        -t <top>, --top <top>
-            Show only the top K tables for the sort key (specify the number K of
-            tables to be shown)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        [<keyspace.table>...]
-            List of tables (or keyspace) names
-
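Hedged examples with hypothetical names; the second form sorts by one of the keys listed
above and limits the output to the top five tables:

        nodetool tablestats keyspace1.standard1
        nodetool tablestats --sort read_latency --top 5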
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/toppartitions.html b/src/doc/4.0-alpha4/tools/nodetool/toppartitions.html deleted file mode 100644 index 1b65aead2..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/toppartitions.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "toppartitions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

toppartitions

-
-
-

Usage

-
NAME
-        nodetool toppartitions - Sample and print the most active partitions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] toppartitions [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
-
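A hedged example with hypothetical keyspace and table names that samples the table for
10000 milliseconds:

        nodetool toppartitions keyspace1 standard1 10000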
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/tpstats.html b/src/doc/4.0-alpha4/tools/nodetool/tpstats.html deleted file mode 100644 index 9cd88ed65..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/tpstats.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tpstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tpstats

-
-
-

Usage

-
NAME
-        nodetool tpstats - Print usage statistics of thread pools
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tpstats
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
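Hedged examples; the second form uses the -F option documented above to emit
machine-readable output:

        nodetool tpstats
        nodetool tpstats -F json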
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/truncatehints.html b/src/doc/4.0-alpha4/tools/nodetool/truncatehints.html deleted file mode 100644 index 12af5b5b4..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/truncatehints.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "truncatehints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

truncatehints

-
-
-

Usage

-
NAME
-        nodetool truncatehints - Truncate all hints on the local node, or
-        truncate hints for the endpoint(s) specified.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] truncatehints [--] [endpoint
-                ... ]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        [endpoint ... ]
-            Endpoint address(es) to delete hints for, either ip address
-            ("127.0.0.1") or hostname
-
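Hedged examples; the first form truncates all hints on the local node, and the endpoint
address in the second form is hypothetical:

        nodetool truncatehints
        nodetool truncatehints 192.168.1.20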
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/upgradesstables.html b/src/doc/4.0-alpha4/tools/nodetool/upgradesstables.html deleted file mode 100644 index ada5dd4f4..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/upgradesstables.html +++ /dev/null @@ -1,145 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "upgradesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

upgradesstables

-
-
-

Usage

-
NAME
-        nodetool upgradesstables - Rewrite sstables (for the requested tables)
-        that are not on the current version (thus upgrading them to said current
-        version)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] upgradesstables
-                [(-a | --include-all-sstables)] [(-j <jobs> | --jobs <jobs>)] [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --include-all-sstables
-            Use -a to include all sstables, even those already on the current
-            version
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to upgrade simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
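Hedged examples with hypothetical names; the second form rewrites all sstables (including
those already on the current version) using two parallel jobs:

        nodetool upgradesstables keyspace1 standard1
        nodetool upgradesstables -a -j 2 keyspace1 standard1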
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/verify.html b/src/doc/4.0-alpha4/tools/nodetool/verify.html deleted file mode 100644 index fa638c20d..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/verify.html +++ /dev/null @@ -1,154 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "verify" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

verify

-
-
-

Usage

-
NAME
-        nodetool verify - Verify (check data checksum for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] verify
-                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]
-                [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)] [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -c, --check-version
-            Also check that all sstables are the latest version
-
-        -d, --dfp
-            Invoke the disk failure policy if a corrupt sstable is found
-
-        -e, --extended-verify
-            Verify each cell data, beyond simply checking sstable checksums
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick check - avoid reading all data to verify checksums
-
-        -r, --rsc
-            Mutate the repair status on corrupt sstables
-
-        -t, --check-tokens
-            Verify that all tokens in sstables are owned by this node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
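Hedged examples with hypothetical names; the second form adds the extended per-cell
verification documented above:

        nodetool verify keyspace1 standard1
        nodetool verify -e keyspace1 standard1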
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/version.html b/src/doc/4.0-alpha4/tools/nodetool/version.html deleted file mode 100644 index f1b317fc0..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/version.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "version" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

version

-
-
-

Usage

-
NAME
-        nodetool version - Print cassandra version
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] version
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/nodetool/viewbuildstatus.html b/src/doc/4.0-alpha4/tools/nodetool/viewbuildstatus.html deleted file mode 100644 index 90f53bf5e..000000000 --- a/src/doc/4.0-alpha4/tools/nodetool/viewbuildstatus.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "viewbuildstatus" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

viewbuildstatus

-
-
-

Usage

-
NAME
-        nodetool viewbuildstatus - Show progress of a materialized view build
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] viewbuildstatus [--]
-                <keyspace> <view> | <keyspace.view>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options).
-
-        <keyspace> <view> | <keyspace.view>
-            The keyspace and view name
-
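A hedged example with a hypothetical keyspace and view name; both argument forms from the
synopsis are equivalent:

        nodetool viewbuildstatus keyspace1 standard1_by_source
        nodetool viewbuildstatus keyspace1.standard1_by_source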
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/index.html b/src/doc/4.0-alpha4/tools/sstable/index.html deleted file mode 100644 index c442f6edb..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/index.html +++ /dev/null @@ -1,229 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "SSTable Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

SSTable Tools

-

This section describes the functionality of the various sstable tools.

-

Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped.

-
- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstabledump.html b/src/doc/4.0-alpha4/tools/sstable/sstabledump.html deleted file mode 100644 index 6e9d8e340..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstabledump.html +++ /dev/null @@ -1,404 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstabledump" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstabledump

-

Dump contents of a given SSTable to standard output in JSON format.

-

You must supply exactly one sstable.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstabledump <options> <sstable file path>

-d          CQL row per line internal representation
-e          Enumerate partition keys only
-k <arg>    Partition key
-x <arg>    Excluded partition key(s)
-t          Print raw timestamps instead of iso8601 date strings
-l          Output each row as a separate JSON object
-

If necessary, use sstableutil first to find out the sstables used by a table.

-
-
-

Dump entire table

-

Dump the entire table without any options.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26
-
-cat eventlog_dump_2018Jul26
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-]
-
-
-
-
-

Dump table in a more manageable format

-

Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. ref: https://issues.apache.org/jira/browse/CASSANDRA-13848

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines
-
-cat eventlog_dump_2018Jul26_justlines
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Dump only keys

-

Dump only the keys by using the -e option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys
-
-cat eventlog_dump_2018Jul26b
-[ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ]
-
-
-
-
-

Dump row for a single key

-

Dump a single key using the -k option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey
-
-cat eventlog_dump_2018Jul26_singlekey
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Exclude a key or keys in dump of rows

-

Dump a table except for the rows excluded with the -x option. Multiple keys can be used.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e  > eventlog_dump_2018Jul26_excludekeys
-
-cat eventlog_dump_2018Jul26_excludekeys
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display raw timestamps

-

By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times
-
-cat eventlog_dump_2018Jul26_times
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "1532118147028809" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display internal structure in output

-

Dump the table in a format that reflects the internal structure.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d
-
-cat eventlog_dump_2018Jul26_d
-[3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]:  | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711]
-[d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]:  | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522]
-[cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]:  | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809]
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstableexpiredblockers.html b/src/doc/4.0-alpha4/tools/sstable/sstableexpiredblockers.html deleted file mode 100644 index 36d2e1910..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstableexpiredblockers.html +++ /dev/null @@ -1,149 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableexpiredblockers" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableexpiredblockers

-

During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable.

-

This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-10015

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableexpiredblockers <keyspace> <table>

-
-
-

Output blocked sstables

-

If sstables exist for the table, but none of them hold data older than the newest tombstone in an expired sstable, the script will return nothing.

-

Otherwise, the script will return “<sstable> blocks <#> expired sstables from getting dropped”, followed by a list of the blocked sstables.

-

Example:

-
sstableexpiredblockers keyspace1 standard1
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstablelevelreset.html b/src/doc/4.0-alpha4/tools/sstable/sstablelevelreset.html deleted file mode 100644 index 3b5e5a577..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstablelevelreset.html +++ /dev/null @@ -1,175 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablelevelreset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablelevelreset

-

If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration.

-

See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5271

-
-

Usage

-

sstablelevelreset --really-reset <keyspace> <table>

-

The --really-reset flag is required to ensure this intrusive command is not run accidentally.

-
-
-

Table not found

-

If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error.

-

Example:

-
ColumnFamily not found: keyspace/evenlog.
-
-
-
-
-

Table has no sstables

-

Example:

-
Found no sstables, did you give the correct keyspace/table?
-
-
-
-
-

Table already at level 0

-

The script will not set the level if it is already set to 0.

-

Example:

-
Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0
-
-
-
-
-

Table levels reduced to 0

-

If the level is not already 0, then this will reset it to 0.

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 1
-
-sstablelevelreset --really-reset keyspace eventlog
-Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 0
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstableloader.html b/src/doc/4.0-alpha4/tools/sstable/sstableloader.html deleted file mode 100644 index 9a2e4551f..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstableloader.html +++ /dev/null @@ -1,409 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableloader" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableloader

-

Bulk-load the sstables found in the directory <dir_path> to the configured cluster. The parent directories of <dir_path> are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files.

-

Several of the options listed below don’t work quite as intended, and in those cases, workarounds are mentioned for specific use cases.

-

To avoid having the sstable files that are being loaded compacted while they are read, place the files in an alternate keyspace/table path outside the data directory.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-1278

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableloader <options> <dir_path>

-d, --nodes <initial hosts>
    Required. Try to connect to these hosts (comma-separated) initially for ring information
-u, --username <username>
    username for Cassandra authentication
-pw, --password <password>
    password for Cassandra authentication
-p, --port <native transport port>
    port used for native connection (default 9042)
-sp, --storage-port <storage port>
    port used for internode communication (default 7000)
-ssp, --ssl-storage-port <ssl storage port>
    port used for TLS internode communication (default 7001)
--no-progress
    don’t display progress
-t, --throttle <throttle>
    throttle speed in Mbits (default unlimited)
-idct, --inter-dc-throttle <inter-dc-throttle>
    inter-datacenter throttle speed in Mbits (default unlimited)
-cph, --connections-per-host <connectionsPerHost>
    number of concurrent connections-per-host
-i, --ignore <NODES>
    don’t stream to this (comma separated) list of nodes
-alg, --ssl-alg <ALGORITHM>
    Client SSL: algorithm (default: SunX509)
-ciphers, --ssl-ciphers <CIPHER-SUITES>
    Client SSL: comma-separated list of encryption suites to use
-ks, --keystore <KEYSTORE>
    Client SSL: full path to keystore
-kspw, --keystore-password <KEYSTORE-PASSWORD>
    Client SSL: password of the keystore
-st, --store-type <STORE-TYPE>
    Client SSL: type of store
-ts, --truststore <TRUSTSTORE>
    Client SSL: full path to truststore
-tspw, --truststore-password <TRUSTSTORE-PASSWORD>
    Client SSL: password of the truststore
-prtcl, --ssl-protocol <PROTOCOL>
    Client SSL: connections protocol to use (default: TLS)
-ap, --auth-provider <auth provider>
    custom AuthProvider class name for cassandra authentication
-f, --conf-path <path to config file>
    cassandra.yaml file path for streaming throughput and client/server SSL
-v, --verbose
    verbose output
-h, --help
    display this help message
-

You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options.

-
-
-

Load sstables from a Snapshot

-

Copy the snapshot sstables into an accessible directory and use sstableloader to restore them.

-

Example:

-
cp snapshots/1535397029191/* /path/to/keyspace1/standard1/
-
-sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4700000
-   Total duration (ms):          : 4390
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

The -d or --nodes option is required, or the script will not run.

-

Example:

-
sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Initial hosts must be specified (-d)
-
-
-
-
-

Use a Config File for SSL Clusters

-

If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool to the cassandra.yaml with the relevant server_encryption_options (e.g., truststore location, algorithm). This will work better than passing individual ssl options shown above to sstableloader on the command line.

-

Example:

-
sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 9.165KiB/s (avg: 9.165KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 5.147MiB/s (avg: 18.299KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 9.751MiB/s (avg: 27.423KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 8.203MiB/s (avg: 36.524KiB/s)
-...
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 9356 ms
-   Average transfer rate   : 480.105KiB/s
-   Peak transfer rate      : 586.410KiB/s
-
-
-
-
-

Hide Progress Output

-

To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option.

-

Example:

-
sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2]
-
-
-
-
-

Get More Detail

-

Using the --verbose option will provide much more progress output.

-

Example:

-
sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 12.056KiB/s (avg: 12.056KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 9.092MiB/s (avg: 24.081KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 18.832MiB/s (avg: 36.099KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 2.253MiB/s (avg: 47.882KiB/s)
-progress: [/172.17.0.2]0:0/1 7  % total: 7% 6.388MiB/s (avg: 59.743KiB/s)
-progress: [/172.17.0.2]0:0/1 8  % total: 8% 14.606MiB/s (avg: 71.635KiB/s)
-progress: [/172.17.0.2]0:0/1 9  % total: 9% 8.880MiB/s (avg: 83.465KiB/s)
-progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s)
-progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s)
-progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s)
-progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s)
-progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s)
-progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s)
-progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s)
-progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s)
-progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s)
-progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s)
-progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s)
-progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s)
-progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s)
-progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s)
-progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s)
-progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s)
-progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s)
-progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s)
-progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s)
-progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s)
-progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s)
-progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s)
-progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s)
-progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s)
-progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s)
-progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s)
-progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s)
-progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s)
-progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s)
-progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s)
-progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s)
-progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s)
-progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s)
-progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s (avg: 462.545KiB/s)
-progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s)
-progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s)
-progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s)
-progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s)
-progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s)
-progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s)
-progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s)
-progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s)
-progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s)
-progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s)
-progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s)
-progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s)
-progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s)
-progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s)
-progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s)
-progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s)
-progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s)
-progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s)
-progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s)
-progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s)
-progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s)
-progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s)
-progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s)
-progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s)
-progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s)
-progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s)
-progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s)
-progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s)
-progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 6706 ms
-   Average transfer rate   : 669.835KiB/s
-   Peak transfer rate      : 767.802KiB/s
-
-
-
-
-

Throttling Load

-

To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below.

-

Example:

-
sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 0 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 37634
-   Average transfer rate (MB/s): : 0
-   Peak transfer rate (MB/s):    : 0
-
-
-
-
-

Speeding up Load

-

To speed up the load process, the number of connections per host can be increased.

-

Example:

-
sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 100
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 3486
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

This small data set doesn’t benefit much from the increase in connections per host, but note that the total duration has decreased in this example.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstablemetadata.html b/src/doc/4.0-alpha4/tools/sstable/sstablemetadata.html deleted file mode 100644 index 0e89f2396..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstablemetadata.html +++ /dev/null @@ -1,473 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablemetadata" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablemetadata

-

Print information about an sstable from the related Statistics.db and Summary.db files to standard output.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablemetadata <options> <sstable filename(s)>

--gc_grace_seconds <arg>
    The gc_grace_seconds to use when calculating droppable tombstones
-
- -
-

Specify gc grace seconds

-

To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn’t access the schema directly, this is a way to more accurately estimate droppable tombstones – for example, if you pass in gc_grace_seconds matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the current machine time (in seconds).

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-12208

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4
-Estimated tombstone drop times:
-1536599100:         1
-1536599640:         1
-1536599700:         2
-
-echo $(date +%s)
-1536602005
-
-# if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 4.0E-5
-
-# if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 9.61111111111111E-6
-
-# if gc_grace_seconds was configured at 5000, none of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 0.0
-
-
-
-
-

Explanation of each value printed above

SSTable: prefix of the sstable filenames related to this sstable
Partitioner: partitioner type used to distribute data across nodes; defined in cassandra.yaml
Bloom Filter FP: precision of Bloom filter used in reads; defined in the table definition
Minimum timestamp: minimum timestamp of any entry in this sstable, in epoch microseconds
Maximum timestamp: maximum timestamp of any entry in this sstable, in epoch microseconds
SSTable min local deletion time: minimum timestamp of deletion date, based on TTL, in epoch seconds
SSTable max local deletion time: maximum timestamp of deletion date, based on TTL, in epoch seconds
Compressor: blank (-) by default; if not blank, indicates type of compression enabled on the table
TTL min: time-to-live in seconds; default 0 unless defined in the table definition
TTL max: time-to-live in seconds; default 0 unless defined in the table definition
First token: lowest token and related key found in the sstable summary
Last token: highest token and related key found in the sstable summary
Estimated droppable tombstones: ratio of tombstones to columns, using configured gc grace seconds if relevant
SSTable level: compaction level of this sstable, if leveled compaction (LCS) is used
Repaired at: the timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds
Replay positions covered: the interval of time and commitlog positions related to this sstable
totalColumnsSet: number of cells in the table
totalRows: number of rows in the table
Estimated tombstone drop times: approximate number of rows that will expire, ordered by epoch seconds
Count Row Size Cell Count: two histograms in two columns; one represents distribution of Row Size and the other represents distribution of Cell Count
Estimated cardinality: an estimate of unique values, used for compaction
EncodingStats* minTTL: in epoch milliseconds
EncodingStats* minLocalDeletionTime: in epoch seconds
EncodingStats* minTimestamp: in epoch microseconds
KeyType: the type of partition key, useful in reading and writing data from/to storage; defined in the table definition
ClusteringTypes: the type of clustering key, useful in reading and writing data from/to storage; defined in the table definition
StaticColumns: a list of the shared columns in the table
RegularColumns: a list of non-static, non-key columns in the table
-
• For the encoding stats values, the delta of this and the current epoch time is used when encoding and storing data in the most optimal way.
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstableofflinerelevel.html b/src/doc/4.0-alpha4/tools/sstable/sstableofflinerelevel.html deleted file mode 100644 index 9035df58e..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstableofflinerelevel.html +++ /dev/null @@ -1,190 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableofflinerelevel" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableofflinerelevel

-

When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-8301

-

This is done as follows: sstables are sorted by their last token. Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size):

-
L3 [][][][][][][][][][][]
-L2 [    ][    ][    ][  ]
-L1 [          ][        ]
-L0 [                    ]
-
-
-

After being dropped to L0 and sorted by last token, the sstables will look like this (to illustrate overlap, the overlapping ones are put on a new line):

-
[][][]
-[    ][][][]
-    [    ]
-[          ]
-...
-
-
-

Then, we start iterating from the smallest last-token and adding all sstables that do not cause an overlap to a level. We will reconstruct the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below.

-

 If we end up with more levels than expected, we put all sstables from the extra levels in L0. For example, the original L0 files will most likely end up in a level of their own, since they usually overlap many other sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
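The script does not perform this check for you; one possible sanity check before running any of the sstable tools (a sketch, assuming a standard installation where the server runs as org.apache.cassandra.service.CassandraDaemon) is:

pgrep -f CassandraDaemon || echo "Cassandra is not running"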
-

Usage

-

 sstableofflinerelevel [--dry-run] <keyspace> <table>

-
-
-

Doing a dry run

-

 Use the --dry-run option to see the current level distribution and predicted level after the change.

-

Example:

-
sstableofflinerelevel --dry-run keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-Potential leveling:
-L0=1
-L1=1
-
-
-
-
-

Running a relevel

-

Example:

-
sstableofflinerelevel keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-New leveling:
-L0=1
-L1=1
-
-
-
-
-

Keyspace or table not found

-

If an invalid keyspace and/or table is provided, an exception will be thrown.

-

Example:

-
sstableofflinerelevel --dry-run keyspace evenlog
-
-Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog
-    at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstablerepairedset.html b/src/doc/4.0-alpha4/tools/sstable/sstablerepairedset.html deleted file mode 100644 index c3a752bb1..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstablerepairedset.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablerepairedset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablerepairedset

-

 In some environments, repairs of large amounts of data can take a very long time. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can, if desired, be run only on unrepaired sstables.

-

Note that running a repair (e.g., via nodetool repair) doesn’t set the status of this metadata. Only setting the status of this metadata via this tool does.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5351

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

 sstablerepairedset --really-set <options> [-f <sstable-list> | <sstables>]

- ---- - - - - - - - - - - - - - - -
--really-set: required if you want to really set the status
--is-repaired: set the repairedAt status to the last modified time
--is-unrepaired: set the repairedAt status to 0
-f: use a file containing a list of sstables as the input
-
-
-

Set a lot of sstables to unrepaired status

-

There are many ways to do this programmatically. This way would likely include variables for the keyspace and table.

-

Example:

-
find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %
-
-
-
-
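A parameterised variant of the example above (a sketch only; the keyspace name and table directory are the ones used in this example and should be replaced with your own):

KEYSPACE=keyspace1
TABLE_DIR=standard1-d936bd20a17c11e8bc92a55ed562cd82
find /var/lib/cassandra/data/$KEYSPACE/$TABLE_DIR/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %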
-

Set one to many sstables to repaired status

-

Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice.

-

Example:

-
nodetool repair keyspace1 standard1
-find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired %
-
-
-
- -
-

Using command in a script

-

If you know you ran repair 2 weeks ago, you can do something like the following:

-
sstablerepairedset --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstablescrub.html b/src/doc/4.0-alpha4/tools/sstable/sstablescrub.html deleted file mode 100644 index 62f400dd6..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstablescrub.html +++ /dev/null @@ -1,211 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablescrub" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablescrub

-

Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4321

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablescrub <options> <keyspace> <table>

- ---- - - - - - - - - - - - - - - - - - - - - - - - -
--debug: display stack traces
-h, --help: display this help message
-m, --manifest-check: only check and repair the leveled manifest, without actually scrubbing the sstables
-n, --no-validate: do not validate columns using column validator
-r, --reinsert-overflowed-ttl: rewrites rows with overflowed expiration date affected by CASSANDRA-14092 with the maximum supported expiration date of 2038-01-19T03:14:06+00:00. The rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows.
-s, --skip-corrupted: skip corrupt rows in counter tables
-v, --verbose: verbose output
-
-
-

Basic Scrub

-

The scrub without options will do a snapshot first, then write all non-corrupted files to a new sstable.

-

Example:

-
sstablescrub keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped
-Checking leveled manifest
-
-
-
-
-

Scrub without Validation

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-9406

-

 Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but not corrupt. This data usually does not present any errors to the client.

-

Example:

-
sstablescrub --no-validate keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned
-
-
-
-
-

Skip Corrupted Counter Tables

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5930

-

 If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+.

-

Example:

-
sstablescrub --skip-corrupted keyspace1 counter1
-
-
-
-
-

Dealing with Overflow Dates

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-14092

-

 Use the --reinsert-overflowed-ttl option to rewrite rows whose TTL pushed the expiration date past the maximum supported value (causing an overflow).

-

Example:

-
sstablescrub --reinsert-overflowed-ttl keyspace1 counter1
-
-
-
-
-

Manifest Check

-

As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstablesplit.html b/src/doc/4.0-alpha4/tools/sstable/sstablesplit.html deleted file mode 100644 index 3fcccfe16..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstablesplit.html +++ /dev/null @@ -1,202 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablesplit" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablesplit

-

Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4766

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablesplit <options> <filename>

- ---- - - - - - - - - - - - - - - -
--debug: display stack traces
-h, --help: display this help message
--no-snapshot: don't snapshot the sstables before splitting
-s, --size <size>: maximum size in MB for the output sstables (default: 50)
-

This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped.

-
-
-

Split a File

-

 Split a large sstable into smaller sstables. By default, unless the --no-snapshot option is added, a snapshot of the original sstable will be taken and placed in the snapshots folder.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-Pre-split sstables snapshotted into snapshot pre-split-1533144514795
-
-
-
-
-

Split Multiple Files

-

Wildcards can be used in the filename portion of the command to split multiple files.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1*
-
-
-
-
-

Attempt to Split a Small File

-

If the file is already smaller than the split size provided, the sstable will not be split.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB)
-No sstables needed splitting.
-
-
-
-
-

Split a File into Specified Size

-

 The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB); specify only the number, not the units. For example, --size 50 is correct, but --size 50MB is not.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db
-Pre-split sstables snapshotted into snapshot pre-split-1533144996008
-
-
-
-
-

Split Without Snapshot

-

 By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it.

-

Example:

-
sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db
-
-
-

Note: There is no output, but you can see the results in your file system.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstableupgrade.html b/src/doc/4.0-alpha4/tools/sstable/sstableupgrade.html deleted file mode 100644 index f2450c50a..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstableupgrade.html +++ /dev/null @@ -1,249 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableupgrade" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableupgrade

-

Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version.

-

The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableupgrade <options> <keyspace> <table> [snapshot_name]

- ---- - - - - - - - - - - - -
--debug: display stack traces
-h, --help: display this help message
-k, --keep-source: do not delete the source sstables
-
-
-

Rewrite tables to the current Cassandra version

-

Start with a set of sstables in one version of Cassandra:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables:

-
sstableupgrade keyspace1 standard1
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 13:48 backups
--rw-r--r--   1 user  wheel      292 Aug 22 13:48 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4599475 Aug 22 13:48 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:48 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 13:48 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330807 Aug 22 13:48 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 13:48 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 13:48 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 13:48 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite tables to the current Cassandra version, and keep tables in old version

-

Again, starting with a set of sstables in one version:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:

-
sstableupgrade keyspace1 standard1 -k
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 14:00 backups
--rw-r--r--@  1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--@  1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--@  1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--@  1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--@  1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--@  1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--@  1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--@  1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
--rw-r--r--   1 user  wheel      292 Aug 22 14:01 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4596370 Aug 22 14:01 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 14:01 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 14:01 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330801 Aug 22 14:01 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 14:01 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 14:01 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 14:01 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite a snapshot to the current Cassandra version

-

Find the snapshot name:

-
nodetool listsnapshots
-
-Snapshot Details:
-Snapshot name       Keyspace name                Column family name           True size          Size on disk
-...
-1534962986979       keyspace1                    standard1                    5.85 MB            5.85 MB
-
-
-

Then rewrite the snapshot:

-
sstableupgrade keyspace1 standard1 1534962986979
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete.
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstableutil.html b/src/doc/4.0-alpha4/tools/sstable/sstableutil.html deleted file mode 100644 index 42c4a92b2..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstableutil.html +++ /dev/null @@ -1,205 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableutil" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableutil

-

List sstable files for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7066

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableutil <options> <keyspace> <table>

- ---- - - - - - - - - - - - - - - - - - - - - -
-c, --cleanup: clean up any outstanding transactions
-d, --debug: display stack traces
-h, --help: display this help message
-o, --oplog: include operation logs
-t, --type <arg>: all (list all files, final or temporary), tmp (list temporary files only), final (list final files only)
-v, --verbose: verbose output
-
-
-

List all sstables

-

The basic command lists the sstables associated with a given keyspace/table.

-

Example:

-
sstableutil keyspace eventlog
-Listing files...
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt
-
-
-
-
-

List only temporary sstables

-

Using the -t option followed by tmp will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra.

-
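For example, a hypothetical run against the keyspace and table used above (any output follows the same listing format):

sstableutil -t tmp keyspace eventlog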
-
-

List only final sstables

-

Using the -t option followed by final will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option.

-
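For example, again using the keyspace and table from the listing above:

sstableutil -t final keyspace eventlog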
-
-

Include transaction logs

-

Using the -o option will include transaction logs in the listing, in the format above.

-
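For example (same keyspace and table as above):

sstableutil -o keyspace eventlog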
-
-

Clean up sstables

-

Using the -c option removes any transactions left over from incomplete writes or compactions.

-
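For example, to clean up leftover transactions for the same table (remember that Cassandra must be stopped first):

sstableutil -c keyspace eventlog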

From the 3.0 upgrade notes:

-

New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix “add:” or “remove:”. They also contain a special line “commit”, only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the “add” prefix) and delete the old sstables (those with the “remove” prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/tools/sstable/sstableverify.html b/src/doc/4.0-alpha4/tools/sstable/sstableverify.html deleted file mode 100644 index 69dd66baa..000000000 --- a/src/doc/4.0-alpha4/tools/sstable/sstableverify.html +++ /dev/null @@ -1,205 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableverify" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableverify

-

Check sstable(s) for errors or corruption, for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5791

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableverify <options> <keyspace> <table>

- ---- - - - - - - - - - - - - - - -
--debug: display stack traces
-e, --extended: extended verification
-h, --help: display this help message
-v, --verbose: verbose output
-
-
-

Basic Verification

-

This is the basic verification. It is not a very quick process, and uses memory. You might need to increase your memory settings if you have many sstables.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-
-
-
-
-

Extended Verification

-

During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time.

-

Example:

-
root@DC1C1:/# sstableverify -e keyspace eventlog
-WARN  14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully
-
-
-
-
-

Corrupted File

-

Corrupted files are listed if they are detected by the script.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db
-
-
-

A similar (but less verbose) tool will show the suggested actions:

-
nodetool verify keyspace eventlog
-error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/troubleshooting/finding_nodes.html b/src/doc/4.0-alpha4/troubleshooting/finding_nodes.html deleted file mode 100644 index 0a9973bc6..000000000 --- a/src/doc/4.0-alpha4/troubleshooting/finding_nodes.html +++ /dev/null @@ -1,241 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Find The Misbehaving Nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Find The Misbehaving Nodes

-

 The first step to troubleshooting a Cassandra issue is to use error messages, metrics, and monitoring information to identify whether the issue lies with the clients or the server and, if it lies with the server, to find the problematic nodes in the Cassandra cluster. The goal is to determine whether this is a systemic issue (e.g. a query pattern that affects the entire cluster) or one isolated to a subset of nodes (e.g. neighbors holding a shared token range, or even a single node with bad hardware).

-

There are many sources of information that help determine where the problem -lies. Some of the most common are mentioned below.

-
-

Client Logs and Errors

-

 Clients of the cluster often leave the best breadcrumbs to follow. Perhaps client latencies or error rates have increased in a particular datacenter (likely eliminating other datacenters' nodes), or clients are receiving a particular kind of error code indicating a particular kind of problem. Troubleshooters can often rule out many failure modes just by reading the error messages. In fact, many Cassandra error messages include the last coordinator contacted to help operators find nodes to start with.

-

 Some common errors (likely culprit in parentheses), assuming the client has error names similar to the Datastax drivers:

-
    -
  • SyntaxError (client). This and other QueryValidationException -indicate that the client sent a malformed request. These are rarely server -issues and usually indicate bad queries.
  • -
  • UnavailableException (server): This means that the Cassandra coordinator node has rejected the query because it believes that insufficient replica nodes are available. If many coordinators are throwing this error, it likely means that there really are (typically) multiple nodes down in the cluster, and you can identify them using nodetool status. If only a single coordinator is throwing this error, it may mean that node has been partitioned from the rest.
  • -
  • OperationTimedOutException (server): This is the most frequent timeout message raised when clients set timeouts, and means that the query took longer than the client-specified timeout (it is a client-side timeout). The error message will include the coordinator node that was last tried, which is usually a good starting point. This error usually indicates either aggressive client timeout values or latent server coordinators/replicas.
  • -
  • ReadTimeoutException or WriteTimeoutException (server): These are raised when clients do not specify lower timeouts and the coordinator times out based on the values supplied in the cassandra.yaml configuration file. They usually indicate a serious server-side problem, as the default values are usually multiple seconds.
  • -
-
-
-

Metrics

-

If you have Cassandra metrics reporting to a -centralized location such as Graphite or -Grafana you can typically use those to narrow down -the problem. At this stage narrowing down the issue to a particular -datacenter, rack, or even group of nodes is the main goal. Some helpful metrics -to look at are:

-
-

Errors

-

 Cassandra refers to internode messaging errors as “drops”, and provides a number of Dropped Message Metrics to help narrow down errors. If particular nodes are actively dropping messages, they are likely related to the issue.

-
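One quick way to check an individual node for drops (a sketch, assuming nodetool is on the PATH; the dropped message counts are reported after the thread pool table):

$ nodetool tpstats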
-
-

Latency

-

For timeouts or latency related issues you can start with Table -Metrics by comparing Coordinator level metrics e.g. -CoordinatorReadLatency or CoordinatorWriteLatency with their associated -replica metrics e.g. ReadLatency or WriteLatency. Issues usually show -up on the 99th percentile before they show up on the 50th percentile or -the mean. While maximum coordinator latencies are not typically very -helpful due to the exponentially decaying reservoir used internally to produce -metrics, maximum replica latencies that correlate with increased 99th -percentiles on coordinators can help narrow down the problem.

-

There are usually three main possibilities:

-
    -
  1. Coordinator latencies are high on all nodes, but only a few nodes' local read latencies are high. This points to slow replica nodes; the coordinator latencies are just side-effects. This usually happens when clients are not token aware.
  2. -
  3. Coordinator latencies and replica latencies increase at the same time on a few nodes. If clients are token aware, this is almost always what happens, and it points to slow replicas of a subset of token ranges (only part of the ring).
  4. -
  5. Coordinator and local latencies are high on many nodes. This usually -indicates either a tipping point in the cluster capacity (too many writes or -reads per second), or a new query pattern.
  6. -
-

 It’s important to remember that, depending on the client’s load balancing behavior and consistency levels, coordinator and replica metrics may or may not correlate. In particular, if you use TokenAware policies, the same node’s coordinator and replica latencies will often increase together, but if you just use normal DCAwareRoundRobin, coordinator latencies can increase along with unrelated replica nodes’ latencies. For example:

-
    -
  • TokenAware + LOCAL_ONE: should always have coordinator and replica -latencies on the same node rise together
  • -
  • TokenAware + LOCAL_QUORUM: should always have coordinator and -multiple replica latencies rise together in the same datacenter.
  • -
  • TokenAware + QUORUM: replica latencies in other datacenters can -affect coordinator latencies.
  • -
  • DCAwareRoundRobin + LOCAL_ONE: coordinator latencies and unrelated -replica node’s latencies will rise together.
  • -
  • DCAwareRoundRobin + LOCAL_QUORUM: different coordinator and replica -latencies will rise together with little correlation.
  • -
-
-
-

Query Rates

-

 Sometimes the Table query rate metrics can help narrow down load issues, as a “small” increase in coordinator queries per second (QPS) may correlate with a very large increase in replica-level QPS. This most often happens with BATCH writes, where a client may send a single BATCH query containing 50 statements; if you have 9 copies (RF=3, three datacenters), every coordinator BATCH write turns into 450 replica writes! This is why keeping BATCHes to the same partition is so critical, otherwise you can exhaust significant CPU capacity with a “single” query.

-
-
-
-

Next Step: Investigate the Node(s)

-

 Once you have narrowed down the problem as much as possible (datacenter, rack, node), log in to one of the nodes using SSH and proceed to debug using logs, nodetool, and OS tools. If you are not able to log in, you may still have access to logs and nodetool remotely.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/troubleshooting/index.html b/src/doc/4.0-alpha4/troubleshooting/index.html deleted file mode 100644 index fcfd381a8..000000000 --- a/src/doc/4.0-alpha4/troubleshooting/index.html +++ /dev/null @@ -1,148 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Troubleshooting" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Troubleshooting

-

 As with any distributed database, sometimes Cassandra breaks and you will have to troubleshoot what is going on. Generally speaking, you can debug Cassandra like any other distributed Java program, meaning that you have to find which machines in your cluster are misbehaving and then isolate the problem using logs and tools. Luckily, Cassandra has a great set of introspection tools to help you.

-

 These pages include a number of command examples demonstrating various debugging and analysis techniques, mostly for Linux/Unix systems. If you don’t have access to the machines running Cassandra, or are running on Windows or another operating system, you may not be able to use the exact commands, but there are likely equivalent tools you can use.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/troubleshooting/reading_logs.html b/src/doc/4.0-alpha4/troubleshooting/reading_logs.html deleted file mode 100644 index 4c77456d2..000000000 --- a/src/doc/4.0-alpha4/troubleshooting/reading_logs.html +++ /dev/null @@ -1,351 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Cassandra Logs" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Logs

-

Cassandra has rich support for logging and attempts to give operators maximum -insight into the database while at the same time limiting noise to the logs.

-
-

Common Log Files

-

 Cassandra has three main logs, system.log, debug.log, and gc.log, which hold general logging messages, debugging logging messages, and Java garbage collection logs, respectively.

-

These logs by default live in ${CASSANDRA_HOME}/logs, but most Linux -distributions relocate logs to /var/log/cassandra. Operators can tune -this location as well as what levels are logged using the provided -logback.xml file.

-
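A quick way to check which location is in use on a given node (a sketch assuming a default package or tarball layout; adjust the paths to your distribution):

$ ls /var/log/cassandra 2>/dev/null || ls "${CASSANDRA_HOME}/logs"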
-

system.log

-

This log is the default Cassandra log and is a good place to start any -investigation. Some examples of activities logged to this log:

-
    -
  • Uncaught exceptions. These can be very useful for debugging errors.
  • -
  • GCInspector messages indicating long garbage collector pauses. When long -pauses happen Cassandra will print how long and also what was the state of -the system (thread state) at the time of that pause. This can help narrow -down a capacity issue (either not enough heap or not enough spare CPU).
  • -
  • Information about nodes joining and leaving the cluster as well as token metadata (data ownership) changes. This is useful for debugging network partitions, data movements, and more.
  • -
  • Keyspace/Table creation, modification, deletion.
  • -
  • StartupChecks that ensure optimal configuration of the operating system -to run Cassandra
  • -
  • Information about some background operational tasks (e.g. Index -Redistribution).
  • -
-

As with any application, looking for ERROR or WARN lines can be a -great first step:

-
$ # Search for warnings or errors in the latest system.log
-$ grep 'WARN\|ERROR' system.log | tail
-...
-
-$ # Search for warnings or errors in all rotated system.log
-$ zgrep 'WARN\|ERROR' system.log.* | less
-...
-
-
-
-
-

debug.log

-

 This log contains additional debugging information that may be useful when troubleshooting, but may be much noisier than the normal system.log. Some examples of activities logged to this log:

-
    -
  • Information about compactions, including when they start, which sstables -they contain, and when they finish.
  • -
  • Information about memtable flushes to disk, including when they happened, -how large the flushes were, and which commitlog segments the flush impacted.
  • -
-

This log can be very noisy, so it is highly recommended to use grep and -other log analysis tools to dive deep. For example:

-
$ # Search for messages involving a CompactionTask with 5 lines of context
-$ grep CompactionTask debug.log -C 5
-...
-
-$ # Look at the distribution of flush tasks per keyspace
-$ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c
-    6 compaction_history:
-    1 test_keyspace:
-    2 local:
-    17 size_estimates:
-    17 sstable_activity:
-
-
-
-
-

gc.log

-

The gc log is a standard Java GC log. With the default jvm.options -settings you get a lot of valuable information in this log such as -application pause times, and why pauses happened. This may help narrow -down throughput or latency issues to a mistuned JVM. For example you can -view the last few pauses:

-
$ grep stopped gc.log.0.current | tail
-2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds
-2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds
-2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds
-2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds
-2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds
-2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds
-2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds
-2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds
-2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds
-2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds
-
-
-

This shows a lot of valuable information including how long the application -was paused (meaning zero user queries were being serviced during the e.g. 33ms -JVM pause) as well as how long it took to enter the safepoint. You can use this -raw data to e.g. get the longest pauses:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n  | tail | xargs -IX grep X gc.log.0.current | sort -k 1
-2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds
-2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds
-2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds
-2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds
-2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds
-2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds
-2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds
-2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds
-2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds
-2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds
-
-
-

In this case any client waiting on a query would have experienced a 56ms -latency at 17:13:41.

-

Note that GC pauses are not _only_ garbage collection, although -generally speaking high pauses with fast safepoints indicate a lack of JVM heap -or mistuned JVM GC algorithm. High pauses with slow safepoints typically -indicate that the JVM is having trouble entering a safepoint which usually -indicates slow disk drives (Cassandra makes heavy use of memory mapped reads -which the JVM doesn’t know could have disk latency, so the JVM safepoint logic -doesn’t handle a blocking memory mapped read particularly well).

-

Using these logs you can even get a pause distribution with something like -histogram.py:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py
-# NumSamples = 410293; Min = 0.00; Max = 11.49
-# Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498
-# each ∎ represents a count of 5470
-    0.0001 -     1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
-    1.1496 -     2.2991 [    15]:
-    2.2991 -     3.4486 [     5]:
-    3.4486 -     4.5981 [     1]:
-    4.5981 -     5.7475 [     5]:
-    5.7475 -     6.8970 [     9]:
-    6.8970 -     8.0465 [     1]:
-    8.0465 -     9.1960 [     0]:
-    9.1960 -    10.3455 [     0]:
-   10.3455 -    11.4949 [     2]:
-
-
-

We can see in this case while we have very good average performance something -is causing multi second JVM pauses … In this case it was mostly safepoint -pauses caused by slow disks:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X  gc.log.0.current| sort -k 1
-2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds
-2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds
-2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds
-2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds
-2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds
-2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds
-2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds
-2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds
-2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds
-2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds
-
-
-

Sometimes reading and understanding java GC logs is hard, but you can take the -raw GC files and visualize them using tools such as GCViewer which take the Cassandra GC log as -input and show you detailed visual information on your garbage collection -performance. This includes pause analysis as well as throughput information. -For a stable Cassandra JVM you probably want to aim for pauses less than -200ms and GC throughput greater than 99% (ymmv).

-

Java GC pauses are one of the leading causes of tail latency in Cassandra -(along with drive latency) so sometimes this information can be crucial -while debugging tail latency issues.

-
-
-
-

Getting More Information

-

 If the default logging levels are insufficient, nodetool can set higher or lower logging levels for various packages and classes using the nodetool setlogginglevel command. Start by viewing the current levels:

-
$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-
-
-

Perhaps the Gossiper is acting up and we wish to enable it at TRACE -level for even more insight:

-
$ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE
-
-$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-org.apache.cassandra.gms.Gossiper                      TRACE
-
-$ grep TRACE debug.log | tail -2
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating
-heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ...
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local
-heartbeat version 2341 greater than 2340 for 127.0.0.1:7000
-
-
-

Note that any changes made this way are reverted on next Cassandra process -restart. To make the changes permanent add the appropriate rule to -logback.xml.

-
diff --git a/conf/logback.xml b/conf/logback.xml
-index b2c5b10..71b0a49 100644
---- a/conf/logback.xml
-+++ b/conf/logback.xml
-@@ -98,4 +98,5 @@ appender reference in the root level section below.
-   </root>
-
-   <logger name="org.apache.cassandra" level="DEBUG"/>
-+  <logger name="org.apache.cassandra.gms.Gossiper" level="TRACE"/>
- </configuration>
-
-
-
-

Full Query Logger

-

Cassandra 4.0 additionally ships with support for full query logging. This -is a highly performant binary logging tool which captures Cassandra queries -in real time, writes them (if possible) to a log file, and ensures the total -size of the capture does not exceed a particular limit. FQL is enabled with -nodetool and the logs are read with the provided bin/fqltool utility:

-
$ mkdir /var/tmp/fql_logs
-$ nodetool enablefullquerylog --path /var/tmp/fql_logs
-
-# ... do some querying
-
-$ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail
-Query time: 1530750927224
-Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name =
-'system_views' AND table_name = 'sstable_tasks';
-Values:
-
-Type: single
-Protocol version: 4
-Query time: 1530750934072
-Query: select * from keyspace1.standard1 ;
-Values:
-
-$ nodetool disablefullquerylog
-
-
-

Note that if you want more information than this tool provides, there are other -live capture options available such as packet capture.

-
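For example, a minimal packet capture sketch (assumes tcpdump is available and the cluster uses the default native protocol port 9042):

$ sudo tcpdump -i any -w /tmp/cql.pcap port 9042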
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/troubleshooting/use_nodetool.html b/src/doc/4.0-alpha4/troubleshooting/use_nodetool.html deleted file mode 100644 index 766c9aea5..000000000 --- a/src/doc/4.0-alpha4/troubleshooting/use_nodetool.html +++ /dev/null @@ -1,321 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Use Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Use Nodetool

-

Cassandra’s nodetool allows you to narrow problems from the cluster down -to a particular node and gives a lot of insight into the state of the Cassandra -process itself. There are dozens of useful commands (see nodetool help -for all the commands), but briefly some of the most useful for troubleshooting:

-
-

Cluster Status

-

You can use nodetool status to assess status of the cluster:

-
$ nodetool status <optional keyspace>
-
-Datacenter: dc1
-=======================
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-UN  127.0.1.1  4.69 GiB   1            100.0%            35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e  r1
-UN  127.0.1.2  4.71 GiB   1            100.0%            752e278f-b7c5-4f58-974b-9328455af73f  r2
-UN  127.0.1.3  4.69 GiB   1            100.0%            9dc1a293-2cc0-40fa-a6fd-9e6054da04a7  r3
-
-
-

In this case we can see that we have three nodes in one datacenter with about -4.6GB of data each and they are all “up”. The up/down status of a node is -independently determined by every node in the cluster, so you may have to run -nodetool status on multiple nodes in a cluster to see the full view.

-

You can use nodetool status plus a little grep to see which nodes are -down:

-
$ nodetool status | grep -v '^UN'
-Datacenter: dc1
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-Datacenter: dc2
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-DN  127.0.0.5  105.73 KiB  1            33.3%             df303ac7-61de-46e9-ac79-6e630115fd75  r1
-
-
-

In this case there are two datacenters and there is one node down in datacenter -dc2 and rack r1. This may indicate an issue on 127.0.0.5 -warranting investigation.

-
-
-

Coordinator Query Latency

-

You can view latency distributions of coordinator read and write latency -to help narrow down latency issues using nodetool proxyhistograms:

-
$ nodetool proxyhistograms
-Percentile       Read Latency      Write Latency      Range Latency   CAS Read Latency  CAS Write Latency View Write Latency
-                     (micros)           (micros)           (micros)           (micros)           (micros)           (micros)
-50%                    454.83             219.34               0.00               0.00               0.00               0.00
-75%                    545.79             263.21               0.00               0.00               0.00               0.00
-95%                    654.95             315.85               0.00               0.00               0.00               0.00
-98%                    785.94             379.02               0.00               0.00               0.00               0.00
-99%                   3379.39            2346.80               0.00               0.00               0.00               0.00
-Min                     42.51             105.78               0.00               0.00               0.00               0.00
-Max                  25109.16           43388.63               0.00               0.00               0.00               0.00
-
-
-

Here you can see the full latency distribution of reads, writes, range requests -(e.g. select * from keyspace.table), CAS read (compare phase of CAS) and -CAS write (set phase of compare and set). These can be useful for narrowing -down high level latency problems, for example in this case if a client had a -20 millisecond timeout on their reads they might experience the occasional -timeout from this node but less than 1% (since the 99% read latency is 3.3 -milliseconds < 20 milliseconds).

-
-
-

Local Query Latency

-

If you know which table is having latency/error issues, you can use -nodetool tablehistograms to get a better idea of what is happening -locally on a node:

-
$ nodetool tablehistograms keyspace table
-Percentile  SSTables     Write Latency      Read Latency    Partition Size        Cell Count
-                              (micros)          (micros)           (bytes)
-50%             0.00             73.46            182.79             17084               103
-75%             1.00             88.15            315.85             17084               103
-95%             2.00            126.93            545.79             17084               103
-98%             2.00            152.32            654.95             17084               103
-99%             2.00            182.79            785.94             17084               103
-Min             0.00             42.51             24.60             14238                87
-Max             2.00          12108.97          17436.92             17084               103
-
-
-

This shows you percentile breakdowns of several particularly critical metrics.

-

The first column contains how many sstables were read per logical read. A very high number here indicates that you may have chosen the wrong compaction strategy, e.g. SizeTieredCompactionStrategy typically has many more reads per read than LeveledCompactionStrategy does for update heavy workloads.
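
If you conclude the compaction strategy is a poor fit, it can be changed per table with an ALTER TABLE statement. The following is a minimal sketch, assuming placeholder keyspace and table names and assuming LeveledCompactionStrategy is the desired target (changing strategies will trigger recompaction of existing data, so plan the change accordingly):

$ cqlsh -e "ALTER TABLE <keyspace>.<table> WITH compaction = {'class': 'LeveledCompactionStrategy'};"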

-

The second column shows you a latency breakdown of local write latency. In this case we see that while the p50 is quite good at 73 microseconds, the maximum latency is quite slow at 12 milliseconds. High write max latencies often indicate a slow commitlog volume (slow to fsync) or large writes that quickly saturate commitlog segments.

-

The third column shows you a latency breakdown of local read latency. We can see that local Cassandra reads are (as expected) slower than local writes, and the read speed correlates highly with the number of sstables read per read.

-

The fourth and fifth columns show distributions of partition size and column count per partition. These are useful for determining if the table has on average skinny or wide partitions and can help you isolate bad data patterns. For example if you have a single cell that is 2 megabytes, that is probably going to cause some heap pressure when it’s read.

-
-
-

Threadpool State

-

You can use nodetool tpstats to view the current outstanding requests on a particular node. This is useful for trying to find out which resource (read threads, write threads, compaction, request response threads) the Cassandra process lacks. For example:

-
$ nodetool tpstats
-Pool Name                         Active   Pending      Completed   Blocked  All time blocked
-ReadStage                              2         0             12         0                 0
-MiscStage                              0         0              0         0                 0
-CompactionExecutor                     0         0           1940         0                 0
-MutationStage                          0         0              0         0                 0
-GossipStage                            0         0          10293         0                 0
-Repair-Task                            0         0              0         0                 0
-RequestResponseStage                   0         0             16         0                 0
-ReadRepairStage                        0         0              0         0                 0
-CounterMutationStage                   0         0              0         0                 0
-MemtablePostFlush                      0         0             83         0                 0
-ValidationExecutor                     0         0              0         0                 0
-MemtableFlushWriter                    0         0             30         0                 0
-ViewMutationStage                      0         0              0         0                 0
-CacheCleanupExecutor                   0         0              0         0                 0
-MemtableReclaimMemory                  0         0             30         0                 0
-PendingRangeCalculator                 0         0             11         0                 0
-SecondaryIndexManagement               0         0              0         0                 0
-HintsDispatcher                        0         0              0         0                 0
-Native-Transport-Requests              0         0            192         0                 0
-MigrationStage                         0         0             14         0                 0
-PerDiskMemtableFlushWriter_0           0         0             30         0                 0
-Sampler                                0         0              0         0                 0
-ViewBuildExecutor                      0         0              0         0                 0
-InternalResponseStage                  0         0              0         0                 0
-AntiEntropyStage                       0         0              0         0                 0
-
-Message type           Dropped                  Latency waiting in queue (micros)
-                                             50%               95%               99%               Max
-READ                         0               N/A               N/A               N/A               N/A
-RANGE_SLICE                  0              0.00              0.00              0.00              0.00
-_TRACE                       0               N/A               N/A               N/A               N/A
-HINT                         0               N/A               N/A               N/A               N/A
-MUTATION                     0               N/A               N/A               N/A               N/A
-COUNTER_MUTATION             0               N/A               N/A               N/A               N/A
-BATCH_STORE                  0               N/A               N/A               N/A               N/A
-BATCH_REMOVE                 0               N/A               N/A               N/A               N/A
-REQUEST_RESPONSE             0              0.00              0.00              0.00              0.00
-PAGED_RANGE                  0               N/A               N/A               N/A               N/A
-READ_REPAIR                  0               N/A               N/A               N/A               N/A
-
-
-

This command shows you all kinds of interesting statistics. The first section shows a detailed breakdown of threadpools for each Cassandra stage, including how many threads are currently executing (Active) and how many are waiting to run (Pending). Typically if you see pending executions in a particular threadpool that indicates a problem localized to that type of operation. For example if the RequestResponseStage queue is backing up, that means that the coordinators are waiting on a lot of downstream replica requests and may indicate a lack of token awareness, or very high consistency levels being used on read requests (for example reading at ALL ties up RF RequestResponseStage threads whereas LOCAL_ONE only uses a single thread in the ReadStage threadpool). On the other hand if you see a lot of pending compactions that may indicate that your compaction threads cannot keep up with the volume of writes and you may need to tune either the compaction strategy or the concurrent_compactors or compaction_throughput options.
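
To keep an eye on backlog over time, one quick (illustrative) one-liner is to filter the tpstats output down to pools that currently have pending tasks; the awk column position is an assumption based on the output layout shown above:

$ nodetool tpstats | awk 'NR > 1 && $3 > 0'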

-

The second section shows drops (errors) and latency distributions for all the major request types. Drops are cumulative since process start, and any drops at all indicate a serious problem, as the default timeouts to qualify as a drop are quite high (~5-10 seconds). Dropped messages often warrant further investigation.

-
-
-

Compaction State

-

As Cassandra is an LSM datastore, Cassandra sometimes has to compact sstables together, which can have adverse effects on performance. In particular, compaction uses a reasonable quantity of CPU resources, invalidates large quantities of the OS page cache, and can put a lot of load on your disk drives. There are great OS tools to determine if this is the case, but often it’s a good idea to check if compactions are even running using nodetool compactionstats:

-
$ nodetool compactionstats
-pending tasks: 2
-- keyspace.table: 2
-
-id                                   compaction type keyspace table completed total    unit  progress
-2062b290-7f3a-11e8-9358-cd941b956e60 Compaction      keyspace table 21848273  97867583 bytes 22.32%
-Active compaction remaining time :   0h00m04s
-
-
-

In this case there is a single compaction running on the keyspace.table table, which has completed 21.8 megabytes out of roughly 97.9 megabytes, and Cassandra estimates (based on the configured compaction throughput) that this will take 4 seconds. You can also pass -H to get the units in a human readable format.

-

Generally each running compaction can consume a single core, but the more you do in parallel the faster data compacts. Compaction is crucial to ensuring good read performance so having the right balance of concurrent compactions such that compactions complete quickly but don’t take too many resources away from query threads is very important for performance. If you notice compaction unable to keep up, try tuning Cassandra’s concurrent_compactors or compaction_throughput options.
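
Both settings can usually be inspected and adjusted at runtime with nodetool; the exact subcommands available depend on your Cassandra version, and any change you want to keep should also be persisted in cassandra.yaml, so treat this as a sketch rather than a recipe:

$ nodetool getcompactionthroughput
$ nodetool setcompactionthroughput 64      # MB/s; 0 disables throttling
$ nodetool setconcurrentcompactors 4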

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-alpha4/troubleshooting/use_tools.html b/src/doc/4.0-alpha4/troubleshooting/use_tools.html deleted file mode 100644 index dbec3ef45..000000000 --- a/src/doc/4.0-alpha4/troubleshooting/use_tools.html +++ /dev/null @@ -1,609 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Diving Deep, Use External Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Diving Deep, Use External Tools

-

Machine access allows operators to dive even deeper than logs and nodetool allow. While every Cassandra operator may have their personal favorite toolsets for troubleshooting issues, this page contains some of the most common operator techniques and examples of those tools. Many of these commands work only on Linux, but if you are deploying on a different operating system you may have access to other substantially similar tools that assess similar OS level metrics and processes.

-
-

JVM Tooling

-

The JVM ships with a number of useful tools. Some of them are useful for debugging Cassandra issues, especially related to heap and execution stacks.

-

NOTE: There are two common gotchas with JVM tooling and Cassandra:

-
  1. By default Cassandra ships with -XX:+PerfDisableSharedMem set to prevent long pauses (see CASSANDRA-9242 and CASSANDRA-9483 for details). If you want to use JVM tooling you can instead have /tmp mounted on an in-memory tmpfs, which also effectively works around CASSANDRA-9242.
  2. Make sure you run the tools as the same user as Cassandra is running as, e.g. if the database is running as cassandra the tool also has to be run as cassandra, e.g. via sudo -u cassandra <cmd> (see the example after this list).
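
For example, a command that follows both guidelines above might look like the following sketch, which runs jstat as the cassandra user against the Cassandra process; the pgrep pattern is an assumption, so adjust it to however the process appears on your machine:

$ sudo -u cassandra jstat -gcutil $(pgrep -f CassandraDaemon) 1000 5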
-

Garbage Collection State (jstat)

-

If you suspect heap pressure you can use jstat to dive deep into the garbage collection state of a Cassandra process. This command is always safe to run and yields detailed heap information including eden heap usage (E), old generation heap usage (O), count of eden collections (YGC), time spent in eden collections (YGCT), old/mixed generation collections (FGC) and time spent in old/mixed generation collections (FGCT):

-
jstat -gcutil <cassandra pid> 500ms
- S0     S1     E      O      M     CCS    YGC     YGCT    FGC    FGCT     GCT
- 0.00   0.00  81.53  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.94  31.16  93.07  88.20     12    0.151     3    0.257    0.408
-
-
-

In this case we see we have a relatively healthy heap profile, with 31.16% old generation heap usage and 83% eden. If the old generation routinely is above 75% then you probably need more heap (assuming CMS with a 75% occupancy threshold). If you do have such persistently high old gen usage that often means you either have under-provisioned the old generation heap, or that there is too much live data on heap for Cassandra to collect (e.g. because of memtables). Another thing to watch for is time between young garbage collections (YGC), which indicates how frequently the eden heap is collected. Each young gc pause is about 20-50ms, so if you have a lot of them your clients will notice in their high percentile latencies.
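
To cross-check observed GC behavior against the heap the node is actually configured with, nodetool info reports heap usage and capacity; the exact output format varies slightly between versions, so this is only a quick sanity check:

$ nodetool info | grep -i "heap memory"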

-
-
-

Thread Information (jstack)

-

To get a point in time snapshot of exactly what Cassandra is doing, run jstack against the Cassandra PID. Note that this does pause the JVM for a very brief period (<20ms):

-
$ jstack <cassandra pid> > threaddump
-
-# display the threaddump
-$ cat threaddump
-...
-
-# look at runnable threads
-$ grep RUNNABLE threaddump -B 1
-"Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000]
-   java.lang.Thread.State: RUNNABLE
---
-"Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-...
-
-# Note that the nid is the Linux thread id
-
-
-

Some of the most important information in the threaddumps is the set of waiting/blocking threads, including what locks or monitors the thread is blocking/waiting on.
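
A quick way to surface those threads from the dump captured above is to grep for the BLOCKED state along with the preceding thread-name line, for example:

$ grep -B 1 'java.lang.Thread.State: BLOCKED' threaddump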

-
-
-
-

Basic OS Tooling

-

A great place to start when debugging a Cassandra issue is understanding how Cassandra is interacting with system resources. The following are all resources that Cassandra makes heavy use of:

-
  • CPU cores. For executing concurrent user queries.
  • CPU processing time. For query activity (data decompression, row merging, etc…).
  • CPU processing time (low priority). For background tasks (compaction, streaming, etc…).
  • RAM for Java Heap. Used to hold internal data-structures and by default the Cassandra memtables. Heap space is a crucial component of write performance as well as performance in general.
  • RAM for OS disk cache. Used to cache frequently accessed SSTable blocks. OS disk cache is a crucial component of read performance.
  • Disks. Cassandra cares a lot about disk read latency, disk write throughput, and of course disk space.
  • Network latency. Cassandra makes many internode requests, so network latency between nodes can directly impact performance.
  • Network throughput. Cassandra (like other databases) frequently has the so-called “incast” problem where a small request (e.g. SELECT * from foo.bar) returns a massively large result set (e.g. the entire dataset). In such situations outgoing bandwidth is crucial.

Often troubleshooting Cassandra comes down to troubleshooting what resource the machine or cluster is running out of. Then you create more of that resource or change the query pattern to make less use of that resource.

-
-

High Level Resource Usage (top/htop)

-

Cassandra makes significant use of system resources, and often the very first useful action is to run top or htop to see the state of the machine.

-

Useful things to look at:

-
  • System load levels. While these numbers can be confusing, generally speaking if the load average is greater than the number of CPU cores, Cassandra probably won’t have very good (sub 100 millisecond) latencies. See Linux Load Averages for more information. A quick way to check this is shown after this list.
  • CPU utilization. htop in particular can help break down CPU utilization into user (low and normal priority), system (kernel), and io-wait. Cassandra query threads execute as normal priority user threads, while compaction threads execute as low priority user threads. High system time could indicate problems like thread contention, and high io-wait may indicate slow disk drives. This can help you understand what Cassandra is spending processing resources doing.
  • Memory usage. Look for which programs have the most resident memory; it is probably Cassandra. The number for Cassandra is likely inaccurately high due to how Linux (as of 2018) accounts for memory mapped file memory.
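
As a quick check for the first bullet above, compare the load average reported by uptime against the core count reported by nproc:

$ nproc
$ uptime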
-
-

IO Usage (iostat)

-

Use iostat to determine how data drives are faring, including latency distributions, throughput, and utilization:

-
$ sudo iostat -xdm 2
-Linux 4.13.0-13-generic (hostname)     07/03/2018     _x86_64_    (8 CPU)
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.28    0.32    5.42     0.01     0.13    48.55     0.01    2.21    0.26    2.32   0.64   0.37
-sdb               0.00     0.00    0.00    0.00     0.00     0.00    79.34     0.00    0.20    0.20    0.00   0.16   0.00
-sdc               0.34     0.27    0.76    0.36     0.01     0.02    47.56     0.03   26.90    2.98   77.73   9.21   1.03
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.00    2.00   32.00     0.01     4.04   244.24     0.54   16.00    0.00   17.00   1.06   3.60
-sdb               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00    0.00    0.00   0.00   0.00
-sdc               0.00    24.50    0.00  114.00     0.00    11.62   208.70     5.56   48.79    0.00   48.79   1.12  12.80
-
-
-

In this case we can see that /dev/sdc is a very slow drive, having an await close to 50 milliseconds and an avgqu-sz close to 5 ios. The drive is not particularly saturated (utilization is only 12.8%), but we should still be concerned about how this would affect our p99 latency since 50ms is quite long for typical Cassandra operations. That being said, in this case most of the latency is present in writes (typically writes are more latent than reads), which due to the LSM nature of Cassandra is often hidden from the user.

-

Important metrics to assess using iostat:

-
  • Reads and writes per second. These numbers will change with the workload, but generally speaking the more reads Cassandra has to do from disk the slower Cassandra read latencies are. Large numbers of reads per second can be a dead giveaway that the cluster has insufficient memory for OS page caching.
  • Write throughput. Cassandra’s LSM model defers user writes and batches them together, which means that throughput to the underlying medium is the most important write metric for Cassandra.
  • Read latency (r_await). When Cassandra misses the OS page cache and reads from SSTables, the read latency directly determines how fast Cassandra can respond with the data.
  • Write latency. Cassandra is less sensitive to write latency except when it syncs the commit log. This typically enters into the very high percentiles of write latency.

Note that to get detailed latency breakdowns you will need a more advanced tool such as bcc-tools.

-
-
-

OS Page Cache Usage

-

As Cassandra makes heavy use of memory mapped files, the health of the operating system’s page cache is crucial to performance. Start by finding how much available cache is in the system:

-
$ free -g
-              total        used        free      shared  buff/cache   available
-Mem:             15           9           2           0           3           5
-Swap:             0           0           0
-
-
-

In this case 9GB of memory is used by user processes (Cassandra heap) and 8GB is available for OS page cache. Of that, 3GB is actually used to cache files. If most memory is used and unavailable to the page cache, Cassandra performance can suffer significantly. This is why Cassandra starts with a reasonably small amount of memory reserved for the heap.

-

If you suspect that you are missing the OS page cache frequently you can use advanced tools like cachestat or vmtouch to dive deeper.

-
-
-

Network Latency and Reliability

-

Whenever Cassandra does writes or reads that involve other replicas, LOCAL_QUORUM reads for example, one of the dominant effects on latency is network latency. When trying to debug issues with multi machine operations, the network can be an important resource to investigate. You can determine internode latency using tools like ping and traceroute or most effectively mtr:

-
$ mtr -nr www.google.com
-Start: Sun Jul 22 13:10:28 2018
-HOST: hostname                     Loss%   Snt   Last   Avg  Best  Wrst StDev
-  1.|-- 192.168.1.1                0.0%    10    2.0   1.9   1.1   3.7   0.7
-  2.|-- 96.123.29.15               0.0%    10   11.4  11.0   9.0  16.4   1.9
-  3.|-- 68.86.249.21               0.0%    10   10.6  10.7   9.0  13.7   1.1
-  4.|-- 162.141.78.129             0.0%    10   11.5  10.6   9.6  12.4   0.7
-  5.|-- 162.151.78.253             0.0%    10   10.9  12.1  10.4  20.2   2.8
-  6.|-- 68.86.143.93               0.0%    10   12.4  12.6   9.9  23.1   3.8
-  7.|-- 96.112.146.18              0.0%    10   11.9  12.4  10.6  15.5   1.6
-  9.|-- 209.85.252.250             0.0%    10   13.7  13.2  12.5  13.9   0.0
- 10.|-- 108.170.242.238            0.0%    10   12.7  12.4  11.1  13.0   0.5
- 11.|-- 74.125.253.149             0.0%    10   13.4  13.7  11.8  19.2   2.1
- 12.|-- 216.239.62.40              0.0%    10   13.4  14.7  11.5  26.9   4.6
- 13.|-- 108.170.242.81             0.0%    10   14.4  13.2  10.9  16.0   1.7
- 14.|-- 72.14.239.43               0.0%    10   12.2  16.1  11.0  32.8   7.1
- 15.|-- 216.58.195.68              0.0%    10   25.1  15.3  11.1  25.1   4.8
-
-
-

In this example of mtr, we can rapidly assess the path that packets are taking, as well as what their typical loss and latency are. Packet loss typically leads to between 200ms and 3s of additional latency, so that can be a common cause of latency issues.

-
-
-

Network Throughput

-

As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is useful to determine if network throughput is limited. One handy tool to do this is iftop which shows both bandwidth usage as well as connection information at a glance. An example showing traffic during a stress run against a local ccm cluster:

-
$ # remove the -t for ncurses instead of pure text
-$ sudo iftop -nNtP -i lo
-interface: lo
-IP address is: 127.0.0.1
-MAC address is: 00:00:00:00:00:00
-Listening on lo
-   # Host name (port/service if enabled)            last 2s   last 10s   last 40s cumulative
---------------------------------------------------------------------------------------------
-   1 127.0.0.1:58946                          =>      869Kb      869Kb      869Kb      217KB
-     127.0.0.3:9042                           <=         0b         0b         0b         0B
-   2 127.0.0.1:54654                          =>      736Kb      736Kb      736Kb      184KB
-     127.0.0.1:9042                           <=         0b         0b         0b         0B
-   3 127.0.0.1:51186                          =>      669Kb      669Kb      669Kb      167KB
-     127.0.0.2:9042                           <=         0b         0b         0b         0B
-   4 127.0.0.3:9042                           =>     3.30Kb     3.30Kb     3.30Kb       845B
-     127.0.0.1:58946                          <=         0b         0b         0b         0B
-   5 127.0.0.1:9042                           =>     2.79Kb     2.79Kb     2.79Kb       715B
-     127.0.0.1:54654                          <=         0b         0b         0b         0B
-   6 127.0.0.2:9042                           =>     2.54Kb     2.54Kb     2.54Kb       650B
-     127.0.0.1:51186                          <=         0b         0b         0b         0B
-   7 127.0.0.1:36894                          =>     1.65Kb     1.65Kb     1.65Kb       423B
-     127.0.0.5:7000                           <=         0b         0b         0b         0B
-   8 127.0.0.1:38034                          =>     1.50Kb     1.50Kb     1.50Kb       385B
-     127.0.0.2:7000                           <=         0b         0b         0b         0B
-   9 127.0.0.1:56324                          =>     1.50Kb     1.50Kb     1.50Kb       383B
-     127.0.0.1:7000                           <=         0b         0b         0b         0B
-  10 127.0.0.1:53044                          =>     1.43Kb     1.43Kb     1.43Kb       366B
-     127.0.0.4:7000                           <=         0b         0b         0b         0B
---------------------------------------------------------------------------------------------
-Total send rate:                                     2.25Mb     2.25Mb     2.25Mb
-Total receive rate:                                      0b         0b         0b
-Total send and receive rate:                         2.25Mb     2.25Mb     2.25Mb
---------------------------------------------------------------------------------------------
-Peak rate (sent/received/total):                     2.25Mb         0b     2.25Mb
-Cumulative (sent/received/total):                     576KB         0B      576KB
-============================================================================================
-
-
-

In this case we can see that bandwidth is fairly shared between many peers, but if the total was getting close to the rated capacity of the NIC or was focused on a single client, that may indicate a clue as to what issue is occurring.
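
To check the rated capacity of a NIC, ethtool reports the negotiated link speed; the interface name eth0 below is an assumption (substitute your own), and virtual interfaces may not report a speed at all:

$ sudo ethtool eth0 | grep Speed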

-
-
-
-

Advanced tools

-

Sometimes as an operator you may need to really dive deep. This is where advanced OS tooling can come in handy.

-
-

bcc-tools

-

Most modern Linux distributions (kernels newer than 4.1) support bcc-tools for diving deep into performance problems. First install bcc-tools, e.g. via apt on Debian:

-
$ apt install bcc-tools
-
-
-

Then you can use all the tools that bcc-tools contains. One of the most useful tools is cachestat (cachestat examples) which allows you to determine exactly how many OS page cache hits and misses are happening:

-
$ sudo /usr/share/bcc/tools/cachestat -T 1
-TIME        TOTAL   MISSES     HITS  DIRTIES   BUFFERS_MB  CACHED_MB
-18:44:08       66       66        0       64           88       4427
-18:44:09       40       40        0       75           88       4427
-18:44:10     4353       45     4308      203           88       4427
-18:44:11       84       77        7       13           88       4428
-18:44:12     2511       14     2497       14           88       4428
-18:44:13      101       98        3       18           88       4428
-18:44:14    16741        0    16741       58           88       4428
-18:44:15     1935       36     1899       18           88       4428
-18:44:16       89       34       55       18           88       4428
-
-
-

In this case there are not too many page cache MISSES which indicates a reasonably sized cache. These metrics are the most direct measurement of your Cassandra node’s “hot” dataset. If you don’t have enough cache, MISSES will be high and performance will be slow. If you have enough cache, MISSES will be low and performance will be fast (as almost all reads are being served out of memory).

-

You can also measure disk latency distributions using biolatency (biolatency examples) to get an idea of how slow Cassandra will be when reads miss the OS page cache and have to hit disks:

-
$ sudo /usr/share/bcc/tools/biolatency -D 10
-Tracing block device I/O... Hit Ctrl-C to end.
-
-
-disk = 'sda'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 12       |****************************************|
-        32 -> 63         : 9        |******************************          |
-        64 -> 127        : 1        |***                                     |
-       128 -> 255        : 3        |**********                              |
-       256 -> 511        : 7        |***********************                 |
-       512 -> 1023       : 2        |******                                  |
-
-disk = 'sdc'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 0        |                                        |
-        32 -> 63         : 0        |                                        |
-        64 -> 127        : 41       |************                            |
-       128 -> 255        : 17       |*****                                   |
-       256 -> 511        : 13       |***                                     |
-       512 -> 1023       : 2        |                                        |
-      1024 -> 2047       : 0        |                                        |
-      2048 -> 4095       : 0        |                                        |
-      4096 -> 8191       : 56       |*****************                       |
-      8192 -> 16383      : 131      |****************************************|
-     16384 -> 32767      : 9        |**                                      |
-
-
-

In this case most I/Os on the data drive (sdc) are fast, but many take between 8 and 16 milliseconds.

-

Finally biosnoop (examples) can be used to dive even deeper and see per-IO latencies:

-
$ sudo /usr/share/bcc/tools/biosnoop | grep java | head
-0.000000000    java           17427  sdc     R  3972458600 4096      13.58
-0.000818000    java           17427  sdc     R  3972459408 4096       0.35
-0.007098000    java           17416  sdc     R  3972401824 4096       5.81
-0.007896000    java           17416  sdc     R  3972489960 4096       0.34
-0.008920000    java           17416  sdc     R  3972489896 4096       0.34
-0.009487000    java           17427  sdc     R  3972401880 4096       0.32
-0.010238000    java           17416  sdc     R  3972488368 4096       0.37
-0.010596000    java           17427  sdc     R  3972488376 4096       0.34
-0.011236000    java           17410  sdc     R  3972488424 4096       0.32
-0.011825000    java           17427  sdc     R  3972488576 16384      0.65
-... time passes
-8.032687000    java           18279  sdc     R  10899712  122880     3.01
-8.033175000    java           18279  sdc     R  10899952  8192       0.46
-8.073295000    java           18279  sdc     R  23384320  122880     3.01
-8.073768000    java           18279  sdc     R  23384560  8192       0.46
-
-
-

With biosnoop you see every single IO and how long it takes. This data can be used to construct the latency distributions in biolatency but can also be used to better understand how disk latency affects performance. For example this particular drive takes ~3ms to service a memory mapped read due to the large default value (128kb) of read_ahead_kb. To improve point read performance you may want to decrease read_ahead_kb on fast data volumes such as SSDs, while a higher value like 128kb is probably right for HDDs. There are tradeoffs involved, see the queue-sysfs docs for more information, but regardless biosnoop is useful for understanding how Cassandra uses drives.
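
As a sketch of the tuning described above (the device name sdc and the value 8 are assumptions; check the queue-sysfs documentation and test against your own hardware before changing anything, and note the setting does not persist across reboots):

$ cat /sys/block/sdc/queue/read_ahead_kb
128
$ echo 8 | sudo tee /sys/block/sdc/queue/read_ahead_kb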

-
-
-

vmtouch

-

Sometimes it’s useful to know how much of the Cassandra data files are being cached by the OS. A great tool for answering this question is vmtouch.

-

First install it:

-
$ git clone https://github.com/hoytech/vmtouch.git
-$ cd vmtouch
-$ make
-
-
-

Then run it on the Cassandra data directory:

-
$ ./vmtouch /var/lib/cassandra/data/
-           Files: 312
-     Directories: 92
-  Resident Pages: 62503/64308  244M/251M  97.2%
-         Elapsed: 0.005657 seconds
-
-
-

In this case almost the entire dataset is hot in the OS page cache. Generally speaking the percentage doesn’t really matter unless reads are missing the cache (as shown e.g. by cachestat), in which case having additional memory may help read performance.

-
-
-

CPU Flamegraphs

-

Cassandra often uses a lot of CPU, but telling what it is doing can prove difficult. One of the best ways to analyze where Cassandra spends its CPU time is to use CPU Flamegraphs, which display in a useful way which areas of Cassandra code are using CPU. This may help narrow down a compaction problem to a “compaction problem dropping tombstones” or just generally help you narrow down what Cassandra is doing while it is having an issue. To get CPU flamegraphs follow the instructions for Java Flamegraphs.

-

Generally:

-
  1. Enable the -XX:+PreserveFramePointer option in Cassandra’s jvm.options configuration file. This has a negligible performance impact but allows you to actually see what Cassandra is doing.
  2. Run perf to get some data.
  3. Send that data through the relevant scripts in the FlameGraph toolset and convert the data into a pretty flamegraph. View the resulting SVG image in a browser or other image viewer.

For example, cloning straight off GitHub, we first install the perf-map-agent to the location of our JVMs (assumed to be /usr/lib/jvm):

-
$ sudo bash
-$ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/
-$ cd /usr/lib/jvm
-$ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent
-$ cd perf-map-agent
-$ cmake .
-$ make
-
-
-

Now to get a flamegraph:

-
$ git clone --depth=1 https://github.com/brendangregg/FlameGraph
-$ sudo bash
-$ cd FlameGraph
-$ # Record traces of Cassandra and map symbols for all java processes
-$ perf record -F 49 -a -g -p <CASSANDRA PID> -- sleep 30; ./jmaps
-$ # Translate the data
-$ perf script > cassandra_stacks
-$ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \
-    ./flamegraph.pl --color=java --hash > cassandra_flames.svg
-
-
-

The resulting SVG is searchable, zoomable, and generally easy to introspect using a browser.

-
-
-

Packet Capture

-

Sometimes you have to understand what queries a Cassandra node is performing right now to troubleshoot an issue. For these times trusty packet capture tools like tcpdump and Wireshark can be very helpful to dissect packet captures. Wireshark even has native CQL support although it sometimes has compatibility issues with newer Cassandra protocol releases.

-

To get a packet capture first capture some packets:

-
$ sudo tcpdump -U -s0 -i <INTERFACE> -w cassandra.pcap -n "tcp port 9042"
-
-
-

Now open it up with wireshark:

-
$ wireshark cassandra.pcap
-
-
-

If you don’t see CQL-like statements, try telling Wireshark to decode the packets as CQL by right clicking on a packet going to 9042 -> Decode as -> select CQL from the dropdown for port 9042.
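
If you prefer the command line, tshark (Wireshark’s CLI) can apply the same decode-as rule to the capture taken earlier; the cql dissector name is an assumption here and may vary with your Wireshark version:

$ tshark -r cassandra.pcap -d tcp.port==9042,cql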

-

If you don’t want to do this manually or use a GUI, you can also use something like cqltrace to ease obtaining and parsing CQL packet captures.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/.buildinfo b/src/doc/4.0-beta1/.buildinfo deleted file mode 100644 index 4b7630fa2..000000000 --- a/src/doc/4.0-beta1/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 2750660c14320a5bfbbf0b0c2a46bb84 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/src/doc/4.0-beta1/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml b/src/doc/4.0-beta1/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml deleted file mode 100644 index fc5db0814..000000000 --- a/src/doc/4.0-beta1/_downloads/073727311784b6e183b3e78dbd702329/stress-lwt-example.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# Keyspace Name -keyspace: stresscql - -# The CQL for creating a keyspace (optional if it already exists) -# Would almost always be network topology unless running something locall -keyspace_definition: | - CREATE KEYSPACE stresscql WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; - -# Table name -table: blogposts - -# The CQL for creating a table you wish to stress (optional if it already exists) -table_definition: | - CREATE TABLE blogposts ( - domain text, - published_date timeuuid, - url text, - author text, - title text, - body text, - PRIMARY KEY(domain, published_date) - ) WITH CLUSTERING ORDER BY (published_date DESC) - AND compaction = { 'class':'LeveledCompactionStrategy' } - AND comment='A table to hold blog posts' - -### Column Distribution Specifications ### - -columnspec: - - name: domain - size: gaussian(5..100) #domain names are relatively short - population: uniform(1..10M) #10M possible domains to pick from - - - name: published_date - cluster: fixed(1000) #under each domain we will have max 1000 posts - - - name: url - size: uniform(30..300) - - - name: title #titles shouldn't go beyond 200 chars - size: gaussian(10..200) - - - name: author - size: uniform(5..20) #author names should be short - - - name: body - size: gaussian(100..5000) #the body of the blog post can be long - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # Our partition key is the domain so only insert one per batch - - select: fixed(1)/1000 # We have 1000 posts per domain so 1/1000 will allow 1 post per batch - - batchtype: UNLOGGED # Unlogged batches - - -# -# A list of queries you wish to run against the schema -# -queries: - singlepost: - cql: select * from blogposts where domain = ? LIMIT 1 - fields: samerow - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? 
- fields: samerow diff --git a/src/doc/4.0-beta1/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml b/src/doc/4.0-beta1/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml deleted file mode 100644 index 17161af27..000000000 --- a/src/doc/4.0-beta1/_downloads/0bad10109f737a1dc8fae9db51a00e36/stress-example.yaml +++ /dev/null @@ -1,44 +0,0 @@ -spacenam: example # idenitifier for this spec if running with multiple yaml files -keyspace: example - -# Would almost always be network topology unless running something locally -keyspace_definition: | - CREATE KEYSPACE example WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -table: staff_activities - -# The table under test. Start with a partition per staff member -# Is this a good idea? -table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when) - ) - -columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -insert: - # we only update a single partition in any given insert - partitions: fixed(1) - # we want to insert a single row per partition and we have between 20 and 500 - # rows per partition - select: fixed(1)/500 - batchtype: UNLOGGED # Single partition unlogged batches are essentially noops - -queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? LIMIT 1 - fields: samerow - diff --git a/src/doc/4.0-beta1/_images/Figure_1_backups.jpg b/src/doc/4.0-beta1/_images/Figure_1_backups.jpg deleted file mode 100644 index 160013d76..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_1_backups.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/Figure_1_data_model.jpg b/src/doc/4.0-beta1/_images/Figure_1_data_model.jpg deleted file mode 100644 index a3b330e7a..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_1_data_model.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/Figure_1_guarantees.jpg b/src/doc/4.0-beta1/_images/Figure_1_guarantees.jpg deleted file mode 100644 index 859342da5..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_1_guarantees.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/Figure_1_read_repair.jpg b/src/doc/4.0-beta1/_images/Figure_1_read_repair.jpg deleted file mode 100644 index d771550a4..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_1_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/Figure_2_data_model.jpg b/src/doc/4.0-beta1/_images/Figure_2_data_model.jpg deleted file mode 100644 index 7acdeac02..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_2_data_model.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/Figure_2_read_repair.jpg b/src/doc/4.0-beta1/_images/Figure_2_read_repair.jpg deleted file mode 100644 index 29a912b49..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_2_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/Figure_3_read_repair.jpg b/src/doc/4.0-beta1/_images/Figure_3_read_repair.jpg deleted file mode 100644 index f5cc1897e..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_3_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/Figure_4_read_repair.jpg 
b/src/doc/4.0-beta1/_images/Figure_4_read_repair.jpg deleted file mode 100644 index 25bdb347d..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_4_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/Figure_5_read_repair.jpg b/src/doc/4.0-beta1/_images/Figure_5_read_repair.jpg deleted file mode 100644 index d9c04857f..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_5_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/Figure_6_read_repair.jpg b/src/doc/4.0-beta1/_images/Figure_6_read_repair.jpg deleted file mode 100644 index 6bb4d1e32..000000000 Binary files a/src/doc/4.0-beta1/_images/Figure_6_read_repair.jpg and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_chebotko_logical.png b/src/doc/4.0-beta1/_images/data_modeling_chebotko_logical.png deleted file mode 100644 index e54b5f274..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_chebotko_logical.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_chebotko_physical.png b/src/doc/4.0-beta1/_images/data_modeling_chebotko_physical.png deleted file mode 100644 index bfdaec552..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_chebotko_physical.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_hotel_bucketing.png b/src/doc/4.0-beta1/_images/data_modeling_hotel_bucketing.png deleted file mode 100644 index 8b53e38f9..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_hotel_bucketing.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_hotel_erd.png b/src/doc/4.0-beta1/_images/data_modeling_hotel_erd.png deleted file mode 100644 index e86fe68f3..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_hotel_erd.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_hotel_logical.png b/src/doc/4.0-beta1/_images/data_modeling_hotel_logical.png deleted file mode 100644 index e920f1248..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_hotel_logical.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_hotel_physical.png b/src/doc/4.0-beta1/_images/data_modeling_hotel_physical.png deleted file mode 100644 index 2d20a6ddb..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_hotel_physical.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_hotel_queries.png b/src/doc/4.0-beta1/_images/data_modeling_hotel_queries.png deleted file mode 100644 index 2434db39d..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_hotel_queries.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_hotel_relational.png b/src/doc/4.0-beta1/_images/data_modeling_hotel_relational.png deleted file mode 100644 index 43e784eea..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_hotel_relational.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_reservation_logical.png b/src/doc/4.0-beta1/_images/data_modeling_reservation_logical.png deleted file mode 100644 index 0460633b6..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_reservation_logical.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/data_modeling_reservation_physical.png b/src/doc/4.0-beta1/_images/data_modeling_reservation_physical.png deleted file mode 100644 index 1e6e76c16..000000000 Binary files a/src/doc/4.0-beta1/_images/data_modeling_reservation_physical.png and /dev/null differ diff --git 
a/src/doc/4.0-beta1/_images/docs_commit.png b/src/doc/4.0-beta1/_images/docs_commit.png deleted file mode 100644 index d90d96a88..000000000 Binary files a/src/doc/4.0-beta1/_images/docs_commit.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/docs_create_branch.png b/src/doc/4.0-beta1/_images/docs_create_branch.png deleted file mode 100644 index a04cb54f3..000000000 Binary files a/src/doc/4.0-beta1/_images/docs_create_branch.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/docs_create_file.png b/src/doc/4.0-beta1/_images/docs_create_file.png deleted file mode 100644 index b51e37035..000000000 Binary files a/src/doc/4.0-beta1/_images/docs_create_file.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/docs_editor.png b/src/doc/4.0-beta1/_images/docs_editor.png deleted file mode 100644 index 5b9997bcc..000000000 Binary files a/src/doc/4.0-beta1/_images/docs_editor.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/docs_fork.png b/src/doc/4.0-beta1/_images/docs_fork.png deleted file mode 100644 index 20a592a98..000000000 Binary files a/src/doc/4.0-beta1/_images/docs_fork.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/docs_pr.png b/src/doc/4.0-beta1/_images/docs_pr.png deleted file mode 100644 index 211eb25ef..000000000 Binary files a/src/doc/4.0-beta1/_images/docs_pr.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/docs_preview.png b/src/doc/4.0-beta1/_images/docs_preview.png deleted file mode 100644 index 207f0ac43..000000000 Binary files a/src/doc/4.0-beta1/_images/docs_preview.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/eclipse_debug0.png b/src/doc/4.0-beta1/_images/eclipse_debug0.png deleted file mode 100644 index 79fc5fd5b..000000000 Binary files a/src/doc/4.0-beta1/_images/eclipse_debug0.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/eclipse_debug1.png b/src/doc/4.0-beta1/_images/eclipse_debug1.png deleted file mode 100644 index 87b8756a3..000000000 Binary files a/src/doc/4.0-beta1/_images/eclipse_debug1.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/eclipse_debug2.png b/src/doc/4.0-beta1/_images/eclipse_debug2.png deleted file mode 100644 index df4eddbd7..000000000 Binary files a/src/doc/4.0-beta1/_images/eclipse_debug2.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/eclipse_debug3.png b/src/doc/4.0-beta1/_images/eclipse_debug3.png deleted file mode 100644 index 23178142c..000000000 Binary files a/src/doc/4.0-beta1/_images/eclipse_debug3.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/eclipse_debug4.png b/src/doc/4.0-beta1/_images/eclipse_debug4.png deleted file mode 100644 index 5063d4891..000000000 Binary files a/src/doc/4.0-beta1/_images/eclipse_debug4.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/eclipse_debug5.png b/src/doc/4.0-beta1/_images/eclipse_debug5.png deleted file mode 100644 index ab68e68a3..000000000 Binary files a/src/doc/4.0-beta1/_images/eclipse_debug5.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/eclipse_debug6.png b/src/doc/4.0-beta1/_images/eclipse_debug6.png deleted file mode 100644 index 61ef30bfe..000000000 Binary files a/src/doc/4.0-beta1/_images/eclipse_debug6.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_images/example-stress-graph.png b/src/doc/4.0-beta1/_images/example-stress-graph.png deleted file mode 100644 index a65b08b16..000000000 Binary files a/src/doc/4.0-beta1/_images/example-stress-graph.png and /dev/null differ diff --git 
a/src/doc/4.0-beta1/_images/hints.svg b/src/doc/4.0-beta1/_images/hints.svg deleted file mode 100644 index 5e952e796..000000000 --- a/src/doc/4.0-beta1/_images/hints.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - diff --git a/src/doc/4.0-beta1/_images/ring.svg b/src/doc/4.0-beta1/_images/ring.svg deleted file mode 100644 index d0db8c579..000000000 --- a/src/doc/4.0-beta1/_images/ring.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - ... - diff --git a/src/doc/4.0-beta1/_images/vnodes.svg b/src/doc/4.0-beta1/_images/vnodes.svg deleted file mode 100644 index 71b4fa2d8..000000000 --- a/src/doc/4.0-beta1/_images/vnodes.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - diff --git a/src/doc/4.0-beta1/_sources/architecture/dynamo.rst.txt b/src/doc/4.0-beta1/_sources/architecture/dynamo.rst.txt deleted file mode 100644 index 5b17d9a7c..000000000 --- a/src/doc/4.0-beta1/_sources/architecture/dynamo.rst.txt +++ /dev/null @@ -1,537 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dynamo -====== - -Apache Cassandra relies on a number of techniques from Amazon's `Dynamo -`_ -distributed storage key-value system. Each node in the Dynamo system has three -main components: - -- Request coordination over a partitioned dataset -- Ring membership and failure detection -- A local persistence (storage) engine - -Cassandra primarily draws from the first two clustering components, -while using a storage engine based on a Log Structured Merge Tree -(`LSM `_). -In particular, Cassandra relies on Dynamo style: - -- Dataset partitioning using consistent hashing -- Multi-master replication using versioned data and tunable consistency -- Distributed cluster membership and failure detection via a gossip protocol -- Incremental scale-out on commodity hardware - -Cassandra was designed this way to meet large-scale (PiB+) business-critical -storage requirements. In particular, as applications demanded full global -replication of petabyte scale datasets along with always available low-latency -reads and writes, it became imperative to design a new kind of database model -as the relational database systems of the time struggled to meet the new -requirements of global scale applications. - -Dataset Partitioning: Consistent Hashing ----------------------------------------- - -Cassandra achieves horizontal scalability by -`partitioning `_ -all data stored in the system using a hash function. Each partition is replicated -to multiple physical nodes, often across failure domains such as racks and even -datacenters. As every replica can independently accept mutations to every key -that it owns, every key must be versioned. 
Unlike in the original Dynamo paper -where deterministic versions and vector clocks were used to reconcile concurrent -updates to a key, Cassandra uses a simpler last write wins model where every -mutation is timestamped (including deletes) and then the latest version of data -is the "winning" value. Formally speaking, Cassandra uses a Last-Write-Wins Element-Set -conflict-free replicated data type for each CQL row (a.k.a `LWW-Element-Set CRDT -`_) -to resolve conflicting mutations on replica sets. - - .. _consistent-hashing-token-ring: - -Consistent Hashing using a Token Ring -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra partitions data over storage nodes using a special form of hashing -called `consistent hashing `_. -In naive data hashing, you typically allocate keys to buckets by taking a hash -of the key modulo the number of buckets. For example, if you want to distribute -data to 100 nodes using naive hashing you might assign every node to a bucket -between 0 and 100, hash the input key modulo 100, and store the data on the -associated bucket. In this naive scheme, however, adding a single node might -invalidate almost all of the mappings. - -Cassandra instead maps every node to one or more tokens on a continuous hash -ring, and defines ownership by hashing a key onto the ring and then "walking" -the ring in one direction, similar to the `Chord -`_ -algorithm. The main difference of consistent hashing to naive data hashing is -that when the number of nodes (buckets) to hash into changes, consistent -hashing only has to move a small fraction of the keys. - -For example, if we have an eight node cluster with evenly spaced tokens, and -a replication factor (RF) of 3, then to find the owning nodes for a key we -first hash that key to generate a token (which is just the hash of the key), -and then we "walk" the ring in a clockwise fashion until we encounter three -distinct nodes, at which point we have found all the replicas of that key. -This example of an eight node cluster with `RF=3` can be visualized as follows: - -.. figure:: images/ring.svg - :scale: 75 % - :alt: Dynamo Ring - -You can see that in a Dynamo like system, ranges of keys, also known as **token -ranges**, map to the same physical set of nodes. In this example, all keys that -fall in the token range excluding token 1 and including token 2 (`range(t1, t2]`) -are stored on nodes 2, 3 and 4. - -Multiple Tokens per Physical Node (a.k.a. `vnodes`) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Simple single token consistent hashing works well if you have many physical -nodes to spread data over, but with evenly spaced tokens and a small number of -physical nodes, incremental scaling (adding just a few nodes of capacity) is -difficult because there are no token selections for new nodes that can leave -the ring balanced. Cassandra seeks to avoid token imbalance because uneven -token ranges lead to uneven request load. For example, in the previous example -there is no way to add a ninth token without causing imbalance; instead we -would have to insert ``8`` tokens in the midpoints of the existing ranges. - -The Dynamo paper advocates for the use of "virtual nodes" to solve this -imbalance problem. Virtual nodes solve the problem by assigning multiple -tokens in the token ring to each physical node. 
By allowing a single physical -node to take multiple positions in the ring, we can make small clusters look -larger and therefore even with a single physical node addition we can make it -look like we added many more nodes, effectively taking many smaller pieces of -data from more ring neighbors when we add even a single node. - -Cassandra introduces some nomenclature to handle these concepts: - -- **Token**: A single position on the `dynamo` style hash ring. -- **Endpoint**: A single physical IP and port on the network. -- **Host ID**: A unique identifier for a single "physical" node, usually - present at one `Endpoint` and containing one or more `Tokens`. -- **Virtual Node** (or **vnode**): A `Token` on the hash ring owned by the same - physical node, one with the same `Host ID`. - -The mapping of **Tokens** to **Endpoints** gives rise to the **Token Map** -where Cassandra keeps track of what ring positions map to which physical -endpoints. For example, in the following figure we can represent an eight node -cluster using only four physical nodes by assigning two tokens to every node: - -.. figure:: images/vnodes.svg - :scale: 75 % - :alt: Virtual Tokens Ring - - -Multiple tokens per physical node provide the following benefits: - -1. When a new node is added it accepts approximately equal amounts of data from - other nodes in the ring, resulting in equal distribution of data across the - cluster. -2. When a node is decommissioned, it loses data roughly equally to other members - of the ring, again keeping equal distribution of data across the cluster. -3. If a node becomes unavailable, query load (especially token aware query load), - is evenly distributed across many other nodes. - -Multiple tokens, however, can also have disadvantages: - -1. Every token introduces up to ``2 * (RF - 1)`` additional neighbors on the - token ring, which means that there are more combinations of node failures - where we lose availability for a portion of the token ring. The more tokens - you have, `the higher the probability of an outage - `_. -2. Cluster-wide maintenance operations are often slowed. For example, as the - number of tokens per node is increased, the number of discrete repair - operations the cluster must do also increases. -3. Performance of operations that span token ranges could be affected. - -Note that in Cassandra ``2.x``, the only token allocation algorithm available -was picking random tokens, which meant that to keep balance the default number -of tokens per node had to be quite high, at ``256``. This had the effect of -coupling many physical endpoints together, increasing the risk of -unavailability. That is why in ``3.x +`` the new deterministic token allocator -was added which intelligently picks tokens such that the ring is optimally -balanced while requiring a much lower number of tokens per physical node. - - -Multi-master Replication: Versioned Data and Tunable Consistency ----------------------------------------------------------------- - -Cassandra replicates every partition of data to many nodes across the cluster -to maintain high availability and durability. When a mutation occurs, the -coordinator hashes the partition key to determine the token range the data -belongs to and then replicates the mutation to the replicas of that data -according to the :ref:`Replication Strategy `. - -All replication strategies have the notion of a **replication factor** (``RF``), -which indicates to Cassandra how many copies of the partition should exist. 
-For example with a ``RF=3`` keyspace, the data will be written to three -distinct **replicas**. Replicas are always chosen such that they are distinct -physical nodes which is achieved by skipping virtual nodes if needed. -Replication strategies may also choose to skip nodes present in the same failure -domain such as racks or datacenters so that Cassandra clusters can tolerate -failures of whole racks and even datacenters of nodes. - -.. _replication-strategy: - -Replication Strategy -^^^^^^^^^^^^^^^^^^^^ - -Cassandra supports pluggable **replication strategies**, which determine which -physical nodes act as replicas for a given token range. Every keyspace of -data has its own replication strategy. All production deployments should use -the :ref:`network-topology-strategy` while the :ref:`simple-strategy` replication -strategy is useful only for testing clusters where you do not yet know the -datacenter layout of the cluster. - -.. _network-topology-strategy: - -``NetworkTopologyStrategy`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``NetworkTopologyStrategy`` allows a replication factor to be specified for each -datacenter in the cluster. Even if your cluster only uses a single datacenter, -``NetworkTopologyStrategy`` should be preferred over ``SimpleStrategy`` to make it -easier to add new physical or virtual datacenters to the cluster later. - -In addition to allowing the replication factor to be specified individually by -datacenter, ``NetworkTopologyStrategy`` also attempts to choose replicas within a -datacenter from different racks as specified by the :ref:`Snitch `. If -the number of racks is greater than or equal to the replication factor for the -datacenter, each replica is guaranteed to be chosen from a different rack. -Otherwise, each rack will hold at least one replica, but some racks may hold -more than one. Note that this rack-aware behavior has some potentially -`surprising implications -`_. For example, if -there are not an even number of nodes in each rack, the data load on the -smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a brand new rack, it will be considered a replica for the entire ring. -For this reason, many operators choose to configure all nodes in a single -availability zone or similar failure domain as a single "rack". - -.. _simple-strategy: - -``SimpleStrategy`` -~~~~~~~~~~~~~~~~~~ - -``SimpleStrategy`` allows a single integer ``replication_factor`` to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if ``replication_factor`` is 3, then three different nodes should store -a copy of each row. - -``SimpleStrategy`` treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until ``replication_factor`` distinct nodes have been added to the set of replicas. - -.. _transient-replication: - -Transient Replication -~~~~~~~~~~~~~~~~~~~~~ - -Transient replication is an experimental feature in Cassandra 4.0 not present -in the original Dynamo paper. It allows you to configure a subset of replicas -to only replicate data that hasn't been incrementally repaired. This allows you -to decouple data redundancy from availability. 
For instance, if you have a -keyspace replicated at rf 3, and alter it to rf 5 with 2 transient replicas, -you go from being able to tolerate one failed replica to being able to tolerate -two, without corresponding increase in storage usage. This is because 3 nodes -will replicate all the data for a given token range, and the other 2 will only -replicate data that hasn't been incrementally repaired. - -To use transient replication, you first need to enable it in -``cassandra.yaml``. Once enabled, both ``SimpleStrategy`` and -``NetworkTopologyStrategy`` can be configured to transiently replicate data. -You configure it by specifying replication factor as -``/` in the read path and -`Hinted handoff ` in the write path. - -These techniques are only best-effort, however, and to guarantee eventual -consistency Cassandra implements `anti-entropy repair ` where replicas -calculate hierarchical hash-trees over their datasets called `Merkle Trees -`_ that can then be compared across -replicas to identify mismatched data. Like the original Dynamo paper Cassandra -supports "full" repairs where replicas hash their entire dataset, create Merkle -trees, send them to each other and sync any ranges that don't match. - -Unlike the original Dynamo paper, Cassandra also implements sub-range repair -and incremental repair. Sub-range repair allows Cassandra to increase the -resolution of the hash trees (potentially down to the single partition level) -by creating a larger number of trees that span only a portion of the data -range. Incremental repair allows Cassandra to only repair the partitions that -have changed since the last repair. - -Tunable Consistency -^^^^^^^^^^^^^^^^^^^ - -Cassandra supports a per-operation tradeoff between consistency and -availability through **Consistency Levels**. Cassandra's consistency levels -are a version of Dynamo's ``R + W > N`` consistency mechanism where operators -could configure the number of nodes that must participate in reads (``R``) -and writes (``W``) to be larger than the replication factor (``N``). In -Cassandra, you instead choose from a menu of common consistency levels which -allow the operator to pick ``R`` and ``W`` behavior without knowing the -replication factor. Generally writes will be visible to subsequent reads when -the read consistency level contains enough nodes to guarantee a quorum intersection -with the write consistency level. - -The following consistency levels are available: - -``ONE`` - Only a single replica must respond. - -``TWO`` - Two replicas must respond. - -``THREE`` - Three replicas must respond. - -``QUORUM`` - A majority (n/2 + 1) of the replicas must respond. - -``ALL`` - All of the replicas must respond. - -``LOCAL_QUORUM`` - A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond. - -``EACH_QUORUM`` - A majority of the replicas in each datacenter must respond. - -``LOCAL_ONE`` - Only a single replica must respond. In a multi-datacenter cluster, this also gaurantees that read requests are not - sent to replicas in a remote datacenter. - -``ANY`` - A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later - attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for - write operations. - -Write operations **are always sent to all replicas**, regardless of consistency -level. 
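The number of responses the coordinator waits for can be sketched as follows; this is a simplified, single-datacenter approximation written for illustration rather than code taken from Cassandra, and ``EACH_QUORUM`` and ``ANY`` are omitted.

.. code-block:: python

    def required_responses(level, rf):
        # Simplified single-datacenter view of the consistency levels listed above.
        quorum = rf // 2 + 1  # majority: n/2 + 1
        return {
            "ONE": 1, "TWO": 2, "THREE": 3,
            "QUORUM": quorum,
            "LOCAL_QUORUM": quorum,  # equals QUORUM when there is only one datacenter
            "LOCAL_ONE": 1,
            "ALL": rf,
        }[level]

    rf = 3
    w = required_responses("QUORUM", rf)  # 2
    r = required_responses("QUORUM", rf)  # 2
    assert w + r > rf  # the write and read quorums overlap in at least one replica
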
The consistency level simply controls how many responses the coordinator -waits for before responding to the client. - -For read operations, the coordinator generally only issues read commands to -enough replicas to satisfy the consistency level. The one exception to this is -when speculative retry may issue a redundant read request to an extra replica -if the original replicas have not responded within a specified time window. - -Picking Consistency Levels -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is common to pick read and write consistency levels such that the replica -sets overlap, resulting in all acknowledged writes being visible to subsequent -reads. This is typically expressed in the same terms Dynamo does, in that ``W + -R > RF``, where ``W`` is the write consistency level, ``R`` is the read -consistency level, and ``RF`` is the replication factor. For example, if ``RF -= 3``, a ``QUORUM`` request will require responses from at least ``2/3`` -replicas. If ``QUORUM`` is used for both writes and reads, at least one of the -replicas is guaranteed to participate in *both* the write and the read request, -which in turn guarantees that the quorums will overlap and the write will be -visible to the read. - -In a multi-datacenter environment, ``LOCAL_QUORUM`` can be used to provide a -weaker but still useful guarantee: reads are guaranteed to see the latest write -from within the same datacenter. This is often sufficient as clients homed to -a single datacenter will read their own writes. - -If this type of strong consistency isn't required, lower consistency levels -like ``LOCAL_ONE`` or ``ONE`` may be used to improve throughput, latency, and -availability. With replication spanning multiple datacenters, ``LOCAL_ONE`` is -typically less available than ``ONE`` but is faster as a rule. Indeed ``ONE`` -will succeed if a single replica is available in any datacenter. - -Distributed Cluster Membership and Failure Detection ----------------------------------------------------- - -The replication protocols and dataset partitioning rely on knowing which nodes -are alive and dead in the cluster so that write and read operations can be -optimally routed. In Cassandra liveness information is shared in a distributed -fashion through a failure detection mechanism based on a gossip protocol. - -.. _gossip: - -Gossip -^^^^^^ - -Gossip is how Cassandra propagates basic cluster bootstrapping information such -as endpoint membership and internode network protocol versions. In Cassandra's -gossip system, nodes exchange state information not only about themselves but -also about other nodes they know about. This information is versioned with a -vector clock of ``(generation, version)`` tuples, where the generation is a -monotonic timestamp and version is a logical clock the increments roughly every -second. These logical clocks allow Cassandra gossip to ignore old versions of -cluster state just by inspecting the logical clocks presented with gossip -messages. - -Every node in the Cassandra cluster runs the gossip task independently and -periodically. Every second, every node in the cluster: - -1. Updates the local node's heartbeat state (the version) and constructs the - node's local view of the cluster gossip endpoint state. -2. Picks a random other node in the cluster to exchange gossip endpoint state - with. -3. Probabilistically attempts to gossip with any unreachable nodes (if one exists) -4. Gossips with a seed node if that didn't happen in step 2. 
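A greatly simplified sketch of this once-per-second loop is shown below; the data structures, the peer-selection probability, and the ``send_syn`` callback are invented for illustration and do not mirror Cassandra's actual Gossiper.

.. code-block:: python

    import random

    def gossip_round(local_state, live_peers, unreachable_peers, seeds, send_syn):
        # One simplified round, loosely following the four steps listed above.
        local_state["heartbeat"] += 1                  # 1. bump our own heartbeat version

        reached_seed = False
        if live_peers:                                 # 2. gossip with one random live peer
            peer = random.choice(live_peers)
            send_syn(peer, local_state)
            reached_seed = peer in seeds

        if unreachable_peers:                          # 3. probabilistically try an unreachable peer
            if random.random() < len(unreachable_peers) / (len(live_peers) + 1):
                send_syn(random.choice(unreachable_peers), local_state)

        if seeds and not reached_seed:                 # 4. make sure a seed hears from us
            send_syn(random.choice(seeds), local_state)

In the real implementation each exchange is a three-message SYN/ACK/ACK2 handshake rather than the single ``send_syn`` call sketched here.
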
- -When an operator first bootstraps a Cassandra cluster they designate certain -nodes as "seed" nodes. Any node can be a seed node and the only difference -between seed and non-seed nodes is seed nodes are allowed to bootstrap into the -ring without seeing any other seed nodes. Furthermore, once a cluster is -bootstrapped, seed nodes become "hotspots" for gossip due to step 4 above. - -As non-seed nodes must be able to contact at least one seed node in order to -bootstrap into the cluster, it is common to include multiple seed nodes, often -one for each rack or datacenter. Seed nodes are often chosen using existing -off-the-shelf service discovery mechanisms. - -.. note:: - Nodes do not have to agree on the seed nodes, and indeed once a cluster is - bootstrapped, newly launched nodes can be configured to use any existing - nodes as "seeds". The only advantage to picking the same nodes as seeds - is it increases their usefullness as gossip hotspots. - -Currently, gossip also propagates token metadata and schema *version* -information. This information forms the control plane for scheduling data -movements and schema pulls. For example, if a node sees a mismatch in schema -version in gossip state, it will schedule a schema sync task with the other -nodes. As token information propagates via gossip it is also the control plane -for teaching nodes which endpoints own what data. - -Ring Membership and Failure Detection -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Gossip forms the basis of ring membership, but the **failure detector** -ultimately makes decisions about if nodes are ``UP`` or ``DOWN``. Every node in -Cassandra runs a variant of the `Phi Accrual Failure Detector -`_, -in which every node is constantly making an independent decision of if their -peer nodes are available or not. This decision is primarily based on received -heartbeat state. For example, if a node does not see an increasing heartbeat -from a node for a certain amount of time, the failure detector "convicts" that -node, at which point Cassandra will stop routing reads to it (writes will -typically be written to hints). If/when the node starts heartbeating again, -Cassandra will try to reach out and connect, and if it can open communication -channels it will mark that node as available. - -.. note:: - UP and DOWN state are local node decisions and are not propagated with - gossip. Heartbeat state is propagated with gossip, but nodes will not - consider each other as "UP" until they can successfully message each other - over an actual network channel. - -Cassandra will never remove a node from gossip state without explicit -instruction from an operator via a decommission operation or a new node -bootstrapping with a ``replace_address_first_boot`` option. This choice is -intentional to allow Cassandra nodes to temporarily fail without causing data -to needlessly re-balance. This also helps to prevent simultaneous range -movements, where multiple replicas of a token range are moving at the same -time, which can violate monotonic consistency and can even cause data loss. - -Incremental Scale-out on Commodity Hardware --------------------------------------------- - -Cassandra scales-out to meet the requirements of growth in data size and -request rates. Scaling-out means adding additional nodes to the ring, and -every additional node brings linear improvements in compute and storage. In -contrast, scaling-up implies adding more capacity to the existing database -nodes. 
Cassandra is also capable of scale-up, and in certain environments it -may be preferable depending on the deployment. Cassandra gives operators the -flexibility to chose either scale-out or scale-up. - -One key aspect of Dynamo that Cassandra follows is to attempt to run on -commodity hardware, and many engineering choices are made under this -assumption. For example, Cassandra assumes nodes can fail at any time, -auto-tunes to make the best use of CPU and memory resources available and makes -heavy use of advanced compression and caching techniques to get the most -storage out of limited memory and storage capabilities. - -Simple Query Model -^^^^^^^^^^^^^^^^^^ - -Cassandra, like Dynamo, chooses not to provide cross-partition transactions -that are common in SQL Relational Database Management Systems (RDBMS). This -both gives the programmer a simpler read and write API, and allows Cassandra to -more easily scale horizontally since multi-partition transactions spanning -multiple nodes are notoriously difficult to implement and typically very -latent. - -Instead, Cassanda chooses to offer fast, consistent, latency at any scale for -single partition operations, allowing retrieval of entire partitions or only -subsets of partitions based on primary key filters. Furthermore, Cassandra does -support single partition compare and swap functionality via the lightweight -transaction CQL API. - -Simple Interface for Storing Records -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra, in a slight departure from Dynamo, chooses a storage interface that -is more sophisticated then "simple key value" stores but significantly less -complex than SQL relational data models. Cassandra presents a wide-column -store interface, where partitions of data contain multiple rows, each of which -contains a flexible set of individually typed columns. Every row is uniquely -identified by the partition key and one or more clustering keys, and every row -can have as many columns as needed. - -This allows users to flexibly add new columns to existing datasets as new -requirements surface. Schema changes involve only metadata changes and run -fully concurrently with live workloads. Therefore, users can safely add columns -to existing Cassandra databases while remaining confident that query -performance will not degrade. diff --git a/src/doc/4.0-beta1/_sources/architecture/guarantees.rst.txt b/src/doc/4.0-beta1/_sources/architecture/guarantees.rst.txt deleted file mode 100644 index 3cff808ec..000000000 --- a/src/doc/4.0-beta1/_sources/architecture/guarantees.rst.txt +++ /dev/null @@ -1,76 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _guarantees: - -Guarantees -============== -Apache Cassandra is a highly scalable and reliable database. 
Cassandra is used in web based applications that serve large number of clients and the quantity of data processed is web-scale (Petabyte) large. Cassandra makes some guarantees about its scalability, availability and reliability. To fully understand the inherent limitations of a storage system in an environment in which a certain level of network partition failure is to be expected and taken into account when designing the system it is important to first briefly introduce the CAP theorem. - -What is CAP? -^^^^^^^^^^^^^ -According to the CAP theorem it is not possible for a distributed data store to provide more than two of the following guarantees simultaneously. - -- Consistency: Consistency implies that every read receives the most recent write or errors out -- Availability: Availability implies that every request receives a response. It is not guaranteed that the response contains the most recent write or data. -- Partition tolerance: Partition tolerance refers to the tolerance of a storage system to failure of a network partition. Even if some of the messages are dropped or delayed the system continues to operate. - -CAP theorem implies that when using a network partition, with the inherent risk of partition failure, one has to choose between consistency and availability and both cannot be guaranteed at the same time. CAP theorem is illustrated in Figure 1. - -.. figure:: Figure_1_guarantees.jpg - -Figure 1. CAP Theorem - -High availability is a priority in web based applications and to this objective Cassandra chooses Availability and Partition Tolerance from the CAP guarantees, compromising on data Consistency to some extent. - -Cassandra makes the following guarantees. - -- High Scalability -- High Availability -- Durability -- Eventual Consistency of writes to a single table -- Lightweight transactions with linearizable consistency -- Batched writes across multiple tables are guaranteed to succeed completely or not at all -- Secondary indexes are guaranteed to be consistent with their local replicas data - -High Scalability -^^^^^^^^^^^^^^^^^ -Cassandra is a highly scalable storage system in which nodes may be added/removed as needed. Using gossip-based protocol a unified and consistent membership list is kept at each node. - -High Availability -^^^^^^^^^^^^^^^^^^^ -Cassandra guarantees high availability of data by implementing a fault-tolerant storage system. Failure detection in a node is detected using a gossip-based protocol. - -Durability -^^^^^^^^^^^^ -Cassandra guarantees data durability by using replicas. Replicas are multiple copies of a data stored on different nodes in a cluster. In a multi-datacenter environment the replicas may be stored on different datacenters. If one replica is lost due to unrecoverable node/datacenter failure the data is not completely lost as replicas are still available. - -Eventual Consistency -^^^^^^^^^^^^^^^^^^^^^^ -Meeting the requirements of performance, reliability, scalability and high availability in production Cassandra is an eventually consistent storage system. Eventually consistent implies that all updates reach all replicas eventually. Divergent versions of the same data may exist temporarily but they are eventually reconciled to a consistent state. Eventual consistency is a tradeoff to achieve high availability and it involves some read and write latencies. - -Lightweight transactions with linearizable consistency -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Data must be read and written in a sequential order. 
Paxos consensus protocol is used to implement lightweight transactions. Paxos protocol implements lightweight transactions that are able to handle concurrent operations using linearizable consistency. Linearizable consistency is sequential consistency with real-time constraints and it ensures transaction isolation with compare and set (CAS) transaction. With CAS replica data is compared and data that is found to be out of date is set to the most consistent value. Reads with linearizable consistency allow reading the current state of the data, which may possibly be uncommitted, without making a new addition or update. - -Batched Writes -^^^^^^^^^^^^^^^ - -The guarantee for batched writes across multiple tables is that they will eventually succeed, or none will. Batch data is first written to batchlog system data, and when the batch data has been successfully stored in the cluster the batchlog data is removed. The batch is replicated to another node to ensure the full batch completes in the event the coordinator node fails. - -Secondary Indexes -^^^^^^^^^^^^^^^^^^ -A secondary index is an index on a column and is used to query a table that is normally not queryable. Secondary indexes when built are guaranteed to be consistent with their local replicas. diff --git a/src/doc/4.0-beta1/_sources/architecture/index.rst.txt b/src/doc/4.0-beta1/_sources/architecture/index.rst.txt deleted file mode 100644 index 58eda1377..000000000 --- a/src/doc/4.0-beta1/_sources/architecture/index.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Architecture -============ - -This section describes the general architecture of Apache Cassandra. - -.. toctree:: - :maxdepth: 2 - - overview - dynamo - storage_engine - guarantees - diff --git a/src/doc/4.0-beta1/_sources/architecture/overview.rst.txt b/src/doc/4.0-beta1/_sources/architecture/overview.rst.txt deleted file mode 100644 index e5fcbe3b5..000000000 --- a/src/doc/4.0-beta1/_sources/architecture/overview.rst.txt +++ /dev/null @@ -1,114 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -.. _overview: - -Overview -======== - -Apache Cassandra is an open source, distributed, NoSQL database. It presents -a partitioned wide column storage model with eventually consistent semantics. - -Apache Cassandra was initially designed at `Facebook -`_ -using a staged event-driven architecture (`SEDA -`_) to implement a combination of -Amazon’s `Dynamo -`_ -distributed storage and replication techniques combined with Google's `Bigtable -`_ -data and storage engine model. Dynamo and Bigtable were both developed to meet -emerging requirements for scalable, reliable and highly available storage -systems, but each had areas that could be improved. - -Cassandra was designed as a best in class combination of both systems to meet -emerging large scale, both in data footprint and query volume, storage -requirements. As applications began to require full global replication and -always available low-latency reads and writes, it became imperative to design a -new kind of database model as the relational database systems of the time -struggled to meet the new requirements of global scale applications. - -Systems like Cassandra are designed for these challenges and seek the -following design objectives: - -- Full multi-master database replication -- Global availability at low latency -- Scaling out on commodity hardware -- Linear throughput increase with each additional processor -- Online load balancing and cluster growth -- Partitioned key-oriented queries -- Flexible schema - -Features --------- - -Cassandra provides the Cassandra Query Language (CQL), an SQL-like language, -to create and update database schema and access data. CQL allows users to -organize data within a cluster of Cassandra nodes using: - -- **Keyspace**: defines how a dataset is replicated, for example in which - datacenters and how many copies. Keyspaces contain tables. -- **Table**: defines the typed schema for a collection of partitions. Cassandra - tables have flexible addition of new columns to tables with zero downtime. - Tables contain partitions, which contain partitions, which contain columns. -- **Partition**: defines the mandatory part of the primary key all rows in - Cassandra must have. All performant queries supply the partition key in - the query. -- **Row**: contains a collection of columns identified by a unique primary key - made up of the partition key and optionally additional clustering keys. -- **Column**: A single datum with a type which belong to a row. - -CQL supports numerous advanced features over a partitioned dataset such as: - -- Single partition lightweight transactions with atomic compare and set - semantics. -- User-defined types, functions and aggregates -- Collection types including sets, maps, and lists. -- Local secondary indices -- (Experimental) materialized views - -Cassandra explicitly chooses not to implement operations that require cross -partition coordination as they are typically slow and hard to provide highly -available global semantics. For example Cassandra does not support: - -- Cross partition transactions -- Distributed joins -- Foreign keys or referential integrity. - -Operating ---------- - -Apache Cassandra configuration settings are configured in the ``cassandra.yaml`` -file that can be edited by hand or with the aid of configuration management tools. 
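For example, a configuration-management script might patch a handful of settings in ``cassandra.yaml`` before starting the node. A minimal sketch using PyYAML follows; the file path and the particular settings chosen are examples only.

.. code-block:: python

    import yaml  # PyYAML

    CONFIG = "/etc/cassandra/cassandra.yaml"  # location varies by installation

    with open(CONFIG) as f:
        conf = yaml.safe_load(f)

    # Illustrative edits only; choose values that suit your own cluster.
    conf["cluster_name"] = "Test Cluster"
    conf["num_tokens"] = 16
    conf["hinted_handoff_enabled"] = True

    with open(CONFIG, "w") as f:
        yaml.safe_dump(conf, f, default_flow_style=False)
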
-Some settings can be manipulated live using an online interface, but others -require a restart of the database to take effect. - -Cassandra provides tools for managing a cluster. The ``nodetool`` command -interacts with Cassandra's live control interface, allowing runtime manipulation -of many settings from ``cassandra.yaml``. The ``auditlogviewer`` is used -to view the audit logs. The ``fqltool`` is used to view, replay and compare -full query logs. The ``auditlogviewer`` and ``fqltool`` are new tools in -Apache Cassandra 4.0. - -In addition, Cassandra supports out of the box atomic snapshot functionality, -which presents a point in time snapshot of Cassandra's data for easy -integration with many backup tools. Cassandra also supports incremental backups -where data can be backed up as it is written. - -Apache Cassandra 4.0 has added several new features including virtual tables. -transient replication, audit logging, full query logging, and support for Java -11. Two of these features are experimental: transient replication and Java 11 -support. diff --git a/src/doc/4.0-beta1/_sources/architecture/storage_engine.rst.txt b/src/doc/4.0-beta1/_sources/architecture/storage_engine.rst.txt deleted file mode 100644 index 23b738de7..000000000 --- a/src/doc/4.0-beta1/_sources/architecture/storage_engine.rst.txt +++ /dev/null @@ -1,208 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Storage Engine --------------- - -.. _commit-log: - -CommitLog -^^^^^^^^^ - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables. - -All mutations write optimized by storing in commitlog segments, reducing the number of seeks needed to write to disk. Commitlog Segments are limited by the "commitlog_segment_size_in_mb" option, once the size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all its data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running "nodetool drain" before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup. - -- ``commitlog_segment_size_in_mb``: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via max_mutation_size_in_kb setting in cassandra.yaml. 
The default is half the size commitlog_segment_size_in_mb * 1024. - -***NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*** - -*Default Value:* 32 - -Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied. - -- ``commitlog_sync``: may be either “periodic” or “batch.” - - - ``batch``: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait "commitlog_sync_batch_window_in_ms" milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason. - - - ``commitlog_sync_batch_window_in_ms``: Time to wait between "batch" fsyncs - *Default Value:* 2 - - - ``periodic``: In periodic mode, writes are immediately ack'ed, and the CommitLog is simply synced every "commitlog_sync_period_in_ms" milliseconds. - - - ``commitlog_sync_period_in_ms``: Time to wait between "periodic" fsyncs - *Default Value:* 10000 - -*Default Value:* batch - -*** NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period or more if the sync is delayed. If using "batch" mode, it is recommended to store commitlogs in a separate, dedicated device.** - - -- ``commitlog_directory``: This option is commented out by default When running on magnetic HDD, this should be a separate spindle than the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -- ``commitlog_compression``: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported. - -(Default Value: (complex option):: - - # - class_name: LZ4Compressor - # parameters: - # - - -- ``commitlog_total_space_in_mb``: Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume. - -*Default Value:* 8192 - -.. _memtables: - -Memtables -^^^^^^^^^ - -Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable `SSTables`_. This can be triggered in several -ways: - -- The memory usage of the memtables exceeds the configured threshold (see ``memtable_cleanup_threshold``) -- The :ref:`commit-log` approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to - be freed - -Memtables may be stored entirely on-heap or partially off-heap, depending on ``memtable_allocation_type``. - -SSTables -^^^^^^^^ - -SSTables are the immutable data files that Cassandra uses for persisting data on disk. - -As SSTables are flushed to disk from :ref:`memtables` or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed. 
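To tie the three structures together, here is a toy sketch of the write path described above: append to the commit log for durability, buffer the write in a memtable, and flush to an immutable SSTable once a threshold is crossed. The class, names, and threshold are invented for illustration and do not reflect Cassandra's internals.

.. code-block:: python

    class ToyStorageEngine:
        """Illustration only: commit log -> memtable -> immutable SSTable."""

        def __init__(self, flush_threshold=4):
            self.commitlog = []        # append-only log, gives durability
            self.memtable = {}         # in-memory buffer of recent writes
            self.sstables = []         # immutable, flushed memtables
            self.flush_threshold = flush_threshold

        def write(self, key, value):
            self.commitlog.append((key, value))   # 1. append to the commit log first
            self.memtable[key] = value            # 2. then buffer in the memtable
            if len(self.memtable) >= self.flush_threshold:
                self.flush()                      # 3. spill to "disk" once large enough

        def flush(self):
            # The memtable becomes an immutable, sorted SSTable; commit log segments
            # whose data has been flushed could now be recycled.
            self.sstables.append(dict(sorted(self.memtable.items())))
            self.memtable.clear()
            self.commitlog.clear()

    engine = ToyStorageEngine()
    for i in range(5):
        engine.write("k%d" % i, "v%d" % i)
    print(len(engine.sstables), len(engine.memtable))   # -> 1 1
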
- -Each SSTable is comprised of multiple components stored in separate files: - -``Data.db`` - The actual data, i.e. the contents of rows. - -``Index.db`` - An index from partition keys to positions in the ``Data.db`` file. For wide partitions, this may also include an - index to rows within a partition. - -``Summary.db`` - A sampling of (by default) every 128th entry in the ``Index.db`` file. - -``Filter.db`` - A Bloom Filter of the partition keys in the SSTable. - -``CompressionInfo.db`` - Metadata about the offsets and lengths of compression chunks in the ``Data.db`` file. - -``Statistics.db`` - Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, - repair, compression, TTLs, and more. - -``Digest.crc32`` - A CRC-32 digest of the ``Data.db`` file. - -``TOC.txt`` - A plain text list of the component files for the SSTable. - -Within the ``Data.db`` file, rows are organized by partition. These partitions are sorted in token order (i.e. by a -hash of the partition key when the default partitioner, ``Murmur3Partition``, is used). Within a partition, rows are -stored in the order of their clustering keys. - -SSTables can be optionally compressed using block-based compression. - -SSTable Versions -^^^^^^^^^^^^^^^^ - -This section was created using the following -`gist `_ -which utilized this original -`source `_. - -The version numbers, to date are: - -Version 0 -~~~~~~~~~ - -* b (0.7.0): added version to sstable filenames -* c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings -* d (0.7.0): row size in data component becomes a long instead of int -* e (0.7.0): stores undecorated keys in data and index components -* f (0.7.0): switched bloom filter implementations in data component -* g (0.8): tracks flushed-at context in metadata component - -Version 1 -~~~~~~~~~ - -* h (1.0): tracks max client timestamp in metadata component -* hb (1.0.3): records compression ration in metadata component -* hc (1.0.4): records partitioner in metadata component -* hd (1.0.10): includes row tombstones in maxtimestamp -* he (1.1.3): includes ancestors generation in metadata component -* hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782) -* ia (1.2.0): - - * column indexes are promoted to the index file - * records estimated histogram of deletion times in tombstones - * bloom filter (keys and columns) upgraded to Murmur3 -* ib (1.2.1): tracks min client timestamp in metadata component -* ic (1.2.5): omits per-row bloom filter of column names - -Version 2 -~~~~~~~~~ - -* ja (2.0.0): - - * super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. 
We do need a major version bump however, because we should not allow streaming of super columns into this new format) - * tracks max local deletiontime in sstable metadata - * records bloom_filter_fp_chance in metadata component - * remove data size and column count from data file (CASSANDRA-4180) - * tracks max/min column values (according to comparator) -* jb (2.0.1): - - * switch from crc32 to adler32 for compression checksums - * checksum the compressed data -* ka (2.1.0): - - * new Statistics.db file format - * index summaries can be downsampled and the sampling level is persisted - * switch uncompressed checksums to adler32 - * tracks presense of legacy (local and remote) counter shards -* la (2.2.0): new file name format -* lb (2.2.7): commit log lower bound included - -Version 3 -~~~~~~~~~ - -* ma (3.0.0): - - * swap bf hash order - * store rows natively -* mb (3.0.7, 3.7): commit log lower bound included -* mc (3.0.8, 3.9): commit log intervals included - -Example Code -~~~~~~~~~~~~ - -The following example is useful for finding all sstables that do not match the "ib" SSTable version - -.. code-block:: bash - - find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots" diff --git a/src/doc/4.0-beta1/_sources/bugs.rst.txt b/src/doc/4.0-beta1/_sources/bugs.rst.txt deleted file mode 100644 index 32d676f9d..000000000 --- a/src/doc/4.0-beta1/_sources/bugs.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Reporting Bugs -============== - -If you encounter a problem with Cassandra, the first places to ask for help are the :ref:`user mailing list -` and the ``cassandra`` :ref:`Slack room `. - -If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a -ticket through the `Apache Cassandra JIRA `__. Please provide as much -details as you can on your problem, and don't forget to indicate which version of Cassandra you are running and on which -environment. - -Further details on how to contribute can be found at our :doc:`development/index` section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path. diff --git a/src/doc/4.0-beta1/_sources/configuration/cassandra_config_file.rst.txt b/src/doc/4.0-beta1/_sources/configuration/cassandra_config_file.rst.txt deleted file mode 100644 index 32538d9de..000000000 --- a/src/doc/4.0-beta1/_sources/configuration/cassandra_config_file.rst.txt +++ /dev/null @@ -1,2075 +0,0 @@ -.. _cassandra-yaml: - -Cassandra Configuration File -============================ - -``cluster_name`` ----------------- -The name of the cluster. 
This is mainly used to prevent machines in -one logical cluster from joining another. - -*Default Value:* 'Test Cluster' - -``num_tokens`` --------------- - -This defines the number of tokens randomly assigned to this node on the ring -The more tokens, relative to other nodes, the larger the proportion of data -that this node will store. You probably want all nodes to have the same number -of tokens assuming they have equal hardware capability. - -If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below. - -Specifying initial_token will override this setting on the node's initial start, -on subsequent starts, this setting will apply even if initial token is set. - -If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations - -*Default Value:* 256 - -``allocate_tokens_for_keyspace`` --------------------------------- -*This option is commented out by default.* - -Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replica factor. - -The load assigned to each node will be close to proportional to its number of -vnodes. - -Only supported with the Murmur3Partitioner. - -Replica factor is determined via the replication strategy used by the specified -keyspace. - -*Default Value:* KEYSPACE - -``allocate_tokens_for_local_replication_factor`` ------------------------------------------------- -*This option is commented out by default.* - -Replica factor is explicitly set, regardless of keyspace or datacenter. -This is the replica factor within the datacenter, like NTS. - -*Default Value:* 3 - -``initial_token`` ------------------ -*This option is commented out by default.* - -initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) -- in which case you should provide a -comma-separated list -- it's primarily used when adding nodes to legacy clusters -that do not have vnodes enabled. - -``hinted_handoff_enabled`` --------------------------- - -See http://wiki.apache.org/cassandra/HintedHandoff -May either be "true" or "false" to enable globally - -*Default Value:* true - -``hinted_handoff_disabled_datacenters`` ---------------------------------------- -*This option is commented out by default.* - -When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff - -*Default Value (complex option)*:: - - # - DC1 - # - DC2 - -``max_hint_window_in_ms`` -------------------------- -this defines the maximum amount of time a dead host will have hints -generated. After it has been dead this long, new hints for it will not be -created until it has been seen alive and gone down again. - -*Default Value:* 10800000 # 3 hours - -``hinted_handoff_throttle_in_kb`` ---------------------------------- - -Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.) 
- -*Default Value:* 1024 - -``max_hints_delivery_threads`` ------------------------------- - -Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower - -*Default Value:* 2 - -``hints_directory`` -------------------- -*This option is commented out by default.* - -Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints. - -*Default Value:* /var/lib/cassandra/hints - -``hints_flush_period_in_ms`` ----------------------------- - -How often hints should be flushed from the internal buffers to disk. -Will *not* trigger fsync. - -*Default Value:* 10000 - -``max_hints_file_size_in_mb`` ------------------------------ - -Maximum size for a single hints file, in megabytes. - -*Default Value:* 128 - -``hints_compression`` ---------------------- -*This option is commented out by default.* - -Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``batchlog_replay_throttle_in_kb`` ----------------------------------- -Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster. - -*Default Value:* 1024 - -``authenticator`` ------------------ - -Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}. - -- AllowAllAuthenticator performs no checks - set it to disable authentication. -- PasswordAuthenticator relies on username/password pairs to authenticate - users. It keeps usernames and hashed passwords in system_auth.roles table. - Please increase system_auth keyspace replication factor if you use this authenticator. - If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) - -*Default Value:* AllowAllAuthenticator - -``authorizer`` --------------- - -Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}. - -- AllowAllAuthorizer allows any action to any user - set it to disable authorization. -- CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllAuthorizer - -``role_manager`` ----------------- - -Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable. - -- CassandraRoleManager stores role data in the system_auth keyspace. Please - increase system_auth keyspace replication factor if you use this role manager. 
- -*Default Value:* CassandraRoleManager - -``network_authorizer`` ----------------------- - -Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}. - -- AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization. -- CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please - increase system_auth keyspace replication factor if you use this authorizer. - -*Default Value:* AllowAllNetworkAuthorizer - -``roles_validity_in_ms`` ------------------------- - -Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator. - -*Default Value:* 2000 - -``roles_update_interval_in_ms`` -------------------------------- -*This option is commented out by default.* - -Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms. - -*Default Value:* 2000 - -``permissions_validity_in_ms`` ------------------------------- - -Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer. - -*Default Value:* 2000 - -``permissions_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms. - -*Default Value:* 2000 - -``credentials_validity_in_ms`` ------------------------------- - -Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching. - -*Default Value:* 2000 - -``credentials_update_interval_in_ms`` -------------------------------------- -*This option is commented out by default.* - -Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. 
If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms. - -*Default Value:* 2000 - -``partitioner`` ---------------- - -The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. The partitioner can NOT be -changed without reloading all data. If you are adding nodes or upgrading, -you should set this to the same partitioner that you are currently using. - -The default partitioner is the Murmur3Partitioner. Older partitioners -such as the RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner have been included for backward compatibility only. -For new clusters, you should NOT change this value. - - -*Default Value:* org.apache.cassandra.dht.Murmur3Partitioner - -``data_file_directories`` -------------------------- -*This option is commented out by default.* - -Directories where Cassandra should store data on disk. If multiple -directories are specified, Cassandra will spread data evenly across -them by partitioning the token ranges. -If not set, the default directory is $CASSANDRA_HOME/data/data. - -*Default Value (complex option)*:: - - # - /var/lib/cassandra/data - -``commitlog_directory`` ------------------------ -*This option is commented out by default.* -commit log. when running on magnetic HDD, this should be a -separate spindle than the data directories. -If not set, the default directory is $CASSANDRA_HOME/data/commitlog. - -*Default Value:* /var/lib/cassandra/commitlog - -``cdc_enabled`` ---------------- - -Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory). - -*Default Value:* false - -``cdc_raw_directory`` ---------------------- -*This option is commented out by default.* - -CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw. - -*Default Value:* /var/lib/cassandra/cdc_raw - -``disk_failure_policy`` ------------------------ - -Policy for data disk failures: - -die - shut down gossip and client transports and kill the JVM for any fs errors or - single-sstable errors, so the node can be replaced. - -stop_paranoid - shut down gossip and client transports even for single-sstable errors, - kill the JVM for errors during startup. - -stop - shut down gossip and client transports, leaving the node effectively dead, but - can still be inspected via JMX, kill the JVM for errors during startup. - -best_effort - stop using the failed disk and respond to requests based on - remaining available sstables. This means you WILL see obsolete - data at CL.ONE! - -ignore - ignore fatal errors and let requests fail, as in pre-1.2 Cassandra - -*Default Value:* stop - -``commit_failure_policy`` -------------------------- - -Policy for commit disk failures: - -die - shut down the node and kill the JVM, so the node can be replaced. - -stop - shut down the node, leaving the node effectively dead, but - can still be inspected via JMX. 
- -stop_commit - shutdown the commit log, letting writes collect but - continuing to service reads, as in pre-2.0.5 Cassandra - -ignore - ignore fatal errors and let the batches fail - -*Default Value:* stop - -``prepared_statements_cache_size_mb`` -------------------------------------- - -Maximum size of the native protocol prepared statement cache - -Valid values are either "auto" (omitting the value) or a value greater 0. - -Note that specifying a too large value will result in long running GCs and possbily -out-of-memory errors. Keep the value at a small fraction of the heap. - -If you constantly see "prepared statements discarded in the last minute because -cache limit reached" messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts. - -Do only change the default value, if you really have more prepared statements than -fit in the cache. In most cases it is not neccessary to change this value. -Constantly re-preparing statements is a performance penalty. - -Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater - -``key_cache_size_in_mb`` ------------------------- - -Maximum size of the key cache in memory. - -Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it's worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It's best to only use the -row cache if you have hot rows or static rows. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. - -``key_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 14400 or 4 hours. - -*Default Value:* 14400 - -``key_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``row_cache_class_name`` ------------------------- -*This option is commented out by default.* - -Row cache implementation class name. Available implementations: - -org.apache.cassandra.cache.OHCProvider - Fully off-heap row cache implementation (default). - -org.apache.cassandra.cache.SerializingCacheProvider - This is the row cache implementation availabile - in previous releases of Cassandra. - -*Default Value:* org.apache.cassandra.cache.OHCProvider - -``row_cache_size_in_mb`` ------------------------- - -Maximum size of the row cache in memory. -Please note that OHC cache implementation requires some additional off-heap memory to manage -the map structures and some in-flight memory during operations before/after cache entries can be -accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -Do not specify more memory that the system can afford in the worst usual situation and leave some -headroom for OS block level cache. 
Do never allow your system to swap. - -Default value is 0, to disable row caching. - -*Default Value:* 0 - -``row_cache_save_period`` -------------------------- - -Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file. - -Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use. - -Default is 0 to disable saving the row cache. - -*Default Value:* 0 - -``row_cache_keys_to_save`` --------------------------- -*This option is commented out by default.* - -Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved - -*Default Value:* 100 - -``counter_cache_size_in_mb`` ----------------------------- - -Maximum size of the counter cache in memory. - -Counter cache helps to reduce counter locks' contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it's relatively cheap. - -NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. - -Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. - -``counter_cache_save_period`` ------------------------------ - -Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file. - -Default is 7200 or 2 hours. - -*Default Value:* 7200 - -``counter_cache_keys_to_save`` ------------------------------- -*This option is commented out by default.* - -Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved - -*Default Value:* 100 - -``saved_caches_directory`` --------------------------- -*This option is commented out by default.* - -saved caches -If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. - -*Default Value:* /var/lib/cassandra/saved_caches - -``commitlog_sync_batch_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -commitlog_sync may be either "periodic", "group", or "batch." - -When in batch mode, Cassandra won't ack writes until the commit log -has been flushed to disk. Each incoming write will trigger the flush task. -commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had -almost no value, and is being removed. - - -*Default Value:* 2 - -``commitlog_sync_group_window_in_ms`` -------------------------------------- -*This option is commented out by default.* - -group mode is similar to batch mode, where Cassandra will not ack writes -until the commit log has been flushed to disk. The difference is group -mode will wait up to commitlog_sync_group_window_in_ms between flushes. - - -*Default Value:* 1000 - -``commitlog_sync`` ------------------- - -the default option is "periodic" where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds. 
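For illustration, the default periodic behaviour described above corresponds to the following cassandra.yaml entries (default values only)::

    commitlog_sync: periodic
    commitlog_sync_period_in_ms: 10000   # sync the commit log every 10 seconds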
- -*Default Value:* periodic - -``commitlog_sync_period_in_ms`` -------------------------------- - -*Default Value:* 10000 - -``periodic_commitlog_sync_lag_block_in_ms`` -------------------------------------------- -*This option is commented out by default.* - -When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete. - -``commitlog_segment_size_in_mb`` --------------------------------- - -The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables. - -The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048. - -NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024 - - -*Default Value:* 32 - -``commitlog_compression`` -------------------------- -*This option is commented out by default.* - -Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported. - -*Default Value (complex option)*:: - - # - class_name: LZ4Compressor - # parameters: - # - - -``table`` ---------- -*This option is commented out by default.* -Compression to apply to SSTables as they flush for compressed tables. -Note that tables without compression enabled do not respect this flag. - -As high ratio compressors like LZ4HC, Zstd, and Deflate can potentially -block flushes for too long, the default is to flush with a known fast -compressor in those cases. Options are: - -none : Flush without compressing blocks but while still doing checksums. -fast : Flush with a fast compressor. If the table is already using a - fast compressor that compressor is used. - -*Default Value:* Always flush with the same compressor that the table uses. This - -``flush_compression`` ---------------------- -*This option is commented out by default.* - was the pre 4.0 behavior. - - -*Default Value:* fast - -``seed_provider`` ------------------ - -any class that implements the SeedProvider interface and has a -constructor that takes a Map of parameters will do. - -*Default Value (complex option)*:: - - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: ",," - - seeds: "127.0.0.1:7000" - -``concurrent_reads`` --------------------- -For workloads with more data than can fit in memory, Cassandra's -bottleneck will be reads that need to fetch data from -disk. "concurrent_reads" should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -"concurrent_counter_writes", since counter writes read the current -values before incrementing and writing them back. 
- -On the other hand, since writes are almost never IO bound, the ideal -number of "concurrent_writes" is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb. - -*Default Value:* 32 - -``concurrent_writes`` ---------------------- - -*Default Value:* 32 - -``concurrent_counter_writes`` ------------------------------ - -*Default Value:* 32 - -``concurrent_materialized_view_writes`` ---------------------------------------- - -For materialized view writes, as there is a read involved, so this should -be limited by the less of concurrent reads or concurrent writes. - -*Default Value:* 32 - -``file_cache_size_in_mb`` -------------------------- -*This option is commented out by default.* - -Maximum memory to use for sstable chunk cache and buffer pooling. -32MB of this are reserved for pooling buffers, the rest is used as an -cache that holds uncompressed sstable chunks. -Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -so is in addition to the memory allocated for heap. The cache also has on-heap -overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -if the default 64k chunk size is used). -Memory is only allocated when needed. - -*Default Value:* 512 - -``buffer_pool_use_heap_if_exhausted`` -------------------------------------- -*This option is commented out by default.* - -Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. - - -*Default Value:* true - -``disk_optimization_strategy`` ------------------------------- -*This option is commented out by default.* - -The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks) - -*Default Value:* ssd - -``memtable_heap_space_in_mb`` ------------------------------ -*This option is commented out by default.* - -Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap. - -*Default Value:* 2048 - -``memtable_offheap_space_in_mb`` --------------------------------- -*This option is commented out by default.* - -*Default Value:* 2048 - -``memtable_cleanup_threshold`` ------------------------------- -*This option is commented out by default.* - -memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information. - -Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load. - -memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) - -*Default Value:* 0.11 - -``memtable_allocation_type`` ----------------------------- - -Specify the way Cassandra allocates and manages memtable memory. 
-Options are: - -heap_buffers - on heap nio buffers - -offheap_buffers - off heap (direct) nio buffers - -offheap_objects - off heap objects - -*Default Value:* heap_buffers - -``repair_session_space_in_mb`` ------------------------------- -*This option is commented out by default.* - -Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair. - -For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096. - - -``commitlog_total_space_in_mb`` -------------------------------- -*This option is commented out by default.* - -Total space to use for commit logs on disk. - -If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies. - -The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume. - - -*Default Value:* 8192 - -``memtable_flush_writers`` --------------------------- -*This option is commented out by default.* - -This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound. - -Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time. - -You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory. - -memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers. - -Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead. - -There is a direct tradeoff between number of memtables that can be flushed concurrently -and flush size and frequency. More is not better you just need enough flush writers -to never stall waiting for flushing to free memory. - - -*Default Value:* 2 - -``cdc_total_space_in_mb`` -------------------------- -*This option is commented out by default.* - -Total space to use for change-data-capture logs on disk. - -If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed. - -The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides. 
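A sketch of the memtable and commit log sizing options discussed above, using the default values given in this section (illustrative only)::

    memtable_allocation_type: heap_buffers
    memtable_flush_writers: 2            # two is generally enough for a single fast data directory
    commitlog_total_space_in_mb: 8192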
- -*Default Value:* 4096 - -``cdc_free_space_check_interval_ms`` ------------------------------------- -*This option is commented out by default.* - -When we hit our cdc_raw limit and the CDCCompactor is either running behind -or experiencing backpressure, we check at the following interval to see if any -new space for cdc-tracked tables has been made available. Default to 250ms - -*Default Value:* 250 - -``index_summary_capacity_in_mb`` --------------------------------- - -A fixed memory pool size in MB for for SSTable index summaries. If left -empty, this will default to 5% of the heap size. If the memory usage of -all index summaries exceeds this limit, SSTables with low read rates will -shrink their index summaries in order to meet this limit. However, this -is a best-effort process. In extreme conditions Cassandra may need to use -more than this amount of memory. - -``index_summary_resize_interval_in_minutes`` --------------------------------------------- - -How frequently index summaries should be resampled. This is done -periodically to redistribute memory from the fixed-size pool to sstables -proportional their recent read rates. Setting to -1 will disable this -process, leaving existing index summaries at their current sampling level. - -*Default Value:* 60 - -``trickle_fsync`` ------------------ - -Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters. - -*Default Value:* false - -``trickle_fsync_interval_in_kb`` --------------------------------- - -*Default Value:* 10240 - -``storage_port`` ----------------- - -TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7000 - -``ssl_storage_port`` --------------------- - -SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 7001 - -``listen_address`` ------------------- - -Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate! - -Set listen_address OR listen_interface, not both. - -Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be). - -Setting listen_address to 0.0.0.0 is always wrong. - - -*Default Value:* localhost - -``listen_interface`` --------------------- -*This option is commented out by default.* - -Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth0 - -``listen_interface_prefer_ipv6`` --------------------------------- -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. 
If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_address`` ---------------------- -*This option is commented out by default.* - -Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address - -*Default Value:* 1.2.3.4 - -``listen_on_broadcast_address`` -------------------------------- -*This option is commented out by default.* - -When using multiple physical network interfaces, set this -to true to listen on broadcast_address in addition to -the listen_address, allowing nodes to communicate in both -interfaces. -Ignore this property if the network configuration automatically -routes between the public and private networks such as EC2. - -*Default Value:* false - -``internode_authenticator`` ---------------------------- -*This option is commented out by default.* - -Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes. - -*Default Value:* org.apache.cassandra.auth.AllowAllInternodeAuthenticator - -``start_native_transport`` --------------------------- - -Whether to start the native transport server. -The address on which the native transport is bound is defined by rpc_address. - -*Default Value:* true - -``native_transport_port`` -------------------------- -port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* 9042 - -``native_transport_port_ssl`` ------------------------------ -*This option is commented out by default.* -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted. - -*Default Value:* 9142 - -``native_transport_max_threads`` --------------------------------- -*This option is commented out by default.* -The maximum threads for handling requests (note that idle threads are stopped -after 30 seconds so there is not corresponding minimum setting). - -*Default Value:* 128 - -``native_transport_max_frame_size_in_mb`` ------------------------------------------ -*This option is commented out by default.* - -The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you're changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048. - -*Default Value:* 256 - -``native_transport_frame_block_size_in_kb`` -------------------------------------------- -*This option is commented out by default.* - -If checksumming is enabled as a protocol option, denotes the size of the chunks into which frame -are bodies will be broken and checksummed. - -*Default Value:* 32 - -``native_transport_max_concurrent_connections`` ------------------------------------------------ -*This option is commented out by default.* - -The maximum number of concurrent client connections. -The default is -1, which means unlimited. 
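A sketch of the native transport settings covered above, with the defaults quoted in this section::

    start_native_transport: true
    native_transport_port: 9042
    native_transport_max_concurrent_connections: -1   # -1 means unlimited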
- -*Default Value:* -1 - -``native_transport_max_concurrent_connections_per_ip`` ------------------------------------------------------- -*This option is commented out by default.* - -The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited. - -*Default Value:* -1 - -``native_transport_allow_older_protocols`` ------------------------------------------- - -Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored. - -*Default Value:* true - -``native_transport_idle_timeout_in_ms`` ---------------------------------------- -*This option is commented out by default.* - -Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period. - -Clients may implement heartbeats by sending OPTIONS native protocol message after a timeout, which -will reset idle timeout timer on the server side. To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side. - -Idle connection timeouts are disabled by default. - -*Default Value:* 60000 - -``rpc_address`` ---------------- - -The address or interface to bind the native transport server to. - -Set rpc_address OR rpc_interface, not both. - -Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node). - -Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0. - -For security reasons, you should not expose this port to the internet. Firewall it if needed. - -*Default Value:* localhost - -``rpc_interface`` ------------------ -*This option is commented out by default.* - -Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported. - -*Default Value:* eth1 - -``rpc_interface_prefer_ipv6`` ------------------------------ -*This option is commented out by default.* - -If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. - -*Default Value:* false - -``broadcast_rpc_address`` -------------------------- -*This option is commented out by default.* - -RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set. 
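For illustration, a node that binds the native transport to all interfaces must also advertise a concrete address, as described above. The IP address used here is a hypothetical example::

    listen_address: 192.168.1.10          # example address; never set this to 0.0.0.0
    rpc_address: 0.0.0.0
    broadcast_rpc_address: 192.168.1.10   # required because rpc_address is 0.0.0.0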
- -*Default Value:* 1.2.3.4 - -``rpc_keepalive`` ------------------ - -enable or disable keepalive on rpc/native connections - -*Default Value:* true - -``internode_send_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem -See also: -/proc/sys/net/core/wmem_max -/proc/sys/net/core/rmem_max -/proc/sys/net/ipv4/tcp_wmem -/proc/sys/net/ipv4/tcp_wmem -and 'man tcp' - -``internode_recv_buff_size_in_bytes`` -------------------------------------- -*This option is commented out by default.* - -Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem - -``incremental_backups`` ------------------------ - -Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator's -responsibility. - -*Default Value:* false - -``snapshot_before_compaction`` ------------------------------- - -Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won't clean up the -snapshots for you. Mostly useful if you're paranoid when there -is a data format change. - -*Default Value:* false - -``auto_snapshot`` ------------------ - -Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop. - -*Default Value:* true - -``column_index_size_in_kb`` ---------------------------- - -Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these: - -- a smaller granularity means more index entries are generated - and looking up rows withing the partition by collation column - is faster -- but, Cassandra will keep the collation index in memory for hot - rows (as part of the key cache), so a larger granularity means - you can cache more hot rows - -*Default Value:* 64 - -``column_index_cache_size_in_kb`` ---------------------------------- - -Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk. - -Note that this size refers to the size of the -serialized index information and not the size of the partition. - -*Default Value:* 2 - -``concurrent_compactors`` -------------------------- -*This option is commented out by default.* - -Number of simultaneous compactions to allow, NOT including -validation "compactions" for anti-entropy repair. Simultaneous -compactions can help preserve read performance in a mixed read/write -workload, by mitigating the tendency of small sstables to accumulate -during a single long running compactions. The default is usually -fine and if you experience problems with compaction running too -slowly or too fast, you should look at -compaction_throughput_mb_per_sec first. 
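The snapshot and backup flags described above could be combined like this, shown with the defaults from this section::

    incremental_backups: false
    snapshot_before_compaction: false
    auto_snapshot: true                   # strongly advised to keep this enabled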
- -concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8. - -If your data directories are backed by SSD, you should increase this -to the number of cores. - -*Default Value:* 1 - -``concurrent_validations`` --------------------------- -*This option is commented out by default.* - -Number of simultaneous repair validations to allow. Default is unbounded -Values less than one are interpreted as unbounded (the default) - -*Default Value:* 0 - -``concurrent_materialized_view_builders`` ------------------------------------------ - -Number of simultaneous materialized view builder tasks to allow. - -*Default Value:* 1 - -``compaction_throughput_mb_per_sec`` ------------------------------------- - -Throttles compaction to the given total throughput across the entire -system. The faster you insert data, the faster you need to compact in -order to keep the sstable count down, but in general, setting this to -16 to 32 times the rate you are inserting data is more than sufficient. -Setting this to 0 disables throttling. Note that this account for all types -of compaction, including validation compaction. - -*Default Value:* 16 - -``sstable_preemptive_open_interval_in_mb`` ------------------------------------------- - -When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot - -*Default Value:* 50 - -``stream_entire_sstables`` --------------------------- -*This option is commented out by default.* - -When enabled, permits Cassandra to zero-copy stream entire eligible -SSTables between nodes, including every component. -This speeds up the network transfer significantly subject to -throttling specified by stream_throughput_outbound_megabits_per_sec. -Enabling this will reduce the GC pressure on sending and receiving node. -When unset, the default is enabled. While this feature tries to keep the -disks balanced, it cannot guarantee it. This feature will be automatically -disabled if internode encryption is enabled. Currently this can be used with -Leveled Compaction. Once CASSANDRA-14586 is fixed other compaction strategies -will benefit as well when used in combination with CASSANDRA-6696. - -*Default Value:* true - -``stream_throughput_outbound_megabits_per_sec`` ------------------------------------------------ -*This option is commented out by default.* - -Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s. 
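An illustrative sketch of the compaction and streaming throttles described above, using only the defaults stated in this section::

    compaction_throughput_mb_per_sec: 16               # 0 disables compaction throttling
    stream_throughput_outbound_megabits_per_sec: 200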
- -*Default Value:* 200 - -``inter_dc_stream_throughput_outbound_megabits_per_sec`` --------------------------------------------------------- -*This option is commented out by default.* - -Throttles all streaming file transfer between the datacenters, -this setting allows users to throttle inter dc stream throughput in addition -to throttling all network stream traffic as configured with -stream_throughput_outbound_megabits_per_sec -When unset, the default is 200 Mbps or 25 MB/s - -*Default Value:* 200 - -``read_request_timeout_in_ms`` ------------------------------- - -How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``range_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 10000 - -``write_request_timeout_in_ms`` -------------------------------- -How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 2000 - -``counter_write_request_timeout_in_ms`` ---------------------------------------- -How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms. - -*Default Value:* 5000 - -``cas_contention_timeout_in_ms`` --------------------------------- -How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms. - -*Default Value:* 1000 - -``truncate_request_timeout_in_ms`` ----------------------------------- -How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms. - -*Default Value:* 60000 - -``request_timeout_in_ms`` -------------------------- -The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms. - -*Default Value:* 10000 - -``internode_application_send_queue_capacity_in_bytes`` ------------------------------------------------------- -*This option is commented out by default.* - -Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details. - -The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000 - -The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000 - -The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000 - -Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received. - -The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. 
So any given node may have a maximum of -N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes) -messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens -nodes should need to communicate with significant bandwidth. - -The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, -on all links to or from a single node in the cluster. -The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, -on all links to or from any node in the cluster. - - -*Default Value:* 4194304 #4MiB - -``internode_application_send_queue_reserve_endpoint_capacity_in_bytes`` ------------------------------------------------------------------------ -*This option is commented out by default.* - -*Default Value:* 134217728 #128MiB - -``internode_application_send_queue_reserve_global_capacity_in_bytes`` ---------------------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 536870912 #512MiB - -``internode_application_receive_queue_capacity_in_bytes`` ---------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 4194304 #4MiB - -``internode_application_receive_queue_reserve_endpoint_capacity_in_bytes`` --------------------------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 134217728 #128MiB - -``internode_application_receive_queue_reserve_global_capacity_in_bytes`` ------------------------------------------------------------------------- -*This option is commented out by default.* - -*Default Value:* 536870912 #512MiB - -``slow_query_log_timeout_in_ms`` --------------------------------- - - -How long before a node logs slow queries. Select queries that take longer than -this timeout to execute, will generate an aggregated log message, so that slow queries -can be identified. Set this value to zero to disable slow query logging. - -*Default Value:* 500 - -``cross_node_timeout`` ----------------------- -*This option is commented out by default.* - -Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests. - -Warning: It is generally assumed that users have setup NTP on their clusters, and that clocks are modestly in sync, -since this is a requirement for general correctness of last write wins. - -*Default Value:* true - -``streaming_keep_alive_period_in_secs`` ---------------------------------------- -*This option is commented out by default.* - -Set keep-alive period for streaming -This node will send a keep-alive message periodically with this period. 
-If the node does not receive a keep-alive message from the peer for -2 keep-alive cycles the stream session times out and fail -Default value is 300s (5 minutes), which means stalled stream -times out in 10 minutes by default - -*Default Value:* 300 - -``streaming_connections_per_host`` ----------------------------------- -*This option is commented out by default.* - -Limit number of connections per host for streaming -Increase this when you notice that joins are CPU-bound rather that network -bound (for example a few nodes with big files). - -*Default Value:* 1 - -``phi_convict_threshold`` -------------------------- -*This option is commented out by default.* - - -phi value that must be reached for a host to be marked down. -most users should never need to adjust this. - -*Default Value:* 8 - -``endpoint_snitch`` -------------------- - -endpoint_snitch -- Set this to a class that implements -IEndpointSnitch. The snitch has two functions: - -- it teaches Cassandra enough about your network topology to route - requests efficiently -- it allows Cassandra to spread replicas around your cluster to avoid - correlated failures. It does this by grouping machines into - "datacenters" and "racks." Cassandra will do its best not to have - more than one replica on the same "rack" (which may not actually - be a physical location) - -CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on "rack1" in "datacenter1", your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new "datacenter") and -decommissioning the old ones. - -Out of the box, Cassandra provides: - -SimpleSnitch: - Treats Strategy order as proximity. This can improve cache - locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack - and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via - gossip. If cassandra-topology.properties exists, it is used as a - fallback, allowing migration from the PropertyFileSnitch. - -PropertyFileSnitch: - Proximity is determined by rack and data center, which are - explicitly configured in cassandra-topology.properties. - -Ec2Snitch: - Appropriate for EC2 deployments in a single Region. Loads Region - and Availability Zone information from the EC2 API. The Region is - treated as the datacenter, and the Availability Zone as the rack. - Only private IPs are used, so this will not work across multiple - Regions. - -Ec2MultiRegionSnitch: - Uses public IPs as broadcast_address to allow cross-region - connectivity. (Thus, you should set seed addresses to the public - IP as well.) You will need to open the storage_port or - ssl_storage_port on the public IP firewall. (For intra-Region - traffic, Cassandra will switch to the private IP after - establishing a connection.) - -RackInferringSnitch: - Proximity is determined by rack and data center, which are - assumed to correspond to the 3rd and 2nd octet of each node's IP - address, respectively. 
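For illustration, the production-recommended snitch mentioned above combined with the streaming and failure-detection defaults quoted in this section::

    endpoint_snitch: GossipingPropertyFileSnitch
    phi_convict_threshold: 8
    streaming_keep_alive_period_in_secs: 300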
Unless this happens to match your - deployment conventions, this is best used as an example of - writing a custom Snitch class and is provided in that spirit. - -You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath. - -*Default Value:* SimpleSnitch - -``dynamic_snitch_update_interval_in_ms`` ----------------------------------------- - -controls how often to perform the more expensive part of host score -calculation - -*Default Value:* 100 - -``dynamic_snitch_reset_interval_in_ms`` ---------------------------------------- -controls how often to reset all host scores, allowing a bad host to -possibly recover - -*Default Value:* 600000 - -``dynamic_snitch_badness_threshold`` ------------------------------------- -if set greater than zero, this will allow -'pinning' of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest. - -*Default Value:* 0.1 - -``server_encryption_options`` ------------------------------ - -Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html - -*NOTE* No custom encryption options are enabled at the moment -The available internode options are : all, none, dc, rack -If set to dc cassandra will encrypt the traffic between the DCs -If set to rack cassandra will encrypt the traffic between the racks - -The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore - - -*Default Value (complex option)*:: - - # set to true for allowing secure incoming connections - enabled: false - # If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port - optional: false - # if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used - # during upgrade to 4.0; otherwise, set to false. - enable_legacy_ssl_storage_port: false - # on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true. 
- internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -``client_encryption_options`` ------------------------------ -enable or disable client-to-server encryption. - -*Default Value (complex option)*:: - - enabled: false - # If enabled and optional is set to true encrypted and unencrypted connections are handled. - optional: false - keystore: conf/.keystore - keystore_password: cassandra - # require_client_auth: false - # Set trustore and truststore_password if require_client_auth is true - # truststore: conf/.truststore - # truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - -``internode_compression`` -------------------------- -internode_compression controls whether traffic between nodes is -compressed. -Can be: - -all - all traffic is compressed - -dc - traffic between different datacenters is compressed - -none - nothing is compressed. - -*Default Value:* dc - -``inter_dc_tcp_nodelay`` ------------------------- - -Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses. - -*Default Value:* false - -``tracetype_query_ttl`` ------------------------ - -TTL for different trace types used during logging of the repair process. - -*Default Value:* 86400 - -``tracetype_repair_ttl`` ------------------------- - -*Default Value:* 604800 - -``enable_user_defined_functions`` ---------------------------------- - -If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. - -*Default Value:* false - -``enable_scripted_user_defined_functions`` ------------------------------------------- - -Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false. - -*Default Value:* false - -``windows_timer_interval`` --------------------------- - -The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals 'clockres' tool can confirm your system's default -setting. - -*Default Value:* 1 - -``transparent_data_encryption_options`` ---------------------------------------- - - -Enables encrypting data at-rest (on disk). 
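As a sketch only, enabling client-to-server encryption with the sample keystore values shown in the block above might look like this (substitute your own keystore and password)::

    client_encryption_options:
      enabled: true
      optional: false
      keystore: conf/.keystore
      keystore_password: cassandra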
Different key providers can be plugged in, but the default reads from -a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by -the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys -can still (and should!) be in the keystore and will be used on decrypt operations -(to handle the case of key rotation). - -It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) - -Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints - -*Default Value (complex option)*:: - - enabled: false - chunk_length_kb: 64 - cipher: AES/CBC/PKCS5Padding - key_alias: testing:1 - # CBC IV length for AES needs to be 16 bytes (which is also the default size) - # iv_length: 16 - key_provider: - - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: - - keystore: conf/.keystore - keystore_password: cassandra - store_type: JCEKS - key_password: cassandra - -``tombstone_warn_threshold`` ----------------------------- - -#################### -SAFETY THRESHOLDS # -#################### - -When executing a scan, within or across a partition, we need to keep the -tombstones seen in memory so we can return them to the coordinator, which -will use them to make sure other replicas also know about the deleted rows. -With workloads that generate a lot of tombstones, this can cause performance -problems and even exaust the server heap. -(http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -Adjust the thresholds here if you understand the dangers and want to -scan more tombstones anyway. These thresholds may also be adjusted at runtime -using the StorageService mbean. - -*Default Value:* 1000 - -``tombstone_failure_threshold`` -------------------------------- - -*Default Value:* 100000 - -``batch_size_warn_threshold_in_kb`` ------------------------------------ - -Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability. - -*Default Value:* 5 - -``batch_size_fail_threshold_in_kb`` ------------------------------------ - -Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default. - -*Default Value:* 50 - -``unlogged_batch_across_partitions_warn_threshold`` ---------------------------------------------------- - -Log WARN on any batches not of type LOGGED than span across more partitions than this limit - -*Default Value:* 10 - -``compaction_large_partition_warning_threshold_mb`` ---------------------------------------------------- - -Log a warning when compacting partitions larger than this value - -*Default Value:* 100 - -``gc_log_threshold_in_ms`` --------------------------- -*This option is commented out by default.* - -GC Pauses greater than 200 ms will be logged at INFO level -This threshold can be adjusted to minimize logging if necessary - -*Default Value:* 200 - -``gc_warn_threshold_in_ms`` ---------------------------- -*This option is commented out by default.* - -GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement. 
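An illustrative recap of the safety thresholds described above, with the defaults given in this section::

    tombstone_warn_threshold: 1000
    tombstone_failure_threshold: 100000
    batch_size_warn_threshold_in_kb: 5
    batch_size_fail_threshold_in_kb: 50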
Setting to 0 -will deactivate the feature. - -*Default Value:* 1000 - -``max_value_size_in_mb`` ------------------------- -*This option is commented out by default.* - -Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048. - -*Default Value:* 256 - -``back_pressure_enabled`` -------------------------- - -Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas. - -*Default Value:* false - -``back_pressure_strategy`` --------------------------- -The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map. - -``otc_coalescing_strategy`` ---------------------------- -*This option is commented out by default.* - -Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won't notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It's not that bare metal -doesn't benefit from coalescing messages, it's that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details. - -Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name. - -*Default Value:* DISABLED - -``otc_coalescing_window_us`` ----------------------------- -*This option is commented out by default.* - -How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. 
For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled. - -*Default Value:* 200 - -``otc_coalescing_enough_coalesced_messages`` --------------------------------------------- -*This option is commented out by default.* - -Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128. - -*Default Value:* 8 - -``otc_backlog_expiration_interval_ms`` --------------------------------------- -*This option is commented out by default.* - -How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions. - - -*Default Value:* 200 - -``ideal_consistency_level`` ---------------------------- -*This option is commented out by default.* - -Track a metric per keyspace indicating whether replication achieved the ideal consistency -level for writes without timing out. This is different from the consistency level requested by -each write which may be lower in order to facilitate availability. - -*Default Value:* EACH_QUORUM - -``automatic_sstable_upgrade`` ------------------------------ -*This option is commented out by default.* - -Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the -oldest non-upgraded sstable will get upgraded to the latest version - -*Default Value:* false - -``max_concurrent_automatic_sstable_upgrades`` ---------------------------------------------- -*This option is commented out by default.* -Limit the number of concurrent sstable upgrades - -*Default Value:* 1 - -``audit_logging_options`` -------------------------- - -Audit logging - Logs every incoming CQL command request, authentication to a node. See the docs -on audit_logging for full details about the various configuration options. - -``full_query_logging_options`` ------------------------------- -*This option is commented out by default.* - - -default options for full query logging - these can be overridden from command line when executing -nodetool enablefullquerylog - -``corrupted_tombstone_strategy`` --------------------------------- -*This option is commented out by default.* - -validate tombstones on reads and compaction -can be either "disabled", "warn" or "exception" - -*Default Value:* disabled - -``diagnostic_events_enabled`` ------------------------------ - -Diagnostic Events # -If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details -on internal state and temporal relationships across events, accessible by clients via JMX. - -*Default Value:* false - -``native_transport_flush_in_batches_legacy`` --------------------------------------------- -*This option is commented out by default.* - -Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in -particular you run an old kernel or have very fewer client connections, this option might be worth evaluating. 
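A sketch of the operational flags covered above, shown with the defaults quoted in this section::

    automatic_sstable_upgrade: false
    max_concurrent_automatic_sstable_upgrades: 1
    corrupted_tombstone_strategy: disabled     # or "warn" / "exception"
    diagnostic_events_enabled: false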
- -*Default Value:* false - -``repaired_data_tracking_for_range_reads_enabled`` --------------------------------------------------- - -Enable tracking of repaired state of data during reads and comparison between replicas -Mismatches between the repaired sets of replicas can be characterized as either confirmed -or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair -sessions, unrepaired partition tombstones, or some other condition means that the disparity -cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation -as they may be indicative of corruption or data loss. -There are separate flags for range vs partition reads as single partition reads are only tracked -when CL > 1 and a digest mismatch occurs. Currently, range queries don't use digests so if -enabled for range reads, all range reads will include repaired data tracking. As this adds -some overhead, operators may wish to disable it whilst still enabling it for partition reads - -*Default Value:* false - -``repaired_data_tracking_for_partition_reads_enabled`` ------------------------------------------------------- - -*Default Value:* false - -``report_unconfirmed_repaired_data_mismatches`` ------------------------------------------------ -If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed -mismatches will also be recorded. This is to avoid potential signal:noise issues are unconfirmed -mismatches are less actionable than confirmed ones. - -*Default Value:* false - -``enable_materialized_views`` ------------------------------ - -######################## -EXPERIMENTAL FEATURES # -######################## - -Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use. - -*Default Value:* false - -``enable_sasi_indexes`` ------------------------ - -Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use. - -*Default Value:* false - -``enable_transient_replication`` --------------------------------- - -Enables creation of transiently replicated keyspaces on this node. -Transient replication is experimental and is not recommended for production use. - -*Default Value:* false diff --git a/src/doc/4.0-beta1/_sources/configuration/index.rst.txt b/src/doc/4.0-beta1/_sources/configuration/index.rst.txt deleted file mode 100644 index f774fdad6..000000000 --- a/src/doc/4.0-beta1/_sources/configuration/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Configuring Cassandra -===================== - -This section describes how to configure Apache Cassandra. - -.. 
toctree:: - :maxdepth: 1 - - cassandra_config_file diff --git a/src/doc/4.0-beta1/_sources/contactus.rst.txt b/src/doc/4.0-beta1/_sources/contactus.rst.txt deleted file mode 100644 index 3ed9004dd..000000000 --- a/src/doc/4.0-beta1/_sources/contactus.rst.txt +++ /dev/null @@ -1,50 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contact us -========== - -You can get in touch with the Cassandra community either via the mailing lists or :ref:`Slack rooms `. - -.. _mailing-lists: - -Mailing lists -------------- - -The following mailing lists are available: - -- `Users `__ – General discussion list for users - `Subscribe - `__ -- `Developers `__ – Development related discussion - `Subscribe - `__ -- `Commits `__ – Commit notification source repository - - `Subscribe `__ -- `Client Libraries `__ – Discussion related to the - development of idiomatic client APIs - `Subscribe `__ - -Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe. - -.. _slack: - -Slack ------ -To chat with developers or users in real-time, join our rooms on `ASF Slack `__: - -- ``cassandra`` - for user questions and general discussions. -- ``cassandra-dev`` - strictly for questions or discussions related to Cassandra development. - diff --git a/src/doc/4.0-beta1/_sources/cql/appendices.rst.txt b/src/doc/4.0-beta1/_sources/cql/appendices.rst.txt deleted file mode 100644 index 480b78ea2..000000000 --- a/src/doc/4.0-beta1/_sources/cql/appendices.rst.txt +++ /dev/null @@ -1,330 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Appendices ----------- - -.. _appendix-A: - -Appendix A: CQL Keywords -~~~~~~~~~~~~~~~~~~~~~~~~ - -CQL distinguishes between *reserved* and *non-reserved* keywords. 
-Reserved keywords cannot be used as identifier, they are truly reserved -for the language (but one can enclose a reserved keyword by -double-quotes to use it as an identifier). Non-reserved keywords however -only have a specific meaning in certain context but can used as -identifier otherwise. The only *raison d’être* of these non-reserved -keywords is convenience: some keyword are non-reserved when it was -always easy for the parser to decide whether they were used as keywords -or not. - -+--------------------+-------------+ -| Keyword | Reserved? | -+====================+=============+ -| ``ADD`` | yes | -+--------------------+-------------+ -| ``AGGREGATE`` | no | -+--------------------+-------------+ -| ``ALL`` | no | -+--------------------+-------------+ -| ``ALLOW`` | yes | -+--------------------+-------------+ -| ``ALTER`` | yes | -+--------------------+-------------+ -| ``AND`` | yes | -+--------------------+-------------+ -| ``APPLY`` | yes | -+--------------------+-------------+ -| ``AS`` | no | -+--------------------+-------------+ -| ``ASC`` | yes | -+--------------------+-------------+ -| ``ASCII`` | no | -+--------------------+-------------+ -| ``AUTHORIZE`` | yes | -+--------------------+-------------+ -| ``BATCH`` | yes | -+--------------------+-------------+ -| ``BEGIN`` | yes | -+--------------------+-------------+ -| ``BIGINT`` | no | -+--------------------+-------------+ -| ``BLOB`` | no | -+--------------------+-------------+ -| ``BOOLEAN`` | no | -+--------------------+-------------+ -| ``BY`` | yes | -+--------------------+-------------+ -| ``CALLED`` | no | -+--------------------+-------------+ -| ``CLUSTERING`` | no | -+--------------------+-------------+ -| ``COLUMNFAMILY`` | yes | -+--------------------+-------------+ -| ``COMPACT`` | no | -+--------------------+-------------+ -| ``CONTAINS`` | no | -+--------------------+-------------+ -| ``COUNT`` | no | -+--------------------+-------------+ -| ``COUNTER`` | no | -+--------------------+-------------+ -| ``CREATE`` | yes | -+--------------------+-------------+ -| ``CUSTOM`` | no | -+--------------------+-------------+ -| ``DATE`` | no | -+--------------------+-------------+ -| ``DECIMAL`` | no | -+--------------------+-------------+ -| ``DELETE`` | yes | -+--------------------+-------------+ -| ``DESC`` | yes | -+--------------------+-------------+ -| ``DESCRIBE`` | yes | -+--------------------+-------------+ -| ``DISTINCT`` | no | -+--------------------+-------------+ -| ``DOUBLE`` | no | -+--------------------+-------------+ -| ``DROP`` | yes | -+--------------------+-------------+ -| ``ENTRIES`` | yes | -+--------------------+-------------+ -| ``EXECUTE`` | yes | -+--------------------+-------------+ -| ``EXISTS`` | no | -+--------------------+-------------+ -| ``FILTERING`` | no | -+--------------------+-------------+ -| ``FINALFUNC`` | no | -+--------------------+-------------+ -| ``FLOAT`` | no | -+--------------------+-------------+ -| ``FROM`` | yes | -+--------------------+-------------+ -| ``FROZEN`` | no | -+--------------------+-------------+ -| ``FULL`` | yes | -+--------------------+-------------+ -| ``FUNCTION`` | no | -+--------------------+-------------+ -| ``FUNCTIONS`` | no | -+--------------------+-------------+ -| ``GRANT`` | yes | -+--------------------+-------------+ -| ``IF`` | yes | -+--------------------+-------------+ -| ``IN`` | yes | -+--------------------+-------------+ -| ``INDEX`` | yes | -+--------------------+-------------+ -| ``INET`` | no | 
-+--------------------+-------------+ -| ``INFINITY`` | yes | -+--------------------+-------------+ -| ``INITCOND`` | no | -+--------------------+-------------+ -| ``INPUT`` | no | -+--------------------+-------------+ -| ``INSERT`` | yes | -+--------------------+-------------+ -| ``INT`` | no | -+--------------------+-------------+ -| ``INTO`` | yes | -+--------------------+-------------+ -| ``JSON`` | no | -+--------------------+-------------+ -| ``KEY`` | no | -+--------------------+-------------+ -| ``KEYS`` | no | -+--------------------+-------------+ -| ``KEYSPACE`` | yes | -+--------------------+-------------+ -| ``KEYSPACES`` | no | -+--------------------+-------------+ -| ``LANGUAGE`` | no | -+--------------------+-------------+ -| ``LIMIT`` | yes | -+--------------------+-------------+ -| ``LIST`` | no | -+--------------------+-------------+ -| ``LOGIN`` | no | -+--------------------+-------------+ -| ``MAP`` | no | -+--------------------+-------------+ -| ``MODIFY`` | yes | -+--------------------+-------------+ -| ``NAN`` | yes | -+--------------------+-------------+ -| ``NOLOGIN`` | no | -+--------------------+-------------+ -| ``NORECURSIVE`` | yes | -+--------------------+-------------+ -| ``NOSUPERUSER`` | no | -+--------------------+-------------+ -| ``NOT`` | yes | -+--------------------+-------------+ -| ``NULL`` | yes | -+--------------------+-------------+ -| ``OF`` | yes | -+--------------------+-------------+ -| ``ON`` | yes | -+--------------------+-------------+ -| ``OPTIONS`` | no | -+--------------------+-------------+ -| ``OR`` | yes | -+--------------------+-------------+ -| ``ORDER`` | yes | -+--------------------+-------------+ -| ``PASSWORD`` | no | -+--------------------+-------------+ -| ``PERMISSION`` | no | -+--------------------+-------------+ -| ``PERMISSIONS`` | no | -+--------------------+-------------+ -| ``PRIMARY`` | yes | -+--------------------+-------------+ -| ``RENAME`` | yes | -+--------------------+-------------+ -| ``REPLACE`` | yes | -+--------------------+-------------+ -| ``RETURNS`` | no | -+--------------------+-------------+ -| ``REVOKE`` | yes | -+--------------------+-------------+ -| ``ROLE`` | no | -+--------------------+-------------+ -| ``ROLES`` | no | -+--------------------+-------------+ -| ``SCHEMA`` | yes | -+--------------------+-------------+ -| ``SELECT`` | yes | -+--------------------+-------------+ -| ``SET`` | yes | -+--------------------+-------------+ -| ``SFUNC`` | no | -+--------------------+-------------+ -| ``SMALLINT`` | no | -+--------------------+-------------+ -| ``STATIC`` | no | -+--------------------+-------------+ -| ``STORAGE`` | no | -+--------------------+-------------+ -| ``STYPE`` | no | -+--------------------+-------------+ -| ``SUPERUSER`` | no | -+--------------------+-------------+ -| ``TABLE`` | yes | -+--------------------+-------------+ -| ``TEXT`` | no | -+--------------------+-------------+ -| ``TIME`` | no | -+--------------------+-------------+ -| ``TIMESTAMP`` | no | -+--------------------+-------------+ -| ``TIMEUUID`` | no | -+--------------------+-------------+ -| ``TINYINT`` | no | -+--------------------+-------------+ -| ``TO`` | yes | -+--------------------+-------------+ -| ``TOKEN`` | yes | -+--------------------+-------------+ -| ``TRIGGER`` | no | -+--------------------+-------------+ -| ``TRUNCATE`` | yes | -+--------------------+-------------+ -| ``TTL`` | no | -+--------------------+-------------+ -| ``TUPLE`` | no | -+--------------------+-------------+ -| ``TYPE`` | no | 
-+--------------------+-------------+ -| ``UNLOGGED`` | yes | -+--------------------+-------------+ -| ``UPDATE`` | yes | -+--------------------+-------------+ -| ``USE`` | yes | -+--------------------+-------------+ -| ``USER`` | no | -+--------------------+-------------+ -| ``USERS`` | no | -+--------------------+-------------+ -| ``USING`` | yes | -+--------------------+-------------+ -| ``UUID`` | no | -+--------------------+-------------+ -| ``VALUES`` | no | -+--------------------+-------------+ -| ``VARCHAR`` | no | -+--------------------+-------------+ -| ``VARINT`` | no | -+--------------------+-------------+ -| ``WHERE`` | yes | -+--------------------+-------------+ -| ``WITH`` | yes | -+--------------------+-------------+ -| ``WRITETIME`` | no | -+--------------------+-------------+ - -Appendix B: CQL Reserved Types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name. - -+-----------------+ -| type | -+=================+ -| ``bitstring`` | -+-----------------+ -| ``byte`` | -+-----------------+ -| ``complex`` | -+-----------------+ -| ``enum`` | -+-----------------+ -| ``interval`` | -+-----------------+ -| ``macaddr`` | -+-----------------+ - - -Appendix C: Dropping Compact Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Starting version 4.0, Thrift and COMPACT STORAGE is no longer supported. - -'ALTER ... DROP COMPACT STORAGE' statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables: - -- CQL-created Compact Tables that have no clustering columns, will expose an - additional clustering column ``column1`` with ``UTF8Type``. -- CQL-created Compact Tables that had no regular columns, will expose a - regular column ``value`` with ``BytesType``. -- For CQL-Created Compact Tables, all columns originally defined as - ``regular`` will be come ``static`` -- CQL-created Compact Tables that have clustering but have no regular - columns will have an empty value column (of ``EmptyType``) -- SuperColumn Tables (can only be created through Thrift) will expose - a compact value map with an empty name. -- Thrift-created Compact Tables will have types corresponding to their - Thrift definition. diff --git a/src/doc/4.0-beta1/_sources/cql/changes.rst.txt b/src/doc/4.0-beta1/_sources/cql/changes.rst.txt deleted file mode 100644 index 6691f156a..000000000 --- a/src/doc/4.0-beta1/_sources/cql/changes.rst.txt +++ /dev/null @@ -1,211 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Changes -------- - -The following describes the changes in each version of CQL. 
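As a quick illustration of how these changes show up in everyday queries, the arithmetic-operator and time-function additions from 3.4.5 (the first entry below) can be combined in a single statement. This is only a sketch; the ``sensor_readings`` table and its columns are hypothetical::

    -- assumed schema:
    -- CREATE TABLE sensor_readings (sensor_id int, taken_at timestamp, value double,
    --                               PRIMARY KEY (sensor_id, taken_at));

    SELECT taken_at,
           value * 2 AS doubled                  -- arithmetic operator in the selection clause
    FROM sensor_readings
    WHERE sensor_id = 1
      AND taken_at > currentTimestamp() - 7d;    -- time function combined with date arithmetic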
- -3.4.5 -^^^^^ - -- Adds support for arithmetic operators (:jira:`11935`) -- Adds support for ``+`` and ``-`` operations on dates (:jira:`11936`) -- Adds ``currentTimestamp``, ``currentDate``, ``currentTime`` and ``currentTimeUUID`` functions (:jira:`13132`) - - -3.4.4 -^^^^^ - -- ``ALTER TABLE`` ``ALTER`` has been removed; a column's type may not be changed after creation (:jira:`12443`). -- ``ALTER TYPE`` ``ALTER`` has been removed; a field's type may not be changed after creation (:jira:`12443`). - -3.4.3 -^^^^^ - -- Adds a new ``duration `` :ref:`data types ` (:jira:`11873`). -- Support for ``GROUP BY`` (:jira:`10707`). -- Adds a ``DEFAULT UNSET`` option for ``INSERT JSON`` to ignore omitted columns (:jira:`11424`). -- Allows ``null`` as a legal value for TTL on insert and update. It will be treated as equivalent to inserting a 0 (:jira:`12216`). - -3.4.2 -^^^^^ - -- If a table has a non zero ``default_time_to_live``, then explicitly specifying a TTL of 0 in an ``INSERT`` or - ``UPDATE`` statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels - the ``default_time_to_live``). This wasn't the case before and the ``default_time_to_live`` was applied even though a - TTL had been explicitly set. -- ``ALTER TABLE`` ``ADD`` and ``DROP`` now allow multiple columns to be added/removed. -- New ``PER PARTITION LIMIT`` option for ``SELECT`` statements (see `CASSANDRA-7017 - `__. -- :ref:`User-defined functions ` can now instantiate ``UDTValue`` and ``TupleValue`` instances via the - new ``UDFContext`` interface (see `CASSANDRA-10818 `__. -- :ref:`User-defined types ` may now be stored in a non-frozen form, allowing individual fields to be updated and - deleted in ``UPDATE`` statements and ``DELETE`` statements, respectively. (`CASSANDRA-7423 - `__). - -3.4.1 -^^^^^ - -- Adds ``CAST`` functions. - -3.4.0 -^^^^^ - -- Support for :ref:`materialized views `. -- ``DELETE`` support for inequality expressions and ``IN`` restrictions on any primary key columns. -- ``UPDATE`` support for ``IN`` restrictions on any primary key columns. - -3.3.1 -^^^^^ - -- The syntax ``TRUNCATE TABLE X`` is now accepted as an alias for ``TRUNCATE X``. - -3.3.0 -^^^^^ - -- :ref:`User-defined functions and aggregates ` are now supported. -- Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings. -- Introduces Roles to supersede user based authentication and access control -- New ``date``, ``time``, ``tinyint`` and ``smallint`` :ref:`data types ` have been added. -- :ref:`JSON support ` has been added -- Adds new time conversion functions and deprecate ``dateOf`` and ``unixTimestampOf``. - -3.2.0 -^^^^^ - -- :ref:`User-defined types ` supported. -- ``CREATE INDEX`` now supports indexing collection columns, including indexing the keys of map collections through the - ``keys()`` function -- Indexes on collections may be queried using the new ``CONTAINS`` and ``CONTAINS KEY`` operators -- :ref:`Tuple types ` were added to hold fixed-length sets of typed positional fields. -- ``DROP INDEX`` now supports optionally specifying a keyspace. - -3.1.7 -^^^^^ - -- ``SELECT`` statements now support selecting multiple rows in a single partition using an ``IN`` clause on combinations - of clustering columns. -- ``IF NOT EXISTS`` and ``IF EXISTS`` syntax is now supported by ``CREATE USER`` and ``DROP USER`` statements, - respectively. - -3.1.6 -^^^^^ - -- A new ``uuid()`` method has been added. -- Support for ``DELETE ... IF EXISTS`` syntax. 
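The two 3.1.6 additions above are easiest to see side by side. A minimal sketch, assuming a hypothetical ``users`` table::

    CREATE TABLE users (id uuid PRIMARY KEY, name text);

    -- uuid() generates a random (version 4) UUID on the server side
    INSERT INTO users (id, name) VALUES (uuid(), 'alice');

    -- the deletion is applied only if the targeted row actually exists
    DELETE FROM users WHERE id = 123e4567-e89b-12d3-a456-426655440000 IF EXISTS;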
- -3.1.5 -^^^^^ - -- It is now possible to group clustering columns in a relation, see :ref:`WHERE ` clauses. -- Added support for :ref:`static columns `. - -3.1.4 -^^^^^ - -- ``CREATE INDEX`` now allows specifying options when creating CUSTOM indexes. - -3.1.3 -^^^^^ - -- Millisecond precision formats have been added to the :ref:`timestamp ` parser. - -3.1.2 -^^^^^ - -- ``NaN`` and ``Infinity`` has been added as valid float constants. They are now reserved keywords. In the unlikely case - you we using them as a column identifier (or keyspace/table one), you will now need to double quote them. - -3.1.1 -^^^^^ - -- ``SELECT`` statement now allows listing the partition keys (using the ``DISTINCT`` modifier). See `CASSANDRA-4536 - `__. -- The syntax ``c IN ?`` is now supported in ``WHERE`` clauses. In that case, the value expected for the bind variable - will be a list of whatever type ``c`` is. -- It is now possible to use named bind variables (using ``:name`` instead of ``?``). - -3.1.0 -^^^^^ - -- ``ALTER TABLE`` ``DROP`` option added. -- ``SELECT`` statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported. -- ``CREATE`` statements for ``KEYSPACE``, ``TABLE`` and ``INDEX`` now supports an ``IF NOT EXISTS`` condition. - Similarly, ``DROP`` statements support a ``IF EXISTS`` condition. -- ``INSERT`` statements optionally supports a ``IF NOT EXISTS`` condition and ``UPDATE`` supports ``IF`` conditions. - -3.0.5 -^^^^^ - -- ``SELECT``, ``UPDATE``, and ``DELETE`` statements now allow empty ``IN`` relations (see `CASSANDRA-5626 - `__. - -3.0.4 -^^^^^ - -- Updated the syntax for custom :ref:`secondary indexes `. -- Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not - correct (the order was **not** the one of the type of the partition key). Instead, the ``token`` method should always - be used for range queries on the partition key (see :ref:`WHERE clauses `). - -3.0.3 -^^^^^ - -- Support for custom :ref:`secondary indexes ` has been added. - -3.0.2 -^^^^^ - -- Type validation for the :ref:`constants ` has been fixed. For instance, the implementation used to allow - ``'2'`` as a valid value for an ``int`` column (interpreting it has the equivalent of ``2``), or ``42`` as a valid - ``blob`` value (in which case ``42`` was interpreted as an hexadecimal representation of the blob). This is no longer - the case, type validation of constants is now more strict. See the :ref:`data types ` section for details - on which constant is allowed for which type. -- The type validation fixed of the previous point has lead to the introduction of blobs constants to allow the input of - blobs. Do note that while the input of blobs as strings constant is still supported by this version (to allow smoother - transition to blob constant), it is now deprecated and will be removed by a future version. If you were using strings - as blobs, you should thus update your client code ASAP to switch blob constants. -- A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is - now also allowed in select clauses. See the :ref:`section on functions ` for details. - -3.0.1 -^^^^^ - -- Date strings (and timestamps) are no longer accepted as valid ``timeuuid`` values. Doing so was a bug in the sense - that date string are not valid ``timeuuid``, and it was thus resulting in `confusing behaviors - `__. 
However, the following new methods have been added to help - working with ``timeuuid``: ``now``, ``minTimeuuid``, ``maxTimeuuid`` , - ``dateOf`` and ``unixTimestampOf``. -- Float constants now support the exponent notation. In other words, ``4.2E10`` is now a valid floating point value. - -Versioning -^^^^^^^^^^ - -Versioning of the CQL language adheres to the `Semantic Versioning `__ guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version. - -========= ============================================================================================================= - version description -========= ============================================================================================================= - Major The major version *must* be bumped when backward incompatible changes are introduced. This should rarely - occur. - Minor Minor version increments occur when new, but backward compatible, functionality is introduced. - Patch The patch version is incremented when bugs are fixed. -========= ============================================================================================================= diff --git a/src/doc/4.0-beta1/_sources/cql/ddl.rst.txt b/src/doc/4.0-beta1/_sources/cql/ddl.rst.txt deleted file mode 100644 index 88df05b4c..000000000 --- a/src/doc/4.0-beta1/_sources/cql/ddl.rst.txt +++ /dev/null @@ -1,852 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _data-definition: - -Data Definition ---------------- - -CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in -*keyspaces*. A keyspace defines a number of options that applies to all the tables it contains, most prominently of -which is the :ref:`replication strategy ` used by the keyspace. It is generally encouraged to use -one keyspace by *application*, and thus many cluster may define only one keyspace. - -This section describes the statements used to create, modify, and remove those keyspace and tables. - -Common definitions -^^^^^^^^^^^^^^^^^^ - -The names of the keyspaces and tables are defined by the following grammar: - -.. productionlist:: - keyspace_name: `name` - table_name: [ `keyspace_name` '.' 
] `name` - name: `unquoted_name` | `quoted_name` - unquoted_name: re('[a-zA-Z_0-9]{1, 48}') - quoted_name: '"' `unquoted_name` '"' - -Both keyspace and table name should be comprised of only alphanumeric characters, cannot be empty and are limited in -size to 48 characters (that limit exists mostly to avoid filenames (which may include the keyspace and table name) to go -over the limits of certain file systems). By default, keyspace and table names are case insensitive (``myTable`` is -equivalent to ``mytable``) but case sensitivity can be forced by using double-quotes (``"myTable"`` is different from -``mytable``). - -Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is -part of. If is is not fully-qualified, the table is assumed to be in the *current* keyspace (see :ref:`USE statement -`). - -Further, the valid names for columns is simply defined as: - -.. productionlist:: - column_name: `identifier` - -We also define the notion of statement options for use in the following section: - -.. productionlist:: - options: `option` ( AND `option` )* - option: `identifier` '=' ( `identifier` | `constant` | `map_literal` ) - -.. _create-keyspace-statement: - -CREATE KEYSPACE -^^^^^^^^^^^^^^^ - -A keyspace is created using a ``CREATE KEYSPACE`` statement: - -.. productionlist:: - create_keyspace_statement: CREATE KEYSPACE [ IF NOT EXISTS ] `keyspace_name` WITH `options` - -For instance:: - - CREATE KEYSPACE excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3} - AND durable_writes = false; - -Attempting to create a keyspace that already exists will return an error unless the ``IF NOT EXISTS`` option is used. If -it is used, the statement will be a no-op if the keyspace already exists. - -The supported ``options`` are: - -=================== ========== =========== ========= =================================================================== -name kind mandatory default description -=================== ========== =========== ========= =================================================================== -``replication`` *map* yes The replication strategy and options to use for the keyspace (see - details below). -``durable_writes`` *simple* no true Whether to use the commit log for updates on this keyspace - (disable this option at your own risk!). -=================== ========== =========== ========= =================================================================== - -The ``replication`` property is mandatory and must at least contains the ``'class'`` sub-option which defines the -:ref:`replication strategy ` class to use. The rest of the sub-options depends on what replication -strategy is used. By default, Cassandra support the following ``'class'``: - -.. _replication-strategy: - -``SimpleStrategy`` -"""""""""""""""""" - -A simple strategy that defines a replication factor for data to be spread -across the entire cluster. This is generally not a wise choice for production -because it does not respect datacenter layouts and can lead to wildly varying -query latency. For a production ready strategy, see -``NetworkTopologyStrategy``. 
``SimpleStrategy`` supports a single mandatory argument: - -========================= ====== ======= ============================================= -sub-option type since description -========================= ====== ======= ============================================= -``'replication_factor'`` int all The number of replicas to store per range -========================= ====== ======= ============================================= - -``NetworkTopologyStrategy`` -""""""""""""""""""""""""""" - -A production ready replication strategy that allows to set the replication -factor independently for each data-center. The rest of the sub-options are -key-value pairs where a key is a data-center name and its value is the -associated replication factor. Options: - -===================================== ====== ====== ============================================= -sub-option type since description -===================================== ====== ====== ============================================= -``''`` int all The number of replicas to store per range in - the provided datacenter. -``'replication_factor'`` int 4.0 The number of replicas to use as a default - per datacenter if not specifically provided. - Note that this always defers to existing - definitions or explicit datacenter settings. - For example, to have three replicas per - datacenter, supply this with a value of 3. -===================================== ====== ====== ============================================= - -Note that when ``ALTER`` ing keyspaces and supplying ``replication_factor``, -auto-expansion will only *add* new datacenters for safety, it will not alter -existing datacenters or remove any even if they are no longer in the cluster. -If you want to remove datacenters while still supplying ``replication_factor``, -explicitly zero out the datacenter you want to have zero replicas. - -An example of auto-expanding datacenters with two datacenters: ``DC1`` and ``DC2``:: - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3} - - DESCRIBE KEYSPACE excalibur - CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true; - - -An example of auto-expanding and overriding a datacenter:: - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2} - - DESCRIBE KEYSPACE excalibur - CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true; - -An example that excludes a datacenter while using ``replication_factor``:: - - CREATE KEYSPACE excalibur - WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0} ; - - DESCRIBE KEYSPACE excalibur - CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true; - -If transient replication has been enabled, transient replicas can be configured for both -``SimpleStrategy`` and ``NetworkTopologyStrategy`` by defining replication factors in the format ``'/'`` - -For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:: - - CREATE KEYSPACE some_keysopace - WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1'', 'DC2' : '5/2'}; - -.. 
_use-statement: - -USE -^^^ - -The ``USE`` statement allows to change the *current* keyspace (for the *connection* on which it is executed). A number -of objects in CQL are bound to a keyspace (tables, user-defined types, functions, ...) and the current keyspace is the -default keyspace used when those objects are referred without a fully-qualified name (that is, without being prefixed a -keyspace name). A ``USE`` statement simply takes the keyspace to use as current as argument: - -.. productionlist:: - use_statement: USE `keyspace_name` - -.. _alter-keyspace-statement: - -ALTER KEYSPACE -^^^^^^^^^^^^^^ - -An ``ALTER KEYSPACE`` statement allows to modify the options of a keyspace: - -.. productionlist:: - alter_keyspace_statement: ALTER KEYSPACE `keyspace_name` WITH `options` - -For instance:: - - ALTER KEYSPACE Excelsior - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4}; - -The supported options are the same than for :ref:`creating a keyspace `. - -.. _drop-keyspace-statement: - -DROP KEYSPACE -^^^^^^^^^^^^^ - -Dropping a keyspace can be done using the ``DROP KEYSPACE`` statement: - -.. productionlist:: - drop_keyspace_statement: DROP KEYSPACE [ IF EXISTS ] `keyspace_name` - -For instance:: - - DROP KEYSPACE Excelsior; - -Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UTD and -functions in it, and all the data contained in those tables. - -If the keyspace does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _create-table-statement: - -CREATE TABLE -^^^^^^^^^^^^ - -Creating a new table uses the ``CREATE TABLE`` statement: - -.. productionlist:: - create_table_statement: CREATE TABLE [ IF NOT EXISTS ] `table_name` - : '(' - : `column_definition` - : ( ',' `column_definition` )* - : [ ',' PRIMARY KEY '(' `primary_key` ')' ] - : ')' [ WITH `table_options` ] - column_definition: `column_name` `cql_type` [ STATIC ] [ PRIMARY KEY] - primary_key: `partition_key` [ ',' `clustering_columns` ] - partition_key: `column_name` - : | '(' `column_name` ( ',' `column_name` )* ')' - clustering_columns: `column_name` ( ',' `column_name` )* - table_options: COMPACT STORAGE [ AND `table_options` ] - : | CLUSTERING ORDER BY '(' `clustering_order` ')' [ AND `table_options` ] - : | `options` - clustering_order: `column_name` (ASC | DESC) ( ',' `column_name` (ASC | DESC) )* - -For instance:: - - CREATE TABLE monkeySpecies ( - species text PRIMARY KEY, - common_name text, - population varint, - average_size int - ) WITH comment='Important biological records'; - - CREATE TABLE timeline ( - userid uuid, - posted_month int, - posted_time uuid, - body text, - posted_by text, - PRIMARY KEY (userid, posted_month, posted_time) - ) WITH compaction = { 'class' : 'LeveledCompactionStrategy' }; - - CREATE TABLE loads ( - machine inet, - cpu int, - mtime timeuuid, - load float, - PRIMARY KEY ((machine, cpu), mtime) - ) WITH CLUSTERING ORDER BY (mtime DESC); - -A CQL table has a name and is composed of a set of *rows*. Creating a table amounts to defining which :ref:`columns -` the rows will be composed, which of those columns compose the :ref:`primary key `, as -well as optional :ref:`options ` for the table. - -Attempting to create an already existing table will return an error unless the ``IF NOT EXISTS`` directive is used. If -it is used, the statement will be a no-op if the table already exists. - - -.. 
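As an example of the ``IF NOT EXISTS`` directive, re-running a creation statement becomes a no-op instead of an error (a sketch reusing the ``monkeySpecies`` table from the examples above)::

    CREATE TABLE IF NOT EXISTS monkeySpecies (
        species text PRIMARY KEY,
        common_name text,
        population varint,
        average_size int
    ) WITH comment='Important biological records';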
_column-definition: - -Column definitions -~~~~~~~~~~~~~~~~~~ - -Every rows in a CQL table has a set of predefined columns defined at the time of the table creation (or added later -using an :ref:`alter statement`). - -A :token:`column_definition` is primarily comprised of the name of the column defined and it's :ref:`type `, -which restrict which values are accepted for that column. Additionally, a column definition can have the following -modifiers: - -``STATIC`` - it declares the column as being a :ref:`static column `. - -``PRIMARY KEY`` - it declares the column as being the sole component of the :ref:`primary key ` of the table. - -.. _static-columns: - -Static columns -`````````````` -Some columns can be declared as ``STATIC`` in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same :ref:`partition key `). For instance:: - - CREATE TABLE t ( - pk int, - t int, - v text, - s text static, - PRIMARY KEY (pk, t) - ); - - INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0'); - INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1'); - - SELECT * FROM t; - pk | t | v | s - ----+---+--------+----------- - 0 | 0 | 'val0' | 'static1' - 0 | 1 | 'val1' | 'static1' - -As can be seen, the ``s`` value is the same (``static1``) for both of the row in the partition (the partition key in -that example being ``pk``, both rows are in that same partition): the 2nd insertion has overridden the value for ``s``. - -The use of static columns as the following restrictions: - -- tables with the ``COMPACT STORAGE`` option (see below) cannot use them. -- a table without clustering columns cannot have static columns (in a table without clustering columns, every partition - has only one row, and so every column is inherently static). -- only non ``PRIMARY KEY`` columns can be static. - -.. _primary-key: - -The Primary key -~~~~~~~~~~~~~~~ - -Within a table, a row is uniquely identified by its ``PRIMARY KEY``, and hence all table **must** define a PRIMARY KEY -(and only one). A ``PRIMARY KEY`` definition is composed of one or more of the columns defined in the table. -Syntactically, the primary key is defined the keywords ``PRIMARY KEY`` followed by comma-separated list of the column -names composing it within parenthesis, but if the primary key has only one column, one can alternatively follow that -column definition by the ``PRIMARY KEY`` keywords. The order of the columns in the primary key definition matter. - -A CQL primary key is composed of 2 parts: - -- the :ref:`partition key ` part. It is the first component of the primary key definition. It can be a - single column or, using additional parenthesis, can be multiple columns. A table always have at least a partition key, - the smallest possible table definition is:: - - CREATE TABLE t (k text PRIMARY KEY); - -- the :ref:`clustering columns `. Those are the columns after the first component of the primary key - definition, and the order of those columns define the *clustering order*. - -Some example of primary key definition are: - -- ``PRIMARY KEY (a)``: ``a`` is the partition key and there is no clustering columns. -- ``PRIMARY KEY (a, b, c)`` : ``a`` is the partition key and ``b`` and ``c`` are the clustering columns. -- ``PRIMARY KEY ((a, b), c)`` : ``a`` and ``b`` compose the partition key (this is often called a *composite* partition - key) and ``c`` is the clustering column. - - -.. 
_partition-key: - -The partition key -````````````````` - -Within a table, CQL defines the notion of a *partition*. A partition is simply the set of rows that share the same value -for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same -partition only they have the same values for all those partition key column. So for instance, given the following table -definition and content:: - - CREATE TABLE t ( - a int, - b int, - c int, - d int, - PRIMARY KEY ((a, b), c, d) - ); - - SELECT * FROM t; - a | b | c | d - ---+---+---+--- - 0 | 0 | 0 | 0 // row 1 - 0 | 0 | 1 | 1 // row 2 - 0 | 1 | 2 | 2 // row 3 - 0 | 1 | 3 | 3 // row 4 - 1 | 1 | 4 | 4 // row 5 - -``row 1`` and ``row 2`` are in the same partition, ``row 3`` and ``row 4`` are also in the same partition (but a -different one) and ``row 5`` is in yet another partition. - -Note that a table always has a partition key, and that if the table has no :ref:`clustering columns -`, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns). - -The most important property of partition is that all the rows belonging to the same partition are guarantee to be stored -on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be -localized together in the Cluster, and it is thus important to choose your partition key wisely so that rows that needs -to be fetch together are in the same partition (so that querying those rows together require contacting a minimum of -nodes). - -Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to -be stored on the same set of replica node, a partition key that groups too much data can create a hotspot. - -Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done *atomically* and in *isolation*, which is not the case across partitions. - -The proper choice of the partition key and clustering columns for a table is probably one of the most important aspect -of data modeling in Cassandra, and it largely impact which queries can be performed, and how efficiently they are. - - -.. _clustering-columns: - -The clustering columns -`````````````````````` - -The clustering columns of a table defines the clustering order for the partition of that table. For a given -:ref:`partition `, all the rows are physically ordered inside Cassandra by that clustering order. For -instance, given:: - - CREATE TABLE t ( - a int, - b int, - c int, - PRIMARY KEY (a, b, c) - ); - - SELECT * FROM t; - a | b | c - ---+---+--- - 0 | 0 | 4 // row 1 - 0 | 1 | 9 // row 2 - 0 | 2 | 2 // row 3 - 0 | 3 | 3 // row 4 - -then the rows (which all belong to the same partition) are all stored internally in the order of the values of their -``b`` column (the order they are displayed above). So where the partition key of the table allows to group rows on the -same replica set, the clustering columns controls how those rows are stored on the replica. That sorting allows the -retrieval of a range of rows within a partition (for instance, in the example above, ``SELECT * FROM t WHERE a = 0 AND b -> 1 and b <= 3``) to be very efficient. - - -.. 
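Written out in full against the table ``t`` defined just above, that in-partition range query reads::

    SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3;

Because the rows of a partition are stored in clustering order, such a slice only needs to read a contiguous range of the partition.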
_create-table-options: - -Table options -~~~~~~~~~~~~~ - -A CQL table has a number of options that can be set at creation (and, for most of them, :ref:`altered -` later). These options are specified after the ``WITH`` keyword. - -Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the ``COMPACT STORAGE`` option and the ``CLUSTERING ORDER`` option. Those, as well as the other -options of a table are described in the following sections. - -.. _compact-tables: - -Compact tables -`````````````` - -.. warning:: Since Cassandra 3.0, compact tables have the exact same layout internally than non compact ones (for the - same schema obviously), and declaring a table compact **only** creates artificial limitations on the table definition - and usage. It only exists for historical reason and is preserved for backward compatibility And as ``COMPACT - STORAGE`` cannot, as of Cassandra |version|, be removed, it is strongly discouraged to create new table with the - ``COMPACT STORAGE`` option. - -A *compact* table is one defined with the ``COMPACT STORAGE`` option. This option is only maintained for backward -compatibility for definitions created before CQL version 3 and shouldn't be used for new tables. Declaring a -table with this option creates limitations for the table which are largely arbitrary (and exists for historical -reasons). Amongst those limitation: - -- a compact table cannot use collections nor static columns. -- if a compact table has at least one clustering column, then it must have *exactly* one column outside of the primary - key ones. This imply you cannot add or remove columns after creation in particular. -- a compact table is limited in the indexes it can create, and no materialized view can be created on it. - -.. _clustering-order: - -Reversing the clustering order -`````````````````````````````` - -The clustering order of a table is defined by the :ref:`clustering columns ` of that table. By -default, that ordering is based on natural order of those clustering order, but the ``CLUSTERING ORDER`` allows to -change that clustering order to use the *reverse* natural order for some (potentially all) of the columns. - -The ``CLUSTERING ORDER`` option takes the comma-separated list of the clustering column, each with a ``ASC`` (for -*ascendant*, e.g. the natural order) or ``DESC`` (for *descendant*, e.g. the reverse natural order). Note in particular -that the default (if the ``CLUSTERING ORDER`` option is not used) is strictly equivalent to using the option with all -clustering columns using the ``ASC`` modifier. - -Note that this option is basically a hint for the storage engine to change the order in which it stores the row but it -has 3 visible consequences: - -# it limits which ``ORDER BY`` clause are allowed for :ref:`selects ` on that table. You can only - order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering column - ``a`` and ``b`` and you defined ``WITH CLUSTERING ORDER (a DESC, b ASC)``, then in queries you will be allowed to use - ``ORDER BY (a DESC, b ASC)`` and (reverse clustering order) ``ORDER BY (a ASC, b DESC)`` but **not** ``ORDER BY (a - ASC, b ASC)`` (nor ``ORDER BY (a DESC, b DESC)``). -# it also change the default order of results when queried (if no ``ORDER BY`` is provided). Results are always returned - in clustering order (within a partition). 
-# it has a small performance impact on some queries as queries in reverse clustering order are slower than the one in - forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of - your columns (which is common with time series for instance where you often want data from the newest to the oldest), - it is an optimization to declare a descending clustering order. - -.. _create-table-general-options: - -Other table options -``````````````````` - -.. todo:: review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance) - -A table supports the following options: - -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| option | kind | default | description | -+================================+==========+=============+===========================================================+ -| ``comment`` | *simple* | none | A free-form, human-readable comment. | -| ``speculative_retry`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``cdc`` | *boolean*| false | Create a Change Data Capture (CDC) log on the table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``additional_write_policy`` | *simple* | 99PERCENTILE| :ref:`Speculative retry options | -| | | | `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``gc_grace_seconds`` | *simple* | 864000 | Time to wait before garbage collecting tombstones | -| | | | (deletion markers). | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``bloom_filter_fp_chance`` | *simple* | 0.00075 | The target probability of false positive of the sstable | -| | | | bloom filters. Said bloom filters will be sized to provide| -| | | | the provided probability (thus lowering this value impact | -| | | | the size of bloom filters in-memory and on-disk) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``default_time_to_live`` | *simple* | 0 | The default expiration time (“TTL”) in seconds for a | -| | | | table. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compaction`` | *map* | *see below* | :ref:`Compaction options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``compression`` | *map* | *see below* | :ref:`Compression options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``caching`` | *map* | *see below* | :ref:`Caching options `. | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``memtable_flush_period_in_ms``| *simple* | 0 | Time (in ms) before Cassandra flushes memtables to disk. 
| -+--------------------------------+----------+-------------+-----------------------------------------------------------+ -| ``read_repair`` | *simple* | BLOCKING | Sets read repair behavior (see below) | -+--------------------------------+----------+-------------+-----------------------------------------------------------+ - -.. _speculative-retry-options: - -Speculative retry options -######################### - -By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ``ONE``, a quorum for ``QUORUM``, and so on. -``speculative_retry`` determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. Speculative retries are used to reduce the latency. The speculative_retry option may be -used to configure rapid read protection with which a coordinator sends more requests than needed to satisfy the Consistency level. - -Pre-4.0 speculative Retry Policy takes a single string as a parameter, this can be ``NONE``, ``ALWAYS``, ``99PERCENTILE`` (PERCENTILE), ``50MS`` (CUSTOM). - -Examples of setting speculative retry are: - -:: - - ALTER TABLE users WITH speculative_retry = '10ms'; - - -Or, - -:: - - ALTER TABLE users WITH speculative_retry = '99PERCENTILE'; - -The problem with these settings is when a single host goes into an unavailable state this drags up the percentiles. This means if we -are set to use ``p99`` alone, we might not speculate when we intended to to because the value at the specified percentile has gone so high. -As a fix 4.0 adds support for hybrid ``MIN()``, ``MAX()`` speculative retry policies (`CASSANDRA-14293 -`_). This means if the normal ``p99`` for the -table is <50ms, we will still speculate at this value and not drag the tail latencies up... but if the ``p99th`` goes above what we know we -should never exceed we use that instead. - -In 4.0 the values (case-insensitive) discussed in the following table are supported: - -============================ ======================== ============================================================================= - Format Example Description -============================ ======================== ============================================================================= - ``XPERCENTILE`` 90.5PERCENTILE Coordinators record average per-table response times for all replicas. - If a replica takes longer than ``X`` percent of this table's average - response time, the coordinator queries an additional replica. - ``X`` must be between 0 and 100. - ``XP`` 90.5P Synonym for ``XPERCENTILE`` - ``Yms`` 25ms If a replica takes more than ``Y`` milliseconds to respond, - the coordinator queries an additional replica. - ``MIN(XPERCENTILE,YMS)`` MIN(99PERCENTILE,35MS) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is lower at the time of calculation. - Parameters are ``XPERCENTILE``, ``XP``, or ``Yms``. - This is helpful to help protect against a single slow instance; in the - happy case the 99th percentile is normally lower than the specified - fixed value however, a slow host may skew the percentile very high - meaning the slower the cluster gets, the higher the value of the percentile, - and the higher the calculated time used to determine if we should - speculate or not. 
This allows us to set an upper limit that we want to - speculate at, but avoid skewing the tail latencies by speculating at the - lower value when the percentile is less than the specified fixed upper bound. - ``MAX(XPERCENTILE,YMS)`` MAX(90.5P,25ms) A hybrid policy that will use either the specified percentile or fixed - milliseconds depending on which value is higher at the time of calculation. - ``ALWAYS`` Coordinators always query all replicas. - ``NEVER`` Coordinators never query additional replicas. -============================ =================== ============================================================================= - -As of version 4.0 speculative retry allows more friendly params (`CASSANDRA-13876 -`_). The ``speculative_retry`` is more flexible with case. As an example a -value does not have to be ``NONE``, and the following are supported alternatives. - -:: - - alter table users WITH speculative_retry = 'none'; - alter table users WITH speculative_retry = 'None'; - -The text component is case insensitive and for ``nPERCENTILE`` version 4.0 allows ``nP``, for instance ``99p``. -In a hybrid value for speculative retry, one of the two values must be a fixed millisecond value and the other a percentile value. - -Some examples: - -:: - - min(99percentile,50ms) - max(99p,50MS) - MAX(99P,50ms) - MIN(99.9PERCENTILE,50ms) - max(90percentile,100MS) - MAX(100.0PERCENTILE,60ms) - -Two values of the same kind cannot be specified such as ``min(90percentile,99percentile)`` as it wouldn’t be a hybrid value. -This setting does not affect reads with consistency level ``ALL`` because they already query all replicas. - -Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default ``99PERCENTILE``. - - -``additional_write_policy`` specifies the threshold at which a cheap quorum write will be upgraded to include transient replicas. - -.. _cql-compaction-options: - -Compaction options -################## - -The ``compaction`` options must at least define the ``'class'`` sub-option, that defines the compaction strategy class -to use. The supported class are ``'SizeTieredCompactionStrategy'`` (:ref:`STCS `), -``'LeveledCompactionStrategy'`` (:ref:`LCS `) and ``'TimeWindowCompactionStrategy'`` (:ref:`TWCS `) (the -``'DateTieredCompactionStrategy'`` is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be -preferred instead). The default is ``'SizeTieredCompactionStrategy'``. Custom strategy can be provided by specifying the full class name as a :ref:`string constant -`. - -All default strategies support a number of :ref:`common options `, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS `, :ref:`LCS -` and :ref:`TWCS `). - -.. _cql-compression-options: - -Compression options -################### - -The ``compression`` options define if and how the sstables of the table are compressed. Compression is configured on a per-table -basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. The following sub-options are -available: - -========================= =============== ============================================================================= - Option Default Description -========================= =============== ============================================================================= - ``class`` LZ4Compressor The compression algorithm to use. 
Default compressor are: LZ4Compressor, - SnappyCompressor, DeflateCompressor and ZstdCompressor. Use ``'enabled' : false`` to disable - compression. Custom compressor can be provided by specifying the full class - name as a “string constant”:#constants. - - ``enabled`` true Enable/disable sstable compression. If the ``enabled`` option is set to ``false`` no other - options must be specified. - - ``chunk_length_in_kb`` 64 On disk SSTables are compressed by block (to allow random reads). This - defines the size (in KB) of said block. Bigger values may improve the - compression rate, but increases the minimum size of data to be read from disk - for a read. The default value is an optimal value for compressing tables. Chunk length must - be a power of 2 because so is assumed so when computing the chunk number from an uncompressed - file offset. Block size may be adjusted based on read/write access patterns such as: - - - How much data is typically requested at once - - Average size of rows in the table - - ``crc_check_chance`` 1.0 Determines how likely Cassandra is to verify the checksum on each compression chunk during - reads. - - ``compression_level`` 3 Compression level. It is only applicable for ``ZstdCompressor`` and accepts values between - ``-131072`` and ``22``. -========================= =============== ============================================================================= - - -For instance, to create a table with LZ4Compressor and a chunk_lenth_in_kb of 4KB:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4}; - - -.. _cql-caching-options: - -Caching options -############### - -Caching optimizes the use of cache memory of a table. The cached data is weighed by size and access frequency. The ``caching`` -options allows to configure both the *key cache* and the *row cache* for the table. The following -sub-options are available: - -======================== ========= ==================================================================================== - Option Default Description -======================== ========= ==================================================================================== - ``keys`` ALL Whether to cache keys (“key cache”) for this table. Valid values are: ``ALL`` and - ``NONE``. - ``rows_per_partition`` NONE The amount of rows to cache per partition (“row cache”). If an integer ``n`` is - specified, the first ``n`` queried rows of a partition will be cached. Other - possible options are ``ALL``, to cache all rows of a queried partition, or ``NONE`` - to disable row caching. -======================== ========= ==================================================================================== - - -For instance, to create a table with both a key cache and 10 rows per partition:: - - CREATE TABLE simple ( - id int, - key text, - value text, - PRIMARY KEY (key, value) - ) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10}; - - -Read Repair options -################### - -The ``read_repair`` options configures the read repair behavior to allow tuning for various performance and -consistency behaviors. Two consistency properties are affected by read repair behavior. - -- Monotonic Quorum Reads: Provided by ``BLOCKING``. Monotonic quorum reads prevents reads from appearing to go back - in time in some circumstances. 
When monotonic quorum reads are not provided and a write fails to reach a quorum of - replicas, it may be visible in one read, and then disappear in a subsequent read. -- Write Atomicity: Provided by ``NONE``. Write atomicity prevents reads from returning partially applied writes. - Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement - is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it - is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a - batch, but then select a single row by specifying the clustering column in a SELECT statement. - -The available read repair settings are: - -Blocking -```````` -The default setting. When ``read_repair`` is set to ``BLOCKING``, and a read repair is triggered, the read will block -on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition -level write atomicity - -None -```` - -When ``read_repair`` is set to ``NONE``, the coordinator will reconcile any differences between replicas, but will not -attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads. - - -Other considerations: -##################### - -- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. There is thus no need to try to - anticipate future usage when creating a table. - -.. _alter-table-statement: - -ALTER TABLE -^^^^^^^^^^^ - -Altering an existing table uses the ``ALTER TABLE`` statement: - -.. productionlist:: - alter_table_statement: ALTER TABLE `table_name` `alter_table_instruction` - alter_table_instruction: ADD `column_name` `cql_type` ( ',' `column_name` `cql_type` )* - : | DROP `column_name` ( `column_name` )* - : | WITH `options` - -For instance:: - - ALTER TABLE addamsFamily ADD gravesite varchar; - - ALTER TABLE addamsFamily - WITH comment = 'A most excellent and useful table'; - -The ``ALTER TABLE`` statement can: - -- Add new column(s) to the table (through the ``ADD`` instruction). Note that the primary key of a table cannot be - changed and thus newly added column will, by extension, never be part of the primary key. Also note that :ref:`compact - tables ` have restrictions regarding column addition. Note that this is constant (in the amount of - data the cluster contains) time operation. -- Remove column(s) from the table. This drops both the column and all its content, but note that while the column - becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings - below. Due to lazy removal, the altering itself is a constant (in the amount of data removed or contained in the - cluster) time operation. -- Change some of the table options (through the ``WITH`` instruction). The :ref:`supported options - ` are the same that when creating a table (outside of ``COMPACT STORAGE`` and ``CLUSTERING - ORDER`` that cannot be changed after creation). Note that setting any ``compaction`` sub-options has the effect of - erasing all previous ``compaction`` options, so you need to re-specify all the sub-options if you want to keep them. - The same note applies to the set of ``compression`` sub-options. - -.. warning:: Dropping a column assumes that the timestamps used for the value of this column are "real" timestamp in - microseconds. 
Using "real" timestamps in microseconds is the default is and is **strongly** recommended but as - Cassandra allows the client to provide any timestamp on any table it is theoretically possible to use another - convention. Please be aware that if you do so, dropping a column will not work correctly. - -.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name than the dropped one - **unless** the type of the dropped column was a (non-frozen) column (due to an internal technical limitation). - - -.. _drop-table-statement: - -DROP TABLE -^^^^^^^^^^ - -Dropping a table uses the ``DROP TABLE`` statement: - -.. productionlist:: - drop_table_statement: DROP TABLE [ IF EXISTS ] `table_name` - -Dropping a table results in the immediate, irreversible removal of the table, including all data it contains. - -If the table does not exist, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. - -.. _truncate-statement: - -TRUNCATE -^^^^^^^^ - -A table can be truncated using the ``TRUNCATE`` statement: - -.. productionlist:: - truncate_statement: TRUNCATE [ TABLE ] `table_name` - -Note that ``TRUNCATE TABLE foo`` is allowed for consistency with other DDL statements but tables are the only object -that can be truncated currently and so the ``TABLE`` keyword can be omitted. - -Truncating a table permanently removes all existing data from the table, but without removing the table itself. diff --git a/src/doc/4.0-beta1/_sources/cql/definitions.rst.txt b/src/doc/4.0-beta1/_sources/cql/definitions.rst.txt deleted file mode 100644 index 3df6f2099..000000000 --- a/src/doc/4.0-beta1/_sources/cql/definitions.rst.txt +++ /dev/null @@ -1,234 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. highlight:: cql - -Definitions ------------ - -.. _conventions: - -Conventions -^^^^^^^^^^^ - -To aid in specifying the CQL syntax, we will use the following conventions in this document: - -- Language rules will be given in an informal `BNF variant - `_ notation. In particular, we'll use square brakets - (``[ item ]``) for optional items, ``*`` and ``+`` for repeated items (where ``+`` imply at least one). -- The grammar will also use the following convention for convenience: non-terminal term will be lowercase (and link to - their definition) while terminal keywords will be provided "all caps". Note however that keywords are - :ref:`identifiers` and are thus case insensitive in practice. We will also define some early construction using - regexp, which we'll indicate with ``re()``. -- The grammar is provided for documentation purposes and leave some minor details out. 
For instance, the comma on the - last column definition in a ``CREATE TABLE`` statement is optional but supported if present even though the grammar in - this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL. -- References to keywords or pieces of CQL code in running text will be shown in a ``fixed-width font``. - - -.. _identifiers: - -Identifiers and keywords -^^^^^^^^^^^^^^^^^^^^^^^^ - -The CQL language uses *identifiers* (or *names*) to identify tables, columns and other objects. An identifier is a token -matching the regular expression ``[a-zA-Z][a-zA-Z0-9_]*``. - -A number of such identifiers, like ``SELECT`` or ``WITH``, are *keywords*. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in :ref:`appendix-A`. - -Identifiers and (unquoted) keywords are case insensitive. Thus ``SELECT`` is the same than ``select`` or ``sElEcT``, and -``myId`` is the same than ``myid`` or ``MYID``. A convention often used (in particular by the samples of this -documentation) is to use upper case for keywords and lower case for other identifiers. - -There is a second kind of identifiers called *quoted identifiers* defined by enclosing an arbitrary sequence of -characters (non empty) in double-quotes(``"``). Quoted identifiers are never keywords. Thus ``"select"`` is not a -reserved keyword and can be used to refer to a column (note that using this is particularly advised), while ``select`` -would raise a parsing error. Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case -sensitive (``"My Quoted Id"`` is *different* from ``"my quoted id"``). A fully lowercase quoted identifier that matches -``[a-zA-Z][a-zA-Z0-9_]*`` is however *equivalent* to the unquoted identifier obtained by removing the double-quote (so -``"myid"`` is equivalent to ``myid`` and to ``myId`` but different from ``"myId"``). Inside a quoted identifier, the -double-quote character can be repeated to escape it, so ``"foo "" bar"`` is a valid identifier. - -.. note:: *quoted identifiers* allows to declare columns with arbitrary names, and those can sometime clash with - specific names used by the server. For instance, when using conditional update, the server will respond with a - result-set containing a special result named ``"[applied]"``. If you’ve declared a column with such a name, this - could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but - if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like - ``"[applied]"``) and any name that looks like a function call (like ``"f(x)"``). - -More formally, we have: - -.. productionlist:: - identifier: `unquoted_identifier` | `quoted_identifier` - unquoted_identifier: re('[a-zA-Z][a-zA-Z0-9_]*') - quoted_identifier: '"' (any character where " can appear if doubled)+ '"' - -.. _constants: - -Constants -^^^^^^^^^ - -CQL defines the following kind of *constants*: - -.. 
productionlist:: - constant: `string` | `integer` | `float` | `boolean` | `uuid` | `blob` | NULL - string: '\'' (any character where ' can appear if doubled)+ '\'' - : '$$' (any character other than '$$') '$$' - integer: re('-?[0-9]+') - float: re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY - boolean: TRUE | FALSE - uuid: `hex`{8}-`hex`{4}-`hex`{4}-`hex`{4}-`hex`{12} - hex: re("[0-9a-fA-F]") - blob: '0' ('x' | 'X') `hex`+ - -In other words: - -- A string constant is an arbitrary sequence of characters enclosed by single-quote(``'``). A single-quote - can be included by repeating it, e.g. ``'It''s raining today'``. Those are not to be confused with quoted - :ref:`identifiers` that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence - of characters by two dollar characters, in which case single-quote can be used without escaping (``$$It's raining - today$$``). That latter form is often used when defining :ref:`user-defined functions ` to avoid having to - escape single-quote characters in function body (as they are more likely to occur than ``$$``). -- Integer, float and boolean constant are defined as expected. Note however than float allows the special ``NaN`` and - ``Infinity`` constants. -- CQL supports UUID_ constants. -- Blobs content are provided in hexadecimal and prefixed by ``0x``. -- The special ``NULL`` constant denotes the absence of value. - -For how these constants are typed, see the :ref:`data-types` section. - -Terms -^^^^^ - -CQL has the notion of a *term*, which denotes the kind of values that CQL support. Terms are defined by: - -.. productionlist:: - term: `constant` | `literal` | `function_call` | `arithmetic_operation` | `type_hint` | `bind_marker` - literal: `collection_literal` | `udt_literal` | `tuple_literal` - function_call: `identifier` '(' [ `term` (',' `term`)* ] ')' - arithmetic_operation: '-' `term` | `term` ('+' | '-' | '*' | '/' | '%') `term` - type_hint: '(' `cql_type` `)` term - bind_marker: '?' | ':' `identifier` - -A term is thus one of: - -- A :ref:`constant `. -- A literal for either :ref:`a collection `, :ref:`a user-defined type ` or :ref:`a tuple ` - (see the linked sections for details). -- A function call: see :ref:`the section on functions ` for details on which :ref:`native function - ` exists and how to define your own :ref:`user-defined ones `. -- An arithmetic operation between terms. see :ref:`the section on arithmetic operations ` -- A *type hint*: see the :ref:`related section ` for details. -- A bind marker, which denotes a variable to be bound at execution time. See the section on :ref:`prepared-statements` - for details. A bind marker can be either anonymous (``?``) or named (``:some_name``). The latter form provides a more - convenient way to refer to the variable for binding it and should generally be preferred. - - -Comments -^^^^^^^^ - -A comment in CQL is a line beginning by either double dashes (``--``) or double slash (``//``). - -Multi-line comments are also supported through enclosure within ``/*`` and ``*/`` (but nesting is not supported). - -:: - - -- This is a comment - // This is a comment too - /* This is - a multi-line comment */ - -Statements -^^^^^^^^^^ - -CQL consists of statements that can be divided in the following categories: - -- :ref:`data-definition` statements, to define and change how the data is stored (keyspaces and tables). -- :ref:`data-manipulation` statements, for selecting, inserting and deleting data. -- :ref:`secondary-indexes` statements. 
-- :ref:`materialized-views` statements. -- :ref:`cql-roles` statements. -- :ref:`cql-permissions` statements. -- :ref:`User-Defined Functions ` statements. -- :ref:`udts` statements. -- :ref:`cql-triggers` statements. - -All the statements are listed below and are described in the rest of this documentation (see links above): - -.. productionlist:: - cql_statement: `statement` [ ';' ] - statement: `ddl_statement` - : | `dml_statement` - : | `secondary_index_statement` - : | `materialized_view_statement` - : | `role_or_permission_statement` - : | `udf_statement` - : | `udt_statement` - : | `trigger_statement` - ddl_statement: `use_statement` - : | `create_keyspace_statement` - : | `alter_keyspace_statement` - : | `drop_keyspace_statement` - : | `create_table_statement` - : | `alter_table_statement` - : | `drop_table_statement` - : | `truncate_statement` - dml_statement: `select_statement` - : | `insert_statement` - : | `update_statement` - : | `delete_statement` - : | `batch_statement` - secondary_index_statement: `create_index_statement` - : | `drop_index_statement` - materialized_view_statement: `create_materialized_view_statement` - : | `drop_materialized_view_statement` - role_or_permission_statement: `create_role_statement` - : | `alter_role_statement` - : | `drop_role_statement` - : | `grant_role_statement` - : | `revoke_role_statement` - : | `list_roles_statement` - : | `grant_permission_statement` - : | `revoke_permission_statement` - : | `list_permissions_statement` - : | `create_user_statement` - : | `alter_user_statement` - : | `drop_user_statement` - : | `list_users_statement` - udf_statement: `create_function_statement` - : | `drop_function_statement` - : | `create_aggregate_statement` - : | `drop_aggregate_statement` - udt_statement: `create_type_statement` - : | `alter_type_statement` - : | `drop_type_statement` - trigger_statement: `create_trigger_statement` - : | `drop_trigger_statement` - -.. _prepared-statements: - -Prepared Statements -^^^^^^^^^^^^^^^^^^^ - -CQL supports *prepared statements*. Prepared statements are an optimization that allows to parse a query only once but -execute it multiple times with different concrete values. - -Any statement that uses at least one bind marker (see :token:`bind_marker`) will need to be *prepared*. After which the statement -can be *executed* by provided concrete values for each of its marker. The exact details of how a statement is prepared -and then executed depends on the CQL driver used and you should refer to your driver documentation. diff --git a/src/doc/4.0-beta1/_sources/cql/dml.rst.txt b/src/doc/4.0-beta1/_sources/cql/dml.rst.txt deleted file mode 100644 index 1308de57e..000000000 --- a/src/doc/4.0-beta1/_sources/cql/dml.rst.txt +++ /dev/null @@ -1,522 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. 
limitations under the License. - -.. highlight:: cql - -.. _data-manipulation: - -Data Manipulation ------------------ - -This section describes the statements supported by CQL to insert, update, delete and query data. - -.. _select-statement: - -SELECT -^^^^^^ - -Querying data from data is done using a ``SELECT`` statement: - -.. productionlist:: - select_statement: SELECT [ JSON | DISTINCT ] ( `select_clause` | '*' ) - : FROM `table_name` - : [ WHERE `where_clause` ] - : [ GROUP BY `group_by_clause` ] - : [ ORDER BY `ordering_clause` ] - : [ PER PARTITION LIMIT (`integer` | `bind_marker`) ] - : [ LIMIT (`integer` | `bind_marker`) ] - : [ ALLOW FILTERING ] - select_clause: `selector` [ AS `identifier` ] ( ',' `selector` [ AS `identifier` ] ) - selector: `column_name` - : | `term` - : | CAST '(' `selector` AS `cql_type` ')' - : | `function_name` '(' [ `selector` ( ',' `selector` )* ] ')' - : | COUNT '(' '*' ')' - where_clause: `relation` ( AND `relation` )* - relation: `column_name` `operator` `term` - : '(' `column_name` ( ',' `column_name` )* ')' `operator` `tuple_literal` - : TOKEN '(' `column_name` ( ',' `column_name` )* ')' `operator` `term` - operator: '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY - group_by_clause: `column_name` ( ',' `column_name` )* - ordering_clause: `column_name` [ ASC | DESC ] ( ',' `column_name` [ ASC | DESC ] )* - -For instance:: - - SELECT name, occupation FROM users WHERE userid IN (199, 200, 207); - SELECT JSON name, occupation FROM users WHERE userid = 199; - SELECT name AS user_name, occupation AS user_occupation FROM users; - - SELECT time, value - FROM events - WHERE event_type = 'myEvent' - AND time > '2011-02-03' - AND time <= '2012-01-01' - - SELECT COUNT (*) AS user_count FROM users; - -The ``SELECT`` statements reads one or more columns for one or more rows in a table. It returns a result-set of the rows -matching the request, where each row contains the values for the selection corresponding to the query. Additionally, -:ref:`functions ` including :ref:`aggregation ` ones can be applied to the result. - -A ``SELECT`` statement contains at least a :ref:`selection clause ` and the name of the table on which -the selection is on (note that CQL does **not** joins or sub-queries and thus a select statement only apply to a single -table). In most case, a select will also have a :ref:`where clause ` and it can optionally have additional -clauses to :ref:`order ` or :ref:`limit ` the results. Lastly, :ref:`queries that require -filtering ` can be allowed if the ``ALLOW FILTERING`` flag is provided. - -.. _selection-clause: - -Selection clause -~~~~~~~~~~~~~~~~ - -The :token:`select_clause` determines which columns needs to be queried and returned in the result-set, as well as any -transformation to apply to this result before returning. It consists of a comma-separated list of *selectors* or, -alternatively, of the wildcard character (``*``) to select all the columns defined in the table. - -Selectors -````````` - -A :token:`selector` can be one of: - -- A column name of the table selected, to retrieve the values for that column. -- A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the - corresponding column of the result-set will simply have the value of this term for every row returned). -- A casting, which allows to convert a nested selector to a (compatible) type. -- A function call, where the arguments are selector themselves. 
See the section on :ref:`functions ` for - more details. -- The special call ``COUNT(*)`` to the :ref:`COUNT function `, which counts all non-null results. - -Aliases -``````` - -Every *top-level* selector can also be aliased (using `AS`). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:: - - // Without alias - SELECT intAsBlob(4) FROM t; - - // intAsBlob(4) - // -------------- - // 0x00000004 - - // With alias - SELECT intAsBlob(4) AS four FROM t; - - // four - // ------------ - // 0x00000004 - -.. note:: Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the ``WHERE`` - clause, not in the ``ORDER BY`` clause, ...). You must use the orignal column name instead. - - -``WRITETIME`` and ``TTL`` function -``````````````````````````````````` - -Selection supports two special functions (that aren't allowed anywhere else): ``WRITETIME`` and ``TTL``. Both function -take only one argument and that argument *must* be a column name (so for instance ``TTL(3)`` is invalid). - -Those functions allow to retrieve meta-information that are stored internally for each column, namely: - -- the timestamp of the value of the column for ``WRITETIME``. -- the remaining time to live (in seconds) for the value of the column if it set to expire (and ``null`` otherwise). - -.. _where-clause: - -The ``WHERE`` clause -~~~~~~~~~~~~~~~~~~~~ - -The ``WHERE`` clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the ``PRIMARY KEY`` and/or have a `secondary index <#createIndexStmt>`__ defined on them. - -Not all relations are allowed in a query. For instance, non-equal relations (where ``IN`` is considered as an equal -relation) on a partition key are not supported (but see the use of the ``TOKEN`` method below to do non-equal queries on -the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations -on them is restricted to the relations that allow to select a **contiguous** (for the ordering) set of rows. For -instance, given:: - - CREATE TABLE posts ( - userid text, - blog_title text, - posted_at timestamp, - entry_title text, - content text, - category int, - PRIMARY KEY (userid, blog_title, posted_at) - ) - -The following query is allowed:: - - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND blog_title='John''s Blog' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):: - - // Needs a blog_title to be set to select ranges of posted_at - SELECT entry_title, content FROM posts - WHERE userid = 'john doe' - AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31' - -When specifying relations, the ``TOKEN`` function can be used on the ``PARTITION KEY`` column to query. In that case, -rows will be selected based on the token of their ``PARTITION_KEY`` rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won't yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -``token(-1) > token(0)`` in particular). 
Example:: - - SELECT * FROM posts - WHERE token(userid) > token('tom') AND token(userid) < token('bob') - -Moreover, the ``IN`` relation is only allowed on the last column of the partition key and on the last column of the full -primary key. - -It is also possible to “group” ``CLUSTERING COLUMNS`` together in a relation using the tuple notation. For instance:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01') - -will request all rows that sorts after the one having “John's Blog” as ``blog_tile`` and '2012-01-01' for ``posted_at`` -in the clustering order. In particular, rows having a ``post_at <= '2012-01-01'`` will be returned as long as their -``blog_title > 'John''s Blog'``, which would not be the case for:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND blog_title > 'John''s Blog' - AND posted_at > '2012-01-01' - -The tuple notation may also be used for ``IN`` clauses on clustering columns:: - - SELECT * FROM posts - WHERE userid = 'john doe' - AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01')) - -The ``CONTAINS`` operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -``CONTAINS`` applies to the map values. The ``CONTAINS KEY`` operator may only be used on map columns and applies to the -map keys. - -.. _group-by-clause: - -Grouping results -~~~~~~~~~~~~~~~~ - -The ``GROUP BY`` option allows to condense into a single row all selected rows that share the same values for a set -of columns. - -Using the ``GROUP BY`` option, it is only possible to group rows at the partition key level or at a clustering column -level. By consequence, the ``GROUP BY`` option only accept as arguments primary key column names in the primary key -order. If a primary key column is restricted by an equality restriction it is not required to be present in the -``GROUP BY`` clause. - -Aggregate functions will produce a separate value for each group. If no ``GROUP BY`` clause is specified, -aggregates functions will produce a single value for all the rows. - -If a column is selected without an aggregate function, in a statement with a ``GROUP BY``, the first value encounter -in each group will be returned. - -.. _ordering-clause: - -Ordering results -~~~~~~~~~~~~~~~~ - -The ``ORDER BY`` clause allows to select the order of the returned results. It takes as argument a list of column names -along with the order for the column (``ASC`` for ascendant and ``DESC`` for descendant, omitting the order being -equivalent to ``ASC``). Currently the possible orderings are limited by the :ref:`clustering order ` -defined on the table: - -- if the table has been defined without any specific ``CLUSTERING ORDER``, then then allowed orderings are the order - induced by the clustering columns and the reverse of that one. -- otherwise, the orderings allowed are the order of the ``CLUSTERING ORDER`` option and the reversed one. - -.. _limit-clause: - -Limiting results -~~~~~~~~~~~~~~~~ - -The ``LIMIT`` option to a ``SELECT`` statement limits the number of rows returned by a query, while the ``PER PARTITION -LIMIT`` option limits the number of rows returned for a given partition by the query. Note that both type of limit can -used in the same statement. - -.. _allow-filtering: - -Allowing filtering -~~~~~~~~~~~~~~~~~~ - -By default, CQL only allows select queries that don't involve “filtering” server side, i.e. 
queries where we know that -all (live) record read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” -queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of -data **returned** by the query (which can be controlled through ``LIMIT``). - -The ``ALLOW FILTERING`` option allows to explicitly allow (some) queries that require filtering. Please note that a -query using ``ALLOW FILTERING`` may thus have unpredictable performance (for the definition above), i.e. even a query -that selects a handful of records **may** exhibit performance that depends on the total amount of data stored in the -cluster. - -For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:: - - CREATE TABLE users ( - username text PRIMARY KEY, - firstname text, - lastname text, - birth_year int, - country text - ) - - CREATE INDEX ON users(birth_year); - -Then the following queries are valid:: - - SELECT * FROM users; - SELECT * FROM users WHERE birth_year = 1981; - -because in both case, Cassandra guarantees that these queries performance will be proportional to the amount of data -returned. In particular, if no users are born in 1981, then the second query performance will not depend of the number -of user profile stored in the database (not directly at least: due to secondary index implementation consideration, this -query may still depend on the number of node in the cluster, which indirectly depends on the amount of data stored. -Nevertheless, the number of nodes will always be multiple number of magnitude lower than the number of user profile -stored). Of course, both query may return very large result set in practice, but the amount of data returned can always -be controlled by adding a ``LIMIT``. - -However, the following query will be rejected:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR'; - -because Cassandra cannot guarantee that it won't have to scan large amount of data even if the result to those query is -small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from -France. However, if you “know what you are doing”, you can force the execution of this query by using ``ALLOW -FILTERING`` and so the following query is valid:: - - SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING; - -.. _insert-statement: - -INSERT -^^^^^^ - -Inserting data for a row is done using an ``INSERT`` statement: - -.. productionlist:: - insert_statement: INSERT INTO `table_name` ( `names_values` | `json_clause` ) - : [ IF NOT EXISTS ] - : [ USING `update_parameter` ( AND `update_parameter` )* ] - names_values: `names` VALUES `tuple_literal` - json_clause: JSON `string` [ DEFAULT ( NULL | UNSET ) ] - names: '(' `column_name` ( ',' `column_name` )* ')' - -For instance:: - - INSERT INTO NerdMovies (movie, director, main_actor, year) - VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005) - USING TTL 86400; - - INSERT INTO NerdMovies JSON '{"movie": "Serenity", - "director": "Joss Whedon", - "year": 2005}'; - -The ``INSERT`` statement writes one or more columns for a given row in a table. Note that since a row is identified by -its ``PRIMARY KEY``, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the ``VALUES`` syntax. 
When using the ``JSON`` syntax, they are optional. See the -section on :ref:`JSON support ` for more detail. - -Note that unlike in SQL, ``INSERT`` does not check the prior existence of the row by default: the row is created if none -existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened. - -It is however possible to use the ``IF NOT EXISTS`` condition to only insert if the row does not exist prior to the -insertion. But please note that using ``IF NOT EXISTS`` will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly. - -All updates for an ``INSERT`` are applied atomically and in isolation. - -Please refer to the :ref:`UPDATE ` section for informations on the :token:`update_parameter`. - -Also note that ``INSERT`` does not support counters, while ``UPDATE`` does. - -.. _update-statement: - -UPDATE -^^^^^^ - -Updating a row is done using an ``UPDATE`` statement: - -.. productionlist:: - update_statement: UPDATE `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : SET `assignment` ( ',' `assignment` )* - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - update_parameter: ( TIMESTAMP | TTL ) ( `integer` | `bind_marker` ) - assignment: `simple_selection` '=' `term` - :| `column_name` '=' `column_name` ( '+' | '-' ) `term` - :| `column_name` '=' `list_literal` '+' `column_name` - simple_selection: `column_name` - :| `column_name` '[' `term` ']' - :| `column_name` '.' `field_name - condition: `simple_selection` `operator` `term` - -For instance:: - - UPDATE NerdMovies USING TTL 400 - SET director = 'Joss Whedon', - main_actor = 'Nathan Fillion', - year = 2005 - WHERE movie = 'Serenity'; - - UPDATE UserActions - SET total = total + 2 - WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 - AND action = 'click'; - -The ``UPDATE`` statement writes one or more columns for a given row in a table. The :token:`where_clause` is used to -select the row to update and must include all columns composing the ``PRIMARY KEY``. Non primary key columns are then -set using the ``SET`` keyword. - -Note that unlike in SQL, ``UPDATE`` does not check the prior existence of the row by default (except through ``IF``, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred. - -It is however possible to use the conditions on some columns through ``IF``, in which case the row will not be updated -unless the conditions are met. But, please note that using ``IF`` conditions will incur a non-negligible performance -cost (internally, Paxos will be used) so this should be used sparingly. - -In an ``UPDATE`` statement, all updates within the same partition key are applied atomically and in isolation. - -Regarding the :token:`assignment`: - -- ``c = c + 3`` is used to increment/decrement counters. The column name after the '=' sign **must** be the same than - the one before the '=' sign. Note that increment/decrement is only allowed on counters, and are the *only* update - operations allowed on counters. See the section on :ref:`counters ` for details. -- ``id = id + `` and ``id[value1] = value2`` are for collections, see the :ref:`relevant section - ` for details. -- ``id.field = 3`` is for setting the value of a field on a non-frozen user-defined types. see the :ref:`relevant section - ` for details. - -.. 
_update-parameters: - -Update parameters -~~~~~~~~~~~~~~~~~ - -The ``UPDATE``, ``INSERT`` (and ``DELETE`` and ``BATCH`` for the ``TIMESTAMP``) statements support the following -parameters: - -- ``TIMESTAMP``: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in - microseconds) at the start of statement execution as the timestamp. This is usually a suitable default. -- ``TTL``: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are - automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not - the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL - is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a - default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of ``null`` is equivalent - to inserting with a TTL of 0. - -.. _delete_statement: - -DELETE -^^^^^^ - -Deleting rows or parts of rows uses the ``DELETE`` statement: - -.. productionlist:: - delete_statement: DELETE [ `simple_selection` ( ',' `simple_selection` ) ] - : FROM `table_name` - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : WHERE `where_clause` - : [ IF ( EXISTS | `condition` ( AND `condition` )*) ] - -For instance:: - - DELETE FROM NerdMovies USING TIMESTAMP 1240003134 - WHERE movie = 'Serenity'; - - DELETE phone FROM Users - WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); - -The ``DELETE`` statement deletes columns and rows. If column names are provided directly after the ``DELETE`` keyword, -only those columns are deleted from the row indicated by the ``WHERE`` clause. Otherwise, whole rows are removed. - -The ``WHERE`` clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -``IN`` operator. A range of rows may be deleted using an inequality operator (such as ``>=``). - -``DELETE`` supports the ``TIMESTAMP`` option with the same semantics as in :ref:`updates `. - -In a ``DELETE`` statement, all deletions within the same partition key are applied atomically and in isolation. - -A ``DELETE`` operation can be conditional through the use of an ``IF`` clause, similar to ``UPDATE`` and ``INSERT`` -statements. However, as with ``INSERT`` and ``UPDATE`` statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly. - -.. _batch_statement: - -BATCH -^^^^^ - -Multiple ``INSERT``, ``UPDATE`` and ``DELETE`` can be executed in a single statement by grouping them through a -``BATCH`` statement: - -.. productionlist:: - batch_statement: BEGIN [ UNLOGGED | COUNTER ] BATCH - : [ USING `update_parameter` ( AND `update_parameter` )* ] - : `modification_statement` ( ';' `modification_statement` )* - : APPLY BATCH - modification_statement: `insert_statement` | `update_statement` | `delete_statement` - -For instance:: - - BEGIN BATCH - INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user'); - UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3'; - INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c'); - DELETE name FROM users WHERE userid = 'user1'; - APPLY BATCH; - -The ``BATCH`` statement group multiple modification statements (insertions/updates and deletions) into a single -statement. 
It serves several purposes: - -- It saves network round-trips between the client and the server (and sometimes between the server coordinator and the - replicas) when batching multiple updates. -- All updates in a ``BATCH`` belonging to a given partition key are performed in isolation. -- By default, all operations in the batch are performed as *logged*, to ensure all mutations eventually complete (or - none will). See the notes on :ref:`UNLOGGED batches ` for more details. - -Note that: - -- ``BATCH`` statements may only contain ``UPDATE``, ``INSERT`` and ``DELETE`` statements (not other batches for instance). -- Batches are *not* a full analogue for SQL transactions. -- If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp - (either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra's conflict - resolution procedure in the case of `timestamp ties `__, operations may - be applied in an order that is different from the order they are listed in the ``BATCH`` statement. To force a - particular operation ordering, you must specify per-operation timestamps. -- A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization. - -.. _unlogged-batches: - -``UNLOGGED`` batches -~~~~~~~~~~~~~~~~~~~~ - -By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition). - -There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur -this penalty, you can tell Cassandra to skip the batchlog with the ``UNLOGGED`` option. If the ``UNLOGGED`` option is -used, a failed batch might leave the patch only partly applied. - -``COUNTER`` batches -~~~~~~~~~~~~~~~~~~~ - -Use the ``COUNTER`` option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent. diff --git a/src/doc/4.0-beta1/_sources/cql/functions.rst.txt b/src/doc/4.0-beta1/_sources/cql/functions.rst.txt deleted file mode 100644 index 965125a79..000000000 --- a/src/doc/4.0-beta1/_sources/cql/functions.rst.txt +++ /dev/null @@ -1,581 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-functions: - -.. Need some intro for UDF and native functions in general and point those to it. -.. _udfs: -.. _native-functions: - -Functions ---------- - -CQL supports 2 main categories of functions: - -- the :ref:`scalar functions `, which simply take a number of values and produce an output with it. -- the :ref:`aggregate functions `, which are used to aggregate multiple rows results from a - ``SELECT`` statement. 
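-
-For instance, a minimal sketch of the difference (assuming the illustrative ``plays`` table that
-the native aggregate examples further down also use; this snippet is not part of the original
-examples)::
-
-    -- a scalar function is evaluated once for every returned row
-    SELECT game, toTimestamp(now()) FROM plays;
-
-    -- an aggregate function collapses all selected rows into a single value
-    SELECT max(players) FROM plays;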
- -In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined -functions. - -.. note:: By default, the use of user-defined functions is disabled by default for security concerns (even when - enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do - evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions`` - in ``cassandra.yaml`` to enable them. - -A function is identifier by its name: - -.. productionlist:: - function_name: [ `keyspace_name` '.' ] `name` - -.. _scalar-functions: - -Scalar functions -^^^^^^^^^^^^^^^^ - -.. _scalar-native-functions: - -Native functions -~~~~~~~~~~~~~~~~ - -Cast -```` - -The ``cast`` function can be used to converts one native datatype to another. - -The following table describes the conversions supported by the ``cast`` function. Cassandra will silently ignore any -cast converting a datatype into its own datatype. - -=============== ======================================================================================================= - From To -=============== ======================================================================================================= - ``ascii`` ``text``, ``varchar`` - ``bigint`` ``tinyint``, ``smallint``, ``int``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``boolean`` ``text``, ``varchar`` - ``counter`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``date`` ``timestamp`` - ``decimal`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``varint``, ``text``, - ``varchar`` - ``double`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``float`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``inet`` ``text``, ``varchar`` - ``int`` ``tinyint``, ``smallint``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``smallint`` ``tinyint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, ``text``, - ``varchar`` - ``time`` ``text``, ``varchar`` - ``timestamp`` ``date``, ``text``, ``varchar`` - ``timeuuid`` ``timestamp``, ``date``, ``text``, ``varchar`` - ``tinyint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``varint``, - ``text``, ``varchar`` - ``uuid`` ``text``, ``varchar`` - ``varint`` ``tinyint``, ``smallint``, ``int``, ``bigint``, ``float``, ``double``, ``decimal``, ``text``, - ``varchar`` -=============== ======================================================================================================= - -The conversions rely strictly on Java's semantics. For example, the double value 1 will be converted to the text value -'1.0'. For instance:: - - SELECT avg(cast(count as double)) FROM myTable - -Token -````` - -The ``token`` function allows to compute the token for a given partition key. The exact signature of the token function -depends on the table concerned and of the partitioner used by the cluster. - -The type of the arguments of the ``token`` depend on the type of the partition key columns. The return type depend on -the partitioner in use: - -- For Murmur3Partitioner, the return type is ``bigint``. -- For RandomPartitioner, the return type is ``varint``. 
-- For ByteOrderedPartitioner, the return type is ``blob``. - -For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:: - - CREATE TABLE users ( - userid text PRIMARY KEY, - username text, - ) - -then the ``token`` function will take a single argument of type ``text`` (in that case, the partition key is ``userid`` -(there is no clustering columns so the partition key is the same than the primary key)), and the return type will be -``bigint``. - -Uuid -```` -The ``uuid`` function takes no parameters and generates a random type 4 uuid suitable for use in ``INSERT`` or -``UPDATE`` statements. - -.. _timeuuid-functions: - -Timeuuid functions -`````````````````` - -``now`` -####### - -The ``now`` function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the -time the function is invoked. Note that this method is useful for insertion but is largely non-sensical in -``WHERE`` clauses. For instance, a query of the form:: - - SELECT * FROM myTable WHERE t = now() - -will never return any result by design, since the value returned by ``now()`` is guaranteed to be unique. - -``currentTimeUUID`` is an alias of ``now``. - -``minTimeuuid`` and ``maxTimeuuid`` -################################### - -The ``minTimeuuid`` (resp. ``maxTimeuuid``) function takes a ``timestamp`` value ``t`` (which can be `either a timestamp -or a date string `) and return a *fake* ``timeuuid`` corresponding to the *smallest* (resp. *biggest*) -possible ``timeuuid`` having for timestamp ``t``. So for instance:: - - SELECT * FROM myTable - WHERE t > maxTimeuuid('2013-01-01 00:05+0000') - AND t < minTimeuuid('2013-02-02 10:00+0000') - -will select all rows where the ``timeuuid`` column ``t`` is strictly older than ``'2013-01-01 00:05+0000'`` but strictly -younger than ``'2013-02-02 10:00+0000'``. Please note that ``t >= maxTimeuuid('2013-01-01 00:05+0000')`` would still -*not* select a ``timeuuid`` generated exactly at '2013-01-01 00:05+0000' and is essentially equivalent to ``t > -maxTimeuuid('2013-01-01 00:05+0000')``. - -.. note:: We called the values generated by ``minTimeuuid`` and ``maxTimeuuid`` *fake* UUID because they do no respect - the Time-Based UUID generation process specified by the `RFC 4122 `__. In - particular, the value returned by these 2 methods will not be unique. This means you should only use those methods - for querying (as in the example above). Inserting the result of those methods is almost certainly *a bad idea*. - -Datetime functions -`````````````````` - -Retrieving the current date/time -################################ - -The following functions can be used to retrieve the date/time at the time where the function is invoked: - -===================== =============== - Function name Output type -===================== =============== - ``currentTimestamp`` ``timestamp`` - ``currentDate`` ``date`` - ``currentTime`` ``time`` - ``currentTimeUUID`` ``timeUUID`` -===================== =============== - -For example the last 2 days of data can be retrieved using:: - - SELECT * FROM myTable WHERE date >= currentDate() - 2d - -Time conversion functions -######################### - -A number of functions are provided to “convert” a ``timeuuid``, a ``timestamp`` or a ``date`` into another ``native`` -type. 
- -===================== =============== =================================================================== - Function name Input type Description -===================== =============== =================================================================== - ``toDate`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``date`` type - ``toDate`` ``timestamp`` Converts the ``timestamp`` argument into a ``date`` type - ``toTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``timestamp`` type - ``toTimestamp`` ``date`` Converts the ``date`` argument into a ``timestamp`` type - ``toUnixTimestamp`` ``timeuuid`` Converts the ``timeuuid`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``timestamp`` Converts the ``timestamp`` argument into a ``bigInt`` raw value - ``toUnixTimestamp`` ``date`` Converts the ``date`` argument into a ``bigInt`` raw value - ``dateOf`` ``timeuuid`` Similar to ``toTimestamp(timeuuid)`` (DEPRECATED) - ``unixTimestampOf`` ``timeuuid`` Similar to ``toUnixTimestamp(timeuuid)`` (DEPRECATED) -===================== =============== =================================================================== - -Blob conversion functions -````````````````````````` -A number of functions are provided to “convert” the native types into binary data (``blob``). For every -```` ``type`` supported by CQL (a notable exceptions is ``blob``, for obvious reasons), the function -``typeAsBlob`` takes a argument of type ``type`` and return it as a ``blob``. Conversely, the function ``blobAsType`` -takes a 64-bit ``blob`` argument and convert it to a ``bigint`` value. And so for instance, ``bigintAsBlob(3)`` is -``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``. - -.. _user-defined-scalar-functions: - -User-defined functions -~~~~~~~~~~~~~~~~~~~~~~ - -User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in *Java* and *JavaScript*. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath. - -UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster. - -UDFs can be *overloaded* - i.e. multiple UDFs with different argument types but the same function name. Example:: - - CREATE FUNCTION sample ( arg int ) ...; - CREATE FUNCTION sample ( arg text ) ...; - -User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing. - -It is valid to use *complex* types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types. - -Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too. - -Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:: - - CREATE FUNCTION some_function ( arg int ) - RETURNS NULL ON NULL INPUT - RETURNS int - LANGUAGE java - AS $$ return arg; $$; - - SELECT some_function(column) FROM atable ...; - UPDATE atable SET col = some_function(?) 
...; - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct_using_udt ( udtarg frozen ) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ return udtarg.getString("txt"); $$; - -User-defined functions can be used in ``SELECT``, ``INSERT`` and ``UPDATE`` statements. - -The implicitly available ``udfContext`` field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:: - - CREATE TYPE custom_type (txt text, i int); - CREATE FUNCTION fct\_using\_udt ( somearg int ) - RETURNS NULL ON NULL INPUT - RETURNS custom_type - LANGUAGE java - AS $$ - UDTValue udt = udfContext.newReturnUDTValue(); - udt.setString("txt", "some string"); - udt.setInt("i", 42); - return udt; - $$; - -The definition of the ``UDFContext`` interface can be found in the Apache Cassandra source code for -``org.apache.cassandra.cql3.functions.UDFContext``. - -.. code-block:: java - - public interface UDFContext - { - UDTValue newArgUDTValue(String argName); - UDTValue newArgUDTValue(int argNum); - UDTValue newReturnUDTValue(); - UDTValue newUDTValue(String udtName); - TupleValue newArgTupleValue(String argName); - TupleValue newArgTupleValue(int argNum); - TupleValue newReturnTupleValue(); - TupleValue newTupleValue(String cqlDefinition); - } - -Java UDFs already have some imports for common interfaces and classes defined. These imports are: - -.. code-block:: java - - import java.nio.ByteBuffer; - import java.util.List; - import java.util.Map; - import java.util.Set; - import org.apache.cassandra.cql3.functions.UDFContext; - import com.datastax.driver.core.TypeCodec; - import com.datastax.driver.core.TupleValue; - import com.datastax.driver.core.UDTValue; - -Please note, that these convenience imports are not available for script UDFs. - -.. _create-function-statement: - -CREATE FUNCTION -``````````````` - -Creating a new user-defined function uses the ``CREATE FUNCTION`` statement: - -.. productionlist:: - create_function_statement: CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS] - : `function_name` '(' `arguments_declaration` ')' - : [ CALLED | RETURNS NULL ] ON NULL INPUT - : RETURNS `cql_type` - : LANGUAGE `identifier` - : AS `string` - arguments_declaration: `identifier` `cql_type` ( ',' `identifier` `cql_type` )* - -For instance:: - - CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen, listarg list) - RETURNS NULL ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - - CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int) - CALLED ON NULL INPUT - RETURNS text - LANGUAGE java - AS $$ - // some Java code - $$; - -``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords either creates a function or replaces an existing one with -the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE`` fails if a function with the same signature already -exists. - -If the optional ``IF NOT EXISTS`` keywords are used, the function will -only be created if another function with the same signature does not -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -Behavior on invocation with ``null`` values must be defined for each -function. There are two options: - -#. ``RETURNS NULL ON NULL INPUT`` declares that the function will always - return ``null`` if any of the input arguments is ``null``. -#. ``CALLED ON NULL INPUT`` declares that the function will always be - executed. 
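-
-As an illustration of the difference, a minimal sketch (``txt_or_default`` is a hypothetical
-function, not one of the examples above): a function that wants to substitute a default for
-``null`` arguments must be declared ``CALLED ON NULL INPUT``, otherwise its body would never see
-the ``null``::
-
-    CREATE FUNCTION IF NOT EXISTS txt_or_default (txt text)
-        CALLED ON NULL INPUT
-        RETURNS text
-        LANGUAGE java
-        AS $$ return txt == null ? "<none>" : txt; $$;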
- -Function Signature -################## - -Signatures are used to distinguish individual functions. The signature consists of: - -#. The fully qualified function name - i.e *keyspace* plus *function-name* -#. The concatenated list of all argument types - -Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules. - -Functions belong to a keyspace. If no keyspace is specified in ````, the current keyspace is used (i.e. -the keyspace specified using the ``USE`` statement). It is not possible to create a user-defined function in one of the -system keyspaces. - -.. _drop-function-statement: - -DROP FUNCTION -````````````` - -Dropping a function uses the ``DROP FUNCTION`` statement: - -.. productionlist:: - drop_function_statement: DROP FUNCTION [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - arguments_signature: `cql_type` ( ',' `cql_type` )* - -For instance:: - - DROP FUNCTION myfunction; - DROP FUNCTION mykeyspace.afunction; - DROP FUNCTION afunction ( int ); - DROP FUNCTION afunction ( text ); - -You must specify the argument types (:token:`arguments_signature`) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions). - -``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if -it doesn't - -.. _aggregate-functions: - -Aggregate functions -^^^^^^^^^^^^^^^^^^^ - -Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set. - -If ``normal`` columns, ``scalar functions``, ``UDT`` fields, ``writetime`` or ``ttl`` are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query. - -Native aggregates -~~~~~~~~~~~~~~~~~ - -.. _count-function: - -Count -````` - -The ``count`` function can be used to count the rows returned by a query. Example:: - - SELECT COUNT (*) FROM plays; - SELECT COUNT (1) FROM plays; - -It also can be used to count the non null value of a given column:: - - SELECT COUNT (scores) FROM plays; - -Max and Min -``````````` - -The ``max`` and ``min`` functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:: - - SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake'; - -Sum -``` - -The ``sum`` function can be used to sum up all the values returned by a query for a given column. For instance:: - - SELECT SUM (players) FROM plays; - -Avg -``` - -The ``avg`` function can be used to compute the average of all the values returned by a query for a given column. For -instance:: - - SELECT AVG (players) FROM plays; - -.. _user-defined-aggregates-functions: - -User-Defined Aggregates -~~~~~~~~~~~~~~~~~~~~~~~ - -User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -*count*, *min*, and *max*. - -Each aggregate requires an *initial state* (``INITCOND``, which defaults to ``null``) of type ``STYPE``. The first -argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the -types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by -the state function becomes the new state. After all rows are processed, the optional ``FINALFUNC`` is executed with last -state value as its argument. 
- -``STYPE`` is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate). - -User-defined aggregates can be used in ``SELECT`` statement. - -A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` -statement):: - - CREATE OR REPLACE FUNCTION averageState(state tuple, val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS $$ - if (val != null) { - state.setInt(0, state.getInt(0)+1); - state.setLong(1, state.getLong(1)+val.intValue()); - } - return state; - $$; - - CREATE OR REPLACE FUNCTION averageFinal (state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS $$ - double r = 0; - if (state.getInt(0) == 0) return null; - r = state.getLong(1); - r /= state.getInt(0); - return Double.valueOf(r); - $$; - - CREATE OR REPLACE AGGREGATE average(int) - SFUNC averageState - STYPE tuple - FINALFUNC averageFinal - INITCOND (0, 0); - - CREATE TABLE atable ( - pk int PRIMARY KEY, - val int - ); - - INSERT INTO atable (pk, val) VALUES (1,1); - INSERT INTO atable (pk, val) VALUES (2,2); - INSERT INTO atable (pk, val) VALUES (3,3); - INSERT INTO atable (pk, val) VALUES (4,4); - - SELECT average(val) FROM atable; - -.. _create-aggregate-statement: - -CREATE AGGREGATE -```````````````` - -Creating (or replacing) a user-defined aggregate function uses the ``CREATE AGGREGATE`` statement: - -.. productionlist:: - create_aggregate_statement: CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ] - : `function_name` '(' `arguments_signature` ')' - : SFUNC `function_name` - : STYPE `cql_type` - : [ FINALFUNC `function_name` ] - : [ INITCOND `term` ] - -See above for a complete example. - -``CREATE AGGREGATE`` with the optional ``OR REPLACE`` keywords either creates an aggregate or replaces an existing one -with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature -already exists. - -``CREATE AGGREGATE`` with the optional ``IF NOT EXISTS`` keywords either creates an aggregate if it does not already -exist. - -``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together. - -``STYPE`` defines the type of the state value and must be specified. - -The optional ``INITCOND`` defines the initial state value for the aggregate. It defaults to ``null``. A non-\ ``null`` -``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``. - -``SFUNC`` references an existing function to be used as the state modifying function. The type of first argument of the -state function must match ``STYPE``. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called -with ``null``. - -The optional ``FINALFUNC`` is called just before the aggregate result is returned. It must take only one argument with -type ``STYPE``. The return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS -NULL ON NULL INPUT`` means that the aggregate's return value will be ``null``, if the last state is ``null``. - -If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is -defined, it is the return type of that function. - -.. 
_drop-aggregate-statement: - -DROP AGGREGATE -`````````````` - -Dropping an user-defined aggregate function uses the ``DROP AGGREGATE`` statement: - -.. productionlist:: - drop_aggregate_statement: DROP AGGREGATE [ IF EXISTS ] `function_name` [ '(' `arguments_signature` ')' ] - -For instance:: - - DROP AGGREGATE myAggregate; - DROP AGGREGATE myKeyspace.anAggregate; - DROP AGGREGATE someAggregate ( int ); - DROP AGGREGATE someAggregate ( text ); - -The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates). - -``DROP AGGREGATE`` with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist. diff --git a/src/doc/4.0-beta1/_sources/cql/index.rst.txt b/src/doc/4.0-beta1/_sources/cql/index.rst.txt deleted file mode 100644 index b4c21cf6c..000000000 --- a/src/doc/4.0-beta1/_sources/cql/index.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _cql: - -The Cassandra Query Language (CQL) -================================== - -This document describes the Cassandra Query Language (CQL) [#]_. Note that this document describes the last version of -the languages. However, the `changes <#changes>`_ section provides the diff between the different versions of CQL. - -CQL offers a model close to SQL in the sense that data is put in *tables* containing *rows* of *columns*. For -that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have -in SQL. - -.. toctree:: - :maxdepth: 2 - - definitions - types - ddl - dml - indexes - mvs - security - functions - operators - json - triggers - appendices - changes - -.. [#] Technically, this document CQL version 3, which is not backward compatible with CQL version 1 and 2 (which have - been deprecated and remove) and differs from it in numerous ways. diff --git a/src/doc/4.0-beta1/_sources/cql/indexes.rst.txt b/src/doc/4.0-beta1/_sources/cql/indexes.rst.txt deleted file mode 100644 index 81fe429d0..000000000 --- a/src/doc/4.0-beta1/_sources/cql/indexes.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. 
http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _secondary-indexes: - -Secondary Indexes ------------------ - -CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by: - -.. productionlist:: - index_name: re('[a-zA-Z_0-9]+') - - - -.. _create-index-statement: - -CREATE INDEX -^^^^^^^^^^^^ - -Creating a secondary index on a table uses the ``CREATE INDEX`` statement: - -.. productionlist:: - create_index_statement: CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ `index_name` ] - : ON `table_name` '(' `index_identifier` ')' - : [ USING `string` [ WITH OPTIONS = `map_literal` ] ] - index_identifier: `column_name` - :| ( KEYS | VALUES | ENTRIES | FULL ) '(' `column_name` ')' - -For instance:: - - CREATE INDEX userIndex ON NerdMovies (user); - CREATE INDEX ON Mutants (abilityId); - CREATE INDEX ON users (keys(favs)); - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass'; - CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'}; - -The ``CREATE INDEX`` statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ``ON`` keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time. - -Attempting to create an already existing index will return an error unless the ``IF NOT EXISTS`` option is used. If it -is used, the statement will be a no-op if the index already exists. - -Indexes on Map Keys -~~~~~~~~~~~~~~~~~~~ - -When creating an index on a :ref:`maps `, you may index either the keys or the values. If the column identifier is -placed within the ``keys()`` function, the index will be on the map keys, allowing you to use ``CONTAINS KEY`` in -``WHERE`` clauses. Otherwise, the index will be on the map values. - -.. _drop-index-statement: - -DROP INDEX -^^^^^^^^^^ - -Dropping a secondary index uses the ``DROP INDEX`` statement: - -.. productionlist:: - drop_index_statement: DROP INDEX [ IF EXISTS ] `index_name` - -The ``DROP INDEX`` statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index. - -If the index does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case the -operation is a no-op. diff --git a/src/doc/4.0-beta1/_sources/cql/json.rst.txt b/src/doc/4.0-beta1/_sources/cql/json.rst.txt deleted file mode 100644 index 539180aed..000000000 --- a/src/doc/4.0-beta1/_sources/cql/json.rst.txt +++ /dev/null @@ -1,115 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. 
You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-json: - -JSON Support ------------- - -Cassandra 2.2 introduces JSON support to :ref:`SELECT ` and :ref:`INSERT ` -statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced), it simply -provides a convenient way to work with JSON documents. - -SELECT JSON -^^^^^^^^^^^ - -With ``SELECT`` statements, the ``JSON`` keyword can be used to return each row as a single ``JSON`` encoded map. The -remainder of the ``SELECT`` statement behavior is the same. - -The result map keys are the same as the column names in a normal result set. For example, a statement like ``SELECT JSON -a, ttl(b) FROM ...`` would result in a map with keys ``"a"`` and ``"ttl(b)"``. However, this is one notable exception: -for symmetry with ``INSERT JSON`` behavior, case-sensitive column names with upper-case letters will be surrounded with -double quotes. For example, ``SELECT JSON myColumn FROM ...`` would result in a map key ``"\"myColumn\""`` (note the -escaped quotes). - -The map values will ``JSON``-encoded representations (as described below) of the result set values. - -INSERT JSON -^^^^^^^^^^^ - -With ``INSERT`` statements, the new ``JSON`` keyword can be used to enable inserting a ``JSON`` encoded map as a single -row. The format of the ``JSON`` map should generally match that returned by a ``SELECT JSON`` statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named "myKey" and "value", you would do the following:: - - INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}' - -By default (or if ``DEFAULT NULL`` is explicitly used), a column omitted from the ``JSON`` map will be set to ``NULL``, -meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). -Alternatively, if the ``DEFAULT UNSET`` directive is used after the value, omitted column values will be left unset, -meaning that pre-existing values for those column will be preserved. - - -JSON Encoding of Cassandra Data Types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Where possible, Cassandra will represent and accept data types in their native ``JSON`` representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native ``JSON`` collections (maps and lists) or a JSON-encoded string -representation of the collection. 
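As a hedged example of the above (the table and column names are hypothetical): a single-field type such as ``date`` can be supplied as a string matching its CQL literal format, while a compound type such as ``set`` must be supplied as a native JSON collection::

    CREATE TABLE users (
        id int PRIMARY KEY,
        joined date,
        emails set<text>
    );

    INSERT INTO users JSON '{"id": 1, "joined": "2016-02-03", "emails": ["alice@example.com"]}';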
- -The following table describes the encodings that Cassandra will accept in ``INSERT JSON`` values (and ``fromJson()`` -arguments) as well as the format Cassandra will use when returning data for ``SELECT JSON`` statements (and -``fromJson()``): - -=============== ======================== =============== ============================================================== - Type Formats accepted Return format Notes -=============== ======================== =============== ============================================================== - ``ascii`` string string Uses JSON's ``\u`` character escape - ``bigint`` integer, string integer String must be valid 64 bit integer - ``blob`` string string String should be 0x followed by an even number of hex digits - ``boolean`` boolean, string boolean String must be "true" or "false" - ``date`` string string Date in format ``YYYY-MM-DD``, timezone UTC - ``decimal`` integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in - client-side decoder - ``double`` integer, float, string float String must be valid integer or float - ``float`` integer, float, string float String must be valid integer or float - ``inet`` string string IPv4 or IPv6 address - ``int`` integer, string integer String must be valid 32 bit integer - ``list`` list, string list Uses JSON's native list representation - ``map`` map, string map Uses JSON's native map representation - ``smallint`` integer, string integer String must be valid 16 bit integer - ``set`` list, string list Uses JSON's native list representation - ``text`` string string Uses JSON's ``\u`` character escape - ``time`` string string Time of day in format ``HH-MM-SS[.fffffffff]`` - ``timestamp`` integer, string string A timestamp. Strings constant allows to input :ref:`timestamps - as dates `. Datestamps with format ``YYYY-MM-DD - HH:MM:SS.SSS`` are returned. - ``timeuuid`` string string Type 1 UUID. See :token:`constant` for the UUID format - ``tinyint`` integer, string integer String must be valid 8 bit integer - ``tuple`` list, string list Uses JSON's native list representation - ``UDT`` map, string map Uses JSON's native map representation with field names as keys - ``uuid`` string string See :token:`constant` for the UUID format - ``varchar`` string string Uses JSON's ``\u`` character escape - ``varint`` integer, string integer Variable length; may overflow 32 or 64 bit integers in - client-side decoder -=============== ======================== =============== ============================================================== - -The fromJson() Function -^^^^^^^^^^^^^^^^^^^^^^^ - -The ``fromJson()`` function may be used similarly to ``INSERT JSON``, but for a single column value. It may only be used -in the ``VALUES`` clause of an ``INSERT`` statement or as one of the column values in an ``UPDATE``, ``DELETE``, or -``SELECT`` statement. For example, it cannot be used in the selection clause of a ``SELECT`` statement. - -The toJson() Function -^^^^^^^^^^^^^^^^^^^^^ - -The ``toJson()`` function may be used similarly to ``SELECT JSON``, but for a single column value. It may only be used -in the selection clause of a ``SELECT`` statement. diff --git a/src/doc/4.0-beta1/_sources/cql/mvs.rst.txt b/src/doc/4.0-beta1/_sources/cql/mvs.rst.txt deleted file mode 100644 index 200090a60..000000000 --- a/src/doc/4.0-beta1/_sources/cql/mvs.rst.txt +++ /dev/null @@ -1,179 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _materialized-views: - -Materialized Views ------------------- - -Materialized views names are defined by: - -.. productionlist:: - view_name: re('[a-zA-Z_0-9]+') - - -.. _create-materialized-view-statement: - -CREATE MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^^ - -You can create a materialized view on a table using a ``CREATE MATERIALIZED VIEW`` statement: - -.. productionlist:: - create_materialized_view_statement: CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] `view_name` AS - : `select_statement` - : PRIMARY KEY '(' `primary_key` ')' - : WITH `table_options` - -For instance:: - - CREATE MATERIALIZED VIEW monkeySpecies_by_population AS - SELECT * FROM monkeySpecies - WHERE population IS NOT NULL AND species IS NOT NULL - PRIMARY KEY (population, species) - WITH comment='Allow query by population instead of species'; - -The ``CREATE MATERIALIZED VIEW`` statement creates a new materialized view. Each such view is a set of *rows* which -corresponds to rows which are present in the underlying, or base, table specified in the ``SELECT`` statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view. - -Creating a materialized view has 3 main parts: - -- The :ref:`select statement ` that restrict the data included in the view. -- The :ref:`primary key ` definition for the view. -- The :ref:`options ` for the view. - -Attempting to create an already existing materialized view will return an error unless the ``IF NOT EXISTS`` option is -used. If it is used, the statement will be a no-op if the materialized view already exists. - -.. note:: By default, materialized views are built in a single thread. The initial build can be parallelized by - increasing the number of threads specified by the property ``concurrent_materialized_view_builders`` in - ``cassandra.yaml``. This property can also be manipulated at runtime through both JMX and the - ``setconcurrentviewbuilders`` and ``getconcurrentviewbuilders`` nodetool commands. - -.. _mv-select: - -MV select statement -``````````````````` - -The select statement of a materialized view creation defines which of the base table is included in the view. That -statement is limited in a number of ways: - -- the :ref:`selection ` is limited to those that only select columns of the base table. In other - words, you can't use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can - however use `*` as a shortcut of selecting all columns. Further, :ref:`static columns ` cannot be - included in a materialized view (which means ``SELECT *`` isn't allowed if the base table has static columns). -- the ``WHERE`` clause have the following restrictions: - - - it cannot include any :token:`bind_marker`. 
- - the columns that are not part of the *base table* primary key can only be restricted by an ``IS NOT NULL`` - restriction. No other restriction is allowed. - - as the columns that are part of the *view* primary key cannot be null, they must always be at least restricted by a - ``IS NOT NULL`` restriction (or any other restriction, but they must have one). - -- it cannot have neither an :ref:`ordering clause `, nor a :ref:`limit `, nor :ref:`ALLOW - FILTERING `. - -.. _mv-primary-key: - -MV primary key -`````````````` - -A view must have a primary key and that primary key must conform to the following restrictions: - -- it must contain all the primary key columns of the base table. This ensures that every row of the view correspond to - exactly one row of the base table. -- it can only contain a single column that is not a primary key column in the base table. - -So for instance, give the following base table definition:: - - CREATE TABLE t ( - k int, - c1 int, - c2 int, - v1 int, - v2 int, - PRIMARY KEY (k, c1, c2) - ) - -then the following view definitions are allowed:: - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, k, c2) - - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (v1, k, c1, c2) - -but the following ones are **not** allowed:: - - // Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL - PRIMARY KEY (v1, v2, k, c1, c2) - - // Error: must include k in the primary as it's a base table primary key column - CREATE MATERIALIZED VIEW mv1 AS - SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL - PRIMARY KEY (c1, c2) - - -.. _mv-options: - -MV options -`````````` - -A materialized view is internally implemented by a table and as such, creating a MV allows the :ref:`same options than -creating a table `. - - -.. _alter-materialized-view-statement: - -ALTER MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^^ - -After creation, you can alter the options of a materialized view using the ``ALTER MATERIALIZED VIEW`` statement: - -.. productionlist:: - alter_materialized_view_statement: ALTER MATERIALIZED VIEW `view_name` WITH `table_options` - -The options that can be updated are the same than at creation time and thus the :ref:`same than for tables -`. - -.. _drop-materialized-view-statement: - -DROP MATERIALIZED VIEW -^^^^^^^^^^^^^^^^^^^^^^ - -Dropping a materialized view users the ``DROP MATERIALIZED VIEW`` statement: - -.. productionlist:: - drop_materialized_view_statement: DROP MATERIALIZED VIEW [ IF EXISTS ] `view_name`; - -If the materialized view does not exists, the statement will return an error, unless ``IF EXISTS`` is used in which case -the operation is a no-op. - -MV Limitations -``````````````` - -.. Note:: Removal of columns not selected in the Materialized View (via ``UPDATE base SET unselected_column = null`` or - ``DELETE unselected_column FROM base``) may shadow missed updates to other columns received by hints or repair. - For this reason, we advise against doing deletions on base columns not selected in views until this is - fixed on CASSANDRA-13826. 
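For completeness, a hedged sketch of the ``ALTER MATERIALIZED VIEW`` and ``DROP MATERIALIZED VIEW`` statements described above, reusing the ``monkeySpecies_by_population`` view from the ``CREATE MATERIALIZED VIEW`` example::

    ALTER MATERIALIZED VIEW monkeySpecies_by_population
        WITH comment = 'Allow query by population instead of species (updated comment)';

    DROP MATERIALIZED VIEW IF EXISTS monkeySpecies_by_population;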
diff --git a/src/doc/4.0-beta1/_sources/cql/operators.rst.txt b/src/doc/4.0-beta1/_sources/cql/operators.rst.txt deleted file mode 100644 index 1faf0d045..000000000 --- a/src/doc/4.0-beta1/_sources/cql/operators.rst.txt +++ /dev/null @@ -1,74 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _arithmetic_operators: - -Arithmetic Operators --------------------- - -CQL supports the following operators: - -=============== ======================================================================================================= - Operator Description -=============== ======================================================================================================= - \- (unary) Negates operand - \+ Addition - \- Substraction - \* Multiplication - / Division - % Returns the remainder of a division -=============== ======================================================================================================= - -.. _number-arithmetic: - -Number Arithmetic -^^^^^^^^^^^^^^^^^ - -All arithmetic operations are supported on numeric types or counters. - -The return type of the operation will be based on the operand types: - -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - left/right tinyint smallint int bigint counter float double varint decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - **tinyint** tinyint smallint int bigint bigint float double varint decimal - **smallint** smallint smallint int bigint bigint float double varint decimal - **int** int int int bigint bigint float double varint decimal - **bigint** bigint bigint bigint bigint bigint double double varint decimal - **counter** bigint bigint bigint bigint bigint double double varint decimal - **float** float float float double double float double decimal decimal - **double** double double double double double double double decimal decimal - **varint** varint varint varint decimal decimal decimal decimal decimal decimal - **decimal** decimal decimal decimal decimal decimal decimal decimal decimal decimal -============= =========== ========== ========== ========== ========== ========== ========== ========== ========== - -``*``, ``/`` and ``%`` operators have a higher precedence level than ``+`` and ``-`` operator. By consequence, -they will be evaluated before. If two operator in an expression have the same precedence level, they will be evaluated -left to right based on their position in the expression. - -.. _datetime--arithmetic: - -Datetime Arithmetic -^^^^^^^^^^^^^^^^^^^ - -A ``duration`` can be added (+) or substracted (-) from a ``timestamp`` or a ``date`` to create a new -``timestamp`` or ``date``. 
So for instance:: - - SELECT * FROM myTable WHERE t = '2017-01-01' - 2d - -will select all the records with a value of ``t`` which is in the last 2 days of 2016. diff --git a/src/doc/4.0-beta1/_sources/cql/security.rst.txt b/src/doc/4.0-beta1/_sources/cql/security.rst.txt deleted file mode 100644 index 429a1ef0d..000000000 --- a/src/doc/4.0-beta1/_sources/cql/security.rst.txt +++ /dev/null @@ -1,538 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _cql-security: - -Security --------- - -.. _cql-roles: - -Database Roles -^^^^^^^^^^^^^^ - -CQL uses database roles to represent users and group of users. Syntactically, a role is defined by: - -.. productionlist:: - role_name: `identifier` | `string` - -.. _create-role-statement: - -CREATE ROLE -~~~~~~~~~~~ - -Creating a role uses the ``CREATE ROLE`` statement: - -.. productionlist:: - create_role_statement: CREATE ROLE [ IF NOT EXISTS ] `role_name` - : [ WITH `role_options` ] - role_options: `role_option` ( AND `role_option` )* - role_option: PASSWORD '=' `string` - :| LOGIN '=' `boolean` - :| SUPERUSER '=' `boolean` - :| OPTIONS '=' `map_literal` - :| ACCESS TO DATACENTERS `set_literal` - :| ACCESS TO ALL DATACENTERS - -For instance:: - - CREATE ROLE new_role; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true; - CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 }; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'}; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS; - -By default roles do not possess ``LOGIN`` privileges or ``SUPERUSER`` status. - -:ref:`Permissions ` on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and ``SUPERUSER`` status are inherited, but the ``LOGIN`` privilege is -not. - -If a role has the ``LOGIN`` privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role. - -Only a client with with the ``CREATE`` permission on the database roles resource may issue ``CREATE ROLE`` requests (see -the :ref:`relevant section ` below), unless the client is a ``SUPERUSER``. Role management in Cassandra -is pluggable and custom implementations may support only a subset of the listed options. - -Role names should be quoted if they contain non-alphanumeric characters. - -.. 
_setting-credentials-for-internal-authentication: - -Setting credentials for internal authentication -``````````````````````````````````````````````` - -Use the ``WITH PASSWORD`` clause to set a password for internal authentication, enclosing the password in single -quotation marks. - -If internal authentication has not been set up or the role does not have ``LOGIN`` privileges, the ``WITH PASSWORD`` -clause is not necessary. - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. Not specifiying -datacenters implicitly grants access to all datacenters. The clause ``ACCESS TO ALL DATACENTERS`` can be used for -explicitness, but there's no functional difference. - -Creating a role conditionally -````````````````````````````` - -Attempting to create an existing role results in an invalid query condition unless the ``IF NOT EXISTS`` option is used. -If the option is used and the role exists, the statement is a no-op:: - - CREATE ROLE other_role; - CREATE ROLE IF NOT EXISTS other_role; - - -.. _alter-role-statement: - -ALTER ROLE -~~~~~~~~~~ - -Altering a role options uses the ``ALTER ROLE`` statement: - -.. productionlist:: - alter_role_statement: ALTER ROLE `role_name` WITH `role_options` - -For instance:: - - ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false; - -Restricting connections to specific datacenters -``````````````````````````````````````````````` - -If a ``network_authorizer`` has been configured, you can restrict login roles to specific datacenters with the -``ACCESS TO DATACENTERS`` clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ``ACCESS TO ALL DATACENTERS`` clause. - -Conditions on executing ``ALTER ROLE`` statements: - -- A client must have ``SUPERUSER`` status to alter the ``SUPERUSER`` status of another role -- A client cannot alter the ``SUPERUSER`` status of any role it currently holds -- A client can only modify certain properties of the role with which it identified at login (e.g. ``PASSWORD``) -- To modify properties of a role, the client must be granted ``ALTER`` :ref:`permission ` on that role - -.. _drop-role-statement: - -DROP ROLE -~~~~~~~~~ - -Dropping a role uses the ``DROP ROLE`` statement: - -.. productionlist:: - drop_role_statement: DROP ROLE [ IF EXISTS ] `role_name` - -``DROP ROLE`` requires the client to have ``DROP`` :ref:`permission ` on the role in question. In -addition, client may not ``DROP`` the role with which it identified at login. Finally, only a client with ``SUPERUSER`` -status may ``DROP`` another ``SUPERUSER`` role. - -Attempting to drop a role which does not exist results in an invalid query condition unless the ``IF EXISTS`` option is -used. If the option is used and the role does not exist the statement is a no-op. - -.. note:: DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain - connected and will retain the ability to perform any database actions which do not require :ref:`authorization`. - However, if authorization is enabled, :ref:`permissions` of the dropped role are also revoked, - subject to the :ref:`caching options` configured in :ref:`cassandra.yaml`. 
- Should a dropped role be subsequently recreated and have new :ref:`permissions` or - :ref:`roles` granted to it, any client sessions still connected will acquire the newly granted - permissions and roles. - -.. _grant-role-statement: - -GRANT ROLE -~~~~~~~~~~ - -Granting a role to another uses the ``GRANT ROLE`` statement: - -.. productionlist:: - grant_role_statement: GRANT `role_name` TO `role_name` - -For instance:: - - GRANT report_writer TO alice; - -This statement grants the ``report_writer`` role to ``alice``. Any permissions granted to ``report_writer`` are also -acquired by ``alice``. - -Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:: - - GRANT role_a TO role_b; - GRANT role_b TO role_a; - - GRANT role_a TO role_b; - GRANT role_b TO role_c; - GRANT role_c TO role_a; - -.. _revoke-role-statement: - -REVOKE ROLE -~~~~~~~~~~~ - -Revoking a role uses the ``REVOKE ROLE`` statement: - -.. productionlist:: - revoke_role_statement: REVOKE `role_name` FROM `role_name` - -For instance:: - - REVOKE report_writer FROM alice; - -This statement revokes the ``report_writer`` role from ``alice``. Any permissions that ``alice`` has acquired via the -``report_writer`` role are also revoked. - -.. _list-roles-statement: - -LIST ROLES -~~~~~~~~~~ - -All the known roles (in the system or granted to specific role) can be listed using the ``LIST ROLES`` statement: - -.. productionlist:: - list_roles_statement: LIST ROLES [ OF `role_name` ] [ NORECURSIVE ] - -For instance:: - - LIST ROLES; - -returns all known roles in the system, this requires ``DESCRIBE`` permission on the database roles resource. And:: - - LIST ROLES OF alice; - -enumerates all roles granted to ``alice``, including those transitively acquired. But:: - - LIST ROLES OF bob NORECURSIVE - -lists all roles directly granted to ``bob`` without including any of the transitively acquired ones. - -Users -^^^^^ - -Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -``USER``. For backward compatibility, the legacy syntax has been preserved with ``USER`` centric statements becoming -synonyms for the ``ROLE`` based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role. - -.. _create-user-statement: - -CREATE USER -~~~~~~~~~~~ - -Creating a user uses the ``CREATE USER`` statement: - -.. productionlist:: - create_user_statement: CREATE USER [ IF NOT EXISTS ] `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - user_option: SUPERUSER | NOSUPERUSER - -For instance:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER; - -``CREATE USER`` is equivalent to ``CREATE ROLE`` where the ``LOGIN`` option is ``true``. 
So, the following pairs of -statements are equivalent:: - - CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER; - CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false; - - CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - - CREATE USER alice WITH PASSWORD 'password_a'; - CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true; - -.. _alter-user-statement: - -ALTER USER -~~~~~~~~~~ - -Altering the options of a user uses the ``ALTER USER`` statement: - -.. productionlist:: - alter_user_statement: ALTER USER `role_name` [ WITH PASSWORD `string` ] [ `user_option` ] - -For instance:: - - ALTER USER alice WITH PASSWORD 'PASSWORD_A'; - ALTER USER bob SUPERUSER; - -.. _drop-user-statement: - -DROP USER -~~~~~~~~~ - -Dropping a user uses the ``DROP USER`` statement: - -.. productionlist:: - drop_user_statement: DROP USER [ IF EXISTS ] `role_name` - -.. _list-users-statement: - -LIST USERS -~~~~~~~~~~ - -Existing users can be listed using the ``LIST USERS`` statement: - -.. productionlist:: - list_users_statement: LIST USERS - -Note that this statement is equivalent to:: - - LIST ROLES; - -but only roles with the ``LOGIN`` privilege are included in the output. - -Data Control -^^^^^^^^^^^^ - -.. _cql-permissions: - -Permissions -~~~~~~~~~~~ - -Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically: - -- The hierarchy of Data resources, Keyspaces and Tables has the structure ``ALL KEYSPACES`` -> ``KEYSPACE`` -> - ``TABLE``. -- Function resources have the structure ``ALL FUNCTIONS`` -> ``KEYSPACE`` -> ``FUNCTION`` -- Resources representing roles have the structure ``ALL ROLES`` -> ``ROLE`` -- Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ``ALL MBEANS`` -> - ``MBEAN`` - -Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting ``SELECT`` on a ``KEYSPACE`` automatically grants it on all ``TABLES`` in that ``KEYSPACE``. Likewise, granting -a permission on ``ALL FUNCTIONS`` grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace. - -Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes. - -The full set of available permissions is: - -- ``CREATE`` -- ``ALTER`` -- ``DROP`` -- ``SELECT`` -- ``MODIFY`` -- ``AUTHORIZE`` -- ``DESCRIBE`` -- ``EXECUTE`` - -Not all permissions are applicable to every type of resource. For instance, ``EXECUTE`` is only relevant in the context -of functions or mbeans; granting ``EXECUTE`` on a resource representing a table is nonsensical. Attempting to ``GRANT`` -a permission on resource to which it cannot be applied results in an error response. 
The following illustrates which -permissions can be granted on which types of resource, and which statements are enabled by that permission. - -=============== =============================== ======================================================================= - Permission Resource Operations -=============== =============================== ======================================================================= - ``CREATE`` ``ALL KEYSPACES`` ``CREATE KEYSPACE`` and ``CREATE TABLE`` in any keyspace - ``CREATE`` ``KEYSPACE`` ``CREATE TABLE`` in specified keyspace - ``CREATE`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` in any keyspace and ``CREATE AGGREGATE`` in any - keyspace - ``CREATE`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE`` in specified keyspace - ``CREATE`` ``ALL ROLES`` ``CREATE ROLE`` - ``ALTER`` ``ALL KEYSPACES`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in any keyspace - ``ALTER`` ``KEYSPACE`` ``ALTER KEYSPACE`` and ``ALTER TABLE`` in specified keyspace - ``ALTER`` ``TABLE`` ``ALTER TABLE`` - ``ALTER`` ``ALL FUNCTIONS`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing any existing - ``ALTER`` ``ALL FUNCTIONS IN KEYSPACE`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing in - specified keyspace - ``ALTER`` ``FUNCTION`` ``CREATE FUNCTION`` and ``CREATE AGGREGATE``: replacing existing - ``ALTER`` ``ALL ROLES`` ``ALTER ROLE`` on any role - ``ALTER`` ``ROLE`` ``ALTER ROLE`` - ``DROP`` ``ALL KEYSPACES`` ``DROP KEYSPACE`` and ``DROP TABLE`` in any keyspace - ``DROP`` ``KEYSPACE`` ``DROP TABLE`` in specified keyspace - ``DROP`` ``TABLE`` ``DROP TABLE`` - ``DROP`` ``ALL FUNCTIONS`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in any keyspace - ``DROP`` ``ALL FUNCTIONS IN KEYSPACE`` ``DROP FUNCTION`` and ``DROP AGGREGATE`` in specified keyspace - ``DROP`` ``FUNCTION`` ``DROP FUNCTION`` - ``DROP`` ``ALL ROLES`` ``DROP ROLE`` on any role - ``DROP`` ``ROLE`` ``DROP ROLE`` - ``SELECT`` ``ALL KEYSPACES`` ``SELECT`` on any table - ``SELECT`` ``KEYSPACE`` ``SELECT`` on any table in specified keyspace - ``SELECT`` ``TABLE`` ``SELECT`` on specified table - ``SELECT`` ``ALL MBEANS`` Call getter methods on any mbean - ``SELECT`` ``MBEANS`` Call getter methods on any mbean matching a wildcard pattern - ``SELECT`` ``MBEAN`` Call getter methods on named mbean - ``MODIFY`` ``ALL KEYSPACES`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table - ``MODIFY`` ``KEYSPACE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on any table in - specified keyspace - ``MODIFY`` ``TABLE`` ``INSERT``, ``UPDATE``, ``DELETE`` and ``TRUNCATE`` on specified table - ``MODIFY`` ``ALL MBEANS`` Call setter methods on any mbean - ``MODIFY`` ``MBEANS`` Call setter methods on any mbean matching a wildcard pattern - ``MODIFY`` ``MBEAN`` Call setter methods on named mbean - ``AUTHORIZE`` ``ALL KEYSPACES`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table - ``AUTHORIZE`` ``KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any table in - specified keyspace - ``AUTHORIZE`` ``TABLE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified table - ``AUTHORIZE`` ``ALL FUNCTIONS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any function - ``AUTHORIZE`` ``ALL FUNCTIONS IN KEYSPACE`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` in specified keyspace - ``AUTHORIZE`` ``FUNCTION`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on specified function - ``AUTHORIZE`` ``ALL MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean - 
``AUTHORIZE`` ``MBEANS`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on any mbean matching - a wildcard pattern - ``AUTHORIZE`` ``MBEAN`` ``GRANT PERMISSION`` and ``REVOKE PERMISSION`` on named mbean - ``AUTHORIZE`` ``ALL ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on any role - ``AUTHORIZE`` ``ROLES`` ``GRANT ROLE`` and ``REVOKE ROLE`` on specified roles - ``DESCRIBE`` ``ALL ROLES`` ``LIST ROLES`` on all roles or only roles granted to another, - specified role - ``DESCRIBE`` ``ALL MBEANS`` Retrieve metadata about any mbean from the platform's MBeanServer - ``DESCRIBE`` ``MBEANS`` Retrieve metadata about any mbean matching a wildcard patter from the - platform's MBeanServer - ``DESCRIBE`` ``MBEAN`` Retrieve metadata about a named mbean from the platform's MBeanServer - ``EXECUTE`` ``ALL FUNCTIONS`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function, and use of - any function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL FUNCTIONS IN KEYSPACE`` ``SELECT``, ``INSERT`` and ``UPDATE`` using any function in specified - keyspace and use of any function in keyspace in ``CREATE AGGREGATE`` - ``EXECUTE`` ``FUNCTION`` ``SELECT``, ``INSERT`` and ``UPDATE`` using specified function and use - of the function in ``CREATE AGGREGATE`` - ``EXECUTE`` ``ALL MBEANS`` Execute operations on any mbean - ``EXECUTE`` ``MBEANS`` Execute operations on any mbean matching a wildcard pattern - ``EXECUTE`` ``MBEAN`` Execute operations on named mbean -=============== =============================== ======================================================================= - -.. _grant-permission-statement: - -GRANT PERMISSION -~~~~~~~~~~~~~~~~ - -Granting a permission uses the ``GRANT PERMISSION`` statement: - -.. productionlist:: - grant_permission_statement: GRANT `permissions` ON `resource` TO `role_name` - permissions: ALL [ PERMISSIONS ] | `permission` [ PERMISSION ] - permission: CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE - resource: ALL KEYSPACES - :| KEYSPACE `keyspace_name` - :| [ TABLE ] `table_name` - :| ALL ROLES - :| ROLE `role_name` - :| ALL FUNCTIONS [ IN KEYSPACE `keyspace_name` ] - :| FUNCTION `function_name` '(' [ `cql_type` ( ',' `cql_type` )* ] ')' - :| ALL MBEANS - :| ( MBEAN | MBEANS ) `string` - -For instance:: - - GRANT SELECT ON ALL KEYSPACES TO data_reader; - -This gives any user with the role ``data_reader`` permission to execute ``SELECT`` statements on any table across all -keyspaces:: - - GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer; - -This give any user with the role ``data_writer`` permission to perform ``UPDATE``, ``INSERT``, ``UPDATE``, ``DELETE`` -and ``TRUNCATE`` queries on all tables in the ``keyspace1`` keyspace:: - - GRANT DROP ON keyspace1.table1 TO schema_owner; - -This gives any user with the ``schema_owner`` role permissions to ``DROP`` ``keyspace1.table1``:: - - GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer; - -This grants any user with the ``report_writer`` role permission to execute ``SELECT``, ``INSERT`` and ``UPDATE`` queries -which use the function ``keyspace1.user_function( int )``:: - - GRANT DESCRIBE ON ALL ROLES TO role_admin; - -This grants any user with the ``role_admin`` role permission to view any and all roles in the system with a ``LIST -ROLES`` statement - -.. _grant-all: - -GRANT ALL -````````` - -When the ``GRANT ALL`` form is used, the appropriate set of permissions is determined automatically based on the target -resource. 
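A hedged example of the ``GRANT ALL`` shorthand (the keyspace and role names are hypothetical): granting ``ALL PERMISSIONS`` on a keyspace grants only the permissions applicable to keyspaces, i.e. ``CREATE``, ``ALTER``, ``DROP``, ``SELECT``, ``MODIFY`` and ``AUTHORIZE`` as listed in the table above::

    GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO keyspace1_admin;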
- -Automatic Granting -`````````````````` - -When a resource is created, via a ``CREATE KEYSPACE``, ``CREATE TABLE``, ``CREATE FUNCTION``, ``CREATE AGGREGATE`` or -``CREATE ROLE`` statement, the creator (the role the database user who issues the statement is identified as), is -automatically granted all applicable permissions on the new resource. - -.. _revoke-permission-statement: - -REVOKE PERMISSION -~~~~~~~~~~~~~~~~~ - -Revoking a permission from a role uses the ``REVOKE PERMISSION`` statement: - -.. productionlist:: - revoke_permission_statement: REVOKE `permissions` ON `resource` FROM `role_name` - -For instance:: - - REVOKE SELECT ON ALL KEYSPACES FROM data_reader; - REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer; - REVOKE DROP ON keyspace1.table1 FROM schema_owner; - REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer; - REVOKE DESCRIBE ON ALL ROLES FROM role_admin; - -Because of their function in normal driver operations, certain tables cannot have their `SELECT` permissions -revoked. The following tables will be available to all authorized users regardless of their assigned role:: - -* `system_schema.keyspaces` -* `system_schema.columns` -* `system_schema.tables` -* `system.local` -* `system.peers` - -.. _list-permissions-statement: - -LIST PERMISSIONS -~~~~~~~~~~~~~~~~ - -Listing granted permissions uses the ``LIST PERMISSIONS`` statement: - -.. productionlist:: - list_permissions_statement: LIST `permissions` [ ON `resource` ] [ OF `role_name` [ NORECURSIVE ] ] - -For instance:: - - LIST ALL PERMISSIONS OF alice; - -Show all permissions granted to ``alice``, including those acquired transitively from any other roles:: - - LIST ALL PERMISSIONS ON keyspace1.table1 OF bob; - -Show all permissions on ``keyspace1.table1`` granted to ``bob``, including those acquired transitively from any other -roles. This also includes any permissions higher up the resource hierarchy which can be applied to ``keyspace1.table1``. -For example, should ``bob`` have ``ALTER`` permission on ``keyspace1``, that would be included in the results of this -query. Adding the ``NORECURSIVE`` switch restricts the results to only those permissions which were directly granted to -``bob`` or one of ``bob``'s roles:: - - LIST SELECT PERMISSIONS OF carlos; - -Show any permissions granted to ``carlos`` or any of ``carlos``'s roles, limited to ``SELECT`` permissions on any -resource. diff --git a/src/doc/4.0-beta1/_sources/cql/triggers.rst.txt b/src/doc/4.0-beta1/_sources/cql/triggers.rst.txt deleted file mode 100644 index db3f53e38..000000000 --- a/src/doc/4.0-beta1/_sources/cql/triggers.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. 
_cql-triggers: - -Triggers --------- - -Triggers are identified by a name defined by: - -.. productionlist:: - trigger_name: `identifier` - - -.. _create-trigger-statement: - -CREATE TRIGGER -^^^^^^^^^^^^^^ - -Creating a new trigger uses the ``CREATE TRIGGER`` statement: - -.. productionlist:: - create_trigger_statement: CREATE TRIGGER [ IF NOT EXISTS ] `trigger_name` - : ON `table_name` - : USING `string` - -For instance:: - - CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex'; - -The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. -You place the trigger code in a ``lib/triggers`` subdirectory of the Cassandra installation directory, it loads during -cluster startup, and exists on every node that participates in a cluster. The trigger defined on a table fires before a -requested DML statement occurs, which ensures the atomicity of the transaction. - -.. _drop-trigger-statement: - -DROP TRIGGER -^^^^^^^^^^^^ - -Dropping a trigger uses the ``DROP TRIGGER`` statement: - -.. productionlist:: - drop_trigger_statement: DROP TRIGGER [ IF EXISTS ] `trigger_name` ON `table_name` - -For instance:: - - DROP TRIGGER myTrigger ON myTable; diff --git a/src/doc/4.0-beta1/_sources/cql/types.rst.txt b/src/doc/4.0-beta1/_sources/cql/types.rst.txt deleted file mode 100644 index 509a7565e..000000000 --- a/src/doc/4.0-beta1/_sources/cql/types.rst.txt +++ /dev/null @@ -1,559 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier - -.. _data-types: - -Data Types ----------- - -CQL is a typed language and supports a rich set of data types, including :ref:`native types `, -:ref:`collection types `, :ref:`user-defined types `, :ref:`tuple types ` and :ref:`custom -types `: - -.. productionlist:: - cql_type: `native_type` | `collection_type` | `user_defined_type` | `tuple_type` | `custom_type` - - -.. _native-types: - -Native Types -^^^^^^^^^^^^ - -The native types supported by CQL are: - -.. 
productionlist:: - native_type: ASCII - : | BIGINT - : | BLOB - : | BOOLEAN - : | COUNTER - : | DATE - : | DECIMAL - : | DOUBLE - : | DURATION - : | FLOAT - : | INET - : | INT - : | SMALLINT - : | TEXT - : | TIME - : | TIMESTAMP - : | TIMEUUID - : | TINYINT - : | UUID - : | VARCHAR - : | VARINT - -The following table gives additional informations on the native data types, and on which kind of :ref:`constants -` each type supports: - -=============== ===================== ================================================================================== - type constants supported description -=============== ===================== ================================================================================== - ``ascii`` :token:`string` ASCII character string - ``bigint`` :token:`integer` 64-bit signed long - ``blob`` :token:`blob` Arbitrary bytes (no validation) - ``boolean`` :token:`boolean` Either ``true`` or ``false`` - ``counter`` :token:`integer` Counter column (64-bit signed value). See :ref:`counters` for details - ``date`` :token:`integer`, A date (with no corresponding time value). See :ref:`dates` below for details - :token:`string` - ``decimal`` :token:`integer`, Variable-precision decimal - :token:`float` - ``double`` :token:`integer` 64-bit IEEE-754 floating point - :token:`float` - ``duration`` :token:`duration`, A duration with nanosecond precision. See :ref:`durations` below for details - ``float`` :token:`integer`, 32-bit IEEE-754 floating point - :token:`float` - ``inet`` :token:`string` An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that - there is no ``inet`` constant, IP address should be input as strings - ``int`` :token:`integer` 32-bit signed int - ``smallint`` :token:`integer` 16-bit signed int - ``text`` :token:`string` UTF8 encoded string - ``time`` :token:`integer`, A time (with no corresponding date value) with nanosecond precision. See - :token:`string` :ref:`times` below for details - ``timestamp`` :token:`integer`, A timestamp (date and time) with millisecond precision. See :ref:`timestamps` - :token:`string` below for details - ``timeuuid`` :token:`uuid` Version 1 UUID_, generally used as a “conflict-free” timestamp. Also see - :ref:`timeuuid-functions` - ``tinyint`` :token:`integer` 8-bit signed int - ``uuid`` :token:`uuid` A UUID_ (of any version) - ``varchar`` :token:`string` UTF8 encoded string - ``varint`` :token:`integer` Arbitrary-precision integer -=============== ===================== ================================================================================== - -.. _counters: - -Counters -~~~~~~~~ - -The ``counter`` type is used to define *counter columns*. A counter column is a column whose value is a 64-bit signed -integer and on which 2 operations are supported: incrementing and decrementing (see the :ref:`UPDATE statement -` for syntax). Note that the value of a counter cannot be set: a counter does not exist until first -incremented/decremented, and that first increment/decrement is made as if the prior value was 0. - -.. _counter-limitations: - -Counters have a number of important limitations: - -- They cannot be used for columns part of the ``PRIMARY KEY`` of a table. -- A table that contains a counter can only contain counters. In other words, either all the columns of a table outside - the ``PRIMARY KEY`` have the ``counter`` type, or none of them have it. -- Counters do not support :ref:`expiration `. 
-- The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other - words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed). -- Counter updates are, by nature, not `idemptotent `__. An important - consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), - the client has no way to know if the update has been applied or not. In particular, replaying the update may or may - not lead to an over count. - -.. _timestamps: - -Working with timestamps -^^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``timestamp`` type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as `the epoch `__: January 1 1970 at 00:00:00 GMT. - -Timestamps can be input in CQL either using their value as an :token:`integer`, or using a :token:`string` that -represents an `ISO 8601 `__ date. For instance, all of the values below are -valid ``timestamp`` values for Mar 2, 2011, at 04:05:00 AM, GMT: - -- ``1299038700000`` -- ``'2011-02-03 04:05+0000'`` -- ``'2011-02-03 04:05:00+0000'`` -- ``'2011-02-03 04:05:00.000+0000'`` -- ``'2011-02-03T04:05+0000'`` -- ``'2011-02-03T04:05:00+0000'`` -- ``'2011-02-03T04:05:00.000+0000'`` - -The ``+0000`` above is an RFC 822 4-digit time zone specification; ``+0000`` refers to GMT. US Pacific Standard Time is -``-0800``. The time zone may be omitted if desired (``'2011-02-03 04:05:00'``), and if so, the date will be interpreted -as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties -inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be -specified for timestamps when feasible. - -The time of day may also be omitted (``'2011-02-03'`` or ``'2011-02-03+0000'``), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the :ref:`date ` type. - -.. _dates: - -Working with dates -^^^^^^^^^^^^^^^^^^ - -Values of the ``date`` type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at -the center of the range (2^31). Epoch is January 1st, 1970 - -As for :ref:`timestamp `, a date can be input either as an :token:`integer` or using a date -:token:`string`. In the later case, the format should be ``yyyy-mm-dd`` (so ``'2011-02-03'`` for instance). - -.. _times: - -Working with times -^^^^^^^^^^^^^^^^^^ - -Values of the ``time`` type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight. - -As for :ref:`timestamp `, a time can be input either as an :token:`integer` or using a :token:`string` -representing the time. In the later case, the format should be ``hh:mm:ss[.fffffffff]`` (where the sub-second precision -is optional and if provided, can be less than the nanosecond). So for instance, the following are valid inputs for a -time: - -- ``'08:12:54'`` -- ``'08:12:54.123'`` -- ``'08:12:54.123456'`` -- ``'08:12:54.123456789'`` - -.. _durations: - -Working with durations -^^^^^^^^^^^^^^^^^^^^^^ - -Values of the ``duration`` type are encoded as 3 signed integer of variable lengths. The first integer represents the -number of months, the second the number of days and the third the number of nanoseconds. 
This is due to the fact that -the number of days in a month can change, and a day can have 23 or 25 hours depending on the daylight saving. -Internally, the number of months and days are decoded as 32 bits integers whereas the number of nanoseconds is decoded -as a 64 bits integer. - -A duration can be input as: - - #. ``(quantity unit)+`` like ``12h30m`` where the unit can be: - - * ``y``: years (12 months) - * ``mo``: months (1 month) - * ``w``: weeks (7 days) - * ``d``: days (1 day) - * ``h``: hours (3,600,000,000,000 nanoseconds) - * ``m``: minutes (60,000,000,000 nanoseconds) - * ``s``: seconds (1,000,000,000 nanoseconds) - * ``ms``: milliseconds (1,000,000 nanoseconds) - * ``us`` or ``µs`` : microseconds (1000 nanoseconds) - * ``ns``: nanoseconds (1 nanosecond) - #. ISO 8601 format: ``P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W`` - #. ISO 8601 alternative format: ``P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]`` - -For example:: - - INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s); - INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S); - INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09); - -.. _duration-limitation: - -Duration columns cannot be used in a table's ``PRIMARY KEY``. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if ``1mo`` is greater than ``29d`` without a date -context. - -A ``1d`` duration is not equals to a ``24h`` one as the duration type has been created to be able to support daylight -saving. - -.. _collections: - -Collections -^^^^^^^^^^^ - -CQL supports 3 kind of collections: :ref:`maps`, :ref:`sets` and :ref:`lists`. The types of those collections is defined -by: - -.. productionlist:: - collection_type: MAP '<' `cql_type` ',' `cql_type` '>' - : | SET '<' `cql_type` '>' - : | LIST '<' `cql_type` '>' - -and their values can be inputd using collection literals: - -.. productionlist:: - collection_literal: `map_literal` | `set_literal` | `list_literal` - map_literal: '{' [ `term` ':' `term` (',' `term` : `term`)* ] '}' - set_literal: '{' [ `term` (',' `term`)* ] '}' - list_literal: '[' [ `term` (',' `term`)* ] ']' - -Note however that neither :token:`bind_marker` nor ``NULL`` are supported inside collection literals. - -Noteworthy characteristics -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Collections are meant for storing/denormalizing relatively small amount of data. They work well for things like “the -phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all -messages sent by a user”, “events registered by a sensor”...), then collections are not appropriate and a specific table -(with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy -characteristics and limitations: - -- Individual collections are not indexed internally. Which means that even to access a single element of a collection, - the while collection has to be read (and reading one is not paged internally). -- While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. - Further, some lists operations are not idempotent by nature (see the section on :ref:`lists ` below for - details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when - possible. 
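Where items are expected to grow unbounded, the guidance above is to use a dedicated table with clustering columns
rather than a (non-frozen) collection. As a minimal sketch of that alternative (the table and column names below are
purely illustrative), the "all messages sent by a user" case could be modelled as::

    CREATE TABLE messages_by_user (
        user_id text,
        sent_at timeuuid,
        body text,
        PRIMARY KEY (user_id, sent_at)
    ) WITH CLUSTERING ORDER BY (sent_at DESC);

Each message becomes its own row within the user's partition, so individual messages can be read and paged without
materializing an ever-growing collection.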
- -Please note that while some of those limitations may or may not be removed/improved upon in the future, it is a -anti-pattern to use a (single) collection to store large amounts of data. - -.. _maps: - -Maps -~~~~ - -A ``map`` is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:: - - CREATE TABLE users ( - id text PRIMARY KEY, - name text, - favs map // A map of text keys, and text values - ); - - INSERT INTO users (id, name, favs) - VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' }); - - // Replace the existing map entirely. - UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith'; - -Further, maps support: - -- Updating or inserting one or more elements:: - - UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith'; - UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith'; - -- Removing one or more element (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - DELETE favs['author'] FROM users WHERE id = 'jsmith'; - UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith'; - - Note that for removing multiple elements in a ``map``, you remove from it a ``set`` of keys. - -Lastly, TTLs are allowed for both ``INSERT`` and ``UPDATE``, but in both case the TTL set only apply to the newly -inserted/updated elements. In other words:: - - UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith'; - -will only apply the TTL to the ``{ 'color' : 'green' }`` record, the rest of the map remaining unaffected. - - -.. _sets: - -Sets -~~~~ - -A ``set`` is a (sorted) collection of unique values. You can define and insert a map with:: - - CREATE TABLE images ( - name text PRIMARY KEY, - owner text, - tags set // A set of text values - ); - - INSERT INTO images (name, owner, tags) - VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' }); - - // Replace the existing set entirely - UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg'; - -Further, sets support: - -- Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):: - - UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg'; - -- Removing one or multiple elements (if an element doesn't exist, removing it is a no-op but no error is thrown):: - - UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg'; - -Lastly, as for :ref:`maps `, TTLs if used only apply to the newly inserted values. - -.. _lists: - -Lists -~~~~~ - -.. note:: As mentioned above and further discussed at the end of this section, lists have limitations and specific - performance considerations that you should take into account before using them. In general, if you can use a - :ref:`set ` instead of list, always prefer a set. - -A ``list`` is a (sorted) collection of non-unique values where elements are ordered by there position in the list. 
You -can define and insert a list with:: - - CREATE TABLE plays ( - id text PRIMARY KEY, - game text, - players int, - scores list // A list of integers - ) - - INSERT INTO plays (id, game, players, scores) - VALUES ('123-afde', 'quake', 3, [17, 4, 2]); - - // Replace the existing list entirely - UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde'; - -Further, lists support: - -- Appending and prepending values to a list:: - - UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde'; - UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde'; - -- Setting the value at a particular position in the list. This imply that the list has a pre-existing element for that - position or an error will be thrown that the list is too small:: - - UPDATE plays SET scores[1] = 7 WHERE id = '123-afde'; - -- Removing an element by its position in the list. This imply that the list has a pre-existing element for that position - or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the - list size will be diminished by 1, shifting the position of all the elements following the one deleted:: - - DELETE scores[1] FROM plays WHERE id = '123-afde'; - -- Deleting *all* the occurrences of particular values in the list (if a particular element doesn't occur at all in the - list, it is simply ignored and no error is thrown):: - - UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; - -.. warning:: The append and prepend operations are not idempotent by nature. So in particular, if one of these operation - timeout, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value - twice. - -.. warning:: Setting and removing an element by position and removing occurences of particular values incur an internal - *read-before-write*. They will thus run more slowly and take more ressources than usual updates (with the exclusion - of conditional write that have their own cost). - -Lastly, as for :ref:`maps `, TTLs when used only apply to the newly inserted values. - -.. _udts: - -User-Defined Types -^^^^^^^^^^^^^^^^^^ - -CQL support the definition of user-defined types (UDT for short). Such a type can be created, modified and removed using -the :token:`create_type_statement`, :token:`alter_type_statement` and :token:`drop_type_statement` described below. But -once created, a UDT is simply referred to by its name: - -.. productionlist:: - user_defined_type: `udt_name` - udt_name: [ `keyspace_name` '.' ] `identifier` - - -Creating a UDT -~~~~~~~~~~~~~~ - -Creating a new user-defined type is done using a ``CREATE TYPE`` statement defined by: - -.. productionlist:: - create_type_statement: CREATE TYPE [ IF NOT EXISTS ] `udt_name` - : '(' `field_definition` ( ',' `field_definition` )* ')' - field_definition: `identifier` `cql_type` - -A UDT has a name (used to declared columns of that type) and is a set of named and typed fields. Fields name can be any -type, including collections or other UDT. For instance:: - - CREATE TYPE phone ( - country_code int, - number text, - ) - - CREATE TYPE address ( - street text, - city text, - zip text, - phones map - ) - - CREATE TABLE user ( - name text PRIMARY KEY, - addresses map> - ) - -Note that: - -- Attempting to create an already existing type will result in an error unless the ``IF NOT EXISTS`` option is used. If - it is used, the statement will be a no-op if the type already exists. 
-- A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At - creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in - the current keyspace. -- As of Cassandra |version|, UDTs have to be frozen in most cases, hence the ``frozen
`` in the table definition - above. Please see the section on :ref:`frozen ` for more details. - -UDT literals -~~~~~~~~~~~~ - -Once a used-defined type has been created, value can be input using a UDT literal: - -.. productionlist:: - udt_literal: '{' `identifier` ':' `term` ( ',' `identifier` ':' `term` )* '}' - -In other words, a UDT literal is like a :ref:`map ` literal but its keys are the names of the fields of the type. -For instance, one could insert into the table define in the previous section using:: - - INSERT INTO user (name, addresses) - VALUES ('z3 Pr3z1den7', { - 'home' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'cell' : { country_code: 1, number: '202 456-1111' }, - 'landline' : { country_code: 1, number: '...' } } - }, - 'work' : { - street: '1600 Pennsylvania Ave NW', - city: 'Washington', - zip: '20500', - phones: { 'fax' : { country_code: 1, number: '...' } } - } - }) - -To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some field -(in which case those will be ``null``). - -Altering a UDT -~~~~~~~~~~~~~~ - -An existing user-defined type can be modified using an ``ALTER TYPE`` statement: - -.. productionlist:: - alter_type_statement: ALTER TYPE `udt_name` `alter_type_modification` - alter_type_modification: ADD `field_definition` - : | RENAME `identifier` TO `identifier` ( `identifier` TO `identifier` )* - -You can: - -- add a new field to the type (``ALTER TYPE address ADD country text``). That new field will be ``null`` for any values - of the type created before the addition. -- rename the fields of the type (``ALTER TYPE address RENAME zip TO zipcode``). - -Dropping a UDT -~~~~~~~~~~~~~~ - -You can drop an existing user-defined type using a ``DROP TYPE`` statement: - -.. productionlist:: - drop_type_statement: DROP TYPE [ IF EXISTS ] `udt_name` - -Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error. - -If the type dropped does not exist, an error will be returned unless ``IF EXISTS`` is used, in which case the operation -is a no-op. - -.. _tuples: - -Tuples -^^^^^^ - -CQL also support tuples and tuple types (where the elements can be of different types). Functionally, tuples can be -though as anonymous UDT with anonymous fields. Tuple types and tuple literals are defined by: - -.. productionlist:: - tuple_type: TUPLE '<' `cql_type` ( ',' `cql_type` )* '>' - tuple_literal: '(' `term` ( ',' `term` )* ')' - -and can be used thusly:: - - CREATE TABLE durations ( - event text, - duration tuple, - ) - - INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours')); - -Unlike other "composed" types (collections and UDT), a tuple is always :ref:`frozen ` (without the need of the -`frozen` keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). -Also, a tuple literal should always have the same number of value than declared in the type it is a tuple of (some of -those values can be null but they need to be explicitly declared as so). - -.. _custom-types: - -Custom Types -^^^^^^^^^^^^ - -.. note:: Custom types exists mostly for backward compatiliby purposes and their usage is discouraged. Their usage is - complex, not user friendly and the other provided types, particularly :ref:`user-defined types `, should almost - always be enough. - -A custom type is defined by: - -.. 
productionlist:: - custom_type: `string` - -A custom type is a :token:`string` that contains the name of Java class that extends the server side ``AbstractType`` -class and that can be loaded by Cassandra (it should thus be in the ``CLASSPATH`` of every node running Cassandra). That -class will define what values are valid for the type and how the time sorts when used for a clustering column. For any -other purpose, a value of a custom type is the same than that of a ``blob``, and can in particular be input using the -:token:`blob` literal syntax. diff --git a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_conceptual.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_conceptual.rst.txt deleted file mode 100644 index 8749b799e..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_conceptual.rst.txt +++ /dev/null @@ -1,63 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. conceptual_data_modeling - -Conceptual Data Modeling -^^^^^^^^^^^^^^^^^^^^^^^^ - -First, let’s create a simple domain model that is easy to understand in -the relational world, and then see how you might map it from a relational -to a distributed hashtable model in Cassandra. - -Let's use an example that is complex enough -to show the various data structures and design patterns, but not -something that will bog you down with details. Also, a domain that’s -familiar to everyone will allow you to concentrate on how to work with -Cassandra, not on what the application domain is all about. - -For example, let's use a domain that is easily understood and that -everyone can relate to: making hotel reservations. - -The conceptual domain includes hotels, guests that stay in the hotels, a -collection of rooms for each hotel, the rates and availability of those -rooms, and a record of reservations booked for guests. Hotels typically -also maintain a collection of “points of interest,” which are parks, -museums, shopping galleries, monuments, or other places near the hotel -that guests might want to visit during their stay. Both hotels and -points of interest need to maintain geolocation data so that they can be -found on maps for mashups, and to calculate distances. - -The conceptual domain is depicted below using the entity–relationship -model popularized by Peter Chen. This simple diagram represents the -entities in the domain with rectangles, and attributes of those entities -with ovals. Attributes that represent unique identifiers for items are -underlined. Relationships between entities are represented as diamonds, -and the connectors between the relationship and each entity show the -multiplicity of the connection. - -.. 
image:: images/data_modeling_hotel_erd.png - -Obviously, in the real world, there would be many more considerations -and much more complexity. For example, hotel rates are notoriously -dynamic, and calculating them involves a wide array of factors. Here -you’re defining something complex enough to be interesting and touch on -the important points, but simple enough to maintain the focus on -learning Cassandra. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_logical.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_logical.rst.txt deleted file mode 100644 index 27fa4beb7..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_logical.rst.txt +++ /dev/null @@ -1,219 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Logical Data Modeling -===================== - -Now that you have defined your queries, you’re ready to begin designing -Cassandra tables. First, create a logical model containing a table -for each query, capturing entities and relationships from the conceptual -model. - -To name each table, you’ll identify the primary entity type for which you -are querying and use that to start the entity name. If you are querying -by attributes of other related entities, append those to the table -name, separated with ``_by_``. For example, ``hotels_by_poi``. - -Next, you identify the primary key for the table, adding partition key -columns based on the required query attributes, and clustering columns -in order to guarantee uniqueness and support desired sort ordering. - -The design of the primary key is extremely important, as it will -determine how much data will be stored in each partition and how that -data is organized on disk, which in turn will affect how quickly -Cassandra processes reads. - -Complete each table by adding any additional attributes identified by -the query. If any of these additional attributes are the same for every -instance of the partition key, mark the column as static. - -Now that was a pretty quick description of a fairly involved process, so -it will be worthwhile to work through a detailed example. First, -let’s introduce a notation that you can use to represent logical -models. - -Several individuals within the Cassandra community have proposed -notations for capturing data models in diagrammatic form. This document -uses a notation popularized by Artem Chebotko which provides a simple, -informative way to visualize the relationships between queries and -tables in your designs. This figure shows the Chebotko notation for a -logical data model. 
- -.. image:: images/data_modeling_chebotko_logical.png - -Each table is shown with its title and a list of columns. Primary key -columns are identified via symbols such as **K** for partition key -columns and **C**\ ↑ or **C**\ ↓ to represent clustering columns. Lines -are shown entering tables or between tables to indicate the queries that -each table is designed to support. - -Hotel Logical Data Model ------------------------- - -The figure below shows a Chebotko logical data model for the queries -involving hotels, points of interest, rooms, and amenities. One thing you'll -notice immediately is that the Cassandra design doesn’t include dedicated -tables for rooms or amenities, as you had in the relational design. This -is because the workflow didn’t identify any queries requiring this -direct access. - -.. image:: images/data_modeling_hotel_logical.png - -Let’s explore the details of each of these tables. - -The first query Q1 is to find hotels near a point of interest, so you’ll -call this table ``hotels_by_poi``. Searching by a named point of -interest is a clue that the point of interest should be a part -of the primary key. Let’s reference the point of interest by name, -because according to the workflow that is how users will start their -search. - -You’ll note that you certainly could have more than one hotel near a -given point of interest, so you’ll need another component in the primary -key in order to make sure you have a unique partition for each hotel. So -you add the hotel key as a clustering column. - -An important consideration in designing your table’s primary key is -making sure that it defines a unique data element. Otherwise you run the -risk of accidentally overwriting data. - -Now for the second query (Q2), you’ll need a table to get information -about a specific hotel. One approach would have been to put all of the -attributes of a hotel in the ``hotels_by_poi`` table, but you added -only those attributes that were required by the application workflow. - -From the workflow diagram, you know that the ``hotels_by_poi`` table is -used to display a list of hotels with basic information on each hotel, -and the application knows the unique identifiers of the hotels returned. -When the user selects a hotel to view details, you can then use Q2, which -is used to obtain details about the hotel. Because you already have the -``hotel_id`` from Q1, you use that as a reference to the hotel you’re -looking for. Therefore the second table is just called ``hotels``. - -Another option would have been to store a set of ``poi_names`` in the -hotels table. This is an equally valid approach. You’ll learn through -experience which approach is best for your application. - -Q3 is just a reverse of Q1—looking for points of interest near a hotel, -rather than hotels near a point of interest. This time, however, you need -to access the details of each point of interest, as represented by the -``pois_by_hotel`` table. As previously, you add the point of -interest name as a clustering key to guarantee uniqueness. - -At this point, let’s now consider how to support query Q4 to help the -user find available rooms at a selected hotel for the nights they are -interested in staying. Note that this query involves both a start date -and an end date. Because you’re querying over a range instead of a single -date, you know that you’ll need to use the date as a clustering key. 
-Use the ``hotel_id`` as a primary key to group room data for each hotel -on a single partition, which should help searches be super fast. Let’s -call this the ``available_rooms_by_hotel_date`` table. - -To support searching over a range, use :ref:`clustering columns -` to store -attributes that you need to access in a range query. Remember that the -order of the clustering columns is important. - -The design of the ``available_rooms_by_hotel_date`` table is an instance -of the **wide partition** pattern. This -pattern is sometimes called the **wide row** pattern when discussing -databases that support similar models, but wide partition is a more -accurate description from a Cassandra perspective. The essence of the -pattern is to group multiple related rows in a partition in order to -support fast access to multiple rows within the partition in a single -query. - -In order to round out the shopping portion of the data model, add the -``amenities_by_room`` table to support Q5. This will allow users to -view the amenities of one of the rooms that is available for the desired -stay dates. - -Reservation Logical Data Model ------------------------------- - -Now let's switch gears to look at the reservation queries. The figure -shows a logical data model for reservations. You’ll notice that these -tables represent a denormalized design; the same data appears in -multiple tables, with differing keys. - -.. image:: images/data_modeling_reservation_logical.png - -In order to satisfy Q6, the ``reservations_by_guest`` table can be used -to look up the reservation by guest name. You could envision query Q7 -being used on behalf of a guest on a self-serve website or a call center -agent trying to assist the guest. Because the guest name might not be -unique, you include the guest ID here as a clustering column as well. - -Q8 and Q9 in particular help to remind you to create queries -that support various stakeholders of the application, not just customers -but staff as well, and perhaps even the analytics team, suppliers, and so -on. - -The hotel staff might wish to see a record of upcoming reservations by -date in order to get insight into how the hotel is performing, such as -what dates the hotel is sold out or undersold. Q8 supports the retrieval -of reservations for a given hotel by date. - -Finally, you create a ``guests`` table. This provides a single -location that used to store guest information. In this case, you specify a -separate unique identifier for guest records, as it is not uncommon -for guests to have the same name. In many organizations, a customer -database such as the ``guests`` table would be part of a separate -customer management application, which is why other guest -access patterns were omitted from the example. - - -Patterns and Anti-Patterns --------------------------- - -As with other types of software design, there are some well-known -patterns and anti-patterns for data modeling in Cassandra. You’ve already -used one of the most common patterns in this hotel model—the wide -partition pattern. - -The **time series** pattern is an extension of the wide partition -pattern. In this pattern, a series of measurements at specific time -intervals are stored in a wide partition, where the measurement time is -used as part of the partition key. This pattern is frequently used in -domains including business analysis, sensor data management, and -scientific experiments. - -The time series pattern is also useful for data other than measurements. 
-Consider the example of a banking application. You could store each -customer’s balance in a row, but that might lead to a lot of read and -write contention as various customers check their balance or make -transactions. You’d probably be tempted to wrap a transaction around -writes just to protect the balance from being updated in error. In -contrast, a time series–style design would store each transaction as a -timestamped row and leave the work of calculating the current balance to -the application. - -One design trap that many new users fall into is attempting to use -Cassandra as a queue. Each item in the queue is stored with a timestamp -in a wide partition. Items are appended to the end of the queue and read -from the front, being deleted after they are read. This is a design that -seems attractive, especially given its apparent similarity to the time -series pattern. The problem with this approach is that the deleted items -are now :ref:`tombstones ` that Cassandra must scan past -in order to read from the front of the queue. Over time, a growing number -of tombstones begins to degrade read performance. - -The queue anti-pattern serves as a reminder that any design that relies -on the deletion of data is potentially a poorly performing design. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_physical.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_physical.rst.txt deleted file mode 100644 index 758400496..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_physical.rst.txt +++ /dev/null @@ -1,117 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Physical Data Modeling -====================== - -Once you have a logical data model defined, creating the physical model -is a relatively simple process. - -You walk through each of the logical model tables, assigning types to -each item. You can use any valid :ref:`CQL data type `, -including the basic types, collections, and user-defined types. You may -identify additional user-defined types that can be created to simplify -your design. - -After you’ve assigned data types, you analyze the model by performing -size calculations and testing out how the model works. You may make some -adjustments based on your findings. Once again let's cover the data -modeling process in more detail by working through an example. - -Before getting started, let’s look at a few additions to the Chebotko -notation for physical data models. To draw physical models, you need to -be able to add the typing information for each column. 
This figure -shows the addition of a type for each column in a sample table. - -.. image:: images/data_modeling_chebotko_physical.png - -The figure includes a designation of the keyspace containing each table -and visual cues for columns represented using collections and -user-defined types. Note the designation of static columns and -secondary index columns. There is no restriction on assigning these as -part of a logical model, but they are typically more of a physical data -modeling concern. - -Hotel Physical Data Model -------------------------- - -Now let’s get to work on the physical model. First, you need keyspaces -to contain the tables. To keep the design relatively simple, create a -``hotel`` keyspace to contain tables for hotel and availability -data, and a ``reservation`` keyspace to contain tables for reservation -and guest data. In a real system, you might divide the tables across even -more keyspaces in order to separate concerns. - -For the ``hotels`` table, use Cassandra’s ``text`` type to -represent the hotel’s ``id``. For the address, create an -``address`` user defined type. Use the ``text`` type to represent the -phone number, as there is considerable variance in the formatting of -numbers between countries. - -While it would make sense to use the ``uuid`` type for attributes such -as the ``hotel_id``, this document uses mostly ``text`` attributes as -identifiers, to keep the samples simple and readable. For example, a -common convention in the hospitality industry is to reference properties -by short codes like "AZ123" or "NY229". This example uses these values -for ``hotel_ids``, while acknowledging they are not necessarily globally -unique. - -You’ll find that it’s often helpful to use unique IDs to uniquely -reference elements, and to use these ``uuids`` as references in tables -representing other entities. This helps to minimize coupling between -different entity types. This may prove especially effective if you are -using a microservice architectural style for your application, in which -there are separate services responsible for each entity type. - -As you work to create physical representations of various tables in the -logical hotel data model, you use the same approach. The resulting design -is shown in this figure: - -.. image:: images/data_modeling_hotel_physical.png - -Note that the ``address`` type is also included in the design. It -is designated with an asterisk to denote that it is a user-defined type, -and has no primary key columns identified. This type is used in -the ``hotels`` and ``hotels_by_poi`` tables. - -User-defined types are frequently used to help reduce duplication of -non-primary key columns, as was done with the ``address`` -user-defined type. This can reduce complexity in the design. - -Remember that the scope of a UDT is the keyspace in which it is defined. -To use ``address`` in the ``reservation`` keyspace defined below -design, you’ll have to declare it again. This is just one of the many -trade-offs you have to make in data model design. - -Reservation Physical Data Model -------------------------------- - -Now, let’s examine reservation tables in the design. -Remember that the logical model contained three denormalized tables to -support queries for reservations by confirmation number, guest, and -hotel and date. For the first iteration of your physical data model -design, assume you're going to manage this denormalization -manually. 
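As a rough sketch of what managing that denormalization by hand looks like, the following CQL creates two of the
reservation tables. The column names here are illustrative and may differ from those shown in the figure below, and the
application itself is responsible for writing each reservation to every table::

    CREATE TABLE reservation.reservations_by_confirmation (
        confirm_number text PRIMARY KEY,
        hotel_id text,
        start_date date,
        end_date date,
        room_number smallint,
        guest_id uuid
    );

    CREATE TABLE reservation.reservations_by_hotel_date (
        hotel_id text,
        start_date date,
        room_number smallint,
        end_date date,
        confirm_number text,
        guest_id uuid,
        PRIMARY KEY ((hotel_id, start_date), room_number)
    );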
Note that this design could be revised to use Cassandra’s -(experimental) materialized view feature. - -.. image:: images/data_modeling_reservation_physical.png - -Note that the ``address`` type is reproduced in this keyspace and -``guest_id`` is modeled as a ``uuid`` type in all of the tables. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_queries.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_queries.rst.txt deleted file mode 100644 index d0119944f..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_queries.rst.txt +++ /dev/null @@ -1,85 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Defining Application Queries -============================ - -Let’s try the query-first approach to start designing the data model for -a hotel application. The user interface design for the application is -often a great artifact to use to begin identifying queries. Let’s assume -that you’ve talked with the project stakeholders and your UX designers -have produced user interface designs or wireframes for the key use -cases. You’ll likely have a list of shopping queries like the following: - -- Q1. Find hotels near a given point of interest. - -- Q2. Find information about a given hotel, such as its name and - location. - -- Q3. Find points of interest near a given hotel. - -- Q4. Find an available room in a given date range. - -- Q5. Find the rate and amenities for a room. - -It is often helpful to be able to refer -to queries by a shorthand number rather that explaining them in full. -The queries listed here are numbered Q1, Q2, and so on, which is how they -are referenced in diagrams throughout the example. - -Now if the application is to be a success, you’ll certainly want -customers to be able to book reservations at hotels. This includes -steps such as selecting an available room and entering their guest -information. So clearly you will also need some queries that address the -reservation and guest entities from the conceptual data model. Even -here, however, you’ll want to think not only from the customer -perspective in terms of how the data is written, but also in terms of -how the data will be queried by downstream use cases. - -You natural tendency as might be to focus first on -designing the tables to store reservation and guest records, and only -then start thinking about the queries that would access them. 
You may -have felt a similar tension already when discussing the -shopping queries before, thinking “but where did the hotel and point of -interest data come from?” Don’t worry, you will see soon enough. -Here are some queries that describe how users will access -reservations: - -- Q6. Lookup a reservation by confirmation number. - -- Q7. Lookup a reservation by hotel, date, and guest name. - -- Q8. Lookup all reservations by guest name. - -- Q9. View guest details. - -All of the queries are shown in the context of the workflow of the -application in the figure below. Each box on the diagram represents a -step in the application workflow, with arrows indicating the flows -between steps and the associated query. If you’ve modeled the application -well, each step of the workflow accomplishes a task that “unlocks” -subsequent steps. For example, the “View hotels near POI” task helps -the application learn about several hotels, including their unique keys. -The key for a selected hotel may be used as part of Q2, in order to -obtain detailed description of the hotel. The act of booking a room -creates a reservation record that may be accessed by the guest and -hotel staff at a later time through various additional queries. - -.. image:: images/data_modeling_hotel_queries.png - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_rdbms.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_rdbms.rst.txt deleted file mode 100644 index 7d67d69fc..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_rdbms.rst.txt +++ /dev/null @@ -1,171 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -RDBMS Design -============ - -When you set out to build a new data-driven application that will use a -relational database, you might start by modeling the domain as a set of -properly normalized tables and use foreign keys to reference related -data in other tables. - -The figure below shows how you might represent the data storage for your application -using a relational database model. The relational model includes a -couple of “join” tables in order to realize the many-to-many -relationships from the conceptual model of hotels-to-points of interest, -rooms-to-amenities, rooms-to-availability, and guests-to-rooms (via a -reservation). - -.. image:: images/data_modeling_hotel_relational.png - -.. 
design_differences_between_rdbms_and_cassandra - -Design Differences Between RDBMS and Cassandra -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Let’s take a minute to highlight some of the key differences in doing -ata modeling for Cassandra versus a relational database. - -No joins -~~~~~~~~ - -You cannot perform joins in Cassandra. If you have designed a data model -and find that you need something like a join, you’ll have to either do -the work on the client side, or create a denormalized second table that -represents the join results for you. This latter option is preferred in -Cassandra data modeling. Performing joins on the client should be a very -rare case; you really want to duplicate (denormalize) the data instead. - -No referential integrity -~~~~~~~~~~~~~~~~~~~~~~~~ - -Although Cassandra supports features such as lightweight transactions -and batches, Cassandra itself has no concept of referential integrity -across tables. In a relational database, you could specify foreign keys -in a table to reference the primary key of a record in another table. -But Cassandra does not enforce this. It is still a common design -requirement to store IDs related to other entities in your tables, but -operations such as cascading deletes are not available. - -Denormalization -~~~~~~~~~~~~~~~ - -In relational database design, you are often taught the importance of -normalization. This is not an advantage when working with Cassandra -because it performs best when the data model is denormalized. It is -often the case that companies end up denormalizing data in relational -databases as well. There are two common reasons for this. One is -performance. Companies simply can’t get the performance they need when -they have to do so many joins on years’ worth of data, so they -denormalize along the lines of known queries. This ends up working, but -goes against the grain of how relational databases are intended to be -designed, and ultimately makes one question whether using a relational -database is the best approach in these circumstances. - -A second reason that relational databases get denormalized on purpose is -a business document structure that requires retention. That is, you have -an enclosing table that refers to a lot of external tables whose data -could change over time, but you need to preserve the enclosing document -as a snapshot in history. The common example here is with invoices. You -already have customer and product tables, and you’d think that you could -just make an invoice that refers to those tables. But this should never -be done in practice. Customer or price information could change, and -then you would lose the integrity of the invoice document as it was on -the invoice date, which could violate audits, reports, or laws, and -cause other problems. - -In the relational world, denormalization violates Codd’s normal forms, -and you try to avoid it. But in Cassandra, denormalization is, well, -perfectly normal. It’s not required if your data model is simple. But -don’t be afraid of it. - -Historically, denormalization in Cassandra has required designing and -managing multiple tables using techniques described in this documentation. -Beginning with the 3.0 release, Cassandra provides a feature known -as :ref:`materialized views ` -which allows you to create multiple denormalized -views of data based on a base table design. Cassandra manages -materialized views on the server, including the work of keeping the -views in sync with the table. 
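As a brief illustration of the syntax, using a hypothetical base table (a ``hotels`` table keyed by ``id`` that also
has a top-level ``city`` column), a server-maintained view could be declared as::

    CREATE MATERIALIZED VIEW hotel.hotels_by_city AS
        SELECT * FROM hotel.hotels
        WHERE city IS NOT NULL AND id IS NOT NULL
        PRIMARY KEY (city, id);

Keep in mind that, as noted later in this documentation, materialized views are still considered experimental in this
release, so evaluate the feature carefully before relying on it.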
- -Query-first design -~~~~~~~~~~~~~~~~~~ - -Relational modeling, in simple terms, means that you start from the -conceptual domain and then represent the nouns in the domain in tables. -You then assign primary keys and foreign keys to model relationships. -When you have a many-to-many relationship, you create the join tables -that represent just those keys. The join tables don’t exist in the real -world, and are a necessary side effect of the way relational models -work. After you have all your tables laid out, you can start writing -queries that pull together disparate data using the relationships -defined by the keys. The queries in the relational world are very much -secondary. It is assumed that you can always get the data you want as -long as you have your tables modeled properly. Even if you have to use -several complex subqueries or join statements, this is usually true. - -By contrast, in Cassandra you don’t start with the data model; you start -with the query model. Instead of modeling the data first and then -writing queries, with Cassandra you model the queries and let the data -be organized around them. Think of the most common query paths your -application will use, and then create the tables that you need to -support them. - -Detractors have suggested that designing the queries first is overly -constraining on application design, not to mention database modeling. -But it is perfectly reasonable to expect that you should think hard -about the queries in your application, just as you would, presumably, -think hard about your relational domain. You may get it wrong, and then -you’ll have problems in either world. Or your query needs might change -over time, and then you’ll have to work to update your data set. But -this is no different from defining the wrong tables, or needing -additional tables, in an RDBMS. - -Designing for optimal storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In a relational database, it is frequently transparent to the user how -tables are stored on disk, and it is rare to hear of recommendations -about data modeling based on how the RDBMS might store tables on disk. -However, that is an important consideration in Cassandra. Because -Cassandra tables are each stored in separate files on disk, it’s -important to keep related columns defined together in the same table. - -A key goal that you will see as you begin creating data models in -Cassandra is to minimize the number of partitions that must be searched -in order to satisfy a given query. Because the partition is a unit of -storage that does not get divided across nodes, a query that searches a -single partition will typically yield the best performance. - -Sorting is a design decision -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In an RDBMS, you can easily change the order in which records are -returned to you by using ``ORDER BY`` in your query. The default sort -order is not configurable; by default, records are returned in the order -in which they are written. If you want to change the order, you just -modify your query, and you can sort by any list of columns. - -In Cassandra, however, sorting is treated differently; it is a design -decision. The sort order available on queries is fixed, and is -determined entirely by the selection of clustering columns you supply in -the ``CREATE TABLE`` command. The CQL ``SELECT`` statement does support -``ORDER BY`` semantics, but only in the order specified by the -clustering columns. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. 
Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_refining.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_refining.rst.txt deleted file mode 100644 index 13a276ed7..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_refining.rst.txt +++ /dev/null @@ -1,218 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. role:: raw-latex(raw) - :format: latex -.. - -Evaluating and Refining Data Models -=================================== - -Once you’ve created a physical model, there are some steps you’ll want -to take to evaluate and refine table designs to help ensure optimal -performance. - -Calculating Partition Size --------------------------- - -The first thing that you want to look for is whether your tables will have -partitions that will be overly large, or to put it another way, too -wide. Partition size is measured by the number of cells (values) that -are stored in the partition. Cassandra’s hard limit is 2 billion cells -per partition, but you’ll likely run into performance issues before -reaching that limit. - -In order to calculate the size of partitions, use the following -formula: - -.. math:: N_v = N_r (N_c - N_{pk} - N_s) + N_s - -The number of values (or cells) in the partition (N\ :sub:`v`) is equal to -the number of static columns (N\ :sub:`s`) plus the product of the number -of rows (N\ :sub:`r`) and the number of of values per row. The number of -values per row is defined as the number of columns (N\ :sub:`c`) minus the -number of primary key columns (N\ :sub:`pk`) and static columns -(N\ :sub:`s`). - -The number of columns tends to be relatively static, although it -is possible to alter tables at runtime. For this reason, a -primary driver of partition size is the number of rows in the partition. -This is a key factor that you must consider in determining whether a -partition has the potential to get too large. Two billion values sounds -like a lot, but in a sensor system where tens or hundreds of values are -measured every millisecond, the number of values starts to add up pretty -fast. - -Let’s take a look at one of the tables to analyze the partition size. -Because it has a wide partition design with one partition per hotel, -look at the ``available_rooms_by_hotel_date`` table. The table has -four columns total (N\ :sub:`c` = 4), including three primary key columns -(N\ :sub:`pk` = 3) and no static columns (N\ :sub:`s` = 0). Plugging these -values into the formula, the result is: - -.. math:: N_v = N_r (4 - 3 - 0) + 0 = 1N_r - -Therefore the number of values for this table is equal to the number of -rows. You still need to determine a number of rows. 
To do this, make -estimates based on the application design. The table is -storing a record for each room, in each of hotel, for every night. -Let's assume the system will be used to store two years of -inventory at a time, and there are 5,000 hotels in the system, with an -average of 100 rooms in each hotel. - -Since there is a partition for each hotel, the estimated number of rows -per partition is as follows: - -.. math:: N_r = 100 rooms/hotel \times 730 days = 73,000 rows - -This relatively small number of rows per partition is not going to get -you in too much trouble, but if you start storing more dates of inventory, -or don’t manage the size of the inventory well using TTL, you could start -having issues. You still might want to look at breaking up this large -partition, which you'll see how to do shortly. - -When performing sizing calculations, it is tempting to assume the -nominal or average case for variables such as the number of rows. -Consider calculating the worst case as well, as these sorts of -predictions have a way of coming true in successful systems. - -Calculating Size on Disk ------------------------- - -In addition to calculating the size of a partition, it is also an -excellent idea to estimate the amount of disk space that will be -required for each table you plan to store in the cluster. In order to -determine the size, use the following formula to determine the size -S\ :sub:`t` of a partition: - -.. math:: S_t = \displaystyle\sum_i sizeOf\big (c_{k_i}\big) + \displaystyle\sum_j sizeOf\big(c_{s_j}\big) + N_r\times \bigg(\displaystyle\sum_k sizeOf\big(c_{r_k}\big) + \displaystyle\sum_l sizeOf\big(c_{c_l}\big)\bigg) + - -.. math:: N_v\times sizeOf\big(t_{avg}\big) - -This is a bit more complex than the previous formula, but let's break it -down a bit at a time. Let’s take a look at the notation first: - -- In this formula, c\ :sub:`k` refers to partition key columns, - c\ :sub:`s` to static columns, c\ :sub:`r` to regular columns, and - c\ :sub:`c` to clustering columns. - -- The term t\ :sub:`avg` refers to the average number of bytes of - metadata stored per cell, such as timestamps. It is typical to use an - estimate of 8 bytes for this value. - -- You'll recognize the number of rows N\ :sub:`r` and number of values - N\ :sub:`v` from previous calculations. - -- The **sizeOf()** function refers to the size in bytes of the CQL data - type of each referenced column. - -The first term asks you to sum the size of the partition key columns. For -this example, the ``available_rooms_by_hotel_date`` table has a single -partition key column, the ``hotel_id``, which is of type -``text``. Assuming that hotel identifiers are simple 5-character codes, -you have a 5-byte value, so the sum of the partition key column sizes is -5 bytes. - -The second term asks you to sum the size of the static columns. This table -has no static columns, so the size is 0 bytes. - -The third term is the most involved, and for good reason—it is -calculating the size of the cells in the partition. Sum the size of -the clustering columns and regular columns. The two clustering columns -are the ``date``, which is 4 bytes, and the ``room_number``, -which is a 2-byte short integer, giving a sum of 6 bytes. -There is only a single regular column, the boolean ``is_available``, -which is 1 byte in size. Summing the regular column size -(1 byte) plus the clustering column size (6 bytes) gives a total of 7 -bytes. 
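Expressed in the notation of the formula, the per-row portion of this term is:

.. math:: \displaystyle\sum_k sizeOf\big(c_{r_k}\big) + \displaystyle\sum_l sizeOf\big(c_{c_l}\big) = 1 + (4 + 2) = 7 bytes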
To finish up the term, multiply this value by the number of -rows (73,000), giving a result of 511,000 bytes (0.51 MB). - -The fourth term is simply counting the metadata that that Cassandra -stores for each cell. In the storage format used by Cassandra 3.0 and -later, the amount of metadata for a given cell varies based on the type -of data being stored, and whether or not custom timestamp or TTL values -are specified for individual cells. For this table, reuse the number -of values from the previous calculation (73,000) and multiply by 8, -which gives 0.58 MB. - -Adding these terms together, you get a final estimate: - -.. math:: Partition size = 16 bytes + 0 bytes + 0.51 MB + 0.58 MB = 1.1 MB - -This formula is an approximation of the actual size of a partition on -disk, but is accurate enough to be quite useful. Remembering that the -partition must be able to fit on a single node, it looks like the table -design will not put a lot of strain on disk storage. - -Cassandra’s storage engine was re-implemented for the 3.0 release, -including a new format for SSTable files. The previous format stored a -separate copy of the clustering columns as part of the record for each -cell. The newer format eliminates this duplication, which reduces the -size of stored data and simplifies the formula for computing that size. - -Keep in mind also that this estimate only counts a single replica of -data. You will need to multiply the value obtained here by the number of -partitions and the number of replicas specified by the keyspace’s -replication strategy in order to determine the total required total -capacity for each table. This will come in handy when you -plan your cluster. - -Breaking Up Large Partitions ----------------------------- - -As discussed previously, the goal is to design tables that can provide -the data you need with queries that touch a single partition, or failing -that, the minimum possible number of partitions. However, as shown in -the examples, it is quite possible to design wide -partition-style tables that approach Cassandra’s built-in limits. -Performing sizing analysis on tables may reveal partitions that are -potentially too large, either in number of values, size on disk, or -both. - -The technique for splitting a large partition is straightforward: add an -additional column to the partition key. In most cases, moving one of the -existing columns into the partition key will be sufficient. Another -option is to introduce an additional column to the table to act as a -sharding key, but this requires additional application logic. - -Continuing to examine the available rooms example, if you add the ``date`` -column to the partition key for the ``available_rooms_by_hotel_date`` -table, each partition would then represent the availability of rooms -at a specific hotel on a specific date. This will certainly yield -partitions that are significantly smaller, perhaps too small, as the -data for consecutive days will likely be on separate nodes. - -Another technique known as **bucketing** is often used to break the data -into moderate-size partitions. For example, you could bucketize the -``available_rooms_by_hotel_date`` table by adding a ``month`` column to -the partition key, perhaps represented as an integer. The comparision -with the original design is shown in the figure below. While the -``month`` column is partially duplicative of the ``date``, it provides -a nice way of grouping related data in a partition that will not get -too large. - -.. 
image:: images/data_modeling_hotel_bucketing.png - -If you really felt strongly about preserving a wide partition design, you -could instead add the ``room_id`` to the partition key, so that each -partition would represent the availability of the room across all -dates. Because there was no query identified that involves searching -availability of a specific room, the first or second design approach -is most suitable to the application needs. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_schema.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_schema.rst.txt deleted file mode 100644 index 1876ec3fa..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_schema.rst.txt +++ /dev/null @@ -1,144 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: cql - -Defining Database Schema -======================== - -Once you have finished evaluating and refining the physical model, you’re -ready to implement the schema in CQL. Here is the schema for the -``hotel`` keyspace, using CQL’s comment feature to document the query -pattern supported by each table:: - - CREATE KEYSPACE hotel WITH replication = - {‘class’: ‘SimpleStrategy’, ‘replication_factor’ : 3}; - - CREATE TYPE hotel.address ( - street text, - city text, - state_or_province text, - postal_code text, - country text ); - - CREATE TABLE hotel.hotels_by_poi ( - poi_name text, - hotel_id text, - name text, - phone text, - address frozen
, - PRIMARY KEY ((poi_name), hotel_id) ) - WITH comment = ‘Q1. Find hotels near given poi’ - AND CLUSTERING ORDER BY (hotel_id ASC) ; - - CREATE TABLE hotel.hotels ( - id text PRIMARY KEY, - name text, - phone text, - address frozen
, - pois set ) - WITH comment = ‘Q2. Find information about a hotel’; - - CREATE TABLE hotel.pois_by_hotel ( - poi_name text, - hotel_id text, - description text, - PRIMARY KEY ((hotel_id), poi_name) ) - WITH comment = Q3. Find pois near a hotel’; - - CREATE TABLE hotel.available_rooms_by_hotel_date ( - hotel_id text, - date date, - room_number smallint, - is_available boolean, - PRIMARY KEY ((hotel_id), date, room_number) ) - WITH comment = ‘Q4. Find available rooms by hotel date’; - - CREATE TABLE hotel.amenities_by_room ( - hotel_id text, - room_number smallint, - amenity_name text, - description text, - PRIMARY KEY ((hotel_id, room_number), amenity_name) ) - WITH comment = ‘Q5. Find amenities for a room’; - - -Notice that the elements of the partition key are surrounded -with parentheses, even though the partition key consists -of the single column ``poi_name``. This is a best practice that makes -the selection of partition key more explicit to others reading your CQL. - -Similarly, here is the schema for the ``reservation`` keyspace:: - - CREATE KEYSPACE reservation WITH replication = {‘class’: - ‘SimpleStrategy’, ‘replication_factor’ : 3}; - - CREATE TYPE reservation.address ( - street text, - city text, - state_or_province text, - postal_code text, - country text ); - - CREATE TABLE reservation.reservations_by_confirmation ( - confirm_number text, - hotel_id text, - start_date date, - end_date date, - room_number smallint, - guest_id uuid, - PRIMARY KEY (confirm_number) ) - WITH comment = ‘Q6. Find reservations by confirmation number’; - - CREATE TABLE reservation.reservations_by_hotel_date ( - hotel_id text, - start_date date, - end_date date, - room_number smallint, - confirm_number text, - guest_id uuid, - PRIMARY KEY ((hotel_id, start_date), room_number) ) - WITH comment = ‘Q7. Find reservations by hotel and date’; - - CREATE TABLE reservation.reservations_by_guest ( - guest_last_name text, - hotel_id text, - start_date date, - end_date date, - room_number smallint, - confirm_number text, - guest_id uuid, - PRIMARY KEY ((guest_last_name), hotel_id) ) - WITH comment = ‘Q8. Find reservations by guest name’; - - CREATE TABLE reservation.guests ( - guest_id uuid PRIMARY KEY, - first_name text, - last_name text, - title text, - emails set, - phone_numbers list, - addresses map, - confirm_number text ) - WITH comment = ‘Q9. Find guest by ID’; - -You now have a complete Cassandra schema for storing data for a hotel -application. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_tools.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_tools.rst.txt deleted file mode 100644 index 46fad3346..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/data_modeling_tools.rst.txt +++ /dev/null @@ -1,64 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Data Modeling Tools -============================= - -There are several tools available to help you design and -manage your Cassandra schema and build queries. - -* `Hackolade `_ - is a data modeling tool that supports schema design for Cassandra and - many other NoSQL databases. Hackolade supports the unique concepts of - CQL such as partition keys and clustering columns, as well as data types - including collections and UDTs. It also provides the ability to create - Chebotko diagrams. - -* `Kashlev Data Modeler `_ is a Cassandra - data modeling tool that automates the data modeling methodology - described in this documentation, including identifying - access patterns, conceptual, logical, and physical data modeling, and - schema generation. It also includes model patterns that you can - optionally leverage as a starting point for your designs. - -* DataStax DevCenter is a tool for managing - schema, executing queries and viewing results. While the tool is no - longer actively supported, it is still popular with many developers and - is available as a `free download `_. - DevCenter features syntax highlighting for CQL commands, types, and name - literals. DevCenter provides command completion as you type out CQL - commands and interprets the commands you type, highlighting any errors - you make. The tool provides panes for managing multiple CQL scripts and - connections to multiple clusters. The connections are used to run CQL - commands against live clusters and view the results. The tool also has a - query trace feature that is useful for gaining insight into the - performance of your queries. - -* IDE Plugins - There are CQL plugins available for several Integrated - Development Environments (IDEs), such as IntelliJ IDEA and Apache - NetBeans. These plugins typically provide features such as schema - management and query execution. - -Some IDEs and tools that claim to support Cassandra do not actually support -CQL natively, but instead access Cassandra using a JDBC/ODBC driver and -interact with Cassandra as if it were a relational database with SQL -support. Wnen selecting tools for working with Cassandra you’ll want to -make sure they support CQL and reinforce Cassandra best practices for -data modeling as presented in this documentation. - -*Material adapted from Cassandra, The Definitive Guide. Published by -O'Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.* \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/data_modeling/index.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/index.rst.txt deleted file mode 100644 index 2f799dc32..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/index.rst.txt +++ /dev/null @@ -1,36 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. 
http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Data Modeling -************* - -.. toctree:: - :maxdepth: 2 - - intro - data_modeling_conceptual - data_modeling_rdbms - data_modeling_queries - data_modeling_logical - data_modeling_physical - data_modeling_refining - data_modeling_schema - data_modeling_tools - - - - - diff --git a/src/doc/4.0-beta1/_sources/data_modeling/intro.rst.txt b/src/doc/4.0-beta1/_sources/data_modeling/intro.rst.txt deleted file mode 100644 index 630a7d1b5..000000000 --- a/src/doc/4.0-beta1/_sources/data_modeling/intro.rst.txt +++ /dev/null @@ -1,146 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Introduction -============ - -Apache Cassandra stores data in tables, with each table consisting of rows and columns. CQL (Cassandra Query Language) is used to query the data stored in tables. Apache Cassandra data model is based around and optimized for querying. Cassandra does not support relational data modeling intended for relational databases. - -What is Data Modeling? -^^^^^^^^^^^^^^^^^^^^^^ - -Data modeling is the process of identifying entities and their relationships. In relational databases, data is placed in normalized tables with foreign keys used to reference related data in other tables. Queries that the application will make are driven by the structure of the tables and related data are queried as table joins. - -In Cassandra, data modeling is query-driven. The data access patterns and application queries determine the structure and organization of data which then used to design the database tables. - -Data is modeled around specific queries. Queries are best designed to access a single table, which implies that all entities involved in a query must be in the same table to make data access (reads) very fast. Data is modeled to best suit a query or a set of queries. A table could have one or more entities as best suits a query. As entities do typically have relationships among them and queries could involve entities with relationships among them, a single entity may be included in multiple tables. - -Query-driven modeling -^^^^^^^^^^^^^^^^^^^^^ - -Unlike a relational database model in which queries make use of table joins to get data from multiple tables, joins are not supported in Cassandra so all required fields (columns) must be grouped together in a single table. 
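As a purely illustrative sketch (the table, column, and query names here are hypothetical and not part of the examples used elsewhere in this documentation), the same user data might be stored twice, organized once per query:

::

    CREATE TABLE user_by_email (
        email text PRIMARY KEY,    -- find a user by email address
        user_id uuid,
        name text
    );

    CREATE TABLE user_by_id (
        user_id uuid PRIMARY KEY,  -- find a user by id
        email text,
        name text
    );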
Since each query is backed by a table, data is duplicated across multiple tables in a process known as denormalization. Data duplication and a high write throughput are used to achieve a high read performance. - -Goals -^^^^^ - -The choice of the primary key and partition key is important to distribute data evenly across the cluster. Keeping the number of partitions read for a query to a minimum is also important because different partitions could be located on different nodes and the coordinator would need to send a request to each node adding to the request overhead and latency. Even if the different partitions involved in a query are on the same node, fewer partitions make for a more efficient query. - -Partitions -^^^^^^^^^^ - -Apache Cassandra is a distributed database that stores data across a cluster of nodes. A partition key is used to partition data among the nodes. Cassandra partitions data over the storage nodes using a variant of consistent hashing for data distribution. Hashing is a technique used to map data with which given a key, a hash function generates a hash value (or simply a hash) that is stored in a hash table. A partition key is generated from the first field of a primary key. Data partitioned into hash tables using partition keys provides for rapid lookup. Fewer the partitions used for a query faster is the response time for the query. - -As an example of partitioning, consider table ``t`` in which ``id`` is the only field in the primary key. - -:: - - CREATE TABLE t ( - id int, - k int, - v text, - PRIMARY KEY (id) - ); - -The partition key is generated from the primary key ``id`` for data distribution across the nodes in a cluster. - -Consider a variation of table ``t`` that has two fields constituting the primary key to make a composite or compound primary key. - -:: - - CREATE TABLE t ( - id int, - c text, - k int, - v text, - PRIMARY KEY (id,c) - ); - -For the table ``t`` with a composite primary key the first field ``id`` is used to generate the partition key and the second field ``c`` is the clustering key used for sorting within a partition. Using clustering keys to sort data makes retrieval of adjacent data more efficient. - -In general, the first field or component of a primary key is hashed to generate the partition key and the remaining fields or components are the clustering keys that are used to sort data within a partition. Partitioning data improves the efficiency of reads and writes. The other fields that are not primary key fields may be indexed separately to further improve query performance. - -The partition key could be generated from multiple fields if they are grouped as the first component of a primary key. As another variation of the table ``t``, consider a table with the first component of the primary key made of two fields grouped using parentheses. - -:: - - CREATE TABLE t ( - id1 int, - id2 int, - c1 text, - c2 text - k int, - v text, - PRIMARY KEY ((id1,id2),c1,c2) - ); - -For the preceding table ``t`` the first component of the primary key constituting fields ``id1`` and ``id2`` is used to generate the partition key and the rest of the fields ``c1`` and ``c2`` are the clustering keys used for sorting within a partition. - -Comparing with Relational Data Model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Relational databases store data in tables that have relations with other tables using foreign keys. A relational database’s approach to data modeling is table-centric. 
Queries must use table joins to get data from multiple tables that have a relation between them. Apache Cassandra does not have the concept of foreign keys or relational integrity. Apache Cassandra’s data model is based around designing efficient queries; queries that don’t involve multiple tables. Relational databases normalize data to avoid duplication. Apache Cassandra in contrast de-normalizes data by duplicating data in multiple tables for a query-centric data model. If a Cassandra data model cannot fully integrate the complexity of relationships between the different entities for a particular query, client-side joins in application code may be used. - -Examples of Data Modeling -^^^^^^^^^^^^^^^^^^^^^^^^^ - -As an example, a ``magazine`` data set consists of data for magazines with attributes such as magazine id, magazine name, publication frequency, publication date, and publisher. A basic query (Q1) for magazine data is to list all the magazine names including their publication frequency. As not all data attributes are needed for Q1 the data model would only consist of ``id`` ( for partition key), magazine name and publication frequency as shown in Figure 1. - -.. figure:: images/Figure_1_data_model.jpg - -Figure 1. Data Model for Q1 - -Another query (Q2) is to list all the magazine names by publisher. For Q2 the data model would consist of an additional attribute ``publisher`` for the partition key. The ``id`` would become the clustering key for sorting within a partition. Data model for Q2 is illustrated in Figure 2. - -.. figure:: images/Figure_2_data_model.jpg - -Figure 2. Data Model for Q2 - -Designing Schema -^^^^^^^^^^^^^^^^ - -After the conceptual data model has been created a schema may be designed for a query. For Q1 the following schema may be used. - -:: - - CREATE TABLE magazine_name (id int PRIMARY KEY, name text, publicationFrequency text) - -For Q2 the schema definition would include a clustering key for sorting. - -:: - - CREATE TABLE magazine_publisher (publisher text,id int,name text, publicationFrequency text, - PRIMARY KEY (publisher, id)) WITH CLUSTERING ORDER BY (id DESC) - -Data Model Analysis -^^^^^^^^^^^^^^^^^^^ - -The data model is a conceptual model that must be analyzed and optimized based on storage, capacity, redundancy and consistency. A data model may need to be modified as a result of the analysis. Considerations or limitations that are used in data model analysis include: - -- Partition Size -- Data Redundancy -- Disk space -- Lightweight Transactions (LWT) - -The two measures of partition size are the number of values in a partition and partition size on disk. Though requirements for these measures may vary based on the application a general guideline is to keep number of values per partition to below 100,000 and disk space per partition to below 100MB. - -Data redundancies as duplicate data in tables and multiple partition replicates are to be expected in the design of a data model , but nevertheless should be kept in consideration as a parameter to keep to the minimum. LWT transactions (compare-and-set, conditional update) could affect performance and queries using LWT should be kept to the minimum. - -Using Materialized Views -^^^^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: Materialized views (MVs) are experimental in the latest (4.0) release. - -Materialized views (MVs) could be used to implement multiple queries for a single table. 
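For example, assuming the ``magazine_name`` table defined above, a hypothetical query for magazines by publication frequency could be served by a materialized view instead of yet another denormalized table (a sketch only; the view name and query are illustrative):

::

    CREATE MATERIALIZED VIEW magazine_by_frequency AS
        SELECT * FROM magazine_name
        WHERE publicationFrequency IS NOT NULL AND id IS NOT NULL
        PRIMARY KEY (publicationFrequency, id);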
A materialized view is a table built from data from another table, the base table, with new primary key and new properties. Changes to the base table data automatically add and update data in a MV. Different queries may be implemented using a materialized view as an MV's primary key differs from the base table. Queries are optimized by the primary key definition. diff --git a/src/doc/4.0-beta1/_sources/development/ci.rst.txt b/src/doc/4.0-beta1/_sources/development/ci.rst.txt deleted file mode 100644 index 77360aea9..000000000 --- a/src/doc/4.0-beta1/_sources/development/ci.rst.txt +++ /dev/null @@ -1,72 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Jenkins CI Environment -********************** - -About CI testing and Apache Cassandra -===================================== - -Cassandra can be automatically tested using various test suites, that are either implemented based on JUnit or the `dtest `_ scripts written in Python. As outlined in :doc:`testing`, each kind of test suite addresses a different way how to test Cassandra. But in the end, all of them will be executed together on our CI platform at `builds.apache.org `_, running `Jenkins `_. - - - -Setting up your own Jenkins server -================================== - -Jenkins is an open source solution that can be installed on a large number of platforms. Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution. - -Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment. - -Required plugins ----------------- - -The following plugins need to be installed additionally to the standard plugins (git, ant, ..). - -You can install any missing plugins through the install manager. - -Go to ``Manage Jenkins -> Manage Plugins -> Available`` and install the following plugins and respective dependencies: - -* Job DSL -* Javadoc Plugin -* description setter plugin -* Throttle Concurrent Builds Plug-in -* Test stability history -* Hudson Post build task - - -Setup seed job --------------- - -Config ``New Item`` - -* Name it ``Cassandra-Job-DSL`` -* Select ``Freestyle project`` - -Under ``Source Code Management`` select Git using the repository: ``https://github.com/apache/cassandra-builds`` - -Under ``Build``, confirm ``Add build step`` -> ``Process Job DSLs`` and enter at ``Look on Filesystem``: ``jenkins-dsl/cassandra_job_dsl_seed.groovy`` - -Generated jobs will be created based on the Groovy script's default settings. 
You may want to override settings by checking ``This project is parameterized`` and add ``String Parameter`` for on the variables that can be found in the top of the script. This will allow you to setup jobs for your own repository and branches (e.g. working branches). - -**When done, confirm "Save"** - -You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message `"Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use"`. Goto ``Manage Jenkins`` -> ``In-process Script Approval`` to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates. - -Jobs are triggered by either changes in Git or are scheduled to execute periodically, e.g. on daily basis. Jenkins will use any available executor with the label "cassandra", once the job is to be run. Please make sure to make any executors available by selecting ``Build Executor Status`` -> ``Configure`` -> Add "``cassandra``" as label and save. - -Executors need to have "JDK 1.8 (latest)" installed. This is done under ``Manage Jenkins -> Global Tool Configuration -> JDK Installations…``. Executors also need to have the virtualenv package installed on their system. - diff --git a/src/doc/4.0-beta1/_sources/development/code_style.rst.txt b/src/doc/4.0-beta1/_sources/development/code_style.rst.txt deleted file mode 100644 index 5a486a4a3..000000000 --- a/src/doc/4.0-beta1/_sources/development/code_style.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Code Style -========== - -General Code Conventions ------------------------- - - - The Cassandra project follows `Sun's Java coding conventions `_ with an important exception: ``{`` and ``}`` are always placed on a new line - -Exception handling ------------------- - - - Never ever write ``catch (...) {}`` or ``catch (...) { logger.error() }`` merely to satisfy Java's compile-time exception checking. Always propagate the exception up or throw ``RuntimeException`` (or, if it "can't happen," ``AssertionError``). This makes the exceptions visible to automated tests. - - Avoid propagating up checked exceptions that no caller handles. Rethrow as ``RuntimeException`` (or ``IOError``, if that is more applicable). - - Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don't hide it behind a warn; if it isn't, no need for the warning. - - If you genuinely know an exception indicates an expected condition, it's okay to ignore it BUT this must be explicitly explained in a comment. 
- -Boilerplate ------------ - - - Avoid redundant ``@Override`` annotations when implementing abstract or interface methods. - - Do not implement equals or hashcode methods unless they are actually needed. - - Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in "real" methods to either.) - - Prefer requiring initialization in the constructor to setters. - - Avoid redundant ``this`` references to member fields or methods. - - Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it. - - Always include braces for nested levels of conditionals and loops. Only avoid braces for single level. - -Multiline statements --------------------- - - - Try to keep lines under 120 characters, but use good judgement -- it's better to exceed 120 by a little, than split a line that has no natural splitting points. - - When splitting inside a method call, use one line per parameter and align them, like this: - - :: - - SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(), - columnFamilies.size(), - StorageService.getPartitioner()); - - - When splitting a ternary, use one line per clause, carry the operator, and align like this: - - :: - - var = bar == null - ? doFoo() - : doBar(); - -Whitespace ----------- - - - Please make sure to use 4 spaces instead of the tab character for all your indentation. - - Many lines in many files have a bunch of trailing whitespace... Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn't have to pay attention to whitespace diffs. - -Imports -------- - -Please observe the following order for your imports:: - - java - [blank line] - com.google.common - org.apache.commons - org.junit - org.slf4j - [blank line] - everything else alphabetically - -Format files for IDEs ---------------------- - - - IntelliJ: `intellij-codestyle.jar `_ - - IntelliJ 13: `gist for IntelliJ 13 `_ (this is a work in progress, still working on javadoc, ternary style, line continuations, etc) - - Eclipse (https://github.com/tjake/cassandra-style-eclipse) - - - diff --git a/src/doc/4.0-beta1/_sources/development/dependencies.rst.txt b/src/doc/4.0-beta1/_sources/development/dependencies.rst.txt deleted file mode 100644 index 6dd1cc46b..000000000 --- a/src/doc/4.0-beta1/_sources/development/dependencies.rst.txt +++ /dev/null @@ -1,53 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Dependency Management -********************* - -Managing libraries for Cassandra is a bit less straight forward compared to other projects, as the build process is based on ant, maven and manually managed jars. 
Make sure to follow the steps below carefully and pay attention to any emerging issues in the :doc:`ci` and reported related issues on Jira/ML, in case of any project dependency changes. - -As Cassandra is an Apache product, all included libraries must follow Apache's `software license requirements `_. - -Required steps to add or update libraries -========================================= - -* Add or replace jar file in ``lib`` directory -* Add or update ``lib/license`` files -* Update dependencies in ``build.xml`` - - * Add to ``parent-pom`` with correct version - * Add to ``all-pom`` if simple Cassandra dependency (see below) - - -POM file types -============== - -* **parent-pom** - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here. -* **build-deps-pom(-sources)** + **coverage-deps-pom** - used by ``ant build`` compile target. Listed dependenices will be resolved and copied to ``build/lib/{jar,sources}`` by executing the ``maven-ant-tasks-retrieve-build`` target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution. -* **test-deps-pom** - refered by ``maven-ant-tasks-retrieve-test`` to retrieve and save dependencies to ``build/test/lib``. Exclusively used during JUnit test execution. -* **all-pom** - pom for `cassandra-all.jar `_ that can be installed or deployed to public maven repos via ``ant publish`` - - -Troubleshooting and conflict resolution -======================================= - -Here are some useful commands that may help you out resolving conflicts. - -* ``ant realclean`` - gets rid of the build directory, including build artifacts. -* ``mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j`` - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ``ant mvn-install``. -* ``rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/`` - removes cached local Cassandra maven artifacts - - diff --git a/src/doc/4.0-beta1/_sources/development/documentation.rst.txt b/src/doc/4.0-beta1/_sources/development/documentation.rst.txt deleted file mode 100644 index c623d54b9..000000000 --- a/src/doc/4.0-beta1/_sources/development/documentation.rst.txt +++ /dev/null @@ -1,104 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - -Working on Documentation -************************* - -How Cassandra is documented -=========================== - -The official Cassandra documentation lives in the project's git repository. We use a static site generator, `Sphinx `_, to create pages hosted at `cassandra.apache.org `_. 
You'll also find developer centric content about Cassandra internals in our retired `wiki `_ (not covered by this guide). - -Using a static site generator often requires to use a markup language instead of visual editors (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses `reStructuredText `_ for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at `existing documents <..>`_ to get a better idea how we use reStructuredText to write our documents. - -So how do you actually start making contributions? - -GitHub based work flow -====================== - -*Recommended for shorter documents and minor changes on existing content (e.g. fixing typos or updating descriptions)* - -Follow these steps to contribute using GitHub. It's assumed that you're logged in with an existing account. - -1. Fork the GitHub mirror of the `Cassandra repository `_ - -.. image:: images/docs_fork.png - -2. Create a new branch that you can use to make your edits. It's recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later to when you decide you’re ready to contribute your work. - -.. image:: images/docs_create_branch.png - -3. Navigate to document sources ``doc/source`` to find the ``.rst`` file to edit. The URL of the document should correspond to the directory structure. New files can be created using the "Create new file" button: - -.. image:: images/docs_create_file.png - -4. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and add some content. Have a look at other existing ``.rst`` files to get a better idea what format elements to use. - -.. image:: images/docs_editor.png - -Make sure to preview added content before committing any changes. - -.. image:: images/docs_preview.png - -5. Commit your work when you're done. Make sure to add a short description of all your edits since the last time you committed before. - -.. image:: images/docs_commit.png - -6. Finally if you decide that you're done working on your branch, it's time to create a pull request! - -.. image:: images/docs_pr.png - -Afterwards the GitHub Cassandra mirror will list your pull request and you're done. Congratulations! Please give us some time to look at your suggested changes before we get back to you. - - -Jira based work flow -==================== - -*Recommended for major changes* - -Significant changes to the documentation are best managed through our Jira issue tracker. Please follow the same `contribution guides `_ as for regular code contributions. Creating high quality content takes a lot of effort. It’s therefor always a good idea to create a ticket before you start and explain what you’re planing to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed. - -Working on documents locally using Sphinx -========================================= - -*Recommended for advanced editing* - -Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefor it’s a good idea to setup Sphinx locally for any serious editing. 
Please follow the instructions in the Cassandra source directory at ``doc/README.md``. Setup is very easy (at least on OSX and Linux). - -Notes for committers -==================== - -Please feel free to get involved and merge pull requests created on the GitHub mirror if you're a committer. As this is a read-only repository, you won't be able to merge a PR directly on GitHub. You'll have to commit the changes against the Apache repository with a comment that will close the PR when the committ syncs with GitHub. - -You may use a git work flow like this:: - - git remote add github https://github.com/apache/cassandra.git - git fetch github pull//head: - git checkout - -Now either rebase or squash the commit, e.g. for squashing:: - - git reset --soft origin/trunk - git commit --author - -Make sure to add a proper commit message including a "Closes #" text to automatically close the PR. - -Publishing ----------- - -Details for building and publishing of the site at cassandra.apache.org can be found `here `_. - diff --git a/src/doc/4.0-beta1/_sources/development/gettingstarted.rst.txt b/src/doc/4.0-beta1/_sources/development/gettingstarted.rst.txt deleted file mode 100644 index c2f5ef36e..000000000 --- a/src/doc/4.0-beta1/_sources/development/gettingstarted.rst.txt +++ /dev/null @@ -1,60 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _gettingstarted: - -Getting Started -************************* - -Initial Contributions -======================== - -Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). So, before firing up your IDE to create that new feature, we'd suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work. - * Add to or update the documentation - * Answer questions on the user list - * Review and test a submitted patch - * Investigate and fix a reported bug - * Create unit tests and d-tests - -Updating documentation -======================== - -The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (:ref:`patches`). - -Answering questions on the user list -==================================== - -Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! -See the `community `_ page for details on how to subscribe to the mailing list. 
- -Reviewing and testing a submitted patch -======================================= - -Reviewing patches is not the sole domain of committers, if others have reviewed a patch it can reduce the load on the committers allowing them to write more great features or review more patches. Follow the instructions in :ref:`_development_how_to_review` or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, "I tested this performance enhacement on our application's standard production load test and found a 3% improvement.") - -Investigate and/or fix a reported bug -===================================== - -Often, the hardest work in fixing a bug is reproducing it. Even if you don't have the knowledge to produce a fix, figuring out a way to reliable reproduce an issues can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (:ref:`patches`). - -Create unit tests and Dtests -============================ - -Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your change in introducing new issues. See :ref:`testing` and :ref:`patches` for more detail. - - - diff --git a/src/doc/4.0-beta1/_sources/development/how_to_commit.rst.txt b/src/doc/4.0-beta1/_sources/development/how_to_commit.rst.txt deleted file mode 100644 index dff39832d..000000000 --- a/src/doc/4.0-beta1/_sources/development/how_to_commit.rst.txt +++ /dev/null @@ -1,75 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -How-to Commit -============= - -If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself. - -Here is how committing and merging will usually look for merging and pushing for tickets that follow the convention (if patch-based): - -Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch). - -On cassandra-3.0: - #. 
``git am -3 12345-3.0.patch`` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git apply -3 12345-3.3.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git apply -3 12345-trunk.patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk -atomic`` - -Same scenario, but a branch-based contribution: - -On cassandra-3.0: - #. ``git cherry-pick `` (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place) - -On cassandra-3.3: - #. ``git merge cassandra-3.0 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On trunk: - #. ``git merge cassandra-3.3 -s ours`` - #. ``git format-patch -1 `` - #. ``git apply -3 .patch`` (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt) - #. ``git commit -amend`` - -On any branch: - #. ``git push origin cassandra-3.0 cassandra-3.3 trunk -atomic`` - -.. tip:: - - Notes on git flags: - ``-3`` flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply. - - ``-atomic`` flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per each branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue. - -.. tip:: - - The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. - curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch - diff --git a/src/doc/4.0-beta1/_sources/development/how_to_review.rst.txt b/src/doc/4.0-beta1/_sources/development/how_to_review.rst.txt deleted file mode 100644 index 4778b6946..000000000 --- a/src/doc/4.0-beta1/_sources/development/how_to_review.rst.txt +++ /dev/null @@ -1,73 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _how_to_review: - -Review Checklist -**************** - -When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process: - -**General** - - * Does it conform to the :doc:`code_style` guidelines? 
- * Is there any redundant or duplicate code? - * Is the code as modular as possible? - * Can any singletons be avoided? - * Can any of the code be replaced with library functions? - * Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem? - -**Error-Handling** - - * Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded? - * Where third-party utilities are used, are returning errors being caught? - * Are invalid parameter values handled? - * Are any Throwable/Exceptions passed to the JVMStabilityInspector? - * Are errors well-documented? Does the error message tell the user how to proceed? - * Do exceptions propagate to the appropriate level in the code? - -**Documentation** - - * Do comments exist and describe the intent of the code (the "why", not the "how")? - * Are javadocs added where appropriate? - * Is any unusual behavior or edge-case handling described? - * Are data structures and units of measurement explained? - * Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’? - * Does the code self-document via clear naming, abstractions, and flow control? - * Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed? - * Is the ticket tagged with "client-impacting" and "doc-impacting", where appropriate? - * Has lib/licences been updated for third-party libs? Are they Apache License compatible? - * Is the Component on the JIRA ticket set appropriately? - -**Testing** - - * Is the code testable? i.e. don’t add too many or hide dependencies, unable to initialize objects, test frameworks can use methods etc. - * Do tests exist and are they comprehensive? - * Do unit tests actually test that the code is performing the intended functionality? - * Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse? - * If the code may be affected by multi-node clusters, are there dtests? - * If the code may take a long time to test properly, are there CVH tests? - * Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions? - * If patch affects read/write path, did we test for performance regressions w/multiple workloads? - * If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature? - -**Logging** - - * Are logging statements logged at the correct level? - * Are there logs in the critical path that could affect performance? - * Is there any log that could be added to communicate status or troubleshoot potential problems in this feature? - * Can any unnecessary logging statement be removed? - diff --git a/src/doc/4.0-beta1/_sources/development/ide.rst.txt b/src/doc/4.0-beta1/_sources/development/ide.rst.txt deleted file mode 100644 index 97c73ae61..000000000 --- a/src/doc/4.0-beta1/_sources/development/ide.rst.txt +++ /dev/null @@ -1,185 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. 
Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Building and IDE Integration -**************************** - -Building From Source -==================== - -Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using `Java 8 `_, `Git `_ and `Ant `_. - -The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:: - - git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk - -Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:: - - git checkout cassandra-3.0 - -You can get a list of available branches with ``git branch``. - -Finally build Cassandra using ant:: - - ant - -This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled. - -.. hint:: - - You can setup multiple working trees for different Cassandra versions from the same repository using `git-worktree `_. - -| - -Setting up Cassandra in IntelliJ IDEA -===================================== - -`IntelliJ IDEA `_ by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra. - -Setup Cassandra as a Project (C* 2.1 and newer) ------------------------------------------------ - -Since 2.1.5, there is a new ant target: ``generate-idea-files``. Please see our `wiki `_ for instructions for older Cassandra versions. - -Please clone and build Cassandra as described above and execute the following steps: - -1. Once Cassandra is built, generate the IDEA files using ant: - -:: - - ant generate-idea-files - -2. Start IDEA - -3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA's File menu - -The project generated by the ant task ``generate-idea-files`` contains nearly everything you need to debug Cassandra and execute unit tests. - - * Run/debug defaults for JUnit - * Run/debug configuration for Cassandra daemon - * License header for Java source files - * Cassandra code style - * Inspections - -| - -Opening Cassandra in Apache NetBeans -======================================= - -`Apache NetBeans `_ is the elder of the open sourced java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans. - -Open Cassandra as a Project (C* 4.0 and newer) ------------------------------------------------ - -Please clone and build Cassandra as described above and execute the following steps: - -1. Start Apache NetBeans - -2. Open the NetBeans project from the `ide/` folder of the checked out Cassandra directory using the menu item "Open Project…" in NetBeans' File menu - -The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant `build.xml` script. - - * Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu. 
- * Profile Project is available via the Profile menu. In the opened Profiler tab, click the green "Profile" button. - * Cassandra's code style is honored in `ide/nbproject/project.properties` - -The `JAVA8_HOME` system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute. - -| - -Setting up Cassandra in Eclipse -=============================== - -Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the `download page `_. The following guide was created with "Eclipse IDE for Java Developers". - -These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x. - -Project Settings ----------------- - -**It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.** - - * Clone and build Cassandra as described above. - * Run ``ant generate-eclipse-files`` to create the Eclipse settings. - * Start Eclipse. - * Select ``File->Import->Existing Projects into Workspace->Select git directory``. - * Make sure "cassandra-trunk" is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above). - * Confirm "Finish" to have your project imported. - -You should now be able to find the project as part of the "Package Explorer" or "Project Explorer" without having Eclipse complain about any errors after building the project automatically. - -Unit Tests ----------- - -Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting ``Run As->JUnit Test``. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting ``Debug As->JUnit Test``. - -Alternatively all unit tests can be run from the command line as described in :doc:`testing` - -Debugging Cassandra Using Eclipse ---------------------------------- - -There are two ways how to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ``./bin/cassandra`` script and connect to the JVM through `remotely `_ from Eclipse or start Cassandra from Eclipse right away. - -Starting Cassandra From Command Line -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - * Set environment variable to define remote debugging options for the JVM: - ``export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"`` - * Start Cassandra by executing the ``./bin/cassandra`` - -Afterwards you should be able to connect to the running Cassandra process through the following steps: - -From the menu, select ``Run->Debug Configurations..`` - -.. image:: images/eclipse_debug0.png - -Create new remote application - -.. image:: images/eclipse_debug1.png - -Configure connection settings by specifying a name and port 1414 - -.. image:: images/eclipse_debug2.png - -Afterwards confirm "Debug" to connect to the JVM and start debugging Cassandra! - -Starting Cassandra From Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cassandra can also be started directly from Eclipse if you don't want to use the command line. - -From the menu, select ``Run->Run Configurations..`` - -.. image:: images/eclipse_debug3.png - -Create new application - -.. image:: images/eclipse_debug4.png - -Specify name, project and main class ``org.apache.cassandra.service.CassandraDaemon`` - -.. 
image:: images/eclipse_debug5.png - -Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed. - -:: - - -Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true - -.. image:: images/eclipse_debug6.png - -Now just confirm "Debug" and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging! - diff --git a/src/doc/4.0-beta1/_sources/development/index.rst.txt b/src/doc/4.0-beta1/_sources/development/index.rst.txt deleted file mode 100644 index ffa7134dd..000000000 --- a/src/doc/4.0-beta1/_sources/development/index.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Contributing to Cassandra -************************* - -.. toctree:: - :maxdepth: 2 - - gettingstarted - ide - testing - patches - code_style - how_to_review - how_to_commit - documentation - ci - dependencies - release_process diff --git a/src/doc/4.0-beta1/_sources/development/patches.rst.txt b/src/doc/4.0-beta1/_sources/development/patches.rst.txt deleted file mode 100644 index 92c05531e..000000000 --- a/src/doc/4.0-beta1/_sources/development/patches.rst.txt +++ /dev/null @@ -1,141 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _patches: - -Contributing Code Changes -************************* - -Choosing What to Work on -======================== - -Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java or Python), documentation, testing or any other changes that requires changing the code base. 
Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you're addressing. - -As a general rule of thumb: - * Major new features and significant changes to the code based will likely not going to be accepted without deeper discussion within the `developer community `_ - * Bug fixes take higher priority compared to features - * The extend to which tests are required depend on how likely your changes will effect the stability of Cassandra in production. Tooling changes requires fewer tests than storage engine changes. - * Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately - -.. hint:: - - Not sure what to work? Just pick an issue marked as `Low Hanging Fruit `_ Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners. - -Before You Start Coding -======================= - -Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it's generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or :ref:`Slack `. - -You should also - * Avoid redundant work by searching for already reported issues in `JIRA `_ - * Create a new issue early in the process describing what you're working on - not just after finishing your patch - * Link related JIRA issues with your own ticket to provide a better context - * Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code - * Ping people who you actively like to ask for advice on JIRA by `mentioning users `_ - -There are also some fixed rules that you need to be aware: - * Patches will only be applied to branches by following the release model - * Code must be testable - * Code must follow the :doc:`code_style` convention - * Changes must not break compatibility between different Cassandra versions - * Contributions must be covered by the Apache License - -Choosing the Right Branches to Work on -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are currently multiple Cassandra versions maintained in individual branches: - -======= ====== -Version Policy -======= ====== -4.0 Code freeze (see below) -3.11 Critical bug fixes only -3.0 Critical bug fixes only -2.2 Critical bug fixes only -2.1 Critical bug fixes only -======= ====== - -Corresponding branches in git are easy to recognize as they are named ``cassandra-`` (e.g. ``cassandra-3.0``). The ``trunk`` branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases. - -4.0 Code Freeze -""""""""""""""" - -Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. 
During that time, only the following patches will be considered for acceptance: - - * Bug fixes - * Measurable performance improvements - * Changes not distributed as part of the release such as: - * Testing related improvements and fixes - * Build and infrastructure related changes - * Documentation - -Bug Fixes -""""""""" - -Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be ``cassandra-2.1`` -> ``cassandra-2.2`` -> ``cassandra-3.0`` -> ``cassandra-3.x`` -> ``trunk``. But don't worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn't very common. As a contributor, you're also not expected to provide a single patch for each version. What you need to do however is: - - * Be clear about which versions you could verify to be affected by the bug - * For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on case by case bases - * If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0) - * Test if the patch can be merged cleanly across branches in the direction listed above - * Be clear which branches may need attention by the committer or even create custom patches for those if you can - -Creating a Patch -================ - -So you've finished coding and the great moment arrives: it's time to submit your patch! - - 1. Create a branch for your changes if you haven't done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. ``git checkout -b 12345-3.0`` - 2. Verify that you follow Cassandra's :doc:`code_style` - 3. Make sure all tests (including yours) pass using ant as described in :doc:`testing`. If you suspect a test failure is unrelated to your change, it may be useful to check the test's status by searching the issue tracker or looking at `CI `_ results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites. - 4. Consider going through the :doc:`how_to_review` for your code. This will help you to understand how others will consider your change for inclusion. - 5. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either: - - a. Attach a patch to JIRA with a single squashed commit in it (per branch), or - b. Squash the commits in-place in your branches into one - - 6. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below. Please note that only user-impacting items `should `_ be listed in CHANGES.txt. If you fix a test that does not affect users and does not require changes in runtime code, then no CHANGES.txt entry is necessary. - - :: - - - - patch by ; reviewed by for CASSANDRA-##### - - 7. When you're happy with the result, create a patch: - - :: - - git add - git commit -m '' - git format-patch HEAD~1 - mv (e.g. 12345-trunk.txt, 12345-3.0.txt) - - Alternatively, many contributors prefer to make their branch available on GitHub. 
In this case, fork the Cassandra repository on GitHub and push your branch: - - :: - - git push --set-upstream origin 12345-3.0 - - 8. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless. - 9. Attach the newly generated patch to the ticket/add a link to your branch and click "Submit Patch" at the top of the ticket. This will move the ticket into "Patch Available" status, indicating that your submission is ready for review. - 10. Wait for other developers or committers to review it and hopefully +1 the ticket (see :doc:`how_to_review`). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable. - 11. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into "Patch Available" once again. - -Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work. - - diff --git a/src/doc/4.0-beta1/_sources/development/release_process.rst.txt b/src/doc/4.0-beta1/_sources/development/release_process.rst.txt deleted file mode 100644 index fd86238d9..000000000 --- a/src/doc/4.0-beta1/_sources/development/release_process.rst.txt +++ /dev/null @@ -1,251 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. release_process: - -Release Process -*************** - -.. contents:: :depth: 3 - -|  -| - - - -The steps for Release Managers to create, vote and publish releases for Apache Cassandra. - -While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC member can complete the process of publishing and announcing the release. - - -Prerequisites -============= - -Background docs - * `ASF Release Policy `_ - * `ASF Release Distribution Policy `_ - * `ASF Release Best Practices `_ - - -A debian based linux OS is required to run the release steps from. Debian-based distros provide the required RPM, dpkg and repository management tools. - - -Create and publish your GPG key -------------------------------- - -To create a GPG key, follow the `guidelines `_. -The key must be 4096 bit RSA. 
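A minimal sketch of generating and exporting such a key with GnuPG (the exact prompts vary by GnuPG version, and the e-mail address below is only a placeholder)::

    $ gpg --full-generate-key                  # choose "(1) RSA and RSA" with a key size of 4096
    $ gpg --armor --export you@example.org     # prints the ASCII-armoured public key to append to KEYS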
-Include your public key in:: - - https://dist.apache.org/repos/dist/release/cassandra/KEYS - - -Publish your GPG key in a PGP key server, such as `MIT Keyserver `_. - -Bintray account with access to Apache organisation --------------------------------------------------- - -Publishing a successfully voted upon release requires bintray access to the Apache organisation. Please verify that you have a bintray account and the Apache organisation is listed `here `_. - - -Create Release Artifacts -======================== - -Any committer can perform the following steps to create and call a vote on a proposed release. - -Check that there are no open urgent jira tickets currently being worked on. Also check with the PMC that there's security vulnerabilities currently being worked on in private.' -Current project habit is to check the timing for a new release on the dev mailing lists. - -Perform the Release -------------------- - -Run the following commands to generate and upload release artifacts, to the ASF nexus staging repository and dev distribution location:: - - - cd ~/git - git clone https://github.com/apache/cassandra-builds.git - git clone https://github.com/apache/cassandra.git - - # Edit the variables at the top of the `prepare_release.sh` file - edit cassandra-builds/cassandra-release/prepare_release.sh - - # Ensure your 4096 RSA key is the default secret key - edit ~/.gnupg/gpg.conf # update the `default-key` line - edit ~/.rpmmacros # update the `%gpg_name ` line - - # Ensure DEBFULLNAME and DEBEMAIL is defined and exported, in the debian scripts configuration - edit ~/.devscripts - - # The prepare_release.sh is run from the actual cassandra git checkout, - # on the branch/commit that we wish to tag for the tentative release along with version number to tag. - cd cassandra - git switch cassandra- - - # The following cuts the release artifacts (including deb and rpm packages) and deploy to staging environments - ../cassandra-builds/cassandra-release/prepare_release.sh -v - -Follow the prompts. - -If building the deb or rpm packages fail, those steps can be repeated individually using the `-d` and `-r` flags, respectively. - -Call for a Vote -=============== - -Fill out the following email template and send to the dev mailing list:: - - I propose the following artifacts for release as . - - sha1: - - Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/-tentative - - Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-/org/apache/cassandra/apache-cassandra// - - Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-/ - - The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/ - - The vote will be open for 72 hours (longer if needed). - - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=-tentative - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=-tentative - - - -Post-vote operations -==================== - -Any PMC member can perform the following steps to formalize and publish a successfully voted release. 
- -Publish Artifacts ------------------ - -Run the following commands to publish the voted release artifacts:: - - cd ~/git - # edit the variables at the top of the `finish_release.sh` file - edit cassandra-builds/cassandra-release/finish_release.sh - - # After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout, - # on the tentative release tag that we wish to tag for the final release version number tag. - cd ~/git/cassandra/ - git checkout -tentative - ../cassandra-builds/cassandra-release/finish_release.sh -v - -If successful, take note of the email text output which can be used in the next section "Send Release Announcement". -The output will also list the next steps that are required. - - -Promote Nexus Repository ------------------------- - -* Login to `Nexus repository `_ again. -* Click on "Staging" and then on the repository with id "cassandra-staging". -* Find your closed staging repository, right click on it and choose "Promote". -* Select the "Releases" repository and click "Promote". -* Next click on "Repositories", select the "Releases" repository and validate that your artifacts exist as you expect them. - -Publish the Bintray Uploaded Distribution Packages ---------------------------------------- - -Log into bintray and publish the uploaded artifacts. - -Update and Publish Website --------------------------- - -See `docs `_ for building and publishing the website. - -Also update the CQL doc if appropriate. - -Release version in JIRA ------------------------ - -Release the JIRA version. - -* In JIRA go to the version that you want to release and release it. -* Create a new version, if it has not been done before. - -Update to Next Development Version ----------------------------------- - -Update the codebase to point to the next development version:: - - cd ~/git/cassandra/ - git checkout cassandra- - edit build.xml # update ` ` - edit debian/changelog # add entry for new version - edit CHANGES.txt # add entry for new version - git commit -m "Increment version to " build.xml debian/changelog CHANGES.txt - - # …and forward merge and push per normal procedure - - -Wait for Artifacts to Sync --------------------------- - -Wait for the artifacts to sync at https://downloads.apache.org/cassandra/ - -Send Release Announcement -------------------------- - -Fill out the following email template and send to both user and dev mailing lists:: - - The Cassandra team is pleased to announce the release of Apache Cassandra version . - - Apache Cassandra is a fully distributed database. It is the right choice - when you need scalability and high availability without compromising - performance. - - http://cassandra.apache.org/ - - Downloads of source and binary distributions are listed in our download - section: - - http://cassandra.apache.org/download/ - - This version is release[1] on the series. As always, - please pay attention to the release notes[2] and let us know[3] if you - were to encounter any problem. - - Enjoy! 
- - [1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb= - [2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb= - [3]: https://issues.apache.org/jira/browse/CASSANDRA - -Update Slack Cassandra topic ---------------------------- - -Update topic in ``cassandra`` :ref:`Slack room ` - /topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don't ask to ask - -Tweet from @Cassandra ---------------------- - -Tweet the new release, from the @Cassandra account - -Delete Old Releases -------------------- - -As described in `When to Archive `_. - -An example of removing old releases:: - - svn co https://dist.apache.org/repos/dist/release/cassandra/ cassandra-dist - svn rm debian/pool/main/c/cassandra/* - svn st - # check and commit \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/development/testing.rst.txt b/src/doc/4.0-beta1/_sources/development/testing.rst.txt deleted file mode 100644 index 7f38fe590..000000000 --- a/src/doc/4.0-beta1/_sources/development/testing.rst.txt +++ /dev/null @@ -1,98 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none -.. _testing: - -Testing -******* - -Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you're working on. - - -Unit Testing -============ - -The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the ``test/unit`` directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example. - -.. 
code-block:: java - - @Test - public void testBatchAndList() throws Throwable - { - createTable("CREATE TABLE %s (k int PRIMARY KEY, l list)"); - execute("BEGIN BATCH " + - "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " + - "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " + - "APPLY BATCH"); - - assertRows(execute("SELECT l FROM %s WHERE k = 0"), - row(list(1, 2, 3))); - } - -Unit tests can be run from the command line using the ``ant test`` command, ``ant test -Dtest.name=`` to execute a test suite or ``ant testsome -Dtest.name= -Dtest.methods=[,testmethod2]`` for individual tests. For example, to run all test methods in the ``org.apache.cassandra.cql3.SimpleQueryTest`` class, you would run:: - - ant test -Dtest.name=SimpleQueryTest - -To run only the ``testStaticCompactTables()`` test method from that class, you would run:: - - ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables - -If you see an error like this:: - - Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found: - org/apache/tools/ant/taskdefs/optional/junit/JUnitTask using the classloader - AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar] - -You will need to install the ant-optional package since it contains the ``JUnitTask`` class. - -Long running tests ------------------- - -Test that consume a significant amount of time during execution can be found in the ``test/long`` directory and executed as a regular JUnit test or standalone program. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under ``test/long`` only when using the ``ant long-test`` target. - -DTests -====== - -One way of doing integration or system testing at larger scale is by using `dtest `_, which stands for “Cassandra Distributed Tests”. The idea is to automatically setup Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ``ccmlib`` from the `ccm `_ project. Dtests will setup clusters using this library just as you do running ad-hoc ``ccm`` commands on your local machine. Afterwards dtests will use the `Python driver `_ to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes. - -Using dtests helps us to prevent regression bugs by continually executing tests on the `CI server `_ against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration `here `_. - -The best way to learn how to write dtests is probably by reading the introduction "`How to Write a Dtest `_" and by looking at existing, recently updated tests in the project. New tests must follow certain `style conventions `_ that are being checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on github, therefor you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR. - -Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will able to support you, and in some cases they may offer to write a dtest for you. 
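If you have not used ``ccm`` by hand before, it can help to spin up the same kind of throwaway cluster that dtests manage through ``ccmlib``. A minimal sketch (the cluster name and Cassandra version below are only examples)::

    $ ccm create dtest-scratch -v 3.11.6 -n 3 -s    # create and start a local three-node cluster
    $ ccm node1 cqlsh                               # open cqlsh against the first node
    $ ccm remove dtest-scratch                      # tear the cluster down when finished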
- -Performance Testing -=================== - -Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable. - -Cassandra Stress Tool ---------------------- - -See :ref:`cassandra_stress` - -cstar_perf ----------- - -Another tool available on github is `cstar_perf `_ that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it. - -CircleCI --------- -Cassandra ships with a default `CircleCI `_ configuration, to enable running tests on your branches, you need to go the CircleCI website, click "Login" and log in with your github account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click "Projects", then your github account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ``ant eclipse-warnings`` and ``ant test`` will be run. If you up the parallelism to 4, it also runs ``ant long-test``, ``ant test-compression`` and ``ant stress-test`` - - diff --git a/src/doc/4.0-beta1/_sources/faq/index.rst.txt b/src/doc/4.0-beta1/_sources/faq/index.rst.txt deleted file mode 100644 index acb7538d6..000000000 --- a/src/doc/4.0-beta1/_sources/faq/index.rst.txt +++ /dev/null @@ -1,299 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Frequently Asked Questions -========================== - -- :ref:`why-cant-list-all` -- :ref:`what-ports` -- :ref:`what-happens-on-joins` -- :ref:`asynch-deletes` -- :ref:`one-entry-ring` -- :ref:`can-large-blob` -- :ref:`nodetool-connection-refused` -- :ref:`to-batch-or-not-to-batch` -- :ref:`selinux` -- :ref:`how-to-unsubscribe` -- :ref:`cassandra-eats-all-my-memory` -- :ref:`what-are-seeds` -- :ref:`are-seeds-SPOF` -- :ref:`why-message-dropped` -- :ref:`oom-map-failed` -- :ref:`what-on-same-timestamp-update` -- :ref:`why-bootstrapping-stream-error` - -.. _why-cant-list-all: - -Why can't I set ``listen_address`` to listen on 0.0.0.0 (all my addresses)? ---------------------------------------------------------------------------- - -Cassandra is a gossip-based distributed system and ``listen_address`` is the address a node tells other nodes to reach -it at. Telling other nodes "contact me on any of my addresses" is a bad idea; if different nodes in the cluster pick -different addresses for you, Bad Things happen. 
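As a sketch of the recommended setup (the address below is purely illustrative), each node advertises exactly one concrete, routable address in its ``cassandra.yaml``::

    # cassandra.yaml on this particular node (example address only)
    listen_address: 192.0.2.11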
- -If you don't want to manually specify an IP to ``listen_address`` for each node in your cluster (understandable!), leave -it blank and Cassandra will use ``InetAddress.getLocalHost()`` to pick an address. Then it's up to you or your ops team -to make things resolve correctly (``/etc/hosts/``, dns, etc). - -One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769). - -See :jira:`256` and :jira:`43` for more gory details. - -.. _what-ports: - -What ports does Cassandra use? ------------------------------- - -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, -and 7199 for JMX. The internode communication and native protocol ports -are configurable in the :ref:`cassandra-yaml`. The JMX port is configurable in ``cassandra-env.sh`` (through JVM -options). All ports are TCP. - -.. _what-happens-on-joins: - -What happens to existing data in my cluster when I add new nodes? ------------------------------------------------------------------ - -When a new nodes joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data -to itself. See :ref:`topology-changes`. - -.. _asynch-deletes: - -I delete data from Cassandra, but disk usage stays the same. What gives? ------------------------------------------------------------------------- - -Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can't actually be removed -when you perform a delete, instead, a marker (also called a "tombstone") is written to indicate the value's new status. -Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged -completely and the corresponding disk space recovered. See :ref:`compaction` for more detail. - -.. _one-entry-ring: - -Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring? ------------------------------------------------------------------------------------------------------------------- - -This happens when you have the same token assigned to each node. Don't do that. - -Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which -auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes. - -The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random -token on the next restart. - -.. _change-replication-factor: - -Can I change the replication factor (a a keyspace) on a live cluster? ---------------------------------------------------------------------- - -Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data: - -- :ref:`Alter ` the replication factor for desired keyspace (using cqlsh for instance). -- If you're reducing the replication factor, run ``nodetool cleanup`` on the cluster to remove surplus replicated data. - Cleanup runs on a per-node basis. -- If you're increasing the replication factor, run ``nodetool repair -full`` to ensure data is replicated according to the new - configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster - performance. It's highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will - most likely swamp it. 
Note that you will need to run a full repair (``-full``) to make sure that already repaired - sstables are not skipped. - -.. _can-large-blob: - -Can I Store (large) BLOBs in Cassandra? ---------------------------------------- - -Cassandra isn't optimized for large file or BLOB storage and a single ``blob`` value is always read and send to the -client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to -manually split large blobs into smaller chunks. - -Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due the -``max_mutation_size_in_kb`` configuration of the :ref:`cassandra-yaml` file (which default to half of -``commitlog_segment_size_in_mb``, which itself default to 32MB). - -.. _nodetool-connection-refused: - -Nodetool says "Connection refused to host: 127.0.1.1" for any remote host. What gives? --------------------------------------------------------------------------------------- - -Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on -each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution -for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions. - -If you are not using DNS, then make sure that your ``/etc/hosts`` files are accurate on both ends. If that fails, try -setting the ``-Djava.rmi.server.hostname=`` JVM option near the bottom of ``cassandra-env.sh`` to an -interface that you can reach from the remote machine. - -.. _to-batch-or-not-to-batch: - -Will batching my operations speed up my bulk load? --------------------------------------------------- - -No. Using batches to load data will generally just add "spikes" of latency. Use asynchronous INSERTs instead, or use -true :ref:`bulk-loading`. - -An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch -stay reasonable). But never ever blindly batch everything! - -.. _selinux: - -On RHEL nodes are unable to join the ring ------------------------------------------ - -Check if `SELinux `__ is on; if it is, turn it off. - -.. _how-to-unsubscribe: - -How do I unsubscribe from the email list? ------------------------------------------ - -Send an email to ``user-unsubscribe@cassandra.apache.org``. - -.. _cassandra-eats-all-my-memory: - -Why does top report that Cassandra is using a lot more memory than the Java heap max? -------------------------------------------------------------------------------------- - -Cassandra uses `Memory Mapped Files `__ (mmap) internally. That is, we -use the operating system's virtual memory system to map a number of on-disk files into the Cassandra process' address -space. This will "use" virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that. - -What matters from the perspective of "memory use" in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap'd /dev/zero, which represent real memory used. The key issue is that for a mmap'd file, there is never -a need to retain the data resident in physical memory. 
Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write. - -The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don't -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail `here `__. - -.. _what-are-seeds: - -What are seeds? ---------------- - -Seeds are used during startup to discover the cluster. - -If you configure your nodes to refer some node as seed, nodes in your ring tend to send Gossip message to seeds more -often (also see the :ref:`section on gossip `) than to non-seeds. In other words, seeds are worked as hubs of -Gossip network. With seeds, each node can detect status changes of other nodes quickly. - -Seeds are also referred by new nodes on bootstrap to learn other nodes in ring. When you add a new node to ring, you -need to specify at least one live seed to contact. Once a node join the ring, it learns about the other nodes, so it -doesn't need seed on subsequent boot. - -You can make a seed a node at any time. There is nothing special about seed nodes. If you list the node in seed list it -is a seed - -Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself) -If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you -do not have to worry about bootstrap at all. - -Recommended usage of seeds: - -- pick two (or more) nodes per data center as seed nodes. -- sync the seed list to all your nodes - -.. _are-seeds-SPOF: - -Does single seed mean single point of failure? ----------------------------------------------- - -The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is -recommended to configure multiple seeds in production system. - -.. _cant-call-jmx-method: - -Why can't I call jmx method X on jconsole? ------------------------------------------- - -Some of JMX operations use array argument and as jconsole doesn't support array argument, those operations can't be -called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations or need -array-capable JMX monitoring tool. - -.. _why-message-dropped: - -Why do I see "... messages dropped ..." in the logs? ----------------------------------------------------- - -This is a symptom of load shedding -- Cassandra defending itself against more requests than it can handle. - -Internode messages which are received by a node, but do not get not to be processed within their proper timeout (see -``read_request_timeout``, ``write_request_timeout``, ... in the :ref:`cassandra-yaml`), are dropped rather than -processed (since the as the coordinator node will no longer be waiting for a response). - -For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be -repaired by read repair, hints or a manual repair. The write operation may also have timeouted as a result. - -For reads, this means a read request may not have completed. 
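The drop counters behind these log messages can also be inspected directly; a quick sketch (the output layout varies between versions)::

    $ nodetool tpstats    # the "Message type / Dropped" table at the end shows how many messages of each type were dropped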
- -Load shedding is part of the Cassandra architecture, if this is a persistent issue it is generally a sign of an -overloaded node or cluster. - -.. _oom-map-failed: - -Cassandra dies with ``java.lang.OutOfMemoryError: Map failed`` --------------------------------------------------------------- - -If Cassandra is dying **specifically** with the "Map failed" message, it means the OS is denying java the ability to -lock more memory. In linux, this typically means memlock is limited. Check ``/proc//limits`` to verify -this and raise it (eg, via ulimit in bash). You may also need to increase ``vm.max_map_count.`` Note that the debian -package handles this for you automatically. - - -.. _what-on-same-timestamp-update: - -What happens if two updates are made with the same timestamp? -------------------------------------------------------------- - -Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a -deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics -should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: -first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger -value is selected. - -.. _why-bootstrapping-stream-error: - -Why bootstrapping a new node fails with a "Stream failed" error? ----------------------------------------------------------------- - -Two main possibilities: - -#. the GC may be creating long pauses disrupting the streaming process -#. compactions happening in the background hold streaming long enough that the TCP connection fails - -In the first case, regular GC tuning advices apply. In the second case, you need to set TCP keepalive to a lower value -(default is very high on Linux). Try to just run the following:: - - $ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5 - -To make those settings permanent, add them to your ``/etc/sysctl.conf`` file. - -Note: `GCE `__'s firewall will always interrupt TCP connections that are inactive for -more than 10 min. Running the above command is highly recommended in that environment. - - - - - - - - - - - diff --git a/src/doc/4.0-beta1/_sources/getting_started/configuring.rst.txt b/src/doc/4.0-beta1/_sources/getting_started/configuring.rst.txt deleted file mode 100644 index e71eeedbe..000000000 --- a/src/doc/4.0-beta1/_sources/getting_started/configuring.rst.txt +++ /dev/null @@ -1,67 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -Configuring Cassandra ---------------------- - -For running Cassandra on a single node, the default configuration file present at ``./conf/cassandra.yaml`` is enough, -you shouldn't need to change any configuration. However, when you deploy a cluster of nodes, or use clients that -are not on the same host, then there are some parameters that must be changed. - -The Cassandra configuration files can be found in the ``conf`` directory of tarballs. For packages, the configuration -files will be located in ``/etc/cassandra``. - -Main runtime properties -^^^^^^^^^^^^^^^^^^^^^^^ - -Most of configuration in Cassandra is done via yaml properties that can be set in ``cassandra.yaml``. At a minimum you -should consider setting the following properties: - -- ``cluster_name``: the name of your cluster. -- ``seeds``: a comma separated list of the IP addresses of your cluster seeds. -- ``storage_port``: you don't necessarily need to change this but make sure that there are no firewalls blocking this - port. -- ``listen_address``: the IP address of your node, this is what allows other nodes to communicate with this node so it - is important that you change it. Alternatively, you can set ``listen_interface`` to tell Cassandra which interface to - use, and consecutively which address to use. Set only one, not both. -- ``native_transport_port``: as for storage\_port, make sure this port is not blocked by firewalls as clients will - communicate with Cassandra on this port. - -Changing the location of directories -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following yaml properties control the location of directories: - -- ``data_file_directories``: one or more directories where data files are located. -- ``commitlog_directory``: the directory where commitlog files are located. -- ``saved_caches_directory``: the directory where saved caches are located. -- ``hints_directory``: the directory where hints are located. - -For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks. - -Environment variables -^^^^^^^^^^^^^^^^^^^^^ - -JVM-level settings such as heap size can be set in ``cassandra-env.sh``. You can add any additional JVM command line -argument to the ``JVM_OPTS`` environment variable; when Cassandra starts these arguments will be passed to the JVM. - -Logging -^^^^^^^ - -The logger in use is logback. You can change logging properties by editing ``logback.xml``. By default it will log at -INFO level into a file called ``system.log`` and at debug level into a file called ``debug.log``. When running in the -foreground, it will also log at INFO level to the console. - diff --git a/src/doc/4.0-beta1/_sources/getting_started/drivers.rst.txt b/src/doc/4.0-beta1/_sources/getting_started/drivers.rst.txt deleted file mode 100644 index 9a2c1567a..000000000 --- a/src/doc/4.0-beta1/_sources/getting_started/drivers.rst.txt +++ /dev/null @@ -1,123 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. 
distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _client-drivers: - -Client drivers --------------- - -Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver. - -Java -^^^^ - -- `Achilles `__ -- `Astyanax `__ -- `Casser `__ -- `Datastax Java driver `__ -- `Kundera `__ -- `PlayORM `__ - -Python -^^^^^^ - -- `Datastax Python driver `__ - -Ruby -^^^^ - -- `Datastax Ruby driver `__ - -C# / .NET -^^^^^^^^^ - -- `Cassandra Sharp `__ -- `Datastax C# driver `__ -- `Fluent Cassandra `__ - -Nodejs -^^^^^^ - -- `Datastax Nodejs driver `__ -- `Node-Cassandra-CQL `__ - -PHP -^^^ - -- `CQL \| PHP `__ -- `Datastax PHP driver `__ -- `PHP-Cassandra `__ -- `PHP Library for Cassandra `__ - -C++ -^^^ - -- `Datastax C++ driver `__ -- `libQTCassandra `__ - -Scala -^^^^^ - -- `Datastax Spark connector `__ -- `Phantom `__ -- `Quill `__ - -Clojure -^^^^^^^ - -- `Alia `__ -- `Cassaforte `__ -- `Hayt `__ - -Erlang -^^^^^^ - -- `CQerl `__ -- `Erlcass `__ - -Go -^^ - -- `CQLc `__ -- `Gocassa `__ -- `GoCQL `__ - -Haskell -^^^^^^^ - -- `Cassy `__ - -Rust -^^^^ - -- `Rust CQL `__ - -Perl -^^^^ - -- `Cassandra::Client and DBD::Cassandra `__ - -Elixir -^^^^^^ - -- `Xandra `__ -- `CQEx `__ - -Dart -^^^^ - -- `dart_cassandra_cql `__ diff --git a/src/doc/4.0-beta1/_sources/getting_started/index.rst.txt b/src/doc/4.0-beta1/_sources/getting_started/index.rst.txt deleted file mode 100644 index a699aee97..000000000 --- a/src/doc/4.0-beta1/_sources/getting_started/index.rst.txt +++ /dev/null @@ -1,34 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Getting Started -=============== - -This section covers how to get started using Apache Cassandra and should be the first thing to read if you are new to -Cassandra. - -.. toctree:: - :maxdepth: 2 - - installing - configuring - querying - drivers - production - - diff --git a/src/doc/4.0-beta1/_sources/getting_started/installing.rst.txt b/src/doc/4.0-beta1/_sources/getting_started/installing.rst.txt deleted file mode 100644 index f3a22f21a..000000000 --- a/src/doc/4.0-beta1/_sources/getting_started/installing.rst.txt +++ /dev/null @@ -1,324 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. 
"License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Installing Cassandra --------------------- - -These are the instructions for deploying the supported releases of Apache Cassandra on Linux servers. - -Cassandra runs on a wide array of Linux distributions including (but not limited to): - -- Ubuntu, most notably LTS releases 16.04 to 18.04 -- CentOS & RedHat Enterprise Linux (RHEL) including 6.6 to 7.7 -- Amazon Linux AMIs including 2016.09 through to Linux 2 -- Debian versions 8 & 9 -- SUSE Enterprise Linux 12 - -This is not an exhaustive list of operating system platforms, nor is it prescriptive. However users will be -well-advised to conduct exhaustive tests of their own particularly for less-popular distributions of Linux. -Deploying on older versions is not recommended unless you have previous experience with the older distribution -in a production environment. - -Prerequisites -^^^^^^^^^^^^^ - -- Install the latest version of Java 8, either the `Oracle Java Standard Edition 8 - `__ or `OpenJDK 8 `__. To - verify that you have the correct version of java installed, type ``java -version``. -- **NOTE**: *Experimental* support for Java 11 was added in Cassandra 4.0 (`CASSANDRA-9608 `__). - Running Cassandra on Java 11 is *experimental*. Do so at your own risk. For more information, see - `NEWS.txt `__. -- For using cqlsh, the latest version of `Python 2.7 `__ or Python 3.6+. To verify that you have - the correct version of Python installed, type ``python --version``. - -Choosing an installation method -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For most users, installing the binary tarball is the simplest choice. The tarball unpacks all its contents -into a single location with binaries and configuration files located in their own subdirectories. The most -obvious attribute of the tarball installation is it does not require ``root`` permissions and can be -installed on any Linux distribution. - -Packaged installations require ``root`` permissions. Install the RPM build on CentOS and RHEL-based -distributions if you want to install Cassandra using YUM. Install the Debian build on Ubuntu and other -Debian-based distributions if you want to install Cassandra using APT. Note that both the YUM and APT -methods required ``root`` permissions and will install the binaries and configuration files as the -``cassandra`` OS user. - -Installing the binary tarball -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. Verify the version of Java installed. For example: - -:: - - $ java -version - openjdk version "1.8.0_222" - OpenJDK Runtime Environment (build 1.8.0_222-8u222-b10-1ubuntu1~16.04.1-b10) - OpenJDK 64-Bit Server VM (build 25.222-b10, mixed mode) - -2. Download the binary tarball from one of the mirrors on the `Apache Cassandra Download `__ - site. For example, to download 4.0: - -:: - - $ curl -OL http://apache.mirror.digitalpacific.com.au/cassandra/4.0.0/apache-cassandra-4.0.0-bin.tar.gz - -NOTE: The mirrors only host the latest versions of each major supported release. To download an earlier -version of Cassandra, visit the `Apache Archives `__. 
- -3. OPTIONAL: Verify the integrity of the downloaded tarball using one of the methods `here `__. - For example, to verify the hash of the downloaded file using GPG: - -:: - - $ gpg --print-md SHA256 apache-cassandra-4.0.0-bin.tar.gz - apache-cassandra-4.0.0-bin.tar.gz: 28757DDE 589F7041 0F9A6A95 C39EE7E6 - CDE63440 E2B06B91 AE6B2006 14FA364D - -Compare the signature with the SHA256 file from the Downloads site: - -:: - - $ curl -L https://downloads.apache.org/cassandra/4.0.0/apache-cassandra-4.0.0-bin.tar.gz.sha256 - 28757dde589f70410f9a6a95c39ee7e6cde63440e2b06b91ae6b200614fa364d - -4. Unpack the tarball: - -:: - - $ tar xzvf apache-cassandra-4.0.0-bin.tar.gz - -The files will be extracted to the ``apache-cassandra-4.0.0/`` directory. This is the tarball installation -location. - -5. Located in the tarball installation location are the directories for the scripts, binaries, utilities, configuration, data and log files: - -:: - - / - bin/ - conf/ - data/ - doc/ - interface/ - javadoc/ - lib/ - logs/ - pylib/ - tools/ - -For information on how to configure your installation, see -`Configuring Cassandra `__. - -6. Start Cassandra: - -:: - - $ cd apache-cassandra-4.0.0/ - $ bin/cassandra - -NOTE: This will run Cassandra as the authenticated Linux user. - -You can monitor the progress of the startup with: - -:: - - $ tail -f logs/system.log - -Cassandra is ready when you see an entry like this in the ``system.log``: - -:: - - INFO [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)... - -7. Check the status of Cassandra: - -:: - - $ bin/nodetool status - -The status column in the output should report UN which stands for "Up/Normal". - -Alternatively, connect to the database with: - -:: - - $ bin/cqlsh - -Installing the Debian packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. Verify the version of Java installed. For example: - -:: - - $ java -version - openjdk version "1.8.0_222" - OpenJDK Runtime Environment (build 1.8.0_222-8u222-b10-1ubuntu1~16.04.1-b10) - OpenJDK 64-Bit Server VM (build 25.222-b10, mixed mode) - -2. Add the Apache repository of Cassandra to the file ``cassandra.sources.list``. The latest major version - is 4.0 and the corresponding distribution name is ``40x`` (with an "x" as the suffix). - For older releases use ``311x`` for C* 3.11 series, ``30x`` for 3.0, ``22x`` for 2.2 and ``21x`` for 2.1. - For example, to add the repository for version 4.0 (``40x``): - -:: - - $ echo "deb http://www.apache.org/dist/cassandra/debian 40x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list - deb http://www.apache.org/dist/cassandra/debian 40x main - -3. Add the Apache Cassandra repository keys to the list of trusted keys on the server: - -:: - - $ curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add - - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed - 100 266k 100 266k 0 0 320k 0 --:--:-- --:--:-- --:--:-- 320k - OK - -4. Update the package index from sources: - -:: - - $ sudo apt-get update - -5. Install Cassandra with APT: - -:: - - $ sudo apt-get install cassandra - - -NOTE: A new Linux user ``cassandra`` will get created as part of the installation. The Cassandra service -will also be run as this user. - -6. The Cassandra service gets started automatically after installation. 
Monitor the progress of - the startup with: - -:: - - $ tail -f /var/log/cassandra/system.log - -Cassandra is ready when you see an entry like this in the ``system.log``: - -:: - - INFO [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)... - -NOTE: For information on how to configure your installation, see -`Configuring Cassandra `__. - -7. Check the status of Cassandra: - -:: - - $ nodetool status - -The status column in the output should report ``UN`` which stands for "Up/Normal". - -Alternatively, connect to the database with: - -:: - - $ cqlsh - -Installing the RPM packages -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. Verify the version of Java installed. For example: - -:: - - $ java -version - openjdk version "1.8.0_222" - OpenJDK Runtime Environment (build 1.8.0_232-b09) - OpenJDK 64-Bit Server VM (build 25.232-b09, mixed mode) - -2. Add the Apache repository of Cassandra to the file ``/etc/yum.repos.d/cassandra.repo`` (as the ``root`` - user). The latest major version is 4.0 and the corresponding distribution name is ``40x`` (with an "x" as the suffix). - For older releases use ``311x`` for C* 3.11 series, ``30x`` for 3.0, ``22x`` for 2.2 and ``21x`` for 2.1. - For example, to add the repository for version 4.0 (``40x``): - -:: - - [cassandra] - name=Apache Cassandra - baseurl=https://downloads.apache.org/cassandra/redhat/40x/ - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=https://downloads.apache.org/cassandra/KEYS - -3. Update the package index from sources: - -:: - - $ sudo yum update - -4. Install Cassandra with YUM: - -:: - - $ sudo yum install cassandra - - -NOTE: A new Linux user ``cassandra`` will get created as part of the installation. The Cassandra service -will also be run as this user. - -5. Start the Cassandra service: - -:: - - $ sudo service cassandra start - -6. Monitor the progress of the startup with: - -:: - - $ tail -f /var/log/cassandra/system.log - -Cassandra is ready when you see an entry like this in the ``system.log``: - -:: - - INFO [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)... - -NOTE: For information on how to configure your installation, see -`Configuring Cassandra `__. - -7. Check the status of Cassandra: - -:: - - $ nodetool status - -The status column in the output should report ``UN`` which stands for "Up/Normal". - -Alternatively, connect to the database with: - -:: - - $ cqlsh - -Further installation info -^^^^^^^^^^^^^^^^^^^^^^^^^ - -For help with installation issues, see the `Troubleshooting `__ section. - - diff --git a/src/doc/4.0-beta1/_sources/getting_started/production.rst.txt b/src/doc/4.0-beta1/_sources/getting_started/production.rst.txt deleted file mode 100644 index fe0c4a591..000000000 --- a/src/doc/4.0-beta1/_sources/getting_started/production.rst.txt +++ /dev/null @@ -1,156 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. 
distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Production Recommendations ----------------------------- - -The ``cassandra.yaml`` and ``jvm.options`` files have a number of notes and recommendations for production usage. This page -expands on some of the notes in these files with additional information. - -Tokens -^^^^^^^ - -Using more than 1 token (referred to as vnodes) allows for more flexible expansion and more streaming peers when -bootstrapping new nodes into the cluster. This can limit the negative impact of streaming (I/O and CPU overhead) -as well as allow for incremental cluster expansion. - -As a tradeoff, more tokens will lead to sharing data with more peers, which can result in decreased availability. To learn more about this we -recommend reading `this paper `_. - -The number of tokens can be changed using the following setting: - -``num_tokens: 16`` - - -Here are the most common token counts with a brief explanation of when and why you would use each one. - -+-------------+---------------------------------------------------------------------------------------------------+ -| Token Count | Description | -+=============+===================================================================================================+ -| 1 | Maximum availablility, maximum cluster size, fewest peers, | -| | but inflexible expansion. Must always | -| | double size of cluster to expand and remain balanced. | -+-------------+---------------------------------------------------------------------------------------------------+ -| 4 | A healthy mix of elasticity and availability. Recommended for clusters which will eventually | -| | reach over 30 nodes. Requires adding approximately 20% more nodes to remain balanced. | -| | Shrinking a cluster may result in cluster imbalance. | -+-------------+---------------------------------------------------------------------------------------------------+ -| 16 | Best for heavily elastic clusters which expand and shrink regularly, but may have issues | -| | availability with larger clusters. Not recommended for clusters over 50 nodes. | -+-------------+---------------------------------------------------------------------------------------------------+ - - -In addition to setting the token count, it's extremely important that ``allocate_tokens_for_local_replication_factor`` be -set as well, to ensure even token allocation. - -.. _read-ahead: - -Read Ahead -^^^^^^^^^^^ - -Read ahead is an operating system feature that attempts to keep as much data loaded in the page cache as possible. The -goal is to decrease latency by using additional throughput on reads where the latency penalty is high due to seek times -on spinning disks. By leveraging read ahead, the OS can pull additional data into memory without the cost of additional -seeks. This works well when available RAM is greater than the size of the hot dataset, but can be problematic when the -hot dataset is much larger than available RAM. The benefit of read ahead decreases as the size of your hot dataset gets -bigger in proportion to available memory. 
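As a minimal sketch of how to inspect this on a running system (the device name is only an example), the current read ahead value can be queried with ``blockdev``; like ``--setra``, the value is reported in 512 byte sectors, so an output of 256 corresponds to 128KB::

    $ blockdev --getra /dev/sda1
    256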
- -With small partitions (usually tables with no partition key, but not limited to this case) and solid state drives, read -ahead can increase disk usage without any of the latency benefits, and in some cases can result in up to -a 5x latency and throughput performance penalty. Read heavy, key/value tables with small (under 1KB) rows are especially -prone to this problem. - -We recommend the following read ahead settings: - -+----------------+-------------------------+ -| Hardware | Initial Recommendation | -+================+=========================+ -|Spinning Disks | 64KB | -+----------------+-------------------------+ -|SSD | 4KB | -+----------------+-------------------------+ - -Read ahead can be adjusted on Linux systems by using the `blockdev` tool. - -For example, we can set read ahead of ``/dev/sda1` to 4KB by doing the following:: - - blockdev --setra 8 /dev/sda1 - -**Note**: blockdev accepts the number of 512 byte sectors to read ahead. The argument of 8 above is equivilent to 4KB. - -Since each system is different, use the above recommendations as a starting point and tuning based on your SLA and -throughput requirements. To understand how read ahead impacts disk resource usage we recommend carefully reading through the -:ref:`troubleshooting ` portion of the documentation. - - -Compression -^^^^^^^^^^^^ - -Compressed data is stored by compressing fixed size byte buffers and writing the data to disk. The buffer size is -determined by the ``chunk_length_in_kb`` element in the compression map of the schema settings. - -The default setting is 16KB starting with Cassandra 4.0. - -Since the entire compressed buffer must be read off disk, using too high of a compression chunk length can lead to -significant overhead when reading small records. Combined with the default read ahead setting this can result in massive -read amplification for certain workloads. - -LZ4Compressor is the default and recommended compression algorithm. - -There is additional information on this topic on `The Last Pickle Blog `_. - -Compaction -^^^^^^^^^^^^ - -There are different :ref:`compaction ` strategies available for different workloads. -We recommend reading up on the different strategies to understand which is the best for your environment. Different tables -may (and frequently do) use different compaction strategies on the same cluster. - -Encryption -^^^^^^^^^^^ - -It is significantly easier to set up peer to peer encryption and client server encryption when setting up your production -cluster as opposed to setting it up once the cluster is already serving production traffic. If you are planning on using network encryption -eventually (in any form), we recommend setting it up now. Changing these configurations down the line is not impossible, -but mistakes can result in downtime or data loss. - -Ensure Keyspaces are Created with NetworkTopologyStrategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Production clusters should never use SimpleStrategy. Production keyspaces should use the NetworkTopologyStrategy (NTS). - -For example:: - - create KEYSPACE mykeyspace WITH replication = - {'class': 'NetworkTopologyStrategy', 'datacenter1': 3}; - -NetworkTopologyStrategy allows Cassandra to take advantage of multiple racks and data centers. - -Configure Racks and Snitch -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -**Correctly configuring or changing racks after a cluster has been provisioned is an unsupported process**. 
Migrating from -a single rack to multiple racks is also unsupported and can result in data loss. - -Using ``GossipingPropertyFileSnitch`` is the most flexible solution for on premise or mixed cloud environments. ``Ec2Snitch`` -is reliable for AWS EC2 only environments. - - - - - - - diff --git a/src/doc/4.0-beta1/_sources/getting_started/querying.rst.txt b/src/doc/4.0-beta1/_sources/getting_started/querying.rst.txt deleted file mode 100644 index 55b162bb4..000000000 --- a/src/doc/4.0-beta1/_sources/getting_started/querying.rst.txt +++ /dev/null @@ -1,52 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Inserting and querying ----------------------- - -The API to Cassandra is :ref:`CQL `, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done: - -- either using cqlsh, -- or through a client driver for Cassandra. - -CQLSH -^^^^^ - -cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:: - - $ bin/cqlsh localhost - Connected to Test Cluster at localhost:9042. - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - Use HELP for help. - cqlsh> SELECT cluster_name, listen_address FROM system.local; - - cluster_name | listen_address - --------------+---------------- - Test Cluster | 127.0.0.1 - - (1 rows) - cqlsh> - -See the :ref:`cqlsh section ` for full documentation. - -Client drivers -^^^^^^^^^^^^^^ - -A lot of client drivers are provided by the Community and a list of known drivers is provided in :ref:`the next section -`. You should refer to the documentation of each drivers for more information on how to use them. diff --git a/src/doc/4.0-beta1/_sources/index.rst.txt b/src/doc/4.0-beta1/_sources/index.rst.txt deleted file mode 100644 index 302f8e7fa..000000000 --- a/src/doc/4.0-beta1/_sources/index.rst.txt +++ /dev/null @@ -1,43 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -Welcome to Apache Cassandra's documentation! -============================================ - -This is the official documentation for `Apache Cassandra `__ |version|. If you would like -to contribute to this documentation, you are welcome to do so by submitting your contribution like any other patch -following `these instructions `__. - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting_started/index - new/index - architecture/index - cql/index - data_modeling/index - configuration/index - operating/index - tools/index - troubleshooting/index - development/index - faq/index - plugins/index - - bugs - contactus diff --git a/src/doc/4.0-beta1/_sources/new/auditlogging.rst.txt b/src/doc/4.0-beta1/_sources/new/auditlogging.rst.txt deleted file mode 100644 index 0a15a9f6c..000000000 --- a/src/doc/4.0-beta1/_sources/new/auditlogging.rst.txt +++ /dev/null @@ -1,440 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Audit Logging -------------- - -Audit Logging is a new feature in Apache Cassandra 4.0 (`CASSANDRA-12151 -`_). All database activity is logged to a directory in the local filesystem and the audit log files are rolled periodically. All database operations are monitored and recorded. Audit logs are stored in local directory files instead of the database itself as it provides several benefits, some of which are: - -- No additional database capacity is needed to store audit logs -- No query tool is required while storing the audit logs in the database would require a query tool -- Latency of database operations is not affected; no performance impact -- It is easier to implement file based logging than database based logging - -What does Audit Logging Log? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Audit logging logs: - -1. All authentication which includes successful and failed login attempts -2. All database command requests to CQL. Both failed and successful CQL is logged - -More specifically an audit log entry could be one of two types: - -a) CQL Audit Log Entry Type or -b) Common Audit Log Entry Type - -Each of these types comprises of several database operations. The CQL Audit Log Entry Type could be one of the following; the category of the CQL audit log entry type is listed in parentheses. - -1. SELECT(QUERY), -2. UPDATE(DML), -3. DELETE(DML), -4. TRUNCATE(DDL), -5. CREATE_KEYSPACE(DDL), -6. ALTER_KEYSPACE(DDL), -7. DROP_KEYSPACE(DDL), -8. CREATE_TABLE(DDL), -9. DROP_TABLE(DDL), -10. PREPARE_STATEMENT(PREPARE), -11. DROP_TRIGGER(DDL), -12. LIST_USERS(DCL), -13. CREATE_INDEX(DDL), -14. DROP_INDEX(DDL), -15. GRANT(DCL), -16. REVOKE(DCL), -17. CREATE_TYPE(DDL), -18. DROP_AGGREGATE(DDL), -19. ALTER_VIEW(DDL), -20. CREATE_VIEW(DDL), -21. 
DROP_ROLE(DCL), -22. CREATE_FUNCTION(DDL), -23. ALTER_TABLE(DDL), -24. BATCH(DML), -25. CREATE_AGGREGATE(DDL), -26. DROP_VIEW(DDL), -27. DROP_TYPE(DDL), -28. DROP_FUNCTION(DDL), -29. ALTER_ROLE(DCL), -30. CREATE_TRIGGER(DDL), -31. LIST_ROLES(DCL), -32. LIST_PERMISSIONS(DCL), -33. ALTER_TYPE(DDL), -34. CREATE_ROLE(DCL), -35. USE_KEYSPACE (OTHER). - -The Common Audit Log Entry Type could be one of the following; the category of the Common audit log entry type is listed in parentheses. - -1. REQUEST_FAILURE(ERROR), -2. LOGIN_ERROR(AUTH), -3. UNAUTHORIZED_ATTEMPT(AUTH), -4. LOGIN_SUCCESS (AUTH). - -What Audit Logging does not Log? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Audit logging does not log: - -1. Configuration changes made in ``cassandra.yaml`` -2. Nodetool Commands - -Audit Logging is Flexible and Configurable -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Audit logging is flexible and configurable in ``cassandra.yaml`` as follows: - -- Keyspaces and tables to be monitored and audited may be specified. -- Users to be included/excluded may be specified. By default all users are audit logged. -- Categories of operations to audit or exclude may be specified. -- The frequency at which to roll the log files may be specified. Default frequency is hourly. - -Configuring Audit Logging -^^^^^^^^^^^^^^^^^^^^^^^^^ -Audit Logging is configured on each node separately. Audit Logging is configured in ``cassandra.yaml`` in the ``audit_logging_options`` setting. -The settings may be same/different on each node. - -Enabling Audit Logging -********************** -Audit logging is enabled by setting the ``enabled`` option to ``true`` in the ``audit_logging_options`` setting. - -:: - - audit_logging_options: - enabled: true - -Setting the Logger -****************** -The audit logger is set with the ``logger`` option. - -:: - - logger: BinAuditLogger - -Two types of audit loggers are supported: ``FileAuditLogger`` and ``BinAuditLogger``. -``BinAuditLogger`` is the default setting. The ``BinAuditLogger`` is an efficient way to log events to file in a binary format. - -``FileAuditLogger`` is synchronous, file-based audit logger; just uses the standard logging mechanism. ``FileAuditLogger`` logs events to ``audit/audit.log`` file using ``slf4j`` logger. - -The ``NoOpAuditLogger`` is a No-Op implementation of the audit logger to be used as a default audit logger when audit logging is disabled. - -Setting the Audit Logs Directory -******************************** -The audit logs directory is set with the ``audit_logs_dir`` option. A new directory is not created automatically and an existing directory must be set. Audit Logs directory can be configured using ``cassandra.logdir.audit`` system property or default is set to ``cassandra.logdir + /audit/``. A user created directory may be set. As an example, create a directory for the audit logs and set its permissions. - -:: - - sudo mkdir –p /cassandra/audit/logs/hourly - sudo chmod -R 777 /cassandra/audit/logs/hourly - -Set the directory for the audit logs directory using the ``audit_logs_dir`` option. - -:: - - audit_logs_dir: "/cassandra/audit/logs/hourly" - - -Setting Keyspaces to Audit -************************** -Set the keyspaces to include with the ``included_keyspaces`` option and the keyspaces to exclude with the ``excluded_keyspaces`` option. By default all keyspaces are included. By default, ``system``, ``system_schema`` and ``system_virtual_schema`` are excluded. 
- -:: - - # included_keyspaces: - # excluded_keyspaces: system, system_schema, system_virtual_schema - -Setting Categories to Audit -*************************** - -The categories of database operations to be included are specified with the ``included_categories`` option as a comma separated list. By default all supported categories are included. The categories of database operations to be excluded are specified with ``excluded_categories`` option as a comma separated list. By default no category is excluded. - -:: - - # included_categories: - # excluded_categories: - -The supported categories for audit log are: - -1. QUERY -2. DML -3. DDL -4. DCL -5. OTHER -6. AUTH -7. ERROR -8. PREPARE - -Setting Users to Audit -********************** - -Users to audit log are set with the ``included_users`` and ``excluded_users`` options. The ``included_users`` option specifies a comma separated list of users to include explicitly and by default all users are included. The ``excluded_users`` option specifies a comma separated list of users to exclude explicitly and by default no user is excluded. - -:: - - # included_users: - # excluded_users: - -Setting the Roll Frequency -*************************** -The ``roll_cycle`` option sets the frequency at which the audit log file is rolled. Supported values are ``MINUTELY``, ``HOURLY``, and ``DAILY``. Default value is ``HOURLY``, which implies that after every hour a new audit log file is created. - -:: - - roll_cycle: HOURLY - -An audit log file could get rolled for other reasons as well such as a log file reaches the configured size threshold. - -Setting Archiving Options -************************* - -The archiving options are for archiving the rolled audit logs. The ``archive`` command to use is set with the ``archive_command`` option and the ``max_archive_retries`` sets the maximum # of tries of failed archive commands. - -:: - - # archive_command: - # max_archive_retries: 10 - -Default archive command is ``"/path/to/script.sh %path"`` where ``%path`` is replaced with the file being rolled: - -Other Settings -*************** - -The other audit logs settings are as follows. - -:: - - # block: true - # max_queue_weight: 268435456 # 256 MiB - # max_log_size: 17179869184 # 16 GiB - -The ``block`` option specifies whether the audit logging should block if the logging falls behind or should drop log records. - -The ``max_queue_weight`` option sets the maximum weight of in memory queue for records waiting to be written to the file before blocking or dropping. - -The ``max_log_size`` option sets the maximum size of the rolled files to retain on disk before deleting the oldest. - -Using Nodetool to Enable Audit Logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``nodetool enableauditlog`` command may be used to enable audit logs and it overrides the settings in ``cassandra.yaml``. The ``nodetool enableauditlog`` command syntax is as follows. - -:: - - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] enableauditlog - [--excluded-categories ] - [--excluded-keyspaces ] - [--excluded-users ] - [--included-categories ] - [--included-keyspaces ] - [--included-users ] [--logger ] - -OPTIONS - --excluded-categories - Comma separated list of Audit Log Categories to be excluded for - audit log. If not set the value from cassandra.yaml will be used - - --excluded-keyspaces - Comma separated list of keyspaces to be excluded for audit log. 
If - not set the value from cassandra.yaml will be used - - --excluded-users - Comma separated list of users to be excluded for audit log. If not - set the value from cassandra.yaml will be used - - -h , --host - Node hostname or ip address - - --included-categories - Comma separated list of Audit Log Categories to be included for - audit log. If not set the value from cassandra.yaml will be used - - --included-keyspaces - Comma separated list of keyspaces to be included for audit log. If - not set the value from cassandra.yaml will be used - - --included-users - Comma separated list of users to be included for audit log. If not - set the value from cassandra.yaml will be used - - --logger - Logger name to be used for AuditLogging. Default BinAuditLogger. If - not set the value from cassandra.yaml will be used - - -p , --port - Remote jmx agent port number - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - -u , --username - Remote jmx agent username - - -The ``nodetool disableauditlog`` command disables audit log. The command syntax is as follows. - -:: - - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] disableauditlog - -OPTIONS - -h , --host - Node hostname or ip address - - -p , --port - Remote jmx agent port number - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - -u , --username - Remote jmx agent username - -Viewing the Audit Logs -^^^^^^^^^^^^^^^^^^^^^^ -An audit log event comprises of a keyspace that is being audited, the operation that is being logged, the scope and the user. An audit log entry comprises of the following attributes concatenated with a "|". - -:: - - type (AuditLogEntryType): Type of request - source (InetAddressAndPort): Source IP Address from which request originated - user (String): User name - timestamp (long ): Timestamp of the request - batch (UUID): Batch of request - keyspace (String): Keyspace on which request is made - scope (String): Scope of request such as Table/Function/Aggregate name - operation (String): Database operation such as CQL command - options (QueryOptions): CQL Query options - state (QueryState): State related to a given query - -Some of these attributes may not be applicable to a given request and not all of these options must be set. - -An Audit Logging Demo -^^^^^^^^^^^^^^^^^^^^^^ -To demonstrate audit logging enable and configure audit logs with following settings. - -:: - - audit_logging_options: - enabled: true - logger: BinAuditLogger - audit_logs_dir: "/cassandra/audit/logs/hourly" - # included_keyspaces: - # excluded_keyspaces: system, system_schema, system_virtual_schema - # included_categories: - # excluded_categories: - # included_users: - # excluded_users: - roll_cycle: HOURLY - # block: true - # max_queue_weight: 268435456 # 256 MiB - # max_log_size: 17179869184 # 16 GiB - ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled: - # archive_command: - # max_archive_retries: 10 - -Create the audit log directory ``/cassandra/audit/logs/hourly`` and set its permissions as discussed earlier. Run some CQL commands such as create a keyspace, create a table and query a table. 
Any supported CQL commands may be run as discussed in section **What does Audit Logging Log?**. Change directory (with ``cd`` command) to the audit logs directory. - -:: - - cd /cassandra/audit/logs/hourly - -List the files/directories and some ``.cq4`` files should get listed. These are the audit logs files. - -:: - - [ec2-user@ip-10-0-2-238 hourly]$ ls -l - total 28 - -rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug 2 03:01 20190802-02.cq4 - -rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug 2 03:01 20190802-03.cq4 - -rw-rw-r--. 1 ec2-user ec2-user 65536 Aug 2 03:01 directory-listing.cq4t - -The ``auditlogviewer`` tool is used to dump audit logs. Run the ``auditlogviewer`` tool. Audit log files directory path is a required argument. The output should be similar to the following output. - -:: - - [ec2-user@ip-10-0-2-238 hourly]$ auditlogviewer /cassandra/audit/logs/hourly - WARN 03:12:11,124 Using Pauser.sleepy() as not enough processors, have 2, needs 8+ - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711427328|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE AuditLogKeyspace; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711427329|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE "auditlogkeyspace" - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711446279|type :SELECT|category:QUERY|ks:auditlogkeyspace|scope:t|operation:SELECT * FROM t; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564713878834|type :DROP_TABLE|category:DDL|ks:auditlogkeyspace|scope:t|operation:DROP TABLE IF EXISTS - AuditLogKeyspace.t; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/3.91.56.164|port:42382|timestamp:1564714618360|ty - pe:REQUEST_FAILURE|category:ERROR|operation:CREATE KEYSPACE AuditLogKeyspace - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};; Cannot add - existing keyspace "auditlogkeyspace" - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564714690968|type :DROP_KEYSPACE|category:DDL|ks:auditlogkeyspace|operation:DROP KEYSPACE AuditLogKeyspace; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/3.91.56.164|port:42406|timestamp:1564714708329|ty pe:CREATE_KEYSPACE|category:DDL|ks:auditlogkeyspace|operation:CREATE KEYSPACE - AuditLogKeyspace - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}; - Type: AuditLog - LogMessage: - user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564714870678|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE auditlogkeyspace; - [ec2-user@ip-10-0-2-238 hourly]$ - - -The ``auditlogviewer`` tool usage syntax is as follows. - -:: - - ./auditlogviewer - Audit log files directory path is a required argument. - usage: auditlogviewer [...] [options] - -- - View the audit log contents in human readable format - -- - Options are: - -f,--follow Upon reaching the end of the log continue indefinitely - waiting for more records - -h,--help display this help message - -r,--roll_cycle How often to roll the log file was rolled. May be - necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY, - DAILY). Default HOURLY. 
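For example, combining the ``--follow`` and ``--roll_cycle`` options shown above with the audit log directory used in the demo would tail the log as new records are written::

    $ auditlogviewer /cassandra/audit/logs/hourly --follow --roll_cycle HOURLY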
- -Diagnostic events for user audit logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Any native transport enabled client is able to subscribe to diagnostic events that are raised around authentication and CQL operations. These events can then be consumed and used by external tools to implement a Cassandra user auditing solution. - diff --git a/src/doc/4.0-beta1/_sources/new/fqllogging.rst.txt b/src/doc/4.0-beta1/_sources/new/fqllogging.rst.txt deleted file mode 100644 index 881f39fa8..000000000 --- a/src/doc/4.0-beta1/_sources/new/fqllogging.rst.txt +++ /dev/null @@ -1,689 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Full Query Logging ------------------- - -Apache Cassandra 4.0 adds a new feature to support a means of logging all queries as they were invoked (`CASSANDRA-13983 -`_). For correctness testing it's useful to be able to capture production traffic so that it can be replayed against both the old and new versions of Cassandra while comparing the results. - -Cassandra 4.0 includes an implementation of a full query logging (FQL) that uses chronicle-queue to implement a rotating log of queries. Some of the features of FQL are: - -- Single thread asynchronously writes log entries to disk to reduce impact on query latency -- Heap memory usage bounded by a weighted queue with configurable maximum weight sitting in front of logging thread -- If the weighted queue is full producers can be blocked or samples can be dropped -- Disk utilization is bounded by deleting old log segments once a configurable size is reached -- The on disk serialization uses a flexible schema binary format (chronicle-wire) making it easy to skip unrecognized fields, add new ones, and omit old ones. -- Can be enabled and configured via JMX, disabled, and reset (delete on disk data), logging path is configurable via both JMX and YAML -- Introduce new ``fqltool`` in ``/bin`` that currently implements ``Dump`` which can dump in a readable format full query logs as well as follow active full query logs. FQL ``Replay`` and ``Compare`` are also available. - -Cassandra 4.0 has a binary full query log based on Chronicle Queue that can be controlled using ``nodetool enablefullquerylog``, ``disablefullquerylog``, and ``resetfullquerylog``. The log contains all queries invoked, approximate time they were invoked, any parameters necessary to bind wildcard values, and all query options. A readable version of the log can be dumped or tailed using the new ``bin/fqltool`` utility. The full query log is designed to be safe to use in production and limits utilization of heap memory and disk space with limits you can specify when enabling the log. - -Objective -^^^^^^^^^^ -Full Query Logging logs all requests to the CQL interface. 
The full query logs could be used for debugging, performance benchmarking, testing and auditing CQL queries. The audit logs also include CQL requests but full query logging is dedicated to CQL requests only with features such as FQL Replay and FQL Compare that are not available in audit logging. - -Full Query Logger -^^^^^^^^^^^^^^^^^^ -The Full Query Logger is a logger that logs entire query contents after the query finishes. FQL only logs the queries that successfully complete. The other queries (e.g. timed out, failed) are not to be logged. Queries are logged in one of two modes: single query or batch of queries. The log for an invocation of a batch of queries includes the following attributes: - -:: - - type - The type of the batch - queries - CQL text of the queries - values - Values to bind to as parameters for the queries - queryOptions - Options associated with the query invocation - queryState - Timestamp state associated with the query invocation - batchTimeMillis - Approximate time in milliseconds since the epoch since the batch was invoked - -The log for single CQL query includes the following attributes: - -:: - - query - CQL query text - queryOptions - Options associated with the query invocation - queryState - Timestamp state associated with the query invocation - queryTimeMillis - Approximate time in milliseconds since the epoch since the batch was invoked - -Full query logging is backed up by ``BinLog``. BinLog is a quick and dirty binary log. Its goal is good enough performance, predictable footprint, simplicity in terms of implementation and configuration and most importantly minimal impact on producers of log records. Performance safety is accomplished by feeding items to the binary log using a weighted queue and dropping records if the binary log falls sufficiently far behind. Simplicity and good enough performance is achieved by using a single log writing thread as well as Chronicle Queue to handle writing the log, making it available for readers, as well as log rolling. - -Weighted queue is a wrapper around any blocking queue that turns it into a blocking weighted queue. The queue will weigh each element being added and removed. Adding to the queue is blocked if adding would violate the weight bound. If an element weighs in at larger than the capacity of the queue then exactly one such element will be allowed into the queue at a time. If the weight of an object changes after it is added it could create issues. Checking weight should be cheap so memorize expensive to compute weights. If weight throws that can also result in leaked permits so it's always a good idea to memorize weight so it doesn't throw. In the interests of not writing unit tests for methods no one uses there is a lot of ``UnsupportedOperationException``. If you need them then add them and add proper unit tests to ``WeightedQueueTest``. "Good" tests. 100% coverage including exception paths and resource leaks. - - -The FQL tracks information about store files: - -- Store files as they are added and their storage impact. Delete them if over storage limit. -- The files in the chronicle queue that have already rolled -- The number of bytes in store files that have already rolled - -FQL logger sequence is as follows: - -1. Start the consumer thread that writes log records. Can only be done once. -2. Offer a record to the log. If the in memory queue is full the record will be dropped and offer will return false. -3. Put a record into the log. 
If the in memory queue is full the putting thread will be blocked until there is space or it is interrupted. -4. Clean up the buffers on thread exit, finalization will check again once this is no longer reachable ensuring there are no stragglers in the queue. -5. Stop the consumer thread that writes log records. Can be called multiple times. - -Next, we shall demonstrate full query logging with an example. - - -Configuring Full Query Logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Full Query Logger default options are configured on a per node basis in ``cassandra.yaml`` with following configuration property. - -:: - - full_query_logging_options: - -As an example setup create a three node Cassandra 4.0 cluster. The ``nodetool status`` command lists the nodes in the cluster. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool status - Datacenter: us-east-1 - ===================== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- AddressLoad Tokens Owns (effective) Host ID Rack - UN 10.0.1.115 442.42 KiB 25632.6% b64cb32a-b32a-46b4-9eeb-e123fa8fc287 us-east-1b - UN 10.0.3.206 559.52 KiB 25631.9% 74863177-684b-45f4-99f7-d1006625dc9e us-east-1d - UN 10.0.2.238 587.87 KiB 25635.5% 4dcdadd2-41f9-4f34-9892-1f20868b27c7 us-east-1c - - -In subsequent sub-sections we shall discuss enabling and configuring full query logging. - -Setting the FQL Directory -************************* - -A dedicated directory path must be provided to write full query log data to when the full query log is enabled. The directory for FQL must exist, and have permissions set. The full query log will recursively delete the contents of this path at times. It is recommended not to place links in this directory to other sections of the filesystem. The ``full_query_log_dir`` property in ``cassandra.yaml`` is pre-configured. - -:: - - full_query_log_dir: /tmp/cassandrafullquerylog - -The ``log_dir`` option may be used to configure the FQL directory if the ``full_query_log_dir`` is not set. - -:: - - full_query_logging_options: - # log_dir: - -Create the FQL directory if it does not exist and set its permissions. - -:: - - sudo mkdir -p /tmp/cassandrafullquerylog - sudo chmod -R 777 /tmp/cassandrafullquerylog - -Setting the Roll Cycle -********************** - -The ``roll_cycle`` option sets how often to roll FQL log segments so they can potentially be reclaimed. Supported values are ``MINUTELY``, ``HOURLY`` and ``DAILY``. Default setting is ``HOURLY``. - -:: - - roll_cycle: HOURLY - -Setting Other Options -********************* - -The ``block`` option specifies whether the FQL should block if the FQL falls behind or should drop log records. Default value of ``block`` is ``true``. The ``max_queue_weight`` option sets the maximum weight of in memory queue for records waiting to be written to the file before blocking or dropping. The ``max_log_size`` option sets the maximum size of the rolled files to retain on disk before deleting the oldest file. The ``archive_command`` option sets the archive command to execute on rolled log files. The ``max_archive_retries`` option sets the max number of retries of failed archive commands. - -:: - - # block: true - # max_queue_weight: 268435456 # 256 MiB - # max_log_size: 17179869184 # 16 GiB - ## archive command is "/path/to/script.sh %path" where %path is replaced with the file - being rolled: - # archive_command: - # max_archive_retries: 10 - -The ``max_queue_weight`` must be > 0. Similarly ``max_log_size`` must be > 0. An example full query logging options is as follows. 
- -:: - - full_query_log_dir: /tmp/cassandrafullquerylog - - # default options for full query logging - these can be overridden from command line when - executing - # nodetool enablefullquerylog - # nodetool enablefullquerylog - #full_query_logging_options: - # log_dir: - roll_cycle: HOURLY - # block: true - # max_queue_weight: 268435456 # 256 MiB - # max_log_size: 17179869184 # 16 GiB - ## archive command is "/path/to/script.sh %path" where %path is replaced with the file - being rolled: - # archive_command: - # max_archive_retries: 10 - -The ``full_query_log_dir`` setting is not within the ``full_query_logging_options`` but still is for full query logging. - -Enabling Full Query Logging -*************************** - -Full Query Logging is enabled on a per-node basis. . The ``nodetool enablefullquerylog`` command is used to enable full query logging. Defaults for the options are configured in ``cassandra.yaml`` and these can be overridden from command line. - -The syntax of the nodetool enablefullquerylog command is as follows: - -:: - - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] enablefullquerylog - [--archive-command ] [--blocking] - [--max-archive-retries ] - [--max-log-size ] [--max-queue-weight ] - [--path ] [--roll-cycle ] - - OPTIONS - --archive-command - Command that will handle archiving rolled full query log files. - Format is "/path/to/script.sh %path" where %path will be replaced - with the file to archive - - --blocking - If the queue is full whether to block producers or drop samples. - - -h , --host - Node hostname or ip address - - --max-archive-retries - Max number of archive retries. - - --max-log-size - How many bytes of log data to store before dropping segments. Might - not be respected if a log file hasn't rolled so it can be deleted. - - --max-queue-weight - Maximum number of bytes of query data to queue to disk before - blocking or dropping samples. - - -p , --port - Remote jmx agent port number - - --path - Path to store the full query log at. Will have it's contents - recursively deleted. - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - --roll-cycle - How often to roll the log file (MINUTELY, HOURLY, DAILY). - - -u , --username - Remote jmx agent username - -Run the following command on each node in the cluster. - -:: - - nodetool enablefullquerylog --path /tmp/cassandrafullquerylog - -After the full query logging has been enabled run some CQL statements to generate full query logs. - -Running CQL Statements -^^^^^^^^^^^^^^^^^^^^^^^ - -Start CQL interface with ``cqlsh`` command. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cqlsh - Connected to Cassandra Cluster at 127.0.0.1:9042. - [cqlsh 5.0.1 | Cassandra 4.0-SNAPSHOT | CQL spec 3.4.5 | Native protocol v4] - Use HELP for help. - cqlsh> - -Run some CQL statements. Create a keyspace. Create a table and add some data. Query the table. - -:: - - cqlsh> CREATE KEYSPACE AuditLogKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}; - cqlsh> USE AuditLogKeyspace; - cqlsh:auditlogkeyspace> CREATE TABLE t ( - ...id int, - ...k int, - ...v text, - ...PRIMARY KEY (id) - ... 
); - cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (0, 1, 'val1'); - cqlsh:auditlogkeyspace> SELECT * FROM t; - - id | k | v - ----+---+------ - 0 | 1 | val1 - - (1 rows) - cqlsh:auditlogkeyspace> - -Viewing the Full Query Logs -^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``fqltool`` is used to view the full query logs. The ``fqltool`` has the following usage syntax. - -:: - - fqltool [] - - The most commonly used fqltool commands are: - compare Compare result files generated by fqltool replay - dump Dump the contents of a full query log - help Display help information - replay Replay full query logs - - See 'fqltool help ' for more information on a specific command. - -The ``fqltool dump`` command is used to dump (list) the contents of a full query log. Run the ``fqltool dump`` command after some CQL statements have been run. - -The full query logs get listed. Truncated output is as follows: - -:: - - [ec2-user@ip-10-0-2-238 cassandrafullquerylog]$ fqltool dump ./ - WARN [main] 2019-08-02 03:07:53,635 Slf4jExceptionHandler.java:42 - Using Pauser.sleepy() as not enough processors, have 2, needs 8+ - Type: single-query - Query start time: 1564708322030 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system.peers - Values: - - Type: single-query - Query start time: 1564708322054 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system.local WHERE key='local' - Values: - - Type: single-query - Query start time: 1564708322109 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.keyspaces - Values: - - Type: single-query - Query start time: 1564708322116 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.tables - Values: - - Type: single-query - Query start time: 1564708322139 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.columns - Values: - - Type: single-query - Query start time: 1564708322142 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.functions - Values: - - Type: single-query - Query start time: 1564708322141 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.aggregates - Values: - - Type: single-query - Query start time: 1564708322143 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.types - Values: - - Type: single-query - Query start time: 1564708322144 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.indexes - Values: - - Type: single-query - Query start time: 1564708322142 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.triggers - Values: - - Type: single-query - Query start time: 1564708322145 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708322 - Query: SELECT * FROM system_schema.views - Values: - - Type: single-query 
- Query start time: 1564708345408 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:-2147483648 - Query: CREATE KEYSPACE AuditLogKeyspace - WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}; - Values: - - Type: single-query - Query start time: 1564708345675 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708345 - Query: SELECT peer, rpc_address, schema_version FROM system.peers - Values: - - Type: single-query - Query start time: 1564708345676 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708345 - Query: SELECT schema_version FROM system.local WHERE key='local' - Values: - - Type: single-query - Query start time: 1564708346323 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708346 - Query: SELECT * FROM system_schema.keyspaces WHERE keyspace_name = 'auditlogkeyspace' - Values: - - Type: single-query - Query start time: 1564708360873 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:-2147483648 - Query: USE AuditLogKeyspace; - Values: - - Type: single-query - Query start time: 1564708360874 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:-2147483648 - Query: USE "auditlogkeyspace" - Values: - - Type: single-query - Query start time: 1564708378837 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:-2147483648 - Query: CREATE TABLE t ( - id int, - k int, - v text, - PRIMARY KEY (id) - ); - Values: - - Type: single-query - Query start time: 1564708379247 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708379 - Query: SELECT * FROM system_schema.tables WHERE keyspace_name = 'auditlogkeyspace' AND table_name = 't' - Values: - - Type: single-query - Query start time: 1564708379255 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708379 - Query: SELECT * FROM system_schema.views WHERE keyspace_name = 'auditlogkeyspace' AND view_name = 't' - Values: - - Type: single-query - Query start time: 1564708397144 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708397 - Query: INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - Values: - - Type: single-query - Query start time: 1564708397167 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708397 - Query: INSERT INTO t (id, k, v) VALUES (0, 1, 'val1'); - Values: - - Type: single-query - Query start time: 1564708434782 - Protocol version: 4 - Generated timestamp:-9223372036854775808 - Generated nowInSeconds:1564708434 - Query: SELECT * FROM t; - Values: - - [ec2-user@ip-10-0-2-238 cassandrafullquerylog]$ - - - -Full query logs are generated on each node. Enabling of full query logging on one node and the log files generated on the node are as follows: - -:: - - [root@localhost ~]# ssh -i cassandra.pem ec2-user@52.1.243.83 - Last login: Fri Aug 2 00:14:53 2019 from 75.155.255.51 - [ec2-user@ip-10-0-3-206 ~]$ sudo mkdir /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-3-206 ~]$ sudo chmod -R 777 /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-3-206 ~]$ nodetool enablefullquerylog --path /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-3-206 ~]$ cd /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-3-206 cassandrafullquerylog]$ ls -l - total 44 - -rw-rw-r--. 
1 ec2-user ec2-user 83886080 Aug 2 01:24 20190802-01.cq4 - -rw-rw-r--. 1 ec2-user ec2-user 65536 Aug 2 01:23 directory-listing.cq4t - [ec2-user@ip-10-0-3-206 cassandrafullquerylog]$ - -Enabling of full query logging on another node and the log files generated on the node are as follows: - -:: - - [root@localhost ~]# ssh -i cassandra.pem ec2-user@3.86.103.229 - Last login: Fri Aug 2 00:13:04 2019 from 75.155.255.51 - [ec2-user@ip-10-0-1-115 ~]$ sudo mkdir /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-1-115 ~]$ sudo chmod -R 777 /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-1-115 ~]$ nodetool enablefullquerylog --path /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-1-115 ~]$ cd /tmp/cassandrafullquerylog - [ec2-user@ip-10-0-1-115 cassandrafullquerylog]$ ls -l - total 44 - -rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug 2 01:24 20190802-01.cq4 - -rw-rw-r--. 1 ec2-user ec2-user 65536 Aug 2 01:23 directory-listing.cq4t - [ec2-user@ip-10-0-1-115 cassandrafullquerylog]$ - -The ``nodetool resetfullquerylog`` resets the full query logger if it is enabled. Also deletes any generated files in the last used full query log path as well as the one configured in ``cassandra.yaml``. It stops the full query log and cleans files in the configured full query log directory from ``cassandra.yaml`` as well as JMX. - -Full Query Replay -^^^^^^^^^^^^^^^^^ -The ``fqltool`` provides the ``replay`` command (`CASSANDRA-14618 -`_) to replay the full query logs. The FQL replay could be run on a different machine or even a different cluster for testing, debugging and performance benchmarking. - -The main objectives of ``fqltool replay`` are: - -- To be able to compare different runs of production traffic against different versions/configurations of Cassandra. -- Take FQL logs from several machines and replay them in "order" by the timestamps recorded. -- Record the results from each run to be able to compare different runs (against different clusters/versions/etc). -- If fqltool replay is run against 2 or more clusters, the results could be compared. - -The FQL replay could also be used on the same node on which the full query log are generated to recreate a dropped database object. - - The syntax of ``fqltool replay`` is as follows: - -:: - - fqltool replay [--keyspace ] [--results ] - [--store-queries ] --target ... [--] - [...] - - OPTIONS - --keyspace - Only replay queries against this keyspace and queries without - keyspace set. - - --results - Where to store the results of the queries, this should be a - directory. Leave this option out to avoid storing results. - - --store-queries - Path to store the queries executed. Stores queries in the same order - as the result sets are in the result files. Requires --results - - --target - Hosts to replay the logs to, can be repeated to replay to more - hosts. - - -- - This option can be used to separate command-line options from the - list of argument, (useful when arguments might be mistaken for - command-line options - - [...] - Paths containing the full query logs to replay. - -As an example of using ``fqltool replay``, drop a keyspace. - -:: - - cqlsh:auditlogkeyspace> DROP KEYSPACE AuditLogKeyspace; - -Subsequently run ``fqltool replay``. The directory to store results of queries and the directory to store the queries run are specified and these directories must be created and permissions set before running ``fqltool replay``. The ``--results`` and ``--store-queries`` directories are optional but if ``--store-queries`` is to be set the ``--results`` must also be set. 
- -:: - - [ec2-user@ip-10-0-2-238 cassandra]$ fqltool replay --keyspace AuditLogKeyspace --results - /cassandra/fql/logs/results/replay --store-queries /cassandra/fql/logs/queries/replay -- - target 3.91.56.164 -- /tmp/cassandrafullquerylog - -Describe the keyspaces after running ``fqltool replay`` and the keyspace that was dropped gets listed again. - -:: - - cqlsh:auditlogkeyspace> DESC KEYSPACES; - - system_schema system system_distributed system_virtual_schema - system_auth auditlogkeyspace system_traces system_views - - cqlsh:auditlogkeyspace> - -Full Query Compare -^^^^^^^^^^^^^^^^^^ -The ``fqltool compare`` command (`CASSANDRA-14619 -`_) is used to compare result files generated by ``fqltool replay``. The ``fqltool compare`` command that can take the recorded runs from ``fqltool replay`` and compares them, it should output any differences and potentially all queries against the mismatching partition up until the mismatch. - -The ``fqltool compare`` could be used for comparing result files generated by different versions of Cassandra or different Cassandra configurations as an example. The command usage is as follows: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ fqltool help compare - NAME - fqltool compare - Compare result files generated by fqltool replay - - SYNOPSIS - fqltool compare --queries [--] [...] - - OPTIONS - --queries - Directory to read the queries from. It is produced by the fqltool - replay --store-queries option. - - -- - This option can be used to separate command-line options from the - list of argument, (useful when arguments might be mistaken for - command-line options - - [...] - Directories containing result files to compare. - -The ``fqltool compare`` stores each row as a separate chronicle document to be able to avoid reading up the entire result set in memory when comparing document formats: - -To mark the start of a new result set: - -:: - - ------------------- - version: int16 - type: column_definitions - column_count: int32; - column_definition: text, text - column_definition: text, text - .... - -------------------- - - -To mark a failed query set: - -:: - - --------------------- - version: int16 - type: query_failed - message: text - --------------------- - -To mark a row set: - -:: - - -------------------- - version: int16 - type: row - row_column_count: int32 - column: bytes - --------------------- - -To mark the end of a result set: - -:: - - ------------------- - version: int16 - type: end_resultset - ------------------- - - -Performance Overhead of FQL -^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In performance testing FQL appears to have little or no overhead in ``WRITE`` only workloads, and a minor overhead in ``MIXED`` workload. diff --git a/src/doc/4.0-beta1/_sources/new/index.rst.txt b/src/doc/4.0-beta1/_sources/new/index.rst.txt deleted file mode 100644 index 5ef867ba1..000000000 --- a/src/doc/4.0-beta1/_sources/new/index.rst.txt +++ /dev/null @@ -1,32 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. 
distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -New Features in Apache Cassandra 4.0 -==================================== - -This section covers the new features in Apache Cassandra 4.0. - -.. toctree:: - :maxdepth: 2 - - java11 - virtualtables - auditlogging - fqllogging - messaging - streaming - transientreplication - diff --git a/src/doc/4.0-beta1/_sources/new/java11.rst.txt b/src/doc/4.0-beta1/_sources/new/java11.rst.txt deleted file mode 100644 index df906d409..000000000 --- a/src/doc/4.0-beta1/_sources/new/java11.rst.txt +++ /dev/null @@ -1,274 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Support for Java 11 -------------------- - -In the new Java release cadence a new Java version is made available every six months. The more frequent release cycle -is favored as it brings new Java features to the developers as and when they are developed without the wait that the -earlier 3 year release model incurred. Not every Java version is a Long Term Support (LTS) version. After Java 8 the -next LTS version is Java 11. Java 9, 10, 12 and 13 are all non-LTS versions. - -One of the objectives of the Apache Cassandra 4.0 version is to support the recent LTS Java versions 8 and 11 (`CASSANDRA-9608 -`_). Java 8 and -Java 11 may be used to build and run Apache Cassandra 4.0. - -**Note**: Support for JDK 11 in Apache Cassandra 4.0 is an experimental feature, and not recommended for production use. - -Support Matrix -^^^^^^^^^^^^^^ - -The support matrix for the Java versions for compiling and running Apache Cassandra 4.0 is detailed in Table 1. The -build version is along the vertical axis and the run version is along the horizontal axis. - -Table 1 : Support Matrix for Java - -+---------------+--------------+-----------------+ -| | Java 8 (Run) | Java 11 (Run) | -+---------------+--------------+-----------------+ -| Java 8 (Build)|Supported |Supported | -+---------------+--------------+-----------------+ -| Java 11(Build)| Not Supported|Supported | -+---------------+--------------+-----------------+ - -Essentially Apache 4.0 source code built with Java 11 cannot be run with Java 8. Next, we shall discuss using each of Java 8 and 11 to build and run Apache Cassandra 4.0. - -Using Java 8 to Build -^^^^^^^^^^^^^^^^^^^^^ - -To start with, install Java 8. As an example, for installing Java 8 on RedHat Linux the command is as follows: - -:: - -$ sudo yum install java-1.8.0-openjdk-devel - -Set ``JAVA_HOME`` and ``JRE_HOME`` environment variables in the shell bash script. 
First, open the bash script: - -:: - -$ sudo vi ~/.bashrc - -Set the environment variables including the ``PATH``. - -:: - - $ export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk - $ export JRE_HOME=/usr/lib/jvm/java-1.8.0-openjdk/jre - $ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin - -Download and install Apache Cassandra 4.0 source code from the Git along with the dependencies. - -:: - - $ git clone https://github.com/apache/cassandra.git - -If Cassandra is already running stop Cassandra with the following command. - -:: - - [ec2-user@ip-172-30-3-146 bin]$ ./nodetool stopdaemon - -Build the source code from the ``cassandra`` directory, which has the ``build.xml`` build script. The Apache Ant uses the Java version set in the ``JAVA_HOME`` environment variable. - -:: - - $ cd ~/cassandra - $ ant - -Apache Cassandra 4.0 gets built with Java 8. Set the environment variable for ``CASSANDRA_HOME`` in the bash script. Also add the ``CASSANDRA_HOME/bin`` to the ``PATH`` variable. - -:: - - $ export CASSANDRA_HOME=~/cassandra - $ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$CASSANDRA_HOME/bin - -To run Apache Cassandra 4.0 with either of Java 8 or Java 11 run the Cassandra application in the ``CASSANDRA_HOME/bin`` directory, which is in the ``PATH`` env variable. - -:: - - $ cassandra - -The Java version used to run Cassandra gets output as Cassandra is getting started. As an example if Java 11 is used, the run output should include similar to the following output snippet: - -:: - - INFO [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:480 - Hostname: ip-172-30-3- - 146.ec2.internal:7000:7001 - INFO [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:487 - JVM vendor/version: OpenJDK - 64-Bit Server VM/11.0.3 - INFO [main] 2019-07-31 21:18:16,863 CassandraDaemon.java:488 - Heap size: - 1004.000MiB/1004.000MiB - -The following output indicates a single node Cassandra 4.0 cluster has started. - -:: - - INFO [main] 2019-07-31 21:18:19,687 InboundConnectionInitiator.java:130 - Listening on - address: (127.0.0.1:7000), nic: lo, encryption: enabled (openssl) - ... - ... - INFO [main] 2019-07-31 21:18:19,850 StorageService.java:512 - Unable to gossip with any - peers but continuing anyway since node is in its own seed list - INFO [main] 2019-07-31 21:18:19,864 StorageService.java:695 - Loading persisted ring state - INFO [main] 2019-07-31 21:18:19,865 StorageService.java:814 - Starting up server gossip - INFO [main] 2019-07-31 21:18:20,088 BufferPool.java:216 - Global buffer pool is enabled, - when pool is exhausted (max is 251.000MiB) it will allocate on heap - INFO [main] 2019-07-31 21:18:20,110 StorageService.java:875 - This node will not auto - bootstrap because it is configured to be a seed node. - ... - ... - INFO [main] 2019-07-31 21:18:20,809 StorageService.java:1507 - JOINING: Finish joining ring - INFO [main] 2019-07-31 21:18:20,921 StorageService.java:2508 - Node 127.0.0.1:7000 state - jump to NORMAL - -Using Java 11 to Build -^^^^^^^^^^^^^^^^^^^^^^ -If Java 11 is used to build Apache Cassandra 4.0, first Java 11 must be installed and the environment variables set. As an example, to download and install Java 11 on RedHat Linux run the following command. - -:: - - $ yum install java-11-openjdk-devel - -Set the environment variables in the bash script for Java 11. The first command is to open the bash script. 
- -:: - - $ sudo vi ~/.bashrc - $ export JAVA_HOME=/usr/lib/jvm/java-11-openjdk - $ export JRE_HOME=/usr/lib/jvm/java-11-openjdk/jre - $ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin - -To build source code with Java 11 one of the following two options must be used. - - 1. Include Apache Ant command-line option ``-Duse.jdk=11`` as follows: - :: - - $ ant -Duse.jdk=11 - - 2. Set environment variable ``CASSANDRA_USE_JDK11`` to ``true``: - :: - - $ export CASSANDRA_USE_JDK11=true - -As an example, set the environment variable ``CASSANDRA_USE_JDK11`` to ``true``. - -:: - - [ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true - [ec2-user@ip-172-30-3-146 cassandra]$ ant - Buildfile: /home/ec2-user/cassandra/build.xml - -Or, set the command-line option. - -:: - - [ec2-user@ip-172-30-3-146 cassandra]$ ant -Duse.jdk11=true - -The build output should include the following. - -:: - - _build_java: - [echo] Compiling for Java 11 - ... - ... - build: - - _main-jar: - [copy] Copying 1 file to /home/ec2-user/cassandra/build/classes/main/META-INF - [jar] Building jar: /home/ec2-user/cassandra/build/apache-cassandra-4.0-SNAPSHOT.jar - ... - ... - _build-test: - [javac] Compiling 739 source files to /home/ec2-user/cassandra/build/test/classes - [copy] Copying 25 files to /home/ec2-user/cassandra/build/test/classes - ... - ... - jar: - [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/stress/META-INF - [mkdir] Created dir: /home/ec2-user/cassandra/build/tools/lib - [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/stress.jar - [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/fqltool/META-INF - [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/fqltool.jar - - BUILD SUCCESSFUL - Total time: 1 minute 3 seconds - [ec2-user@ip-172-30-3-146 cassandra]$ - -Common Issues -^^^^^^^^^^^^^^ -One of the two options mentioned must be used to compile with JDK 11 or the build fails and the following error message is output. - -:: - - [ec2-user@ip-172-30-3-146 cassandra]$ ant - Buildfile: /home/ec2-user/cassandra/build.xml - validate-build-conf: - - BUILD FAILED - /home/ec2-user/cassandra/build.xml:293: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true must - be set when building from java 11 - Total time: 1 second - [ec2-user@ip-172-30-3-146 cassandra]$ - -The Java 11 built Apache Cassandra 4.0 source code may be run with Java 11 only. If a Java 11 built code is run with Java 8 the following error message gets output. - -:: - - [root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com - Last login: Wed Jul 31 20:47:26 2019 from 75.155.255.51 - [ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME - /usr/lib/jvm/java-1.8.0-openjdk - [ec2-user@ip-172-30-3-146 ~]$ cassandra - ... - ... - Error: A JNI error has occurred, please check your installation and try again - Exception in thread "main" java.lang.UnsupportedClassVersionError: - org/apache/cassandra/service/CassandraDaemon has been compiled by a more recent version of - the Java Runtime (class file version 55.0), this version of the Java Runtime only recognizes - class file versions up to 52.0 - at java.lang.ClassLoader.defineClass1(Native Method) - at java.lang.ClassLoader.defineClass(ClassLoader.java:763) - at ... - ... - -The ``CASSANDRA_USE_JDK11`` variable or the command-line option ``-Duse.jdk11`` cannot be used to build with Java 8. To demonstrate set ``JAVA_HOME`` to version 8. 
- -:: - - [root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com - Last login: Wed Jul 31 21:41:50 2019 from 75.155.255.51 - [ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME - /usr/lib/jvm/java-1.8.0-openjdk - -Set the ``CASSANDRA_USE_JDK11=true`` or command-line option ``-Duse.jdk11=true``. Subsequently, run Apache Ant to start the build. The build fails with error message listed. - -:: - - [ec2-user@ip-172-30-3-146 ~]$ cd - cassandra - [ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true - [ec2-user@ip-172-30-3-146 cassandra]$ ant - Buildfile: /home/ec2-user/cassandra/build.xml - - validate-build-conf: - - BUILD FAILED - /home/ec2-user/cassandra/build.xml:285: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true cannot - be set when building from java 8 - - Total time: 0 seconds - diff --git a/src/doc/4.0-beta1/_sources/new/messaging.rst.txt b/src/doc/4.0-beta1/_sources/new/messaging.rst.txt deleted file mode 100644 index 755c9d106..000000000 --- a/src/doc/4.0-beta1/_sources/new/messaging.rst.txt +++ /dev/null @@ -1,257 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Improved Internode Messaging ------------------------------- - - -Apache Cassandra 4.0 has added several new improvements to internode messaging. - -Optimized Internode Messaging Protocol -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The internode messaging protocol has been optimized (`CASSANDRA-14485 -`_). Previously the ``IPAddressAndPort`` of the sender was included with each message that was sent even though the ``IPAddressAndPort`` had already been sent once when the initial connection/session was established. In Cassandra 4.0 ``IPAddressAndPort`` has been removed from every separate message sent and only sent when connection/session is initiated. - -Another improvement is that at several instances (listed) a fixed 4-byte integer value has been replaced with ``vint`` as a ``vint`` is almost always less than 1 byte: - -- The ``paramSize`` (the number of parameters in the header) -- Each individual parameter value -- The ``payloadSize`` - - -NIO Messaging -^^^^^^^^^^^^^^^ -In Cassandra 4.0 peer-to-peer (internode) messaging has been switched to non-blocking I/O (NIO) via Netty (`CASSANDRA-8457 -`_). - -As serialization format, each message contains a header with several fixed fields, an optional key-value parameters section, and then the message payload itself. Note: the IP address in the header may be either IPv4 (4 bytes) or IPv6 (16 bytes). - - The diagram below shows the IPv4 address for brevity. 
- -:: - - 1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4 5 5 5 5 5 6 6 - 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | PROTOCOL MAGIC | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Message ID | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Timestamp | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Addr len | IP Address (IPv4) / - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - / | Verb / - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - / | Parameters size / - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - / | Parameter data / - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - / | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Payload size | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | / - / Payload / - / | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - -An individual parameter has a String key and a byte array value. The key is serialized with its length, encoded as two bytes, followed by the UTF-8 byte encoding of the string. The body is serialized with its length, encoded as four bytes, followed by the bytes of the value. - -Resource limits on Queued Messages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -System stability is improved by enforcing strict resource limits (`CASSANDRA-15066 -`_) on the number of outbound messages that are queued, measured by the ``serializedSize`` of the message. There are three separate limits imposed simultaneously to ensure that progress is always made without any reasonable combination of failures impacting a node’s stability. - -1. Global, per-endpoint and per-connection limits are imposed on messages queued for delivery to other nodes and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire size of the message being sent or received. -2. The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. Each node-pair has three links: urgent, small and large. So any given node may have a maximum of ``N*3 * (internode_application_send_queue_capacity_in_bytes + internode_application_receive_queue_capacity_in_bytes)`` messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens nodes should need to communicate with significant bandwidth. -3. The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, on all links to or from a single node in the cluster. The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, on all links to or from any node in the cluster. The following configuration settings have been added to ``cassandra.yaml`` for resource limits on queued messages. 
- -:: - - internode_application_send_queue_capacity_in_bytes: 4194304 #4MiB - internode_application_send_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB - internode_application_send_queue_reserve_global_capacity_in_bytes: 536870912 #512MiB - internode_application_receive_queue_capacity_in_bytes: 4194304 #4MiB - internode_application_receive_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB - internode_application_receive_queue_reserve_global_capacity_in_bytes: 536870912 #512MiB - -Virtual Tables for Messaging Metrics -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Metrics is improved by keeping metrics using virtual tables for inter-node inbound and outbound messaging (`CASSANDRA-15066 -`_). For inbound messaging a virtual table (``internode_inbound``) has been added to keep metrics for: - -- Bytes and count of messages that could not be serialized or flushed due to an error -- Bytes and count of messages scheduled -- Bytes and count of messages successfully processed -- Bytes and count of messages successfully received -- Nanos and count of messages throttled -- Bytes and count of messages expired -- Corrupt frames recovered and unrecovered - -A separate virtual table (``internode_outbound``) has been added for outbound inter-node messaging. The outbound virtual table keeps metrics for: - -- Bytes and count of messages pending -- Bytes and count of messages sent -- Bytes and count of messages expired -- Bytes and count of messages that could not be sent due to an error -- Bytes and count of messages overloaded -- Active Connection Count -- Connection Attempts -- Successful Connection Attempts - -Hint Messaging -^^^^^^^^^^^^^^ - -A specialized version of hint message that takes an already encoded in a ``ByteBuffer`` hint and sends it verbatim has been added. It is an optimization for when dispatching a hint file of the current messaging version to a node of the same messaging version, which is the most common case. It saves on extra ``ByteBuffer`` allocations one redundant hint deserialization-serialization cycle. - -Internode Application Timeout -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A configuration setting has been added to ``cassandra.yaml`` for the maximum continuous period a connection may be unwritable in application space. - -:: - -# internode_application_timeout_in_ms = 30000 - -Some other new features include logging of message size to trace message for tracing a query. - -Paxos prepare and propose stage for local requests optimized -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In pre-4.0 Paxos prepare and propose messages always go through entire ``MessagingService`` stack in Cassandra even if request is to be served locally, we can enhance and make local requests severed w/o involving ``MessagingService``. Similar things are done elsewhere in Cassandra which skips ``MessagingService`` stage for local requests. - -This is what it looks like in pre 4.0 if we have tracing on and run a light-weight transaction: - -:: - - Sending PAXOS_PREPARE message to /A.B.C.D [MessagingService-Outgoing-/A.B.C.D] | 2017-09-11 - 21:55:18.971000 | A.B.C.D | 15045 - … REQUEST_RESPONSE message received from /A.B.C.D [MessagingService-Incoming-/A.B.C.D] | - 2017-09-11 21:55:18.976000 | A.B.C.D | 20270 - … Processing response from /A.B.C.D [SharedPool-Worker-4] | 2017-09-11 21:55:18.976000 | - A.B.C.D | 20372 - -Same thing applies for Propose stage as well. - -In version 4.0 Paxos prepare and propose stage for local requests are optimized (`CASSANDRA-13862 -`_). 
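As a quick illustration, assuming a hypothetical ``demo_ks.users`` table already exists, the behaviour can be checked from ``cqlsh`` by enabling tracing and running a lightweight transaction; on a 4.0 node that serves the Paxos prepare and propose stages locally, the trace should no longer include the ``MessagingService-Outgoing``/``MessagingService-Incoming`` entries shown above.

::

    cqlsh> TRACING ON;
    cqlsh> INSERT INTO demo_ks.users (id, name) VALUES (1, 'alice') IF NOT EXISTS;
    cqlsh> TRACING OFF;
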
- -Quality Assurance -^^^^^^^^^^^^^^^^^ - -Several other quality assurance improvements have been made in version 4.0 (`CASSANDRA-15066 -`_). - -Framing -******* -Version 4.0 introduces framing to all internode messages, i.e. the grouping of messages into a single logical payload with headers and trailers; these frames are guaranteed to either contain at most one message, that is split into its own unique sequence of frames (for large messages), or that a frame contains only complete messages. - -Corruption prevention -********************* -Previously, intra-datacenter internode messages would be unprotected from corruption by default, as only LZ4 provided any integrity checks. All messages to post 4.0 nodes are written to explicit frames, which may be: - -- LZ4 encoded -- CRC protected - -The Unprotected option is still available. - -Resilience -********** -For resilience, all frames are written with a separate CRC protected header, of 8 and 6 bytes respectively. If corruption occurs in this header, the connection must be reset, as before. If corruption occurs anywhere outside of the header, the corrupt frame will be skipped, leaving the connection intact and avoiding the loss of any messages unnecessarily. - -Previously, any issue at any point in the stream would result in the connection being reset, with the loss of any in-flight messages. - -Efficiency -********** -The overall memory usage, and number of byte shuffles, on both inbound and outbound messages is reduced. - -Outbound the Netty LZ4 encoder maintains a chunk size buffer (64KiB), that is filled before any compressed frame can be produced. Our frame encoders avoid this redundant copy, as well as freeing 192KiB per endpoint. - -Inbound, frame decoders guarantee only to copy the number of bytes necessary to parse a frame, and to never store more bytes than necessary. This improvement applies twice to LZ4 connections, improving both the message decode and the LZ4 frame decode. - -Inbound Path -************ -Version 4.0 introduces several improvements to the inbound path. - -An appropriate message handler is used based on whether large or small messages are expected on a particular connection as set in a flag. ``NonblockingBufferHandler``, running on event loop, is used for small messages, and ``BlockingBufferHandler``, running off event loop, for large messages. The single implementation of ``InboundMessageHandler`` handles messages of any size effectively by deriving size of the incoming message from the byte stream. In addition to deriving size of the message from the stream, incoming message expiration time is proactively read, before attempting to deserialize the entire message. If it’s expired at the time when a message is encountered the message is just skipped in the byte stream altogether. -And if a message fails to be deserialized while still on the receiving side - say, because of table id or column being unknown - bytes are skipped, without dropping the entire connection and losing all the buffered messages. An immediately reply back is sent to the coordinator node with the failure reason, rather than waiting for the coordinator callback to expire. This logic is extended to a corrupted frame; a corrupted frame is safely skipped over without dropping the connection. - -Inbound path imposes strict limits on memory utilization. Specifically, the memory occupied by all parsed, but unprocessed messages is bound - on per-connection, per-endpoint, and global basis. 
Once a connection exceeds its local unprocessed capacity and cannot borrow any permits from per-endpoint and global reserve, it simply stops processing further messages, providing natural backpressure - until sufficient capacity is regained. - -Outbound Connections -******************** - -Opening a connection -++++++++++++++++++++ -A consistent approach is adopted for all kinds of failure to connect, including: refused by endpoint, incompatible versions, or unexpected exceptions; - -- Retry forever, until either success or no messages waiting to deliver. -- Wait incrementally longer periods before reconnecting, up to a maximum of 1s. -- While failing to connect, no reserve queue limits are acquired. - -Closing a connection -++++++++++++++++++++ -- Correctly drains outbound messages that are waiting to be delivered (unless disconnected and fail to reconnect). -- Messages written to a closing connection are either delivered or rejected, with a new connection being opened if the old is irrevocably closed. -- Unused connections are pruned eventually. - -Reconnecting -++++++++++++ - -We sometimes need to reconnect a perfectly valid connection, e.g. if the preferred IP address changes. We ensure that the underlying connection has no in-progress operations before closing it and reconnecting. - -Message Failure -++++++++++++++++ -Propagates to callbacks instantly, better preventing overload by reclaiming committed memory. - -Expiry -~~~~~~~~ -- No longer experiences head-of-line blocking (e.g. undroppable message preventing all droppable messages from being expired). -- While overloaded, expiry is attempted eagerly on enqueuing threads. -- While disconnected we schedule regular pruning, to handle the case where messages are no longer being sent, but we have a large backlog to expire. - -Overload -~~~~~~~~~ -- Tracked by bytes queued, as opposed to number of messages. - -Serialization Errors -~~~~~~~~~~~~~~~~~~~~~ -- Do not result in the connection being invalidated; the message is simply completed with failure, and then erased from the frame. -- Includes detected mismatch between calculated serialization size to actual. - -Failures to flush to network, perhaps because the connection has been reset are not currently notified to callback handlers, as the necessary information has been discarded, though it would be possible to do so in future if we decide it is worth our while. - -QoS -+++++ -"Gossip" connection has been replaced with a general purpose "Urgent" connection, for any small messages impacting system stability. - -Metrics -+++++++ -We track, and expose via Virtual Table and JMX, the number of messages and bytes that: we could not serialize or flush due to an error, we dropped due to overload or timeout, are pending, and have successfully sent. - -Added a Message size limit -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra pre-4.0 doesn't protect the server from allocating huge buffers for the inter-node Message objects. Adding a message size limit would be good to deal with issues such as a malfunctioning cluster participant. Version 4.0 introduced max message size config param, akin to max mutation size - set to endpoint reserve capacity by default. - -Recover from unknown table when deserializing internode messages -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -As discussed in (`CASSANDRA-9289 -`_) it would be nice to gracefully recover from seeing an unknown table in a message from another node. 
Pre-4.0, we close the connection and reconnect, which can cause other concurrent queries to fail. -Version 4.0 fixes the issue by wrapping message in-stream with -``TrackedDataInputPlus``, catching -``UnknownCFException``, and skipping the remaining bytes in this message. TCP won't be closed and it will remain connected for other messages. diff --git a/src/doc/4.0-beta1/_sources/new/streaming.rst.txt b/src/doc/4.0-beta1/_sources/new/streaming.rst.txt deleted file mode 100644 index 1807eb402..000000000 --- a/src/doc/4.0-beta1/_sources/new/streaming.rst.txt +++ /dev/null @@ -1,162 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Improved Streaming ---------------------- - -Apache Cassandra 4.0 has made several improvements to streaming. Streaming is the process used by nodes of a cluster to exchange data in the form of SSTables. Streaming of SSTables is performed for several operations, such as: - -- SSTable Repair -- Host Replacement -- Range movements -- Bootstrapping -- Rebuild -- Cluster expansion - -Streaming based on Netty -^^^^^^^^^^^^^^^^^^^^^^^^ - -Streaming in Cassandra 4.0 is based on Non-blocking Input/Output (NIO) with Netty (`CASSANDRA-12229 -`_). It replaces the single-threaded (or sequential), synchronous, blocking model of streaming messages and transfer of files. Netty supports non-blocking, asynchronous, multi-threaded streaming with which multiple connections are opened simultaneously. Non-blocking implies that threads are not blocked as they don’t wait for a response for a sent request. A response could be returned in a different thread. With asynchronous, connections and threads are decoupled and do not have a 1:1 relation. Several more connections than threads may be opened. - -Zero Copy Streaming -^^^^^^^^^^^^^^^^^^^^ - -Pre-4.0, during streaming Cassandra reifies the SSTables into objects. This creates unnecessary garbage and slows down the whole streaming process as some SSTables can be transferred as a whole file rather than individual partitions. Cassandra 4.0 has added support for streaming entire SSTables when possible (`CASSANDRA-14556 -`_) for faster Streaming using ZeroCopy APIs. If enabled, Cassandra will use ZeroCopy for eligible SSTables significantly speeding up transfers and increasing throughput. A zero-copy path avoids bringing data into user-space on both sending and receiving side. Any streaming related operations will notice corresponding improvement. Zero copy streaming is hardware bound; only limited by the hardware limitations (Network and Disk IO ). - -High Availability -***************** -In benchmark tests Zero Copy Streaming is 5x faster than partitions based streaming. Faster streaming provides the benefit of improved availability. 
A cluster’s recovery mainly depends on the streaming speed, Cassandra clusters with failed nodes will be able to recover much more quickly (5x faster). If a node fails, SSTables need to be streamed to a replacement node. During the replacement operation, the new Cassandra node streams SSTables from the neighboring nodes that hold copies of the data belonging to this new node’s token range. Depending on the amount of data stored, this process can require substantial network bandwidth, taking some time to complete. The longer these range movement operations take, the more the cluster availability is lost. Failure of multiple nodes would reduce high availability greatly. The faster the new node completes streaming its data, the faster it can serve traffic, increasing the availability of the cluster. - -Enabling Zero Copy Streaming -***************************** -Zero copy streaming is enabled by setting the following setting in ``cassandra.yaml``. - -:: - - stream_entire_sstables: true - -By default zero copy streaming is enabled. - -SSTables Eligible for Zero Copy Streaming -***************************************** -Zero copy streaming is used if all partitions within the SSTable need to be transmitted. This is common when using ``LeveledCompactionStrategy`` or when partitioning SSTables by token range has been enabled. All partition keys in the SSTables are iterated over to determine the eligibility for Zero Copy streaming. - -Benefits of Zero Copy Streaming -******************************** -When enabled, it permits Cassandra to zero-copy stream entire eligible SSTables between nodes, including every component. This speeds up the network transfer significantly subject to throttling specified by ``stream_throughput_outbound_megabits_per_sec``. - -Enabling this will reduce the GC pressure on sending and receiving node. While this feature tries to keep the disks balanced, it cannot guarantee it. This feature will be automatically disabled if internode encryption is enabled. Currently this can be used with Leveled Compaction. - -Configuring for Zero Copy Streaming -************************************ -Throttling would reduce the streaming speed. The ``stream_throughput_outbound_megabits_per_sec`` throttles all outbound streaming file transfers on a node to the given total throughput in Mbps. When unset, the default is 200 Mbps or 25 MB/s. - -:: - - stream_throughput_outbound_megabits_per_sec: 200 - -To run any Zero Copy streaming benchmark the ``stream_throughput_outbound_megabits_per_sec`` must be set to a really high value otherwise, throttling will be significant and the benchmark results will not be meaningful. - -The ``inter_dc_stream_throughput_outbound_megabits_per_sec`` throttles all streaming file transfer between the datacenters, this setting allows users to throttle inter dc stream throughput in addition to throttling all network stream traffic as configured with ``stream_throughput_outbound_megabits_per_sec``. When unset, the default is 200 Mbps or 25 MB/s. - -:: - - inter_dc_stream_throughput_outbound_megabits_per_sec: 200 - -SSTable Components Streamed with Zero Copy Streaming -***************************************************** -Zero Copy Streaming streams entire SSTables. SSTables are made up of multiple components in separate files. SSTable components streamed are listed in Table 1. - -Table 1. 
SSTable Components - -+------------------+---------------------------------------------------+ -|SSTable Component | Description | -+------------------+---------------------------------------------------+ -| Data.db |The base data for an SSTable: the remaining | -| |components can be regenerated based on the data | -| |component. | -+------------------+---------------------------------------------------+ -| Index.db |Index of the row keys with pointers to their | -| |positions in the data file. | -+------------------+---------------------------------------------------+ -| Filter.db |Serialized bloom filter for the row keys in the | -| |SSTable. | -+------------------+---------------------------------------------------+ -|CompressionInfo.db|File to hold information about uncompressed | -| |data length, chunk offsets etc. | -+------------------+---------------------------------------------------+ -| Statistics.db |Statistical metadata about the content of the | -| |SSTable. | -+------------------+---------------------------------------------------+ -| Digest.crc32 |Holds CRC32 checksum of the data file | -| |size_bytes. | -+------------------+---------------------------------------------------+ -| CRC.db |Holds the CRC32 for chunks in an uncompressed file.| -+------------------+---------------------------------------------------+ -| Summary.db |Holds SSTable Index Summary | -| |(sampling of Index component) | -+------------------+---------------------------------------------------+ -| TOC.txt |Table of contents, stores the list of all | -| |components for the SSTable. | -+------------------+---------------------------------------------------+ - -Custom component, used by e.g. custom compaction strategy may also be included. - -Repair Streaming Preview -^^^^^^^^^^^^^^^^^^^^^^^^ - -Repair with ``nodetool repair`` involves streaming of repaired SSTables and a repair preview has been added to provide an estimate of the amount of repair streaming that would need to be performed. Repair preview (`CASSANDRA-13257 -`_) is invoke with ``nodetool repair --preview`` using option: - -:: - --prv, --preview - -It determines ranges and amount of data to be streamed, but doesn't actually perform repair. - -Parallelizing of Streaming of Keyspaces -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The streaming of the different keyspaces for bootstrap and rebuild has been parallelized in Cassandra 4.0 (`CASSANDRA-4663 -`_). - -Unique nodes for Streaming in Multi-DC deployment -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Range Streamer picks unique nodes to stream data from when number of replicas in each DC is three or more (`CASSANDRA-4650 -`_). What the optimization does is to even out the streaming load across the cluster. Without the optimization, some node can be picked up to stream more data than others. This patch allows to select dedicated node to stream only one range. - -This will increase the performance of bootstrapping a node and will also put less pressure on nodes serving the data. This does not affect if N < 3 in each DC as then it streams data from only 2 nodes. - -Stream Operation Types -^^^^^^^^^^^^^ - -It is important to know the type or purpose of a certain stream. Version 4.0 (`CASSANDRA-13064 -`_) adds an ``enum`` to distinguish between the different types of streams. Stream types are available both in a stream request and a stream task. 
The different stream types are: - -- Restore replica count -- Unbootstrap -- Relocation -- Bootstrap -- Rebuild -- Bulk Load -- Repair - -Disallow Decommission when number of Replicas will drop below configured RF -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -`CASSANDRA-12510 -`_ guards against decommission that will drop # of replicas below configured replication factor (RF), and adds the ``--force`` option that allows decommission to continue if intentional; force decommission of this node even when it reduces the number of replicas to below configured RF. diff --git a/src/doc/4.0-beta1/_sources/new/transientreplication.rst.txt b/src/doc/4.0-beta1/_sources/new/transientreplication.rst.txt deleted file mode 100644 index 438f43797..000000000 --- a/src/doc/4.0-beta1/_sources/new/transientreplication.rst.txt +++ /dev/null @@ -1,155 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Transient Replication ---------------------- - -**Note**: - -Transient Replication (`CASSANDRA-14404 -`_) is an experimental feature designed for expert Apache Cassandra users who are able to validate every aspect of the database for their application and deployment. -That means being able to check that operations like reads, writes, decommission, remove, rebuild, repair, and replace all work with your queries, data, configuration, operational practices, and availability requirements. -Apache Cassandra 4.0 has the initial implementation of transient replication. Future releases of Cassandra will make this feature suitable for a wider audience. -It is anticipated that a future version will support monotonic reads with transient replication as well as LWT, logged batches, and counters. Being experimental, Transient replication is **not** recommended for production use. - -Objective -^^^^^^^^^ - -The objective of transient replication is to decouple storage requirements from data redundancy (or consensus group size) using incremental repair, in order to reduce storage overhead. -Certain nodes act as full replicas (storing all the data for a given token range), and some nodes act as transient replicas, storing only unrepaired data for the same token ranges. - -The optimization that is made possible with transient replication is called "Cheap quorums", which implies that data redundancy is increased without corresponding increase in storage usage. - -Transient replication is useful when sufficient full replicas are unavailable to receive and store all the data. -Transient replication allows you to configure a subset of replicas to only replicate data that hasn't been incrementally repaired. -As an optimization, we can avoid writing data to a transient replica if we have successfully written data to the full replicas. 
-
-After incremental repair, transient data stored on transient replicas can be discarded.
-
-Enabling Transient Replication
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Transient replication is not enabled by default. It must be enabled on each node in a cluster separately by setting the following configuration property in ``cassandra.yaml``.
-
-::
-
-    enable_transient_replication: true
-
-Transient replication may be configured with both ``SimpleStrategy`` and ``NetworkTopologyStrategy``. Transient replication is configured by setting the replication factor as ``<total_replicas>/<transient_replicas>``.
-
-As an example, create a keyspace with 4 replicas, 1 of which is transient.
-
-::
-
-    CREATE KEYSPACE CassandraKeyspaceSimple WITH replication = {'class': 'SimpleStrategy',
-    'replication_factor' : 4/1};
-
-As another example, the ``some_keyspace`` keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:
-
-::
-
-    CREATE KEYSPACE some_keyspace WITH replication = {'class': 'NetworkTopologyStrategy',
-    'DC1' : '3/1', 'DC2' : '5/2'};
-
-Transiently replicated keyspaces only support tables with ``read_repair`` set to ``NONE``.
-
-Important Restrictions:
-
-- RF cannot be altered while some endpoints are not in a normal state (no range movements).
-- You cannot add full replicas if there are any transient replicas. You must first remove all transient replicas, then change the number of full replicas, and then add back the transient replicas.
-- You can only safely increase the number of transient replicas one at a time, with an incremental repair run in between each change.
-
-Additionally, transient replication cannot be used for:
-
-- Monotonic Reads
-- Lightweight Transactions (LWTs)
-- Logged Batches
-- Counters
-- Keyspaces using materialized views
-- Secondary indexes (2i)
-
-Cheap Quorums
-^^^^^^^^^^^^^
-
-Cheap quorums are a set of optimizations on the write path that avoid writing to transient replicas unless sufficient full replicas are not available to satisfy the requested consistency level.
-Hints are never written for transient replicas. Optimizations on the read path prefer reading from transient replicas.
-When writing at quorum to a table configured to use transient replication, the quorum will always prefer available full
-replicas over transient replicas so that transient replicas do not have to process writes. Tail latency is reduced by
-rapid write protection (similar to rapid read protection) when full replicas are slow or unavailable, by sending writes
-to transient replicas. Transient replicas can serve reads faster as they do not have to do anything beyond bloom filter
-checks if they have no data. With vnodes and large cluster sizes, transient replicas will not hold a large quantity of data,
-even when failure of one or more full replicas causes transient replicas to serve a steady amount of write traffic
-for some of their transiently replicated ranges.
-
-Speculative Write Option
-^^^^^^^^^^^^^^^^^^^^^^^^
-``CREATE TABLE`` adds an option ``speculative_write_threshold`` for use with transient replicas. The option is of type ``simple`` with a default value of ``99PERCENTILE``. When replicas are slow or unresponsive, ``speculative_write_threshold`` specifies the threshold at which a cheap quorum write will be upgraded to include transient replicas.
-
-Pending Ranges and Transient Replicas
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Pending ranges refers to the movement of token ranges between transient replicas.
When a transient range is moved, there -will be a period of time where both transient replicas would need to receive any write intended for the logical -transient replica so that after the movement takes effect a read quorum is able to return a response. Nodes are *not* -temporarily transient replicas during expansion. They stream data like a full replica for the transient range before they -can serve reads. A pending state is incurred similar to how there is a pending state for full replicas. Transient replicas -also always receive writes when they are pending. Pending transient ranges are sent a bit more data and reading from -them is avoided. - - -Read Repair and Transient Replicas -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Read repair never attempts to repair a transient replica. Reads will always include at least one full replica. -They should also prefer transient replicas where possible. Range scans ensure the entire scanned range performs -replica selection that satisfies the requirement that every range scanned includes one full replica. During incremental -& validation repair handling, at transient replicas anti-compaction does not output any data for transient ranges as the -data will be dropped after repair, and transient replicas never have data streamed to them. - - -Transitioning between Full Replicas and Transient Replicas -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The additional state transitions that transient replication introduces requires streaming and ``nodetool cleanup`` to -behave differently. When data is streamed it is ensured that it is streamed from a full replica and not a transient replica. - -Transitioning from not replicated to transiently replicated means that a node must stay pending until the next incremental -repair completes at which point the data for that range is known to be available at full replicas. - -Transitioning from transiently replicated to fully replicated requires streaming from a full replica and is identical -to how data is streamed when transitioning from not replicated to replicated. The transition is managed so the transient -replica is not read from as a full replica until streaming completes. It can be used immediately for a write quorum. - -Transitioning from fully replicated to transiently replicated requires cleanup to remove repaired data from the transiently -replicated range to reclaim space. It can be used immediately for a write quorum. - -Transitioning from transiently replicated to not replicated requires cleanup to be run to remove the formerly transiently replicated data. - -When transient replication is in use ring changes are supported including add/remove node, change RF, add/remove DC. - - -Transient Replication supports EACH_QUORUM -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -(`CASSANDRA-14727 -`_) adds support for Transient Replication support for ``EACH_QUORUM``. Per (`CASSANDRA-14768 -`_), we ensure we write to at least a ``QUORUM`` of nodes in every DC, -regardless of how many responses we need to wait for and our requested consistency level. This is to minimally surprise -users with transient replication; with normal writes, we soft-ensure that we reach ``QUORUM`` in all DCs we are able to, -by writing to every node; even if we don't wait for ACK, we have in both cases sent sufficient messages. 
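As a minimal sketch of the table-level requirement noted earlier for transiently replicated keyspaces (the keyspace and table names here are hypothetical), a table would be created with read repair disabled:

::

    CREATE TABLE some_keyspace.t (
        id int PRIMARY KEY,
        v text
    ) WITH read_repair = 'NONE';
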
diff --git a/src/doc/4.0-beta1/_sources/new/virtualtables.rst.txt b/src/doc/4.0-beta1/_sources/new/virtualtables.rst.txt deleted file mode 100644 index 1a39dc678..000000000 --- a/src/doc/4.0-beta1/_sources/new/virtualtables.rst.txt +++ /dev/null @@ -1,342 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Virtual Tables --------------- - -Apache Cassandra 4.0 implements virtual tables (`CASSANDRA-7622 -`_). - -Definition -^^^^^^^^^^ - -A virtual table is a table that is backed by an API instead of data explicitly managed and stored as SSTables. Apache Cassandra 4.0 implements a virtual keyspace interface for virtual tables. Virtual tables are specific to each node. - -Objective -^^^^^^^^^ - -A virtual table could have several uses including: - -- Expose metrics through CQL -- Expose YAML configuration information - -How are Virtual Tables different from regular tables? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Virtual tables and virtual keyspaces are quite different from regular tables and keyspaces respectively such as: - -- Virtual tables are read-only, but it is likely to change -- Virtual tables are not replicated -- Virtual tables are local only and non distributed -- Virtual tables have no associated SSTables -- Consistency level of the queries sent virtual tables are ignored -- Virtual tables are managed by Cassandra and a user cannot run DDL to create new virtual tables or DML to modify existing virtual tables -- Virtual tables are created in special keyspaces and not just any keyspace -- All existing virtual tables use ``LocalPartitioner``. Since a virtual table is not replicated the partitioner sorts in order of partition keys instead of by their hash. -- Making advanced queries with ``ALLOW FILTERING`` and aggregation functions may be used with virtual tables even though in normal tables we dont recommend it - -Virtual Keyspaces -^^^^^^^^^^^^^^^^^ - -Apache Cassandra 4.0 has added two new keyspaces for virtual tables: ``system_virtual_schema`` and ``system_views``. Run the following command to list the keyspaces: - -:: - - cqlsh> DESC KEYSPACES; - system_schema system system_distributed system_virtual_schema - system_auth system_traces system_views - -The ``system_virtual_schema keyspace`` contains schema information on virtual tables. The ``system_views`` keyspace contains the actual virtual tables. 
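The definitions of the virtual tables can themselves be inspected with ordinary CQL; a minimal sketch, assuming the ``system_virtual_schema.tables`` table follows the same column naming conventions as ``system_schema.tables``, is:

::

    cqlsh> SELECT keyspace_name, table_name FROM system_virtual_schema.tables;
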
- -Virtual Table Limitations -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Virtual tables and virtual keyspaces have some limitations initially though some of these could change such as: - -- Cannot alter or drop virtual keyspaces or tables -- Cannot truncate virtual tables -- Expiring columns are not supported by virtual tables -- Conditional updates are not supported by virtual tables -- Cannot create tables in virtual keyspaces -- Cannot perform any operations against virtual keyspace -- Secondary indexes are not supported on virtual tables -- Cannot create functions in virtual keyspaces -- Cannot create types in virtual keyspaces -- Materialized views are not supported on virtual tables -- Virtual tables don't support ``DELETE`` statements -- Cannot ``CREATE TRIGGER`` against a virtual table -- Conditional ``BATCH`` statements cannot include mutations for virtual tables -- Cannot include a virtual table statement in a logged batch -- Mutations for virtual and regular tables cannot exist in the same batch -- Conditional ``BATCH`` statements cannot include mutations for virtual tables -- Cannot create aggregates in virtual keyspaces; but may run aggregate functions on select - -Listing and Describing Virtual Tables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Virtual tables in a virtual keyspace may be listed with ``DESC TABLES``. The ``system_views`` virtual keyspace tables include the following: - -:: - - cqlsh> USE system_views; - cqlsh:system_views> DESC TABLES; - coordinator_scans clients tombstones_scanned internode_inbound - disk_usage sstable_tasks live_scanned caches - local_writes max_partition_size local_reads - coordinator_writes internode_outbound thread_pools - local_scans coordinator_reads settings - -Some of the salient virtual tables in ``system_views`` virtual keyspace are described in Table 1. - -Table 1 : Virtual Tables in system_views - -+------------------+---------------------------------------------------+ -|Virtual Table | Description | -+------------------+---------------------------------------------------+ -| clients |Lists information about all connected clients. | -+------------------+---------------------------------------------------+ -| disk_usage |Disk usage including disk_space, keyspace_name, | -| |and table_name by system keyspaces. | -+------------------+---------------------------------------------------+ -| local_writes |A table metric for local writes | -| |including count, keyspace_name, | -| |max, median, per_second, and | -| |table_name. | -+------------------+---------------------------------------------------+ -| caches |Displays the general cache information including | -| |cache name, capacity_bytes, entry_count, hit_count,| -| |hit_ratio double, recent_hit_rate_per_second, | -| |recent_request_rate_per_second, request_count, and | -| |size_bytes. | -+------------------+---------------------------------------------------+ -| local_reads |A table metric for local reads information. | -+------------------+---------------------------------------------------+ -| sstable_tasks |Lists currently running tasks such as compactions | -| |and upgrades on SSTables. | -+------------------+---------------------------------------------------+ -|internode_inbound |Lists information about the inbound | -| |internode messaging. | -+------------------+---------------------------------------------------+ -| thread_pools |Lists metrics for each thread pool. 
| -+------------------+---------------------------------------------------+ -| settings |Displays configuration settings in cassandra.yaml. | -+------------------+---------------------------------------------------+ -|max_partition_size|A table metric for maximum partition size. | -+------------------+---------------------------------------------------+ -|internode_outbound|Information about the outbound internode messaging.| -| | | -+------------------+---------------------------------------------------+ - -We shall discuss some of the virtual tables in more detail next. - -Clients Virtual Table -********************* - -The ``clients`` virtual table lists all active connections (connected clients) including their ip address, port, connection stage, driver name, driver version, hostname, protocol version, request count, ssl enabled, ssl protocol and user name: - -:: - - cqlsh:system_views> select * from system_views.clients; - address | port | connection_stage | driver_name | driver_version | hostname | protocol_version | request_count | ssl_cipher_suite | ssl_enabled | ssl_protocol | username - -----------+-------+------------------+-------------+----------------+-----------+------------------+---------------+------------------+-------------+--------------+----------- - 127.0.0.1 | 50628 | ready | null | null | localhost | 4 | 55 | null | False | null | anonymous - 127.0.0.1 | 50630 | ready | null | null | localhost | 4 | 70 | null | False | null | anonymous - - (2 rows) - -Some examples of how ``clients`` can be used are: - -- To find applications using old incompatible versions of drivers before upgrading and with ``nodetool enableoldprotocolversions`` and ``nodetool disableoldprotocolversions`` during upgrades. -- To identify clients sending too many requests. -- To find if SSL is enabled during the migration to and from ssl. - - -The virtual tables may be described with ``DESCRIBE`` statement. The DDL listed however cannot be run to create a virtual table. As an example describe the ``system_views.clients`` virtual table: - -:: - - cqlsh:system_views> DESC TABLE system_views.clients; - CREATE TABLE system_views.clients ( - address inet, - connection_stage text, - driver_name text, - driver_version text, - hostname text, - port int, - protocol_version int, - request_count bigint, - ssl_cipher_suite text, - ssl_enabled boolean, - ssl_protocol text, - username text, - PRIMARY KEY (address, port)) WITH CLUSTERING ORDER BY (port ASC) - AND compaction = {'class': 'None'} - AND compression = {}; - -Caches Virtual Table -******************** -The ``caches`` virtual table lists information about the caches. The four caches presently created are chunks, counters, keys and rows. 
A query on the ``caches`` virtual table returns the following details: - -:: - - cqlsh:system_views> SELECT * FROM system_views.caches; - name | capacity_bytes | entry_count | hit_count | hit_ratio | recent_hit_rate_per_second | recent_request_rate_per_second | request_count | size_bytes - ---------+----------------+-------------+-----------+-----------+----------------------------+--------------------------------+---------------+------------ - chunks | 229638144 | 29 | 166 | 0.83 | 5 | 6 | 200 | 475136 - counters | 26214400 | 0 | 0 | NaN | 0 | 0 | 0 | 0 - keys | 52428800 | 14 | 124 | 0.873239 | 4 | 4 | 142 | 1248 - rows | 0 | 0 | 0 | NaN | 0 | 0 | 0 | 0 - - (4 rows) - -Settings Virtual Table -********************** -The ``settings`` table is rather useful and lists all the current configuration settings from the ``cassandra.yaml``. The encryption options are overridden to hide the sensitive truststore information or passwords. The configuration settings however cannot be set using DML on the virtual table presently: -:: - - cqlsh:system_views> SELECT * FROM system_views.settings; - - name | value - -------------------------------------+-------------------- - allocate_tokens_for_keyspace | null - audit_logging_options_enabled | false - auto_snapshot | true - automatic_sstable_upgrade | false - cluster_name | Test Cluster - enable_transient_replication | false - hinted_handoff_enabled | true - hints_directory | /home/ec2-user/cassandra/data/hints - incremental_backups | false - initial_token | null - ... - ... - ... - rpc_address | localhost - ssl_storage_port | 7001 - start_native_transport | true - storage_port | 7000 - stream_entire_sstables | true - (224 rows) - - -The ``settings`` table can be really useful if yaml file has been changed since startup and dont know running configuration, or to find if they have been modified via jmx/nodetool or virtual tables. - - -Thread Pools Virtual Table -************************** - -The ``thread_pools`` table lists information about all thread pools. Thread pool information includes active tasks, active tasks limit, blocked tasks, blocked tasks all time, completed tasks, and pending tasks. 
A query on the ``thread_pools`` returns following details: - -:: - - cqlsh:system_views> select * from system_views.thread_pools; - - name | active_tasks | active_tasks_limit | blocked_tasks | blocked_tasks_all_time | completed_tasks | pending_tasks - ------------------------------+--------------+--------------------+---------------+------------------------+-----------------+--------------- - AntiEntropyStage | 0 | 1 | 0 | 0 | 0 | 0 - CacheCleanupExecutor | 0 | 1 | 0 | 0 | 0 | 0 - CompactionExecutor | 0 | 2 | 0 | 0 | 881 | 0 - CounterMutationStage | 0 | 32 | 0 | 0 | 0 | 0 - GossipStage | 0 | 1 | 0 | 0 | 0 | 0 - HintsDispatcher | 0 | 2 | 0 | 0 | 0 | 0 - InternalResponseStage | 0 | 2 | 0 | 0 | 0 | 0 - MemtableFlushWriter | 0 | 2 | 0 | 0 | 1 | 0 - MemtablePostFlush | 0 | 1 | 0 | 0 | 2 | 0 - MemtableReclaimMemory | 0 | 1 | 0 | 0 | 1 | 0 - MigrationStage | 0 | 1 | 0 | 0 | 0 | 0 - MiscStage | 0 | 1 | 0 | 0 | 0 | 0 - MutationStage | 0 | 32 | 0 | 0 | 0 | 0 - Native-Transport-Requests | 1 | 128 | 0 | 0 | 130 | 0 - PendingRangeCalculator | 0 | 1 | 0 | 0 | 1 | 0 - PerDiskMemtableFlushWriter_0 | 0 | 2 | 0 | 0 | 1 | 0 - ReadStage | 0 | 32 | 0 | 0 | 13 | 0 - Repair-Task | 0 | 2147483647 | 0 | 0 | 0 | 0 - RequestResponseStage | 0 | 2 | 0 | 0 | 0 | 0 - Sampler | 0 | 1 | 0 | 0 | 0 | 0 - SecondaryIndexManagement | 0 | 1 | 0 | 0 | 0 | 0 - ValidationExecutor | 0 | 2147483647 | 0 | 0 | 0 | 0 - ViewBuildExecutor | 0 | 1 | 0 | 0 | 0 | 0 - ViewMutationStage | 0 | 32 | 0 | 0 | 0 | 0 - -(24 rows) - -Internode Inbound Messaging Virtual Table -***************************************** - -The ``internode_inbound`` virtual table is for the internode inbound messaging. Initially no internode inbound messaging may get listed. In addition to the address, port, datacenter and rack information includes corrupt frames recovered, corrupt frames unrecovered, error bytes, error count, expired bytes, expired count, processed bytes, processed count, received bytes, received count, scheduled bytes, scheduled count, throttled count, throttled nanos, using bytes, using reserve bytes. A query on the ``internode_inbound`` returns following details: - -:: - - cqlsh:system_views> SELECT * FROM system_views.internode_inbound; - address | port | dc | rack | corrupt_frames_recovered | corrupt_frames_unrecovered | - error_bytes | error_count | expired_bytes | expired_count | processed_bytes | - processed_count | received_bytes | received_count | scheduled_bytes | scheduled_count | throttled_count | throttled_nanos | using_bytes | using_reserve_bytes - ---------+------+----+------+--------------------------+----------------------------+- - ---------- - (0 rows) - -SSTables Tasks Virtual Table -**************************** - -The ``sstable_tasks`` could be used to get information about running tasks. 
It lists following columns: - -:: - - cqlsh:system_views> SELECT * FROM sstable_tasks; - keyspace_name | table_name | task_id | kind | progress | total | unit - ---------------+------------+--------------------------------------+------------+----------+----------+------- - basic | wide2 | c3909740-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction | 60418761 | 70882110 | bytes - basic | wide2 | c7556770-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction | 2995623 | 40314679 | bytes - - -As another example, to find how much time is remaining for SSTable tasks, use the following query: - -:: - - SELECT total - progress AS remaining - FROM system_views.sstable_tasks; - -Other Virtual Tables -******************** - -Some examples of using other virtual tables are as follows. - -Find tables with most disk usage: - -:: - - cqlsh> SELECT * FROM disk_usage WHERE mebibytes > 1 ALLOW FILTERING; - - keyspace_name | table_name | mebibytes - ---------------+------------+----------- - keyspace1 | standard1 | 288 - tlp_stress | keyvalue | 3211 - -Find queries on table/s with greatest read latency: - -:: - - cqlsh> SELECT * FROM local_read_latency WHERE per_second > 1 ALLOW FILTERING; - - keyspace_name | table_name | p50th_ms | p99th_ms | count | max_ms | per_second - ---------------+------------+----------+----------+----------+---------+------------ - tlp_stress | keyvalue | 0.043 | 0.152 | 49785158 | 186.563 | 11418.356 - - -The system_virtual_schema keyspace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The ``system_virtual_schema`` keyspace has three tables: ``keyspaces``, ``columns`` and ``tables`` for the virtual keyspace definitions, virtual table definitions, and virtual column definitions respectively. It is used by Cassandra internally and a user would not need to access it directly. diff --git a/src/doc/4.0-beta1/_sources/operating/audit_logging.rst.txt b/src/doc/4.0-beta1/_sources/operating/audit_logging.rst.txt deleted file mode 100644 index 068209ee8..000000000 --- a/src/doc/4.0-beta1/_sources/operating/audit_logging.rst.txt +++ /dev/null @@ -1,236 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - - - -Audit Logging ------------------- - -Audit logging in Cassandra logs every incoming CQL command request, Authentication (successful as well as unsuccessful login) -to C* node. Currently, there are two implementations provided, the custom logger can be implemented and injected with the -class name as a parameter in cassandra.yaml. - -- ``BinAuditLogger`` An efficient way to log events to file in a binary format. -- ``FileAuditLogger`` Logs events to ``audit/audit.log`` file using slf4j logger. 
- -*Recommendation* ``BinAuditLogger`` is a community recommended logger considering the performance - -What does it capture -^^^^^^^^^^^^^^^^^^^^^^^ - -Audit logging captures following events - -- Successful as well as unsuccessful login attempts. - -- All database commands executed via Native protocol (CQL) attempted or successfully executed. - -Limitations -^^^^^^^^^^^ - -Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution time stamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log. - -What does it log -^^^^^^^^^^^^^^^^^^^ -Each audit log implementation has access to the following attributes, and for the default text based logger these fields are concatenated with `|` s to yield the final message. - - - ``user``: User name(if available) - - ``host``: Host IP, where the command is being executed - - ``source ip address``: Source IP address from where the request initiated - - ``source port``: Source port number from where the request initiated - - ``timestamp``: unix time stamp - - ``type``: Type of the request (SELECT, INSERT, etc.,) - - ``category`` - Category of the request (DDL, DML, etc.,) - - ``keyspace`` - Keyspace(If applicable) on which request is targeted to be executed - - ``scope`` - Table/Aggregate name/ function name/ trigger name etc., as applicable - - ``operation`` - CQL command being executed - -How to configure -^^^^^^^^^^^^^^^^^^ -Auditlog can be configured using cassandra.yaml. If you want to try Auditlog on one node, it can also be enabled and configured using ``nodetool``. - -cassandra.yaml configurations for AuditLog -""""""""""""""""""""""""""""""""""""""""""""" - - ``enabled``: This option enables/ disables audit log - - ``logger``: Class name of the logger/ custom logger. - - ``audit_logs_dir``: Auditlogs directory location, if not set, default to `cassandra.logdir.audit` or `cassandra.logdir` + /audit/ - - ``included_keyspaces``: Comma separated list of keyspaces to be included in audit log, default - includes all keyspaces - - ``excluded_keyspaces``: Comma separated list of keyspaces to be excluded from audit log, default - excludes no keyspace except `system`, `system_schema` and `system_virtual_schema` - - ``included_categories``: Comma separated list of Audit Log Categories to be included in audit log, default - includes all categories - - ``excluded_categories``: Comma separated list of Audit Log Categories to be excluded from audit log, default - excludes no category - - ``included_users``: Comma separated list of users to be included in audit log, default - includes all users - - ``excluded_users``: Comma separated list of users to be excluded from audit log, default - excludes no user - - -List of available categories are: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE - -NodeTool command to enable AuditLog -""""""""""""""""""""""""""""""""""""" -``enableauditlog``: Enables AuditLog with yaml defaults. yaml configurations can be overridden using options via nodetool command. - -:: - - nodetool enableauditlog - -Options -********** - - -``--excluded-categories`` - Comma separated list of Audit Log Categories to be excluded for - audit log. If not set the value from cassandra.yaml will be used - -``--excluded-keyspaces`` - Comma separated list of keyspaces to be excluded for audit log. If - not set the value from cassandra.yaml will be used. 
- Please remember that `system`, `system_schema` and `system_virtual_schema` are excluded by default, - if you are overwriting this option via nodetool, - remember to add these keyspaces back if you don't want them in audit logs - -``--excluded-users`` - Comma separated list of users to be excluded for audit log. If not - set the value from cassandra.yaml will be used - -``--included-categories`` - Comma separated list of Audit Log Categories to be included for - audit log. If not set the value from cassandra.yaml will be used - -``--included-keyspaces`` - Comma separated list of keyspaces to be included for audit log. If - not set the value from cassandra.yaml will be used - -``--included-users`` - Comma separated list of users to be included for audit log. If not - set the value from cassandra.yaml will be used - -``--logger`` - Logger name to be used for AuditLogging. Default BinAuditLogger. If - not set the value from cassandra.yaml will be used - - -NodeTool command to disable AuditLog -""""""""""""""""""""""""""""""""""""""" - -``disableauditlog``: Disables AuditLog. - -:: - - nodetool disableauditlog - -NodeTool command to reload AuditLog filters -""""""""""""""""""""""""""""""""""""""""""""" - -``enableauditlog``: The NodeTool enableauditlog command can be used to reload auditlog filters when called with the default or previous ``loggername`` and updated filters. - -E.g., - -:: - - nodetool enableauditlog --loggername --included-keyspaces - - - -View the contents of AuditLog Files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -``auditlogviewer`` is the new tool introduced to help view the contents of the binary log files in human readable text format. - -:: - - auditlogviewer [...] [options] - -Options -"""""""" - -``-f,--follow`` - Upon reaching the end of the log continue indefinitely - waiting for more records -``-r,--roll_cycle`` - How often the log file was rolled. May be - necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY, - DAILY). Default HOURLY. - -``-h,--help`` - display this help message - -For example, to dump the contents of audit log files on the console: - -:: - - auditlogviewer /logs/cassandra/audit - -Sample output -""""""""""""" - -:: - - LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1" - - - -Configuring BinAuditLogger -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To use ``BinAuditLogger`` as a logger in AuditLogging, set the logger to ``BinAuditLogger`` in cassandra.yaml under the ``audit_logging_options`` section. ``BinAuditLogger`` can be further configured using its advanced options in cassandra.yaml. - - -Advanced Options for BinAuditLogger -"""""""""""""""""""""""""""""""""""""" - -``block`` - Indicates if the AuditLog should block if it falls behind or should drop audit log records. Default is set to ``true`` so that AuditLog records won't be lost - -``max_queue_weight`` - Maximum weight of in memory queue for records waiting to be written to the audit log file before blocking or dropping the log records. Default is set to ``256 * 1024 * 1024`` - -``max_log_size`` - Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is set to ``16L * 1024L * 1024L * 1024L`` - -``roll_cycle`` - How often to roll Audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY. For more options, refer: net.openhft.chronicle.queue.RollCycles.
Default is set to ``"HOURLY"`` - -Configuring FileAuditLogger -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To use ``FileAuditLogger`` as a logger in AuditLogging, apart from setting the class name in cassandra.yaml, following configuration is needed to have the audit log events to flow through separate log file instead of system.log - - -.. code-block:: xml - - - - ${cassandra.logdir}/audit/audit.log - - - ${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip - - 50MB - 30 - 5GB - - - %-5level [%thread] %date{ISO8601} %F:%L - %msg%n - - - - - - - diff --git a/src/doc/4.0-beta1/_sources/operating/backups.rst.txt b/src/doc/4.0-beta1/_sources/operating/backups.rst.txt deleted file mode 100644 index 01cb6c588..000000000 --- a/src/doc/4.0-beta1/_sources/operating/backups.rst.txt +++ /dev/null @@ -1,660 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. -.. highlight:: none - -Backups -------- - -Apache Cassandra stores data in immutable SSTable files. Backups in Apache Cassandra database are backup copies of the database data that is stored as SSTable files. Backups are used for several purposes including the following: - -- To store a data copy for durability -- To be able to restore a table if table data is lost due to node/partition/network failure -- To be able to transfer the SSTable files to a different machine; for portability - -Types of Backups -^^^^^^^^^^^^^^^^ -Apache Cassandra supports two kinds of backup strategies. - -- Snapshots -- Incremental Backups - -A *snapshot* is a copy of a table’s SSTable files at a given time, created via hard links. The DDL to create the table is stored as well. Snapshots may be created by a user or created automatically. -The setting (``snapshot_before_compaction``) in ``cassandra.yaml`` determines if snapshots are created before each compaction. -By default ``snapshot_before_compaction`` is set to false. -Snapshots may be created automatically before keyspace truncation or dropping of a table by setting ``auto_snapshot`` to true (default) in ``cassandra.yaml``. -Truncates could be delayed due to the auto snapshots and another setting in ``cassandra.yaml`` determines how long the coordinator should wait for truncates to complete. -By default Cassandra waits 60 seconds for auto snapshots to complete. - -An *incremental backup* is a copy of a table’s SSTable files created by a hard link when memtables are flushed to disk as SSTables. -Typically incremental backups are paired with snapshots to reduce the backup time as well as reduce disk space. -Incremental backups are not enabled by default and must be enabled explicitly in ``cassandra.yaml`` (with ``incremental_backups`` setting) or with the Nodetool. 
-Once enabled, Cassandra creates a hard link to each SSTable flushed or streamed locally in a ``backups/`` subdirectory of the keyspace data. Incremental backups of system tables are also created. - -Data Directory Structure -^^^^^^^^^^^^^^^^^^^^^^^^ -The directory structure of Cassandra data consists of different directories for keyspaces, and tables with the data files within the table directories. Directories backups and snapshots to store backups and snapshots respectively for a particular table are also stored within the table directory. The directory structure for Cassandra is illustrated in Figure 1. - -.. figure:: Figure_1_backups.jpg - -Figure 1. Directory Structure for Cassandra Data - - -Setting Up Example Tables for Backups and Snapshots -**************************************************** -In this section we shall create some example data that could be used to demonstrate incremental backups and snapshots. We have used a three node Cassandra cluster. -First, the keyspaces are created. Subsequently tables are created within a keyspace and table data is added. We have used two keyspaces ``CQLKeyspace`` and ``CatalogKeyspace`` with two tables within each. -Create ``CQLKeyspace``: - -:: - - cqlsh> CREATE KEYSPACE CQLKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - -Create table ``t`` in the ``CQLKeyspace`` keyspace. - -:: - - cqlsh> USE CQLKeyspace; - cqlsh:cqlkeyspace> CREATE TABLE t ( - ... id int, - ... k int, - ... v text, - ... PRIMARY KEY (id) - ... ); - - -Add data to table ``t``: - -:: - - cqlsh:cqlkeyspace> - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1'); - - -A table query lists the data: - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t; - - id | k | v - ----+---+------ - 1 | 1 | val1 - 0 | 0 | val0 - - (2 rows) - -Create another table ``t2``: - -:: - - cqlsh:cqlkeyspace> CREATE TABLE t2 ( - ... id int, - ... k int, - ... v text, - ... PRIMARY KEY (id) - ... ); - -Add data to table ``t2``: - -:: - - cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (0, 0, 'val0'); - cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (1, 1, 'val1'); - cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (2, 2, 'val2'); - - -A table query lists table data: - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t2; - - id | k | v - ----+---+------ - 1 | 1 | val1 - 0 | 0 | val0 - 2 | 2 | val2 - - (3 rows) - -Create a second keyspace ``CatalogKeyspace``: - -:: - - cqlsh:cqlkeyspace> CREATE KEYSPACE CatalogKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - -Create a table called ``journal`` in ``CatalogKeyspace``: - -:: - - cqlsh:cqlkeyspace> USE CatalogKeyspace; - cqlsh:catalogkeyspace> CREATE TABLE journal ( - ... id int, - ... name text, - ... publisher text, - ... PRIMARY KEY (id) - ... 
); - - -Add data to table ``journal``: - -:: - - cqlsh:catalogkeyspace> INSERT INTO journal (id, name, publisher) VALUES (0, 'Apache - Cassandra Magazine', 'Apache Cassandra'); - cqlsh:catalogkeyspace> INSERT INTO journal (id, name, publisher) VALUES (1, 'Couchbase - Magazine', 'Couchbase'); - -Query table ``journal`` to list its data: - -:: - - cqlsh:catalogkeyspace> SELECT * FROM journal; - - id | name | publisher - ----+---------------------------+------------------ - 1 | Couchbase Magazine | Couchbase - 0 | Apache Cassandra Magazine | Apache Cassandra - - (2 rows) - -Add another table called ``magazine``: - -:: - - cqlsh:catalogkeyspace> CREATE TABLE magazine ( - ... id int, - ... name text, - ... publisher text, - ... PRIMARY KEY (id) - ... ); - -Add table data to ``magazine``: - -:: - - cqlsh:catalogkeyspace> INSERT INTO magazine (id, name, publisher) VALUES (0, 'Apache - Cassandra Magazine', 'Apache Cassandra'); - cqlsh:catalogkeyspace> INSERT INTO magazine (id, name, publisher) VALUES (1, 'Couchbase - Magazine', 'Couchbase'); - -List table ``magazine``’s data: - -:: - - cqlsh:catalogkeyspace> SELECT * from magazine; - - id | name | publisher - ----+---------------------------+------------------ - 1 | Couchbase Magazine | Couchbase - 0 | Apache Cassandra Magazine | Apache Cassandra - - (2 rows) - -Snapshots -^^^^^^^^^ -In this section including sub-sections we shall demonstrate creating snapshots. The command used to create a snapshot is ``nodetool snapshot`` and its usage is as follows: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool help snapshot - NAME - nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot - of the specified table - - SYNOPSIS - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] snapshot - [(-cf | --column-family
| --table
)] - [(-kt | --kt-list | -kc | --kc.list )] - [(-sf | --skip-flush)] [(-t | --tag )] [--] [] - - OPTIONS - -cf
, --column-family
, --table
- The table name (you must specify one and only one keyspace for using - this option) - - -h , --host - Node hostname or ip address - - -kt , --kt-list , -kc , --kc.list - The list of Keyspace.table to take snapshot.(you must not specify - only keyspace) - - -p , --port - Remote jmx agent port number - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - -sf, --skip-flush - Do not flush memtables before snapshotting (snapshot will not - contain unflushed data) - - -t , --tag - The name of the snapshot - - -u , --username - Remote jmx agent username - - -- - This option can be used to separate command-line options from the - list of argument, (useful when arguments might be mistaken for - command-line options - - [] - List of keyspaces. By default, all keyspaces - -Configuring for Snapshots -*************************** -To demonstrate creating snapshots with Nodetool on the commandline we have set -``auto_snapshots`` setting to ``false`` in ``cassandra.yaml``: - -:: - - auto_snapshot: false - -Also set ``snapshot_before_compaction`` to ``false`` to disable creating snapshots automatically before compaction: - -:: - - snapshot_before_compaction: false - -Creating Snapshots -******************* -To demonstrate creating snapshots start with no snapshots. Search for snapshots and none get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name snapshots - -We shall be using the example keyspaces and tables to create snapshots. - -Taking Snapshots of all Tables in a Keyspace -+++++++++++++++++++++++++++++++++++++++++++++ - -To take snapshots of all tables in a keyspace and also optionally tag the snapshot the syntax becomes: - -:: - - nodetool snapshot --tag -- - -As an example create a snapshot called ``catalog-ks`` for all the tables in the ``catalogkeyspace`` keyspace: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --tag catalog-ks -- catalogkeyspace - Requested creating snapshot(s) for [catalogkeyspace] with snapshot name [catalog-ks] and - options {skipFlush=false} - Snapshot directory: catalog-ks - -Search for snapshots and ``snapshots`` directories for the tables ``journal`` and ``magazine``, which are in the ``catalogkeyspace`` keyspace should get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name snapshots - ./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots - ./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots - -Snapshots of all tables in multiple keyspaces may be created similarly, as an example: - -:: - - nodetool snapshot --tag catalog-cql-ks --catalogkeyspace,cqlkeyspace - -Taking Snapshots of Single Table in a Keyspace -++++++++++++++++++++++++++++++++++++++++++++++ -To take a snapshot of a single table the ``nodetool snapshot`` command syntax becomes as follows: - -:: - - nodetool snapshot --tag --table
-- - -As an example create a snapshot for table ``magazine`` in keyspace ``catalokeyspace``: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --tag magazine --table magazine -- - catalogkeyspace - Requested creating snapshot(s) for [catalogkeyspace] with snapshot name [magazine] and - options {skipFlush=false} - Snapshot directory: magazine - -Taking Snapshot of Multiple Tables from same Keyspace -++++++++++++++++++++++++++++++++++++++++++++++++++++++ -To take snapshots of multiple tables in a keyspace the list of *Keyspace.table* must be specified with option ``--kt-list``. As an example create snapshots for tables ``t`` and ``t2`` in the ``cqlkeyspace`` keyspace: - -:: - - nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag multi-table - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag - multi-table - Requested creating snapshot(s) for [cqlkeyspace.t,cqlkeyspace.t2] with snapshot name [multi- - table] and options {skipFlush=false} - Snapshot directory: multi-table - -Multiple snapshots of the same set of tables may be created and tagged with a different name. As an example, create another snapshot for the same set of tables ``t`` and ``t2`` in the ``cqlkeyspace`` keyspace and tag the snapshots differently: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag - multi-table-2 - Requested creating snapshot(s) for [cqlkeyspace.t,cqlkeyspace.t2] with snapshot name [multi- - table-2] and options {skipFlush=false} - Snapshot directory: multi-table-2 - -Taking Snapshot of Multiple Tables from Different Keyspaces -++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -To take snapshots of multiple tables that are in different keyspaces the command syntax is the same as when multiple tables are in the same keyspace. Each *keyspace.table* must be specified separately in the ``--kt-list`` option. As an example, create a snapshot for table ``t`` in the ``cqlkeyspace`` and table ``journal`` in the catalogkeyspace and tag the snapshot ``multi-ks``. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list - catalogkeyspace.journal,cqlkeyspace.t --tag multi-ks - Requested creating snapshot(s) for [catalogkeyspace.journal,cqlkeyspace.t] with snapshot - name [multi-ks] and options {skipFlush=false} - Snapshot directory: multi-ks - -Listing Snapshots -*************************** -To list snapshots use the ``nodetool listsnapshots`` command. 
All the snapshots that we created in the preceding examples get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool listsnapshots - Snapshot Details: - Snapshot name Keyspace name Column family name True size Size on disk - multi-table cqlkeyspace t2 4.86 KiB 5.67 KiB - multi-table cqlkeyspace t 4.89 KiB 5.7 KiB - multi-ks cqlkeyspace t 4.89 KiB 5.7 KiB - multi-ks catalogkeyspace journal 4.9 KiB 5.73 KiB - magazine catalogkeyspace magazine 4.9 KiB 5.73 KiB - multi-table-2 cqlkeyspace t2 4.86 KiB 5.67 KiB - multi-table-2 cqlkeyspace t 4.89 KiB 5.7 KiB - catalog-ks catalogkeyspace journal 4.9 KiB 5.73 KiB - catalog-ks catalogkeyspace magazine 4.9 KiB 5.73 KiB - - Total TrueDiskSpaceUsed: 44.02 KiB - -Finding Snapshots Directories -****************************** -The ``snapshots`` directories may be listed with ``find –name snapshots`` command: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name snapshots - ./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/snapshots - ./cassandra/data/data/cqlkeyspace/t2-d993a390c22911e9b1350d927649052c/snapshots - ./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots - ./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots - [ec2-user@ip-10-0-2-238 ~]$ - -To list the snapshots for a particular table first change directory ( with ``cd``) to the ``snapshots`` directory for the table. As an example, list the snapshots for the ``catalogkeyspace/journal`` table. Two snapshots get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/journal- - 296a2d30c22a11e9b1350d927649052c/snapshots - [ec2-user@ip-10-0-2-238 snapshots]$ ls -l - total 0 - drwxrwxr-x. 2 ec2-user ec2-user 265 Aug 19 02:44 catalog-ks - drwxrwxr-x. 2 ec2-user ec2-user 265 Aug 19 02:52 multi-ks - -A ``snapshots`` directory lists the SSTable files in the snapshot. ``Schema.cql`` file is also created in each snapshot for the schema definition DDL that may be run in CQL to create the table when restoring from a snapshot: - -:: - - [ec2-user@ip-10-0-2-238 snapshots]$ cd catalog-ks - [ec2-user@ip-10-0-2-238 catalog-ks]$ ls -l - total 44 - -rw-rw-r--. 1 ec2-user ec2-user 31 Aug 19 02:44 manifest.jsonZ - - -rw-rw-r--. 4 ec2-user ec2-user 47 Aug 19 02:38 na-1-big-CompressionInfo.db - -rw-rw-r--. 4 ec2-user ec2-user 97 Aug 19 02:38 na-1-big-Data.db - -rw-rw-r--. 4 ec2-user ec2-user 10 Aug 19 02:38 na-1-big-Digest.crc32 - -rw-rw-r--. 4 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Filter.db - -rw-rw-r--. 4 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Index.db - -rw-rw-r--. 4 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db - -rw-rw-r--. 4 ec2-user ec2-user 56 Aug 19 02:38 na-1-big-Summary.db - -rw-rw-r--. 4 ec2-user ec2-user 92 Aug 19 02:38 na-1-big-TOC.txt - -rw-rw-r--. 1 ec2-user ec2-user 814 Aug 19 02:44 schema.cql - -Clearing Snapshots -****************** -Snapshots may be cleared or deleted with the ``nodetool clearsnapshot`` command. Either a specific snapshot name must be specified or the ``–all`` option must be specified. -As an example delete a snapshot called ``magazine`` from keyspace ``cqlkeyspace``: - -:: - - nodetool clearsnapshot -t magazine – cqlkeyspace - Delete all snapshots from cqlkeyspace with the –all option. - nodetool clearsnapshot –all -- cqlkeyspace - - - -Incremental Backups -^^^^^^^^^^^^^^^^^^^ -In the following sub-sections we shall discuss configuring and creating incremental backups. 
- -Configuring for Incremental Backups -*********************************** - -To create incremental backups set ``incremental_backups`` to ``true`` in ``cassandra.yaml``. - -:: - - incremental_backups: true - -This is the only setting needed to create incremental backups. By default ``incremental_backups`` setting is set to ``false`` because a new set of SSTable files is created for each data flush and if several CQL statements are to be run the ``backups`` directory could fill up quickly and use up storage that is needed to store table data. -Incremental backups may also be enabled on the command line with the Nodetool command ``nodetool enablebackup``. Incremental backups may be disabled with ``nodetool disablebackup`` command. Status of incremental backups, whether they are enabled may be found with ``nodetool statusbackup``. - - - -Creating Incremental Backups -****************************** -After each table is created flush the table data with ``nodetool flush`` command. Incremental backups get created. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t2 - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush catalogkeyspace journal magazine - -Finding Incremental Backups -*************************** - -Incremental backups are created within the Cassandra’s ``data`` directory within a table directory. Backups may be found with following command. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name backups - - ./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups - ./cassandra/data/data/cqlkeyspace/t2-d993a390c22911e9b1350d927649052c/backups - ./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/backups - ./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/backups - -Creating an Incremental Backup -****************************** -This section discusses how incremental backups are created in more detail starting with when a new keyspace is created and a table is added. Create a keyspace called ``CQLKeyspace`` (arbitrary name). - -:: - - cqlsh> CREATE KEYSPACE CQLKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3} - -Create a table called ``t`` within the ``CQLKeyspace`` keyspace: - -:: - - cqlsh> USE CQLKeyspace; - cqlsh:cqlkeyspace> CREATE TABLE t ( - ... id int, - ... k int, - ... v text, - ... PRIMARY KEY (id) - ... ); - -Flush the keyspace and table: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t - -Search for backups and a ``backups`` directory should get listed even though we have added no table data yet. 
- -:: - - [ec2-user@ip-10-0-2-238 ~]$ find -name backups - - ./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups - -Change directory to the ``backups`` directory and list files and no files get listed as no table data has been added yet: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t- - d132e240c21711e9bbee19821dcea330/backups - [ec2-user@ip-10-0-2-238 backups]$ ls -l - total 0 - -Next, add a row of data to table ``t`` that we created: - -:: - - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - -Run the ``nodetool flush`` command to flush table data: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t - -List the files and directories in the ``backups`` directory and SSTable files for an incremental backup get listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t- - d132e240c21711e9bbee19821dcea330/backups - [ec2-user@ip-10-0-2-238 backups]$ ls -l - total 36 - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 00:32 na-1-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 43 Aug 19 00:32 na-1-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 00:32 na-1-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 00:32 na-1-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 8 Aug 19 00:32 na-1-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:32 na-1-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 00:32 na-1-big-Summary.db - -rw-rw-r--. 2 ec2-user ec2-user 92 Aug 19 00:32 na-1-big-TOC.txt - -Add another row of data: - -:: - - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1'); - -Again, run the ``nodetool flush`` command: - -:: - - [ec2-user@ip-10-0-2-238 backups]$ nodetool flush cqlkeyspace t - -A new incremental backup gets created for the new data added. List the files in the ``backups`` directory for table ``t`` and two sets of SSTable files get listed, one for each incremental backup. The SSTable files are timestamped, which distinguishes the first incremental backup from the second: - -:: - - [ec2-user@ip-10-0-2-238 backups]$ ls -l - total 72 - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 00:32 na-1-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 43 Aug 19 00:32 na-1-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 00:32 na-1-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 00:32 na-1-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 8 Aug 19 00:32 na-1-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:32 na-1-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 00:32 na-1-big-Summary.db - -rw-rw-r--. 2 ec2-user ec2-user 92 Aug 19 00:32 na-1-big-TOC.txt - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 00:35 na-2-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 41 Aug 19 00:35 na-2-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 00:35 na-2-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 00:35 na-2-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 8 Aug 19 00:35 na-2-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:35 na-2-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 00:35 na-2-big-Summary.db - -rw-rw-r--. 
2 ec2-user ec2-user 92 Aug 19 00:35 na-2-big-TOC.txt - [ec2-user@ip-10-0-2-238 backups]$ - -The ``backups`` directory for table ``cqlkeyspace/t`` is created within the ``data`` directory for the table: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t- - d132e240c21711e9bbee19821dcea330 - [ec2-user@ip-10-0-2-238 t-d132e240c21711e9bbee19821dcea330]$ ls -l - total 36 - drwxrwxr-x. 2 ec2-user ec2-user 226 Aug 19 02:30 backups - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 02:30 na-1-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 79 Aug 19 02:30 na-1-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 02:30 na-1-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 02:30 na-1-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 02:30 na-1-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4696 Aug 19 02:30 na-1-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 02:30 na-1-big-Summary.db - -rw-rw-r--. 2 ec2-user ec2-user 92 Aug 19 02:30 na-1-big-TOC.txt - -The incremental backups for the other keyspaces/tables get created similarly. As an example the ``backups`` directory for table ``catalogkeyspace/magazine`` is created within the data directory: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/magazine- - 446eae30c22a11e9b1350d927649052c - [ec2-user@ip-10-0-2-238 magazine-446eae30c22a11e9b1350d927649052c]$ ls -l - total 36 - drwxrwxr-x. 2 ec2-user ec2-user 226 Aug 19 02:38 backups - -rw-rw-r--. 2 ec2-user ec2-user 47 Aug 19 02:38 na-1-big-CompressionInfo.db - -rw-rw-r--. 2 ec2-user ec2-user 97 Aug 19 02:38 na-1-big-Data.db - -rw-rw-r--. 2 ec2-user ec2-user 10 Aug 19 02:38 na-1-big-Digest.crc32 - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Filter.db - -rw-rw-r--. 2 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Index.db - -rw-rw-r--. 2 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db - -rw-rw-r--. 2 ec2-user ec2-user 56 Aug 19 02:38 na-1-big-Summary.db - -rw-rw-r--. 2 ec2-user ec2-user 92 Aug 19 02:38 na-1-big-TOC.txt - - - - - -Restoring from Incremental Backups and Snapshots -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The two main tools/commands for restoring a table after it has been dropped are: - -- sstableloader -- nodetool import - -A snapshot contains essentially the same set of SSTable files as an incremental backup does with a few additional files. A snapshot includes a ``schema.cql`` file for the schema DDL to create a table in CQL. A table backup does not include DDL which must be obtained from a snapshot when restoring from an incremental backup. - - diff --git a/src/doc/4.0-beta1/_sources/operating/bloom_filters.rst.txt b/src/doc/4.0-beta1/_sources/operating/bloom_filters.rst.txt deleted file mode 100644 index 0b37c18da..000000000 --- a/src/doc/4.0-beta1/_sources/operating/bloom_filters.rst.txt +++ /dev/null @@ -1,65 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Bloom Filters -------------- - -In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every -SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter. - -Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: - The -data definitely does not exist in the given file, or - The data probably exists in the given file. - -While bloom filters cannot guarantee that the data exists in a given SSTable, bloom filters can be made more accurate -by allowing them to consume more RAM. Operators have the opportunity to tune this behavior per table by adjusting the -``bloom_filter_fp_chance`` to a float between 0 and 1. - -The default value for ``bloom_filter_fp_chance`` is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all -other cases. - -Bloom filters are stored in RAM, but are stored offheap, so operators should not consider bloom filters when selecting -the maximum heap size. As accuracy improves (as the ``bloom_filter_fp_chance`` gets closer to 0), memory usage -increases non-linearly - the bloom filter for ``bloom_filter_fp_chance = 0.01`` will require about three times as much -memory as the same table with ``bloom_filter_fp_chance = 0.1``. - -Typical values for ``bloom_filter_fp_chance`` are usually between 0.01 (1%) and 0.1 (10%) false-positive chance, where -Cassandra may scan an SSTable for a row, only to find that it does not exist on the disk. The parameter should be tuned -by use case: - -- Users with more RAM and slower disks may benefit from setting the ``bloom_filter_fp_chance`` to a numerically lower - number (such as 0.01) to avoid excess IO operations -- Users with less RAM, more dense nodes, or very fast disks may tolerate a higher ``bloom_filter_fp_chance`` in order to - save RAM at the expense of excess IO operations -- In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics - workloads), setting the ``bloom_filter_fp_chance`` to a much higher number is acceptable. - -Changing -^^^^^^^^ - -The bloom filter false positive chance is visible in the ``DESCRIBE TABLE`` output as the field -``bloom_filter_fp_chance``. Operators can change the value with an ``ALTER TABLE`` statement: -:: - - ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01 - -Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is -written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ``ALTER TABLE`` statement, new -files on disk will be written with the new ``bloom_filter_fp_chance``, but existing sstables will not be modified until -they are compacted - if an operator needs a change to ``bloom_filter_fp_chance`` to take effect, they can trigger an -SSTable rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the sstables on -disk, regenerating the bloom filters in the process.
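To tie the above together, a brief, illustrative sequence (the keyspace and table names are placeholders) is to change the false positive chance and then rewrite the existing SSTables so that new Filter components are generated:

::

    cqlsh> ALTER TABLE keyspace1.standard1 WITH bloom_filter_fp_chance = 0.01;

    # newly flushed/compacted SSTables pick up the new value automatically;
    # rewrite the existing ones so their bloom filters are regenerated too
    $ nodetool upgradesstables -a keyspace1 standard1

    # the table-level setting can be confirmed afterwards
    cqlsh> DESCRIBE TABLE keyspace1.standard1;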
diff --git a/src/doc/4.0-beta1/_sources/operating/bulk_loading.rst.txt b/src/doc/4.0-beta1/_sources/operating/bulk_loading.rst.txt deleted file mode 100644 index 850260ac0..000000000 --- a/src/doc/4.0-beta1/_sources/operating/bulk_loading.rst.txt +++ /dev/null @@ -1,660 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. -.. highlight:: none - -.. _bulk-loading: - -Bulk Loading -============== - -Bulk loading of data in Apache Cassandra is supported by different tools. The data to be bulk loaded must be in the form of SSTables. Cassandra does not support loading data in any other format such as CSV, JSON, and XML directly. Bulk loading could be used to: - -- Restore incremental backups and snapshots. Backups and snapshots are already in the form of SSTables. -- Load existing SSTables into another cluster, which could have a different number of nodes or replication strategy. -- Load external data into a cluster - -**Note*: CSV Data can be loaded via the cqlsh COPY command but we do not recommend this for bulk loading, which typically requires many GB or TB of data. - -Tools for Bulk Loading -^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra provides two commands or tools for bulk loading data. These are: - -- Cassandra Bulk loader, also called ``sstableloader`` -- The ``nodetool import`` command - -The ``sstableloader`` and ``nodetool import`` are accessible if the Cassandra installation ``bin`` directory is in the ``PATH`` environment variable. Or these may be accessed directly from the ``bin`` directory. We shall discuss each of these next. We shall use the example or sample keyspaces and tables created in the Backups section. - -Using sstableloader -^^^^^^^^^^^^^^^^^^^ - -The ``sstableloader`` is the main tool for bulk uploading data. The ``sstableloader`` streams SSTable data files to a running cluster. The ``sstableloader`` loads data conforming to the replication strategy and replication factor. The table to upload data to does need not to be empty. - -The only requirements to run ``sstableloader`` are: - -1. One or more comma separated initial hosts to connect to and get ring information. -2. A directory path for the SSTables to load. - -Its usage is as follows. - -:: - - sstableloader [options] - -Sstableloader bulk loads the SSTables found in the directory ```` to the configured cluster. The ```` is used as the target *keyspace/table* name. As an example, to load an SSTable named -``Standard1-g-1-Data.db`` into ``Keyspace1/Standard1``, you will need to have the -files ``Standard1-g-1-Data.db`` and ``Standard1-g-1-Index.db`` in a directory ``/path/to/Keyspace1/Standard1/``. 
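As a minimal, hypothetical illustration of that directory convention (the paths, host address and SSTable generation names below are placeholders, not output from a real run), the SSTable components are laid out under a keyspace/table directory and that directory is handed to ``sstableloader``:

::

    $ mkdir -p /path/to/Keyspace1/Standard1
    $ cp Standard1-g-1-* /path/to/Keyspace1/Standard1/

    # the last two path components (Keyspace1/Standard1) become the target keyspace/table
    $ sstableloader --nodes 10.0.2.238 /path/to/Keyspace1/Standard1/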
- -Sstableloader Option to accept Target keyspace name -**************************************************** -Often as part of a backup strategy some Cassandra DBAs store an entire data directory. When corruption in data is found then they would like to restore data in the same cluster (for large clusters 200 nodes) but with different keyspace name. - -Currently ``sstableloader`` derives keyspace name from the folder structure. As an option to specify target keyspace name as part of ``sstableloader``, version 4.0 adds support for the ``--target-keyspace`` option (`CASSANDRA-13884 -`_). - -The supported options are as follows from which only ``-d,--nodes `` is required. - -:: - - -alg,--ssl-alg Client SSL: algorithm - - -ap,--auth-provider Custom - AuthProvider class name for - cassandra authentication - -ciphers,--ssl-ciphers Client SSL: - comma-separated list of - encryption suites to use - -cph,--connections-per-host Number of - concurrent connections-per-host. - -d,--nodes Required. - Try to connect to these hosts (comma separated) initially for ring information - - -f,--conf-path cassandra.yaml file path for streaming throughput and client/server SSL. - - -h,--help Display this help message - - -i,--ignore Don't stream to this (comma separated) list of nodes - - -idct,--inter-dc-throttle Inter-datacenter throttle speed in Mbits (default unlimited) - - -k,--target-keyspace Target - keyspace name - -ks,--keystore Client SSL: - full path to keystore - -kspw,--keystore-password Client SSL: - password of the keystore - --no-progress Don't - display progress - -p,--port Port used - for native connection (default 9042) - -prtcl,--ssl-protocol Client SSL: - connections protocol to use (default: TLS) - -pw,--password Password for - cassandra authentication - -sp,--storage-port Port used - for internode communication (default 7000) - -spd,--server-port-discovery Use ports - published by server to decide how to connect. With SSL requires StartTLS - to be used. - -ssp,--ssl-storage-port Port used - for TLS internode communication (default 7001) - -st,--store-type Client SSL: - type of store - -t,--throttle Throttle - speed in Mbits (default unlimited) - -ts,--truststore Client SSL: - full path to truststore - -tspw,--truststore-password Client SSL: - Password of the truststore - -u,--username Username for - cassandra authentication - -v,--verbose verbose - output - -The ``cassandra.yaml`` file could be provided on the command-line with ``-f`` option to set up streaming throughput, client and server encryption options. Only ``stream_throughput_outbound_megabits_per_sec``, ``server_encryption_options`` and ``client_encryption_options`` are read from yaml. You can override options read from ``cassandra.yaml`` with corresponding command line options. - -A sstableloader Demo -******************** -We shall demonstrate using ``sstableloader`` by uploading incremental backup data for table ``catalogkeyspace.magazine``. We shall also use a snapshot of the same table to bulk upload in a different run of ``sstableloader``. The backups and snapshots for the ``catalogkeyspace.magazine`` table are listed as follows. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/magazine- - 446eae30c22a11e9b1350d927649052c - [ec2-user@ip-10-0-2-238 magazine-446eae30c22a11e9b1350d927649052c]$ ls -l - total 0 - drwxrwxr-x. 2 ec2-user ec2-user 226 Aug 19 02:38 backups - drwxrwxr-x. 
4 ec2-user ec2-user 40 Aug 19 02:45 snapshots - -The directory path structure of SSTables to be uploaded using ``sstableloader`` is used as the target keyspace/table. - -We could have directly uploaded from the ``backups`` and ``snapshots`` directories respectively if the directory structure were in the format used by ``sstableloader``. But the directory path of backups and snapshots for SSTables is ``/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/backups`` and ``/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots`` respectively, which cannot be used to upload SSTables to ``catalogkeyspace.magazine`` table. The directory path structure must be ``/catalogkeyspace/magazine/`` to use ``sstableloader``. We need to create a new directory structure to upload SSTables with ``sstableloader`` which is typical when using ``sstableloader``. Create a directory structure ``/catalogkeyspace/magazine`` and set its permissions. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo mkdir -p /catalogkeyspace/magazine - [ec2-user@ip-10-0-2-238 ~]$ sudo chmod -R 777 /catalogkeyspace/magazine - -Bulk Loading from an Incremental Backup -+++++++++++++++++++++++++++++++++++++++ -An incremental backup does not include the DDL for a table. The table must already exist. If the table was dropped it may be created using the ``schema.cql`` generated with every snapshot of a table. As we shall be using ``sstableloader`` to load SSTables to the ``magazine`` table, the table must exist prior to running ``sstableloader``. The table does not need to be empty but we have used an empty table as indicated by a CQL query: - -:: - - cqlsh:catalogkeyspace> SELECT * FROM magazine; - - id | name | publisher - ----+------+----------- - - (0 rows) - -After the table to upload has been created copy the SSTable files from the ``backups`` directory to the ``/catalogkeyspace/magazine/`` directory that we created. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo cp ./cassandra/data/data/catalogkeyspace/magazine- - 446eae30c22a11e9b1350d927649052c/backups/* /catalogkeyspace/magazine/ - -Run the ``sstableloader`` to upload SSTables from the ``/catalogkeyspace/magazine/`` directory. 
- -:: - - sstableloader --nodes 10.0.2.238 /catalogkeyspace/magazine/ - -The output from the ``sstableloader`` command should be similar to the listed: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sstableloader --nodes 10.0.2.238 /catalogkeyspace/magazine/ - Opening SSTables and calculating sections to stream - Streaming relevant part of /catalogkeyspace/magazine/na-1-big-Data.db - /catalogkeyspace/magazine/na-2-big-Data.db to [35.173.233.153:7000, 10.0.2.238:7000, - 54.158.45.75:7000] - progress: [35.173.233.153:7000]0:1/2 88 % total: 88% 0.018KiB/s (avg: 0.018KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% total: 176% 33.807KiB/s (avg: 0.036KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% total: 176% 0.000KiB/s (avg: 0.029KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:1/2 39 % total: 81% 0.115KiB/s - (avg: 0.024KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % total: 108% - 97.683KiB/s (avg: 0.033KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % - [54.158.45.75:7000]0:1/2 39 % total: 80% 0.233KiB/s (avg: 0.040KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % - [54.158.45.75:7000]0:2/2 78 % total: 96% 88.522KiB/s (avg: 0.049KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % - [54.158.45.75:7000]0:2/2 78 % total: 96% 0.000KiB/s (avg: 0.045KiB/s) - progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % - [54.158.45.75:7000]0:2/2 78 % total: 96% 0.000KiB/s (avg: 0.044KiB/s) - -After the ``sstableloader`` has run query the ``magazine`` table and the loaded table should get listed when a query is run. - -:: - - cqlsh:catalogkeyspace> SELECT * FROM magazine; - - id | name | publisher - ----+---------------------------+------------------ - 1 | Couchbase Magazine | Couchbase - 0 | Apache Cassandra Magazine | Apache Cassandra - - (2 rows) - cqlsh:catalogkeyspace> - -Bulk Loading from a Snapshot -+++++++++++++++++++++++++++++ -In this section we shall demonstrate restoring a snapshot of the ``magazine`` table to the ``magazine`` table. As we used the same table to restore data from a backup the directory structure required by ``sstableloader`` should already exist. If the directory structure needed to load SSTables to ``catalogkeyspace.magazine`` does not exist create the directories and set their permissions. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo mkdir -p /catalogkeyspace/magazine - [ec2-user@ip-10-0-2-238 ~]$ sudo chmod -R 777 /catalogkeyspace/magazine - -As we shall be copying the snapshot files to the directory remove any files that may be in the directory. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo rm /catalogkeyspace/magazine/* - [ec2-user@ip-10-0-2-238 ~]$ cd /catalogkeyspace/magazine/ - [ec2-user@ip-10-0-2-238 magazine]$ ls -l - total 0 - - -Copy the snapshot files to the ``/catalogkeyspace/magazine`` directory. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sudo cp ./cassandra/data/data/catalogkeyspace/magazine- - 446eae30c22a11e9b1350d927649052c/snapshots/magazine/* /catalogkeyspace/magazine - -List the files in the ``/catalogkeyspace/magazine`` directory and a ``schema.cql`` should also get listed. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ cd /catalogkeyspace/magazine - [ec2-user@ip-10-0-2-238 magazine]$ ls -l - total 44 - -rw-r--r--. 1 root root 31 Aug 19 04:13 manifest.json - -rw-r--r--. 1 root root 47 Aug 19 04:13 na-1-big-CompressionInfo.db - -rw-r--r--. 1 root root 97 Aug 19 04:13 na-1-big-Data.db - -rw-r--r--. 
1 root root 10 Aug 19 04:13 na-1-big-Digest.crc32 - -rw-r--r--. 1 root root 16 Aug 19 04:13 na-1-big-Filter.db - -rw-r--r--. 1 root root 16 Aug 19 04:13 na-1-big-Index.db - -rw-r--r--. 1 root root 4687 Aug 19 04:13 na-1-big-Statistics.db - -rw-r--r--. 1 root root 56 Aug 19 04:13 na-1-big-Summary.db - -rw-r--r--. 1 root root 92 Aug 19 04:13 na-1-big-TOC.txt - -rw-r--r--. 1 root root 815 Aug 19 04:13 schema.cql - -Alternatively create symlinks to the snapshot folder instead of copying the data, something like: - -:: - - mkdir keyspace_name - ln -s _path_to_snapshot_folder keyspace_name/table_name - -If the ``magazine`` table was dropped run the DDL in the ``schema.cql`` to create the table. Run the ``sstableloader`` with the following command. - -:: - - sstableloader --nodes 10.0.2.238 /catalogkeyspace/magazine/ - -As the output from the command indicates SSTables get streamed to the cluster. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ sstableloader --nodes 10.0.2.238 /catalogkeyspace/magazine/ - - Established connection to initial hosts - Opening SSTables and calculating sections to stream - Streaming relevant part of /catalogkeyspace/magazine/na-1-big-Data.db to - [35.173.233.153:7000, 10.0.2.238:7000, 54.158.45.75:7000] - progress: [35.173.233.153:7000]0:1/1 176% total: 176% 0.017KiB/s (avg: 0.017KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% total: 176% 0.000KiB/s (avg: 0.014KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % total: 108% 0.115KiB/s - (avg: 0.017KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % - [54.158.45.75:7000]0:1/1 78 % total: 96% 0.232KiB/s (avg: 0.024KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % - [54.158.45.75:7000]0:1/1 78 % total: 96% 0.000KiB/s (avg: 0.022KiB/s) - progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % - [54.158.45.75:7000]0:1/1 78 % total: 96% 0.000KiB/s (avg: 0.021KiB/s) - -Some other requirements of ``sstableloader`` that should be kept into consideration are: - -- The SSTables to be loaded must be compatible with the Cassandra version being loaded into. -- Repairing tables that have been loaded into a different cluster does not repair the source tables. -- Sstableloader makes use of port 7000 for internode communication. -- Before restoring incremental backups run ``nodetool flush`` to backup any data in memtables - -Using nodetool import -^^^^^^^^^^^^^^^^^^^^^ -In this section we shall import SSTables into a table using the ``nodetool import`` command. The ``nodetool refresh`` command is deprecated, and it is recommended to use ``nodetool import`` instead. The ``nodetool refresh`` does not have an option to load new SSTables from a separate directory which the ``nodetool import`` does. - -The command usage is as follows. - -:: - - nodetool [(-h | --host )] [(-p | --port )] - [(-pp | --print-port)] [(-pw | --password )] - [(-pwf | --password-file )] - [(-u | --username )] import - [(-c | --no-invalidate-caches)] [(-e | --extended-verify)] - [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)] - [(-t | --no-tokens)] [(-v | --no-verify)] [--]
- ... - -The arguments ``keyspace``, ``table`` name and ``directory`` to import SSTables from are required. - -The supported options are as follows. - -:: - - -c, --no-invalidate-caches - Don't invalidate the row cache when importing - - -e, --extended-verify - Run an extended verify, verifying all values in the new SSTables - - -h , --host - Node hostname or ip address - - -l, --keep-level - Keep the level on the new SSTables - - -p , --port - Remote jmx agent port number - - -pp, --print-port - Operate in 4.0 mode with hosts disambiguated by port number - - -pw , --password - Remote jmx agent password - - -pwf , --password-file - Path to the JMX password file - - -q, --quick - Do a quick import without verifying SSTables, clearing row cache or - checking in which data directory to put the file - - -r, --keep-repaired - Keep any repaired information from the SSTables - - -t, --no-tokens - Don't verify that all tokens in the new SSTable are owned by the - current node - - -u , --username - Remote jmx agent username - - -v, --no-verify - Don't verify new SSTables - - -- - This option can be used to separate command-line options from the - list of argument, (useful when arguments might be mistaken for - command-line options - -As the keyspace and table are specified on the command line ``nodetool import`` does not have the same requirement that ``sstableloader`` does, which is to have the SSTables in a specific directory path. When importing snapshots or incremental backups with ``nodetool import`` the SSTables don’t need to be copied to another directory. - -Importing Data from an Incremental Backup -***************************************** - -In this section we shall demonstrate using ``nodetool import`` to import SSTables from an incremental backup. We shall use the example table ``cqlkeyspace.t``. Drop table ``t`` as we are demonstrating to restore the table. - -:: - - cqlsh:cqlkeyspace> DROP table t; - -An incremental backup for a table does not include the schema definition for the table. If the schema definition is not kept as a separate backup, the ``schema.cql`` from a backup of the table may be used to create the table as follows. - -:: - - cqlsh:cqlkeyspace> CREATE TABLE IF NOT EXISTS cqlkeyspace.t ( - ... id int PRIMARY KEY, - ... k int, - ... v text) - ... WITH ID = d132e240-c217-11e9-bbee-19821dcea330 - ... AND bloom_filter_fp_chance = 0.01 - ... AND crc_check_chance = 1.0 - ... AND default_time_to_live = 0 - ... AND gc_grace_seconds = 864000 - ... AND min_index_interval = 128 - ... AND max_index_interval = 2048 - ... AND memtable_flush_period_in_ms = 0 - ... AND speculative_retry = '99p' - ... AND additional_write_policy = '99p' - ... AND comment = '' - ... AND caching = { 'keys': 'ALL', 'rows_per_partition': 'NONE' } - ... AND compaction = { 'max_threshold': '32', 'min_threshold': '4', - 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' } - ... AND compression = { 'chunk_length_in_kb': '16', 'class': - 'org.apache.cassandra.io.compress.LZ4Compressor' } - ... AND cdc = false - ... AND extensions = { }; - -Initially the table could be empty, but does not have to be. - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t; - - id | k | v - ----+---+--- - - (0 rows) - -Run the ``nodetool import`` command by providing the keyspace, table and the backups directory. We don’t need to copy the table backups to another directory to run ``nodetool import`` as we had to when using ``sstableloader``. 
- -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool import -- cqlkeyspace t - ./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups - [ec2-user@ip-10-0-2-238 ~]$ - -The SSTables get imported into the table. Run a query in cqlsh to list the data imported. - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t; - - id | k | v - ----+---+------ - 1 | 1 | val1 - 0 | 0 | val0 - - -Importing Data from a Snapshot -******************************** -Importing SSTables from a snapshot with the ``nodetool import`` command is similar to importing SSTables from an incremental backup. To demonstrate we shall import a snapshot for table ``catalogkeyspace.journal``. Drop the table as we are demonstrating to restore the table from a snapshot. - -:: - - cqlsh:cqlkeyspace> use CATALOGKEYSPACE; - cqlsh:catalogkeyspace> DROP TABLE journal; - -We shall use the ``catalog-ks`` snapshot for the ``journal`` table. List the files in the snapshot. The snapshot includes a ``schema.cql``, which is the schema definition for the ``journal`` table. - -:: - - [ec2-user@ip-10-0-2-238 catalog-ks]$ ls -l - total 44 - -rw-rw-r--. 1 ec2-user ec2-user 31 Aug 19 02:44 manifest.json - -rw-rw-r--. 3 ec2-user ec2-user 47 Aug 19 02:38 na-1-big-CompressionInfo.db - -rw-rw-r--. 3 ec2-user ec2-user 97 Aug 19 02:38 na-1-big-Data.db - -rw-rw-r--. 3 ec2-user ec2-user 10 Aug 19 02:38 na-1-big-Digest.crc32 - -rw-rw-r--. 3 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Filter.db - -rw-rw-r--. 3 ec2-user ec2-user 16 Aug 19 02:38 na-1-big-Index.db - -rw-rw-r--. 3 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db - -rw-rw-r--. 3 ec2-user ec2-user 56 Aug 19 02:38 na-1-big-Summary.db - -rw-rw-r--. 3 ec2-user ec2-user 92 Aug 19 02:38 na-1-big-TOC.txt - -rw-rw-r--. 1 ec2-user ec2-user 814 Aug 19 02:44 schema.cql - -Copy the DDL from the ``schema.cql`` and run in cqlsh to create the ``catalogkeyspace.journal`` table. - -:: - - cqlsh:catalogkeyspace> CREATE TABLE IF NOT EXISTS catalogkeyspace.journal ( - ... id int PRIMARY KEY, - ... name text, - ... publisher text) - ... WITH ID = 296a2d30-c22a-11e9-b135-0d927649052c - ... AND bloom_filter_fp_chance = 0.01 - ... AND crc_check_chance = 1.0 - ... AND default_time_to_live = 0 - ... AND gc_grace_seconds = 864000 - ... AND min_index_interval = 128 - ... AND max_index_interval = 2048 - ... AND memtable_flush_period_in_ms = 0 - ... AND speculative_retry = '99p' - ... AND additional_write_policy = '99p' - ... AND comment = '' - ... AND caching = { 'keys': 'ALL', 'rows_per_partition': 'NONE' } - ... AND compaction = { 'min_threshold': '4', 'max_threshold': - '32', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' } - ... AND compression = { 'chunk_length_in_kb': '16', 'class': - 'org.apache.cassandra.io.compress.LZ4Compressor' } - ... AND cdc = false - ... AND extensions = { }; - - -Run the ``nodetool import`` command to import the SSTables for the snapshot. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool import -- catalogkeyspace journal - ./cassandra/data/data/catalogkeyspace/journal- - 296a2d30c22a11e9b1350d927649052c/snapshots/catalog-ks/ - [ec2-user@ip-10-0-2-238 ~]$ - -Subsequently run a CQL query on the ``journal`` table and the data imported gets listed. 
- -:: - - cqlsh:catalogkeyspace> - cqlsh:catalogkeyspace> SELECT * FROM journal; - - id | name | publisher - ----+---------------------------+------------------ - 1 | Couchbase Magazine | Couchbase - 0 | Apache Cassandra Magazine | Apache Cassandra - - (2 rows) - cqlsh:catalogkeyspace> - - -Bulk Loading External Data -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Bulk loading external data directly is not supported by any of the tools we have discussed which include ``sstableloader`` and ``nodetool import``. The ``sstableloader`` and ``nodetool import`` require data to be in the form of SSTables. Apache Cassandra supports a Java API for generating SSTables from input data. Subsequently the ``sstableloader`` or ``nodetool import`` could be used to bulk load the SSTables. Next, we shall discuss the ``org.apache.cassandra.io.sstable.CQLSSTableWriter`` Java class for generating SSTables. - -Generating SSTables with CQLSSTableWriter Java API -*************************************************** -To generate SSTables using the ``CQLSSTableWriter`` class the following need to be supplied at the least. - -- An output directory to generate the SSTable in -- The schema for the SSTable -- A prepared insert statement -- A partitioner - -The output directory must already have been created. Create a directory (``/sstables`` as an example) and set its permissions. - -:: - - sudo mkdir /sstables - sudo chmod 777 -R /sstables - -Next, we shall discuss To use ``CQLSSTableWriter`` could be used in a Java application. Create a Java constant for the output directory. - -:: - - public static final String OUTPUT_DIR = "./sstables"; - -``CQLSSTableWriter`` Java API has the provision to create a user defined type. Create a new type to store ``int`` data: - -:: - - String type = "CREATE TYPE CQLKeyspace.intType (a int, b int)"; - // Define a String variable for the SSTable schema. - String schema = "CREATE TABLE CQLKeyspace.t (" - + " id int PRIMARY KEY," - + " k int," - + " v1 text," - + " v2 intType," - + ")"; - -Define a ``String`` variable for the prepared insert statement to use: - -:: - - String insertStmt = "INSERT INTO CQLKeyspace.t (id, k, v1, v2) VALUES (?, ?, ?, ?)"; - -The partitioner to use does not need to be set as the default partitioner ``Murmur3Partitioner`` is used. - -All these variables or settings are used by the builder class ``CQLSSTableWriter.Builder`` to create a ``CQLSSTableWriter`` object. - -Create a File object for the output directory. - -:: - - File outputDir = new File(OUTPUT_DIR + File.separator + "CQLKeyspace" + File.separator + "t"); - -Next, obtain a ``CQLSSTableWriter.Builder`` object using ``static`` method ``CQLSSTableWriter.builder()``. Set the output -directory ``File`` object, user defined type, SSTable schema, buffer size, prepared insert statement, and optionally any of the other builder options, and invoke the ``build()`` method to create a ``CQLSSTableWriter`` object: - -:: - - CQLSSTableWriter writer = CQLSSTableWriter.builder() - .inDirectory(outputDir) - .withType(type) - .forTable(schema) - .withBufferSizeInMB(256) - .using(insertStmt).build(); - -Next, set the SSTable data. If any user define types are used obtain a ``UserType`` object for these: - -:: - - UserType userType = writer.getUDType("intType"); - -Add data rows for the resulting SSTable. 
- -:: - - writer.addRow(0, 0, "val0", userType.newValue().setInt("a", 0).setInt("b", 0)); - writer.addRow(1, 1, "val1", userType.newValue().setInt("a", 1).setInt("b", 1)); - writer.addRow(2, 2, "val2", userType.newValue().setInt("a", 2).setInt("b", 2)); - -Close the writer, finalizing the SSTable. - -:: - - writer.close(); - -All the public methods the ``CQLSSTableWriter`` class provides including some other methods that are not discussed in the preceding example are as follows. - -===================================================================== ============ -Method Description -===================================================================== ============ -addRow(java.util.List values) Adds a new row to the writer. Returns a CQLSSTableWriter object. Each provided value type should correspond to the types of the CQL column the value is for. The correspondence between java type and CQL type is the same one than the one documented at www.datastax.com/drivers/java/2.0/apidocs/com/datastax/driver/core/DataType.Name.html#asJavaC lass(). -addRow(java.util.Map values) Adds a new row to the writer. Returns a CQLSSTableWriter object. This is equivalent to the other addRow methods, but takes a map whose keys are the names of the columns to add instead of taking a list of the values in the order of the insert statement used during construction of this SSTable writer. The column names in the map keys must be in lowercase unless the declared column name is a case-sensitive quoted identifier in which case the map key must use the exact case of the column. The values parameter is a map of column name to column values representing the new row to add. If a column is not included in the map, it's value will be null. If the map contains keys that do not correspond to one of the columns of the insert statement used when creating this SSTable writer, the corresponding value is ignored. -addRow(java.lang.Object... values) Adds a new row to the writer. Returns a CQLSSTableWriter object. -CQLSSTableWriter.builder() Returns a new builder for a CQLSSTableWriter. -close() Closes the writer. -rawAddRow(java.nio.ByteBuffer... values) Adds a new row to the writer given already serialized binary values. Returns a CQLSSTableWriter object. The row values must correspond to the bind variables of the insertion statement used when creating by this SSTable writer. -rawAddRow(java.util.List values) Adds a new row to the writer given already serialized binary values. Returns a CQLSSTableWriter object. The row values must correspond to the bind variables of the insertion statement used when creating by this SSTable writer. | -rawAddRow(java.util.Map values) Adds a new row to the writer given already serialized binary values. Returns a CQLSSTableWriter object. The row values must correspond to the bind variables of the insertion statement used when creating by this SSTable writer. | -getUDType(String dataType) Returns the User Defined type used in this SSTable Writer that can be used to create UDTValue instances. -===================================================================== ============ - - -All the public methods the ``CQLSSTableWriter.Builder`` class provides including some other methods that are not discussed in the preceding example are as follows. - -============================================ ============ -Method Description -============================================ ============ -inDirectory(String directory) The directory where to write the SSTables. This is a mandatory option. 
The directory to use should already exist and be writable. -inDirectory(File directory) The directory where to write the SSTables. This is a mandatory option. The directory to use should already exist and be writable. -forTable(String schema) The schema (CREATE TABLE statement) for the table for which SSTable is to be created. The - provided CREATE TABLE statement must use a fully-qualified table name, one that includes the - keyspace name. This is a mandatory option. - -withPartitioner(IPartitioner partitioner) The partitioner to use. By default, Murmur3Partitioner will be used. If this is not the - partitioner used by the cluster for which the SSTables are created, the correct partitioner - needs to be provided. - -using(String insert) The INSERT or UPDATE statement defining the order of the values to add for a given CQL row. - The provided INSERT statement must use a fully-qualified table name, one that includes the - keyspace name. Moreover, said statement must use bind variables since these variables will - be bound to values by the resulting SSTable writer. This is a mandatory option. - -withBufferSizeInMB(int size) The size of the buffer to use. This defines how much data will be buffered before being - written as a new SSTable. This corresponds roughly to the data size that will have the - created SSTable. The default is 128MB, which should be reasonable for a 1GB heap. If - OutOfMemory exception gets generated while using the SSTable writer, should lower this - value. - -sorted() Creates a CQLSSTableWriter that expects sorted inputs. If this option is used, the resulting - SSTable writer will expect rows to be added in SSTable sorted order (and an exception will - be thrown if that is not the case during row insertion). The SSTable sorted order means that - rows are added such that their partition keys respect the partitioner order. This option - should only be used if the rows can be provided in order, which is rarely the case. If the - rows can be provided in order however, using this sorted might be more efficient. If this - option is used, some option like withBufferSizeInMB will be ignored. - -build() Builds a CQLSSTableWriter object. - -============================================ ============ - diff --git a/src/doc/4.0-beta1/_sources/operating/cdc.rst.txt b/src/doc/4.0-beta1/_sources/operating/cdc.rst.txt deleted file mode 100644 index a7177b544..000000000 --- a/src/doc/4.0-beta1/_sources/operating/cdc.rst.txt +++ /dev/null @@ -1,96 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. 
highlight:: none - -Change Data Capture -------------------- - -Overview -^^^^^^^^ - -Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those -tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the -table property ``cdc=true`` (either when :ref:`creating the table ` or -:ref:`altering it `). Upon CommitLogSegment creation, a hard-link to the segment is created in the -directory specified in ``cassandra.yaml``. On segment fsync to disk, if CDC data is present anywhere in the segment a -_cdc.idx file is also created with the integer offset of how much data in the original segment is persisted -to disk. Upon final segment flush, a second line with the human-readable word "COMPLETED" will be added to the _cdc.idx -file indicating that Cassandra has completed all processing on the file. - -We we use an index file rather than just encouraging clients to parse the log realtime off a memory mapped handle as data -can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx -file will ensure that you only parse CDC data for data that is durable. - -A threshold of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will -not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory. - -Configuration -^^^^^^^^^^^^^ - -Enabling or disabling CDC on a table -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -CDC is enable or disable through the `cdc` table property, for instance:: - - CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true; - - ALTER TABLE foo WITH cdc=true; - - ALTER TABLE foo WITH cdc=false; - -cassandra.yaml parameters -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following `cassandra.yaml` are available for CDC: - -``cdc_enabled`` (default: false) - Enable or disable CDC operations node-wide. -``cdc_raw_directory`` (default: ``$CASSANDRA_HOME/data/cdc_raw``) - Destination for CommitLogSegments to be moved after all corresponding memtables are flushed. -``cdc_free_space_in_mb``: (default: min of 4096 and 1/8th volume space) - Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in - ``cdc_raw_directory``. -``cdc_free_space_check_interval_ms`` (default: 250) - When at capacity, we limit the frequency with which we re-calculate the space taken up by ``cdc_raw_directory`` to - prevent burning CPU cycles unnecessarily. Default is to check 4 times per second. - -.. _reading-commitlogsegments: - -Reading CommitLogSegments -^^^^^^^^^^^^^^^^^^^^^^^^^ -Use a `CommitLogReader.java -`__. -Usage is `fairly straightforward -`__ -with a `variety of signatures -`__ -available for use. In order to handle mutations read from disk, implement `CommitLogReadHandler -`__. - -Warnings -^^^^^^^^ - -**Do not enable CDC without some kind of consumption process in-place.** - -If CDC is enabled on a node and then on a table, the ``cdc_free_space_in_mb`` will fill up and then writes to -CDC-enabled tables will be rejected unless some consumption process is in place. 
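-
-As a small, hedged illustration of the index-file convention described earlier in this section (the segment and index file names are hypothetical), a consumer could read the durable offset and the completion marker straight from the ``_cdc.idx`` file in the ``cdc_raw`` directory and parse the matching CommitLog segment only up to that offset:
-
-::
-
-    $ cat cdc_raw/CommitLog-7-1596556455729_cdc.idx
-    34126
-    COMPLETED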
- -Further Reading -^^^^^^^^^^^^^^^ - -- `JIRA ticket `__ -- `JIRA ticket `__ diff --git a/src/doc/4.0-beta1/_sources/operating/compaction/index.rst.txt b/src/doc/4.0-beta1/_sources/operating/compaction/index.rst.txt deleted file mode 100644 index ea505dd47..000000000 --- a/src/doc/4.0-beta1/_sources/operating/compaction/index.rst.txt +++ /dev/null @@ -1,301 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _compaction: - -Compaction ----------- - -Strategies -^^^^^^^^^^ - -Picking the right compaction strategy for your workload will ensure the best performance for both querying and for compaction itself. - -:ref:`Size Tiered Compaction Strategy ` - The default compaction strategy. Useful as a fallback when other strategies don't fit the workload. Most useful for - non pure time series workloads with spinning disks, or when the I/O from :ref:`LCS ` is too high. - - -:ref:`Leveled Compaction Strategy ` - Leveled Compaction Strategy (LCS) is optimized for read heavy workloads, or workloads with lots of updates and deletes. It is not a good choice for immutable time series data. - - -:ref:`Time Window Compaction Strategy ` - Time Window Compaction Strategy is designed for TTL'ed, mostly immutable time series data. - - - -Types of compaction -^^^^^^^^^^^^^^^^^^^ - -The concept of compaction is used for different kinds of operations in Cassandra, the common thing about these -operations is that it takes one or more sstables and output new sstables. The types of compactions are; - -Minor compaction - triggered automatically in Cassandra. -Major compaction - a user executes a compaction over all sstables on the node. -User defined compaction - a user triggers a compaction on a given set of sstables. -Scrub - try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you - will need to run a full repair on the node. -Upgradesstables - upgrade sstables to the latest version. Run this after upgrading to a new major version. -Cleanup - remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been - bootstrapped since that node will take ownership of some ranges from those nodes. -Secondary index rebuild - rebuild the secondary indexes on the node. -Anticompaction - after repair the ranges that were actually repaired are split out of the sstables that existed when repair started. -Sub range compaction - It is possible to only compact a given sub range - this could be useful if you know a token that has been - misbehaving - either gathering many updates or many deletes. (``nodetool compact -st x -et y``) will pick - all sstables containing the range between x and y and issue a compaction for those sstables. 
For STCS this will - most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS - the resulting sstable will end up in L0. - -When is a minor compaction triggered? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -# When an sstable is added to the node through flushing/streaming etc. -# When autocompaction is enabled after being disabled (``nodetool enableautocompaction``) -# When compaction adds new sstables. -# A check for new minor compactions every 5 minutes. - -Merging sstables -^^^^^^^^^^^^^^^^ - -Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently. - -Tombstones and Garbage Collection (GC) Grace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Why Tombstones -~~~~~~~~~~~~~~ - -When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra. - -Deletes without tombstones -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Imagine a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If one of the nodes fails and and our delete operation only removes existing values we can end up with a cluster that -looks like:: - - [], [], [A] - -Then a repair operation would replace the value of [A] back onto the two -nodes which are missing the value.:: - - [A], [A], [A] - -This would cause our data to be resurrected even though it had been -deleted. - -Deletes with Tombstones -~~~~~~~~~~~~~~~~~~~~~~~ - -Starting again with a three node cluster which has the value [A] replicated to every node.:: - - [A], [A], [A] - -If instead of removing data we add a tombstone record, our single node failure situation will look like this.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A] - -Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:: - - [A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]] - -Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as ``gc_grace_seconds`` for every table in Cassandra. - -The gc_grace_seconds parameter and Tombstone Removal -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The table level ``gc_grace_seconds`` parameter controls how long Cassandra will retain tombstones through compaction -events before finally removing them. This duration should directly reflect the amount of time a user expects to allow -before recovering a failed node. After ``gc_grace_seconds`` has expired the tombstone may be removed (meaning there will -no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the -data it covers in another, a compaction must also include both sstable for a tombstone to be removed. 
More precisely, to -be able to drop an actual tombstone the following needs to be true; - -- The tombstone must be older than ``gc_grace_seconds`` -- If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older - than the tombstone containing X must be included in the same compaction. We don't need to care if the partition is in - an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older - than the data it cannot shadow that data. -- If the option ``only_purge_repaired_tombstones`` is enabled, tombstones are only removed if the data has also been - repaired. - -If a node remains down or disconnected for longer than ``gc_grace_seconds`` it's deleted data will be repaired back to -the other nodes and re-appear in the cluster. This is basically the same as in the "Deletes without Tombstones" section. -Note that tombstones will not be removed until a compaction event even if ``gc_grace_seconds`` has elapsed. - -The default value for ``gc_grace_seconds`` is 864000 which is equivalent to 10 days. This can be set when creating or -altering a table using ``WITH gc_grace_seconds``. - -TTL -^^^ - -Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has -expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for -at least ``gc_grace_seconds``. Note that if you mix data with TTL and data without TTL (or just different length of the -TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and -not all are compacted at once. - -Fully expired sstables -^^^^^^^^^^^^^^^^^^^^^^ - -If an sstable contains only tombstones and it is guaranteed that that sstable is not shadowing data in any other sstable -compaction can drop that sstable. If you see sstables with only tombstones (note that TTL:ed data is considered -tombstones once the time to live has expired) but it is not being dropped by compaction, it is likely that other -sstables contain older data. There is a tool called ``sstableexpiredblockers`` that will list which sstables are -droppable and which are blocking them from being dropped. This is especially useful for time series compaction with -``TimeWindowCompactionStrategy`` (and the deprecated ``DateTieredCompactionStrategy``). With ``TimeWindowCompactionStrategy`` -it is possible to remove the guarantee (not check for shadowing data) by enabling ``unsafe_aggressive_sstable_expiration``. - -Repaired/unrepaired data -^^^^^^^^^^^^^^^^^^^^^^^^ - -With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables. - -Data directories -^^^^^^^^^^^^^^^^ - -Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. 
To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding -data getting undeleted: - -- It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings - and each one can run compactions independently from the others. -- Users can backup and restore a single data directory. -- Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk - backing two data directories, the big one will be limited the by the small one. One work around to this is to create - more data directories backed by the big disk. - -Single sstable tombstone compaction -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When an sstable is written a histogram with the tombstone expiry times is created and this is used to try to find -sstables with very many tombstones and run single sstable compaction on that sstable in hope of being able to drop -tombstones in that sstable. Before starting this it is also checked how likely it is that any tombstones will actually -will be able to be dropped how much this sstable overlaps with other sstables. To avoid most of these checks the -compaction option ``unchecked_tombstone_compaction`` can be enabled. - -.. _compaction-options: - -Common options -^^^^^^^^^^^^^^ - -There is a number of common options for all the compaction strategies; - -``enabled`` (default: true) - Whether minor compactions should run. Note that you can have 'enabled': true as a compaction option and then do - 'nodetool enableautocompaction' to start running compactions. -``tombstone_threshold`` (default: 0.2) - How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable. -``tombstone_compaction_interval`` (default: 86400s (1 day)) - Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure - that one sstable is not constantly getting recompacted - this option states how often we should try for a given - sstable. -``log_all`` (default: false) - New detailed compaction logging, see :ref:`below `. -``unchecked_tombstone_compaction`` (default: false) - The single sstable compaction has quite strict checks for whether it should be started, this option disables those - checks and for some usecases this might be needed. Note that this does not change anything for the actual - compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able - to drop any tombstones. -``only_purge_repaired_tombstone`` (default: false) - Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired. -``min_threshold`` (default: 4) - Lower limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. -``max_threshold`` (default: 32) - Upper limit of number of sstables before a compaction is triggered. Not used for ``LeveledCompactionStrategy``. - -Further, see the section on each strategy for specific additional options. 
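-
-As a hedged sketch of how these common options are applied (the table name ``keyspace1.table1`` and the chosen values are placeholders), they are supplied through the ``compaction`` map of a ``CREATE TABLE`` or ``ALTER TABLE`` statement:
-
-::
-
-    ALTER TABLE keyspace1.table1
-        WITH compaction = { 'class': 'SizeTieredCompactionStrategy',
-                            'min_threshold': '4',
-                            'max_threshold': '32',
-                            'tombstone_threshold': '0.2',
-                            'tombstone_compaction_interval': '86400' };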
- -Compaction nodetool commands -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :ref:`nodetool ` utility provides a number of commands related to compaction: - -``enableautocompaction`` - Enable compaction. -``disableautocompaction`` - Disable compaction. -``setcompactionthroughput`` - How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this - throughput. -``compactionstats`` - Statistics about current and pending compactions. -``compactionhistory`` - List details about the last compactions. -``setcompactionthreshold`` - Set the min/max sstable count for when to trigger compaction, defaults to 4/32. - -Switching the compaction strategy and options using JMX -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -It is possible to switch compaction strategies and its options on just a single node using JMX, this is a great way to -experiment with settings without affecting the whole cluster. The mbean is:: - - org.apache.cassandra.db:type=ColumnFamilies,keyspace=,columnfamily= - -and the attribute to change is ``CompactionParameters`` or ``CompactionParametersJson`` if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an :ref:`ALTER TABLE ` statement - -for example:: - - { 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10} - -The setting is kept until someone executes an :ref:`ALTER TABLE ` that touches the compaction -settings or restarts the node. - -.. _detailed-compaction-logging: - -More detailed compaction logging -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Enable with the compaction option ``log_all`` and a more detailed compaction log file will be produced in your log -directory. - - - - - diff --git a/src/doc/4.0-beta1/_sources/operating/compaction/lcs.rst.txt b/src/doc/4.0-beta1/_sources/operating/compaction/lcs.rst.txt deleted file mode 100644 index 48c282eb7..000000000 --- a/src/doc/4.0-beta1/_sources/operating/compaction/lcs.rst.txt +++ /dev/null @@ -1,90 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - - -.. _LCS: - -Leveled Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The idea of ``LeveledCompactionStrategy`` (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here. 
- -When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. -This is done by always including all overlapping sstables in the next level. For example if we select an sstable in L3, -we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions -will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that -we wont create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables -cover the full range. We also can't compact all L0 sstables with all L1 sstables in a single compaction since that can -use too much memory. - -When deciding which level to compact LCS checks the higher levels first (with LCS, a "higher" level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level. - -Major compaction -~~~~~~~~~~~~~~~~ - -It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817. - -Bootstrapping -~~~~~~~~~~~~~ - -During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done. - -STCS in L0 -~~~~~~~~~~ - -If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better. - -Starved sstables -~~~~~~~~~~~~~~~~ - -If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable\_size\_in\_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved. - -.. _lcs-options: - -LCS options -~~~~~~~~~~~ - -``sstable_size_in_mb`` (default: 160MB) - The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very - large partitions on the node. - -``fanout_size`` (default: 10) - The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning - this option. - -LCS also support the ``cassandra.disable_stcs_in_l0`` startup option (``-Dcassandra.disable_stcs_in_l0=true``) to avoid -doing STCS in L0. 
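-
-A brief, hedged example of setting these LCS options (the table name ``keyspace1.table1`` is a placeholder), using the same ``compaction`` map syntax as the JMX/``ALTER TABLE`` example in the compaction overview:
-
-::
-
-    ALTER TABLE keyspace1.table1
-        WITH compaction = { 'class': 'LeveledCompactionStrategy',
-                            'sstable_size_in_mb': 160,
-                            'fanout_size': 10 };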
- - diff --git a/src/doc/4.0-beta1/_sources/operating/compaction/stcs.rst.txt b/src/doc/4.0-beta1/_sources/operating/compaction/stcs.rst.txt deleted file mode 100644 index 658933757..000000000 --- a/src/doc/4.0-beta1/_sources/operating/compaction/stcs.rst.txt +++ /dev/null @@ -1,58 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - -.. _STCS: - -Size Tiered Compaction Strategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The basic idea of ``SizeTieredCompactionStrategy`` (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if the size of the sstable -is within ``bucket_low`` and ``bucket_high`` of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket's sstables take the most reads. - -Major compaction -~~~~~~~~~~~~~~~~ - -When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%... of the total size. - -.. _stcs-options: - -STCS options -~~~~~~~~~~~~ - -``min_sstable_size`` (default: 50MB) - Sstables smaller than this are put in the same bucket. -``bucket_low`` (default: 0.5) - How much smaller than the average size of a bucket an sstable should be before not being included in the bucket. That - is, if ``bucket_low * avg_bucket_size < sstable_size`` (and the ``bucket_high`` condition holds, see below), then - the sstable is added to the bucket. -``bucket_high`` (default: 1.5) - How much bigger than the average size of a bucket an sstable should be before not being included in the bucket. That - is, if ``sstable_size < bucket_high * avg_bucket_size`` (and the ``bucket_low`` condition holds, see above), then - the sstable is added to the bucket. - -Defragmentation -~~~~~~~~~~~~~~~ - -Defragmentation is done when many sstables are touched during a read. The result of the read is put into the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only cluster. - - diff --git a/src/doc/4.0-beta1/_sources/operating/compaction/twcs.rst.txt b/src/doc/4.0-beta1/_sources/operating/compaction/twcs.rst.txt deleted file mode 100644 index 3641a5aab..000000000 --- a/src/doc/4.0-beta1/_sources/operating/compaction/twcs.rst.txt +++ /dev/null @@ -1,76 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. 
distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - - -.. _TWCS: - -Time Window CompactionStrategy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``TimeWindowCompactionStrategy`` (TWCS) is designed specifically for workloads where it's beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -``SizeTieredCompactionStrategy`` or ``LeveledCompactionStrategy``. The basic concept is that -``TimeWindowCompactionStrategy`` will create 1 sstable per file for a given window, where a window is simply calculated -as the combination of two primary options: - -``compaction_window_unit`` (default: DAYS) - A Java TimeUnit (MINUTES, HOURS, or DAYS). -``compaction_window_size`` (default: 1) - The number of units that make up a window. -``unsafe_aggressive_sstable_expiration`` (default: false) - Expired sstables will be dropped without checking its data is shadowing other sstables. This is a potentially - risky option that can lead to data loss or deleted data re-appearing, going beyond what - `unchecked_tombstone_compaction` does for single sstable compaction. Due to the risk the jvm must also be - started with `-Dcassandra.unsafe_aggressive_sstable_expiration=true`. - -Taken together, the operator can specify windows of virtually any size, and `TimeWindowCompactionStrategy` will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using `SizeTieredCompactionStrategy`. - -Ideally, operators should select a ``compaction_window_unit`` and ``compaction_window_size`` pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -(``'compaction_window_unit':'DAYS','compaction_window_size':3``). - -TimeWindowCompactionStrategy Operational Concerns -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways: - -- If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables - and flushed into the same SSTable, where it will remain comingled. -- If the user's read requests for old data cause read repairs that pull old data into the current memtable, that data - will be comingled and flushed into the same SSTable. 
- -While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL ``USING TIMESTAMP``. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled). - -Changing TimeWindowCompactionStrategy Options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Operators wishing to enable ``TimeWindowCompactionStrategy`` on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected. - -Operators wishing to change ``compaction_window_unit`` or ``compaction_window_size`` can do so, but may trigger -additional compactions as adjacent windows are joined together. If the window size is decrease d (for example, from 24 -hours to 12 hours), then the existing SSTables will not be modified - TWCS can not split existing SSTables into multiple -windows. - diff --git a/src/doc/4.0-beta1/_sources/operating/compression.rst.txt b/src/doc/4.0-beta1/_sources/operating/compression.rst.txt deleted file mode 100644 index 74c992f5a..000000000 --- a/src/doc/4.0-beta1/_sources/operating/compression.rst.txt +++ /dev/null @@ -1,164 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Compression ------------ - -Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of -data on disk by compressing the SSTable in user-configurable compression ``chunk_length_in_kb``. As Cassandra SSTables -are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates -to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when -UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full -chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so -on). - -Compression algorithms typically trade off between the following three areas: - -- **Compression speed**: How fast does the compression algorithm compress data. This is critical in the flush and - compaction paths because data must be compressed before it is written to disk. -- **Decompression speed**: How fast does the compression algorithm de-compress data. This is critical in the read - and compaction paths as data must be read off disk in a full chunk and decompressed before it can be returned. -- **Ratio**: By what ratio is the uncompressed data reduced by. 
Cassandra typically measures this as the size of data - on disk relative to the uncompressed size. For example a ratio of ``0.5`` means that the data on disk is 50% the size - of the uncompressed data. Cassandra exposes this ratio per table as the ``SSTable Compression Ratio`` field of - ``nodetool tablestats``. - -Cassandra offers five compression algorithms by default that make different tradeoffs in these areas. While -benchmarking compression algorithms depends on many factors (algorithm parameters such as compression level, -the compressibility of the input data, underlying processor class, etc ...), the following table should help you pick -a starting point based on your application's requirements with an extremely rough grading of the different choices -by their performance in these areas (A is relatively good, F is relatively bad): - -+---------------------------------------------+-----------------------+-------------+---------------+-------+-------------+ -| Compression Algorithm | Cassandra Class | Compression | Decompression | Ratio | C* Version | -+=============================================+=======================+=============+===============+=======+=============+ -| `LZ4 `_ | ``LZ4Compressor`` | A+ | A+ | C+ | ``>=1.2.2`` | -+---------------------------------------------+-----------------------+-------------+---------------+-------+-------------+ -| `LZ4HC `_ | ``LZ4Compressor`` | C+ | A+ | B+ | ``>= 3.6`` | -+---------------------------------------------+-----------------------+-------------+---------------+-------+-------------+ -| `Zstd `_ | ``ZstdCompressor`` | A- | A- | A+ | ``>= 4.0`` | -+---------------------------------------------+-----------------------+-------------+---------------+-------+-------------+ -| `Snappy `_ | ``SnappyCompressor`` | A- | A | C | ``>= 1.0`` | -+---------------------------------------------+-----------------------+-------------+---------------+-------+-------------+ -| `Deflate (zlib) `_ | ``DeflateCompressor`` | C | C | A | ``>= 1.0`` | -+---------------------------------------------+-----------------------+-------------+---------------+-------+-------------+ - -Generally speaking for a performance critical (latency or throughput) application ``LZ4`` is the right choice as it -gets excellent ratio per CPU cycle spent. This is why it is the default choice in Cassandra. - -For storage critical applications (disk footprint), however, ``Zstd`` may be a better choice as it can get significant -additional ratio to ``LZ4``. - -``Snappy`` is kept for backwards compatibility and ``LZ4`` will typically be preferable. - -``Deflate`` is kept for backwards compatibility and ``Zstd`` will typically be preferable. - -Configuring Compression -^^^^^^^^^^^^^^^^^^^^^^^ - -Compression is configured on a per-table basis as an optional argument to ``CREATE TABLE`` or ``ALTER TABLE``. Three -options are available for all compressors: - -- ``class`` (default: ``LZ4Compressor``): specifies the compression class to use. The two "fast" - compressors are ``LZ4Compressor`` and ``SnappyCompressor`` and the two "good" ratio compressors are ``ZstdCompressor`` - and ``DeflateCompressor``. -- ``chunk_length_in_kb`` (default: ``16KiB``): specifies the number of kilobytes of data per compression chunk. The main - tradeoff here is that larger chunk sizes give compression algorithms more context and improve their ratio, but - require reads to deserialize and read more off disk. 
-- ``crc_check_chance`` (default: ``1.0``): determines how likely Cassandra is to verify the checksum on each compression - chunk during reads to protect against data corruption. Unless you have profiles indicating this is a performance - problem it is highly encouraged not to turn this off as it is Cassandra's only protection against bitrot. - -The ``LZ4Compressor`` supports the following additional options: - -- ``lz4_compressor_type`` (default ``fast``): specifies if we should use the ``high`` (a.k.a ``LZ4HC``) ratio version - or the ``fast`` (a.k.a ``LZ4``) version of ``LZ4``. The ``high`` mode supports a configurable level, which can allow - operators to tune the performance <-> ratio tradeoff via the ``lz4_high_compressor_level`` option. Note that in - ``4.0`` and above it may be preferable to use the ``Zstd`` compressor. -- ``lz4_high_compressor_level`` (default ``9``): A number between ``1`` and ``17`` inclusive that represents how much - CPU time to spend trying to get more compression ratio. Generally lower levels are "faster" but they get less ratio - and higher levels are slower but get more compression ratio. - -The ``ZstdCompressor`` supports the following options in addition: - -- ``compression_level`` (default ``3``): A number between ``-131072`` and ``22`` inclusive that represents how much CPU - time to spend trying to get more compression ratio. The lower the level, the faster the speed (at the cost of ratio). - Values from 20 to 22 are called "ultra levels" and should be used with caution, as they require more memory. - The default of ``3`` is a good choice for competing with ``Deflate`` ratios and ``1`` is a good choice for competing - with ``LZ4``. - - -Users can set compression using the following syntax: - -:: - - CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'}; - -Or - -:: - - ALTER TABLE keyspace.table WITH compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 64, 'crc_check_chance': 0.5}; - -Once enabled, compression can be disabled with ``ALTER TABLE`` setting ``enabled`` to ``false``: - -:: - - ALTER TABLE keyspace.table WITH compression = {'enabled':'false'}; - -Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ``ALTER TABLE``, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using ``nodetool scrub`` or ``nodetool upgradesstables -a``, both of which will rebuild the SSTables on disk, -re-compressing the data in the process. - -Benefits and Uses -^^^^^^^^^^^^^^^^^ - -Compression's primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk. - -Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well. Tables containing data that has already -been compressed or random data (e.g. benchmark datasets) do not typically compress well. 
- -Operational Impact -^^^^^^^^^^^^^^^^^^ - -- Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per - terabyte of data on disk, though the exact usage varies with ``chunk_length_in_kb`` and compression ratios. - -- Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as - non-vnode bootstrap), the CPU overhead of compression can be a limiting factor. - -- To prevent slow compressors (``Zstd``, ``Deflate``, ``LZ4HC``) from blocking flushes for too long, all three - flush with the default fast ``LZ4`` compressor and then rely on normal compaction to re-compress the data into the - desired compression strategy. See `CASSANDRA-15379 ` for more - details. - -- The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a - way to ensure correctness of data on disk, compressed tables allow the user to set ``crc_check_chance`` (a float from - 0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt. - -Advanced Use -^^^^^^^^^^^^ - -Advanced users can provide their own compression class by implementing the interface at -``org.apache.cassandra.io.compress.ICompressor``. diff --git a/src/doc/4.0-beta1/_sources/operating/hardware.rst.txt b/src/doc/4.0-beta1/_sources/operating/hardware.rst.txt deleted file mode 100644 index d90550c80..000000000 --- a/src/doc/4.0-beta1/_sources/operating/hardware.rst.txt +++ /dev/null @@ -1,85 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Hardware Choices ----------------- - -Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM. - -CPU -^^^ -Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes. - -Memory -^^^^^^ -Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (java's Xmx system parameter). In addition to -the heap, Cassandra will use significant amounts of RAM offheap for compression metadata, bloom filters, row, key, and -counter caches, and an in process page cache. 
Finally, Cassandra will take advantage of the operating system's page -cache, storing recently accessed portions files in RAM for rapid re-use. - -For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest: - -- ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption -- The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM -- Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection -- Heaps larger than 12GB should consider G1GC - -Disks -^^^^^ -Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables. - -Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files. - -Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra's sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it's important that the commitlog -(``commitlog_directory``) be on one physical disk (not simply a partition, but a physical disk), and the data files -(``data_file_directories``) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk. - -In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it's typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5. - -Common Cloud Choices -^^^^^^^^^^^^^^^^^^^^ - -Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. 
In EC2, popular -options include: - -- i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs -- m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) - storage - -Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives. diff --git a/src/doc/4.0-beta1/_sources/operating/hints.rst.txt b/src/doc/4.0-beta1/_sources/operating/hints.rst.txt deleted file mode 100644 index 55c42a401..000000000 --- a/src/doc/4.0-beta1/_sources/operating/hints.rst.txt +++ /dev/null @@ -1,279 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _hints: - -Hints -===== - -Hinting is a data repair technique applied during write operations. When -replica nodes are unavailable to accept a mutation, either due to failure or -more commonly routine maintenance, coordinators attempting to write to those -replicas store temporary hints on their local filesystem for later application -to the unavailable replica. Hints are an important way to help reduce the -duration of data inconsistency. Coordinators replay hints quickly after -unavailable replica nodes return to the ring. Hints are best effort, however, -and do not guarantee eventual consistency like :ref:`anti-entropy repair -` does. - -Hints are useful because of how Apache Cassandra replicates data to provide -fault tolerance, high availability and durability. Cassandra :ref:`partitions -data across the cluster ` using consistent -hashing, and then replicates keys to multiple nodes along the hash ring. To -guarantee availability, all replicas of a key can accept mutations without -consensus, but this means it is possible for some replicas to accept a mutation -while others do not. When this happens an inconsistency is introduced. - -Hints are one of the three ways, in addition to read-repair and -full/incremental anti-entropy repair, that Cassandra implements the eventual -consistency guarantee that all updates are eventually received by all replicas. -Hints, like read-repair, are best effort and not an alternative to performing -full repair, but they do help reduce the duration of inconsistency between -replicas in practice. - -Hinted Handoff --------------- - -Hinted handoff is the process by which Cassandra applies hints to unavailable -nodes. - -For example, consider a mutation is to be made at ``Consistency Level`` -``LOCAL_QUORUM`` against a keyspace with ``Replication Factor`` of ``3``. 
-Normally the client sends the mutation to a single coordinator, who then sends -the mutation to all three replicas, and when two of the three replicas -acknowledge the mutation the coordinator responds successfully to the client. -If a replica node is unavailable, however, the coordinator stores a hint -locally to the filesystem for later application. New hints will be retained for -up to ``max_hint_window_in_ms`` of downtime (defaults to ``3 hours``). If the -unavailable replica does return to the cluster before the window expires, the -coordinator applies any pending hinted mutations against the replica to ensure -that eventual consistency is maintained. - -.. figure:: images/hints.svg - :alt: Hinted Handoff Example - - Hinted Handoff in Action - -* (``t0``): The write is sent by the client, and the coordinator sends it - to the three replicas. Unfortunately ``replica_2`` is restarting and cannot - receive the mutation. -* (``t1``): The client receives a quorum acknowledgement from the coordinator. - At this point the client believe the write to be durable and visible to reads - (which it is). -* (``t2``): After the write timeout (default ``2s``), the coordinator decides - that ``replica_2`` is unavailable and stores a hint to its local disk. -* (``t3``): Later, when ``replica_2`` starts back up it sends a gossip message - to all nodes, including the coordinator. -* (``t4``): The coordinator replays hints including the missed mutation - against ``replica_2``. - -If the node does not return in time, the destination replica will be -permanently out of sync until either read-repair or full/incremental -anti-entropy repair propagates the mutation. - -Application of Hints -^^^^^^^^^^^^^^^^^^^^ - -Hints are streamed in bulk, a segment at a time, to the target replica node and -the target node replays them locally. After the target node has replayed a -segment it deletes the segment and receives the next segment. This continues -until all hints are drained. - -Storage of Hints on Disk -^^^^^^^^^^^^^^^^^^^^^^^^ - -Hints are stored in flat files in the coordinator node’s -``$CASSANDRA_HOME/data/hints`` directory. A hint includes a hint id, the target -replica node on which the mutation is meant to be stored, the serialized -mutation (stored as a blob) that couldn't be delivered to the replica node, the -mutation timestamp, and the Cassandra version used to serialize the mutation. -By default hints are compressed using ``LZ4Compressor``. Multiple hints are -appended to the same hints file. - -Since hints contain the original unmodified mutation timestamp, hint application -is idempotent and cannot overwrite a future mutation. - -Hints for Timed Out Write Requests -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Hints are also stored for write requests that time out. The -``write_request_timeout_in_ms`` setting in ``cassandra.yaml`` configures the -timeout for write requests. - -:: - - write_request_timeout_in_ms: 2000 - -The coordinator waits for the configured amount of time for write requests to -complete, at which point it will time out and generate a hint for the timed out -request. The lowest acceptable value for ``write_request_timeout_in_ms`` is 10 ms. - - -Configuring Hints ------------------ - -Hints are enabled by default as they are critical for data consistency. The -``cassandra.yaml`` configuration file provides several settings for configuring -hints: - -Table 1. 
Settings for Hints - -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|Setting | Description |Default Value | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hinted_handoff_enabled`` |Enables/Disables hinted handoffs | ``true`` | -| | | | -| | | | -| | | | -| | | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hinted_handoff_disabled_datacenters`` |A list of data centers that do not perform | ``unset`` | -| |hinted handoffs even when handoff is | | -| |otherwise enabled. | | -| |Example: | | -| | | | -| | .. code-block:: yaml | | -| | | | -| | hinted_handoff_disabled_datacenters: | | -| | - DC1 | | -| | - DC2 | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``max_hint_window_in_ms`` |Defines the maximum amount of time (ms) | ``10800000`` # 3 hours | -| |a node shall have hints generated after it | | -| |has failed. | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hinted_handoff_throttle_in_kb`` |Maximum throttle in KBs per second, per | | -| |delivery thread. This will be reduced | ``1024`` | -| |proportionally to the number of nodes in | | -| |the cluster. | | -| |(If there are two nodes in the cluster, | | -| |each delivery thread will use the maximum | | -| |rate; if there are 3, each will throttle | | -| |to half of the maximum,since it is expected| | -| |for two nodes to be delivering hints | | -| |simultaneously.) | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``max_hints_delivery_threads`` |Number of threads with which to deliver | ``2`` | -| |hints; Consider increasing this number when| | -| |you have multi-dc deployments, since | | -| |cross-dc handoff tends to be slower | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hints_directory`` |Directory where Cassandra stores hints. |``$CASSANDRA_HOME/data/hints`` | -| | | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hints_flush_period_in_ms`` |How often hints should be flushed from the | ``10000`` | -| |internal buffers to disk. Will *not* | | -| |trigger fsync. | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``max_hints_file_size_in_mb`` |Maximum size for a single hints file, in | ``128`` | -| |megabytes. | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ -|``hints_compression`` |Compression to apply to the hint files. | ``LZ4Compressor`` | -| |If omitted, hints files will be written | | -| |uncompressed. LZ4, Snappy, and Deflate | | -| |compressors are supported. | | -+--------------------------------------------+-------------------------------------------+-------------------------------+ - -Configuring Hints at Runtime with ``nodetool`` ----------------------------------------------- - -``nodetool`` provides several commands for configuring hints or getting hints -related information. 
The nodetool commands override the corresponding -settings if any in ``cassandra.yaml`` for the node running the command. - -Table 2. Nodetool Commands for Hints - -+--------------------------------+-------------------------------------------+ -|Command | Description | -+--------------------------------+-------------------------------------------+ -|``nodetool disablehandoff`` |Disables storing and delivering hints | -+--------------------------------+-------------------------------------------+ -|``nodetool disablehintsfordc`` |Disables storing and delivering hints to a | -| |data center | -+--------------------------------+-------------------------------------------+ -|``nodetool enablehandoff`` |Re-enables future hints storing and | -| |delivery on the current node | -+--------------------------------+-------------------------------------------+ -|``nodetool enablehintsfordc`` |Enables hints for a data center that was | -| |previously disabled | -+--------------------------------+-------------------------------------------+ -|``nodetool getmaxhintwindow`` |Prints the max hint window in ms. New in | -| |Cassandra 4.0. | -+--------------------------------+-------------------------------------------+ -|``nodetool handoffwindow`` |Prints current hinted handoff window | -+--------------------------------+-------------------------------------------+ -|``nodetool pausehandoff`` |Pauses hints delivery process | -+--------------------------------+-------------------------------------------+ -|``nodetool resumehandoff`` |Resumes hints delivery process | -+--------------------------------+-------------------------------------------+ -|``nodetool |Sets hinted handoff throttle in kb | -|sethintedhandoffthrottlekb`` |per second, per delivery thread | -+--------------------------------+-------------------------------------------+ -|``nodetool setmaxhintwindow`` |Sets the specified max hint window in ms | -+--------------------------------+-------------------------------------------+ -|``nodetool statushandoff`` |Status of storing future hints on the | -| |current node | -+--------------------------------+-------------------------------------------+ -|``nodetool truncatehints`` |Truncates all hints on the local node, or | -| |truncates hints for the endpoint(s) | -| |specified. | -+--------------------------------+-------------------------------------------+ - -Make Hints Play Faster at Runtime -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The default of ``1024 kbps`` handoff throttle is conservative for most modern -networks, and it is entirely possible that in a simple node restart you may -accumulate many gigabytes hints that may take hours to play back. For example if -you are ingesting ``100 Mbps`` of data per node, a single 10 minute long -restart will create ``10 minutes * (100 megabit / second) ~= 7 GiB`` of data -which at ``(1024 KiB / second)`` would take ``7.5 GiB / (1024 KiB / second) = -2.03 hours`` to play back. The exact math depends on the load balancing strategy -(round robin is better than token aware), number of tokens per node (more -tokens is better than fewer), and naturally the cluster's write rate, but -regardless you may find yourself wanting to increase this throttle at runtime. - -If you find yourself in such a situation, you may consider raising -the ``hinted_handoff_throttle`` dynamically via the -``nodetool sethintedhandoffthrottlekb`` command. 
- -Allow a Node to be Down Longer at Runtime -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Sometimes a node may be down for more than the normal ``max_hint_window_in_ms``, -(default of three hours), but the hardware and data itself will still be -accessible. In such a case you may consider raising the -``max_hint_window_in_ms`` dynamically via the ``nodetool setmaxhintwindow`` -command added in Cassandra 4.0 (`CASSANDRA-11720 `_). -This will instruct Cassandra to continue holding hints for the down -endpoint for a longer amount of time. - -This command should be applied on all nodes in the cluster that may be holding -hints. If needed, the setting can be applied permanently by setting the -``max_hint_window_in_ms`` setting in ``cassandra.yaml`` followed by a rolling -restart. - -Monitoring Hint Delivery ------------------------- - -Cassandra 4.0 adds histograms available to understand how long it takes to deliver -hints which is useful for operators to better identify problems (`CASSANDRA-13234 -`_). - -There are also metrics available for tracking :ref:`Hinted Handoff ` -and :ref:`Hints Service ` metrics. diff --git a/src/doc/4.0-beta1/_sources/operating/index.rst.txt b/src/doc/4.0-beta1/_sources/operating/index.rst.txt deleted file mode 100644 index 78c7eb6ea..000000000 --- a/src/doc/4.0-beta1/_sources/operating/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Operating Cassandra -=================== - -.. toctree:: - :maxdepth: 2 - - snitch - topo_changes - repair - read_repair - hints - compaction/index - bloom_filters - compression - cdc - backups - bulk_loading - metrics - security - hardware - diff --git a/src/doc/4.0-beta1/_sources/operating/metrics.rst.txt b/src/doc/4.0-beta1/_sources/operating/metrics.rst.txt deleted file mode 100644 index fc37440d3..000000000 --- a/src/doc/4.0-beta1/_sources/operating/metrics.rst.txt +++ /dev/null @@ -1,793 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. 
highlight:: none - -.. _monitoring-metrics: - -Monitoring ----------- - -Metrics in Cassandra are managed using the `Dropwizard Metrics `__ library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of `built in -`__ and `third party -`__ reporter plugins. - -Metrics are collected for a single node. It's up to the operator to use an external monitoring system to aggregate them. - -Metric Types -^^^^^^^^^^^^ -All metrics reported by cassandra fit into one of the following types. - -``Gauge`` - An instantaneous measurement of a value. - -``Counter`` - A gauge for an ``AtomicLong`` instance. Typically this is consumed by monitoring the change since the last call to - see if there is a large increase compared to the norm. - -``Histogram`` - Measures the statistical distribution of values in a stream of data. - - In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th - percentiles. - -``Timer`` - Measures both the rate that a particular piece of code is called and the histogram of its duration. - -``Latency`` - Special type that tracks latency (in microseconds) with a ``Timer`` plus a ``Counter`` that tracks the total latency - accrued since starting. The former is useful if you track the change in total latency since the last check. Each - metric name of this type will have 'Latency' and 'TotalLatency' appended to it. - -``Meter`` - A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving - average throughputs. - -.. _table-metrics: - -Table Metrics -^^^^^^^^^^^^^ - -Each table in Cassandra has metrics responsible for tracking its state and performance. - -The metric names are all appended with the specific ``Keyspace`` and ``Table`` name. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Table...
`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Table keyspace= scope=
name=`` - -.. NOTE:: - There is a special table called '``all``' without a keyspace. This represents the aggregation of metrics across - **all** tables and keyspaces on the node. - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -MemtableOnHeapSize Gauge Total amount of data stored in the memtable that resides **on**-heap, including column related overhead and partitions overwritten. -MemtableOffHeapSize Gauge Total amount of data stored in the memtable that resides **off**-heap, including column related overhead and partitions overwritten. -MemtableLiveDataSize Gauge Total amount of live data stored in the memtable, excluding any data structure overhead. -AllMemtablesOnHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **on**-heap. -AllMemtablesOffHeapSize Gauge Total amount of data stored in the memtables (2i and pending flush memtables included) that resides **off**-heap. -AllMemtablesLiveDataSize Gauge Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead. -MemtableColumnsCount Gauge Total number of columns present in the memtable. -MemtableSwitchCount Counter Number of times flush has resulted in the memtable being switched out. -CompressionRatio Gauge Current compression ratio for all SSTables. -EstimatedPartitionSizeHistogram Gauge Histogram of estimated partition size (in bytes). -EstimatedPartitionCount Gauge Approximate number of keys in table. -EstimatedColumnCountHistogram Gauge Histogram of estimated number of columns. -SSTablesPerReadHistogram Histogram Histogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into acoount. -ReadLatency Latency Local read latency for this table. -RangeLatency Latency Local range scan latency for this table. -WriteLatency Latency Local write latency for this table. -CoordinatorReadLatency Timer Coordinator read latency for this table. -CoordinatorWriteLatency Timer Coordinator write latency for this table. -CoordinatorScanLatency Timer Coordinator range scan latency for this table. -PendingFlushes Counter Estimated number of flush tasks pending for this table. -BytesFlushed Counter Total number of bytes flushed since server [re]start. -CompactionBytesWritten Counter Total number of bytes written by compaction since server [re]start. -PendingCompactions Gauge Estimate of number of pending compactions for this table. -LiveSSTableCount Gauge Number of SSTables on disk for this table. -LiveDiskSpaceUsed Counter Disk space used by SSTables belonging to this table (in bytes). -TotalDiskSpaceUsed Counter Total disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC'd. -MinPartitionSize Gauge Size of the smallest compacted partition (in bytes). -MaxPartitionSize Gauge Size of the largest compacted partition (in bytes). -MeanPartitionSize Gauge Size of the average compacted partition (in bytes). -BloomFilterFalsePositives Gauge Number of false positives on table's bloom filter. -BloomFilterFalseRatio Gauge False positive ratio of table's bloom filter. -BloomFilterDiskSpaceUsed Gauge Disk space used by bloom filter (in bytes). -BloomFilterOffHeapMemoryUsed Gauge Off-heap memory used by bloom filter. 
-IndexSummaryOffHeapMemoryUsed Gauge Off-heap memory used by index summary. -CompressionMetadataOffHeapMemoryUsed Gauge Off-heap memory used by compression meta data. -KeyCacheHitRate Gauge Key cache hit rate for this table. -TombstoneScannedHistogram Histogram Histogram of tombstones scanned in queries on this table. -LiveScannedHistogram Histogram Histogram of live cells scanned in queries on this table. -ColUpdateTimeDeltaHistogram Histogram Histogram of column update time delta on this table. -ViewLockAcquireTime Timer Time taken acquiring a partition lock for materialized view updates on this table. -ViewReadTime Timer Time taken during the local read of a materialized view update. -TrueSnapshotsSize Gauge Disk space used by snapshots of this table including all SSTable components. -RowCacheHitOutOfRange Counter Number of table row cache hits that do not satisfy the query filter, thus went to disk. -RowCacheHit Counter Number of table row cache hits. -RowCacheMiss Counter Number of table row cache misses. -CasPrepare Latency Latency of paxos prepare round. -CasPropose Latency Latency of paxos propose round. -CasCommit Latency Latency of paxos commit round. -PercentRepaired Gauge Percent of table data that is repaired on disk. -BytesRepaired Gauge Size of table data repaired on disk -BytesUnrepaired Gauge Size of table data unrepaired on disk -BytesPendingRepair Gauge Size of table data isolated for an ongoing incremental repair -SpeculativeRetries Counter Number of times speculative retries were sent for this table. -SpeculativeFailedRetries Counter Number of speculative retries that failed to prevent a timeout -SpeculativeInsufficientReplicas Counter Number of speculative retries that couldn't be attempted due to lack of replicas -SpeculativeSampleLatencyNanos Gauge Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency. -WaitingOnFreeMemtableSpace Histogram Histogram of time spent waiting for free memtable space, either on- or off-heap. -DroppedMutations Counter Number of dropped mutations on this table. -AnticompactionTime Timer Time spent anticompacting before a consistent repair. -ValidationTime Timer Time spent doing validation compaction during repair. -SyncTime Timer Time spent doing streaming during repair. -BytesValidated Histogram Histogram over the amount of bytes read during validation. -PartitionsValidated Histogram Histogram over the number of partitions read during validation. -BytesAnticompacted Counter How many bytes we anticompacted. -BytesMutatedAnticompaction Counter How many bytes we avoided anticompacting because the sstable was fully contained in the repaired range. -MutatedAnticompactionGauge Gauge Ratio of bytes mutated vs total bytes repaired. -======================================= ============== =========== - -Keyspace Metrics -^^^^^^^^^^^^^^^^ -Each keyspace in Cassandra has metrics responsible for tracking its state and performance. - -Most of these metrics are the same as the ``Table Metrics`` above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.keyspace..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Keyspace scope= name=`` - - -======================================= ============== =========== -Name Type Description -======================================= ============== =========== -WriteFailedIdeaCL Counter Number of writes that failed to achieve the configured ideal consistency level or 0 if none is configured -IdealCLWriteLatency Latency Coordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured -RepairTime Timer Total time spent as repair coordinator. -RepairPrepareTime Timer Total time spent preparing for repair. -======================================= ============== =========== - -ThreadPool Metrics -^^^^^^^^^^^^^^^^^^ - -Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It's important to monitor the state of these thread pools since they can tell you how saturated a -node is. - -The metric names are all appended with the specific ``ThreadPool`` name. The thread pools are also categorized under a -specific type. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ThreadPools...`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ThreadPools path= scope= name=`` - -===================== ============== =========== -Name Type Description -===================== ============== =========== -ActiveTasks Gauge Number of tasks being actively worked on by this pool. -PendingTasks Gauge Number of queued tasks queued up on this pool. -CompletedTasks Counter Number of tasks completed. -TotalBlockedTasks Counter Number of tasks that were blocked due to queue saturation. -CurrentlyBlockedTask Counter Number of tasks that are currently blocked due to queue saturation but on retry will become unblocked. -MaxPoolSize Gauge The maximum number of threads in this pool. -MaxTasksQueued Gauge The maximum number of tasks queued before a task get blocked. -===================== ============== =========== - -The following thread pools can be monitored. 
- -============================ ============== =========== -Name Type Description -============================ ============== =========== -Native-Transport-Requests transport Handles client CQL requests -CounterMutationStage request Responsible for counter writes -ViewMutationStage request Responsible for materialized view writes -MutationStage request Responsible for all other writes -ReadRepairStage request ReadRepair happens on this thread pool -ReadStage request Local reads run on this thread pool -RequestResponseStage request Coordinator requests to the cluster run on this thread pool -AntiEntropyStage internal Builds merkle tree for repairs -CacheCleanupExecutor internal Cache maintenance performed on this thread pool -CompactionExecutor internal Compactions are run on these threads -GossipStage internal Handles gossip requests -HintsDispatcher internal Performs hinted handoff -InternalResponseStage internal Responsible for intra-cluster callbacks -MemtableFlushWriter internal Writes memtables to disk -MemtablePostFlush internal Cleans up commit log after memtable is written to disk -MemtableReclaimMemory internal Memtable recycling -MigrationStage internal Runs schema migrations -MiscStage internal Misceleneous tasks run here -PendingRangeCalculator internal Calculates token range -PerDiskMemtableFlushWriter_0 internal Responsible for writing a spec (there is one of these per disk 0-N) -Sampler internal Responsible for re-sampling the index summaries of SStables -SecondaryIndexManagement internal Performs updates to secondary indexes -ValidationExecutor internal Performs validation compaction or scrubbing -ViewBuildExecutor internal Performs materialized views initial build -============================ ============== =========== - -.. |nbsp| unicode:: 0xA0 .. nonbreaking space - -Client Request Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Client requests have their own set of metrics that encapsulate the work happening at coordinator level. - -Different types of client requests are broken down by ``RequestType``. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.ClientRequest..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=ClientRequest scope= name=`` - - -:RequestType: CASRead -:Description: Metrics related to transactional read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction read latency. - Unavailables Counter Number of unavailable exceptions encountered. - UnfinishedCommit Counter Number of transactions that were committed on read. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended reads were encountered - ===================== ============== ============================================================= - -:RequestType: CASWrite -:Description: Metrics related to transactional write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. 
- Failures Counter Number of transaction failures encountered. - |nbsp| Latency Transaction write latency. - UnfinishedCommit Counter Number of transactions that were committed on write. - ConditionNotMet Counter Number of transaction preconditions did not match current values. - ContentionHistogram Histogram How many contended writes were encountered - MutationSizeHistogram Histogram Total size in bytes of the requests mutations. - ===================== ============== ============================================================= - - -:RequestType: Read -:Description: Metrics related to standard read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of read failures encountered. - |nbsp| Latency Read latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: RangeSlice -:Description: Metrics related to token range read requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of range query failures encountered. - |nbsp| Latency Range query latency. - Unavailables Counter Number of unavailable exceptions encountered. - ===================== ============== ============================================================= - -:RequestType: Write -:Description: Metrics related to regular write requests. -:Metrics: - ===================== ============== ============================================================= - Name Type Description - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of write failures encountered. - |nbsp| Latency Write latency. - Unavailables Counter Number of unavailable exceptions encountered. - MutationSizeHistogram Histogram Total size in bytes of the requests mutations. - ===================== ============== ============================================================= - - -:RequestType: ViewWrite -:Description: Metrics related to materialized view write wrtes. -:Metrics: - ===================== ============== ============================================================= - Timeouts Counter Number of timeouts encountered. - Failures Counter Number of transaction failures encountered. - Unavailables Counter Number of unavailable exceptions encountered. - ViewReplicasAttempted Counter Total number of attempted view replica writes. - ViewReplicasSuccess Counter Total number of succeded view replica writes. - ViewPendingMutations Gauge ViewReplicasAttempted - ViewReplicasSuccess. - ViewWriteLatency Timer Time between when mutation is applied to base table and when CL.ONE is achieved on view. - ===================== ============== ============================================================= - -Cache Metrics -^^^^^^^^^^^^^ - -Cassandra caches have metrics to track the effectivness of the caches. Though the ``Table Metrics`` might be more useful. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Cache..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Cache scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Cache capacity in bytes. -Entries Gauge Total number of cache entries. -FifteenMinuteCacheHitRate Gauge 15m cache hit rate. -FiveMinuteCacheHitRate Gauge 5m cache hit rate. -OneMinuteCacheHitRate Gauge 1m cache hit rate. -HitRate Gauge All time cache hit rate. -Hits Meter Total number of cache hits. -Misses Meter Total number of cache misses. -MissLatency Timer Latency of misses. -Requests Gauge Total number of cache requests. -Size Gauge Total size of occupied cache, in bytes. -========================== ============== =========== - -The following caches are covered: - -============================ =========== -Name Description -============================ =========== -CounterCache Keeps hot counters in memory for performance. -ChunkCache In process uncompressed page cache. -KeyCache Cache for partition to sstable offsets. -RowCache Cache for rows kept in memory. -============================ =========== - -.. NOTE:: - Misses and MissLatency are only defined for the ChunkCache - -CQL Metrics -^^^^^^^^^^^ - -Metrics specific to CQL prepared statement caching. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CQL.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CQL name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -PreparedStatementsCount Gauge Number of cached prepared statements. -PreparedStatementsEvicted Counter Number of prepared statements evicted from the prepared statement cache -PreparedStatementsExecuted Counter Number of prepared statements executed. -RegularStatementsExecuted Counter Number of **non** prepared statements executed. -PreparedStatementsRatio Gauge Percentage of statements that are prepared vs unprepared. -========================== ============== =========== - -.. _dropped-metrics: - -DroppedMessage Metrics -^^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by ``Hinted Handoff`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.DroppedMessage..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=DroppedMessage scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CrossNodeDroppedLatency Timer The dropped latency across nodes. -InternalDroppedLatency Timer The dropped latency within node. -Dropped Meter Number of dropped messages. -========================== ============== =========== - -The different types of messages tracked are: - -============================ =========== -Name Description -============================ =========== -BATCH_STORE Batchlog write -BATCH_REMOVE Batchlog cleanup (after succesfully applied) -COUNTER_MUTATION Counter writes -HINT Hint replay -MUTATION Regular writes -READ Regular reads -READ_REPAIR Read repair -PAGED_SLICE Paged read -RANGE_SLICE Token range read -REQUEST_RESPONSE RPC Callbacks -_TRACE Tracing writes -============================ =========== - -Streaming Metrics -^^^^^^^^^^^^^^^^^ - -Metrics reported during ``Streaming`` operations, such as repair, bootstrap, rebuild. 
- -These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Streaming..`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Streaming scope= name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -IncomingBytes Counter Number of bytes streamed to this node from the peer. -OutgoingBytes Counter Number of bytes streamed to the peer endpoint from this node. -========================== ============== =========== - - -Compaction Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to ``Compaction`` work. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Compaction.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Compaction name=`` - -========================== ======================================== =============================================== -Name Type Description -========================== ======================================== =============================================== -BytesCompacted Counter Total number of bytes compacted since server [re]start. -PendingTasks Gauge Estimated number of compactions remaining to perform. -CompletedTasks Gauge Number of completed compactions since server [re]start. -TotalCompactionsCompleted Meter Throughput of completed compactions since server [re]start. -PendingTasksByTableName Gauge>> Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in ``Table Metrics``. -========================== ======================================== =============================================== - -CommitLog Metrics -^^^^^^^^^^^^^^^^^ - -Metrics specific to the ``CommitLog`` - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.CommitLog.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=CommitLog name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -CompletedTasks Gauge Total number of commit log messages written since [re]start. -PendingTasks Gauge Number of commit log messages written but yet to be fsync'd. -TotalCommitLogSize Gauge Current size, in bytes, used by all the commit log segments. -WaitingOnSegmentAllocation Timer Time spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero. -WaitingOnCommit Timer The time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval. -========================== ============== =========== - -Storage Metrics -^^^^^^^^^^^^^^^ - -Metrics specific to the storage engine. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Storage.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Storage name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Exceptions Counter Number of internal exceptions caught. Under normal exceptions this should be zero. -Load Counter Size, in bytes, of the on disk data size this node manages. -TotalHints Counter Number of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint. -TotalHintsInProgress Counter Number of hints attemping to be sent currently. 
-========================== ============== =========== - -.. _handoff-metrics: - -HintedHandoff Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in ``Storage Metrics`` - -These metrics include the peer endpoint **in the metric name** - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintedHandOffManager.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintedHandOffManager name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Hints_created- Counter Number of hints on disk for this peer. -Hints_not_stored- Counter Number of hints not stored for this peer, due to being down past the configured hint window. -=========================== ============== =========== - -.. _hintsservice-metrics: - -HintsService Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in ``Storage Metrics`` - -These metrics include the peer endpoint **in the metric name** - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.HintsService.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=HintsService name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -HintsSucceeded Meter A meter of the hints successfully delivered -HintsFailed Meter A meter of the hints that failed deliver -HintsTimedOut Meter A meter of the hints that timed out -Hint_delays Histogram Histogram of hint delivery delays (in milliseconds) -Hint_delays- Histogram Histogram of hint delivery delays (in milliseconds) per peer -=========================== ============== =========== - -SSTable Index Metrics -^^^^^^^^^^^^^^^^^^^^^ - -Metrics specific to the SSTable index metadata. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Index..RowIndexEntry`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -IndexedEntrySize Histogram Histogram of the on-heap size, in bytes, of the index across all SSTables. -IndexInfoCount Histogram Histogram of the number of on-heap index entries managed across all SSTables. -IndexInfoGets Histogram Histogram of the number index seeks performed per SSTable. -=========================== ============== =========== - -BufferPool Metrics -^^^^^^^^^^^^^^^^^^ - -Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.BufferPool.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=BufferPool name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -Size Gauge Size, in bytes, of the managed buffer pool -Misses Meter The rate of misses in the pool. The higher this is the more allocations incurred. -=========================== ============== =========== - - -Client Metrics -^^^^^^^^^^^^^^ - -Metrics specifc to client managment. 
- -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Client.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Client name=`` - -============================== =============================== =========== -Name Type Description -============================== =============================== =========== -connectedNativeClients Gauge Number of clients connected to this nodes native protocol server -connections Gauge> List of all connections and their state information -connectedNativeClientsByUser Gauge Number of connnective native clients by username -============================== =============================== =========== - - -Batch Metrics -^^^^^^^^^^^^^ - -Metrics specifc to batch statements. - -Reported name format: - -**Metric Name** - ``org.apache.cassandra.metrics.Batch.`` - -**JMX MBean** - ``org.apache.cassandra.metrics:type=Batch name=`` - -=========================== ============== =========== -Name Type Description -=========================== ============== =========== -PartitionsPerCounterBatch Histogram Distribution of the number of partitions processed per counter batch -PartitionsPerLoggedBatch Histogram Distribution of the number of partitions processed per logged batch -PartitionsPerUnloggedBatch Histogram Distribution of the number of partitions processed per unlogged batch -=========================== ============== =========== - - -JVM Metrics -^^^^^^^^^^^ - -JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using `Metric Reporters`_. - -BufferPool -++++++++++ - -**Metric Name** - ``jvm.buffers..`` - -**JMX MBean** - ``java.nio:type=BufferPool name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Capacity Gauge Estimated total capacity of the buffers in this pool -Count Gauge Estimated number of buffers in the pool -Used Gauge Estimated memory that the Java virtual machine is using for this buffer pool -========================== ============== =========== - -FileDescriptorRatio -+++++++++++++++++++ - -**Metric Name** - ``jvm.fd.`` - -**JMX MBean** - ``java.lang:type=OperatingSystem name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Usage Ratio Ratio of used to total file descriptors -========================== ============== =========== - -GarbageCollector -++++++++++++++++ - -**Metric Name** - ``jvm.gc..`` - -**JMX MBean** - ``java.lang:type=GarbageCollector name=`` - -========================== ============== =========== -Name Type Description -========================== ============== =========== -Count Gauge Total number of collections that have occurred -Time Gauge Approximate accumulated collection elapsed time in milliseconds -========================== ============== =========== - -Memory -++++++ - -**Metric Name** - ``jvm.memory..`` - -**JMX MBean** - ``java.lang:type=Memory`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -MemoryPool -++++++++++ - -**Metric Name** - 
``jvm.memory.pools..`` - -**JMX MBean** - ``java.lang:type=MemoryPool name=`` - -========================== ============== =========== -Committed Gauge Amount of memory in bytes that is committed for the JVM to use -Init Gauge Amount of memory in bytes that the JVM initially requests from the OS -Max Gauge Maximum amount of memory in bytes that can be used for memory management -Usage Ratio Ratio of used to maximum memory -Used Gauge Amount of used memory in bytes -========================== ============== =========== - -JMX -^^^ - -Any JMX based client can access metrics from cassandra. - -If you wish to access JMX metrics over http it's possible to download `Mx4jTool `__ and -place ``mx4j-tools.jar`` into the classpath. On startup you will see in the log:: - - HttpAdaptor version 3.0.2 started on port 8081 - -To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -``conf/cassandra-env.sh`` and uncomment:: - - #MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0" - - #MX4J_PORT="-Dmx4jport=8081" - - -Metric Reporters -^^^^^^^^^^^^^^^^ - -As mentioned at the top of this section on monitoring the Cassandra metrics can be exported to a number of monitoring -system a number of `built in `__ and `third party -`__ reporter plugins. - -The configuration of these plugins is managed by the `metrics reporter config project -`__. There is a sample configuration file located at -``conf/metrics-reporter-config-sample.yaml``. - -Once configured, you simply start cassandra with the flag -``-Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml``. The specified .yaml file plus any 3rd party -reporter jars must all be in Cassandra's classpath. diff --git a/src/doc/4.0-beta1/_sources/operating/read_repair.rst.txt b/src/doc/4.0-beta1/_sources/operating/read_repair.rst.txt deleted file mode 100644 index d280162b8..000000000 --- a/src/doc/4.0-beta1/_sources/operating/read_repair.rst.txt +++ /dev/null @@ -1,169 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _read-repair: - -Read repair -============== -Read Repair is the process of repairing data replicas during a read request. If all replicas involved in a read request at the given read consistency level are consistent the data is returned to the client and no read repair is needed. But if the replicas involved in a read request at the given consistency level are not consistent a read repair is performed to make replicas involved in the read request consistent. The most up-to-date data is returned to the client. The read repair runs in the foreground and is blocking in that a response is not returned to the client until the read repair has completed and up-to-date data is constructed. 
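As a purely illustrative sketch (the ``ks.tbl`` table and its data are hypothetical), a quorum read issued from ``cqlsh`` is the kind of request that can trigger this blocking read repair when replicas disagree:

::

    cqlsh> CONSISTENCY QUORUM;
    cqlsh> SELECT * FROM ks.tbl WHERE k = 0;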
- -Expectation of Monotonic Quorum Reads -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Cassandra uses a blocking read repair to ensure the expectation of "monotonic quorum reads", i.e. that in 2 successive quorum reads, it's guaranteed the 2nd one won't return anything older than the 1st one, even if a failed quorum write wrote the most up-to-date value only to a minority of replicas. "Quorum" means a majority of nodes among replicas.
- -Table level configuration of monotonic reads -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Cassandra 4.0 adds support for table level configuration of monotonic reads (`CASSANDRA-14635 -`_). The ``read_repair`` table option has been added to the table schema, with the options ``blocking`` (default) and ``none``.
- -The ``read_repair`` option configures the read repair behavior to allow tuning for various performance and consistency behaviors. Two consistency properties are affected by read repair behavior.
- -- Monotonic Quorum Reads: Provided by ``BLOCKING``. Monotonic quorum reads prevent reads from appearing to go back in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of replicas, it may be visible in one read, and then disappear in a subsequent read. -- Write Atomicity: Provided by ``NONE``. Write atomicity prevents reads from returning partially applied writes. Cassandra attempts to provide partition level write atomicity, but since only the data covered by a ``SELECT`` statement is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it is written. For example, read repair can break write atomicity if you write multiple rows to a clustered partition in a batch, but then select a single row by specifying the clustering column in a ``SELECT`` statement.
- -The available read repair settings are:
- -Blocking -********* -The default setting. When ``read_repair`` is set to ``BLOCKING``, and a read repair is started, the read will block on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition level write atomicity.
- -None -********* -When ``read_repair`` is set to ``NONE``, the coordinator will reconcile any differences between replicas, but will not attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads.
- -An example of using the ``NONE`` setting for the ``read_repair`` option is as follows:
- -:: - - CREATE TABLE ks.tbl (k INT, c INT, v INT, PRIMARY KEY (k, c)) WITH read_repair = 'NONE';
- -Read Repair Example -^^^^^^^^^^^^^^^^^^^^^^^^^^ -To illustrate read repair with an example, consider that a client sends a read request with read consistency level ``TWO`` to a 5-node cluster as illustrated in Figure 1. The read consistency level determines how many replica nodes must return a response before the read request is considered successful.
- - -.. figure:: Figure_1_read_repair.jpg
- - -Figure 1. Client sends read request to a 5-node Cluster
- -Three nodes host replicas for the requested data as illustrated in Figure 2. With a read consistency level of ``TWO``, two replica nodes must return a response for the read request to be considered successful. If the node the client sends the request to hosts a replica of the requested data, only one other replica node needs to be sent a read request.
But if the receiving node does not host a replica for the requested data the node becomes a coordinator node and forwards the read request to a node that hosts a replica. A direct read request is forwarded to the fastest node (as determined by dynamic snitch) as shown in Figure 2. A direct read request is a full read and returns the requested data. - -.. figure:: Figure_2_read_repair.jpg - -Figure 2. Direct Read Request sent to Fastest Replica Node - -Next, the coordinator node sends the requisite number of additional requests to satisfy the consistency level, which is ``TWO``. The coordinator node needs to send one more read request for a total of two. All read requests additional to the first direct read request are digest read requests. A digest read request is not a full read and only returns the hash value of the data. Only a hash value is returned to reduce the network data traffic. In the example being discussed the coordinator node sends one digest read request to a node hosting a replica as illustrated in Figure 3. - -.. figure:: Figure_3_read_repair.jpg - -Figure 3. Coordinator Sends a Digest Read Request - -The coordinator node has received a full copy of data from one node and a hash value for the data from another node. To compare the data returned a hash value is calculated for the full copy of data. The two hash values are compared. If the hash values are the same no read repair is needed and the full copy of requested data is returned to the client. The coordinator node only performed a total of two replica read request because the read consistency level is ``TWO`` in the example. If the consistency level were higher such as ``THREE``, three replica nodes would need to respond to a read request and only if all digest or hash values were to match with the hash value of the full copy of data would the read request be considered successful and the data returned to the client. - -But, if the hash value/s from the digest read request/s are not the same as the hash value of the data from the full read request of the first replica node it implies that an inconsistency in the replicas exists. To fix the inconsistency a read repair is performed. - -For example, consider that that digest request returns a hash value that is not the same as the hash value of the data from the direct full read request. We would need to make the replicas consistent for which the coordinator node sends a direct (full) read request to the replica node that it sent a digest read request to earlier as illustrated in Figure 4. - -.. figure:: Figure_4_read_repair.jpg - -Figure 4. Coordinator sends Direct Read Request to Replica Node it had sent Digest Read Request to - -After receiving the data from the second replica node the coordinator has data from two of the replica nodes. It only needs two replicas as the read consistency level is ``TWO`` in the example. Data from the two replicas is compared and based on the timestamps the most recent replica is selected. Data may need to be merged to construct an up-to-date copy of data if one replica has data for only some of the columns. In the example, if the data from the first direct read request is found to be outdated and the data from the second full read request to be the latest read, repair needs to be performed on Replica 2. If a new up-to-date data is constructed by merging the two replicas a read repair would be needed on both the replicas involved. For example, a read repair is performed on Replica 2 as illustrated in Figure 5. - -.. 
figure:: Figure_5_read_repair.jpg - -Figure 5. Coordinator performs Read Repair - - -The most up-to-date data is returned to the client as illustrated in Figure 6. From the three replicas Replica 1 is not even read and thus not repaired. Replica 2 is repaired. Replica 3 is the most up-to-date and returned to client. - -.. figure:: Figure_6_read_repair.jpg - -Figure 6. Most up-to-date Data returned to Client - -Read Consistency Level and Read Repair -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The read consistency is most significant in determining if a read repair needs to be performed. As discussed in Table 1 a read repair is not needed for all of the consistency levels. - -Table 1. Read Repair based on Read Consistency Level - -+----------------------+-------------------------------------------+ -|Read Consistency Level| Description | -+----------------------+-------------------------------------------+ -| ONE |Read repair is not performed as the | -| |data from the first direct read request | -| |satisfies the consistency level ONE. | -| |No digest read requests are involved | -| |for finding mismatches in data. | -+----------------------+-------------------------------------------+ -| TWO |Read repair is performed if inconsistencies| -| |in data are found as determined by the | -| |direct and digest read requests. | -+----------------------+-------------------------------------------+ -| THREE |Read repair is performed if inconsistencies| -| |in data are found as determined by the | -| |direct and digest read requests. | -+----------------------+-------------------------------------------+ -|LOCAL_ONE |Read repair is not performed as the data | -| |from the direct read request from the | -| |closest replica satisfies the consistency | -| |level LOCAL_ONE.No digest read requests are| -| |involved for finding mismatches in data. | -+----------------------+-------------------------------------------+ -|LOCAL_QUORUM |Read repair is performed if inconsistencies| -| |in data are found as determined by the | -| |direct and digest read requests. | -+----------------------+-------------------------------------------+ -|QUORUM |Read repair is performed if inconsistencies| -| |in data are found as determined by the | -| |direct and digest read requests. | -+----------------------+-------------------------------------------+ - -If read repair is performed it is made only on the replicas that are not up-to-date and that are involved in the read request. The number of replicas involved in a read request would be based on the read consistency level; in the example it is two. - -Improved Read Repair Blocking Behavior in Cassandra 4.0 -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra 4.0 makes two improvements to read repair blocking behavior (`CASSANDRA-10726 -`_). - -1. Speculative Retry of Full Data Read Requests. Cassandra 4.0 makes use of speculative retry in sending read requests (full, not digest) to replicas if a full data response is not received, whether in the initial full read request or a full data read request during read repair. With speculative retry if it looks like a response may not be received from the initial set of replicas Cassandra sent messages to, to satisfy the consistency level, it speculatively sends additional read request to un-contacted replica/s. 
Cassandra 4.0 will also speculatively send a repair mutation to a minority of nodes not involved in the read repair data read / write cycle with the combined contents of all un-acknowledged mutations if it looks like one may not respond. Cassandra accepts acks from them in lieu of acks from the initial mutations sent out, so long as it receives the same number of acks as repair mutations transmitted.
- -2. Only blocks on Full Data Responses to satisfy the Consistency Level. Cassandra 4.0 only blocks for what is needed to resolve the digest mismatch and waits for enough full data responses to meet the consistency level, no matter whether it's speculative retry or read repair chance. As an example, if it looks like Cassandra might not receive full data responses from everyone in time, it sends additional requests to additional replicas not contacted in the initial full data read. If the collection of nodes that end up responding in time end up agreeing on the data, the response from the disagreeing replica that started the read repair is not considered, and won't be included in the response to the client, preserving the expectation of monotonic quorum reads.
- -Diagnostic Events for Read Repairs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- -Cassandra 4.0 adds diagnostic events for read repair (`CASSANDRA-14668 -`_) that can be used for exposing information such as:
- -- Contacted endpoints -- Digest responses by endpoint -- Affected partition keys -- Speculated reads / writes -- Update oversized
- -Background Read Repair -^^^^^^^^^^^^^^^^^^^^^^^^^^
- -Background read repair, which was configured using the ``read_repair_chance`` and ``dclocal_read_repair_chance`` settings in ``cassandra.yaml``, is removed in Cassandra 4.0 (`CASSANDRA-13910 -`_).
- -Read repair is not an alternative for other kinds of repair such as full repairs or replacing a node that keeps failing. The data returned even after a read repair has been performed may not be the most up-to-date data if the consistency level is anything other than one requiring a response from all replicas. diff --git a/src/doc/4.0-beta1/_sources/operating/repair.rst.txt b/src/doc/4.0-beta1/_sources/operating/repair.rst.txt deleted file mode 100644 index 94fdc1109..000000000 --- a/src/doc/4.0-beta1/_sources/operating/repair.rst.txt +++ /dev/null @@ -1,208 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License.
- -.. highlight:: none
- -.. _repair:
- -Repair ------
- -Cassandra is designed to remain available if one of its nodes is down or -unreachable. However, when a node is down or -unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but -are a best effort, and aren't guaranteed to inform a node of 100% of the writes it missed.
These inconsistencies can -eventually result in data loss as nodes are replaced or tombstones expire. - -These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their -respective datasets for their common token ranges, and streaming the differences for any out of sync sections between -the nodes. It compares the data with merkle trees, which are a hierarchy of hashes. - -Incremental and Full Repairs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that's been written since the previous incremental repair. - -Incremental repairs are the default repair type, and if run regularly, can significantly reduce the time and io cost of -performing a repair. However, it's important to understand that once an incremental repair marks data as repaired, it won't -try to repair it again. This is fine for syncing up missed writes, but it doesn't protect against things like disk corruption, -data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally. - -Usage and Best Practices -^^^^^^^^^^^^^^^^^^^^^^^^ - -Since repair can result in a lot of disk and network io, it's not run automatically by Cassandra. It is run by the operator -via nodetool. - -Incremental repair is the default and is run with the following command: - -:: - - nodetool repair - -A full repair can be run with the following command: - -:: - - nodetool repair --full - -Additionally, repair can be run on a single keyspace: - -:: - - nodetool repair [options] - -Or even on specific tables: - -:: - - nodetool repair [options] - - -The repair command only repairs token ranges on the node being repaired, it doesn't repair the whole cluster. By default, repair -will operate on all token ranges replicated by the node you're running repair on, which will cause duplicate work if you run it -on every node. The ``-pr`` flag will only repair the "primary" ranges on a node, so you can repair your entire cluster by running -``nodetool repair -pr`` on each node in a single datacenter. - -The specific frequency of repair that's right for your cluster, of course, depends on several factors. However, if you're -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don't want to run incremental repairs, a full repair every 5 days is a good place -to start. - -At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays. - -Other Options -^^^^^^^^^^^^^ - -``-pr, --partitioner-range`` - Restricts repair to the 'primary' token ranges of the node being repaired. A primary range is just a token range for - which a node is the first replica in the ring. - -``-prv, --preview`` - Estimates the amount of streaming that would occur for the given repair command. This builds the merkle trees, and prints - the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated, - add the ``--full`` flag to estimate a full repair. 
- -``-vd, --validate`` - Verifies that the repaired data is the same across all nodes. Similiar to ``--preview``, this builds and compares merkle - trees of repaired data, but doesn't do any streaming. This is useful for troubleshooting. If this shows that the repaired - data is out of sync, a full repair should be run. - -.. seealso:: - :ref:`nodetool repair docs ` - -Full Repair Example -^^^^^^^^^^^^^^^^^^^^ -Full repair is typically needed to redistribute data after increasing the replication factor of a keyspace or after adding a node to the cluster. Full repair involves streaming SSTables. To demonstrate full repair start with a three node cluster. - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool status - Datacenter: us-east-1 - ===================== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns Host ID Rack - UN 10.0.1.115 547 KiB 256 ? b64cb32a-b32a-46b4-9eeb-e123fa8fc287 us-east-1b - UN 10.0.3.206 617.91 KiB 256 ? 74863177-684b-45f4-99f7-d1006625dc9e us-east-1d - UN 10.0.2.238 670.26 KiB 256 ? 4dcdadd2-41f9-4f34-9892-1f20868b27c7 us-east-1c - -Create a keyspace with replication factor 3: - -:: - - cqlsh> DROP KEYSPACE cqlkeyspace; - cqlsh> CREATE KEYSPACE CQLKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; - -Add a table to the keyspace: - -:: - - cqlsh> use cqlkeyspace; - cqlsh:cqlkeyspace> CREATE TABLE t ( - ... id int, - ... k int, - ... v text, - ... PRIMARY KEY (id) - ... ); - -Add table data: - -:: - - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0'); - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1'); - cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (2, 2, 'val2'); - -A query lists the data added: - -:: - - cqlsh:cqlkeyspace> SELECT * FROM t; - - id | k | v - ----+---+------ - 1 | 1 | val1 - 0 | 0 | val0 - 2 | 2 | val2 - (3 rows) - -Make the following changes to a three node cluster: - -1. Increase the replication factor from 3 to 4. -2. Add a 4th node to the cluster - -When the replication factor is increased the following message gets output indicating that a full repair is needed as per (`CASSANDRA-13079 -`_): - -:: - - cqlsh:cqlkeyspace> ALTER KEYSPACE CQLKeyspace - ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4}; - Warnings : - When increasing replication factor you need to run a full (-full) repair to distribute the - data. - -Perform a full repair on the keyspace ``cqlkeyspace`` table ``t`` with following command: - -:: - - nodetool repair -full cqlkeyspace t - -Full repair completes in about a second as indicated by the output: - -:: - -[ec2-user@ip-10-0-2-238 ~]$ nodetool repair -full cqlkeyspace t -[2019-08-17 03:06:21,445] Starting repair command #1 (fd576da0-c09b-11e9-b00c-1520e8c38f00), repairing keyspace cqlkeyspace with repair options (parallelism: parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [t], dataCenters: [], hosts: [], previewKind: NONE, # of ranges: 1024, pull repair: false, force repair: false, optimise streams: false) -[2019-08-17 03:06:23,059] Repair session fd8e5c20-c09b-11e9-b00c-1520e8c38f00 for range [(-8792657144775336505,-8786320730900698730], (-5454146041421260303,-5439402053041523135], (4288357893651763201,4324309707046452322], ... 
, (4350676211955643098,4351706629422088296]] finished (progress: 0%) -[2019-08-17 03:06:23,077] Repair completed successfully -[2019-08-17 03:06:23,077] Repair command #1 finished in 1 second -[ec2-user@ip-10-0-2-238 ~]$ - -The ``nodetool tpstats`` command should list a repair having been completed as ``Repair-Task`` > ``Completed`` column value of 1: - -:: - - [ec2-user@ip-10-0-2-238 ~]$ nodetool tpstats - Pool Name Active Pending Completed Blocked All time blocked - ReadStage 0 0 99 0 0 - … - Repair-Task 0 0 1 0 0 - RequestResponseStage 0 0 2078 0 0 diff --git a/src/doc/4.0-beta1/_sources/operating/security.rst.txt b/src/doc/4.0-beta1/_sources/operating/security.rst.txt deleted file mode 100644 index 12f2d24c2..000000000 --- a/src/doc/4.0-beta1/_sources/operating/security.rst.txt +++ /dev/null @@ -1,441 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -Security --------- -There are three main components to the security features provided by Cassandra: - -- TLS/SSL encryption for client and inter-node communication -- Client authentication -- Authorization - -By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still: - -- Craft internode messages to insert users into authentication schema -- Craft internode messages to truncate or drop schema -- Use tools such as ``sstableloader`` to overwrite ``system_auth`` tables -- Attach to the cluster directly to capture write traffic - -Correct configuration of all three security components should negate theses vectors. Therefore, understanding Cassandra's -security features is crucial to configuring your cluster to meet your security needs. - - -TLS/SSL Encryption -^^^^^^^^^^^^^^^^^^ -Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently. - -In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can -be overidden using the settings in ``cassandra.yaml``, but this is not recommended unless there are policies in place -which dictate certain settings or a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be -updated. 
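As a minimal sketch only (the keystore path, password and cipher list below are placeholders, and as noted above the JVM defaults are usually preferable), such an override in ``cassandra.yaml`` might look like:

::

    client_encryption_options:
        enabled: true
        keystore: conf/.keystore
        keystore_password: myKeyPass
        # overriding the JVM defaults is rarely necessary
        cipher_suites: [TLS_RSA_WITH_AES_256_CBC_SHA]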
- -FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See `the java document on FIPS `__ -for more details. - -For information on generating the keystore and truststore files used in SSL communications, see the -`java documentation on creating keystores `__ - -SSL Certificate Hot Reloading -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes. - -Certificate Hot reloading may also be triggered using the ``nodetool reloadssl`` command. Use this if you want to Cassandra to -immediately notice the changed certificates. - -Inter-node Encryption -~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing inter-node encryption are found in ``cassandra.yaml`` in the ``server_encryption_options`` -section. To enable inter-node encryption, change the ``internode_encryption`` setting from its default value of ``none`` -to one value from: ``rack``, ``dc`` or ``all``. - -Client to Node Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The settings for managing client to node encryption are found in ``cassandra.yaml`` in the ``client_encryption_options`` -section. There are two primary toggles here for enabling encryption, ``enabled`` and ``optional``. - -- If neither is set to ``true``, client connections are entirely unencrypted. -- If ``enabled`` is set to ``true`` and ``optional`` is set to ``false``, all client connections must be secured. -- If both options are set to ``true``, both encrypted and unencrypted connections are supported using the same port. - Client connections using encryption with this configuration will be automatically detected and handled by the server. - -As an alternative to the ``optional`` setting, separate ports can also be configured for secure and unsecure connections -where operational requirements demand it. To do so, set ``optional`` to false and use the ``native_transport_port_ssl`` -setting in ``cassandra.yaml`` to specify the port to be used for secure client communication. - -.. _operation-roles: - -Roles -^^^^^ - -Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -``role_manager`` setting in ``cassandra.yaml``. The default setting uses ``CassandraRoleManager``, an implementation -which stores role information in the tables of the ``system_auth`` keyspace. - -See also the :ref:`CQL documentation on roles `. - -Authentication -^^^^^^^^^^^^^^ - -Authentication is pluggable in Cassandra and is configured using the ``authenticator`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthenticator`` which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra's permissions subsystem, so if authentication is disabled, effectively so are permissions. 
- -The default distribution also includes ``PasswordAuthenticator``, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication. - -.. _password-authentication: - -Enabling Password Authentication -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before enabling client authentication on the cluster, client applications should be pre-configured with their intended -credentials. When a connection is initiated, the server will only ask for credentials once authentication is -enabled, so setting up the client side config in advance is safe. In contrast, as soon as a server has authentication -enabled, any connection attempt without proper credentials will be rejected which may cause availability problems for -client applications. Once clients are setup and ready for authentication to be enabled, follow this procedure to enable -it on the cluster. - -Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps: - -1. Open a ``cqlsh`` session and change the replication factor of the ``system_auth`` keyspace. By default, this keyspace - uses ``SimpleReplicationStrategy`` and a ``replication_factor`` of 1. It is recommended to change this for any - non-trivial deployment to ensure that should nodes become unavailable, login is still possible. Best practice is to - configure a replication factor of 3 to 5 per-DC. - -:: - - ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3}; - -2. Edit ``cassandra.yaml`` to change the ``authenticator`` option like so: - -:: - - authenticator: PasswordAuthenticator - -3. Restart the node. - -4. Open a new ``cqlsh`` session using the credentials of the default superuser: - -:: - - cqlsh -u cassandra -p cassandra - -5. During login, the credentials for the default superuser are read with a consistency level of ``QUORUM``, whereas - those for all other users (including superusers) are read at ``LOCAL_ONE``. In the interests of performance and - availability, as well as security, operators should create another superuser and disable the default one. This step - is optional, but highly recommended. While logged in as the default superuser, create another superuser role which - can be used to bootstrap further configuration. - -:: - - # create a new superuser - CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super'; - -6. Start a new cqlsh session, this time logging in as the new_superuser and disable the default superuser. - -:: - - ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false; - -7. Finally, set up the roles and credentials for your application users with :ref:`CREATE ROLE ` - statements. - -At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster. - -Note that using ``PasswordAuthenticator`` also requires the use of :ref:`CassandraRoleManager `. - -See also: :ref:`setting-credentials-for-internal-authentication`, :ref:`CREATE ROLE `, -:ref:`ALTER ROLE `, :ref:`ALTER KEYSPACE ` and :ref:`GRANT PERMISSION -`, - -.. 
_authorization: - -Authorization -^^^^^^^^^^^^^ - -Authorization is pluggable in Cassandra and is configured using the ``authorizer`` setting in ``cassandra.yaml``. -Cassandra ships with two options included in the default distribution. - -By default, Cassandra is configured with ``AllowAllAuthorizer`` which performs no checking and so effectively grants all -permissions to all roles. This must be used if ``AllowAllAuthenticator`` is the configured authenticator. - -The default distribution also includes ``CassandraAuthorizer``, which does implement full permissions management -functionality and stores its data in Cassandra system tables. - -Enabling Internal Authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests. - -The following assumes that authentication has already been enabled via the process outlined in -:ref:`password-authentication`. Perform these steps to enable internal authorization across the cluster: - -1. On the selected node, edit ``cassandra.yaml`` to change the ``authorizer`` option like so: - -:: - - authorizer: CassandraAuthorizer - -2. Restart the node. - -3. Open a new ``cqlsh`` session using the credentials of a role with superuser credentials: - -:: - - cqlsh -u dba -p super - -4. Configure the appropriate access privileges for your clients using `GRANT PERMISSION `_ - statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so - disruption to clients is avoided. - -:: - - GRANT SELECT ON ks.t1 TO db_user; - -5. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node - restarts and clients reconnect, the enforcement of the granted permissions will begin. - -See also: :ref:`GRANT PERMISSION `, `GRANT ALL ` and :ref:`REVOKE PERMISSION -` - -.. _auth-caching: - -Caching -^^^^^^^ - -Enabling authentication and authorization places additional load on the cluster by frequently reading from the -``system_auth`` tables. Furthermore, these reads are in the critical paths of many client operations, and so has the -potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role -details are cached for a configurable period. The caching can be configured (and even disabled) from ``cassandra.yaml`` -or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX -are not persistent and will be re-read from ``cassandra.yaml`` when the node is restarted. - -Each cache has 3 options which can be set: - -Validity Period - Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache. -Refresh Rate - Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these - async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a - shorter time than the validity period. -Max Entries - Controls the upper bound on cache size. 
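For example (the values shown are illustrative rather than recommendations), the roles cache could be tuned in ``cassandra.yaml`` with settings that follow the naming convention described next:

::

    roles_validity_in_ms: 2000
    roles_update_interval_in_ms: 1000
    roles_cache_max_entries: 1000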
- -The naming for these options in ``cassandra.yaml`` follows the convention:
- -* ``_validity_in_ms`` -* ``_update_interval_in_ms`` -* ``_cache_max_entries``
- -Where ```` is one of ``credentials``, ``permissions``, or ``roles``.
- -As mentioned, these are also exposed via JMX in the MBeans under the ``org.apache.cassandra.auth`` domain.
- -JMX access -^^^^^^^^^^
- -Access control for JMX clients is configured separately to that for CQL. For both authentication and authorization, two -providers are available; the first based on standard JMX security and the second which integrates more closely with -Cassandra's own auth subsystem.
- -The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to change the ``LOCAL_JMX`` setting to ``no``. Under the -standard configuration, when remote JMX connections are enabled, :ref:`standard JMX authentication ` -is also switched on.
- -Note that by default, local-only connections are not subject to authentication, but this can be enabled.
- -If enabling remote connections, it is recommended to also use :ref:`SSL ` connections.
- -Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as :ref:`nodetool `, are -correctly configured and working as expected.
- -.. _standard-jmx-auth:
- -Standard JMX Auth -~~~~~~~~~~~~~~~~~
- -Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -``cassandra-env.sh`` by the line:
- -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
- -Edit the password file to add username/password pairs:
- -:: - - jmx_user jmx_password
- -Secure the credentials file so that only the user running the Cassandra process can read it:
- -:: - - $ chown cassandra:cassandra /etc/cassandra/jmxremote.password - $ chmod 400 /etc/cassandra/jmxremote.password
- -Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in ``cassandra-env.sh``:
- -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
- -Then edit the access file to grant your JMX user readwrite permission:
- -:: - - jmx_user readwrite
- -Cassandra must be restarted to pick up the new settings.
- -See also: `Using File-Based Password Authentication In JMX -`__
- - -Cassandra Integrated Auth -~~~~~~~~~~~~~~~~~~~~~~~~~
- -An alternative to the out-of-the-box JMX auth is to use Cassandra's own authentication and/or authorization providers -for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not -available until `after` a node has joined the ring, because the auth subsystem is not fully configured until that point. -However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is -recommended, where possible, to use local-only JMX auth during bootstrap and then, if remote connectivity is required, -to switch to integrated auth once the node has joined the ring and initial setup is complete.
- -With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates -can be managed centrally using just ``cqlsh``.
Furthermore, fine grained control over exactly which operations are -permitted on particular MBeans can be acheived via :ref:`GRANT PERMISSION `. - -To enable integrated authentication, edit ``cassandra-env.sh`` to uncomment these lines: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin" - #JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config" - -And disable the JMX standard auth by commenting this line: - -:: - - JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" - -To enable integrated authorization, uncomment this line: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy" - -Check standard access control is off by ensuring this line is commented out: - -:: - - #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access" - -With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as: - -:: - - CREATE ROLE jmx WITH LOGIN = false; - GRANT SELECT ON ALL MBEANS TO jmx; - GRANT DESCRIBE ON ALL MBEANS TO jmx; - GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx; - GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx; - - # Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx; - - # Grant the jmx role to one with login permissions so that it can access the JMX tooling - CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false; - GRANT jmx TO ks_user; - -Fine grained access control to individual MBeans is also supported: - -:: - - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user; - GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner; - -This permits the ``ks_user`` role to invoke methods on the MBean representing a single table in ``test_keyspace``, while -granting the same permission for all table level MBeans in that keyspace to the ``ks_owner`` role. - -Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered. - -See also: :ref:`Permissions `. - -.. _jmx-with-ssl: - -JMX With SSL -~~~~~~~~~~~~ - -JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit -the relevant lines in ``cassandra-env.sh`` (or ``cassandra-env.ps1`` on Windows) to uncomment and set the values of these -properties as required: - -``com.sun.management.jmxremote.ssl`` - set to true to enable SSL -``com.sun.management.jmxremote.ssl.need.client.auth`` - set to true to enable validation of client certificates -``com.sun.management.jmxremote.registry.ssl`` - enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub -``com.sun.management.jmxremote.ssl.enabled.protocols`` - by default, the protocols supported by the JVM will be used, override with a comma-separated list. 
Note that this is - not usually necessary and using the defaults is the preferred option. -``com.sun.management.jmxremote.ssl.enabled.cipher.suites`` - by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that - this is not usually necessary and using the defaults is the preferred option. -``javax.net.ssl.keyStore`` - set the path on the local filesystem of the keystore containing server private keys and public certificates -``javax.net.ssl.keyStorePassword`` - set the password of the keystore file -``javax.net.ssl.trustStore`` - if validation of client certificates is required, use this property to specify the path of the truststore containing - the public certificates of trusted clients -``javax.net.ssl.trustStorePassword`` - set the password of the truststore file
- -See also: `Oracle Java7 Docs `__, -`Monitor Java with JMX `__ diff --git a/src/doc/4.0-beta1/_sources/operating/snitch.rst.txt b/src/doc/4.0-beta1/_sources/operating/snitch.rst.txt deleted file mode 100644 index b716e8290..000000000 --- a/src/doc/4.0-beta1/_sources/operating/snitch.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License.
- -.. highlight:: none
- -.. _snitch:
- -Snitch ------
- -In Cassandra, the snitch has two functions:
- -- it teaches Cassandra enough about your network topology to route requests efficiently. -- it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping - machines into "datacenters" and "racks." Cassandra will do its best not to have more than one replica on the same - "rack" (which may not actually be a physical location).
- -Dynamic snitching -^^^^^^^^^^^^^^^^^
- -The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is -configured with the following properties in ``cassandra.yaml``:
- -- ``dynamic_snitch``: whether the dynamic snitch should be enabled or disabled. -- ``dynamic_snitch_update_interval_in_ms``: controls how often to perform the more expensive part of host score - calculation. -- ``dynamic_snitch_reset_interval_in_ms``: if set greater than zero, this will allow 'pinning' of replicas to hosts - in order to increase cache capacity. -- ``dynamic_snitch_badness_threshold``: The badness threshold will control how much worse the pinned host has to be - before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a - percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned - host was 20% worse than the fastest.
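A minimal ``cassandra.yaml`` sketch of these dynamic snitch settings (the values shown are illustrative):

::

    dynamic_snitch: true
    dynamic_snitch_update_interval_in_ms: 100
    dynamic_snitch_reset_interval_in_ms: 600000
    dynamic_snitch_badness_threshold: 0.1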
- -Snitch classes -^^^^^^^^^^^^^^ - -The ``endpoint_snitch`` parameter in ``cassandra.yaml`` should be set to the class that implements -``IEndPointSnitch`` which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the snitch implementations: - -GossipingPropertyFileSnitch - This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in - cassandra-rackdc.properties and propagated to other nodes via gossip. If ``cassandra-topology.properties`` exists, - it is used as a fallback, allowing migration from the PropertyFileSnitch. - -SimpleSnitch - Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for - single-datacenter deployments. - -PropertyFileSnitch - Proximity is determined by rack and data center, which are explicitly configured in - ``cassandra-topology.properties``. - -Ec2Snitch - Appropriate for EC2 deployments in a single Region, or in multiple regions with inter-region VPC enabled (available - since the end of 2017, see `AWS announcement `_). - Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter, and the - Availability Zone as the rack. Only private IPs are used, so this will work across multiple regions only if - inter-region VPC is enabled. - -Ec2MultiRegionSnitch - Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the - public IP as well). You will need to open the ``storage_port`` or ``ssl_storage_port`` on the public IP firewall - (For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection). - -RackInferringSnitch - Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each - node's IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an - example of writing a custom Snitch class and is provided in that spirit. diff --git a/src/doc/4.0-beta1/_sources/operating/topo_changes.rst.txt b/src/doc/4.0-beta1/_sources/operating/topo_changes.rst.txt deleted file mode 100644 index 6c8f8ecdf..000000000 --- a/src/doc/4.0-beta1/_sources/operating/topo_changes.rst.txt +++ /dev/null @@ -1,129 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: none - -.. _topology-changes: - -Adding, replacing, moving and removing nodes --------------------------------------------- - -Bootstrap -^^^^^^^^^ - -Adding new nodes is called "bootstrapping". The ``num_tokens`` parameter will define the amount of virtual nodes -(tokens) the joining node will be assigned during bootstrap. 
The tokens define the sections of the ring (token ranges) -the node will become responsible for. - -Token allocation -~~~~~~~~~~~~~~~~ - -With the default token allocation algorithm the new node will pick ``num_tokens`` random tokens to become responsible -for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but it -also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with -acceptable overhead. - -On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option ``-Dcassandra.allocate_tokens_for_keyspace=<keyspace>``, where -``<keyspace>`` is the keyspace from which the algorithm can find the load information to optimize token assignment for. - -Manual token assignment -""""""""""""""""""""""" - -You may specify a comma-separated list of tokens manually with the ``initial_token`` ``cassandra.yaml`` parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens. - -Range streaming -~~~~~~~~~~~~~~~ - -After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state. - -In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag ``-Dcassandra.consistent.rangemovement=false``. - -Resuming failed/hanged bootstrap -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On 2.2+, if the bootstrap process fails, it's possible to resume bootstrap from the previous saved state by calling -``nodetool bootstrap resume``. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to clean up bootstrap state and start fresh, you may set the JVM startup flag -``-Dcassandra.reset_bootstrap_progress=true``. - -On lower versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart -the bootstrap process. - -Manual bootstrapping -~~~~~~~~~~~~~~~~~~~~ - -It's possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -``auto_bootstrap: false``. This may be useful when restoring a node from a backup or creating a new data-center. - -Removing nodes -^^^^^^^^^^^^^^ - -You can take a node out of the cluster by running ``nodetool decommission`` on a live node, or ``nodetool removenode`` (from any -other machine) to remove a dead one. This will assign the ranges the old node was responsible for to other nodes, and -replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If -removenode is used, the data will stream from the remaining replicas. - -No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually.
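As a rough sketch of the two paths just described (the host ID below is a placeholder; take the real value from ``nodetool status``)::

    # on the node that is leaving the cluster (node must be live)
    nodetool decommission

    # from any other node, to remove a dead node by its host ID
    nodetool removenode 192a3b84-5c6d-4e7f-8a9b-0c1d2e3f4a5b

    # check the progress of an in-flight removal
    nodetool removenode status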
- -Moving nodes -^^^^^^^^^^^^ - -When ``num_tokens: 1`` it's possible to move the node position in the ring with ``nodetool move``. Moving is both more -convenient and more efficient than decommission + bootstrap. After moving a node, ``nodetool cleanup`` should be -run to remove any unnecessary data. - -Replacing a dead node -^^^^^^^^^^^^^^^^^^^^^ - -In order to replace a dead node, start Cassandra with the JVM startup flag -``-Dcassandra.replace_address_first_boot=<dead_node_ip>``. Once this property is enabled the node starts in a hibernate -state, during which all the other nodes will see this node as DOWN (DN); however, this node will see itself as UP -(UN). Accurate replacement state can be found in ``nodetool netstats``. - -The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will -only receive writes during the bootstrapping phase if it has a different IP address from the node that is being replaced. -(See CASSANDRA-8523 and CASSANDRA-12344) - -Once the bootstrapping is complete the node will be marked "UP". - -.. Note:: If any of the following cases apply, you **MUST** run repair to make the replaced node consistent again, since - it missed ongoing writes during/prior to bootstrapping. The *replacement* timeframe refers to the period from when the - node initially dies to when a new node completes the replacement process. - - 1. The node is down for longer than ``max_hint_window_in_ms`` before being replaced. - 2. You are replacing using the same IP address as the dead node **and** replacement takes longer than ``max_hint_window_in_ms``. - -Monitoring progress -^^^^^^^^^^^^^^^^^^^ - -Bootstrap, replace, move and remove progress can be monitored using ``nodetool netstats``, which will show the progress -of the streaming operations. - -Cleanup data after range movements -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As a safety measure, Cassandra does not automatically remove data from nodes that "lose" part of their token range due -to a range movement operation (bootstrap, move, replace). Run ``nodetool cleanup`` on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this, the old data will still be -counted against the load on that node. diff --git a/src/doc/4.0-beta1/_sources/plugins/index.rst.txt b/src/doc/4.0-beta1/_sources/plugins/index.rst.txt deleted file mode 100644 index 4073a92cb..000000000 --- a/src/doc/4.0-beta1/_sources/plugins/index.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License.
- -Third-Party Plugins -=================== - -Available third-party plugins for Apache Cassandra - -CAPI-Rowcache -------------- - -The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments. - -The official page for the `CAPI-Rowcache plugin `__ contains further details how to build/run/download the plugin. - - -Stratio’s Cassandra Lucene Index --------------------------------- - -Stratio’s Lucene index is a Cassandra secondary index implementation based on `Apache Lucene `__. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities such as with ElasticSearch or `Apache Solr `__, including full text search capabilities, free multivariable, geospatial and bitemporal search, relevance queries and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability is guaranteed. - -The official Github repository `Cassandra Lucene Index `__ contains everything you need to build/run/configure the plugin. \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/tools/cassandra_stress.rst.txt b/src/doc/4.0-beta1/_sources/tools/cassandra_stress.rst.txt deleted file mode 100644 index bcac54ec1..000000000 --- a/src/doc/4.0-beta1/_sources/tools/cassandra_stress.rst.txt +++ /dev/null @@ -1,269 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. highlight:: yaml - -.. _cassandra_stress: - -Cassandra Stress ----------------- - -cassandra-stress is a tool for benchmarking and load testing a Cassandra -cluster. cassandra-stress supports testing arbitrary CQL tables and queries -to allow users to benchmark their data model. - -This documentation focuses on user mode as this allows the testing of your -actual schema. - -Usage -^^^^^ -There are several operation types: - - * write-only, read-only, and mixed workloads of standard data - * write-only and read-only workloads for counter columns - * user configured workloads, running custom queries on custom schemas - -The syntax is `cassandra-stress [options]`. If you want more information on a given command -or options, just run `cassandra-stress help `. 
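For example, to list the sub-options of the ``write`` command, or to run a small illustrative write workload against a local node (the row count, thread count and node address below are placeholders, not recommendations)::

    cassandra-stress help write
    cassandra-stress write n=10000 cl=ONE -rate threads=8 -node 127.0.0.1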
- -Commands: - read: - Multiple concurrent reads - the cluster must first be populated by a write test - write: - Multiple concurrent writes against the cluster - mixed: - Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test - counter_write: - Multiple concurrent updates of counters. - counter_read: - Multiple concurrent reads of counters. The cluster must first be populated by a counter_write test. - user: - Interleaving of user-provided queries, with configurable ratio and distribution. - help: - Print help for a command or option - print: - Inspect the output of a distribution definition - legacy: - Legacy support mode - -Primary Options: - -pop: - Population distribution and intra-partition visit order - -insert: - Insert specific options relating to various methods for batching and splitting partition updates - -col: - Column details such as size and count distribution, data generator, names, comparator and if super columns should be used - -rate: - Thread count, rate limit or automatic mode (default is auto) - -mode: - Thrift or CQL with options - -errors: - How to handle errors when encountered during stress - -sample: - Specify the number of samples to collect for measuring latency - -schema: - Replication settings, compression, compaction, etc. - -node: - Nodes to connect to - -log: - Where to log progress to, and the interval at which to do it - -transport: - Custom transport factories - -port: - The port to connect to cassandra nodes on - -sendto: - Specify a stress server to send this command to - -graph: - Graph recorded metrics - -tokenrange: - Token range settings - - -Suboptions: - Every command and primary option has its own collection of suboptions. These are too numerous to list here. - For information on the suboptions for each command or option, please use the help command, - `cassandra-stress help `. - -User mode -^^^^^^^^^ - -User mode allows you to stress your own schemas. This can save time in -the long run rather than building an application and then realising your schema -doesn't scale. - -Profile -+++++++ - -User mode requires a profile defined in YAML. -Multiple YAML files may be specified, in which case operations in the ops argument are referenced as specname.opname. - -An identifier for the profile:: - - specname: staff_activities - -The keyspace for the test:: - - keyspace: staff - -CQL for the keyspace. Optional if the keyspace already exists:: - - keyspace_definition: | - CREATE KEYSPACE staff WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}; - -The table to be stressed:: - - table: staff_activities - -CQL for the table. Optional if the table already exists:: - - table_definition: | - CREATE TABLE staff_activities ( - name text, - when timeuuid, - what text, - PRIMARY KEY(name, when, what) - ) - - -Optional meta information on the generated columns in the above table. -The min and max only apply to text and blob types.
-The distribution field represents the total unique population -distribution of that column across rows:: - - columnspec: - - name: name - size: uniform(5..10) # The names of the staff members are between 5-10 characters - population: uniform(1..10) # 10 possible staff members to pick from - - name: when - cluster: uniform(20..500) # Staff members do between 20 and 500 events - - name: what - size: normal(10..100,50) - -Supported types are: - -An exponential distribution over the range [min..max]:: - - EXP(min..max) - -An extreme value (Weibull) distribution over the range [min..max]:: - - EXTREME(min..max,shape) - -A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:: - - GAUSSIAN(min..max,stdvrng) - -A gaussian/normal distribution, with explicitly defined mean and stdev:: - - GAUSSIAN(min..max,mean,stdev) - -A uniform distribution over the range [min, max]:: - - UNIFORM(min..max) - -A fixed distribution, always returning the same value:: - - FIXED(val) - -If preceded by ~, the distribution is inverted - -Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1) - -Insert distributions:: - - insert: - # How many partition to insert per batch - partitions: fixed(1) - # How many rows to update per partition - select: fixed(1)/500 - # UNLOGGED or LOGGED batch for insert - batchtype: UNLOGGED - - -Currently all inserts are done inside batches. - -Read statements to use during the test:: - - queries: - events: - cql: select * from staff_activities where name = ? - fields: samerow - latest_event: - cql: select * from staff_activities where name = ? LIMIT 1 - fields: samerow - -Running a user mode test:: - - cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once - -This will create the schema then run tests for 1 minute with an equal number of inserts, latest_event queries and events -queries. Additionally the table will be truncated once before the test. - -The full example can be found here :download:`yaml <./stress-example.yaml>` - -Running a user mode test with multiple yaml files:: - cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once - -This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table - although care must be taken that the table definition is identical (data generation specs can be different). - -Lightweight transaction support -+++++++++++++++++++++++++++++++ - -cassandra-stress supports lightweight transactions. In this it will first read current data from Cassandra and then uses read value(s) -to fulfill lightweight transaction condition(s). - -Lightweight transaction update query:: - - queries: - regularupdate: - cql: update blogposts set author = ? where domain = ? and published_date = ? - fields: samerow - updatewithlwt: - cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ? - fields: samerow - -The full example can be found here :download:`yaml <./stress-lwt-example.yaml>` - -Graphing -^^^^^^^^ - -Graphs can be generated for each run of stress. - -.. 
image:: example-stress-graph.png - -To create a new graph:: - - cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" - -To add a new run to an existing graph, point to an existing file and add a revision name:: - - cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run" - -FAQ -^^^^ - -**How do you use NetworkTopologyStrategy for the keyspace?** - -Use the schema option, making sure to either escape the parentheses or enclose them in quotes:: - - cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)" - -**How do you use SSL?** - -Use the transport option:: - - cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra" \ No newline at end of file diff --git a/src/doc/4.0-beta1/_sources/tools/cqlsh.rst.txt b/src/doc/4.0-beta1/_sources/tools/cqlsh.rst.txt deleted file mode 100644 index b800b88f4..000000000 --- a/src/doc/4.0-beta1/_sources/tools/cqlsh.rst.txt +++ /dev/null @@ -1,458 +0,0 @@ -.. highlight:: none - -.. _cqlsh: - -cqlsh: the CQL shell -------------------- - -cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line. - - -Compatibility -^^^^^^^^^^^^^ - -cqlsh is compatible with Python 2.7. - -In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported. - - -Optional Dependencies -^^^^^^^^^^^^^^^^^^^^^ - -cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh. - -pytz -~~~~ - -By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the `pytz `__ library must be installed. See the ``timezone`` option in cqlshrc_ for -specifying a timezone to use. - -cython -~~~~~~ - -The performance of cqlsh's ``COPY`` operations can be improved by installing `cython `__. This will -compile the Python modules that are central to the performance of ``COPY``. - -cqlshrc -^^^^^^^ - -The ``cqlshrc`` file holds configuration options for cqlsh. By default this is in the user's home directory at -``~/.cassandra/cqlshrc``, but a custom location can be specified with the ``--cqlshrc`` option. - -Example config values and documentation can be found in the ``conf/cqlshrc.sample`` file of a tarball installation. You -can also view the latest version of `cqlshrc online `__. - - -Command Line Options -^^^^^^^^^^^^^^^^^^^^ - -Usage: - -``cqlsh [options] [host [port]]`` - -Options: - -``-C`` ``--color`` - Force color output - -``--no-color`` - Disable color output - -``--browser`` - Specify the browser to use for displaying cqlsh help. This can be one of the `supported browser names - `__ (e.g. ``firefox``) or a browser path followed by ``%s`` (e.g. - ``/usr/bin/google-chrome-stable %s``).
- -``--ssl`` - Use SSL when connecting to Cassandra - -``-u`` ``--user`` - Username to authenticate against Cassandra with - -``-p`` ``--password`` - Password to authenticate against Cassandra with, should - be used in conjunction with ``--user`` - -``-k`` ``--keyspace`` - Keyspace to authenticate to, should be used in conjunction - with ``--user`` - -``-f`` ``--file`` - Execute commands from the given file, then exit - -``--debug`` - Print additional debugging information - -``--encoding`` - Specify a non-default encoding for output (defaults to UTF-8) - -``--cqlshrc`` - Specify a non-default location for the ``cqlshrc`` file - -``-e`` ``--execute`` - Execute the given statement, then exit - -``--connect-timeout`` - Specify the connection timeout in seconds (defaults to 2s) - -``--python /path/to/python`` - Specify the full path to Python interpreter to override default on systems with multiple interpreters installed - -``--request-timeout`` - Specify the request timeout in seconds (defaults to 10s) - -``-t`` ``--tty`` - Force tty mode (command prompt) - - -Special Commands -^^^^^^^^^^^^^^^^ - -In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below. - -``CONSISTENCY`` -~~~~~~~~~~~~~~~ - -`Usage`: ``CONSISTENCY `` - -Sets the consistency level for operations to follow. Valid arguments include: - -- ``ANY`` -- ``ONE`` -- ``TWO`` -- ``THREE`` -- ``QUORUM`` -- ``ALL`` -- ``LOCAL_QUORUM`` -- ``LOCAL_ONE`` -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -``SERIAL CONSISTENCY`` -~~~~~~~~~~~~~~~~~~~~~~ - -`Usage`: ``SERIAL CONSISTENCY `` - -Sets the serial consistency level for operations to follow. Valid arguments include: - -- ``SERIAL`` -- ``LOCAL_SERIAL`` - -The serial consistency level is only used by conditional updates (``INSERT``, ``UPDATE`` and ``DELETE`` with an ``IF`` -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of ``QUORUM`` (and -is successful), then a ``QUORUM`` read is guaranteed to see that write. But if the regular consistency level of that -write is ``ANY``, then only a read with a consistency level of ``SERIAL`` is guaranteed to see it (even a read with -consistency ``ALL`` is not guaranteed to be enough). - -``SHOW VERSION`` -~~~~~~~~~~~~~~~~ -Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:: - - cqlsh> SHOW VERSION - [cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4] - -``SHOW HOST`` -~~~~~~~~~~~~~ - -Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:: - - cqlsh> SHOW HOST - Connected to Prod_Cluster at 192.0.0.1:9042. - -``SHOW SESSION`` -~~~~~~~~~~~~~~~~ - -Pretty prints a specific tracing session. 
- -`Usage`: ``SHOW SESSION `` - -Example usage:: - - cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8 - - Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8 - - activity | timestamp | source | source_elapsed | client - -----------------------------------------------------------+----------------------------+-----------+----------------+----------- - Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 | 0 | 127.0.0.1 - Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 | 3843 | 127.0.0.1 - ... - - -``SOURCE`` -~~~~~~~~~~ - -Reads the contents of a file and executes each line as a CQL statement or special cqlsh command. - -`Usage`: ``SOURCE `` - -Example usage:: - - cqlsh> SOURCE '/home/thobbs/commands.cql' - -``CAPTURE`` -~~~~~~~~~~~ - -Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured. - -`Usage`:: - - CAPTURE ''; - CAPTURE OFF; - CAPTURE; - -That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation (``'~/mydir'``) is supported for referring to ``$HOME``. - -Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session. - -To stop capturing output and show it in the cqlsh session again, use ``CAPTURE OFF``. - -To inspect the current capture configuration, use ``CAPTURE`` with no arguments. - -``HELP`` -~~~~~~~~ - -Gives information about cqlsh commands. To see available topics, enter ``HELP`` without any arguments. To see help on a -topic, use ``HELP ``. Also see the ``--browser`` argument for controlling what browser is used to display help. - -``TRACING`` -~~~~~~~~~~~ - -Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed. - -`Usage`:: - - TRACING ON - TRACING OFF - -``PAGING`` -~~~~~~~~~~ - -Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it's a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once. - -`Usage`:: - - PAGING ON - PAGING OFF - PAGING - -``EXPAND`` -~~~~~~~~~~ - -Enables or disables vertical printing of rows. Enabling ``EXPAND`` is useful when many columns are fetched, or the -contents of a single column are large. - -`Usage`:: - - EXPAND ON - EXPAND OFF - -``LOGIN`` -~~~~~~~~~ - -Authenticate as a specified Cassandra user for the current session. - -`Usage`:: - - LOGIN [] - -``EXIT`` -~~~~~~~~~ - -Ends the current session and terminates the cqlsh process. - -`Usage`:: - - EXIT - QUIT - -``CLEAR`` -~~~~~~~~~ - -Clears the console. - -`Usage`:: - - CLEAR - CLS - -``DESCRIBE`` -~~~~~~~~~~~~ - -Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema. - -`Usage`:: - - DESCRIBE CLUSTER - DESCRIBE SCHEMA - DESCRIBE KEYSPACES - DESCRIBE KEYSPACE - DESCRIBE TABLES - DESCRIBE TABLE
- DESCRIBE INDEX - DESCRIBE MATERIALIZED VIEW - DESCRIBE TYPES - DESCRIBE TYPE - DESCRIBE FUNCTIONS - DESCRIBE FUNCTION - DESCRIBE AGGREGATES - DESCRIBE AGGREGATE - -In any of the commands, ``DESC`` may be used in place of ``DESCRIBE``. - -The ``DESCRIBE CLUSTER`` command prints the cluster name and partitioner:: - - cqlsh> DESCRIBE CLUSTER - - Cluster: Test Cluster - Partitioner: Murmur3Partitioner - -The ``DESCRIBE SCHEMA`` command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup. - -``COPY TO`` -~~~~~~~~~~~ - -Copies data from a table to a CSV file. - -`Usage`:: - - COPY
[(, ...)] TO WITH [AND ...] - -If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parenthesis after the table name. - - -The ```` should be a string literal (with single quotes) representing a path to the destination file. This -can also the special value ``STDOUT`` (without single quotes) to print the CSV to stdout. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``MAXREQUESTS`` - The maximum number token ranges to fetch simultaneously. Defaults to 6. - -``PAGESIZE`` - The number of rows to fetch in a single page. Defaults to 1000. - -``PAGETIMEOUT`` - By default the page timeout is 10 seconds per 1000 entries - in the page size or 10 seconds if pagesize is smaller. - -``BEGINTOKEN``, ``ENDTOKEN`` - Token range to export. Defaults to exporting the full ring. - -``MAXOUTPUTSIZE`` - The maximum size of the output file measured in number of lines; - beyond this maximum the output file will be split into segments. - -1 means unlimited, and is the default. - -``ENCODING`` - The encoding used for characters. Defaults to ``utf8``. - -``COPY FROM`` -~~~~~~~~~~~~~ -Copies data from a CSV file to table. - -`Usage`:: - - COPY
[(, ...)] FROM WITH [AND ...] - -If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parenthesis after the table name. - -The ```` should be a string literal (with single quotes) representing a path to the -source file. This can also the special value ``STDIN`` (without single quotes) to read the -CSV data from stdin. - -See :ref:`shared-copy-options` for options that apply to both ``COPY TO`` and ``COPY FROM``. - -Options for ``COPY TO`` -``````````````````````` - -``INGESTRATE`` - The maximum number of rows to process per second. Defaults to 100000. - -``MAXROWS`` - The maximum number of rows to import. -1 means unlimited, and is the default. - -``SKIPROWS`` - A number of initial rows to skip. Defaults to 0. - -``SKIPCOLS`` - A comma-separated list of column names to ignore. By default, no columns are skipped. - -``MAXPARSEERRORS`` - The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default. - -``MAXINSERTERRORS`` - The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000. - -``ERRFILE`` = - A file to store all rows that could not be imported, by default this is ``import__
.err`` where ```` is - your keyspace and ``
`` is your table name. - -``MAXBATCHSIZE`` - The max number of rows inserted in a single batch. Defaults to 20. - -``MINBATCHSIZE`` - The min number of rows inserted in a single batch. Defaults to 2. - -``CHUNKSIZE`` - The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000. - -.. _shared-copy-options: - -Shared COPY Options -``````````````````` - -Options that are common to both ``COPY TO`` and ``COPY FROM``. - -``NULLVAL`` - The string placeholder for null values. Defaults to ``null``. - -``HEADER`` - For ``COPY TO``, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, - specifies whether the first line in the CSV input file contains column names. Defaults to ``false``. - -``DECIMALSEP`` - The character that is used as the decimal point separator. Defaults to ``.``. - -``THOUSANDSSEP`` - The character that is used to separate thousands. Defaults to the empty string. - -``BOOLSTYlE`` - The string literal format for boolean values. Defaults to ``True,False``. - -``NUMPROCESSES`` - The number of child worker processes to create for ``COPY`` tasks. Defaults to a max of 4 for ``COPY FROM`` and 16 - for ``COPY TO``. However, at most (num_cores - 1) processes will be created. - -``MAXATTEMPTS`` - The maximum number of failed attempts to fetch a range of data (when using ``COPY TO``) or insert a chunk of data - (when using ``COPY FROM``) before giving up. Defaults to 5. - -``REPORTFREQUENCY`` - How often status updates are refreshed, in seconds. Defaults to 0.25. - -``RATEFILE`` - An optional file to output rate statistics to. By default, statistics are not output to a file. diff --git a/src/doc/4.0-beta1/_sources/tools/index.rst.txt b/src/doc/4.0-beta1/_sources/tools/index.rst.txt deleted file mode 100644 index d28929c84..000000000 --- a/src/doc/4.0-beta1/_sources/tools/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Cassandra Tools -=============== - -This section describes the command line tools provided with Apache Cassandra. - -.. toctree:: - :maxdepth: 3 - - cqlsh - nodetool/nodetool - sstable/index - cassandra_stress diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/assassinate.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/assassinate.rst.txt deleted file mode 100644 index 0ec5dc8f4..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/assassinate.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_assassinate: - -assassinate ------------ - -Usage ---------- - -.. 
include:: assassinate.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/bootstrap.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/bootstrap.rst.txt deleted file mode 100644 index e280fee01..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/bootstrap.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_bootstrap: - -bootstrap ---------- - -Usage ---------- - -.. include:: bootstrap.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/cleanup.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/cleanup.rst.txt deleted file mode 100644 index 70ba8f9f7..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/cleanup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_cleanup: - -cleanup -------- - -Usage ---------- - -.. include:: cleanup.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/clearsnapshot.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/clearsnapshot.rst.txt deleted file mode 100644 index 878147511..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/clearsnapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clearsnapshot: - -clearsnapshot -------------- - -Usage ---------- - -.. include:: clearsnapshot.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/clientstats.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/clientstats.rst.txt deleted file mode 100644 index 7f5e47ab7..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/clientstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_clientstats: - -clientstats ------------ - -Usage ---------- - -.. include:: clientstats.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/compact.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/compact.rst.txt deleted file mode 100644 index a26347c57..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/compact.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compact: - -compact -------- - -Usage ---------- - -.. include:: compact.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/compactionhistory.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/compactionhistory.rst.txt deleted file mode 100644 index 896433e86..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/compactionhistory.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionhistory: - -compactionhistory ------------------ - -Usage ---------- - -.. include:: compactionhistory.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/compactionstats.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/compactionstats.rst.txt deleted file mode 100644 index 612822535..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/compactionstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_compactionstats: - -compactionstats ---------------- - -Usage ---------- - -.. include:: compactionstats.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/decommission.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/decommission.rst.txt deleted file mode 100644 index 8b00ff40c..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/decommission.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_decommission: - -decommission ------------- - -Usage ---------- - -.. 
include:: decommission.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/describecluster.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/describecluster.rst.txt deleted file mode 100644 index 55df135de..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/describecluster.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describecluster: - -describecluster ---------------- - -Usage ---------- - -.. include:: describecluster.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/describering.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/describering.rst.txt deleted file mode 100644 index 3a964e8ee..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/describering.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_describering: - -describering ------------- - -Usage ---------- - -.. include:: describering.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/disableauditlog.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/disableauditlog.rst.txt deleted file mode 100644 index 4b20b0a9b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/disableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableauditlog: - -disableauditlog ---------------- - -Usage ---------- - -.. include:: disableauditlog.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/disableautocompaction.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/disableautocompaction.rst.txt deleted file mode 100644 index 16549f202..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/disableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableautocompaction: - -disableautocompaction ---------------------- - -Usage ---------- - -.. include:: disableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/disablebackup.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/disablebackup.rst.txt deleted file mode 100644 index c9537e011..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/disablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebackup: - -disablebackup -------------- - -Usage ---------- - -.. include:: disablebackup.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/disablebinary.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/disablebinary.rst.txt deleted file mode 100644 index 86e981f6e..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/disablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablebinary: - -disablebinary -------------- - -Usage ---------- - -.. include:: disablebinary.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/disablefullquerylog.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/disablefullquerylog.rst.txt deleted file mode 100644 index d68cd492e..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/disablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablefullquerylog: - -disablefullquerylog -------------------- - -Usage ---------- - -.. include:: disablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/disablegossip.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/disablegossip.rst.txt deleted file mode 100644 index 76f3d064b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/disablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablegossip: - -disablegossip -------------- - -Usage ---------- - -.. 
include:: disablegossip.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/disablehandoff.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/disablehandoff.rst.txt deleted file mode 100644 index 35f11334b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/disablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehandoff: - -disablehandoff --------------- - -Usage ---------- - -.. include:: disablehandoff.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/disablehintsfordc.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/disablehintsfordc.rst.txt deleted file mode 100644 index 892c31ba5..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/disablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disablehintsfordc: - -disablehintsfordc ------------------ - -Usage ---------- - -.. include:: disablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/disableoldprotocolversions.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/disableoldprotocolversions.rst.txt deleted file mode 100644 index 09d8a150b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/disableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_disableoldprotocolversions: - -disableoldprotocolversions --------------------------- - -Usage ---------- - -.. include:: disableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/drain.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/drain.rst.txt deleted file mode 100644 index 03093ac7c..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/drain.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_drain: - -drain ------ - -Usage ---------- - -.. include:: drain.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/enableauditlog.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/enableauditlog.rst.txt deleted file mode 100644 index 7936c7a86..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/enableauditlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableauditlog: - -enableauditlog --------------- - -Usage ---------- - -.. include:: enableauditlog.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/enableautocompaction.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/enableautocompaction.rst.txt deleted file mode 100644 index 2ddf60f8e..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/enableautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableautocompaction: - -enableautocompaction --------------------- - -Usage ---------- - -.. include:: enableautocompaction.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/enablebackup.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/enablebackup.rst.txt deleted file mode 100644 index 9a06c6669..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/enablebackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebackup: - -enablebackup ------------- - -Usage ---------- - -.. include:: enablebackup.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/enablebinary.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/enablebinary.rst.txt deleted file mode 100644 index 5b1c6da72..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/enablebinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablebinary: - -enablebinary ------------- - -Usage ---------- - -.. 
include:: enablebinary.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/enablefullquerylog.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/enablefullquerylog.rst.txt deleted file mode 100644 index ec871c283..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/enablefullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablefullquerylog: - -enablefullquerylog ------------------- - -Usage ---------- - -.. include:: enablefullquerylog.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/enablegossip.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/enablegossip.rst.txt deleted file mode 100644 index ae66186ca..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/enablegossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablegossip: - -enablegossip ------------- - -Usage ---------- - -.. include:: enablegossip.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/enablehandoff.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/enablehandoff.rst.txt deleted file mode 100644 index d764c3a9a..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/enablehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehandoff: - -enablehandoff -------------- - -Usage ---------- - -.. include:: enablehandoff.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/enablehintsfordc.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/enablehintsfordc.rst.txt deleted file mode 100644 index 6c42087c3..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/enablehintsfordc.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enablehintsfordc: - -enablehintsfordc ----------------- - -Usage ---------- - -.. include:: enablehintsfordc.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/enableoldprotocolversions.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/enableoldprotocolversions.rst.txt deleted file mode 100644 index e44dc377a..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/enableoldprotocolversions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_enableoldprotocolversions: - -enableoldprotocolversions -------------------------- - -Usage ---------- - -.. include:: enableoldprotocolversions.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/failuredetector.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/failuredetector.rst.txt deleted file mode 100644 index 25d02b04a..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/failuredetector.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_failuredetector: - -failuredetector ---------------- - -Usage ---------- - -.. include:: failuredetector.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/flush.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/flush.rst.txt deleted file mode 100644 index 45e9b2c5e..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/flush.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_flush: - -flush ------ - -Usage ---------- - -.. include:: flush.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/garbagecollect.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/garbagecollect.rst.txt deleted file mode 100644 index 3af605aad..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/garbagecollect.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_garbagecollect: - -garbagecollect --------------- - -Usage ---------- - -.. 
include:: garbagecollect.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/gcstats.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/gcstats.rst.txt deleted file mode 100644 index 3e4b45930..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/gcstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gcstats: - -gcstats -------- - -Usage ---------- - -.. include:: gcstats.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 6f56f7d6d..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getbatchlogreplaythrottle: - -getbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: getbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getcompactionthreshold.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getcompactionthreshold.rst.txt deleted file mode 100644 index e17f4b2e4..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthreshold: - -getcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: getcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getcompactionthroughput.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getcompactionthroughput.rst.txt deleted file mode 100644 index ef41115ee..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getcompactionthroughput: - -getcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: getcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getconcurrency.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getconcurrency.rst.txt deleted file mode 100644 index 9b0373480..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrency: - -getconcurrency --------------- - -Usage ---------- - -.. include:: getconcurrency.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getconcurrentcompactors.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getconcurrentcompactors.rst.txt deleted file mode 100644 index 8e137e0ed..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentcompactors: - -getconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: getconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt deleted file mode 100644 index e113d74c5..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getconcurrentviewbuilders: - -getconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. 
include:: getconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getendpoints.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getendpoints.rst.txt deleted file mode 100644 index ac0d43c7a..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getendpoints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getendpoints: - -getendpoints ------------- - -Usage ---------- - -.. include:: getendpoints.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt deleted file mode 100644 index abdd7e7f0..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getinterdcstreamthroughput: - -getinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. include:: getinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getlogginglevels.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getlogginglevels.rst.txt deleted file mode 100644 index 9936e8d45..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getlogginglevels.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getlogginglevels: - -getlogginglevels ----------------- - -Usage ---------- - -.. include:: getlogginglevels.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getmaxhintwindow.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getmaxhintwindow.rst.txt deleted file mode 100644 index 1a0fe017b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getmaxhintwindow: - -getmaxhintwindow ----------------- - -Usage ---------- - -.. include:: getmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getreplicas.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getreplicas.rst.txt deleted file mode 100644 index 342e72b57..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getreplicas.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getreplicas: - -getreplicas ------------ - -Usage ---------- - -.. include:: getreplicas.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getseeds.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getseeds.rst.txt deleted file mode 100644 index e3156300e..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getseeds: - -getseeds --------- - -Usage ---------- - -.. include:: getseeds.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getsstables.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getsstables.rst.txt deleted file mode 100644 index 1a866ccec..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getsstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getsstables: - -getsstables ------------ - -Usage ---------- - -.. include:: getsstables.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/getstreamthroughput.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/getstreamthroughput.rst.txt deleted file mode 100644 index 6d7dbc422..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/getstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_getstreamthroughput: - -getstreamthroughput -------------------- - -Usage ---------- - -.. 
include:: getstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/gettimeout.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/gettimeout.rst.txt deleted file mode 100644 index 9d2e9edd8..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/gettimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettimeout: - -gettimeout ----------- - -Usage ---------- - -.. include:: gettimeout.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/gettraceprobability.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/gettraceprobability.rst.txt deleted file mode 100644 index 3f5783fd0..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/gettraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gettraceprobability: - -gettraceprobability -------------------- - -Usage ---------- - -.. include:: gettraceprobability.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/gossipinfo.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/gossipinfo.rst.txt deleted file mode 100644 index cd3513a89..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/gossipinfo.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_gossipinfo: - -gossipinfo ----------- - -Usage ---------- - -.. include:: gossipinfo.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/handoffwindow.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/handoffwindow.rst.txt deleted file mode 100644 index 87fe61dc2..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/handoffwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_handoffwindow: - -handoffwindow -------------- - -Usage ---------- - -.. include:: handoffwindow.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/help.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/help.rst.txt deleted file mode 100644 index 8cab88f74..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/help.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_help: - -help ----- - -Usage ---------- - -.. include:: help.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/import.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/import.rst.txt deleted file mode 100644 index 532ba2dcd..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/import.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_import: - -import ------- - -Usage ---------- - -.. include:: import.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/info.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/info.rst.txt deleted file mode 100644 index 74012e93f..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/info.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_info: - -info ----- - -Usage ---------- - -.. include:: info.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/invalidatecountercache.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/invalidatecountercache.rst.txt deleted file mode 100644 index ce1a94d1e..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/invalidatecountercache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatecountercache: - -invalidatecountercache ----------------------- - -Usage ---------- - -.. 
include:: invalidatecountercache.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/invalidatekeycache.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/invalidatekeycache.rst.txt deleted file mode 100644 index 3e80511a6..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/invalidatekeycache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidatekeycache: - -invalidatekeycache ------------------- - -Usage ---------- - -.. include:: invalidatekeycache.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/invalidaterowcache.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/invalidaterowcache.rst.txt deleted file mode 100644 index fd84f1d5a..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/invalidaterowcache.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_invalidaterowcache: - -invalidaterowcache ------------------- - -Usage ---------- - -.. include:: invalidaterowcache.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/join.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/join.rst.txt deleted file mode 100644 index a2819eb6a..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/join.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_join: - -join ----- - -Usage ---------- - -.. include:: join.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/listsnapshots.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/listsnapshots.rst.txt deleted file mode 100644 index d897cfa2b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/listsnapshots.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_listsnapshots: - -listsnapshots -------------- - -Usage ---------- - -.. include:: listsnapshots.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/move.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/move.rst.txt deleted file mode 100644 index 04b3bdba1..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/move.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_move: - -move ----- - -Usage ---------- - -.. include:: move.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/netstats.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/netstats.rst.txt deleted file mode 100644 index b94a09e7d..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/netstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_netstats: - -netstats --------- - -Usage ---------- - -.. include:: netstats.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/nodetool.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/nodetool.rst.txt deleted file mode 100644 index c20d0ac21..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/nodetool.rst.txt +++ /dev/null @@ -1,256 +0,0 @@ -.. _nodetool - -Nodetool --------- - -Usage ---------- - -usage: nodetool [(-u | --username )] - [(-h | --host )] [(-p | --port )] - [(-pw | --password )] - [(-pwf | --password-file )] - [(-pp | --print-port)] [] - -The most commonly used nodetool commands are: - - :doc:`assassinate` - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode - - :doc:`bootstrap` - Monitor/manage node's bootstrap process - - :doc:`cleanup` - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces - - :doc:`clearsnapshot` - Remove the snapshot with the given name from the given keyspaces. 
If no snapshotName is specified we will remove all snapshots - - :doc:`clientstats` - Print information about connected clients - - :doc:`compact` - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables - - :doc:`compactionhistory` - Print history of compaction - - :doc:`compactionstats` - Print statistics on compactions - - :doc:`decommission` - Decommission the *node I am connecting to* - - :doc:`describecluster` - Print the name, snitch, partitioner and schema version of a cluster - - :doc:`describering` - Shows the token ranges info of a given keyspace - - :doc:`disableauditlog` - Disable the audit log - - :doc:`disableautocompaction` - Disable autocompaction for the given keyspace and table - - :doc:`disablebackup` - Disable incremental backup - - :doc:`disablebinary` - Disable native transport (binary protocol) - - :doc:`disablefullquerylog` - Disable the full query log - - :doc:`disablegossip` - Disable gossip (effectively marking the node down) - - :doc:`disablehandoff` - Disable storing hinted handoffs - - :doc:`disablehintsfordc` - Disable hints for a data center - - :doc:`disableoldprotocolversions` - Disable old protocol versions - - :doc:`drain` - Drain the node (stop accepting writes and flush all tables) - - :doc:`enableauditlog` - Enable the audit log - - :doc:`enableautocompaction` - Enable autocompaction for the given keyspace and table - - :doc:`enablebackup` - Enable incremental backup - - :doc:`enablebinary` - Reenable native transport (binary protocol) - - :doc:`enablefullquerylog` - Enable full query logging, defaults for the options are configured in cassandra.yaml - - :doc:`enablegossip` - Reenable gossip - - :doc:`enablehandoff` - Reenable future hints storing on the current node - - :doc:`enablehintsfordc` - Enable hints for a data center that was previsouly disabled - - :doc:`enableoldprotocolversions` - Enable old protocol versions - - :doc:`failuredetector` - Shows the failure detector information for the cluster - - :doc:`flush` - Flush one or more tables - - :doc:`garbagecollect` - Remove deleted data from one or more tables - - :doc:`gcstats` - Print GC Statistics - - :doc:`getbatchlogreplaythrottle` - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster. - - :doc:`getcompactionthreshold` - Print min and max compaction thresholds for a given table - - :doc:`getcompactionthroughput` - Print the MB/s throughput cap for compaction in the system - - :doc:`getconcurrency` - Get maximum concurrency for processing stages - - :doc:`getconcurrentcompactors` - Get the number of concurrent compactors in the system. 
- - :doc:`getconcurrentviewbuilders` - Get the number of concurrent view builders in the system - - :doc:`getendpoints` - Print the end points that owns the key - - :doc:`getinterdcstreamthroughput` - Print the Mb/s throughput cap for inter-datacenter streaming in the system - - :doc:`getlogginglevels` - Get the runtime logging levels - - :doc:`getmaxhintwindow` - Print the max hint window in ms - - :doc:`getreplicas` - Print replicas for a given key - - :doc:`getseeds` - Get the currently in use seed node IP list excluding the node IP - - :doc:`getsstables` - Print the sstable filenames that own the key - - :doc:`getstreamthroughput` - Print the Mb/s throughput cap for streaming in the system - - :doc:`gettimeout` - Print the timeout of the given type in ms - - :doc:`gettraceprobability` - Print the current trace probability value - - :doc:`gossipinfo` - Shows the gossip information for the cluster - - :doc:`handoffwindow` - Print current hinted handoff window - - :doc:`help` - Display help information - - :doc:`import` - Import new SSTables to the system - - :doc:`info` - Print node information (uptime, load, ...) - - :doc:`invalidatecountercache` - Invalidate the counter cache - - :doc:`invalidatekeycache` - Invalidate the key cache - - :doc:`invalidaterowcache` - Invalidate the row cache - - :doc:`join` - Join the ring - - :doc:`listsnapshots` - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is total size of the snapshot on disk. Total TrueDiskSpaceUsed does not make any SSTable deduplication. - - :doc:`move` - Move node on the token ring to a new token - - :doc:`netstats` - Print network information on provided host (connecting node by default) - - :doc:`pausehandoff` - Pause hints delivery process - - :doc:`profileload` - Low footprint profiling of activity for a period of time - - :doc:`proxyhistograms` - Print statistic histograms for network operations - - :doc:`rangekeysample` - Shows the sampled keys held across all keyspaces - - :doc:`rebuild` - Rebuild data by streaming from other nodes (similarly to bootstrap) - - :doc:`rebuild_index` - A full rebuild of native secondary indexes for a given table - - :doc:`refresh` - Load newly placed SSTables to the system without restart - - :doc:`refreshsizeestimates` - Refresh system.size_estimates - - :doc:`reloadlocalschema` - Reload local node schema from system tables - - :doc:`reloadseeds` - Reload the seed node list from the seed node provider - - :doc:`reloadssl` - Signals Cassandra to reload SSL certificates - - :doc:`reloadtriggers` - Reload trigger classes - - :doc:`relocatesstables` - Relocates sstables to the correct disk - - :doc:`removenode` - Show status of current node removal, force completion of pending removal or remove provided ID - - :doc:`repair` - Repair one or more tables - - :doc:`repair_admin` - - :doc:`list` - and fail incremental repair sessions - - :doc:`replaybatchlog` - Kick off batchlog replay and wait for finish - - :doc:`resetfullquerylog` - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX - - :doc:`resetlocalschema` - Reset node's local schema and resync - - :doc:`resumehandoff` - Resume hints delivery process - - :doc:`ring` - Print information about the token ring - - :doc:`scrub` - Scrub (rebuild sstables for) one or more tables - - :doc:`setbatchlogreplaythrottle` - Set batchlog replay throttle in KB per second, or 0 to 
disable throttling. This will be reduced proportionally to the number of nodes in the cluster. - - :doc:`setcachecapacity` - Set global key, row, and counter cache capacities (in MB units) - - :doc:`setcachekeystosave` - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable - - :doc:`setcompactionthreshold` - Set min and max compaction thresholds for a given table - - :doc:`setcompactionthroughput` - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling - - :doc:`setconcurrency` - Set maximum concurrency for processing stage - - :doc:`setconcurrentcompactors` - Set number of concurrent compactors in the system. - - :doc:`setconcurrentviewbuilders` - Set the number of concurrent view builders in the system - - :doc:`sethintedhandoffthrottlekb` - Set hinted handoff throttle in kb per second, per delivery thread. - - :doc:`setinterdcstreamthroughput` - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling - - :doc:`setlogginglevel` - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters. - - :doc:`setmaxhintwindow` - Set the specified max hint window in ms - - :doc:`setstreamthroughput` - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling - - :doc:`settimeout` - Set the specified timeout in ms, or 0 to disable timeout - - :doc:`settraceprobability` - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default - - :doc:`sjk` - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk --help' for more information. - - :doc:`snapshot` - Take a snapshot of specified keyspaces or a snapshot of the specified table - - :doc:`status` - Print cluster information (state, load, IDs, ...) - - :doc:`statusautocompaction` - - :doc:`status` - of autocompaction of the given keyspace and table - - :doc:`statusbackup` - Status of incremental backup - - :doc:`statusbinary` - Status of native transport (binary protocol) - - :doc:`statusgossip` - Status of gossip - - :doc:`statushandoff` - Status of storing future hints on the current node - - :doc:`stop` - Stop compaction - - :doc:`stopdaemon` - Stop cassandra daemon - - :doc:`tablehistograms` - Print statistic histograms for a given table - - :doc:`tablestats` - Print statistics on tables - - :doc:`toppartitions` - Sample and print the most active partitions - - :doc:`tpstats` - Print usage statistics of thread pools - - :doc:`truncatehints` - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified. - - :doc:`upgradesstables` - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version) - - :doc:`verify` - Verify (check data checksum for) one or more tables - - :doc:`version` - Print cassandra version - - :doc:`viewbuildstatus` - Show progress of a materialized view build - -See 'nodetool help ' for more information on a specific command. - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/pausehandoff.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/pausehandoff.rst.txt deleted file mode 100644 index 85ea996f9..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/pausehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_pausehandoff: - -pausehandoff ------------- - -Usage ---------- - -.. 
include:: pausehandoff.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/profileload.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/profileload.rst.txt deleted file mode 100644 index aff289f9f..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/profileload.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_profileload: - -profileload ------------ - -Usage ---------- - -.. include:: profileload.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/proxyhistograms.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/proxyhistograms.rst.txt deleted file mode 100644 index c4f333fb7..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/proxyhistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_proxyhistograms: - -proxyhistograms ---------------- - -Usage ---------- - -.. include:: proxyhistograms.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/rangekeysample.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/rangekeysample.rst.txt deleted file mode 100644 index 983ce93d0..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/rangekeysample.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rangekeysample: - -rangekeysample --------------- - -Usage ---------- - -.. include:: rangekeysample.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/rebuild.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/rebuild.rst.txt deleted file mode 100644 index 7a94ce4ed..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/rebuild.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild: - -rebuild -------- - -Usage ---------- - -.. include:: rebuild.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/rebuild_index.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/rebuild_index.rst.txt deleted file mode 100644 index a1ea4f5a2..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/rebuild_index.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_rebuild_index: - -rebuild_index -------------- - -Usage ---------- - -.. include:: rebuild_index.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/refresh.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/refresh.rst.txt deleted file mode 100644 index f68f040cd..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/refresh.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refresh: - -refresh -------- - -Usage ---------- - -.. include:: refresh.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/refreshsizeestimates.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/refreshsizeestimates.rst.txt deleted file mode 100644 index 2f3610afe..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/refreshsizeestimates.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_refreshsizeestimates: - -refreshsizeestimates --------------------- - -Usage ---------- - -.. include:: refreshsizeestimates.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/reloadlocalschema.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/reloadlocalschema.rst.txt deleted file mode 100644 index 7ccc0c5e3..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/reloadlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadlocalschema: - -reloadlocalschema ------------------ - -Usage ---------- - -.. 
include:: reloadlocalschema.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/reloadseeds.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/reloadseeds.rst.txt deleted file mode 100644 index 5c6751d77..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/reloadseeds.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadseeds: - -reloadseeds ------------ - -Usage ---------- - -.. include:: reloadseeds.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/reloadssl.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/reloadssl.rst.txt deleted file mode 100644 index 9781b295b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/reloadssl.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadssl: - -reloadssl ---------- - -Usage ---------- - -.. include:: reloadssl.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/reloadtriggers.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/reloadtriggers.rst.txt deleted file mode 100644 index 2f7959d8c..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/reloadtriggers.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_reloadtriggers: - -reloadtriggers --------------- - -Usage ---------- - -.. include:: reloadtriggers.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/relocatesstables.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/relocatesstables.rst.txt deleted file mode 100644 index 9951d3398..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/relocatesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_relocatesstables: - -relocatesstables ----------------- - -Usage ---------- - -.. include:: relocatesstables.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/removenode.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/removenode.rst.txt deleted file mode 100644 index fe0a041d1..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/removenode.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_removenode: - -removenode ----------- - -Usage ---------- - -.. include:: removenode.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/repair.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/repair.rst.txt deleted file mode 100644 index b43baba71..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/repair.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair: - -repair ------- - -Usage ---------- - -.. include:: repair.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/repair_admin.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/repair_admin.rst.txt deleted file mode 100644 index 1212c399d..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/repair_admin.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_repair_admin: - -repair_admin ------------- - -Usage ---------- - -.. include:: repair_admin.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/replaybatchlog.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/replaybatchlog.rst.txt deleted file mode 100644 index 073f091db..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/replaybatchlog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_replaybatchlog: - -replaybatchlog --------------- - -Usage ---------- - -.. 
include:: replaybatchlog.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/resetfullquerylog.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/resetfullquerylog.rst.txt deleted file mode 100644 index a7661ec98..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/resetfullquerylog.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetfullquerylog: - -resetfullquerylog ------------------ - -Usage ---------- - -.. include:: resetfullquerylog.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/resetlocalschema.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/resetlocalschema.rst.txt deleted file mode 100644 index cd1b75d33..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/resetlocalschema.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resetlocalschema: - -resetlocalschema ----------------- - -Usage ---------- - -.. include:: resetlocalschema.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/resumehandoff.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/resumehandoff.rst.txt deleted file mode 100644 index 48a0451a3..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/resumehandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_resumehandoff: - -resumehandoff -------------- - -Usage ---------- - -.. include:: resumehandoff.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/ring.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/ring.rst.txt deleted file mode 100644 index 7b3c195bd..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/ring.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_ring: - -ring ----- - -Usage ---------- - -.. include:: ring.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/scrub.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/scrub.rst.txt deleted file mode 100644 index fc926eb14..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/scrub.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_scrub: - -scrub ------ - -Usage ---------- - -.. include:: scrub.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt deleted file mode 100644 index 2ae628a35..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setbatchlogreplaythrottle.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setbatchlogreplaythrottle: - -setbatchlogreplaythrottle -------------------------- - -Usage ---------- - -.. include:: setbatchlogreplaythrottle.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setcachecapacity.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setcachecapacity.rst.txt deleted file mode 100644 index 92c7d6389..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setcachecapacity.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachecapacity: - -setcachecapacity ----------------- - -Usage ---------- - -.. include:: setcachecapacity.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setcachekeystosave.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setcachekeystosave.rst.txt deleted file mode 100644 index 639179f99..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setcachekeystosave.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcachekeystosave: - -setcachekeystosave ------------------- - -Usage ---------- - -.. 
include:: setcachekeystosave.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setcompactionthreshold.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setcompactionthreshold.rst.txt deleted file mode 100644 index 3a3e88b08..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setcompactionthreshold.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthreshold: - -setcompactionthreshold ----------------------- - -Usage ---------- - -.. include:: setcompactionthreshold.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setcompactionthroughput.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setcompactionthroughput.rst.txt deleted file mode 100644 index 27185da30..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setcompactionthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setcompactionthroughput: - -setcompactionthroughput ------------------------ - -Usage ---------- - -.. include:: setcompactionthroughput.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setconcurrency.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setconcurrency.rst.txt deleted file mode 100644 index 75b09531f..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setconcurrency.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrency: - -setconcurrency --------------- - -Usage ---------- - -.. include:: setconcurrency.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setconcurrentcompactors.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setconcurrentcompactors.rst.txt deleted file mode 100644 index 75969de4e..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setconcurrentcompactors.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentcompactors: - -setconcurrentcompactors ------------------------ - -Usage ---------- - -.. include:: setconcurrentcompactors.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt deleted file mode 100644 index 26f53a171..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setconcurrentviewbuilders.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setconcurrentviewbuilders: - -setconcurrentviewbuilders -------------------------- - -Usage ---------- - -.. include:: setconcurrentviewbuilders.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt deleted file mode 100644 index 9986ca29a..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/sethintedhandoffthrottlekb.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sethintedhandoffthrottlekb: - -sethintedhandoffthrottlekb --------------------------- - -Usage ---------- - -.. include:: sethintedhandoffthrottlekb.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt deleted file mode 100644 index ed406a7bc..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setinterdcstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setinterdcstreamthroughput: - -setinterdcstreamthroughput --------------------------- - -Usage ---------- - -.. 
include:: setinterdcstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setlogginglevel.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setlogginglevel.rst.txt deleted file mode 100644 index eaa4030c3..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setlogginglevel.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setlogginglevel: - -setlogginglevel ---------------- - -Usage ---------- - -.. include:: setlogginglevel.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setmaxhintwindow.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setmaxhintwindow.rst.txt deleted file mode 100644 index 0c62c3289..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setmaxhintwindow.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setmaxhintwindow: - -setmaxhintwindow ----------------- - -Usage ---------- - -.. include:: setmaxhintwindow.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/setstreamthroughput.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/setstreamthroughput.rst.txt deleted file mode 100644 index 76447f112..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/setstreamthroughput.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_setstreamthroughput: - -setstreamthroughput -------------------- - -Usage ---------- - -.. include:: setstreamthroughput.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/settimeout.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/settimeout.rst.txt deleted file mode 100644 index 4ec9a6e4d..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/settimeout.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settimeout: - -settimeout ----------- - -Usage ---------- - -.. include:: settimeout.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/settraceprobability.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/settraceprobability.rst.txt deleted file mode 100644 index a95b48560..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/settraceprobability.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_settraceprobability: - -settraceprobability -------------------- - -Usage ---------- - -.. include:: settraceprobability.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/sjk.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/sjk.rst.txt deleted file mode 100644 index 19bf1d605..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/sjk.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_sjk: - -sjk ---- - -Usage ---------- - -.. include:: sjk.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/snapshot.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/snapshot.rst.txt deleted file mode 100644 index 097a655b2..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/snapshot.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_snapshot: - -snapshot --------- - -Usage ---------- - -.. include:: snapshot.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/status.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/status.rst.txt deleted file mode 100644 index 4d3050ea1..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/status.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_status: - -status ------- - -Usage ---------- - -.. 
include:: status.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/statusautocompaction.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/statusautocompaction.rst.txt deleted file mode 100644 index 3748e0e4e..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/statusautocompaction.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusautocompaction: - -statusautocompaction --------------------- - -Usage ---------- - -.. include:: statusautocompaction.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/statusbackup.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/statusbackup.rst.txt deleted file mode 100644 index 6546ec07f..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/statusbackup.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbackup: - -statusbackup ------------- - -Usage ---------- - -.. include:: statusbackup.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/statusbinary.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/statusbinary.rst.txt deleted file mode 100644 index 0bb5011c3..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/statusbinary.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusbinary: - -statusbinary ------------- - -Usage ---------- - -.. include:: statusbinary.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/statusgossip.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/statusgossip.rst.txt deleted file mode 100644 index 7dc57eda7..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/statusgossip.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statusgossip: - -statusgossip ------------- - -Usage ---------- - -.. include:: statusgossip.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/statushandoff.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/statushandoff.rst.txt deleted file mode 100644 index aa1c4eb6b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/statushandoff.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_statushandoff: - -statushandoff -------------- - -Usage ---------- - -.. include:: statushandoff.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/stop.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/stop.rst.txt deleted file mode 100644 index 1e44dbe79..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/stop.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stop: - -stop ----- - -Usage ---------- - -.. include:: stop.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/stopdaemon.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/stopdaemon.rst.txt deleted file mode 100644 index 4ae951098..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/stopdaemon.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_stopdaemon: - -stopdaemon ----------- - -Usage ---------- - -.. include:: stopdaemon.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/tablehistograms.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/tablehistograms.rst.txt deleted file mode 100644 index 79d2b4ccb..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/tablehistograms.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablehistograms: - -tablehistograms ---------------- - -Usage ---------- - -.. 
include:: tablehistograms.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/tablestats.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/tablestats.rst.txt deleted file mode 100644 index 5b2c02d98..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/tablestats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tablestats: - -tablestats ----------- - -Usage ---------- - -.. include:: tablestats.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/toppartitions.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/toppartitions.rst.txt deleted file mode 100644 index 711816313..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/toppartitions.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_toppartitions: - -toppartitions -------------- - -Usage ---------- - -.. include:: toppartitions.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/tpstats.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/tpstats.rst.txt deleted file mode 100644 index c6b662012..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/tpstats.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_tpstats: - -tpstats -------- - -Usage ---------- - -.. include:: tpstats.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/truncatehints.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/truncatehints.rst.txt deleted file mode 100644 index 4b75391a6..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/truncatehints.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_truncatehints: - -truncatehints -------------- - -Usage ---------- - -.. include:: truncatehints.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/upgradesstables.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/upgradesstables.rst.txt deleted file mode 100644 index 505cc148a..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/upgradesstables.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_upgradesstables: - -upgradesstables ---------------- - -Usage ---------- - -.. include:: upgradesstables.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/verify.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/verify.rst.txt deleted file mode 100644 index dbd152cfb..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/verify.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_verify: - -verify ------- - -Usage ---------- - -.. include:: verify.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/version.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/version.rst.txt deleted file mode 100644 index fca4e3f44..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/version.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_version: - -version -------- - -Usage ---------- - -.. include:: version.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/nodetool/viewbuildstatus.rst.txt b/src/doc/4.0-beta1/_sources/tools/nodetool/viewbuildstatus.rst.txt deleted file mode 100644 index 758fe502b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/nodetool/viewbuildstatus.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _nodetool_viewbuildstatus: - -viewbuildstatus ---------------- - -Usage ---------- - -.. 
include:: viewbuildstatus.txt - :literal: - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/index.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/index.rst.txt deleted file mode 100644 index b9e483f45..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -SSTable Tools -============= - -This section describes the functionality of the various sstable tools. - -Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped. - -.. toctree:: - :maxdepth: 2 - - sstabledump - sstableexpiredblockers - sstablelevelreset - sstableloader - sstablemetadata - sstableofflinerelevel - sstablerepairedset - sstablescrub - sstablesplit - sstableupgrade - sstableutil - sstableverify - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstabledump.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstabledump.rst.txt deleted file mode 100644 index 8f38afa09..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstabledump.rst.txt +++ /dev/null @@ -1,294 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstabledump ------------ - -Dump contents of a given SSTable to standard output in JSON format. - -You must supply exactly one sstable. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
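For quick orientation, here is a minimal sketch of the basic invocation the sstabledump page above describes, assuming the node has already been stopped; the data path and generation number are the same placeholders used in that page's own examples, and the output file name is illustrative.

```bash
# Minimal sketch: with Cassandra stopped, dump a single sstable to JSON.
# The keyspace/table path and generation number below are placeholders taken
# from the examples on this page; substitute your own -Data.db file.
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db \
    > eventlog_dump.json
```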
- -Usage -^^^^^ -sstabledump - -=================================== ================================================================================ --d CQL row per line internal representation --e Enumerate partition keys only --k Partition key --x Excluded partition key(s) --t Print raw timestamps instead of iso8601 date strings --l Output each row as a separate JSON object -=================================== ================================================================================ - -If necessary, use sstableutil first to find out the sstables used by a table. - -Dump entire table -^^^^^^^^^^^^^^^^^ - -Dump the entire table without any options. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26 - - cat eventlog_dump_2018Jul26 - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - ] - -Dump table in a more manageable format -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. 
ref: https://issues.apache.org/jira/browse/CASSANDRA-13848 - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines - - cat eventlog_dump_2018Jul26_justlines - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], - "position" : 62 - }, - "rows" : [ - { - "type" : "row", - "position" : 123, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - }, - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Dump only keys -^^^^^^^^^^^^^^ - -Dump only the keys by using the -e option. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys - - cat eventlog_dump_2018Jul26b - [ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ] - -Dump row for a single key -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a single key using the -k option. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey - - cat eventlog_dump_2018Jul26_singlekey - [ - { - "partition" : { - "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 61, - "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Exclude a key or keys in dump of rows -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump a table except for the rows excluded with the -x option. Multiple keys can be used. 
- -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e > eventlog_dump_2018Jul26_excludekeys - - cat eventlog_dump_2018Jul26_excludekeys - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 0 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - -Display raw timestamps -^^^^^^^^^^^^^^^^^^^^^^ - -By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times - - cat eventlog_dump_2018Jul26_times - [ - { - "partition" : { - "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ], - "position" : 124 - }, - "rows" : [ - { - "type" : "row", - "position" : 182, - "liveness_info" : { "tstamp" : "1532118147028809" }, - "cells" : [ - { "name" : "event", "value" : "party" }, - { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" }, - { "name" : "source", "value" : "asdf" } - ] - } - ] - } - - -Display internal structure in output -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dump the table in a format that reflects the internal structure. - -Example:: - - sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d - - cat eventlog_dump_2018Jul26_d - [3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]: | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711] - [d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]: | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522] - [cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]: | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809] - - - - - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstableexpiredblockers.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstableexpiredblockers.rst.txt deleted file mode 100644 index ec837944c..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstableexpiredblockers.rst.txt +++ /dev/null @@ -1,48 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. 
See the License for the specific language governing permissions and -.. limitations under the License. - -sstableexpiredblockers ----------------------- - -During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable. - -This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-10015 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ - -sstableexpiredblockers
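As a rough sketch of how a blocker report might be followed up, the command below mirrors the example on this page and then inspects one of the reported sstables with sstablemetadata; this assumes sstablemetadata's output includes minimum/maximum timestamp lines, which is not shown on this page, and reuses the placeholder table directory from the example output.

```bash
# Sketch only: with Cassandra stopped, list sstables in keyspace1.standard1 that
# block expired sstables from being dropped, then check the timestamps of one of
# the reported files. The table directory name is a placeholder from this page.
sstableexpiredblockers keyspace1 standard1
sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db \
    | grep -i timestamp
```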
- -Output blocked sstables -^^^^^^^^^^^^^^^^^^^^^^^ - -If the sstables exist for the table, but no tables have older data than the newest tombstone in an expired sstable, the script will return nothing. - -Otherwise, the script will return ` blocks <#> expired sstables from getting dropped` followed by a list of the blocked sstables. - -Example:: - - sstableexpiredblockers keyspace1 standard1 - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)], blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)], - - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstablelevelreset.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstablelevelreset.rst.txt deleted file mode 100644 index 7069094dd..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstablelevelreset.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablelevelreset ------------------ - -If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration. - -See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5271 - -Usage -^^^^^ - -sstablelevelreset --really-reset
- -The really-reset flag is required, to ensure this intrusive command is not run accidentally. - -Table not found -^^^^^^^^^^^^^^^ - -If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error. - -Example:: - - ColumnFamily not found: keyspace/evenlog. - -Table has no sstables -^^^^^^^^^^^^^^^^^^^^^ - -Example:: - - Found no sstables, did you give the correct keyspace/table? - - -Table already at level 0 -^^^^^^^^^^^^^^^^^^^^^^^^ - -The script will not set the level if it is already set to 0. - -Example:: - - Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0 - -Table levels reduced to 0 -^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the level is not already 0, then this will reset it to 0. - -Example:: - - sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level - SSTable Level: 1 - - sstablelevelreset --really-reset keyspace eventlog - Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - - sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level - SSTable Level: 0 - - - - - - - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstableloader.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstableloader.rst.txt deleted file mode 100644 index a9b37342c..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstableloader.rst.txt +++ /dev/null @@ -1,273 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableloader ---------------- - -Bulk-load the sstables found in the directory to the configured cluster. The parent directories of are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files. - -Several of the options listed below don't work quite as intended, and in those cases, workarounds are mentioned for specific use cases. - -To avoid having the sstable files to be loaded compacted while reading them, place the files in an alternate keyspace/table path than the data directory. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-1278 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
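A minimal sketch of the staging step described above, assuming a scratch directory under /tmp and the example node address used elsewhere on this page; sstableloader derives the target keyspace and table from the last two components of the supplied path.

```bash
# Sketch only: stage copied sstable files under <anything>/<keyspace>/<table>/
# so sstableloader can infer the target, then stream them to the cluster.
# /path/to/snapshot, the ma-1-big-* file prefix, and the node address are placeholders.
mkdir -p /tmp/load/keyspace1/standard1
cp /path/to/snapshot/ma-1-big-* /tmp/load/keyspace1/standard1/
sstableloader --nodes 172.17.0.2 /tmp/load/keyspace1/standard1/
```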
- -Usage -^^^^^ - -sstableloader - -=================================================== ================================================================================ --d, --nodes Required. Try to connect to these hosts (comma-separated) - initially for ring information --u, --username username for Cassandra authentication --pw, --password password for Cassandra authentication --p, --port port used for native connection (default 9042) --sp, --storage-port port used for internode communication (default 7000) --ssp, --ssl-storage-port port used for TLS internode communication (default 7001) ---no-progress don't display progress --t, --throttle throttle speed in Mbits (default unlimited) --idct, --inter-dc-throttle inter-datacenter throttle speed in Mbits (default unlimited) --cph, --connections-per-host number of concurrent connections-per-host --i, --ignore don't stream to this (comma separated) list of nodes --alg, --ssl-alg Client SSL: algorithm (default: SunX509) --ciphers, --ssl-ciphers Client SSL: comma-separated list of encryption suites to use --ks, --keystore Client SSL: full path to keystore --kspw, --keystore-password Client SSL: password of the keystore --st, --store-type Client SSL: type of store --ts, --truststore Client SSL: full path to truststore --tspw, --truststore-password Client SSL: password of the truststore --prtcl, --ssl-protocol Client SSL: connections protocol to use (default: TLS) --ap, --auth-provider custom AuthProvider class name for cassandra authentication --f, --conf-path cassandra.yaml file path for streaming throughput and client/server SSL --v, --verbose verbose output --h, --help display this help message -=================================================== ================================================================================ - -You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options. - -Load sstables from a Snapshot -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Copy the snapshot sstables into an accessible directory and use sstableloader to restore them. - -Example:: - - cp snapshots/1535397029191/* /path/to/keyspace1/standard1/ - - sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4700000 - Total duration (ms): : 4390 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -The -d or --nodes option is required, or the script will not run. 
- -Example:: - - sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Initial hosts must be specified (-d) - -Use a Config File for SSL Clusters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool to the cassandra.yaml with the relevant server_encryption_options (e.g., truststore location, algorithm). This will work better than passing individual ssl options shown above to sstableloader on the command line. - -Example:: - - sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 9.165KiB/s (avg: 9.165KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 5.147MiB/s (avg: 18.299KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 9.751MiB/s (avg: 27.423KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 8.203MiB/s (avg: 36.524KiB/s) - ... - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 9356 ms - Average transfer rate : 480.105KiB/s - Peak transfer rate : 586.410KiB/s - -Hide Progress Output -^^^^^^^^^^^^^^^^^^^^ - -To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option. - -Example:: - - sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2] - -Get More Detail -^^^^^^^^^^^^^^^ - -Using the --verbose option will provide much more progress output. 
- -Example:: - - sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:0/1 1 % total: 1% 12.056KiB/s (avg: 12.056KiB/s) - progress: [/172.17.0.2]0:0/1 2 % total: 2% 9.092MiB/s (avg: 24.081KiB/s) - progress: [/172.17.0.2]0:0/1 4 % total: 4% 18.832MiB/s (avg: 36.099KiB/s) - progress: [/172.17.0.2]0:0/1 5 % total: 5% 2.253MiB/s (avg: 47.882KiB/s) - progress: [/172.17.0.2]0:0/1 7 % total: 7% 6.388MiB/s (avg: 59.743KiB/s) - progress: [/172.17.0.2]0:0/1 8 % total: 8% 14.606MiB/s (avg: 71.635KiB/s) - progress: [/172.17.0.2]0:0/1 9 % total: 9% 8.880MiB/s (avg: 83.465KiB/s) - progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s) - progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s) - progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s) - progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s) - progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s) - progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s) - progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s) - progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s) - progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s) - progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s) - progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s) - progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s) - progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s) - progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s) - progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s) - progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s) - progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s) - progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s) - progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s) - progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s) - progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s) - progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s) - progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s) - progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s) - progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s) - progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s) - progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s) - progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s) - progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s) - progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s) - progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s) - progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s) - progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s) - progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s 
(avg: 462.545KiB/s) - progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s) - progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s) - progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s) - progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s) - progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s) - progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s) - progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s) - progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s) - progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s) - progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s) - progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s) - progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s) - progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s) - progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s) - progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s) - progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s) - progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s) - progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s) - progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s) - progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s) - progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s) - progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s) - progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s) - progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s) - progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s) - progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s) - progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s) - progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s) - progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s) - progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s) - - Summary statistics: - Connections per host : 1 - Total files transferred : 1 - Total bytes transferred : 4.387MiB - Total duration : 6706 ms - Average transfer rate : 669.835KiB/s - Peak transfer rate : 767.802KiB/s - - -Throttling Load -^^^^^^^^^^^^^^^ - -To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below. 
- -Example:: - - sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 0 MB/s) - Summary statistics: - Connections per host: : 1 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 37634 - Average transfer rate (MB/s): : 0 - Peak transfer rate (MB/s): : 0 - -Speeding up Load -^^^^^^^^^^^^^^^^ - -To speed up the load process, the number of connections per host can be increased. - -Example:: - - sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ - Established connection to initial hosts - Opening sstables and calculating sections to stream - Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2] - progress: [/172.17.0.2]0:1/1 100% total: 100% 0 MB/s(avg: 1 MB/s) - Summary statistics: - Connections per host: : 100 - Total files transferred: : 1 - Total bytes transferred: : 4595705 - Total duration (ms): : 3486 - Average transfer rate (MB/s): : 1 - Peak transfer rate (MB/s): : 1 - -This small data set doesn't benefit much from the increase in connections per host, but note that the total duration has decreased in this example. - - - - - - - - - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstablemetadata.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstablemetadata.rst.txt deleted file mode 100644 index 0a7a42211..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstablemetadata.rst.txt +++ /dev/null @@ -1,300 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablemetadata ---------------- - -Print information about an sstable from the related Statistics.db and Summary.db files to standard output. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. 
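Because the script does not perform this check itself, a small guard can be placed in front of the tool. This is only a minimal sketch, assuming the node runs the standard ``CassandraDaemon`` process and using a placeholder data path::

    # Refuse to run the offline tool while a Cassandra JVM is still up on this host.
    if pgrep -f CassandraDaemon > /dev/null; then
        echo "Cassandra appears to be running; stop it before using sstablemetadata" >&2
        exit 1
    fi
    sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-*/mc-1-big-Data.db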
- -Usage -^^^^^ - -sstablemetadata - -========================= ================================================================================ ---gc_grace_seconds The gc_grace_seconds to use when calculating droppable tombstones -========================= ================================================================================ - -Print all the metadata -^^^^^^^^^^^^^^^^^^^^^^ - -Run sstablemetadata against the *Data.db file(s) related to a table. If necessary, find the *Data.db file(s) using sstableutil. - -Example:: - - sstableutil keyspace1 standard1 | grep Data - /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big-Data.db - - SSTable: /var/lib/cassandra/data/keyspace1/standard1-f6845640a6cb11e8b6836d2c86545d91/mc-1-big - Partitioner: org.apache.cassandra.dht.Murmur3Partitioner - Bloom Filter FP chance: 0.010000 - Minimum timestamp: 1535025576141000 - Maximum timestamp: 1535025604309000 - SSTable min local deletion time: 2147483647 - SSTable max local deletion time: 2147483647 - Compressor: org.apache.cassandra.io.compress.LZ4Compressor - TTL min: 86400 - TTL max: 86400 - First token: -9223004712949498654 (key=39373333373831303130) - Last token: 9222554117157811897 (key=4f3438394e39374d3730) - Estimated droppable tombstones: 0.9188263888888889 - SSTable Level: 0 - Repaired at: 0 - Replay positions covered: {CommitLogPosition(segmentId=1535025390651, position=226400)=CommitLogPosition(segmentId=1535025390651, position=6849139)} - totalColumnsSet: 100000 - totalRows: 20000 - Estimated tombstone drop times: - 1535039100: 80390 - 1535039160: 5645 - 1535039220: 13965 - Count Row Size Cell Count - 1 0 0 - 2 0 0 - 3 0 0 - 4 0 0 - 5 0 20000 - 6 0 0 - 7 0 0 - 8 0 0 - 10 0 0 - 12 0 0 - 14 0 0 - 17 0 0 - 20 0 0 - 24 0 0 - 29 0 0 - 35 0 0 - 42 0 0 - 50 0 0 - 60 0 0 - 72 0 0 - 86 0 0 - 103 0 0 - 124 0 0 - 149 0 0 - 179 0 0 - 215 0 0 - 258 20000 0 - 310 0 0 - 372 0 0 - 446 0 0 - 535 0 0 - 642 0 0 - 770 0 0 - 924 0 0 - 1109 0 0 - 1331 0 0 - 1597 0 0 - 1916 0 0 - 2299 0 0 - 2759 0 0 - 3311 0 0 - 3973 0 0 - 4768 0 0 - 5722 0 0 - 6866 0 0 - 8239 0 0 - 9887 0 0 - 11864 0 0 - 14237 0 0 - 17084 0 0 - 20501 0 0 - 24601 0 0 - 29521 0 0 - 35425 0 0 - 42510 0 0 - 51012 0 0 - 61214 0 0 - 73457 0 0 - 88148 0 0 - 105778 0 0 - 126934 0 0 - 152321 0 0 - 182785 0 0 - 219342 0 0 - 263210 0 0 - 315852 0 0 - 379022 0 0 - 454826 0 0 - 545791 0 0 - 654949 0 0 - 785939 0 0 - 943127 0 0 - 1131752 0 0 - 1358102 0 0 - 1629722 0 0 - 1955666 0 0 - 2346799 0 0 - 2816159 0 0 - 3379391 0 0 - 4055269 0 0 - 4866323 0 0 - 5839588 0 0 - 7007506 0 0 - 8409007 0 0 - 10090808 0 0 - 12108970 0 0 - 14530764 0 0 - 17436917 0 0 - 20924300 0 0 - 25109160 0 0 - 30130992 0 0 - 36157190 0 0 - 43388628 0 0 - 52066354 0 0 - 62479625 0 0 - 74975550 0 0 - 89970660 0 0 - 107964792 0 0 - 129557750 0 0 - 155469300 0 0 - 186563160 0 0 - 223875792 0 0 - 268650950 0 0 - 322381140 0 0 - 386857368 0 0 - 464228842 0 0 - 557074610 0 0 - 668489532 0 0 - 802187438 0 0 - 962624926 0 0 - 1155149911 0 0 - 1386179893 0 0 - 1663415872 0 0 - 1996099046 0 0 - 2395318855 0 0 - 2874382626 0 - 3449259151 0 - 4139110981 0 - 4966933177 0 - 5960319812 0 - 7152383774 0 - 8582860529 0 - 10299432635 0 - 12359319162 0 - 14831182994 0 - 17797419593 0 - 21356903512 0 - 25628284214 0 - 30753941057 0 - 36904729268 0 - 44285675122 0 - 53142810146 0 - 63771372175 0 - 76525646610 0 - 91830775932 0 - 110196931118 0 - 132236317342 0 
- 158683580810 0 - 190420296972 0 - 228504356366 0 - 274205227639 0 - 329046273167 0 - 394855527800 0 - 473826633360 0 - 568591960032 0 - 682310352038 0 - 818772422446 0 - 982526906935 0 - 1179032288322 0 - 1414838745986 0 - Estimated cardinality: 20196 - EncodingStats minTTL: 0 - EncodingStats minLocalDeletionTime: 1442880000 - EncodingStats minTimestamp: 1535025565275000 - KeyType: org.apache.cassandra.db.marshal.BytesType - ClusteringTypes: [org.apache.cassandra.db.marshal.UTF8Type] - StaticColumns: {C3:org.apache.cassandra.db.marshal.BytesType, C4:org.apache.cassandra.db.marshal.BytesType, C0:org.apache.cassandra.db.marshal.BytesType, C1:org.apache.cassandra.db.marshal.BytesType, C2:org.apache.cassandra.db.marshal.BytesType} - RegularColumns: {} - -Specify gc grace seconds -^^^^^^^^^^^^^^^^^^^^^^^^ - -To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn't access the schema directly, this is a way to more accurately estimate droppable tombstones -- for example, if you pass in gc_grace_seconds matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the curent machine time (in seconds). - -ref: https://issues.apache.org/jira/browse/CASSANDRA-12208 - -Example:: - - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4 - Estimated tombstone drop times: - 1536599100: 1 - 1536599640: 1 - 1536599700: 2 - - echo $(date +%s) - 1536602005 - - # if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 4.0E-5 - - # if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 9.61111111111111E-6 - - # if gc_grace_seconds was configured at 100, none of the tombstones would be currently droppable - sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones" - Estimated droppable tombstones: 0.0 - -Explanation of each value printed above -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -=================================== ================================================================================ - Value Explanation -=================================== ================================================================================ -SSTable prefix of the sstable filenames related to this sstable -Partitioner partitioner type used to distribute data across nodes; defined in cassandra.yaml -Bloom Filter FP precision of Bloom filter used in reads; defined in the table definition -Minimum timestamp minimum timestamp of any entry in this sstable, in epoch microseconds -Maximum timestamp maximum timestamp of any entry in this sstable, in epoch microseconds -SSTable min local deletion time minimum timestamp of deletion date, based on TTL, in epoch seconds -SSTable max local deletion time maximum timestamp of deletion date, based on TTL, in epoch seconds -Compressor blank (-) by 
default; if not blank, indicates type of compression enabled on the table -TTL min time-to-live in seconds; default 0 unless defined in the table definition -TTL max time-to-live in seconds; default 0 unless defined in the table definition -First token lowest token and related key found in the sstable summary -Last token highest token and related key found in the sstable summary -Estimated droppable tombstones ratio of tombstones to columns, using configured gc grace seconds if relevant -SSTable level compaction level of this sstable, if leveled compaction (LCS) is used -Repaired at the timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds -Replay positions covered the interval of time and commitlog positions related to this sstable -totalColumnsSet number of cells in the table -totalRows number of rows in the table -Estimated tombstone drop times approximate number of rows that will expire, ordered by epoch seconds -Count Row Size Cell Count two histograms in two columns; one represents distribution of Row Size - and the other represents distribution of Cell Count -Estimated cardinality an estimate of unique values, used for compaction -EncodingStats* minTTL in epoch milliseconds -EncodingStats* minLocalDeletionTime in epoch seconds -EncodingStats* minTimestamp in epoch microseconds -KeyType the type of partition key, useful in reading and writing data - from/to storage; defined in the table definition -ClusteringTypes the type of clustering key, useful in reading and writing data - from/to storage; defined in the table definition -StaticColumns a list of the shared columns in the table -RegularColumns a list of non-static, non-key columns in the table -=================================== ================================================================================ -* For the encoding stats values, the delta of this and the current epoch time is used when encoding and storing data in the most optimal way. - - - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstableofflinerelevel.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstableofflinerelevel.rst.txt deleted file mode 100644 index c031d2987..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstableofflinerelevel.rst.txt +++ /dev/null @@ -1,95 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableofflinerelevel ---------------------- - -When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-8301 - -The way this is done is: sstables are storted by their last token. 
Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size):: - - L3 [][][][][][][][][][][] - L2 [ ][ ][ ][ ] - L1 [ ][ ] - L0 [ ] - -After being dropped to L0 and sorted by last token, the sstables will look like this (and, to illustrate overlap, the overlapping ones are put on a new line):: - - [][][] - [ ][][][] - [ ] - [ ] - ... - -Then, we start iterating from the smallest last-token and adding all sstables that do not cause an overlap to a level. We will reconstruct the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below. - -If we end up with more levels than expected, we put all levels exceeding the expected count in L0. For example, the original L0 files will most likely be put in a level of their own, since they most often overlap many other sstables. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ - -sstableofflinerelevel [--dry-run] <keyspace> <table>
- -Doing a dry run -^^^^^^^^^^^^^^^ - -Use the --dry-run option to see the current level distribution and predicted level after the change. - -Example:: - - sstableofflinerelevel --dry-run keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - Potential leveling: - L0=1 - L1=1 - -Running a relevel -^^^^^^^^^^^^^^^^^ - -Example:: - - sstableofflinerelevel keyspace eventlog - For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753: - Current leveling: - L0=2 - New leveling: - L0=1 - L1=1 - -Keyspace or table not found -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If an invalid keyspace and/or table is provided, an exception will be thrown. - -Example:: - - sstableofflinerelevel --dry-run keyspace evenlog - - Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog - at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96) - - - - - - - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstablerepairedset.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstablerepairedset.rst.txt deleted file mode 100644 index ebacef335..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstablerepairedset.rst.txt +++ /dev/null @@ -1,79 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablerepairedset ------------------- - -Repairs can take a very long time in some environments, for large sizes of data. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can be run on only un-repaired sstables if desired. - -Note that running a repair (e.g., via nodetool repair) doesn't set the status of this metadata. Only setting the status of this metadata via this tool does. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5351 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablerepairedset --really-set [-f | ] - -=================================== ================================================================================ ---really-set required if you want to really set the status ---is-repaired set the repairedAt status to the last modified time ---is-unrepaired set the repairedAt status to 0 --f use a file containing a list of sstables as the input -=================================== ================================================================================ - -Set a lot of sstables to unrepaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are many ways to do this programmatically. This way would likely include variables for the keyspace and table. 
- -Example:: - - find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired % - -Set one to many sstables to repaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice. - -Example:: - - nodetool repair keyspace1 standard1 - find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired % - -Print metadata showing repaired status -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -sstablemetadata can be used to view the status set or unset using this command. - -Example: - - sstablerepairedset --really-set --is-repaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at" - Repaired at: 1534443974000 - - sstablerepairedset --really-set --is-unrepaired /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db - sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/mc-1-big-Data.db | grep "Repaired at" - Repaired at: 0 - -Using command in a script -^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you know you ran repair 2 weeks ago, you can do something like the following:: - - sstablerepairset --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14) - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstablescrub.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstablescrub.rst.txt deleted file mode 100644 index 0bbda9f32..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstablescrub.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablescrub ------------- - -Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-4321 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablescrub
- -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --m,--manifest-check only check and repair the leveled manifest, without actually scrubbing the sstables --n,--no-validate do not validate columns using column validator --r,--reinsert-overflowed-ttl Rewrites rows with overflowed expiration date affected by CASSANDRA-14092 - with the maximum supported expiration date of 2038-01-19T03:14:06+00:00. The rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows. --s,--skip-corrupted skip corrupt rows in counter tables --v,--verbose verbose output -=================================== ================================================================================ - -Basic Scrub -^^^^^^^^^^^ - -The scrub without options will do a snapshot first, then write all non-corrupted files to a new sstable. - -Example:: - - sstablescrub keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped - Checking leveled manifest - -Scrub without Validation -^^^^^^^^^^^^^^^^^^^^^^^^ -ref: https://issues.apache.org/jira/browse/CASSANDRA-9406 - -Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but not corrupt. This data usually doesn not present any errors to the client. - -Example:: - - sstablescrub --no-validate keyspace1 standard1 - Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517 - Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB) - Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned - -Skip Corrupted Counter Tables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5930 - -If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+. - -Example:: - - sstablescrub --skip-corrupted keyspace1 counter1 - -Dealing with Overflow Dates -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -ref: https://issues.apache.org/jira/browse/CASSANDRA-14092 - -Using the option --reinsert-overflowed-ttl allows a rewriting of rows that had a max TTL going over the maximum (causing an overflow). - -Example:: - - sstablescrub --reinsert-overflowed-ttl keyspace1 counter1 - -Manifest Check -^^^^^^^^^^^^^^ - -As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata. - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstablesplit.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstablesplit.rst.txt deleted file mode 100644 index 5386fa48b..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstablesplit.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -.. 
Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstablesplit ------------- - -Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-4766 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstablesplit - -=================================== ================================================================================ ---debug display stack traces --h, --help display this help message ---no-snapshot don't snapshot the sstables before splitting --s, --size maximum size in MB for the output sstables (default: 50) -=================================== ================================================================================ - -This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped. - -Split a File -^^^^^^^^^^^^ - -Split a large sstable into smaller sstables. By default, unless the option --no-snapshot is added, a snapshot will be done of the original sstable and placed in the snapshots folder. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - - Pre-split sstables snapshotted into snapshot pre-split-1533144514795 - -Split Multiple Files -^^^^^^^^^^^^^^^^^^^^ - -Wildcards can be used in the filename portion of the command to split multiple files. - -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1* - -Attempt to Split a Small File -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the file is already smaller than the split size provided, the sstable will not be split. - -Example:: - - sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db - Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB) - No sstables needed splitting. - -Split a File into Specified Size -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB). Specify only the number, not the units. For example --size 50 is correct, but --size 50MB is not. 
- -Example:: - - sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db - Pre-split sstables snapshotted into snapshot pre-split-1533144996008 - - -Split Without Snapshot -^^^^^^^^^^^^^^^^^^^^^^ - -By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it. - -Example:: - - sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db - -Note: There is no output, but you can see the results in your file system. - - - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstableupgrade.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstableupgrade.rst.txt deleted file mode 100644 index 66386aca1..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstableupgrade.rst.txt +++ /dev/null @@ -1,137 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableupgrade --------------- - -Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version. - -The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables. - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableupgrade
[snapshot_name] - -=================================== ================================================================================ ---debug display stack traces --h,--help display this help message --k,--keep-source do not delete the source sstables -=================================== ================================================================================ - -Rewrite tables to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Start with a set of sstables in one version of Cassandra:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables:: - - sstableupgrade keyspace1 standard1 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 13:48 backups - -rw-r--r-- 1 user wheel 292 Aug 22 13:48 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4599475 Aug 22 13:48 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:48 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 13:48 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330807 Aug 22 13:48 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 13:48 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 13:48 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 13:48 mc-2-big-TOC.txt - -Rewrite tables to the current Cassandra version, and keep tables in old version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Again, starting with a set of sstables in one version:: - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - -rw-r--r-- 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r-- 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r-- 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r-- 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r-- 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r-- 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r-- 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:: - - sstableupgrade keyspace1 standard1 -k - Found 1 sstables that need upgrading. 
- Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete. - - ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/ - ... - drwxr-xr-x 2 user wheel 64 Aug 22 14:00 backups - -rw-r--r--@ 1 user wheel 348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db - -rw-r--r--@ 1 user wheel 5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db - -rw-r--r--@ 1 user wheel 10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1 - -rw-r--r--@ 1 user wheel 25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db - -rw-r--r--@ 1 user wheel 480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db - -rw-r--r--@ 1 user wheel 9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db - -rw-r--r--@ 1 user wheel 3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db - -rw-r--r--@ 1 user wheel 79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt - -rw-r--r-- 1 user wheel 292 Aug 22 14:01 mc-2-big-CRC.db - -rw-r--r-- 1 user wheel 4596370 Aug 22 14:01 mc-2-big-Data.db - -rw-r--r-- 1 user wheel 10 Aug 22 14:01 mc-2-big-Digest.crc32 - -rw-r--r-- 1 user wheel 25256 Aug 22 14:01 mc-2-big-Filter.db - -rw-r--r-- 1 user wheel 330801 Aug 22 14:01 mc-2-big-Index.db - -rw-r--r-- 1 user wheel 10312 Aug 22 14:01 mc-2-big-Statistics.db - -rw-r--r-- 1 user wheel 3506 Aug 22 14:01 mc-2-big-Summary.db - -rw-r--r-- 1 user wheel 80 Aug 22 14:01 mc-2-big-TOC.txt - - -Rewrite a snapshot to the current Cassandra version -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Find the snapshot name:: - - nodetool listsnapshots - - Snapshot Details: - Snapshot name Keyspace name Column family name True size Size on disk - ... - 1534962986979 keyspace1 standard1 5.85 MB 5.85 MB - -Then rewrite the snapshot:: - - sstableupgrade keyspace1 standard1 1534962986979 - Found 1 sstables that need upgrading. - Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') - Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete. - - - - - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstableutil.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstableutil.rst.txt deleted file mode 100644 index 30becd0e0..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstableutil.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. 
- -sstableutil ------------ - -List sstable files for the provided table. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-7066 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableutil
- -=================================== ================================================================================ --c, --cleanup clean up any outstanding transactions --d, --debug display stack traces --h, --help display this help message --o, --oplog include operation logs --t, --type all (list all files, final or temporary), tmp (list temporary files only), - final (list final files only), --v, --verbose verbose output -=================================== ================================================================================ - -List all sstables -^^^^^^^^^^^^^^^^^ - -The basic command lists the sstables associated with a given keyspace/table. - -Example:: - - sstableutil keyspace eventlog - Listing files... - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32 - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db - /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt - -List only temporary sstables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `tmp` will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra. - -List only final sstables -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -t option followed by `final` will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option. - -Include transaction logs -^^^^^^^^^^^^^^^^^^^^^^^^ - -Using the -o option will include transaction logs in the listing, in the format above. - -Clean up sstables -^^^^^^^^^^^^^^^^^ - -Using the -c option removes any transactions left over from incomplete writes or compactions. - -From the 3.0 upgrade notes: - -New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. 
Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix "add:" or "remove:". They also contain a special line "commit", only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the "add" prefix) and delete the old sstables (those with the "remove" prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first. - - - diff --git a/src/doc/4.0-beta1/_sources/tools/sstable/sstableverify.rst.txt b/src/doc/4.0-beta1/_sources/tools/sstable/sstableverify.rst.txt deleted file mode 100644 index dad3f4487..000000000 --- a/src/doc/4.0-beta1/_sources/tools/sstable/sstableverify.rst.txt +++ /dev/null @@ -1,91 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -sstableverify -------------- - -Check sstable(s) for errors or corruption, for the provided table. - -ref: https://issues.apache.org/jira/browse/CASSANDRA-5791 - -Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped. - -Usage -^^^^^ -sstableverify
- -=================================== ================================================================================ ---debug display stack traces --e, --extended extended verification --h, --help display this help message --v, --verbose verbose output -=================================== ================================================================================ - -Basic Verification -^^^^^^^^^^^^^^^^^^ - -This is the basic verification. It is not a very quick process, and uses memory. You might need to increase your memory settings if you have many sstables. - -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - -Extended Verification -^^^^^^^^^^^^^^^^^^^^^ - -During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time. - -Example:: - - root@DC1C1:/# sstableverify -e keyspace eventlog - WARN 14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') - Extended Verify requested, proceeding to inspect values - Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully - -Corrupted File -^^^^^^^^^^^^^^ - -Corrupted files are listed if they are detected by the script. 
- -Example:: - - sstableverify keyspace eventlog - Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB) - Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') - Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db - -A similar (but less verbose) tool will show the suggested actions:: - - nodetool verify keyspace eventlog - error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair - - - diff --git a/src/doc/4.0-beta1/_sources/troubleshooting/finding_nodes.rst.txt b/src/doc/4.0-beta1/_sources/troubleshooting/finding_nodes.rst.txt deleted file mode 100644 index df5e16c93..000000000 --- a/src/doc/4.0-beta1/_sources/troubleshooting/finding_nodes.rst.txt +++ /dev/null @@ -1,149 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Find The Misbehaving Nodes -========================== - -The first step to troubleshooting a Cassandra issue is to use error messages, -metrics and monitoring information to identify if the issue lies with the -clients or the server and if it does lie with the server find the problematic -nodes in the Cassandra cluster. The goal is to determine if this is a systemic -issue (e.g. a query pattern that affects the entire cluster) or isolated to a -subset of nodes (e.g. neighbors holding a shared token range or even a single -node with bad hardware). - -There are many sources of information that help determine where the problem -lies. Some of the most common are mentioned below. - -Client Logs and Errors ----------------------- -Clients of the cluster often leave the best breadcrumbs to follow. Perhaps -client latencies or error rates have increased in a particular datacenter -(likely eliminating other datacenter's nodes), or clients are receiving a -particular kind of error code indicating a particular kind of problem. -Troubleshooters can often rule out many failure modes just by reading the error -messages. In fact, many Cassandra error messages include the last coordinator -contacted to help operators find nodes to start with. - -Some common errors (likely culprit in parenthesis) assuming the client has -similar error names as the Datastax :ref:`drivers `: - -* ``SyntaxError`` (**client**). 
This and other ``QueryValidationException``
-  indicate that the client sent a malformed request. These are rarely server
-  issues and usually indicate bad queries.
-* ``UnavailableException`` (**server**): This means that the Cassandra
-  coordinator node has rejected the query as it believes that insufficient
-  replica nodes are available. If many coordinators are throwing this error it
-  likely means that there really are (typically) multiple nodes down in the
-  cluster and you can identify them using :ref:`nodetool status
-  `. If only a single coordinator is throwing this error it may
-  mean that node has been partitioned from the rest.
-* ``OperationTimedOutException`` (**server**): This is the most frequent
-  timeout message raised when clients set timeouts and means that the query
-  took longer than the supplied timeout. This is a *client side* timeout,
-  meaning that the query took longer than the timeout the client specified. The
-  error message will include the coordinator node that was last tried, which is
-  usually a good starting point. This error usually indicates either
-  aggressive client timeout values or latent server coordinators/replicas.
-* ``ReadTimeoutException`` or ``WriteTimeoutException`` (**server**): These
-  are raised when clients do not specify lower timeouts and there is a
-  *coordinator* timeout based on the values supplied in the ``cassandra.yaml``
-  configuration file. They usually indicate a serious server side problem as
-  the default values are usually multiple seconds.
-
-Metrics
--------
-
-If you have Cassandra :ref:`metrics ` reporting to a
-centralized location such as `Graphite `_ or
-`Grafana `_ you can typically use those to narrow down
-the problem. At this stage narrowing down the issue to a particular
-datacenter, rack, or even group of nodes is the main goal. Some helpful metrics
-to look at are:
-
-Errors
-^^^^^^
-Cassandra refers to internode messaging errors as "drops", and provides a
-number of :ref:`Dropped Message Metrics ` to help narrow
-down errors. If particular nodes are actively dropping messages, they are
-likely related to the issue.
-
-Latency
-^^^^^^^
-For timeouts or latency related issues you can start with :ref:`Table
-Metrics ` by comparing Coordinator level metrics e.g.
-``CoordinatorReadLatency`` or ``CoordinatorWriteLatency`` with their associated
-replica metrics e.g. ``ReadLatency`` or ``WriteLatency``. Issues usually show
-up on the ``99th`` percentile before they show up on the ``50th`` percentile or
-the ``mean``. While ``maximum`` coordinator latencies are not typically very
-helpful due to the exponentially decaying reservoir used internally to produce
-metrics, ``maximum`` replica latencies that correlate with increased ``99th``
-percentiles on coordinators can help narrow down the problem.
-
-There are usually three main possibilities:
-
-1. Coordinator latencies are high on all nodes, but only a few nodes' local
-   read latencies are high. This points to slow replica nodes, and the
-   coordinators are just side effects. This usually happens when clients are
-   not token aware.
-2. Coordinator latencies and replica latencies increase at the
-   same time on a few nodes. If clients are token aware this is almost
-   always what happens and points to slow replicas of a subset of token
-   ranges (only part of the ring).
-3. Coordinator and local latencies are high on many nodes. This usually
-   indicates either a tipping point in the cluster capacity (too many writes or
-   reads per second), or a new query pattern.
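A quick way to tell these three cases apart is to sample the same percentile on every node and compare the numbers side by side. The following is only a sketch: it assumes SSH access to the nodes, a hypothetical ``hosts.txt`` file listing them, placeholder keyspace/table names, and the ``nodetool tablehistograms`` column layout shown later in this guide::

    $ # Print each node's local p99 read latency (micros) for one table
    $ for host in $(cat hosts.txt); do
          echo -n "$host: "
          ssh "$host" nodetool tablehistograms keyspace table | awk '$1 == "99%" { print $4 }'
      done

If only one or two hosts stand out, start your investigation there; if every host reports similar numbers, the problem is more likely systemic.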
- -It's important to remember that depending on the client's load balancing -behavior and consistency levels coordinator and replica metrics may or may -not correlate. In particular if you use ``TokenAware`` policies the same -node's coordinator and replica latencies will often increase together, but if -you just use normal ``DCAwareRoundRobin`` coordinator latencies can increase -with unrelated replica node's latencies. For example: - -* ``TokenAware`` + ``LOCAL_ONE``: should always have coordinator and replica - latencies on the same node rise together -* ``TokenAware`` + ``LOCAL_QUORUM``: should always have coordinator and - multiple replica latencies rise together in the same datacenter. -* ``TokenAware`` + ``QUORUM``: replica latencies in other datacenters can - affect coordinator latencies. -* ``DCAwareRoundRobin`` + ``LOCAL_ONE``: coordinator latencies and unrelated - replica node's latencies will rise together. -* ``DCAwareRoundRobin`` + ``LOCAL_QUORUM``: different coordinator and replica - latencies will rise together with little correlation. - -Query Rates -^^^^^^^^^^^ -Sometimes the :ref:`Table ` query rate metrics can help -narrow down load issues as "small" increase in coordinator queries per second -(QPS) may correlate with a very large increase in replica level QPS. This most -often happens with ``BATCH`` writes, where a client may send a single ``BATCH`` -query that might contain 50 statements in it, which if you have 9 copies (RF=3, -three datacenters) means that every coordinator ``BATCH`` write turns into 450 -replica writes! This is why keeping ``BATCH``'s to the same partition is so -critical, otherwise you can exhaust significant CPU capacitity with a "single" -query. - - -Next Step: Investigate the Node(s) ----------------------------------- - -Once you have narrowed down the problem as much as possible (datacenter, rack -, node), login to one of the nodes using SSH and proceed to debug using -:ref:`logs `, :ref:`nodetool `, and -:ref:`os tools `. If you are not able to login you may still -have access to :ref:`logs ` and :ref:`nodetool ` -remotely. diff --git a/src/doc/4.0-beta1/_sources/troubleshooting/index.rst.txt b/src/doc/4.0-beta1/_sources/troubleshooting/index.rst.txt deleted file mode 100644 index 79b46d636..000000000 --- a/src/doc/4.0-beta1/_sources/troubleshooting/index.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -Troubleshooting -=============== - -As any distributed database does, sometimes Cassandra breaks and you will have -to troubleshoot what is going on. 
Generally speaking you can debug Cassandra -like any other distributed Java program, meaning that you have to find which -machines in your cluster are misbehaving and then isolate the problem using -logs and tools. Luckily Cassandra had a great set of instrospection tools to -help you. - -These pages include a number of command examples demonstrating various -debugging and analysis techniques, mostly for Linux/Unix systems. If you don't -have access to the machines running Cassandra, or are running on Windows or -another operating system you may not be able to use the exact commands but -there are likely equivalent tools you can use. - -.. toctree:: - :maxdepth: 2 - - finding_nodes - reading_logs - use_nodetool - use_tools diff --git a/src/doc/4.0-beta1/_sources/troubleshooting/reading_logs.rst.txt b/src/doc/4.0-beta1/_sources/troubleshooting/reading_logs.rst.txt deleted file mode 100644 index 08f7d4da6..000000000 --- a/src/doc/4.0-beta1/_sources/troubleshooting/reading_logs.rst.txt +++ /dev/null @@ -1,267 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _reading-logs: - -Cassandra Logs -============== -Cassandra has rich support for logging and attempts to give operators maximum -insight into the database while at the same time limiting noise to the logs. - -Common Log Files ----------------- -Cassandra has three main logs, the ``system.log``, ``debug.log`` and -``gc.log`` which hold general logging messages, debugging logging messages, and -java garbage collection logs respectively. - -These logs by default live in ``${CASSANDRA_HOME}/logs``, but most Linux -distributions relocate logs to ``/var/log/cassandra``. Operators can tune -this location as well as what levels are logged using the provided -``logback.xml`` file. - -``system.log`` -^^^^^^^^^^^^^^ -This log is the default Cassandra log and is a good place to start any -investigation. Some examples of activities logged to this log: - -* Uncaught exceptions. These can be very useful for debugging errors. -* ``GCInspector`` messages indicating long garbage collector pauses. When long - pauses happen Cassandra will print how long and also what was the state of - the system (thread state) at the time of that pause. This can help narrow - down a capacity issue (either not enough heap or not enough spare CPU). -* Information about nodes joining and leaving the cluster as well as token - metadata (data ownersip) changes. This is useful for debugging network - partitions, data movements, and more. -* Keyspace/Table creation, modification, deletion. -* ``StartupChecks`` that ensure optimal configuration of the operating system - to run Cassandra -* Information about some background operational tasks (e.g. Index - Redistribution). 
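For example, to quickly check whether the ``GCInspector`` has been reporting long pauses recently, a simple search of the log is usually enough (a sketch; adjust the path if your distribution keeps logs somewhere other than ``/var/log/cassandra``)::

    $ grep 'GCInspector' /var/log/cassandra/system.log | tail -5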
- -As with any application, looking for ``ERROR`` or ``WARN`` lines can be a -great first step:: - - $ # Search for warnings or errors in the latest system.log - $ grep 'WARN\|ERROR' system.log | tail - ... - - $ # Search for warnings or errors in all rotated system.log - $ zgrep 'WARN\|ERROR' system.log.* | less - ... - -``debug.log`` -^^^^^^^^^^^^^^ -This log contains additional debugging information that may be useful when -troubleshooting but may be much noiser than the normal ``system.log``. Some -examples of activities logged to this log: - -* Information about compactions, including when they start, which sstables - they contain, and when they finish. -* Information about memtable flushes to disk, including when they happened, - how large the flushes were, and which commitlog segments the flush impacted. - -This log can be *very* noisy, so it is highly recommended to use ``grep`` and -other log analysis tools to dive deep. For example:: - - $ # Search for messages involving a CompactionTask with 5 lines of context - $ grep CompactionTask debug.log -C 5 - ... - - $ # Look at the distribution of flush tasks per keyspace - $ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c - 6 compaction_history: - 1 test_keyspace: - 2 local: - 17 size_estimates: - 17 sstable_activity: - - -``gc.log`` -^^^^^^^^^^^^^^ -The gc log is a standard Java GC log. With the default ``jvm.options`` -settings you get a lot of valuable information in this log such as -application pause times, and why pauses happened. This may help narrow -down throughput or latency issues to a mistuned JVM. For example you can -view the last few pauses:: - - $ grep stopped gc.log.0.current | tail - 2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds - 2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds - 2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds - 2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds - 2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds - 2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds - 2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds - 2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds - 2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds - 2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds - - -This shows a lot of valuable information including how long the application -was paused (meaning zero user queries were being serviced during the e.g. 33ms -JVM pause) as well as how long it took to enter the safepoint. 
You can use this -raw data to e.g. get the longest pauses:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current | sort -k 1 - 2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds - 2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds - 2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds - 2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds - 2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds - 2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds - 2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds - 2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds - 2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds - 2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds - -In this case any client waiting on a query would have experienced a `56ms` -latency at 17:13:41. - -Note that GC pauses are not _only_ garbage collection, although -generally speaking high pauses with fast safepoints indicate a lack of JVM heap -or mistuned JVM GC algorithm. High pauses with slow safepoints typically -indicate that the JVM is having trouble entering a safepoint which usually -indicates slow disk drives (Cassandra makes heavy use of memory mapped reads -which the JVM doesn't know could have disk latency, so the JVM safepoint logic -doesn't handle a blocking memory mapped read particularly well). - -Using these logs you can even get a pause distribution with something like -`histogram.py `_:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py - # NumSamples = 410293; Min = 0.00; Max = 11.49 - # Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498 - # each ∎ represents a count of 5470 - 0.0001 - 1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎ - 1.1496 - 2.2991 [ 15]: - 2.2991 - 3.4486 [ 5]: - 3.4486 - 4.5981 [ 1]: - 4.5981 - 5.7475 [ 5]: - 5.7475 - 6.8970 [ 9]: - 6.8970 - 8.0465 [ 1]: - 8.0465 - 9.1960 [ 0]: - 9.1960 - 10.3455 [ 0]: - 10.3455 - 11.4949 [ 2]: - -We can see in this case while we have very good average performance something -is causing multi second JVM pauses ... 
In this case it was mostly safepoint -pauses caused by slow disks:: - - $ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X gc.log.0.current| sort -k 1 - 2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds - 2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds - 2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds - 2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds - 2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds - 2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds - 2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds - 2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds - 2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds - 2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds - -Sometimes reading and understanding java GC logs is hard, but you can take the -raw GC files and visualize them using tools such as `GCViewer -`_ which take the Cassandra GC log as -input and show you detailed visual information on your garbage collection -performance. This includes pause analysis as well as throughput information. -For a stable Cassandra JVM you probably want to aim for pauses less than -`200ms` and GC throughput greater than `99%` (ymmv). - -Java GC pauses are one of the leading causes of tail latency in Cassandra -(along with drive latency) so sometimes this information can be crucial -while debugging tail latency issues. - - -Getting More Information ------------------------- - -If the default logging levels are insuficient, ``nodetool`` can set higher -or lower logging levels for various packages and classes using the -``nodetool setlogginglevel`` command. Start by viewing the current levels:: - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - -Perhaps the ``Gossiper`` is acting up and we wish to enable it at ``TRACE`` -level for even more insight:: - - - $ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE - - $ nodetool getlogginglevels - - Logger Name Log Level - ROOT INFO - org.apache.cassandra DEBUG - org.apache.cassandra.gms.Gossiper TRACE - - $ grep TRACE debug.log | tail -2 - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating - heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ... - TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local - heartbeat version 2341 greater than 2340 for 127.0.0.1:7000 - - -Note that any changes made this way are reverted on next Cassandra process -restart. 
To make the changes permanent add the appropriate rule to -``logback.xml``. - -.. code-block:: diff - - diff --git a/conf/logback.xml b/conf/logback.xml - index b2c5b10..71b0a49 100644 - --- a/conf/logback.xml - +++ b/conf/logback.xml - @@ -98,4 +98,5 @@ appender reference in the root level section below. - - - - + - - -Full Query Logger -^^^^^^^^^^^^^^^^^ - -Cassandra 4.0 additionally ships with support for full query logging. This -is a highly performant binary logging tool which captures Cassandra queries -in real time, writes them (if possible) to a log file, and ensures the total -size of the capture does not exceed a particular limit. FQL is enabled with -``nodetool`` and the logs are read with the provided ``bin/fqltool`` utility:: - - $ mkdir /var/tmp/fql_logs - $ nodetool enablefullquerylog --path /var/tmp/fql_logs - - # ... do some querying - - $ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail - Query time: 1530750927224 - Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name = - 'system_views' AND table_name = 'sstable_tasks'; - Values: - - Type: single - Protocol version: 4 - Query time: 1530750934072 - Query: select * from keyspace1.standard1 ; - Values: - - $ nodetool disablefullquerylog - -Note that if you want more information than this tool provides, there are other -live capture options available such as :ref:`packet capture `. diff --git a/src/doc/4.0-beta1/_sources/troubleshooting/use_nodetool.rst.txt b/src/doc/4.0-beta1/_sources/troubleshooting/use_nodetool.rst.txt deleted file mode 100644 index 5072f85d1..000000000 --- a/src/doc/4.0-beta1/_sources/troubleshooting/use_nodetool.rst.txt +++ /dev/null @@ -1,245 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-nodetool: - -Use Nodetool -============ - -Cassandra's ``nodetool`` allows you to narrow problems from the cluster down -to a particular node and gives a lot of insight into the state of the Cassandra -process itself. There are dozens of useful commands (see ``nodetool help`` -for all the commands), but briefly some of the most useful for troubleshooting: - -.. _nodetool-status: - -Cluster Status --------------- - -You can use ``nodetool status`` to assess status of the cluster:: - - $ nodetool status - - Datacenter: dc1 - ======================= - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - UN 127.0.1.1 4.69 GiB 1 100.0% 35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e r1 - UN 127.0.1.2 4.71 GiB 1 100.0% 752e278f-b7c5-4f58-974b-9328455af73f r2 - UN 127.0.1.3 4.69 GiB 1 100.0% 9dc1a293-2cc0-40fa-a6fd-9e6054da04a7 r3 - -In this case we can see that we have three nodes in one datacenter with about -4.6GB of data each and they are all "up". 
The up/down status of a node is -independently determined by every node in the cluster, so you may have to run -``nodetool status`` on multiple nodes in a cluster to see the full view. - -You can use ``nodetool status`` plus a little grep to see which nodes are -down:: - - $ nodetool status | grep -v '^UN' - Datacenter: dc1 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - Datacenter: dc2 - =============== - Status=Up/Down - |/ State=Normal/Leaving/Joining/Moving - -- Address Load Tokens Owns (effective) Host ID Rack - DN 127.0.0.5 105.73 KiB 1 33.3% df303ac7-61de-46e9-ac79-6e630115fd75 r1 - -In this case there are two datacenters and there is one node down in datacenter -``dc2`` and rack ``r1``. This may indicate an issue on ``127.0.0.5`` -warranting investigation. - -.. _nodetool-proxyhistograms: - -Coordinator Query Latency -------------------------- -You can view latency distributions of coordinator read and write latency -to help narrow down latency issues using ``nodetool proxyhistograms``:: - - $ nodetool proxyhistograms - Percentile Read Latency Write Latency Range Latency CAS Read Latency CAS Write Latency View Write Latency - (micros) (micros) (micros) (micros) (micros) (micros) - 50% 454.83 219.34 0.00 0.00 0.00 0.00 - 75% 545.79 263.21 0.00 0.00 0.00 0.00 - 95% 654.95 315.85 0.00 0.00 0.00 0.00 - 98% 785.94 379.02 0.00 0.00 0.00 0.00 - 99% 3379.39 2346.80 0.00 0.00 0.00 0.00 - Min 42.51 105.78 0.00 0.00 0.00 0.00 - Max 25109.16 43388.63 0.00 0.00 0.00 0.00 - -Here you can see the full latency distribution of reads, writes, range requests -(e.g. ``select * from keyspace.table``), CAS read (compare phase of CAS) and -CAS write (set phase of compare and set). These can be useful for narrowing -down high level latency problems, for example in this case if a client had a -20 millisecond timeout on their reads they might experience the occasional -timeout from this node but less than 1% (since the 99% read latency is 3.3 -milliseconds < 20 milliseconds). - -.. _nodetool-tablehistograms: - -Local Query Latency -------------------- - -If you know which table is having latency/error issues, you can use -``nodetool tablehistograms`` to get a better idea of what is happening -locally on a node:: - - $ nodetool tablehistograms keyspace table - Percentile SSTables Write Latency Read Latency Partition Size Cell Count - (micros) (micros) (bytes) - 50% 0.00 73.46 182.79 17084 103 - 75% 1.00 88.15 315.85 17084 103 - 95% 2.00 126.93 545.79 17084 103 - 98% 2.00 152.32 654.95 17084 103 - 99% 2.00 182.79 785.94 17084 103 - Min 0.00 42.51 24.60 14238 87 - Max 2.00 12108.97 17436.92 17084 103 - -This shows you percentile breakdowns particularly critical metrics. - -The first column contains how many sstables were read per logical read. A very -high number here indicates that you may have chosen the wrong compaction -strategy, e.g. ``SizeTieredCompactionStrategy`` typically has many more reads -per read than ``LeveledCompactionStrategy`` does for update heavy workloads. - -The second column shows you a latency breakdown of *local* write latency. In -this case we see that while the p50 is quite good at 73 microseconds, the -maximum latency is quite slow at 12 milliseconds. High write max latencies -often indicate a slow commitlog volume (slow to fsync) or large writes -that quickly saturate commitlog segments. - -The third column shows you a latency breakdown of *local* read latency. 
We can -see that local Cassandra reads are (as expected) slower than local writes, and -the read speed correlates highly with the number of sstables read per read. - -The fourth and fifth columns show distributions of partition size and column -count per partition. These are useful for determining if the table has on -average skinny or wide partitions and can help you isolate bad data patterns. -For example if you have a single cell that is 2 megabytes, that is probably -going to cause some heap pressure when it's read. - -.. _nodetool-tpstats: - -Threadpool State ----------------- - -You can use ``nodetool tpstats`` to view the current outstanding requests on -a particular node. This is useful for trying to find out which resource -(read threads, write threads, compaction, request response threads) the -Cassandra process lacks. For example:: - - $ nodetool tpstats - Pool Name Active Pending Completed Blocked All time blocked - ReadStage 2 0 12 0 0 - MiscStage 0 0 0 0 0 - CompactionExecutor 0 0 1940 0 0 - MutationStage 0 0 0 0 0 - GossipStage 0 0 10293 0 0 - Repair-Task 0 0 0 0 0 - RequestResponseStage 0 0 16 0 0 - ReadRepairStage 0 0 0 0 0 - CounterMutationStage 0 0 0 0 0 - MemtablePostFlush 0 0 83 0 0 - ValidationExecutor 0 0 0 0 0 - MemtableFlushWriter 0 0 30 0 0 - ViewMutationStage 0 0 0 0 0 - CacheCleanupExecutor 0 0 0 0 0 - MemtableReclaimMemory 0 0 30 0 0 - PendingRangeCalculator 0 0 11 0 0 - SecondaryIndexManagement 0 0 0 0 0 - HintsDispatcher 0 0 0 0 0 - Native-Transport-Requests 0 0 192 0 0 - MigrationStage 0 0 14 0 0 - PerDiskMemtableFlushWriter_0 0 0 30 0 0 - Sampler 0 0 0 0 0 - ViewBuildExecutor 0 0 0 0 0 - InternalResponseStage 0 0 0 0 0 - AntiEntropyStage 0 0 0 0 0 - - Message type Dropped Latency waiting in queue (micros) - 50% 95% 99% Max - READ 0 N/A N/A N/A N/A - RANGE_SLICE 0 0.00 0.00 0.00 0.00 - _TRACE 0 N/A N/A N/A N/A - HINT 0 N/A N/A N/A N/A - MUTATION 0 N/A N/A N/A N/A - COUNTER_MUTATION 0 N/A N/A N/A N/A - BATCH_STORE 0 N/A N/A N/A N/A - BATCH_REMOVE 0 N/A N/A N/A N/A - REQUEST_RESPONSE 0 0.00 0.00 0.00 0.00 - PAGED_RANGE 0 N/A N/A N/A N/A - READ_REPAIR 0 N/A N/A N/A N/A - -This command shows you all kinds of interesting statistics. The first section -shows a detailed breakdown of threadpools for each Cassandra stage, including -how many threads are current executing (Active) and how many are waiting to -run (Pending). Typically if you see pending executions in a particular -threadpool that indicates a problem localized to that type of operation. For -example if the ``RequestResponseState`` queue is backing up, that means -that the coordinators are waiting on a lot of downstream replica requests and -may indicate a lack of token awareness, or very high consistency levels being -used on read requests (for example reading at ``ALL`` ties up RF -``RequestResponseState`` threads whereas ``LOCAL_ONE`` only uses a single -thread in the ``ReadStage`` threadpool). On the other hand if you see a lot of -pending compactions that may indicate that your compaction threads cannot keep -up with the volume of writes and you may need to tune either the compaction -strategy or the ``concurrent_compactors`` or ``compaction_throughput`` options. - -The second section shows drops (errors) and latency distributions for all the -major request types. Drops are cumulative since process start, but if you -have any that indicate a serious problem as the default timeouts to qualify as -a drop are quite high (~5-10 seconds). Dropped messages often warrants further -investigation. - -.. 
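When watching a node live it can also help to filter the output down to just the pools that have work queued. A small sketch (it simply filters on the ``Pending`` column of the ``nodetool tpstats`` output shown above; adjust the column number if your version formats the output differently)::

    $ # Show only thread pools with a non-zero Pending count
    $ nodetool tpstats | awk 'NR == 1 || $3 + 0 > 0'

    $ # Re-run every 10 seconds to see whether a backlog is growing or draining
    $ watch -n 10 'nodetool tpstats | awk "NR == 1 || \$3 + 0 > 0"'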
_nodetool-compactionstats: - -Compaction State ----------------- - -As Cassandra is a LSM datastore, Cassandra sometimes has to compact sstables -together, which can have adverse effects on performance. In particular, -compaction uses a reasonable quantity of CPU resources, invalidates large -quantities of the OS `page cache `_, -and can put a lot of load on your disk drives. There are great -:ref:`os tools ` to determine if this is the case, but often it's a -good idea to check if compactions are even running using -``nodetool compactionstats``:: - - $ nodetool compactionstats - pending tasks: 2 - - keyspace.table: 2 - - id compaction type keyspace table completed total unit progress - 2062b290-7f3a-11e8-9358-cd941b956e60 Compaction keyspace table 21848273 97867583 bytes 22.32% - Active compaction remaining time : 0h00m04s - -In this case there is a single compaction running on the ``keyspace.table`` -table, has completed 21.8 megabytes of 97 and Cassandra estimates (based on -the configured compaction throughput) that this will take 4 seconds. You can -also pass ``-H`` to get the units in a human readable format. - -Generally each running compaction can consume a single core, but the more -you do in parallel the faster data compacts. Compaction is crucial to ensuring -good read performance so having the right balance of concurrent compactions -such that compactions complete quickly but don't take too many resources -away from query threads is very important for performance. If you notice -compaction unable to keep up, try tuning Cassandra's ``concurrent_compactors`` -or ``compaction_throughput`` options. diff --git a/src/doc/4.0-beta1/_sources/troubleshooting/use_tools.rst.txt b/src/doc/4.0-beta1/_sources/troubleshooting/use_tools.rst.txt deleted file mode 100644 index b1347cc6d..000000000 --- a/src/doc/4.0-beta1/_sources/troubleshooting/use_tools.rst.txt +++ /dev/null @@ -1,542 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at -.. -.. http://www.apache.org/licenses/LICENSE-2.0 -.. -.. Unless required by applicable law or agreed to in writing, software -.. distributed under the License is distributed on an "AS IS" BASIS, -.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -.. See the License for the specific language governing permissions and -.. limitations under the License. - -.. _use-os-tools: - -Diving Deep, Use External Tools -=============================== - -Machine access allows operators to dive even deeper than logs and ``nodetool`` -allow. While every Cassandra operator may have their personal favorite -toolsets for troubleshooting issues, this page contains some of the most common -operator techniques and examples of those tools. Many of these commands work -only on Linux, but if you are deploying on a different operating system you may -have access to other substantially similar tools that assess similar OS level -metrics and processes. - -JVM Tooling ------------ -The JVM ships with a number of useful tools. Some of them are useful for -debugging Cassandra issues, especially related to heap and execution stacks. 
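Most of these tools take the Cassandra process id as an argument, so it is handy to capture it once. A sketch, assuming Cassandra was started via the standard ``CassandraDaemon`` main class::

    $ # Capture the Cassandra PID for use with jstat, jstack, etc.
    $ CASSANDRA_PID=$(pgrep -f CassandraDaemon)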
- -**NOTE**: There are two common gotchas with JVM tooling and Cassandra: - -1. By default Cassandra ships with ``-XX:+PerfDisableSharedMem`` set to prevent - long pauses (see ``CASSANDRA-9242`` and ``CASSANDRA-9483`` for details). If - you want to use JVM tooling you can instead have ``/tmp`` mounted on an in - memory ``tmpfs`` which also effectively works around ``CASSANDRA-9242``. -2. Make sure you run the tools as the same user as Cassandra is running as, - e.g. if the database is running as ``cassandra`` the tool also has to be - run as ``cassandra``, e.g. via ``sudo -u cassandra ``. - -Garbage Collection State (jstat) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you suspect heap pressure you can use ``jstat`` to dive deep into the -garbage collection state of a Cassandra process. This command is always -safe to run and yields detailed heap information including eden heap usage (E), -old generation heap usage (O), count of eden collections (YGC), time spend in -eden collections (YGCT), old/mixed generation collections (FGC) and time spent -in old/mixed generation collections (FGCT):: - - - jstat -gcutil 500ms - S0 S1 E O M CCS YGC YGCT FGC FGCT GCT - 0.00 0.00 81.53 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 82.36 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 83.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 84.19 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.03 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - 0.00 0.00 85.94 31.16 93.07 88.20 12 0.151 3 0.257 0.408 - -In this case we see we have a relatively healthy heap profile, with 31.16% -old generation heap usage and 83% eden. If the old generation routinely is -above 75% then you probably need more heap (assuming CMS with a 75% occupancy -threshold). If you do have such persistently high old gen that often means you -either have under-provisioned the old generation heap, or that there is too -much live data on heap for Cassandra to collect (e.g. because of memtables). -Another thing to watch for is time between young garbage collections (YGC), -which indicate how frequently the eden heap is collected. Each young gc pause -is about 20-50ms, so if you have a lot of them your clients will notice in -their high percentile latencies. - -Thread Information (jstack) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To get a point in time snapshot of exactly what Cassandra is doing, run -``jstack`` against the Cassandra PID. **Note** that this does pause the JVM for -a very brief period (<20ms).:: - - $ jstack > threaddump - - # display the threaddump - $ cat threaddump - ... 
- - # look at runnable threads - $grep RUNNABLE threaddump -B 1 - "Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000] - java.lang.Thread.State: RUNNABLE - -- - "Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - "C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000] - java.lang.Thread.State: RUNNABLE - -- - ... - - # Note that the nid is the Linux thread id - -Some of the most important information in the threaddumps are waiting/blocking -threads, including what locks or monitors the thread is blocking/waiting on. - -Basic OS Tooling ----------------- -A great place to start when debugging a Cassandra issue is understanding how -Cassandra is interacting with system resources. The following are all -resources that Cassandra makes heavy uses of: - -* CPU cores. For executing concurrent user queries -* CPU processing time. For query activity (data decompression, row merging, - etc...) -* CPU processing time (low priority). For background tasks (compaction, - streaming, etc ...) -* RAM for Java Heap. Used to hold internal data-structures and by default the - Cassandra memtables. Heap space is a crucial component of write performance - as well as generally. -* RAM for OS disk cache. Used to cache frequently accessed SSTable blocks. OS - disk cache is a crucial component of read performance. -* Disks. Cassandra cares a lot about disk read latency, disk write throughput, - and of course disk space. -* Network latency. Cassandra makes many internode requests, so network latency - between nodes can directly impact performance. -* Network throughput. Cassandra (as other databases) frequently have the - so called "incast" problem where a small request (e.g. ``SELECT * from - foo.bar``) returns a massively large result set (e.g. the entire dataset). - In such situations outgoing bandwidth is crucial. - -Often troubleshooting Cassandra comes down to troubleshooting what resource -the machine or cluster is running out of. Then you create more of that resource -or change the query pattern to make less use of that resource. - -High Level Resource Usage (top/htop) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Cassandra makes signifiant use of system resources, and often the very first -useful action is to run ``top`` or ``htop`` (`website -`_)to see the state of the machine. - -Useful things to look at: - -* System load levels. While these numbers can be confusing, generally speaking - if the load average is greater than the number of CPU cores, Cassandra - probably won't have very good (sub 100 millisecond) latencies. See - `Linux Load Averages `_ - for more information. -* CPU utilization. ``htop`` in particular can help break down CPU utilization - into ``user`` (low and normal priority), ``system`` (kernel), and ``io-wait`` - . Cassandra query threads execute as normal priority ``user`` threads, while - compaction threads execute as low priority ``user`` threads. 
High ``system`` - time could indicate problems like thread contention, and high ``io-wait`` - may indicate slow disk drives. This can help you understand what Cassandra - is spending processing resources doing. -* Memory usage. Look for which programs have the most resident memory, it is - probably Cassandra. The number for Cassandra is likely inaccurately high due - to how Linux (as of 2018) accounts for memory mapped file memory. - -.. _os-iostat: - -IO Usage (iostat) -^^^^^^^^^^^^^^^^^ -Use iostat to determine how data drives are faring, including latency -distributions, throughput, and utilization:: - - $ sudo iostat -xdm 2 - Linux 4.13.0-13-generic (hostname) 07/03/2018 _x86_64_ (8 CPU) - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.28 0.32 5.42 0.01 0.13 48.55 0.01 2.21 0.26 2.32 0.64 0.37 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 79.34 0.00 0.20 0.20 0.00 0.16 0.00 - sdc 0.34 0.27 0.76 0.36 0.01 0.02 47.56 0.03 26.90 2.98 77.73 9.21 1.03 - - Device: rrqm/s wrqm/s r/s w/s rMB/s wMB/s avgrq-sz avgqu-sz await r_await w_await svctm %util - sda 0.00 0.00 2.00 32.00 0.01 4.04 244.24 0.54 16.00 0.00 17.00 1.06 3.60 - sdb 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 - sdc 0.00 24.50 0.00 114.00 0.00 11.62 208.70 5.56 48.79 0.00 48.79 1.12 12.80 - - -In this case we can see that ``/dev/sdc1`` is a very slow drive, having an -``await`` close to 50 milliseconds and an ``avgqu-sz`` close to 5 ios. The -drive is not particularly saturated (utilization is only 12.8%), but we should -still be concerned about how this would affect our p99 latency since 50ms is -quite long for typical Cassandra operations. That being said, in this case -most of the latency is present in writes (typically writes are more latent -than reads), which due to the LSM nature of Cassandra is often hidden from -the user. - -Important metrics to assess using iostat: - -* Reads and writes per second. These numbers will change with the workload, - but generally speaking the more reads Cassandra has to do from disk the - slower Cassandra read latencies are. Large numbers of reads per second - can be a dead giveaway that the cluster has insufficient memory for OS - page caching. -* Write throughput. Cassandra's LSM model defers user writes and batches them - together, which means that throughput to the underlying medium is the most - important write metric for Cassandra. -* Read latency (``r_await``). When Cassandra missed the OS page cache and reads - from SSTables, the read latency directly determines how fast Cassandra can - respond with the data. -* Write latency. Cassandra is less sensitive to write latency except when it - syncs the commit log. This typically enters into the very high percentiles of - write latency. - -Note that to get detailed latency breakdowns you will need a more advanced -tool such as :ref:`bcc-tools `. - -OS page Cache Usage -^^^^^^^^^^^^^^^^^^^ -As Cassandra makes heavy use of memory mapped files, the health of the -operating system's `Page Cache `_ is -crucial to performance. Start by finding how much available cache is in the -system:: - - $ free -g - total used free shared buff/cache available - Mem: 15 9 2 0 3 5 - Swap: 0 0 0 - -In this case 9GB of memory is used by user processes (Cassandra heap) and 8GB -is available for OS page cache. Of that, 3GB is actually used to cache files. -If most memory is used and unavailable to the page cache, Cassandra performance -can suffer significantly. 
This is why Cassandra starts with a reasonably small -amount of memory reserved for the heap. - -If you suspect that you are missing the OS page cache frequently you can use -advanced tools like :ref:`cachestat ` or -:ref:`vmtouch ` to dive deeper. - -Network Latency and Reliability -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Whenever Cassandra does writes or reads that involve other replicas, -``LOCAL_QUORUM`` reads for example, one of the dominant effects on latency is -network latency. When trying to debug issues with multi machine operations, -the network can be an important resource to investigate. You can determine -internode latency using tools like ``ping`` and ``traceroute`` or most -effectively ``mtr``:: - - $ mtr -nr www.google.com - Start: Sun Jul 22 13:10:28 2018 - HOST: hostname Loss% Snt Last Avg Best Wrst StDev - 1.|-- 192.168.1.1 0.0% 10 2.0 1.9 1.1 3.7 0.7 - 2.|-- 96.123.29.15 0.0% 10 11.4 11.0 9.0 16.4 1.9 - 3.|-- 68.86.249.21 0.0% 10 10.6 10.7 9.0 13.7 1.1 - 4.|-- 162.141.78.129 0.0% 10 11.5 10.6 9.6 12.4 0.7 - 5.|-- 162.151.78.253 0.0% 10 10.9 12.1 10.4 20.2 2.8 - 6.|-- 68.86.143.93 0.0% 10 12.4 12.6 9.9 23.1 3.8 - 7.|-- 96.112.146.18 0.0% 10 11.9 12.4 10.6 15.5 1.6 - 9.|-- 209.85.252.250 0.0% 10 13.7 13.2 12.5 13.9 0.0 - 10.|-- 108.170.242.238 0.0% 10 12.7 12.4 11.1 13.0 0.5 - 11.|-- 74.125.253.149 0.0% 10 13.4 13.7 11.8 19.2 2.1 - 12.|-- 216.239.62.40 0.0% 10 13.4 14.7 11.5 26.9 4.6 - 13.|-- 108.170.242.81 0.0% 10 14.4 13.2 10.9 16.0 1.7 - 14.|-- 72.14.239.43 0.0% 10 12.2 16.1 11.0 32.8 7.1 - 15.|-- 216.58.195.68 0.0% 10 25.1 15.3 11.1 25.1 4.8 - -In this example of ``mtr``, we can rapidly assess the path that your packets -are taking, as well as what their typical loss and latency are. Packet loss -typically leads to between ``200ms`` and ``3s`` of additional latency, so that -can be a common cause of latency issues. - -Network Throughput -^^^^^^^^^^^^^^^^^^ -As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is -useful to determine if network throughput is limited. One handy tool to do -this is `iftop `_ which -shows both bandwidth usage as well as connection information at a glance. 
An -example showing traffic during a stress run against a local ``ccm`` cluster:: - - $ # remove the -t for ncurses instead of pure text - $ sudo iftop -nNtP -i lo - interface: lo - IP address is: 127.0.0.1 - MAC address is: 00:00:00:00:00:00 - Listening on lo - # Host name (port/service if enabled) last 2s last 10s last 40s cumulative - -------------------------------------------------------------------------------------------- - 1 127.0.0.1:58946 => 869Kb 869Kb 869Kb 217KB - 127.0.0.3:9042 <= 0b 0b 0b 0B - 2 127.0.0.1:54654 => 736Kb 736Kb 736Kb 184KB - 127.0.0.1:9042 <= 0b 0b 0b 0B - 3 127.0.0.1:51186 => 669Kb 669Kb 669Kb 167KB - 127.0.0.2:9042 <= 0b 0b 0b 0B - 4 127.0.0.3:9042 => 3.30Kb 3.30Kb 3.30Kb 845B - 127.0.0.1:58946 <= 0b 0b 0b 0B - 5 127.0.0.1:9042 => 2.79Kb 2.79Kb 2.79Kb 715B - 127.0.0.1:54654 <= 0b 0b 0b 0B - 6 127.0.0.2:9042 => 2.54Kb 2.54Kb 2.54Kb 650B - 127.0.0.1:51186 <= 0b 0b 0b 0B - 7 127.0.0.1:36894 => 1.65Kb 1.65Kb 1.65Kb 423B - 127.0.0.5:7000 <= 0b 0b 0b 0B - 8 127.0.0.1:38034 => 1.50Kb 1.50Kb 1.50Kb 385B - 127.0.0.2:7000 <= 0b 0b 0b 0B - 9 127.0.0.1:56324 => 1.50Kb 1.50Kb 1.50Kb 383B - 127.0.0.1:7000 <= 0b 0b 0b 0B - 10 127.0.0.1:53044 => 1.43Kb 1.43Kb 1.43Kb 366B - 127.0.0.4:7000 <= 0b 0b 0b 0B - -------------------------------------------------------------------------------------------- - Total send rate: 2.25Mb 2.25Mb 2.25Mb - Total receive rate: 0b 0b 0b - Total send and receive rate: 2.25Mb 2.25Mb 2.25Mb - -------------------------------------------------------------------------------------------- - Peak rate (sent/received/total): 2.25Mb 0b 2.25Mb - Cumulative (sent/received/total): 576KB 0B 576KB - ============================================================================================ - -In this case we can see that bandwidth is fairly shared between many peers, -but if the total was getting close to the rated capacity of the NIC or was focussed -on a single client, that may indicate a clue as to what issue is occurring. - -Advanced tools --------------- -Sometimes as an operator you may need to really dive deep. This is where -advanced OS tooling can come in handy. - -.. _use-bcc-tools: - -bcc-tools -^^^^^^^^^ -Most modern Linux distributions (kernels newer than ``4.1``) support `bcc-tools -`_ for diving deep into performance problems. -First install ``bcc-tools``, e.g. via ``apt`` on Debian:: - - $ apt install bcc-tools - -Then you can use all the tools that ``bcc-tools`` contains. One of the most -useful tools is ``cachestat`` -(`cachestat examples `_) -which allows you to determine exactly how many OS page cache hits and misses -are happening:: - - $ sudo /usr/share/bcc/tools/cachestat -T 1 - TIME TOTAL MISSES HITS DIRTIES BUFFERS_MB CACHED_MB - 18:44:08 66 66 0 64 88 4427 - 18:44:09 40 40 0 75 88 4427 - 18:44:10 4353 45 4308 203 88 4427 - 18:44:11 84 77 7 13 88 4428 - 18:44:12 2511 14 2497 14 88 4428 - 18:44:13 101 98 3 18 88 4428 - 18:44:14 16741 0 16741 58 88 4428 - 18:44:15 1935 36 1899 18 88 4428 - 18:44:16 89 34 55 18 88 4428 - -In this case there are not too many page cache ``MISSES`` which indicates a -reasonably sized cache. These metrics are the most direct measurement of your -Cassandra node's "hot" dataset. If you don't have enough cache, ``MISSES`` will -be high and performance will be slow. If you have enough cache, ``MISSES`` will -be low and performance will be fast (as almost all reads are being served out -of memory). 
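If you want a single number rather than a streaming view, you can summarize a short sample window. This is only a sketch and assumes the column layout shown above (``TIME TOTAL MISSES HITS ...``), which can differ between ``bcc`` versions::

    $ # Sample once per second for 30 seconds, then report the overall miss ratio
    $ sudo /usr/share/bcc/tools/cachestat -T 1 30 | \
        awk 'NR > 1 { total += $2; misses += $3 } END { if (total) printf("miss ratio: %.2f%%\n", 100 * misses / total) }'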
- -You can also measure disk latency distributions using ``biolatency`` -(`biolatency examples `_) -to get an idea of how slow Cassandra will be when reads miss the OS page Cache -and have to hit disks:: - - $ sudo /usr/share/bcc/tools/biolatency -D 10 - Tracing block device I/O... Hit Ctrl-C to end. - - - disk = 'sda' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 12 |****************************************| - 32 -> 63 : 9 |****************************** | - 64 -> 127 : 1 |*** | - 128 -> 255 : 3 |********** | - 256 -> 511 : 7 |*********************** | - 512 -> 1023 : 2 |****** | - - disk = 'sdc' - usecs : count distribution - 0 -> 1 : 0 | | - 2 -> 3 : 0 | | - 4 -> 7 : 0 | | - 8 -> 15 : 0 | | - 16 -> 31 : 0 | | - 32 -> 63 : 0 | | - 64 -> 127 : 41 |************ | - 128 -> 255 : 17 |***** | - 256 -> 511 : 13 |*** | - 512 -> 1023 : 2 | | - 1024 -> 2047 : 0 | | - 2048 -> 4095 : 0 | | - 4096 -> 8191 : 56 |***************** | - 8192 -> 16383 : 131 |****************************************| - 16384 -> 32767 : 9 |** | - -In this case most ios on the data drive (``sdc``) are fast, but many take -between 8 and 16 milliseconds. - -Finally ``biosnoop`` (`examples `_) -can be used to dive even deeper and see per IO latencies:: - - $ sudo /usr/share/bcc/tools/biosnoop | grep java | head - 0.000000000 java 17427 sdc R 3972458600 4096 13.58 - 0.000818000 java 17427 sdc R 3972459408 4096 0.35 - 0.007098000 java 17416 sdc R 3972401824 4096 5.81 - 0.007896000 java 17416 sdc R 3972489960 4096 0.34 - 0.008920000 java 17416 sdc R 3972489896 4096 0.34 - 0.009487000 java 17427 sdc R 3972401880 4096 0.32 - 0.010238000 java 17416 sdc R 3972488368 4096 0.37 - 0.010596000 java 17427 sdc R 3972488376 4096 0.34 - 0.011236000 java 17410 sdc R 3972488424 4096 0.32 - 0.011825000 java 17427 sdc R 3972488576 16384 0.65 - ... time passes - 8.032687000 java 18279 sdc R 10899712 122880 3.01 - 8.033175000 java 18279 sdc R 10899952 8192 0.46 - 8.073295000 java 18279 sdc R 23384320 122880 3.01 - 8.073768000 java 18279 sdc R 23384560 8192 0.46 - - -With ``biosnoop`` you see every single IO and how long they take. This data -can be used to construct the latency distributions in ``biolatency`` but can -also be used to better understand how disk latency affects performance. For -example this particular drive takes ~3ms to service a memory mapped read due to -the large default value (``128kb``) of ``read_ahead_kb``. To improve point read -performance you may may want to decrease ``read_ahead_kb`` on fast data volumes -such as SSDs while keeping the a higher value like ``128kb`` value is probably -right for HDs. There are tradeoffs involved, see `queue-sysfs -`_ docs for more -information, but regardless ``biosnoop`` is useful for understanding *how* -Cassandra uses drives. - -.. _use-vmtouch: - -vmtouch -^^^^^^^ -Sometimes it's useful to know how much of the Cassandra data files are being -cached by the OS. A great tool for answering this question is -`vmtouch `_. - -First install it:: - - $ git clone https://github.com/hoytech/vmtouch.git - $ cd vmtouch - $ make - -Then run it on the Cassandra data directory:: - - $ ./vmtouch /var/lib/cassandra/data/ - Files: 312 - Directories: 92 - Resident Pages: 62503/64308 244M/251M 97.2% - Elapsed: 0.005657 seconds - -In this case almost the entire dataset is hot in OS page Cache. Generally -speaking the percentage doesn't really matter unless reads are missing the -cache (per e.g. 
:ref:`cachestat `), in which case having -additional memory may help read performance. - -CPU Flamegraphs -^^^^^^^^^^^^^^^ -Cassandra often uses a lot of CPU, but telling *what* it is doing can prove -difficult. One of the best ways to analyze Cassandra on CPU time is to use -`CPU Flamegraphs `_ -which display in a useful way which areas of Cassandra code are using CPU. This -may help narrow down a compaction problem to a "compaction problem dropping -tombstones" or just generally help you narrow down what Cassandra is doing -while it is having an issue. To get CPU flamegraphs follow the instructions for -`Java Flamegraphs -`_. - -Generally: - -1. Enable the ``-XX:+PreserveFramePointer`` option in Cassandra's - ``jvm.options`` configuation file. This has a negligible performance impact - but allows you actually see what Cassandra is doing. -2. Run ``perf`` to get some data. -3. Send that data through the relevant scripts in the FlameGraph toolset and - convert the data into a pretty flamegraph. View the resulting SVG image in - a browser or other image browser. - -For example just cloning straight off github we first install the -``perf-map-agent`` to the location of our JVMs (assumed to be -``/usr/lib/jvm``):: - - $ sudo bash - $ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/ - $ cd /usr/lib/jvm - $ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent - $ cd perf-map-agent - $ cmake . - $ make - -Now to get a flamegraph:: - - $ git clone --depth=1 https://github.com/brendangregg/FlameGraph - $ sudo bash - $ cd FlameGraph - $ # Record traces of Cassandra and map symbols for all java processes - $ perf record -F 49 -a -g -p -- sleep 30; ./jmaps - $ # Translate the data - $ perf script > cassandra_stacks - $ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \ - ./flamegraph.pl --color=java --hash > cassandra_flames.svg - - -The resulting SVG is searchable, zoomable, and generally easy to introspect -using a browser. - -.. _packet-capture: - -Packet Capture -^^^^^^^^^^^^^^ -Sometimes you have to understand what queries a Cassandra node is performing -*right now* to troubleshoot an issue. For these times trusty packet capture -tools like ``tcpdump`` and `Wireshark -`_ can be very helpful to dissect packet captures. -Wireshark even has native `CQL support -`_ although it sometimes has -compatibility issues with newer Cassandra protocol releases. - -To get a packet capture first capture some packets:: - - $ sudo tcpdump -U -s0 -i -w cassandra.pcap -n "tcp port 9042" - -Now open it up with wireshark:: - - $ wireshark cassandra.pcap - -If you don't see CQL like statements try telling to decode as CQL by right -clicking on a packet going to 9042 -> ``Decode as`` -> select CQL from the -dropdown for port 9042. - -If you don't want to do this manually or use a GUI, you can also use something -like `cqltrace `_ to ease obtaining and -parsing CQL packet captures. diff --git a/src/doc/4.0-beta1/_static/ajax-loader.gif b/src/doc/4.0-beta1/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab..000000000 Binary files a/src/doc/4.0-beta1/_static/ajax-loader.gif and /dev/null differ diff --git a/src/doc/4.0-beta1/_static/basic.css b/src/doc/4.0-beta1/_static/basic.css deleted file mode 100644 index 0807176ec..000000000 --- a/src/doc/4.0-beta1/_static/basic.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. 
- * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 
450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: 
manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - 
div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/src/doc/4.0-beta1/_static/comment-bright.png b/src/doc/4.0-beta1/_static/comment-bright.png deleted file mode 100644 index 15e27edb1..000000000 Binary files a/src/doc/4.0-beta1/_static/comment-bright.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_static/comment-close.png b/src/doc/4.0-beta1/_static/comment-close.png deleted file mode 100644 index 4d91bcf57..000000000 Binary files a/src/doc/4.0-beta1/_static/comment-close.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_static/comment.png b/src/doc/4.0-beta1/_static/comment.png deleted file mode 100644 index dfbc0cbd5..000000000 Binary files a/src/doc/4.0-beta1/_static/comment.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_static/doctools.js b/src/doc/4.0-beta1/_static/doctools.js deleted file mode 100644 index 344db17dd..000000000 --- a/src/doc/4.0-beta1/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. 
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var bbox = span.getBBox(); - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - var parentOfText = node.parentNode.parentNode; - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? 
singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git 
a/src/doc/4.0-beta1/_static/documentation_options.js b/src/doc/4.0-beta1/_static/documentation_options.js deleted file mode 100644 index d28647eb8..000000000 --- a/src/doc/4.0-beta1/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, -}; \ No newline at end of file diff --git a/src/doc/4.0-beta1/_static/down-pressed.png b/src/doc/4.0-beta1/_static/down-pressed.png deleted file mode 100644 index 5756c8cad..000000000 Binary files a/src/doc/4.0-beta1/_static/down-pressed.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_static/down.png b/src/doc/4.0-beta1/_static/down.png deleted file mode 100644 index 1b3bdad2c..000000000 Binary files a/src/doc/4.0-beta1/_static/down.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_static/extra.css b/src/doc/4.0-beta1/_static/extra.css deleted file mode 100644 index 5e40dd7d2..000000000 --- a/src/doc/4.0-beta1/_static/extra.css +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -div:not(.highlight) > pre { - background: #fff; - border: 1px solid #e1e4e5; - color: #404040; - margin: 1px 0 24px 0; - overflow-x: auto; - padding: 12px 12px; - font-size: 12px; -} - -a.reference.internal code.literal { - border: none; - font-size: 12px; - color: #2980B9; - padding: 0; - background: none; -} - -a.reference.internal:visited code.literal { - color: #9B59B6; - padding: 0; - background: none; -} - - -/* override table width restrictions */ -.wy-table-responsive table td, .wy-table-responsive table th { - white-space: normal; -} - -.wy-table-responsive { - margin-bottom: 24px; - max-width: 100%; - overflow: visible; -} - -table.contentstable { - margin: 0; -} - -td.rightcolumn { - padding-left: 30px; -} - -div#wipwarning { - font-size: 14px; - border: 1px solid #ecc; - color: #f66; - background: #ffe8e8; - padding: 10px 30px; - margin-bottom: 30px; -} -.content-container{ - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; - width:100%; -} diff --git a/src/doc/4.0-beta1/_static/file.png b/src/doc/4.0-beta1/_static/file.png deleted file mode 100644 index a858a410e..000000000 Binary files a/src/doc/4.0-beta1/_static/file.png and /dev/null differ diff --git a/src/doc/4.0-beta1/_static/jquery-3.2.1.js b/src/doc/4.0-beta1/_static/jquery-3.2.1.js deleted file mode 100644 index d2d8ca479..000000000 --- a/src/doc/4.0-beta1/_static/jquery-3.2.1.js +++ /dev/null @@ -1,10253 +0,0 @@ -/*! 
- * jQuery JavaScript Library v3.2.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2017-03-20T18:59Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.2.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from 
the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && Array.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? 
- [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
-function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. 
- if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - disabledAncestor( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. - } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( el ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push("~="); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push(".#.+[+~]"); - } - }); - - assert(function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement("input"); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - 
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - if ( support.matchesSelector && documentIsHTML && - !compilerCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) {} - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? 
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -var risSimple = /^.[^:#\[\.,]*$/; - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Simple selector that can be filtered directly, removing non-Elements - if ( risSimple.test( qualifier ) ) { - return jQuery.filter( qualifier, elements, not ); - } - - // Complex selector, compare the two sets, removing non-Elements - qualifier = jQuery.filter( qualifier, elements ); - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; - } ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? - jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( nodeName( elem, "iframe" ) ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( jQuery.isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( jQuery.isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? 
- [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. 
- // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. - } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "
", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps 
(WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( ">tbody", elem )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( isFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl ) { - jQuery._evalUrl( node.src ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = jQuery.contains( elem.ownerDocument, elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rmargin = ( /^margin/ ); - -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - div.style.cssText = - "box-sizing:border-box;" + - "position:relative;display:block;" + - "margin:auto;border:1px;padding:1px;" + - "top:1%;width:50%"; - div.innerHTML = ""; - documentElement.appendChild( container ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = divStyle.marginLeft === "2px"; - boxSizingReliableVal = divStyle.width === "4px"; - - // Support: Android 4.0 - 4.3 only - // Some styles come back with percentage values, even though they shouldn't - div.style.marginRight = "50%"; - pixelMarginRightVal = divStyle.marginRight === "4px"; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + - "padding:0;margin-top:1px;position:absolute"; - container.appendChild( div ); - - jQuery.extend( support, { - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelMarginRight: function() { - computeStyleTests(); - return pixelMarginRightVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }, - - cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style; - -// Return a css property mapped to a potentially vendor prefixed property -function vendorPropName( name ) { - - // Shortcut for names that are not vendor prefixed - if ( name in emptyStyle ) { - return name; - } - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a property mapped along what jQuery.cssProps suggests or to -// a vendor prefixed property. -function finalPropName( name ) { - var ret = jQuery.cssProps[ name ]; - if ( !ret ) { - ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; - } - return ret; -} - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i, - val = 0; - - // If we already have the right measurement, avoid augmentation - if ( extra === ( isBorderBox ? "border" : "content" ) ) { - i = 4; - - // Otherwise initialize for horizontal or vertical properties - } else { - i = name === "width" ? 
1 : 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // At this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - - // At this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // At this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with computed style - var valueIsBorderBox, - styles = getStyles( elem ), - val = curCSS( elem, name, styles ), - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test( val ) ) { - return val; - } - - // Check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && - ( support.boxSizingReliable() || val === elem.style[ name ] ); - - // Fall back to offsetWidth/Height when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - if ( val === "auto" ) { - val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; - } - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - - // Use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - "float": "cssFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - if ( type === "number" ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
- swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - } ) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = extra && getStyles( elem ), - subtract = extra && augmentWidthOrHeight( - elem, - name, - extra, - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ); - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ name ] = value; - value = jQuery.css( elem, name ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && - ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || - jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = jQuery.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 13 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( 
restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( jQuery.isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - jQuery.proxy( result.stop, result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( type === "string" ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = value.match( rnothtmlwhite ) || []; - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, isFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 
2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; -} ); - -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - - - - -support.focusin = "onfocusin" in window; - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = jQuery.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport 
); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - 
// If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - 
contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -' - ---- -
-
- -
-
-
- -
-

Dynamo

-

Apache Cassandra relies on a number of techniques from Amazon’s Dynamo -distributed storage key-value system. Each node in the Dynamo system has three -main components:

-
    -
  • Request coordination over a partitioned dataset
  • -
  • Ring membership and failure detection
  • -
  • A local persistence (storage) engine
  • -
-

Cassandra primarily draws from the first two clustering components, -while using a storage engine based on a Log Structured Merge Tree -(LSM). -In particular, Cassandra relies on Dynamo style:

-
    -
  • Dataset partitioning using consistent hashing
  • -
  • Multi-master replication using versioned data and tunable consistency
  • -
  • Distributed cluster membership and failure detection via a gossip protocol
  • -
  • Incremental scale-out on commodity hardware
  • -
-

Cassandra was designed this way to meet large-scale (PiB+) business-critical -storage requirements. In particular, as applications demanded full global -replication of petabyte scale datasets along with always available low-latency -reads and writes, it became imperative to design a new kind of database model -as the relational database systems of the time struggled to meet the new -requirements of global scale applications.

-
-

Dataset Partitioning: Consistent Hashing

-

Cassandra achieves horizontal scalability by -partitioning -all data stored in the system using a hash function. Each partition is replicated -to multiple physical nodes, often across failure domains such as racks and even -datacenters. As every replica can independently accept mutations to every key -that it owns, every key must be versioned. Unlike in the original Dynamo paper -where deterministic versions and vector clocks were used to reconcile concurrent -updates to a key, Cassandra uses a simpler last write wins model where every -mutation is timestamped (including deletes) and then the latest version of data -is the “winning” value. Formally speaking, Cassandra uses a Last-Write-Wins Element-Set -conflict-free replicated data type for each CQL row (a.k.a LWW-Element-Set CRDT) -to resolve conflicting mutations on replica sets.

-
-
-
-

Consistent Hashing using a Token Ring

-

Cassandra partitions data over storage nodes using a special form of hashing -called consistent hashing. -In naive data hashing, you typically allocate keys to buckets by taking a hash -of the key modulo the number of buckets. For example, if you want to distribute -data to 100 nodes using naive hashing you might assign every node to a bucket -between 0 and 100, hash the input key modulo 100, and store the data on the -associated bucket. In this naive scheme, however, adding a single node might -invalidate almost all of the mappings.

-
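To make the contrast concrete, here is a small illustrative Python sketch (not Cassandra code; the key names and bucket count are made up) showing how naive modulo placement remaps almost every key when a single bucket is added.

```python
import hashlib

def naive_bucket(key: str, num_buckets: int) -> int:
    # Naive placement: hash the key, then take the hash modulo the bucket count.
    return int(hashlib.md5(key.encode()).hexdigest(), 16) % num_buckets

keys = [f"user-{i}" for i in range(10_000)]
moved = sum(naive_bucket(k, 100) != naive_bucket(k, 101) for k in keys)
print(f"{moved / len(keys):.0%} of keys change buckets when one bucket is added")
# Typically ~99% of keys move, which is exactly what consistent hashing avoids.
```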

Cassandra instead maps every node to one or more tokens on a continuous hash -ring, and defines ownership by hashing a key onto the ring and then “walking” -the ring in one direction, similar to the Chord -algorithm. The main difference of consistent hashing to naive data hashing is -that when the number of nodes (buckets) to hash into changes, consistent -hashing only has to move a small fraction of the keys.

-

For example, if we have an eight node cluster with evenly spaced tokens, and -a replication factor (RF) of 3, then to find the owning nodes for a key we -first hash that key to generate a token (which is just the hash of the key), -and then we “walk” the ring in a clockwise fashion until we encounter three -distinct nodes, at which point we have found all the replicas of that key. -This example of an eight node cluster with RF=3 can be visualized as follows:

-
-Dynamo Ring -
-

You can see that in a Dynamo like system, ranges of keys, also known as token -ranges, map to the same physical set of nodes. In this example, all keys that -fall in the token range excluding token 1 and including token 2 (range(t1, t2]) -are stored on nodes 2, 3 and 4.

-
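The clockwise ring walk described above can be sketched in a few lines of Python. This is an illustration only: the token values and node names are invented, and a real cluster would hash keys with its configured partitioner (for example Murmur3) rather than MD5.

```python
import bisect
import hashlib

RING_SIZE = 800
# Hypothetical eight-node ring with evenly spaced tokens.
ring = [(token, f"node{i + 1}") for i, token in enumerate(range(0, RING_SIZE, 100))]
tokens = [token for token, _ in ring]

def token_for(key: str) -> int:
    # Stand-in hash function for the example.
    return int(hashlib.md5(key.encode()).hexdigest(), 16) % RING_SIZE

def replicas(key: str, rf: int = 3) -> list:
    """Walk the ring clockwise from the key's token until rf distinct nodes are found."""
    start = bisect.bisect_left(tokens, token_for(key)) % len(ring)
    found = []
    i = start
    while len(found) < rf:
        node = ring[i % len(ring)][1]
        if node not in found:
            found.append(node)
        i += 1
    return found

print(replicas("some-partition-key"))  # e.g. ['node3', 'node4', 'node5']
```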
-
-

Multiple Tokens per Physical Node (a.k.a. vnodes)

-

Simple single token consistent hashing works well if you have many physical -nodes to spread data over, but with evenly spaced tokens and a small number of -physical nodes, incremental scaling (adding just a few nodes of capacity) is -difficult because there are no token selections for new nodes that can leave -the ring balanced. Cassandra seeks to avoid token imbalance because uneven -token ranges lead to uneven request load. For example, in the previous example -there is no way to add a ninth token without causing imbalance; instead we -would have to insert 8 tokens in the midpoints of the existing ranges.

-

The Dynamo paper advocates for the use of “virtual nodes” to solve this -imbalance problem. Virtual nodes solve the problem by assigning multiple -tokens in the token ring to each physical node. By allowing a single physical -node to take multiple positions in the ring, we can make small clusters look -larger and therefore even with a single physical node addition we can make it -look like we added many more nodes, effectively taking many smaller pieces of -data from more ring neighbors when we add even a single node.

-

Cassandra introduces some nomenclature to handle these concepts:

-
    -
  • Token: A single position on the dynamo style hash ring.
  • -
  • Endpoint: A single physical IP and port on the network.
  • -
  • Host ID: A unique identifier for a single “physical” node, usually -present at one Endpoint and containing one or more Tokens.
  • -
  • Virtual Node (or vnode): A Token on the hash ring owned by the same -physical node, one with the same Host ID.
  • -
-

The mapping of Tokens to Endpoints gives rise to the Token Map -where Cassandra keeps track of what ring positions map to which physical -endpoints. For example, in the following figure we can represent an eight node -cluster using only four physical nodes by assigning two tokens to every node:

-
-Virtual Tokens Ring -
-
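A hypothetical token map for the figure above (four physical hosts, two tokens each) might look like the sketch below; the important detail is that the replica walk skips any token whose host is already in the replica set, so a key never gets two replicas on the same physical node.

```python
# Illustrative token map: token -> host ID (two vnodes per physical host).
token_map = {
    0: "host-A", 100: "host-B", 200: "host-C", 300: "host-D",
    400: "host-A", 500: "host-B", 600: "host-C", 700: "host-D",
}
ring = sorted(token_map)

def replicas_for_token(token: int, rf: int = 3) -> list:
    # Start at the first ring position at or after the token, walk clockwise,
    # and collect distinct hosts so a physical node is never counted twice.
    start = next((i for i, t in enumerate(ring) if t >= token), 0)
    hosts = []
    i = start
    while len(hosts) < rf:
        host = token_map[ring[i % len(ring)]]
        if host not in hosts:
            hosts.append(host)
        i += 1
    return hosts

print(replicas_for_token(420))  # ['host-B', 'host-C', 'host-D']
```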

Multiple tokens per physical node provide the following benefits:

-
    -
  1. When a new node is added it accepts approximately equal amounts of data from -other nodes in the ring, resulting in equal distribution of data across the -cluster.
  2. When a node is decommissioned, it loses data roughly equally to other members -of the ring, again keeping equal distribution of data across the cluster.
  3. If a node becomes unavailable, query load (especially token aware query load) -is evenly distributed across many other nodes.
-

Multiple tokens, however, can also have disadvantages:

-
    -
  1. Every token introduces up to 2 * (RF - 1) additional neighbors on the -token ring, which means that there are more combinations of node failures -where we lose availability for a portion of the token ring. The more tokens -you have, the higher the probability of an outage.
  2. Cluster-wide maintenance operations are often slowed. For example, as the -number of tokens per node is increased, the number of discrete repair -operations the cluster must do also increases.
  3. Performance of operations that span token ranges could be affected.
-

Note that in Cassandra 2.x, the only token allocation algorithm available -was picking random tokens, which meant that to keep balance the default number -of tokens per node had to be quite high, at 256. This had the effect of -coupling many physical endpoints together, increasing the risk of -unavailability. That is why in 3.x + the new deterministic token allocator -was added which intelligently picks tokens such that the ring is optimally -balanced while requiring a much lower number of tokens per physical node.

-
-
-
-

Multi-master Replication: Versioned Data and Tunable Consistency

-

Cassandra replicates every partition of data to many nodes across the cluster -to maintain high availability and durability. When a mutation occurs, the -coordinator hashes the partition key to determine the token range the data -belongs to and then replicates the mutation to the replicas of that data -according to the Replication Strategy.

-

All replication strategies have the notion of a replication factor (RF), -which indicates to Cassandra how many copies of the partition should exist. -For example with a RF=3 keyspace, the data will be written to three -distinct replicas. Replicas are always chosen such that they are distinct -physical nodes which is achieved by skipping virtual nodes if needed. -Replication strategies may also choose to skip nodes present in the same failure -domain such as racks or datacenters so that Cassandra clusters can tolerate -failures of whole racks and even datacenters of nodes.

-
-

Replication Strategy

-

Cassandra supports pluggable replication strategies, which determine which -physical nodes act as replicas for a given token range. Every keyspace of -data has its own replication strategy. All production deployments should use -the NetworkTopologyStrategy while the SimpleStrategy replication -strategy is useful only for testing clusters where you do not yet know the -datacenter layout of the cluster.

-
-

NetworkTopologyStrategy

-

NetworkTopologyStrategy allows a replication factor to be specified for each -datacenter in the cluster. Even if your cluster only uses a single datacenter, -NetworkTopologyStrategy should be preferred over SimpleStrategy to make it -easier to add new physical or virtual datacenters to the cluster later.

-

In addition to allowing the replication factor to be specified individually by -datacenter, NetworkTopologyStrategy also attempts to choose replicas within a -datacenter from different racks as specified by the Snitch. If -the number of racks is greater than or equal to the replication factor for the -datacenter, each replica is guaranteed to be chosen from a different rack. -Otherwise, each rack will hold at least one replica, but some racks may hold -more than one. Note that this rack-aware behavior has some potentially -surprising implications. For example, if -there are not an even number of nodes in each rack, the data load on the -smallest rack may be much higher. Similarly, if a single node is bootstrapped -into a brand new rack, it will be considered a replica for the entire ring. -For this reason, many operators choose to configure all nodes in a single -availability zone or similar failure domain as a single “rack”.

-
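As a concrete illustration, a keyspace using NetworkTopologyStrategy with a per-datacenter replication factor might be created as in the following sketch. It assumes the DataStax Python driver and a locally reachable node; the keyspace name and the datacenter names 'dc1'/'dc2' are placeholders and must match what your snitch actually reports.

```python
from cassandra.cluster import Cluster  # pip install cassandra-driver

cluster = Cluster(["127.0.0.1"])  # assumes a node is reachable locally
session = cluster.connect()

# Replication factor is specified per datacenter; 'dc1' and 'dc2' are placeholders.
session.execute("""
    CREATE KEYSPACE IF NOT EXISTS example_ks
    WITH replication = {
        'class': 'NetworkTopologyStrategy',
        'dc1': 3,
        'dc2': 3
    }
""")
```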
-
-

SimpleStrategy

-

SimpleStrategy allows a single integer replication_factor to be defined. This determines the number of nodes that -should contain a copy of each row. For example, if replication_factor is 3, then three different nodes should store -a copy of each row.

-

SimpleStrategy treats all nodes identically, ignoring any configured datacenters or racks. To determine the replicas -for a token range, Cassandra iterates through the tokens in the ring, starting with the token range of interest. For -each token, it checks whether the owning node has been added to the set of replicas, and if it has not, it is added to -the set. This process continues until replication_factor distinct nodes have been added to the set of replicas.

-
-
-

Transient Replication

-

Transient replication is an experimental feature in Cassandra 4.0 not present -in the original Dynamo paper. It allows you to configure a subset of replicas -to only replicate data that hasn’t been incrementally repaired. This allows you -to decouple data redundancy from availability. For instance, if you have a -keyspace replicated at rf 3, and alter it to rf 5 with 2 transient replicas, -you go from being able to tolerate one failed replica to being able to tolerate -two, without corresponding increase in storage usage. This is because 3 nodes -will replicate all the data for a given token range, and the other 2 will only -replicate data that hasn’t been incrementally repaired.

-

To use transient replication, you first need to enable it in -cassandra.yaml. Once enabled, both SimpleStrategy and -NetworkTopologyStrategy can be configured to transiently replicate data. -You configure it by specifying the replication factor as -<total_replicas>/<transient_replicas>.

-
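For example, a datacenter could be configured with five replicas of which two are transient using the <total_replicas>/<transient_replicas> form. This is a hedged sketch only: the keyspace and datacenter names are placeholders, and the statement can be executed through a driver session just like the keyspace example above.

```python
# Assumes transient replication has already been enabled in cassandra.yaml.
# '5/2' means 5 total replicas, 2 of which are transient (so 3 full replicas).
create_transient_ks = """
    CREATE KEYSPACE IF NOT EXISTS transient_example
    WITH replication = {
        'class': 'NetworkTopologyStrategy',
        'dc1': '5/2'
    }
"""
```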

Transiently replicated keyspaces only support tables created with read_repair -set to NONE and monotonic reads are not currently supported. You also -can’t use LWT, logged batches, or counters in 4.0. You will possibly never be -able to use materialized views with transiently replicated keyspaces and -probably never be able to use secondary indices with them.

-

Transient replication is an experimental feature that may not be ready for -production use. The expected audience is experienced users of Cassandra -capable of fully validating a deployment of their particular application. That -means being able check that operations like reads, writes, decommission, -remove, rebuild, repair, and replace all work with your queries, data, -configuration, operational practices, and availability requirements.

-

It is anticipated that 4.next will support monotonic reads with transient -replication as well as LWT, logged batches, and counters.

-
-
-
-

Data Versioning

-

Cassandra uses mutation timestamp versioning to guarantee eventual consistency of -data. Specifically all mutations that enter the system do so with a timestamp -provided either from a client clock or, absent a client provided timestamp, -from the coordinator node’s clock. Updates resolve according to the conflict -resolution rule of last write wins. Cassandra’s correctness does depend on -these clocks, so make sure a proper time synchronization process is running -such as NTP.

-

Cassandra applies separate mutation timestamps to every column of every row -within a CQL partition. Rows are guaranteed to be unique by primary key, and -each column in a row resolves concurrent mutations according to last-write-wins -conflict resolution. This means that updates to different primary keys within a -partition can actually resolve without conflict! Furthermore the CQL collection -types such as maps and sets use this same conflict free mechanism, meaning -that concurrent updates to maps and sets are guaranteed to resolve as well.

-
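The per-column, last-write-wins reconciliation described above can be illustrated with a small Python sketch (not Cassandra's actual implementation): each column value carries its own write timestamp, and merging two replica copies of a row keeps the newest value column by column.

```python
# A row is modelled here as {column_name: (value, write_timestamp_micros)}.
replica_a = {"name": ("alice", 1000), "email": ("old@example.invalid", 1000)}
replica_b = {"email": ("new@example.invalid", 2000), "city": ("Paris", 1500)}

def merge_rows(left: dict, right: dict) -> dict:
    """Last-write-wins per column: the copy with the higher timestamp wins."""
    merged = dict(left)
    for column, (value, ts) in right.items():
        if column not in merged or ts > merged[column][1]:
            merged[column] = (value, ts)
    return merged

print(merge_rows(replica_a, replica_b))
# {'name': ('alice', 1000), 'email': ('new@example.invalid', 2000), 'city': ('Paris', 1500)}
```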
-

Replica Synchronization

-

As replicas in Cassandra can accept mutations independently, it is possible -for some replicas to have newer data than others. Cassandra has many best-effort -techniques to drive convergence of replicas including -Replica read repair in the read path and -Hinted handoff in the write path.

-

These techniques are only best-effort, however, and to guarantee eventual -consistency Cassandra implements anti-entropy repair where replicas -calculate hierarchical hash-trees over their datasets called Merkle Trees that can then be compared across -replicas to identify mismatched data. Like the original Dynamo paper Cassandra -supports “full” repairs where replicas hash their entire dataset, create Merkle -trees, send them to each other and sync any ranges that don’t match.

-
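A deliberately simplified sketch of the Merkle tree comparison follows (real repair operates over token ranges and much deeper trees): each replica hashes its data into range buckets, builds a hash tree bottom-up, and only ranges whose leaf hashes differ need to be synchronized.

```python
import hashlib

def h(data: bytes) -> bytes:
    return hashlib.sha256(data).digest()

def merkle_levels(leaf_hashes: list) -> list:
    """Build tree levels bottom-up (assumes a power-of-two number of leaves)."""
    levels = [leaf_hashes]
    while len(levels[-1]) > 1:
        prev = levels[-1]
        levels.append([h(prev[i] + prev[i + 1]) for i in range(0, len(prev), 2)])
    return levels

def mismatched_ranges(a: list, b: list) -> list:
    """Leaf-by-leaf comparison: these are the ranges that would be streamed."""
    return [i for i, (x, y) in enumerate(zip(a, b)) if x != y]

# Four token ranges; replica B has diverged in range 2.
replica_a = [h(b"r0-data"), h(b"r1-data"), h(b"r2-data"), h(b"r3-data")]
replica_b = [h(b"r0-data"), h(b"r1-data"), h(b"r2-stale"), h(b"r3-data")]

if merkle_levels(replica_a)[-1] != merkle_levels(replica_b)[-1]:  # roots differ
    print("ranges to repair:", mismatched_ranges(replica_a, replica_b))  # [2]
```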

Unlike the original Dynamo paper, Cassandra also implements sub-range repair -and incremental repair. Sub-range repair allows Cassandra to increase the -resolution of the hash trees (potentially down to the single partition level) -by creating a larger number of trees that span only a portion of the data -range. Incremental repair allows Cassandra to only repair the partitions that -have changed since the last repair.

-
-
-
-

Tunable Consistency

-

Cassandra supports a per-operation tradeoff between consistency and -availability through Consistency Levels. Cassandra’s consistency levels -are a version of Dynamo’s R + W > N consistency mechanism where operators -could configure the number of nodes that must participate in reads (R) -and writes (W) to be larger than the replication factor (N). In -Cassandra, you instead choose from a menu of common consistency levels which -allow the operator to pick R and W behavior without knowing the -replication factor. Generally writes will be visible to subsequent reads when -the read consistency level contains enough nodes to guarantee a quorum intersection -with the write consistency level.

-

The following consistency levels are available:

-
-
ONE
-
Only a single replica must respond.
-
TWO
-
Two replicas must respond.
-
THREE
-
Three replicas must respond.
-
QUORUM
-
A majority (n/2 + 1) of the replicas must respond.
-
ALL
-
All of the replicas must respond.
-
LOCAL_QUORUM
-
A majority of the replicas in the local datacenter (whichever datacenter the coordinator is in) must respond.
-
EACH_QUORUM
-
A majority of the replicas in each datacenter must respond.
-
LOCAL_ONE
-
Only a single replica must respond. In a multi-datacenter cluster, this also guarantees that read requests are not -sent to replicas in a remote datacenter.
-
ANY
-
A single replica may respond, or the coordinator may store a hint. If a hint is stored, the coordinator will later -attempt to replay the hint and deliver the mutation to the replicas. This consistency level is only accepted for -write operations.
-
-

Write operations are always sent to all replicas, regardless of consistency -level. The consistency level simply controls how many responses the coordinator -waits for before responding to the client.

-

For read operations, the coordinator generally only issues read commands to -enough replicas to satisfy the consistency level. The one exception to this is -when speculative retry may issue a redundant read request to an extra replica -if the original replicas have not responded within a specified time window.

-
-

Picking Consistency Levels

-

It is common to pick read and write consistency levels such that the replica -sets overlap, resulting in all acknowledged writes being visible to subsequent -reads. This is typically expressed in the same terms Dynamo does, in that W + -R > RF, where W is the write consistency level, R is the read -consistency level, and RF is the replication factor. For example, if RF -= 3, a QUORUM request will require responses from at least 2 of the 3 -replicas. If QUORUM is used for both writes and reads, at least one of the -replicas is guaranteed to participate in both the write and the read request, -which in turn guarantees that the quorums will overlap and the write will be -visible to the read.

-
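The W + R > RF rule is easy to check mechanically. The sketch below is illustrative only (a simplified single-datacenter view): it maps a few consistency levels to the number of replicas they block for and tests whether a read/write pairing guarantees overlapping replica sets.

```python
RF = 3

def blocked_for(level: str, rf: int = RF) -> int:
    # Simplified view of how many replicas must respond at each level.
    return {"ONE": 1, "TWO": 2, "THREE": 3, "QUORUM": rf // 2 + 1, "ALL": rf}[level]

def overlaps(write_cl: str, read_cl: str, rf: int = RF) -> bool:
    """True if every acknowledged write is guaranteed to be visible to the read."""
    return blocked_for(write_cl, rf) + blocked_for(read_cl, rf) > rf

print(overlaps("QUORUM", "QUORUM"))  # True  (2 + 2 > 3)
print(overlaps("ONE", "ONE"))        # False (1 + 1 <= 3)
print(overlaps("ALL", "ONE"))        # True  (3 + 1 > 3)
```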

In a multi-datacenter environment, LOCAL_QUORUM can be used to provide a -weaker but still useful guarantee: reads are guaranteed to see the latest write -from within the same datacenter. This is often sufficient as clients homed to -a single datacenter will read their own writes.

-

If this type of strong consistency isn’t required, lower consistency levels -like LOCAL_ONE or ONE may be used to improve throughput, latency, and -availability. With replication spanning multiple datacenters, LOCAL_ONE is -typically less available than ONE but is faster as a rule. Indeed ONE -will succeed if a single replica is available in any datacenter.

-
-
-
-
-

Distributed Cluster Membership and Failure Detection

-

The replication protocols and dataset partitioning rely on knowing which nodes -are alive and dead in the cluster so that write and read operations can be -optimally routed. In Cassandra liveness information is shared in a distributed -fashion through a failure detection mechanism based on a gossip protocol.

-
-

Gossip

-

Gossip is how Cassandra propagates basic cluster bootstrapping information such -as endpoint membership and internode network protocol versions. In Cassandra’s -gossip system, nodes exchange state information not only about themselves but -also about other nodes they know about. This information is versioned with a -vector clock of (generation, version) tuples, where the generation is a -monotonic timestamp and version is a logical clock that increments roughly every -second. These logical clocks allow Cassandra gossip to ignore old versions of -cluster state just by inspecting the logical clocks presented with gossip -messages.

-

Every node in the Cassandra cluster runs the gossip task independently and -periodically. Every second, every node in the cluster performs the following steps (a simplified sketch of one round follows the list):

-
    -
  1. Updates the local node’s heartbeat state (the version) and constructs the -node’s local view of the cluster gossip endpoint state.
  2. Picks a random other node in the cluster to exchange gossip endpoint state -with.
  3. Probabilistically attempts to gossip with any unreachable nodes (if any exist).
  4. Gossips with a seed node if that didn’t happen in step 2.
-
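A simplified sketch of the target selection for one gossip round is shown below. It is illustrative pseudologic rather than Cassandra's implementation: the heartbeat bump and the actual state exchange are omitted, and the probability used for unreachable nodes is only a rough stand-in for the real behaviour.

```python
import random

def choose_gossip_targets(local: str, live: set, unreachable: set, seeds: set) -> list:
    """Pick the peers one gossip round would contact (selection only)."""
    targets = []

    # Step 2: exchange state with one random live peer.
    peers = sorted(live - {local})
    if peers:
        targets.append(random.choice(peers))

    # Step 3: probabilistically also gossip with an unreachable node.
    if unreachable and random.random() < len(unreachable) / (len(live) + 1):
        targets.append(random.choice(sorted(unreachable)))

    # Step 4: gossip with a seed if the peer chosen in step 2 was not one.
    seed_candidates = sorted(seeds - {local} - set(targets))
    if seed_candidates and not (set(targets) & seeds):
        targets.append(random.choice(seed_candidates))

    return targets

print(choose_gossip_targets("n1", live={"n1", "n2", "n3"}, unreachable=set(), seeds={"n2"}))
```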

When an operator first bootstraps a Cassandra cluster they designate certain -nodes as “seed” nodes. Any node can be a seed node and the only difference -between seed and non-seed nodes is seed nodes are allowed to bootstrap into the -ring without seeing any other seed nodes. Furthermore, once a cluster is -bootstrapped, seed nodes become “hotspots” for gossip due to step 4 above.

-

As non-seed nodes must be able to contact at least one seed node in order to -bootstrap into the cluster, it is common to include multiple seed nodes, often -one for each rack or datacenter. Seed nodes are often chosen using existing -off-the-shelf service discovery mechanisms.

-
-

Note

-

Nodes do not have to agree on the seed nodes, and indeed once a cluster is -bootstrapped, newly launched nodes can be configured to use any existing -nodes as “seeds”. The only advantage to picking the same nodes as seeds -is that it increases their usefulness as gossip hotspots.

-
-

Currently, gossip also propagates token metadata and schema version -information. This information forms the control plane for scheduling data -movements and schema pulls. For example, if a node sees a mismatch in schema -version in gossip state, it will schedule a schema sync task with the other -nodes. As token information propagates via gossip it is also the control plane -for teaching nodes which endpoints own what data.

-
-
-

Ring Membership and Failure Detection

-

Gossip forms the basis of ring membership, but the failure detector -ultimately makes decisions about whether nodes are UP or DOWN. Every node in -Cassandra runs a variant of the Phi Accrual Failure Detector, -in which every node is constantly making an independent decision about whether its -peer nodes are available. This decision is primarily based on received -heartbeat state. For example, if a node does not see an increasing heartbeat -from a node for a certain amount of time, the failure detector “convicts” that -node, at which point Cassandra will stop routing reads to it (writes will -typically be written to hints). If/when the node starts heartbeating again, -Cassandra will try to reach out and connect, and if it can open communication -channels it will mark that node as available.

-
-
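As a rough sketch of the accrual approach (following the general failure-detector literature rather than Cassandra's exact implementation): the detector records heartbeat inter-arrival times and, for the time Δt elapsed since the last heartbeat, computes a suspicion level

\[
\varphi(\Delta t) = -\log_{10}\bigl(P_{\mathrm{later}}(\Delta t)\bigr)
\]

where P_later(Δt) is the estimated probability, given the observed arrival history, that a heartbeat would arrive more than Δt after the previous one. The suspicion level grows continuously the longer a peer stays silent, and the peer is convicted once it crosses a configurable threshold (exposed in cassandra.yaml as phi_convict_threshold).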

Note

-

UP and DOWN state are local node decisions and are not propagated with -gossip. Heartbeat state is propagated with gossip, but nodes will not -consider each other as “UP” until they can successfully message each other -over an actual network channel.

-
-

Cassandra will never remove a node from gossip state without explicit -instruction from an operator via a decommission operation or a new node -bootstrapping with a replace_address_first_boot option. This choice is -intentional to allow Cassandra nodes to temporarily fail without causing data -to needlessly re-balance. This also helps to prevent simultaneous range -movements, where multiple replicas of a token range are moving at the same -time, which can violate monotonic consistency and can even cause data loss.

-
-
-
-

Incremental Scale-out on Commodity Hardware

-

Cassandra scales-out to meet the requirements of growth in data size and request rates. Scaling-out means adding additional nodes to the ring, and every additional node brings linear improvements in compute and storage. In contrast, scaling-up implies adding more capacity to the existing database nodes. Cassandra is also capable of scale-up, and in certain environments it may be preferable depending on the deployment. Cassandra gives operators the flexibility to choose either scale-out or scale-up.

-

One key aspect of Dynamo that Cassandra follows is to attempt to run on -commodity hardware, and many engineering choices are made under this -assumption. For example, Cassandra assumes nodes can fail at any time, -auto-tunes to make the best use of CPU and memory resources available and makes -heavy use of advanced compression and caching techniques to get the most -storage out of limited memory and storage capabilities.

-
-

Simple Query Model

-

Cassandra, like Dynamo, chooses not to provide cross-partition transactions that are common in SQL Relational Database Management Systems (RDBMS). This both gives the programmer a simpler read and write API, and allows Cassandra to scale horizontally more easily, since multi-partition transactions spanning multiple nodes are notoriously difficult to implement and typically incur high latency.

-

Instead, Cassandra chooses to offer fast, consistent latency at any scale for single partition operations, allowing retrieval of entire partitions or only subsets of partitions based on primary key filters. Furthermore, Cassandra does support single partition compare-and-swap functionality via the lightweight transaction CQL API.

-
-
-

Simple Interface for Storing Records

-

Cassandra, in a slight departure from Dynamo, chooses a storage interface that is more sophisticated than "simple key value" stores but significantly less complex than SQL relational data models. Cassandra presents a wide-column store interface, where partitions of data contain multiple rows, each of which contains a flexible set of individually typed columns. Every row is uniquely identified by the partition key and one or more clustering keys, and every row can have as many columns as needed.

-

This allows users to flexibly add new columns to existing datasets as new -requirements surface. Schema changes involve only metadata changes and run -fully concurrently with live workloads. Therefore, users can safely add columns -to existing Cassandra databases while remaining confident that query -performance will not degrade.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/architecture/guarantees.html b/src/doc/4.0-beta1/architecture/guarantees.html deleted file mode 100644 index 663e0dbfd..000000000 --- a/src/doc/4.0-beta1/architecture/guarantees.html +++ /dev/null @@ -1,175 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Guarantees" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Guarantees

-

Apache Cassandra is a highly scalable and reliable database. Cassandra is used in web based applications that serve a large number of clients and process web-scale (petabyte) quantities of data. Cassandra makes some guarantees about its scalability, availability and reliability. To fully understand the inherent limitations of a storage system in an environment in which a certain level of network partition failure is to be expected and must be accounted for in the design, it is important to first briefly introduce the CAP theorem.

-
-

What is CAP?

-

According to the CAP theorem it is not possible for a distributed data store to provide more than two of the following guarantees simultaneously.

-
    -
  • Consistency: Consistency implies that every read receives the most recent write or errors out
  • -
  • Availability: Availability implies that every request receives a response. It is not guaranteed that the response contains the most recent write or data.
  • -
  • Partition tolerance: Partition tolerance refers to a storage system's ability to keep operating in the presence of a network partition. Even if some of the messages are dropped or delayed, the system continues to operate.
  • -
-

The CAP theorem implies that in the presence of a network partition, with its inherent risk of partition failure, one has to choose between consistency and availability, and both cannot be guaranteed at the same time. The CAP theorem is illustrated in Figure 1.

-
-../_images/Figure_1_guarantees.jpg -
-

Figure 1. CAP Theorem

-

High availability is a priority in web based applications and to this objective Cassandra chooses Availability and Partition Tolerance from the CAP guarantees, compromising on data Consistency to some extent.

-

Cassandra makes the following guarantees.

-
    -
  • High Scalability
  • -
  • High Availability
  • -
  • Durability
  • -
  • Eventual Consistency of writes to a single table
  • -
  • Lightweight transactions with linearizable consistency
  • -
  • Batched writes across multiple tables are guaranteed to succeed completely or not at all
  • -
  • Secondary indexes are guaranteed to be consistent with their local replicas data
  • -
-
-
-

High Scalability

-

Cassandra is a highly scalable storage system in which nodes may be added or removed as needed. Using a gossip-based protocol, a unified and consistent membership list is kept at each node.

-
-
-

High Availability

-

Cassandra guarantees high availability of data by implementing a fault-tolerant storage system. Node failures are detected using a gossip-based protocol.

-
-
-

Durability

-

Cassandra guarantees data durability by using replicas. Replicas are multiple copies of data stored on different nodes in a cluster. In a multi-datacenter environment the replicas may be stored in different datacenters. If one replica is lost due to an unrecoverable node or datacenter failure, the data is not completely lost, as other replicas are still available.

-
-
-

Eventual Consistency

-

To meet the requirements of performance, reliability, scalability and high availability in production, Cassandra is an eventually consistent storage system. Eventually consistent implies that all updates reach all replicas eventually. Divergent versions of the same data may exist temporarily, but they are eventually reconciled to a consistent state. Eventual consistency is a tradeoff made to achieve high availability, and it comes at the cost of some read and write latency.

-
-
-

Lightweight transactions with linearizable consistency

-

Lightweight transactions guarantee that data is read and written in sequential order. The Paxos consensus protocol is used to implement lightweight transactions, which are able to handle concurrent operations using linearizable consistency. Linearizable consistency is sequential consistency with real-time constraints, and it ensures transaction isolation with compare-and-set (CAS) operations. With CAS, replica data is compared and any data found to be out of date is set to the most consistent value. Reads with linearizable consistency allow reading the current state of the data, which may possibly be uncommitted, without making a new addition or update.

-
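As a minimal sketch of that API (the store.users table and its columns are hypothetical, and cqlsh is assumed to point at a running node), compare-and-set conditions are expressed directly in CQL:

```bash
# Insert only if the row does not already exist; the result row reports [applied].
cqlsh -e "INSERT INTO store.users (username, email)
          VALUES ('alice', 'alice@example.org') IF NOT EXISTS;"

# Update only if the current value still matches the expected one.
cqlsh -e "UPDATE store.users SET email = 'alice@new.example.org'
          WHERE username = 'alice' IF email = 'alice@example.org';"
```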
-
-

Batched Writes

-

The guarantee for batched writes across multiple tables is that they will eventually succeed, or none will. Batch data is first written to the batchlog system table, and when the batch has been successfully stored in the cluster the batchlog data is removed. The batchlog is replicated to another node to ensure that the full batch completes in the event the coordinator node fails.

-
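A minimal sketch of such a batch (table and column names are hypothetical); the two inserts either both become visible or neither does:

```bash
cqlsh -e "
BEGIN BATCH
  INSERT INTO store.users_by_id   (id, name) VALUES (42, 'alice');
  INSERT INTO store.users_by_name (name, id) VALUES ('alice', 42);
APPLY BATCH;"
```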
-
-

Secondary Indexes

-

A secondary index is an index on a column that is used to query a table on a column that is not normally queryable. Once built, secondary indexes are guaranteed to be consistent with their local replicas' data.

-
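For example, an index on a regular column can be created and then used in a query filter (keyspace, table, and column names are hypothetical):

```bash
cqlsh -e "CREATE INDEX IF NOT EXISTS users_email_idx ON store.users (email);"
cqlsh -e "SELECT username FROM store.users WHERE email = 'alice@example.org';"
```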
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/architecture/index.html b/src/doc/4.0-beta1/architecture/index.html deleted file mode 100644 index dbb03ba31..000000000 --- a/src/doc/4.0-beta1/architecture/index.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Architecture" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-beta1/architecture/overview.html b/src/doc/4.0-beta1/architecture/overview.html deleted file mode 100644 index 8c74314f9..000000000 --- a/src/doc/4.0-beta1/architecture/overview.html +++ /dev/null @@ -1,198 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Overview" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Overview

-

Apache Cassandra is an open source, distributed, NoSQL database. It presents -a partitioned wide column storage model with eventually consistent semantics.

-

Apache Cassandra was initially designed at Facebook -using a staged event-driven architecture (SEDA) to implement a combination of -Amazon’s Dynamo -distributed storage and replication techniques combined with Google’s Bigtable -data and storage engine model. Dynamo and Bigtable were both developed to meet -emerging requirements for scalable, reliable and highly available storage -systems, but each had areas that could be improved.

-

Cassandra was designed as a best-in-class combination of both systems to meet emerging large-scale storage requirements, in both data footprint and query volume. As applications began to require full global replication and always-available, low-latency reads and writes, it became imperative to design a new kind of database model, as the relational database systems of the time struggled to meet the new requirements of global-scale applications.

-

Systems like Cassandra are designed for these challenges and seek the -following design objectives:

-
    -
  • Full multi-master database replication
  • -
  • Global availability at low latency
  • -
  • Scaling out on commodity hardware
  • -
  • Linear throughput increase with each additional processor
  • -
  • Online load balancing and cluster growth
  • -
  • Partitioned key-oriented queries
  • -
  • Flexible schema
  • -
-
-

Features

-

Cassandra provides the Cassandra Query Language (CQL), an SQL-like language, -to create and update database schema and access data. CQL allows users to -organize data within a cluster of Cassandra nodes using:

-
    -
  • Keyspace: defines how a dataset is replicated, for example in which -datacenters and how many copies. Keyspaces contain tables.
  • -
  • Table: defines the typed schema for a collection of partitions. Cassandra tables allow flexible addition of new columns with zero downtime. Tables contain partitions, which contain rows, which contain columns.
  • -
  • Partition: defines the mandatory part of the primary key all rows in -Cassandra must have. All performant queries supply the partition key in -the query.
  • -
  • Row: contains a collection of columns identified by a unique primary key -made up of the partition key and optionally additional clustering keys.
  • -
  • Column: A single datum with a type which belongs to a row.
  • -
-
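A minimal sketch tying these terms together (keyspace, table, and column names are hypothetical): the keyspace declares replication, the PRIMARY KEY names the partition key (user_id) and a clustering key (posted_at), and the remaining columns hold the row's data:

```bash
cqlsh -e "
CREATE KEYSPACE IF NOT EXISTS store
  WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};  -- 'dc1' must match a real datacenter name

CREATE TABLE IF NOT EXISTS store.posts (
  user_id   uuid,
  posted_at timestamp,
  body      text,
  PRIMARY KEY ((user_id), posted_at)
);"
```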

CQL supports numerous advanced features over a partitioned dataset such as:

-
    -
  • Single partition lightweight transactions with atomic compare and set -semantics.
  • -
  • User-defined types, functions and aggregates
  • -
  • Collection types including sets, maps, and lists.
  • -
  • Local secondary indices
  • -
  • (Experimental) materialized views
  • -
-

Cassandra explicitly chooses not to implement operations that require cross -partition coordination as they are typically slow and hard to provide highly -available global semantics. For example Cassandra does not support:

-
    -
  • Cross partition transactions
  • -
  • Distributed joins
  • -
  • Foreign keys or referential integrity.
  • -
-
-
-

Operating

-

Apache Cassandra configuration settings are configured in the cassandra.yaml -file that can be edited by hand or with the aid of configuration management tools. -Some settings can be manipulated live using an online interface, but others -require a restart of the database to take effect.

-

Cassandra provides tools for managing a cluster. The nodetool command -interacts with Cassandra’s live control interface, allowing runtime manipulation -of many settings from cassandra.yaml. The auditlogviewer is used -to view the audit logs. The fqltool is used to view, replay and compare -full query logs. The auditlogviewer and fqltool are new tools in -Apache Cassandra 4.0.

-
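A few illustrative nodetool invocations (the available sub-commands and their output vary by Cassandra version):

```bash
# Show the state of every node in the ring as seen by this node.
nodetool status

# Inspect and adjust a live setting without a restart, e.g. compaction throughput.
nodetool getcompactionthroughput
nodetool setcompactionthroughput 64
```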

In addition, Cassandra supports out of the box atomic snapshot functionality, -which presents a point in time snapshot of Cassandra’s data for easy -integration with many backup tools. Cassandra also supports incremental backups -where data can be backed up as it is written.

-
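For example, a point-in-time snapshot of a single keyspace can be taken and later removed with nodetool (the keyspace name and tag are placeholders):

```bash
# Take a snapshot of the 'store' keyspace, tagged so it is easy to locate on disk.
nodetool snapshot -t before_upgrade store

# Delete the snapshot once it is no longer needed.
nodetool clearsnapshot -t before_upgrade store
```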

Apache Cassandra 4.0 has added several new features including virtual tables, transient replication, audit logging, full query logging, and support for Java 11. Two of these features are experimental: transient replication and Java 11 support.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/architecture/storage_engine.html b/src/doc/4.0-beta1/architecture/storage_engine.html deleted file mode 100644 index 2f7245b8d..000000000 --- a/src/doc/4.0-beta1/architecture/storage_engine.html +++ /dev/null @@ -1,294 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Architecture" - -doc-title: "Storage Engine" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Storage Engine

-
-

CommitLog

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied to memtables.

-

All mutations are write-optimized by being stored in commitlog segments, reducing the number of seeks needed to write to disk. Commitlog segments are limited by the "commitlog_segment_size_in_mb" option; once the size is reached, a new commitlog segment is created. Commitlog segments can be archived, deleted, or recycled once all their data has been flushed to SSTables. Commitlog segments are truncated when Cassandra has written data older than a certain point to the SSTables. Running "nodetool drain" before stopping Cassandra will write everything in the memtables to SSTables and remove the need to sync with the commitlogs on startup.

-
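For instance, a clean shutdown that avoids commit log replay on the next start might look like the following (the service name is an assumption and depends on how Cassandra was installed):

```bash
# Flush all memtables to SSTables and stop accepting writes on this node...
nodetool drain

# ...then stop the process (package installs typically register a 'cassandra' service).
sudo systemctl stop cassandra
```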
    -
  • commitlog_segment_size_in_mb: The default size is 32, which is almost always fine, but if you are archiving commitlog segments (see commitlog_archiving.properties), then you probably want a finer granularity of archiving; 8 or 16 MB is reasonable. Max mutation size is also configurable via the max_mutation_size_in_kb setting in cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024.
  • -
-

*NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must be set to at least twice the size of max_mutation_size_in_kb / 1024*

-

Default Value: 32

-

Commitlogs are an append only log of all mutations local to a Cassandra node. Any data written to Cassandra will first be written to a commit log before being written to a memtable. This provides durability in the case of unexpected shutdown. On startup, any mutations in the commit log will be applied.

-
    -
  • commitlog_sync: may be either “periodic” or “batch.”

    -
      -
    • batch: In batch mode, Cassandra won’t ack writes until the commit log has been fsynced to disk. It will wait “commitlog_sync_batch_window_in_ms” milliseconds between fsyncs. This window should be kept short because the writer threads will be unable to do extra work while waiting. You may need to increase concurrent_writes for the same reason.

      -
        -
      • commitlog_sync_batch_window_in_ms: Time to wait between “batch” fsyncs
      • -
      -

      Default Value: 2

      -
    • -
    • periodic: In periodic mode, writes are immediately ack’ed, and the CommitLog is simply synced every “commitlog_sync_period_in_ms” milliseconds.

      -
        -
      • commitlog_sync_period_in_ms: Time to wait between “periodic” fsyncs
      • -
      -

      Default Value: 10000

      -
    • -
    -
  • -
-

Default Value: batch

-

* NOTE: In the event of an unexpected shutdown, Cassandra can lose up to the sync period or more if the sync is delayed. If using “batch” mode, it is recommended to store commitlogs in a separate, dedicated device.

-
    -
  • commitlog_directory: This option is commented out by default. When running on magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
  • -
-

Default Value: /var/lib/cassandra/commitlog

-
    -
  • commitlog_compression: Compression to apply to the commitlog. If omitted, the commit log will be written uncompressed. LZ4, Snappy, Deflate and Zstd compressors are supported.
  • -
-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
    -
  • commitlog_total_space_in_mb: Total space to use for commit logs on disk.
  • -
-

If space gets above this value, Cassandra will flush every dirty CF in the oldest segment and remove it. So a small total commitlog space will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space of the commitlog volume.

-

Default Value: 8192

-
-
-

Memtables

-

Memtables are in-memory structures where Cassandra buffers writes. In general, there is one active memtable per table. -Eventually, memtables are flushed onto disk and become immutable SSTables. This can be triggered in several -ways:

-
    -
  • The memory usage of the memtables exceeds the configured threshold (see memtable_cleanup_threshold)
  • -
  • The CommitLog approaches its maximum size, and forces memtable flushes in order to allow commitlog segments to -be freed
  • -
-
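A flush can also be requested by hand, which can be useful when experimenting with these thresholds (keyspace and table names are placeholders):

```bash
# Flush the memtables of one table (or omit the table to flush a whole keyspace).
nodetool flush store posts
```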

Memtables may be stored entirely on-heap or partially off-heap, depending on memtable_allocation_type.

-
-
-

SSTables

-

SSTables are the immutable data files that Cassandra uses for persisting data on disk.

-

As SSTables are flushed to disk from Memtables or are streamed from other nodes, Cassandra triggers compactions -which combine multiple SSTables into one. Once the new SSTable has been written, the old SSTables can be removed.

-

Each SSTable is comprised of multiple components stored in separate files:

-
-
Data.db
-
The actual data, i.e. the contents of rows.
-
Index.db
-
An index from partition keys to positions in the Data.db file. For wide partitions, this may also include an -index to rows within a partition.
-
Summary.db
-
A sampling of (by default) every 128th entry in the Index.db file.
-
Filter.db
-
A Bloom Filter of the partition keys in the SSTable.
-
CompressionInfo.db
-
Metadata about the offsets and lengths of compression chunks in the Data.db file.
-
Statistics.db
-
Stores metadata about the SSTable, including information about timestamps, tombstones, clustering keys, compaction, -repair, compression, TTLs, and more.
-
Digest.crc32
-
A CRC-32 digest of the Data.db file.
-
TOC.txt
-
A plain text list of the component files for the SSTable.
-
-

Within the Data.db file, rows are organized by partition. These partitions are sorted in token order (i.e. by a hash of the partition key when the default partitioner, Murmur3Partitioner, is used). Within a partition, rows are stored in the order of their clustering keys.

-

SSTables can be optionally compressed using block-based compression.

-
-
-

SSTable Versions

-

This section was created using the following -gist -which utilized this original -source.

-

The version numbers, to date are:

-
-

Version 0

-
    -
  • b (0.7.0): added version to sstable filenames
  • -
  • c (0.7.0): bloom filter component computes hashes over raw key bytes instead of strings
  • -
  • d (0.7.0): row size in data component becomes a long instead of int
  • -
  • e (0.7.0): stores undecorated keys in data and index components
  • -
  • f (0.7.0): switched bloom filter implementations in data component
  • -
  • g (0.8): tracks flushed-at context in metadata component
  • -
-
-
-

Version 1

-
    -
  • h (1.0): tracks max client timestamp in metadata component
  • -
  • hb (1.0.3): records compression ratio in metadata component
  • -
  • hc (1.0.4): records partitioner in metadata component
  • -
  • hd (1.0.10): includes row tombstones in maxtimestamp
  • -
  • he (1.1.3): includes ancestors generation in metadata component
  • -
  • hf (1.1.6): marker that replay position corresponds to 1.1.5+ millis-based id (see CASSANDRA-4782)
  • -
  • ia (1.2.0):
      -
    • column indexes are promoted to the index file
    • -
    • records estimated histogram of deletion times in tombstones
    • -
    • bloom filter (keys and columns) upgraded to Murmur3
    • -
    -
  • -
  • ib (1.2.1): tracks min client timestamp in metadata component
  • -
  • ic (1.2.5): omits per-row bloom filter of column names
  • -
-
-
-

Version 2

-
    -
  • ja (2.0.0):
      -
    • super columns are serialized as composites (note that there is no real format change, this is mostly a marker to know if we should expect super columns or not. We do need a major version bump however, because we should not allow streaming of super columns into this new format)
    • -
    • tracks max local deletiontime in sstable metadata
    • -
    • records bloom_filter_fp_chance in metadata component
    • -
    • remove data size and column count from data file (CASSANDRA-4180)
    • -
    • tracks max/min column values (according to comparator)
    • -
    -
  • -
  • jb (2.0.1):
      -
    • switch from crc32 to adler32 for compression checksums
    • -
    • checksum the compressed data
    • -
    -
  • -
  • ka (2.1.0):
      -
    • new Statistics.db file format
    • -
    • index summaries can be downsampled and the sampling level is persisted
    • -
    • switch uncompressed checksums to adler32
    • -
    • tracks presence of legacy (local and remote) counter shards
    • -
    -
  • -
  • la (2.2.0): new file name format
  • -
  • lb (2.2.7): commit log lower bound included
  • -
-
-
-

Version 3

-
    -
  • ma (3.0.0):
      -
    • swap bf hash order
    • -
    • store rows natively
    • -
    -
  • -
  • mb (3.0.7, 3.7): commit log lower bound included
  • -
  • mc (3.0.8, 3.9): commit log intervals included
  • -
-
-
-

Example Code

-

The following example is useful for finding all sstables that do not match the “ib” SSTable version

-
find /var/lib/cassandra/data/ -type f | grep -v -- -ib- | grep -v "/snapshots"
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/bugs.html b/src/doc/4.0-beta1/bugs.html deleted file mode 100644 index f2e5c8f15..000000000 --- a/src/doc/4.0-beta1/bugs.html +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Reporting Bugs" -doc-header-links: ' - - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Reporting Bugs

-

If you encounter a problem with Cassandra, the first places to ask for help are the user mailing list and the cassandra Slack room.

-

If, after having asked for help, you suspect that you have found a bug in Cassandra, you should report it by opening a ticket through the Apache Cassandra JIRA. Please provide as much detail as you can about your problem, and don't forget to indicate which version of Cassandra you are running and in which environment.

-

Further details on how to contribute can be found at our Contributing to Cassandra section. Please note that the source of -this documentation is part of the Cassandra git repository and hence contributions to the documentation should follow the -same path.

-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/configuration/cassandra_config_file.html b/src/doc/4.0-beta1/configuration/cassandra_config_file.html deleted file mode 100644 index 7b58ff4ab..000000000 --- a/src/doc/4.0-beta1/configuration/cassandra_config_file.html +++ /dev/null @@ -1,1974 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Configuring Cassandra" - -doc-title: "Cassandra Configuration File" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Cassandra Configuration File

-
-

cluster_name

-

The name of the cluster. This is mainly used to prevent machines in -one logical cluster from joining another.

-

Default Value: ‘Test Cluster’

-
-
-

num_tokens

-

This defines the number of tokens randomly assigned to this node on the ring. The more tokens, relative to other nodes, the larger the proportion of data that this node will store. You probably want all nodes to have the same number of tokens, assuming they have equal hardware capability.

-

If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -and will use the initial_token as described below.

-

Specifying initial_token will override this setting on the node's initial start; on subsequent starts, this setting will apply even if initial_token is set.

-

If you already have a cluster with 1 token per node, and wish to migrate to -multiple tokens per node, see http://wiki.apache.org/cassandra/Operations

-

Default Value: 256

-
-
-

allocate_tokens_for_keyspace

-

This option is commented out by default.

-

Triggers automatic allocation of num_tokens tokens for this node. The allocation -algorithm attempts to choose tokens in a way that optimizes replicated load over -the nodes in the datacenter for the replica factor.

-

The load assigned to each node will be close to proportional to its number of -vnodes.

-

Only supported with the Murmur3Partitioner.

-

Replica factor is determined via the replication strategy used by the specified -keyspace.

-

Default Value: KEYSPACE

-
-
-

allocate_tokens_for_local_replication_factor

-

This option is commented out by default.

-

Replica factor is explicitly set, regardless of keyspace or datacenter. -This is the replica factor within the datacenter, like NTS.

-

Default Value: 3

-
-
-

initial_token

-

This option is commented out by default.

-

initial_token allows you to specify tokens manually. While you can use it with -vnodes (num_tokens > 1, above) – in which case you should provide a -comma-separated list – it’s primarily used when adding nodes to legacy clusters -that do not have vnodes enabled.

-
-
-

hinted_handoff_enabled

-

See http://wiki.apache.org/cassandra/HintedHandoff -May either be “true” or “false” to enable globally

-

Default Value: true

-
-
-

hinted_handoff_disabled_datacenters

-

This option is commented out by default.

-

When hinted_handoff_enabled is true, a black list of data centers that will not -perform hinted handoff

-

Default Value (complex option):

-
#    - DC1
-#    - DC2
-
-
-
-
-

max_hint_window_in_ms

-

This defines the maximum amount of time a dead host will have hints generated. After it has been dead this long, new hints for it will not be created until it has been seen alive and gone down again.

-

Default Value: 10800000 # 3 hours

-
-
-

hinted_handoff_throttle_in_kb

-

Maximum throttle in KBs per second, per delivery thread. This will be -reduced proportionally to the number of nodes in the cluster. (If there -are two nodes in the cluster, each delivery thread will use the maximum -rate; if there are three, each will throttle to half of the maximum, -since we expect two nodes to be delivering hints simultaneously.)

-

Default Value: 1024

-
-
-

max_hints_delivery_threads

-

Number of threads with which to deliver hints; -Consider increasing this number when you have multi-dc deployments, since -cross-dc handoff tends to be slower

-

Default Value: 2

-
-
-

hints_directory

-

This option is commented out by default.

-

Directory where Cassandra should store hints. -If not set, the default directory is $CASSANDRA_HOME/data/hints.

-

Default Value: /var/lib/cassandra/hints

-
-
-

hints_flush_period_in_ms

-

How often hints should be flushed from the internal buffers to disk. -Will not trigger fsync.

-

Default Value: 10000

-
-
-

max_hints_file_size_in_mb

-

Maximum size for a single hints file, in megabytes.

-

Default Value: 128

-
-
-

hints_compression

-

This option is commented out by default.

-

Compression to apply to the hint files. If omitted, hints files -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

batchlog_replay_throttle_in_kb

-

Maximum throttle in KBs per second, total. This will be -reduced proportionally to the number of nodes in the cluster.

-

Default Value: 1024

-
-
-

authenticator

-

Authentication backend, implementing IAuthenticator; used to identify users -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -PasswordAuthenticator}.

-
    -
  • AllowAllAuthenticator performs no checks - set it to disable authentication.
  • -
  • PasswordAuthenticator relies on username/password pairs to authenticate -users. It keeps usernames and hashed passwords in system_auth.roles table. -Please increase system_auth keyspace replication factor if you use this authenticator. -If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
  • -
-

Default Value: AllowAllAuthenticator

-
-
-

authorizer

-

Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -CassandraAuthorizer}.

-
    -
  • AllowAllAuthorizer allows any action to any user - set it to disable authorization.
  • -
  • CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllAuthorizer

-
-
-

role_manager

-

Part of the Authentication & Authorization backend, implementing IRoleManager; used -to maintain grants and memberships between roles. -Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -which stores role information in the system_auth keyspace. Most functions of the -IRoleManager require an authenticated login, so unless the configured IAuthenticator -actually implements authentication, most of this functionality will be unavailable.

-
    -
  • CassandraRoleManager stores role data in the system_auth keyspace. Please -increase system_auth keyspace replication factor if you use this role manager.
  • -
-

Default Value: CassandraRoleManager

-
-
-

network_authorizer

-

Network authorization backend, implementing INetworkAuthorizer; used to restrict user -access to certain DCs -Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllNetworkAuthorizer, -CassandraNetworkAuthorizer}.

-
    -
  • AllowAllNetworkAuthorizer allows access to any DC to any user - set it to disable authorization.
  • -
  • CassandraNetworkAuthorizer stores permissions in system_auth.network_permissions table. Please -increase system_auth keyspace replication factor if you use this authorizer.
  • -
-

Default Value: AllowAllNetworkAuthorizer

-
-
-

roles_validity_in_ms

-

Validity period for roles cache (fetching granted roles can be an expensive -operation depending on the role manager, CassandraRoleManager is one example) -Granted roles are cached for authenticated sessions in AuthenticatedUser and -after the period specified here, become eligible for (async) reload. -Defaults to 2000, set to 0 to disable caching entirely. -Will be disabled automatically for AllowAllAuthenticator.

-

Default Value: 2000

-
-
-

roles_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for roles cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If roles_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as roles_validity_in_ms.

-

Default Value: 2000

-
-
-

permissions_validity_in_ms

-

Validity period for permissions cache (fetching permissions can be an -expensive operation depending on the authorizer, CassandraAuthorizer is -one example). Defaults to 2000, set to 0 to disable. -Will be disabled automatically for AllowAllAuthorizer.

-

Default Value: 2000

-
-
-

permissions_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for permissions cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If permissions_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as permissions_validity_in_ms.

-

Default Value: 2000

-
-
-

credentials_validity_in_ms

-

Validity period for credentials cache. This cache is tightly coupled to -the provided PasswordAuthenticator implementation of IAuthenticator. If -another IAuthenticator implementation is configured, this cache will not -be automatically used and so the following settings will have no effect. -Please note, credentials are cached in their encrypted form, so while -activating this cache may reduce the number of queries made to the -underlying table, it may not bring a significant reduction in the -latency of individual authentication attempts. -Defaults to 2000, set to 0 to disable credentials caching.

-

Default Value: 2000

-
-
-

credentials_update_interval_in_ms

-

This option is commented out by default.

-

Refresh interval for credentials cache (if enabled). -After this interval, cache entries become eligible for refresh. Upon next -access, an async reload is scheduled and the old value returned until it -completes. If credentials_validity_in_ms is non-zero, then this must be -also. -Defaults to the same value as credentials_validity_in_ms.

-

Default Value: 2000

-
-
-

partitioner

-

The partitioner is responsible for distributing groups of rows (by -partition key) across nodes in the cluster. The partitioner can NOT be -changed without reloading all data. If you are adding nodes or upgrading, -you should set this to the same partitioner that you are currently using.

-

The default partitioner is the Murmur3Partitioner. Older partitioners -such as the RandomPartitioner, ByteOrderedPartitioner, and -OrderPreservingPartitioner have been included for backward compatibility only. -For new clusters, you should NOT change this value.

-

Default Value: org.apache.cassandra.dht.Murmur3Partitioner

-
-
-

data_file_directories

-

This option is commented out by default.

-

Directories where Cassandra should store data on disk. If multiple -directories are specified, Cassandra will spread data evenly across -them by partitioning the token ranges. -If not set, the default directory is $CASSANDRA_HOME/data/data.

-

Default Value (complex option):

-
#     - /var/lib/cassandra/data
-
-
-
-
-

commitlog_directory

-

This option is commented out by default. Directory where Cassandra should store the commit log. When running on magnetic HDD, this should be a separate spindle from the data directories. If not set, the default directory is $CASSANDRA_HOME/data/commitlog.

-

Default Value: /var/lib/cassandra/commitlog

-
-
-

cdc_enabled

-

Enable / disable CDC functionality on a per-node basis. This modifies the logic used -for write path allocation rejection (standard: never reject. cdc: reject Mutation -containing a CDC-enabled table if at space limit in cdc_raw_directory).

-

Default Value: false

-
-
-

cdc_raw_directory

-

This option is commented out by default.

-

CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -segment contains mutations for a CDC-enabled table. This should be placed on a -separate spindle than the data directories. If not set, the default directory is -$CASSANDRA_HOME/data/cdc_raw.

-

Default Value: /var/lib/cassandra/cdc_raw

-
-
-

disk_failure_policy

-

Policy for data disk failures:

-
-
die
-
shut down gossip and client transports and kill the JVM for any fs errors or -single-sstable errors, so the node can be replaced.
-
stop_paranoid
-
shut down gossip and client transports even for single-sstable errors, -kill the JVM for errors during startup.
-
stop
-
shut down gossip and client transports, leaving the node effectively dead, but -can still be inspected via JMX, kill the JVM for errors during startup.
-
best_effort
-
stop using the failed disk and respond to requests based on -remaining available sstables. This means you WILL see obsolete -data at CL.ONE!
-
ignore
-
ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-
-

Default Value: stop

-
-
-

commit_failure_policy

-

Policy for commit disk failures:

-
-
die
-
shut down the node and kill the JVM, so the node can be replaced.
-
stop
-
shut down the node, leaving the node effectively dead, but -can still be inspected via JMX.
-
stop_commit
-
shutdown the commit log, letting writes collect but -continuing to service reads, as in pre-2.0.5 Cassandra
-
ignore
-
ignore fatal errors and let the batches fail
-
-

Default Value: stop

-
-
-

prepared_statements_cache_size_mb

-

Maximum size of the native protocol prepared statement cache

-

Valid values are either "auto" (omitting the value) or a value greater than 0.

-

Note that specifying too large a value will result in long running GCs and possibly out-of-memory errors. Keep the value at a small fraction of the heap.

-

If you constantly see “prepared statements discarded in the last minute because -cache limit reached” messages, the first step is to investigate the root cause -of these messages and check whether prepared statements are used correctly - -i.e. use bind markers for variable parts.

-

Only change the default value if you really have more prepared statements than fit in the cache. In most cases it is not necessary to change this value. Constantly re-preparing statements is a performance penalty.

-

Default value (“auto”) is 1/256th of the heap or 10MB, whichever is greater

-
-
-

key_cache_size_in_mb

-

Maximum size of the key cache in memory.

-

Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -minimum, sometimes more. The key cache is fairly tiny for the amount of -time it saves, so it’s worthwhile to use it at large numbers. -The row cache saves even more time, but must contain the entire row, -so it is extremely space-intensive. It’s best to only use the -row cache if you have hot rows or static rows.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.

-
-
-

key_cache_save_period

-

Duration in seconds after which Cassandra should -save the key cache. Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 14400 or 4 hours.

-

Default Value: 14400

-
-
-

key_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the key cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

row_cache_class_name

-

This option is commented out by default.

-

Row cache implementation class name. Available implementations:

-
-
org.apache.cassandra.cache.OHCProvider
-
Fully off-heap row cache implementation (default).
-
org.apache.cassandra.cache.SerializingCacheProvider
-
This is the row cache implementation available in previous releases of Cassandra.
-
-

Default Value: org.apache.cassandra.cache.OHCProvider

-
-
-

row_cache_size_in_mb

-

Maximum size of the row cache in memory. Please note that the OHC cache implementation requires some additional off-heap memory to manage the map structures and some in-flight memory during operations before/after cache entries can be accounted against the cache capacity. This overhead is usually small compared to the whole capacity. Do not specify more memory than the system can afford in the worst usual situation, and leave some headroom for the OS block level cache. Never allow your system to swap.

-

Default value is 0, to disable row caching.

-

Default Value: 0

-
-
-

row_cache_save_period

-

Duration in seconds after which Cassandra should save the row cache. -Caches are saved to saved_caches_directory as specified in this configuration file.

-

Saved caches greatly improve cold-start speeds, and is relatively cheap in -terms of I/O for the key cache. Row cache saving is much more expensive and -has limited use.

-

Default is 0 to disable saving the row cache.

-

Default Value: 0

-
-
-

row_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the row cache to save. -Specify 0 (which is the default), meaning all keys are going to be saved

-

Default Value: 100

-
-
-

counter_cache_size_in_mb

-

Maximum size of the counter cache in memory.

-

Counter cache helps to reduce counter locks’ contention for hot counter cells. -In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -of the lock hold, helping with hot counter cell updates, but will not allow skipping -the read entirely. Only the local (clock, count) tuple of a counter cell is kept -in memory, not the whole counter, so it’s relatively cheap.

-

NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.

-

Default value is empty to make it “auto” (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.

-
-
-

counter_cache_save_period

-

Duration in seconds after which Cassandra should -save the counter cache (keys only). Caches are saved to saved_caches_directory as -specified in this configuration file.

-

Default is 7200 or 2 hours.

-

Default Value: 7200

-
-
-

counter_cache_keys_to_save

-

This option is commented out by default.

-

Number of keys from the counter cache to save -Disabled by default, meaning all keys are going to be saved

-

Default Value: 100

-
-
-

saved_caches_directory

-

This option is commented out by default.

-

Directory where Cassandra should store saved caches. If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.

-

Default Value: /var/lib/cassandra/saved_caches

-
-
-

commitlog_sync_batch_window_in_ms

-

This option is commented out by default.

-

commitlog_sync may be either “periodic”, “group”, or “batch.”

-

When in batch mode, Cassandra won’t ack writes until the commit log -has been flushed to disk. Each incoming write will trigger the flush task. -commitlog_sync_batch_window_in_ms is a deprecated value. Previously it had -almost no value, and is being removed.

-

Default Value: 2

-
-
-

commitlog_sync_group_window_in_ms

-

This option is commented out by default.

-

group mode is similar to batch mode, where Cassandra will not ack writes -until the commit log has been flushed to disk. The difference is group -mode will wait up to commitlog_sync_group_window_in_ms between flushes.

-

Default Value: 1000

-
-
-

commitlog_sync

-

the default option is “periodic” where writes may be acked immediately -and the CommitLog is simply synced every commitlog_sync_period_in_ms -milliseconds.

-

Default Value: periodic

-
-
-

commitlog_sync_period_in_ms

-

Default Value: 10000

-
-
-

periodic_commitlog_sync_lag_block_in_ms

-

This option is commented out by default.

-

When in periodic commitlog mode, the number of milliseconds to block writes -while waiting for a slow disk flush to complete.

-
-
-

commitlog_segment_size_in_mb

-

The size of the individual commitlog file segments. A commitlog -segment may be archived, deleted, or recycled once all the data -in it (potentially from each columnfamily in the system) has been -flushed to sstables.

-

The default size is 32, which is almost always fine, but if you are -archiving commitlog segments (see commitlog_archiving.properties), -then you probably want a finer granularity of archiving; 8 or 16 MB -is reasonable. -Max mutation size is also configurable via max_mutation_size_in_kb setting in -cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. -This should be positive and less than 2048.

-

NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -be set to at least twice the size of max_mutation_size_in_kb / 1024

-

Default Value: 32

-
-
-

commitlog_compression

-

This option is commented out by default.

-

Compression to apply to the commit log. If omitted, the commit log -will be written uncompressed. LZ4, Snappy, and Deflate compressors -are supported.

-

Default Value (complex option):

-
#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-
-
-
-

flush_compression

This option is commented out by default.

Compression to apply to SSTables as they flush for compressed tables.
Note that tables without compression enabled do not respect this flag.

As high ratio compressors like LZ4HC, Zstd, and Deflate can potentially
block flushes for too long, the default is to flush with a known fast
compressor in those cases. Options are:

none : Flush without compressing blocks but while still doing checksums.
fast : Flush with a fast compressor. If the table is already using a
       fast compressor that compressor is used.
table: Always flush with the same compressor that the table uses. This
       was the pre 4.0 behavior.

Default Value: fast

-
-
-

seed_provider

-

any class that implements the SeedProvider interface and has a -constructor that takes a Map<String, String> of parameters will do.

-

Default Value (complex option):

-
# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring.  You must change this if you are running
-# multiple nodes!
-- class_name: org.apache.cassandra.locator.SimpleSeedProvider
-  parameters:
-      # seeds is actually a comma-delimited list of addresses.
-      # Ex: "<ip1>,<ip2>,<ip3>"
-      - seeds: "127.0.0.1:7000"
-
-
-
-
-

concurrent_reads

-

For workloads with more data than can fit in memory, Cassandra’s -bottleneck will be reads that need to fetch data from -disk. “concurrent_reads” should be set to (16 * number_of_drives) in -order to allow the operations to enqueue low enough in the stack -that the OS and drives can reorder them. Same applies to -“concurrent_counter_writes”, since counter writes read the current -values before incrementing and writing them back.

-

On the other hand, since writes are almost never IO bound, the ideal -number of “concurrent_writes” is dependent on the number of cores in -your system; (8 * number_of_cores) is a good rule of thumb.

-
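A worked example under assumed hardware (4 data drives and 16 cores), applying the rules of thumb above:

```bash
# Assumed hardware; plug your own numbers in.
drives=4
cores=16
echo "concurrent_reads=$((16 * drives))"    # 64; the same rule fits concurrent_counter_writes
echo "concurrent_writes=$((8 * cores))"     # 128
```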

Default Value: 32

-
-
-

concurrent_writes

-

Default Value: 32

-
-
-

concurrent_counter_writes

-

Default Value: 32

-
-
-

concurrent_materialized_view_writes

-

For materialized view writes, as there is a read involved, this should be limited by the lesser of concurrent reads or concurrent writes.

-

Default Value: 32

-
-
-

file_cache_size_in_mb

-

This option is commented out by default.

-

Maximum memory to use for the sstable chunk cache and buffer pooling. 32MB of this is reserved for pooling buffers; the rest is used as a cache that holds uncompressed sstable chunks. Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, so it is in addition to the memory allocated for heap. The cache also has on-heap overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size if the default 64k chunk size is used). Memory is only allocated when needed.

-

Default Value: 512

-
-
-

buffer_pool_use_heap_if_exhausted

-

This option is commented out by default.

-

Flag indicating whether to allocate on or off heap when the sstable buffer -pool is exhausted, that is when it has exceeded the maximum memory -file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

-

Default Value: true

-
-
-

disk_optimization_strategy

-

This option is commented out by default.

-

The strategy for optimizing disk read -Possible values are: -ssd (for solid state disks, the default) -spinning (for spinning disks)

-

Default Value: ssd

-
-
-

memtable_heap_space_in_mb

-

This option is commented out by default.

-

Total permitted memory to use for memtables. Cassandra will stop -accepting writes when the limit is exceeded until a flush completes, -and will trigger a flush based on memtable_cleanup_threshold -If omitted, Cassandra will set both to 1/4 the size of the heap.

-

Default Value: 2048

-
-
-

memtable_offheap_space_in_mb

-

This option is commented out by default.

-

Default Value: 2048

-
-
-

memtable_cleanup_threshold

-

This option is commented out by default.

-

memtable_cleanup_threshold is deprecated. The default calculation -is the only reasonable choice. See the comments on memtable_flush_writers -for more information.

-

Ratio of occupied non-flushing memtable size to total permitted size -that will trigger a flush of the largest memtable. Larger mct will -mean larger flushes and hence less compaction, but also less concurrent -flush activity which can make it difficult to keep your disks fed -under heavy write load.

-

memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)

-

Default Value: 0.11

-
-
-

memtable_allocation_type

-

Specify the way Cassandra allocates and manages memtable memory. -Options are:

-
-
heap_buffers
-
on heap nio buffers
-
offheap_buffers
-
off heap (direct) nio buffers
-
offheap_objects
-
off heap objects
-
-

Default Value: heap_buffers

-
-
-

repair_session_space_in_mb

-

This option is commented out by default.

-

Limit memory usage for Merkle tree calculations during repairs. The default -is 1/16th of the available heap. The main tradeoff is that smaller trees -have less resolution, which can lead to over-streaming data. If you see heap -pressure during repairs, consider lowering this, but you cannot go below -one megabyte. If you see lots of over-streaming, consider raising -this or using subrange repair.

-

For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096.

-
-
-

commitlog_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for commit logs on disk.

-

If space gets above this value, Cassandra will flush every dirty CF -in the oldest segment and remove it. So a small total commitlog space -will tend to cause more flush activity on less-active columnfamilies.

-

The default value is the smaller of 8192, and 1/4 of the total space -of the commitlog volume.

-

Default Value: 8192

-
-
-

memtable_flush_writers

-

This option is commented out by default.

-

This sets the number of memtable flush writer threads per disk -as well as the total number of memtables that can be flushed concurrently. -These are generally a combination of compute and IO bound.

-

Memtable flushing is more CPU efficient than memtable ingest and a single thread -can keep up with the ingest rate of a whole server on a single fast disk -until it temporarily becomes IO bound under contention typically with compaction. -At that point you need multiple flush threads. At some point in the future -it may become CPU bound all the time.

-

You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation -metric which should be 0, but will be non-zero if threads are blocked waiting on flushing -to free memory.

-

memtable_flush_writers defaults to two for a single data directory. -This means that two memtables can be flushed concurrently to the single data directory. -If you have multiple data directories the default is one memtable flushing at a time -but the flush will use a thread per data directory so you will get two or more writers.

-

Two is generally enough to flush on a fast disk [array] mounted as a single data directory. -Adding more flush writers will result in smaller more frequent flushes that introduce more -compaction overhead.

-

There is a direct tradeoff between the number of memtables that can be flushed concurrently and flush size and frequency. More is not better; you just need enough flush writers to never stall waiting for flushing to free memory.

-

Default Value: 2

-
-
-

cdc_total_space_in_mb

-

This option is commented out by default.

-

Total space to use for change-data-capture logs on disk.

-

If space gets above this value, Cassandra will throw WriteTimeoutException -on Mutations including tables with CDC enabled. A CDCCompactor is responsible -for parsing the raw CDC logs and deleting them when parsing is completed.

-

The default value is the min of 4096 mb and 1/8th of the total space -of the drive where cdc_raw_directory resides.

-

Default Value: 4096

-
-
-

cdc_free_space_check_interval_ms

-

This option is commented out by default.

-

When we hit our cdc_raw limit and the CDCCompactor is either running behind or experiencing backpressure, we check at the following interval to see if any new space for cdc-tracked tables has been made available. Defaults to 250ms.

-

Default Value: 250

-
-
-

index_summary_capacity_in_mb

-

A fixed memory pool size in MB for SSTable index summaries. If left empty, this will default to 5% of the heap size. If the memory usage of all index summaries exceeds this limit, SSTables with low read rates will shrink their index summaries in order to meet this limit. However, this is a best-effort process. In extreme conditions Cassandra may need to use more than this amount of memory.

-
-
-

index_summary_resize_interval_in_minutes

-

How frequently index summaries should be resampled. This is done periodically to redistribute memory from the fixed-size pool to sstables proportional to their recent read rates. Setting to -1 will disable this process, leaving existing index summaries at their current sampling level.

-

Default Value: 60

-
-
-

trickle_fsync

-

Whether to, when doing sequential writing, fsync() at intervals in -order to force the operating system to flush the dirty -buffers. Enable this to avoid sudden dirty buffer flushing from -impacting read latencies. Almost always a good idea on SSDs; not -necessarily on platters.

-

Default Value: false

-
-
-

trickle_fsync_interval_in_kb

-

Default Value: 10240

-
-
-

storage_port

-

TCP port, for commands and data -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7000

-
-
-

ssl_storage_port

-

SSL port, for legacy encrypted communication. This property is unused unless enabled in -server_encryption_options (see below). As of cassandra 4.0, this property is deprecated -as a single port can be used for either/both secure and insecure connections. -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 7001

-
-
-

listen_address

-

Address or interface to bind to and tell other Cassandra nodes to connect to. -You _must_ change this if you want multiple nodes to be able to communicate!

-

Set listen_address OR listen_interface, not both.

-

Leaving it blank leaves it up to InetAddress.getLocalHost(). This -will always do the Right Thing _if_ the node is properly configured -(hostname, name resolution, etc), and the Right Thing is to use the -address associated with the hostname (it might not be).

-

Setting listen_address to 0.0.0.0 is always wrong.

-

Default Value: localhost
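A sketch of the two mutually exclusive forms (the address shown is a placeholder, not taken from this document):

```yaml
# Option 1: bind an explicit address (placeholder shown)
listen_address: 192.0.2.11
# Option 2: or bind by interface name instead - never set both
# listen_interface: eth0
```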

-
-
-

listen_interface

-

This option is commented out by default.

-

Set listen_address OR listen_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth0

-
-
-

listen_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_address

-

This option is commented out by default.

-

Address to broadcast to other Cassandra nodes -Leaving this blank will set it to the same value as listen_address

-

Default Value: 1.2.3.4

-
-
-

listen_on_broadcast_address

-

This option is commented out by default.

-

When using multiple physical network interfaces, set this to true to listen on broadcast_address in addition to the listen_address, allowing nodes to communicate on both interfaces. Ignore this property if the network configuration automatically routes between the public and private networks, such as on EC2.

-

Default Value: false

-
-
-

internode_authenticator

-

This option is commented out by default.

-

Internode authentication backend, implementing IInternodeAuthenticator; -used to allow/disallow connections from peer nodes.

-

Default Value: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

-
-
-

start_native_transport

-

Whether to start the native transport server. -The address on which the native transport is bound is defined by rpc_address.

-

Default Value: true

-
-
-

native_transport_port

-

port for the CQL native transport to listen for clients on -For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: 9042

-
-
-

native_transport_port_ssl

-

This option is commented out by default. -Enabling native transport encryption in client_encryption_options allows you to either use -encryption for the standard port or to use a dedicated, additional port along with the unencrypted -standard native_transport_port. -Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -for native_transport_port. Setting native_transport_port_ssl to a different value -from native_transport_port will use encryption for native_transport_port_ssl while -keeping native_transport_port unencrypted.

-

Default Value: 9142
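For example, to keep the standard port unencrypted while serving TLS clients on a dedicated port, the two ports would differ, roughly as sketched below (this assumes client encryption is enabled in client_encryption_options; the ports shown are the documented defaults):

```yaml
# Unencrypted CQL clients connect here
native_transport_port: 9042
# TLS CQL clients connect here (only meaningful if client_encryption_options is enabled)
native_transport_port_ssl: 9142
```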

-
-
-

native_transport_max_threads

-

This option is commented out by default. The maximum threads for handling requests (note that idle threads are stopped after 30 seconds, so there is no corresponding minimum setting).

-

Default Value: 128

-
-
-

native_transport_max_frame_size_in_mb

-

This option is commented out by default.

-

The maximum size of allowed frame. Frame (requests) larger than this will -be rejected as invalid. The default is 256MB. If you’re changing this parameter, -you may want to adjust max_value_size_in_mb accordingly. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

native_transport_frame_block_size_in_kb

-

This option is commented out by default.

-

If checksumming is enabled as a protocol option, denotes the size of the chunks into which frame bodies will be broken and checksummed.

-

Default Value: 32

-
-
-

native_transport_max_concurrent_connections

-

This option is commented out by default.

-

The maximum number of concurrent client connections. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_max_concurrent_connections_per_ip

-

This option is commented out by default.

-

The maximum number of concurrent client connections per source ip. -The default is -1, which means unlimited.

-

Default Value: -1

-
-
-

native_transport_allow_older_protocols

-

Controls whether Cassandra honors older, yet currently supported, protocol versions. -The default is true, which means all supported protocols will be honored.

-

Default Value: true

-
-
-

native_transport_idle_timeout_in_ms

-

This option is commented out by default.

-

Controls when idle client connections are closed. Idle connections are ones that had neither reads -nor writes for a time period.

-

Clients may implement heartbeats by sending OPTIONS native protocol message after a timeout, which -will reset idle timeout timer on the server side. To close idle client connections, corresponding -values for heartbeat intervals have to be set on the client side.

-

Idle connection timeouts are disabled by default.

-

Default Value: 60000

-
-
-

rpc_address

-

The address or interface to bind the native transport server to.

-

Set rpc_address OR rpc_interface, not both.

-

Leaving rpc_address blank has the same effect as on listen_address -(i.e. it will be based on the configured hostname of the node).

-

Note that unlike listen_address, you can specify 0.0.0.0, but you must also -set broadcast_rpc_address to a value other than 0.0.0.0.

-

For security reasons, you should not expose this port to the internet. Firewall it if needed.

-

Default Value: localhost

-
-
-

rpc_interface

-

This option is commented out by default.

-

Set rpc_address OR rpc_interface, not both. Interfaces must correspond -to a single address, IP aliasing is not supported.

-

Default Value: eth1

-
-
-

rpc_interface_prefer_ipv6

-

This option is commented out by default.

-

If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -address will be used. If true the first ipv6 address will be used. Defaults to false preferring -ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.

-

Default Value: false

-
-
-

broadcast_rpc_address

-

This option is commented out by default.

-

RPC address to broadcast to drivers and other Cassandra nodes. This cannot -be set to 0.0.0.0. If left blank, this will be set to the value of -rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -be set.

-

Default Value: 1.2.3.4
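For instance, binding the native transport on all interfaces requires advertising a concrete address, roughly as follows (addresses are placeholders):

```yaml
# rpc_address of 0.0.0.0 is allowed, but broadcast_rpc_address must then be set explicitly
rpc_address: 0.0.0.0
broadcast_rpc_address: 203.0.113.10   # placeholder routable address
```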

-
-
-

rpc_keepalive

-

enable or disable keepalive on rpc/native connections

-

Default Value: true

-
-
-

internode_send_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication. Note that when setting this, the buffer size is limited by net.core.wmem_max, and when not setting it, it is defined by net.ipv4.tcp_wmem. See also: /proc/sys/net/core/wmem_max, /proc/sys/net/core/rmem_max, /proc/sys/net/ipv4/tcp_wmem, /proc/sys/net/ipv4/tcp_rmem, and 'man tcp'.

-
-
-

internode_recv_buff_size_in_bytes

-

This option is commented out by default.

-

Uncomment to set socket buffer size for internode communication -Note that when setting this, the buffer size is limited by net.core.wmem_max -and when not setting it it is defined by net.ipv4.tcp_wmem

-
-
-

incremental_backups

-

Set to true to have Cassandra create a hard link to each sstable -flushed or streamed locally in a backups/ subdirectory of the -keyspace data. Removing these links is the operator’s -responsibility.

-

Default Value: false

-
-
-

snapshot_before_compaction

-

Whether or not to take a snapshot before each compaction. Be -careful using this option, since Cassandra won’t clean up the -snapshots for you. Mostly useful if you’re paranoid when there -is a data format change.

-

Default Value: false

-
-
-

auto_snapshot

-

Whether or not a snapshot is taken of the data before keyspace truncation -or dropping of column families. The STRONGLY advised default of true -should be used to provide data safety. If you set this flag to false, you will -lose data on truncation or drop.

-

Default Value: true

-
-
-

column_index_size_in_kb

-

Granularity of the collation index of rows within a partition. -Increase if your rows are large, or if you have a very large -number of rows per partition. The competing goals are these:

-
    -
  • a smaller granularity means more index entries are generated and looking up rows within the partition by collation column is faster
  • -
  • but, Cassandra will keep the collation index in memory for hot -rows (as part of the key cache), so a larger granularity means -you can cache more hot rows
  • -
-

Default Value: 64

-
-
-

column_index_cache_size_in_kb

-

Per sstable indexed key cache entries (the collation index in memory -mentioned above) exceeding this size will not be held on heap. -This means that only partition information is held on heap and the -index entries are read from disk.

-

Note that this size refers to the size of the -serialized index information and not the size of the partition.

-

Default Value: 2

-
-
-

concurrent_compactors

-

This option is commented out by default.

-

Number of simultaneous compactions to allow, NOT including validation "compactions" for anti-entropy repair. Simultaneous compactions can help preserve read performance in a mixed read/write workload, by mitigating the tendency of small sstables to accumulate during a single long-running compaction. The default is usually fine and if you experience problems with compaction running too slowly or too fast, you should look at compaction_throughput_mb_per_sec first.

-

concurrent_compactors defaults to the smaller of (number of disks, -number of cores), with a minimum of 2 and a maximum of 8.

-

If your data directories are backed by SSD, you should increase this -to the number of cores.

-

Default Value: 1
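Following the SSD guidance above, a node with SSD-backed data directories and, say, 8 cores might be configured as sketched below (the core count is an assumption used only for illustration):

```yaml
# Illustrative: match the number of cores on an SSD-backed node
concurrent_compactors: 8
```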

-
-
-

concurrent_validations

-

This option is commented out by default.

-

Number of simultaneous repair validations to allow. The default is unbounded; values less than one are interpreted as unbounded.

-

Default Value: 0

-
-
-

concurrent_materialized_view_builders

-

Number of simultaneous materialized view builder tasks to allow.

-

Default Value: 1

-
-
-

compaction_throughput_mb_per_sec

-

Throttles compaction to the given total throughput across the entire system. The faster you insert data, the faster you need to compact in order to keep the sstable count down, but in general, setting this to 16 to 32 times the rate you are inserting data is more than sufficient. Setting this to 0 disables throttling. Note that this accounts for all types of compaction, including validation compaction.

-

Default Value: 16
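As a worked example of the "16 to 32 times the insert rate" guidance: a node flushing roughly 2 MB/s of new data would land somewhere in the 32-64 MB/s range. The figure below is illustrative, not prescriptive:

```yaml
# Illustrative: ~2 MB/s ingest * 16..32 => 32..64 MB/s of compaction throughput
compaction_throughput_mb_per_sec: 48
```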

-
-
-

sstable_preemptive_open_interval_in_mb

-

When compacting, the replacement sstable(s) can be opened before they -are completely written, and used in place of the prior sstables for -any range that has been written. This helps to smoothly transfer reads -between the sstables, reducing page cache churn and keeping hot rows hot

-

Default Value: 50

-
-
-

stream_entire_sstables

-

This option is commented out by default.

-

When enabled, permits Cassandra to zero-copy stream entire eligible -SSTables between nodes, including every component. -This speeds up the network transfer significantly subject to -throttling specified by stream_throughput_outbound_megabits_per_sec. -Enabling this will reduce the GC pressure on sending and receiving node. -When unset, the default is enabled. While this feature tries to keep the -disks balanced, it cannot guarantee it. This feature will be automatically -disabled if internode encryption is enabled. Currently this can be used with -Leveled Compaction. Once CASSANDRA-14586 is fixed other compaction strategies -will benefit as well when used in combination with CASSANDRA-6696.

-

Default Value: true

-
-
-

stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all outbound streaming file transfers on this node to the -given total throughput in Mbps. This is necessary because Cassandra does -mostly sequential IO when streaming data during bootstrap or repair, which -can lead to saturating the network connection and degrading rpc performance. -When unset, the default is 200 Mbps or 25 MB/s.

-

Default Value: 200

-
-
-

inter_dc_stream_throughput_outbound_megabits_per_sec

-

This option is commented out by default.

-

Throttles all streaming file transfers between datacenters. This setting allows users to throttle inter-DC stream throughput in addition to throttling all network stream traffic as configured with stream_throughput_outbound_megabits_per_sec. When unset, the default is 200 Mbps (25 MB/s).

-

Default Value: 200
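A sketch combining the two streaming throttles described above; the inter-DC figure is an illustrative value chosen to be lower than the overall cap, not a recommendation:

```yaml
# Overall outbound streaming cap (documented default)
stream_throughput_outbound_megabits_per_sec: 200
# Tighter cap for traffic that crosses datacenters (illustrative value)
inter_dc_stream_throughput_outbound_megabits_per_sec: 100
```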

-
-
-

read_request_timeout_in_ms

-

How long the coordinator should wait for read operations to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

range_request_timeout_in_ms

-

How long the coordinator should wait for seq or index scans to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 10000

-
-
-

write_request_timeout_in_ms

-

How long the coordinator should wait for writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 2000

-
-
-

counter_write_request_timeout_in_ms

-

How long the coordinator should wait for counter writes to complete. -Lowest acceptable value is 10 ms.

-

Default Value: 5000

-
-
-

cas_contention_timeout_in_ms

-

How long a coordinator should continue to retry a CAS operation -that contends with other proposals for the same row. -Lowest acceptable value is 10 ms.

-

Default Value: 1000

-
-
-

truncate_request_timeout_in_ms

-

How long the coordinator should wait for truncates to complete -(This can be much longer, because unless auto_snapshot is disabled -we need to flush first so we can snapshot before removing the data.) -Lowest acceptable value is 10 ms.

-

Default Value: 60000

-
-
-

request_timeout_in_ms

-

The default timeout for other, miscellaneous operations. -Lowest acceptable value is 10 ms.

-

Default Value: 10000
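For reference, the documented defaults of the coordinator timeout family gathered into one snippet (all values are the defaults quoted above; each has a 10 ms floor):

```yaml
read_request_timeout_in_ms: 5000
range_request_timeout_in_ms: 10000
write_request_timeout_in_ms: 2000
counter_write_request_timeout_in_ms: 5000
cas_contention_timeout_in_ms: 1000
truncate_request_timeout_in_ms: 60000
request_timeout_in_ms: 10000
```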

-
-
-

internode_application_send_queue_capacity_in_bytes

-

This option is commented out by default.

-

Defensive settings for protecting Cassandra from true network partitions. -See (CASSANDRA-14358) for details.

-

The amount of time to wait for internode tcp connections to establish. -internode_tcp_connect_timeout_in_ms = 2000

-

The amount of time unacknowledged data is allowed on a connection before we throw out the connection -Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 -(it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 -which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -internode_tcp_user_timeout_in_ms = 30000

-

The maximum continuous period a connection may be unwritable in application space -internode_application_timeout_in_ms = 30000

-

Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes -and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire -size of the message being sent or received.

-

The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. -Each node-pair has three links: urgent, small and large. So any given node may have a maximum of -N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes) -messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens -nodes should need to communicate with significant bandwidth.

-

The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, -on all links to or from a single node in the cluster. -The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, -on all links to or from any node in the cluster.

-

Default Value: 4194304 #4MiB
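A sketch of the defensive internode settings quoted above, shown with the values given in the text (these are commented out by default; uncommenting them merely makes those values explicit):

```yaml
internode_tcp_connect_timeout_in_ms: 2000
internode_tcp_user_timeout_in_ms: 30000
internode_application_timeout_in_ms: 30000
internode_application_send_queue_capacity_in_bytes: 4194304   # 4MiB
```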

-
-
-

internode_application_send_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_send_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

internode_application_receive_queue_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 4194304 #4MiB

-
-
-

internode_application_receive_queue_reserve_endpoint_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 134217728 #128MiB

-
-
-

internode_application_receive_queue_reserve_global_capacity_in_bytes

-

This option is commented out by default.

-

Default Value: 536870912 #512MiB

-
-
-

slow_query_log_timeout_in_ms

-

How long before a node logs slow queries. Select queries that take longer than this timeout to execute will generate an aggregated log message, so that slow queries can be identified. Set this value to zero to disable slow query logging.

-

Default Value: 500

-
-
-

cross_node_timeout

-

This option is commented out by default.

-

Enable operation timeout information exchange between nodes to accurately -measure request timeouts. If disabled, replicas will assume that requests -were forwarded to them instantly by the coordinator, which means that -under overload conditions we will waste that much extra time processing -already-timed-out requests.

-

Warning: It is generally assumed that users have setup NTP on their clusters, and that clocks are modestly in sync, -since this is a requirement for general correctness of last write wins.

-

Default Value: true

-
-
-

streaming_keep_alive_period_in_secs

-

This option is commented out by default.

-

Set the keep-alive period for streaming. This node will send a keep-alive message periodically with this period. If the node does not receive a keep-alive message from the peer for 2 keep-alive cycles, the stream session times out and fails. The default value is 300s (5 minutes), which means a stalled stream times out in 10 minutes by default.

-

Default Value: 300

-
-
-

streaming_connections_per_host

-

This option is commented out by default.

-

Limit the number of connections per host for streaming. Increase this when you notice that joins are CPU-bound rather than network-bound (for example a few nodes with big files).

-

Default Value: 1

-
-
-

phi_convict_threshold

-

This option is commented out by default.

-

Phi value that must be reached for a host to be marked down. Most users should never need to adjust this.

-

Default Value: 8

-
-
-

endpoint_snitch

-

endpoint_snitch – Set this to a class that implements -IEndpointSnitch. The snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route -requests efficiently
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid -correlated failures. It does this by grouping machines into -“datacenters” and “racks.” Cassandra will do its best not to have -more than one replica on the same “rack” (which may not actually -be a physical location)
  • -
-

CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -This means that if you start with the default SimpleSnitch, which -locates every node on “rack1” in “datacenter1”, your only options -if you need to add another datacenter are GossipingPropertyFileSnitch -(and the older PFS). From there, if you want to migrate to an -incompatible snitch like Ec2Snitch you can do it by adding new nodes -under Ec2Snitch (which will locate them in a new “datacenter”) and -decommissioning the old ones.

-

Out of the box, Cassandra provides:

-
-
SimpleSnitch:
-
Treats Strategy order as proximity. This can improve cache -locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack -and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via -gossip. If cassandra-topology.properties exists, it is used as a -fallback, allowing migration from the PropertyFileSnitch.
-
PropertyFileSnitch:
-
Proximity is determined by rack and data center, which are -explicitly configured in cassandra-topology.properties.
-
Ec2Snitch:
-
Appropriate for EC2 deployments in a single Region. Loads Region -and Availability Zone information from the EC2 API. The Region is -treated as the datacenter, and the Availability Zone as the rack. -Only private IPs are used, so this will not work across multiple -Regions.
-
Ec2MultiRegionSnitch:
-
Uses public IPs as broadcast_address to allow cross-region -connectivity. (Thus, you should set seed addresses to the public -IP as well.) You will need to open the storage_port or -ssl_storage_port on the public IP firewall. (For intra-Region -traffic, Cassandra will switch to the private IP after -establishing a connection.)
-
RackInferringSnitch:
-
Proximity is determined by rack and data center, which are -assumed to correspond to the 3rd and 2nd octet of each node’s IP -address, respectively. Unless this happens to match your -deployment conventions, this is best used as an example of -writing a custom Snitch class and is provided in that spirit.
-
-

You can use a custom Snitch by setting this to the full class name -of the snitch, which will be assumed to be on your classpath.

-

Default Value: SimpleSnitch
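Following the guidance above, a typical production cluster would switch from the default as sketched below (the actual rack and datacenter values live in cassandra-rackdc.properties, not in this file):

```yaml
# Production go-to snitch per the description above
endpoint_snitch: GossipingPropertyFileSnitch
```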

-
-
-

dynamic_snitch_update_interval_in_ms

-

controls how often to perform the more expensive part of host score -calculation

-

Default Value: 100

-
-
-

dynamic_snitch_reset_interval_in_ms

-

controls how often to reset all host scores, allowing a bad host to -possibly recover

-

Default Value: 600000

-
-
-

dynamic_snitch_badness_threshold

-

if set greater than zero, this will allow -‘pinning’ of replicas to hosts in order to increase cache capacity. -The badness threshold will control how much worse the pinned host has to be -before the dynamic snitch will prefer other replicas over it. This is -expressed as a double which represents a percentage. Thus, a value of -0.2 means Cassandra would continue to prefer the static snitch values -until the pinned host was 20% worse than the fastest.

-

Default Value: 0.1

-
-
-

server_encryption_options

-

Enable or disable inter-node encryption -JVM and netty defaults for supported SSL socket protocols and cipher suites can -be replaced using custom encryption options. This is not recommended -unless you have policies in place that dictate certain settings, or -need to disable vulnerable ciphers or protocols in case the JVM cannot -be updated. -FIPS compliant settings can be configured at JVM level and should not -involve changing encryption settings here: -https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html

-

NOTE: No custom encryption options are enabled at the moment. The available internode options are: all, none, dc, rack. If set to dc, Cassandra will encrypt the traffic between the DCs. If set to rack, Cassandra will encrypt the traffic between the racks.

-

The passwords used in these options must match the passwords used when generating -the keystore and truststore. For instructions on generating these files, see: -http://download.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore

-

Default Value (complex option):

-
# set to true for allowing secure incoming connections
-enabled: false
-# If enabled and optional are both set to true, encrypted and unencrypted connections are handled on the storage_port
-optional: false
-# if enabled, will open up an encrypted listening socket on ssl_storage_port. Should be used
-# during upgrade to 4.0; otherwise, set to false.
-enable_legacy_ssl_storage_port: false
-# on outbound connections, determine which type of peers to securely connect to. 'enabled' must be set to true.
-internode_encryption: none
-keystore: conf/.keystore
-keystore_password: cassandra
-truststore: conf/.truststore
-truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-# require_client_auth: false
-# require_endpoint_verification: false
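For example, to encrypt only traffic that crosses datacenters while reusing the keystore paths from the default block above, the option might be set roughly as follows (a sketch, not a vetted security configuration; the passwords must match those used when generating the stores):

```yaml
server_encryption_options:
  enabled: true                # allow secure incoming connections
  internode_encryption: dc     # encrypt traffic between DCs only
  keystore: conf/.keystore
  keystore_password: cassandra
  truststore: conf/.truststore
  truststore_password: cassandra
```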
-
-
-
-
-

client_encryption_options

-

enable or disable client-to-server encryption.

-

Default Value (complex option):

-
enabled: false
-# If enabled and optional is set to true encrypted and unencrypted connections are handled.
-optional: false
-keystore: conf/.keystore
-keystore_password: cassandra
-# require_client_auth: false
-# Set trustore and truststore_password if require_client_auth is true
-# truststore: conf/.truststore
-# truststore_password: cassandra
-# More advanced defaults below:
-# protocol: TLS
-# store_type: JKS
-# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-
-
-
-

internode_compression

-

internode_compression controls whether traffic between nodes is -compressed. -Can be:

-
-
all
-
all traffic is compressed
-
dc
-
traffic between different datacenters is compressed
-
none
-
nothing is compressed.
-
-

Default Value: dc

-
-
-

inter_dc_tcp_nodelay

-

Enable or disable tcp_nodelay for inter-dc communication. -Disabling it will result in larger (but fewer) network packets being sent, -reducing overhead from the TCP protocol itself, at the cost of increasing -latency if you block for cross-datacenter responses.

-

Default Value: false

-
-
-

tracetype_query_ttl

-

TTL for different trace types used during logging of the repair process.

-

Default Value: 86400

-
-
-

tracetype_repair_ttl

-

Default Value: 604800

-
-
-

enable_user_defined_functions

-

If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -INFO level -UDFs (user defined functions) are disabled by default. -As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.

-

Default Value: false

-
-
-

enable_scripted_user_defined_functions

-

Enables scripted UDFs (JavaScript UDFs). -Java UDFs are always enabled, if enable_user_defined_functions is true. -Enable this option to be able to use UDFs with “language javascript” or any custom JSR-223 provider. -This option has no effect, if enable_user_defined_functions is false.

-

Default Value: false

-
-
-

windows_timer_interval

-

The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -Lowering this value on Windows can provide much tighter latency and better throughput, however -some virtualized environments may see a negative performance impact from changing this setting -below their system default. The sysinternals ‘clockres’ tool can confirm your system’s default -setting.

-

Default Value: 1

-
-
-

transparent_data_encryption_options

-

Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by the "key_alias" is the only key that will be used for encrypt operations; previously used keys can still (and should!) be in the keystore and will be used on decrypt operations (to handle the case of key rotation).

-

It is strongly recommended to download and install Java Cryptography Extension (JCE) -Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -(current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)

-

Currently, only the following file types are supported for transparent data encryption, although -more are coming in future cassandra releases: commitlog, hints

-

Default Value (complex option):

-
enabled: false
-chunk_length_kb: 64
-cipher: AES/CBC/PKCS5Padding
-key_alias: testing:1
-# CBC IV length for AES needs to be 16 bytes (which is also the default size)
-# iv_length: 16
-key_provider:
-  - class_name: org.apache.cassandra.security.JKSKeyProvider
-    parameters:
-      - keystore: conf/.keystore
-        keystore_password: cassandra
-        store_type: JCEKS
-        key_password: cassandra
-
-
-
-
-

tombstone_warn_threshold

-
-

SAFETY THRESHOLDS #

-

When executing a scan, within or across a partition, we need to keep the tombstones seen in memory so we can return them to the coordinator, which will use them to make sure other replicas also know about the deleted rows. With workloads that generate a lot of tombstones, this can cause performance problems and even exhaust the server heap. (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) Adjust the thresholds here if you understand the dangers and want to scan more tombstones anyway. These thresholds may also be adjusted at runtime using the StorageService mbean.

-

Default Value: 1000

-
-
-
-

tombstone_failure_threshold

-

Default Value: 100000

-
-
-

batch_size_warn_threshold_in_kb

-

Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default. -Caution should be taken on increasing the size of this threshold as it can lead to node instability.

-

Default Value: 5

-
-
-

batch_size_fail_threshold_in_kb

-

Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.

-

Default Value: 50

-
-
-

unlogged_batch_across_partitions_warn_threshold

-

Log WARN on any batches not of type LOGGED that span across more partitions than this limit

-

Default Value: 10

-
-
-

compaction_large_partition_warning_threshold_mb

-

Log a warning when compacting partitions larger than this value

-

Default Value: 100

-
-
-

gc_log_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than 200 ms will be logged at INFO level -This threshold can be adjusted to minimize logging if necessary

-

Default Value: 200

-
-
-

gc_warn_threshold_in_ms

-

This option is commented out by default.

-

GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -Adjust the threshold based on your application throughput requirement. Setting to 0 -will deactivate the feature.

-

Default Value: 1000

-
-
-

max_value_size_in_mb

-

This option is commented out by default.

-

Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -early. Any value size larger than this threshold will result into marking an SSTable -as corrupted. This should be positive and less than 2048.

-

Default Value: 256

-
-
-

back_pressure_enabled

-

Back-pressure settings # -If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation -sent to replicas, with the aim of reducing pressure on overloaded replicas.

-

Default Value: false

-
-
-

back_pressure_strategy

-

The back-pressure strategy applied. -The default implementation, RateBasedBackPressure, takes three arguments: -high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests. -If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor; -if above high ratio, the rate limiting is increased by the given factor; -such factor is usually best configured between 1 and 10, use larger values for a faster recovery -at the expense of potentially more dropped mutations; -the rate limiting is applied according to the flow type: if FAST, it’s rate limited at the speed of the fastest replica, -if SLOW at the speed of the slowest one. -New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and -provide a public constructor accepting a Map<String, Object>.
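A sketch of what configuring the default strategy might look like. The class_name/parameters layout mirrors the key_provider block shown earlier under transparent_data_encryption_options, and the parameter key spellings (high_ratio, factor, flow) are assumptions based on the argument names described above rather than values quoted in this document:

```yaml
back_pressure_strategy:
  - class_name: org.apache.cassandra.net.RateBasedBackPressure
    parameters:
      - high_ratio: 0.90   # assumption: ratio separating "healthy" from "overloaded"
        factor: 5          # assumption: rate-limit adjustment factor (1-10 suggested above)
        flow: FAST         # assumption: limit at the speed of the fastest replica
```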

-
-
-

otc_coalescing_strategy

-

This option is commented out by default.

-

Coalescing Strategies # -Coalescing multiples messages turns out to significantly boost message processing throughput (think doubling or more). -On bare metal, the floor for packet processing throughput is high enough that many applications won’t notice, but in -virtualized environments, the point at which an application can be bound by network packet processing can be -surprisingly low compared to the throughput of task processing that is possible inside a VM. It’s not that bare metal -doesn’t benefit from coalescing messages, it’s that the number of packets a bare metal network interface can process -is sufficient for many applications such that no load starvation is experienced even without coalescing. -There are other benefits to coalescing network messages that are harder to isolate with a simple metric like messages -per second. By coalescing multiple tasks together, a network thread can process multiple messages for the cost of one -trip to read from a socket, and all the task submission work can be done at the same time reducing context switching -and increasing cache friendliness of network message processing. -See CASSANDRA-8692 for details.

-

Strategy to use for coalescing messages in OutboundTcpConnection. -Can be fixed, movingaverage, timehorizon, disabled (default). -You can also specify a subclass of CoalescingStrategies.CoalescingStrategy by name.

-

Default Value: DISABLED

-
-
-

otc_coalescing_window_us

-

This option is commented out by default.

-

How many microseconds to wait for coalescing. For fixed strategy this is the amount of time after the first -message is received before it will be sent with any accompanying messages. For moving average this is the -maximum amount of time that will be waited as well as the interval at which messages must arrive on average -for coalescing to be enabled.

-

Default Value: 200

-
-
-

otc_coalescing_enough_coalesced_messages

-

This option is commented out by default.

-

Do not try to coalesce messages if we already got that many messages. This should be more than 2 and less than 128.

-

Default Value: 8

-
-
-

otc_backlog_expiration_interval_ms

-

This option is commented out by default.

-

How many milliseconds to wait between two expiration runs on the backlog (queue) of the OutboundTcpConnection. -Expiration is done if messages are piling up in the backlog. Droppable messages are expired to free the memory -taken by expired messages. The interval should be between 0 and 1000, and in most installations the default value -will be appropriate. A smaller value could potentially expire messages slightly sooner at the expense of more CPU -time and queue contention while iterating the backlog of messages. -An interval of 0 disables any wait time, which is the behavior of former Cassandra versions.

-

Default Value: 200

-
-
-

ideal_consistency_level

-

This option is commented out by default.

-

Track a metric per keyspace indicating whether replication achieved the ideal consistency -level for writes without timing out. This is different from the consistency level requested by -each write which may be lower in order to facilitate availability.

-

Default Value: EACH_QUORUM

-
-
-

automatic_sstable_upgrade

-

This option is commented out by default.

-

Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the -oldest non-upgraded sstable will get upgraded to the latest version

-

Default Value: false

-
-
-

max_concurrent_automatic_sstable_upgrades

-

This option is commented out by default. -Limit the number of concurrent sstable upgrades

-

Default Value: 1

-
-
-

audit_logging_options

-

Audit logging - Logs every incoming CQL command request and authentication to a node. See the docs on audit_logging for full details about the various configuration options.
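A minimal sketch of turning it on; the enabled key is an assumption based on the audit_logging documentation referenced above, and the full set of sub-options (loggers, included/excluded keyspaces, and so on) is described there rather than here:

```yaml
audit_logging_options:
  enabled: true   # assumption: see the audit_logging docs for logger and filtering sub-options
```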

-
-
-

full_query_logging_options

-

This option is commented out by default.

-

default options for full query logging - these can be overridden from command line when executing -nodetool enablefullquerylog

-
-
-

corrupted_tombstone_strategy

-

This option is commented out by default.

-

Validate tombstones on reads and compaction. Can be either "disabled", "warn" or "exception".

-

Default Value: disabled

-
-
-

diagnostic_events_enabled

-

Diagnostic Events # -If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details -on internal state and temporal relationships across events, accessible by clients via JMX.

-

Default Value: false

-
-
-

native_transport_flush_in_batches_legacy

-

This option is commented out by default.

-

Use native transport TCP message coalescing. If on upgrade to 4.0 you found your throughput decreasing, and in particular you run an old kernel or have very few client connections, this option might be worth evaluating.

-

Default Value: false

-
-
-

repaired_data_tracking_for_range_reads_enabled

-

Enable tracking of repaired state of data during reads and comparison between replicas -Mismatches between the repaired sets of replicas can be characterized as either confirmed -or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair -sessions, unrepaired partition tombstones, or some other condition means that the disparity -cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation -as they may be indicative of corruption or data loss. -There are separate flags for range vs partition reads as single partition reads are only tracked -when CL > 1 and a digest mismatch occurs. Currently, range queries don’t use digests so if -enabled for range reads, all range reads will include repaired data tracking. As this adds -some overhead, operators may wish to disable it whilst still enabling it for partition reads

-

Default Value: false

-
-
-

repaired_data_tracking_for_partition_reads_enabled

-

Default Value: false

-
-
-

report_unconfirmed_repaired_data_mismatches

-

If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed mismatches will also be recorded. This is to avoid potential signal-to-noise issues, as unconfirmed mismatches are less actionable than confirmed ones.

-

Default Value: false

-
-
-

enable_materialized_views

-
-

EXPERIMENTAL FEATURES #

-

Enables materialized view creation on this node. -Materialized views are considered experimental and are not recommended for production use.

-

Default Value: false

-
-
-
-

enable_sasi_indexes

-

Enables SASI index creation on this node. -SASI indexes are considered experimental and are not recommended for production use.

-

Default Value: false

-
-
-

enable_transient_replication

-

Enables creation of transiently replicated keyspaces on this node. -Transient replication is experimental and is not recommended for production use.

-

Default Value: false

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/configuration/index.html b/src/doc/4.0-beta1/configuration/index.html deleted file mode 100644 index f70213771..000000000 --- a/src/doc/4.0-beta1/configuration/index.html +++ /dev/null @@ -1,111 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

This section describes how to configure Apache Cassandra.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/contactus.html b/src/doc/4.0-beta1/contactus.html deleted file mode 100644 index 75b5711f9..000000000 --- a/src/doc/4.0-beta1/contactus.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contact us" -doc-header-links: ' - - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contact us

-

You can get in touch with the Cassandra community either via the mailing lists or Slack rooms.

-
-

Mailing lists

-

The following mailing lists are available:

- -

Subscribe by sending an email to the email address in the Subscribe links above. Follow the instructions in the welcome -email to confirm your subscription. Make sure to keep the welcome email as it contains instructions on how to -unsubscribe.

-
-
-

Slack

-

To chat with developers or users in real-time, join our rooms on ASF Slack:

-
    -
  • cassandra - for user questions and general discussions.
  • -
  • cassandra-dev - strictly for questions or discussions related to Cassandra development.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/appendices.html b/src/doc/4.0-beta1/cql/appendices.html deleted file mode 100644 index d83a3a228..000000000 --- a/src/doc/4.0-beta1/cql/appendices.html +++ /dev/null @@ -1,568 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Appendices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Appendices

-
-

Appendix A: CQL Keywords

-

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (but one can enclose a reserved keyword in double-quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d'être of these non-reserved keywords is convenience: some keywords are non-reserved when it was always easy for the parser to decide whether they were used as keywords or not.

Keyword | Reserved?
------- | ---------
ADD | yes
AGGREGATE | no
ALL | no
ALLOW | yes
ALTER | yes
AND | yes
APPLY | yes
AS | no
ASC | yes
ASCII | no
AUTHORIZE | yes
BATCH | yes
BEGIN | yes
BIGINT | no
BLOB | no
BOOLEAN | no
BY | yes
CALLED | no
CLUSTERING | no
COLUMNFAMILY | yes
COMPACT | no
CONTAINS | no
COUNT | no
COUNTER | no
CREATE | yes
CUSTOM | no
DATE | no
DECIMAL | no
DELETE | yes
DESC | yes
DESCRIBE | yes
DISTINCT | no
DOUBLE | no
DROP | yes
ENTRIES | yes
EXECUTE | yes
EXISTS | no
FILTERING | no
FINALFUNC | no
FLOAT | no
FROM | yes
FROZEN | no
FULL | yes
FUNCTION | no
FUNCTIONS | no
GRANT | yes
IF | yes
IN | yes
INDEX | yes
INET | no
INFINITY | yes
INITCOND | no
INPUT | no
INSERT | yes
INT | no
INTO | yes
JSON | no
KEY | no
KEYS | no
KEYSPACE | yes
KEYSPACES | no
LANGUAGE | no
LIMIT | yes
LIST | no
LOGIN | no
MAP | no
MODIFY | yes
NAN | yes
NOLOGIN | no
NORECURSIVE | yes
NOSUPERUSER | no
NOT | yes
NULL | yes
OF | yes
ON | yes
OPTIONS | no
OR | yes
ORDER | yes
PASSWORD | no
PERMISSION | no
PERMISSIONS | no
PRIMARY | yes
RENAME | yes
REPLACE | yes
RETURNS | no
REVOKE | yes
ROLE | no
ROLES | no
SCHEMA | yes
SELECT | yes
SET | yes
SFUNC | no
SMALLINT | no
STATIC | no
STORAGE | no
STYPE | no
SUPERUSER | no
TABLE | yes
TEXT | no
TIME | no
TIMESTAMP | no
TIMEUUID | no
TINYINT | no
TO | yes
TOKEN | yes
TRIGGER | no
TRUNCATE | yes
TTL | no
TUPLE | no
TYPE | no
UNLOGGED | yes
UPDATE | yes
USE | yes
USER | no
USERS | no
USING | yes
UUID | no
VALUES | no
VARCHAR | no
VARINT | no
WHERE | yes
WITH | yes
WRITETIME | no
-
-
-

Appendix B: CQL Reserved Types

-

The following type names are not currently used by CQL, but are reserved -for potential future use. User-defined types may not use reserved type -names as their name.

- --- - - - - - - - - - - - - - - - - - - -
type
bitstring
byte
complex
enum
interval
macaddr
-
-
-

Appendix C: Dropping Compact Storage

-

Starting with version 4.0, Thrift and COMPACT STORAGE are no longer supported.

-

‘ALTER … DROP COMPACT STORAGE’ statement makes Compact Tables CQL-compatible, -exposing internal structure of Thrift/Compact Tables:

-
    -
  • CQL-created Compact Tables that have no clustering columns, will expose an -additional clustering column column1 with UTF8Type.
  • -
  • CQL-created Compact Tables that had no regular columns, will expose a -regular column value with BytesType.
  • -
  • For CQL-created Compact Tables, all columns originally defined as regular will become static
  • -
  • CQL-created Compact Tables that have clustering but have no regular -columns will have an empty value column (of EmptyType)
  • -
  • SuperColumn Tables (can only be created through Thrift) will expose -a compact value map with an empty name.
  • -
  • Thrift-created Compact Tables will have types corresponding to their -Thrift definition.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/changes.html b/src/doc/4.0-beta1/cql/changes.html deleted file mode 100644 index a9d77008c..000000000 --- a/src/doc/4.0-beta1/cql/changes.html +++ /dev/null @@ -1,364 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Changes

-

The following describes the changes in each version of CQL.

-
-

3.4.5

- -
-
-

3.4.4

-
    -
  • ALTER TABLE ALTER has been removed; a column’s type may not be changed after creation (CASSANDRA-12443).
  • -
  • ALTER TYPE ALTER has been removed; a field’s type may not be changed after creation (CASSANDRA-12443).
  • -
-
-
-

3.4.3

- -
-
-

3.4.2

-
    -
  • If a table has a non zero default_time_to_live, then explicitly specifying a TTL of 0 in an INSERT or -UPDATE statement will result in the new writes not having any expiration (that is, an explicit TTL of 0 cancels -the default_time_to_live). This wasn’t the case before and the default_time_to_live was applied even though a -TTL had been explicitly set.
  • -
  • ALTER TABLE ADD and DROP now allow multiple columns to be added/removed.
  • -
  • New PER PARTITION LIMIT option for SELECT statements (see CASSANDRA-7017).
  • -
  • User-defined functions can now instantiate UDTValue and TupleValue instances via the new UDFContext interface (see CASSANDRA-10818).
  • -
  • User-defined types may now be stored in a non-frozen form, allowing individual fields to be updated and -deleted in UPDATE statements and DELETE statements, respectively. (CASSANDRA-7423).
  • -
-
-
-

3.4.1

-
    -
  • Adds CAST functions.
  • -
-
-
-

3.4.0

-
    -
  • Support for materialized views.
  • -
  • DELETE support for inequality expressions and IN restrictions on any primary key columns.
  • -
  • UPDATE support for IN restrictions on any primary key columns.
  • -
-
-
-

3.3.1

-
    -
  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X.
  • -
-
-
-

3.3.0

-
    -
  • User-defined functions and aggregates are now supported.
  • -
  • Allows double-dollar enclosed strings literals as an alternative to single-quote enclosed strings.
  • -
  • Introduces Roles to supersede user based authentication and access control
  • -
  • New date, time, tinyint and smallint data types have been added.
  • -
  • JSON support has been added
  • -
  • Adds new time conversion functions and deprecate dateOf and unixTimestampOf.
  • -
-
-
-

3.2.0

-
    -
  • User-defined types supported.
  • -
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the -keys() function
  • -
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • -
  • Tuple types were added to hold fixed-length sets of typed positional fields.
  • -
  • DROP INDEX now supports optionally specifying a keyspace.
  • -
-
-
-

3.1.7

-
    -
  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations -of clustering columns.
  • -
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, -respectively.
  • -
-
-
-

3.1.6

-
    -
  • A new uuid() method has been added.
  • -
  • Support for DELETE ... IF EXISTS syntax.
  • -
-
-
-

3.1.5

-
    -
  • It is now possible to group clustering columns in a relation, see WHERE clauses.
  • -
  • Added support for static columns.
  • -
-
-
-

3.1.4

-
    -
  • CREATE INDEX now allows specifying options when creating CUSTOM indexes.
  • -
-
-
-

3.1.3

-
    -
  • Millisecond precision formats have been added to the timestamp parser.
  • -
-
-
-

3.1.2

-
    -
  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or keyspace/table one), you will now need to double-quote them.
  • -
-
-
-

3.1.1

-
    -
  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • -
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable -will be a list of whatever type c is.
  • -
  • It is now possible to use named bind variables (using :name instead of ?).
  • -
-
-
-

3.1.0

-
    -
  • ALTER TABLE DROP option added.
  • -
  • SELECT statement now supports aliases in select clause. Aliases in WHERE and ORDER BY clauses are not supported.
  • -
  • CREATE statements for KEYSPACE, TABLE and INDEX now supports an IF NOT EXISTS condition. -Similarly, DROP statements support a IF EXISTS condition.
  • -
  • INSERT statements optionally supports a IF NOT EXISTS condition and UPDATE supports IF conditions.
  • -
-
-
-

3.0.5

-
    -
  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626).
  • -
-
-
-

3.0.4

-
    -
  • Updated the syntax for custom secondary indexes.
  • -
  • Non-equal condition on the partition key are now never supported, even for ordering partitioner as this was not -correct (the order was not the one of the type of the partition key). Instead, the token method should always -be used for range queries on the partition key (see WHERE clauses).
  • -
-
-
-

3.0.3

- -
-
-

3.0.2

-
    -
  • Type validation for the constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now more strict. See the data types section for details on which constant is allowed for which type.
  • -
  • The type validation fix of the previous point has led to the introduction of blob constants to allow the input of blobs. Do note that while the input of blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated and will be removed by a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • -
  • A number of functions to convert native types to blobs have also been introduced. Furthermore the token function is -now also allowed in select clauses. See the section on functions for details.
  • -
-
-
-

3.0.1

-
    -
  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense -that date string are not valid timeuuid, and it was thus resulting in confusing behaviors. However, the following new methods have been added to help -working with timeuuid: now, minTimeuuid, maxTimeuuid , -dateOf and unixTimestampOf.
  • -
  • Float constants now support the exponent notation. In other words, 4.2E10 is now a valid floating point value.
  • -
-
-
-

Versioning

-

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the -form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no -correlation between Cassandra release versions and the CQL language version.

- ---- - - - - - - - - - - - - - - - - -
version | description
------- | -----------
Major | The major version must be bumped when backward incompatible changes are introduced. This should rarely occur.
Minor | Minor version increments occur when new, but backward compatible, functionality is introduced.
Patch | The patch version is incremented when bugs are fixed.
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/ddl.html b/src/doc/4.0-beta1/cql/ddl.html deleted file mode 100644 index a820883f7..000000000 --- a/src/doc/4.0-beta1/cql/ddl.html +++ /dev/null @@ -1,908 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Definition" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Definition

-

CQL stores data in tables, whose schema defines the layout of said data in the table, and those tables are grouped in keyspaces. A keyspace defines a number of options that apply to all the tables it contains, most prominent of which is the replication strategy used by the keyspace. It is generally encouraged to use one keyspace per application, and thus many clusters may define only one keyspace.

-

This section describes the statements used to create, modify, and remove those keyspace and tables.

-
-

Common definitions

-

The names of the keyspaces and tables are defined by the following grammar:

-
-keyspace_name ::=  name
-table_name    ::=  [ keyspace_name '.' ] name
-name          ::=  unquoted_name | quoted_name
-unquoted_name ::=  re('[a-zA-Z_0-9]{1, 48}')
-quoted_name   ::=  '"' unquoted_name '"'
-
-

Both keyspace and table names should be comprised of only alphanumeric characters, cannot be empty, and are limited in size to 48 characters (that limit exists mostly to avoid filenames (which may include the keyspace and table name) going over the limits of certain file systems). By default, keyspace and table names are case insensitive (myTable is equivalent to mytable) but case sensitivity can be forced by using double-quotes ("myTable" is different from mytable).

-

Further, a table is always part of a keyspace and a table name can be provided fully-qualified by the keyspace it is part of. If it is not fully-qualified, the table is assumed to be in the current keyspace (see USE statement).

-

Further, the valid names for columns are simply defined as:

-
-column_name ::=  identifier
-
-

We also define the notion of statement options for use in the following section:

-
-options ::=  option ( AND option )*
-option  ::=  identifier '=' ( identifier | constant | map_literal )
-
-
-
-

CREATE KEYSPACE

-

A keyspace is created using a CREATE KEYSPACE statement:

-
-create_keyspace_statement ::=  CREATE KEYSPACE [ IF NOT EXISTS ] keyspace_name WITH options
-
-

For instance:

-
CREATE KEYSPACE excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-    AND durable_writes = false;
-
-
-

Attempting to create a keyspace that already exists will return an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the keyspace already exists.

-

The supported options are:

 name           | kind   | mandatory | default | description
 ---------------|--------|-----------|---------|------------
 replication    | map    | yes       |         | The replication strategy and options to use for the keyspace (see details below).
 durable_writes | simple | no        | true    | Whether to use the commit log for updates on this keyspace (disable this option at your own risk!).
-
The replication property is mandatory and must at least contain the 'class' sub-option, which defines the replication strategy class to use. The remaining sub-options depend on which replication strategy is used. By default, Cassandra supports the following 'class' values:

-
-

SimpleStrategy

-

A simple strategy that defines a replication factor for data to be spread -across the entire cluster. This is generally not a wise choice for production -because it does not respect datacenter layouts and can lead to wildly varying -query latency. For a production ready strategy, see -NetworkTopologyStrategy. SimpleStrategy supports a single mandatory argument:

 sub-option           | type | since | description
 ---------------------|------|-------|------------
 'replication_factor' | int  | all   | The number of replicas to store per range.
-
-
-

NetworkTopologyStrategy

-
A production-ready replication strategy that allows setting the replication factor independently for each data-center. The rest of the sub-options are key-value pairs, where a key is a data-center name and its value is the associated replication factor. Options:

 sub-option           | type | since | description
 ---------------------|------|-------|------------
 '<datacenter>'       | int  | all   | The number of replicas to store per range in the provided datacenter.
 'replication_factor' | int  | 4.0   | The number of replicas to use as a default per datacenter if not specifically provided. Note that this always defers to existing definitions or explicit datacenter settings. For example, to have three replicas per datacenter, supply this with a value of 3.
-
Note that when altering keyspaces and supplying replication_factor, auto-expansion will only add new datacenters for safety; it will not alter existing datacenters or remove any, even if they are no longer in the cluster. If you want to remove datacenters while still supplying replication_factor, explicitly zero out the datacenter you want to have zero replicas.

-

An example of auto-expanding datacenters with two datacenters: DC1 and DC2:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '3'} AND durable_writes = true;
-
-
-

An example of auto-expanding and overriding a datacenter:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 2}
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3', 'DC2': '2'} AND durable_writes = true;
-
-
-

An example that excludes a datacenter while using replication_factor:

-
CREATE KEYSPACE excalibur
-    WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor' : 3, 'DC2': 0} ;
-
-DESCRIBE KEYSPACE excalibur
-    CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true;
-
-
-

If transient replication has been enabled, transient replicas can be configured for both -SimpleStrategy and NetworkTopologyStrategy by defining replication factors in the format '<total_replicas>/<transient_replicas>'

-

For instance, this keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:

-
CREATE KEYSPACE some_keyspace
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : '3/1', 'DC2' : '5/2'};
-
-
-
-
-
-

USE

-
The USE statement allows changing the current keyspace (for the connection on which it is executed). A number of objects in CQL are bound to a keyspace (tables, user-defined types, functions, …) and the current keyspace is the default keyspace used when those objects are referred to without a fully-qualified name (that is, without being prefixed by a keyspace name). A USE statement simply takes as argument the keyspace to use as the current one:

-
-use_statement ::=  USE keyspace_name
-
-
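For instance, to make the excelsior keyspace created earlier the current keyspace (a minimal sketch; any existing keyspace name works):

USE excelsior;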
-
-

ALTER KEYSPACE

-
An ALTER KEYSPACE statement allows modifying the options of a keyspace:

-
-alter_keyspace_statement ::=  ALTER KEYSPACE keyspace_name WITH options
-
-

For instance:

-
ALTER KEYSPACE Excelsior
-    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-
-
The supported options are the same as for creating a keyspace.

-
-
-

DROP KEYSPACE

-

Dropping a keyspace can be done using the DROP KEYSPACE statement:

-
-drop_keyspace_statement ::=  DROP KEYSPACE [ IF EXISTS ] keyspace_name
-
-

For instance:

-
DROP KEYSPACE Excelsior;
-
-
-
Dropping a keyspace results in the immediate, irreversible removal of that keyspace, including all the tables, UDTs and functions in it, and all the data contained in those tables.

-
If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

-
-
-

CREATE TABLE

-

Creating a new table uses the CREATE TABLE statement:

-
-create_table_statement ::=  CREATE TABLE [ IF NOT EXISTS ] table_name
-                            '('
-                                column_definition
-                                ( ',' column_definition )*
-                                [ ',' PRIMARY KEY '(' primary_key ')' ]
-                            ')' [ WITH table_options ]
-column_definition      ::=  column_name cql_type [ STATIC ] [ PRIMARY KEY]
-primary_key            ::=  partition_key [ ',' clustering_columns ]
-partition_key          ::=  column_name
-                            | '(' column_name ( ',' column_name )* ')'
-clustering_columns     ::=  column_name ( ',' column_name )*
-table_options          ::=  COMPACT STORAGE [ AND table_options ]
-                            | CLUSTERING ORDER BY '(' clustering_order ')' [ AND table_options ]
-                            | options
-clustering_order       ::=  column_name (ASC | DESC) ( ',' column_name (ASC | DESC) )*
-
-

For instance:

-
CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records';
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-
-CREATE TABLE loads (
-    machine inet,
-    cpu int,
-    mtime timeuuid,
-    load float,
-    PRIMARY KEY ((machine, cpu), mtime)
-) WITH CLUSTERING ORDER BY (mtime DESC);
-
-
-
A CQL table has a name and is composed of a set of rows. Creating a table amounts to defining which columns the rows will be composed of, which of those columns compose the primary key, as well as any optional options for the table.

-

Attempting to create an already existing table will return an error unless the IF NOT EXISTS directive is used. If -it is used, the statement will be a no-op if the table already exists.

-
Every row in a CQL table has a set of predefined columns defined at the time of the table creation (or added later using an alter statement).

-
A column_definition is primarily comprised of the name of the column defined and its type, which restricts which values are accepted for that column. Additionally, a column definition can have the following modifiers:

-
-
STATIC
-
it declares the column as being a static column.
-
PRIMARY KEY
-
it declares the column as being the sole component of the primary key of the table.
-
-

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the -rows belonging to the same partition (having the same partition key). For instance:

-
CREATE TABLE t (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-
-INSERT INTO t (pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO t (pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-
-SELECT * FROM t;
-   pk | t | v      | s
-  ----+---+--------+-----------
-   0  | 0 | 'val0' | 'static1'
-   0  | 1 | 'val1' | 'static1'
-
-
-
As can be seen, the s value is the same (static1) for both of the rows in the partition (the partition key in that example being pk; both rows are in that same partition): the 2nd insertion has overridden the value for s.

-
The use of static columns has the following restrictions:

-
    -
  • tables with the COMPACT STORAGE option (see below) cannot use them.
  • -
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition -has only one row, and so every column is inherently static).
  • -
  • only non PRIMARY KEY columns can be static.
  • -
-
Within a table, a row is uniquely identified by its PRIMARY KEY, and hence all tables must define a PRIMARY KEY (and only one). A PRIMARY KEY definition is composed of one or more of the columns defined in the table. Syntactically, the primary key is defined by the keywords PRIMARY KEY followed by a comma-separated list of the column names composing it within parentheses; if the primary key has only one column, one can alternatively follow that column definition by the PRIMARY KEY keywords. The order of the columns in the primary key definition matters.

-

A CQL primary key is composed of 2 parts:

-
    -
  • the partition key part. It is the first component of the primary key definition. It can be a -single column or, using additional parenthesis, can be multiple columns. A table always have at least a partition key, -the smallest possible table definition is:

    -
    CREATE TABLE t (k text PRIMARY KEY);
    -
    -
    -
  • -
  • the clustering columns. Those are the columns after the first component of the primary key -definition, and the order of those columns define the clustering order.

    -
  • -
-
Some examples of primary key definitions are:

-
    -
  • PRIMARY KEY (a): a is the partition key and there is no clustering columns.
  • -
  • PRIMARY KEY (a, b, c) : a is the partition key and b and c are the clustering columns.
  • -
  • PRIMARY KEY ((a, b), c) : a and b compose the partition key (this is often called a composite partition -key) and c is the clustering column.
  • -
-
Within a table, CQL defines the notion of a partition. A partition is simply the set of rows that share the same value for their partition key. Note that if the partition key is composed of multiple columns, then rows belong to the same partition only if they have the same values for all those partition key columns. So for instance, given the following table definition and content:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    d int,
-    PRIMARY KEY ((a, b), c, d)
-);
-
-SELECT * FROM t;
-   a | b | c | d
-  ---+---+---+---
-   0 | 0 | 0 | 0    // row 1
-   0 | 0 | 1 | 1    // row 2
-   0 | 1 | 2 | 2    // row 3
-   0 | 1 | 3 | 3    // row 4
-   1 | 1 | 4 | 4    // row 5
-
-
-

row 1 and row 2 are in the same partition, row 3 and row 4 are also in the same partition (but a -different one) and row 5 is in yet another partition.

-

Note that a table always has a partition key, and that if the table has no clustering columns, then every partition of that table is only comprised of a single row (since the primary key -uniquely identifies rows and the primary key is equal to the partition key if there is no clustering columns).

-
The most important property of a partition is that all the rows belonging to the same partition are guaranteed to be stored on the same set of replica nodes. In other words, the partition key of a table defines which of the rows will be localized together in the cluster, and it is thus important to choose your partition key wisely so that rows that need to be fetched together are in the same partition (so that querying those rows together requires contacting a minimum of nodes).

-

Please note however that there is a flip-side to this guarantee: as all rows sharing a partition key are guaranteed to -be stored on the same set of replica node, a partition key that groups too much data can create a hotspot.

-

Another useful property of a partition is that when writing data, all the updates belonging to a single partition are -done atomically and in isolation, which is not the case across partitions.

-
The proper choice of the partition key and clustering columns for a table is probably one of the most important aspects of data modeling in Cassandra, and it largely impacts which queries can be performed, and how efficiently they are.

-
The clustering columns of a table define the clustering order for the partitions of that table. For a given partition, all the rows are physically ordered inside Cassandra by that clustering order. For instance, given:

-
CREATE TABLE t (
-    a int,
-    b int,
-    c int,
-    PRIMARY KEY (a, b, c)
-);
-
-SELECT * FROM t;
-   a | b | c
-  ---+---+---
-   0 | 0 | 4     // row 1
-   0 | 1 | 9     // row 2
-   0 | 2 | 2     // row 3
-   0 | 3 | 3     // row 4
-
-
-
then the rows (which all belong to the same partition) are all stored internally in the order of the values of their b column (the order they are displayed above). So where the partition key of the table allows grouping rows on the same replica set, the clustering columns control how those rows are stored on the replica. That sorting allows the retrieval of a range of rows within a partition (for instance, in the example above, SELECT * FROM t WHERE a = 0 AND b > 1 AND b <= 3) to be very efficient.

-

A CQL table has a number of options that can be set at creation (and, for most of them, altered later). These options are specified after the WITH keyword.

-

Amongst those options, two important ones cannot be changed after creation and influence which queries can be done -against the table: the COMPACT STORAGE option and the CLUSTERING ORDER option. Those, as well as the other -options of a table are described in the following sections.

-
-

Warning

-
Since Cassandra 3.0, compact tables have the exact same layout internally as non-compact ones (for the same schema, obviously), and declaring a table compact only creates artificial limitations on the table definition and usage. It only exists for historical reasons and is preserved for backward compatibility. As COMPACT STORAGE cannot, as of Cassandra 4.0-alpha5, be removed, it is strongly discouraged to create new tables with the COMPACT STORAGE option.

-
-
A compact table is one defined with the COMPACT STORAGE option. This option is only maintained for backward compatibility for definitions created before CQL version 3 and shouldn't be used for new tables. Declaring a table with this option creates limitations for the table which are largely arbitrary (and exist for historical reasons). Amongst those limitations:

-
    -
  • a compact table cannot use collections nor static columns.
  • -
  • if a compact table has at least one clustering column, then it must have exactly one column outside of the primary key ones. This implies, in particular, that you cannot add or remove columns after creation.
  • -
  • a compact table is limited in the indexes it can create, and no materialized view can be created on it.
  • -
-
The clustering order of a table is defined by the clustering columns of that table. By default, that ordering is based on the natural order of those clustering columns, but the CLUSTERING ORDER option allows changing that clustering order to use the reverse natural order for some (potentially all) of the columns.

-
The CLUSTERING ORDER option takes the comma-separated list of the clustering columns, each with an ASC (for ascending, i.e. the natural order) or DESC (for descending, i.e. the reverse natural order) modifier. Note in particular that the default (if the CLUSTERING ORDER option is not used) is strictly equivalent to using the option with all clustering columns using the ASC modifier.

-
Note that this option is basically a hint for the storage engine to change the order in which it stores the rows, but it has 3 visible consequences:

# it limits which ORDER BY clauses are allowed for selects on that table. You can only order results by the clustering order or the reverse clustering order. Meaning that if a table has 2 clustering columns a and b and you defined WITH CLUSTERING ORDER (a DESC, b ASC), then in queries you will be allowed to use ORDER BY (a DESC, b ASC) and (reverse clustering order) ORDER BY (a ASC, b DESC), but not ORDER BY (a ASC, b ASC) (nor ORDER BY (a DESC, b DESC)).
# it also changes the default order of results when queried (if no ORDER BY is provided). Results are always returned in clustering order (within a partition).
# it has a small performance impact on some queries, as queries in reverse clustering order are slower than the ones in forward clustering order. In practice, this means that if you plan on querying mostly in the reverse natural order of your columns (which is common with time series, for instance, where you often want data from the newest to the oldest), it is an optimization to declare a descending clustering order, as illustrated in the sketch below.
-
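As an illustration of the time-series case above, a table that is mostly read newest-first could declare a descending clustering order (a sketch using a hypothetical events_by_day table):

CREATE TABLE events_by_day (
    day date,
    ts timestamp,
    payload text,
    PRIMARY KEY (day, ts)
) WITH CLUSTERING ORDER BY (ts DESC);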
-
-

Todo

-

review (misses cdc if nothing else) and link to proper categories when appropriate (compaction for instance)

-
-

A table supports the following options:

 option                      | kind    | default      | description
 ----------------------------|---------|--------------|------------
 comment                     | simple  | none         | A free-form, human-readable comment.
 speculative_retry           | simple  | 99PERCENTILE | Speculative retry options.
 cdc                         | boolean | false        | Create a Change Data Capture (CDC) log on the table.
 additional_write_policy     | simple  | 99PERCENTILE | Speculative retry options.
 gc_grace_seconds            | simple  | 864000       | Time to wait before garbage collecting tombstones (deletion markers).
 bloom_filter_fp_chance      | simple  | 0.00075      | The target probability of false positive of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impacts the size of bloom filters in-memory and on-disk).
 default_time_to_live        | simple  | 0            | The default expiration time ("TTL") in seconds for a table.
 compaction                  | map     | see below    | Compaction options.
 compression                 | map     | see below    | Compression options.
 caching                     | map     | see below    | Caching options.
 memtable_flush_period_in_ms | simple  | 0            | Time (in ms) before Cassandra flushes memtables to disk.
 read_repair                 | simple  | BLOCKING     | Sets read repair behavior (see below).
-

By default, Cassandra read coordinators only query as many replicas as necessary to satisfy -consistency levels: one for consistency level ONE, a quorum for QUORUM, and so on. -speculative_retry determines when coordinators may query additional replicas, which is useful -when replicas are slow or unresponsive. Speculative retries are used to reduce the latency. The speculative_retry option may be -used to configure rapid read protection with which a coordinator sends more requests than needed to satisfy the Consistency level.

-
Pre-4.0, the speculative retry policy takes a single string as a parameter; this can be NONE, ALWAYS, 99PERCENTILE (PERCENTILE) or 50MS (CUSTOM).

-

Examples of setting speculative retry are:

-
ALTER TABLE users WITH speculative_retry = '10ms';
-
-
-

Or,

-
ALTER TABLE users WITH speculative_retry = '99PERCENTILE';
-
-
-
The problem with these settings is that when a single host goes into an unavailable state, it drags up the percentiles. This means that if we are set to use p99 alone, we might not speculate when we intended to, because the value at the specified percentile has gone so high. As a fix, 4.0 adds support for hybrid MIN(), MAX() speculative retry policies (CASSANDRA-14293). This means that if the normal p99 for the table is <50ms, we will still speculate at this value and not drag the tail latencies up, but if the p99 goes above what we know we should never exceed, we use that instead.

-

In 4.0 the values (case-insensitive) discussed in the following table are supported:

-
As of version 4.0, speculative retry allows more friendly parameters (CASSANDRA-13876), and speculative_retry is more flexible with case. For example, a value does not have to be exactly NONE; the following alternatives are supported:

-
alter table users WITH speculative_retry = 'none';
-alter table users WITH speculative_retry = 'None';
-
-
-
The text component is case insensitive and, for nPERCENTILE, version 4.0 also allows the shorthand nP, for instance 99p. In a hybrid value for speculative retry, one of the two values must be a fixed millisecond value and the other a percentile value.

-

Some examples:

-
min(99percentile,50ms)
-max(99p,50MS)
-MAX(99P,50ms)
-MIN(99.9PERCENTILE,50ms)
-max(90percentile,100MS)
-MAX(100.0PERCENTILE,60ms)
-
-
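Such a hybrid value is applied like any other speculative_retry value; a sketch using the users table from the previous examples:

ALTER TABLE users WITH speculative_retry = 'MIN(99percentile,50ms)';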
-

Two values of the same kind cannot be specified such as min(90percentile,99percentile) as it wouldn’t be a hybrid value. -This setting does not affect reads with consistency level ALL because they already query all replicas.

-

Note that frequently reading from additional replicas can hurt cluster performance. -When in doubt, keep the default 99PERCENTILE.

-

additional_write_policy specifies the threshold at which a cheap quorum write will be upgraded to include transient replicas.

-
The compaction options must at least define the 'class' sub-option, which defines the compaction strategy class to use. The supported classes are 'SizeTieredCompactionStrategy' (STCS), 'LeveledCompactionStrategy' (LCS) and 'TimeWindowCompactionStrategy' (TWCS) (the 'DateTieredCompactionStrategy' is also supported but is deprecated, and 'TimeWindowCompactionStrategy' should be preferred instead). The default is 'SizeTieredCompactionStrategy'. A custom strategy can be provided by specifying the full class name as a string constant.

-
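For instance, the compaction strategy can be set (or changed later) like any other table option; a sketch using the timeline table defined earlier:

ALTER TABLE timeline WITH compaction = { 'class' : 'SizeTieredCompactionStrategy' };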

All default strategies support a number of common options, as well as options specific to -the strategy chosen (see the section corresponding to your strategy for details: STCS, LCS and TWCS).

-

The compression options define if and how the sstables of the table are compressed. Compression is configured on a per-table -basis as an optional argument to CREATE TABLE or ALTER TABLE. The following sub-options are -available:

 Option             | Default       | Description
 -------------------|---------------|------------
 class              | LZ4Compressor | The compression algorithm to use. Default compressors are: LZ4Compressor, SnappyCompressor, DeflateCompressor and ZstdCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
 enabled            | true          | Enable/disable sstable compression. If the enabled option is set to false, no other options must be specified.
 chunk_length_in_kb | 64            | On disk, SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate but increase the minimum size of data to be read from disk for a read. The default value is an optimal value for compressing tables. Chunk length must be a power of 2 because that is assumed when computing the chunk number from an uncompressed file offset. Block size may be adjusted based on read/write access patterns, such as how much data is typically requested at once and the average size of rows in the table.
 crc_check_chance   | 1.0           | Determines how likely Cassandra is to verify the checksum on each compression chunk during reads.
 compression_level  | 3             | Compression level. It is only applicable for ZstdCompressor and accepts values between -131072 and 22.
-
For instance, to create a table with LZ4Compressor and a chunk_length_in_kb of 4KB:

-
CREATE TABLE simple (
-   id int,
-   key text,
-   value text,
-   PRIMARY KEY (key, value)
-) with compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};
-
-
-
Caching optimizes the use of cache memory of a table. The cached data is weighed by size and access frequency. The caching options allow configuring both the key cache and the row cache for the table. The following sub-options are available:

 Option             | Default | Description
 -------------------|---------|------------
 keys               | ALL     | Whether to cache keys ("key cache") for this table. Valid values are: ALL and NONE.
 rows_per_partition | NONE    | The amount of rows to cache per partition ("row cache"). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.
-

For instance, to create a table with both a key cache and 10 rows per partition:

-
CREATE TABLE simple (
-id int,
-key text,
-value text,
-PRIMARY KEY (key, value)
-) WITH caching = {'keys': 'ALL', 'rows_per_partition': 10};
-
-
-
The read_repair option configures the read repair behavior, allowing tuning for various performance and consistency behaviors. Two consistency properties are affected by read repair behavior.

-
    -
  • Monotonic Quorum Reads: Provided by BLOCKING. Monotonic quorum reads prevents reads from appearing to go back -in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of -replicas, it may be visible in one read, and then disappear in a subsequent read.
  • -
  • Write Atomicity: Provided by NONE. Write atomicity prevents reads from returning partially applied writes. -Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement -is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it -is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a -batch, but then select a single row by specifying the clustering column in a SELECT statement.
  • -
-

The available read repair settings are:

-

The default setting. When read_repair is set to BLOCKING, and a read repair is triggered, the read will block -on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition -level write atomicity

-

When read_repair is set to NONE, the coordinator will reconcile any differences between replicas, but will not -attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads.

-
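The read_repair option is set like any other table option; for instance, a sketch that opts a table into the NONE behavior described above:

ALTER TABLE users WITH read_repair = 'NONE';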
    -
  • Adding new columns (see ALTER TABLE below) is a constant time operation. There is thus no need to try to -anticipate future usage when creating a table.
  • -
-
-
-

ALTER TABLE

-

Altering an existing table uses the ALTER TABLE statement:

-
-alter_table_statement   ::=  ALTER TABLE table_name alter_table_instruction
-alter_table_instruction ::=  ADD column_name cql_type ( ',' column_name cql_type )*
-                             | DROP column_name ( column_name )*
-                             | WITH options
-
-

For instance:

-
ALTER TABLE addamsFamily ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-       WITH comment = 'A most excellent and useful table';
-
-
-

The ALTER TABLE statement can:

-
    -
  • Add new column(s) to the table (through the ADD instruction). Note that the primary key of a table cannot be changed, and thus newly added columns will, by extension, never be part of the primary key. Also note that compact tables have restrictions regarding column addition. Note that this is a constant-time operation (in the amount of data the cluster contains).
  • -
  • Remove column(s) from the table, as shown in the example after this list. This drops both the column and all its content, but note that while the column becomes immediately unavailable, its content is only removed lazily during compaction. Please also see the warnings below. Due to lazy removal, the altering itself is a constant-time operation (in the amount of data removed or contained in the cluster).
  • -
  • Change some of the table options (through the WITH instruction). The supported options are the same as when creating a table (outside of COMPACT STORAGE and CLUSTERING ORDER, which cannot be changed after creation). Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.
  • -
-
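For instance, to drop the column added in the earlier example (a sketch; substitute your own table and column names):

ALTER TABLE addamsFamily DROP gravesite;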
-

Warning

-
Dropping a column assumes that the timestamps used for the values of this column are "real" timestamps in microseconds. Using "real" timestamps in microseconds is the default and is strongly recommended, but as Cassandra allows the client to provide any timestamp on any table, it is theoretically possible to use another convention. Please be aware that if you do so, dropping a column will not work correctly.

-
-
-

Warning

-
Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one, unless the type of the dropped column was a (non-frozen) collection (due to an internal technical limitation).

-
-
-
-

DROP TABLE

-

Dropping a table uses the DROP TABLE statement:

-
-drop_table_statement ::=  DROP TABLE [ IF EXISTS ] table_name
-
-
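For instance (a sketch reusing the monkeySpecies table created earlier):

DROP TABLE IF EXISTS monkeySpecies;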

Dropping a table results in the immediate, irreversible removal of the table, including all data it contains.

-

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the -operation is a no-op.

-
-
-

TRUNCATE

-

A table can be truncated using the TRUNCATE statement:

-
-truncate_statement ::=  TRUNCATE [ TABLE ] table_name
-
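For instance (a sketch, again using the monkeySpecies table):

TRUNCATE monkeySpecies;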
-
Note that TRUNCATE TABLE foo is allowed for consistency with other DDL statements, but tables are currently the only objects that can be truncated, and so the TABLE keyword can be omitted.

-

Truncating a table permanently removes all existing data from the table, but without removing the table itself.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/definitions.html b/src/doc/4.0-beta1/cql/definitions.html deleted file mode 100644 index 3fad24fef..000000000 --- a/src/doc/4.0-beta1/cql/definitions.html +++ /dev/null @@ -1,317 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Definitions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Definitions

-
-

Conventions

-

To aid in specifying the CQL syntax, we will use the following conventions in this document:

-
    -
  • Language rules will be given in an informal BNF variant notation. In particular, we'll use square brackets ([ item ]) for optional items, * and + for repeated items (where + implies at least one).
  • -
  • The grammar will also use the following convention for convenience: non-terminal terms will be lowercase (and link to their definition) while terminal keywords will be provided in “all caps”. Note however that keywords are Identifiers and keywords and are thus case insensitive in practice. We will also define some early constructions using regexps, which we'll indicate with re(<some regular expression>).
  • -
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the comma on the last column definition in a CREATE TABLE statement is optional but supported if present even though the grammar in this document suggests otherwise. Also, not everything accepted by the grammar is necessarily valid CQL.
  • -
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.
  • -
-
-
-

Identifiers and keywords

-

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token -matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

-

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language -and most are reserved. The list of those keywords can be found in Appendix A: CQL Keywords.

-
Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

-
There is a second kind of identifiers called quoted identifiers, defined by enclosing an arbitrary sequence of characters (non empty) in double-quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column (note that doing so is not particularly advised), while select would raise a parsing error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is however equivalent to the unquoted identifier obtained by removing the double-quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.

-
-

Note

-
Quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional update, the server will respond with a result-set containing a special result named "[applied]". If you've declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred, but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by squared brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

-
-

More formally, we have:

-
-identifier          ::=  unquoted_identifier | quoted_identifier
-unquoted_identifier ::=  re('[a-zA-Z][a-zA-Z0-9_]*')
-quoted_identifier   ::=  '"' (any character where " can appear if doubled)+ '"'
-
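The following sketch (with a hypothetical test table) illustrates the case-sensitivity rules above:

CREATE TABLE test ("myId" int PRIMARY KEY, value text);
SELECT "myId" FROM test;  -- valid: the quoted name keeps its exact case
SELECT myid FROM test;    -- error: the column was declared as "myId", not myid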
-
-
-

Constants

-

CQL defines the following kind of constants:

-
-constant ::=  string | integer | float | boolean | uuid | blob | NULL
-string   ::=  '\'' (any character where ' can appear if doubled)+ '\''
-              '$$' (any character other than '$$') '$$'
-integer  ::=  re('-?[0-9]+')
-float    ::=  re('-?[0-9]+(\.[0-9]*)?([eE][+-]?[0-9+])?') | NAN | INFINITY
-boolean  ::=  TRUE | FALSE
-uuid     ::=  hex{8}-hex{4}-hex{4}-hex{4}-hex{12}
-hex      ::=  re("[0-9a-fA-F]")
-blob     ::=  '0' ('x' | 'X') hex+
-
-

In other words:

-
    -
  • A string constant is an arbitrary sequence of characters enclosed by single-quote('). A single-quote -can be included by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted -Identifiers and keywords that use double-quotes. Alternatively, a string can be defined by enclosing the arbitrary sequence -of characters by two dollar characters, in which case single-quote can be used without escaping ($$It's raining -today$$). That latter form is often used when defining user-defined functions to avoid having to -escape single-quote characters in function body (as they are more likely to occur than $$).
  • -
  • Integer, float and boolean constants are defined as expected. Note however that float allows the special NaN and Infinity constants.
  • -
  • CQL supports UUID constants.
  • -
  • Blobs content are provided in hexadecimal and prefixed by 0x.
  • -
  • The special NULL constant denotes the absence of value.
  • -
-

For how these constants are typed, see the Data Types section.

-
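For instance, the following sketch (with a hypothetical sensors table) uses a string, an integer, a uuid and a blob constant:

INSERT INTO sensors (id, name, reading, raw_payload)
VALUES (123e4567-e89b-12d3-a456-426655440000, 'kitchen', 42, 0xCAFE);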
-
-

Terms

-
CQL has the notion of a term, which denotes the kind of values that CQL supports. Terms are defined by:

-
-term                 ::=  constant | literal | function_call | arithmetic_operation | type_hint | bind_marker
-literal              ::=  collection_literal | udt_literal | tuple_literal
-function_call        ::=  identifier '(' [ term (',' term)* ] ')'
-arithmetic_operation ::=  '-' term | term ('+' | '-' | '*' | '/' | '%') term
-type_hint            ::=  '(' cql_type ')' term
-bind_marker          ::=  '?' | ':' identifier
-
-
A term is thus one of:

  • a constant
  • a literal for a collection, a user-defined type or a tuple
  • a function call
  • an arithmetic operation between terms
  • a type hint
  • a bind marker
-
-

Comments

-

A comment in CQL is a line beginning by either double dashes (--) or double slash (//).

-

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-
-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-
-
-
-
-

Statements

-
CQL consists of statements that can be divided in the following categories:

  • data definition statements, to define and change how data is stored (keyspaces and tables)
  • data manipulation statements, for selecting, inserting, updating and deleting data
  • secondary index statements
  • materialized view statements
  • role and permission statements
  • user-defined function (UDF) statements
  • user-defined type (UDT) statements
  • trigger statements

All the statements are listed below and are described in the rest of this documentation (see links above):

-
-cql_statement                ::=  statement [ ';' ]
-statement                    ::=  ddl_statement
-                                  | dml_statement
-                                  | secondary_index_statement
-                                  | materialized_view_statement
-                                  | role_or_permission_statement
-                                  | udf_statement
-                                  | udt_statement
-                                  | trigger_statement
-ddl_statement                ::=  use_statement
-                                  | create_keyspace_statement
-                                  | alter_keyspace_statement
-                                  | drop_keyspace_statement
-                                  | create_table_statement
-                                  | alter_table_statement
-                                  | drop_table_statement
-                                  | truncate_statement
-dml_statement                ::=  select_statement
-                                  | insert_statement
-                                  | update_statement
-                                  | delete_statement
-                                  | batch_statement
-secondary_index_statement    ::=  create_index_statement
-                                  | drop_index_statement
-materialized_view_statement  ::=  create_materialized_view_statement
-                                  | drop_materialized_view_statement
-role_or_permission_statement ::=  create_role_statement
-                                  | alter_role_statement
-                                  | drop_role_statement
-                                  | grant_role_statement
-                                  | revoke_role_statement
-                                  | list_roles_statement
-                                  | grant_permission_statement
-                                  | revoke_permission_statement
-                                  | list_permissions_statement
-                                  | create_user_statement
-                                  | alter_user_statement
-                                  | drop_user_statement
-                                  | list_users_statement
-udf_statement                ::=  create_function_statement
-                                  | drop_function_statement
-                                  | create_aggregate_statement
-                                  | drop_aggregate_statement
-udt_statement                ::=  create_type_statement
-                                  | alter_type_statement
-                                  | drop_type_statement
-trigger_statement            ::=  create_trigger_statement
-                                  | drop_trigger_statement
-
-
-
-

Prepared Statements

-
CQL supports prepared statements. Prepared statements are an optimization that allows parsing a query only once but executing it multiple times with different concrete values.

-
Any statement that uses at least one bind marker (see bind_marker) will need to be prepared. After that, the statement can be executed by providing concrete values for each of its markers. The exact details of how a statement is prepared and then executed depend on the CQL driver used, and you should refer to your driver documentation.

-
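For instance, the following sketch shows a positional and a named bind marker; the statements would be prepared by the client driver and then executed with concrete values:

SELECT name, occupation FROM users WHERE userid = ?;
UPDATE users SET occupation = :job WHERE userid = :id;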
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/dml.html b/src/doc/4.0-beta1/cql/dml.html deleted file mode 100644 index 3455a7d31..000000000 --- a/src/doc/4.0-beta1/cql/dml.html +++ /dev/null @@ -1,561 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Manipulation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Manipulation

-

This section describes the statements supported by CQL to insert, update, delete and query data.

-
-

SELECT

-
Querying data from tables is done using a SELECT statement:

-
-select_statement ::=  SELECT [ JSON | DISTINCT ] ( select_clause | '*' )
-                      FROM table_name
-                      [ WHERE where_clause ]
-                      [ GROUP BY group_by_clause ]
-                      [ ORDER BY ordering_clause ]
-                      [ PER PARTITION LIMIT (integer | bind_marker) ]
-                      [ LIMIT (integer | bind_marker) ]
-                      [ ALLOW FILTERING ]
-select_clause    ::=  selector [ AS identifier ] ( ',' selector [ AS identifier ] )
-selector         ::=  column_name
-                      | term
-                      | CAST '(' selector AS cql_type ')'
-                      | function_name '(' [ selector ( ',' selector )* ] ')'
-                      | COUNT '(' '*' ')'
-where_clause     ::=  relation ( AND relation )*
-relation         ::=  column_name operator term
-                      '(' column_name ( ',' column_name )* ')' operator tuple_literal
-                      TOKEN '(' column_name ( ',' column_name )* ')' operator term
-operator         ::=  '=' | '<' | '>' | '<=' | '>=' | '!=' | IN | CONTAINS | CONTAINS KEY
-group_by_clause  ::=  column_name ( ',' column_name )*
-ordering_clause  ::=  column_name [ ASC | DESC ] ( ',' column_name [ ASC | DESC ] )*
-
-

For instance:

-
SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT (*) AS user_count FROM users;
-
-
-
The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of the rows matching the request, where each row contains the values for the selection corresponding to the query. Additionally, functions, including aggregation ones, can be applied to the result.

-
A SELECT statement contains at least a selection clause and the name of the table on which the selection is made (note that CQL does not support joins or sub-queries, and thus a select statement only applies to a single table). In most cases, a select will also have a where clause, and it can optionally have additional clauses to order or limit the results. Lastly, queries that require filtering can be allowed if the ALLOW FILTERING flag is provided.

-
-

Selection clause

-
The select_clause determines which columns need to be queried and returned in the result-set, as well as any transformation to apply to this result before returning. It consists of a comma-separated list of selectors or, alternatively, of the wildcard character (*) to select all the columns defined in the table.

-
-

Selectors

-

A selector can be one of:

-
    -
  • A column name of the table selected, to retrieve the values for that column.
  • -
  • A term, which is usually used nested inside other selectors like functions (if a term is selected directly, then the -corresponding column of the result-set will simply have the value of this term for every row returned).
  • -
  • A casting, which allows to convert a nested selector to a (compatible) type.
  • -
  • A function call, where the arguments are selector themselves. See the section on functions for -more details.
  • -
  • The special call COUNT(*) to the COUNT function, which counts all non-null results.
  • -
-
-
-

Aliases

-

Every top-level selector can also be aliased (using AS). If so, the name of the corresponding column in the result -set will be that of the alias. For instance:

-
// Without alias
-SELECT intAsBlob(4) FROM t;
-
-//  intAsBlob(4)
-// --------------
-//  0x00000004
-
-// With alias
-SELECT intAsBlob(4) AS four FROM t;
-
-//  four
-// ------------
-//  0x00000004
-
-
-
-

Note

-
Currently, aliases aren't recognized anywhere else in the statement where they are used (not in the WHERE clause, not in the ORDER BY clause, …). You must use the original column name instead.

-
-
-
-

WRITETIME and TTL function

-
Selection supports two special functions (that aren't allowed anywhere else): WRITETIME and TTL. Both functions take only one argument, and that argument must be a column name (so for instance TTL(3) is invalid).

-
Those functions allow retrieving meta-information that is stored internally for each column, namely:

-
    -
  • the timestamp of the value of the column for WRITETIME.
  • -
  • the remaining time to live (in seconds) for the value of the column if it is set to expire (and null otherwise).
  • -
-
-
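For instance, a sketch using the users table from the earlier examples:

SELECT WRITETIME(occupation), TTL(occupation) FROM users WHERE userid = 199;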
-
-

The WHERE clause

-

The WHERE clause specifies which rows must be queried. It is composed of relations on the columns that are part of -the PRIMARY KEY and/or have a secondary index defined on them.

-
Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered as an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations on them are restricted to the relations that allow selecting a contiguous (for the ordering) set of rows. For instance, given:

-
CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-
-
-

The following query is allowed:

-
SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND blog_title='John''s Blog'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are -set):

-
// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts
- WHERE userid = 'john doe'
-   AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-
-
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, -rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a -key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also -note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, -token(-1) > token(0) in particular). Example:

-
SELECT * FROM posts
- WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-
-
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full -primary key.

-

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-
-
-
will request all rows that sort after the one having “John's Blog” as blog_title and ‘2012-01-01' for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which would not be the case for:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND blog_title > 'John''s Blog'
-   AND posted_at > '2012-01-01'
-
-
-

The tuple notation may also be used for IN clauses on clustering columns:

-
SELECT * FROM posts
- WHERE userid = 'john doe'
-   AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-
-
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, -CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the -map keys.

-
-
-

Grouping results

-
The GROUP BY option allows condensing into a single row all selected rows that share the same values for a set of columns.

-
Using the GROUP BY option, it is only possible to group rows at the partition key level or at a clustering column level. Consequently, the GROUP BY option only accepts as arguments primary key column names in the primary key order. If a primary key column is restricted by an equality restriction, it is not required to be present in the GROUP BY clause.

-
Aggregate functions will produce a separate value for each group. If no GROUP BY clause is specified, aggregate functions will produce a single value for all the rows.

-
If a column is selected without an aggregate function, in a statement with a GROUP BY, the first value encountered in each group will be returned.

-
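For instance, a sketch using the posts table defined above, counting rows per partition:

SELECT userid, COUNT(*) FROM posts GROUP BY userid;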
-
-

Ordering results

-
The ORDER BY clause allows selecting the order of the returned results. It takes as argument a list of column names along with the order for the column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited by the clustering order defined on the table:

-
    -
  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • -
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
  • -
-
-
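For instance, with the posts table defined above (default clustering order blog_title ASC, posted_at ASC), the reverse clustering order can be requested as a whole:

SELECT * FROM posts
 WHERE userid = 'john doe'
 ORDER BY blog_title DESC, posted_at DESC;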
-

Limiting results

-
The LIMIT option to a SELECT statement limits the number of rows returned by a query, while the PER PARTITION LIMIT option limits the number of rows returned for a given partition by the query. Note that both types of limit can be used in the same statement.

-
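For instance, a sketch using the posts table again, returning at most 2 rows per partition and 10 rows overall:

SELECT * FROM posts PER PARTITION LIMIT 2 LIMIT 10;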
-
-

Allowing filtering

-
By default, CQL only allows select queries that don't involve “filtering” server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

-

The ALLOW FILTERING option allows to explicitly allow (some) queries that require filtering. Please note that a -query using ALLOW FILTERING may thus have unpredictable performance (for the definition above), i.e. even a query -that selects a handful of records may exhibit performance that depends on the total amount of data stored in the -cluster.

-

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on -it) and country of residence:

-
CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-
-
-

Then the following queries are valid:

-
SELECT * FROM users;
-SELECT * FROM users WHERE birth_year = 1981;
-
-
-
because in both cases, Cassandra guarantees that these queries' performance will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored; nevertheless, the number of nodes will always be many orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

-

However, the following query will be rejected:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR';
-
-
-
because Cassandra cannot guarantee that it won't have to scan a large amount of data even if the result of that query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you “know what you are doing”, you can force the execution of this query by using ALLOW FILTERING, and so the following query is valid:

-
SELECT * FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-
-
-
-
-
-

INSERT

-

Inserting data for a row is done using an INSERT statement:

-
-insert_statement ::=  INSERT INTO table_name ( names_values | json_clause )
-                      [ IF NOT EXISTS ]
-                      [ USING update_parameter ( AND update_parameter )* ]
-names_values     ::=  names VALUES tuple_literal
-json_clause      ::=  JSON string [ DEFAULT ( NULL | UNSET ) ]
-names            ::=  '(' column_name ( ',' column_name )* ')'
-
-

For instance:

-
INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-      USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity",
-                              "director": "Joss Whedon",
-                              "year": 2005}';
-
-
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by -its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert to must be -supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the -section on JSON support for more detail.

-
Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means to know which of creation or update happened.

-

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the -insertion. But please note that using IF NOT EXISTS will incur a non negligible performance cost (internally, Paxos -will be used) so this should be used sparingly.

-
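For instance, a sketch reusing the NerdMovies example above:

INSERT INTO NerdMovies (movie, director, main_actor, year)
                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
IF NOT EXISTS;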

All updates for an INSERT are applied atomically and in isolation.

-

Please refer to the UPDATE section for informations on the update_parameter.

-

Also note that INSERT does not support counters, while UPDATE does.

-
-
-

UPDATE

-

Updating a row is done using an UPDATE statement:

-
-update_statement ::=  UPDATE table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      SET assignment ( ',' assignment )*
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-update_parameter ::=  ( TIMESTAMP | TTL ) ( integer | bind_marker )
-assignment       ::=  simple_selection '=' term
-                     | column_name '=' column_name ( '+' | '-' ) term
-                     | column_name '=' list_literal '+' column_name
-simple_selection ::=  column_name
-                     | column_name '[' term ']'
-                     | column_name '.' `field_name
-condition        ::=  simple_selection operator term
-
-

For instance:

-
UPDATE NerdMovies USING TTL 400
-   SET director   = 'Joss Whedon',
-       main_actor = 'Nathan Fillion',
-       year       = 2005
- WHERE movie = 'Serenity';
-
-UPDATE UserActions
-   SET total = total + 2
-   WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14
-     AND action = 'click';
-
-
-

The UPDATE statement writes one or more columns for a given row in a table. The where_clause is used to -select the row to update and must include all columns composing the PRIMARY KEY. Non primary key columns are then -set using the SET keyword.

-

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through IF, see -below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know -whether a creation or update occurred.

-

It is however possible to specify conditions on some columns through IF, in which case the row will not be updated unless the conditions are met. But please note that using IF conditions will incur a non-negligible performance cost (internally, Paxos will be used), so it should be used sparingly.
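As a sketch only (the users table and its password column are assumed here, not defined above), a conditional update might look like:

UPDATE users
   SET password = 'newpassword'
 WHERE userid = 'user2'
    IF password = 'oldpassword';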

-

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

-

Regarding the assignment:

-
    -
  • c = c + 3 is used to increment/decrement counters. The column name after the ‘=’ sign must be the same as the one before the ‘=’ sign. Note that increment/decrement is only allowed on counters, and these are the only update operations allowed on counters. See the section on counters for details.
  • -
  • id = id + <some-collection> and id[value1] = value2 are for collections, see the relevant section for details.
  • -
  • id.field = 3 is for setting the value of a field on a non-frozen user-defined type. See the relevant section for details.
  • -
-
-

Update parameters

-

The UPDATE, INSERT (and DELETE and BATCH for the TIMESTAMP) statements support the following -parameters:

-
    -
  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in -microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • -
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are -automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not -the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL -is specified in that update). By default, values never expire. A TTL of 0 is equivalent to no TTL. If the table has a -default_time_to_live, a TTL of 0 will remove the TTL for the inserted or updated values. A TTL of null is equivalent -to inserting with a TTL of 0.
  • -
-
-
-
-

DELETE

-

Deleting rows or parts of rows uses the DELETE statement:

-
-delete_statement ::=  DELETE [ simple_selection ( ',' simple_selection ) ]
-                      FROM table_name
-                      [ USING update_parameter ( AND update_parameter )* ]
-                      WHERE where_clause
-                      [ IF ( EXISTS | condition ( AND condition )*) ]
-
-

For instance:

-
DELETE FROM NerdMovies USING TIMESTAMP 1240003134
- WHERE movie = 'Serenity';
-
-DELETE phone FROM Users
- WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-
-
-

The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, -only those columns are deleted from the row indicated by the WHERE clause. Otherwise, whole rows are removed.

-

The WHERE clause specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an -IN operator. A range of rows may be deleted using an inequality operator (such as >=).

-

DELETE supports the TIMESTAMP option with the same semantics as in updates.

-

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

-

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT -statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost -(internally, Paxos will be used) and so should be used sparingly.

-
-
-

BATCH

-

Multiple INSERT, UPDATE and DELETE can be executed in a single statement by grouping them through a -BATCH statement:

-
-batch_statement        ::=  BEGIN [ UNLOGGED | COUNTER ] BATCH
-                            [ USING update_parameter ( AND update_parameter )* ]
-                            modification_statement ( ';' modification_statement )*
-                            APPLY BATCH
-modification_statement ::=  insert_statement | update_statement | delete_statement
-
-

For instance:

-
BEGIN BATCH
-   INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-   UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-   DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-
-
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

-
    -
  • It saves network round-trips between the client and the server (and sometimes between the server coordinator and the -replicas) when batching multiple updates.
  • -
  • All updates in a BATCH belonging to a given partition key are performed in isolation.
  • -
  • By default, all operations in the batch are performed as logged, to ensure all mutations eventually complete (or -none will). See the notes on UNLOGGED batches for more details.
  • -
-

Note that:

-
    -
  • BATCH statements may only contain UPDATE, INSERT and DELETE statements (not other batches for instance).
  • -
  • Batches are not a full analogue for SQL transactions.
  • -
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp -(either one generated automatically, or the timestamp provided at the batch level). Due to Cassandra’s conflict -resolution procedure in the case of timestamp ties, operations may -be applied in an order that is different from the order they are listed in the BATCH statement. To force a -particular operation ordering, you must specify per-operation timestamps.
  • -
  • A LOGGED batch to a single partition will be converted to an UNLOGGED batch as an optimization.
  • -
-
-

UNLOGGED batches

-

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note -however that operations are only isolated within a single partition).

-

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.
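For example, a sketch of an unlogged batch, reusing the users table from the BATCH example above; only the UNLOGGED keyword differs from a logged batch:

BEGIN UNLOGGED BATCH
   INSERT INTO users (userid, password) VALUES ('user5', 'ch@ngem3d');
   UPDATE users SET password = 'ps33dhds' WHERE userid = 'user6';
APPLY BATCH;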

-
-
-

COUNTER batches

-

Use the COUNTER option for batched counter updates. Unlike other -updates in Cassandra, counter updates are not idempotent.
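As an illustrative sketch, reusing the UserActions counter table from the UPDATE example above:

BEGIN COUNTER BATCH
   UPDATE UserActions SET total = total + 2 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';
   UPDATE UserActions SET total = total + 1 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'view';
APPLY BATCH;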

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/functions.html b/src/doc/4.0-beta1/cql/functions.html deleted file mode 100644 index 31431b7c1..000000000 --- a/src/doc/4.0-beta1/cql/functions.html +++ /dev/null @@ -1,706 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Functions" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Functions

-

CQL supports 2 main categories of functions:

-
    -
  • the scalar functions, which simply take a number of values and produce an output from them.
  • -
  • the aggregate functions, which are used to aggregate multiple rows results from a -SELECT statement.
  • -
-

In both cases, CQL provides a number of native “hard-coded” functions as well as the ability to create new user-defined -functions.

-
-

Note

-

The use of user-defined functions is disabled by default for security reasons (even when enabled, the execution of user-defined functions is sandboxed and a “rogue” function should not be able to do evil, but no sandbox is perfect, so using user-defined functions is opt-in). See the enable_user_defined_functions setting in cassandra.yaml to enable them.

-
-

A function is identified by its name:

-
-function_name ::=  [ keyspace_name '.' ] name
-
-
-

Scalar functions

-
-

Native functions

-
-

Cast

-

The cast function can be used to convert one native datatype to another.

-

The following table describes the conversions supported by the cast function. Cassandra will silently ignore any -cast converting a datatype into its own datatype.

From      | To
----------|----------------------------------------------------------------------------
ascii     | text, varchar
bigint    | tinyint, smallint, int, float, double, decimal, varint, text, varchar
boolean   | text, varchar
counter   | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
date      | timestamp
decimal   | tinyint, smallint, int, bigint, float, double, varint, text, varchar
double    | tinyint, smallint, int, bigint, float, decimal, varint, text, varchar
float     | tinyint, smallint, int, bigint, double, decimal, varint, text, varchar
inet      | text, varchar
int       | tinyint, smallint, bigint, float, double, decimal, varint, text, varchar
smallint  | tinyint, int, bigint, float, double, decimal, varint, text, varchar
time      | text, varchar
timestamp | date, text, varchar
timeuuid  | timestamp, date, text, varchar
tinyint   | tinyint, smallint, int, bigint, float, double, decimal, varint, text, varchar
uuid      | text, varchar
varint    | tinyint, smallint, int, bigint, float, double, decimal, text, varchar
-

The conversions rely strictly on Java’s semantics. For example, the double value 1 will be converted to the text value -‘1.0’. For instance:

-
SELECT avg(cast(count as double)) FROM myTable
-
-
-
-
-

Token

-

The token function computes the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

-

The types of the arguments of token depend on the types of the partition key columns. The return type depends on the partitioner in use:

-
    -
  • For Murmur3Partitioner, the return type is bigint.
  • -
  • For RandomPartitioner, the return type is varint.
  • -
  • For ByteOrderedPartitioner, the return type is blob.
  • -
-

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by:

-
CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-)
-
-
-

then the token function will take a single argument of type text (in that case, the partition key is userid; there are no clustering columns, so the partition key is the same as the primary key), and the return type will be bigint.
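For example, a query such as the following sketch returns the token of each row's partition key for that table:

SELECT userid, token(userid) FROM users;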

-
-
-

Uuid

-

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or -UPDATE statements.

-
-
-

Timeuuid functions

-
-
now
-

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid at the -time the function is invoked. Note that this method is useful for insertion but is largely non-sensical in -WHERE clauses. For instance, a query of the form:

-
SELECT * FROM myTable WHERE t = now()
-
-
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

-

currentTimeUUID is an alias of now.

-
-
-
minTimeuuid and maxTimeuuid
-

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid whose timestamp is t. So for instance:

-
SELECT * FROM myTable
- WHERE t > maxTimeuuid('2013-01-01 00:05+0000')
-   AND t < minTimeuuid('2013-02-02 10:00+0000')
-
-
-

will select all rows where the timeuuid column t is strictly older than '2013-01-01 00:05+0000' but strictly -younger than '2013-02-02 10:00+0000'. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still -not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > -maxTimeuuid('2013-01-01 00:05+0000').

-
-

Note

-

We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the time-based UUID generation process specified by RFC 4122. In particular, the values returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

-
-
-
-
-

Datetime functions

-
-
Retrieving the current date/time
-

The following functions can be used to retrieve the date/time at the time where the function is invoked:

Function name    | Output type
-----------------|------------
currentTimestamp | timestamp
currentDate      | date
currentTime      | time
currentTimeUUID  | timeUUID
-

For example the last 2 days of data can be retrieved using:

-
SELECT * FROM myTable WHERE date >= currentDate() - 2d
-
-
-
-
-
Time conversion functions
-

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native -type.

Function name   | Input type | Description
----------------|------------|---------------------------------------------------------
toDate          | timeuuid   | Converts the timeuuid argument into a date type
toDate          | timestamp  | Converts the timestamp argument into a date type
toTimestamp     | timeuuid   | Converts the timeuuid argument into a timestamp type
toTimestamp     | date       | Converts the date argument into a timestamp type
toUnixTimestamp | timeuuid   | Converts the timeuuid argument into a bigInt raw value
toUnixTimestamp | timestamp  | Converts the timestamp argument into a bigInt raw value
toUnixTimestamp | date       | Converts the date argument into a bigInt raw value
dateOf          | timeuuid   | Similar to toTimestamp(timeuuid) (DEPRECATED)
unixTimestampOf | timeuuid   | Similar to toUnixTimestamp(timeuuid) (DEPRECATED)
-
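As a small sketch (reusing the myTable table with its timeuuid column t from the earlier examples):

SELECT toDate(t), toTimestamp(t), toUnixTimestamp(t) FROM myTable;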
-
-
-

Blob conversion functions

-

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL (a notable exception being blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a blob argument and converts it back into a type value. So, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.
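As a hedged sketch (some_table and its bigint column x are hypothetical, used only to show both directions of the conversion):

SELECT bigintAsBlob(x) FROM some_table;                 -- yields blobs such as 0x0000000000000003
SELECT blobAsBigint(bigintAsBlob(x)) FROM some_table;   -- round-trips back to the original bigint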

-
-
-
-

User-defined functions

-

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining -functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and -Scala) can be added by adding a JAR to the classpath.

-

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

-

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

-
CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-
-
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, -implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of -exceptions. An exception during function execution will result in the entire statement failing.

-

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. -Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the -documentation of the Java Driver for details on handling tuple types and user-defined types.

-

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

-

Note that you can use the double-dollar-sign ($$) syntax to enclose the UDF source code. For example:

-
CREATE FUNCTION some_function ( arg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS int
-    LANGUAGE java
-    AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-
-CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen<custom_type> )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$ return udtarg.getString("txt"); $$;
-
-
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

-

The implicitly available udfContext field (or binding for script UDFs) provides the necessary functionality to -create new UDT and tuple values:

-
CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( somearg int )
-    RETURNS NULL ON NULL INPUT
-    RETURNS custom_type
-    LANGUAGE java
-    AS $$
-        UDTValue udt = udfContext.newReturnUDTValue();
-        udt.setString("txt", "some string");
-        udt.setInt("i", 42);
-        return udt;
-    $$;
-
-
-

The definition of the UDFContext interface can be found in the Apache Cassandra source code for -org.apache.cassandra.cql3.functions.UDFContext.

-
public interface UDFContext
-{
-    UDTValue newArgUDTValue(String argName);
-    UDTValue newArgUDTValue(int argNum);
-    UDTValue newReturnUDTValue();
-    UDTValue newUDTValue(String udtName);
-    TupleValue newArgTupleValue(String argName);
-    TupleValue newArgTupleValue(int argNum);
-    TupleValue newReturnTupleValue();
-    TupleValue newTupleValue(String cqlDefinition);
-}
-
-
-

Java UDFs already have some imports for common interfaces and classes defined. These imports are:

-
import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.apache.cassandra.cql3.functions.UDFContext;
-import com.datastax.driver.core.TypeCodec;
-import com.datastax.driver.core.TupleValue;
-import com.datastax.driver.core.UDTValue;
-
-
-

Please note that these convenience imports are not available for script UDFs.

-
-

CREATE FUNCTION

-

Creating a new user-defined function uses the CREATE FUNCTION statement:

-
-create_function_statement ::=  CREATE [ OR REPLACE ] FUNCTION [ IF NOT EXISTS]
-                                   function_name '(' arguments_declaration ')'
-                                   [ CALLED | RETURNS NULL ] ON NULL INPUT
-                                   RETURNS cql_type
-                                   LANGUAGE identifier
-                                   AS string
-arguments_declaration     ::=  identifier cql_type ( ',' identifier cql_type )*
-
-

For instance:

-
CREATE OR REPLACE FUNCTION somefunction(somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list)
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-CREATE FUNCTION IF NOT EXISTS akeyspace.fname(someArg int)
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-        // some Java code
-    $$;
-
-
-

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with -the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already -exists.

-

If the optional IF NOT EXISTS keywords are used, the function will -only be created if another function with the same signature does not -exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

Behavior on invocation with null values must be defined for each -function. There are two options:

-
    -
  1. RETURNS NULL ON NULL INPUT declares that the function will always -return null if any of the input arguments is null.
  2. -
  3. CALLED ON NULL INPUT declares that the function will always be -executed.
  4. -
-
-
Function Signature
-

Signatures are used to distinguish individual functions. The signature consists of:

-
    -
  1. The fully qualified function name - i.e keyspace plus function-name
  2. -
  3. The concatenated list of all argument types
  4. -
-

Note that keyspace names, function names and argument types are subject to the default naming conventions and -case-sensitivity rules.

-

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. -the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the -system keyspaces.

-
-
-
-

DROP FUNCTION

-

Dropping a function uses the DROP FUNCTION statement:

-
-drop_function_statement ::=  DROP FUNCTION [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-arguments_signature     ::=  cql_type ( ',' cql_type )*
-
-

For instance:

-
DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-
-
-

You must specify the argument types (arguments_signature) of the function to drop if there are multiple -functions with the same name but a different signature (overloaded functions).

-

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists, but does not throw an error if it doesn’t.

-
-
-
-
-

Aggregate functions

-

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.

-

If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with -aggregate functions, the values returned for them will be the ones of the first row matching the query.

-
-

Native aggregates

-
-

Count

-

The count function can be used to count the rows returned by a query. Example:

-
SELECT COUNT (*) FROM plays;
-SELECT COUNT (1) FROM plays;
-
-
-

It can also be used to count the non-null values of a given column:

-
SELECT COUNT (scores) FROM plays;
-
-
-
-
-

Max and Min

-

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a -given column. For instance:

-
SELECT MIN (players), MAX (players) FROM plays WHERE game = 'quake';
-
-
-
-
-

Sum

-

The sum function can be used to sum up all the values returned by a query for a given column. For instance:

-
SELECT SUM (players) FROM plays;
-
-
-
-
-

Avg

-

The avg function can be used to compute the average of all the values returned by a query for a given column. For -instance:

-
SELECT AVG (players) FROM plays;
-
-
-
-
-
-

User-Defined Aggregates

-

User-defined aggregates allow the creation of custom aggregate functions. Common examples of aggregate functions are -count, min, and max.

-

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first argument of the state function must have type STYPE. The remaining arguments of the state function must match the types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with the last state value as its argument.

-

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final -function (since the overload can appear after creation of the aggregate).

-

User-defined aggregates can be used in SELECT statements.

-

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE -statement):

-
CREATE OR REPLACE FUNCTION averageState(state tuple<int,bigint>, val int)
-    CALLED ON NULL INPUT
-    RETURNS tuple<int,bigint>
-    LANGUAGE java
-    AS $$
-        if (val != null) {
-            state.setInt(0, state.getInt(0)+1);
-            state.setLong(1, state.getLong(1)+val.intValue());
-        }
-        return state;
-    $$;
-
-CREATE OR REPLACE FUNCTION averageFinal (state tuple<int,bigint>)
-    CALLED ON NULL INPUT
-    RETURNS double
-    LANGUAGE java
-    AS $$
-        double r = 0;
-        if (state.getInt(0) == 0) return null;
-        r = state.getLong(1);
-        r /= state.getInt(0);
-        return Double.valueOf(r);
-    $$;
-
-CREATE OR REPLACE AGGREGATE average(int)
-    SFUNC averageState
-    STYPE tuple<int,bigint>
-    FINALFUNC averageFinal
-    INITCOND (0, 0);
-
-CREATE TABLE atable (
-    pk int PRIMARY KEY,
-    val int
-);
-
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-
-SELECT average(val) FROM atable;
-
-
-
-

CREATE AGGREGATE

-

Creating (or replacing) a user-defined aggregate function uses the CREATE AGGREGATE statement:

-
-create_aggregate_statement ::=  CREATE [ OR REPLACE ] AGGREGATE [ IF NOT EXISTS ]
-                                    function_name '(' arguments_signature ')'
-                                    SFUNC function_name
-                                    STYPE cql_type
-                                    [ FINALFUNC function_name ]
-                                    [ INITCOND term ]
-
-

See above for a complete example.

-

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one -with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature -already exists.

-

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

-

OR REPLACE and IF NOT EXISTS cannot be used together.

-

STYPE defines the type of the state value and must be specified.

-

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null -INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

-

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the -state function must match STYPE. The remaining argument types of the state function must match the argument types of -the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called -with null.

-

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with -type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS -NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

-

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is -defined, it is the return type of that function.

-
-
-

DROP AGGREGATE

-

Dropping a user-defined aggregate function uses the DROP AGGREGATE statement:

-
-drop_aggregate_statement ::=  DROP AGGREGATE [ IF EXISTS ] function_name [ '(' arguments_signature ')' ]
-
-

For instance:

-
DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-
-
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument -types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded -aggregates).

-

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if a -function with the signature does not exist.

-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/index.html b/src/doc/4.0-beta1/cql/index.html deleted file mode 100644 index 6f85cf8e5..000000000 --- a/src/doc/4.0-beta1/cql/index.html +++ /dev/null @@ -1,247 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "The Cassandra Query Language (CQL)" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

The Cassandra Query Language (CQL)

-

This document describes the Cassandra Query Language (CQL) [1]. Note that this document describes the latest version of the language. However, the changes section provides the diff between the different versions of CQL.

-

CQL offers a model close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL.

- - - - - - -
[1] Technically, this document describes CQL version 3, which is not backward compatible with CQL versions 1 and 2 (which have been deprecated and removed) and differs from them in numerous ways.
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/indexes.html b/src/doc/4.0-beta1/cql/indexes.html deleted file mode 100644 index 4c3958290..000000000 --- a/src/doc/4.0-beta1/cql/indexes.html +++ /dev/null @@ -1,171 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Secondary Indexes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Secondary Indexes

-

CQL supports creating secondary indexes on tables, allowing queries on the table to use those indexes. A secondary index -is identified by a name defined by:

-
-index_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE INDEX

-

Creating a secondary index on a table uses the CREATE INDEX statement:

-
-create_index_statement ::=  CREATE [ CUSTOM ] INDEX [ IF NOT EXISTS ] [ index_name ]
-                                ON table_name '(' index_identifier ')'
-                                [ USING string [ WITH OPTIONS = map_literal ] ]
-index_identifier       ::=  column_name
-                           | ( KEYS | VALUES | ENTRIES | FULL ) '(' column_name ')'
-
-

For instance:

-
CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-
-
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a -given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists -for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed -automatically at insertion time.

-

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it -is used, the statement will be a no-op if the index already exists.

-
-

Indexes on Map Keys

-

When creating an index on a map, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.
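For example, with the keys(favs) index created above on the users table, a sketch of a query that such an index enables (the 'movies' key is illustrative):

SELECT * FROM users WHERE favs CONTAINS KEY 'movies';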

-
-
-
-

DROP INDEX

-

Dropping a secondary index uses the DROP INDEX statement:

-
-drop_index_statement ::=  DROP INDEX [ IF EXISTS ] index_name
-
-

The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index -name, which may optionally specify the keyspace of the index.

-

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.
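For instance, dropping the index created in the earlier example (the mykeyspace qualifier in the second statement is hypothetical):

DROP INDEX userIndex;
DROP INDEX IF EXISTS mykeyspace.userIndex;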

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/json.html b/src/doc/4.0-beta1/cql/json.html deleted file mode 100644 index ddf5e5ea2..000000000 --- a/src/doc/4.0-beta1/cql/json.html +++ /dev/null @@ -1,318 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "JSON Support" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

JSON Support

-

Cassandra 2.2 introduces JSON support to SELECT and INSERT statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced); it simply provides a convenient way to work with JSON documents.

-
-

SELECT JSON

-

With SELECT statements, the JSON keyword can be used to return each row as a single JSON encoded map. The -remainder of the SELECT statement behavior is the same.

-

The result map keys are the same as the column names in a normal result set. For example, a statement like SELECT JSON a, ttl(b) FROM ... would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, SELECT JSON myColumn FROM ... would result in a map key "\"myColumn\"" (note the escaped quotes).

-

The map values will be JSON-encoded representations (as described below) of the result set values.
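A minimal sketch (atable, a and b are hypothetical names, used only to show the shape of the result keys described above):

SELECT JSON a, ttl(b) FROM atable;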

-
-
-

INSERT JSON

-

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single -row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same -table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a -table with two columns named “myKey” and “value”, you would do the following:

-
INSERT INTO mytable JSON '{ "\"myKey\"": 0, "value": 0}'
-
-
-

By default (or if DEFAULT NULL is explicitly used), a column omitted from the JSON map will be set to NULL, meaning that any pre-existing value for that column will be removed (resulting in a tombstone being created). Alternatively, if the DEFAULT UNSET directive is used after the value, omitted column values will be left unset, meaning that pre-existing values for those columns will be preserved.
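For example, a sketch reusing the mytable example above, where the omitted value column is left unset rather than being overwritten with null:

INSERT INTO mytable JSON '{ "\"myKey\"": 0 }' DEFAULT UNSET;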

-
-
-

JSON Encoding of Cassandra Data Types

-

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will -also accept string representations matching the CQL literal format for all single-field types. For example, floats, -ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, -and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string -representation of the collection.

-

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() -arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and -fromJson()):

Type      | Formats accepted       | Return format | Notes
----------|------------------------|---------------|---------------------------------------------------------------
ascii     | string                 | string        | Uses JSON’s \u character escape
bigint    | integer, string        | integer       | String must be valid 64 bit integer
blob      | string                 | string        | String should be 0x followed by an even number of hex digits
boolean   | boolean, string        | boolean       | String must be “true” or “false”
date      | string                 | string        | Date in format YYYY-MM-DD, timezone UTC
decimal   | integer, float, string | float         | May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder
double    | integer, float, string | float         | String must be valid integer or float
float     | integer, float, string | float         | String must be valid integer or float
inet      | string                 | string        | IPv4 or IPv6 address
int       | integer, string        | integer       | String must be valid 32 bit integer
list      | list, string           | list          | Uses JSON’s native list representation
map       | map, string            | map           | Uses JSON’s native map representation
smallint  | integer, string        | integer       | String must be valid 16 bit integer
set       | list, string           | list          | Uses JSON’s native list representation
text      | string                 | string        | Uses JSON’s \u character escape
time      | string                 | string        | Time of day in format HH-MM-SS[.fffffffff]
timestamp | integer, string        | string        | A timestamp. String constants allow inputting timestamps as dates. Datestamps with format YYYY-MM-DD HH:MM:SS.SSS are returned.
timeuuid  | string                 | string        | Type 1 UUID. See constant for the UUID format
tinyint   | integer, string        | integer       | String must be valid 8 bit integer
tuple     | list, string           | list          | Uses JSON’s native list representation
UDT       | map, string            | map           | Uses JSON’s native map representation with field names as keys
uuid      | string                 | string        | See constant for the UUID format
varchar   | string                 | string        | Uses JSON’s \u character escape
varint    | integer, string        | integer       | Variable length; may overflow 32 or 64 bit integers in client-side decoder
-
-
-

The fromJson() Function

-

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used -in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or -SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.
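As a sketch only, reusing the mytable example from the INSERT JSON section (the choice of the value column is illustrative):

UPDATE mytable SET value = fromJson('1') WHERE "myKey" = 0;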

-
-
-

The toJson() Function

-

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used -in the selection clause of a SELECT statement.
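For instance, a sketch reusing the same table:

SELECT "myKey", toJson(value) FROM mytable;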

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/mvs.html b/src/doc/4.0-beta1/cql/mvs.html deleted file mode 100644 index a931716ef..000000000 --- a/src/doc/4.0-beta1/cql/mvs.html +++ /dev/null @@ -1,261 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Materialized Views" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Materialized Views

-

Materialized view names are defined by:

-
-view_name ::=  re('[a-zA-Z_0-9]+')
-
-
-

CREATE MATERIALIZED VIEW

-

You can create a materialized view on a table using a CREATE MATERIALIZED VIEW statement:

-
-create_materialized_view_statement ::=  CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] view_name AS
-                                            select_statement
-                                            PRIMARY KEY '(' primary_key ')'
-                                            WITH table_options
-
-

For instance:

-
CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT * FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-
-
-

The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which -corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A -materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the -view.

-

Creating a materialized view has 3 main parts: the select statement that restricts the data included in the view, the primary key definition for the view, and the options of the view (each of these is detailed in the subsections below).

- -

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is -used. If it is used, the statement will be a no-op if the materialized view already exists.

-
-

Note

-

By default, materialized views are built in a single thread. The initial build can be parallelized by -increasing the number of threads specified by the property concurrent_materialized_view_builders in -cassandra.yaml. This property can also be manipulated at runtime through both JMX and the -setconcurrentviewbuilders and getconcurrentviewbuilders nodetool commands.

-
-
-

MV select statement

-

The select statement of a materialized view creation defines which rows of the base table are included in the view. That statement is limited in a number of ways:

-
    -
  • the selection is limited to plain columns of the base table. In other words, you can’t use any function (aggregate or not), casting, term, etc. Aliases are also not supported. You can however use * as a shortcut for selecting all columns. Further, static columns cannot be included in a materialized view (which means SELECT * isn’t allowed if the base table has static columns).
  • -
  • the WHERE clause has the following restrictions:
      -
    • it cannot include any bind_marker.
    • -
    • the columns that are not part of the base table primary key can only be restricted by an IS NOT NULL -restriction. No other restriction is allowed.
    • -
    • as the columns that are part of the view primary key cannot be null, they must always be at least restricted by an IS NOT NULL restriction (or any other restriction, but they must have one).
    • -
    -
  • -
  • it cannot have an ordering clause, a limit, or ALLOW FILTERING.
  • -
-
-
-

MV primary key

-

A view must have a primary key and that primary key must conform to the following restrictions:

-
    -
  • it must contain all the primary key columns of the base table. This ensures that every row of the view corresponds to exactly one row of the base table.
  • -
  • it can only contain a single column that is not a primary key column in the base table.
  • -
-

So for instance, given the following base table definition:

-
CREATE TABLE t (
-    k int,
-    c1 int,
-    c2 int,
-    v1 int,
-    v2 int,
-    PRIMARY KEY (k, c1, c2)
-)
-
-
-

then the following view definitions are allowed:

-
CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, k, c2)
-
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (v1, k, c1, c2)
-
-
-

but the following ones are not allowed:

-
// Error: cannot include both v1 and v2 in the primary key as both are not in the base table primary key
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE k IS NOT NULL AND c1 IS NOT NULL AND c2 IS NOT NULL AND v1 IS NOT NULL
-    PRIMARY KEY (v1, v2, k, c1, c2)
-
-// Error: must include k in the primary as it's a base table primary key column
-CREATE MATERIALIZED VIEW mv1 AS
-    SELECT * FROM t WHERE c1 IS NOT NULL AND c2 IS NOT NULL
-    PRIMARY KEY (c1, c2)
-
-
-
-
-

MV options

-

A materialized view is internally implemented by a table and, as such, creating a MV allows the same options as creating a table.

-
-
-
-

ALTER MATERIALIZED VIEW

-

After creation, you can alter the options of a materialized view using the ALTER MATERIALIZED VIEW statement:

-
-alter_materialized_view_statement ::=  ALTER MATERIALIZED VIEW view_name WITH table_options
-
-

The options that can be updated are the same as at creation time and thus the same as for tables.
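For instance, a sketch that updates an option of the view created earlier:

ALTER MATERIALIZED VIEW monkeySpecies_by_population WITH comment = 'Queries by population';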

-
-
-

DROP MATERIALIZED VIEW

-

Dropping a materialized view uses the DROP MATERIALIZED VIEW statement:

-
-drop_materialized_view_statement ::=  DROP MATERIALIZED VIEW [ IF EXISTS ] view_name;
-
-

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.
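For instance, a sketch dropping the view created earlier:

DROP MATERIALIZED VIEW IF EXISTS monkeySpecies_by_population;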

-
-

MV Limitations

-
-

Note

-

Removal of columns not selected in the Materialized View (via UPDATE base SET unselected_column = null or -DELETE unselected_column FROM base) may shadow missed updates to other columns received by hints or repair. -For this reason, we advise against doing deletions on base columns not selected in views until this is -fixed on CASSANDRA-13826.

-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/operators.html b/src/doc/4.0-beta1/cql/operators.html deleted file mode 100644 index a96b58710..000000000 --- a/src/doc/4.0-beta1/cql/operators.html +++ /dev/null @@ -1,301 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Arithmetic Operators" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Arithmetic Operators

-

CQL supports the following operators:

Operator  | Description
----------|-------------------------------------
- (unary) | Negates operand
+         | Addition
-         | Subtraction
*         | Multiplication
/         | Division
%         | Returns the remainder of a division
-
-

Number Arithmetic

-

All arithmetic operations are supported on numeric types or counters.

-

The return type of the operation will be based on the operand types:

left/right | tinyint  | smallint | int     | bigint  | counter | float   | double  | varint  | decimal
-----------|----------|----------|---------|---------|---------|---------|---------|---------|--------
tinyint    | tinyint  | smallint | int     | bigint  | bigint  | float   | double  | varint  | decimal
smallint   | smallint | smallint | int     | bigint  | bigint  | float   | double  | varint  | decimal
int        | int      | int      | int     | bigint  | bigint  | float   | double  | varint  | decimal
bigint     | bigint   | bigint   | bigint  | bigint  | bigint  | double  | double  | varint  | decimal
counter    | bigint   | bigint   | bigint  | bigint  | bigint  | double  | double  | varint  | decimal
float      | float    | float    | float   | double  | double  | float   | double  | decimal | decimal
double     | double   | double   | double  | double  | double  | double  | double  | decimal | decimal
varint     | varint   | varint   | varint  | decimal | decimal | decimal | decimal | decimal | decimal
decimal    | decimal  | decimal  | decimal | decimal | decimal | decimal | decimal | decimal | decimal
-

The *, / and % operators have a higher precedence level than the + and - operators. Consequently, they will be evaluated first. If two operators in an expression have the same precedence level, they will be evaluated left to right based on their position in the expression.
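A hedged sketch of how precedence plays out (atable with its int partition key pk is reused from the aggregates example; treat this purely as an illustration of evaluation order):

SELECT * FROM atable WHERE pk = 1 + 2 * 3;   -- evaluated as 1 + (2 * 3) = 7, not (1 + 2) * 3 = 9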

-
-
-

Datetime Arithmetic

-

A duration can be added to (+) or subtracted (-) from a timestamp or a date to create a new timestamp or date. So for instance:

-
SELECT * FROM myTable WHERE t = '2017-01-01' - 2d
-
-
-

will select all the records with a value of t which is in the last 2 days of 2016.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/security.html b/src/doc/4.0-beta1/cql/security.html deleted file mode 100644 index efecbf419..000000000 --- a/src/doc/4.0-beta1/cql/security.html +++ /dev/null @@ -1,743 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-
-

Database Roles

-

CQL uses database roles to represent users and groups of users. Syntactically, a role is defined by:

-
-role_name ::=  identifier | string
-
-
-

CREATE ROLE

-

Creating a role uses the CREATE ROLE statement:

-
-create_role_statement ::=  CREATE ROLE [ IF NOT EXISTS ] role_name
-                               [ WITH role_options ]
-role_options          ::=  role_option ( AND role_option )*
-role_option           ::=  PASSWORD '=' string
-                          | LOGIN '=' boolean
-                          | SUPERUSER '=' boolean
-                          | OPTIONS '=' map_literal
-                          | ACCESS TO DATACENTERS set_literal
-                          | ACCESS TO ALL DATACENTERS
-
-

For instance:

-
CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO DATACENTERS {'DC1', 'DC3'};
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND ACCESS TO ALL DATACENTERS;
-
-
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

-

Permissions on database resources are granted to roles; types of resources include keyspaces, -tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions -structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is -not.

-

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that -connection, the client will acquire any roles and privileges granted to that role.

-

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

-

Role names should be quoted if they contain non-alphanumeric characters.

-
-

Setting credentials for internal authentication

-

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single -quotation marks.

-

If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD -clause is not necessary.

-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. Not specifying datacenters implicitly grants access to all datacenters. The clause ACCESS TO ALL DATACENTERS can be used for explicitness, but there’s no functional difference.

-
-
-

Creating a role conditionally

-

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. -If the option is used and the role exists, the statement is a no-op:

-
CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-
-
-
-
-
-

ALTER ROLE

-

Altering a role options uses the ALTER ROLE statement:

-
-alter_role_statement ::=  ALTER ROLE role_name WITH role_options
-
-

For instance:

-
ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-
-
-
-

Restricting connections to specific datacenters

-

If a network_authorizer has been configured, you can restrict login roles to specific datacenters with the -ACCESS TO DATACENTERS clause followed by a set literal of datacenters the user can access. To remove any -data center restrictions, use the ACCESS TO ALL DATACENTERS clause.

-

Conditions on executing ALTER ROLE statements:

-
    -
  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • -
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • -
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • -
  • To modify properties of a role, the client must be granted ALTER permission on that role
  • -
-
-
-
-

DROP ROLE

-

Dropping a role uses the DROP ROLE statement:

-
-drop_role_statement ::=  DROP ROLE [ IF EXISTS ] role_name
-
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.

-

Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is -used. If the option is used and the role does not exist the statement is a no-op.

-
-

Note

-

DROP ROLE intentionally does not terminate any open user sessions. Currently connected sessions will remain -connected and will retain the ability to perform any database actions which do not require authorization. -However, if authorization is enabled, permissions of the dropped role are also revoked, -subject to the caching options configured in cassandra.yaml. -Should a dropped role be subsequently recreated and have new permissions or -roles granted to it, any client sessions still connected will acquire the newly granted -permissions and roles.

-
-
-
-

GRANT ROLE

-

Granting a role to another uses the GRANT ROLE statement:

-
-grant_role_statement ::=  GRANT role_name TO role_name
-
-

For instance:

-
GRANT report_writer TO alice;
-
-
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also -acquired by alice.

-

Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in -error conditions:

-
GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
-GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-
-
-
-
-

REVOKE ROLE

-

Revoking a role uses the REVOKE ROLE statement:

-
-revoke_role_statement ::=  REVOKE role_name FROM role_name
-
-

For instance:

-
REVOKE report_writer FROM alice;
-
-
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the -report_writer role are also revoked.

-
-
-

LIST ROLES

-

All the known roles (in the system or granted to specific role) can be listed using the LIST ROLES statement:

-
-list_roles_statement ::=  LIST ROLES [ OF role_name ] [ NORECURSIVE ]
-
-

For instance:

-
LIST ROLES;
-
-
-

returns all known roles in the system; this requires DESCRIBE permission on the database roles resource. And:

-
LIST ROLES OF alice;
-
-
-

enumerates all roles granted to alice, including those transitively acquired. But:

-
LIST ROLES OF bob NORECURSIVE
-
-
-

lists all roles directly granted to bob without including any of the transitively acquired ones.

-
-
-
-

Users

-

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a -USER. For backward compatibility, the legacy syntax has been preserved with USER centric statements becoming -synonyms for the ROLE based equivalents. In other words, creating/updating a user is just a different syntax for -creating/updating a role.

-
-

CREATE USER

-

Creating a user uses the CREATE USER statement:

-
-create_user_statement ::=  CREATE USER [ IF NOT EXISTS ] role_name [ WITH PASSWORD string ] [ user_option ]
-user_option           ::=  SUPERUSER | NOSUPERUSER
-
-

For instance:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-
-
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of -statements are equivalent:

-
CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-
-
-
-

ALTER USER

-

Altering the options of a user uses the ALTER USER statement:

-
-alter_user_statement ::=  ALTER USER role_name [ WITH PASSWORD string ] [ user_option ]
-
-

For instance:

-
ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-
-
-
-
-

DROP USER

-

Dropping a user uses the DROP USER statement:

-
-drop_user_statement ::=  DROP USER [ IF EXISTS ] role_name
-
-
-
-

LIST USERS

-

Existing users can be listed using the LIST USERS statement:

-
-list_users_statement ::=  LIST USERS
-
-

Note that this statement is equivalent to:

-
LIST ROLES;
-
-
-

but only roles with the LOGIN privilege are included in the output.

-
-
-
-

Data Control

-
-

Permissions

-

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type -is modelled hierarchically:

-
    -
  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> -TABLE.
  • -
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • -
  • Resources representing roles have the structure ALL ROLES -> ROLE
  • -
  • Resources representing JMX ObjectNames, which map to sets of MBeans/MXBeans, have the structure ALL MBEANS -> -MBEAN
  • -
-

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a -resource higher up the chain automatically grants that same permission on all resources lower down. For example, -granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting -a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It -is also possible to grant permissions on all functions scoped to a particular keyspace.
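For example, a sketch of this downward flow (keyspace1 is a hypothetical keyspace; report_writer is the role used in the examples above):

GRANT SELECT ON KEYSPACE keyspace1 TO report_writer;   -- report_writer may now SELECT from every table in keyspace1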

-

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established -following permissions changes.

-

The full set of available permissions is:

-
    -
  • CREATE
  • -
  • ALTER
  • -
  • DROP
  • -
  • SELECT
  • -
  • MODIFY
  • -
  • AUTHORIZE
  • -
  • DESCRIBE
  • -
  • EXECUTE
  • -
-

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context of functions or mbeans; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT a permission on a resource to which it cannot be applied results in an error response. The following illustrates which permissions can be granted on which types of resource, and which statements are enabled by that permission.

Permission | Resource | Operations
CREATE | ALL KEYSPACES | CREATE KEYSPACE and CREATE TABLE in any keyspace
CREATE | KEYSPACE | CREATE TABLE in specified keyspace
CREATE | ALL FUNCTIONS | CREATE FUNCTION in any keyspace and CREATE AGGREGATE in any keyspace
CREATE | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE in specified keyspace
CREATE | ALL ROLES | CREATE ROLE
ALTER | ALL KEYSPACES | ALTER KEYSPACE and ALTER TABLE in any keyspace
ALTER | KEYSPACE | ALTER KEYSPACE and ALTER TABLE in specified keyspace
ALTER | TABLE | ALTER TABLE
ALTER | ALL FUNCTIONS | CREATE FUNCTION and CREATE AGGREGATE: replacing any existing
ALTER | ALL FUNCTIONS IN KEYSPACE | CREATE FUNCTION and CREATE AGGREGATE: replacing existing in specified keyspace
ALTER | FUNCTION | CREATE FUNCTION and CREATE AGGREGATE: replacing existing
ALTER | ALL ROLES | ALTER ROLE on any role
ALTER | ROLE | ALTER ROLE
DROP | ALL KEYSPACES | DROP KEYSPACE and DROP TABLE in any keyspace
DROP | KEYSPACE | DROP TABLE in specified keyspace
DROP | TABLE | DROP TABLE
DROP | ALL FUNCTIONS | DROP FUNCTION and DROP AGGREGATE in any keyspace
DROP | ALL FUNCTIONS IN KEYSPACE | DROP FUNCTION and DROP AGGREGATE in specified keyspace
DROP | FUNCTION | DROP FUNCTION
DROP | ALL ROLES | DROP ROLE on any role
DROP | ROLE | DROP ROLE
SELECT | ALL KEYSPACES | SELECT on any table
SELECT | KEYSPACE | SELECT on any table in specified keyspace
SELECT | TABLE | SELECT on specified table
SELECT | ALL MBEANS | Call getter methods on any mbean
SELECT | MBEANS | Call getter methods on any mbean matching a wildcard pattern
SELECT | MBEAN | Call getter methods on named mbean
MODIFY | ALL KEYSPACES | INSERT, UPDATE, DELETE and TRUNCATE on any table
MODIFY | KEYSPACE | INSERT, UPDATE, DELETE and TRUNCATE on any table in specified keyspace
MODIFY | TABLE | INSERT, UPDATE, DELETE and TRUNCATE on specified table
MODIFY | ALL MBEANS | Call setter methods on any mbean
MODIFY | MBEANS | Call setter methods on any mbean matching a wildcard pattern
MODIFY | MBEAN | Call setter methods on named mbean
AUTHORIZE | ALL KEYSPACES | GRANT PERMISSION and REVOKE PERMISSION on any table
AUTHORIZE | KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION on any table in specified keyspace
AUTHORIZE | TABLE | GRANT PERMISSION and REVOKE PERMISSION on specified table
AUTHORIZE | ALL FUNCTIONS | GRANT PERMISSION and REVOKE PERMISSION on any function
AUTHORIZE | ALL FUNCTIONS IN KEYSPACE | GRANT PERMISSION and REVOKE PERMISSION in specified keyspace
AUTHORIZE | FUNCTION | GRANT PERMISSION and REVOKE PERMISSION on specified function
AUTHORIZE | ALL MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean
AUTHORIZE | MBEANS | GRANT PERMISSION and REVOKE PERMISSION on any mbean matching a wildcard pattern
AUTHORIZE | MBEAN | GRANT PERMISSION and REVOKE PERMISSION on named mbean
AUTHORIZE | ALL ROLES | GRANT ROLE and REVOKE ROLE on any role
AUTHORIZE | ROLES | GRANT ROLE and REVOKE ROLE on specified roles
DESCRIBE | ALL ROLES | LIST ROLES on all roles or only roles granted to another, specified role
DESCRIBE | ALL MBEANS | Retrieve metadata about any mbean from the platform’s MBeanServer
DESCRIBE | MBEANS | Retrieve metadata about any mbean matching a wildcard pattern from the platform’s MBeanServer
DESCRIBE | MBEAN | Retrieve metadata about a named mbean from the platform’s MBeanServer
EXECUTE | ALL FUNCTIONS | SELECT, INSERT and UPDATE using any function, and use of any function in CREATE AGGREGATE
EXECUTE | ALL FUNCTIONS IN KEYSPACE | SELECT, INSERT and UPDATE using any function in specified keyspace and use of any function in keyspace in CREATE AGGREGATE
EXECUTE | FUNCTION | SELECT, INSERT and UPDATE using specified function and use of the function in CREATE AGGREGATE
EXECUTE | ALL MBEANS | Execute operations on any mbean
EXECUTE | MBEANS | Execute operations on any mbean matching a wildcard pattern
EXECUTE | MBEAN | Execute operations on named mbean
-
-
-

GRANT PERMISSION

-

Granting a permission uses the GRANT PERMISSION statement:

-
-grant_permission_statement ::=  GRANT permissions ON resource TO role_name
-permissions                ::=  ALL [ PERMISSIONS ] | permission [ PERMISSION ]
-permission                 ::=  CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-resource                   ::=  ALL KEYSPACES
-                               | KEYSPACE keyspace_name
-                               | [ TABLE ] table_name
-                               | ALL ROLES
-                               | ROLE role_name
-                               | ALL FUNCTIONS [ IN KEYSPACE keyspace_name ]
-                               | FUNCTION function_name '(' [ cql_type ( ',' cql_type )* ] ')'
-                               | ALL MBEANS
-                               | ( MBEAN | MBEANS ) string
-
-

For instance:

-
GRANT SELECT ON ALL KEYSPACES TO data_reader;
-
-
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all -keyspaces:

-
GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-
-
-

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace:

-
GRANT DROP ON keyspace1.table1 TO schema_owner;
-
-
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1:

-
GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-
-
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries -which use the function keyspace1.user_function( int ):

-
GRANT DESCRIBE ON ALL ROLES TO role_admin;
-
-
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST ROLES statement.

-
-

GRANT ALL

-

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target -resource.

-
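For instance (an illustrative sketch; the ops_admin role name is made up for this example):

GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO ops_admin;

Based on the permissions table above, this would grant every permission applicable to a keyspace (CREATE, ALTER, DROP, SELECT, MODIFY and AUTHORIZE) to the ops_admin role.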
-
-

Automatic Granting

-

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or CREATE ROLE statement, the creator (the role identified as the database user who issues the statement) is automatically granted all applicable permissions on the new resource.

-
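For example (illustrative only; assumes the role alice has permission to create tables in keyspace1):

CREATE TABLE keyspace1.t1 (pk int PRIMARY KEY, val text);
LIST ALL PERMISSIONS ON keyspace1.t1 OF alice;

After the CREATE TABLE statement, the LIST PERMISSIONS query would be expected to show alice holding the permissions applicable to a table (ALTER, DROP, SELECT, MODIFY and AUTHORIZE) on the new table.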
-
-
-

REVOKE PERMISSION

-

Revoking a permission from a role uses the REVOKE PERMISSION statement:

-
-revoke_permission_statement ::=  REVOKE permissions ON resource FROM role_name
-
-

For instance:

-
REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-
-
-

Because of their function in normal driver operations, certain tables cannot have their SELECT permissions revoked. The following tables will be available to all authorized users regardless of their assigned role:

-
  • system_schema.keyspaces
  • system_schema.columns
  • system_schema.tables
  • system.local
  • system.peers
-
-
-
-
-

LIST PERMISSIONS

-

Listing granted permissions uses the LIST PERMISSIONS statement:

-
-list_permissions_statement ::=  LIST permissions [ ON resource ] [ OF role_name [ NORECURSIVE ] ]
-
-

For instance:

-
LIST ALL PERMISSIONS OF alice;
-
-
-

Show all permissions granted to alice, including those acquired transitively from any other roles:

-
LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-
-
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. For example, should bob have ALTER permission on keyspace1, that would be included in the results of this query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to bob or one of bob’s roles:

-
LIST SELECT PERMISSIONS OF carlos;
-
-
-

Show any permissions granted to carlos or any of carlos’s roles, limited to SELECT permissions on any -resource.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/triggers.html b/src/doc/4.0-beta1/cql/triggers.html deleted file mode 100644 index bcc050051..000000000 --- a/src/doc/4.0-beta1/cql/triggers.html +++ /dev/null @@ -1,156 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Triggers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Triggers

-

Triggers are identified by a name defined by:

-
-trigger_name ::=  identifier
-
-
-

CREATE TRIGGER

-

Creating a new trigger uses the CREATE TRIGGER statement:

-
-create_trigger_statement ::=  CREATE TRIGGER [ IF NOT EXISTS ] trigger_name
-                                  ON table_name
-                                  USING string
-
-

For instance:

-
CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-
-
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. You place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it is loaded during cluster startup and must exist on every node that participates in the cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

-
-
-

DROP TRIGGER

-

Dropping a trigger uses the DROP TRIGGER statement:

-
-drop_trigger_statement ::=  DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name
-
-

For instance:

-
DROP TRIGGER myTrigger ON myTable;
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/cql/types.html b/src/doc/4.0-beta1/cql/types.html deleted file mode 100644 index 66e982311..000000000 --- a/src/doc/4.0-beta1/cql/types.html +++ /dev/null @@ -1,700 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "The Cassandra Query Language (CQL)" - -doc-title: "Data Types" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Data Types

-

CQL is a typed language and supports a rich set of data types, including native types, -collection types, user-defined types, tuple types and custom -types:

-
-cql_type ::=  native_type | collection_type | user_defined_type | tuple_type | custom_type
-
-
-

Native Types

-

The native types supported by CQL are:

-
-native_type ::=  ASCII
-                 | BIGINT
-                 | BLOB
-                 | BOOLEAN
-                 | COUNTER
-                 | DATE
-                 | DECIMAL
-                 | DOUBLE
-                 | DURATION
-                 | FLOAT
-                 | INET
-                 | INT
-                 | SMALLINT
-                 | TEXT
-                 | TIME
-                 | TIMESTAMP
-                 | TIMEUUID
-                 | TINYINT
-                 | UUID
-                 | VARCHAR
-                 | VARINT
-
-

The following table gives additional information on the native data types, and on which kind of constants each type supports:

type | constants supported | description
ascii | string | ASCII character string
bigint | integer | 64-bit signed long
blob | blob | Arbitrary bytes (no validation)
boolean | boolean | Either true or false
counter | integer | Counter column (64-bit signed value). See Counters for details
date | integer, string | A date (with no corresponding time value). See Working with dates below for details
decimal | integer, float | Variable-precision decimal
double | integer, float | 64-bit IEEE-754 floating point
duration | duration | A duration with nanosecond precision. See Working with durations below for details
float | integer, float | 32-bit IEEE-754 floating point
inet | string | An IP address, either IPv4 (4 bytes long) or IPv6 (16 bytes long). Note that there is no inet constant; IP addresses should be input as strings
int | integer | 32-bit signed int
smallint | integer | 16-bit signed int
text | string | UTF8 encoded string
time | integer, string | A time (with no corresponding date value) with nanosecond precision. See Working with times below for details
timestamp | integer, string | A timestamp (date and time) with millisecond precision. See Working with timestamps below for details
timeuuid | uuid | Version 1 UUID, generally used as a “conflict-free” timestamp. Also see Timeuuid functions
tinyint | integer | 8-bit signed int
uuid | uuid | A UUID (of any version)
varchar | string | UTF8 encoded string
varint | integer | Arbitrary-precision integer
-
-

Counters

-

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed integer and on which 2 operations are supported: incrementing and decrementing (see the UPDATE statement for syntax). Note that the value of a counter cannot be set: a counter does not exist until first incremented/decremented, and that first increment/decrement is made as if the prior value was 0.

-
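For instance (an illustrative sketch; the table and column names are made up for this example):

CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter
);

-- The first increment behaves as if the prior value was 0.
UPDATE page_views SET views = views + 1 WHERE page = '/home';
UPDATE page_views SET views = views - 2 WHERE page = '/home';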

Counters have a number of important limitations:

-
    -
  • They cannot be used for columns part of the PRIMARY KEY of a table.
  • -
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside -the PRIMARY KEY have the counter type, or none of them have it.
  • -
  • Counters do not support expiration.
  • -
  • The deletion of counters is supported, but is only guaranteed to work the first time you delete a counter. In other -words, you should not re-update a counter that you have deleted (if you do, proper behavior is not guaranteed).
  • -
  • Counter updates are, by nature, not idempotent. An important consequence is that if a counter update fails unexpectedly (timeout or loss of connection to the coordinator node), the client has no way to know if the update has been applied or not. In particular, replaying the update may or may not lead to an over count.
  • -
-
-
-
-

Working with timestamps

-

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the -standard base time known as the epoch: January 1 1970 at 00:00:00 GMT.

-

Timestamps can be input in CQL either using their value as an integer, or using a string that -represents an ISO 8601 date. For instance, all of the values below are -valid timestamp values for Mar 2, 2011, at 04:05:00 AM, GMT:

-
    -
  • 1299038700000
  • -
  • '2011-02-03 04:05+0000'
  • -
  • '2011-02-03 04:05:00+0000'
  • -
  • '2011-02-03 04:05:00.000+0000'
  • -
  • '2011-02-03T04:05+0000'
  • -
  • '2011-02-03T04:05:00+0000'
  • -
  • '2011-02-03T04:05:00.000+0000'
  • -
-

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is -0800. The time zone may be omitted if desired ('2011-02-03 04:05:00'), and if so, the date will be interpreted as being in the time zone under which the coordinating Cassandra node is configured. There are however difficulties inherent in relying on the time zone configuration being as expected, so it is recommended that the time zone always be specified for timestamps when feasible.

-

The time of day may also be omitted ('2011-02-03' or '2011-02-03+0000'), in which case the time of day will -default to 00:00:00 in the specified or default time zone. However, if only the date part is relevant, consider using -the date type.

-
-
-

Working with dates

-

Values of the date type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at the center of the range (2^31). The epoch is January 1st, 1970.

-

As for timestamps, a date can be input either as an integer or using a date string. In the latter case, the format should be yyyy-mm-dd (so '2011-02-03' for instance).

-
-
-

Working with times

-

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

-

As for timestamps, a time can be input either as an integer or using a string representing the time. In the latter case, the format should be hh:mm:ss[.fffffffff] (where the sub-second precision is optional and, if provided, can be less than a nanosecond). So for instance, the following are valid inputs for a time:

-
    -
  • '08:12:54'
  • -
  • '08:12:54.123'
  • -
  • '08:12:54.123456'
  • -
  • '08:12:54.123456789'
  • -
-
-
-

Working with durations

-

Values of the duration type are encoded as 3 signed integers of variable length. The first integer represents the number of months, the second the number of days and the third the number of nanoseconds. This is due to the fact that the number of days in a month can change, and a day can have 23 or 25 hours depending on daylight saving. Internally, the number of months and days are decoded as 32-bit integers whereas the number of nanoseconds is decoded as a 64-bit integer.

-

A duration can be input as:

-
-
  1. (quantity unit)+ like 12h30m where the unit can be:
       • y: years (12 months)
       • mo: months (1 month)
       • w: weeks (7 days)
       • d: days (1 day)
       • h: hours (3,600,000,000,000 nanoseconds)
       • m: minutes (60,000,000,000 nanoseconds)
       • s: seconds (1,000,000,000 nanoseconds)
       • ms: milliseconds (1,000,000 nanoseconds)
       • us or µs: microseconds (1000 nanoseconds)
       • ns: nanoseconds (1 nanosecond)
  2. ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W
  3. ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss]
-

For example:

-
INSERT INTO RiderResults (rider, race, result) VALUES ('Christopher Froome', 'Tour de France', 89h4m48s);
-INSERT INTO RiderResults (rider, race, result) VALUES ('BARDET Romain', 'Tour de France', PT89H8M53S);
-INSERT INTO RiderResults (rider, race, result) VALUES ('QUINTANA Nairo', 'Tour de France', P0000-00-00T89:09:09);
-
-
-

Duration columns cannot be used in a table’s PRIMARY KEY. This limitation is due to the fact that -durations cannot be ordered. It is effectively not possible to know if 1mo is greater than 29d without a date -context.

-

A 1d duration is not equal to a 24h one as the duration type has been created to be able to support daylight saving.

-
-
-

Collections

-

CQL supports 3 kinds of collections: Maps, Sets and Lists. The types of those collections are defined by:

-
-collection_type ::=  MAP '<' cql_type ',' cql_type '>'
-                     | SET '<' cql_type '>'
-                     | LIST '<' cql_type '>'
-
-

and their values can be input using collection literals:

-
-collection_literal ::=  map_literal | set_literal | list_literal
-map_literal        ::=  '{' [ term ':' term (',' term : term)* ] '}'
-set_literal        ::=  '{' [ term (',' term)* ] '}'
-list_literal       ::=  '[' [ term (',' term)* ] ']'
-
-

Note however that neither bind_marker nor NULL are supported inside collection literals.

-
-

Noteworthy characteristics

-

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all messages sent by a user”, “events registered by a sensor”…), then collections are not appropriate and a specific table (with clustering columns) should be used. Concretely, (non-frozen) collections have the following noteworthy characteristics and limitations:

-
    -
  • Individual collections are not indexed internally. This means that even to access a single element of a collection, the whole collection has to be read (and reading one is not paged internally).
  • -
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do. Further, some list operations are not idempotent by nature (see the section on lists below for details), making their retry in case of timeout problematic. It is thus advised to prefer sets over lists when possible.
  • -
-

Please note that while some of those limitations may or may not be removed/improved upon in the future, it is an anti-pattern to use a (single) collection to store large amounts of data.

-
-
-

Maps

-

A map is a (sorted) set of key-value pairs, where keys are unique and the map is sorted by its keys. You can define -and insert a map with:

-
CREATE TABLE users (
-    id text PRIMARY KEY,
-    name text,
-    favs map<text, text> // A map of text keys, and text values
-);
-
-INSERT INTO users (id, name, favs)
-           VALUES ('jsmith', 'John Smith', { 'fruit' : 'Apple', 'band' : 'Beatles' });
-
-// Replace the existing map entirely.
-UPDATE users SET favs = { 'fruit' : 'Banana' } WHERE id = 'jsmith';
-
-
-

Further, maps support:

-
    -
  • Updating or inserting one or more elements:

    -
    UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith';
    -UPDATE users SET favs = favs + { 'movie' : 'Cassablanca', 'band' : 'ZZ Top' } WHERE id = 'jsmith';
    -
    -
    -
  • -
  • Removing one or more element (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    DELETE favs['author'] FROM users WHERE id = 'jsmith';
    -UPDATE users SET favs = favs - { 'movie', 'band'} WHERE id = 'jsmith';
    -
    -
    -

    Note that for removing multiple elements in a map, you remove from it a set of keys.

    -
  • -
-

Lastly, TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated elements. In other words:

-
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith';
-
-
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

-
-
-

Sets

-

A set is a (sorted) collection of unique values. You can define and insert a set with:

-
CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    tags set<text> // A set of text values
-);
-
-INSERT INTO images (name, owner, tags)
-            VALUES ('cat.jpg', 'jsmith', { 'pet', 'cute' });
-
-// Replace the existing set entirely
-UPDATE images SET tags = { 'kitten', 'cat', 'lol' } WHERE name = 'cat.jpg';
-
-
-

Further, sets support:

-
    -
  • Adding one or multiple elements (as this is a set, inserting an already existing element is a no-op):

    -
    UPDATE images SET tags = tags + { 'gray', 'cuddly' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
  • Removing one or multiple elements (if an element doesn’t exist, removing it is a no-op but no error is thrown):

    -
    UPDATE images SET tags = tags - { 'cat' } WHERE name = 'cat.jpg';
    -
    -
    -
  • -
-

Lastly, as for maps, TTLs if used only apply to the newly inserted values.

-
-
-

Lists

-
-

Note

-

As mentioned above and further discussed at the end of this section, lists have limitations and specific performance considerations that you should take into account before using them. In general, if you can use a set instead of a list, always prefer a set.

-
-

A list is a (sorted) collection of non-unique values where elements are ordered by their position in the list. You can define and insert a list with:

-
CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int> // A list of integers
-)
-
-INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-
-// Replace the existing list entirely
-UPDATE plays SET scores = [ 3, 9, 4] WHERE id = '123-afde';
-
-
-

Further, lists support:

-
    -
  • Appending and prepending values to a list:

    -
    UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
    -UPDATE plays SET players = 6, scores = [ 3 ] + scores WHERE id = '123-afde';
    -
    -
    -
  • -
  • Setting the value at a particular position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small:

    -
    UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';
    -
    -
    -
  • -
  • Removing an element by its position in the list. This implies that the list has a pre-existing element for that position or an error will be thrown that the list is too small. Further, as the operation removes an element from the list, the list size will be diminished by 1, shifting the position of all the elements following the one deleted:

    -
    DELETE scores[1] FROM plays WHERE id = '123-afde';
    -
    -
    -
  • -
  • Deleting all the occurrences of particular values in the list (if a particular element doesn’t occur at all in the -list, it is simply ignored and no error is thrown):

    -
    UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde';
    -
    -
    -
  • -
-
-

Warning

-

The append and prepend operations are not idempotent by nature. So in particular, if one of these operations times out, then retrying the operation is not safe and it may (or may not) lead to appending/prepending the value twice.

-
-
-

Warning

-

Setting and removing an element by position and removing occurrences of particular values incur an internal read-before-write. They will thus run more slowly and take more resources than usual updates (with the exclusion of conditional writes that have their own cost).

-
-

Lastly, as for maps, TTLs when used only apply to the newly inserted values.

-
-
-
-

User-Defined Types

-

CQL supports the definition of user-defined types (UDTs for short). Such a type can be created, modified and removed using the create_type_statement, alter_type_statement and drop_type_statement described below. But once created, a UDT is simply referred to by its name:

-
-user_defined_type ::=  udt_name
-udt_name          ::=  [ keyspace_name '.' ] identifier
-
-
-

Creating a UDT

-

Creating a new user-defined type is done using a CREATE TYPE statement defined by:

-
-create_type_statement ::=  CREATE TYPE [ IF NOT EXISTS ] udt_name
-                               '(' field_definition ( ',' field_definition )* ')'
-field_definition      ::=  identifier cql_type
-
-

A UDT has a name (used to declare columns of that type) and is a set of named and typed fields. Fields can be of any type, including collections or other UDTs. For instance:

-
CREATE TYPE phone (
-    country_code int,
-    number text,
-)
-
-CREATE TYPE address (
-    street text,
-    city text,
-    zip text,
-    phones map<text, phone>
-)
-
-CREATE TABLE user (
-    name text PRIMARY KEY,
-    addresses map<text, frozen<address>>
-)
-
-
-

Note that:

-
    -
  • Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If -it is used, the statement will be a no-op if the type already exists.
  • -
  • A type is intrinsically bound to the keyspace in which it is created, and can only be used in that keyspace. At -creation, if the type name is prefixed by a keyspace name, it is created in that keyspace. Otherwise, it is created in -the current keyspace.
  • -
  • As of Cassandra 4.0-alpha5, UDTs have to be frozen in most cases, hence the frozen<address> in the table definition above. Please see the section on frozen for more details.
  • -
-
-
-

UDT literals

-

Once a user-defined type has been created, values can be input using a UDT literal:

-
-udt_literal ::=  '{' identifier ':' term ( ',' identifier ':' term )* '}'
-
-

In other words, a UDT literal is like a map literal but its keys are the names of the fields of the type. For instance, one could insert into the table defined in the previous section using:

-
INSERT INTO user (name, addresses)
-          VALUES ('z3 Pr3z1den7', {
-              'home' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'cell' : { country_code: 1, number: '202 456-1111' },
-                            'landline' : { country_code: 1, number: '...' } }
-              },
-              'work' : {
-                  street: '1600 Pennsylvania Ave NW',
-                  city: 'Washington',
-                  zip: '20500',
-                  phones: { 'fax' : { country_code: 1, number: '...' } }
-              }
-          })
-
-
-

To be valid, a UDT literal should only include fields defined by the type it is a literal of, but it can omit some fields (in which case those will be null).

-
-
-

Altering a UDT

-

An existing user-defined type can be modified using an ALTER TYPE statement:

-
-alter_type_statement    ::=  ALTER TYPE udt_name alter_type_modification
-alter_type_modification ::=  ADD field_definition
-                             | RENAME identifier TO identifier ( identifier TO identifier )*
-
-

You can:

-
    -
  • add a new field to the type (ALTER TYPE address ADD country text). That new field will be null for any values -of the type created before the addition.
  • -
  • rename the fields of the type (ALTER TYPE address RENAME zip TO zipcode).
  • -
-
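For instance (restating the modifications listed above against the address type created earlier):

ALTER TYPE address ADD country text;
ALTER TYPE address RENAME zip TO zipcode;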
-
-

Dropping a UDT

-

You can drop an existing user-defined type using a DROP TYPE statement:

-
-drop_type_statement ::=  DROP TYPE [ IF EXISTS ] udt_name
-
-

Dropping a type results in the immediate, irreversible removal of that type. However, attempting to drop a type that is -still in use by another type, table or function will result in an error.

-

If the type dropped does not exist, an error will be returned unless IF EXISTS is used, in which case the operation -is a no-op.

-
-
-
-

Tuples

-

CQL also supports tuples and tuple types (where the elements can be of different types). Functionally, tuples can be thought of as anonymous UDTs with anonymous fields. Tuple types and tuple literals are defined by:

-
-tuple_type    ::=  TUPLE '<' cql_type ( ',' cql_type )* '>'
-tuple_literal ::=  '(' term ( ',' term )* ')'
-
-

and can be used thusly:

-
CREATE TABLE durations (
-    event text,
-    duration tuple<int, text>,
-)
-
-INSERT INTO durations (event, duration) VALUES ('ev1', (3, 'hours'));
-
-
-

Unlike other “composed” types (collections and UDTs), a tuple is always frozen (without the need of the frozen keyword) and it is not possible to update only some elements of a tuple (without updating the whole tuple). Also, a tuple literal should always have the same number of values as declared in the type it is a tuple of (some of those values can be null but they need to be explicitly declared as such).

-
-
-

Custom Types

-
-

Note

-

Custom types exist mostly for backward compatibility purposes and their usage is discouraged. Their usage is complex, not user friendly, and the other provided types, particularly user-defined types, should almost always be enough.

-
-

A custom type is defined by:

-
-custom_type ::=  string
-
-

A custom type is a string that contains the name of a Java class that extends the server side AbstractType class and that can be loaded by Cassandra (it should thus be in the CLASSPATH of every node running Cassandra). That class will define what values are valid for the type and how the type sorts when used for a clustering column. For any other purpose, a value of a custom type is the same as that of a blob, and can in particular be input using the blob literal syntax.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/data_modeling_conceptual.html b/src/doc/4.0-beta1/data_modeling/data_modeling_conceptual.html deleted file mode 100644 index 8897f5dd4..000000000 --- a/src/doc/4.0-beta1/data_modeling/data_modeling_conceptual.html +++ /dev/null @@ -1,151 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Conceptual Data Modeling" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Conceptual Data Modeling

-

First, let’s create a simple domain model that is easy to understand in -the relational world, and then see how you might map it from a relational -to a distributed hashtable model in Cassandra.

-

Let’s use an example that is complex enough -to show the various data structures and design patterns, but not -something that will bog you down with details. Also, a domain that’s -familiar to everyone will allow you to concentrate on how to work with -Cassandra, not on what the application domain is all about.

-

For example, let’s use a domain that is easily understood and that -everyone can relate to: making hotel reservations.

-

The conceptual domain includes hotels, guests that stay in the hotels, a -collection of rooms for each hotel, the rates and availability of those -rooms, and a record of reservations booked for guests. Hotels typically -also maintain a collection of “points of interest,” which are parks, -museums, shopping galleries, monuments, or other places near the hotel -that guests might want to visit during their stay. Both hotels and -points of interest need to maintain geolocation data so that they can be -found on maps for mashups, and to calculate distances.

-

The conceptual domain is depicted below using the entity–relationship -model popularized by Peter Chen. This simple diagram represents the -entities in the domain with rectangles, and attributes of those entities -with ovals. Attributes that represent unique identifiers for items are -underlined. Relationships between entities are represented as diamonds, -and the connectors between the relationship and each entity show the -multiplicity of the connection.

-../_images/data_modeling_hotel_erd.png -

Obviously, in the real world, there would be many more considerations -and much more complexity. For example, hotel rates are notoriously -dynamic, and calculating them involves a wide array of factors. Here -you’re defining something complex enough to be interesting and touch on -the important points, but simple enough to maintain the focus on -learning Cassandra.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/data_modeling_logical.html b/src/doc/4.0-beta1/data_modeling/data_modeling_logical.html deleted file mode 100644 index 85b820e39..000000000 --- a/src/doc/4.0-beta1/data_modeling/data_modeling_logical.html +++ /dev/null @@ -1,285 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Logical Data Modeling" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Logical Data Modeling

-

Now that you have defined your queries, you’re ready to begin designing -Cassandra tables. First, create a logical model containing a table -for each query, capturing entities and relationships from the conceptual -model.

-

To name each table, you’ll identify the primary entity type for which you -are querying and use that to start the entity name. If you are querying -by attributes of other related entities, append those to the table -name, separated with _by_. For example, hotels_by_poi.

-

Next, you identify the primary key for the table, adding partition key -columns based on the required query attributes, and clustering columns -in order to guarantee uniqueness and support desired sort ordering.

-

The design of the primary key is extremely important, as it will -determine how much data will be stored in each partition and how that -data is organized on disk, which in turn will affect how quickly -Cassandra processes reads.

-

Complete each table by adding any additional attributes identified by -the query. If any of these additional attributes are the same for every -instance of the partition key, mark the column as static.

-

Now that was a pretty quick description of a fairly involved process, so -it will be worthwhile to work through a detailed example. First, -let’s introduce a notation that you can use to represent logical -models.

-

Several individuals within the Cassandra community have proposed -notations for capturing data models in diagrammatic form. This document -uses a notation popularized by Artem Chebotko which provides a simple, -informative way to visualize the relationships between queries and -tables in your designs. This figure shows the Chebotko notation for a -logical data model.

-../_images/data_modeling_chebotko_logical.png -

Each table is shown with its title and a list of columns. Primary key -columns are identified via symbols such as K for partition key -columns and C↑ or C↓ to represent clustering columns. Lines -are shown entering tables or between tables to indicate the queries that -each table is designed to support.

-
-

Hotel Logical Data Model

-

The figure below shows a Chebotko logical data model for the queries -involving hotels, points of interest, rooms, and amenities. One thing you’ll -notice immediately is that the Cassandra design doesn’t include dedicated -tables for rooms or amenities, as you had in the relational design. This -is because the workflow didn’t identify any queries requiring this -direct access.

-../_images/data_modeling_hotel_logical.png -

Let’s explore the details of each of these tables.

-

The first query Q1 is to find hotels near a point of interest, so you’ll -call this table hotels_by_poi. Searching by a named point of -interest is a clue that the point of interest should be a part -of the primary key. Let’s reference the point of interest by name, -because according to the workflow that is how users will start their -search.

-

You’ll note that you certainly could have more than one hotel near a -given point of interest, so you’ll need another component in the primary -key in order to make sure you have a unique partition for each hotel. So -you add the hotel key as a clustering column.

-

An important consideration in designing your table’s primary key is -making sure that it defines a unique data element. Otherwise you run the -risk of accidentally overwriting data.

-
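As a sketch of what this table might eventually look like in CQL (illustrative only; the exact column set beyond poi_name and hotel_id is an assumption made here for clarity):

CREATE TABLE hotels_by_poi (
    poi_name text,
    hotel_id text,
    name text,
    phone text,
    PRIMARY KEY ((poi_name), hotel_id)
);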

Now for the second query (Q2), you’ll need a table to get information -about a specific hotel. One approach would have been to put all of the -attributes of a hotel in the hotels_by_poi table, but you added -only those attributes that were required by the application workflow.

-

From the workflow diagram, you know that the hotels_by_poi table is -used to display a list of hotels with basic information on each hotel, -and the application knows the unique identifiers of the hotels returned. -When the user selects a hotel to view details, you can then use Q2, which -is used to obtain details about the hotel. Because you already have the -hotel_id from Q1, you use that as a reference to the hotel you’re -looking for. Therefore the second table is just called hotels.

-

Another option would have been to store a set of poi_names in the -hotels table. This is an equally valid approach. You’ll learn through -experience which approach is best for your application.

-

Q3 is just a reverse of Q1—looking for points of interest near a hotel, -rather than hotels near a point of interest. This time, however, you need -to access the details of each point of interest, as represented by the -pois_by_hotel table. As previously, you add the point of -interest name as a clustering key to guarantee uniqueness.

-

At this point, let’s now consider how to support query Q4 to help the -user find available rooms at a selected hotel for the nights they are -interested in staying. Note that this query involves both a start date -and an end date. Because you’re querying over a range instead of a single -date, you know that you’ll need to use the date as a clustering key. -Use the hotel_id as a primary key to group room data for each hotel -on a single partition, which should help searches be super fast. Let’s -call this the available_rooms_by_hotel_date table.

-

To support searching over a range, use clustering columns to store -attributes that you need to access in a range query. Remember that the -order of the clustering columns is important.

-

The design of the available_rooms_by_hotel_date table is an instance -of the wide partition pattern. This -pattern is sometimes called the wide row pattern when discussing -databases that support similar models, but wide partition is a more -accurate description from a Cassandra perspective. The essence of the -pattern is to group multiple related rows in a partition in order to -support fast access to multiple rows within the partition in a single -query.

-
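A possible CQL rendering of this wide-partition table (a sketch; the room and availability columns are assumptions, not part of the discussion above):

CREATE TABLE available_rooms_by_hotel_date (
    hotel_id text,
    date date,
    room_number smallint,
    is_available boolean,
    PRIMARY KEY ((hotel_id), date, room_number)
);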

In order to round out the shopping portion of the data model, add the -amenities_by_room table to support Q5. This will allow users to -view the amenities of one of the rooms that is available for the desired -stay dates.

-
-
-

Reservation Logical Data Model

-

Now let’s switch gears to look at the reservation queries. The figure -shows a logical data model for reservations. You’ll notice that these -tables represent a denormalized design; the same data appears in -multiple tables, with differing keys.

-../_images/data_modeling_reservation_logical.png -

In order to satisfy Q6, the reservations_by_guest table can be used -to look up the reservation by guest name. You could envision query Q7 -being used on behalf of a guest on a self-serve website or a call center -agent trying to assist the guest. Because the guest name might not be -unique, you include the guest ID here as a clustering column as well.

-

Q8 and Q9 in particular help to remind you to create queries -that support various stakeholders of the application, not just customers -but staff as well, and perhaps even the analytics team, suppliers, and so -on.

-

The hotel staff might wish to see a record of upcoming reservations by -date in order to get insight into how the hotel is performing, such as -what dates the hotel is sold out or undersold. Q8 supports the retrieval -of reservations for a given hotel by date.

-

Finally, you create a guests table. This provides a single location used to store guest information. In this case, you specify a separate unique identifier for guest records, as it is not uncommon for guests to have the same name. In many organizations, a customer database such as the guests table would be part of a separate customer management application, which is why other guest access patterns were omitted from the example.

-
-
-

Patterns and Anti-Patterns

-

As with other types of software design, there are some well-known -patterns and anti-patterns for data modeling in Cassandra. You’ve already -used one of the most common patterns in this hotel model—the wide -partition pattern.

-

The time series pattern is an extension of the wide partition -pattern. In this pattern, a series of measurements at specific time -intervals are stored in a wide partition, where the measurement time is -used as part of the partition key. This pattern is frequently used in -domains including business analysis, sensor data management, and -scientific experiments.

-
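As a rough sketch of the pattern (illustrative only; the sensor domain, table and column names are not part of the hotel example):

CREATE TABLE readings_by_sensor_date (
    sensor_id text,
    date date,
    reading_time timestamp,
    value double,
    PRIMARY KEY ((sensor_id, date), reading_time)
) WITH CLUSTERING ORDER BY (reading_time DESC);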

The time series pattern is also useful for data other than measurements. -Consider the example of a banking application. You could store each -customer’s balance in a row, but that might lead to a lot of read and -write contention as various customers check their balance or make -transactions. You’d probably be tempted to wrap a transaction around -writes just to protect the balance from being updated in error. In -contrast, a time series–style design would store each transaction as a -timestamped row and leave the work of calculating the current balance to -the application.

-

One design trap that many new users fall into is attempting to use -Cassandra as a queue. Each item in the queue is stored with a timestamp -in a wide partition. Items are appended to the end of the queue and read -from the front, being deleted after they are read. This is a design that -seems attractive, especially given its apparent similarity to the time -series pattern. The problem with this approach is that the deleted items -are now tombstones that Cassandra must scan past -in order to read from the front of the queue. Over time, a growing number -of tombstones begins to degrade read performance.

-

The queue anti-pattern serves as a reminder that any design that relies -on the deletion of data is potentially a poorly performing design.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/data_modeling_physical.html b/src/doc/4.0-beta1/data_modeling/data_modeling_physical.html deleted file mode 100644 index 4ee47e203..000000000 --- a/src/doc/4.0-beta1/data_modeling/data_modeling_physical.html +++ /dev/null @@ -1,200 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Physical Data Modeling" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Physical Data Modeling

-

Once you have a logical data model defined, creating the physical model -is a relatively simple process.

-

You walk through each of the logical model tables, assigning types to -each item. You can use any valid CQL data type, -including the basic types, collections, and user-defined types. You may -identify additional user-defined types that can be created to simplify -your design.

-

After you’ve assigned data types, you analyze the model by performing -size calculations and testing out how the model works. You may make some -adjustments based on your findings. Once again let’s cover the data -modeling process in more detail by working through an example.

-

Before getting started, let’s look at a few additions to the Chebotko -notation for physical data models. To draw physical models, you need to -be able to add the typing information for each column. This figure -shows the addition of a type for each column in a sample table.

-../_images/data_modeling_chebotko_physical.png -

The figure includes a designation of the keyspace containing each table -and visual cues for columns represented using collections and -user-defined types. Note the designation of static columns and -secondary index columns. There is no restriction on assigning these as -part of a logical model, but they are typically more of a physical data -modeling concern.

-
-

Hotel Physical Data Model

-

Now let’s get to work on the physical model. First, you need keyspaces -to contain the tables. To keep the design relatively simple, create a -hotel keyspace to contain tables for hotel and availability -data, and a reservation keyspace to contain tables for reservation -and guest data. In a real system, you might divide the tables across even -more keyspaces in order to separate concerns.

-

For the hotels table, use Cassandra’s text type to -represent the hotel’s id. For the address, create an -address user defined type. Use the text type to represent the -phone number, as there is considerable variance in the formatting of -numbers between countries.

-
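For instance, the hotels table and its address type might be declared along these lines (a sketch under the assumptions above; the field names are illustrative):

CREATE TYPE hotel.address (
    street text,
    city text,
    state_or_province text,
    postal_code text,
    country text
);

CREATE TABLE hotel.hotels (
    id text PRIMARY KEY,
    name text,
    phone text,
    address frozen<address>
);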

While it would make sense to use the uuid type for attributes such -as the hotel_id, this document uses mostly text attributes as -identifiers, to keep the samples simple and readable. For example, a -common convention in the hospitality industry is to reference properties -by short codes like “AZ123” or “NY229”. This example uses these values -for hotel_ids, while acknowledging they are not necessarily globally -unique.

-

You’ll find that it’s often helpful to use unique IDs to uniquely -reference elements, and to use these uuids as references in tables -representing other entities. This helps to minimize coupling between -different entity types. This may prove especially effective if you are -using a microservice architectural style for your application, in which -there are separate services responsible for each entity type.

-

As you work to create physical representations of various tables in the -logical hotel data model, you use the same approach. The resulting design -is shown in this figure:

-../_images/data_modeling_hotel_physical.png -

Note that the address type is also included in the design. It -is designated with an asterisk to denote that it is a user-defined type, -and has no primary key columns identified. This type is used in -the hotels and hotels_by_poi tables.

-

User-defined types are frequently used to help reduce duplication of -non-primary key columns, as was done with the address -user-defined type. This can reduce complexity in the design.

-

Remember that the scope of a UDT is the keyspace in which it is defined. To use address in the reservation keyspace defined below, you’ll have to declare it again. This is just one of the many trade-offs you have to make in data model design.

-
-
-

Reservation Physical Data Model

-

Now, let’s examine reservation tables in the design. -Remember that the logical model contained three denormalized tables to -support queries for reservations by confirmation number, guest, and -hotel and date. For the first iteration of your physical data model -design, assume you’re going to manage this denormalization -manually. Note that this design could be revised to use Cassandra’s -(experimental) materialized view feature.

-../_images/data_modeling_reservation_physical.png -

Note that the address type is reproduced in this keyspace and -guest_id is modeled as a uuid type in all of the tables.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/data_modeling_queries.html b/src/doc/4.0-beta1/data_modeling/data_modeling_queries.html deleted file mode 100644 index 3c897cfcb..000000000 --- a/src/doc/4.0-beta1/data_modeling/data_modeling_queries.html +++ /dev/null @@ -1,171 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Defining Application Queries" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Defining Application Queries

-

Let’s try the query-first approach to start designing the data model for -a hotel application. The user interface design for the application is -often a great artifact to use to begin identifying queries. Let’s assume -that you’ve talked with the project stakeholders and your UX designers -have produced user interface designs or wireframes for the key use -cases. You’ll likely have a list of shopping queries like the following:

-
    -
  • Q1. Find hotels near a given point of interest.
  • -
  • Q2. Find information about a given hotel, such as its name and -location.
  • -
  • Q3. Find points of interest near a given hotel.
  • -
  • Q4. Find an available room in a given date range.
  • -
  • Q5. Find the rate and amenities for a room.
  • -
-

It is often helpful to be able to refer to queries by a shorthand number rather than explaining them in full. The queries listed here are numbered Q1, Q2, and so on, which is how they are referenced in diagrams throughout the example.

-

Now if the application is to be a success, you’ll certainly want -customers to be able to book reservations at hotels. This includes -steps such as selecting an available room and entering their guest -information. So clearly you will also need some queries that address the -reservation and guest entities from the conceptual data model. Even -here, however, you’ll want to think not only from the customer -perspective in terms of how the data is written, but also in terms of -how the data will be queried by downstream use cases.

-

Your natural tendency might be to focus first on designing the tables to store reservation and guest records, and only then start thinking about the queries that would access them. You may have felt a similar tension already when discussing the shopping queries before, thinking “but where did the hotel and point of interest data come from?” Don’t worry, you will see soon enough. Here are some queries that describe how users will access reservations:

-
    -
  • Q6. Lookup a reservation by confirmation number.
  • -
  • Q7. Lookup a reservation by hotel, date, and guest name.
  • -
  • Q8. Lookup all reservations by guest name.
  • -
  • Q9. View guest details.
  • -
-

All of the queries are shown in the context of the workflow of the -application in the figure below. Each box on the diagram represents a -step in the application workflow, with arrows indicating the flows -between steps and the associated query. If you’ve modeled the application -well, each step of the workflow accomplishes a task that “unlocks” -subsequent steps. For example, the “View hotels near POI” task helps -the application learn about several hotels, including their unique keys. -The key for a selected hotel may be used as part of Q2, in order to -obtain detailed description of the hotel. The act of booking a room -creates a reservation record that may be accessed by the guest and -hotel staff at a later time through various additional queries.

-../_images/data_modeling_hotel_queries.png -

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/data_modeling_rdbms.html b/src/doc/4.0-beta1/data_modeling/data_modeling_rdbms.html deleted file mode 100644 index 6a195cc56..000000000 --- a/src/doc/4.0-beta1/data_modeling/data_modeling_rdbms.html +++ /dev/null @@ -1,252 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "RDBMS Design" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

RDBMS Design

-

When you set out to build a new data-driven application that will use a -relational database, you might start by modeling the domain as a set of -properly normalized tables and use foreign keys to reference related -data in other tables.

-

The figure below shows how you might represent the data storage for your application -using a relational database model. The relational model includes a -couple of “join” tables in order to realize the many-to-many -relationships from the conceptual model of hotels-to-points of interest, -rooms-to-amenities, rooms-to-availability, and guests-to-rooms (via a -reservation).

-../_images/data_modeling_hotel_relational.png -
-

Design Differences Between RDBMS and Cassandra

-

Let’s take a minute to highlight some of the key differences in doing data modeling for Cassandra versus a relational database.

-
-

No joins

-

You cannot perform joins in Cassandra. If you have designed a data model -and find that you need something like a join, you’ll have to either do -the work on the client side, or create a denormalized second table that -represents the join results for you. This latter option is preferred in -Cassandra data modeling. Performing joins on the client should be a very -rare case; you really want to duplicate (denormalize) the data instead.
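As a minimal sketch of the denormalized-table option (the table mirrors the hotels_by_poi table defined later in this guide; the query value is a hypothetical example):

```cql
-- Denormalized table: hotel details are duplicated per point of interest,
-- so a single read answers the query with no join.
CREATE TABLE hotels_by_poi (
  poi_name text,
  hotel_id text,
  name text,
  phone text,
  PRIMARY KEY ((poi_name), hotel_id)
);

-- One query, one table, no join ('Central Park' is a hypothetical value)
SELECT hotel_id, name, phone
  FROM hotels_by_poi
  WHERE poi_name = 'Central Park';
```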

-
-
-

No referential integrity

-

Although Cassandra supports features such as lightweight transactions -and batches, Cassandra itself has no concept of referential integrity -across tables. In a relational database, you could specify foreign keys -in a table to reference the primary key of a record in another table. -But Cassandra does not enforce this. It is still a common design -requirement to store IDs related to other entities in your tables, but -operations such as cascading deletes are not available.

-
-
-

Denormalization

-

In relational database design, you are often taught the importance of -normalization. This is not an advantage when working with Cassandra -because it performs best when the data model is denormalized. It is -often the case that companies end up denormalizing data in relational -databases as well. There are two common reasons for this. One is -performance. Companies simply can’t get the performance they need when -they have to do so many joins on years’ worth of data, so they -denormalize along the lines of known queries. This ends up working, but -goes against the grain of how relational databases are intended to be -designed, and ultimately makes one question whether using a relational -database is the best approach in these circumstances.

-

A second reason that relational databases get denormalized on purpose is -a business document structure that requires retention. That is, you have -an enclosing table that refers to a lot of external tables whose data -could change over time, but you need to preserve the enclosing document -as a snapshot in history. The common example here is with invoices. You -already have customer and product tables, and you’d think that you could -just make an invoice that refers to those tables. But this should never -be done in practice. Customer or price information could change, and -then you would lose the integrity of the invoice document as it was on -the invoice date, which could violate audits, reports, or laws, and -cause other problems.

-

In the relational world, denormalization violates Codd’s normal forms, -and you try to avoid it. But in Cassandra, denormalization is, well, -perfectly normal. It’s not required if your data model is simple. But -don’t be afraid of it.

-

Historically, denormalization in Cassandra has required designing and -managing multiple tables using techniques described in this documentation. -Beginning with the 3.0 release, Cassandra provides a feature known -as materialized views -which allows you to create multiple denormalized -views of data based on a base table design. Cassandra manages -materialized views on the server, including the work of keeping the -views in sync with the table.
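A hedged sketch of a materialized view over the hotels table defined later in this guide (the view name and the choice of phone as the new partition key are illustrative assumptions, not from the original text):

```cql
-- Sketch only: Cassandra keeps this view in sync as the base table changes.
-- All base-table primary key columns must appear in the view's primary key
-- and be restricted with IS NOT NULL.
CREATE MATERIALIZED VIEW hotels_by_phone AS
  SELECT * FROM hotels
  WHERE phone IS NOT NULL AND id IS NOT NULL
  PRIMARY KEY (phone, id);
```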

-
-
-

Query-first design

-

Relational modeling, in simple terms, means that you start from the -conceptual domain and then represent the nouns in the domain in tables. -You then assign primary keys and foreign keys to model relationships. -When you have a many-to-many relationship, you create the join tables -that represent just those keys. The join tables don’t exist in the real -world, and are a necessary side effect of the way relational models -work. After you have all your tables laid out, you can start writing -queries that pull together disparate data using the relationships -defined by the keys. The queries in the relational world are very much -secondary. It is assumed that you can always get the data you want as -long as you have your tables modeled properly. Even if you have to use -several complex subqueries or join statements, this is usually true.

-

By contrast, in Cassandra you don’t start with the data model; you start -with the query model. Instead of modeling the data first and then -writing queries, with Cassandra you model the queries and let the data -be organized around them. Think of the most common query paths your -application will use, and then create the tables that you need to -support them.

-

Detractors have suggested that designing the queries first is overly -constraining on application design, not to mention database modeling. -But it is perfectly reasonable to expect that you should think hard -about the queries in your application, just as you would, presumably, -think hard about your relational domain. You may get it wrong, and then -you’ll have problems in either world. Or your query needs might change -over time, and then you’ll have to work to update your data set. But -this is no different from defining the wrong tables, or needing -additional tables, in an RDBMS.

-
-
-

Designing for optimal storage

-

In a relational database, it is frequently transparent to the user how -tables are stored on disk, and it is rare to hear of recommendations -about data modeling based on how the RDBMS might store tables on disk. -However, that is an important consideration in Cassandra. Because -Cassandra tables are each stored in separate files on disk, it’s -important to keep related columns defined together in the same table.

-

A key goal that you will see as you begin creating data models in -Cassandra is to minimize the number of partitions that must be searched -in order to satisfy a given query. Because the partition is a unit of -storage that does not get divided across nodes, a query that searches a -single partition will typically yield the best performance.

-
-
-

Sorting is a design decision

-

In an RDBMS, you can easily change the order in which records are -returned to you by using ORDER BY in your query. The default sort -order is not configurable; by default, records are returned in the order -in which they are written. If you want to change the order, you just -modify your query, and you can sort by any list of columns.

-

In Cassandra, however, sorting is treated differently; it is a design -decision. The sort order available on queries is fixed, and is -determined entirely by the selection of clustering columns you supply in -the CREATE TABLE command. The CQL SELECT statement does support -ORDER BY semantics, but only in the order specified by the -clustering columns.
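For example, using the available_rooms_by_hotel_date table defined later in this guide (the hotel id shown is a hypothetical value), the clustering columns fix the available sort orders at table-creation time:

```cql
-- The clustering columns (date, room_number) define the only sort order.
CREATE TABLE available_rooms_by_hotel_date (
  hotel_id text,
  date date,
  room_number smallint,
  is_available boolean,
  PRIMARY KEY ((hotel_id), date, room_number)
) WITH CLUSTERING ORDER BY (date ASC, room_number ASC);

-- ORDER BY may only follow the clustering order or its reverse.
SELECT date, room_number, is_available
  FROM available_rooms_by_hotel_date
  WHERE hotel_id = 'AZ123'   -- hypothetical hotel id
  ORDER BY date DESC;
```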

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/data_modeling_refining.html b/src/doc/4.0-beta1/data_modeling/data_modeling_refining.html deleted file mode 100644 index cdc291e9c..000000000 --- a/src/doc/4.0-beta1/data_modeling/data_modeling_refining.html +++ /dev/null @@ -1,288 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Evaluating and Refining Data Models" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Evaluating and Refining Data Models

-

Once you’ve created a physical model, there are some steps you’ll want -to take to evaluate and refine table designs to help ensure optimal -performance.

-
-

Calculating Partition Size

-

The first thing that you want to look for is whether your tables will have -partitions that will be overly large, or to put it another way, too -wide. Partition size is measured by the number of cells (values) that -are stored in the partition. Cassandra’s hard limit is 2 billion cells -per partition, but you’ll likely run into performance issues before -reaching that limit.

-

In order to calculate the size of partitions, use the following -formula:

-
-\[N_v = N_r (N_c - N_{pk} - N_s) + N_s\]
-

The number of values (or cells) in the partition (Nv) is equal to -the number of static columns (Ns) plus the product of the number -of rows (Nr) and the number of values per row. The number of -values per row is defined as the number of columns (Nc) minus the -number of primary key columns (Npk) and static columns -(Ns).

-

The number of columns tends to be relatively static, although it -is possible to alter tables at runtime. For this reason, a -primary driver of partition size is the number of rows in the partition. -This is a key factor that you must consider in determining whether a -partition has the potential to get too large. Two billion values sounds -like a lot, but in a sensor system where tens or hundreds of values are -measured every millisecond, the number of values starts to add up pretty -fast.

-

Let’s take a look at one of the tables to analyze the partition size. -Because it has a wide partition design with one partition per hotel, -look at the available_rooms_by_hotel_date table. The table has -four columns total (Nc = 4), including three primary key columns -(Npk = 3) and no static columns (Ns = 0). Plugging these -values into the formula, the result is:

-
-\[N_v = N_r (4 - 3 - 0) + 0 = 1N_r\]
-

Therefore the number of values for this table is equal to the number of -rows. You still need to determine a number of rows. To do this, make -estimates based on the application design. The table is -storing a record for each room, in each hotel, for every night. -Let’s assume the system will be used to store two years of -inventory at a time, and there are 5,000 hotels in the system, with an -average of 100 rooms in each hotel.

-

Since there is a partition for each hotel, the estimated number of rows -per partition is as follows:

-
-\[N_r = 100 rooms/hotel \times 730 days = 73,000 rows\]
-

This relatively small number of rows per partition is not going to get -you in too much trouble, but if you start storing more dates of inventory, -or don’t manage the size of the inventory well using TTL, you could start -having issues. You still might want to look at breaking up this large -partition, which you’ll see how to do shortly.

-

When performing sizing calculations, it is tempting to assume the -nominal or average case for variables such as the number of rows. -Consider calculating the worst case as well, as these sorts of -predictions have a way of coming true in successful systems.
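For instance, under a purely illustrative worst case of 200 rooms per hotel and three years of inventory (both numbers are assumptions, not from the example above), the row count roughly triples:

\[N_r = 200 rooms/hotel \times 1095 days = 219,000 rows\]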

-
-
-

Calculating Size on Disk

-

In addition to calculating the size of a partition, it is also an -excellent idea to estimate the amount of disk space that will be -required for each table you plan to store in the cluster. In order to -determine the size, use the following formula to determine the size -St of a partition:

-
-\[S_t = \displaystyle\sum_i sizeOf\big (c_{k_i}\big) + \displaystyle\sum_j sizeOf\big(c_{s_j}\big) + N_r\times \bigg(\displaystyle\sum_k sizeOf\big(c_{r_k}\big) + \displaystyle\sum_l sizeOf\big(c_{c_l}\big)\bigg) +\]
-
-\[N_v\times sizeOf\big(t_{avg}\big)\]
-

This is a bit more complex than the previous formula, but let’s break it -down a bit at a time. Let’s take a look at the notation first:

-
    -
  • In this formula, ck refers to partition key columns, -cs to static columns, cr to regular columns, and -cc to clustering columns.
  • -
  • The term tavg refers to the average number of bytes of -metadata stored per cell, such as timestamps. It is typical to use an -estimate of 8 bytes for this value.
  • -
  • You’ll recognize the number of rows Nr and number of values -Nv from previous calculations.
  • -
  • The sizeOf() function refers to the size in bytes of the CQL data -type of each referenced column.
  • -
-

The first term asks you to sum the size of the partition key columns. For -this example, the available_rooms_by_hotel_date table has a single -partition key column, the hotel_id, which is of type -text. Assuming that hotel identifiers are simple 5-character codes, -you have a 5-byte value, so the sum of the partition key column sizes is -5 bytes.

-

The second term asks you to sum the size of the static columns. This table -has no static columns, so the size is 0 bytes.

-

The third term is the most involved, and for good reason—it is -calculating the size of the cells in the partition. Sum the size of -the clustering columns and regular columns. The two clustering columns -are the date, which is 4 bytes, and the room_number, -which is a 2-byte short integer, giving a sum of 6 bytes. -There is only a single regular column, the boolean is_available, -which is 1 byte in size. Summing the regular column size -(1 byte) plus the clustering column size (6 bytes) gives a total of 7 -bytes. To finish up the term, multiply this value by the number of -rows (73,000), giving a result of 511,000 bytes (0.51 MB).

-

The fourth term is simply counting the metadata that Cassandra -stores for each cell. In the storage format used by Cassandra 3.0 and -later, the amount of metadata for a given cell varies based on the type -of data being stored, and whether or not custom timestamp or TTL values -are specified for individual cells. For this table, reuse the number -of values from the previous calculation (73,000) and multiply by 8, -which gives 0.58 MB.

-

Adding these terms together, you get a final estimate:

-
-\[Partition size = 5 bytes + 0 bytes + 0.51 MB + 0.58 MB = 1.1 MB\]
-

This formula is an approximation of the actual size of a partition on -disk, but is accurate enough to be quite useful. Remembering that the -partition must be able to fit on a single node, it looks like the table -design will not put a lot of strain on disk storage.

-

Cassandra’s storage engine was re-implemented for the 3.0 release, -including a new format for SSTable files. The previous format stored a -separate copy of the clustering columns as part of the record for each -cell. The newer format eliminates this duplication, which reduces the -size of stored data and simplifies the formula for computing that size.

-

Keep in mind also that this estimate only counts a single replica of -data. You will need to multiply the value obtained here by the number of -partitions and the number of replicas specified by the keyspace’s -replication strategy in order to determine the total required -capacity for each table. This will come in handy when you -plan your cluster.
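As a purely illustrative calculation, reusing the 5,000 hotels assumed earlier (one partition per hotel) and assuming a replication factor of 3:

\[Total size \approx 5,000 partitions \times 1.1 MB \times 3 replicas \approx 16.5 GB\]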

-
-
-

Breaking Up Large Partitions

-

As discussed previously, the goal is to design tables that can provide -the data you need with queries that touch a single partition, or failing -that, the minimum possible number of partitions. However, as shown in -the examples, it is quite possible to design wide -partition-style tables that approach Cassandra’s built-in limits. -Performing sizing analysis on tables may reveal partitions that are -potentially too large, either in number of values, size on disk, or -both.

-

The technique for splitting a large partition is straightforward: add an -additional column to the partition key. In most cases, moving one of the -existing columns into the partition key will be sufficient. Another -option is to introduce an additional column to the table to act as a -sharding key, but this requires additional application logic.

-

Continuing to examine the available rooms example, if you add the date -column to the partition key for the available_rooms_by_hotel_date -table, each partition would then represent the availability of rooms -at a specific hotel on a specific date. This will certainly yield -partitions that are significantly smaller, perhaps too small, as the -data for consecutive days will likely be on separate nodes.

-

Another technique known as bucketing is often used to break the data -into moderate-size partitions. For example, you could bucketize the -available_rooms_by_hotel_date table by adding a month column to -the partition key, perhaps represented as an integer. The comparison -with the original design is shown in the figure below. While the -month column is partially duplicative of the date, it provides -a nice way of grouping related data in a partition that will not get -too large.

-../_images/data_modeling_hotel_bucketing.png -
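A hedged sketch of the bucketed design (the integer encoding chosen for the month column is an illustrative assumption):

```cql
-- month is added to the partition key as a bucket, e.g. 201912 for December 2019
CREATE TABLE available_rooms_by_hotel_date (
  hotel_id text,
  month int,
  date date,
  room_number smallint,
  is_available boolean,
  PRIMARY KEY ((hotel_id, month), date, room_number)
);
```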

If you really felt strongly about preserving a wide partition design, you -could instead add the room_id to the partition key, so that each -partition would represent the availability of the room across all -dates. Because there was no query identified that involves searching -availability of a specific room, the first or second design approach -is most suitable to the application needs.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/data_modeling_schema.html b/src/doc/4.0-beta1/data_modeling/data_modeling_schema.html deleted file mode 100644 index bc91307d7..000000000 --- a/src/doc/4.0-beta1/data_modeling/data_modeling_schema.html +++ /dev/null @@ -1,236 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Defining Database Schema" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Defining Database Schema

-

Once you have finished evaluating and refining the physical model, you’re -ready to implement the schema in CQL. Here is the schema for the -hotel keyspace, using CQL’s comment feature to document the query -pattern supported by each table:

-
CREATE KEYSPACE hotel WITH replication =
-  {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE TYPE hotel.address (
-  street text,
-  city text,
-  state_or_province text,
-  postal_code text,
-  country text );
-
-CREATE TABLE hotel.hotels_by_poi (
-  poi_name text,
-  hotel_id text,
-  name text,
-  phone text,
-  address frozen<address>,
-  PRIMARY KEY ((poi_name), hotel_id) )
-  WITH comment = 'Q1. Find hotels near given poi'
-  AND CLUSTERING ORDER BY (hotel_id ASC) ;
-
-CREATE TABLE hotel.hotels (
-  id text PRIMARY KEY,
-  name text,
-  phone text,
-  address frozen<address>,
-  pois set<text> )
-  WITH comment = 'Q2. Find information about a hotel';
-
-CREATE TABLE hotel.pois_by_hotel (
-  poi_name text,
-  hotel_id text,
-  description text,
-  PRIMARY KEY ((hotel_id), poi_name) )
-  WITH comment = 'Q3. Find pois near a hotel';
-
-CREATE TABLE hotel.available_rooms_by_hotel_date (
-  hotel_id text,
-  date date,
-  room_number smallint,
-  is_available boolean,
-  PRIMARY KEY ((hotel_id), date, room_number) )
-  WITH comment = 'Q4. Find available rooms by hotel date';
-
-CREATE TABLE hotel.amenities_by_room (
-  hotel_id text,
-  room_number smallint,
-  amenity_name text,
-  description text,
-  PRIMARY KEY ((hotel_id, room_number), amenity_name) )
-  WITH comment = 'Q5. Find amenities for a room';
-
-
-

Notice that the elements of the partition key are surrounded -with parentheses, even though the partition key consists -of the single column poi_name. This is a best practice that makes -the selection of partition key more explicit to others reading your CQL.
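For reference, a query supporting Q1 against this table might look like the following (the point of interest shown is a hypothetical value):

```cql
-- Q1. Find hotels near a given point of interest
SELECT hotel_id, name, phone, address
  FROM hotel.hotels_by_poi
  WHERE poi_name = 'Central Park';
```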

-

Similarly, here is the schema for the reservation keyspace:

-
CREATE KEYSPACE reservation WITH replication = {'class':
-  'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE TYPE reservation.address (
-  street text,
-  city text,
-  state_or_province text,
-  postal_code text,
-  country text );
-
-CREATE TABLE reservation.reservations_by_confirmation (
-  confirm_number text,
-  hotel_id text,
-  start_date date,
-  end_date date,
-  room_number smallint,
-  guest_id uuid,
-  PRIMARY KEY (confirm_number) )
-  WITH comment = 'Q6. Find reservations by confirmation number';
-
-CREATE TABLE reservation.reservations_by_hotel_date (
-  hotel_id text,
-  start_date date,
-  end_date date,
-  room_number smallint,
-  confirm_number text,
-  guest_id uuid,
-  PRIMARY KEY ((hotel_id, start_date), room_number) )
-  WITH comment = 'Q7. Find reservations by hotel and date';
-
-CREATE TABLE reservation.reservations_by_guest (
-  guest_last_name text,
-  hotel_id text,
-  start_date date,
-  end_date date,
-  room_number smallint,
-  confirm_number text,
-  guest_id uuid,
-  PRIMARY KEY ((guest_last_name), hotel_id) )
-  WITH comment = 'Q8. Find reservations by guest name';
-
-CREATE TABLE reservation.guests (
-  guest_id uuid PRIMARY KEY,
-  first_name text,
-  last_name text,
-  title text,
-  emails set<text>,
-  phone_numbers list<text>,
-  addresses map<text, frozen<address>>,
-  confirm_number text )
-  WITH comment = 'Q9. Find guest by ID';
-
-
-

You now have a complete Cassandra schema for storing data for a hotel -application.
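As a quick, illustrative check that the reservation tables support their queries (the values shown are hypothetical):

```cql
-- Q6. Find a reservation by confirmation number
SELECT * FROM reservation.reservations_by_confirmation
  WHERE confirm_number = 'RS2G0Z';

-- Q8. Find all reservations by guest name
SELECT * FROM reservation.reservations_by_guest
  WHERE guest_last_name = 'Nguyen';
```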

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/data_modeling_tools.html b/src/doc/4.0-beta1/data_modeling/data_modeling_tools.html deleted file mode 100644 index d37562a0e..000000000 --- a/src/doc/4.0-beta1/data_modeling/data_modeling_tools.html +++ /dev/null @@ -1,157 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Cassandra Data Modeling Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Data Modeling Tools

-

There are several tools available to help you design and -manage your Cassandra schema and build queries.

-
    -
  • Hackolade -is a data modeling tool that supports schema design for Cassandra and -many other NoSQL databases. Hackolade supports the unique concepts of -CQL such as partition keys and clustering columns, as well as data types -including collections and UDTs. It also provides the ability to create -Chebotko diagrams.
  • -
  • Kashlev Data Modeler is a Cassandra -data modeling tool that automates the data modeling methodology -described in this documentation, including identifying -access patterns, conceptual, logical, and physical data modeling, and -schema generation. It also includes model patterns that you can -optionally leverage as a starting point for your designs.
  • -
  • DataStax DevCenter is a tool for managing -schema, executing queries and viewing results. While the tool is no -longer actively supported, it is still popular with many developers and -is available as a free download. -DevCenter features syntax highlighting for CQL commands, types, and name -literals. DevCenter provides command completion as you type out CQL -commands and interprets the commands you type, highlighting any errors -you make. The tool provides panes for managing multiple CQL scripts and -connections to multiple clusters. The connections are used to run CQL -commands against live clusters and view the results. The tool also has a -query trace feature that is useful for gaining insight into the -performance of your queries.
  • -
  • IDE Plugins - There are CQL plugins available for several Integrated -Development Environments (IDEs), such as IntelliJ IDEA and Apache -NetBeans. These plugins typically provide features such as schema -management and query execution.
  • -
-

Some IDEs and tools that claim to support Cassandra do not actually support -CQL natively, but instead access Cassandra using a JDBC/ODBC driver and -interact with Cassandra as if it were a relational database with SQL -support. When selecting tools for working with Cassandra you’ll want to -make sure they support CQL and reinforce Cassandra best practices for -data modeling as presented in this documentation.

-

Material adapted from Cassandra, The Definitive Guide. Published by -O’Reilly Media, Inc. Copyright © 2020 Jeff Carpenter, Eben Hewitt. -All rights reserved. Used with permission.

-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/index.html b/src/doc/4.0-beta1/data_modeling/index.html deleted file mode 100644 index e0143f17a..000000000 --- a/src/doc/4.0-beta1/data_modeling/index.html +++ /dev/null @@ -1,154 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Data Modeling" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-beta1/data_modeling/intro.html b/src/doc/4.0-beta1/data_modeling/intro.html deleted file mode 100644 index 9c6d4e275..000000000 --- a/src/doc/4.0-beta1/data_modeling/intro.html +++ /dev/null @@ -1,230 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Data Modeling" - -doc-title: "Introduction" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Introduction

-

Apache Cassandra stores data in tables, with each table consisting of rows and columns. CQL (Cassandra Query Language) is used to query the data stored in tables. The Apache Cassandra data model is based around and optimized for querying. Cassandra does not support the relational data modeling intended for relational databases.

-
-

What is Data Modeling?

-

Data modeling is the process of identifying entities and their relationships. In relational databases, data is placed in normalized tables with foreign keys used to reference related data in other tables. Queries that the application will make are driven by the structure of the tables and related data are queried as table joins.

-

In Cassandra, data modeling is query-driven. The data access patterns and application queries determine the structure and organization of the data, which is then used to design the database tables.

-

Data is modeled around specific queries. Queries are best designed to access a single table, which implies that all entities involved in a query must be in the same table to make data access (reads) very fast. Data is modeled to best suit a query or a set of queries. A table could have one or more entities as best suits a query. As entities do typically have relationships among them and queries could involve entities with relationships among them, a single entity may be included in multiple tables.

-
-
-

Query-driven modeling

-

Unlike a relational database model in which queries make use of table joins to get data from multiple tables, joins are not supported in Cassandra so all required fields (columns) must be grouped together in a single table. Since each query is backed by a table, data is duplicated across multiple tables in a process known as denormalization. Data duplication and a high write throughput are used to achieve a high read performance.

-
-
-

Goals

-

The choice of the primary key and partition key is important to distribute data evenly across the cluster. Keeping the number of partitions read for a query to a minimum is also important because different partitions could be located on different nodes and the coordinator would need to send a request to each node adding to the request overhead and latency. Even if the different partitions involved in a query are on the same node, fewer partitions make for a more efficient query.

-
-
-

Partitions

-

Apache Cassandra is a distributed database that stores data across a cluster of nodes. A partition key is used to partition data among the nodes. Cassandra partitions data over the storage nodes using a variant of consistent hashing for data distribution. Hashing is a technique in which, given a key, a hash function generates a hash value (or simply a hash) that is stored in a hash table. A partition key is generated from the first field of a primary key. Partitioning data into hash tables using partition keys provides for rapid lookup. The fewer the partitions used for a query, the faster the response time for the query.

-

As an example of partitioning, consider table t in which id is the only field in the primary key.

-
CREATE TABLE t (
-   id int,
-   k int,
-   v text,
-   PRIMARY KEY (id)
-);
-
-
-

The partition key is generated from the primary key id for data distribution across the nodes in a cluster.

-

Consider a variation of table t that has two fields constituting the primary key to make a composite or compound primary key.

-
CREATE TABLE t (
-   id int,
-   c text,
-   k int,
-   v text,
-   PRIMARY KEY (id,c)
-);
-
-
-

For the table t with a composite primary key the first field id is used to generate the partition key and the second field c is the clustering key used for sorting within a partition. Using clustering keys to sort data makes retrieval of adjacent data more efficient.

-

In general, the first field or component of a primary key is hashed to generate the partition key and the remaining fields or components are the clustering keys that are used to sort data within a partition. Partitioning data improves the efficiency of reads and writes. The other fields that are not primary key fields may be indexed separately to further improve query performance.

-

The partition key could be generated from multiple fields if they are grouped as the first component of a primary key. As another variation of the table t, consider a table with the first component of the primary key made of two fields grouped using parentheses.

-
CREATE TABLE t (
-   id1 int,
-   id2 int,
-   c1 text,
-   c2 text,
-   k int,
-   v text,
-   PRIMARY KEY ((id1,id2),c1,c2)
-);
-
-
-

For the preceding table t the first component of the primary key constituting fields id1 and id2 is used to generate the partition key and the rest of the fields c1 and c2 are the clustering keys used for sorting within a partition.
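A read against this table must restrict both components of the partition key, for example (illustrative values):

```cql
-- id1 and id2 together locate the partition; c1 (and optionally c2)
-- can then narrow the result within the partition.
SELECT * FROM t WHERE id1 = 1 AND id2 = 2 AND c1 = 'a';
```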

-
-
-

Comparing with Relational Data Model

-

Relational databases store data in tables that have relations with other tables using foreign keys. A relational database’s approach to data modeling is table-centric. Queries must use table joins to get data from multiple tables that have a relation between them. Apache Cassandra does not have the concept of foreign keys or relational integrity. Apache Cassandra’s data model is based around designing efficient queries; queries that don’t involve multiple tables. Relational databases normalize data to avoid duplication. Apache Cassandra in contrast de-normalizes data by duplicating data in multiple tables for a query-centric data model. If a Cassandra data model cannot fully integrate the complexity of relationships between the different entities for a particular query, client-side joins in application code may be used.

-
-
-

Examples of Data Modeling

-

As an example, a magazine data set consists of data for magazines with attributes such as magazine id, magazine name, publication frequency, publication date, and publisher. A basic query (Q1) for magazine data is to list all the magazine names including their publication frequency. As not all data attributes are needed for Q1, the data model would only consist of id (for the partition key), magazine name, and publication frequency, as shown in Figure 1.

-
-../_images/Figure_1_data_model.jpg -
-

Figure 1. Data Model for Q1

-

Another query (Q2) is to list all the magazine names by publisher. For Q2 the data model would consist of an additional attribute publisher for the partition key. The id would become the clustering key for sorting within a partition. The data model for Q2 is illustrated in Figure 2.

-
-../_images/Figure_2_data_model.jpg -
-

Figure 2. Data Model for Q2

-
-
-

Designing Schema

-

After the conceptual data model has been created a schema may be designed for a query. For Q1 the following schema may be used.

-
CREATE TABLE magazine_name (id int PRIMARY KEY, name text, publicationFrequency text)
-
-
-

For Q2 the schema definition would include a clustering key for sorting.

-
CREATE TABLE magazine_publisher (publisher text,id int,name text, publicationFrequency text,
-PRIMARY KEY (publisher, id)) WITH CLUSTERING ORDER BY (id DESC)
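A query for Q2 might then look like this (the publisher value is hypothetical):

```cql
-- Q2. List magazine names by publisher; ids are returned in descending order
-- per the clustering order defined above.
SELECT name, publicationFrequency
  FROM magazine_publisher
  WHERE publisher = 'Example Publisher';
```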
-
-
-
-
-

Data Model Analysis

-

The data model is a conceptual model that must be analyzed and optimized based on storage, capacity, redundancy and consistency. A data model may need to be modified as a result of the analysis. Considerations or limitations that are used in data model analysis include:

-
    -
  • Partition Size
  • -
  • Data Redundancy
  • -
  • Disk space
  • -
  • Lightweight Transactions (LWT)
  • -
-

The two measures of partition size are the number of values in a partition and partition size on disk. Though requirements for these measures may vary based on the application, a general guideline is to keep the number of values per partition below 100,000 and the disk space per partition below 100MB.

-

Data redundancy, in the form of duplicate data in tables and multiple partition replicas, is to be expected in the design of a data model, but should nevertheless be kept to a minimum. Lightweight transactions (compare-and-set, conditional updates) can affect performance, so queries using LWT should also be kept to a minimum.

-
-
-

Using Materialized Views

-
-

Warning

-

Materialized views (MVs) are experimental in the latest (4.0) release.

-
-

Materialized views (MVs) could be used to implement multiple queries for a single table. A materialized view is a table built from data from another table, the base table, with a new primary key and new properties. Changes to the base table data automatically add and update data in an MV. Different queries may be implemented using a materialized view, as an MV’s primary key differs from that of the base table. Queries are optimized by the primary key definition.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/ci.html b/src/doc/4.0-beta1/development/ci.html deleted file mode 100644 index d65589cbc..000000000 --- a/src/doc/4.0-beta1/development/ci.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Jenkins CI Environment" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Jenkins CI Environment

-
-

About CI testing and Apache Cassandra

-

Cassandra can be automatically tested using various test suites that are implemented either with JUnit or as dtest scripts written in Python. As outlined in Testing, each kind of test suite addresses a different way of testing Cassandra. But in the end, all of them are executed together on our CI platform at builds.apache.org, running Jenkins.

-
-
-

Setting up your own Jenkins server

-

Jenkins is an open source solution that can be installed on a large number of platforms. Setting up a custom Jenkins instance for Cassandra may be desirable for users who have hardware to spare, or organizations that want to run Cassandra tests for custom patches before contribution.

-

Please refer to the Jenkins download and documentation pages for details on how to get Jenkins running, possibly also including slave build executor instances. The rest of the document will focus on how to setup Cassandra jobs in your Jenkins environment.

-
-

Required plugins

-

The following plugins need to be installed in addition to the standard plugins (git, ant, ...).

-

You can install any missing plugins through the install manager.

-

Go to Manage Jenkins -> Manage Plugins -> Available and install the following plugins and respective dependencies:

-
    -
  • Job DSL
  • -
  • Javadoc Plugin
  • -
  • description setter plugin
  • -
  • Throttle Concurrent Builds Plug-in
  • -
  • Test stability history
  • -
  • Hudson Post build task
  • -
-
-
-

Setup seed job

-

Config New Item

-
    -
  • Name it Cassandra-Job-DSL
  • -
  • Select Freestyle project
  • -
-

Under Source Code Management select Git using the repository: https://github.com/apache/cassandra-builds

-

Under Build, confirm Add build step -> Process Job DSLs and enter at Look on Filesystem: jenkins-dsl/cassandra_job_dsl_seed.groovy

-

Generated jobs will be created based on the Groovy script’s default settings. You may want to override settings by checking This project is parameterized and adding a String Parameter for each of the variables that can be found at the top of the script. This will allow you to set up jobs for your own repository and branches (e.g. working branches).

-

When done, confirm “Save”

-

You should now find a new entry with the given name in your project list. However, building the project will still fail and abort with an error message “Processing DSL script cassandra_job_dsl_seed.groovy ERROR: script not yet approved for use”. Go to Manage Jenkins -> In-process Script Approval to fix this issue. Afterwards you should be able to run the script and have it generate numerous new jobs based on the found branches and configured templates.

-

Jobs are triggered by either changes in Git or are scheduled to execute periodically, e.g. on daily basis. Jenkins will use any available executor with the label “cassandra”, once the job is to be run. Please make sure to make any executors available by selecting Build Executor Status -> Configure -> Add “cassandra” as label and save.

-

Executors need to have “JDK 1.8 (latest)” installed. This is done under Manage Jenkins -> Global Tool Configuration -> JDK Installations…. Executors also need to have the virtualenv package installed on their system.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/code_style.html b/src/doc/4.0-beta1/development/code_style.html deleted file mode 100644 index 7b35ff251..000000000 --- a/src/doc/4.0-beta1/development/code_style.html +++ /dev/null @@ -1,215 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Code Style" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Code Style

-
-

General Code Conventions

-
-
-
-
-
-

Exception handling

-
-
    -
  • Never ever write catch (...) {} or catch (...) { logger.error() } merely to satisfy Java’s compile-time exception checking. Always propagate the exception up or throw RuntimeException (or, if it “can’t happen,” AssertionError). This makes the exceptions visible to automated tests.
  • -
  • Avoid propagating up checked exceptions that no caller handles. Rethrow as RuntimeException (or IOError, if that is more applicable).
  • -
  • Similarly, logger.warn() is often a cop-out: is this an error or not? If it is don’t hide it behind a warn; if it isn’t, no need for the warning.
  • -
  • If you genuinely know an exception indicates an expected condition, it’s okay to ignore it BUT this must be explicitly explained in a comment.
  • -
-
-
-
-

Boilerplate

-
-
    -
  • Avoid redundant @Override annotations when implementing abstract or interface methods.
  • -
  • Do not implement equals or hashcode methods unless they are actually needed.
  • -
  • Prefer public final fields to private fields with getters. (But prefer encapsulating behavior in “real” methods to either.)
  • -
  • Prefer requiring initialization in the constructor to setters.
  • -
  • Avoid redundant this references to member fields or methods.
  • -
  • Do not extract interfaces (or abstract classes) unless you actually need multiple implementations of it.
  • -
  • Always include braces for nested levels of conditionals and loops. Only avoid braces for single level.
  • -
-
-
-
-

Multiline statements

-
-
    -
  • Try to keep lines under 120 characters, but use good judgement – it’s better to exceed 120 by a little, than split a line that has no natural splitting points.
  • -
  • When splitting inside a method call, use one line per parameter and align them, like this:
  • -
-
SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(),
-                                         columnFamilies.size(),
-                                         StorageService.getPartitioner());
-
-
-
    -
  • When splitting a ternary, use one line per clause, carry the operator, and align like this:
  • -
-
var = bar == null
-    ? doFoo()
-    : doBar();
-
-
-
-
-
-

Whitespace

-
-
    -
  • Please make sure to use 4 spaces instead of the tab character for all your indentation.
  • -
  • Many lines in many files have a bunch of trailing whitespace… Please either clean these up in a separate patch, or leave them alone, so that reviewers now and anyone reading code history later doesn’t have to pay attention to whitespace diffs.
  • -
-
-
-
-

Imports

-

Please observe the following order for your imports:

-
java
-[blank line]
-com.google.common
-org.apache.commons
-org.junit
-org.slf4j
-[blank line]
-everything else alphabetically
-
-
-
-
-

Format files for IDEs

-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/dependencies.html b/src/doc/4.0-beta1/development/dependencies.html deleted file mode 100644 index 2973a6973..000000000 --- a/src/doc/4.0-beta1/development/dependencies.html +++ /dev/null @@ -1,155 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Dependency Management" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Dependency Management

-

Managing libraries for Cassandra is a bit less straightforward than in other projects, as the build process is based on ant, maven, and manually managed jars. Make sure to follow the steps below carefully, and pay attention to any emerging issues in the Jenkins CI Environment and to related issues reported on Jira/ML whenever project dependencies change.

-

As Cassandra is an Apache product, all included libraries must follow Apache’s software license requirements.

-
-

Required steps to add or update libraries

-
    -
  • Add or replace jar file in lib directory
  • -
  • Add or update lib/license files
  • -
  • Update dependencies in build.xml
      -
    • Add to parent-pom with correct version
    • -
    • Add to all-pom if simple Cassandra dependency (see below)
    • -
    -
  • -
-
-
-

POM file types

-
    -
  • parent-pom - contains all dependencies with the respective version. All other poms will refer to the artifacts with specified versions listed here.
  • -
  • build-deps-pom(-sources) + coverage-deps-pom - used by ant build compile target. Listed dependencies will be resolved and copied to build/lib/{jar,sources} by executing the maven-ant-tasks-retrieve-build target. This should contain libraries that are required for build tools (grammar, docs, instrumentation), but are not shipped as part of the Cassandra distribution.
  • -
  • test-deps-pom - referred to by maven-ant-tasks-retrieve-test to retrieve and save dependencies to build/test/lib. Exclusively used during JUnit test execution.
  • -
  • all-pom - pom for cassandra-all.jar that can be installed or deployed to public maven repos via ant publish
  • -
-
-
-

Troubleshooting and conflict resolution

-

Here are some useful commands that may help you out resolving conflicts.

-
    -
  • ant realclean - gets rid of the build directory, including build artifacts.
  • -
  • mvn dependency:tree -f build/apache-cassandra-*-SNAPSHOT.pom -Dverbose -Dincludes=org.slf4j - shows transitive dependency tree for artifacts, e.g. org.slf4j. In case the command above fails due to a missing parent pom file, try running ant mvn-install.
  • -
  • rm ~/.m2/repository/org/apache/cassandra/apache-cassandra/ - removes cached local Cassandra maven artifacts
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/documentation.html b/src/doc/4.0-beta1/development/documentation.html deleted file mode 100644 index 3d3b10e84..000000000 --- a/src/doc/4.0-beta1/development/documentation.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Working on Documentation" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Working on Documentation

-
-

How Cassandra is documented

-

The official Cassandra documentation lives in the project’s git repository. We use a static site generator, Sphinx, to create pages hosted at cassandra.apache.org. You’ll also find developer centric content about Cassandra internals in our retired wiki (not covered by this guide).

-

Using a static site generator often requires using a markup language instead of visual editors (which some people would call good news). Sphinx, the tool-set we use to generate our documentation, uses reStructuredText for that. Markup languages allow you to format text by making use of certain syntax elements. Your document structure will also have to follow specific conventions. Feel free to take a look at existing documents to get a better idea of how we use reStructuredText to write our documents.

-

So how do you actually start making contributions?

-
-
-

GitHub based work flow

-

Recommended for shorter documents and minor changes on existing content (e.g. fixing typos or updating descriptions)

-

Follow these steps to contribute using GitHub. It’s assumed that you’re logged in with an existing account.

-
    -
  1. Fork the GitHub mirror of the Cassandra repository
  2. -
-../_images/docs_fork.png -
    -
  2. Create a new branch that you can use to make your edits. It’s recommended to have a separate branch for each of your working projects. It will also make it easier to create a pull request later when you decide you’re ready to contribute your work.
  2. -
-../_images/docs_create_branch.png -
    -
  3. Navigate to document sources doc/source to find the .rst file to edit. The URL of the document should correspond to the directory structure. New files can be created using the “Create new file” button:
  2. -
-../_images/docs_create_file.png -
    -
  4. At this point you should be able to edit the file using the GitHub web editor. Start by naming your file and adding some content. Have a look at other existing .rst files to get a better idea of what format elements to use.
  2. -
-../_images/docs_editor.png -

Make sure to preview added content before committing any changes.

-../_images/docs_preview.png -
    -
  5. Commit your work when you’re done. Make sure to add a short description of all your edits since your last commit.
  2. -
-../_images/docs_commit.png -
    -
  6. Finally, if you decide that you’re done working on your branch, it’s time to create a pull request!
  2. -
-../_images/docs_pr.png -

Afterwards the GitHub Cassandra mirror will list your pull request and you’re done. Congratulations! Please give us some time to look at your suggested changes before we get back to you.

-
-
-

Jira based work flow

-

Recommended for major changes

-

Significant changes to the documentation are best managed through our Jira issue tracker. Please follow the same contribution guides as for regular code contributions. Creating high quality content takes a lot of effort. It’s therefore always a good idea to create a ticket before you start and explain what you’re planning to do. This will create the opportunity for other contributors and committers to comment on your ideas and work so far. Eventually your patch gets a formal review before it is committed.

-
-
-

Working on documents locally using Sphinx

-

Recommended for advanced editing

-

Using the GitHub web interface should allow you to use most common layout elements including images. More advanced formatting options and navigation elements depend on Sphinx to render correctly. Therefore it’s a good idea to set up Sphinx locally for any serious editing. Please follow the instructions in the Cassandra source directory at doc/README.md. Setup is very easy (at least on OSX and Linux).

-
-
-

Notes for committers

-

Please feel free to get involved and merge pull requests created on the GitHub mirror if you’re a committer. As this is a read-only repository, you won’t be able to merge a PR directly on GitHub. You’ll have to commit the changes against the Apache repository with a comment that will close the PR when the commit syncs with GitHub.

-

You may use a git work flow like this:

-
git remote add github https://github.com/apache/cassandra.git
-git fetch github pull/<PR-ID>/head:<PR-ID>
-git checkout <PR-ID>
-
-
-

Now either rebase or squash the commit, e.g. for squashing:

-
git reset --soft origin/trunk
-git commit --author <PR Author>
-
-
-

Make sure to add a proper commit message including a “Closes #<PR-ID>” text to automatically close the PR.

-
-

Publishing

-

Details for building and publishing of the site at cassandra.apache.org can be found here.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/gettingstarted.html b/src/doc/4.0-beta1/development/gettingstarted.html deleted file mode 100644 index 815ad9538..000000000 --- a/src/doc/4.0-beta1/development/gettingstarted.html +++ /dev/null @@ -1,161 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Getting Started" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Getting Started

-
-

Initial Contributions

-
-
Writing a new feature is just one way to contribute to the Cassandra project. In fact, making sure that supporting tasks, such as QA, documentation and helping users, keep up with the development of new features is an ongoing challenge for the project (and most open source projects). So, before firing up your IDE to create that new feature, we’d suggest you consider some of the following activities as a way of introducing yourself to the project and getting to know how things work.
-
    -
  • Add to or update the documentation
  • -
  • Answer questions on the user list
  • -
  • Review and test a submitted patch
  • -
  • Investigate and fix a reported bug
  • -
  • Create unit tests and d-tests
  • -
-
-
-
-
-

Updating documentation

-

The Cassandra documentation is maintained in the Cassandra source repository along with the Cassandra code base. To submit changes to the documentation, follow the standard process for submitting a patch (Contributing Code Changes).

-
-
-

Answering questions on the user list

-

Subscribe to the user list, look out for some questions you know the answer to and reply with an answer. Simple as that! -See the community page for details on how to subscribe to the mailing list.

-
-
-

Reviewing and testing a submitted patch

-

Reviewing patches is not the sole domain of committers; if others have reviewed a patch, it can reduce the load on the committers, allowing them to write more great features or review more patches. Follow the instructions in _development_how_to_review or create a build with the patch and test it with your own workload. Add a comment to the JIRA ticket to let others know what you have done and the results of your work. (For example, “I tested this performance enhancement on our application’s standard production load test and found a 3% improvement.”)

-
-
-

Investigate and/or fix a reported bug

-

Often, the hardest work in fixing a bug is reproducing it. Even if you don’t have the knowledge to produce a fix, figuring out a way to reliably reproduce an issue can be a massive contribution to getting a bug fixed. Document your method of reproduction in a JIRA comment or, better yet, produce an automated test that reproduces the issue and attach it to the ticket. If you go as far as producing a fix, follow the process for submitting a patch (Contributing Code Changes).

-
-
-

Create unit tests and Dtests

-

Test coverage in Cassandra is improving but, as with most code bases, it could benefit from more automated test coverage. Before starting work in an area, consider reviewing and enhancing the existing test coverage. This will both improve your knowledge of the code before you start on an enhancement and reduce the chances of your change in introducing new issues. See Testing and Contributing Code Changes for more detail.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/how_to_commit.html b/src/doc/4.0-beta1/development/how_to_commit.html deleted file mode 100644 index 23deba219..000000000 --- a/src/doc/4.0-beta1/development/how_to_commit.html +++ /dev/null @@ -1,187 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "How-to Commit" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

How-to Commit

-

If you are a committer, feel free to pick any process that works for you - so long as you are planning to commit the work yourself.

-

Here is how committing and merging will usually look when merging and pushing tickets that follow the convention (if patch-based):

-

Hypothetical CASSANDRA-12345 ticket is a cassandra-3.0 based bug fix that requires different code for cassandra-3.3, and trunk. Contributor Jackie supplied a patch for the root branch (12345-3.0.patch), and patches for the remaining branches (12345-3.3.patch, 12345-trunk.patch).

-
-
On cassandra-3.0:
-
    -
  1. git am -3 12345-3.0.patch (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git apply -3 12345-3.3.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git apply -3 12345-trunk.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  4. -
  5. git commit --amend
  6. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-

Same scenario, but a branch-based contribution:

-
-
On cassandra-3.0:
-
    -
  1. git cherry-pick <sha-of-3.0-commit> (if we have a problem b/c of CHANGES.txt not merging anymore, we fix it ourselves, in place)
  2. -
-
-
On cassandra-3.3:
-
    -
  1. git merge cassandra-3.0 -s ours
  2. -
  3. git format-patch -1 <sha-of-3.3-commit>
  4. -
  5. git apply -3 <sha-of-3.3-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On trunk:
-
    -
  1. git merge cassandra-3.3 -s ours
  2. -
  3. git format-patch -1 <sha-of-trunk-commit>
  4. -
  5. git apply -3 <sha-of-trunk-commit>.patch (likely to have an issue with CHANGES.txt here: fix it ourselves, then git add CHANGES.txt)
  6. -
  7. git commit --amend
  8. -
-
-
On any branch:
-
    -
  1. git push origin cassandra-3.0 cassandra-3.3 trunk --atomic
  2. -
-
-
-
-

Tip

-

Notes on git flags: the -3 flag to am and apply will instruct git to perform a 3-way merge for you. If a conflict is detected, you can either resolve it manually or invoke git mergetool - for both am and apply.

-

The --atomic flag to git push does the obvious thing: pushes all or nothing. Without the flag, the command is equivalent to running git push once per branch. This is nifty in case a race condition happens - you won’t push half the branches, blocking other committers’ progress while you are resolving the issue.

-
-
-

Tip

-

The fastest way to get a patch from someone’s commit in a branch on GH - if you don’t have their repo in remotes - is to append .patch to the commit url, e.g. -curl -O https://github.com/apache/cassandra/commit/7374e9b5ab08c1f1e612bf72293ea14c959b0c3c.patch

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/how_to_review.html b/src/doc/4.0-beta1/development/how_to_review.html deleted file mode 100644 index b71fd7527..000000000 --- a/src/doc/4.0-beta1/development/how_to_review.html +++ /dev/null @@ -1,179 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Review Checklist" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Review Checklist

-

When reviewing tickets in Apache JIRA, the following items should be covered as part of the review process:

-

General

-
-
    -
  • Does it conform to the Code Style guidelines?
  • -
  • Is there any redundant or duplicate code?
  • -
  • Is the code as modular as possible?
  • -
  • Can any singletons be avoided?
  • -
  • Can any of the code be replaced with library functions?
  • -
  • Are units of measurement used in the code consistent, both internally and with the rest of the ecosystem?
  • -
-
-

Error-Handling

-
-
    -
  • Are all data inputs and outputs checked (for the correct type, length, format, and range) and encoded?
  • -
  • Where third-party utilities are used, are returning errors being caught?
  • -
  • Are invalid parameter values handled?
  • -
  • Are any Throwable/Exceptions passed to the JVMStabilityInspector?
  • -
  • Are errors well-documented? Does the error message tell the user how to proceed?
  • -
  • Do exceptions propagate to the appropriate level in the code?
  • -
-
-

Documentation

-
-
    -
  • Do comments exist and describe the intent of the code (the “why”, not the “how”)?
  • -
  • Are javadocs added where appropriate?
  • -
  • Is any unusual behavior or edge-case handling described?
  • -
  • Are data structures and units of measurement explained?
  • -
  • Is there any incomplete code? If so, should it be removed or flagged with a suitable marker like ‘TODO’?
  • -
  • Does the code self-document via clear naming, abstractions, and flow control?
  • -
  • Have NEWS.txt, the cql3 docs, and the native protocol spec been updated if needed?
  • -
  • Is the ticket tagged with “client-impacting” and “doc-impacting”, where appropriate?
  • -
  • Has lib/licences been updated for third-party libs? Are they Apache License compatible?
  • -
  • Is the Component on the JIRA ticket set appropriately?
  • -
-
-

Testing

-
-
    -
  • Is the code testable? i.e. does it avoid adding too many or hidden dependencies, can objects be initialized in isolation, and can test frameworks invoke its methods?
  • -
  • Do tests exist and are they comprehensive?
  • -
  • Do unit tests actually test that the code is performing the intended functionality?
  • -
  • Could any test code use common functionality (e.g. ccm, dtest, or CqlTester methods) or abstract it there for reuse?
  • -
  • If the code may be affected by multi-node clusters, are there dtests?
  • -
  • If the code may take a long time to test properly, are there CVH tests?
  • -
  • Is the test passing on CI for all affected branches (up to trunk, if applicable)? Are there any regressions?
  • -
  • If patch affects read/write path, did we test for performance regressions w/multiple workloads?
  • -
  • If adding a new feature, were tests added and performed confirming it meets the expected SLA/use-case requirements for the feature?
  • -
-
-

Logging

-
-
    -
  • Are logging statements logged at the correct level?
  • -
  • Are there logs in the critical path that could affect performance?
  • -
  • Is there any log that could be added to communicate status or troubleshoot potential problems in this feature?
  • -
  • Can any unnecessary logging statement be removed?
  • -
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/ide.html b/src/doc/4.0-beta1/development/ide.html deleted file mode 100644 index e57f259ac..000000000 --- a/src/doc/4.0-beta1/development/ide.html +++ /dev/null @@ -1,268 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Building and IDE Integration" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Building and IDE Integration

-
-

Building From Source

-

Getting started with Cassandra and IntelliJ IDEA or Eclipse is simple, once you manage to build Cassandra from source using Java 8, Git and Ant.

-

The source code for Cassandra is shared through the central Apache Git repository and organized by different branches. You can access the code for the current development branch through git as follows:

-
git clone https://gitbox.apache.org/repos/asf/cassandra.git cassandra-trunk
-
-
-

Other branches will point to different versions of Cassandra. Switching to a different branch requires checking out the branch by its name:

-
git checkout cassandra-3.0
-
-
-

You can get a list of available branches with git branch.

-

Finally build Cassandra using ant:

-
ant
-
-
-

This may take a significant amount of time depending on whether artifacts have to be downloaded and the number of classes that need to be compiled.

-
-

Hint

-

You can set up multiple working trees for different Cassandra versions from the same repository using git-worktree; a sketch is shown below.

-
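For example, a minimal git-worktree sketch (the branch name and the sibling directory are illustrative):

```bash
# from an existing cassandra-trunk checkout, add a second working tree
# that tracks the cassandra-3.0 branch in a sibling directory
cd cassandra-trunk
git worktree add ../cassandra-3.0 cassandra-3.0

# list all working trees attached to this repository
git worktree list
```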
-
-

-
-
-
-

Setting up Cassandra in IntelliJ IDEA

-

IntelliJ IDEA by JetBrains is one of the most popular IDEs for Cassandra and Java development in general. The Community Edition is provided as a free download with all features needed to get started developing Cassandra.

-
-

Setup Cassandra as a Project (C* 2.1 and newer)

-

Since 2.1.5, there is a new ant target: generate-idea-files. Please see our wiki for instructions for older Cassandra versions.

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Once Cassandra is built, generate the IDEA files using ant:
  2. -
-
ant generate-idea-files
-
-
-
    -
  1. Start IDEA
  2. -
  3. Open the IDEA project from the checked out Cassandra directory using the menu item Open in IDEA’s File menu
  4. -
-

The project generated by the ant task generate-idea-files contains nearly everything you need to debug Cassandra and execute unit tests.

-
-
    -
  • Run/debug defaults for JUnit
  • -
  • Run/debug configuration for Cassandra daemon
  • -
  • License header for Java source files
  • -
  • Cassandra code style
  • -
  • Inspections
  • -
-
-
-

-
-
-
-
-

Opening Cassandra in Apache NetBeans

-

Apache NetBeans is the oldest of the open-source Java IDEs, and can be used for Cassandra development. There is no project setup or generation required to open Cassandra in NetBeans.

-
-

Open Cassandra as a Project (C* 4.0 and newer)

-

Please clone and build Cassandra as described above and execute the following steps:

-
    -
  1. Start Apache NetBeans
  2. -
  3. Open the NetBeans project from the ide/ folder of the checked out Cassandra directory using the menu item “Open Project…” in NetBeans’ File menu
  4. -
-

The project opened supports building, running, debugging, and profiling Cassandra from within the IDE. These actions delegate to the ant build.xml script.

-
-
    -
  • Build/Run/Debug Project is available via the Run/Debug menus, or the project context menu.
  • -
  • Profile Project is available via the Profile menu. In the opened Profiler tab, click the green “Profile” button.
  • -
  • Cassandra’s code style is honored in ide/nbproject/project.properties
  • -
-
-

The JAVA8_HOME system variable must be set in the environment that NetBeans starts in for the Run/Debug/Profile ant targets to execute.

-
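One possible way to do this, assuming an OpenJDK 8 installed at the path shown (the path and launcher name are assumptions; adjust for your system):

```bash
# export JAVA8_HOME before launching NetBeans so the ant targets can locate Java 8
export JAVA8_HOME=/usr/lib/jvm/java-8-openjdk-amd64   # hypothetical install path
netbeans &
```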
-

-
-
-
-
-

Setting up Cassandra in Eclipse

-

Eclipse is a popular open source IDE that can be used for Cassandra development. Various Eclipse environments are available from the download page. The following guide was created with “Eclipse IDE for Java Developers”.

-

These instructions were tested on Ubuntu 16.04 with Eclipse Neon (4.6) using Cassandra 2.1, 2.2 and 3.x.

-
-

Project Settings

-

It is important that you generate the Eclipse files with Ant before trying to set up the Eclipse project.

-
-
    -
  • Clone and build Cassandra as described above.
  • -
  • Run ant generate-eclipse-files to create the Eclipse settings.
  • -
  • Start Eclipse.
  • -
  • Select File->Import->Existing Projects into Workspace->Select git directory.
  • -
  • Make sure “cassandra-trunk” is recognized and selected as a project (assuming you checked the code out into the folder cassandra-trunk as described above).
  • -
  • Confirm “Finish” to have your project imported.
  • -
-
-

You should now be able to find the project as part of the “Package Explorer” or “Project Explorer” without having Eclipse complain about any errors after building the project automatically.

-
-
-

Unit Tests

-

Unit tests can be run from Eclipse by simply right-clicking the class file or method and selecting Run As->JUnit Test. Tests can be debugged this way as well by defining breakpoints (double-click line number) and selecting Debug As->JUnit Test.

-

Alternatively all unit tests can be run from the command line as described in Testing

-
-
-

Debugging Cassandra Using Eclipse

-

There are two ways to start and debug a local Cassandra instance with Eclipse. You can either start Cassandra just as you normally would by using the ./bin/cassandra script and connect to the JVM remotely from Eclipse, or start Cassandra from Eclipse right away.

-
-

Starting Cassandra From Command Line

-
-
  • Set an environment variable to define remote debugging options for the JVM:
    export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"
  • Start Cassandra by executing ./bin/cassandra (a combined sketch is shown below)
-
-
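Put together, a minimal sketch of starting a debuggable node from the checked out directory (port 1414 matches the Eclipse configuration described below; -f keeps Cassandra in the foreground):

```bash
# expose a JDWP debug port so Eclipse can attach remotely
export JVM_EXTRA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=1414"

# start Cassandra in the foreground
./bin/cassandra -f
```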

Afterwards you should be able to connect to the running Cassandra process through the following steps:

-

From the menu, select Run->Debug Configurations..

-../_images/eclipse_debug0.png -

Create new remote application

-../_images/eclipse_debug1.png -

Configure connection settings by specifying a name and port 1414

-../_images/eclipse_debug2.png -

Afterwards confirm “Debug” to connect to the JVM and start debugging Cassandra!

-
-
-

Starting Cassandra From Eclipse

-

Cassandra can also be started directly from Eclipse if you don’t want to use the command line.

-

From the menu, select Run->Run Configurations..

-../_images/eclipse_debug3.png -

Create new application

-../_images/eclipse_debug4.png -

Specify name, project and main class org.apache.cassandra.service.CassandraDaemon

-../_images/eclipse_debug5.png -

Configure additional JVM specific parameters that will start Cassandra with some of the settings created by the regular startup script. Change heap related values as needed.

-
-Xms1024M -Xmx1024M -Xmn220M -Xss256k -ea -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark -javaagent:./lib/jamm-0.3.0.jar -Djava.net.preferIPv4Stack=true
-
-
-../_images/eclipse_debug6.png -

Now just confirm “Debug” and you should see the output of Cassandra starting up in the Eclipse console and should be able to set breakpoints and start debugging!

-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/index.html b/src/doc/4.0-beta1/development/index.html deleted file mode 100644 index c71b98616..000000000 --- a/src/doc/4.0-beta1/development/index.html +++ /dev/null @@ -1,185 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Contributing to Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- - -
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/patches.html b/src/doc/4.0-beta1/development/patches.html deleted file mode 100644 index bb0134576..000000000 --- a/src/doc/4.0-beta1/development/patches.html +++ /dev/null @@ -1,274 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Contributing Code Changes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Contributing Code Changes

-
-

Choosing What to Work on

-

Submitted patches can include bug fixes, changes to the Java code base, improvements for tooling (both Java and Python), documentation, testing, or any other changes that require changing the code base. Although the process of contributing code is always the same, the amount of work and time it takes to get a patch accepted also depends on the kind of issue you’re addressing.

-
-
As a general rule of thumb:
-
    -
  • Major new features and significant changes to the code base are unlikely to be accepted without deeper discussion within the developer community
  • -
  • Bug fixes take higher priority compared to features
  • -
  • The extent to which tests are required depends on how likely your changes are to affect the stability of Cassandra in production. Tooling changes require fewer tests than storage engine changes.
  • -
  • Less complex patches will be faster to review: consider breaking up an issue into individual tasks and contributions that can be reviewed separately
  • -
-
-
-
-

Hint

-

Not sure what to work on? Just pick an issue marked as Low Hanging Fruit Complexity in JIRA, which we use to flag issues that could turn out to be good starter tasks for beginners.

-
-
-
-

Before You Start Coding

-

Although contributions are highly appreciated, we do not guarantee that each contribution will become a part of Cassandra. Therefore it’s generally a good idea to first get some feedback on the things you plan to work on, especially about any new features or major changes to the code base. You can reach out to other developers on the mailing list or Slack.

-
-
You should also
-
    -
  • Avoid redundant work by searching for already reported issues in JIRA
  • -
  • Create a new issue early in the process describing what you’re working on - not just after finishing your patch
  • -
  • Link related JIRA issues with your own ticket to provide a better context
  • -
  • Update your ticket from time to time by giving feedback on your progress and link a GitHub WIP branch with your current code
  • -
  • Ping people you would like to ask for advice on JIRA by mentioning users
  • -
-
-
There are also some fixed rules that you need to be aware of:
-
    -
  • Patches will only be applied to branches by following the release model
  • -
  • Code must be testable
  • -
  • Code must follow the Code Style convention
  • -
  • Changes must not break compatibility between different Cassandra versions
  • -
  • Contributions must be covered by the Apache License
  • -
-
-
-
-

Choosing the Right Branches to Work on

-

There are currently multiple Cassandra versions maintained in individual branches:

Version | Policy
------- | ------------------------
4.0     | Code freeze (see below)
3.11    | Critical bug fixes only
3.0     | Critical bug fixes only
2.2     | Critical bug fixes only
2.1     | Critical bug fixes only
-

Corresponding branches in git are easy to recognize as they are named cassandra-<release> (e.g. cassandra-3.0). The trunk branch is an exception, as it contains the most recent commits from all other branches and is used for creating new branches for future tick-tock releases.

-
-

4.0 Code Freeze

-

Patches for new features are currently not accepted for 4.0 or any earlier versions. Starting with the code freeze in September, all efforts should focus on stabilizing the 4.0 branch before the first official release. During that time, only the following patches will be considered for acceptance:

-
-
    -
  • Bug fixes
  • -
  • Measurable performance improvements
  • -
  • Changes not distributed as part of the release such as:
  • -
  • Testing related improvements and fixes
  • -
  • Build and infrastructure related changes
  • -
  • Documentation
  • -
-
-
-
-

Bug Fixes

-

Creating patches for bug fixes is a bit more complicated as this will depend on how many different versions of Cassandra are affected. In each case, the order for merging such changes will be cassandra-2.1 -> cassandra-2.2 -> cassandra-3.0 -> cassandra-3.x -> trunk. But don’t worry, merging from 2.1 would be the worst case for bugs that affect all currently supported versions, which isn’t very common. As a contributor, you’re also not expected to provide a single patch for each version. What you need to do however is:

-
-
    -
  • Be clear about which versions you could verify to be affected by the bug
  • -
  • For 2.x: ask if a bug qualifies to be fixed in this release line, as this may be handled on a case-by-case basis
  • -
  • If possible, create a patch against the lowest version in the branches listed above (e.g. if you found the bug in 3.9 you should try to fix it already in 3.0)
  • -
  • Test if the patch can be merged cleanly across branches in the direction listed above
  • -
  • Be clear which branches may need attention by the committer or even create custom patches for those if you can
  • -
-
-
-
-
-
-

Creating a Patch

-

So you’ve finished coding and the great moment arrives: it’s time to submit your patch!

-
-
    -
  1. Create a branch for your changes if you haven’t done already. Many contributors name their branches based on ticket number and Cassandra version, e.g. git checkout -b 12345-3.0
  2. -
  3. Verify that you follow Cassandra’s Code Style
  4. -
  5. Make sure all tests (including yours) pass using ant as described in Testing. If you suspect a test failure is unrelated to your change, it may be useful to check the test’s status by searching the issue tracker or looking at CI results for the relevant upstream version. Note that the full test suites take many hours to complete, so it is common to only run specific relevant tests locally before uploading a patch. Once a patch has been uploaded, the reviewer or committer can help setup CI jobs to run the full test suites.
  6. -
  7. Consider going through the Review Checklist for your code. This will help you to understand how others will consider your change for inclusion.
  8. -
  9. Don’t make the committer squash commits for you in the root branch either. Multiple commits are fine - and often preferable - during review stage, especially for incremental review, but once +1d, do either:
  10. -
-
-
    -
  1. Attach a patch to JIRA with a single squashed commit in it (per branch), or
  2. -
  3. Squash the commits in-place in your branches into one
  4. -
-
-
    -
  1. Include a CHANGES.txt entry (put it at the top of the list), and format the commit message appropriately in your patch as below. Please note that only user-impacting items should be listed in CHANGES.txt. If you fix a test that does not affect users and does not require changes in runtime code, then no CHANGES.txt entry is necessary.

    -
    <One sentence description, usually Jira title and CHANGES.txt summary>
    -<Optional lengthier description>
    -patch by <Authors>; reviewed by <Reviewers> for CASSANDRA-#####
    -
    -
    -
  2. -
  3. When you’re happy with the result, create a patch:

    -
  4. -
-
-
git add <any new or modified file>
-git commit -m '<message>'
-git format-patch HEAD~1
-mv <patch-file> <ticket-branchname.txt> (e.g. 12345-trunk.txt, 12345-3.0.txt)
-
-
-

Alternatively, many contributors prefer to make their branch available on GitHub. In this case, fork the Cassandra repository on GitHub and push your branch:

-
git push --set-upstream origin 12345-3.0
-
-
-
-
    -
  1. To make life easier for your reviewer/committer, you may want to make sure your patch applies cleanly to later branches and create additional patches/branches for later Cassandra versions to which your original patch does not apply cleanly. That said, this is not critical, and you will receive feedback on your patch regardless.
  2. -
  3. Attach the newly generated patch to the ticket/add a link to your branch and click “Submit Patch” at the top of the ticket. This will move the ticket into “Patch Available” status, indicating that your submission is ready for review.
  4. -
  5. Wait for other developers or committers to review it and hopefully +1 the ticket (see Review Checklist). If your change does not receive a +1, do not be discouraged. If possible, the reviewer will give suggestions to improve your patch or explain why it is not suitable.
  6. -
  7. If the reviewer has given feedback to improve the patch, make the necessary changes and move the ticket into “Patch Available” once again.
  8. -
-
-

Once the review process is complete, you will receive a +1. Wait for a committer to commit it. Do not delete your branches immediately after they’ve been committed - keep them on GitHub for a while. Alternatively, attach a patch to JIRA for historical record. It’s not that uncommon for a committer to mess up a merge. In case of that happening, access to the original code is required, or else you’ll have to redo some of the work.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/release_process.html b/src/doc/4.0-beta1/development/release_process.html deleted file mode 100644 index c9e8fdbb4..000000000 --- a/src/doc/4.0-beta1/development/release_process.html +++ /dev/null @@ -1,362 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Release Process" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Release Process

- -
-

-

-
-

The steps for Release Managers to create, vote and publish releases for Apache Cassandra.

-

While a committer can perform the initial steps of creating and calling a vote on a proposed release, only a PMC member can complete the process of publishing and announcing the release.

-
-

Prerequisites

-
-
Background docs
-
-
-
-

A Debian-based Linux OS is required to run the release steps. Debian-based distros provide the required RPM, dpkg and repository management tools.

-
-

Create and publish your GPG key

-

To create a GPG key, follow the guidelines. The key must be 4096 bit RSA. Include your public key in:

-
https://dist.apache.org/repos/dist/release/cassandra/KEYS
-
-
-

Publish your GPG key in a PGP key server, such as MIT Keyserver.

-
-
-

Bintray account with access to Apache organisation

-

Publishing a successfully voted upon release requires bintray access to the Apache organisation. Please verify that you have a bintray account and the Apache organisation is listed here.

-
-
-
-

Create Release Artifacts

-

Any committer can perform the following steps to create and call a vote on a proposed release.

-

Check that there are no open urgent JIRA tickets currently being worked on. Also check with the PMC whether there are security vulnerabilities currently being worked on in private. Current project habit is to check the timing for a new release on the dev mailing lists.

-
-

Perform the Release

-

Run the following commands to generate and upload release artifacts, to the ASF nexus staging repository and dev distribution location:

-
cd ~/git
-git clone https://github.com/apache/cassandra-builds.git
-git clone https://github.com/apache/cassandra.git
-
-# Edit the variables at the top of the `prepare_release.sh` file
-edit cassandra-builds/cassandra-release/prepare_release.sh
-
-# Ensure your 4096 RSA key is the default secret key
-edit ~/.gnupg/gpg.conf # update the `default-key` line
-edit ~/.rpmmacros # update the `%gpg_name <key_id>` line
-
-# Ensure DEBFULLNAME and DEBEMAIL is defined and exported, in the debian scripts configuration
-edit ~/.devscripts
-
-# The prepare_release.sh is run from the actual cassandra git checkout,
-# on the branch/commit that we wish to tag for the tentative release along with version number to tag.
-cd cassandra
-git switch cassandra-<version-branch>
-
-# The following cuts the release artifacts (including deb and rpm packages) and deploy to staging environments
-../cassandra-builds/cassandra-release/prepare_release.sh -v <version>
-
-
-

Follow the prompts.

-

If building the deb or rpm packages fail, those steps can be repeated individually using the -d and -r flags, respectively.

-
-
-
-

Call for a Vote

-

Fill out the following email template and send to the dev mailing list:

-
I propose the following artifacts for release as <version>.
-
-sha1: <git-sha>
-
-Git: https://gitbox.apache.org/repos/asf?p=cassandra.git;a=shortlog;h=refs/tags/<version>-tentative
-
-Artifacts: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/org/apache/cassandra/apache-cassandra/<version>/
-
-Staging repository: https://repository.apache.org/content/repositories/orgapachecassandra-<nexus-id>/
-
-The distribution packages are available here: https://dist.apache.org/repos/dist/dev/cassandra/${version}/
-
-The vote will be open for 72 hours (longer if needed).
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>-tentative
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>-tentative
-
-
-
-
-

Post-vote operations

-

Any PMC member can perform the following steps to formalize and publish a successfully voted release.

-
-

Publish Artifacts

-

Run the following commands to publish the voted release artifacts:

-
cd ~/git
-# edit the variables at the top of the `finish_release.sh` file
-edit cassandra-builds/cassandra-release/finish_release.sh
-
-# After cloning cassandra-builds repo, `finish_release.sh` is run from the actual cassandra git checkout,
-# on the tentative release tag that we wish to tag for the final release version number tag.
-cd ~/git/cassandra/
-git checkout <version>-tentative
-../cassandra-builds/cassandra-release/finish_release.sh -v <version>
-
-
-

If successful, take note of the email text output which can be used in the next section “Send Release Announcement”. The output will also list the next steps that are required.

-
-
-

Promote Nexus Repository

-
    -
  • Login to Nexus repository again.
  • -
  • Click on “Staging” and then on the repository with id “cassandra-staging”.
  • -
  • Find your closed staging repository, right click on it and choose “Promote”.
  • -
  • Select the “Releases” repository and click “Promote”.
  • -
  • Next click on “Repositories”, select the “Releases” repository and validate that your artifacts exist as you expect them.
  • -
-
-
-

Publish the Bintray Uploaded Distribution Packages

-

Log into bintray and publish the uploaded artifacts.

-
-
-

Update and Publish Website

-

See docs for building and publishing the website.

-

Also update the CQL doc if appropriate.

-
-
-

Release version in JIRA

-

Release the JIRA version.

-
    -
  • In JIRA go to the version that you want to release and release it.
  • -
  • Create a new version, if it has not been done before.
  • -
-
-
-

Update to Next Development Version

-

Update the codebase to point to the next development version:

-
cd ~/git/cassandra/
-git checkout cassandra-<version-branch>
-edit build.xml          # update `<property name="base.version" value="…"/> `
-edit debian/changelog   # add entry for new version
-edit CHANGES.txt        # add entry for new version
-git commit -m "Increment version to <next-version>" build.xml debian/changelog CHANGES.txt
-
-# …and forward merge and push per normal procedure
-
-
-
- -
-

Send Release Announcement

-

Fill out the following email template and send to both user and dev mailing lists:

-
The Cassandra team is pleased to announce the release of Apache Cassandra version <version>.
-
-Apache Cassandra is a fully distributed database. It is the right choice
-when you need scalability and high availability without compromising
-performance.
-
- http://cassandra.apache.org/
-
-Downloads of source and binary distributions are listed in our download
-section:
-
- http://cassandra.apache.org/download/
-
-This version is <the first|a bug fix> release[1] on the <version-base> series. As always,
-please pay attention to the release notes[2] and let us know[3] if you
-were to encounter any problem.
-
-Enjoy!
-
-[1]: (CHANGES.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=CHANGES.txt;hb=<version>
-[2]: (NEWS.txt) https://git1-us-west.apache.org/repos/asf?p=cassandra.git;a=blob_plain;f=NEWS.txt;hb=<version>
-[3]: https://issues.apache.org/jira/browse/CASSANDRA
-
-
-
-
-

Update Slack Cassandra topic

-
-
Update topic in cassandra Slack room
-
/topic cassandra.apache.org | Latest releases: 3.11.4, 3.0.18, 2.2.14, 2.1.21 | ask, don’t ask to ask
-
-
-
-

Tweet from @Cassandra

-

Tweet the new release, from the @Cassandra account

-
-
-

Delete Old Releases

-

As described in When to Archive.

-

An example of removing old releases:

-
svn co https://dist.apache.org/repos/dist/release/cassandra/ cassandra-dist
-svn rm <previous_version> debian/pool/main/c/cassandra/<previous_version>*
-svn st
-# check and commit
-
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/development/testing.html b/src/doc/4.0-beta1/development/testing.html deleted file mode 100644 index a431dddae..000000000 --- a/src/doc/4.0-beta1/development/testing.html +++ /dev/null @@ -1,185 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Contributing to Cassandra" - -doc-title: "Testing" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Testing

-

Creating tests is one of the most important and also most difficult parts of developing Cassandra. There are different ways to test your code depending on what you’re working on.

-
-

Unit Testing

-

The most simple way to test code in Cassandra is probably by writing a unit test. Cassandra uses JUnit as a testing framework and test cases can be found in the test/unit directory. Ideally you’d be able to create a unit test for your implementation that would exclusively cover the class you created (the unit under test). Unfortunately this is not always possible and Cassandra doesn’t have a very mock friendly code base. Often you’ll find yourself in a situation where you have to make use of an embedded Cassandra instance that you’ll be able to interact with in your test. If you want to make use of CQL in your test, you can simply extend CQLTester and use some of the convenient helper methods such as in the following example.

-
@Test
-public void testBatchAndList() throws Throwable
-{
-   createTable("CREATE TABLE %s (k int PRIMARY KEY, l list<int>)");
-   execute("BEGIN BATCH " +
-           "UPDATE %1$s SET l = l +[ 1 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 2 ] WHERE k = 0; " +
-           "UPDATE %1$s SET l = l + [ 3 ] WHERE k = 0; " +
-           "APPLY BATCH");
-
-   assertRows(execute("SELECT l FROM %s WHERE k = 0"),
-              row(list(1, 2, 3)));
-}
-
-
-

Unit tests can be run from the command line using the ant test command, ant test -Dtest.name=<simple_classname> to execute a test suite or ant testsome -Dtest.name=<FQCN> -Dtest.methods=<testmethod1>[,testmethod2] for individual tests. For example, to run all test methods in the org.apache.cassandra.cql3.SimpleQueryTest class, you would run:

-
ant test -Dtest.name=SimpleQueryTest
-
-
-

To run only the testStaticCompactTables() test method from that class, you would run:

-
ant testsome -Dtest.name=org.apache.cassandra.cql3.SimpleQueryTest -Dtest.methods=testStaticCompactTables
-
-
-

If you see an error like this:

-
Throws: cassandra-trunk/build.xml:1134: taskdef A class needed by class org.krummas.junit.JStackJUnitTask cannot be found:
-org/apache/tools/ant/taskdefs/optional/junit/JUnitTask  using the classloader
-AntClassLoader[/.../cassandra-trunk/lib/jstackjunit-0.0.1.jar]
-
-
-

You will need to install the ant-optional package since it contains the JUnitTask class.

-
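On a Debian/Ubuntu machine, for example, installing it would look something like this (package names may differ on other distributions):

```bash
sudo apt-get install ant-optional
```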
-

Long running tests

-

Tests that consume a significant amount of time during execution can be found in the test/long directory and executed as regular JUnit tests or standalone programs. Except for the execution time, there’s nothing really special about them. However, ant will execute tests under test/long only when using the ant long-test target.

-
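For example, running the long-running suite from the source checkout:

```bash
# executes the tests under test/long; this can take a while
ant long-test
```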
-
-
-

DTests

-

One way of doing integration or system testing at larger scale is by using dtest, which stands for “Cassandra Distributed Tests”. The idea is to automatically set up Cassandra clusters using various configurations and simulate certain use cases you want to test. This is done using Python scripts and ccmlib from the ccm project. Dtests will set up clusters using this library just as you do running ad-hoc ccm commands on your local machine. Afterwards dtests will use the Python driver to interact with the nodes, manipulate the file system, analyze logs or mess with individual nodes.

-
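As a taste of the ccm workflow that dtests build on, a local three-node cluster can be created like this (a rough sketch; the cluster name and Cassandra version are illustrative, and ccm is assumed to be installed via pip):

```bash
# install ccm and create/start a local 3-node cluster on a given Cassandra version
pip install ccm
ccm create test-cluster -v 3.11.6 -n 3 -s

# check that all three nodes are up
ccm status
```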

Using dtests helps us to prevent regression bugs by continually executing tests on the CI server against new patches. Committers will be able to set up build branches there and your reviewer may use the CI environment to run tests for your patch. Read more on the motivation behind continuous integration here.

-

The best way to learn how to write dtests is probably by reading the introduction “How to Write a Dtest” and by looking at existing, recently updated tests in the project. New tests must follow certain style conventions that are checked before accepting contributions. In contrast to Cassandra, dtest issues and pull-requests are managed on GitHub, therefore you should make sure to link any created dtests in your Cassandra ticket and also refer to the ticket number in your dtest PR.

-

Creating a good dtest can be tough, but it should not prevent you from submitting patches! Please ask in the corresponding JIRA ticket how to write a good dtest for the patch. In most cases a reviewer or committer will be able to support you, and in some cases they may offer to write a dtest for you.

-
-
-

Performance Testing

-

Performance tests for Cassandra are a special breed of tests that are not part of the usual patch contribution process. In fact you can contribute tons of patches to Cassandra without ever running performance tests. They are important however when working on performance improvements, as such improvements must be measurable.

-
-

Cassandra Stress Tool

-

See Cassandra Stress

-
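As a rough illustration only (the workload size and thread count are arbitrary), a small write workload can be run from a source checkout with:

```bash
tools/bin/cassandra-stress write n=100000 -rate threads=50
```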
-
-

cstar_perf

-

Another tool available on github is cstar_perf that can be used for intensive performance testing in large clusters or locally. Please refer to the project page on how to set it up and how to use it.

-
-
-

CircleCI

-

Cassandra ships with a default CircleCI configuration to enable running tests on your branches. You need to go to the CircleCI website, click “Login” and log in with your GitHub account. Then you need to give CircleCI permission to watch your repositories. Once you have done that, you can optionally configure CircleCI to run tests in parallel - click “Projects”, then your GitHub account and then click the settings for the project. If you leave the parallelism at 1 for Cassandra, only ant eclipse-warnings and ant test will be run. If you raise the parallelism to 4, it also runs ant long-test, ant test-compression and ant stress-test.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/faq/index.html b/src/doc/4.0-beta1/faq/index.html deleted file mode 100644 index 2cab52f60..000000000 --- a/src/doc/4.0-beta1/faq/index.html +++ /dev/null @@ -1,318 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Frequently Asked Questions" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
-
-
- -
-
-
-
-
- -
-

Frequently Asked Questions

- -
-

Why can’t I set listen_address to listen on 0.0.0.0 (all my addresses)?

-

Cassandra is a gossip-based distributed system and listen_address is the address a node tells other nodes to reach it at. Telling other nodes “contact me on any of my addresses” is a bad idea; if different nodes in the cluster pick different addresses for you, Bad Things happen.

-

If you don’t want to manually specify an IP to listen_address for each node in your cluster (understandable!), leave it blank and Cassandra will use InetAddress.getLocalHost() to pick an address. Then it’s up to you or your ops team to make things resolve correctly (/etc/hosts/, dns, etc).

-

One exception to this process is JMX, which by default binds to 0.0.0.0 (Java bug 6425769).

-

See CASSANDRA-256 and CASSANDRA-43 for more gory details.

-
-
-

What ports does Cassandra use?

-

By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, and 7199 for JMX. The internode communication and native protocol ports are configurable in the Cassandra Configuration File. The JMX port is configurable in cassandra-env.sh (through JVM options). All ports are TCP.

-
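A quick way to check which of these ports a node is actually listening on, assuming a Linux host with ss available:

```bash
# list listening TCP sockets on the default Cassandra ports
ss -ltnp | grep -E '7000|7001|9042|7199'
```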
-
-

What happens to existing data in my cluster when I add new nodes?

-

When a new node joins a cluster, it will automatically contact the other nodes in the cluster and copy the right data to itself. See Adding, replacing, moving and removing nodes.

-
-
-

I delete data from Cassandra, but disk usage stays the same. What gives?

-

Data you write to Cassandra gets persisted to SSTables. Since SSTables are immutable, the data can’t actually be removed when you perform a delete; instead, a marker (also called a “tombstone”) is written to indicate the value’s new status. Never fear though, on the first compaction that occurs between the data and the tombstone, the data will be expunged completely and the corresponding disk space recovered. See Compaction for more detail.

-
-
-

Why does nodetool ring only show one entry, even though my nodes logged that they see each other joining the ring?

-

This happens when you have the same token assigned to each node. Don’t do that.

-

Most often this bites people who deploy by installing Cassandra on a VM (especially when using the Debian package, which auto-starts Cassandra after installation, thus generating and saving a token), then cloning that VM to other nodes.

-

The easiest fix is to wipe the data and commitlog directories, thus making sure that each node will generate a random token on the next restart.

-
-
-

Can I change the replication factor (of a keyspace) on a live cluster?

-

Yes, but it will require running a full repair (or cleanup) to change the replica count of existing data; a command-level sketch follows the list below:

-
  • Alter the replication factor for the desired keyspace (using cqlsh for instance).
  • If you’re reducing the replication factor, run nodetool cleanup on the cluster to remove surplus replicated data. Cleanup runs on a per-node basis.
  • If you’re increasing the replication factor, run nodetool repair -full to ensure data is replicated according to the new configuration. Repair runs on a per-replica set basis. This is an intensive process that may result in adverse cluster performance. It’s highly recommended to do rolling repairs, as an attempt to repair the entire cluster at once will most likely swamp it. Note that you will need to run a full repair (-full) to make sure that already repaired sstables are not skipped.
-
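A minimal command-level sketch, assuming a hypothetical keyspace named my_ks, datacenter dc1 and an increased replication factor of 3:

```bash
# 1. alter the replication settings (keyspace, datacenter and RF are illustrative)
cqlsh -e "ALTER KEYSPACE my_ks WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3};"

# 2. run a full repair so existing data reaches the new replicas
#    (run per node, ideally as a rolling repair)
nodetool repair -full my_ks
```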
-
-

Can I Store (large) BLOBs in Cassandra?

-

Cassandra isn’t optimized for large file or BLOB storage and a single blob value is always read and sent to the client entirely. As such, storing small blobs (less than single digit MB) should not be a problem, but it is advised to manually split large blobs into smaller chunks.

-

Please note in particular that by default, any value greater than 16MB will be rejected by Cassandra due to the max_mutation_size_in_kb configuration of the Cassandra Configuration File (which defaults to half of commitlog_segment_size_in_mb, which itself defaults to 32MB).

-
-
-

Nodetool says “Connection refused to host: 127.0.1.1” for any remote host. What gives?

-

Nodetool relies on JMX, which in turn relies on RMI, which in turn sets up its own listeners and connectors as needed on each end of the exchange. Normally all of this happens behind the scenes transparently, but incorrect name resolution for either the host connecting, or the one being connected to, can result in crossed wires and confusing exceptions.

-

If you are not using DNS, then make sure that your /etc/hosts files are accurate on both ends. If that fails, try setting the -Djava.rmi.server.hostname=<public name> JVM option near the bottom of cassandra-env.sh to an interface that you can reach from the remote machine.

-
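For example, a hedged sketch of the option added near the bottom of cassandra-env.sh (the IP address is a placeholder):

```bash
# in cassandra-env.sh: bind RMI to an address the remote machine can actually reach
JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=203.0.113.10"
```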
-
-

Will batching my operations speed up my bulk load?

-

No. Using batches to load data will generally just add “spikes” of latency. Use asynchronous INSERTs instead, or use true Bulk Loading.

-

An exception is batching updates to a single partition, which can be a Good Thing (as long as the size of a single batch stays reasonable). But never ever blindly batch everything!

-
-
-

On RHEL nodes are unable to join the ring

-

Check if SELinux is on; if it is, turn it off.

-
-
-

How do I unsubscribe from the email list?

-

Send an email to user-unsubscribe@cassandra.apache.org.

-
-
-

Why does top report that Cassandra is using a lot more memory than the Java heap max?

-

Cassandra uses Memory Mapped Files (mmap) internally. That is, we -use the operating system’s virtual memory system to map a number of on-disk files into the Cassandra process’ address -space. This will “use” virtual memory; i.e. address space, and will be reported by tools like top accordingly, but on 64 -bit systems virtual address space is effectively unlimited so you should not worry about that.

-

What matters from the perspective of “memory use” in the sense as it is normally meant, is the amount of data allocated -on brk() or mmap’d /dev/zero, which represent real memory used. The key issue is that for a mmap’d file, there is never -a need to retain the data resident in physical memory. Thus, whatever you do keep resident in physical memory is -essentially just there as a cache, in the same way as normal I/O will cause the kernel page cache to retain data that -you read/write.

-

The difference between normal I/O and mmap() is that in the mmap() case the memory is actually mapped to the process, -thus affecting the virtual size as reported by top. The main argument for using mmap() instead of standard I/O is the -fact that reading entails just touching memory - in the case of the memory being resident, you just read it - you don’t -even take a page fault (so no overhead in entering the kernel and doing a semi-context switch). This is covered in more -detail here.

-
-
-

What are seeds?

-

Seeds are used during startup to discover the cluster.

-

If you configure your nodes to refer to some node as a seed, nodes in your ring tend to send Gossip messages to seeds more often (also see the section on gossip) than to non-seeds. In other words, seeds work as hubs of the Gossip network. With seeds, each node can detect status changes of other nodes quickly.

-

Seeds are also referred to by new nodes on bootstrap to learn about other nodes in the ring. When you add a new node to the ring, you need to specify at least one live seed to contact. Once a node joins the ring, it learns about the other nodes, so it doesn’t need a seed on subsequent boots.

-

You can make a node a seed at any time. There is nothing special about seed nodes. If you list the node in the seed list it is a seed.

-

Seeds do not auto bootstrap (i.e. if a node has itself in its seed list it will not automatically transfer data to itself). If you want a node to do that, bootstrap it first and then add it to seeds later. If you have no data (new install) you do not have to worry about bootstrap at all.

-

Recommended usage of seeds:

-
    -
  • pick two (or more) nodes per data center as seed nodes.
  • -
  • sync the seed list to all your nodes
  • -
-
-
-

Does single seed mean single point of failure?

-

The ring can operate or boot without a seed; however, you will not be able to add new nodes to the cluster. It is recommended to configure multiple seeds in a production system.

-
-
-

Why can’t I call jmx method X on jconsole?

-

Some JMX operations use an array argument and, as jconsole doesn’t support array arguments, those operations can’t be called with jconsole (the buttons are inactive for them). You need to write a JMX client to call such operations, or use an array-capable JMX monitoring tool.

-
-
-

Why do I see “… messages dropped …” in the logs?

-

This is a symptom of load shedding – Cassandra defending itself against more requests than it can handle.

-

Internode messages which are received by a node, but do not get to be processed within their proper timeout (see read_request_timeout, write_request_timeout, … in the Cassandra Configuration File), are dropped rather than processed (since the coordinator node will no longer be waiting for a response).

-

For writes, this means that the mutation was not applied to all replicas it was sent to. The inconsistency will be repaired by read repair, hints or a manual repair. The write operation may also have timed out as a result.

-

For reads, this means a read request may not have completed.

-

Load shedding is part of the Cassandra architecture; if this is a persistent issue, it is generally a sign of an overloaded node or cluster.

-
-
-

Cassandra dies with java.lang.OutOfMemoryError: Map failed

-

If Cassandra is dying specifically with the “Map failed” message, it means the OS is denying Java the ability to lock more memory. In Linux, this typically means memlock is limited. Check /proc/<pid of cassandra>/limits to verify this and raise it (e.g., via ulimit in bash). You may also need to increase vm.max_map_count. Note that the Debian package handles this for you automatically.

-
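A quick sketch of checking and raising these limits (values are illustrative, and the pgrep pattern assumes the standard daemon class name):

```bash
# check the memlock limit of the running Cassandra process
grep -i 'locked' /proc/$(pgrep -f CassandraDaemon)/limits

# raise the kernel map count (example value, not a recommendation)
sudo sysctl -w vm.max_map_count=1048575
```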
-
-

What happens if two updates are made with the same timestamp?

-

Updates must be commutative, since they may arrive in different orders on different replicas. As long as Cassandra has a deterministic way to pick the winner (in a timestamp tie), the one selected is as valid as any other, and the specifics should be treated as an implementation detail. That said, in the case of a timestamp tie, Cassandra follows two rules: first, deletes take precedence over inserts/updates. Second, if there are two updates, the one with the lexically larger value is selected.

-
-
-

Why does bootstrapping a new node fail with a “Stream failed” error?

-

Two main possibilities:

-
    -
  1. the GC may be creating long pauses disrupting the streaming process
  2. -
  3. compactions happening in the background hold streaming long enough that the TCP connection fails
  4. -
-

In the first case, regular GC tuning advice applies. In the second case, you need to set TCP keepalive to a lower value (the default is very high on Linux). Try to just run the following:

-
$ sudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5
-
-
-

To make those settings permanent, add them to your /etc/sysctl.conf file.

-
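For example, a sketch of persisting the keepalive settings shown above:

```bash
# append the keepalive settings so they survive a reboot, then reload
cat <<'EOF' | sudo tee -a /etc/sysctl.conf
net.ipv4.tcp_keepalive_time=60
net.ipv4.tcp_keepalive_intvl=60
net.ipv4.tcp_keepalive_probes=5
EOF
sudo sysctl -p
```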

Note: GCE’s firewall will always interrupt TCP connections that are inactive for more than 10 min. Running the above command is highly recommended in that environment.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/genindex.html b/src/doc/4.0-beta1/genindex.html deleted file mode 100644 index c62b3a7d1..000000000 --- a/src/doc/4.0-beta1/genindex.html +++ /dev/null @@ -1,95 +0,0 @@ - ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Index" -doc-header-links: ' - -' -doc-search-path: "search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-beta1/getting_started/configuring.html b/src/doc/4.0-beta1/getting_started/configuring.html deleted file mode 100644 index 8b94a51aa..000000000 --- a/src/doc/4.0-beta1/getting_started/configuring.html +++ /dev/null @@ -1,161 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Configuring Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Configuring Cassandra

-

For running Cassandra on a single node, the default configuration file present at ./conf/cassandra.yaml is enough; you shouldn’t need to change any configuration. However, when you deploy a cluster of nodes, or use clients that are not on the same host, then there are some parameters that must be changed.

-

The Cassandra configuration files can be found in the conf directory of tarballs. For packages, the configuration files will be located in /etc/cassandra.

-
-

Main runtime properties

-

Most configuration in Cassandra is done via yaml properties that can be set in cassandra.yaml. At a minimum you should consider setting the following properties:

-
  • cluster_name: the name of your cluster.
  • seeds: a comma separated list of the IP addresses of your cluster seeds.
  • storage_port: you don’t necessarily need to change this but make sure that there are no firewalls blocking this port.
  • listen_address: the IP address of your node, this is what allows other nodes to communicate with this node so it is important that you change it. Alternatively, you can set listen_interface to tell Cassandra which interface to use, and consequently which address to use. Set only one, not both.
  • native_transport_port: as for storage_port, make sure this port is not blocked by firewalls as clients will communicate with Cassandra on this port.
-
-
-

Changing the location of directories

-

The following yaml properties control the location of directories:

-
    -
  • data_file_directories: one or more directories where data files are located.
  • -
  • commitlog_directory: the directory where commitlog files are located.
  • -
  • saved_caches_directory: the directory where saved caches are located.
  • -
  • hints_directory: the directory where hints are located.
  • -
-

For performance reasons, if you have multiple disks, consider putting commitlog and data files on different disks.

-
-
-

Environment variables

-

JVM-level settings such as heap size can be set in cassandra-env.sh. You can add any additional JVM command line argument to the JVM_OPTS environment variable; when Cassandra starts these arguments will be passed to the JVM.

-
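For example, a sketch of passing one extra JVM argument for a single start-up of a tarball install (the argument chosen here is illustrative):

```bash
# add an extra JVM argument via the JVM_OPTS environment variable
export JVM_OPTS="-XX:+HeapDumpOnOutOfMemoryError"
bin/cassandra -f
```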
-
-

Logging

-

The logger in use is logback. You can change logging properties by editing logback.xml. By default it will log at INFO level into a file called system.log and at debug level into a file called debug.log. When running in the foreground, it will also log at INFO level to the console.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/getting_started/drivers.html b/src/doc/4.0-beta1/getting_started/drivers.html deleted file mode 100644 index 5d851c1a1..000000000 --- a/src/doc/4.0-beta1/getting_started/drivers.html +++ /dev/null @@ -1,248 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Client drivers" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Client drivers

-

Here are known Cassandra client drivers organized by language. Before choosing a driver, you should verify the Cassandra -version and functionality supported by a specific driver.

- -
-

Python

- -
-

Clojure

- -
-
-

Erlang

- -
-
-

Go

- -
-
-

Haskell

- -
-
-

Rust

- -
- -
-

Elixir

- -
-
-

Dart

- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/getting_started/index.html b/src/doc/4.0-beta1/getting_started/index.html deleted file mode 100644 index bb2977657..000000000 --- a/src/doc/4.0-beta1/getting_started/index.html +++ /dev/null @@ -1,165 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Getting Started" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- - \ No newline at end of file diff --git a/src/doc/4.0-beta1/getting_started/installing.html b/src/doc/4.0-beta1/getting_started/installing.html deleted file mode 100644 index aaa921c21..000000000 --- a/src/doc/4.0-beta1/getting_started/installing.html +++ /dev/null @@ -1,388 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Installing Cassandra" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Installing Cassandra

-

These are the instructions for deploying the supported releases of Apache Cassandra on Linux servers.

-

Cassandra runs on a wide array of Linux distributions including (but not limited to):

-
    -
  • Ubuntu, most notably LTS releases 16.04 to 18.04
  • -
  • CentOS & RedHat Enterprise Linux (RHEL) including 6.6 to 7.7
  • -
  • Amazon Linux AMIs including 2016.09 through to Linux 2
  • -
  • Debian versions 8 & 9
  • -
  • SUSE Enterprise Linux 12
  • -
-

This is not an exhaustive list of operating system platforms, nor is it prescriptive. However, users are well-advised to conduct exhaustive tests of their own, particularly for less-popular distributions of Linux. Deploying on older versions is not recommended unless you have previous experience with the older distribution in a production environment.

-
-

Prerequisites

-
  • Install the latest version of Java 8, either the Oracle Java Standard Edition 8 or OpenJDK 8. To verify that you have the correct version of java installed, type java -version.
  • NOTE: Experimental support for Java 11 was added in Cassandra 4.0 (CASSANDRA-9608). Running Cassandra on Java 11 is experimental. Do so at your own risk. For more information, see NEWS.txt.
  • For using cqlsh, the latest version of Python 2.7 or Python 3.6+. To verify that you have the correct version of Python installed, type python --version.
-
-
-

Choosing an installation method

-

For most users, installing the binary tarball is the simplest choice. The tarball unpacks all its contents into a single location with binaries and configuration files located in their own subdirectories. The most obvious attribute of the tarball installation is that it does not require root permissions and can be installed on any Linux distribution.

-

Packaged installations require root permissions. Install the RPM build on CentOS and RHEL-based distributions if you want to install Cassandra using YUM. Install the Debian build on Ubuntu and other Debian-based distributions if you want to install Cassandra using APT. Note that both the YUM and APT methods require root permissions and will install the binaries and configuration files as the cassandra OS user.

-
-
-

Installing the binary tarball

-
    -
  1. Verify the version of Java installed. For example:
  2. -
-
$ java -version
-openjdk version "1.8.0_222"
-OpenJDK Runtime Environment (build 1.8.0_222-8u222-b10-1ubuntu1~16.04.1-b10)
-OpenJDK 64-Bit Server VM (build 25.222-b10, mixed mode)
-
-
-
    -
  1. Download the binary tarball from one of the mirrors on the Apache Cassandra Download -site. For example, to download 4.0:
  2. -
-
$ curl -OL http://apache.mirror.digitalpacific.com.au/cassandra/4.0.0/apache-cassandra-4.0.0-bin.tar.gz
-
-
-

NOTE: The mirrors only host the latest versions of each major supported release. To download an earlier -version of Cassandra, visit the Apache Archives.

-
    -
  1. OPTIONAL: Verify the integrity of the downloaded tarball using one of the methods here. -For example, to verify the hash of the downloaded file using GPG:
  2. -
-
$ gpg --print-md SHA256 apache-cassandra-4.0.0-bin.tar.gz
-apache-cassandra-4.0.0-bin.tar.gz: 28757DDE 589F7041 0F9A6A95 C39EE7E6
-                                   CDE63440 E2B06B91 AE6B2006 14FA364D
-
-
-

Compare the signature with the SHA256 file from the Downloads site:

-
$ curl -L https://downloads.apache.org/cassandra/4.0.0/apache-cassandra-4.0.0-bin.tar.gz.sha256
-28757dde589f70410f9a6a95c39ee7e6cde63440e2b06b91ae6b200614fa364d
-
-
-
    -
  1. Unpack the tarball:
  2. -
-
$ tar xzvf apache-cassandra-4.0.0-bin.tar.gz
-
-
-

The files will be extracted to the apache-cassandra-4.0.0/ directory. This is the tarball installation -location.

-
    -
  1. Located in the tarball installation location are the directories for the scripts, binaries, utilities, configuration, data and log files:
  2. -
-
<tarball_installation>/
-    bin/
-    conf/
-    data/
-    doc/
-    interface/
-    javadoc/
-    lib/
-    logs/
-    pylib/
-    tools/
-
-
-

For information on how to configure your installation, see -Configuring Cassandra.

-
    -
  1. Start Cassandra:
  2. -
-
$ cd apache-cassandra-4.0.0/
-$ bin/cassandra
-
-
-

NOTE: This will run Cassandra as the authenticated Linux user.

-

You can monitor the progress of the startup with:

-
$ tail -f logs/system.log
-
-
-

Cassandra is ready when you see an entry like this in the system.log:

-
INFO  [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)...
-
-
-
    -
  1. Check the status of Cassandra:
  2. -
-
$ bin/nodetool status
-
-
-

The status column in the output should report UN which stands for “Up/Normal”.
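For a single-node installation, the output looks roughly like the following sketch; the address, load, token count, and host ID shown here are purely illustrative and will differ on your system:

Datacenter: datacenter1
=======================
Status=Up/Down
|/ State=Normal/Leaving/Joining/Moving
--  Address    Load       Tokens  Owns (effective)  Host ID                               Rack
UN  127.0.0.1  104.6 KiB  16      100.0%            29d5b4e3-3b07-43ff-b603-7a0d64366b96  rack1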

-

Alternatively, connect to the database with:

-
$ bin/cqlsh
-
-
-
-
-

Installing the Debian packages

-
    -
  1. Verify the version of Java installed. For example:
  2. -
-
$ java -version
-openjdk version "1.8.0_222"
-OpenJDK Runtime Environment (build 1.8.0_222-8u222-b10-1ubuntu1~16.04.1-b10)
-OpenJDK 64-Bit Server VM (build 25.222-b10, mixed mode)
-
-
-
    -
  1. Add the Apache repository of Cassandra to the file cassandra.sources.list. The latest major version -is 4.0 and the corresponding distribution name is 40x (with an “x” as the suffix). -For older releases use 311x for C* 3.11 series, 30x for 3.0, 22x for 2.2 and 21x for 2.1. -For example, to add the repository for version 4.0 (40x):
  2. -
-
$ echo "deb http://www.apache.org/dist/cassandra/debian 40x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list
-deb http://www.apache.org/dist/cassandra/debian 40x main
-
-
-
    -
  1. Add the Apache Cassandra repository keys to the list of trusted keys on the server:
  2. -
-
$ curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add -
-  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
-                                 Dload  Upload   Total   Spent    Left  Speed
-100  266k  100  266k    0     0   320k      0 --:--:-- --:--:-- --:--:--  320k
-OK
-
-
-
    -
  1. Update the package index from sources:
  2. -
-
$ sudo apt-get update
-
-
-
    -
  1. Install Cassandra with APT:
  2. -
-
$ sudo apt-get install cassandra
-
-
-

NOTE: A new Linux user cassandra will be created as part of the installation. The Cassandra service will also run as this user.

-
    -
  1. The Cassandra service gets started automatically after installation. Monitor the progress of -the startup with:
  2. -
-
$ tail -f /var/log/cassandra/system.log
-
-
-

Cassandra is ready when you see an entry like this in the system.log:

-
INFO  [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)...
-
-
-

NOTE: For information on how to configure your installation, see -Configuring Cassandra.

-
    -
  1. Check the status of Cassandra:
  2. -
-
$ nodetool status
-
-
-

The status column in the output should report UN which stands for “Up/Normal”.

-

Alternatively, connect to the database with:

-
$ cqlsh
-
-
-
-
-

Installing the RPM packages

-
    -
  1. Verify the version of Java installed. For example:
  2. -
-
$ java -version
-openjdk version "1.8.0_222"
-OpenJDK Runtime Environment (build 1.8.0_232-b09)
-OpenJDK 64-Bit Server VM (build 25.232-b09, mixed mode)
-
-
-
    -
  1. Add the Apache repository of Cassandra to the file /etc/yum.repos.d/cassandra.repo (as the root -user). The latest major version is 4.0 and the corresponding distribution name is 40x (with an “x” as the suffix). -For older releases use 311x for C* 3.11 series, 30x for 3.0, 22x for 2.2 and 21x for 2.1. -For example, to add the repository for version 4.0 (40x):
  2. -
-
[cassandra]
-name=Apache Cassandra
-baseurl=https://downloads.apache.org/cassandra/redhat/40x/
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://downloads.apache.org/cassandra/KEYS
-
-
-
    -
  1. Update the package index from sources:
  2. -
-
$ sudo yum update
-
-
-
    -
  1. Install Cassandra with YUM:
  2. -
-
$ sudo yum install cassandra
-
-
-

NOTE: A new Linux user cassandra will be created as part of the installation. The Cassandra service will also run as this user.

-
    -
  1. Start the Cassandra service:
  2. -
-
$ sudo service cassandra start
-
-
-
    -
  1. Monitor the progress of the startup with:
  2. -
-
$ tail -f /var/log/cassandra/system.log
-
-
-

Cassandra is ready when you see an entry like this in the system.log:

-
INFO  [main] 2019-12-17 03:03:37,526 Server.java:156 - Starting listening for CQL clients on localhost/127.0.0.1:9042 (unencrypted)...
-
-
-

NOTE: For information on how to configure your installation, see -Configuring Cassandra.

-
    -
  1. Check the status of Cassandra:
  2. -
-
$ nodetool status
-
-
-

The status column in the output should report UN which stands for “Up/Normal”.

-

Alternatively, connect to the database with:

-
$ cqlsh
-
-
-
-
-

Further installation info

-

For help with installation issues, see the Troubleshooting section.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/getting_started/production.html b/src/doc/4.0-beta1/getting_started/production.html deleted file mode 100644 index c8bc49561..000000000 --- a/src/doc/4.0-beta1/getting_started/production.html +++ /dev/null @@ -1,246 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Production Recommendations" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Production Recommendations

-

The cassandra.yaml and jvm.options files have a number of notes and recommendations for production usage. This page -expands on some of the notes in these files with additional information.

-
-

Tokens

-

Using more than 1 token (referred to as vnodes) allows for more flexible expansion and more streaming peers when -bootstrapping new nodes into the cluster. This can limit the negative impact of streaming (I/O and CPU overhead) -as well as allow for incremental cluster expansion.

-

As a tradeoff, more tokens will lead to sharing data with more peers, which can result in decreased availability. To learn more about this we -recommend reading this paper.

-

The number of tokens can be changed using the following setting:

-

num_tokens: 16

-

Here are the most common token counts with a brief explanation of when and why you would use each one.

- ---- - - - - - - - - - - - - - - - - -
Token Count  Description
1            Maximum availability, maximum cluster size, fewest peers, but inflexible expansion. Must always double the size of the cluster to expand and remain balanced.
4            A healthy mix of elasticity and availability. Recommended for clusters which will eventually reach over 30 nodes. Requires adding approximately 20% more nodes to remain balanced. Shrinking a cluster may result in cluster imbalance.
16           Best for heavily elastic clusters which expand and shrink regularly, but may have availability issues with larger clusters. Not recommended for clusters over 50 nodes.
-

In addition to setting the token count, it’s extremely important that allocate_tokens_for_local_replication_factor be -set as well, to ensure even token allocation.
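As a sketch, assuming a local replication factor of 3 (the value is an assumption and should match your keyspaces), the two settings in cassandra.yaml would look like:

num_tokens: 16
allocate_tokens_for_local_replication_factor: 3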

-
-
-

Read Ahead

-

Read ahead is an operating system feature that attempts to keep as much data loaded in the page cache as possible. The -goal is to decrease latency by using additional throughput on reads where the latency penalty is high due to seek times -on spinning disks. By leveraging read ahead, the OS can pull additional data into memory without the cost of additional -seeks. This works well when available RAM is greater than the size of the hot dataset, but can be problematic when the -hot dataset is much larger than available RAM. The benefit of read ahead decreases as the size of your hot dataset gets -bigger in proportion to available memory.

-

With small partitions (usually tables with no partition key, but not limited to this case) and solid state drives, read -ahead can increase disk usage without any of the latency benefits, and in some cases can result in up to -a 5x latency and throughput performance penalty. Read heavy, key/value tables with small (under 1KB) rows are especially -prone to this problem.

-

We recommend the following read ahead settings:

- ---- - - - - - - - - - - - - - -
Hardware        Initial Recommendation
Spinning Disks  64KB
SSD             4KB
-

Read ahead can be adjusted on Linux systems by using the blockdev tool.

-

For example, we can set the read ahead of /dev/sda1 to 4KB by doing the following:

-
blockdev --setra 8 /dev/sda1
-
-
-

Note: blockdev accepts the number of 512 byte sectors to read ahead. The argument of 8 above is equivalent to 4KB.
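To check the current read ahead value before changing it, you can query the device; the device name is illustrative and the value printed is the current read ahead in 512 byte sectors:

blockdev --getra /dev/sda1
256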

-

Since each system is different, use the above recommendations as a starting point and tune based on your SLA and throughput requirements. To understand how read ahead impacts disk resource usage, we recommend carefully reading through the troubleshooting portion of the documentation.

-
-
-

Compression

-

Compressed data is stored by compressing fixed size byte buffers and writing the data to disk. The buffer size is -determined by the chunk_length_in_kb element in the compression map of the schema settings.

-

The default setting is 16KB starting with Cassandra 4.0.

-

Since the entire compressed buffer must be read off disk, using too high of a compression chunk length can lead to -significant overhead when reading small records. Combined with the default read ahead setting this can result in massive -read amplification for certain workloads.

-

LZ4Compressor is the default and recommended compression algorithm.
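As an illustrative sketch, the chunk length and compressor can be set per table in the schema; the keyspace and table names below are hypothetical:

ALTER TABLE mykeyspace.mytable
    WITH compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 4};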

-

There is additional information on this topic on The Last Pickle Blog.

-
-
-

Compaction

-

There are different compaction strategies available for different workloads. -We recommend reading up on the different strategies to understand which is the best for your environment. Different tables -may (and frequently do) use different compaction strategies on the same cluster.
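For example, a single table can be switched to a different strategy with an ALTER TABLE statement; the table name and the choice of LeveledCompactionStrategy below are illustrative only:

ALTER TABLE mykeyspace.mytable
    WITH compaction = {'class': 'LeveledCompactionStrategy'};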

-
-
-

Encryption

-

It is significantly easier to set up peer-to-peer encryption and client-server encryption when setting up your production cluster than it is to set them up once the cluster is already serving production traffic. If you are planning on using network encryption eventually (in any form), we recommend setting it up now. Changing these configurations down the line is not impossible, but mistakes can result in downtime or data loss.
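A minimal sketch of the relevant cassandra.yaml sections, assuming keystores and truststores have already been provisioned; the paths and passwords are placeholders:

server_encryption_options:
    internode_encryption: all
    keystore: /etc/cassandra/conf/keystore.jks
    keystore_password: changeme
    truststore: /etc/cassandra/conf/truststore.jks
    truststore_password: changeme

client_encryption_options:
    enabled: true
    keystore: /etc/cassandra/conf/keystore.jks
    keystore_password: changeme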

-
-
-

Ensure Keyspaces are Created with NetworkTopologyStrategy

-

Production clusters should never use SimpleStrategy. Production keyspaces should use the NetworkTopologyStrategy (NTS).

-

For example:

-
create KEYSPACE mykeyspace WITH replication =
-{'class': 'NetworkTopologyStrategy', 'datacenter1': 3};
-
-
-

NetworkTopologyStrategy allows Cassandra to take advantage of multiple racks and data centers.

-
-
-

Configure Racks and Snitch

-

Correctly configuring or changing racks after a cluster has been provisioned is an unsupported process. Migrating from -a single rack to multiple racks is also unsupported and can result in data loss.

-

Using GossipingPropertyFileSnitch is the most flexible solution for on-premises or mixed cloud environments. Ec2Snitch is reliable for AWS EC2-only environments.
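As a sketch, the snitch is selected in cassandra.yaml and each node declares its datacenter and rack in cassandra-rackdc.properties; the datacenter and rack names below are illustrative:

# cassandra.yaml
endpoint_snitch: GossipingPropertyFileSnitch

# cassandra-rackdc.properties
dc=dc1
rack=rack1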

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/getting_started/querying.html b/src/doc/4.0-beta1/getting_started/querying.html deleted file mode 100644 index 981412ce5..000000000 --- a/src/doc/4.0-beta1/getting_started/querying.html +++ /dev/null @@ -1,147 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Getting Started" - -doc-title: "Inserting and querying" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Inserting and querying

-

The API to Cassandra is CQL, the Cassandra Query Language. To use CQL, you will need to connect to the -cluster, which can be done:

-
    -
  • either using cqlsh,
  • -
  • or through a client driver for Cassandra.
  • -
-
-

CQLSH

-

cqlsh is a command line shell for interacting with Cassandra through CQL. It is shipped with every Cassandra package, -and can be found in the bin/ directory alongside the cassandra executable. It connects to the single node specified on -the command line. For example:

-
$ bin/cqlsh localhost
-Connected to Test Cluster at localhost:9042.
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-Use HELP for help.
-cqlsh> SELECT cluster_name, listen_address FROM system.local;
-
- cluster_name | listen_address
---------------+----------------
- Test Cluster |      127.0.0.1
-
-(1 rows)
-cqlsh>
-
-
-

See the cqlsh section for full documentation.

-
-
-

Client drivers

-

Many client drivers are provided by the community, and a list of known drivers is provided in the next section. You should refer to the documentation of each driver for more information on how to use it.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/index.html b/src/doc/4.0-beta1/index.html deleted file mode 100644 index 114fe9cff..000000000 --- a/src/doc/4.0-beta1/index.html +++ /dev/null @@ -1,86 +0,0 @@ ---- -layout: doclandingpage -title: "Documentation" -is_homepage: false -is_sphinx_doc: false ---- - -

Apache Cassandra Documentation v4.0-alpha5

- -
This documentation is a work-in-progress. - Contributions are welcome.
- -

Main documentation

- -
-
-
- - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - -
- - - -
- - - -
- - - -
- -

Meta information

- - - - diff --git a/src/doc/4.0-beta1/new/auditlogging.html b/src/doc/4.0-beta1/new/auditlogging.html deleted file mode 100644 index 82aa67f00..000000000 --- a/src/doc/4.0-beta1/new/auditlogging.html +++ /dev/null @@ -1,549 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Audit Logging" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Audit Logging

-

Audit Logging is a new feature in Apache Cassandra 4.0 (CASSANDRA-12151). All database activity is logged to a directory in the local filesystem and the audit log files are rolled periodically. All database operations are monitored and recorded. Audit logs are stored in files in a local directory instead of in the database itself, as this provides several benefits, some of which are:

-
    -
  • No additional database capacity is needed to store audit logs
  • -
  • No query tool is required; storing the audit logs in the database would require one
  • -
  • Latency of database operations is not affected; no performance impact
  • -
  • It is easier to implement file based logging than database based logging
  • -
-
-

What does Audit Logging Log?

-

Audit logging logs:

-
    -
  1. All authentication which includes successful and failed login attempts
  2. -
  3. All database command requests to CQL. Both failed and successful CQL is logged
  4. -
-

More specifically an audit log entry could be one of two types:

-
    -
  1. CQL Audit Log Entry Type or
  2. -
  3. Common Audit Log Entry Type
  4. -
-

Each of these types comprises several database operations. The CQL Audit Log Entry Type could be one of the following; the category of the CQL audit log entry type is listed in parentheses.

-
    -
  1. SELECT(QUERY),
  2. -
  3. UPDATE(DML),
  4. -
  5. DELETE(DML),
  6. -
  7. TRUNCATE(DDL),
  8. -
  9. CREATE_KEYSPACE(DDL),
  10. -
  11. ALTER_KEYSPACE(DDL),
  12. -
  13. DROP_KEYSPACE(DDL),
  14. -
  15. CREATE_TABLE(DDL),
  16. -
  17. DROP_TABLE(DDL),
  18. -
  19. PREPARE_STATEMENT(PREPARE),
  20. -
  21. DROP_TRIGGER(DDL),
  22. -
  23. LIST_USERS(DCL),
  24. -
  25. CREATE_INDEX(DDL),
  26. -
  27. DROP_INDEX(DDL),
  28. -
  29. GRANT(DCL),
  30. -
  31. REVOKE(DCL),
  32. -
  33. CREATE_TYPE(DDL),
  34. -
  35. DROP_AGGREGATE(DDL),
  36. -
  37. ALTER_VIEW(DDL),
  38. -
  39. CREATE_VIEW(DDL),
  40. -
  41. DROP_ROLE(DCL),
  42. -
  43. CREATE_FUNCTION(DDL),
  44. -
  45. ALTER_TABLE(DDL),
  46. -
  47. BATCH(DML),
  48. -
  49. CREATE_AGGREGATE(DDL),
  50. -
  51. DROP_VIEW(DDL),
  52. -
  53. DROP_TYPE(DDL),
  54. -
  55. DROP_FUNCTION(DDL),
  56. -
  57. ALTER_ROLE(DCL),
  58. -
  59. CREATE_TRIGGER(DDL),
  60. -
  61. LIST_ROLES(DCL),
  62. -
  63. LIST_PERMISSIONS(DCL),
  64. -
  65. ALTER_TYPE(DDL),
  66. -
  67. CREATE_ROLE(DCL),
  68. -
  69. USE_KEYSPACE (OTHER).
  70. -
-

The Common Audit Log Entry Type could be one of the following; the category of the Common audit log entry type is listed in parentheses.

-
    -
  1. REQUEST_FAILURE(ERROR),
  2. -
  3. LOGIN_ERROR(AUTH),
  4. -
  5. UNAUTHORIZED_ATTEMPT(AUTH),
  6. -
  7. LOGIN_SUCCESS (AUTH).
  8. -
-
-
-

What Audit Logging does not Log?

-

Audit logging does not log:

-
    -
  1. Configuration changes made in cassandra.yaml
  2. -
  3. Nodetool Commands
  4. -
-
-
-

Audit Logging is Flexible and Configurable

-

Audit logging is flexible and configurable in cassandra.yaml as follows:

-
    -
  • Keyspaces and tables to be monitored and audited may be specified.
  • -
  • Users to be included/excluded may be specified. By default all users are audit logged.
  • -
  • Categories of operations to audit or exclude may be specified.
  • -
  • The frequency at which to roll the log files may be specified. Default frequency is hourly.
  • -
-
-
-

Configuring Audit Logging

-

Audit Logging is configured on each node separately, in the audit_logging_options setting in cassandra.yaml. The settings may be the same or different on each node.

-
-

Enabling Audit Logging

-

Audit logging is enabled by setting the enabled option to true in the audit_logging_options setting.

-
audit_logging_options:
-   enabled: true
-
-
-
-
-

Setting the Logger

-

The audit logger is set with the logger option.

-
logger: BinAuditLogger
-
-
-

Two types of audit loggers are supported: FileAuditLogger and BinAuditLogger. -BinAuditLogger is the default setting. The BinAuditLogger is an efficient way to log events to file in a binary format.

-

FileAuditLogger is a synchronous, file-based audit logger that uses the standard logging mechanism. It logs events to the audit/audit.log file using an slf4j logger.

-

The NoOpAuditLogger is a No-Op implementation of the audit logger to be used as a default audit logger when audit logging is disabled.

-
-
-

Setting the Audit Logs Directory

-

The audit logs directory is set with the audit_logs_dir option. The directory is not created automatically, so an existing directory must be provided. The audit logs directory can also be configured with the cassandra.logdir.audit system property; otherwise it defaults to cassandra.logdir + /audit/. A user-created directory may be set. As an example, create a directory for the audit logs and set its permissions.

-
sudo mkdir -p  /cassandra/audit/logs/hourly
-sudo chmod -R 777 /cassandra/audit/logs/hourly
-
-
-

Set the directory for the audit logs directory using the audit_logs_dir option.

-
audit_logs_dir: "/cassandra/audit/logs/hourly"
-
-
-
-
-

Setting Keyspaces to Audit

-

Set the keyspaces to include with the included_keyspaces option and the keyspaces to exclude with the excluded_keyspaces option. By default all user keyspaces are included, while the system, system_schema and system_virtual_schema keyspaces are excluded.

-
# included_keyspaces:
-# excluded_keyspaces: system, system_schema, system_virtual_schema
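For example, to restrict auditing to a single application keyspace, uncomment and set included_keyspaces; the keyspace name below is illustrative:

included_keyspaces: auditlogkeyspace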
-
-
-
-
-

Setting Categories to Audit

-

The categories of database operations to be included are specified with the included_categories option as a comma separated list. By default all supported categories are included. The categories of database operations to be excluded are specified with excluded_categories option as a comma separated list. By default no category is excluded.

-
# included_categories:
-# excluded_categories:
-
-
-

The supported categories for audit log are:

-
    -
  1. QUERY
  2. -
  3. DML
  4. -
  5. DDL
  6. -
  7. DCL
  8. -
  9. OTHER
  10. -
  11. AUTH
  12. -
  13. ERROR
  14. -
  15. PREPARE
  16. -
-
-
-

Setting Users to Audit

-

Users to audit log are set with the included_users and excluded_users options. The included_users option specifies a comma separated list of users to include explicitly and by default all users are included. The excluded_users option specifies a comma separated list of users to exclude explicitly and by default no user is excluded.

-
# included_users:
-# excluded_users:
-
-
-
-
-

Setting the Roll Frequency

-

The roll_cycle option sets the frequency at which the audit log file is rolled. Supported values are MINUTELY, HOURLY, and DAILY. Default value is HOURLY, which implies that after every hour a new audit log file is created.

-
roll_cycle: HOURLY
-
-
-

An audit log file could also get rolled for other reasons, such as when the log file reaches the configured size threshold.

-
-
-

Setting Archiving Options

-

The archiving options are for archiving the rolled audit logs. The archive command to use is set with the archive_command option, and max_archive_retries sets the maximum number of retries for failed archive commands.

-
# archive_command:
-# max_archive_retries: 10
-
-
-

Default archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled:
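A minimal sketch of such a script, assuming rolled segments should simply be moved into an archive directory (the paths are illustrative):

#!/bin/bash
# Invoked by Cassandra as "/path/to/script.sh <rolled-file>"; $1 is the rolled audit log segment.
mv "$1" /cassandra/audit/archive/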

-
-
-

Other Settings

-

The other audit logs settings are as follows.

-
# block: true
-# max_queue_weight: 268435456 # 256 MiB
-# max_log_size: 17179869184 # 16 GiB
-
-
-

The block option specifies whether the audit logging should block if the logging falls behind or should drop log records.

-

The max_queue_weight option sets the maximum weight of in memory queue for records waiting to be written to the file before blocking or dropping.

-

The max_log_size option sets the maximum size of the rolled files to retain on disk before deleting the oldest.

-
-
-
-

Using Nodetool to Enable Audit Logging

-

The nodetool enableauditlog command may be used to enable audit logging, and it overrides the settings in cassandra.yaml. The nodetool enableauditlog command syntax is as follows.

-
nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-        [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-        [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-        [(-u <username> | --username <username>)] enableauditlog
-        [--excluded-categories <excluded_categories>]
-        [--excluded-keyspaces <excluded_keyspaces>]
-        [--excluded-users <excluded_users>]
-        [--included-categories <included_categories>]
-        [--included-keyspaces <included_keyspaces>]
-        [--included-users <included_users>] [--logger <logger>]
-
-
-
-
OPTIONS
-
--- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
---excluded-categories <excluded_categories>
 Comma separated list of Audit Log Categories to be excluded for -audit log. If not set the value from cassandra.yaml will be used
---excluded-keyspaces <excluded_keyspaces>
 Comma separated list of keyspaces to be excluded for audit log. If -not set the value from cassandra.yaml will be used
---excluded-users <excluded_users>
 Comma separated list of users to be excluded for audit log. If not -set the value from cassandra.yaml will be used
--h <host>, --host <host>
 Node hostname or ip address
---included-categories <included_categories>
 Comma separated list of Audit Log Categories to be included for -audit log. If not set the value from cassandra.yaml will be used
---included-keyspaces <included_keyspaces>
 Comma separated list of keyspaces to be included for audit log. If -not set the value from cassandra.yaml will be used
---included-users <included_users>
 Comma separated list of users to be included for audit log. If not -set the value from cassandra.yaml will be used
---logger <logger>
 Logger name to be used for AuditLogging. Default BinAuditLogger. If -not set the value from cassandra.yaml will be used
--p <port>, --port <port>
 Remote jmx agent port number
--pp, --print-port
 Operate in 4.0 mode with hosts disambiguated by port number
-
-
-pw <password>, –password <password>
-
Remote jmx agent password
-
-pwf <passwordFilePath>, –password-file <passwordFilePath>
-
Path to the JMX password file
-
- --- - - - - -
--u <username>, --username <username>
 Remote jmx agent username
-
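For example, a hedged invocation that enables audit logging for a single keyspace with the file-based logger might look like the following; the keyspace name is illustrative:

$ nodetool enableauditlog --included-keyspaces auditlogkeyspace --logger FileAuditLogger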
-
-

The nodetool disableauditlog command disables audit log. The command syntax is as follows.

-
nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-        [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-        [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-        [(-u <username> | --username <username>)] disableauditlog
-
-
-
-
OPTIONS
-
--- - - - - - - - - - - -
--h <host>, --host <host>
 Node hostname or ip address
--p <port>, --port <port>
 Remote jmx agent port number
--pp, --print-port
 Operate in 4.0 mode with hosts disambiguated by port number
-
-
-pw <password>, –password <password>
-
Remote jmx agent password
-
-pwf <passwordFilePath>, –password-file <passwordFilePath>
-
Path to the JMX password file
-
- --- - - - - -
--u <username>, --username <username>
 Remote jmx agent username
-
-
-
-
-

Viewing the Audit Logs

-

An audit log event comprises the keyspace that is being audited, the operation that is being logged, the scope and the user. An audit log entry comprises the following attributes concatenated with a "|".

-
type (AuditLogEntryType): Type of request
-source (InetAddressAndPort): Source IP Address from which request originated
-user (String): User name
-timestamp (long ): Timestamp of the request
-batch (UUID): Batch of request
-keyspace (String): Keyspace on which request is made
-scope (String): Scope of request such as Table/Function/Aggregate name
-operation (String): Database operation such as CQL command
-options (QueryOptions): CQL Query options
-state (QueryState): State related to a given query
-
-
-

Some of these attributes may not be applicable to a given request and not all of these options must be set.

-
-
-

An Audit Logging Demo

-

To demonstrate audit logging, enable and configure it with the following settings.

-
audit_logging_options:
-   enabled: true
-   logger: BinAuditLogger
-   audit_logs_dir: "/cassandra/audit/logs/hourly"
-   # included_keyspaces:
-   # excluded_keyspaces: system, system_schema, system_virtual_schema
-   # included_categories:
-   # excluded_categories:
-   # included_users:
-   # excluded_users:
-   roll_cycle: HOURLY
-   # block: true
-   # max_queue_weight: 268435456 # 256 MiB
-   # max_log_size: 17179869184 # 16 GiB
-   ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled:
-   # archive_command:
-   # max_archive_retries: 10
-
-
-

Create the audit log directory /cassandra/audit/logs/hourly and set its permissions as discussed earlier. Run some CQL commands, such as creating a keyspace, creating a table, and querying a table. Any supported CQL commands may be run as discussed in the section What does Audit Logging Log?. Change directory (with the cd command) to the audit logs directory.

-
cd /cassandra/audit/logs/hourly
-
-
-

List the files/directories and some .cq4 files should get listed. These are the audit logs files.

-
[ec2-user@ip-10-0-2-238 hourly]$ ls -l
-total 28
--rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug  2 03:01 20190802-02.cq4
--rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug  2 03:01 20190802-03.cq4
--rw-rw-r--. 1 ec2-user ec2-user    65536 Aug  2 03:01 directory-listing.cq4t
-
-
-

The auditlogviewer tool is used to dump audit logs. Run the auditlogviewer tool. Audit log files directory path is a required argument. The output should be similar to the following output.

-
[ec2-user@ip-10-0-2-238 hourly]$ auditlogviewer /cassandra/audit/logs/hourly
-WARN  03:12:11,124 Using Pauser.sleepy() as not enough processors, have 2, needs 8+
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711427328|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE AuditLogKeyspace;
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711427329|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE "auditlogkeyspace"
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564711446279|type :SELECT|category:QUERY|ks:auditlogkeyspace|scope:t|operation:SELECT * FROM t;
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564713878834|type :DROP_TABLE|category:DDL|ks:auditlogkeyspace|scope:t|operation:DROP TABLE IF EXISTS
-AuditLogKeyspace.t;
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/3.91.56.164|port:42382|timestamp:1564714618360|ty
-pe:REQUEST_FAILURE|category:ERROR|operation:CREATE KEYSPACE AuditLogKeyspace
-WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};; Cannot add
-existing keyspace "auditlogkeyspace"
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564714690968|type :DROP_KEYSPACE|category:DDL|ks:auditlogkeyspace|operation:DROP KEYSPACE AuditLogKeyspace;
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/3.91.56.164|port:42406|timestamp:1564714708329|ty pe:CREATE_KEYSPACE|category:DDL|ks:auditlogkeyspace|operation:CREATE KEYSPACE
-AuditLogKeyspace
-WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};
-Type: AuditLog
-LogMessage:
-user:anonymous|host:10.0.2.238:7000|source:/127.0.0.1|port:46264|timestamp:1564714870678|type :USE_KEYSPACE|category:OTHER|ks:auditlogkeyspace|operation:USE auditlogkeyspace;
-[ec2-user@ip-10-0-2-238 hourly]$
-
-
-

The auditlogviewer tool usage syntax is as follows.

-
./auditlogviewer
-Audit log files directory path is a required argument.
-usage: auditlogviewer <path1> [<path2>...<pathN>] [options]
---
-View the audit log contents in human readable format
---
-Options are:
--f,--follow       Upon reaching the end of the log continue indefinitely
-                  waiting for more records
--h,--help         display this help message
--r,--roll_cycle   How often to roll the log file was rolled. May be
-                  necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY,
-                  DAILY). Default HOURLY.
-
-
-
-
-

Diagnostic events for user audit logging

-

Any native transport enabled client is able to subscribe to diagnostic events that are raised around authentication and CQL operations. These events can then be consumed and used by external tools to implement a Cassandra user auditing solution.
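Diagnostic events are disabled by default; as a sketch, they can be switched on in cassandra.yaml (this assumes the default setting has not already been changed):

diagnostic_events_enabled: true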

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/new/fqllogging.html b/src/doc/4.0-beta1/new/fqllogging.html deleted file mode 100644 index ae96971c7..000000000 --- a/src/doc/4.0-beta1/new/fqllogging.html +++ /dev/null @@ -1,721 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Full Query Logging" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Full Query Logging

-

Apache Cassandra 4.0 adds support for logging all queries as they were invoked (CASSANDRA-13983). For correctness testing it's useful to be able to capture production traffic so that it can be replayed against both old and new versions of Cassandra while comparing the results.

-

Cassandra 4.0 includes an implementation of full query logging (FQL) that uses chronicle-queue to implement a rotating log of queries. Some of the features of FQL are:

-
    -
  • Single thread asynchronously writes log entries to disk to reduce impact on query latency
  • -
  • Heap memory usage bounded by a weighted queue with configurable maximum weight sitting in front of logging thread
  • -
  • If the weighted queue is full producers can be blocked or samples can be dropped
  • -
  • Disk utilization is bounded by deleting old log segments once a configurable size is reached
  • -
  • The on disk serialization uses a flexible schema binary format (chronicle-wire) making it easy to skip unrecognized fields, add new ones, and omit old ones.
  • -
  • Can be enabled and configured via JMX, disabled, and reset (delete on disk data), logging path is configurable via both JMX and YAML
  • -
  • Introduce new fqltool in /bin that currently implements Dump which can dump in a readable format full query logs as well as follow active full query logs. FQL Replay and Compare are also available.
  • -
-

Cassandra 4.0 has a binary full query log based on Chronicle Queue that can be controlled using nodetool enablefullquerylog, disablefullquerylog, and resetfullquerylog. The log contains all queries invoked, approximate time they were invoked, any parameters necessary to bind wildcard values, and all query options. A readable version of the log can be dumped or tailed using the new bin/fqltool utility. The full query log is designed to be safe to use in production and limits utilization of heap memory and disk space with limits you can specify when enabling the log.

-
-

Objective

-

Full Query Logging logs all requests to the CQL interface. The full query logs could be used for debugging, performance benchmarking, testing and auditing CQL queries. The audit logs also include CQL requests but full query logging is dedicated to CQL requests only with features such as FQL Replay and FQL Compare that are not available in audit logging.

-
-
-

Full Query Logger

-

The Full Query Logger is a logger that logs entire query contents after the query finishes. FQL only logs queries that successfully complete; other queries (e.g. timed out or failed queries) are not logged. Queries are logged in one of two modes: single query or batch of queries. The log for an invocation of a batch of queries includes the following attributes:

-
type - The type of the batch
-queries - CQL text of the queries
-values - Values to bind to as parameters for the queries
-queryOptions - Options associated with the query invocation
-queryState - Timestamp state associated with the query invocation
-batchTimeMillis - Approximate time in milliseconds since the epoch since the batch was invoked
-
-
-

The log for single CQL query includes the following attributes:

-
query - CQL query text
-queryOptions - Options associated with the query invocation
-queryState - Timestamp state associated with the query invocation
-queryTimeMillis - Approximate time in milliseconds since the epoch since the batch was invoked
-
-
-

Full query logging is backed by BinLog. BinLog is a quick and dirty binary log. Its goal is good enough performance, a predictable footprint, simplicity in terms of implementation and configuration, and most importantly minimal impact on producers of log records. Performance safety is accomplished by feeding items to the binary log using a weighted queue and dropping records if the binary log falls sufficiently far behind. Simplicity and good enough performance are achieved by using a single log writing thread as well as Chronicle Queue to handle writing the log, making it available for readers, and log rolling.

-

The weighted queue is a wrapper around any blocking queue that turns it into a blocking weighted queue. The queue weighs each element being added and removed. Adding to the queue is blocked if doing so would violate the weight bound. If an element weighs more than the capacity of the queue, then exactly one such element is allowed into the queue at a time. If the weight of an object changes after it is added, it could create issues; checking weight should be cheap, so memoize expensive-to-compute weights. If the weight calculation throws, that can also result in leaked permits, so it is a good idea to memoize the weight so that it doesn't throw. In the interest of not writing unit tests for methods no one uses, many operations throw UnsupportedOperationException; if you need them, add them along with proper unit tests (100% coverage, including exception paths and resource leaks) in WeightedQueueTest.

-

The FQL tracks information about store files:

-
    -
  • Store files as they are added and their storage impact. Delete them if over storage limit.
  • -
  • The files in the chronicle queue that have already rolled
  • -
  • The number of bytes in store files that have already rolled
  • -
-

FQL logger sequence is as follows:

-
    -
  1. Start the consumer thread that writes log records. Can only be done once.
  2. -
  3. Offer a record to the log. If the in memory queue is full the record will be dropped and offer will return false.
  4. -
  5. Put a record into the log. If the in memory queue is full the putting thread will be blocked until there is space or it is interrupted.
  6. -
  7. Clean up the buffers on thread exit, finalization will check again once this is no longer reachable ensuring there are no stragglers in the queue.
  8. -
  9. Stop the consumer thread that writes log records. Can be called multiple times.
  10. -
-

Next, we shall demonstrate full query logging with an example.

-
-
-

Configuring Full Query Logging

-

Full Query Logger default options are configured on a per node basis in cassandra.yaml with the following configuration property.

-
full_query_logging_options:
-
-
-

As an example setup create a three node Cassandra 4.0 cluster. The nodetool status command lists the nodes in the cluster.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool status
-Datacenter: us-east-1
-=====================
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address     Load        Tokens  Owns (effective)  Host ID                               Rack
-UN  10.0.1.115  442.42 KiB  256     32.6%             b64cb32a-b32a-46b4-9eeb-e123fa8fc287  us-east-1b
-UN  10.0.3.206  559.52 KiB  256     31.9%             74863177-684b-45f4-99f7-d1006625dc9e  us-east-1d
-UN  10.0.2.238  587.87 KiB  256     35.5%             4dcdadd2-41f9-4f34-9892-1f20868b27c7  us-east-1c
-
-
-

In subsequent sub-sections we shall discuss enabling and configuring full query logging.

-
-

Setting the FQL Directory

-

A dedicated directory path must be provided to write full query log data to when the full query log is enabled. The directory for FQL must exist and have appropriate permissions set. The full query log will recursively delete the contents of this path at times, so it is recommended not to place links in this directory to other sections of the filesystem. In this example the full_query_log_dir property in cassandra.yaml is pre-configured as follows.

-
full_query_log_dir: /tmp/cassandrafullquerylog
-
-
-

The log_dir option may be used to configure the FQL directory if the full_query_log_dir is not set.

-
full_query_logging_options:
-   # log_dir:
-
-
-

Create the FQL directory if it does not exist and set its permissions.

-
sudo mkdir -p /tmp/cassandrafullquerylog
-sudo chmod -R 777 /tmp/cassandrafullquerylog
-
-
-
-
-

Setting the Roll Cycle

-

The roll_cycle option sets how often to roll FQL log segments so they can potentially be reclaimed. Supported values are MINUTELY, HOURLY and DAILY. Default setting is HOURLY.

-
roll_cycle: HOURLY
-
-
-
-
-

Setting Other Options

-

The block option specifies whether the FQL should block if the FQL falls behind or should drop log records. Default value of block is true. The max_queue_weight option sets the maximum weight of in memory queue for records waiting to be written to the file before blocking or dropping. The max_log_size option sets the maximum size of the rolled files to retain on disk before deleting the oldest file. The archive_command option sets the archive command to execute on rolled log files. The max_archive_retries option sets the max number of retries of failed archive commands.

-
# block: true
-   # max_queue_weight: 268435456 # 256 MiB
-   # max_log_size: 17179869184 # 16 GiB
-   ## archive command is "/path/to/script.sh %path" where %path is replaced with the file
-being rolled:
-   # archive_command:
-   # max_archive_retries: 10
-
-
-

Both max_queue_weight and max_log_size must be > 0. An example full query logging configuration is as follows.

-
full_query_log_dir: /tmp/cassandrafullquerylog
-
-# default options for full query logging - these can be overridden from command line when
-executing
-# nodetool enablefullquerylog
-# nodetool enablefullquerylog
-#full_query_logging_options:
-   # log_dir:
-   roll_cycle: HOURLY
-   # block: true
-   # max_queue_weight: 268435456 # 256 MiB
-   # max_log_size: 17179869184 # 16 GiB
-   ## archive command is "/path/to/script.sh %path" where %path is replaced with the file
-being rolled:
-   # archive_command:
-   # max_archive_retries: 10
-
-
-

Note that the full_query_log_dir setting is not nested within full_query_logging_options, but it still applies to full query logging.

-
-
-

Enabling Full Query Logging

-

Full Query Logging is enabled on a per-node basis. The nodetool enablefullquerylog command is used to enable full query logging. Defaults for the options are configured in cassandra.yaml and these can be overridden from the command line.

-

The syntax of the nodetool enablefullquerylog command is as follows:

-
 nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-[(-pp | --print-port)] [(-pw <password> | --password <password>)]
-[(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-[(-u <username> | --username <username>)] enablefullquerylog
-[--archive-command <archive_command>] [--blocking]
-[--max-archive-retries <archive_retries>]
-[--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-[--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-  --archive-command <archive_command>
- Command that will handle archiving rolled full query log files.
- Format is "/path/to/script.sh %path" where %path will be replaced
- with the file to archive
-
-  --blocking
- If the queue is full whether to block producers or drop samples.
-
-  -h <host>, --host <host>
- Node hostname or ip address
-
-  --max-archive-retries <archive_retries>
- Max number of archive retries.
-
-  --max-log-size <max_log_size>
- How many bytes of log data to store before dropping segments. Might
- not be respected if a log file hasn't rolled so it can be deleted.
-
-  --max-queue-weight <max_queue_weight>
- Maximum number of bytes of query data to queue to disk before
- blocking or dropping samples.
-
-  -p <port>, --port <port>
- Remote jmx agent port number
-
-  --path <path>
- Path to store the full query log at. Will have it's contents
- recursively deleted.
-
-  -pp, --print-port
- Operate in 4.0 mode with hosts disambiguated by port number
-
-  -pw <password>, --password <password>
- Remote jmx agent password
-
-  -pwf <passwordFilePath>, --password-file <passwordFilePath>
- Path to the JMX password file
-
-  --roll-cycle <roll_cycle>
- How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-  -u <username>, --username <username>
- Remote jmx agent username
-
-
-

Run the following command on each node in the cluster.

-
nodetool enablefullquerylog --path /tmp/cassandrafullquerylog
-
-
-

After the full query logging has been enabled run some CQL statements to generate full query logs.

-
-
-
-

Running CQL Statements

-

Start CQL interface with cqlsh command.

-
[ec2-user@ip-10-0-2-238 ~]$ cqlsh
-Connected to Cassandra Cluster at 127.0.0.1:9042.
-[cqlsh 5.0.1 | Cassandra 4.0-SNAPSHOT | CQL spec 3.4.5 | Native protocol v4]
-Use HELP for help.
-cqlsh>
-
-
-

Run some CQL statements. Create a keyspace. Create a table and add some data. Query the table.

-
cqlsh> CREATE KEYSPACE AuditLogKeyspace
-  ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};
-cqlsh> USE AuditLogKeyspace;
-cqlsh:auditlogkeyspace> CREATE TABLE t (
-...id int,
-...k int,
-...v text,
-...PRIMARY KEY (id)
-... );
-cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-cqlsh:auditlogkeyspace> INSERT INTO t (id, k, v) VALUES (0, 1, 'val1');
-cqlsh:auditlogkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+------
- 0 | 1 | val1
-
-(1 rows)
-cqlsh:auditlogkeyspace>
-
-
-
-
-

Viewing the Full Query Logs

-

The fqltool is used to view the full query logs. The fqltool has the following usage syntax.

-
fqltool <command> [<args>]
-
-The most commonly used fqltool commands are:
-   compare   Compare result files generated by fqltool replay
-   dump Dump the contents of a full query log
-   help Display help information
-   replay    Replay full query logs
-
-See 'fqltool help <command>' for more information on a specific command.
-
-
-

The fqltool dump command is used to dump (list) the contents of a full query log. Run the fqltool dump command after some CQL statements have been run.

-

The full query logs get listed. Truncated output is as follows:

-
[ec2-user@ip-10-0-2-238 cassandrafullquerylog]$ fqltool dump ./
-WARN  [main] 2019-08-02 03:07:53,635 Slf4jExceptionHandler.java:42 - Using Pauser.sleepy() as not enough processors, have 2, needs 8+
-Type: single-query
-Query start time: 1564708322030
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system.peers
-Values:
-
-Type: single-query
-Query start time: 1564708322054
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system.local WHERE key='local'
-Values:
-
-Type: single-query
-Query start time: 1564708322109
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.keyspaces
-Values:
-
-Type: single-query
-Query start time: 1564708322116
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.tables
-Values:
-
-Type: single-query
-Query start time: 1564708322139
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.columns
-Values:
-
-Type: single-query
-Query start time: 1564708322142
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.functions
-Values:
-
-Type: single-query
-Query start time: 1564708322141
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.aggregates
-Values:
-
-Type: single-query
-Query start time: 1564708322143
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.types
-Values:
-
-Type: single-query
-Query start time: 1564708322144
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.indexes
-Values:
-
-Type: single-query
-Query start time: 1564708322142
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.triggers
-Values:
-
-Type: single-query
-Query start time: 1564708322145
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708322
-Query: SELECT * FROM system_schema.views
-Values:
-
-Type: single-query
-Query start time: 1564708345408
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:-2147483648
-Query: CREATE KEYSPACE AuditLogKeyspace
-WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};
-Values:
-
-Type: single-query
-Query start time: 1564708345675
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708345
-Query: SELECT peer, rpc_address, schema_version FROM system.peers
-Values:
-
-Type: single-query
-Query start time: 1564708345676
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708345
-Query: SELECT schema_version FROM system.local WHERE key='local'
-Values:
-
-Type: single-query
-Query start time: 1564708346323
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708346
-Query: SELECT * FROM system_schema.keyspaces WHERE keyspace_name = 'auditlogkeyspace'
-Values:
-
-Type: single-query
-Query start time: 1564708360873
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:-2147483648
-Query: USE AuditLogKeyspace;
-Values:
-
-Type: single-query
-Query start time: 1564708360874
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:-2147483648
-Query: USE "auditlogkeyspace"
-Values:
-
-Type: single-query
-Query start time: 1564708378837
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:-2147483648
-Query: CREATE TABLE t (
-    id int,
-    k int,
-    v text,
-    PRIMARY KEY (id)
-);
-Values:
-
-Type: single-query
-Query start time: 1564708379247
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708379
-Query: SELECT * FROM system_schema.tables WHERE keyspace_name = 'auditlogkeyspace' AND table_name = 't'
-Values:
-
-Type: single-query
-Query start time: 1564708379255
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708379
-Query: SELECT * FROM system_schema.views WHERE keyspace_name = 'auditlogkeyspace' AND view_name = 't'
-Values:
-
-Type: single-query
-Query start time: 1564708397144
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708397
-Query: INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-Values:
-
-Type: single-query
-Query start time: 1564708397167
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708397
-Query: INSERT INTO t (id, k, v) VALUES (0, 1, 'val1');
-Values:
-
-Type: single-query
-Query start time: 1564708434782
-Protocol version: 4
-Generated timestamp:-9223372036854775808
-Generated nowInSeconds:1564708434
-Query: SELECT * FROM t;
-Values:
-
-[ec2-user@ip-10-0-2-238 cassandrafullquerylog]$
-
-
-

Full query logs are generated on each node. Enabling full query logging on one node and the log files generated on that node are shown below:

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@52.1.243.83
-Last login: Fri Aug  2 00:14:53 2019 from 75.155.255.51
-[ec2-user@ip-10-0-3-206 ~]$ sudo mkdir /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-3-206 ~]$ sudo chmod -R 777 /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-3-206 ~]$ nodetool enablefullquerylog --path /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-3-206 ~]$ cd /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-3-206 cassandrafullquerylog]$ ls -l
-total 44
--rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug  2 01:24 20190802-01.cq4
--rw-rw-r--. 1 ec2-user ec2-user    65536 Aug  2 01:23 directory-listing.cq4t
-[ec2-user@ip-10-0-3-206 cassandrafullquerylog]$
-
-
-

Enabling full query logging on another node and the log files generated on that node are shown below:

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@3.86.103.229
-Last login: Fri Aug  2 00:13:04 2019 from 75.155.255.51
-[ec2-user@ip-10-0-1-115 ~]$ sudo mkdir /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-1-115 ~]$ sudo chmod -R 777 /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-1-115 ~]$ nodetool enablefullquerylog --path /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-1-115 ~]$ cd /tmp/cassandrafullquerylog
-[ec2-user@ip-10-0-1-115 cassandrafullquerylog]$ ls -l
-total 44
--rw-rw-r--. 1 ec2-user ec2-user 83886080 Aug  2 01:24 20190802-01.cq4
--rw-rw-r--. 1 ec2-user ec2-user    65536 Aug  2 01:23 directory-listing.cq4t
-[ec2-user@ip-10-0-1-115 cassandrafullquerylog]$
-
-
-

The nodetool resetfullquerylog command resets the full query logger if it is enabled, and deletes any generated files in the last used full query log path as well as the path configured in cassandra.yaml. In other words, it stops the full query log and cleans the files in the full query log directory configured in cassandra.yaml as well as the one set via JMX.
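For example, to stop full query logging on a node and remove the generated segments:

$ nodetool disablefullquerylog
$ nodetool resetfullquerylog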

-
-
-

Full Query Replay

-

The fqltool provides the replay command (CASSANDRA-14618) to replay the full query logs. The FQL replay could be run on a different machine or even a different cluster for testing, debugging and performance benchmarking.

-

The main objectives of fqltool replay are:

-
    -
  • To be able to compare different runs of production traffic against different versions/configurations of Cassandra.
  • -
  • Take FQL logs from several machines and replay them in “order” by the timestamps recorded.
  • -
  • Record the results from each run to be able to compare different runs (against different clusters/versions/etc).
  • -
  • If fqltool replay is run against 2 or more clusters, the results could be compared.
  • -
-

The FQL replay could also be used on the same node on which the full query logs were generated to recreate a dropped database object.

-
-
The syntax of fqltool replay is as follows:
-
 fqltool replay [--keyspace <keyspace>] [--results <results>]
-[--store-queries <store_queries>] --target <target>... [--] <path1>
-[<path2>...<pathN>]
-
-OPTIONS
-  --keyspace <keyspace>
- Only replay queries against this keyspace and queries without
- keyspace set.
-
-  --results <results>
- Where to store the results of the queries, this should be a
- directory. Leave this option out to avoid storing results.
-
-  --store-queries <store_queries>
- Path to store the queries executed. Stores queries in the same order
- as the result sets are in the result files. Requires --results
-
-  --target <target>
- Hosts to replay the logs to, can be repeated to replay to more
- hosts.
-
-  --
- This option can be used to separate command-line options from the
- list of argument, (useful when arguments might be mistaken for
- command-line options
-
-  <path1> [<path2>...<pathN>]
- Paths containing the full query logs to replay.
-
-
-

As an example of using fqltool replay, drop a keyspace.

-
cqlsh:auditlogkeyspace> DROP KEYSPACE AuditLogKeyspace;
-
-
-

Subsequently run fqltool replay. The directory in which to store the query results and the directory in which to store the queries run are specified, and these directories must be created and given permissions before running fqltool replay. The --results and --store-queries directories are optional, but if --store-queries is set then --results must also be set.

-
[ec2-user@ip-10-0-2-238 cassandra]$ fqltool replay --keyspace AuditLogKeyspace --results
-/cassandra/fql/logs/results/replay --store-queries /cassandra/fql/logs/queries/replay --
-target 3.91.56.164 -- /tmp/cassandrafullquerylog
-
-
-

Describe the keyspaces after running fqltool replay and the keyspace that was dropped gets listed again.

-
cqlsh:auditlogkeyspace> DESC KEYSPACES;
-
-system_schema  system  system_distributed  system_virtual_schema
-system_auth    auditlogkeyspace  system_traces  system_views
-
-cqlsh:auditlogkeyspace>
-
-
-
-
-

Full Query Compare

-

The fqltool compare command (CASSANDRA-14619) is used to compare result files generated by fqltool replay. It takes the recorded runs from fqltool replay and compares them, outputting any differences and potentially all queries against the mismatching partition up until the mismatch.

-

The fqltool compare could be used for comparing result files generated by different versions of Cassandra or different Cassandra configurations as an example. The command usage is as follows:

-
[ec2-user@ip-10-0-2-238 ~]$ fqltool help compare
-NAME
-  fqltool compare - Compare result files generated by fqltool replay
-
-SYNOPSIS
-  fqltool compare --queries <queries> [--] <path1> [<path2>...<pathN>]
-
-OPTIONS
-  --queries <queries>
- Directory to read the queries from. It is produced by the fqltool
- replay --store-queries option.
-
-  --
- This option can be used to separate command-line options from the
- list of argument, (useful when arguments might be mistaken for
- command-line options
-
-  <path1> [<path2>...<pathN>]
- Directories containing result files to compare.
-
-
-

fqltool compare stores each row as a separate chronicle document, to avoid reading the entire result set into memory when comparing. The document formats are:

-

To mark the start of a new result set:

-
-------------------
-version: int16
-type: column_definitions
-column_count: int32;
-column_definition: text, text
-column_definition: text, text
-....
---------------------
-
-
-

To mark a failed query set:

-
---------------------
-version: int16
-type: query_failed
-message: text
----------------------
-
-
-

To mark a row set:

-
--------------------
-version: int16
-type: row
-row_column_count: int32
-column: bytes
----------------------
-
-
-

To mark the end of a result set:

-
-------------------
-version: int16
-type: end_resultset
--------------------
-
-
-
-
-

Performance Overhead of FQL

-

In performance testing, FQL appears to have little or no overhead in WRITE-only workloads, and a minor overhead in MIXED workloads.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/new/index.html b/src/doc/4.0-beta1/new/index.html deleted file mode 100644 index a0da680d9..000000000 --- a/src/doc/4.0-beta1/new/index.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "New Features in Apache Cassandra 4.0" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

New Features in Apache Cassandra 4.0

-

This section covers the new features in Apache Cassandra 4.0.

-
- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/new/java11.html b/src/doc/4.0-beta1/new/java11.html deleted file mode 100644 index 074ccf37c..000000000 --- a/src/doc/4.0-beta1/new/java11.html +++ /dev/null @@ -1,354 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Support for Java 11" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Support for Java 11

-

In the new Java release cadence a new Java version is made available every six months. The more frequent release cycle is favored as it brings new Java features to developers as they are developed, without the wait that the earlier three-year release model incurred. Not every Java version is a Long Term Support (LTS) version. After Java 8 the next LTS version is Java 11; Java 9, 10, 12 and 13 are all non-LTS versions.

-

One of the objectives of Apache Cassandra 4.0 is to support the recent LTS Java versions 8 and 11 (CASSANDRA-9608). Java 8 and Java 11 may be used to build and run Apache Cassandra 4.0.

-

Note: Support for JDK 11 in Apache Cassandra 4.0 is an experimental feature, and not recommended for production use.

-
-

Support Matrix

-

The support matrix for the Java versions for compiling and running Apache Cassandra 4.0 is detailed in Table 1. The build version is along the vertical axis and the run version is along the horizontal axis.

-

Table 1 : Support Matrix for Java

                  | Java 8 (Run)  | Java 11 (Run)
 Java 8 (Build)   | Supported     | Supported
 Java 11 (Build)  | Not Supported | Supported
-

Essentially, Apache Cassandra 4.0 source code built with Java 11 cannot be run with Java 8. Next, we discuss using each of Java 8 and Java 11 to build and run Apache Cassandra 4.0.

-
-
-

Using Java 8 to Build

-

To start with, install Java 8. As an example, for installing Java 8 on RedHat Linux the command is as follows:

-
$ sudo yum install java-1.8.0-openjdk-devel
-
-
-

Set JAVA_HOME and JRE_HOME environment variables in the shell bash script. First, open the bash script:

-
$ sudo vi ~/.bashrc
-
-
-

Set the environment variables including the PATH.

-
$ export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk
-$ export JRE_HOME=/usr/lib/jvm/java-1.8.0-openjdk/jre
-$ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
-
-
-

Download the Apache Cassandra 4.0 source code from Git along with its dependencies.

-
$ git clone https://github.com/apache/cassandra.git
-
-
-

If Cassandra is already running, stop it with the following command.

-
[ec2-user@ip-172-30-3-146 bin]$ ./nodetool stopdaemon
-
-
-

Build the source code from the cassandra directory, which contains the build.xml build script. Apache Ant uses the Java version set in the JAVA_HOME environment variable.

-
$ cd ~/cassandra
-$ ant
-
-
-

Apache Cassandra 4.0 is now built with Java 8. Set the CASSANDRA_HOME environment variable in the bash script and add CASSANDRA_HOME/bin to the PATH variable.

-
$ export CASSANDRA_HOME=~/cassandra
-$ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$CASSANDRA_HOME/bin
-
-
-

To run Apache Cassandra 4.0 with either Java 8 or Java 11, run the cassandra application in the CASSANDRA_HOME/bin directory, which is on the PATH.

-
$ cassandra
-
-
-

The Java version used to run Cassandra is logged as Cassandra starts up. For example, if Java 11 is used, the output should include a snippet similar to the following:

-
INFO  [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:480 - Hostname: ip-172-30-3-
-146.ec2.internal:7000:7001
-INFO  [main] 2019-07-31 21:18:16,862 CassandraDaemon.java:487 - JVM vendor/version: OpenJDK
-64-Bit Server VM/11.0.3
-INFO  [main] 2019-07-31 21:18:16,863 CassandraDaemon.java:488 - Heap size:
-1004.000MiB/1004.000MiB
-
-
-

The following output indicates a single node Cassandra 4.0 cluster has started.

-
INFO  [main] 2019-07-31 21:18:19,687 InboundConnectionInitiator.java:130 - Listening on
-address: (127.0.0.1:7000), nic: lo, encryption: enabled (openssl)
-...
-...
-INFO  [main] 2019-07-31 21:18:19,850 StorageService.java:512 - Unable to gossip with any
-peers but continuing anyway since node is in its own seed list
-INFO  [main] 2019-07-31 21:18:19,864 StorageService.java:695 - Loading persisted ring state
-INFO  [main] 2019-07-31 21:18:19,865 StorageService.java:814 - Starting up server gossip
-INFO  [main] 2019-07-31 21:18:20,088 BufferPool.java:216 - Global buffer pool is enabled,
-when pool is exhausted (max is 251.000MiB) it will allocate on heap
-INFO  [main] 2019-07-31 21:18:20,110 StorageService.java:875 - This node will not auto
-bootstrap because it is configured to be a seed node.
-...
-...
-INFO  [main] 2019-07-31 21:18:20,809 StorageService.java:1507 - JOINING: Finish joining ring
-INFO  [main] 2019-07-31 21:18:20,921 StorageService.java:2508 - Node 127.0.0.1:7000 state
-jump to NORMAL
-
-
-
-
-

Using Java 11 to Build

-

If Java 11 is used to build Apache Cassandra 4.0, first Java 11 must be installed and the environment variables set. As an example, to download and install Java 11 on RedHat Linux run the following command.

-
$ yum install java-11-openjdk-devel
-
-
-

Set the environment variables in the bash script for Java 11. The first command is to open the bash script.

-
$ sudo vi ~/.bashrc
-$ export JAVA_HOME=/usr/lib/jvm/java-11-openjdk
-$ export JRE_HOME=/usr/lib/jvm/java-11-openjdk/jre
-$ export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
-
-
-

To build source code with Java 11 one of the following two options must be used.

-
  1. Include the Apache Ant command-line option -Duse.jdk11=true:

     $ ant -Duse.jdk11=true

  2. Set the environment variable CASSANDRA_USE_JDK11 to true:

     $ export CASSANDRA_USE_JDK11=true

-

As an example, set the environment variable CASSANDRA_USE_JDK11 to true.

-
[ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true
-[ec2-user@ip-172-30-3-146 cassandra]$ ant
-Buildfile: /home/ec2-user/cassandra/build.xml
-
-
-

Or, set the command-line option.

-
[ec2-user@ip-172-30-3-146 cassandra]$ ant -Duse.jdk11=true
-
-
-

The build output should include the following.

-
_build_java:
-    [echo] Compiling for Java 11
-...
-...
-build:
-
-_main-jar:
-         [copy] Copying 1 file to /home/ec2-user/cassandra/build/classes/main/META-INF
-     [jar] Building jar: /home/ec2-user/cassandra/build/apache-cassandra-4.0-SNAPSHOT.jar
-...
-...
-_build-test:
-   [javac] Compiling 739 source files to /home/ec2-user/cassandra/build/test/classes
-    [copy] Copying 25 files to /home/ec2-user/cassandra/build/test/classes
-...
-...
-jar:
-   [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/stress/META-INF
-   [mkdir] Created dir: /home/ec2-user/cassandra/build/tools/lib
-     [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/stress.jar
-   [mkdir] Created dir: /home/ec2-user/cassandra/build/classes/fqltool/META-INF
-     [jar] Building jar: /home/ec2-user/cassandra/build/tools/lib/fqltool.jar
-
-BUILD SUCCESSFUL
-Total time: 1 minute 3 seconds
-[ec2-user@ip-172-30-3-146 cassandra]$
-
-
-
-
-

Common Issues

-

One of the two options mentioned above must be used when compiling with JDK 11, otherwise the build fails with the following error message.

-
[ec2-user@ip-172-30-3-146 cassandra]$ ant
-Buildfile: /home/ec2-user/cassandra/build.xml
-validate-build-conf:
-
-BUILD FAILED
-/home/ec2-user/cassandra/build.xml:293: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true must
-be set when building from java 11
-Total time: 1 second
-[ec2-user@ip-172-30-3-146 cassandra]$
-
-
-

Apache Cassandra 4.0 source code built with Java 11 may only be run with Java 11. If code built with Java 11 is run with Java 8, the following error message is output.

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com
-Last login: Wed Jul 31 20:47:26 2019 from 75.155.255.51
-[ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME
-/usr/lib/jvm/java-1.8.0-openjdk
-[ec2-user@ip-172-30-3-146 ~]$ cassandra
-...
-...
-Error: A JNI error has occurred, please check your installation and try again
-Exception in thread "main" java.lang.UnsupportedClassVersionError:
-org/apache/cassandra/service/CassandraDaemon has been compiled by a more recent version of
-the Java Runtime (class file version 55.0), this version of the Java Runtime only recognizes
-class file versions up to 52.0
-  at java.lang.ClassLoader.defineClass1(Native Method)
-  at java.lang.ClassLoader.defineClass(ClassLoader.java:763)
-  at ...
-...
-
-
-

The CASSANDRA_USE_JDK11 variable or the command-line option -Duse.jdk11 cannot be used to build with Java 8. To demonstrate, set JAVA_HOME to a Java 8 installation.

-
[root@localhost ~]# ssh -i cassandra.pem ec2-user@ec2-3-85-85-75.compute-1.amazonaws.com
-Last login: Wed Jul 31 21:41:50 2019 from 75.155.255.51
-[ec2-user@ip-172-30-3-146 ~]$ echo $JAVA_HOME
-/usr/lib/jvm/java-1.8.0-openjdk
-
-
-

Set CASSANDRA_USE_JDK11=true or the command-line option -Duse.jdk11=true, then run Apache Ant to start the build. The build fails with the error message listed below.

-
[ec2-user@ip-172-30-3-146 ~]$ cd
-cassandra
-[ec2-user@ip-172-30-3-146 cassandra]$ export CASSANDRA_USE_JDK11=true
-[ec2-user@ip-172-30-3-146 cassandra]$ ant
-Buildfile: /home/ec2-user/cassandra/build.xml
-
-validate-build-conf:
-
-BUILD FAILED
-/home/ec2-user/cassandra/build.xml:285: -Duse.jdk11=true or $CASSANDRA_USE_JDK11=true cannot
-be set when building from java 8
-
-Total time: 0 seconds
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/new/messaging.html b/src/doc/4.0-beta1/new/messaging.html deleted file mode 100644 index 56cb94c7d..000000000 --- a/src/doc/4.0-beta1/new/messaging.html +++ /dev/null @@ -1,344 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Improved Internode Messaging" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Improved Internode Messaging

-

Apache Cassandra 4.0 has added several new improvements to internode messaging.

-
-

Optimized Internode Messaging Protocol

-

The internode messaging protocol has been optimized (CASSANDRA-14485). Previously the IPAddressAndPort of the sender was included with each message even though it had already been sent once when the initial connection/session was established. In Cassandra 4.0 the IPAddressAndPort is no longer included in every message and is only sent when the connection/session is initiated.

-

Another improvement is that in several places (listed below) a fixed 4-byte integer value has been replaced with a vint, as a vint value almost always fits in a single byte:

-
  • The paramSize (the number of parameters in the header)
  • Each individual parameter value
  • The payloadSize
-
-
-

NIO Messaging

-

In Cassandra 4.0 peer-to-peer (internode) messaging has been switched to non-blocking I/O (NIO) via Netty (CASSANDRA-8457).

-

As for the serialization format, each message contains a header with several fixed fields, an optional key-value parameters section, and then the message payload itself. Note: the IP address in the header may be either IPv4 (4 bytes) or IPv6 (16 bytes).

-
-
The diagram below shows the IPv4 address for brevity.
-
           1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4 5 5 5 5 5 6 6
- 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2 4 6 8 0 2
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                       PROTOCOL MAGIC                          |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                         Message ID                            |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                         Timestamp                             |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|  Addr len |           IP Address (IPv4)                       /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/           |                 Verb                              /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/           |            Parameters size                        /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/           |             Parameter data                        /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/                                                               |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                        Payload size                           |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                                                               /
-/                           Payload                             /
-/                                                               |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-
-

An individual parameter has a String key and a byte array value. The key is serialized with its length, encoded as two bytes, followed by the UTF-8 byte encoding of the string. The body is serialized with its length, encoded as four bytes, followed by the bytes of the value.

-
-
-

Resource limits on Queued Messages

-

System stability is improved by enforcing strict resource limits (CASSANDRA-15066) on the number of outbound messages that are queued, measured by the serializedSize of the message. There are three separate limits imposed simultaneously to ensure that progress is always made without any reasonable combination of failures impacting a node’s stability.

-
  1. Global, per-endpoint and per-connection limits are imposed on messages queued for delivery to other nodes and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire size of the message being sent or received.
  2. The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. Each node-pair has three links: urgent, small and large. So any given node may have a maximum of N*3 * (internode_application_send_queue_capacity_in_bytes + internode_application_receive_queue_capacity_in_bytes) messages queued without any coordination between them, although in practice, with token-aware routing, only RF*tokens nodes should need to communicate with significant bandwidth.
  3. The per-endpoint limit is imposed on all messages exceeding the per-link limit, simultaneously with the global limit, on all links to or from a single node in the cluster. The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, on all links to or from any node in the cluster.

The following configuration settings have been added to cassandra.yaml for resource limits on queued messages.
-
internode_application_send_queue_capacity_in_bytes: 4194304 #4MiB
-internode_application_send_queue_reserve_endpoint_capacity_in_bytes: 134217728  #128MiB
-internode_application_send_queue_reserve_global_capacity_in_bytes: 536870912    #512MiB
-internode_application_receive_queue_capacity_in_bytes: 4194304                  #4MiB
-internode_application_receive_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB
-internode_application_receive_queue_reserve_global_capacity_in_bytes: 536870912   #512MiB
-
-
-
-
-

Virtual Tables for Messaging Metrics

-

Metrics are improved by keeping metrics in virtual tables for internode inbound and outbound messaging (CASSANDRA-15066). For inbound messaging a virtual table (internode_inbound) has been added to keep metrics for:

-
  • Bytes and count of messages that could not be serialized or flushed due to an error
  • Bytes and count of messages scheduled
  • Bytes and count of messages successfully processed
  • Bytes and count of messages successfully received
  • Nanos and count of messages throttled
  • Bytes and count of messages expired
  • Corrupt frames recovered and unrecovered
-

A separate virtual table (internode_outbound) has been added for outbound inter-node messaging. The outbound virtual table keeps metrics for:

-
  • Bytes and count of messages pending
  • Bytes and count of messages sent
  • Bytes and count of messages expired
  • Bytes and count of messages that could not be sent due to an error
  • Bytes and count of messages overloaded
  • Active connection count
  • Connection attempts
  • Successful connection attempts
-
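As an illustration of how these metrics can be inspected, the outbound virtual table named above can be queried directly with cqlsh (the columns returned depend on the running version):

```bash
# Inspect outbound internode messaging metrics on the local node
cqlsh -e "SELECT * FROM system_views.internode_outbound;"
```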
-
-

Hint Messaging

-

A specialized version of the hint message, which takes a hint already encoded in a ByteBuffer and sends it verbatim, has been added. It is an optimization for dispatching a hint file of the current messaging version to a node of the same messaging version, which is the most common case. It saves extra ByteBuffer allocations and one redundant hint deserialization-serialization cycle.

-
-
-

Internode Application Timeout

-

A configuration setting has been added to cassandra.yaml for the maximum continuous period a connection may be unwritable in application space.

-
# internode_application_timeout_in_ms = 30000
-
-
-

Another new feature is logging of the message size in the trace messages emitted when tracing a query.

-
-
-

Paxos prepare and propose stage for local requests optimized

-

In pre-4.0, Paxos prepare and propose messages always go through the entire MessagingService stack in Cassandra even if the request is to be served locally. Local requests can instead be served without involving MessagingService, as is already done elsewhere in Cassandra where the MessagingService stage is skipped for local requests.

-

This is what it looks like in pre-4.0 if tracing is on and a lightweight transaction is run:

-
Sending PAXOS_PREPARE message to /A.B.C.D [MessagingService-Outgoing-/A.B.C.D] | 2017-09-11
-21:55:18.971000 | A.B.C.D | 15045
-… REQUEST_RESPONSE message received from /A.B.C.D [MessagingService-Incoming-/A.B.C.D] |
-2017-09-11 21:55:18.976000 | A.B.C.D | 20270
-… Processing response from /A.B.C.D [SharedPool-Worker-4] | 2017-09-11 21:55:18.976000 |
-A.B.C.D | 20372
-
-
-

The same applies to the propose stage as well.

-

In version 4.0 the Paxos prepare and propose stages for local requests are optimized (CASSANDRA-13862).

-
-
-

Quality Assurance

-

Several other quality assurance improvements have been made in version 4.0 (CASSANDRA-15066).

-
-

Framing

-

Version 4.0 introduces framing for all internode messages, i.e. the grouping of messages into a single logical payload with headers and trailers. A frame is guaranteed either to contain only complete messages, or (for large messages) to be part of a unique sequence of frames that together contain exactly one message.

-
-
-

Corruption prevention

-

Previously, intra-datacenter internode messages were unprotected from corruption by default, as only LZ4 provided any integrity checks. All messages to post-4.0 nodes are written to explicit frames, which may be:

-
  • LZ4 encoded
  • CRC protected

The Unprotected option is still available.

-
-
-

Resilience

-

For resilience, all frames are written with a separate CRC protected header, of 8 and 6 bytes respectively. If corruption occurs in this header, the connection must be reset, as before. If corruption occurs anywhere outside of the header, the corrupt frame will be skipped, leaving the connection intact and avoiding the loss of any messages unnecessarily.

-

Previously, any issue at any point in the stream would result in the connection being reset, with the loss of any in-flight messages.

-
-
-

Efficiency

-

The overall memory usage and the number of byte shuffles on both inbound and outbound messages are reduced.

-

On the outbound side, the Netty LZ4 encoder maintains a chunk-size buffer (64KiB) that is filled before any compressed frame can be produced. Our frame encoders avoid this redundant copy, as well as freeing 192KiB per endpoint.

-

Inbound, frame decoders guarantee only to copy the number of bytes necessary to parse a frame, and to never store more bytes than necessary. This improvement applies twice to LZ4 connections, improving both the message decode and the LZ4 frame decode.

-
-
-

Inbound Path

-

Version 4.0 introduces several improvements to the inbound path.

-

An appropriate message handler is used based on whether large or small messages are expected on a particular connection, as set in a flag. NonblockingBufferHandler, running on the event loop, is used for small messages, and BlockingBufferHandler, running off the event loop, for large messages. The single implementation of InboundMessageHandler handles messages of any size effectively by deriving the size of the incoming message from the byte stream. In addition to deriving the size of the message from the stream, the incoming message expiration time is proactively read before attempting to deserialize the entire message. If the message has already expired by the time it is encountered, it is simply skipped in the byte stream. And if a message fails to deserialize while still on the receiving side - say, because of a table id or column being unknown - its bytes are skipped without dropping the entire connection and losing all the buffered messages. An immediate reply is sent back to the coordinator node with the failure reason, rather than waiting for the coordinator callback to expire. This logic is extended to a corrupted frame; a corrupted frame is safely skipped over without dropping the connection.

-

The inbound path imposes strict limits on memory utilization. Specifically, the memory occupied by all parsed but unprocessed messages is bounded on a per-connection, per-endpoint, and global basis. Once a connection exceeds its local unprocessed capacity and cannot borrow any permits from the per-endpoint and global reserves, it simply stops processing further messages, providing natural backpressure, until sufficient capacity is regained.

-
-
-

Outbound Connections

-
-

Opening a connection

-

A consistent approach is adopted for all kinds of failure to connect, including: refused by endpoint, incompatible versions, or unexpected exceptions;

-
    -
  • Retry forever, until either success or no messages waiting to deliver.
  • -
  • Wait incrementally longer periods before reconnecting, up to a maximum of 1s.
  • -
  • While failing to connect, no reserve queue limits are acquired.
  • -
-
-
-

Closing a connection

-
    -
  • Correctly drains outbound messages that are waiting to be delivered (unless disconnected and fail to reconnect).
  • -
  • Messages written to a closing connection are either delivered or rejected, with a new connection being opened if the old is irrevocably closed.
  • -
  • Unused connections are pruned eventually.
  • -
-
-
-

Reconnecting

-

We sometimes need to reconnect a perfectly valid connection, e.g. if the preferred IP address changes. We ensure that the underlying connection has no in-progress operations before closing it and reconnecting.

-
-
-

Message Failure

-

Message failure propagates to callbacks instantly, better preventing overload by reclaiming committed memory.

-
-
Expiry
-
    -
  • No longer experiences head-of-line blocking (e.g. undroppable message preventing all droppable messages from being expired).
  • -
  • While overloaded, expiry is attempted eagerly on enqueuing threads.
  • -
  • While disconnected we schedule regular pruning, to handle the case where messages are no longer being sent, but we have a large backlog to expire.
  • -
-
-
-
Overload
-
    -
  • Tracked by bytes queued, as opposed to number of messages.
  • -
-
-
-
Serialization Errors
-
    -
  • Do not result in the connection being invalidated; the message is simply completed with failure, and then erased from the frame.
  • -
  • Includes detected mismatch between calculated serialization size to actual.
  • -
-

Failures to flush to the network, perhaps because the connection has been reset, are not currently notified to callback handlers, as the necessary information has been discarded; it would be possible to do so in future if we decide it is worthwhile.

-
-
-
-

QoS

-

The “Gossip” connection has been replaced with a general-purpose “Urgent” connection for any small messages impacting system stability.

-
-
-

Metrics

-

We track, and expose via Virtual Table and JMX, the number of messages and bytes that: we could not serialize or flush due to an error, we dropped due to overload or timeout, are pending, and have successfully sent.

-
-
-
-
-

Added a Message size limit

-

Cassandra pre-4.0 doesn’t protect the server from allocating huge buffers for internode Message objects. A message size limit helps deal with issues such as a malfunctioning cluster participant. Version 4.0 introduces a maximum message size configuration parameter, akin to the maximum mutation size, set to the endpoint reserve capacity by default.

-
-
-

Recover from unknown table when deserializing internode messages

-

As discussed in CASSANDRA-9289, it would be nice to gracefully recover from seeing an unknown table in a message from another node. Pre-4.0, we close the connection and reconnect, which can cause other concurrent queries to fail. Version 4.0 fixes the issue by wrapping the message in-stream with TrackedDataInputPlus, catching UnknownCFException, and skipping the remaining bytes in the message. The TCP connection won’t be closed and will remain connected for other messages.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/new/streaming.html b/src/doc/4.0-beta1/new/streaming.html deleted file mode 100644 index 0cf6c7ab8..000000000 --- a/src/doc/4.0-beta1/new/streaming.html +++ /dev/null @@ -1,260 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Improved Streaming" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Improved Streaming

-

Apache Cassandra 4.0 has made several improvements to streaming. Streaming is the process used by nodes of a cluster to exchange data in the form of SSTables. Streaming of SSTables is performed for several operations, such as:

-
    -
  • SSTable Repair
  • -
  • Host Replacement
  • -
  • Range movements
  • -
  • Bootstrapping
  • -
  • Rebuild
  • -
  • Cluster expansion
  • -
-
-

Streaming based on Netty

-

Streaming in Cassandra 4.0 is based on Non-blocking Input/Output (NIO) with Netty (CASSANDRA-12229). It replaces the single-threaded (or sequential), synchronous, blocking model of streaming messages and transfer of files. Netty supports non-blocking, asynchronous, multi-threaded streaming with which multiple connections are opened simultaneously. Non-blocking implies that threads are not blocked as they don’t wait for a response for a sent request. A response could be returned in a different thread. With asynchronous, connections and threads are decoupled and do not have a 1:1 relation. Several more connections than threads may be opened.

-
-
-

Zero Copy Streaming

-

Pre-4.0, during streaming Cassandra reifies the SSTables into objects. This creates unnecessary garbage and slows down the whole streaming process, as some SSTables can be transferred as a whole file rather than as individual partitions. Cassandra 4.0 has added support for streaming entire SSTables when possible (CASSANDRA-14556) for faster streaming using zero-copy APIs. If enabled, Cassandra will use zero-copy for eligible SSTables, significantly speeding up transfers and increasing throughput. A zero-copy path avoids bringing data into user-space on both the sending and receiving side. Any streaming-related operations will notice a corresponding improvement. Zero copy streaming is hardware bound; it is limited only by the hardware (network and disk I/O).

-
-

High Availability

-

In benchmark tests Zero Copy Streaming is 5x faster than partition-based streaming. Faster streaming provides the benefit of improved availability. A cluster’s recovery mainly depends on the streaming speed, so Cassandra clusters with failed nodes will be able to recover much more quickly (5x faster). If a node fails, SSTables need to be streamed to a replacement node. During the replacement operation, the new Cassandra node streams SSTables from the neighboring nodes that hold copies of the data belonging to this new node’s token range. Depending on the amount of data stored, this process can require substantial network bandwidth and take some time to complete. The longer these range movement operations take, the more cluster availability is reduced. Failure of multiple nodes would reduce high availability greatly. The faster the new node completes streaming its data, the faster it can serve traffic, increasing the availability of the cluster.

-
-
-

Enabling Zero Copy Streaming

-

Zero copy streaming is enabled by setting the following setting in cassandra.yaml.

-
stream_entire_sstables: true
-
-
-

By default zero copy streaming is enabled.

-
-
-

SSTables Eligible for Zero Copy Streaming

-

Zero copy streaming is used if all partitions within the SSTable need to be transmitted. This is common when using LeveledCompactionStrategy or when partitioning SSTables by token range has been enabled. All partition keys in the SSTables are iterated over to determine the eligibility for Zero Copy streaming.

-
-
-

Benefits of Zero Copy Streaming

-

When enabled, it permits Cassandra to zero-copy stream entire eligible SSTables between nodes, including every component. This speeds up the network transfer significantly subject to throttling specified by stream_throughput_outbound_megabits_per_sec.

-

Enabling this will reduce the GC pressure on sending and receiving node. While this feature tries to keep the disks balanced, it cannot guarantee it. This feature will be automatically disabled if internode encryption is enabled. Currently this can be used with Leveled Compaction.

-
-
-

Configuring for Zero Copy Streaming

-

Throttling would reduce the streaming speed. The stream_throughput_outbound_megabits_per_sec throttles all outbound streaming file transfers on a node to the given total throughput in Mbps. When unset, the default is 200 Mbps or 25 MB/s.

-
stream_throughput_outbound_megabits_per_sec: 200
-
-
-

To run any zero copy streaming benchmark, stream_throughput_outbound_megabits_per_sec must be set to a very high value; otherwise throttling will be significant and the benchmark results will not be meaningful.
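For reference, the throttle can also be inspected and adjusted at runtime with nodetool; a value of 0 disables throttling, which can be useful when benchmarking an otherwise idle cluster:

```bash
# Check and change the outbound stream throughput throttle at runtime
nodetool getstreamthroughput
nodetool setstreamthroughput 0   # 0 disables stream throttling
```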

-

The inter_dc_stream_throughput_outbound_megabits_per_sec setting throttles all streaming file transfers between datacenters; it allows users to throttle inter-DC stream throughput in addition to throttling all network stream traffic as configured with stream_throughput_outbound_megabits_per_sec. When unset, the default is 200 Mbps or 25 MB/s.

-
inter_dc_stream_throughput_outbound_megabits_per_sec: 200
-
-
-
-
-

SSTable Components Streamed with Zero Copy Streaming

-

Zero Copy Streaming streams entire SSTables. SSTables are made up of multiple components in separate files. SSTable components streamed are listed in Table 1.

-

Table 1. SSTable Components

 SSTable Component    Description
 Data.db              The base data for an SSTable: the remaining components can be regenerated based on the data component.
 Index.db             Index of the row keys with pointers to their positions in the data file.
 Filter.db            Serialized bloom filter for the row keys in the SSTable.
 CompressionInfo.db   File to hold information about uncompressed data length, chunk offsets etc.
 Statistics.db        Statistical metadata about the content of the SSTable.
 Digest.crc32         Holds CRC32 checksum of the data file.
 CRC.db               Holds the CRC32 for chunks in an uncompressed file.
 Summary.db           Holds SSTable Index Summary (sampling of Index component).
 TOC.txt              Table of contents, stores the list of all components for the SSTable.
-

Custom components, used by e.g. a custom compaction strategy, may also be included.

-
-
-
-

Repair Streaming Preview

-

Repair with nodetool repair involves streaming of repaired SSTables, and a repair preview has been added to provide an estimate of the amount of repair streaming that would need to be performed. Repair preview (CASSANDRA-13257) is invoked with nodetool repair --preview, using the option:

-
-prv, --preview
-
-
-

It determines ranges and amount of data to be streamed, but doesn’t actually perform repair.
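A sketch of a preview run against a single keyspace (the keyspace name is illustrative):

```bash
# Estimate the repair streaming that would be performed, without repairing anything
nodetool repair --preview auditlogkeyspace
```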

-
-
-

Parallelizing of Streaming of Keyspaces

-

The streaming of the different keyspaces for bootstrap and rebuild has been parallelized in Cassandra 4.0 (CASSANDRA-4663).

-
-
-

Unique nodes for Streaming in Multi-DC deployment

-

The Range Streamer picks unique nodes to stream data from when the number of replicas in each DC is three or more (CASSANDRA-4650). The optimization evens out the streaming load across the cluster. Without it, some nodes could be picked to stream more data than others. This patch allows a dedicated node to be selected to stream only one range.

-

This increases the performance of bootstrapping a node and also puts less pressure on the nodes serving the data. It has no effect when the number of replicas in each DC is less than three, as data is then streamed from only two nodes.

-
-
-

Stream Operation Types

-

It is important to know the type or purpose of a certain stream. Version 4.0 (CASSANDRA-13064) adds an enum to distinguish between the different types of streams. Stream types are available both in a stream request and a stream task. The different stream types are:

-
    -
  • Restore replica count
  • -
  • Unbootstrap
  • -
  • Relocation
  • -
  • Bootstrap
  • -
  • Rebuild
  • -
  • Bulk Load
  • -
  • Repair
  • -
-
-
-

Disallow Decommission when number of Replicas will drop below configured RF

-

CASSANDRA-12510 guards against a decommission that would drop the number of replicas below the configured replication factor (RF), and adds a --force option that allows the decommission to continue if intentional, forcing decommission of the node even when it reduces the number of replicas below the configured RF.
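A minimal sketch of forcing a decommission on the node being removed (only do this when the reduced replica count is intentional):

```bash
# Decommission refuses to run by default if replicas would drop below the configured RF;
# --force overrides that check
nodetool decommission --force
```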

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/new/transientreplication.html b/src/doc/4.0-beta1/new/transientreplication.html deleted file mode 100644 index 2f83ff97b..000000000 --- a/src/doc/4.0-beta1/new/transientreplication.html +++ /dev/null @@ -1,228 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Transient Replication" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Transient Replication

-

Note:

-

Transient Replication (CASSANDRA-14404) is an experimental feature designed for expert Apache Cassandra users who are able to validate every aspect of the database for their application and deployment. That means being able to check that operations like reads, writes, decommission, remove, rebuild, repair, and replace all work with your queries, data, configuration, operational practices, and availability requirements. Apache Cassandra 4.0 has the initial implementation of transient replication. Future releases of Cassandra will make this feature suitable for a wider audience. It is anticipated that a future version will support monotonic reads with transient replication as well as LWT, logged batches, and counters. Being experimental, transient replication is not recommended for production use.

-
-

Objective

-

The objective of transient replication is to decouple storage requirements from data redundancy (or consensus group size) using incremental repair, in order to reduce storage overhead. Certain nodes act as full replicas (storing all the data for a given token range), and some nodes act as transient replicas, storing only unrepaired data for the same token ranges.

-

The optimization that is made possible with transient replication is called “Cheap quorums”, which implies that data redundancy is increased without corresponding increase in storage usage.

-

Transient replication is useful when sufficient full replicas are unavailable to receive and store all the data. Transient replication allows you to configure a subset of replicas to only replicate data that hasn’t been incrementally repaired. As an optimization, we can avoid writing data to a transient replica if we have successfully written data to the full replicas.

-

After incremental repair, transient data stored on transient replicas can be discarded.

-
-
-

Enabling Transient Replication

-

Transient replication is not enabled by default. Transient replication must be enabled on each node in a cluster separately by setting the following configuration property in cassandra.yaml.

-
enable_transient_replication: true
-
-
-

Transient replication may be configured with both SimpleStrategy and NetworkTopologyStrategy. Transient replication is configured by setting replication factor as <total_replicas>/<transient_replicas>.

-

As an example, create a keyspace with a total replication factor of 4, 1 replica of which is transient.

-
CREATE KEYSPACE CassandraKeyspaceSimple WITH replication = {'class': 'SimpleStrategy',
-'replication_factor' : '4/1'};
-
-
-

As another example, the some_keyspace keyspace will have 3 replicas in DC1, 1 of which is transient, and 5 replicas in DC2, 2 of which are transient:

-
CREATE KEYSPACE some_keyspace WITH replication = {'class': 'NetworkTopologyStrategy',
-'DC1' : '3/1', 'DC2' : '5/2'};
-
-
-

Transiently replicated keyspaces only support tables with read_repair set to NONE.
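A minimal sketch of such a table definition, assuming the some_keyspace keyspace from the example above (the table name and columns are hypothetical):

```bash
# Tables in transiently replicated keyspaces must have read repair disabled
cqlsh -e "CREATE TABLE some_keyspace.t (id int PRIMARY KEY, val text) WITH read_repair = 'NONE';"
```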

-

Important Restrictions:

-
  • RF cannot be altered while some endpoints are not in a normal state (no range movements).
  • You can’t add full replicas if there are any transient replicas. You must first remove all transient replicas, then change the number of full replicas, then add back the transient replicas.
  • You can only safely increase the number of transient replicas one at a time, with incremental repair run in between each change.

Additionally, transient replication cannot be used for:

-
  • Monotonic Reads
  • Lightweight Transactions (LWTs)
  • Logged Batches
  • Counters
  • Keyspaces using materialized views
  • Secondary indexes (2i)
-
-

Cheap Quorums

-

Cheap quorums are a set of optimizations on the write path that avoid writing to transient replicas unless sufficient full replicas are not available to satisfy the requested consistency level. Hints are never written for transient replicas. Optimizations on the read path prefer reading from transient replicas. When writing at quorum to a table configured to use transient replication, the quorum will always prefer available full replicas over transient replicas so that transient replicas don’t have to process writes. Tail latency is reduced by rapid write protection (similar to rapid read protection): when full replicas are slow or unavailable, writes are sent to transient replicas. Transient replicas can serve reads faster as they don’t have to do anything beyond bloom filter checks if they have no data. With vnodes and large cluster sizes, they will not have a large quantity of data even when one or more full replicas fail and transient replicas start to serve a steady amount of write traffic for some of their transiently replicated ranges.

-
-
-

Speculative Write Option

-

CREATE TABLE adds an option, speculative_write_threshold, for use with transient replicas. The option is of type simple with a default value of 99PERCENTILE. When replicas are slow or unresponsive, speculative_write_threshold specifies the threshold at which a cheap quorum write will be upgraded to include transient replicas.
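A hedged illustration of setting this option on an existing table (the table name and threshold value below are hypothetical):

```bash
# Upgrade cheap quorum writes to include transient replicas earlier than the default 99PERCENTILE
cqlsh -e "ALTER TABLE some_keyspace.t WITH speculative_write_threshold = '75PERCENTILE';"
```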

-
-
-

Pending Ranges and Transient Replicas

-

Pending ranges refers to the movement of token ranges between transient replicas. When a transient range is moved, there will be a period of time where both transient replicas would need to receive any write intended for the logical transient replica so that after the movement takes effect a read quorum is able to return a response. Nodes are not temporarily transient replicas during expansion. They stream data like a full replica for the transient range before they can serve reads. A pending state is incurred similar to how there is a pending state for full replicas. Transient replicas also always receive writes when they are pending. Pending transient ranges are sent a bit more data and reading from them is avoided.

-
-
-

Read Repair and Transient Replicas

-

Read repair never attempts to repair a transient replica. Reads will always include at least one full replica. They should also prefer transient replicas where possible. Range scans ensure the entire scanned range performs replica selection that satisfies the requirement that every range scanned includes one full replica. During incremental and validation repair handling, at transient replicas anti-compaction does not output any data for transient ranges as the data will be dropped after repair, and transient replicas never have data streamed to them.

-
-
-

Transitioning between Full Replicas and Transient Replicas

-

The additional state transitions that transient replication introduces require streaming and nodetool cleanup to behave differently. When data is streamed it is ensured that it is streamed from a full replica and not a transient replica.

-

Transitioning from not replicated to transiently replicated means that a node must stay pending until the next incremental repair completes, at which point the data for that range is known to be available at full replicas.

-

Transitioning from transiently replicated to fully replicated requires streaming from a full replica and is identical to how data is streamed when transitioning from not replicated to replicated. The transition is managed so the transient replica is not read from as a full replica until streaming completes. It can be used immediately for a write quorum.

-

Transitioning from fully replicated to transiently replicated requires cleanup to remove repaired data from the transiently replicated range to reclaim space. It can be used immediately for a write quorum.

-

Transitioning from transiently replicated to not replicated requires cleanup to be run to remove the formerly transiently replicated data.

-

When transient replication is in use ring changes are supported including add/remove node, change RF, add/remove DC.

-
-
-

Transient Replication supports EACH_QUORUM

-

CASSANDRA-14727 adds Transient Replication support for EACH_QUORUM. Per CASSANDRA-14768, we ensure we write to at least a QUORUM of nodes in every DC, regardless of how many responses we need to wait for and our requested consistency level. This is to minimally surprise users with transient replication; with normal writes, we soft-ensure that we reach QUORUM in all DCs we are able to, by writing to every node; even if we don’t wait for ACKs, we have in both cases sent sufficient messages.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/new/virtualtables.html b/src/doc/4.0-beta1/new/virtualtables.html deleted file mode 100644 index fe3bc959b..000000000 --- a/src/doc/4.0-beta1/new/virtualtables.html +++ /dev/null @@ -1,427 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "New Features in Apache Cassandra 4.0" - -doc-title: "Virtual Tables" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Virtual Tables

-

Apache Cassandra 4.0 implements virtual tables (CASSANDRA-7622).

-
-

Definition

-

A virtual table is a table that is backed by an API instead of data explicitly managed and stored as SSTables. Apache Cassandra 4.0 implements a virtual keyspace interface for virtual tables. Virtual tables are specific to each node.

-
-
-

Objective

-

A virtual table could have several uses including:

-
    -
  • Expose metrics through CQL
  • -
  • Expose YAML configuration information
  • -
-
-
-

How are Virtual Tables different from regular tables?

-

Virtual tables and virtual keyspaces differ from regular tables and keyspaces in several ways:

-
  • Virtual tables are read-only (though this is likely to change)
  • Virtual tables are not replicated
  • Virtual tables are local only and non-distributed
  • Virtual tables have no associated SSTables
  • The consistency level of queries sent to virtual tables is ignored
  • Virtual tables are managed by Cassandra; a user cannot run DDL to create new virtual tables or DML to modify existing virtual tables
  • Virtual tables are created in special keyspaces and not just any keyspace
  • All existing virtual tables use LocalPartitioner. Since a virtual table is not replicated the partitioner sorts in order of partition keys instead of by their hash.
  • Advanced queries using ALLOW FILTERING and aggregation functions may be run against virtual tables, even though they are not recommended for normal tables (see the example after this list)
-
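As a small illustration of that last point, a filtered query and an aggregate against virtual tables shown later in this document could look like this (output will vary by node):

```bash
# Filtering and aggregation are permitted on virtual tables
cqlsh -e "SELECT name, value FROM system_views.settings WHERE value = 'true' ALLOW FILTERING;"
cqlsh -e "SELECT count(*) FROM system_views.clients;"
```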
-
-

Virtual Keyspaces

-

Apache Cassandra 4.0 has added two new keyspaces for virtual tables: system_virtual_schema and system_views. Run the following command to list the keyspaces:

-
cqlsh> DESC KEYSPACES;
-system_schema  system       system_distributed  system_virtual_schema
-system_auth      system_traces       system_views
-
-
-

The system_virtual_schema keyspace contains schema information on virtual tables. The system_views keyspace contains the actual virtual tables.

-
-
-

Virtual Table Limitations

-

Virtual tables and virtual keyspaces initially have some limitations, though some of these could change:

-
  • Cannot alter or drop virtual keyspaces or tables
  • Cannot truncate virtual tables
  • Expiring columns are not supported by virtual tables
  • Conditional updates are not supported by virtual tables
  • Cannot create tables in virtual keyspaces
  • Cannot perform any operations against virtual keyspaces
  • Secondary indexes are not supported on virtual tables
  • Cannot create functions in virtual keyspaces
  • Cannot create types in virtual keyspaces
  • Materialized views are not supported on virtual tables
  • Virtual tables don’t support DELETE statements
  • Cannot CREATE TRIGGER against a virtual table
  • Conditional BATCH statements cannot include mutations for virtual tables
  • Cannot include a virtual table statement in a logged batch
  • Mutations for virtual and regular tables cannot exist in the same batch
  • Cannot create aggregates in virtual keyspaces; but may run aggregate functions on select
-
-
-

Listing and Describing Virtual Tables

-

Virtual tables in a virtual keyspace may be listed with DESC TABLES. The system_views virtual keyspace tables include the following:

-
cqlsh> USE system_views;
-cqlsh:system_views> DESC TABLES;
-coordinator_scans   clients             tombstones_scanned  internode_inbound
-disk_usage          sstable_tasks       live_scanned        caches
-local_writes        max_partition_size  local_reads
-coordinator_writes  internode_outbound  thread_pools
-local_scans         coordinator_reads   settings
-
-
-

Some of the salient virtual tables in system_views virtual keyspace are described in Table 1.

-

Table 1 : Virtual Tables in system_views

 Virtual Table        Description
 clients              Lists information about all connected clients.
 disk_usage           Disk usage including disk_space, keyspace_name, and table_name by system keyspaces.
 local_writes         A table metric for local writes including count, keyspace_name, max, median, per_second, and table_name.
 caches               Displays the general cache information including cache name, capacity_bytes, entry_count, hit_count, hit_ratio, recent_hit_rate_per_second, recent_request_rate_per_second, request_count, and size_bytes.
 local_reads          A table metric for local reads information.
 sstable_tasks        Lists currently running tasks such as compactions and upgrades on SSTables.
 internode_inbound    Lists information about the inbound internode messaging.
 thread_pools         Lists metrics for each thread pool.
 settings             Displays configuration settings in cassandra.yaml.
 max_partition_size   A table metric for maximum partition size.
 internode_outbound   Information about the outbound internode messaging.
-

We shall discuss some of the virtual tables in more detail next.

-
-

Clients Virtual Table

-

The clients virtual table lists all active connections (connected clients) including their ip address, port, connection stage, driver name, driver version, hostname, protocol version, request count, ssl enabled, ssl protocol and user name:

-
cqlsh:system_views> select * from system_views.clients;
- address   | port  | connection_stage | driver_name | driver_version | hostname  | protocol_version | request_count | ssl_cipher_suite | ssl_enabled | ssl_protocol | username
------------+-------+------------------+-------------+----------------+-----------+------------------+---------------+------------------+-------------+--------------+-----------
- 127.0.0.1 | 50628 |            ready |        null |           null | localhost |                4 |            55 |             null |       False |         null | anonymous
- 127.0.0.1 | 50630 |            ready |        null |           null | localhost |                4 |            70 |             null |       False |         null | anonymous
-
-(2 rows)
-
-
-

Some examples of how clients can be used are:

-
  • To find applications using old incompatible versions of drivers before upgrading, and with nodetool enableoldprotocolversions and nodetool disableoldprotocolversions during upgrades.
  • To identify clients sending too many requests (see the query sketch after this list).
  • To find if SSL is enabled during the migration to and from SSL.

Virtual tables may be described with the DESCRIBE statement; the DDL listed, however, cannot be run to create a virtual table. As an example, describe the system_views.clients virtual table:

-
 cqlsh:system_views> DESC TABLE system_views.clients;
-CREATE TABLE system_views.clients (
-   address inet,
-   connection_stage text,
-   driver_name text,
-   driver_version text,
-   hostname text,
-   port int,
-   protocol_version int,
-   request_count bigint,
-   ssl_cipher_suite text,
-   ssl_enabled boolean,
-   ssl_protocol text,
-   username text,
-   PRIMARY KEY (address, port)) WITH CLUSTERING ORDER BY (port ASC)
-   AND compaction = {'class': 'None'}
-   AND compression = {};
-
-
-
-
-

Caches Virtual Table

-

The caches virtual table lists information about the caches. The four caches presently created are chunks, counters, keys and rows. A query on the caches virtual table returns the following details:

-
cqlsh:system_views> SELECT * FROM system_views.caches;
-name     | capacity_bytes | entry_count | hit_count | hit_ratio | recent_hit_rate_per_second | recent_request_rate_per_second | request_count | size_bytes
----------+----------------+-------------+-----------+-----------+----------------------------+--------------------------------+---------------+------------
-  chunks |      229638144 |          29 |       166 |      0.83 |                          5 |                              6 |           200 |     475136
-counters |       26214400 |           0 |         0 |       NaN |                          0 |                              0 |             0 |          0
-    keys |       52428800 |          14 |       124 |  0.873239 |                          4 |                              4 |           142 |       1248
-    rows |              0 |           0 |         0 |       NaN |                          0 |                              0 |             0 |          0
-
-(4 rows)
-
-
-
-
-

Settings Virtual Table

-

The settings table is rather useful and lists all the current configuration settings from the cassandra.yaml. The encryption options are overridden to hide the sensitive truststore information or passwords. The configuration settings however cannot be set using DML on the virtual table presently:

-
cqlsh:system_views> SELECT * FROM system_views.settings;
-
-name                                 | value
--------------------------------------+--------------------
-  allocate_tokens_for_keyspace       | null
-  audit_logging_options_enabled      | false
-  auto_snapshot                      | true
-  automatic_sstable_upgrade          | false
-  cluster_name                       | Test Cluster
-  enable_transient_replication       | false
-  hinted_handoff_enabled             | true
-  hints_directory                    | /home/ec2-user/cassandra/data/hints
-  incremental_backups                | false
-  initial_token                      | null
-                           ...
-                           ...
-                           ...
-  rpc_address                        | localhost
-  ssl_storage_port                   | 7001
-  start_native_transport             | true
-  storage_port                       | 7000
-  stream_entire_sstables             | true
-  (224 rows)
-
-
-

The settings table can be really useful if the yaml file has been changed since startup and the running configuration is not known, or to find out whether settings have been modified via JMX, nodetool, or virtual tables.

-
-
-

Thread Pools Virtual Table

-

The thread_pools table lists information about all thread pools. Thread pool information includes active tasks, active tasks limit, blocked tasks, blocked tasks all time, completed tasks, and pending tasks. A query on the thread_pools returns following details:

-
cqlsh:system_views> select * from system_views.thread_pools;
-
-name                         | active_tasks | active_tasks_limit | blocked_tasks | blocked_tasks_all_time | completed_tasks | pending_tasks
-------------------------------+--------------+--------------------+---------------+------------------------+-----------------+---------------
-            AntiEntropyStage |            0 |                  1 |             0 |                      0 |               0 |             0
-        CacheCleanupExecutor |            0 |                  1 |             0 |                      0 |               0 |             0
-          CompactionExecutor |            0 |                  2 |             0 |                      0 |             881 |             0
-        CounterMutationStage |            0 |                 32 |             0 |                      0 |               0 |             0
-                 GossipStage |            0 |                  1 |             0 |                      0 |               0 |             0
-             HintsDispatcher |            0 |                  2 |             0 |                      0 |               0 |             0
-       InternalResponseStage |            0 |                  2 |             0 |                      0 |               0 |             0
-         MemtableFlushWriter |            0 |                  2 |             0 |                      0 |               1 |             0
-           MemtablePostFlush |            0 |                  1 |             0 |                      0 |               2 |             0
-       MemtableReclaimMemory |            0 |                  1 |             0 |                      0 |               1 |             0
-              MigrationStage |            0 |                  1 |             0 |                      0 |               0 |             0
-                   MiscStage |            0 |                  1 |             0 |                      0 |               0 |             0
-               MutationStage |            0 |                 32 |             0 |                      0 |               0 |             0
-   Native-Transport-Requests |            1 |                128 |             0 |                      0 |             130 |             0
-      PendingRangeCalculator |            0 |                  1 |             0 |                      0 |               1 |             0
-PerDiskMemtableFlushWriter_0 |            0 |                  2 |             0 |                      0 |               1 |             0
-                   ReadStage |            0 |                 32 |             0 |                      0 |              13 |             0
-                 Repair-Task |            0 |         2147483647 |             0 |                      0 |               0 |             0
-        RequestResponseStage |            0 |                  2 |             0 |                      0 |               0 |             0
-                     Sampler |            0 |                  1 |             0 |                      0 |               0 |             0
-    SecondaryIndexManagement |            0 |                  1 |             0 |                      0 |               0 |             0
-          ValidationExecutor |            0 |         2147483647 |             0 |                      0 |               0 |             0
-           ViewBuildExecutor |            0 |                  1 |             0 |                      0 |               0 |             0
-           ViewMutationStage |            0 |                 32 |             0 |                      0 |               0 |             0
-
-
-

(24 rows)

-
-
-

Internode Inbound Messaging Virtual Table

-

The internode_inbound virtual table reports on internode inbound messaging. Initially, no internode inbound messaging may be listed. In addition to the address, port, datacenter and rack, the information includes corrupt frames recovered, corrupt frames unrecovered, error bytes, error count, expired bytes, expired count, processed bytes, processed count, received bytes, received count, scheduled bytes, scheduled count, throttled count, throttled nanos, using bytes, and using reserve bytes. A query on internode_inbound returns the following details:

-
cqlsh:system_views> SELECT * FROM system_views.internode_inbound;
-address | port | dc | rack | corrupt_frames_recovered | corrupt_frames_unrecovered |
-error_bytes | error_count | expired_bytes | expired_count | processed_bytes |
-processed_count | received_bytes | received_count | scheduled_bytes | scheduled_count | throttled_count | throttled_nanos | using_bytes | using_reserve_bytes
----------+------+----+------+--------------------------+----------------------------+-
-----------
-(0 rows)
-
-
-
-
-

SSTables Tasks Virtual Table

-

The sstable_tasks table can be used to get information about running SSTable tasks such as compactions. It lists the following columns:

-
cqlsh:system_views> SELECT * FROM sstable_tasks;
-keyspace_name | table_name | task_id                              | kind       | progress | total    | unit
----------------+------------+--------------------------------------+------------+----------+----------+-------
-       basic |      wide2 | c3909740-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction | 60418761 | 70882110 | bytes
-       basic |      wide2 | c7556770-cdf7-11e9-a8ed-0f03de2d9ae1 | compaction |  2995623 | 40314679 | bytes
-
-
-

As another example, to find how much time is remaining for SSTable tasks, use the following query:

-
SELECT total - progress AS remaining
-FROM system_views.sstable_tasks;
-
-
-
-
-

Other Virtual Tables

-

Some examples of using other virtual tables are as follows.

-

Find tables with most disk usage:

-
cqlsh> SELECT * FROM disk_usage WHERE mebibytes > 1 ALLOW FILTERING;
-
-keyspace_name | table_name | mebibytes
----------------+------------+-----------
-   keyspace1 |  standard1 |       288
-  tlp_stress |   keyvalue |      3211
-
-
-

Find the tables with the greatest local read latency:

-
cqlsh> SELECT * FROM  local_read_latency WHERE per_second > 1 ALLOW FILTERING;
-
-keyspace_name | table_name | p50th_ms | p99th_ms | count    | max_ms  | per_second
----------------+------------+----------+----------+----------+---------+------------
-  tlp_stress |   keyvalue |    0.043 |    0.152 | 49785158 | 186.563 |  11418.356
-
-
-
-
-
-

The system_virtual_schema keyspace

-

The system_virtual_schema keyspace has three tables: keyspaces, tables and columns, which hold the virtual keyspace definitions, virtual table definitions, and virtual column definitions respectively. It is used internally by Cassandra and a user would not normally need to access it directly.
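Should you nevertheless want to inspect the virtual schema, it can be queried like any other keyspace; a minimal example:

cqlsh> SELECT keyspace_name, table_name FROM system_virtual_schema.tables;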

-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/objects.inv b/src/doc/4.0-beta1/objects.inv deleted file mode 100644 index cedfa463b..000000000 Binary files a/src/doc/4.0-beta1/objects.inv and /dev/null differ diff --git a/src/doc/4.0-beta1/operating/audit_logging.html b/src/doc/4.0-beta1/operating/audit_logging.html deleted file mode 100644 index 0c2c6d596..000000000 --- a/src/doc/4.0-beta1/operating/audit_logging.html +++ /dev/null @@ -1,281 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Audit Logging" -doc-header-links: ' - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Audit Logging

-

Audit logging in Cassandra logs every incoming CQL command request, as well as authentication (successful and unsuccessful logins), to a Cassandra node. Two implementations are currently provided; a custom logger can also be implemented and injected by specifying its class name as a parameter in cassandra.yaml.

-
  • BinAuditLogger: an efficient way to log events to file in a binary format.
  • FileAuditLogger: logs events to the audit/audit.log file using the slf4j logger.
-

Recommendation: BinAuditLogger is the community-recommended logger, considering its performance.

-
-

What does it capture

-

Audit logging captures the following events:

-
  • Successful as well as unsuccessful login attempts.
  • All database commands executed via the Native protocol (CQL), whether attempted or successfully executed.
-
-
-

Limitations

-

Executing prepared statements will log the query as provided by the client in the prepare call, along with the execution time stamp and all other attributes (see below). Actual values bound for prepared statement execution will not show up in the audit log.

-
-
-

What does it log

-

Each audit log implementation has access to the following attributes, and for the default text-based logger these fields are concatenated with the | character to yield the final message.

-
-
  • user: user name (if available)
  • host: host IP where the command is being executed
  • source ip address: source IP address from where the request initiated
  • source port: source port number from where the request initiated
  • timestamp: unix timestamp
  • type: type of the request (SELECT, INSERT, etc.)
  • category: category of the request (DDL, DML, etc.)
  • keyspace: keyspace (if applicable) on which the request is targeted to be executed
  • scope: table/aggregate name/function name/trigger name etc., as applicable
  • operation: CQL command being executed
-
-
-
-

How to configure

-

The audit log can be configured using cassandra.yaml. If you want to try audit logging on a single node, it can also be enabled and configured using nodetool.

-
-

cassandra.yaml configurations for AuditLog

-
-
  • enabled: enables/disables the audit log
  • logger: class name of the logger/custom logger
  • audit_logs_dir: audit logs directory location; if not set, defaults to cassandra.logdir.audit or cassandra.logdir + /audit/
  • included_keyspaces: comma-separated list of keyspaces to be included in the audit log; by default all keyspaces are included
  • excluded_keyspaces: comma-separated list of keyspaces to be excluded from the audit log; by default no keyspace is excluded except system, system_schema and system_virtual_schema
  • included_categories: comma-separated list of audit log categories to be included in the audit log; by default all categories are included
  • excluded_categories: comma-separated list of audit log categories to be excluded from the audit log; by default no category is excluded
  • included_users: comma-separated list of users to be included in the audit log; by default all users are included
  • excluded_users: comma-separated list of users to be excluded from the audit log; by default no user is excluded
-

The available categories are: QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE.
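As an illustrative sketch only (the audit_logs_dir path is hypothetical, and the exact layout of the logger entry may vary between releases; check the cassandra.yaml shipped with your version), an audit_logging_options section might look like:

audit_logging_options:
    enabled: true
    logger: BinAuditLogger
    # audit_logs_dir: /var/log/cassandra/audit/
    excluded_keyspaces: system, system_schema, system_virtual_schema
    included_categories: AUTH, DDL, DCL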

-
-
-

NodeTool command to enable AuditLog

-

enableauditlog: enables the audit log with the cassandra.yaml defaults; the yaml configuration can be overridden using options on the nodetool command.

-
nodetool enableauditlog
-
-
-
-

Options

-
-
--excluded-categories
    Comma-separated list of audit log categories to be excluded from the audit log. If not set, the value from cassandra.yaml will be used.

--excluded-keyspaces
    Comma-separated list of keyspaces to be excluded from the audit log. If not set, the value from cassandra.yaml will be used. Please remember that system, system_schema and system_virtual_schema are excluded by default; if you are overriding this option via nodetool, remember to add these keyspaces back if you do not want them in the audit logs.

--excluded-users
    Comma-separated list of users to be excluded from the audit log. If not set, the value from cassandra.yaml will be used.

--included-categories
    Comma-separated list of audit log categories to be included in the audit log. If not set, the value from cassandra.yaml will be used.

--included-keyspaces
    Comma-separated list of keyspaces to be included in the audit log. If not set, the value from cassandra.yaml will be used.

--included-users
    Comma-separated list of users to be included in the audit log. If not set, the value from cassandra.yaml will be used.

--logger
    Logger name to be used for audit logging. Default: BinAuditLogger. If not set, the value from cassandra.yaml will be used.
-
-
-
-
-

NodeTool command to disable AuditLog

-

disableauditlog: Disables AuditLog.

-
nodetool disableauditlog
-
-
-
-
-

NodeTool command to reload AuditLog filters

-

enableauditlog: the nodetool enableauditlog command can also be used to reload the audit log filters, when called with the default or the previously set logger name and the updated filter values.

-

E.g.,

-
nodetool enableauditlog --logger <default/existing logger name> --included-keyspaces <new filter values>
-
-
-
-
-
-

View the contents of AuditLog Files

-

auditlogviewer is a new tool introduced to help view the contents of the binary log files in a human-readable text format.

-
auditlogviewer <path1> [<path2>...<pathN>] [options]
-
-
-
-

Options

-
-
-f,--follow
    Upon reaching the end of the log, continue indefinitely waiting for more records.

-r,--roll_cycle
    How often the log file was rolled. May be necessary for Chronicle to correctly parse file names. (MINUTELY, HOURLY, DAILY). Default: HOURLY.

-h,--help
    Display this help message.
-
-

For example, to dump the contents of audit log files to the console:

-
auditlogviewer /logs/cassandra/audit
-
-
-
-
-

Sample output

-
LogMessage: user:anonymous|host:localhost/X.X.X.X|source:/X.X.X.X|port:60878|timestamp:1521158923615|type:USE_KS|category:DDL|ks:dev1|operation:USE "dev1"
-
-
-
-
-
-

Configuring BinAuditLogger

-

To use BinAuditLogger as the logger for audit logging, set the logger to BinAuditLogger in cassandra.yaml under the audit_logging_options section. BinAuditLogger can be further configured using its advanced options in cassandra.yaml.

-
-

Advanced Options for BinAuditLogger

-
-
block
    Indicates if the audit log should block if it falls behind, or should drop audit log records. Default is true, so that audit log records won't be lost.

max_queue_weight
    Maximum weight of the in-memory queue for records waiting to be written to the audit log file before blocking or dropping log records. Default is 256 * 1024 * 1024.

max_log_size
    Maximum size of the rolled files to retain on disk before deleting the oldest file. Default is 16L * 1024L * 1024L * 1024L.

roll_cycle
    How often to roll audit log segments so they can potentially be reclaimed. Available options are: MINUTELY, HOURLY, DAILY, LARGE_DAILY, XLARGE_DAILY, HUGE_DAILY. For more options, refer to net.openhft.chronicle.queue.RollCycles. Default is "HOURLY".
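As a sketch only (values are illustrative, and the exact nesting of these keys may vary between releases), the advanced options above could be set in cassandra.yaml roughly as follows:

audit_logging_options:
    enabled: true
    logger: BinAuditLogger
    block: true
    max_queue_weight: 268435456   # 256 * 1024 * 1024
    max_log_size: 17179869184     # 16L * 1024L * 1024L * 1024L
    roll_cycle: HOURLY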
-
-
-
-
-

Configuring FileAuditLogger

-

To use FileAuditLogger as the logger for audit logging, apart from setting the class name in cassandra.yaml, the following logback configuration is needed so that audit log events flow to a separate log file instead of system.log:

-
<!-- Audit Logging (FileAuditLogger) rolling file appender to audit.log -->
-<appender name="AUDIT" class="ch.qos.logback.core.rolling.RollingFileAppender">
-  <file>${cassandra.logdir}/audit/audit.log</file>
-  <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
-    <!-- rollover daily -->
-    <fileNamePattern>${cassandra.logdir}/audit/audit.log.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
-    <!-- each file should be at most 50MB, keep 30 days worth of history, but at most 5GB -->
-    <maxFileSize>50MB</maxFileSize>
-    <maxHistory>30</maxHistory>
-    <totalSizeCap>5GB</totalSizeCap>
-  </rollingPolicy>
-  <encoder>
-    <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern>
-  </encoder>
-</appender>
-
-<!-- Audit Logging additivity to redirect audit logging events to audit/audit.log -->
-<logger name="org.apache.cassandra.audit" additivity="false" level="INFO">
-        <appender-ref ref="AUDIT"/>
-</logger>
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/backups.html b/src/doc/4.0-beta1/operating/backups.html deleted file mode 100644 index 08516f832..000000000 --- a/src/doc/4.0-beta1/operating/backups.html +++ /dev/null @@ -1,666 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Backups" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Backups

-

Apache Cassandra stores data in immutable SSTable files. In Apache Cassandra, backups are copies of the database data stored as SSTable files. Backups are used for several purposes, including the following:

-
  • To store a data copy for durability
  • To be able to restore a table if table data is lost due to node/partition/network failure
  • To be able to transfer the SSTable files to a different machine, for portability
-
-

Types of Backups

-

Apache Cassandra supports two kinds of backup strategies.

-
  • Snapshots
  • Incremental backups
-

A snapshot is a copy of a table’s SSTable files at a given time, created via hard links. The DDL to create the table is stored as well. Snapshots may be created by a user or created automatically. The snapshot_before_compaction setting in cassandra.yaml determines whether snapshots are created before each compaction; by default it is set to false. Snapshots may be created automatically before keyspace truncation or before dropping a table by setting auto_snapshot to true (the default) in cassandra.yaml. Truncates could be delayed due to the auto snapshots, and another setting in cassandra.yaml determines how long the coordinator should wait for truncates to complete; by default Cassandra waits 60 seconds for auto snapshots to complete.

-

An incremental backup is a copy of a table’s SSTable files created by a hard link when memtables are flushed to disk as SSTables. Typically incremental backups are paired with snapshots to reduce the backup time as well as disk space. Incremental backups are not enabled by default and must be enabled explicitly in cassandra.yaml (with the incremental_backups setting) or with nodetool. Once enabled, Cassandra creates a hard link to each SSTable flushed or streamed locally in a backups/ subdirectory of the keyspace data. Incremental backups of system tables are also created.

-
-
-

Data Directory Structure

-

The directory structure of Cassandra data consists of different directories for keyspaces and tables, with the data files within the table directories. The backups and snapshots directories, which store backups and snapshots respectively for a particular table, are also located within the table directory. The directory structure for Cassandra is illustrated in Figure 1.

-
(Image: ../_images/Figure_1_backups.jpg)
-

Figure 1. Directory Structure for Cassandra Data

-
-

Setting Up Example Tables for Backups and Snapshots

-

In this section we shall create some example data that could be used to demonstrate incremental backups and snapshots. We have used a three node Cassandra cluster. First, the keyspaces are created. Subsequently, tables are created within a keyspace and table data is added. We have used two keyspaces, CQLKeyspace and CatalogKeyspace, with two tables within each. Create CQLKeyspace:

-
cqlsh> CREATE KEYSPACE CQLKeyspace
-  ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-
-

Create table t in the CQLKeyspace keyspace.

-
cqlsh> USE CQLKeyspace;
-cqlsh:cqlkeyspace> CREATE TABLE t (
-              ...     id int,
-              ...     k int,
-              ...     v text,
-              ...     PRIMARY KEY (id)
-              ... );
-
-
-

Add data to table t:

-
cqlsh:cqlkeyspace>
-cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1');
-
-
-

A table query lists the data:

-
cqlsh:cqlkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+------
- 1 | 1 | val1
- 0 | 0 | val0
-
- (2 rows)
-
-
-

Create another table t2:

-
cqlsh:cqlkeyspace> CREATE TABLE t2 (
-              ...     id int,
-              ...     k int,
-              ...     v text,
-              ...     PRIMARY KEY (id)
-              ... );
-
-
-

Add data to table t2:

-
cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (0, 0, 'val0');
-cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (1, 1, 'val1');
-cqlsh:cqlkeyspace> INSERT INTO t2 (id, k, v) VALUES (2, 2, 'val2');
-
-
-

A table query lists table data:

-
cqlsh:cqlkeyspace> SELECT * FROM t2;
-
-id | k | v
-----+---+------
- 1 | 1 | val1
- 0 | 0 | val0
- 2 | 2 | val2
-
- (3 rows)
-
-
-

Create a second keyspace CatalogKeyspace:

-
cqlsh:cqlkeyspace> CREATE KEYSPACE CatalogKeyspace
-              ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-
-

Create a table called journal in CatalogKeyspace:

-
cqlsh:cqlkeyspace> USE CatalogKeyspace;
-cqlsh:catalogkeyspace> CREATE TABLE journal (
-                  ...     id int,
-                  ...     name text,
-                  ...     publisher text,
-                  ...     PRIMARY KEY (id)
-                  ... );
-
-
-

Add data to table journal:

-
cqlsh:catalogkeyspace> INSERT INTO journal (id, name, publisher) VALUES (0, 'Apache
-Cassandra Magazine', 'Apache Cassandra');
-cqlsh:catalogkeyspace> INSERT INTO journal (id, name, publisher) VALUES (1, 'Couchbase
-Magazine', 'Couchbase');
-
-
-

Query table journal to list its data:

-
cqlsh:catalogkeyspace> SELECT * FROM journal;
-
-id | name                      | publisher
-----+---------------------------+------------------
- 1 |        Couchbase Magazine |        Couchbase
- 0 | Apache Cassandra Magazine | Apache Cassandra
-
- (2 rows)
-
-
-

Add another table called magazine:

-
cqlsh:catalogkeyspace> CREATE TABLE magazine (
-                  ...     id int,
-                  ...     name text,
-                  ...     publisher text,
-                  ...     PRIMARY KEY (id)
-                  ... );
-
-
-

Add table data to magazine:

-
cqlsh:catalogkeyspace> INSERT INTO magazine (id, name, publisher) VALUES (0, 'Apache
-Cassandra Magazine', 'Apache Cassandra');
-cqlsh:catalogkeyspace> INSERT INTO magazine (id, name, publisher) VALUES (1, 'Couchbase
-Magazine', 'Couchbase');
-
-
-

List table magazine’s data:

-
cqlsh:catalogkeyspace> SELECT * from magazine;
-
-id | name                      | publisher
-----+---------------------------+------------------
- 1 |        Couchbase Magazine |        Couchbase
- 0 | Apache Cassandra Magazine | Apache Cassandra
-
- (2 rows)
-
-
-
-
-
-

Snapshots

-

In this section, including its sub-sections, we shall demonstrate creating snapshots. The command used to create a snapshot is nodetool snapshot, and its usage is as follows:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool help snapshot
-NAME
-       nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-       of the specified table
-
-SYNOPSIS
-       nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-               [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-               [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-               [(-u <username> | --username <username>)] snapshot
-               [(-cf <table> | --column-family <table> | --table <table>)]
-               [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-               [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-       -cf <table>, --column-family <table>, --table <table>
-           The table name (you must specify one and only one keyspace for using
-           this option)
-
-       -h <host>, --host <host>
-           Node hostname or ip address
-
-       -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-           The list of Keyspace.table to take snapshot.(you must not specify
-           only keyspace)
-
-       -p <port>, --port <port>
-           Remote jmx agent port number
-
-       -pp, --print-port
-           Operate in 4.0 mode with hosts disambiguated by port number
-
-       -pw <password>, --password <password>
-           Remote jmx agent password
-
-       -pwf <passwordFilePath>, --password-file <passwordFilePath>
-           Path to the JMX password file
-
-       -sf, --skip-flush
-           Do not flush memtables before snapshotting (snapshot will not
-           contain unflushed data)
-
-       -t <tag>, --tag <tag>
-           The name of the snapshot
-
-       -u <username>, --username <username>
-           Remote jmx agent username
-
-       --
-           This option can be used to separate command-line options from the
-           list of argument, (useful when arguments might be mistaken for
-           command-line options
-
-       [<keyspaces...>]
-           List of keyspaces. By default, all keyspaces
-
-
-
-

Configuring for Snapshots

-

To demonstrate creating snapshots with nodetool on the command line, we have set auto_snapshot to false in cassandra.yaml:

-
auto_snapshot: false
-
-
-

Also set snapshot_before_compaction to false to disable creating snapshots automatically before compaction:

-
snapshot_before_compaction: false
-
-
-
-
-

Creating Snapshots

-

To demonstrate creating snapshots, start with no snapshots. Searching for snapshots lists none:

-
[ec2-user@ip-10-0-2-238 ~]$ find -name snapshots
-
-
-

We shall be using the example keyspaces and tables to create snapshots.

-
-

Taking Snapshots of all Tables in a Keyspace

-

To take snapshots of all tables in a keyspace, and optionally tag the snapshot, the syntax is:

-
nodetool snapshot --tag <tag> -- <keyspace>
-
-
-

As an example create a snapshot called catalog-ks for all the tables in the catalogkeyspace keyspace:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --tag catalog-ks -- catalogkeyspace
-Requested creating snapshot(s) for [catalogkeyspace] with snapshot name [catalog-ks] and
-options {skipFlush=false}
-Snapshot directory: catalog-ks
-
-
-

Search for snapshots and snapshots directories for the tables journal and magazine, which are in the catalogkeyspace keyspace should get listed:

-
[ec2-user@ip-10-0-2-238 ~]$ find -name snapshots
-./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots
-./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots
-
-
-

Snapshots of all tables in multiple keyspaces may be created similarly, as an example:

-
nodetool snapshot --tag catalog-cql-ks -- catalogkeyspace,cqlkeyspace
-
-
-
-
-

Taking Snapshots of Single Table in a Keyspace

-

To take a snapshot of a single table, the nodetool snapshot command syntax is as follows:

-
nodetool snapshot --tag <tag> --table <table> -- <keyspace>
-
-
-

As an example, create a snapshot for table magazine in keyspace catalogkeyspace:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --tag magazine --table magazine  --
-catalogkeyspace
-Requested creating snapshot(s) for [catalogkeyspace] with snapshot name [magazine] and
-options {skipFlush=false}
-Snapshot directory: magazine
-
-
-
-
-

Taking Snapshot of Multiple Tables from same Keyspace

-

To take snapshots of multiple tables in a keyspace the list of Keyspace.table must be specified with option --kt-list. As an example create snapshots for tables t and t2 in the cqlkeyspace keyspace:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag multi-table
-Requested creating snapshot(s) for [cqlkeyspace.t,cqlkeyspace.t2] with snapshot name [multi-
-table] and options {skipFlush=false}
-Snapshot directory: multi-table
-
-
-

Multiple snapshots of the same set of tables may be created and tagged with a different name. As an example, create another snapshot for the same set of tables t and t2 in the cqlkeyspace keyspace and tag the snapshots differently:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list cqlkeyspace.t,cqlkeyspace.t2 --tag
-multi-table-2
-Requested creating snapshot(s) for [cqlkeyspace.t,cqlkeyspace.t2] with snapshot name [multi-
-table-2] and options {skipFlush=false}
-Snapshot directory: multi-table-2
-
-
-
-
-

Taking Snapshot of Multiple Tables from Different Keyspaces

-

To take snapshots of multiple tables that are in different keyspaces the command syntax is the same as when multiple tables are in the same keyspace. Each keyspace.table must be specified separately in the --kt-list option. As an example, create a snapshot for table t in the cqlkeyspace and table journal in the catalogkeyspace and tag the snapshot multi-ks.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool snapshot --kt-list
-catalogkeyspace.journal,cqlkeyspace.t --tag multi-ks
-Requested creating snapshot(s) for [catalogkeyspace.journal,cqlkeyspace.t] with snapshot
-name [multi-ks] and options {skipFlush=false}
-Snapshot directory: multi-ks
-
-
-
-
-
-

Listing Snapshots

-

To list snapshots use the nodetool listsnapshots command. All the snapshots that we created in the preceding examples get listed:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool listsnapshots
-Snapshot Details:
-Snapshot name Keyspace name   Column family name True size Size on disk
-multi-table   cqlkeyspace     t2                 4.86 KiB  5.67 KiB
-multi-table   cqlkeyspace     t                  4.89 KiB  5.7 KiB
-multi-ks      cqlkeyspace     t                  4.89 KiB  5.7 KiB
-multi-ks      catalogkeyspace journal            4.9 KiB   5.73 KiB
-magazine      catalogkeyspace magazine           4.9 KiB   5.73 KiB
-multi-table-2 cqlkeyspace     t2                 4.86 KiB  5.67 KiB
-multi-table-2 cqlkeyspace     t                  4.89 KiB  5.7 KiB
-catalog-ks    catalogkeyspace journal            4.9 KiB   5.73 KiB
-catalog-ks    catalogkeyspace magazine           4.9 KiB   5.73 KiB
-
-Total TrueDiskSpaceUsed: 44.02 KiB
-
-
-
-
-

Finding Snapshots Directories

-

The snapshots directories may be listed with the find -name snapshots command:

-
[ec2-user@ip-10-0-2-238 ~]$ find -name snapshots
-./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/snapshots
-./cassandra/data/data/cqlkeyspace/t2-d993a390c22911e9b1350d927649052c/snapshots
-./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots
-./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots
-[ec2-user@ip-10-0-2-238 ~]$
-
-
-

To list the snapshots for a particular table, first change directory (with cd) to the snapshots directory for the table. As an example, list the snapshots for the catalogkeyspace/journal table. Two snapshots get listed:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/journal-
-296a2d30c22a11e9b1350d927649052c/snapshots
-[ec2-user@ip-10-0-2-238 snapshots]$ ls -l
-total 0
-drwxrwxr-x. 2 ec2-user ec2-user 265 Aug 19 02:44 catalog-ks
-drwxrwxr-x. 2 ec2-user ec2-user 265 Aug 19 02:52 multi-ks
-
-
-

A snapshot directory lists the SSTable files in the snapshot. A schema.cql file is also created in each snapshot, containing the schema definition DDL that may be run in CQL to create the table when restoring from a snapshot:

-
[ec2-user@ip-10-0-2-238 snapshots]$ cd catalog-ks
-[ec2-user@ip-10-0-2-238 catalog-ks]$ ls -l
-total 44
--rw-rw-r--. 1 ec2-user ec2-user   31 Aug 19 02:44 manifest.json
--rw-rw-r--. 4 ec2-user ec2-user   47 Aug 19 02:38 na-1-big-CompressionInfo.db
--rw-rw-r--. 4 ec2-user ec2-user   97 Aug 19 02:38 na-1-big-Data.db
--rw-rw-r--. 4 ec2-user ec2-user   10 Aug 19 02:38 na-1-big-Digest.crc32
--rw-rw-r--. 4 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Filter.db
--rw-rw-r--. 4 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Index.db
--rw-rw-r--. 4 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db
--rw-rw-r--. 4 ec2-user ec2-user   56 Aug 19 02:38 na-1-big-Summary.db
--rw-rw-r--. 4 ec2-user ec2-user   92 Aug 19 02:38 na-1-big-TOC.txt
--rw-rw-r--. 1 ec2-user ec2-user  814 Aug 19 02:44 schema.cql
-
-
-
-
-

Clearing Snapshots

-

Snapshots may be cleared or deleted with the nodetool clearsnapshot command. Either a specific snapshot name or the --all option must be specified. As an example, delete the snapshot called magazine from keyspace catalogkeyspace:

-
nodetool clearsnapshot -t magazine -- catalogkeyspace

Delete all snapshots from cqlkeyspace with the --all option:

nodetool clearsnapshot --all -- cqlkeyspace
-
-
-
-
-
-

Incremental Backups

-

In the following sub-sections we shall discuss configuring and creating incremental backups.

-
-

Configuring for Incremental Backups

-

To create incremental backups set incremental_backups to true in cassandra.yaml.

-
incremental_backups: true
-
-
-

This is the only setting needed to create incremental backups. By default incremental_backups is set to false, because a new set of SSTable files is created for each data flush, and if many CQL statements are run the backups directory could fill up quickly and use up storage that is needed to store table data. Incremental backups may also be enabled on the command line with nodetool enablebackup, and disabled with nodetool disablebackup. The status of incremental backups, i.e. whether they are enabled, may be found with nodetool statusbackup.
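For example, incremental backups can be toggled and checked on a single node without editing cassandra.yaml:

nodetool enablebackup
nodetool statusbackup
nodetool disablebackup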

-
-
-

Creating Incremental Backups

-

After each table is created and populated, flush the table data with the nodetool flush command; incremental backups get created:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t
-[ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t2
-[ec2-user@ip-10-0-2-238 ~]$ nodetool flush catalogkeyspace journal magazine
-
-
-
-
-

Finding Incremental Backups

-

Incremental backups are created within the Cassandra data directory, inside a table’s directory. Backups may be found with the following command:

-
[ec2-user@ip-10-0-2-238 ~]$ find -name backups
-
-./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups
-./cassandra/data/data/cqlkeyspace/t2-d993a390c22911e9b1350d927649052c/backups
-./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/backups
-./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/backups
-
-
-
-
-

Creating an Incremental Backup

-

This section discusses in more detail how incremental backups are created, starting from when a new keyspace is created and a table is added. Create a keyspace called CQLKeyspace (an arbitrary name):

-
cqlsh> CREATE KEYSPACE CQLKeyspace
-  ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}
-
-
-

Create a table called t within the CQLKeyspace keyspace:

-
cqlsh> USE CQLKeyspace;
-cqlsh:cqlkeyspace> CREATE TABLE t (
-              ...     id int,
-              ...     k int,
-              ...     v text,
-              ...     PRIMARY KEY (id)
-              ... );
-
-
-

Flush the keyspace and table:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t
-
-
-

Search for backups and a backups directory should get listed even though we have added no table data yet.

-
[ec2-user@ip-10-0-2-238 ~]$ find -name backups
-
-./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups
-
-
-

Change directory to the backups directory and list the files; no files get listed, as no table data has been added yet:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t-
-d132e240c21711e9bbee19821dcea330/backups
-[ec2-user@ip-10-0-2-238 backups]$ ls -l
-total 0
-
-
-

Next, add a row of data to table t that we created:

-
cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-
-
-

Run the nodetool flush command to flush table data:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool flush cqlkeyspace t
-
-
-

List the files and directories in the backups directory and SSTable files for an incremental backup get listed:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t-
-d132e240c21711e9bbee19821dcea330/backups
-[ec2-user@ip-10-0-2-238 backups]$ ls -l
-total 36
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 00:32 na-1-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   43 Aug 19 00:32 na-1-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 00:32 na-1-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 00:32 na-1-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user    8 Aug 19 00:32 na-1-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:32 na-1-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 00:32 na-1-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 00:32 na-1-big-TOC.txt
-
-
-

Add another row of data:

-
cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1');
-
-
-

Again, run the nodetool flush command:

-
[ec2-user@ip-10-0-2-238 backups]$  nodetool flush cqlkeyspace t
-
-
-

A new incremental backup gets created for the new data added. List the files in the backups directory for table t and two sets of SSTable files get listed, one for each incremental backup. The SSTable files are timestamped, which distinguishes the first incremental backup from the second:

-
[ec2-user@ip-10-0-2-238 backups]$ ls -l
-total 72
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 00:32 na-1-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   43 Aug 19 00:32 na-1-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 00:32 na-1-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 00:32 na-1-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user    8 Aug 19 00:32 na-1-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:32 na-1-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 00:32 na-1-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 00:32 na-1-big-TOC.txt
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 00:35 na-2-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   41 Aug 19 00:35 na-2-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 00:35 na-2-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 00:35 na-2-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user    8 Aug 19 00:35 na-2-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4673 Aug 19 00:35 na-2-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 00:35 na-2-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 00:35 na-2-big-TOC.txt
-[ec2-user@ip-10-0-2-238 backups]$
-
-
-

The backups directory for table cqlkeyspace/t is created within the data directory for the table:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/cqlkeyspace/t-
-d132e240c21711e9bbee19821dcea330
-[ec2-user@ip-10-0-2-238 t-d132e240c21711e9bbee19821dcea330]$ ls -l
-total 36
-drwxrwxr-x. 2 ec2-user ec2-user  226 Aug 19 02:30 backups
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 02:30 na-1-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   79 Aug 19 02:30 na-1-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 02:30 na-1-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 02:30 na-1-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 02:30 na-1-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4696 Aug 19 02:30 na-1-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 02:30 na-1-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 02:30 na-1-big-TOC.txt
-
-
-

The incremental backups for the other keyspaces/tables get created similarly. As an example the backups directory for table catalogkeyspace/magazine is created within the data directory:

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/magazine-
-446eae30c22a11e9b1350d927649052c
-[ec2-user@ip-10-0-2-238 magazine-446eae30c22a11e9b1350d927649052c]$ ls -l
-total 36
-drwxrwxr-x. 2 ec2-user ec2-user  226 Aug 19 02:38 backups
--rw-rw-r--. 2 ec2-user ec2-user   47 Aug 19 02:38 na-1-big-CompressionInfo.db
--rw-rw-r--. 2 ec2-user ec2-user   97 Aug 19 02:38 na-1-big-Data.db
--rw-rw-r--. 2 ec2-user ec2-user   10 Aug 19 02:38 na-1-big-Digest.crc32
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Filter.db
--rw-rw-r--. 2 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Index.db
--rw-rw-r--. 2 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db
--rw-rw-r--. 2 ec2-user ec2-user   56 Aug 19 02:38 na-1-big-Summary.db
--rw-rw-r--. 2 ec2-user ec2-user   92 Aug 19 02:38 na-1-big-TOC.txt
-
-
-
-
-
-

Restoring from Incremental Backups and Snapshots

-

The two main tools/commands for restoring a table after it has been dropped are:

-
  • sstableloader
  • nodetool import
-

A snapshot contains essentially the same set of SSTable files as an incremental backup does, with a few additional files. In particular, a snapshot includes a schema.cql file with the schema DDL to create the table in CQL. An incremental backup does not include the DDL, which must be obtained from a snapshot when restoring from an incremental backup.
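As a sketch (assuming the keyspace itself still exists, and reusing the snapshot path from the earlier examples), the table DDL stored in a snapshot can be replayed with cqlsh before the data is loaded:

cqlsh -f ./cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots/magazine/schema.cql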

-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/bloom_filters.html b/src/doc/4.0-beta1/operating/bloom_filters.html deleted file mode 100644 index c91fb3c6c..000000000 --- a/src/doc/4.0-beta1/operating/bloom_filters.html +++ /dev/null @@ -1,162 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bloom Filters" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bloom Filters

-

In the read path, Cassandra merges data on disk (in SSTables) with data in RAM (in memtables). To avoid checking every SSTable data file for the partition being requested, Cassandra employs a data structure known as a bloom filter.

-

Bloom filters are a probabilistic data structure that allows Cassandra to determine one of two possible states: the data definitely does not exist in the given file, or the data probably exists in the given file.

-

While bloom filters cannot guarantee that the data exists in a given SSTable, bloom filters can be made more accurate by allowing them to consume more RAM. Operators can tune this behavior per table by adjusting bloom_filter_fp_chance to a float between 0 and 1.

-

The default value for bloom_filter_fp_chance is 0.1 for tables using LeveledCompactionStrategy and 0.01 for all other cases.

-

Bloom filters are stored in RAM, but off-heap, so operators should not consider bloom filters when selecting the maximum heap size. As accuracy improves (as bloom_filter_fp_chance gets closer to 0), memory usage increases non-linearly: the bloom filter for bloom_filter_fp_chance = 0.01 will require about three times as much memory as the same table with bloom_filter_fp_chance = 0.1.

-

Typical values for bloom_filter_fp_chance are between 0.01 (1%) and 0.1 (10%) false-positive chance, where Cassandra may scan an SSTable for a row only to find that it does not exist on disk. The parameter should be tuned by use case:

-
  • Users with more RAM and slower disks may benefit from setting bloom_filter_fp_chance to a numerically lower number (such as 0.01) to avoid excess IO operations.
  • Users with less RAM, more dense nodes, or very fast disks may tolerate a higher bloom_filter_fp_chance in order to save RAM at the expense of excess IO operations.
  • In workloads that rarely read, or that only perform reads by scanning the entire data set (such as analytics workloads), setting bloom_filter_fp_chance to a much higher number is acceptable.
-
-

Changing

-

The bloom filter false positive chance is visible in the DESCRIBE TABLE output as the field bloom_filter_fp_chance. Operators can change the value with an ALTER TABLE statement:

-
ALTER TABLE keyspace.table WITH bloom_filter_fp_chance=0.01
-
-
-

Operators should be aware, however, that this change is not immediate: the bloom filter is calculated when the file is written, and persisted on disk as the Filter component of the SSTable. Upon issuing an ALTER TABLE statement, new files on disk will be written with the new bloom_filter_fp_chance, but existing sstables will not be modified until they are compacted. If an operator needs a change to bloom_filter_fp_chance to take effect, they can trigger an SSTable rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the sstables on disk, regenerating the bloom filters in the process.
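For example, to force a rewrite of the sstables (and therefore the bloom filters) for a single table after changing bloom_filter_fp_chance, a command along these lines can be used (keyspace and table names here are illustrative):

nodetool upgradesstables -a keyspace1 standard1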

-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/bulk_loading.html b/src/doc/4.0-beta1/operating/bulk_loading.html deleted file mode 100644 index 468b85b4c..000000000 --- a/src/doc/4.0-beta1/operating/bulk_loading.html +++ /dev/null @@ -1,680 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Bulk Loading" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Bulk Loading

-

Bulk loading of data in Apache Cassandra is supported by different tools. The data to be bulk loaded must be in the form of SSTables; Cassandra does not directly support bulk loading data in any other format, such as CSV, JSON, or XML. Bulk loading could be used to:

-
  • Restore incremental backups and snapshots. Backups and snapshots are already in the form of SSTables.
  • Load existing SSTables into another cluster, which could have a different number of nodes or replication strategy.
  • Load external data into a cluster.
-

Note: CSV data can be loaded via the cqlsh COPY command, but we do not recommend this for bulk loading, which typically involves many GB or TB of data.
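For small data sets, a sketch of that COPY alternative (the CSV file name is hypothetical):

cqlsh> COPY catalogkeyspace.magazine (id, name, publisher) FROM 'magazine.csv' WITH HEADER = true;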

-
-

Tools for Bulk Loading

-

Cassandra provides two commands or tools for bulk loading data. These are:

-
  • The Cassandra Bulk loader, also called sstableloader
  • The nodetool import command
-

The sstableloader and nodetool import are accessible if the Cassandra installation bin directory is in the PATH environment variable; otherwise they may be run directly from the bin directory. We shall discuss each of these next, using the example keyspaces and tables created in the Backups section.

-
-
-

Using sstableloader

-

sstableloader is the main tool for bulk uploading data. It streams SSTable data files to a running cluster, loading data in conformance with the replication strategy and replication factor. The table to upload data to does not need to be empty.

-

The only requirements to run sstableloader are:

-
  1. One or more comma separated initial hosts to connect to and get ring information.
  2. A directory path for the SSTables to load.
-

Its usage is as follows.

-
sstableloader [options] <dir_path>
-
-
-

sstableloader bulk loads the SSTables found in the directory <dir_path> to the configured cluster. The <dir_path> is used as the target keyspace/table name. As an example, to load an SSTable named Standard1-g-1-Data.db into Keyspace1/Standard1, you will need to have the files Standard1-g-1-Data.db and Standard1-g-1-Index.db in a directory /path/to/Keyspace1/Standard1/.

-
-

Sstableloader Option to accept Target keyspace name

-

Often, as part of a backup strategy, some Cassandra DBAs store an entire data directory. When data corruption is found, they would like to restore the data into the same cluster (for large clusters of, say, 200 nodes) but with a different keyspace name.

-

Currently sstableloader derives the keyspace name from the folder structure. To allow the target keyspace name to be specified explicitly, version 4.0 adds support for the --target-keyspace option (CASSANDRA-13884).
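A sketch of this option in use (the target keyspace name is illustrative and must already exist with a matching table definition):

sstableloader --nodes 10.0.2.238 --target-keyspace catalogkeyspace_restored /catalogkeyspace/magazine/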

-

The supported options are as follows, of which only -d,--nodes <initial hosts> is required.

-
-alg,--ssl-alg <ALGORITHM>                                   Client SSL: algorithm
-
--ap,--auth-provider <auth provider>                          Custom
-                                                             AuthProvider class name for
-                                                             cassandra authentication
--ciphers,--ssl-ciphers <CIPHER-SUITES>                       Client SSL:
-                                                             comma-separated list of
-                                                             encryption suites to use
--cph,--connections-per-host <connectionsPerHost>             Number of
-                                                             concurrent connections-per-host.
--d,--nodes <initial hosts>                                   Required.
-                                                             Try to connect to these hosts (comma separated) initially for ring information
-
--f,--conf-path <path to config file>                         cassandra.yaml file path for streaming throughput and client/server SSL.
-
--h,--help                                                    Display this help message
-
--i,--ignore <NODES>                                          Don't stream to this (comma separated) list of nodes
-
--idct,--inter-dc-throttle <inter-dc-throttle>                Inter-datacenter throttle speed in Mbits (default unlimited)
-
--k,--target-keyspace <target keyspace name>                  Target
-                                                             keyspace name
--ks,--keystore <KEYSTORE>                                    Client SSL:
-                                                             full path to keystore
--kspw,--keystore-password <KEYSTORE-PASSWORD>                Client SSL:
-                                                             password of the keystore
---no-progress                                                Don't
-                                                             display progress
--p,--port <native transport port>                            Port used
-                                                             for native connection (default 9042)
--prtcl,--ssl-protocol <PROTOCOL>                             Client SSL:
-                                                             connections protocol to use (default: TLS)
--pw,--password <password>                                    Password for
-                                                             cassandra authentication
--sp,--storage-port <storage port>                            Port used
-                                                             for internode communication (default 7000)
--spd,--server-port-discovery <allow server port discovery>   Use ports
-                                                             published by server to decide how to connect. With SSL requires StartTLS
-                                                             to be used.
--ssp,--ssl-storage-port <ssl storage port>                   Port used
-                                                             for TLS internode communication (default 7001)
--st,--store-type <STORE-TYPE>                                Client SSL:
-                                                             type of store
--t,--throttle <throttle>                                     Throttle
-                                                             speed in Mbits (default unlimited)
--ts,--truststore <TRUSTSTORE>                                Client SSL:
-                                                             full path to truststore
--tspw,--truststore-password <TRUSTSTORE-PASSWORD>            Client SSL:
-                                                             Password of the truststore
--u,--username <username>                                     Username for
-                                                             cassandra authentication
--v,--verbose                                                 verbose
-                                                             output
-
-
-

The cassandra.yaml file can be provided on the command line with the -f option to set up streaming throughput and client/server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options and client_encryption_options are read from the yaml. Options read from cassandra.yaml can be overridden with the corresponding command-line options.
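For example (the cassandra.yaml path and throttle value are illustrative):

sstableloader --nodes 10.0.2.238 -f /etc/cassandra/cassandra.yaml --throttle 100 /catalogkeyspace/magazine/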

-
-
-

A sstableloader Demo

-

We shall demonstrate using sstableloader by uploading incremental backup data for table catalogkeyspace.magazine. We shall also use a snapshot of the same table to bulk upload in a different run of sstableloader. The backups and snapshots for the catalogkeyspace.magazine table are listed as follows.

-
[ec2-user@ip-10-0-2-238 ~]$ cd ./cassandra/data/data/catalogkeyspace/magazine-
-446eae30c22a11e9b1350d927649052c
-[ec2-user@ip-10-0-2-238 magazine-446eae30c22a11e9b1350d927649052c]$ ls -l
-total 0
-drwxrwxr-x. 2 ec2-user ec2-user 226 Aug 19 02:38 backups
-drwxrwxr-x. 4 ec2-user ec2-user  40 Aug 19 02:45 snapshots
-
-
-

The directory path structure of SSTables to be uploaded using sstableloader is used as the target keyspace/table.

-

We could have directly uploaded from the backups and snapshots directories respectively if the directory structure were in the format used by sstableloader. But the directory path of backups and snapshots for SSTables is /catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/backups and /catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots respectively, which cannot be used to upload SSTables to catalogkeyspace.magazine table. The directory path structure must be /catalogkeyspace/magazine/ to use sstableloader. We need to create a new directory structure to upload SSTables with sstableloader which is typical when using sstableloader. Create a directory structure /catalogkeyspace/magazine and set its permissions.

-
[ec2-user@ip-10-0-2-238 ~]$ sudo mkdir -p /catalogkeyspace/magazine
-[ec2-user@ip-10-0-2-238 ~]$ sudo chmod -R 777 /catalogkeyspace/magazine
-
-
-
-

Bulk Loading from an Incremental Backup

-

An incremental backup does not include the DDL for a table. The table must already exist. If the table was dropped it may be created using the schema.cql generated with every snapshot of a table. As we shall be using sstableloader to load SSTables to the magazine table, the table must exist prior to running sstableloader. The table does not need to be empty but we have used an empty table as indicated by a CQL query:

-
cqlsh:catalogkeyspace> SELECT * FROM magazine;
-
-id | name | publisher
-----+------+-----------
-
-(0 rows)
-
-
-

After the table to upload has been created copy the SSTable files from the backups directory to the /catalogkeyspace/magazine/ directory that we created.

-
[ec2-user@ip-10-0-2-238 ~]$ sudo cp ./cassandra/data/data/catalogkeyspace/magazine-
-446eae30c22a11e9b1350d927649052c/backups/* /catalogkeyspace/magazine/
-
-
-

Run the sstableloader to upload SSTables from the /catalogkeyspace/magazine/ directory.

-
sstableloader --nodes 10.0.2.238  /catalogkeyspace/magazine/
-
-
-

The output from the sstableloader command should be similar to the listed:

-
[ec2-user@ip-10-0-2-238 ~]$ sstableloader --nodes 10.0.2.238  /catalogkeyspace/magazine/
-Opening SSTables and calculating sections to stream
-Streaming relevant part of /catalogkeyspace/magazine/na-1-big-Data.db
-/catalogkeyspace/magazine/na-2-big-Data.db  to [35.173.233.153:7000, 10.0.2.238:7000,
-54.158.45.75:7000]
-progress: [35.173.233.153:7000]0:1/2 88 % total: 88% 0.018KiB/s (avg: 0.018KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% total: 176% 33.807KiB/s (avg: 0.036KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% total: 176% 0.000KiB/s (avg: 0.029KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:1/2 39 % total: 81% 0.115KiB/s
-(avg: 0.024KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 % total: 108%
-97.683KiB/s (avg: 0.033KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 %
-[54.158.45.75:7000]0:1/2 39 % total: 80% 0.233KiB/s (avg: 0.040KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 %
-[54.158.45.75:7000]0:2/2 78 % total: 96% 88.522KiB/s (avg: 0.049KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 %
-[54.158.45.75:7000]0:2/2 78 % total: 96% 0.000KiB/s (avg: 0.045KiB/s)
-progress: [35.173.233.153:7000]0:2/2 176% [10.0.2.238:7000]0:2/2 78 %
-[54.158.45.75:7000]0:2/2 78 % total: 96% 0.000KiB/s (avg: 0.044KiB/s)
-
-
-

After sstableloader has run, query the magazine table; the loaded data should be listed:

-
cqlsh:catalogkeyspace> SELECT * FROM magazine;
-
-id | name                      | publisher
-----+---------------------------+------------------
- 1 |        Couchbase Magazine |        Couchbase
- 0 | Apache Cassandra Magazine | Apache Cassandra
-
-(2 rows)
-cqlsh:catalogkeyspace>
-
-
-
-
-

Bulk Loading from a Snapshot

-

In this section we shall demonstrate restoring a snapshot of the magazine table to the magazine table. As we used the same table to restore data from a backup the directory structure required by sstableloader should already exist. If the directory structure needed to load SSTables to catalogkeyspace.magazine does not exist create the directories and set their permissions.

-
[ec2-user@ip-10-0-2-238 ~]$ sudo mkdir -p /catalogkeyspace/magazine
-[ec2-user@ip-10-0-2-238 ~]$ sudo chmod -R 777 /catalogkeyspace/magazine
-
-
-

As we shall be copying the snapshot files to the directory remove any files that may be in the directory.

-
[ec2-user@ip-10-0-2-238 ~]$ sudo rm /catalogkeyspace/magazine/*
-[ec2-user@ip-10-0-2-238 ~]$ cd /catalogkeyspace/magazine/
-[ec2-user@ip-10-0-2-238 magazine]$ ls -l
-total 0
-
-
-

Copy the snapshot files to the /catalogkeyspace/magazine directory.

-
[ec2-user@ip-10-0-2-238 ~]$ sudo cp ./cassandra/data/data/catalogkeyspace/magazine-
-446eae30c22a11e9b1350d927649052c/snapshots/magazine/* /catalogkeyspace/magazine
-
-
-

List the files in the /catalogkeyspace/magazine directory; a schema.cql should be among them.

-
[ec2-user@ip-10-0-2-238 ~]$ cd /catalogkeyspace/magazine
-[ec2-user@ip-10-0-2-238 magazine]$ ls -l
-total 44
--rw-r--r--. 1 root root   31 Aug 19 04:13 manifest.json
--rw-r--r--. 1 root root   47 Aug 19 04:13 na-1-big-CompressionInfo.db
--rw-r--r--. 1 root root   97 Aug 19 04:13 na-1-big-Data.db
--rw-r--r--. 1 root root   10 Aug 19 04:13 na-1-big-Digest.crc32
--rw-r--r--. 1 root root   16 Aug 19 04:13 na-1-big-Filter.db
--rw-r--r--. 1 root root   16 Aug 19 04:13 na-1-big-Index.db
--rw-r--r--. 1 root root 4687 Aug 19 04:13 na-1-big-Statistics.db
--rw-r--r--. 1 root root   56 Aug 19 04:13 na-1-big-Summary.db
--rw-r--r--. 1 root root   92 Aug 19 04:13 na-1-big-TOC.txt
--rw-r--r--. 1 root root  815 Aug 19 04:13 schema.cql
-
-
-

Alternatively create symlinks to the snapshot folder instead of copying the data, something like:

-
mkdir keyspace_name
-ln -s _path_to_snapshot_folder keyspace_name/table_name
-
-
-
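For example, using the snapshot path from this walkthrough, the symlink approach might look like the following sketch (the snapshot directory name will differ on your node, so treat the paths as illustrative):

mkdir -p catalogkeyspace
ln -s ~/cassandra/data/data/catalogkeyspace/magazine-446eae30c22a11e9b1350d927649052c/snapshots/magazine catalogkeyspace/magazine
sstableloader --nodes 10.0.2.238 catalogkeyspace/magazine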

If the magazine table was dropped run the DDL in the schema.cql to create the table. Run the sstableloader with the following command.

-
sstableloader --nodes 10.0.2.238  /catalogkeyspace/magazine/
-
-
-

As the output from the command indicates, SSTables are streamed to the cluster.

-
[ec2-user@ip-10-0-2-238 ~]$ sstableloader --nodes 10.0.2.238  /catalogkeyspace/magazine/
-
-Established connection to initial hosts
-Opening SSTables and calculating sections to stream
-Streaming relevant part of /catalogkeyspace/magazine/na-1-big-Data.db  to
-[35.173.233.153:7000, 10.0.2.238:7000, 54.158.45.75:7000]
-progress: [35.173.233.153:7000]0:1/1 176% total: 176% 0.017KiB/s (avg: 0.017KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% total: 176% 0.000KiB/s (avg: 0.014KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 % total: 108% 0.115KiB/s
-(avg: 0.017KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 %
-[54.158.45.75:7000]0:1/1 78 % total: 96% 0.232KiB/s (avg: 0.024KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 %
-[54.158.45.75:7000]0:1/1 78 % total: 96% 0.000KiB/s (avg: 0.022KiB/s)
-progress: [35.173.233.153:7000]0:1/1 176% [10.0.2.238:7000]0:1/1 78 %
-[54.158.45.75:7000]0:1/1 78 % total: 96% 0.000KiB/s (avg: 0.021KiB/s)
-
-
-

Some other requirements of sstableloader that should be taken into consideration are:

  • The SSTables to be loaded must be compatible with the Cassandra version being loaded into.
  • Repairing tables that have been loaded into a different cluster does not repair the source tables.
  • sstableloader makes use of port 7000 for internode communication.
  • Before restoring incremental backups, run nodetool flush to back up any data in memtables (see the example below).
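For example, flushing the table used in this guide before relying on its incremental backups could be done as follows (an illustrative sketch using the keyspace and table from the earlier examples):

nodetool flush catalogkeyspace magazine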

Using nodetool import

-

In this section we shall import SSTables into a table using the nodetool import command. The nodetool refresh command is deprecated; nodetool import is recommended instead, as nodetool refresh does not have an option to load new SSTables from a separate directory, which nodetool import does.

-

The command usage is as follows.

-
nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-       [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-       [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-       [(-u <username> | --username <username>)] import
-       [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-       [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-       [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-       <directory> ...
-
-
-

The arguments keyspace, table name and directory to import SSTables from are required.

-

The supported options are as follows.

-
-c, --no-invalidate-caches
-    Don't invalidate the row cache when importing
-
--e, --extended-verify
-    Run an extended verify, verifying all values in the new SSTables
-
--h <host>, --host <host>
-    Node hostname or ip address
-
--l, --keep-level
-    Keep the level on the new SSTables
-
--p <port>, --port <port>
-    Remote jmx agent port number
-
--pp, --print-port
-    Operate in 4.0 mode with hosts disambiguated by port number
-
--pw <password>, --password <password>
-    Remote jmx agent password
-
--pwf <passwordFilePath>, --password-file <passwordFilePath>
-    Path to the JMX password file
-
--q, --quick
-    Do a quick import without verifying SSTables, clearing row cache or
-    checking in which data directory to put the file
-
--r, --keep-repaired
-    Keep any repaired information from the SSTables
-
--t, --no-tokens
-    Don't verify that all tokens in the new SSTable are owned by the
-    current node
-
--u <username>, --username <username>
-    Remote jmx agent username
-
--v, --no-verify
-    Don't verify new SSTables
-
---
-    This option can be used to separate command-line options from the
-    list of arguments (useful when arguments might be mistaken for
-    command-line options).
-
-
-
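As an illustration, an import that runs an extended verify and keeps the SSTable levels could be invoked as follows (the directory path is hypothetical; keyspace and table are from the examples in this guide):

nodetool import --extended-verify --keep-level -- cqlkeyspace t /tmp/sstables_to_import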

As the keyspace and table are specified on the command line, nodetool import does not have the same requirement as sstableloader, which is to have the SSTables in a specific directory path. When importing snapshots or incremental backups with nodetool import, the SSTables don’t need to be copied to another directory.

-
-

Importing Data from an Incremental Backup

-

In this section we shall demonstrate using nodetool import to import SSTables from an incremental backup. We shall use the example table cqlkeyspace.t. Drop table t, as we are demonstrating how to restore it.

-
cqlsh:cqlkeyspace> DROP table t;
-
-
-

An incremental backup for a table does not include the schema definition for the table. If the schema definition is not kept as a separate backup, the schema.cql from a backup of the table may be used to create the table as follows.

-
cqlsh:cqlkeyspace> CREATE TABLE IF NOT EXISTS cqlkeyspace.t (
-              ...         id int PRIMARY KEY,
-              ...         k int,
-              ...         v text)
-              ...         WITH ID = d132e240-c217-11e9-bbee-19821dcea330
-              ...         AND bloom_filter_fp_chance = 0.01
-              ...         AND crc_check_chance = 1.0
-              ...         AND default_time_to_live = 0
-              ...         AND gc_grace_seconds = 864000
-              ...         AND min_index_interval = 128
-              ...         AND max_index_interval = 2048
-              ...         AND memtable_flush_period_in_ms = 0
-              ...         AND speculative_retry = '99p'
-              ...         AND additional_write_policy = '99p'
-              ...         AND comment = ''
-              ...         AND caching = { 'keys': 'ALL', 'rows_per_partition': 'NONE' }
-              ...         AND compaction = { 'max_threshold': '32', 'min_threshold': '4',
-'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' }
-              ...         AND compression = { 'chunk_length_in_kb': '16', 'class':
-'org.apache.cassandra.io.compress.LZ4Compressor' }
-              ...         AND cdc = false
-              ...         AND extensions = {  };
-
-
-

Initially the table could be empty, but does not have to be.

-
cqlsh:cqlkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+---
-
-(0 rows)
-
-
-

Run the nodetool import command by providing the keyspace, table and the backups directory. We don’t need to copy the table backups to another directory to run nodetool import as we had to when using sstableloader.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool import -- cqlkeyspace t
-./cassandra/data/data/cqlkeyspace/t-d132e240c21711e9bbee19821dcea330/backups
-[ec2-user@ip-10-0-2-238 ~]$
-
-
-

The SSTables get imported into the table. Run a query in cqlsh to list the data imported.

-
cqlsh:cqlkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+------
- 1 | 1 | val1
- 0 | 0 | val0
-
-
-
-
-

Importing Data from a Snapshot

-

Importing SSTables from a snapshot with the nodetool import command is similar to importing SSTables from an incremental backup. To demonstrate, we shall import a snapshot of the catalogkeyspace.journal table. Drop the table, as we are demonstrating how to restore it from a snapshot.

-
cqlsh:cqlkeyspace> use CATALOGKEYSPACE;
-cqlsh:catalogkeyspace> DROP TABLE journal;
-
-
-

We shall use the catalog-ks snapshot for the journal table. List the files in the snapshot. The snapshot includes a schema.cql, which is the schema definition for the journal table.

-
[ec2-user@ip-10-0-2-238 catalog-ks]$ ls -l
-total 44
--rw-rw-r--. 1 ec2-user ec2-user   31 Aug 19 02:44 manifest.json
--rw-rw-r--. 3 ec2-user ec2-user   47 Aug 19 02:38 na-1-big-CompressionInfo.db
--rw-rw-r--. 3 ec2-user ec2-user   97 Aug 19 02:38 na-1-big-Data.db
--rw-rw-r--. 3 ec2-user ec2-user   10 Aug 19 02:38 na-1-big-Digest.crc32
--rw-rw-r--. 3 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Filter.db
--rw-rw-r--. 3 ec2-user ec2-user   16 Aug 19 02:38 na-1-big-Index.db
--rw-rw-r--. 3 ec2-user ec2-user 4687 Aug 19 02:38 na-1-big-Statistics.db
--rw-rw-r--. 3 ec2-user ec2-user   56 Aug 19 02:38 na-1-big-Summary.db
--rw-rw-r--. 3 ec2-user ec2-user   92 Aug 19 02:38 na-1-big-TOC.txt
--rw-rw-r--. 1 ec2-user ec2-user  814 Aug 19 02:44 schema.cql
-
-
-

Copy the DDL from the schema.cql and run it in cqlsh to create the catalogkeyspace.journal table.

-
cqlsh:catalogkeyspace> CREATE TABLE IF NOT EXISTS catalogkeyspace.journal (
-                  ...         id int PRIMARY KEY,
-                  ...         name text,
-                  ...         publisher text)
-                  ...         WITH ID = 296a2d30-c22a-11e9-b135-0d927649052c
-                  ...         AND bloom_filter_fp_chance = 0.01
-                  ...         AND crc_check_chance = 1.0
-                  ...         AND default_time_to_live = 0
-                  ...         AND gc_grace_seconds = 864000
-                  ...         AND min_index_interval = 128
-                  ...         AND max_index_interval = 2048
-                  ...         AND memtable_flush_period_in_ms = 0
-                  ...         AND speculative_retry = '99p'
-                  ...         AND additional_write_policy = '99p'
-                  ...         AND comment = ''
-                  ...         AND caching = { 'keys': 'ALL', 'rows_per_partition': 'NONE' }
-                  ...         AND compaction = { 'min_threshold': '4', 'max_threshold':
-'32', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' }
-                  ...         AND compression = { 'chunk_length_in_kb': '16', 'class':
-'org.apache.cassandra.io.compress.LZ4Compressor' }
-                  ...         AND cdc = false
-                  ...         AND extensions = {  };
-
-
-

Run the nodetool import command to import the SSTables for the snapshot.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool import -- catalogkeyspace journal
-./cassandra/data/data/catalogkeyspace/journal-
-296a2d30c22a11e9b1350d927649052c/snapshots/catalog-ks/
-[ec2-user@ip-10-0-2-238 ~]$
-
-
-

Subsequently, run a CQL query on the journal table; the imported data should be listed.

-
cqlsh:catalogkeyspace>
-cqlsh:catalogkeyspace> SELECT * FROM journal;
-
-id | name                      | publisher
-----+---------------------------+------------------
- 1 |        Couchbase Magazine |        Couchbase
- 0 | Apache Cassandra Magazine | Apache Cassandra
-
-(2 rows)
-cqlsh:catalogkeyspace>
-
-
-
-
-
-

Bulk Loading External Data

-

Bulk loading external data directly is not supported by any of the tools we have discussed, which include sstableloader and nodetool import; both require data to be in the form of SSTables. Apache Cassandra provides a Java API for generating SSTables from input data, after which sstableloader or nodetool import can be used to bulk load them. Next, we shall discuss the org.apache.cassandra.io.sstable.CQLSSTableWriter Java class for generating SSTables.

-
-

Generating SSTables with CQLSSTableWriter Java API

-

To generate SSTables using the CQLSSTableWriter class, at a minimum the following need to be supplied:

  • An output directory to generate the SSTable in
  • The schema for the SSTable
  • A prepared insert statement
  • A partitioner

The output directory must already have been created. Create a directory (/sstables as an example) and set its permissions.

-
sudo mkdir /sstables
-sudo chmod  777 -R /sstables
-
-
-

Next, we shall discuss how CQLSSTableWriter can be used in a Java application. Create a Java constant for the output directory.

-
public static final String OUTPUT_DIR = "./sstables";
-
-
-

The CQLSSTableWriter Java API provides for creating a user-defined type. Create a new type to store int data:

-
String type = "CREATE TYPE CQLKeyspace.intType (a int, b int)";
-// Define a String variable for the SSTable schema.
-String schema = "CREATE TABLE CQLKeyspace.t ("
-                 + "  id int PRIMARY KEY,"
-                 + "  k int,"
-                 + "  v1 text,"
-                 + "  v2 intType"
-                 + ")";
-
-
-

Define a String variable for the prepared insert statement to use:

-
String insertStmt = "INSERT INTO CQLKeyspace.t (id, k, v1, v2) VALUES (?, ?, ?, ?)";
-
-
-

The partitioner does not need to be set, as the default partitioner, Murmur3Partitioner, is used.

-

All these variables or settings are used by the builder class CQLSSTableWriter.Builder to create a CQLSSTableWriter object.

-

Create a File object for the output directory.

-
File outputDir = new File(OUTPUT_DIR + File.separator + "CQLKeyspace" + File.separator + "t");
-
-
-

Next, obtain a CQLSSTableWriter.Builder object using the static method CQLSSTableWriter.builder(). Set the output directory File object, user-defined type, SSTable schema, buffer size, and prepared insert statement, optionally set any of the other builder options, and invoke the build() method to create a CQLSSTableWriter object:

-
CQLSSTableWriter writer = CQLSSTableWriter.builder()
-                                             .inDirectory(outputDir)
-                                             .withType(type)
-                                             .forTable(schema)
-                                             .withBufferSizeInMB(256)
-                                             .using(insertStmt).build();
-
-
-

Next, set the SSTable data. If any user-defined types are used, obtain a UserType object for them:

-
UserType userType = writer.getUDType("intType");
-
-
-

Add data rows for the resulting SSTable.

-
writer.addRow(0, 0, "val0", userType.newValue().setInt("a", 0).setInt("b", 0));
-writer.addRow(1, 1, "val1", userType.newValue().setInt("a", 1).setInt("b", 1));
-writer.addRow(2, 2, "val2", userType.newValue().setInt("a", 2).setInt("b", 2));
-
-
-

Close the writer, finalizing the SSTable.

-
writer.close();
-
-
-
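The directory with the generated SSTables can then be bulk loaded into the cluster, for example with sstableloader (a sketch, assuming the cluster node and output directory used earlier in this guide):

sstableloader --nodes 10.0.2.238 ./sstables/CQLKeyspace/t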

All the public methods the CQLSSTableWriter.Builder class provides, including some that are not discussed in the preceding example, are as follows.
inDirectory(String directory)
    The directory where to write the SSTables. This is a mandatory option. The directory to use should already exist and be writable.

inDirectory(File directory)
    The directory where to write the SSTables. This is a mandatory option. The directory to use should already exist and be writable.

forTable(String schema)
    The schema (CREATE TABLE statement) for the table for which SSTables are to be created. The provided CREATE TABLE statement must use a fully-qualified table name, one that includes the keyspace name. This is a mandatory option.

withPartitioner(IPartitioner partitioner)
    The partitioner to use. By default, Murmur3Partitioner will be used. If this is not the partitioner used by the cluster for which the SSTables are created, the correct partitioner needs to be provided.

using(String insert)
    The INSERT or UPDATE statement defining the order of the values to add for a given CQL row. The provided statement must use a fully-qualified table name, one that includes the keyspace name. Moreover, the statement must use bind variables since these variables will be bound to values by the resulting SSTable writer. This is a mandatory option.

withBufferSizeInMB(int size)
    The size of the buffer to use. This defines how much data will be buffered before being written as a new SSTable. This corresponds roughly to the data size the created SSTable will have. The default is 128MB, which should be reasonable for a 1GB heap. If an OutOfMemory exception is generated while using the SSTable writer, lower this value.

sorted()
    Creates a CQLSSTableWriter that expects sorted inputs. If this option is used, the resulting SSTable writer will expect rows to be added in SSTable sorted order (and an exception will be thrown if that is not the case during row insertion). The SSTable sorted order means that rows are added such that their partition keys respect the partitioner order. This option should only be used if the rows can be provided in order, which is rarely the case. If the rows can be provided in order, however, using this option might be more efficient. If this option is used, some options like withBufferSizeInMB will be ignored.

build()
    Builds a CQLSSTableWriter object.
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/cdc.html b/src/doc/4.0-beta1/operating/cdc.html deleted file mode 100644 index fc7eca211..000000000 --- a/src/doc/4.0-beta1/operating/cdc.html +++ /dev/null @@ -1,194 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Change Data Capture" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Change Data Capture

-
-

Overview

-

Change data capture (CDC) provides a mechanism to flag specific tables for archival as well as rejecting writes to those tables once a configurable size-on-disk for the CDC log is reached. An operator can enable CDC on a table by setting the table property cdc=true (either when creating the table or altering it). Upon CommitLogSegment creation, a hard-link to the segment is created in the directory specified in cassandra.yaml. On segment fsync to disk, if CDC data is present anywhere in the segment a <segment_name>_cdc.idx file is also created with the integer offset of how much data in the original segment is persisted to disk. Upon final segment flush, a second line with the human-readable word “COMPLETED” will be added to the _cdc.idx file indicating that Cassandra has completed all processing on the file.

-

We use an index file rather than encouraging clients to parse the log in real time off a memory-mapped handle because data can be reflected in a kernel buffer that is not yet persisted to disk. Parsing only up to the listed offset in the _cdc.idx file ensures that you only parse CDC data that is durable.

-
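As an illustration, a consumer could check a segment's index file before parsing: it contains the durable offset and, once the segment is fully flushed, a second COMPLETED line. The file name and offset below are made up for the sketch; the directory is the default cdc_raw location described later:

$ cat $CASSANDRA_HOME/data/cdc_raw/CommitLog-7-1596780004479_cdc.idx
33465
COMPLETED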

A threshold of total disk space allowed is specified in the yaml at which time newly allocated CommitLogSegments will -not allow CDC data until a consumer parses and removes files from the specified cdc_raw directory.

-
-
-

Configuration

-
-

Enabling or disabling CDC on a table

-

CDC is enabled or disabled through the cdc table property, for instance:

-
CREATE TABLE foo (a int, b text, PRIMARY KEY(a)) WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=true;
-
-ALTER TABLE foo WITH cdc=false;
-
-
-
-
-

cassandra.yaml parameters

-

The following cassandra.yaml options are available for CDC:

-
-
cdc_enabled (default: false)
-
Enable or disable CDC operations node-wide.
-
cdc_raw_directory (default: $CASSANDRA_HOME/data/cdc_raw)
-
Destination for CommitLogSegments to be moved after all corresponding memtables are flushed.
-
cdc_free_space_in_mb (default: min of 4096 and 1/8th volume space)
-
Calculated as sum of all active CommitLogSegments that permit CDC + all flushed CDC segments in -cdc_raw_directory.
-
cdc_free_space_check_interval_ms (default: 250)
-
When at capacity, we limit the frequency with which we re-calculate the space taken up by cdc_raw_directory to -prevent burning CPU cycles unnecessarily. Default is to check 4 times per second.
-
-
-
-
-

Reading CommitLogSegments

-

Use CommitLogReader.java. Usage is fairly straightforward, with a variety of signatures available for use. In order to handle mutations read from disk, implement CommitLogReadHandler.

-
-
-

Warnings

-

Do not enable CDC without some kind of consumption process in-place.

-

If CDC is enabled on a node and then on a table, the cdc_free_space_in_mb limit will fill up, and writes to CDC-enabled tables will then be rejected unless some consumption process is in place.

-
-
-

Further Reading

- -
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/compaction/index.html b/src/doc/4.0-beta1/operating/compaction/index.html deleted file mode 100644 index 01841f9d9..000000000 --- a/src/doc/4.0-beta1/operating/compaction/index.html +++ /dev/null @@ -1,387 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compaction" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compaction

-
-

Strategies

-

Picking the right compaction strategy for your workload will ensure the best performance for both querying and for compaction itself.

-
-
Size Tiered Compaction Strategy
-
The default compaction strategy. Useful as a fallback when other strategies don’t fit the workload. Most useful for -non pure time series workloads with spinning disks, or when the I/O from LCS is too high.
-
Leveled Compaction Strategy
-
Leveled Compaction Strategy (LCS) is optimized for read heavy workloads, or workloads with lots of updates and deletes. It is not a good choice for immutable time series data.
-
Time Window Compaction Strategy
-
Time Window Compaction Strategy is designed for TTL’ed, mostly immutable time series data.
-
-
-
-

Types of compaction

-

The concept of compaction is used for different kinds of operations in Cassandra; the common thing about these operations is that they take one or more sstables and output new sstables. The types of compactions are:

-
-
Minor compaction
-
triggered automatically in Cassandra.
-
Major compaction
-
a user executes a compaction over all sstables on the node.
-
User defined compaction
-
a user triggers a compaction on a given set of sstables.
-
Scrub
-
try to fix any broken sstables. This can actually remove valid data if that data is corrupted, if that happens you -will need to run a full repair on the node.
-
Upgradesstables
-
upgrade sstables to the latest version. Run this after upgrading to a new major version.
-
Cleanup
-
remove any ranges this node does not own anymore, typically triggered on neighbouring nodes after a node has been -bootstrapped since that node will take ownership of some ranges from those nodes.
-
Secondary index rebuild
-
rebuild the secondary indexes on the node.
-
Anticompaction
-
after repair the ranges that were actually repaired are split out of the sstables that existed when repair started.
-
Sub range compaction
-
It is possible to only compact a given sub range - this could be useful if you know a token that has been misbehaving - either gathering many updates or many deletes. (nodetool compact -st x -et y) will pick all sstables containing the range between x and y and issue a compaction for those sstables (see the example following this list). For STCS this will most likely include all sstables but with LCS it can issue the compaction for a subset of the sstables. With LCS the resulting sstable will end up in L0.
-
-
-
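For example, a sub range compaction could be issued as follows (the token values, keyspace, and table names are illustrative placeholders):

nodetool compact -st 0 -et 3074457345618258602 keyspace_name table_name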
-

When is a minor compaction triggered?

-

1. When an sstable is added to the node through flushing/streaming etc.
2. When autocompaction is enabled after being disabled (nodetool enableautocompaction).
3. When compaction adds new sstables.
4. A check for new minor compactions every 5 minutes.

-
-
-

Merging sstables

-

Compaction is about merging sstables, since partitions in sstables are sorted based on the hash of the partition key it -is possible to efficiently merge separate sstables. Content of each partition is also sorted so each partition can be -merged efficiently.

-
-
-

Tombstones and Garbage Collection (GC) Grace

-
-

Why Tombstones

-

When a delete request is received by Cassandra it does not actually remove the data from the underlying store. Instead -it writes a special piece of data known as a tombstone. The Tombstone represents the delete and causes all values which -occurred before the tombstone to not appear in queries to the database. This approach is used instead of removing values -because of the distributed nature of Cassandra.

-
-
-

Deletes without tombstones

-

Imagine a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If one of the nodes fails and our delete operation only removes existing values, we can end up with a cluster that looks like:

-
[], [], [A]
-
-
-

Then a repair operation would replace the value of [A] back onto the two nodes which are missing the value:

-
[A], [A], [A]
-
-
-

This would cause our data to be resurrected even though it had been -deleted.

-
-
-

Deletes with Tombstones

-

Starting again with a three node cluster which has the value [A] replicated to every node.:

-
[A], [A], [A]
-
-
-

If instead of removing data we add a tombstone record, our single node failure situation will look like this.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A]
-
-
-

Now when we issue a repair the Tombstone will be copied to the replica, rather than the deleted data being -resurrected.:

-
[A, Tombstone[A]], [A, Tombstone[A]], [A, Tombstone[A]]
-
-
-

Our repair operation will correctly put the state of the system to what we expect with the record [A] marked as deleted -on all nodes. This does mean we will end up accruing Tombstones which will permanently accumulate disk space. To avoid -keeping tombstones forever we have a parameter known as gc_grace_seconds for every table in Cassandra.

-
-
-

The gc_grace_seconds parameter and Tombstone Removal

-

The table level gc_grace_seconds parameter controls how long Cassandra will retain tombstones through compaction events before finally removing them. This duration should directly reflect the amount of time a user expects to allow before recovering a failed node. After gc_grace_seconds has expired the tombstone may be removed (meaning there will no longer be any record that a certain piece of data was deleted), but as a tombstone can live in one sstable and the data it covers in another, a compaction must also include both sstables for a tombstone to be removed. More precisely, to be able to drop an actual tombstone the following needs to be true:

-
  • The tombstone must be older than gc_grace_seconds.
  • If partition X contains the tombstone, the sstable containing the partition plus all sstables containing data older than the tombstone containing X must be included in the same compaction. We don’t need to care if the partition is in an sstable if we can guarantee that all data in that sstable is newer than the tombstone. If the tombstone is older than the data it cannot shadow that data.
  • If the option only_purge_repaired_tombstones is enabled, tombstones are only removed if the data has also been repaired.

If a node remains down or disconnected for longer than gc_grace_seconds, its deleted data will be repaired back to the other nodes and re-appear in the cluster. This is basically the same as in the “Deletes without Tombstones” section. Note that tombstones will not be removed until a compaction event even if gc_grace_seconds has elapsed.

-

The default value for gc_grace_seconds is 864000, which is equivalent to 10 days. This can be set when creating or altering a table using WITH gc_grace_seconds, as shown in the example below.

-
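For example, lowering gc_grace_seconds to 3 days on an existing table could look like this (a sketch; keyspace and table names are placeholders):

cqlsh -e "ALTER TABLE keyspace_name.table_name WITH gc_grace_seconds = 259200;"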
-
-
-

TTL

-

Data in Cassandra can have an additional property called time to live - this is used to automatically drop data that has expired once the time is reached. Once the TTL has expired the data is converted to a tombstone which stays around for at least gc_grace_seconds. Note that if you mix data with TTL and data without TTL (or just different lengths of TTL) Cassandra will have a hard time dropping the tombstones created since the partition might span many sstables and not all are compacted at once.

-
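For example, a write that expires after one day could be issued as follows (a sketch; keyspace, table, and column names are placeholders):

cqlsh -e "INSERT INTO keyspace_name.table_name (id, v) VALUES (0, 'val0') USING TTL 86400;"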
-
-

Fully expired sstables

-

If an sstable contains only tombstones and it is guaranteed that the sstable is not shadowing data in any other sstable, compaction can drop that sstable. If you see sstables with only tombstones (note that TTL’d data is considered tombstones once the time to live has expired) but they are not being dropped by compaction, it is likely that other sstables contain older data. There is a tool called sstableexpiredblockers that will list which sstables are droppable and which are blocking them from being dropped (see the example below). This is especially useful for time series compaction with TimeWindowCompactionStrategy (and the deprecated DateTieredCompactionStrategy). With TimeWindowCompactionStrategy it is possible to remove the guarantee (not check for shadowing data) by enabling unsafe_aggressive_sstable_expiration.

-
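For example (keyspace and table names are placeholders):

sstableexpiredblockers keyspace_name table_name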
-
-

Repaired/unrepaired data

-

With incremental repairs Cassandra must keep track of what data is repaired and what data is unrepaired. With -anticompaction repaired data is split out into repaired and unrepaired sstables. To avoid mixing up the data again -separate compaction strategy instances are run on the two sets of data, each instance only knowing about either the -repaired or the unrepaired sstables. This means that if you only run incremental repair once and then never again, you -might have very old data in the repaired sstables that block compaction from dropping tombstones in the unrepaired -(probably newer) sstables.

-
-
-

Data directories

-

Since tombstones and data can live in different sstables it is important to realize that losing an sstable might lead to -data becoming live again - the most common way of losing sstables is to have a hard drive break down. To avoid making -data live tombstones and actual data are always in the same data directory. This way, if a disk is lost, all versions of -a partition are lost and no data can get undeleted. To achieve this a compaction strategy instance per data directory is -run in addition to the compaction strategy instances containing repaired/unrepaired data, this means that if you have 4 -data directories there will be 8 compaction strategy instances running. This has a few more benefits than just avoiding -data getting undeleted:

-
  • It is possible to run more compactions in parallel - leveled compaction will have several totally separate levelings and each one can run compactions independently from the others.
  • Users can back up and restore a single data directory.
  • Note though that currently all data directories are considered equal, so if you have a tiny disk and a big disk backing two data directories, the big one will be limited by the small one. One workaround is to create more data directories backed by the big disk.
-
-

Single sstable tombstone compaction

-

When an sstable is written, a histogram of the tombstone expiry times is created, and this is used to try to find sstables with very many tombstones and run single sstable compaction on that sstable in the hope of being able to drop tombstones in it. Before starting this it is also checked how likely it is that any tombstones will actually be able to be dropped, based on how much this sstable overlaps with other sstables. To avoid most of these checks the compaction option unchecked_tombstone_compaction can be enabled.

-
-
-

Common options

-

There are a number of common options for all the compaction strategies:

-
-
enabled (default: true)
-
Whether minor compactions should run. Note that you can have ‘enabled’: true as a compaction option and then do -‘nodetool enableautocompaction’ to start running compactions.
-
tombstone_threshold (default: 0.2)
-
How much of the sstable should be tombstones for us to consider doing a single sstable compaction of that sstable.
-
tombstone_compaction_interval (default: 86400s (1 day))
-
Since it might not be possible to drop any tombstones when doing a single sstable compaction we need to make sure -that one sstable is not constantly getting recompacted - this option states how often we should try for a given -sstable.
-
log_all (default: false)
-
New detailed compaction logging, see below.
-
unchecked_tombstone_compaction (default: false)
-
The single sstable compaction has quite strict checks for whether it should be started, this option disables those -checks and for some usecases this might be needed. Note that this does not change anything for the actual -compaction, tombstones are only dropped if it is safe to do so - it might just rewrite an sstable without being able -to drop any tombstones.
-
only_purge_repaired_tombstone (default: false)
-
Option to enable the extra safety of making sure that tombstones are only dropped if the data has been repaired.
-
min_threshold (default: 4)
-
Lower limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
max_threshold (default: 32)
-
Upper limit of number of sstables before a compaction is triggered. Not used for LeveledCompactionStrategy.
-
-

Further, see the section on each strategy for specific additional options.

-
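As an illustration, the common options above are set in the compaction map of a table; for example, raising min_threshold and enabling detailed compaction logging might look like this sketch (keyspace and table names are placeholders):

cqlsh -e "ALTER TABLE keyspace_name.table_name WITH compaction = {
    'class': 'SizeTieredCompactionStrategy',
    'min_threshold': '6',
    'log_all': 'true' };"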
-
-

Compaction nodetool commands

-

The nodetool utility provides a number of commands related to compaction:

-
-
enableautocompaction
-
Enable compaction.
-
disableautocompaction
-
Disable compaction.
-
setcompactionthroughput
-
How fast compaction should run at most - defaults to 16MB/s, but note that it is likely not possible to reach this -throughput.
-
compactionstats
-
Statistics about current and pending compactions.
-
compactionhistory
-
List details about the last compactions.
-
setcompactionthreshold
-
Set the min/max sstable count for when to trigger compaction, defaults to 4/32.
-
-
-
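For example (the values, keyspace, and table names are illustrative):

nodetool setcompactionthroughput 64                          # cap compaction throughput at 64 MB/s
nodetool setcompactionthreshold keyspace_name table_name 4 32
nodetool compactionstats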
-

Switching the compaction strategy and options using JMX

-

It is possible to switch compaction strategies and their options on just a single node using JMX; this is a great way to experiment with settings without affecting the whole cluster. The mbean is:

-
org.apache.cassandra.db:type=ColumnFamilies,keyspace=<keyspace_name>,columnfamily=<table_name>
-
-
-

and the attribute to change is CompactionParameters or CompactionParametersJson if you use jconsole or jmc. The -syntax for the json version is the same as you would use in an ALTER TABLE statement - -for example:

-
{ 'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb': 123, 'fanout_size': 10}
-
-
-

The setting is kept until someone executes an ALTER TABLE that touches the compaction -settings or restarts the node.

-
-
-

More detailed compaction logging

-

Enable with the compaction option log_all and a more detailed compaction log file will be produced in your log -directory.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/compaction/lcs.html b/src/doc/4.0-beta1/operating/compaction/lcs.html deleted file mode 100644 index 252a8f5b4..000000000 --- a/src/doc/4.0-beta1/operating/compaction/lcs.html +++ /dev/null @@ -1,147 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Leveled Compaction Strategy" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Leveled Compaction Strategy

-

The idea of LeveledCompactionStrategy (LCS) is that all sstables are put into different levels where we guarantee -that no overlapping sstables are in the same level. By overlapping we mean that the first/last token of a single sstable -are never overlapping with other sstables. This means that for a SELECT we will only have to look for the partition key -in a single sstable per level. Each level is 10x the size of the previous one and each sstable is 160MB by default. L0 -is where sstables are streamed/flushed - no overlap guarantees are given here.

-

When picking compaction candidates we have to make sure that the compaction does not create overlap in the target level. This is done by always including all overlapping sstables in the next level. For example, if we select an sstable in L3, we need to guarantee that we pick all overlapping sstables in L4 and make sure that no currently ongoing compactions will create overlap if we start that compaction. We can start many parallel compactions in a level if we guarantee that we won’t create overlap. For L0 -> L1 compactions we almost always need to include all L1 sstables since most L0 sstables cover the full range. We also can’t compact all L0 sstables with all L1 sstables in a single compaction since that can use too much memory.

-

When deciding which level to compact LCS checks the higher levels first (with LCS, a “higher” level is one with a higher -number, L0 being the lowest one) and if the level is behind a compaction will be started in that level.

-
-

Major compaction

-

It is possible to do a major compaction with LCS - it will currently start by filling out L1 and then once L1 is full, -it continues with L2 etc. This is sub optimal and will change to create all the sstables in a high level instead, -CASSANDRA-11817.

-
-
-

Bootstrapping

-

During bootstrap sstables are streamed from other nodes. The level of the remote sstable is kept to avoid many -compactions after the bootstrap is done. During bootstrap the new node also takes writes while it is streaming the data -from a remote node - these writes are flushed to L0 like all other writes and to avoid those sstables blocking the -remote sstables from going to the correct level, we only do STCS in L0 until the bootstrap is done.

-
-
-

STCS in L0

-

If LCS gets very many L0 sstables reads are going to hit all (or most) of the L0 sstables since they are likely to be -overlapping. To more quickly remedy this LCS does STCS compactions in L0 if there are more than 32 sstables there. This -should improve read performance more quickly compared to letting LCS do its L0 -> L1 compactions. If you keep getting -too many sstables in L0 it is likely that LCS is not the best fit for your workload and STCS could work out better.

-
-
-

Starved sstables

-

If a node ends up with a leveling where there are a few very high level sstables that are not getting compacted they -might make it impossible for lower levels to drop tombstones etc. For example, if there are sstables in L6 but there is -only enough data to actually get a L4 on the node the left over sstables in L6 will get starved and not compacted. This -can happen if a user changes sstable_size_in_mb from 5MB to 160MB for example. To avoid this LCS tries to include -those starved high level sstables in other compactions if there has been 25 compaction rounds where the highest level -has not been involved.

-
-
-

LCS options

-
-
sstable_size_in_mb (default: 160MB)
-
The target compressed (if using compression) sstable size - the sstables can end up being larger if there are very -large partitions on the node.
-
fanout_size (default: 10)
-
The target size of levels increases by this fanout_size multiplier. You can reduce the space amplification by tuning -this option.
-
-

LCS also supports the cassandra.disable_stcs_in_l0 startup option (-Dcassandra.disable_stcs_in_l0=true) to avoid doing STCS in L0.

-
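For example, switching a table to LCS with a larger target sstable size could look like this sketch (keyspace and table names are placeholders):

cqlsh -e "ALTER TABLE keyspace_name.table_name WITH compaction = {
    'class': 'LeveledCompactionStrategy',
    'sstable_size_in_mb': '320',
    'fanout_size': '10' };"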
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/compaction/stcs.html b/src/doc/4.0-beta1/operating/compaction/stcs.html deleted file mode 100644 index 5338696c3..000000000 --- a/src/doc/4.0-beta1/operating/compaction/stcs.html +++ /dev/null @@ -1,122 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Leveled Compaction Strategy" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Size Tiered Compaction Strategy

-

The basic idea of SizeTieredCompactionStrategy (STCS) is to merge sstables of approximately the same size. All -sstables are put in different buckets depending on their size. An sstable is added to the bucket if size of the sstable -is within bucket_low and bucket_high of the current average size of the sstables already in the bucket. This -will create several buckets and the most interesting of those buckets will be compacted. The most interesting one is -decided by figuring out which bucket’s sstables takes the most reads.

-
-

Major compaction

-

When running a major compaction with STCS you will end up with two sstables per data directory (one for repaired data -and one for unrepaired data). There is also an option (-s) to do a major compaction that splits the output into several -sstables. The sizes of the sstables are approximately 50%, 25%, 12.5%… of the total size.

-
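For example, a split-output major compaction could be run as follows (keyspace and table names are placeholders):

nodetool compact -s keyspace_name table_name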
-
-

STCS options

-
-
min_sstable_size (default: 50MB)
-
Sstables smaller than this are put in the same bucket.
-
bucket_low (default: 0.5)
-
How much smaller than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if bucket_low * avg_bucket_size < sstable_size (and the bucket_high condition holds, see below), then -the sstable is added to the bucket.
-
bucket_high (default: 1.5)
-
How much bigger than the average size of a bucket a sstable should be before not being included in the bucket. That -is, if sstable_size < bucket_high * avg_bucket_size (and the bucket_low condition holds, see above), then -the sstable is added to the bucket.
-
-
-
-

Defragmentation

-

Defragmentation is done when many sstables are touched during a read. The result of the read is put in to the memtable -so that the next read will not have to touch as many sstables. This can cause writes on a read-only-cluster.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/compaction/twcs.html b/src/doc/4.0-beta1/operating/compaction/twcs.html deleted file mode 100644 index 4208e354b..000000000 --- a/src/doc/4.0-beta1/operating/compaction/twcs.html +++ /dev/null @@ -1,140 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Time Window CompactionStrategy" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Time Window CompactionStrategy

-

TimeWindowCompactionStrategy (TWCS) is designed specifically for workloads where it’s beneficial to have data on -disk grouped by the timestamp of the data, a common goal when the workload is time-series in nature or when all data is -written with a TTL. In an expiring/TTL workload, the contents of an entire SSTable likely expire at approximately the -same time, allowing them to be dropped completely, and space reclaimed much more reliably than when using -SizeTieredCompactionStrategy or LeveledCompactionStrategy. The basic concept is that -TimeWindowCompactionStrategy will create 1 sstable per file for a given window, where a window is simply calculated -as the combination of two primary options:

-
-
compaction_window_unit (default: DAYS)
-
A Java TimeUnit (MINUTES, HOURS, or DAYS).
-
compaction_window_size (default: 1)
-
The number of units that make up a window.
-
unsafe_aggressive_sstable_expiration (default: false)
-
Expired sstables will be dropped without checking whether their data is shadowing other sstables. This is a potentially risky option that can lead to data loss or deleted data re-appearing, going beyond what unchecked_tombstone_compaction does for single sstable compaction. Due to the risk the jvm must also be started with -Dcassandra.unsafe_aggressive_sstable_expiration=true.
-
-

Taken together, the operator can specify windows of virtually any size, and TimeWindowCompactionStrategy will work to -create a single sstable for writes within that window. For efficiency during writing, the newest window will be -compacted using SizeTieredCompactionStrategy.

-

Ideally, operators should select a compaction_window_unit and compaction_window_size pair that produces -approximately 20-30 windows - if writing with a 90 day TTL, for example, a 3 Day window would be a reasonable choice -('compaction_window_unit':'DAYS','compaction_window_size':3).

-
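For example, a table for data written with a 90 day TTL using 3 day windows might be created as in this sketch (the table and column names are made up for illustration):

cqlsh -e "CREATE TABLE keyspace_name.events (
    id int, ts timestamp, v text,
    PRIMARY KEY (id, ts))
    WITH compaction = { 'class': 'TimeWindowCompactionStrategy',
                        'compaction_window_unit': 'DAYS',
                        'compaction_window_size': '3' }
    AND default_time_to_live = 7776000;"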
-

TimeWindowCompactionStrategy Operational Concerns

-

The primary motivation for TWCS is to separate data on disk by timestamp and to allow fully expired SSTables to drop -more efficiently. One potential way this optimal behavior can be subverted is if data is written to SSTables out of -order, with new data and old data in the same SSTable. Out of order data can appear in two ways:

-
    -
  • If the user mixes old data and new data in the traditional write path, the data will be comingled in the memtables -and flushed into the same SSTable, where it will remain comingled.
  • -
  • If the user’s read requests for old data cause read repairs that pull old data into the current memtable, that data -will be comingled and flushed into the same SSTable.
  • -
-

While TWCS tries to minimize the impact of comingled data, users should attempt to avoid this behavior. Specifically, -users should avoid queries that explicitly set the timestamp via CQL USING TIMESTAMP. Additionally, users should run -frequent repairs (which streams data in such a way that it does not become comingled).

-
-
-

Changing TimeWindowCompactionStrategy Options

-

Operators wishing to enable TimeWindowCompactionStrategy on existing data should consider running a major compaction -first, placing all existing data into a single (old) window. Subsequent newer writes will then create typical SSTables -as expected.

-

Operators wishing to change compaction_window_unit or compaction_window_size can do so, but may trigger additional compactions as adjacent windows are joined together. If the window size is decreased (for example, from 24 hours to 12 hours), then the existing SSTables will not be modified - TWCS cannot split existing SSTables into multiple windows.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/compression.html b/src/doc/4.0-beta1/operating/compression.html deleted file mode 100644 index a04c1f491..000000000 --- a/src/doc/4.0-beta1/operating/compression.html +++ /dev/null @@ -1,294 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Compression" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Compression

-

Cassandra offers operators the ability to configure compression on a per-table basis. Compression reduces the size of -data on disk by compressing the SSTable in user-configurable compression chunk_length_in_kb. As Cassandra SSTables -are immutable, the CPU cost of compressing is only necessary when the SSTable is written - subsequent updates -to data will land in different SSTables, so Cassandra will not need to decompress, overwrite, and recompress data when -UPDATE commands are issued. On reads, Cassandra will locate the relevant compressed chunks on disk, decompress the full -chunk, and then proceed with the remainder of the read path (merging data from disks and memtables, read repair, and so -on).

-

Compression algorithms typically trade off between the following three areas:

-
    -
  • Compression speed: How fast does the compression algorithm compress data. This is critical in the flush and -compaction paths because data must be compressed before it is written to disk.
  • -
  • Decompression speed: How fast does the compression algorithm de-compress data. This is critical in the read -and compaction paths as data must be read off disk in a full chunk and decompressed before it can be returned.
  • -
  • Ratio: By what ratio is the uncompressed data reduced by. Cassandra typically measures this as the size of data -on disk relative to the uncompressed size. For example a ratio of 0.5 means that the data on disk is 50% the size -of the uncompressed data. Cassandra exposes this ratio per table as the SSTable Compression Ratio field of -nodetool tablestats.
  • -
-

Cassandra offers five compression algorithms by default that make different tradeoffs in these areas. While -benchmarking compression algorithms depends on many factors (algorithm parameters such as compression level, -the compressibility of the input data, underlying processor class, etc …), the following table should help you pick -a starting point based on your application’s requirements with an extremely rough grading of the different choices -by their performance in these areas (A is relatively good, F is relatively bad):

Compression Algorithm   Cassandra Class     Compression   Decompression   Ratio   C* Version
LZ4                     LZ4Compressor       A+            A+              C+      >= 1.2.2
LZ4HC                   LZ4Compressor       C+            A+              B+      >= 3.6
Zstd                    ZstdCompressor      A-            A-              A+      >= 4.0
Snappy                  SnappyCompressor    A-            A               C       >= 1.0
Deflate (zlib)          DeflateCompressor   C             C               A       >= 1.0
-

Generally speaking for a performance critical (latency or throughput) application LZ4 is the right choice as it -gets excellent ratio per CPU cycle spent. This is why it is the default choice in Cassandra.

-

For storage critical applications (disk footprint), however, Zstd may be a better choice as it can get significant -additional ratio to LZ4.

-

Snappy is kept for backwards compatibility and LZ4 will typically be preferable.

-

Deflate is kept for backwards compatibility and Zstd will typically be preferable.

-
-

Configuring Compression

-

Compression is configured on a per-table basis as an optional argument to CREATE TABLE or ALTER TABLE. Three -options are available for all compressors:

-
    -
  • class (default: LZ4Compressor): specifies the compression class to use. The two “fast” -compressors are LZ4Compressor and SnappyCompressor and the two “good” ratio compressors are ZstdCompressor -and DeflateCompressor.
  • -
  • chunk_length_in_kb (default: 16KiB): specifies the number of kilobytes of data per compression chunk. The main -tradeoff here is that larger chunk sizes give compression algorithms more context and improve their ratio, but -require reads to deserialize and read more off disk.
  • -
  • crc_check_chance (default: 1.0): determines how likely Cassandra is to verify the checksum on each compression -chunk during reads to protect against data corruption. Unless you have profiles indicating this is a performance -problem it is highly encouraged not to turn this off as it is Cassandra’s only protection against bitrot.
  • -
-

The LZ4Compressor supports the following additional options:

-
    -
  • lz4_compressor_type (default fast): specifies if we should use the high (a.k.a LZ4HC) ratio version -or the fast (a.k.a LZ4) version of LZ4. The high mode supports a configurable level, which can allow -operators to tune the performance <-> ratio tradeoff via the lz4_high_compressor_level option. Note that in -4.0 and above it may be preferable to use the Zstd compressor.
  • -
  • lz4_high_compressor_level (default 9): A number between 1 and 17 inclusive that represents how much -CPU time to spend trying to get more compression ratio. Generally lower levels are “faster” but they get less ratio -and higher levels are slower but get more compression ratio.
  • -
-

The ZstdCompressor supports the following options in addition:

-
    -
  • compression_level (default 3): A number between -131072 and 22 inclusive that represents how much CPU -time to spend trying to get more compression ratio. The lower the level, the faster the speed (at the cost of ratio). -Values from 20 to 22 are called “ultra levels” and should be used with caution, as they require more memory. -The default of 3 is a good choice for competing with Deflate ratios and 1 is a good choice for competing -with LZ4.
  • -
-

Users can set compression using the following syntax:

-
CREATE TABLE keyspace.table (id int PRIMARY KEY) WITH compression = {'class': 'LZ4Compressor'};
-
-
-

Or

-
ALTER TABLE keyspace.table WITH compression = {'class': 'LZ4Compressor', 'chunk_length_in_kb': 64, 'crc_check_chance': 0.5};
-
-
-

Once enabled, compression can be disabled with ALTER TABLE setting enabled to false:

-
ALTER TABLE keyspace.table WITH compression = {'enabled':'false'};
-
-
-

Operators should be aware, however, that changing compression is not immediate. The data is compressed when the SSTable -is written, and as SSTables are immutable, the compression will not be modified until the table is compacted. Upon -issuing a change to the compression options via ALTER TABLE, the existing SSTables will not be modified until they -are compacted - if an operator needs compression changes to take effect immediately, the operator can trigger an SSTable -rewrite using nodetool scrub or nodetool upgradesstables -a, both of which will rebuild the SSTables on disk, -re-compressing the data in the process.

-
-
-

Benefits and Uses

-

Compression’s primary benefit is that it reduces the amount of data written to disk. Not only does the reduced size save -in storage requirements, it often increases read and write throughput, as the CPU overhead of compressing data is faster -than the time it would take to read or write the larger volume of uncompressed data from disk.

-

Compression is most useful in tables comprised of many rows, where the rows are similar in nature. Tables containing -similar text columns (such as repeated JSON blobs) often compress very well. Tables containing data that has already -been compressed or random data (e.g. benchmark datasets) do not typically compress well.

-
-
-

Operational Impact

-
    -
  • Compression metadata is stored off-heap and scales with data on disk. This often requires 1-3GB of off-heap RAM per -terabyte of data on disk, though the exact usage varies with chunk_length_in_kb and compression ratios.
  • -
  • Streaming operations involve compressing and decompressing data on compressed tables - in some code paths (such as -non-vnode bootstrap), the CPU overhead of compression can be a limiting factor.
  • -
  • To prevent slow compressors (Zstd, Deflate, LZ4HC) from blocking flushes for too long, all three -flush with the default fast LZ4 compressor and then rely on normal compaction to re-compress the data into the -desired compression strategy. See CASSANDRA-15379 <https://issues.apache.org/jira/browse/CASSANDRA-15379> for more -details.
  • -
  • The compression path checksums data to ensure correctness - while the traditional Cassandra read path does not have a -way to ensure correctness of data on disk, compressed tables allow the user to set crc_check_chance (a float from -0.0 to 1.0) to allow Cassandra to probabilistically validate chunks on read to verify bits on disk are not corrupt.
  • -
-
-
-

Advanced Use

-

Advanced users can provide their own compression class by implementing the interface at -org.apache.cassandra.io.compress.ICompressor.

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/hardware.html b/src/doc/4.0-beta1/operating/hardware.html deleted file mode 100644 index fca92c997..000000000 --- a/src/doc/4.0-beta1/operating/hardware.html +++ /dev/null @@ -1,189 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hardware Choices" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hardware Choices

-

Like most databases, Cassandra throughput improves with more CPU cores, more RAM, and faster disks. While Cassandra can -be made to run on small servers for testing or development environments (including Raspberry Pis), a minimal production -server requires at least 2 cores, and at least 8GB of RAM. Typical production servers have 8 or more cores and at least -32GB of RAM.

-
-

CPU

-

Cassandra is highly concurrent, handling many simultaneous requests (both read and write) using multiple threads running -on as many CPU cores as possible. The Cassandra write path tends to be heavily optimized (writing to the commitlog and -then inserting the data into the memtable), so writes, in particular, tend to be CPU bound. Consequently, adding -additional CPU cores often increases throughput of both reads and writes.

-
-
-

Memory

-

Cassandra runs within a Java VM, which will pre-allocate a fixed size heap (Java’s Xmx system parameter). In addition to the heap, Cassandra will use significant amounts of RAM off-heap for compression metadata, bloom filters, row, key, and counter caches, and an in-process page cache. Finally, Cassandra will take advantage of the operating system’s page cache, storing recently accessed portions of files in RAM for rapid re-use.

-

For optimal performance, operators should benchmark and tune their clusters based on their individual workload. However, -basic guidelines suggest:

-
    -
  • ECC RAM should always be used, as Cassandra has few internal safeguards to protect against bit level corruption
  • -
  • The Cassandra heap should be no less than 2GB, and no more than 50% of your system RAM
  • -
  • Heaps smaller than 12GB should consider ParNew/ConcurrentMarkSweep garbage collection
  • -
  • Heaps larger than 12GB should consider G1GC
  • -
-
-
-

Disks

-

Cassandra persists data to disk for two very different purposes. The first is to the commitlog when a new write is made -so that it can be replayed after a crash or system shutdown. The second is to the data directory when thresholds are -exceeded and memtables are flushed to disk as SSTables.

-

Commitlogs receive every write made to a Cassandra node and have the potential to block client operations, but they are -only ever read on node start-up. SSTable (data file) writes on the other hand occur asynchronously, but are read to -satisfy client look-ups. SSTables are also periodically merged and rewritten in a process called compaction. The data -held in the commitlog directory is data that has not been permanently saved to the SSTable data directories - it will be -periodically purged once it is flushed to the SSTable data files.

-

Cassandra performs very well on both spinning hard drives and solid state disks. In both cases, Cassandra’s sorted -immutable SSTables allow for linear reads, few seeks, and few overwrites, maximizing throughput for HDDs and lifespan of -SSDs by avoiding write amplification. However, when using spinning disks, it’s important that the commitlog -(commitlog_directory) be on one physical disk (not simply a partition, but a physical disk), and the data files -(data_file_directories) be set to a separate physical disk. By separating the commitlog from the data directory, -writes can benefit from sequential appends to the commitlog without having to seek around the platter as reads request -data from various SSTables on disk.
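As a sketch only (the mount points are hypothetical), the relevant cassandra.yaml settings and a quick sanity check that the two directories really sit on different physical devices could look like this:

```bash
# Hypothetical layout: /mnt/commitlog and /mnt/data are separate physical disks.
# conf/cassandra.yaml would contain:
#   commitlog_directory: /mnt/commitlog/cassandra
#   data_file_directories:
#       - /mnt/data/cassandra
df /mnt/commitlog /mnt/data   # each directory should resolve to a different device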

-

In most cases, Cassandra is designed to provide redundancy via multiple independent, inexpensive servers. For this -reason, using NFS or a SAN for data directories is an antipattern and should typically be avoided. Similarly, servers -with multiple disks are often better served by using RAID0 or JBOD than RAID1 or RAID5 - replication provided by -Cassandra obsoletes the need for replication at the disk layer, so it’s typically recommended that operators take -advantage of the additional throughput of RAID0 rather than protecting against failures with RAID1 or RAID5.

-
-
-

Common Cloud Choices

-

Many large users of Cassandra run in various clouds, including AWS, Azure, and GCE - Cassandra will happily run in any -of these environments. Users should choose similar hardware to what would be needed in physical space. In EC2, popular -options include:

-
    -
  • i2 instances, which provide both a high RAM:CPU ratio and local ephemeral SSDs
  • -
  • m4.2xlarge / c4.4xlarge instances, which provide modern CPUs, enhanced networking and work well with EBS GP2 (SSD) -storage
  • -
-

Generally, disk and network performance increases with instance size and generation, so newer generations of instances -and larger instance types within each family often perform better than their smaller or older alternatives.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/hints.html b/src/doc/4.0-beta1/operating/hints.html deleted file mode 100644 index 273727e98..000000000 --- a/src/doc/4.0-beta1/operating/hints.html +++ /dev/null @@ -1,402 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Hints" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Hints

-

Hinting is a data repair technique applied during write operations. When -replica nodes are unavailable to accept a mutation, either due to failure or -more commonly routine maintenance, coordinators attempting to write to those -replicas store temporary hints on their local filesystem for later application -to the unavailable replica. Hints are an important way to help reduce the -duration of data inconsistency. Coordinators replay hints quickly after -unavailable replica nodes return to the ring. Hints are best effort, however, -and do not guarantee eventual consistency like anti-entropy repair does.

-

Hints are useful because of how Apache Cassandra replicates data to provide -fault tolerance, high availability and durability. Cassandra partitions -data across the cluster using consistent -hashing, and then replicates keys to multiple nodes along the hash ring. To -guarantee availability, all replicas of a key can accept mutations without -consensus, but this means it is possible for some replicas to accept a mutation -while others do not. When this happens an inconsistency is introduced.

-

Hints are one of the three ways, in addition to read-repair and -full/incremental anti-entropy repair, that Cassandra implements the eventual -consistency guarantee that all updates are eventually received by all replicas. -Hints, like read-repair, are best effort and not an alternative to performing -full repair, but they do help reduce the duration of inconsistency between -replicas in practice.

-
-

Hinted Handoff

-

Hinted handoff is the process by which Cassandra applies hints to unavailable -nodes.

-

For example, consider a mutation is to be made at Consistency Level -LOCAL_QUORUM against a keyspace with Replication Factor of 3. -Normally the client sends the mutation to a single coordinator, who then sends -the mutation to all three replicas, and when two of the three replicas -acknowledge the mutation the coordinator responds successfully to the client. -If a replica node is unavailable, however, the coordinator stores a hint -locally to the filesystem for later application. New hints will be retained for -up to max_hint_window_in_ms of downtime (defaults to 3 hours). If the -unavailable replica does return to the cluster before the window expires, the -coordinator applies any pending hinted mutations against the replica to ensure -that eventual consistency is maintained.

-
-Hinted Handoff Example

Hinted Handoff in Action

-
-
    -
  • (t0): The write is sent by the client, and the coordinator sends it -to the three replicas. Unfortunately replica_2 is restarting and cannot -receive the mutation.
  • -
  • (t1): The client receives a quorum acknowledgement from the coordinator. At this point the client believes the write to be durable and visible to reads (which it is).
  • -
  • (t2): After the write timeout (default 2s), the coordinator decides -that replica_2 is unavailable and stores a hint to its local disk.
  • -
  • (t3): Later, when replica_2 starts back up it sends a gossip message -to all nodes, including the coordinator.
  • -
  • (t4): The coordinator replays hints including the missed mutation -against replica_2.
  • -
-

If the node does not return in time, the destination replica will be -permanently out of sync until either read-repair or full/incremental -anti-entropy repair propagates the mutation.

-
-

Application of Hints

-

Hints are streamed in bulk, a segment at a time, to the target replica node and -the target node replays them locally. After the target node has replayed a -segment it deletes the segment and receives the next segment. This continues -until all hints are drained.

-
-
-

Storage of Hints on Disk

-

Hints are stored in flat files in the coordinator node’s -$CASSANDRA_HOME/data/hints directory. A hint includes a hint id, the target -replica node on which the mutation is meant to be stored, the serialized -mutation (stored as a blob) that couldn’t be delivered to the replica node, the -mutation timestamp, and the Cassandra version used to serialize the mutation. -By default hints are compressed using LZ4Compressor. Multiple hints are -appended to the same hints file.
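To see what is pending on a coordinator, listing that directory is usually enough (assuming the default hints_directory):

```bash
# Each file contains multiple serialized hints destined for a single target replica.
ls -lh "$CASSANDRA_HOME/data/hints"
```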

-

Since hints contain the original unmodified mutation timestamp, hint application -is idempotent and cannot overwrite a future mutation.

-
-
-

Hints for Timed Out Write Requests

-

Hints are also stored for write requests that time out. The -write_request_timeout_in_ms setting in cassandra.yaml configures the -timeout for write requests.

-
write_request_timeout_in_ms: 2000
-
-
-

The coordinator waits for the configured amount of time for write requests to -complete, at which point it will time out and generate a hint for the timed out -request. The lowest acceptable value for write_request_timeout_in_ms is 10 ms.

-
-
-
-

Configuring Hints

-

Hints are enabled by default as they are critical for data consistency. The -cassandra.yaml configuration file provides several settings for configuring -hints:

-

Table 1. Settings for Hints

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
SettingDescriptionDefault Value
hinted_handoff_enabledEnables/Disables hinted handoffstrue
hinted_handoff_disabled_datacenters

A list of data centers that do not perform -hinted handoffs even when handoff is -otherwise enabled. -Example:

-
-
hinted_handoff_disabled_datacenters:
-  - DC1
-  - DC2
-
-
-
-
unset
max_hint_window_in_msDefines the maximum amount of time (ms) -a node shall have hints generated after it -has failed.10800000 # 3 hours
hinted_handoff_throttle_in_kbMaximum throttle in KBs per second, per -delivery thread. This will be reduced -proportionally to the number of nodes in -the cluster. -(If there are two nodes in the cluster, -each delivery thread will use the maximum -rate; if there are 3, each will throttle -to half of the maximum,since it is expected -for two nodes to be delivering hints -simultaneously.)1024
max_hints_delivery_threadsNumber of threads with which to deliver -hints; Consider increasing this number when -you have multi-dc deployments, since -cross-dc handoff tends to be slower2
hints_directoryDirectory where Cassandra stores hints.$CASSANDRA_HOME/data/hints
hints_flush_period_in_msHow often hints should be flushed from the -internal buffers to disk. Will not -trigger fsync.10000
max_hints_file_size_in_mbMaximum size for a single hints file, in -megabytes.128
hints_compressionCompression to apply to the hint files. -If omitted, hints files will be written -uncompressed. LZ4, Snappy, and Deflate -compressors are supported.LZ4Compressor
-
-
-

Configuring Hints at Runtime with nodetool

-

nodetool provides several commands for configuring hints or getting hints -related information. The nodetool commands override the corresponding -settings if any in cassandra.yaml for the node running the command.

-

Table 2. Nodetool Commands for Hints

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
CommandDescription
nodetool disablehandoffDisables storing and delivering hints
nodetool disablehintsfordcDisables storing and delivering hints to a -data center
nodetool enablehandoffRe-enables future hints storing and -delivery on the current node
nodetool enablehintsfordcEnables hints for a data center that was -previously disabled
nodetool getmaxhintwindowPrints the max hint window in ms. New in -Cassandra 4.0.
nodetool handoffwindowPrints current hinted handoff window
nodetool pausehandoffPauses hints delivery process
nodetool resumehandoffResumes hints delivery process
nodetool -sethintedhandoffthrottlekbSets hinted handoff throttle in kb -per second, per delivery thread
nodetool setmaxhintwindowSets the specified max hint window in ms
nodetool statushandoffStatus of storing future hints on the -current node
nodetool truncatehintsTruncates all hints on the local node, or -truncates hints for the endpoint(s) -specified.
-
-

Make Hints Play Faster at Runtime

-

The default handoff throttle of 1024 KiB per second is conservative for most modern networks, and it is entirely possible that in a simple node restart you may accumulate many gigabytes of hints that may take hours to play back. For example, if you are ingesting 100 Mbps of data per node, a single 10 minute long restart will create 10 minutes * (100 megabit / second) ~= 7 GiB of data, which at 1024 KiB / second would take roughly 7 GiB / (1 MiB / second) ~= 2 hours to play back. The exact math depends on the load balancing strategy (round robin is better than token aware), the number of tokens per node (more tokens is better than fewer), and naturally the cluster’s write rate, but regardless you may find yourself wanting to increase this throttle at runtime.

-

If you find yourself in such a situation, you may consider raising -the hinted_handoff_throttle dynamically via the -nodetool sethintedhandoffthrottlekb command.
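For example, to raise the throttle from the default 1024 KiB/s to 4096 KiB/s on a node (the value is only an illustration):

```bash
# Raise the per-delivery-thread hint throttle on this node to 4096 KB per second.
nodetool sethintedhandoffthrottlekb 4096

# Confirm hint storage and delivery are still enabled on this node.
nodetool statushandoff
```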

-
-
-

Allow a Node to be Down Longer at Runtime

-

Sometimes a node may be down for more than the normal max_hint_window_in_ms, -(default of three hours), but the hardware and data itself will still be -accessible. In such a case you may consider raising the -max_hint_window_in_ms dynamically via the nodetool setmaxhintwindow -command added in Cassandra 4.0 (CASSANDRA-11720). -This will instruct Cassandra to continue holding hints for the down -endpoint for a longer amount of time.
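For example, to allow hints to accumulate for a full day instead of three hours (the value, in milliseconds, is chosen here only for illustration):

```bash
# Hold hints for a down endpoint for up to 24 hours (86400000 ms).
nodetool setmaxhintwindow 86400000

# Verify the new window; getmaxhintwindow is available from Cassandra 4.0.
nodetool getmaxhintwindow
```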

-

This command should be applied on all nodes in the cluster that may be holding -hints. If needed, the setting can be applied permanently by setting the -max_hint_window_in_ms setting in cassandra.yaml followed by a rolling -restart.

-
-
-
-

Monitoring Hint Delivery

-

Cassandra 4.0 adds histograms that track how long it takes to deliver hints, which is useful for operators to better identify delivery problems (CASSANDRA-13234).

-

There are also metrics available for tracking Hinted Handoff -and Hints Service metrics.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/index.html b/src/doc/4.0-beta1/operating/index.html deleted file mode 100644 index e64356a5d..000000000 --- a/src/doc/4.0-beta1/operating/index.html +++ /dev/null @@ -1,252 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Operating Cassandra" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Operating Cassandra

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/metrics.html b/src/doc/4.0-beta1/operating/metrics.html deleted file mode 100644 index 48d0c96d6..000000000 --- a/src/doc/4.0-beta1/operating/metrics.html +++ /dev/null @@ -1,1801 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Monitoring" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Monitoring

-

Metrics in Cassandra are managed using the Dropwizard Metrics library. These metrics -can be queried via JMX or pushed to external monitoring systems using a number of built in and third party reporter plugins.

-

Metrics are collected for a single node. It’s up to the operator to use an external monitoring system to aggregate them.

-
-

Metric Types

-

All metrics reported by cassandra fit into one of the following types.

-
-
Gauge
-
An instantaneous measurement of a value.
-
Counter
-
A gauge for an AtomicLong instance. Typically this is consumed by monitoring the change since the last call to -see if there is a large increase compared to the norm.
-
Histogram
-

Measures the statistical distribution of values in a stream of data.

-

In addition to minimum, maximum, mean, etc., it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th -percentiles.

-
-
Timer
-
Measures both the rate that a particular piece of code is called and the histogram of its duration.
-
Latency
-
Special type that tracks latency (in microseconds) with a Timer plus a Counter that tracks the total latency -accrued since starting. The former is useful if you track the change in total latency since the last check. Each -metric name of this type will have ‘Latency’ and ‘TotalLatency’ appended to it.
-
Meter
-
A meter metric which measures mean throughput and one-, five-, and fifteen-minute exponentially-weighted moving -average throughputs.
-
-
-
-

Table Metrics

-

Each table in Cassandra has metrics responsible for tracking its state and performance.

-

The metric names are all appended with the specific Keyspace and Table name.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Table.<MetricName>.<Keyspace>.<Table>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Table keyspace=<Keyspace> scope=<Table> name=<MetricName>
-
-
-

Note

-

There is a special table called ‘all’ without a keyspace. This represents the aggregation of metrics across -all tables and keyspaces on the node.

-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
MemtableOnHeapSizeGauge<Long>Total amount of data stored in the memtable that resides on-heap, including column related overhead and partitions overwritten.
MemtableOffHeapSizeGauge<Long>Total amount of data stored in the memtable that resides off-heap, including column related overhead and partitions overwritten.
MemtableLiveDataSizeGauge<Long>Total amount of live data stored in the memtable, excluding any data structure overhead.
AllMemtablesOnHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides on-heap.
AllMemtablesOffHeapSizeGauge<Long>Total amount of data stored in the memtables (2i and pending flush memtables included) that resides off-heap.
AllMemtablesLiveDataSizeGauge<Long>Total amount of live data stored in the memtables (2i and pending flush memtables included) that resides off-heap, excluding any data structure overhead.
MemtableColumnsCountGauge<Long>Total number of columns present in the memtable.
MemtableSwitchCountCounterNumber of times flush has resulted in the memtable being switched out.
CompressionRatioGauge<Double>Current compression ratio for all SSTables.
EstimatedPartitionSizeHistogramGauge<long[]>Histogram of estimated partition size (in bytes).
EstimatedPartitionCountGauge<Long>Approximate number of keys in table.
EstimatedColumnCountHistogramGauge<long[]>Histogram of estimated number of columns.
SSTablesPerReadHistogramHistogramHistogram of the number of sstable data files accessed per single partition read. SSTables skipped due to Bloom Filters, min-max key or partition index lookup are not taken into account.
ReadLatencyLatencyLocal read latency for this table.
RangeLatencyLatencyLocal range scan latency for this table.
WriteLatencyLatencyLocal write latency for this table.
CoordinatorReadLatencyTimerCoordinator read latency for this table.
CoordinatorWriteLatencyTimerCoordinator write latency for this table.
CoordinatorScanLatencyTimerCoordinator range scan latency for this table.
PendingFlushesCounterEstimated number of flush tasks pending for this table.
BytesFlushedCounterTotal number of bytes flushed since server [re]start.
CompactionBytesWrittenCounterTotal number of bytes written by compaction since server [re]start.
PendingCompactionsGauge<Integer>Estimate of number of pending compactions for this table.
LiveSSTableCountGauge<Integer>Number of SSTables on disk for this table.
LiveDiskSpaceUsedCounterDisk space used by SSTables belonging to this table (in bytes).
TotalDiskSpaceUsedCounterTotal disk space used by SSTables belonging to this table, including obsolete ones waiting to be GC’d.
MinPartitionSizeGauge<Long>Size of the smallest compacted partition (in bytes).
MaxPartitionSizeGauge<Long>Size of the largest compacted partition (in bytes).
MeanPartitionSizeGauge<Long>Size of the average compacted partition (in bytes).
BloomFilterFalsePositivesGauge<Long>Number of false positives on table’s bloom filter.
BloomFilterFalseRatioGauge<Double>False positive ratio of table’s bloom filter.
BloomFilterDiskSpaceUsedGauge<Long>Disk space used by bloom filter (in bytes).
BloomFilterOffHeapMemoryUsedGauge<Long>Off-heap memory used by bloom filter.
IndexSummaryOffHeapMemoryUsedGauge<Long>Off-heap memory used by index summary.
CompressionMetadataOffHeapMemoryUsedGauge<Long>Off-heap memory used by compression meta data.
KeyCacheHitRateGauge<Double>Key cache hit rate for this table.
TombstoneScannedHistogramHistogramHistogram of tombstones scanned in queries on this table.
LiveScannedHistogramHistogramHistogram of live cells scanned in queries on this table.
ColUpdateTimeDeltaHistogramHistogramHistogram of column update time delta on this table.
ViewLockAcquireTimeTimerTime taken acquiring a partition lock for materialized view updates on this table.
ViewReadTimeTimerTime taken during the local read of a materialized view update.
TrueSnapshotsSizeGauge<Long>Disk space used by snapshots of this table including all SSTable components.
RowCacheHitOutOfRangeCounterNumber of table row cache hits that do not satisfy the query filter, thus went to disk.
RowCacheHitCounterNumber of table row cache hits.
RowCacheMissCounterNumber of table row cache misses.
CasPrepareLatencyLatency of paxos prepare round.
CasProposeLatencyLatency of paxos propose round.
CasCommitLatencyLatency of paxos commit round.
PercentRepairedGauge<Double>Percent of table data that is repaired on disk.
BytesRepairedGauge<Long>Size of table data repaired on disk
BytesUnrepairedGauge<Long>Size of table data unrepaired on disk
BytesPendingRepairGauge<Long>Size of table data isolated for an ongoing incremental repair
SpeculativeRetriesCounterNumber of times speculative retries were sent for this table.
SpeculativeFailedRetriesCounterNumber of speculative retries that failed to prevent a timeout
SpeculativeInsufficientReplicasCounterNumber of speculative retries that couldn’t be attempted due to lack of replicas
SpeculativeSampleLatencyNanosGauge<Long>Number of nanoseconds to wait before speculation is attempted. Value may be statically configured or updated periodically based on coordinator latency.
WaitingOnFreeMemtableSpaceHistogramHistogram of time spent waiting for free memtable space, either on- or off-heap.
DroppedMutationsCounterNumber of dropped mutations on this table.
AnticompactionTimeTimerTime spent anticompacting before a consistent repair.
ValidationTimeTimerTime spent doing validation compaction during repair.
SyncTimeTimerTime spent doing streaming during repair.
BytesValidatedHistogramHistogram over the amount of bytes read during validation.
PartitionsValidatedHistogramHistogram over the number of partitions read during validation.
BytesAnticompactedCounterHow many bytes we anticompacted.
BytesMutatedAnticompactionCounterHow many bytes we avoided anticompacting because the sstable was fully contained in the repaired range.
MutatedAnticompactionGaugeGauge<Double>Ratio of bytes mutated vs total bytes repaired.
-
-
-

Keyspace Metrics

-

Each keyspace in Cassandra has metrics responsible for tracking its state and performance.

-

Most of these metrics are the same as the Table Metrics above, only they are aggregated at the Keyspace level. The keyspace specific metrics are specified in the table below.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.keyspace.<MetricName>.<Keyspace>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Keyspace scope=<Keyspace> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
WriteFailedIdeaCLCounterNumber of writes that failed to achieve the configured ideal consistency level or 0 if none is configured
IdealCLWriteLatencyLatencyCoordinator latency of writes at the configured ideal consistency level. No values are recorded if ideal consistency level is not configured
RepairTimeTimerTotal time spent as repair coordinator.
RepairPrepareTimeTimerTotal time spent preparing for repair.
-
-
-

ThreadPool Metrics

-

Cassandra splits work of a particular type into its own thread pool. This provides back-pressure and asynchrony for -requests on a node. It’s important to monitor the state of these thread pools since they can tell you how saturated a -node is.
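In addition to the metrics below, nodetool tpstats gives a quick command-line snapshot of these pools:

```bash
# Prints active, pending, completed and blocked task counts per thread pool,
# plus dropped message counts, for the local node.
nodetool tpstats
```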

-

The metric names are all appended with the specific ThreadPool name. The thread pools are also categorized under a -specific type.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ThreadPools.<MetricName>.<Path>.<ThreadPoolName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ThreadPools path=<Path> scope=<ThreadPoolName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ActiveTasksGauge<Integer>Number of tasks being actively worked on by this pool.
PendingTasksGauge<Integer>Number of tasks queued up on this pool.
CompletedTasksCounterNumber of tasks completed.
TotalBlockedTasksCounterNumber of tasks that were blocked due to queue saturation.
CurrentlyBlockedTaskCounterNumber of tasks that are currently blocked due to queue saturation but on retry will become unblocked.
MaxPoolSizeGauge<Integer>The maximum number of threads in this pool.
MaxTasksQueuedGauge<Integer>The maximum number of tasks queued before a task gets blocked.
-

The following thread pools can be monitored.

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Native-Transport-RequeststransportHandles client CQL requests
CounterMutationStagerequestResponsible for counter writes
ViewMutationStagerequestResponsible for materialized view writes
MutationStagerequestResponsible for all other writes
ReadRepairStagerequestReadRepair happens on this thread pool
ReadStagerequestLocal reads run on this thread pool
RequestResponseStagerequestCoordinator requests to the cluster run on this thread pool
AntiEntropyStageinternalBuilds merkle tree for repairs
CacheCleanupExecutorinternalCache maintenance performed on this thread pool
CompactionExecutorinternalCompactions are run on these threads
GossipStageinternalHandles gossip requests
HintsDispatcherinternalPerforms hinted handoff
InternalResponseStageinternalResponsible for intra-cluster callbacks
MemtableFlushWriterinternalWrites memtables to disk
MemtablePostFlushinternalCleans up commit log after memtable is written to disk
MemtableReclaimMemoryinternalMemtable recycling
MigrationStageinternalRuns schema migrations
MiscStageinternalMiscellaneous tasks run here
PendingRangeCalculatorinternalCalculates token range
PerDiskMemtableFlushWriter_0internalResponsible for writing a spec (there is one of these per disk 0-N)
SamplerinternalResponsible for re-sampling the index summaries of SStables
SecondaryIndexManagementinternalPerforms updates to secondary indexes
ValidationExecutorinternalPerforms validation compaction or scrubbing
ViewBuildExecutorinternalPerforms materialized views initial build
-
-
-

Client Request Metrics

-

Client requests have their own set of metrics that encapsulate the work happening at coordinator level.

-

Different types of client requests are broken down by RequestType.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.ClientRequest.<MetricName>.<RequestType>
-
JMX MBean
-
org.apache.cassandra.metrics:type=ClientRequest scope=<RequestType> name=<MetricName>
-
- --- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
RequestType:

CASRead

-
Description:

Metrics related to transactional read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction read latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
UnfinishedCommitCounterNumber of transactions that were committed on read.
ConditionNotMetCounterNumber of transaction preconditions that did not match current values.
ContentionHistogramHistogramHow many contended reads were encountered
-
RequestType:

CASWrite

-
Description:

Metrics related to transactional write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
 LatencyTransaction write latency.
UnfinishedCommitCounterNumber of transactions that were committed on write.
ConditionNotMetCounterNumber of transaction preconditions did not match current values.
ContentionHistogramHistogramHow many contended writes were encountered
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

Read

-
Description:

Metrics related to standard read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of read failures encountered.
 LatencyRead latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

RangeSlice

-
Description:

Metrics related to token range read requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of range query failures encountered.
 LatencyRange query latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
-
RequestType:

Write

-
Description:

Metrics related to regular write requests.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of write failures encountered.
 LatencyWrite latency.
UnavailablesCounterNumber of unavailable exceptions encountered.
MutationSizeHistogramHistogramTotal size in bytes of the requests mutations.
-
RequestType:

ViewWrite

-
Description:

Metrics related to materialized view writes.

-
Metrics: ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TimeoutsCounterNumber of timeouts encountered.
FailuresCounterNumber of transaction failures encountered.
UnavailablesCounterNumber of unavailable exceptions encountered.
ViewReplicasAttemptedCounterTotal number of attempted view replica writes.
ViewReplicasSuccessCounterTotal number of succeeded view replica writes.
ViewPendingMutationsGauge<Long>ViewReplicasAttempted - ViewReplicasSuccess.
ViewWriteLatencyTimerTime between when mutation is applied to base table and when CL.ONE is achieved on view.
-
-
-
-

Cache Metrics

-

Cassandra caches have metrics to track the effectiveness of the caches, though the Table Metrics might be more useful.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Cache.<MetricName>.<CacheName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Cache scope=<CacheName> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Cache capacity in bytes.
EntriesGauge<Integer>Total number of cache entries.
FifteenMinuteCacheHitRateGauge<Double>15m cache hit rate.
FiveMinuteCacheHitRateGauge<Double>5m cache hit rate.
OneMinuteCacheHitRateGauge<Double>1m cache hit rate.
HitRateGauge<Double>All time cache hit rate.
HitsMeterTotal number of cache hits.
MissesMeterTotal number of cache misses.
MissLatencyTimerLatency of misses.
RequestsGauge<Long>Total number of cache requests.
SizeGauge<Long>Total size of occupied cache, in bytes.
-

The following caches are covered:

- ---- - - - - - - - - - - - - - - - - - - - -
NameDescription
CounterCacheKeeps hot counters in memory for performance.
ChunkCacheIn process uncompressed page cache.
KeyCacheCache for partition to sstable offsets.
RowCacheCache for rows kept in memory.
-
-

Note

-

Misses and MissLatency are only defined for the ChunkCache

-
-
-
-

CQL Metrics

-

Metrics specific to CQL prepared statement caching.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CQL.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CQL name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PreparedStatementsCountGauge<Integer>Number of cached prepared statements.
PreparedStatementsEvictedCounterNumber of prepared statements evicted from the prepared statement cache
PreparedStatementsExecutedCounterNumber of prepared statements executed.
RegularStatementsExecutedCounterNumber of non prepared statements executed.
PreparedStatementsRatioGauge<Double>Percentage of statements that are prepared vs unprepared.
-
-
-

DroppedMessage Metrics

-

Metrics specific to tracking dropped messages for different types of requests. -Dropped writes are stored and retried by Hinted Handoff

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.DroppedMessage.<MetricName>.<Type>
-
JMX MBean
-
org.apache.cassandra.metrics:type=DroppedMessage scope=<Type> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CrossNodeDroppedLatencyTimerThe dropped latency across nodes.
InternalDroppedLatencyTimerThe dropped latency within node.
DroppedMeterNumber of dropped messages.
-

The different types of messages tracked are:

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameDescription
BATCH_STOREBatchlog write
BATCH_REMOVEBatchlog cleanup (after being successfully applied)
COUNTER_MUTATIONCounter writes
HINTHint replay
MUTATIONRegular writes
READRegular reads
READ_REPAIRRead repair
PAGED_SLICEPaged read
RANGE_SLICEToken range read
REQUEST_RESPONSERPC Callbacks
_TRACETracing writes
-
-
-

Streaming Metrics

-

Metrics reported during Streaming operations, such as repair, bootstrap, rebuild.

-

These metrics are specific to a peer endpoint, with the source node being the node you are pulling the metrics from.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Streaming.<MetricName>.<PeerIP>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Streaming scope=<PeerIP> name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
IncomingBytesCounterNumber of bytes streamed to this node from the peer.
OutgoingBytesCounterNumber of bytes streamed to the peer endpoint from this node.
-
-
-

Compaction Metrics

-

Metrics specific to Compaction work.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Compaction.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Compaction name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
BytesCompactedCounterTotal number of bytes compacted since server [re]start.
PendingTasksGauge<Integer>Estimated number of compactions remaining to perform.
CompletedTasksGauge<Long>Number of completed compactions since server [re]start.
TotalCompactionsCompletedMeterThroughput of completed compactions since server [re]start.
PendingTasksByTableNameGauge<Map<String, Map<String, Integer>>>Estimated number of compactions remaining to perform, grouped by keyspace and then table name. This info is also kept in Table Metrics.
-
-
-

CommitLog Metrics

-

Metrics specific to the CommitLog

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.CommitLog.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=CommitLog name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CompletedTasksGauge<Long>Total number of commit log messages written since [re]start.
PendingTasksGauge<Long>Number of commit log messages written but yet to be fsync’d.
TotalCommitLogSizeGauge<Long>Current size, in bytes, used by all the commit log segments.
WaitingOnSegmentAllocationTimerTime spent waiting for a CommitLogSegment to be allocated - under normal conditions this should be zero.
WaitingOnCommitTimerThe time spent waiting on CL fsync; for Periodic this is only occurs when the sync is lagging its sync interval.
-
-
-

Storage Metrics

-

Metrics specific to the storage engine.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Storage.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Storage name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
ExceptionsCounterNumber of internal exceptions caught. Under normal conditions this should be zero.
LoadCounterSize, in bytes, of the on disk data size this node manages.
TotalHintsCounterNumber of hint messages written to this node since [re]start. Includes one entry for each host to be hinted per hint.
TotalHintsInProgressCounterNumber of hints currently attempting to be sent.
-
-
-

HintedHandoff Metrics

-

Metrics specific to Hinted Handoff. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintedHandOffManager.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintedHandOffManager name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
Hints_created-<PeerIP>CounterNumber of hints on disk for this peer.
Hints_not_stored-<PeerIP>CounterNumber of hints not stored for this peer, due to being down past the configured hint window.
-
-
-

HintsService Metrics

-

Metrics specific to the Hints delivery service. There are also some metrics related to hints tracked in Storage Metrics

-

These metrics include the peer endpoint in the metric name

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.HintsService.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=HintsService name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
HintsSucceededMeterA meter of the hints successfully delivered
HintsFailedMeterA meter of the hints that failed deliver
HintsTimedOutMeterA meter of the hints that timed out
Hint_delaysHistogramHistogram of hint delivery delays (in milliseconds)
Hint_delays-<PeerIP>HistogramHistogram of hint delivery delays (in milliseconds) per peer
-
-
-

SSTable Index Metrics

-

Metrics specific to the SSTable index metadata.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Index.<MetricName>.RowIndexEntry
-
JMX MBean
-
org.apache.cassandra.metrics:type=Index scope=RowIndexEntry name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
IndexedEntrySizeHistogramHistogram of the on-heap size, in bytes, of the index across all SSTables.
IndexInfoCountHistogramHistogram of the number of on-heap index entries managed across all SSTables.
IndexInfoGetsHistogramHistogram of the number index seeks performed per SSTable.
-
-
-

BufferPool Metrics

-

Metrics specific to the internal recycled buffer pool Cassandra manages. This pool is meant to keep allocations and GC -lower by recycling on and off heap buffers.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.BufferPool.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=BufferPool name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
SizeGauge<Long>Size, in bytes, of the managed buffer pool
MissesMeterThe rate of misses in the pool. The higher this is the more allocations incurred.
-
-
-

Client Metrics

-

Metrics specific to client management.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Client.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Client name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
connectedNativeClientsGauge<Integer>Number of clients connected to this node’s native protocol server
connectionsGauge<List<Map<String, String>>List of all connections and their state information
connectedNativeClientsByUserGauge<Map<String, Int>Number of connected native clients by username
-
-
-

Batch Metrics

-

Metrics specific to batch statements.

-

Reported name format:

-
-
Metric Name
-
org.apache.cassandra.metrics.Batch.<MetricName>
-
JMX MBean
-
org.apache.cassandra.metrics:type=Batch name=<MetricName>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
PartitionsPerCounterBatchHistogramDistribution of the number of partitions processed per counter batch
PartitionsPerLoggedBatchHistogramDistribution of the number of partitions processed per logged batch
PartitionsPerUnloggedBatchHistogramDistribution of the number of partitions processed per unlogged batch
-
-
-

JVM Metrics

-

JVM metrics such as memory and garbage collection statistics can either be accessed by connecting to the JVM using JMX or can be exported using Metric Reporters.

-
-

BufferPool

-
-
Metric Name
-
jvm.buffers.<direct|mapped>.<MetricName>
-
JMX MBean
-
java.nio:type=BufferPool name=<direct|mapped>
-
- ----- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
CapacityGauge<Long>Estimated total capacity of the buffers in this pool
CountGauge<Long>Estimated number of buffers in the pool
UsedGauge<Long>Estimated memory that the Java virtual machine is using for this buffer pool
-
-
-

FileDescriptorRatio

-
-
Metric Name
-
jvm.fd.<MetricName>
-
JMX MBean
-
java.lang:type=OperatingSystem name=<OpenFileDescriptorCount|MaxFileDescriptorCount>
-
- ----- - - - - - - - - - - - - -
NameTypeDescription
UsageRatioRatio of used to total file descriptors
-
-
-

GarbageCollector

-
-
Metric Name
-
jvm.gc.<gc_type>.<MetricName>
-
JMX MBean
-
java.lang:type=GarbageCollector name=<gc_type>
-
- ----- - - - - - - - - - - - - - - - - -
NameTypeDescription
CountGauge<Long>Total number of collections that have occurred
TimeGauge<Long>Approximate accumulated collection elapsed time in milliseconds
-
-
-

Memory

-
-
Metric Name
-
jvm.memory.<heap/non-heap/total>.<MetricName>
-
JMX MBean
-
java.lang:type=Memory
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-

MemoryPool

-
-
Metric Name
-
jvm.memory.pools.<memory_pool>.<MetricName>
-
JMX MBean
-
java.lang:type=MemoryPool name=<memory_pool>
-
- ----- - - - - - - - - - - - - - - - - - - - - - - -
CommittedGauge<Long>Amount of memory in bytes that is committed for the JVM to use
InitGauge<Long>Amount of memory in bytes that the JVM initially requests from the OS
MaxGauge<Long>Maximum amount of memory in bytes that can be used for memory management
UsageRatioRatio of used to maximum memory
UsedGauge<Long>Amount of used memory in bytes
-
-
-
-

JMX

-

Any JMX based client can access metrics from cassandra.

-

If you wish to access JMX metrics over http it’s possible to download Mx4jTool and -place mx4j-tools.jar into the classpath. On startup you will see in the log:

-
HttpAdaptor version 3.0.2 started on port 8081
-
-
-

To choose a different port (8081 is the default) or a different listen address (0.0.0.0 is not the default) edit -conf/cassandra-env.sh and uncomment:

-
#MX4J_ADDRESS="-Dmx4jaddress=0.0.0.0"
-
-#MX4J_PORT="-Dmx4jport=8081"
-
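Assuming the default port, a quick check that the HTTP adaptor is answering might be:

```bash
# The Mx4j HttpAdaptor serves an HTML view of the registered MBeans.
curl -s http://localhost:8081/ | head
```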
-
-
-
-

Metric Reporters

-

As mentioned at the top of this section on monitoring, the Cassandra metrics can be exported to a number of monitoring systems via a number of built-in and third party reporter plugins.

-

The configuration of these plugins is managed by the metrics reporter config project. There is a sample configuration file located at -conf/metrics-reporter-config-sample.yaml.

-

Once configured, you simply start cassandra with the flag -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml. The specified .yaml file plus any 3rd party reporter jars must all be in Cassandra’s classpath.
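A minimal sketch of starting a node with a reporter configured (jar names and paths are placeholders; conf/ is already on Cassandra’s classpath, so the yaml can live there):

```bash
# One common way to put third party reporter jars on the classpath is to drop
# them into Cassandra's lib/ directory (jar name below is a placeholder).
cp /path/to/metrics-reporter-*.jar "$CASSANDRA_HOME/lib/"

# Start the node pointing at the reporter configuration; the startup script
# passes -D options through to the JVM.
bin/cassandra -Dcassandra.metricsReporterConfigFile=metrics-reporter-config.yaml
```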

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/read_repair.html b/src/doc/4.0-beta1/operating/read_repair.html deleted file mode 100644 index 6c71d5eac..000000000 --- a/src/doc/4.0-beta1/operating/read_repair.html +++ /dev/null @@ -1,267 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Read repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Read repair

-

Read Repair is the process of repairing data replicas during a read request. If all replicas involved in a read request at the given read consistency level are consistent the data is returned to the client and no read repair is needed. But if the replicas involved in a read request at the given consistency level are not consistent a read repair is performed to make replicas involved in the read request consistent. The most up-to-date data is returned to the client. The read repair runs in the foreground and is blocking in that a response is not returned to the client until the read repair has completed and up-to-date data is constructed.

-
-

Expectation of Monotonic Quorum Reads

-

Cassandra uses a blocking read repair to ensure the expectation of “monotonic quorum reads” i.e. that in 2 successive quorum reads, it’s guaranteed the 2nd one won’t get something older than the 1st one, and this even if a failed quorum write made a write of the most up to date value only to a minority of replicas. “Quorum” means majority of nodes among replicas.

-
-
-

Table level configuration of monotonic reads

-

Cassandra 4.0 adds support for table level configuration of monotonic reads (CASSANDRA-14635). The read_repair table option has been added to table schema, with the options blocking (default), and none.

-

The read_repair option configures the read repair behavior to allow tuning for various performance and consistency behaviors. Two consistency properties are affected by read repair behavior.

-
    -
  • Monotonic Quorum Reads: Provided by BLOCKING. Monotonic quorum reads prevents reads from appearing to go back in time in some circumstances. When monotonic quorum reads are not provided and a write fails to reach a quorum of replicas, it may be visible in one read, and then disappear in a subsequent read.
  • -
  • Write Atomicity: Provided by NONE. Write atomicity prevents reads from returning partially applied writes. Cassandra attempts to provide partition level write atomicity, but since only the data covered by a SELECT statement is repaired by a read repair, read repair can break write atomicity when data is read at a more granular level than it is written. For example read repair can break write atomicity if you write multiple rows to a clustered partition in a batch, but then select a single row by specifying the clustering column in a SELECT statement.
  • -
-

The available read repair settings are:

-
-

Blocking

-

The default setting. When read_repair is set to BLOCKING, and a read repair is started, the read will block on writes sent to other replicas until the CL is reached by the writes. Provides monotonic quorum reads, but not partition level write atomicity.
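For example, the default behavior can be set explicitly on an existing table (keyspace and table names are placeholders):

```bash
# Explicitly select blocking read repair for the table.
cqlsh -e "ALTER TABLE ks.tbl WITH read_repair = 'BLOCKING';"
```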

-
-
-

None

-

When read_repair is set to NONE, the coordinator will reconcile any differences between replicas, but will not attempt to repair them. Provides partition level write atomicity, but not monotonic quorum reads.

-

An example of using the NONE setting for the read_repair option is as follows:

-
CREATE TABLE ks.tbl (k INT, c INT, v INT, PRIMARY KEY (k,c)) WITH read_repair = 'NONE';
-
-
-
-
-
-

Read Repair Example

-

To illustrate read repair with an example, consider that a client sends a read request with read consistency level TWO to a 5-node cluster as illustrated in Figure 1. Read consistency level determines how many replica nodes must return a response before the read request is considered successful.

-
-../_images/Figure_1_read_repair.jpg -
-

Figure 1. Client sends read request to a 5-node Cluster

-

Three nodes host replicas for the requested data as illustrated in Figure 2. With a read consistency level of TWO, two replica nodes must return a response for the read request to be considered successful. If the node the client sends the request to hosts a replica of the requested data, only one other replica node needs to be sent a read request. But if the receiving node does not host a replica for the requested data, the node becomes a coordinator node and forwards the read request to a node that hosts a replica. A direct read request is forwarded to the fastest node (as determined by the dynamic snitch) as shown in Figure 2. A direct read request is a full read and returns the requested data.

-
-../_images/Figure_2_read_repair.jpg -
-

Figure 2. Direct Read Request sent to Fastest Replica Node

-

Next, the coordinator node sends the requisite number of additional requests to satisfy the consistency level, which is TWO. The coordinator node needs to send one more read request for a total of two. All read requests additional to the first direct read request are digest read requests. A digest read request is not a full read and only returns the hash value of the data. Only a hash value is returned to reduce the network data traffic. In the example being discussed the coordinator node sends one digest read request to a node hosting a replica as illustrated in Figure 3.

-
-../_images/Figure_3_read_repair.jpg -
-

Figure 3. Coordinator Sends a Digest Read Request

-

The coordinator node has received a full copy of data from one node and a hash value for the data from another node. To compare the data returned, a hash value is calculated for the full copy of data. The two hash values are compared. If the hash values are the same, no read repair is needed and the full copy of requested data is returned to the client. The coordinator node only performed a total of two replica read requests because the read consistency level is TWO in the example. If the consistency level were higher, such as THREE, three replica nodes would need to respond to a read request, and only if all digest or hash values were to match the hash value of the full copy of data would the read request be considered successful and the data returned to the client.

-

But, if the hash value/s from the digest read request/s are not the same as the hash value of the data from the full read request of the first replica node it implies that an inconsistency in the replicas exists. To fix the inconsistency a read repair is performed.

-

For example, consider that the digest read request returns a hash value that is not the same as the hash value of the data from the direct full read request. We would need to make the replicas consistent, for which the coordinator node sends a direct (full) read request to the replica node that it sent a digest read request to earlier, as illustrated in Figure 4.

-
-../_images/Figure_4_read_repair.jpg -
-

Figure 4. Coordinator sends Direct Read Request to Replica Node it had sent Digest Read Request to

-

After receiving the data from the second replica node, the coordinator has data from two of the replica nodes. It only needs two replicas as the read consistency level is TWO in the example. Data from the two replicas is compared and, based on the timestamps, the most recent replica is selected. Data may need to be merged to construct an up-to-date copy if one replica has data for only some of the columns. In the example, if the data from the first direct read request is found to be outdated and the data from the second full read request to be the latest, a read repair needs to be performed on Replica 2. If new up-to-date data is constructed by merging the two replicas, a read repair would be needed on both of the replicas involved. For example, a read repair is performed on Replica 2 as illustrated in Figure 5.

-
-../_images/Figure_5_read_repair.jpg -
-

Figure 5. Coordinator performs Read Repair

-

The most up-to-date data is returned to the client as illustrated in Figure 6. From the three replicas Replica 1 is not even read and thus not repaired. Replica 2 is repaired. Replica 3 is the most up-to-date and returned to client.

-
-../_images/Figure_6_read_repair.jpg -
-

Figure 6. Most up-to-date Data returned to Client

-
-
-

Read Consistency Level and Read Repair

-

The read consistency is most significant in determining if a read repair needs to be performed. As discussed in Table 1 a read repair is not needed for all of the consistency levels.

-

Table 1. Read Repair based on Read Consistency Level

- ---- - - - - - - - - - - - - - - - - - - - - - - - -
Read Consistency LevelDescription
ONERead repair is not performed as the -data from the first direct read request -satisfies the consistency level ONE. -No digest read requests are involved -for finding mismatches in data.
TWORead repair is performed if inconsistencies -in data are found as determined by the -direct and digest read requests.
THREERead repair is performed if inconsistencies -in data are found as determined by the -direct and digest read requests.
LOCAL_ONERead repair is not performed as the data -from the direct read request from the -closest replica satisfies the consistency -level LOCAL_ONE.No digest read requests are -involved for finding mismatches in data.
LOCAL_QUORUMRead repair is performed if inconsistencies -in data are found as determined by the -direct and digest read requests.
QUORUMRead repair is performed if inconsistencies -in data are found as determined by the -direct and digest read requests.
-

If read repair is performed it is made only on the replicas that are not up-to-date and that are involved in the read request. The number of replicas involved in a read request would be based on the read consistency level; in the example it is two.

-
-
-

Improved Read Repair Blocking Behavior in Cassandra 4.0

-

Cassandra 4.0 makes two improvements to read repair blocking behavior (CASSANDRA-10726).

-
  1. Speculative Retry of Full Data Read Requests. Cassandra 4.0 uses speculative retry when sending read requests (full, not digest) to replicas if a full data response is not received, whether in the initial full read request or in a full data read request during read repair. With speculative retry, if it looks like a response may not be received from the initial set of replicas contacted to satisfy the consistency level, Cassandra speculatively sends an additional read request to one or more un-contacted replicas. Cassandra 4.0 will also speculatively send a repair mutation to a minority of nodes not involved in the read repair data read/write cycle, with the combined contents of all un-acknowledged mutations, if it looks like one of the original recipients may not respond. Cassandra accepts acks from these nodes in lieu of acks for the initial mutations sent out, so long as it receives the same number of acks as repair mutations transmitted.
  2. Only blocks on Full Data Responses to satisfy the Consistency Level. Cassandra 4.0 only blocks on what is needed to resolve the digest mismatch and waits for enough full data responses to meet the consistency level, whether those responses arrive via speculative retry or the initial read repair requests. For example, if it looks like Cassandra might not receive full data responses from every contacted replica in time, it sends additional requests to replicas not contacted in the initial full data read. If the nodes that do respond in time agree on the data, the response from the disagreeing replica that started the read repair is not considered and is not included in the response to the client, preserving the expectation of monotonic quorum reads.
-
-
-

Diagnostic Events for Read Repairs

-

Cassandra 4.0 adds diagnostic events for read repair (CASSANDRA-14668) that can be used for exposing information such as:

-
  • Contacted endpoints
  • Digest responses by endpoint
  • Affected partition keys
  • Speculated reads / writes
  • Update oversized
-
-
-

Background Read Repair

-

Background read repair, which was configured using the read_repair_chance and dclocal_read_repair_chance settings in cassandra.yaml, is removed in Cassandra 4.0 (CASSANDRA-13910).

-

Read repair is not an alternative to other kinds of repair, such as full repairs or replacing a node that keeps failing. The data returned, even after a read repair has been performed, may not be the most up-to-date data if the consistency level is not one that requires a response from all replicas.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/repair.html b/src/doc/4.0-beta1/operating/repair.html deleted file mode 100644 index 9cec65f91..000000000 --- a/src/doc/4.0-beta1/operating/repair.html +++ /dev/null @@ -1,278 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Repair" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Repair

-

Cassandra is designed to remain available if one of its nodes is down or unreachable. However, when a node is down or unreachable, it needs to eventually discover the writes it missed. Hints attempt to inform a node of missed writes, but they are a best effort and aren't guaranteed to inform a node of 100% of the writes it missed. These inconsistencies can eventually result in data loss as nodes are replaced or tombstones expire.

-

These inconsistencies are fixed with the repair process. Repair synchronizes the data between nodes by comparing their -respective datasets for their common token ranges, and streaming the differences for any out of sync sections between -the nodes. It compares the data with merkle trees, which are a hierarchy of hashes.

-
-

Incremental and Full Repairs

-

There are 2 types of repairs: full repairs, and incremental repairs. Full repairs operate over all of the data in the -token range being repaired. Incremental repairs only repair data that’s been written since the previous incremental repair.

-

Incremental repairs are the default repair type and, if run regularly, they can significantly reduce the time and I/O cost of performing a repair. However, it's important to understand that once an incremental repair marks data as repaired, it won't try to repair it again. This is fine for syncing up missed writes, but it doesn't protect against things like disk corruption, data loss by operator error, or bugs in Cassandra. For this reason, full repairs should still be run occasionally.

-
-
-

Usage and Best Practices

-

Since repair can result in a lot of disk and network I/O, it's not run automatically by Cassandra. It is run by the operator via nodetool.

-

Incremental repair is the default and is run with the following command:

-
nodetool repair
-
-
-

A full repair can be run with the following command:

-
nodetool repair --full
-
-
-

Additionally, repair can be run on a single keyspace:

-
nodetool repair [options] <keyspace_name>
-
-
-

Or even on specific tables:

-
nodetool repair [options] <keyspace_name> <table1> <table2>
-
-
-

The repair command only repairs token ranges on the node being repaired; it doesn't repair the whole cluster. By default, repair will operate on all token ranges replicated by the node you're running repair on, which will cause duplicate work if you run it on every node. The -pr flag restricts repair to the “primary” ranges on a node, so you can repair your entire cluster by running nodetool repair -pr on each node in a single datacenter.

-

The specific frequency of repair that’s right for your cluster, of course, depends on several factors. However, if you’re -just starting out and looking for somewhere to start, running an incremental repair every 1-3 days, and a full repair every -1-3 weeks is probably reasonable. If you don’t want to run incremental repairs, a full repair every 5 days is a good place -to start.

-

At a minimum, repair should be run often enough that the gc grace period never expires on unrepaired data. Otherwise, deleted -data could reappear. With a default gc grace period of 10 days, repairing every node in your cluster at least once every 7 days -will prevent this, while providing enough slack to allow for delays.
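As a minimal sketch of how such a schedule might be automated (the times, log path, and use of cron here are illustrative assumptions, not a recommendation; any scheduling mechanism that keeps every node within the gc grace period will do), a crontab on each node could run:

```bash
# Illustrative schedule only: incremental repair of primary ranges nightly at 02:00
0 2 * * * nodetool repair -pr >> /var/log/cassandra/repair.log 2>&1
# Illustrative schedule only: full repair of primary ranges weekly on Sunday at 03:00
0 3 * * 0 nodetool repair -pr --full >> /var/log/cassandra/repair.log 2>&1
```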

-
-
-

Other Options

-
-
-pr, --partitioner-range
-
Restricts repair to the ‘primary’ token ranges of the node being repaired. A primary range is just a token range for -which a node is the first replica in the ring.
-
-prv, --preview
-
Estimates the amount of streaming that would occur for the given repair command. This builds the merkle trees, and prints -the expected streaming activity, but does not actually do any streaming. By default, incremental repairs are estimated, -add the --full flag to estimate a full repair.
-
-vd, --validate
-
Verifies that the repaired data is the same across all nodes. Similar to --preview, this builds and compares merkle trees of repaired data, but doesn't do any streaming. This is useful for troubleshooting. If this shows that the repaired data is out of sync, a full repair should be run. See the examples below.
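For example (using the cqlkeyspace keyspace and table t from the example later on this page; any keyspace and table can be substituted), the preview and validate options can be run as:

```bash
# Estimate the streaming a full repair would perform, without actually streaming any data
nodetool repair -prv --full cqlkeyspace t

# Verify that already-repaired data is in sync across replicas (no streaming)
nodetool repair -vd cqlkeyspace t
```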
-
-
-

See also

-

nodetool repair docs

-
-
-
-

Full Repair Example

-

Full repair is typically needed to redistribute data after increasing the replication factor of a keyspace or after adding a node to the cluster. Full repair involves streaming SSTables. To demonstrate full repair start with a three node cluster.

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool status
-Datacenter: us-east-1
-=====================
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address   Load        Tokens  Owns  Host ID                              Rack
-UN  10.0.1.115  547 KiB     256    ?  b64cb32a-b32a-46b4-9eeb-e123fa8fc287  us-east-1b
-UN  10.0.3.206  617.91 KiB  256    ?  74863177-684b-45f4-99f7-d1006625dc9e  us-east-1d
-UN  10.0.2.238  670.26 KiB  256    ?  4dcdadd2-41f9-4f34-9892-1f20868b27c7  us-east-1c
-
-
-

Create a keyspace with replication factor 3:

-
cqlsh> DROP KEYSPACE cqlkeyspace;
-cqlsh> CREATE KEYSPACE CQLKeyspace
-  ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-
-

Add a table to the keyspace:

-
cqlsh> use cqlkeyspace;
-cqlsh:cqlkeyspace> CREATE TABLE t (
-           ...   id int,
-           ...   k int,
-           ...   v text,
-           ...   PRIMARY KEY (id)
-           ... );
-
-
-

Add table data:

-
cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (0, 0, 'val0');
-cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (1, 1, 'val1');
-cqlsh:cqlkeyspace> INSERT INTO t (id, k, v) VALUES (2, 2, 'val2');
-
-
-

A query lists the data added:

-
cqlsh:cqlkeyspace> SELECT * FROM t;
-
-id | k | v
-----+---+------
- 1 | 1 | val1
- 0 | 0 | val0
- 2 | 2 | val2
-(3 rows)
-
-
-

Make the following changes to a three node cluster:

-
    -
  1. Increase the replication factor from 3 to 4.
  2. -
  3. Add a 4th node to the cluster
  4. -
-

When the replication factor is increased, the following message is output, indicating that a full repair is needed (CASSANDRA-13079):

-
cqlsh:cqlkeyspace> ALTER KEYSPACE CQLKeyspace
-           ... WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-Warnings :
-When increasing replication factor you need to run a full (-full) repair to distribute the
-data.
-
-
-

Perform a full repair on the keyspace cqlkeyspace table t with the following command:

-
nodetool repair -full cqlkeyspace t
-
-
-

Full repair completes in about a second as indicated by the output:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool repair -full cqlkeyspace t
-[2019-08-17 03:06:21,445] Starting repair command #1 (fd576da0-c09b-11e9-b00c-1520e8c38f00), repairing keyspace cqlkeyspace with repair options (parallelism: parallel, primary range: false, incremental: false, job threads: 1, ColumnFamilies: [t], dataCenters: [], hosts: [], previewKind: NONE, # of ranges: 1024, pull repair: false, force repair: false, optimise streams: false)
-[2019-08-17 03:06:23,059] Repair session fd8e5c20-c09b-11e9-b00c-1520e8c38f00 for range [(-8792657144775336505,-8786320730900698730], (-5454146041421260303,-5439402053041523135], (4288357893651763201,4324309707046452322], ... , (4350676211955643098,4351706629422088296]] finished (progress: 0%)
-[2019-08-17 03:06:23,077] Repair completed successfully
-[2019-08-17 03:06:23,077] Repair command #1 finished in 1 second
-[ec2-user@ip-10-0-2-238 ~]$
-
-
-

The nodetool tpstats command should show that a repair has been completed, with the Repair-Task row reporting a Completed column value of 1:

-
[ec2-user@ip-10-0-2-238 ~]$ nodetool tpstats
-Pool Name Active   Pending Completed   Blocked  All time blocked
-ReadStage  0           0           99       0              0
-…
-Repair-Task 0       0           1        0              0
-RequestResponseStage                  0        0        2078        0               0
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/security.html b/src/doc/4.0-beta1/operating/security.html deleted file mode 100644 index 3122daa77..000000000 --- a/src/doc/4.0-beta1/operating/security.html +++ /dev/null @@ -1,474 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Security" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Security

-

There are three main components to the security features provided by Cassandra:

-
    -
  • TLS/SSL encryption for client and inter-node communication
  • -
  • Client authentication
  • -
  • Authorization
  • -
-

By default, these features are disabled as Cassandra is configured to easily find and be found by other members of a -cluster. In other words, an out-of-the-box Cassandra installation presents a large attack surface for a bad actor. -Enabling authentication for clients using the binary protocol is not sufficient to protect a cluster. Malicious users -able to access internode communication and JMX ports can still:

-
    -
  • Craft internode messages to insert users into authentication schema
  • -
  • Craft internode messages to truncate or drop schema
  • -
  • Use tools such as sstableloader to overwrite system_auth tables
  • -
  • Attach to the cluster directly to capture write traffic
  • -
-

Correct configuration of all three security components should negate these vectors. Therefore, understanding Cassandra's security features is crucial to configuring your cluster to meet your security needs.

-
-

TLS/SSL Encryption

-

Cassandra provides secure communication between a client machine and a database cluster and between nodes within a -cluster. Enabling encryption ensures that data in flight is not compromised and is transferred securely. The options for -client-to-node and node-to-node encryption are managed separately and may be configured independently.

-

In both cases, the JVM defaults for supported protocols and cipher suites are used when encryption is enabled. These can be overridden using the settings in cassandra.yaml, but this is not recommended unless there are policies in place which dictate certain settings, or there is a need to disable vulnerable ciphers or protocols in cases where the JVM cannot be updated.

-

FIPS compliant settings can be configured at the JVM level and should not involve changing encryption settings in -cassandra.yaml. See the java document on FIPS -for more details.

-

For information on generating the keystore and truststore files used in SSL communications, see the -java documentation on creating keystores

-
-
-

SSL Certificate Hot Reloading

-

Beginning with Cassandra 4, Cassandra supports hot reloading of SSL Certificates. If SSL/TLS support is enabled in Cassandra, -the node periodically polls the Trust and Key Stores specified in cassandra.yaml. When the files are updated, Cassandra will -reload them and use them for subsequent connections. Please note that the Trust & Key Store passwords are part of the yaml so -the updated files should also use the same passwords. The default polling interval is 10 minutes.

-

Certificate hot reloading may also be triggered using the nodetool reloadssl command. Use this if you want Cassandra to immediately pick up the changed certificates.

-
-

Inter-node Encryption

-

The settings for managing inter-node encryption are found in cassandra.yaml in the server_encryption_options section. To enable inter-node encryption, change the internode_encryption setting from its default value of none to one of rack, dc, or all.
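A minimal sketch of such a configuration (the keystore/truststore paths and passwords below are illustrative placeholders, not values to keep in production):

```yaml
server_encryption_options:
    internode_encryption: all
    keystore: /etc/cassandra/conf/server-keystore.jks        # illustrative path
    keystore_password: myKeyPass                              # illustrative password
    truststore: /etc/cassandra/conf/server-truststore.jks     # illustrative path
    truststore_password: myTrustPass                          # illustrative password
```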

-
-
-

Client to Node Encryption

-

The settings for managing client to node encryption are found in cassandra.yaml in the client_encryption_options -section. There are two primary toggles here for enabling encryption, enabled and optional.

-
    -
  • If neither is set to true, client connections are entirely unencrypted.
  • -
  • If enabled is set to true and optional is set to false, all client connections must be secured.
  • -
  • If both options are set to true, both encrypted and unencrypted connections are supported using the same port. -Client connections using encryption with this configuration will be automatically detected and handled by the server.
  • -
-

As an alternative to the optional setting, separate ports can also be configured for secure and insecure connections where operational requirements demand it. To do so, set optional to false and use the native_transport_port_ssl setting in cassandra.yaml to specify the port to be used for secure client communication.
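A minimal sketch of enabling client encryption on the standard port (paths and passwords are illustrative placeholders):

```yaml
client_encryption_options:
    enabled: true
    optional: false    # require encryption on all client connections
    keystore: /etc/cassandra/conf/client-keystore.jks   # illustrative path
    keystore_password: myKeyPass                         # illustrative password
```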

-
-
-
-

Roles

-

Cassandra uses database roles, which may represent either a single user or a group of users, in both authentication and -permissions management. Role management is an extension point in Cassandra and may be configured using the -role_manager setting in cassandra.yaml. The default setting uses CassandraRoleManager, an implementation -which stores role information in the tables of the system_auth keyspace.

-

See also the CQL documentation on roles.

-
-
-

Authentication

-

Authentication is pluggable in Cassandra and is configured using the authenticator setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthenticator which performs no authentication checks and therefore -requires no credentials. It is used to disable authentication completely. Note that authentication is a necessary -condition of Cassandra’s permissions subsystem, so if authentication is disabled, effectively so are permissions.

-

The default distribution also includes PasswordAuthenticator, which stores encrypted credentials in a system table. -This can be used to enable simple username/password authentication.

-
-

Enabling Password Authentication

-

Before enabling client authentication on the cluster, client applications should be pre-configured with their intended credentials. When a connection is initiated, the server will only ask for credentials once authentication is enabled, so setting up the client-side config in advance is safe. In contrast, as soon as a server has authentication enabled, any connection attempt without proper credentials will be rejected, which may cause availability problems for client applications. Once clients are set up and ready for authentication to be enabled, follow this procedure to enable it on the cluster.

-

Pick a single node in the cluster on which to perform the initial configuration. Ideally, no clients should connect -to this node during the setup process, so you may want to remove it from client config, block it at the network level -or possibly add a new temporary node to the cluster for this purpose. On that node, perform the following steps:

-
    -
  1. Open a cqlsh session and change the replication factor of the system_auth keyspace. By default, this keyspace uses SimpleStrategy and a replication_factor of 1. It is recommended to change this for any non-trivial deployment to ensure that login is still possible should nodes become unavailable. Best practice is to configure a replication factor of 3 to 5 per DC.
  2. -
-
ALTER KEYSPACE system_auth WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': 3, 'DC2': 3};
-
-
-
    -
  2. Edit cassandra.yaml to change the authenticator option like so:
-
authenticator: PasswordAuthenticator
-
-
-
    -
  3. Restart the node.
  4. Open a new cqlsh session using the credentials of the default superuser:
-
cqlsh -u cassandra -p cassandra
-
-
-
    -
  5. During login, the credentials for the default superuser are read with a consistency level of QUORUM, whereas those for all other users (including superusers) are read at LOCAL_ONE. In the interests of performance and availability, as well as security, operators should create another superuser and disable the default one. This step is optional, but highly recommended. While logged in as the default superuser, create another superuser role which can be used to bootstrap further configuration.
-
# create a new superuser
-CREATE ROLE dba WITH SUPERUSER = true AND LOGIN = true AND PASSWORD = 'super';
-
-
-
    -
  6. Start a new cqlsh session, this time logging in as the new superuser, and disable the default superuser:
-
ALTER ROLE cassandra WITH SUPERUSER = false AND LOGIN = false;
-
-
-
    -
  7. Finally, set up the roles and credentials for your application users with CREATE ROLE statements.
-

At the end of these steps, the one node is configured to use password authentication. To roll that out across the -cluster, repeat steps 2 and 3 on each node in the cluster. Once all nodes have been restarted, authentication will be -fully enabled throughout the cluster.

-

Note that using PasswordAuthenticator also requires the use of CassandraRoleManager.

-

See also: Setting credentials for internal authentication, CREATE ROLE, -ALTER ROLE, ALTER KEYSPACE and GRANT PERMISSION,

-
-
-
-

Authorization

-

Authorization is pluggable in Cassandra and is configured using the authorizer setting in cassandra.yaml. -Cassandra ships with two options included in the default distribution.

-

By default, Cassandra is configured with AllowAllAuthorizer which performs no checking and so effectively grants all -permissions to all roles. This must be used if AllowAllAuthenticator is the configured authenticator.

-

The default distribution also includes CassandraAuthorizer, which does implement full permissions management -functionality and stores its data in Cassandra system tables.

-
-

Enabling Internal Authorization

-

Permissions are modelled as a whitelist, with the default assumption that a given role has no access to any database -resources. The implication of this is that once authorization is enabled on a node, all requests will be rejected until -the required permissions have been granted. For this reason, it is strongly recommended to perform the initial setup on -a node which is not processing client requests.

-

The following assumes that authentication has already been enabled via the process outlined in -Enabling Password Authentication. Perform these steps to enable internal authorization across the cluster:

-
    -
  1. On the selected node, edit cassandra.yaml to change the authorizer option like so:
  2. -
-
authorizer: CassandraAuthorizer
-
-
-
    -
  2. Restart the node.
  3. Open a new cqlsh session using the credentials of a role with superuser privileges:
-
cqlsh -u dba -p super
-
-
-
    -
  1. Configure the appropriate access privileges for your clients using GRANT PERMISSION -statements. On the other nodes, until configuration is updated and the node restarted, this will have no effect so -disruption to clients is avoided.
  2. -
-
GRANT SELECT ON ks.t1 TO db_user;
-
-
-
    -
  1. Once all the necessary permissions have been granted, repeat steps 1 and 2 for each node in turn. As each node -restarts and clients reconnect, the enforcement of the granted permissions will begin.
  2. -
-

See also: GRANT PERMISSION, GRANT ALL <grant-all> and REVOKE PERMISSION

-
-
-
-

Caching

-

Enabling authentication and authorization places additional load on the cluster by frequently reading from the system_auth tables. Furthermore, these reads are in the critical paths of many client operations, and so have the potential to severely impact quality of service. To mitigate this, auth data such as credentials, permissions and role details are cached for a configurable period. The caching can be configured (and even disabled) from cassandra.yaml or using a JMX client. The JMX interface also supports invalidation of the various caches, but any changes made via JMX are not persistent and will be re-read from cassandra.yaml when the node is restarted.

-

Each cache has 3 options which can be set:

-
-
Validity Period
-
Controls the expiration of cache entries. After this period, entries are invalidated and removed from the cache.
-
Refresh Rate
-
Controls the rate at which background reads are performed to pick up any changes to the underlying data. While these -async refreshes are performed, caches will continue to serve (possibly) stale data. Typically, this will be set to a -shorter time than the validity period.
-
Max Entries
-
Controls the upper bound on cache size.
-
-

The naming for these options in cassandra.yaml follows the convention:

-
  • <type>_validity_in_ms
  • <type>_update_interval_in_ms
  • <type>_cache_max_entries
-

Where <type> is one of credentials, permissions, or roles.
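Following that convention, the roles cache could, for example, be tuned in cassandra.yaml as follows (the values here are purely illustrative, not recommendations):

```yaml
roles_validity_in_ms: 2000          # cached role entries expire after 2 seconds
roles_update_interval_in_ms: 1000   # background refresh every second
roles_cache_max_entries: 1000       # upper bound on cache size
```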

-

As mentioned, these are also exposed via JMX in the mbeans under the org.apache.cassandra.auth domain.

-
-
-

JMX access

-

Access control for JMX clients is configured separately from that for CQL. For both authentication and authorization, two providers are available: the first is based on standard JMX security and the second integrates more closely with Cassandra's own auth subsystem.

-

The default settings for Cassandra make JMX accessible only from localhost. To enable remote JMX connections, edit -cassandra-env.sh (or cassandra-env.ps1 on Windows) to change the LOCAL_JMX setting to no. Under the -standard configuration, when remote JMX connections are enabled, standard JMX authentication -is also switched on.

-

Note that by default, local-only connections are not subject to authentication, but this can be enabled.

-

If enabling remote connections, it is recommended to also use SSL connections.

-

Finally, after enabling auth and/or SSL, ensure that tools which use JMX, such as nodetool, are -correctly configured and working as expected.

-
-

Standard JMX Auth

-

Users permitted to connect to the JMX server are specified in a simple text file. The location of this file is set in -cassandra-env.sh by the line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

Edit the password file to add username/password pairs:

-
jmx_user jmx_password
-
-
-

Secure the credentials file so that only the user running the Cassandra process can read it:

-
$ chown cassandra:cassandra /etc/cassandra/jmxremote.password
-$ chmod 400 /etc/cassandra/jmxremote.password
-
-
-

Optionally, enable access control to limit the scope of what defined users can do via JMX. Note that this is a fairly -blunt instrument in this context as most operational tools in Cassandra require full read/write access. To configure a -simple access file, uncomment this line in cassandra-env.sh:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

Then edit the access file to grant your JMX user readwrite permission:

-
jmx_user readwrite
-
-
-

Cassandra must be restarted to pick up the new settings.

-

See also : Using File-Based Password Authentication In JMX

-
-
-

Cassandra Integrated Auth

-

An alternative to the out-of-the-box JMX auth is to use Cassandra's own authentication and/or authorization providers for JMX clients. This is potentially more flexible and secure, but it comes with one major caveat: it is not available until after a node has joined the ring, because the auth subsystem is not fully configured until that point. However, it is often critical for monitoring purposes to have JMX access, particularly during bootstrap. So it is recommended, where possible, to use local-only JMX auth during bootstrap and then, if remote connectivity is required, to switch to integrated auth once the node has joined the ring and initial setup is complete.

-

With this option, the same database roles used for CQL authentication can be used to control access to JMX, so updates can be managed centrally using just cqlsh. Furthermore, fine-grained control over exactly which operations are permitted on particular MBeans can be achieved via GRANT PERMISSION.

-

To enable integrated authentication, edit cassandra-env.sh to uncomment these lines:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
-#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
-
-
-

And disable the JMX standard auth by commenting this line:

-
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-
-
-

To enable integrated authorization, uncomment this line:

-
#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
-
-
-

Check standard access control is off by ensuring this line is commented out:

-
#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
-
-
-

With integrated authentication and authorization enabled, operators can define specific roles and grant them access to -the particular JMX resources that they need. For example, a role with the necessary permissions to use tools such as -jconsole or jmc in read-only mode would be defined as:

-
CREATE ROLE jmx WITH LOGIN = false;
-GRANT SELECT ON ALL MBEANS TO jmx;
-GRANT DESCRIBE ON ALL MBEANS TO jmx;
-GRANT EXECUTE ON MBEAN 'java.lang:type=Threading' TO jmx;
-GRANT EXECUTE ON MBEAN 'com.sun.management:type=HotSpotDiagnostic' TO jmx;
-
-# Grant the role with necessary permissions to use nodetool commands (including nodetool status) in read-only mode
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=EndpointSnitchInfo' TO jmx;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=StorageService' TO jmx;
-
-# Grant the jmx role to one with login permissions so that it can access the JMX tooling
-CREATE ROLE ks_user WITH PASSWORD = 'password' AND LOGIN = true AND SUPERUSER = false;
-GRANT jmx TO ks_user;
-
-
-

Fine grained access control to individual MBeans is also supported:

-
GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=t1' TO ks_user;
-GRANT EXECUTE ON MBEAN 'org.apache.cassandra.db:type=Tables,keyspace=test_keyspace,table=*' TO ks_owner;
-
-
-

This permits the ks_user role to invoke methods on the MBean representing a single table in test_keyspace, while -granting the same permission for all table level MBeans in that keyspace to the ks_owner role.

-

Adding/removing roles and granting/revoking of permissions is handled dynamically once the initial setup is complete, so -no further restarts are required if permissions are altered.

-

See also: Permissions.

-
-
-

JMX With SSL

-

JMX SSL configuration is controlled by a number of system properties, some of which are optional. To turn on SSL, edit the relevant lines in cassandra-env.sh (or cassandra-env.ps1 on Windows) to uncomment and set the values of these properties as required (a combined example follows the list of properties below):

-
-
com.sun.management.jmxremote.ssl
-
set to true to enable SSL
-
com.sun.management.jmxremote.ssl.need.client.auth
-
set to true to enable validation of client certificates
-
com.sun.management.jmxremote.registry.ssl
-
enables SSL sockets for the RMI registry from which clients obtain the JMX connector stub
-
com.sun.management.jmxremote.ssl.enabled.protocols
-
by default, the protocols supported by the JVM will be used, override with a comma-separated list. Note that this is -not usually necessary and using the defaults is the preferred option.
-
com.sun.management.jmxremote.ssl.enabled.cipher.suites
-
by default, the cipher suites supported by the JVM will be used, override with a comma-separated list. Note that -this is not usually necessary and using the defaults is the preferred option.
-
javax.net.ssl.keyStore
-
set the path on the local filesystem of the keystore containing server private keys and public certificates
-
javax.net.ssl.keyStorePassword
-
set the password of the keystore file
-
javax.net.ssl.trustStore
-
if validation of client certificates is required, use this property to specify the path of the truststore containing -the public certificates of trusted clients
-
javax.net.ssl.trustStorePassword
-
set the password of the truststore file
-
-
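Putting those properties together, a minimal cassandra-env.sh sketch might look like the following (the keystore/truststore paths and passwords are illustrative placeholders):

```bash
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=true"
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.registry.ssl=true"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/etc/cassandra/conf/jmx-keystore.jks"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=myKeyPass"
# Only needed if validation of client certificates is required:
JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.need.client.auth=true"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStore=/etc/cassandra/conf/jmx-truststore.jks"
JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStorePassword=myTrustPass"
```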

See also: Oracle Java7 Docs, -Monitor Java with JMX

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/snitch.html b/src/doc/4.0-beta1/operating/snitch.html deleted file mode 100644 index c5e47f645..000000000 --- a/src/doc/4.0-beta1/operating/snitch.html +++ /dev/null @@ -1,180 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Snitch" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Snitch

-

In Cassandra, the snitch has two functions:

-
    -
  • it teaches Cassandra enough about your network topology to route requests efficiently.
  • -
  • it allows Cassandra to spread replicas around your cluster to avoid correlated failures. It does this by grouping -machines into “datacenters” and “racks.” Cassandra will do its best not to have more than one replica on the same -“rack” (which may not actually be a physical location).
  • -
-
-

Dynamic snitching

-

The dynamic snitch monitors read latencies to avoid reading from hosts that have slowed down. The dynamic snitch is configured with the following properties in cassandra.yaml:

-
  • dynamic_snitch: whether the dynamic snitch should be enabled or disabled.
  • dynamic_snitch_update_interval_in_ms: controls how often to perform the more expensive part of host score calculation.
  • dynamic_snitch_reset_interval_in_ms: if set greater than zero, this will allow 'pinning' of replicas to hosts in order to increase cache capacity.
  • dynamic_snitch_badness_threshold: controls how much worse the pinned host has to be before the dynamic snitch will prefer other replicas over it. This is expressed as a double which represents a percentage. Thus, a value of 0.2 means Cassandra would continue to prefer the static snitch values until the pinned host was 20% worse than the fastest.

A configuration sketch with these settings is shown below.
-
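A minimal cassandra.yaml sketch; the values shown here are believed to match the shipped defaults, but treat them as illustrative and check your own cassandra.yaml:

```yaml
dynamic_snitch: true
dynamic_snitch_update_interval_in_ms: 100
dynamic_snitch_reset_interval_in_ms: 600000
dynamic_snitch_badness_threshold: 0.1
```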
-
-

Snitch classes

-

The endpoint_snitch parameter in cassandra.yaml should be set to the class that implements -IEndPointSnitch which will be wrapped by the dynamic snitch and decide if two endpoints are in the same data center -or on the same rack. Out of the box, Cassandra provides the snitch implementations:

-
-
GossipingPropertyFileSnitch
-
This should be your go-to snitch for production use. The rack and datacenter for the local node are defined in -cassandra-rackdc.properties and propagated to other nodes via gossip. If cassandra-topology.properties exists, -it is used as a fallback, allowing migration from the PropertyFileSnitch.
-
SimpleSnitch
-
Treats Strategy order as proximity. This can improve cache locality when disabling read repair. Only appropriate for -single-datacenter deployments.
-
PropertyFileSnitch
-
Proximity is determined by rack and data center, which are explicitly configured in -cassandra-topology.properties.
-
Ec2Snitch
-
Appropriate for EC2 deployments in a single Region, or in multiple regions with inter-region VPC enabled (available -since the end of 2017, see AWS announcement). -Loads Region and Availability Zone information from the EC2 API. The Region is treated as the datacenter, and the -Availability Zone as the rack. Only private IPs are used, so this will work across multiple regions only if -inter-region VPC is enabled.
-
Ec2MultiRegionSnitch
-
Uses public IPs as broadcast_address to allow cross-region connectivity (thus, you should set seed addresses to the -public IP as well). You will need to open the storage_port or ssl_storage_port on the public IP firewall -(For intra-Region traffic, Cassandra will switch to the private IP after establishing a connection).
-
RackInferringSnitch
-
Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each -node’s IP address, respectively. Unless this happens to match your deployment conventions, this is best used as an -example of writing a custom Snitch class and is provided in that spirit.
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/operating/topo_changes.html b/src/doc/4.0-beta1/operating/topo_changes.html deleted file mode 100644 index 0d5cbe3d6..000000000 --- a/src/doc/4.0-beta1/operating/topo_changes.html +++ /dev/null @@ -1,222 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Operating Cassandra" - -doc-title: "Adding, replacing, moving and removing nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Adding, replacing, moving and removing nodes

-
-

Bootstrap

-

Adding new nodes is called “bootstrapping”. The num_tokens parameter defines the number of virtual nodes (tokens) the joining node will be assigned during bootstrap. The tokens define the sections of the ring (token ranges) the node will become responsible for.

-
-

Token allocation

-

With the default token allocation algorithm the new node will pick num_tokens random tokens to become responsible for. Since tokens are distributed randomly, load distribution improves with a higher number of virtual nodes, but this also increases token management overhead. The default of 256 virtual nodes should provide a reasonable load balance with acceptable overhead.

-

On 3.0+ a new token allocation algorithm was introduced to allocate tokens based on the load of existing virtual nodes -for a given keyspace, and thus yield an improved load distribution with a lower number of tokens. To use this approach, -the new node must be started with the JVM option -Dcassandra.allocate_tokens_for_keyspace=<keyspace>, where -<keyspace> is the keyspace from which the algorithm can find the load information to optimize token assignment for.
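For example, the option might be set on the joining node before its first start (the keyspace name cqlkeyspace below is an illustrative placeholder; use a keyspace that already exists with the desired replication settings):

```bash
# On the joining node, e.g. via cassandra-env.sh (or an equivalent entry in jvm.options)
JVM_OPTS="$JVM_OPTS -Dcassandra.allocate_tokens_for_keyspace=cqlkeyspace"
```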

-
-

Manual token assignment

-

You may specify a comma-separated list of tokens manually with the initial_token cassandra.yaml parameter, and -if that is specified Cassandra will skip the token allocation process. This may be useful when doing token assignment -with an external tool or when restoring a node with its previous tokens.

-
-
-
-

Range streaming

-

After the tokens are allocated, the joining node will pick current replicas of the token ranges it will become -responsible for to stream data from. By default it will stream from the primary replica of each token range in order to -guarantee data in the new node will be consistent with the current state.

-

In the case of any unavailable replica, the consistent bootstrap process will fail. To override this behavior and -potentially miss data from an unavailable replica, set the JVM flag -Dcassandra.consistent.rangemovement=false.

-
-
-

Resuming failed/hanged bootstrap

-

On 2.2+, if the bootstrap process fails, it’s possible to resume bootstrap from the previous saved state by calling -nodetool bootstrap resume. If for some reason the bootstrap hangs or stalls, it may also be resumed by simply -restarting the node. In order to cleanup bootstrap state and start fresh, you may set the JVM startup flag --Dcassandra.reset_bootstrap_progress=true.

-

On lower versions, when the bootstrap process fails it is recommended to wipe the node (remove all the data) and restart the bootstrap process again.

-
-
-

Manual bootstrapping

-

It’s possible to skip the bootstrapping process entirely and join the ring straight away by setting the hidden parameter -auto_bootstrap: false. This may be useful when restoring a node from a backup or creating a new data-center.

-
-
-
-

Removing nodes

-

You can take a node out of the cluster with nodetool decommission (issued on the live node being removed), or with nodetool removenode (issued on any other node) to remove a dead one. This will assign the ranges the old node was responsible for to other nodes, and replicate the appropriate data there. If decommission is used, the data will stream from the decommissioned node. If removenode is used, the data will stream from the remaining replicas.

-

No data is removed automatically from the node being decommissioned, so if you want to put the node back into service at -a different token on the ring, it should be removed manually.
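For example (the host ID below is taken from the nodetool status output shown earlier on this page and is purely illustrative):

```bash
# On the node that is leaving the cluster (node is still live):
nodetool decommission

# From any other node, to remove a node that is already dead:
nodetool removenode 4dcdadd2-41f9-4f34-9892-1f20868b27c7
```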

-
-
-

Moving nodes

-

When num_tokens: 1 is used, it's possible to move the node's position in the ring with nodetool move. Moving is both a convenience over, and more efficient than, decommission + bootstrap. After moving a node, nodetool cleanup should be run to remove any unnecessary data.
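A minimal sketch (the token value below is an arbitrary illustrative choice):

```bash
# Move this node to a new position in the ring, then remove data it no longer owns
nodetool move 4611686018427387904
nodetool cleanup
```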

-
-
-

Replacing a dead node

-

In order to replace a dead node, start Cassandra with the JVM startup flag -Dcassandra.replace_address_first_boot=<dead_node_ip>. Once this property is enabled, the node starts in a hibernate state, during which all the other nodes will see this node as DOWN (DN); however, this node will see itself as UP (UN). The accurate replacement state can be found in nodetool netstats.
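For example, this could be set in cassandra-env.sh on the replacement node before its first start (the IP address is taken from the earlier nodetool status output and is purely illustrative):

```bash
# cassandra-env.sh on the replacement node, before its first boot
JVM_OPTS="$JVM_OPTS -Dcassandra.replace_address_first_boot=10.0.1.115"
```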

-

The replacing node will now start to bootstrap the data from the rest of the nodes in the cluster. A replacing node will -only receive writes during the bootstrapping phase if it has a different ip address to the node that is being replaced. -(See CASSANDRA-8523 and CASSANDRA-12344)

-

Once the bootstrapping is complete the node will be marked “UP”.

-
-

Note

-

If any of the following cases apply, you MUST run repair to make the replaced node consistent again, since -it missed ongoing writes during/prior to bootstrapping. The replacement timeframe refers to the period from when the -node initially dies to when a new node completes the replacement process.

-
    -
  1. The node is down for longer than max_hint_window_in_ms before being replaced.
  2. -
  3. You are replacing using the same IP address as the dead node and replacement takes longer than max_hint_window_in_ms.
  4. -
-
-
-
-

Monitoring progress

-

Bootstrap, replace, move and remove progress can be monitored using nodetool netstats which will show the progress -of the streaming operations.

-
-
-

Cleanup data after range movements

-

As a safety measure, Cassandra does not automatically remove data from nodes that “lose” part of their token range due -to a range movement operation (bootstrap, move, replace). Run nodetool cleanup on the nodes that lost ranges to the -joining node when you are satisfied the new node is up and working. If you do not do this the old data will still be -counted against the load on that node.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/plugins/index.html b/src/doc/4.0-beta1/plugins/index.html deleted file mode 100644 index ac59c2603..000000000 --- a/src/doc/4.0-beta1/plugins/index.html +++ /dev/null @@ -1,117 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Third-Party Plugins" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Third-Party Plugins

-

Available third-party plugins for Apache Cassandra

-
-

CAPI-Rowcache

-

The Coherent Accelerator Process Interface (CAPI) is a general term for the infrastructure of attaching a Coherent accelerator to an IBM POWER system. A key innovation in IBM POWER8’s open architecture is the CAPI. It provides a high bandwidth, low latency path between external devices, the POWER8 core, and the system’s open memory architecture. IBM Data Engine for NoSQL is an integrated platform for large and fast growing NoSQL data stores. It builds on the CAPI capability of POWER8 systems and provides super-fast access to large flash storage capacity and addresses the challenges associated with typical x86 server based scale-out deployments.

-

The official page for the CAPI-Rowcache plugin contains further details how to build/run/download the plugin.

-
-
-

Stratio’s Cassandra Lucene Index

-

Stratio’s Lucene index is a Cassandra secondary index implementation based on Apache Lucene. It extends Cassandra’s functionality to provide near real-time distributed search engine capabilities such as with ElasticSearch or Apache Solr, including full text search capabilities, free multivariable, geospatial and bitemporal search, relevance queries and sorting based on column value, relevance or distance. Each node indexes its own data, so high availability and scalability is guaranteed.

-

The official Github repository Cassandra Lucene Index contains everything you need to build/run/configure the plugin.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/search.html b/src/doc/4.0-beta1/search.html deleted file mode 100644 index b844f3b66..000000000 --- a/src/doc/4.0-beta1/search.html +++ /dev/null @@ -1,105 +0,0 @@ ---- -layout: docpage - -title: "Search" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "" -doc-header-links: ' - -' -doc-search-path: "#" - -extra-footer: ' - - - - -' - ---- -
-
- -
-
-
- - - - -
- -
- - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/searchindex.js b/src/doc/4.0-beta1/searchindex.js deleted file mode 100644 index 124c41e2d..000000000 --- a/src/doc/4.0-beta1/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["architecture/dynamo","architecture/guarantees","architecture/index","architecture/overview","architecture/storage_engine","bugs","configuration/cassandra_config_file","configuration/index","contactus","cql/appendices","cql/changes","cql/ddl","cql/definitions","cql/dml","cql/functions","cql/index","cql/indexes","cql/json","cql/mvs","cql/operators","cql/security","cql/triggers","cql/types","data_modeling/data_modeling_conceptual","data_modeling/data_modeling_logical","data_modeling/data_modeling_physical","data_modeling/data_modeling_queries","data_modeling/data_modeling_rdbms","data_modeling/data_modeling_refining","data_modeling/data_modeling_schema","data_modeling/data_modeling_tools","data_modeling/index","data_modeling/intro","development/ci","development/code_style","development/dependencies","development/documentation","development/gettingstarted","development/how_to_commit","development/how_to_review","development/ide","development/index","development/patches","development/release_process","development/testing","faq/index","getting_started/configuring","getting_started/drivers","getting_started/index","getting_started/installing","getting_started/production","getting_started/querying","index","new/auditlogging","new/fqllogging","new/index","new/java11","new/messaging","new/streaming","new/transientreplication","new/virtualtables","operating/audit_logging","operating/backups","operating/bloom_filters","operating/bulk_loading","operating/cdc","operating/compaction/index","operating/compaction/lcs","operating/compaction/stcs","operating/compaction/twcs","operating/compression","operating/hardware","operating/hints","operating/index","operating/metrics","operating/read_repair","operating/repair","operating/security","operating/snitch","operating/topo_changes","plugins/index","tools/cassandra_stress","tools/cqlsh","tools/index","tools/nodetool/assassinate","tools/nodetool/bootstrap","tools/nodetool/cleanup","tools/nodetool/clearsnapshot","tools/nodetool/clientstats","tools/nodetool/compact","tools/nodetool/compactionhistory","tools/nodetool/compactionstats","tools/nodetool/decommission","tools/nodetool/describecluster","tools/nodetool/describering","tools/nodetool/disableauditlog","tools/nodetool/disableautocompaction","tools/nodetool/disablebackup","tools/nodetool/disablebinary","tools/nodetool/disablefullquerylog","tools/nodetool/disablegossip","tools/nodetool/disablehandoff","tools/nodetool/disablehintsfordc","tools/nodetool/disableoldprotocolversions","tools/nodetool/drain","tools/nodetool/enableauditlog","tools/nodetool/enableautocompaction","tools/nodetool/enablebackup","tools/nodetool/enablebinary","tools/nodetool/enablefullquerylog","tools/nodetool/enablegossip","tools/nodetool/enablehandoff","tools/nodetool/enablehintsfordc","tools/nodetool/enableoldprotocolversions","tools/nodetool/failuredetector","tools/nodetool/flush","tools/nodetool/garbagecollect","tools/nodetool/gcstats","tools/nodetool/getbatchlogreplaythrottle","tools/nodetool/getcompactionthreshold","tools/nodetool/getcompactionthroughput","tools/nodetool/getconcurrency","tools/nodetool/getconcurrentcompactors","tools/nodetool/getconcurrentviewbuilders","tools/nodetool/getendpoints","tools/nodetool/getinterdcstreamthroughput","tools/nodetool/getlogginglevels","tools/no
detool/getmaxhintwindow","tools/nodetool/getreplicas","tools/nodetool/getseeds","tools/nodetool/getsstables","tools/nodetool/getstreamthroughput","tools/nodetool/gettimeout","tools/nodetool/gettraceprobability","tools/nodetool/gossipinfo","tools/nodetool/handoffwindow","tools/nodetool/help","tools/nodetool/import","tools/nodetool/info","tools/nodetool/invalidatecountercache","tools/nodetool/invalidatekeycache","tools/nodetool/invalidaterowcache","tools/nodetool/join","tools/nodetool/listsnapshots","tools/nodetool/move","tools/nodetool/netstats","tools/nodetool/nodetool","tools/nodetool/pausehandoff","tools/nodetool/profileload","tools/nodetool/proxyhistograms","tools/nodetool/rangekeysample","tools/nodetool/rebuild","tools/nodetool/rebuild_index","tools/nodetool/refresh","tools/nodetool/refreshsizeestimates","tools/nodetool/reloadlocalschema","tools/nodetool/reloadseeds","tools/nodetool/reloadssl","tools/nodetool/reloadtriggers","tools/nodetool/relocatesstables","tools/nodetool/removenode","tools/nodetool/repair","tools/nodetool/repair_admin","tools/nodetool/replaybatchlog","tools/nodetool/resetfullquerylog","tools/nodetool/resetlocalschema","tools/nodetool/resumehandoff","tools/nodetool/ring","tools/nodetool/scrub","tools/nodetool/setbatchlogreplaythrottle","tools/nodetool/setcachecapacity","tools/nodetool/setcachekeystosave","tools/nodetool/setcompactionthreshold","tools/nodetool/setcompactionthroughput","tools/nodetool/setconcurrency","tools/nodetool/setconcurrentcompactors","tools/nodetool/setconcurrentviewbuilders","tools/nodetool/sethintedhandoffthrottlekb","tools/nodetool/setinterdcstreamthroughput","tools/nodetool/setlogginglevel","tools/nodetool/setmaxhintwindow","tools/nodetool/setstreamthroughput","tools/nodetool/settimeout","tools/nodetool/settraceprobability","tools/nodetool/sjk","tools/nodetool/snapshot","tools/nodetool/status","tools/nodetool/statusautocompaction","tools/nodetool/statusbackup","tools/nodetool/statusbinary","tools/nodetool/statusgossip","tools/nodetool/statushandoff","tools/nodetool/stop","tools/nodetool/stopdaemon","tools/nodetool/tablehistograms","tools/nodetool/tablestats","tools/nodetool/toppartitions","tools/nodetool/tpstats","tools/nodetool/truncatehints","tools/nodetool/upgradesstables","tools/nodetool/verify","tools/nodetool/version","tools/nodetool/viewbuildstatus","tools/sstable/index","tools/sstable/sstabledump","tools/sstable/sstableexpiredblockers","tools/sstable/sstablelevelreset","tools/sstable/sstableloader","tools/sstable/sstablemetadata","tools/sstable/sstableofflinerelevel","tools/sstable/sstablerepairedset","tools/sstable/sstablescrub","tools/sstable/sstablesplit","tools/sstable/sstableupgrade","tools/sstable/sstableutil","tools/sstable/sstableverify","troubleshooting/finding_nodes","troubleshooting/index","troubleshooting/reading_logs","troubleshooting/use_nodetool","troubleshooting/use_tools"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.todo":1,sphinx:54},filenames:["architecture/dynamo.rst","architecture/guarantees.rst","architecture/index.rst","architecture/overview.rst","architecture/storage_engine.rst","bugs.rst","configuration/cassandra_config_file.rst","configuration/index.rst","contactus.rst","cql/appendices.rst","cql/changes.rst","cql/ddl.rst","cql/definitions.rst","cql/dml.rst","cql/functions.rst","cql/index.rst","cql/indexes.rst","cql/json.rst","cql/mvs
.rst","cql/operators.rst","cql/security.rst","cql/triggers.rst","cql/types.rst","data_modeling/data_modeling_conceptual.rst","data_modeling/data_modeling_logical.rst","data_modeling/data_modeling_physical.rst","data_modeling/data_modeling_queries.rst","data_modeling/data_modeling_rdbms.rst","data_modeling/data_modeling_refining.rst","data_modeling/data_modeling_schema.rst","data_modeling/data_modeling_tools.rst","data_modeling/index.rst","data_modeling/intro.rst","development/ci.rst","development/code_style.rst","development/dependencies.rst","development/documentation.rst","development/gettingstarted.rst","development/how_to_commit.rst","development/how_to_review.rst","development/ide.rst","development/index.rst","development/patches.rst","development/release_process.rst","development/testing.rst","faq/index.rst","getting_started/configuring.rst","getting_started/drivers.rst","getting_started/index.rst","getting_started/installing.rst","getting_started/production.rst","getting_started/querying.rst","index.rst","new/auditlogging.rst","new/fqllogging.rst","new/index.rst","new/java11.rst","new/messaging.rst","new/streaming.rst","new/transientreplication.rst","new/virtualtables.rst","operating/audit_logging.rst","operating/backups.rst","operating/bloom_filters.rst","operating/bulk_loading.rst","operating/cdc.rst","operating/compaction/index.rst","operating/compaction/lcs.rst","operating/compaction/stcs.rst","operating/compaction/twcs.rst","operating/compression.rst","operating/hardware.rst","operating/hints.rst","operating/index.rst","operating/metrics.rst","operating/read_repair.rst","operating/repair.rst","operating/security.rst","operating/snitch.rst","operating/topo_changes.rst","plugins/index.rst","tools/cassandra_stress.rst","tools/cqlsh.rst","tools/index.rst","tools/nodetool/assassinate.rst","tools/nodetool/bootstrap.rst","tools/nodetool/cleanup.rst","tools/nodetool/clearsnapshot.rst","tools/nodetool/clientstats.rst","tools/nodetool/compact.rst","tools/nodetool/compactionhistory.rst","tools/nodetool/compactionstats.rst","tools/nodetool/decommission.rst","tools/nodetool/describecluster.rst","tools/nodetool/describering.rst","tools/nodetool/disableauditlog.rst","tools/nodetool/disableautocompaction.rst","tools/nodetool/disablebackup.rst","tools/nodetool/disablebinary.rst","tools/nodetool/disablefullquerylog.rst","tools/nodetool/disablegossip.rst","tools/nodetool/disablehandoff.rst","tools/nodetool/disablehintsfordc.rst","tools/nodetool/disableoldprotocolversions.rst","tools/nodetool/drain.rst","tools/nodetool/enableauditlog.rst","tools/nodetool/enableautocompaction.rst","tools/nodetool/enablebackup.rst","tools/nodetool/enablebinary.rst","tools/nodetool/enablefullquerylog.rst","tools/nodetool/enablegossip.rst","tools/nodetool/enablehandoff.rst","tools/nodetool/enablehintsfordc.rst","tools/nodetool/enableoldprotocolversions.rst","tools/nodetool/failuredetector.rst","tools/nodetool/flush.rst","tools/nodetool/garbagecollect.rst","tools/nodetool/gcstats.rst","tools/nodetool/getbatchlogreplaythrottle.rst","tools/nodetool/getcompactionthreshold.rst","tools/nodetool/getcompactionthroughput.rst","tools/nodetool/getconcurrency.rst","tools/nodetool/getconcurrentcompactors.rst","tools/nodetool/getconcurrentviewbuilders.rst","tools/nodetool/getendpoints.rst","tools/nodetool/getinterdcstreamthroughput.rst","tools/nodetool/getlogginglevels.rst","tools/nodetool/getmaxhintwindow.rst","tools/nodetool/getreplicas.rst","tools/nodetool/getseeds.rst","tools/nodetool/getsstables.rst","tools/nodetool/getstreamth
roughput.rst","tools/nodetool/gettimeout.rst","tools/nodetool/gettraceprobability.rst","tools/nodetool/gossipinfo.rst","tools/nodetool/handoffwindow.rst","tools/nodetool/help.rst","tools/nodetool/import.rst","tools/nodetool/info.rst","tools/nodetool/invalidatecountercache.rst","tools/nodetool/invalidatekeycache.rst","tools/nodetool/invalidaterowcache.rst","tools/nodetool/join.rst","tools/nodetool/listsnapshots.rst","tools/nodetool/move.rst","tools/nodetool/netstats.rst","tools/nodetool/nodetool.rst","tools/nodetool/pausehandoff.rst","tools/nodetool/profileload.rst","tools/nodetool/proxyhistograms.rst","tools/nodetool/rangekeysample.rst","tools/nodetool/rebuild.rst","tools/nodetool/rebuild_index.rst","tools/nodetool/refresh.rst","tools/nodetool/refreshsizeestimates.rst","tools/nodetool/reloadlocalschema.rst","tools/nodetool/reloadseeds.rst","tools/nodetool/reloadssl.rst","tools/nodetool/reloadtriggers.rst","tools/nodetool/relocatesstables.rst","tools/nodetool/removenode.rst","tools/nodetool/repair.rst","tools/nodetool/repair_admin.rst","tools/nodetool/replaybatchlog.rst","tools/nodetool/resetfullquerylog.rst","tools/nodetool/resetlocalschema.rst","tools/nodetool/resumehandoff.rst","tools/nodetool/ring.rst","tools/nodetool/scrub.rst","tools/nodetool/setbatchlogreplaythrottle.rst","tools/nodetool/setcachecapacity.rst","tools/nodetool/setcachekeystosave.rst","tools/nodetool/setcompactionthreshold.rst","tools/nodetool/setcompactionthroughput.rst","tools/nodetool/setconcurrency.rst","tools/nodetool/setconcurrentcompactors.rst","tools/nodetool/setconcurrentviewbuilders.rst","tools/nodetool/sethintedhandoffthrottlekb.rst","tools/nodetool/setinterdcstreamthroughput.rst","tools/nodetool/setlogginglevel.rst","tools/nodetool/setmaxhintwindow.rst","tools/nodetool/setstreamthroughput.rst","tools/nodetool/settimeout.rst","tools/nodetool/settraceprobability.rst","tools/nodetool/sjk.rst","tools/nodetool/snapshot.rst","tools/nodetool/status.rst","tools/nodetool/statusautocompaction.rst","tools/nodetool/statusbackup.rst","tools/nodetool/statusbinary.rst","tools/nodetool/statusgossip.rst","tools/nodetool/statushandoff.rst","tools/nodetool/stop.rst","tools/nodetool/stopdaemon.rst","tools/nodetool/tablehistograms.rst","tools/nodetool/tablestats.rst","tools/nodetool/toppartitions.rst","tools/nodetool/tpstats.rst","tools/nodetool/truncatehints.rst","tools/nodetool/upgradesstables.rst","tools/nodetool/verify.rst","tools/nodetool/version.rst","tools/nodetool/viewbuildstatus.rst","tools/sstable/index.rst","tools/sstable/sstabledump.rst","tools/sstable/sstableexpiredblockers.rst","tools/sstable/sstablelevelreset.rst","tools/sstable/sstableloader.rst","tools/sstable/sstablemetadata.rst","tools/sstable/sstableofflinerelevel.rst","tools/sstable/sstablerepairedset.rst","tools/sstable/sstablescrub.rst","tools/sstable/sstablesplit.rst","tools/sstable/sstableupgrade.rst","tools/sstable/sstableutil.rst","tools/sstable/sstableverify.rst","troubleshooting/finding_nodes.rst","troubleshooting/index.rst","troubleshooting/reading_logs.rst","troubleshooting/use_nodetool.rst","troubleshooting/use_tools.rst"],objects:{},objnames:{},objtypes:{},terms:{"000kib":[64,207],"000mib":56,"00t89":22,"011mib":207,"014kib":64,"017kib":64,"018kib":[64,207],"01t02":218,"021kib":[64,207],"022kib":64,"024kib":64,"028809z":204,"029kib":64,"031mib":207,"033kib":64,"036kib":64,"03t04":22,"040kib":64,"044kib":64,"045kib":64,"049kib":64,"054mib":207,"055z":204,"056kib":207,"061kib":207,"062mib":207,"063kib":207,"064kib":207,"0665ae80b2d711e886c66d2c86545
d91":205,"06t22":218,"077mib":207,"078kib":207,"081kib":207,"082kib":207,"090kib":207,"092mib":207,"096gib":215,"0974e5a0aa5811e8a0a06d2c86545d91":207,"099kib":207,"0_222":49,"0_232":49,"0d927649052c":64,"0ee8b91fdd0":219,"0f03de2d9ae1":60,"0f9a6a95":49,"0h00m04":219,"0percentil":11,"0x0000000000000000":220,"0x0000000000000003":14,"0x00000004":13,"0x00007f829c001000":220,"0x00007f82d0856000":220,"0x00007f82e800e000":220,"0x00007f82e80cc000":220,"0x00007f82e80d7000":220,"0x00007f82e84d0800":220,"0x2a19":220,"0x2a29":220,"0x2a2a":220,"0x2a2c":220,"0x3a74":220,"100b":81,"100k":81,"100m":11,"100mb":[6,32],"1024l":61,"105kib":207,"10m":11,"10mb":6,"10s":[82,220],"10x":[6,67],"115kib":64,"115mib":207,"11e6":82,"11e8":219,"11e9":[60,64,76],"122kib":207,"128kb":220,"128mb":64,"128mib":[6,57],"128th":4,"12gb":71,"12h30m":22,"130mib":207,"142mib":211,"147mib":207,"14fa364d":49,"14t00":218,"150kib":207,"1520e8c38f00":76,"155kib":207,"15m":74,"160mb":67,"162kib":207,"165kib":207,"167kb":220,"16kb":50,"16kib":70,"16l":61,"16mb":[45,66],"16th":6,"173kib":207,"176kib":207,"17t06":218,"184kb":220,"192kib":57,"19821dcea330":64,"19t03":[168,211],"1f20868b27c7":[54,76],"1gb":64,"1kb":50,"1mo":22,"1n_r":28,"1st":[22,75],"1ubuntu1":49,"200m":[218,220],"203mib":207,"2062b290":219,"20m":220,"20t20":204,"217kb":220,"217mib":207,"21x":49,"22x":49,"22z":204,"232kib":64,"232mib":207,"233kib":64,"23t06":218,"23z":204,"244m":220,"245mib":207,"247mib":207,"24h":22,"25005289beb2":204,"250m":6,"251m":220,"253mib":207,"256mb":6,"256th":6,"258mib":207,"25mb":220,"265kib":207,"266k":49,"270mib":207,"27t04":218,"280mib":207,"28757dde":49,"28757dde589f70410f9a6a95c39ee7e6cde63440e2b06b91ae6b200614fa364d":49,"28t17":218,"295kib":207,"296a2d30":64,"296a2d30c22a11e9b1350d927649052c":[62,64],"299kib":207,"29d":22,"29t00":218,"2cc0":219,"2e10":10,"2gb":71,"2nd":[6,11,75,78],"2xlarg":71,"300mib":207,"300s":6,"307kib":207,"30kb":220,"30s":6,"30t23":218,"30x":49,"311x":49,"314kib":207,"320k":49,"322kib":207,"325kib":207,"327e":82,"32gb":71,"32mb":[6,45],"331mib":207,"333kib":207,"33m":218,"348mib":207,"353mib":215,"3578d7de":204,"35ea8c9f":219,"361kib":207,"366b":220,"370mib":207,"378711z":204,"383b":220,"384z":204,"385b":220,"386kib":207,"387mib":207,"388mib":207,"392kib":207,"392mib":207,"394kib":207,"3f22a07b2bc6":204,"3ff3e5109f22":13,"3gb":[70,220],"3ms":220,"3rd":[6,74,78],"401mib":207,"406mib":207,"40a7":219,"40f3":13,"40fa":219,"40s":220,"40x":49,"410kib":207,"412kib":207,"416mib":215,"41b52700b4ed11e896476d2c86545d91":208,"41f9":[54,76],"423b":220,"423kib":207,"4248dc9d790e":204,"431kib":207,"43kb":220,"440kib":207,"443kib":207,"446eae30c22a11e9b1350d927649052c":[62,64],"449mib":207,"452kib":207,"457mib":207,"458mib":207,"45f4":[54,76],"461mib":207,"465kib":207,"46b4":[54,76],"46e9":219,"476mib":207,"481mib":207,"482mib":211,"48d6":204,"4ae3":13,"4d40":204,"4dcdadd2":[54,76],"4f34":[54,76],"4f3438394e39374d3730":208,"4f58":219,"4kb":[11,50],"4mib":[6,57],"4th":76,"4xlarg":71,"500m":220,"501mib":207,"50kb":[6,220],"50m":[11,220],"50mb":[6,61,68,212],"50th":216,"512mb":6,"512mib":[6,57],"513kib":207,"521kib":207,"522kib":64,"524kib":207,"536kib":207,"543mib":207,"545kib":207,"54kb":220,"550mib":207,"5573e5b09f14":13,"559kib":207,"561mib":207,"563kib":207,"563mib":207,"56m":218,"571kib":207,"576kb":220,"5850e9f0a63711e8a5c5091830ac5256":213,"589f7041":49,"591mib":207,"592kib":207,"5gb":61,"5kb":6,"5level":61,"5mb":67,"603kib":207,"606mib":207,"60m":11,"61111111111111e":208,"613mib":207,"619kib":207,"61de":219,"635kib":207,"63653
32094dd11e88f324f9c503e4753":[206,209,211,212,214,215],"638mib":207,"640kib":207,"646mib":207,"64k":6,"64kb":50,"64kib":57,"650b":220,"65c429e08c5a11e8939edf4f403979ef":[204,206],"65kb":220,"663kib":207,"665kib":207,"669kb":220,"683kib":64,"684b":[54,76],"684mib":207,"688kib":207,"690mib":207,"6e630115fd75":219,"6gb":219,"6ms":6,"701mib":207,"715b":220,"718mib":207,"71b0a49":218,"725mib":207,"730kib":207,"732mib":207,"734mib":207,"736kb":220,"7374e9b5ab08c1f1e612bf72293ea14c959b0c3c":38,"737mib":207,"738mib":207,"743kib":207,"744mib":207,"751mib":207,"752e278f":219,"75th":74,"771mib":207,"775mib":215,"780mib":207,"782kib":207,"783522z":204,"789z":204,"791mib":207,"793kib":207,"798mib":207,"79kb":220,"7f3a":219,"802kib":207,"807kib":64,"812mib":207,"813kib":207,"814kib":207,"832mib":207,"835kib":207,"840kib":207,"843mib":207,"845b":220,"846kib":207,"848kib":207,"84fc":204,"861mib":207,"86400s":66,"869kb":220,"872kib":207,"877mib":207,"880mib":207,"882kib":207,"889mib":207,"892kib":207,"894mib":207,"89h4m48":22,"8gb":[71,220],"8th":[6,65],"8u222":49,"903mib":207,"90percentil":11,"90th":74,"911kib":207,"920kib":207,"920mib":207,"9328455af73f":219,"938kib":207,"954kib":207,"957mib":207,"95ac6470":82,"95th":74,"965kib":207,"9695b790a63211e8a6fb091830ac5256":213,"974b":219,"975kib":207,"983kib":207,"98th":74,"993mib":207,"996kib":207,"99f7":[54,76],"99p":[11,64],"99percentil":[11,59],"99th":[74,216],"9dc1a293":219,"9e6054da04a7":219,"9eeb":[54,76],"9gb":220,"9percentil":11,"9th":74,"\u00eatre":9,"abstract":[34,39],"boolean":[9,11,12,14,17,20,22,28,29,60,82],"break":[11,31,42,66,75,213,217,220],"byte":[4,6,9,13,22,28,50,54,57,60,74,91,109,145,195,207,219],"case":[4,6,10,11,12,13,14,16,17,18,22,24,26,27,28,35,38,39,42,44,45,50,57,59,63,64,71,72,77,79,81,82,207,218,219,220],"catch":[34,57,209],"class":[3,6,11,14,22,29,34,40,44,50,53,54,56,59,60,61,62,64,66,70,73,76,77,81,146,158,179,207,218],"default":[0,4,6,10,11,13,14,17,18,20,22,27,33,40,43,44,45,46,50,53,54,57,58,59,61,62,63,64,65,66,67,68,69,70,72,74,75,76,77,79,81,82,86,105,109,116,145,146,148,151,161,162,168,183,185,196,204,207,208,212,216,218,219,220],"enum":[9,58],"export":[40,43,56,74,82,220],"final":[14,20,24,28,34,36,40,43,54,61,64,65,66,71,77,83,162,203,220],"float":[9,10,11,12,14,17,19,22,63,70],"function":[0,3,6,9,10,11,12,15,16,18,20,22,28,32,39,47,52,53,54,60,61,77,78,80,82,203],"goto":33,"import":[1,11,14,22,23,24,27,32,40,41,44,46,50,58,59,62,66,71,72,73,74,76,82,146,216,219,220],"int":[4,9,10,11,13,14,17,18,19,20,22,32,44,54,60,62,64,65,70,74,75,76],"long":[4,6,13,22,27,38,39,45,53,56,62,66,70,72,74,75,81,210,211,218,220],"new":[0,1,3,4,6,10,11,14,16,17,18,19,20,21,22,24,27,28,32,33,34,36,37,39,40,42,43,44,48,49,50,52,53,54,56,57,58,60,61,62,63,64,66,67,69,71,72,75,77,79,81,137,144,146,206,207,209,211,214,216],"null":[9,10,12,13,14,17,18,22,34,60,82],"public":[6,14,32,34,35,43,44,45,64,77,78],"return":[6,9,11,13,14,16,17,18,19,20,22,24,27,39,54,58,59,60,70,72,75,81,161,205,206,220],"short":[4,22,25,28,36],"static":[6,9,10,11,18,24,25,28,36,64,74,78,208],"super":[4,24,77,80,81],"switch":[4,6,10,20,24,40,43,45,57,73,74,77,78],"throw":[6,14,34,44,54,216],"transient":[3,6,11,52,55],"true":[6,11,12,17,20,22,27,28,40,45,53,54,56,58,59,60,61,62,65,66,67,69,72,77,79,82,143,146,213],"try":[0,6,11,24,26,27,34,35,40,42,45,56,61,64,66,70,76,161,207,219,220],"var":[4,6,34,49,204,205,206,207,208,209,210,211,212,213,214,215,218,220],"void":44,"while":[0,4,6,10,11,12,13,22,25,28,30,38,42,43,53,54,57,58,59,63,64,67,69,70,71,72,76,77,82,207,216,21
8,219,220],AES:6,AND:[9,11,13,14,18,20,29,54,60,64,77,81,82,218],AWS:[50,71,78],Added:[10,55],Adding:[6,11,20,22,28,45,52,54,57,73,77],And:[11,14,20,57,77],Are:39,Ave:22,BUT:34,Being:59,But:[13,20,22,27,33,34,42,45,64,75,82],CAS:[1,6,219],CCS:220,CFs:[161,168],CLS:82,CMS:220,DCs:[6,59],DNS:45,Doing:[10,83,203],EBS:71,For:[0,3,4,6,9,10,11,12,13,14,15,16,17,18,20,21,22,23,24,25,26,28,32,37,42,44,45,46,49,50,51,54,57,61,66,67,69,70,71,72,75,76,77,78,81,82,207,208,209,212,216,218,219,220],GCs:6,HDs:220,Has:39,IDE:[30,37,41,52],IDEs:[30,40,41],IDs:[25,27,146,186],INTO:[6,9,11,13,14,17,22,54,62,64,76],IPs:[6,78,167,186],Ids:192,Its:[54,64],JKS:6,JPS:220,KBs:[6,72],LCS:[11,66,208],LTS:[49,56],MVs:32,NFS:71,NOT:[6,9,10,11,13,14,16,18,20,21,22,64],NTS:[6,50],N_s:28,Not:[13,20,42,50,56,66,70],ONE:[0,6,11,74,75,81,82],One:[0,24,27,42,44,45,56,64,66,69,220],PFS:6,Pis:71,QPS:216,Such:22,THE:6,TLS:[6,64,73,207],That:[0,11,12,18,22,27,42,45,59,68,82,220],The:[0,1,3,4,6,8,9,10,11,12,14,16,18,19,20,21,22,23,24,25,26,27,28,29,30,32,33,34,36,37,38,40,42,43,44,45,46,49,50,51,52,53,54,55,56,57,58,59,62,63,64,65,67,68,69,70,71,72,74,75,76,77,78,79,80,81,82,86,89,94,96,102,106,112,115,116,119,124,128,130,132,137,144,146,148,152,153,159,161,168,171,172,179,185,186,187,194,196,199,200,202,206,207,208,209,211,212,213,214,217,218,219,220],Their:22,Then:[13,44,45,66,77,209,213,220],There:[6,10,11,12,13,14,22,25,27,28,30,40,42,44,45,50,57,66,68,72,74,76,77,81,210,212,216,219,220],These:[0,4,6,11,14,30,40,49,53,57,64,74,76,77,81,82,214,216,217,218,219,220],USE:[9,14,15,53,54,60,61,62],USING:[9,13,16,21,22,69],Use:[6,11,13,20,24,25,45,51,52,54,64,65,73,77,81,82,83,84,89,146,151,161,192,199,203,204,209,210,211,214,217],Used:[23,24,25,26,27,28,29,30,74,220],Useful:[66,220],Uses:[6,17,73,78],Using:[1,11,13,31,36,44,45,50,54,55,73,77,83,203,204,207,211,214,218],WILL:6,WITH:[9,11,12,16,18,20,29,32,50,53,54,59,60,62,63,64,65,66,70,76,77,81,82],Will:[6,52,54,72,109,146,179,209],With:[0,1,6,13,17,45,50,58,59,64,66,75,76,79,85,218,220],Yes:45,_build:56,_build_java:56,_by_:24,_cache_max_entri:77,_cdc:65,_development_how_to_review:37,_if_:6,_main:56,_must_:6,_only_:218,_path_to_snapshot_fold:64,_trace:[74,219],_udt:14,_update_interval_in_m:77,_use:14,_validity_in_m:77,_x86_64_:220,a6fd:219,a8ed:60,abil:[14,20,30,45,70],abilityid:16,abl:[0,1,6,14,22,25,26,28,33,36,40,44,45,53,54,58,59,62,66,77,216,217],abort:33,about:[0,1,4,6,20,23,24,26,27,28,29,36,40,41,42,44,45,50,54,58,60,63,66,76,78,82,88,146,167,208,218,219,220],abov:[0,4,6,8,11,12,13,14,22,35,40,42,45,50,68,70,74,81,83,203,207,214,220],absenc:12,absent:0,abstracttyp:22,ac79:219,acceler:80,accept:[0,6,10,11,12,13,17,42,44,50,63,72,75,79,104,146],access:[3,6,10,11,20,22,24,26,30,32,40,42,60,61,64,71,72,73,74,80,207,208,216,217,220],accident:[24,206],accompani:6,accomplish:[26,54],accord:[0,1,4,6,24,45],accordingli:[6,14,45],account:[1,6,22,36,44,220],accru:[66,74],accrual:0,accumul:[6,66,72,74],accur:[6,24,28,45,63,79,167,208],accuraci:[63,148,196],acheiv:77,achiev:[0,1,6,32,54,66,74],achil:47,ack:[4,6,59,75],acknowledg:[0,25,72,75],acoount:74,acquir:[20,57,74],across:[0,1,6,11,20,25,27,28,32,42,58,72,74,76,77,78,81,146,150,208,215],act:[0,26,28,59,218],action:[6,13,20,40,72,215,220],activ:[4,6,30,37,42,53,54,57,60,65,74,76,82,146,148,196,216,218,219,220],active_task:60,active_tasks_limit:60,activetask:74,actor:77,actual:[0,4,6,13,21,28,30,34,36,39,43,45,57,58,60,61,66,67,76,78,81,161,211,220],acycl:20,adapt:[23,24,25,26,27,28,29,30],add:[0,6,9,10,11,22,24,25,28,32,33,36,37,38,
39,41,42,43,46,49,52,53,54,56,58,59,61,62,64,66,72,75,76,77,81,209,214,218],addamsfamili:11,added:[0,1,3,4,6,10,11,14,19,24,36,39,49,54,57,58,60,62,64,65,66,68,72,75,76,212],adding:[0,6,13,14,24,28,32,39,50,54,71,76,82,209,215],addit:[0,1,3,6,9,11,13,19,20,22,24,25,26,27,28,32,40,42,46,50,53,57,58,59,60,61,62,66,69,70,71,72,74,75,77,82,218,220],addition:[11,13,33,59,69,76,81,218],additional_write_polici:[11,64],addr:57,address:[6,8,17,22,25,26,29,33,40,42,46,52,53,54,56,57,60,61,62,64,74,76,78,79,80,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,219,220],addressload:54,addrow:64,adher:10,adjac:[32,69],adjust:[6,11,25,50,63],adler32:4,adopt:57,advanc:[0,3,6,36,60,61,73,77,217],advantag:[0,27,50,71],advers:[45,219],advic:[42,45],advis:[6,12,18,22,45,49],advoc:0,ae6b2006:49,aefb:204,af08:13,afd:22,affect:[0,11,24,32,39,42,45,53,58,66,75,168,211,216,220],afford:6,afraid:27,after:[5,6,10,11,12,13,14,16,17,18,24,25,27,32,40,42,43,45,49,50,53,54,56,59,62,64,65,66,67,71,72,73,74,75,76,77,78,82,209,210,213],afterward:[33,36,40,44],afunct:14,again:[0,6,25,42,43,54,56,62,66,76,79,82,210,213],against:[6,11,14,18,27,30,36,42,44,45,54,58,60,70,71,72,76,79,81,82,161,208,220],agent:[24,53,54,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,220],agentlib:40,aggreg:[3,6,9,10,13,15,18,20,53,54,60,61,74,82],aggress:216,ago:210,agre:[0,75],ahead:48,aid:[3,12],aim:[6,218],akeyspac:14,akin:57,alg:[64,207],algorithm:[0,6,11,50,64,70,79,207,218],alia:[10,13,14,47],alias:[6,10,18],alic:20,align:34,aliv:[0,6],all:[0,1,3,4,6,9,11,12,13,14,17,18,19,22,23,24,25,26,27,28,29,30,32,33,34,35,36,38,39,40,42,44,49,52,53,54,56,57,58,59,60,61,63,64,65,66,67,68,69,70,72,74,75,76,77,79,81,82,83,86,87,88,104,116,121,137,138,143,146,148,150,159,162,168,183,185,187,196,198,199,200,203,205,209,211,215,216,218,219,220],allmemtableslivedatas:74,allmemtablesoffheaps:74,allmemtablesonheaps:74,alloc:[0,6,45,50,56,57,65,71,74],allocate_tokens_for_keyspac:[60,79],allocate_tokens_for_local_replication_factor:50,allow:[0,1,3,4,6,9,10,11,12,14,16,17,18,22,23,24,27,33,36,37,46,50,54,58,59,60,63,64,65,66,69,70,71,75,76,78,81,211,219,220],allowallauthent:[6,77],allowallauthor:[6,77],allowallinternodeauthent:6,allowallnetworkauthor:6,almost:[0,4,6,14,22,57,67,216,220],alon:[11,34],along:[0,6,13,27,37,43,56,61,72,143,146,218],alongsid:[51,82],alpha5:[11,22,52],alphabet:34,alphanumer:[11,20],alreadi:[6,11,14,16,18,22,24,26,27,42,45,50,54,56,57,64,68,70,77,81,83,199,203,212],also:[0,3,4,6,10,11,12,13,14,17,18,20,22,23,24,25,26,28,30,32,33,36,40,42,43,44,45,46,49,50,54,56,58,59,61,62,64,65,66,67,68,69,71,72,74,75,77,79,82,116,200,213,214,218,219,220],alter:[0,9,10,15,17,28,45,59,60,63,65,66,70,76,77],alter_keyspac:53,alter_keyspace_stat:12,alter_rol:
53,alter_role_stat:12,alter_t:53,alter_table_instruct:11,alter_table_stat:12,alter_typ:53,alter_type_modif:22,alter_type_stat:[12,22],alter_user_stat:12,alter_view:53,altern:[10,11,12,13,17,22,40,42,46,49,64,71,72,75,77,207],although:[6,27,28,42,57,81,218,220],altogeth:57,alwai:[0,3,4,6,9,10,11,13,14,18,22,27,34,36,42,43,44,45,50,54,57,59,66,67,71,81,216,220],amazon:[0,3,49],amazonaw:56,amen:[24,26,27,29],amend:38,amenities_by_room:[24,29],amenity_nam:29,ami:49,among:[32,75],amongst:11,amount:[0,6,11,13,22,28,40,42,44,45,58,59,66,70,71,72,74,76,79,82,161,220],amplif:[50,67,71],anaggreg:14,analogu:13,analysi:[24,28,31,217,218],analyt:[24,63],analyz:[25,28,32,44,220],ancestor:[4,214],ani:[0,4,6,10,11,12,13,14,17,18,20,21,22,24,25,27,30,33,35,36,38,39,40,42,43,44,46,49,50,52,53,54,56,57,58,59,60,64,66,69,71,72,74,75,76,77,79,81,82,84,137,143,146,151,168,183,204,208,211,213,214,217,218,219],annot:34,announc:78,anonym:[12,22,53,60,61],anoth:[1,6,11,14,20,22,24,27,28,32,44,54,57,59,60,62,64,66,75,77,82,205,212,217,220],anotherarg:14,answer:[41,220],ant:[33,35,40,42,44,56],antclassload:44,anti:[0,6,22,31,59,72],anticip:[0,11,59],anticompact:[66,74,192,212],anticompactiontim:74,antientropystag:[60,74,219],antipattern:71,anymor:[38,66],anyon:34,anyth:[59,66],anywai:[6,56],anywher:[13,57,65],apach:[0,1,2,3,5,6,7,14,21,30,32,34,35,36,38,39,41,42,44,45,48,49,53,54,56,57,58,59,60,61,62,64,66,70,72,74,77,80,83,204,205,206,207,208,209,210,211,212,214,215,218],apart:61,api:[0,6,8,17,51,58,60,78],appar:24,appear:[6,11,12,14,24,54,66,69,75,82],append:[4,22,24,38,61,71,72,74,82,218],appendic:[15,52],appendix:[12,15],appl:22,appli:[0,4,6,9,10,11,12,13,20,22,38,42,44,45,57,72,74,75,79,81,82],applic:[0,1,3,6,11,20,23,24,25,27,28,29,31,32,34,37,39,40,52,53,55,56,59,60,61,64,70,77,81,218],appreci:42,approach:[4,24,25,26,27,28,32,57,66,79],appropri:[6,11,20,22,39,42,43,57,77,78,79,218],approv:33,approxim:[0,28,50,54,68,69,74,208],apt:[49,220],arbitrari:[11,12,22,62,81],architectur:[3,25,45,52,80],archiv:[4,6,43,49,54,65,109],archive_command:[53,54,109],archive_retri:[54,109],area:[3,37,70,220],aren:[13,76],arg:[14,54,146,184,204,208,214],argnam:14,argnum:14,argument:[6,11,13,14,16,17,45,46,50,53,54,62,64,70,81,82,84,85,86,87,89,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202],arguments_declar:14,arguments_signatur:14,arithmet:[10,12,15,52],arithmetic_oper:12,around:[6,20,24,27,32,53,54,66,71,78,220],arrai:[6,23,45,49,57],arriv:[6,42,45,57],arrow:26,artem:24,artifact:[26,35,40,41],artifici:11,asap:10,asc:[9,11,13,29,60],ascend:[11,13],ascii:[9,14,17,22],asdf:204,asf:[8,40,43],ask:[5,28,42,43,44,52,77],aspect:[0,11,59],assassin:146,assertionerror:34,assertrow:44,assess:[219,220],assign:[0,6,13,20,25,27,45],assist:24,associ:[0,6,11,26,54,60,80,214,216],assum:[0,6,11,14,25,26,27,28,36,40,77,78,216,220],assumpt:[0,77],assur:55,asterisk:25,astyanax:47,async:[6,77],asynchron:[16,45,54,58,71],asynchroni:74,ata:27,atabl:14,atom:[3,11,13,21,38,75],atomiclong:74,attach:[37,42,77,80,220],attack:77,attemp:74,attempt:[0,6,11,16,18,20,22,24,45,50,53,57,59,61,69,72,74,75,76,77,82,83,162,203,213,218],attent:[34,35,42,43],attract:24,attribut:[23,24,25,32,49,53,54,61,66],audienc:[0,59],audit:[3,6,27,52,54,55,95,105,146],audit_log:6,audit_logging_opt:[53,61],audit_logging_options_en:60,audit_logs_dir:[53,61],auditlog:[53,105],auditlogentrytyp:53,auditlogkeyspac:[53,54],auditlogvi
ew:[3,53,61],audt:61,aug:[53,54,62,64,213],auth:[6,53,61,64,207],authent:[10,49,53,61,64,73,82,207],authenticatedus:6,author:[9,20,22,36,42,73,81],authorizationproxi:77,authprovid:[64,207],auto:[0,6,11,45,56,62,81,187],auto_bootstrap:79,auto_snapshot:[60,62],autocompact:[66,96,106,146,187],autom:[30,34,37],automat:[6,13,14,16,32,33,36,40,44,45,49,53,58,62,66,76,77,79,81],automatic_sstable_upgrad:60,avail:[0,2,3,6,8,11,14,20,23,24,25,26,27,28,29,30,33,40,42,43,44,50,54,56,57,59,61,65,70,72,75,76,77,78,80,82,86,116,159,168,179,199,216,218,220],availabil:6,available_rooms_by_hotel_d:[24,28,29],availablil:50,averag:[6,11,14,28,49,68,74,207,218,219,220],average_live_cells_per_slice_last_five_minut:195,average_s:11,average_tombstones_per_slice_last_five_minut:195,averagefin:14,averagest:14,avg:[28,64,207,220],avg_bucket_s:68,avgqu:220,avgrq:220,avoid:[0,6,11,12,27,32,34,39,42,54,57,58,59,63,66,67,69,71,74,77,78,82,200,207],awai:[40,79,82,219],await:220,awar:[0,6,11,42,57,63,70,72,167,216,219],awesom:81,axi:56,az123:25,azur:71,b00c:76,b09:49,b10:49,b124:13,b135:64,b2c5b10:218,b32a:[54,76],b64cb32a:[54,76],b70de1d0:13,b7a2:219,b7c5:219,b957:204,b9c5:219,back:[3,6,11,32,36,54,57,59,60,61,66,72,74,75,79,143,146,219],backend:6,background:[43,45,73,77,218,220],backlog:[6,57],backpressur:[6,57],backpressurestrategi:6,backup:[3,6,52,66,73,79,82,97,107,146,188,213,214],backward:[6,10,11,15,20,22,70],bad:[6,14,45,70,77,78,216,219],balanc:[0,3,6,24,50,58,72,79,216,219],banana:22,band:22,bandwidth:[6,57,58,80,220],bank:24,bar:[12,34,220],bardet:22,bare:6,base:[0,1,4,6,10,11,13,14,18,19,20,22,24,25,27,28,32,33,35,37,38,41,42,43,44,45,49,50,53,54,55,57,61,66,70,71,74,75,77,79,80,208,216,219],baseurl:49,bash:[45,56,220],bashrc:56,basi:[0,6,11,33,45,54,57,70],basic:[0,6,11,24,25,32,57,60,66,68,69,71,81,83,203,214,217],batch:[0,2,4,6,9,11,15,27,44,52,53,54,59,60,73,75,81,82,216,220],batch_remov:[74,219],batch_stat:12,batch_stor:[74,219],batchlog:[1,13,74,118,146,163,169],batchtimemilli:54,batchtyp:81,bbee:64,bc9cf530b1da11e886c66d2c86545d91:211,be34:13,beatl:22,beca:82,becam:[0,3],becaus:[0,4,6,11,13,14,20,24,27,28,32,56,57,62,66,70,72,74,75,77,208,211,220],becom:[0,4,6,11,14,20,32,42,62,66,69,74,75,77,79],been:[0,1,4,6,10,11,13,14,15,20,22,24,32,39,42,43,50,54,56,57,58,59,60,62,64,66,67,70,71,75,76,77,168,211,214,216],befor:[0,4,6,10,11,13,14,16,19,21,22,25,26,28,33,36,37,40,41,43,44,47,53,54,57,59,60,61,62,64,66,68,70,72,74,75,77,78,79,81,82,109,185,203,204,205,206,207,208,209,210,211,212,213,214,215,216],began:3,begin:[9,12,13,24,26,27,44,77,82],beginn:42,begintoken:82,behalf:24,behav:[6,59],behavior:[0,6,10,11,14,17,22,34,39,63,69,73,79,162,216],behind:[6,34,44,45,53,54,61,67],being:[0,4,6,11,13,17,22,24,28,39,43,44,45,53,54,57,59,61,63,64,66,67,68,74,75,76,79,209,218,219,220],believ:[72,216],belong:[0,3,11,13,14,58,74,86,146],below:[6,11,12,13,17,20,22,23,24,25,26,27,28,32,35,42,55,57,61,66,68,74,82,92,207,209,216,218],benchmark:[54,58,70,71,81],benefici:69,benefit:[0,6,37,50,53,63,66,71,73,207],best:[0,3,6,24,27,29,30,32,36,43,44,50,66,67,72,73,77,78,216,220],best_effort:6,better:[6,34,36,37,42,57,67,70,71,72,207,219,220],between:[0,1,4,6,9,10,11,12,13,15,23,24,25,26,31,32,42,45,55,57,58,63,66,70,72,74,75,76,77,80,81,161,183,220],beyond:[6,59,69,82,200],big:[6,28,62,64,66,89,204,205,206,207,208,210,211,212,213,214,215],bigg:28,bigger:[11,50,68],biggest:14,bigint:[9,14,17,19,22,60],bigintasblob:14,bigtabl:3,bigtableread:[205,211,213,215],billion:28,bin:[40,49,51,54,56,64,82,218],binari:[14,43,48,53,54,61,77,
98,108,146,189,218],binauditlogg:[53,105],bind:[6,10,12,14,45,54,64],bind_mark:[12,13,18,22],binlog:[54,61],biolat:220,biolog:11,biosnoop:220,birth:13,birth_year:13,bit:[14,17,22,28,35,42,43,45,49,56,59,70,71],bite:45,bitempor:80,bitrot:70,bitstr:9,black:6,blank:[6,34,45,208],blindli:45,blob:[9,10,12,17,22,52,70,72,81],blob_plain:43,blobasbigint:14,blobastyp:14,block:[4,6,11,38,46,53,54,57,58,60,61,66,67,70,71,73,74,76,77,83,109,203,218,219,220],blockdev:50,blocked_task:60,blocked_tasks_all_tim:60,blockedonalloc:6,blockingbufferhandl:57,blog:[6,13,50],blog_til:13,blog_titl:13,blogpost:81,bloom:[4,11,52,58,59,71,73,74,208],bloom_filter_false_posit:195,bloom_filter_false_ratio:195,bloom_filter_fp_ch:[4,11,63,64],bloom_filter_off_heap_memory_us:195,bloom_filter_space_us:195,bloomfilterdiskspaceus:74,bloomfilterfalseposit:74,bloomfilterfalseratio:74,bloomfilteroffheapmemoryus:74,blunt:77,bnf:12,bob:[13,20],bodi:[6,11,12,57,81],bog:23,boilerpl:41,book:[23,26],boolstyl:82,boost:6,boot:45,bootstrap:[0,6,50,52,56,58,66,70,73,74,77,146,151,179,209],born:13,borrow:57,both:[0,1,3,6,11,13,14,18,22,23,24,28,37,38,39,42,43,45,46,49,53,54,57,58,59,63,66,70,71,74,75,77,79,81,82,214,220],bottleneck:6,bottom:45,bound:[4,6,11,12,22,54,57,58,61,64,71,77],boundari:209,box:[3,6,26,77,78],brace:34,bracket:12,braket:12,branch:[33,36,38,39,40,43,44],branchnam:42,brand:0,breadcrumb:216,breakdown:[219,220],breakpoint:40,breed:44,brendangregg:220,breviti:57,brief:[50,220],briefli:[1,219],bring:[0,6,56,58],brk:45,broadcast:6,broadcast_address:78,broken:[6,66,74,211],brows:[6,43,70,204,205,206,207,208,209,210,211,212,214,215],browser:[82,220],bucket:[0,28,68],bucket_high:68,bucket_low:68,buff:220,buffer:[4,6,50,54,56,57,64,65,72,74],bufferpool:[56,73],buffers_mb:220,bug:[10,38,41,43,44,45,52,76],build:[18,27,30,33,35,36,37,41,42,43,44,49,52,55,64,74,76,80,81,146,202],builder:[6,64,123,146,176],buildfil:56,built:[1,18,28,32,40,56,74],bulk:[52,58,72,73,207],bump:[4,10,209],bunch:34,burn:65,busi:[0,24,27],button:[36,40,45],bytebuff:[14,57],byteorderedpartition:[6,14],bytesanticompact:74,bytescompact:74,bytesflush:74,bytesmutatedanticompact:74,bytespendingrepair:74,bytesrepair:74,bytestyp:[9,208],bytesunrepair:74,bytesvalid:74,c09b:76,c217:64,c22a:64,c3909740:60,c39ee7e6:49,c60d:204,c73de1d3:13,c7556770:60,c_l:28,cach:[0,6,11,20,35,45,46,50,64,71,73,78,137,139,140,141,146,170,171,219],cachecleanupexecutor:[60,74,219],cached_mb:220,cachenam:74,cachestat:220,cadenc:56,calcul:[0,6,23,24,25,31,57,63,64,65,69,74,75,78,207,208],call:[0,9,11,12,13,14,20,24,34,36,41,46,52,54,59,61,62,64,66,70,71,74,79,146,179,220],callback:[57,74],caller:34,can:[0,3,4,5,6,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24,25,27,28,30,33,34,35,36,37,38,39,40,42,43,44,46,49,50,51,52,53,54,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,79,81,82,84,86,87,89,94,96,102,106,109,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,204,205,206,207,209,210,211,212,213,216,217,218,219,220],cancel:[10,162],candid:67,cannot:[1,6,9,11,13,14,17,18,20,22,27,32,44,53,56,57,58,59,60,64,66,72,77,84,146,219],cap:[2,12,120,125,131,146,173,178,181],capabl:[0,6,45,80,82],capac:[0,6,28,32,53,54,57,65,74,78,80,146,148,170,196,215,216,218,220],capacit:216,capacity_byt:60,capi:52,captur:[6,11,24,52,54,73,77,83,218],cardin:208,care:[6,66,81,161,220],carefulli:[35,50],carlo:20,carpent:[23,24,25,26,27,28,29,30],carri:[34,161],casc
ad:27,cascommit:74,cascontent:[132,182],casprepar:74,caspropos:74,casread:74,cassablanca:22,cassafort:47,cassanda:0,cassandra:[0,1,2,3,4,5,8,10,11,13,14,18,20,21,22,23,24,25,26,28,29,31,32,34,35,37,38,42,47,48,50,51,53,54,56,57,58,59,60,62,63,64,66,67,70,71,72,74,76,78,79,82,105,109,146,157,161,164,168,193,201,203,204,205,206,207,208,209,210,211,212,214,215,216,217,219,220],cassandra_flam:220,cassandra_hom:[4,6,56,65,72,77,218],cassandra_job_dsl_se:33,cassandra_stack:220,cassandra_use_jdk11:56,cassandraauthor:[6,77],cassandradaemon:[40,56],cassandrafullquerylog:54,cassandrakeyspacesimpl:59,cassandralogin:77,cassandranetworkauthor:6,cassandrarolemanag:[6,77],casser:47,cassi:47,cast:[10,13,18],caswrit:74,cat:[22,204,220],catalog:[62,64],catalogkeyspac:[62,64],catalokeyspac:62,categor:74,categori:[11,12,13,14,61,105],caught:[39,74],caus:[0,4,6,18,27,45,57,66,68,69,76,77,209,211,218,219,220],caution:[6,70],caveat:77,cbc:6,ccm:[39,44,220],ccmlib:44,cd941b956e60:219,cdc:[6,11,64],cdc_enabl:65,cdc_free_space_check_interval_m:65,cdc_free_space_in_mb:65,cdc_raw:[6,65],cdc_raw_directori:65,cdccompactor:6,cde63440:49,cdf7:60,cell:[6,22,28,74,116,200,204,208,219],center:[6,11,20,22,24,45,50,72,78,79,102,112,146,161],cento:49,central:[40,77,82,216],centric:[20,32,36],certain:[0,1,4,6,9,11,20,36,44,50,58,59,66,77,205],certainli:[14,24,26,28],certif:[73,146,157],cf188983:204,cfname:[130,148,196],cfs:34,chain:20,challeng:[3,37,80],chanc:[37,63,75,208],chang:[0,4,6,11,12,15,20,22,27,32,33,35,36,37,38,40,41,43,48,50,52,53,54,57,59,60,62,66,67,70,73,74,76,77,179,206,209,218,220],changelog:43,channel:0,charact:[11,12,13,17,20,22,28,34,81,82],character:6,chat:8,cheap:[6,11,54,55],chebotko:[24,25,30],check:[0,6,13,24,33,34,39,40,42,43,44,45,49,54,56,57,59,63,64,65,66,67,69,74,77,83,137,146,161,200,203,215,219],checklist:[41,42,52],checkout:[36,40,42,43],checksum:[4,6,11,58,70,146,200,214],chen:23,cherri:38,chess:13,child:82,chmod:[53,54,64,77],choic:[0,6,11,32,43,49,52,66,69,70,73,210],choos:[0,1,3,6,11,41,43,47,48,71,74],chord:0,chose:0,chosen:[0,6,11,14,219],chown:77,christoph:22,chrome:82,chronicl:[53,54,61],chunk:[4,6,11,45,50,57,58,60,70,82],chunk_length_in_kb:[11,50,64,70],chunk_length_kb:6,chunk_lenth_in_kb:11,chunkcach:74,chunksiz:82,churn:6,cipher:[6,64,77,207],cipher_suit:6,circular:20,circumst:[11,27,75],citi:[22,29],claim:30,clash:12,class_nam:[4,6],classload:[44,56],classpath:[6,14,22,74],claus:[10,11,14,16,17,18,20,34],clean:[6,34,54,74,83,86,146,164,203,207],cleanli:42,cleanup:[45,59,66,73,74,116,146,192,214],clear:[39,42,64,83,88,137],clearli:26,clearsnapshot:[62,146],click:[13,40,42,43,44,220],client:[0,1,4,6,8,10,11,13,17,20,22,27,32,39,45,46,48,49,50,52,53,61,64,65,71,72,73,75,82,88,146,207,211,217,218,219,220],client_encryption_opt:[64,77,207],clientrequest:74,clientstat:146,clock:[0,6],clockr:6,clockwis:0,clojur:48,clone:[40,43,45,56,82,220],close:[6,15,36,43,64,77,220],closer:63,closest:75,cloud:[50,73],clue:[24,220],cluster:[1,2,3,4,6,9,10,11,13,14,21,22,24,27,28,29,30,32,39,44,46,50,51,52,54,56,57,58,59,60,62,64,66,68,71,72,74,75,76,77,78,79,81,82,83,93,114,118,134,146,169,186,203,208,215,216,217,218,220],cluster_nam:[46,51,60],clustering_column:11,clustering_ord:11,clusteringtyp:208,cmake:220,cmd:220,cmsparallelremarken:40,coalesc:6,coalescingstrategi:6,codd:27,code:[6,10,12,14,21,25,28,32,33,36,37,38,39,40,41,44,52,56,70,74,216,220],codebas:43,codestyl:34,coher:80,col:[14,81],cold:6,collat:6,collect:[0,3,6,10,11,12,13,14,15,17,23,25,30,71,73,74,75,81,116,218],collection_liter:12,collect
ion_typ:22,collector:218,color:[22,82,220],column1:9,column:[0,1,3,4,6,9,10,11,12,13,14,15,16,17,18,20,22,24,25,27,28,29,30,32,49,54,57,60,62,70,74,75,76,80,81,82,130,148,168,185,196,208,211,213,218,219],column_count:54,column_definit:[11,54],column_nam:[11,13,16],columnfamili:[4,6,9,34,66,76,206,209],columnspec:81,colupdatetimedeltahistogram:74,com:[6,14,33,34,36,38,43,49,56,77,220],combin:[0,3,4,6,10,50,57,69,75],come:[6,9,26,28,77,220],comingl:69,comma:[6,11,12,13,46,53,61,64,77,79,82,105,148,151,196,207],command:[0,3,6,18,27,30,35,38,43,44,45,46,51,53,54,56,60,62,64,70,72,73,76,77,81,83,84,85,86,87,89,94,96,102,106,109,112,115,116,119,121,124,128,130,132,136,137,144,146,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,203,206,207,212,213,214,217,218,219,220],commandlin:62,comment:[4,6,11,15,18,29,34,36,37,39,64,77],commit:[4,6,8,11,36,41,42,43,52,57,74,214,220],commitlog:[2,6,45,46,71,73,208,218,219],commitlog_archiv:[4,6],commitlog_compress:4,commitlog_directori:[4,46,71],commitlog_segment_size_in_mb:[4,45],commitlog_sync:4,commitlog_sync_batch_window_in_m:4,commitlog_sync_period_in_m:4,commitlog_total_space_in_mb:4,commitlogposit:208,commitlogread:65,commitlogreadhandl:65,commitlogseg:[6,73,74],committ:[37,38,41,42,43,44],commod:[2,3],common:[0,14,15,24,25,27,34,36,39,42,50,53,55,57,58,69,73,76,82,216,217,220],common_nam:11,commonli:[54,72,146],commun:[0,6,8,24,37,39,40,42,45,46,51,57,61,64,77,207],commut:45,compact:[4,6,11,15,45,48,52,58,59,60,62,63,64,69,70,71,73,81,86,90,91,116,119,120,146,159,168,172,173,179,187,192,199,205,206,207,208,209,211,214,217,218,220],compacted_partition_maximum_byt:195,compacted_partition_mean_byt:195,compacted_partition_minimum_byt:195,compaction_:192,compaction_histori:218,compaction_throughput:219,compaction_window_s:69,compaction_window_unit:69,compactionbyteswritten:74,compactionexecutor:[60,74,219],compactionhistori:[66,146],compactionid:192,compactionparamet:66,compactionparametersjson:66,compactions_in_progress:214,compactionstat:[66,146,219],compactiontask:218,compactor:[122,146,175],compani:27,compar:[0,1,3,4,6,31,35,42,49,55,67,74,75,76,81,216,219],comparis:28,comparison:6,compat:[6,9,10,11,13,15,20,39,42,64,70,83,220],compatilibi:22,compet:[6,70],compil:[34,35,40,56,82],compilerthread3:220,complain:40,complet:[1,6,13,14,24,29,30,42,43,45,54,57,58,59,60,62,65,69,72,74,75,76,77,79,82,146,160,162,211,213,214,219],completed_task:60,completedtask:74,complex:[0,4,6,9,14,22,23,25,27,28,32,42],complexarg:14,compliant:[6,14,77],complic:42,compon:[0,4,6,11,24,32,39,63,74,77,146,179,220],compos:[11,13,22],composit:[4,11,32],compound:[17,32],comprehens:39,compress:[0,4,6,11,44,48,52,57,60,64,67,71,72,73,74,81,208],compression_level:[11,70],compression_metadata_off_heap_memory_us:195,compressioninfo:[4,58,62,64],compressionmetadataoffheapmemoryus:74,compressionratio:74,compressor:[4,6,11,70,72,208],compris:[4,11,53,70],compromis:[1,43,77],comput:[0,4,6,11,14,28,54,56,215],concaten:[14,53,61],concentr:23,concept:[0,20,27,30,32,66,69],conceptu:[24,26,27,30,31,32,52],concern:[13,14,25,220],conclus:6,concret:[12,22],concurr:[0,1,6,33,57,64,71,81,121,122,123,146,161,174,175,176,207,219,220],concurrent_compactor:219,concurrent_materialized_view_build:18,concurrent_writ:4,concurrentmarksweep:71,condens:13,condit:[6,10,12,13,20,22,32,34,38,60,68,74,77,81,82,220],conditionnotmet:74,conduct:49,conf:[6,43,45,46,49,56,64,74,77,82,207,218],confid:0,config:[33,57,64,74,77,82,
83,203],configu:[61,220],configur:[0,3,4,11,20,22,27,33,40,43,44,45,48,49,52,55,56,57,59,60,64,73,74,77,78,80,81,82,92,109,146,164,179,206,207,208,216,218,219],confirm:[6,8,25,26,29,33,39,40],confirm_numb:29,conflict:[0,13,22,38,41],conform:[18,39,64],confus:[10,12,45,220],congratul:36,conjunct:82,connect:[0,6,11,22,23,30,40,49,51,52,54,58,60,64,74,77,78,81,82,88,92,145,146,207,220],connectednativecli:74,connectednativeclientsbyus:74,connection_stag:60,connectionsperhost:[64,207],connector:[23,45,47,77],connnect:74,consecut:[28,46],consensu:[1,59,72],consequ:[11,13,19,22,71],conserv:[6,72],consid:[0,6,13,22,24,28,32,37,42,46,61,63,66,69,71,72,75,215],consider:[13,22,23,24,25,27,32,64],consist:[2,3,6,11,12,13,14,29,32,39,57,59,60,62,72,73,74,77,79,83,216,219],consol:[40,46,61,82],constant:[10,11,15,17,22,64],constantli:[0,6,66],constitut:32,constrain:27,constraint:1,construct:[0,12,75,220],constructor:[6,34],consum:[6,44,53,54,57,63,65,74,219],consumpt:65,contact:[0,6,11,45,52,75,216],contain:[0,1,3,6,8,9,10,11,12,13,15,16,18,20,22,24,25,35,40,42,44,54,57,60,62,66,70,72,74,77,80,82,185,205,210,214,216,218,219,220],contend:[6,74],content:[4,6,11,12,13,24,36,49,52,53,54,58,66,69,75,82,109,204,220],contentionhistogram:74,context:[4,6,9,20,22,26,40,42,45,70,77,218],contigu:13,continu:[0,1,6,28,34,44,53,56,57,58,61,67,72,77,78],contrarili:12,contrast:[0,24,27,32,44,77],contribut:[5,33,36,38,44,52],contributor:[36,38,42],control:[0,3,6,10,11,13,15,39,46,54,66,77,78,82],conveni:[9,12,14,17,44,79],convent:[6,11,14,15,25,36,38,41,42,44,77,78],converg:0,convers:10,convert:[10,13,14,66,220],convict:0,coordin:[0,1,3,6,11,13,14,22,32,45,57,62,72,74,75,162,216,217],coordinator_read:60,coordinator_scan:60,coordinator_writ:60,coordinatorreadlat:[74,216],coordinatorscanlat:74,coordinatorwritelat:[74,216],cop:34,copi:[0,1,3,6,28,35,45,55,56,57,62,64,66,75,83,207,216],copyright:[23,24,25,26,27,28,29,30],core:[6,14,61,71,80,174,219,220],correct:[0,6,10,35,39,49,54,64,67,70,77,146,159,206,212],correctli:[6,11,36,45,50,53,57,61,66,77],correl:[6,10,78,216,219],correspond:[0,4,6,9,11,13,14,18,22,36,42,44,45,49,58,59,64,65,72,78,207],corrupt:[6,60,64,66,70,71,76,83,168,200,203],corrupt_frames_recov:60,corrupt_frames_unrecov:60,cost:[6,13,22,50,70,76],couchbas:[62,64],could:[0,3,6,12,22,24,25,27,28,32,37,39,42,53,54,57,58,60,62,64,66,67,76,82,218,220],couldn:[72,74],count:[4,6,9,13,22,28,45,50,57,58,60,66,74,79,81,208,218,219,220],counter1:211,counter:[0,4,6,9,14,19,59,60,71,74,81,83,139,146,168,170,171,203],counter_mut:[74,219],counter_read:81,counter_writ:81,countercach:74,countermutationstag:[60,74,219],counterwrit:[81,132,182],countri:[13,22,25,29],country_cod:22,coupl:[0,6,25,27],cours:[13,76,215,220],cover:[11,25,36,39,42,44,45,48,55,66,67,74,75,208],coverag:[35,37,54],cph:[64,207],cpu:[0,6,11,50,65,70,73,216,218,219],cpu_idl:220,cq4:[53,54,218],cq4t:[53,54],cqerl:47,cqex:47,cql3:[14,39,44,82],cql:[0,3,6,10,11,12,13,14,16,17,19,20,22,25,27,28,29,30,32,43,44,47,49,51,52,53,55,60,61,62,64,69,73,77,81,83,179,204,220],cql_type:[11,12,13,14,20,22],cqlc:47,cqldefinit:14,cqlkeyspac:[62,64,76],cqlsh:[45,48,49,52,54,60,62,64,76,77,83],cqlshrc:83,cqltester:[39,44],cqltrace:220,craft:77,crash:71,crc32:[4,58,62,64,213,214],crc:[4,57,58,213,214],crc_check_chanc:[11,64,70],crdt:0,creat:[0,3,4,6,9,10,12,13,15,17,19,23,24,25,26,27,28,29,30,32,33,36,40,41,44,45,48,49,53,54,56,58,59,60,64,65,66,67,68,69,70,72,75,76,77,79,81,82,89,207,212,220],create_aggreg:53,create_aggregate_stat:12,create_funct:53,create_function_stat:12,creat
e_index:53,create_index_stat:12,create_keyspac:53,create_keyspace_stat:12,create_materialized_view_stat:12,create_rol:53,create_role_stat:12,create_t:53,create_table_stat:12,create_trigg:53,create_trigger_stat:12,create_typ:53,create_type_stat:[12,22],create_user_stat:12,create_view:53,createkeystor:6,createt:44,creation:[6,10,11,13,14,18,22,65,218],creator:20,credenti:[6,77],critic:[0,39,42,70,72,77,216,219],cross:[0,3,6,45,72,78],crossnodedroppedlat:74,crucial:[77,218,219,220],cryptographi:6,csv:[64,82],ctrl:220,cuddli:22,cue:25,culprit:216,cumul:[219,220],curent:208,curl:[38,49],current:[0,1,6,9,11,13,20,22,24,40,42,43,49,54,57,58,60,61,64,66,67,68,69,72,74,79,81,82,83,111,129,133,135,137,146,160,191,199,203,208,209,214,218,219],currentd:[10,14],currentlyblockedtask:74,currenttim:[10,14],currenttimestamp:[10,14],currenttimeuuid:[10,14],custom:[6,9,10,11,14,15,16,20,24,26,27,28,33,42,58,61,64,78,81,82,207],custom_option1:20,custom_option2:20,custom_typ:[14,22],cut:[43,218],cute:22,cvh:39,cycl:[6,56,57,65,70,75,109],cython:83,d1006625dc9e:[54,76],d132e240:64,d132e240c21711e9bbee19821dcea330:[62,64],d18250c0:204,d85b:204,d936bd20a17c11e8bc92a55ed562cd82:210,d993a390c22911e9b1350d927649052c:62,daemon:[40,146,193,220],dai:[14,17,19,22,28,61,66,69,76],daili:[33,53,54,61,109],danger:6,dart:48,dart_cassandra_cql:47,dash:12,data:[1,2,3,4,6,10,12,14,15,16,18,26,27,29,39,46,49,50,52,54,57,58,59,60,63,67,68,69,70,71,72,73,74,75,76,77,78,80,81,82,84,89,102,109,112,116,137,146,151,161,185,200,204,205,206,207,208,209,210,211,212,213,214,215,218,219,220],data_file_directori:[46,71],data_read:20,data_writ:20,databas:[0,1,3,12,13,15,21,24,27,30,31,32,43,49,52,53,54,59,61,62,66,71,77,217,218,220],datacent:[0,1,3,6,11,54,57,58,60,64,76,78,102,112,125,146,161,178,207,216,219],datacenter1:[6,50,81],dataset:[2,3,6,50,70,76,220],datastax:[6,14,30,47,216],datastor:219,datatyp:14,date:[1,4,9,10,15,17,19,24,25,26,27,28,29,32,61,75,83,168,203,204,208],dateof:[10,14],datestamp:17,datetieredcompactionstrategi:[11,66],datetim:15,datum:3,daylight:22,db532690a63411e8b4ae091830ac5256:213,db_user:77,dba:[64,77],dbd:47,dc1:[6,11,20,59,72,77,219],dc1c1:215,dc2:[6,11,59,72,77,219],dc3:20,dcassandra:[67,69,74,77,79],dcawareroundrobin:216,dcl:[53,61],dclocal_read_repair_ch:75,dcom:77,dcpar:161,ddl:[11,53,60,61,62,64,82],ddl_statement:12,deactiv:6,dead:[0,6,73,84,146,220],dead_node_ip:79,deal:[57,83,203],deb:[43,49],debemail:43,debfullnam:43,debian:[43,45,48,220],debug:[46,54,82,211,212,213,214,215,216,217,220],decai:216,decid:[9,36,57,64,67,68,72,78],decim:[9,14,17,19,22,82],decimalsep:82,decis:0,declar:[11,12,14,22,25],decod:[17,22,57,220],decommiss:[0,6,55,59,79,146],decompress:[70,220],decoupl:[0,58,59],decreas:[6,50,69,207,220],decrement:[13,22],decrypt:6,dedic:[4,6,24,54,58],dedupl:[143,146],deem:6,deep:[52,217,218],deeper:[42,220],default_time_to_l:[10,11,13,64],defend:45,defens:6,defer:[11,220],defin:[0,3,6,9,10,11,12,13,15,16,17,18,20,21,23,24,25,27,28,31,40,43,52,64,66,72,74,77,78,79,81,82,89,146,208],defineclass1:56,defineclass:56,definit:[9,13,14,15,18,22,23,24,25,26,27,28,29,30,32,52,55,62,63,64,81,208],deflat:[4,6,70,72],deflatecompressor:[11,70],degrad:[0,6,24],delai:[1,4,62,74,76],deleg:40,delet:[0,4,6,9,10,11,12,15,17,18,20,22,24,27,42,52,53,54,60,61,62,69,72,76,82,109,116,146,198,208,213,214,218],delete_stat:[12,13],deletiontim:4,delimit:6,deliv:[0,6,57,72,74],deliveri:[6,57,73,74,146,147,166,177],delta:[74,208],demand:[0,77],demo:55,demonstr:[53,54,56,62,64,76,217],deni:45,denorm:[22,24,25,32],denot:[6,12,25]
,dens:63,dep:35,departur:0,depend:[0,4,6,11,12,13,14,22,33,36,39,40,41,42,44,52,56,58,68,70,72,76,83,216],dependenic:35,depict:23,deploi:[35,43,45,46,49,220],deploy:[0,6,55,59,72,77,78,80],deprec:[6,10,11,14,15,64,66],depth:220,deriv:[57,64],desc:[9,11,13,32,54,60,82],descend:[11,13],describ:[2,6,7,9,10,11,12,13,14,15,17,20,22,26,27,30,39,40,42,43,54,55,63,77,83,146,203],describeclust:146,descript:[10,11,14,19,22,24,26,29,33,36,42,50,58,60,64,72,74,75,82],descriptor:[74,214],deseri:[55,70,215],design:[0,1,3,14,23,24,25,26,28,30,31,52,54,59,66,69,71,76],desir:[16,22,24,33,45,70,210],destin:[65,72,82],destroyjavavm:220,detail:[5,6,10,11,12,13,14,22,23,24,25,26,33,36,37,45,56,60,62,70,73,77,80,81,82,83,203,213,218,219,220],detect:[1,2,6,38,45,57,77,215],detector:[0,114,146],determin:[0,6,11,13,20,24,27,28,32,50,58,62,63,70,75,78,161,216,219,220],determinist:[0,45],detractor:27,dev1:61,dev:[6,8,43,45,50,220],devcent:30,devel:56,develop:[3,8,30,36,37,40,42,44,56,71],devic:[4,80,220],devscript:43,df303ac7:219,dfb660d92ad8:82,dfp:200,dht:[6,208],diagnost:[6,55,73],diagram:[23,24,26,30,57],diagrammat:24,diamond:23,dictat:[6,77],did:[26,39,74,206],didn:[0,24],die:6,dies:[52,79],diff:[15,34,218],differ:[0,1,6,11,12,13,14,15,20,22,24,25,31,32,33,38,40,42,44,45,46,50,53,54,55,58,59,64,66,67,68,70,71,74,75,76,79,81,216,220],difficult:[0,6,44,220],difficulti:22,digest:[4,6,58,62,64,75,213,214],digit:[17,22,45],digitalpacif:49,diminish:22,dinclud:35,dir:56,dir_path:[64,207],direct:[0,6,11,17,20,24,42,74,75,220],directli:[13,18,20,36,40,60,64,66,77,208,220],director:13,directori:[4,6,21,35,36,40,44,45,48,49,51,56,61,64,65,68,71,72,73,82,137,146,164,207,220],dirti:[4,6,54,220],disabl:[6,11,14,53,54,58,62,66,70,72,77,78,82,95,96,97,98,99,100,101,102,103,112,146,169,171,173,178,181,182,183],disable_stcs_in_l0:67,disableauditlog:[53,61,146],disableautocompact:[66,146],disablebackup:[62,146],disablebinari:146,disablefullquerylog:[54,146,218],disablegossip:146,disablehandoff:[72,146],disablehintsfordc:[72,146],disableoldprotocolvers:[60,146],disablesnapshot:168,disableuditlog:61,disadvantag:0,disagre:75,disallow:[6,55],disambigu:[53,54,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],disappear:[11,75],discard:[6,57,59],disconnect:[57,66],discourag:[11,22,42],discov:[45,76],discoveri:[0,64],discret:0,discuss:[8,11,22,24,26,28,42,53,54,56,57,60,62,64,75],disk:[4,6,11,24,27,31,32,46,50,52,53,54,58,60,61,62,63,65,66,69,70,73,74,76,109,143,146,159,200,205,209,212,213,218,219,220],disk_spac:60,disk_usag:60,dispar:[6,27],dispatch:57,displai:[11,24,53,54,60,61,64,82,83,85,91,121,136,138,145,146,195,203,207,211,212,213,214,215,220],displaystyl:28,disrupt:[45,77],dissect:220,dist:[43,49],distanc:[23,80],distinct:[0,9,10,13],distinguish:[9,14,58,62],distribut:[1,2,3,6,23,32,35,42,44,45,49,60,66,74,76,77,79,80,81,208,209,217,218,219,220],distro:43,dive:[52,217,218],diverg:1,divid:[12,25,27],divis:19,djava:[40,45,77],dload:49,dml:[21,53,60,61],dml_statement:12,dmx4jaddress:74,dmx4jport:74,dns:45,dobar:34,doc:[6,35,36,39,43,49,76,77,206,220],document:[5,12,14,15,17,24,25,27,29,30,33,39,41,42,50,51,54,77,81,82],doe:[0,3,6,11
,13,14,16,17,18,20,22,27,32,38,39,42,49,52,54,55,58,59,62,63,64,66,67,69,70,72,75,76,77,78,79,143,146,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220],doesn:[6,14,22,24,34,44,45,54,57,58,76,81,207,208,210,211,218,220],dofoo:34,doing:[6,13,18,27,44,45,50,66,67,74,79,220],dollar:[10,12],domain:[0,23,24,27,37,77,81,167,186],domin:220,don:[0,5,6,13,26,27,28,32,34,37,38,39,40,42,43,45,46,58,59,60,64,66,76,137,161,207,212,217,219,220],done:[6,11,13,22,25,27,33,36,37,42,43,44,46,51,54,57,67,68,81,209,212,213],dont:[60,61],doubl:[6,9,10,11,12,14,17,19,22,40,50,60,74,78],doubt:11,down:[0,6,20,23,28,50,54,58,66,74,76,78,79,100,146,161,209,216,218,219,220],downgrad:213,download:[6,30,33,40,43,49,56,74,80],downsampl:4,downstream:[26,219],downtim:[3,50,72],downward:20,dozen:219,dpkg:43,drag:11,drain:[4,57,72,146],draw:[0,25],drive:[0,6,50,66,71,218,219,220],driven:[3,27,31],driver:[6,12,14,20,28,30,44,48,52,60,82,216],driver_nam:60,driver_vers:60,drop:[1,6,10,15,52,53,54,55,57,59,60,61,62,64,66,67,69,74,76,77,109,205,208,209,211,216,219,220],drop_aggreg:53,drop_aggregate_stat:12,drop_funct:53,drop_function_stat:12,drop_index:53,drop_index_stat:12,drop_keyspac:53,drop_keyspace_stat:12,drop_materialized_view_stat:12,drop_rol:53,drop_role_stat:12,drop_tabl:53,drop_table_stat:12,drop_trigg:53,drop_trigger_stat:12,drop_typ:53,drop_type_stat:[12,22],drop_user_stat:12,drop_view:53,dropdown:220,droppabl:[6,57,66,208],dropped_mut:195,droppedmessag:73,droppedmut:74,dropwizard:74,drwxr:213,drwxrwxr:[62,64],dry:[83,203],dsl:33,dt_socket:40,dtest:[33,39,41],due:[0,1,11,13,22,35,45,50,57,62,69,72,74,79,216,220],dump:[53,54,61,82,83,203,218],duplic:[25,27,28,32,39,76,214],durabl:[0,2,4,62,65,72],durable_writ:11,durat:[6,10,15,19,20,66,72,74,81,148,196,207],dure:[6,11,14,21,23,35,42,44,45,58,59,60,64,67,68,69,70,72,74,75,77,79,81,82,168,205,211,215,218,220],duse:56,dverbos:35,dying:45,dynam:[6,23,72,73,75,77],dynamic_snitch:78,dynamic_snitch_badness_threshold:78,dynamic_snitch_reset_interval_in_m:78,dynamic_snitch_update_interval_in_m:78,dynamo:[2,3,52],e123fa8fc287:[54,76],e2b06b91:49,each:[0,1,3,4,6,10,11,12,13,14,17,18,20,22,23,24,25,26,27,28,29,32,33,36,38,42,49,50,51,52,53,54,56,57,58,59,60,61,62,64,66,67,70,71,72,74,76,77,78,79,80,81,82,83,146,171,187,200,203,204,218,219,220],each_quorum:[0,6,55],eagerli:57,earli:[6,12,42],earlier:[42,49,53,56,75],eas:220,easi:[3,9,23,36,42,54,220],easier:[0,36,42,50,53,204],easiest:45,easili:[0,23,27,77],east:[54,76],eben:[23,24,25,26,27,28,29,30],ec2:[6,50,53,54,56,60,62,64,71,76,78],ec2multiregionsnitch:[6,78],ec2snitch:[6,50,78],ecc:71,echo:[49,56,208],eclips:[34,41,44],ecosystem:39,eden:220,edg:39,edit:[3,36,40,43,46,49,74,77,214],editor:36,effect:[0,3,6,11,22,25,27,42,45,54,57,59,63,70,77,100,146,216,219,220],effectiv:74,effici:[6,11,32,53,61,64,66,69,78,79],effort:[0,6,36,42,72,76],eight:0,either:[0,4,6,8,12,13,14,16,22,27,28,33,34,36,38,40,42,45,49,51,56,57,62,65,66,72,74,77,81,198,216,218,219,220],elaps:[66,74,220],elast:50,elasticsearch:80,elder:40,element:[0,22,24,25,29,36,50,54,82],elig:6,elimin:[28,216],elixir:48,els:[11,13,34,42],elsewher:57,email:[8,16,22,29,43,52],embed:44,emerg:[3,35],emit:6,emploi:63,empti:[6,9,10,11,12,64,82,211],emptytyp:9,enabl:[0,6,11,14,17,20,44,45,55,56,60,62,66,69,70,72,78,79,82,105,106,107,109,112,113,146,183,207,208,218,220],enable_legacy_ssl_storage_port:6,enable_transient_repl:[59,60],enable_user_defined_funct:14,enableauditlog:[53,61,146],enableautocompact:[66,146],enablebackup:[62,146],enablebinari:146,enablefull
querylog:[6,54,146,218],enablegossip:146,enablehandoff:[72,146],enablehintsfordc:[72,146],enableoldprotocolvers:[60,146],encapsul:[34,74],enclos:[9,10,12,14,20,27,81],enclosur:12,encod:[15,22,39,57,61,82,208],encodingstat:208,encount:[0,5,13,43,57,74,81],encourag:[11,65,70],encrypt:[6,48,56,58,60,64,73,207],end:[22,24,27,33,45,53,54,61,66,67,68,75,77,78,82,89,124,146,161,209,214,220],end_dat:29,end_resultset:54,end_token:[89,161],end_token_1:151,end_token_2:151,end_token_n:151,endpoint:[0,6,57,59,72,74,75,78,84,124,146,161,198],endpoint_snitch:78,endpointsnitchinfo:77,endtoken:82,enforc:[17,27,57,77],engin:[0,2,3,11,28,42,52,74,80],enhac:37,enhanc:[37,57,71],enjoi:43,enough:[0,6,22,23,26,28,45,46,53,54,67,75,76,78,82,218,220],enqueu:[6,57,218],ensur:[1,13,18,21,28,43,45,48,54,57,59,65,66,70,72,75,77,206,218,219],entail:45,enter:[0,24,26,33,45,82,218,220],enterpris:49,entir:[0,4,6,11,14,22,27,45,50,54,57,58,59,63,64,69,72,76,77,79,82,83,203,205,216,220],entiti:[23,24,25,26,27,32],entri:[4,6,9,13,16,33,42,43,49,52,53,54,74,77,82,208],entropi:[0,6,72],entry_count:60,entry_titl:13,enumer:[20,204],env:[45,46,56,74,77],environ:[0,1,5,6,30,35,40,41,43,44,45,48,49,50,52,56,64,71,210],envis:24,ephemer:71,epoch:[22,54,208],epol:6,equal:[0,6,10,11,13,22,24,28,34,66,81],equival:[10,11,12,13,14,20,38,66,217],equivil:50,eras:[11,57],erlang:48,erlcass:47,err:82,errfil:82,error:[1,6,11,12,14,16,18,20,22,24,30,33,34,39,40,44,52,53,56,60,61,76,81,82,162,206,211,215,217,218,219],error_byt:60,error_count:60,escap:[12,17,81],especi:[0,24,25,42,45,50,66,82,220],essenc:24,essenti:[14,45,56,62,82],establish:[6,20,57,64,78,207],estim:[4,28,58,74,76,208,219],estimatedcolumncounthistogram:74,estimatedpartitioncount:74,estimatedpartitionsizehistogram:74,etc:[6,18,22,34,39,45,46,49,54,58,61,66,67,70,74,77,81,207,220],eth0:6,eth1:6,ev1:22,evalu:[6,19,29,31,52],even:[0,1,6,10,11,12,13,14,17,22,24,25,26,27,29,32,37,42,50,52,54,57,58,59,60,62,66,72,75,76,77,82,92,168,199,216,218,219,220],evenli:[0,6,32],evenlog:[206,209],event:[1,3,4,6,13,22,55,57,61,66,73,81,82,161,204],event_typ:13,eventlog:[204,206,209,212,214,215],eventlog_dump_2018jul26:204,eventlog_dump_2018jul26_d:204,eventlog_dump_2018jul26_excludekei:204,eventlog_dump_2018jul26_justkei:204,eventlog_dump_2018jul26_justlin:204,eventlog_dump_2018jul26_singlekei:204,eventlog_dump_2018jul26_tim:204,eventlog_dump_2018jul26b:204,eventu:[0,2,3,4,13,36,50,57,72,76],ever:[34,44,45,71],everi:[0,1,4,6,11,13,14,18,20,21,22,24,28,51,53,56,57,58,59,61,63,64,66,71,76,81,82,216,219,220],everyon:[23,75],everyth:[4,12,34,40,45,80],evict:74,evil:[6,14],ex1:81,ex2:81,exact:[11,12,14,70,72,217],exactli:[11,14,18,54,77,204,220],examin:[25,28],exampl:[0,3,6,11,13,14,17,20,22,23,24,25,26,27,28,31,37,43,44,49,50,51,53,54,56,59,60,61,64,66,67,69,70,72,73,77,78,81,82,204,205,206,207,208,209,210,211,212,214,215,216,217,218,219,220],example2:81,exaust:6,excalibur:11,exce:[4,6,11,17,34,57,218],exceed:[6,57,71,209],excel:[11,28,70],excelsior:11,except:[0,6,13,14,17,39,41,42,44,45,54,56,57,61,64,74,204,209,218,220],excess:63,exchang:[0,6,45,58],exclud:[0,11,53,61,74,83,105,129,146,203],excluded_categori:[53,61,105],excluded_keyspac:[53,61,105],excluded_us:[53,61,105],exclus:[22,35,44,161],execut:[6,9,11,12,13,14,20,30,33,35,40,44,51,54,61,66,74,77,82,203,204,205,206,207,208,209,210,211,212,213,214,215,219,220],executor:33,exhaust:[6,49,56,216],exhibit:13,exist:[0,1,6,9,10,11,12,13,14,16,17,18,20,21,22,27,28,36,37,39,40,43,44,52,53,54,60,61,63,64,66,69,70,75,78,79,81,205],exit:[54,83,214],exp:81,e
xpand:[11,50,83],expans:[11,50,58,59],expect:[0,1,4,6,10,12,22,27,32,34,39,42,43,57,64,66,69,72,73,76,77,209,219],expens:[6,54,63,78],experi:[6,24,49,57,66,219],experienc:[0,6,218],experiment:[0,3,25,32,49,56,59,161],expert:59,expir:[6,10,11,13,22,57,60,69,72,73,76,77,168,205,208,211],expired_byt:60,expired_count:60,expiri:66,explain:[26,34,36,39,42],explan:[50,83,203],explicit:[0,10,11,20,29,57],explicitli:[3,4,6,10,11,13,17,22,34,53,60,62,69,78,81],explor:[24,40],expon:10,exponenti:[74,81,216],expos:[6,9,57,60,70,75,77],express:[0,6,10,12,19,78],expung:45,extend:[22,42,44,57,64,80,83,137,200,203],extens:[6,11,24,64,77],extent:1,extern:[27,52,53,73,74,79,80,217],extra:[0,4,6,57,66],extract:[34,49],extrem:[6,13,24,50,70,81],f6845640a6cb11e8b6836d2c86545d91:208,f8a4fa30aa2a11e8af27091830ac5256:207,facebook:3,facilit:6,fact:[22,37,44,45,216],factor:[0,6,11,23,28,52,58,59,64,70,72,76,77],factori:81,fail:[0,1,6,11,13,14,22,28,33,35,43,52,53,54,56,57,58,66,72,74,75,82,146,162],failur:[1,2,6,42,52,58,59,62,66,71,72,74,78,114,146,200,216],failuredetector:146,fairli:[6,24,65,77,220],fake:14,fall:[0,6,24,53,54,61],fallback:[6,66,78],fals:[6,11,12,17,20,22,54,60,61,62,63,64,65,66,69,70,74,76,77,79,82,168],famili:[6,62,71,130,148,185,196,213],familiar:23,fanout_s:[66,67],faq:83,far:[36,37,54],fare:220,fashion:0,fast:[0,6,24,28,32,63,66,70,80,218,220],faster:[0,6,32,42,58,59,70,71,146,171,219],fastest:[6,38,75,78],fatal:6,fault:[1,45,72],fav:[16,22],favor:56,favorit:220,fax:22,fct:14,fct_using_udt:14,fd576da0:76,fd8e5c20:76,fear:45,feasibl:22,featur:[0,2,25,27,29,30,37,39,40,42,50,52,53,54,56,57,58,59,77],fed:6,feed:54,feedback:42,feel:[36,38],felt:[26,28],fetch:[6,11,36,82],few:[0,6,25,62,66,67,71,216,218],fewer:[6,32,42,72],fewest:50,fffffffff:[17,22],fgc:220,fgct:220,field:[10,13,14,17,22,32,34,54,57,61,63,70,81,211],field_definit:22,field_nam:13,fifteen:74,fifteenminutecachehitr:74,fifth:219,figur:[0,1,24,25,26,27,28,32,37,62,68,75,205],file:[3,4,7,11,27,28,36,40,41,42,43,44,45,46,49,50,52,53,54,56,57,58,60,62,63,64,65,66,69,71,72,74,77,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,208,209,210,211,213,214,216,217,220],fileauditlogg:53,filenam:[4,11,82,130,146,208,212],filenamepattern:61,filesystem:[33,53,54,72,77],fill:[43,57,62,65,67],fillion:13,filter:[0,4,9,11,18,52,58,59,60,62,64,71,73,74,161,208,213,214],finalfunc:[9,14],find:[0,4,6,24,25,26,27,29,33,36,40,43,44,52,60,63,66,72,75,77,79,124,128,204,208,210,213,217,219,220],fine:[4,6,42,76,77],finer:[4,6],finish:[28,29,40,42,54,56,76,146,163,218],finish_releas:43,fip:[6,77],fire:[21,37],firefox:82,firewal:[6,45,46,78],first:[0,1,4,5,6,11,13,14,22,23,24,25,26,28,32,42,43,45,48,56,59,62,67,69,71,75,76,77,81,82,161,168,204,208,211,214,216,218,219,220],first_nam:29,firstnam:13,fit:[6,28,66,67,74],five:[70,74],fiveminutecachehitr:74,fix:[6,10,11,12,18,27,33,36,38,41,43,45,50,57,66,71,75,76,81,211],flag:[6,13,38,39,42,43,57,65,74,76,79,206],flash:80,flat:72,flexibl:[0,3,11,50,54,55,77],flight:[6,57,77],flip:11,floor:6,flow:[6,20,26,39,41,61],fluent:47,flush:[4,6,11,57,62,64,65,66,67,69,70,71,72,74,104,146,185,214,218],fname:14,focu:
[23,26,33,42],focus:81,focuss:220,folder:[40,64,192,212],follow:[0,1,3,4,5,6,8,9,10,11,12,13,14,17,18,19,20,22,26,28,32,33,34,35,36,37,38,39,40,42,43,44,45,46,50,52,53,54,56,57,58,59,60,61,62,64,65,66,70,72,74,75,76,77,78,79,82,86,89,96,106,115,116,152,161,168,182,187,199,200,205,210,211,214,216,220],font:12,foo:[11,12,65,220],footprint:[3,54,70,146,148],forc:[4,6,11,13,58,76,82,89,92,146,160,161,162,215],forcefulli:[84,146],foreground:[46,75],foreign:[3,27,32],forev:[57,66],forget:5,fork:[36,42],form:[0,6,10,11,12,14,20,24,27,50,58,64,91,145,195],formal:[0,12,36,43],format:[4,6,10,11,17,22,25,28,36,38,39,41,42,53,54,57,61,64,74,82,83,90,109,130,151,195,197,203,214,219],former:[6,74],formerli:59,formula:28,fortabl:64,forward:[6,11,35,43,75],found:[0,1,5,6,12,14,23,33,36,37,42,44,46,51,62,64,75,77,79,81,82,83,192,200,203,207,208,213,214],four:[0,13,28,57,60],fourth:[28,219],fqcn:44,fql:[55,218],fql_log:218,fqltool:[3,54,56,218],fraction:[0,6],frame:[6,60],framework:[39,44],franc:[13,22],free:[0,6,11,22,30,36,38,40,57,74,80,215,220],freed:4,freestyl:33,frequenc:[6,11,32,65,76],frequent:[6,11,24,25,27,50,52,56,69,77,216,220],fresh:79,fri:54,friendli:[6,11,22,44],from:[0,1,3,4,6,9,11,12,13,14,15,17,18,19,20,22,23,24,25,26,27,28,29,30,32,37,38,41,42,44,49,50,51,52,53,54,55,56,58,59,61,63,65,66,67,69,70,71,72,73,74,75,76,77,78,79,81,83,84,86,87,89,94,96,102,105,106,112,115,116,119,121,124,128,130,132,136,137,144,146,148,151,152,153,155,156,159,160,161,162,164,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,203,205,206,208,209,211,214,216,218,219,220],fromjson:15,front:[24,54],froom:22,frozen:[9,10,11,13,14,22,29],fruit:[22,42],fsync:[4,6,65,72,74,219],fulfil:81,full:[0,1,3,6,9,11,13,16,20,26,42,45,51,52,55,64,66,67,70,72,73,75,77,80,81,82,99,109,146,152,161,164,207,209,214,219],full_nam:195,full_query_log_dir:54,full_query_logging_opt:54,fulli:[0,1,6,11,12,14,32,43,59,64,69,73,74,77],function_cal:12,function_nam:[13,14,20],fundament:17,further:[5,11,18,22,32,48,57,66,73,77,80,219],furthermor:[0,10,13,77],futher:61,futur:[6,9,10,11,22,42,57,59,72,111,146,191],g1gc:71,gain:30,galleri:23,game:[14,22],garbag:[11,58,71,73,74,116,218],garbage_collect:192,garbagecollect:146,gather:66,gaug:74,gaurante:0,gaussian:81,gc_grace_second:[11,64,208],gc_type:74,gce:[45,71],gcg:6,gcinspector:218,gcstat:146,gct:220,gcutil:220,gcviewer:218,gear:24,gen:220,gener:[0,2,4,6,8,11,12,13,14,17,22,30,32,33,36,39,40,41,42,43,45,54,57,60,70,71,72,77,80,81,82,132,168,182,211,217,218,219,220],genuin:34,geoloc:23,geospati:80,get:[0,4,6,8,24,25,27,28,32,33,35,36,38,40,41,42,45,49,50,52,53,54,56,60,62,63,64,66,67,70,72,74,75,76,83,121,122,123,126,129,146,203,205,209,217,219,220],getbatchlogreplaythrottl:146,getcompactionthreshold:146,getcompactionthroughput:146,getconcurr:146,getconcurrentcompactor:146,getconcurrentviewbuild:[18,146],getendpoint:146,getint:14,getinterdcstreamthroughput:146,getlocalhost:[6,45],getlogginglevel:[146,218],getlong:14,getmaxhintwindow:[72,146],getpartition:34,getreplica:146,getse:146,getsstabl:146,getstr:14,getstreamthroughput:146,gettempsstablepath:34,getter:[20,34],gettimeout:146,gettraceprob:146,getudtyp:64,gib:[53,54,72,91,145,195,219],gigabyt:72,gist:[4,34],git1:43,git:[5,33,36,38,40,42,43,56,218,220],gitbox:[40,43],github:[33,34,38,41,42,43,44,56,80,220],give:[0,18,20,22,28,36,42,44,52,70,82,206,218,219],giveawai:220,given:[0,6,11,12,13,14,16,22,24,26,27,28,29,32,33,42,53,57,58,59,62,63,64,66,67,69,75,76,77,79,81,82,87,89,94,96,1
06,119,128,132,146,152,172,179,183,187,194,204,206,208,209,210,213,214],glanc:220,global:[0,3,6,25,33,56,57,82,146,170],gms:218,gmt:22,gnupg:43,goal:[6,27,28,31,50,54,69,216],gocassa:47,gocql:47,goe:[11,27],going:[6,25,28,42,67,69,211,217,219,220],gone:[6,11],good:[6,28,34,36,42,44,45,54,57,66,70,76,82,210,216,218,219,220],googl:[3,34,82,220],gori:45,gossip:[1,6,45,56,57,72,74,78,100,110,134,146,190,218],gossipinfo:146,gossipingpropertyfilesnitch:[6,50,78],gossipstag:[60,74,218,219],got:6,gotcha:220,gp2:71,gpg:49,gpg_name:43,gpgcheck:49,gpgkei:49,grace:[73,76,83,203],gracefulli:57,grade:70,grafana:216,grai:22,grain:[27,77],grammar:[11,12,35],grant:[6,9,53,77],grant_permission_stat:12,grant_role_stat:12,granular:[4,6,11,75,116],graph:[20,83],graphit:216,gravesit:11,great:[26,37,42,66,217,218,219,220],greater:[0,6,22,45,50,78,175,176,218,220],greatest:60,greatli:[6,58],green:[22,40],grep:[4,206,208,210,218,219,220],groovi:33,group:[6,10,11,20,24,28,32,57,59,69,74,77,78,216],group_by_claus:13,grow:[22,24,80],growth:[0,3],guarante:[0,2,6,11,13,14,22,24,42,52,57,58,63,66,67,72,75,76,79,80,82,205],guard:58,guest:[23,24,25,26,27,29],guest_id:[25,29],guest_last_nam:29,gui:220,guid:[6,23,24,25,26,27,28,29,30,36,40],guidelin:[10,32,39,43,71],habit:43,hackolad:30,had:[0,3,6,9,10,24,57,64,66,75,211,217,219],half:[4,6,38,45,72],hand:[3,6,13,71,219],handi:[28,220],handl:[0,1,6,14,39,41,42,45,54,57,59,65,71,74,77,81,109,218],handler:57,handoff:[0,6,73,74,101,135,146,177],handoffwindow:[72,146],hang:42,happen:[0,6,13,34,38,42,52,66,67,72,74,78,216,218,219,220],happi:42,happili:71,hard:[3,6,14,27,28,62,65,66,71,213,218],harder:6,hardest:37,hardwar:[2,3,6,33,50,52,58,72,73,216],has:[0,1,3,4,6,10,11,12,13,14,18,20,22,25,27,28,30,32,34,42,43,45,50,54,56,57,58,59,60,61,62,64,65,66,67,70,71,72,74,75,77,78,79,81,82,83,203,207,216,218,219,220],hash:[2,4,6,32,49,60,66,72,75,76,215,220],hashcod:34,hashtabl:23,haskel:48,hasn:[0,54,59,109],have:[0,3,5,6,9,10,11,12,13,14,15,18,19,20,22,24,25,26,27,28,29,32,33,34,36,37,38,39,40,42,43,44,45,46,49,50,53,54,57,58,59,60,61,62,63,64,66,67,68,69,70,71,72,74,76,77,78,109,168,205,207,209,211,214,215,216,217,218,219,220],haven:42,hayt:47,hdd:[4,6,71],head:[36,42,57,220],header:[40,57,82],headroom:6,health:220,healthi:[50,220],heap:[4,6,40,46,52,54,56,63,64,70,71,74,218,219,220],heap_buff:6,hear:27,heartbeat:[0,6,218],heavi:[0,6,50,66,218,219,220],heavili:[50,71],held:[6,71,146,150],help:[0,5,6,10,24,25,26,28,30,35,37,42,44,49,51,53,54,61,62,64,70,72,81,83,85,146,184,207,211,212,213,214,215,216,217,218,219,220],helper:44,henc:[5,6,11,22],here:[6,23,24,26,27,28,29,35,36,38,43,44,45,47,49,50,67,70,74,77,81,219],hewitt:[23,24,25,26,27,28,29,30],hex:[12,17,130],hexadecim:[10,12,130],hibern:79,hidden:[79,220],hide:[34,39,60,83,203],hierarch:[0,20],hierarchi:[20,76],high:[0,2,6,11,32,36,43,45,50,66,67,70,71,72,80,216,218,219],higher:[0,19,20,42,63,67,70,74,75,79,148,196,218,220],highest:[67,208,209],highli:[1,3,42,45,70,71,77,218,219],highlight:[27,30],hint:[0,6,11,12,18,45,46,52,55,59,60,73,74,76,101,102,111,112,127,135,146,147,166,177,180,191,198,219],hint_delai:74,hinted_handoff_disabled_datacent:72,hinted_handoff_en:[60,72],hinted_handoff_throttl:72,hinted_handoff_throttle_in_kb:72,hintedhandoff:[6,73],hintedhandoffmanag:74,hints_compress:72,hints_creat:74,hints_directori:[46,60,72],hints_flush_period_in_m:72,hints_not_stor:74,hintsdispatch:[60,74,219],hintsfail:74,hintsservic:73,hintssucceed:74,hintstimedout:74,histogram:[4,66,72,74,146,149,194,208,218],histor:[11,27,42],histori:[
27,33,34,61,88,90,146],hit:[6,67,74,220],hit_count:60,hit_ratio:60,hitrat:74,hoc:44,hold:[0,6,10,13,20,45,58,68,72,82,216,218,220],home:[0,22,56,60,81,82],honor:[6,40],hope:66,hopefulli:42,horizont:[0,56],hospit:25,host:[0,6,11,36,46,49,52,53,54,58,61,62,64,74,75,76,78,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207,219,220],hostnam:[6,45,53,54,56,60,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,220],hot:[6,50,73,74,220],hotel:[23,26,27,28,29,31],hotel_id:[24,25,28,29],hotels_by_poi:[24,25,29],hotspot:[0,11],hotspotdiagnost:77,hottest:6,hour:[6,22,42,43,53,69,72],hourli:[53,54,61,109],how:[0,3,5,6,7,8,11,12,22,23,24,25,26,27,28,33,37,39,40,41,42,44,48,49,50,51,52,53,54,55,59,62,64,65,66,68,70,72,74,75,78,80,81,82,109,206,218,219,220],howev:[0,4,6,9,10,11,12,13,15,17,18,20,22,24,26,27,28,33,42,44,45,46,49,60,63,64,70,71,72,76,77,79,82],hoytech:220,html:[6,81,206],http:[6,33,34,36,38,40,43,49,56,70,74,204,205,206,207,208,209,210,211,212,214,215,220],httpadaptor:74,hub:45,hudson:33,huge:57,huge_daili:61,human:[11,53,61,65,91,145,195,219],hundr:28,hurt:11,hybrid:11,hypothet:38,iauthent:6,iauthor:6,ibm:80,icompressor:70,id1:32,id2:32,idct:[64,207],ide:40,idea:[6,14,28,30,36,41,42,44,45,54,67,68,82,219,220],ideal:[6,44,69,74,77],idealclwritelat:74,idempot:[13,22,72],idemptot:22,ident:[0,59,81],identifi:[0,3,6,9,10,11,13,14,15,16,20,21,22,23,24,25,26,28,30,32,60,72,81,216],idiomat:8,idl:6,idx:65,ieee:[17,22],iendpointsnitch:[6,78],iftop:220,ignor:[0,6,10,14,22,34,60,64,82,195,207],iinternodeauthent:6,illeg:14,illegalargumentexcept:209,illustr:[1,20,32,62,75,209],imag:[22,36,220],imagin:66,imbal:[0,50],immedi:[4,6,11,22,24,42,57,59,63,70,77,86,146],immut:[4,45,62,66,70,71],impact:[6,11,39,42,50,53,54,57,69,73,77,218,220],imper:[0,3],implement:[0,1,3,4,6,10,13,14,18,20,28,29,32,33,34,44,45,53,54,57,59,60,61,65,70,72,77,78,80],implementor:6,impli:[0,1,11,12,22,32,53,58,59,75],implic:[0,77],implicitli:[14,20],import_:82,importantli:54,impos:[6,57],imposs:[50,67],improv:[0,3,6,11,22,32,37,42,44,52,55,63,67,70,71,73,78,79,82,220],inaccur:220,inact:45,inam:210,inboundconnectioniniti:56,inboundmessagehandl:57,inc:[23,24,25,26,27,28,29,30],incast:220,includ:[0,3,4,6,10,11,12,13,18,20,22,23,24,25,26,27,28,30,32,33,34,35,36,42,43,49,53,54,56,57,58,59,60,61,62,64,66,67,68,71,72,74,75,77,80,82,83,105,162,199,203,210,216,217,218,219,220],included_categori:[53,61,105],included_keyspac:[53,61,105],included_us:[53,61,105],inclus:[42,70,161],incom:[6,57,61],incomingbyt:74,incompat:[6,10,57,60],incomplet:[39,214],inconsist:[45,72,75,76],incorrect:45,increas:[0,3,4,6,11,18,45,50,58,59,63,67,70,71,72,74,76,78,79,161,207,215,216],increment:[2,3,6,10,13,22,42,43,50,57,59,66,72,73,74,97,107,146,162,168,188,211
,214],incremental_backup:[60,62],incur:[13,22,56,59,74],inde:0,indefinit:[53,61],indent:34,independ:[0,11,66,71,77,219],index:[2,4,6,9,10,11,12,13,15,22,25,32,49,52,54,58,59,60,62,64,65,66,73,82,146,152,207,213,214,218],index_build:192,index_identifi:16,index_nam:16,index_summari:192,index_summary_off_heap_memory_us:195,indexclass:16,indexedentrys:74,indexinfocount:74,indexinfoget:74,indexnam:152,indexsummaryoffheapmemoryus:74,indic:[0,3,5,6,12,13,24,26,34,42,45,56,61,64,65,70,76,161,208,209,216,218,219,220],indirectli:13,indirectori:64,individu:[0,6,10,14,22,24,28,42,43,44,57,58,71,77,207,215],induc:13,industri:25,inequ:[10,13],inet:[9,11,14,17,22,60],inetaddress:[6,45],inetaddressandport:53,inetworkauthor:6,inexpens:71,inf:56,infin:[9,10,12],inflex:50,influenc:11,info:[6,46,48,56,61,74,94,146,204,218],inform:[0,4,6,12,13,22,24,25,26,27,29,49,50,51,54,57,58,60,64,72,74,75,76,77,78,79,81,82,85,88,114,134,136,137,138,145,146,167,184,186,206,207,208,216,217],infrastructur:[42,80],ing:11,ingest:[6,72],ingestr:82,inher:[1,11,22],inherit:20,init:74,initcond:[9,14],initi:[3,6,14,18,34,39,41,43,50,57,59,60,61,64,74,75,77,79,82,146,179,207],initial_token:[60,79],inject:61,innov:80,input:[0,9,10,14,17,22,39,58,64,70,82,210,218],inputd:22,insecur:6,insensit:[11,12],insert:[0,6,9,10,11,12,14,15,16,20,22,45,48,52,54,61,62,64,71,76,77,81,82,214],insert_stat:[12,13],insertedtimestamp:204,insertstmt:64,insid:[6,11,12,13,22,34,81,82],insight:[24,30,218,219],inspect:[0,6,40,81,82,215],instabl:6,instal:[6,21,33,35,44,45,48,52,56,64,77,82,213,220],instanc:[0,10,11,12,13,14,16,18,19,20,21,22,24,33,40,44,45,57,65,66,71,74],instantan:74,instanti:10,instantli:[6,57],instead:[0,4,10,11,13,18,22,24,27,28,30,34,36,45,53,60,61,64,66,67,167,186,204,220],instrospect:217,instruct:[0,6,8,11,36,37,38,40,49,52,72,220],instrument:[35,77],insuffic:216,insuffici:220,insufici:218,int16:54,int32:54,intact:57,intasblob:13,integ:[0,10,11,12,13,17,22,28,57,65,74,211],integr:[3,30,32,41,44,49,52,57,80],intellig:0,intellij:[30,34,41],intend:[11,27,32,39,59,77,207],intens:[6,44,45],intent:[0,39,58],intention:20,inter:[6,57,58,64,78,125,146,178,207],inter_dc_stream_throughput_outbound_megabits_per_sec:58,interact:[3,30,44,51,82,220],interest:[0,23,24,26,27,54,68,77,219],interfac:[3,6,10,14,26,34,36,45,46,49,54,60,70,77,80,220],interleav:81,intern:[6,9,11,13,18,22,36,39,45,56,60,71,72,74,83,203,216,220],internaldroppedlat:74,internalresponsestag:[60,74,219],internet:6,internod:[0,6,45,52,55,58,64,77,207,216,220],internode_application_receive_queue_capacity_in_byt:57,internode_application_receive_queue_reserve_endpoint_capacity_in_byt:57,internode_application_receive_queue_reserve_global_capacity_in_byt:57,internode_application_send_queue_capacity_in_byt:57,internode_application_send_queue_reserve_endpoint_capacity_in_byt:57,internode_application_send_queue_reserve_global_capacity_in_byt:57,internode_application_timeout_in_m:[6,57],internode_encrypt:[6,77],internode_inbound:[57,60],internode_outbound:[57,60],internode_tcp_connect_timeout_in_m:6,internode_tcp_user_timeout_in_m:6,internodeconnect:[132,182],internodeus:[132,182],interpret:[6,10,22,30,82],interrupt:[45,54],intersect:0,interv:[4,6,9,24,74,77,81,208],intra:[6,57,74,78,81],intrins:22,introduc:[0,1,6,10,17,24,28,37,54,57,59,61,72,79,214],introduct:[10,20,31,44,52],introspect:220,intrus:206,inttyp:64,intvalu:14,invalid:[0,6,13,20,39,57,64,77,137,139,140,141,146,209,215,219],invalidatecountercach:146,invalidatekeycach:146,invalidaterowcach:146,inventori:28,invert:81,invertedindex
:21,investig:[6,41,217,218,219,220],invoc:[14,54],invoic:27,invok:[14,38,54,58,64,77,200],involv:[0,1,6,13,23,24,28,32,36,57,58,67,70,75,76,77,214,218,220],ioerror:34,ios:220,ip1:6,ip2:6,ip3:6,ip_address:84,ipaddressandport:57,ipartition:64,ipv4:[6,17,22,45,57],ipv6:[6,17,22,57],irolemanag:6,irrevers:[11,22],irrevoc:57,is_avail:[28,29],isn:[0,18,34,42,45],iso8601:[61,204],iso:22,isol:[1,6,11,13,57,74,216,217,219],issu:[0,6,20,28,33,35,36,37,38,42,43,44,45,49,50,54,55,57,63,66,70,161,204,205,206,207,208,209,210,211,212,214,215,216,218,219,220],item:[12,22,23,24,25,33,39,40,42,54],iter:[0,6,25,58,209],its:[0,1,4,6,11,12,13,14,22,24,26,40,45,49,53,54,56,57,58,61,62,64,66,67,69,72,74,77,78,79,80,81,205,209],itself:[6,11,16,27,45,53,57,66,72,79,219],iv_length:6,jaa:77,jacki:38,jamm:40,januari:22,jar:[14,34,35,40,44,56,74],java7:77,java8_hom:40,java:[3,6,14,21,22,34,40,42,48,49,52,54,55,65,69,71,74,77,146,184,209,217,218,220],java_hom:[56,220],javaag:40,javac:56,javadoc:[33,34,39,49],javas:6,javascript:[6,14],javax:77,jbod:71,jce8:6,jce:6,jcek:6,jconsol:[52,66,77],jdbc:30,jdk11:56,jdk:[6,33,56],jdwp:40,jeff:[23,24,25,26,27,28,29,30],jenkin:[35,41,52],jetbrain:40,jira:[5,6,35,37,39,41,42,44,65,70,204,205,206,207,208,209,210,211,212,214,215],jks:81,jkskeyprovid:6,jmap:220,jmc:[66,77],jmx:[6,18,20,52,53,54,57,60,62,64,73,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],jmx_password:77,jmx_user:77,jmxremot:77,jni:56,job:[42,76,86,116,159,161,168,199],job_thread:161,john:[13,22],join:[3,6,8,13,32,52,54,56,69,76,77,79,146,218,219],joss:13,journal:[62,64],jpg:22,jre:56,jre_hom:56,jsmith:22,json:[9,10,13,15,52,64,66,70,90,195,197,204],json_claus:13,jsonz:62,jsr:[6,14],jsse:6,jsserefguid:6,jstackjunit:44,jstackjunittask:44,judgement:34,jul:[56,220],jump:56,junit:[33,34,35,40,44],junittask:44,jurisdict:6,just:[0,6,14,20,24,25,27,37,40,42,44,45,53,57,60,65,66,76,77,81,216,220],jvm:[6,21,40,45,46,50,56,69,73,77,79,217,218],jvm_extra_opt:40,jvm_opt:[46,77],jvmstabilityinspector:39,k_i:28,kashlev:30,kbp:72,keep:[0,6,8,11,25,27,28,32,34,37,42,45,50,57,58,61,64,66,67,74,75,83,137,203,214,216,219,220],keepal:[6,45],kei:[0,3,4,6,9,10,11,13,14,17,22,24,25,26,27,28,29,30,32,44,45,49,50,54,57,58,60,62,64,65,66,67,70,71,72,74,75,76,77,80,81,83,86,124,128,130,140,146,150,170,171,195,203,208],kept:[1,4,6,32,64,66,67,70,74,214],kernel:[6,45,65,220],key_alia:6,key_id:43,key_password:6,key_provid:6,keycach:74,keycachehitr:74,keyserv:43,keyspac:[0,3,6,9,10,12,14,15,16,20,22,25,28,29,48,52,54,55,59,61,63,66,70,72,73,76,77,79,81,82,83,86,87,89,94,96,105,106,115,116,119,124,128,130,137,146,148,150,151,152,153,159,161,167,168,172,185,186,187,194,195,196,199,200,202,203,204,205,206,207,210,211,212,213,214,215,218,219],keyspace1:[20,60,64,205,207,208,209,210,211,213,218],keyspace_definit:81,keyspace_nam:[11,14,20,22,54,60,64,66,76,218],keystor:[6,64,77,207],keystore_password:6,keystorepassword:77,keytyp:208,keyvalu:60,keyword:[10,11,13,14,15,16,17,22],kib:[54,62,72,76,91,145,195,219],kick:[146,163],kill:6,kilobyt:70,kind:[0,3,11,12,22,33,42,57,60,62,65,66,75,216,219],kitten:22,knife:[146,184],know:[0,4,6,11,13,22,24,34,37,43,58,6
0,66,210,218,219,220],knowledg:37,known:[0,6,20,22,24,27,28,32,47,51,59,63,66],krumma:44,ks_owner:77,ks_user:77,kspw:[64,207],ktlist:[62,185],kundera:47,label:[22,33],lack:[74,218,219],lag:74,laid:27,land:70,landlin:22,lang:[52,56,74,77,209,220],languag:[3,6,9,10,12,14,21,22,32,36,47,51,52,82],larg:[0,1,3,6,11,13,14,22,31,33,44,52,57,59,64,67,71,74,77,80,82,204,210,212,216,218,219,220],large_daili:61,larger:[0,6,44,45,50,54,67,70,71],largest:[6,74],last:[0,6,12,13,14,15,19,36,50,54,56,66,67,74,84,146,208,209,210,216,218,220],last_nam:29,lastli:[13,22],lastnam:13,latenc:[0,1,3,6,11,32,45,50,53,54,59,60,70,74,78,80,81,217,218],latent:[0,216,220],later:[0,11,22,26,28,34,36,42,45,72],latest:[0,6,32,33,43,49,66,75,82,200,206,218],latest_ev:81,latter:[12,27],launch:0,law:27,layer:71,layout:[0,11,36],lazi:11,lazili:11,lead:[0,6,10,11,22,24,50,66,69,218,220],leak:54,learn:[6,23,24,26,44,45,50,82],least:[0,4,6,11,12,13,18,36,45,59,64,66,71,76],leav:[0,6,12,13,24,34,44,45,54,57,76,82,216,218,219],left:[6,17,19,49,67,214],legaci:[4,6,20,81],legal:10,len:57,length:[4,6,10,11,17,22,39,50,57,58,66],lengthier:42,less:[0,4,6,22,35,42,45,49,57,58,63,70,71,212,215,218,219,220],let:[6,23,24,25,26,27,28,37,43,67],letter:17,level:[1,4,6,10,11,13,19,20,34,39,46,58,59,60,61,64,66,70,71,72,73,74,77,82,83,126,137,146,179,203,208,209,211,216,218,219],leveledcompactionstrategi:[11,58,63,66,67,69,206,209,219],leverag:[30,50],lexic:45,lib:[4,6,21,35,39,40,44,49,56,204,205,206,207,208,209,210,211,212,213,214,215,220],libqtcassandra:47,librari:[8,39,41,44,47,74,82],licenc:39,licens:[35,39,40,42],lie:216,lies:216,lieu:75,life:42,lifespan:71,light:57,lightweight:[0,2,3,27,32,59,83],like:[0,3,6,11,12,13,14,17,22,25,26,27,28,34,36,38,39,42,44,45,49,52,57,59,60,64,66,67,69,70,71,72,75,76,77,209,210,211,216,217,218,220],likewis:20,limit:[0,1,4,6,9,10,11,20,22,28,32,45,49,50,54,55,58,65,66,70,77,81,218,220],line:[6,12,24,27,34,42,43,44,46,50,51,54,56,57,62,64,65,77,83,84,86,87,89,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,204,207,209,214,218],linear:[0,3,71],lineariz:2,linearli:63,link:[6,8,11,12,42,44,54,57,62,65,213],linux:[6,36,43,45,49,50,56,217,218,220],list:[1,3,4,5,6,9,10,11,12,13,14,17,24,26,27,29,32,33,35,36,40,41,42,43,44,46,49,51,52,53,54,55,56,57,58,61,64,65,66,72,74,76,77,79,81,82,83,84,86,87,88,89,94,96,102,105,106,112,115,116,119,121,124,128,129,130,132,136,137,143,144,146,148,151,152,153,156,159,160,161,162,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,203,205,207,208,209,210,215],list_liter:[13,22],list_permiss:53,list_permissions_stat:12,list_rol:53,list_roles_stat:12,list_us:53,list_users_stat:12,listarg:14,listen:[6,49,52,56,74,220],listen_address:[46,51,52],listen_interfac:46,listsnapshot:[62,146,213],liter:[10,12,14,17,20,30,82],littl:[34,54,216,219],live:[0,3,13,30,36,52,66,74,79,208,213,214,218,220],live_scan:60,livediskspaceus:74,liveness_info:204,livescannedhistogram:74,livesstablecount:74,load:[0,3,6,11,21,22,37,50,52,56,58,72,73,74,76,77,78,79,81,83,138,146,153,161,186,203,216,219,220],loader:[64,207],loadm:207,local:[0,1,3,4,6,11,20,35,40,41,42,44,51,53,54,55,60,62,71,72,74,77,78,82,146,155,161,165,198,208,216,217,218,220],local_jmx:77,local_on:[0,75,77,82,216,219],local_quorum:[0,72,75,82,216,220],local_read:60,local_read_count:195,local_read_lat:60,local_read_latency_m
:195,local_scan:60,local_seri:82,local_writ:60,local_write_latency_m:195,localhost:[6,49,51,54,56,60,61,77],localpartition:60,locat:[6,24,26,32,43,48,49,61,70,74,77,78,82,192,207,216,218,220],lock:[6,45,74,220],log:[0,3,4,6,11,13,36,39,43,44,48,49,52,55,57,59,60,65,73,74,77,81,83,95,99,105,109,126,146,161,164,179,192,203,217,220],log_al:66,log_dir:54,logback:[46,61,218],logdir:[53,61],logger:[34,46,55,61,105],loggernam:61,logic:[0,6,21,25,28,30,31,52,57,59,218,219],login:[6,9,20,43,44,53,54,56,61,77,83,216],login_error:53,login_success:53,logmessag:[53,61],lol:22,longer:[6,9,10,11,30,43,45,54,57,58,66,79,86,146,211,214,216],longest:218,look:[0,6,12,24,25,28,33,36,37,38,42,44,57,66,67,71,75,76,209,211,216,218,220],lookup:[26,32,74],loop:[34,57],lose:[0,4,6,27,57,66,79,214],loss:[0,6,22,50,57,69,76,220],lost:[1,58,61,62,66,79,211],lot:[6,24,27,28,36,51,52,54,66,76,83,203,212,218,219,220],low:[0,3,6,42,80,146,148,220],lower:[0,4,6,11,12,13,20,45,63,64,66,67,70,74,79,216,218],lowercas:12,lowest:[6,42,67,72,208],lsm:[0,219,220],lucen:52,luckili:217,lwt:[0,32,59],lww:0,lz4:[4,6,57,70,72],lz4_compressor_typ:70,lz4_high_compressor_level:70,lz4compressor:[4,6,11,50,64,70,72,208],lz4hc:[6,70],mac:220,macaddr:9,machin:[6,11,44,45,54,62,74,77,78,79,208,217,220],made:[0,3,6,22,32,52,53,56,57,58,59,63,71,72,75,77,218],magazin:[32,62,64],magazine_nam:32,magazine_publish:32,magic:57,magnet:[4,6],magnitud:13,mai:[0,1,4,6,9,10,11,13,14,16,17,18,20,22,25,26,27,28,32,33,35,36,39,40,42,44,45,50,53,54,56,57,58,59,60,61,62,63,64,66,69,70,72,74,75,77,78,79,81,82,168,209,211,216,217,218,219,220],mail:[5,37,42,43,52],main:[0,6,14,18,40,43,45,48,49,54,56,62,64,70,77,82,209,216,218],main_actor:13,mainli:[6,58],maintain:[0,6,11,23,37,42,57,72],mainten:[0,72,74],major:[0,4,10,36,42,49,66,69,75,77,89,146,213,219],make:[0,1,6,8,9,21,22,23,24,25,27,28,29,30,32,33,34,35,36,37,40,42,44,45,46,54,57,59,60,64,66,67,69,70,75,76,77,79,81,82,143,146,204,218,220],malform:216,malfunct:57,malici:77,man:6,manag:[0,3,6,20,24,25,27,28,30,33,36,40,41,43,44,52,59,60,74,77,79,83,85,146,203],mandatori:[3,11,14,64],mani:[0,3,6,11,23,24,25,27,30,34,39,42,54,59,60,64,66,67,68,70,71,72,74,75,77,81,82,83,86,89,96,106,109,115,116,161,168,187,199,200,203,209,215,216,219,220],manifest:[62,64,83,203],manipul:[3,12,15,18,44,52,204],manual:[6,25,35,38,45,214,220],map:[0,3,6,9,10,11,13,14,17,20,23,29,32,50,52,65,74,218,220],map_liter:[11,16,20,22],mar:22,mark:[0,6,20,24,42,54,66,76,79,100,146,208,210,214],marker:[4,6,11,12,39,45,214],markup:36,marshal:208,mashup:23,massiv:[37,50,220],master:[2,3],match:[0,4,6,12,13,14,17,20,74,75,78,208,213],materi:[0,3,6,10,11,12,15,23,24,25,26,27,28,29,30,31,52,59,60,74,82,146,202],materialized_view_stat:12,math:72,matrix:55,matter:[11,45,75,220],maven:35,max:[4,6,11,52,54,56,57,60,66,72,74,77,81,82,109,119,127,146,161,172,180,208,211,218,219],max_archive_retri:[53,54],max_hint_window_in_m:[72,79],max_hints_delivery_thread:72,max_hints_file_size_in_mb:72,max_index_interv:64,max_log_s:[53,54,61,109],max_m:60,max_map_count:45,max_mutation_size_in_kb:[4,6,45],max_partition_s:60,max_queue_weight:[53,54,61,109],max_threshold:[64,66],maxattempt:82,maxbatchs:82,maxfiledescriptorcount:74,maxfiles:61,maxhintwindow:180,maxhistori:61,maxim:71,maximum:[4,6,14,50,53,54,57,60,61,63,72,74,82,109,121,146,168,174,208,211,212,216,218,219],maximum_live_cells_per_slice_last_five_minut:195,maximum_tombstones_per_slice_last_five_minut:195,maxinserterror:82,maxldt:205,maxoutputs:82,maxparseerror:82,maxpartitions:74,maxpools:74,maxrequest:8
2,maxrow:82,maxt:205,maxtasksqueu:74,maxthreshold:172,maxtimestamp:4,maxtimeuuid:10,mayb:13,mbean:[6,20,66,74,77],mbeanserv:20,mbit:[64,207],mbp:[6,58,72],mct:6,mean:[0,6,9,11,12,13,14,17,18,22,27,52,54,59,64,66,67,70,72,74,75,78,81,82,161,216,217,218,219,220],meaning:[13,58],meanpartitions:74,meant:[0,22,45,72,74],measur:[6,24,28,32,39,42,44,57,70,74,79,81,82,220],mebibyt:60,mechan:[0,53,65],media:[23,24,25,26,27,28,29,30],median:[60,74,218],medium:220,meet:[0,1,3,6,39,75,77],megabit:[72,207],megabyt:[6,72,212,219],mem:220,member:[0,34,43,77,81],membership:[1,2,6],memlock:45,memor:54,memori:[0,4,6,11,50,52,53,54,57,61,63,65,67,70,73,80,215,218,220],memory_pool:74,memtabl:[2,6,11,62,63,64,65,68,69,70,71,74,185,218,220],memtable_allocation_typ:4,memtable_cell_count:195,memtable_cleanup_threshold:4,memtable_data_s:195,memtable_flush_period_in_m:[11,64],memtable_off_heap_memory_us:195,memtable_switch_count:195,memtablecolumnscount:74,memtableflushwrit:[60,74,219],memtablelivedatas:74,memtableoffheaps:74,memtableonheaps:74,memtablepool:6,memtablepostflush:[60,74,219],memtablereclaimmemori:[60,74,219],memtableswitchcount:74,mention:[6,22,27,42,56,74,77,207,216],menu:[0,40],mere:34,merg:[0,36,38,42,43,63,68,70,71,73,75,220],mergetool:38,merkl:[0,6,74,76],mess:[42,44],messag:[0,1,6,22,33,36,39,42,52,53,54,55,56,58,59,61,64,72,74,75,76,77,207,211,212,213,214,215,216,218,219],messagingservic:57,met:13,meta:[13,56,74,81],metadata:[0,4,20,28,58,70,71,74,83,203,211,214,215,218],metal:6,meter:74,method:[10,13,14,20,34,37,39,40,44,48,52,54,56,64,77,81],methodolog:30,metric:[6,55,60,72,73,81,217,219,220],metricnam:74,metricsreporterconfigfil:74,mib:[53,54,91,145,195],micro:219,microsecond:[6,11,13,22,74,208,219],microservic:25,midnight:22,midpoint:0,might:[0,6,11,13,23,24,25,26,27,28,54,62,64,66,67,74,75,84,86,87,89,94,96,102,106,109,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,215,216,219],migrat:[6,50,60,74,78],migrationstag:[60,74,219],milli:4,millisecond:[4,6,10,11,22,28,54,74,148,168,196,208,211,219,220],min:[4,6,11,45,65,66,74,81,82,119,146,172,208,218,219],min_index_interv:64,min_sstable_s:68,min_threshold:[64,66],minbatchs:82,mind:28,minim:[6,25,27,54,59,69,71],minimum:[6,11,14,28,32,46,74,76,206,208],minlocaldeletiontim:208,minor:[10,12,36,54,73,75],minpartitions:74,mint:205,minthreshold:172,mintimestamp:208,mintimeuuid:10,minttl:208,minu:28,minut:[6,22,27,53,54,56,61,66,69,72,74,77,81,109],mirror:[36,49],misbehav:[52,66,217],misc:[132,182],miscelen:74,miscellan:6,miscstag:[60,74,219],mismatch:[0,6,54,57,75],misrepres:211,miss:[11,18,33,35,66,72,74,76,79,214,220],misslat:74,misspel:206,mistak:50,mistaken:[54,62,64,84,86,87,89,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202],mistun:218,mit:43,mitig:[6,77],mix:[6,49,50,54,66,69,81,220],mkdir:[53,54,56,64,218],mmap:45,mnt:16,mock:44,mode:[4,6,49,53,54,62,64,70,77,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,
187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,216],model:[3,11,15,20,26,27,29,42,52,56,58,77,81,220],moder:28,modern:[71,72,220],modestli:6,modif:[13,20,218],modifi:[6,9,10,11,14,20,22,27,32,42,60,63,69,70,210],modification_stat:13,modul:82,modular:39,modulo:0,moment:[6,42],monitor:[45,49,52,53,73,77,78,85,146,216,220],monkeyspeci:[11,18],monkeyspecies_by_popul:18,monoton:[0,11,59,73],month:[22,28,56],monument:23,more:[0,1,4,6,10,11,12,13,22,23,24,25,28,29,32,34,36,37,42,44,46,49,50,51,52,53,54,56,57,58,59,60,61,62,63,64,67,69,70,71,72,73,74,75,77,78,79,81,83,89,115,116,146,148,161,168,184,196,200,203,208,209,215,217,219,220],moreov:[13,64],most:[0,1,6,11,12,13,22,24,27,28,36,37,40,42,44,45,46,49,50,54,57,60,61,66,67,68,70,71,72,74,75,77,82,88,146,196,208,209,216,218,219,220],mostli:[4,6,11,22,25,66,217,218],motiv:[44,69],mount:[6,220],move:[0,6,28,42,45,52,54,59,65,73,74,76,146,211,214,219],movement:[0,58,59,73,218],movi:[13,22],movingaverag:6,msg:61,mtime:[11,210],mtr:220,much:[0,5,6,11,23,24,27,28,50,58,60,63,64,65,66,67,68,69,70,78,207,216,218,220],multi:[1,2,3,6,12,39,55,62,72,218,220],multilin:41,multipl:[1,4,6,10,11,12,13,14,19,22,23,24,27,30,32,34,39,40,42,45,46,50,54,58,69,71,72,75,78,81,82,83,151,203,204,216,219],multipli:[28,67],multivari:80,murmur3:4,murmur3partit:4,murmur3partition:[6,14,64,82,208],museum:23,must:[0,1,3,4,6,10,11,13,14,17,18,20,24,27,28,32,34,35,40,42,43,44,45,46,50,53,54,56,57,58,59,62,64,66,69,70,74,75,77,79,81,82,185,203,204,205,206,207,208,209,210,211,212,213,214,215],mutant:16,mutat:[0,4,6,13,45,57,60,65,72,74,75,200,219],mutatedanticompactiongaug:74,mutationsizehistogram:74,mutationstag:[60,74,219],mv1:18,mvn:35,mx4j:74,mx4j_address:74,mx4j_port:74,mx4jtool:74,mxbean:20,myaggreg:14,mycolumn:17,mydir:82,myevent:13,myfunct:14,myid:12,mykei:17,mykeyspac:[14,50],mytabl:[11,14,17,19,21],mytrigg:21,n_c:28,n_r:28,n_v:28,nairo:22,naiv:0,name:[4,6,9,10,11,12,13,14,16,17,18,20,21,22,24,26,29,30,32,33,36,39,40,42,43,44,45,46,49,53,54,60,61,62,74,76,77,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,204,206,207,210,213,216,218,219,220],names_valu:13,nan:[9,10,12,60],nano:[57,60],nanosecond:[22,74],narrow:[216,218,219,220],nathan:13,nativ:[4,6,10,12,15,17,30,39,45,51,53,54,56,60,61,64,74,82,98,108,146,152,189,207,219,220],native_transport_port:46,native_transport_port_ssl:77,native_typ:22,natur:[11,22,26,34,57,66,69,70,72,220],navig:36,nbproject:40,ncurs:220,nearli:40,neccessari:6,necessari:[6,11,14,20,27,42,53,54,57,61,70,77,204,208,211],necessarili:[6,12,25,46],need:[0,1,4,6,10,11,12,13,20,22,23,24,25,26,27,28,32,33,34,39,40,42,43,44,45,46,51,53,54,57,58,59,60,61,62,63,64,66,67,70,71,72,75,76,77,78,80,82,124,128,207,212,213,215,219,220],needlessli:0,neg:[6,50],negat:[19,77],neglig:[13,220],neighbor:[0,58,216],neighbour:66,neither:[6,18,22,77],neon:40,nerdmovi:[13,16],nest:[12,13,34],net:[6,40,45,48,61,77],netbean:[30,41],netstat:[79,146],netti:[6,55,57],network:[0,1,6,13,45,50,57,58,62,71,72,75,76,77,78,145,146,149,218],network_author:20,network_permiss:6,networktopologystrategi:[48,59,77,81],never:[0,6,10,11,12,13,14,22,27,34,45,50,57,59
,66,67,76,209],nevertheless:[13,32],new_rol:20,new_superus:77,newargtuplevalu:14,newargudtvalu:14,newer:[0,28,66,69,71,82,116,205,220],newest:[11,69,205],newli:[0,11,20,22,42,65,146,153],newreturntuplevalu:14,newreturnudtvalu:14,newtuplevalu:14,newudtvalu:14,newvalu:64,next:[0,6,24,45,51,54,56,59,60,62,64,67,68,72,75,82,217,218],ngem3b:13,ngem3c:13,nic:[56,220],nice:[28,57],nid:220,nifti:38,night:[24,28],ninth:0,nio:[6,14,55,58,74],nntp:220,node:[1,3,4,6,11,13,14,21,22,27,28,32,39,44,46,47,50,51,52,53,54,55,56,57,59,60,61,62,63,64,65,66,67,71,73,74,75,76,78,80,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207,208,209,217,218,219,220],nodej:48,nodetool:[3,4,6,18,49,52,54,55,56,58,59,60,62,63,70,73,76,77,79,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,210,213,215,216,217,218,220],nois:[6,218],noiser:218,noisi:218,nologin:9,nomenclatur:0,nomin:28,non:[0,6,9,10,11,12,13,14,20,22,25,45,56,57,58,60,63,66,70,74,77,82,208,211],nonblockingbufferhandl:57,none:[0,1,6,11,13,22,59,60,62,64,74,76,77,208],nonsens:20,noopauditlogg:53,nor:[6,11,18,22,49],norecurs:[9,20],norm:74,normal:[1,11,14,17,20,27,32,40,43,45,49,54,56,59,60,70,72,74,76,81,82,216,218,219,220],nosql:[3,30,80],nosuperus:[9,20],notabl:[14,17,49],notat:[10,12,13,24,25,28,82],note:[0,4,5,6,10,11,12,13,14,15,17,20,22,24,25,38,41,42,43,45,49,50,56,57,59,64,66,70,77,203,204,205,206,207,208,209,210,211,212,213,214,215,218,220],noth:[6,11,14,38,44,45,205],notic:[6,24,29,58,77,219,220],notif:8,notifi:57,notion:[0,11,12],notori:[0,23],noun:27,now:[10,24,25,26,29,33,34,36,40,50,66,79,220],nowinsecond:54,npercentil:11,ntp:[0,6],nullval:82,num_cor:82,num_token:[50,79],number:[0,1,4,6,10,11,12,13,14,15,17,18,22,24,25,26,27,28,29,32,33,40,42,43,44,45,50,53,54,55,57,59,61,62,63,64,66,67,69,70,72,74,75,77,79,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207,208,212,216,217,219,220],number_of_cor:6,number_of_dr:6,number_of_partitions_estim:195,numer:[3,15,19,33,63,81],numprocess:82,numsampl:218,ny229:25,object:[1,3,6,11,12,39,55,56,57,58,64,204],objectnam:20,observ:34,obsolet:[6,71,74,215],obtain:[12,24,26,28,62,64,77,220],obviou:[14,38,49],obvious:[11,23],occasion:[76,219],occup:[13,220],occupi:[6,57,74],occur:[0,6,10,12,13,21,22,45,56,57,66,71,74,76,203,204,205,206,207,208,209,210,211,212,213,214,215,220],occurr:22,octet:[6,78],odbc
:30,oddli:6,off:[0,4,6,25,45,50,57,65,70,74,77,82,146,163,220],off_heap_memory_used_tot:195,offer:[0,15,44,54,70],offheap:[63,71],offheap_buff:6,offheap_object:6,offici:[36,42,52,80,82],offset:[4,11,58,65,74],often:[0,6,11,12,25,26,27,28,34,36,37,42,44,45,53,54,61,64,66,70,71,72,76,77,78,82,109,209,216,219,220],ohc:6,ohcprovid:6,okai:34,old:[0,4,6,54,57,60,66,69,79,83,103,113,146,203,214,220],older:[4,6,14,40,49,66,71,75,82,205,213],oldest:[4,6,11,53,54,61,205],omit:[4,6,10,11,13,17,22,24,54,72,179],onc:[0,4,6,11,12,14,22,25,28,29,33,38,40,42,44,45,50,54,57,62,65,66,67,70,71,74,76,77,79,81,82,209,216],one:[0,1,4,6,9,10,11,12,13,14,17,18,20,22,24,25,27,28,32,34,37,40,42,44,46,49,50,52,53,54,56,57,58,59,61,62,63,64,66,67,68,71,72,74,75,76,77,78,79,82,83,86,89,96,106,115,116,132,146,161,168,182,185,187,199,200,203,204,208,211,213,214,216,218,219,220],oneminutecachehitr:74,ones:[6,11,12,13,14,18,20,54,74,209],ongo:[37,67,74,79],onli:[0,4,6,9,11,12,13,14,17,18,20,22,24,26,27,28,32,34,36,42,43,44,46,49,50,52,54,56,57,58,59,60,62,63,64,65,66,67,68,70,71,74,75,76,77,78,79,81,82,83,161,185,195,203,205,207,210,211,212,213,215,216,219,220],onlin:[3,82],only_purge_repaired_tombston:66,onto:[0,4,66],open:[0,3,5,6,20,33,37,41,43,56,58,64,77,78,80,207,220],openfiledescriptorcount:74,openhft:61,openjdk:[49,56],openssl:56,oper:[0,1,2,6,10,11,12,13,15,16,18,20,22,27,34,41,49,50,52,53,54,55,57,59,60,61,62,63,64,65,66,71,72,74,76,77,79,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,206,213,214,216,217,218,219,220],operand:19,operatingsystem:74,operationtimedoutexcept:216,opertaion:6,oplog:214,opnam:81,opportun:[36,63],oppos:[50,57],ops:[45,81],opt:14,optim:[0,4,6,11,12,13,28,32,45,55,58,59,66,67,69,71,79,208,218],optimis:[76,161],option1_valu:20,option:[0,3,4,6,9,10,11,12,13,14,16,20,22,24,27,28,30,36,40,42,44,45,49,50,55,56,57,58,60,62,70,71,73,75,77,79,81,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,204,207,208,209,210,211,212,213,214,215,218,219,220],oracl:[6,49,77,220],order:[0,1,4,6,9,10,11,14,18,22,24,25,26,27,28,29,32,34,42,45,54,59,60,63,64,65,69,78,79,81,82,208],ordering_claus:13,orderpreservingpartition:6,ordinari:6,org:[6,14,21,33,34,35,36,40,43,44,45,49,56,61,64,66,70,74,77,204,205,206,207,208,209,210,211,212,214,215,218],organ:[3,4,24,27,32,33,40,47],orgapachecassandra:43,orient:3,origin:[0,4,9,28,36,38,42,53,65,72,168,209,211,212,213],orign:13,os_prio:220,osx:36,other:[0,3,4,6,10,11,12,13,14,18,20,22,23,24,25,27,29,30,32,35,36,37,38,40,42,46,49,52,57,58,61,62,63,64,66,67,69,71,72,73,74,75,77,78,79,146,151,162,205,208,209,214,216,217,218,219,220],other_rol:20,otherwis:[0,6,9,12,13,16,22,24,58,72,76,121,205,216],our:[5,6,8,33,36,37,38,40,43,57,59,66,220],ourselv:38,out:[1,2,3,4,6,1
1,12,24,25,27,30,34,35,37,40,42,43,54,58,66,67,68,69,74,75,76,77,78,79,80,161,204,205,216,219,220],outag:0,outbound:[6,58,60],outboundtcpconnect:6,outdat:75,outgo:[6,57,220],outgoingbyt:74,outlin:[33,77],outofmemori:64,outofmemoryerror:52,output:[14,20,39,40,43,49,53,54,56,58,59,63,64,66,68,76,81,82,83,89,90,195,197,203,208,211,212,214,215],output_dir:64,outputdir:64,outsid:[11,21,22,57],outstand:[214,219],oval:23,over:[0,3,4,6,11,22,24,27,32,45,50,54,57,58,59,66,67,74,76,77,78,79,81,209,211,214],overal:[14,57],overflow:[17,83,168,203],overhead:[6,32,45,50,55,59,70,74,79],overidden:77,overlap:[0,66,67,209],overli:[27,28],overload:[6,14,45,207],overrid:[6,11,33,34,53,64,72,77,79,82,168,207,211],overridden:[6,11,54,60,61],overs:75,overview:[2,52,73],overwrit:[24,61,70,71,72,77],overwritten:[74,116],own:[0,11,12,14,22,37,41,42,45,49,54,56,57,64,66,70,74,76,77,80,81,124,130,137,146,200,209,219],owner:22,ownership:[0,66,167],ownersip:218,p0000:22,p50:219,p50th_m:60,p99:[11,220],p99th:11,p99th_m:60,pacif:22,packag:[33,40,44,45,46,48,51,82,218],packet:[6,218],page:[6,22,33,36,37,40,44,45,50,71,74,80,83,217,219],paged_rang:219,paged_slic:74,pages:82,pagetimeout:82,pai:[34,35,43],pair:[6,11,20,22,57,62,69,77],pane:30,paper:[0,50],parallel:[18,44,55,66,67,76,161,219],param:[11,57],paramet:[4,6,11,14,32,33,34,39,40,46,54,57,61,63,70,71,78,79,146,179],parameter:33,params:57,paranoid:6,parent:[35,207],parenthes:[29,32,53],parenthesi:[11,81,82,216],park:23,parnew:71,pars:[6,12,53,57,61,65,82,220],parser:[9,10],part:[3,5,6,11,13,14,18,22,24,25,26,28,35,39,40,42,44,45,49,64,77,78,79,82,207,216],parti:[39,52,74,204],partial:[4,11,28,75,214],particip:[0,21,57],particular:[0,6,11,12,13,14,17,20,22,24,32,45,57,62,71,74,77,216,218,219,220],particularli:[12,22,49,77,218,219,220],partit:[1,2,3,4,6,10,11,13,14,24,27,29,30,31,45,50,54,58,60,62,63,64,66,67,71,72,74,75,81,116,124,128,146,168,196,204,208,216,218,219],partition:[4,10,13,14,60,64,76,82,93,146,161,208],partition_kei:[11,13],partitionspercounterbatch:74,partitionsperloggedbatch:74,partitionsperunloggedbatch:74,partitionsvalid:74,partli:13,pass:[39,42,46,82,184,207,208,219,220],password:[6,9,13,20,53,54,60,62,64,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207],password_a:20,password_b:20,passwordauthent:[6,77],passwordfilepath:[53,54,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],past:[24,74],patch:[10,13,33,34,36,38,39,41,44,52,58],path1:[53,54,61],path2:[53,54,61],path:[0,5,6,16,27,39,53,54,56,58,59,62,63,64,69,70,71,74,77,80,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141
,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,204,205,207,211,213,215,218,220],pathn:[53,54,61],patter:20,pattern:[6,11,20,22,23,29,30,31,32,61,216,219,220],paus:[6,45,72,146,147,218,220],pausehandoff:[72,146],pauser:[53,54],paxo:[1,13,55,74,82],paxos_prepar:57,payload:57,payloads:57,pcap:220,peak:[207,220],peer:[0,6,20,50,54,56,57,74,220],peerip:74,pem:[54,56],penalti:[6,13,50],pend:[6,55,57,60,66,72,74,76,146,160,219],pending_flush:195,pending_task:60,pendingcompact:74,pendingflush:74,pendingrangecalcul:[60,74,219],pendingtask:74,pendingtasksbytablenam:74,pennsylvania:22,peopl:[36,42,45],per:[4,6,10,11,13,28,32,34,38,42,43,45,54,57,59,63,64,65,66,67,68,69,70,72,74,76,77,81,82,146,169,177,204,207,214,216,218,219,220],per_second:60,percent:74,percent_repair:195,percentag:[6,74,78,220],percentil:[11,74,216,219,220],percentrepair:74,perdiskmemtableflushwriter_0:[60,74,219],perf:220,perfdisablesharedmem:220,perfect:14,perfectli:[27,57],perform:[0,1,3,6,11,13,20,22,24,25,27,28,30,32,37,38,39,41,42,45,46,50,53,55,58,59,60,61,63,66,67,70,71,72,74,75,76,77,78,82,161,218,219,220],perhap:[24,28,57,216,218],period:[0,4,6,33,53,57,59,71,74,76,77,79,146,148,220],perl:48,perman:[11,45,66,71,72,218],permiss:[6,9,12,23,24,25,26,27,28,29,30,44,49,53,54,64,77],permit:[6,20,54,57,58,65,77],persist:[0,4,45,56,63,65,71,77,220],person:220,perspect:[24,26,45],pet:22,petabyt:[0,1],peter:23,pgp:43,phantom:47,phase:[79,82,219],phi:[0,6],phone:[13,22,25,29],phone_numb:29,php:48,physic:[6,11,28,29,30,31,45,52,71,78],pib:0,pick:[6,38,42,45,58,66,67,70,77,79,81,151],pickl:50,pid:[45,220],piec:[0,12,66,74],pile:6,pin:[6,78],ping:[42,220],pkcs5pad:6,place:[5,6,16,21,23,32,34,38,42,54,65,69,74,76,77,82,146,153,207,212,218,220],placehold:[14,82],plai:[14,22],plain:4,plan:[11,28,38,42,50],plane:[0,36],platform:[20,33,49,80],platter:[6,71],player:[14,22],playorm:47,pleas:[5,6,11,13,14,22,33,34,36,40,42,43,44,45,56,61,77,81,215],plu:[14,28,66,74,219],plug:[6,28,33],pluggabl:[0,20,77],plugin:[30,52,74],pmc:43,poe:22,poi:[26,29],poi_nam:[24,29],point:[0,3,4,6,10,17,22,23,24,26,27,30,34,36,40,43,50,52,57,59,70,72,77,81,82,124,146,207,216,220],pointer:[14,58],pois_by_hotel:[24,29],polici:[6,11,42,43,77,200,216],poll:77,pom:41,pool:[6,43,56,74,76,146,174,197,219,220],poorli:24,pop:81,popul:[11,18,81],popular:[23,24,30,40,49,71],port:[0,6,40,46,52,53,54,60,61,62,64,74,77,81,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207,220],portabl:62,portion:[0,24,50,71,82,212],posit:[0,4,6,10,11,19,22,58,63,74,79,204,208],possbili:6,possess:20,possibl:[0,1,6,10,11,13,14,17,20,22,28,33,39,42,44,45,50,57,58,59,63,66,67,71,72,74,77,79,81,209,216,218],post:[13,33,41,57,146,171],post_at:13,postal_cod:29,posted_at:13,posted_bi:11,posted_month:11,posted_tim:11,potenti:[0,6,9,11,12,14,24,28,39,54,61,69,71,77,79,168,209,211],power8:80,power:[6,11,80],pr3z1den7:22,practic:[0,6,11,12,13,27,29,30,43,57,59,72,73,77],pre:[6,11,17,22,54,57,58,71,77,211,212,214]
,preced:[19,32,45,62,64,81],precis:[10,17,22,66,208],precondit:74,predefin:11,predict:[13,28,54,209],prefer:[0,6,11,12,22,27,34,42,57,59,70,77,78],preferipv4stack:40,prefix:[11,12,22,208,214],premis:50,prepar:[6,14,15,53,55,61,64,74],prepare_releas:43,prepare_stat:53,preparedstatementscount:74,preparedstatementsevict:74,preparedstatementsexecut:74,preparedstatementsratio:74,prepend:22,prerequisit:[41,48],prescript:49,presenc:6,presens:4,present:[0,3,12,13,18,30,46,60,65,74,77,211,220],preserv:[6,11,17,20,27,28,75],preserveframepoint:220,pressur:[6,58,74,219,220],presum:27,pretti:[24,28,82,220],prevent:[0,6,11,44,65,70,74,75,76,207,211,220],preview:[36,55,76,161],previewkind:76,previou:[0,6,10,11,22,28,49,61,67,76,79,213],previous:[6,24,28,57,72,214],previous_vers:43,previsouli:[112,146],price:27,primari:[0,3,9,10,11,13,14,22,24,25,27,28,29,32,44,54,60,62,64,65,69,70,75,76,77,79,81],primarili:[0,6,11],primary_kei:[11,18],print0:210,print:[49,53,54,62,64,72,76,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,218],prio:220,prior:[6,13,20,22,64,79],prioriti:[1,42,220],privat:[6,34,43,77,78],privileg:[20,77],proactiv:57,probabilist:[0,63,70],probabl:[0,4,6,11,24,44,63,66,76,133,146,183,218,219,220],problem:[0,5,6,11,14,24,27,38,39,43,45,50,70,72,77,216,217,219,220],problemat:[22,50,216],proc:[6,45],proce:[39,70,79,216],procedur:[13,43,77],proceed:215,process:[0,1,6,14,24,25,32,33,35,37,38,39,40,41,42,44,45,50,52,57,58,59,60,65,70,71,72,74,75,76,77,79,80,82,85,121,146,147,166,174,206,207,211,213,214,215,218,219,220],processed_byt:60,processed_count:60,processor:[3,53,54,70],prod_clust:82,produc:[13,14,26,37,54,57,66,69,109,216],product:[0,1,6,11,27,28,35,37,42,45,48,49,52,54,56,59,71,78],profil:[13,40,70,83,146,148,220],profileload:146,program:[14,44,217,220],programm:0,programmat:210,progress:[34,38,42,49,57,60,63,64,73,76,81,83,146,202,203,214,219],project:[26,33,34,35,36,37,43,44,74],promin:11,promot:4,prompt:[43,82],prone:50,propag:[0,6,14,34,39,57,72,78],proper:[0,11,22,36,45,54,77],properli:[6,27,39],properti:[4,6,11,18,20,25,32,40,43,48,53,54,59,65,66,75,77,78,79],propertyfilesnitch:[6,78],proport:[6,13,50],proportion:[6,72,118,146,169],propos:[6,24,43,55,74],protect:[6,11,24,57,59,70,71,76,77,214],protocol:[0,1,6,39,45,51,54,55,60,61,64,74,77,82,88,98,103,108,113,146,189,207,218,220],protocol_vers:60,prove:[25,220],provid:[0,1,3,4,5,6,11,12,13,14,15,17,22,24,27,28,30,32,40,42,43,51,53,54,57,58,61,64,65,66,70,71,72,74,75,76,77,78,79,80,81,83,145,146,156,160,207,208,209,212,214,215,216,218],provis:[50,64,220],proxim:[6,78],proxyhistogram:[146,219],prtcl:[64,207],prune:57,prv:[58,76,161],ps1:77,ps22dhd:13,pt89h8m53:22,publicationfrequ:32,publish:[23,24,25,26,27,28,29,30,32,35,62,64],published_d:81,pull:[0,27,36,44,50,69,74,76,161],pure:[66,220],purg:71,purpos:[12,13,22,27,57,58,62,71,77],push:[38,42,43,74],put:[15,24,28,42,46,54,58,64,66,67,68,79,137,161,209,219],pwf:[53,54,62,64,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138
,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],pylib:49,python:[14,33,42,44,48,49,82],pytz:83,qos:61,quak:[14,22],qualifi:[11,14,42,64,219],qualiti:[36,55,77],quantiti:[1,22,59,219],queri:[1,3,6,10,11,12,13,14,16,18,20,24,25,28,29,30,31,48,52,53,55,57,59,60,61,62,64,66,69,74,76,80,81,82,99,109,146,164,217,220],query_fail:54,queryabl:1,queryopt:[53,54],queryst:[53,54],querytimemilli:54,queryvalidationexcept:216,question:[8,20,27,41,52,220],queu:[6,55,74],queue:[6,24,53,54,57,61,74,109,219,220],quick:[24,54,64,137,200,215],quickli:[24,45,58,62,67,72,219],quill:47,quintana:22,quit:[0,28,60,66,82,207,219,220],quorum:[0,11,55,72,73,77,82,216],quot:[9,10,11,12,14,17,20,81,82],quotat:20,quoted_identifi:12,quoted_nam:11,r_await:220,r_k:28,race:[22,38],rack1:6,rack:[0,6,48,54,60,76,77,78,216,219],rackdc:[6,78],rackinferringsnitch:[6,78],raid0:71,raid1:71,raid5:71,rain:12,rais:[6,12,45,53,72,216],raison:9,ram:[50,63,70,71,220],ran:210,random:[0,11,14,45,70,79],randomli:[6,79],randompartition:[6,13,14],rang:[0,6,10,11,13,22,24,26,39,55,58,66,67,73,74,76,81,82,89,94,132,146,151,161,182,216,219],range_slic:[74,219],rangekeysampl:146,rangelat:74,rangemov:79,rangeslic:74,rapid:[11,32,59,71],rapidli:220,rare:[10,27,63,64,216],raspberri:71,rate:[0,6,11,23,26,72,74,77,81,82,207,220],ratebasedbackpressur:6,ratefil:82,rather:[6,13,24,26,45,57,58,60,65,66,71,81],ratio:[6,70,71,74,81,208],ration:4,raw:[4,6,14,83,203,218],rdbm:[0,31,52],reacah:61,reach:[0,1,4,6,11,28,42,45,50,53,54,59,65,66,75,209],reachabl:54,read:[0,1,3,6,11,13,22,24,29,32,34,36,39,44,45,48,52,54,55,57,60,63,64,66,67,68,69,70,71,72,73,74,77,78,81,82,132,182,195,200,207,208,215,216,218,219,220],read_ahead_kb:220,read_lat:195,read_repair:[0,11,59,74,75,219],read_repair_ch:75,read_request_timeout:45,readabl:[11,25,53,54,61,65,91,145,195,219],reader:54,readi:[0,11,24,29,36,42,49,60,77],readlat:[74,216],readm:36,readrepair:74,readrepairstag:[74,219],readstag:[60,74,76,219],readtimeoutexcept:216,readwrit:77,real:[1,4,8,11,23,25,27,34,45,80,218],realclean:35,realis:81,realiz:[27,66],realli:[6,27,28,44,58,60,206,210,216,220],realtim:65,reappear:76,reason:[0,4,6,11,13,14,15,18,27,28,45,46,53,57,64,69,71,76,77,79,219,220],rebas:36,rebuild:[0,58,59,63,66,70,74,146,152,168],rebuild_index:146,receiv:[0,1,6,14,18,42,45,49,57,58,59,60,66,71,72,75,79,216,220],received_byt:60,received_count:60,recent:[1,6,42,44,56,71,75,88,209,214],recent_hit_rate_per_second:60,recent_request_rate_per_second:60,reclaim:[54,57,59,61,69],recogn:[13,28,40,42,56],recommend:[4,6,11,22,27,36,45,48,49,52,54,56,59,60,61,64,71,77,79,218],recompact:66,recompress:70,reconcil:[0,1,11,75],reconnect:77,reconstruct:209,record:[4,6,11,13,19,22,23,24,26,27,28,42,50,53,54,61,66,74,81,220],recov:[6,45,55,58,60,66],recoveri:[6,58],recreat:[20,54,82],rectangl:23,recurs:[54,109],recycl:[4,6,74],redhat:[49,56],redirect:61,redistribut:[6,76,218],redo:42,reduc:[4,6,11,25,28,37,45,54,57,58,59,62,67,70,72,75,76,83,92,118,146,161,169,203],reduct:6,redund:[0,32,34,39,42,57,59,71],reenabl:[108,110,111,146],ref:[43,61,204,205,206,207,208,209,210,211,212,214,215],refer:[1,6,11,12,13,14,22,24,25,26,27,28,32,33,34,35,44,45,50,51,59,61,79,81,82,216,218],referenc:[6,26,28,81],referenti:3,refin:[29,31,52],reflect:[65,66,204],refresh:[6,64,77,82,146,154],refreshsizeestim:146,refus:[52,57],regain:57,regard:[1
1,13],regardless:[0,6,20,42,59,72,220],regener:[58,63],regexp:12,region:[6,78],regist:22,registri:77,regress:[39,44],regular:[9,12,28,36,40,44,45,55,57,74,82],regularcolumn:208,regularli:[50,76],regularstatementsexecut:74,regularupd:81,reifi:58,reilli:[23,24,25,26,27,28,29,30],reinforc:30,reinsert:[168,211],reject:[6,13,45,57,65,77,216],rel:[6,22,25,28,70,82,220],relat:[0,3,8,10,12,13,23,24,27,28,30,31,35,40,42,53,58,66,72,74,81,208,216,220],relationship:[6,23,24,27,32],releas:[6,10,27,28,32,41,42,49,52,56,59,82,220],relev:[13,20,22,42,64,70,77,80,207,208,211,220],relevel:[83,203],reli:[0,6,14,22,24,45,70],reliabl:[1,3,37,50,69],reload:[6,73,146,155,156,157,158],reloadlocalschema:146,reloadse:146,reloadssl:[77,146],reloadtrigg:146,reloc:[58,146,159,192,218],relocatesst:146,remain:[0,6,13,14,20,22,32,38,50,57,58,60,66,69,74,76,79,195,219],remaind:[17,19,70],remeb:61,remedi:67,rememb:[24,25,28,61,216],remind:24,remot:[0,4,36,38,40,52,53,54,62,64,67,77,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,216],remov:[0,1,4,6,10,11,12,13,14,15,17,18,20,22,35,39,43,45,52,54,57,59,64,65,73,75,77,84,87,116,146,160,209,214,215,220],removenod:[79,84,146],renam:[9,22],render:36,reorder:6,repair:[0,4,6,11,18,45,52,55,60,64,68,69,70,72,73,74,78,79,83,137,146,162,179,200,203,208,211,215,219],repair_admin:146,repairedat:210,repairpreparetim:74,repairtim:74,repeat:[12,43,54,70,77],replac:[0,6,9,14,20,22,35,39,45,52,53,54,57,58,59,66,73,75,76,109,213,214],replace_address_first_boot:[0,79],replai:[0,3,4,22,55,71,72,74,118,146,163,169,208],replaybatchlog:146,repli:[37,57],replic:[1,2,3,6,11,28,29,32,50,52,53,54,55,58,60,62,64,66,71,72,76,77,79,81,84,146],replica:[1,6,11,13,28,45,55,66,72,74,75,76,78,79,92,128,146,216,219,220],replica_2:72,replication_factor:[0,11,29,53,54,59,62,76,77,81],repo:[35,38,40,43,49],repo_gpgcheck:49,report:[6,27,35,41,42,49,52,73,216],report_writ:20,reportfrequ:82,repositori:[5,8,33,35,36,37,40,42,44,49,80],repres:[0,6,10,17,20,22,23,24,25,26,27,28,45,66,70,74,77,78,81,82,208,218],represent:[10,17,25,204],reproduc:[25,37],reproduct:37,request:[0,1,6,11,13,20,21,32,36,44,45,53,54,55,58,59,60,61,62,63,66,69,71,73,75,77,78,82,146,183,199,215,216,219,220],request_count:60,request_failur:53,request_respons:[57,74,219],requestresponsest:219,requestresponsestag:[60,74,76,219],requesttyp:74,requir:[0,1,3,6,11,13,14,20,24,27,28,32,34,36,38,39,40,41,42,43,45,49,50,53,54,58,59,63,64,70,71,75,77,81,206,207,210,213],require_client_auth:6,require_endpoint_verif:6,requisit:75,resampl:6,reserv:[6,10,12,15,23,26,27,28,29,30,31,57,60,220],reservations_by_confirm:29,reservations_by_guest:[24,29],reservations_by_hotel_d:29,reservoir:216,reset:[6,13,36,54,57,146,165,179,206],reset_bootstrap_progress:79,resetfullquerylog:[54,146],resetlocalschema:146,resid:[6,13,45,74,220],resolut:[0,6,13,41,45],resolv:[0,35,38,45,75,167,186],resort:[84,146],resourc:[0,20,50,54,55,77,207,219],resp:14,respect:[6,10,11,14,33,35,43,54,57,60,62,64,76,78,109,218],respond:[0,6,12,72,75,220],respons:[0,1,6,20,25,32,45,57,58,59,74,75,79,219],ressourc:22,rest:[6,11,12,22,32,33,39,79,216],restart:[3,45,66,72,77,79,146,153,171,206,218],rest
or:[58,64,66,73,79,82,207,213,214],restrict:[6,10,11,13,18,25,59,76],restructuredtext:36,result:[0,6,10,11,12,14,17,20,22,25,27,28,30,32,37,42,45,50,54,57,58,64,66,68,74,76,82,203,204,205,206,207,208,209,210,211,212,213,214,215,220],resum:[72,85,146,166],resumehandoff:[72,146],resurrect:66,resync:[146,165],retain:[20,45,53,54,61,66,72,211,213],retent:27,rethrow:34,retir:36,retri:[0,6,11,22,54,57,74,75,109],retriev:[0,11,13,20,24,32,35],reus:[28,39],reveal:28,revers:[11,13,24],revert:218,review:[11,34,36,41,42,44,52],revis:[25,81],revok:[9,53,77],revoke_permission_stat:12,revoke_role_stat:12,rewrit:[63,66,70,83,146,168,199,203,211],rewritten:[71,168,211],rfc:[14,22],rhel:[49,52],rich:[22,218],rid:35,rider:22,riderresult:22,right:[6,19,23,24,25,26,27,28,29,30,40,43,45,66,70,76,82,219,220],ring:[6,52,56,59,64,72,76,77,79,82,142,144,146,179,207,216],rise:[0,216],risk:[0,1,11,24,49,69],riski:69,rmb:220,rmem_max:6,rmi:[45,77],robin:72,rogu:14,role:[6,9,10,12,15,73],role_a:20,role_admin:20,role_b:20,role_c:20,role_manag:77,role_nam:20,role_opt:20,role_or_permission_stat:12,role_permiss:6,roll:[45,61,72,77,109],roll_cycl:[53,54,61,109],rollcycl:61,rollingfileappend:61,rollingpolici:61,rollov:61,romain:22,room:[5,8,23,24,26,27,28,29,43],room_id:28,room_numb:[28,29],root:[6,38,42,49,54,56,64,215,218],rotat:[6,54,218],rough:70,roughli:[0,6,64],round:[13,24,67,72,74],rout:[0,6,57,78],routin:[72,220],row:[0,3,4,6,10,11,13,14,15,17,18,24,28,32,44,50,51,54,58,60,62,63,64,70,71,74,75,76,81,82,83,116,137,141,146,168,170,171,203,208,211,215,220],row_column_count:54,rowcach:[52,74],rowcachehit:74,rowcachehitoutofrang:74,rowcachemiss:74,rowindexentri:74,rows_per_partit:[11,64],rpc:[6,74],rpc_address:[54,60],rpc_timeout_in_m:[132,182],rpm:[43,48],rpmmacro:43,rrqm:220,rsa:43,rsc:200,rst:36,rubi:[14,48],rule:[0,6,12,14,42,45,216,218],run:[0,4,5,6,12,22,24,28,30,33,35,38,40,42,43,45,46,49,53,55,56,57,58,59,60,62,64,66,68,69,71,72,74,75,76,77,79,80,81,83,137,146,161,184,203,206,207,208,210,212,213,217,218,219,220],runnabl:220,runtim:[3,6,18,28,42,48,49,56,73,126,146],runtimeexcept:34,rust:48,s_j:28,s_t:28,safe:[0,14,22,54,57,59,66,77,220],safeguard:71,safepoint:218,safeti:[11,54,66,79],sai:[52,57],said:[11,42,45,64,146,199,220],salient:60,same:[0,1,4,5,6,11,12,13,14,15,17,18,19,20,22,24,25,27,32,36,38,40,42,46,50,52,53,54,57,59,60,63,64,66,67,68,69,72,74,75,76,77,78,79,81,161,209,214,216,218,220],samerow:81,sampl:[4,6,12,14,25,54,58,64,74,81,82,109,146,148,150,196],sampler:[60,74,148,196,219],san:71,sandbox:[6,14],sasi:6,satisfi:[0,11,24,27,34,59,71,74,75,79],satur:[6,74,219,220],save:[6,13,22,33,35,45,46,57,63,70,71,79,81,146,171],saved_cach:6,saved_caches_directori:46,sbin:45,scala:[14,48],scalabl:[0,2,3,43,80],scalar:15,scale:[1,2,3,44,70,80,81],scan:[6,13,24,59,63,74],scenario:38,scene:45,schedul:[0,6,33,57,60],scheduled_byt:60,scheduled_count:60,schema:[0,3,9,11,14,17,30,31,50,52,54,60,62,64,74,75,77,81,82,93,146,155,165,206,208],schema_own:20,schema_vers:54,scheme:0,scientif:24,scope:[20,25,53,61,74,77],score:[6,14,22,78],script:[6,14,30,33,40,43,44,49,53,54,56,83,109,203,204,205,206,207,208,209,211,212,213,214,215,220],scrub:[63,66,70,74,83,146,192,203],sda1:50,sda:220,sdb:220,sdc1:220,sdc:220,search:[24,27,28,42,62,80,218],searchabl:220,second:[0,6,11,12,13,22,24,27,28,32,45,56,62,65,71,72,75,76,77,81,82,83,146,169,177,203,216,218,219,220],secondari:[0,2,3,10,12,13,15,25,27,52,59,60,66,74,80,146,152],secondary_index_stat:12,secondaryindexmanag:[60,74,219],secret:43,section:[2,4,5,7,10,11,12,13,15,20,22,43,4
5,48,49,51,53,54,55,57,61,62,64,66,74,76,77,79,83,203,207,218,219],sector:50,secur:[6,14,15,43,52,73],seda:3,see:[0,4,6,10,11,12,13,14,17,20,22,23,24,26,27,28,35,37,40,42,43,44,49,51,52,54,57,61,66,68,70,74,77,78,79,82,116,146,161,206,208,209,212,218,219,220],seed:[0,6,46,52,56,78,129,146,156],seedprovid:6,seek:[0,3,4,6,50,71,74],seem:24,seen:[6,11],segment:[4,6,54,61,65,72,74,82,109,218,219],segment_nam:65,segmentid:208,select:[0,6,9,10,11,12,14,15,19,20,24,26,27,29,30,33,40,43,44,45,51,53,54,58,59,60,61,62,63,64,67,69,75,76,77,81,82,151,218,219,220],select_claus:13,select_stat:[12,18],self:[24,39],selinux:45,semant:[3,10,13,14,27],semi:45,send:[0,6,8,11,32,45,57,58,59,60,72,75,81,216,220],sender:57,sendto:81,sens:[10,13,15,25,45],sensic:14,sensit:[11,12,14,17,60,220],sensor:[22,24,28],sent:[0,6,11,22,45,57,58,59,60,72,74,75,216,220],sentenc:42,separ:[0,4,6,11,13,24,25,27,28,32,34,36,42,46,53,54,57,58,59,61,62,64,66,69,71,77,79,82,84,86,87,89,94,96,102,105,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,204,207,211],septemb:42,seq:[6,161],sequenc:[12,54,57],sequenti:[1,6,58,71,161],seren:13,seri:[11,24,43,49,66,69,82],serial:[4,6,54,58,72,83],serializeds:57,serializingcacheprovid:6,seriou:[36,216,219],serv:[1,13,24,50,57,58,59,71,77,220],server:[6,12,13,22,27,40,41,43,44,45,49,50,56,57,64,71,74,77,80,81,207,216],server_encryption_opt:[64,77,207],servic:[0,6,25,40,49,56,72,74,77,79,218,220],session:[6,20,57,76,77,83,146,162],set:[0,1,3,4,6,9,10,11,12,13,14,17,18,24,27,29,32,36,39,41,42,44,46,50,52,56,57,58,59,61,63,64,65,66,69,70,71,72,74,75,77,78,79,81,82,83,86,105,116,146,159,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,199,203,204,206,207,213,215,216,217,218,219,220],set_liter:[20,22],setbatchlogreplaythrottl:146,setcachecapac:146,setcachekeystosav:146,setcompactionthreshold:[66,146],setcompactionthroughput:[66,146],setconcurr:146,setconcurrentcompactor:146,setconcurrentviewbuild:[18,146],sethintedhandoffthrottlekb:[72,146],setint:[14,64],setinterdcstreamthroughput:146,setlogginglevel:[146,218],setlong:14,setmaxhintwindow:[72,146],setra:50,setstr:14,setstreamthroughput:146,setter:[20,33,34],settimeout:146,settraceprob:146,setup:[6,36,42,44,54,77],sever:[3,4,13,20,24,26,27,30,53,54,57,58,60,62,66,68,72,76,77,81,207],sfunc:[9,14],sha1:[43,213],sha256:49,sha:[38,43],shadow:[18,66,69],shall:[54,56,60,62,64,72],shape:81,shard:[4,28],share:[0,11,13,40,50,208,216,220],sharedpool:[57,82],sharp:47,shed:45,shelf:0,shell:[51,52,56,83],shift:22,ship:[35,44,51,77,82,218,220],shop:[23,24,26],shortcut:18,shorter:[36,77],shorthand:[26,82],shortli:28,shortlog:43,should:[0,4,5,6,10,11,12,13,14,17,20,22,24,27,32,33,35,36,39,40,42,44,45,46,47,48,49,50,51,53,54,56,57,59,61,62,63,64,66,67,68,69,70,71,72,74,76,77,78,79,81,82,151,161,182,212,214,216,220],shouldn:[11,46],show:[20,23,24,25,27,35,52,57,61,76,79,83,94,114,134,146,150,160,167,186,187,195,202,203,215,216,218,219,220],shown:[12,24,25,26,28,32,75,82,195,207],shrink:[6,50],shuffl:57,shut:6,shutdown:[4,6,71],side:[6,11,13,17,22,27,32,57,58,77,216],sign:[13,22,45],signal:[6,146,157],signatur:[49,65],signifi:220,signific:[6,36,40,42,44,50,57,58,70,71,75,216],significantli:[0,6,28,50,58,76,220],silent:14,similar:[0,6,13,14,24,26,53,56,57,59,64,70,71,215,216,220],similarli:[0,10,17,29,34,54,62,71,146,151],similiar:76,simpl:[6,11,23,24,25,27,28,35,37,40,44,59,72,77],simple_classnam:44,simple_sel
ect:13,simplequerytest:44,simpler:0,simplereplicationstrategi:77,simpleseedprovid:6,simplesnitch:[6,78],simplest:49,simplestrategi:[29,50,53,54,59,62,76,81],simpli:[0,4,6,11,13,14,17,22,27,28,32,40,44,57,69,71,74,79,200],simplic:54,simplifi:[25,28],simul:44,simultan:[0,1,6,57,58,71,72,82,86,116,159,168,199],sinc:[0,6,11,13,14,22,28,32,36,40,44,45,50,54,56,60,64,66,67,72,74,75,76,78,79,206,209,211,219,220],singl:[0,1,3,6,10,11,12,13,14,17,18,20,22,24,27,28,29,32,34,42,46,49,50,51,52,54,56,57,58,67,69,72,73,74,75,76,77,78,82,83,89,203,216,218,219,220],singleton:39,sit:54,site:[36,49],situat:[6,44,66,72,220],six:56,size:[0,4,6,11,22,25,31,32,34,45,46,50,53,54,55,56,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,77,81,82,83,109,143,146,203,206,208,209,210,213,218,219,220],size_byt:[58,60],size_estim:[146,154,218],sizeandtimebasedrollingpolici:61,sizeof:28,sizetieredcompactionstrategi:[11,64,68,69,219],sjk:146,skinni:219,skip:[0,6,13,45,54,57,62,74,79,82,83,168,185,203,206,212],skipcol:82,skipflush:62,skiprow:82,sla:[39,50],slack:[5,42,52,76],slash:12,slave:33,sleep:220,sleepi:[53,54],slf4j:[34,35,53,61],slf4jexceptionhandl:54,slight:0,slightli:6,slow:[0,3,6,11,58,59,70,78,216,218,219,220],slower:[6,11,63,70,72,219,220],slowest:6,slowli:[6,22],small:[0,4,6,11,13,22,28,45,50,57,66,71,83,203,207,216,220],smaller:[0,4,6,28,45,68,71,82,212],smallest:[0,11,14,74,209],smallint:[9,10,14,17,19,22,29],smith:22,smoother:10,smoothli:6,snappi:[4,6,70,72],snappycompressor:[11,70],snapshot:[3,4,6,27,35,54,56,73,74,83,87,143,146,168,203,211,215,220],snapshot_before_compact:62,snapshot_nam:[87,213],snapshotnam:[87,146],snippet:56,snitch:[0,6,48,52,73,75,93,146],snt:220,socket:[6,77,182],soft:[36,59],softwar:[24,35],sold:24,sole:[11,37],solid:[6,50,71],solr:80,solut:[33,50,53],solv:0,some:[0,1,3,6,9,11,12,13,14,22,24,25,26,27,28,30,35,36,37,40,42,44,45,46,50,53,54,57,58,59,60,62,64,65,66,70,72,74,75,77,79,82,208,210,216,218,219,220],some_funct:14,some_keysopac:[11,59],some_nam:12,someaggreg:14,somearg:14,somefunct:14,someon:[38,66],someth:[6,23,27,64,75,210,218,220],sometim:[6,12,13,24,57,72,216,217,218,219,220],someudt:14,somewher:76,soon:[26,77],sooner:6,sophist:0,sort:[4,11,13,22,24,28,32,60,64,66,71,80,195,209,218],sort_kei:195,sound:28,sourc:[3,4,5,6,8,14,33,35,36,37,41,43,49,53,56,61,64,74,83,151,204,213,216],source_elaps:82,space:[0,4,6,28,32,34,45,54,57,58,59,62,65,66,67,69,71,74,212,220],space_used_by_snapshots_tot:195,space_used_l:195,space_used_tot:195,span:[0,6,13,66],spare:[33,218],sparingli:13,spark:47,spd:64,speak:[0,70,217,218,220],spec:[39,51,54,74,81,82],speci:[11,18],special:[0,12,13,44,45,57,60,66,74,83,214],specif:[0,9,11,12,13,22,24,28,32,36,40,42,45,47,53,54,57,60,62,64,65,66,69,74,76,77,81,82,146,151,161,207],specifc:74,specifi:[0,6,10,11,12,13,14,16,18,20,22,24,27,28,35,40,45,51,53,54,58,59,62,64,65,69,70,72,74,75,77,79,81,82,83,87,89,130,146,151,161,167,180,182,185,192,195,198,203,207,213,216],specific_dc:161,specific_host:161,specific_keyspac:151,specific_sourc:151,specific_token:151,specifii:20,specnam:81,specul:[0,11,55,74,75],speculative_retri:[11,64],speculative_write_threshold:59,speculativefailedretri:74,speculativeinsufficientreplica:74,speculativeretri:74,speculativesamplelatencynano:74,speed:[6,49,52,58,64,70,83,203,219],spend:[70,220],spent:[49,70,74,220],sphinx:41,spike:45,spin:[6,50,66,71],spindl:[4,6],spirit:[6,78],split:[28,34,45,57,66,68,69,74,81,82,83,89,203],spread:[0,6,11,78],sql:[0,3,13,15,30],squar:12,squash:[36,42],src:151,ssd:[6,16,50,71,220],ssh:[54,56,216],ssl:[6,
45,60,64,73,81,82,83,146,157,203],ssl_cipher_suit:60,ssl_enabl:60,ssl_protocol:60,ssl_storage_port:[60,78],ssp:[64,207],sss:17,sstabl:[2,6,11,28,45,52,62,63,68,69,70,71,73,76,83,86,89,116,130,137,143,146,153,159,168,199,200,204,208,209,211,212,213,215,218,219,220],sstable_act:218,sstable_compression_ratio:195,sstable_count:195,sstable_s:68,sstable_size_in_mb:[66,67],sstable_task:[60,218],sstabledump:[83,203],sstableexpiredblock:[66,83,203],sstablelevelreset:[83,203],sstableload:[62,73,77,83,203],sstablemetadata:[83,203,206,210],sstableofflinerelevel:[83,203],sstablerepairedset:[83,203,208],sstablerepairset:210,sstablescrub:[83,203],sstablesperreadhistogram:74,sstablesplit:[83,203],sstableupgrad:[83,203],sstableutil:[83,203,204,208],sstableverifi:[83,203],sstablewrit:34,stabil:[33,42,57],stabl:[82,218],stack:[6,57,211,212,213,214,215,220],stackcollaps:220,staff:[24,26,81],staff_act:81,stage:[3,42,43,55,60,121,146,174,216,219],stai:[23,24,52,59,66],stakehold:[24,26],stale:77,stall:[6,79],stamp:61,stand:[44,49],standalon:44,standard1:[60,64,205,207,208,210,211,213,218],standard:[6,22,33,37,45,49,53,74,81,204,208,218],start:[0,6,9,13,24,25,26,27,28,30,36,41,45,46,49,50,52,54,56,59,62,66,67,69,70,71,72,74,75,76,77,79,89,161,192,209,213,216,218,219,220],start_dat:29,start_native_transport:60,start_token:[89,161],start_token_1:151,start_token_2:151,start_token_n:151,starter:42,starttl:64,startup:[4,6,21,40,45,49,60,67,74,79,214],startupcheck:218,starvat:6,stat:208,state:[0,1,6,11,14,50,53,54,56,59,63,66,71,74,76,79,146,186,217,218],state_or_provinc:29,statement:[6,9,10,11,13,14,15,16,17,20,21,22,27,39,41,55,60,61,62,63,64,66,74,75,77,81,82,216,220],static0:11,static1:11,staticcolumn:208,statist:[4,58,62,64,66,74,82,91,117,146,149,194,195,197,207,208,213,214,219],statu:[20,33,39,42,45,49,54,62,72,76,77,82,83,146,160,187,188,189,190,191,200,203,216,217],statusautocompact:146,statusbackup:[62,146],statusbinari:146,statusgossip:146,statushandoff:[72,146],stc:[11,66],stdev:[81,220],stdin:82,stdout:82,stdvrng:81,steadi:59,step:[0,6,26,28,33,36,40,41,43,77,217,218],still:[0,1,6,10,11,13,14,17,20,22,27,28,30,33,34,54,57,72,76,77,79,82,205,216,220],stop:[0,4,6,54,56,57,82,104,146,164,193,203,204,205,206,207,208,209,210,211,212,213,214,215,218],stop_commit:6,stop_paranoid:6,stopdaemon:[56,146],storag:[0,1,2,3,11,15,16,28,32,42,45,52,54,59,62,64,70,71,73,80,207,208],storage_port:[46,60,78],storageservic:[6,34,56,77],store:[1,4,6,10,11,12,13,22,24,26,27,28,29,32,50,52,53,54,57,58,59,60,62,63,64,66,70,71,72,74,77,80,82,101,109,111,146,191,207,208,211],store_queri:54,store_typ:6,stort:209,straggler:54,straight:[35,79,220],straightforward:[28,65],strain:28,strategi:[6,11,28,50,58,62,64,70,72,73,78,81,206,219],stratio:52,stream:[4,6,50,52,55,57,59,62,64,66,67,69,70,72,73,76,85,125,131,146,151,161,178,179,181,182,207,214,220],stream_entire_sst:[58,60],stream_throughput_outbound_megabits_per_sec:[58,64,207],streamer:58,street:[22,29],strength:6,stress:[52,56,83,220],stresscql:81,strict:[10,57,66],strictli:[8,11,14],string:[4,6,10,11,12,13,14,16,17,20,21,22,33,53,57,64,74,82,130,204],strong:0,strongli:[6,11,12,28,77],structur:[0,4,6,9,20,23,27,32,36,39,63,64,73,74,83,203,220],struggl:[0,3],stub:77,stuck:209,style:[0,6,24,25,28,39,40,41,42,44,52],stype:[9,14],sub:[0,11,13,22,54,62,66,67,220],subclass:6,subdirectori:[6,21,49,62],subject:[6,14,20,58,77],submiss:[6,42],submit:[41,42,44,52,89],subopt:81,subqueri:27,subrang:6,subscrib:[8,37,53],subscript:8,subsequ:[0,6,11,13,20,26,45,54,56,62,64,69,70,75,77],subset:[0,20,
59,66,82,216],substanti:[58,220],substract:19,subsystem:77,subtract:208,subvert:69,succe:[0,1],succed:74,succeed:215,succesfulli:74,success:[26,28,43,53,56,57,61,75,82],successfulli:[0,1,43,54,57,59,61,72,74,76,215],sudden:6,sudo:[45,49,53,54,56,64,220],suffer:220,suffici:[0,6,28,54,57,59,77],suffix:49,suggest:[12,27,36,37,42,71,215],suit:[6,32,33,42,44,64,77,207],suitabl:[13,14,28,39,42,59],sum:[28,65],sum_i:28,sum_j:28,sum_k:28,sum_l:28,summari:[4,6,42,58,62,64,74,207,208,213,214],sun:[34,77,220],sunx509:207,supercolumn:9,supersed:[10,168,211],superus:[9,20,77],suppli:[3,11,13,27,38,64,204,216],supplier:24,support:[0,3,4,6,9,10,11,12,13,14,15,16,18,19,20,22,24,25,27,29,30,32,37,40,42,44,45,47,49,52,53,54,55,58,60,62,64,67,70,72,75,77,82,83,168,192,211,218,220],suppos:13,sure:[0,6,8,24,30,33,34,35,36,37,40,42,44,45,46,66,67,81,220],surfac:[0,77],surplu:45,surpris:[0,59],surprisingli:6,surround:[17,29,82],suscept:14,suse:49,suspect:[5,42,220],suspend:40,svctm:220,svg:220,svn:43,swamp:45,swap:[0,4,6,220],swiss:[146,184],symbol:[24,220],symlink:64,symmetri:17,symptom:45,sync:[0,4,6,27,36,45,72,74,76,161,220],synchron:[53,58,76],synctim:74,synonym:20,synopsi:[54,62,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202],syntact:[11,20],syntax:[10,12,13,14,20,22,30,36,53,54,62,66,70,81],syntaxerror:216,sys:6,sysctl:[6,45],sysf:220,sysintern:6,system:[0,1,3,6,11,14,20,25,28,33,40,44,45,46,49,50,51,53,54,57,60,61,62,66,71,74,77,80,82,120,122,123,125,131,137,146,153,154,155,173,175,176,178,181,207,212,214,216,217,220],system_auth:[6,54,60,77],system_distribut:[54,60],system_schema:[20,53,54,60,61],system_trac:[54,60,161],system_view:[54,60,218],system_virtual_schema:[53,54,55,61,218],tab:[34,40],tabl:[0,1,3,4,9,10,12,13,14,15,16,17,18,20,21,22,24,25,26,27,28,29,32,44,50,52,53,54,55,56,58,59,61,63,64,66,70,72,73,76,77,81,82,83,86,89,96,104,106,115,116,119,124,128,137,146,152,153,155,159,161,168,172,185,187,192,194,195,199,200,203,205,207,208,210,214,215,216,218,219],table1:[20,76],table2:76,table_definit:81,table_nam:[11,13,16,20,21,54,60,64,66,195,218],table_opt:[11,18],tablehistogram:[146,219],tablestat:[70,146],tag:[22,39,43,62,185],tail:[11,49,54,59,218],take:[0,3,6,10,11,13,14,22,27,28,36,39,40,42,43,45,50,54,57,58,59,63,66,67,68,70,71,72,79,146,185,210,212,215,218,219,220],taken:[1,6,65,69,74,81,213],talk:26,tar:49,tarbal:[46,48,82],tarball_instal:49,target:[11,20,35,40,44,54,61,67,72,207],task:[0,6,26,33,35,37,40,42,58,74,76,82,218,219,220],task_id:60,taskdef:44,taught:27,tbl:75,tcp:[6,45,57,220],tcp_keepalive_intvl:45,tcp_keepalive_prob:45,tcp_keepalive_tim:45,tcp_nodelai:6,tcp_retries2:6,tcp_wmem:6,tcpdump:220,teach:[0,6,78],team:[24,43,45],technetwork:6,technic:[11,15],techniqu:[0,3,27,28,32,72,217,220],technot:6,tee:49,tell:[6,13,39,45,46,74,220],templat:[33,43],tempor:6,temporari:[72,77,83,203],temporarili:[0,1,6,59],tempt:[24,28],ten:28,tend:[4,6,28,45,71,72],tendenc:[6,26],tension:26,tent:43,terabyt:70,term:[0,6,13,14,15,18,22,26,27,28,54,56,80],termin:[12,20,82],ternari:34,test:[0,6,25,34,35,39,41,42,49,51,52,54,56,58,60,71,81,82],test_keyspac:[77,218],testabl:[39,42],testbatchandlist:44,te
stmethod1:44,testmethod2:44,testsom:44,teststaticcompactt:44,text:[4,9,11,12,13,14,17,22,25,28,29,32,36,43,54,60,61,62,64,65,70,76,77,80,81,220],than:[0,1,4,6,11,12,13,14,15,18,19,22,24,28,34,42,50,52,53,54,57,58,65,66,67,68,69,70,71,72,75,77,78,79,81,162,175,176,205,207,209,212,213,216,218,219,220],thei:[0,1,3,6,9,10,11,12,13,14,15,18,19,20,22,23,24,25,26,27,30,32,34,39,42,44,52,54,56,58,59,60,61,62,63,64,67,70,71,72,74,77,205,209,214,215,216,218,219,220],them:[0,6,10,11,13,14,22,23,26,27,32,33,34,37,42,43,44,45,51,54,57,59,61,63,66,69,72,74,75,77,146,199,207,214,216,218,220],themselv:[0,13,20],theorem:1,theoret:11,therefor:[0,24,28,36,42,44,77,206,214],theses:77,thi:[0,1,2,4,5,6,7,10,11,12,13,14,15,17,18,20,22,23,24,25,26,27,28,29,30,33,34,35,36,37,38,39,40,42,43,44,45,46,48,49,50,52,53,54,55,56,57,58,59,61,62,63,64,66,67,68,69,70,71,72,74,75,76,77,78,79,81,82,83,84,86,87,89,92,94,96,102,106,112,115,116,118,119,121,124,128,130,132,136,137,144,146,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,218,219,220],thing:[6,22,24,28,37,38,42,45,48,57,66,76,220],think:[6,26,27],third:[22,28,39,52,74,219],thobb:82,those:[6,11,12,13,14,16,17,18,20,22,23,24,27,42,43,45,65,66,67,68,72,77,82,199,207,211,212,214,216,220],though:[10,12,22,29,32,52,57,60,62,66,70,74],thought:212,thousand:82,thousandssep:82,thread:[4,6,18,54,56,57,58,61,71,72,74,76,77,81,86,116,146,159,161,168,177,197,199,209,218,219],thread_pool:60,threaddump:220,threadpool:[73,217],threadpoolnam:74,threadprioritypolici:40,three:[0,6,11,25,28,54,57,58,60,62,63,66,70,72,75,76,77,82,216,218,219],threshold:[4,11,53,59,65,71,78,119,146,172,179,220],thrift:[9,81],throttl:[6,33,57,58,60,64,72,83,118,146,169,173,177,178,181,203],throttled_count:60,throttled_nano:60,through:[0,5,9,10,11,12,13,18,24,25,26,33,36,40,42,45,49,50,51,57,60,61,65,66,82,220],throughout:[26,77],throughput:[0,3,6,32,50,58,64,66,70,71,74,120,125,131,146,173,178,181,207,218,219],throwabl:[39,44],thrown:[22,64,209],thu:[6,10,11,12,13,18,22,45,74,75,78,79,146,199],thumb:[6,42],thusli:22,tib:[91,145,195],tick:42,ticket:[5,36,37,38,39,42,43,44,65],tid:220,tie:45,tier:66,ties:[13,219],tighter:6,tightli:6,tild:82,time:[0,1,3,4,6,8,9,10,11,12,13,15,16,17,18,24,26,27,28,32,34,36,39,40,42,43,44,45,49,50,54,56,57,58,59,60,61,62,63,65,66,70,74,75,76,77,80,81,82,146,148,208,210,215,216,218,219,220],timefram:79,timehorizon:6,timelin:11,timeout:[6,22,45,55,72,74,82,132,146,182,216,219],timeout_in_m:182,timeout_typ:[132,182],timer:[6,74],timestamp:[0,4,9,10,11,13,14,15,17,19,24,28,52,53,54,57,61,62,69,72,75,82,83,168,203,205,208,211],timeunit:69,timeuuid:[9,10,11,17,22,81],timewindowcompactionstrategi:[11,66],timezon:[17,82],tini:[6,66],tinyint:[9,10,14,17,19,22],tip:216,titl:[24,29,42,81],tjake:34,tlp_stress:60,tls_dhe_rsa_with_aes_128_cbc_sha:6,tls_dhe_rsa_with_aes_256_cbc_sha:6,tls_ecdhe_rsa_with_aes_128_cbc_sha:6,tls_ecdhe_rsa_with_aes_256_cbc_sha:6,tls_rsa_with_aes_128_cbc_sha:6,tls_rsa_with_aes_256_cbc_sha:6,tmp:[54,213,214,218,220],tmpf:220,tmplink:214,toc:[4,58,62,64,213,214],tock:42,todai:12,todat:14,todo:39,togeth:[0,6,11,13,14,27,28,32,33,69,216,219,220],toggl:77,tojson:15,token:[4,6,9,10,12,13,45,48,54,57,58,59,64,66,67,72,74,76,81,82,89,94,137,138,144,146,151,161,167,200,208,209,216,218,219],tokenawar:216,tokenrang:81,toler:[0,1,63,72],tom:13,tombston:[4,6,11,17,24,45,67,73,74,76,116,168,205,208,211,220],tombstone_compact:192,to
mbstone_compaction_interv:66,tombstone_threshold:66,tombstones_scan:60,tombstonescannedhistogram:74,ton:44,too:[6,11,12,14,22,28,39,50,60,66,67,70,81,216,219,220],took:[216,218],tool:[3,6,12,31,33,35,36,42,43,45,49,50,52,53,56,61,62,66,73,74,77,79,81,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219],toolset:220,top:[13,22,33,42,43,52,74,148,195,196,209],topcount:[148,196],topic:[50,82],topolog:[6,78,167],toppartit:146,total:[4,6,13,28,49,53,54,56,58,60,62,64,65,66,68,74,75,81,143,146,207,218,219,220],total_replica:[0,11,59],totalblockedtask:74,totalcolumnsset:208,totalcommitlogs:74,totalcompactionscomplet:74,totaldiskspaceus:74,totalhint:74,totalhintsinprogress:74,totallat:74,totalrow:208,totalsizecap:61,totimestamp:14,touch:[8,23,28,45,66,68],tough:44,tounixtimestamp:14,tour:22,tpstat:[76,146,219],trace:[6,30,57,74,83,133,146,161,183,211,212,213,214,215,218,220],tracerout:220,track:[0,4,6,54,57,66,72,74],trackeddatainputplu:57,tracker:[36,42],trade:[25,70],tradeoff:[0,1,6,50,70,220],tradit:[69,70],traffic:[6,50,54,58,59,75,77,78,220],trail:34,trailer:57,transact:[0,2,3,13,21,24,27,32,57,59,74,83,192,203],transfer:[6,45,58,62,77,207],transform:13,transient_replica:[0,11,59],transit:[10,20,35,55],translat:220,transmit:[58,75],transpar:[6,27,45],transport:[6,40,53,60,64,74,81,98,108,146,189,207,219],trap:24,treat:[0,6,10,27,45,78],tree:[0,6,35,40,74,76],tri:[6,53,58,67,69,216],trigger:[4,6,9,11,12,15,33,52,54,60,61,63,69,70,72,73,77,86,146,158],trigger_nam:21,trigger_stat:12,trip:[6,13],trivial:77,troubl:[28,218],troubleshoot:[6,39,41,49,50,52,76,216,218,219,220],truediskspaceus:[62,143,146],truesnapshotss:74,truli:9,truncat:[4,6,9,10,15,20,53,54,60,62,72,77,81,132,146,182,198],truncate_stat:12,truncatehint:[72,146],trunk:[36,38,39,40,42,44],trust:[49,77],trusti:220,trustor:6,truststor:[6,60,64,77,81,207],truststore_password:6,truststorepassword:77,tspw:[64,207],tstamp:204,ttl:[4,6,9,10,11,14,17,22,28,69,73,168,208,211],tty:82,tunabl:2,tune:[0,11,45,50,63,67,70,71,75,218,219],tupl:[0,6,9,10,12,13,14,15,17],tuple_liter:[12,13],tuple_typ:22,tuplevalu:[10,14],turn:[0,6,24,42,45,54,70,77,216],twc:[11,69],twice:[4,6,22,57],two:[0,1,3,6,11,12,13,14,17,19,27,28,32,40,52,53,54,56,57,60,61,62,63,64,66,68,69,70,71,72,75,77,78,82,208,219,220],txt:[4,14,38,39,42,43,49,58,62,64,213,214],type:[0,3,4,6,10,11,12,13,14,15,19,20,24,25,28,29,30,39,41,49,52,53,54,55,59,60,61,64,71,73,76,77,81,82,132,146,182,192,204,207,208,212,214,218,219],type_hint:12,typeasblob:14,typecodec:14,typic:[0,3,6,11,13,23,25,27,28,30,32,45,62,63,64,66,69,70,71,74,76,77,80,82,213,216,218,219,220],typo:36,ubuntu:[40,49],udf:[6,14],udf_stat:12,udfcontext:[10,14],udt:[14,17,25,30],udt_liter:12,udt_nam:22,udt_stat:12,udtarg:14,udtnam:14,udtvalu:[10,14],ulimit:45,ultim:[0,27],ultra:70,unabl:[4,39,52,56,219],unacknowledg:6,unaffect:22,unari:19,unauthorized_attempt:53,unavail:[0,6,11,59,72,74,77,79,220],unavailableexcept:216,unblock:74,unbootstrap:58,unbound:[6,22],uncaught:218,unchecked_tombstone_compact:[66,69],uncom:[6,74,77],uncommit:1,uncommon:[24,42],uncompress:[4,6,11,58,70,72,74],unconfirm:6,undecor:4,undelet:66,under:[0,6,22,33,34,44,50,61,74,77,220],underli:[6,18,57,66,70,77,220],underlin:23,undersold:24,understand:[1,6,23,42,45,50,72,76,77,218,220],understood:23,undropp:57,unencrypt:[6,49,77],uneven:0,unexpect:[4,57,203,204,205,206,207,208,209,210,211,212,213,214,215],unexpectedli:22,unfinishedcommit:74,unflush:[62,185],unfortun:[44,72],unifi:1,uniform:81,uniq:218,uniqu:[0,3,11,14,22,23,24,25,26,30,55,57,81,208],uni
t:[22,27,39,41,54,60,69,146,170,207,212,219],unix:[61,217],unixtimestampof:[10,14],unknown:[55,209],unknowncfexcept:57,unless:[6,11,13,16,18,20,22,34,49,57,59,65,70,77,78,208,212,220],unlik:[0,6,10,13,22,32],unlimit:[6,45,64,82,207],unlock:26,unlog:[9,74,81],unmodifi:72,unnecessari:[39,58,79],unnecessarili:[57,65],unpack:49,unpredict:13,unprepar:74,unprocess:57,unprotect:57,unquot:12,unquoted_identifi:12,unquoted_nam:11,unreach:[0,76],unrecogn:54,unrecov:[57,60],unrecover:1,unrel:[42,216],unrepair:[6,59,68,73,74,76,83,203],unrespons:[11,59],unsafe_aggressive_sstable_expir:[66,69],unsecur:77,unselected_column:18,unset:[6,10,13,17,58,72,210],unsign:22,unspecifi:6,unsubscrib:[8,52],unsuccess:61,unsupport:50,unsupportedclassversionerror:56,unsupportedoperationexcept:54,until:[0,4,6,11,18,22,54,57,59,63,65,66,67,70,72,75,77,78],unus:[6,57],unusu:39,unwrit:[6,57],upcom:24,updat:[0,1,3,6,9,10,11,12,14,15,17,18,20,22,24,27,32,36,39,41,42,44,49,52,53,60,61,64,66,70,72,74,75,77,81,82,218,219],update_paramet:13,update_stat:[12,13],updatewithlwt:81,upgrad:[4,6,11,59,60,66,146,199,213,214],upgrade_sst:192,upgradesst:[63,66,70,146],upload:[42,49,64],upon:[6,22,43,53,61,63,65,70],upper:[12,17,66,77],ups:71,upstream:42,uptim:[138,146],urgent:[6,43,57],url:[36,38,81],usag:[0,4,6,11,22,50,52,53,54,57,59,60,62,63,64,65,70,73,74,82,83,203],use:[0,4,6,9,10,11,12,13,14,16,17,18,20,22,23,24,25,26,27,28,32,33,34,36,39,40,42,44,46,49,50,51,52,53,54,56,58,59,60,61,62,63,64,65,66,67,70,71,72,74,75,76,77,78,79,81,82,86,116,129,146,148,159,168,196,199,204,207,208,210,211,212,214,216,217,218,219,220],use_k:61,use_keyspac:53,use_stat:12,usec:220,usecas:66,useconcmarksweepgc:40,usecondcardmark:40,used:[0,1,3,4,6,9,10,11,12,13,14,15,16,17,18,20,22,24,25,26,28,30,32,35,39,40,42,43,44,45,53,54,56,57,58,59,60,61,62,64,66,70,71,72,74,75,77,78,79,81,82,84,86,87,89,94,96,102,105,106,112,115,116,119,121,124,128,130,132,136,137,144,146,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,204,205,206,207,208,209,210,212,213,214,216,219,220],useecassandra:77,useful:[0,4,6,11,14,24,28,30,35,42,54,59,60,62,64,66,70,72,74,76,79,82,84,86,87,89,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,206,208,218,219,220],useparnewgc:40,user1:13,user2:13,user3:13,user4:13,user:[0,3,5,6,8,9,10,11,12,13,15,16,17,18,24,25,26,27,33,39,41,42,43,45,49,54,55,56,58,59,60,61,62,63,64,66,67,69,70,71,76,77,82,83,89,105,146,205,213,218,220],user_count:13,user_defined_typ:22,user_funct:20,user_nam:13,user_occup:13,user_opt:20,useract:13,userid:[11,13,14],userindex:16,usernam:[6,13,14,53,54,60,62,64,74,77,82,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,207],usertyp:64,uses:[0,4,6,11,12,13,14,16,20,21,24,25,36,44,45,53,54,56,60,75,77,81,215,219,220],usethreadprior:40,using:[1,3,4,6,10,11,12,13,14,18,20,22,23,25,27,28,29,30,32,33,40,41,42,43,44,48,49,50,51,52,53,54,56,57,58,59,60,61,62,63,64,67,69,70,71,72,73,74,75,
77,79,82,89,151,168,185,204,206,208,209,210,211,214,216,217,218,219,220],using_byt:60,using_reserve_byt:60,usr:[56,82,220],usual:[0,6,13,22,27,38,42,44,50,63,77,161,211,216,218],utc:[17,82],utd:11,utf8:[22,82],utf8typ:[9,208],utf:[57,82],util:[4,14,39,49,54,57,66,82,218,220],uuid:[9,10,11,12,17,22,25,29,53],val0:[11,54,62,64,76],val1:[11,54,62,64,76],val2:[62,64,76],val:[14,81],valid:[0,6,10,11,12,13,14,17,22,24,25,43,45,56,57,59,66,70,74,76,77,82,83,161,168,192,203,215],validationexecutor:[60,74,219],validationtim:74,valu:[0,1,4,6,9,10,11,12,13,14,16,17,19,22,25,28,32,39,40,43,45,50,53,54,57,58,59,60,61,62,63,64,66,70,72,74,75,76,77,78,80,81,82,83,105,133,137,146,169,173,175,176,177,178,180,181,182,183,203,204,215,216,218,220],valuabl:218,value1:13,value2:13,value_in_kb_per_sec:[169,177],value_in_m:180,value_in_mb:[173,178,181],valueof:14,varchar:[9,11,14,17,22],vari:[11,28,32,70],variabl:[6,10,12,17,22,28,33,40,43,48,56,64,210],varianc:[25,218],variant:[0,12,32],variat:32,varieti:65,varint:[9,11,14,17,19,22],variou:[6,11,23,24,25,26,33,40,44,71,75,77,81,203,217,218],vector:[0,77],vendor:56,verb:57,verbatim:57,verbos:[64,207,211,214,215],veri:[0,6,11,13,27,32,36,42,44,45,63,66,67,70,71,210,215,216,218,219,220],verif:[83,203],verifi:[11,42,43,45,47,49,64,70,76,137,146,192,203,204,205,206,207,208,209,210,211,212,213,214,215],versa:214,version:[1,2,5,6,9,11,14,15,22,35,40,42,47,49,54,56,57,58,59,60,64,66,70,72,74,79,83,88,93,103,113,146,199,200,203,211,214,218],versu:27,vertic:[56,82],via:[0,4,6,8,10,18,20,24,27,35,39,40,45,46,54,57,60,61,62,64,69,70,71,72,74,76,77,78,208,210,220],vice:214,view:[0,3,6,10,11,12,15,20,24,25,26,27,30,31,52,55,59,60,74,82,123,146,176,202,210,218,219,220],view_build:192,view_nam:[18,54],viewbuildexecutor:[60,74,219],viewbuildstatu:146,viewlockacquiretim:74,viewmutationstag:[60,74,219],viewpendingmut:74,viewreadtim:74,viewreplicasattempt:74,viewreplicassuccess:74,viewwrit:74,viewwritelat:74,vint:57,violat:[0,27,54],virtual:[0,3,6,45,52,55,69,74,79],virtualenv:33,visibl:[0,11,20,34,63,72,75],visit:[23,49,81],visual:[0,24,25,36,218],vnode:[6,50,59,70],volum:[3,4,6,65,70,215,219,220],vote:41,vpc:78,vulner:[6,43,77],w_await:220,wai:[0,4,6,12,15,17,18,22,24,27,28,33,37,38,40,44,45,53,61,66,69,70,72,161,208,209,210,211,218,220],wait:[0,4,6,11,42,45,53,54,56,57,58,59,61,62,72,74,75,146,163,218,219,220],waitingoncommit:74,waitingonfreememtablespac:74,waitingonsegmentalloc:74,walk:[0,25],want:[0,4,6,11,13,23,26,27,28,30,33,40,42,43,44,45,49,61,72,76,77,79,81,206,207,210,218,220],warmup:[81,146,171],warn:[6,11,34,44,53,54,73,76,161,215,218],warrant:219,washington:22,wasn:10,wast:6,watch:[44,220],weaker:0,web:[1,36],websit:[24,44,220],wed:56,week:[22,76,210],weibul:81,weigh:[11,54],weight:[53,54,57,61,74,109],weightedqueuetest:54,welcom:8,well:[0,6,11,13,14,17,22,24,26,27,28,30,39,40,49,50,53,54,57,59,61,62,65,70,71,77,78,146,164,213,218,220],went:74,were:[0,3,6,9,10,20,24,30,39,40,43,54,64,66,74,75,211,214,218,219],west:43,what:[0,2,11,13,22,23,24,31,36,37,41,44,46,52,55,57,58,66,69,70,71,75,77,81,82,208,216,217,218,219,220],whatev:[10,13,45],whedon:13,wheel:213,when:[0,1,4,6,9,10,11,12,13,14,15,16,17,20,22,24,26,27,28,33,34,36,39,42,43,44,46,49,50,52,53,54,55,56,59,61,62,63,64,65,67,68,69,70,71,72,73,74,75,76,77,78,79,81,82,84,86,87,89,92,94,96,102,106,112,115,116,119,121,124,128,130,132,136,137,144,148,151,152,153,159,160,161,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,192,194,195,196,198,199,200,202,208,209,214,216,218,219,220],when
ev:[209,220],where:[0,3,4,6,9,10,11,12,14,16,17,18,19,20,22,24,26,28,39,44,46,50,53,54,57,59,60,61,63,64,67,69,70,72,77,79,81,82,109,161,216,218,220],where_claus:13,wherea:[22,77,219],whether:[0,6,9,11,13,27,28,40,53,54,57,62,66,75,78,82,109],which:[0,1,3,4,5,6,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,32,36,42,43,44,45,46,49,50,51,53,54,56,57,58,59,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,79,81,89,124,128,137,143,146,151,161,214,216,217,218,219,220],whichev:[0,6],whilst:6,whitelist:77,whitespac:41,who:[20,33,42,45,59,72],whole:[0,6,13,14,22,58,66,76],whose:[11,22,27,192],why:[0,24,39,42,50,52,70,205,216,218,220],wide2:60,wide:[0,3,4,23,24,28,49,65,219],wider:59,width:12,wiki:[6,36,40],wildcard:[13,20,54,212],wildli:11,win:[0,6],window:[0,4,6,66,72,74,77,127,135,146,180,217],winner:45,wip:42,wipe:[45,79],wire:[6,45,54,57],wirefram:26,wireshark:220,wise:11,wish:[6,24,43,69,74,218],withbuffersizeinmb:64,within:[0,3,4,6,11,12,13,16,24,32,40,42,45,54,58,62,68,69,71,74,77],withing:6,without:[0,1,6,11,12,13,14,20,22,38,40,42,43,44,45,50,54,56,57,58,59,64,65,69,71,72,74,77,82,83,84,137,146,153,203,204],withpartition:64,withtyp:64,wmb:220,wmem_max:6,wnen:30,won:[4,6,13,36,38,57,75,76,220],wont:[61,67],word:[10,11,12,18,20,22,45,65,77],work:[0,4,6,10,11,14,15,17,23,24,25,27,30,33,34,37,38,40,41,43,44,45,50,52,59,66,67,69,71,74,76,77,78,79,82,207,220],workaround:[207,211],worker:[57,82],workflow:[24,26],workload:[0,6,37,39,50,54,63,66,67,69,71,81,219,220],workspac:40,worktre:40,world:[23,27],worri:[26,42,45],wors:[6,78],worst:[6,28,42],worth:[6,27,57,61],worthwhil:[6,24],would:[0,6,12,13,14,17,20,23,24,25,26,27,28,32,36,40,42,44,50,52,53,54,57,58,59,60,64,66,69,70,71,72,75,76,77,78,208,210,214,218,220],wouldn:11,wrap:[24,57,78],wrapper:54,writabl:64,write:[0,2,3,4,6,10,11,13,22,24,27,32,34,36,37,39,44,45,50,54,55,60,64,65,66,67,68,69,70,71,74,75,76,77,78,79,81,82,104,132,146,182,195,208,211,214,216,218,219,220],write_lat:195,write_request_timeout:45,write_request_timeout_in_m:72,writefailedideacl:74,writelat:[74,216],writer:[4,6,34,64],writetim:[9,14],writetimeoutexcept:[6,216],written:[0,1,3,4,6,11,21,26,27,33,45,53,54,57,59,61,63,64,66,69,70,72,74,75,76],wrong:[6,27,219],wrqm:220,wrst:220,wrte:74,www:[6,49,220],x86:80,xandra:47,xarg:[210,218],xdm:220,xferd:49,xlarge_daili:61,xml:[35,40,43,44,46,56,64,218],xmn220m:40,xms1024m:40,xmx1024m:40,xmx:71,xss256k:40,xzvf:49,yaml:[0,3,4,6,14,18,20,46,50,53,54,57,58,59,60,62,64,72,74,75,77,78,79,81,90,105,109,146,164,195,197,207,208,216],year:[13,22,27,28,56],yes:[9,11],yet:[0,6,11,33,37,62,65,74,214],ygc:220,ygct:220,yield:[13,27,28,61,79,220],ymmv:218,you:[0,4,5,6,8,10,11,12,13,14,16,17,18,20,21,22,23,24,25,26,27,28,29,30,33,34,35,36,37,38,40,41,43,44,45,46,47,48,49,50,51,52,54,59,61,62,64,65,66,67,68,70,72,74,75,76,77,78,79,80,81,82,84,146,185,204,206,207,208,210,211,212,214,215,216,217,218,219,220],young:220,younger:14,your:[0,5,6,8,10,11,12,24,25,26,27,28,29,30,34,36,37,40,41,42,44,45,46,49,50,52,56,59,66,67,70,71,76,77,78,81,82,207,212,215,217,218,219,220],yourself:[37,38,44,72],yum:[49,56],yyyi:[17,22,61],z_0:[11,16,18],zero:[3,6,10,11,45,55,74,78,218],zerocopi:58,zgrep:218,zip:[22,61],zipcod:22,zlib:70,zone:[0,6,22,78],zoomabl:220,zstd:[4,6,70],zstdcompressor:[11,70]},titles:["Dynamo","Guarantees","Architecture","Overview","Storage Engine","Reporting Bugs","Cassandra Configuration File","Configuring Cassandra","Contact us","Appendices","Changes","Data Definition","Definitions","Data Manipulation","Functions","The Cassandra Query 
Language (CQL)","Secondary Indexes","JSON Support","Materialized Views","Arithmetic Operators","Security","Triggers","Data Types","Conceptual Data Modeling","Logical Data Modeling","Physical Data Modeling","Defining Application Queries","RDBMS Design","Evaluating and Refining Data Models","Defining Database Schema","Cassandra Data Modeling Tools","Data Modeling","Introduction","Jenkins CI Environment","Code Style","Dependency Management","Working on Documentation","Getting Started","How-to Commit","Review Checklist","Building and IDE Integration","Contributing to Cassandra","Contributing Code Changes","Release Process","Testing","Frequently Asked Questions","Configuring Cassandra","Client drivers","Getting Started","Installing Cassandra","Production Recommendations","Inserting and querying","Welcome to Apache Cassandra\u2019s documentation!","Audit Logging","Full Query Logging","New Features in Apache Cassandra 4.0","Support for Java 11","Improved Internode Messaging","Improved Streaming","Transient Replication","Virtual Tables","Audit Logging","Backups","Bloom Filters","Bulk Loading","Change Data Capture","Compaction","Leveled Compaction Strategy","Leveled Compaction Strategy","Time Window CompactionStrategy","Compression","Hardware Choices","Hints","Operating Cassandra","Monitoring","Read repair","Repair","Security","Snitch","Adding, replacing, moving and removing nodes","Third-Party Plugins","Cassandra Stress","cqlsh: the CQL shell","Cassandra Tools","assassinate","bootstrap","cleanup","clearsnapshot","clientstats","compact","compactionhistory","compactionstats","decommission","describecluster","describering","disableauditlog","disableautocompaction","disablebackup","disablebinary","disablefullquerylog","disablegossip","disablehandoff","disablehintsfordc","disableoldprotocolversions","drain","enableauditlog","enableautocompaction","enablebackup","enablebinary","enablefullquerylog","enablegossip","enablehandoff","enablehintsfordc","enableoldprotocolversions","failuredetector","flush","garbagecollect","gcstats","getbatchlogreplaythrottle","getcompactionthreshold","getcompactionthroughput","getconcurrency","getconcurrentcompactors","getconcurrentviewbuilders","getendpoints","getinterdcstreamthroughput","getlogginglevels","getmaxhintwindow","getreplicas","getseeds","getsstables","getstreamthroughput","gettimeout","gettraceprobability","gossipinfo","handoffwindow","help","import","info","invalidatecountercache","invalidatekeycache","invalidaterowcache","join","listsnapshots","move","netstats","Nodetool","pausehandoff","profileload","proxyhistograms","rangekeysample","rebuild","rebuild_index","refresh","refreshsizeestimates","reloadlocalschema","reloadseeds","reloadssl","reloadtriggers","relocatesstables","removenode","repair","repair_admin","replaybatchlog","resetfullquerylog","resetlocalschema","resumehandoff","ring","scrub","setbatchlogreplaythrottle","setcachecapacity","setcachekeystosave","setcompactionthreshold","setcompactionthroughput","setconcurrency","setconcurrentcompactors","setconcurrentviewbuilders","sethintedhandoffthrottlekb","setinterdcstreamthroughput","setlogginglevel","setmaxhintwindow","setstreamthroughput","settimeout","settraceprobability","sjk","snapshot","status","statusautocompaction","statusbackup","statusbinary","statusgossip","statushandoff","stop","stopdaemon","tablehistograms","tablestats","toppartitions","tpstats","truncatehints","upgradesstables","verify","version","viewbuildstatus","SSTable 
Tools","sstabledump","sstableexpiredblockers","sstablelevelreset","sstableloader","sstablemetadata","sstableofflinerelevel","sstablerepairedset","sstablescrub","sstablesplit","sstableupgrade","sstableutil","sstableverify","Find The Misbehaving Nodes","Troubleshooting","Cassandra Logs","Use Nodetool","Diving Deep, Use External Tools"],titleterms:{"break":28,"class":78,"final":214,"function":[13,14,17],"import":[34,64,137],"long":44,"new":[45,55],"switch":66,"transient":[0,59],Added:57,Adding:79,Doing:209,IDE:40,IDEs:34,LCS:67,QoS:57,TLS:77,The:[13,15,17,60,66,216],USE:11,Use:[70,207,219,220],Uses:70,Using:[32,40,53,56,64,210],Will:45,With:77,about:33,abov:208,accept:64,access:[43,77],account:43,adcanc:61,add:[35,45],address:45,advanc:[70,220],after:79,aggreg:14,ahead:50,alias:13,all:[20,45,62,208,214],alloc:79,allocate_tokens_for_keyspac:6,allocate_tokens_for_local_replication_factor:6,allow:[13,72],alreadi:206,alter:[11,18,20,22],analysi:32,ani:45,announc:43,answer:37,anti:24,apach:[33,40,43,52,55],api:64,appendic:9,appendix:9,applic:[26,57,72],architectur:2,archiv:53,arithmet:19,artifact:43,ask:45,assassin:84,assign:79,assur:57,attempt:212,audit:[53,61],audit_logging_opt:6,auditlog:61,auth:77,authent:[6,20,77],author:[6,77],auto_snapshot:6,automat:20,automatic_sstable_upgrad:6,avail:[1,58],avg:14,back_pressure_en:6,back_pressure_strategi:6,background:75,backup:[62,64],base:[36,58],basic:[211,215,220],batch:[1,13,45,74],batch_size_fail_threshold_in_kb:6,batch_size_warn_threshold_in_kb:6,batchlog_replay_throttle_in_kb:6,bcc:220,befor:42,behavior:75,below:58,benefit:[58,70],best:76,between:[27,59],binari:49,binauditlogg:61,bintrai:43,blob:[14,45],block:[75,205],bloom:63,boilerpl:34,bootstrap:[45,67,79,85],branch:42,broadcast_address:6,broadcast_rpc_address:6,buffer_pool_use_heap_if_exhaust:6,bufferpool:74,bug:[5,37,42],build:[40,56],bulk:[45,64],cach:[60,74,77,220],calcul:28,call:[43,45],can:45,cap:1,capi:80,captur:[61,65,82,220],cas_contention_timeout_in_m:6,cassandra:[6,7,15,17,27,30,33,36,40,41,43,44,45,46,49,52,55,61,65,73,75,77,80,81,83,213,218],cast:14,categori:53,cdc:65,cdc_enabl:6,cdc_free_space_check_interval_m:6,cdc_raw_directori:6,cdc_total_space_in_mb:6,certif:77,chang:[10,42,45,46,63,65,69],characterist:22,cheap:59,check:211,checklist:39,choic:71,choos:[42,49],circleci:44,claus:13,clean:214,cleanup:[79,86],clear:[62,82],clearsnapshot:87,client:[47,51,60,74,77,216],client_encryption_opt:6,clientstat:88,clojur:47,close:57,cloud:71,cluster:[0,45,207,219],cluster_nam:6,code:[4,34,42],collect:[22,66,220],column_index_cache_size_in_kb:6,column_index_size_in_kb:6,command:[40,61,66,82,210],comment:12,commit:38,commit_failure_polici:6,commitlog:[4,74],commitlog_compress:6,commitlog_directori:6,commitlog_segment_size_in_mb:6,commitlog_sync:6,commitlog_sync_batch_window_in_m:6,commitlog_sync_group_window_in_m:6,commitlog_sync_period_in_m:6,commitlog_total_space_in_mb:6,commitlogseg:65,committ:36,commod:0,common:[11,56,66,71,218],compact:[9,50,66,67,68,74,89,219],compaction_large_partition_warning_threshold_mb:6,compaction_throughput_mb_per_sec:6,compactionhistori:90,compactionstat:91,compactionstrategi:69,compar:[32,54],compat:82,compon:58,compress:[50,70],conceptu:23,concern:69,concurrent_compactor:6,concurrent_counter_writ:6,concurrent_materialized_view_build:6,concurrent_materialized_view_writ:6,concurrent_read:6,concurrent_valid:6,concurrent_writ:6,condition:20,config:207,configur:[6,7,46,50,53,54,58,61,62,65,70,72,75],conflict:35,connect:[20,45,57],consist:[0,1,75,82],constant:12,conta
ct:8,content:[43,61],contribut:[37,41,42],control:20,convent:[12,34],convers:14,coordin:219,copi:[58,82],corrupt:[57,211,215],corrupted_tombstone_strategi:6,count:14,counter:[13,22,211],counter_cache_keys_to_sav:6,counter_cache_save_period:6,counter_cache_size_in_mb:6,counter_write_request_timeout_in_m:6,cpu:[71,220],cql:[9,15,54,74,82],cqlsh:[51,82],cqlshrc:82,cqlsstablewrit:64,creat:[11,14,16,18,20,21,22,37,42,43,50,62],credenti:20,credentials_update_interval_in_m:6,credentials_validity_in_m:6,cross_node_timeout:6,cstar_perf:44,current:[14,213],custom:22,cycl:54,cython:82,dart:47,data:[0,11,13,17,20,22,23,24,25,28,30,31,32,45,62,64,65,66,79],data_file_directori:6,databas:[20,29],datacent:20,dataset:0,date:[14,22,211],datetim:[14,19],dead:79,deal:211,debian:49,debug:[40,218],decis:27,decommiss:[58,92],deep:220,defin:[14,22,26,29],definit:[11,12,60],defragment:68,delet:[13,43,45,66],deliveri:72,demo:[53,64],denorm:27,depend:[35,82],deploy:58,describ:[60,82,94],describeclust:93,deseri:57,design:[27,32],detail:[66,207],detect:0,develop:43,diagnost:[53,75],diagnostic_events_en:6,dies:45,differ:[27,60,62],directori:[46,53,54,62,66],disabl:[61,65],disableauditlog:95,disableautocompact:96,disablebackup:97,disablebinari:98,disablefullquerylog:99,disablegossip:100,disablehandoff:101,disablehintsfordc:102,disableoldprotocolvers:103,disallow:58,disk:[28,45,71,72],disk_failure_polici:6,disk_optimization_strategi:6,displai:204,distribut:[0,43],dive:220,document:[36,37,52],doe:[45,53,61],down:72,drain:104,driven:32,driver:[47,51],drop:[9,11,14,16,18,20,21,22,45,58],droppedmessag:74,dry:209,dtest:[37,44],dump:204,durabl:1,durat:22,dynam:78,dynamic_snitch_badness_threshold:6,dynamic_snitch_reset_interval_in_m:6,dynamic_snitch_update_interval_in_m:6,dynamo:0,each:[45,208],each_quorum:59,eclips:40,effici:57,elig:58,elixir:47,email:45,enabl:[53,54,58,59,61,65,77],enable_materialized_view:6,enable_sasi_index:6,enable_scripted_user_defined_funct:6,enable_transient_repl:6,enable_user_defined_funct:6,enableauditlog:105,enableautocompact:106,enablebackup:107,enablebinari:108,enablefullquerylog:109,enablegossip:110,enablehandoff:111,enablehintsfordc:112,enableoldprotocolvers:113,encod:17,encrypt:[50,77],endpoint_snitch:6,engin:4,ensur:50,entir:204,entri:45,environ:[33,46],erlang:47,error:[45,57,216],evalu:28,even:45,event:[53,75],eventu:1,exampl:[4,32,62,75,76],except:34,exclud:204,exist:45,exit:82,expand:82,expect:75,experiment:6,expir:66,expiri:57,explan:208,extend:215,extern:[64,220],factor:45,fail:[45,79],failur:[0,45,57],failuredetector:114,faq:81,faster:72,featur:[3,6,55],file:[6,34,35,61,207,212,215,218],file_cache_size_in_mb:6,fileauditlogg:61,filedescriptorratio:74,filter:[13,61,63],find:[62,216],first:27,fix:[37,42],flamegraph:220,flexibl:53,flow:36,flush:115,flush_compress:6,format:[34,204],found:[206,209],fql:54,frame:57,freez:42,frequenc:53,frequent:45,from:[40,43,45,57,60,62,64,82,207],fromjson:17,full:[54,59,76,218],full_query_logging_opt:6,fulli:66,further:[49,65],garbag:[66,220],garbagecollect:116,garbagecollector:74,gc_grace_second:66,gc_log_threshold_in_m:6,gc_warn_threshold_in_m:6,gcstat:117,gener:[34,64],get:[37,48,207,218],getbatchlogreplaythrottl:118,getcompactionthreshold:119,getcompactionthroughput:120,getconcurr:121,getconcurrentcompactor:122,getconcurrentviewbuild:123,getendpoint:124,getinterdcstreamthroughput:125,getlogginglevel:126,getmaxhintwindow:127,getreplica:128,getse:129,getsstabl:130,getstreamthroughput:131,gettimeout:132,gettraceprob:133,github:36,give:45,goal:32,gossip:0,gossi
pinfo:134,gpg:43,grace:[66,208],grant:20,graph:81,group:13,guarante:1,handl:34,handoff:72,handoffwindow:135,hang:79,happen:45,hardwar:[0,71],has:206,hash:0,haskel:47,heap:45,help:[82,136],hide:207,high:[1,58,220],hint:[57,72],hinted_handoff_disabled_datacent:6,hinted_handoff_en:6,hinted_handoff_throttle_in_kb:6,hintedhandoff:74,hints_compress:6,hints_directori:6,hints_flush_period_in_m:6,hintsservic:74,host:[45,82],hot:77,hotel:[24,25],how:[36,38,45,60,61],htop:220,idea:40,ideal_consistency_level:6,identifi:12,impact:70,improv:[57,58,75],inbound:[57,60],includ:214,increment:[0,62,64,76],incremental_backup:6,index:[1,16,74,80],index_summary_capacity_in_mb:6,index_summary_resize_interval_in_minut:6,info:[49,138],inform:[218,220],initi:37,initial_token:6,insert:[13,17,51],instal:49,integr:[27,40,77],intellij:40,inter:77,inter_dc_stream_throughput_outbound_megabits_per_sec:6,inter_dc_tcp_nodelai:6,interfac:0,intern:[20,77,204],internod:[57,60],internode_application_receive_queue_capacity_in_byt:6,internode_application_receive_queue_reserve_endpoint_capacity_in_byt:6,internode_application_receive_queue_reserve_global_capacity_in_byt:6,internode_application_send_queue_capacity_in_byt:6,internode_application_send_queue_reserve_endpoint_capacity_in_byt:6,internode_application_send_queue_reserve_global_capacity_in_byt:6,internode_authent:6,internode_compress:6,internode_recv_buff_size_in_byt:6,internode_send_buff_size_in_byt:6,introduct:32,invalidatecountercach:139,invalidatekeycach:140,invalidaterowcach:141,investig:[37,216],iostat:220,issu:56,java:[45,47,56,64],jconsol:45,jenkin:33,jira:[36,43],jmx:[45,66,74,77],job:33,join:[27,45,142],json:17,jstack:220,jstat:220,jvm:[74,220],keep:213,kei:[16,18,43,204],key_cache_keys_to_sav:6,key_cache_save_period:6,key_cache_size_in_mb:6,keyspac:[11,45,50,53,58,60,62,64,74,209],keyword:[9,12],lang:45,languag:15,larg:[28,45],latenc:[216,219,220],level:[0,67,68,75,206,220],librari:35,lightweight:[1,81],limit:[13,18,57,60,61],line:[40,82],lineariz:1,list:[8,20,22,37,45,60,62,214],listen:45,listen_address:[6,45],listen_interfac:6,listen_interface_prefer_ipv6:6,listen_on_broadcast_address:6,listsnapshot:143,liter:22,live:45,load:[45,64,207],local:[36,57,219],locat:46,log:[45,46,53,54,61,66,214,216,218],logger:[53,54,218],logic:24,login:82,longer:72,lot:[45,210],lucen:80,made:45,mail:8,main:46,major:[67,68],make:72,manag:[35,204],mani:210,manifest:211,manipul:13,manual:79,map:[16,22,45],master:0,materi:[18,32],matrix:56,max:[14,45],max_concurrent_automatic_sstable_upgrad:6,max_hint_window_in_m:6,max_hints_delivery_thread:6,max_hints_file_size_in_mb:6,max_value_size_in_mb:6,maxtimeuuid:14,mean:45,membership:0,memori:[45,71,74],memorypool:74,memtabl:4,memtable_allocation_typ:6,memtable_cleanup_threshold:6,memtable_flush_writ:6,memtable_heap_space_in_mb:6,memtable_offheap_space_in_mb:6,merg:66,messag:[45,57,60],metadata:[208,210],method:[45,49],metric:[57,74,216],min:14,minor:66,mintimeuuid:14,misbehav:216,mode:81,model:[0,23,24,25,28,30,31,32],monitor:[72,74,79],monoton:75,more:[45,66,204,207,218],move:[79,144],movement:79,multi:[0,58],multilin:34,multipl:[0,62,212],name:64,nativ:[14,22],native_transport_allow_older_protocol:6,native_transport_flush_in_batches_legaci:6,native_transport_frame_block_size_in_kb:6,native_transport_idle_timeout_in_m:6,native_transport_max_concurrent_connect:6,native_transport_max_concurrent_connections_per_ip:6,native_transport_max_frame_size_in_mb:6,native_transport_max_thread:6,native_transport_port:6,native_transport_port_ssl:6,net:47,ne
tbean:40,netstat:145,netti:58,network:220,network_author:6,networktopologystrategi:[0,11,50],newer:40,next:[43,216],nexu:43,nio:57,node:[0,45,58,72,77,79,216],nodej:47,nodetool:[45,53,61,64,66,72,146,219],none:75,note:36,noteworthi:22,now:14,num_token:6,number:[19,58],object:[54,59,60],old:[43,213],one:[45,210],onli:[45,204,214],open:[40,57],oper:[3,19,43,45,58,69,70,73],optim:[27,57],option:[18,53,54,59,61,64,66,67,68,69,76,82],order:13,organis:43,otc_backlog_expiration_interval_m:6,otc_coalescing_enough_coalesced_messag:6,otc_coalescing_strategi:6,otc_coalescing_window_u:6,other:[45,53,54,60,76],out:[0,72],outbound:57,outofmemoryerror:45,output:[61,204,205,207],overflow:211,overhead:54,overload:57,overview:[3,65],own:33,packag:[43,49],packet:220,page:[82,220],parallel:58,paramet:[13,65,66],parti:80,partit:[0,28,32],partition:6,password:77,patch:[37,42],path:57,pattern:24,pausehandoff:147,paxo:57,pend:59,per:0,perform:[43,44,54],periodic_commitlog_sync_lag_block_in_m:6,perl:47,permiss:20,permissions_update_interval_in_m:6,permissions_validity_in_m:6,phi_convict_threshold:6,php:47,physic:[0,25],pick:0,plai:72,plugin:[33,80],point:45,pom:35,pool:60,port:45,post:43,practic:76,prepar:[12,57],prepared_statements_cache_size_mb:6,prerequisit:[43,49],prevent:57,preview:58,primari:18,print:[208,210],process:43,product:50,profil:81,profileload:148,progress:[79,207],project:40,promot:43,properti:46,propos:57,protocol:57,proxyhistogram:149,publish:[36,43],python:47,pytz:82,qualiti:57,queri:[0,15,26,27,32,51,54,216,218,219],question:[37,45],queu:57,quorum:[59,75],rack:50,rang:[59,79],range_request_timeout_in_m:6,rangekeysampl:150,rate:216,raw:204,rdbm:27,read:[50,59,65,75],read_request_timeout_in_m:6,rebuild:151,rebuild_index:152,recommend:50,reconnect:57,record:0,recov:57,reduc:206,referenti:27,refin:28,refresh:153,refreshsizeestim:154,refus:45,regular:60,relat:32,releas:43,relevel:209,reliabl:220,reload:[61,77],reloadlocalschema:155,reloadse:156,reloadssl:157,reloadtrigg:158,relocatesst:159,remot:45,remov:[66,79],removenod:160,repair:[58,59,66,75,76,161,210],repair_admin:162,repair_session_space_in_mb:6,repaired_data_tracking_for_partition_reads_en:6,repaired_data_tracking_for_range_reads_en:6,replac:79,replai:54,replaybatchlog:163,replic:[0,45,59],replica:[0,58,59],report:[5,37,45,74],report_unconfirmed_repaired_data_mismatch:6,repositori:43,request:[57,72,74],request_timeout_in_m:6,requir:[33,35],reserv:[9,24,25],resetfullquerylog:164,resetlocalschema:165,resili:57,resolut:35,resourc:[57,220],restor:62,restrict:20,result:13,resum:79,resumehandoff:166,retriev:14,review:[37,39],revok:20,rewrit:213,rhel:45,right:42,ring:[0,45,167],role:[20,77],role_manag:6,roles_update_interval_in_m:6,roles_validity_in_m:6,roll:[53,54],row:204,row_cache_class_nam:6,row_cache_keys_to_sav:6,row_cache_save_period:6,row_cache_size_in_mb:6,rowcach:80,rpc_address:6,rpc_interfac:6,rpc_interface_prefer_ipv6:6,rpc_keepal:6,rpm:49,rubi:47,run:[44,54,209],runtim:[46,72],rust:47,safeti:6,sai:45,same:[45,62],sampl:61,saved_caches_directori:6,scala:47,scalabl:1,scalar:14,scale:0,schema:[29,32],script:210,scrub:[168,211],second:208,secondari:[1,16],secur:[20,77],see:45,seed:[33,45],seed_provid:6,select:[13,17,18],selector:13,send:43,serial:[57,82],server:33,server_encryption_opt:6,session:82,set:[20,22,33,40,45,53,54,60,62,210],setbatchlogreplaythrottl:169,setcachecapac:170,setcachekeystosav:171,setcompactionthreshold:172,setcompactionthroughput:173,setconcurr:174,setconcurrentcompactor:175,setconcurrentviewbuild:176,sethintedhandof
fthrottlekb:177,setinterdcstreamthroughput:178,setlogginglevel:179,setmaxhintwindow:180,setstreamthroughput:181,settimeout:182,settraceprob:183,setup:[33,40],share:82,shell:82,show:[45,82,210],signatur:14,simpl:0,simplestrategi:[0,11],singl:[45,62,66,204],size:[28,57,212],sjk:184,skip:211,slack:[8,43],slow_query_log_timeout_in_m:6,small:212,snapshot:[62,64,185,207,212,213],snapshot_before_compact:6,snitch:[50,78],sort:27,sourc:[40,82],special:82,specif:20,specifi:[208,212],specul:59,speed:[45,207],sphinx:36,split:212,ssl:[77,207],ssl_storage_port:6,sstabl:[4,58,60,64,66,67,74,203,205,206,207,210,214],sstable_preemptive_open_interval_in_mb:6,sstabledump:204,sstableexpiredblock:205,sstablelevelreset:206,sstableload:[64,207],sstablemetadata:208,sstableofflinerelevel:209,sstablerepairedset:210,sstablescrub:211,sstablesplit:212,sstableupgrad:213,sstableutil:214,sstableverifi:215,stage:57,stai:45,standard:77,start:[37,40,42,48],start_native_transport:6,starv:67,state:[219,220],statement:[12,18,34,54],statu:[186,210,219],statusautocompact:187,statusbackup:188,statusbinari:189,statusgossip:190,statushandoff:191,stc:[67,68],step:[35,216],stop:192,stopdaemon:193,storag:[4,9,27,72,74],storage_port:6,store:[0,45],strategi:[0,66,67,68],stratio:80,stream:[45,58,74,79],stream_entire_sst:6,stream_throughput_outbound_megabits_per_sec:6,streaming_connections_per_host:6,streaming_keep_alive_period_in_sec:6,stress:[44,81],structur:[62,204],style:34,submit:37,sum:14,support:[17,56,59,81],sync:43,synchron:0,system:218,system_virtual_schema:60,tabl:[6,11,57,60,62,65,74,75,204,206,209,211,213],tablehistogram:194,tablestat:195,take:62,tarbal:49,target:64,task:60,temporari:214,term:12,test:[33,37,40,44],than:45,thei:45,third:80,though:45,thread:[60,220],threadpool:[74,219],threshold:6,throttl:207,throughput:220,time:[14,22,69,72],timeout:57,timestamp:[22,45,204],timeuuid:14,timewindowcompactionstrategi:69,todo:11,tojson:17,token:[0,14,50,79],tombston:66,tombstone_failure_threshold:6,tombstone_warn_threshold:6,tool:[30,44,64,83,203,220],top:[45,220],topic:43,toppartit:196,tpstat:197,trace:82,tracetype_query_ttl:6,tracetype_repair_ttl:6,transact:[1,81,214],transit:59,transparent_data_encryption_opt:6,trickle_fsync:6,trickle_fsync_interval_in_kb:6,trigger:[21,66],troubleshoot:[35,217],truncat:11,truncate_request_timeout_in_m:6,truncatehint:198,ttl:[13,66],tunabl:0,tupl:22,tweet:43,two:45,type:[9,17,22,35,58,62,66,74],udt:22,unabl:45,uniqu:58,unit:[37,40,44],unknown:57,unlog:13,unlogged_batch_across_partitions_warn_threshold:6,unrepair:[66,210],unsubscrib:45,updat:[13,35,37,43,45],upgradesst:199,upload:43,usag:[45,76,81,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,204,205,206,207,208,209,210,211,212,213,214,215,220],use:45,user:[14,20,22,37,53,81],using:[0,36,45,66],uuid:14,valid:211,valu:208,variabl:46,verif:215,verifi:200,version:[0,4,10,43,82,201,213],view:[18,32,53,54,61],viewbuildstatu:202,virtual:[57,60],vmtouch:220,vnode:0,vote:43,wait:43,warn:65,websit:43,welcom:52,what:[1,32,42,45,53,61],when:[45,57,58,66],where:13,whitespac:34,why:[45,66],window:69,windows_timer_interv:6,without:[66,211,212],work:[22,36
,42],write:[1,59,72],write_request_timeout_in_m:6,writetim:13,yaml:[61,65],you:42,your:[33,43],zero:58}}) \ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/cassandra_stress.html b/src/doc/4.0-beta1/tools/cassandra_stress.html deleted file mode 100644 index fd18a64c4..000000000 --- a/src/doc/4.0-beta1/tools/cassandra_stress.html +++ /dev/null @@ -1,353 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Cassandra Stress" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Stress

-

cassandra-stress is a tool for benchmarking and load testing a Cassandra cluster. cassandra-stress supports testing arbitrary CQL tables and queries to allow users to benchmark their data model.

-

This documentation focuses on user mode as this allows the testing of your actual schema.

-
-

Usage

-

There are several operation types:

-
-
    -
  • write-only, read-only, and mixed workloads of standard data
  • -
  • write-only and read-only workloads for counter columns
  • -
  • user configured workloads, running custom queries on custom schemas
  • -
-
-

The syntax is cassandra-stress <command> [options]. If you want more information on a given command or option, just run cassandra-stress help <command|option>.

-
-
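For example (both names below appear in the lists that follow), the detailed help for the user command or the -pop option can be printed with:

cassandra-stress help user
cassandra-stress help -pop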
Commands:
-
-
read:
-
Multiple concurrent reads - the cluster must first be populated by a write test
-
write:
-
Multiple concurrent writes against the cluster
-
mixed:
-
Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test (example invocations are shown after this list)
-
counter_write:
-
Multiple concurrent updates of counters.
-
counter_read:
-
Multiple concurrent reads of counters. The cluster must first be populated by a counter_write test.
-
user:
-
Interleaving of user provided queries, with configurable ratio and distribution.
-
help:
-
Print help for a command or option
-
print:
-
Inspect the output of a distribution definition
-
legacy:
-
Legacy support mode
-
-
-
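As a rough illustration of the write, read and mixed commands above (the row counts, thread counts and host addresses are placeholders rather than recommendations), a cluster can first be populated with a write test and then exercised with read and mixed workloads:

cassandra-stress write n=1000000 -rate threads=50 -node 10.0.0.1,10.0.0.2
cassandra-stress read n=1000000 -rate threads=50 -node 10.0.0.1,10.0.0.2
cassandra-stress mixed ratio\(write=1,read=3\) duration=5m -rate threads=50 -node 10.0.0.1,10.0.0.2

The standard commands generate and populate their own schema (the keyspace1 keyspace and its standard1 table by default); user mode, described below, is what allows your own schema to be stressed.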
Primary Options:
-
-
-pop:
-
Population distribution and intra-partition visit order
-
-insert:
-
Insert specific options relating to various methods for batching and splitting partition updates
-
-col:
-
Column details such as size and count distribution, data generator, names, comparator and whether super columns should be used
-
-rate:
-
Thread count, rate limit or automatic mode (default is auto); a combined example is shown after this list
-
-mode:
-
Thrift or CQL with options
-
-errors:
-
How to handle errors when encountered during stress
-
-sample:
-
Specify the number of samples to collect for measuring latency
-
-schema:
-
Replication settings, compression, compaction, etc.
-
-node:
-
Nodes to connect to
-
-log:
-
Where to log progress to, and the interval at which to do it
-
-transport:
-
Custom transport factories
-
-port:
-
The port to connect to cassandra nodes on
-
-sendto:
-
Specify a stress server to send this command to
-
-graph:
-
Graph recorded metrics
-
-tokenrange:
-
Token range settings
-
-
-
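A hedged sketch showing several of these primary options combined in a single invocation (the replication factor, consistency level, thread count, node addresses and log file name are illustrative only):

cassandra-stress write n=500000 cl=ONE -schema "replication(factor=3)" -rate threads=100 -node 10.0.0.1,10.0.0.2 -log file=write_run.log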
Suboptions:
-
Every command and primary option has its own collection of suboptions. These are too numerous to list here. For information on the suboptions for each command or option, please use the help command, cassandra-stress help <command|option>.
-
-
-
-

User mode

-

User mode allows you to stress your own schemas. This can save time in the long run rather than building an application and then realising your schema doesn't scale.

-
-

Profile

-

User mode requires a profile defined in YAML. Multiple YAML files may be specified, in which case operations in the ops argument are referenced as specname.opname.

-

An identifier for the profile:

-
specname: staff_activities
-
-
-

The keyspace for the test:

-
keyspace: staff
-
-
-

CQL for the keyspace. Optional if the keyspace already exists:

-
keyspace_definition: |
- CREATE KEYSPACE staff WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
-
-
-

The table to be stressed:

-
table: staff_activities
-
-
-

CQL for the table. Optional if the table already exists:

-
table_definition: |
-  CREATE TABLE staff_activities (
-      name text,
-      when timeuuid,
-      what text,
-      PRIMARY KEY(name, when, what)
-  )
-
-
-

Optional meta information on the generated columns in the above table. -The min and max only apply to text and blob types. -The distribution field represents the total unique population -distribution of that column across rows:

-
columnspec:
-  - name: name
-    size: uniform(5..10) # The names of the staff members are between 5-10 characters
-    population: uniform(1..10) # 10 possible staff members to pick from
-  - name: when
-    cluster: uniform(20..500) # Staff members do between 20 and 500 events
-  - name: what
-    size: normal(10..100,50)
-
-
-

Supported types are:

-

An exponential distribution over the range [min..max]:

-
EXP(min..max)
-
-
-

An extreme value (Weibull) distribution over the range [min..max]:

-
EXTREME(min..max,shape)
-
-
-

A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng:

-
GAUSSIAN(min..max,stdvrng)
-
-
-

A gaussian/normal distribution, with explicitly defined mean and stdev:

-
GAUSSIAN(min..max,mean,stdev)
-
-
-

A uniform distribution over the range [min, max]:

-
UNIFORM(min..max)
-
-
-

A fixed distribution, always returning the same value:

-
FIXED(val)
-
-
-

If preceded by ~, the distribution is inverted

-

Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1)

-
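As a small sketch of how these forms can be combined in a columnspec (the column name is hypothetical, and the ~ prefix follows the inversion rule described above):

columnspec:
  - name: comment
    size: ~exp(32..128)        # inverted exponential distribution of value sizes
    population: uniform(1..1M) # up to one million unique values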

Insert distributions:

-
insert:
-  # How many partitions to insert per batch
-  partitions: fixed(1)
-  # How many rows to update per partition
-  select: fixed(1)/500
-  # UNLOGGED or LOGGED batch for insert
-  batchtype: UNLOGGED
-
-
-

Currently all inserts are done inside batches.

-

Read statements to use during the test:

-
queries:
-   events:
-      cql: select *  from staff_activities where name = ?
-      fields: samerow
-   latest_event:
-      cql: select * from staff_activities where name = ?  LIMIT 1
-      fields: samerow
-
-
-

Running a user mode test:

-
cassandra-stress user profile=./example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" truncate=once
-
-
-

This will create the schema then run tests for 1 minute with an equal number of inserts, latest_event queries and events -queries. Additionally the table will be truncated once before the test.

-

The full example can be found in the accompanying example YAML file.

-
-
Running a user mode test with multiple yaml files:
-
cassandra-stress user profile=./example.yaml,./example2.yaml duration=1m "ops(ex1.insert=1,ex1.latest_event=1,ex2.insert=2)" truncate=once
-
This will run operations as specified in both the example.yaml and example2.yaml files. example.yaml and example2.yaml can reference the same table, although care must be taken that the table definition is identical (data generation specs can be different).
-
-
-
-

Lightweight transaction support

-

cassandra-stress supports lightweight transactions. In this it will first read current data from Cassandra and then uses read value(s) -to fulfill lightweight transaction condition(s).

-

Lightweight transaction update query:

-
queries:
-  regularupdate:
-      cql: update blogposts set author = ? where domain = ? and published_date = ?
-      fields: samerow
-  updatewithlwt:
-      cql: update blogposts set author = ? where domain = ? and published_date = ? IF body = ? AND url = ?
-      fields: samerow
-
-
-

The full example can be found in the accompanying example YAML file.

-
-
-
-

Graphing

-

Graphs can be generated for each run of stress.

-[image: example-stress-graph.png, an example cassandra-stress graph]

To create a new graph:

-
cassandra-stress user profile=./stress-example.yaml "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph"
-
-
-

To add a new run to an existing graph point to an existing file and add a revision name:

-
cassandra-stress user profile=./stress-example.yaml duration=1m "ops(insert=1,latest_event=1,events=1)" -graph file=graph.html title="Awesome graph" revision="Second run"
-
-
-
-
-

FAQ

-

How do you use NetworkTopologyStrategy for the keyspace?

-

Use the schema option, making sure to either escape the parentheses or enclose them in quotes:

-
cassandra-stress write -schema "replication(strategy=NetworkTopologyStrategy,datacenter1=3)"
-
-
-

How do you use SSL?

-

Use the transport option:

-
cassandra-stress "write n=100k cl=ONE no-warmup" -transport "truststore=$HOME/jks/truststore.jks truststore-password=cassandra"
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/cqlsh.html b/src/doc/4.0-beta1/tools/cqlsh.html deleted file mode 100644 index fdf4ddd96..000000000 --- a/src/doc/4.0-beta1/tools/cqlsh.html +++ /dev/null @@ -1,488 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "cqlsh: the CQL shell" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cqlsh: the CQL shell

-

cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped -with every Cassandra package, and can be found in the bin/ directory alongside the cassandra executable. cqlsh utilizes -the Python native protocol driver, and connects to the single node specified on the command line.

-
-

Compatibility

-

cqlsh is compatible with Python 2.7.

-

In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with. -In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.

-
-
-

Optional Dependencies

-

cqlsh ships with all essential dependencies. However, there are some optional dependencies that can be installed to -improve the capabilities of cqlsh.

-
-

pytz

-

By default, cqlsh displays all timestamps with a UTC timezone. To support display of timestamps with another timezone, -the pytz library must be installed. See the timezone option in cqlshrc for -specifying a timezone to use.

-
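As a rough sketch (assuming a pip-managed Python environment; the timezone value is only an example), pytz can be installed and a display timezone set via the [ui] section of cqlshrc:

pip install pytz

[ui]
timezone = Australia/Melbourne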
-
-

cython

-

The performance of cqlsh’s COPY operations can be improved by installing cython. This will -compile the python modules that are central to the performance of COPY.

-
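A possible way to install it (assuming pip is available for the Python interpreter that cqlsh uses):

pip install cython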
-
-
-

cqlshrc

-

The cqlshrc file holds configuration options for cqlsh. By default this is in the user’s home directory at -~/.cassandra/cqlsh, but a custom location can be specified with the --cqlshrc option.

-

Example config values and documentation can be found in the conf/cqlshrc.sample file of a tarball installation. You -can also view the latest version of cqlshrc online.

-
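As an illustrative sketch, a minimal cqlshrc might set credentials and a default host (the values below are placeholders; see conf/cqlshrc.sample for the full list of sections and options):

[authentication]
username = cassandra
password = cassandra

[connection]
hostname = 127.0.0.1
port = 9042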
-
-

Command Line Options

-

Usage:

-

cqlsh [options] [host [port]]

-

Options:

-
-
-C --color
-
Force color output
-
--no-color
-
Disable color output
-
--browser
-
Specify the browser to use for displaying cqlsh help. This can be one of the supported browser names (e.g. firefox) or a browser path followed by %s (e.g. -/usr/bin/google-chrome-stable %s).
-
--ssl
-
Use SSL when connecting to Cassandra
-
-u --user
-
Username to authenticate against Cassandra with
-
-p --password
-
Password to authenticate against Cassandra with, should -be used in conjunction with --user
-
-k --keyspace
-
Keyspace to authenticate to, should be used in conjunction -with --user
-
-f --file
-
Execute commands from the given file, then exit
-
--debug
-
Print additional debugging information
-
--encoding
-
Specify a non-default encoding for output (defaults to UTF-8)
-
--cqlshrc
-
Specify a non-default location for the cqlshrc file
-
-e --execute
-
Execute the given statement, then exit
-
--connect-timeout
-
Specify the connection timeout in seconds (defaults to 2s)
-
--python /path/to/python
-
Specify the full path to Python interpreter to override default on systems with multiple interpreters installed
-
--request-timeout
-
Specify the request timeout in seconds (defaults to 10s)
-
-t --tty
-
Force tty mode (command prompt)
-
-
-
-

Special Commands

-

In addition to supporting regular CQL statements, cqlsh also supports a number of special commands that are not part of -CQL. These are detailed below.

-
-

CONSISTENCY

-

Usage: CONSISTENCY <consistency level>

-

Sets the consistency level for operations to follow. Valid arguments include:

-
    -
  • ANY
  • -
  • ONE
  • -
  • TWO
  • -
  • THREE
  • -
  • QUORUM
  • -
  • ALL
  • -
  • LOCAL_QUORUM
  • -
  • LOCAL_ONE
  • -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-
-
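For example, to require a quorum of replicas for subsequent reads and writes:

cqlsh> CONSISTENCY QUORUM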
-

SERIAL CONSISTENCY

-

Usage: SERIAL CONSISTENCY <consistency level>

-

Sets the serial consistency level for operations to follow. Valid arguments include:

-
    -
  • SERIAL
  • -
  • LOCAL_SERIAL
  • -
-

The serial consistency level is only used by conditional updates (INSERT, UPDATE and DELETE with an IF -condition). For those, the serial consistency level defines the consistency level of the serial phase (or “paxos” phase) -while the normal consistency level defines the consistency for the “learn” phase, i.e. what type of reads will be -guaranteed to see the update right away. For example, if a conditional write has a consistency level of QUORUM (and -is successful), then a QUORUM read is guaranteed to see that write. But if the regular consistency level of that -write is ANY, then only a read with a consistency level of SERIAL is guaranteed to see it (even a read with -consistency ALL is not guaranteed to be enough).

-
-
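For example, to keep the paxos phase of subsequent conditional updates within the local datacenter:

cqlsh> SERIAL CONSISTENCY LOCAL_SERIAL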
-

SHOW VERSION

-

Prints the cqlsh, Cassandra, CQL, and native protocol versions in use. Example:

-
cqlsh> SHOW VERSION
-[cqlsh 5.0.1 | Cassandra 3.8 | CQL spec 3.4.2 | Native protocol v4]
-
-
-
-
-

SHOW HOST

-

Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name. -Example:

-
cqlsh> SHOW HOST
-Connected to Prod_Cluster at 192.0.0.1:9042.
-
-
-
-
-

SHOW SESSION

-

Pretty prints a specific tracing session.

-

Usage: SHOW SESSION <session id>

-

Example usage:

-
cqlsh> SHOW SESSION 95ac6470-327e-11e6-beca-dfb660d92ad8
-
-Tracing session: 95ac6470-327e-11e6-beca-dfb660d92ad8
-
- activity                                                  | timestamp                  | source    | source_elapsed | client
------------------------------------------------------------+----------------------------+-----------+----------------+-----------
-                                        Execute CQL3 query | 2016-06-14 17:23:13.979000 | 127.0.0.1 |              0 | 127.0.0.1
- Parsing SELECT * FROM system.local; [SharedPool-Worker-1] | 2016-06-14 17:23:13.982000 | 127.0.0.1 |           3843 | 127.0.0.1
-...
-
-
-
-
-

SOURCE

-

Reads the contents of a file and executes each line as a CQL statement or special cqlsh command.

-

Usage: SOURCE <string filename>

-

Example usage:

-
cqlsh> SOURCE '/home/thobbs/commands.cql'
-
-
-
-
-

CAPTURE

-

Begins capturing command output and appending it to a specified file. Output will not be shown at the console while it -is captured.

-

Usage:

-
CAPTURE '<file>';
-CAPTURE OFF;
-CAPTURE;
-
-
-

That is, the path to the file to be appended to must be given inside a string literal. The path is interpreted relative -to the current working directory. The tilde shorthand notation ('~/mydir') is supported for referring to $HOME.

-

Only query result output is captured. Errors and output from cqlsh-only commands will still be shown in the cqlsh -session.

-

To stop capturing output and show it in the cqlsh session again, use CAPTURE OFF.

-

To inspect the current capture configuration, use CAPTURE with no arguments.

-
-
-

HELP

-

Gives information about cqlsh commands. To see available topics, enter HELP without any arguments. To see help on a -topic, use HELP <topic>. Also see the --browser argument for controlling what browser is used to display help.

-
-
-

TRACING

-

Enables or disables tracing for queries. When tracing is enabled, once a query completes, a trace of the events during -the query will be printed.

-

Usage:

-
TRACING ON
-TRACING OFF
-
-
-
-
-

PAGING

-

Enables paging, disables paging, or sets the page size for read queries. When paging is enabled, only one page of data -will be fetched at a time and a prompt will appear to fetch the next page. Generally, it’s a good idea to leave paging -enabled in an interactive session to avoid fetching and printing large amounts of data at once.

-

Usage:

-
PAGING ON
-PAGING OFF
-PAGING <page size in rows>
-
-
-
-
-

EXPAND

-

Enables or disables vertical printing of rows. Enabling EXPAND is useful when many columns are fetched, or the -contents of a single column are large.

-

Usage:

-
EXPAND ON
-EXPAND OFF
-
-
-
-
-

LOGIN

-

Authenticate as a specified Cassandra user for the current session.

-

Usage:

-
LOGIN <username> [<password>]
-
-
-
-
-

EXIT

-

Ends the current session and terminates the cqlsh process.

-

Usage:

-
EXIT
-QUIT
-
-
-
-
-

CLEAR

-

Clears the console.

-

Usage:

-
CLEAR
-CLS
-
-
-
-
-

DESCRIBE

-

Prints a description (typically a series of DDL statements) of a schema element or the cluster. This is useful for -dumping all or portions of the schema.

-

Usage:

-
DESCRIBE CLUSTER
-DESCRIBE SCHEMA
-DESCRIBE KEYSPACES
-DESCRIBE KEYSPACE <keyspace name>
-DESCRIBE TABLES
-DESCRIBE TABLE <table name>
-DESCRIBE INDEX <index name>
-DESCRIBE MATERIALIZED VIEW <view name>
-DESCRIBE TYPES
-DESCRIBE TYPE <type name>
-DESCRIBE FUNCTIONS
-DESCRIBE FUNCTION <function name>
-DESCRIBE AGGREGATES
-DESCRIBE AGGREGATE <aggregate function name>
-
-
-

In any of the commands, DESC may be used in place of DESCRIBE.

-

The DESCRIBE CLUSTER command prints the cluster name and partitioner:

-
cqlsh> DESCRIBE CLUSTER
-
-Cluster: Test Cluster
-Partitioner: Murmur3Partitioner
-
-
-

The DESCRIBE SCHEMA command prints the DDL statements needed to recreate the entire schema. This is especially -useful for dumping the schema in order to clone a cluster or restore from a backup.

-
-
-

COPY TO

-

Copies data from a table to a CSV file.

-

Usage:

-
COPY <table name> [(<column>, ...)] TO <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the table will be copied to the CSV file. A subset of columns to copy may -be specified by adding a comma-separated list of column names surrounded by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the destination file. This -can also be the special value STDOUT (without single quotes) to print the CSV to stdout.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
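As an illustrative sketch (the cycling.cyclist_name table and its column names are hypothetical), exporting three columns to a CSV file with a header row might look like:

cqlsh> COPY cycling.cyclist_name (id, firstname, lastname) TO '/tmp/cyclist_name.csv' WITH HEADER = true;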
-

Options for COPY TO

-
-
MAXREQUESTS
-
The maximum number token ranges to fetch simultaneously. Defaults to 6.
-
PAGESIZE
-
The number of rows to fetch in a single page. Defaults to 1000.
-
PAGETIMEOUT
-
By default the page timeout is 10 seconds per 1000 entries -in the page size or 10 seconds if pagesize is smaller.
-
BEGINTOKEN, ENDTOKEN
-
Token range to export. Defaults to exporting the full ring.
-
MAXOUTPUTSIZE
-
The maximum size of the output file measured in number of lines; -beyond this maximum the output file will be split into segments. --1 means unlimited, and is the default.
-
ENCODING
-
The encoding used for characters. Defaults to utf8.
-
-
-
-
-

COPY FROM

-

Copies data from a CSV file to a table.

-

Usage:

-
COPY <table name> [(<column>, ...)] FROM <file name> WITH <copy option> [AND <copy option> ...]
-
-
-

If no columns are specified, all columns from the CSV file will be copied to the table. A subset -of columns to copy may be specified by adding a comma-separated list of column names surrounded -by parentheses after the table name.

-

The <file name> should be a string literal (with single quotes) representing a path to the -source file. This can also be the special value STDIN (without single quotes) to read the -CSV data from stdin.

-

See Shared COPY Options for options that apply to both COPY TO and COPY FROM.

-
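As an illustrative sketch, loading the CSV produced by the COPY TO example above back into the same hypothetical table might look like:

cqlsh> COPY cycling.cyclist_name (id, firstname, lastname) FROM '/tmp/cyclist_name.csv' WITH HEADER = true;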
-

Options for COPY FROM

-
-
INGESTRATE
-
The maximum number of rows to process per second. Defaults to 100000.
-
MAXROWS
-
The maximum number of rows to import. -1 means unlimited, and is the default.
-
SKIPROWS
-
A number of initial rows to skip. Defaults to 0.
-
SKIPCOLS
-
A comma-separated list of column names to ignore. By default, no columns are skipped.
-
MAXPARSEERRORS
-
The maximum global number of parsing errors to ignore. -1 means unlimited, and is the default.
-
MAXINSERTERRORS
-
The maximum global number of insert errors to ignore. -1 means unlimited. The default is 1000.
-
ERRFILE =
-
A file to store all rows that could not be imported, by default this is import_<ks>_<table>.err where <ks> is -your keyspace and <table> is your table name.
-
MAXBATCHSIZE
-
The max number of rows inserted in a single batch. Defaults to 20.
-
MINBATCHSIZE
-
The min number of rows inserted in a single batch. Defaults to 2.
-
CHUNKSIZE
-
The number of rows that are passed to child worker processes from the main process at a time. Defaults to 1000.
-
-
-
-

Shared COPY Options

-

Options that are common to both COPY TO and COPY FROM.

-
-
NULLVAL
-
The string placeholder for null values. Defaults to null.
-
HEADER
-
For COPY TO, controls whether the first line in the CSV output file will contain the column names. For COPY FROM, -specifies whether the first line in the CSV input file contains column names. Defaults to false.
-
DECIMALSEP
-
The character that is used as the decimal point separator. Defaults to ..
-
THOUSANDSSEP
-
The character that is used to separate thousands. Defaults to the empty string.
-
BOOLSTYlE
-
The string literal format for boolean values. Defaults to True,False.
-
NUMPROCESSES
-
The number of child worker processes to create for COPY tasks. Defaults to a max of 4 for COPY FROM and 16 -for COPY TO. However, at most (num_cores - 1) processes will be created.
-
MAXATTEMPTS
-
The maximum number of failed attempts to fetch a range of data (when using COPY TO) or insert a chunk of data -(when using COPY FROM) before giving up. Defaults to 5.
-
REPORTFREQUENCY
-
How often status updates are refreshed, in seconds. Defaults to 0.25.
-
RATEFILE
-
An optional file to output rate statistics to. By default, statistics are not output to a file.
-
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/index.html b/src/doc/4.0-beta1/tools/index.html deleted file mode 100644 index bf26ae541..000000000 --- a/src/doc/4.0-beta1/tools/index.html +++ /dev/null @@ -1,258 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Cassandra Tools" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Tools

-

This section describes the command line tools provided with Apache Cassandra.

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/assassinate.html b/src/doc/4.0-beta1/tools/nodetool/assassinate.html deleted file mode 100644 index 129101667..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/assassinate.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "assassinate" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

assassinate

-
-
-

Usage

-
NAME
-        nodetool assassinate - Forcefully remove a dead node without
-        re-replicating any data. Use as a last resort if you cannot removenode
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] assassinate [--] <ip_address>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <ip_address>
-            IP address of the endpoint to assassinate
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/bootstrap.html b/src/doc/4.0-beta1/tools/nodetool/bootstrap.html deleted file mode 100644 index 871e21445..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/bootstrap.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "bootstrap" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

bootstrap

-
-
-

Usage

-
NAME
-        nodetool bootstrap - Monitor/manage node's bootstrap process
-
-SYNOPSIS
-        nodetool bootstrap
-        nodetool [(-u <username> | --username <username>)]
-                [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-pp | --print-port)] bootstrap resume
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-COMMANDS
-        With no arguments, Display help information
-
-        resume
-            Resume bootstrap streaming
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/cleanup.html b/src/doc/4.0-beta1/tools/nodetool/cleanup.html deleted file mode 100644 index 1f6da4add..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/cleanup.html +++ /dev/null @@ -1,139 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "cleanup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

cleanup

-
-
-

Usage

-
NAME
-        nodetool cleanup - Triggers the immediate cleanup of keys no longer
-        belonging to a node. By default, clean all keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] cleanup
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to cleanup simultanously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/clearsnapshot.html b/src/doc/4.0-beta1/tools/nodetool/clearsnapshot.html deleted file mode 100644 index 36281efd4..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/clearsnapshot.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clearsnapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clearsnapshot

-
-
-

Usage

-
NAME
-        nodetool clearsnapshot - Remove the snapshot with the given name from
-        the given keyspaces. If no snapshotName is specified we will remove all
-        snapshots
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clearsnapshot [--all]
-                [-t <snapshot_name>] [--] [<keyspaces>...]
-
-OPTIONS
-        --all
-            Removes all snapshots
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -t <snapshot_name>
-            Remove the snapshot with a given name
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspaces>...]
-            Remove snapshots from the given keyspaces
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/clientstats.html b/src/doc/4.0-beta1/tools/nodetool/clientstats.html deleted file mode 100644 index 842ff67c2..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/clientstats.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "clientstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

clientstats

-
-
-

Usage

-
NAME
-        nodetool clientstats - Print information about connected clients
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] clientstats [--all]
-                [--by-protocol] [--clear-history]
-
-OPTIONS
-        --all
-            Lists all connections
-
-        --by-protocol
-            Lists most recent client connections by protocol version
-
-        --clear-history
-            Clear the history of connected clients
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/compact.html b/src/doc/4.0-beta1/tools/nodetool/compact.html deleted file mode 100644 index 8bfb591fc..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/compact.html +++ /dev/null @@ -1,151 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compact" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compact

-
-
-

Usage

-
NAME
-        nodetool compact - Force a (major) compaction on one or more tables or
-        user-defined compaction on given SSTables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compact
-                [(-et <end_token> | --end-token <end_token>)] [(-s | --split-output)]
-                [(-st <start_token> | --start-token <start_token>)] [--user-defined]
-                [--] [<keyspace> <tables>...] or <SSTable file>...
-
-OPTIONS
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which compaction range ends
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s, --split-output
-            Use -s to not create a single big file
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the compaction range starts
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --user-defined
-            Use --user-defined to submit listed files for user-defined
-            compaction
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...] or <SSTable file>...
-            The keyspace followed by one or many tables or list of SSTable data
-            files when using --user-defined
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/compactionhistory.html b/src/doc/4.0-beta1/tools/nodetool/compactionhistory.html deleted file mode 100644 index 0a917302f..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/compactionhistory.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionhistory" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionhistory

-
-
-

Usage

-
NAME
-        nodetool compactionhistory - Print history of compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionhistory
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/compactionstats.html b/src/doc/4.0-beta1/tools/nodetool/compactionstats.html deleted file mode 100644 index 123219a00..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/compactionstats.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "compactionstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

compactionstats

-
-
-

Usage

-
NAME
-        nodetool compactionstats - Print statistics on compactions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] compactionstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/decommission.html b/src/doc/4.0-beta1/tools/nodetool/decommission.html deleted file mode 100644 index c644ea9bf..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/decommission.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "decommission" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

decommission

-
-
-

Usage

-
NAME
-        nodetool decommission - Decommission the *node I am connecting to*
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] decommission [(-f | --force)]
-
-OPTIONS
-        -f, --force
-            Force decommission of this node even when it reduces the number of
-            replicas to below configured RF
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/describecluster.html b/src/doc/4.0-beta1/tools/nodetool/describecluster.html deleted file mode 100644 index 2cd9950ff..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/describecluster.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describecluster" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describecluster

-
-
-

Usage

-
NAME
-        nodetool describecluster - Print the name, snitch, partitioner and
-        schema version of a cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describecluster
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/describering.html b/src/doc/4.0-beta1/tools/nodetool/describering.html deleted file mode 100644 index 6c0b85624..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/describering.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "describering" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

describering

-
-
-

Usage

-
NAME
-        nodetool describering - Shows the token ranges info of a given keyspace
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] describering [--] <keyspace>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace>
-            The keyspace name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/disableauditlog.html b/src/doc/4.0-beta1/tools/nodetool/disableauditlog.html deleted file mode 100644 index a0518daf5..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/disableauditlog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableauditlog

-
-
-

Usage

-
NAME
-        nodetool disableauditlog - Disable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableauditlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/disableautocompaction.html b/src/doc/4.0-beta1/tools/nodetool/disableautocompaction.html deleted file mode 100644 index ac8d0c616..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/disableautocompaction.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableautocompaction

-
-
-

Usage

-
NAME
-        nodetool disableautocompaction - Disable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/disablebackup.html b/src/doc/4.0-beta1/tools/nodetool/disablebackup.html deleted file mode 100644 index 54231143d..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/disablebackup.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebackup

-
-
-

Usage

-
NAME
-        nodetool disablebackup - Disable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/disablebinary.html b/src/doc/4.0-beta1/tools/nodetool/disablebinary.html deleted file mode 100644 index 5cc350f33..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/disablebinary.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablebinary

-
-
-

Usage

-
NAME
-        nodetool disablebinary - Disable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/disablefullquerylog.html b/src/doc/4.0-beta1/tools/nodetool/disablefullquerylog.html deleted file mode 100644 index 472d94fb3..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/disablefullquerylog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool disablefullquerylog - Disable the full query log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablefullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/disablegossip.html b/src/doc/4.0-beta1/tools/nodetool/disablegossip.html deleted file mode 100644 index 25885bb0c..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/disablegossip.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablegossip

-
-
-

Usage

-
NAME
-        nodetool disablegossip - Disable gossip (effectively marking the node
-        down)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/disablehandoff.html b/src/doc/4.0-beta1/tools/nodetool/disablehandoff.html deleted file mode 100644 index 7c170351f..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/disablehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehandoff

-
-
-

Usage

-
NAME
-        nodetool disablehandoff - Disable storing hinted handoffs
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/disablehintsfordc.html b/src/doc/4.0-beta1/tools/nodetool/disablehintsfordc.html deleted file mode 100644 index f0f80ca9b..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/disablehintsfordc.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool disablehintsfordc - Disable hints for a data center
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <datacenter>
-            The data center to disable
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/disableoldprotocolversions.html b/src/doc/4.0-beta1/tools/nodetool/disableoldprotocolversions.html deleted file mode 100644 index 04c57706c..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/disableoldprotocolversions.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "disableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

disableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool disableoldprotocolversions - Disable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] disableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/drain.html b/src/doc/4.0-beta1/tools/nodetool/drain.html deleted file mode 100644 index 7c81caf5e..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/drain.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "drain" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

drain

-
-
-

Usage

-
NAME
-        nodetool drain - Drain the node (stop accepting writes and flush all
-        tables)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] drain
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/enableauditlog.html b/src/doc/4.0-beta1/tools/nodetool/enableauditlog.html deleted file mode 100644 index 69fb5d8f0..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/enableauditlog.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableauditlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableauditlog

-
-
-

Usage

-
NAME
-        nodetool enableauditlog - Enable the audit log
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableauditlog
-                [--excluded-categories <excluded_categories>]
-                [--excluded-keyspaces <excluded_keyspaces>]
-                [--excluded-users <excluded_users>]
-                [--included-categories <included_categories>]
-                [--included-keyspaces <included_keyspaces>]
-                [--included-users <included_users>] [--logger <logger>]
-
-OPTIONS
-        --excluded-categories <excluded_categories>
-            Comma separated list of Audit Log Categories to be excluded for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --excluded-keyspaces <excluded_keyspaces>
-            Comma separated list of keyspaces to be excluded for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --excluded-users <excluded_users>
-            Comma separated list of users to be excluded for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --included-categories <included_categories>
-            Comma separated list of Audit Log Categories to be included for
-            audit log. If not set the value from cassandra.yaml will be used
-
-        --included-keyspaces <included_keyspaces>
-            Comma separated list of keyspaces to be included for audit log. If
-            not set the value from cassandra.yaml will be used
-
-        --included-users <included_users>
-            Comma separated list of users to be included for audit log. If not
-            set the value from cassandra.yaml will be used
-
-        --logger <logger>
-            Logger name to be used for AuditLogging. Default BinAuditLogger. If
-            not set the value from cassandra.yaml will be used
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
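
A sketch of `enableauditlog` combining the include filters above; the keyspace name and category values are illustrative, and any option left unset falls back to cassandra.yaml.

```bash
# Audit only the 'cycling' keyspace, restricted to DML and DDL events.
nodetool enableauditlog \
    --included-keyspaces cycling \
    --included-categories DML,DDL
```
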
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/enableautocompaction.html b/src/doc/4.0-beta1/tools/nodetool/enableautocompaction.html deleted file mode 100644 index 3fdc87952..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/enableautocompaction.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableautocompaction

-
-
-

Usage

-
NAME
-        nodetool enableautocompaction - Enable autocompaction for the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableautocompaction [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
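
A sketch of `enableautocompaction` for specific tables; the keyspace and table names are placeholders.

```bash
# Re-enable autocompaction for two tables in one keyspace.
# The '--' separator guards against arguments being parsed as options.
nodetool enableautocompaction -- cycling rank_by_year race_times
```
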
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/enablebackup.html b/src/doc/4.0-beta1/tools/nodetool/enablebackup.html deleted file mode 100644 index 51ce36b54..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/enablebackup.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebackup

-
-
-

Usage

-
NAME
-        nodetool enablebackup - Enable incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/enablebinary.html b/src/doc/4.0-beta1/tools/nodetool/enablebinary.html deleted file mode 100644 index ecb785dd5..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/enablebinary.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablebinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablebinary

-
-
-

Usage

-
NAME
-        nodetool enablebinary - Reenable native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablebinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/enablefullquerylog.html b/src/doc/4.0-beta1/tools/nodetool/enablefullquerylog.html deleted file mode 100644 index 6c87ade5f..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/enablefullquerylog.html +++ /dev/null @@ -1,156 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablefullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablefullquerylog

-
-
-

Usage

-
NAME
-        nodetool enablefullquerylog - Enable full query logging, defaults for
-        the options are configured in cassandra.yaml
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablefullquerylog
-                [--archive-command <archive_command>] [--blocking]
-                [--max-archive-retries <archive_retries>]
-                [--max-log-size <max_log_size>] [--max-queue-weight <max_queue_weight>]
-                [--path <path>] [--roll-cycle <roll_cycle>]
-
-OPTIONS
-        --archive-command <archive_command>
-            Command that will handle archiving rolled full query log files.
-            Format is "/path/to/script.sh %path" where %path will be replaced
-            with the file to archive
-
-        --blocking
-            Whether to block producers or drop samples if the queue is full.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        --max-archive-retries <archive_retries>
-            Max number of archive retries.
-
-        --max-log-size <max_log_size>
-            How many bytes of log data to store before dropping segments. Might
-            not be respected if a log file hasn't rolled so it can be deleted.
-
-        --max-queue-weight <max_queue_weight>
-            Maximum number of bytes of query data to queue to disk before
-            blocking or dropping samples.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        --path <path>
-            Path to store the full query log at. Will have its contents
-            recursively deleted.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        --roll-cycle <roll_cycle>
-            How often to roll the log file (MINUTELY, HOURLY, DAILY).
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
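
A sketch of `enablefullquerylog` using the options above; the log path and archive script are placeholders.

```bash
# Log full queries under a dedicated directory, roll hourly, and archive
# rolled segments with a site-specific script ('%path' is replaced by the file).
nodetool enablefullquerylog \
    --path /var/lib/cassandra/fql \
    --roll-cycle HOURLY \
    --archive-command "/usr/local/bin/archive-fql.sh %path"
```
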
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/enablegossip.html b/src/doc/4.0-beta1/tools/nodetool/enablegossip.html deleted file mode 100644 index 189592fc9..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/enablegossip.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablegossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablegossip

-
-
-

Usage

-
NAME
-        nodetool enablegossip - Reenable gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablegossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/enablehandoff.html b/src/doc/4.0-beta1/tools/nodetool/enablehandoff.html deleted file mode 100644 index 962fb118b..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/enablehandoff.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehandoff

-
-
-

Usage

-
NAME
-        nodetool enablehandoff - Re-enable storing of future hints on the
-        current node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/enablehintsfordc.html b/src/doc/4.0-beta1/tools/nodetool/enablehintsfordc.html deleted file mode 100644 index 714b77073..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/enablehintsfordc.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enablehintsfordc" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enablehintsfordc

-
-
-

Usage

-
NAME
-        nodetool enablehintsfordc - Enable hints for a data center that was
-        previously disabled
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enablehintsfordc [--]
-                <datacenter>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <datacenter>
-            The data center to enable
-
-
-
-
-
- - - - -
-
-
-
-
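
A sketch of `enablehintsfordc`; the data center name is a placeholder.

```bash
# Resume storing hints for a data center whose hints were previously disabled.
nodetool enablehintsfordc -- DC2
```
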
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/enableoldprotocolversions.html b/src/doc/4.0-beta1/tools/nodetool/enableoldprotocolversions.html deleted file mode 100644 index 740fa7b5e..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/enableoldprotocolversions.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "enableoldprotocolversions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

enableoldprotocolversions

-
-
-

Usage

-
NAME
-        nodetool enableoldprotocolversions - Enable old protocol versions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] enableoldprotocolversions
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/failuredetector.html b/src/doc/4.0-beta1/tools/nodetool/failuredetector.html deleted file mode 100644 index 52e99ebf0..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/failuredetector.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "failuredetector" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

failuredetector

-
-
-

Usage

-
NAME
-        nodetool failuredetector - Shows the failure detector information for
-        the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] failuredetector
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/flush.html b/src/doc/4.0-beta1/tools/nodetool/flush.html deleted file mode 100644 index fac3bc4ad..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/flush.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "flush" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

flush

-
-
-

Usage

-
NAME
-        nodetool flush - Flush one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] flush [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
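
A sketch of `flush`; the keyspace and table names are placeholders.

```bash
# Flush the memtables of a single table; with no arguments every table is flushed.
nodetool flush cycling race_times
```
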
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/garbagecollect.html b/src/doc/4.0-beta1/tools/nodetool/garbagecollect.html deleted file mode 100644 index d8ea2924e..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/garbagecollect.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "garbagecollect" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

garbagecollect

-
-
-

Usage

-
NAME
-        nodetool garbagecollect - Remove deleted data from one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] garbagecollect
-                [(-g <granularity> | --granularity <granularity>)]
-                [(-j <jobs> | --jobs <jobs>)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -g <granularity>, --granularity <granularity>
-            Granularity of garbage removal. ROW (default) removes deleted
-            partitions and rows, CELL also removes overwritten or deleted cells.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to clean up simultaneously, set to 0 to use all
-            available compaction threads. Defaults to 1 so that collections of
-            newer tables can see the data is deleted and also remove tombstones.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
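
A sketch of `garbagecollect` using the granularity and jobs options; the keyspace and table names are placeholders.

```bash
# Remove deleted data, including overwritten cells, from one table,
# running two cleanup jobs in parallel.
nodetool garbagecollect -g CELL -j 2 -- cycling race_times
```
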
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/gcstats.html b/src/doc/4.0-beta1/tools/nodetool/gcstats.html deleted file mode 100644 index 2a668f1da..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/gcstats.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gcstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gcstats

-
-
-

Usage

-
NAME
-        nodetool gcstats - Print GC Statistics
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gcstats
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getbatchlogreplaythrottle.html b/src/doc/4.0-beta1/tools/nodetool/getbatchlogreplaythrottle.html deleted file mode 100644 index c677ecbc0..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getbatchlogreplaythrottle.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool getbatchlogreplaythrottle - Print batchlog replay throttle in
-        KB/s. This is reduced proportionally to the number of nodes in the
-        cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getbatchlogreplaythrottle
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getcompactionthreshold.html b/src/doc/4.0-beta1/tools/nodetool/getcompactionthreshold.html deleted file mode 100644 index 6e71a61cf..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getcompactionthreshold.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool getcompactionthreshold - Print min and max compaction
-        thresholds for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthreshold [--]
-                <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table>
-            The keyspace with a table
-
-
-
-
-
- - - - -
-
-
-
-
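
A sketch of `getcompactionthreshold`; the keyspace and table names are placeholders.

```bash
# Print the min and max compaction thresholds configured for one table.
nodetool getcompactionthreshold cycling race_times
```
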
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getcompactionthroughput.html b/src/doc/4.0-beta1/tools/nodetool/getcompactionthroughput.html deleted file mode 100644 index 50fbd5e08..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getcompactionthroughput.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool getcompactionthroughput - Print the MB/s throughput cap for
-        compaction in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getcompactionthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getconcurrency.html b/src/doc/4.0-beta1/tools/nodetool/getconcurrency.html deleted file mode 100644 index 17ff67113..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getconcurrency.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrency

-
-
-

Usage

-
NAME
-        nodetool getconcurrency - Get maximum concurrency for processing stages
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrency [--]
-                [stage-names]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [stage-names]
-            Optional list of stage names; otherwise all stages are displayed
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getconcurrentcompactors.html b/src/doc/4.0-beta1/tools/nodetool/getconcurrentcompactors.html deleted file mode 100644 index c5ce81079..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getconcurrentcompactors.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool getconcurrentcompactors - Get the number of concurrent
-        compactors in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentcompactors
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getconcurrentviewbuilders.html b/src/doc/4.0-beta1/tools/nodetool/getconcurrentviewbuilders.html deleted file mode 100644 index 5ba8f10aa..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getconcurrentviewbuilders.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool getconcurrentviewbuilders - Get the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getconcurrentviewbuilders
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getendpoints.html b/src/doc/4.0-beta1/tools/nodetool/getendpoints.html deleted file mode 100644 index 3518e1fb7..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getendpoints.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getendpoints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getendpoints

-
-
-

Usage

-
NAME
-        nodetool getendpoints - Print the endpoints that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getendpoints [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find the endpoint
-
-
-
-
-
- - - - -
-
-
-
-
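
A sketch of `getendpoints`; the keyspace, table, and partition key are placeholders.

```bash
# Show which nodes own the partition with key 'fernando'.
nodetool getendpoints -- cycling race_times fernando
```
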
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getinterdcstreamthroughput.html b/src/doc/4.0-beta1/tools/nodetool/getinterdcstreamthroughput.html deleted file mode 100644 index 85de27b3a..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getinterdcstreamthroughput.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getinterdcstreamthroughput - Print the Mb/s throughput cap for
-        inter-datacenter streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getinterdcstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getlogginglevels.html b/src/doc/4.0-beta1/tools/nodetool/getlogginglevels.html deleted file mode 100644 index ce37e44cc..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getlogginglevels.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getlogginglevels" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getlogginglevels

-
-
-

Usage

-
NAME
-        nodetool getlogginglevels - Get the runtime logging levels
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getlogginglevels
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getmaxhintwindow.html b/src/doc/4.0-beta1/tools/nodetool/getmaxhintwindow.html deleted file mode 100644 index bd16523e4..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getmaxhintwindow.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool getmaxhintwindow - Print the max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getmaxhintwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getreplicas.html b/src/doc/4.0-beta1/tools/nodetool/getreplicas.html deleted file mode 100644 index 750e4f896..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getreplicas.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getreplicas" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getreplicas

-
-
-

Usage

-
NAME
-        nodetool getreplicas - Print replicas for a given key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getreplicas [--] <keyspace>
-                <table> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <key>
-            The keyspace, the table, and the partition key for which we need to
-            find replicas
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getseeds.html b/src/doc/4.0-beta1/tools/nodetool/getseeds.html deleted file mode 100644 index 153e8e1db..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getseeds.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getseeds

-
-
-

Usage

-
NAME
-        nodetool getseeds - Get the list of seed node IPs currently in use,
-        excluding this node's IP
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getsstables.html b/src/doc/4.0-beta1/tools/nodetool/getsstables.html deleted file mode 100644 index 6b88a5286..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getsstables.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getsstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getsstables

-
-
-

Usage

-
NAME
-        nodetool getsstables - Print the sstable filenames that own the key
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getsstables
-                [(-hf | --hex-format)] [--] <keyspace> <cfname> <key>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hf, --hex-format
-            Specify the key in hexadecimal string format
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <key>
-            The keyspace, the column family, and the key
-
-
-
-
-
- - - - -
-
-
-
-
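
A sketch of `getsstables`, with and without the hex key format; the keyspace, table, and key are placeholders.

```bash
# List the sstable files that contain the given partition key.
nodetool getsstables -- cycling race_times fernando

# The same lookup with the key supplied as a hexadecimal string.
nodetool getsstables -hf -- cycling race_times 6665726e616e646f
```
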
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/getstreamthroughput.html b/src/doc/4.0-beta1/tools/nodetool/getstreamthroughput.html deleted file mode 100644 index def6eadff..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/getstreamthroughput.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "getstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

getstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool getstreamthroughput - Print the Mb/s throughput cap for
-        streaming in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] getstreamthroughput
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/gettimeout.html b/src/doc/4.0-beta1/tools/nodetool/gettimeout.html deleted file mode 100644 index 483ca1726..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/gettimeout.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettimeout

-
-
-

Usage

-
NAME
-        nodetool gettimeout - Print the timeout of the given type in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettimeout [--] <timeout_type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type>
-            The timeout type, one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
-
-
-
-
-
- - - - -
-
-
-
-
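
A sketch of `gettimeout` for one of the timeout types listed above.

```bash
# Print the configured read timeout in milliseconds.
nodetool gettimeout read
```
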
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/gettraceprobability.html b/src/doc/4.0-beta1/tools/nodetool/gettraceprobability.html deleted file mode 100644 index 408c10651..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/gettraceprobability.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gettraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gettraceprobability

-
-
-

Usage

-
NAME
-        nodetool gettraceprobability - Print the current trace probability value
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gettraceprobability
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/gossipinfo.html b/src/doc/4.0-beta1/tools/nodetool/gossipinfo.html deleted file mode 100644 index 72527fbf9..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/gossipinfo.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "gossipinfo" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

gossipinfo

-
-
-

Usage

-
NAME
-        nodetool gossipinfo - Shows the gossip information for the cluster
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] gossipinfo
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/handoffwindow.html b/src/doc/4.0-beta1/tools/nodetool/handoffwindow.html deleted file mode 100644 index 342c9131c..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/handoffwindow.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "handoffwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

handoffwindow

-
-
-

Usage

-
NAME
-        nodetool handoffwindow - Print current hinted handoff window
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] handoffwindow
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/help.html b/src/doc/4.0-beta1/tools/nodetool/help.html deleted file mode 100644 index a98dbcb2b..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/help.html +++ /dev/null @@ -1,112 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "help" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

help

-
-
-

Usage

-
NAME
-        nodetool help - Display help information
-
-SYNOPSIS
-        nodetool help [--] [<command>...]
-
-OPTIONS
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <command>
-
-
-
-
-
-
- - - - -
-
-
-
-
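
A sketch of `help` for a single subcommand.

```bash
# Show the detailed help page for one subcommand.
nodetool help enableauditlog
```
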
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/import.html b/src/doc/4.0-beta1/tools/nodetool/import.html deleted file mode 100644 index b7c97a37d..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/import.html +++ /dev/null @@ -1,160 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "import" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

import

-
-
-

Usage

-
NAME
-        nodetool import - Import new SSTables to the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] import
-                [(-c | --no-invalidate-caches)] [(-e | --extended-verify)]
-                [(-l | --keep-level)] [(-q | --quick)] [(-r | --keep-repaired)]
-                [(-t | --no-tokens)] [(-v | --no-verify)] [--] <keyspace> <table>
-                <directory> ...
-
-OPTIONS
-        -c, --no-invalidate-caches
-            Don't invalidate the row cache when importing
-
-        -e, --extended-verify
-            Run an extended verify, verifying all values in the new sstables
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --keep-level
-            Keep the level on the new sstables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick import without verifying sstables, clearing row cache or
-            checking in which data directory to put the file
-
-        -r, --keep-repaired
-            Keep any repaired information from the sstables
-
-        -t, --no-tokens
-            Don't verify that all tokens in the new sstable are owned by the
-            current node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -v, --no-verify
-            Don't verify new sstables
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <directory> ...
-            The keyspace, table name and directories to import sstables from
-
-
-
-
-
- - - - -
-
-
-
-
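For quick reference while reviewing the removed `import` page above, a usage sketch based on the synopsis it documents; the keyspace, table, and staging directory are placeholders:

```bash
# Bring sstables staged in a local directory into ks1.tbl1, keeping their
# level and repaired state (flags taken from the synopsis above).
nodetool import --keep-level --keep-repaired -- ks1 tbl1 /var/lib/cassandra/staging/ks1-tbl1
```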
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/info.html b/src/doc/4.0-beta1/tools/nodetool/info.html deleted file mode 100644 index 59c2e1e36..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/info.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "info" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

info

-
-
-

Usage

-
NAME
-        nodetool info - Print node information (uptime, load, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] info [(-T | --tokens)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -T, --tokens
-            Display all tokens
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
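A minimal invocation sketch for the `info` page removed above, assuming the node's JMX agent is on the default local port (7199):

```bash
# Print node uptime/load plus the full token list (-T/--tokens per the options above).
nodetool -h 127.0.0.1 -p 7199 info --tokens
```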
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/invalidatecountercache.html b/src/doc/4.0-beta1/tools/nodetool/invalidatecountercache.html deleted file mode 100644 index c2cb1f2c7..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/invalidatecountercache.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatecountercache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatecountercache

-
-
-

Usage

-
NAME
-        nodetool invalidatecountercache - Invalidate the counter cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatecountercache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/invalidatekeycache.html b/src/doc/4.0-beta1/tools/nodetool/invalidatekeycache.html deleted file mode 100644 index 90c97f8ce..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/invalidatekeycache.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidatekeycache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidatekeycache

-
-
-

Usage

-
NAME
-        nodetool invalidatekeycache - Invalidate the key cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidatekeycache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/invalidaterowcache.html b/src/doc/4.0-beta1/tools/nodetool/invalidaterowcache.html deleted file mode 100644 index 865657926..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/invalidaterowcache.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "invalidaterowcache" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

invalidaterowcache

-
-
-

Usage

-
NAME
-        nodetool invalidaterowcache - Invalidate the row cache
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] invalidaterowcache
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/join.html b/src/doc/4.0-beta1/tools/nodetool/join.html deleted file mode 100644 index 1297fc33a..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/join.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "join" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

join

-
-
-

Usage

-
NAME
-        nodetool join - Join the ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] join
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/listsnapshots.html b/src/doc/4.0-beta1/tools/nodetool/listsnapshots.html deleted file mode 100644 index 32706830c..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/listsnapshots.html +++ /dev/null @@ -1,128 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "listsnapshots" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

listsnapshots

-
-
-

Usage

-
NAME
-        nodetool listsnapshots - Lists all the snapshots along with the size on
-        disk and true size. True size is the total size of all SSTables which
-        are not backed up to disk. Size on disk is total size of the snapshot on
-        disk. Total TrueDiskSpaceUsed does not make any SSTable deduplication.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] listsnapshots
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
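An illustrative call for the `listsnapshots` page removed above; as the description notes, "true size" counts only sstables that are not otherwise kept on disk:

```bash
# List every snapshot along with its on-disk size and true size.
nodetool listsnapshots
```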
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/move.html b/src/doc/4.0-beta1/tools/nodetool/move.html deleted file mode 100644 index d2af83fca..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/move.html +++ /dev/null @@ -1,133 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "move" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

move

-
-
-

Usage

-
NAME
-        nodetool move - Move node on the token ring to a new token
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] move [--] <new token>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <new token>
-            The new token.
-
-
-
-
-
- - - - -
-
-
-
-
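A hedged example for the removed `move` page. The token value is a placeholder, and the `--` separator matters here because a negative token would otherwise be parsed as an option (see the `--` description above):

```bash
# Move this node to a new (placeholder) token on the ring.
nodetool move -- -9223372036854775808
```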
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/netstats.html b/src/doc/4.0-beta1/tools/nodetool/netstats.html deleted file mode 100644 index 9c16b09ac..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/netstats.html +++ /dev/null @@ -1,130 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "netstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

netstats

-
-
-

Usage

-
NAME
-        nodetool netstats - Print network information on provided host
-        (connecting node by default)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] netstats
-                [(-H | --human-readable)]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
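A usage sketch for the removed `netstats` page, assuming a node reachable over JMX on the default port:

```bash
# Show streaming and connection activity in human-readable KiB/MiB/GiB units.
nodetool -h 127.0.0.1 -p 7199 netstats --human-readable
```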
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/nodetool.html b/src/doc/4.0-beta1/tools/nodetool/nodetool.html deleted file mode 100644 index a4fcc939d..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/nodetool.html +++ /dev/null @@ -1,246 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Nodetool

-
-
-

Usage

-
-
usage: nodetool [(-u <username> | –username <username>)]
-
[(-h <host> | –host <host>)] [(-p <port> | –port <port>)]
-[(-pw <password> | –password <password>)]
-[(-pwf <passwordFilePath> | –password-file <passwordFilePath>)]
-[(-pp | –print-port)] <command> [<args>]
-
-

The most commonly used nodetool commands are:

-
-

assassinate - Forcefully remove a dead node without re-replicating any data. Use as a last resort if you cannot removenode

-

bootstrap - Monitor/manage node’s bootstrap process

-

cleanup - Triggers the immediate cleanup of keys no longer belonging to a node. By default, clean all keyspaces

-

clearsnapshot - Remove the snapshot with the given name from the given keyspaces. If no snapshotName is specified we will remove all snapshots

-

clientstats - Print information about connected clients

-

compact - Force a (major) compaction on one or more tables or user-defined compaction on given SSTables

-

compactionhistory - Print history of compaction

-

compactionstats - Print statistics on compactions

-

decommission - Decommission the node I am connecting to

-

describecluster - Print the name, snitch, partitioner and schema version of a cluster

-

describering - Shows the token ranges info of a given keyspace

-

disableauditlog - Disable the audit log

-

disableautocompaction - Disable autocompaction for the given keyspace and table

-

disablebackup - Disable incremental backup

-

disablebinary - Disable native transport (binary protocol)

-

disablefullquerylog - Disable the full query log

-

disablegossip - Disable gossip (effectively marking the node down)

-

disablehandoff - Disable storing hinted handoffs

-

disablehintsfordc - Disable hints for a data center

-

disableoldprotocolversions - Disable old protocol versions

-

drain - Drain the node (stop accepting writes and flush all tables)

-

enableauditlog - Enable the audit log

-

enableautocompaction - Enable autocompaction for the given keyspace and table

-

enablebackup - Enable incremental backup

-

enablebinary - Reenable native transport (binary protocol)

-

enablefullquerylog - Enable full query logging, defaults for the options are configured in cassandra.yaml

-

enablegossip - Reenable gossip

-

enablehandoff - Reenable future hints storing on the current node

-

enablehintsfordc - Enable hints for a data center that was previously disabled

-

enableoldprotocolversions - Enable old protocol versions

-

failuredetector - Shows the failure detector information for the cluster

-

flush - Flush one or more tables

-

garbagecollect - Remove deleted data from one or more tables

-

gcstats - Print GC Statistics

-

getbatchlogreplaythrottle - Print batchlog replay throttle in KB/s. This is reduced proportionally to the number of nodes in the cluster.

-

getcompactionthreshold - Print min and max compaction thresholds for a given table

-

getcompactionthroughput - Print the MB/s throughput cap for compaction in the system

-

getconcurrency - Get maximum concurrency for processing stages

-

getconcurrentcompactors - Get the number of concurrent compactors in the system.

-

getconcurrentviewbuilders - Get the number of concurrent view builders in the system

-

getendpoints - Print the end points that owns the key

-

getinterdcstreamthroughput - Print the Mb/s throughput cap for inter-datacenter streaming in the system

-

getlogginglevels - Get the runtime logging levels

-

getmaxhintwindow - Print the max hint window in ms

-

getreplicas - Print replicas for a given key

-

getseeds - Get the currently in use seed node IP list excluding the node IP

-

getsstables - Print the sstable filenames that own the key

-

getstreamthroughput - Print the Mb/s throughput cap for streaming in the system

-

gettimeout - Print the timeout of the given type in ms

-

gettraceprobability - Print the current trace probability value

-

gossipinfo - Shows the gossip information for the cluster

-

handoffwindow - Print current hinted handoff window

-

help - Display help information

-

import - Import new SSTables to the system

-

info - Print node information (uptime, load, …)

-

invalidatecountercache - Invalidate the counter cache

-

invalidatekeycache - Invalidate the key cache

-

invalidaterowcache - Invalidate the row cache

-

join - Join the ring

-

listsnapshots - Lists all the snapshots along with the size on disk and true size. True size is the total size of all SSTables which are not backed up to disk. Size on disk is total size of the snapshot on disk. Total TrueDiskSpaceUsed does not make any SSTable deduplication.

-

move - Move node on the token ring to a new token

-

netstats - Print network information on provided host (connecting node by default)

-

pausehandoff - Pause hints delivery process

-

profileload - Low footprint profiling of activity for a period of time

-

proxyhistograms - Print statistic histograms for network operations

-

rangekeysample - Shows the sampled keys held across all keyspaces

-

rebuild - Rebuild data by streaming from other nodes (similarly to bootstrap)

-

rebuild_index - A full rebuild of native secondary indexes for a given table

-

refresh - Load newly placed SSTables to the system without restart

-

refreshsizeestimates - Refresh system.size_estimates

-

reloadlocalschema - Reload local node schema from system tables

-

reloadseeds - Reload the seed node list from the seed node provider

-

reloadssl - Signals Cassandra to reload SSL certificates

-

reloadtriggers - Reload trigger classes

-

relocatesstables - Relocates sstables to the correct disk

-

removenode - Show status of current node removal, force completion of pending removal or remove provided ID

-

repair - Repair one or more tables

-

repair_admin - list and fail incremental repair sessions

-

replaybatchlog - Kick off batchlog replay and wait for finish

-

resetfullquerylog - Stop the full query log and clean files in the configured full query log directory from cassandra.yaml as well as JMX

-

resetlocalschema - Reset node’s local schema and resync

-

resumehandoff - Resume hints delivery process

-

ring - Print information about the token ring

-

scrub - Scrub (rebuild sstables for) one or more tables

-

setbatchlogreplaythrottle - Set batchlog replay throttle in KB per second, or 0 to disable throttling. This will be reduced proportionally to the number of nodes in the cluster.

-

setcachecapacity - Set global key, row, and counter cache capacities (in MB units)

-

setcachekeystosave - Set number of keys saved by each cache for faster post-restart warmup. 0 to disable

-

setcompactionthreshold - Set min and max compaction thresholds for a given table

-

setcompactionthroughput - Set the MB/s throughput cap for compaction in the system, or 0 to disable throttling

-

setconcurrency - Set maximum concurrency for processing stage

-

setconcurrentcompactors - Set number of concurrent compactors in the system.

-

setconcurrentviewbuilders - Set the number of concurrent view builders in the system

-

sethintedhandoffthrottlekb - Set hinted handoff throttle in kb per second, per delivery thread.

-

setinterdcstreamthroughput - Set the Mb/s throughput cap for inter-datacenter streaming in the system, or 0 to disable throttling

-

setlogginglevel - Set the log level threshold for a given component or class. Will reset to the initial configuration if called with no parameters.

-

setmaxhintwindow - Set the specified max hint window in ms

-

setstreamthroughput - Set the Mb/s throughput cap for streaming in the system, or 0 to disable throttling

-

settimeout - Set the specified timeout in ms, or 0 to disable timeout

-

settraceprobability - Sets the probability for tracing any given request to value. 0 disables, 1 enables for all requests, 0 is the default

-

sjk - Run commands of ‘Swiss Java Knife’. Run ‘nodetool sjk –help’ for more information.

-

snapshot - Take a snapshot of specified keyspaces or a snapshot of the specified table

-

status - Print cluster information (state, load, IDs, …)

-

statusautocompaction - status of autocompaction of the given keyspace and table

-

statusbackup - Status of incremental backup

-

statusbinary - Status of native transport (binary protocol)

-

statusgossip - Status of gossip

-

statushandoff - Status of storing future hints on the current node

-

stop - Stop compaction

-

stopdaemon - Stop cassandra daemon

-

tablehistograms - Print statistic histograms for a given table

-

tablestats - Print statistics on tables

-

toppartitions - Sample and print the most active partitions

-

tpstats - Print usage statistics of thread pools

-

truncatehints - Truncate all hints on the local node, or truncate hints for the endpoint(s) specified.

-

upgradesstables - Rewrite sstables (for the requested tables) that are not on the current version (thus upgrading them to said current version)

-

verify - Verify (check data checksum for) one or more tables

-

version - Print cassandra version

-

viewbuildstatus - Show progress of a materialized view build

-
-

See ‘nodetool help <command>’ for more information on a specific command.

-
- - - - - - -
-
-
-
-
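For orientation while reading the removed overview page above, a sketch of a fully qualified invocation; the host, port, and credential file are placeholders for whatever JMX authentication a cluster actually uses:

```bash
# Run 'status' against a remote node, authenticating with a JMX password file
# and operating in print-port (-pp) mode, per the usage block above.
nodetool -u cassandra -pwf /etc/cassandra/jmxremote.password \
         -h 10.0.0.1 -p 7199 -pp status
```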
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/pausehandoff.html b/src/doc/4.0-beta1/tools/nodetool/pausehandoff.html deleted file mode 100644 index f3900da12..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/pausehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "pausehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

pausehandoff

-
-
-

Usage

-
NAME
-        nodetool pausehandoff - Pause hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] pausehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/profileload.html b/src/doc/4.0-beta1/tools/nodetool/profileload.html deleted file mode 100644 index 36855a0d3..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/profileload.html +++ /dev/null @@ -1,144 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "profileload" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

profileload

-
-
-

Usage

-
NAME
-        nodetool profileload - Low footprint profiling of activity for a period
-        of time
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] profileload [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
-
-
-
-
-
- - - - -
-
-
-
-
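A sketch for the removed `profileload` page; the keyspace, table, and duration are placeholders, and the flags follow the synopsis above:

```bash
# Sample activity on ks1.tbl1 for 10 seconds (10000 ms), reporting the top 5
# entries per sampler with a larger sampler capacity for better accuracy.
nodetool profileload -k 5 -s 512 -- ks1 tbl1 10000
```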
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/proxyhistograms.html b/src/doc/4.0-beta1/tools/nodetool/proxyhistograms.html deleted file mode 100644 index 5f51a5e97..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/proxyhistograms.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "proxyhistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

proxyhistograms

-
-
-

Usage

-
NAME
-        nodetool proxyhistograms - Print statistic histograms for network
-        operations
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] proxyhistograms
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/rangekeysample.html b/src/doc/4.0-beta1/tools/nodetool/rangekeysample.html deleted file mode 100644 index 18f0d52ea..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/rangekeysample.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rangekeysample" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rangekeysample

-
-
-

Usage

-
NAME
-        nodetool rangekeysample - Shows the sampled keys held across all
-        keyspaces
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rangekeysample
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/rebuild.html b/src/doc/4.0-beta1/tools/nodetool/rebuild.html deleted file mode 100644 index 3f8ca4c0d..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/rebuild.html +++ /dev/null @@ -1,150 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild

-
-
-

Usage

-
NAME
-        nodetool rebuild - Rebuild data by streaming from other nodes (similarly
-        to bootstrap)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild
-                [(-ks <specific_keyspace> | --keyspace <specific_keyspace>)]
-                [(-s <specific_sources> | --sources <specific_sources>)]
-                [(-ts <specific_tokens> | --tokens <specific_tokens>)] [--]
-                <src-dc-name>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -ks <specific_keyspace>, --keyspace <specific_keyspace>
-            Use -ks to rebuild specific keyspace.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <specific_sources>, --sources <specific_sources>
-            Use -s to specify hosts that this node should stream from when -ts
-            is used. Multiple hosts should be separated using commas (e.g.
-            127.0.0.1,127.0.0.2,...)
-
-        -ts <specific_tokens>, --tokens <specific_tokens>
-            Use -ts to rebuild specific token ranges, in the format of "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]".
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <src-dc-name>
-            Name of DC from which to select sources for streaming. By default,
-            pick any DC
-
-
-
-
-
- - - - -
-
-
-
-
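An illustrative call for the removed `rebuild` page; the keyspace and source datacenter names are placeholders:

```bash
# Re-stream a single keyspace onto this node, selecting streaming sources from DC1.
nodetool rebuild -ks my_keyspace -- DC1
```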
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/rebuild_index.html b/src/doc/4.0-beta1/tools/nodetool/rebuild_index.html deleted file mode 100644 index 7be07d0ab..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/rebuild_index.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "rebuild_index" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

rebuild_index

-
-
-

Usage

-
NAME
-        nodetool rebuild_index - A full rebuild of native secondary indexes for
-        a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] rebuild_index [--] <keyspace>
-                <table> <indexName...>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table> <indexName...>
-            The keyspace and table name followed by a list of index names
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/refresh.html b/src/doc/4.0-beta1/tools/nodetool/refresh.html deleted file mode 100644 index 3931e86d2..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/refresh.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refresh" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refresh

-
-
-

Usage

-
NAME
-        nodetool refresh - Load newly placed SSTables to the system without
-        restart
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refresh [--] <keyspace>
-                <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table>
-            The keyspace and table name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/refreshsizeestimates.html b/src/doc/4.0-beta1/tools/nodetool/refreshsizeestimates.html deleted file mode 100644 index 421cafc04..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/refreshsizeestimates.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "refreshsizeestimates" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

refreshsizeestimates

-
-
-

Usage

-
NAME
-        nodetool refreshsizeestimates - Refresh system.size_estimates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] refreshsizeestimates
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/reloadlocalschema.html b/src/doc/4.0-beta1/tools/nodetool/reloadlocalschema.html deleted file mode 100644 index c4411de90..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/reloadlocalschema.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadlocalschema

-
-
-

Usage

-
NAME
-        nodetool reloadlocalschema - Reload local node schema from system tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/reloadseeds.html b/src/doc/4.0-beta1/tools/nodetool/reloadseeds.html deleted file mode 100644 index a7094d3a7..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/reloadseeds.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadseeds" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadseeds

-
-
-

Usage

-
NAME
-        nodetool reloadseeds - Reload the seed node list from the seed node
-        provider
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadseeds
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/reloadssl.html b/src/doc/4.0-beta1/tools/nodetool/reloadssl.html deleted file mode 100644 index 355b0234a..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/reloadssl.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadssl" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadssl

-
-
-

Usage

-
NAME
-        nodetool reloadssl - Signals Cassandra to reload SSL certificates
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadssl
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/reloadtriggers.html b/src/doc/4.0-beta1/tools/nodetool/reloadtriggers.html deleted file mode 100644 index 09077d129..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/reloadtriggers.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "reloadtriggers" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

reloadtriggers

-
-
-

Usage

-
NAME
-        nodetool reloadtriggers - Reload trigger classes
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] reloadtriggers
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/relocatesstables.html b/src/doc/4.0-beta1/tools/nodetool/relocatesstables.html deleted file mode 100644 index 748d5b5c2..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/relocatesstables.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "relocatesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

relocatesstables

-
-
-

Usage

-
NAME
-        nodetool relocatesstables - Relocates sstables to the correct disk
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] relocatesstables
-                [(-j <jobs> | --jobs <jobs>)] [--] <keyspace> <table>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to relocate simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <keyspace> <table>
-            The keyspace and table name
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/removenode.html b/src/doc/4.0-beta1/tools/nodetool/removenode.html deleted file mode 100644 index 17b22125b..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/removenode.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "removenode" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

removenode

-
-
-

Usage

-
NAME
-        nodetool removenode - Show status of current node removal, force
-        completion of pending removal or remove provided ID
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] removenode [--]
-                <status>|<force>|<ID>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        <status>|<force>|<ID>
-            Show status of current node removal, force completion of pending
-            removal, or remove provided ID
-
-
-
-
-
- - - - -
-
-
-
-
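A hedged example for the removed `removenode` page; the host ID is a placeholder UUID (host IDs can be read from `nodetool status` output):

```bash
# Check whether a removal is already in progress, then remove a dead node by host ID.
nodetool removenode status
nodetool removenode 9e8232a7-5819-4a4a-a9f9-63a8b1f3e2b0
```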
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/repair.html b/src/doc/4.0-beta1/tools/nodetool/repair.html deleted file mode 100644 index f60554dcc..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/repair.html +++ /dev/null @@ -1,199 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair

-
-
-

Usage

-
NAME
-        nodetool repair - Repair one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair
-                [(-dc <specific_dc> | --in-dc <specific_dc>)...]
-                [(-dcpar | --dc-parallel)] [(-et <end_token> | --end-token <end_token>)]
-                [(-force | --force)] [(-full | --full)]
-                [(-hosts <specific_host> | --in-hosts <specific_host>)...]
-                [(-j <job_threads> | --job-threads <job_threads>)]
-                [(-local | --in-local-dc)] [(-os | --optimise-streams)] [(-pl | --pull)]
-                [(-pr | --partitioner-range)] [(-prv | --preview)]
-                [(-seq | --sequential)]
-                [(-st <start_token> | --start-token <start_token>)] [(-tr | --trace)]
-                [(-vd | --validate)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -dc <specific_dc>, --in-dc <specific_dc>
-            Use -dc to repair specific datacenters
-
-        -dcpar, --dc-parallel
-            Use -dcpar to repair data centers in parallel.
-
-        -et <end_token>, --end-token <end_token>
-            Use -et to specify a token at which repair range ends (inclusive)
-
-        -force, --force
-            Use -force to filter out down endpoints
-
-        -full, --full
-            Use -full to issue a full repair.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -hosts <specific_host>, --in-hosts <specific_host>
-            Use -hosts to repair specific hosts
-
-        -j <job_threads>, --job-threads <job_threads>
-            Number of threads to run repair jobs. Usually this means number of
-            CFs to repair concurrently. WARNING: increasing this puts more load
-            on repairing nodes, so be careful. (default: 1, max: 4)
-
-        -local, --in-local-dc
-            Use -local to only repair against nodes in the same datacenter
-
-        -os, --optimise-streams
-            Use --optimise-streams to try to reduce the number of streams we do
-            (EXPERIMENTAL, see CASSANDRA-3200).
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pl, --pull
-            Use --pull to perform a one way repair where data is only streamed
-            from a remote node to this node.
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pr, --partitioner-range
-            Use -pr to repair only the first range returned by the partitioner
-
-        -prv, --preview
-            Determine ranges and amount of data to be streamed, but don't
-            actually perform repair
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -seq, --sequential
-            Use -seq to carry out a sequential repair
-
-        -st <start_token>, --start-token <start_token>
-            Use -st to specify a token at which the repair range starts
-            (exclusive)
-
-        -tr, --trace
-            Use -tr to trace the repair. Traces are logged to
-            system_traces.events.
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -vd, --validate
-            Checks that repaired data is in sync between nodes. Out of sync
-            repaired data indicates a full repair should be run.
-
-        --
-            This option can be used to separate command-line options from the
-            list of argument, (useful when arguments might be mistaken for
-            command-line options
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
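A usage sketch for the removed `repair` page; the keyspace and table names are placeholders, and the flags are among those listed above:

```bash
# Full (non-incremental) repair of one table, only against nodes in the local datacenter.
nodetool repair -full -local -- my_keyspace my_table
```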
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/repair_admin.html b/src/doc/4.0-beta1/tools/nodetool/repair_admin.html deleted file mode 100644 index 0f21a5c0d..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/repair_admin.html +++ /dev/null @@ -1,139 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "repair_admin" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

repair_admin

-
-
-

Usage

-
NAME
-        nodetool repair_admin - list and fail incremental repair sessions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] repair_admin [(-a | --all)]
-                [(-f | --force)] [(-l | --list)] [(-x <cancel> | --cancel <cancel>)]
-
-OPTIONS
-        -a, --all
-            include completed and failed sessions
-
-        -f, --force
-            cancel repair session from a node other than the repair coordinator.
-            Attempting to cancel FINALIZED or FAILED sessions is an error.
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -l, --list
-            list repair sessions (default behavior)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        -x <cancel>, --cancel <cancel>
-            cancel an incremental repair session
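A minimal usage sketch for repair_admin, assuming a local node; the session id is a placeholder taken from the --list output:

```bash
# Show active incremental repair sessions (the default behaviour)
$ nodetool repair_admin --list

# Cancel a specific session by its id
$ nodetool repair_admin --cancel <session-id>
```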
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/replaybatchlog.html b/src/doc/4.0-beta1/tools/nodetool/replaybatchlog.html deleted file mode 100644 index ea1351b23..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/replaybatchlog.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "replaybatchlog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

replaybatchlog

-
-
-

Usage

-
NAME
-        nodetool replaybatchlog - Kick off batchlog replay and wait for finish
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] replaybatchlog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/resetfullquerylog.html b/src/doc/4.0-beta1/tools/nodetool/resetfullquerylog.html deleted file mode 100644 index d1991263a..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/resetfullquerylog.html +++ /dev/null @@ -1,127 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetfullquerylog" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetfullquerylog

-
-
-

Usage

-
NAME
-        nodetool resetfullquerylog - Stop the full query log and clean files in
-        the configured full query log directory from cassandra.yaml as well as
-        JMX
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetfullquerylog
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/resetlocalschema.html b/src/doc/4.0-beta1/tools/nodetool/resetlocalschema.html deleted file mode 100644 index e5f86e07b..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/resetlocalschema.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resetlocalschema" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resetlocalschema

-
-
-

Usage

-
NAME
-        nodetool resetlocalschema - Reset node's local schema and resync
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resetlocalschema
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/resumehandoff.html b/src/doc/4.0-beta1/tools/nodetool/resumehandoff.html deleted file mode 100644 index 3e4917fe1..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/resumehandoff.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "resumehandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

resumehandoff

-
-
-

Usage

-
NAME
-        nodetool resumehandoff - Resume hints delivery process
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] resumehandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/ring.html b/src/doc/4.0-beta1/tools/nodetool/ring.html deleted file mode 100644 index e82387af0..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/ring.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "ring" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

ring

-
-
-

Usage

-
NAME
-        nodetool ring - Print information about the token ring
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] ring [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace>
-            Specify a keyspace for accurate ownership information (topology
-            awareness)
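For illustration, a hedged example of the command above, with a placeholder keyspace name:

```bash
# Print the token ring with hostnames instead of IPs, scoped to one keyspace for accurate ownership
$ nodetool ring -r my_keyspace
```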
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/scrub.html b/src/doc/4.0-beta1/tools/nodetool/scrub.html deleted file mode 100644 index d20a33e50..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/scrub.html +++ /dev/null @@ -1,159 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "scrub" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

scrub

-
-
-

Usage

-
NAME
-        nodetool scrub - Scrub (rebuild sstables for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] scrub
-                [(-j <jobs> | --jobs <jobs>)] [(-n | --no-validate)]
-                [(-ns | --no-snapshot)] [(-r | --reinsert-overflowed-ttl)]
-                [(-s | --skip-corrupted)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to scrub simultaneously, set to 0 to use all
-            available compaction threads
-
-        -n, --no-validate
-            Do not validate columns using column validator
-
-        -ns, --no-snapshot
-            Scrubbed CFs will be snapshotted first, if disableSnapshot is false.
-            (default false)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --reinsert-overflowed-ttl
-            Rewrites rows with overflowed expiration date affected by
-            CASSANDRA-14092 with the maximum supported expiration date of
-            2038-01-19T03:14:06+00:00. The rows are rewritten with the original
-            timestamp incremented by one millisecond to override/supersede any
-            potential tombstone that may have been generated during compaction
-            of the affected rows.
-
-        -s, --skip-corrupted
-            Skip corrupted partitions even when scrubbing counter tables.
-            (default false)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
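A small sketch combining the scrub options above; the keyspace and table names are placeholders:

```bash
# Rebuild the SSTables of one table with two parallel jobs, skipping corrupted partitions
$ nodetool scrub -j 2 -s my_keyspace my_table
```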
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setbatchlogreplaythrottle.html b/src/doc/4.0-beta1/tools/nodetool/setbatchlogreplaythrottle.html deleted file mode 100644 index ff4a3823c..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setbatchlogreplaythrottle.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setbatchlogreplaythrottle" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setbatchlogreplaythrottle

-
-
-

Usage

-
NAME
-        nodetool setbatchlogreplaythrottle - Set batchlog replay throttle in KB
-        per second, or 0 to disable throttling. This will be reduced
-        proportionally to the number of nodes in the cluster.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setbatchlogreplaythrottle [--]
-                <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second, 0 to disable throttling
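For example (the throttle value is illustrative only):

```bash
# Throttle batchlog replay to 1024 KB/s; the rate is reduced proportionally to the number of nodes
$ nodetool setbatchlogreplaythrottle 1024
```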
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setcachecapacity.html b/src/doc/4.0-beta1/tools/nodetool/setcachecapacity.html deleted file mode 100644 index d1598d677..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setcachecapacity.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachecapacity" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachecapacity

-
-
-

Usage

-
NAME
-        nodetool setcachecapacity - Set global key, row, and counter cache
-        capacities (in MB units)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachecapacity [--]
-                <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-capacity> <row-cache-capacity> <counter-cache-capacity>
-            Key cache, row cache, and counter cache (in MB)
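A possible invocation, with illustrative sizes:

```bash
# 100 MB key cache, row cache disabled, 50 MB counter cache
$ nodetool setcachecapacity 100 0 50
```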
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setcachekeystosave.html b/src/doc/4.0-beta1/tools/nodetool/setcachekeystosave.html deleted file mode 100644 index cf7dd9ea1..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setcachekeystosave.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcachekeystosave" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcachekeystosave

-
-
-

Usage

-
NAME
-        nodetool setcachekeystosave - Set number of keys saved by each cache for
-        faster post-restart warmup. 0 to disable
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcachekeystosave [--]
-                <key-cache-keys-to-save> <row-cache-keys-to-save>
-                <counter-cache-keys-to-save>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <key-cache-keys-to-save> <row-cache-keys-to-save>
-        <counter-cache-keys-to-save>
-            The number of keys saved by each cache. 0 to disable
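For instance (the counts are illustrative):

```bash
# Save 100 key-cache keys and 50 counter-cache keys for warmup; save no row-cache keys
$ nodetool setcachekeystosave 100 0 50
```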
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setcompactionthreshold.html b/src/doc/4.0-beta1/tools/nodetool/setcompactionthreshold.html deleted file mode 100644 index 5a3f7ef41..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setcompactionthreshold.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthreshold" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthreshold

-
-
-

Usage

-
NAME
-        nodetool setcompactionthreshold - Set min and max compaction thresholds
-        for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthreshold [--]
-                <keyspace> <table> <minthreshold> <maxthreshold>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <table> <minthreshold> <maxthreshold>
-            The keyspace, the table, min and max threshold
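As an example, using placeholder keyspace and table names:

```bash
# Set the minor compaction thresholds for this table: minimum 4, maximum 32 SSTables
$ nodetool setcompactionthreshold my_keyspace my_table 4 32
```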
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setcompactionthroughput.html b/src/doc/4.0-beta1/tools/nodetool/setcompactionthroughput.html deleted file mode 100644 index 282f123d8..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setcompactionthroughput.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setcompactionthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setcompactionthroughput

-
-
-

Usage

-
NAME
-        nodetool setcompactionthroughput - Set the MB/s throughput cap for
-        compaction in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setcompactionthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in MB, 0 to disable throttling
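For example:

```bash
# Cap compaction throughput at 64 MB/s; a value of 0 removes the throttle
$ nodetool setcompactionthroughput 64
```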
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setconcurrency.html b/src/doc/4.0-beta1/tools/nodetool/setconcurrency.html deleted file mode 100644 index 1bba0de46..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setconcurrency.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrency" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrency

-
-
-

Usage

-
NAME
-        nodetool setconcurrency - Set maximum concurrency for processing stage
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrency [--]
-                <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-                <maximum-concurrency>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <stage-name> <maximum-concurrency> | <stage-name> <core-pool>
-        <maximum-concurrency>
-            Set concurrency for processing stage
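A hedged sketch of the two-argument form; the stage name below is an assumption and should be replaced with the stage you actually want to resize:

```bash
# Raise the maximum concurrency of a processing stage (stage name is a placeholder)
$ nodetool setconcurrency MutationStage 64
```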
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setconcurrentcompactors.html b/src/doc/4.0-beta1/tools/nodetool/setconcurrentcompactors.html deleted file mode 100644 index 313b5a533..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setconcurrentcompactors.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentcompactors" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentcompactors

-
-
-

Usage

-
NAME
-        nodetool setconcurrentcompactors - Set number of concurrent compactors
-        in the system.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentcompactors [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent compactors, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setconcurrentviewbuilders.html b/src/doc/4.0-beta1/tools/nodetool/setconcurrentviewbuilders.html deleted file mode 100644 index 9d671830f..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setconcurrentviewbuilders.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setconcurrentviewbuilders" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setconcurrentviewbuilders

-
-
-

Usage

-
NAME
-        nodetool setconcurrentviewbuilders - Set the number of concurrent view
-        builders in the system
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setconcurrentviewbuilders [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Number of concurrent view builders, greater than 0.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/sethintedhandoffthrottlekb.html b/src/doc/4.0-beta1/tools/nodetool/sethintedhandoffthrottlekb.html deleted file mode 100644 index deffa044f..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/sethintedhandoffthrottlekb.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sethintedhandoffthrottlekb" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sethintedhandoffthrottlekb

-
-
-

Usage

-
NAME
-        nodetool sethintedhandoffthrottlekb - Set hinted handoff throttle in kb
-        per second, per delivery thread.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sethintedhandoffthrottlekb
-                [--] <value_in_kb_per_sec>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_kb_per_sec>
-            Value in KB per second
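For example:

```bash
# Limit hint delivery to 2048 KB/s per delivery thread
$ nodetool sethintedhandoffthrottlekb 2048
```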
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setinterdcstreamthroughput.html b/src/doc/4.0-beta1/tools/nodetool/setinterdcstreamthroughput.html deleted file mode 100644 index 64739f1df..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setinterdcstreamthroughput.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setinterdcstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setinterdcstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setinterdcstreamthroughput - Set the Mb/s throughput cap for
-        inter-datacenter streaming in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setinterdcstreamthroughput
-                [--] <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setlogginglevel.html b/src/doc/4.0-beta1/tools/nodetool/setlogginglevel.html deleted file mode 100644 index 83e169fc5..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setlogginglevel.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setlogginglevel" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setlogginglevel

-
-
-

Usage

-
NAME
-        nodetool setlogginglevel - Set the log level threshold for a given
-        component or class. Will reset to the initial configuration if called
-        with no parameters.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setlogginglevel [--]
-                <component|class> <level>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <component|class> <level>
-            The component or class to change the level for and the log level
-            threshold to set. Will reset to initial level if omitted. Available
-            components: bootstrap, compaction, repair, streaming, cql, ring
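To make the component/level pairing concrete, two sketches using one of the components listed above:

```bash
# Enable DEBUG logging for the repair component
$ nodetool setlogginglevel repair DEBUG

# Reset all logging levels back to the initial configuration
$ nodetool setlogginglevel
```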
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setmaxhintwindow.html b/src/doc/4.0-beta1/tools/nodetool/setmaxhintwindow.html deleted file mode 100644 index add5fef3d..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setmaxhintwindow.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setmaxhintwindow" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setmaxhintwindow

-
-
-

Usage

-
NAME
-        nodetool setmaxhintwindow - Set the specified max hint window in ms
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setmaxhintwindow [--]
-                <value_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_ms>
-            Value of maxhintwindow in ms
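For example (the window below is three hours, expressed in milliseconds):

```bash
# Keep accumulating hints for a down node for up to 3 hours
$ nodetool setmaxhintwindow 10800000
```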
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/setstreamthroughput.html b/src/doc/4.0-beta1/tools/nodetool/setstreamthroughput.html deleted file mode 100644 index 2cc216d89..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/setstreamthroughput.html +++ /dev/null @@ -1,135 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "setstreamthroughput" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

setstreamthroughput

-
-
-

Usage

-
NAME
-        nodetool setstreamthroughput - Set the Mb/s throughput cap for streaming
-        in the system, or 0 to disable throttling
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] setstreamthroughput [--]
-                <value_in_mb>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value_in_mb>
-            Value in Mb, 0 to disable throttling
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/settimeout.html b/src/doc/4.0-beta1/tools/nodetool/settimeout.html deleted file mode 100644 index 867617895..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/settimeout.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settimeout" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settimeout

-
-
-

Usage

-
NAME
-        nodetool settimeout - Set the specified timeout in ms, or 0 to disable
-        timeout
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settimeout [--] <timeout_type>
-                <timeout_in_ms>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <timeout_type> <timeout_in_ms>
-            Timeout type followed by value in ms (0 disables socket streaming
-            timeout). Type should be one of (read, range, write, counterwrite,
-            cascontention, truncate, internodeconnect, internodeuser, misc
-            (general rpc_timeout_in_ms))
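As an illustration using one of the timeout types listed above:

```bash
# Set the read timeout to 10 seconds
$ nodetool settimeout read 10000
```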
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/settraceprobability.html b/src/doc/4.0-beta1/tools/nodetool/settraceprobability.html deleted file mode 100644 index 15bcb477b..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/settraceprobability.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "settraceprobability" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

settraceprobability

-
-
-

Usage

-
NAME
-        nodetool settraceprobability - Sets the probability for tracing any
-        given request to value. 0 disables, 1 enables for all requests, 0 is the
-        default
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] settraceprobability [--]
-                <value>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <value>
-            Trace probability between 0 and 1 (ex: 0.2)
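For example:

```bash
# Trace roughly 1% of requests, then switch tracing back off
$ nodetool settraceprobability 0.01
$ nodetool settraceprobability 0
```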
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/sjk.html b/src/doc/4.0-beta1/tools/nodetool/sjk.html deleted file mode 100644 index 7b826e6e2..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/sjk.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "sjk" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sjk

-
-
-

Usage

-
NAME
-        nodetool sjk - Run commands of 'Swiss Java Knife'. Run 'nodetool sjk
-        --help' for more information.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] sjk [--] [<args>...]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <args>
-            Arguments passed as is to 'Swiss Java Knife'.
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/snapshot.html b/src/doc/4.0-beta1/tools/nodetool/snapshot.html deleted file mode 100644 index 2fb613b25..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/snapshot.html +++ /dev/null @@ -1,152 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "snapshot" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

snapshot

-
-
-

Usage

-
NAME
-        nodetool snapshot - Take a snapshot of specified keyspaces or a snapshot
-        of the specified table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] snapshot
-                [(-cf <table> | --column-family <table> | --table <table>)]
-                [(-kt <ktlist> | --kt-list <ktlist> | -kc <ktlist> | --kc.list <ktlist>)]
-                [(-sf | --skip-flush)] [(-t <tag> | --tag <tag>)] [--] [<keyspaces...>]
-
-OPTIONS
-        -cf <table>, --column-family <table>, --table <table>
-            The table name (you must specify one and only one keyspace for using
-            this option)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -kt <ktlist>, --kt-list <ktlist>, -kc <ktlist>, --kc.list <ktlist>
-            The list of Keyspace.table to take a snapshot of (you must not
-            specify only a keyspace)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -sf, --skip-flush
-            Do not flush memtables before snapshotting (snapshot will not
-            contain unflushed data)
-
-        -t <tag>, --tag <tag>
-            The name of the snapshot
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspaces...>]
-            List of keyspaces. By default, all keyspaces
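Two hedged sketches of the snapshot forms above; the tag, keyspace, and table names are placeholders:

```bash
# Snapshot a whole keyspace under a named tag
$ nodetool snapshot -t before_upgrade my_keyspace

# Snapshot a specific keyspace.table pair instead of whole keyspaces
$ nodetool snapshot -kt my_keyspace.my_table
```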
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/status.html b/src/doc/4.0-beta1/tools/nodetool/status.html deleted file mode 100644 index decc7d90a..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/status.html +++ /dev/null @@ -1,137 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "status" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

status

-
-
-

Usage

-
NAME
-        nodetool status - Print cluster information (state, load, IDs, ...)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] status [(-r | --resolve-ip)]
-                [--] [<keyspace>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -r, --resolve-ip
-            Show node domain names instead of IPs
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace>]
-            The keyspace name
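For example, with a placeholder keyspace for accurate ownership figures:

```bash
# Cluster overview with hostnames resolved and ownership computed for one keyspace
$ nodetool status -r my_keyspace
```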
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/statusautocompaction.html b/src/doc/4.0-beta1/tools/nodetool/statusautocompaction.html deleted file mode 100644 index 1e1d08ee9..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/statusautocompaction.html +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusautocompaction" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusautocompaction

-
-
-

Usage

-
NAME
-        nodetool statusautocompaction - status of autocompaction of the given
-        keyspace and table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusautocompaction
-                [(-a | --all)] [--] [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --all
-            Show auto compaction status for each keyspace/table
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/statusbackup.html b/src/doc/4.0-beta1/tools/nodetool/statusbackup.html deleted file mode 100644 index 4c155d398..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/statusbackup.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbackup" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbackup

-
-
-

Usage

-
NAME
-        nodetool statusbackup - Status of incremental backup
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbackup
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/statusbinary.html b/src/doc/4.0-beta1/tools/nodetool/statusbinary.html deleted file mode 100644 index 0282e69e2..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/statusbinary.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusbinary" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusbinary

-
-
-

Usage

-
NAME
-        nodetool statusbinary - Status of native transport (binary protocol)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusbinary
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/statusgossip.html b/src/doc/4.0-beta1/tools/nodetool/statusgossip.html deleted file mode 100644 index 84654bf70..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/statusgossip.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statusgossip" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statusgossip

-
-
-

Usage

-
NAME
-        nodetool statusgossip - Status of gossip
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statusgossip
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/statushandoff.html b/src/doc/4.0-beta1/tools/nodetool/statushandoff.html deleted file mode 100644 index 50bc6489f..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/statushandoff.html +++ /dev/null @@ -1,126 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "statushandoff" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

statushandoff

-
-
-

Usage

-
NAME
-        nodetool statushandoff - Status of storing future hints on the current
-        node
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] statushandoff
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/stop.html b/src/doc/4.0-beta1/tools/nodetool/stop.html deleted file mode 100644 index fbd22fa07..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/stop.html +++ /dev/null @@ -1,142 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stop" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stop

-
-
-

Usage

-
NAME
-        nodetool stop - Stop compaction
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stop
-                [(-id <compactionId> | --compaction-id <compactionId>)] [--] <compaction
-                type>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -id <compactionId>, --compaction-id <compactionId>
-            Use -id to stop a compaction by the specified id. Ids can be found
-            in the transaction log files whose name starts with compaction_,
-            located in the table transactions folder.
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <compaction type>
-            Supported types are COMPACTION, VALIDATION, CLEANUP, SCRUB,
-            UPGRADE_SSTABLES, INDEX_BUILD, TOMBSTONE_COMPACTION, ANTICOMPACTION,
-            VERIFY, VIEW_BUILD, INDEX_SUMMARY, RELOCATE, GARBAGE_COLLECT
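-
-EXAMPLE
-        An illustrative invocation (sketch only; no particular cluster is
-        assumed). This stops every currently running compaction of the
-        COMPACTION type on the local node:
-
-            nodetool stop COMPACTION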
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/stopdaemon.html b/src/doc/4.0-beta1/tools/nodetool/stopdaemon.html deleted file mode 100644 index 9701eeef6..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/stopdaemon.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "stopdaemon" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

stopdaemon

-
-
-

Usage

-
NAME
-        nodetool stopdaemon - Stop cassandra daemon
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] stopdaemon
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/tablehistograms.html b/src/doc/4.0-beta1/tools/nodetool/tablehistograms.html deleted file mode 100644 index 43aa82369..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/tablehistograms.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablehistograms" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablehistograms

-
-
-

Usage

-
NAME
-        nodetool tablehistograms - Print statistic histograms for a given table
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablehistograms [--]
-                [<keyspace> <table> | <keyspace.table>]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <table> | <keyspace.table>]
-            The keyspace and table name
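-
-EXAMPLE
-        An illustrative invocation (the keyspace and table names are
-        assumptions). Either argument form shown in the synopsis is accepted:
-
-            nodetool tablehistograms keyspace1 standard1
-            nodetool tablehistograms keyspace1.standard1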
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/tablestats.html b/src/doc/4.0-beta1/tools/nodetool/tablestats.html deleted file mode 100644 index b7b194207..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/tablestats.html +++ /dev/null @@ -1,169 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tablestats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tablestats

-
-
-

Usage

-
NAME
-        nodetool tablestats - Print statistics on tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tablestats
-                [(-F <format> | --format <format>)] [(-H | --human-readable)] [-i]
-                [(-s <sort_key> | --sort <sort_key>)] [(-t <top> | --top <top>)] [--]
-                [<keyspace.table>...]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -H, --human-readable
-            Display bytes in human readable form, i.e. KiB, MiB, GiB, TiB
-
-        -i
-            Ignore the list of tables and display the remaining tables
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <sort_key>, --sort <sort_key>
-            Sort tables by specified sort key
-            (average_live_cells_per_slice_last_five_minutes,
-            average_tombstones_per_slice_last_five_minutes,
-            bloom_filter_false_positives, bloom_filter_false_ratio,
-            bloom_filter_off_heap_memory_used, bloom_filter_space_used,
-            compacted_partition_maximum_bytes, compacted_partition_mean_bytes,
-            compacted_partition_minimum_bytes,
-            compression_metadata_off_heap_memory_used, dropped_mutations,
-            full_name, index_summary_off_heap_memory_used, local_read_count,
-            local_read_latency_ms, local_write_latency_ms,
-            maximum_live_cells_per_slice_last_five_minutes,
-            maximum_tombstones_per_slice_last_five_minutes, memtable_cell_count,
-            memtable_data_size, memtable_off_heap_memory_used,
-            memtable_switch_count, number_of_partitions_estimate,
-            off_heap_memory_used_total, pending_flushes, percent_repaired,
-            read_latency, reads, space_used_by_snapshots_total, space_used_live,
-            space_used_total, sstable_compression_ratio, sstable_count,
-            table_name, write_latency, writes)
-
-        -t <top>, --top <top>
-            Show only the top K tables for the sort key (specify the number K of
-            tables to be shown)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace.table>...]
-            List of tables (or keyspace) names
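-
-EXAMPLE
-        Illustrative invocations (the keyspace and table names are
-        assumptions). The first prints human-readable statistics for the five
-        tables with the most reads; the second restricts output to one table:
-
-            nodetool tablestats -H -s reads -t 5
-            nodetool tablestats keyspace1.standard1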
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/toppartitions.html b/src/doc/4.0-beta1/tools/nodetool/toppartitions.html deleted file mode 100644 index dee978d04..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/toppartitions.html +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "toppartitions" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

toppartitions

-
-
-

Usage

-
NAME
-        nodetool toppartitions - Sample and print the most active partitions
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] toppartitions [-a <samplers>]
-                [-k <topCount>] [-s <capacity>] [--] <keyspace> <cfname> <duration>
-
-OPTIONS
-        -a <samplers>
-            Comma separated list of samplers to use (Default: all)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -k <topCount>
-            Number of the top samples to list (Default: 10)
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -s <capacity>
-            Capacity of the sampler, higher for more accuracy (Default: 256)
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <cfname> <duration>
-            The keyspace, column family name, and duration in milliseconds
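-
-EXAMPLE
-        An illustrative invocation (the keyspace and table names are
-        assumptions). This samples keyspace1.standard1 for 10000 milliseconds
-        and lists the most active partitions observed in that window:
-
-            nodetool toppartitions keyspace1 standard1 10000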
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/tpstats.html b/src/doc/4.0-beta1/tools/nodetool/tpstats.html deleted file mode 100644 index 68bc53165..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/tpstats.html +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "tpstats" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

tpstats

-
-
-

Usage

-
NAME
-        nodetool tpstats - Print usage statistics of thread pools
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] tpstats
-                [(-F <format> | --format <format>)]
-
-OPTIONS
-        -F <format>, --format <format>
-            Output format (json, yaml)
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
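-
-EXAMPLE
-        Illustrative invocations (sketch only; no particular cluster is
-        assumed). The first prints the default tabular output, the second
-        emits JSON for use in scripts:
-
-            nodetool tpstats
-            nodetool tpstats -F json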
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/truncatehints.html b/src/doc/4.0-beta1/tools/nodetool/truncatehints.html deleted file mode 100644 index 39712e7a8..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/truncatehints.html +++ /dev/null @@ -1,136 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "truncatehints" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

truncatehints

-
-
-

Usage

-
NAME
-        nodetool truncatehints - Truncate all hints on the local node, or
-        truncate hints for the endpoint(s) specified.
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] truncatehints [--] [endpoint
-                ... ]
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [endpoint ... ]
-            Endpoint address(es) to delete hints for, either ip address
-            ("127.0.0.1") or hostname
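-
-EXAMPLE
-        An illustrative invocation (the endpoint address is an assumption).
-        This deletes any stored hints destined for the given endpoint:
-
-            nodetool truncatehints 192.168.1.20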
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/upgradesstables.html b/src/doc/4.0-beta1/tools/nodetool/upgradesstables.html deleted file mode 100644 index 7347115b9..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/upgradesstables.html +++ /dev/null @@ -1,145 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "upgradesstables" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

upgradesstables

-
-
-

Usage

-
NAME
-        nodetool upgradesstables - Rewrite sstables (for the requested tables)
-        that are not on the current version (thus upgrading them to said current
-        version)
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] upgradesstables
-                [(-a | --include-all-sstables)] [(-j <jobs> | --jobs <jobs>)] [--]
-                [<keyspace> <tables>...]
-
-OPTIONS
-        -a, --include-all-sstables
-            Use -a to include all sstables, even those already on the current
-            version
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -j <jobs>, --jobs <jobs>
-            Number of sstables to upgrade simultaneously, set to 0 to use all
-            available compaction threads
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
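-
-EXAMPLE
-        An illustrative invocation (the keyspace and table names are
-        assumptions). This rewrites any older-format sstables of the given
-        table, running two upgrade jobs concurrently:
-
-            nodetool upgradesstables -j 2 keyspace1 standard1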
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/verify.html b/src/doc/4.0-beta1/tools/nodetool/verify.html deleted file mode 100644 index dc0488e9b..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/verify.html +++ /dev/null @@ -1,154 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "verify" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

verify

-
-
-

Usage

-
NAME
-        nodetool verify - Verify (check data checksum for) one or more tables
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] verify
-                [(-c | --check-version)] [(-d | --dfp)] [(-e | --extended-verify)]
-                [(-q | --quick)] [(-r | --rsc)] [(-t | --check-tokens)] [--] [<keyspace>
-                <tables>...]
-
-OPTIONS
-        -c, --check-version
-            Also check that all sstables are the latest version
-
-        -d, --dfp
-            Invoke the disk failure policy if a corrupt sstable is found
-
-        -e, --extended-verify
-            Verify each cell data, beyond simply checking sstable checksums
-
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -q, --quick
-            Do a quick check - avoid reading all data to verify checksums
-
-        -r, --rsc
-            Mutate the repair status on corrupt sstables
-
-        -t, --check-tokens
-            Verify that all tokens in sstables are owned by this node
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        [<keyspace> <tables>...]
-            The keyspace followed by one or many tables
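-
-EXAMPLE
-        An illustrative invocation (the keyspace and table names are
-        assumptions). The -e flag checks every cell rather than only the
-        sstable checksums:
-
-            nodetool verify -e keyspace1 standard1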
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/version.html b/src/doc/4.0-beta1/tools/nodetool/version.html deleted file mode 100644 index f7a80d870..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/version.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "version" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

version

-
-
-

Usage

-
NAME
-        nodetool version - Print cassandra version
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] version
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/nodetool/viewbuildstatus.html b/src/doc/4.0-beta1/tools/nodetool/viewbuildstatus.html deleted file mode 100644 index ada41afa3..000000000 --- a/src/doc/4.0-beta1/tools/nodetool/viewbuildstatus.html +++ /dev/null @@ -1,134 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "viewbuildstatus" -doc-header-links: ' - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

viewbuildstatus

-
-
-

Usage

-
NAME
-        nodetool viewbuildstatus - Show progress of a materialized view build
-
-SYNOPSIS
-        nodetool [(-h <host> | --host <host>)] [(-p <port> | --port <port>)]
-                [(-pp | --print-port)] [(-pw <password> | --password <password>)]
-                [(-pwf <passwordFilePath> | --password-file <passwordFilePath>)]
-                [(-u <username> | --username <username>)] viewbuildstatus [--]
-                <keyspace> <view> | <keyspace.view>
-
-OPTIONS
-        -h <host>, --host <host>
-            Node hostname or ip address
-
-        -p <port>, --port <port>
-            Remote jmx agent port number
-
-        -pp, --print-port
-            Operate in 4.0 mode with hosts disambiguated by port number
-
-        -pw <password>, --password <password>
-            Remote jmx agent password
-
-        -pwf <passwordFilePath>, --password-file <passwordFilePath>
-            Path to the JMX password file
-
-        -u <username>, --username <username>
-            Remote jmx agent username
-
-        --
-            This option can be used to separate command-line options from the
-            list of arguments (useful when arguments might be mistaken for
-            command-line options)
-
-        <keyspace> <view> | <keyspace.view>
-            The keyspace and view name
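-
-EXAMPLE
-        An illustrative invocation (the keyspace and view names are
-        assumptions). Either argument form shown in the synopsis is accepted:
-
-            nodetool viewbuildstatus keyspace1 view1
-            nodetool viewbuildstatus keyspace1.view1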
-
-
-
-
-
- - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/index.html b/src/doc/4.0-beta1/tools/sstable/index.html deleted file mode 100644 index 2c976e0d3..000000000 --- a/src/doc/4.0-beta1/tools/sstable/index.html +++ /dev/null @@ -1,229 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-title: "SSTable Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

SSTable Tools

-

This section describes the functionality of the various sstable tools.

-

Cassandra must be stopped before these tools are executed, or unexpected results will occur. Note: the scripts do not verify that Cassandra is stopped.

-
- -
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstabledump.html b/src/doc/4.0-beta1/tools/sstable/sstabledump.html deleted file mode 100644 index 67d3463fa..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstabledump.html +++ /dev/null @@ -1,404 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstabledump" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstabledump

-

Dump contents of a given SSTable to standard output in JSON format.

-

You must supply exactly one sstable.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstabledump <options> <sstable file path>

- ---- - - - - - - - - - - - - - - - - - - - - -
-d          CQL row per line internal representation
-e          Enumerate partition keys only
-k <arg>    Partition key
-x <arg>    Excluded partition key(s)
-t          Print raw timestamps instead of iso8601 date strings
-l          Output each row as a separate JSON object
-

If necessary, use sstableutil first to find out the sstables used by a table.

-
-
-

Dump entire table

-

Dump the entire table without any options.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db > eventlog_dump_2018Jul26
-
-cat eventlog_dump_2018Jul26
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-]
-
-
-
-
-

Dump table in a more manageable format

-

Use the -l option to dump each row as a separate JSON object. This will make the output easier to manipulate for large data sets. ref: https://issues.apache.org/jira/browse/CASSANDRA-13848

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -l > eventlog_dump_2018Jul26_justlines
-
-cat eventlog_dump_2018Jul26_justlines
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ],
-      "position" : 62
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 123,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:07.783522Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:07.789Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  },
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Dump only keys

-

Dump only the keys by using the -e option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -e > eventlog_dump_2018Jul26_justkeys
-
-cat eventlog_dump_2018Jul26_justkeys
-[ [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ], [ "d18250c0-84fc-4d40-b957-4248dc9d790e" ], [ "cf188983-d85b-48d6-9365-25005289beb2" ]
-
-
-
-
-

Dump row for a single key

-

Dump a single key using the -k option.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -k 3578d7de-c60d-4599-aefb-3f22a07b2bc6 > eventlog_dump_2018Jul26_singlekey
-
-cat eventlog_dump_2018Jul26_singlekey
-[
-  {
-    "partition" : {
-      "key" : [ "3578d7de-c60d-4599-aefb-3f22a07b2bc6" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 61,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:23:08.378711Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:23:08.384Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Exclude a key or keys in dump of rows

-

Dump a table except for the rows excluded with the -x option. Multiple keys can be used.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -x 3578d7de-c60d-4599-aefb-3f22a07b2bc6 d18250c0-84fc-4d40-b957-4248dc9d790e  > eventlog_dump_2018Jul26_excludekeys
-
-cat eventlog_dump_2018Jul26_excludekeys
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 0
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "2018-07-20T20:22:27.028809Z" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display raw timestamps

-

By default, dates are displayed in iso8601 date format. Using the -t option will dump the data with the raw timestamp.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -t -k cf188983-d85b-48d6-9365-25005289beb2 > eventlog_dump_2018Jul26_times
-
-cat eventlog_dump_2018Jul26_times
-[
-  {
-    "partition" : {
-      "key" : [ "cf188983-d85b-48d6-9365-25005289beb2" ],
-      "position" : 124
-    },
-    "rows" : [
-      {
-        "type" : "row",
-        "position" : 182,
-        "liveness_info" : { "tstamp" : "1532118147028809" },
-        "cells" : [
-          { "name" : "event", "value" : "party" },
-          { "name" : "insertedtimestamp", "value" : "2018-07-20 20:22:27.055Z" },
-          { "name" : "source", "value" : "asdf" }
-        ]
-      }
-    ]
-  }
-
-
-
-
-

Display internal structure in output

-

Dump the table in a format that reflects the internal structure.

-

Example:

-
sstabledump /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db -d > eventlog_dump_2018Jul26_d
-
-cat eventlog_dump_2018Jul26_d
-[3578d7de-c60d-4599-aefb-3f22a07b2bc6]@0 Row[info=[ts=1532118188378711] ]:  | [event=party ts=1532118188378711], [insertedtimestamp=2018-07-20 20:23Z ts=1532118188378711], [source=asdf ts=1532118188378711]
-[d18250c0-84fc-4d40-b957-4248dc9d790e]@62 Row[info=[ts=1532118187783522] ]:  | [event=party ts=1532118187783522], [insertedtimestamp=2018-07-20 20:23Z ts=1532118187783522], [source=asdf ts=1532118187783522]
-[cf188983-d85b-48d6-9365-25005289beb2]@124 Row[info=[ts=1532118147028809] ]:  | [event=party ts=1532118147028809], [insertedtimestamp=2018-07-20 20:22Z ts=1532118147028809], [source=asdf ts=1532118147028809]
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstableexpiredblockers.html b/src/doc/4.0-beta1/tools/sstable/sstableexpiredblockers.html deleted file mode 100644 index 13e5c5974..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstableexpiredblockers.html +++ /dev/null @@ -1,149 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableexpiredblockers" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableexpiredblockers

-

During compaction, entire sstables can be dropped if they contain only expired tombstones, and if it is guaranteed that the data is not newer than the data in other sstables. An expired sstable can be blocked from getting dropped if its newest timestamp is newer than the oldest data in another sstable.

-

This tool is used to list all sstables that are blocking other sstables from getting dropped (by having older data than the newest tombstone in an expired sstable) so a user can figure out why certain sstables are still on disk.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-10015

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableexpiredblockers <keyspace> <table>

-
-
-

Output blocked sstables

-

If sstables exist for the table, but no sstables contain data older than the newest tombstone in an expired sstable, the script will return nothing.

-

Otherwise, the script will return <sstable> blocks <#> expired sstables from getting dropped followed by a list of the blocked sstables.

-

Example:

-
sstableexpiredblockers keyspace1 standard1
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-2-big-Data.db') (minTS = 5, maxTS = 5, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-[BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-1-big-Data.db') (minTS = 1, maxTS = 10, maxLDT = 2147483647)],  blocks 1 expired sstables from getting dropped: [BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-0665ae80b2d711e886c66d2c86545d91/mc-3-big-Data.db') (minTS = 1536349775157606, maxTS = 1536349780311159, maxLDT = 1536349780)],
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstablelevelreset.html b/src/doc/4.0-beta1/tools/sstable/sstablelevelreset.html deleted file mode 100644 index f1fb83fdf..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstablelevelreset.html +++ /dev/null @@ -1,175 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablelevelreset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablelevelreset

-

If LeveledCompactionStrategy is set, this script can be used to reset level to 0 on a given set of sstables. This is useful if you want to, for example, change the minimum sstable size, and therefore restart the compaction process using this new configuration.

-

See http://cassandra.apache.org/doc/latest/operating/compaction.html#leveled-compaction-strategy for information on how levels are used in this compaction strategy.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5271

-
-

Usage

-

sstablelevelreset --really-reset <keyspace> <table>

-

The --really-reset flag is required to ensure this intrusive command is not run accidentally.

-
-
-

Table not found

-

If the keyspace and/or table is not in the schema (e.g., if you misspelled the table name), the script will return an error.

-

Example:

-
ColumnFamily not found: keyspace/evenlog.
-
-
-
-
-

Table has no sstables

-

Example:

-
Found no sstables, did you give the correct keyspace/table?
-
-
-
-
-

Table already at level 0

-

The script will not set the level if it is already set to 0.

-

Example:

-
Skipped /var/lib/cassandra/data/keyspace/eventlog-65c429e08c5a11e8939edf4f403979ef/mc-1-big-Data.db since it is already on level 0
-
-
-
-
-

Table levels reduced to 0

-

If the level is not already 0, then this will reset it to 0.

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 1
-
-sstablelevelreset --really-reset keyspace eventlog
-Changing level from 1 to 0 on /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-sstablemetadata /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db | grep -i level
-SSTable Level: 0
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstableloader.html b/src/doc/4.0-beta1/tools/sstable/sstableloader.html deleted file mode 100644 index d0f3abb31..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstableloader.html +++ /dev/null @@ -1,409 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableloader" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableloader

-

Bulk-load the sstables found in the directory <dir_path> to the configured cluster. The parent directories of <dir_path> are used as the target keyspace/table name. For example, to load an sstable named ma-1-big-Data.db into keyspace1/standard1, you will need to have the files ma-1-big-Data.db and ma-1-big-Index.db in a directory /path/to/keyspace1/standard1/. The tool will create new sstables, and does not clean up your copied files.

-

Several of the options listed below don’t work quite as intended, and in those cases, workarounds are mentioned for specific use cases.

-

To avoid the sstable files being compacted while they are read for loading, place the files in a keyspace/table path outside the data directory.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-1278

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableloader <options> <dir_path>

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-d, --nodes <initial hosts>                          Required. Try to connect to these hosts (comma-separated) initially for ring information
-u, --username <username>                            username for Cassandra authentication
-pw, --password <password>                           password for Cassandra authentication
-p, --port <native transport port>                   port used for native connection (default 9042)
-sp, --storage-port <storage port>                   port used for internode communication (default 7000)
-ssp, --ssl-storage-port <ssl storage port>          port used for TLS internode communication (default 7001)
--no-progress                                        don’t display progress
-t, --throttle <throttle>                            throttle speed in Mbits (default unlimited)
-idct, --inter-dc-throttle <inter-dc-throttle>       inter-datacenter throttle speed in Mbits (default unlimited)
-cph, --connections-per-host <connectionsPerHost>    number of concurrent connections-per-host
-i, --ignore <NODES>                                 don’t stream to this (comma separated) list of nodes
-alg, --ssl-alg <ALGORITHM>                          Client SSL: algorithm (default: SunX509)
-ciphers, --ssl-ciphers <CIPHER-SUITES>              Client SSL: comma-separated list of encryption suites to use
-ks, --keystore <KEYSTORE>                           Client SSL: full path to keystore
-kspw, --keystore-password <KEYSTORE-PASSWORD>       Client SSL: password of the keystore
-st, --store-type <STORE-TYPE>                       Client SSL: type of store
-ts, --truststore <TRUSTSTORE>                       Client SSL: full path to truststore
-tspw, --truststore-password <TRUSTSTORE-PASSWORD>   Client SSL: password of the truststore
-prtcl, --ssl-protocol <PROTOCOL>                    Client SSL: connections protocol to use (default: TLS)
-ap, --auth-provider <auth provider>                 custom AuthProvider class name for cassandra authentication
-f, --conf-path <path to config file>                cassandra.yaml file path for streaming throughput and client/server SSL
-v, --verbose                                        verbose output
-h, --help                                           display this help message
-

You can provide a cassandra.yaml file with the -f command line option to set up streaming throughput, and client and server encryption options. Only stream_throughput_outbound_megabits_per_sec, server_encryption_options, and client_encryption_options are read from yaml. You can override options read from cassandra.yaml with corresponding command line options.

-
-
-

Load sstables from a Snapshot

-

Copy the snapshot sstables into an accessible directory and use sstableloader to restore them.

-

Example:

-
cp snapshots/1535397029191/* /path/to/keyspace1/standard1/
-
-sstableloader --nodes 172.17.0.2 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-3-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4700000
-   Total duration (ms):          : 4390
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

The -d or --nodes option is required, or the script will not run.

-

Example:

-
sstableloader /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Initial hosts must be specified (-d)
-
-
-
-
-

Use a Config File for SSL Clusters

-

If SSL encryption is enabled in the cluster, use the --conf-path option with sstableloader to point the tool to the cassandra.yaml with the relevant server_encryption_options (e.g., truststore location, algorithm). This will work better than passing individual ssl options shown above to sstableloader on the command line.

-

Example:

-
sstableloader --nodes 172.17.0.2 --conf-path /etc/cassandra/cassandra.yaml /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/snapshots/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 9.165KiB/s (avg: 9.165KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 5.147MiB/s (avg: 18.299KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 9.751MiB/s (avg: 27.423KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 8.203MiB/s (avg: 36.524KiB/s)
-...
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 480.513KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 9356 ms
-   Average transfer rate   : 480.105KiB/s
-   Peak transfer rate      : 586.410KiB/s
-
-
-
-
-

Hide Progress Output

-

To hide the output of progress and the summary statistics (e.g., if you wanted to use this tool in a script), use the --no-progress option.

-

Example:

-
sstableloader --nodes 172.17.0.2 --no-progress /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-4-big-Data.db to [/172.17.0.2]
-
-
-
-
-

Get More Detail

-

Using the --verbose option will provide much more progress output.

-

Example:

-
sstableloader --nodes 172.17.0.2 --verbose /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-0974e5a0aa5811e8a0a06d2c86545d91/mc-1-big-Data.db  to [/172.17.0.2]
-progress: [/172.17.0.2]0:0/1 1  % total: 1% 12.056KiB/s (avg: 12.056KiB/s)
-progress: [/172.17.0.2]0:0/1 2  % total: 2% 9.092MiB/s (avg: 24.081KiB/s)
-progress: [/172.17.0.2]0:0/1 4  % total: 4% 18.832MiB/s (avg: 36.099KiB/s)
-progress: [/172.17.0.2]0:0/1 5  % total: 5% 2.253MiB/s (avg: 47.882KiB/s)
-progress: [/172.17.0.2]0:0/1 7  % total: 7% 6.388MiB/s (avg: 59.743KiB/s)
-progress: [/172.17.0.2]0:0/1 8  % total: 8% 14.606MiB/s (avg: 71.635KiB/s)
-progress: [/172.17.0.2]0:0/1 9  % total: 9% 8.880MiB/s (avg: 83.465KiB/s)
-progress: [/172.17.0.2]0:0/1 11 % total: 11% 5.217MiB/s (avg: 95.176KiB/s)
-progress: [/172.17.0.2]0:0/1 12 % total: 12% 12.563MiB/s (avg: 106.975KiB/s)
-progress: [/172.17.0.2]0:0/1 14 % total: 14% 2.550MiB/s (avg: 118.322KiB/s)
-progress: [/172.17.0.2]0:0/1 15 % total: 15% 16.638MiB/s (avg: 130.063KiB/s)
-progress: [/172.17.0.2]0:0/1 17 % total: 17% 17.270MiB/s (avg: 141.793KiB/s)
-progress: [/172.17.0.2]0:0/1 18 % total: 18% 11.280MiB/s (avg: 153.452KiB/s)
-progress: [/172.17.0.2]0:0/1 19 % total: 19% 2.903MiB/s (avg: 164.603KiB/s)
-progress: [/172.17.0.2]0:0/1 21 % total: 21% 6.744MiB/s (avg: 176.061KiB/s)
-progress: [/172.17.0.2]0:0/1 22 % total: 22% 6.011MiB/s (avg: 187.440KiB/s)
-progress: [/172.17.0.2]0:0/1 24 % total: 24% 9.690MiB/s (avg: 198.920KiB/s)
-progress: [/172.17.0.2]0:0/1 25 % total: 25% 11.481MiB/s (avg: 210.412KiB/s)
-progress: [/172.17.0.2]0:0/1 27 % total: 27% 9.957MiB/s (avg: 221.848KiB/s)
-progress: [/172.17.0.2]0:0/1 28 % total: 28% 10.270MiB/s (avg: 233.265KiB/s)
-progress: [/172.17.0.2]0:0/1 29 % total: 29% 7.812MiB/s (avg: 244.571KiB/s)
-progress: [/172.17.0.2]0:0/1 31 % total: 31% 14.843MiB/s (avg: 256.021KiB/s)
-progress: [/172.17.0.2]0:0/1 32 % total: 32% 11.457MiB/s (avg: 267.394KiB/s)
-progress: [/172.17.0.2]0:0/1 34 % total: 34% 6.550MiB/s (avg: 278.536KiB/s)
-progress: [/172.17.0.2]0:0/1 35 % total: 35% 9.115MiB/s (avg: 289.782KiB/s)
-progress: [/172.17.0.2]0:0/1 37 % total: 37% 11.054MiB/s (avg: 301.064KiB/s)
-progress: [/172.17.0.2]0:0/1 38 % total: 38% 10.449MiB/s (avg: 312.307KiB/s)
-progress: [/172.17.0.2]0:0/1 39 % total: 39% 1.646MiB/s (avg: 321.665KiB/s)
-progress: [/172.17.0.2]0:0/1 41 % total: 41% 13.300MiB/s (avg: 332.872KiB/s)
-progress: [/172.17.0.2]0:0/1 42 % total: 42% 14.370MiB/s (avg: 344.082KiB/s)
-progress: [/172.17.0.2]0:0/1 44 % total: 44% 16.734MiB/s (avg: 355.314KiB/s)
-progress: [/172.17.0.2]0:0/1 45 % total: 45% 22.245MiB/s (avg: 366.592KiB/s)
-progress: [/172.17.0.2]0:0/1 47 % total: 47% 25.561MiB/s (avg: 377.882KiB/s)
-progress: [/172.17.0.2]0:0/1 48 % total: 48% 24.543MiB/s (avg: 389.155KiB/s)
-progress: [/172.17.0.2]0:0/1 49 % total: 49% 4.894MiB/s (avg: 399.688KiB/s)
-progress: [/172.17.0.2]0:0/1 51 % total: 51% 8.331MiB/s (avg: 410.559KiB/s)
-progress: [/172.17.0.2]0:0/1 52 % total: 52% 5.771MiB/s (avg: 421.150KiB/s)
-progress: [/172.17.0.2]0:0/1 54 % total: 54% 8.738MiB/s (avg: 431.983KiB/s)
-progress: [/172.17.0.2]0:0/1 55 % total: 55% 3.406MiB/s (avg: 441.911KiB/s)
-progress: [/172.17.0.2]0:0/1 56 % total: 56% 9.791MiB/s (avg: 452.730KiB/s)
-progress: [/172.17.0.2]0:0/1 58 % total: 58% 3.401MiB/s (avg: 462.545KiB/s)
-progress: [/172.17.0.2]0:0/1 59 % total: 59% 5.280MiB/s (avg: 472.840KiB/s)
-progress: [/172.17.0.2]0:0/1 61 % total: 61% 12.232MiB/s (avg: 483.663KiB/s)
-progress: [/172.17.0.2]0:0/1 62 % total: 62% 9.258MiB/s (avg: 494.325KiB/s)
-progress: [/172.17.0.2]0:0/1 64 % total: 64% 2.877MiB/s (avg: 503.640KiB/s)
-progress: [/172.17.0.2]0:0/1 65 % total: 65% 7.461MiB/s (avg: 514.078KiB/s)
-progress: [/172.17.0.2]0:0/1 66 % total: 66% 24.247MiB/s (avg: 525.018KiB/s)
-progress: [/172.17.0.2]0:0/1 68 % total: 68% 9.348MiB/s (avg: 535.563KiB/s)
-progress: [/172.17.0.2]0:0/1 69 % total: 69% 5.130MiB/s (avg: 545.563KiB/s)
-progress: [/172.17.0.2]0:0/1 71 % total: 71% 19.861MiB/s (avg: 556.392KiB/s)
-progress: [/172.17.0.2]0:0/1 72 % total: 72% 15.501MiB/s (avg: 567.122KiB/s)
-progress: [/172.17.0.2]0:0/1 74 % total: 74% 5.031MiB/s (avg: 576.996KiB/s)
-progress: [/172.17.0.2]0:0/1 75 % total: 75% 22.771MiB/s (avg: 587.813KiB/s)
-progress: [/172.17.0.2]0:0/1 76 % total: 76% 22.780MiB/s (avg: 598.619KiB/s)
-progress: [/172.17.0.2]0:0/1 78 % total: 78% 20.684MiB/s (avg: 609.386KiB/s)
-progress: [/172.17.0.2]0:0/1 79 % total: 79% 22.920MiB/s (avg: 620.173KiB/s)
-progress: [/172.17.0.2]0:0/1 81 % total: 81% 7.458MiB/s (avg: 630.333KiB/s)
-progress: [/172.17.0.2]0:0/1 82 % total: 82% 22.993MiB/s (avg: 641.090KiB/s)
-progress: [/172.17.0.2]0:0/1 84 % total: 84% 21.392MiB/s (avg: 651.814KiB/s)
-progress: [/172.17.0.2]0:0/1 85 % total: 85% 7.732MiB/s (avg: 661.938KiB/s)
-progress: [/172.17.0.2]0:0/1 86 % total: 86% 3.476MiB/s (avg: 670.892KiB/s)
-progress: [/172.17.0.2]0:0/1 88 % total: 88% 19.889MiB/s (avg: 681.521KiB/s)
-progress: [/172.17.0.2]0:0/1 89 % total: 89% 21.077MiB/s (avg: 692.162KiB/s)
-progress: [/172.17.0.2]0:0/1 91 % total: 91% 24.062MiB/s (avg: 702.835KiB/s)
-progress: [/172.17.0.2]0:0/1 92 % total: 92% 19.798MiB/s (avg: 713.431KiB/s)
-progress: [/172.17.0.2]0:0/1 94 % total: 94% 17.591MiB/s (avg: 723.965KiB/s)
-progress: [/172.17.0.2]0:0/1 95 % total: 95% 13.725MiB/s (avg: 734.361KiB/s)
-progress: [/172.17.0.2]0:0/1 96 % total: 96% 16.737MiB/s (avg: 744.846KiB/s)
-progress: [/172.17.0.2]0:0/1 98 % total: 98% 22.701MiB/s (avg: 755.443KiB/s)
-progress: [/172.17.0.2]0:0/1 99 % total: 99% 18.718MiB/s (avg: 765.954KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 6.613MiB/s (avg: 767.802KiB/s)
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0.000KiB/s (avg: 670.295KiB/s)
-
-Summary statistics:
-   Connections per host    : 1
-   Total files transferred : 1
-   Total bytes transferred : 4.387MiB
-   Total duration          : 6706 ms
-   Average transfer rate   : 669.835KiB/s
-   Peak transfer rate      : 767.802KiB/s
-
-
-
-
-

Throttling Load

-

To prevent the table loader from overloading the system resources, you can throttle the process with the --throttle option. The default is unlimited (no throttling). Throttle units are in megabits. Note that the total duration is increased in the example below.

-

Example:

-
sstableloader --nodes 172.17.0.2 --throttle 1 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-6-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 0 MB/s)
-Summary statistics:
-   Connections per host:         : 1
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 37634
-   Average transfer rate (MB/s): : 0
-   Peak transfer rate (MB/s):    : 0
-
-
-
-
-

Speeding up Load

-

To speed up the load process, the number of connections per host can be increased.

-

Example:

-
sstableloader --nodes 172.17.0.2 --connections-per-host 100 /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/
-Established connection to initial hosts
-Opening sstables and calculating sections to stream
-Streaming relevant part of /var/lib/cassandra/loadme/keyspace1/standard1-f8a4fa30aa2a11e8af27091830ac5256/ma-9-big-Data.db to [/172.17.0.2]
-progress: [/172.17.0.2]0:1/1 100% total: 100% 0  MB/s(avg: 1 MB/s)
-Summary statistics:
-   Connections per host:         : 100
-   Total files transferred:      : 1
-   Total bytes transferred:      : 4595705
-   Total duration (ms):          : 3486
-   Average transfer rate (MB/s): : 1
-   Peak transfer rate (MB/s):    : 1
-
-
-

This small data set doesn’t benefit much from the increase in connections per host, but note that the total duration has decreased in this example.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstablemetadata.html b/src/doc/4.0-beta1/tools/sstable/sstablemetadata.html deleted file mode 100644 index f55747532..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstablemetadata.html +++ /dev/null @@ -1,473 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablemetadata" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablemetadata

-

Print information about an sstable from the related Statistics.db and Summary.db files to standard output.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7159 and https://issues.apache.org/jira/browse/CASSANDRA-10838

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablemetadata <options> <sstable filename(s)>

- ---- - - - - - -
--gc_grace_seconds <arg>    The gc_grace_seconds to use when calculating droppable tombstones
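
A minimal illustrative example (the sstable path reuses the one shown in the gc_grace_seconds example below; the path and generation number on your system will differ). Run with no options to print all of the values explained in the table at the end of this page:

    sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db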
-
- -
-

Specify gc grace seconds

-

To see the ratio of droppable tombstones given a configured gc grace seconds, use the gc_grace_seconds option. Because the sstablemetadata tool doesn’t access the schema directly, this is a way to more accurately estimate droppable tombstones – for example, if you pass in gc_grace_seconds matching what is configured in the schema. The gc_grace_seconds value provided is subtracted from the current machine time (in seconds).

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-12208

-

Example:

-
sstablemetadata /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated tombstone drop times" -A4
-Estimated tombstone drop times:
-1536599100:         1
-1536599640:         1
-1536599700:         2
-
-echo $(date +%s)
-1536602005
-
-# if gc_grace_seconds was configured at 100, all of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 100 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 4.0E-5
-
-# if gc_grace_seconds was configured at 4700, some of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 4700 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 9.61111111111111E-6
-
-# if gc_grace_seconds was configured at 100, none of the tombstones would be currently droppable
-sstablemetadata --gc_grace_seconds 5000 /var/lib/cassandra/data/keyspace1/standard1-41b52700b4ed11e896476d2c86545d91/mc-12-big-Data.db | grep "Estimated droppable tombstones"
-Estimated droppable tombstones: 0.0
-
-
-
-
-

Explanation of each value printed above

- ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Value                                   Explanation
SSTable                                 prefix of the sstable filenames related to this sstable
Partitioner                             partitioner type used to distribute data across nodes; defined in cassandra.yaml
Bloom Filter FP                         precision of Bloom filter used in reads; defined in the table definition
Minimum timestamp                       minimum timestamp of any entry in this sstable, in epoch microseconds
Maximum timestamp                       maximum timestamp of any entry in this sstable, in epoch microseconds
SSTable min local deletion time         minimum timestamp of deletion date, based on TTL, in epoch seconds
SSTable max local deletion time         maximum timestamp of deletion date, based on TTL, in epoch seconds
Compressor                              blank (-) by default; if not blank, indicates type of compression enabled on the table
TTL min                                 time-to-live in seconds; default 0 unless defined in the table definition
TTL max                                 time-to-live in seconds; default 0 unless defined in the table definition
First token                             lowest token and related key found in the sstable summary
Last token                              highest token and related key found in the sstable summary
Estimated droppable tombstones          ratio of tombstones to columns, using configured gc grace seconds if relevant
SSTable level                           compaction level of this sstable, if leveled compaction (LCS) is used
Repaired at                             the timestamp this sstable was marked as repaired via sstablerepairedset, in epoch milliseconds
Replay positions covered                the interval of time and commitlog positions related to this sstable
totalColumnsSet                         number of cells in the table
totalRows                               number of rows in the table
Estimated tombstone drop times          approximate number of rows that will expire, ordered by epoch seconds
Count  Row Size  Cell Count             two histograms in two columns; one represents distribution of Row Size and the other represents distribution of Cell Count
Estimated cardinality                   an estimate of unique values, used for compaction
EncodingStats* minTTL                   in epoch milliseconds
EncodingStats* minLocalDeletionTime     in epoch seconds
EncodingStats* minTimestamp             in epoch microseconds
KeyType                                 the type of partition key, useful in reading and writing data from/to storage; defined in the table definition
ClusteringTypes                         the type of clustering key, useful in reading and writing data from/to storage; defined in the table definition
StaticColumns                           a list of the shared columns in the table
RegularColumns                          a list of non-static, non-key columns in the table
-
    -
  • For the encoding stats values, the delta of this and the current epoch time is used when encoding and storing data in the most optimal way.
  • -
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstableofflinerelevel.html b/src/doc/4.0-beta1/tools/sstable/sstableofflinerelevel.html deleted file mode 100644 index 7539934ef..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstableofflinerelevel.html +++ /dev/null @@ -1,190 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableofflinerelevel" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableofflinerelevel

-

When using LeveledCompactionStrategy, sstables can get stuck at L0 on a recently bootstrapped node, and compactions may never catch up. This tool is used to bump sstables into the highest level possible.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-8301

-

The way this is done is: sstables are sorted by their last token. Given an original leveling like this (note that [ ] indicates token boundaries, not sstable size on disk; all sstables are the same size):

-
L3 [][][][][][][][][][][]
-L2 [    ][    ][    ][  ]
-L1 [          ][        ]
-L0 [                    ]
-
-
-

Will look like this after being dropped to L0 and sorted by last token (and, to illustrate overlap, the overlapping ones are put on a new line):

-
[][][]
-[    ][][][]
-    [    ]
-[          ]
-...
-
-
-

Then, we start iterating from the smallest last-token and adding all sstables that do not cause an overlap to a level. We will reconstruct the original leveling top-down. Whenever we add an sstable to the level, we remove it from the sorted list. Once we reach the end of the sorted list, we have a full level, and can start over with the level below.

-

If we end up with more levels than expected, we put all levels exceeding the expected count in L0. For example, the original L0 files will most likely end up in a level of their own, since they most often overlap many other sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableofflinerelevel [--dry-run] <keyspace> <table>

-
-
-

Doing a dry run

-

Use the --dry-run option to see the current level distribution and predicted level after the change.

-

Example:

-
sstableofflinerelevel --dry-run keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-Potential leveling:
-L0=1
-L1=1
-
-
-
-
-

Running a relevel

-

Example:

-
sstableofflinerelevel keyspace eventlog
-For sstables in /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753:
-Current leveling:
-L0=2
-New leveling:
-L0=1
-L1=1
-
-
-
-
-

Keyspace or table not found

-

If an invalid keyspace and/or table is provided, an exception will be thrown.

-

Example:

-
sstableofflinerelevel --dry-run keyspace evenlog
-
-Exception in thread "main" java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.evenlog
-    at org.apache.cassandra.tools.SSTableOfflineRelevel.main(SSTableOfflineRelevel.java:96)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstablerepairedset.html b/src/doc/4.0-beta1/tools/sstable/sstablerepairedset.html deleted file mode 100644 index cb09eda8f..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstablerepairedset.html +++ /dev/null @@ -1,193 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablerepairedset" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablerepairedset

-

Repairs can take a very long time in some environments, for large sizes of data. Use this tool to set the repairedAt status on a given set of sstables, so that repairs can be run on only un-repaired sstables if desired.

-

Note that running a repair (e.g., via nodetool repair) doesn’t set the status of this metadata. Only setting the status of this metadata via this tool does.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5351

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablerepairedset --really-set <options> [-f <sstable-list> | <sstables>]

- ---- - - - - - - - - - - - - - - -
--really-set       required if you want to really set the status
--is-repaired      set the repairedAt status to the last modified time
--is-unrepaired    set the repairedAt status to 0
-f                 use a file containing a list of sstables as the input
-
-
-

Set a lot of sstables to unrepaired status

-

There are many ways to do this programmatically. This way would likely include variables for the keyspace and table.

-

Example:

-
find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-unrepaired %
-
-
-
-
-

Set one to many sstables to repaired status

-

Set the repairedAt status after a repair to mark the sstables as repaired. Again, using variables for the keyspace and table names is a good choice.

-

Example:

-
nodetool repair keyspace1 standard1
-find /var/lib/cassandra/data/keyspace1/standard1-d936bd20a17c11e8bc92a55ed562cd82/* -name "*Data.db" -print0 | xargs -0 -I % sstablerepairedset --really-set --is-repaired %
-
-
-
- -
-

Using command in a script

-

If you know you ran repair 2 weeks ago, you can do something like the following:

-
sstablerepairedset --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14)
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstablescrub.html b/src/doc/4.0-beta1/tools/sstable/sstablescrub.html deleted file mode 100644 index d404a2286..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstablescrub.html +++ /dev/null @@ -1,211 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablescrub" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablescrub

-

Fix a broken sstable. The scrub process rewrites the sstable, skipping any corrupted rows. Because these rows are lost, follow this process with a repair.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4321

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablescrub <options> <keyspace> <table>

- ---- - - - - - - - - - - - - - - - - - - - - - - - -
--debug                        display stack traces
-h, --help                     display this help message
-m, --manifest-check           only check and repair the leveled manifest, without actually scrubbing the sstables
-n, --no-validate              do not validate columns using column validator
-r, --reinsert-overflowed-ttl  rewrites rows with overflowed expiration dates affected by CASSANDRA-14092 with the maximum supported expiration date of 2038-01-19T03:14:06+00:00; the rows are rewritten with the original timestamp incremented by one millisecond to override/supersede any potential tombstone that may have been generated during compaction of the affected rows
-s, --skip-corrupted           skip corrupt rows in counter tables
-v, --verbose                  verbose output
-
-
-

Basic Scrub

-

The scrub without options will do a snapshot first, then write all non-corrupted files to a new sstable.

-

Example:

-
sstablescrub keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1534424070883
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') (17.142MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-6365332094dd11e88f324f9c503e4753/mc-5-big-Data.db') complete: 73367 rows in new sstable and 0 empty (tombstoned) rows dropped
-Checking leveled manifest
-
-
-
-
-

Scrub without Validation

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-9406

-

Use the --no-validate option to retain data that may be misrepresented (e.g., an integer stored in a long field) but not corrupt. This data usually does not present any errors to the client.

-

Example:

-
sstablescrub --no-validate keyspace1 standard1
-Pre-scrub sstables snapshotted into snapshot pre-scrub-1536243158517
-Scrubbing BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') (4.482MiB)
-Scrub of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-bc9cf530b1da11e886c66d2c86545d91/mc-2-big-Data.db') complete; looks like all 0 rows were tombstoned
-
-
-
-
-

Skip Corrupted Counter Tables

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5930

-

If counter tables are corrupted in a way that prevents sstablescrub from completing, you can use the --skip-corrupted option to skip scrubbing those counter tables. This workaround is not necessary in versions 2.0+.

-

Example:

-
sstablescrub --skip-corrupted keyspace1 counter1
-
-
-
-
-

Dealing with Overflow Dates

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-14092

-

Using the --reinsert-overflowed-ttl option allows rewriting of rows whose TTL pushed the expiration date past the supported maximum (causing an overflow).

-

Example:

-
sstablescrub --reinsert-overflowed-ttl keyspace1 counter1
-
-
-
-
-

Manifest Check

-

As of Cassandra version 2.0, this option is no longer relevant, since level data was moved from a separate manifest into the sstable metadata.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstablesplit.html b/src/doc/4.0-beta1/tools/sstable/sstablesplit.html deleted file mode 100644 index 570a62a4b..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstablesplit.html +++ /dev/null @@ -1,202 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstablesplit" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstablesplit

-

Big sstable files can take up a lot of disk space. The sstablesplit tool can be used to split those large files into smaller files. It can be thought of as a type of anticompaction.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-4766

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstablesplit <options> <filename>

- ---- - - - - - - - - - - - - - - -
--debug              display stack traces
-h, --help           display this help message
--no-snapshot        don't snapshot the sstables before splitting
-s, --size <size>    maximum size in MB for the output sstables (default: 50)
-

This command should be run with Cassandra stopped. Note: the script does not verify that Cassandra is stopped.

-
-
-

Split a File

-

Split a large sstable into smaller sstables. By default, unless the --no-snapshot option is added, a snapshot of the original sstable will be taken and placed in the snapshots folder.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-
-Pre-split sstables snapshotted into snapshot pre-split-1533144514795
-
-
-
-
-

Split Multiple Files

-

Wildcards can be used in the filename portion of the command to split multiple files.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-1*
-
-
-
-
-

Attempt to Split a Small File

-

If the file is already smaller than the split size provided, the sstable will not be split.

-

Example:

-
sstablesplit /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db
-Skipping /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-8-big-Data.db: it's size (1.442 MB) is less than the split size (50 MB)
-No sstables needed splitting.
-
-
-
-
-

Split a File into Specified Size

-

The default size used for splitting is 50MB. Specify another size with the --size option. The size is in megabytes (MB). Specify only the number, not the units. For example --size 50 is correct, but --size 50MB is not.

-

Example:

-
sstablesplit --size 1 /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-9-big-Data.db
-Pre-split sstables snapshotted into snapshot pre-split-1533144996008
-
-
-
-
-

Split Without Snapshot

-

By default, sstablesplit will create a snapshot before splitting. If a snapshot is not needed, use the --no-snapshot option to skip it.

-

Example:

-
sstablesplit --size 1 --no-snapshot /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-11-big-Data.db
-
-
-

Note: There is no output, but you can see the results in your file system.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstableupgrade.html b/src/doc/4.0-beta1/tools/sstable/sstableupgrade.html deleted file mode 100644 index c8d04fad5..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstableupgrade.html +++ /dev/null @@ -1,249 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableupgrade" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableupgrade

-

Upgrade the sstables in the given table (or snapshot) to the current version of Cassandra. This process is typically done after a Cassandra version upgrade. This operation will rewrite the sstables in the specified table to match the currently installed version of Cassandra. The sstableupgrade command can also be used to downgrade sstables to a previous version.

-

The snapshot option will only upgrade the specified snapshot. Upgrading snapshots is required before attempting to restore a snapshot taken in a major version older than the major version Cassandra is currently running. This will replace the files in the given snapshot as well as break any hard links to live sstables.

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableupgrade <options> <keyspace> <table> [snapshot_name]

- ---- - - - - - - - - - - - -
--debug              display stack traces
-h, --help           display this help message
-k, --keep-source    do not delete the source sstables
-
-
-

Rewrite tables to the current Cassandra version

-

Start with a set of sstables in one version of Cassandra:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:45 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:45 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:45 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:45 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:45 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:45 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:45 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:45 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables:

-
sstableupgrade keyspace1 standard1
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-9695b790a63211e8a6fb091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 13:48 backups
--rw-r--r--   1 user  wheel      292 Aug 22 13:48 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4599475 Aug 22 13:48 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:48 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 13:48 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330807 Aug 22 13:48 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 13:48 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 13:48 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 13:48 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite tables to the current Cassandra version, and keep tables in old version

-

Again, starting with a set of sstables in one version:

-
ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
--rw-r--r--   1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--   1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--   1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--   1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--   1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--   1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--   1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
-
-
-

After upgrading the Cassandra version, upgrade the sstables, retaining the original sstables:

-
sstableupgrade keyspace1 standard1 -k
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/keyspace1-standard1-ka-1-Data.db') complete.
-
-ls -al /tmp/cassandra/data/keyspace1/standard1-db532690a63411e8b4ae091830ac5256/
-...
-drwxr-xr-x   2 user  wheel       64 Aug 22 14:00 backups
--rw-r--r--@  1 user  wheel      348 Aug 22 13:58 keyspace1-standard1-ka-1-CRC.db
--rw-r--r--@  1 user  wheel  5620000 Aug 22 13:58 keyspace1-standard1-ka-1-Data.db
--rw-r--r--@  1 user  wheel       10 Aug 22 13:58 keyspace1-standard1-ka-1-Digest.sha1
--rw-r--r--@  1 user  wheel    25016 Aug 22 13:58 keyspace1-standard1-ka-1-Filter.db
--rw-r--r--@  1 user  wheel   480000 Aug 22 13:58 keyspace1-standard1-ka-1-Index.db
--rw-r--r--@  1 user  wheel     9895 Aug 22 13:58 keyspace1-standard1-ka-1-Statistics.db
--rw-r--r--@  1 user  wheel     3562 Aug 22 13:58 keyspace1-standard1-ka-1-Summary.db
--rw-r--r--@  1 user  wheel       79 Aug 22 13:58 keyspace1-standard1-ka-1-TOC.txt
--rw-r--r--   1 user  wheel      292 Aug 22 14:01 mc-2-big-CRC.db
--rw-r--r--   1 user  wheel  4596370 Aug 22 14:01 mc-2-big-Data.db
--rw-r--r--   1 user  wheel       10 Aug 22 14:01 mc-2-big-Digest.crc32
--rw-r--r--   1 user  wheel    25256 Aug 22 14:01 mc-2-big-Filter.db
--rw-r--r--   1 user  wheel   330801 Aug 22 14:01 mc-2-big-Index.db
--rw-r--r--   1 user  wheel    10312 Aug 22 14:01 mc-2-big-Statistics.db
--rw-r--r--   1 user  wheel     3506 Aug 22 14:01 mc-2-big-Summary.db
--rw-r--r--   1 user  wheel       80 Aug 22 14:01 mc-2-big-TOC.txt
-
-
-
-
-

Rewrite a snapshot to the current Cassandra version

-

Find the snapshot name:

-
nodetool listsnapshots
-
-Snapshot Details:
-Snapshot name       Keyspace name                Column family name           True size          Size on disk
-...
-1534962986979       keyspace1                    standard1                    5.85 MB            5.85 MB
-
-
-

Then rewrite the snapshot:

-
sstableupgrade keyspace1 standard1 1534962986979
-Found 1 sstables that need upgrading.
-Upgrading BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db')
-Upgrade of BigTableReader(path='/var/lib/cassandra/data/keyspace1/standard1-5850e9f0a63711e8a5c5091830ac5256/snapshots/1534962986979/keyspace1-standard1-ka-1-Data.db') complete.
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstableutil.html b/src/doc/4.0-beta1/tools/sstable/sstableutil.html deleted file mode 100644 index 53e44e29c..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstableutil.html +++ /dev/null @@ -1,205 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableutil" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableutil

-

List sstable files for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-7066

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableutil <options> <keyspace> <table>

- ---- - - - - - - - - - - - - - - - - - - - - -
-c, --cleanup       clean up any outstanding transactions
-d, --debug         display stack traces
-h, --help          display this help message
-o, --oplog         include operation logs
-t, --type <arg>    all (list all files, final or temporary), tmp (list temporary files only), final (list final files only)
-v, --verbose       verbose output
-
-
-

List all sstables

-

The basic command lists the sstables associated with a given keyspace/table.

-

Example:

-
sstableutil keyspace eventlog
-Listing files...
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-TOC.txt
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-CRC.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Digest.crc32
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Filter.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Index.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Statistics.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Summary.db
-/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-TOC.txt
-
-
-
-
-

List only temporary sstables

-

Using the -t option followed by tmp will list all temporary sstables, in the format above. Temporary sstables were used in pre-3.0 versions of Cassandra.
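For example, to list only the temporary sstables for the table used in the earlier listing (a sketch; the keyspace and table names are placeholders):

sstableutil -t tmp keyspace eventlog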

-
-
-

List only final sstables

-

Using the -t option followed by final will list all final sstables, in the format above. In recent versions of Cassandra, this is the same output as not using the -t option.
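For example (a sketch, using the same placeholder keyspace and table as above):

sstableutil -t final keyspace eventlog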

-
-
-

Include transaction logs

-

Using the -o option will include transaction logs in the listing, in the format above.
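For example (a sketch; substitute your own keyspace and table names):

sstableutil -o keyspace eventlog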

-
-
-

Clean up sstables

-

Using the -c option removes any transactions left over from incomplete writes or compactions.
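For example (a sketch; as noted above, run this only while Cassandra is stopped):

sstableutil -c keyspace eventlog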

-

From the 3.0 upgrade notes:

-

New transaction log files have been introduced to replace the compactions_in_progress system table, temporary file markers (tmp and tmplink) and sstable ancestors. Therefore, compaction metadata no longer contains ancestors. Transaction log files list sstable descriptors involved in compactions and other operations such as flushing and streaming. Use the sstableutil tool to list any sstable files currently involved in operations not yet completed, which previously would have been marked as temporary. A transaction log file contains one sstable per line, with the prefix “add:” or “remove:”. They also contain a special line “commit”, only inserted at the end when the transaction is committed. On startup we use these files to cleanup any partial transactions that were in progress when the process exited. If the commit line is found, we keep new sstables (those with the “add” prefix) and delete the old sstables (those with the “remove” prefix), vice-versa if the commit line is missing. Should you lose or delete these log files, both old and new sstable files will be kept as live files, which will result in duplicated sstables. These files are protected by incremental checksums so you should not manually edit them. When restoring a full backup or moving sstable files, you should clean-up any left over transactions and their temporary files first.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/tools/sstable/sstableverify.html b/src/doc/4.0-beta1/tools/sstable/sstableverify.html deleted file mode 100644 index 5dc145d61..000000000 --- a/src/doc/4.0-beta1/tools/sstable/sstableverify.html +++ /dev/null @@ -1,205 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Cassandra Tools" - -doc-parent: "SSTable Tools" - -doc-title: "sstableverify" -doc-header-links: ' - - - - -' -doc-search-path: "../../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

sstableverify

-

Check sstable(s) for errors or corruption, for the provided table.

-

ref: https://issues.apache.org/jira/browse/CASSANDRA-5791

-

Cassandra must be stopped before this tool is executed, or unexpected results will occur. Note: the script does not verify that Cassandra is stopped.

-
-

Usage

-

sstableverify <options> <keyspace> <table>

- ---- - - - - - - - - - - - - - - -
--debug           display stack traces
-e, --extended    extended verification
-h, --help        display this help message
-v, --verbose     verbose output
-
-
-

Basic Verification

-

This is the basic verification. It is not a very quick process, and uses memory. You might need to increase your memory settings if you have many sstables.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-
-
-
-
-

Extended Verification

-

During an extended verification, the individual values will be validated for errors or corruption. This of course takes more time.

-

Example:

-
root@DC1C1:/# sstableverify -e keyspace eventlog
-WARN  14:08:06,255 Only 33.096GiB free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') (7.353MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-32-big-Data.db') succeeded. All 33211 rows read successfully
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') (3.775MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db')
-Extended Verify requested, proceeding to inspect values
-Verify of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-37-big-Data.db') succeeded. All 17068 rows read successfully
-
-
-
-
-

Corrupted File

-

Corrupted files are listed if they are detected by the script.

-

Example:

-
sstableverify keyspace eventlog
-Verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db') (7.416MiB)
-Deserializing sstable metadata for BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Checking computed hash of BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db')
-Error verifying BigTableReader(path='/var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db'): Corrupted: /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db
-
-
-

A similar (but less verbose) tool will show the suggested actions:

-
nodetool verify keyspace eventlog
-error: Invalid SSTable /var/lib/cassandra/data/keyspace/eventlog-6365332094dd11e88f324f9c503e4753/mc-40-big-Data.db, please force repair
-
-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/troubleshooting/finding_nodes.html b/src/doc/4.0-beta1/troubleshooting/finding_nodes.html deleted file mode 100644 index 56ae1a9f9..000000000 --- a/src/doc/4.0-beta1/troubleshooting/finding_nodes.html +++ /dev/null @@ -1,241 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Find The Misbehaving Nodes" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Find The Misbehaving Nodes

-

The first step to troubleshooting a Cassandra issue is to use error messages, -metrics and monitoring information to identify if the issue lies with the -clients or the server and if it does lie with the server find the problematic -nodes in the Cassandra cluster. The goal is to determine if this is a systemic -issue (e.g. a query pattern that affects the entire cluster) or isolated to a -subset of nodes (e.g. neighbors holding a shared token range or even a single -node with bad hardware).

-

There are many sources of information that help determine where the problem -lies. Some of the most common are mentioned below.

-
-

Client Logs and Errors

-

Clients of the cluster often leave the best breadcrumbs to follow. Perhaps client latencies or error rates have increased in a particular datacenter (likely eliminating other datacenters' nodes), or clients are receiving a particular kind of error code indicating a particular kind of problem. Troubleshooters can often rule out many failure modes just by reading the error messages. In fact, many Cassandra error messages include the last coordinator contacted to help operators find nodes to start with.

-

Some common errors (likely culprit in parentheses), assuming the client has error names similar to those of the DataStax drivers:

-
    -
  • SyntaxError (client). This and other QueryValidationException errors indicate that the client sent a malformed request. These are rarely server issues and usually indicate bad queries.
  • -
  • UnavailableException (server): This means that the Cassandra coordinator node has rejected the query as it believes that insufficient replica nodes are available. If many coordinators are throwing this error it likely means that there really are (typically) multiple nodes down in the cluster, and you can identify them using nodetool status. If only a single coordinator is throwing this error it may mean that node has been partitioned from the rest.
  • -
  • OperationTimedOutException (server): This is the most frequent timeout message raised when clients set timeouts; it is a client-side timeout, meaning the query took longer than the client-specified timeout. The error message will include the coordinator node that was last tried, which is usually a good starting point. This error usually indicates either aggressive client timeout values or latent server coordinators/replicas.
  • -
  • ReadTimeoutException or WriteTimeoutException (server): These are raised when clients do not specify lower timeouts and the coordinator times out based on the values supplied in the cassandra.yaml configuration file. They usually indicate a serious server-side problem, as the default values are usually multiple seconds.
  • -
-
-
-

Metrics

-

If you have Cassandra metrics reporting to a -centralized location such as Graphite or -Grafana you can typically use those to narrow down -the problem. At this stage narrowing down the issue to a particular -datacenter, rack, or even group of nodes is the main goal. Some helpful metrics -to look at are:

-
-

Errors

-

Cassandra refers to internode messaging errors as "drops", and provides a number of Dropped Message Metrics to help narrow down errors. If particular nodes are actively dropping messages, they are likely related to the issue.
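One quick way to check for dropped messages on a suspect node (a sketch; the exact output layout varies by version) is nodetool tpstats, which reports a per-message-type count of dropped messages at the end of its output:

$ nodetool tpstats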

-
-
-

Latency

-

For timeouts or latency related issues you can start with Table -Metrics by comparing Coordinator level metrics e.g. -CoordinatorReadLatency or CoordinatorWriteLatency with their associated -replica metrics e.g. ReadLatency or WriteLatency. Issues usually show -up on the 99th percentile before they show up on the 50th percentile or -the mean. While maximum coordinator latencies are not typically very -helpful due to the exponentially decaying reservoir used internally to produce -metrics, maximum replica latencies that correlate with increased 99th -percentiles on coordinators can help narrow down the problem.
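As a starting point on a suspect node, you can put the coordinator-level and replica-level latency histograms side by side using the nodetool commands covered later in this guide (a sketch; the keyspace and table names are placeholders):

$ nodetool proxyhistograms
$ nodetool tablehistograms keyspace eventlog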

-

There are usually three main possibilities:

-
    -
  1. Coordinator latencies are high on all nodes, but only a few nodes' local read latencies are high. This points to slow replica nodes, and the coordinator latencies are just side effects. This usually happens when clients are not token aware.
  2. Coordinator latencies and replica latencies increase at the same time on a few nodes. If clients are token aware this is almost always what happens, and it points to slow replicas of a subset of token ranges (only part of the ring).
  3. Coordinator and local latencies are high on many nodes. This usually indicates either a tipping point in the cluster capacity (too many writes or reads per second), or a new query pattern.
-

It's important to remember that, depending on the client's load balancing behavior and consistency levels, coordinator and replica metrics may or may not correlate. In particular, if you use TokenAware policies the same node's coordinator and replica latencies will often increase together, but if you just use normal DCAwareRoundRobin, coordinator latencies can increase with unrelated replica nodes' latencies. For example:

-
    -
  • TokenAware + LOCAL_ONE: should always have coordinator and replica -latencies on the same node rise together
  • -
  • TokenAware + LOCAL_QUORUM: should always have coordinator and -multiple replica latencies rise together in the same datacenter.
  • -
  • TokenAware + QUORUM: replica latencies in other datacenters can -affect coordinator latencies.
  • -
  • DCAwareRoundRobin + LOCAL_ONE: coordinator latencies and unrelated replica nodes' latencies will rise together.
  • -
  • DCAwareRoundRobin + LOCAL_QUORUM: different coordinator and replica -latencies will rise together with little correlation.
  • -
-
-
-

Query Rates

-

Sometimes the Table query rate metrics can help narrow down load issues, as a "small" increase in coordinator queries per second (QPS) may correlate with a very large increase in replica level QPS. This most often happens with BATCH writes, where a client may send a single BATCH query that might contain 50 statements in it, which if you have 9 copies (RF=3, three datacenters) means that every coordinator BATCH write turns into 450 replica writes! This is why keeping BATCHes to the same partition is so critical, otherwise you can exhaust significant CPU capacity with a "single" query.

-
-
-
-

Next Step: Investigate the Node(s)

-

Once you have narrowed down the problem as much as possible (datacenter, rack, node), log in to one of the nodes using SSH and proceed to debug using logs, nodetool, and os tools. If you are not able to log in you may still have access to logs and nodetool remotely.

-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/troubleshooting/index.html b/src/doc/4.0-beta1/troubleshooting/index.html deleted file mode 100644 index 0aa5a8916..000000000 --- a/src/doc/4.0-beta1/troubleshooting/index.html +++ /dev/null @@ -1,148 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-title: "Troubleshooting" -doc-header-links: ' - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Troubleshooting

-

As any distributed database does, sometimes Cassandra breaks and you will have to troubleshoot what is going on. Generally speaking you can debug Cassandra like any other distributed Java program, meaning that you have to find which machines in your cluster are misbehaving and then isolate the problem using logs and tools. Luckily Cassandra has a great set of introspection tools to help you.

-

These pages include a number of command examples demonstrating various -debugging and analysis techniques, mostly for Linux/Unix systems. If you don’t -have access to the machines running Cassandra, or are running on Windows or -another operating system you may not be able to use the exact commands but -there are likely equivalent tools you can use.

- -
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/troubleshooting/reading_logs.html b/src/doc/4.0-beta1/troubleshooting/reading_logs.html deleted file mode 100644 index 75bb366d1..000000000 --- a/src/doc/4.0-beta1/troubleshooting/reading_logs.html +++ /dev/null @@ -1,351 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Cassandra Logs" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Cassandra Logs

-

Cassandra has rich support for logging and attempts to give operators maximum -insight into the database while at the same time limiting noise to the logs.

-
-

Common Log Files

-

Cassandra has three main logs, the system.log, debug.log and -gc.log which hold general logging messages, debugging logging messages, and -java garbage collection logs respectively.

-

These logs by default live in ${CASSANDRA_HOME}/logs, but most Linux -distributions relocate logs to /var/log/cassandra. Operators can tune -this location as well as what levels are logged using the provided -logback.xml file.
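For example, the level used for the Cassandra packages is controlled by a logger rule such as the following line from logback.xml (shown as a sketch; see the file shipped with your installation for the full layout):

<logger name="org.apache.cassandra" level="DEBUG"/>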

-
-

system.log

-

This log is the default Cassandra log and is a good place to start any -investigation. Some examples of activities logged to this log:

-
    -
  • Uncaught exceptions. These can be very useful for debugging errors.
  • -
  • GCInspector messages indicating long garbage collector pauses. When long -pauses happen Cassandra will print how long and also what was the state of -the system (thread state) at the time of that pause. This can help narrow -down a capacity issue (either not enough heap or not enough spare CPU).
  • -
  • Information about nodes joining and leaving the cluster as well as token metadata (data ownership) changes. This is useful for debugging network partitions, data movements, and more.
  • -
  • Keyspace/Table creation, modification, deletion.
  • -
  • StartupChecks that ensure optimal configuration of the operating system -to run Cassandra
  • -
  • Information about some background operational tasks (e.g. Index -Redistribution).
  • -
-

As with any application, looking for ERROR or WARN lines can be a -great first step:

-
$ # Search for warnings or errors in the latest system.log
-$ grep 'WARN\|ERROR' system.log | tail
-...
-
-$ # Search for warnings or errors in all rotated system.log
-$ zgrep 'WARN\|ERROR' system.log.* | less
-...
-
-
-
-
-

debug.log

-

This log contains additional debugging information that may be useful when troubleshooting, but may be much noisier than the normal system.log. Some examples of activities logged to this log:

-
    -
  • Information about compactions, including when they start, which sstables -they contain, and when they finish.
  • -
  • Information about memtable flushes to disk, including when they happened, -how large the flushes were, and which commitlog segments the flush impacted.
  • -
-

This log can be very noisy, so it is highly recommended to use grep and -other log analysis tools to dive deep. For example:

-
$ # Search for messages involving a CompactionTask with 5 lines of context
-$ grep CompactionTask debug.log -C 5
-...
-
-$ # Look at the distribution of flush tasks per keyspace
-$ grep "Enqueuing flush" debug.log | cut -f 10 -d ' ' | sort | uniq -c
-    6 compaction_history:
-    1 test_keyspace:
-    2 local:
-    17 size_estimates:
-    17 sstable_activity:
-
-
-
-
-

gc.log

-

The gc log is a standard Java GC log. With the default jvm.options -settings you get a lot of valuable information in this log such as -application pause times, and why pauses happened. This may help narrow -down throughput or latency issues to a mistuned JVM. For example you can -view the last few pauses:

-
$ grep stopped gc.log.0.current | tail
-2018-08-29T00:19:39.522+0000: 3022663.591: Total time for which application threads were stopped: 0.0332813 seconds, Stopping threads took: 0.0008189 seconds
-2018-08-29T00:19:44.369+0000: 3022668.438: Total time for which application threads were stopped: 0.0312507 seconds, Stopping threads took: 0.0007025 seconds
-2018-08-29T00:19:49.796+0000: 3022673.865: Total time for which application threads were stopped: 0.0307071 seconds, Stopping threads took: 0.0006662 seconds
-2018-08-29T00:19:55.452+0000: 3022679.521: Total time for which application threads were stopped: 0.0309578 seconds, Stopping threads took: 0.0006832 seconds
-2018-08-29T00:20:00.127+0000: 3022684.197: Total time for which application threads were stopped: 0.0310082 seconds, Stopping threads took: 0.0007090 seconds
-2018-08-29T00:20:06.583+0000: 3022690.653: Total time for which application threads were stopped: 0.0317346 seconds, Stopping threads took: 0.0007106 seconds
-2018-08-29T00:20:10.079+0000: 3022694.148: Total time for which application threads were stopped: 0.0299036 seconds, Stopping threads took: 0.0006889 seconds
-2018-08-29T00:20:15.739+0000: 3022699.809: Total time for which application threads were stopped: 0.0078283 seconds, Stopping threads took: 0.0006012 seconds
-2018-08-29T00:20:15.770+0000: 3022699.839: Total time for which application threads were stopped: 0.0301285 seconds, Stopping threads took: 0.0003789 seconds
-2018-08-29T00:20:15.798+0000: 3022699.867: Total time for which application threads were stopped: 0.0279407 seconds, Stopping threads took: 0.0003627 seconds
-
-
-

This shows a lot of valuable information including how long the application -was paused (meaning zero user queries were being serviced during the e.g. 33ms -JVM pause) as well as how long it took to enter the safepoint. You can use this -raw data to e.g. get the longest pauses:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n  | tail | xargs -IX grep X gc.log.0.current | sort -k 1
-2018-08-28T17:13:40.520-0700: 1.193: Total time for which application threads were stopped: 0.0157914 seconds, Stopping threads took: 0.0000355 seconds
-2018-08-28T17:13:41.206-0700: 1.879: Total time for which application threads were stopped: 0.0249811 seconds, Stopping threads took: 0.0000318 seconds
-2018-08-28T17:13:41.638-0700: 2.311: Total time for which application threads were stopped: 0.0561130 seconds, Stopping threads took: 0.0000328 seconds
-2018-08-28T17:13:41.677-0700: 2.350: Total time for which application threads were stopped: 0.0362129 seconds, Stopping threads took: 0.0000597 seconds
-2018-08-28T17:13:41.781-0700: 2.454: Total time for which application threads were stopped: 0.0442846 seconds, Stopping threads took: 0.0000238 seconds
-2018-08-28T17:13:41.976-0700: 2.649: Total time for which application threads were stopped: 0.0377115 seconds, Stopping threads took: 0.0000250 seconds
-2018-08-28T17:13:42.172-0700: 2.845: Total time for which application threads were stopped: 0.0475415 seconds, Stopping threads took: 0.0001018 seconds
-2018-08-28T17:13:42.825-0700: 3.498: Total time for which application threads were stopped: 0.0379155 seconds, Stopping threads took: 0.0000571 seconds
-2018-08-28T17:13:43.574-0700: 4.247: Total time for which application threads were stopped: 0.0323812 seconds, Stopping threads took: 0.0000574 seconds
-2018-08-28T17:13:44.602-0700: 5.275: Total time for which application threads were stopped: 0.0238975 seconds, Stopping threads took: 0.0000788 seconds
-
-
-

In this case any client waiting on a query would have experienced a 56ms -latency at 17:13:41.

-

Note that GC pauses are not _only_ garbage collection, although -generally speaking high pauses with fast safepoints indicate a lack of JVM heap -or mistuned JVM GC algorithm. High pauses with slow safepoints typically -indicate that the JVM is having trouble entering a safepoint which usually -indicates slow disk drives (Cassandra makes heavy use of memory mapped reads -which the JVM doesn’t know could have disk latency, so the JVM safepoint logic -doesn’t handle a blocking memory mapped read particularly well).
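If you suspect slow disks are behind slow safepoints, a quick look at device latency with a standard tool such as iostat can help confirm it (a sketch; any disk latency tool will do, and the columns to watch are the await/r_await/w_await and %util values for the data drives):

$ iostat -xm 5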

-

Using these logs you can even get a pause distribution with something like -histogram.py:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | histogram.py
-# NumSamples = 410293; Min = 0.00; Max = 11.49
-# Mean = 0.035346; Variance = 0.002216; SD = 0.047078; Median 0.036498
-# each ∎ represents a count of 5470
-    0.0001 -     1.1496 [410255]: ∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
-    1.1496 -     2.2991 [    15]:
-    2.2991 -     3.4486 [     5]:
-    3.4486 -     4.5981 [     1]:
-    4.5981 -     5.7475 [     5]:
-    5.7475 -     6.8970 [     9]:
-    6.8970 -     8.0465 [     1]:
-    8.0465 -     9.1960 [     0]:
-    9.1960 -    10.3455 [     0]:
-   10.3455 -    11.4949 [     2]:
-
-
-

We can see in this case while we have very good average performance something -is causing multi second JVM pauses … In this case it was mostly safepoint -pauses caused by slow disks:

-
$ grep stopped gc.log.0.current | cut -f 11 -d ' ' | sort -n | tail | xargs -IX grep X  gc.log.0.current| sort -k 1
-2018-07-27T04:52:27.413+0000: 187831.482: Total time for which application threads were stopped: 6.5037022 seconds, Stopping threads took: 0.0005212 seconds
-2018-07-30T23:38:18.354+0000: 514582.423: Total time for which application threads were stopped: 6.3262938 seconds, Stopping threads took: 0.0004882 seconds
-2018-08-01T02:37:48.380+0000: 611752.450: Total time for which application threads were stopped: 10.3879659 seconds, Stopping threads took: 0.0004475 seconds
-2018-08-06T22:04:14.990+0000: 1113739.059: Total time for which application threads were stopped: 6.0917409 seconds, Stopping threads took: 0.0005553 seconds
-2018-08-14T00:04:06.091+0000: 1725730.160: Total time for which application threads were stopped: 6.0141054 seconds, Stopping threads took: 0.0004976 seconds
-2018-08-17T06:23:06.755+0000: 2007670.824: Total time for which application threads were stopped: 6.0133694 seconds, Stopping threads took: 0.0006011 seconds
-2018-08-23T06:35:46.068+0000: 2526830.137: Total time for which application threads were stopped: 6.4767751 seconds, Stopping threads took: 6.4426849 seconds
-2018-08-23T06:36:29.018+0000: 2526873.087: Total time for which application threads were stopped: 11.4949489 seconds, Stopping threads took: 11.4638297 seconds
-2018-08-23T06:37:12.671+0000: 2526916.741: Total time for which application threads were stopped: 6.3867003 seconds, Stopping threads took: 6.3507166 seconds
-2018-08-23T06:37:47.156+0000: 2526951.225: Total time for which application threads were stopped: 7.9528200 seconds, Stopping threads took: 7.9197756 seconds
-
-
-

Sometimes reading and understanding java GC logs is hard, but you can take the -raw GC files and visualize them using tools such as GCViewer which take the Cassandra GC log as -input and show you detailed visual information on your garbage collection -performance. This includes pause analysis as well as throughput information. -For a stable Cassandra JVM you probably want to aim for pauses less than -200ms and GC throughput greater than 99% (ymmv).

-

Java GC pauses are one of the leading causes of tail latency in Cassandra -(along with drive latency) so sometimes this information can be crucial -while debugging tail latency issues.

-
-
-
-

Getting More Information

-

If the default logging levels are insufficient, nodetool can set higher or lower logging levels for various packages and classes using the nodetool setlogginglevel command. Start by viewing the current levels:

-
$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-
-
-

Perhaps the Gossiper is acting up and we wish to enable it at TRACE -level for even more insight:

-
$ nodetool setlogginglevel org.apache.cassandra.gms.Gossiper TRACE
-
-$ nodetool getlogginglevels
-
-Logger Name                                        Log Level
-ROOT                                                    INFO
-org.apache.cassandra                                   DEBUG
-org.apache.cassandra.gms.Gossiper                      TRACE
-
-$ grep TRACE debug.log | tail -2
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:1234 - Updating
-heartbeat state version to 2344 from 2343 for 127.0.0.2:7000 ...
-TRACE [GossipStage:1] 2018-07-04 17:07:47,879 Gossiper.java:923 - local
-heartbeat version 2341 greater than 2340 for 127.0.0.1:7000
-
-
-

Note that any changes made this way are reverted on next Cassandra process -restart. To make the changes permanent add the appropriate rule to -logback.xml.

-
diff --git a/conf/logback.xml b/conf/logback.xml
-index b2c5b10..71b0a49 100644
---- a/conf/logback.xml
-+++ b/conf/logback.xml
-@@ -98,4 +98,5 @@ appender reference in the root level section below.
-   </root>
-
-   <logger name="org.apache.cassandra" level="DEBUG"/>
-+  <logger name="org.apache.cassandra.gms.Gossiper" level="TRACE"/>
- </configuration>
-
-
-
-

Full Query Logger

-

Cassandra 4.0 additionally ships with support for full query logging. This -is a highly performant binary logging tool which captures Cassandra queries -in real time, writes them (if possible) to a log file, and ensures the total -size of the capture does not exceed a particular limit. FQL is enabled with -nodetool and the logs are read with the provided bin/fqltool utility:

-
$ mkdir /var/tmp/fql_logs
-$ nodetool enablefullquerylog --path /var/tmp/fql_logs
-
-# ... do some querying
-
-$ bin/fqltool dump /var/tmp/fql_logs/20180705-00.cq4 | tail
-Query time: 1530750927224
-Query: SELECT * FROM system_virtual_schema.columns WHERE keyspace_name =
-'system_views' AND table_name = 'sstable_tasks';
-Values:
-
-Type: single
-Protocol version: 4
-Query time: 1530750934072
-Query: select * from keyspace1.standard1 ;
-Values:
-
-$ nodetool disablefullquerylog
-
-
-

Note that if you want more information than this tool provides, there are other -live capture options available such as packet capture.

-
-
-
- - - - - - -
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/troubleshooting/use_nodetool.html b/src/doc/4.0-beta1/troubleshooting/use_nodetool.html deleted file mode 100644 index fa2dc03f5..000000000 --- a/src/doc/4.0-beta1/troubleshooting/use_nodetool.html +++ /dev/null @@ -1,321 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Use Nodetool" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Use Nodetool

-

Cassandra’s nodetool allows you to narrow problems from the cluster down -to a particular node and gives a lot of insight into the state of the Cassandra -process itself. There are dozens of useful commands (see nodetool help -for all the commands), but briefly some of the most useful for troubleshooting:

-
-

Cluster Status

-

You can use nodetool status to assess status of the cluster:

-
$ nodetool status <optional keyspace>
-
-Datacenter: dc1
-=======================
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-UN  127.0.1.1  4.69 GiB   1            100.0%            35ea8c9f-b7a2-40a7-b9c5-0ee8b91fdd0e  r1
-UN  127.0.1.2  4.71 GiB   1            100.0%            752e278f-b7c5-4f58-974b-9328455af73f  r2
-UN  127.0.1.3  4.69 GiB   1            100.0%            9dc1a293-2cc0-40fa-a6fd-9e6054da04a7  r3
-
-
-

In this case we can see that we have three nodes in one datacenter with about 4.6GB of data each and they are all “up”. The up/down status of a node is independently determined by every node in the cluster, so you may have to run nodetool status on multiple nodes in a cluster to see the full view.
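For example, a quick sketch of collecting each node’s own view of the cluster, assuming you have ssh access to the node addresses shown in the output above:

$ for ip in 127.0.1.1 127.0.1.2 127.0.1.3; do echo "== $ip =="; ssh "$ip" nodetool status; done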

-

You can use nodetool status plus a little grep to see which nodes are down:

-
$ nodetool status | grep -v '^UN'
-Datacenter: dc1
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-Datacenter: dc2
-===============
-Status=Up/Down
-|/ State=Normal/Leaving/Joining/Moving
---  Address    Load       Tokens       Owns (effective)  Host ID                               Rack
-DN  127.0.0.5  105.73 KiB  1            33.3%             df303ac7-61de-46e9-ac79-6e630115fd75  r1
-
-
-

In this case there are two datacenters and there is one node down in datacenter -dc2 and rack r1. This may indicate an issue on 127.0.0.5 -warranting investigation.

-
-
-

Coordinator Query Latency

-

You can view latency distributions of coordinator read and write latency -to help narrow down latency issues using nodetool proxyhistograms:

-
$ nodetool proxyhistograms
-Percentile       Read Latency      Write Latency      Range Latency   CAS Read Latency  CAS Write Latency View Write Latency
-                     (micros)           (micros)           (micros)           (micros)           (micros)           (micros)
-50%                    454.83             219.34               0.00               0.00               0.00               0.00
-75%                    545.79             263.21               0.00               0.00               0.00               0.00
-95%                    654.95             315.85               0.00               0.00               0.00               0.00
-98%                    785.94             379.02               0.00               0.00               0.00               0.00
-99%                   3379.39            2346.80               0.00               0.00               0.00               0.00
-Min                     42.51             105.78               0.00               0.00               0.00               0.00
-Max                  25109.16           43388.63               0.00               0.00               0.00               0.00
-
-
-

Here you can see the full latency distribution of reads, writes, range requests (e.g. select * from keyspace.table), CAS read (the compare phase of CAS) and CAS write (the set phase of compare and set). These can be useful for narrowing down high level latency problems; for example, in this case if a client had a 20 millisecond timeout on their reads they might experience the occasional timeout from this node, but less than 1% of the time (since the 99% read latency is 3.3 milliseconds < 20 milliseconds).

-
-
-

Local Query Latency

-

If you know which table is having latency/error issues, you can use -nodetool tablehistograms to get a better idea of what is happening -locally on a node:

-
$ nodetool tablehistograms keyspace table
-Percentile  SSTables     Write Latency      Read Latency    Partition Size        Cell Count
-                              (micros)          (micros)           (bytes)
-50%             0.00             73.46            182.79             17084               103
-75%             1.00             88.15            315.85             17084               103
-95%             2.00            126.93            545.79             17084               103
-98%             2.00            152.32            654.95             17084               103
-99%             2.00            182.79            785.94             17084               103
-Min             0.00             42.51             24.60             14238                87
-Max             2.00          12108.97          17436.92             17084               103
-
-
-

This shows you percentile breakdowns particularly critical metrics.

-

The first column contains how many sstables were read per logical read. A very high number here indicates that you may have chosen the wrong compaction strategy, e.g. SizeTieredCompactionStrategy typically has many more reads per read than LeveledCompactionStrategy does for update heavy workloads.
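If you do decide to switch a table to LeveledCompactionStrategy, a minimal sketch of doing so from cqlsh follows; keyspace1.standard1 is the stress table queried earlier in these docs, and the change triggers background recompaction of existing sstables:

$ cqlsh -e "ALTER TABLE keyspace1.standard1 WITH compaction = {'class': 'LeveledCompactionStrategy'};"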

-

The second column shows you a latency breakdown of local write latency. In -this case we see that while the p50 is quite good at 73 microseconds, the -maximum latency is quite slow at 12 milliseconds. High write max latencies -often indicate a slow commitlog volume (slow to fsync) or large writes -that quickly saturate commitlog segments.

-

The third column shows you a latency breakdown of local read latency. We can -see that local Cassandra reads are (as expected) slower than local writes, and -the read speed correlates highly with the number of sstables read per read.

-

The fourth and fifth columns show distributions of partition size and column -count per partition. These are useful for determining if the table has on -average skinny or wide partitions and can help you isolate bad data patterns. -For example if you have a single cell that is 2 megabytes, that is probably -going to cause some heap pressure when it’s read.

-
-
-

Threadpool State

-

You can use nodetool tpstats to view the current outstanding requests on -a particular node. This is useful for trying to find out which resource -(read threads, write threads, compaction, request response threads) the -Cassandra process lacks. For example:

-
$ nodetool tpstats
-Pool Name                         Active   Pending      Completed   Blocked  All time blocked
-ReadStage                              2         0             12         0                 0
-MiscStage                              0         0              0         0                 0
-CompactionExecutor                     0         0           1940         0                 0
-MutationStage                          0         0              0         0                 0
-GossipStage                            0         0          10293         0                 0
-Repair-Task                            0         0              0         0                 0
-RequestResponseStage                   0         0             16         0                 0
-ReadRepairStage                        0         0              0         0                 0
-CounterMutationStage                   0         0              0         0                 0
-MemtablePostFlush                      0         0             83         0                 0
-ValidationExecutor                     0         0              0         0                 0
-MemtableFlushWriter                    0         0             30         0                 0
-ViewMutationStage                      0         0              0         0                 0
-CacheCleanupExecutor                   0         0              0         0                 0
-MemtableReclaimMemory                  0         0             30         0                 0
-PendingRangeCalculator                 0         0             11         0                 0
-SecondaryIndexManagement               0         0              0         0                 0
-HintsDispatcher                        0         0              0         0                 0
-Native-Transport-Requests              0         0            192         0                 0
-MigrationStage                         0         0             14         0                 0
-PerDiskMemtableFlushWriter_0           0         0             30         0                 0
-Sampler                                0         0              0         0                 0
-ViewBuildExecutor                      0         0              0         0                 0
-InternalResponseStage                  0         0              0         0                 0
-AntiEntropyStage                       0         0              0         0                 0
-
-Message type           Dropped                  Latency waiting in queue (micros)
-                                             50%               95%               99%               Max
-READ                         0               N/A               N/A               N/A               N/A
-RANGE_SLICE                  0              0.00              0.00              0.00              0.00
-_TRACE                       0               N/A               N/A               N/A               N/A
-HINT                         0               N/A               N/A               N/A               N/A
-MUTATION                     0               N/A               N/A               N/A               N/A
-COUNTER_MUTATION             0               N/A               N/A               N/A               N/A
-BATCH_STORE                  0               N/A               N/A               N/A               N/A
-BATCH_REMOVE                 0               N/A               N/A               N/A               N/A
-REQUEST_RESPONSE             0              0.00              0.00              0.00              0.00
-PAGED_RANGE                  0               N/A               N/A               N/A               N/A
-READ_REPAIR                  0               N/A               N/A               N/A               N/A
-
-
-

This command shows you all kinds of interesting statistics. The first section shows a detailed breakdown of threadpools for each Cassandra stage, including how many threads are currently executing (Active) and how many are waiting to run (Pending). Typically, if you see pending executions in a particular threadpool that indicates a problem localized to that type of operation. For example, if the RequestResponseStage queue is backing up, that means that the coordinators are waiting on a lot of downstream replica requests and may indicate a lack of token awareness, or very high consistency levels being used on read requests (for example reading at ALL ties up RF RequestResponseStage threads whereas LOCAL_ONE only uses a single thread in the ReadStage threadpool). On the other hand if you see a lot of pending compactions that may indicate that your compaction threads cannot keep up with the volume of writes and you may need to tune either the compaction strategy or the concurrent_compactors or compaction_throughput options.
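Because tpstats is a point-in-time snapshot, it is often more informative to sample it repeatedly and watch whether Pending and Dropped counts grow; a simple sketch, assuming the standard watch utility is installed:

$ watch -d -n 5 nodetool tpstats   # -d highlights values that changed between samples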

-

The second section shows drops (errors) and latency distributions for all the major request types. Drops are cumulative since process start; if you have any they indicate a serious problem, as the default timeouts to qualify as a drop are quite high (~5-10 seconds). Dropped messages often warrant further investigation.

-
-
-

Compaction State

-

As Cassandra is a LSM datastore, Cassandra sometimes has to compact sstables -together, which can have adverse effects on performance. In particular, -compaction uses a reasonable quantity of CPU resources, invalidates large -quantities of the OS page cache, -and can put a lot of load on your disk drives. There are great -os tools to determine if this is the case, but often it’s a -good idea to check if compactions are even running using -nodetool compactionstats:

-
$ nodetool compactionstats
-pending tasks: 2
-- keyspace.table: 2
-
-id                                   compaction type keyspace table completed total    unit  progress
-2062b290-7f3a-11e8-9358-cd941b956e60 Compaction      keyspace table 21848273  97867583 bytes 22.32%
-Active compaction remaining time :   0h00m04s
-
-
-

In this case there is a single compaction running on the keyspace.table table, which has completed 21.8 megabytes of the 97.9 megabyte total, and Cassandra estimates (based on the configured compaction throughput) that this will take 4 seconds. You can also pass -H to get the units in a human readable format.

-

Generally each running compaction can consume a single core, but the more you do in parallel the faster data compacts. Compaction is crucial to ensuring good read performance so having the right balance of concurrent compactions such that compactions complete quickly but don’t take too many resources away from query threads is very important for performance. If you notice compaction unable to keep up, try tuning Cassandra’s concurrent_compactors or compaction_throughput options.
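For example, the compaction throughput cap can be inspected and adjusted at runtime with nodetool; the 64 MB/s value below is only illustrative, and the setting is not persisted across restarts (on restart the cassandra.yaml value applies again):

$ nodetool getcompactionthroughput
$ nodetool setcompactionthroughput 64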

-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/4.0-beta1/troubleshooting/use_tools.html b/src/doc/4.0-beta1/troubleshooting/use_tools.html deleted file mode 100644 index 28a294081..000000000 --- a/src/doc/4.0-beta1/troubleshooting/use_tools.html +++ /dev/null @@ -1,609 +0,0 @@ ---- -layout: docpage - -title: "Documentation" - -is_homepage: false -is_sphinx_doc: true - -doc-parent: "Troubleshooting" - -doc-title: "Diving Deep, Use External Tools" -doc-header-links: ' - - - - -' -doc-search-path: "../search.html" - -extra-footer: ' - -' - ---- -
-
- -
-
-
- -
-

Diving Deep, Use External Tools

-

Machine access allows operators to dive even deeper than logs and nodetool -allow. While every Cassandra operator may have their personal favorite -toolsets for troubleshooting issues, this page contains some of the most common -operator techniques and examples of those tools. Many of these commands work -only on Linux, but if you are deploying on a different operating system you may -have access to other substantially similar tools that assess similar OS level -metrics and processes.

-
-

JVM Tooling

-

The JVM ships with a number of useful tools. Some of them are useful for -debugging Cassandra issues, especially related to heap and execution stacks.

-

NOTE: There are two common gotchas with JVM tooling and Cassandra:

-
    -
  1. By default Cassandra ships with -XX:+PerfDisableSharedMem set to prevent long pauses (see CASSANDRA-9242 and CASSANDRA-9483 for details). If you want to use JVM tooling you can instead have /tmp mounted on an in-memory tmpfs, which also effectively works around CASSANDRA-9242.
  2. Make sure you run the tools as the same user that Cassandra is running as, e.g. if the database is running as cassandra the tool also has to be run as cassandra, e.g. via sudo -u cassandra <cmd>.
-
-

Garbage Collection State (jstat)

-

If you suspect heap pressure you can use jstat to dive deep into the -garbage collection state of a Cassandra process. This command is always -safe to run and yields detailed heap information including eden heap usage (E), -old generation heap usage (O), count of eden collections (YGC), time spend in -eden collections (YGCT), old/mixed generation collections (FGC) and time spent -in old/mixed generation collections (FGCT):

-
jstat -gcutil <cassandra pid> 500ms
- S0     S1     E      O      M     CCS    YGC     YGCT    FGC    FGCT     GCT
- 0.00   0.00  81.53  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  82.36  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  83.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  84.19  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.03  31.16  93.07  88.20     12    0.151     3    0.257    0.408
- 0.00   0.00  85.94  31.16  93.07  88.20     12    0.151     3    0.257    0.408
-
-
-

In this case we see we have a relatively healthy heap profile, with 31.16% old generation heap usage and 83% eden. If the old generation routinely is above 75% then you probably need more heap (assuming CMS with a 75% occupancy threshold). If you do have such persistently high old gen that often means you either have under-provisioned the old generation heap, or that there is too much live data on heap for Cassandra to collect (e.g. because of memtables). Another thing to watch for is time between young garbage collections (YGC), which indicate how frequently the eden heap is collected. Each young gc pause is about 20-50ms, so if you have a lot of them your clients will notice in their high percentile latencies.
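As a quick cross-check that does not require JVM tooling, nodetool info reports the node’s current heap and off-heap memory usage, e.g.:

$ nodetool info | grep -i heap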

-
-
-

Thread Information (jstack)

-

To get a point in time snapshot of exactly what Cassandra is doing, run jstack against the Cassandra PID. Note that this does pause the JVM for a very brief period (<20ms):

-
$ jstack <cassandra pid> > threaddump
-
-# display the threaddump
-$ cat threaddump
-...
-
-# look at runnable threads
-$grep RUNNABLE threaddump -B 1
-"Attach Listener" #15 daemon prio=9 os_prio=0 tid=0x00007f829c001000 nid=0x3a74 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"DestroyJavaVM" #13 prio=5 os_prio=0 tid=0x00007f82e800e000 nid=0x2a19 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"JPS thread pool" #10 prio=5 os_prio=0 tid=0x00007f82e84d0800 nid=0x2a2c runnable [0x00007f82d0856000]
-   java.lang.Thread.State: RUNNABLE
---
-"Service Thread" #9 daemon prio=9 os_prio=0 tid=0x00007f82e80d7000 nid=0x2a2a runnable [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-"C1 CompilerThread3" #8 daemon prio=9 os_prio=0 tid=0x00007f82e80cc000 nid=0x2a29 waiting on condition [0x0000000000000000]
-   java.lang.Thread.State: RUNNABLE
---
-...
-
-# Note that the nid is the Linux thread id
-
-
-

Some of the most important information in the threaddumps is the set of waiting/blocked threads, including what locks or monitors each thread is blocking or waiting on.
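For example, mirroring the RUNNABLE search above, a quick sketch for pulling blocked threads (and the monitors they are waiting to lock) out of the same threaddump file:

$ grep BLOCKED threaddump -B 1
$ grep 'waiting to lock' threaddump | head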

-
-
-
-

Basic OS Tooling

-

A great place to start when debugging a Cassandra issue is understanding how -Cassandra is interacting with system resources. The following are all -resources that Cassandra makes heavy uses of:

-
    -
  • CPU cores. For executing concurrent user queries
  • -
  • CPU processing time. For query activity (data decompression, row merging, -etc…)
  • -
  • CPU processing time (low priority). For background tasks (compaction, -streaming, etc …)
  • -
  • RAM for Java Heap. Used to hold internal data-structures and by default the -Cassandra memtables. Heap space is a crucial component of write performance -as well as generally.
  • -
  • RAM for OS disk cache. Used to cache frequently accessed SSTable blocks. OS -disk cache is a crucial component of read performance.
  • -
  • Disks. Cassandra cares a lot about disk read latency, disk write throughput, -and of course disk space.
  • -
  • Network latency. Cassandra makes many internode requests, so network latency -between nodes can directly impact performance.
  • -
  • Network throughput. Cassandra (as other databases) frequently have the -so called “incast” problem where a small request (e.g. SELECT * from -foo.bar) returns a massively large result set (e.g. the entire dataset). -In such situations outgoing bandwidth is crucial.
  • -
-

Often troubleshooting Cassandra comes down to troubleshooting what resource -the machine or cluster is running out of. Then you create more of that resource -or change the query pattern to make less use of that resource.

-
-

High Level Resource Usage (top/htop)

-

Cassandra makes significant use of system resources, and often the very first useful action is to run top or htop to see the state of the machine.

-

Useful things to look at:

-
    -
  • System load levels. While these numbers can be confusing, generally speaking -if the load average is greater than the number of CPU cores, Cassandra -probably won’t have very good (sub 100 millisecond) latencies. See -Linux Load Averages -for more information.
  • -
  • CPU utilization. htop in particular can help break down CPU utilization -into user (low and normal priority), system (kernel), and io-wait -. Cassandra query threads execute as normal priority user threads, while -compaction threads execute as low priority user threads. High system -time could indicate problems like thread contention, and high io-wait -may indicate slow disk drives. This can help you understand what Cassandra -is spending processing resources doing.
  • -
  • Memory usage. Look for which programs have the most resident memory, it is -probably Cassandra. The number for Cassandra is likely inaccurately high due -to how Linux (as of 2018) accounts for memory mapped file memory.
  • -
-
-
-

IO Usage (iostat)

-

Use iostat to determine how data drives are faring, including latency -distributions, throughput, and utilization:

-
$ sudo iostat -xdm 2
-Linux 4.13.0-13-generic (hostname)     07/03/2018     _x86_64_    (8 CPU)
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.28    0.32    5.42     0.01     0.13    48.55     0.01    2.21    0.26    2.32   0.64   0.37
-sdb               0.00     0.00    0.00    0.00     0.00     0.00    79.34     0.00    0.20    0.20    0.00   0.16   0.00
-sdc               0.34     0.27    0.76    0.36     0.01     0.02    47.56     0.03   26.90    2.98   77.73   9.21   1.03
-
-Device:         rrqm/s   wrqm/s     r/s     w/s    rMB/s    wMB/s avgrq-sz avgqu-sz   await r_await w_await  svctm  %util
-sda               0.00     0.00    2.00   32.00     0.01     4.04   244.24     0.54   16.00    0.00   17.00   1.06   3.60
-sdb               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00    0.00    0.00   0.00   0.00
-sdc               0.00    24.50    0.00  114.00     0.00    11.62   208.70     5.56   48.79    0.00   48.79   1.12  12.80
-
-
-

In this case we can see that /dev/sdc1 is a very slow drive, having an -await close to 50 milliseconds and an avgqu-sz close to 5 ios. The -drive is not particularly saturated (utilization is only 12.8%), but we should -still be concerned about how this would affect our p99 latency since 50ms is -quite long for typical Cassandra operations. That being said, in this case -most of the latency is present in writes (typically writes are more latent -than reads), which due to the LSM nature of Cassandra is often hidden from -the user.

-

Important metrics to assess using iostat:

-
    -
  • Reads and writes per second. These numbers will change with the workload, -but generally speaking the more reads Cassandra has to do from disk the -slower Cassandra read latencies are. Large numbers of reads per second -can be a dead giveaway that the cluster has insufficient memory for OS -page caching.
  • -
  • Write throughput. Cassandra’s LSM model defers user writes and batches them -together, which means that throughput to the underlying medium is the most -important write metric for Cassandra.
  • -
  • Read latency (r_await). When Cassandra missed the OS page cache and reads -from SSTables, the read latency directly determines how fast Cassandra can -respond with the data.
  • -
  • Write latency. Cassandra is less sensitive to write latency except when it -syncs the commit log. This typically enters into the very high percentiles of -write latency.
  • -
-

Note that to get detailed latency breakdowns you will need a more advanced -tool such as bcc-tools.

-
-
-

OS page Cache Usage

-

As Cassandra makes heavy use of memory mapped files, the health of the -operating system’s Page Cache is -crucial to performance. Start by finding how much available cache is in the -system:

-
$ free -g
-              total        used        free      shared  buff/cache   available
-Mem:             15           9           2           0           3           5
-Swap:             0           0           0
-
-
-

In this case 9GB of memory is used by user processes (the Cassandra heap) and most of the remaining memory is available to the OS page cache. Of that, 3GB is actually used to cache files. If most memory is used and unavailable to the page cache, Cassandra performance can suffer significantly. This is why Cassandra starts with a reasonably small amount of memory reserved for the heap.

-

If you suspect that you are missing the OS page cache frequently you can use -advanced tools like cachestat or -vmtouch to dive deeper.

-
-
-

Network Latency and Reliability

-

Whenever Cassandra does writes or reads that involve other replicas, -LOCAL_QUORUM reads for example, one of the dominant effects on latency is -network latency. When trying to debug issues with multi machine operations, -the network can be an important resource to investigate. You can determine -internode latency using tools like ping and traceroute or most -effectively mtr:

-
$ mtr -nr www.google.com
-Start: Sun Jul 22 13:10:28 2018
-HOST: hostname                     Loss%   Snt   Last   Avg  Best  Wrst StDev
-  1.|-- 192.168.1.1                0.0%    10    2.0   1.9   1.1   3.7   0.7
-  2.|-- 96.123.29.15               0.0%    10   11.4  11.0   9.0  16.4   1.9
-  3.|-- 68.86.249.21               0.0%    10   10.6  10.7   9.0  13.7   1.1
-  4.|-- 162.141.78.129             0.0%    10   11.5  10.6   9.6  12.4   0.7
-  5.|-- 162.151.78.253             0.0%    10   10.9  12.1  10.4  20.2   2.8
-  6.|-- 68.86.143.93               0.0%    10   12.4  12.6   9.9  23.1   3.8
-  7.|-- 96.112.146.18              0.0%    10   11.9  12.4  10.6  15.5   1.6
-  9.|-- 209.85.252.250             0.0%    10   13.7  13.2  12.5  13.9   0.0
- 10.|-- 108.170.242.238            0.0%    10   12.7  12.4  11.1  13.0   0.5
- 11.|-- 74.125.253.149             0.0%    10   13.4  13.7  11.8  19.2   2.1
- 12.|-- 216.239.62.40              0.0%    10   13.4  14.7  11.5  26.9   4.6
- 13.|-- 108.170.242.81             0.0%    10   14.4  13.2  10.9  16.0   1.7
- 14.|-- 72.14.239.43               0.0%    10   12.2  16.1  11.0  32.8   7.1
- 15.|-- 216.58.195.68              0.0%    10   25.1  15.3  11.1  25.1   4.8
-
-
-

In this example of mtr, we can rapidly assess the path that your packets -are taking, as well as what their typical loss and latency are. Packet loss -typically leads to between 200ms and 3s of additional latency, so that -can be a common cause of latency issues.

-
-
-

Network Throughput

-

As Cassandra is sensitive to outgoing bandwidth limitations, sometimes it is -useful to determine if network throughput is limited. One handy tool to do -this is iftop which -shows both bandwidth usage as well as connection information at a glance. An -example showing traffic during a stress run against a local ccm cluster:

-
$ # remove the -t for ncurses instead of pure text
-$ sudo iftop -nNtP -i lo
-interface: lo
-IP address is: 127.0.0.1
-MAC address is: 00:00:00:00:00:00
-Listening on lo
-   # Host name (port/service if enabled)            last 2s   last 10s   last 40s cumulative
---------------------------------------------------------------------------------------------
-   1 127.0.0.1:58946                          =>      869Kb      869Kb      869Kb      217KB
-     127.0.0.3:9042                           <=         0b         0b         0b         0B
-   2 127.0.0.1:54654                          =>      736Kb      736Kb      736Kb      184KB
-     127.0.0.1:9042                           <=         0b         0b         0b         0B
-   3 127.0.0.1:51186                          =>      669Kb      669Kb      669Kb      167KB
-     127.0.0.2:9042                           <=         0b         0b         0b         0B
-   4 127.0.0.3:9042                           =>     3.30Kb     3.30Kb     3.30Kb       845B
-     127.0.0.1:58946                          <=         0b         0b         0b         0B
-   5 127.0.0.1:9042                           =>     2.79Kb     2.79Kb     2.79Kb       715B
-     127.0.0.1:54654                          <=         0b         0b         0b         0B
-   6 127.0.0.2:9042                           =>     2.54Kb     2.54Kb     2.54Kb       650B
-     127.0.0.1:51186                          <=         0b         0b         0b         0B
-   7 127.0.0.1:36894                          =>     1.65Kb     1.65Kb     1.65Kb       423B
-     127.0.0.5:7000                           <=         0b         0b         0b         0B
-   8 127.0.0.1:38034                          =>     1.50Kb     1.50Kb     1.50Kb       385B
-     127.0.0.2:7000                           <=         0b         0b         0b         0B
-   9 127.0.0.1:56324                          =>     1.50Kb     1.50Kb     1.50Kb       383B
-     127.0.0.1:7000                           <=         0b         0b         0b         0B
-  10 127.0.0.1:53044                          =>     1.43Kb     1.43Kb     1.43Kb       366B
-     127.0.0.4:7000                           <=         0b         0b         0b         0B
---------------------------------------------------------------------------------------------
-Total send rate:                                     2.25Mb     2.25Mb     2.25Mb
-Total receive rate:                                      0b         0b         0b
-Total send and receive rate:                         2.25Mb     2.25Mb     2.25Mb
---------------------------------------------------------------------------------------------
-Peak rate (sent/received/total):                     2.25Mb         0b     2.25Mb
-Cumulative (sent/received/total):                     576KB         0B      576KB
-============================================================================================
-
-
-

In this case we can see that bandwidth is fairly shared between many peers, but if the total were getting close to the rated capacity of the NIC, or were focused on a single client, that may be a clue as to what issue is occurring.

-
-
-
-

Advanced tools

-

Sometimes as an operator you may need to really dive deep. This is where -advanced OS tooling can come in handy.

-
-

bcc-tools

-

Most modern Linux distributions (kernels newer than 4.1) support bcc-tools for diving deep into performance problems. -First install bcc-tools, e.g. via apt on Debian:

-
$ apt install bcc-tools
-
-
-

Then you can use all the tools that bcc-tools contains. One of the most -useful tools is cachestat -(cachestat examples) -which allows you to determine exactly how many OS page cache hits and misses -are happening:

-
$ sudo /usr/share/bcc/tools/cachestat -T 1
-TIME        TOTAL   MISSES     HITS  DIRTIES   BUFFERS_MB  CACHED_MB
-18:44:08       66       66        0       64           88       4427
-18:44:09       40       40        0       75           88       4427
-18:44:10     4353       45     4308      203           88       4427
-18:44:11       84       77        7       13           88       4428
-18:44:12     2511       14     2497       14           88       4428
-18:44:13      101       98        3       18           88       4428
-18:44:14    16741        0    16741       58           88       4428
-18:44:15     1935       36     1899       18           88       4428
-18:44:16       89       34       55       18           88       4428
-
-
-

In this case there are not too many page cache MISSES which indicates a -reasonably sized cache. These metrics are the most direct measurement of your -Cassandra node’s “hot” dataset. If you don’t have enough cache, MISSES will -be high and performance will be slow. If you have enough cache, MISSES will -be low and performance will be fast (as almost all reads are being served out -of memory).

-

You can also measure disk latency distributions using biolatency -(biolatency examples) -to get an idea of how slow Cassandra will be when reads miss the OS page Cache -and have to hit disks:

-
$ sudo /usr/share/bcc/tools/biolatency -D 10
-Tracing block device I/O... Hit Ctrl-C to end.
-
-
-disk = 'sda'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 12       |****************************************|
-        32 -> 63         : 9        |******************************          |
-        64 -> 127        : 1        |***                                     |
-       128 -> 255        : 3        |**********                              |
-       256 -> 511        : 7        |***********************                 |
-       512 -> 1023       : 2        |******                                  |
-
-disk = 'sdc'
-     usecs               : count     distribution
-         0 -> 1          : 0        |                                        |
-         2 -> 3          : 0        |                                        |
-         4 -> 7          : 0        |                                        |
-         8 -> 15         : 0        |                                        |
-        16 -> 31         : 0        |                                        |
-        32 -> 63         : 0        |                                        |
-        64 -> 127        : 41       |************                            |
-       128 -> 255        : 17       |*****                                   |
-       256 -> 511        : 13       |***                                     |
-       512 -> 1023       : 2        |                                        |
-      1024 -> 2047       : 0        |                                        |
-      2048 -> 4095       : 0        |                                        |
-      4096 -> 8191       : 56       |*****************                       |
-      8192 -> 16383      : 131      |****************************************|
-     16384 -> 32767      : 9        |**                                      |
-
-
-

In this case most ios on the data drive (sdc) are fast, but many take -between 8 and 16 milliseconds.

-

Finally biosnoop (examples) -can be used to dive even deeper and see per IO latencies:

-
$ sudo /usr/share/bcc/tools/biosnoop | grep java | head
-0.000000000    java           17427  sdc     R  3972458600 4096      13.58
-0.000818000    java           17427  sdc     R  3972459408 4096       0.35
-0.007098000    java           17416  sdc     R  3972401824 4096       5.81
-0.007896000    java           17416  sdc     R  3972489960 4096       0.34
-0.008920000    java           17416  sdc     R  3972489896 4096       0.34
-0.009487000    java           17427  sdc     R  3972401880 4096       0.32
-0.010238000    java           17416  sdc     R  3972488368 4096       0.37
-0.010596000    java           17427  sdc     R  3972488376 4096       0.34
-0.011236000    java           17410  sdc     R  3972488424 4096       0.32
-0.011825000    java           17427  sdc     R  3972488576 16384      0.65
-... time passes
-8.032687000    java           18279  sdc     R  10899712  122880     3.01
-8.033175000    java           18279  sdc     R  10899952  8192       0.46
-8.073295000    java           18279  sdc     R  23384320  122880     3.01
-8.073768000    java           18279  sdc     R  23384560  8192       0.46
-
-
-

With biosnoop you see every single IO and how long it takes. This data can be used to construct the latency distributions in biolatency but can also be used to better understand how disk latency affects performance. For example this particular drive takes ~3ms to service a memory mapped read due to the large default value (128kb) of read_ahead_kb. To improve point read performance you may want to decrease read_ahead_kb on fast data volumes such as SSDs, while keeping a higher value like the 128kb default for HDs. There are tradeoffs involved, see the queue-sysfs docs for more information, but regardless biosnoop is useful for understanding how Cassandra uses drives.
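A minimal sketch of checking and lowering readahead on the sdc data drive from the example above (the 8kb value is only illustrative, and changes made through sysfs do not persist across reboots):

$ cat /sys/block/sdc/queue/read_ahead_kb
$ echo 8 | sudo tee /sys/block/sdc/queue/read_ahead_kb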

-
-
-

vmtouch

-

Sometimes it’s useful to know how much of the Cassandra data files are being -cached by the OS. A great tool for answering this question is -vmtouch.

-

First install it:

-
$ git clone https://github.com/hoytech/vmtouch.git
-$ cd vmtouch
-$ make
-
-
-

Then run it on the Cassandra data directory:

-
$ ./vmtouch /var/lib/cassandra/data/
-           Files: 312
-     Directories: 92
-  Resident Pages: 62503/64308  244M/251M  97.2%
-         Elapsed: 0.005657 seconds
-
-
-

In this case almost the entire dataset is hot in OS page Cache. Generally -speaking the percentage doesn’t really matter unless reads are missing the -cache (per e.g. cachestat), in which case having -additional memory may help read performance.

-
-
-

CPU Flamegraphs

-

Cassandra often uses a lot of CPU, but telling what it is doing can prove difficult. One of the best ways to analyze Cassandra on-CPU time is to use CPU Flamegraphs, which display in a useful way which areas of Cassandra code are using CPU. This may help narrow down a compaction problem to a “compaction problem dropping tombstones” or just generally help you narrow down what Cassandra is doing while it is having an issue. To get CPU flamegraphs follow the instructions for Java Flamegraphs.

-

Generally:

-
    -
  1. Enable the -XX:+PreserveFramePointer option in Cassandra’s jvm.options configuration file (see the sketch after this list). This has a negligible performance impact but allows you to actually see what Cassandra is doing.
  2. Run perf to get some data.
  3. Send that data through the relevant scripts in the FlameGraph toolset and convert the data into a pretty flamegraph. View the resulting SVG image in a browser or other image viewer.
-
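A minimal sketch of step 1, assuming you are in the Cassandra installation directory and that conf/jvm.options is the options file your version reads:

$ echo "-XX:+PreserveFramePointer" >> conf/jvm.options
$ # restart the Cassandra process for the flag to take effect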

For example just cloning straight off github we first install the -perf-map-agent to the location of our JVMs (assumed to be -/usr/lib/jvm):

-
$ sudo bash
-$ export JAVA_HOME=/usr/lib/jvm/java-8-oracle/
-$ cd /usr/lib/jvm
-$ git clone --depth=1 https://github.com/jvm-profiling-tools/perf-map-agent
-$ cd perf-map-agent
-$ cmake .
-$ make
-
-
-

Now to get a flamegraph:

-
$ git clone --depth=1 https://github.com/brendangregg/FlameGraph
-$ sudo bash
-$ cd FlameGraph
-$ # Record traces of Cassandra and map symbols for all java processes
-$ perf record -F 49 -a -g -p <CASSANDRA PID> -- sleep 30; ./jmaps
-$ # Translate the data
-$ perf script > cassandra_stacks
-$ cat cassandra_stacks | ./stackcollapse-perf.pl | grep -v cpu_idle | \
-    ./flamegraph.pl --color=java --hash > cassandra_flames.svg
-
-
-

The resulting SVG is searchable, zoomable, and generally easy to introspect -using a browser.

-
-
-

Packet Capture

-

Sometimes you have to understand what queries a Cassandra node is performing -right now to troubleshoot an issue. For these times trusty packet capture -tools like tcpdump and Wireshark can be very helpful to dissect packet captures. -Wireshark even has native CQL support although it sometimes has -compatibility issues with newer Cassandra protocol releases.

-

To get a packet capture first capture some packets:

-
$ sudo tcpdump -U -s0 -i <INTERFACE> -w cassandra.pcap -n "tcp port 9042"
-
-
-

Now open it up with wireshark:

-
$ wireshark cassandra.pcap
-
-
-

If you don’t see CQL-like statements, try telling Wireshark to decode as CQL by right clicking on a packet going to 9042 -> Decode as -> select CQL from the dropdown for port 9042.

-

If you don’t want to do this manually or use a GUI, you can also use something -like cqltrace to ease obtaining and -parsing CQL packet captures.

-
-
-
-
-
-
-
\ No newline at end of file diff --git a/src/doc/latest b/src/doc/latest deleted file mode 120000 index e44560ef0..000000000 --- a/src/doc/latest +++ /dev/null @@ -1 +0,0 @@ -4.0-alpha5 \ No newline at end of file diff --git a/src/doc/old/CQL-2.1.html b/src/doc/old/CQL-2.1.html deleted file mode 100644 index c5d73d364..000000000 --- a/src/doc/old/CQL-2.1.html +++ /dev/null @@ -1,456 +0,0 @@ -CQL

Cassandra Query Language (CQL) v3.2.1

  1. Cassandra Query Language (CQL) v3.2.1
    1. CQL Syntax
      1. Preamble
      2. Conventions
      3. Identifiers and keywords
      4. Constants
      5. Comments
      6. Statements
      7. Prepared Statement
    2. Data Definition
      1. CREATE KEYSPACE
      2. USE
      3. ALTER KEYSPACE
      4. DROP KEYSPACE
      5. CREATE TABLE
      6. ALTER TABLE
      7. DROP TABLE
      8. TRUNCATE
      9. CREATE INDEX
      10. DROP INDEX
      11. CREATE TYPE
      12. ALTER TYPE
      13. DROP TYPE
      14. CREATE TRIGGER
      15. DROP TRIGGER
    3. Data Manipulation
      1. INSERT
      2. UPDATE
      3. DELETE
      4. BATCH
    4. Queries
      1. SELECT
    5. Database Users
      1. CREATE USER
      2. ALTER USER
      3. DROP USER
      4. LIST USERS
    6. Data Control
      1. Permissions
      2. GRANT PERMISSION
      3. REVOKE PERMISSION
    7. Data Types
      1. Working with dates
      2. Counters
      3. Working with collections
    8. Functions
      1. Token
      2. Uuid
      3. Timeuuid functions
      4. Blob conversion functions
    9. Appendix A: CQL Keywords
    10. Appendix B: CQL Reserved Types
    11. Changes
      1. 3.2.1
      2. 3.2.0
      3. 3.1.7
      4. 3.1.6
      5. 3.1.5
      6. 3.1.4
      7. 3.1.3
      8. 3.1.2
      9. 3.1.1
      10. 3.1.0
      11. 3.0.5
      12. 3.0.4
      13. 3.0.3
      14. 3.0.2
      15. 3.0.1
    12. Versioning

CQL Syntax

Preamble

This document describes the Cassandra Query Language (CQL) version 3. CQL v3 is not backward compatible with CQL v2 and differs from it in numerous ways. Note that this document describes the last version of the language. However, the changes section provides the diff between the different versions of CQL v3.

CQL v3 offers a model very close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL. But please note that as such, they do not refer to the concept of rows and columns found in the internal implementation of Cassandra and in the thrift and CQL v2 API.

Conventions

To aid in specifying the CQL syntax, we will use the following conventions in this document:

  • Language rules will be given in a BNF-like notation:
<start> ::= TERMINAL <non-terminal1> <non-terminal1>
-
  • Nonterminal symbols will have <angle brackets>.
  • As additional shortcut notations to BNF, we’ll use traditional regular expression’s symbols (?, + and *) to signify that a given symbol is optional and/or can be repeated. We’ll also allow parentheses to group symbols and the [<characters>] notation to represent any one of <characters>.
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the last column definition in a CREATE TABLE statement is optional but supported if present, even though the provided grammar in this document suggests it is not supported.
  • Sample code will be provided in a code block:
SELECT sample_usage FROM cql;
-
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.

Identifiers and keywords

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language and most are reserved. The list of those keywords can be found in Appendix A.

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID for instance. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

There is a second kind of identifiers called quoted identifiers defined by enclosing an arbitrary sequence of characters in double-quotes("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column, while select would raise a parse error. Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is equivalent to the unquoted identifier obtained by removing the double-quote (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.

Constants

CQL defines the following kind of constants: strings, integers, floats, booleans, uuids and blobs:

  • A string constant is an arbitrary sequence of characters enclosed by single-quotes ('). One can include a single-quote in a string by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted identifiers that use double-quotes.
  • An integer constant is defined by '-'?[0-9]+.
  • A float constant is defined by '-'?[0-9]+('.'[0-9]*)?([eE][+-]?[0-9+])?. On top of that, NaN and Infinity are also float constants.
  • A boolean constant is either true or false up to case-insensitivity (i.e. True is a valid boolean constant).
  • A UUID constant is defined by hex{8}-hex{4}-hex{4}-hex{4}-hex{12} where hex is an hexadecimal character, e.g. [0-9a-fA-F] and {4} is the number of such characters.
  • A blob constant is an hexadecimal number defined by 0[xX](hex)+ where hex is an hexadecimal character, e.g. [0-9a-fA-F].

For how these constants are typed, see the data types section.

Comments

A comment in CQL is a line beginning with either a double dash (--) or a double slash (//).

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-

Statements

CQL consists of statements. As in SQL, these statements can be divided in 3 categories:

  • Data definition statements, that allow to set and change the way data is stored.
  • Data manipulation statements, that allow to change data
  • Queries, to look up data

All statements end with a semicolon (;) but that semicolon can be omitted when dealing with a single statement. The supported statements are described in the following sections. When describing the grammar of said statements, we will reuse the non-terminal symbols defined below:

<identifier> ::= any quoted or unquoted identifier, excluding reserved keywords
- <tablename> ::= (<identifier> '.')? <identifier>
-
-    <string> ::= a string constant
-   <integer> ::= an integer constant
-     <float> ::= a float constant
-    <number> ::= <integer> | <float>
-      <uuid> ::= a uuid constant
-   <boolean> ::= a boolean constant
-       <hex> ::= a blob constant
-
-  <constant> ::= <string>
-               | <number>
-               | <uuid>
-               | <boolean>
-               | <hex>
-  <variable> ::= '?'
-               | ':' <identifier>
-      <term> ::= <constant>
-               | <collection-literal>
-               | <variable>
-               | <function> '(' (<term> (',' <term>)*)? ')'
-
-  <collection-literal> ::= <map-literal>
-                         | <set-literal>
-                         | <list-literal>
-         <map-literal> ::= '{' ( <term> ':' <term> ( ',' <term> ':' <term> )* )? '}'
-         <set-literal> ::= '{' ( <term> ( ',' <term> )* )? '}'
-        <list-literal> ::= '[' ( <term> ( ',' <term> )* )? ']'
-
-    <function> ::= <ident>
-
-  <properties> ::= <property> (AND <property>)*
-    <property> ::= <identifier> '=' ( <identifier> | <constant> | <map-literal> )
-


Please note that not every possible productions of the grammar above will be valid in practice. Most notably, <variable> and nested <collection-literal> are currently not allowed inside <collection-literal>.

A <variable> can be either anonymous (a question mark (?)) or named (an identifier preceded by :). Both declare a bind variable for prepared statements. The only difference between an anonymous and a named variable is that a named one will be easier to refer to (how exactly depends on the client driver used).

The <properties> production is used by statements that create and alter keyspaces and tables. Each <property> is either a simple one, in which case it just has a value, or a map one, in which case its value is a map grouping sub-options. The following will refer to one or the other as the kind (simple or map) of the property.

A <tablename> will be used to identify a table. This is an identifier representing the table name that can be preceded by a keyspace name. The keyspace name, if provided, allows identifying a table in a keyspace other than the currently active one (the currently active keyspace is set through the USE statement).

For supported <function>, see the section on functions.

Prepared Statement

CQL supports prepared statements. Prepared statement is an optimization that allows to parse a query only once but execute it multiple times with different concrete values.

In a statement, each time a column value is expected (in the data manipulation and query statements), a <variable> (see above) can be used instead. A statement with bind variables must then be prepared. Once it has been prepared, it can be executed by providing concrete values for the bind variables. The exact procedure to prepare a statement and execute a prepared statement depends on the CQL driver used and is beyond the scope of this document.

In addition to providing column values, bind markers may be used to provide values for LIMIT, TIMESTAMP, and TTL clauses. If anonymous bind markers are used, the names for the query parameters will be [limit], [timestamp], and [ttl], respectively.
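For instance, the following illustrative statements use bind markers for column values, a LIMIT and a TTL, against the timeline table defined later in this document:

SELECT * FROM timeline WHERE userid = ? LIMIT ?;
INSERT INTO timeline (userid, posted_month, posted_time, body, posted_by) VALUES (?, ?, ?, ?, ?) USING TTL ?;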

Data Definition

CREATE KEYSPACE

Syntax:

<create-keyspace-stmt> ::= CREATE KEYSPACE (IF NOT EXISTS)? <identifier> WITH <properties>
-


Sample:

CREATE KEYSPACE Excelsior
-           WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE Excalibur
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-            AND durable_writes = false;
-


The CREATE KEYSPACE statement creates a new top-level keyspace. A keyspace is a namespace that defines a replication strategy and some options for a set of tables. Valid keyspace names are identifiers composed exclusively of alphanumerical characters whose length is less than or equal to 32. Note that as identifiers, keyspace names are case insensitive: use a quoted identifier for case sensitive keyspace names.

The supported <properties> for CREATE KEYSPACE are:

name kind mandatory default description
replication map yes The replication strategy and options to use for the keyspace.
durable_writes simple no true Whether to use the commit log for updates on this keyspace (disable this option at your own risk!).

The replication <property> is mandatory. It must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on that replication strategy class. By default, Cassandra supports the following 'class':

  • 'SimpleStrategy': A simple strategy that defines a simple replication factor for the whole cluster. The only sub-options supported is 'replication_factor' to define that replication factor and is mandatory.
  • 'NetworkTopologyStrategy': A replication strategy that allows to set the replication factor independently for each data-center. The rest of the sub-options are key-value pairs where each time the key is the name of a datacenter and the value the replication factor for that data-center.
  • 'OldNetworkTopologyStrategy': A legacy replication strategy. You should avoid this strategy for new keyspaces and prefer 'NetworkTopologyStrategy'.

Attempting to create an already existing keyspace will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the keyspace already exists.

USE

Syntax:

<use-stmt> ::= USE <identifier>
-

Sample:

USE myApp;
-

The USE statement takes an existing keyspace name as argument and set it as the per-connection current working keyspace. All subsequent keyspace-specific actions will be performed in the context of the selected keyspace, unless otherwise specified, until another USE statement is issued or the connection terminates.

ALTER KEYSPACE

Syntax:

<alter-keyspace-stmt> ::= ALTER KEYSPACE <identifier> WITH <properties>
-


Sample:

ALTER KEYSPACE Excelsior
-          WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-


The ALTER KEYSPACE statement alters the properties of an existing keyspace. The supported <properties> are the same as for the CREATE KEYSPACE statement.

DROP KEYSPACE

Syntax:

<drop-keyspace-stmt> ::= DROP KEYSPACE ( IF EXISTS )? <identifier>
-

Sample:

DROP KEYSPACE myApp;
-

A DROP KEYSPACE statement results in the immediate, irreversible removal of an existing keyspace, including all column families in it, and all data contained in those column families.

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used in which case the operation is a no-op.

CREATE TABLE

Syntax:

<create-table-stmt> ::= CREATE ( TABLE | COLUMNFAMILY ) ( IF NOT EXISTS )? <tablename>
-                          '(' <column-definition> ( ',' <column-definition> )* ')'
-                          ( WITH <option> ( AND <option>)* )?
-
-<column-definition> ::= <identifier> <type> ( STATIC )? ( PRIMARY KEY )?
-                      | PRIMARY KEY '(' <partition-key> ( ',' <identifier> )* ')'
-
-<partition-key> ::= <identifier>
-                  | '(' <identifier> (',' <identifier> )* ')'
-
-<option> ::= <property>
-           | COMPACT STORAGE
-           | CLUSTERING ORDER
-


Sample:

CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records'
-   AND read_repair_chance = 1.0;
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-


The CREATE TABLE statement creates a new table. Each such table is a set of rows (usually representing related entities) for which it defines a number of properties. A table is defined by a name; it defines the columns composing the rows of the table and has a number of options. Note that the CREATE COLUMNFAMILY syntax is supported as an alias for CREATE TABLE (for historical reasons).

Attempting to create an already existing table will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the table already exists.

<tablename>

Valid table names are the same as valid keyspace names (up to 32 characters long alphanumerical identifiers). If the table name is provided alone, the table is created within the current keyspace (see USE), but if it is prefixed by an existing keyspace name (see <tablename> grammar), it is created in the specified keyspace (but does not change the current keyspace).

<column-definition>

A CREATE TABLE statement defines the columns that rows of the table can have. A column is defined by its name (an identifier) and its type (see the data types section for more details on allowed types and their properties).

Within a table, a row is uniquely identified by its PRIMARY KEY (or more simply the key), and hence all table definitions must define a PRIMARY KEY (and only one). A PRIMARY KEY is composed of one or more of the columns defined in the table. If the PRIMARY KEY is only one column, this can be specified directly after the column definition. Otherwise, it must be specified by following PRIMARY KEY by the comma-separated list of column names composing the key within parenthesis. Note that:

CREATE TABLE t (
-    k int PRIMARY KEY,
-    other text
-)
-

is equivalent to

CREATE TABLE t (
-    k int,
-    other text,
-    PRIMARY KEY (k)
-)
-

Partition key and clustering columns

In CQL, the order in which columns are defined for the PRIMARY KEY matters. The first column of the key is called the partition key. It has the property that all the rows sharing the same partition key (even across tables in fact) are stored on the same physical node. Also, insertion/update/deletion on rows sharing the same partition key for a given table are performed atomically and in isolation. Note that it is possible to have a composite partition key, i.e. a partition key formed of multiple columns, using an extra set of parentheses to define which columns form the partition key.
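For example, a hypothetical variant of the timeline table (defined in the CREATE TABLE samples above) that partitions by both user and month would declare a composite partition key with an extra set of parentheses:

CREATE TABLE timeline_by_month (
    userid uuid,
    posted_month int,
    posted_time uuid,
    body text,
    PRIMARY KEY ((userid, posted_month), posted_time)
);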

The remaining columns of the PRIMARY KEY definition, if any, are called clustering columns. On a given physical node, rows for a given partition key are stored in the order induced by the clustering columns, making the retrieval of rows in that clustering order particularly efficient (see SELECT).

STATIC columns

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the rows belonging to the same partition (having the same partition key). For instance, in:

CREATE TABLE test (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-INSERT INTO test(pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO test(pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-SELECT * FROM test WHERE pk=0 AND t=0;
-

the last query will return 'static1' as the value for s, since s is static and thus the 2nd insertion modified this “shared” value. Note however that static columns are only static within a given partition, and if in the example above both rows were in different partitions (i.e. if they had different values for pk), then the 2nd insertion would not have modified the value of s for the first row.

A few restrictions apply to when static columns are allowed:

  • tables with the COMPACT STORAGE option (see below) cannot have them
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition has only one row, and so every column is inherently static).
  • only non PRIMARY KEY columns can be static

<option>

The CREATE TABLE statement supports a number of options that control the configuration of a new table. These options can be specified after the WITH keyword.

The first of these options is COMPACT STORAGE. This option is mainly targeted towards backward compatibility for definitions created before CQL3 (see www.datastax.com/dev/blog/thrift-to-cql3 for more details). The option also provides a slightly more compact layout of data on disk, but at the price of diminished flexibility and extensibility for the table. Most notably, COMPACT STORAGE tables cannot have collections nor static columns, and a COMPACT STORAGE table with at least one clustering column supports exactly one (as in not 0 nor more than 1) column not part of the PRIMARY KEY definition (which implies in particular that you cannot add nor remove columns after creation). For those reasons, COMPACT STORAGE is not recommended outside of the backward compatibility reasons mentioned above.

Another option is CLUSTERING ORDER. It allows defining the ordering of rows on disk. It takes the list of clustering column names with, for each of them, the on-disk order (ascending or descending). Note that this option affects which ORDER BY clauses are allowed during SELECT.
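For example, a hypothetical table keeping the most recent events first within each partition could be declared as:

CREATE TABLE events_by_day (
    day text,
    event_time timestamp,
    payload text,
    PRIMARY KEY (day, event_time)
) WITH CLUSTERING ORDER BY (event_time DESC);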

Table creation supports the following other <property>:

option | kind | default | description
comment | simple | none | A free-form, human-readable comment.
read_repair_chance | simple | 0.1 | The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpose of read repairs.
dclocal_read_repair_chance | simple | 0 | The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center as the read coordinator for the purpose of read repairs.
gc_grace_seconds | simple | 864000 | Time to wait before garbage collecting tombstones (deletion markers).
bloom_filter_fp_chance | simple | 0.00075 | The target probability of false positives for the sstable bloom filters. The bloom filters will be sized to provide the given probability (thus lowering this value impacts the size of the bloom filters in memory and on disk).
default_time_to_live | simple | 0 | The default expiration time (“TTL”) in seconds for a table.
compaction | map | see below | Compaction options, see below.
compression | map | see below | Compression options, see below.
caching | map | see below | Caching options, see below.

Compaction options

The compaction property must at least define the 'class' sub-option, which defines the compaction strategy class to use. The default supported classes are 'SizeTieredCompactionStrategy', 'LeveledCompactionStrategy' and 'DateTieredCompactionStrategy'. A custom strategy can be provided by specifying the full class name as a string constant. The rest of the sub-options depend on the chosen class. The sub-options supported by the default classes are:

option | supported compaction strategy | default | description
enabled | all | true | A boolean denoting whether compaction should be enabled or not.
tombstone_threshold | all | 0.2 | A ratio such that if an sstable has more than this ratio of gcable tombstones over all contained columns, the sstable will be compacted (with no other sstables) for the purpose of purging those tombstones.
tombstone_compaction_interval | all | 1 day | The minimum time to wait after an sstable's creation time before considering it for “tombstone compaction”, where “tombstone compaction” is the compaction triggered if the sstable has more gcable tombstones than tombstone_threshold.
unchecked_tombstone_compaction | all | false | Setting this to true enables more aggressive tombstone compactions: single-sstable tombstone compactions will run without checking how likely it is that they will be successful.
min_sstable_size | SizeTieredCompactionStrategy | 50MB | The size tiered strategy groups SSTables to compact in buckets. A bucket groups SSTables that differ by less than 50% in size. However, for small sizes, this would result in a bucketing that is too fine grained. min_sstable_size defines a size threshold (in bytes) below which all SSTables belong to one unique bucket.
min_threshold | SizeTieredCompactionStrategy | 4 | Minimum number of SSTables needed to start a minor compaction.
max_threshold | SizeTieredCompactionStrategy | 32 | Maximum number of SSTables processed by one minor compaction.
bucket_low | SizeTieredCompactionStrategy | 0.5 | The size tiered strategy considers sstables to be within the same bucket if their size is within [average_size * bucket_low, average_size * bucket_high] (i.e. the default groups sstables whose sizes diverge by at most 50%).
bucket_high | SizeTieredCompactionStrategy | 1.5 | The size tiered strategy considers sstables to be within the same bucket if their size is within [average_size * bucket_low, average_size * bucket_high] (i.e. the default groups sstables whose sizes diverge by at most 50%).
sstable_size_in_mb | LeveledCompactionStrategy | 5MB | The target size (in MB) for sstables in the leveled strategy. Note that while sstable sizes should stay less than or equal to sstable_size_in_mb, it is possible to exceptionally have a larger sstable, as during compaction data for a given partition key is never split into 2 sstables.
timestamp_resolution | DateTieredCompactionStrategy | MICROSECONDS | The timestamp resolution used when inserting data; could be MILLISECONDS, MICROSECONDS etc. (should be understandable by Java TimeUnit). Don’t change this unless you do mutations with USING TIMESTAMP (or equivalent directly in the client).
base_time_seconds | DateTieredCompactionStrategy | 60 | The base size of the time windows.
max_sstable_age_days | DateTieredCompactionStrategy | 365 | SSTables only containing data that is older than this will never be compacted.
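As a sketch of the syntax (the values here are only illustrative, and remember that this replaces any previously set compaction sub-options):

ALTER TABLE timeline
WITH compaction = { 'class' : 'SizeTieredCompactionStrategy', 'min_threshold' : 6 };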

Compression options

For the compression property, the following sub-options are available:

option | default | description
sstable_compression | LZ4Compressor | The compression algorithm to use. The default compressors are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use an empty string ('') to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
chunk_length_kb | 64KB | On disk, SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increase the minimum size of data to be read from disk for a read.
crc_check_chance | 1.0 | When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replicas. This option defines the probability with which those checksums are checked during reads. By default they are always checked. Set to 0 to disable checksum checking, or to 0.5, for instance, to check them on every other read.
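For illustration (the table name and values are arbitrary), compression sub-options are provided as a map after WITH:

CREATE TABLE sensor_data (
    id uuid PRIMARY KEY,
    payload blob
) WITH compression = { 'sstable_compression' : 'DeflateCompressor', 'chunk_length_kb' : 64 };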

Caching options

For the caching property, the following sub-options are available:

option | default | description
keys | ALL | Whether to cache keys (“key cache”) for this table. Valid values are: ALL and NONE.
rows_per_partition | NONE | The number of rows to cache per partition (“row cache”). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.
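A brief sketch of the syntax, with illustrative values:

ALTER TABLE users
WITH caching = { 'keys' : 'ALL', 'rows_per_partition' : '10' };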

Other considerations:

  • When inserting a given row, not all columns need to be defined (except for those that are part of the key), and missing columns occupy no space on disk. Furthermore, adding new columns (see ALTER TABLE) is a constant time operation. There is thus no need to try to anticipate future usage (or to cry when you haven’t) when creating a table.

ALTER TABLE

Syntax:

<alter-table-stmt> ::= ALTER (TABLE | COLUMNFAMILY) <tablename> <instruction>
-
-<instruction> ::= ALTER <identifier> TYPE <type>
-                | ADD   <identifier> <type>
-                | DROP  <identifier>
-                | WITH  <option> ( AND <option> )*
-


Sample:

ALTER TABLE addamsFamily
-ALTER lastKnownLocation TYPE uuid;
-
-ALTER TABLE addamsFamily
-ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-WITH comment = 'A most excellent and useful column family'
- AND read_repair_chance = 0.2;
-


The ALTER statement is used to manipulate table definitions. It allows for adding new columns, dropping existing ones, changing the type of existing columns, or updating the table options. As with table creation, ALTER COLUMNFAMILY is allowed as an alias for ALTER TABLE.

The <tablename> is the table name optionally preceded by the keyspace name. The <instruction> defines the alteration to perform:

  • ALTER: Update the type of a given defined column. Note that the type of the clustering columns cannot be modified as it induces the on-disk ordering of rows. Columns on which a secondary index is defined have the same restriction. Other columns are free from those restrictions (no validation of existing data is performed), but it is usually a bad idea to change the type to a non-compatible one, unless no data have been inserted for that column yet, as this could confuse CQL drivers/tools.
  • ADD: Adds a new column to the table. The <identifier> for the new column must not conflict with an existing column. Moreover, columns cannot be added to tables defined with the COMPACT STORAGE option.
  • DROP: Removes a column from the table. Dropped columns will immediately become unavailable in queries and will not be included in compacted sstables in the future. If a column is re-added, queries won’t return values written before the column was last dropped. It is assumed that timestamps represent actual time, so if this is not your case, you should NOT re-add previously dropped columns. Columns can’t be dropped from tables defined with the COMPACT STORAGE option.
  • WITH: Allows updating the options of the table. The supported <option> (and syntax) are the same as for the CREATE TABLE statement, except that COMPACT STORAGE is not supported. Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.

DROP TABLE

Syntax:

<drop-table-stmt> ::= DROP TABLE ( IF EXISTS )? <tablename>
-

Sample:

DROP TABLE worldSeriesAttendees;
-

The DROP TABLE statement results in the immediate, irreversible removal of a table, including all data contained in it. As for table creation, DROP COLUMNFAMILY is allowed as an alias for DROP TABLE.

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the operation is a no-op.

TRUNCATE

Syntax:

<truncate-stmt> ::= TRUNCATE ( TABLE | COLUMNFAMILY )? <tablename>
-

Sample:

TRUNCATE superImportantData;
-

The TRUNCATE statement permanently removes all data from a table.

CREATE INDEX

Syntax:

<create-index-stmt> ::= CREATE ( CUSTOM )? INDEX ( IF NOT EXISTS )? ( <indexname> )?
-                            ON <tablename> '(' <index-identifier> ')'
-                            ( USING <string> ( WITH OPTIONS = <map-literal> )? )?
-
-<index-identifier> ::= <identifier>
-                     | keys( <identifier> )
-


Sample:

CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed automatically at insertion time.

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the index already exists.

Indexes on Map Keys

When creating an index on a map column, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.
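For example, assuming a users table with a favs map column whose keys are indexed (as in the CREATE INDEX ON users (keys(favs)) sample above), a query such as the following becomes possible:

SELECT * FROM users WHERE favs CONTAINS KEY 'author';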

DROP INDEX

Syntax:

<drop-index-stmt> ::= DROP INDEX ( IF EXISTS )? ( <keyspace> '.' )? <identifier>
-

Sample:

DROP INDEX userIndex;
-
-DROP INDEX userkeyspace.address_index;
-


The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index name, which may optionally specify the keyspace of the index.

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

CREATE TYPE

Syntax:

<create-type-stmt> ::= CREATE TYPE ( IF NOT EXISTS )? <typename>
-                         '(' <field-definition> ( ',' <field-definition> )* ')'
-
-<typename> ::= ( <keyspace-name> '.' )? <identifier>
-
-<field-definition> ::= <identifier> <type>
-
-


Sample:

CREATE TYPE address (
-    street_name text,
-    street_number int,
-    city text,
-    state text,
-    zip int
-)
-
-CREATE TYPE work_and_home_addresses (
-    home_address address,
-    work_address address
-)
-


The CREATE TYPE statement creates a new user-defined type. Each type is a set of named, typed fields. Field types may be any valid type, including collections and other existing user-defined types.

Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the type already exists.

<typename>

Valid type names are identifiers. The names of existing CQL types and reserved type names may not be used.

If the type name is provided alone, the type is created within the current keyspace (see USE). If it is prefixed by an existing keyspace name, the type is created within the specified keyspace instead of the current keyspace.
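For instance (the keyspace name here is hypothetical):

CREATE TYPE directory.address (
    street_name text,
    city text
);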

ALTER TYPE

Syntax:

<alter-type-stmt> ::= ALTER TYPE <typename> <instruction>
-
-<instruction> ::= ALTER <field-name> TYPE <type>
-                | ADD <field-name> <type>
-                | RENAME <field-name> TO <field-name> ( AND <field-name> TO <field-name> )*
-


Sample:

ALTER TYPE address ALTER zip TYPE varint
-
-ALTER TYPE address ADD country text
-
-ALTER TYPE address RENAME zip TO zipcode AND street_name TO street
-


The ALTER TYPE statement is used to manipulate type definitions. It allows for adding new fields, renaming existing fields, or changing the type of existing fields.

When altering the type of a column, the new type must be compatible with the previous type.

DROP TYPE

Syntax:

<drop-type-stmt> ::= DROP TYPE ( IF EXISTS )? <typename>
-


The DROP TYPE statement results in the immediate, irreversible removal of a type. Attempting to drop a type that is still in use by another type or a table will result in an error.

If the type does not exist, an error will be returned unless IF EXISTS is used, in which case the operation is a no-op.

CREATE TRIGGER

Syntax:

<create-trigger-stmt> ::= CREATE TRIGGER ( IF NOT EXISTS )? ( <triggername> )?
-                            ON <tablename> 
-                            USING <string>
-
-


Sample:

CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. Place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it is loaded during cluster startup and must exist on every node that participates in the cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

DROP TRIGGER

Syntax:

<drop-trigger-stmt> ::= DROP TRIGGER ( IF EXISTS )? ( <triggername> )?
-                            ON <tablename>
-
-


Sample:

DROP TRIGGER myTrigger ON myTable;
-

The DROP TRIGGER statement removes the registration of a trigger created using CREATE TRIGGER.

Data Manipulation

INSERT

Syntax:

<insertStatement> ::= INSERT INTO <tablename>
-                             '(' <identifier> ( ',' <identifier> )* ')'
-                      VALUES '(' <term-or-literal> ( ',' <term-or-literal> )* ')'
-                      ( IF NOT EXISTS )?
-                      ( USING <option> ( AND <option> )* )?
-
-<term-or-literal> ::= <term>
-                    | <collection-literal>
-
-<option> ::= TIMESTAMP <integer>
-           | TTL <integer>
-


Sample:

INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-USING TTL 86400;
-

The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by its PRIMARY KEY, at least the columns composing it must be specified.

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means of knowing whether a creation or an update occurred.

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the insertion. But please note that using IF NOT EXISTS will incur a non-negligible performance cost (internally, Paxos will be used) so this should be used sparingly.
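A sketch of such a conditional insertion, reusing the NerdMovies sample table:

INSERT INTO NerdMovies (movie, director, main_actor, year)
                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
IF NOT EXISTS;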

All updates for an INSERT are applied atomically and in isolation.

Please refer to the UPDATE section for information on the <option> available and to the collections section for use of <collection-literal>. Also note that INSERT does not support counters, while UPDATE does.

UPDATE

Syntax:

<update-stmt> ::= UPDATE <tablename>
-                  ( USING <option> ( AND <option> )* )?
-                  SET <assignment> ( ',' <assignment> )*
-                  WHERE <where-clause>
-                  ( IF <condition> ( AND condition )* )?
-
-<assignment> ::= <identifier> '=' <term>
-               | <identifier> '=' <identifier> ('+' | '-') (<int-term> | <set-literal> | <list-literal>)
-               | <identifier> '=' <identifier> '+' <map-literal>
-               | <identifier> '[' <term> ']' '=' <term>
-
-<condition> ::= <identifier> <op> <term>
-              | <identifier> IN (<variable> | '(' ( <term> ( ',' <term> )* )? ')')
-              | <identifier> '[' <term> ']' <op> <term>
-              | <identifier> '[' <term> ']' IN <term>
-
-<op> ::= '<' | '<=' | '=' | '!=' | '>=' | '>'
-
-<where-clause> ::= <relation> ( AND <relation> )*
-
-<relation> ::= <identifier> '=' <term>
-             | <identifier> IN '(' ( <term> ( ',' <term> )* )? ')'
-             | <identifier> IN <variable>
-
-<option> ::= TIMESTAMP <integer>
-           | TTL <integer>
-


Sample:

UPDATE NerdMovies USING TTL 400
-SET director = 'Joss Whedon',
-    main_actor = 'Nathan Fillion',
-    year = 2005
-WHERE movie = 'Serenity';
-
-UPDATE UserActions SET total = total + 2 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';
-


The UPDATE statement writes one or more columns for a given row in a table. The <where-clause> is used to select the row to update and must include all columns composing the PRIMARY KEY (the IN relation is only supported for the last column of the partition key). Other columns values are specified through <assignment> after the SET keyword.

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through the use of <condition>, see below): the row is created if none existed before, and updated otherwise. Furthermore, there is no means of knowing whether a creation or an update occurred.

It is however possible to use conditions on some columns through IF, in which case the row will not be updated unless the conditions are met. But please note that using IF conditions will incur a non-negligible performance cost (internally, Paxos will be used) so this should be used sparingly.
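For instance, reusing the NerdMovies sample table, the following update is only applied if the current director value matches:

UPDATE NerdMovies SET main_actor = 'Nathan Fillion'
WHERE movie = 'Serenity'
IF director = 'Joss Whedon';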

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

The c = c + 3 form of <assignment> is used to increment/decrement counters. The identifier after the ‘=’ sign must be the same as the one before the ‘=’ sign (only increment/decrement is supported on counters, not the assignment of a specific value).

The id = id + <collection-literal> and id[value1] = value2 forms of <assignment> are for collections. Please refer to the relevant section for more details.

<options>

The UPDATE and INSERT statements allow specifying the following options for the insertion:

  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • TTL: allows specifying an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL is specified in that update). By default, values never expire. A TTL of 0 or a negative one is equivalent to no TTL. Both options are illustrated in the example below.
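For illustration, both options can be combined in a single statement (the timestamp and TTL values below are arbitrary):

INSERT INTO NerdMovies (movie, director) VALUES ('Serenity', 'Joss Whedon')
USING TIMESTAMP 1405432869975000 AND TTL 86400;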

DELETE

Syntax:

<delete-stmt> ::= DELETE ( <selection> ( ',' <selection> )* )?
-                  FROM <tablename>
-                  ( USING TIMESTAMP <integer>)?
-                  WHERE <where-clause>
-                  ( IF ( EXISTS | ( <condition> ( AND <condition> )*) ) )?
-
-<selection> ::= <identifier> ( '[' <term> ']' )?
-
-<where-clause> ::= <relation> ( AND <relation> )*
-
-<relation> ::= <identifier> '=' <term>
-             | <identifier> IN '(' ( <term> ( ',' <term> )* )? ')'
-             | <identifier> IN <variable>
-
-<condition> ::= <identifier> <op> <term>
-              | <identifier> IN (<variable> | '(' ( <term> ( ',' <term> )* )? ')')
-              | <identifier> '[' <term> ']' <op> <term>
-              | <identifier> '[' <term> ']' IN <term>
-
-<op> ::= '<' | '<=' | '=' | '!=' | '>=' | '>'
-


Sample:

DELETE FROM NerdMovies USING TIMESTAMP 1240003134 WHERE movie = 'Serenity';
-
-DELETE phone FROM Users WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-


The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, only those columns are deleted from the row indicated by the <where-clause> (the id[value] syntax in <selection> is for collections; please refer to the collection section for more details). Otherwise, whole rows are removed. The <where-clause> specifies the key for the row(s) to delete (the IN relation is only supported for the last column of the partition key).

DELETE supports the TIMESTAMP option with the same semantics as in the UPDATE statement.

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

A DELETE operation can be conditioned using IF, as for UPDATE and INSERT. But please note that, as for the latter, this will incur a non-negligible performance cost (internally, Paxos will be used) and so should be used sparingly.
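A sketch of a conditional deletion on the NerdMovies sample table:

DELETE FROM NerdMovies WHERE movie = 'Serenity' IF EXISTS;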

BATCH

Syntax:

<batch-stmt> ::= BEGIN ( UNLOGGED | COUNTER )? BATCH
-                 ( USING <option> ( AND <option> )* )?
-                    <modification-stmt> ( ';' <modification-stmt> )*
-                 APPLY BATCH
-
-<modification-stmt> ::= <insert-stmt>
-                      | <update-stmt>
-                      | <delete-stmt>
-
-<option> ::= TIMESTAMP <integer>
-


Sample:

BEGIN BATCH
-  INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-  UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-  INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-  DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

  1. It saves network round-trips between the client and the server (and sometimes between the server coordinator and the replicas) when batching multiple updates.
  2. All updates in a BATCH belonging to a given partition key are performed in isolation.
  3. By default, all operations in the batch are performed as LOGGED, to ensure all mutations eventually complete (or none will). See the notes on UNLOGGED for more details.

Note that:

  • BATCH statements may only contain UPDATE, INSERT and DELETE statements.
  • Batches are not a full analogue for SQL transactions.
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp. Due to Cassandra’s conflict resolution procedure in the case of timestamp ties, operations may be applied in an order that is different from the order they are listed in the BATCH statement. To force a particular operation ordering, you must specify per-operation timestamps.

UNLOGGED

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note however that operations are only isolated within a single partition).

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.

COUNTER

Use the COUNTER option for batched counter updates. Unlike other updates in Cassandra, counter updates are not idempotent.

<option>

BATCH supports the TIMESTAMP option, with semantics similar to the one described in the UPDATE statement (the timestamp applies to all the statements inside the batch). However, if used, TIMESTAMP must not be used in the statements within the batch.

Queries

SELECT

Syntax:

<select-stmt> ::= SELECT <select-clause>
-                  FROM <tablename>
-                  ( WHERE <where-clause> )?
-                  ( ORDER BY <order-by> )?
-                  ( LIMIT <integer> )?
-                  ( ALLOW FILTERING )?
-
-<select-clause> ::= DISTINCT? <selection-list>
-                  | COUNT '(' ( '*' | '1' ) ')' (AS <identifier>)?
-
-<selection-list> ::= <selector> (AS <identifier>)? ( ',' <selector> (AS <identifier>)? )*
-                   | '*'
-
-<selector> ::= <identifier>
-             | WRITETIME '(' <identifier> ')'
-             | TTL '(' <identifier> ')'
-             | <function> '(' (<selector> (',' <selector>)*)? ')'
-
-<where-clause> ::= <relation> ( AND <relation> )*
-
-<relation> ::= <identifier> <op> <term>
-             | '(' <identifier> (',' <identifier>)* ')' <op> <term-tuple>
-             | <identifier> IN '(' ( <term> ( ',' <term>)* )? ')'
-             | '(' <identifier> (',' <identifier>)* ')' IN '(' ( <term-tuple> ( ',' <term-tuple>)* )? ')'
-             | TOKEN '(' <identifier> ( ',' <identifier>)* ')' <op> <term>
-
-<op> ::= '=' | '<' | '>' | '<=' | '>=' | CONTAINS | CONTAINS KEY
-<order-by> ::= <ordering> ( ',' <ordering> )*
-<ordering> ::= <identifier> ( ASC | DESC )?
-<term-tuple> ::= '(' <term> (',' <term>)* ')'
-


Sample:

SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT(*) FROM users;
-
-SELECT COUNT(*) AS user_count FROM users;
-
-


The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of rows, where each row contains the collection of columns corresponding to the query.

<select-clause>

The <select-clause> determines which columns need to be queried and returned in the result-set. It consists of either a comma-separated list of <selector>s or the wildcard character (*) to select all the columns defined for the table.

A <selector> is either a column name to retrieve, or a <function> of one or multiple column names. The functions allowed are the same as for <term> and are described in the function section. In addition to these generic functions, the WRITETIME (resp. TTL) function allows selecting the timestamp of when the column was inserted (resp. the time to live (in seconds) for the column, or null if the column has no expiration set).

Any <selector> can be aliased using the AS keyword (see examples). Please note that the <where-clause> and <order-by> clauses should refer to the columns by their original names and not by their aliases.

The COUNT keyword can be used with parenthesis enclosing *. If so, the query will return a single result: the number of rows matching the query. Note that COUNT(1) is supported as an alias.

<where-clause>

The <where-clause> specifies which rows must be queried. It is composed of relations on the columns that are part of the PRIMARY KEY and/or have a secondary index defined on them.

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered as an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows, and relations on them are restricted to those that select a contiguous (for the ordering) set of rows. For instance, given

CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-

The following query is allowed:

SELECT entry_title, content FROM posts WHERE userid='john doe' AND blog_title='John''s Blog' AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are set):

// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts WHERE userid='john doe' AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, token(-1) > token(0) in particular). Example:

SELECT * FROM posts WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full primary key.

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

SELECT * FROM posts WHERE userid='john doe' AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-

will request all rows that sort after the one having “John's Blog” as blog_title and ‘2012-01-01’ for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which wouldn’t be the case for:

SELECT * FROM posts WHERE userid='john doe' AND blog_title > 'John''s Blog' AND posted_at > '2012-01-01'
-

The tuple notation may also be used for IN clauses on CLUSTERING COLUMNS:

SELECT * FROM posts WHERE userid='john doe' AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01), ('Extreme Chess', '2014-06-01'))
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the map keys.

<order-by>

The ORDER BY option allows selecting the order of the returned results. It takes as argument a list of column names along with the order for each column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited (and depend on the table CLUSTERING ORDER):

  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
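For instance, with the posts table defined earlier (which uses the default, i.e. ascending, clustering order), the rows of a partition can be returned in reverse clustering order:

SELECT entry_title FROM posts WHERE userid = 'john doe' ORDER BY blog_title DESC, posted_at DESC;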

LIMIT

The LIMIT option to a SELECT statement limits the number of rows returned by a query.
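For example, using the posts table defined earlier:

SELECT * FROM posts WHERE userid = 'john doe' LIMIT 10;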

ALLOW FILTERING

By default, CQL only allows select queries that don’t involve “filtering” server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (in the sense defined above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on it) and country of residence:

CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-

Then the following queries are valid:

SELECT * FROM users;
-SELECT firstname, lastname FROM users WHERE birth_year = 1981;
-

because in both cases, Cassandra guarantees that the performance of these queries will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored. Nevertheless, the number of nodes will always be many orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

However, the following query will be rejected:

SELECT firstname, lastname FROM users WHERE birth_year = 1981 AND country = 'FR';
-

because Cassandra cannot guarantee that it won’t have to scan a large amount of data even if the result of the query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you “know what you are doing”, you can force the execution of this query by using ALLOW FILTERING and so the following query is valid:

SELECT firstname, lastname FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-

Database Users

CREATE USER

Syntax:

<create-user-statement> ::= CREATE USER ( IF NOT EXISTS )? <identifier> ( WITH PASSWORD <string> )? (<option>)?
-
-<option> ::= SUPERUSER
-           | NOSUPERUSER
-

Sample:

CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-

By default users do not possess SUPERUSER status.

Permissions on database resources (keyspaces and tables) are granted to users.
User names should be quoted if they contain non-alphanumeric characters.

Setting credentials for internal authentication

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single quotation marks.
If internal authentication has not been set up, the WITH PASSWORD clause is not necessary.

Creating a user conditionally

Attempting to create an existing user results in an invalid query condition unless the IF NOT EXISTS option is used. If the option is used and the user exists, the statement is a no-op.

CREATE USER carlos;
-CREATE USER IF NOT EXISTS carlos;
-

ALTER USER

Syntax:

<alter-user-statement> ::= ALTER USER <identifier> ( WITH PASSWORD <string> )? ( <option> )?
-
-<option> ::= SUPERUSER
-           | NOSUPERUSER
-

ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-

ALTER USER requires SUPERUSER status, with two caveats:

  • A user cannot alter its own SUPERUSER status
  • A user without SUPERUSER status is permitted to modify a subset of its own properties (e.g. its PASSWORD)

DROP USER

Syntax:

<drop-user-stmt> ::= DROP USER ( IF EXISTS )? <identifier>
-

Sample:

DROP USER alice;
-DROP USER IF EXISTS bob;
-

DROP USER requires SUPERUSER status, and users are not permitted to DROP themselves.
Attempting to drop a user which does not exist results in an invalid query condition unless the IF EXISTS option is used. If the option is used and the user does not exist the statement is a no-op.

LIST USERS

Syntax:

<list-users-stmt> ::= LIST USERS;
-

Sample:

LIST USERS;
-

Returns all known users in the system.

Data Control

Permissions

Permissions on resources are granted to users. Data resources in Cassandra are organized hierarchically, like so: ALL KEYSPACES -> KEYSPACE -> TABLE

Permissions can be granted at any level of the hierarchy and they flow downwards. So granting a permission on a resource higher up the chain automatically grants that same permission on all resources lower down. For example, granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE.

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established following permissions changes.

The full set of available permissions is:

  • CREATE
  • ALTER
  • DROP
  • SELECT
  • MODIFY
  • AUTHORIZE
permission | resource | operations
CREATE | ALL KEYSPACES | CREATE KEYSPACE, CREATE TABLE in any keyspace
CREATE | KEYSPACE | CREATE TABLE in specified keyspace
ALTER | ALL KEYSPACES | ALTER KEYSPACE, ALTER TABLE in any keyspace
ALTER | KEYSPACE | ALTER KEYSPACE, ALTER TABLE in keyspace
ALTER | TABLE | ALTER TABLE
DROP | ALL KEYSPACES | DROP KEYSPACE, DROP TABLE in any keyspace
DROP | KEYSPACE | DROP TABLE in specified keyspace
DROP | TABLE | DROP TABLE
SELECT | ALL KEYSPACES | SELECT on any table
SELECT | KEYSPACE | SELECT on any table in keyspace
SELECT | TABLE | SELECT on specified table
MODIFY | ALL KEYSPACES | INSERT, UPDATE, DELETE and TRUNCATE on any table
MODIFY | KEYSPACE | INSERT, UPDATE, DELETE and TRUNCATE on any table in keyspace
MODIFY | TABLE | INSERT, UPDATE, DELETE and TRUNCATE
AUTHORIZE | ALL KEYSPACES | GRANT PERMISSION, REVOKE PERMISSION on any table
AUTHORIZE | KEYSPACE | GRANT PERMISSION, REVOKE PERMISSION on table in keyspace
AUTHORIZE | TABLE | GRANT PERMISSION, REVOKE PERMISSION

GRANT PERMISSION

Syntax:

<grant-permission-stmt> ::= GRANT ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? ) ON <resource> TO <identifier>
-
-<permission> ::= CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE 
-
-<resource> ::= ALL KEYSPACES
-             | KEYSPACE <identifier>
-             | ( TABLE )? <tablename>
-

Sample:

GRANT SELECT ON ALL KEYSPACES TO alice;
-

This gives alice permissions to execute SELECT statements on any table across all keyspaces.

GRANT MODIFY ON KEYSPACE keyspace1 TO bob;
-

This gives bob permissions to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace.

GRANT DROP ON keyspace1.table1 TO carlos;
-

This gives carlos permissions to DROP keyspace1.table1.

REVOKE PERMISSION

Syntax:

<revoke-permission-stmt> ::= REVOKE ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? ) ON <resource> FROM <identifier>
-
-<permission> ::= CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE 
-
-<resource> ::= ALL KEYSPACES
-             | KEYSPACE <identifier>
-             | ( TABLE )? <tablename>
-

Sample:

REVOKE SELECT ON ALL KEYSPACES FROM alice;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM bob;
-REVOKE DROP ON keyspace1.table1 FROM carlos;
-

LIST PERMISSIONS

Syntax:

<list-permissions-stmt> ::= LIST ( ALL ( PERMISSIONS )? | <permission> ) 
-                                 ( ON <resource> )? 
-                                 ( OF <identifier> ( NORECURSIVE )? )?
-
-<resource> ::= ALL KEYSPACES
-             | KEYSPACE <identifier>
-             | ( TABLE )? <tablename>
-

Sample:

LIST ALL PERMISSIONS OF alice;
-

Show all permissions granted to alice.

LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-

Show all permissions on keyspace1.table1 granted to bob. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. For example, should bob have ALTER permission on keyspace1, that would be included in the results of this query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to bob.

LIST SELECT PERMISSIONS OF carlos;
-

Show any permissions granted to carlos, limited to SELECT permissions on any resource.

Data Types

CQL supports a rich set of data types for columns defined in a table, including collection types. On top of those native and collection types, users can also provide custom types (through a JAVA class extending AbstractType loadable by Cassandra). The syntax of types is thus:

<type> ::= <native-type>
-         | <collection-type>
-         | <tuple-type>
-         | <string>       // Used for custom types. The fully-qualified name of a JAVA class
-
-<native-type> ::= ascii
-                | bigint
-                | blob
-                | boolean
-                | counter
-                | decimal
-                | double
-                | float
-                | inet
-                | int
-                | text
-                | timestamp
-                | timeuuid
-                | uuid
-                | varchar
-                | varint
-
-<collection-type> ::= list '<' <native-type> '>'
-                    | set  '<' <native-type> '>'
-                    | map  '<' <native-type> ',' <native-type> '>'
-<tuple-type> ::= tuple '<' <type> (',' <type>)* '>'
-

Note that the native types are keywords and as such are case-insensitive. They are however not reserved ones.

The following table gives additional information on the native data types, and on which kind of constants each type supports:

type | constants supported | description
ascii | strings | ASCII character string
bigint | integers | 64-bit signed long
blob | blobs | Arbitrary bytes (no validation)
boolean | booleans | true or false
counter | integers | Counter column (64-bit signed value). See Counters for details
decimal | integers, floats | Variable-precision decimal
double | integers | 64-bit IEEE-754 floating point
float | integers, floats | 32-bit IEEE-754 floating point
inet | strings | An IP address. It can be either 4 bytes long (IPv4) or 16 bytes long (IPv6). There is no inet constant; IP addresses should be input as strings
int | integers | 32-bit signed int
text | strings | UTF8 encoded string
timestamp | integers, strings | A timestamp. String constants are allowed, to input timestamps as dates; see Working with dates below for more information.
timeuuid | uuids | Type 1 UUID. This is generally used as a “conflict-free” timestamp. Also see the functions on Timeuuid
uuid | uuids | Type 1 or type 4 UUID
varchar | strings | UTF8 encoded string
varint | integers | Arbitrary-precision integer

For more information on how to use the collection types, see the Working with collections section below.

Working with dates

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the standard base time known as “the epoch”: January 1 1970 at 00:00:00 GMT.

Timestamp can be input in CQL as simple long integers, giving the number of milliseconds since the epoch, as defined above.

They can also be input as string literals in any of the following ISO 8601 formats, each representing the time and date Feb 3, 2011, at 04:05:00 AM, GMT:

  • 2011-02-03 04:05+0000
  • 2011-02-03 04:05:00+0000
  • 2011-02-03 04:05:00.000+0000
  • 2011-02-03T04:05+0000
  • 2011-02-03T04:05:00+0000
  • 2011-02-03T04:05:00.000+0000

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is -0800. The time zone may be omitted if desired— the date will be interpreted as being in the time zone under which the coordinating Cassandra node is configured.

  • 2011-02-03 04:05
  • 2011-02-03 04:05:00
  • 2011-02-03 04:05:00.000
  • 2011-02-03T04:05
  • 2011-02-03T04:05:00
  • 2011-02-03T04:05:00.000

There are clear difficulties inherent in relying on the time zone configuration being as expected, though, so it is recommended that the time zone always be specified for timestamps when feasible.

The time of day may also be omitted, if the date is the only piece that matters:

  • 2011-02-03
  • 2011-02-03+0000

In that case, the time of day will default to 00:00:00, in the specified or default time zone.
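As an illustration (the events table and values here are hypothetical), the two statements below insert the same instant, Feb 3, 2011 at 04:05:00 GMT, once as a long and once as an ISO 8601 string:

INSERT INTO events (id, created_at) VALUES (1, 1296705900000);
INSERT INTO events (id, created_at) VALUES (2, '2011-02-03 04:05+0000');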

Counters

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed integer and on which 2 operations are supported: incrementation and decrementation (see UPDATE for syntax). Note that the value of a counter cannot be set. A counter doesn’t exist until first incremented/decremented, and the first incrementation/decrementation is made as if the previous value was 0. Deletion of counter columns is supported but has some limitations (see the Cassandra Wiki for more information).

The use of the counter type is limited in the following way:

  • It cannot be used for a column that is part of the PRIMARY KEY of a table.
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside the PRIMARY KEY have the counter type, or none of them have it.
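A minimal sketch (with hypothetical names) of a counter table and the corresponding counter update:

CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter
);

UPDATE page_views SET views = views + 1 WHERE page = '/home';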

Working with collections

Noteworthy characteristics

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all the messages sent by a given user”, “events registered by a sensor”, ...), then collections are not appropriate anymore and a specific table (with clustering columns) should be used. Concretely, collections have the following limitations:

  • Collections are always read in their entirety (and reading one is not paged internally).
  • Collections cannot have more than 65535 elements. More precisely, while it may be possible to insert more than 65535 elements, it is not possible to read more than the 65535 first elements (see CASSANDRA-5428 for details).
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do (see the section on lists below for details). It is thus advised to prefer sets over lists when possible.

Please note that while some of those limitations may or may not be loosened in the future, the general rule that collections are for denormalizing small amounts of data is meant to stay.

Maps

A map is a typed set of key-value pairs, where keys are unique. Furthermore, note that maps are internally sorted by their keys and will thus always be returned in that order. To create a column of type map, use the map keyword suffixed with comma-separated key and value types, enclosed in angle brackets. For example:

CREATE TABLE users (
-    id text PRIMARY KEY,
-    given text,
-    surname text,
-    favs map<text, text>   // A map of text keys, and text values
-)
-

Writing map data is accomplished with a JSON-inspired syntax. To write a record using INSERT, specify the entire map as a JSON-style associative array. Note: This form will always replace the entire map.

// Inserting (or Updating)
-INSERT INTO users (id, given, surname, favs)
-           VALUES ('jsmith', 'John', 'Smith', { 'fruit' : 'apple', 'band' : 'Beatles' })
-

Adding or updating key-values of a (potentially) existing map can be accomplished either by subscripting the map column in an UPDATE statement or by adding a new map literal:

// Updating (or inserting)
-UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith'
-UPDATE users SET favs = favs +  { 'movie' : 'Cassablanca' } WHERE id = 'jsmith'
-

Note that TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated values. In other words,

// Updating (or inserting)
-UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith'
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

Deleting a map record is done with:

DELETE favs['author'] FROM users WHERE id = 'jsmith'
-

Sets

A set is a typed collection of unique values. Sets are ordered by their values. To create a column of type set, use the set keyword suffixed with the value type enclosed in angle brackets. For example:

CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    date timestamp,
-    tags set<text>
-);
-

Writing a set is accomplished by comma separating the set values, and enclosing them in curly braces. Note: An INSERT will always replace the entire set.

INSERT INTO images (name, owner, date, tags)
-            VALUES ('cat.jpg', 'jsmith', 'now', { 'kitten', 'cat', 'pet' });
-

Adding and removing values of a set can be accomplished with an UPDATE by adding/removing new set values to an existing set column.

UPDATE images SET tags = tags + { 'cute', 'cuddly' } WHERE name = 'cat.jpg';
-UPDATE images SET tags = tags - { 'lame' } WHERE name = 'cat.jpg';
-

As with maps, TTLs if used only apply to the newly inserted/updated values.

Lists

A list is a typed collection of non-unique values where elements are ordered by their position in the list. To create a column of type list, use the list keyword suffixed with the value type enclosed in angle brackets. For example:

CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int>
-)
-

Do note that as explained below, lists have some limitations and performance considerations to take into account, and it is advised to prefer sets over lists when this is possible.

Writing list data is accomplished with a JSON-style syntax. To write a record using INSERT, specify the entire list as a JSON array. Note: An INSERT will always replace the entire list.

INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-

Adding (appending or prepending) values to a list can be accomplished by adding a new JSON-style array to an existing list column.

UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
-UPDATE plays SET players = 5, scores = [ 12 ] + scores WHERE id = '123-afde';
-

It should be noted that append and prepend are not idempotent operations. This means that if an append or prepend operation times out, it is not always safe to retry it (as this could result in the record being appended or prepended twice).

Lists also provide the following operations: setting an element by its position in the list, removing an element by its position in the list, and removing all occurrences of a given value from the list. However, contrary to all the other collection operations, these three operations incur an internal read before the update, and will thus typically have slower performance characteristics. Those operations have the following syntax:

UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';                // sets the 2nd element of scores to 7 (raises an error if scores has less than 2 elements)
-DELETE scores[1] FROM plays WHERE id = '123-afde';                   // deletes the 2nd element of scores (raises an error if scores has less than 2 elements)
-UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; // removes all occurrences of 12 and 21 from scores
-

As with maps, TTLs if used only apply to the newly inserted/updated values.

Functions

CQL3 supports a few functions (more to come). Currently, it only supports functions on values (functions that transform one or more column values into a new value), and in particular aggregation functions are not supported. The functions supported are described below:

Token

The token function allows computing the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

The types of the arguments of token depend on the types of the partition key columns. The return type depends on the partitioner in use:

  • For Murmur3Partitioner, the return type is bigint.
  • For RandomPartitioner, the return type is varint.
  • For ByteOrderedPartitioner, the return type is blob.

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by

CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-    ...
-)
-

then the token function will take a single argument of type text (in that case, the partition key is userid (there are no clustering columns so the partition key is the same as the primary key)), and the return type will be bigint.

Uuid

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or SET statements.
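For example, assuming a hypothetical sessions table whose id column is of type uuid:

INSERT INTO sessions (id, connected_user) VALUES (uuid(), 'alice');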

Timeuuid functions

now

The now function takes no arguments and generates a new unique timeuuid (at the time where the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in WHERE clauses. For instance, a query of the form

SELECT * FROM myTable WHERE t = now()
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.

minTimeuuid and maxTimeuuid

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having timestamp t. So for instance:

SELECT * FROM myTable WHERE t > maxTimeuuid('2013-01-01 00:05+0000') AND t < minTimeuuid('2013-02-02 10:00+0000')
-

will select all rows where the timeuuid column t is strictly older than ‘2013-01-01 00:05+0000’ but strictly younger than ‘2013-02-02 10:00+0000’. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > maxTimeuuid('2013-01-01 00:05+0000').

Warning: We called the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the time-based UUID generation process specified by RFC 4122. In particular, the values returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

dateOf and unixTimestampOf

The dateOf and unixTimestampOf functions take a timeuuid argument and extract the embedded timestamp. However, while the dateOf function returns it with the timestamp type (which most clients, including cqlsh, interpret as a date), the unixTimestampOf function returns it as a raw bigint value.
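
As a sketch, assuming myTable has a timeuuid column t as in the earlier examples, both forms can be selected side by side:

SELECT t, dateOf(t), unixTimestampOf(t) FROM myTable;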

Blob conversion functions

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL3 (a notable exception is blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a blob argument and converts it back to a value of type type. So, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.
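
A hedged round-trip example (the table and column names are illustrative only):

CREATE TABLE blobs_example ( id int PRIMARY KEY, data blob );
INSERT INTO blobs_example (id, data) VALUES (1, bigintAsBlob(3));  -- stores 0x0000000000000003
SELECT blobAsBigint(data) FROM blobs_example WHERE id = 1;         -- returns 3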

Appendix A: CQL Keywords

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (but one can enclose a reserved keyword in double quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d'être of these non-reserved keywords is convenience: a keyword is non-reserved when it was always easy for the parser to decide whether it was used as a keyword or not.

Keyword Reserved?
ADD yes
ALL no
ALTER yes
AND yes
ANY yes
APPLY yes
AS no
ASC yes
ASCII no
AUTHORIZE yes
BATCH yes
BEGIN yes
BIGINT no
BLOB no
BOOLEAN no
BY yes
CLUSTERING no
COLUMNFAMILY yes
COMPACT no
CONSISTENCY no
COUNT no
COUNTER no
CREATE yes
DECIMAL no
DELETE yes
DESC yes
DOUBLE no
DROP yes
EACH_QUORUM yes
FLOAT no
FROM yes
GRANT yes
IN yes
INDEX yes
CUSTOM no
INSERT yes
INT no
INTO yes
KEY no
KEYSPACE yes
LEVEL no
LIMIT yes
LOCAL_ONE yes
LOCAL_QUORUM yes
MODIFY yes
NORECURSIVE yes
NOSUPERUSER no
OF yes
ON yes
ONE yes
ORDER yes
PASSWORD no
PERMISSION no
PERMISSIONS no
PRIMARY yes
QUORUM yes
REVOKE yes
SCHEMA yes
SELECT yes
SET yes
STORAGE no
SUPERUSER no
TABLE yes
TEXT no
TIMESTAMP no
TIMEUUID no
THREE yes
TOKEN yes
TRUNCATE yes
TTL no
TWO yes
TYPE no
UPDATE yes
USE yes
USER no
USERS no
USING yes
UUID no
VALUES no
VARCHAR no
VARINT no
WHERE yes
WITH yes
WRITETIME no
DISTINCT no

Appendix B: CQL Reserved Types

The following type names are not currently used by CQL, but are reserved for potential future use. User-defined types may not use reserved type names as their name.

type
byte
smallint
complex
enum
date
interval
macaddr
bitstring

Changes

The following describes the changes in each version of CQL.

3.2.1

  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X

3.2.0

  • User-defined types are now supported through CREATE TYPE, ALTER TYPE, and DROP TYPE
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the keys() function
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • Tuple types were added to hold fixed-length sets of typed positional fields (see the section on types)
  • DROP INDEX now supports optionally specifying a keyspace

3.1.7

  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations of clustering columns. See SELECT WHERE clauses.
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, respectively.

3.1.6

  • A new uuid method has been added.
  • Support for DELETE ... IF EXISTS syntax.

3.1.5

3.1.4

3.1.3

  • Millisecond precision formats have been added to the timestamp parser (see working with dates).

3.1.2

  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or keyspace/table name), you will now need to double-quote them (see quoted identifiers).

3.1.1

  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable will be a list of whatever type c is.
  • It is now possible to use named bind variables (using :name instead of ?).

3.1.0

  • ALTER TABLE DROP option has been re-enabled for CQL3 tables and has new semantics now: the space formerly used by dropped columns will now be eventually reclaimed (post-compaction). You should not re-add previously dropped columns unless you use timestamps with microsecond precision (see CASSANDRA-3919 for more details).
  • SELECT statement now supports aliases in the select clause. Aliases in WHERE and ORDER BY clauses are not supported. See the section on SELECT for details.
  • CREATE statements for KEYSPACE, TABLE and INDEX now support an IF NOT EXISTS condition. Similarly, DROP statements support an IF EXISTS condition.
  • INSERT statements optionally support an IF NOT EXISTS condition, and UPDATE supports IF conditions.

3.0.5

  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626).

3.0.4

  • Updated the syntax for custom secondary indexes.
  • Non-equality conditions on the partition key are now never supported, even for ordered partitioners, as this was not correct (the order was not the one of the type of the partition key). Instead, the token method should always be used for range queries on the partition key (see WHERE clauses).

3.0.3

3.0.2

  • Type validation for constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case: type validation of constants is now stricter. See the data types section for details on which constants are allowed for which types.
  • The type validation fix of the previous point has led to the introduction of blob constants to allow inputting blobs. Do note that while inputting blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated (in particular, the data types section does not list string constants as valid blobs) and will be removed in a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • A number of functions to convert native types to blobs have also been introduced. Furthermore, the token function is now also allowed in select clauses. See the section on functions for details.

3.0.1

  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense that date strings are not valid timeuuids, and it thus resulted in confusing behaviors. However, the following new methods have been added to help working with timeuuids: now, minTimeuuid, maxTimeuuid, dateOf and unixTimestampOf. See the section dedicated to these methods for more detail.
  • Float constants now support exponent notation. In other words, 4.2E10 is now a valid floating point value.

Versioning

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no correlation between Cassandra release versions and the CQL language version.

version description
Major The major version must be bumped when backward incompatible changes are introduced. This should rarely occur.
Minor Minor version increments occur when new, but backward compatible, functionality is introduced.
Patch The patch version is incremented when bugs are fixed.
\ No newline at end of file diff --git a/src/doc/old/CQL-2.2.html b/src/doc/old/CQL-2.2.html deleted file mode 100644 index 02effb339..000000000 --- a/src/doc/old/CQL-2.2.html +++ /dev/null @@ -1,647 +0,0 @@ -CQL

Cassandra Query Language (CQL) v3.3.1

  1. Cassandra Query Language (CQL) v3.3.1
    1. CQL Syntax
      1. Preamble
      2. Conventions
      3. Identifiers and keywords
      4. Constants
      5. Comments
      6. Statements
      7. Prepared Statement
    2. Data Definition
      1. CREATE KEYSPACE
      2. USE
      3. ALTER KEYSPACE
      4. DROP KEYSPACE
      5. CREATE TABLE
      6. ALTER TABLE
      7. DROP TABLE
      8. TRUNCATE
      9. CREATE INDEX
      10. DROP INDEX
      11. CREATE TYPE
      12. ALTER TYPE
      13. DROP TYPE
      14. CREATE TRIGGER
      15. DROP TRIGGER
      16. CREATE FUNCTION
      17. DROP FUNCTION
      18. CREATE AGGREGATE
      19. DROP AGGREGATE
    3. Data Manipulation
      1. INSERT
      2. UPDATE
      3. DELETE
      4. BATCH
    4. Queries
      1. SELECT
    5. Database Roles
      1. CREATE ROLE
      2. ALTER ROLE
      3. DROP ROLE
      4. GRANT ROLE
      5. REVOKE ROLE
      6. CREATE USER
      7. ALTER USER
      8. DROP USER
      9. LIST USERS
    6. Data Control
      1. Permissions
      2. GRANT PERMISSION
      3. REVOKE PERMISSION
    7. Data Types
      1. Working with timestamps
      2. Working with dates
      3. Working with time
      4. Counters
      5. Working with collections
    8. Functions
      1. Token
      2. Uuid
      3. Timeuuid functions
      4. Time conversion functions
      5. Blob conversion functions
    9. Aggregates
      1. Count
      2. Max and Min
      3. Sum
      4. Avg
    10. User-Defined Functions
    11. User-Defined Aggregates
    12. JSON Support
      1. SELECT JSON
      2. INSERT JSON
      3. JSON Encoding of Cassandra Data Types
      4. The fromJson() Function
      5. The toJson() Function
    13. Appendix A: CQL Keywords
    14. Appendix B: CQL Reserved Types
    15. Changes
      1. 3.3.1
      2. 3.3.0
      3. 3.2.0
      4. 3.1.7
      5. 3.1.6
      6. 3.1.5
      7. 3.1.4
      8. 3.1.3
      9. 3.1.2
      10. 3.1.1
      11. 3.1.0
      12. 3.0.5
      13. 3.0.4
      14. 3.0.3
      15. 3.0.2
      16. 3.0.1
    16. Versioning

CQL Syntax

Preamble

This document describes the Cassandra Query Language (CQL) version 3. CQL v3 is not backward compatible with CQL v2 and differs from it in numerous ways. Note that this document describes the latest version of the language. However, the changes section provides the diff between the different versions of CQL v3.

CQL v3 offers a model very close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL. But please note that, as such, they do not refer to the concept of rows and columns found in the internal implementation of Cassandra and in the Thrift and CQL v2 APIs.

Conventions

To aid in specifying the CQL syntax, we will use the following conventions in this document:

  • Language rules will be given in a BNF-like notation:
<start> ::= TERMINAL <non-terminal1> <non-terminal1>
-
  • Nonterminal symbols will have <angle brackets>.
  • As additional shortcut notations to BNF, we’ll use traditional regular expression symbols (?, + and *) to signify that a given symbol is optional and/or can be repeated. We’ll also allow parentheses to group symbols and the [<characters>] notation to represent any one of <characters>.
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the comma at the end of the last column definition in a CREATE TABLE statement is optional but supported if present, even though the grammar provided in this document suggests it is not.
  • Sample code will be provided in a code block:
SELECT sample_usage FROM cql;
-
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.

Identifiers and keywords

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language and most are reserved. The list of those keywords can be found in Appendix A.

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID, for instance. A convention often used (in particular by the samples in this documentation) is to use upper case for keywords and lower case for other identifiers.

There is a second kind of identifier, called quoted identifiers, defined by enclosing an arbitrary sequence of characters in double quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column, while select would raise a parse error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is equivalent to the unquoted identifier obtained by removing the double quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.
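
As a small sketch (the table and columns are illustrative), a reserved keyword can be used as a column name only when quoted:

CREATE TABLE quoting_example ( id int PRIMARY KEY, "select" text );
SELECT "select" FROM quoting_example WHERE id = 0;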

Warning: quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional updates, the server will respond with a result set containing a special result named "[applied]". If you’ve declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred, but if you use quoted identifiers, it is strongly advised to avoid any name enclosed in square brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

Constants

CQL defines the following kinds of constants: strings, integers, floats, booleans, uuids and blobs:

  • A string constant is an arbitrary sequence of characters enclosed in single quotes ('). One can include a single quote in a string by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted identifiers that use double quotes.
  • An integer constant is defined by '-'?[0-9]+.
  • A float constant is defined by '-'?[0-9]+('.'[0-9]*)?([eE][+-]?[0-9]+)?. On top of that, NaN and Infinity are also float constants.
  • A boolean constant is either true or false up to case-insensitivity (i.e. True is a valid boolean constant).
  • A UUID constant is defined by hex{8}-hex{4}-hex{4}-hex{4}-hex{12} where hex is a hexadecimal character, e.g. [0-9a-fA-F], and {4} is the number of such characters.
  • A blob constant is a hexadecimal number defined by 0[xX](hex)+ where hex is a hexadecimal character, e.g. [0-9a-fA-F].

For how these constants are typed, see the data types section.

Comments

A comment in CQL is a line beginning with either a double dash (--) or a double slash (//).

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-

Statements

CQL consists of statements. As in SQL, these statements can be divided into three categories:

  • Data definition statements, which allow setting and changing the way data is stored.
  • Data manipulation statements, which allow changing data.
  • Queries, to look up data.

All statements end with a semicolon (;) but that semicolon can be omitted when dealing with a single statement. The supported statements are described in the following sections. When describing the grammar of said statements, we will reuse the non-terminal symbols defined below:

<identifier> ::= any quoted or unquoted identifier, excluding reserved keywords
- <tablename> ::= (<identifier> '.')? <identifier>
-
-    <string> ::= a string constant
-   <integer> ::= an integer constant
-     <float> ::= a float constant
-    <number> ::= <integer> | <float>
-      <uuid> ::= a uuid constant
-   <boolean> ::= a boolean constant
-       <hex> ::= a blob constant
-
-  <constant> ::= <string>
-               | <number>
-               | <uuid>
-               | <boolean>
-               | <hex>
-  <variable> ::= '?'
-               | ':' <identifier>
-      <term> ::= <constant>
-               | <collection-literal>
-               | <variable>
-               | <function> '(' (<term> (',' <term>)*)? ')'
-
-  <collection-literal> ::= <map-literal>
-                         | <set-literal>
-                         | <list-literal>
-         <map-literal> ::= '{' ( <term> ':' <term> ( ',' <term> ':' <term> )* )? '}'
-         <set-literal> ::= '{' ( <term> ( ',' <term> )* )? '}'
-        <list-literal> ::= '[' ( <term> ( ',' <term> )* )? ']'
-
-    <function> ::= <ident>
-
-  <properties> ::= <property> (AND <property>)*
-    <property> ::= <identifier> '=' ( <identifier> | <constant> | <map-literal> )
-


Please note that not every possible production of the grammar above is valid in practice. Most notably, <variable> and nested <collection-literal> are currently not allowed inside <collection-literal>.

A <variable> can be either anonymous (a question mark (?)) or named (an identifier preceded by :). Both declare a bind variable for prepared statements. The only difference between an anonymous and a named variable is that a named one is easier to refer to (how exactly depends on the client driver used).

The <properties> production is used by statements that create and alter keyspaces and tables. Each <property> is either a simple one, in which case it just has a value, or a map one, in which case its value is a map grouping sub-options. The following will refer to one or the other as the kind (simple or map) of the property.

A <tablename> will be used to identify a table. This is an identifier representing the table name that can be preceded by a keyspace name. The keyspace name, if provided, allows identifying a table in a keyspace other than the currently active one (the currently active keyspace is set through the USE statement).

For supported <function>, see the section on functions.

Strings can be enclosed either in single quotes or in two dollar characters ($$). The second syntax has been introduced to allow strings that contain single quotes. Typical candidates for such strings are source code fragments for user-defined functions.

Sample:

  'some string value'
-
-  $$double-dollar string can contain single ' quotes$$
-

Prepared Statement

CQL supports prepared statements. A prepared statement is an optimization that allows parsing a query only once and executing it multiple times with different concrete values.

In a statement, each time a column value is expected (in the data manipulation and query statements), a <variable> (see above) can be used instead. A statement with bind variables must then be prepared. Once it has been prepared, it can be executed by providing concrete values for the bind variables. The exact procedure to prepare a statement and execute a prepared statement depends on the CQL driver used and is beyond the scope of this document.

In addition to providing column values, bind markers may be used to provide values for LIMIT, TIMESTAMP, and TTL clauses. If anonymous bind markers are used, the names for the query parameters will be [limit], [timestamp], and [ttl], respectively.
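
As an illustrative sketch (preparing and executing the statement is done through the driver and is not shown), a query using named bind variables could look like:

SELECT * FROM users WHERE userid = :id LIMIT :max_rows;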

Data Definition

CREATE KEYSPACE

Syntax:

<create-keyspace-stmt> ::= CREATE KEYSPACE (IF NOT EXISTS)? <identifier> WITH <properties>
-


Sample:

CREATE KEYSPACE Excelsior
-           WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE Excalibur
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-            AND durable_writes = false;
-


The CREATE KEYSPACE statement creates a new top-level keyspace. A keyspace is a namespace that defines a replication strategy and some options for a set of tables. Valid keyspace names are identifiers composed exclusively of alphanumeric characters and whose length is less than or equal to 32. Note that as identifiers, keyspace names are case insensitive: use a quoted identifier for case sensitive keyspace names.

The supported <properties> for CREATE KEYSPACE are:

name kind mandatory default description
replication map yes The replication strategy and options to use for the keyspace.
durable_writes simple no true Whether to use the commit log for updates on this keyspace (disable this option at your own risk!).

The replication <property> is mandatory. It must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on that replication strategy class. By default, Cassandra supports the following 'class' values:

  • 'SimpleStrategy': A simple strategy that defines a replication factor for the whole cluster. The only sub-option supported is 'replication_factor', which defines that replication factor and is mandatory.
  • 'NetworkTopologyStrategy': A replication strategy that allows setting the replication factor independently for each data center. The rest of the sub-options are key-value pairs where each key is the name of a data center and the value is the replication factor for that data center.
  • 'OldNetworkTopologyStrategy': A legacy replication strategy. You should avoid this strategy for new keyspaces and prefer 'NetworkTopologyStrategy'.

Attempting to create an already existing keyspace will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the keyspace already exists.
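
For instance, the following statement can safely be re-run; it is a no-op when the keyspace already exists:

CREATE KEYSPACE IF NOT EXISTS Excelsior
           WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};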

USE

Syntax:

<use-stmt> ::= USE <identifier>
-

Sample:

USE myApp;
-

The USE statement takes an existing keyspace name as argument and sets it as the per-connection current working keyspace. All subsequent keyspace-specific actions will be performed in the context of the selected keyspace, unless otherwise specified, until another USE statement is issued or the connection terminates.

ALTER KEYSPACE

Syntax:

<alter-keyspace-stmt> ::= ALTER KEYSPACE <identifier> WITH <properties>
-


Sample:

ALTER KEYSPACE Excelsior
-          WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-


The ALTER KEYSPACE statement alters the properties of an existing keyspace. The supported <properties> are the same as for the CREATE KEYSPACE statement.

DROP KEYSPACE

Syntax:

<drop-keyspace-stmt> ::= DROP KEYSPACE ( IF EXISTS )? <identifier>
-

Sample:

DROP KEYSPACE myApp;
-

A DROP KEYSPACE statement results in the immediate, irreversible removal of an existing keyspace, including all column families in it, and all data contained in those column families.

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

CREATE TABLE

Syntax:

<create-table-stmt> ::= CREATE ( TABLE | COLUMNFAMILY ) ( IF NOT EXISTS )? <tablename>
-                          '(' <column-definition> ( ',' <column-definition> )* ')'
-                          ( WITH <option> ( AND <option>)* )?
-
-<column-definition> ::= <identifier> <type> ( STATIC )? ( PRIMARY KEY )?
-                      | PRIMARY KEY '(' <partition-key> ( ',' <identifier> )* ')'
-
-<partition-key> ::= <identifier>
-                  | '(' <identifier> (',' <identifier> )* ')'
-
-<option> ::= <property>
-           | COMPACT STORAGE
-           | CLUSTERING ORDER
-


Sample:

CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records'
-   AND read_repair_chance = 1.0;
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-


The CREATE TABLE statement creates a new table. Each such table is a set of rows (usually representing related entities) for which it defines a number of properties. A table is defined by a name; it defines the columns composing the rows of the table and has a number of options. Note that the CREATE COLUMNFAMILY syntax is supported as an alias for CREATE TABLE (for historical reasons).

Attempting to create an already existing table will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the table already exists.

<tablename>

Valid table names are the same as valid keyspace names (alphanumeric identifiers of up to 32 characters). If the table name is provided alone, the table is created within the current keyspace (see USE), but if it is prefixed by an existing keyspace name (see the <tablename> grammar), it is created in the specified keyspace (but does not change the current keyspace).

<column-definition>

A CREATE TABLE statement defines the columns that rows of the table can have. A column is defined by its name (an identifier) and its type (see the data types section for more details on allowed types and their properties).

Within a table, a row is uniquely identified by its PRIMARY KEY (or more simply the key), and hence all table definitions must define a PRIMARY KEY (and only one). A PRIMARY KEY is composed of one or more of the columns defined in the table. If the PRIMARY KEY is only one column, this can be specified directly after the column definition. Otherwise, it must be specified by following PRIMARY KEY with the comma-separated list of column names composing the key within parentheses. Note that:

CREATE TABLE t (
-    k int PRIMARY KEY,
-    other text
-)
-

is equivalent to

CREATE TABLE t (
-    k int,
-    other text,
-    PRIMARY KEY (k)
-)
-

Partition key and clustering columns

In CQL, the order in which columns are defined for the PRIMARY KEY matters. The first column of the key is called the partition key. It has the property that all the rows sharing the same partition key (even across tables, in fact) are stored on the same physical node. Also, insertions/updates/deletions on rows sharing the same partition key for a given table are performed atomically and in isolation. Note that it is possible to have a composite partition key, i.e. a partition key formed of multiple columns, using an extra set of parentheses to define which columns form the partition key.

The remaining columns of the PRIMARY KEY definition, if any, are called clustering columns. On a given physical node, rows for a given partition key are stored in the order induced by the clustering columns, making the retrieval of rows in that clustering order particularly efficient (see SELECT).
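
As a sketch of a composite partition key (the table and column names are illustrative), the extra parentheses below make (userid, posted_month) the partition key while posted_time is a clustering column:

CREATE TABLE timeline_by_month (
    userid uuid,
    posted_month int,
    posted_time timeuuid,
    body text,
    PRIMARY KEY ((userid, posted_month), posted_time)
);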

STATIC columns

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the rows belonging to the same partition (having the same partition key). For instance, in:

CREATE TABLE test (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-INSERT INTO test(pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO test(pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-SELECT * FROM test WHERE pk=0 AND t=0;
-

the last query will return 'static1' as the value for s, since s is static and the 2nd insertion thus modified this “shared” value. Note however that static columns are only static within a given partition, and if in the example above both rows were from different partitions (i.e. if they had different values for pk), then the 2nd insertion would not have modified the value of s for the first row.

A few restrictions apply to when static columns are allowed:

  • tables with the COMPACT STORAGE option (see below) cannot have them
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition has only one row, and so every column is inherently static).
  • only non PRIMARY KEY columns can be static

<option>

The CREATE TABLE statement supports a number of options that control the configuration of a new table. These options can be specified after the WITH keyword.

The first of these options is COMPACT STORAGE. This option is mainly targeted towards backward compatibility for definitions created before CQL3 (see www.datastax.com/dev/blog/thrift-to-cql3 for more details). The option also provides a slightly more compact layout of data on disk but at the price of diminished flexibility and extensibility for the table. Most notably, COMPACT STORAGE tables cannot have collections nor static columns, and a COMPACT STORAGE table with at least one clustering column supports exactly one (as in not 0 nor more than 1) column not part of the PRIMARY KEY definition (which implies in particular that you cannot add nor remove columns after creation). For those reasons, COMPACT STORAGE is not recommended outside of the backward compatibility reason evoked above.

Another option is CLUSTERING ORDER. It allows defining the ordering of rows on disk. It takes the list of the clustering column names with, for each of them, the on-disk order (ascending or descending). Note that this option affects which ORDER BY clauses are allowed during SELECT.
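
A minimal sketch (the table and column names are illustrative) keeping the most recent rows of each partition first on disk:

CREATE TABLE user_events (
    userid uuid,
    event_time timeuuid,
    details text,
    PRIMARY KEY (userid, event_time)
) WITH CLUSTERING ORDER BY (event_time DESC);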

Table creation supports the following other <property>:

option kind default description
comment simple none A free-form, human-readable comment.
read_repair_chance simple 0.1 The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpose of read repairs.
dclocal_read_repair_chance simple 0 The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center than the read coordinator for the purpose of read repairs.
gc_grace_seconds simple 864000 Time to wait before garbage collecting tombstones (deletion markers).
bloom_filter_fp_chance simple 0.00075 The target probability of false positive of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impact the size of bloom filters in-memory and on-disk)
default_time_to_live simple 0 The default expiration time (“TTL”) in seconds for a table.
compaction map see below Compaction options, see below.
compression map see below Compression options, see below.
caching map see below Caching options, see below.

Compaction options

The compaction property must at least define the 'class' sub-option, that defines the compaction strategy class to use. The default supported class are 'SizeTieredCompactionStrategy', 'LeveledCompactionStrategy' and 'DateTieredCompactionStrategy'. Custom strategy can be provided by specifying the full class name as a string constant. The rest of the sub-options depends on the chosen class. The sub-options supported by the default classes are:

option supported compaction strategy default description
enabled all true A boolean denoting whether compaction should be enabled or not.
tombstone_threshold all 0.2 A ratio such that if a sstable has more than this ratio of gcable tombstones over all contained columns, the sstable will be compacted (with no other sstables) for the purpose of purging those tombstones.
tombstone_compaction_interval all 1 day The minimum time to wait after an sstable creation time before considering it for “tombstone compaction”, where “tombstone compaction” is the compaction triggered if the sstable has more gcable tombstones than tombstone_threshold.
unchecked_tombstone_compaction all false Setting this to true enables more aggressive tombstone compactions – single sstable tombstone compactions will run without checking how likely it is that they will be successful.
min_sstable_size SizeTieredCompactionStrategy 50MB The size tiered strategy groups SSTables to compact in buckets. A bucket groups SSTables that differs from less than 50% in size. However, for small sizes, this would result in a bucketing that is too fine grained. min_sstable_size defines a size threshold (in bytes) below which all SSTables belong to one unique bucket
min_threshold SizeTieredCompactionStrategy 4 Minimum number of SSTables needed to start a minor compaction.
max_threshold SizeTieredCompactionStrategy 32 Maximum number of SSTables processed by one minor compaction.
bucket_low SizeTieredCompactionStrategy 0.5 Size tiered consider sstables to be within the same bucket if their size is within [average_size * bucket_low, average_size * bucket_high ] (i.e the default groups sstable whose sizes diverges by at most 50%)
bucket_high SizeTieredCompactionStrategy 1.5 Size tiered consider sstables to be within the same bucket if their size is within [average_size * bucket_low, average_size * bucket_high ] (i.e the default groups sstable whose sizes diverges by at most 50%).
sstable_size_in_mb LeveledCompactionStrategy 5MB The target size (in MB) for sstables in the leveled strategy. Note that while sstable sizes should stay less or equal to sstable_size_in_mb, it is possible to exceptionally have a larger sstable as during compaction, data for a given partition key are never split into 2 sstables
timestamp_resolution DateTieredCompactionStrategy MICROSECONDS The timestamp resolution used when inserting data, could be MILLISECONDS, MICROSECONDS etc (should be understandable by Java TimeUnit) - don’t change this unless you do mutations with USING TIMESTAMP (or equivalent directly in the client)
base_time_seconds DateTieredCompactionStrategy 60 The base size of the time windows.
max_sstable_age_days DateTieredCompactionStrategy 365 SSTables only containing data that is older than this will never be compacted.

Compression options

For the compression property, the following sub-options are available:

option default description
sstable_compression LZ4Compressor The compression algorithm to use. Default compressor are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use an empty string ('') to disable compression. Custom compressor can be provided by specifying the full class name as a string constant.
chunk_length_kb 64KB On disk SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increases the minimum size of data to be read from disk for a read
crc_check_chance 1.0 When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replica. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking and to 0.5 for instance to check them every other read

Caching options

For the caching property, the following sub-options are available:

option default description
keys ALL Whether to cache keys (“key cache”) for this table. Valid values are: ALL and NONE.
rows_per_partition NONE The amount of rows to cache per partition (“row cache”). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.
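
An illustrative sketch combining several of these map properties in one WITH clause (the table and the specific values are examples, not recommendations):

CREATE TABLE sensor_data (
    sensor_id uuid,
    reading_time timestamp,
    value double,
    PRIMARY KEY (sensor_id, reading_time)
) WITH compaction = { 'class' : 'LeveledCompactionStrategy', 'sstable_size_in_mb' : 160 }
   AND compression = { 'sstable_compression' : 'LZ4Compressor', 'chunk_length_kb' : 64 }
   AND caching = { 'keys' : 'ALL', 'rows_per_partition' : '100' };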

Other considerations:

  • When inserting / updating a given row, not all columns need to be defined (except for those that are part of the key), and missing columns occupy no space on disk. Furthermore, adding new columns (see ALTER TABLE) is a constant time operation. There is thus no need to try to anticipate future usage (or to cry when you haven’t) when creating a table.

ALTER TABLE

Syntax:

<alter-table-stmt> ::= ALTER (TABLE | COLUMNFAMILY) <tablename> <instruction>
-
-<instruction> ::= ALTER <identifier> TYPE <type>
-                | ADD   <identifier> <type>
-                | DROP  <identifier>
-                | WITH  <option> ( AND <option> )*
-


Sample:

ALTER TABLE addamsFamily
-ALTER lastKnownLocation TYPE uuid;
-
-ALTER TABLE addamsFamily
-ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-WITH comment = 'A most excellent and useful column family'
- AND read_repair_chance = 0.2;
-


The ALTER statement is used to manipulate table definitions. It allows for adding new columns, dropping existing ones, changing the type of existing columns, or updating the table options. As with table creation, ALTER COLUMNFAMILY is allowed as an alias for ALTER TABLE.

The <tablename> is the table name optionally preceded by the keyspace name. The <instruction> defines the alteration to perform:

  • ALTER: Update the type of a given defined column. Note that the type of the clustering columns cannot be modified as it induces the on-disk ordering of rows. Columns on which a secondary index is defined have the same restriction. Other columns are free from those restrictions (no validation of existing data is performed), but it is usually a bad idea to change the type to a non-compatible one, unless no data have been inserted for that column yet, as this could confuse CQL drivers/tools.
  • ADD: Adds a new column to the table. The <identifier> for the new column must not conflict with an existing column. Moreover, columns cannot be added to tables defined with the COMPACT STORAGE option.
  • DROP: Removes a column from the table. Dropped columns will immediately become unavailable in queries and will not be included in compacted sstables in the future. If a column is re-added, queries won’t return values written before the column was last dropped. It is assumed that timestamps represent actual time, so if this is not your case, you should NOT re-add previously dropped columns. Columns can’t be dropped from tables defined with the COMPACT STORAGE option.
  • WITH: Allows updating the options of the table. The supported <option> (and syntax) are the same as for the CREATE TABLE statement, except that COMPACT STORAGE is not supported. Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.

DROP TABLE

Syntax:

<drop-table-stmt> ::= DROP TABLE ( IF EXISTS )? <tablename>
-

Sample:

DROP TABLE worldSeriesAttendees;
-

The DROP TABLE statement results in the immediate, irreversible removal of a table, including all data contained in it. As for table creation, DROP COLUMNFAMILY is allowed as an alias for DROP TABLE.

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the operation is a no-op.

TRUNCATE

Syntax:

<truncate-stmt> ::= TRUNCATE ( TABLE | COLUMNFAMILY )? <tablename>
-

Sample:

TRUNCATE superImportantData;
-

The TRUNCATE statement permanently removes all data from a table.

CREATE INDEX

Syntax:

<create-index-stmt> ::= CREATE ( CUSTOM )? INDEX ( IF NOT EXISTS )? ( <indexname> )?
-                            ON <tablename> '(' <index-identifier> ')'
-                            ( USING <string> ( WITH OPTIONS = <map-literal> )? )?
-
-<index-identifier> ::= <identifier>
-                     | keys( <identifier> )
-


Sample:

CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed automatically at insertion time.

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the index already exists.

Indexes on Map Keys

When creating an index on a map column, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.
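
For instance, assuming the users table has a map column favs as in the samples above, an index on the map keys can be queried with CONTAINS KEY (sketch only):

CREATE INDEX ON users (keys(favs));
SELECT * FROM users WHERE favs CONTAINS KEY 'color';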

DROP INDEX

Syntax:

<drop-index-stmt> ::= DROP INDEX ( IF EXISTS )? ( <keyspace> '.' )? <identifier>
-

Sample:

DROP INDEX userIndex;
-
-DROP INDEX userkeyspace.address_index;
-


The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index name, which may optionally specify the keyspace of the index.

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

CREATE TYPE

Syntax:

<create-type-stmt> ::= CREATE TYPE ( IF NOT EXISTS )? <typename>
-                         '(' <field-definition> ( ',' <field-definition> )* ')'
-
-<typename> ::= ( <keyspace-name> '.' )? <identifier>
-
-<field-definition> ::= <identifier> <type>
-
-


Sample:

CREATE TYPE address (
-    street_name text,
-    street_number int,
-    city text,
-    state text,
-    zip int
-)
-
-CREATE TYPE work_and_home_addresses (
-    home_address address,
-    work_address address
-)
-


The CREATE TYPE statement creates a new user-defined type. Each type is a set of named, typed fields. Field types may be any valid type, including collections and other existing user-defined types.

Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the type already exists.

<typename>

Valid type names are identifiers. The names of existing CQL types and reserved type names may not be used.

If the type name is provided alone, the type is created within the current keyspace (see USE). If it is prefixed by an existing keyspace name, the type is created within the specified keyspace instead of the current keyspace.

ALTER TYPE

Syntax:

<alter-type-stmt> ::= ALTER TYPE <typename> <instruction>
-
-<instruction> ::= ALTER <field-name> TYPE <type>
-                | ADD <field-name> <type>
-                | RENAME <field-name> TO <field-name> ( AND <field-name> TO <field-name> )*
-


Sample:

ALTER TYPE address ALTER zip TYPE varint
-
-ALTER TYPE address ADD country text
-
-ALTER TYPE address RENAME zip TO zipcode AND street_name TO street
-


The ALTER TYPE statement is used to manipulate type definitions. It allows for adding new fields, renaming existing fields, or changing the type of existing fields.

When altering the type of a column, the new type must be compatible with the previous type.

DROP TYPE

Syntax:

<drop-type-stmt> ::= DROP TYPE ( IF EXISTS )? <typename>
-


The DROP TYPE statement results in the immediate, irreversible removal of a type. Attempting to drop a type that is still in use by another type or a table will result in an error.

If the type does not exist, an error will be returned unless IF EXISTS is used, in which case the operation is a no-op.

CREATE TRIGGER

Syntax:

<create-trigger-stmt> ::= CREATE TRIGGER ( IF NOT EXISTS )? ( <triggername> )?
-                            ON <tablename> 
-                            USING <string>
-
-


Sample:

CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. Place the trigger code in a lib/triggers subdirectory of the Cassandra installation directory; it is loaded during cluster startup and must exist on every node that participates in the cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

DROP TRIGGER

Syntax:

<drop-trigger-stmt> ::= DROP TRIGGER ( IF EXISTS )? ( <triggername> )?
-                            ON <tablename>
-


Sample:

DROP TRIGGER myTrigger ON myTable;
-

The DROP TRIGGER statement removes the registration of a trigger created using CREATE TRIGGER.

CREATE FUNCTION

Syntax:

<create-function-stmt> ::= CREATE ( OR REPLACE )? 
-                            FUNCTION ( IF NOT EXISTS )?
-                            ( <keyspace> '.' )? <function-name>
-                            '(' <arg-name> <arg-type> ( ',' <arg-name> <arg-type> )* ')'
-                            ( CALLED | RETURNS NULL ) ON NULL INPUT
-                            RETURNS <type>
-                            LANGUAGE <language>
-                            AS <body>
-


Sample:

CREATE OR REPLACE FUNCTION somefunction
-    ( somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list<bigint> )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-       // some Java code
-    $$;
-CREATE FUNCTION akeyspace.fname IF NOT EXISTS
-    ( someArg int )
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-       // some Java code
-    $$;
-

CREATE FUNCTION creates or replaces a user-defined function.

Function Signature

Signatures are used to distinguish individual functions. The signature consists of:

  1. The fully qualified function name, i.e. keyspace plus function name
  2. The concatenated list of all argument types

Note that keyspace names, function names and argument types are subject to the default naming conventions and case-sensitivity rules.

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already exists.

Behavior on invocation with null values must be defined for each function. There are two options:

  1. RETURNS NULL ON NULL INPUT declares that the function will always return null if any of the input arguments is null.
  2. CALLED ON NULL INPUT declares that the function will always be executed.

If the optional IF NOT EXISTS keywords are used, the function will only be created if another function with the same signature does not exist.

OR REPLACE and IF NOT EXISTS cannot be used together.

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the system keyspaces.

See the section on user-defined functions for more information.

DROP FUNCTION

Syntax:

<drop-function-stmt> ::= DROP FUNCTION ( IF EXISTS )?
-                         ( <keyspace> '.' )? <function-name>
-                         ( '(' <arg-type> ( ',' <arg-type> )* ')' )?
-
-


Sample:

DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-

The DROP FUNCTION statement removes a function created using CREATE FUNCTION.
You must specify the argument types (the signature) of the function to drop if there are multiple functions with the same name but a different signature (overloaded functions).

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists.

CREATE AGGREGATE

Syntax:

<create-aggregate-stmt> ::= CREATE ( OR REPLACE )? 
-                            AGGREGATE ( IF NOT EXISTS )?
-                            ( <keyspace> '.' )? <aggregate-name>
-                            '(' <arg-type> ( ',' <arg-type> )* ')'
-                            SFUNC <state-functionname>
-                            STYPE <state-type>
-                            ( FINALFUNC <final-functionname> )?
-                            ( INITCOND <init-cond> )?
-


Sample:

CREATE AGGREGATE myaggregate ( val text )
-  SFUNC myaggregate_state
-  STYPE text
-  FINALFUNC myaggregate_final
-  INITCOND 'foo';
-

See the section on user-defined aggregates for a complete example.

CREATE AGGREGATE creates or replaces a user-defined aggregate.

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature already exists.

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

OR REPLACE and IF NOT EXISTS cannot be used together.

Aggregates belong to a keyspace. If no keyspace is specified in <aggregate-name>, the current keyspace is used (i.e. the keyspace specified using the USE statement). It is not possible to create a user-defined aggregate in one of the system keyspaces.

Signatures for user-defined aggregates follow the same rules as for user-defined functions.

STYPE defines the type of the state value and must be specified.

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

SFUNC references an existing function to be used as the state modifying function. The type of first argument of the state function must match STYPE. The remaining argument types of the state function must match the argument types of the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called with null.

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is defined, it is the return type of that function.

See the section on user-defined aggregates for more information.

DROP AGGREGATE

Syntax:

<drop-aggregate-stmt> ::= DROP AGGREGATE ( IF EXISTS )?
-                         ( <keyspace> '.' )? <aggregate-name>
-                         ( '(' <arg-type> ( ',' <arg-type> )* ')' )?
-

Sample:

DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded aggregates).

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if an aggregate with that signature does not exist.

Signatures for user-defined aggregates follow the same rules as for user-defined functions.

Data Manipulation

INSERT

Syntax:

<insertStatement> ::= INSERT INTO <tablename>
-                      ( ( <name-list> VALUES <value-list> )
-                      | ( JSON <string> ))
-                      ( IF NOT EXISTS )?
-                      ( USING <option> ( AND <option> )* )?
-
-<names-list> ::= '(' <identifier> ( ',' <identifier> )* ')'
-
-<value-list> ::= '(' <term-or-literal> ( ',' <term-or-literal> )* ')'
-
-<term-or-literal> ::= <term>
-                    | <collection-literal>
-
-<option> ::= TIMESTAMP <integer>
-           | TTL <integer>
-


Sample:

INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity", "director": "Joss Whedon", "year": 2005}'
-


The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert to must be supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the section on INSERT JSON for more details.

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means to know whether a creation or an update occurred.

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the insertion. But please note that using IF NOT EXISTS will incur a non-negligible performance cost (internally, Paxos will be used), so this should be used sparingly.
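
A minimal sketch, reusing the NerdMovies table from the sample above:

INSERT INTO NerdMovies (movie, director, main_actor, year)
                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
IF NOT EXISTS;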

All updates for an INSERT are applied atomically and in isolation.

Please refer to the UPDATE section for information on the <option> available and to the collections section for use of <collection-literal>. Also note that INSERT does not support counters, while UPDATE does.

UPDATE

Syntax:

<update-stmt> ::= UPDATE <tablename>
-                  ( USING <option> ( AND <option> )* )?
-                  SET <assignment> ( ',' <assignment> )*
-                  WHERE <where-clause>
-                  ( IF <condition> ( AND condition )* )?
-
-<assignment> ::= <identifier> '=' <term>
-               | <identifier> '=' <identifier> ('+' | '-') (<int-term> | <set-literal> | <list-literal>)
-               | <identifier> '=' <identifier> '+' <map-literal>
-               | <identifier> '[' <term> ']' '=' <term>
-
-<condition> ::= <identifier> <op> <term>
-              | <identifier> IN (<variable> | '(' ( <term> ( ',' <term> )* )? ')')
-              | <identifier> '[' <term> ']' <op> <term>
-              | <identifier> '[' <term> ']' IN <term>
-
-<op> ::= '<' | '<=' | '=' | '!=' | '>=' | '>'
-
-<where-clause> ::= <relation> ( AND <relation> )*
-
-<relation> ::= <identifier> '=' <term>
-             | <identifier> IN '(' ( <term> ( ',' <term> )* )? ')'
-             | <identifier> IN <variable>
-
-<option> ::= TIMESTAMP <integer>
-           | TTL <integer>
-


Sample:

UPDATE NerdMovies USING TTL 400
-SET director = 'Joss Whedon',
-    main_actor = 'Nathan Fillion',
-    year = 2005
-WHERE movie = 'Serenity';
-
-UPDATE UserActions SET total = total + 2 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';
-


The UPDATE statement writes one or more columns for a given row in a table. The <where-clause> is used to select the row to update and must include all columns composing the PRIMARY KEY (the IN relation is only supported for the last column of the partition key). Other column values are specified through <assignment> after the SET keyword.

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through the use of <condition>, see below): the row is created if none existed before, and updated otherwise. Furthermore, there is no means to know whether a creation or an update occurred.

It is however possible to use conditions on some columns through IF, in which case the row will not be updated unless such conditions are met. But please note that using IF conditions will incur a non-negligible performance cost (internally, Paxos will be used), so this should be used sparingly.

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

The c = c + 3 form of <assignment> is used to increment/decrement counters. The identifier after the ‘=’ sign must be the same as the one before the ‘=’ sign (only increment/decrement is supported on counters, not the assignment of a specific value).

The id = id + <collection-literal> and id[value1] = value2 forms of <assignment> are for collections. Please refer to the relevant section for more details.

<options>

The UPDATE and INSERT statements allow specifying the following options for the insertion:

  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • TTL: allows specifying an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL is specified in that update). By default, values never expire. A TTL of 0 or a negative value is equivalent to no TTL.
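
Both options can be combined in a single USING clause; a sketch against the NerdMovies sample above (the values are illustrative):

UPDATE NerdMovies USING TIMESTAMP 1442880000000000 AND TTL 86400
SET main_actor = 'Nathan Fillion'
WHERE movie = 'Serenity';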

DELETE

Syntax:

<delete-stmt> ::= DELETE ( <selection> ( ',' <selection> )* )?
-                  FROM <tablename>
-                  ( USING TIMESTAMP <integer>)?
-                  WHERE <where-clause>
-                  ( IF ( EXISTS | ( <condition> ( AND <condition> )*) ) )?
-
-<selection> ::= <identifier> ( '[' <term> ']' )?
-
-<where-clause> ::= <relation> ( AND <relation> )*
-
-<relation> ::= <identifier> '=' <term>
-             | <identifier> IN '(' ( <term> ( ',' <term> )* )? ')'
-             | <identifier> IN <variable>
-
-<condition> ::= <identifier> <op> <term>
-              | <identifier> IN (<variable> | '(' ( <term> ( ',' <term> )* )? ')')
-              | <identifier> '[' <term> ']' <op> <term>
-              | <identifier> '[' <term> ']' IN <term>
-
-<op> ::= '<' | '<=' | '=' | '!=' | '>=' | '>'
-


Sample:

DELETE FROM NerdMovies USING TIMESTAMP 1240003134 WHERE movie = 'Serenity';
-
-DELETE phone FROM Users WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);
-


The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, only those columns are deleted from the row indicated by the <where-clause> (the id[value] syntax in <selection> is for collections, please refer to the collection section for more details). Otherwise whole rows are removed. The <where-clause> specifies the key of the row(s) to delete (the IN relation is only supported for the last column of the partition key).

DELETE supports the TIMESTAMP option with the same semantics as in the UPDATE statement.

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

A DELETE operation can be made conditional using IF, as for UPDATE and INSERT. But please note that, as for the latter, this will incur a non-negligible performance cost (internally, Paxos will be used) and so should be used sparingly.

BATCH

Syntax:

<batch-stmt> ::= BEGIN ( UNLOGGED | COUNTER )? BATCH
-                 ( USING <option> ( AND <option> )* )?
-                    <modification-stmt> ( ';' <modification-stmt> )*
-                 APPLY BATCH
-
-<modification-stmt> ::= <insert-stmt>
-                      | <update-stmt>
-                      | <delete-stmt>
-
-<option> ::= TIMESTAMP <integer>
-


Sample:

BEGIN BATCH
-  INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
-  UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
-  INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
-  DELETE name FROM users WHERE userid = 'user1';
-APPLY BATCH;
-

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

  1. It saves network round-trips between the client and the server (and sometimes between the server coordinator and the replicas) when batching multiple updates.
  2. All updates in a BATCH belonging to a given partition key are performed in isolation.
  3. By default, all operations in the batch are performed as LOGGED, to ensure all mutations eventually complete (or none will). See the notes on UNLOGGED for more details.

Note that:

  • BATCH statements may only contain UPDATE, INSERT and DELETE statements.
  • Batches are not a full analogue for SQL transactions.
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp. Due to Cassandra’s conflict resolution procedure in the case of timestamp ties, operations may be applied in an order that is different from the order they are listed in the BATCH statement. To force a particular operation ordering, you must specify per-operation timestamps, as in the sketch below.
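A minimal sketch of per-operation timestamps, reusing the users sample above (the timestamp values are illustrative):

BEGIN BATCH
  INSERT INTO users (userid, password) VALUES ('user5', 'ch@ngem3d') USING TIMESTAMP 1445062668000000;
  UPDATE users USING TIMESTAMP 1445062668000001 SET password = 'ps33dhds' WHERE userid = 'user5';
APPLY BATCH;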

UNLOGGED

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note however that operations are only isolated within a single partition).

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might be left only partly applied.
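A minimal sketch, reusing the users sample above:

BEGIN UNLOGGED BATCH
  INSERT INTO users (userid, password) VALUES ('user6', 'ch@ngem3e');
  INSERT INTO users (userid, password) VALUES ('user7', 'ch@ngem3f');
APPLY BATCH;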

COUNTER

Use the COUNTER option for batched counter updates. Unlike other updates in Cassandra, counter updates are not idempotent.
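A minimal sketch, reusing the UserActions counter sample from the UPDATE section:

BEGIN COUNTER BATCH
  UPDATE UserActions SET total = total + 2 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';
  UPDATE UserActions SET total = total + 1 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'view';
APPLY BATCH;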

<option>

BATCH supports the TIMESTAMP option, with semantics similar to those described in the UPDATE statement (the timestamp applies to all the statements inside the batch). However, if used, TIMESTAMP must not be used in the statements within the batch.

Queries

SELECT

Syntax:

<select-stmt> ::= SELECT ( JSON )? <select-clause>
-                  FROM <tablename>
-                  ( WHERE <where-clause> )?
-                  ( ORDER BY <order-by> )?
-                  ( LIMIT <integer> )?
-                  ( ALLOW FILTERING )?
-
-<select-clause> ::= DISTINCT? <selection-list>
-                  | COUNT '(' ( '*' | '1' ) ')' (AS <identifier>)?
-
-<selection-list> ::= <selector> (AS <identifier>)? ( ',' <selector> (AS <identifier>)? )*
-                   | '*'
-
-<selector> ::= <identifier>
-             | WRITETIME '(' <identifier> ')'
-             | TTL '(' <identifier> ')'
-             | <function> '(' (<selector> (',' <selector>)*)? ')'
-
-<where-clause> ::= <relation> ( AND <relation> )*
-
-<relation> ::= <identifier> <op> <term>
-             | '(' <identifier> (',' <identifier>)* ')' <op> <term-tuple>
-             | <identifier> IN '(' ( <term> ( ',' <term>)* )? ')'
-             | '(' <identifier> (',' <identifier>)* ')' IN '(' ( <term-tuple> ( ',' <term-tuple>)* )? ')'
-             | TOKEN '(' <identifier> ( ',' <identifier>)* ')' <op> <term>
-
-<op> ::= '=' | '<' | '>' | '<=' | '>=' | CONTAINS | CONTAINS KEY
-<order-by> ::= <ordering> ( ',' <ordering> )*
-<ordering> ::= <identifier> ( ASC | DESC )?
-<term-tuple> ::= '(' <term> (',' <term>)* ')'
-


Sample:

SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);
-
-SELECT JSON name, occupation FROM users WHERE userid = 199;
-
-SELECT name AS user_name, occupation AS user_occupation FROM users;
-
-SELECT time, value
-FROM events
-WHERE event_type = 'myEvent'
-  AND time > '2011-02-03'
-  AND time <= '2012-01-01'
-
-SELECT COUNT(*) FROM users;
-
-SELECT COUNT(*) AS user_count FROM users;
-
-


The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of rows, where each row contains the collection of columns corresponding to the query. If the JSON keyword is used, the results for each row will contain only a single column named “json”. See the section on SELECT JSON for more details.

<select-clause>

The <select-clause> determines which columns need to be queried and returned in the result-set. It consists of either a comma-separated list of <selector>s or the wildcard character (*) to select all the columns defined for the table.

A <selector> is either a column name to retrieve or a <function> of one or more <term>s. The functions allowed are the same as for <term> and are described in the function section. In addition to these generic functions, the WRITETIME (resp. TTL) function allows selecting the timestamp of when the column was inserted (resp. the time to live (in seconds) for the column, or null if the column has no expiration set).

Any <selector> can be aliased using the AS keyword (see examples). Please note that the <where-clause> and <order-by> clauses should refer to the columns by their original names and not by their aliases.

The COUNT keyword can be used with parentheses enclosing *. If so, the query will return a single result: the number of rows matching the query. Note that COUNT(1) is supported as an alias.

<where-clause>

The <where-clause> specifies which rows must be queried. It is composed of relations on the columns that are part of the PRIMARY KEY and/or have a secondary index defined on them.

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered as an equal relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations on them are restricted to those that select a contiguous (for the ordering) set of rows. For instance, given

CREATE TABLE posts (
-    userid text,
-    blog_title text,
-    posted_at timestamp,
-    entry_title text,
-    content text,
-    category int,
-    PRIMARY KEY (userid, blog_title, posted_at)
-)
-

The following query is allowed:

SELECT entry_title, content FROM posts WHERE userid='john doe' AND blog_title='John''s Blog' AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are set):

// Needs a blog_title to be set to select ranges of posted_at
-SELECT entry_title, content FROM posts WHERE userid='john doe' AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'
-

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, token(-1) > token(0) in particular). Example:

SELECT * FROM posts WHERE token(userid) > token('tom') AND token(userid) < token('bob')
-

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full primary key.

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

SELECT * FROM posts WHERE userid='john doe' AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')
-

will request all rows that sort after the one having “John's Blog” as blog_title and ‘2012-01-01’ for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which wouldn’t be the case for:

SELECT * FROM posts WHERE userid='john doe' AND blog_title > 'John''s Blog' AND posted_at > '2012-01-01'
-

The tuple notation may also be used for IN clauses on CLUSTERING COLUMNS:

SELECT * FROM posts WHERE userid='john doe' AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))
-

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the map keys.
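As a sketch, using the images and users tables defined in the Working with collections section below (the indexes are created here purely for illustration):

CREATE INDEX ON images (tags);
SELECT * FROM images WHERE tags CONTAINS 'cat';

CREATE INDEX ON users (KEYS(favs));
SELECT * FROM users WHERE favs CONTAINS KEY 'author';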

<order-by>

The ORDER BY option allows selecting the order of the returned results. It takes as argument a list of column names along with the order for each column (ASC for ascending and DESC for descending, omitting the order being equivalent to ASC). Currently the possible orderings are limited (they depend on the table CLUSTERING ORDER):

  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.
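For instance, using the posts table above, the full clustering order can be reversed (a minimal sketch):

SELECT entry_title, posted_at FROM posts WHERE userid = 'john doe' ORDER BY blog_title DESC, posted_at DESC;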

LIMIT

The LIMIT option to a SELECT statement limits the number of rows returned by a query.
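For example, reusing the posts table above:

SELECT * FROM posts WHERE userid = 'john doe' LIMIT 10;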

ALLOW FILTERING

By default, CQL only allows select queries that don’t involve “filtering” server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (for the definition above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on it) and country of residence:

CREATE TABLE users (
-    username text PRIMARY KEY,
-    firstname text,
-    lastname text,
-    birth_year int,
-    country text
-)
-
-CREATE INDEX ON users(birth_year);
-

Then the following queries are valid:

SELECT * FROM users;
-SELECT firstname, lastname FROM users WHERE birth_year = 1981;
-

because in both cases, Cassandra guarantees that the performance of these queries will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored. Nevertheless, the number of nodes will always be multiple orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

However, the following query will be rejected:

SELECT firstname, lastname FROM users WHERE birth_year = 1981 AND country = 'FR';
-

because Cassandra cannot guarantee that it won’t have to scan a large amount of data even if the result of the query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you “know what you are doing”, you can force the execution of this query by using ALLOW FILTERING and so the following query is valid:

SELECT firstname, lastname FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;
-

Database Roles

CREATE ROLE

Syntax:

<create-role-stmt> ::= CREATE ROLE ( IF NOT EXISTS )? <identifier> ( WITH <option> ( AND <option> )* )?
-
-<option> ::= PASSWORD = <string>
-           | LOGIN = <boolean>
-           | SUPERUSER = <boolean>
-           | OPTIONS = <map_literal>
-

Sample:

CREATE ROLE new_role;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
-CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };
-

By default roles do not possess LOGIN privileges or SUPERUSER status.

Permissions on database resources are granted to roles; types of resources include keyspaces, tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is not.

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that connection, the client will acquire any roles and privileges granted to that role.

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

Role names should be quoted if they contain non-alphanumeric characters.

Setting credentials for internal authentication

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single quotation marks.
If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD clause is not necessary.

Creating a role conditionally

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. If the option is used and the role exists, the statement is a no-op.

CREATE ROLE other_role;
-CREATE ROLE IF NOT EXISTS other_role;
-

ALTER ROLE

Syntax:

<alter-role-stmt> ::= ALTER ROLE <identifier> ( WITH <option> ( AND <option> )* )?
-
-<option> ::= PASSWORD = <string>
-           | LOGIN = <boolean>
-           | SUPERUSER = <boolean>
-           | OPTIONS = <map_literal>
-

Sample:

ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;
-

Conditions on executing ALTER ROLE statements:

  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • To modify properties of a role, the client must be granted ALTER permission on that role

DROP ROLE

Syntax:

<drop-role-stmt> ::= DROP ROLE ( IF EXISTS )? <identifier>
-

Sample:

DROP ROLE alice;
-DROP ROLE IF EXISTS bob;
-

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.
Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is used. If the option is used and the role does not exist the statement is a no-op.

GRANT ROLE

Syntax:

<grant-role-stmt> ::= GRANT <identifier> TO <identifier>
-

Sample:

GRANT report_writer TO alice;
-

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also acquired by alice.
Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in error conditions:

GRANT role_a TO role_b;
-GRANT role_b TO role_a;
-
GRANT role_a TO role_b;
-GRANT role_b TO role_c;
-GRANT role_c TO role_a;
-

REVOKE ROLE

Syntax:

<revoke-role-stmt> ::= REVOKE <identifier> FROM <identifier>
-

Sample:

REVOKE report_writer FROM alice;
-

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the report_writer role are also revoked.

LIST ROLES

Syntax:

<list-roles-stmt> ::= LIST ROLES ( OF <identifier> )? ( NORECURSIVE )?
-

Sample:

LIST ROLES;
-

Returns all known roles in the system; this requires DESCRIBE permission on the database roles resource.

LIST ROLES OF alice;
-

Enumerate all roles granted to alice, including those transitively acquired.

LIST ROLES OF bob NORECURSIVE;
-

List all roles directly granted to bob.

CREATE USER

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a USER. For backward compatibility, the legacy syntax has been preserved with USER-centric statements becoming synonyms for the ROLE-based equivalents.

Syntax:

<create-user-statement> ::= CREATE USER ( IF NOT EXISTS )? <identifier> ( WITH PASSWORD <string> )? (<option>)?
-
-<option> ::= SUPERUSER
-           | NOSUPERUSER
-

Sample:

CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;
-

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of statements are equivalent:

CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
-CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;
-
-CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-
-CREATE USER alice WITH PASSWORD 'password_a';
-CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
-

ALTER USER

Syntax:

<alter-user-statement> ::= ALTER USER <identifier> ( WITH PASSWORD <string> )? ( <option> )?
-
-<option> ::= SUPERUSER
-           | NOSUPERUSER
-

ALTER USER alice WITH PASSWORD 'PASSWORD_A';
-ALTER USER bob SUPERUSER;
-

DROP USER

Syntax:

<drop-user-stmt> ::= DROP USER ( IF EXISTS )? <identifier>
-

Sample:

DROP USER alice;
-DROP USER IF EXISTS bob;
-

LIST USERS

Syntax:

<list-users-stmt> ::= LIST USERS;
-

Sample:

LIST USERS;
-

This statement is equivalent to

LIST ROLES;
-

but only roles with the LOGIN privilege are included in the output.

Data Control

Permissions

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type is modelled hierarchically:

  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> TABLE
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • Resources representing roles have the structure ALL ROLES -> ROLE

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a resource higher up the chain automatically grants that same permission on all resources lower down. For example, granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It is also possible to grant permissions on all functions scoped to a particular keyspace.
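For example, the first grant below covers every table in keyspace1, and the second covers every function scoped to keyspace1 (the role names are illustrative and reappear in the GRANT PERMISSION samples below):

GRANT SELECT ON KEYSPACE keyspace1 TO data_reader;
GRANT EXECUTE ON ALL FUNCTIONS IN KEYSPACE keyspace1 TO report_writer;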

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established following permissions changes.

The full set of available permissions is:

  • CREATE
  • ALTER
  • DROP
  • SELECT
  • MODIFY
  • AUTHORIZE
  • DESCRIBE
  • EXECUTE

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context of functions; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT a permission on resource to which it cannot be applied results in an error response. The following illustrates which permissions can be granted on which types of resource, and which statements are enabled by that permission.

permission resource operations
CREATE ALL KEYSPACES CREATE KEYSPACE
CREATE TABLE in any keyspace
CREATE KEYSPACE CREATE TABLE in specified keyspace
CREATE ALL FUNCTIONS CREATE FUNCTION in any keyspace
CREATE AGGREGATE in any keyspace
CREATE ALL FUNCTIONS IN KEYSPACE CREATE FUNCTION in keyspace
CREATE AGGREGATE in keyspace
CREATE ALL ROLES CREATE ROLE
ALTER ALL KEYSPACES ALTER KEYSPACE
ALTER TABLE in any keyspace
ALTER KEYSPACE ALTER KEYSPACE
ALTER TABLE in keyspace
ALTER TABLE ALTER TABLE
ALTER ALL FUNCTIONS CREATE FUNCTION replacing any existing
CREATE AGGREGATE replacing any existing
ALTER ALL FUNCTIONS IN KEYSPACE CREATE FUNCTION replacing existing in keyspace
CREATE AGGREGATE replacing any existing in keyspace
ALTER FUNCTION CREATE FUNCTION replacing existing
CREATE AGGREGATE replacing existing
ALTER ALL ROLES ALTER ROLE on any role
ALTER ROLE ALTER ROLE
DROP ALL KEYSPACES DROP KEYSPACE
DROP TABLE in any keyspace
DROP KEYSPACE DROP TABLE in specified keyspace
DROP TABLE DROP TABLE
DROP ALL FUNCTIONS DROP FUNCTION in any keyspace
DROP AGGREGATE in any keyspace
DROP ALL FUNCTIONS IN KEYSPACE DROP FUNCTION in keyspace
DROP AGGREGATE in keyspace
DROP FUNCTION DROP FUNCTION
DROP ALL ROLES DROP ROLE on any role
DROP ROLE DROP ROLE
SELECT ALL KEYSPACES SELECT on any table
SELECT KEYSPACE SELECT on any table in keyspace
SELECT TABLE SELECT on specified table
MODIFY ALL KEYSPACES INSERT on any table
UPDATE on any table
DELETE on any table
TRUNCATE on any table
MODIFY KEYSPACE INSERT on any table in keyspace
UPDATE on any table in keyspace
DELETE on any table in keyspace
TRUNCATE on any table in keyspace
MODIFY TABLE INSERT
UPDATE
DELETE
TRUNCATE
AUTHORIZE ALL KEYSPACES GRANT PERMISSION on any table
REVOKE PERMISSION on any table
AUTHORIZE KEYSPACE GRANT PERMISSION on table in keyspace
REVOKE PERMISSION on table in keyspace
AUTHORIZE TABLE GRANT PERMISSION
REVOKE PERMISSION
AUTHORIZE ALL FUNCTIONS GRANT PERMISSION on any function
REVOKE PERMISSION on any function
AUTHORIZE ALL FUNCTIONS IN KEYSPACE GRANT PERMISSION in keyspace
REVOKE PERMISSION in keyspace
AUTHORIZE FUNCTION GRANT PERMISSION
REVOKE PERMISSION
AUTHORIZE ALL ROLES GRANT ROLE grant any role
REVOKE ROLE revoke any role
AUTHORIZE ROLE GRANT ROLE grant role
REVOKE ROLE revoke role
DESCRIBE ALL ROLES LIST ROLES all roles or only roles granted to another, specified role
EXECUTE ALL FUNCTIONS SELECT, INSERT, UPDATE using any function
use of any function in CREATE AGGREGATE
EXECUTE ALL FUNCTIONS IN KEYSPACE SELECT, INSERT, UPDATE using any function in keyspace
use of any function in keyspace in CREATE AGGREGATE
EXECUTE FUNCTION SELECT, INSERT, UPDATE using function
use of function in CREATE AGGREGATE

GRANT PERMISSION

Syntax:

<grant-permission-stmt> ::= GRANT ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? ) ON <resource> TO <identifier>
-
-<permission> ::= CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-
-<resource> ::= ALL KEYSPACES
-             | KEYSPACE <identifier>
-             | ( TABLE )? <tablename>
-             | ALL ROLES
-             | ROLE <identifier>
-             | ALL FUNCTIONS ( IN KEYSPACE <identifier> )?
-             | FUNCTION <functionname>
-

Sample:

GRANT SELECT ON ALL KEYSPACES TO data_reader;
-

This gives any user with the role data_reader permission to execute SELECT statements on any table across all keyspaces

GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;
-

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace

GRANT DROP ON keyspace1.table1 TO schema_owner;
-

This gives any user with the schema_owner role permissions to DROP keyspace1.table1.

GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;
-

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries which use the function keyspace1.user_function( int )

GRANT DESCRIBE ON ALL ROLES TO role_admin;
-

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST ROLES statement

GRANT ALL

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target resource.

Automatic Granting

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or CREATE ROLE statement, the creator (the role the database user who issues the statement is identified as) is automatically granted all applicable permissions on the new resource.

REVOKE PERMISSION

Syntax:

<revoke-permission-stmt> ::= REVOKE ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? ) ON <resource> FROM <identifier>
-
-<permission> ::= CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE
-
-<resource> ::= ALL KEYSPACES
-             | KEYSPACE <identifier>
-             | ( TABLE )? <tablename>
-             | ALL ROLES
-             | ROLE <identifier>
-             | ALL FUNCTIONS ( IN KEYSPACE <identifier> )?
-             | FUNCTION <functionname>
-

Sample:

REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
-REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
-REVOKE DROP ON keyspace1.table1 FROM schema_owner;
-REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
-REVOKE DESCRIBE ON ALL ROLES FROM role_admin;
-

LIST PERMISSIONS

Syntax:

<list-permissions-stmt> ::= LIST ( ALL ( PERMISSIONS )? | <permission> ) 
-                                 ( ON <resource> )? 
-                                 ( OF <identifier> ( NORECURSIVE )? )?
-
-<resource> ::= ALL KEYSPACES
-             | KEYSPACE <identifier>
-             | ( TABLE )? <tablename>
-             | ALL ROLES
-             | ROLE <identifier>
-             | ALL FUNCTIONS ( IN KEYSPACE <identifier> )?
-             | FUNCTION <functionname>
-

Sample:

LIST ALL PERMISSIONS OF alice;
-

Show all permissions granted to alice, including those acquired transitively from any other roles.

LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;
-

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. For example, should bob have ALTER permission on keyspace1, that would be included in the results of this query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to bob or one of bob's roles.

LIST SELECT PERMISSIONS OF carlos;
-

Show any permissions granted to carlos or any of carlos's roles, limited to SELECT permissions on any resource.

Data Types

CQL supports a rich set of data types for columns defined in a table, including collection types. On top of those native and collection types, users can also provide custom types (through a Java class extending AbstractType loadable by Cassandra). The syntax of types is thus:

<type> ::= <native-type>
-         | <collection-type>
-         | <tuple-type>
-         | <string>       // Used for custom types. The fully-qualified name of a JAVA class
-
-<native-type> ::= ascii
-                | bigint
-                | blob
-                | boolean
-                | counter
-                | date
-                | decimal
-                | double
-                | float
-                | inet
-                | int
-                | smallint
-                | text
-                | time
-                | timestamp
-                | timeuuid
-                | tinyint
-                | uuid
-                | varchar
-                | varint
-
-<collection-type> ::= list '<' <native-type> '>'
-                    | set  '<' <native-type> '>'
-                    | map  '<' <native-type> ',' <native-type> '>'
-<tuple-type> ::= tuple '<' <type> (',' <type>)* '>'
-

Note that the native types are keywords and as such are case-insensitive. They are however not reserved ones.

The following table gives additional information on the native data types, and on which kinds of constants each type supports:

type constants supported description
ascii strings ASCII character string
bigint integers 64-bit signed long
blob blobs Arbitrary bytes (no validation)
boolean booleans true or false
counter integers Counter column (64-bit signed value). See Counters for details
date integers, strings A date (with no corresponding time value). See Working with dates below for more information.
decimal integers, floats Variable-precision decimal
double integers 64-bit IEEE-754 floating point
float integers, floats 32-bit IEEE-754 floating point
inet strings An IP address. It can be either 4 bytes long (IPv4) or 16 bytes long (IPv6). There is no inet constant, IP addresses should be input as strings
int integers 32-bit signed int
smallint integers 16-bit signed int
text strings UTF8 encoded string
time integers, strings A time with nanosecond precision. See Working with time below for more information.
timestamp integers, strings A timestamp. String constants allow timestamps to be input as dates, see Working with timestamps below for more information.
timeuuid uuids Type 1 UUID. This is generally used as a “conflict-free” timestamp. Also see the functions on Timeuuid
tinyint integers 8-bit signed int
uuid uuids Type 1 or type 4 UUID
varchar strings UTF8 encoded string
varint integers Arbitrary-precision integer

For more information on how to use the collection types, see the Working with collections section below.

Working with timestamps

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the standard base time known as “the epoch”: January 1 1970 at 00:00:00 GMT.

Timestamps can be input in CQL as simple long integers, giving the number of milliseconds since the epoch, as defined above.

They can also be input as string literals in any of the following ISO 8601 formats, each representing the time and date Feb 3, 2011, at 04:05:00 AM, GMT:

  • 2011-02-03 04:05+0000
  • 2011-02-03 04:05:00+0000
  • 2011-02-03 04:05:00.000+0000
  • 2011-02-03T04:05+0000
  • 2011-02-03T04:05:00+0000
  • 2011-02-03T04:05:00.000+0000

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is -0800. The time zone may be omitted if desired, in which case the date will be interpreted as being in the time zone under which the coordinating Cassandra node is configured.

  • 2011-02-03 04:05
  • 2011-02-03 04:05:00
  • 2011-02-03 04:05:00.000
  • 2011-02-03T04:05
  • 2011-02-03T04:05:00
  • 2011-02-03T04:05:00.000

There are clear difficulties inherent in relying on the time zone configuration being as expected, though, so it is recommended that the time zone always be specified for timestamps when feasible.

The time of day may also be omitted, if the date is the only piece that matters:

  • 2011-02-03
  • 2011-02-03+0000

In that case, the time of day will default to 00:00:00, in the specified or default time zone.

Working with dates

Values of the date type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at the center of the range (2^31). The epoch is January 1st, 1970.

A date can be input in CQL as an unsigned integer as defined above.

They can also be input as string literals in the following format:

  • 2014-01-01

Working with time

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

A time can be input in CQL as simple long integers, giving the number of nanoseconds since midnight.

They can also be input as string literals in any of the following formats:

  • 08:12:54
  • 08:12:54.123
  • 08:12:54.123456
  • 08:12:54.123456789

Counters

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed integer and on which 2 operations are supported: incrementation and decrementation (see UPDATE for syntax). Note the value of a counter cannot be set. A counter doesn’t exist until first incremented/decremented, and the first incrementation/decrementation is made as if the previous value was 0. Deletion of counter columns is supported but has some limitations (see the Cassandra Wiki for more information).
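A minimal sketch (the table and column names are illustrative):

CREATE TABLE page_views (
    page text PRIMARY KEY,
    views counter
);

UPDATE page_views SET views = views + 1 WHERE page = '/home';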

The use of the counter type is limited in the following way:

  • It cannot be used for a column that is part of the PRIMARY KEY of a table.
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside the PRIMARY KEY have the counter type, or none of them have it.

Working with collections

Noteworthy characteristics

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all the messages sent by a given user”, “events registered by a sensor”, ...), then collections are not appropriate anymore and a specific table (with clustering columns) should be used. Concretely, collections have the following limitations:

  • Collections are always read in their entirety (and reading one is not paged internally).
  • Collections cannot have more than 65535 elements. More precisely, while it may be possible to insert more than 65535 elements, it is not possible to read more than the 65535 first elements (see CASSANDRA-5428 for details).
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do (see the section on lists below for details). It is thus advised to prefer sets over lists when possible.

Please note that while some of those limitations may or may not be loosened in the future, the general rule that collections are for denormalizing small amounts of data is meant to stay.

Maps

A map is a typed set of key-value pairs, where keys are unique. Furthermore, note that maps are internally sorted by their keys and will thus always be returned in that order. To create a column of type map, use the map keyword suffixed with comma-separated key and value types, enclosed in angle brackets. For example:

CREATE TABLE users (
-    id text PRIMARY KEY,
-    given text,
-    surname text,
-    favs map<text, text>   // A map of text keys, and text values
-)
-

Writing map data is accomplished with a JSON-inspired syntax. To write a record using INSERT, specify the entire map as a JSON-style associative array. Note: This form will always replace the entire map.

// Inserting (or Updating)
-INSERT INTO users (id, given, surname, favs)
-           VALUES ('jsmith', 'John', 'Smith', { 'fruit' : 'apple', 'band' : 'Beatles' })
-

Adding or updating key-values of a (potentially) existing map can be accomplished either by subscripting the map column in an UPDATE statement or by adding a new map literal:

// Updating (or inserting)
-UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith'
-UPDATE users SET favs = favs +  { 'movie' : 'Cassablanca' } WHERE id = 'jsmith'
-

Note that TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated values. In other words,

// Updating (or inserting)
-UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith'
-

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

Deleting a map record is done with:

DELETE favs['author'] FROM users WHERE id = 'jsmith'
-

Sets

A set is a typed collection of unique values. Sets are ordered by their values. To create a column of type set, use the set keyword suffixed with the value type enclosed in angle brackets. For example:

CREATE TABLE images (
-    name text PRIMARY KEY,
-    owner text,
-    date timestamp,
-    tags set<text>
-);
-

Writing a set is accomplished by comma separating the set values, and enclosing them in curly braces. Note: An INSERT will always replace the entire set.

INSERT INTO images (name, owner, date, tags)
-            VALUES ('cat.jpg', 'jsmith', 'now', { 'kitten', 'cat', 'pet' });
-

Adding and removing values of a set can be accomplished with an UPDATE by adding/removing new set values to an existing set column.

UPDATE images SET tags = tags + { 'cute', 'cuddly' } WHERE name = 'cat.jpg';
-UPDATE images SET tags = tags - { 'lame' } WHERE name = 'cat.jpg';
-

As with maps, TTLs if used only apply to the newly inserted/updated values.

Lists

A list is a typed collection of non-unique values where elements are ordered by their position in the list. To create a column of type list, use the list keyword suffixed with the value type enclosed in angle brackets. For example:

CREATE TABLE plays (
-    id text PRIMARY KEY,
-    game text,
-    players int,
-    scores list<int>
-)
-

Do note that as explained below, lists have some limitations and performance considerations to take into account, and it is advised to prefer sets over lists when this is possible.

Writing list data is accomplished with a JSON-style syntax. To write a record using INSERT, specify the entire list as a JSON array. Note: An INSERT will always replace the entire list.

INSERT INTO plays (id, game, players, scores)
-           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);
-

Adding (appending or prepending) values to a list can be accomplished by adding a new JSON-style array to an existing list column.

UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
-UPDATE plays SET players = 5, scores = [ 12 ] + scores WHERE id = '123-afde';
-

It should be noted that append and prepend are not idempotent operations. This means that if an append or a prepend operation times out, it is not always safe to retry the operation (as this could result in the record being appended or prepended twice).

Lists also provide the following operations: setting an element by its position in the list, removing an element by its position in the list, and removing all occurrences of a given value from the list. However, and contrary to all the other collection operations, these three operations induce an internal read before the update, and will thus typically have slower performance characteristics. Those operations have the following syntax:

UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';                // sets the 2nd element of scores to 7 (raises an error if scores has fewer than 2 elements)
-DELETE scores[1] FROM plays WHERE id = '123-afde';                   // deletes the 2nd element of scores (raises an error if scores has fewer than 2 elements)
-UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; // removes all occurrences of 12 and 21 from scores
-

As with maps, TTLs if used only apply to the newly inserted/updated values.

Functions

CQL3 distinguishes between built-in functions (so called ‘native functions’) and user-defined functions. CQL3 includes several native functions, described below:

Token

The token function allows computing the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

The types of the arguments of token depend on the types of the partition key columns. The return type depends on the partitioner in use:

  • For Murmur3Partitioner, the return type is bigint.
  • For RandomPartitioner, the return type is varint.
  • For ByteOrderedPartitioner, the return type is blob.

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by

CREATE TABLE users (
-    userid text PRIMARY KEY,
-    username text,
-    ...
-)
-

then the token function will take a single argument of type text (in that case, the partition key is userid (there are no clustering columns so the partition key is the same as the primary key)), and the return type will be bigint.

Uuid

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or SET statements.
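A minimal sketch (the events table and its columns are illustrative, not taken from earlier samples):

CREATE TABLE events (
    id uuid PRIMARY KEY,
    payload text
);

INSERT INTO events (id, payload) VALUES (uuid(), 'something happened');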

Timeuuid functions

now

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time the statement using it is executed). Note that this method is useful for insertion but is largely nonsensical in WHERE clauses. For instance, a query of the form

SELECT * FROM myTable WHERE t = now()
-

will never return any result by design, since the value returned by now() is guaranteed to be unique.
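For insertion, however, now() is a convenient way to generate the timeuuid value of a row; a minimal sketch, assuming myTable from the example above also has a value column:

INSERT INTO myTable (t, value) VALUES (now(), 'some value');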

minTimeuuid and maxTimeuuid

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having timestamp t. So for instance:

SELECT * FROM myTable WHERE t > maxTimeuuid('2013-01-01 00:05+0000') AND t < minTimeuuid('2013-02-02 10:00+0000')
-

will select all rows where the timeuuid column t is strictly older than ‘2013-01-01 00:05+0000’ but strictly younger than ‘2013-02-02 10:00+0000’. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > maxTimeuuid('2013-01-01 00:05+0000').

Warning: We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the time-based UUID generation process specified by RFC 4122. In particular, the values returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

Time conversion functions

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native type.

function name input type description
toDate timeuuid Converts the timeuuid argument into a date type
toDate timestamp Converts the timestamp argument into a date type
toTimestamp timeuuid Converts the timeuuid argument into a timestamp type
toTimestamp date Converts the date argument into a timestamp type
toUnixTimestamp timeuuid Converts the timeuuid argument into a bigInt raw value
toUnixTimestamp timestamp Converts the timestamp argument into a bigInt raw value
toUnixTimestamp date Converts the date argument into a bigInt raw value
dateOf timeuuid Similar to toTimestamp(timeuuid) (DEPRECATED)
unixTimestampOf timeuuid Similar to toUnixTimestamp(timeuuid) (DEPRECATED)
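For example, assuming myTable from the Timeuuid functions section has a timeuuid column t, these functions can be used directly in the selection clause (a minimal sketch):

SELECT toDate(t), toTimestamp(t), toUnixTimestamp(t) FROM myTable;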

Blob conversion functions

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL3 (a notable exception is blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a blob argument and converts it back to a value of type type. So, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.

Aggregates

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.
If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.

CQL3 distinguishes between built-in aggregates (so called ‘native aggregates’) and user-defined aggregates. CQL3 includes several native aggregates, described below:

Count

The count function can be used to count the rows returned by a query. Example:

SELECT COUNT(*) FROM plays;
-SELECT COUNT(1) FROM plays;
-

It can also be used to count the non-null values of a given column. Example:

SELECT COUNT(scores) FROM plays;
-

Max and Min

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a given column.

SELECT MIN(players), MAX(players) FROM plays WHERE game = 'quake';
-

Sum

The sum function can be used to sum up all the values returned by a query for a given column.

SELECT SUM(players) FROM plays;
-

Avg

The avg function can be used to compute the average of all the values returned by a query for a given column.

SELECT AVG(players) FROM plays;
-

User-Defined Functions

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and Scala) can be added by adding a JAR to the classpath.

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

CREATE FUNCTION sample ( arg int ) ...;
-CREATE FUNCTION sample ( arg text ) ...;
-

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of exceptions. An exception during function execution will result in the entire statement failing.

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the documentation of the Java Driver for details on handling tuple types and user-defined types.

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

Note the use of the double dollar-sign syntax ($$ ... $$) to enclose the UDF source code. For example:

CREATE FUNCTION some_function ( arg int )
-  RETURNS NULL ON NULL INPUT
-  RETURNS int
-  LANGUAGE java
-  AS $$ return arg; $$;
-
-SELECT some_function(column) FROM atable ...;
-UPDATE atable SET col = some_function(?) ...;
-

CREATE TYPE custom_type (txt text, i int);
-CREATE FUNCTION fct_using_udt ( udtarg frozen<custom_type> )
-  RETURNS NULL ON NULL INPUT
-  RETURNS text
-  LANGUAGE java
-  AS $$ return udtarg.getString("txt"); $$;
-

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

See CREATE FUNCTION and DROP FUNCTION.

User-Defined Aggregates

User-defined aggregates allow creation of custom aggregate functions using UDFs. Common examples of aggregate functions are count, min, and max.

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first argument of the state function must have type STYPE. The remaining arguments of the state function must match the types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with the last state value as its argument.

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final function (since the overload can appear after creation of the aggregate).

User-defined aggregates can be used in SELECT statements.

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE statement):

CREATE OR REPLACE FUNCTION averageState ( state tuple<int,bigint>, val int )
-  CALLED ON NULL INPUT
-  RETURNS tuple<int,bigint>
-  LANGUAGE java
-  AS '
-    if (val != null) {
-      state.setInt(0, state.getInt(0)+1);
-      state.setLong(1, state.getLong(1)+val.intValue());
-    }
-    return state;
-  ';
-
-CREATE OR REPLACE FUNCTION averageFinal ( state tuple<int,bigint> )
-  CALLED ON NULL INPUT
-  RETURNS double
-  LANGUAGE java
-  AS '
-    double r = 0;
-    if (state.getInt(0) == 0) return null;
-    r = state.getLong(1);
-    r /= state.getInt(0);
-    return Double.valueOf(r);
-  ';
-
-CREATE OR REPLACE AGGREGATE average ( int )
-  SFUNC averageState
-  STYPE tuple<int,bigint>
-  FINALFUNC averageFinal
-  INITCOND (0, 0);
-
-CREATE TABLE atable (
-  pk int PRIMARY KEY,
-  val int);
-INSERT INTO atable (pk, val) VALUES (1,1);
-INSERT INTO atable (pk, val) VALUES (2,2);
-INSERT INTO atable (pk, val) VALUES (3,3);
-INSERT INTO atable (pk, val) VALUES (4,4);
-SELECT average(val) FROM atable;
-

See CREATE AGGREGATE and DROP AGGREGATE.

JSON Support

Cassandra 2.2 introduces JSON support to SELECT and INSERT statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced); it simply provides a convenient way to work with JSON documents.

SELECT JSON

With SELECT statements, the new JSON keyword can be used to return each row as a single JSON encoded map. The remainder of the SELECT statement behavior is the same.

The result map keys are the same as the column names in a normal result set. For example, a statement like "SELECT JSON a, ttl(b) FROM ..." would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, "SELECT JSON myColumn FROM ..." would result in a map key "\"myColumn\"" (note the escaped quotes).

The map values will be JSON-encoded representations (as described below) of the result set values.
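For instance, reusing the users sample from the SELECT section (the returned document is illustrative):

SELECT JSON name, occupation FROM users WHERE userid = 199;

// might return a single column named "json" containing:
// {"name": "jsmith", "occupation": "engineer"}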

INSERT JSON

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a table with two columns named “myKey” and “value”, you would do the following:

INSERT INTO mytable JSON '{"\"myKey\"": 0, "value": 0}'
-

Any columns which are omitted from the JSON map will default to a NULL value (which will result in a tombstone being created).

JSON Encoding of Cassandra Data Types

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will also accept string representations matching the CQL literal format for all single-field types. For example, floats, ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string representation of the collection.

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and toJson()):

type formats accepted return format notes
ascii string string Uses JSON’s \u character escape
bigint integer, string integer String must be valid 64 bit integer
blob string string String should be 0x followed by an even number of hex digits
boolean boolean, string boolean String must be “true” or "false"
date string string Date in format YYYY-MM-DD, timezone UTC
decimal integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder
double integer, float, string float String must be valid integer or float
float integer, float, string float String must be valid integer or float
inet string string IPv4 or IPv6 address
int integer, string integer String must be valid 32 bit integer
list list, string list Uses JSON’s native list representation
map map, string map Uses JSON’s native map representation
smallint integer, string integer String must be valid 16 bit integer
set list, string list Uses JSON’s native list representation
text string string Uses JSON’s \u character escape
time string string Time of day in format HH-MM-SS[.fffffffff]
timestamp integer, string string A timestamp. String constants allow timestamps to be input as dates, see the Working with timestamps section for more information. Timestamps with format YYYY-MM-DD HH:MM:SS.SSS are returned.
timeuuid string string Type 1 UUID. See Constants for the UUID format
tinyint integer, string integer String must be valid 8 bit integer
tuple list, string list Uses JSON’s native list representation
UDT map, string map Uses JSON’s native map representation with field names as keys
uuid string string See Constants for the UUID format
varchar string string Uses JSON’s \u character escape
varint integer, string integer Variable length; may overflow 32 or 64 bit integers in client-side decoder

The fromJson() Function

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.
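A minimal sketch, reusing the users table from the SELECT samples (the values are illustrative):

INSERT INTO users (userid, name) VALUES (199, fromJson('"John Smith"'));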

The toJson() Function

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used in the selection clause of a SELECT statement.
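A minimal sketch, again reusing the users table from the SELECT samples:

SELECT toJson(name), toJson(occupation) FROM users WHERE userid = 199;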

Appendix A: CQL Keywords

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (but one can enclose a reserved keyword in double quotes to use it as an identifier). Non-reserved keywords however only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d'être of these non-reserved keywords is convenience: some keywords are non-reserved when it was always easy for the parser to decide whether they were used as keywords or not.
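For example, batch is a reserved keyword, so it must be double-quoted to be used as a column name (the table is a minimal illustrative sketch):

CREATE TABLE jobs (
    id int PRIMARY KEY,
    "batch" text
);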

Keyword Reserved?
ADD yes
AGGREGATE no
ALL no
ALLOW yes
ALTER yes
AND yes
APPLY yes
AS no
ASC yes
ASCII no
AUTHORIZE yes
BATCH yes
BEGIN yes
BIGINT no
BLOB no
BOOLEAN no
BY yes
CALLED no
CLUSTERING no
COLUMNFAMILY yes
COMPACT no
CONTAINS no
COUNT no
COUNTER no
CREATE yes
CUSTOM no
DATE no
DECIMAL no
DELETE yes
DESC yes
DESCRIBE yes
DISTINCT no
DOUBLE no
DROP yes
ENTRIES yes
EXECUTE yes
EXISTS no
FILTERING no
FINALFUNC no
FLOAT no
FROM yes
FROZEN no
FULL yes
FUNCTION no
FUNCTIONS no
GRANT yes
IF yes
IN yes
INDEX yes
INET no
INFINITY yes
INITCOND no
INPUT no
INSERT yes
INT no
INTO yes
JSON no
KEY no
KEYS no
KEYSPACE yes
KEYSPACES no
LANGUAGE no
LIMIT yes
LIST no
LOGIN no
MAP no
MODIFY yes
NAN yes
NOLOGIN no
NORECURSIVE yes
NOSUPERUSER no
NOT yes
NULL yes
OF yes
ON yes
OPTIONS no
OR yes
ORDER yes
PASSWORD no
PERMISSION no
PERMISSIONS no
PRIMARY yes
RENAME yes
REPLACE yes
RETURNS no
REVOKE yes
ROLE no
ROLES no
SCHEMA yes
SELECT yes
SET yes
SFUNC no
SMALLINT no
STATIC no
STORAGE no
STYPE no
SUPERUSER no
TABLE yes
TEXT no
TIME no
TIMESTAMP no
TIMEUUID no
TINYINT no
TO yes
TOKEN yes
TRIGGER no
TRUNCATE yes
TTL no
TUPLE no
TYPE no
UNLOGGED yes
UPDATE yes
USE yes
USER no
USERS no
USING yes
UUID no
VALUES no
VARCHAR no
VARINT no
WHERE yes
WITH yes
WRITETIME no

Appendix B: CQL Reserved Types

The following type names are not currently used by CQL, but are reserved for potential future use. User-defined types may not use reserved type names as their name.

type
bitstring
byte
complex
date
enum
interval
macaddr
smallint

Changes

The following describes the changes in each version of CQL.

3.3.1

  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X

3.3.0

  • Adds new aggregates
  • User-defined functions are now supported through CREATE FUNCTION and DROP FUNCTION.
  • User-defined aggregates are now supported through CREATE AGGREGATE and DROP AGGREGATE.
  • Allows double-dollar enclosed string literals as an alternative to single-quote enclosed strings.
  • Introduces Roles to supersede user-based authentication and access control
  • Date and Time data types have been added
  • JSON support has been added
  • Tinyint and Smallint data types have been added
  • Adds new time conversion functions and deprecates dateOf and unixTimestampOf. See Time conversion functions.

3.2.0

  • User-defined types are now supported through CREATE TYPE, ALTER TYPE, and DROP TYPE
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the keys() function
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • Tuple types were added to hold fixed-length sets of typed positional fields (see the section on types )
  • DROP INDEX now supports optionally specifying a keyspace

3.1.7

  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations of clustering columns. See SELECT WHERE clauses.
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, respectively.

3.1.6

  • A new uuid method has been added.
  • Support for DELETE ... IF EXISTS syntax.

3.1.5

3.1.4

3.1.3

  • Millisecond precision formats have been added to the timestamp parser (see working with dates ).

3.1.2

  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or a keyspace/table one), you will now need to double-quote them (see quoted identifiers).

3.1.1

  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable will be a list of whatever type c is.
  • It is now possible to use named bind variables (using :name instead of ?).

3.1.0

  • The ALTER TABLE DROP option has been re-enabled for CQL3 tables and now has new semantics: the space formerly used by dropped columns will eventually be reclaimed (post-compaction). You should not re-add previously dropped columns unless you use timestamps with microsecond precision (see CASSANDRA-3919 for more details).
  • The SELECT statement now supports aliases in the select clause. Aliases in WHERE and ORDER BY clauses are not supported. See the section on SELECT for details.
  • CREATE statements for KEYSPACE, TABLE and INDEX now support an IF NOT EXISTS condition. Similarly, DROP statements support an IF EXISTS condition.
  • INSERT statements optionally support an IF NOT EXISTS condition, and UPDATE supports IF conditions.

3.0.5

  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626).

3.0.4

  • Updated the syntax for custom secondary indexes.
  • Non-equal conditions on the partition key are no longer supported, even for ordered partitioners, as this was not correct (the order was not that of the partition key's type). Instead, the token method should always be used for range queries on the partition key (see WHERE clauses).

3.0.3

3.0.2

  • Type validation for constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now stricter. See the data types section for details on which constants are allowed for which types.
  • The type validation fix of the previous point has led to the introduction of blob constants to allow inputting blobs. Do note that while inputting blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated (in particular, the data types section does not list string constants as valid blobs) and will be removed by a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • A number of functions to convert native types to blobs have also been introduced. Furthermore, the token function is now also allowed in select clauses. See the section on functions for details.

3.0.1

  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense that date strings are not valid timeuuids, and it was thus resulting in confusing behaviors. However, the following new methods have been added to help working with timeuuids: now, minTimeuuid, maxTimeuuid, dateOf and unixTimestampOf. See the section dedicated to these methods for more detail.
  • Float constants now support exponent notation. In other words, 4.2E10 is now a valid floating point value.

Versioning

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no correlation between Cassandra release versions and the CQL language version.

version description
Major The major version must be bumped when backward incompatible changes are introduced. This should rarely occur.
Minor Minor version increments occur when new, but backward compatible, functionality is introduced.
Patch The patch version is incremented when bugs are fixed.
\ No newline at end of file diff --git a/src/doc/old/CQL-3.0.html b/src/doc/old/CQL-3.0.html deleted file mode 100644 index a3395b7e0..000000000 --- a/src/doc/old/CQL-3.0.html +++ /dev/null @@ -1,670 +0,0 @@ -CQL

Cassandra Query Language (CQL) v3.4.0

  1. Cassandra Query Language (CQL) v3.4.0
    1. CQL Syntax
      1. Preamble
      2. Conventions
      3. Identifiers and keywords
      4. Constants
      5. Comments
      6. Statements
      7. Prepared Statement
    2. Data Definition
      1. CREATE KEYSPACE
      2. USE
      3. ALTER KEYSPACE
      4. DROP KEYSPACE
      5. CREATE TABLE
      6. ALTER TABLE
      7. DROP TABLE
      8. TRUNCATE
      9. CREATE INDEX
      10. DROP INDEX
      11. CREATE MATERIALIZED VIEW
      12. ALTER MATERIALIZED VIEW
      13. DROP MATERIALIZED VIEW
      14. CREATE TYPE
      15. ALTER TYPE
      16. DROP TYPE
      17. CREATE TRIGGER
      18. DROP TRIGGER
      19. CREATE FUNCTION
      20. DROP FUNCTION
      21. CREATE AGGREGATE
      22. DROP AGGREGATE
    3. Data Manipulation
      1. INSERT
      2. UPDATE
      3. DELETE
      4. BATCH
    4. Queries
      1. SELECT
    5. Database Roles
      1. CREATE ROLE
      2. ALTER ROLE
      3. DROP ROLE
      4. GRANT ROLE
      5. REVOKE ROLE
      6. CREATE USER
      7. ALTER USER
      8. DROP USER
      9. LIST USERS
    6. Data Control
      1. Permissions
      2. GRANT PERMISSION
      3. REVOKE PERMISSION
    7. Data Types
      1. Working with timestamps
      2. Working with dates
      3. Working with time
      4. Counters
      5. Working with collections
    8. Functions
      1. Token
      2. Uuid
      3. Timeuuid functions
      4. Time conversion functions
      5. Blob conversion functions
    9. Aggregates
      1. Count
      2. Max and Min
      3. Sum
      4. Avg
    10. User-Defined Functions
    11. User-Defined Aggregates
    12. JSON Support
      1. SELECT JSON
      2. INSERT JSON
      3. JSON Encoding of Cassandra Data Types
      4. The fromJson() Function
      5. The toJson() Function
    13. Appendix A: CQL Keywords
    14. Appendix B: CQL Reserved Types
    15. Changes
      1. 3.4.0
      2. 3.3.1
      3. 3.3.0
      4. 3.2.0
      5. 3.1.7
      6. 3.1.6
      7. 3.1.5
      8. 3.1.4
      9. 3.1.3
      10. 3.1.2
      11. 3.1.1
      12. 3.1.0
      13. 3.0.5
      14. 3.0.4
      15. 3.0.3
      16. 3.0.2
      17. 3.0.1
    16. Versioning

CQL Syntax

Preamble

This document describes the Cassandra Query Language (CQL) version 3. CQL v3 is not backward compatible with CQL v2 and differs from it in numerous ways. Note that this document describes the latest version of the language. However, the changes section provides the diff between the different versions of CQL v3.

CQL v3 offers a model very close to SQL in the sense that data is put in tables containing rows of columns. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL. But please note that, as such, they do not refer to the concept of rows and columns found in the internal implementation of Cassandra and in the thrift and CQL v2 API.

Conventions

To aid in specifying the CQL syntax, we will use the following conventions in this document:

  • Language rules will be given in a BNF -like notation:
<start> ::= TERMINAL <non-terminal1> <non-terminal1>
-
  • Nonterminal symbols will have <angle brackets>.
  • As additional shortcut notations to BNF, we’ll use traditional regular expression’s symbols (?, + and *) to signify that a given symbol is optional and/or can be repeated. We’ll also allow parentheses to group symbols and the [<characters>] notation to represent any one of <characters>.
  • The grammar is provided for documentation purposes and leaves some minor details out. For instance, the last column definition in a CREATE TABLE statement is optional but supported if present, even though the grammar provided in this document suggests it is not.
  • Sample code will be provided in a code block:
SELECT sample_usage FROM cql;
-
  • References to keywords or pieces of CQL code in running text will be shown in a fixed-width font.

Identifiers and keywords

The CQL language uses identifiers (or names) to identify tables, columns and other objects. An identifier is a token matching the regular expression [a-zA-Z][a-zA-Z0-9_]*.

A number of such identifiers, like SELECT or WITH, are keywords. They have a fixed meaning for the language and most are reserved. The list of those keywords can be found in Appendix A.

Identifiers and (unquoted) keywords are case insensitive. Thus SELECT is the same as select or sElEcT, and myId is the same as myid or MYID, for instance. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.

There is a second kind of identifiers called quoted identifiers, defined by enclosing an arbitrary sequence of characters in double-quotes ("). Quoted identifiers are never keywords. Thus "select" is not a reserved keyword and can be used to refer to a column, while select would raise a parse error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive ("My Quoted Id" is different from "my quoted id"). A fully lowercase quoted identifier that matches [a-zA-Z][a-zA-Z0-9_]* is equivalent to the unquoted identifier obtained by removing the double-quotes (so "myid" is equivalent to myid and to myId but different from "myId"). Inside a quoted identifier, the double-quote character can be repeated to escape it, so "foo "" bar" is a valid identifier.
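
As a small illustrative sketch (the table "MyTable" and its columns are hypothetical), the case-sensitivity rules play out as follows:

CREATE TABLE "MyTable" (id int PRIMARY KEY, "myId" int);
SELECT "myId" FROM "MyTable";  -- valid: matches the case-sensitive column "myId"
SELECT myid FROM "MyTable";    -- error: the unquoted myid is not the same identifier as "myId"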

Warning: quoted identifiers allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional update, the server will respond with a result-set containing a special result named "[applied]". If you’ve declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred, but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by square brackets (like "[applied]") and any name that looks like a function call (like "f(x)").

Constants

CQL defines the following kinds of constants: strings, integers, floats, booleans, uuids and blobs (one example of each kind is shown after the list):

  • A string constant is an arbitrary sequence of characters enclosed by single-quotes ('). One can include a single-quote in a string by repeating it, e.g. 'It''s raining today'. Those are not to be confused with quoted identifiers that use double-quotes.
  • An integer constant is defined by '-'?[0-9]+.
  • A float constant is defined by '-'?[0-9]+('.'[0-9]*)?([eE][+-]?[0-9+])?. On top of that, NaN and Infinity are also float constants.
  • A boolean constant is either true or false up to case-insensitivity (i.e. True is a valid boolean constant).
  • A UUID constant is defined by hex{8}-hex{4}-hex{4}-hex{4}-hex{12} where hex is a hexadecimal character, e.g. [0-9a-fA-F], and {4} is the number of such characters.
  • A blob constant is a hexadecimal number defined by 0[xX](hex)+ where hex is a hexadecimal character, e.g. [0-9a-fA-F].

For how these constants are typed, see the data types section.
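
By way of illustration, here is one valid constant of each kind (string, integer, float, boolean, uuid and blob, respectively):

'It''s raining today'
42
-4.2e10
true
01234567-0123-0123-0123-0123456789ab
0xcafebabe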

Comments

A comment in CQL is a line beginning with either double dashes (--) or a double slash (//).

Multi-line comments are also supported through enclosure within /* and */ (but nesting is not supported).

-- This is a comment
-// This is a comment too
-/* This is
-   a multi-line comment */
-

Statements

CQL consists of statements. As in SQL, these statements can be divided into three categories:

  • Data definition statements, which allow setting and changing the way data is stored.
  • Data manipulation statements, which allow changing data
  • Queries, to look up data

All statements end with a semicolon (;) but that semicolon can be omitted when dealing with a single statement. The supported statements are described in the following sections. When describing the grammar of said statements, we will reuse the non-terminal symbols defined below:

<identifier> ::= any quoted or unquoted identifier, excluding reserved keywords
- <tablename> ::= (<identifier> '.')? <identifier>
-
-    <string> ::= a string constant
-   <integer> ::= an integer constant
-     <float> ::= a float constant
-    <number> ::= <integer> | <float>
-      <uuid> ::= a uuid constant
-   <boolean> ::= a boolean constant
-       <hex> ::= a blob constant
-
-  <constant> ::= <string>
-               | <number>
-               | <uuid>
-               | <boolean>
-               | <hex>
-  <variable> ::= '?'
-               | ':' <identifier>
-      <term> ::= <constant>
-               | <collection-literal>
-               | <variable>
-               | <function> '(' (<term> (',' <term>)*)? ')'
-
-  <collection-literal> ::= <map-literal>
-                         | <set-literal>
-                         | <list-literal>
-         <map-literal> ::= '{' ( <term> ':' <term> ( ',' <term> ':' <term> )* )? '}'
-         <set-literal> ::= '{' ( <term> ( ',' <term> )* )? '}'
-        <list-literal> ::= '[' ( <term> ( ',' <term> )* )? ']'
-
-    <function> ::= <ident>
-
-  <properties> ::= <property> (AND <property>)*
-    <property> ::= <identifier> '=' ( <identifier> | <constant> | <map-literal> )
-


Please note that not every possible production of the grammar above will be valid in practice. Most notably, <variable> and nested <collection-literal> are currently not allowed inside <collection-literal>.

A <variable> can be either anonymous (a question mark (?)) or named (an identifier preceded by :). Both declare a bind variable for prepared statements. The only difference between an anonymous and a named variable is that a named one will be easier to refer to (how exactly depends on the client driver used).
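
For example, both of the following could be prepared over a hypothetical users table, the first using anonymous bind variables and the second using named ones:

INSERT INTO users (userid, name) VALUES (?, ?);
INSERT INTO users (userid, name) VALUES (:userid, :name);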

The <properties> production is used by statements that create and alter keyspaces and tables. Each <property> is either a simple one, in which case it just has a value, or a map one, in which case its value is a map grouping sub-options. The following will refer to one or the other as the kind (simple or map) of the property.

A <tablename> will be used to identify a table. This is an identifier representing the table name that can be preceded by a keyspace name. The keyspace name, if provided, allows identifying a table in a keyspace other than the currently active one (the currently active keyspace is set through the USE statement).

For supported <function>, see the section on functions.

Strings can be either enclosed with single quotes or two dollar characters. The second syntax has been introduced to allow strings that contain single quotes. Typical candidates for such strings are source code fragments for user-defined functions.

Sample:

  'some string value'
-
-  $$double-dollar string can contain single ' quotes$$
-

Prepared Statement

CQL supports prepared statements. A prepared statement is an optimization that allows parsing a query only once and executing it multiple times with different concrete values.

In a statement, each time a column value is expected (in the data manipulation and query statements), a <variable> (see above) can be used instead. A statement with bind variables must then be prepared. Once it has been prepared, it can be executed by providing concrete values for the bind variables. The exact procedure to prepare a statement and execute a prepared statement depends on the CQL driver used and is beyond the scope of this document.

In addition to providing column values, bind markers may be used to provide values for LIMIT, TIMESTAMP, and TTL clauses. If anonymous bind markers are used, the names for the query parameters will be [limit], [timestamp], and [ttl], respectively.
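
For instance, again assuming a hypothetical users table, the following statements could be prepared with bind markers standing in for the LIMIT and TTL values:

SELECT * FROM users LIMIT ?;
INSERT INTO users (userid, name) VALUES (?, ?) USING TTL ?;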

Data Definition

CREATE KEYSPACE

Syntax:

<create-keyspace-stmt> ::= CREATE KEYSPACE (IF NOT EXISTS)? <identifier> WITH <properties>
-


Sample:

CREATE KEYSPACE Excelsior
-           WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};
-
-CREATE KEYSPACE Excalibur
-           WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1' : 1, 'DC2' : 3}
-            AND durable_writes = false;
-


The CREATE KEYSPACE statement creates a new top-level keyspace. A keyspace is a namespace that defines a replication strategy and some options for a set of tables. Valid keyspace names are identifiers composed exclusively of alphanumerical characters and whose length is less than or equal to 32. Note that, as identifiers, keyspace names are case insensitive: use a quoted identifier for case sensitive keyspace names.

The supported <properties> for CREATE KEYSPACE are:

name kind mandatory default description
replication map yes The replication strategy and options to use for the keyspace.
durable_writes simple no true Whether to use the commit log for updates on this keyspace (disable this option at your own risk!).

The replication <property> is mandatory. It must at least contain the 'class' sub-option, which defines the replication strategy class to use. The rest of the sub-options depend on that replication strategy class. By default, Cassandra supports the following 'class' values:

  • 'SimpleStrategy': A simple strategy that defines a replication factor for the whole cluster. The only supported sub-option is 'replication_factor', which defines that replication factor and is mandatory.
  • 'NetworkTopologyStrategy': A replication strategy that allows setting the replication factor independently for each data-center. The rest of the sub-options are key-value pairs where the key is the name of a datacenter and the value is the replication factor for that data-center.
  • 'OldNetworkTopologyStrategy': A legacy replication strategy. You should avoid this strategy for new keyspaces and prefer 'NetworkTopologyStrategy'.

Attempting to create an already existing keyspace will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the keyspace already exists.
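
For example, the following statement (reusing the Excelsior keyspace from the sample above) is a no-op if the keyspace already exists:

CREATE KEYSPACE IF NOT EXISTS Excelsior
    WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3};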

USE

Syntax:

<use-stmt> ::= USE <identifier>
-

Sample:

USE myApp;
-

The USE statement takes an existing keyspace name as argument and sets it as the per-connection current working keyspace. All subsequent keyspace-specific actions will be performed in the context of the selected keyspace, unless otherwise specified, until another USE statement is issued or the connection terminates.

ALTER KEYSPACE

Syntax:

<alter-keyspace-stmt> ::= ALTER KEYSPACE <identifier> WITH <properties>
-


Sample:

ALTER KEYSPACE Excelsior
-          WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 4};
-
-


The ALTER KEYSPACE statement alters the properties of an existing keyspace. The supported <properties> are the same as for the CREATE KEYSPACE statement.

DROP KEYSPACE

Syntax:

<drop-keyspace-stmt> ::= DROP KEYSPACE ( IF EXISTS )? <identifier>
-

Sample:

DROP KEYSPACE myApp;
-

A DROP KEYSPACE statement results in the immediate, irreversible removal of an existing keyspace, including all column families in it, and all data contained in those column families.

If the keyspace does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

CREATE TABLE

Syntax:

<create-table-stmt> ::= CREATE ( TABLE | COLUMNFAMILY ) ( IF NOT EXISTS )? <tablename>
-                          '(' <column-definition> ( ',' <column-definition> )* ')'
-                          ( WITH <option> ( AND <option>)* )?
-
-<column-definition> ::= <identifier> <type> ( STATIC )? ( PRIMARY KEY )?
-                      | PRIMARY KEY '(' <partition-key> ( ',' <identifier> )* ')'
-
-<partition-key> ::= <identifier>
-                  | '(' <identifier> (',' <identifier> )* ')'
-
-<option> ::= <property>
-           | COMPACT STORAGE
-           | CLUSTERING ORDER
-


Sample:

CREATE TABLE monkeySpecies (
-    species text PRIMARY KEY,
-    common_name text,
-    population varint,
-    average_size int
-) WITH comment='Important biological records'
-   AND read_repair_chance = 1.0;
-
-CREATE TABLE timeline (
-    userid uuid,
-    posted_month int,
-    posted_time uuid,
-    body text,
-    posted_by text,
-    PRIMARY KEY (userid, posted_month, posted_time)
-) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };
-


The CREATE TABLE statement creates a new table. Each such table is a set of rows (usually representing related entities) for which it defines a number of properties. A table is defined by a name; it defines the columns composing rows of the table and has a number of options. Note that the CREATE COLUMNFAMILY syntax is supported as an alias for CREATE TABLE (for historical reasons).

Attempting to create an already existing table will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the table already exists.

<tablename>

Valid table names are the same as valid keyspace names (up to 32 characters long alphanumerical identifiers). If the table name is provided alone, the table is created within the current keyspace (see USE), but if it is prefixed by an existing keyspace name (see <tablename> grammar), it is created in the specified keyspace (but does not change the current keyspace).

<column-definition>

A CREATE TABLE statement defines the columns that rows of the table can have. A column is defined by its name (an identifier) and its type (see the data types section for more details on allowed types and their properties).

Within a table, a row is uniquely identified by its PRIMARY KEY (or more simply the key), and hence all table definitions must define a PRIMARY KEY (and only one). A PRIMARY KEY is composed of one or more of the columns defined in the table. If the PRIMARY KEY is only one column, this can be specified directly after the column definition. Otherwise, it must be specified by following PRIMARY KEY with the comma-separated list of column names composing the key within parentheses. Note that:

CREATE TABLE t (
-    k int PRIMARY KEY,
-    other text
-)
-

is equivalent to

CREATE TABLE t (
-    k int,
-    other text,
-    PRIMARY KEY (k)
-)
-

Partition key and clustering columns

In CQL, the order in which columns are defined for the PRIMARY KEY matters. The first column of the key is called the partition key. It has the property that all the rows sharing the same partition key (even across tables in fact) are stored on the same physical node. Also, insertion/update/deletion on rows sharing the same partition key for a given table are performed atomically and in isolation. Note that it is possible to have a composite partition key, i.e. a partition key formed of multiple columns, using an extra set of parentheses to define which columns form the partition key.

The remaining columns of the PRIMARY KEY definition, if any, are called clustering columns. On a given physical node, rows for a given partition key are stored in the order induced by the clustering columns, making the retrieval of rows in that clustering order particularly efficient (see SELECT).
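
As an illustrative sketch (the table and column names are hypothetical), the following definition uses a composite partition key of (sensor_id, day) together with a single clustering column reading_time:

CREATE TABLE sensor_readings (
    sensor_id uuid,
    day date,
    reading_time timestamp,
    value double,
    PRIMARY KEY ((sensor_id, day), reading_time)
);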

STATIC columns

Some columns can be declared as STATIC in a table definition. A column that is static will be “shared” by all the rows belonging to the same partition (having the same partition key). For instance, in:

CREATE TABLE test (
-    pk int,
-    t int,
-    v text,
-    s text static,
-    PRIMARY KEY (pk, t)
-);
-INSERT INTO test(pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
-INSERT INTO test(pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
-SELECT * FROM test WHERE pk=0 AND t=0;
-

the last query will return 'static1' as the value for s, since s is static and thus the 2nd insertion modified this “shared” value. Note however that static columns are only static within a given partition, and if in the example above both rows were from different partitions (i.e. if they had different values for pk), then the 2nd insertion would not have modified the value of s for the first row.

A few restrictions apply to when static columns are allowed:

  • tables with the COMPACT STORAGE option (see below) cannot have them
  • a table without clustering columns cannot have static columns (in a table without clustering columns, every partition has only one row, and so every column is inherently static).
  • only non PRIMARY KEY columns can be static

<option>

The CREATE TABLE statement supports a number of options that control the configuration of a new table. These options can be specified after the WITH keyword.

The first of these options is COMPACT STORAGE. This option is mainly targeted towards backward compatibility for definitions created before CQL3 (see www.datastax.com/dev/blog/thrift-to-cql3 for more details). The option also provides a slightly more compact layout of data on disk but at the price of diminished flexibility and extensibility for the table. Most notably, COMPACT STORAGE tables cannot have collections nor static columns, and a COMPACT STORAGE table with at least one clustering column supports exactly one (as in not 0 nor more than 1) column not part of the PRIMARY KEY definition (which implies in particular that you cannot add nor remove columns after creation). For those reasons, COMPACT STORAGE is not recommended outside of the backward compatibility reasons mentioned above.

Another option is CLUSTERING ORDER. It allows defining the ordering of rows on disk. It takes the list of clustering column names with, for each of them, the on-disk order (ascending or descending). Note that this option affects which ORDER BY clauses are allowed during SELECT.
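
For example, the timeline table from the CREATE TABLE samples above could store its rows with the most recent posts first by declaring a descending clustering order (a sketch only):

CREATE TABLE timeline (
    userid uuid,
    posted_month int,
    posted_time uuid,
    body text,
    posted_by text,
    PRIMARY KEY (userid, posted_month, posted_time)
) WITH CLUSTERING ORDER BY (posted_month DESC, posted_time DESC);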

Table creation supports the following other <property>:

option kind default description
comment simple none A free-form, human-readable comment.
read_repair_chance simple 0.1 The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpose of read repairs.
dclocal_read_repair_chance simple 0 The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center as the read coordinator for the purpose of read repairs.
gc_grace_seconds simple 864000 Time to wait before garbage collecting tombstones (deletion markers).
bloom_filter_fp_chance simple 0.00075 The target probability of false positives for the sstable bloom filters. Said bloom filters will be sized to provide the requested probability (thus lowering this value impacts the size of bloom filters in-memory and on-disk)
default_time_to_live simple 0 The default expiration time (“TTL”) in seconds for a table.
compaction map see below Compaction options, see below.
compression map see below Compression options, see below.
caching map see below Caching options, see below.

Compaction options

The compaction property must at least define the 'class' sub-option, which defines the compaction strategy class to use. The default supported classes are 'SizeTieredCompactionStrategy', 'LeveledCompactionStrategy' and 'DateTieredCompactionStrategy'. A custom strategy can be provided by specifying the full class name as a string constant. The rest of the sub-options depend on the chosen class. The sub-options supported by the default classes are:

option supported compaction strategy default description
enabled all true A boolean denoting whether compaction should be enabled or not.
tombstone_threshold all 0.2 A ratio such that if a sstable has more than this ratio of gcable tombstones over all contained columns, the sstable will be compacted (with no other sstables) for the purpose of purging those tombstones.
tombstone_compaction_interval all 1 day The minimum time to wait after an sstable creation time before considering it for “tombstone compaction”, where “tombstone compaction” is the compaction triggered if the sstable has more gcable tombstones than tombstone_threshold.
unchecked_tombstone_compaction all false Setting this to true enables more aggressive tombstone compactions – single sstable tombstone compactions will run without checking how likely it is that they will be successful.
min_sstable_size SizeTieredCompactionStrategy 50MB The size tiered strategy groups SSTables to compact in buckets. A bucket groups SSTables that differ by less than 50% in size. However, for small sizes, this would result in a bucketing that is too fine grained. min_sstable_size defines a size threshold (in bytes) below which all SSTables belong to one unique bucket
min_threshold SizeTieredCompactionStrategy 4 Minimum number of SSTables needed to start a minor compaction.
max_threshold SizeTieredCompactionStrategy 32 Maximum number of SSTables processed by one minor compaction.
bucket_low SizeTieredCompactionStrategy 0.5 The size tiered strategy considers sstables to be within the same bucket if their size is within [average_size * bucket_low, average_size * bucket_high] (i.e. the default groups sstables whose sizes diverge by at most 50%)
bucket_high SizeTieredCompactionStrategy 1.5 The size tiered strategy considers sstables to be within the same bucket if their size is within [average_size * bucket_low, average_size * bucket_high] (i.e. the default groups sstables whose sizes diverge by at most 50%).
sstable_size_in_mb LeveledCompactionStrategy 5MB The target size (in MB) for sstables in the leveled strategy. Note that while sstable sizes should stay less than or equal to sstable_size_in_mb, it is possible to exceptionally have a larger sstable because, during compaction, data for a given partition key is never split into two sstables
timestamp_resolution DateTieredCompactionStrategy MICROSECONDS The timestamp resolution used when inserting data, could be MILLISECONDS, MICROSECONDS etc (should be understandable by Java TimeUnit) - don’t change this unless you do mutations with USING TIMESTAMP (or equivalent directly in the client)
base_time_seconds DateTieredCompactionStrategy 60 The base size of the time windows.
max_sstable_age_days DateTieredCompactionStrategy 365 SSTables only containing data that is older than this will never be compacted.

Compression options

For the compression property, the following sub-options are available:

option default description
class LZ4Compressor The compression algorithm to use. The default compressors are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use 'enabled' : false to disable compression. A custom compressor can be provided by specifying the full class name as a string constant.
enabled true By default compression is enabled. To disable it, set enabled to false
chunk_length_in_kb 64KB On disk SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increase the minimum size of data to be read from disk for a read
crc_check_chance 1.0 When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replicas. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking, or to 0.5, for instance, to check them every other read

Caching options

For the caching property, the following sub-options are available (an example follows the table):

option default description
keys ALL Whether to cache keys (“key cache”) for this table. Valid values are: ALL and NONE.
rows_per_partition NONE The number of rows to cache per partition (“row cache”). If an integer n is specified, the first n queried rows of a partition will be cached. Other possible options are ALL, to cache all rows of a queried partition, or NONE to disable row caching.
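
For instance, as a hedged sketch reusing the monkeySpecies table from the samples above, a table could keep its key cache enabled while caching the first ten rows of each queried partition:

ALTER TABLE monkeySpecies
    WITH caching = { 'keys' : 'ALL', 'rows_per_partition' : '10' };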

Other considerations:

  • When inserting / updating a given row, not all columns need to be defined (except for those that are part of the key), and missing columns occupy no space on disk. Furthermore, adding new columns (see ALTER TABLE) is a constant time operation. There is thus no need to try to anticipate future usage (or to cry when you haven’t) when creating a table.

ALTER TABLE

Syntax:

<alter-table-stmt> ::= ALTER (TABLE | COLUMNFAMILY) <tablename> <instruction>
-
-<instruction> ::= ALTER <identifier> TYPE <type>
-                | ADD   <identifier> <type>
-                | DROP  <identifier>
-                | WITH  <option> ( AND <option> )*
-


Sample:

ALTER TABLE addamsFamily
-ALTER lastKnownLocation TYPE uuid;
-
-ALTER TABLE addamsFamily
-ADD gravesite varchar;
-
-ALTER TABLE addamsFamily
-WITH comment = 'A most excellent and useful column family'
- AND read_repair_chance = 0.2;
-


The ALTER statement is used to manipulate table definitions. It allows for adding new columns, dropping existing ones, changing the type of existing columns, or updating the table options. As with table creation, ALTER COLUMNFAMILY is allowed as an alias for ALTER TABLE.

The <tablename> is the table name optionally preceded by the keyspace name. The <instruction> defines the alteration to perform:

  • ALTER: Update the type of a given defined column. Note that the type of the clustering columns cannot be modified as it induces the on-disk ordering of rows. Columns on which a secondary index is defined have the same restriction. Other columns are free from those restrictions (no validation of existing data is performed), but it is usually a bad idea to change the type to a non-compatible one, unless no data have been inserted for that column yet, as this could confuse CQL drivers/tools.
  • ADD: Adds a new column to the table. The <identifier> for the new column must not conflict with an existing column. Moreover, columns cannot be added to tables defined with the COMPACT STORAGE option.
  • DROP: Removes a column from the table. Dropped columns will immediately become unavailable in queries and will not be included in compacted sstables in the future. If a column is re-added, queries won’t return values written before the column was last dropped. It is assumed that timestamps represent actual time, so if this is not your case, you should NOT re-add previously dropped columns. Columns can’t be dropped from tables defined with the COMPACT STORAGE option.
  • WITH: Allows updating the options of the table. The supported <option> (and syntax) are the same as for the CREATE TABLE statement, except that COMPACT STORAGE is not supported. Note that setting any compaction sub-options has the effect of erasing all previous compaction options, so you need to re-specify all the sub-options if you want to keep them. The same note applies to the set of compression sub-options.

DROP TABLE

Syntax:

<drop-table-stmt> ::= DROP TABLE ( IF EXISTS )? <tablename>
-

Sample:

DROP TABLE worldSeriesAttendees;
-

The DROP TABLE statement results in the immediate, irreversible removal of a table, including all data contained in it. As for table creation, DROP COLUMNFAMILY is allowed as an alias for DROP TABLE.

If the table does not exist, the statement will return an error, unless IF EXISTS is used in which case the operation is a no-op.

TRUNCATE

Syntax:

<truncate-stmt> ::= TRUNCATE ( TABLE | COLUMNFAMILY )? <tablename>
-

Sample:

TRUNCATE superImportantData;
-

The TRUNCATE statement permanently removes all data from a table.

CREATE INDEX

Syntax:

<create-index-stmt> ::= CREATE ( CUSTOM )? INDEX ( IF NOT EXISTS )? ( <indexname> )?
-                            ON <tablename> '(' <index-identifier> ')'
-                            ( USING <string> ( WITH OPTIONS = <map-literal> )? )?
-
-<index-identifier> ::= <identifier>
-                     | keys( <identifier> )
-


Sample:

CREATE INDEX userIndex ON NerdMovies (user);
-CREATE INDEX ON Mutants (abilityId);
-CREATE INDEX ON users (keys(favs));
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass';
-CREATE CUSTOM INDEX ON users (email) USING 'path.to.the.IndexClass' WITH OPTIONS = {'storage': '/mnt/ssd/indexes/'};
-

The CREATE INDEX statement is used to create a new (automatic) secondary index for a given (existing) column in a given table. A name for the index itself can be specified before the ON keyword, if desired. If data already exists for the column, it will be indexed asynchronously. After the index is created, new data for the column is indexed automatically at insertion time.

Attempting to create an already existing index will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the index already exists.

Indexes on Map Keys

When creating an index on a map column, you may index either the keys or the values. If the column identifier is placed within the keys() function, the index will be on the map keys, allowing you to use CONTAINS KEY in WHERE clauses. Otherwise, the index will be on the map values.
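
For instance, with the index on keys(favs) from the samples above (assuming favs is a map with text keys), the map keys can then be queried with CONTAINS KEY:

SELECT * FROM users WHERE favs CONTAINS KEY 'movies';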

DROP INDEX

Syntax:

<drop-index-stmt> ::= DROP INDEX ( IF EXISTS )? ( <keyspace> '.' )? <identifier>
-

Sample:

DROP INDEX userIndex;
-
-DROP INDEX userkeyspace.address_index;
-


The DROP INDEX statement is used to drop an existing secondary index. The argument of the statement is the index name, which may optionally specify the keyspace of the index.

If the index does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

CREATE MATERIALIZED VIEW

Syntax:

<create-materialized-view-stmt> ::= CREATE MATERIALIZED VIEW ( IF NOT EXISTS )? <viewname> AS
-                          SELECT ( '(' <identifier> ( ',' <identifier> ) * ')' | '*' )
-                          FROM <tablename>
-                          ( WHERE <where-clause> )?
-                          PRIMARY KEY '(' <partition-key> ( ',' <identifier> )* ')'
-                          ( WITH <option> ( AND <option>)* )?
-


Sample:

CREATE MATERIALIZED VIEW monkeySpecies_by_population AS
-    SELECT *
-    FROM monkeySpecies
-    WHERE population IS NOT NULL AND species IS NOT NULL
-    PRIMARY KEY (population, species)
-    WITH comment='Allow query by population instead of species';
-


The CREATE MATERIALIZED VIEW statement creates a new materialized view. Each such view is a set of rows which corresponds to rows which are present in the underlying, or base, table specified in the SELECT statement. A materialized view cannot be directly updated, but updates to the base table will cause corresponding updates in the view.

Attempting to create an already existing materialized view will return an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the materialized view already exists.

WHERE Clause

The <where-clause> is similar to the where clause of a SELECT statement, with a few differences. First, the where clause must contain an expression that disallows NULL values in columns in the view’s primary key. If no other restriction is desired, this can be accomplished with an IS NOT NULL expression. Second, only columns which are in the base table’s primary key may be restricted with expressions other than IS NOT NULL. (Note that this second restriction may be lifted in the future.)

ALTER MATERIALIZED VIEW

Syntax:

<alter-materialized-view-stmt> ::= ALTER MATERIALIZED VIEW <viewname>
-                                                 WITH <option> ( AND <option> )*
-

The ALTER MATERIALIZED VIEW statement allows options to be updated; these options are the same as CREATE TABLE's options.

DROP MATERIALIZED VIEW

Syntax:

<drop-materialized-stmt> ::= DROP MATERIALIZED VIEW ( IF EXISTS )? <tablename>
-

Sample:

DROP MATERIALIZED VIEW monkeySpecies_by_population;
-

The DROP MATERIALIZED VIEW statement is used to drop an existing materialized view.

If the materialized view does not exist, the statement will return an error, unless IF EXISTS is used, in which case the operation is a no-op.

CREATE TYPE

Syntax:

<create-type-stmt> ::= CREATE TYPE ( IF NOT EXISTS )? <typename>
-                         '(' <field-definition> ( ',' <field-definition> )* ')'
-
-<typename> ::= ( <keyspace-name> '.' )? <identifier>
-
-<field-definition> ::= <identifier> <type>
-
-


Sample:

CREATE TYPE address (
-    street_name text,
-    street_number int,
-    city text,
-    state text,
-    zip int
-)
-
-CREATE TYPE work_and_home_addresses (
-    home_address address,
-    work_address address
-)
-


The CREATE TYPE statement creates a new user-defined type. Each type is a set of named, typed fields. Field types may be any valid type, including collections and other existing user-defined types.

Attempting to create an already existing type will result in an error unless the IF NOT EXISTS option is used. If it is used, the statement will be a no-op if the type already exists.

<typename>

Valid type names are identifiers. The names of existing CQL types and reserved type names may not be used.

If the type name is provided alone, the type is created within the current keyspace (see USE). If it is prefixed by an existing keyspace name, the type is created within the specified keyspace instead of the current keyspace.

ALTER TYPE

Syntax:

<alter-type-stmt> ::= ALTER TYPE <typename> <instruction>
-
-<instruction> ::= ALTER <field-name> TYPE <type>
-                | ADD <field-name> <type>
-                | RENAME <field-name> TO <field-name> ( AND <field-name> TO <field-name> )*
-


Sample:

ALTER TYPE address ALTER zip TYPE varint
-
-ALTER TYPE address ADD country text
-
-ALTER TYPE address RENAME zip TO zipcode AND street_name TO street
-


The ALTER TYPE statement is used to manipulate type definitions. It allows for adding new fields, renaming existing fields, or changing the type of existing fields.

When altering the type of a column, the new type must be compatible with the previous type.

DROP TYPE

Syntax:

<drop-type-stmt> ::= DROP TYPE ( IF EXISTS )? <typename>
-


The DROP TYPE statement results in the immediate, irreversible removal of a type. Attempting to drop a type that is still in use by another type or a table will result in an error.

If the type does not exist, an error will be returned unless IF EXISTS is used, in which case the operation is a no-op.

CREATE TRIGGER

Syntax:

<create-trigger-stmt> ::= CREATE TRIGGER ( IF NOT EXISTS )? ( <triggername> )?
-                            ON <tablename> 
-                            USING <string>
-
-


Sample:

CREATE TRIGGER myTrigger ON myTable USING 'org.apache.cassandra.triggers.InvertedIndex';
-

The actual logic that makes up the trigger can be written in any Java (JVM) language and exists outside the database. You place the trigger code in the lib/triggers subdirectory of the Cassandra installation directory; it is loaded during cluster startup and exists on every node that participates in the cluster. The trigger defined on a table fires before a requested DML statement occurs, which ensures the atomicity of the transaction.

DROP TRIGGER

Syntax:

<drop-trigger-stmt> ::= DROP TRIGGER ( IF EXISTS )? ( <triggername> )?
-                            ON <tablename>
-


Sample:

DROP TRIGGER myTrigger ON myTable;
-

The DROP TRIGGER statement removes the registration of a trigger created using CREATE TRIGGER.

CREATE FUNCTION

Syntax:

<create-function-stmt> ::= CREATE ( OR REPLACE )? 
-                            FUNCTION ( IF NOT EXISTS )?
-                            ( <keyspace> '.' )? <function-name>
-                            '(' <arg-name> <arg-type> ( ',' <arg-name> <arg-type> )* ')'
-                            ( CALLED | RETURNS NULL ) ON NULL INPUT
-                            RETURNS <type>
-                            LANGUAGE <language>
-                            AS <body>
-


Sample:

CREATE OR REPLACE FUNCTION somefunction
-    ( somearg int, anotherarg text, complexarg frozen<someUDT>, listarg list<bigint> )
-    RETURNS NULL ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-       // some Java code
-    $$;
-CREATE FUNCTION akeyspace.fname IF NOT EXISTS
-    ( someArg int )
-    CALLED ON NULL INPUT
-    RETURNS text
-    LANGUAGE java
-    AS $$
-       // some Java code
-    $$;
-

CREATE FUNCTION creates or replaces a user-defined function.

Function Signature

Signatures are used to distinguish individual functions. The signature consists of:

  1. The fully qualified function name – i.e. keyspace plus function-name
  2. The concatenated list of all argument types

Note that keyspace names, function names and argument types are subject to the default naming conventions and case-sensitivity rules.

CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already exists.

Behavior on invocation with null values must be defined for each function. There are two options:

  1. RETURNS NULL ON NULL INPUT declares that the function will always return null if any of the input arguments is null.
  2. CALLED ON NULL INPUT declares that the function will always be executed.

If the optional IF NOT EXISTS keywords are used, the function will only be created if another function with the same signature does not exist.

OR REPLACE and IF NOT EXISTS cannot be used together.

Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the system keyspaces.

See the section on user-defined functions for more information.

DROP FUNCTION

Syntax:

<drop-function-stmt> ::= DROP FUNCTION ( IF EXISTS )?
-                         ( <keyspace> '.' )? <function-name>
-                         ( '(' <arg-type> ( ',' <arg-type> )* ')' )?
-
-


Sample:

DROP FUNCTION myfunction;
-DROP FUNCTION mykeyspace.afunction;
-DROP FUNCTION afunction ( int );
-DROP FUNCTION afunction ( text );
-

The DROP FUNCTION statement removes a function created using CREATE FUNCTION.
You must specify the argument types (signature) of the function to drop if there are multiple functions with the same name but different signatures (overloaded functions).

DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists.

CREATE AGGREGATE

Syntax:

<create-aggregate-stmt> ::= CREATE ( OR REPLACE )? 
-                            AGGREGATE ( IF NOT EXISTS )?
-                            ( <keyspace> '.' )? <aggregate-name>
-                            '(' <arg-type> ( ',' <arg-type> )* ')'
-                            SFUNC <state-functionname>
-                            STYPE <state-type>
-                            ( FINALFUNC <final-functionname> )?
-                            ( INITCOND <init-cond> )?
-


Sample:

CREATE AGGREGATE myaggregate ( val text )
-  SFUNC myaggregate_state
-  STYPE text
-  FINALFUNC myaggregate_final
-  INITCOND 'foo';
-

See the section on user-defined aggregates for a complete example.

CREATE AGGREGATE creates or replaces a user-defined aggregate.

CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature already exists.

CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate only if it does not already exist.

OR REPLACE and IF NOT EXISTS cannot be used together.

Aggregates belong to a keyspace. If no keyspace is specified in <aggregate-name>, the current keyspace is used (i.e. the keyspace specified using the USE statement). It is not possible to create a user-defined aggregate in one of the system keyspaces.

Signatures for user-defined aggregates follow the same rules as for user-defined functions.

STYPE defines the type of the state value and must be specified.

The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT.

SFUNC references an existing function to be used as the state modifying function. The type of the first argument of the state function must match STYPE. The remaining argument types of the state function must match the argument types of the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called with null.

The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS NULL ON NULL INPUT means that the aggregate’s return value will be null, if the last state is null.

If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is defined, it is the return type of that function.

See the section on user-defined aggregates for more information.

DROP AGGREGATE

Syntax:

<drop-aggregate-stmt> ::= DROP AGGREGATE ( IF EXISTS )?
-                         ( <keyspace> '.' )? <aggregate-name>
-                         ( '(' <arg-type> ( ',' <arg-type> )* ')' )?
-

Sample:

DROP AGGREGATE myAggregate;
-DROP AGGREGATE myKeyspace.anAggregate;
-DROP AGGREGATE someAggregate ( int );
-DROP AGGREGATE someAggregate ( text );
-

The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded aggregates).

DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if an aggregate with that signature does not exist.

Signatures for user-defined aggregates follow the same rules as for user-defined functions.

Data Manipulation

INSERT

Syntax:

<insertStatement> ::= INSERT INTO <tablename>
-                      ( ( <name-list> VALUES <value-list> )
-                      | ( JSON <string> ))
-                      ( IF NOT EXISTS )?
-                      ( USING <option> ( AND <option> )* )?
-
-<name-list> ::= '(' <identifier> ( ',' <identifier> )* ')'
-
-<value-list> ::= '(' <term-or-literal> ( ',' <term-or-literal> )* ')'
-
-<term-or-literal> ::= <term>
-                    | <collection-literal>
-
-<option> ::= TIMESTAMP <integer>
-           | TTL <integer>
-


Sample:

INSERT INTO NerdMovies (movie, director, main_actor, year)
-                VALUES ('Serenity', 'Joss Whedon', 'Nathan Fillion', 2005)
-USING TTL 86400;
-
-INSERT INTO NerdMovies JSON '{"movie": "Serenity", "director": "Joss Whedon", "year": 2005}'
-


The INSERT statement writes one or more columns for a given row in a table. Note that since a row is identified by its PRIMARY KEY, at least the columns composing it must be specified. The list of columns to insert must be supplied when using the VALUES syntax. When using the JSON syntax, they are optional. See the section on INSERT JSON for more details.

Note that unlike in SQL, INSERT does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no means of knowing whether a creation or an update happened.

It is however possible to use the IF NOT EXISTS condition to only insert if the row does not exist prior to the insertion. But please note that using IF NOT EXISTS will incur a non-negligible performance cost (internally, Paxos will be used), so this should be used sparingly.

All updates for an INSERT are applied atomically and in isolation.

Please refer to the UPDATE section for information on the <option> available and to the collections section for use of <collection-literal>. Also note that INSERT does not support counters, while UPDATE does.

UPDATE

Syntax:

<update-stmt> ::= UPDATE <tablename>
-                  ( USING <option> ( AND <option> )* )?
-                  SET <assignment> ( ',' <assignment> )*
-                  WHERE <where-clause>
-                  ( IF <condition> ( AND <condition> )* )?
-
-<assignment> ::= <identifier> '=' <term>
-               | <identifier> '=' <identifier> ('+' | '-') (<int-term> | <set-literal> | <list-literal>)
-               | <identifier> '=' <identifier> '+' <map-literal>
-               | <identifier> '[' <term> ']' '=' <term>
-
-<condition> ::= <identifier> <op> <term>
-              | <identifier> IN (<variable> | '(' ( <term> ( ',' <term> )* )? ')')
-              | <identifier> '[' <term> ']' <op> <term>
-              | <identifier> '[' <term> ']' IN <term>
-
-<op> ::= '<' | '<=' | '=' | '!=' | '>=' | '>'
-
-<where-clause> ::= <relation> ( AND <relation> )*
-
-<relation> ::= <identifier> '=' <term>
-             | '(' <identifier> (',' <identifier>)* ')' '=' <term-tuple>
-             | <identifier> IN '(' ( <term> ( ',' <term>)* )? ')'
-             | <identifier> IN <variable>
-             | '(' <identifier> (',' <identifier>)* ')' IN '(' ( <term-tuple> ( ',' <term-tuple>)* )? ')'
-             | '(' <identifier> (',' <identifier>)* ')' IN <variable>
-
-<option> ::= TIMESTAMP <integer>
-           | TTL <integer>
-


Sample:

UPDATE NerdMovies USING TTL 400
-SET director = 'Joss Whedon',
-    main_actor = 'Nathan Fillion',
-    year = 2005
-WHERE movie = 'Serenity';
-
-UPDATE UserActions SET total = total + 2 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click';
-


The UPDATE statement writes one or more columns for a given row in a table. The <where-clause> is used to select the row to update and must include all columns composing the PRIMARY KEY. Other column values are specified through <assignment> after the SET keyword.

Note that unlike in SQL, UPDATE does not check the prior existence of the row by default (except through the use of <condition>, see below): the row is created if none existed before, and updated otherwise. Furthermore, there are no means to know whether a creation or update occurred.

It is however possible to use conditions on some columns through IF, in which case the row will not be updated unless the conditions are met. But please note that using IF conditions will incur a non-negligible performance cost (internally, Paxos will be used), so this should be used sparingly.

In an UPDATE statement, all updates within the same partition key are applied atomically and in isolation.

The c = c + 3 form of <assignment> is used to increment/decrement counters. The identifier after the ‘=’ sign must be the same as the one before the ‘=’ sign (only increment/decrement is supported on counters, not the assignment of a specific value).

The id = id + <collection-literal> and id[value1] = value2 forms of <assignment> are for collections. Please refer to the relevant section for more details.

<options>

The UPDATE and INSERT statements support the following options:

  • TIMESTAMP: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.
  • TTL: specifies an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not the columns themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL is specified in that update). By default, values never expire. A TTL of 0 or a negative value is equivalent to no TTL.
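
As a hedged sketch of the TIMESTAMP and TTL options described above (reusing the NerdMovies table from the UPDATE sample; the timestamp value is an arbitrary microsecond count), both options can be combined in a single USING clause:

UPDATE NerdMovies USING TIMESTAMP 1442880000000000 AND TTL 86400
SET main_actor = 'Nathan Fillion'
WHERE movie = 'Serenity';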

DELETE

Syntax:

<delete-stmt> ::= DELETE ( <selection> ( ',' <selection> )* )?
                  FROM <tablename>
                  ( USING TIMESTAMP <integer>)?
                  WHERE <where-clause>
                  ( IF ( EXISTS | ( <condition> ( AND <condition> )*) ) )?

<selection> ::= <identifier> ( '[' <term> ']' )?

<where-clause> ::= <relation> ( AND <relation> )*

<relation> ::= <identifier> <op> <term>
             | '(' <identifier> (',' <identifier>)* ')' <op> <term-tuple>
             | <identifier> IN '(' ( <term> ( ',' <term>)* )? ')'
             | <identifier> IN <variable>
             | '(' <identifier> (',' <identifier>)* ')' IN '(' ( <term-tuple> ( ',' <term-tuple>)* )? ')'
             | '(' <identifier> (',' <identifier>)* ')' IN <variable>

<op> ::= '=' | '<' | '>' | '<=' | '>='

<condition> ::= <identifier> (<op> | '!=') <term>
              | <identifier> IN (<variable> | '(' ( <term> ( ',' <term> )* )? ')')
              | <identifier> '[' <term> ']' (<op> | '!=') <term>
              | <identifier> '[' <term> ']' IN <term>


Sample:

DELETE FROM NerdMovies USING TIMESTAMP 1240003134 WHERE movie = 'Serenity';

DELETE phone FROM Users WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14);


The DELETE statement deletes columns and rows. If column names are provided directly after the DELETE keyword, only those columns are deleted from the row indicated by the <where-clause> (the id[value] syntax in <selection> is for collections; please refer to the collection section for more details). Otherwise, whole rows are removed. The <where-clause> specifies which rows are to be deleted. Multiple rows may be deleted with one statement by using an IN clause. A range of rows may be deleted using an inequality operator (such as >=).

DELETE supports the TIMESTAMP option with the same semantics as the UPDATE statement.

In a DELETE statement, all deletions within the same partition key are applied atomically and in isolation.

A DELETE operation can be conditional through the use of an IF clause, similar to UPDATE and INSERT statements. However, as with INSERT and UPDATE statements, this will incur a non-negligible performance cost (internally, Paxos will be used) and so should be used sparingly.
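
For example (a hedged sketch reusing the NerdMovies table from the UPDATE sample and the posts table defined in the SELECT section below; range deletes on clustering columns require CQL 3.4.0 or later, per the Changes section), a conditional delete and a range delete could look like:

DELETE FROM NerdMovies WHERE movie = 'Serenity' IF EXISTS;

DELETE FROM posts WHERE userid = 'john doe' AND blog_title = 'John''s Blog' AND posted_at < '2012-01-01';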

BATCH

Syntax:

<batch-stmt> ::= BEGIN ( UNLOGGED | COUNTER )? BATCH
                 ( USING <option> ( AND <option> )* )?
                    <modification-stmt> ( ';' <modification-stmt> )*
                 APPLY BATCH

<modification-stmt> ::= <insert-stmt>
                      | <update-stmt>
                      | <delete-stmt>

<option> ::= TIMESTAMP <integer>


Sample:

BEGIN BATCH
  INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
  UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
  INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
  DELETE name FROM users WHERE userid = 'user1';
APPLY BATCH;

The BATCH statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:

  1. It saves network round-trips between the client and the server (and sometimes between the server coordinator and the replicas) when batching multiple updates.
  2. All updates in a BATCH belonging to a given partition key are performed in isolation.
  3. By default, all operations in the batch are performed as LOGGED, to ensure all mutations eventually complete (or none will). See the notes on UNLOGGED for more details.

Note that:

  • BATCH statements may only contain UPDATE, INSERT and DELETE statements.
  • Batches are not a full analogue for SQL transactions.
  • If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp. Due to Cassandra’s conflict resolution procedure in the case of timestamp ties, operations may be applied in an order that is different from the order they are listed in the BATCH statement. To force a particular operation ordering, you must specify per-operation timestamps.

UNLOGGED

By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note however that operations are only isolated within a single partition).

There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the UNLOGGED option. If the UNLOGGED option is used, a failed batch might leave the batch only partly applied.

COUNTER

Use the COUNTER option for batched counter updates. Unlike other updates in Cassandra, counter updates are not idempotent.

<option>

BATCH supports the TIMESTAMP option, with semantics similar to those described in the UPDATE statement (the timestamp applies to all the statements inside the batch). However, if used, TIMESTAMP must not be used in the statements within the batch.
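
As a hedged sketch (reusing the users table from the batch sample above; the timestamp value is arbitrary), a batch-level timestamp is supplied like this:

BEGIN BATCH USING TIMESTAMP 1481124356754405
  INSERT INTO users (userid, password) VALUES ('user5', 'ch@ngem3d');
  UPDATE users SET password = 'ps22dhds' WHERE userid = 'user5';
APPLY BATCH;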

Queries

SELECT

Syntax:

<select-stmt> ::= SELECT ( JSON )? <select-clause>
                  FROM <tablename>
                  ( WHERE <where-clause> )?
                  ( ORDER BY <order-by> )?
                  ( LIMIT <integer> )?
                  ( ALLOW FILTERING )?

<select-clause> ::= DISTINCT? <selection-list>
                  | COUNT '(' ( '*' | '1' ) ')' (AS <identifier>)?

<selection-list> ::= <selector> (AS <identifier>)? ( ',' <selector> (AS <identifier>)? )*
                   | '*'

<selector> ::= <identifier>
             | WRITETIME '(' <identifier> ')'
             | TTL '(' <identifier> ')'
             | <function> '(' (<selector> (',' <selector>)*)? ')'

<where-clause> ::= <relation> ( AND <relation> )*

<relation> ::= <identifier> <op> <term>
             | '(' <identifier> (',' <identifier>)* ')' <op> <term-tuple>
             | <identifier> IN '(' ( <term> ( ',' <term>)* )? ')'
             | '(' <identifier> (',' <identifier>)* ')' IN '(' ( <term-tuple> ( ',' <term-tuple>)* )? ')'
             | TOKEN '(' <identifier> ( ',' <identifier>)* ')' <op> <term>

<op> ::= '=' | '<' | '>' | '<=' | '>=' | CONTAINS | CONTAINS KEY
<order-by> ::= <ordering> ( ',' <ordering> )*
<ordering> ::= <identifier> ( ASC | DESC )?
<term-tuple> ::= '(' <term> (',' <term>)* ')'


Sample:

SELECT name, occupation FROM users WHERE userid IN (199, 200, 207);

SELECT JSON name, occupation FROM users WHERE userid = 199;

SELECT name AS user_name, occupation AS user_occupation FROM users;

SELECT time, value
FROM events
WHERE event_type = 'myEvent'
  AND time > '2011-02-03'
  AND time <= '2012-01-01'

SELECT COUNT(*) FROM users;

SELECT COUNT(*) AS user_count FROM users;


The SELECT statement reads one or more columns for one or more rows in a table. It returns a result-set of rows, where each row contains the collection of columns corresponding to the query. If the JSON keyword is used, the results for each row will contain only a single column named “json”. See the section on SELECT JSON for more details.

<select-clause>

The <select-clause> determines which columns need to be queried and returned in the result-set. It consists of either a comma-separated list of <selector>s or the wildcard character (*) to select all the columns defined for the table.

A <selector> is either a column name to retrieve or a <function> of one or more <term>s. The functions allowed are the same as for <term> and are described in the function section. In addition to these generic functions, the WRITETIME (resp. TTL) function allows selecting the timestamp of when the column was inserted (resp. the time to live (in seconds) for the column, or null if the column has no expiration set).

Any <selector> can be aliased using the AS keyword (see examples). Please note that the <where-clause> and <order-by> clauses should refer to the columns by their original names and not by their aliases.

The COUNT keyword can be used with parentheses enclosing *. If so, the query will return a single result: the number of rows matching the query. Note that COUNT(1) is supported as an alias.

<where-clause>

The <where-clause> specifies which rows must be queried. It is composed of relations on the columns that are part of the PRIMARY KEY and/or have a secondary index defined on them.

Not all relations are allowed in a query. For instance, non-equal relations (where IN is considered an equality relation) on a partition key are not supported (but see the use of the TOKEN method below to do non-equal queries on the partition key). Moreover, for a given partition key, the clustering columns induce an ordering of rows and relations on them are restricted to those that allow selecting a contiguous (for the ordering) set of rows. For instance, given

CREATE TABLE posts (
    userid text,
    blog_title text,
    posted_at timestamp,
    entry_title text,
    content text,
    category int,
    PRIMARY KEY (userid, blog_title, posted_at)
)

The following query is allowed:

SELECT entry_title, content FROM posts WHERE userid='john doe' AND blog_title='John''s Blog' AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'

But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are set):

// Needs a blog_title to be set to select ranges of posted_at
SELECT entry_title, content FROM posts WHERE userid='john doe' AND posted_at >= '2012-01-01' AND posted_at < '2012-01-31'

When specifying relations, the TOKEN function can be used on the PARTITION KEY column to query. In that case, rows will be selected based on the token of their PARTITION_KEY rather than on the value. Note that the token of a key depends on the partitioner in use, and that in particular the RandomPartitioner won’t yield a meaningful order. Also note that ordering partitioners always order token values by bytes (so even if the partition key is of type int, token(-1) > token(0) in particular). Example:

SELECT * FROM posts WHERE token(userid) > token('tom') AND token(userid) < token('bob')

Moreover, the IN relation is only allowed on the last column of the partition key and on the last column of the full primary key.

It is also possible to “group” CLUSTERING COLUMNS together in a relation using the tuple notation. For instance:

SELECT * FROM posts WHERE userid='john doe' AND (blog_title, posted_at) > ('John''s Blog', '2012-01-01')

will request all rows that sort after the one having “John's Blog” as blog_title and ‘2012-01-01’ for posted_at in the clustering order. In particular, rows having a posted_at <= '2012-01-01' will be returned as long as their blog_title > 'John''s Blog', which wouldn’t be the case for:

SELECT * FROM posts WHERE userid='john doe' AND blog_title > 'John''s Blog' AND posted_at > '2012-01-01'

The tuple notation may also be used for IN clauses on CLUSTERING COLUMNS:

SELECT * FROM posts WHERE userid='john doe' AND (blog_title, posted_at) IN (('John''s Blog', '2012-01-01'), ('Extreme Chess', '2014-06-01'))

The CONTAINS operator may only be used on collection columns (lists, sets, and maps). In the case of maps, CONTAINS applies to the map values. The CONTAINS KEY operator may only be used on map columns and applies to the map keys.
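
For example (a hedged sketch assuming the images and users tables defined in the collections section below, and assuming secondary indexes have been created on the relevant collection columns first):

CREATE INDEX ON images (tags);
SELECT * FROM images WHERE tags CONTAINS 'pet';

CREATE INDEX ON users (KEYS(favs));
SELECT * FROM users WHERE favs CONTAINS KEY 'author';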

<order-by>

The ORDER BY option allows selecting the order of the returned results. It takes as argument a list of column names along with the order for each column (ASC for ascending and DESC for descending; omitting the order is equivalent to ASC). Currently the possible orderings are limited and depend on the table CLUSTERING ORDER:

  • if the table has been defined without any specific CLUSTERING ORDER, then the allowed orderings are the order induced by the clustering columns and the reverse of that one.
  • otherwise, the orderings allowed are the order of the CLUSTERING ORDER option and the reversed one.

LIMIT

The LIMIT option to a SELECT statement limits the number of rows returned by a query.
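
For example (a hedged sketch reusing the posts table defined above), the reversed clustering order can be requested together with a LIMIT:

SELECT entry_title, posted_at FROM posts WHERE userid = 'john doe' ORDER BY blog_title DESC, posted_at DESC LIMIT 10;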

ALLOW FILTERING

By default, CQL only allows select queries that don’t involve “filtering” server side, i.e. queries where we know that all (live) records read will be returned (maybe partly) in the result set. The reasoning is that those “non filtering” queries have predictable performance in the sense that they will execute in a time that is proportional to the amount of data returned by the query (which can be controlled through LIMIT).

The ALLOW FILTERING option explicitly allows (some) queries that require filtering. Please note that a query using ALLOW FILTERING may thus have unpredictable performance (in the sense defined above), i.e. even a query that selects a handful of records may exhibit performance that depends on the total amount of data stored in the cluster.

For instance, considering the following table holding user profiles with their year of birth (with a secondary index on it) and country of residence:

CREATE TABLE users (
    username text PRIMARY KEY,
    firstname text,
    lastname text,
    birth_year int,
    country text
)

CREATE INDEX ON users(birth_year);

Then the following queries are valid:

SELECT * FROM users;
SELECT firstname, lastname FROM users WHERE birth_year = 1981;

because in both cases, Cassandra guarantees that the performance of these queries will be proportional to the amount of data returned. In particular, if no users are born in 1981, then the second query's performance will not depend on the number of user profiles stored in the database (not directly at least: due to secondary index implementation considerations, this query may still depend on the number of nodes in the cluster, which indirectly depends on the amount of data stored; nevertheless, the number of nodes will always be many orders of magnitude lower than the number of user profiles stored). Of course, both queries may return very large result sets in practice, but the amount of data returned can always be controlled by adding a LIMIT.

However, the following query will be rejected:

SELECT firstname, lastname FROM users WHERE birth_year = 1981 AND country = 'FR';

because Cassandra cannot guarantee that it won’t have to scan a large amount of data even if the result of the query is small. Typically, it will scan all the index entries for users born in 1981 even if only a handful are actually from France. However, if you “know what you are doing”, you can force the execution of this query by using ALLOW FILTERING, so the following query is valid:

SELECT firstname, lastname FROM users WHERE birth_year = 1981 AND country = 'FR' ALLOW FILTERING;

Database Roles

CREATE ROLE

Syntax:

<create-role-stmt> ::= CREATE ROLE ( IF NOT EXISTS )? <identifier> ( WITH <option> ( AND <option> )* )?

<option> ::= PASSWORD = <string>
           | LOGIN = <boolean>
           | SUPERUSER = <boolean>
           | OPTIONS = <map_literal>

Sample:

CREATE ROLE new_role;
CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;
CREATE ROLE bob WITH PASSWORD = 'password_b' AND LOGIN = true AND SUPERUSER = true;
CREATE ROLE carlos WITH OPTIONS = { 'custom_option1' : 'option1_value', 'custom_option2' : 99 };

By default roles do not possess LOGIN privileges or SUPERUSER status.

Permissions on database resources are granted to roles; types of resources include keyspaces, tables, functions and roles themselves. Roles may be granted to other roles to create hierarchical permissions structures; in these hierarchies, permissions and SUPERUSER status are inherited, but the LOGIN privilege is not.

If a role has the LOGIN privilege, clients may identify as that role when connecting. For the duration of that connection, the client will acquire any roles and privileges granted to that role.

Only a client with the CREATE permission on the database roles resource may issue CREATE ROLE requests (see the relevant section below), unless the client is a SUPERUSER. Role management in Cassandra is pluggable and custom implementations may support only a subset of the listed options.

Role names should be quoted if they contain non-alphanumeric characters.

Setting credentials for internal authentication

Use the WITH PASSWORD clause to set a password for internal authentication, enclosing the password in single quotation marks.
If internal authentication has not been set up or the role does not have LOGIN privileges, the WITH PASSWORD clause is not necessary.

Creating a role conditionally

Attempting to create an existing role results in an invalid query condition unless the IF NOT EXISTS option is used. If the option is used and the role exists, the statement is a no-op.

CREATE ROLE other_role;
CREATE ROLE IF NOT EXISTS other_role;

ALTER ROLE

Syntax:

<alter-role-stmt> ::= ALTER ROLE <identifier> ( WITH <option> ( AND <option> )* )?

<option> ::= PASSWORD = <string>
           | LOGIN = <boolean>
           | SUPERUSER = <boolean>
           | OPTIONS = <map_literal>

Sample:

ALTER ROLE bob WITH PASSWORD = 'PASSWORD_B' AND SUPERUSER = false;

Conditions on executing ALTER ROLE statements:

  • A client must have SUPERUSER status to alter the SUPERUSER status of another role
  • A client cannot alter the SUPERUSER status of any role it currently holds
  • A client can only modify certain properties of the role with which it identified at login (e.g. PASSWORD)
  • To modify properties of a role, the client must be granted ALTER permission on that role

DROP ROLE

Syntax:

<drop-role-stmt> ::= DROP ROLE ( IF EXISTS )? <identifier>

Sample:

DROP ROLE alice;
DROP ROLE IF EXISTS bob;

DROP ROLE requires the client to have DROP permission on the role in question. In addition, a client may not DROP the role with which it identified at login. Finally, only a client with SUPERUSER status may DROP another SUPERUSER role.
Attempting to drop a role which does not exist results in an invalid query condition unless the IF EXISTS option is used. If the option is used and the role does not exist the statement is a no-op.

GRANT ROLE

Syntax:

<grant-role-stmt> ::= GRANT <identifier> TO <identifier>

Sample:

GRANT report_writer TO alice;

This statement grants the report_writer role to alice. Any permissions granted to report_writer are also acquired by alice.
Roles are modelled as a directed acyclic graph, so circular grants are not permitted. The following examples result in error conditions:

GRANT role_a TO role_b;
GRANT role_b TO role_a;

GRANT role_a TO role_b;
GRANT role_b TO role_c;
GRANT role_c TO role_a;

REVOKE ROLE

Syntax:

<revoke-role-stmt> ::= REVOKE <identifier> FROM <identifier>

Sample:

REVOKE report_writer FROM alice;

This statement revokes the report_writer role from alice. Any permissions that alice has acquired via the report_writer role are also revoked.

LIST ROLES

Syntax:

<list-roles-stmt> ::= LIST ROLES ( OF <identifier> )? ( NORECURSIVE )?

Sample:

LIST ROLES;

Return all known roles in the system; this requires DESCRIBE permission on the database roles resource.

LIST ROLES OF alice;

Enumerate all roles granted to alice, including those transitively acquired.

LIST ROLES OF bob NORECURSIVE

List all roles directly granted to bob.

CREATE USER

Prior to the introduction of roles in Cassandra 2.2, authentication and authorization were based around the concept of a USER. For backward compatibility, the legacy syntax has been preserved with USER-centric statements becoming synonyms for the ROLE based equivalents.

Syntax:

<create-user-statement> ::= CREATE USER ( IF NOT EXISTS )? <identifier> ( WITH PASSWORD <string> )? (<option>)?

<option> ::= SUPERUSER
           | NOSUPERUSER

Sample:

CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
CREATE USER bob WITH PASSWORD 'password_b' NOSUPERUSER;

CREATE USER is equivalent to CREATE ROLE where the LOGIN option is true. So, the following pairs of statements are equivalent:

CREATE USER alice WITH PASSWORD 'password_a' SUPERUSER;
CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;

CREATE USER IF NOT EXISTS alice WITH PASSWORD 'password_a' SUPERUSER;
CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = true;

CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true AND SUPERUSER = false;

CREATE USER alice WITH PASSWORD 'password_a' NOSUPERUSER;
CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;

CREATE USER alice WITH PASSWORD 'password_a';
CREATE ROLE alice WITH PASSWORD = 'password_a' AND LOGIN = true;

ALTER USER

Syntax:

<alter-user-statement> ::= ALTER USER <identifier> ( WITH PASSWORD <string> )? ( <option> )?

<option> ::= SUPERUSER
           | NOSUPERUSER

ALTER USER alice WITH PASSWORD 'PASSWORD_A';
ALTER USER bob SUPERUSER;

DROP USER

Syntax:

<drop-user-stmt> ::= DROP USER ( IF EXISTS )? <identifier>

Sample:

DROP USER alice;
DROP USER IF EXISTS bob;

LIST USERS

Syntax:

<list-users-stmt> ::= LIST USERS;

Sample:

LIST USERS;

This statement is equivalent to

LIST ROLES;

but only roles with the LOGIN privilege are included in the output.

Data Control

Permissions

Permissions on resources are granted to roles; there are several different types of resources in Cassandra and each type is modelled hierarchically:

  • The hierarchy of Data resources, Keyspaces and Tables has the structure ALL KEYSPACES -> KEYSPACE -> TABLE
  • Function resources have the structure ALL FUNCTIONS -> KEYSPACE -> FUNCTION
  • Resources representing roles have the structure ALL ROLES -> ROLE

Permissions can be granted at any level of these hierarchies and they flow downwards. So granting a permission on a resource higher up the chain automatically grants that same permission on all resources lower down. For example, granting SELECT on a KEYSPACE automatically grants it on all TABLES in that KEYSPACE. Likewise, granting a permission on ALL FUNCTIONS grants it on every defined function, regardless of which keyspace it is scoped in. It is also possible to grant permissions on all functions scoped to a particular keyspace.

Modifications to permissions are visible to existing client sessions; that is, connections need not be re-established following permissions changes.

The full set of available permissions is:

  • CREATE
  • ALTER
  • DROP
  • SELECT
  • MODIFY
  • AUTHORIZE
  • DESCRIBE
  • EXECUTE

Not all permissions are applicable to every type of resource. For instance, EXECUTE is only relevant in the context of functions; granting EXECUTE on a resource representing a table is nonsensical. Attempting to GRANT a permission on resource to which it cannot be applied results in an error response. The following illustrates which permissions can be granted on which types of resource, and which statements are enabled by that permission.

permission resource operations
CREATE ALL KEYSPACES CREATE KEYSPACE
CREATE TABLE in any keyspace
CREATE KEYSPACE CREATE TABLE in specified keyspace
CREATE ALL FUNCTIONS CREATE FUNCTION in any keyspace
CREATE AGGREGATE in any keyspace
CREATE ALL FUNCTIONS IN KEYSPACE CREATE FUNCTION in keyspace
CREATE AGGREGATE in keyspace
CREATE ALL ROLES CREATE ROLE
ALTER ALL KEYSPACES ALTER KEYSPACE
ALTER TABLE in any keyspace
ALTER KEYSPACE ALTER KEYSPACE
ALTER TABLE in keyspace
ALTER TABLE ALTER TABLE
ALTER ALL FUNCTIONS CREATE FUNCTION replacing any existing
CREATE AGGREGATE replacing any existing
ALTER ALL FUNCTIONS IN KEYSPACE CREATE FUNCTION replacing existing in keyspace
CREATE AGGREGATE replacing any existing in keyspace
ALTER FUNCTION CREATE FUNCTION replacing existing
CREATE AGGREGATE replacing existing
ALTER ALL ROLES ALTER ROLE on any role
ALTER ROLE ALTER ROLE
DROP ALL KEYSPACES DROP KEYSPACE
DROP TABLE in any keyspace
DROP KEYSPACE DROP TABLE in specified keyspace
DROP TABLE DROP TABLE
DROP ALL FUNCTIONS DROP FUNCTION in any keyspace
DROP AGGREGATE in any keyspace
DROP ALL FUNCTIONS IN KEYSPACE DROP FUNCTION in keyspace
DROP AGGREGATE in keyspace
DROP FUNCTION DROP FUNCTION
DROP ALL ROLES DROP ROLE on any role
DROP ROLE DROP ROLE
SELECT ALL KEYSPACES SELECT on any table
SELECT KEYSPACE SELECT on any table in keyspace
SELECT TABLE SELECT on specified table
MODIFY ALL KEYSPACES INSERT on any table
UPDATE on any table
DELETE on any table
TRUNCATE on any table
MODIFY KEYSPACE INSERT on any table in keyspace
UPDATE on any table in keyspace
DELETE on any table in keyspace
TRUNCATE on any table in keyspace
MODIFY TABLE INSERT
UPDATE
DELETE
TRUNCATE
AUTHORIZE ALL KEYSPACES GRANT PERMISSION on any table
REVOKE PERMISSION on any table
AUTHORIZE KEYSPACE GRANT PERMISSION on table in keyspace
REVOKE PERMISSION on table in keyspace
AUTHORIZE TABLE GRANT PERMISSION
REVOKE PERMISSION
AUTHORIZE ALL FUNCTIONS GRANT PERMISSION on any function
REVOKE PERMISSION on any function
AUTHORIZE ALL FUNCTIONS IN KEYSPACE GRANT PERMISSION in keyspace
REVOKE PERMISSION in keyspace
AUTHORIZE FUNCTION GRANT PERMISSION
REVOKE PERMISSION
AUTHORIZE ALL ROLES GRANT ROLE grant any role
REVOKE ROLE revoke any role
AUTHORIZE ROLES GRANT ROLE grant role
REVOKE ROLE revoke role
DESCRIBE ALL ROLES LIST ROLES all roles or only roles granted to another, specified role
EXECUTE ALL FUNCTIONS SELECT, INSERT, UPDATE using any function
use of any function in CREATE AGGREGATE
EXECUTE ALL FUNCTIONS IN KEYSPACE SELECT, INSERT, UPDATE using any function in keyspace
use of any function in keyspace in CREATE AGGREGATE
EXECUTE FUNCTION SELECT, INSERT, UPDATE using function
use of function in CREATE AGGREGATE

GRANT PERMISSION

Syntax:

<grant-permission-stmt> ::= GRANT ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? ) ON <resource> TO <identifier>

<permission> ::= CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE

<resource> ::= ALL KEYSPACES
             | KEYSPACE <identifier>
             | ( TABLE )? <tablename>
             | ALL ROLES
             | ROLE <identifier>
             | ALL FUNCTIONS ( IN KEYSPACE <identifier> )?
             | FUNCTION <functionname>

Sample:

GRANT SELECT ON ALL KEYSPACES TO data_reader;

This gives any user with the role data_reader permission to execute SELECT statements on any table across all keyspaces.

GRANT MODIFY ON KEYSPACE keyspace1 TO data_writer;

This gives any user with the role data_writer permission to perform INSERT, UPDATE, DELETE and TRUNCATE queries on all tables in the keyspace1 keyspace.

GRANT DROP ON keyspace1.table1 TO schema_owner;

This gives any user with the schema_owner role permissions to DROP keyspace1.table1.

GRANT EXECUTE ON FUNCTION keyspace1.user_function( int ) TO report_writer;

This grants any user with the report_writer role permission to execute SELECT, INSERT and UPDATE queries which use the function keyspace1.user_function( int ).

GRANT DESCRIBE ON ALL ROLES TO role_admin;

This grants any user with the role_admin role permission to view any and all roles in the system with a LIST ROLES statement.

GRANT ALL

When the GRANT ALL form is used, the appropriate set of permissions is determined automatically based on the target resource.
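
For instance (a hedged sketch; data_admin is a hypothetical role name), the following grants every permission applicable to a keyspace:

GRANT ALL PERMISSIONS ON KEYSPACE keyspace1 TO data_admin;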

Automatic Granting

When a resource is created, via a CREATE KEYSPACE, CREATE TABLE, CREATE FUNCTION, CREATE AGGREGATE or CREATE ROLE statement, the creator (the role that the database user who issues the statement is identified as) is automatically granted all applicable permissions on the new resource.
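
As a hedged sketch (assuming the statement is issued while identified as the role alice, and that keyspace1.table2 is a hypothetical new table), the automatically granted permissions can be inspected right after creation:

CREATE TABLE keyspace1.table2 (id int PRIMARY KEY, val text);
LIST ALL PERMISSIONS ON keyspace1.table2 OF alice;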

REVOKE PERMISSION

Syntax:

<revoke-permission-stmt> ::= REVOKE ( ALL ( PERMISSIONS )? | <permission> ( PERMISSION )? ) ON <resource> FROM <identifier>

<permission> ::= CREATE | ALTER | DROP | SELECT | MODIFY | AUTHORIZE | DESCRIBE | EXECUTE

<resource> ::= ALL KEYSPACES
             | KEYSPACE <identifier>
             | ( TABLE )? <tablename>
             | ALL ROLES
             | ROLE <identifier>
             | ALL FUNCTIONS ( IN KEYSPACE <identifier> )?
             | FUNCTION <functionname>

Sample:

REVOKE SELECT ON ALL KEYSPACES FROM data_reader;
REVOKE MODIFY ON KEYSPACE keyspace1 FROM data_writer;
REVOKE DROP ON keyspace1.table1 FROM schema_owner;
REVOKE EXECUTE ON FUNCTION keyspace1.user_function( int ) FROM report_writer;
REVOKE DESCRIBE ON ALL ROLES FROM role_admin;

LIST PERMISSIONS

Syntax:

<list-permissions-stmt> ::= LIST ( ALL ( PERMISSIONS )? | <permission> ) 
                                 ( ON <resource> )?
                                 ( OF <identifier> ( NORECURSIVE )? )?

<resource> ::= ALL KEYSPACES
             | KEYSPACE <identifier>
             | ( TABLE )? <tablename>
             | ALL ROLES
             | ROLE <identifier>
             | ALL FUNCTIONS ( IN KEYSPACE <identifier> )?
             | FUNCTION <functionname>

Sample:

LIST ALL PERMISSIONS OF alice;

Show all permissions granted to alice, including those acquired transitively from any other roles.

LIST ALL PERMISSIONS ON keyspace1.table1 OF bob;

Show all permissions on keyspace1.table1 granted to bob, including those acquired transitively from any other roles. This also includes any permissions higher up the resource hierarchy which can be applied to keyspace1.table1. For example, should bob have ALTER permission on keyspace1, that would be included in the results of this query. Adding the NORECURSIVE switch restricts the results to only those permissions which were directly granted to bob or one of bob's roles.

LIST SELECT PERMISSIONS OF carlos;

Show any permissions granted to carlos or any of carlos's roles, limited to SELECT permissions on any resource.

Data Types

CQL supports a rich set of data types for columns defined in a table, including collection types. On top of those native and collection types, users can also provide custom types (through a JAVA class extending AbstractType loadable by Cassandra). The syntax of types is thus:

<type> ::= <native-type>
         | <collection-type>
         | <tuple-type>
         | <string>       // Used for custom types. The fully-qualified name of a JAVA class

<native-type> ::= ascii
                | bigint
                | blob
                | boolean
                | counter
                | date
                | decimal
                | double
                | float
                | inet
                | int
                | smallint
                | text
                | time
                | timestamp
                | timeuuid
                | tinyint
                | uuid
                | varchar
                | varint

<collection-type> ::= list '<' <native-type> '>'
                    | set  '<' <native-type> '>'
                    | map  '<' <native-type> ',' <native-type> '>'
<tuple-type> ::= tuple '<' <type> (',' <type>)* '>'

Note that the native types are keywords and as such are case-insensitive. They are however not reserved ones.

The following table gives additional information on the native data types, and on which kind of constants each type supports:

type constants supported description
ascii strings ASCII character string
bigint integers 64-bit signed long
blob blobs Arbitrary bytes (no validation)
boolean booleans true or false
counter integers Counter column (64-bit signed value). See Counters for details
date integers, strings A date (with no corresponding time value). See Working with dates below for more information.
decimal integers, floats Variable-precision decimal
double integers 64-bit IEEE-754 floating point
float integers, floats 32-bit IEEE-754 floating point
inet strings An IP address. It can be either 4 bytes long (IPv4) or 16 bytes long (IPv6). There is no inet constant; IP addresses should be input as strings
int integers 32-bit signed int
smallint integers 16-bit signed int
text strings UTF8 encoded string
time integers, strings A time with nanosecond precision. See Working with time below for more information.
timestamp integers, strings A timestamp. String constants allow inputting timestamps as dates; see Working with timestamps below for more information.
timeuuid uuids Type 1 UUID. This is generally used as a “conflict-free” timestamp. Also see the functions on Timeuuid
tinyint integers 8-bit signed int
uuid uuids Type 1 or type 4 UUID
varchar strings UTF8 encoded string
varint integers Arbitrary-precision integer

For more information on how to use the collection types, see the Working with collections section below.

Working with timestamps

Values of the timestamp type are encoded as 64-bit signed integers representing a number of milliseconds since the standard base time known as “the epoch”: January 1 1970 at 00:00:00 GMT.

Timestamps can be input in CQL as simple long integers, giving the number of milliseconds since the epoch, as defined above.

They can also be input as string literals in any of the following ISO 8601 formats, each representing the time and date Feb 3, 2011, at 04:05:00 AM, GMT:

  • 2011-02-03 04:05+0000
  • 2011-02-03 04:05:00+0000
  • 2011-02-03 04:05:00.000+0000
  • 2011-02-03T04:05+0000
  • 2011-02-03T04:05:00+0000
  • 2011-02-03T04:05:00.000+0000

The +0000 above is an RFC 822 4-digit time zone specification; +0000 refers to GMT. US Pacific Standard Time is -0800. The time zone may be omitted if desired; the date will be interpreted as being in the time zone under which the coordinating Cassandra node is configured.

  • 2011-02-03 04:05
  • 2011-02-03 04:05:00
  • 2011-02-03 04:05:00.000
  • 2011-02-03T04:05
  • 2011-02-03T04:05:00
  • 2011-02-03T04:05:00.000

There are clear difficulties inherent in relying on the time zone configuration being as expected, though, so it is recommended that the time zone always be specified for timestamps when feasible.

The time of day may also be omitted, if the date is the only piece that matters:

  • 2011-02-03
  • 2011-02-03+0000

In that case, the time of day will default to 00:00:00, in the specified or default time zone.

Working with dates

Values of the date type are encoded as 32-bit unsigned integers representing a number of days with “the epoch” at the center of the range (2^31). The epoch is January 1st, 1970.

A date can be input in CQL as an unsigned integer as defined above.

They can also be input as string literals in the following format:

  • 2014-01-01

Working with time

Values of the time type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.

A time can be input in CQL as simple long integers, giving the number of nanoseconds since midnight.

They can also be input as string literals in any of the following formats:

  • 08:12:54
  • 08:12:54.123
  • 08:12:54.123456
  • 08:12:54.123456789

Counters

The counter type is used to define counter columns. A counter column is a column whose value is a 64-bit signed integer and on which 2 operations are supported: incrementation and decrementation (see UPDATE for syntax). Note that the value of a counter cannot be set. A counter doesn’t exist until first incremented/decremented, and the first incrementation/decrementation is made as if the previous value was 0. Deletion of counter columns is supported but has some limitations (see the Cassandra Wiki for more information).

The use of the counter type is limited in the following way:

  • It cannot be used for a column that is part of the PRIMARY KEY of a table.
  • A table that contains a counter can only contain counters. In other words, either all the columns of a table outside the PRIMARY KEY have the counter type, or none of them have it.
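
For example (a hedged sketch; page_views is a hypothetical table), a counter column is declared and then modified only through increments and decrements:

CREATE TABLE page_views (
    url text PRIMARY KEY,
    views counter
);

UPDATE page_views SET views = views + 1 WHERE url = '/index.html';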

Working with collections

Noteworthy characteristics

Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like “the phone numbers of a given user”, “labels applied to an email”, etc. But when items are expected to grow unbounded (“all the messages sent by a given user”, “events registered by a sensor”, ...), then collections are not appropriate anymore and a specific table (with clustering columns) should be used. Concretely, collections have the following limitations:

  • Collections are always read in their entirety (and reading one is not paged internally).
  • Collections cannot have more than 65535 elements. More precisely, while it may be possible to insert more than 65535 elements, it is not possible to read more than the 65535 first elements (see CASSANDRA-5428 for details).
  • While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do (see the section on lists below for details). It is thus advised to prefer sets over lists when possible.

Please note that while some of those limitations may or may not be loosened in the future, the general rule that collections are for denormalizing small amounts of data is meant to stay.

Maps

A map is a typed set of key-value pairs, where keys are unique. Furthermore, note that maps are internally sorted by their keys and will thus always be returned in that order. To create a column of type map, use the map keyword suffixed with comma-separated key and value types, enclosed in angle brackets. For example:

CREATE TABLE users (
    id text PRIMARY KEY,
    given text,
    surname text,
    favs map<text, text>   // A map of text keys, and text values
)

Writing map data is accomplished with a JSON-inspired syntax. To write a record using INSERT, specify the entire map as a JSON-style associative array. Note: This form will always replace the entire map.

// Inserting (or Updating)
INSERT INTO users (id, given, surname, favs)
           VALUES ('jsmith', 'John', 'Smith', { 'fruit' : 'apple', 'band' : 'Beatles' })

Adding or updating key-values of a (potentially) existing map can be accomplished either by subscripting the map column in an UPDATE statement or by adding a new map literal:

// Updating (or inserting)
UPDATE users SET favs['author'] = 'Ed Poe' WHERE id = 'jsmith'
UPDATE users SET favs = favs +  { 'movie' : 'Cassablanca' } WHERE id = 'jsmith'

Note that TTLs are allowed for both INSERT and UPDATE, but in both cases the TTL set only applies to the newly inserted/updated values. In other words,

// Updating (or inserting)
UPDATE users USING TTL 10 SET favs['color'] = 'green' WHERE id = 'jsmith'

will only apply the TTL to the { 'color' : 'green' } record, the rest of the map remaining unaffected.

Deleting a map record is done with:

DELETE favs['author'] FROM users WHERE id = 'jsmith'

Sets

A set is a typed collection of unique values. Sets are ordered by their values. To create a column of type set, use the set keyword suffixed with the value type enclosed in angle brackets. For example:

CREATE TABLE images (
    name text PRIMARY KEY,
    owner text,
    date timestamp,
    tags set<text>
);

Writing a set is accomplished by comma separating the set values, and enclosing them in curly braces. Note: An INSERT will always replace the entire set.

INSERT INTO images (name, owner, date, tags)
            VALUES ('cat.jpg', 'jsmith', 'now', { 'kitten', 'cat', 'pet' });

Adding and removing values of a set can be accomplished with an UPDATE by adding/removing new set values to an existing set column.

UPDATE images SET tags = tags + { 'cute', 'cuddly' } WHERE name = 'cat.jpg';
UPDATE images SET tags = tags - { 'lame' } WHERE name = 'cat.jpg';

As with maps, TTLs if used only apply to the newly inserted/updated values.

Lists

A list is a typed collection of non-unique values where elements are ordered by their position in the list. To create a column of type list, use the list keyword suffixed with the value type enclosed in angle brackets. For example:

CREATE TABLE plays (
    id text PRIMARY KEY,
    game text,
    players int,
    scores list<int>
)

Do note that as explained below, lists have some limitations and performance considerations to take into account, and it is advised to prefer sets over lists when this is possible.

Writing list data is accomplished with a JSON-style syntax. To write a record using INSERT, specify the entire list as a JSON array. Note: An INSERT will always replace the entire list.

INSERT INTO plays (id, game, players, scores)
           VALUES ('123-afde', 'quake', 3, [17, 4, 2]);

Adding (appending or prepending) values to a list can be accomplished by adding a new JSON-style array to an existing list column.

UPDATE plays SET players = 5, scores = scores + [ 14, 21 ] WHERE id = '123-afde';
UPDATE plays SET players = 5, scores = [ 12 ] + scores WHERE id = '123-afde';

It should be noted that append and prepend are not idempotent operations. This means that if an append or prepend operation times out, it is not always safe to retry the operation (as this could result in the record being appended or prepended twice).

Lists also provide the following operations: setting an element by its position in the list, removing an element by its position in the list, and removing all occurrences of a given value from the list. However, contrary to all the other collection operations, these three operations induce an internal read before the update, and will thus typically have slower performance characteristics. These operations have the following syntax:

UPDATE plays SET scores[1] = 7 WHERE id = '123-afde';                // sets the 2nd element of scores to 7 (raises an error if scores has less than 2 elements)
DELETE scores[1] FROM plays WHERE id = '123-afde';                   // deletes the 2nd element of scores (raises an error if scores has less than 2 elements)
UPDATE plays SET scores = scores - [ 12, 21 ] WHERE id = '123-afde'; // removes all occurrences of 12 and 21 from scores

As with maps, TTLs if used only apply to the newly inserted/updated values.

Functions

CQL3 distinguishes between built-in functions (so called ‘native functions’) and user-defined functions. CQL3 includes several native functions, described below:

Token

The token function allows computing the token for a given partition key. The exact signature of the token function depends on the table concerned and on the partitioner used by the cluster.

The types of the arguments of the token function depend on the types of the partition key columns. The return type depends on the partitioner in use:

  • For Murmur3Partitioner, the return type is bigint.
  • For RandomPartitioner, the return type is varint.
  • For ByteOrderedPartitioner, the return type is blob.

For instance, in a cluster using the default Murmur3Partitioner, if a table is defined by

CREATE TABLE users (
    userid text PRIMARY KEY,
    username text,
    ...
)

then the token function will take a single argument of type text (in that case, the partition key is userid (there are no clustering columns so the partition key is the same as the primary key)), and the return type will be bigint.

Uuid

The uuid function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or SET statements.
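
For example (a hedged sketch; events_by_id is a hypothetical table with a uuid partition key):

CREATE TABLE events_by_id (id uuid PRIMARY KEY, payload text);
INSERT INTO events_by_id (id, payload) VALUES (uuid(), 'some event');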

Timeuuid functions

now

The now function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time the statement using it is executed). Note that this method is useful for insertion but is largely nonsensical in WHERE clauses. For instance, a query of the form

SELECT * FROM myTable WHERE t = now()

will never return any result by design, since the value returned by now() is guaranteed to be unique.

minTimeuuid and maxTimeuuid

The minTimeuuid (resp. maxTimeuuid) function takes a timestamp value t (which can be either a timestamp or a date string) and returns a fake timeuuid corresponding to the smallest (resp. biggest) possible timeuuid having timestamp t. So for instance:

SELECT * FROM myTable WHERE t > maxTimeuuid('2013-01-01 00:05+0000') AND t < minTimeuuid('2013-02-02 10:00+0000')

will select all rows where the timeuuid column t is strictly older than ‘2013-01-01 00:05+0000’ but strictly younger than ‘2013-02-02 10:00+0000’. Please note that t >= maxTimeuuid('2013-01-01 00:05+0000') would still not select a timeuuid generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to t > maxTimeuuid('2013-01-01 00:05+0000').

Warning: We call the values generated by minTimeuuid and maxTimeuuid fake UUIDs because they do not respect the time-based UUID generation process specified by RFC 4122. In particular, the values returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly a bad idea.

Time conversion functions

A number of functions are provided to “convert” a timeuuid, a timestamp or a date into another native type.

function name input type description
toDate timeuuid Converts the timeuuid argument into a date type
toDate timestamp Converts the timestamp argument into a date type
toTimestamp timeuuid Converts the timeuuid argument into a timestamp type
toTimestamp date Converts the date argument into a timestamp type
toUnixTimestamp timeuuid Converts the timeuuid argument into a bigInt raw value
toUnixTimestamp timestamp Converts the timestamp argument into a bigInt raw value
toUnixTimestamp date Converts the date argument into a bigInt raw value
dateOf timeuuid Similar to toTimestamp(timeuuid) (DEPRECATED)
unixTimestampOf timeuuid Similar to toUnixTimestamp(timeuuid) (DEPRECATED)
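
For example (a hedged sketch reusing the myTable table with its timeuuid column t from the samples above), several conversions can be selected at once:

SELECT toDate(t), toTimestamp(t), toUnixTimestamp(t) FROM myTable;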

Blob conversion functions

A number of functions are provided to “convert” the native types into binary data (blob). For every <native-type> type supported by CQL3 (a notable exception is blob, for obvious reasons), the function typeAsBlob takes an argument of type type and returns it as a blob. Conversely, the function blobAsType takes a blob argument and converts it back to a value of the corresponding type. So, for instance, bigintAsBlob(3) is 0x0000000000000003 and blobAsBigint(0x0000000000000003) is 3.

Aggregates

Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.
If normal columns, scalar functions, UDT fields, writetime or ttl are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.

CQL3 distinguishes between built-in aggregates (so called ‘native aggregates’) and user-defined aggregates. CQL3 includes several native aggregates, described below:

Count

The count function can be used to count the rows returned by a query. Example:

SELECT COUNT(*) FROM plays;
SELECT COUNT(1) FROM plays;

It can also be used to count the non-null values of a given column. Example:

SELECT COUNT(scores) FROM plays;

Max and Min

The max and min functions can be used to compute the maximum and the minimum value returned by a query for a given column.

SELECT MIN(players), MAX(players) FROM plays WHERE game = 'quake';

Sum

The sum function can be used to sum up all the values returned by a query for a given column.

SELECT SUM(players) FROM plays;

Avg

The avg function can be used to compute the average of all the values returned by a query for a given column.

SELECT AVG(players) FROM plays;

User-Defined Functions

User-defined functions allow execution of user-provided code in Cassandra. By default, Cassandra supports defining functions in Java and JavaScript. Support for other JSR 223 compliant scripting languages (such as Python, Ruby, and Scala) can be added by adding a JAR to the classpath.

UDFs are part of the Cassandra schema. As such, they are automatically propagated to all nodes in the cluster.

UDFs can be overloaded - i.e. multiple UDFs with different argument types but the same function name. Example:

CREATE FUNCTION sample ( arg int ) ...;
CREATE FUNCTION sample ( arg text ) ...;

User-defined functions are susceptible to all of the normal problems with the chosen programming language. Accordingly, implementations should be safe against null pointer exceptions, illegal arguments, or any other potential source of exceptions. An exception during function execution will result in the entire statement failing.

It is valid to use complex types like collections, tuple types and user-defined types as argument and return types. Tuple types and user-defined types are handled by the conversion functions of the DataStax Java Driver. Please see the documentation of the Java Driver for details on handling tuple types and user-defined types.

Arguments for functions can be literals or terms. Prepared statement placeholders can be used, too.

Note that you can use the double-quoted string syntax to enclose the UDF source code. For example:

CREATE FUNCTION some_function ( arg int )
  RETURNS NULL ON NULL INPUT
  RETURNS int
  LANGUAGE java
  AS $$ return arg; $$;

SELECT some_function(column) FROM atable ...;
UPDATE atable SET col = some_function(?) ...;

CREATE TYPE custom_type (txt text, i int);
CREATE FUNCTION fct_using_udt ( udtarg frozen<custom_type> )
  RETURNS NULL ON NULL INPUT
  RETURNS text
  LANGUAGE java
  AS $$ return udtarg.getString("txt"); $$;

User-defined functions can be used in SELECT, INSERT and UPDATE statements.

See CREATE FUNCTION and DROP FUNCTION.

User-Defined Aggregates

User-defined aggregates allow creation of custom aggregate functions using UDFs. Common examples of aggregate functions are count, min, and max.

Each aggregate requires an initial state (INITCOND, which defaults to null) of type STYPE. The first argument of the state function must have type STYPE. The remaining arguments of the state function must match the types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with last state value as its argument.

STYPE is mandatory in order to be able to distinguish possibly overloaded versions of the state and/or final function (since the overload can appear after creation of the aggregate).

User-defined aggregates can be used in SELECT statement.

A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the USE statement):

CREATE OR REPLACE FUNCTION averageState ( state tuple<int,bigint>, val int )
  CALLED ON NULL INPUT
  RETURNS tuple<int,bigint>
  LANGUAGE java
  AS '
    if (val != null) {
      state.setInt(0, state.getInt(0)+1);
      state.setLong(1, state.getLong(1)+val.intValue());
    }
    return state;
  ';

CREATE OR REPLACE FUNCTION averageFinal ( state tuple<int,bigint> )
  CALLED ON NULL INPUT
  RETURNS double
  LANGUAGE java
  AS '
    double r = 0;
    if (state.getInt(0) == 0) return null;
    r = state.getLong(1);
    r /= state.getInt(0);
    return Double.valueOf(r);
  ';

CREATE OR REPLACE AGGREGATE average ( int )
  SFUNC averageState
  STYPE tuple<int,bigint>
  FINALFUNC averageFinal
  INITCOND (0, 0);

CREATE TABLE atable (
  pk int PRIMARY KEY,
  val int);
INSERT INTO atable (pk, val) VALUES (1,1);
INSERT INTO atable (pk, val) VALUES (2,2);
INSERT INTO atable (pk, val) VALUES (3,3);
INSERT INTO atable (pk, val) VALUES (4,4);
SELECT average(val) FROM atable;

See CREATE AGGREGATE and DROP AGGREGATE.

JSON Support

Cassandra 2.2 introduces JSON support to SELECT and INSERT statements. This support does not fundamentally alter the CQL API (for example, the schema is still enforced); it simply provides a convenient way to work with JSON documents.

SELECT JSON

With SELECT statements, the new JSON keyword can be used to return each row as a single JSON encoded map. The remainder of the SELECT statement behavior is the same.

The result map keys are the same as the column names in a normal result set. For example, a statement like "SELECT JSON a, ttl(b) FROM ..." would result in a map with keys "a" and "ttl(b)". However, there is one notable exception: for symmetry with INSERT JSON behavior, case-sensitive column names with upper-case letters will be surrounded with double quotes. For example, "SELECT JSON myColumn FROM ..." would result in a map key "\"myColumn\"" (note the escaped quotes).

The map values will be JSON-encoded representations (as described below) of the result set values.

INSERT JSON

With INSERT statements, the new JSON keyword can be used to enable inserting a JSON encoded map as a single row. The format of the JSON map should generally match that returned by a SELECT JSON statement on the same table. In particular, case-sensitive column names should be surrounded with double quotes. For example, to insert into a table with two columns named “myKey” and “value”, you would do the following:

INSERT INTO mytable JSON '{"\"myKey\"": 0, "value": 0}'

Any columns which are omitted from the JSON map will default to a NULL value (which will result in a tombstone being created).

JSON Encoding of Cassandra Data Types

Where possible, Cassandra will represent and accept data types in their native JSON representation. Cassandra will also accept string representations matching the CQL literal format for all single-field types. For example, floats, ints, UUIDs, and dates can be represented by CQL literal strings. However, compound types, such as collections, tuples, and user-defined types must be represented by native JSON collections (maps and lists) or a JSON-encoded string representation of the collection.

The following table describes the encodings that Cassandra will accept in INSERT JSON values (and fromJson() arguments) as well as the format Cassandra will use when returning data for SELECT JSON statements (and toJson()):

type formats accepted return format notes
ascii string string Uses JSON’s \u character escape
bigint integer, string integer String must be valid 64 bit integer
blob string string String should be 0x followed by an even number of hex digits
boolean boolean, string boolean String must be “true” or "false"
date string string Date in format YYYY-MM-DD, timezone UTC
decimal integer, float, string float May exceed 32 or 64-bit IEEE-754 floating point precision in client-side decoder
double integer, float, string float String must be valid integer or float
float integer, float, string float String must be valid integer or float
inet string string IPv4 or IPv6 address
int integer, string integer String must be valid 32 bit integer
list list, string list Uses JSON’s native list representation
map map, string map Uses JSON’s native map representation
smallint integer, string integer String must be valid 16 bit integer
set list, string list Uses JSON’s native list representation
text string string Uses JSON’s \u character escape
time string string Time of day in format HH:MM:SS[.fffffffff]
timestamp integer, string string A timestamp. String constants allow inputting timestamps as dates; see Working with timestamps above for more information. Timestamps with format YYYY-MM-DD HH:MM:SS.SSS are returned.
timeuuid string string Type 1 UUID. See Constants for the UUID format
tinyint integer, string integer String must be valid 8 bit integer
tuple list, string list Uses JSON’s native list representation
UDT map, string map Uses JSON’s native map representation with field names as keys
uuid string string See Constants for the UUID format
varchar string string Uses JSON’s \u character escape
varint integer, string integer Variable length; may overflow 32 or 64 bit integers in client-side decoder
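The sketch below shows a few of these encodings side by side; the events table, column names, and values are purely illustrative:

```
-- hypothetical table used only to illustrate the encodings above
CREATE TABLE events (id uuid PRIMARY KEY, ts timestamp, tags set<text>);

-- single-field types may be given natively (integer for the timestamp) or as
-- CQL-literal strings (the uuid); the set must be a native JSON list
INSERT INTO events JSON
  '{"id": "6bddc89a-5644-11e4-97fc-56847afe9799",
    "ts": 1438013821000,
    "tags": ["cql", "json"]}';
```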

The fromJson() Function

The fromJson() function may be used similarly to INSERT JSON, but for a single column value. It may only be used in the VALUES clause of an INSERT statement or as one of the column values in an UPDATE, DELETE, or SELECT statement. For example, it cannot be used in the selection clause of a SELECT statement.
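A brief example, reusing the hypothetical events table from above:

```
-- fromJson() decodes a single column value; here the JSON list becomes a set<text>
INSERT INTO events (id, tags) VALUES (uuid(), fromJson('["cql", "json"]'));
```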

The toJson() Function

The toJson() function may be used similarly to SELECT JSON, but for a single column value. It may only be used in the selection clause of a SELECT statement.
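Again using the hypothetical events table:

```
-- toJson() encodes a single selected column as a JSON string
SELECT id, toJson(tags) FROM events;
```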

Appendix A: CQL Keywords

CQL distinguishes between reserved and non-reserved keywords. Reserved keywords cannot be used as identifiers; they are truly reserved for the language (though one can enclose a reserved keyword in double quotes to use it as an identifier). Non-reserved keywords, however, only have a specific meaning in certain contexts but can be used as identifiers otherwise. The only raison d'être of these non-reserved keywords is convenience: some keywords are non-reserved where it was always easy for the parser to decide whether they were used as keywords or not.
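For example (the keyspace and column names here are purely illustrative), a reserved keyword can still be used as an identifier when double-quoted, while non-reserved keywords can be used directly:

```
-- BATCH and ADD are reserved, so they must be quoted to serve as identifiers;
-- JSON and KEY are non-reserved and can be used as-is
CREATE TABLE ks."batch" ("add" int PRIMARY KEY, json text, key text);
```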

Keyword Reserved?
ADD yes
AGGREGATE no
ALL no
ALLOW yes
ALTER yes
AND yes
APPLY yes
AS no
ASC yes
ASCII no
AUTHORIZE yes
BATCH yes
BEGIN yes
BIGINT no
BLOB no
BOOLEAN no
BY yes
CALLED no
CLUSTERING no
COLUMNFAMILY yes
COMPACT no
CONTAINS no
COUNT no
COUNTER no
CREATE yes
CUSTOM no
DATE no
DECIMAL no
DELETE yes
DESC yes
DESCRIBE yes
DISTINCT no
DOUBLE no
DROP yes
ENTRIES yes
EXECUTE yes
EXISTS no
FILTERING no
FINALFUNC no
FLOAT no
FROM yes
FROZEN no
FULL yes
FUNCTION no
FUNCTIONS no
GRANT yes
IF yes
IN yes
INDEX yes
INET no
INFINITY yes
INITCOND no
INPUT no
INSERT yes
INT no
INTO yes
JSON no
KEY no
KEYS no
KEYSPACE yes
KEYSPACES no
LANGUAGE no
LIMIT yes
LIST no
LOGIN no
MAP no
MODIFY yes
NAN yes
NOLOGIN no
NORECURSIVE yes
NOSUPERUSER no
NOT yes
NULL yes
OF yes
ON yes
OPTIONS no
OR yes
ORDER yes
PASSWORD no
PERMISSION no
PERMISSIONS no
PRIMARY yes
RENAME yes
REPLACE yes
RETURNS no
REVOKE yes
ROLE no
ROLES no
SCHEMA yes
SELECT yes
SET yes
SFUNC no
SMALLINT no
STATIC no
STORAGE no
STYPE no
SUPERUSER no
TABLE yes
TEXT no
TIME no
TIMESTAMP no
TIMEUUID no
TINYINT no
TO yes
TOKEN yes
TRIGGER no
TRUNCATE yes
TTL no
TUPLE no
TYPE no
UNLOGGED yes
UPDATE yes
USE yes
USER no
USERS no
USING yes
UUID no
VALUES no
VARCHAR no
VARINT no
WHERE yes
WITH yes
WRITETIME no

Appendix B: CQL Reserved Types

The following type names are not currently used by CQL, but are reserved for potential future use. User-defined types may not use reserved type names as their name.

type
bitstring
byte
complex
date
enum
interval
macaddr

Changes

The following describes the changes in each version of CQL.

3.4.0

  • Support for materialized views
  • DELETE support for inequality expressions and IN restrictions on any primary key columns
  • UPDATE support for IN restrictions on any primary key columns

3.3.1

  • The syntax TRUNCATE TABLE X is now accepted as an alias for TRUNCATE X

3.3.0

  • Adds new aggregates
  • User-defined functions are now supported through CREATE FUNCTION and DROP FUNCTION.
  • User-defined aggregates are now supported through CREATE AGGREGATE and DROP AGGREGATE.
  • Allows double-dollar enclosed string literals as an alternative to single-quote enclosed strings.
  • Introduces Roles to supersede user-based authentication and access control.
  • Date and Time data types have been added
  • JSON support has been added
  • Tinyint and Smallint data types have been added
  • Adds new time conversion functions and deprecates dateOf and unixTimestampOf. See Time conversion functions.

3.2.0

  • User-defined types are now supported through CREATE TYPE, ALTER TYPE, and DROP TYPE
  • CREATE INDEX now supports indexing collection columns, including indexing the keys of map collections through the keys() function
  • Indexes on collections may be queried using the new CONTAINS and CONTAINS KEY operators
  • Tuple types were added to hold fixed-length sets of typed positional fields (see the section on types).
  • DROP INDEX now supports optionally specifying a keyspace

3.1.7

  • SELECT statements now support selecting multiple rows in a single partition using an IN clause on combinations of clustering columns. See SELECT WHERE clauses.
  • IF NOT EXISTS and IF EXISTS syntax is now supported by CREATE USER and DROP USER statements, respectively.

3.1.6

  • A new uuid method has been added.
  • Support for DELETE ... IF EXISTS syntax.

3.1.5

3.1.4

3.1.3

  • Millisecond precision formats have been added to the timestamp parser (see working with dates).

3.1.2

  • NaN and Infinity have been added as valid float constants. They are now reserved keywords. In the unlikely case you were using them as a column identifier (or keyspace/table name), you will now need to double-quote them (see quoted identifiers).

3.1.1

  • SELECT statement now allows listing the partition keys (using the DISTINCT modifier). See CASSANDRA-4536.
  • The syntax c IN ? is now supported in WHERE clauses. In that case, the value expected for the bind variable will be a list of whatever type c is.
  • It is now possible to use named bind variables (using :name instead of ?).

3.1.0

  • ALTER TABLE DROP option has been re-enabled for CQL3 tables and has new semantics now: the space formerly used by dropped columns will now be eventually reclaimed (post-compaction). You should not re-add previously dropped columns unless you use timestamps with microsecond precision (see CASSANDRA-3919 for more details).
  • SELECT statement now supports aliases in the select clause. Aliases in WHERE and ORDER BY clauses are not supported. See the section on select for details.
  • CREATE statements for KEYSPACE, TABLE and INDEX now support an IF NOT EXISTS condition. Similarly, DROP statements support an IF EXISTS condition.
  • INSERT statements optionally support an IF NOT EXISTS condition and UPDATE supports IF conditions.

3.0.5

  • SELECT, UPDATE, and DELETE statements now allow empty IN relations (see CASSANDRA-5626).

3.0.4

  • Updated the syntax for custom secondary indexes.
  • Non-equal conditions on the partition key are no longer supported, even for an ordered partitioner, as this was not correct (the order was not that of the partition key's type). Instead, the token method should always be used for range queries on the partition key (see WHERE clauses).

3.0.3

3.0.2

  • Type validation for constants has been fixed. For instance, the implementation used to allow '2' as a valid value for an int column (interpreting it as the equivalent of 2), or 42 as a valid blob value (in which case 42 was interpreted as a hexadecimal representation of the blob). This is no longer the case; type validation of constants is now stricter. See the data types section for details on which constants are allowed for which type.
  • The type validation fix of the previous point has led to the introduction of blob constants to allow inputting blobs. Do note that while inputting blobs as string constants is still supported by this version (to allow a smoother transition to blob constants), it is now deprecated (in particular the data types section does not list string constants as valid blobs) and will be removed in a future version. If you were using strings as blobs, you should thus update your client code ASAP to switch to blob constants.
  • A number of functions to convert native types to blobs have also been introduced. Furthermore, the token function is now also allowed in select clauses. See the section on functions for details.

3.0.1

  • Date strings (and timestamps) are no longer accepted as valid timeuuid values. Doing so was a bug in the sense that date strings are not valid timeuuids, and it was thus resulting in confusing behavior. However, the following new methods have been added to help when working with timeuuid: now, minTimeuuid, maxTimeuuid, dateOf and unixTimestampOf. See the section dedicated to these methods for more detail.
  • Float constants now support exponent notation. In other words, 4.2E10 is now a valid floating point value.

Versioning

Versioning of the CQL language adheres to the Semantic Versioning guidelines. Versions take the form X.Y.Z where X, Y, and Z are integer values representing major, minor, and patch level respectively. There is no correlation between Cassandra release versions and the CQL language version.

version | description
Major   | The major version must be bumped when backward incompatible changes are introduced. This should rarely occur.
Minor   | Minor version increments occur when new, but backward compatible, functionality is introduced.
Patch   | The patch version is incremented when bugs are fixed.
\ No newline at end of file diff --git a/src/doc/old/CQL.css b/src/doc/old/CQL.css deleted file mode 100644 index 601c01364..000000000 --- a/src/doc/old/CQL.css +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* Default (and example) style sheet for CQL.html */ - -pre.sample { - padding: 1ex; - padding-left: 4ex; - border: 1px solid #ada; - background-color: #fafffa; -} - -pre.syntax { - padding: 1ex; - padding-left: 4ex; - border: 1px solid #aaf; - background-color: #fafaff; -} - -p.banner { - padding: 1ex; - padding-left: 4ex; - border-top: 1px solid #faa; - border-bottom: 1px solid #faa; - background-color: #fffafa; - color: #b33; - text-align: center; -} - -table { - margin-left: 4ex; - /* width: 80%; */ - border-collapse: collapse; - border: 2px solid #bbb; -} - -td, th { - padding: 2px 1ex; - border: 1px solid #bbb; -} - -th { - background-color: #f0f0f0; -} - -body { - background-color: white; - padding: 0 2ex; -} - -h1 { - text-align: center; - margin-bottom: 3ex; -} - -h2 { - text-align: center; - border-top: 1px solid #aaa; - border-bottom: 1px solid #aaa; - background-color: #eee; - margin-top: 5ex; -} - -h3 { - padding-top: 3ex; -} - -a { - color:#333; - font-size:90%; - text-decoration:none; - font-weight:bold; -} - -a:link {color:#333;} -a:visited {color:#111;} -a:hover { - color:#777; - text-decoration: underline; -} - -span#tableOfContents { - border: 1px solid #bbb; - background-color: #fafafa; - display: inline-block; - padding-right: 6ex; -} diff --git a/src/doc/stable b/src/doc/stable deleted file mode 120000 index 902b2c90c..000000000 --- a/src/doc/stable +++ /dev/null @@ -1 +0,0 @@ -3.11 \ No newline at end of file diff --git a/src/download.md b/src/download.md deleted file mode 100644 index 384c79c12..000000000 --- a/src/download.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -layout: page -permalink: /download/ -title: Download -is_homepage: false -is_sphinx_doc: false ---- - -Downloading Cassandra ---------------------- - -### Latest Beta Version - -Download the latest Apache Cassandra 4.0 beta release: {{ "latest" | full_release_link }}. - -### Latest Stable Version - -Download the latest Apache Cassandra 3.11 release: {{ "3.11" | full_release_link }}. - -### Older Supported Releases - -The following older Cassandra releases are still supported: - -* Apache Cassandra 3.0 is supported until **6 months after 4.0 release (date TBD)**. The latest release is {{ "3.0" | full_release_link }}. -* Apache Cassandra 2.2 is supported until **4.0 release (date TBD)**. The latest release is {{ "2.2" | full_release_link }}. -* Apache Cassandra 2.1 is supported until **4.0 release (date TBD)** with **critical fixes only**. The latest release is - {{ "2.1" | full_release_link }}. 
- -Older (unsupported) versions of Cassandra are [archived here](http://archive.apache.org/dist/cassandra/). - -### Installation from Debian packages - -* For the `` specify the major version number, without dot, and with an appended `x`. -* The latest `` is `311x`. -* For older releases, the `` can be one of `30x`, `22x`, or `21x`. - -* Add the Apache repository of Cassandra to `/etc/apt/sources.list.d/cassandra.sources.list`, for example for the latest 3.11 version: - -``` -echo "deb https://downloads.apache.org/cassandra/debian 311x main" | sudo tee -a /etc/apt/sources.list.d/cassandra.sources.list -``` - -* Add the Apache Cassandra repository keys: - -``` -curl https://downloads.apache.org/cassandra/KEYS | sudo apt-key add - -``` - -* Update the repositories: - -``` -sudo apt-get update -``` - -* If you encounter this error: - -``` -GPG error: http://www.apache.org 311x InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A278B781FE4B2BDA -``` -Then add the public key A278B781FE4B2BDA as follows: - -``` -sudo apt-key adv --keyserver pool.sks-keyservers.net --recv-key A278B781FE4B2BDA -``` -and repeat `sudo apt-get update`. The actual key may be different, you get it from the error message itself. For a -full list of Apache contributors public keys, you can refer to . - -* Install Cassandra: - -``` -sudo apt-get install cassandra -``` - -* You can start Cassandra with `sudo service cassandra start` and stop it with `sudo service cassandra stop`. - However, normally the service will start automatically. For this reason be sure to stop it if you need to make any - configuration changes. -* Verify that Cassandra is running by invoking `nodetool status` from the command line. -* The default location of configuration files is `/etc/cassandra`. -* The default location of log and data directories is `/var/log/cassandra/` and `/var/lib/cassandra`. -* Start-up options (heap size, etc) can be configured in `/etc/default/cassandra`. - -### Installation from RPM packages - -* For the `` specify the major version number, without dot, and with an appended `x`. -* The latest `` is `311x`. -* For older releases, the `` can be one of `30x`, `22x`, or `21x`. -* (Not all versions of Apache Cassandra are available, since building RPMs is a recent addition to the project.) - -* Add the Apache repository of Cassandra to `/etc/yum.repos.d/cassandra.repo`, for example for the latest 3.11 version: - -```text -[cassandra] -name=Apache Cassandra -baseurl=https://downloads.apache.org/cassandra/redhat/311x/ -gpgcheck=1 -repo_gpgcheck=1 -gpgkey=https://downloads.apache.org/cassandra/KEYS -``` - -* Install Cassandra, accepting the gpg key import prompts: - -``` -sudo yum install cassandra -``` - -Start Cassandra (will not start automatically): - -``` -service cassandra start -``` - -Systemd based distributions may require to run `systemctl daemon-reload` once to make Cassandra available as a systemd service. This should happen automatically by running the command above. - -Make Cassandra start automatically after reboot: - -``` -chkconfig cassandra on -``` - -Please note that official RPMs for Apache Cassandra only have been available recently and are not tested thoroughly on all platforms yet. We appreciate your feedback and support and ask you to post details on any issues in the [corresponding Jira ticket](https://issues.apache.org/jira/browse/CASSANDRA-13433). - - -### Source - -Development is done in the Apache Git repository. 
To check out a copy: - -``` -git clone https://gitbox.apache.org/repos/asf/cassandra.git -``` diff --git a/src/icons/back.gif b/src/icons/back.gif deleted file mode 100644 index f191b280c..000000000 Binary files a/src/icons/back.gif and /dev/null differ diff --git a/src/icons/blank.gif b/src/icons/blank.gif deleted file mode 100644 index f191b280c..000000000 Binary files a/src/icons/blank.gif and /dev/null differ diff --git a/src/icons/folder.gif b/src/icons/folder.gif deleted file mode 100644 index f191b280c..000000000 Binary files a/src/icons/folder.gif and /dev/null differ diff --git a/src/img/apachecon-2019.jpg b/src/img/apachecon-2019.jpg deleted file mode 100644 index 5683cbb8f..000000000 Binary files a/src/img/apachecon-2019.jpg and /dev/null differ diff --git a/src/img/asf_feather.png b/src/img/asf_feather.png deleted file mode 100644 index c0ebf11a6..000000000 Binary files a/src/img/asf_feather.png and /dev/null differ diff --git a/src/img/blog-post-apache-cassandra-4-0-beta1/apache-cassandra-infographic-final.jpg b/src/img/blog-post-apache-cassandra-4-0-beta1/apache-cassandra-infographic-final.jpg deleted file mode 100644 index 6d99f4958..000000000 Binary files a/src/img/blog-post-apache-cassandra-4-0-beta1/apache-cassandra-infographic-final.jpg and /dev/null differ diff --git a/src/img/blog-post-benchmarking-streaming/cassandra_streaming.png b/src/img/blog-post-benchmarking-streaming/cassandra_streaming.png deleted file mode 100644 index 99b7bc0f4..000000000 Binary files a/src/img/blog-post-benchmarking-streaming/cassandra_streaming.png and /dev/null differ diff --git a/src/img/blog-post-introducing-transient-replication/diagram-hash-ring-with-transient-replica.gif b/src/img/blog-post-introducing-transient-replication/diagram-hash-ring-with-transient-replica.gif deleted file mode 100644 index 23c11d9df..000000000 Binary files a/src/img/blog-post-introducing-transient-replication/diagram-hash-ring-with-transient-replica.gif and /dev/null differ diff --git a/src/img/blog-post-introducing-transient-replication/diagram-hash-ring.gif b/src/img/blog-post-introducing-transient-replication/diagram-hash-ring.gif deleted file mode 100644 index 95a199584..000000000 Binary files a/src/img/blog-post-introducing-transient-replication/diagram-hash-ring.gif and /dev/null differ diff --git a/src/img/blog-post-introducing-transient-replication/diagram-rapid-write-protection.gif b/src/img/blog-post-introducing-transient-replication/diagram-rapid-write-protection.gif deleted file mode 100644 index 7f54c8020..000000000 Binary files a/src/img/blog-post-introducing-transient-replication/diagram-rapid-write-protection.gif and /dev/null differ diff --git a/src/img/blog-post-introducing-transient-replication/diagram-regular-write.gif b/src/img/blog-post-introducing-transient-replication/diagram-regular-write.gif deleted file mode 100644 index 2a8d128a9..000000000 Binary files a/src/img/blog-post-introducing-transient-replication/diagram-regular-write.gif and /dev/null differ diff --git a/src/img/blog-post-introducing-transient-replication/diagram-transient-write-down-node.gif b/src/img/blog-post-introducing-transient-replication/diagram-transient-write-down-node.gif deleted file mode 100644 index f6986d016..000000000 Binary files a/src/img/blog-post-introducing-transient-replication/diagram-transient-write-down-node.gif and /dev/null differ diff --git a/src/img/blog-post-introducing-transient-replication/diagram-transient-write.gif 
b/src/img/blog-post-introducing-transient-replication/diagram-transient-write.gif deleted file mode 100644 index cda41d715..000000000 Binary files a/src/img/blog-post-introducing-transient-replication/diagram-transient-write.gif and /dev/null differ diff --git a/src/img/cassandra_logo.png b/src/img/cassandra_logo.png deleted file mode 100644 index e87de50d7..000000000 Binary files a/src/img/cassandra_logo.png and /dev/null differ diff --git a/src/index.html b/src/index.html deleted file mode 100644 index 91aefe194..000000000 --- a/src/index.html +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: default -title: "Apache Cassandra" -is_homepage: true ---- - -
-
-
-

What is Cassandra?

-

- The Apache Cassandra database is the right choice when you need scalability and high availability without - compromising performance. Linear scalability - and proven fault-tolerance on commodity hardware or cloud infrastructure make it the perfect platform for - mission-critical data. Cassandra's support for replicating across multiple datacenters is best-in-class, providing - lower latency for your users and the peace of mind of knowing that you can survive regional outages. -

-
- -
-
-

Proven

-

- Cassandra is in use at - Activision, - Apple, - BazaarVoice, - Best Buy, - CERN, - Constant Contact, - Comcast, - eBay, - Fidelity, - Github, - Hulu, - ING, - Instagram, - Intuit, - Macy's™, - Macquarie Bank, - Microsoft, - McDonalds, - Netflix, - New York Times, - Outbrain, - Pearson Education, - Sky, - Spotify, - Uber, - Walmart, - and thousands of other companies that have large, active data sets. In fact, Cassandra is used by 40% of the Fortune 100.

-
- -
-

Fault tolerant

-

- Data is automatically replicated to multiple nodes for fault-tolerance. Replication across multiple data centers - is supported. Failed nodes can be replaced with no downtime. -

-
- -
-

Performant

-

- Cassandra consistently - outperforms popular - NoSQL alternatives in benchmarks and real applications, - primarily because of fundamental architectural choices. -

-
- -
-

Decentralized

-

- There are no single points of failure. There are no network bottlenecks. Every node in the cluster is - identical. -

-
- -
-

Scalable

-

- Some of the largest production deployments include Apple's, with over 75,000 nodes storing over 10 PB of data, - Netflix (2,500 nodes, 420 TB, over 1 trillion requests per day), Chinese search engine Easou (270 nodes, 300 TB, - over 800 million requests per day), and eBay (over 100 nodes, 250 TB). -

-
-
- -
-
-

Durable

-

- Cassandra is suitable for applications that can't afford to lose data, - even when an entire data center goes down. -

-
- -
-

You're in control

-

- Choose between synchronous or asynchronous replication for each update. Highly available asynchronous operations are - optimized with features like Hinted Handoff and - Read Repair.

-

-
- -
-

Elastic

-

- Read and write throughput both increase linearly as new machines are added, with no downtime or interruption - to applications. -

-
- -
-

Professionally Supported

-

- Cassandra support contracts and services are available from third parties. -

-
-
- -
-
diff --git a/src/js/doctools.js b/src/js/doctools.js deleted file mode 100644 index 816349563..000000000 --- a/src/js/doctools.js +++ /dev/null @@ -1,287 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s == 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node) { - if (node.nodeType == 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { - var span = document.createElement("span"); - span.className = className; - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this); - }); - } - } - return this.each(function() { - highlight(this); - }); -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. 
- */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated == 'undefined') - return string; - return (typeof translated == 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated == 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) == 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this == '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); \ No newline at end of file diff --git a/src/js/searchtools.js b/src/js/searchtools.js deleted file mode 100644 index 64012152d..000000000 --- a/src/js/searchtools.js +++ /dev/null @@ -1,651 +0,0 @@ -/* - * searchtools.js_t - * ~~~~~~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for the full-text search. - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - - -/* Non-minified version JS is _stemmer.js if file is provided */ -/** - * Porter Stemmer - */ -var Stemmer = function() { - - var step2list = { - ational: 'ate', - tional: 'tion', - enci: 'ence', - anci: 'ance', - izer: 'ize', - bli: 'ble', - alli: 'al', - entli: 'ent', - eli: 'e', - ousli: 'ous', - ization: 'ize', - ation: 'ate', - ator: 'ate', - alism: 'al', - iveness: 'ive', - fulness: 'ful', - ousness: 'ous', - aliti: 'al', - iviti: 'ive', - biliti: 'ble', - logi: 'log' - }; - - var step3list = { - icate: 'ic', - ative: '', - alize: 'al', - iciti: 'ic', - ical: 'ic', - ful: '', - ness: '' - }; - - var c = "[^aeiou]"; // consonant - var v = "[aeiouy]"; // vowel - var C = c + "[^aeiouy]*"; // consonant sequence - var V = v + "[aeiou]*"; // vowel sequence - - var mgr0 = "^(" + C + ")?" 
+ V + C; // [C]VC... is m>0 - var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 - var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 - var s_v = "^(" + C + ")?" + v; // vowel in stem - - this.stemWord = function (w) { - var stem; - var suffix; - var firstch; - var origword = w; - - if (w.length < 3) - return w; - - var re; - var re2; - var re3; - var re4; - - firstch = w.substr(0,1); - if (firstch == "y") - w = firstch.toUpperCase() + w.substr(1); - - // Step 1a - re = /^(.+?)(ss|i)es$/; - re2 = /^(.+?)([^s])s$/; - - if (re.test(w)) - w = w.replace(re,"$1$2"); - else if (re2.test(w)) - w = w.replace(re2,"$1$2"); - - // Step 1b - re = /^(.+?)eed$/; - re2 = /^(.+?)(ed|ing)$/; - if (re.test(w)) { - var fp = re.exec(w); - re = new RegExp(mgr0); - if (re.test(fp[1])) { - re = /.$/; - w = w.replace(re,""); - } - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1]; - re2 = new RegExp(s_v); - if (re2.test(stem)) { - w = stem; - re2 = /(at|bl|iz)$/; - re3 = new RegExp("([^aeiouylsz])\\1$"); - re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re2.test(w)) - w = w + "e"; - else if (re3.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - else if (re4.test(w)) - w = w + "e"; - } - } - - // Step 1c - re = /^(.+?)y$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(s_v); - if (re.test(stem)) - w = stem + "i"; - } - - // Step 2 - re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step2list[suffix]; - } - - // Step 3 - re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step3list[suffix]; - } - - // Step 4 - re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; - re2 = /^(.+?)(s|t)(ion)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - if (re.test(stem)) - w = stem; - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1] + fp[2]; - re2 = new RegExp(mgr1); - if (re2.test(stem)) - w = stem; - } - - // Step 5 - re = /^(.+?)e$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - re2 = new RegExp(meq1); - re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) - w = stem; - } - re = /ll$/; - re2 = new RegExp(mgr1); - if (re.test(w) && re2.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - - // and turn initial Y back to y - if (firstch == "y") - w = firstch.toLowerCase() + w.substr(1); - return w; - } -} - - - -/** - * Simple result scoring code. - */ -var Scorer = { - // Implement the following function to further tweak the score for each result - // The function takes a result array [filename, title, anchor, descr, score] - // and returns the new score. 
- /* - score: function(result) { - return result[4]; - }, - */ - - // query matches the full name of an object - objNameMatch: 11, - // or matches in the last dotted part of the object name - objPartialMatch: 6, - // Additive scores depending on the priority of the object - objPrio: {0: 15, // used to be importantResults - 1: 5, // used to be objectResults - 2: -5}, // used to be unimportantResults - // Used when the priority is not in the mapping. - objPrioDefault: 0, - - // query found in title - title: 15, - // query found in terms - term: 5 -}; - - -/** - * Search Module - */ -var Search = { - - _index : null, - _queued_query : null, - _pulse_status : -1, - - init : function() { - var params = $.getQueryParameters(); - if (params.q) { - var query = params.q[0]; - $('input[name="q"]')[0].value = query; - this.performSearch(query); - } - }, - - loadIndex : function(url) { - $.ajax({type: "GET", url: url, data: null, - dataType: "script", cache: true, - complete: function(jqxhr, textstatus) { - if (textstatus != "success") { - document.getElementById("searchindexloader").src = url; - } - }}); - }, - - setIndex : function(index) { - var q; - this._index = index; - if ((q = this._queued_query) !== null) { - this._queued_query = null; - Search.query(q); - } - }, - - hasIndex : function() { - return this._index !== null; - }, - - deferQuery : function(query) { - this._queued_query = query; - }, - - stopPulse : function() { - this._pulse_status = 0; - }, - - startPulse : function() { - if (this._pulse_status >= 0) - return; - function pulse() { - var i; - Search._pulse_status = (Search._pulse_status + 1) % 4; - var dotString = ''; - for (i = 0; i < Search._pulse_status; i++) - dotString += '.'; - Search.dots.text(dotString); - if (Search._pulse_status > -1) - window.setTimeout(pulse, 500); - } - pulse(); - }, - - /** - * perform a search for something (or wait until index is loaded) - */ - performSearch : function(query) { - // create the required interface elements - this.out = $('#search-results'); - this.title = $('

' + _('Searching') + '

').appendTo(this.out); - this.dots = $('').appendTo(this.title); - this.status = $('

').appendTo(this.out); - this.output = $('